Dataset schema (one record per entry below; ⌀ marks columns that contain nulls):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |
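A minimal sketch of how records with this schema could be loaded and filtered follows. It is illustrative only: the Parquet file name and the use of pandas are assumptions, not part of this dump.

import pandas as pd

# Illustrative assumption: the records shown below have been exported to a Parquet file.
df = pd.read_parquet("data.parquet")

# Keep well-documented Python files from permissively licensed repositories.
subset = df[
    (df["lang"] == "Python")
    & (df["score_documentation"] > 0.5)
    & (df["max_stars_repo_licenses"].apply(lambda ls: "MIT" in ls or "Apache-2.0" in ls))
]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())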

hexsha: e168edf2fd4a9f7e34f7a83253f12b25417c2d21 | size: 3,113 | ext: py | lang: Python
max_stars: repo=epuzanov/pivy, path=examples/utils/interaction.py, head=2f049c19200ab4a3a1e4740268450496c12359f9, licenses=["ISC"], count=29, events=[2019-12-28T10:37:16.000Z, 2022-02-09T10:48:04.000Z]
max_issues: repo=epuzanov/pivy, path=examples/utils/interaction.py, head=2f049c19200ab4a3a1e4740268450496c12359f9, licenses=["ISC"], count=29, events=[2019-12-26T13:46:11.000Z, 2022-03-29T18:14:33.000Z]
max_forks: repo=epuzanov/pivy, path=examples/utils/interaction.py, head=2f049c19200ab4a3a1e4740268450496c12359f9, licenses=["ISC"], count=17, events=[2019-12-29T11:49:32.000Z, 2022-02-23T00:28:18.000Z]
content:
import sys
from PySide2.QtWidgets import QApplication
from PySide2.QtGui import QColor
from pivy import quarter, coin, graphics, utils
class ConnectionMarker(graphics.Marker):
def __init__(self, points):
super(ConnectionMarker, self).__init__(points, True)
class ConnectionPolygon(graphics.Polygon):
std_col = "green"
def __init__(self, markers):
super(ConnectionPolygon, self).__init__(
sum([m.points for m in markers], []), True)
self.markers = markers
for m in self.markers:
m.on_drag.append(self.update_polygon)
def update_polygon(self):
self.points = sum([m.points for m in self.markers], [])
@property
def drag_objects(self):
return self.markers
def check_dependency(self):
if any([m._delete for m in self.markers]):
self.delete()
class ConnectionLine(graphics.Line):
def __init__(self, markers):
super(ConnectionLine, self).__init__(
sum([m.points for m in markers], []), True)
self.markers = markers
for m in self.markers:
m.on_drag.append(self.update_line)
def update_line(self):
self.points = sum([m.points for m in self.markers], [])
@property
def drag_objects(self):
return self.markers
def check_dependency(self):
if any([m._delete for m in self.markers]):
self.delete()
def main():
app = QApplication(sys.argv)
utils.addMarkerFromSvg("test.svg", "CUSTOM_MARKER", 40)
viewer = quarter.QuarterWidget()
root = graphics.InteractionSeparator(viewer.sorendermanager)
root.pick_radius = 40
m1 = ConnectionMarker([[-1, -1, -1]])
m2 = ConnectionMarker([[-1, 1, -1]])
m3 = ConnectionMarker([[ 1, 1, -1]])
m4 = ConnectionMarker([[ 1, -1, -1]])
m5 = ConnectionMarker([[-1, -1, 1]])
m6 = ConnectionMarker([[-1, 1, 1]])
m7 = ConnectionMarker([[ 1, 1, 1]])
m8 = ConnectionMarker([[ 1, -1, 1]])
points = [m1, m2, m3, m4, m5, m6, m7, m8]
l01 = ConnectionLine([m1, m2])
l02 = ConnectionLine([m2, m3])
l03 = ConnectionLine([m3, m4])
l04 = ConnectionLine([m4, m1])
l05 = ConnectionLine([m5, m6])
l06 = ConnectionLine([m6, m7])
l07 = ConnectionLine([m7, m8])
l08 = ConnectionLine([m8, m5])
l09 = ConnectionLine([m1, m5])
l10 = ConnectionLine([m2, m6])
l11 = ConnectionLine([m3, m7])
l12 = ConnectionLine([m4, m8])
lines = [l01, l02, l03, l04, l05, l06, l07, l08, l09, l10, l11, l12]
p1 = ConnectionPolygon([m1, m2, m3, m4])
p2 = ConnectionPolygon([m8, m7, m6, m5])
p3 = ConnectionPolygon([m5, m6, m2, m1])
p4 = ConnectionPolygon([m6, m7, m3, m2])
p5 = ConnectionPolygon([m7, m8, m4, m3])
p6 = ConnectionPolygon([m8, m5, m1, m4])
polygons = [p1, p2, p3, p4, p5, p6]
root += points + lines + polygons
root.register()
viewer.setSceneGraph(root)
viewer.setBackgroundColor(QColor(255, 255, 255))
viewer.setWindowTitle("minimal")
viewer.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
avg_line_length: 28.824074 | max_line_length: 72 | alphanum_fraction: 0.615805
counts/scores: classes 1,277 / 0.410215, generators 0 / 0, decorators 130 / 0.04176, async_functions 0 / 0, documentation 51 / 0.016383

hexsha: e169422102864465f508bcbb4404b6271b866de1 | size: 4,673 | ext: py | lang: Python
max_stars: repo=cltl/a-proof-zonmw, path=utils/data_process.py, head=f6d1a83fc77223bf8b58c9d465aae301269bb679, licenses=["Apache-2.0"], count=2, events=[2021-02-08T08:24:06.000Z, 2021-11-12T10:23:23.000Z]
max_issues: repo=cltl/a-proof-zonmw, path=utils/data_process.py, head=f6d1a83fc77223bf8b58c9d465aae301269bb679, licenses=["Apache-2.0"], count=null, events=null
max_forks: repo=cltl/a-proof-zonmw, path=utils/data_process.py, head=f6d1a83fc77223bf8b58c9d465aae301269bb679, licenses=["Apache-2.0"], count=2, events=[2021-12-07T22:14:56.000Z, 2021-12-14T09:06:16.000Z]
content:
"""
Functions used in pre-processing of data for the machine learning pipelines.
"""
import pandas as pd
from pandas.api.types import is_scalar
from pathlib import Path
from sklearn.model_selection import GroupShuffleSplit
def concat_annotated(datadir):
"""
Concatenate all "annotated_df_*_parsed*.pkl" files in `datadir`.
The pkl's of the core team should end with "dedup.pkl", i.e. they should be deduplicated by the `parse_annotations.py` script.
The ze pkl's need not be deduplicated, as only notes that are not in the annotations of the core team are included.
Parameters
----------
datadir: Path
path to directory with data
Returns
-------
DataFrame
df of concatenated parsed annotations
"""
# load core team annotations; pickles are deduplicated during processing
annot = pd.concat([pd.read_pickle(fp) for fp in datadir.glob('*_dedup.pkl')], ignore_index=True)
# load ze annotations and remove IAA files
ze = pd.concat(
[pd.read_pickle(fp) for fp in datadir.glob('annotated_df_ze_*.pkl')], ignore_index=True
).query("~NotitieID.isin(@annot.NotitieID)", engine='python')
return pd.concat([annot, ze], ignore_index=True)
def drop_disregard(df):
"""
If one token in a note is marked 'disregard', remove the whole note from df.
Parameters
----------
df: DataFrame
parsed token-level annotations df (created by `parse_annotations.py`)
Returns
-------
DataFrame
df without 'disregard' notes
"""
df['disregard_note'] = df.groupby('NotitieID').disregard.transform('any')
return df.query(
"not disregard_note"
).drop(columns=['disregard', 'disregard_note'])
def fix_week_14(df):
"""
For annotations from week 14:
- Replace MBW values with `False`
- Replace MBW-lvl values with NaN
We remove this domain from week 14 since the guidelines for it were changed after this week.
Parameters
----------
df: DataFrame
parsed token-level annotations df (created by `parse_annotations.py`)
Returns
-------
DataFrame
df without MBW and MBW_lvl labels for week 14
"""
df['MBW'] = df.MBW.mask(df.batch == 'week_14', other=False)
df['MBW_lvl'] = df.MBW_lvl.mask(df.batch == 'week_14')
return df
def pad_sen_id(id):
"""
Add padding zeroes to sen_id.
"""
note_id, sen_no = id.split('_')
return '_'.join([note_id, f"{sen_no:0>4}"])
def anonymize(txt, nlp):
"""
Replace entities of type PERSON and GPE with 'PERSON', 'GPE'.
Return anonymized text and its length.
"""
doc = nlp(txt)
anonym = str(doc)
to_repl = {str(ent):ent.label_ for ent in doc.ents if ent.label_ in ['PERSON', 'GPE']}
for string, replacement in to_repl.items():
anonym = anonym.replace(string, replacement)
return anonym, len(doc)
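# Illustrative usage, assuming `nlp` is a spaCy pipeline with an NER component
# (e.g. nlp = spacy.load("en_core_web_sm"); the model name is an assumption):
#   anonymize("John moved to Paris.", nlp)
#   would return something like ("PERSON moved to GPE.", 5)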
def data_split_groups(
df,
X_col,
y_col,
group_col,
train_size,
):
"""
Split data to train / dev / test, while taking into account groups that should stay together.
Parameters
----------
df: DataFrame
df with the data to split
X_col: str
name of the column with the data (text)
y_col: str
name of the column with the gold labels
group_col: str
name of the column with the groups to take into account when splitting
train_size: float
proportion of data that should go to the training set
Returns
-------
train, dev, test: DataFrame's
df with train data, df with dev data, df with test data
"""
# create training set of `train_size`
gss = GroupShuffleSplit(n_splits=1, test_size=1-train_size, random_state=19)
for train_idx, other_idx in gss.split(df[X_col], df[y_col], groups=df[group_col]):
train = df.iloc[train_idx]
other = df.iloc[other_idx]
# the non-train data is split 50/50 into development and test
gss = GroupShuffleSplit(n_splits=1, test_size=0.5, random_state=19)
for dev_idx, test_idx in gss.split(other[X_col], other[y_col], groups=other[group_col]):
dev = other.iloc[dev_idx]
test = other.iloc[test_idx]
return train, dev, test
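# Illustrative usage; the column names "text" and "label" are hypothetical,
# while "NotitieID" is the note identifier used elsewhere in this module:
#   train, dev, test = data_split_groups(df, "text", "label", "NotitieID", train_size=0.8)
#   All rows sharing a NotitieID end up in the same split; the non-train part is halved into dev and test.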
def flatten_preds_if_necessary(df):
"""
Flatten predictions if they are a list in a list.
This is necessary because of an issue with the predict.py script prior to the update performed on 15-09-2021.
"""
cols = [col for col in df.columns if 'pred' in col]
for col in cols:
test = df[col].iloc[0]
if is_scalar(test[0]):
continue
df[col] = df[col].str[0]
return df
avg_line_length: 29.024845 | max_line_length: 130 | alphanum_fraction: 0.650332
counts/scores: classes 0 / 0, generators 0 / 0, decorators 0 / 0, async_functions 0 / 0, documentation 2,731 / 0.584421

hexsha: e16c926aa6450fc30f72e50b4463f6a0fcd7d9ad | size: 276 | ext: py | lang: Python
max_stars: repo=ajayiagbebaku/NFL-Model, path=venv/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py, head=afcc67a85ca7138c58c3334d45988ada2da158ed, licenses=["MIT"], count=11, events=[2020-06-28T04:30:26.000Z, 2022-03-26T08:40:47.000Z]
max_issues: repo=ajayiagbebaku/NFL-Model, path=venv/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py, head=afcc67a85ca7138c58c3334d45988ada2da158ed, licenses=["MIT"], count=150, events=[2019-09-30T11:22:36.000Z, 2021-08-02T06:19:29.000Z]
max_forks: repo=ajayiagbebaku/NFL-Model, path=venv/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py, head=afcc67a85ca7138c58c3334d45988ada2da158ed, licenses=["MIT"], count=20, events=[2021-11-07T13:55:56.000Z, 2021-12-02T10:54:01.000Z]
content:
import numpy as np
np.deprecate(1) # E: No overload variant
np.deprecate_with_doc(1) # E: incompatible type
np.byte_bounds(1) # E: incompatible type
np.who(1) # E: incompatible type
np.lookfor(None) # E: incompatible type
np.safe_eval(None) # E: incompatible type
avg_line_length: 19.714286 | max_line_length: 48 | alphanum_fraction: 0.721014
counts/scores: classes 0 / 0, generators 0 / 0, decorators 0 / 0, async_functions 0 / 0, documentation 134 / 0.485507

hexsha: e16db721abb59e634b09680c4bdf3796a1a5328b | size: 6,115 | ext: py | lang: Python
max_stars: repo=TheRealMilesLee/Python, path=VoiceAssistant/Project_Basic_struct/speakListen.py, head=d145c848a7ba76e8e523e4fe06e2a0add7e2fae1, licenses=["MIT"], count=1, events=[2018-12-05T11:04:47.000Z, 2018-12-05T11:04:47.000Z]
max_issues: repo=MarkHooland/Python, path=VoiceAssistant/Project_Basic_struct/speakListen.py, head=d145c848a7ba76e8e523e4fe06e2a0add7e2fae1, licenses=["MIT"], count=null, events=null
max_forks: repo=MarkHooland/Python, path=VoiceAssistant/Project_Basic_struct/speakListen.py, head=d145c848a7ba76e8e523e4fe06e2a0add7e2fae1, licenses=["MIT"], count=null, events=null
content:
import time
from colorama import Fore, Back, Style
import speech_recognition as sr
import os
import pyttsx3
import datetime
from rich.progress import Progress
python = pyttsx3.init("sapi5") # name of the engine is set as Python
voices = python.getProperty("voices")
#print(voices)
python.setProperty("voice", voices[1].id)
python.setProperty("rate", 140)
def speak(text):
"""[This function would speak aloud some text provided as parameter]
Args:
text ([str]): [It is the speech to be spoken]
"""
python.say(text)
python.runAndWait()
def greet(g):
"""Uses the datetime library to generate current time and then greets accordingly.
Args:
g (str): To decide whether to say hello or good bye
"""
if g == "start" or g == "s":
h = datetime.datetime.now().hour
text = ''
if h > 12 and h < 17:
text = "Hello ! Good Afternoon "
elif h < 12 and h > 0:
text = "Hello! Good Morning "
elif h >= 17 :
text = "Hello! Good Evening "
text += " I am Python, How may i help you ?"
speak(text)
elif g == "quit" or g == "end" or g == "over" or g == "e":
text = 'Thank you!. Good Bye ! '
speak(text)
def hear():
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
    r.dynamic_energy_threshold = True # python can now dynamically adjust the energy threshold
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = 9) # option
#speech = r.listen(source)
# convert speech to text
try:
#print("Recognizing...")
recognizing()
speech = r.recognize_google(speech)
print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
def recognizing():
"""Uses the Rich library to print a simulates version of "recognizing" by printing a loading bar.
"""
with Progress() as pr:
rec = pr.add_task("[red]Recognizing...", total = 100)
while not pr.finished:
pr.update(rec, advance = 1.0)
time.sleep(0.01)
def long_hear(duration_time = 60):
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
    The difference between hear() and long_hear() is that
    hear() records the user's voice for 9 seconds, while
    long_hear() records the user's voice for the time specified by the user. By default, it records for 60 seconds.
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
    r.dynamic_energy_threshold = True # python can now dynamically adjust the energy threshold
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = duration_time) # option
#speech = r.listen(source)
# convert speech to text
try:
print(Fore.RED +"Recognizing...")
#recognizing()
speech = r.recognize_google(speech)
#print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
def short_hear(duration_time = 5):
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
    The difference between hear() and short_hear() is that
    hear() records the user's voice for 9 seconds, while
    short_hear() records the user's voice for the time specified by the user. By default, it records for 5 seconds.
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
    r.dynamic_energy_threshold = True # python can now dynamically adjust the energy threshold
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = duration_time) # option
#speech = r.listen(source)
# convert speech to text
try:
print(Fore.RED +"Recognizing...")
#recognizing()
speech = r.recognize_google(speech)
#print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
if __name__ == '__main__':
# print("Enter your name")
# name = hear()
# speak("Hello " + name)
# greet("s")
# greet("e")
pass
#hear()
#recognizing()
avg_line_length: 35.970588 | max_line_length: 118 | alphanum_fraction: 0.594113
counts/scores: classes 0 / 0, generators 0 / 0, decorators 0 / 0, async_functions 0 / 0, documentation 3,325 / 0.543745

hexsha: e16e5534d48d16f412f05cd80a5b2d4be81a0792 | size: 702 | ext: py | lang: Python
max_stars: repo=bacuarabrasil/krenak, path=api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py, head=ad6a3af5ff162783ec9bd40d07a82f09bf35071b, licenses=["MIT"], count=null, events=null
max_issues: repo=bacuarabrasil/krenak, path=api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py, head=ad6a3af5ff162783ec9bd40d07a82f09bf35071b, licenses=["MIT"], count=26, events=[2021-03-10T22:07:57.000Z, 2021-03-11T12:13:35.000Z]
max_forks: repo=bacuarabrasil/krenak, path=api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py, head=ad6a3af5ff162783ec9bd40d07a82f09bf35071b, licenses=["MIT"], count=null, events=null
content:
# Generated by Django 3.1.7 on 2021-05-06 23:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mentorships', '0001_initial'),
('activities', '0007_activity_enrollment'),
]
operations = [
migrations.RemoveField(
model_name='activity',
name='enrollment',
),
migrations.AddField(
model_name='activity',
name='mentorship',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='mentorships.mentorship', verbose_name='Mentorship'),
),
]
avg_line_length: 28.08 | max_line_length: 175 | alphanum_fraction: 0.633903
counts/scores: classes 576 / 0.820513, generators 0 / 0, decorators 0 / 0, async_functions 0 / 0, documentation 204 / 0.290598

hexsha: e16f4dbb7cb5166fe73ac09acb1a07de0138e0d3 | size: 2,330 | ext: py | lang: Python
max_stars: repo=KaifangXu/API, path=Data_pre/encoding_feature.py, head=47cb17e35a381e50b25bbda9aa7e5216482af022, licenses=["MIT"], count=null, events=null
max_issues: repo=KaifangXu/API, path=Data_pre/encoding_feature.py, head=47cb17e35a381e50b25bbda9aa7e5216482af022, licenses=["MIT"], count=null, events=null
max_forks: repo=KaifangXu/API, path=Data_pre/encoding_feature.py, head=47cb17e35a381e50b25bbda9aa7e5216482af022, licenses=["MIT"], count=null, events=null
content:
import numpy as np
import pandas as pd
from scipy import signal,stats
from flask import Flask,request,jsonify
import json
import re
import os
import data_utils as utils
import sklearn.preprocessing as pre
configpath=os.path.join(os.path.dirname(__file__),'config.txt')
try:
config = utils.py_configs(configpath)
Signal_SERVER = config["Signal_SERVER"]
Signal_PORT = config["Signal_PORT"]
except:
raise Exception("Configuration error")
app = Flask(__name__)
@app.route('/Preprocessing/encoding_feature',methods=['POST'])
def encoding_feature(): ### parameters are sent in JSON format
try:
file_key=list(request.files.to_dict().keys())
print('k: ',file_key)
        ## 'file' is the input signal data file; 'window' is the window type, usually one of `boxcar`, `triang`, `blackman`, `hamming`, `hann`, etc.; 'index' is the group of data in the data file on which wavelet analysis is to be performed
keys=['file']
for key in keys:
if (key not in file_key):
code = 2
output = {"code": code, "KeyError": str(key)}
output = json.dumps(output)
return output
file=request.files.get('file')
index=int(request.form['index'])
df=pd.read_csv(file)
data=df.values
cols=df.columns
onehot_dict={}
for i in range(data.shape[1]):
attr_set=set(data[:,i])
temp_dict={}
j=0
for attr in attr_set:
temp_dict[attr]=j
j+=1
onehot_dict[cols[i]]=temp_dict
data_to_bi = []
for i in range(data.shape[0]):
rows=[]
for j in range(data.shape[1]):
onehot=onehot_dict[cols[j]][data[i][j]]
rows.append(onehot)
data_to_bi.append(rows)
enc = pre.OneHotEncoder()
print(onehot_dict)
print(data_to_bi)
enc.fit(data_to_bi)
a=enc.transform(data_to_bi).toarray()
result={'OneHot Result':str(a)}
return jsonify(result)
except Exception as e:
print('Exception: ',e)
code = 1
result = {"code":code,"error":re.findall("'([\w\d _]+)'",str(type(e)))[0]}
result = jsonify(result)
return result
if __name__=="__main__":
app.run(host=Signal_SERVER, port=int(Signal_PORT))
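# Illustrative request, assuming the service runs on localhost port 5000 (the
# actual host and port come from config.txt and are not shown in this dump):
#   curl -X POST -F "file=@data.csv" -F "index=0" \
#        http://localhost:5000/Preprocessing/encoding_feature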
avg_line_length: 31.066667 | max_line_length: 125 | alphanum_fraction: 0.564807
counts/scores: classes 0 / 0, generators 0 / 0, decorators 1,834 / 0.758478, async_functions 0 / 0, documentation 427 / 0.176592

hexsha: e16fa8e2b0d20fbbd86dbb386c3a783cd3b7617b | size: 13,708 | ext: py | lang: Python
max_stars: repo=mmin0/SigDFP, path=src/utils.py, head=e2a93faa658741d693b8070bcc7038d2fb7c3e74, licenses=["MIT"], count=null, events=null
max_issues: repo=mmin0/SigDFP, path=src/utils.py, head=e2a93faa658741d693b8070bcc7038d2fb7c3e74, licenses=["MIT"], count=null, events=null
max_forks: repo=mmin0/SigDFP, path=src/utils.py, head=e2a93faa658741d693b8070bcc7038d2fb7c3e74, licenses=["MIT"], count=1, events=[2022-02-28T23:26:23.000Z, 2022-02-28T23:26:23.000Z]
content:
import torch
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerTuple
from matplotlib.ticker import FormatStrFormatter
#from tqdm import tqdm
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.rc('xtick', labelsize=22) # fontsize of the tick labels
plt.rc('ytick', labelsize=22)
plt.rc('legend', fontsize=25)
plt.rc('axes', labelsize=25)
plt.rcParams["figure.figsize"] = (7.5, 6)
colors = ['lightcoral', 'mediumseagreen', 'darkorange']
def epoch_time(start_time, end_time):
elap_time = end_time - start_time
elap_min = elap_time//60
elap_sec = elap_time % 60
return elap_min, elap_sec
def train(model, dataloader, optimizer, criterion, initial, prev_m, device, depth=4):
"""
train model for alpha for one loop over dataloader
"""
epoch_loss = 0
model.train() # set model to train mode
i = 0
for batch in dataloader:
optimizer.zero_grad()
bm, cn = batch
X = model(bm.to(device),
cn.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size], strategy)
loss.backward(retain_graph=True)
optimizer.step()
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def evaluate(model, dataloader, criterion, initial, prev_m, device, depth=4):
epoch_loss = 0
#initialize
#N = prev_m.size()[0] - 1
#m = torch.zeros(N+1, 1, device=device)
#sigs = torch.zeros(signatory.signature_channels(2, depth), device=device)
    model.eval() # set model to evaluation mode
i = 0
for batch in dataloader:
bm, cn = batch
X = model(bm.to(device),
cn.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size], strategy)
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def train1(model, dataloader, optimizer, criterion, initial, prev_m, device, depth=4):
"""
train model for alpha for one loop over dataloader
"""
epoch_loss = 0
model.train() # set model to train mode
i = 0
for batch in dataloader:
optimizer.zero_grad()
bm, cn, typeVec = batch
X = model(bm.to(device),
cn.to(device),
typeVec.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size], strategy)
loss.backward(retain_graph=True)
optimizer.step()
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def evaluate1(model, dataloader, criterion, initial, prev_m, device, depth=4):
epoch_loss = 0
    model.eval() # set model to evaluation mode
i = 0
for batch in dataloader:
bm, cn, typeVec = batch
X = model(bm.to(device),
cn.to(device),
typeVec.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size], strategy)
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def train2(model, dataloader, optimizer, criterion, initial, prev_m, prev_c, device, depth=4):
"""
train model for alpha for one loop over dataloader
"""
epoch_loss = 0
model.train() # set model to train mode
i = 0
for batch in dataloader:
optimizer.zero_grad()
bm, cn, typeVec = batch
X = model(bm.to(device),
cn.to(device),
typeVec.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
prev_c[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
strategy, prev_c[i*dataloader.batch_size:(i+1)*dataloader.batch_size])
loss.backward(retain_graph=True)
optimizer.step()
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def evaluate2(model, dataloader, criterion, initial, prev_m, prev_c, device, depth=4):
epoch_loss = 0
#initialize
#N = prev_m.size()[0] - 1
#m = torch.zeros(N+1, 1, device=device)
#sigs = torch.zeros(signatory.signature_channels(2, depth), device=device)
    model.eval() # set model to evaluation mode
i = 0
for batch in dataloader:
bm, cn, typeVec = batch
X = model(bm.to(device),
cn.to(device),
typeVec.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
prev_c[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
strategy, prev_c[i*dataloader.batch_size:(i+1)*dataloader.batch_size])
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def plotErrors(error, target_addr, title, filename):
fig = plt.figure()
plt.title(title)
plt.xlabel("FP rounds")
plt.ylabel("Errors")
plt.plot(error, color='blue')
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotUtil(util, ylim, ytrue, target_addr, title, filename, ins_loc=None, ins_ylim=None, cost=False):
if cost:
n = len(util)
fig, ax = plt.subplots(figsize=(7.5, 6))
if title:
plt.title(title)
if ylim:
ax.set_ylim(ylim)
ax.set_xlabel(r"FP iterations $n$")
ax.set_ylabel("validation cost")
l1 = ax.axhline(ytrue, color="indianred", ls="--")
l2, = ax.plot(util, color='darkcyan', ls="-")
if ins_loc:
axins = ax.inset_axes(ins_loc)
if ins_ylim:
axins.set_ylim(ins_ylim)
axins.plot(range(n-50, n), util[-50:], color='darkcyan', ls="-")
axins.axhline(ytrue, color="indianred", ls="--")
ax.indicate_inset_zoom(axins)
ax.legend((l1, l2), ("true cost", "validation cost"), loc="upper center")
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
else:
n = len(util)
fig, ax = plt.subplots(figsize=(7.5, 6))
if title:
plt.title(title)
if ylim:
ax.set_ylim(ylim)
ax.set_xlabel(r"FP iterations $n$")
ax.set_ylabel("validation utility")
l1 = ax.axhline(ytrue, color="indianred", ls="--")
l2, = ax.plot(util, color='darkcyan', ls="-")
if ins_loc:
axins = ax.inset_axes(ins_loc)
if ins_ylim:
axins.set_ylim(ins_ylim)
axins.plot(range(n-50, n), util[-50:], color='darkcyan', ls="-")
axins.axhline(ytrue, color="indianred", ls="--")
ax.indicate_inset_zoom(axins)
ax.legend((l1, l2), ("true utility", "validation utility"), loc="upper center")
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotMeanDiff_bencmarkvspredicted(data, target_addr, title, filename, ylim=None, label1=None, label2=None, ylabel=None, legendloc=None, round_=False):
fig = plt.figure()
if title:
plt.title(title)
x, next_m, m = data
if ylim:
plt.ylim(ylim)
c = len(next_m)
lines = []
lines_pred = []
for i in range(c):
l, = plt.plot(x, next_m[i], color=colors[i])
lines.append(l)
for i in range(c):
l, = plt.plot(x, m[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
#plt.plot(x, next_m-m, label='diff')
plt.xlabel(r"time $t$")
if ylabel:
plt.ylabel(ylabel)
if legendloc:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc=legendloc, ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
else:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left", ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
if round_:
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def L2distance(x, y):
b, N, _ = x.size()
return ((torch.sum(torch.pow(x - y, 2))/N/b)**0.5).item()
def plotSDE(benchmark, predicted, target_addr, title, filename, ylim=None, label1=None, label2=None, legendloc=None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
fig = plt.figure()
if title:
plt.title(title)
if ylim:
plt.ylim(ylim)
t = [i/100 for i in range(101)]
c = len(benchmark)
lines = []
lines_pred = []
for i in range(c):
l, = plt.plot(t, benchmark[i], color=colors[i], ls='-')
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
if legendloc:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc=legendloc, ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
else:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left", ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
plt.ylabel(r"$X_t$ and $\widehat{X}_t$")
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotC(benchmark, predicted, target_addr, title, filename, label1=None, label2=None, ylabel=None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
t = [i/100 for i in range(100)]
fig = plt.figure()
if title:
plt.title(title)
c = len(benchmark)
lines = []
lines_pred = []
for i in range(c):
l, = plt.plot(t, benchmark[i], color=colors[i], ls='-')
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left",ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
if ylabel:
plt.ylabel(ylabel)
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotpi(benchmark, predicted, target_addr, title, filename, ylim = None, label1=None, label2=None, ylabel=None, legendloc = None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
fig = plt.figure()
if title:
plt.title(title)
if ylim:
plt.ylim(ylim)
t = [i/100 for i in range(100)]
c = len(benchmark)
lines = []
lines_pred = []
for i in range(c):
l, = plt.plot(t, benchmark[i], color=colors[i], ls='-')
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
if legendloc:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc=legendloc, ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
else:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left", ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
if ylabel:
plt.ylabel(ylabel)
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotmC(benchmark, predicted, target_addr, title, filename, label1=None, label2=None, ylabel=None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
N = predicted.shape[1]
t = [1/N*i for i in range(N)]
fig = plt.figure()
if title:
plt.title(title)
c = len(predicted)
lines = []
lines_pred = []
l, = plt.plot(t, benchmark, color='darkgrey', ls='-', linewidth=5)
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left", ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
if ylabel:
plt.ylabel(ylabel)
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
avg_line_length: 33.031325 | max_line_length: 153 | alphanum_fraction: 0.587978
counts/scores: classes 0 / 0, generators 0 / 0, decorators 0 / 0, async_functions 0 / 0, documentation 1,783 / 0.13007

hexsha: e170d7139c31119e1eb476ae084b331e0ed0a722 | size: 100 | ext: py | lang: Python
max_stars: repo=doinalangille/lambdata_doinalangille, path=lambdata_doinalangille/__init__.py, head=f57e1f9f87615bc9d1d1cfada530a542ea4551a1, licenses=["MIT"], count=null, events=null
max_issues: repo=doinalangille/lambdata_doinalangille, path=lambdata_doinalangille/__init__.py, head=f57e1f9f87615bc9d1d1cfada530a542ea4551a1, licenses=["MIT"], count=3, events=[2020-03-24T18:29:36.000Z, 2021-02-02T22:42:20.000Z]
max_forks: repo=doinalangille/lambdata_doinalangille, path=lambdata_doinalangille/__init__.py, head=f57e1f9f87615bc9d1d1cfada530a542ea4551a1, licenses=["MIT"], count=1, events=[2020-02-11T23:05:07.000Z, 2020-02-11T23:05:07.000Z]
content:
"""
lambdata - a collection of Data Science helper functions
"""
import pandas as pd
import sklearn
avg_line_length: 16.666667 | max_line_length: 56 | alphanum_fraction: 0.76
counts/scores: classes 0 / 0, generators 0 / 0, decorators 0 / 0, async_functions 0 / 0, documentation 64 / 0.64

hexsha: e171d508606a36edd465712b9674cad13c99de99 | size: 1,554 | ext: py | lang: Python
max_stars: repo=Animenosekai/jsConsole, path=jsConsole/__init__.py, head=e2604988f20a0d0d93578f786ee7beaf72b9afbc, licenses=["MIT"], count=null, events=null
max_issues: repo=Animenosekai/jsConsole, path=jsConsole/__init__.py, head=e2604988f20a0d0d93578f786ee7beaf72b9afbc, licenses=["MIT"], count=null, events=null
max_forks: repo=Animenosekai/jsConsole, path=jsConsole/__init__.py, head=e2604988f20a0d0d93578f786ee7beaf72b9afbc, licenses=["MIT"], count=null, events=null
content:
"""
pyJsConsole wrapper.
© Anime no Sekai - 2020
"""
from .internal.javascript import classes as JSClass
console = JSClass._Console()
document = JSClass._Document()
history = JSClass._History()
Math = JSClass._Math()
navigator = JSClass._Navigator()
screen = JSClass._Screen()
window = JSClass._Window()
browser = JSClass.BrowserObject
'''
import threading
from lifeeasy import sleep
def reloadElements():
global document
global window
lastURL = 'data:,'
while True:
sleep(0.1)
try:
if JSClass.evaluate('window.location.href') != lastURL:
document = JSClass._Document()
window = JSClass._Window()
lastURL = JSClass.evaluate('window.location.href')
except:
break
thread = threading.Thread(target=reloadElements)
thread.daemon = True
thread.start()
'''
def newDocument():
return JSClass._Document()
def newWindow():
return JSClass._Window()
def newHistory():
return JSClass._History()
def fresh():
return (JSClass._Document(), JSClass._Window(), JSClass._History())
def clearInterval(intervalID):
JSClass.clearInterval(intervalID)
def clearTimeout(timeoutID):
JSClass.clearTimeout(timeoutID)
def evaluate(code_to_execute, return_value=False):
return JSClass.evaluate(code_to_execute, return_value=return_value)
def setInterval(function, milliseconds):
return JSClass.setInterval(function, milliseconds)
def setTimeout(function, milliseconds):
return JSClass.setTimeout(function, milliseconds)
avg_line_length: 23.19403 | max_line_length: 71 | alphanum_fraction: 0.70592
counts/scores: classes 0 / 0, generators 0 / 0, decorators 0 / 0, async_functions 0 / 0, documentation 582 / 0.374277

hexsha: e1741137c9f22621cbb2d5cd7d5c872d48ea9402 | size: 45,528 | ext: py | lang: Python
max_stars: repo=panhania/grr, path=grr/client/client_actions/file_finder_test.py, head=fe16a7311a528e31fe0e315a880e98273b8df960, licenses=["Apache-2.0"], count=null, events=null
max_issues: repo=panhania/grr, path=grr/client/client_actions/file_finder_test.py, head=fe16a7311a528e31fe0e315a880e98273b8df960, licenses=["Apache-2.0"], count=null, events=null
max_forks: repo=panhania/grr, path=grr/client/client_actions/file_finder_test.py, head=fe16a7311a528e31fe0e315a880e98273b8df960, licenses=["Apache-2.0"], count=null, events=null
content:
#!/usr/bin/env python
"""Tests the client file finder action."""
import collections
import glob
import hashlib
import os
import platform
import shutil
import subprocess
import unittest
import mock
import psutil
import unittest
from grr.client import comms
from grr.client.client_actions import file_finder as client_file_finder
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import file_finder as rdf_file_finder
from grr.lib.rdfvalues import standard as rdf_standard
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
def MyStat(path):
stat_obj = MyStat.old_target(path)
if path.endswith("auth.log"):
res = list(stat_obj)
# Sets atime, ctime, and mtime to some time in 2022.
res[-1] = 1672466423
res[-2] = 1672466423
res[-3] = 1672466423
return os.stat_result(res)
return stat_obj
class FileFinderTest(client_test_lib.EmptyActionTest):
def setUp(self):
super(FileFinderTest, self).setUp()
self.stat_action = rdf_file_finder.FileFinderAction.Stat()
def _GetRelativeResults(self, raw_results, base_path=None):
base_path = base_path or self.base_path
return [
result.stat_entry.pathspec.path[len(base_path) + 1:]
for result in raw_results
]
def _RunFileFinder(self,
paths,
action,
conditions=None,
follow_links=True,
**kw):
return self.RunAction(
client_file_finder.FileFinderOS,
arg=rdf_file_finder.FileFinderArgs(
paths=paths,
action=action,
conditions=conditions,
process_non_regular_files=True,
follow_links=follow_links,
**kw))
def testFileFinder(self):
paths = [self.base_path + "/*"]
results = self._RunFileFinder(paths, self.stat_action)
self.assertEqual(
self._GetRelativeResults(results), os.listdir(self.base_path))
profiles_path = os.path.join(self.base_path, "profiles/v1.0")
paths = [os.path.join(self.base_path, "profiles/v1.0") + "/*"]
results = self._RunFileFinder(paths, self.stat_action)
self.assertEqual(
self._GetRelativeResults(results, base_path=profiles_path),
os.listdir(profiles_path))
def testRecursiveGlob(self):
paths = [self.base_path + "/**3"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results)
self.assertIn("a/b", relative_results)
self.assertIn("a/b/c", relative_results)
self.assertIn("a/b/d", relative_results)
self.assertNotIn("a/b/c/helloc.txt", relative_results)
self.assertNotIn("a/b/d/hellod.txt", relative_results)
paths = [self.base_path + "/**4"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results)
self.assertIn("a/b", relative_results)
self.assertIn("a/b/c", relative_results)
self.assertIn("a/b/d", relative_results)
self.assertIn("a/b/c/helloc.txt", relative_results)
self.assertIn("a/b/d/hellod.txt", relative_results)
def testRegexGlob(self):
paths = [self.base_path + "/rekall*.gz"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results)
for glob_result in glob.glob(self.base_path + "/rekall*gz"):
self.assertIn(os.path.basename(glob_result), relative_results)
def testRecursiveRegexGlob(self):
paths = [self.base_path + "/**3/*.gz"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results)
self.assertIn("profiles/v1.0/nt/index.gz", relative_results)
self.assertIn("bigquery/ExportedFile.json.gz", relative_results)
for r in relative_results:
self.assertEqual(os.path.splitext(r)[1], ".gz")
paths = [self.base_path + "/**2/*.gz"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results)
self.assertNotIn("profiles/v1.0/nt/index.gz", relative_results)
self.assertIn("bigquery/ExportedFile.json.gz", relative_results)
for r in relative_results:
self.assertEqual(os.path.splitext(r)[1], ".gz")
def testDoubleRecursionFails(self):
paths = [self.base_path + "/**/**/test.exe"]
with self.assertRaises(ValueError):
self._RunFileFinder(paths, self.stat_action)
def testInvalidInput(self):
paths = [self.base_path + "/r**z"]
with self.assertRaises(ValueError):
self._RunFileFinder(paths, self.stat_action)
paths = [self.base_path + "/**.exe"]
with self.assertRaises(ValueError):
self._RunFileFinder(paths, self.stat_action)
paths = [self.base_path + "/test**"]
with self.assertRaises(ValueError):
self._RunFileFinder(paths, self.stat_action)
def testGroupings(self):
paths = [self.base_path + "/a/b/{c,d}/hello*"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results)
self.assertIn("a/b/c/helloc.txt", relative_results)
self.assertIn("a/b/d/hellod.txt", relative_results)
paths = [self.base_path + "/a/b/*/hello{c,d}.txt"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results)
self.assertIn("a/b/c/helloc.txt", relative_results)
self.assertIn("a/b/d/hellod.txt", relative_results)
def testFollowLinks(self):
try:
# This sets up a structure as follows:
# tmp_dir/lnk_test/contains_lnk
# tmp_dir/lnk_test/contains_lnk/lnk
# tmp_dir/lnk_test/lnk_target
# tmp_dir/lnk_test/lnk_target/target
# lnk is a symbolic link to lnk_target. A recursive find in
# contains_lnk will find the target iff follow_links is allowed.
test_dir = os.path.join(self.temp_dir, "lnk_test")
contains_lnk = os.path.join(test_dir, "contains_lnk")
lnk = os.path.join(contains_lnk, "lnk")
lnk_target = os.path.join(test_dir, "lnk_target")
lnk_target_contents = os.path.join(lnk_target, "target")
os.mkdir(test_dir)
os.mkdir(contains_lnk)
os.mkdir(lnk_target)
os.symlink(lnk_target, lnk)
with open(lnk_target_contents, "wb") as fd:
fd.write("sometext")
paths = [contains_lnk + "/**"]
results = self._RunFileFinder(paths, self.stat_action)
relative_results = self._GetRelativeResults(results, base_path=test_dir)
self.assertIn("contains_lnk/lnk", relative_results)
self.assertIn("contains_lnk/lnk/target", relative_results)
results = self._RunFileFinder(paths, self.stat_action, follow_links=False)
relative_results = self._GetRelativeResults(results, base_path=test_dir)
self.assertIn("contains_lnk/lnk", relative_results)
self.assertNotIn("contains_lnk/lnk/target", relative_results)
finally:
try:
shutil.rmtree(test_dir)
except OSError:
pass
def _PrepareTimestampedFiles(self):
searching_path = os.path.join(self.base_path, "searching")
test_dir = os.path.join(self.temp_dir, "times_test")
os.mkdir(test_dir)
for f in ["dpkg.log", "dpkg_false.log", "auth.log"]:
src = os.path.join(searching_path, f)
dst = os.path.join(test_dir, f)
shutil.copy(src, dst)
return test_dir
def RunAndCheck(self,
paths,
action=None,
conditions=None,
expected=None,
unexpected=None,
base_path=None,
**kw):
action = action or self.stat_action
raw_results = self._RunFileFinder(
paths, action, conditions=conditions, **kw)
relative_results = self._GetRelativeResults(
raw_results, base_path=base_path)
for f in unexpected:
self.assertNotIn(f, relative_results)
for f in expected:
self.assertIn(f, relative_results)
def testLiteralMatchCondition(self):
searching_path = os.path.join(self.base_path, "searching")
paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
literal = "pam_unix(ssh:session)"
clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
bytes_before = 10
bytes_after = 20
condition = rdf_file_finder.FileFinderCondition(
condition_type="CONTENTS_LITERAL_MATCH",
contents_literal_match=clmc(
literal=literal, bytes_before=bytes_before,
bytes_after=bytes_after))
raw_results = self._RunFileFinder(
paths, self.stat_action, conditions=[condition])
relative_results = self._GetRelativeResults(
raw_results, base_path=searching_path)
self.assertEqual(len(relative_results), 1)
self.assertIn("auth.log", relative_results)
self.assertEqual(len(raw_results[0].matches), 1)
buffer_ref = raw_results[0].matches[0]
orig_data = open(os.path.join(searching_path, "auth.log")).read()
self.assertEqual(
len(buffer_ref.data), bytes_before + len(literal) + bytes_after)
self.assertEqual(
orig_data[buffer_ref.offset:buffer_ref.offset + buffer_ref.length],
buffer_ref.data)
def testLiteralMatchConditionAllHits(self):
searching_path = os.path.join(self.base_path, "searching")
paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
bytes_before = 10
bytes_after = 20
literal = "mydomain.com"
condition = rdf_file_finder.FileFinderCondition(
condition_type="CONTENTS_LITERAL_MATCH",
contents_literal_match=clmc(
literal=literal,
mode="ALL_HITS",
bytes_before=bytes_before,
bytes_after=bytes_after))
raw_results = self._RunFileFinder(
paths, self.stat_action, conditions=[condition])
self.assertEqual(len(raw_results), 1)
self.assertEqual(len(raw_results[0].matches), 6)
for buffer_ref in raw_results[0].matches:
self.assertEqual(
buffer_ref.data[bytes_before:bytes_before + len(literal)], literal)
def testLiteralMatchConditionLargeFile(self):
paths = [os.path.join(self.base_path, "new_places.sqlite")]
literal = "RecentlyBookmarked"
clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
bytes_before = 10
bytes_after = 20
condition = rdf_file_finder.FileFinderCondition(
condition_type="CONTENTS_LITERAL_MATCH",
contents_literal_match=clmc(
literal=literal,
mode="ALL_HITS",
bytes_before=bytes_before,
bytes_after=bytes_after))
raw_results = self._RunFileFinder(
paths, self.stat_action, conditions=[condition])
self.assertEqual(len(raw_results), 1)
self.assertEqual(len(raw_results[0].matches), 1)
buffer_ref = raw_results[0].matches[0]
with open(paths[0], "rb") as fd:
fd.seek(buffer_ref.offset)
self.assertEqual(buffer_ref.data, fd.read(buffer_ref.length))
self.assertEqual(
buffer_ref.data[bytes_before:bytes_before + len(literal)], literal)
def testRegexMatchCondition(self):
searching_path = os.path.join(self.base_path, "searching")
paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
regex = r"pa[nm]_o?unix\(s{2}h"
bytes_before = 10
bytes_after = 20
crmc = rdf_file_finder.FileFinderContentsRegexMatchCondition
condition = rdf_file_finder.FileFinderCondition(
condition_type="CONTENTS_REGEX_MATCH",
contents_regex_match=crmc(
regex=regex,
bytes_before=bytes_before,
bytes_after=bytes_after,
))
raw_results = self._RunFileFinder(
paths, self.stat_action, conditions=[condition])
relative_results = self._GetRelativeResults(
raw_results, base_path=searching_path)
self.assertEqual(len(relative_results), 1)
self.assertIn("auth.log", relative_results)
self.assertEqual(len(raw_results[0].matches), 1)
buffer_ref = raw_results[0].matches[0]
orig_data = open(os.path.join(searching_path, "auth.log")).read()
self.assertEqual(
orig_data[buffer_ref.offset:buffer_ref.offset + buffer_ref.length],
buffer_ref.data)
def testRegexMatchConditionAllHits(self):
searching_path = os.path.join(self.base_path, "searching")
paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]
bytes_before = 10
bytes_after = 20
crmc = rdf_file_finder.FileFinderContentsRegexMatchCondition
regex = r"mydo....\.com"
condition = rdf_file_finder.FileFinderCondition(
condition_type="CONTENTS_REGEX_MATCH",
contents_regex_match=crmc(
regex=regex,
mode="ALL_HITS",
bytes_before=bytes_before,
bytes_after=bytes_after,
))
raw_results = self._RunFileFinder(
paths, self.stat_action, conditions=[condition])
self.assertEqual(len(raw_results), 1)
self.assertEqual(len(raw_results[0].matches), 6)
for buffer_ref in raw_results[0].matches:
needle = "mydomain.com"
self.assertEqual(buffer_ref.data[bytes_before:bytes_before + len(needle)],
needle)
def testHashAction(self):
paths = [os.path.join(self.base_path, "hello.exe")]
hash_action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.HASH)
results = self._RunFileFinder(paths, hash_action)
self.assertEqual(len(results), 1)
res = results[0]
data = open(paths[0], "rb").read()
self.assertEqual(res.hash_entry.num_bytes, len(data))
self.assertEqual(res.hash_entry.md5.HexDigest(),
hashlib.md5(data).hexdigest())
self.assertEqual(res.hash_entry.sha1.HexDigest(),
hashlib.sha1(data).hexdigest())
self.assertEqual(res.hash_entry.sha256.HexDigest(),
hashlib.sha256(data).hexdigest())
hash_action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.HASH,
hash=rdf_file_finder.FileFinderHashActionOptions(
max_size=100, oversized_file_policy="SKIP"))
results = self._RunFileFinder(paths, hash_action)
self.assertEqual(len(results), 1)
res = results[0]
self.assertFalse(res.HasField("hash"))
hash_action = rdf_file_finder.FileFinderAction(
action_type=rdf_file_finder.FileFinderAction.Action.HASH,
hash=rdf_file_finder.FileFinderHashActionOptions(
max_size=100, oversized_file_policy="HASH_TRUNCATED"))
results = self._RunFileFinder(paths, hash_action)
self.assertEqual(len(results), 1)
res = results[0]
data = open(paths[0], "rb").read()[:100]
self.assertEqual(res.hash_entry.num_bytes, len(data))
self.assertEqual(res.hash_entry.md5.HexDigest(),
hashlib.md5(data).hexdigest())
self.assertEqual(res.hash_entry.sha1.HexDigest(),
hashlib.sha1(data).hexdigest())
self.assertEqual(res.hash_entry.sha256.HexDigest(),
hashlib.sha256(data).hexdigest())
def _RunFileFinderDownloadHello(self, upload, opts=None):
action = rdf_file_finder.FileFinderAction.Download()
action.download = opts
upload.return_value = rdf_client.UploadedFile(
bytes_uploaded=42, file_id="foo", hash=rdf_crypto.Hash())
hello_path = os.path.join(self.base_path, "hello.exe")
return self._RunFileFinder([hello_path], action)
@mock.patch.object(comms.GRRClientWorker, "UploadFile")
def testDownloadActionDefault(self, upload):
results = self._RunFileFinderDownloadHello(upload)
self.assertEquals(len(results), 1)
self.assertTrue(upload.called_with(max_bytes=None))
self.assertTrue(results[0].HasField("uploaded_file"))
self.assertEquals(results[0].uploaded_file, upload.return_value)
@mock.patch.object(comms.GRRClientWorker, "UploadFile")
def testDownloadActionSkip(self, upload):
opts = rdf_file_finder.FileFinderDownloadActionOptions(
max_size=0, oversized_file_policy="SKIP")
results = self._RunFileFinderDownloadHello(upload, opts=opts)
self.assertEquals(len(results), 1)
self.assertFalse(upload.called)
self.assertFalse(results[0].HasField("uploaded_file"))
@mock.patch.object(comms.GRRClientWorker, "UploadFile")
def testDownloadActionTruncate(self, upload):
opts = rdf_file_finder.FileFinderDownloadActionOptions(
max_size=42, oversized_file_policy="DOWNLOAD_TRUNCATED")
results = self._RunFileFinderDownloadHello(upload, opts=opts)
self.assertEquals(len(results), 1)
self.assertTrue(upload.called_with(max_bytes=42))
self.assertTrue(results[0].HasField("uploaded_file"))
self.assertEquals(results[0].uploaded_file, upload.return_value)
EXT2_COMPR_FL = 0x00000004
EXT2_IMMUTABLE_FL = 0x00000010
# TODO(hanuszczak): Maybe it would make sense to refactor this to a helper
# constructor of the `rdf_file_finder.FileFinderAction`.
@staticmethod
def _StatAction(**kwargs):
action_type = rdf_file_finder.FileFinderAction.Action.STAT
opts = rdf_file_finder.FileFinderStatActionOptions(**kwargs)
return rdf_file_finder.FileFinderAction(action_type=action_type, stat=opts)
@unittest.skipIf(platform.system() != "Linux", "requires Linux")
def testStatExtFlags(self):
with test_lib.AutoTempFilePath() as temp_filepath:
if subprocess.call(["which", "chattr"]) != 0:
raise unittest.SkipTest("`chattr` command is not available")
if subprocess.call(["chattr", "+c", temp_filepath]) != 0:
reason = "extended attributes not supported by filesystem"
raise unittest.SkipTest(reason)
action = self._StatAction()
results = self._RunFileFinder([temp_filepath], action)
self.assertEqual(len(results), 1)
stat_entry = results[0].stat_entry
self.assertTrue(stat_entry.st_flags_linux & self.EXT2_COMPR_FL)
self.assertFalse(stat_entry.st_flags_linux & self.EXT2_IMMUTABLE_FL)
def testStatExtAttrs(self):
with test_lib.AutoTempFilePath() as temp_filepath:
self._SetExtAttr(temp_filepath, "user.foo", "bar")
self._SetExtAttr(temp_filepath, "user.quux", "norf")
action = self._StatAction()
results = self._RunFileFinder([temp_filepath], action)
self.assertEqual(len(results), 1)
ext_attrs = results[0].stat_entry.ext_attrs
self.assertEqual(ext_attrs[0].name, "user.foo")
self.assertEqual(ext_attrs[0].value, "bar")
self.assertEqual(ext_attrs[1].name, "user.quux")
self.assertEqual(ext_attrs[1].value, "norf")
action = self._StatAction(ext_attrs=False)
results = self._RunFileFinder([temp_filepath], action)
self.assertEqual(len(results), 1)
ext_attrs = results[0].stat_entry.ext_attrs
self.assertFalse(ext_attrs)
@classmethod
def _SetExtAttr(cls, filepath, name, value):
if platform.system() == "Linux":
cls._SetExtAttrLinux(filepath, name, value)
elif platform.system() == "Darwin":
cls._SetExtAttrOsx(filepath, name, value)
else:
raise unittest.SkipTest("unsupported system")
@classmethod
def _SetExtAttrLinux(cls, filepath, name, value):
if subprocess.call(["which", "setfattr"]) != 0:
raise unittest.SkipTest("`setfattr` command is not available")
if subprocess.call(["setfattr", filepath, "-n", name, "-v", value]) != 0:
raise unittest.SkipTest("extended attributes not supported by filesystem")
@classmethod
def _SetExtAttrOsx(cls, filepath, name, value):
if subprocess.call(["xattr", "-w", name, value, filepath]) != 0:
raise unittest.SkipTest("extended attributes not supported")
def testLinkStat(self):
"""Tests resolving symlinks when getting stat entries."""
test_dir = os.path.join(self.temp_dir, "lnk_stat_test")
lnk = os.path.join(test_dir, "lnk")
lnk_target = os.path.join(test_dir, "lnk_target")
os.mkdir(test_dir)
with open(lnk_target, "wb") as fd:
fd.write("sometext")
os.symlink(lnk_target, lnk)
paths = [lnk]
link_size = os.lstat(lnk).st_size
target_size = os.stat(lnk).st_size
for expected_size, resolve_links in [(link_size, False), (target_size,
True)]:
stat_action = rdf_file_finder.FileFinderAction.Stat(
resolve_links=resolve_links)
results = self._RunFileFinder(paths, stat_action)
self.assertEqual(len(results), 1)
res = results[0]
self.assertEqual(res.stat_entry.st_size, expected_size)
def testModificationTimeCondition(self):
with utils.Stubber(os, "lstat", MyStat):
test_dir = self._PrepareTimestampedFiles()
# We have one "old" file, auth.log, and two "new" ones, dpkg*.
paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
change_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
modification_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="MODIFICATION_TIME",
modification_time=rdf_file_finder.FileFinderModificationTimeCondition(
max_last_modified_time=change_time))
self.RunAndCheck(
paths,
conditions=[modification_time_condition],
expected=["dpkg.log", "dpkg_false.log"],
unexpected=["auth.log"],
base_path=test_dir)
# Now just the file from 2022.
modification_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="MODIFICATION_TIME",
modification_time=rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=change_time))
self.RunAndCheck(
paths,
conditions=[modification_time_condition],
expected=["auth.log"],
unexpected=["dpkg.log", "dpkg_false.log"],
base_path=test_dir)
def testAccessTimeCondition(self):
with utils.Stubber(os, "lstat", MyStat):
test_dir = self._PrepareTimestampedFiles()
paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
change_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
# Check we can get the normal files.
access_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="ACCESS_TIME",
access_time=rdf_file_finder.FileFinderAccessTimeCondition(
max_last_access_time=change_time))
self.RunAndCheck(
paths,
conditions=[access_time_condition],
expected=["dpkg.log", "dpkg_false.log"],
unexpected=["auth.log"],
base_path=test_dir)
# Now just the file from 2022.
access_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="ACCESS_TIME",
access_time=rdf_file_finder.FileFinderAccessTimeCondition(
min_last_access_time=change_time))
self.RunAndCheck(
paths,
conditions=[access_time_condition],
expected=["auth.log"],
unexpected=["dpkg.log", "dpkg_false.log"],
base_path=test_dir)
def testInodeChangeTimeCondition(self):
with utils.Stubber(os, "lstat", MyStat):
test_dir = self._PrepareTimestampedFiles()
# We have one "old" file, auth.log, and two "new" ones, dpkg*.
paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
# Check we can get the auth log only (huge ctime).
change_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
ichange_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="INODE_CHANGE_TIME",
inode_change_time=rdf_file_finder.FileFinderInodeChangeTimeCondition(
min_last_inode_change_time=change_time))
self.RunAndCheck(
paths,
conditions=[ichange_time_condition],
expected=["auth.log"],
unexpected=["dpkg.log", "dpkg_false.log"],
base_path=test_dir)
# Now just the others.
ichange_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="INODE_CHANGE_TIME",
inode_change_time=rdf_file_finder.FileFinderInodeChangeTimeCondition(
max_last_inode_change_time=change_time))
self.RunAndCheck(
paths,
conditions=[ichange_time_condition],
expected=["dpkg.log", "dpkg_false.log"],
unexpected=["auth.log"],
base_path=test_dir)
def testSizeCondition(self):
test_dir = self._PrepareTimestampedFiles()
# We have one "old" file, auth.log, and two "new" ones, dpkg*.
paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
# Auth.log is 770 bytes, the other two ~620 each.
size_condition = rdf_file_finder.FileFinderCondition(
condition_type="SIZE",
size=rdf_file_finder.FileFinderSizeCondition(min_file_size=700))
self.RunAndCheck(
paths,
conditions=[size_condition],
expected=["auth.log"],
unexpected=["dpkg.log", "dpkg_false.log"],
base_path=test_dir)
size_condition = rdf_file_finder.FileFinderCondition(
condition_type="SIZE",
size=rdf_file_finder.FileFinderSizeCondition(max_file_size=700))
self.RunAndCheck(
paths,
conditions=[size_condition],
expected=["dpkg.log", "dpkg_false.log"],
unexpected=["auth.log"],
base_path=test_dir)
def testXDEV(self):
test_dir = os.path.join(self.temp_dir, "xdev_test")
local_dev_dir = os.path.join(test_dir, "local_dev")
net_dev_dir = os.path.join(test_dir, "net_dev")
os.mkdir(test_dir)
os.mkdir(local_dev_dir)
os.mkdir(net_dev_dir)
local_file = os.path.join(local_dev_dir, "local_file")
net_file = os.path.join(net_dev_dir, "net_file")
with open(local_file, "wb") as fd:
fd.write("local_data")
with open(net_file, "wb") as fd:
fd.write("net_data")
all_mountpoints = [local_dev_dir, net_dev_dir, "/some/other/dir"]
local_mountpoints = [local_dev_dir]
def MyDiskPartitions(all=False): # pylint: disable=redefined-builtin
mp = collections.namedtuple("MountPoint", ["mountpoint"])
if all:
return [mp(mountpoint=m) for m in all_mountpoints]
else:
return [mp(mountpoint=m) for m in local_mountpoints]
with utils.Stubber(psutil, "disk_partitions", MyDiskPartitions):
paths = [test_dir + "/**5"]
self.RunAndCheck(
paths,
expected=[
"local_dev", "local_dev/local_file", "net_dev", "net_dev/net_file"
],
unexpected=[],
base_path=test_dir,
xdev="ALWAYS")
self.RunAndCheck(
paths,
expected=["local_dev", "local_dev/local_file", "net_dev"],
unexpected=["net_dev/net_file"],
base_path=test_dir,
xdev="LOCAL")
self.RunAndCheck(
paths,
expected=["local_dev", "net_dev"],
unexpected=["local_dev/local_file", "net_dev/net_file"],
base_path=test_dir,
xdev="NEVER")
class RegexMatcherTest(unittest.TestCase):
@staticmethod
def _RegexMatcher(string):
regex = rdf_standard.RegularExpression(string)
return client_file_finder.RegexMatcher(regex)
def testMatchLiteral(self):
matcher = self._RegexMatcher("foo")
span = matcher.Match("foobar", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 3)
span = matcher.Match("foobarfoobar", 2)
self.assertTrue(span)
self.assertEqual(span.begin, 6)
self.assertEqual(span.end, 9)
def testNoMatchLiteral(self):
matcher = self._RegexMatcher("baz")
span = matcher.Match("foobar", 0)
self.assertFalse(span)
span = matcher.Match("foobazbar", 5)
self.assertFalse(span)
def testMatchWildcard(self):
matcher = self._RegexMatcher("foo.*bar")
span = matcher.Match("foobar", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 6)
span = matcher.Match("quuxfoobazbarnorf", 2)
self.assertTrue(span)
self.assertEqual(span.begin, 4)
self.assertEqual(span.end, 13)
def testMatchRepeated(self):
matcher = self._RegexMatcher("qu+x")
span = matcher.Match("quuuux", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 6)
span = matcher.Match("qx", 0)
self.assertFalse(span)
span = matcher.Match("qvvvvx", 0)
self.assertFalse(span)
class LiteralMatcherTest(unittest.TestCase):
def testMatchLiteral(self):
matcher = client_file_finder.LiteralMatcher("bar")
span = matcher.Match("foobarbaz", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 3)
self.assertEqual(span.end, 6)
span = matcher.Match("barbarbar", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 3)
span = matcher.Match("barbarbar", 4)
self.assertTrue(span)
self.assertEqual(span.begin, 6)
self.assertEqual(span.end, 9)
def testNoMatchLiteral(self):
matcher = client_file_finder.LiteralMatcher("norf")
span = matcher.Match("quux", 0)
self.assertFalse(span)
span = matcher.Match("norf", 2)
self.assertFalse(span)
span = matcher.Match("quuxnorf", 5)
self.assertFalse(span)
class ConditionTestMixin(object):
def setUp(self):
super(ConditionTestMixin, self).setUp()
self.temp_filepath = test_lib.TempFilePath()
def tearDown(self):
super(ConditionTestMixin, self).tearDown()
os.remove(self.temp_filepath)
@unittest.skipIf(platform.system() == "Windows", "requires Unix-like system")
class MetadataConditionTestMixin(ConditionTestMixin):
def Stat(self):
return utils.Stat(self.temp_filepath, follow_symlink=False)
def Touch(self, mode, date):
self.assertIn(mode, ["-m", "-a"])
result = subprocess.call(["touch", mode, "-t", date, self.temp_filepath])
# Sanity check in case something is wrong with the test.
self.assertEqual(result, 0)
class ModificationTimeConditionTest(MetadataConditionTestMixin,
unittest.TestCase):
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.ModificationTimeCondition(params)
self.Touch("-m", "198309121200") # 1983-09-12 12:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-m", "201710020815") # 2017-10-02 8:15
self.assertTrue(condition.Check(self.Stat()))
def testMinTime(self):
time = rdfvalue.RDFDatetime.FromHumanReadable("2017-12-24 19:00:00")
params = rdf_file_finder.FileFinderCondition()
params.modification_time.min_last_modified_time = time
condition = client_file_finder.ModificationTimeCondition(params)
self.Touch("-m", "201712240100") # 2017-12-24 1:30
self.assertFalse(condition.Check(self.Stat()))
self.Touch("-m", "201806141700") # 2018-06-14 17:00
self.assertTrue(condition.Check(self.Stat()))
def testMaxTime(self):
time = rdfvalue.RDFDatetime.FromHumanReadable("2125-12-28 18:45")
params = rdf_file_finder.FileFinderCondition()
params.modification_time.max_last_modified_time = time
condition = client_file_finder.ModificationTimeCondition(params)
self.Touch("-m", "211811111200") # 2118-11-11 12:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-m", "222510201500") # 2225-10-20 15:00
self.assertFalse(condition.Check(self.Stat()))
class AccessTimeConditionTest(MetadataConditionTestMixin, unittest.TestCase):
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.AccessTimeCondition(params)
self.Touch("-a", "241007151200") # 2410-07-15 12:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-a", "201005160745") # 2010-05-16 7:45
self.assertTrue(condition.Check(self.Stat()))
def testRange(self):
min_time = rdfvalue.RDFDatetime.FromHumanReadable("2156-01-27")
max_time = rdfvalue.RDFDatetime.FromHumanReadable("2191-12-05")
params = rdf_file_finder.FileFinderCondition()
params.access_time.min_last_access_time = min_time
params.access_time.max_last_access_time = max_time
condition = client_file_finder.AccessTimeCondition(params)
self.Touch("-a", "215007280000") # 2150-07-28 0:00
self.assertFalse(condition.Check(self.Stat()))
self.Touch("-a", "219101010000") # 2191-01-01 0:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-a", "221003010000") # 2210-03-01 0:00
self.assertFalse(condition.Check(self.Stat()))
class SizeConditionTest(MetadataConditionTestMixin, unittest.TestCase):
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.SizeCondition(params)
with open(self.temp_filepath, "wb") as fd:
fd.write("1234567")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("")
self.assertTrue(condition.Check(self.Stat()))
def testRange(self):
params = rdf_file_finder.FileFinderCondition()
params.size.min_file_size = 2
params.size.max_file_size = 6
condition = client_file_finder.SizeCondition(params)
with open(self.temp_filepath, "wb") as fd:
fd.write("1")
self.assertFalse(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("12")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("1234")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("123456")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("1234567")
self.assertFalse(condition.Check(self.Stat()))
class ExtFlagsConditionTest(MetadataConditionTestMixin, unittest.TestCase):
# https://github.com/apple/darwin-xnu/blob/master/bsd/sys/stat.h
UF_NODUMP = 0x00000001
UF_IMMUTABLE = 0x00000002
UF_HIDDEN = 0x00008000
# https://github.com/torvalds/linux/blob/master/include/uapi/linux/fs.h
FS_COMPR_FL = 0x00000004
FS_IMMUTABLE_FL = 0x00000010
FS_NODUMP_FL = 0x00000040
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.ExtFlagsCondition(params)
self.assertTrue(condition.Check(self.Stat()))
def testNoMatchOsxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_set = self.UF_IMMUTABLE | self.UF_NODUMP
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["nodump"])
self.assertFalse(condition.Check(self.Stat()))
def testNoMatchOsxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_unset = self.UF_NODUMP | self.UF_HIDDEN
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["hidden"])
self.assertFalse(condition.Check(self.Stat()))
def testNoMatchLinuxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_set = self.FS_IMMUTABLE_FL
condition = client_file_finder.ExtFlagsCondition(params)
self.assertFalse(condition.Check(self.Stat()))
def testNoMatchLinuxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_unset = self.FS_COMPR_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+c", "+d"])
self.assertFalse(condition.Check(self.Stat()))
def testMatchOsxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_set = self.UF_NODUMP | self.UF_HIDDEN
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["nodump", "hidden", "uappend"])
try:
self.assertTrue(condition.Check(self.Stat()))
finally:
# Make the test file deletable.
self._Chflags(["nouappend"])
def testMatchLinuxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_set = self.FS_COMPR_FL | self.FS_NODUMP_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+c", "+d"])
self.assertTrue(condition.Check(self.Stat()))
def testMatchOsxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_unset = self.UF_NODUMP | self.UF_IMMUTABLE
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["hidden", "uappend"])
try:
self.assertTrue(condition.Check(self.Stat()))
finally:
# Make the test file deletable.
self._Chflags(["nouappend"])
def testMatchLinuxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_unset = self.FS_IMMUTABLE_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+c", "+d"])
self.assertTrue(condition.Check(self.Stat()))
def testMatchOsxBitsMixed(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_set = self.UF_NODUMP
params.ext_flags.osx_bits_unset = self.UF_HIDDEN
params.ext_flags.linux_bits_unset = self.FS_NODUMP_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["nodump", "uappend"])
try:
self.assertTrue(condition.Check(self.Stat()))
finally:
# Make the test file deletable.
self._Chflags(["nouappend"])
def testMatchLinuxBitsMixed(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_set = self.FS_NODUMP_FL
params.ext_flags.linux_bits_unset = self.FS_COMPR_FL
params.ext_flags.osx_bits_unset = self.UF_IMMUTABLE
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+d"])
self.assertTrue(condition.Check(self.Stat()))
def _Chattr(self, args):
if platform.system() != "Linux":
raise unittest.SkipTest("requires Linux")
if subprocess.call(["which", "chattr"]) != 0:
raise unittest.SkipTest("the `chattr` command is not available")
if subprocess.call(["chattr"] + args + [self.temp_filepath]) != 0:
reason = "extended attributes are not supported by filesystem"
raise unittest.SkipTest(reason)
def _Chflags(self, args):
if platform.system() != "Darwin":
raise unittest.SkipTest("requires macOS")
subprocess.check_call(["chflags", ",".join(args), self.temp_filepath])
# TODO(hanuszczak): Write tests for the metadata change condition.
class LiteralMatchConditionTest(ConditionTestMixin, unittest.TestCase):
def testNoHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo bar quux")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "baz"
params.contents_literal_match.mode = "ALL_HITS"
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertFalse(results)
def testSomeHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo bar foo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "foo"
params.contents_literal_match.mode = "ALL_HITS"
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].data, "foo")
self.assertEqual(results[0].offset, 0)
self.assertEqual(results[0].length, 3)
self.assertEqual(results[1].data, "foo")
self.assertEqual(results[1].offset, 8)
self.assertEqual(results[1].length, 3)
def testFirstHit(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("bar foo baz foo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "foo"
params.contents_literal_match.mode = "FIRST_HIT"
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].data, "foo")
self.assertEqual(results[0].offset, 4)
self.assertEqual(results[0].length, 3)
def testContext(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo foo foo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "foo"
params.contents_literal_match.mode = "ALL_HITS"
params.contents_literal_match.bytes_before = 3
params.contents_literal_match.bytes_after = 2
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 3)
self.assertEqual(results[0].data, "foo f")
self.assertEqual(results[0].offset, 0)
self.assertEqual(results[0].length, 5)
self.assertEqual(results[1].data, "oo foo f")
self.assertEqual(results[1].offset, 1)
self.assertEqual(results[1].length, 8)
self.assertEqual(results[2].data, "oo foo")
self.assertEqual(results[2].offset, 5)
self.assertEqual(results[2].length, 6)
def testStartOffset(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("oooooooo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "ooo"
params.contents_literal_match.mode = "ALL_HITS"
params.contents_literal_match.start_offset = 2
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].data, "ooo")
self.assertEqual(results[0].offset, 2)
self.assertEqual(results[0].length, 3)
self.assertEqual(results[1].data, "ooo")
self.assertEqual(results[1].offset, 5)
self.assertEqual(results[1].length, 3)
class RegexMatchCondition(ConditionTestMixin, unittest.TestCase):
def testNoHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo bar quux")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "\\d+"
params.contents_regex_match.mode = "FIRST_HIT"
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertFalse(results)
def testSomeHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo 7 bar 49 baz343")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "\\d+"
params.contents_regex_match.mode = "ALL_HITS"
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 3)
self.assertEqual(results[0].data, "7")
self.assertEqual(results[0].offset, 4)
self.assertEqual(results[0].length, 1)
self.assertEqual(results[1].data, "49")
self.assertEqual(results[1].offset, 10)
self.assertEqual(results[1].length, 2)
self.assertEqual(results[2].data, "343")
self.assertEqual(results[2].offset, 16)
self.assertEqual(results[2].length, 3)
def testFirstHit(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("4 8 15 16 23 42 foo 108 bar")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "[a-z]+"
params.contents_regex_match.mode = "FIRST_HIT"
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].data, "foo")
self.assertEqual(results[0].offset, 16)
self.assertEqual(results[0].length, 3)
def testContext(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foobarbazbaaarquux")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "ba+r"
params.contents_regex_match.mode = "ALL_HITS"
params.contents_regex_match.bytes_before = 3
params.contents_regex_match.bytes_after = 4
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].data, "foobarbazb")
self.assertEqual(results[0].offset, 0)
self.assertEqual(results[0].length, 10)
self.assertEqual(results[1].data, "bazbaaarquux")
self.assertEqual(results[1].offset, 6)
self.assertEqual(results[1].length, 12)
def testStartOffset(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("ooooooo")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "o+"
params.contents_regex_match.mode = "FIRST_HIT"
params.contents_regex_match.start_offset = 3
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].data, "oooo")
self.assertEqual(results[0].offset, 3)
self.assertEqual(results[0].length, 4)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 35.708235
| 80
| 0.698405
| 44,255
| 0.972039
| 0
| 0
| 3,754
| 0.082455
| 0
| 0
| 5,712
| 0.125461
|
e174818f6b393a98ed554aec714f2c139a01e0c8
| 1,624
|
py
|
Python
|
lesson13/sunzhaohui/reboot/deploy/models.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
lesson13/sunzhaohui/reboot/deploy/models.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
lesson13/sunzhaohui/reboot/deploy/models.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
from users.models import UserProfile
class Deploy(models.Model):
STATUS = (
(0, '申请'),
(1, '审核'),
(2, '上线'),
(3, '已取消'),
(4, '已上线'),
(5,'失败')
)
name = models.CharField(max_length=40, verbose_name='项目名称')
version = models.CharField(max_length=40, verbose_name='上线版本')
version_desc = models.CharField(max_length=100, verbose_name='版本描述')
applicant = models.ForeignKey(UserProfile, verbose_name='申请人', on_delete=models.CASCADE,
related_name="applicant")
reviewer = models.ForeignKey(UserProfile, verbose_name='审核人', on_delete=models.CASCADE,blank=True, null=True,
related_name="reviewer")
handler = models.ForeignKey(UserProfile, verbose_name='最终处理人', blank=True, null=True,
on_delete=models.CASCADE, related_name='handler')
update_detail = models.TextField(verbose_name='更新详情')
status = models.IntegerField(default=0, choices=STATUS, verbose_name='上线状态')
apply_time = models.DateTimeField(auto_now_add=True, verbose_name='申请时间')
review_time = models.DateTimeField(auto_now=False, verbose_name='审核时间',null=True)
deploy_time = models.DateTimeField(auto_now=False, verbose_name='上线时间',null=True)
end_time = models.DateTimeField(auto_now=False, verbose_name='结束时间',null=True)
build_serial = models.IntegerField(verbose_name='构建序号',default=0,null=True)
build_url = models.CharField(max_length=100,verbose_name='构建链接',null=True)
| 40.6
| 113
| 0.67734
| 1,631
| 0.925653
| 0
| 0
| 0
| 0
| 0
| 0
| 303
| 0.171964
|
e177afe5c4e52b6ea7d71deed0bddae35b953491
| 83
|
py
|
Python
|
config.py
|
zombodotcom/twitchUserData
|
50c702b832515a946d55c2f5ca79b51436352ef2
|
[
"MIT",
"Unlicense"
] | 1
|
2019-10-22T06:23:56.000Z
|
2019-10-22T06:23:56.000Z
|
config.py
|
zombodotcom/twitchUserData
|
50c702b832515a946d55c2f5ca79b51436352ef2
|
[
"MIT",
"Unlicense"
] | null | null | null |
config.py
|
zombodotcom/twitchUserData
|
50c702b832515a946d55c2f5ca79b51436352ef2
|
[
"MIT",
"Unlicense"
] | null | null | null |
Client_ID = "<Your Client ID>"
Authorization = "Bearer <Insert Bearer token Here>"
| 41.5
| 51
| 0.73494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.638554
|
e17842bac608c397e2ccd355daad8b350e2c2102
| 1,900
|
py
|
Python
|
echo_client.py
|
gauravssnl/Python3-Network-Programming
|
32bb5a4872bce60219c6387b6f3c2e7f31b0654a
|
[
"MIT"
] | 4
|
2017-12-04T15:05:35.000Z
|
2021-03-24T11:53:39.000Z
|
echo_client.py
|
gauravssnl/Python3-Network-Programming
|
32bb5a4872bce60219c6387b6f3c2e7f31b0654a
|
[
"MIT"
] | null | null | null |
echo_client.py
|
gauravssnl/Python3-Network-Programming
|
32bb5a4872bce60219c6387b6f3c2e7f31b0654a
|
[
"MIT"
] | null | null | null |
import socket
host = 'localhost'
# encode converts a plain string to a bytes string;
# it is used when sending data via the socket
encode = lambda text: text.encode()
# decode converts a bytes string received via the socket back to a plain string
decode = lambda byte_text: byte_text.decode()
def echo_client(port, message="Hello"):
# create a TCP socket
sock = socket.socket()
server_address = (host, port)
# connect to server
print("Connecting to server ")
sock.connect(server_address)
# send data
try:
# send message
print("Sending data: {}".format(message))
# sendall needs a bytes string, so encode the plain string first
sock.sendall(encode(message))
# Look for response
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected:
data = sock.recv(16)
amount_received += len(data)
print("Recieved from server: {}".format(decode(data)))
except socket.error as e:
print("socket error: {}".format(e))
except Exception as e:
print("other exception: {}".format(e))
finally:
print("Closing connection to server")
sock.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Simple TCP echo client')
parser.add_argument("--port", action="store",
dest="port", type=int, required=True)
parser.add_argument("--message", action="store",
dest="message", required=False)
get_args = parser.parse_args()
port = get_args.port
message = get_args.message
if message:
echo_client(port, message)
else:
echo_client(port)
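# Shell usage sketch (added; it assumes a compatible echo server is already
# listening on the chosen port -- the server script name is hypothetical):
#   python echo_server.py --port 9900
#   python echo_client.py --port 9900 --message "Hello"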
| 31.666667
| 79
| 0.644211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 685
| 0.360526
|
e179f0630567fb54dca2d83ae47722317d2637db
| 2,247
|
py
|
Python
|
my_collection/paxos/proposer.py
|
khanh-nguyen-code/my-collection
|
31581ef0b1dae67aafb1f4e64b9973a38cc01edf
|
[
"MIT"
] | null | null | null |
my_collection/paxos/proposer.py
|
khanh-nguyen-code/my-collection
|
31581ef0b1dae67aafb1f4e64b9973a38cc01edf
|
[
"MIT"
] | null | null | null |
my_collection/paxos/proposer.py
|
khanh-nguyen-code/my-collection
|
31581ef0b1dae67aafb1f4e64b9973a38cc01edf
|
[
"MIT"
] | null | null | null |
from typing import Optional
from my_collection.paxos.common import NodeId, Router, ProposalId, Value, PrepareRequest, is_majority, PrepareResponse, \
Proposal, ProposeRequest, ProposeResponse, CODE_OK
class Proposer:
node_id: NodeId
acceptor_id_list: list[NodeId]
router: Router
current_proposal_id: ProposalId # init {0, node_id}
def __init__(self, node_id: NodeId, acceptor_id_list: list[NodeId], router: Router):
self.node_id = node_id
self.acceptor_id_list = acceptor_id_list
self.router = router
self.current_proposal_id = ProposalId(id=0, node_id=node_id)
async def propose_once(self, value: Value) -> Optional[Value]:
proposal_id = self.current_proposal_id
self.current_proposal_id.id += 1
request = PrepareRequest(proposal_id=proposal_id)
response_list: list[PrepareResponse] = [
await self.router(acceptor_id, request, PrepareResponse.parse_obj)
for acceptor_id in self.acceptor_id_list
]
response_list: list[PrepareResponse] = [
response
for response in response_list
if response is not None and response.code == CODE_OK
]
if not is_majority(len(self.acceptor_id_list), len(response_list)):
return None
accepted_proposal_list = [
response.proposal
for response in response_list
if response.proposal is not None
]
if len(accepted_proposal_list) > 0:
proposal = max(accepted_proposal_list, key=lambda x: x.id)
else:
proposal = Proposal(id=proposal_id, value=value)
request = ProposeRequest(proposal=proposal)
response_list: list[ProposeResponse] = [
await self.router(acceptor_id, request, ProposeResponse.parse_obj)
for acceptor_id in self.acceptor_id_list
]
response_list: list[ProposeResponse] = [
response
for response in response_list
if response is not None and response.code == CODE_OK
]
if not is_majority(len(self.acceptor_id_list), len(response_list)):
return None
return response_list[0].proposal.value
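# Usage sketch (added, not part of the original module). It assumes a
# hypothetical in-process router that forwards each request to a local
# acceptor object and parses the reply; `acceptors` and `handle` are
# illustrative names only.
#
#   async def local_router(node_id, request, parse):
#       raw = await acceptors[node_id].handle(request)
#       return parse(raw)
#
#   proposer = Proposer(node_id=0, acceptor_id_list=[1, 2, 3],
#                       router=local_router)
#   chosen = await proposer.propose_once(value="some value")
#   # `chosen` is the accepted value, or None if no majority answered.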
| 38.084746
| 121
| 0.659546
| 2,038
| 0.906987
| 0
| 0
| 0
| 0
| 1,618
| 0.720071
| 19
| 0.008456
|
e17a77153a0967bee562363294a90df123d695b6
| 6,551
|
py
|
Python
|
.leetcode/749.contain-virus.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/749.contain-virus.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/749.contain-virus.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
# @lc app=leetcode id=749 lang=python3
#
# [749] Contain Virus
#
# https://leetcode.com/problems/contain-virus/description/
#
# algorithms
# Hard (49.14%)
# Likes: 190
# Dislikes: 349
# Total Accepted: 7.5K
# Total Submissions: 15.2K
# Testcase Example: '[[0,1,0,0,0,0,0,1],[0,1,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0]]'
#
# A virus is spreading rapidly, and your task is to quarantine the infected
# area by installing walls.
#
# The world is modeled as an m x n binary grid isInfected, where
# isInfected[i][j] == 0 represents uninfected cells, and isInfected[i][j] == 1
# represents cells contaminated with the virus. A wall (and only one wall) can
# be installed between any two 4-directionally adjacent cells, on the shared
# boundary.
#
# Every night, the virus spreads to all neighboring cells in all four
# directions unless blocked by a wall. Resources are limited. Each day, you can
# install walls around only one region (i.e., the affected area (continuous
# block of infected cells) that threatens the most uninfected cells the
# following night). There will never be a tie.
#
# Return the number of walls used to quarantine all the infected regions. If
# the world will become fully infected, return the number of walls used.
#
#
# Example 1:
#
#
# Input: isInfected =
# [[0,1,0,0,0,0,0,1],[0,1,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0]]
# Output: 10
# Explanation: There are 2 contaminated regions.
# On the first day, add 5 walls to quarantine the viral region on the left. The
# board after the virus spreads is:
#
# On the second day, add 5 walls to quarantine the viral region on the right.
# The virus is fully contained.
#
#
#
# Example 2:
#
#
# Input: isInfected = [[1,1,1],[1,0,1],[1,1,1]]
# Output: 4
# Explanation: Even though there is only one cell saved, there are 4 walls
# built.
# Notice that walls are only built on the shared boundary of two different
# cells.
#
#
# Example 3:
#
#
# Input: isInfected =
# [[1,1,1,0,0,0,0,0,0],[1,0,1,0,1,1,1,1,1],[1,1,1,0,0,0,0,0,0]]
# Output: 13
# Explanation: The region on the left only builds two new walls.
#
#
#
# Constraints:
#
#
# m == isInfected.length
# n == isInfected[i].length
# 1 <= m, n <= 50
# isInfected[i][j] is either 0 or 1.
# There is always a contiguous viral region throughout the described process
# that will infect strictly more uncontaminated squares in the next round.
#
#
#
# @lc tags=hash-table
# @lc imports=start
from typing_extensions import get_args
from imports import *
# @lc imports=end
# @lc idea=start
#
# Use walls to contain the infection; each day, wall off the region that threatens the most uninfected cells.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Area:
def __init__(self,
wallCount=0,
contaminatedArea=None,
uninfectedArea=None) -> None:
self.wallCount = wallCount
self.contaminatedArea = contaminatedArea if contaminatedArea is not None else []
self.uninfectedArea = uninfectedArea if uninfectedArea is not None else set(
)
pass
class Solution:
def containVirus(self, isInfected: List[List[int]]) -> int:
rows, cols = len(isInfected), len(isInfected[0])
dires = [(0, 1), (0, -1), (-1, 0), (1, 0)]
def generateAllArea() -> List[Area]:
visited = set()
areas = []
for i, j in product(range(rows), range(cols)):
if (i, j) in visited:
continue
visited.add((i, j))
state = isInfected[i][j]
# new contaminated area
if state == 1:
wallCount = 0
contaminatedArea = [(i, j)]
uninfectedArea = set()
idx = 0
while idx < len(contaminatedArea):
i, j = contaminatedArea[idx]
for oi, oj in dires:
ni, nj = i + oi, j + oj
if 0 <= ni < rows and 0 <= nj < cols:
ns = isInfected[ni][nj]
if ns == 0:
uninfectedArea.add((ni, nj))
wallCount += 1
if ns == 1 and (ni, nj) not in visited:
visited.add((ni, nj))
contaminatedArea.append((ni, nj))
idx += 1
area = Area(wallCount=wallCount,
contaminatedArea=contaminatedArea,
uninfectedArea=uninfectedArea)
areas.append(area)
return areas
def countWall():
res = 0
areas = generateAllArea()
while len(areas) > 0:
area = max(areas, key=lambda area: len(area.uninfectedArea))
res += area.wallCount
for i, j in area.contaminatedArea:
isInfected[i][j] = -1
areas.remove(area)
for area in areas:
for i, j in area.uninfectedArea:
isInfected[i][j] = 1
areas = generateAllArea()
return res
return countWall()
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print(
'isInfected =[[0,1,0,0,0,0,0,1],[0,1,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0]]'
)
print('Exception :')
print('10')
print('Output :')
print(
str(Solution().containVirus([[0, 1, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0]])))
print()
print('Example 2:')
print('Input : ')
print('isInfected = [[1,1,1],[1,0,1],[1,1,1]]')
print('Exception :')
print('4')
print('Output :')
print(str(Solution().containVirus([[1, 1, 1], [1, 0, 1], [1, 1, 1]])))
print()
print('Example 3:')
print('Input : ')
print(
'isInfected =[[1,1,1,0,0,0,0,0,0],[1,0,1,0,1,1,1,1,1],[1,1,1,0,0,0,0,0,0]]'
)
print('Exception :')
print('13')
print('Output :')
print(
str(Solution().containVirus([[1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 0, 0]])))
print()
pass
# @lc main=end
| 29.376682
| 96
| 0.523432
| 2,657
| 0.403125
| 0
| 0
| 0
| 0
| 0
| 0
| 2,925
| 0.443787
|
e17b963468c4245e4f159926617ef232d646797a
| 4,309
|
py
|
Python
|
src/robotcontrol.py
|
social-robotics-lab/dog_sample
|
e70706bdbdbb7be222ee71cd9529dc433bf705ce
|
[
"MIT"
] | null | null | null |
src/robotcontrol.py
|
social-robotics-lab/dog_sample
|
e70706bdbdbb7be222ee71cd9529dc433bf705ce
|
[
"MIT"
] | null | null | null |
src/robotcontrol.py
|
social-robotics-lab/dog_sample
|
e70706bdbdbb7be222ee71cd9529dc433bf705ce
|
[
"MIT"
] | null | null | null |
import json
import os.path
import socket
import subprocess
from pydub import AudioSegment
from typing import Dict, List
class RCClient(object):
"""
Client class for operating the RobotController.
"""
def __init__(self, host:str, speech_port=22222, pose_port=22223, read_port=22224):
self.host = host
self.speech_port = speech_port
self.pose_port = pose_port
self.read_port = read_port
self.home_servomap = dict(HEAD_R=0, HEAD_P=0, HEAD_Y=0, BODY_Y=0, L_SHOU=-90, L_ELBO=0, R_SHOU=90, R_ELBO=0)
self.home_ledmap = dict(L_EYE_R=255, L_EYE_G=255, L_EYE_B=255, R_EYE_R=255, R_EYE_G=255, R_EYE_B=255)
def say_text(self, text:str, speed=1.0, emotion='normal') -> float:
"""
Speak the given text and return the audio duration in seconds.
"""
output_file = '{}_say.wav'.format(self.host)
make_wav(text, speed, emotion, output_file)
with open(output_file, 'rb') as f:
data = f.read()
send(self.host, self.speech_port, data)
sound = AudioSegment.from_file(output_file, 'wav')
return sound.duration_seconds
def play_wav(self, wav_file:str) -> float:
"""
Play a wav file and return its duration in seconds.
"""
with open(wav_file, 'rb') as f:
data = f.read()
send(self.host, self.speech_port, data)
sound = AudioSegment.from_file(wav_file, 'wav')
return sound.duration_seconds
def read_axes(self) -> dict:
"""
Read the current angle values of all joints.
"""
data = recv(self.host, self.read_port)
axes = json.loads(data)
return axes
def play_pose(self, pose:dict) -> float:
"""
Execute a pose and return its duration in seconds.
"""
data = json.dumps(pose).encode('utf-8')
send(self.host, self.pose_port, data)
return pose['Msec'] / 1000.0
def reset_pose(self, speed=1.0):
"""
Return the pose to the home position.
"""
msec = int(1000 / speed)
pose = dict(Msec=msec, ServoMap=self.home_servomap, LedMap=self.home_ledmap)
data = json.dumps(pose).encode('utf-8')
send(self.host, self.pose_port, data)
#---------------------
# Low level functions
#---------------------
def recv(ip:str, port:int) -> str:
conn = connect(ip, port)
size = read_size(conn)
data = read_data(conn, size)
close(conn)
return data.decode('utf-8')
def send(ip:str, port:int, data:str):
conn = connect(ip, port)
size = len(data)
conn.send(size.to_bytes(4, byteorder='big'))
conn.send(data)
close(conn)
def connect(ip:str, port:int):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((ip, port))
return conn
def close(conn:socket):
conn.shutdown(1)
conn.close()
def read_size(conn:socket):
b_size = conn.recv(4)
return int.from_bytes(b_size, byteorder='big')
def read_data(conn:socket, size:int):
chunks = []
bytes_recved = 0
while bytes_recved < size:
chunk = conn.recv(size - bytes_recved)
if chunk == b'':
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recved += len(chunk)
return b''.join(chunks)
# Path to which OpenJTalk was installed
OPENJTALK_BINPATH = '/usr/bin'
OPENJTALK_DICPATH = '/var/lib/mecab/dic/open-jtalk/naist-jdic'
OPENJTALK_VOICEPATH = '/usr/share/hts-voice/mei/mei_{emotion}.htsvoice'
def make_wav(text, speed=1.0, emotion='normal', output_file='__temp.wav', output_dir=os.getcwd()):
"""
Function to make a wav file using OpenJTalk.
args:
speed: The speed of speech. (Default: 1.0)
emotion: Voice emotion. You can specify 'normal', 'happy', 'bashful', 'angry', or 'sad'.
output_file: The file name made by this function. (Default: '__temp.wav')
output_dir: The directory of output_file. (Default: Current directory)
"""
open_jtalk = [OPENJTALK_BINPATH + '/open_jtalk']
mech = ['-x', OPENJTALK_DICPATH]
htsvoice = ['-m', OPENJTALK_VOICEPATH.format(emotion=emotion)]
speed = ['-r', str(speed)]
outwav = ['-ow', os.path.join(output_dir, output_file)]
cmd = open_jtalk + mech + htsvoice + speed + outwav
c = subprocess.Popen(cmd,stdin=subprocess.PIPE)
c.stdin.write(text.encode('utf-8'))
c.stdin.close()
c.wait()
return os.path.join(output_dir, output_file)
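# Usage sketch (added; the host address is hypothetical and OpenJTalk with the
# mei voices must be installed at the paths configured above):
#   rc = RCClient('192.168.0.10')
#   duration = rc.say_text('こんにちは', speed=1.0, emotion='happy')
#   rc.reset_pose()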
| 31.683824
| 116
| 0.620562
| 2,103
| 0.47269
| 0
| 0
| 0
| 0
| 0
| 0
| 1,109
| 0.249269
|
e17ccdc4212c86466c9d7221473dc6138120cb0b
| 1,375
|
py
|
Python
|
exercises/shortest-path/ShortestPath.py
|
maxwellmattryan/cs-313e
|
462a871475ba956e364a0faf98284633462984b8
|
[
"MIT"
] | 1
|
2020-02-05T23:56:16.000Z
|
2020-02-05T23:56:16.000Z
|
exercises/shortest-path/ShortestPath.py
|
maxwellmattryan/cs-313e
|
462a871475ba956e364a0faf98284633462984b8
|
[
"MIT"
] | null | null | null |
exercises/shortest-path/ShortestPath.py
|
maxwellmattryan/cs-313e
|
462a871475ba956e364a0faf98284633462984b8
|
[
"MIT"
] | 2
|
2020-03-09T16:26:00.000Z
|
2021-07-23T03:17:11.000Z
|
import math
class Point (object):
# constructor
def __init__ (self, x = 0, y = 0):
self.x = x
self.y = y
# get the distance to another Point object
def dist (self, other):
return math.hypot (self.x - other.x, self.y - other.y)
# string representation of a Point
def __str__ (self):
return '(' + str(self.x) + ', ' + str(self.y) + ')'
# test for equality of two Point objects
def __eq__ (self, other):
tol = 1.0e-16
return ((abs(self.x - other.x) < tol) and (abs(self.y - other.y) < tol))
def getPoints():
myFile = open("points.txt", "r")
points = []
for line in myFile:
line = line.strip()
x = int(line.split("\t")[0])
y = int(line.split("\t")[1])
z = x * y
print(z)
point = Point(x, y)
points.append(point)
#print(point)
return(points)
def getShortestDistance(points):
shortestDistance = -1
for i in range(len(points) - 1):
for j in range(i + 1, len(points)):
if(shortestDistance == -1):
shortestDistance = points[i].dist(points[j])
shortestDistance = min(shortestDistance, points[i].dist(points[j]))
print(shortestDistance)
return(shortestDistance)
def main():
# create an empty list of Point objects
points = getPoints()
shortestDistance = getShortestDistance(points)
main()
| 25.462963
| 76
| 0.589091
| 518
| 0.376727
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.155636
|
e17d136893ad674b6eda0ec3efee1f8fda058d2d
| 341
|
py
|
Python
|
Data Analysis/csv remove other label.py
|
byew/python-do-differernt-csv
|
094b154834ee48210c2ee4a6a529d8fe76055fb7
|
[
"MIT"
] | null | null | null |
Data Analysis/csv remove other label.py
|
byew/python-do-differernt-csv
|
094b154834ee48210c2ee4a6a529d8fe76055fb7
|
[
"MIT"
] | null | null | null |
Data Analysis/csv remove other label.py
|
byew/python-do-differernt-csv
|
094b154834ee48210c2ee4a6a529d8fe76055fb7
|
[
"MIT"
] | null | null | null |
import pandas as pd
exa = pd.read_csv('en_dup.csv')
exa.loc[exa['label'] =='F', 'label']= 0
exa.loc[exa['label'] =='T', 'label']= 1
exa.loc[exa['label'] =='U', 'label']= 2
# Do not read rows with label 2; keep only rows with labels 0 and 1
exa0 = exa.loc[exa["label"] == 0]
exa1 = exa.loc[exa["label"] == 1]
exa = [exa0, exa1]
exa = pd.concat(exa)
exa.to_csv('train.csv', index=0)
| 17.947368
| 39
| 0.595308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.350975
|
e17d6ab7a795e35c2eccfd187299cdaa6e5f367c
| 60,009
|
py
|
Python
|
pySPACE/resources/dataset_defs/stream.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 32
|
2015-02-20T09:03:09.000Z
|
2022-02-25T22:32:52.000Z
|
pySPACE/resources/dataset_defs/stream.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 5
|
2015-05-18T15:08:40.000Z
|
2020-03-05T19:18:01.000Z
|
pySPACE/resources/dataset_defs/stream.py
|
pyspace/pyspace
|
763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62
|
[
"BSD-3-Clause"
] | 18
|
2015-09-28T07:16:38.000Z
|
2021-01-20T13:52:19.000Z
|
""" Reader objects and main class for continuous data (time series)
Depending on the storage format, the fitting reader is loaded and takes care
of reading the files.
.. todo:: unify with analyzer collection!
eeg source and analyzer sink node should work together
this connection should be documented when tested
"""
import os
import glob
import re
import numpy
import scipy
from scipy.io import loadmat
import warnings
import csv
from pySPACE.missions.support.windower import MarkerWindower
import logging
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.missions.support.WindowerInterface import AbstractStreamReader
class StreamDataset(BaseDataset):
""" Wrapper for dealing with stream datasets like raw EEG datasets
For loading streaming data you need the
:class:`~pySPACE.missions.nodes.source.time_series_source.Stream2TimeSeriesSourceNode`
as described in :ref:`tutorial_node_chain_operation`.
If ``file_name`` is given in the :ref:`meta data <storage>`,
the corresponding file is loaded, otherwise ``storage_format`` is used
to search for the needed file.
Some formats are already supported, like EEG data in the .eeg/.vhdr/.vmrk
format and other streaming data in edf or csv format. It is also possible to
load EEGLAB format (.set/.fdt) which itself can import a variety of
different EEG formats (http://sccn.ucsd.edu/eeglab/).
**csv**
Labels can be coded with the help of an extra channel as a column
in the csv-file or an extra file.
Normally the label is transformed immediately to the label
or this is done later on with extra algorithms.
The file suffix should be *csv*.
Special Parameters in the metadata:
:sampling_frequency:
Frequency of the input data (corresponds to
1/(number of samples of one second))
(*optional, default: 1*)
:marker:
Name of the marker channel. If it is not found,
no marker is forwarded.
(*optional, default: 'marker'*)
:marker_file:
If the marker is not a column in the data file,
an external csv file in the same folder can be specified
with one column with the heading named like the *marker*
parameter and one column named *time* with increasing
numbers, which correspond to the index in the data file.
(The first sample corresponds to index one.)
Here, the relative path is needed as for file_name.
(*optional, default: None*)
**BP_eeg**
Here the standard BrainProducts format is expected with the corresponding
*.vhdr* and *.vmrk* with the same base name as the *.eeg* file.
**set**
EEGLABs format with two files (extension .set and .fdt) is expected.
**edf**
When using the European Data Format there are two different specifications
that are supported:
Plain EDF (see `EDF Spec <http://www.edfplus.info/specs/edf.html>`_) and
EDF+ (see `EDF+ Spec <http://www.edfplus.info/specs/edfplus.html>`_).
When using EDF there is no annotation- or marker-channel inside the data-
segment. You can process the data originating from an EDF file, but be
aware that you will not have any marker information at hand to later cut
the continuous data into interesting segments.
EDF+ extended the original EDF-Format by an annotations-channel
(named 'EDF+C') and added a feature to combine non-continuous
data segments (named 'EDF+D') in one file.
The EDF+C Format is fully supported i.e. the annotations-channel is
parsed and is forwarded in combination with the corresponding data
so that the data can later be cut into meaningful segments (windowing).
Files, which make use of the EDF+D option, can be streamed - BUT: The
information about different segments in the file is completely ignored!
The file is treated as if it contains EDF+C data. The full support for
EDF+D files may be integrated in a future release.
In any case, the file suffix should be *edf*.
.. warning:: Currently only one streaming dataset can be loaded
as testing data.
.. todo:: Implement loading of training and testing data.
**Parameters**
:dataset_md:
A dictionary with all the meta data.
(*optional, default: None*)
:dataset_dir:
The (absolute) directory of the dataset.
(*obligatory, default: None*)
:Author: Johannes Teiwes (johannes.teiwes@dfki.de)
:Date: 2010/10/13
:refactored: 2013/06/10 Johannes Teiwes and Mario Michael Krell
"""
def __init__(self, dataset_md=None, dataset_dir=None, **kwargs):
super(StreamDataset, self).__init__(dataset_md=dataset_md)
self.dataset_dir = dataset_dir
if not self.meta_data.has_key('storage_format'):
warnings.warn(
str("Storage Format not set for current dataset in %s" %
dataset_dir))
if self.meta_data.has_key("file_name"):
data_files = [os.path.join(dataset_dir,self.meta_data["file_name"])]
if not "storage_format" in self.meta_data:
self.meta_data["storage_format"] = \
os.path.splitext(data_files[0])[1].lower()
elif self.meta_data.has_key('storage_format'):
self.meta_data["storage_format"] = \
self.meta_data['storage_format'].lower()
# mapping of storage format to file suffix
suffix = self.meta_data['storage_format']
if "eeg" in suffix:
suffix = "eeg"
# searching files
data_files = glob.glob(os.path.join(
dataset_dir, str("*.%s" % suffix)))
if len(data_files) == 0 and suffix == "eeg":
suffix = "dat"
data_files = glob.glob(os.path.join(
dataset_dir, str("*.%s" % suffix)))
if len(data_files) == 0:
raise IOError, str("Cannot find any .%s file in %s" %
(suffix, dataset_dir))
if len(data_files) != 1:
raise IOError, str("Found more than one *.%s file in %s" %
(suffix, dataset_dir))
else:
# assume .eeg files
data_files = glob.glob(dataset_dir + os.sep + "*.eeg")
if len(data_files) == 0:
data_files = glob.glob(dataset_dir + os.sep + "*.dat")
assert len(data_files) == 1, \
"Error locating eeg-data files (.eeg/.dat)"
self.data_file = data_files[0]
self.reader = None
ec_files = glob.glob(dataset_dir + os.sep + "*.elc")
assert len(ec_files) <= 1, "More than one electrode position file found!"
if len(ec_files)==1:
try:
ec = {}
ec_file = open(ec_files[0], 'r')
while ec_file.readline().strip() != "Positions":
pass
for line in ec_file:
if line == "Labels":
break
pair = line.split(":")
ec[pair[0]] = \
numpy.array([int(x) for x in pair[1].split(" ")])
nas = ec["NAS"]
lpa = ec["LPA"]
rpa = ec["RPA"]
origin = (rpa + lpa) * 0.5
vx = nas - origin
vx = vx / numpy.linalg.norm(vx)
vz = numpy.cross(vx, lpa - rpa)
vz = vz / numpy.linalg.norm(vz)
vy = numpy.cross(vz, vx)
vy = vy / numpy.linalg.norm(vy)
rotMat = numpy.linalg.inv(numpy.matrix([vx, vy, vz]))
transMat = numpy.dot(-rotMat, origin)
for k, v in self.ec.iteritems():
ec[k] = numpy.dot(transMat, numpy.dot(v, rotMat))
self.meta_data["electrode_coordinates"] = ec
self._log("Loaded dataset specific electrode position file", logging.INFO)
except Exception, e:
print e
#self.meta_data["electrode_coordinates"] = StreamDataset.ec
finally:
ec_file.close()
# Spherical electrode coordinates (x-axis points to the right,
# y-axis to the front, z-axis runs through the vertex; 3 params: r (radius)
# set to 1 on standard caps, theta (angle between z-axis and line connecting
# point and coordinate origin, < 0 in left hemisphere, > 0 in right
# hemisphere) and phi (angle between x-axis and projection of the line
# connecting the point and coordinate origin on the xy plane, > 0 for front
# right and back left quadrants, < 0 for front left and back right)) are
# exported from analyzer2 (generic export; saved in header file) and
# converted to Cartesian coordinates via
# x = r * sin(rad(theta)) * cos(rad(phi))
# y = r * sin(rad(theta)) * sin(rad(phi))
# z = r * cos(rad(theta))
# electrodes FP1/Fp1 and FP2/Fp2 have same coordinates
ec = { 'CPP5h': (-0.72326832569043442, -0.50643793379675761, 0.46947156278589086),
'AFF1h': (-0.11672038362490393, 0.83050868362971098, 0.5446390350150272),
'O2': (0.30901699437494745, -0.95105651629515353, 6.123233995736766e-17),
'O1': (-0.30901699437494745, -0.95105651629515353, 6.123233995736766e-17),
'FCC6h': (0.82034360384187455, 0.1743694158206236, 0.5446390350150272),
'TPP8h': (0.86385168719631511, -0.47884080932566353, 0.15643446504023092),
'PPO10h': (0.69411523801289432, -0.69411523801289421, -0.1908089953765448),
'TP7': (-0.95105651629515353, -0.3090169943749474, 6.123233995736766e-17),
'CPz': (2.293803827831453e-17, -0.37460659341591201, 0.92718385456678742),
'CCP4h': (0.54232717509597328, -0.18673822182292288, 0.8191520442889918),
'TP9': (-0.87545213915725872, -0.28445164312142457, -0.3907311284892736),
'TP8': (0.95105651629515353, -0.3090169943749474, 6.123233995736766e-17),
'FCC5h': (-0.82034360384187455, 0.1743694158206236, 0.5446390350150272),
'CPP2h': (0.16769752048474765, -0.54851387399083462, 0.8191520442889918),
'FFC1h': (-0.16769752048474765, 0.54851387399083462, 0.8191520442889918),
'TPP7h': (-0.86385168719631511, -0.47884080932566353, 0.15643446504023092),
'PO10': (0.54105917752298882, -0.7447040698476447, -0.3907311284892736),
'FTT8h': (0.96671406082679645, 0.17045777155400837, 0.19080899537654492),
'Oz': (6.123233995736766e-17, -1.0, 6.123233995736766e-17),
'AFF2h': (0.11672038362490393, 0.83050868362971098, 0.5446390350150272),
'CCP3h': (-0.54232717509597328, -0.18673822182292288, 0.8191520442889918),
'CP1': (-0.35777550984135725, -0.37048738597260156, 0.85716730070211233),
'CP2': (0.35777550984135725, -0.37048738597260156, 0.85716730070211233),
'CP3': (-0.66008387202973706, -0.36589046498407451, 0.6560590289905075),
'CP4': (0.66008387202973706, -0.36589046498407451, 0.6560590289905075),
'CP5': (-0.87157241273869712, -0.33456530317942912, 0.35836794954530016),
'CP6': (0.87157241273869712, -0.33456530317942912, 0.35836794954530016),
'FFT7h': (-0.86385168719631511, 0.47884080932566353, 0.15643446504023092),
'FTT7h': (-0.96671406082679645, 0.17045777155400837, 0.19080899537654492),
'PPO5h': (-0.5455036073850148, -0.7790598895575418, 0.30901699437494745),
'AFp1': (-0.13661609910710645, 0.97207405517694545, 0.19080899537654492),
'AFp2': (0.13661609910710645, 0.97207405517694545, 0.19080899537654492),
'FT10': (0.87545213915725872, 0.28445164312142457, -0.3907311284892736),
'POO9h': (-0.44564941557132876, -0.87463622477252034, -0.1908089953765448),
'POO10h': (0.44564941557132876, -0.87463622477252034, -0.1908089953765448),
'T8': (1.0, -0.0, 6.123233995736766e-17),
'FT7': (-0.95105651629515353, 0.3090169943749474, 6.123233995736766e-17),
'FT9': (-0.87545213915725872, 0.28445164312142457, -0.3907311284892736),
'FT8': (0.95105651629515353, 0.3090169943749474, 6.123233995736766e-17),
'FFC3h': (-0.48133227677866169, 0.53457365038161042, 0.69465837045899737),
'P10': (0.74470406984764481, -0.54105917752298871, -0.3907311284892736),
'AF8': (0.58778525229247325, 0.80901699437494734, 6.123233995736766e-17),
'T7': (-1.0, -0.0, 6.123233995736766e-17),
'AF4': (0.36009496929665602, 0.89126632448749754, 0.27563735581699916),
'AF7': (-0.58778525229247325, 0.80901699437494734, 6.123233995736766e-17),
'AF3': (-0.36009496929665602, 0.89126632448749754, 0.27563735581699916),
'P2': (0.28271918486560565, -0.69975453766943163, 0.6560590289905075),
'P3': (-0.5450074457687164, -0.67302814507021891, 0.50000000000000011),
'CPP4h': (0.48133227677866169, -0.53457365038161042, 0.69465837045899737),
'P1': (-0.28271918486560565, -0.69975453766943163, 0.6560590289905075),
'P6': (0.72547341102583851, -0.63064441484306177, 0.27563735581699916),
'P7': (-0.80901699437494745, -0.58778525229247314, 6.123233995736766e-17),
'P4': (0.5450074457687164, -0.67302814507021891, 0.50000000000000011),
'P5': (-0.72547341102583851, -0.63064441484306177, 0.27563735581699916),
'P8': (0.80901699437494745, -0.58778525229247314, 6.123233995736766e-17),
'P9': (-0.74470406984764481, -0.54105917752298871, -0.3907311284892736),
'PPO2h': (0.11672038362490393, -0.83050868362971098, 0.5446390350150272),
'F10': (0.74470406984764481, 0.54105917752298871, -0.3907311284892736),
'TPP9h': (-0.87463622477252045, -0.4456494155713287, -0.1908089953765448),
'FTT9h': (-0.96954172390250215, 0.1535603233115839, -0.1908089953765448),
'CCP5h': (-0.82034360384187455, -0.1743694158206236, 0.5446390350150272),
'AFF6h': (0.5455036073850148, 0.7790598895575418, 0.30901699437494745),
'FFC2h': (0.16769752048474765, 0.54851387399083462, 0.8191520442889918),
'FCz': (2.293803827831453e-17, 0.37460659341591201, 0.92718385456678742),
'FCC2h': (0.1949050434465294, 0.19490504344652934, 0.96126169593831889),
'CPP1h': (-0.16769752048474765, -0.54851387399083462, 0.8191520442889918),
'FTT10h': (0.96954172390250215, 0.1535603233115839, -0.1908089953765448),
'Fz': (4.3297802811774658e-17, 0.70710678118654746, 0.70710678118654757),
'TTP8h': (0.96671406082679645, -0.17045777155400837, 0.19080899537654492),
'FFT9h': (-0.87463622477252045, 0.4456494155713287, -0.1908089953765448),
'Pz': (4.3297802811774658e-17, -0.70710678118654746, 0.70710678118654757),
'FFC4h': (0.48133227677866169, 0.53457365038161042, 0.69465837045899737),
'C3': (-0.70710678118654746, -0.0, 0.70710678118654757),
'C2': (0.39073112848927372, -0.0, 0.92050485345244037),
'C1': (-0.39073112848927372, -0.0, 0.92050485345244037),
'C6': (0.92718385456678731, -0.0, 0.37460659341591218),
'C5': (-0.92718385456678731, -0.0, 0.37460659341591218),
'C4': (0.70710678118654746, -0.0, 0.70710678118654757),
'TTP7h': (-0.96671406082679645, -0.17045777155400837, 0.19080899537654492),
'FC1': (-0.35777550984135725, 0.37048738597260156, 0.85716730070211233),
'FC2': (0.35777550984135725, 0.37048738597260156, 0.85716730070211233),
'FC3': (-0.66008387202973706, 0.36589046498407451, 0.6560590289905075),
'FC4': (0.66008387202973706, 0.36589046498407451, 0.6560590289905075),
'FC5': (-0.87157241273869712, 0.33456530317942912, 0.35836794954530016),
'FC6': (0.87157241273869712, 0.33456530317942912, 0.35836794954530016),
'FCC1h': (-0.1949050434465294, 0.19490504344652934, 0.96126169593831889),
'CPP6h': (0.72326832569043442, -0.50643793379675761, 0.46947156278589086),
'F1': (-0.28271918486560565, 0.69975453766943163, 0.6560590289905075),
'F2': (0.28271918486560565, 0.69975453766943163, 0.6560590289905075),
'F3': (-0.5450074457687164, 0.67302814507021891, 0.50000000000000011),
'F4': (0.5450074457687164, 0.67302814507021891, 0.50000000000000011),
'F5': (-0.72547341102583851, 0.63064441484306177, 0.27563735581699916),
'F6': (0.72547341102583851, 0.63064441484306177, 0.27563735581699916),
'F7': (-0.80901699437494745, 0.58778525229247314, 6.123233995736766e-17),
'F8': (0.80901699437494745, 0.58778525229247314, 6.123233995736766e-17),
'F9': (-0.74470406984764481, 0.54105917752298871, -0.3907311284892736),
'FFT8h': (0.86385168719631511, 0.47884080932566353, 0.15643446504023092),
'FFT10h': (0.87463622477252045, 0.4456494155713287, -0.1908089953765448),
'Cz': (0.0, 0.0, 1.0),
'FFC5h': (-0.72326832569043442, 0.50643793379675761, 0.46947156278589086),
'FCC4h': (0.54232717509597328, 0.18673822182292288, 0.8191520442889918),
'TP10': (0.87545213915725872, -0.28445164312142457, -0.3907311284892736),
'POz': (5.6364666119006729e-17, -0.92050485345244037, 0.39073112848927372),
'CPP3h': (-0.48133227677866169, -0.53457365038161042, 0.69465837045899737),
'FFC6h': (0.72326832569043442, 0.50643793379675761, 0.46947156278589086),
'PPO1h': (-0.11672038362490393, -0.83050868362971098, 0.5446390350150272),
'Fpz': (6.123233995736766e-17, 1.0, 6.123233995736766e-17),
'POO2': (0.13661609910710645, -0.97207405517694545, 0.19080899537654492),
'POO1': (-0.13661609910710645, -0.97207405517694545, 0.19080899537654492),
'I1': (-0.28651556797120703, -0.88180424668940116, -0.37460659341591207),
'I2': (0.28651556797120703, -0.88180424668940116, -0.37460659341591207),
'PPO9h': (-0.69411523801289432, -0.69411523801289421, -0.1908089953765448),
'FP1': (-0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'OI2h': (0.15356032331158395, -0.96954172390250215, -0.1908089953765448),
'FP2': (0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'CCP6h': (0.82034360384187455, -0.1743694158206236, 0.5446390350150272),
'FCC3h': (-0.54232717509597328, 0.18673822182292288, 0.8191520442889918),
'PO8': (0.58778525229247325, -0.80901699437494734, 6.123233995736766e-17),
'PO9': (-0.54105917752298882, -0.7447040698476447, -0.3907311284892736),
'PO7': (-0.58778525229247325, -0.80901699437494734, 6.123233995736766e-17),
'PO4': (0.36009496929665602, -0.89126632448749754, 0.27563735581699916),
'PO3': (-0.36009496929665602, -0.89126632448749754, 0.27563735581699916),
'Fp1': (-0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'Fp2': (0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'PPO6h': (0.5455036073850148, -0.7790598895575418, 0.30901699437494745),
'CCP2h': (0.1949050434465294, -0.19490504344652934, 0.96126169593831889),
'Iz': (5.6773636985816068e-17, -0.92718385456678742, -0.37460659341591207),
'AFF5h': (-0.5455036073850148, 0.7790598895575418, 0.30901699437494745),
'TPP10h': (0.87463622477252045, -0.4456494155713287, -0.1908089953765448),
'OI1h': (-0.15356032331158395, -0.96954172390250215, -0.1908089953765448),
'CCP1h': (-0.1949050434465294, -0.19490504344652934, 0.96126169593831889)
}
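# Worked check (added, derived from the stored values above): under the export
# convention described in the comment block, the 'O1' entry corresponds to
# r=1, theta=-90, phi=72, since
#   x = 1 * sin(radians(-90)) * cos(radians(72)) ~ -0.309
#   y = 1 * sin(radians(-90)) * sin(radians(72)) ~ -0.951
#   z = 1 * cos(radians(-90))                    ~  0.0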
def store(self, result_dir, s_format="multiplexed"):
""" Not yet implemented! """
raise NotImplementedError("Storing of StreamDataset is currently not supported!")
@staticmethod
def project2d(ec_3d):
"""
Take a dictionary of 3d Cartesian electrode coordinates and return a
dictionary of their 2d projection in Cartesian coordinates.
"""
keys = []
x = []
y = []
z = []
for k, v in ec_3d.iteritems():
keys.append(k)
x.append(v[0])
y.append(v[1])
z.append(v[2])
x = numpy.array(x)
y = numpy.array(y)
z = numpy.array(z)
z = z - numpy.max(z)
# get spherical coordinates: normally this can be done via:
# phi = deg(atan2(y,x)); if < -90 -> + 180, if > 90 -> - 180
# theta = deg(arccos(z/r)); if x < 0 -> * (-1)
hypotxy = numpy.hypot(x, y)
r = numpy.hypot(hypotxy, z)
phi = numpy.arctan2(z, hypotxy)
theta = numpy.arctan2(y, x)
phi = numpy.maximum(phi, 0.001)
r2 = r / numpy.power(numpy.cos(phi), 0.2)
x = r2 * numpy.cos(theta) * 60
y = r2 * numpy.sin(theta) * 60
ec_2d = {}
for i in xrange(0, len(keys)):
ec_2d[keys[i]] = (x[i], y[i])
return ec_2d
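# Example (added for illustration): projecting the built-in coordinates
# flattens the cap onto the plane, with the vertex mapped to the origin:
#   ec_2d = StreamDataset.project2d(StreamDataset.ec)
#   ec_2d['Cz']  # -> (0.0, 0.0)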
def set_window_defs(self, window_definition, nullmarker_stride_ms=1000,
no_overlap=False, data_consistency_check=False):
""" Takes the window definition dictionary for later reading
The parameters are later on mainly forwarded to the
:class:`~pySPACE.missions.support.windower.MarkerWindower`.
To find more about these parameters, check out its documentation.
"""
self.window_definition = window_definition
self.nullmarker_stride_ms = nullmarker_stride_ms
self.no_overlap = no_overlap
self.data_consistency_check = data_consistency_check
def get_data(self, run_nr, split_nr, train_test):
if not (run_nr, split_nr, train_test) == (0, 0, "test"):
return self.data[(run_nr, split_nr, train_test)]
if self.meta_data.has_key('storage_format'):
if "bp_eeg" in self.meta_data['storage_format']:
# remove ".eeg" suffix
self.reader = EEGReader(self.data_file[:-4],
blocksize=100)
elif "set" in self.meta_data['storage_format']:
self.reader = SETReader(self.data_file[:-4])
elif "edf" in self.meta_data['storage_format']:
self.reader = EDFReader(self.data_file)
elif "csv" in self.meta_data['storage_format']:
sf = self.meta_data.get("sampling_frequency", 1)
try:
delimiter = self.meta_data["delimiter"]
except KeyError:
delimiter=None
try:
mf = os.path.join(self.dataset_dir,
self.meta_data["marker_file"])
except KeyError:
mf = None
if "marker" in self.meta_data:
marker = self.meta_data["marker"]
else:
marker = "marker"
self.reader = CsvReader(self.data_file, sampling_frequency=sf,
marker=marker, marker_file=mf,
delimiter=delimiter)
else:
self.reader = EEGReader(self.data_file, blocksize=100)
# Creates a windower that splits the training data into windows
# based in the window definitions provided
# and assigns correct labels to these windows
self.marker_windower = MarkerWindower(
self.reader, self.window_definition,
nullmarker_stride_ms=self.nullmarker_stride_ms,
no_overlap=self.no_overlap,
data_consistency_check=self.data_consistency_check)
return self.marker_windower
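# Illustrative sketch (not part of the original module): projecting two of the
# predefined 3d electrode positions to 2d.  ``StreamDataset`` is the name of
# the enclosing class, inferred from the error message raised in ``store``.
def _example_project2d():
    """Return the 2d scalp positions of two electrodes from the dict above."""
    ec_3d = {'Fpz': (6.123233995736766e-17, 1.0, 6.123233995736766e-17),
             'Iz': (5.6773636985816068e-17, -0.92718385456678742,
                    -0.37460659341591207)}
    return StreamDataset.project2d(ec_3d)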
def parse_float(param):
""" Work around to catch colon instead of floating point """
try:
return float(param)
except ValueError, e:
warnings.warn("Failed float conversion from csv file.")
try:
return float(param.replace(".", "").replace(",", "."))
except:
warnings.warn("Secondary attempt at conversion also failed. " +
"Treating the value as string and return a 0 as " +
"placeholder.")
return float(0)
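# Illustrative sketch (not part of the original module): parse_float accepts
# both '.' and ',' as decimal separator.
def _example_parse_float():
    assert parse_float("3.14") == 3.14
    # the comma triggers the fallback conversion (and a warning)
    assert parse_float("2,5") == 2.5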
def get_csv_handler(file_handler):
"""Helper function to get a DictReader from csv"""
try:
dialect = csv.Sniffer().sniff(file_handler.read(2048))
file_handler.seek(0)
return csv.DictReader(file_handler, dialect=dialect)
except csv.Error, e:
class excel_space(csv.excel):
delimiter = ' '
warnings.warn(str(e))
csv.register_dialect("excel_space", excel_space)
file_handler.seek(0)
return csv.DictReader(file_handler, dialect=excel_space)
class CsvReader(AbstractStreamReader):
""" Load time series data from csv file
**Parameters**
:file_path:
Path of the file to be loaded.
(*optional, default: 'data.csv'*)
:sampling_frequency:
Underlying sampling frequency of the data in Hz
(*optional, default: 1*)
:marker:
Name of the marker channel. If it is not found,
no marker is forwarded.
(*optional, default: 'marker'*)
:marker_file:
If the marker is not a column in the data file,
an external csv file in the same folder can be specified
with one column with the heading named like the *marker*
parameter and one column named *time* with increasing
numbers, which correspond to the index in the data file.
(first time point gets zero.)
Here the absolute path is needed.
(*optional, default: None*)
:delimiter:
Delimiter used in the csv file.
(*optional, default: None*)
"""
def __init__(self, file_path, sampling_frequency=1, marker="marker",
marker_file=None, delimiter=None):
try:
self.file = open(file_path, "r")
except IOError as io:
warnings.warn("Failed to open file at [%s]" % file_path)
raise io
self._dSamplingInterval = sampling_frequency
self.marker = marker
self._markerids = dict()
self._markerNames = dict()
self.callbacks = list()
self.new_marker_id = 1
self.time_index = 1
try:
if not marker_file is None:
marker_file = open(marker_file, "r")
except IOError:
warnings.warn("Failed to open marker file at [%s]. Now ignored."
% marker_file)
self._markerids["null"] = 0
self._markerNames[0] = "null"
if delimiter is None:
self.DictReader = get_csv_handler(self.file)
else:
self.DictReader = csv.DictReader(self.file, delimiter=delimiter)
self.first_entry = self.DictReader.next()
self._channelNames = self.first_entry.keys()
self.MarkerReader = None
if not marker_file is None:
self.MarkerReader = get_csv_handler(marker_file)
if not self.MarkerReader is None:
self.update_marker()
if self.next_marker[0] == self.time_index:
self.first_marker = self.next_marker[1]
self.update_marker()
else:
self.first_marker = ""
elif self.marker in self._channelNames:
self._channelNames.remove(self.marker)
self.first_marker = self.first_entry.pop(self.marker)
else:
self.first_marker = ""
@property
def dSamplingInterval(self):
""" actually the sampling frequency """
return self._dSamplingInterval
@property
def stdblocksize(self):
""" standard block size (int) """
return 1
@property
def markerids(self):
""" mapping of markers/events in stream and unique integer (dict)
The dict has to contain the mapping 'null' -> 0 to use the
nullmarkerstride option in the windower.
"""
return self._markerids
@property
def channelNames(self):
""" list of channel/sensor names """
return self._channelNames
@property
def markerNames(self):
""" inverse mapping of markerids (dict) """
return self._markerNames
def regcallback(self, func):
""" register a function as consumer of the stream """
self.callbacks.append(func)
def read(self, nblocks=1):
""" Read *nblocks* of the stream and pass it to registers functions """
n = 0
while nblocks == -1 or n < nblocks:
if not self.first_entry is None:
samples, marker = self.first_entry, self.first_marker
self.first_entry = None
else:
try:
samples = self.DictReader.next()
                except (IOError, StopIteration):
                    # DictReader.next() raises StopIteration at end of file
break
if not self.MarkerReader is None:
if self.next_marker[0] == self.time_index:
marker = self.next_marker[1]
self.update_marker()
else:
marker = ""
elif self.marker in samples.keys():
marker = samples.pop(self.marker)
else:
marker = ""
# add marker to dict
if not marker == "" and not marker in self._markerids:
self._markerids[marker] = self.new_marker_id
self._markerNames[self.new_marker_id] = marker
self.new_marker_id += 1
# convert marker to array
markers = numpy.ones(1)*(-1)
if not marker == "":
markers[0] = self._markerids[marker]
# convert samples to array
# special handling of marker in channel names
# if the marker is in channelNames,
if self.marker in self.channelNames:
array_samples = numpy.zeros((len(self.channelNames)-1, 1))
else:
array_samples = numpy.zeros((len(self.channelNames), 1))
offset = 0
for index, channel in enumerate(self.channelNames):
if self.marker == channel:
offset -= 1
else:
array_samples[index + offset] = parse_float(samples[channel])
n += 1
for c in self.callbacks:
c(array_samples, markers)
self.time_index += 1
return n
def update_marker(self):
"""Update `next_marker` from `MarkerReader` information"""
try:
next = self.MarkerReader.next()
self.next_marker = (next["time"], next[self.marker])
        except (IOError, StopIteration):
            # the csv DictReader raises StopIteration when no entries are left
pass
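# Illustrative usage sketch (not part of the original module); "data.csv" is
# simply the documented default file name, not a file shipped with the module.
def _example_csv_stream(csv_path="data.csv"):
    """Collect all (samples, markers) blocks produced by a CsvReader."""
    reader = CsvReader(csv_path, sampling_frequency=100.0)
    blocks = []
    reader.regcallback(lambda samples, markers: blocks.append((samples, markers)))
    reader.read(nblocks=-1)  # one block per csv row, until the file is exhausted
    return blocks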
class EDFReader(AbstractStreamReader):
""" Read EDF-Data
On Instantiation it will automatically assign the value
for the blocksize coded in the edf-file to its own
attribute 'stdblocksize'.
The Feature, that different signals can have different
sampling rates is eliminated in a way, that every value
of a lower sampled signal is repeated so that it fits
the highest sampling rate present in the dataset. This
is needed to have the same length for every signal
in the returned array.
"""
def __init__(self, abs_edffile_path):
"""Initializes module and opens specified file."""
try:
self.edffile = open(abs_edffile_path, "r")
except IOError as io:
warnings.warn(str("failed to open file at [%s]" % abs_edffile_path))
raise io
# variables to later overwrite
# the properties from AbstractStreamReader
self.callbacks = list()
self._dSamplingInterval = 0
self._stdblocksize = 0
self._markerids = dict()
self._channelNames = dict()
self._markerNames = dict()
# gains, frequency for each channel
self.gains = []
self.phy_min = []
self.dig_min = []
self.frequency = []
self.num_channels = 0
self.num_samples = []
self.edf_plus = False
self.edf_header_length = 0
self.annotations = None
self.num_samples_anno = None
self.timepoint = 0.0
self.generate_meta_data()
def __str__(self):
return ("EDFReader Object (%d@%s)\n" + \
"\tEDF File:\t %s\n" + \
"\tFile Format:\t %s\n" + \
"\tBlocksize:\t %d\n" + \
"\tnChannels:\t %d\n"
"\tfrequency:\t %d [Hz] (interval: %d [ns])\n") % (
os.getpid(), os.uname()[1],
os.path.realpath(self.edffile.name),
"EDF+" if self.edf_plus else "EDF",
self.stdblocksize, len(self.channelNames),
self.dSamplingInterval, 1000000/self.dSamplingInterval)
@property
def dSamplingInterval(self):
return self._dSamplingInterval
@property
def stdblocksize(self):
return self._stdblocksize
@property
def markerids(self):
return self._markerids
@property
def channelNames(self):
return self._channelNames[:-1] if self.edf_plus else self._channelNames
@property
def markerNames(self):
return self._markerNames
def read_edf_header(self):
"""Read edf-header information"""
m = dict()
m["version"] = self.edffile.read(8)
m["subject_id"] = self.edffile.read(80).strip()
m["recording_id"] = self.edffile.read(80).strip()
m["start_date"] = self.edffile.read(8)
m["start_time"] = self.edffile.read(8)
m["num_bytes_header"] = int(self.edffile.read(8).strip())
m["edf_c_d"] = self.edffile.read(44).strip()
m["num_data_records"] = self.edffile.read(8)
m["single_record_duration"] = float(self.edffile.read(8))
m["num_channels"] = int(self.edffile.read(4))
m["channel_names"] = list()
for i in range(m["num_channels"]):
m["channel_names"].append(self.edffile.read(16).strip())
m["electrode_type"] = list()
for i in range(m["num_channels"]):
m["electrode_type"].append(self.edffile.read(80).strip())
m["phy_dims"] = list()
for i in range(m["num_channels"]):
m["phy_dims"].append(self.edffile.read(8).strip())
m["phy_min"] = list()
for i in range(m["num_channels"]):
m["phy_min"].append(float(self.edffile.read(8).strip()))
m["phy_max"] = list()
for i in range(m["num_channels"]):
m["phy_max"].append(float(self.edffile.read(8).strip()))
m["dig_min"] = list()
for i in range(m["num_channels"]):
m["dig_min"].append(float(self.edffile.read(8).strip()))
m["dig_max"] = list()
for i in range(m["num_channels"]):
m["dig_max"].append(float(self.edffile.read(8).strip()))
m["prefilter"] = list()
for i in range(m["num_channels"]):
m["prefilter"].append(self.edffile.read(80).strip())
m["single_record_num_samples"] = list()
for i in range(m["num_channels"]):
m["single_record_num_samples"].append(int(self.edffile.read(8).strip()))
m["reserved"] = self.edffile.read(32*m["num_channels"])
# check position in file!
assert self.edffile.tell() == m["num_bytes_header"], "EDF Header corrupt!"
self.edf_header_length = self.edffile.tell()
return m
def read_edf_data(self):
"""read one record inside the data section of the edf-file"""
edfsignal = []
edfmarkers = numpy.ones(max(self.num_samples))*(-1)
# get markers from self.annotations
if self.annotations is not None:
current_annotations = numpy.where(
numpy.array(self.annotations.keys()) <
self.timepoint+self.delta)[0]
for c in current_annotations:
tmarker = self.annotations.keys()[c]-self.timepoint
pmarker = int((tmarker/self.delta)*max(self.num_samples))
edfmarkers[pmarker] = self.markerids[self.annotations[self.annotations.keys()[c]]]
self.annotations.pop(self.annotations.keys()[c])
self.timepoint += self.delta
# in EDF+ the last channel has the annotations,
# otherwise it is treated as regular signal channel
if self.edf_plus:
for i,n in enumerate(self.num_samples):
data = self.edffile.read(n*2)
if len(data) != n*2:
raise IOError
channel = numpy.fromstring(data, dtype=numpy.int16).astype(numpy.float32)
signal = (channel - self.dig_min[i]) * self.gains[i] + self.phy_min[i]
# simple upsampling for integer factors
# TODO: may use scipy.resample ..
if signal.shape[0] != max(self.num_samples):
factor = max(self.num_samples)/signal.shape[0]
assert type(factor) == int, str("Signal cannot be upsampled by non-int factor %f!" % factor)
signal = signal.repeat(factor, axis=0)
                edfsignal.append(signal)
            # skip the bytes of the annotation channel in this data record;
            # its content was already collected by parse_annotations()
            self.edffile.read(self.num_samples_anno*2)
else:
for i,n in enumerate(self.num_samples):
data = self.edffile.read(n*2)
if len(data) != n*2:
raise IOError
channel = numpy.fromstring(data, dtype=numpy.int16).astype(numpy.float32)
signal = (channel - self.dig_min[i]) * self.gains[i] + self.phy_min[i]
# simple upsampling for integer factors
# TODO: may use scipy.resample ..
if signal.shape[0] != max(self.num_samples):
factor = max(self.num_samples)/signal.shape[0]
assert type(factor) == int, str("Signal cannot be upsampled by non-int factor %f!" % factor)
signal = signal.repeat(factor, axis=0)
edfsignal.append(signal)
return edfsignal, edfmarkers
def parse_annotations(self):
""" Parses times and names of the annotations
This is done beforehand - annotations are later
added to the streamed data. """
self.edffile.seek(self.edf_header_length, os.SEEK_SET)
self.annotations = dict()
data_bytes_to_skip = sum(self.num_samples)*2
while True:
self.edffile.read(data_bytes_to_skip)
anno = self.edffile.read(self.num_samples_anno*2)
if len(anno) != self.num_samples_anno*2:
break
anno = anno.strip()
marker = anno.split(chr(20))
if marker[2][1:].startswith(chr(0)):
continue
base = float(marker[0])
offset = float(marker[2][1:])
name = str(marker[3])
self.annotations[base+offset] = name.strip()
def generate_meta_data(self):
""" Generate the necessary meta data for the windower """
m = self.read_edf_header()
# calculate gain for each channel
self.gains = [(px-pn)/(dx-dn) for px,pn,dx,dn in zip(m["phy_max"], m["phy_min"], m["dig_max"], m["dig_min"])]
self.dig_min = m["dig_min"]
self.phy_min = m["phy_min"]
self._channelNames = m["channel_names"]
self.num_channels = m["num_channels"]
self.num_samples = m["single_record_num_samples"]
# separate data from annotation channel
if m["edf_c_d"] in ["EDF+D", "EDF+C"]:
self.edf_plus = True
# the annotation channel is called "EDF Annotations" and is the last channel
assert "EDF Annotations" == m["channel_names"][-1], "Cannot determine Annotations Channel!"
if m["edf_c_d"] in ["EDF+D"]:
warnings.warn(str("The file %s contains non-continuous data-segments.\n"
"This feature is not supported and may lead to unwanted results!") % self.edffile.name)
self.num_samples_anno = self.num_samples.pop() # ignore sampling rate of the annotations channel
else :
self.edf_plus = False
# calculate sampling interval for each channel
self.frequency = [ns/m["single_record_duration"] for ns in self.num_samples]
self._dSamplingInterval = max(self.frequency)
self._stdblocksize = max(self.num_samples)
self.delta = self.stdblocksize / max(self.frequency)
# generate all marker names and ids
self._markerids['null'] = 0
# in edf+ case we can parse them from annotations
if self.edf_plus :
self.parse_annotations()
for i,(t,name) in enumerate(self.annotations.iteritems()):
self._markerids[name] = i+1
else:
warnings.warn("no marker channel is set - no markers will be streamed!")
for s in range(1,256,1):
self._markerids[str('S%3d' % s)] = s
for r in range(1,256,1):
self._markerids[str('R%3d' % r)] = r+256
# generate reverse mapping
for k,v in zip(self._markerids.iterkeys(), self._markerids.itervalues()):
self._markerNames[v] = k
# reset file position to begin of data section
self.edffile.seek(self.edf_header_length, os.SEEK_SET)
# Register callback function
def regcallback(self, func):
self.callbacks.append(func)
# Forwards block of data until all data is send
def read(self, nblocks=1, verbose=False):
"""read data and call registered callbacks """
n = 0
while nblocks == -1 or n < nblocks:
try:
samples, markers = self.read_edf_data()
except IOError:
break
n += 1
for c in self.callbacks:
c(samples, markers)
return n
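# Illustrative sketch (not part of the original module): the digital-to-physical
# scaling applied per channel in read_edf_data, shown on standalone numbers.
# The physical and digital ranges below are hypothetical example values.
def _example_edf_scaling():
    phy_min, phy_max = -200.0, 200.0        # physical range, e.g. in microvolt
    dig_min, dig_max = -32768.0, 32767.0    # 16 bit digital range
    gain = (phy_max - phy_min) / (dig_max - dig_min)
    digital_sample = 16384.0
    # same formula as in read_edf_data: (channel - dig_min) * gain + phy_min
    return (digital_sample - dig_min) * gain + phy_min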
class SETReader(AbstractStreamReader):
""" Load eeglab .set format
Read eeglab format when the data has not been segmented yet. It is further
assumed that the data is stored binary in another file with extension .fdt.
    Further possibilities would be the .dat format or storing everything in
    the .set file; neither is currently supported.
"""
def __init__(self, abs_setfile_path, blocksize=100, verbose=False):
self.abs_setfile_path = abs_setfile_path
self._stdblocksize = blocksize
self.callbacks = list()
self._dSamplingInterval = 0
self._markerids = {"null": 0}
self._channelNames = dict()
self._markerNames = {0: "null"}
self.read_set_file()
self.fdt_handle = open(self.abs_data_path,'rb')
self.latency = 0
self.current_marker_index = 0
@property
def dSamplingInterval(self):
return self._dSamplingInterval
@property
def stdblocksize(self):
return self._stdblocksize
@property
def markerids(self):
return self._markerids
@property
def channelNames(self):
return self._channelNames
@property
def markerNames(self):
return self._markerNames
def read_set_file(self):
setdata = loadmat(self.abs_setfile_path + '.set', appendmat=False)
# check if stream data
ntrials = setdata['EEG']['trials'][0][0][0][0]
assert(ntrials == 1), "Data consists of more than one trial. This is not supported!"
# check if data is stored in fdt format
datafilename = setdata['EEG']['data'][0][0][0]
assert(datafilename.split('.')[-1] == 'fdt'), "Data is not in fdt format!"
# collect meta information
self._dSamplingInterval = setdata['EEG']['srate'][0][0][0][0]
self._channelNames = numpy.hstack(setdata['EEG']['chanlocs'][0][0][ \
'labels'][0]).astype(numpy.str_).tolist()
self.nChannels = setdata['EEG']['nbchan'][0][0][0][0]
self.marker_data = numpy.hstack(setdata['EEG']['event'][0][0][ \
'type'][0]).astype(numpy.str_)
for marker in numpy.unique(self.marker_data):
marker_number = len(self._markerNames)
self._markerNames[marker_number] = marker
self._markerids[marker] = marker_number
self.marker_times = numpy.hstack(setdata['EEG']['event'][0][0][ \
'latency'][0]).flatten()
self.abs_data_path = os.path.join(os.path.dirname(self.abs_setfile_path),
datafilename)
def regcallback(self, func):
self.callbacks.append(func)
def read(self, nblocks=1, verbose=False):
readblocks = 0
while (readblocks < nblocks or nblocks == -1):
ret, samples, markers = self.read_fdt_data()
if ret:
for f in self.callbacks:
f(samples, markers)
else:
break
readblocks += 1
return readblocks
def read_fdt_data(self):
if self.fdt_handle == None:
return False, None, None
num_samples = self.nChannels * self._stdblocksize
markers = numpy.zeros(self._stdblocksize)
markers.fill(-1)
###### READ DATA FROM FILE ######
try:
samples = numpy.fromfile(self.fdt_handle, dtype=numpy.float32,
count=num_samples)
except MemoryError:
# assuming, that a MemoryError only occurs when file is finished
self.fdt_handle.close()
self.fdt_handle = None
return False, None, None
# True when EOF reached in last or current block
if samples.size < num_samples:
self.fdt_handle.close()
self.fdt_handle = None
if samples.size == 0:
return False, None, None
temp = samples
samples = numpy.zeros(num_samples)
numpy.put(samples, range(temp.size), temp)
# need channel x time matrix
samples = samples.reshape((self.stdblocksize, self.nChannels)).T
###### READ MARKERS FROM FILE ######
for l in range(self.current_marker_index,len(self.marker_times)):
if self.marker_times[l] > self.latency + self._stdblocksize:
self.current_marker_index = l
self.latency += self._stdblocksize
break
else:
rel_marker_pos = (self.marker_times[l] - 1) % self._stdblocksize
markers[rel_marker_pos] = self._markerids[self.marker_data[l]]
return True, samples, markers
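# Illustrative usage sketch (not part of the original module).  The path is a
# placeholder and is given without the '.set' extension, which read_set_file
# appends itself.
def _example_set_stream(set_path="recording"):
    """Read ten blocks from an eeglab .set/.fdt pair."""
    reader = SETReader(set_path, blocksize=100)
    reader.regcallback(lambda samples, markers: None)  # real consumers go here
    return reader.read(nblocks=10)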
class EEGReader(AbstractStreamReader):
""" Load raw EEG data in the .eeg brain products format
This module does the Task of parsing
.vhdr, .vmrk end .eeg/.dat files and then hand them
over to the corresponding windower which
iterates over the aggregated data.
"""
def __init__(self, abs_eegfile_path, blocksize=100, verbose=False):
self.abs_eegfile_path = abs_eegfile_path
self._stdblocksize = blocksize
self.eeg_handle = None
self.mrk_handle = None
self.eeg_dtype = numpy.int16
self.callbacks = list()
# variable names with capitalization correspond to
# structures members defined in RecorderRDA.h
self.nChannels, \
self._dSamplingInterval, \
self.resolutions, \
self._channelNames, \
self.channelids, \
self._markerids, \
self._markerNames, \
self.nmarkertypes = self.bp_meta()
if verbose:
print "channelNames:", self.channelNames, "\n"
print "channelids:", self.channelids, "\n"
print "markerNames:", self.markerNames, "\n"
print "markerids:", self.markerids, "\n"
print "resolutions:", self.resolutions, "\n"
# open the eeg-file
if self.eeg_handle == None:
try:
self.eeg_handle = open(self.abs_eegfile_path + '.eeg', 'rb')
except IOError:
try:
self.eeg_handle = open(self.abs_eegfile_path + '.dat', 'rb')
except IOError:
raise IOError, "EEG-file [%s.{dat,eeg}] could not be opened!" % os.path.realpath(self.abs_eegfile_path)
self.callbacks = list()
self.ndsamples = None # last sample block read
self.ndmarkers = None # last marker block read
@property
def dSamplingInterval(self):
return self._dSamplingInterval
@property
def stdblocksize(self):
return self._stdblocksize
@property
def markerids(self):
return self._markerids
@property
def channelNames(self):
return self._channelNames
@property
def markerNames(self):
return self._markerNames
# This function gathers meta information from the .vhdr and .vmrk files.
# Only the relevant information is then stored in variables, the windower
# accesses during the initialisation phase.
def bp_meta(self):
nChannels = 0
dSamplingInterval = 0
resolutions = list()
channelNames = list()
channelids = dict()
markerids = dict()
markerNames = dict()
nmarkertypes = 0
prefix = ''
# helper function to convert resolutions
# 0 = 100 nV, 1 = 500 nV, 2 = 10 {mu}V, 3 = 152.6 {mu}V
def res_conv(num, res):
            # convert num to nV: 194 (0xC2) is the first byte of a UTF-8
            # encoded micro sign, i.e. the resolution unit starts with 'µ'
            # and the value is given in µV rather than nV
if ord(res[0]) == 194:
num = num*1000
if num <= 100: return 0
if num <= 500: return 1
if num <= 10000: return 2
return 3
# Start with vhdr file
file_path = self.abs_eegfile_path + '.vhdr'
hdr = open(file_path)
for line in hdr:
if line.startswith(";"): continue
# Read the words between brackets like "[Common Infos]"
if line.startswith('['):
prefix = line.partition("[")[2].partition("]")[0].lower()
continue
if line.find("=") == -1: continue
# Common Infos and Binary Infos
if(prefix == 'common infos' or prefix == 'binary infos'):
key, value = line.split('=')
key = key.lower()
value = value.lower()
if(key == 'datafile'):
pass # something like filename.eeg
elif(key == 'markerfile'):
mrk_file = value
elif(key == 'dataformat'):
pass # usually BINARY
elif(key == 'dataorientation'):
eeg_data_or = value
elif(key == 'datatype'):
pass # something like TIMEDOMAIN
elif(key == 'numberofchannels'):
nChannels = int(value)
elif(key == 'datapoints'):
pass # the number of datapoints in the whole set
elif(key == 'samplinginterval'):
dSamplingInterval = int(1000000/float(value))
elif(key == 'binaryformat'):
if re.match("int_16", value, flags=re.IGNORECASE) == None:
self.eeg_dtype = numpy.float32
else:
self.eeg_dtype = numpy.int16
elif(key == 'usebigendianorder'):
bin_byteorder = value
# Channel Infos
# ; Each entry: Ch<Channel number>=<Name>,<Reference channel name>,
# ; <Resolution in "Unit">,<Unit>,
elif(prefix == 'channel infos'):
key, value = line.split('=')
if re.match("^[a-z]{2}[0-9]{1,3}", key, flags=re.IGNORECASE) == None:
continue
ch_id = int(re.findall(r'\d+', key)[0])
ch_name = value.split(',')[0]
ch_ref = value.split(',')[1]
if len(re.findall(r'\d+', value.split(',')[2])) == 0:
ch_res_f = 0
else:
ch_res_f = float(re.findall(r'\d+', value.split(',')[2])[0])
ch_res_unit = value.split(',')[3]
channelNames.append(ch_name)
channelids[ch_name] = ch_id
resolutions.append(res_conv(ch_res_f, ch_res_unit))
            # Everything that's left...
else:
#print "parsing finished!"
break
hdr.close()
# Continue with marker file
# Priority:
# 1: Path from .vhdr
# 2: Path constructed from eegfile path
prefix = ''
markerNames[0] = 'null'
try:
            self.mrk_handle = open(os.path.join(os.path.dirname(self.abs_eegfile_path), mrk_file.strip()))
except IOError:
try:
self.mrk_handle = open(self.abs_eegfile_path + '.vmrk')
except IOError:
raise IOError, str("Could not open [%s.vmrk]!" % os.path.realpath(self.abs_eegfile_path))
# Parse file
for line in self.mrk_handle:
if line.startswith(";"): continue
# Read the words between brackets like "[Common Infos]"
if line.startswith('['):
prefix = line.partition("[")[2].partition("]")[0].lower()
continue
if line.find("=") == -1: continue
if prefix == "marker infos":
mrk_name = line.split(',')[1]
if mrk_name != "" and mrk_name not in markerNames.values():
markerNames[len(markerNames)] = mrk_name
# rewinds the marker file
self.mrk_handle.seek(0, os.SEEK_SET)
# helper struct for finding markers
self.mrk_info = dict()
self.mrk_info['line'] = ""
self.mrk_info['position'] = 0
# advance to first marker line
while(re.match("^Mk1=", self.mrk_info['line'], re.IGNORECASE) == None):
try:
self.mrk_info['line'] = self.mrk_handle.next()
except StopIteration:
self.mrk_handle.close()
raise StopIteration, str("Reached EOF while searching for first Marker in [%s]" % os.path.realpath(self.mrk_handle.name))
# TODO: Sort markerNames?
def compare (x,y):
            return cmp(int(re.findall(r'\d+', x)[0]), int(re.findall(r'\d+', y)[0]))
for key in markerNames:
markerids[markerNames[key]] = key
markertypes = len(markerids)
return nChannels, \
dSamplingInterval, \
resolutions, \
channelNames, \
channelids, \
markerids, \
markerNames, \
markertypes
# This function reads the eeg-file and the marker-file for every
# block of data which is processed.
def bp_read(self, verbose=False):
if self.eeg_handle == None:
return False, None, None
num_samples = self.nChannels*self.stdblocksize
markers = numpy.zeros(self.stdblocksize)
markers.fill(-1)
samples = numpy.zeros(num_samples)
###### READ EEG-DATA FROM FILE ######
try:
samples = numpy.fromfile(self.eeg_handle, dtype=self.eeg_dtype, count=num_samples)
except MemoryError:
# assuming, that a MemoryError only occurs when file is finished
self.eeg_handle.close()
self.eeg_handle = None
return False, None, None
# True when EEG-File's EOF reached in last or current block
if samples.size < num_samples:
self.eeg_handle.close()
self.eeg_handle = None
if samples.size == 0:
return False, None, None
temp = samples
samples = numpy.zeros(num_samples)
numpy.put(samples, range(temp.size), temp)
samples = samples.reshape((self.stdblocksize, self.nChannels))
samples = scipy.transpose(samples)
###### READ MARKERS FROM FILE ######
self.mrk_info['position'] += self.stdblocksize
mk_posi = 0
mk_desc = ""
while True:
mk = self.mrk_info['line'].split(',')
if len(mk) < 2 or mk[1] == "":
try:
self.mrk_info['line'] = self.mrk_handle.next()
except:
self.mrk_handle.close()
#self._log("WARNING: EOF[%s]\n" % os.path.realpath(self.mrk_handle.name))
break
continue
mk_desc = mk[1]
mk_posi = int(mk[2])
if mk_posi > self.mrk_info['position']:
break
# special treatment for 'malformed' markerfiles
mk_rel_position = (mk_posi-1) % self.stdblocksize
if markers[mk_rel_position] != -1 :
# store marker for next point
mk[2] = str(mk_posi+1)
self.mrk_info['line'] = ",".join(["%s" % (m) for m in mk])
#self._log(str("WARNING: shifted position of marker \"%s\" from %d to %d!\n" % (mk_desc, mk_posi, mk_posi+1)))
if mk_rel_position+1 > self.stdblocksize-1:
return True, samples, markers
else:
continue
else :
markers[mk_rel_position] = self.markerids[mk_desc]
self.mrk_info['line'] = ""
# try to read next line from markerfile
try:
self.mrk_info['line'] = self.mrk_handle.next()
except:
self.mrk_handle.close()
break
return True, samples, markers
# string representation with interesting information
def __str__(self):
return ("EEGReader Object (%d@%s)\n" + \
"\tEEG File:\t %s\n" + \
"\tMRK File:\t %s\n" + \
"\tFile Format:\t %s\n" + \
"\tBlocksize:\t %d\n" + \
"\tnChannels:\t %d\n") % (os.getpid(), os.uname()[1], os.path.realpath(self.eeg_handle.name),
os.path.realpath(self.mrk_handle.name), self.eeg_dtype,
self.stdblocksize, self.nChannels)
# Register callback function
def regcallback(self, func):
self.callbacks.append(func)
# Reads data from .eeg/.dat file until EOF
def read(self, nblocks=1, verbose=False):
self.stop = False
readblocks = 0
while (readblocks < nblocks or nblocks == -1):
ret, self.ndsamples, self.ndmarkers = self.bp_read()
if ret:
for f in self.callbacks:
f(self.ndsamples, self.ndmarkers)
else:
break
readblocks += 1
return readblocks
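# Illustrative sketch (not part of the original module): wiring an EEGReader
# into the MarkerWindower, mirroring the pattern used in get_data above.  The
# file path and the window definition are placeholders; the exact structure of
# the window definition is defined by pySPACE's windower module.
def _example_eeg_windowing(eeg_path, window_definition):
    reader = EEGReader(eeg_path, blocksize=100)
    return MarkerWindower(reader, window_definition,
                          nullmarker_stride_ms=1000)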
| 42.111579
| 137
| 0.580613
| 58,352
| 0.972387
| 0
| 0
| 3,077
| 0.051276
| 0
| 0
| 16,529
| 0.275442
|
e17db1fd4e96affffe66942426ac284e73e8b345
| 10,463
|
py
|
Python
|
tests/base/test_endpoints_authentication.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 8
|
2018-07-04T09:54:46.000Z
|
2022-03-17T08:21:06.000Z
|
tests/base/test_endpoints_authentication.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 19
|
2018-04-18T07:24:55.000Z
|
2022-03-04T01:03:15.000Z
|
tests/base/test_endpoints_authentication.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 7
|
2018-07-03T12:17:50.000Z
|
2021-05-05T04:33:32.000Z
|
from restapi.connectors import Connector
from restapi.env import Env
from restapi.services.authentication import BaseAuthentication, Role
from restapi.tests import API_URI, BaseTests, FlaskClient
from restapi.utilities.logs import log
class TestApp(BaseTests):
def test_no_auth(self, client: FlaskClient) -> None:
r = client.get(f"{API_URI}/tests/noauth")
assert r.status_code == 200
assert self.get_content(r) == "OK"
if Env.get_bool("AUTH_ENABLE"):
headers, _ = self.do_login(client, None, None)
# Tokens are ignored
r = client.get(f"{API_URI}/tests/noauth", headers=headers)
assert r.status_code == 200
assert self.get_content(r) == "OK"
# Tokens are ignored even if invalid
r = client.get(
f"{API_URI}/tests/noauth", headers={"Authorization": "Bearer invalid"}
)
assert r.status_code == 200
assert self.get_content(r) == "OK"
def test_auth(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
r = client.get(f"{API_URI}/tests/authentication")
assert r.status_code == 401
r = client.get(
f"{API_URI}/tests/authentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
headers, token = self.do_login(client, None, None)
r = client.get(f"{API_URI}/tests/authentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
if not Env.get_bool("ALLOW_ACCESS_TOKEN_PARAMETER"):
# access token parameter is not allowed by default
r = client.get(
f"{API_URI}/tests/authentication", query_string={"access_token": token}
)
assert r.status_code == 401
def test_optional_auth(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
# Optional authentication can accept missing tokens
r = client.get(f"{API_URI}/tests/optionalauthentication")
assert r.status_code == 204
headers, token = self.do_login(client, None, None)
# Or valid tokens
r = client.get(f"{API_URI}/tests/optionalauthentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
        # But not invalid tokens, i.e. if a token is presented it is always validated
r = client.get(
f"{API_URI}/tests/authentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
if not Env.get_bool("ALLOW_ACCESS_TOKEN_PARAMETER"):
# access token parameter is not allowed by default
r = client.get(
f"{API_URI}/tests/optionalauthentication",
query_string={"access_token": token},
)
# query token is ignored but the endpoint accepts missing tokens
assert r.status_code == 204
r = client.get(
f"{API_URI}/tests/optionalauthentication",
query_string={"access_token": "invalid"},
)
# invalid tokens should be rejected, but query token is ignored
assert r.status_code == 204
def test_access_token_parameter(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
r = client.get(f"{API_URI}/tests/queryauthentication")
assert r.status_code == 401
r = client.get(
f"{API_URI}/tests/queryauthentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
headers, token = self.do_login(client, None, None)
r = client.get(f"{API_URI}/tests/queryauthentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/queryauthentication", query_string={"access_token": token}
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/queryauthentication",
query_string={"access_token": "invalid"},
)
assert r.status_code == 401
def test_optional_access_token_parameter(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
# Optional authentication can accept missing tokens
r = client.get(f"{API_URI}/tests/optionalqueryauthentication")
assert r.status_code == 204
headers, token = self.do_login(client, None, None)
# Or valid tokens
r = client.get(f"{API_URI}/tests/optionalqueryauthentication", headers=headers)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
        # But not invalid tokens, i.e. if a token is presented it is always validated
r = client.get(
f"{API_URI}/tests/optionalqueryauthentication",
headers={"Authorization": "Bearer invalid"},
)
assert r.status_code == 401
r = client.get(
f"{API_URI}/tests/optionalqueryauthentication",
query_string={"access_token": token},
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/optionalqueryauthentication",
query_string={"access_token": "invalid"},
)
        # the query token is evaluated by this endpoint, so an invalid value
        # is rejected even though authentication is optional
assert r.status_code == 401
def test_authentication_with_multiple_roles(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
r = client.get(f"{API_URI}/tests/manyrolesauthentication")
assert r.status_code == 401
r = client.get(f"{API_URI}/tests/unknownroleauthentication")
assert r.status_code == 401
admin_headers, _ = self.do_login(client, None, None)
r = client.get(
f"{API_URI}/tests/manyrolesauthentication", headers=admin_headers
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == BaseAuthentication.default_user
r = client.get(
f"{API_URI}/tests/unknownroleauthentication", headers=admin_headers
)
assert r.status_code == 401
if Env.get_bool("MAIN_LOGIN_ENABLE"):
uuid, data = self.create_user(client, roles=[Role.USER])
user_header, _ = self.do_login(
client, data.get("email"), data.get("password")
)
r = client.get(
f"{API_URI}/tests/manyrolesauthentication", headers=user_header
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == data.get("email")
r = client.get(
f"{API_URI}/tests/unknownroleauthentication", headers=user_header
)
assert r.status_code == 401
self.delete_user(client, uuid)
def test_authentication_with_auth_callback(self, client: FlaskClient) -> None:
if not Env.get_bool("AUTH_ENABLE"):
log.warning("Skipping authentication tests")
return
auth = Connector.get_authentication_instance()
user = auth.get_user(username=BaseAuthentication.default_user)
assert user is not None
VALID = f"/tests/preloadcallback/{user.uuid}"
INVALID = "/tests/preloadcallback/12345678-90ab-cdef-1234-567890abcdef"
admin_headers, _ = self.do_login(client, None, None)
        # Verify both the endpoint ...
r = client.get(
f"{API_URI}{VALID}", query_string={"test": True}, headers=admin_headers
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, dict)
assert len(content) == 1
assert "email" in content
assert content["email"] == user.email
r = client.get(
f"{API_URI}{INVALID}", query_string={"test": True}, headers=admin_headers
)
assert r.status_code == 401
# and get_schema!
r = client.get(
f"{API_URI}{VALID}",
query_string={"get_schema": True},
headers=admin_headers,
)
assert r.status_code == 200
content = self.get_content(r)
assert isinstance(content, list)
assert len(content) == 1
assert content[0]["key"] == "test"
assert content[0]["type"] == "boolean"
r = client.get(
f"{API_URI}{INVALID}",
query_string={"get_schema": True},
headers=admin_headers,
)
assert r.status_code == 401
| 35.228956
| 88
| 0.60346
| 10,225
| 0.977253
| 0
| 0
| 0
| 0
| 0
| 0
| 2,793
| 0.266941
|
e17f627e7014eaf9f501344de8ac94066bc5da4f
| 70,860
|
py
|
Python
|
katsdpscripts/rts_session.py
|
ska-sa/katsdpscripts
|
f9eaa867aad8b94c715f7286953124df00b5781c
|
[
"BSD-3-Clause"
] | null | null | null |
katsdpscripts/rts_session.py
|
ska-sa/katsdpscripts
|
f9eaa867aad8b94c715f7286953124df00b5781c
|
[
"BSD-3-Clause"
] | 21
|
2019-09-16T15:26:53.000Z
|
2022-01-11T09:14:39.000Z
|
katsdpscripts/rts_session.py
|
ska-sa/katsdpscripts
|
f9eaa867aad8b94c715f7286953124df00b5781c
|
[
"BSD-3-Clause"
] | 1
|
2019-11-11T11:47:54.000Z
|
2019-11-11T11:47:54.000Z
|
###############################################################################
# SKA South Africa (http://ska.ac.za/) #
# Author: cam@ska.ac.za #
# Copyright @ 2013 SKA SA. All rights reserved. #
# #
# THIS SOFTWARE MAY NOT BE COPIED OR DISTRIBUTED IN ANY FORM WITHOUT THE #
# WRITTEN PERMISSION OF SKA SA. #
###############################################################################
"""CaptureSession encompassing data capturing and standard observations with RTS.
This defines the :class:`CaptureSession` class, which encompasses the capturing
of data and the performance of standard scans with the RTS system. It also
provides a fake :class:`TimeSession` class, which goes through the motions in
order to time them, but without performing any real actions.
"""
import time
import logging
import sys
import os.path
import numpy as np
import katpoint
# This is used to document available spherical projections (and set them in case of TimeSession)
from katcorelib.targets import Offset
from .array import Array
from .katcp_client import KATClient
from .defaults import user_logger, activity_logger
from katmisc.utils.utils import dynamic_doc
# Obtain list of spherical projections and the default projection from antenna proxy
projections, default_proj = Offset.PROJECTIONS.keys(), Offset.DEFAULT_PROJECTION
# Move default projection to front of list
projections.remove(default_proj)
projections.insert(0, default_proj)
def ant_array(kat, ants, name='ants'):
"""Create sub-array of antennas from flexible specification.
Parameters
----------
kat : :class:`utility.KATCoreConn` object
KAT connection object
ants : :class:`Array` or :class:`KATClient` object, or list, or string
Antennas specified by an Array object containing antenna devices, or
a single antenna device or a list of antenna devices, or a string of
comma-separated antenna names, or the string 'all' for all antennas
controlled via the KAT connection associated with this session
Returns
-------
array : :class:`Array` object
Array object containing selected antenna devices
Raises
------
ValueError
If antenna with a specified name is not found on KAT connection object
"""
if isinstance(ants, Array):
return ants
elif isinstance(ants, KATClient):
return Array(name, [ants])
elif isinstance(ants, basestring):
if ants.strip() == 'all':
return kat.ants
else:
try:
return Array(name, [getattr(kat, ant.strip()) for ant in ants.split(',')])
except AttributeError:
raise ValueError("Antenna '%s' not found (i.e. no kat.%s exists)" % (ant, ant))
else:
# The default assumes that *ants* is a list of antenna devices
return Array(name, ants)
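# Illustrative sketch (not part of the original module): the flexible antenna
# specifications accepted by ant_array.  The antenna names are hypothetical and
# `kat` is assumed to be a connected KATCoreConn object.
def _example_ant_array(kat):
    all_ants = ant_array(kat, 'all')                # every controlled antenna
    two_ants = ant_array(kat, 'm001,m002', 'pair')  # comma-separated names
    return all_ants, two_ants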
def report_compact_traceback(tb):
"""Produce a compact traceback report."""
print '--------------------------------------------------------'
print 'Session interrupted while doing (most recent call last):'
print '--------------------------------------------------------'
while tb:
f = tb.tb_frame
print '%s %s(), line %d' % (f.f_code.co_filename, f.f_code.co_name, f.f_lineno)
tb = tb.tb_next
print '--------------------------------------------------------'
class ScriptLogHandler(logging.Handler):
"""Logging handler that writes logging records to HDF5 file via ingest.
Parameters
----------
data : :class:`KATClient` object
Data proxy device for the session
"""
def __init__(self, data):
logging.Handler.__init__(self)
self.data = data
def emit(self, record):
"""Emit a logging record."""
try:
msg = self.format(record)
# XXX This probably has to go to cam2spead as a req/sensor combo [YES]
# self.data.req.k7w_script_log(msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class ObsParams(dict):
"""Dictionary-ish that writes observation parameters to CAM SPEAD stream.
Parameters
----------
data : :class:`KATClient` object
Data proxy device for the session
product : string
Name of data product
"""
def __init__(self, data, product):
dict.__init__(self)
self.data = data
self.product = product
def __setitem__(self, key, value):
# XXX Changing data product name -> ID in a hard-coded fashion
self.data.req.set_obs_param(self.product, key, repr(value))
dict.__setitem__(self, key, value)
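# Illustrative sketch (not part of the original module): every assignment to an
# ObsParams instance is mirrored to the CAM SPEAD stream via the data proxy.
# `data` is assumed to be a connected data proxy; the product name is the same
# hard-coded default used by CaptureSession below.
def _example_obs_params(data):
    obs_params = ObsParams(data, 'c856M32k')
    obs_params['observer'] = 'example observer'  # also issues set_obs_param
    return obs_params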
class RequestSensorError(Exception):
"""Critical request failed or critical sensor could not be read."""
pass
class CaptureSessionBase(object):
def get_ant_names(self):
return ','.join(co for co in self.kat.controlled_objects
if co in self.kat.katconfig.arrays['ants'])
class CaptureSession(CaptureSessionBase):
"""Context manager that encapsulates a single data capturing session.
A data capturing *session* results in a single data file, potentially
containing multiple scans and compound scans. An *experiment* may consist of
multiple sessions. This object ensures that the capturing process is
started and completed cleanly, even if exceptions occur during the session.
It also provides canned routines for simple observations such as tracks,
single scans and raster scans on a specific source.
The initialisation of the session object does basic preparation of the data
capturing subsystem (ingest) and logging. It tries to do the minimum to
enable data capturing. The experimental setup is usually completed by
calling :meth:`standard_setup` on the instantiated session object.
The actual data capturing only starts once :meth:`capture_start` is called.
Parameters
----------
kat : :class:`utility.KATCoreConn` object
KAT connection object associated with this experiment
product : string, optional
Data product (unchanged by default)
dump_rate : float, optional
Correlator dump rate, in Hz (will be set by default)
kwargs : dict, optional
Ignore any other keyword arguments (simplifies passing options as dict)
Raises
------
ValueError
If data proxy is not connected
RequestSensorError
If ingest system did not initialise or data product could not be selected
"""
def __init__(self, kat, product=None, dump_rate=1.0, **kwargs):
try:
self.kat = kat
# Hard-code the RTS data proxy for now
data, katsys = kat.data_rts, kat.sys
if not data.is_connected():
raise ValueError("Data proxy '%s' is not connected "
"(is the KAT system running?)" % (data.name,))
self.data = self.dbe = data
# Default settings for session parameters (in case standard_setup is not called)
self.ants = None
self.experiment_id = 'interactive'
self.stow_when_done = False
self.nd_params = {'diode': 'default', 'on': 0., 'off': 0., 'period': -1.}
self.last_nd_firing = 0.
self.output_file = ''
self.horizon = 3.0
# Requested dump period, replaced by actual value after capture started
self.dump_period = self._requested_dump_period = 1.0 / dump_rate
# # XXX last dump timestamp?
# self._end_of_previous_session = data.sensor.k7w_last_dump_timestamp.get_value()
# XXX Hard-code product name for now
self.product = 'c856M32k' if product is None else product
data.req.product_configure(self.product, dump_rate, timeout=330)
# Enable logging to the new HDF5 file via the usual logger (using same formatting and filtering)
self._script_log_handler = ScriptLogHandler(data)
if len(user_logger.handlers) > 0:
self._script_log_handler.setLevel(user_logger.handlers[0].level)
self._script_log_handler.setFormatter(user_logger.handlers[0].formatter)
user_logger.addHandler(self._script_log_handler)
user_logger.info('==========================')
user_logger.info('New data capturing session')
user_logger.info('--------------------------')
user_logger.info('Data proxy used = %s' % (data.name,))
user_logger.info('Data product = %s' % (self.product,))
# XXX file name? SB ID? Program block ID? -> [file via capture_done]
# # Obtain the name of the file currently being written to
# reply = data.req.k7w_get_current_file()
# outfile = reply[1] if reply.succeeded else '<unknown file>'
outfile = '<unknown file>'
user_logger.info('Opened output file = %s' % (outfile,))
user_logger.info('')
activity_logger.info("----- Script starting %s (%s). Output file %s" % (sys.argv[0], ' '.join(sys.argv[1:]), outfile))
# Log details of the script to the back-end
self.obs_params = ObsParams(data, self.product)
katsys.req.set_script_param('script-starttime', time.time())
katsys.req.set_script_param('script-endtime', '')
katsys.req.set_script_param('script-name', sys.argv[0])
katsys.req.set_script_param('script-arguments', ' '.join(sys.argv[1:]))
katsys.req.set_script_param('script-status', 'busy')
except Exception, e:
msg = 'CaptureSession failed to initialise (%s)' % (e,)
user_logger.error(msg)
activity_logger.info(msg)
if hasattr(self, '_script_log_handler'):
user_logger.removeHandler(self._script_log_handler)
raise
def __enter__(self):
"""Enter the data capturing session."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the data capturing session, closing the data file."""
if exc_value is not None:
exc_msg = str(exc_value)
msg = "Session interrupted by exception (%s%s)" % \
(exc_value.__class__.__name__,
(": '%s'" % (exc_msg,)) if exc_msg else '')
if exc_type is KeyboardInterrupt:
user_logger.warning(msg)
activity_logger.warning(msg)
else:
user_logger.error(msg, exc_info=True)
activity_logger.error(msg, exc_info=True)
self.end(interrupted=True)
else:
self.end(interrupted=False)
# Suppress KeyboardInterrupt so as not to scare the lay user,
# but allow other exceptions that occurred in the body of with-statement
if exc_type is KeyboardInterrupt:
report_compact_traceback(traceback)
return True
else:
return False
def get_centre_freq(self):
"""Get RF (sky) frequency associated with middle CBF channel.
Returns
-------
centre_freq : float
Actual centre frequency in MHz (or NaN if something went wrong)
"""
# XXX Something like this? [YES]
# return self.data.sensor.cbf_${product}_centerfrequency.get_value()
return 1284.0
def set_centre_freq(self, centre_freq):
"""Set RF (sky) frequency associated with middle CBF channel.
Parameters
----------
centre_freq : float
Desired centre frequency in MHz
"""
# XXX This will be a data product change instead...
pass
def standard_setup(self, observer, description, experiment_id=None,
nd_params=None, stow_when_done=None, horizon=None, **kwargs):
"""Perform basic experimental setup including antennas, LO and dump rate.
This performs the basic high-level setup that most experiments require.
It should usually be called as the first step in a new session
(unless the experiment has special requirements, such as holography).
The user selects a subarray of antennas that will take part in the
experiment, identifies him/herself and describes the experiment.
Optionally, the user may also set the RF centre frequency, dump rate
and noise diode firing strategy, amongst others. All optional settings
are left unchanged if unspecified, except for the dump rate, which has
to be set (due to the fact that there is currently no way to determine
the dump rate...).
        The antenna specification *ants* does not have a default, which forces the
user to specify them explicitly. This is for safety reasons, to remind
the user of which antennas will be moved around by the script. The
*observer* and *description* similarly have no default, to force the
user to document the observation to some extent.
Parameters
----------
ants : :class:`Array` or :class:`KATClient` object, or list, or string
Antennas that will participate in the capturing session, as an Array
object containing antenna devices, or a single antenna device or a
list of antenna devices, or a string of comma-separated antenna
names, or the string 'all' for all antennas controlled via the
KAT connection associated with this session
observer : string
Name of person doing the observation
description : string
Short description of the purpose of the capturing session
experiment_id : string, optional
Experiment ID, a unique string used to link the data files of an
experiment together with blog entries, etc. (unchanged by default)
nd_params : dict, optional
Dictionary containing parameters that control firing of the noise
diode during canned commands. These parameters are in the form of
keyword-value pairs, and matches the parameters of the
:meth:`fire_noise_diode` method. This is unchanged by default
(typically disabling automatic firing).
stow_when_done : {False, True}, optional
If True, stow the antennas when the capture session completes
(unchanged by default)
horizon : float, optional
Elevation limit serving as horizon for session, in degrees
kwargs : dict, optional
Ignore any other keyword arguments (simplifies passing options as dict)
Raises
------
ValueError
If antenna with a specified name is not found on KAT connection object
RequestSensorError
If Data centre frequency could not be set
"""
# Create references to allow easy copy-and-pasting from this function
session, kat, data, katsys = self, self.kat, self.data, self.kat.sys
session.ants = ants = ant_array(kat, self.get_ant_names())
ant_names = [ant.name for ant in ants]
# Override provided session parameters (or initialize them from existing parameters if not provided)
session.experiment_id = experiment_id = session.experiment_id if experiment_id is None else experiment_id
session.nd_params = nd_params = session.nd_params if nd_params is None else nd_params
session.stow_when_done = stow_when_done = session.stow_when_done if stow_when_done is None else stow_when_done
session.horizon = session.horizon if horizon is None else horizon
# Prep capturing system
data.req.capture_init(self.product)
# Setup strategies for the sensors we might be wait()ing on
ants.req.sensor_sampling('lock', 'event')
ants.req.sensor_sampling('scan.status', 'event')
ants.req.sensor_sampling('mode', 'event')
# XXX can we still get these sensors somewhere?
# data.req.sensor_sampling('k7w.spead_dump_period', 'event')
# data.req.sensor_sampling('k7w.last_dump_timestamp', 'event')
centre_freq = self.get_centre_freq()
# Check this...
# # The data proxy needs to know the dump period (in s) as well as the RF centre frequency
# # of 400-MHz downconverted band (in Hz), which is used for fringe stopping / delay tracking
# data.req.capture_setup(1. / dump_rate, session.get_centre_freq(200.0) * 1e6)
user_logger.info('Antennas used = %s' % (' '.join(ant_names),))
user_logger.info('Observer = %s' % (observer,))
user_logger.info("Description ='%s'" % (description,))
user_logger.info('Experiment ID = %s' % (experiment_id,))
user_logger.info('Data product = %s' % (self.product,))
user_logger.info("RF centre frequency = %g MHz, dump rate = %g Hz" % (centre_freq, 1.0 / self.dump_period))
if nd_params['period'] > 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s, every %g s if possible" % \
(nd_params['diode'], nd_params['on'], nd_params['off'], nd_params['period'])
elif nd_params['period'] == 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s at every opportunity" % \
(nd_params['diode'], nd_params['on'], nd_params['off'])
else:
nd_info = "Noise diode will not fire automatically"
user_logger.info(nd_info + " while performing canned commands")
# Send script options to SPEAD stream
self.obs_params['observer'] = observer
self.obs_params['description'] = description
self.obs_params['experiment_id'] = experiment_id
self.obs_params['nd_params'] = nd_params
self.obs_params['stow_when_done'] = stow_when_done
self.obs_params['horizon'] = session.horizon
self.obs_params['centre_freq'] = centre_freq
self.obs_params['product'] = self.product
self.obs_params.update(kwargs)
# Send script options to CAM system
katsys.req.set_script_param('script-ants', ','.join(ant_names))
katsys.req.set_script_param('script-observer', observer)
katsys.req.set_script_param('script-description', description)
katsys.req.set_script_param('script-experiment-id', experiment_id)
katsys.req.set_script_param('script-rf-params',
'Centre freq=%g MHz, Dump rate=%g Hz' % (centre_freq, 1.0 / self.dump_period))
katsys.req.set_script_param('script-nd-params', 'Diode=%s, On=%g s, Off=%g s, Period=%g s' %
(nd_params['diode'], nd_params['on'], nd_params['off'], nd_params['period']))
# If the CBF is simulated, it will have position update commands
if hasattr(data.req, 'cbf_pointing_az') and hasattr(data.req, 'cbf_pointing_el'):
def listener_actual_azim(update_seconds, value_seconds, status, value):
#Listener callback now includes status, use it here
if status == 'nominal':
data.req.cbf_pointing_az(value)
def listener_actual_elev(update_seconds, value_seconds, status, value):
#Listener callback now includes status, use it here
if status == 'nominal':
data.req.cbf_pointing_el(value)
first_ant = ants[0]
# The minimum time between position updates is fraction of dump period to ensure fresh data at every dump
update_period_seconds = 0.4 * self.dump_period
# Tell the position sensors to report their values periodically at this rate
first_ant.sensor.pos_actual_scan_azim.set_strategy('period', str(float(update_period_seconds)))
first_ant.sensor.pos_actual_scan_elev.set_strategy('period', str(float(update_period_seconds)))
# Tell the Data simulator where the first antenna is so that it can generate target flux at the right time
first_ant.sensor.pos_actual_scan_azim.register_listener(listener_actual_azim, update_period_seconds)
first_ant.sensor.pos_actual_scan_elev.register_listener(listener_actual_elev, update_period_seconds)
user_logger.info("CBF simulator receives position updates from antenna '%s'" % (first_ant.name,))
user_logger.info("--------------------------")
def capture_start(self):
"""Start capturing data to HDF5 file."""
# This starts the data product stream
self.data.req.capture_start(self.product)
def label(self, label):
"""Add timestamped label to HDF5 file.
The label is typically a single word used to indicate the start of a
new compound scan.
"""
if label:
# XXX Changing data product name -> ID in a hard-coded fashion
self.data.req.set_obs_label(self.product, label)
user_logger.info("New compound scan: '%s'" % (label,))
def on_target(self, target):
"""Determine whether antennas are tracking a given target.
If all connected antennas in the sub-array participating in the session
have the given *target* as target and are locked in mode 'POINT', we
conclude that the array is on target.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to check, as an object or description string
Returns
-------
on_target : {True, False}
True if antennas are tracking the given target
"""
if self.ants is None:
return False
# Turn target object into description string (or use string as is)
target = getattr(target, 'description', target)
for ant in self.ants:
# Ignore disconnected antennas or ones with missing sensors
if not ant.is_connected() or any([s not in ant.sensor for s in ('target', 'mode', 'lock')]):
continue
if (ant.sensor.target.get_value() != target) or (ant.sensor.mode.get_value() != 'POINT') or \
(ant.sensor.lock.get_value() != '1'):
return False
return True
def target_visible(self, target, duration=0., timeout=300.):
"""Check whether target is visible for given duration.
This checks whether the *target* is currently above the session horizon
and also above the horizon for the next *duration* seconds, taking into
account the *timeout* on slewing to the target. If the target is not
visible, an appropriate message is logged. The target location is not
very accurate, as it does not include refraction, and this is therefore
intended as a rough check only.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to check, as an object or description string
duration : float, optional
Duration of observation of target, in seconds
timeout : float, optional
Timeout involved when antenna cannot reach the target
Returns
-------
visible : {True, False}
True if target is visible from all antennas for entire duration
"""
if self.ants is None:
return False
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
horizon = katpoint.deg2rad(self.horizon)
# Include an average time to slew to the target (worst case about 90 seconds, so half that)
now = time.time() + 45.
average_el, visible_before, visible_after = [], [], []
# Ignore disconnected antennas or ones with missing sensors
ant_descriptions = [ant.sensor.observer.get_value() for ant in self.ants
if ant.is_connected() and 'observer' in ant.sensor]
# Also ignore antennas with empty or missing observer strings
antennas = [katpoint.Antenna(descr) for descr in ant_descriptions if descr]
if not antennas:
user_logger.warning("No usable antennas found - target '%s' assumed to be down" % (target.name,))
return False
for antenna in antennas:
az, el = target.azel(now, antenna)
average_el.append(katpoint.rad2deg(el))
# If not up yet, see if the target will pop out before the timeout
if el < horizon:
now += timeout
az, el = target.azel(now, antenna)
visible_before.append(el >= horizon)
# Check what happens at end of observation
az, el = target.azel(now + duration, antenna)
visible_after.append(el >= horizon)
if all(visible_before) and all(visible_after):
return True
always_invisible = any(~np.array(visible_before) & ~np.array(visible_after))
if always_invisible:
user_logger.warning("Target '%s' is never up during requested period (average elevation is %g degrees)" %
(target.name, np.mean(average_el)))
else:
user_logger.warning("Target '%s' will rise or set during requested period" % (target.name,))
return False
def fire_noise_diode(self, diode='coupler', on=10.0, off=10.0, period=0.0, align=True, announce=True):
"""Switch noise diode on and off.
This switches the selected noise diode on and off for all the antennas
doing the observation.
The on and off durations can be specified. Additionally, setting the
*period* allows the noise diode to be fired on a semi-regular basis. The
diode will only be fired if more than *period* seconds have elapsed since
the last firing. If *period* is 0, the diode is fired unconditionally.
On the other hand, if *period* is negative it is not fired at all.
Parameters
----------
diode : {'coupler', 'pin'}
Noise diode source to use (pin diode is situated in feed horn and
produces high-level signal, while coupler diode couples into
electronics after the feed at a much lower level)
on : float, optional
Minimum duration for which diode is switched on, in seconds
off : float, optional
Minimum duration for which diode is switched off, in seconds
period : float, optional
Minimum time between noise diode firings, in seconds. (The maximum
time is determined by the duration of individual slews and scans,
which are considered atomic and won't be interrupted.) If 0, fire
diode unconditionally. If negative, don't fire diode at all.
align : {True, False}, optional
True if noise diode transitions should be aligned with correlator
dump boundaries, or False if they should happen as soon as possible
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
fired : {True, False}
True if noise diode fired
Notes
-----
When the function returns, data will still be recorded to the HDF5 file.
The specified *off* duration is therefore a minimum value. Remember to
run :meth:`end` to close the file and finally stop the observation
(automatically done when this object is used in a with-statement)!
"""
# XXX This needs a rethink...
return False
#
# if self.ants is None:
# raise ValueError('No antennas specified for session - please run session.standard_setup first')
# # Create references to allow easy copy-and-pasting from this function
# session, kat, ants, data, dump_period = self, self.kat, self.ants, self.data, self.dump_period
#
# # Wait for the dump period to become known, as it is needed to set a good timeout for the first dump
# if dump_period == 0.0:
# if not data.wait('k7w_spead_dump_period', lambda sensor: sensor.value > 0, timeout=1.5 * session._requested_dump_period, poll_period=0.2 * session._requested_dump_period):
# dump_period = session.dump_period = session._requested_dump_period
# user_logger.warning('SPEAD metadata header is overdue at ingest - noise diode will be out of sync')
# else:
# # Get actual dump period in seconds (as opposed to the requested period)
# dump_period = session.dump_period = data.sensor.k7w_spead_dump_period.get_value()
# # This can still go wrong if the sensor times out - again fall back to requested period
# if dump_period is None:
# dump_period = session.dump_period = session._requested_dump_period
# user_logger.warning('Could not read actual dump period - noise diode will be out of sync')
# # Wait for the first correlator dump to appear, both as a check that capturing works and to align noise diode
# last_dump = data.sensor.k7w_last_dump_timestamp.get_value()
# if last_dump == session._end_of_previous_session or last_dump is None:
# user_logger.info('waiting for correlator dump to arrive')
# # Wait for the first correlator dump to appear
# if not data.wait('k7w_last_dump_timestamp', lambda sensor: sensor.value > session._end_of_previous_session,
# timeout=2.2 * dump_period, poll_period=0.2 * dump_period):
# last_dump = time.time()
# user_logger.warning('Correlator dump is overdue at k7_capture - noise diode will be out of sync')
# else:
# last_dump = data.sensor.k7w_last_dump_timestamp.get_value()
# if last_dump is None:
# last_dump = time.time()
# user_logger.warning('Could not read last dump timestamp - noise diode will be out of sync')
# else:
# user_logger.info('correlator dump arrived')
#
# # If period is non-negative, quit if it is not yet time to fire the noise diode
# if period < 0.0 or (time.time() - session.last_nd_firing) < period:
# return False
#
# if align:
# # Round "on" duration up to the nearest integer multiple of dump period
# on = np.ceil(float(on) / dump_period) * dump_period
# # The last fully complete dump is more than 1 dump period in the past
# next_dump = last_dump + 2 * dump_period
# # The delay in setting up noise diode firing - next dump should be at least this far in future
# lead_time = 0.25
# # Find next suitable dump boundary
# now = time.time()
# while next_dump < now + lead_time:
# next_dump += dump_period
#
# if announce:
# user_logger.info("Firing '%s' noise diode (%g seconds on, %g seconds off)" % (diode, on, off))
# else:
# user_logger.info('firing noise diode')
#
# if align:
# # Schedule noise diode switch-on on all antennas at the next suitable dump boundary
# ants.req.rfe3_rfe15_noise_source_on(diode, 1, 1000 * next_dump, 0)
# # If using Data simulator, fire the simulated noise diode for desired period to toggle power levels in output
# if hasattr(data.req, 'data_fire_nd') and dump_period > 0:
# time.sleep(max(next_dump - time.time(), 0))
# data.req.data_fire_nd(np.ceil(float(on) / dump_period))
# # Wait until the noise diode is on
# time.sleep(max(next_dump + 0.5 * on - time.time(), 0))
# # Schedule noise diode switch-off on all antennas a duration of "on" seconds later
# ants.req.rfe3_rfe15_noise_source_on(diode, 0, 1000 * (next_dump + on), 0)
# time.sleep(max(next_dump + on + off - time.time(), 0))
# # Mark on -> off transition as last firing
# session.last_nd_firing = next_dump + on
# else:
# # Switch noise diode on on all antennas
# ants.req.rfe3_rfe15_noise_source_on(diode, 1, 'now', 0)
# # If using Data simulator, fire the simulated noise diode for desired period to toggle power levels in output
# if hasattr(data.req, 'data_fire_nd'):
# data.req.data_fire_nd(np.ceil(float(on) / dump_period))
# time.sleep(on)
# # Mark on -> off transition as last firing
# session.last_nd_firing = time.time()
# # Switch noise diode off on all antennas
# ants.req.rfe3_rfe15_noise_source_on(diode, 0, 'now', 0)
# time.sleep(off)
#
# user_logger.info('noise diode fired')
# return True
def set_target(self, target):
"""Set target to use for tracking or scanning.
This sets the target on all antennas involved in the session, as well as
on the CBF (where it serves as delay-tracking centre). It also moves the
test target in the Data simulator to match the requested target (if it is
a stationary 'azel' type).
Parameters
----------
target : :class:`katpoint.Target` object or string
Target as an object or description string
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
ants, data = self.ants, self.data
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
# Set the antenna target (antennas will already move there if in mode 'POINT')
ants.req.target(target)
# Provide target to the data proxy, which will use it as delay-tracking center
# XXX No fringe stopping support in data_rts yet
# data.req.target(target)
# If using Data simulator and target is azel type, move test target here (allows changes in correlation power)
if hasattr(data.req, 'cbf_test_target') and target.body_type == 'azel':
azel = katpoint.rad2deg(np.array(target.azel()))
data.req.cbf_test_target(azel[0], azel[1], 100.)
def track(self, target, duration=20.0, announce=True):
"""Track a target.
This tracks the specified target with all antennas involved in the
session.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to track, as an object or description string
duration : float, optional
Minimum duration of track, in seconds
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
success : {True, False}
True if track was successfully completed
Notes
-----
When the function returns, the antennas will still track the target and
data will still be recorded to the HDF5 file. The specified *duration*
is therefore a minimum value. Remember to run :meth:`end` to close the
file and finally stop the observation (automatically done when this
object is used in a with-statement)!
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
session, ants = self, self.ants
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating %g-second track on target '%s'" % (duration, target.name))
if not session.target_visible(target, duration):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
session.set_target(target)
session.fire_noise_diode(announce=False, **session.nd_params)
# Avoid slewing if we are already on target
if not session.on_target(target):
user_logger.info('slewing to target')
# Start moving each antenna to the target
ants.req.mode('POINT')
# Wait until they are all in position (with 5 minute timeout)
ants.wait('lock', True, 300)
user_logger.info('target reached')
session.fire_noise_diode(announce=False, **session.nd_params)
user_logger.info('tracking target')
# Do nothing else for the duration of the track
time.sleep(duration)
user_logger.info('target tracked for %g seconds' % (duration,))
session.fire_noise_diode(announce=False, **session.nd_params)
return True
@dynamic_doc("', '".join(projections), default_proj)
def scan(self, target, duration=30.0, start=(-3.0, 0.0), end=(3.0, 0.0),
index=-1, projection=default_proj, announce=True):
"""Scan across a target.
This scans across a target with all antennas involved in the session.
The scan starts at an offset of *start* degrees from the target and ends
at an offset of *end* degrees. These offsets are calculated in a projected
coordinate system (see *Notes* below). The scan lasts for *duration*
seconds.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to scan across, as an object or description string
duration : float, optional
Minimum duration of scan across target, in seconds
start : sequence of 2 floats, optional
Initial scan position as (x, y) offset in degrees (see *Notes* below)
end : sequence of 2 floats, optional
Final scan position as (x, y) offset in degrees (see *Notes* below)
index : integer, optional
Scan index, used for display purposes when this is part of a raster
projection : {'%s'}, optional
Name of projection in which to perform scan relative to target
(default = '%s')
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
success : {True, False}
True if scan was successfully completed
Notes
-----
Take note that scanning is done in a projection on the celestial sphere,
and the scan start and end are in the projected coordinates. The azimuth
coordinate of a scan in azimuth will therefore change more than the
*start* and *end* parameters suggest, especially at high elevations
(unless the 'plate-carree' projection is used). This ensures that the
same scan parameters will lead to the same qualitative scan for any
position on the celestial sphere.
When the function returns, the antennas will still track the end-point of
the scan and data will still be recorded to the HDF5 file. The specified
*duration* is therefore a minimum value. Remember to run :meth:`end` to
close the file and finally stop the observation (automatically done when
this object is used in a with-statement)!
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
session, ants = self, self.ants
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
scan_name = 'scan' if index < 0 else 'scan %d' % (index,)
if announce:
user_logger.info("Initiating %g-second scan across target '%s'" % (duration, target.name))
if not session.target_visible(target, duration):
user_logger.warning("Skipping scan, as target '%s' will be below horizon" % (target.name,))
return False
session.set_target(target)
session.fire_noise_diode(announce=False, **session.nd_params)
user_logger.info('slewing to start of %s' % (scan_name,))
# Move each antenna to the start position of the scan
ants.req.scan_asym(start[0], start[1], end[0], end[1], duration, projection)
ants.req.mode('POINT')
# Wait until they are all in position (with 5 minute timeout)
ants.wait('lock', True, 300)
user_logger.info('start of %s reached' % (scan_name,))
session.fire_noise_diode(announce=False, **session.nd_params)
user_logger.info('performing %s' % (scan_name,))
# Start scanning the antennas
ants.req.mode('SCAN')
# Wait until they are all finished scanning (with 5 minute timeout)
ants.wait('scan_status', 'after', 300)
user_logger.info('%s complete' % (scan_name,))
session.fire_noise_diode(announce=False, **session.nd_params)
return True
@dynamic_doc("', '".join(projections), default_proj)
def raster_scan(self, target, num_scans=3, scan_duration=30.0, scan_extent=6.0, scan_spacing=0.5,
scan_in_azimuth=True, projection=default_proj, announce=True):
"""Perform raster scan on target.
A *raster scan* is a series of scans across a target performed by all
antennas involved in the session, scanning in either azimuth or
elevation while the other coordinate is changed in steps for each scan.
Each scan is offset by the same amount on both sides of the target along
the scanning coordinate (and therefore has the same extent), and the
scans are arranged symmetrically around the target in the non-scanning
(stepping) direction. If an odd number of scans are done, the middle
scan will therefore pass directly over the target. The default is to
scan in azimuth and step in elevation, leading to a series of horizontal
scans. Each scan is scanned in the opposite direction to the previous
scan to save time. Additionally, the first scan always starts at the top
left of the target, regardless of scan direction.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to scan across, as an object or description string
num_scans : integer, optional
Number of scans across target (an odd number is better, as this will
scan directly over the source during the middle scan)
scan_duration : float, optional
Minimum duration of each scan across target, in seconds
scan_extent : float, optional
Extent (angular length) of scan along scanning coordinate, in degrees
(see *Notes* below)
scan_spacing : float, optional
Separation between each consecutive scan along the coordinate that is
not scanned but stepped, in degrees
scan_in_azimuth : {True, False}
True if azimuth changes during scan while elevation remains fixed;
False if scanning in elevation and stepping in azimuth instead
projection : {'%s'}, optional
Name of projection in which to perform scan relative to target
(default = '%s')
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
success : {True, False}
True if raster scan was successfully completed
Notes
-----
Take note that scanning is done in a projection on the celestial sphere,
and the scan extent and spacing apply to the projected coordinates.
The azimuth coordinate of a scan in azimuth will therefore change more
than the *scan_extent* parameter suggests, especially at high elevations.
This ensures that the same scan parameters will lead to the same
qualitative scan for any position on the celestial sphere.
When the function returns, the antennas will still track the end-point of
the last scan and data will still be recorded to the HDF5 file. The
specified *scan_duration* is therefore a minimum value. Remember to run
:meth:`end` to close the files and finally stop the observation
(automatically done when this object is used in a with-statement)!
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
session = self
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating raster scan (%d %g-second scans extending %g degrees) on target '%s'" %
(num_scans, scan_duration, scan_extent, target.name))
# Calculate average time that noise diode is operated per scan, to add to scan duration in check below
nd_time = session.nd_params['on'] + session.nd_params['off']
nd_time *= scan_duration / max(session.nd_params['period'], scan_duration)
nd_time = nd_time if session.nd_params['period'] >= 0 else 0.
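# Worked example (illustrative only, with hypothetical noise diode settings): for
# nd_params = {'on': 10., 'off': 10., 'period': 180.} and scan_duration = 30., the
# average diode overhead per scan is (10 + 10) * 30 / max(180, 30) = 3.33 s, which is
# the extra time added to each scan in the visibility check below.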
# Check whether the target will be visible for entire duration of raster scan
if not session.target_visible(target, (scan_duration + nd_time) * num_scans):
user_logger.warning("Skipping raster scan, as target '%s' will be below horizon" % (target.name,))
return False
# Create start and end positions of each scan, based on scan parameters
scan_levels = np.arange(-(num_scans // 2), num_scans // 2 + 1)
scanning_coord = (scan_extent / 2.0) * (-1) ** scan_levels
stepping_coord = scan_spacing * scan_levels
# Flip sign of elevation offsets to ensure that the first scan always starts at the top left of target
scan_starts = zip(scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, -scanning_coord)
scan_ends = zip(-scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, scanning_coord)
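# Worked example (derived from the expressions above, using the default parameters
# num_scans=3, scan_extent=6.0, scan_spacing=0.5, scan_in_azimuth=True):
#   scan_levels    = [-1, 0, 1]
#   scanning_coord = [-3.0, 3.0, -3.0]
#   stepping_coord = [-0.5, 0.0, 0.5]
#   scan_starts    = [(-3.0, 0.5), (3.0, 0.0), (-3.0, -0.5)]
#   scan_ends      = [(3.0, 0.5), (-3.0, 0.0), (3.0, -0.5)]
# i.e. the first scan starts at the top left of the target and consecutive scans
# alternate direction while stepping downwards in elevation.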
# Perform multiple scans across the target
for scan_index, (start, end) in enumerate(zip(scan_starts, scan_ends)):
session.scan(target, duration=scan_duration, start=start, end=end,
index=scan_index, projection=projection, announce=False)
return True
def end(self, interrupted=False):
"""End the session, which stops data capturing and closes the data file.
This does not affect the antennas, which continue to perform their
last action (unless explicitly asked to stow).
Parameters
----------
interrupted : {False, True}, optional
True if session got interrupted via an exception
"""
try:
# Create references to allow easy copy-and-pasting from this function
session, ants, data, katsys = self, self.ants, self.data, self.kat.sys
# XXX still relevant? -> via [capture_done]
# # Obtain the name of the file currently being written to
# reply = data.req.k7w_get_current_file()
# outfile = reply[1].replace('writing', 'unaugmented') if reply.succeeded else '<unknown file>'
# user_logger.info('Scans complete, data captured to %s' % (outfile,))
# # The final output file name after augmentation
# session.output_file = os.path.basename(outfile).replace('.unaugmented', '')
# Stop the data flow
data.req.capture_stop(self.product)
# Stop streaming KATCP sensor updates to the capture thread
# data.req.katcp2spead_stop_stream()
user_logger.info('Ended data capturing session with experiment ID %s' % (session.experiment_id,))
katsys.req.set_script_param('script-endtime', time.time())
katsys.req.set_script_param('script-status', 'interrupted' if interrupted else 'completed')
activity_logger.info('Ended data capturing session (%s) with experiment ID %s' %
('interrupted' if interrupted else 'completed', session.experiment_id,))
if session.stow_when_done and self.ants is not None:
user_logger.info('stowing dishes')
activity_logger.info('Stowing dishes')
ants.req.mode('STOW')
user_logger.info('==========================')
finally:
# Disable logging to HDF5 file
user_logger.removeHandler(self._script_log_handler)
# Finally close the HDF5 file and prepare for augmentation after all logging and parameter settings are done
data.req.capture_done(self.product)
activity_logger.info("----- Script ended %s (%s)" % (sys.argv[0], ' '.join(sys.argv[1:])))
class TimeSession(CaptureSessionBase):
"""Fake CaptureSession object used to estimate the duration of an experiment."""
def __init__(self, kat, product=None, dump_rate=1.0, **kwargs):
self.kat = kat
self.data = kat.data_rts
# Default settings for session parameters (in case standard_setup is not called)
self.ants = None
self.experiment_id = 'interactive'
self.stow_when_done = False
self.nd_params = {'diode': 'coupler', 'on': 0., 'off': 0., 'period': -1.}
self.last_nd_firing = 0.
self.output_file = ''
self.dump_period = self._requested_dump_period = 1.0 / dump_rate
self.horizon = 3.0
self.start_time = self._end_of_previous_session = time.time()
self.time = self.start_time
self.projection = ('ARC', 0., 0.)
# Actual antenna elevation limit (as opposed to user-requested session horizon)
self.el_limit = 2.5
# Usurp time module functions that deal with the passage of real time, and connect them to session time instead
self._realtime, self._realsleep = time.time, time.sleep
time.time = lambda: self.time
def simsleep(seconds):
self.time += seconds
time.sleep = simsleep
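# Illustrative note (not part of the original code): from this point on, any
# time.sleep(60) issued by the canned commands simply advances self.time by 60 seconds,
# and time.time() returns that simulated clock, so the dry run accumulates an estimated
# duration without actually waiting.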
self._fake_ants = []
# Modify logging so that only stream handlers are active and timestamps are prefixed with 'DRY-RUN:'
for handler in user_logger.handlers:
if isinstance(handler, logging.StreamHandler):
form = handler.formatter
form.old_datefmt = form.datefmt
form.datefmt = 'DRY-RUN: ' + (form.datefmt if form.datefmt else '%Y-%m-%d %H:%M:%S')
else:
handler.old_level = handler.level
handler.setLevel(100)
user_logger.info('Estimating duration of experiment starting now (nothing real will happen!)')
user_logger.info('==========================')
user_logger.info('New data capturing session')
user_logger.info('--------------------------')
user_logger.info("Data proxy used = %s" % (self.data.name,))
if product is None:
user_logger.info('Data product = unknown to simulator')
else:
user_logger.info('Data product = %s' % (product,))
activity_logger.info("Timing simulation. ----- Script starting %s (%s). Output file None" % (sys.argv[0], ' '.join(sys.argv[1:])))
def __enter__(self):
"""Start time estimate, overriding the time module."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Finish time estimate, restoring the time module."""
self.end()
# Do not suppress any exceptions that occurred in the body of with-statement
return False
def _azel(self, target, timestamp, antenna):
"""Target (az, el) position in degrees (including offsets in degrees)."""
projection_type, x, y = self.projection
az, el = target.plane_to_sphere(katpoint.deg2rad(x), katpoint.deg2rad(y), timestamp, antenna, projection_type)
return katpoint.rad2deg(az), katpoint.rad2deg(el)
def _teleport_to(self, target, mode='POINT'):
"""Move antennas instantaneously onto target (or nearest point on horizon)."""
for m in range(len(self._fake_ants)):
antenna = self._fake_ants[m][0]
az, el = self._azel(target, self.time, antenna)
self._fake_ants[m] = (antenna, mode, az, max(el, self.el_limit))
def _slew_to(self, target, mode='POINT', timeout=300.):
"""Slew antennas to target (or nearest point on horizon), with timeout."""
slew_times = []
for ant, ant_mode, ant_az, ant_el in self._fake_ants:
def estimate_slew(timestamp):
"""Obtain instantaneous target position and estimate time to slew there."""
# Target position right now
az, el = self._azel(target, timestamp, ant)
# If target is below horizon, aim at closest point on horizon
az_dist, el_dist = np.abs(az - ant_az), np.abs(max(el, self.el_limit) - ant_el)
# Ignore azimuth wraps and drive strategies
az_dist = az_dist if az_dist < 180. else 360. - az_dist
# Assume az speed of 2 deg/s, el speed of 1 deg/s and overhead of 1 second
slew_time = max(0.5 * az_dist, 1.0 * el_dist) + 1.0
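# Worked example of the estimate above (illustrative only): an antenna 90 degrees away
# in azimuth and 20 degrees away in elevation gives
# slew_time = max(0.5 * 90, 1.0 * 20) + 1.0 = 46 seconds.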
return az, el, slew_time
# Initial estimate of slew time, based on a stationary target
az1, el1, slew_time = estimate_slew(self.time)
# Crude adjustment for target motion: chase target position for 2 iterations
az2, el2, slew_time = estimate_slew(self.time + slew_time)
az2, el2, slew_time = estimate_slew(self.time + slew_time)
# Ensure slew does not take longer than timeout
slew_time = min(slew_time, timeout)
# If source is below horizon, handle timeout and potential rise in that interval
if el2 < self.el_limit:
# Position after timeout
az_after_timeout, el_after_timeout = self._azel(target, self.time + timeout, ant)
# If source is still down, slew time == timeout, else estimate rise time through linear interpolation
slew_time = (self.el_limit - el1) / (el_after_timeout - el1) * timeout \
if el_after_timeout > self.el_limit else timeout
az2, el2 = self._azel(target, self.time + slew_time, ant)
el2 = max(el2, self.el_limit)
slew_times.append(slew_time)
# print "%s slewing from (%.1f, %.1f) to (%.1f, %.1f) in %.1f seconds" % \
# (ant.name, ant_az, ant_el, az2, el2, slew_time)
# The overall slew time is the max for all antennas - adjust current time to reflect the slew
self.time += (np.max(slew_times) if len(slew_times) > 0 else 0.)
# Blindly assume all antennas are on target (or on horizon) after this interval
self._teleport_to(target, mode)
def get_centre_freq(self):
"""Get RF (sky) frequency associated with middle CBF channel.
Returns
-------
centre_freq : float
Actual centre frequency in MHz
"""
return 1284.0
def set_centre_freq(self, centre_freq):
"""Set RF (sky) frequency associated with middle CBF channel.
Parameters
----------
centre_freq : float
Desired centre frequency in MHz
"""
pass
def standard_setup(self, observer, description, experiment_id=None,
centre_freq=None, nd_params=None,
stow_when_done=None, horizon=None, no_mask=False, **kwargs):
"""Perform basic experimental setup including antennas, LO and dump rate."""
self.ants = ant_array(self.kat, self.get_ant_names())
for ant in self.ants:
try:
self._fake_ants.append((katpoint.Antenna(ant.sensor.observer.get_value()),
ant.sensor.mode.get_value(),
ant.sensor.pos_actual_scan_azim.get_value(),
ant.sensor.pos_actual_scan_elev.get_value()))
except AttributeError:
pass
# Override provided session parameters (or initialize them from existing parameters if not provided)
self.experiment_id = experiment_id = self.experiment_id if experiment_id is None else experiment_id
self.nd_params = nd_params = self.nd_params if nd_params is None else nd_params
self.stow_when_done = stow_when_done = self.stow_when_done if stow_when_done is None else stow_when_done
self.horizon = self.horizon if horizon is None else horizon
user_logger.info('Antennas used = %s' % (' '.join([ant[0].name for ant in self._fake_ants]),))
user_logger.info('Observer = %s' % (observer,))
user_logger.info("Description ='%s'" % (description,))
user_logger.info('Experiment ID = %s' % (experiment_id,))
# The actual centre frequency cannot be queried in this fake session, so fall back to the simulated default
centre_freq = self.get_centre_freq()
if centre_freq is None:
user_logger.info('RF centre frequency = unknown to simulator, dump rate = %g Hz' % (1.0 / self.dump_period,))
else:
user_logger.info('RF centre frequency = %g MHz, dump rate = %g Hz' % (centre_freq, 1.0 / self.dump_period))
if nd_params['period'] > 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s, every %g s if possible" % \
(nd_params['diode'], nd_params['on'], nd_params['off'], nd_params['period'])
elif nd_params['period'] == 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s at every opportunity" % \
(nd_params['diode'], nd_params['on'], nd_params['off'])
else:
nd_info = "Noise diode will not fire automatically"
user_logger.info(nd_info + " while performing canned commands")
user_logger.info('--------------------------')
def capture_start(self):
"""Starting capture has no timing effect."""
pass
def label(self, label):
"""Adding label has no timing effect."""
if label:
user_logger.info("New compound scan: '%s'" % (label,))
def on_target(self, target):
"""Determine whether antennas are tracking a given target."""
if not self._fake_ants:
return False
for antenna, mode, ant_az, ant_el in self._fake_ants:
az, el = self._azel(target, self.time, antenna)
# Checking for lock and checking for target identity are treated as the same thing here
if (az != ant_az) or (el != ant_el) or (mode != 'POINT'):
return False
return True
def target_visible(self, target, duration=0., timeout=300., operation='scan'):
"""Check whether target is visible for given duration."""
if not self._fake_ants:
return False
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
horizon = katpoint.deg2rad(self.horizon)
# Include an average time to slew to the target (worst case about 90 seconds, so half that)
now = self.time + 45.
average_el, visible_before, visible_after = [], [], []
for antenna, mode, ant_az, ant_el in self._fake_ants:
az, el = target.azel(now, antenna)
average_el.append(katpoint.rad2deg(el))
# If not up yet, see if the target will pop out before the timeout
if el < horizon:
now += timeout
az, el = target.azel(now, antenna)
visible_before.append(el >= horizon)
# Check what happens at end of observation
az, el = target.azel(now + duration, antenna)
visible_after.append(el >= horizon)
if all(visible_before) and all(visible_after):
return True
always_invisible = any(~np.array(visible_before) & ~np.array(visible_after))
if always_invisible:
user_logger.warning("Target '%s' is never up during requested period (average elevation is %g degrees)" %
(target.name, np.mean(average_el)))
else:
user_logger.warning("Target '%s' will rise or set during requested period" % (target.name,))
return False
def fire_noise_diode(self, diode='coupler', on=10.0, off=10.0, period=0.0, align=True, announce=True):
"""Estimate time taken to fire noise diode."""
return False
# XXX needs a rethink
# if not self._fake_ants:
# raise ValueError('No antennas specified for session - please run session.standard_setup first')
# if self.dump_period == 0.0:
# # Wait for the first correlator dump to appear
# user_logger.info('waiting for correlator dump to arrive')
# self.dump_period = self._requested_dump_period
# time.sleep(self.dump_period)
# user_logger.info('correlator dump arrived')
# if period < 0.0 or (self.time - self.last_nd_firing) < period:
# return False
# if announce:
# user_logger.info("Firing '%s' noise diode (%g seconds on, %g seconds off)" % (diode, on, off))
# else:
# user_logger.info('firing noise diode')
# self.time += on
# self.last_nd_firing = self.time + 0.
# self.time += off
# user_logger.info('fired noise diode')
# return True
def set_target(self, target):
"""Setting target has no timing effect."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
def track(self, target, duration=20.0, announce=True):
"""Estimate time taken to perform track."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating %g-second track on target '%s'" % (duration, target.name))
if not self.target_visible(target, duration):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
self.fire_noise_diode(announce=False, **self.nd_params)
if not self.on_target(target):
user_logger.info('slewing to target')
self._slew_to(target)
user_logger.info('target reached')
self.fire_noise_diode(announce=False, **self.nd_params)
user_logger.info('tracking target')
self.time += duration + 1.0
user_logger.info('target tracked for %g seconds' % (duration,))
self.fire_noise_diode(announce=False, **self.nd_params)
self._teleport_to(target)
return True
def scan(self, target, duration=30.0, start=(-3.0, 0.0), end=(3.0, 0.0),
index=-1, projection=default_proj, announce=True):
"""Estimate time taken to perform single linear scan."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
scan_name = 'scan' if index < 0 else 'scan %d' % (index,)
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating %g-second scan across target '%s'" % (duration, target.name))
if not self.target_visible(target, duration):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
self.fire_noise_diode(announce=False, **self.nd_params)
projection = Offset.PROJECTIONS[projection]
self.projection = (projection, start[0], start[1])
user_logger.info('slewing to start of %s' % (scan_name,))
self._slew_to(target, mode='SCAN')
user_logger.info('start of %s reached' % (scan_name,))
self.fire_noise_diode(announce=False, **self.nd_params)
# Assume the antennas can keep up with the target (and that the scan is not too fast for them)
user_logger.info('performing %s' % (scan_name,))
self.time += duration + 1.0
user_logger.info('%s complete' % (scan_name,))
self.fire_noise_diode(announce=False, **self.nd_params)
self.projection = (projection, end[0], end[1])
self._teleport_to(target)
return True
def raster_scan(self, target, num_scans=3, scan_duration=30.0, scan_extent=6.0, scan_spacing=0.5,
scan_in_azimuth=True, projection=default_proj, announce=True):
"""Estimate time taken to perform raster scan."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
projection = Offset.PROJECTIONS[projection]
if announce:
user_logger.info("Initiating raster scan (%d %g-second scans extending %g degrees) on target '%s'" %
(num_scans, scan_duration, scan_extent, target.name))
nd_time = self.nd_params['on'] + self.nd_params['off']
nd_time *= scan_duration / max(self.nd_params['period'], scan_duration)
nd_time = nd_time if self.nd_params['period'] >= 0 else 0.
if not self.target_visible(target, (scan_duration + nd_time) * num_scans):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
# Create start and end positions of each scan, based on scan parameters
scan_levels = np.arange(-(num_scans // 2), num_scans // 2 + 1)
scanning_coord = (scan_extent / 2.0) * (-1) ** scan_levels
stepping_coord = scan_spacing * scan_levels
# Flip sign of elevation offsets to ensure that the first scan always starts at the top left of target
scan_starts = zip(scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, -scanning_coord)
scan_ends = zip(-scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, scanning_coord)
self.fire_noise_diode(announce=False, **self.nd_params)
# Perform multiple scans across the target
for scan_index, (start, end) in enumerate(zip(scan_starts, scan_ends)):
self.projection = (projection, start[0], start[1])
user_logger.info('slewing to start of scan %d' % (scan_index,))
self._slew_to(target, mode='SCAN')
user_logger.info('start of scan %d reached' % (scan_index,))
self.fire_noise_diode(announce=False, **self.nd_params)
# Assume the antennas can keep up with the target (and that the scan is not too fast for them)
user_logger.info('performing scan %d' % (scan_index,))
self.time += scan_duration + 1.0
user_logger.info('scan %d complete' % (scan_index,))
self.fire_noise_diode(announce=False, **self.nd_params)
self.projection = (projection, end[0], end[1])
self._teleport_to(target)
return True
def end(self):
"""Stop data capturing to shut down the session and close the data file."""
user_logger.info('Scans complete, no data captured as this is a timing simulation...')
user_logger.info('Ended data capturing session with experiment ID %s' % (self.experiment_id,))
activity_logger.info('Timing simulation. Ended data capturing session with experiment ID %s' % (self.experiment_id,))
if self.stow_when_done and self._fake_ants:
user_logger.info("Stowing dishes.")
activity_logger.info('Timing simulation. Stowing dishes.')
self._teleport_to(katpoint.Target("azel, 0.0, 90.0"), mode="STOW")
user_logger.info('==========================')
duration = self.time - self.start_time
# Let KATCoreConn know what the estimated observation duration was.
self.kat.set_estimated_duration(duration)
if duration <= 100:
duration = '%d seconds' % (np.ceil(duration),)
elif duration <= 100 * 60:
duration = '%d minutes' % (np.ceil(duration / 60.),)
else:
duration = '%.1f hours' % (duration / 3600.,)
msg = "Experiment estimated to last %s until this time" % (duration,)
user_logger.info(msg + "\n")
activity_logger.info("Timing simulation. %s" % (msg,))
# Restore time module functions
time.time, time.sleep = self._realtime, self._realsleep
# Restore logging
for handler in user_logger.handlers:
if isinstance(handler, logging.StreamHandler):
handler.formatter.datefmt = handler.formatter.old_datefmt
del handler.formatter.old_datefmt
else:
handler.setLevel(handler.old_level)
del handler.old_level
activity_logger.info("Timing simulation. ----- Script ended %s (%s). Output file None" % (sys.argv[0], ' '.join(sys.argv[1:])))
| 50.14862
| 185
| 0.62869
| 67,226
| 0.948716
| 0
| 0
| 9,948
| 0.14039
| 0
| 0
| 40,117
| 0.566145
|
e17fa16cc2830c70bdb6cc63e17b12437230ec42
| 499
|
py
|
Python
|
projects/webptspy/apps/account/admin.py
|
codelieche/testing
|
1f4a3393f761654d98588c9ba90596a307fa59db
|
[
"MIT"
] | 2
|
2017-08-10T03:40:22.000Z
|
2017-08-17T13:20:16.000Z
|
projects/webptspy/apps/account/admin.py
|
codelieche/webpts
|
1f4a3393f761654d98588c9ba90596a307fa59db
|
[
"MIT"
] | null | null | null |
projects/webptspy/apps/account/admin.py
|
codelieche/webpts
|
1f4a3393f761654d98588c9ba90596a307fa59db
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
from django.contrib import admin
from .models import UserProfile
# Register your models here.
class UserProfileModelAdmin(admin.ModelAdmin):
"""
User management ModelAdmin
"""
list_display = ('id', 'username', 'nike_name', 'mobile',
'email', 'is_active')
list_filter = ('is_active',)
list_display_links = ('id', 'username')
search_fields = ('username', 'email', 'mobile', 'nike_name')
admin.site.register(UserProfile, UserProfileModelAdmin)
| 26.263158
| 64
| 0.653307
| 329
| 0.648915
| 0
| 0
| 0
| 0
| 0
| 0
| 195
| 0.384615
|
e18273cb48126cd36d2e98bfcd448716a51f67d4
| 396
|
py
|
Python
|
axicli.py
|
notpeter/AxiDraw_API
|
d9c35eb93fd85f96cf197908415822af9a725b41
|
[
"MIT"
] | null | null | null |
axicli.py
|
notpeter/AxiDraw_API
|
d9c35eb93fd85f96cf197908415822af9a725b41
|
[
"MIT"
] | 3
|
2021-01-17T04:31:57.000Z
|
2021-01-17T04:36:41.000Z
|
axicli.py
|
notpeter/AxiDraw_API
|
d9c35eb93fd85f96cf197908415822af9a725b41
|
[
"MIT"
] | null | null | null |
'''
axicli.py - Command line interface (CLI) for AxiDraw.
For quick help:
python axicli.py --help
Full user guide:
https://axidraw.com/doc/cli_api/
This script is a stand-alone version of AxiDraw Control, accepting
various options and providing a facility for setting default values.
'''
from axicli.axidraw_cli import axidraw_CLI
if __name__ == '__main__':
axidraw_CLI()
| 19.8
| 68
| 0.729798
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 315
| 0.795455
|
e183076e1912547a48c02bb69c7456b82ec312ba
| 852
|
py
|
Python
|
qualification_round_2017/C.py
|
asukakenji/codejam2018
|
a519f522337d7faf3d07a84f6e24f0161f95c880
|
[
"MIT"
] | null | null | null |
qualification_round_2017/C.py
|
asukakenji/codejam2018
|
a519f522337d7faf3d07a84f6e24f0161f95c880
|
[
"MIT"
] | null | null | null |
qualification_round_2017/C.py
|
asukakenji/codejam2018
|
a519f522337d7faf3d07a84f6e24f0161f95c880
|
[
"MIT"
] | null | null | null |
# code jam: Qualification Round 2017: Problem C. Bathroom Stalls
def read_int():
return int(raw_input())
def read_int_n():
return map(int, raw_input().split())
def get_y_z(n, k):
if k == 1:
if n & 1 == 0:
# Even Number
return n >> 1, (n >> 1) - 1
else:
# Odd Number
return n >> 1, n >> 1
else:
if n & 1 == 0:
# Even Number
if k & 1 == 0:
# Even Number
return get_y_z(n >> 1, k >> 1)
else:
# Odd Number
return get_y_z((n >> 1) - 1, k >> 1)
else:
# Odd Number
return get_y_z(n >> 1, k >> 1)
T = read_int()
x = 1
while x <= T:
N, K = read_int_n()
y, z = get_y_z(N, K)
print 'Case #{}: {} {}'.format(x, y, z)
x += 1
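# Illustrative traces of get_y_z, derived by following the recursion above:
#   N=4, K=2    -> get_y_z(2, 1) -> (1, 0)
#   N=5, K=2    -> get_y_z(2, 1) -> (1, 0)
#   N=1000, K=1 -> (500, 499)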
| 23.027027
| 64
| 0.419014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 156
| 0.183099
|
e1842583bfd3115c7825344cdde05a9fbfaf3644
| 1,143
|
py
|
Python
|
tests/integration/modules/test_vmc_vm_stats.py
|
kdsalvy/salt-ext-modules-vmware-1
|
9fdc941692e4c526f575f33b2ce23c1470582934
|
[
"Apache-2.0"
] | 10
|
2021-11-02T20:24:44.000Z
|
2022-03-11T05:54:27.000Z
|
tests/integration/modules/test_vmc_vm_stats.py
|
waynew/salt-ext-modules-vmware
|
9f693382772061676c846c850df6ff508b7f3a91
|
[
"Apache-2.0"
] | 83
|
2021-10-01T15:13:02.000Z
|
2022-03-31T16:22:40.000Z
|
tests/integration/modules/test_vmc_vm_stats.py
|
waynew/salt-ext-modules-vmware
|
9f693382772061676c846c850df6ff508b7f3a91
|
[
"Apache-2.0"
] | 15
|
2021-09-30T23:17:27.000Z
|
2022-03-23T06:54:22.000Z
|
"""
Integration Tests for vmc_vm_stats execution module
"""
import pytest
@pytest.fixture
def vm_id(salt_call_cli, vmc_vcenter_connect):
ret = salt_call_cli.run("vmc_sddc.get_vms", **vmc_vcenter_connect)
vm_obj = ret.json[0]
return vm_obj["vm"]
def test_get_cpu_stats_for_vm_smoke_test(salt_call_cli, vmc_vcenter_connect, vm_id):
ret = salt_call_cli.run(
"vmc_vm_stats.get", vm_id=vm_id, stats_type="cpu", **vmc_vcenter_connect
)
assert ret is not None
assert "error" not in ret.json
def test_get_memory_stats_for_vm_smoke_test(salt_call_cli, vmc_vcenter_connect, vm_id):
ret = salt_call_cli.run(
"vmc_vm_stats.get", vm_id=vm_id, stats_type="memory", **vmc_vcenter_connect
)
assert ret is not None
assert "error" not in ret.json
def test_get_memory_stats_when_vm_does_not_exist(salt_call_cli, vmc_vcenter_connect):
ret = salt_call_cli.run(
"vmc_vm_stats.get", vm_id="vm-abc", stats_type="memory", **vmc_vcenter_connect
)
assert ret is not None
result = ret.json
assert "error" in result
assert result["error"]["error_type"] == "NOT_FOUND"
| 30.078947
| 87
| 0.724409
| 0
| 0
| 0
| 0
| 182
| 0.15923
| 0
| 0
| 219
| 0.191601
|
e18531179510c781afbb8fd1aa7db0ab08f90a40
| 2,134
|
py
|
Python
|
uncompy3/decomp_dec.py
|
Alpha-Demon404/RE-14
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 39
|
2020-02-26T09:44:36.000Z
|
2022-03-23T00:18:25.000Z
|
uncompy3/decomp_dec.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 15
|
2020-05-14T10:07:26.000Z
|
2022-01-06T02:55:32.000Z
|
uncompy3/decomp_dec.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 41
|
2020-03-16T22:36:38.000Z
|
2022-03-17T14:47:19.000Z
|
# MENTOL
# At:Sun Nov 24 15:04:31 2019
if len(bytecode) == 0:
print('\x1b[1;93mbytecode is empty\nplease enter the bytecode\x1b[0m')
exit()
import marshal, sys, os, random, string, time
try:
from uncompyle6.main import decompile
except:
os.system('pip install uncompyle6')
from uncompyle6.main import decompile
def echo(text):
w = 'mhkbucp'
for z in w:
text = text.replace('+%s' % z, '\x1b[%d;1m' % (91 + w.index(z)))
text += '\x1b[0m'
text = text.replace('+0', '\x1b[0m')
print(text)
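# Illustrative note (not part of the original script): echo() maps the markers
# '+m', '+h', '+k', '+b', '+u', '+c', '+p' to the ANSI colour codes '\x1b[91;1m' ...
# '\x1b[97;1m' (91 + index in 'mhkbucp') and '+0' to the reset code '\x1b[0m', so e.g.
# echo('+merror+0 done') prints 'error' in bright red followed by ' done' in the
# default colour.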
def run(text):
try:
w = 'mhkbucp'
for z in w:
text = text.replace('+%s' % z, '\x1b[%d;1m' % (91 + w.index(z)))
text += '\x1b[0m'
text = text.replace('+0', '\x1b[0m')
for i in text + '\n':
sys.stdout.write(i)
sys.stdout.flush()
time.sleep(0.01)
except (KeyboardInterrupt, EOFError):
exit('')
n = ''.join((random.choice(string.ascii_lowercase) for _ in range(4)))
fl = n + '-dec.py'
logo = '\n+m 888 +h,8,"88b,\n+m e88 888 ,e e, e88\'888 e88 88e 888 888 8e +p888 88e Y8b Y888P +h " ,88P\'\n+md888 888 d88 88b d888 \'8 d888 888b 888 888 88b +p888 888b Y8b Y8P +h C8K\n+mY888 888 888 , Y888 , Y888 888P 888 888 888 +p888 888P Y8b Y +h e `88b,\n+m "88 888 "YeeP" "88,e8\' "88 88" 888 888 888 +p888 88" 888 +h"8",88P\'\n +p888 888\n +p888 888+p\n\t\t+ccoded by: +pZhu Bai Lee AKA AnonyMass\n\t\t+cteam : +pBlack Coder Crush'
def decom():
try:
os.system('clear')
echo(logo)
x = decompile(3.7, marshal.loads(bytecode), open(fl, 'w'))
run('\t\t\t+hdecompile successful :)+p')
run('\t\t\t+hfile saved: +p' + fl)
exit()
except Exception as e:
try:
os.system('clear')
echo(logo)
echo('+mdecompile failed+p')
exit()
finally:
e = None
del e
decom()
| 32.830769
| 643
| 0.49672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 971
| 0.455014
|
e189d16da8174a3b154f79a433e1c07828f194cc
| 650
|
py
|
Python
|
tests/unit/proxy/roundtrip/test_janus_graph_proxy.py
|
joshthoward/amundsenmetadatalibrary
|
87e2b44f0e44ca643f087bff6bd6b39d4ae9e9ad
|
[
"Apache-2.0"
] | 3
|
2021-02-09T13:52:03.000Z
|
2022-02-26T02:36:02.000Z
|
tests/unit/proxy/roundtrip/test_janus_graph_proxy.py
|
joshthoward/amundsenmetadatalibrary
|
87e2b44f0e44ca643f087bff6bd6b39d4ae9e9ad
|
[
"Apache-2.0"
] | 1
|
2021-02-08T23:21:04.000Z
|
2021-02-08T23:21:04.000Z
|
tests/unit/proxy/roundtrip/test_janus_graph_proxy.py
|
joshthoward/amundsenmetadatalibrary
|
87e2b44f0e44ca643f087bff6bd6b39d4ae9e9ad
|
[
"Apache-2.0"
] | 2
|
2021-02-23T18:23:35.000Z
|
2022-03-18T15:12:25.000Z
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Mapping
import unittest
from .abstract_gremlin_proxy_tests import abstract_gremlin_proxy_test_class
from .roundtrip_janusgraph_proxy import RoundtripJanusGraphProxy
class JanusGraphGremlinProxyTest(
abstract_gremlin_proxy_test_class(), unittest.TestCase): # type: ignore
def _create_gremlin_proxy(self, config: Mapping[str, Any]) -> RoundtripJanusGraphProxy:
# Don't use PROXY_HOST, PROXY_PORT, PROXY_PASSWORD. They might not be JanusGraph
return RoundtripJanusGraphProxy(host=config['JANUS_GRAPH_URL'])
| 40.625
| 91
| 0.803077
| 368
| 0.566154
| 0
| 0
| 0
| 0
| 0
| 0
| 198
| 0.304615
|
e18aabf2262561727c96e58389bdf2da5dd573c7
| 3,829
|
py
|
Python
|
scripts/performance/config.py
|
atsgen/tf-test
|
2748fcd81491450c75dadc71849d2a1c11061029
|
[
"Apache-2.0"
] | 5
|
2020-09-29T00:36:57.000Z
|
2022-02-16T06:51:32.000Z
|
scripts/performance/config.py
|
atsgen/tf-test
|
2748fcd81491450c75dadc71849d2a1c11061029
|
[
"Apache-2.0"
] | 27
|
2019-11-02T02:18:34.000Z
|
2022-02-24T18:49:08.000Z
|
scripts/performance/config.py
|
atsgen/tf-test
|
2748fcd81491450c75dadc71849d2a1c11061029
|
[
"Apache-2.0"
] | 20
|
2019-11-28T16:02:25.000Z
|
2022-01-06T05:56:58.000Z
|
from builtins import str
from builtins import range
from builtins import object
import os
import fixtures
import testtools
from vn_test import VNFixture
from vm_test import VMFixture
from common.connections import ContrailConnections
from policy_test import PolicyFixture
from policy.config import AttachPolicyFixture
from time import sleep
from tcutils.commands import ssh, execute_cmd, execute_cmd_out
class ConfigPerformance(object):
def config_vm(self, vn_fix, vm_name, node_name=None, image_name='ubuntu-netperf', flavor='contrail_flavor_large'):
vm_fixture = self.useFixture(VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=vn_fix.obj, vm_name=vm_name, node_name=node_name, image_name=image_name, flavor=flavor))
return vm_fixture
def set_cpu_performance(self, hosts):
sessions = {}
cmd = 'for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ; do echo performance > $f; cat $f; done'
for i in range(0, 2):
session = ssh(hosts[i]['host_ip'], hosts[i]['username'], hosts[i]['password'])
execute_cmd(session, cmd, self.logger)
return
def start_tcp_dump(self, vm_fixture):
sessions =[]
vm_name = vm_fixture.vm_name
host = self.inputs.host_data[vm_fixture.vm_node_ip]
inspect_h = self.agent_inspect[vm_fixture.vm_node_ip]
tapintf = inspect_h.get_vna_tap_interface_by_ip(vm_fixture.vm_ip)[0]['name']
pcap = '/tmp/%s.pcap' % tapintf
cmd = "sudo tcpdump -ni %s udp -w %s" % (tapintf, pcap)
session = ssh(host['host_ip'], host['username'], host['password'])
self.logger.info("Staring tcpdump to capture the packets.")
execute_cmd(session, cmd, self.logger)
sessions.extend((session, pcap))
return sessions
def stop_tcp_dump(self, sessions):
self.logger.info("Waiting for the tcpdump write to complete.")
sleep(30)
cmd = 'sudo kill $(pidof tcpdump)'
execute_cmd(sessions[0], cmd, self.logger)
execute_cmd(sessions[0], 'sync', self.logger)
cmd = 'sudo tcpdump -r %s | wc -l' % sessions[1]
out, err = execute_cmd_out(sessions[0], cmd, self.logger)
count = int(out.strip('\n'))
#cmd = 'rm -f %s' % sessions[1]
#execute_cmd(sessions[0], cmd, self.logger)
return count
def changeEncap_setting(self, encap1='MPLSoUDP', encap2='MPLSoGRE', encap3='VXLAN'):
self.logger.info('Deleting any Encap before continuing')
out=self.connections.delete_vrouter_encap()
if ( out!='No config id found'):
self.addCleanup(self.connections.set_vrouter_config_encap,out[0],out[1],out[2])
self.logger.info('Setting new Encap before continuing')
config_id=self.connections.set_vrouter_config_encap(encap1, encap2, encap3)
self.logger.info('Created.UUID is %s'%(config_id))
self.addCleanup(self.connections.delete_vrouter_encap)
encap_list_to_be_configured = [str(encap1),str(encap2),str(encap3)]
encap_list_configured=self.connections.read_vrouter_config_encap()
if encap_list_to_be_configured != encap_list_configured:
self.logger.error( "Configured Encap Priority order is NOT matching with expected order. Configured: %s,\
Expected: %s" %(encap_list_configured, encap_list_to_be_configured))
assert False
else:
self.logger.info( "Configured Encap Priority order is matching with expected order. Configured: %s,\
Expected: %s" %(encap_list_configured,encap_list_to_be_configured))
return
| 47.271605
| 121
| 0.659441
| 3,422
| 0.893706
| 0
| 0
| 0
| 0
| 0
| 0
| 917
| 0.239488
|
e18bc1c9af4bc77f3e745030b60675eb8770630a
| 4,048
|
py
|
Python
|
deepspeech_pytorch/testing.py
|
Chudbrochil/deepspeech.pytorch-2.1
|
d5d01e33ef383edb79c6a5b1584c134587108deb
|
[
"MIT"
] | 13
|
2022-01-25T01:26:56.000Z
|
2022-03-18T00:46:38.000Z
|
deepspeech_pytorch/testing.py
|
Chudbrochil/deepspeech.pytorch-2.1
|
d5d01e33ef383edb79c6a5b1584c134587108deb
|
[
"MIT"
] | null | null | null |
deepspeech_pytorch/testing.py
|
Chudbrochil/deepspeech.pytorch-2.1
|
d5d01e33ef383edb79c6a5b1584c134587108deb
|
[
"MIT"
] | 1
|
2021-03-03T06:14:21.000Z
|
2021-03-03T06:14:21.000Z
|
import hydra
import torch
from tqdm import tqdm
from deepspeech_pytorch.configs.inference_config import EvalConfig
from deepspeech_pytorch.decoder import GreedyDecoder
from deepspeech_pytorch.loader.data_loader import SpectrogramDataset, AudioDataLoader
from deepspeech_pytorch.utils import load_model, load_decoder
@torch.no_grad()
def evaluate(cfg: EvalConfig):
device = torch.device("cuda" if cfg.model.cuda else "cpu")
model = load_model(device=device,
model_path=cfg.model.model_path,
use_half=cfg.model.use_half)
decoder = load_decoder(labels=model.labels,
cfg=cfg.lm)
target_decoder = GreedyDecoder(model.labels,
blank_index=model.labels.index('_'))
test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
manifest_filepath=hydra.utils.to_absolute_path(cfg.test_manifest),
labels=model.labels,
normalize=True)
test_loader = AudioDataLoader(test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers)
wer, cer, output_data = run_evaluation(test_loader=test_loader,
device=device,
model=model,
decoder=decoder,
target_decoder=target_decoder,
save_output=cfg.save_output,
verbose=cfg.verbose,
use_half=cfg.model.use_half)
print('Test Summary \t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(wer=wer, cer=cer))
if cfg.save_output:
torch.save(output_data, hydra.utils.to_absolute_path(cfg.save_output))
@torch.no_grad()
def run_evaluation(test_loader,
device,
model,
decoder,
target_decoder,
save_output=None,
verbose=False,
use_half=False):
model.eval()
total_cer, total_wer, num_tokens, num_chars = 0, 0, 0, 0
output_data = []
for i, (data) in tqdm(enumerate(test_loader), total=len(test_loader)):
inputs, targets, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
inputs = inputs.to(device)
if use_half:
inputs = inputs.half()
# unflatten targets
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
out, output_sizes = model(inputs, input_sizes)
decoded_output, _ = decoder.decode(out, output_sizes)
target_strings = target_decoder.convert_to_strings(split_targets)
if save_output is not None:
# add output to data array, and continue
output_data.append((out.cpu(), output_sizes, target_strings))
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
wer_inst = decoder.wer(transcript, reference)
cer_inst = decoder.cer(transcript, reference)
total_wer += wer_inst
total_cer += cer_inst
num_tokens += len(reference.split())
num_chars += len(reference.replace(' ', ''))
if verbose:
print("Ref:", reference.lower())
print("Hyp:", transcript.lower())
print("WER:", float(wer_inst) / len(reference.split()),
"CER:", float(cer_inst) / len(reference.replace(' ', '')), "\n")
wer = float(total_wer) / num_tokens
cer = float(total_cer) / num_chars
return wer * 100, cer * 100, output_data
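# Illustrative example of the final scaling (not in the original code): if the decoder
# made 5 word errors over num_tokens = 100 reference words and 12 character errors over
# num_chars = 400 reference characters, the function returns WER = 5.0 and CER = 3.0
# (both expressed as percentages), plus the optional per-batch output_data list.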
| 42.610526
| 104
| 0.561759
| 0
| 0
| 0
| 0
| 3,725
| 0.920208
| 0
| 0
| 178
| 0.043972
|
e18d4a143a071cc6ef045ea47a03a5bc0de604f9
| 483
|
py
|
Python
|
api/aps3/tasklist/tasklist/main.py
|
Gustavobb/megadados
|
6a653314e0c93c866ec86be2119d64bf297d2f5a
|
[
"MIT"
] | null | null | null |
api/aps3/tasklist/tasklist/main.py
|
Gustavobb/megadados
|
6a653314e0c93c866ec86be2119d64bf297d2f5a
|
[
"MIT"
] | null | null | null |
api/aps3/tasklist/tasklist/main.py
|
Gustavobb/megadados
|
6a653314e0c93c866ec86be2119d64bf297d2f5a
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-module-docstring
from fastapi import FastAPI
from .routers import task, user
tags_metadata = [
{
'name': 'task',
'description': 'Operations related to tasks.',
},
]
app = FastAPI(
title='Task list',
description='Task-list project for the **Megadados** course',
openapi_tags=tags_metadata,
)
app.include_router(task.router, prefix='/task', tags=['task'])
app.include_router(user.router, prefix='/user', tags=['user'])
| 23
| 65
| 0.672878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.376812
|
e18d760e51cdf1f8ab9695881861681dcd4595c4
| 214
|
py
|
Python
|
silver_bullet/contain_value.py
|
Hojung-Jeong/Silver-Bullet-Encryption-Tool
|
5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7
|
[
"Apache-2.0"
] | null | null | null |
silver_bullet/contain_value.py
|
Hojung-Jeong/Silver-Bullet-Encryption-Tool
|
5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7
|
[
"Apache-2.0"
] | null | null | null |
silver_bullet/contain_value.py
|
Hojung-Jeong/Silver-Bullet-Encryption-Tool
|
5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7
|
[
"Apache-2.0"
] | null | null | null |
'''
>List of functions
1. contain(value, limit) - keeps a value within the range 0 to limit by wrapping it once
'''
def contain(value,limit):
if value<0:
return value+limit
elif value>=limit:
return value-limit
else:
return value
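# Illustrative behaviour (assuming limit = 26, e.g. a 26-letter alphabet):
#   contain(-3, 26)  -> 23
#   contain(27, 26)  -> 1
#   contain(5, 26)   -> 5
# Note the wrap is applied only once, so inputs are expected to lie within one period of the range.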
| 16.461538
| 62
| 0.705607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.415888
|
e18de74748857a558fcaeea0ccf6405de06e2047
| 6,388
|
py
|
Python
|
code/train.py
|
TONGJINGLV/dagan
|
4b6701f0a31026d9a45ab988a645f0a2249cc45c
|
[
"MIT"
] | null | null | null |
code/train.py
|
TONGJINGLV/dagan
|
4b6701f0a31026d9a45ab988a645f0a2249cc45c
|
[
"MIT"
] | null | null | null |
code/train.py
|
TONGJINGLV/dagan
|
4b6701f0a31026d9a45ab988a645f0a2249cc45c
|
[
"MIT"
] | null | null | null |
from data import NumericalField, CategoricalField, Iterator
from data import Dataset
from synthesizer import MaskGenerator_MLP, ObservedGenerator_MLP, Discriminator, Handler, ObservedGenerator_LSTM
from random import choice
import multiprocessing
import pandas as pd
import numpy as np
import torch
import argparse
import json
import os
parameters_space = {
"batch_size":[64, 128, 256],
"z_dim":[100, 200, 300],
"gen_num_layers":[1,2,3],
"gen_hidden_dim":[100, 200, 300, 400],
"gen_feature_dim":[100, 200, 300, 400, 500],
"gen_lstm_dim":[100,200,300,400,500],
"dis_hidden_dim":[100, 200, 300],
"dis_num_layers":[1,2,3],
"lr":[0.0001,0.0002,0.0005],
"cp":[0.01],
"dis_train_num" :[1, 2, 5]
}
def parameter_search(gen_model):
param = {}
param["batch_size"] = choice(parameters_space["batch_size"])
param["z_dim"] = choice(parameters_space["z_dim"])
param["mask_gen_hidden_dims"] = []
gen_num_layers = choice(parameters_space["gen_num_layers"])
for l in range(gen_num_layers):
dim = choice(parameters_space["gen_hidden_dim"])
if l > 0 and param["mask_gen_hidden_dims"][l-1] > dim:
dim = param["mask_gen_hidden_dims"][l-1]
param["mask_gen_hidden_dims"].append(dim)
if gen_model == "MLP":
param["obs_gen_hidden_dims"] = []
gen_num_layers = choice(parameters_space["gen_num_layers"])
for l in range(gen_num_layers):
dim = choice(parameters_space["gen_hidden_dim"])
if l > 0 and param["obs_gen_hidden_dims"][l-1] > dim:
dim = param["obs_gen_hidden_dims"][l-1]
param["obs_gen_hidden_dims"].append(dim)
elif gen_model == "LSTM":
param["obs_gen_feature_dim"] = choice(parameters_space["gen_feature_dim"])
param["obs_gen_lstm_dim"] = choice(parameters_space["gen_lstm_dim"])
param["obs_dis_hidden_dims"] = []
dis_num_layers = choice(parameters_space["dis_num_layers"])
for l in range(dis_num_layers):
dim = choice(parameters_space["dis_hidden_dim"])
if l > 0 and param["obs_dis_hidden_dims"][l-1] < dim:
dim = param["obs_dis_hidden_dims"][l-1]
param["obs_dis_hidden_dims"].append(dim)
param["mask_dis_hidden_dims"] = []
dis_num_layers = choice(parameters_space["dis_num_layers"])
for l in range(dis_num_layers):
dim = choice(parameters_space["dis_hidden_dim"])
if l > 0 and param["mask_dis_hidden_dims"][l-1] < dim:
dim = param["mask_dis_hidden_dims"][l-1]
param["mask_dis_hidden_dims"].append(dim)
param["lr"] = choice(parameters_space["lr"])
param["cp"] = choice(parameters_space["cp"])
param["dis_train_num"] = choice(parameters_space["dis_train_num"])
return param
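# Example draw (values are random; shown only to illustrate the shape of the returned dict):
#   param = parameter_search(gen_model="MLP")
#   # -> {"batch_size": 128, "z_dim": 200,
#   #     "mask_gen_hidden_dims": [200, 300], "obs_gen_hidden_dims": [100, 400],
#   #     "obs_dis_hidden_dims": [200], "mask_dis_hidden_dims": [100, 100],
#   #     "lr": 0.0002, "cp": 0.01, "dis_train_num": 2}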
def thread_run(path, search, config, source_dst, target_dst, GPU):
if config["rand_search"] == "yes":
param = parameter_search(gen_model=config["gen_model"])
else:
param = config["param"]
with open(path+"exp_params.json", "a") as f:
json.dump(param, f)
f.write("\n")
source_it = Iterator(dataset=source_dst, batch_size=param["batch_size"], shuffle=False, labels=config["labels"], mask=config["source_mask"])
target_it = Iterator(dataset=target_dst, batch_size=param["batch_size"], shuffle=False, labels=config["labels"], mask=config["target_mask"])
x_dim = source_it.data.shape[1]
col_ind = source_dst.col_ind
col_dim = source_dst.col_dim
col_type = source_dst.col_type
mask_dim = target_it.masks.shape[1]
if config["Gm"] == "yes":
mask_gen = MaskGenerator_MLP(param["z_dim"], x_dim, param["mask_gen_hidden_dims"], mask_dim)
mask_dis = Discriminator(mask_dim, param["mask_dis_hidden_dims"], c_dim=x_dim, condition=True)
else:
mask_gen = None
mask_dis = None
if config["Gx"] == "yes":
if config["gen_model"] == "LSTM":
obs_gen = ObservedGenerator_LSTM(param["z_dim"], param["obs_gen_feature_dim"], param["obs_gen_lstm_dim"], col_dim, col_type, col_ind, x_dim, mask_dim)
elif config["gen_model"] == "MLP":
obs_gen = ObservedGenerator_MLP(param["z_dim"], param["obs_gen_hidden_dims"], x_dim, mask_dim, col_type, col_ind)
else:
obs_gen = None
obs_dis = Discriminator(x_dim, param["obs_dis_hidden_dims"])
print(mask_gen)
print(mask_dis)
print(obs_gen)
print(obs_dis)
handler = Handler(source_it, target_it, source_dst, path)
if mask_gen is None and obs_gen is None:
handler.translate(mask_gen, obs_gen, param["z_dim"], path+"translate_{}".format(search), GPU=True, repeat=1)
else:
mask_gen, obs_gen, mask_dis, obs_dis = handler.train(mask_gen, obs_gen, mask_dis, obs_dis, param, config, search, GPU=GPU)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('configs', help='a json config file')
parser.add_argument('gpu', default=0)
args = parser.parse_args()
gpu = int(args.gpu)
if gpu >= 0:
GPU = True
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
else:
GPU = False
with open(args.configs) as f:
configs = json.load(f)
    try:
        os.mkdir("expdir")
    except FileExistsError:
        pass
for config in configs:
path = "expdir/"+config["name"]+"/"
        try:
            os.mkdir("expdir/"+config["name"])
        except FileExistsError:
            pass
source = pd.read_csv(config["source"])
target = pd.read_csv(config["target"])
fields = []
col_type = []
if "label" in config.keys():
cond = config["label"]
for i, col in enumerate(list(source)):
if "label" in config.keys() and col in cond:
fields.append((col, CategoricalField("one-hot", noise=0)))
col_type.append("condition")
elif i in config["normalize_cols"]:
fields.append((col,NumericalField("normalize")))
col_type.append("normalize")
elif i in config["gmm_cols"]:
fields.append((col, NumericalField("gmm", n=5)))
col_type.append("gmm")
elif i in config["one-hot_cols"]:
fields.append((col, CategoricalField("one-hot", noise=0)))
col_type.append("one-hot")
elif i in config["ordinal_cols"]:
fields.append((col, CategoricalField("dict")))
col_type.append("ordinal")
source_dst, target_dst = Dataset.split(
fields = fields,
path = ".",
col_type = col_type,
train = config["source"],
validation = config["target"],
format = "csv",
)
source_dst.learn_convert()
target_dst.learn_convert()
print("source row : {}".format(len(source_dst)))
print("target row: {}".format(len(target_dst)))
n_search = config["n_search"]
jobs = [multiprocessing.Process(target=thread_run, args=(path, search, config, source_dst, target_dst, GPU)) for search in range(n_search)]
for j in jobs:
j.start()
for j in jobs:
j.join()
| 32.426396
| 153
| 0.706951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,468
| 0.229806
|
e18f60246990c305ae61d8fc90992d5ea5e04b27
| 4,054
|
py
|
Python
|
front-end/testsuite-python-lib/Python-3.1/Lib/test/test_pep277.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
front-end/testsuite-python-lib/Python-3.1/Lib/test/test_pep277.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | null | null | null |
front-end/testsuite-python-lib/Python-3.1/Lib/test/test_pep277.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2019-04-11T11:27:01.000Z
|
2019-04-11T11:27:01.000Z
|
# Test the Unicode versions of normal file functions
# open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import sys, os, unittest
from test import support
if not os.path.supports_unicode_filenames:
raise unittest.SkipTest("test works only on NT+")
filenames = [
'abc',
'ascii',
'Gr\xfc\xdf-Gott',
'\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
'\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
'\u306b\u307d\u3093',
'\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
'\u66e8\u66e9\u66eb',
'\u66e8\u05e9\u3093\u0434\u0393\xdf',
]
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(str(dirname)):
os.unlink(os.path.join(dirname, fname))
os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
files = [os.path.join(support.TESTFN, f) for f in filenames]
def setUp(self):
try:
os.mkdir(support.TESTFN)
except OSError:
pass
for name in self.files:
f = open(name, 'wb')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
def tearDown(self):
deltree(support.TESTFN)
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
try:
fn(filename)
raise support.TestFailed("Expected to fail calling '%s(%r)'"
% (fn.__name__, filename))
except expected_exception as details:
if check_fn_in_exception and details.filename != filename:
raise support.TestFailed("Function '%s(%r) failed with "
"bad filename in the exception: %r"
% (fn.__name__, filename,
details.filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
            # listdir may append a wildcard to the filename, so don't check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'wb')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
def test_listdir(self):
f1 = os.listdir(support.TESTFN)
f2 = os.listdir(str(support.TESTFN.encode("utf-8"),
sys.getfilesystemencoding()))
sf2 = set("\\".join((str(support.TESTFN), f))
for f in f2)
self.failUnlessEqual(len(f1), len(self.files))
self.failUnlessEqual(sf2, set(self.files))
def test_rename(self):
for name in self.files:
os.rename(name,"tmp")
os.rename("tmp",name)
def test_directory(self):
dirname = os.path.join(support.TESTFN,'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = '\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
f = open(filename, 'wb')
f.write((filename + '\n').encode("utf-8"))
f.close()
os.access(filename,os.R_OK)
os.remove(filename)
os.chdir(oldwd)
os.rmdir(dirname)
def test_main():
try:
support.run_unittest(UnicodeFileTests)
finally:
deltree(support.TESTFN)
if __name__ == "__main__":
test_main()
| 35.252174
| 88
| 0.583128
| 2,829
| 0.697829
| 0
| 0
| 0
| 0
| 0
| 0
| 1,018
| 0.25111
|
e18feeef1d44b1fb5822da8d7afcebfd40c0bfe3
| 1,336
|
py
|
Python
|
test/unit/test_utils.py
|
managedbyq/q-dbt
|
01f1918fe5cbf3036b7197b8e3211960403718f3
|
[
"Apache-2.0"
] | 1
|
2018-06-20T17:51:20.000Z
|
2018-06-20T17:51:20.000Z
|
test/unit/test_utils.py
|
managedbyq/q-dbt
|
01f1918fe5cbf3036b7197b8e3211960403718f3
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_utils.py
|
managedbyq/q-dbt
|
01f1918fe5cbf3036b7197b8e3211960403718f3
|
[
"Apache-2.0"
] | 1
|
2018-10-18T18:45:38.000Z
|
2018-10-18T18:45:38.000Z
|
import unittest
import dbt.utils
class TestDeepMerge(unittest.TestCase):
def test__simple_cases(self):
cases = [
{'args': [{}, {'a': 1}],
'expected': {'a': 1},
'description': 'one key into empty'},
{'args': [{}, {'b': 1}, {'a': 1}],
'expected': {'a': 1, 'b': 1},
'description': 'three merges'},
]
for case in cases:
actual = dbt.utils.deep_merge(*case['args'])
self.assertEquals(
case['expected'], actual,
'failed on {} (actual {}, expected {})'.format(
case['description'], actual, case['expected']))
class TestMerge(unittest.TestCase):
def test__simple_cases(self):
cases = [
{'args': [{}, {'a': 1}],
'expected': {'a': 1},
'description': 'one key into empty'},
{'args': [{}, {'b': 1}, {'a': 1}],
'expected': {'a': 1, 'b': 1},
'description': 'three merges'},
]
for case in cases:
actual = dbt.utils.deep_merge(*case['args'])
self.assertEquals(
case['expected'], actual,
'failed on {} (actual {}, expected {})'.format(
case['description'], actual, case['expected']))
| 30.363636
| 67
| 0.450599
| 1,296
| 0.97006
| 0
| 0
| 0
| 0
| 0
| 0
| 376
| 0.281437
|
e191203fe2262ef390df012127682a8cc60f4320
| 1,006
|
py
|
Python
|
results.py
|
ejnnr/steerable_pdo_experiments
|
17902e56641cefe305b935c8733b45aa066bf068
|
[
"BSD-3-Clause"
] | null | null | null |
results.py
|
ejnnr/steerable_pdo_experiments
|
17902e56641cefe305b935c8733b45aa066bf068
|
[
"BSD-3-Clause"
] | null | null | null |
results.py
|
ejnnr/steerable_pdo_experiments
|
17902e56641cefe305b935c8733b45aa066bf068
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
from pathlib import Path
import numpy as np
import yaml
# this script takes in a folder path and then recursively collects all
# results.yaml files in that directory. It averages them and prints
# summary statistics
parser = argparse.ArgumentParser(description="Analyze the results")
parser.add_argument("path", type=str, help="path to the folder containing the results")
args = parser.parse_args()
results = []
keys = set()
for path in Path(args.path).rglob("results.yaml"):
with open(path, "r") as file:
results.append(yaml.safe_load(file))
keys = keys.union(results[-1].keys())
print(f"Found {len(results)} files with {len(keys)} different metrics\n")
output = {}
for key in keys:
vals = [result[key] for result in results if key in result]
n = len(vals)
mean = float(np.mean(vals))
std = float(np.std(vals))
output[key] = {
"N runs": n,
"mean": round(mean, 3),
"std": round(std, 3)
}
print(yaml.dump(output))
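# Example (hypothetical files): if runs/a/results.yaml and runs/b/results.yaml each contain
#   accuracy: 0.91
#   loss: 0.30
# then `python results.py runs` reports, per metric, the number of runs, the mean and the std.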
| 25.794872
| 87
| 0.672962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 329
| 0.327038
|
e19165bb436973e6fab40bd344665853870c9891
| 4,541
|
py
|
Python
|
Pgnet.py
|
rs-lsl/Pgnet
|
b31de7c93619a40bfb194bda0ad2889e732c1db6
|
[
"MIT"
] | 2
|
2021-12-27T06:27:56.000Z
|
2022-03-12T05:19:59.000Z
|
Pgnet.py
|
rs-lsl/Pgnet
|
b31de7c93619a40bfb194bda0ad2889e732c1db6
|
[
"MIT"
] | null | null | null |
Pgnet.py
|
rs-lsl/Pgnet
|
b31de7c93619a40bfb194bda0ad2889e732c1db6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: lsl
E-mail: cug_lsl@cug.edu.cn
"""
import sys
import argparse
sys.path.append("/home/aistudio/code")
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import time
from Pgnet_structure import Pg_net
from Pgnet_dataset import Mydata
from loss_function import SAMLoss
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# hyper parameters
test_batch_size = 1
parser = argparse.ArgumentParser(description='Paddle Pgnet')
# model
parser.add_argument('--model', type=str, default='Pgnet')
# dataset
parser.add_argument('--dataset', type=str, default='WV2')
# train
parser.add_argument('--in_nc', type=int, default=126, help='number of input image channels')
parser.add_argument('--endmember', type=int, default=20, help='number of endmember')
parser.add_argument('--batch_size', type=int, default=15, help='training batch size')
parser.add_argument('--num_epochs', type=int, default=500, help='number of training epochs')
parser.add_argument('--lr', type=float, default=2e-3, help='learning rate')
parser.add_argument('--resume', type=str, default='', help='path to model checkpoint')
parser.add_argument('--start_epoch', type=int, default=1, help='restart epoch number for training')
parser.add_argument('--momentum', type=float, default=0.05, help='momentum')
parser.add_argument('--step', type=int, default=100,
help='Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=100')
# test
parser.add_argument('--test', type=bool, default=False, help='test')
parser.add_argument('--load_para', type=bool, default=False, help='if load model parameters')
parser.add_argument('--test_batch_size', type=int, default=1, help='testing batch size')
opt = parser.parse_args()
print(opt)
def Pgnet(train_hs_image, train_hrpan_image, train_label,
test_hs_image, test_hrpan_image, test_label,
ratio=16):
opt.in_nc = train_hs_image.shape[1]
print(train_hs_image.shape)
print(test_hs_image.shape)
# define data and model
dataset0 = Mydata(train_hs_image, train_hrpan_image, train_label)
train_loader = data.DataLoader(dataset0, num_workers=0, batch_size=opt.batch_size,
shuffle=True, drop_last=True)
dataset1 = Mydata(test_hs_image, test_hrpan_image, test_label)
test_loader = data.DataLoader(dataset1, num_workers=0, batch_size=opt.test_batch_size,
shuffle=False, drop_last=False)
model = Pg_net(band=opt.in_nc, endmember_num=opt.endmember, ratio=ratio).to(device)
print("Total number of paramerters in networks is {} ".format(sum(x.numel() for x in model.parameters())))
L2_loss = nn.MSELoss()
samloss = SAMLoss()
optimizer = optim.Adam(lr=opt.lr, params=model.parameters())
scheduler = optim.lr_scheduler.StepLR(optimizer, opt.step, gamma=opt.momentum)
for epoch in range(opt.num_epochs):
time0 = time.time()
loss_total = 0.0
model.train()
for i, (images_hs, images_pan, labels) in enumerate(train_loader):
images_hs = images_hs.to(device, dtype=torch.float32)
images_pan = images_pan.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.float32)
optimizer.zero_grad()
result = model(images_hs, images_pan)
loss_l2 = L2_loss(result, labels)
loss_sam = samloss(result, labels)
loss = loss_l2 + 0.01*loss_sam
loss.backward()
optimizer.step()
loss_total += loss.item()
if ((epoch+1) % 10) == 0:
print('epoch %d of %d, using time: %.2f , loss of train: %.4f' %
(epoch + 1, opt.num_epochs, time.time() - time0, loss_total))
scheduler.step()
# torch.save(model.state_dict(), 'model.pth')
# testing model
if opt.load_para:
model.load_state_dict(torch.load("model.pth"))
model.eval()
image_all = []
with torch.no_grad():
for (images_hs, images_pan, _) in test_loader:
images_hs = images_hs.to(device, dtype=torch.float32)
images_pan = images_pan.to(device, dtype=torch.float32)
outputs_temp = model(images_hs, images_pan)
image_all.append(outputs_temp)
a = torch.cat(image_all, 0)
return a
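# Call sketch (tensor shapes are assumptions, e.g. batched NCHW hyperspectral cubes and panchromatic images):
#   fused = Pgnet(train_hs, train_pan, train_label, test_hs, test_pan, test_label, ratio=16)
# `fused` is the concatenation of the network outputs over the whole test loader.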
| 36.620968
| 120
| 0.657564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 885
| 0.194891
|
e1927fcb892725b69d50542f139eaa6330088fdc
| 14,716
|
py
|
Python
|
tracing/plugins/ath10k_pktlog.py
|
lumag/qca-swiss-army-knife
|
5ede3cc07e9a52f115101c28f833242b772eeaab
|
[
"ISC"
] | 47
|
2016-05-20T02:33:26.000Z
|
2022-03-02T01:48:57.000Z
|
tracing/plugins/ath10k_pktlog.py
|
lumag/qca-swiss-army-knife
|
5ede3cc07e9a52f115101c28f833242b772eeaab
|
[
"ISC"
] | 7
|
2020-04-09T13:40:56.000Z
|
2022-01-24T19:18:50.000Z
|
tracing/plugins/ath10k_pktlog.py
|
lumag/qca-swiss-army-knife
|
5ede3cc07e9a52f115101c28f833242b772eeaab
|
[
"ISC"
] | 41
|
2016-04-19T06:31:14.000Z
|
2022-03-30T06:25:09.000Z
|
#
# Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# trace-cmd pktlog plugin for ath10k, QCA Linux wireless driver
#
# TODO:
#
# o create a class for struct ieee80211_hdr and the other packet headers with
# pack() and unpack() methods
import struct
import binascii
DEBUG = 1
CUR_PKTLOG_VER = 10010
PKTLOG_MAGIC_NUM = 7735225
IEEE80211_FCTL_TODS = 0x0100
IEEE80211_FCTL_FROMDS = 0x0200
TARGET_NUM_MSDU_DESC = (1024 + 400)
MAX_PKT_INFO_MSDU_ID = 192
MAX_10_4_PKT_INFO_MSDU_ID = 1
PKTLOG_MAX_TXCTL_WORDS = 57
# must match with enum ath10k_hw_rev from ath10k and existing values
# should not change
ATH10K_PKTLOG_HW_QCA988X = 0
ATH10K_PKTLOG_HW_QCA6174 = 1
ATH10K_PKTLOG_HW_QCA99X0 = 2
ATH10K_PKTLOG_HW_QCA9888 = 3
ATH10K_PKTLOG_HW_QCA9984 = 4
ATH10K_PKTLOG_HW_QCA9377 = 5
ATH10K_PKTLOG_HW_QCA40XX = 6
ATH10K_PKTLOG_HW_QCA9887 = 7
ATH10K_PKTLOG_TYPE_TX_CTRL = 1
ATH10K_PKTLOG_TYPE_TX_STAT = 2
ATH10K_PKTLOG_TYPE_TX_MSDU_ID = 3
ATH10K_PKTLOG_TYPE_TX_FRM_HDR = 4
ATH10K_PKTLOG_TYPE_RX_STAT = 5
ATH10K_PKTLOG_TYPE_RC_FIND = 6
ATH10K_PKTLOG_TYPE_RC_UPDATE = 7
ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR = 8
ATH10K_PKTLOG_TYPE_DBG_PRINT = 9
ATH10K_PKTLOG_FLG_TYPE_LOCAL_S = 0
ATH10K_PKTLOG_FLG_TYPE_REMOTE_S = 1
ATH10K_PKTLOG_FLG_TYPE_CLONE_S = 2
ATH10K_PKTLOG_FLG_TYPE_UNKNOWN_S = 3
# sizeof(ath10k_pktlog_txctl) = 12 + 4 * 57
ATH10K_PKTLOG_TXCTL_LEN = 240
ATH10K_PKTLOG_MAX_TXCTL_WORDS = 57
# sizeof(ath10k_pktlog_10_4_txctl) = 12 + 4 * 153
ATH10K_PKTLOG_10_4_TXCTL_LEN = 624
ATH10K_PKTLOG_10_4_MAX_TXCTL_WORDS = 153
msdu_len_tbl = {}
output_file = None
frm_hdr = None
def dbg(msg):
if DEBUG == 0:
return
print msg
def hexdump(buf, prefix=None):
s = binascii.b2a_hex(buf)
s_len = len(s)
result = ""
if prefix is None:
prefix = ""
for i in range(s_len / 2):
if i % 16 == 0:
result = result + ("%s%04x: " % (prefix, i))
result = result + (s[2 * i] + s[2 * i + 1] + " ")
if (i + 1) % 16 == 0:
result = result + "\n"
# FIXME: if len(s) % 16 == 0 there's an extra \n in the end
return result
# struct ath10k_pktlog_hdr {
# unsigned short flags;
# unsigned short missed_cnt;
# unsigned short log_type;
# unsigned short size;
# unsigned int timestamp;
# unsigned char payload[0];
# } __attribute__((__packed__));
class Ath10kPktlogHdr:
# 2 + 2 + 2 + 2 + 4 = 12
hdr_len = 12
struct_fmt = '<HHHHI'
def unpack(self, buf, offset=0):
(self.flags, self.missed_cnt, self.log_type,
self.size, self.timestamp) = struct.unpack_from(self.struct_fmt, buf, 0)
payload_len = len(buf) - self.hdr_len
if payload_len < self.size:
raise Exception('Payload length invalid: %d != %d' %
(payload_len, self.size))
self.payload = buf[self.hdr_len:]
# excludes payload, you have to write that separately!
def pack(self):
return struct.pack(self.struct_fmt,
self.flags,
self.missed_cnt,
self.log_type,
self.size,
self.timestamp)
def __str__(self):
return 'flags %04x miss %d log_type %d size %d timestamp %d\n' % \
(self.flags, self.missed_cnt,
self.log_type, self.size, self.timestamp)
def __init__(self):
self.flags = 0
self.missed_cnt = 0
self.log_type = 0
self.size = 0
self.timestamp = 0
self.payload = []
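# Round-trip sketch for the header class above (field values are illustrative):
#   hdr = Ath10kPktlogHdr()
#   hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
#   buf = hdr.pack()      # 12 bytes ('<HHHHI'), matching hdr_len
#   hdr.unpack(buf)       # parses the fixed fields; anything after hdr_len is kept as payload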
# struct ath10k_pktlog_10_4_hdr {
# unsigned short flags;
# unsigned short missed_cnt;
# unsigned short log_type;
# unsigned short size;
# unsigned int timestamp;
# unsigned int type_specific_data;
# unsigned char payload[0];
# } __attribute__((__packed__));
class Ath10kPktlog_10_4_Hdr:
# 2 + 2 + 2 + 2 + 4 + 4 = 16
hdr_len = 16
struct_fmt = '<HHHHII'
def unpack(self, buf, offset=0):
(self.flags, self.missed_cnt, self.log_type,
self.size, self.timestamp, self.type_specific_data) = struct.unpack_from(self.struct_fmt, buf, 0)
payload_len = len(buf) - self.hdr_len
if payload_len != self.size:
raise Exception('Payload length invalid: %d != %d' %
(payload_len, self.size))
self.payload = buf[self.hdr_len:]
# excludes payload, you have to write that separately!
def pack(self):
return struct.pack(self.struct_fmt,
self.flags,
self.missed_cnt,
self.log_type,
self.size,
self.timestamp,
self.type_specific_data)
def __str__(self):
return 'flags %04x miss %d log_type %d size %d timestamp %d type_specific_data %d\n' % \
(self.flags, self.missed_cnt, self.log_type,
self.size, self.timestamp, self.type_specific_data)
def __init__(self):
self.flags = 0
self.missed_cnt = 0
self.log_type = 0
self.size = 0
self.timestamp = 0
self.type_specific_data = 0
self.payload = []
def output_open():
global output_file
# apparently no way to close the file as the python plugin doesn't
# have unregister() callback
output_file = open('pktlog.dat', 'wb')
buf = struct.pack('II', PKTLOG_MAGIC_NUM, CUR_PKTLOG_VER)
output_write(buf)
def output_write(buf):
global output_file
output_file.write(buf)
def pktlog_tx_frm_hdr(frame):
global frm_hdr
try:
# struct ieee80211_hdr
(frame_control, duration_id, addr1a, addr1b, addr1c, addr2a, addr2b, addr2c,
addr3a, addr3b, addr3c, seq_ctrl) = struct.unpack_from('<HHI2BI2BI2BH', frame, 0)
except struct.error as e:
dbg('failed to parse struct ieee80211_hdr: %s' % (e))
return
if frame_control & IEEE80211_FCTL_TODS:
bssid_tail = (addr1b << 8) | addr1c
sa_tail = (addr2b << 8) | addr2c
da_tail = (addr3b << 8) | addr3c
elif frame_control & IEEE80211_FCTL_FROMDS:
bssid_tail = (addr2b << 8) | addr2c
sa_tail = (addr3b << 8) | addr3c
da_tail = (addr1b << 8) | addr1c
else:
bssid_tail = (addr3b << 8) | addr3c
sa_tail = (addr2b << 8) | addr2c
da_tail = (addr1b << 8) | addr1c
resvd = 0
frm_hdr = struct.pack('HHHHHH', frame_control, seq_ctrl, bssid_tail,
sa_tail, da_tail, resvd)
dbg('frm_hdr %d B' % len(frm_hdr))
def pktlog_tx_ctrl(buf, hw_type):
global frm_hdr
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
hdr.unpack(buf)
hdr.size = ATH10K_PKTLOG_TXCTL_LEN
num_txctls = ATH10K_PKTLOG_MAX_TXCTL_WORDS
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX,
ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.unpack(buf)
hdr.size = ATH10K_PKTLOG_10_4_TXCTL_LEN
num_txctls = ATH10K_PKTLOG_10_4_MAX_TXCTL_WORDS
output_write(hdr.pack())
# write struct ath10k_pktlog_frame
if frm_hdr:
output_write(frm_hdr)
else:
tmp = struct.pack('HHHHHH', 0, 0, 0, 0, 0, 0)
output_write(tmp)
txdesc_ctl = hdr.payload[0:]
for i in range(num_txctls):
if len(txdesc_ctl) >= 4:
txctl, = struct.unpack_from('<I', txdesc_ctl)
txdesc_ctl = txdesc_ctl[4:]
else:
txctl = 0
output_write(struct.pack('I', txctl))
def pktlog_tx_msdu_id(buf, hw_type):
global msdu_len_tbl
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
hdr.unpack(buf)
hdr.size = 4 + (192 / 8) + 2 * 192
# write struct ath10k_pktlog_hdr
output_write(hdr.pack())
# parse struct msdu_id_info
# hdr (12) + num_msdu (4) + bound_bmap (24) = 40
msdu_info = hdr.payload[0:28]
id = hdr.payload[28:]
num_msdu, = struct.unpack_from('I', msdu_info)
output_write(msdu_info)
max_pkt_info_msdu_id = MAX_PKT_INFO_MSDU_ID
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX,
ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.unpack(buf)
# write struct ath10k_pktlog_10_4_hdr
output_write(hdr.pack())
# parse struct msdu_id_info
# hdr (16) + num_msdu (4) + bound_bmap (1) = 21
msdu_info = hdr.payload[0:5]
id = hdr.payload[5:]
num_msdu, = struct.unpack_from('I', msdu_info)
output_write(msdu_info)
max_pkt_info_msdu_id = MAX_10_4_PKT_INFO_MSDU_ID
for i in range(max_pkt_info_msdu_id):
if num_msdu > 0:
num_msdu = num_msdu - 1
msdu_id, = struct.unpack_from('<H', id)
id = id[2:]
if msdu_id not in msdu_len_tbl:
dbg('msdu_id %d not found from msdu_len_tbl' % (msdu_id))
msdu_len = 0
else:
msdu_len = msdu_len_tbl[msdu_id]
else:
msdu_len = 0
output_write(struct.pack('H', msdu_len))
def ath10k_htt_pktlog_handler(pevent, trace_seq, event):
hw_type = int(event.get('hw_type', ATH10K_PKTLOG_HW_QCA988X))
buf = event['pktlog'].data
offset = 0
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX,
ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.unpack(buf, offset)
offset = offset + hdr.hdr_len
trace_seq.puts('%s\n' % (hdr))
if hdr.log_type == ATH10K_PKTLOG_TYPE_TX_FRM_HDR:
pktlog_tx_frm_hdr(buf[hdr.hdr_len:])
elif hdr.log_type == ATH10K_PKTLOG_TYPE_TX_CTRL:
pktlog_tx_ctrl(buf, hw_type)
elif hdr.log_type == ATH10K_PKTLOG_TYPE_TX_MSDU_ID:
pktlog_tx_msdu_id(buf, hw_type)
elif hdr.log_type == ATH10K_PKTLOG_TYPE_TX_STAT or \
hdr.log_type == ATH10K_PKTLOG_TYPE_RX_STAT or \
hdr.log_type == ATH10K_PKTLOG_TYPE_RC_FIND or \
hdr.log_type == ATH10K_PKTLOG_TYPE_RC_UPDATE:
output_write(buf[0: offset + hdr.size])
else:
pass
def ath10k_htt_rx_desc_handler(pevent, trace_seq, event):
hw_type = int(event.get('hw_type', ATH10K_PKTLOG_HW_QCA988X))
rxdesc = event['rxdesc'].data
trace_seq.puts('len %d\n' % (len(rxdesc)))
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
hdr.flags = (1 << ATH10K_PKTLOG_FLG_TYPE_REMOTE_S)
hdr.missed_cnt = 0
hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
# rx_desc size for QCA988x chipsets is 248
hdr.size = 248
output_write(hdr.pack())
output_write(rxdesc[0: 32])
output_write(rxdesc[36: 56])
output_write(rxdesc[76: 208])
output_write(rxdesc[228:])
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.flags = (1 << ATH10K_PKTLOG_FLG_TYPE_REMOTE_S)
hdr.missed_cnt = 0
hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
hdr.type_specific_data = 0
hdr.size = len(rxdesc)
output_write(hdr.pack())
output_write(rxdesc)
elif hw_type in [ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.flags = (1 << ATH10K_PKTLOG_FLG_TYPE_REMOTE_S)
hdr.missed_cnt = 0
hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
hdr.type_specific_data = 0
# rx_desc size for QCA9984 and QCA9889 chipsets is 296
hdr.size = 296
output_write(hdr.pack())
output_write(rxdesc[0: 4])
output_write(rxdesc[4: 8])
output_write(rxdesc[12: 24])
output_write(rxdesc[24: 40])
output_write(rxdesc[44: 84])
output_write(rxdesc[100: 104])
output_write(rxdesc[104: 144])
output_write(rxdesc[144: 256])
output_write(rxdesc[292:])
def ath10k_htt_tx_handler(pevent, trace_seq, event):
global msdu_len_tbl
msdu_id = long(event['msdu_id'])
msdu_len = long(event['msdu_len'])
trace_seq.puts('msdu_id %d msdu_len %d\n' % (msdu_id, msdu_len))
if msdu_id > TARGET_NUM_MSDU_DESC:
dbg('Invalid msdu_id in tx: %d' % (msdu_id))
return
msdu_len_tbl[msdu_id] = msdu_len
def ath10k_txrx_tx_unref_handler(pevent, trace_seq, event):
global msdu_len_tbl
msdu_id = long(event['msdu_id'])
trace_seq.puts('msdu_id %d\n' % (msdu_id))
if msdu_id > TARGET_NUM_MSDU_DESC:
dbg('Invalid msdu_id from unref: %d' % (msdu_id))
return
msdu_len_tbl[msdu_id] = 0
def ath10k_tx_hdr_handler(pevent, trace_seq, event):
buf = event['data'].data
pktlog_tx_frm_hdr(buf[0:])
def register(pevent):
output_open()
pevent.register_event_handler("ath10k", "ath10k_htt_pktlog",
lambda *args:
ath10k_htt_pktlog_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_htt_rx_desc",
lambda *args:
ath10k_htt_rx_desc_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_htt_tx",
lambda *args:
ath10k_htt_tx_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_txrx_tx_unref",
lambda *args:
ath10k_txrx_tx_unref_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_tx_hdr",
lambda *args:
ath10k_tx_hdr_handler(pevent, *args))
| 31.177966
| 106
| 0.632509
| 2,603
| 0.176882
| 0
| 0
| 0
| 0
| 0
| 0
| 2,983
| 0.202705
|
e1936080a3e49021025bc658618c985bef1143b7
| 731
|
py
|
Python
|
tgtypes/models/venue.py
|
autogram/tgtypes
|
90f8d0d35d3c372767508e56c20777635e128e38
|
[
"MIT"
] | null | null | null |
tgtypes/models/venue.py
|
autogram/tgtypes
|
90f8d0d35d3c372767508e56c20777635e128e38
|
[
"MIT"
] | null | null | null |
tgtypes/models/venue.py
|
autogram/tgtypes
|
90f8d0d35d3c372767508e56c20777635e128e38
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from ._base import TelegramObject
if TYPE_CHECKING: # pragma: no cover
from .location import Location
class Venue(TelegramObject):
"""
This object represents a venue.
Source: https://core.telegram.org/bots/api#venue
"""
location: Location
"""Venue location"""
title: str
"""Name of the venue"""
address: str
"""Address of the venue"""
foursquare_id: Optional[str] = None
"""Foursquare identifier of the venue"""
foursquare_type: Optional[str] = None
"""Foursquare type of the venue. (For example, 'arts_entertainment/default',
'arts_entertainment/aquarium' or 'food/icecream'.)"""
| 25.206897
| 80
| 0.689466
| 540
| 0.738714
| 0
| 0
| 0
| 0
| 0
| 0
| 362
| 0.495212
|
e19376010ad54eee24bb9ce56fbcb72f63a795cb
| 2,457
|
py
|
Python
|
nsa.py
|
hypervis0r/nsaproductgenerator
|
e384717e0eb2746bd58d041963b49e4772192f0a
|
[
"MIT"
] | null | null | null |
nsa.py
|
hypervis0r/nsaproductgenerator
|
e384717e0eb2746bd58d041963b49e4772192f0a
|
[
"MIT"
] | null | null | null |
nsa.py
|
hypervis0r/nsaproductgenerator
|
e384717e0eb2746bd58d041963b49e4772192f0a
|
[
"MIT"
] | 1
|
2022-02-25T13:06:14.000Z
|
2022-02-25T13:06:14.000Z
|
import random
import argparse
# TODO: Parse word lists from files
words = {
"codenames_adjective": [
"quantum",
"loud",
"red",
"blue",
"green",
"yellow",
"irate",
"angry",
"peeved",
"happy",
"slimy",
"sleepy",
"junior",
"slicker",
"united",
"somber",
"bizarre",
"odd",
"weird",
"wrong",
"latent",
"chilly",
"strange",
"loud",
"silent",
"hopping",
"orange",
"violet",
"violent",
"desolate",
"lone",
"cold",
"solemn",
"raging",
"intelligent",
"american",
],
"codenames_noun": [
"matrix",
"wolf",
"solace",
"whisper",
"felony",
"moon",
"sucker",
"penguin",
"waffle",
"maestro",
"night",
"trinity",
"deity",
"monkey",
"ark",
"squirrel",
"iron",
"bounce",
"farm",
"chef",
"trough",
"net",
"trawl",
"glee",
"water",
"spork",
"plow",
"feed",
"souffle",
"route",
"bagel",
"montana",
"analyst",
"auto",
"watch",
"photo",
"yard",
"source",
"monkey",
"seagull",
"toll",
"spawn",
"gopher",
"chipmunk",
"set",
"calendar",
"artist",
"chaser",
"scan",
"tote",
"beam",
"entourage",
"genesis",
"walk",
"spatula",
"rage",
"fire",
"master"
],
"codenames_suffix": [
" {}000",
"-II",
" {}.0",
" rev{}",
"-HX",
" v{}",
]
}
def parse_args():
parser = argparse.ArgumentParser(description='Generate NSA TAO project names')
parser.add_argument("-n", "--num", required=False, type=int, default=1, help="Number of project names to generate")
return parser.parse_args()
def pick_random(arr):
return arr[random.randrange(len(arr))]
def generate_tao_name():
name = ""
name += pick_random(words["codenames_adjective"]).upper()
name += pick_random(words["codenames_noun"]).upper()
    # 1-in-5 chance of appending a suffix
if (random.randrange(5) == 4):
suffix = pick_random(words["codenames_suffix"])
suffix = suffix.format(random.randrange(1, 9))
name += suffix
return name
def main(args):
for _ in range(args.num):
print("%s" % generate_tao_name())
if __name__ == "__main__":
main(parse_args())
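# Sample invocations (output is random; the names shown only illustrate the format):
#   python nsa.py            -> e.g. QUANTUMWOLF
#   python nsa.py --num 3    -> e.g. ANGRYPENGUIN v4, REDMATRIX, SILENTGOPHER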
| 16.944828
| 119
| 0.483923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,011
| 0.411477
|
e195525885c756f6c1eaa22f28ac15deda8bb369
| 2,886
|
py
|
Python
|
bob/blitz/extension.py
|
bioidiap/bob.blitz
|
348d7cf3866b549cac576efc3c6f3df24245d9fd
|
[
"BSD-3-Clause"
] | null | null | null |
bob/blitz/extension.py
|
bioidiap/bob.blitz
|
348d7cf3866b549cac576efc3c6f3df24245d9fd
|
[
"BSD-3-Clause"
] | 6
|
2015-01-01T09:15:28.000Z
|
2016-10-20T08:09:26.000Z
|
bob/blitz/extension.py
|
bioidiap/bob.blitz
|
348d7cf3866b549cac576efc3c6f3df24245d9fd
|
[
"BSD-3-Clause"
] | 3
|
2015-08-05T12:16:45.000Z
|
2018-02-01T19:55:40.000Z
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Mon 18 Nov 21:38:19 2013
"""Extension building for using this package
"""
import numpy
from pkg_resources import resource_filename
from bob.extension import Extension as BobExtension
# forward the build_ext command from bob.extension
from bob.extension import build_ext, Library as BobLibrary
from distutils.version import LooseVersion
class Extension(BobExtension):
"""Extension building with pkg-config packages and blitz.array.
See the documentation for :py:class:`distutils.extension.Extension` for more
details on input parameters.
"""
def __init__(self, *args, **kwargs):
"""Initialize the extension with parameters.
This extension adds ``blitz>=0.10`` as a requirement for extensions derived
from this class.
See the help for :py:class:`bob.extension.Extension` for more details on
options.
"""
require = ['blitz>=0.10', 'boost']
kwargs.setdefault('packages', []).extend(require)
self_include_dir = resource_filename(__name__, 'include')
kwargs.setdefault('system_include_dirs', []).append(numpy.get_include())
kwargs.setdefault('include_dirs', []).append(self_include_dir)
macros = [
("PY_ARRAY_UNIQUE_SYMBOL", "BOB_NUMPY_C_API"),
("NO_IMPORT_ARRAY", "1"),
]
if LooseVersion(numpy.__version__) >= LooseVersion('1.7'):
macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"))
kwargs.setdefault('define_macros', []).extend(macros)
# Run the constructor for the base class
BobExtension.__init__(self, *args, **kwargs)
class Library (BobLibrary):
"""Pure C++ library building with blitz array.
See the documentation for :py:class:`bob.extension.Extension` for more
details on input parameters.
"""
def __init__(self, *args, **kwargs):
"""Initialize the library with parameters.
This library adds ``blitz>=0.10`` as a requirement for library derived
from this class.
See the help for :py:class:`bob.extension.Library` for more details on
options.
"""
require = ['blitz>=0.10', 'boost']
kwargs.setdefault('packages', []).extend(require)
self_include_dir = resource_filename(__name__, 'include')
kwargs.setdefault('system_include_dirs', []).append(numpy.get_include())
kwargs.setdefault('include_dirs', []).append(self_include_dir)
# TODO: are these macros required for pure C++ builds?
macros = [
("PY_ARRAY_UNIQUE_SYMBOL", "BOB_NUMPY_C_API"),
("NO_IMPORT_ARRAY", "1"),
]
if LooseVersion(numpy.__version__) >= LooseVersion('1.7'):
macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"))
kwargs.setdefault('define_macros', []).extend(macros)
# Run the constructor for the base class
BobLibrary.__init__(self, *args, **kwargs)
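# Minimal setup.py sketch using the Extension class above (project and file names are hypothetical):
#   from setuptools import setup
#   from bob.blitz.extension import Extension, build_ext
#   setup(
#       name='bob.example',
#       ext_modules=[Extension('bob.example._library', ['bob/example/main.cpp'])],
#       cmdclass={'build_ext': build_ext},
#   )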
| 31.032258
| 79
| 0.698545
| 2,450
| 0.848926
| 0
| 0
| 0
| 0
| 0
| 0
| 1,562
| 0.541234
|
e195971c01d6f8dcda846bd7ff1f32bb1f7099e8
| 4,248
|
py
|
Python
|
src/RosGazeboLibrary/Gazebo.py
|
hielsnoppe/robotframework-rosgazebolibrary
|
a91d48413d4af95856964644b149898b538c6724
|
[
"Apache-2.0"
] | null | null | null |
src/RosGazeboLibrary/Gazebo.py
|
hielsnoppe/robotframework-rosgazebolibrary
|
a91d48413d4af95856964644b149898b538c6724
|
[
"Apache-2.0"
] | null | null | null |
src/RosGazeboLibrary/Gazebo.py
|
hielsnoppe/robotframework-rosgazebolibrary
|
a91d48413d4af95856964644b149898b538c6724
|
[
"Apache-2.0"
] | null | null | null |
from robot.api.deco import keyword
from robot.libraries.BuiltIn import BuiltIn
class Gazebo(object):
"""Robot Framework test library for the Gazebo simulator
See also http://gazebosim.org/tutorials/?tut=ros_comm
== Table of contents ==
%TOC%
"""
ROBOT_LIBRARY_SCOPE = 'SUITE'
def __init__(self):
self.ros_lib = BuiltIn().get_library_instance('RosGazeboLibrary.ROS')
# Create and destroy models in simulation
# http://gazebosim.org/tutorials/?tut=ros_comm#Services:Createanddestroymodelsinsimulation
@keyword
def spawn_urdf_model(self, urdf_path: str, position: tuple, model_name: str):
''' TODO: Refactor to use service call '''
return self.ros_lib.rosrun('gazebo_ros', 'spawn_model', *[
'-file', urdf_path,
'-urdf',
'-model', model_name,
'-x', position[0],
'-y', position[1],
'-z', position[2],
])
@keyword
def spawn_sdf_model(self, sdf_path: str, position: tuple, model_name: str):
''' TODO: Refactor to use service call '''
return self.ros_lib.rosrun('gazebo_ros', 'spawn_model', *[
'-file', sdf_path,
'-sdf',
'-model', model_name,
'-x', position[0],
'-y', position[1],
'-z', position[2],
])
@keyword
def delete_model(self, model_name: str):
''' Delete a model from simulation
http://gazebosim.org/tutorials/?tut=ros_comm#DeleteModel
'''
return self.ros_lib.rosservice_call(
'gazebo/delete_model', 'gazebo_msgs/DeleteModel',
{ 'model_name': model_name }
)
# State and property setters
# http://gazebosim.org/tutorials/?tut=ros_comm#Services:Stateandpropertysetters
''' TODO
def set_link_properties(self, ...):
def set_physics_properties(self, ...):
def set_model_state(self, ...):
def set_model_configuration(self, ...):
def set_joint_properties(self, ...):
def set_link_state(self, ...):
'''
# State and property getters
# http://gazebosim.org/tutorials/?tut=ros_comm#Services:Stateandpropertygetters
@keyword
def get_model_properties(self, model_name: str):
return self.ros_lib.rosservice_call(
'gazebo/get_model_properties', 'gazebo_msgs/GetModelProperties',
{ 'model_name': model_name }
)
@keyword
def get_model_state(self, model_name: str):
return self.ros_lib.rosservice_call(
'gazebo/get_model_state', 'gazebo_msgs/GetModelState',
{ 'model_name': model_name }
)
''' TODO
def get_world_properties(self, ...):
def get_joint_properties(self, ...):
def get_link_properties(self, ...):
def get_link_state(self, ...):
def get_physics_properties(self, ...):
def link_states(self, ...): # investigate
def model_states(self, ...): # investigate
'''
# Force control
# http://gazebosim.org/tutorials/?tut=ros_comm#Services:Forcecontrol
''' TODO
/gazebo/apply_body_wrench
/gazebo/apply_joint_effort
/gazebo/clear_body_wrenches
/gazebo/clear_joint_forces
'''
# Simulation control
# http://gazebosim.org/tutorials/?tut=ros_comm#Services:Simulationcontrol
@keyword
def reset_simulation(self):
return self.ros_lib.rosservice_call('/gazebo/reset_simulation')
@keyword
def reset_world(self):
return self.ros_lib.rosservice_call('/gazebo/reset_world')
@keyword
def pause_physics(self):
return self.ros_lib.rosservice_call('/gazebo/pause_physics')
@keyword
def unpause_physics(self):
return self.ros_lib.rosservice_call('/gazebo/unpause_physics')
# Undocumented services
# Found via `rosservice list`
'''
/gazebo/delete_light
/gazebo/get_light_properties
/gazebo/get_loggers
/gazebo/set_light_properties
/gazebo/set_logger_level
/gazebo/set_parameters
/gazebo_gui/get_loggers
/gazebo_gui/set_logger_level
'''
# Convenience keywords
@keyword
def launch_empty_world(self):
return self.ros_lib.roslaunch('gazebo_ros', 'empty_world.launch')
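# Illustrative Robot Framework usage of the keywords above (paths and variables are hypothetical):
#   *** Settings ***
#   Library    RosGazeboLibrary.ROS
#   Library    RosGazeboLibrary.Gazebo
#   *** Test Cases ***
#   Spawn And Remove A Model
#       Launch Empty World
#       Spawn Urdf Model    /tmp/robot.urdf    ${POSITION}    my_robot
#       Get Model State     my_robot
#       Delete Model        my_robot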
| 29.296552
| 94
| 0.638889
| 4,168
| 0.981168
| 0
| 0
| 2,139
| 0.503531
| 0
| 0
| 2,368
| 0.557439
|
e1960d36e95888a08f212b033eea9c1cb048cffe
| 1,143
|
py
|
Python
|
tests/unit/test_hass.py
|
boonhapus/hautomate
|
f111a8ad86d5f07183903ec99c1981569e0ee046
|
[
"MIT"
] | null | null | null |
tests/unit/test_hass.py
|
boonhapus/hautomate
|
f111a8ad86d5f07183903ec99c1981569e0ee046
|
[
"MIT"
] | null | null | null |
tests/unit/test_hass.py
|
boonhapus/hautomate
|
f111a8ad86d5f07183903ec99c1981569e0ee046
|
[
"MIT"
] | null | null | null |
from ward import test, each, raises
from homeassistant.core import HomeAssistant
from hautomate.settings import HautoConfig
from hautomate import Hautomate
import pydantic
from tests.fixtures import cfg_data_hauto
@test('HomeAssistantConfig validates for {feed}', tags=['unit'])
async def _(
cfg_data=cfg_data_hauto,
feed=each('custom_component', 'custom_component', 'websocket'),
hass_cls=each(HomeAssistant, None, None)
):
if hass_cls is not None:
hass = hass_cls()
else:
hass = None
# overwrite any existing api configs
data = cfg_data.copy()
data['api_configs'] = {
'homeassistant': {
'feed': feed,
'hass_interface': hass,
'host': 'http://hautomate.boonhap.us',
'port': 8823,
'access_token': 'damn, granted!'
}
}
if feed == 'custom_component' and hass is None:
with raises(pydantic.ValidationError):
cfg = HautoConfig(**data)
else:
cfg = HautoConfig(**data)
hauto = Hautomate(cfg)
assert hauto.is_running is True
assert hauto.is_ready is False
| 26.581395
| 67
| 0.632546
| 0
| 0
| 0
| 0
| 924
| 0.808399
| 859
| 0.751531
| 270
| 0.23622
|
e19634e1c0e6ad67f639ff7b727b4525f8c022d4
| 1,186
|
py
|
Python
|
test/arguments/with_range_check_code/python/Bit4RangeCheckTest.py
|
dkBrazz/zserio
|
29dd8145b7d851fac682d3afe991185ea2eac318
|
[
"BSD-3-Clause"
] | 86
|
2018-09-06T09:30:53.000Z
|
2022-03-27T01:12:36.000Z
|
test/arguments/with_range_check_code/python/Bit4RangeCheckTest.py
|
dkBrazz/zserio
|
29dd8145b7d851fac682d3afe991185ea2eac318
|
[
"BSD-3-Clause"
] | 362
|
2018-09-04T20:21:24.000Z
|
2022-03-30T15:14:38.000Z
|
test/arguments/with_range_check_code/python/Bit4RangeCheckTest.py
|
dkBrazz/zserio
|
29dd8145b7d851fac682d3afe991185ea2eac318
|
[
"BSD-3-Clause"
] | 20
|
2018-09-10T15:59:02.000Z
|
2021-12-01T15:38:22.000Z
|
import unittest
import zserio
from testutils import getZserioApi
class Bit4RangeCheckTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "with_range_check_code.zs",
extraArgs=["-withRangeCheckCode"]).bit4_range_check
def testBit4LowerBound(self):
self._checkBit4Value(BIT4_LOWER_BOUND)
def testBit4UpperBound(self):
self._checkBit4Value(BIT4_UPPER_BOUND)
def testBit4BelowLowerBound(self):
with self.assertRaises(zserio.PythonRuntimeException):
self._checkBit4Value(BIT4_LOWER_BOUND - 1)
def testBit4AboveUpperBound(self):
with self.assertRaises(zserio.PythonRuntimeException):
self._checkBit4Value(BIT4_UPPER_BOUND + 1)
def _checkBit4Value(self, value):
bit4RangeCheckCompound = self.api.Bit4RangeCheckCompound(value_=value)
bitBuffer = zserio.serialize(bit4RangeCheckCompound)
readBit4RangeCheckCompound = zserio.deserialize(self.api.Bit4RangeCheckCompound, bitBuffer)
self.assertEqual(bit4RangeCheckCompound, readBit4RangeCheckCompound)
BIT4_LOWER_BOUND = 0
BIT4_UPPER_BOUND = 15
| 34.882353
| 99
| 0.73946
| 1,074
| 0.905565
| 0
| 0
| 189
| 0.159359
| 0
| 0
| 47
| 0.039629
|
e196eb274e00b4e5d8027a1161feb36eab5a1ff6
| 1,931
|
py
|
Python
|
src/MainAPP/migrations/0030_auto_20181211_1246.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | null | null | null |
src/MainAPP/migrations/0030_auto_20181211_1246.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | 9
|
2017-11-21T15:45:18.000Z
|
2022-02-11T03:37:54.000Z
|
src/MainAPP/migrations/0030_auto_20181211_1246.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | 1
|
2020-07-22T02:24:17.000Z
|
2020-07-22T02:24:17.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-12-11 11:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MainAPP', '0029_auto_20181211_1237'),
]
operations = [
migrations.AlterField(
model_name='sitesettings',
name='ESIOS_TOKEN',
field=models.CharField(blank=True, default='', help_text='The token assigned by the ESIOS service. You should ask for yours to: Consultas Sios <consultasios@ree.es>', max_length=50, verbose_name='Token for the ESIOS page'),
),
migrations.AlterField(
model_name='sitesettings',
name='IBERDROLA_PASSW',
field=models.CharField(blank=True, default='', help_text='Password registered on the Iberdrola Distribucion webpage', max_length=50, verbose_name='Iberdrola password'),
),
migrations.AlterField(
model_name='sitesettings',
name='IBERDROLA_USER',
field=models.CharField(blank=True, default='', help_text='Username registered into the Iberdrola Distribucion webpage', max_length=50, verbose_name='Iberdrola username'),
),
migrations.AlterField(
model_name='sitesettings',
name='OWM_TOKEN',
field=models.CharField(blank=True, default='', help_text='The token assigned by the OpenWeatherMap service. You should ask yours following https://openweathermap.org/appid', max_length=50, verbose_name='Token for the openweathermap page'),
),
migrations.AlterField(
model_name='sitesettings',
name='TELEGRAM_TOKEN',
field=models.CharField(blank=True, default='', help_text='The token assigned by the BothFather', max_length=50, verbose_name='Token for the telegram bot'),
),
]
| 47.097561
| 252
| 0.651476
| 1,765
| 0.914034
| 0
| 0
| 0
| 0
| 0
| 0
| 770
| 0.398757
|
e1970edc7bf4ebc76f1931f011d41021ea8563bf
| 18,954
|
py
|
Python
|
qdms/PulsedProgramming.py
|
3it-nano/QDMS
|
9ec2d4e198c00f394d8882517c4b3b336c7fe8c2
|
[
"MIT"
] | 1
|
2021-11-21T15:18:27.000Z
|
2021-11-21T15:18:27.000Z
|
qdms/PulsedProgramming.py
|
3it-nano/QDMS
|
9ec2d4e198c00f394d8882517c4b3b336c7fe8c2
|
[
"MIT"
] | null | null | null |
qdms/PulsedProgramming.py
|
3it-nano/QDMS
|
9ec2d4e198c00f394d8882517c4b3b336c7fe8c2
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
import time
class PulsedProgramming:
"""
This class contains all the parameters for the Pulsed programming on a memristor model.
After initializing the parameters values, start the simulation with self.simulate()
Parameters
----------
max_voltage : float
        The max voltage (V) of a pulse. If 0, no limit is applied.
pulse_algorithm : string
The pulse algorithm use. Those are the available choices (Sources in the methods). Default is 'fabien'.
'fabien' : Use fabien_convergence()
'log' : Use a log_convergence()
tolerance : float
        The tolerance_value input is an int that represents the absolute tolerance (Ohm) from the res_states the
pulsed programming will find. Smaller is more precise, but too small can never converge.
is_relative_tolerance : bool
If true, the tolerance_value would be in percentage instead of (Ohm). ex: 10 : if true, 10% : if false, 10 Ohm
variability_write : iterable[float]
A gaussian distribution with (mu=0, sigma=variance_write)
index_variability : int
Index of the current variability. If over 1000, reset to 0.
variance_write : float
Variance of the gaussian distribution on the memristor write. See variability.
graph_resistance : List[Union[float, int]]
Contains all resistance of the simulation. It's used in the creation of plots.
graph_voltages : List[Union[float, int]]
Contains all voltages of the simulation. It's used in the creation of plots.
number_of_reading : int
        The number of correct values read before passing to the next state.
max_pulse : int
The max number of pulses.
"""
def __init__(self, memristor_simulation, pulse_algorithm='fabien', max_voltage=0, tolerance=0, is_relative_tolerance=False,
variance_write=0, number_of_reading=1, max_pulse=20000, verbose=False, plot_memristor=0):
self.memristor_simulation = memristor_simulation
self.pulse_algorithm = pulse_algorithm
self.tolerance = tolerance
self.max_voltage = max_voltage
self.is_relative_tolerance = is_relative_tolerance
self.variance_write = variance_write
self.number_of_reading = number_of_reading
self.max_pulse = max_pulse
self.verbose = verbose
self.voltage_output = {}
self.plot_memristor = plot_memristor
self.index_variability = 0
self.variability_write = np.random.normal(0, variance_write, 1000)
self.graph_resistance = []
self.graph_voltages = []
def print(self):
print(self.pulse_algorithm)
print(self.tolerance)
print(self.max_voltage)
print(self.voltage_output)
print(self.is_relative_tolerance)
print(self.variance_write)
print(self.number_of_reading)
print(self.max_pulse)
print(self.verbose)
print(np.array(self.graph_resistance))
print(np.array(self.graph_voltages))
def write_resistance(self, memristor, voltage, t_pulse):
"""
        This function changes the resistance of the memristor by applying a voltage for t_pulse.
Parameters
----------
memristor : Memristor
The memristor wrote.
voltage : float
The voltage (V) applied.
t_pulse : float
The time of the writing pulse. (s)
Returns
----------
"""
t = int(t_pulse / memristor.time_series_resolution)
signal = [voltage] * t
memristor.simulate(signal)
self.index_variability = self.index_variability + 1 if self.index_variability < len(self.variability_write) - 1 else 0
memristor.g = 1 / (1 / memristor.g + (1 / memristor.g) * self.variability_write[self.index_variability])
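    # Illustrative single-pulse calls (voltage/width values mirror those used by the convergence
    # routines below; `mem` would be one of memristor_simulation.circuit.list_memristor):
    #   self.write_resistance(mem, 0.5, 200e-9)    # one 200 ns SET pulse at +0.5 V
    #   self.write_resistance(mem, -0.5, 200e-9)   # one 200 ns RESET pulse at -0.5 V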
def find_number_iteration(self):
"""
        This function finds the number of iterations needed to create the resistance list depending on the distribution type.
Returns
----------
number_iteration : int
number of iteration
"""
number_iteration = 1
if self.distribution_type == 'full_spread':
number_iteration = self.circuit.number_of_memristor
return number_iteration
def simulate(self, voltages_target, precision=None):
"""
This function will set the memristors to the resistance wanted in each voltages_target package.
Parameters
----------
voltages_target : dict
dict with keys as voltage and package as list of resistance
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
if self.pulse_algorithm != 'fabien' and self.pulse_algorithm != 'log':
            raise Exception(f'Pulse algorithm not supported: {self.pulse_algorithm}')
# voltages_target_list = list(voltages_target.keys())
# resolution = voltages_target_list[1] - voltages_target_list[0]
index = 1
conf_done = 0
start_time = time.time()
diff_voltage = {}
for v in list(voltages_target.keys()):
if index == 1:
start_time_ = time.time()
self.simulate_list_memristor(voltages_target[v], precision)
self.voltage_output[self.memristor_simulation.circuit.current_v_out()] = [i.read() for i in self.memristor_simulation.circuit.list_memristor]
diff_voltage[abs(v - self.memristor_simulation.circuit.current_v_out())] = [round(1 / np.sum([1/res for res in voltages_target[v]]), 4), round(1 / self.memristor_simulation.circuit.current_conductance(), 4)]
if index == 50 and self.verbose:
conf_done += index
print(f'Conf done: {conf_done}\tTook: {round(time.time() - start_time_, 2)} s\tTime left: {round((time.time() - start_time_) * (len(voltages_target.keys()) - conf_done) / 50, 2)} s')
index = 0
index += 1
if self.verbose:
print(f'Total time: {time.time() - start_time}')
print()
for key in diff_voltage.keys():
print(f'{round(key*1000, 4)} mV\t{diff_voltage.get(key)[0]}\t{diff_voltage.get(key)[1]} (Ohm)')
print(f'Mean diff: {np.mean(list(diff_voltage.keys()))}')
print(f'Min diff: {np.min(list(diff_voltage.keys()))}\tMax diff: {np.max(list(diff_voltage.keys()))}')
return self.voltage_output
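    # Illustrative shape of the simulate() inputs (numbers are made up):
    #   voltages_target = {0.05: [1200.0, 1500.0, 2000.0], 0.10: [1100.0, 1300.0, 1800.0]}
    #   pp.simulate(voltages_target, precision=[[10, True], [1, True]])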
def simulate_list_memristor(self, list_resistance, precision):
"""
This function will set the memristors to the resistance wanted list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if i == self.plot_memristor else False
if self.pulse_algorithm == 'fabien':
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
elif self.pulse_algorithm == 'log':
self.log_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
self.balance(list_resistance, precision)
def balance(self, list_resistance, precision):
"""
This function will set the memristors to the resistance wanted list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method. If 0,
won't do it.
"""
final_g = np.sum([1 / i for i in list_resistance])
delta_g = final_g - self.memristor_simulation.circuit.current_conductance()
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if -(i+1) == self.plot_memristor else False
final_res = 1 / (self.memristor_simulation.circuit.list_memristor[-(i+1)].g + delta_g)
if self.memristor_simulation.circuit.memristor_model.r_on <= final_res <= self.memristor_simulation.circuit.memristor_model.r_off:
p_tolerance, p_relative = self.tolerance, self.is_relative_tolerance
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
                if precision is not None and precision[0][0] != 0:
self.tolerance, self.is_relative_tolerance = precision[0][0], precision[0][1]
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
                if precision is not None and precision[1][0] != 0:
self.tolerance, self.is_relative_tolerance = precision[1][0], precision[1][1]
self.small_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
self.tolerance, self.is_relative_tolerance = p_tolerance, p_relative
break
def small_convergence(self, memristor, target_res, plot=False):
"""
        This function runs the pulsed programming with a variable voltage to set the target_res for the memristor with a
really small increment.
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
step = 0.001
positive_voltage = voltage_set = 0.1
negative_voltage = voltage_reset = -0.1
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
while not flag_finish:
current_res = memristor.read()
if res_min <= current_res <= res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res < res_min:
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
negative_voltage -= step
positive_voltage = voltage_set
elif current_res > res_max:
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
positive_voltage += step
negative_voltage = voltage_reset
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print(f'Got max pulse {self.max_pulse}')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
counter += 1
def log_convergence(self, memristor, target_res, plot=False):
"""
        This function runs the pulsed programming with a variable voltage to set target_res on the memristor.
From : https://arxiv.org/abs/2103.09931
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
positive_voltage = voltage_set = 0.5
negative_voltage = voltage_reset = -0.5
# additional parameters
min_shift = 0.005
max_shift = 0.2
a = 0.1
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
r_shift = 1
current_res = memristor.read()
while not flag_finish:
if res_min < current_res < res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res > res_max:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
positive_voltage += a * np.log10(abs(target_res - current_res) / r_shift)
elif r_shift > max_shift * (memristor.r_off - memristor.r_on):
positive_voltage = voltage_set
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
elif current_res < res_min:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
negative_voltage -= a * np.log10(abs((target_res - current_res) / r_shift))
elif r_shift > max_shift * (memristor.r_off - memristor.r_on):
negative_voltage = voltage_reset
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print('Got max pulse')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
counter += 1
previous_res = current_res
current_res = memristor.read()
r_shift = abs(current_res - previous_res) if abs(current_res - previous_res) != 0 else 1
def fabien_convergence(self, memristor, target_res, plot=False):
"""
        This function runs the pulsed programming with a variable voltage to set target_res on the memristor.
From : https://iopscience.iop.org/article/10.1088/0957-4484/23/7/075201
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
step = 0.005
positive_voltage = voltage_set = 0.5
negative_voltage = voltage_reset = -0.5
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
while not flag_finish:
current_res = memristor.read()
if res_min <= current_res <= res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res < res_min:
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
negative_voltage -= step
positive_voltage = voltage_set
elif current_res > res_max:
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
positive_voltage += step
negative_voltage = voltage_reset
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print('Got max pulse')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
# print(f'{self.graph_resistance[-1]}\t{self.graph_voltages[-1]}')
counter += 1
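# --- Hedged usage sketch (not part of the original file) ---
# A self-contained illustration of the convergence idea used by
# fabien_convergence(): read the device, compare it against a tolerance window
# around target_res, and apply growing set/reset pulses until the resistance
# falls inside the window. ToyMemristor and its write model are invented
# stand-ins for the real Memristor class; only the stepping logic mirrors the
# method above.
class ToyMemristor:
    def __init__(self, r=9000.0):
        self.r = r
    def read(self):
        return self.r
    def write(self, voltage):
        # Crude device model: positive pulses lower the resistance (set),
        # negative pulses raise it (reset).
        self.r -= 200.0 * voltage

def toy_convergence(mem, target_res, tolerance=1.0, step=0.005, max_pulse=1000):
    res_min = target_res - tolerance * target_res / 100   # relative tolerance window
    res_max = target_res + tolerance * target_res / 100
    positive_voltage, negative_voltage = 0.5, -0.5
    for _ in range(max_pulse):
        current_res = mem.read()
        if res_min <= current_res <= res_max:
            break
        if current_res > res_max:      # too resistive -> set pulse, grow the set voltage
            mem.write(positive_voltage)
            positive_voltage += step
            negative_voltage = -0.5
        else:                          # too conductive -> reset pulse, grow the reset voltage
            mem.write(negative_voltage)
            negative_voltage -= step
            positive_voltage = 0.5
    return mem.read()

# toy_convergence(ToyMemristor(9000.0), target_res=6000.0) ends within +/-1 % of 6000 Ohm
# under this simplified write model.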
| 43.672811
| 219
| 0.614013
| 18,908
| 0.997573
| 0
| 0
| 0
| 0
| 0
| 0
| 5,883
| 0.310383
|
e197f5d2fbd28b451da6017706229aff6b5fef77
| 462
|
py
|
Python
|
tests/test_push.py
|
associatedpress/datakit-dworld
|
21ccd0e468c7064d62022a2f136c0f8f47bbabb9
|
[
"ISC"
] | 2
|
2019-09-07T02:03:46.000Z
|
2021-03-06T14:43:01.000Z
|
tests/test_push.py
|
associatedpress/datakit-dworld
|
21ccd0e468c7064d62022a2f136c0f8f47bbabb9
|
[
"ISC"
] | 5
|
2019-09-06T22:24:26.000Z
|
2021-04-27T21:42:18.000Z
|
tests/test_push.py
|
associatedpress/datakit-dworld
|
21ccd0e468c7064d62022a2f136c0f8f47bbabb9
|
[
"ISC"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# from unittest import mock
# from datakit_dworld.push import Push
def test_push(capsys):
"""Sample pytest test function with a built-in pytest fixture as an argument.
"""
# cmd = Greeting(None, None, cmd_name='dworld push')
# parsed_args = mock.Mock()
# parsed_args.greeting = 'Hello world!'
# cmd.run(parsed_args)
# out, err = capsys.readouterr()
# assert 'Hello world!' in out
| 25.666667
| 81
| 0.655844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 396
| 0.857143
|
e19851b68758ad2c430ee4ac03534a5235c71909
| 1,742
|
py
|
Python
|
cool_filter.py
|
Andrea055/Imager
|
8463fb18253a97f0c93b11f36104881b7e003f41
|
[
"MIT"
] | 7
|
2019-03-31T00:02:42.000Z
|
2022-01-30T00:30:46.000Z
|
cool_filter.py
|
Andrea055/Imager
|
8463fb18253a97f0c93b11f36104881b7e003f41
|
[
"MIT"
] | 21
|
2018-11-29T14:35:08.000Z
|
2019-01-11T08:00:26.000Z
|
cool_filter.py
|
Andrea055/Imager
|
8463fb18253a97f0c93b11f36104881b7e003f41
|
[
"MIT"
] | 17
|
2018-11-27T01:15:29.000Z
|
2019-12-29T19:41:30.000Z
|
import cv2
import numpy as np
from scipy.interpolate import UnivariateSpline
class Cool(object):
"""cool_filter ---
    This class applies a cool filter to an image
    by giving the input image a sky-blue effect.
"""
def __init__(self):
# create look-up tables for increasing and decreasing red and blue resp.
self.increaseChannel = self.LUT_8UC1([0, 64, 128, 192, 256],
[0, 70, 140, 210, 256])
self.decreaseChannel = self.LUT_8UC1([0, 64, 128, 192, 256],
[0, 30, 80, 120, 192])
def resize(self,image,window_height = 500):
aspect_ratio = float(image.shape[1])/float(image.shape[0])
window_width = window_height/aspect_ratio
image = cv2.resize(image, (int(window_height),int(window_width)))
return image
def render(self, img_rgb):
img_rgb = cv2.imread(img_rgb)
img_rgb = self.resize(img_rgb, 500)
#cv2.imshow("Original", img_rgb)
r,g,b = cv2.split(img_rgb)
r = cv2.LUT(r, self.increaseChannel).astype(np.uint8)
b = cv2.LUT(b, self.decreaseChannel).astype(np.uint8)
img_rgb = cv2.merge((r,g,b))
# saturation decreased
h,s,v = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV))
s = cv2.LUT(s, self.decreaseChannel).astype(np.uint8)
return cv2.cvtColor(cv2.merge((h,s,v)), cv2.COLOR_HSV2RGB)
def LUT_8UC1(self, x, y):
#Create look-up table using scipy spline interpolation function
spl = UnivariateSpline(x, y)
return spl(range(256))
def start(self, img_path):
tmp_canvas = Cool() #make a temporary object
file_name = img_path #File_name will come here
res = tmp_canvas.render(file_name)
cv2.imwrite("Cool_version.jpg", res)
cv2.imshow("Cool version", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
print("Image saved as 'Cool_version.jpg'")
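# --- Hedged usage sketch (not part of the original file) ---
# A minimal illustration of the look-up-table idea used by Cool.LUT_8UC1():
# a spline interpolates a handful of control points into a 256-entry curve,
# and applying the curve to an 8-bit channel is plain numpy indexing. The toy
# channel values below are made up for the example.
import numpy as np
from scipy.interpolate import UnivariateSpline

def build_lut(x, y):
    # Same construction as Cool.LUT_8UC1: one output value per possible pixel level.
    return UnivariateSpline(x, y)(range(256))

increase = build_lut([0, 64, 128, 192, 256], [0, 70, 140, 210, 256])
channel = np.array([[0, 64], [128, 255]], dtype=np.uint8)   # toy 2x2 channel
boosted = np.clip(increase[channel], 0, 255).astype(np.uint8)
# Each pixel value v is replaced by increase[v], brightening the channel roughly
# by the ratio defined by the control points (e.g. 64 -> ~70).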
| 32.259259
| 74
| 0.703215
| 1,663
| 0.95465
| 0
| 0
| 0
| 0
| 0
| 0
| 427
| 0.245121
|
e198e752d01863b4604a77f7225e25fec572d794
| 623
|
py
|
Python
|
src/evaluators/sample_evaluators/swd_sample_evaluator.py
|
gmum/cwae-pytorch
|
7fb31a5d12a0a637be7dde76f0e11e80ec4a345d
|
[
"MIT"
] | 4
|
2020-08-20T20:51:24.000Z
|
2022-01-26T23:56:35.000Z
|
src/evaluators/sample_evaluators/swd_sample_evaluator.py
|
gmum/cwae-pytorch
|
7fb31a5d12a0a637be7dde76f0e11e80ec4a345d
|
[
"MIT"
] | null | null | null |
src/evaluators/sample_evaluators/swd_sample_evaluator.py
|
gmum/cwae-pytorch
|
7fb31a5d12a0a637be7dde76f0e11e80ec4a345d
|
[
"MIT"
] | 1
|
2021-12-24T14:13:40.000Z
|
2021-12-24T14:13:40.000Z
|
import torch
from metrics.swd import sliced_wasserstein_distance
from evaluators.sample_evaluators.base_sample_evaluator import BaseSampleEvaluator
from noise_creator import NoiseCreator
class SWDSampleEvaluator(BaseSampleEvaluator):
def __init__(self, noise_creator: NoiseCreator):
self.__noise_creator = noise_creator
def evaluate(self, sample: torch.Tensor) -> torch.Tensor:
comparision_sample = self.__noise_creator.create(sample.size(0)).type_as(sample)
swd_penalty_value = sliced_wasserstein_distance(sample, comparision_sample, 50)
return swd_penalty_value
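# --- Hedged sketch (not part of the original file) ---
# The project-local metrics.swd.sliced_wasserstein_distance is not shown here;
# the function below only illustrates the usual recipe behind the name: project
# both samples onto random directions, sort the projections, and average the
# squared differences. The real implementation may differ in detail.
import torch

def sliced_wasserstein_sketch(x: torch.Tensor, y: torch.Tensor, n_projections: int = 50) -> torch.Tensor:
    # x, y: (batch, dim) tensors of the same shape.
    directions = torch.randn(x.size(1), n_projections, device=x.device)
    directions = directions / directions.norm(dim=0, keepdim=True)
    px = (x @ directions).sort(dim=0).values   # (batch, n_projections)
    py = (y @ directions).sort(dim=0).values
    return ((px - py) ** 2).mean()

# Example: sliced_wasserstein_sketch(torch.randn(128, 8), torch.randn(128, 8)) gives a
# small scalar, while shifting one of the samples raises the value.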
| 38.9375
| 89
| 0.781701
| 426
| 0.683788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e199a6e60b58553046744ab0013002524ed3e824
| 473
|
py
|
Python
|
topic-db/topicdb/core/models/language.py
|
anthcp-infocom/Contextualise
|
0136660fcb965fd70fb4c7a33de7973a69ee9fec
|
[
"MIT"
] | 184
|
2019-01-10T03:50:50.000Z
|
2022-03-31T19:45:16.000Z
|
topic-db/topicdb/core/models/language.py
|
anthcp-infocom/Contextualise
|
0136660fcb965fd70fb4c7a33de7973a69ee9fec
|
[
"MIT"
] | 11
|
2019-04-07T07:39:11.000Z
|
2022-02-17T13:29:32.000Z
|
topic-db/topicdb/core/models/language.py
|
anthcp-infocom/Contextualise
|
0136660fcb965fd70fb4c7a33de7973a69ee9fec
|
[
"MIT"
] | 9
|
2019-10-26T02:43:59.000Z
|
2021-11-03T00:49:10.000Z
|
"""
Language enumeration. Part of the StoryTechnologies project.
June 12, 2016
Brett Alistair Kromkamp (brett.kromkamp@gmail.com)
"""
from enum import Enum
class Language(Enum):
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# https://en.wikipedia.org/wiki/ISO_639-2
ENG = 1 # English
SPA = 2 # Spanish
DEU = 3 # German
ITA = 4 # Italian
FRA = 5 # French
NLD = 6 # Dutch
def __str__(self):
return self.name
| 18.92
| 60
| 0.64482
| 312
| 0.659619
| 0
| 0
| 0
| 0
| 0
| 0
| 280
| 0.591966
|
e199bdb1802d5fdf8365414f161e96d1a070a7b9
| 899
|
py
|
Python
|
utils/migrations/0002_alter_electricitybilling_unit_price_and_more.py
|
shumwe/rental-house-management-system
|
f97f22afa8bc2740ed08baa387c74b93e02fac0c
|
[
"MIT"
] | 1
|
2022-03-16T13:29:30.000Z
|
2022-03-16T13:29:30.000Z
|
utils/migrations/0002_alter_electricitybilling_unit_price_and_more.py
|
shumwe/rental-house-management-system
|
f97f22afa8bc2740ed08baa387c74b93e02fac0c
|
[
"MIT"
] | null | null | null |
utils/migrations/0002_alter_electricitybilling_unit_price_and_more.py
|
shumwe/rental-house-management-system
|
f97f22afa8bc2740ed08baa387c74b93e02fac0c
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-02 17:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('utils', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='electricitybilling',
name='unit_price',
field=models.DecimalField(decimal_places=2, default=24.18, max_digits=9),
),
migrations.AlterField(
model_name='mpesaonline',
name='update_status',
field=models.CharField(choices=[('recieved', 'Recieved'), ('updated', 'Updated')], default='recieved', max_length=10),
),
migrations.AlterField(
model_name='waterbilling',
name='unit_price',
field=models.DecimalField(decimal_places=2, default=53.0, max_digits=9, verbose_name='Unit Price (KES)'),
),
]
| 31
| 130
| 0.604004
| 806
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 220
| 0.244716
|
e19e2e064598322d4426a31b5e25a817d667c8db
| 13,017
|
py
|
Python
|
lemur/roles/views.py
|
pandragoq/lemur
|
4f289c790b6638be49dc6614045bcad01bebf7ba
|
[
"Apache-2.0"
] | null | null | null |
lemur/roles/views.py
|
pandragoq/lemur
|
4f289c790b6638be49dc6614045bcad01bebf7ba
|
[
"Apache-2.0"
] | null | null | null |
lemur/roles/views.py
|
pandragoq/lemur
|
4f289c790b6638be49dc6614045bcad01bebf7ba
|
[
"Apache-2.0"
] | null | null | null |
"""
.. module: lemur.roles.views
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from flask import Blueprint
from flask import make_response, jsonify, abort, g
from flask.ext.restful import reqparse, fields, Api
from lemur.roles import service
from lemur.auth.service import AuthenticatedResource
from lemur.auth.permissions import ViewRoleCredentialsPermission, admin_permission
from lemur.common.utils import marshal_items, paginated_parser
mod = Blueprint('roles', __name__)
api = Api(mod)
FIELDS = {
'name': fields.String,
'description': fields.String,
'id': fields.Integer,
}
class RolesList(AuthenticatedResource):
""" Defines the 'roles' endpoint """
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(RolesList, self).__init__()
@marshal_items(FIELDS)
def get(self):
"""
.. http:get:: /roles
The current role list
**Example request**:
.. sourcecode:: http
GET /roles HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"items": [
{
"id": 1,
"name": "role1",
"description": "this is role1"
},
{
"id": 2,
"name": "role2",
"description": "this is role2"
}
]
"total": 2
}
:query sortBy: field to sort on
:query sortDir: acs or desc
:query page: int. default is 1
:query filter: key value pair. format is k=v;
:query limit: limit number. default is 10
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
parser = paginated_parser.copy()
parser.add_argument('owner', type=str, location='args')
parser.add_argument('id', type=str, location='args')
args = parser.parse_args()
return service.render(args)
@admin_permission.require(http_exception=403)
@marshal_items(FIELDS)
def post(self):
"""
.. http:post:: /roles
Creates a new role
**Example request**:
.. sourcecode:: http
POST /roles HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"name": "role3",
"description": "this is role3",
"username": null,
"password": null,
"users": []
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"id": 3,
"description": "this is role3",
"name": "role3"
}
:arg name: name for new role
:arg description: description for new role
:arg password: password for new role
:arg username: username for new role
:arg users: list, of users to associate with role
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
self.reqparse.add_argument('name', type=str, location='json', required=True)
self.reqparse.add_argument('description', type=str, location='json')
self.reqparse.add_argument('username', type=str, location='json')
self.reqparse.add_argument('password', type=str, location='json')
self.reqparse.add_argument('users', type=list, location='json')
args = self.reqparse.parse_args()
return service.create(args['name'], args.get('password'), args.get('description'), args.get('username'),
args.get('users'))
class RoleViewCredentials(AuthenticatedResource):
def __init__(self):
super(RoleViewCredentials, self).__init__()
def get(self, role_id):
"""
.. http:get:: /roles/1/credentials
View a roles credentials
**Example request**:
.. sourcecode:: http
GET /users/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"username: "ausername",
"password": "apassword"
}
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
permission = ViewRoleCredentialsPermission(role_id)
if permission.can():
role = service.get(role_id)
response = make_response(jsonify(username=role.username, password=role.password), 200)
response.headers['cache-control'] = 'private, max-age=0, no-cache, no-store'
response.headers['pragma'] = 'no-cache'
return response
abort(403)
class Roles(AuthenticatedResource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(Roles, self).__init__()
@marshal_items(FIELDS)
def get(self, role_id):
"""
.. http:get:: /roles/1
Get a particular role
**Example request**:
.. sourcecode:: http
GET /roles/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"id": 1,
"name": "role1",
"description": "this is role1"
}
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
# we want to make sure that we cannot view roles that we are not members of
if not g.current_user.is_admin:
user_role_ids = set([r.id for r in g.current_user.roles])
if role_id not in user_role_ids:
return dict(message="You are not allowed to view a role which you are not a member of"), 400
return service.get(role_id)
@marshal_items(FIELDS)
def put(self, role_id):
"""
.. http:put:: /roles/1
Update a role
**Example request**:
.. sourcecode:: http
PUT /roles/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"name": "role1",
"description": "This is a new description"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"id": 1,
"name": "role1",
"description": "this is a new description"
}
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
permission = ViewRoleCredentialsPermission(role_id)
if permission.can():
self.reqparse.add_argument('name', type=str, location='json', required=True)
self.reqparse.add_argument('description', type=str, location='json')
self.reqparse.add_argument('users', type=list, location='json')
args = self.reqparse.parse_args()
return service.update(role_id, args['name'], args.get('description'), args.get('users'))
abort(403)
@admin_permission.require(http_exception=403)
def delete(self, role_id):
"""
.. http:delete:: /roles/1
Delete a role
**Example request**:
.. sourcecode:: http
DELETE /roles/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"message": "ok"
}
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
service.delete(role_id)
return {'message': 'ok'}
class UserRolesList(AuthenticatedResource):
""" Defines the 'roles' endpoint """
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(UserRolesList, self).__init__()
@marshal_items(FIELDS)
def get(self, user_id):
"""
.. http:get:: /users/1/roles
List of roles for a given user
**Example request**:
.. sourcecode:: http
GET /users/1/roles HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"items": [
{
"id": 1,
"name": "role1",
"description": "this is role1"
},
{
"id": 2,
"name": "role2",
"description": "this is role2"
}
]
"total": 2
}
:query sortBy: field to sort on
:query sortDir: acs or desc
:query page: int. default is 1
:query filter: key value pair. format is k=v;
:query limit: limit number. default is 10
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
"""
parser = paginated_parser.copy()
args = parser.parse_args()
args['user_id'] = user_id
return service.render(args)
class AuthorityRolesList(AuthenticatedResource):
""" Defines the 'roles' endpoint """
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(AuthorityRolesList, self).__init__()
@marshal_items(FIELDS)
def get(self, authority_id):
"""
.. http:get:: /authorities/1/roles
List of roles for a given authority
**Example request**:
.. sourcecode:: http
GET /authorities/1/roles HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"items": [
{
"id": 1,
"name": "role1",
"description": "this is role1"
},
{
"id": 2,
"name": "role2",
"description": "this is role2"
}
]
"total": 2
}
:query sortBy: field to sort on
:query sortDir: acs or desc
:query page: int. default is 1
:query filter: key value pair. format is k=v;
:query limit: limit number. default is 10
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
"""
parser = paginated_parser.copy()
args = parser.parse_args()
args['authority_id'] = authority_id
return service.render(args)
api.add_resource(RolesList, '/roles', endpoint='roles')
api.add_resource(Roles, '/roles/<int:role_id>', endpoint='role')
api.add_resource(RoleViewCredentials, '/roles/<int:role_id>/credentials', endpoint='roleCredentials')
api.add_resource(AuthorityRolesList, '/authorities/<int:authority_id>/roles', endpoint='authorityRoles')
api.add_resource(UserRolesList, '/users/<int:user_id>/roles', endpoint='userRoles')
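# --- Hedged usage sketch (not part of the original file) ---
# The docstrings above already show the raw HTTP exchanges; this is the same
# GET /roles request expressed with the `requests` library. The base URL and
# token below are placeholders, not values taken from the Lemur project.
import requests

def list_roles(base_url='https://lemur.example.com', token='<oauth-token>'):
    resp = requests.get(
        base_url + '/roles',
        headers={'Authorization': 'Bearer ' + token},
        params={'page': 1, 'limit': 10, 'sortBy': 'id', 'sortDir': 'desc'},
    )
    resp.raise_for_status()
    data = resp.json()   # {"items": [...], "total": N} as documented above
    return data['items']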
| 29.186099
| 112
| 0.520089
| 11,841
| 0.909657
| 0
| 0
| 9,744
| 0.74856
| 0
| 0
| 8,928
| 0.685872
|
e19e4bc332d90affe3ea70c17d43f480bce982e0
| 16,454
|
py
|
Python
|
backend/core/workspaces/dataView.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | null | null | null |
backend/core/workspaces/dataView.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 50
|
2021-03-28T03:06:19.000Z
|
2021-10-18T12:36:16.000Z
|
backend/core/workspaces/dataView.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 1
|
2021-07-30T07:12:46.000Z
|
2021-07-30T07:12:46.000Z
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
import copy
from core.common.objDict import ObjDict
class DataView(object):
"""Build up data-views
"""
disable = False
requireLogin = True
requirePermission = None # a permission is required in the meaning of one of the following
def __init__(self, name=None, uri=None):
self.description = 'UNKNOWN'
if name is None:
self.name = type(self).__name__
else:
self.name = name
if uri is None:
self.uri = self.name
else:
self.uri = uri
self.requireLogin = True
self.properties = []
self.metadata = []
self.dataAction = {}
self.dataUpdateHandler = {}
self.dataUpdates = []
self.dataSyncs = []
self.entrykey = None
self.entrytype = None
self.entryPropList = {}
self.metaDataList = {}
def createMeta(self):
return ObjDict(self.metaDataList.copy())
def addMailMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'email'}
self.metaDataList[name] = None
self.metadata.append(meta)
def addStringMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'string'}
self.metaDataList[name] = None
self.metadata.append(meta)
def addIntegerMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'integer'}
self.metaDataList[name] = None
self.metadata.append(meta)
def addDoubleMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'double'}
self.metaDataList[name] = None
self.metadata.append(meta)
def addBooleanMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'boolean'}
self.metaDataList[name] = None
self.metadata.append(meta)
def addTimeMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'time'}
self.metaDataList[name] = None
self.metadata.append(meta)
def addDateMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'date'}
self.metaDataList[name] = None
self.metadata.append(meta)
def addDatetimeMeta(self, name, label="", group=None, description=""):
meta = {'name': name, 'label': label, 'type': 'datetime'}
self.metaDataList[name] = None
self.metadata.append(meta)
def createEntry(self):
return ObjDict(self.entryPropList.copy())
def getProperties(self):
properties = []
for p in self.properties:
pn = copy.copy(p)
if pn['type'] == 'multiselect' or pn['type'] == 'select':
try:
if callable(p['selection']):
pn['selection'] = p['selection']()
else:
pn['selection'] = p['selection']
except Exception as e:
raise e
properties.append(pn)
return properties
def addMailProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description=""):
prop = {'name': name, 'label': label, 'type': 'email'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addStringProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description="",
hide=False):
prop = {'name': name, 'label': label, 'type': 'string'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['group'] = group
prop['hide'] = hide
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addDoubleProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description=""):
prop = {'name': name, 'label': label, 'type': 'double'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addIntegerProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description="",
hide=False):
prop = {'name': name, 'label': label, 'type': 'integer'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['hide'] = hide
prop['readOnly'] = readOnly
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addDatetimeProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description=""):
prop = {'name': name, 'label': label, 'type': 'datetime'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addTimeProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description=""):
prop = {'name': name, 'label': label, 'type': 'time'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addDateProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description=""):
prop = {'name': name, 'label': label, 'type': 'date'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addBooleanProperty(self,
name,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description="",
hide=False):
prop = {'name': name, 'label': label, 'type': 'boolean'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['group'] = group
prop['hide'] = hide
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addSelectProperty(self,
name,
selectables,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description=""):
prop = {'name': name, 'label': label, 'type': 'select'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['selection'] = selectables
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addMultiSelectProperty(self,
name,
selectables,
label="",
group=None,
updateHandler=None,
isKey=False,
readOnly=True,
description=""):
prop = {'name': name, 'label': label, 'type': 'multiselect'}
if isKey is True:
prop['isKey'] = True
if self.entrykey is not None:
raise KeyError("DataView '{}' already have a key ({}) and cant be overridden with {}".format(
self.name, self.entrykey, name))
self.entrykey = name
prop['readOnly'] = readOnly
prop['selection'] = selectables
prop['group'] = group
prop['description'] = description
self.dataUpdateHandler[str(name)] = updateHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addActionProperty(self,
name,
label,
action,
icon,
actionHandler=None,
isKey=False,
readOnly=True,
color='red',
description=""):
prop = {'name': name, 'label': label, 'type': 'action'}
prop['isKey'] = False
prop['icon'] = icon
prop['color'] = color
prop['action'] = action
prop['description'] = description
self.dataAction[str(action)] = actionHandler
self.properties.append(prop)
self.entryPropList[name] = None
def addRemoveEntryOption(self, name, label):
prop = {'name': name, 'label': label, 'type': 'remove'}
prop['isKey'] = False
prop['icon'] = 'delete'
self.properties.append(prop)
def emitUpdate(self, key, property, value):
self.dataUpdates.append({'key': key, 'property': property, 'value': value, 'view': self.uri})
def emitSyncUpdate(self, key, view=None, workspace=None):
if view is None:
view = self.uri
self.dataSyncs.append({'type': 'update', 'key': key, 'view': view, 'workspace': workspace})
def emitSyncRemove(self, key, view=None, workspace=None):
if view is None:
view = self.uri
self.dataSyncs.append({'type': 'remove', 'key': key, 'view': view, 'workspace': workspace})
def emitSyncCreate(self, key, view=None, workspace=None):
if view is None:
view = self.name
self.dataSyncs.append({'type': 'create', 'key': key, 'view': view, 'workspace': workspace})
# Handler for getting the freshly build view
def getViewHandler(self, user, workspace, query=None):
raise NotImplementedError
# Handler for getting the views meta-data
def getViewMetaHandler(self, user, workspace):
return {}
# Handler for a request to create a new view entry
def createViewEntryHandler(self, user, workspace):
raise NotImplementedError
# Handler for a request to remove a view entry
def removeViewEntryHandler(self, user, workspace, key):
raise NotImplementedError
# Handler for a request to update a single view entry
def updateViewEntryHandler(self, user, workspace, key, entry):
raise NotImplementedError
# Handler for view actions
def executeViewActionHandler(self, user, workspace, action):
try:
return self.dataAction[action.viewAction](user, workspace, action, action.entry[self.entrykey])
except Exception:
return self.dataAction[action.viewAction](user, workspace, action.entry[self.entrykey])
def defineProperties(self):
pass
def defineMetadata(self):
pass
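# --- Hedged usage sketch (not part of the original file) ---
# A minimal subclass built only from the methods defined above. The view name,
# the properties and the hard-coded rows are invented for illustration; they do
# not come from the roseguarden code base, and the framework normally decides
# when defineProperties()/getViewHandler() are called.
class BookListView(DataView):
    def defineProperties(self):
        self.addStringProperty('title', label='Title', isKey=True, readOnly=False)
        self.addIntegerProperty('pages', label='Pages', readOnly=False)
        self.addBooleanProperty('available', label='Available', readOnly=False)
    def getViewHandler(self, user, workspace, query=None):
        entries = []
        for title, pages in [('Dune', 412), ('Solaris', 204)]:
            entry = self.createEntry()   # dict pre-filled with the declared properties
            entry['title'], entry['pages'], entry['available'] = title, pages, True
            entries.append(entry)
        return entries

# view = BookListView('Books'); view.defineProperties()
# rows = view.getViewHandler(user=None, workspace=None)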
| 38.533958
| 109
| 0.521454
| 15,599
| 0.948037
| 0
| 0
| 0
| 0
| 0
| 0
| 3,115
| 0.189316
|
e19eb98e8bf34e916a97b5b0db5e158713913cb4
| 3,892
|
py
|
Python
|
demo/video_gpuaccel_demo.py
|
chenxinfeng4/mmdetection
|
a99a1aaa5e4a7614f2f89f2350e1b917b2a8ca7e
|
[
"Apache-2.0"
] | 1
|
2021-12-10T15:08:22.000Z
|
2021-12-10T15:08:22.000Z
|
demo/video_gpuaccel_demo.py
|
q3394101/mmdetection
|
ca11860f4f3c3ca2ce8340e2686eeaec05b29111
|
[
"Apache-2.0"
] | null | null | null |
demo/video_gpuaccel_demo.py
|
q3394101/mmdetection
|
ca11860f4f3c3ca2ce8340e2686eeaec05b29111
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
from torchvision.transforms import functional as F
from mmdet.apis import init_detector
from mmdet.datasets.pipelines import Compose
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_img_metas(cfg, ori_wh):
w, h = ori_wh
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
test_pipeline = Compose(cfg.data.test.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8)}
data = test_pipeline(data)
img_metas = data['img_metas'][0].data
return img_metas
def process_img(frame_resize, img_metas, device):
assert frame_resize.shape == img_metas['pad_shape']
frame_cuda = torch.from_numpy(frame_resize).to(device).float()
frame_cuda = frame_cuda.permute(2, 0, 1) # HWC to CHW
mean = torch.from_numpy(img_metas['img_norm_cfg']['mean']).to(device)
std = torch.from_numpy(img_metas['img_norm_cfg']['std']).to(device)
frame_cuda = F.normalize(frame_cuda, mean=mean, std=std, inplace=True)
frame_cuda = frame_cuda[None, :, :, :] # NCHW
data = {'img': [frame_cuda], 'img_metas': [[img_metas]]}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
img_metas = prefetch_img_metas(model.cfg,
(video_origin.width, video_origin.height))
resize_wh = img_metas['pad_shape'][1::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft',
pix_fmt='rgb24')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for frame_resize, frame_origin in zip(
mmcv.track_iter_progress(video_resize), video_origin):
data = process_img(frame_resize, img_metas, args.device)
result = model(return_loss=False, rescale=True, **data)[0]
frame_mask = model.show_result(
frame_origin, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 34.140351
| 77
| 0.659044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 727
| 0.186793
|
e1a102da5af50dd136ac3eab04d096bc659e8951
| 1,234
|
py
|
Python
|
scripts/benchmark_1_rdomset.py
|
bluegenes/spacegraphcats
|
35f8057068e4fe79ab83ac4efe91d1b0f389e1ea
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/benchmark_1_rdomset.py
|
bluegenes/spacegraphcats
|
35f8057068e4fe79ab83ac4efe91d1b0f389e1ea
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/benchmark_1_rdomset.py
|
bluegenes/spacegraphcats
|
35f8057068e4fe79ab83ac4efe91d1b0f389e1ea
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
"""
Benchmark the rdomset (catlas level 1) algorithm, without I/O considerations.
"""
import sys, os
# add spacegraphcats package to import path:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import spacegraphcats
from spacegraphcats.catlas import catlas
import argparse
import sys
import time
def main():
parser = argparse.ArgumentParser()
parser.add_argument("project", help="Project directory",
type=str)
parser.add_argument("radius", help="Catlas radius", type=int)
parser.add_argument("-o", "--output", type=str)
args = parser.parse_args()
proj = catlas.Project(args.project, args.radius, False)
# load the graph
proj.load_furthest_checkpoint()
nodes_in_layer_0 = len(proj.graph)
# build the first layer only, for rdomset benchmarking.
start = time.time()
catlas.CAtlas.build(proj, benchmark_only=True)
end = time.time()
outfp = sys.stdout
if args.output:
outfp = open(args.output, 'at')
print("{},{},{:.1f},{},rdomset".format(nodes_in_layer_0, args.radius,
end - start, args.project), file=outfp)
if __name__ == '__main__':
sys.exit(main())
| 28.697674
| 77
| 0.658833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 330
| 0.267423
|
e1a1374935fa7cc8ec68a7212a8ba5b8c016fac8
| 2,107
|
py
|
Python
|
pyob/mixins/pyob_set_label.py
|
khunspoonzi/pyob
|
b1b134b708585add15d04fa75001f3364f31dd74
|
[
"MIT"
] | null | null | null |
pyob/mixins/pyob_set_label.py
|
khunspoonzi/pyob
|
b1b134b708585add15d04fa75001f3364f31dd74
|
[
"MIT"
] | null | null | null |
pyob/mixins/pyob_set_label.py
|
khunspoonzi/pyob
|
b1b134b708585add15d04fa75001f3364f31dd74
|
[
"MIT"
] | null | null | null |
# ┌─────────────────────────────────────────────────────────────────────────────────────
# │ PYOB SET LABEL MIXIN
# └─────────────────────────────────────────────────────────────────────────────────────
class PyObSetLabelMixin:
"""A mixin class for PyOb set label methods"""
# ┌─────────────────────────────────────────────────────────────────────────────────
# │ LABEL SINGULAR
# └─────────────────────────────────────────────────────────────────────────────────
@property
def label_singular(self):
"""Returns a singular label for the PyOb set"""
# Determine if PyOb set is mixed
is_mixed = self.count() > 1 and self._PyObClass is None
# Get PyOb label
ob_label = "Mixed" if is_mixed else self.ob_label_singular
# Return singular label
return self.__class__.__name__.replace("Ob", ob_label + " ")
# ┌─────────────────────────────────────────────────────────────────────────────────
# │ LABEL PLURAL
# └─────────────────────────────────────────────────────────────────────────────────
@property
def label_plural(self):
"""Returns a plural label for the PyOb set"""
# Return plural label
return self.label_singular + "s"
# ┌─────────────────────────────────────────────────────────────────────────────────
# │ OB LABEL SINGULAR
# └─────────────────────────────────────────────────────────────────────────────────
@property
def ob_label_singular(self):
"""Returns a singular label based on related PyOb if any"""
# Return singular label
return (self._PyObClass and self._PyObClass.label_singular) or "Ob"
# ┌─────────────────────────────────────────────────────────────────────────────────
# │ OB LABEL PLURAL
# └─────────────────────────────────────────────────────────────────────────────────
@property
def ob_label_plural(self):
"""Returns a plural label based on related object if any"""
# Return plural label
return (self._PyObClass and self._PyObClass.label_plural) or "Obs"
| 36.327586
| 88
| 0.366398
| 3,221
| 0.853697
| 0
| 0
| 992
| 0.262921
| 0
| 0
| 3,030
| 0.803074
|
e1a2f60719c963dbaf1979085fd5c1225ad3b6d9
| 1,547
|
py
|
Python
|
proto/twowfuck.py
|
hanss314/twowfuck
|
43d4f7b8c9f6b1e547f57d0c56b1db2972393c1e
|
[
"Unlicense"
] | null | null | null |
proto/twowfuck.py
|
hanss314/twowfuck
|
43d4f7b8c9f6b1e547f57d0c56b1db2972393c1e
|
[
"Unlicense"
] | null | null | null |
proto/twowfuck.py
|
hanss314/twowfuck
|
43d4f7b8c9f6b1e547f57d0c56b1db2972393c1e
|
[
"Unlicense"
] | null | null | null |
import time
from hashlib import sha1
class InfArray():
def __init__(self):
self.left = [0]*16
self.right = [0]*16
def getarr(self, ind):
arr = self.right
if ind < 0: arr, ind = self.left, -ind-1
        if ind >= len(arr): arr.extend([0] * (ind - len(arr) + 10))
return arr, ind
def __getitem__(self, key):
arr, key = self.getarr(key)
return arr[key]
def __setitem__(self, key, item):
arr, key = self.getarr(key)
arr[key] = item
def __str__(self):
return ' '.join(map(str, reversed(self.left))) + ' ' + ' '.join(map(str, self.right))
def interpreter(prog: str, inp: str):
arr = InfArray()
mptr = 0
pptr = 0
iptr = 0
start = time.time()
while pptr < len(prog) and time.time() - start <= 10*60:
c = ord(prog[pptr]) % 6
if c == 0: mptr -= 1
elif c == 1: mptr += 1
elif c == 2: arr[mptr] += 1
elif c == 3: arr[mptr] -= 1
elif c == 4:
arr[mptr] = ord(inp[iptr])
iptr = (iptr + 1) % len(inp)
elif arr[mptr] == 0: pptr = pptr + 2
else:
if len(prog) - pptr <= 2: unhashed = prog[pptr+1:]
else: unhashed = prog[pptr+1:pptr+3]
pptr = int(sha1(unhashed.encode('utf8')).hexdigest(), 16) % len(prog) - 1
pptr = pptr + 1
return pptr < len(prog), str(arr)
def test_all(progs, inp):
for prog in progs:
nohalt, tape = interpreter(prog, inp)
        print(f'{nohalt} {prog} {tape}')
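# --- Hedged usage sketch (not part of the original file) ---
# Every character is mapped through ord(c) % 6, so e.g. 'b' (98 % 6 == 2)
# increments the current cell and 'a' (97 % 6 == 1) moves the pointer right.
# The three-instruction program below is made up for the example.
if __name__ == '__main__':
    running, tape = interpreter('bbb', 'x')   # increment cell 0 three times
    print(running, tape)   # running is False (the program ran off the end); the tape holds a single 3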
| 28.127273
| 93
| 0.513251
| 599
| 0.387201
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.026503
|
e1a32855943669310a5b6aeb210eb2e39273f98a
| 5,915
|
py
|
Python
|
simba/process_data_log.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 1
|
2021-12-15T07:30:33.000Z
|
2021-12-15T07:30:33.000Z
|
simba/process_data_log.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | null | null | null |
simba/process_data_log.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 1
|
2021-11-14T09:15:30.000Z
|
2021-11-14T09:15:30.000Z
|
import pandas as pd
import os
from configparser import ConfigParser, NoSectionError, NoOptionError
from datetime import datetime
import numpy as np
import glob
from simba.rw_dfs import *
def analyze_process_data_log(configini,chosenlist):
dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
config = ConfigParser()
configFile = str(configini)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
csv_dir_in = os.path.join(projectPath, 'csv', 'machine_results')
no_targets = config.getint('SML settings', 'No_targets')
filesFound, target_names = [], []
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
vidinfDf = pd.read_csv(os.path.join(projectPath, 'logs', 'video_info.csv'))
loop, videoCounter = 0, 0
filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
########### GET TARGET COLUMN NAMES ###########
for ff in range(no_targets):
currentModelNames = 'target_name_' + str(ff+1)
currentModelNames = config.get('SML settings', currentModelNames)
target_names.append(currentModelNames)
print('Analyzing ' + str(len(target_names)) + ' classifier result(s) in ' + str(len(filesFound)) + ' video file(s).')
########### logfile path ###########
log_fn = 'sklearn_' + str(dateTime) + '.csv'
log_fn = os.path.join(projectPath, 'logs', log_fn)
headers = ['Video']
headers_to_insert = [' # bout events', ' total events duration (s)', ' mean bout duration (s)', ' median bout duration (s)', ' first occurance (s)', ' mean interval (s)', ' median interval (s)']
for headerVar in headers_to_insert:
for target in target_names:
currHead = str(target) + headerVar
headers.extend([currHead])
log_df = pd.DataFrame(columns=headers)
for currentFile in filesFound:
videoCounter += 1
currVidName = os.path.basename(currentFile).replace('.' + wfileType, '')
fps = vidinfDf.loc[vidinfDf['Video'] == currVidName]
try:
fps = int(fps['fps'])
except TypeError:
print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
print('Analyzing video ' + str(videoCounter) + '/' + str(len(filesFound)) + '...')
dataDf = read_df(currentFile, wfileType)
boutsList, nameList, startTimeList, endTimeList = [], [], [], []
for currTarget in target_names:
groupDf = pd.DataFrame()
v = (dataDf[currTarget] != dataDf[currTarget].shift()).cumsum()
u = dataDf.groupby(v)[currTarget].agg(['all', 'count'])
m = u['all'] & u['count'].ge(1)
groupDf['groups'] = dataDf.groupby(v).apply(lambda x: (x.index[0], x.index[-1]))[m]
for indexes, rows in groupDf.iterrows():
currBout = list(rows['groups'])
boutTime = ((currBout[-1] - currBout[0]) + 1) / fps
startTime = (currBout[0] + 1) / fps
endTime = (currBout[1]) / fps
endTimeList.append(endTime)
startTimeList.append(startTime)
boutsList.append(boutTime)
nameList.append(currTarget)
boutsDf = pd.DataFrame(list(zip(nameList, startTimeList, endTimeList, boutsList)), columns=['Event', 'Start Time', 'End Time', 'Duration'])
boutsDf['Shifted start'] = boutsDf['Start Time'].shift(-1)
boutsDf['Interval duration'] = boutsDf['Shifted start'] - boutsDf['End Time']
firstOccurList, eventNOsList, TotEventDurList, MeanEventDurList, MedianEventDurList, TotEventDurList, meanIntervalList, medianIntervalList = [], [], [], [], [], [], [], []
for targets in target_names:
currDf = boutsDf.loc[boutsDf['Event'] == targets]
try:
firstOccurList.append(round(currDf['Start Time'].min(), 3))
except IndexError:
firstOccurList.append(0)
eventNOsList.append(len(currDf))
TotEventDurList.append(round(currDf['Duration'].sum(), 3))
try:
if len(currDf) > 1:
intervalDf = currDf[:-1].copy()
meanIntervalList.append(round(intervalDf['Interval duration'].mean(), 3))
medianIntervalList.append(round(intervalDf['Interval duration'].median(), 3))
else:
meanIntervalList.append(0)
medianIntervalList.append(0)
except ZeroDivisionError:
meanIntervalList.append(0)
medianIntervalList.append(0)
try:
MeanEventDurList.append(round(currDf["Duration"].mean(), 3))
MedianEventDurList.append(round(currDf['Duration'].median(), 3))
except ZeroDivisionError:
MeanEventDurList.append(0)
MedianEventDurList.append(0)
currentVidList = [currVidName] + eventNOsList + TotEventDurList + MeanEventDurList + MedianEventDurList + firstOccurList + meanIntervalList + medianIntervalList
log_df.loc[loop] = currentVidList
loop += 1
print('File # processed for machine predictions: ' + str(loop) + '/' + str(len(filesFound)))
log_df.fillna(0, inplace=True)
log_df.replace(0, np.NaN)
# drop columns not chosen
for target in target_names:
for col2drop in chosenlist:
currCol = target + ' ' + col2drop
print(currCol)
            log_df = log_df.drop(currCol, axis=1)
print(log_df.columns)
log_df.to_csv(log_fn, index=False)
print('All files processed for machine predictions: data file saved @' + str(log_fn))
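# --- Hedged sketch (not part of the original file) ---
# The bout extraction above relies on a run-length trick: comparing the target
# column with its own shift() and taking the cumulative sum yields one group id
# per consecutive run, so every run of 1s becomes one behavioural bout. The toy
# frame below is invented to show the intermediate steps.
def find_bouts(frame, column):
    v = (frame[column] != frame[column].shift()).cumsum()   # run ids
    u = frame.groupby(v)[column].agg(['all', 'count'])
    m = u['all'] & u['count'].ge(1)                          # keep runs made of 1s only
    return list(frame.groupby(v).apply(lambda x: (x.index[0], x.index[-1]))[m])

# find_bouts(pd.DataFrame({'attack': [0, 1, 1, 1, 0, 0, 1, 1, 0]}), 'attack')
# -> [(1, 3), (6, 7)]: two bouts, frames 1-3 and 6-7.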
| 49.291667
| 200
| 0.598478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,068
| 0.180558
|
e1a33e79602cf7d5b3c579918699d4ae4a866f09
| 3,812
|
py
|
Python
|
SphinxReportPlugins/MatplotlibPlugin/__init__.py
|
Tim-HU/sphinx-report
|
3a0dc225e594c4b2083dff7a93b6d77054256416
|
[
"BSD-2-Clause"
] | 1
|
2020-04-10T12:48:40.000Z
|
2020-04-10T12:48:40.000Z
|
SphinxReportPlugins/MatplotlibPlugin/__init__.py
|
Tim-HU/sphinx-report
|
3a0dc225e594c4b2083dff7a93b6d77054256416
|
[
"BSD-2-Clause"
] | null | null | null |
SphinxReportPlugins/MatplotlibPlugin/__init__.py
|
Tim-HU/sphinx-report
|
3a0dc225e594c4b2083dff7a93b6d77054256416
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import re
import warnings
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
from matplotlib.cbook import exception_to_str
import seaborn
from SphinxReport.Component import *
from SphinxReport import Config, Utils
class MatplotlibPlugin(Component):
capabilities = ['collect']
def __init__(self, *args, **kwargs):
Component.__init__(self,*args,**kwargs)
plt.close('all')
# matplotlib.rcdefaults()
# set a figure size that doesn't overflow typical browser windows
matplotlib.rcParams['figure.figsize'] = (5.5, 4.5)
def collect( self,
blocks,
template_name,
outdir,
rstdir,
builddir,
srcdir,
content,
display_options,
tracker_id,
links = {} ):
'''collect one or more matplotlib figures and
1. save as png, hires-png and pdf
2. save thumbnail
3. insert rendering code at placeholders in output
returns a map of place holder to placeholder text.
'''
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
map_figure2text = {}
# determine the image formats to create
default_format, additional_formats = Utils.getImageFormats( display_options )
all_formats = [default_format,] + additional_formats
# create all the images
for figman in fig_managers:
# create all images
figid = figman.num
outname = "%s_%02d" % (template_name, figid)
for id, format, dpi in all_formats:
outpath = os.path.join(outdir, '%s.%s' % (outname, format))
try:
figman.canvas.figure.savefig( outpath, dpi=dpi )
except:
s = Utils.collectExceptionAsString("Exception running plot %s" % outpath)
warnings.warn(s)
return []
# if format=='png':
# thumbdir = os.path.join(outdir, 'thumbnails')
# try:
# os.makedirs(thumbdir)
# except OSError:
# pass
# thumbfile = str('%s.png' % os.path.join(thumbdir, outname) )
# captionfile = str('%s.txt' % os.path.join(thumbdir, outname) )
# if not os.path.exists(thumbfile):
# # thumbnail only available in matplotlib >= 0.98.4
# try:
# figthumb = image.thumbnail(str(outpath), str(thumbfile), scale=0.3)
# except AttributeError:
# pass
# outfile = open(captionfile,"w")
# outfile.write( "\n".join( content ) + "\n" )
# outfile.close()
# create the text element
rst_output = Utils.buildRstWithImage( outname,
outdir,
rstdir,
builddir,
srcdir,
additional_formats,
tracker_id,
links,
display_options,
default_format)
map_figure2text[ "#$mpl %i$#" % figid] = rst_output
return map_figure2text
| 35.626168
| 97
| 0.471144
| 3,513
| 0.921563
| 0
| 0
| 0
| 0
| 0
| 0
| 1,167
| 0.306139
|
e1a3af379a95b5a4e4e2ab062711e4da48ff07ad
| 4,358
|
py
|
Python
|
nips2018/utils/plotting.py
|
kovacspe/Sinz2018_NIPS
|
c0aad625e516bed57f3ee52b39195c8817527d66
|
[
"MIT"
] | 5
|
2018-10-30T05:39:11.000Z
|
2021-03-20T12:00:25.000Z
|
nips2018/utils/plotting.py
|
kovacspe/Sinz2018_NIPS
|
c0aad625e516bed57f3ee52b39195c8817527d66
|
[
"MIT"
] | null | null | null |
nips2018/utils/plotting.py
|
kovacspe/Sinz2018_NIPS
|
c0aad625e516bed57f3ee52b39195c8817527d66
|
[
"MIT"
] | 5
|
2019-03-08T13:48:39.000Z
|
2020-10-04T13:27:56.000Z
|
import cv2
import imageio
import numpy as np
import matplotlib.pyplot as plt
from itertools import count, product, zip_longest
import seaborn as sns
def grouper(n, iterable, fillvalue=None):
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def rename(rel, prefix='new_', exclude=[]):
attrs = list(rel.heading.attributes.keys())
original = [x for x in attrs if x not in exclude]
keys = [k for k in exclude if k in attrs]
name_map = {prefix+x: x for x in original}
return rel.proj(*keys, **name_map)
def plot_images(df, prefixes, names=None, brain_area='V1', n_rows=15, order_by='pearson',
panels=('normed_rf', 'normed_mei'), panel_names=('RF', 'MEI'), cmaps=('coolwarm', 'gray'),
y_infos=('{prefix}test_corr', 'pearson'), save_path=None):
if names is None:
names = prefixes
f = (df['brain_area'] == brain_area)
area_data = df[f]
area_data = area_data.sort_values(order_by, ascending=False)
n_rows = min(n_rows, len(area_data))
n_panels = len(panels)
    cols = len(prefixes) * n_panels
    with sns.axes_style('white'):
        fig, axs = plt.subplots(n_rows, cols, figsize=(4 * cols, round(2 * n_rows)))
st = fig.suptitle('MEIs on Shuffled {} dataset: {}'.format(brain_area, ', '.join(names)))
[ax.set_xticks([]) for ax in axs.ravel()]
[ax.set_yticks([]) for ax in axs.ravel()]
for ax_row, (_, data_row), row_index in zip(axs, area_data.iterrows(), count()):
for ax_group, prefix, name in zip(grouper(n_panels, ax_row), prefixes, names):
for ax, panel, panel_name, y_info, cm in zip(ax_group, panels, panel_names, y_infos, cmaps):
if row_index == 0:
ax.set_title('{}: {}'.format(panel_name, name))
ax.imshow(data_row[prefix + panel].squeeze(), cmap=cm)
if y_info is not None:
ax.set_ylabel('{:0.2f}%'.format(data_row[y_info.format(prefix=prefix)] * 100))
fig.tight_layout()
# shift subplots down:
st.set_y(0.98)
st.set_fontsize(20)
fig.subplots_adjust(top=0.95)
        if save_path is not None:
fig.savefig(save_path)
def gen_gif(images, output_path, duration=5, scale=1, adj_single=False):
h, w = images[0].shape
imgsize = (w * scale, h * scale)
images = np.stack([cv2.resize(img, imgsize) for img in images])
axis = (1, 2) if adj_single else None
images = images - images.min(axis=axis, keepdims=True)
images = images / images.max(axis=axis, keepdims=True) * 255
images = images.astype('uint8')
single_duration = duration / len(images)
if not output_path.endswith('.gif'):
output_path += '.gif'
imageio.mimsave(output_path, images, duration=single_duration)
def rescale_images(images, low=0, high=1, together=True):
axis = None if together else (1, 2)
images = images - images.min(axis=axis, keepdims=True)
images = images / images.max(axis=axis, keepdims=True) * (high - low) + low
return images
def scale_imagesize(images, scale=(2, 2)):
h, w = images[0].shape
imgsize = (w * scale[1], h * scale[0])
return np.stack([cv2.resize(img, imgsize) for img in images])
def tile_images(images, rows, cols, vpad=0, hpad=0, normalize=False, base=0):
n_images = len(images)
assert rows * cols >= n_images
h, w = images[0].shape
total_image = np.zeros((h + (h + vpad) * (rows - 1), w + (w + hpad) * (cols - 1))) + base
loc = product(range(rows), range(cols))
for img, (i, j) in zip(images, loc):
if normalize:
img = rescale_images(img)
voffset, hoffset = (h + vpad) * i, (w + hpad) * j
total_image[voffset:voffset + h, hoffset:hoffset + w] = img
return total_image
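# Example sketch (added for clarity): tile six 32x32 frames into a 2x3 mosaic
# with a 2-pixel gap between tiles.
#   imgs = [np.random.rand(32, 32) for _ in range(6)]
#   mosaic = tile_images(imgs, rows=2, cols=3, vpad=2, hpad=2)   # shape (66, 100)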
def repeat_frame(images, frame_pos=0, rep=4):
parts = []
if frame_pos < 0:
frame_pos = len(images) + frame_pos
if frame_pos > 0:
parts.append(images[:frame_pos])
parts.append(np.tile(images[frame_pos], (rep, 1, 1)))
if frame_pos < len(images) - 1:
parts.append(images[frame_pos+1:])
return np.concatenate(parts)
def add_text(image, text, pos, fontsize=1, color=(0, 0, 0)):
image = image.copy()
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(image, text, pos, font, fontsize, color, 1, cv2.LINE_8)
return image
| 36.932203
| 106
| 0.630564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.048187
|
e1a3e676ffda7283905e72fd2a219c469d9b17cf
| 2,115
|
py
|
Python
|
src/tests/samples.py
|
BeholdersEye/PyBitmessage
|
362a975fbf1ec831d3107c7442527225bc140162
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5
|
2018-03-24T17:33:03.000Z
|
2019-07-01T07:16:19.000Z
|
src/tests/samples.py
|
BeholdersEye/PyBitmessage
|
362a975fbf1ec831d3107c7442527225bc140162
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 15
|
2018-03-19T00:04:57.000Z
|
2021-12-10T17:21:54.000Z
|
src/tests/samples.py
|
BeholdersEye/PyBitmessage
|
362a975fbf1ec831d3107c7442527225bc140162
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5
|
2019-05-15T08:42:57.000Z
|
2019-09-17T12:21:37.000Z
|
"""Various sample data"""
from binascii import unhexlify
magic = 0xE9BEB4D9
# These keys are from addresses test script
sample_pubsigningkey = unhexlify(
'044a367f049ec16cb6b6118eb734a9962d10b8db59c890cd08f210c43ff08bdf09d'
'16f502ca26cd0713f38988a1237f1fc8fa07b15653c996dc4013af6d15505ce')
sample_pubencryptionkey = unhexlify(
'044597d59177fc1d89555d38915f581b5ff2286b39d022ca0283d2bdd5c36be5d3c'
'e7b9b97792327851a562752e4b79475d1f51f5a71352482b241227f45ed36a9')
sample_privsigningkey = \
b'93d0b61371a54b53df143b954035d612f8efa8a3ed1cf842c2186bfd8f876665'
sample_privencryptionkey = \
b'4b0b73a54e19b059dc274ab69df095fe699f43b17397bca26fdf40f4d7400a3a'
sample_ripe = b'003cd097eb7f35c87b5dc8b4538c22cb55312a9f'
# stream: 1, version: 2
sample_address = 'BM-onkVu1KKL2UaUss5Upg9vXmqd3esTmV79'
sample_factor = 66858749573256452658262553961707680376751171096153613379801854825275240965733
# G * sample_factor
sample_point = (
33567437183004486938355437500683826356288335339807546987348409590129959362313,
94730058721143827257669456336351159718085716196507891067256111928318063085006
)
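# Illustrative cross-check (assumes the third-party `ecdsa` package; not part of
# the original test data): sample_point should equal the secp256k1 generator G
# multiplied by sample_factor, e.g.
#   from ecdsa import SECP256k1
#   p = SECP256k1.generator * sample_factor
#   assert (p.x(), p.y()) == sample_point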
sample_seed = 'TIGER, tiger, burning bright. In the forests of the night'
# Deterministic addresses with stream 1 and versions 3, 4
sample_deterministic_ripe = b'00cfb69416ae76f68a81c459de4e13460c7d17eb'
sample_deterministic_addr3 = 'BM-2DBPTgeSawWYZceFD69AbDT5q4iUWtj1ZN'
sample_deterministic_addr4 = 'BM-2cWzSnwjJ7yRP3nLEWUV5LisTZyREWSzUK'
sample_daddr3_512 = 18875720106589866286514488037355423395410802084648916523381
sample_daddr4_512 = 25152821841976547050350277460563089811513157529113201589004
sample_statusbar_msg = "new status bar message"
sample_inbox_msg_ids = ['27e644765a3e4b2e973ee7ccf958ea20', '51fc5531-3989-4d69-bbb5-68d64b756f5b',
'2c975c515f8b414db5eea60ba57ba455', 'bc1f2d8a-681c-4cc0-9a12-6067c7e1ac24']
# second address in sample_test_subscription_address is for the announcement broadcast
sample_test_subscription_address = ['BM-2cWQLCBGorT9pUGkYSuGGVr9LzE4mRnQaq', 'BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw']
sample_subscription_name = 'test sub'
| 47
| 115
| 0.858629
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,170
| 0.553191
|
e1a48794127b8e659f4702ccf90e46361a4d8c86
| 9,123
|
py
|
Python
|
court_scraper/platforms/wicourts/pages/search.py
|
mscarey/court-scraper
|
0e13976d901352a09cfd7e48450bbe427494f48e
|
[
"0BSD"
] | 1
|
2021-08-20T08:24:55.000Z
|
2021-08-20T08:24:55.000Z
|
court_scraper/platforms/wicourts/pages/search.py
|
palewire/court-scraper
|
da4b614fb16806d8b5117373d273f802ca93a8cb
|
[
"0BSD"
] | null | null | null |
court_scraper/platforms/wicourts/pages/search.py
|
palewire/court-scraper
|
da4b614fb16806d8b5117373d273f802ca93a8cb
|
[
"0BSD"
] | null | null | null |
from urllib.parse import parse_qs
from anticaptchaofficial.hcaptchaproxyless import hCaptchaProxyless
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from court_scraper.base.selenium_helpers import SeleniumHelpers
from court_scraper.utils import dates_for_range
from .search_results import SearchResultsPage
from ..search_api import SearchApi
class SearchLocators:
LAST_NAME = (By.NAME, 'lastName')
FIRST_NAME = (By.NAME, 'firstName')
MIDDLE_NAME = (By.NAME, 'middleName')
BIRTH_DATE = (By.NAME, 'dateOfBirth')
BUSINESS_NAME = (By.NAME, 'businessName')
COUNTY = (By.XPATH, '//*[@id="react-select-2--value"]/div[2]/input')
COUNTY_DROPDOWN_ARROW = (By.CSS_SELECTOR, '.Select-arrow-zone')
CASE_NUMBER = (By.NAME, 'caseNo')
CASE_NUMBER_RANGE_YEAR = (By.XPATH, '//*[@id="react-select-3--value"]/div[2]/input')
CASE_NUMBER_RANGE_TYPE = (By.XPATH, '//*[@id="react-select-4--value"]/div[2]/input')
CASE_NUMBER_RANGE_BEGIN = (By.NAME, 'caseNoRange.start')
CASE_NUMBER_RANGE_END = (By.NAME, 'caseNoRange.end')
CASE_RESULTS_TABLE = (By.CSS_SELECTOR, 'table#caseSearchResults')
DATE_CASE_TYPE = (By.XPATH, '//*[@id="react-select-5--value"]/div[2]/input')
DATE_CASE_STATUS = (By.XPATH, '//*[@id="react-select-6--value"]/div[2]/input')
FILING_DATE_RANGE_BEGIN = (By.NAME, 'filingDate.start')
FILING_DATE_RANGE_END = (By.NAME, 'filingDate.end')
DISPOSITION_DATE_RANGE_BEGIN = (By.NAME, 'dispositionDate.start')
DISPOSITION_DATE_RANGE_END = (By.NAME, 'dispositionDate.end')
STATE_BAR_ID = (By.NAME, 'attyNo')
CITATION_NUMBER = (By.NAME, 'citnNo')
DA_CASE_NUMBER = (By.NAME, 'daCaseNo')
ISSUING_AGENCY = (By.XPATH, '//*[@id="react-select-8--value"]/div[2]/input')
OFFENSE_DATE_BEGIN = (By.NAME, 'offenseDate.start')
OFFENSE_DATE_END = (By.NAME, 'offenseDate.end')
SEARCH_BUTTON = (By.NAME, 'search')
RESET_BUTTON = (By.XPATH, '//*[@id="home-container"]/main/div/form/div[11]/div/button[2]')
class SearchPage(SeleniumHelpers):
locators = SearchLocators
def __init__(self, driver, captcha_api_key=None):
self.url = "https://wcca.wicourts.gov/advanced.html"
self.captcha_api_key = captcha_api_key
self.driver = driver
def search_by_case_number(self, county, case_numbers=[]):
payload = []
search_api = SearchApi(county)
for idx, case_num in enumerate(case_numbers):
self.go_to() # advanced search page
self._execute_case_search(county, case_num)
# Solve and apply the captcha on the first search.
# (using it on subsequent case detail API calls causes errors)
kwargs = {
'cookies': self.cookies_as_dict(),
}
if idx == 0:
kwargs['captcha_solution'] = self.solve_captcha()
case_info = search_api.case_details(case_num, **kwargs)
payload.append(case_info)
return payload
def search_by_date(self, county, start_date, end_date, case_types=[]):
date_format = "%m-%d-%Y"
dates = dates_for_range(start_date, end_date, output_format=date_format)
payload = []
captcha_solution = None  # set on the first day that yields results; reused afterwards
for idx, day in enumerate(dates):
self.go_to() # advanced search page
self._execute_date_search(county, day, day, case_types)
if not self.search_has_results(self.driver.current_url):
continue
# Solve the captcha on the first search,
# save the solution for re-use, and apply the solution
# on the first case of the first day's search results
# (using it on subsequent case detail API calls causes errors)
result_kwargs = {
'use_captcha_solution': False
}
if idx == 0:
captcha_solution = self.solve_captcha()
result_kwargs['use_captcha_solution'] = True
# Searches that yield a single result redirect automatically
# to case detail page rather than search results listing page.
# For these cases, immediately execute the case detail query
if 'caseDetail' in self.driver.current_url:
case_info = self._get_case_details(
county,
self.driver.current_url,
captcha_solution,
result_kwargs['use_captcha_solution']
)
results = [case_info]
else:
results_page = SearchResultsPage(self.driver, county, self.captcha_api_key, captcha_solution)
results = results_page.results.get(**result_kwargs)
# TODO: if results_page.results_found():
# results_page.display_max_results()
payload.extend(results)
return payload
def _get_case_details(self, county, url, captcha_solution, use_captcha_solution):
# caseNo=2021SC000082&countyNo=2
query_str = url.split('?')[-1]
param_strs = query_str.split('&')
params = {}
for param_pair in param_strs:
key, val = param_pair.split('=')
params[key] = val
case_num = params['caseNo']
search_api = SearchApi(county)
kwargs = {
'cookies': self.cookies_as_dict(),
'county_num': int(params['countyNo'])
}
if use_captcha_solution:
kwargs['captcha_solution'] = captcha_solution
return search_api.case_details(case_num, **kwargs)
def _execute_case_search(self, county, case_number):
self.wait_until_visible(self.locators.COUNTY)
clean_county = self._county_titlecase(county)
self.fill_form_field(self.locators.COUNTY, clean_county)
self.fill_form_field(self.locators.CASE_NUMBER, case_number)
self.click(self.locators.SEARCH_BUTTON)
def _execute_date_search(self, county, start_date, end_date, case_types=[]):
# Wait until the county dropdown-menu arrow is clickable before filling the form field,
# in order to avoid overwriting of the field value by the "Statewide" option default
county_label_obj = self.driver.find_element_by_xpath("//label[contains(text(), 'County')]")
self.wait_until_clickable(self.locators.COUNTY_DROPDOWN_ARROW, driver=county_label_obj)
clean_county = self._county_titlecase(county)
self.fill_form_field(self.locators.COUNTY, clean_county)
self.fill_form_field(self.locators.FILING_DATE_RANGE_BEGIN, start_date)
self.fill_form_field(self.locators.FILING_DATE_RANGE_END, end_date)
if case_types:
self._select_case_types(case_types)
self.click(self.locators.SEARCH_BUTTON)
def _county_titlecase(self, county):
return county.replace('_', ' ').title()
def _select_case_types(self, case_types):
# TODO: Refactor to use locators
for case_type in case_types:
# Locate the case type menu by name
case_type_label_obj = self.driver.find_element_by_xpath("//label[contains(text(), 'Case types')]")
# Expand the Case types menu
select_arrow = case_type_label_obj.find_element_by_css_selector('.Select-arrow-zone')
select_arrow.click()
# Find and click the selection menu option for the case type
option_divs = (
case_type_label_obj
.find_element_by_css_selector('.Select-menu')
.find_elements_by_tag_name('div')
)
option = [opt for opt in option_divs if opt.text.endswith(f'({case_type})')][0]
option.click()
def solve_captcha(self):
# Solve the captcha
iframe = None
for frame in self.driver.find_elements_by_tag_name('iframe'):
if 'challenge' in frame.get_attribute('src'):
iframe = frame
break
iframe_url = iframe.get_attribute('src')
query_str = iframe_url.split('#')[-1]
site_key = parse_qs(query_str)['sitekey'][0]
solver = hCaptchaProxyless()
solver.set_verbose(1)
solver.set_key(self.captcha_api_key)
solver.set_website_url(self.driver.current_url)
solver.set_website_key(site_key)
g_response = solver.solve_and_return_solution()
return g_response
def search_has_results(self, current_url):
WebDriverWait(self.driver, 10).until(
EC.url_changes(current_url)
)
# Return True if it's a single-result redirect to case detail page
if 'caseDetail' in self.driver.current_url:
return True
WebDriverWait(self.driver, 10).until(
EC.visibility_of_element_located(
self.locators.CASE_RESULTS_TABLE
)
)
if 'No records found' in self.driver.page_source:
return False
else:
# Otherwise, assume there are results
return True
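# Rough usage sketch (hypothetical driver setup, county and dates, added for
# illustration; SeleniumHelpers supplies go_to/click/fill_form_field):
#   from selenium import webdriver
#   page = SearchPage(webdriver.Firefox(), captcha_api_key='<anti-captcha key>')
#   cases = page.search_by_date('milwaukee', '2021-01-01', '2021-01-07')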
| 45.38806
| 110
| 0.645183
| 8,655
| 0.948701
| 0
| 0
| 0
| 0
| 0
| 0
| 2,180
| 0.238956
|
e1a571d93e123889de55adde281c383678e87c9f
| 392
|
py
|
Python
|
bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_api import Mp3Api
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.information.information_api import InformationApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_muxing_list_query_params import Mp3MuxingListQueryParams
| 78.4
| 113
| 0.903061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e1a57aedd9dcfceea7d88aacf27bf3e654f2a6f2
| 6,107
|
py
|
Python
|
platform/bl_interface.py
|
sumasree98/2022-ectf-insecure-example
|
ed0a1e3618bca226b0cacd0157ff32a7f4fec2d9
|
[
"Apache-2.0"
] | null | null | null |
platform/bl_interface.py
|
sumasree98/2022-ectf-insecure-example
|
ed0a1e3618bca226b0cacd0157ff32a7f4fec2d9
|
[
"Apache-2.0"
] | null | null | null |
platform/bl_interface.py
|
sumasree98/2022-ectf-insecure-example
|
ed0a1e3618bca226b0cacd0157ff32a7f4fec2d9
|
[
"Apache-2.0"
] | 1
|
2022-01-28T02:30:35.000Z
|
2022-01-28T02:30:35.000Z
|
# 2022 eCTF
# Bootloader Interface Emulator
# Ben Janis
#
# (c) 2022 The MITRE Corporation
#
# This source file is part of an example system for MITRE's 2022 Embedded System
# CTF (eCTF). This code is being provided only for educational purposes for the
# 2022 MITRE eCTF competition, and may not meet MITRE standards for quality.
# Use this code at your own risk!
#
# DO NOT CHANGE THIS FILE
import argparse
import os
import logging
import socket
import select
from pathlib import Path
from typing import List, Optional, TypeVar
Message = TypeVar("Message")
LOG_FORMAT = "%(asctime)s:%(name)-s%(levelname)-8s %(message)s"
class Sock:
def __init__(
self,
sock_path: str,
q_len=1,
log_level=logging.INFO,
mode: int = None,
network=False,
):
self.sock_path = sock_path
self.network = network
self.buf = b""
# set up socket
if self.network:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(("0.0.0.0", int(sock_path)))
else:
# Make sure the socket does not already exist
try:
os.unlink(sock_path)
except OSError:
if os.path.exists(sock_path):
raise
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.bind(sock_path)
self.sock.listen(q_len)
self.csock = None
# change permissions if necessary
if mode and not self.network:
os.chmod(sock_path, mode)
# set up logger
fhandler = logging.FileHandler("bl_interface.log")
fhandler.setLevel(log_level)
fhandler.setFormatter(logging.Formatter(LOG_FORMAT))
shandler = logging.StreamHandler()
shandler.setLevel(log_level)
shandler.setFormatter(logging.Formatter(LOG_FORMAT))
self.logger = logging.getLogger(f"{sock_path}_log")
self.logger.addHandler(fhandler)
self.logger.addHandler(shandler)
self.logger.setLevel(log_level)
@staticmethod
def sock_ready(sock: socket.SocketType) -> bool:
ready, _, _ = select.select([sock], [], [], 0)
return bool(ready)
def active(self) -> bool:
# try to accept new client
if not self.csock:
if self.sock_ready(self.sock):
self.logger.info(f"Connection opened on {self.sock_path}")
self.csock, _ = self.sock.accept()
return bool(self.csock)
def deserialize(self) -> bytes:
buf = self.buf
self.buf = b""
return buf
def read_msg(self) -> Optional[Message]:
if not self.active():
return None
try:
if self.sock_ready(self.csock):
data = self.csock.recv(4096)
# connection closed
if not data:
self.close()
return None
self.buf += data
return self.deserialize()
except (ConnectionResetError, BrokenPipeError):
# cleanly handle forced closed connection
self.close()
return None
def read_all_msgs(self) -> List[Message]:
msgs = []
msg = self.read_msg()
while msg:
msgs.append(msg)
msg = self.read_msg()
return msgs
@staticmethod
def serialize(msg: bytes) -> bytes:
return msg
def send_msg(self, msg: Message) -> bool:
if not self.active():
return False
try:
self.csock.sendall(self.serialize(msg))
return True
except (ConnectionResetError, BrokenPipeError):
# cleanly handle forced closed connection
self.close()
return False
def close(self):
self.logger.warning(f"Conection closed on {self.sock_path}")
self.csock = None
self.buf = b""
def poll_data_socks(device_sock: Sock, host_sock: Sock):
if device_sock.active():
msg = device_sock.read_msg()
# send message to host
if host_sock.active():
if msg is not None:
host_sock.send_msg(msg)
if host_sock.active():
msg = host_sock.read_msg()
# send message to device
if device_sock.active():
if msg is not None:
device_sock.send_msg(msg)
def poll_restart_socks(device_sock: Sock, host_sock: Sock):
# First check that device opened a restart port
if device_sock.active():
# Send host restart commands to device
if host_sock.active():
msg = host_sock.read_msg()
if msg is not None:
device_sock.send_msg(msg)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-bl-sock",
type=Path,
required=True,
help="Path to device-side data socket (will be created)",
)
parser.add_argument(
"--data-host-sock",
type=int,
required=True,
help="Port for host-side data socket (must be available)",
)
parser.add_argument(
"--restart-bl-sock",
type=Path,
required=True,
help="Path to device-side data socket (will be created)",
)
parser.add_argument(
"--restart-host-sock",
type=Path,
required=True,
help="Path to device-side data socket (will be created)",
)
return parser.parse_args()
def main():
args = parse_args()
# open all sockets
data_bl = Sock(str(args.data_bl_sock), mode=0o777)
data_host = Sock(str(args.data_host_sock), mode=0o777, network=True)
restart_bl = Sock(str(args.restart_bl_sock), mode=0o777)
restart_host = Sock(str(args.restart_host_sock), mode=0o777)
# poll sockets forever
while True:
poll_data_socks(data_bl, data_host)
poll_restart_socks(restart_bl, restart_host)
if __name__ == "__main__":
main()
| 27.885845
| 80
| 0.595055
| 3,423
| 0.560504
| 0
| 0
| 220
| 0.036024
| 0
| 0
| 1,268
| 0.207631
|
e1a5eed695e57412a97412dd5eb33192adae977e
| 968
|
py
|
Python
|
Excel_python_demo.py
|
SJG88/excel_vba_python
|
ba7413be23796c67f921fe5428cd52592fdb54a9
|
[
"MIT"
] | null | null | null |
Excel_python_demo.py
|
SJG88/excel_vba_python
|
ba7413be23796c67f921fe5428cd52592fdb54a9
|
[
"MIT"
] | null | null | null |
Excel_python_demo.py
|
SJG88/excel_vba_python
|
ba7413be23796c67f921fe5428cd52592fdb54a9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This is a script to demo how to open a macro-enabled Excel file, write a pandas dataframe to it,
and save it under a new file name.
Created on Mon Mar 1 17:47:41 2021
@author: Shane Gore
"""
import os
import xlwings as xw
import pandas as pd
os.chdir(r"C:\Users\Shane Gore\Desktop\Roisin")
wb = xw.Book("CAO_template.xlsm")
worksheet = wb.sheets['EPOS_Closing_Stock_Detailed']
# Create dataframe
cars = {'Brand': ['Honda Civic','Toyota Corolla','Ford Focus','Audi A4'],
'Price': [22000,25000,27000,35000]
}
cars_df = pd.DataFrame(cars, columns = ['Brand', 'Price'])
# Write a dataframe to Excel
worksheet.range('A1').value = cars_df
# Create a dataframe from an Excel sheet
excel_df = worksheet.range('A1').options(pd.DataFrame, expand='table').value
# Save the Excel file as a new workbook
newfilename = ('Test4.xlsm')
wb.save(newfilename)
# Close the workbook
wb.close()
| 23.047619
| 99
| 0.669421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 557
| 0.575413
|
e1a7e0683b31fbc7612eb7456c53935a17b8dbcf
| 31,709
|
py
|
Python
|
code/Elipsoide_Clark_FAT.py
|
birocoles/paper-magnetic-elipsoid
|
81d9b2e39cbb942f619f590ad389eb30d58b46d4
|
[
"BSD-3-Clause"
] | 1
|
2017-02-26T15:19:25.000Z
|
2017-02-26T15:19:25.000Z
|
code/Elipsoide_Clark_FAT.py
|
birocoles/paper-magnetic-elipsoid
|
81d9b2e39cbb942f619f590ad389eb30d58b46d4
|
[
"BSD-3-Clause"
] | null | null | null |
code/Elipsoide_Clark_FAT.py
|
birocoles/paper-magnetic-elipsoid
|
81d9b2e39cbb942f619f590ad389eb30d58b46d4
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division
import numpy as np
from scipy import linalg
from matplotlib import pyplot as plt
from fatiando.gravmag import sphere
from fatiando import mesher, gridder, utils
from fatiando.vis import mpl
import scipy.special
import scipy.interpolate
class GeometricElement(object):
"""
Base class for all geometric elements.
"""
def __init__(self, props):
self.props = {}
if props is not None:
for p in props:
self.props[p] = props[p]
def addprop(self, prop, value):
"""
Add a physical property to this geometric element.
If it already has the property, the given value will overwrite the
existing one.
Parameters:
* prop : str
Name of the physical property.
* value : float
The value of this physical property.
"""
self.props[prop] = value
class Ellipsoid (GeometricElement):
'''
'''
def __init__(self, xc, yc, zc, a, b, c, alfa, delta, gamma, props):
GeometricElement.__init__(self, props)
self.xc = float(xc)
self.yc = float(yc)
self.zc = float(zc)
self.a = float(a)
self.b = float(b)
self.c = float(c)
self.alfa = float(alfa)
self.delta = float(delta)
self.gamma = float(gamma)
self.center = np.array([xc, yc, zc])
self.l1 = l1_v (alfa, delta)
self.l2 = l2_v (alfa, delta, gamma)
self.l3 = l3_v (alfa, delta, gamma)
self.m1 = m1_v (alfa, delta)
self.m2 = m2_v (alfa, delta, gamma)
self.m3 = m3_v (alfa, delta, gamma)
self.n1 = n1_v (delta)
self.n2 = n2_v (delta, gamma)
self.n3 = n3_v (delta, gamma)
self.ln = ln_v (props['remanence'][2], props['remanence'][1])
self.mn = mn_v (props['remanence'][2], props['remanence'][1])
self.nn = nn_v (props['remanence'][1])
self.mcon = np.array([[self.l1, self.m1, self.n1],[self.l2, self.m2, self.n2],[self.l3, self.m3, self.n3]])
self.mconT = (self.mcon).T
self.k_dec = np.array([[props['k1'][2]],[props['k2'][2]],[props['k3'][2]]])
self.k_int = np.array([[props['k1'][0]],[props['k2'][0]],[props['k3'][0]]])
self.k_inc = np.array([[props['k1'][1]],[props['k2'][1]],[props['k3'][1]]])
if props['k1'][0] == props['k2'][0] and props['k1'][0] == props['k3'][0]:
self.km = k_matrix2 (self.k_int,self.l1,self.l2,self.l3,self.m1,self.m2,self.m3,self.n1,self.n2,self.n3)
else:
self.Lr = Lr_v (self.k_dec, self.k_inc)
self.Mr = Mr_v (self.k_dec, self.k_inc)
self.Nr = Nr_v (self.k_inc)
self.km = k_matrix (self.k_int,self.Lr,self.Mr,self.Nr,self.l1,self.l2,self.l3,self.m1,self.m2,self.m3,self.n1,self.n2,self.n3)
#self.Ft = F_e (inten,lt,mt,nt,l1,l2,l3,m1,m2,m3,n1,n2,n3)
#self.JN = JN_e (ellipsoids.props['remanence'][0],ln,mn,nn,l1,l2,l3,m1,m2,m3,n1,n2,n3)
#self.N1,self.N2,self.N3 = N_desmag (ellipsoids.a,ellipsoids.b,ellipsoids.c,F2,E2)
#self.JR = JR_e (km,JN,Ft)
#self.JRD = JRD_e (km,N1,N2,N3,JR)
#self.JRD_carte = mconT.dot(JRD)
#self.JRD_ang = utils.vec2ang(JRD_carte)
def __str__(self):
"""Return a string representation of the ellipsoids."""
names = [('xc', self.xc), ('yc', self.yc), ('zc', self.zc),
('a', self.a), ('b', self.b), ('c', self.c),
('alfa', self.alfa),('delta', self.delta),('gamma', self.gamma)]
names.extend((p, self.props[p]) for p in sorted(self.props))
return ' | '.join('%s:%g' % (n, v) for n, v in names)
def elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids):
'''
Computes the three components of the magnetic field of an ellipsoid.
a: scalar - major semi-axis
b: scalar - intermediate semi-axis
c: scalar - minor semi-axis
h: scalar - depth
alfa: scalar - azimuth of the ellipsoid with respect to "a"
delta: scalar - inclination of the ellipsoid with respect to "a"
gamma: scalar - angle between the semi-axis "b" and the projection of the ellipsoid center onto the xy plane
xp: matrix - grid of the x axis
yp: matrix - grid of the y axis
zp: matrix - grid of the z axis
xc: scalar - x position of the ellipsoid center
yc: scalar - y position of the ellipsoid center
J: vector - magnetization of the body
'''
# Direction-cosine parameters
#l1 = l1_v (ellipsoids.alfa, ellipsoids.delta)
#l2 = l2_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
#l3 = l3_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
#m1 = m1_v (ellipsoids.alfa, ellipsoids.delta)
#m2 = m2_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
#m3 = m3_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
#n1 = n1_v (ellipsoids.delta)
#n2 = n2_v (ellipsoids.delta, ellipsoids.gamma)
#n3 = n3_v (ellipsoids.delta, ellipsoids.gamma)
#ln = ln_v (ellipsoids.props['remanence'][2], ellipsoids.props['remanence'][1])
#mn = mn_v (ellipsoids.props['remanence'][2], ellipsoids.props['remanence'][1])
#nn = nn_v (ellipsoids.props['remanence'][1])
#mcon = np.array([[l1, m1, n1],[l2, m2, n2],[l3, m3, n3]])
#mconT = mcon.T
#print mconT
lt = ln_v (dec, inc)
mt = mn_v (dec, inc)
nt = nn_v (inc)
#print l1,m1,n1
#print l2,m2,n2
#print l3,m3,n3
# Cartesian coordinates in the ellipsoid system
x1 = x1_e (xp,yp,zp,ellipsoids.xc,ellipsoids.yc,ellipsoids.zc,ellipsoids.l1,ellipsoids.m1,ellipsoids.n1)
x2 = x2_e (xp,yp,zp,ellipsoids.xc,ellipsoids.yc,ellipsoids.zc,ellipsoids.l2,ellipsoids.m2,ellipsoids.n2)
x3 = x3_e (xp,yp,zp,ellipsoids.xc,ellipsoids.yc,ellipsoids.zc,ellipsoids.l3,ellipsoids.m3,ellipsoids.n3)
# Auxiliary quantities
p0 = p0_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3)
p1 = p1_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3)
p2 = p2_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3)
p = p_e (p1,p2)
q = q_e (p0,p1,p2)
teta = teta_e (p,q)
# Roots of the cubic equation
lamb = lamb_e (p,teta,p2)
# Parameters for the elliptic integrals
F,E,F2,E2,k,teta_linha = parametros_integrais(ellipsoids.a,ellipsoids.b,ellipsoids.c,lamb)
# Magnetizations in ellipsoid coordinates
#k_dec = np.array([[ellipsoids.props['k1'][2]],[ellipsoids.props['k2'][2]],[ellipsoids.props['k3'][2]]])
#k_int = np.array([[ellipsoids.props['k1'][0]],[ellipsoids.props['k2'][0]],[ellipsoids.props['k3'][0]]])
#k_inc = np.array([[ellipsoids.props['k1'][1]],[ellipsoids.props['k2'][1]],[ellipsoids.props['k3'][1]]])
#if ellipsoids.props['k1'][0] == ellipsoids.props['k2'][0] and ellipsoids.props['k1'][0] == ellipsoids.props['k3'][0]:
# km = k_matrix2 (k_int,l1,l2,l3,m1,m2,m3,n1,n2,n3)
#else:
# Lr = Lr_v (k_dec, k_inc)
# Mr = Mr_v (k_dec, k_inc)
# Nr = Nr_v (k_inc)
# km = k_matrix (k_int,Lr,Mr,Nr,l1,l2,l3,m1,m2,m3,n1,n2,n3)
Ft = F_e (inten,lt,mt,nt,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
JN = JN_e (ellipsoids.props['remanence'][0],ellipsoids.ln,ellipsoids.mn,ellipsoids.nn,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
N1,N2,N3 = N_desmag (ellipsoids.a,ellipsoids.b,ellipsoids.c,F2,E2)
JR = JR_e (ellipsoids.km,JN,Ft)
JRD = JRD_e (ellipsoids.km,N1,N2,N3,JR)
JRD_carte = (ellipsoids.mconT).dot(JRD)
JRD_ang = utils.vec2ang(JRD_carte)
#print Ft
#print JN
#print JRD
#print N1,N2,N3
#print JRD_ang
# Derivatives of lambda with respect to the positions
dlambx1 = dlambx1_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
dlambx2 = dlambx2_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
dlambx3 = dlambx3_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
# Evaluate the integrals
A, B, C = integrais_elipticas(ellipsoids.a,ellipsoids.b,ellipsoids.c,k,teta_linha,F,E)
# Geometry for computing B (ellipsoid axes)
cte = cte_m (ellipsoids.a,ellipsoids.b,ellipsoids.c,lamb)
V1, V2, V3 = v_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
# Geometry matrix for B1
m11 = (cte*dlambx1*V1) - A
m12 = cte*dlambx1*V2
m13 = cte*dlambx1*V3
# Geometry matrix for B2
m21 = cte*dlambx2*V1
m22 = (cte*dlambx2*V2) - B
m23 = cte*dlambx2*V3
# Geometry matrix for B3
m31 = cte*dlambx3*V1
m32 = cte*dlambx3*V2
m33 = (cte*dlambx3*V3) - C
# Forward problem (compute the external field in ellipsoid coordinates)
B1 = B1_e (m11,m12,m13,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
B2 = B2_e (m21,m22,m23,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
B3 = B3_e (m31,m32,m33,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
#constante = constante_nova (a,b,c,lamb,JRD,x1,x2,x3)
#B1 = B1_novo (constante,dlambx1,a,b,c,JRD,A)
#B2 = B2_novo (constante,dlambx2,a,b,c,JRD,B)
#B3 = B3_novo (constante,dlambx3,a,b,c,JRD,C)
# Forward problem (compute the external field in geographic coordinates)
Bx = Bx_c (B1,B2,B3,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3)
By = By_c (B1,B2,B3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3)
Bz = Bz_c (B1,B2,B3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
return Bx,By,Bz,JRD_ang
# Forward problem (compute the external field and anomaly in geographic coordinates, SI units)
def bx_c(xp,yp,zp,inten,inc,dec,ellipsoids):
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = np.zeros(size, dtype=np.float)
ctemag = 1
for i in range(len(ellipsoids)):
bx,by,bz,jrd_ang = elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids[i])
res += bx
res = res*ctemag
return res
def by_c(xp,yp,zp,inten,inc,dec,ellipsoids):
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = np.zeros(size, dtype=np.float)
ctemag = 1
for i in range(len(ellipsoids)):
bx,by,bz,jrd_ang = elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids[i])
res += by
res = res*ctemag
return res
def bz_c(xp,yp,zp,inten,inc,dec,ellipsoids):
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = np.zeros(size, dtype=np.float)
ctemag = 1
for i in range(len(ellipsoids)):
bx,by,bz,jrd_ang = elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids[i])
res += bz
res = res*ctemag
return res
def tf_c(xp,yp,zp,inten,inc,dec,ellipsoids):
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = np.zeros(size, dtype=np.float)
ctemag = 1
for i in range(len(ellipsoids)):
bx,by,bz,jrd_ang = elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids[i])
tf = bx*np.cos(inc)*np.cos(dec) + by*np.cos(inc)*np.sin(dec) + bz*np.sin(inc)
res += tf
res = res*ctemag
return res,jrd_ang
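# Added note: tf_c projects the anomaly field onto the main-field direction, i.e.
# delta_T ~= Bx*cos(inc)*cos(dec) + By*cos(inc)*sin(dec) + Bz*sin(inc),
# with inc and dec given in radians.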
def l1_v (alfa, delta):
'''
Orientacao do elipsoide com respeito ao eixo x.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
output:
Direcao em radianos.
'''
l1 = (-np.cos(alfa)*np.cos(delta))
return l1
def l2_v (alfa, delta, gamma):
'''
Orientacao do elipsoide com respeito ao eixo y.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
gamma - Angulo entre o eixo-maior e a projecao vertical do centro do elipsoide com o plano. (-90<=gamma<=90)
output:
Direcao em radianos.
'''
l2 = (np.cos(alfa)*np.cos(gamma)*np.sin(delta)+np.sin(alfa)*np.sin(gamma))
return l2
def l3_v (alfa, delta, gamma):
'''
Orientacao do elipsoide com respeito ao eixo z.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
gamma - Angulo entre o eixo-maior e a projecao vertical do centro do elipsoide com o plano. (-90<=gamma<=90)
output:
Direcao em radianos.
'''
l3 = (np.sin(alfa)*np.cos(gamma)-np.cos(alfa)*np.sin(gamma)*np.sin(delta))
return l3
def m1_v (alfa, delta):
'''
Orientacao do elipsoide com respeito ao eixo x.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
output:
Direcao em radianos.
'''
m1 = (-np.sin(alfa)*np.cos(delta))
return m1
def m2_v (alfa, delta, gamma):
'''
Orientacao do elipsoide com respeito ao eixo y.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
gamma - Angulo entre o eixo-maior e a projecao vertical do centro do elipsoide com o plano. (-90<=gamma<=90)
output:
Direcao em radianos.
'''
m2 = (np.sin(alfa)*np.cos(gamma)*np.sin(delta)-np.cos(alfa)*np.sin(gamma))
return m2
def m3_v (alfa, delta, gamma):
'''
Orientacao do elipsoide com respeito ao eixo z.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
gamma - Angulo entre o eixo-maior e a projecao vertical do centro do elipsoide com o plano. (-90<=gamma<=90)
output:
Direcao em radianos.
'''
m3 = (-np.cos(alfa)*np.cos(gamma)-np.sin(alfa)*np.sin(gamma)*np.sin(delta))
return m3
def n1_v (delta):
'''
Orientacao do elipsoide com respeito ao eixo x.
input:
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
output:
Direcao em radianos.
'''
n1 = (-np.sin(delta))
return n1
def n2_v (delta, gamma):
'''
Orientacao do elipsoide com respeito ao eixo y.
input:
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
gamma - Angulo entre o eixo-maior e a projecao vertical do centro do elipsoide com o plano. (-90<=gamma<=90)
output:
Direcao em radianos.
'''
n2 = (-np.cos(gamma)*np.cos(delta))
return n2
def n3_v (delta, gamma):
'''
Orientacao do elipsoide com respeito ao eixo z.
input:
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
gamma - Angulo entre o eixo-maior e a projecao vertical do centro do elipsoide com o plano. (-90<=gamma<=90)
output:
Direcao em radianos.
'''
n3 = (np.sin(gamma)*np.cos(delta))
return n3
def ln_v (declinacao, inclinacao):
'''
Orientacao do elipsoide com respeito ao eixo x.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
output:
Direcao em radianos.
'''
ln = (np.cos(declinacao)*np.cos(inclinacao))
return ln
def mn_v (declinacao, inclinacao):
'''
Orientacao do elipsoide com respeito ao eixo x.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
output:
Direcao em radianos.
'''
mn = (np.sin(declinacao)*np.cos(inclinacao))
return mn
def nn_v (inclinacao):
'''
Orientacao do elipsoide com respeito ao eixo x.
input:
alfa - Azimute com relacao ao eixo-maior. (0<=alfa<=360)
delta - Inclinacao com relacao ao eixo-maior. (0<=delta<=90)
output:
Direcao em radianos.
'''
nn = np.sin(inclinacao)
return nn
def Lr_v (k_dec, k_inc):
'''
Cossenos diretores dos eixos dos vetores de susceptibilidade magnetica.
input:
k_dec - declinacoes dos vetores de susceptibilidade.
k_inc - inclinacoes dos vetores de susceptibilidade.
'''
Lr = np.zeros(3)
for i in range (3):
Lr[i] = np.cos(k_dec[i])*np.cos(k_inc[i])
return Lr
def Mr_v (k_dec, k_inc):
'''
Cossenos diretores dos eixos dos vetores de susceptibilidade magnetica.
input:
k_dec - declinacoes dos vetores de susceptibilidade.
k_inc - inclinacoes dos vetores de susceptibilidade.
'''
Mr = np.zeros(3)
for i in range (3):
Mr[i] = np.sin(k_dec[i])*np.cos(k_inc[i])
return Mr
def Nr_v (k_inc):
'''
Cossenos diretores dos eixos dos vetores de susceptibilidade magnetica.
input:
k_inc - inclinacoes dos vetores de susceptibilidade.
'''
Nr = np.zeros(3)
for i in range (3):
Nr[i] = np.sin(k_inc[i])
return Nr
def F_e (intensidadeT,lt,mt,nt,l1,l2,l3,m1,m2,m3,n1,n2,n3):
'''
Transformation of the Earth's magnetic field vector into the ellipsoid-axis coordinates.
'''
Ft = intensidadeT*np.ravel(np.array([[(lt*l1+mt*m1+nt*n1)], [(lt*l2+mt*m2+nt*n2)], [(lt*l3+mt*m3+nt*n3)]]))
return Ft
def JN_e (intensidade,ln,mn,nn,l1,l2,l3,m1,m2,m3,n1,n2,n3):
'''
Transformation of the remanent magnetization vector into the ellipsoid-axis coordinates.
'''
JN = intensidade*np.ravel(np.array([[(ln*l1+mn*m1+nn*n1)], [(ln*l2+mn*m2+nn*n2)], [(ln*l3+mn*m3+nn*n3)]]))
return JN
def N_desmag (a,b,c,F2,E2):
'''
Demagnetization factors along the three semi-axes of the ellipsoid (N1, N2, N3).
'''
N1 = ((4.*np.pi*a*b*c)/((a**2-b**2)*(a**2-c**2)**0.5)) * (F2-E2)
N2 = (((4.*np.pi*a*b*c)*(a**2-c**2)**0.5)/((a**2-b**2)*(b**2-c**2))) * (E2 - ((b**2-c**2)/(a**2-c**2)) * F2 - ((c*(a**2-b**2))/(a*b*(a**2-c**2)**0.5)))
N3 = ((4.*np.pi*a*b*c)/((b**2-c**2)*(a**2-c**2)**0.5)) * (((b*(a**2-c**2)**0.5)/(a*c)) - E2)
return N1, N2, N3
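# Added note: N1, N2 and N3 are the demagnetization factors along the three
# semi-axes (Gaussian convention); for any ellipsoid they should satisfy
# N1 + N2 + N3 = 4*pi, which is a convenient sanity check on F2 and E2.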
def k_matrix (k_int,Lr,Mr,Nr,l1,l2,l3,m1,m2,m3,n1,n2,n3):
'''
Susceptibility tensor matrix (anisotropic case, using the principal directions Lr, Mr, Nr).
'''
l = np.array([[l1],[l2],[l3]])
m = np.array([[m1],[m2],[m3]])
n = np.array([[n1],[n2],[n3]])
k = np.zeros([3,3])
for i in range (3):
for j in range (3):
for r in range (3):
k[i,j] = k[i,j] + (k_int[r]*(Lr[r]*l[i] + Mr[r]*m[i] + Nr[r]*n[i])*(Lr[r]*l[j] + Mr[r]*m[j] + Nr[r]*n[j]))
return k
def k_matrix2 (k_int,l1,l2,l3,m1,m2,m3,n1,n2,n3):
'''
Susceptibility tensor matrix (isotropic case, built directly from the body-axis direction cosines).
'''
l = np.array([[l1],[l2],[l3]])
m = np.array([[m1],[m2],[m3]])
n = np.array([[n1],[n2],[n3]])
k = np.zeros([3,3])
for i in range (3):
for j in range (3):
for r in range (3):
k[i,j] = k[i,j] + (k_int[r]*(l[r]*l[i] + m[r]*m[i] + n[r]*n[i])*(l[r]*l[j] + m[r]*m[j] + n[r]*n[j]))
return k
def JR_e (km,JN,Ft):
'''
Resultant magnetization vector without the demagnetization correction.
'''
JR = km.dot(Ft) + JN
return JR
def JRD_e (km,N1,N2,N3,JR):
'''
Resultant magnetization vector with the demagnetization correction.
'''
I = np.identity(3)
kn0 = km[:,0]*N1
kn1 = km[:,1]*N2
kn2 = km[:,2]*N3
kn = (np.vstack((kn0,kn1,kn2))).T
A = I + kn
JRD = (linalg.inv(A)).dot(JR)
return JRD
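# Added note: JRD implements the self-demagnetization correction
#   JRD = (I + K.diag(N1, N2, N3))^(-1) (K.Ft + JN)
# where K is the susceptibility matrix in ellipsoid coordinates, Ft the inducing
# field and JN the remanent magnetization (see JR_e above).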
def x1_e (xp,yp,zp,xc,yc,h,l1,m1,n1):
'''
Calculo da coordenada x no elipsoide
input:
xp,yp - Matriz: Coordenadas geograficas (malha).
h - Profundidade do elipsoide.
l1,m1,n1 - Orientacao do elipsoide (eixo x)
output:
x1 - Coordenada x do elipsoide.
'''
x1 = (xp-xc)*l1+(yp-yc)*m1+(-zp-h)*n1
return x1
def x2_e (xp,yp,zp,xc,yc,h,l2,m2,n2):
'''
Calculo da coordenada y no elipsoide
input:
xp,yp - Matriz: Coordenadas geograficas (malha).
h - Profundidade do elipsoide.
l2,m2,n2 - Orientacao do elipsoide (eixo y).
output:
x2 - Coordenada y do elipsoide.
'''
x2 = (xp-xc)*l2+(yp-yc)*m2+(-zp-h)*n2
return x2
def x3_e (xp,yp,zp,xc,yc,h,l3,m3,n3):
'''
Calculo da coordenada z no elipsoide
input:
xp,yp - Matriz: Coordenadas geograficas (malha).
h - Profundidade do elipsoide.
l3,m3,n3 - Orientacao do elipsoide (eixo z).
output:
x3 - Coordenada z do elipsoide.
'''
x3 = (xp-xc)*l3+(yp-yc)*m3+(-zp-h)*n3
return x3
def p0_e (a,b,c,x1,x2,x3):
'''
Coefficient of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
a,b,c - Semi-axes of the ellipsoid.
x1,x2,x3 - Coordinates in the ellipsoid system.
output:
p0 - Coefficient.
'''
p0 = (a*b*c)**2-(b*c*x1)**2-(c*a*x2)**2-(a*b*x3)**2
return p0
def p1_e (a,b,c,x1,x2,x3):
'''
Coefficient of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
a,b,c - Semi-axes of the ellipsoid.
x1,x2,x3 - Coordinates in the ellipsoid system.
output:
p1 - Coefficient.
'''
p1 = (a*b)**2+(b*c)**2+(c*a)**2-(b**2+c**2)*x1**2-(c**2+a**2)*x2**2-(a**2+b**2)*x3**2
return p1
def p2_e (a,b,c,x1,x2,x3):
'''
Coefficient of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
a,b,c - Semi-axes of the ellipsoid.
x1,x2,x3 - Coordinates in the ellipsoid system.
output:
p2 - Coefficient.
'''
p2 = a**2+b**2+c**2-x1**2-x2**2-x3**2
return p2
def p_e (p1,p2):
'''
Auxiliary constant of the depressed cubic.
input:
p1,p2 - coefficients of the cubic equation
output:
p - Constant.
'''
p = p1-(p2**2)/3.
return p
def q_e (p0,p1,p2):
'''
Auxiliary constant of the depressed cubic.
input:
p0,p1,p2 - coefficients of the cubic equation
output:
q - Constant.
'''
q = p0-((p1*p2)/3.)+2*(p2/3.)**3
return q
def teta_e (p,q):
'''
Angular constant (radians) used in the trigonometric solution of the cubic.
input:
p - coefficient of the depressed cubic
q - constant
output:
teta - Constant (radians).
'''
teta = np.arccos(-q/(2*np.sqrt((-p/3.)**3)))
#teta = np.arccos((-q/2.)*np.sqrt((-p/3.)**3))
return teta
def lamb_e (p,teta,p2):
'''
Largest real root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
p,p2 - coefficients of the cubic equation
teta - angular constant (radians)
output:
lamb - Largest real root.
'''
lamb = 2.*((-p/3.)**0.5)*np.cos(teta/3.)-(p2/3.)
#lamb = 2*((-p/3.)*np.cos(teta/3.)-(p2/3.))**0.5
#lamb = 2*((-p/3.)*np.cos(teta/3.))**0.5 - (p2/3.)
return lamb
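# Added note: lamb is the ellipsoidal coordinate of the observation point, i.e.
# the largest real root of s^3 + p2*s^2 + p1*s + p0 = 0, obtained from the
# trigonometric solution of the depressed cubic (p = p1 - p2^2/3, q as in q_e).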
def mi_e (p,teta,p2):
'''
Intermediate real root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
p,p2 - coefficients of the cubic equation
teta - angular constant (radians)
output:
mi - Intermediate real root.
'''
mi = -2.*((-p/3.)**0.5)*np.cos(teta/3.+np.pi/3.)-(p2/3.)
return mi
def ni_e (p,teta,p2):
'''
Smallest real root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0
input:
p,p2 - coefficients of the cubic equation
teta - angular constant (radians)
output:
ni - Smallest real root.
'''
ni = -2.*np.sqrt(-p/3.)*np.cos(teta/3. - np.pi/3.)-(p2/3.)
return ni
def dlambx1_e (a,b,c,x1,x2,x3,lamb):
'''
Derivada de lamb em relacao ao eixo x1 do elipsoide.
input:
a,b,c, - semi-eixos do elipsoide.
x1,x2,x3 - Eixo de coordenadas do elipsoide.
lamb - Maior raiz real da equacao cubica.
output:
dlambx1 - escalar
'''
dlambx1 = (2*x1/(a**2+lamb))/((x1/(a**2+lamb))**2+(x2/(b**2+lamb))**2+((x3/(c**2+lamb))**2))
return dlambx1
def dlambx2_e (a,b,c,x1,x2,x3,lamb):
'''
Derivada de lamb em relacao ao eixo x2 do elipsoide.
input:
a,b,c, - semi-eixos do elipsoide.
x1,x2,x3 - Eixo de coordenadas do elipsoide.
lamb - Maior raiz real da equacao cubica.
output:
dlambx2 - escalar
'''
dlambx2 = (2*x2/(b**2+lamb))/((x1/(a**2+lamb))**2+(x2/(b**2+lamb))**2+((x3/(c**2+lamb))**2))
return dlambx2
def dlambx3_e (a,b,c,x1,x2,x3,lamb):
'''
Derivada de lamb em relacao ao eixo x3 do elipsoide.
input:
a,b,c, - semi-eixos do elipsoide.
x1,x2,x3 - Eixo de coordenadas do elipsoide.
lamb - Maior raiz real da equacao cubica.
output:
dlambx3 - escalar
'''
dlambx3 = (2*x3/(c**2+lamb))/((x1/(a**2+lamb))**2+(x2/(b**2+lamb))**2+((x3/(c**2+lamb))**2))
return dlambx3
def cte_m (a,b,c,lamb):
'''
Fator geometrico do campo magnetico (fi) com relacao aos eixos do elipsoide
input:
a,b,c - semi-eixos do elipsoide
lamb - Maior raiz real da equacao cubica.
output:
cte - constante escalar.
'''
cte = 1/np.sqrt((a**2+lamb)*(b**2+lamb)*(c**2+lamb))
return cte
def v_e (a,b,c,x1,x2,x3,lamb):
'''
Constante do campo magnetico (fi) com relacao aos eixos do elipsoide
input:
a,b,c - semi-eixos do elipsoide
x1,x2,x3 - Eixo de coordenadas do elipsoide.
lamb - Maior raiz real da equacao cubica.
output:
v - matriz
'''
V1 = x1/(a**2+lamb)
V2 = x2/(b**2+lamb)
V3 = x3/(c**2+lamb)
return V1, V2, V3
def B1_e (m11,m12,m13,J,a,b,c):
'''
Calculo do campo magnetico (Bi) com relacao aos eixos do elipsoide
input:
a,b,c - semi-eixos do elipsoide
x1,x2,x3 - Eixo de coordenadas do elipsoide.
dlambx1 - matriz: derivada de lambda em relacao ao eixo x1.
cte - matriz
v - matriz
A - matriz: integrais do potencial
J - vetor: magnetizacao
output:
B1 - matriz
'''
B1 = 2*np.pi*a*b*c*(m11*J[0]+m12*J[1]+m13*J[2])
return B1
def B2_e (m21,m22,m23,J,a,b,c):
'''
Calculo do campo magnetico (Bi) com relacao aos eixos do elipsoide
input:
a,b,c - semi-eixos do elipsoide
x1,x2,x3 - Eixo de coordenadas do elipsoide.
dlambx2 - matriz: derivada de lambda em relacao ao eixo x2.
cte - matriz
v - matriz
B - matriz: integrais do potencial
J - vetor: magnetizacao
output:
B2 - matriz
'''
B2 = 2*np.pi*a*b*c*(m21*J[0]+m22*J[1]+m23*J[2])
return B2
def B3_e (m31,m32,m33,J,a,b,c):
'''
Calculo do campo magnetico (Bi) com relacao aos eixos do elipsoide
input:
a,b,c - semi-eixos do elipsoide
x1,x2,x3 - Eixo de coordenadas do elipsoide.
dlambx3 - matriz: derivada de lambda em relacao ao eixo x3.
cte - matriz
v - matriz
C - matriz: integrais do potencial
J - vetor: magnetizacao
output:
B3 - matriz
'''
B3 = 2*np.pi*a*b*c*(m31*J[0]+m32*J[1]+m33*J[2])
return B3
def Bx_c (B1,B2,B3,l1,l2,l3):
'''
Calculo do campo magnetico (Bi) com relacao aos eixos geograficos
input:
B1,B2,B3 - vetores
l1,l2,l3 - escalares.
output:
Bx - matriz
'''
Bx = B1*l1+B2*l2+B3*l3
return Bx
def By_c (B1,B2,B3,m1,m2,m3):
'''
Calculo do campo magnetico (Bi) com relacao aos eixos geograficos
input:
B1,B2,B3 - vetores
m1,m2,m3 - escalares.
output:
By - matriz
'''
By = B1*m1+B2*m2+B3*m3
return By
def Bz_c (B1,B2,B3,n1,n2,n3):
'''
Calculo do campo magnetico (Bi) com relacao aos eixos geograficos
input:
B1,B2,B3 - vetores
n1,n2,n3 - escalares.
output:
Bz - matriz
'''
Bz = B1*n1+B2*n2+B3*n3
return Bz
def Alambda_simp_ext3(a,b,c,lamb):
'''
Potential integral evaluated numerically with an extended Simpson rule.
input:
a,b,c - semi-axes of the ellipsoid
lamb - largest real root of the cubic (ellipsoidal coordinate)
output:
A - matrix
'''
A = []
umax = 1000000.0
N = 300000
aux1 = 3./8.
aux2 = 7./6.
aux3 = 23./24.
for l in np.ravel(lamb):
h = (umax - l)/(N-1)
u = np.linspace(l, umax, N)
R = np.sqrt((a**2 + u) * (b**2 + u) * (c**2 + u))  # product, not sum
f = 1./((a**2 + u)*R)
aij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
A.append(aij)
A = np.array(A).reshape((lamb.shape[0], lamb.shape[1]))
return A
def Blambda_simp_ext3(a,b,c,lamb):
'''
Potential integral evaluated numerically with an extended Simpson rule.
input:
a,b,c - semi-axes of the ellipsoid
lamb - largest real root of the cubic (ellipsoidal coordinate)
output:
B - matrix
'''
B = []
umax = 1000000.0
N = 300000
aux1 = 3./8.
aux2 = 7./6.
aux3 = 23./24.
for l in np.ravel(lamb):
h = (umax - l)/(N-1)
u = np.linspace(l, umax, N)
R = np.sqrt((a**2 + u) * (b**2 + u) * (c**2 + u))  # product, not sum
f = 1./((b**2 + u)*R)
bij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
B.append(bij)
B = np.array(B).reshape((lamb.shape[0], lamb.shape[1]))
return B
def Clambda_simp_ext3(a,b,c,lamb):
'''
Potential integral evaluated numerically with an extended Simpson rule.
input:
a,b,c - semi-axes of the ellipsoid
lamb - largest real root of the cubic (ellipsoidal coordinate)
output:
C - matrix
'''
C = []
umax = 1000000.0
N = 300000
aux1 = 3./8.
aux2 = 7./6.
aux3 = 23./24.
for l in np.ravel(lamb):
h = (umax - l)/(N-1)
u = np.linspace(l, umax, N)
R = np.sqrt((a**2 + u) * (b**2 + u) * (c**2 + u))  # product, not sum
f = 1./((c**2 + u)*R)
cij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
C.append(cij)
C = np.array(C).reshape((lamb.shape[0], lamb.shape[1]))
return C
def Dlambda_simp_ext3(a, b, c, lamb):
'''
Potential integral evaluated numerically with an extended Simpson rule.
input:
a,b,c - semi-axes of the ellipsoid
lamb - largest real root of the cubic (ellipsoidal coordinate)
output:
D - matrix
'''
D = []
umax = 1000000.0
N = 300000
aux1 = 3./8.
aux2 = 7./6.
aux3 = 23./24.
for l in np.ravel(lamb):
h = (umax - l)/(N-1)
u = np.linspace(l, umax, N)
R = np.sqrt((a**2 + u) * (b**2 + u) * (c**2 + u))  # product, not sum
f = 1./R
dij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
D.append(dij)
D = np.array(D).reshape((lamb.shape[0], lamb.shape[1]))
return D
def parametros_integrais(a,b,c,lamb):
'''
a: scalar - major semi-axis
b: scalar - intermediate semi-axis
c: scalar - minor semi-axis
lamb - Largest real root of the cubic equation.
'''
k = np.zeros_like(lamb)
k1 = ((a**2-b**2)/(a**2-c**2))**0.5
k.fill(k1)
k2 = ((a**2-b**2)/(a**2-c**2))**0.5
teta_linha = np.arcsin(((a**2-c**2)/(a**2+lamb))**0.5)
teta_linha2 = np.arccos(c/a)
F = scipy.special.ellipkinc(teta_linha, k)
E = scipy.special.ellipeinc(teta_linha, k)
F2 = scipy.special.ellipkinc(teta_linha2, k2)
E2 = scipy.special.ellipeinc(teta_linha2, k2)
return F,E,F2,E2,k,teta_linha
def integrais_elipticas(a,b,c,k,teta_linha,F,E):
'''
a: scalar - major semi-axis
b: scalar - intermediate semi-axis
c: scalar - minor semi-axis
k: matrix - geometry parameter (elliptic modulus)
teta_linha: matrix - geometry parameter (amplitude)
F: matrix - incomplete elliptic integrals of the first kind
E: matrix - incomplete elliptic integrals of the second kind
'''
A2 = (2/((a**2-b**2)*(a**2-c**2)**0.5))*(F-E)
B2 = ((2*(a**2-c**2)**0.5)/((a**2-b**2)*(b**2-c**2)))*(E-((b**2-c**2)/(a**2-c**2))*F-((k**2*np.sin(teta_linha)*np.cos(teta_linha))/(1-k**2*np.sin(teta_linha)*np.sin(teta_linha))**0.5))
C2 = (2/((b**2-c**2)*(a**2-c**2)**0.5))*(((np.sin(teta_linha)*((1-k**2*np.sin(teta_linha)*np.sin(teta_linha))**0.5))/np.cos(teta_linha))-E)
return A2,B2,C2
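# Added note: A2, B2 and C2 are closed forms (via the Legendre elliptic integrals
# F and E) of the potential integrals
#   A(lamb) = int_lamb^inf du / ((a^2+u) R(u)),  R(u) = sqrt((a^2+u)(b^2+u)(c^2+u)),
# and analogously for B (b^2+u) and C (c^2+u); the *_simp_ext3 routines above
# evaluate the same integrals numerically.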
| 30.372605
| 216
| 0.583431
| 3,454
| 0.108928
| 0
| 0
| 0
| 0
| 0
| 0
| 15,257
| 0.481157
|
e1a8d307cf28e74c7cd2efb91e428fd65a4beecc
| 852
|
py
|
Python
|
app.py
|
aserhatdemir/digitalocean
|
4ae2bfc2831b4fae15d9076b3b228c9a4bda44e7
|
[
"MIT"
] | null | null | null |
app.py
|
aserhatdemir/digitalocean
|
4ae2bfc2831b4fae15d9076b3b228c9a4bda44e7
|
[
"MIT"
] | null | null | null |
app.py
|
aserhatdemir/digitalocean
|
4ae2bfc2831b4fae15d9076b3b228c9a4bda44e7
|
[
"MIT"
] | null | null | null |
from do import DigitalOcean
import argparse
import json
def do_play(token):
do = DigitalOcean(token)
# ----
# for i in range(3):
# do.create_droplet(f'node-{i}', 'fra1', 'do-python')
# do.wait_droplet_creation_process('do-python')
# ----
# do.destroy_droplets('do-python')
# ----
drops = do.manager.get_all_droplets(tag_name='do-python')
for drop in drops:
print(drop.status)
keys = do.manager.get_all_sshkeys()
for key in keys:
print(key.public_key)
def parse_input(file):
with open(file, 'r') as f:
config = json.load(f)
print(config["instances"])
print(config["setup"])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-token')
args = parser.parse_args()
# parse_input(args.file)
do_play(args.token)
| 23.027027
| 61
| 0.627934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.293427
|
e1a8f6b9d508e5ab80894a039b117dd4b1afc6ed
| 343
|
py
|
Python
|
convert_twitter_data.py
|
charlieyou/data-science-final-project
|
7968261c3e44fe3544360a08fea271b611d105c1
|
[
"Apache-2.0"
] | null | null | null |
convert_twitter_data.py
|
charlieyou/data-science-final-project
|
7968261c3e44fe3544360a08fea271b611d105c1
|
[
"Apache-2.0"
] | null | null | null |
convert_twitter_data.py
|
charlieyou/data-science-final-project
|
7968261c3e44fe3544360a08fea271b611d105c1
|
[
"Apache-2.0"
] | null | null | null |
import cPickle as pickle
import pandas as pd
if __name__ == '__main__':
fnames = set(['clinton_tweets.json', 'trump_tweets.json'])
for fname in fnames:
df = pd.read_json('data/' + fname)
df = df.transpose()
df = df['text']
pickle.dump([(i, v) for i, v in zip(df.index, df.values)], open(fname, 'wb'))
| 28.583333
| 85
| 0.597668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.195335
|
e1aafb58eef941757b45eb7221687eebd5c2e5ea
| 799
|
py
|
Python
|
app.py
|
mwinel/python-cicd-assignement
|
c4c08772094f983f8105090f0d91dfef5be042aa
|
[
"MIT"
] | null | null | null |
app.py
|
mwinel/python-cicd-assignement
|
c4c08772094f983f8105090f0d91dfef5be042aa
|
[
"MIT"
] | null | null | null |
app.py
|
mwinel/python-cicd-assignement
|
c4c08772094f983f8105090f0d91dfef5be042aa
|
[
"MIT"
] | null | null | null |
import os
import logging
from flask import Flask
app = Flask(__name__)
@app.route('/status')
def health_check():
app.logger.info('Status request successful')
app.logger.debug('DEBUG message')
return 'OK - healthy'
@app.route('/metrics')
def metrics():
app.logger.info('Metrics request successful')
app.logger.debug('DEBUG message')
return 'OK - metrics'
@app.route('/')
def hello_world():
target = os.environ.get('TARGET', 'World')
app.logger.info('Main request successful')
app.logger.debug('DEBUG message')
return 'Hello {}!\n'.format(target)
if __name__ == "__main__":
## stream logs to a file
logging.basicConfig(filename='app.log', level=logging.DEBUG)
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
| 22.828571
| 79
| 0.673342
| 0
| 0
| 0
| 0
| 517
| 0.647059
| 0
| 0
| 264
| 0.330413
|
e1ad6793329afb999758e7af4b085f4de8b95b33
| 93
|
py
|
Python
|
Configuration/StandardSequences/python/L1Reco_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Configuration/StandardSequences/python/L1Reco_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Configuration/StandardSequences/python/L1Reco_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from L1Trigger.Configuration.L1TReco_cff import *
| 18.6
| 49
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e1af67620fb53577d386b7ebbfdc3bab9dc25e7c
| 2,347
|
py
|
Python
|
pysrc/common/dataset_mod.py
|
Hibiki1020/classification_attitude_estimator
|
7f7435c2ed5b5b5c8d0219df2da0426b108ff3aa
|
[
"MIT"
] | null | null | null |
pysrc/common/dataset_mod.py
|
Hibiki1020/classification_attitude_estimator
|
7f7435c2ed5b5b5c8d0219df2da0426b108ff3aa
|
[
"MIT"
] | null | null | null |
pysrc/common/dataset_mod.py
|
Hibiki1020/classification_attitude_estimator
|
7f7435c2ed5b5b5c8d0219df2da0426b108ff3aa
|
[
"MIT"
] | null | null | null |
import torch.utils.data as data
from PIL import Image
import numpy as np
import math
import csv
class ClassOriginaldataset(data.Dataset):
def __init__(self, data_list, transform, phase, index_dict_path, dim_fc_out):
self.data_list = data_list
self.transform = transform
self.phase = phase
self.index_dict_path = index_dict_path
self.dim_fc_out = dim_fc_out
self.index_dict = []
with open(index_dict_path) as f:
reader = csv.reader(f)
for row in reader:
self.index_dict.append(row)
def search_index(self, number):
index = int(1000000000)
for row in self.index_dict:
if float(number) == float(row[0]):
index = int(row[1])
break
return index
def float_to_array(self, num_float):
num_deg = float((num_float/3.141592)*180.0)
num_upper = 0.0
num_lower = 0.0
tmp_deg = float(int(num_deg))
if tmp_deg < num_deg: # 0 < num_deg
num_lower = tmp_deg
num_upper = num_lower + 1.0
elif num_deg < tmp_deg: # tmp_deg < 0
num_lower = tmp_deg - 1.0
num_upper = tmp_deg
dist_low = math.fabs(num_deg - num_lower)
dist_high = math.fabs(num_deg - num_upper)
lower_ind = int(self.search_index(num_lower))
upper_ind = int(self.search_index(num_upper))
array = np.zeros(self.dim_fc_out)
array[lower_ind] = dist_high
array[upper_ind] = dist_low
return array
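# Added note: float_to_array soft-encodes an angle (radians in, degrees used
# internally) over its two neighbouring 1-degree bins, weighted by proximity;
# e.g. 10.3 deg puts 0.7 in the "10 deg" bin and 0.3 in the "11 deg" bin.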
def __len__(self):
return len(self.data_list)
def __getitem__(self, index):
img_path = self.data_list[index][0]
roll_str = self.data_list[index][4]
pitch_str = self.data_list[index][5]
roll_float = float(roll_str)
pitch_float = float(pitch_str)
roll_list = self.float_to_array(roll_float)
pitch_list = self.float_to_array(pitch_float)
img_pil = Image.open(img_path)
img_pil = img_pil.convert("RGB")
roll_numpy = np.array(roll_list)
pitch_numpy = np.array(pitch_list)
img_trans, roll_trans, pitch_trans = self.transform(img_pil, roll_numpy, pitch_numpy, phase=self.phase)
return img_trans, roll_trans, pitch_trans
| 28.621951
| 111
| 0.611845
| 2,248
| 0.957818
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.013208
|
e1b1063d345266bc6d42ef8301d5659cc8f0a43d
| 1,846
|
py
|
Python
|
scripts/darias_energy_control/moveit_traj_baseline/repulsive_potential_field.py
|
hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators
|
9a8801e9c663174b753c4852b2313c5a3f302434
|
[
"MIT"
] | null | null | null |
scripts/darias_energy_control/moveit_traj_baseline/repulsive_potential_field.py
|
hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators
|
9a8801e9c663174b753c4852b2313c5a3f302434
|
[
"MIT"
] | null | null | null |
scripts/darias_energy_control/moveit_traj_baseline/repulsive_potential_field.py
|
hjw-1014/Multi-Objective-Reactive-Motion-Planning-in-Mobile-Manipulators
|
9a8801e9c663174b753c4852b2313c5a3f302434
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import matplotlib.pyplot as plt
from icecream import ic
def visualize_vector_field(policy, device, min_max = [[-1,-1],[1,1]], fig_number=1):
min_x = min_max[0][0]
max_x = min_max[1][0]
min_y = min_max[0][1]
max_y = min_max[1][1]
n_sample = 100
x = np.linspace(min_x, max_x, n_sample)
y = np.linspace(min_y, max_y, n_sample)
xy = np.meshgrid(x, y)
h = np.concatenate(xy[0])
v = np.concatenate(xy[1])
hv = torch.Tensor(np.stack([h, v]).T).float()
if device is not None:
hv = hv.to(device)
vel = policy(hv)
vel = vel.detach().cpu().numpy()  # move the policy output off the device before numpy ops
vel = np.nan_to_num(vel)
vel_x = np.reshape(vel[:, 0], (n_sample, n_sample))
vel_y = np.reshape(vel[:, 1], (n_sample, n_sample))
speed = np.sqrt(vel_x ** 2 + vel_y ** 2)
speed = speed/np.max(speed)
plt.streamplot(xy[0], xy[1], vel_x, vel_y, color=speed)
w = 5
Y, X = np.mgrid[-w:w:5j, -w:w:5j]
ic(Y)
ic(X)
import numpy as np
import matplotlib.pyplot as plt
# # Creating dataset
# x = np.arange(0, 10)
# y = np.arange(0, 10)
#
# # Creating grids
# X, Y = np.meshgrid(x, y)
# # ic(X)
# # ic(Y)
#
# # x-component to the right
# u = np.ones((15, 10))
#
# # y-component zero
# v = -np.ones((10, 10))
#
# fig = plt.figure(figsize=(12, 7))
#
# # Plotting stream plot
# plt.streamplot(X, Y, u, v, density=0.5)
#
# # show plot
# # plt.show()
import numpy as np
import matplotlib.pyplot as plt
# Creating data set
w = 3
Y, X = np.mgrid[-w:w:100j, -w:w:100j]
U1 = -1 - X ** 2 + Y
ic(type(U1))
ic(np.shape(U1))
V1 = 1 + X - Y ** 2
ic(np.shape(V1))
U2 = -1.1 - X ** 2 + Y
ic(np.shape(U2))
V2 = 2.1 + X - Y ** 2
# speed = np.sqrt(U ** 2 + V ** 2)
# Creating plot
fig = plt.figure(figsize=(12, 7))
plt.streamplot(X, Y, U1, V1, density=1)
plt.streamplot(X, Y, U2, V2, density=0.8)
# show plot
plt.show()
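# --- Illustration (not part of the original script) ---
# Hedged usage sketch for visualize_vector_field above: a toy policy that sends
# every point back toward the origin. "toy_policy" is an example name only.
def toy_policy(state):
    return -state  # velocity field pointing at the origin

visualize_vector_field(toy_policy, device=None, min_max=[[-1, -1], [1, 1]])
plt.show()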
| avg_line_length: 20.977273 | max_line_length: 84 | alphanum_fraction: 0.594258 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 453 | score_documentation: 0.245395 |

hexsha: e1b18b46651e9f66ff6958a9025b0bc1b9f9aca5 | size: 3,793 | ext: py | lang: Python
path: capa/features/extractors/ida/extractor.py
repo: pombredanne/capa | head_hexsha: b41d23930189c269608d4b705533fa45cf3c064c | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (the stars/issues/forks records share the same path, repo and head; all event datetimes are null)
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import idaapi
import capa.ida.helpers
import capa.features.extractors.elf
import capa.features.extractors.ida.file
import capa.features.extractors.ida.insn
import capa.features.extractors.ida.global_
import capa.features.extractors.ida.function
import capa.features.extractors.ida.basicblock
from capa.features.extractors.base_extractor import FeatureExtractor
class FunctionHandle:
"""this acts like an idaapi.func_t but with __int__()"""
def __init__(self, inner):
self._inner = inner
def __int__(self):
return self.start_ea
def __getattr__(self, name):
return getattr(self._inner, name)
class BasicBlockHandle:
"""this acts like an idaapi.BasicBlock but with __int__()"""
def __init__(self, inner):
self._inner = inner
def __int__(self):
return self.start_ea
def __getattr__(self, name):
return getattr(self._inner, name)
class InstructionHandle:
"""this acts like an idaapi.insn_t but with __int__()"""
def __init__(self, inner):
self._inner = inner
def __int__(self):
return self.ea
def __getattr__(self, name):
return getattr(self._inner, name)
class IdaFeatureExtractor(FeatureExtractor):
def __init__(self):
super(IdaFeatureExtractor, self).__init__()
self.global_features = []
self.global_features.extend(capa.features.extractors.ida.global_.extract_os())
self.global_features.extend(capa.features.extractors.ida.global_.extract_arch())
def get_base_address(self):
return idaapi.get_imagebase()
def extract_global_features(self):
yield from self.global_features
def extract_file_features(self):
yield from capa.features.extractors.ida.file.extract_features()
def get_functions(self):
import capa.features.extractors.ida.helpers as ida_helpers
# data structure shared across functions yielded here.
# useful for caching analysis relevant across a single workspace.
ctx = {}
# ignore library functions and thunk functions as identified by IDA
for f in ida_helpers.get_functions(skip_thunks=True, skip_libs=True):
setattr(f, "ctx", ctx)
yield FunctionHandle(f)
@staticmethod
def get_function(ea):
f = idaapi.get_func(ea)
setattr(f, "ctx", {})
return FunctionHandle(f)
def extract_function_features(self, f):
yield from capa.features.extractors.ida.function.extract_features(f)
def get_basic_blocks(self, f):
import capa.features.extractors.ida.helpers as ida_helpers
for bb in ida_helpers.get_function_blocks(f):
yield BasicBlockHandle(bb)
def extract_basic_block_features(self, f, bb):
yield from capa.features.extractors.ida.basicblock.extract_features(f, bb)
def get_instructions(self, f, bb):
import capa.features.extractors.ida.helpers as ida_helpers
for insn in ida_helpers.get_instructions_in_range(bb.start_ea, bb.end_ea):
yield InstructionHandle(insn)
def extract_insn_features(self, f, bb, insn):
yield from capa.features.extractors.ida.insn.extract_features(f, bb, insn)
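# --- Illustration (not part of the original module) ---
# Hedged usage sketch: how the extractor above is typically driven from IDA Pro's
# scripting console. It only calls methods defined in this file and assumes it
# runs inside IDA with capa installed; the printed items are whatever the
# per-scope extractors yield.
def _dump_features():
    extractor = IdaFeatureExtractor()
    print("image base:", hex(extractor.get_base_address()))
    for item in extractor.extract_file_features():
        print("file:", item)
    for f in extractor.get_functions():
        for item in extractor.extract_function_features(f):
            print(hex(int(f)), item)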
| avg_line_length: 33.566372 | max_line_length: 111 | alphanum_fraction: 0.714474 | count_classes: 2,846 | score_classes: 0.75033 | count_generators: 1,441 | score_generators: 0.37991 | count_decorators: 134 | score_decorators: 0.035328 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 934 | score_documentation: 0.246243 |

hexsha: e1b1b1bf75362e9f77713c3b8bcaddbf1477de81 | size: 55 | ext: py | lang: Python
path: Tests/playground.py
repo: mbtaPredict/Main | head_hexsha: e1c3320ff08b61355ac96f51be9e20c57372f13b | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (the stars/issues/forks records share the same path, repo and head; all event datetimes are null)
import matplotlib.pyplot as plt
plt.plot()
plt.show()
| avg_line_length: 11 | max_line_length: 31 | alphanum_fraction: 0.745455 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0 |

hexsha: e1b370db317e8d54c9c290cf01cbebc1cac20378 | size: 1,373 | ext: py | lang: Python
path: flex/extensions/jsondata.py
repo: AWehrhahn/flex-format | head_hexsha: 7fcc985559cd90e54d3ebde7946455aedc7293d7 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (the stars/issues/forks records share the same path, repo and head; all event datetimes are null)
# -*- coding: utf-8 -*-
from __future__ import annotations
from tarfile import TarInfo
from typing import BinaryIO, Tuple
from ..base import FlexExtension
class JsonDataExtension(FlexExtension):
"""An extension to store simple json data"""
def __init__(self, header=None, data=None, cls=None):
super().__init__(header=header, cls=cls)
self.data = data
def _prepare(self, name: str) -> list[tuple[TarInfo, BinaryIO]]:
cls = self.__class__
header_fname = f"{name}/header.json"
data_fname = f"{name}/data.json"
header_info, header_bio = cls._prepare_json(header_fname, self.header)
data_info, data_bio = cls._prepare_json(data_fname, self.data)
return [(header_info, header_bio), (data_info, data_bio)]
@classmethod
def _parse(cls, header: dict, members: dict) -> JsonDataExtension:
bio = members["data.json"]
data = cls._parse_json(bio)
ext = cls(header=header, data=data)
return ext
def to_dict(self) -> dict:
"""Convert this extension to a dict"""
obj = {"header": self.header, "data": self.data}
return obj
@classmethod
def from_dict(cls, header: dict, data: dict) -> JsonDataExtension:
"""Load this extension from a dict"""
arr = data["data"]
obj = cls(header, arr)
return obj
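# --- Illustration (not part of the original module) ---
# Hedged usage sketch: round-tripping a JsonDataExtension through
# to_dict()/from_dict(). The header and payload values are examples only.
if __name__ == "__main__":
    ext = JsonDataExtension(header={"name": "example"}, data={"answer": 42})
    as_dict = ext.to_dict()
    restored = JsonDataExtension.from_dict(as_dict["header"], as_dict)
    assert restored.data == {"answer": 42}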
| avg_line_length: 31.930233 | max_line_length: 78 | alphanum_fraction: 0.640932 | count_classes: 1,213 | score_classes: 0.883467 | count_generators: 0 | score_generators: 0 | count_decorators: 423 | score_decorators: 0.308084 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 213 | score_documentation: 0.155135 |

hexsha: e1b37b3b7be2be9f06bdec60a631822373a8b7f7 | size: 185 | ext: py | lang: Python
path: awards/forms.py
repo: danalvin/Django-IP3 | head_hexsha: 6df0adaddf998fd4195b23ee97f81938e741215a | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 4 (issue events 2020-06-05T19:20:59.000Z to 2021-09-08T00:32:49.000Z) | max_forks_count: null (the stars/issues/forks records share the same path, repo and head)
from django import forms
from .models import Project
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
exclude = ['profile', 'posted_time', 'user']
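# --- Illustration (not part of the original app) ---
# Hedged usage sketch: a typical function-based view around ProjectForm above.
# The template name, URL name and request.FILES handling are assumptions for
# this example; the manually assigned field comes from the form's exclude list.
from django.shortcuts import redirect, render

def new_project(request):
    form = ProjectForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        project = form.save(commit=False)
        project.user = request.user  # excluded fields are filled in by the view
        project.save()
        return redirect("index")  # assumed URL name
    return render(request, "new_project.html", {"form": form})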
| avg_line_length: 18.5 | max_line_length: 52 | alphanum_fraction: 0.67027 | count_classes: 128 | score_classes: 0.691892 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 28 | score_documentation: 0.151351 |

hexsha: e1b43999f4dbbac898da1e996502f381b7896fa5 | size: 72,341 | ext: py | lang: Python
path: src/tale/syntax/grammar/TaleParser.py
repo: tale-lang/tale | head_hexsha: 1779f94aa13545e58a1d5a8819b85ad02ada4144 | licenses: ["MIT"]
max_stars_count: 17 (star events 2020-02-11T10:38:19.000Z to 2020-09-22T16:36:25.000Z) | max_issues_count: 18 (issue events 2020-02-14T20:36:25.000Z to 2020-05-26T21:52:46.000Z) | max_forks_count: 1 (fork event 2020-02-16T12:04:07.000Z; the stars/issues/forks records share the same path, repo and head)
# Generated from tale/syntax/grammar/Tale.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\20")
buf.write("\u00f1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\3\2\3\2\7\2E\n\2\f\2\16\2H\13\2")
buf.write("\3\2\3\2\3\3\3\3\5\3N\n\3\3\4\3\4\3\4\3\4\3\5\3\5\3\5")
buf.write("\3\5\3\5\5\5Y\n\5\3\6\3\6\3\6\3\7\3\7\3\7\3\b\3\b\3\b")
buf.write("\3\b\3\t\5\tf\n\t\3\t\3\t\3\t\6\tk\n\t\r\t\16\tl\3\n\3")
buf.write("\n\3\13\3\13\5\13s\n\13\3\f\3\f\3\f\6\fx\n\f\r\f\16\f")
buf.write("y\3\r\3\r\5\r~\n\r\3\16\3\16\3\16\3\16\5\16\u0084\n\16")
buf.write("\3\16\3\16\3\17\3\17\5\17\u008a\n\17\3\20\3\20\3\21\3")
buf.write("\21\3\22\3\22\5\22\u0092\n\22\3\23\3\23\3\24\3\24\3\24")
buf.write("\6\24\u0099\n\24\r\24\16\24\u009a\3\24\3\24\3\25\3\25")
buf.write("\3\25\3\25\3\25\5\25\u00a4\n\25\3\26\3\26\3\26\3\26\3")
buf.write("\26\3\26\7\26\u00ac\n\26\f\26\16\26\u00af\13\26\3\27\3")
buf.write("\27\3\27\3\27\3\27\3\27\3\27\5\27\u00b8\n\27\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u00c2\n\30\f\30\16")
buf.write("\30\u00c5\13\30\3\31\3\31\5\31\u00c9\n\31\3\32\5\32\u00cc")
buf.write("\n\32\3\32\3\32\3\32\3\32\6\32\u00d2\n\32\r\32\16\32\u00d3")
buf.write("\3\33\3\33\3\33\5\33\u00d9\n\33\3\34\3\34\3\35\3\35\3")
buf.write("\35\7\35\u00e0\n\35\f\35\16\35\u00e3\13\35\3\36\3\36\5")
buf.write("\36\u00e7\n\36\3\37\3\37\5\37\u00eb\n\37\3 \3 \3!\3!\3")
buf.write("!\3\u00e1\4*.\"\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36")
buf.write(" \"$&(*,.\60\62\64\668:<>@\2\2\2\u00f0\2F\3\2\2\2\4M\3")
buf.write("\2\2\2\6O\3\2\2\2\bX\3\2\2\2\nZ\3\2\2\2\f]\3\2\2\2\16")
buf.write("`\3\2\2\2\20e\3\2\2\2\22n\3\2\2\2\24r\3\2\2\2\26t\3\2")
buf.write("\2\2\30}\3\2\2\2\32\177\3\2\2\2\34\u0089\3\2\2\2\36\u008b")
buf.write("\3\2\2\2 \u008d\3\2\2\2\"\u0091\3\2\2\2$\u0093\3\2\2\2")
buf.write("&\u0095\3\2\2\2(\u00a3\3\2\2\2*\u00a5\3\2\2\2,\u00b7\3")
buf.write("\2\2\2.\u00b9\3\2\2\2\60\u00c8\3\2\2\2\62\u00cb\3\2\2")
buf.write("\2\64\u00d8\3\2\2\2\66\u00da\3\2\2\28\u00dc\3\2\2\2:\u00e6")
buf.write("\3\2\2\2<\u00ea\3\2\2\2>\u00ec\3\2\2\2@\u00ee\3\2\2\2")
buf.write("BE\7\r\2\2CE\5\4\3\2DB\3\2\2\2DC\3\2\2\2EH\3\2\2\2FD\3")
buf.write("\2\2\2FG\3\2\2\2GI\3\2\2\2HF\3\2\2\2IJ\7\2\2\3J\3\3\2")
buf.write("\2\2KN\5\6\4\2LN\5(\25\2MK\3\2\2\2ML\3\2\2\2N\5\3\2\2")
buf.write("\2OP\5\b\5\2PQ\7\3\2\2QR\5\"\22\2R\7\3\2\2\2SY\5\n\6\2")
buf.write("TY\5\f\7\2UY\5\16\b\2VY\5\20\t\2WY\5\22\n\2XS\3\2\2\2")
buf.write("XT\3\2\2\2XU\3\2\2\2XV\3\2\2\2XW\3\2\2\2Y\t\3\2\2\2Z[")
buf.write("\5\24\13\2[\\\7\b\2\2\\\13\3\2\2\2]^\7\n\2\2^_\5\30\r")
buf.write("\2_\r\3\2\2\2`a\5\24\13\2ab\7\n\2\2bc\5\24\13\2c\17\3")
buf.write("\2\2\2df\5\24\13\2ed\3\2\2\2ef\3\2\2\2fj\3\2\2\2gh\7\b")
buf.write("\2\2hi\7\4\2\2ik\5\24\13\2jg\3\2\2\2kl\3\2\2\2lj\3\2\2")
buf.write("\2lm\3\2\2\2m\21\3\2\2\2no\7\b\2\2o\23\3\2\2\2ps\5\30")
buf.write("\r\2qs\5\26\f\2rp\3\2\2\2rq\3\2\2\2s\25\3\2\2\2tw\5\30")
buf.write("\r\2uv\7\5\2\2vx\5\30\r\2wu\3\2\2\2xy\3\2\2\2yw\3\2\2")
buf.write("\2yz\3\2\2\2z\27\3\2\2\2{~\5\32\16\2|~\5\34\17\2}{\3\2")
buf.write("\2\2}|\3\2\2\2~\31\3\2\2\2\177\u0080\7\6\2\2\u0080\u0083")
buf.write("\5\36\20\2\u0081\u0082\7\4\2\2\u0082\u0084\5 \21\2\u0083")
buf.write("\u0081\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0085\3\2\2\2")
buf.write("\u0085\u0086\7\7\2\2\u0086\33\3\2\2\2\u0087\u008a\7\b")
buf.write("\2\2\u0088\u008a\5<\37\2\u0089\u0087\3\2\2\2\u0089\u0088")
buf.write("\3\2\2\2\u008a\35\3\2\2\2\u008b\u008c\7\b\2\2\u008c\37")
buf.write("\3\2\2\2\u008d\u008e\7\b\2\2\u008e!\3\2\2\2\u008f\u0092")
buf.write("\5$\23\2\u0090\u0092\5&\24\2\u0091\u008f\3\2\2\2\u0091")
buf.write("\u0090\3\2\2\2\u0092#\3\2\2\2\u0093\u0094\5(\25\2\u0094")
buf.write("%\3\2\2\2\u0095\u0098\7\17\2\2\u0096\u0099\7\r\2\2\u0097")
buf.write("\u0099\5\4\3\2\u0098\u0096\3\2\2\2\u0098\u0097\3\2\2\2")
buf.write("\u0099\u009a\3\2\2\2\u009a\u0098\3\2\2\2\u009a\u009b\3")
buf.write("\2\2\2\u009b\u009c\3\2\2\2\u009c\u009d\7\20\2\2\u009d")
buf.write("\'\3\2\2\2\u009e\u00a4\5*\26\2\u009f\u00a4\5,\27\2\u00a0")
buf.write("\u00a4\5.\30\2\u00a1\u00a4\5\62\32\2\u00a2\u00a4\58\35")
buf.write("\2\u00a3\u009e\3\2\2\2\u00a3\u009f\3\2\2\2\u00a3\u00a0")
buf.write("\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a3\u00a2\3\2\2\2\u00a4")
buf.write(")\3\2\2\2\u00a5\u00a6\b\26\1\2\u00a6\u00a7\58\35\2\u00a7")
buf.write("\u00a8\7\b\2\2\u00a8\u00ad\3\2\2\2\u00a9\u00aa\f\4\2\2")
buf.write("\u00aa\u00ac\7\b\2\2\u00ab\u00a9\3\2\2\2\u00ac\u00af\3")
buf.write("\2\2\2\u00ad\u00ab\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae+")
buf.write("\3\2\2\2\u00af\u00ad\3\2\2\2\u00b0\u00b1\7\n\2\2\u00b1")
buf.write("\u00b8\5:\36\2\u00b2\u00b3\7\n\2\2\u00b3\u00b4\7\6\2\2")
buf.write("\u00b4\u00b5\5(\25\2\u00b5\u00b6\7\7\2\2\u00b6\u00b8\3")
buf.write("\2\2\2\u00b7\u00b0\3\2\2\2\u00b7\u00b2\3\2\2\2\u00b8-")
buf.write("\3\2\2\2\u00b9\u00ba\b\30\1\2\u00ba\u00bb\5\60\31\2\u00bb")
buf.write("\u00bc\7\n\2\2\u00bc\u00bd\5\60\31\2\u00bd\u00c3\3\2\2")
buf.write("\2\u00be\u00bf\f\4\2\2\u00bf\u00c0\7\n\2\2\u00c0\u00c2")
buf.write("\5\60\31\2\u00c1\u00be\3\2\2\2\u00c2\u00c5\3\2\2\2\u00c3")
buf.write("\u00c1\3\2\2\2\u00c3\u00c4\3\2\2\2\u00c4/\3\2\2\2\u00c5")
buf.write("\u00c3\3\2\2\2\u00c6\u00c9\5*\26\2\u00c7\u00c9\58\35\2")
buf.write("\u00c8\u00c6\3\2\2\2\u00c8\u00c7\3\2\2\2\u00c9\61\3\2")
buf.write("\2\2\u00ca\u00cc\5\64\33\2\u00cb\u00ca\3\2\2\2\u00cb\u00cc")
buf.write("\3\2\2\2\u00cc\u00d1\3\2\2\2\u00cd\u00ce\5\66\34\2\u00ce")
buf.write("\u00cf\7\4\2\2\u00cf\u00d0\5\64\33\2\u00d0\u00d2\3\2\2")
buf.write("\2\u00d1\u00cd\3\2\2\2\u00d2\u00d3\3\2\2\2\u00d3\u00d1")
buf.write("\3\2\2\2\u00d3\u00d4\3\2\2\2\u00d4\63\3\2\2\2\u00d5\u00d9")
buf.write("\5*\26\2\u00d6\u00d9\5.\30\2\u00d7\u00d9\58\35\2\u00d8")
buf.write("\u00d5\3\2\2\2\u00d8\u00d6\3\2\2\2\u00d8\u00d7\3\2\2\2")
buf.write("\u00d9\65\3\2\2\2\u00da\u00db\7\b\2\2\u00db\67\3\2\2\2")
buf.write("\u00dc\u00e1\5:\36\2\u00dd\u00de\7\5\2\2\u00de\u00e0\5")
buf.write(":\36\2\u00df\u00dd\3\2\2\2\u00e0\u00e3\3\2\2\2\u00e1\u00e2")
buf.write("\3\2\2\2\u00e1\u00df\3\2\2\2\u00e29\3\2\2\2\u00e3\u00e1")
buf.write("\3\2\2\2\u00e4\u00e7\7\b\2\2\u00e5\u00e7\5<\37\2\u00e6")
buf.write("\u00e4\3\2\2\2\u00e6\u00e5\3\2\2\2\u00e7;\3\2\2\2\u00e8")
buf.write("\u00eb\5> \2\u00e9\u00eb\5@!\2\u00ea\u00e8\3\2\2\2\u00ea")
buf.write("\u00e9\3\2\2\2\u00eb=\3\2\2\2\u00ec\u00ed\7\t\2\2\u00ed")
buf.write("?\3\2\2\2\u00ee\u00ef\7\13\2\2\u00efA\3\2\2\2\33DFMXe")
buf.write("lry}\u0083\u0089\u0091\u0098\u009a\u00a3\u00ad\u00b7\u00c3")
buf.write("\u00c8\u00cb\u00d3\u00d8\u00e1\u00e6\u00ea")
return buf.getvalue()
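# --- Illustration (not part of the generated file) ---
# Hedged usage sketch: driving the generated TaleParser (defined below) with the
# ANTLR Python runtime. "TaleLexer" is assumed to be the lexer module ANTLR
# generates alongside this parser from Tale.g4; it is not shown in this file,
# and its import path is inferred from this file's location.
def _parse_tale(source_text):
    from antlr4 import InputStream, CommonTokenStream
    from tale.syntax.grammar.TaleLexer import TaleLexer  # assumed companion module
    lexer = TaleLexer(InputStream(source_text))
    tokens = CommonTokenStream(lexer)
    parser = TaleParser(tokens)
    return parser.program()  # 'program' is the grammar's start rule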
class TaleParser ( Parser ):
grammarFileName = "Tale.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'='", "':'", "','", "'('", "')'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "IDENTIFIER", "NUMBER",
"OPERATOR", "STRING", "WS", "NEWLINE", "SKIP_", "INDENT",
"DEDENT" ]
RULE_program = 0
RULE_statement = 1
RULE_assignment = 2
RULE_form = 3
RULE_unaryForm = 4
RULE_prefixOperatorForm = 5
RULE_binaryForm = 6
RULE_keywordForm = 7
RULE_primitiveForm = 8
RULE_parameter = 9
RULE_tupleParameter = 10
RULE_singleParameter = 11
RULE_simpleParameter = 12
RULE_patternMatchingParameter = 13
RULE_parameterName = 14
RULE_parameterType = 15
RULE_assignmentBody = 16
RULE_simpleAssignmentBody = 17
RULE_compoundAssignmentBody = 18
RULE_expression = 19
RULE_unary = 20
RULE_prefixOperator = 21
RULE_binary = 22
RULE_binaryOperand = 23
RULE_keyword = 24
RULE_keywordArgument = 25
RULE_keywordName = 26
RULE_primitive = 27
RULE_primitiveItem = 28
RULE_literal = 29
RULE_intLiteral = 30
RULE_stringLiteral = 31
ruleNames = [ "program", "statement", "assignment", "form", "unaryForm",
"prefixOperatorForm", "binaryForm", "keywordForm", "primitiveForm",
"parameter", "tupleParameter", "singleParameter", "simpleParameter",
"patternMatchingParameter", "parameterName", "parameterType",
"assignmentBody", "simpleAssignmentBody", "compoundAssignmentBody",
"expression", "unary", "prefixOperator", "binary", "binaryOperand",
"keyword", "keywordArgument", "keywordName", "primitive",
"primitiveItem", "literal", "intLiteral", "stringLiteral" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
IDENTIFIER=6
NUMBER=7
OPERATOR=8
STRING=9
WS=10
NEWLINE=11
SKIP_=12
INDENT=13
DEDENT=14
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgramContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EOF(self):
return self.getToken(TaleParser.EOF, 0)
def NEWLINE(self, i:int=None):
if i is None:
return self.getTokens(TaleParser.NEWLINE)
else:
return self.getToken(TaleParser.NEWLINE, i)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.StatementContext)
else:
return self.getTypedRuleContext(TaleParser.StatementContext,i)
def getRuleIndex(self):
return TaleParser.RULE_program
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProgram" ):
listener.enterProgram(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProgram" ):
listener.exitProgram(self)
def program(self):
localctx = TaleParser.ProgramContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_program)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 68
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << TaleParser.T__3) | (1 << TaleParser.IDENTIFIER) | (1 << TaleParser.NUMBER) | (1 << TaleParser.OPERATOR) | (1 << TaleParser.STRING) | (1 << TaleParser.NEWLINE))) != 0):
self.state = 66
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [TaleParser.NEWLINE]:
self.state = 64
self.match(TaleParser.NEWLINE)
pass
elif token in [TaleParser.T__3, TaleParser.IDENTIFIER, TaleParser.NUMBER, TaleParser.OPERATOR, TaleParser.STRING]:
self.state = 65
self.statement()
pass
else:
raise NoViableAltException(self)
self.state = 70
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 71
self.match(TaleParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def assignment(self):
return self.getTypedRuleContext(TaleParser.AssignmentContext,0)
def expression(self):
return self.getTypedRuleContext(TaleParser.ExpressionContext,0)
def getRuleIndex(self):
return TaleParser.RULE_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStatement" ):
listener.enterStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStatement" ):
listener.exitStatement(self)
def statement(self):
localctx = TaleParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_statement)
try:
self.state = 75
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 73
self.assignment()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 74
self.expression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AssignmentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def form(self):
return self.getTypedRuleContext(TaleParser.FormContext,0)
def assignmentBody(self):
return self.getTypedRuleContext(TaleParser.AssignmentBodyContext,0)
def getRuleIndex(self):
return TaleParser.RULE_assignment
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignment" ):
listener.enterAssignment(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignment" ):
listener.exitAssignment(self)
def assignment(self):
localctx = TaleParser.AssignmentContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_assignment)
try:
self.enterOuterAlt(localctx, 1)
self.state = 77
self.form()
self.state = 78
self.match(TaleParser.T__0)
self.state = 79
self.assignmentBody()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FormContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def unaryForm(self):
return self.getTypedRuleContext(TaleParser.UnaryFormContext,0)
def prefixOperatorForm(self):
return self.getTypedRuleContext(TaleParser.PrefixOperatorFormContext,0)
def binaryForm(self):
return self.getTypedRuleContext(TaleParser.BinaryFormContext,0)
def keywordForm(self):
return self.getTypedRuleContext(TaleParser.KeywordFormContext,0)
def primitiveForm(self):
return self.getTypedRuleContext(TaleParser.PrimitiveFormContext,0)
def getRuleIndex(self):
return TaleParser.RULE_form
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterForm" ):
listener.enterForm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitForm" ):
listener.exitForm(self)
def form(self):
localctx = TaleParser.FormContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_form)
try:
self.state = 86
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 81
self.unaryForm()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 82
self.prefixOperatorForm()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 83
self.binaryForm()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 84
self.keywordForm()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 85
self.primitiveForm()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnaryFormContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameter(self):
return self.getTypedRuleContext(TaleParser.ParameterContext,0)
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def getRuleIndex(self):
return TaleParser.RULE_unaryForm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnaryForm" ):
listener.enterUnaryForm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnaryForm" ):
listener.exitUnaryForm(self)
def unaryForm(self):
localctx = TaleParser.UnaryFormContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_unaryForm)
try:
self.enterOuterAlt(localctx, 1)
self.state = 88
self.parameter()
self.state = 89
self.match(TaleParser.IDENTIFIER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PrefixOperatorFormContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OPERATOR(self):
return self.getToken(TaleParser.OPERATOR, 0)
def singleParameter(self):
return self.getTypedRuleContext(TaleParser.SingleParameterContext,0)
def getRuleIndex(self):
return TaleParser.RULE_prefixOperatorForm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrefixOperatorForm" ):
listener.enterPrefixOperatorForm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrefixOperatorForm" ):
listener.exitPrefixOperatorForm(self)
def prefixOperatorForm(self):
localctx = TaleParser.PrefixOperatorFormContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_prefixOperatorForm)
try:
self.enterOuterAlt(localctx, 1)
self.state = 91
self.match(TaleParser.OPERATOR)
self.state = 92
self.singleParameter()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BinaryFormContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameter(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.ParameterContext)
else:
return self.getTypedRuleContext(TaleParser.ParameterContext,i)
def OPERATOR(self):
return self.getToken(TaleParser.OPERATOR, 0)
def getRuleIndex(self):
return TaleParser.RULE_binaryForm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBinaryForm" ):
listener.enterBinaryForm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBinaryForm" ):
listener.exitBinaryForm(self)
def binaryForm(self):
localctx = TaleParser.BinaryFormContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_binaryForm)
try:
self.enterOuterAlt(localctx, 1)
self.state = 94
self.parameter()
self.state = 95
self.match(TaleParser.OPERATOR)
self.state = 96
self.parameter()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class KeywordFormContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameter(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.ParameterContext)
else:
return self.getTypedRuleContext(TaleParser.ParameterContext,i)
def IDENTIFIER(self, i:int=None):
if i is None:
return self.getTokens(TaleParser.IDENTIFIER)
else:
return self.getToken(TaleParser.IDENTIFIER, i)
def getRuleIndex(self):
return TaleParser.RULE_keywordForm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterKeywordForm" ):
listener.enterKeywordForm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitKeywordForm" ):
listener.exitKeywordForm(self)
def keywordForm(self):
localctx = TaleParser.KeywordFormContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_keywordForm)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 99
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
self.state = 98
self.parameter()
self.state = 104
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 101
self.match(TaleParser.IDENTIFIER)
self.state = 102
self.match(TaleParser.T__1)
self.state = 103
self.parameter()
self.state = 106
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==TaleParser.IDENTIFIER):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PrimitiveFormContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def getRuleIndex(self):
return TaleParser.RULE_primitiveForm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrimitiveForm" ):
listener.enterPrimitiveForm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrimitiveForm" ):
listener.exitPrimitiveForm(self)
def primitiveForm(self):
localctx = TaleParser.PrimitiveFormContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_primitiveForm)
try:
self.enterOuterAlt(localctx, 1)
self.state = 108
self.match(TaleParser.IDENTIFIER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParameterContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def singleParameter(self):
return self.getTypedRuleContext(TaleParser.SingleParameterContext,0)
def tupleParameter(self):
return self.getTypedRuleContext(TaleParser.TupleParameterContext,0)
def getRuleIndex(self):
return TaleParser.RULE_parameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameter" ):
listener.enterParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameter" ):
listener.exitParameter(self)
def parameter(self):
localctx = TaleParser.ParameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_parameter)
try:
self.state = 112
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,6,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 110
self.singleParameter()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 111
self.tupleParameter()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TupleParameterContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def singleParameter(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.SingleParameterContext)
else:
return self.getTypedRuleContext(TaleParser.SingleParameterContext,i)
def getRuleIndex(self):
return TaleParser.RULE_tupleParameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTupleParameter" ):
listener.enterTupleParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTupleParameter" ):
listener.exitTupleParameter(self)
def tupleParameter(self):
localctx = TaleParser.TupleParameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_tupleParameter)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 114
self.singleParameter()
self.state = 117
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 115
self.match(TaleParser.T__2)
self.state = 116
self.singleParameter()
self.state = 119
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==TaleParser.T__2):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SingleParameterContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def simpleParameter(self):
return self.getTypedRuleContext(TaleParser.SimpleParameterContext,0)
def patternMatchingParameter(self):
return self.getTypedRuleContext(TaleParser.PatternMatchingParameterContext,0)
def getRuleIndex(self):
return TaleParser.RULE_singleParameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSingleParameter" ):
listener.enterSingleParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSingleParameter" ):
listener.exitSingleParameter(self)
def singleParameter(self):
localctx = TaleParser.SingleParameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_singleParameter)
try:
self.state = 123
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [TaleParser.T__3]:
self.enterOuterAlt(localctx, 1)
self.state = 121
self.simpleParameter()
pass
elif token in [TaleParser.IDENTIFIER, TaleParser.NUMBER, TaleParser.STRING]:
self.enterOuterAlt(localctx, 2)
self.state = 122
self.patternMatchingParameter()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SimpleParameterContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def parameterName(self):
return self.getTypedRuleContext(TaleParser.ParameterNameContext,0)
def parameterType(self):
return self.getTypedRuleContext(TaleParser.ParameterTypeContext,0)
def getRuleIndex(self):
return TaleParser.RULE_simpleParameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSimpleParameter" ):
listener.enterSimpleParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSimpleParameter" ):
listener.exitSimpleParameter(self)
def simpleParameter(self):
localctx = TaleParser.SimpleParameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_simpleParameter)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 125
self.match(TaleParser.T__3)
self.state = 126
self.parameterName()
self.state = 129
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==TaleParser.T__1:
self.state = 127
self.match(TaleParser.T__1)
self.state = 128
self.parameterType()
self.state = 131
self.match(TaleParser.T__4)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PatternMatchingParameterContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def literal(self):
return self.getTypedRuleContext(TaleParser.LiteralContext,0)
def getRuleIndex(self):
return TaleParser.RULE_patternMatchingParameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPatternMatchingParameter" ):
listener.enterPatternMatchingParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPatternMatchingParameter" ):
listener.exitPatternMatchingParameter(self)
def patternMatchingParameter(self):
localctx = TaleParser.PatternMatchingParameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_patternMatchingParameter)
try:
self.state = 135
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [TaleParser.IDENTIFIER]:
self.enterOuterAlt(localctx, 1)
self.state = 133
self.match(TaleParser.IDENTIFIER)
pass
elif token in [TaleParser.NUMBER, TaleParser.STRING]:
self.enterOuterAlt(localctx, 2)
self.state = 134
self.literal()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParameterNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def getRuleIndex(self):
return TaleParser.RULE_parameterName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterName" ):
listener.enterParameterName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterName" ):
listener.exitParameterName(self)
def parameterName(self):
localctx = TaleParser.ParameterNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_parameterName)
try:
self.enterOuterAlt(localctx, 1)
self.state = 137
self.match(TaleParser.IDENTIFIER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParameterTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def getRuleIndex(self):
return TaleParser.RULE_parameterType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterType" ):
listener.enterParameterType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterType" ):
listener.exitParameterType(self)
def parameterType(self):
localctx = TaleParser.ParameterTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_parameterType)
try:
self.enterOuterAlt(localctx, 1)
self.state = 139
self.match(TaleParser.IDENTIFIER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AssignmentBodyContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def simpleAssignmentBody(self):
return self.getTypedRuleContext(TaleParser.SimpleAssignmentBodyContext,0)
def compoundAssignmentBody(self):
return self.getTypedRuleContext(TaleParser.CompoundAssignmentBodyContext,0)
def getRuleIndex(self):
return TaleParser.RULE_assignmentBody
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignmentBody" ):
listener.enterAssignmentBody(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignmentBody" ):
listener.exitAssignmentBody(self)
def assignmentBody(self):
localctx = TaleParser.AssignmentBodyContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_assignmentBody)
try:
self.state = 143
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [TaleParser.IDENTIFIER, TaleParser.NUMBER, TaleParser.OPERATOR, TaleParser.STRING]:
self.enterOuterAlt(localctx, 1)
self.state = 141
self.simpleAssignmentBody()
pass
elif token in [TaleParser.INDENT]:
self.enterOuterAlt(localctx, 2)
self.state = 142
self.compoundAssignmentBody()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SimpleAssignmentBodyContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(TaleParser.ExpressionContext,0)
def getRuleIndex(self):
return TaleParser.RULE_simpleAssignmentBody
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSimpleAssignmentBody" ):
listener.enterSimpleAssignmentBody(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSimpleAssignmentBody" ):
listener.exitSimpleAssignmentBody(self)
def simpleAssignmentBody(self):
localctx = TaleParser.SimpleAssignmentBodyContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_simpleAssignmentBody)
try:
self.enterOuterAlt(localctx, 1)
self.state = 145
self.expression()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CompoundAssignmentBodyContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def INDENT(self):
return self.getToken(TaleParser.INDENT, 0)
def DEDENT(self):
return self.getToken(TaleParser.DEDENT, 0)
def NEWLINE(self, i:int=None):
if i is None:
return self.getTokens(TaleParser.NEWLINE)
else:
return self.getToken(TaleParser.NEWLINE, i)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.StatementContext)
else:
return self.getTypedRuleContext(TaleParser.StatementContext,i)
def getRuleIndex(self):
return TaleParser.RULE_compoundAssignmentBody
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCompoundAssignmentBody" ):
listener.enterCompoundAssignmentBody(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCompoundAssignmentBody" ):
listener.exitCompoundAssignmentBody(self)
def compoundAssignmentBody(self):
localctx = TaleParser.CompoundAssignmentBodyContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_compoundAssignmentBody)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 147
self.match(TaleParser.INDENT)
self.state = 150
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 150
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [TaleParser.NEWLINE]:
self.state = 148
self.match(TaleParser.NEWLINE)
pass
elif token in [TaleParser.T__3, TaleParser.IDENTIFIER, TaleParser.NUMBER, TaleParser.OPERATOR, TaleParser.STRING]:
self.state = 149
self.statement()
pass
else:
raise NoViableAltException(self)
self.state = 152
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << TaleParser.T__3) | (1 << TaleParser.IDENTIFIER) | (1 << TaleParser.NUMBER) | (1 << TaleParser.OPERATOR) | (1 << TaleParser.STRING) | (1 << TaleParser.NEWLINE))) != 0)):
break
self.state = 154
self.match(TaleParser.DEDENT)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def unary(self):
return self.getTypedRuleContext(TaleParser.UnaryContext,0)
def prefixOperator(self):
return self.getTypedRuleContext(TaleParser.PrefixOperatorContext,0)
def binary(self):
return self.getTypedRuleContext(TaleParser.BinaryContext,0)
def keyword(self):
return self.getTypedRuleContext(TaleParser.KeywordContext,0)
def primitive(self):
return self.getTypedRuleContext(TaleParser.PrimitiveContext,0)
def getRuleIndex(self):
return TaleParser.RULE_expression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpression" ):
listener.enterExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpression" ):
listener.exitExpression(self)
def expression(self):
localctx = TaleParser.ExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_expression)
try:
self.state = 161
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,14,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 156
self.unary(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 157
self.prefixOperator()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 158
self.binary(0)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 159
self.keyword()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 160
self.primitive()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnaryContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def primitive(self):
return self.getTypedRuleContext(TaleParser.PrimitiveContext,0)
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def unary(self):
return self.getTypedRuleContext(TaleParser.UnaryContext,0)
def getRuleIndex(self):
return TaleParser.RULE_unary
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnary" ):
listener.enterUnary(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnary" ):
listener.exitUnary(self)
def unary(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = TaleParser.UnaryContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 40
self.enterRecursionRule(localctx, 40, self.RULE_unary, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 164
self.primitive()
self.state = 165
self.match(TaleParser.IDENTIFIER)
self._ctx.stop = self._input.LT(-1)
self.state = 171
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = TaleParser.UnaryContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_unary)
self.state = 167
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 168
self.match(TaleParser.IDENTIFIER)
self.state = 173
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,15,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class PrefixOperatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OPERATOR(self):
return self.getToken(TaleParser.OPERATOR, 0)
def primitiveItem(self):
return self.getTypedRuleContext(TaleParser.PrimitiveItemContext,0)
def expression(self):
return self.getTypedRuleContext(TaleParser.ExpressionContext,0)
def getRuleIndex(self):
return TaleParser.RULE_prefixOperator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrefixOperator" ):
listener.enterPrefixOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrefixOperator" ):
listener.exitPrefixOperator(self)
def prefixOperator(self):
localctx = TaleParser.PrefixOperatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_prefixOperator)
try:
self.state = 181
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 174
self.match(TaleParser.OPERATOR)
self.state = 175
self.primitiveItem()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 176
self.match(TaleParser.OPERATOR)
self.state = 177
self.match(TaleParser.T__3)
self.state = 178
self.expression()
self.state = 179
self.match(TaleParser.T__4)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BinaryContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def binaryOperand(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.BinaryOperandContext)
else:
return self.getTypedRuleContext(TaleParser.BinaryOperandContext,i)
def OPERATOR(self):
return self.getToken(TaleParser.OPERATOR, 0)
def binary(self):
return self.getTypedRuleContext(TaleParser.BinaryContext,0)
def getRuleIndex(self):
return TaleParser.RULE_binary
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBinary" ):
listener.enterBinary(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBinary" ):
listener.exitBinary(self)
def binary(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = TaleParser.BinaryContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 44
self.enterRecursionRule(localctx, 44, self.RULE_binary, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 184
self.binaryOperand()
self.state = 185
self.match(TaleParser.OPERATOR)
self.state = 186
self.binaryOperand()
self._ctx.stop = self._input.LT(-1)
self.state = 193
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = TaleParser.BinaryContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_binary)
self.state = 188
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 189
self.match(TaleParser.OPERATOR)
self.state = 190
self.binaryOperand()
self.state = 195
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,17,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class BinaryOperandContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def unary(self):
return self.getTypedRuleContext(TaleParser.UnaryContext,0)
def primitive(self):
return self.getTypedRuleContext(TaleParser.PrimitiveContext,0)
def getRuleIndex(self):
return TaleParser.RULE_binaryOperand
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBinaryOperand" ):
listener.enterBinaryOperand(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBinaryOperand" ):
listener.exitBinaryOperand(self)
def binaryOperand(self):
localctx = TaleParser.BinaryOperandContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_binaryOperand)
try:
self.state = 198
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,18,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 196
self.unary(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 197
self.primitive()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class KeywordContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def keywordArgument(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.KeywordArgumentContext)
else:
return self.getTypedRuleContext(TaleParser.KeywordArgumentContext,i)
def keywordName(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.KeywordNameContext)
else:
return self.getTypedRuleContext(TaleParser.KeywordNameContext,i)
def getRuleIndex(self):
return TaleParser.RULE_keyword
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterKeyword" ):
listener.enterKeyword(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitKeyword" ):
listener.exitKeyword(self)
def keyword(self):
localctx = TaleParser.KeywordContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_keyword)
try:
self.enterOuterAlt(localctx, 1)
self.state = 201
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,19,self._ctx)
if la_ == 1:
self.state = 200
self.keywordArgument()
self.state = 207
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 203
self.keywordName()
self.state = 204
self.match(TaleParser.T__1)
self.state = 205
self.keywordArgument()
else:
raise NoViableAltException(self)
self.state = 209
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,20,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class KeywordArgumentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def unary(self):
return self.getTypedRuleContext(TaleParser.UnaryContext,0)
def binary(self):
return self.getTypedRuleContext(TaleParser.BinaryContext,0)
def primitive(self):
return self.getTypedRuleContext(TaleParser.PrimitiveContext,0)
def getRuleIndex(self):
return TaleParser.RULE_keywordArgument
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterKeywordArgument" ):
listener.enterKeywordArgument(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitKeywordArgument" ):
listener.exitKeywordArgument(self)
def keywordArgument(self):
localctx = TaleParser.KeywordArgumentContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_keywordArgument)
try:
self.state = 214
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,21,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 211
self.unary(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 212
self.binary(0)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 213
self.primitive()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class KeywordNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def getRuleIndex(self):
return TaleParser.RULE_keywordName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterKeywordName" ):
listener.enterKeywordName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitKeywordName" ):
listener.exitKeywordName(self)
def keywordName(self):
localctx = TaleParser.KeywordNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_keywordName)
try:
self.enterOuterAlt(localctx, 1)
self.state = 216
self.match(TaleParser.IDENTIFIER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PrimitiveContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def primitiveItem(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(TaleParser.PrimitiveItemContext)
else:
return self.getTypedRuleContext(TaleParser.PrimitiveItemContext,i)
def getRuleIndex(self):
return TaleParser.RULE_primitive
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrimitive" ):
listener.enterPrimitive(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrimitive" ):
listener.exitPrimitive(self)
def primitive(self):
localctx = TaleParser.PrimitiveContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_primitive)
try:
self.enterOuterAlt(localctx, 1)
self.state = 218
self.primitiveItem()
self.state = 223
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,22,self._ctx)
while _alt!=1 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1+1:
self.state = 219
self.match(TaleParser.T__2)
self.state = 220
self.primitiveItem()
self.state = 225
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,22,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PrimitiveItemContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(TaleParser.IDENTIFIER, 0)
def literal(self):
return self.getTypedRuleContext(TaleParser.LiteralContext,0)
def getRuleIndex(self):
return TaleParser.RULE_primitiveItem
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPrimitiveItem" ):
listener.enterPrimitiveItem(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPrimitiveItem" ):
listener.exitPrimitiveItem(self)
def primitiveItem(self):
localctx = TaleParser.PrimitiveItemContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_primitiveItem)
try:
self.state = 228
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [TaleParser.IDENTIFIER]:
self.enterOuterAlt(localctx, 1)
self.state = 226
self.match(TaleParser.IDENTIFIER)
pass
elif token in [TaleParser.NUMBER, TaleParser.STRING]:
self.enterOuterAlt(localctx, 2)
self.state = 227
self.literal()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LiteralContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def intLiteral(self):
return self.getTypedRuleContext(TaleParser.IntLiteralContext,0)
def stringLiteral(self):
return self.getTypedRuleContext(TaleParser.StringLiteralContext,0)
def getRuleIndex(self):
return TaleParser.RULE_literal
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLiteral" ):
listener.enterLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLiteral" ):
listener.exitLiteral(self)
def literal(self):
localctx = TaleParser.LiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_literal)
try:
self.state = 232
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [TaleParser.NUMBER]:
self.enterOuterAlt(localctx, 1)
self.state = 230
self.intLiteral()
pass
elif token in [TaleParser.STRING]:
self.enterOuterAlt(localctx, 2)
self.state = 231
self.stringLiteral()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IntLiteralContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def NUMBER(self):
return self.getToken(TaleParser.NUMBER, 0)
def getRuleIndex(self):
return TaleParser.RULE_intLiteral
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIntLiteral" ):
listener.enterIntLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIntLiteral" ):
listener.exitIntLiteral(self)
def intLiteral(self):
localctx = TaleParser.IntLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_intLiteral)
try:
self.enterOuterAlt(localctx, 1)
self.state = 234
self.match(TaleParser.NUMBER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StringLiteralContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def STRING(self):
return self.getToken(TaleParser.STRING, 0)
def getRuleIndex(self):
return TaleParser.RULE_stringLiteral
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStringLiteral" ):
listener.enterStringLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStringLiteral" ):
listener.exitStringLiteral(self)
def stringLiteral(self):
localctx = TaleParser.StringLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_stringLiteral)
try:
self.enterOuterAlt(localctx, 1)
self.state = 236
self.match(TaleParser.STRING)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[20] = self.unary_sempred
self._predicates[22] = self.binary_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def unary_sempred(self, localctx:UnaryContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 2)
def binary_sempred(self, localctx:BinaryContext, predIndex:int):
if predIndex == 1:
return self.precpred(self._ctx, 2)
| 34.317362
| 239
| 0.590868
| 64,548
| 0.892274
| 0
| 0
| 0
| 0
| 0
| 0
| 7,549
| 0.104353
|
e1b490b033e953f1585ccd81fdcb489a598e5706
| 353
|
py
|
Python
|
004.py
|
gabrieleliasdev/python-cev
|
45390963b5112a982e673f6a6866da422bf9ae6d
|
[
"MIT"
] | null | null | null |
004.py
|
gabrieleliasdev/python-cev
|
45390963b5112a982e673f6a6866da422bf9ae6d
|
[
"MIT"
] | null | null | null |
004.py
|
gabrieleliasdev/python-cev
|
45390963b5112a982e673f6a6866da422bf9ae6d
|
[
"MIT"
] | null | null | null |
print('Olá, Mundo!')
print(7+4)
print('7'+'4')
print('Olá', 5)
# Every variable is an object
# An object is more than a variable
nome = 'Gabriel'
idade = 30
peso = 79
print(nome,idade,peso)
nome = input('>>> Nome ')
idade = input('>>> Idade ')
peso = input('>>> Peso ')
print(nome,idade,peso)
print(f'Nome:{nome} ,Idade:{idade} ,Peso:{peso}')
| 14.12
| 49
| 0.620397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.501393
|
e1b5d39efe358fd9f5a0abeb927321f0eef6f285
| 680
|
py
|
Python
|
examples/create_mac_table_entry.py
|
open-switch/opx-docs
|
f448f3f3dc0de38822bbf16c1e173eb108925a40
|
[
"CC-BY-4.0"
] | 122
|
2017-02-10T01:47:04.000Z
|
2022-03-23T20:11:11.000Z
|
examples/create_mac_table_entry.py
|
open-switch/opx-docs
|
f448f3f3dc0de38822bbf16c1e173eb108925a40
|
[
"CC-BY-4.0"
] | 37
|
2017-03-01T07:07:22.000Z
|
2021-11-11T16:47:42.000Z
|
examples/create_mac_table_entry.py
|
open-switch/opx-docs
|
f448f3f3dc0de38822bbf16c1e173eb108925a40
|
[
"CC-BY-4.0"
] | 39
|
2017-01-18T16:22:58.000Z
|
2020-11-18T13:23:43.000Z
|
#Python code block to configure MAC address table entry
import cps_utils
#Register the attribute type
cps_utils.add_attr_type('base-mac/table/mac-address', 'mac')
#Define the MAC address, interface index and VLAN attributes
d = {'mac-address': '00:0a:0b:cc:0d:0e', 'ifindex': 18, 'vlan': '100'}
#Create a CPS object
obj = cps_utils.CPSObject('base-mac/table', data=d)
#Associate the operation to the CPS object
tr_obj = ('create', obj.get())
#Create a transaction object
transaction = cps_utils.CPSTransaction([tr_obj])
#Check for failure
ret = transaction.commit()
if not ret:
raise RuntimeError('Error creating MAC Table Entry')
print('Successfully created')
| 27.2
| 70
| 0.738235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 422
| 0.620588
|
e1b62639aea1ec0a6c6d66e1d90f5f610c374034
| 4,397
|
py
|
Python
|
win/GUI/Configuration.py
|
WeAreAVP/mdqc
|
3130a713c70017bc54d8e5da6bb5766ba9d97423
|
[
"Apache-2.0"
] | 8
|
2019-01-18T08:58:02.000Z
|
2021-05-20T16:51:14.000Z
|
osx/GUI/Configuration.py
|
WeAreAVP/mdqc
|
3130a713c70017bc54d8e5da6bb5766ba9d97423
|
[
"Apache-2.0"
] | 7
|
2016-02-25T21:50:03.000Z
|
2017-12-13T14:27:29.000Z
|
osx/GUI/Configuration.py
|
WeAreAVP/mdqc
|
3130a713c70017bc54d8e5da6bb5766ba9d97423
|
[
"Apache-2.0"
] | 2
|
2020-02-13T16:00:07.000Z
|
2020-08-12T16:31:49.000Z
|
# -*- coding: UTF-8 -*-
'''
Created on May 14, 2014
@author: Furqan Wasi <furqan@avpreserve.com>
'''
import os, datetime, sys, platform, base64
class Configuration(object):
def __init__(self):
# Constructor
if os.name == 'posix':
self.OsType = 'linux'
elif os.name == 'nt':
self.OsType = 'Windows'
elif os.name == 'os2':
self.OsType = 'check'
self.application_name = 'Metadata Quality Control'
self.application_version = '0.4'
self.user_home_path = os.path.expanduser('~')
if self.OsType == 'Windows':
self.base_path = str(os.getcwd())+str(os.sep)
self.assets_path = r''+(os.path.join(self.base_path, 'assets'+str(os.sep)))
try:
self.avpreserve_img = os.path.join(sys._MEIPASS, 'assets' + (str(os.sep)) +'avpreserve.png')
except:
pass
else:
self.base_path = str(os.getcwd())+str(os.sep)
self.assets_path = r''+(os.path.join(self.base_path, 'assets'+str(os.sep)))
self.avpreserve_img = r''+(os.path.join(self.assets_path) + 'avpreserve.png')
self.logo_sign_small = 'logo_sign_small.png'
def getImagesPath(self):return str(self.assets_path)
def getAvpreserve_img(self):return self.avpreserve_img
def getBasePath(self):return str(self.base_path)
def getApplicationVersion(self):return str(self.application_version)
def getConfig_file_path(self):
return self.config_file_path
def EncodeInfo(self, string_to_be_encoded):
string_to_be_encoded = str(string_to_be_encoded).strip()
return base64.b16encode(base64.b16encode(string_to_be_encoded))
def getLogoSignSmall(self):
if self.getOsType() == 'Windows':
try:
return os.path.join(sys._MEIPASS, 'assets' + (str(os.sep)) + str(self.logo_sign_small))
except:
pass
else:
os.path.join(self.assets_path)
return os.path.join(self.assets_path, str(self.logo_sign_small))
def getOsType(self):return str(self.OsType)
def getApplicationName(self): return str(self.application_name)
def getUserHomePath(self): return str(os.path.expanduser('~'))
def getDebugFilePath(self):return str(self.log_file_path)
def getWindowsInformation(self):
"""
        Gets detailed information about Windows.
@return: tuple Windows Information
"""
WindowsInformation = {}
try:
major, minor, build, platformType, servicePack = sys.getwindowsversion()
WindowsInformation['major'] = major
WindowsInformation['minor'] = minor
WindowsInformation['build'] = build
WindowsInformation['platformType'] = platformType
WindowsInformation['servicePack'] = servicePack
windowDetailedName = platform.platform()
WindowsInformation['platform'] = windowDetailedName
windowDetailedName = str(windowDetailedName).split('-')
if windowDetailedName[0] is not None and (str(windowDetailedName[0]) == 'Windows' or str(windowDetailedName[0]) == 'windows'):
WindowsInformation['isWindows'] =True
else:
WindowsInformation['isWindows'] =False
if windowDetailedName[1] is not None and (str(windowDetailedName[1]) != ''):
WindowsInformation['WindowsType'] =str(windowDetailedName[1])
else:
WindowsInformation['WindowsType'] =None
WindowsInformation['ProcessorInfo'] = platform.processor()
try:
os.environ["PROGRAMFILES(X86)"]
bits = 64
except:
bits = 32
pass
WindowsInformation['bitType'] = "Win{0}".format(bits)
except:
pass
return WindowsInformation
def CleanStringForBreaks(self,StringToBeCleaned):
"""
@param StringToBeCleaned:
@return:
"""
CleanString = StringToBeCleaned.strip()
try:
CleanString = CleanString.replace('\r\n', '')
CleanString = CleanString.replace('\n', '')
CleanString = CleanString.replace('\r', '')
except:
pass
return CleanString
| 31.407143
| 139
| 0.596771
| 4,249
| 0.966341
| 0
| 0
| 0
| 0
| 0
| 0
| 669
| 0.152149
|
e1b62abc8e468748316b85f828dfc8de03775be8
| 17,306
|
py
|
Python
|
MainController.py
|
samuelvp360/Microbiological-Assay-Calculator
|
36317e266bf499f24f7e7d3f59328864a8723aa4
|
[
"FSFAP"
] | null | null | null |
MainController.py
|
samuelvp360/Microbiological-Assay-Calculator
|
36317e266bf499f24f7e7d3f59328864a8723aa4
|
[
"FSFAP"
] | null | null | null |
MainController.py
|
samuelvp360/Microbiological-Assay-Calculator
|
36317e266bf499f24f7e7d3f59328864a8723aa4
|
[
"FSFAP"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
from pathlib import Path
from datetime import datetime
from PyQt5 import QtCore as qtc
from PyQt5 import QtWidgets as qtw
from PyQt5 import uic
import numpy as np
from Models import AssaysModel, SamplesModel
from DB.AssaysDB import MyZODB
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar
from WellProcessor import WellProcessor
from Assay import Assay
matplotlib.use('Qt5Agg')
p = Path(__file__)
print(p)
isLink = os.path.islink(p)
if isLink:
theLink = os.readlink(p)
path = Path(theLink).resolve().parent
path = f'{path}/'
print('linked')
else:
path = ''
print('unlinked')
class PlotCanvas(FigureCanvasQTAgg):
"""
    Matplotlib canvas used to embed the assay plots in the Qt window.
"""
def __init__(self, parent=None):
self.fig = Figure(
figsize=(12, 8), dpi=100, facecolor='#2d2a2e', tight_layout=True
)
self.ax = self.fig.add_subplot(111)
super().__init__(self.fig)
class MainWindow(qtw.QMainWindow):
def __init__(self, path):
super().__init__()
self.path = path
uic.loadUi(f'{path}Views/uiMainWindow.ui', self)
self.database = MyZODB(path)
self.canvas = PlotCanvas(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.uiToolbarLayout.addWidget(self.toolbar)
self.uiPlotLayout.addWidget(self.canvas)
self.assaysList = self.LoadAssays()
self.assaysToRemove = []
self.model = AssaysModel(self.assaysList)
self.uiAssaysTableView.setModel(self.model)
self.uiAssaysTableView.resizeColumnsToContents()
self.uiAssaysTableView.resizeRowsToContents()
self.selectedAssay = None
self.selectedSamples = None
# ---------------- SIGNALS ---------------
self.uiCommitButton.clicked.connect(self.StoreChanges)
self.uiAddAssayButton.clicked.connect(self.AddAssay)
self.uiDelAssayButton.clicked.connect(self.RemoveAssay)
self.uiAddSampleButton.clicked.connect(self.AddSample)
self.uiDelSampleButton.clicked.connect(self.RemoveSample)
self.uiDiscardButton.clicked.connect(self.DiscardChanges)
self.uiPlotButton.clicked.connect(lambda: self.Plot(ext=True))
self.uiAssaysTableView.selectionModel().selectionChanged.connect(self.SetSelectedAssay)
self.uiSamplesTableView.doubleClicked.connect(self.TriggerWell)
def Plot(self, ext=False):
if ext:
fig, ax = plt.subplots()
self.canvas.ax.clear()
assay = self.assaysList[self.selectedAssay]
samples = [assay.samples[i] for i in self.selectedSamples]
n = len(samples)
x = np.arange(len(assay.conc))
limit = 0.4
width = 2 * limit / n
if n == 1:
factor = np.zeros(1)
else:
factor = np.linspace(-limit + width / 2, limit - width / 2, n)
for i, sample in enumerate(samples):
mean = sample['Inhibition'].loc['Mean', ::-1]
std = sample['Inhibition'].loc['Std', ::-1]
if ext:
ax.bar(
x + factor[i], mean, width, label=sample['Name'],
yerr=std, capsize=15 / n, edgecolor='black'
)
ax.axhline(
100, color='black', linestyle='--', linewidth=0.8
)
self.canvas.ax.bar(
x + factor[i], mean, width, label=sample['Name'],
yerr=std, capsize=15 / n, edgecolor='black'
)
self.canvas.ax.axhline(
100, color='black', linestyle='--', linewidth=0.8
)
self.canvas.ax.set_title(assay.name, color='#ae81ff')
self.canvas.ax.set_xlabel(u'Concentration (\u00B5g/mL)', color='#f92672')
self.canvas.ax.set_ylabel('%Inhibition', color='#f92672')
self.canvas.ax.set_xticks(x)
self.canvas.ax.set_xticklabels(assay.conc[::-1])
self.canvas.ax.tick_params(axis='x', colors='#66d9ef')
self.canvas.ax.tick_params(axis='y', colors='#66d9ef')
self.canvas.ax.legend()
self.canvas.draw()
if ext:
ax.set_title(assay.name, color='#ae81ff')
ax.set_xlabel(u'Concentrations (\u00B5g/mL)', color='#f92672')
ax.set_ylabel('%Inhibition', color='#f92672')
ax.set_xticks(x)
ax.set_xticklabels(assay.conc)
ax.tick_params(axis='x', colors='#66d9ef')
ax.tick_params(axis='y', colors='#66d9ef')
ax.legend()
fig.tight_layout()
plt.show()
def LoadAssays(self):
DB = self.database.FetchDB()
if not len(DB):
return []
else:
assayNames = DB.keys()
return [DB.get(i) for i in assayNames]
def SetSelectedAssay(self):
indexes = self.uiAssaysTableView.selectedIndexes()
if indexes:
self.selectedAssay = indexes[0].row()
assay = self.assaysList[self.selectedAssay]
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.uiSamplesTableView.resizeColumnsToContents()
self.samplesModel.layoutChanged.emit()
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
else:
self.selectedAssay = None
def SetSelectedSamples(self):
indexes = self.uiSamplesTableView.selectedIndexes()
if indexes:
self.selectedSamples = tuple(set([i.row() for i in indexes]))
if self.selectedAssay is not None:
self.Plot()
else:
qtw.QMessageBox.warning(
                    self, 'No Assay Selected!',
'Please select the corresponding assay before showing the plot'
)
else:
self.selectedSamples = None
def SetConcentrations(self):
value, ok = qtw.QInputDialog.getText(
self, 'Concentrations', 'Please enter the highest concentration'
)
if ok:
try:
                # the number 6 can be changed to match the number of
                # serial dilutions
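                # Added illustration: an input of 100 produces the strings
                # ['100.0', '50.0', '25.0', '12.5', '6.25', '3.125'] (six two-fold dilutions).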
conc = [str(float(value.replace(',', '.')) / 2 ** i) for i in range(6)]
return conc
except ValueError:
qtw.QMessageBox.warning(
self, 'Not a valid number!',
                    'You have not entered a valid number, please try again'
)
return False
def AddAssay(self):
items = ('MIC', 'MTT')
typeOfAssay, ok = qtw.QInputDialog.getItem(
self, 'Type of Assay', 'Choose the type of assay to add',
items, 0, False
)
name = self.SetAssayName()
conc = self.SetConcentrations()
while not conc:
conc = self.SetConcentrations()
date = datetime.now()
assay = Assay(typeOfAssay, name, conc, date)
self.assaysList.append(assay)
self.model.layoutChanged.emit()
self.uiAssaysTableView.resizeColumnsToContents()
self.uiAssaysTableView.resizeRowsToContents()
def AddSample(self):
items = ['1', '2', '3', '4']
if self.selectedAssay is not None:
numOfSamples, ok1 = qtw.QInputDialog.getItem(
self, 'Number of Samples', 'Choose the number of samples per plate',
items, 0, False
)
if int(numOfSamples) == 3:
del items[3]
elif int(numOfSamples) == 4:
del items[2:]
replicas, ok2 = qtw.QInputDialog.getItem(
self, 'Number of Samples', 'Choose the number of replicas',
items, 0, False
)
if ok1 and ok2:
self.wellProcessor = WellProcessor(
self.path, self.assaysList[self.selectedAssay].name,
self.assaysList[self.selectedAssay].conc,
int(numOfSamples), int(replicas)
)
self.wellProcessor.submitted.connect(self.SampleProcessor)
self.wellProcessor.show()
else:
return False
else:
qtw.QMessageBox.warning(
self, 'No Assay Selection',
'You have not selected an assay, please choose one assay before adding a sample'
)
def RemoveAssay(self):
if self.selectedAssay is not None:
self.assaysToRemove.append(self.assaysList[self.selectedAssay].name)
if self.assaysList[self.selectedAssay].stored:
self.database.RemoveAssay(self.assaysList[self.selectedAssay].name)
del self.assaysList[self.selectedAssay]
self.selectedAssay = self.selectedAssay - 1 if self.selectedAssay - 1 >= 0 else 0
if len(self.assaysList) > 0:
index = self.uiAssaysTableView.model().index(self.selectedAssay, 0, qtc.QModelIndex())
self.uiAssaysTableView.setCurrentIndex(index)
self.uiAssaysTableView.selectionModel().selectionChanged.connect(self.SetSelectedAssay)
self.model.layoutChanged.emit()
def RemoveSample(self):
if self.selectedAssay is not None and self.selectedSamples is not None:
self.assaysList[self.selectedAssay].RemoveSample(self.selectedSamples)
self.model.layoutChanged.emit()
assay = self.assaysList[self.selectedAssay]
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.selectedSamples = [self.selectedSamples[0] - 1 if self.selectedSamples[0] - 1 >= 0 else 0]
if len(self.assaysList[self.selectedAssay].samples) > 0:
index = self.uiSamplesTableView.model().index(self.selectedSamples[0], 0, qtc.QModelIndex())
self.uiSamplesTableView.setCurrentIndex(index)
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
self.samplesModel.layoutChanged.emit()
@qtc.pyqtSlot(list, list, list, object, object)
def SampleProcessor(self, samples, sampleNames, samplesPositions, TF, T0):
assay = self.assaysList[self.selectedAssay]
for index, name in enumerate(sampleNames):
exist = [True if s['Name'] == name else False for s in assay.samples]
if True in exist:
reply = qtw.QMessageBox.question(
self, 'Existing Sample',
f'The sample {name} already exists in {assay.name}. Do you want to overwrite it?',
qtw.QMessageBox.Yes | qtw.QMessageBox.No,
qtw.QMessageBox.No
)
if reply == qtw.QMessageBox.Yes:
for idx, value in enumerate(exist):
if value:
del assay.samples[idx]
assay.StoreSample(samples[index], index, sampleNames, samplesPositions, TF, T0)
elif reply == qtw.QMessageBox.No:
continue
else:
assay.StoreSample(samples[index], index, sampleNames, samplesPositions, TF, T0)
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
self.samplesModel.layoutChanged.emit()
self.uiSamplesTableView.resizeColumnsToContents()
def SetAssayName(self):
text, ok = qtw.QInputDialog.getText(
self, 'Assay Name', 'Please enter the name of the assay'
)
if ok:
return text
def TriggerWell(self):
if self.selectedAssay is not None and self.selectedSamples is not None:
assay = self.assaysList[self.selectedAssay]
sample = assay.samples[self.selectedSamples[0]]
self.wellProcessor = WellProcessor(
self.path, assay.name, assay.conc, len(sample['Name of samples']),
sample['TF'].shape[0], sample['T0'], sample['TF'],
sample['Name of samples'], sample['Positions']
)
self.wellProcessor.submitted.connect(self.SampleProcessor)
self.wellProcessor.show()
def TrackChanges(self):
assaysToStore = [index for index, assay in enumerate(self.assaysList) if not assay.stored]
assaysToUpdate = [index for index, assay in enumerate(self.assaysList) if assay._p_changed]
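        # Added note: _p_changed is ZODB's persistence "dirty" flag, so this picks up
        # assays that are already stored but were modified since the last commit.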
assaysToRemove = self.assaysToRemove
return assaysToStore, assaysToUpdate, assaysToRemove
def StoreChanges(self):
assaysToStore, assaysToUpdate, assaysToRemove = self.TrackChanges()
toStore = len(assaysToStore)
toUpdate = len(assaysToUpdate)
toRemove = len(assaysToRemove)
message = qtw.QMessageBox()
message.setWindowTitle('Changes to save')
message.setStandardButtons(qtw.QMessageBox.Ok | qtw.QMessageBox.Cancel)
text = []
if toStore > 0:
text1 = ['\nTo Store: ' + self.assaysList[i].name for i in assaysToStore]
text.extend(text1)
if toUpdate > 0:
text2 = ['\nTo Update: ' + self.assaysList[i].name for i in assaysToUpdate]
text.extend(text2)
if toRemove > 0:
text3 = ['\nTo Remove: ' + name for name in assaysToRemove]
text.extend(text3)
if toStore + toUpdate + toRemove > 0:
message.setText(
'The following assays will be stored, removed or updated:{}'.format(''.join(text))
)
returnValue = message.exec()
if returnValue == qtw.QMessageBox.Ok:
for index in assaysToStore:
self.database.StoreAssay(self.assaysList[index])
if len(assaysToStore) == 0 and len(assaysToUpdate) > 0:
self.database.Commit()
if len(assaysToStore) == 0 and len(assaysToRemove) > 0:
self.database.Commit()
else:
self.database.Abort()
else:
message2 = qtw.QMessageBox()
message2.setText('There are no changes to be saved')
message2.exec()
def DiscardChanges(self):
self.database.Abort()
self.assaysList = self.LoadAssays()
self.model = AssaysModel(self.assaysList)
self.uiAssaysTableView.setModel(self.model)
self.uiAssaysTableView.resizeColumnsToContents()
self.uiAssaysTableView.resizeRowsToContents()
if len(self.assaysList) > 0:
index = self.uiAssaysTableView.model().index(self.selectedAssay, 0, qtc.QModelIndex())
self.uiAssaysTableView.setCurrentIndex(index)
self.uiAssaysTableView.selectionModel().selectionChanged.connect(self.SetSelectedAssay)
self.model.layoutChanged.emit()
if len(self.assaysList[self.selectedAssay].samples) > 0:
index = self.uiSamplesTableView.model().index(self.selectedSamples[0], 0, qtc.QModelIndex())
self.uiSamplesTableView.setCurrentIndex(index)
assay = self.assaysList[self.selectedAssay]
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
self.samplesModel.layoutChanged.emit()
self.uiSamplesTableView.resizeColumnsToContents()
def closeEvent(self, event):
try:
assaysToStore, assaysToUpdate, assaysToRemove = self.TrackChanges()
toStore = len(assaysToStore)
toUpdate = len(assaysToUpdate)
toRemove = len(assaysToRemove)
if toStore + toUpdate + toRemove > 0:
reply = qtw.QMessageBox.question(
self, 'Window Close',
                    'Some changes have not been stored yet, do you want to save them?',
qtw.QMessageBox.Yes | qtw.QMessageBox.No | qtw.QMessageBox.Cancel,
qtw.QMessageBox.No
)
if reply == qtw.QMessageBox.Yes:
self.StoreChanges()
self.database.Close()
event.accept()
elif reply == qtw.QMessageBox.No:
self.database.Abort()
self.database.Close()
event.accept()
else:
event.ignore()
else:
self.database.Close()
event.accept()
except AttributeError:
self.database.Close()
event.accept()
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
window = MainWindow(path)
window.show()
sys.exit(app.exec_())
| 42.836634
| 108
| 0.596729
| 16,343
| 0.944191
| 0
| 0
| 1,473
| 0.0851
| 0
| 0
| 1,490
| 0.086082
|
e1b6ebd37b97bc9b109f511037c684ea5fa2de9b
| 225
|
py
|
Python
|
events/defaults.py
|
bozbalci/cython-experiments
|
a675571e09297e3cda9154e8b611562bb8b14f7e
|
[
"Unlicense"
] | 1
|
2018-06-23T17:52:20.000Z
|
2018-06-23T17:52:20.000Z
|
events/defaults.py
|
bozbalci/cython-experiments
|
a675571e09297e3cda9154e8b611562bb8b14f7e
|
[
"Unlicense"
] | null | null | null |
events/defaults.py
|
bozbalci/cython-experiments
|
a675571e09297e3cda9154e8b611562bb8b14f7e
|
[
"Unlicense"
] | null | null | null |
# defaults.py: contains the built-in variables, events and methods
# used for scripting the C program
import event
events = {}
_event_names = ["on_start", "on_exit"]
for evt in _event_names:
events[evt] = event.Event()
| 22.5
| 66
| 0.724444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.528889
|
e1b7011c0667fc12e337dc4c85e26236aa831c39
| 15,526
|
py
|
Python
|
src/parse_text.py
|
rflieshman/BOLSTM
|
be7551957001a9bdaab6051145f96ad9d7415209
|
[
"Apache-2.0"
] | 13
|
2019-04-11T02:20:49.000Z
|
2021-03-26T11:00:10.000Z
|
src/parse_text.py
|
rflieshman/BOLSTM
|
be7551957001a9bdaab6051145f96ad9d7415209
|
[
"Apache-2.0"
] | 5
|
2018-12-05T14:38:40.000Z
|
2021-12-13T19:46:53.000Z
|
src/parse_text.py
|
lasigeBioTM/BOLSTM
|
c33a8b2a7722acb5e3ff55c3735591aea4f76f49
|
[
"Apache-2.0"
] | 2
|
2019-10-13T13:47:19.000Z
|
2021-01-16T10:43:33.000Z
|
from itertools import combinations
import numpy as np
import spacy
import sys
from spacy.tokenizer import Tokenizer
import re
from subprocess import PIPE, Popen
import os
import logging
import networkx as nx
import en_core_web_sm
import string
from neg_gv import neg_gv_list
SSTDIR = "sst-light-0.4/"
TEMP_DIR = "temp/"
def prevent_sentence_segmentation(doc):
for token in doc:
# This will entirely disable spaCy's sentence detection
token.is_sent_start = False
return doc
nlp = en_core_web_sm.load(disable=["ner"])
nlp.add_pipe(prevent_sentence_segmentation, name="prevent-sbd", before="parser")
# https://stackoverflow.com/a/41817795/3605086
def get_network_graph_spacy(document):
"""
Convert the dependencies of the spacy document object to a networkX graph
:param document: spacy parsed document object
:return: networkX graph object and nodes list
"""
edges = []
nodes = []
# ensure that every token is connected
# edges.append(("ROOT", '{0}-{1}'.format(list(document)[0].lower_, list(document)[0].i)))
for s in document.sents:
edges.append(("ROOT", "{0}-{1}".format(s.root.lower_, s.root.i)))
for token in document:
nodes.append("{0}-{1}".format(token.lower_, token.i))
# edges.append(("ROOT", '{0}-{1}'.format(token.lower_, token.i)))
# print('{0}-{1}'.format(token.lower_, token.i))
# FYI https://spacy.io/docs/api/token
for child in token.children:
# print("----", '{0}-{1}'.format(child.lower_, child.i))
edges.append(
(
"{0}-{1}".format(token.lower_, token.i),
"{0}-{1}".format(child.lower_, child.i),
)
)
return nx.Graph(edges), nodes
def get_head_tokens(entities, sentence):
"""
:param entities: dictionary mapping entity IDs to (offset, text)
:param sentence: sentence parsed by spacy
:return: dictionary mapping head tokens word-idx to entity IDs
"""
sentence_head_tokens = {}
for eid in entities:
offset = (entities[eid][0][0], entities[eid][0][-1])
# starts = {tok.i: tok.idx for tok in doc}
# entity_tokens = sentence.char_span(offset[0], offset[1])
entity_tokens = [
(t, i) for i, t in enumerate(sentence.token) if t.beginChar == offset[0]
]
# if not entity_tokens:
# try to include the next char
# entity_tokens = sentence.char_span(offset[0], offset[1] + 1)
# entity_tokens = [t for t in sentence.token if t.beginChar == offset[0]]
if not entity_tokens:
logging.warning(
(
"no tokens found:",
entities[eid],
sentence.text,
"|".join(
[
"{}({}-{})".format(t.word, t.beginChar, t.endChar)
for t in sentence.token
]
),
)
)
# sys.exit()
else:
head_token = "{0}-{1}".format(
entity_tokens[0][0].word.lower(), entity_tokens[0][1]
)
if head_token in sentence_head_tokens:
logging.warning(
(
"head token conflict:",
sentence_head_tokens[head_token],
entities[eid],
)
)
sentence_head_tokens[head_token] = eid
return sentence_head_tokens
def get_head_tokens_spacy(entities, sentence, positive_entities):
"""
:param entities: dictionary mapping entity IDs to (offset, text)
:param sentence: sentence parsed by spacy
:return: dictionary mapping head tokens word-idx to entity IDs
"""
sentence_head_tokens = {}
pos_gv = set()
neg_gv = set()
for eid in entities:
offset = (entities[eid][0][0], entities[eid][0][-1])
# starts = {tok.i: tok.idx for tok in doc}
entity_tokens = sentence.char_span(offset[0], offset[1])
# if not entity_tokens:
# try to include the next char
# entity_tokens = sentence.char_span(offset[0], offset[1] + 1)
i = 1
while not entity_tokens and i + offset[1] < len(sentence.text) + 1:
entity_tokens = sentence.char_span(offset[0], offset[1] + i)
i += 1
i = 0
while not entity_tokens and offset[0] - i > 0:
entity_tokens = sentence.char_span(offset[0] - i, offset[1])
i += 1
if not entity_tokens:
logging.warning(
(
"no tokens found:",
entities[eid],
sentence.text,
"|".join([t.text for t in sentence]),
)
)
else:
head_token = "{0}-{1}".format(
entity_tokens.root.lower_, entity_tokens.root.i
)
if eid in positive_entities:
pos_gv.add(entity_tokens.root.head.lower_)
else:
neg_gv.add(entity_tokens.root.head.lower_)
if head_token in sentence_head_tokens:
logging.warning(
(
"head token conflict:",
sentence_head_tokens[head_token],
entities[eid],
)
)
sentence_head_tokens[head_token] = eid
return sentence_head_tokens, pos_gv, neg_gv
def run_sst(token_seq):
chunk_size = 500
wordnet_tags = {}
sent_ids = list(token_seq.keys())
chunks = [sent_ids[i : i + chunk_size] for i in range(0, len(sent_ids), chunk_size)]
for i, chunk in enumerate(chunks):
sentence_file = open("{}/sentences_{}.txt".format(TEMP_DIR, i), "w")
for sent in chunk:
sentence_file.write("{}\t{}\t.\n".format(sent, "\t".join(token_seq[sent])))
sentence_file.close()
sst_args = [
"sst",
"bitag",
"{}/MODELS/WSJPOSc_base_20".format(SSTDIR),
"{}/DATA/WSJPOSc.TAGSET".format(SSTDIR),
"{}/MODELS/SEM07_base_12".format(SSTDIR),
"{}/DATA/WNSS_07.TAGSET".format(SSTDIR),
"{}/sentences_{}.txt".format(TEMP_DIR, i),
"0",
"0",
]
p = Popen(sst_args, stdout=PIPE)
p.communicate()
with open("{}/sentences_{}.txt.tags".format(TEMP_DIR, i)) as f:
output = f.read()
sstoutput = parse_sst_results(output)
wordnet_tags.update(sstoutput)
return wordnet_tags
def parse_sst_results(results):
sentences = {}
lines = results.strip().split("\n")
for l in lines:
values = l.split("\t")
wntags = [x.split(" ")[-1].split("-")[-1] for x in values[1:]]
sentences[values[0]] = wntags
return sentences
def parse_sentence_spacy(sentence_text, sentence_entities):
# use spacy to parse a sentence
for e in sentence_entities:
idx = sentence_entities[e][0]
sentence_text = (
sentence_text[: idx[0]]
+ sentence_text[idx[0] : idx[1]].replace(" ", "_")
+ sentence_text[idx[1] :]
)
# clean text to make tokenization easier
sentence_text = sentence_text.replace(";", ",")
sentence_text = sentence_text.replace("*", " ")
sentence_text = sentence_text.replace(":", ",")
sentence_text = sentence_text.replace(" - ", " ; ")
parsed = nlp(sentence_text)
return parsed
def process_sentence_spacy(
sentence,
sentence_entities,
sentence_pairs,
positive_entities,
wordnet_tags=None,
mask_entities=True,
min_sdp_len=0,
max_sdp_len=15,
):
"""
Process sentence to obtain labels, instances and classes for a ML classifier
:param sentence: sentence processed by spacy
:param sentence_entities: dictionary mapping entity ID to ((e_start, e_end), text, paths_to_root)
:param sentence_pairs: dictionary mapping pairs of known entities in this sentence to pair types
:return: labels of each pair (according to sentence_entities,
word vectors and classes (pair types according to sentence_pairs)
"""
left_word_vectors = []
right_word_vectors = []
left_wordnets = []
right_wordnets = []
classes = []
labels = []
graph, nodes_list = get_network_graph_spacy(sentence)
sentence_head_tokens, pos_gv, neg_gv = get_head_tokens_spacy(
sentence_entities, sentence, positive_entities
)
# print(neg_gv - pos_gv)
entity_offsets = [sentence_entities[x][0][0] for x in sentence_entities]
# print(sentence_head_tokens)
for (e1, e2) in combinations(sentence_head_tokens, 2):
# print()
# print(sentence_head_tokens[e1], e1, sentence_head_tokens[e2], e2)
# reorder according to entity ID
if int(sentence_head_tokens[e1].split("e")[-1]) > int(
sentence_head_tokens[e2].split("e")[-1]
):
e1, e2 = e2, e1
e1_text = sentence_entities[sentence_head_tokens[e1]]
e2_text = sentence_entities[sentence_head_tokens[e2]]
if e1_text[1].lower() == e2_text[1].lower():
# logging.debug("skipped same text: {} {}".format(e1_text, e2_text))
continue
middle_text = sentence.text[e1_text[0][-1] : e2_text[0][0]]
# if middle_text.strip() == "or" or middle_text.strip() == "and":
# logging.debug("skipped entity list: {} {} {}".format(e1_text, middle_text, e2_text))
# continue
if middle_text.strip() in string.punctuation:
# logging.debug("skipped punctuation: {} {} {}".format(e1_text, middle_text, e2_text))
continue
# if len(middle_text) < 3:
# logging.debug("skipped entity list: {} {} {}".format(e1_text, middle_text, e2_text))
# continue
head_token1_idx = int(e1.split("-")[-1])
head_token2_idx = int(e2.split("-")[-1])
try:
sdp = nx.shortest_path(graph, source=e1, target=e2)
if len(sdp) < min_sdp_len or len(sdp) > max_sdp_len:
# logging.debug("skipped short sdp: {} {} {}".format(e1_text, str(sdp), e2_text))
continue
neg = False
is_neg_gv = False
for i, element in enumerate(sdp):
token_idx = int(element.split("-")[-1]) # get the index of the token
token_text = element.split("-")[0]
if (i == 1 or i == len(sdp) - 2) and token_text in neg_gv_list:
logging.info("skipped gv {} {}:".format(token_text, str(sdp)))
# is_neg_gv = True
sdp_token = sentence[token_idx] # get the token obj
# if any(c.dep_ == 'neg' for c in sdp_token.children):
# neg = True
if neg or is_neg_gv:
continue
# if len(sdp) < 3: # len=2, just entities
# sdp = [sdp[0]] + nodes_list[head_token1_idx-2:head_token1_idx]
# sdp += nodes_list[head_token2_idx+1:head_token2_idx+3] + [sdp[-1]]
# print(e1_text[1:], e2_text[1:], sdp)
# if len(sdp) == 2:
# add context words
vector = []
wordnet_vector = []
negations = 0
head_token_position = None
for i, element in enumerate(sdp):
if element != "ROOT":
token_idx = int(
element.split("-")[-1]
) # get the index of the token
sdp_token = sentence[token_idx] # get the token obj
# if any(c.dep_ == 'neg' for c in sdp_token.children):
# token is negated!
# vector.append("not")
# negations += 1
# logging.info("negated!: {}<->{} {}: {}".format(e1_text, e2_text, sdp_token.text, sentence.text))
if mask_entities and sdp_token.idx in entity_offsets:
vector.append("drug")
else:
vector.append(sdp_token.text)
if wordnet_tags:
wordnet_vector.append(wordnet_tags[token_idx])
# print(element, sdp_token.text, head_token, sdp)
head_token = "{}-{}".format(
sdp_token.head.lower_, sdp_token.head.i
) # get the key of head token
# head token must not have its head in the path, otherwise that would be the head token
# in some cases the token is its own head
if head_token not in sdp or head_token == element:
# print("found head token of:", e1_text, e2_text, sdp_token.text, sdp)
head_token_position = i + negations
# vector.append(parsed[token_idx].text)
# print(vector)
if head_token_position is None:
print("head token not found:", e1_text, e2_text, sdp)
sys.exit()
else:
left_vector = vector[: head_token_position + 1]
right_vector = vector[head_token_position:]
left_wordnet = wordnet_vector[: head_token_position + 1]
right_wordnet = wordnet_vector[head_token_position:]
# word_vectors.append(vector)
left_word_vectors.append(left_vector)
right_word_vectors.append(right_vector)
left_wordnets.append(left_wordnet)
right_wordnets.append(right_wordnet)
# if (sentence_head_tokens[e1], sentence_head_tokens[e2]) in sentence_pairs:
# print(sdp, e1, e2, sentence_text)
# print(e1_text, e2_text, sdp, sentence_text)
# instances.append(sdp)
except nx.exception.NetworkXNoPath:
# pass
logging.warning("no path:", e1_text, e2_text, graph.nodes())
left_word_vectors.append([])
right_word_vectors.append([])
left_wordnets.append([])
right_wordnets.append([])
# print("no path:", e1_text, e2_text, sentence_text, parsed.print_tree(light=True))
# sys.exit()
except nx.NodeNotFound:
logging.warning(
(
"node not found:",
e1_text,
e2_text,
e1,
e2,
list(sentence),
graph.nodes(),
)
)
left_word_vectors.append([])
right_word_vectors.append([])
left_wordnets.append([])
right_wordnets.append([])
labels.append((sentence_head_tokens[e1], sentence_head_tokens[e2]))
# print(sentence_head_tokens[e1], sentence_head_tokens[e2])
if (sentence_head_tokens[e1], sentence_head_tokens[e2]) in sentence_pairs:
classes.append(
sentence_pairs[(sentence_head_tokens[e1], sentence_head_tokens[e2])]
)
else:
classes.append(0)
return (
labels,
(left_word_vectors, right_word_vectors),
(left_wordnets, right_wordnets),
classes,
pos_gv,
neg_gv,
)
| 37.502415
| 119
| 0.550818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,642
| 0.298982
|
e1b73e252109287a68039d70bc02eba7d5e821da
| 1,049
|
py
|
Python
|
metadata-ingestion/examples/library/dataset_set_tag.py
|
cuong-pham/datahub
|
cb4eb001758f55622add0f4dc3650cf483609cba
|
[
"Apache-2.0"
] | 1,603
|
2016-03-03T17:21:03.000Z
|
2020-01-22T22:12:02.000Z
|
metadata-ingestion/examples/library/dataset_set_tag.py
|
cuong-pham/datahub
|
cb4eb001758f55622add0f4dc3650cf483609cba
|
[
"Apache-2.0"
] | 1,157
|
2016-03-03T19:29:22.000Z
|
2020-01-20T14:41:59.000Z
|
metadata-ingestion/examples/library/dataset_set_tag.py
|
cuong-pham/datahub
|
cb4eb001758f55622add0f4dc3650cf483609cba
|
[
"Apache-2.0"
] | 570
|
2016-03-03T17:21:05.000Z
|
2020-01-21T06:54:10.000Z
|
# Imports for urn construction utility methods
import logging
from datahub.emitter.mce_builder import make_dataset_urn, make_tag_urn
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
# Imports for metadata model classes
from datahub.metadata.schema_classes import (
ChangeTypeClass,
GlobalTagsClass,
TagAssociationClass,
)
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
dataset_urn = make_dataset_urn(platform="hive", name="realestate_db.sales", env="PROD")
tag_urn = make_tag_urn("purchase")
event: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper(
entityType="dataset",
changeType=ChangeTypeClass.UPSERT,
entityUrn=dataset_urn,
aspectName="globalTags",
aspect=GlobalTagsClass(tags=[TagAssociationClass(tag=tag_urn)]),
)
# Create rest emitter
rest_emitter = DatahubRestEmitter(gms_server="http://localhost:8080")
rest_emitter.emit(event)
log.info(f"Set tags to {tag_urn} for dataset {dataset_urn}")
| 32.78125
| 87
| 0.804576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.228789
|
e1b7f693b03922194b579f49635c8089ae32b745
| 517
|
py
|
Python
|
examples/Old Format/matrix_latex.py
|
waldyrious/galgebra
|
b5eb070340434d030dd737a5656fbf709538b0b1
|
[
"BSD-3-Clause"
] | 151
|
2018-09-18T12:30:14.000Z
|
2022-03-16T08:02:48.000Z
|
examples/Old Format/matrix_latex.py
|
abrombo/galgebra
|
5ae058c9ba2c17b1baf46c58f77124e82eaf428a
|
[
"BSD-3-Clause"
] | 454
|
2018-09-19T01:42:30.000Z
|
2022-01-18T14:02:00.000Z
|
examples/Old Format/matrix_latex.py
|
abrombo/galgebra
|
5ae058c9ba2c17b1baf46c58f77124e82eaf428a
|
[
"BSD-3-Clause"
] | 30
|
2019-02-22T08:25:50.000Z
|
2022-01-15T05:20:22.000Z
|
from __future__ import print_function
from sympy import symbols, Matrix
from galgebra.printer import xpdf, Format
def main():
Format()
a = Matrix ( 2, 2, ( 1, 2, 3, 4 ) )
b = Matrix ( 2, 1, ( 5, 6 ) )
c = a * b
print(a,b,'=',c)
x, y = symbols ( 'x, y' )
d = Matrix ( 1, 2, ( x ** 3, y ** 3 ))
e = Matrix ( 2, 2, ( x ** 2, 2 * x * y, 2 * x * y, y ** 2 ) )
f = d * e
print('%',d,e,'=',f)
# xpdf()
xpdf(pdfprog=None)
return
if __name__ == "__main__":
main()
| 19.884615
| 65
| 0.475822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.06383
|
e1b88db11881c00abc4ca3f31868a0861378a947
| 780
|
py
|
Python
|
hopsapp/__init__.py
|
mrahman013/Hope4Hops-web-applcation
|
d5bde1463c6fbc1ea5424cb656504119393c6ce2
|
[
"MIT"
] | null | null | null |
hopsapp/__init__.py
|
mrahman013/Hope4Hops-web-applcation
|
d5bde1463c6fbc1ea5424cb656504119393c6ce2
|
[
"MIT"
] | null | null | null |
hopsapp/__init__.py
|
mrahman013/Hope4Hops-web-applcation
|
d5bde1463c6fbc1ea5424cb656504119393c6ce2
|
[
"MIT"
] | null | null | null |
"""Implements a basic flask app that provides hashes of text."""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import flask_login
#pylint: disable=invalid-name
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://yjjuylsytqewni:d0d63322c6abd33e2dadeafd7ef2501f73af54cf2d39596e464ea2c18b0234a3@ec2-23-23-78-213.compute-1.amazonaws.com:5432/d3gdnt7fkmonn1' #pylint: disable=line-too-long
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.secret_key = 'HGTYNVK123LOL908973'
db = SQLAlchemy(app)
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# These imports need to be here; that's why pylint is disabled
#pylint: disable=wrong-import-position
import hopsapp.models
import hopsapp.routes
| 35.454545
| 224
| 0.815385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 456
| 0.584615
|
e1b8fdfc631946eef5fedb38c2e25e5e6c2e1add
| 800
|
py
|
Python
|
npytoImage.py
|
x35yao/camera
|
0ee77f5de72d785ba68bef44a557470ec425d702
|
[
"MIT"
] | null | null | null |
npytoImage.py
|
x35yao/camera
|
0ee77f5de72d785ba68bef44a557470ec425d702
|
[
"MIT"
] | null | null | null |
npytoImage.py
|
x35yao/camera
|
0ee77f5de72d785ba68bef44a557470ec425d702
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
n = 428671
img_RS_color = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_RS_color.npy'.format(n))
cv2.imshow('RS Color Image {}'.format(n), img_RS_color)
#
# # img_RS_depth = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_RS_depth.npy'.format(n))
# # cv2.imshow('RS Depth Image {}'.format(n), img_RS_depth)
#
# img_ZED_color = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_ZED_color.npy'.format(n))
# cv2.imshow('ZED Color Image {}'.format(n), img_ZED_color)
#
# # img_ZED_depth = np.load('/home/p4bhattachan/gripper/3DCameraServer/testImages/npyFiles/{}_ZED_depth.npy'.format(n))
# # cv2.imshow('ZED Depth Image {}'.format(n), img_ZED_depth)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 38.095238
| 119
| 0.7475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 633
| 0.79125
|
e1b94f246fa698d25573d863b176f320113a2877
| 11,217
|
py
|
Python
|
magenta/music/sequences_lib.py
|
jellysquider/magenta
|
0fc8188870f5d1c988b76dae434b21e58362516c
|
[
"Apache-2.0"
] | null | null | null |
magenta/music/sequences_lib.py
|
jellysquider/magenta
|
0fc8188870f5d1c988b76dae434b21e58362516c
|
[
"Apache-2.0"
] | null | null | null |
magenta/music/sequences_lib.py
|
jellysquider/magenta
|
0fc8188870f5d1c988b76dae434b21e58362516c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines sequence of notes objects for creating datasets."""
import collections
import copy
# internal imports
from magenta.music import constants
from magenta.protobuf import music_pb2
# Set the quantization cutoff.
# Note events before this cutoff are rounded down to nearest step. Notes
# above this cutoff are rounded up to nearest step. The cutoff is given as a
# fraction of a step.
# For example, with quantize_cutoff = 0.75 using 0-based indexing,
# if .75 < event <= 1.75, it will be quantized to step 1.
# If 1.75 < event <= 2.75 it will be quantized to step 2.
# A number close to 1.0 gives less wiggle room for notes that start early,
# and they will be snapped to the previous step.
QUANTIZE_CUTOFF = 0.5
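# Added worked example: from_note_sequence() quantizes with
#   quantize = lambda x: int(x + (1 - QUANTIZE_CUTOFF))
# so with the 0.5 cutoff above, an event 1.4 steps in snaps down to step 1
# (int(1.9) == 1) while an event 1.6 steps in snaps up to step 2 (int(2.1) == 2).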
# Shortcut to chord symbol text annotation type.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
class BadTimeSignatureException(Exception):
pass
class MultipleTimeSignatureException(Exception):
pass
class MultipleTempoException(Exception):
pass
class NegativeTimeException(Exception):
pass
def extract_subsequence(sequence, start_time, end_time):
"""Extracts a subsequence from a NoteSequence.
Notes starting before `start_time` are not included. Notes ending after
`end_time` are truncated.
Args:
sequence: The NoteSequence to extract a subsequence from.
start_time: The float time in seconds to start the subsequence.
end_time: The float time in seconds to end the subsequence.
Returns:
A new NoteSequence that is a subsequence of `sequence` in the specified time
range.
"""
subsequence = music_pb2.NoteSequence()
subsequence.CopyFrom(sequence)
del subsequence.notes[:]
for note in sequence.notes:
if note.start_time < start_time or note.start_time >= end_time:
continue
new_note = subsequence.notes.add()
new_note.CopyFrom(note)
new_note.end_time = min(note.end_time, end_time)
subsequence.total_time = min(sequence.total_time, end_time)
return subsequence
def is_power_of_2(x):
return x and not x & (x - 1)
class QuantizedSequence(object):
"""Holds notes and chords which have been quantized to time steps.
Notes contain a pitch, velocity, start time, and end time. Notes
are stored in tracks (which can be different instruments or the same
instrument). There is also a time signature and key signature.
Notes stored in this object are not guaranteed to be sorted by time.
Attributes:
tracks: A dictionary mapping track number to list of Note tuples. Track
number is taken from the instrument number of each NoteSequence note.
chords: A list of ChordSymbol tuples.
qpm: Quarters per minute. This is needed to recover tempo if converting back
to MIDI.
time_signature: This determines the length of a bar of music. This is just
needed to compute the number of quantization steps per bar, though it
can also communicate more high level aspects of the music
(see https://en.wikipedia.org/wiki/Time_signature).
steps_per_quarter: How many quantization steps per quarter note of music.
total_steps: The total number of steps in the quantized sequence.
"""
# Disabling pylint since it is recognizing these as attributes instead of
# classes.
# pylint: disable=invalid-name
Note = collections.namedtuple(
'Note',
['pitch', 'velocity', 'start', 'end', 'instrument', 'program', 'is_drum'])
TimeSignature = collections.namedtuple('TimeSignature',
['numerator', 'denominator'])
ChordSymbol = collections.namedtuple('ChordSymbol', ['step', 'figure'])
# pylint: enable=invalid-name
def __init__(self):
self._reset()
def _reset(self):
self.tracks = {}
self.chords = []
self.qpm = 120.0
self.time_signature = QuantizedSequence.TimeSignature(numerator=4,
denominator=4)
self.steps_per_quarter = 4
self.total_steps = 0
def steps_per_bar(self):
"""Calculates steps per bar.
Returns:
Steps per bar as a floating point number.
"""
quarters_per_beat = 4.0 / self.time_signature.denominator
quarters_per_bar = (quarters_per_beat * self.time_signature.numerator)
steps_per_bar_float = (self.steps_per_quarter * quarters_per_bar)
return steps_per_bar_float
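  # Added worked example: in 4/4 time with steps_per_quarter = 4,
  # quarters_per_beat = 1.0 and quarters_per_bar = 4.0, so this returns 16.0.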
def from_note_sequence(self, note_sequence, steps_per_quarter):
"""Populate self with a music_pb2.NoteSequence proto.
Notes and time signature are saved to self with notes' start and end times
quantized. If there is no time signature 4/4 is assumed. If there is more
than one time signature an exception is raised.
The quarter notes per minute stored in `note_sequence` is used to normalize
tempo. Regardless of how fast or slow quarter notes are played, a note that
is played for 1 quarter note will last `steps_per_quarter` time steps in
the quantized result.
A note's start and end time are snapped to a nearby quantized step. See
the comments above `QUANTIZE_CUTOFF` for details.
Args:
note_sequence: A music_pb2.NoteSequence protocol buffer.
steps_per_quarter: Each quarter note of music will be divided into this
many quantized time steps.
Raises:
MultipleTimeSignatureException: If there is a change in time signature
in `note_sequence`.
MultipleTempoException: If there is a change in tempo in `note_sequence`.
BadTimeSignatureException: If the time signature found in `note_sequence`
has a denominator which is not a power of 2.
NegativeTimeException: If a note or chord occurs at a negative time.
"""
self._reset()
self.steps_per_quarter = steps_per_quarter
if note_sequence.time_signatures:
time_signatures = sorted(note_sequence.time_signatures,
key=lambda ts: ts.time)
# There is an implicit 4/4 time signature at 0 time. So if the first time
# signature is something other than 4/4 and it's at a time other than 0,
# that's an implicit time signature change.
if time_signatures[0].time != 0 and not (
time_signatures[0].numerator == 4 and
time_signatures[0].denominator == 4):
raise MultipleTimeSignatureException(
'NoteSequence has an implicit change from initial 4/4 time '
'signature.')
self.time_signature = QuantizedSequence.TimeSignature(
time_signatures[0].numerator, time_signatures[0].denominator)
for time_signature in time_signatures[1:]:
if (time_signature.numerator != self.time_signature.numerator or
time_signature.denominator != self.time_signature.denominator):
raise MultipleTimeSignatureException(
'NoteSequence has at least one time signature change.')
if not is_power_of_2(self.time_signature.denominator):
raise BadTimeSignatureException(
'Denominator is not a power of 2. Time signature: %d/%d' %
(self.time_signature.numerator, self.time_signature.denominator))
if note_sequence.tempos:
tempos = sorted(note_sequence.tempos, key=lambda t: t.time)
# There is an implicit 120.0 qpm tempo at 0 time. So if the first tempo is
# something other that 120.0 and it's at a time other than 0, that's an
# implicit tempo change.
if tempos[0].time != 0 and tempos[0].qpm != 120.0:
raise MultipleTempoException(
'NoteSequence has an implicit tempo change from initial 120.0 qpm')
self.qpm = tempos[0].qpm
for tempo in tempos[1:]:
if tempo.qpm != self.qpm:
raise MultipleTempoException(
'NoteSequence has at least one tempo change.')
else:
self.qpm = constants.DEFAULT_QUARTERS_PER_MINUTE
# Compute quantization steps per second.
steps_per_second = steps_per_quarter * self.qpm / 60.0
quantize = lambda x: int(x + (1 - QUANTIZE_CUTOFF))
self.total_steps = quantize(note_sequence.total_time * steps_per_second)
for note in note_sequence.notes:
# Quantize the start and end times of the note.
start_step = quantize(note.start_time * steps_per_second)
end_step = quantize(note.end_time * steps_per_second)
if end_step == start_step:
end_step += 1
# Do not allow notes to start or end in negative time.
if start_step < 0 or end_step < 0:
raise NegativeTimeException(
'Got negative note time: start_step = %s, end_step = %s' %
(start_step, end_step))
# Extend quantized sequence if necessary.
if end_step > self.total_steps:
self.total_steps = end_step
if note.instrument not in self.tracks:
self.tracks[note.instrument] = []
self.tracks[note.instrument].append(
QuantizedSequence.Note(pitch=note.pitch,
velocity=note.velocity,
start=start_step,
end=end_step,
instrument=note.instrument,
program=note.program,
is_drum=note.is_drum))
# Also add chord symbol annotations to the quantized sequence.
for annotation in note_sequence.text_annotations:
if annotation.annotation_type == CHORD_SYMBOL:
# Quantize the chord time, disallowing negative time.
step = quantize(annotation.time * steps_per_second)
if step < 0:
raise NegativeTimeException(
'Got negative chord time: step = %s' % step)
self.chords.append(
QuantizedSequence.ChordSymbol(step=step, figure=annotation.text))
def __eq__(self, other):
if not isinstance(other, QuantizedSequence):
return False
for track in self.tracks:
if (track not in other.tracks or
set(self.tracks[track]) != set(other.tracks[track])):
return False
return (
self.qpm == other.qpm and
self.time_signature == other.time_signature and
self.steps_per_quarter == other.steps_per_quarter and
self.total_steps == other.total_steps and
set(self.chords) == set(other.chords))
def __deepcopy__(self, unused_memo=None):
new_copy = type(self)()
new_copy.tracks = copy.deepcopy(self.tracks)
new_copy.chords = copy.deepcopy(self.chords)
new_copy.qpm = self.qpm
new_copy.time_signature = self.time_signature
new_copy.steps_per_quarter = self.steps_per_quarter
new_copy.total_steps = self.total_steps
return new_copy
| 38.67931
| 80
| 0.692788
| 8,771
| 0.781938
| 0
| 0
| 0
| 0
| 0
| 0
| 5,436
| 0.484622
|
e1ba5c72da56a9dbf7ee8bd79a41429f11457824
| 8,404
|
py
|
Python
|
tests/index_jsonurl_test.py
|
Stidsty/dismantle
|
26fb8fe7ba97349a67498715bb47a19329b1a4c7
|
[
"Apache-2.0"
] | null | null | null |
tests/index_jsonurl_test.py
|
Stidsty/dismantle
|
26fb8fe7ba97349a67498715bb47a19329b1a4c7
|
[
"Apache-2.0"
] | null | null | null |
tests/index_jsonurl_test.py
|
Stidsty/dismantle
|
26fb8fe7ba97349a67498715bb47a19329b1a4c7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Gary Stidston-Broadbent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from hashlib import md5
from json import JSONDecodeError
from shutil import copy2
import pytest
from pytest_httpserver import HTTPServer
from pytest_httpserver.httpserver import HandlerType
from requests import ConnectionError
from dismantle.index import IndexHandler, JsonUrlIndexHandler
def test_invalid_server(httpserver: HTTPServer, tmpdir):
with pytest.raises(ConnectionError):
JsonUrlIndexHandler('http://invalid.server/notfound.json', tmpdir)
httpserver.check_assertions()
def test_notfound(httpserver: HTTPServer, tmpdir):
httpserver.no_handler_status_code = 404
params = {"uri": "/invalid.json", 'handler_type': HandlerType.ONESHOT}
httpserver.expect_request(**params).respond_with_data("")
with pytest.raises(FileNotFoundError):
JsonUrlIndexHandler(httpserver.url_for('notfound.json'), tmpdir)
def test_blank(httpserver: HTTPServer, tmpdir):
params = {"uri": "/blank.json", 'handler_type': HandlerType.ONESHOT}
httpserver.expect_request(**params).respond_with_data("")
with pytest.raises(JSONDecodeError):
JsonUrlIndexHandler(httpserver.url_for('blank.json'), tmpdir)
httpserver.check_assertions()
def test_empty(httpserver: HTTPServer, datadir):
with open(datadir.join('index_empty.json')) as json_file:
data = json_file.read()
params = {"uri": "/empty.json", 'handler_type': HandlerType.ONESHOT}
httpserver.expect_request(**params).respond_with_data(data)
index = JsonUrlIndexHandler(httpserver.url_for('empty.json'), datadir)
httpserver.check_assertions()
assert len(index) == 0
def test_broken(httpserver: HTTPServer, datadir):
with open(datadir.join('index_broken.json')) as json_file:
data = json_file.read()
params = {"uri": "/broken.json", 'handler_type': HandlerType.ONESHOT}
httpserver.expect_request(**params).respond_with_data(data)
with pytest.raises(JSONDecodeError):
JsonUrlIndexHandler(httpserver.url_for('broken.json'), datadir)
httpserver.check_assertions()
def test_populated(httpserver: HTTPServer, datadir):
with open(datadir.join('index_populated.json')) as json_file:
data = json_file.read()
params = {"uri": "/populated.json", 'handler_type': HandlerType.ONESHOT}
httpserver.expect_request(**params).respond_with_data(data)
index = JsonUrlIndexHandler(httpserver.url_for('populated.json'), datadir)
assert isinstance(index, IndexHandler) is True
httpserver.check_assertions()
def test_latest(httpserver: HTTPServer, datadir):
copy2(datadir.join('index_populated.json'), datadir.join('index.json'))
file_digest = md5() # noqa: S303
with open(datadir.join('index_populated.json'), "rb") as cached_index:
for block in iter(lambda: cached_index.read(65536), b""):
file_digest.update(block)
digest = file_digest.hexdigest()
headers = {"If-None-Match": digest}
params = {
"uri": "/latest.json",
"headers": headers,
'handler_type': HandlerType.ONESHOT
}
httpserver.expect_request(**params).respond_with_data("", status=304)
index = JsonUrlIndexHandler(httpserver.url_for('latest.json'), datadir)
assert isinstance(index, IndexHandler) is True
assert index._updated is False
def test_current(httpserver: HTTPServer, datadir):
copy2(datadir.join('index_populated.json'), datadir.join('index.json'))
file_digest = md5() # noqa: S303
with open(datadir.join('index_populated.json'), "rb") as cached_index:
for block in iter(lambda: cached_index.read(65536), b""):
file_digest.update(block)
digest = file_digest.hexdigest()
headers = {"If-None-Match": digest}
params = {
"uri": "/current.json",
"headers": headers,
'handler_type': HandlerType.PERMANENT
}
httpserver.expect_request(**params).respond_with_data("", status=304)
index = JsonUrlIndexHandler(httpserver.url_for('current.json'), datadir)
assert isinstance(index, IndexHandler) is True
assert index.outdated is False
def test_outdated(httpserver: HTTPServer, datadir):
copy2(datadir.join('index_empty.json'), datadir.join('index.json'))
file_digest = md5() # noqa: S303
with open(datadir.join('index_empty.json'), "rb") as cached_index:
for block in iter(lambda: cached_index.read(65536), b""):
file_digest.update(block)
digest = file_digest.hexdigest()
headers = {"If-None-Match": digest}
params = {
"uri": "/outdated.json",
"headers": headers,
"method": "GET",
'handler_type': HandlerType.ONESHOT
}
httpserver.expect_request(**params).respond_with_data("", status=304)
index = JsonUrlIndexHandler(httpserver.url_for('outdated.json'), datadir)
assert isinstance(index, IndexHandler) is True
assert index._updated is False
params['method'] = 'HEAD'
with open(datadir.join('index_populated.json')) as json_file:
data = json_file.read()
httpserver.expect_request(**params).respond_with_data(data, status=200)
assert index.outdated is True
def test_create(httpserver: HTTPServer, datadir):
with open(datadir.join('index_populated.json')) as json_file:
data = json_file.read()
httpserver.expect_request(
"/create.json"
).respond_with_data(data, status=200)
index = JsonUrlIndexHandler(httpserver.url_for('create.json'), datadir)
assert isinstance(index, IndexHandler) is True
assert index._updated is True
def test_update(httpserver: HTTPServer, datadir):
with open(datadir.join('index_populated.json')) as json_file:
data = json_file.read()
httpserver.expect_request(
"/update.json"
).respond_with_data(data, status=200)
index = JsonUrlIndexHandler(httpserver.url_for('update.json'), datadir)
assert isinstance(index, IndexHandler) is True
assert index._updated is True
def test_populated_index_length(httpserver: HTTPServer, datadir):
with open(datadir.join('index_populated.json')) as json_file:
data = json_file.read()
httpserver.expect_request("/populated.json").respond_with_data(data)
index = JsonUrlIndexHandler(httpserver.url_for('populated.json'), datadir)
httpserver.check_assertions()
assert len(index) == 6
assert len(index.find('@scope-one/package-one')) == 1
assert len(index.find('package-one')) == 3
assert len(index.find('package-two')) == 2
assert len(index.find('package-three')) == 1
assert len(index.find('@scope-one')) == 3
assert len(index.find('@scope-two')) == 2
assert len(index.find('@scope-three')) == 1
def test_populated_pkg_exists(httpserver: HTTPServer, datadir):
with open(datadir.join('index_populated.json')) as json_file:
data = json_file.read()
params = {"uri": "/exists.json", 'handler_type': HandlerType.ONESHOT}
httpserver.expect_request(**params).respond_with_data(data)
index = JsonUrlIndexHandler(httpserver.url_for('exists.json'), datadir)
httpserver.check_assertions()
package = index["@scope-one/package-one"]
assert package["name"] == "@scope-one/package-one"
assert package["version"] == "0.1.0"
assert package["path"] == "@scope-one/package-one"
def test_populated_pkg_nonexistant(httpserver: HTTPServer, datadir):
with open(datadir.join('index_populated.json')) as json_file:
data = json_file.read()
params = {"uri": "/nonexist.json", 'handler_type': HandlerType.ONESHOT}
httpserver.expect_request(**params).respond_with_data(data)
index = JsonUrlIndexHandler(httpserver.url_for('nonexist.json'), datadir)
with pytest.raises(KeyError):
index["@scope-four/package-one"]
httpserver.check_assertions()
| 42.02
| 79
| 0.707996
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,997
| 0.237625
|
e1ba723285119341020fa35acb08aec8be4bb131
| 200
|
py
|
Python
|
src/resdk/__init__.py
|
AGregorc/resolwe-bio-py
|
62304e5d4c54c917575421701c6977dc63fc3a8f
|
[
"Apache-2.0"
] | 4
|
2016-09-28T16:00:05.000Z
|
2018-08-16T16:14:10.000Z
|
src/resdk/__init__.py
|
AGregorc/resolwe-bio-py
|
62304e5d4c54c917575421701c6977dc63fc3a8f
|
[
"Apache-2.0"
] | 229
|
2016-03-28T19:41:00.000Z
|
2022-03-16T15:02:15.000Z
|
src/resdk/__init__.py
|
AGregorc/resolwe-bio-py
|
62304e5d4c54c917575421701c6977dc63fc3a8f
|
[
"Apache-2.0"
] | 18
|
2016-03-10T16:11:57.000Z
|
2021-06-01T10:01:49.000Z
|
"""Resolwe SDK for Python."""
from .collection_tables import CollectionTables # noqa
from .resdk_logger import log_to_stdout, start_logging # noqa
from .resolwe import Resolwe, ResolweQuery # noqa
| 40
| 62
| 0.79
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.235
|
e1bade04e7403e544b5faa2f08e7005733a09b95
| 4,842
|
py
|
Python
|
helper/validation_scripts/launch-lm-profile.py
|
NanoMembers/DeepFlow
|
0235fe460d15a95f90202a1fdb3d3405d774511a
|
[
"Apache-2.0"
] | 3
|
2020-10-29T19:00:29.000Z
|
2020-12-21T12:24:28.000Z
|
helper/validation_scripts/launch-lm-profile.py
|
NanoMembers/DeepFlow
|
0235fe460d15a95f90202a1fdb3d3405d774511a
|
[
"Apache-2.0"
] | null | null | null |
helper/validation_scripts/launch-lm-profile.py
|
NanoMembers/DeepFlow
|
0235fe460d15a95f90202a1fdb3d3405d774511a
|
[
"Apache-2.0"
] | null | null | null |
#!/tools/python/python3.8.3/bin/python
import os
import shutil
import subprocess
import numpy as np
batch_list=[i*1024 for i in range(2,7)]
seq_list=[10]
hidden_list=[i*1024 for i in range(2,7)]
vocab_list=[2048] #[int(i) for i in (2**np.linspace(10,13,20)//2*2)]
layer_list=[1]
bpe_list=[10]
epoch_list=[3]
def run_command(cmd, var, result):
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
output = output.strip().replace(',','')
result[var] = float(output) if output != "" else output
except Exception:
print("command for {} did not work".format(var))
output_dir="/mnt/home/newsha/baidu/developement/MechaFlow/validation/benchmarks/rnnlm/profile_gemm"
result_file="{}/result.csv".format(output_dir)
if os.path.exists(output_dir):
shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir)
print("Created {}".format(output_dir))
with open(result_file, "w") as f:
f.write("Batch Seq Hidden Vocab Layers Epoch BPE core_util dram_util l2_util dram_read dram_write l2_access fp16_inst fma_inst\n\n")
print("Batch Seq Hidden Vocab Layers Epoch BPE core_util dram_util l2_util dram_read dram_write l2_access fp16_inst fma_inst\n\n")
for b in batch_list:
for s in seq_list:
for d in hidden_list:
for v in vocab_list:
for l in layer_list:
for bpe in bpe_list:
for e in epoch_list:
bpe = min(bpe, 25000//b)
fname = "B{}-S{}-D{}-V{}-L{}-E{}-P{}".format(b,s,d,v,l,e,bpe)
output_file = "{}/{}.out".format(output_dir, fname)
command1="/tools/cuda/cuda-11.0.1/bin/ncu --metrics \"regex:.*\" -k volta_fp16_s884gemm_fp16_... -s 0 -c 1 '/tools/venvs/tensorflow/tensorflow-2.2.0/bin/python' lm-fp16.py -m train -train data/test-index.txt -test data/test-index.txt -valid data/test-index.txt -b{} -s{} -d{} -v{} -l{} -p{} -e{} > {} 2>&1".format(b, s, d, v, l, bpe, e, output_file)
#command1 = "/tools/cuda/cuda-11.0.1/bin/nsys profile -t cuda,osrt,cudnn,cublas,nvtx,mpi -o profile/{} --stats=true -f true python lm-fp16.py -b{} -s{} -d{} -v{} -l{} -p{} -e{} -m train -train data/test-index.txt -test data/test-index.txt -valid data/test-index.txt > {} 2>&1".format(fname, b, s, d, v, l, bpe, e, output_file)
command2 = "cat {} | grep \"sm__pipe_tensor_op_hmma_cycles_active.avg.pct_of_peak_sustained_active\"| awk {{'print $3'}}".format(output_file) #unit
command3 = "cat {} | grep \"dram__throughput.avg.pct_of_peak_sustained_active\"| awk {{'print $3'}}".format(output_file) #unit
command4 = "cat {} | grep lts__t_sectors.avg.pct_of_peak_sustained_active | awk {{'print $3'}}".format(output_file) #unit
command5 = "cat {} | grep dram_read_bytes | grep sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command6 = "cat {} | grep dram_write_bytes | grep sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command7 = "cat {} | grep lts__t_bytes.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command8 = "cat {} | grep sm__sass_thread_inst_executed_op_fp16_pred_on.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command9 = "cat {} | grep sm__sass_thread_inst_executed_ops_fadd_fmul_ffma_pred_on.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
result = {'ncu':-1, 'core_util':-1, 'dram_util':-1,
'l2_util':-1, 'dram_read':-1, 'dram_write':-1,
'l2_access':-1, 'fp16_inst':-1, 'fma_inst':-1}
run_command(command1, 'ncu', result)
run_command(command2, 'core_util', result)
run_command(command3, 'dram_util', result)
run_command(command4, 'l2_util', result)
run_command(command5, 'dram_read', result)
run_command(command6, 'dram_write', result)
run_command(command7, 'l2_access', result)
run_command(command8, 'fp16_inst', result)
run_command(command9, 'fma_inst', result)
with open(result_file, "a+") as f:
f.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d} {6:d} {7:.2f} {8:.2f} {9:.2f} {10:,} {11:,} {12:,} {13:,} {14:,}\n".format(b, s, d, v, l, e, bpe, result['core_util'], result['dram_util'], result['l2_util'], result['dram_read'], result['dram_write'], result['l2_access'], int(result['fp16_inst']), int(result['fma_inst'])))
print("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d} {6:d} {7:.2f} {8:.2f} {9:.2f} {10:,} {11:,} {12:,} {13:,} {14:,}\n".format(b, s, d, v, l, e, bpe, result['core_util'], result['dram_util'], result['l2_util'], result['dram_read'], result['dram_write'], result['l2_access'], int(result['fp16_inst']), int(result['fma_inst'])))
| 63.710526
| 363
| 0.621437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,470
| 0.51012
|
e1bb9b6b3739ef931135cf1fd4f2fa3e0d1cab30
| 8,171
|
py
|
Python
|
src/planet_box_extractor/extractor.py
|
V-AI-S/planet-box-extractor
|
5404bc97e7a2e1f6d90c7503d9106973038e4387
|
[
"MIT"
] | 6
|
2021-05-31T14:51:55.000Z
|
2022-01-27T14:44:04.000Z
|
src/planet_box_extractor/extractor.py
|
V-AI-S/planet-box-extractor
|
5404bc97e7a2e1f6d90c7503d9106973038e4387
|
[
"MIT"
] | null | null | null |
src/planet_box_extractor/extractor.py
|
V-AI-S/planet-box-extractor
|
5404bc97e7a2e1f6d90c7503d9106973038e4387
|
[
"MIT"
] | null | null | null |
from .geo_utils import boundingBox
import time
import PIL.Image
import urllib.request
import mercantile
import numpy as np
class PlanetBoxExtractor:
"""
Extract bounding boxes from satellite images using Planet Tiles API
@radius: distance from the center of the image to the edge in kilometers
@zoom: level of zoom in the Mercantiles
@map_id: url-id of the basemap from the Planet Tiles API, can be found in the Planet Explorer
@api_key: Planet Tiles API Key
base_url: URL template for Planet Tiles API requests, containing the API_KEY
IMGSIZE: the size of the images from the Planet Tiles API (256 default)
locations: clockwise order of the tiles
Usage:
extractor = PlanetBoxExtractor(radius, zoom, map_id, API_KEY)
image = extractor.Process(latitude, longitude)
"""
def __init__(self, radius, zoom, map_id, api_key):
self.radius = radius
self.zoom = zoom
self.base_url = 'https://tiles.planet.com/basemaps/v1/planet-tiles/' + map_id + '/gmap/{}/{}/{}.png?api_key=' + api_key
self.IMGSIZE = 256
self.locations = ['upleft', 'upright', 'downleft', 'downright']
def Download(self, latitude, longitude):
"""
Method to retrieve Mercator tiles corresponding to the bounding box around longitude and latitude with radius self.radius
Returns a list of 4 items, either the image of a tile or an empty object (None)
Parameters
----------
latitude: latitude coordinate of the center of the desired bounding box in degrees
longitude: longitude coordinate of the center of the desired bounding box in degrees
Returns
-------
images: list of PNG images corresponding to the Mercator tiles
"""
minLat, minLon, maxLat, maxLon = boundingBox(latitude, longitude, self.radius)
tiles = [
mercantile.tile(minLon, maxLat, self.zoom), # upleft
mercantile.tile(maxLon, maxLat, self.zoom), # upright
mercantile.tile(minLon, minLat, self.zoom), # downleft
mercantile.tile(maxLon, minLat, self.zoom), # downright
]
urls = []
images = []
for i, location in enumerate(self.locations):
tile = tiles[i]
url = self.base_url.format(tile.z, tile.x, tile.y)
if url in urls:
images.append(None)
else:
urls.append(url)
images.append(PIL.Image.open(urllib.request.urlopen(url)))
time.sleep(0.2)
return images
def Stitch(self, images):
"""
Method to place Mercator tile images in the correct order
Parameters
----------
images: list of images of tiles and empty objects (None) for empty tiles
Returns
-------
img: stitched image with size (self.IMGSIZE * n) x (self.IMGSIZE * m) with n the number of tile rows and m the number of tile columns
"""
total = [(img is not None) for i, img in enumerate(images)]
if sum(total) == 1:
padx, pady = 0, 0
img = np.zeros((self.IMGSIZE, self.IMGSIZE, 3), 'uint8')
elif sum(total) == 2:
if sum(total[:2]) % 2 == 0:
# up/down
padx, pady = 0, self.IMGSIZE
img = np.zeros((self.IMGSIZE, self.IMGSIZE * 2, 3), 'uint8')
else:
# left/right
padx, pady = self.IMGSIZE, 0
img = np.zeros((self.IMGSIZE * 2, self.IMGSIZE, 3), 'uint8')
elif sum(total) == 4:
padx, pady = self.IMGSIZE, self.IMGSIZE
img = np.zeros((self.IMGSIZE * 2, self.IMGSIZE * 2, 3), 'uint8')
#
for location, image in zip(self.locations, images):
if image is None:
continue
if location == 'upleft':
img[:self.IMGSIZE, :self.IMGSIZE] = np.array(image)[:,:,:3]
elif location == 'upright':
img[:self.IMGSIZE, self.IMGSIZE:] = np.array(image)[:,:,:3]
elif location == 'downright':
img[self.IMGSIZE:, self.IMGSIZE:] = np.array(image)[:,:,:3]
elif location == 'downleft':
img[self.IMGSIZE:, :self.IMGSIZE] = np.array(image)[:,:,:3]
return img
def coord2pixel(self, lon, lat, box):
"""
Method to convert longitude and latitude to their corresponding pixel location given the bounding box of the Mercator tile containing the coordinates
Parameters
----------
lon: longitude in degrees
lat: latitude in degrees
box: bounding box of the Mercator tile containing the coordinates, as returned by mercantile.bounds()
Returns
-------
tuple of 2 pixel locations corresponding to the given longitude and latitude
"""
return int((lon - box.west)/(box.east - box.west)*self.IMGSIZE), int((lat - box.north)/(box.south - box.north)*self.IMGSIZE)
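# Illustrative example (values assumed): for a tile spanning west=-0.01,
# east=0.01, north=0.01, south=-0.01 degrees, the tile centre (0, 0) maps to
# pixel (128, 128) with the default IMGSIZE of 256.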
def Bounds(self, latitude, longitude):
"""
Method to calculate the pixel locations of the bounding box with a radius of self.radius given the center longitude and latitude coordinates
Parameters
----------
latitude: latitude coordinate of the center of the bounding box in degrees
longitude: longitude coordinate of the center of the bounding box in degrees
Returns
-------
minY: starting pixel location of the bounding box on the Y-axis
maxY: ending pixel location of the bounding box on the Y-axis
minX: starting pixel location of the bounding box on the X-axis
maxX: ending pixel location of the bounding box on the X-axis
"""
minLat, minLon, maxLat, maxLon = boundingBox(latitude, longitude, self.radius)
minX, minY = self.coord2pixel(minLon, maxLat, mercantile.bounds(mercantile.tile(longitude, latitude, self.zoom)))
maxX, maxY = self.coord2pixel(maxLon, minLat, mercantile.bounds(mercantile.tile(longitude, latitude, self.zoom)))
if minY < 0:
minY += self.IMGSIZE
maxY += self.IMGSIZE
if minX < 0:
minX += self.IMGSIZE
maxX += self.IMGSIZE
return minY, maxY, minX, maxX
def Crop(self, image, minY, maxY, minX, maxX):
"""
Method to perform the cropping of the stitched image to return the bounding box region
Parameters
----------
image: stitched image of the Mercator tiles
minY: starting pixel location of the bounding box on the Y-axis
maxY: ending pixel location of the bounding box on the Y-axis
minX: starting pixel location of the bounding box on the X-axis
maxX: ending pixel location of the bounding box on the X-axis
Returns
-------
partial image corresponding to the bounding box region
"""
return image[minY:maxY, minX:maxX]
def Process(self, latitude, longitude):
"""
Method that combines the main steps of the API to extract a bounding box image given a latitude and longitude
Parameters
----------
latitude: latitude coordinate of the center of the bounding box in degrees
longitude: longitude coordinate of the center of the bounding box in degrees
Returns
-------
image: partial image corresponding to the bounding box region
"""
images = self.Download(latitude, longitude)
stitched_image = self.Stitch(images)
minY, maxY, minX, maxX = self.Bounds(latitude, longitude)
image = self.Crop(stitched_image, minY, maxY, minX, maxX)
return image
if __name__ == '__main__':
latitude, longitude = 5, 20
zoom = 15
radius = 0.2
API_KEY = ''
map_id = ''
extractor = PlanetBoxExtractor(radius, zoom, map_id, API_KEY)
image = extractor.Process(latitude, longitude)
| 41.060302
| 153
| 0.598947
| 7,789
| 0.953249
| 0
| 0
| 0
| 0
| 0
| 0
| 4,176
| 0.511076
|
e1bbdc48371fed473f16ae3afb93373be31ead4e
| 21,365
|
py
|
Python
|
Kai/python/modules/mctruth.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | 1
|
2022-01-17T17:29:38.000Z
|
2022-01-17T17:29:38.000Z
|
Kai/python/modules/mctruth.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | null | null | null |
Kai/python/modules/mctruth.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | 1
|
2021-12-15T10:56:50.000Z
|
2021-12-15T10:56:50.000Z
|
from __future__ import (division, print_function)
import os
import ROOT
#import numpy as np
#import itertools
#from collections import OrderedDict
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.tools import * #deltaR, deltaPhi, etc.
_rootLeafType2rootBranchType = { 'UChar_t':'b', 'Char_t':'B', 'UInt_t':'i', 'Int_t':'I', 'Float_t':'F', 'Double_t':'D', 'ULong64_t':'l', 'Long64_t':'L', 'Bool_t':'O' }
class MCTruth(Module):
def __init__(self, verbose=False, makeHistos=False):
self.writeHistFile=True
self.MADEHistos=False
#load the criteria for brevity
#self.CFG = selectionConfig
#choose whether we want verbose output or to produce cut histograms
self._verbose = verbose
self.makeHistos = makeHistos
#event counters
self.counter = 0
self.maxEventsToProcess = -1
# #Muon CFG loading from config file
# self.cfg_mMaxEta = self.CFG["Muon"]["Common"]["Eta"] #Max Eta acceptance
# self.cfg_mSelPt = self.CFG["Muon"]["Select"]["Pt"] #min Pt for selection
# self.cfg_mSelRelIso = self.CFG["Muon"]["Select"]["RelIso"] #Relative Isolation
# self.cfg_mSelId = self.CFG["Muon"]["Select"]["IdLevel"]
# #All muons in NanoAOD are loose Muons or better, so loose-only are just those failing the "mediumId"
# #Jet CFG loading from config file
# self.cfg_jMaxEta = self.CFG["Jet"]["Common"]["Eta"] #Max Eta acceptance
# self.cfg_jId = self.CFG["Jet"]["Common"]["JetId"]
# self.cfg_jNSelPt = self.CFG["Jet"]["NonBJet"]["Pt"] #Non-B Jet minimum Pt
# self.cfg_jBSelPt = self.CFG["Jet"]["BJet"]["Pt"] #B Jet minimum Pt
# self.cfg_jBAlgo = self.CFG["Jet"]["Algo"] #bTagging Algorithm
# self.cfg_jBWP = self.CFG["Jet"]["WP"] #working point, like "Medium" or "Tight"
# self.cfg_jBThresh = self.CFG["Jet"][self.cfg_jBAlgo][self.cfg_jBWP]
# self.cfg_jClnTyp = self.CFG["Jet"]["CleanType"]
# self.cfg_jMaxdR = self.CFG["Jet"]["MaxDeltaR"]
# #Event CFG loading from config file
# self.cfg_lowMRes_cent = self.CFG["Event"]["LowMassReson"]["Center"] #Low mass resonance veto center
# self.cfg_lowMRes_hwidth = self.CFG["Event"]["LowMassReson"]["HalfWidth"] #low mass resonance veto half-width
# self.cfg_ZMRes_cent = self.CFG["Event"]["ZMassReson"]["Center"] #Z mass resonance veto center
# self.cfg_ZMRes_hwidth = self.CFG["Event"]["ZMassReson"]["HalfWidth"] #Z mass resonance veto half-width
# self.cfg_HTMin = self.CFG["Event"]["HTMin"] #Minimum HT
# self.cfg_nBJetMin = self.CFG["Event"]["nBJetMin"] #Minimum bTagged jets
# self.cfg_nTotJetMin = self.CFG["Event"]["nTotJetMin"] #Minimum total jets
# self.cfg_minMET = self.CFG["MET"]["MinMET"] #Minimum MET
def beginJob(self,histFile=None,histDirName=None):
print("histfile=" + str(histFile) + " directoryname=" + str(histDirName))
if histFile != None and histDirName != None:
#self.writeHistFile=True
prevdir = ROOT.gDirectory
self.histFile = histFile
self.histFile.cd()
self.dir = self.histFile.mkdir( histDirName )
prevdir.cd()
self.objs = []
if self.makeHistos:
# self.h_jSel_map = ROOT.TH2F('h_jSel_map', ';Jet Eta;Jet Phi', 40, -2.5, 2.5, 20, -3.14, 3.14)
# self.addObject(self.h_jSel_map)
self.MADEHistos=True
def endJob(self):
if hasattr(self, 'objs') and self.objs != None:
prevdir = ROOT.gDirectory
self.dir.cd()
for obj in self.objs:
obj.Write()
prevdir.cd()
if hasattr(self, 'histFile') and self.histFile != None :
self.histFile.Close()
def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
# _brlist = inputTree.GetListOfBranches()
# branches = [_brlist.At(i) for i in xrange(_brlist.GetEntries())]
# placeholder = []
# for elem in self.input:
# placeholder.append([])
# self.brlist_sep = dict(zip(self.input.keys(), placeholder))
# for key in self.input.keys():
# self.brlist_sep[key] = self.filterBranchNames(branches,self.input[key])
self.out = wrappedOutputTree
# self.out.branch("Electron_PES", "O", lenVar="nElectron", title="Boolean for electrons passing the event selection criteria")
# self.out.branch("Muon_PES", "O", lenVar="nMuon", title="Boolean for muons passing the event selection criteria")
# self.out.branch("Jet_PES", "O", lenVar="nJet", title="Boolean for jets passing the event selection criteria")
# self.out.branch("Jet_Tagged", "O", lenVar="nJet", title="Boolean for jets passing the B-tagging and corresponding Pt requirements")
# self.out.branch("Jet_Untagged", "O", lenVar="nJet", title="Boolean for jets passing the non-B-tagged and corresponding Pt criteria")
#Define the EventVar_ subvariables' type (Double, Int, Bool...)
# self.varDict = OrderedDict([("H", "D"),
# ("H2M", "D"),
# ("HT", "D"),
# ("HT2M", "D"),
# ("HTH", "D"),
# ("HTRat", "D"),
# ("nBTagJet", "I"),
# ("nTotJet", "I"),
# ("Trig_MuMu", "O"),
# ("Trig_ElMu", "O"),
# ("Trig_ElEl", "O"),
# ("Trig_Mu", "O"),
# ])
#Define the EventVar_ subvariables' titles
# self.titleDict = OrderedDict([("H", "Sum of selected jet's 3-momentum P (magnitude)"),
# ("H2M", "Sum of slected jet 3-momentum (magnitude) except the 2 highest-pt B-tagged jets"),
# ("HT", "Sum of selected jet transverse momentum"),
# ("HT2M", "Sum of slected jet transverse momentum except the 2 highest-Pt B-tagged jets"),
# ("HTH", "Ratio of HT to H in the event"),
# ("HTRat", "Ratio of the sum of 2 highest selected jet Pts to HT"),
# ("nBTagJet", "Number of post-selection and cross-cleaned b-tagged jets"),
# ("nTotJet", "Number of total post-selection and cross-cleaned jets"),
# ("Trig_MuMu", "Event passed one of the dimuon HLT triggers"),
# ("Trig_ElMu", "Event passed one of the electron-muon HLT triggers"),
# ("Trig_ElEl", "Event passed one of the dielectron HLT triggers"),
# ("Trig_Mu", "Event passed one of the solo-muon HLT triggers"),
# ])
# for name, valType in self.varDict.items():
# self.out.branch("EventVar_%s"%(name), valType, title=self.titleDict[name])
#Use method from CollectionMerger.py
def filterBranchNames(self,branches,collection):
out = []
for br in branches:
name = br.GetName()
if not name.startswith(collection+'_'): continue
out.append(name.replace(collection+'_',''))
self.branchType[collection][out[-1]] = br.FindLeaf(br.GetName()).GetTypeName() #Need to fix this type part for multi collections...
return out
def analyze(self, event): #called by the eventloop per-event
"""process event, return True (go to next module) or False (fail, go to next event)"""
#Increment counter and skip events past the maxEventsToProcess, if larger than -1
self.counter +=1
# if -1 < self.maxEventsToProcess < self.counter:
# return False
if 5 < self.counter:
return False
# if (self.counter % 5000) == 0:
# print("Processed {0:2d} Events".format(self.counter))
###############################################
### Collections and Objects and isData check###
###############################################
#Check whether this file is Data from Runs or Monte Carlo simulations
self.isData = True
if hasattr(event, "nGenPart") or hasattr(event, "nGenJet") or hasattr(event, "nGenJetAK8"):
self.isData = False
if self.isData:
print("WARNING: Attempt to run MCTruth Module on Data (Detected)!")
return False
PV = Object(event, "PV")
otherPV = Collection(event, "OtherPV")
SV = Collection(event, "SV")
electrons = Collection(event, "Electron")
muons = Collection(event, "Muon")
jets = Collection(event, "Jet")
fatjets = Collection(event, "FatJet") #Correct capitalization?
subjets = Collection(event, "SubJet")
#met = Object(event, "MET")
met = Object(event, "METFixEE2017") #FIXME: Placeholder until passed in via configuration?
HLT = Object(event, "HLT")
Filters = Object(event, "Flag") #For Data use only
gens = Collection(event, "GenPart")
genjets = Collection(event, "GenJet")
genfatjets = Collection(event, "GenJetAK8")
gensubjets = Collection(event, "SubGenJetAK8")
genmet = Object(event, "GenMET")
generator = Object(event, "Generator")
btagweight = Object(event, "btagWeight") #contains .CSVV2 and .DeepCSVB float weights
#genweight =
#lhe = Object(event, "FIXME")
#weights = FIXME
#PSWeights = FIXME
##################################
### Print info for exploration ###
##################################
print("\n\n\nRun: " + str(event.run) + " Lumi: " + str(event.luminosityBlock) + " Event: " + str(event.event))
print("\n==========Generator==========")
print("{0:>10s} {1:>5s} {2:>5s} {3:>10s} {4:>10s} {5:>7s} {6:>7s} {7:>10s} {8:>10s}"
.format("Bin Var", "ID 1", "ID 2", "Q2 Scale", "gen wght", "x1 mom.", "x2 mom.", "x*pdf 1", "x*pdf 2"))
print("{0:>10f} {1:>5d} {2:>5d} {3:>10.4f} {4:>10.4f} {5:>7.3f} {6:>7.3f} {7:>10.4f} {8:>10.4f}"
.format(generator.binvar, generator.id1, generator.id2,
generator.scalePDF, generator.weight, generator.x1,
generator.x2, generator.xpdf1, generator.xpdf2))
print("\n==========btag Weight==========")
print("CSVv2: " + str(btagweight.CSVV2) + " DeepCSV: " + str(btagweight.DeepCSVB))
print("\n\n==========Here be thy jets==========")
print("=====Selected Jets=====\n{0:>5s} {1:>10s} {2:>10s} {3:>10s} {4:>10s} {5:10s} {6:>10s}"
.format("IdX", "pt", "eta", "phi","DeepFlavB", "genJetIdx", "jID"))
for nj, jet in enumerate(jets):
if not jet.PES:
continue
print("{0:>5d} {1:>10.4f} {2:>10.4f} {3:>10.4f} {4:>10.4f} {5:>10d} {6:>10d}".format(nj, jet.pt, jet.eta, jet.phi, jet.btagDeepFlavB, jet.genJetIdx, jet.jetId))
print("=====Unselected Jets=====\n{0:>5s} {1:>10s} {2:>10s} {3:>10s} {4:>10s} {5:10s} {6:>10s}"
.format("IdX", "pt", "eta", "phi","DeepFlavB", "genJetIdx", "jID"))
for nj, jet in enumerate(jets):
if jet.PES:
continue
print("{0:>5d} {1:>10.4f} {2:>10.4f} {3:>10.4f} {4:>10.4f} {5:>10d} {6:>10d}".format(nj, jet.pt, jet.eta, jet.phi, jet.btagDeepFlavB, jet.genJetIdx, jet.jetId))
print("=====Gen Jets=====\n{0:>5s} {1:>10s} {2:>10s} {3:>10s} {4:>10s} {5:10s}"
.format("IdX", "pt", "eta", "phi","Had Flav", "Part Flav"))
for ngj, genjet in enumerate(genjets):
print("{0:>5d} {1:>10.4f} {2:>10.4f} {3:>10.4f} {4:>10d} {5:>10d}".format(ngj, genjet.pt, genjet.eta, genjet.phi, genjet.hadronFlavour, genjet.partonFlavour))
print("\n\n==========Here be thy fatjets==========")
print("=====Fatjets=====\n{0:>5s} {1:>10s} {2:>10s} {3:>10s} {4:>5s} {5:10s}"
.format("IdX", "pt", "eta", "phi","jID", "TvsQCD"))
for nfj, jet in enumerate(fatjets):
print("{0:>5d} {1:>10.4f} {2:>10.4f} {3:>10.4f} {4:>5d} {5:>10.4f}".format(nfj, jet.pt, jet.eta, jet.phi, jet.jetId, jet.deepTag_TvsQCD))
print("=====Gen Fatjets=====\n{0:>5s} {1:>10s} {2:>10s} {3:>10s} {4:>10s} {5:10s}"
.format("IdX", "pt", "eta", "phi","Had Flav", "Part Flav"))
for ngfj, genjet in enumerate(genfatjets):
print("{0:>5d} {1:>10.4f} {2:>10.4f} {3:>10.4f} {4:>10d} {5:>10d}".format(ngfj, genjet.pt, genjet.eta, genjet.phi, genjet.hadronFlavour, genjet.partonFlavour))
print("\n\n==========Here be thy GenParts==========")
print("=====Gen Fatjets=====\n{0:>5s} {1:>10s} {2:>10s} {3:>10s} {4:>10s} {5:10s} {6:>10s} {7:>10s}"
.format("IdX", "pt", "eta", "phi","Moth ID", "PDG ID", "Status", "Stat. Flgs"))
for np, gen in enumerate(gens):
print("{0:>5d} {1:>10.4f} {2:>10.4f} {3:>10.4f} {4:>10d} {5:>10d} {6:>10d} {7:>10d}".format(np, gen.pt, gen.eta, gen.phi, gen.genIdxMother, gen.pdgId, gen.status, gen.statusFlags))
################################################
### Initialize Branch Variables to be Filled ###
################################################
#Arrays
# electrons_PES = []
# muons_PES = []
# jets_PES = []
# jets_Tagged = []
# jets_Untagged = []
# for i in xrange(len(electrons)):
# electrons_PES.append(False)
# for j in xrange(len(muons)):
# muons_PES.append(False)
# for k in xrange(len(jets)):
# jets_PES.append(False)
# jets_Tagged.append(False)
# jets_Untagged.append(False)
#genTop_VARS
#########################
### Prepare Variables ###
#########################
#############
### MUONS ###
#############
# for jInd, jet in enumerate(jets):
# #Skip any jets that are below the threshold chosen (pass Loose: +1, pass Tight: +2 , pass TightLepVeto: +4
# #In 2017 data, pass Loose is always a fail (doesn't exist), so pass Tight and not TLV = 2, pass both = 6
# if jet.jetId < self.cfg_jId:
# continue
# #Eta acceptance
# if abs(jet.eta) > self.cfg_jMaxEta:
# continue
# #Walk through lepton-jet cleaning methods, starting with most accurate and cheapest: PartonMatching
# if self.cfg_jClnTyp == "PartonMatching":
# if jInd in crosslinkJetIdx:
# continue
# #Next check if DeltaR was requested instead. More computation required in NanoAOD format, less accurate
# elif self.cfg_jClnTyp == "DeltaR":
# #raise NotImplementedError("In eventselector class, in jet loop, selected cleaning type of DeltaR matching has not been implemented")
# failCleaning = False
# #DeltaR against muons
# for mIdx in mIndex:
# if deltaR(muons[mIdx], jet) < self.cfg_jMaxdR:
# failCleaning = True
# #DeltaR against electrons
# for eIdx in eIndex:
# if deltaR(electrons[eIdx], jet) < self.cfg_jMaxdR:
# failCleaning = True
# #Check if jet survived cleaning or now
# if failCleaning:
# continue
# #And protect against invalid lepton-jet cleaning methods, or add in alternative ones here
# else:
# raise RuntimeError("The requested Lepton-Jet cross-cleaning algorithm [{0:s}] is not available."
# "Please use \"PartonMatching\" or \"DeltaR\"".format(self.cfg_jClnTyp))
# #https://twiki.cern.ch/twiki/bin/viewauth/CMS/BtagRecommendation94X
# ##if jet.ChosenBTag > ChosenBtagWorkingPoint's Threshold and jet.pt > BTaggedJet's minimum cut
# if getattr(jet, self.cfg_jBAlgo) > self.cfg_jBThresh and jet.pt > self.cfg_jBSelPt:
# #Flip PassEventSelection and (B)Tagged bits
# jets_PES[jInd] = True
# jets_Tagged[jInd] = True
# nBJets += 1
# #Add to HTRat_Numerator if counter is less than 2 (i.e., one of the two highest Pt jets to pass event selection)
# if HTRat_Counter < 2:
# HTRat_Numerator += jet.pt
# HTRat_Counter += 1
# #Fill jet histos (Ideally replaced with hsnap() in the future)
# if self.MADEHistos: self.h_jBSel_map.Fill(jet.eta, jet.phi)
# if self.MADEHistos: self.h_jBSel_pt.Fill(jet.pt)
# #Add jet index to list for collection filling
# jBIndex.append(jInd)
# #Add momentum to HT, H variables here
# HT += jet.pt
# H += (jet.p4()).P()
# #Improper calculation below, fix later
# if nBJets > 2:
# #Add momentum to HT2M, H2M variables here
# #FIXME: If tagging selection changes from medium, this will be incorrect! (by definition)
# HT2M += jet.pt
# H2M += (jet.p4()).P()
# elif jet.pt > self.cfg_jNSelPt:
# #Flip PassEventSelection and Untagged bits
# jets_PES[jInd] = True
# jets_Untagged[jInd] = True
# #Add to HTRat_Numerator if counter is less than 2, relying on cascading through b-tagging/non-b-tagging selection
# if HTRat_Counter < 2:
# HTRat_Numerator += jet.pt
# HTRat_Counter += 1
# #Increment jet counter
# nOthJets +=1
# #Fill jet histos (Ideally replaced with hsnap() in the future)
# #FIXME: This isn't the distribution for the jets post event selection!
# if self.MADEHistos: self.h_jSel_map.Fill(jet.eta, jet.phi)
# if self.MADEHistos: self.h_jSel_pt.Fill(jet.pt)
# #Add jet index to list for collection filling
# jNBIndex.append(jInd)
# #Add momentum to event variables, no restrictions since these are all untagged jets
# HT += jet.pt
# H += (jet.p4()).P()
# HT2M += jet.pt
# H2M += (jet.p4()).P()
# #Cut events that don't have minimum number of b-tagged jets
# if nBJets < self.cfg_nBJetMin:
# if self.MADEHistos: self.h_cutHisto.Fill(6.5)
# return False
# #Cut events that don't have minimum number of selected, cross-cleaned jets
# if nBJets + nOthJets < self.cfg_nTotJetMin:
# if self.MADEHistos: self.h_cutHisto.Fill(7.5)
# return False
# #Cut events that don't have minimum value of HT
# if HT < self.cfg_HTMin:
# if self.MADEHistos: self.h_cutHisto.Fill(8.5)
# if self.cutOnHT: return False
# #Calculate HTRat and HTH, since more than 4 jets in the events reaching this point
# HTH = HT/H
# #HTRat = Pt of two highest pt jets / HT
# HTRat = HTRat_Numerator / HT
#############################################
### Write out slimmed selection variables ###
#############################################
#Make dictionary that makes this more automated, as in the branch creation
# self.out.fillBranch("Electron_PES", electrons_PES)
# self.out.fillBranch("Muon_PES", muons_PES)
# self.out.fillBranch("Jet_PES", jets_PES)
# self.out.fillBranch("Jet_Tagged", jets_Tagged)
# self.out.fillBranch("Jet_Untagged", jets_Untagged)
# self.out.fillBranch("EventVar_H", H)
# self.out.fillBranch("EventVar_H2M", H2M)
# self.out.fillBranch("EventVar_HT", HT)
# self.out.fillBranch("EventVar_HT2M", HT2M)
# self.out.fillBranch("EventVar_HTH", HTH)
# self.out.fillBranch("EventVar_HTRat", HTRat)
# self.out.fillBranch("EventVar_nBTagJet", nBJets)
# self.out.fillBranch("EventVar_nTotJet", (nOthJets + nBJets))
# self.out.fillBranch("EventVar_Trig_MuMu", passMuMu)
# self.out.fillBranch("EventVar_Trig_ElMu", passElMu)
# self.out.fillBranch("EventVar_Trig_ElEl", passElEl)
# self.out.fillBranch("EventVar_Trig_Mu", passMu)
return True
| 54.088608
| 192
| 0.539246
| 20,743
| 0.970887
| 0
| 0
| 0
| 0
| 0
| 0
| 14,430
| 0.675404
|
e1bced98ae2a678cded5046d18c92e44944d6925
| 1,214
|
py
|
Python
|
skimpy/utils/namespace.py
|
AQ18/skimpy
|
435fc50244f2ca815bbb39d525a82a4692f5c0ac
|
[
"Apache-2.0"
] | 13
|
2020-11-05T10:59:13.000Z
|
2022-03-21T01:38:31.000Z
|
skimpy/utils/namespace.py
|
AQ18/skimpy
|
435fc50244f2ca815bbb39d525a82a4692f5c0ac
|
[
"Apache-2.0"
] | 4
|
2022-01-27T10:23:40.000Z
|
2022-03-10T18:16:06.000Z
|
skimpy/utils/namespace.py
|
AQ18/skimpy
|
435fc50244f2ca815bbb39d525a82a4692f5c0ac
|
[
"Apache-2.0"
] | 6
|
2020-08-04T17:01:33.000Z
|
2022-03-21T01:38:32.000Z
|
# -*- coding: utf-8 -*-
"""
.. module:: pytfa
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Simulation types """
QSSA = 'qssa'
TQSSA = 'tqssa'
MCA = 'mca'
ODE = 'ode'
ELEMENTARY = 'elementary'
""" Jacobian Types"""
NUMERICAL = 'numerical'
SYMBOLIC = 'symbolic'
""" MCA Types """
NET = 'net'
SPLIT = 'split'
""" Item types """
PARAMETER = 'parameter'
VARIABLE = 'variable'
""" Units """
KCAL = 'kcal'
KJ = 'kJ'
JOULE = 'JOULE'
""" OTHER """
WATER_FORMULA = 'H2O'
| 21.298246
| 72
| 0.702636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,041
| 0.857496
|
e1bdf0a5854ba7b15ac7f77144b26d66246bcec0
| 517
|
py
|
Python
|
app/urlshortener.py
|
felixbade/minimal-url-shortener
|
4f2b4e318fff4eab2b37f863230198a5116e7a7e
|
[
"MIT"
] | null | null | null |
app/urlshortener.py
|
felixbade/minimal-url-shortener
|
4f2b4e318fff4eab2b37f863230198a5116e7a7e
|
[
"MIT"
] | null | null | null |
app/urlshortener.py
|
felixbade/minimal-url-shortener
|
4f2b4e318fff4eab2b37f863230198a5116e7a7e
|
[
"MIT"
] | null | null | null |
class URLShortener:
def __init__(self):
self.id_counter = 0
self.links = {}
def getURL(self, short_id):
return self.links.get(short_id)
def shorten(self, url):
short_id = self.getNextId()
self.links.update({short_id: url})
return short_id
def getNextId(self):
self.id_counter += 1
# Id got from a URL is type str anyway so it is easiest to just use
# type str everywhere after this point.
return str(self.id_counter)
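# Minimal usage sketch (illustrative, not part of the original module):
# shortener = URLShortener()
# short_id = shortener.shorten('https://example.com')  # returns '1'
# assert shortener.getURL(short_id) == 'https://example.com'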
| 25.85
| 75
| 0.611219
| 516
| 0.998066
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.205029
|
e1be426bfe54febaaf2747236ba29b8bea95325e
| 2,897
|
py
|
Python
|
designate/storage/impl_sqlalchemy/migrate_repo/versions/051_scoped_tsig.py
|
cneill/designate-testing
|
7bf320062d85a12bff2aee8d26c133941a289fc4
|
[
"Apache-2.0"
] | null | null | null |
designate/storage/impl_sqlalchemy/migrate_repo/versions/051_scoped_tsig.py
|
cneill/designate-testing
|
7bf320062d85a12bff2aee8d26c133941a289fc4
|
[
"Apache-2.0"
] | null | null | null |
designate/storage/impl_sqlalchemy/migrate_repo/versions/051_scoped_tsig.py
|
cneill/designate-testing
|
7bf320062d85a12bff2aee8d26c133941a289fc4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from sqlalchemy import Enum
from sqlalchemy.schema import Table, MetaData, Column
from migrate.changeset.constraint import UniqueConstraint
from designate.sqlalchemy.types import UUID
meta = MetaData()
# Get the default pool_id from the config file
default_pool_id = cfg.CONF['service:central'].default_pool_id.replace('-', '')
TSIG_SCOPES = ['POOL', 'ZONE']
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Load the TSIG Keys tables
tsigkeys_table = Table('tsigkeys', meta, autoload=True)
scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)
scopes.create()
# Create the scope and resource columns
scope_col = Column('scope', scopes, nullable=False, server_default='POOL')
scope_col.create(tsigkeys_table)
# Start with nullable=True and populate_default=True, then convert
# to nullable=False once all rows have been populated with a resource_id
resource_id_col = Column('resource_id', UUID, default=default_pool_id,
nullable=True)
resource_id_col.create(tsigkeys_table, populate_default=True)
# Now that we've populated the default pool id in existing rows, MySQL
# will let us convert this over to nullable=False
tsigkeys_table.c.resource_id.alter(nullable=False)
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
# Add missing unique index
constraint = UniqueConstraint('name', name='unique_tsigkey_name',
table=tsigkeys_table)
constraint.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
# Load the TSIG Keys tables
tsigkeys_table = Table('tsigkeys', meta, autoload=True)
scopes = Enum(name='tsig_scopes', metadata=meta, *TSIG_SCOPES)
# Drop the scope and resource columns
tsigkeys_table.c.scope.drop()
tsigkeys_table.c.resource_id.drop()
scopes.drop()
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
# Add missing unique index
constraint = UniqueConstraint('name', name='unique_tsigkey_name',
table=tsigkeys_table)
constraint.create()
| 36.2125
| 78
| 0.713497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,296
| 0.447359
|
e1bf68076ea2cc2d9234c0759575b80d167f8b2e
| 680
|
py
|
Python
|
geomat/stein/migrations/0060_remove_mineraltype_mohs_scale.py
|
mimischi/django-geomat
|
8c5bc4c9ba9759b58b52ddf339ccaec40ec5f6ea
|
[
"BSD-3-Clause"
] | 3
|
2017-01-13T15:53:39.000Z
|
2017-05-05T11:57:55.000Z
|
geomat/stein/migrations/0060_remove_mineraltype_mohs_scale.py
|
mimischi/django-geomat
|
8c5bc4c9ba9759b58b52ddf339ccaec40ec5f6ea
|
[
"BSD-3-Clause"
] | 233
|
2016-11-05T15:19:48.000Z
|
2021-09-07T23:33:47.000Z
|
geomat/stein/migrations/0060_remove_mineraltype_mohs_scale.py
|
GeoMatDigital/django-geomat
|
8c5bc4c9ba9759b58b52ddf339ccaec40ec5f6ea
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.0.2 on 2018-05-04 07:33
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('stein', '0059_mineraltype_new_mohs_scale'),
]
operations = [
migrations.AlterField(
model_name='mineraltype',
name='mohs_scale',
field=models.CharField(max_length=20, verbose_name="mohs scale", default="")
),
migrations.RemoveField(
model_name='mineraltype',
name='mohs_scale',
),
migrations.RenameField(model_name="mineraltype", old_name="new_mohs_scale", new_name="mohs_scale")
]
| 26.153846
| 106
| 0.632353
| 566
| 0.832353
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.282353
|
e1c057aa5119875fed8dba5a07e37ff673709a2b
| 14,420
|
py
|
Python
|
bamboo/core/calculator.py
|
pld/bamboo
|
a0fc77aebd6ff6b1087ba46896b0ce705fbb25a3
|
[
"BSD-3-Clause"
] | 27
|
2015-01-14T15:57:54.000Z
|
2020-12-27T19:34:41.000Z
|
bamboo/core/calculator.py
|
biswapanda/bamboo
|
72fc260822a27ce52cbe65de178f8fa1b60311f3
|
[
"BSD-3-Clause"
] | 2
|
2015-08-06T15:23:28.000Z
|
2016-01-28T00:05:25.000Z
|
bamboo/core/calculator.py
|
biswapanda/bamboo
|
72fc260822a27ce52cbe65de178f8fa1b60311f3
|
[
"BSD-3-Clause"
] | 10
|
2015-08-07T01:50:39.000Z
|
2019-05-15T21:41:18.000Z
|
from collections import defaultdict
from celery.task import task
from pandas import concat, DataFrame
from bamboo.core.aggregator import Aggregator
from bamboo.core.frame import add_parent_column, join_dataset
from bamboo.core.parser import Parser
from bamboo.lib.datetools import recognize_dates
from bamboo.lib.jsontools import df_to_jsondict
from bamboo.lib.mongo import MONGO_ID
from bamboo.lib.parsing import parse_columns
from bamboo.lib.query_args import QueryArgs
from bamboo.lib.utils import combine_dicts, flatten, to_list
def calculate_columns(dataset, calculations):
"""Calculate and store new columns for `calculations`.
The new columns are joined to the Calculation dframe and replace the
dataset's observations.
.. note::
This can result in race-conditions when:
- deleting ``controllers.Datasets.DELETE``
- updating ``controllers.Datasets.POST([dataset_id])``
Therefore, perform these actions asynchronously.
:param dataset: The dataset to calculate for.
:param calculations: A list of calculations.
"""
new_cols = None
for c in calculations:
if c.aggregation:
aggregator = __create_aggregator(
dataset, c.formula, c.name, c.groups_as_list)
aggregator.save(dataset)
else:
columns = parse_columns(dataset, c.formula, c.name)
if new_cols is None:
new_cols = DataFrame(columns[0])
else:
new_cols = new_cols.join(columns[0])
if new_cols is not None:
dataset.update_observations(new_cols)
# propagate calculation to any merged child datasets
[__propagate_column(x, dataset) for x in dataset.merged_datasets]
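# Illustrative usage sketch (the loading calls are assumptions, not defined in
# this module):
# dataset = Dataset.find_one(dataset_id)
# calculate_columns(dataset, dataset.calculations())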
@task(default_retry_delay=5, ignore_result=True)
def calculate_updates(dataset, new_data, new_dframe_raw=None,
parent_dataset_id=None, update_id=None):
"""Update dataset with `new_data`.
This can result in race-conditions when:
- deleting ``controllers.Datasets.DELETE``
- updating ``controllers.Datasets.POST([dataset_id])``
Therefore, perform these actions asynchronously.
:param new_data: Data to update this dataset with.
:param new_dframe_raw: DataFrame to update this dataset with.
:param parent_dataset_id: If passed add ID as parent ID to column,
default is None.
"""
if not __update_is_valid(dataset, new_dframe_raw):
dataset.remove_pending_update(update_id)
return
__ensure_ready(dataset, update_id)
if new_dframe_raw is None:
new_dframe_raw = dframe_from_update(dataset, new_data)
new_dframe = recognize_dates(new_dframe_raw, dataset.schema)
new_dframe = __add_calculations(dataset, new_dframe)
# set parent id if provided
if parent_dataset_id:
new_dframe = add_parent_column(new_dframe, parent_dataset_id)
dataset.append_observations(new_dframe)
dataset.clear_summary_stats()
propagate(dataset, new_dframe=new_dframe, update={'add': new_dframe_raw})
dataset.update_complete(update_id)
def dframe_from_update(dataset, new_data):
"""Make a DataFrame for the `new_data`.
:param new_data: Data to add to dframe.
:type new_data: List.
"""
filtered_data = []
columns = dataset.columns
labels_to_slugs = dataset.schema.labels_to_slugs
num_columns = len(columns)
num_rows = dataset.num_rows
dframe_empty = not num_columns
if dframe_empty:
columns = dataset.schema.keys()
for row in new_data:
filtered_row = dict()
for col, val in row.iteritems():
# special case for reserved keys (e.g. _id)
if col == MONGO_ID:
if (not num_columns or col in columns) and\
col not in filtered_row.keys():
filtered_row[col] = val
else:
# if col is a label take slug, if it's a slug take col
slug = labels_to_slugs.get(
col, col if col in labels_to_slugs.values() else None)
# if slug is valid or there is an empty dframe
if (slug or col in labels_to_slugs.keys()) and (
dframe_empty or slug in columns):
filtered_row[slug] = dataset.schema.convert_type(
slug, val)
filtered_data.append(filtered_row)
index = range(num_rows, num_rows + len(filtered_data))
new_dframe = DataFrame(filtered_data, index=index)
return new_dframe
@task(default_retry_delay=5, ignore_result=True)
def propagate(dataset, new_dframe=None, update=None):
"""Propagate changes in a modified dataset."""
__update_aggregate_datasets(dataset, new_dframe, update=update)
if update:
__update_merged_datasets(dataset, update)
__update_joined_datasets(dataset, update)
def __add_calculations(dataset, new_dframe):
labels_to_slugs = dataset.schema.labels_to_slugs
for calculation in dataset.calculations(include_aggs=False):
function = Parser.parse_function(calculation.formula)
new_column = new_dframe.apply(function, axis=1, args=(dataset, ))
potential_name = calculation.name
if potential_name not in dataset.dframe().columns:
if potential_name in labels_to_slugs:
new_column.name = labels_to_slugs[potential_name]
else:
new_column.name = potential_name
new_dframe = new_dframe.join(new_column)
return new_dframe
def __calculation_data(dataset):
"""Create a list of aggregate calculation information.
Builds a list of calculation information from the current dataset's
aggregated datasets and aggregate calculations.
"""
calcs_to_data = defaultdict(list)
calculations = dataset.calculations(only_aggs=True)
names_to_formulas = {c.name: c.formula for c in calculations}
names = set(names_to_formulas.keys())
for group, dataset in dataset.aggregated_datasets:
labels_to_slugs = dataset.schema.labels_to_slugs
calculations_for_dataset = list(set(
labels_to_slugs.keys()).intersection(names))
for calc in calculations_for_dataset:
calcs_to_data[calc].append((
names_to_formulas[calc], labels_to_slugs[calc], group,
dataset))
return flatten(calcs_to_data.values())
def __update_is_valid(dataset, new_dframe):
"""Check if the update is valid.
Check whether this is a right-hand side of any joins
and deny the update if the update would produce an invalid
join as a result.
:param dataset: The dataset to check if update valid for.
:param new_dframe: The update dframe to check.
:returns: True if the update is valid, False otherwise.
"""
select = {on: 1 for on in dataset.on_columns_for_rhs_of_joins if on in
new_dframe.columns and on in dataset.columns}
dframe = dataset.dframe(query_args=QueryArgs(select=select))
for on in select.keys():
merged_join_column = concat([new_dframe[on], dframe[on]])
if len(merged_join_column) != merged_join_column.nunique():
return False
return True
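# Example of the check above (illustrative): if this dataset is the right-hand
# side of a join on column 'id' and already contains id=5, an update adding
# another row with id=5 makes the concatenated join column non-unique, so
# __update_is_valid returns False and the update is dropped.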
def __create_aggregator(dataset, formula, name, groups, dframe=None):
# TODO this should work with index eventually
columns = parse_columns(dataset, formula, name, dframe, no_index=True)
dependent_columns = Parser.dependent_columns(formula, dataset)
aggregation = Parser.parse_aggregation(formula)
# get dframe with only the necessary columns
select = combine_dicts({group: 1 for group in groups},
{col: 1 for col in dependent_columns})
# ensure at least one column (MONGO_ID) for the count aggregation
query_args = QueryArgs(select=select or {MONGO_ID: 1})
dframe = dataset.dframe(query_args=query_args, keep_mongo_keys=not select)
return Aggregator(dframe, groups, aggregation, name, columns)
def __ensure_ready(dataset, update_id):
# dataset must not be pending
if not dataset.is_ready or (
update_id and dataset.has_pending_updates(update_id)):
dataset.reload()
raise calculate_updates.retry()
def __find_merge_offset(dataset, merged_dataset):
offset = 0
for parent_id in merged_dataset.parent_ids:
if dataset.dataset_id == parent_id:
break
offset += dataset.find_one(parent_id).num_rows
return offset
def __propagate_column(dataset, parent_dataset):
"""Propagate columns in `parent_dataset` to `dataset`.
When a new calculation is added to a dataset this will propagate the
new column to all child (merged) datasets.
:param dataset: The child dataset.
:param parent_dataset: The dataset to propagate.
"""
# delete the rows in this dataset from the parent
dataset.remove_parent_observations(parent_dataset.dataset_id)
# get this dataset without the out-of-date parent rows
dframe = dataset.dframe(keep_parent_ids=True)
# create new dframe from the updated parent and add parent id
parent_dframe = add_parent_column(parent_dataset.dframe(),
parent_dataset.dataset_id)
# merge this new dframe with the existing dframe
updated_dframe = concat([dframe, parent_dframe])
# save new dframe (updates schema)
dataset.replace_observations(updated_dframe)
dataset.clear_summary_stats()
# recur into merged dataset
[__propagate_column(x, dataset) for x in dataset.merged_datasets]
def __remapped_data(dataset_id, mapping, slugified_data):
column_map = mapping.get(dataset_id) if mapping else None
if column_map:
slugified_data = [{column_map.get(k, k): v for k, v in row.items()}
for row in slugified_data]
return slugified_data
def __slugify_data(new_data, labels_to_slugs):
slugified_data = []
new_data = to_list(new_data)
for row in new_data:
for key, value in row.iteritems():
if labels_to_slugs.get(key) and key != MONGO_ID:
del row[key]
row[labels_to_slugs[key]] = value
slugified_data.append(row)
return slugified_data
def __update_aggregate_datasets(dataset, new_dframe, update=None):
calcs_to_data = __calculation_data(dataset)
for formula, slug, groups, a_dataset in calcs_to_data:
__update_aggregate_dataset(dataset, formula, new_dframe, slug, groups,
a_dataset, update is None)
def __update_aggregate_dataset(dataset, formula, new_dframe, name, groups,
a_dataset, reducible):
"""Update the aggregated dataset built for `dataset` with `calculation`.
Proceed with the following steps:
- delete the rows in this dataset from the parent
- recalculate aggregated dataframe from aggregation
- update aggregated dataset with new dataframe and add parent id
- recur on all merged datasets descending from the aggregated
dataset
:param formula: The formula to execute.
:param new_dframe: The DataFrame to aggregate on.
:param name: The name of the aggregation.
:param groups: A column or columns to group on.
:type group: String, list of strings, or None.
:param a_dataset: The DataSet to store the aggregation in.
"""
# parse aggregation and build column arguments
aggregator = __create_aggregator(
dataset, formula, name, groups, dframe=new_dframe)
new_agg_dframe = aggregator.update(dataset, a_dataset, formula, reducible)
# jsondict from new dframe
new_data = df_to_jsondict(new_agg_dframe)
for merged_dataset in a_dataset.merged_datasets:
# remove rows in child from this merged dataset
merged_dataset.remove_parent_observations(a_dataset.dataset_id)
# calculate updates for the child
calculate_updates(merged_dataset, new_data,
parent_dataset_id=a_dataset.dataset_id)
def __update_joined_datasets(dataset, update):
"""Update any joined datasets."""
if 'add' in update:
new_dframe = update['add']
for direction, other_dataset, on, j_dataset in dataset.joined_datasets:
if 'add' in update:
if direction == 'left':
# only proceed if on in new dframe
if on in new_dframe.columns:
left_dframe = other_dataset.dframe(padded=True)
# only proceed if new on value is in on column in lhs
if len(set(new_dframe[on]).intersection(
set(left_dframe[on]))):
merged_dframe = join_dataset(left_dframe, dataset, on)
j_dataset.replace_observations(merged_dframe)
# TODO is it OK not to propagate the join here?
else:
# if on in new data join with existing data
if on in new_dframe:
new_dframe = join_dataset(new_dframe, other_dataset, on)
calculate_updates(j_dataset, df_to_jsondict(new_dframe),
parent_dataset_id=dataset.dataset_id)
elif 'delete' in update:
j_dataset.delete_observation(update['delete'])
elif 'edit' in update:
j_dataset.update_observation(*update['edit'])
def __update_merged_datasets(dataset, update):
if 'add' in update:
data = df_to_jsondict(update['add'])
# store slugs as labels for child datasets
data = __slugify_data(data, dataset.schema.labels_to_slugs)
# update the merged datasets with new_dframe
for mapping, merged_dataset in dataset.merged_datasets_with_map:
if 'add' in update:
mapped_data = __remapped_data(dataset.dataset_id, mapping, data)
calculate_updates(merged_dataset, mapped_data,
parent_dataset_id=dataset.dataset_id)
elif 'delete' in update:
offset = __find_merge_offset(dataset, merged_dataset)
merged_dataset.delete_observation(update['delete'] + offset)
elif 'edit' in update:
offset = __find_merge_offset(dataset, merged_dataset)
index, data = update['edit']
merged_dataset.update_observation(index + offset, data)
| 35.343137
| 78
| 0.676283
| 0
| 0
| 0
| 0
| 1,686
| 0.116921
| 0
| 0
| 3,879
| 0.269001
|
e1c0ee31e7c392fd9a301672456d03f86541b8f3 | 265 | py | Python | modules/python3/tests/unittests/scripts/glm.py | ImagiaViz/inviwo | a00bb6b0551bc1cf26dc0366c827c1a557a9603d | ["BSD-2-Clause"] | 349 | 2015-01-30T09:21:52.000Z | 2022-03-25T03:10:02.000Z | modules/python3/tests/unittests/scripts/glm.py | ImagiaViz/inviwo | a00bb6b0551bc1cf26dc0366c827c1a557a9603d | ["BSD-2-Clause"] | 641 | 2015-09-23T08:54:06.000Z | 2022-03-23T09:50:55.000Z | modules/python3/tests/unittests/scripts/glm.py | ImagiaViz/inviwo | a00bb6b0551bc1cf26dc0366c827c1a557a9603d | ["BSD-2-Clause"] | 124 | 2015-02-27T23:45:02.000Z | 2022-02-21T09:37:14.000Z |
import inviwopy
from inviwopy.glm import *
v1 = vec3(1,2,3)
v2 = size2_t(4,5)
m1 = mat4(1)
m2 = mat3(0,1,0,-1,0,0,0,0,2)
v3 = m2 * v1
v4 = vec4(1,2,3,4)
w = v4.w
a = v4.a
q = v4.q
z = v4.z
b = v4.b
p = v4.p
y = v4.y
g = v4.g
t = v4.t
x = v4.x
r = v4.r
s = v4.s
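# Note (editor): x/y/z/w, r/g/b/a and s/t/p/q are aliases for the same four
# vector components, following GLSL swizzle naming, so the assignments above
# all read values from the same vec4.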
| 10.6 | 29 | 0.532075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e1c1c2a4c0c8d49965261747c7efaf839f60298c | 7,538 | py | Python | djangoexample/thumbs/views.py | arneb/sorethumb | 5b224fbf30eaeb83640510d11a0dea40592e76ad | ["BSD-3-Clause"] | null | null | null | djangoexample/thumbs/views.py | arneb/sorethumb | 5b224fbf30eaeb83640510d11a0dea40592e76ad | ["BSD-3-Clause"] | null | null | null | djangoexample/thumbs/views.py | arneb/sorethumb | 5b224fbf30eaeb83640510d11a0dea40592e76ad | ["BSD-3-Clause"] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.conf import settings
from models import ThumbTest
from sorethumb.filters.defaultfilters import *
from sorethumb.filters.drawfilters import *
from sorethumb.djangothumbnail import DjangoThumbnail
class SmallThumb(DjangoThumbnail):
filters = [ThumbnailFilter(120, 100)]
class Square(DjangoThumbnail):
filters = [ThumbnailFilter(100, 100),
SquareFilter(),]
class RoundedCorners5(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(5)]
class RoundedCorners10(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(10)]
class RoundedCorners20(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(20)]
class RoundedCornersEdged(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(10, border='#000')]
class RoundedCornersBackground(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(10, border='#333'),
ResizeCanvasFilter(130, 110, '#fff'),
OpaqueFilter('#fff'),]
class RoundedCornersBackgroundGradient(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(10, border='#000'),
ResizeCanvasFilter(130, 110, '#e2e2ff', background_opacity=0),
VerticalGradientFilter('#fff', '#88e'),]
class MaskThumb(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
ResizeCanvasFilter(120, 100, '#000', background_opacity=0),
MaskFilter(settings.MEDIA_ROOT+'/alpha.png')]
class GrayThumb(DjangoThumbnail):
filters = [ThumbnailFilter(120, 100),
GrayscaleFilter()]
class FadedThumb(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
OpacityFilter(.5)]
class OverlayThumb(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
OverlayFilter(settings.MEDIA_ROOT+'/user.png')]
thumb_examples = [
{
'thumb':'small_thumb',
'title':'Basic thumbnail',
'description':"""Here we have a basic thumbnail that uses PIL's thumbnail operation to reduce an image to fit in a defined dimensions.""",
'code' : '''class SmallThumb(DjangoThumbnail):
filters = [ThumbnailFilter(120, 100)]'''
},
{
'thumb':'square',
'title':'Square',
    'description':'As above, but cropped to be square. Since uploaded images can be any old size, they tend to look ragged when presented in rows. Square thumbs look better in rows, at the expense of a little cropping.',
'code':"""class Square(DjangoThumbnail):
filters = [ThumbnailFilter(100, 100),
SquareFilter()] """
},
{
'thumb':'rounded_corners5',
'title':'5 pixels rounded corner',
'description':"""Rounded corners without CSS3, on a transparent background. What is it with designers and rounded corners anyway?""",
'code':"""class RoundedCorners5(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(5)] """,
},
{
'thumb':'rounded_corners10',
'title':'10 pixels rounded corner',
'description':'As above, but 10 pixels.',
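    # Editor's addition: mirrors the RoundedCorners10 class defined earlier in this file.
    'code':"""class RoundedCorners10(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               RoundedCornerFilter(10)]""",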
},
{
'thumb':'rounded_corners20',
'title':'20 pixels rounded corner',
'description':'Even more rounded corners',
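    # Editor's addition: mirrors the RoundedCorners20 class defined earlier in this file.
    'code':"""class RoundedCorners20(DjangoThumbnail):
    format = 'png'
    filters = [ThumbnailFilter(120, 100),
               RoundedCornerFilter(20)]""",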
},
{
'thumb':'rounded_corners_edged',
'title':'Rounded corners with a border',
'description':'The rounded corner filter also supports a coloured border',
'code':"""class RoundedCornersEdged(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(10, border='#333')]""",
},
{
'thumb':'rounded_corners_background',
'title':"Rounded corners on an opaque background",
'description':"Rounded corners on an opaque backround for browsers with poor support for per-pixel transparency — IE6 I'm looking at you!",
'code':"""class RoundedCornersBackground(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(10, border='#333'),
ResizeCanvasFilter(130, 110, '#fff'),
OpaqueFilter('#fff')] """
},
{
'thumb':'rounded_corners_background_gradient',
'title':"Rounded corners on a gadient",
'description':"As above, but on a gradient background. The vertical gradient filter replaces transparent areas with a smooth gradient between two colours.",
'code':"""class RoundedCornersBackgroundGradient(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
RoundedCornerFilter(10, border='#000'),
ResizeCanvasFilter(130, 110, '#e2e2ff', background_opacity=0),
VerticalGradientFilter('#fff', '#88e')] """
},
{'thumb':'mask_thumb',
'title':'Masked thumbnail',
'description': 'This thumbnail uses MaskFilter which replaces the alpha channel with another image, to create some interesting effects.',
'code':"""class MaskThumb(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
ResizeCanvasFilter(120, 100, '#000', background_opacity=0),
MaskFilter(settings.MEDIA_ROOT+'/alpha.png')]
"""
},
{
'thumb':'gray_thumb',
'title':'Grayscale',
'description':'A grayscale thumb, could be used as a hover state.',
'code':"""class GrayThumb(DjangoThumbnail):
filters = [ThumbnailFilter(120, 100),
GrayscaleFilter()]"""
},
{
'thumb':'faded_thumb',
'title':'50% opacity',
'description':'The OpacityFilter sets the opacity of the thumbnail.',
'code':"""class FadedThumb(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
OpacityFilter(.5)] """
},
{
'thumb':'overlay_thumb',
'title':'Thumbnail with overlay',
'description':"""A thumbnail with an overlayed transparent png. Could be used to indicate online status.""",
'code' : '''class OverlayThumb(DjangoThumbnail):
format = 'png'
filters = [ThumbnailFilter(120, 100),
OverlayFilter(settings.MEDIA_ROOT+'/user.png')]'''
},
]
def examples(request):
context = {'examples':thumb_examples}
if request.method == 'POST':
thumb = ThumbTest(image_file=request.FILES.get('file'))
thumb.save()
try:
dbobject = ThumbTest.objects.all().order_by('-pk')[0]
except IndexError:
dbobject = None
context['dbobject'] = dbobject
return render_to_response('thumbs.html',
context,
RequestContext(request))
| 31.805907 | 223 | 0.602282 | 2,052 | 0.272221 | 0 | 0 | 0 | 0 | 0 | 0 | 3,995 | 0.529981 |
e1c3efdf6d1bcb608ddb86a4384fd1aed1e4458f | 117 | py | Python | hello_world.py | michaeljamieson/Python01 | 96777e5252aaf58e5b424dd5b39186b395d9d859 | ["Apache-2.0"] | null | null | null | hello_world.py | michaeljamieson/Python01 | 96777e5252aaf58e5b424dd5b39186b395d9d859 | ["Apache-2.0"] | null | null | null | hello_world.py | michaeljamieson/Python01 | 96777e5252aaf58e5b424dd5b39186b395d9d859 | ["Apache-2.0"] | null | null | null |
print ('hello world')
print ('hey i did something')
print ('what happens if i do a ;');
print ('apparently nothing')
| 23.4 | 35 | 0.683761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.683761 |