hexsha
stringlengths 40
40
| size
int64 3
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
972
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
972
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
972
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 3
1.03M
| avg_line_length
float64 1.13
941k
| max_line_length
int64 2
941k
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2f62dfb448387dd95e74c860291fd442ab3a194
| 43
|
py
|
Python
|
iterasi/for.py
|
CodeinnAja/Learn-Python-Fundamental
|
12847cf998d69f2a02efe2e238b9e4693af86d37
|
[
"MIT"
] | 2
|
2022-03-07T14:49:10.000Z
|
2022-03-07T14:52:28.000Z
|
iterasi/for.py
|
CodeinnAja/Learn-Python-Fundamental
|
12847cf998d69f2a02efe2e238b9e4693af86d37
|
[
"MIT"
] | null | null | null |
iterasi/for.py
|
CodeinnAja/Learn-Python-Fundamental
|
12847cf998d69f2a02efe2e238b9e4693af86d37
|
[
"MIT"
] | null | null | null |
#part 1
# Print the integers 0 through 4, one per line.
for isi in range(0, 5, 1):
    print(isi)
| 14.333333
| 20
| 0.627907
|
5f1df573429c34010d5ff1a9dd9063aba255450e
| 399
|
py
|
Python
|
exercices/questao16.py
|
LBarros77/Python
|
283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af
|
[
"MIT"
] | null | null | null |
exercices/questao16.py
|
LBarros77/Python
|
283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af
|
[
"MIT"
] | null | null | null |
exercices/questao16.py
|
LBarros77/Python
|
283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af
|
[
"MIT"
] | null | null | null |
def inverterOrdem(phrasex):
    """Return *phrasex* with its word order reversed.

    Words are the segments between single spaces; consecutive spaces
    yield empty segments, so runs of spaces are preserved in the output
    (matching the original flush-on-space algorithm).
    """
    words = phrasex.split(" ")
    words.reverse()
    return " ".join(words)
print(inverterOrdem("Romeu e Julieta"))
| 19.95
| 39
| 0.506266
|
02a8288d311501c3c9f56ef702c1d187cc82cca9
| 4,100
|
py
|
Python
|
tests/extension/fsm_/as_module_delayed/test_fsm_as_module_delayed.py
|
jesseclin/veriloggen
|
a645f2c53f04e5b88213eef17779d212192ea2b5
|
[
"Apache-2.0"
] | 232
|
2015-09-01T16:07:48.000Z
|
2022-03-28T14:53:28.000Z
|
tests/extension/fsm_/as_module_delayed/test_fsm_as_module_delayed.py
|
jesseclin/veriloggen
|
a645f2c53f04e5b88213eef17779d212192ea2b5
|
[
"Apache-2.0"
] | 34
|
2015-08-21T09:13:03.000Z
|
2022-03-21T23:52:44.000Z
|
tests/extension/fsm_/as_module_delayed/test_fsm_as_module_delayed.py
|
jesseclin/veriloggen
|
a645f2c53f04e5b88213eef17779d212192ea2b5
|
[
"Apache-2.0"
] | 46
|
2015-09-24T14:39:57.000Z
|
2022-02-23T21:59:56.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import fsm_as_module_delayed
expected_verilog = """
module test;
reg CLK;
reg RST;
wire valid;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.valid(valid)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#1000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg valid
);
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] _d1_fsm;
reg _fsm_cond_2_0_1;
reg _fsm_cond_6_1_1;
reg _fsm_cond_7_2_1;
reg _fsm_cond_8_3_1;
reg _fsm_cond_9_4_1;
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_3 = 3;
localparam fsm_4 = 4;
localparam fsm_5 = 5;
localparam fsm_6 = 6;
localparam fsm_7 = 7;
localparam fsm_8 = 8;
localparam fsm_9 = 9;
localparam fsm_10 = 10;
wire [32-1:0] _fsm_out_0;
always @(*) begin
fsm = _fsm_out_0;
end
wire [32-1:0] __d1_fsm_out_1;
always @(*) begin
_d1_fsm = __d1_fsm_out_1;
end
wire _fsm_valid_2;
always @(*) begin
valid = _fsm_valid_2;
end
sub_fsm
inst_sub_fsm
(
.CLK(CLK),
.RST(RST),
.fsm(_fsm_out_0),
._d1_fsm(__d1_fsm_out_1),
.valid(_fsm_valid_2)
);
endmodule
module sub_fsm
(
input CLK,
input RST,
output reg [32-1:0] fsm,
output reg [32-1:0] _d1_fsm,
output reg valid
);
localparam fsm_init = 0;
localparam fsm_2 = 2;
localparam fsm_6 = 6;
localparam fsm_7 = 7;
localparam fsm_8 = 8;
localparam fsm_9 = 9;
localparam fsm_1 = 1;
localparam fsm_3 = 3;
localparam fsm_4 = 4;
localparam fsm_5 = 5;
localparam fsm_10 = 10;
reg _fsm_cond_2_0_1;
reg _fsm_cond_6_1_1;
reg _fsm_cond_7_2_1;
reg _fsm_cond_8_3_1;
reg _fsm_cond_9_4_1;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
_d1_fsm <= fsm_init;
valid <= 0;
_fsm_cond_2_0_1 <= 0;
_fsm_cond_6_1_1 <= 0;
_fsm_cond_7_2_1 <= 0;
_fsm_cond_8_3_1 <= 0;
_fsm_cond_9_4_1 <= 0;
end else begin
_d1_fsm <= fsm;
case(_d1_fsm)
fsm_2: begin
if(_fsm_cond_2_0_1) begin
valid <= 0;
end
end
fsm_6: begin
if(_fsm_cond_6_1_1) begin
valid <= 0;
end
end
fsm_7: begin
if(_fsm_cond_7_2_1) begin
valid <= 0;
end
end
fsm_8: begin
if(_fsm_cond_8_3_1) begin
valid <= 0;
end
end
fsm_9: begin
if(_fsm_cond_9_4_1) begin
valid <= 0;
end
end
endcase
case(fsm)
fsm_init: begin
fsm <= fsm_1;
end
fsm_1: begin
fsm <= fsm_2;
end
fsm_2: begin
valid <= 1;
_fsm_cond_2_0_1 <= 1;
fsm <= fsm_3;
end
fsm_3: begin
fsm <= fsm_4;
end
fsm_4: begin
fsm <= fsm_5;
end
fsm_5: begin
fsm <= fsm_6;
end
fsm_6: begin
valid <= 1;
_fsm_cond_6_1_1 <= 1;
fsm <= fsm_7;
end
fsm_7: begin
valid <= 1;
_fsm_cond_7_2_1 <= 1;
fsm <= fsm_8;
end
fsm_8: begin
valid <= 1;
_fsm_cond_8_3_1 <= 1;
fsm <= fsm_9;
end
fsm_9: begin
valid <= 1;
_fsm_cond_9_4_1 <= 1;
fsm <= fsm_10;
end
endcase
end
end
endmodule
"""
def test():
    """Regression test: generated Verilog must match the expected text.

    Builds the design via fsm_as_module_delayed.mkTest(), then round-trips
    the module-level ``expected_verilog`` string through pyverilog's parser
    and code generator so both sides share one canonical formatting before
    the string comparison.
    """
    # Clear veriloggen's global module registry between tests.
    veriloggen.reset()
    test_module = fsm_as_module_delayed.mkTest()
    code = test_module.to_verilog()
    from pyverilog.vparser.parser import VerilogParser
    from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
    parser = VerilogParser()
    expected_ast = parser.parse(expected_verilog)
    codegen = ASTCodeGenerator()
    # Re-emit the expected AST so whitespace/formatting is canonical.
    expected_code = codegen.visit(expected_ast)
    assert(expected_code == code)
| 17.748918
| 69
| 0.56122
|
b2097b16770dff69757b0bd6d3b8e0aec04daa70
| 10,461
|
py
|
Python
|
bank2ynab/b2y_utilities.py
|
mkjeldsen/bank2ynab
|
449b664ee51597a5c57c4e5427144dab1d295761
|
[
"MIT"
] | 1
|
2021-04-14T13:20:57.000Z
|
2021-04-14T13:20:57.000Z
|
bank2ynab/b2y_utilities.py
|
jtpedersen/bank2ynab
|
407b10acab6bf181d0da245aa179e2d2a812e67d
|
[
"MIT"
] | null | null | null |
bank2ynab/b2y_utilities.py
|
jtpedersen/bank2ynab
|
407b10acab6bf181d0da245aa179e2d2a812e67d
|
[
"MIT"
] | null | null | null |
import os
import configparser
import chardet
import logging
import codecs
import csv
# Generic utilities
def get_configs():
    """Load and return the merged application configuration.

    Reads ``bank2ynab.conf`` (required) and ``user_configuration.conf``
    (optional user overrides) from the project root — the parent of the
    directory containing this file.

    :return: configparser.RawConfigParser with both files merged
    :raises FileNotFoundError: if the main ``bank2ynab.conf`` is missing
    """
    # TODO - fix path for these
    path = os.path.realpath(__file__)
    parent_dir = os.path.dirname(path)
    project_dir = os.path.dirname(parent_dir)
    conf_files = [
        os.path.join(project_dir, "bank2ynab.conf"),
        os.path.join(project_dir, "user_configuration.conf"),
    ]
    # Fail loudly if the primary config is absent.  (The user config is
    # optional: ConfigParser.read() silently skips missing files.)
    # The original raised a bare FileNotFoundError only to catch it and
    # re-raise with a message; a direct check is equivalent and clearer.
    if not os.path.exists(conf_files[0]):
        s = "Configuration file not found: {}".format(conf_files[0])
        logging.error(s)
        raise FileNotFoundError(s)
    config = configparser.RawConfigParser()
    config.read(conf_files, encoding="utf-8")
    return config
def fix_conf_params(conf_obj, section_name):
    """Build the parameter dict for one bank section of the config file.

    Because ConfigParser falls back to values under [DEFAULT] when
    present, every key should resolve unless the file is badly broken.

    :param conf_obj: ConfigParser instance
    :param section_name: string of section name in config file
        (e.g. "MyBank" matches "[MyBank]" in file)
    :return: dict with all parameters
    """
    # key -> [option name in file, is-boolean flag, list splitter]
    spec = {
        "input_columns": ["Input Columns", False, ","],
        "output_columns": ["Output Columns", False, ","],
        "input_filename": ["Source Filename Pattern", False, ""],
        "path": ["Source Path", False, ""],
        "ext": ["Source Filename Extension", False, ""],
        "regex": ["Use Regex For Filename", True, ""],
        "fixed_prefix": ["Output Filename Prefix", False, ""],
        "input_delimiter": ["Source CSV Delimiter", False, ""],
        "header_rows": ["Header Rows", False, ""],
        "footer_rows": ["Footer Rows", False, ""],
        "date_format": ["Date Format", False, ""],
        "delete_original": ["Delete Source File", True, ""],
        "cd_flags": ["Inflow or Outflow Indicator", False, ","],
        "payee_to_memo": ["Use Payee for Memo", True, ""],
        "plugin": ["Plugin", False, ""],
        "api_token": ["YNAB API Access Token", False, ""],
        "api_account": ["YNAB Account ID", False, "|"],
    }
    config = {
        key: get_config_line(conf_obj, section_name, args)
        for key, args in spec.items()
    }
    config["bank_name"] = section_name
    # quick n' dirty fix for tabs as delimiters
    if config["input_delimiter"] == "\\t":
        config["input_delimiter"] = "\t"
    return config
def get_config_line(conf_obj, section_name, args):
    """Fetch one option for a section in the expected format.

    :param conf_obj: ConfigParser instance
    :param section_name: section to read from
    :param args: [option name, read-as-boolean flag, split delimiter]
    :return: the raw value, a boolean, or a list when a splitter is given
    """
    param, boolean, splitter = args
    fetch = conf_obj.getboolean if boolean is True else conf_obj.get
    line = fetch(section_name, param)
    # A non-empty splitter turns the value into a list.
    if splitter != "":
        line = line.split(splitter)
    return line
def find_directory(filepath):
    """finds the downloads folder for the active user if filepath is not set

    :param filepath: configured source path, or "" to use the default
    :return: path of the directory to read input files from
    :raises FileNotFoundError: if a non-empty filepath does not exist
    """
    if filepath == "":
        if os.name == "nt":
            # Windows
            try:
                import winreg
            except ImportError:
                import _winreg as winreg  # Python 2 fallback
            shell_path = (
                "SOFTWARE\\Microsoft\\Windows\\CurrentVersion"
                "\\Explorer\\Shell Folders"
            )
            # Well-known folder GUID for the user's Downloads directory.
            dl_key = "{374DE290-123F-4565-9164-39C4925E467B}"
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, shell_path) as key:
                input_dir = winreg.QueryValueEx(key, dl_key)[0]
        else:
            # Linux, OSX
            userhome = os.path.expanduser("~")
            input_dir = os.path.join(userhome, "Downloads")
    else:
        if not os.path.exists(filepath):
            s = "Error: Input directory not found: {}"
            raise FileNotFoundError(s.format(filepath))
        input_dir = filepath
    return input_dir
def option_selection(options, msg):
    """Let the user pick one entry from a list of [name, option] pairs.

    A single-entry list is selected automatically; otherwise the names
    are printed as a numbered menu and *msg* prompts for an integer.

    :param options: list of [name, option] pairs to select from
    :param msg: the message to display on the input line
    :return: the option value of the selected pair
    """
    count = len(options)
    chosen = 1
    if count > 1:
        for number, pair in enumerate(options, start=1):
            print("| {} | {}".format(number, pair[0]))
        chosen = int_input(1, count, msg)
    return options[chosen - 1][1]
def int_input(min, max, msg):
    """Prompt until the user enters an integer between min and max.

    :param min: the minimum acceptable integer value
    :param max: the maximum acceptable integer value
    :param msg: the message to display on the input line
    :return: sanitised integer input in acceptable range
    """
    # NOTE: the parameter names shadow the built-ins min/max; kept as-is
    # for interface compatibility with existing callers.
    while True:
        raw = input("{} (range {} - {}): ".format(msg, min, max))
        try:
            user_input = int(raw)
        except ValueError:
            pass
        else:
            if min <= user_input <= max:
                return user_input
        logging.info(
            "This integer is not in the acceptable range, try again!"
        )
def string_num_diff(str1, str2):
    """Return (str2 - str1) as an integer number of "milliunits".

    Each string is parsed as a float; unparseable strings count as 0.0.
    The difference is scaled by 1000 and truncated to an int.
    """
    def _as_float(text):
        # Treat anything non-numeric as zero rather than failing.
        try:
            return float(text)
        except ValueError:
            return 0.0
    return int(1000 * (_as_float(str2) - _as_float(str1)))
def detect_encoding(filepath):
    """
    Utility to detect file encoding. This is imperfect, but
    should work for the most common cases.
    :param filepath: string path to a given file
    :return: encoding alias that can be used with open(), or None when
        no candidate encoding could decode the file
    """
    # First try to guess the encoding with chardet. Take it if the
    # confidence is >50% (randomly chosen)
    with open(filepath, "rb") as f:
        file_content = f.read()
        rslt = chardet.detect(file_content)
        confidence, encoding = rslt["confidence"], rslt["encoding"]
        if confidence > 0.5:
            logging.info(
                "Using encoding {} with confidence {}".format(
                    encoding, confidence
                )
            )
            return encoding
    # because some encodings will happily encode anything even if wrong,
    # keeping the most common near the top should make it more likely that
    # we're doing the right thing.
    encodings = [
        "ascii",
        "utf-8",
        "utf-16",
        "cp1251",
        "utf_32",
        "utf_32_be",
        "utf_32_le",
        "utf_16",
        "utf_16_be",
        "utf_16_le",
        "utf_7",
        "utf_8_sig",
        "cp850",
        "cp852",
        "latin_1",
        "big5",
        "big5hkscs",
        "cp037",
        "cp424",
        "cp437",
        "cp500",
        "cp720",
        "cp737",
        "cp775",
        "cp855",
        "cp856",
        "cp857",
        "cp858",
        "cp860",
        "cp861",
        "cp862",
        "cp863",
        "cp864",
        "cp865",
        "cp866",
        "cp869",
        "cp874",
        "cp875",
        "cp932",
        "cp949",
        "cp950",
        "cp1006",
        "cp1026",
        "cp1140",
        "cp1250",
        "cp1252",
        "cp1253",
        "cp1254",
        "cp1255",
        "cp1256",
        "cp1257",
        "cp1258",
        "euc_jp",
        "euc_jis_2004",
        "euc_jisx0213",
        "euc_kr",
        "gb2312",
        "gbk",
        "gb18030",
        "hz",
        "iso2022_jp",
        "iso2022_jp_1",
        "iso2022_jp_2",
        "iso2022_jp_2004",
        "iso2022_jp_3",
        "iso2022_jp_ext",
        "iso2022_kr",
        "latin_1",
        "iso8859_2",
        "iso8859_3",
        "iso8859_4",
        "iso8859_5",
        "iso8859_6",
        "iso8859_7",
        "iso8859_8",
        "iso8859_9",
        "iso8859_10",
        "iso8859_11",
        "iso8859_13",
        "iso8859_14",
        "iso8859_15",
        "iso8859_16",
        "johab",
        "koi8_r",
        "koi8_u",
        "mac_cyrillic",
        "mac_greek",
        "mac_iceland",
        "mac_latin2",
        "mac_roman",
        "mac_turkish",
        "ptcp154",
        "shift_jis",
        "shift_jis_2004",
        "shift_jisx0213",
    ]
    result = None
    # Decode failures surface as different exception types per codec.
    error = (ValueError, UnicodeError, UnicodeDecodeError, UnicodeEncodeError)
    for enc in encodings:
        try:
            with codecs.open(filepath, "r", encoding=enc) as f:
                for line in f:
                    # Round-trip through UTF-8 to force a full decode of
                    # every line; any failure rejects this candidate.
                    line.encode("utf-8")
            return enc
        except error:
            continue
    return result
# classes dealing with input and output charsets
class EncodingFileContext(object):
    """Base context manager for file-backed csv objects.

    Holds the absolute file path, the open stream, and the csv object
    built on top of it; subclasses create both in ``__enter__``.
    """

    def __init__(self, file_path, **kwds):
        # Normalise to an absolute path up front.
        self.file_path = os.path.abspath(file_path)
        self.stream = None
        self.csv_object = None
        # Extra keyword args are forwarded to csv.reader / csv.writer.
        self.params = kwds

    def __enter__(self):
        # Subclasses open the stream and build the csv object here.
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the csv object, then close the stream if one was opened.
        del self.csv_object
        stream = self.stream
        if stream is not None:
            stream.close()
        # Returning a falsy value propagates any active exception.
        if exc_type is not None:
            return False
class EncodingCsvReader(EncodingFileContext):
    """Context manager yielding a csv.reader over a file whose text
    encoding is auto-detected."""

    def __enter__(self):
        # Detect the encoding first, then open and wrap the stream.
        guessed = detect_encoding(self.file_path)
        self.stream = open(self.file_path, encoding=guessed)
        self.csv_object = csv.reader(self.stream, **self.params)
        return self.csv_object
class EncodingCsvWriter(EncodingFileContext):
    """Context manager yielding a csv.writer that always writes UTF-8,
    regardless of Python version."""

    def __enter__(self):
        # newline="" is required by the csv module to avoid extra blank
        # rows on Windows.
        self.stream = open(self.file_path, "w", encoding="utf-8", newline="")
        self.csv_object = csv.writer(self.stream, **self.params)
        return self.csv_object
# -- end of utilities
| 29.22067
| 79
| 0.570118
|
f0c26255459d57e348a3a962afcfa98534f42cdc
| 5,719
|
py
|
Python
|
wgdi/dotplot.py
|
xuzhougeng/wgdi
|
c1dcea0ed48860b4011dea274908a48277a26318
|
[
"BSD-2-Clause"
] | null | null | null |
wgdi/dotplot.py
|
xuzhougeng/wgdi
|
c1dcea0ed48860b4011dea274908a48277a26318
|
[
"BSD-2-Clause"
] | null | null | null |
wgdi/dotplot.py
|
xuzhougeng/wgdi
|
c1dcea0ed48860b4011dea274908a48277a26318
|
[
"BSD-2-Clause"
] | null | null | null |
import re
import sys
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import wgdi.base as base
class dotplot():
    """Draw a synteny dot plot between two genomes from a BLAST table.

    Options arrive as (key, value) pairs and are set as attributes,
    overriding the defaults assigned in __init__.  Required attributes
    such as lens1/lens2/gff1/gff2/blast/savefig are expected to be
    supplied via ``options`` — presumably from a config file; confirm
    against the caller.
    """

    def __init__(self, options):
        # Defaults; any of these can be overridden via `options`.
        self.multiple = 1
        self.score = 100
        self.evalue = 1e-5
        self.repeat_number = 20
        self.markersize = 0.5
        self.figsize = 'default'
        self.position = 'order'
        self.ancestor_top = None
        self.ancestor_left = None
        self.blast_reverse = 'False'
        for k, v in options:
            setattr(self, str(k), v)
            print(k, ' = ', v)
        # Normalise "none"/empty ancestor settings back to None.
        if self.ancestor_top == 'none' or self.ancestor_top == '':
            self.ancestor_top = None
        if self.ancestor_left == 'none' or self.ancestor_left == '':
            self.ancestor_left = None

    def pair_positon(self, blast, gff1, gff2, rednum, repeat_number):
        """Assign plot coordinates and a colour class to each BLAST hit.

        Per query gene (column 0), hits are ranked by score (column 11);
        the best `rednum` are coloured red, the next 5 blue, and the rest
        up to `repeat_number` gray.  Rows that received no colour are
        dropped from the returned table.
        """
        blast['color'] = ''
        # Map gene ids to their precomputed plot coordinates.
        blast['loc1'] = blast[0].map(gff1['loc'])
        blast['loc2'] = blast[1].map(gff2['loc'])
        bluenum = 5+rednum
        index = [group.sort_values(by=[11], ascending=[False])[:repeat_number].index.tolist()
                 for name, group in blast.groupby([0])]
        reddata = np.array([k[:rednum] for k in index], dtype=object)
        bluedata = np.array([k[rednum:bluenum] for k in index], dtype=object)
        graydata = np.array([k[bluenum:repeat_number] for k in index], dtype=object)
        # np.concatenate raises on an empty sequence, hence the guards.
        if len(reddata):
            redindex = np.concatenate(reddata)
        else:
            redindex = []
        if len(bluedata):
            blueindex = np.concatenate(bluedata)
        else:
            blueindex = []
        if len(graydata):
            grayindex = np.concatenate(graydata)
        else:
            grayindex = []
        blast.loc[redindex, 'color'] = 'red'
        blast.loc[blueindex, 'color'] = 'blue'
        blast.loc[grayindex, 'color'] = 'gray'
        # Keep only coloured rows.  NOTE(review): '\w' should be a raw
        # string (r'\w') to avoid a DeprecationWarning on newer Pythons.
        return blast[blast['color'].str.contains('\w')]

    def run(self):
        """Build and save the dot plot figure, then exit the process."""
        length = 1  # NOTE(review): unused local
        # Axis limits [xmin, xmax, ymin, ymax] in fractional coordinates
        # (y inverted so chromosome 1 is at the top).
        axis = [0, 1, 1, 0]
        left, right, top, bottom = 0.07, 0.97, 0.93, 0.03
        lens1 = base.newlens(self.lens1, self.position)
        lens2 = base.newlens(self.lens2, self.position)
        # Scale each genome to unit plot length.
        step1 = 1 / float(lens1.sum())
        step2 = 1 / float(lens2.sum())
        # Optional ancestor karyotype bars extend the axes by 0.02.
        if self.ancestor_left != None:
            axis[0] = -0.02
            lens_ancestor_left = pd.read_csv(
                self.ancestor_left, sep="\t", header=None)
            lens_ancestor_left[0] = lens_ancestor_left[0].astype(str)
            lens_ancestor_left[3] = lens_ancestor_left[3].astype(str)
            lens_ancestor_left = lens_ancestor_left[lens_ancestor_left[0].isin(
                lens1.index)]
        if self.ancestor_top != None:
            axis[3] = -0.02
            lens_ancestor_top = pd.read_csv(
                self.ancestor_top, sep="\t", header=None)
            lens_ancestor_top[0] = lens_ancestor_top[0].astype(str)
            lens_ancestor_top[3] = lens_ancestor_top[3].astype(str)
            lens_ancestor_top = lens_ancestor_top[lens_ancestor_top[0].isin(
                lens2.index)]
        # figsize is either "W,H" from the config or derived from the
        # genome length ratio.
        if re.search('\d', self.figsize):
            self.figsize = [float(k) for k in self.figsize.split(',')]
        else:
            self.figsize = np.array(
                [1, float(lens1.sum())/float(lens2.sum())])*10
        plt.rcParams['ytick.major.pad'] = 0
        fig, ax = plt.subplots(figsize=self.figsize)
        ax.xaxis.set_ticks_position('top')
        base.dotplot_frame(fig, ax, lens1, lens2, step1, step2,
                           self.genome1_name, self.genome2_name, [axis[0], axis[3]])
        gff1 = base.newgff(self.gff1)
        gff2 = base.newgff(self.gff2)
        gff1 = base.gene_location(gff1, lens1, step1, self.position)
        gff2 = base.gene_location(gff2, lens2, step2, self.position)
        if self.ancestor_top != None:
            top = top  # NOTE(review): no-op assignment
            self.ancestor_posion(ax, gff2, lens_ancestor_top, 'top')
        if self.ancestor_left != None:
            left = left  # NOTE(review): no-op assignment
            self.ancestor_posion(ax, gff1, lens_ancestor_left, 'left')
        blast = base.newblast(self.blast, int(self.score),
                              float(self.evalue), gff1, gff2, self.blast_reverse)
        df = self.pair_positon(blast, gff1, gff2,
                               int(self.multiple), int(self.repeat_number))
        ax.scatter(df['loc2'], df['loc1'], s=float(self.markersize), c=df['color'],
                   alpha=0.5, edgecolors=None, linewidths=0, marker='o')
        ax.axis(axis)
        plt.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
        plt.savefig(self.savefig, dpi=500)
        plt.show()
        # Terminate the whole process after plotting.
        sys.exit(0)

    def Rectangle(self, ax, loc, heigt, width, color, alpha):
        # Draw one filled rectangle patch on the axes.
        # ('heigt' typo kept — renaming would break keyword callers.)
        p = mpatches.Rectangle(
            loc, width, heigt, edgecolor=None, facecolor=color, alpha=alpha)
        ax.add_patch(p)

    def ancestor_posion(self, ax, gff, lens, mark):
        """Draw ancestor karyotype colour bars along one axis.

        :param gff: gene table with 'chr', 'order' and 'loc' columns
        :param lens: rows of (chr, start order, end order, colour)
        :param mark: 'top' or 'left' — which axis to decorate
        """
        for index, row in lens.iterrows():
            # Translate the start/end gene orders into plot coordinates.
            loc1 = gff[(gff['chr'] == row[0]) & (
                gff['order'] == int(row[1]))].index
            loc2 = gff[(gff['chr'] == row[0]) & (
                gff['order'] == int(row[2]))].index
            loc1, loc2 = gff.loc[[loc1[0], loc2[0]], 'loc']
            if mark == 'top':
                width = abs(loc1-loc2)
                loc = [min(loc1, loc2), 0]
                height = -0.02
                self.Rectangle(ax, loc, height, width, row[3], 1)
            if mark == 'left':
                height = abs(loc1-loc2)
                loc = [-0.02, min(loc1, loc2), ]
                width = 0.02
                self.Rectangle(ax, loc, height, width, row[3], 1)
        return None
| 41.744526
| 93
| 0.553768
|
9cb965ed410135a4a71957c22b53158b5d29d1dd
| 1,217
|
py
|
Python
|
PolicyGradient/DDPG/pendulum/tensorflow/main_tf.py
|
SuperSaiyan-God/Reinforcement-Learning
|
b43a2997e28ec3bf437c37d060637f6deecf89c6
|
[
"MIT"
] | null | null | null |
PolicyGradient/DDPG/pendulum/tensorflow/main_tf.py
|
SuperSaiyan-God/Reinforcement-Learning
|
b43a2997e28ec3bf437c37d060637f6deecf89c6
|
[
"MIT"
] | null | null | null |
PolicyGradient/DDPG/pendulum/tensorflow/main_tf.py
|
SuperSaiyan-God/Reinforcement-Learning
|
b43a2997e28ec3bf437c37d060637f6deecf89c6
|
[
"MIT"
] | null | null | null |
import os
import gym
import numpy as np
from ddpg_orig_tf import Agent
from utils import plotLearning
# Uncomment the lines below to specify which gpu to run on
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if __name__ == '__main__':
    # Train a DDPG agent on the classic Pendulum continuous-control task.
    env = gym.make('Pendulum-v0')
    # alpha/beta are actor/critic learning rates; tau is the soft-update
    # coefficient for the target networks.
    agent = Agent(alpha=0.00005, beta=0.0005, input_dims=[3], tau=0.001,
                  env=env, batch_size=64, layer1_size=800, layer2_size=600,
                  n_actions=1)
    np.random.seed(0)
    score_history = []
    for i in range(1000):
        obs = env.reset()
        done = False
        score = 0
        while not done:
            act = agent.choose_action(obs)
            new_state, reward, done, info = env.step(act)
            # Store the transition; done stored as int for the buffer.
            agent.remember(obs, act, reward, new_state, int(done))
            agent.learn()
            score += reward
            obs = new_state
            #env.render()
        score_history.append(score)
        print('episode ', i, 'score %.2f' % score,
              'trailing 100 games avg %.3f' % np.mean(score_history[-100:]))
    # Save the learning curve (trailing-100-episode average).
    filename = 'Pendulum-alpha00005-beta0005-800-600-optimized.png'
    plotLearning(score_history, filename, window=100)
| 33.805556
| 76
| 0.612161
|
8b52c06f50ce0d4dd7979c26ea50fe435a9b1e43
| 2,448
|
py
|
Python
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_Y.py
|
pkienzle/scipy
|
424bfb976ada4164a007a0b62bc62ae67629c606
|
[
"BSD-3-Clause"
] | 1
|
2022-03-16T21:20:15.000Z
|
2022-03-16T21:20:15.000Z
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_Y.py
|
pkienzle/scipy
|
424bfb976ada4164a007a0b62bc62ae67629c606
|
[
"BSD-3-Clause"
] | null | null | null |
benchmarks/benchmarks/go_benchmark_functions/go_funcs_Y.py
|
pkienzle/scipy
|
424bfb976ada4164a007a0b62bc62ae67629c606
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from numpy import abs, sum, cos, pi
from .go_benchmark import Benchmark
class YaoLiu04(Benchmark):
    r"""
    Yao-Liu 4 objective function.

    Multimodal minimization problem defined in [1]_:

    .. math::

        f_{\text{YaoLiu04}}(x) = {max}_i \left\{ \left | x_i \right | ,
                                 1 \leq i \leq n \right\}

    Here :math:`n` is the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0`,
    :math:`i = 1, ..., n`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO line 1201. Gavana code and documentation differ.
    max(abs(x)) != abs(max(x))
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Hypercube [-10, 10]^N search domain.
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        # Count this objective evaluation.
        self.nfev += 1
        magnitudes = abs(x)
        return magnitudes.max()
class YaoLiu09(Benchmark):
    r"""
    Yao-Liu 9 objective function.

    Multimodal minimization problem (Rastrigin form) from [1]_:

    .. math::

        f_{\text{YaoLiu09}}(x) = \sum_{i=1}^n \left [ x_i^2
                                 - 10 \cos(2 \pi x_i ) + 10 \right ]

    Here :math:`n` is the number of dimensions and
    :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0`,
    :math:`i = 1, ..., n`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO Yao-Liu Fast Evolutionary programming is the the original ref.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Hypercube [-5.12, 5.12]^N search domain.
        self._bounds = [(-5.12, 5.12)] * self.N
        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        # Count this objective evaluation.
        self.nfev += 1
        rastrigin_terms = x ** 2.0 - 10.0 * cos(2 * pi * x) + 10
        return sum(rastrigin_terms)
| 28.8
| 84
| 0.58701
|
cd70cd58cc872c05aefc953a26bd48588a4f5871
| 178,050
|
py
|
Python
|
src/transformers/__init__.py
|
manuelciosici/transformers
|
c33f6046c3dab8f41bedf893404e6469dea3bce8
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/__init__.py
|
manuelciosici/transformers
|
c33f6046c3dab8f41bedf893404e6469dea3bce8
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/__init__.py
|
manuelciosici/transformers
|
c33f6046c3dab8f41bedf893404e6469dea3bce8
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# When adding a new object to this init, remember to add it twice: once inside the `_import_structure` dictionary and
# once inside the `if TYPE_CHECKING` branch. The `TYPE_CHECKING` should have import statements as usual, but they are
# only there for type checking. The `_import_structure` is a dictionary submodule to list of object names, and is used
# to defer the actual importing for when the objects are requested. This way `import transformers` provides the names
# in the namespace without actually importing anything (and especially none of the backends).
# Package version string, exposed as `transformers.__version__`.
__version__ = "4.19.0.dev0"
from typing import TYPE_CHECKING
# Check the dependencies satisfy the minimal versions required.
from . import dependency_versions_check
# Availability probes and lazy-module machinery used below to decide which
# backend-specific objects get registered in `_import_structure`.
from .utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_scatter_available,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
    logging,
)
# Module-level logger for this package.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Base objects, independent of any specific backend
# Maps each submodule path (dotted, relative to `transformers`) to the list of
# public names it exports. This registry drives the lazy-import mechanism:
# nothing listed here is actually imported until the name is requested.
# Entries with an empty list register the submodule itself with no re-exported
# names. Backend-gated names (torch/tf/flax/tokenizers/...) are appended to
# this dict by the try/except blocks further down, so this literal holds only
# the backend-independent base objects.
_import_structure = {
    "benchmark": [],
    "commands": [],
    "configuration_utils": ["PretrainedConfig"],
    "convert_graph_to_onnx": [],
    "convert_slow_tokenizers_checkpoints_to_fast": [],
    "convert_tf_hub_seq_to_seq_bert_to_pytorch": [],
    "data": [
        "DataProcessor",
        "InputExample",
        "InputFeatures",
        "SingleSentenceClassificationProcessor",
        "SquadExample",
        "SquadFeatures",
        "SquadV1Processor",
        "SquadV2Processor",
        "glue_compute_metrics",
        "glue_convert_examples_to_features",
        "glue_output_modes",
        "glue_processors",
        "glue_tasks_num_labels",
        "squad_convert_examples_to_features",
        "xnli_compute_metrics",
        "xnli_output_modes",
        "xnli_processors",
        "xnli_tasks_num_labels",
    ],
    "data.data_collator": [
        "DataCollator",
        "DataCollatorForLanguageModeling",
        "DataCollatorForPermutationLanguageModeling",
        "DataCollatorForSeq2Seq",
        "DataCollatorForSOP",
        "DataCollatorForTokenClassification",
        "DataCollatorForWholeWordMask",
        "DataCollatorWithPadding",
        "DefaultDataCollator",
        "default_data_collator",
    ],
    "data.metrics": [],
    "data.processors": [],
    "debug_utils": [],
    "dependency_versions_check": [],
    "dependency_versions_table": [],
    "dynamic_module_utils": [],
    "feature_extraction_sequence_utils": ["SequenceFeatureExtractor"],
    "feature_extraction_utils": ["BatchFeature", "FeatureExtractionMixin"],
    "file_utils": [],
    "hf_argparser": ["HfArgumentParser"],
    "integrations": [
        "is_comet_available",
        "is_optuna_available",
        "is_ray_available",
        "is_ray_tune_available",
        "is_sigopt_available",
        "is_tensorboard_available",
        "is_wandb_available",
    ],
    "modelcard": ["ModelCard"],
    "modeling_tf_pytorch_utils": [
        "convert_tf_weight_name_to_pt_weight_name",
        "load_pytorch_checkpoint_in_tf2_model",
        "load_pytorch_model_in_tf2_model",
        "load_pytorch_weights_in_tf2_model",
        "load_tf2_checkpoint_in_pytorch_model",
        "load_tf2_model_in_pytorch_model",
        "load_tf2_weights_in_pytorch_model",
    ],
    "models": [],
    # Models
    # Per-model config classes, archive maps and slow (pure-Python) tokenizers.
    # Fast tokenizers and framework models are appended conditionally below.
    "models.albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"],
    "models.auto": [
        "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CONFIG_MAPPING",
        "FEATURE_EXTRACTOR_MAPPING",
        "MODEL_NAMES_MAPPING",
        "PROCESSOR_MAPPING",
        "TOKENIZER_MAPPING",
        "AutoConfig",
        "AutoFeatureExtractor",
        "AutoProcessor",
        "AutoTokenizer",
    ],
    "models.bart": ["BartConfig", "BartTokenizer"],
    "models.barthez": [],
    "models.bartpho": [],
    "models.beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig"],
    "models.bert": [
        "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BasicTokenizer",
        "BertConfig",
        "BertTokenizer",
        "WordpieceTokenizer",
    ],
    "models.bert_generation": ["BertGenerationConfig"],
    "models.bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"],
    "models.bertweet": ["BertweetTokenizer"],
    "models.big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig"],
    "models.bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
    ],
    "models.blenderbot": ["BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotTokenizer"],
    "models.blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallTokenizer",
    ],
    "models.bort": [],
    "models.byt5": ["ByT5Tokenizer"],
    "models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"],
    "models.canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig", "CanineTokenizer"],
    "models.clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPTextConfig",
        "CLIPTokenizer",
        "CLIPVisionConfig",
    ],
    "models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"],
    "models.convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig"],
    "models.cpm": [],
    "models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"],
    "models.data2vec": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecAudioConfig",
        "Data2VecTextConfig",
        "Data2VecVisionConfig",
    ],
    "models.deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaTokenizer"],
    "models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"],
    "models.decision_transformer": ["DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "DecisionTransformerConfig"],
    "models.deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig"],
    "models.detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig"],
    "models.dialogpt": [],
    "models.distilbert": ["DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertTokenizer"],
    "models.dit": [],
    "models.dpr": [
        "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DPRConfig",
        "DPRContextEncoderTokenizer",
        "DPRQuestionEncoderTokenizer",
        "DPRReaderOutput",
        "DPRReaderTokenizer",
    ],
    "models.dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"],
    "models.electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer"],
    "models.encoder_decoder": ["EncoderDecoderConfig"],
    "models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"],
    "models.fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"],
    "models.fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig", "FSMTTokenizer"],
    "models.funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig", "FunnelTokenizer"],
    "models.glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"],
    "models.gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2Tokenizer"],
    "models.gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig"],
    "models.gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig"],
    "models.herbert": ["HerbertTokenizer"],
    "models.hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"],
    "models.ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig"],
    "models.imagegpt": ["IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ImageGPTConfig"],
    "models.layoutlm": ["LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMTokenizer"],
    "models.layoutlmv2": [
        "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv2Config",
        "LayoutLMv2FeatureExtractor",
        "LayoutLMv2Processor",
        "LayoutLMv2Tokenizer",
    ],
    "models.layoutxlm": ["LayoutXLMProcessor"],
    "models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"],
    "models.longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer"],
    "models.luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig", "LukeTokenizer"],
    "models.lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer"],
    "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
    "models.marian": ["MarianConfig"],
    "models.maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "models.mbart": ["MBartConfig"],
    "models.mbart50": [],
    "models.megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
    "models.megatron_gpt2": [],
    "models.mluke": [],
    "models.mmbt": ["MMBTConfig"],
    "models.mobilebert": ["MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertTokenizer"],
    "models.mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig", "MPNetTokenizer"],
    "models.mt5": ["MT5Config"],
    "models.nystromformer": [
        "NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NystromformerConfig",
    ],
    "models.openai": ["OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig", "OpenAIGPTTokenizer"],
    "models.pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig", "PegasusTokenizer"],
    "models.perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverTokenizer"],
    "models.phobert": ["PhobertTokenizer"],
    "models.plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"],
    "models.poolformer": ["POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig"],
    "models.prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig", "ProphetNetTokenizer"],
    "models.qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"],
    "models.rag": ["RagConfig", "RagRetriever", "RagTokenizer"],
    "models.realm": ["REALM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RealmConfig", "RealmTokenizer"],
    "models.reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"],
    "models.regnet": ["REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "RegNetConfig"],
    "models.rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig"],
    "models.resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig"],
    "models.retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig", "RetriBertTokenizer"],
    "models.roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaTokenizer"],
    "models.roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerTokenizer"],
    "models.segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig"],
    "models.sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"],
    "models.sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"],
    "models.speech_encoder_decoder": ["SpeechEncoderDecoderConfig"],
    "models.speech_to_text": [
        "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Speech2TextConfig",
    ],
    "models.speech_to_text_2": [
        "SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Speech2Text2Config",
        "Speech2Text2Processor",
        "Speech2Text2Tokenizer",
    ],
    "models.splinter": ["SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SplinterConfig", "SplinterTokenizer"],
    "models.squeezebert": ["SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertTokenizer"],
    "models.swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig"],
    "models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"],
    "models.tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig", "TapasTokenizer"],
    "models.tapex": ["TapexTokenizer"],
    "models.transfo_xl": [
        "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TransfoXLConfig",
        "TransfoXLCorpus",
        "TransfoXLTokenizer",
    ],
    "models.trocr": [
        "TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrOCRConfig",
        "TrOCRProcessor",
    ],
    "models.unispeech": [
        "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "UniSpeechConfig",
    ],
    "models.unispeech_sat": [
        "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "UniSpeechSatConfig",
    ],
    "models.van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"],
    "models.vilt": ["VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig", "ViltFeatureExtractor", "ViltProcessor"],
    "models.vision_encoder_decoder": ["VisionEncoderDecoderConfig"],
    "models.vision_text_dual_encoder": ["VisionTextDualEncoderConfig", "VisionTextDualEncoderProcessor"],
    "models.visual_bert": ["VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VisualBertConfig"],
    "models.vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"],
    "models.vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"],
    "models.wav2vec2": [
        "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Wav2Vec2Config",
        "Wav2Vec2CTCTokenizer",
        "Wav2Vec2FeatureExtractor",
        "Wav2Vec2Processor",
        "Wav2Vec2Tokenizer",
    ],
    "models.wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"],
    "models.wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"],
    "models.wavlm": [
        "WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "WavLMConfig",
    ],
    "models.xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"],
    "models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"],
    "models.xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"],
    "models.xlm_roberta": ["XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig"],
    "models.xlm_roberta_xl": ["XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaXLConfig"],
    "models.xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"],
    "models.yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig"],
    "models.yoso": ["YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP", "YosoConfig"],
    "onnx": [],
    # Framework-agnostic pipeline entry points.
    "pipelines": [
        "AudioClassificationPipeline",
        "AutomaticSpeechRecognitionPipeline",
        "Conversation",
        "ConversationalPipeline",
        "CsvPipelineDataFormat",
        "FeatureExtractionPipeline",
        "FillMaskPipeline",
        "ImageClassificationPipeline",
        "ImageSegmentationPipeline",
        "JsonPipelineDataFormat",
        "NerPipeline",
        "ObjectDetectionPipeline",
        "PipedPipelineDataFormat",
        "Pipeline",
        "PipelineDataFormat",
        "QuestionAnsweringPipeline",
        "SummarizationPipeline",
        "TableQuestionAnsweringPipeline",
        "Text2TextGenerationPipeline",
        "TextClassificationPipeline",
        "TextGenerationPipeline",
        "TokenClassificationPipeline",
        "TranslationPipeline",
        "ZeroShotClassificationPipeline",
        "ZeroShotImageClassificationPipeline",
        "pipeline",
    ],
    "processing_utils": ["ProcessorMixin"],
    "testing_utils": [],
    "tokenization_utils": ["PreTrainedTokenizer"],
    "tokenization_utils_base": [
        "AddedToken",
        "BatchEncoding",
        "CharSpan",
        "PreTrainedTokenizerBase",
        "SpecialTokensMixin",
        "TokenSpan",
    ],
    # Trainer callback/configuration objects (framework-independent parts).
    "trainer_callback": [
        "DefaultFlowCallback",
        "EarlyStoppingCallback",
        "PrinterCallback",
        "ProgressCallback",
        "TrainerCallback",
        "TrainerControl",
        "TrainerState",
    ],
    "trainer_utils": ["EvalPrediction", "IntervalStrategy", "SchedulerType", "enable_full_determinism", "set_seed"],
    "training_args": ["TrainingArguments"],
    "training_args_seq2seq": ["Seq2SeqTrainingArguments"],
    "training_args_tf": ["TFTrainingArguments"],
    # Generic utilities: constants, availability probes, logging helpers.
    "utils": [
        "CONFIG_NAME",
        "MODEL_CARD_NAME",
        "PYTORCH_PRETRAINED_BERT_CACHE",
        "PYTORCH_TRANSFORMERS_CACHE",
        "SPIECE_UNDERLINE",
        "TF2_WEIGHTS_NAME",
        "TF_WEIGHTS_NAME",
        "TRANSFORMERS_CACHE",
        "WEIGHTS_NAME",
        "TensorType",
        "add_end_docstrings",
        "add_start_docstrings",
        "cached_path",
        "is_apex_available",
        "is_datasets_available",
        "is_faiss_available",
        "is_flax_available",
        "is_phonemizer_available",
        "is_psutil_available",
        "is_py3nvml_available",
        "is_pyctcdecode_available",
        "is_scipy_available",
        "is_sentencepiece_available",
        "is_sklearn_available",
        "is_speech_available",
        "is_tf_available",
        "is_timm_available",
        "is_tokenizers_available",
        "is_torch_available",
        "is_torch_tpu_available",
        "is_vision_available",
        "logging",
    ],
}
# sentencepiece-backed objects
# Probe for the `sentencepiece` dependency. If missing, register the dummy
# placeholder objects from `.utils.dummy_sentencepiece_objects` instead
# (presumably these raise an informative error at use time — defined in
# `.utils`); otherwise register the real sentencepiece-based slow tokenizers.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_sentencepiece_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_sentencepiece_objects"] = [
        name for name in dir(dummy_sentencepiece_objects) if not name.startswith("_")
    ]
else:
    # sentencepiece is available: register the real tokenizers per model module.
    _import_structure["models.albert"].append("AlbertTokenizer")
    _import_structure["models.barthez"].append("BarthezTokenizer")
    _import_structure["models.bartpho"].append("BartphoTokenizer")
    _import_structure["models.bert_generation"].append("BertGenerationTokenizer")
    _import_structure["models.big_bird"].append("BigBirdTokenizer")
    _import_structure["models.camembert"].append("CamembertTokenizer")
    _import_structure["models.cpm"].append("CpmTokenizer")
    _import_structure["models.deberta_v2"].append("DebertaV2Tokenizer")
    _import_structure["models.fnet"].append("FNetTokenizer")
    _import_structure["models.layoutxlm"].append("LayoutXLMTokenizer")
    _import_structure["models.m2m_100"].append("M2M100Tokenizer")
    _import_structure["models.marian"].append("MarianTokenizer")
    _import_structure["models.mbart"].append("MBartTokenizer")
    _import_structure["models.mbart50"].append("MBart50Tokenizer")
    _import_structure["models.mluke"].append("MLukeTokenizer")
    _import_structure["models.mt5"].append("MT5Tokenizer")
    _import_structure["models.pegasus"].append("PegasusTokenizer")
    _import_structure["models.plbart"].append("PLBartTokenizer")
    _import_structure["models.reformer"].append("ReformerTokenizer")
    _import_structure["models.rembert"].append("RemBertTokenizer")
    _import_structure["models.speech_to_text"].append("Speech2TextTokenizer")
    _import_structure["models.t5"].append("T5Tokenizer")
    _import_structure["models.xglm"].append("XGLMTokenizer")
    _import_structure["models.xlm_prophetnet"].append("XLMProphetNetTokenizer")
    _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizer")
    _import_structure["models.xlnet"].append("XLNetTokenizer")
# tokenizers-backed objects
# Probe for the `tokenizers` (Rust) dependency. If missing, register the dummy
# placeholders; otherwise register the fast tokenizer classes.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_tokenizers_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_tokenizers_objects"] = [
        name for name in dir(dummy_tokenizers_objects) if not name.startswith("_")
    ]
else:
    # Fast tokenizers structure
    _import_structure["models.albert"].append("AlbertTokenizerFast")
    _import_structure["models.bart"].append("BartTokenizerFast")
    _import_structure["models.barthez"].append("BarthezTokenizerFast")
    _import_structure["models.bert"].append("BertTokenizerFast")
    _import_structure["models.big_bird"].append("BigBirdTokenizerFast")
    _import_structure["models.blenderbot"].append("BlenderbotTokenizerFast")
    _import_structure["models.blenderbot_small"].append("BlenderbotSmallTokenizerFast")
    _import_structure["models.camembert"].append("CamembertTokenizerFast")
    _import_structure["models.clip"].append("CLIPTokenizerFast")
    _import_structure["models.convbert"].append("ConvBertTokenizerFast")
    _import_structure["models.cpm"].append("CpmTokenizerFast")
    _import_structure["models.deberta"].append("DebertaTokenizerFast")
    _import_structure["models.deberta_v2"].append("DebertaV2TokenizerFast")
    _import_structure["models.distilbert"].append("DistilBertTokenizerFast")
    # DPR registers three fast tokenizers at once.
    _import_structure["models.dpr"].extend(
        ["DPRContextEncoderTokenizerFast", "DPRQuestionEncoderTokenizerFast", "DPRReaderTokenizerFast"]
    )
    _import_structure["models.electra"].append("ElectraTokenizerFast")
    _import_structure["models.fnet"].append("FNetTokenizerFast")
    _import_structure["models.funnel"].append("FunnelTokenizerFast")
    _import_structure["models.gpt2"].append("GPT2TokenizerFast")
    _import_structure["models.herbert"].append("HerbertTokenizerFast")
    _import_structure["models.layoutlm"].append("LayoutLMTokenizerFast")
    _import_structure["models.layoutlmv2"].append("LayoutLMv2TokenizerFast")
    _import_structure["models.layoutxlm"].append("LayoutXLMTokenizerFast")
    _import_structure["models.led"].append("LEDTokenizerFast")
    _import_structure["models.longformer"].append("LongformerTokenizerFast")
    _import_structure["models.lxmert"].append("LxmertTokenizerFast")
    _import_structure["models.mbart"].append("MBartTokenizerFast")
    _import_structure["models.mbart50"].append("MBart50TokenizerFast")
    _import_structure["models.mobilebert"].append("MobileBertTokenizerFast")
    _import_structure["models.mpnet"].append("MPNetTokenizerFast")
    _import_structure["models.mt5"].append("MT5TokenizerFast")
    _import_structure["models.openai"].append("OpenAIGPTTokenizerFast")
    _import_structure["models.pegasus"].append("PegasusTokenizerFast")
    _import_structure["models.realm"].append("RealmTokenizerFast")
    _import_structure["models.reformer"].append("ReformerTokenizerFast")
    _import_structure["models.rembert"].append("RemBertTokenizerFast")
    _import_structure["models.retribert"].append("RetriBertTokenizerFast")
    _import_structure["models.roberta"].append("RobertaTokenizerFast")
    _import_structure["models.roformer"].append("RoFormerTokenizerFast")
    _import_structure["models.splinter"].append("SplinterTokenizerFast")
    _import_structure["models.squeezebert"].append("SqueezeBertTokenizerFast")
    _import_structure["models.t5"].append("T5TokenizerFast")
    _import_structure["models.xglm"].append("XGLMTokenizerFast")
    _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizerFast")
    _import_structure["models.xlnet"].append("XLNetTokenizerFast")
    # The fast-tokenizer base class also lives behind the `tokenizers` gate.
    _import_structure["tokenization_utils_fast"] = ["PreTrainedTokenizerFast"]
# Objects that need BOTH sentencepiece and tokenizers (slow->fast conversion).
try:
    if not (is_sentencepiece_available() and is_tokenizers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_sentencepiece_and_tokenizers_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_sentencepiece_and_tokenizers_objects"] = [
        name for name in dir(dummy_sentencepiece_and_tokenizers_objects) if not name.startswith("_")
    ]
else:
    _import_structure["convert_slow_tokenizer"] = ["SLOW_TO_FAST_CONVERTERS", "convert_slow_tokenizer"]
# Speech-specific objects
# Gated on the speech-processing dependency (see `is_speech_available`).
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_speech_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_speech_objects"] = [
        name for name in dir(dummy_speech_objects) if not name.startswith("_")
    ]
else:
    _import_structure["models.speech_to_text"].append("Speech2TextFeatureExtractor")
# Objects that need BOTH sentencepiece and the speech dependency.
try:
    if not (is_sentencepiece_available() and is_speech_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_sentencepiece_and_speech_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_sentencepiece_and_speech_objects"] = [
        name for name in dir(dummy_sentencepiece_and_speech_objects) if not name.startswith("_")
    ]
else:
    # The processor combines the tokenizer (sentencepiece) and the feature
    # extractor (speech), hence the double gate.
    _import_structure["models.speech_to_text"].append("Speech2TextProcessor")
# Vision-specific objects
# Gated on the vision dependency (see `is_vision_available`); registers the
# feature extractors/processors for image models.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_vision_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_vision_objects"] = [
        name for name in dir(dummy_vision_objects) if not name.startswith("_")
    ]
else:
    _import_structure["image_utils"] = ["ImageFeatureExtractionMixin"]
    _import_structure["models.beit"].append("BeitFeatureExtractor")
    _import_structure["models.clip"].append("CLIPFeatureExtractor")
    _import_structure["models.clip"].append("CLIPProcessor")
    _import_structure["models.convnext"].append("ConvNextFeatureExtractor")
    _import_structure["models.deit"].append("DeiTFeatureExtractor")
    _import_structure["models.detr"].append("DetrFeatureExtractor")
    _import_structure["models.dpt"].append("DPTFeatureExtractor")
    _import_structure["models.glpn"].append("GLPNFeatureExtractor")
    _import_structure["models.imagegpt"].append("ImageGPTFeatureExtractor")
    # NOTE(review): LayoutLMv2FeatureExtractor/Processor and LayoutXLMProcessor
    # are also listed unconditionally in the base dict above — appending here
    # duplicates them in the name list; presumably harmless, but worth checking.
    _import_structure["models.layoutlmv2"].append("LayoutLMv2FeatureExtractor")
    _import_structure["models.layoutlmv2"].append("LayoutLMv2Processor")
    _import_structure["models.layoutxlm"].append("LayoutXLMProcessor")
    _import_structure["models.maskformer"].append("MaskFormerFeatureExtractor")
    _import_structure["models.perceiver"].append("PerceiverFeatureExtractor")
    _import_structure["models.poolformer"].append("PoolFormerFeatureExtractor")
    _import_structure["models.segformer"].append("SegformerFeatureExtractor")
    _import_structure["models.vilt"].append("ViltFeatureExtractor")
    _import_structure["models.vilt"].append("ViltProcessor")
    _import_structure["models.vit"].append("ViTFeatureExtractor")
    _import_structure["models.yolos"].append("YolosFeatureExtractor")
# Timm-backed objects
# DETR models use a timm backbone, so they require both timm and vision.
try:
    if not (is_timm_available() and is_vision_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_timm_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_timm_objects"] = [
        name for name in dir(dummy_timm_objects) if not name.startswith("_")
    ]
else:
    _import_structure["models.detr"].extend(
        [
            "DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
            "DetrForObjectDetection",
            "DetrForSegmentation",
            "DetrModel",
            "DetrPreTrainedModel",
        ]
    )
# Scatter-backed objects (TAPAS needs torch-scatter; see `is_scatter_available`).
try:
    if not is_scatter_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_scatter_objects
    # Export every public (non-underscore) name from the dummy module.
    _import_structure["utils.dummy_scatter_objects"] = [
        name for name in dir(dummy_scatter_objects) if not name.startswith("_")
    ]
else:
    _import_structure["models.tapas"].extend(
        [
            "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TapasForMaskedLM",
            "TapasForQuestionAnswering",
            "TapasForSequenceClassification",
            "TapasModel",
            "TapasPreTrainedModel",
            "load_tf_weights_in_tapas",
        ]
    )
# PyTorch-backed objects
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_pt_objects
_import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
else:
_import_structure["activations"] = []
_import_structure["benchmark.benchmark"] = ["PyTorchBenchmark"]
_import_structure["benchmark.benchmark_args"] = ["PyTorchBenchmarkArguments"]
_import_structure["data.datasets"] = [
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"SquadDataset",
"SquadDataTrainingArguments",
"TextDataset",
"TextDatasetForNextSentencePrediction",
]
_import_structure["deepspeed"] = []
_import_structure["generation_beam_constraints"] = [
"Constraint",
"ConstraintListState",
"DisjunctiveConstraint",
"PhrasalConstraint",
]
_import_structure["generation_beam_search"] = ["BeamScorer", "BeamSearchScorer", "ConstrainedBeamSearchScorer"]
_import_structure["generation_logits_process"] = [
"ForcedBOSTokenLogitsProcessor",
"ForcedEOSTokenLogitsProcessor",
"HammingDiversityLogitsProcessor",
"InfNanRemoveLogitsProcessor",
"LogitsProcessor",
"LogitsProcessorList",
"LogitsWarper",
"MinLengthLogitsProcessor",
"NoBadWordsLogitsProcessor",
"NoRepeatNGramLogitsProcessor",
"PrefixConstrainedLogitsProcessor",
"RepetitionPenaltyLogitsProcessor",
"TemperatureLogitsWarper",
"TopKLogitsWarper",
"TopPLogitsWarper",
]
_import_structure["generation_stopping_criteria"] = [
"MaxLengthCriteria",
"MaxTimeCriteria",
"StoppingCriteria",
"StoppingCriteriaList",
]
_import_structure["generation_utils"] = ["top_k_top_p_filtering"]
_import_structure["modeling_outputs"] = []
_import_structure["modeling_utils"] = ["PreTrainedModel"]
# PyTorch models structure
_import_structure["models.albert"].extend(
[
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
)
_import_structure["models.auto"].extend(
[
"MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
"MODEL_FOR_AUDIO_XVECTOR_MAPPING",
"MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING",
"MODEL_FOR_CAUSAL_LM_MAPPING",
"MODEL_FOR_CTC_MAPPING",
"MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
"MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
"MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
"MODEL_FOR_MASKED_LM_MAPPING",
"MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"MODEL_FOR_OBJECT_DETECTION_MAPPING",
"MODEL_FOR_PRETRAINING_MAPPING",
"MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
"MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
"MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"MODEL_FOR_VISION_2_SEQ_MAPPING",
"MODEL_MAPPING",
"MODEL_WITH_LM_HEAD_MAPPING",
"AutoModel",
"AutoModelForAudioClassification",
"AutoModelForAudioFrameClassification",
"AutoModelForAudioXVector",
"AutoModelForCausalLM",
"AutoModelForCTC",
"AutoModelForImageClassification",
"AutoModelForImageSegmentation",
"AutoModelForInstanceSegmentation",
"AutoModelForMaskedImageModeling",
"AutoModelForMaskedLM",
"AutoModelForMultipleChoice",
"AutoModelForNextSentencePrediction",
"AutoModelForObjectDetection",
"AutoModelForPreTraining",
"AutoModelForQuestionAnswering",
"AutoModelForSemanticSegmentation",
"AutoModelForSeq2SeqLM",
"AutoModelForSequenceClassification",
"AutoModelForSpeechSeq2Seq",
"AutoModelForTableQuestionAnswering",
"AutoModelForTokenClassification",
"AutoModelForVision2Seq",
"AutoModelWithLMHead",
]
)
_import_structure["models.bart"].extend(
[
"BART_PRETRAINED_MODEL_ARCHIVE_LIST",
"BartForCausalLM",
"BartForConditionalGeneration",
"BartForQuestionAnswering",
"BartForSequenceClassification",
"BartModel",
"BartPretrainedModel",
"PretrainedBartModel",
]
)
_import_structure["models.beit"].extend(
[
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
)
_import_structure["models.bert"].extend(
[
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
)
_import_structure["models.bert_generation"].extend(
[
"BertGenerationDecoder",
"BertGenerationEncoder",
"BertGenerationPreTrainedModel",
"load_tf_weights_in_bert_generation",
]
)
_import_structure["models.big_bird"].extend(
[
"BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdForCausalLM",
"BigBirdForMaskedLM",
"BigBirdForMultipleChoice",
"BigBirdForPreTraining",
"BigBirdForQuestionAnswering",
"BigBirdForSequenceClassification",
"BigBirdForTokenClassification",
"BigBirdLayer",
"BigBirdModel",
"BigBirdPreTrainedModel",
"load_tf_weights_in_big_bird",
]
)
_import_structure["models.bigbird_pegasus"].extend(
[
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
)
_import_structure["models.blenderbot"].extend(
[
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
)
_import_structure["models.blenderbot_small"].extend(
[
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
)
_import_structure["models.camembert"].extend(
[
"CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CamembertForCausalLM",
"CamembertForMaskedLM",
"CamembertForMultipleChoice",
"CamembertForQuestionAnswering",
"CamembertForSequenceClassification",
"CamembertForTokenClassification",
"CamembertModel",
]
)
_import_structure["models.canine"].extend(
[
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
)
_import_structure["models.clip"].extend(
[
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPVisionModel",
]
)
_import_structure["models.convbert"].extend(
[
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
)
_import_structure["models.convnext"].extend(
[
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
]
)
_import_structure["models.ctrl"].extend(
[
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
)
_import_structure["models.data2vec"].extend(
[
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
"Data2VecVisionForImageClassification",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
)
_import_structure["models.deberta"].extend(
[
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
)
_import_structure["models.deberta_v2"].extend(
[
"DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaV2ForMaskedLM",
"DebertaV2ForMultipleChoice",
"DebertaV2ForQuestionAnswering",
"DebertaV2ForSequenceClassification",
"DebertaV2ForTokenClassification",
"DebertaV2Model",
"DebertaV2PreTrainedModel",
]
)
_import_structure["models.decision_transformer"].extend(
[
"DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DecisionTransformerGPT2Model",
"DecisionTransformerGPT2PreTrainedModel",
"DecisionTransformerModel",
"DecisionTransformerPreTrainedModel",
]
)
_import_structure["models.deit"].extend(
[
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
)
_import_structure["models.dpr"].extend(
[
"DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPRContextEncoder",
"DPRPretrainedContextEncoder",
"DPRPreTrainedModel",
"DPRPretrainedQuestionEncoder",
"DPRPretrainedReader",
"DPRQuestionEncoder",
"DPRReader",
]
)
_import_structure["models.dpt"].extend(
[
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
)
_import_structure["models.electra"].extend(
[
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
)
_import_structure["models.encoder_decoder"].append("EncoderDecoderModel")
_import_structure["models.flaubert"].extend(
[
"FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaubertForMultipleChoice",
"FlaubertForQuestionAnswering",
"FlaubertForQuestionAnsweringSimple",
"FlaubertForSequenceClassification",
"FlaubertForTokenClassification",
"FlaubertModel",
"FlaubertWithLMHeadModel",
]
)
_import_structure["models.fnet"].extend(
[
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
)
_import_structure["models.fsmt"].extend(["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"])
_import_structure["models.funnel"].extend(
[
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
)
_import_structure["models.glpn"].extend(
[
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNModel",
"GLPNPreTrainedModel",
]
)
_import_structure["models.gpt2"].extend(
[
"GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPT2DoubleHeadsModel",
"GPT2ForSequenceClassification",
"GPT2ForTokenClassification",
"GPT2LMHeadModel",
"GPT2Model",
"GPT2PreTrainedModel",
"load_tf_weights_in_gpt2",
]
)
_import_structure["models.gpt_neo"].extend(
[
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForSequenceClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
)
_import_structure["models.gptj"].extend(
[
"GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTJForCausalLM",
"GPTJForQuestionAnswering",
"GPTJForSequenceClassification",
"GPTJModel",
"GPTJPreTrainedModel",
]
)
_import_structure["models.hubert"].extend(
[
"HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"HubertForCTC",
"HubertForSequenceClassification",
"HubertModel",
"HubertPreTrainedModel",
]
)
_import_structure["models.ibert"].extend(
[
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
)
_import_structure["models.imagegpt"].extend(
[
"IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ImageGPTForCausalImageModeling",
"ImageGPTForImageClassification",
"ImageGPTModel",
"ImageGPTPreTrainedModel",
"load_tf_weights_in_imagegpt",
]
)
_import_structure["models.layoutlm"].extend(
[
"LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMForMaskedLM",
"LayoutLMForSequenceClassification",
"LayoutLMForTokenClassification",
"LayoutLMModel",
"LayoutLMPreTrainedModel",
]
)
_import_structure["models.layoutlmv2"].extend(
[
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
)
_import_structure["models.led"].extend(
[
"LED_PRETRAINED_MODEL_ARCHIVE_LIST",
"LEDForConditionalGeneration",
"LEDForQuestionAnswering",
"LEDForSequenceClassification",
"LEDModel",
"LEDPreTrainedModel",
]
)
_import_structure["models.longformer"].extend(
[
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
)
_import_structure["models.luke"].extend(
[
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
)
_import_structure["models.lxmert"].extend(
[
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
)
_import_structure["models.m2m_100"].extend(
[
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
)
_import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"])
_import_structure["models.maskformer"].extend(
[
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
)
_import_structure["models.mbart"].extend(
[
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
)
_import_structure["models.megatron_bert"].extend(
[
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
)
_import_structure["models.mmbt"].extend(["MMBTForClassification", "MMBTModel", "ModalEmbeddings"])
_import_structure["models.mobilebert"].extend(
[
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
)
_import_structure["models.mpnet"].extend(
[
"MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"MPNetForMaskedLM",
"MPNetForMultipleChoice",
"MPNetForQuestionAnswering",
"MPNetForSequenceClassification",
"MPNetForTokenClassification",
"MPNetLayer",
"MPNetModel",
"MPNetPreTrainedModel",
]
)
_import_structure["models.mt5"].extend(["MT5EncoderModel", "MT5ForConditionalGeneration", "MT5Model"])
_import_structure["models.nystromformer"].extend(
[
"NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"NystromformerForMaskedLM",
"NystromformerForMultipleChoice",
"NystromformerForQuestionAnswering",
"NystromformerForSequenceClassification",
"NystromformerForTokenClassification",
"NystromformerLayer",
"NystromformerModel",
"NystromformerPreTrainedModel",
]
)
_import_structure["models.openai"].extend(
[
"OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OpenAIGPTDoubleHeadsModel",
"OpenAIGPTForSequenceClassification",
"OpenAIGPTLMHeadModel",
"OpenAIGPTModel",
"OpenAIGPTPreTrainedModel",
"load_tf_weights_in_openai_gpt",
]
)
_import_structure["models.pegasus"].extend(
["PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel", "PegasusPreTrainedModel"]
)
_import_structure["models.perceiver"].extend(
[
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
)
_import_structure["models.plbart"].extend(
[
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
)
_import_structure["models.poolformer"].extend(
[
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
)
_import_structure["models.prophetnet"].extend(
[
"PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ProphetNetDecoder",
"ProphetNetEncoder",
"ProphetNetForCausalLM",
"ProphetNetForConditionalGeneration",
"ProphetNetModel",
"ProphetNetPreTrainedModel",
]
)
_import_structure["models.qdqbert"].extend(
[
"QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"QDQBertForMaskedLM",
"QDQBertForMultipleChoice",
"QDQBertForNextSentencePrediction",
"QDQBertForQuestionAnswering",
"QDQBertForSequenceClassification",
"QDQBertForTokenClassification",
"QDQBertLayer",
"QDQBertLMHeadModel",
"QDQBertModel",
"QDQBertPreTrainedModel",
"load_tf_weights_in_qdqbert",
]
)
_import_structure["models.rag"].extend(
["RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration"]
)
_import_structure["models.realm"].extend(
[
"REALM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RealmEmbedder",
"RealmForOpenQA",
"RealmKnowledgeAugEncoder",
"RealmPreTrainedModel",
"RealmReader",
"RealmRetriever",
"RealmScorer",
"load_tf_weights_in_realm",
]
)
_import_structure["models.reformer"].extend(
[
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
)
_import_structure["models.regnet"].extend(
[
"REGNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"RegNetForImageClassification",
"RegNetModel",
"RegNetPreTrainedModel",
]
)
_import_structure["models.rembert"].extend(
[
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
)
_import_structure["models.resnet"].extend(
[
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
]
)
_import_structure["models.retribert"].extend(
["RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RetriBertModel", "RetriBertPreTrainedModel"]
)
_import_structure["models.roberta"].extend(
[
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
)
_import_structure["models.roformer"].extend(
[
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
)
_import_structure["models.segformer"].extend(
[
"SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SegformerDecodeHead",
"SegformerForImageClassification",
"SegformerForSemanticSegmentation",
"SegformerLayer",
"SegformerModel",
"SegformerPreTrainedModel",
]
)
_import_structure["models.sew"].extend(
[
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
)
_import_structure["models.sew_d"].extend(
[
"SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWDForCTC",
"SEWDForSequenceClassification",
"SEWDModel",
"SEWDPreTrainedModel",
]
)
_import_structure["models.speech_encoder_decoder"].extend(["SpeechEncoderDecoderModel"])
_import_structure["models.speech_to_text"].extend(
[
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
)
_import_structure["models.speech_to_text_2"].extend(["Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel"])
_import_structure["models.splinter"].extend(
[
"SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SplinterForQuestionAnswering",
"SplinterLayer",
"SplinterModel",
"SplinterPreTrainedModel",
]
)
_import_structure["models.squeezebert"].extend(
[
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
)
_import_structure["models.swin"].extend(
[
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
]
)
_import_structure["models.t5"].extend(
[
"T5_PRETRAINED_MODEL_ARCHIVE_LIST",
"T5EncoderModel",
"T5ForConditionalGeneration",
"T5Model",
"T5PreTrainedModel",
"load_tf_weights_in_t5",
]
)
_import_structure["models.transfo_xl"].extend(
[
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
)
_import_structure["models.trocr"].extend(
["TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel"]
)
_import_structure["models.unispeech"].extend(
[
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
)
_import_structure["models.unispeech_sat"].extend(
[
"UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechSatForAudioFrameClassification",
"UniSpeechSatForCTC",
"UniSpeechSatForPreTraining",
"UniSpeechSatForSequenceClassification",
"UniSpeechSatForXVector",
"UniSpeechSatModel",
"UniSpeechSatPreTrainedModel",
]
)
_import_structure["models.van"].extend(
[
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
)
_import_structure["models.vilt"].extend(
[
"VILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViltForImageAndTextRetrieval",
"ViltForImagesAndTextClassification",
"ViltForMaskedLM",
"ViltForQuestionAnswering",
"ViltLayer",
"ViltModel",
"ViltPreTrainedModel",
]
)
_import_structure["models.vision_encoder_decoder"].extend(["VisionEncoderDecoderModel"])
_import_structure["models.vision_text_dual_encoder"].extend(["VisionTextDualEncoderModel"])
_import_structure["models.visual_bert"].extend(
[
"VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VisualBertForMultipleChoice",
"VisualBertForPreTraining",
"VisualBertForQuestionAnswering",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertLayer",
"VisualBertModel",
"VisualBertPreTrainedModel",
]
)
_import_structure["models.vit"].extend(
[
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
)
_import_structure["models.vit_mae"].extend(
[
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
)
_import_structure["models.wav2vec2"].extend(
[
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
)
_import_structure["models.wavlm"].extend(
[
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
)
_import_structure["models.xglm"].extend(
[
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
)
_import_structure["models.xlm"].extend(
[
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
)
_import_structure["models.xlm_prophetnet"].extend(
[
"XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMProphetNetDecoder",
"XLMProphetNetEncoder",
"XLMProphetNetForCausalLM",
"XLMProphetNetForConditionalGeneration",
"XLMProphetNetModel",
]
)
_import_structure["models.xlm_roberta"].extend(
[
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
]
)
_import_structure["models.xlm_roberta_xl"].extend(
[
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
)
_import_structure["models.xlnet"].extend(
[
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
)
_import_structure["models.yolos"].extend(
[
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
)
_import_structure["models.yoso"].extend(
[
"YOSO_PRETRAINED_MODEL_ARCHIVE_LIST",
"YosoForMaskedLM",
"YosoForMultipleChoice",
"YosoForQuestionAnswering",
"YosoForSequenceClassification",
"YosoForTokenClassification",
"YosoLayer",
"YosoModel",
"YosoPreTrainedModel",
]
)
_import_structure["optimization"] = [
"Adafactor",
"AdamW",
"get_constant_schedule",
"get_constant_schedule_with_warmup",
"get_cosine_schedule_with_warmup",
"get_cosine_with_hard_restarts_schedule_with_warmup",
"get_linear_schedule_with_warmup",
"get_polynomial_decay_schedule_with_warmup",
"get_scheduler",
]
_import_structure["pytorch_utils"] = ["Conv1D", "apply_chunking_to_forward", "prune_layer"]
_import_structure["sagemaker"] = []
_import_structure["trainer"] = ["Trainer"]
_import_structure["trainer_pt_utils"] = ["torch_distributed_zero_first"]
_import_structure["trainer_seq2seq"] = ["Seq2SeqTrainer"]
# TensorFlow-backed objects
# The registrations below are gated on TensorFlow being installed.  When it
# is not, the public TF names are instead sourced from `.utils.dummy_tf_objects`
# (placeholder objects standing in for the real classes — presumably they raise
# an informative error when used; verify in utils/dummy_tf_objects.py).
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TF missing: expose every public name from the dummy module in its place.
    from .utils import dummy_tf_objects
    _import_structure["utils.dummy_tf_objects"] = [name for name in dir(dummy_tf_objects) if not name.startswith("_")]
else:
    # TF available: map each submodule to the list of public names it
    # contributes to the package namespace (consumed by a lazy-module loader
    # elsewhere in this file — not visible in this chunk).  An empty list
    # registers the submodule itself without re-exporting any names.
    _import_structure["activations_tf"] = []
    _import_structure["benchmark.benchmark_args_tf"] = ["TensorFlowBenchmarkArguments"]
    _import_structure["benchmark.benchmark_tf"] = ["TensorFlowBenchmark"]
    _import_structure["generation_tf_logits_process"] = [
        "TFForcedBOSTokenLogitsProcessor",
        "TFForcedEOSTokenLogitsProcessor",
        "TFLogitsProcessor",
        "TFLogitsProcessorList",
        "TFLogitsWarper",
        "TFMinLengthLogitsProcessor",
        "TFNoBadWordsLogitsProcessor",
        "TFNoRepeatNGramLogitsProcessor",
        "TFRepetitionPenaltyLogitsProcessor",
        "TFTemperatureLogitsWarper",
        "TFTopKLogitsWarper",
        "TFTopPLogitsWarper",
    ]
    _import_structure["generation_tf_utils"] = ["tf_top_k_top_p_filtering"]
    _import_structure["keras_callbacks"] = ["KerasMetricCallback", "PushToHubCallback"]
    _import_structure["modeling_tf_outputs"] = []
    _import_structure["modeling_tf_utils"] = [
        "TFPreTrainedModel",
        "TFSequenceSummary",
        "TFSharedEmbeddings",
        "shape_list",
    ]
    # TensorFlow models structure
    _import_structure["models.albert"].extend(
        [
            "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFAlbertForMaskedLM",
            "TFAlbertForMultipleChoice",
            "TFAlbertForPreTraining",
            "TFAlbertForQuestionAnswering",
            "TFAlbertForSequenceClassification",
            "TFAlbertForTokenClassification",
            "TFAlbertMainLayer",
            "TFAlbertModel",
            "TFAlbertPreTrainedModel",
        ]
    )
    _import_structure["models.auto"].extend(
        [
            "TF_MODEL_FOR_CAUSAL_LM_MAPPING",
            "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
            "TF_MODEL_FOR_MASKED_LM_MAPPING",
            "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
            "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
            "TF_MODEL_FOR_PRETRAINING_MAPPING",
            "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
            "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
            "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
            "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
            "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
            "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
            "TF_MODEL_FOR_VISION_2_SEQ_MAPPING",
            "TF_MODEL_MAPPING",
            "TF_MODEL_WITH_LM_HEAD_MAPPING",
            "TFAutoModel",
            "TFAutoModelForCausalLM",
            "TFAutoModelForImageClassification",
            "TFAutoModelForMaskedLM",
            "TFAutoModelForMultipleChoice",
            "TFAutoModelForNextSentencePrediction",
            "TFAutoModelForPreTraining",
            "TFAutoModelForQuestionAnswering",
            "TFAutoModelForSeq2SeqLM",
            "TFAutoModelForSequenceClassification",
            "TFAutoModelForSpeechSeq2Seq",
            "TFAutoModelForTableQuestionAnswering",
            "TFAutoModelForTokenClassification",
            "TFAutoModelForVision2Seq",
            "TFAutoModelWithLMHead",
        ]
    )
    # NOTE(review): "TFBartPretrainedModel" (lowercase "t") is the historical
    # spelling of this public name — do not "fix" the casing.
    _import_structure["models.bart"].extend(["TFBartForConditionalGeneration", "TFBartModel", "TFBartPretrainedModel"])
    _import_structure["models.bert"].extend(
        [
            "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFBertEmbeddings",
            "TFBertForMaskedLM",
            "TFBertForMultipleChoice",
            "TFBertForNextSentencePrediction",
            "TFBertForPreTraining",
            "TFBertForQuestionAnswering",
            "TFBertForSequenceClassification",
            "TFBertForTokenClassification",
            "TFBertLMHeadModel",
            "TFBertMainLayer",
            "TFBertModel",
            "TFBertPreTrainedModel",
        ]
    )
    _import_structure["models.blenderbot"].extend(
        ["TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel"]
    )
    _import_structure["models.blenderbot_small"].extend(
        ["TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel", "TFBlenderbotSmallPreTrainedModel"]
    )
    _import_structure["models.camembert"].extend(
        [
            "TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFCamembertForCausalLM",
            "TFCamembertForMaskedLM",
            "TFCamembertForMultipleChoice",
            "TFCamembertForQuestionAnswering",
            "TFCamembertForSequenceClassification",
            "TFCamembertForTokenClassification",
            "TFCamembertModel",
        ]
    )
    _import_structure["models.clip"].extend(
        [
            "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFCLIPModel",
            "TFCLIPPreTrainedModel",
            "TFCLIPTextModel",
            "TFCLIPVisionModel",
        ]
    )
    _import_structure["models.convbert"].extend(
        [
            "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFConvBertForMaskedLM",
            "TFConvBertForMultipleChoice",
            "TFConvBertForQuestionAnswering",
            "TFConvBertForSequenceClassification",
            "TFConvBertForTokenClassification",
            "TFConvBertLayer",
            "TFConvBertModel",
            "TFConvBertPreTrainedModel",
        ]
    )
    _import_structure["models.convnext"].extend(
        [
            "TFConvNextForImageClassification",
            "TFConvNextModel",
            "TFConvNextPreTrainedModel",
        ]
    )
    _import_structure["models.ctrl"].extend(
        [
            "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFCTRLForSequenceClassification",
            "TFCTRLLMHeadModel",
            "TFCTRLModel",
            "TFCTRLPreTrainedModel",
        ]
    )
    _import_structure["models.data2vec"].extend(
        [
            "TFData2VecVisionForImageClassification",
            "TFData2VecVisionModel",
            "TFData2VecVisionPreTrainedModel",
        ]
    )
    _import_structure["models.deberta"].extend(
        [
            "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFDebertaForMaskedLM",
            "TFDebertaForQuestionAnswering",
            "TFDebertaForSequenceClassification",
            "TFDebertaForTokenClassification",
            "TFDebertaModel",
            "TFDebertaPreTrainedModel",
        ]
    )
    _import_structure["models.deberta_v2"].extend(
        [
            "TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFDebertaV2ForMaskedLM",
            "TFDebertaV2ForQuestionAnswering",
            "TFDebertaV2ForSequenceClassification",
            "TFDebertaV2ForTokenClassification",
            "TFDebertaV2Model",
            "TFDebertaV2PreTrainedModel",
        ]
    )
    _import_structure["models.distilbert"].extend(
        [
            "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFDistilBertForMaskedLM",
            "TFDistilBertForMultipleChoice",
            "TFDistilBertForQuestionAnswering",
            "TFDistilBertForSequenceClassification",
            "TFDistilBertForTokenClassification",
            "TFDistilBertMainLayer",
            "TFDistilBertModel",
            "TFDistilBertPreTrainedModel",
        ]
    )
    _import_structure["models.dpr"].extend(
        [
            "TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFDPRContextEncoder",
            "TFDPRPretrainedContextEncoder",
            "TFDPRPretrainedQuestionEncoder",
            "TFDPRPretrainedReader",
            "TFDPRQuestionEncoder",
            "TFDPRReader",
        ]
    )
    _import_structure["models.electra"].extend(
        [
            "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFElectraForMaskedLM",
            "TFElectraForMultipleChoice",
            "TFElectraForPreTraining",
            "TFElectraForQuestionAnswering",
            "TFElectraForSequenceClassification",
            "TFElectraForTokenClassification",
            "TFElectraModel",
            "TFElectraPreTrainedModel",
        ]
    )
    _import_structure["models.encoder_decoder"].append("TFEncoderDecoderModel")
    _import_structure["models.flaubert"].extend(
        [
            "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFFlaubertForMultipleChoice",
            "TFFlaubertForQuestionAnsweringSimple",
            "TFFlaubertForSequenceClassification",
            "TFFlaubertForTokenClassification",
            "TFFlaubertModel",
            "TFFlaubertPreTrainedModel",
            "TFFlaubertWithLMHeadModel",
        ]
    )
    _import_structure["models.funnel"].extend(
        [
            "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFFunnelBaseModel",
            "TFFunnelForMaskedLM",
            "TFFunnelForMultipleChoice",
            "TFFunnelForPreTraining",
            "TFFunnelForQuestionAnswering",
            "TFFunnelForSequenceClassification",
            "TFFunnelForTokenClassification",
            "TFFunnelModel",
            "TFFunnelPreTrainedModel",
        ]
    )
    _import_structure["models.gpt2"].extend(
        [
            "TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFGPT2DoubleHeadsModel",
            "TFGPT2ForSequenceClassification",
            "TFGPT2LMHeadModel",
            "TFGPT2MainLayer",
            "TFGPT2Model",
            "TFGPT2PreTrainedModel",
        ]
    )
    _import_structure["models.gptj"].extend(
        [
            "TFGPTJForCausalLM",
            "TFGPTJForQuestionAnswering",
            "TFGPTJForSequenceClassification",
            "TFGPTJModel",
            "TFGPTJPreTrainedModel",
        ]
    )
    _import_structure["models.hubert"].extend(
        [
            "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFHubertForCTC",
            "TFHubertModel",
            "TFHubertPreTrainedModel",
        ]
    )
    _import_structure["models.layoutlm"].extend(
        [
            "TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFLayoutLMForMaskedLM",
            "TFLayoutLMForSequenceClassification",
            "TFLayoutLMForTokenClassification",
            "TFLayoutLMMainLayer",
            "TFLayoutLMModel",
            "TFLayoutLMPreTrainedModel",
        ]
    )
    _import_structure["models.led"].extend(["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"])
    _import_structure["models.longformer"].extend(
        [
            "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFLongformerForMaskedLM",
            "TFLongformerForMultipleChoice",
            "TFLongformerForQuestionAnswering",
            "TFLongformerForSequenceClassification",
            "TFLongformerForTokenClassification",
            "TFLongformerModel",
            "TFLongformerPreTrainedModel",
            "TFLongformerSelfAttention",
        ]
    )
    _import_structure["models.lxmert"].extend(
        [
            "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFLxmertForPreTraining",
            "TFLxmertMainLayer",
            "TFLxmertModel",
            "TFLxmertPreTrainedModel",
            "TFLxmertVisualFeatureEncoder",
        ]
    )
    _import_structure["models.marian"].extend(["TFMarianModel", "TFMarianMTModel", "TFMarianPreTrainedModel"])
    _import_structure["models.mbart"].extend(
        ["TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel"]
    )
    _import_structure["models.mobilebert"].extend(
        [
            "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFMobileBertForMaskedLM",
            "TFMobileBertForMultipleChoice",
            "TFMobileBertForNextSentencePrediction",
            "TFMobileBertForPreTraining",
            "TFMobileBertForQuestionAnswering",
            "TFMobileBertForSequenceClassification",
            "TFMobileBertForTokenClassification",
            "TFMobileBertMainLayer",
            "TFMobileBertModel",
            "TFMobileBertPreTrainedModel",
        ]
    )
    _import_structure["models.mpnet"].extend(
        [
            "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFMPNetForMaskedLM",
            "TFMPNetForMultipleChoice",
            "TFMPNetForQuestionAnswering",
            "TFMPNetForSequenceClassification",
            "TFMPNetForTokenClassification",
            "TFMPNetMainLayer",
            "TFMPNetModel",
            "TFMPNetPreTrainedModel",
        ]
    )
    _import_structure["models.mt5"].extend(["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"])
    _import_structure["models.openai"].extend(
        [
            "TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFOpenAIGPTDoubleHeadsModel",
            "TFOpenAIGPTForSequenceClassification",
            "TFOpenAIGPTLMHeadModel",
            "TFOpenAIGPTMainLayer",
            "TFOpenAIGPTModel",
            "TFOpenAIGPTPreTrainedModel",
        ]
    )
    _import_structure["models.pegasus"].extend(
        ["TFPegasusForConditionalGeneration", "TFPegasusModel", "TFPegasusPreTrainedModel"]
    )
    _import_structure["models.rag"].extend(
        [
            "TFRagModel",
            "TFRagPreTrainedModel",
            "TFRagSequenceForGeneration",
            "TFRagTokenForGeneration",
        ]
    )
    _import_structure["models.rembert"].extend(
        [
            "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFRemBertForCausalLM",
            "TFRemBertForMaskedLM",
            "TFRemBertForMultipleChoice",
            "TFRemBertForQuestionAnswering",
            "TFRemBertForSequenceClassification",
            "TFRemBertForTokenClassification",
            "TFRemBertLayer",
            "TFRemBertModel",
            "TFRemBertPreTrainedModel",
        ]
    )
    _import_structure["models.roberta"].extend(
        [
            "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFRobertaForCausalLM",
            "TFRobertaForMaskedLM",
            "TFRobertaForMultipleChoice",
            "TFRobertaForQuestionAnswering",
            "TFRobertaForSequenceClassification",
            "TFRobertaForTokenClassification",
            "TFRobertaMainLayer",
            "TFRobertaModel",
            "TFRobertaPreTrainedModel",
        ]
    )
    _import_structure["models.roformer"].extend(
        [
            "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFRoFormerForCausalLM",
            "TFRoFormerForMaskedLM",
            "TFRoFormerForMultipleChoice",
            "TFRoFormerForQuestionAnswering",
            "TFRoFormerForSequenceClassification",
            "TFRoFormerForTokenClassification",
            "TFRoFormerLayer",
            "TFRoFormerModel",
            "TFRoFormerPreTrainedModel",
        ]
    )
    _import_structure["models.speech_to_text"].extend(
        [
            "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFSpeech2TextForConditionalGeneration",
            "TFSpeech2TextModel",
            "TFSpeech2TextPreTrainedModel",
        ]
    )
    _import_structure["models.t5"].extend(
        [
            "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFT5EncoderModel",
            "TFT5ForConditionalGeneration",
            "TFT5Model",
            "TFT5PreTrainedModel",
        ]
    )
    _import_structure["models.tapas"].extend(
        [
            "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFTapasForMaskedLM",
            "TFTapasForQuestionAnswering",
            "TFTapasForSequenceClassification",
            "TFTapasModel",
            "TFTapasPreTrainedModel",
        ]
    )
    _import_structure["models.transfo_xl"].extend(
        [
            "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFAdaptiveEmbedding",
            "TFTransfoXLForSequenceClassification",
            "TFTransfoXLLMHeadModel",
            "TFTransfoXLMainLayer",
            "TFTransfoXLModel",
            "TFTransfoXLPreTrainedModel",
        ]
    )
    _import_structure["models.vision_encoder_decoder"].extend(["TFVisionEncoderDecoderModel"])
    _import_structure["models.vit"].extend(
        [
            "TFViTForImageClassification",
            "TFViTModel",
            "TFViTPreTrainedModel",
        ]
    )
    _import_structure["models.vit_mae"].extend(
        [
            "TFViTMAEForPreTraining",
            "TFViTMAEModel",
            "TFViTMAEPreTrainedModel",
        ]
    )
    _import_structure["models.wav2vec2"].extend(
        [
            "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFWav2Vec2ForCTC",
            "TFWav2Vec2Model",
            "TFWav2Vec2PreTrainedModel",
        ]
    )
    _import_structure["models.xlm"].extend(
        [
            "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFXLMForMultipleChoice",
            "TFXLMForQuestionAnsweringSimple",
            "TFXLMForSequenceClassification",
            "TFXLMForTokenClassification",
            "TFXLMMainLayer",
            "TFXLMModel",
            "TFXLMPreTrainedModel",
            "TFXLMWithLMHeadModel",
        ]
    )
    _import_structure["models.xlm_roberta"].extend(
        [
            "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFXLMRobertaForMaskedLM",
            "TFXLMRobertaForMultipleChoice",
            "TFXLMRobertaForQuestionAnswering",
            "TFXLMRobertaForSequenceClassification",
            "TFXLMRobertaForTokenClassification",
            "TFXLMRobertaModel",
        ]
    )
    _import_structure["models.xlnet"].extend(
        [
            "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
            "TFXLNetForMultipleChoice",
            "TFXLNetForQuestionAnsweringSimple",
            "TFXLNetForSequenceClassification",
            "TFXLNetForTokenClassification",
            "TFXLNetLMHeadModel",
            "TFXLNetMainLayer",
            "TFXLNetModel",
            "TFXLNetPreTrainedModel",
        ]
    )
    # TF-specific training utilities (Keras optimizer wrappers and trainer).
    _import_structure["optimization_tf"] = ["AdamWeightDecay", "GradientAccumulator", "WarmUp", "create_optimizer"]
    _import_structure["tf_utils"] = []
    _import_structure["trainer_tf"] = ["TFTrainer"]
# FLAX-backed objects
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_flax_objects
_import_structure["utils.dummy_flax_objects"] = [
name for name in dir(dummy_flax_objects) if not name.startswith("_")
]
else:
_import_structure["generation_flax_logits_process"] = [
"FlaxForcedBOSTokenLogitsProcessor",
"FlaxForcedEOSTokenLogitsProcessor",
"FlaxLogitsProcessor",
"FlaxLogitsProcessorList",
"FlaxLogitsWarper",
"FlaxMinLengthLogitsProcessor",
"FlaxTemperatureLogitsWarper",
"FlaxTopKLogitsWarper",
"FlaxTopPLogitsWarper",
]
_import_structure["generation_flax_utils"] = []
_import_structure["modeling_flax_outputs"] = []
_import_structure["modeling_flax_utils"] = ["FlaxPreTrainedModel"]
_import_structure["models.albert"].extend(
[
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
)
_import_structure["models.auto"].extend(
[
"FLAX_MODEL_FOR_CAUSAL_LM_MAPPING",
"FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"FLAX_MODEL_FOR_MASKED_LM_MAPPING",
"FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"FLAX_MODEL_FOR_PRETRAINING_MAPPING",
"FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
"FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING",
"FLAX_MODEL_MAPPING",
"FlaxAutoModel",
"FlaxAutoModelForCausalLM",
"FlaxAutoModelForImageClassification",
"FlaxAutoModelForMaskedLM",
"FlaxAutoModelForMultipleChoice",
"FlaxAutoModelForNextSentencePrediction",
"FlaxAutoModelForPreTraining",
"FlaxAutoModelForQuestionAnswering",
"FlaxAutoModelForSeq2SeqLM",
"FlaxAutoModelForSequenceClassification",
"FlaxAutoModelForTokenClassification",
"FlaxAutoModelForVision2Seq",
]
)
# Flax models structure
_import_structure["models.bart"].extend(
[
"FlaxBartDecoderPreTrainedModel",
"FlaxBartForCausalLM",
"FlaxBartForConditionalGeneration",
"FlaxBartForQuestionAnswering",
"FlaxBartForSequenceClassification",
"FlaxBartModel",
"FlaxBartPreTrainedModel",
]
)
_import_structure["models.beit"].extend(
[
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
)
_import_structure["models.bert"].extend(
[
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
)
_import_structure["models.big_bird"].extend(
[
"FlaxBigBirdForCausalLM",
"FlaxBigBirdForMaskedLM",
"FlaxBigBirdForMultipleChoice",
"FlaxBigBirdForPreTraining",
"FlaxBigBirdForQuestionAnswering",
"FlaxBigBirdForSequenceClassification",
"FlaxBigBirdForTokenClassification",
"FlaxBigBirdModel",
"FlaxBigBirdPreTrainedModel",
]
)
_import_structure["models.blenderbot"].extend(
["FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel"]
)
_import_structure["models.blenderbot_small"].extend(
[
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
)
_import_structure["models.clip"].extend(
[
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
)
_import_structure["models.electra"].extend(
[
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
)
_import_structure["models.encoder_decoder"].append("FlaxEncoderDecoderModel")
_import_structure["models.gpt2"].extend(["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"])
_import_structure["models.gpt_neo"].extend(
["FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel"]
)
_import_structure["models.gptj"].extend(["FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTJPreTrainedModel"])
_import_structure["models.marian"].extend(
[
"FlaxMarianModel",
"FlaxMarianMTModel",
"FlaxMarianPreTrainedModel",
]
)
_import_structure["models.mbart"].extend(
[
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
)
_import_structure["models.mt5"].extend(["FlaxMT5ForConditionalGeneration", "FlaxMT5Model"])
_import_structure["models.pegasus"].extend(
[
"FlaxPegasusForConditionalGeneration",
"FlaxPegasusModel",
"FlaxPegasusPreTrainedModel",
]
)
_import_structure["models.roberta"].extend(
[
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
)
_import_structure["models.roformer"].extend(
[
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
)
_import_structure["models.speech_encoder_decoder"].append("FlaxSpeechEncoderDecoderModel")
_import_structure["models.t5"].extend(["FlaxT5ForConditionalGeneration", "FlaxT5Model", "FlaxT5PreTrainedModel"])
_import_structure["models.vision_encoder_decoder"].append("FlaxVisionEncoderDecoderModel")
_import_structure["models.vision_text_dual_encoder"].extend(["FlaxVisionTextDualEncoderModel"])
_import_structure["models.vit"].extend(["FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel"])
_import_structure["models.wav2vec2"].extend(
["FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel"]
)
_import_structure["models.xglm"].extend(
[
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
)
_import_structure["models.xlm_roberta"].extend(
[
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
]
)
# Direct imports for type-checking
if TYPE_CHECKING:
# Configuration
from .configuration_utils import PretrainedConfig
# Data
from .data import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_compute_metrics,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_compute_metrics,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
from .data.data_collator import (
DataCollator,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .feature_extraction_sequence_utils import SequenceFeatureExtractor
# Feature Extractor
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .hf_argparser import HfArgumentParser
# Integrations
from .integrations import (
is_comet_available,
is_optuna_available,
is_ray_available,
is_ray_tune_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
# Model Cards
from .modelcard import ModelCard
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (
convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_model_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
)
from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
from .models.auto import (
ALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
MODEL_NAMES_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
)
from .models.bart import BartConfig, BartTokenizer
from .models.beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig
from .models.bert import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BasicTokenizer,
BertConfig,
BertTokenizer,
WordpieceTokenizer,
)
from .models.bert_generation import BertGenerationConfig
from .models.bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
from .models.bertweet import BertweetTokenizer
from .models.big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig
from .models.bigbird_pegasus import BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig
from .models.blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotTokenizer
from .models.blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallTokenizer,
)
from .models.byt5 import ByT5Tokenizer
from .models.camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from .models.canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig, CanineTokenizer
from .models.clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPTextConfig,
CLIPTokenizer,
CLIPVisionConfig,
)
from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer
from .models.convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig
from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer
from .models.data2vec import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecAudioConfig,
Data2VecTextConfig,
Data2VecVisionConfig,
)
from .models.deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer
from .models.deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config
from .models.decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
DecisionTransformerConfig,
)
from .models.deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig
from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig
from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer
from .models.dpr import (
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPRConfig,
DPRContextEncoderTokenizer,
DPRQuestionEncoderTokenizer,
DPRReaderOutput,
DPRReaderTokenizer,
)
from .models.dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer
from .models.encoder_decoder import EncoderDecoderConfig
from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer
from .models.fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer
from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer
from .models.glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer
from .models.gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig
from .models.gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig
from .models.herbert import HerbertTokenizer
from .models.hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig
from .models.ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig
from .models.imagegpt import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ImageGPTConfig
from .models.layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer
from .models.layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMv2Config,
LayoutLMv2FeatureExtractor,
LayoutLMv2Processor,
LayoutLMv2Tokenizer,
)
from .models.layoutxlm import LayoutXLMProcessor
from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer
from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer
from .models.luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig, LukeTokenizer
from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer
from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config
from .models.marian import MarianConfig
from .models.maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .models.mbart import MBartConfig
from .models.megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
from .models.mmbt import MMBTConfig
from .models.mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertTokenizer
from .models.mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig, MPNetTokenizer
from .models.mt5 import MT5Config
from .models.nystromformer import NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, NystromformerConfig
from .models.openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig, OpenAIGPTTokenizer
from .models.pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig, PegasusTokenizer
from .models.perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverTokenizer
from .models.phobert import PhobertTokenizer
from .models.plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
from .models.poolformer import POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig
from .models.prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig, ProphetNetTokenizer
from .models.qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig
from .models.rag import RagConfig, RagRetriever, RagTokenizer
from .models.realm import REALM_PRETRAINED_CONFIG_ARCHIVE_MAP, RealmConfig, RealmTokenizer
from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
from .models.regnet import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP, RegNetConfig
from .models.rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig
from .models.resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig
from .models.retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig, RetriBertTokenizer
from .models.roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaTokenizer
from .models.roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerTokenizer
from .models.segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SegformerConfig
from .models.sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
from .models.sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig
from .models.speech_encoder_decoder import SpeechEncoderDecoderConfig
from .models.speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .models.speech_to_text_2 import (
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
Speech2Text2Config,
Speech2Text2Processor,
Speech2Text2Tokenizer,
)
from .models.splinter import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, SplinterConfig, SplinterTokenizer
from .models.squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertTokenizer
from .models.swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig
from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .models.tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig, TapasTokenizer
from .models.tapex import TapexTokenizer
from .models.transfo_xl import (
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
TransfoXLConfig,
TransfoXLCorpus,
TransfoXLTokenizer,
)
from .models.trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig, TrOCRProcessor
from .models.unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
from .models.unispeech_sat import UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechSatConfig
from .models.van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
from .models.vilt import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig, ViltFeatureExtractor, ViltProcessor
from .models.vision_encoder_decoder import VisionEncoderDecoderConfig
from .models.vision_text_dual_encoder import VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor
from .models.visual_bert import VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, VisualBertConfig
from .models.vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig
from .models.vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
from .models.wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
Wav2Vec2Config,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
Wav2Vec2Tokenizer,
)
from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
from .models.wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
from .models.xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer
from .models.xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
from .models.xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
from .models.xlm_roberta_xl import XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig
from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
from .models.yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig
from .models.yoso import YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP, YosoConfig
# Pipelines
from .pipelines import (
AudioClassificationPipeline,
AutomaticSpeechRecognitionPipeline,
Conversation,
ConversationalPipeline,
CsvPipelineDataFormat,
FeatureExtractionPipeline,
FillMaskPipeline,
ImageClassificationPipeline,
ImageSegmentationPipeline,
JsonPipelineDataFormat,
NerPipeline,
ObjectDetectionPipeline,
PipedPipelineDataFormat,
Pipeline,
PipelineDataFormat,
QuestionAnsweringPipeline,
SummarizationPipeline,
TableQuestionAnsweringPipeline,
Text2TextGenerationPipeline,
TextClassificationPipeline,
TextGenerationPipeline,
TokenClassificationPipeline,
TranslationPipeline,
ZeroShotClassificationPipeline,
ZeroShotImageClassificationPipeline,
pipeline,
)
from .processing_utils import ProcessorMixin
# Tokenization
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import (
AddedToken,
BatchEncoding,
CharSpan,
PreTrainedTokenizerBase,
SpecialTokensMixin,
TokenSpan,
)
# Trainer
from .trainer_callback import (
DefaultFlowCallback,
EarlyStoppingCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_utils import EvalPrediction, IntervalStrategy, SchedulerType, enable_full_determinism, set_seed
from .training_args import TrainingArguments
from .training_args_seq2seq import Seq2SeqTrainingArguments
from .training_args_tf import TFTrainingArguments
# Files and general utilities
from .utils import (
CONFIG_NAME,
MODEL_CARD_NAME,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
TensorType,
add_end_docstrings,
add_start_docstrings,
cached_path,
is_apex_available,
is_datasets_available,
is_faiss_available,
is_flax_available,
is_phonemizer_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_scipy_available,
is_sentencepiece_available,
is_sklearn_available,
is_speech_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_vision_available,
logging,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_sentencepiece_objects import *
else:
from .models.albert import AlbertTokenizer
from .models.barthez import BarthezTokenizer
from .models.bartpho import BartphoTokenizer
from .models.bert_generation import BertGenerationTokenizer
from .models.big_bird import BigBirdTokenizer
from .models.camembert import CamembertTokenizer
from .models.cpm import CpmTokenizer
from .models.deberta_v2 import DebertaV2Tokenizer
from .models.fnet import FNetTokenizer
from .models.layoutxlm import LayoutXLMTokenizer
from .models.m2m_100 import M2M100Tokenizer
from .models.marian import MarianTokenizer
from .models.mbart import MBart50Tokenizer, MBartTokenizer
from .models.mluke import MLukeTokenizer
from .models.mt5 import MT5Tokenizer
from .models.pegasus import PegasusTokenizer
from .models.plbart import PLBartTokenizer
from .models.reformer import ReformerTokenizer
from .models.rembert import RemBertTokenizer
from .models.speech_to_text import Speech2TextTokenizer
from .models.t5 import T5Tokenizer
from .models.xglm import XGLMTokenizer
from .models.xlm_prophetnet import XLMProphetNetTokenizer
from .models.xlm_roberta import XLMRobertaTokenizer
from .models.xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_tokenizers_objects import *
else:
# Fast tokenizers imports
from .models.albert import AlbertTokenizerFast
from .models.bart import BartTokenizerFast
from .models.barthez import BarthezTokenizerFast
from .models.bert import BertTokenizerFast
from .models.big_bird import BigBirdTokenizerFast
from .models.blenderbot import BlenderbotTokenizerFast
from .models.blenderbot_small import BlenderbotSmallTokenizerFast
from .models.camembert import CamembertTokenizerFast
from .models.clip import CLIPTokenizerFast
from .models.convbert import ConvBertTokenizerFast
from .models.cpm import CpmTokenizerFast
from .models.deberta import DebertaTokenizerFast
from .models.deberta_v2 import DebertaV2TokenizerFast
from .models.distilbert import DistilBertTokenizerFast
from .models.dpr import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast
from .models.electra import ElectraTokenizerFast
from .models.fnet import FNetTokenizerFast
from .models.funnel import FunnelTokenizerFast
from .models.gpt2 import GPT2TokenizerFast
from .models.herbert import HerbertTokenizerFast
from .models.layoutlm import LayoutLMTokenizerFast
from .models.layoutlmv2 import LayoutLMv2TokenizerFast
from .models.layoutxlm import LayoutXLMTokenizerFast
from .models.led import LEDTokenizerFast
from .models.longformer import LongformerTokenizerFast
from .models.lxmert import LxmertTokenizerFast
from .models.mbart import MBartTokenizerFast
from .models.mbart50 import MBart50TokenizerFast
from .models.mobilebert import MobileBertTokenizerFast
from .models.mpnet import MPNetTokenizerFast
from .models.mt5 import MT5TokenizerFast
from .models.openai import OpenAIGPTTokenizerFast
from .models.pegasus import PegasusTokenizerFast
from .models.realm import RealmTokenizerFast
from .models.reformer import ReformerTokenizerFast
from .models.rembert import RemBertTokenizerFast
from .models.retribert import RetriBertTokenizerFast
from .models.roberta import RobertaTokenizerFast
from .models.roformer import RoFormerTokenizerFast
from .models.splinter import SplinterTokenizerFast
from .models.squeezebert import SqueezeBertTokenizerFast
from .models.t5 import T5TokenizerFast
from .models.xglm import XGLMTokenizerFast
from .models.xlm_roberta import XLMRobertaTokenizerFast
from .models.xlnet import XLNetTokenizerFast
from .tokenization_utils_fast import PreTrainedTokenizerFast
try:
if not (is_sentencepiece_available() and is_tokenizers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummies_sentencepiece_and_tokenizers_objects import *
else:
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer
    try:
        # Gate speech-only objects on the optional speech backend.
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Backend missing: fall back to placeholder objects from the dummy module.
        from .utils.dummy_speech_objects import *
    else:
        from .models.speech_to_text import Speech2TextFeatureExtractor
    try:
        # Speech2TextProcessor bundles a feature extractor and a sentencepiece
        # tokenizer, so it requires BOTH optional backends.
        if not (is_speech_available() and is_sentencepiece_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Either backend missing: fall back to placeholder objects.
        from .utils.dummy_sentencepiece_and_speech_objects import *
    else:
        from .models.speech_to_text import Speech2TextProcessor
    try:
        # Feature extractors / image processors need the optional vision backend.
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Backend missing: fall back to placeholder objects from the dummy module.
        from .utils.dummy_vision_objects import *
    else:
        from .image_utils import ImageFeatureExtractionMixin
        from .models.beit import BeitFeatureExtractor
        from .models.clip import CLIPFeatureExtractor, CLIPProcessor
        from .models.convnext import ConvNextFeatureExtractor
        from .models.deit import DeiTFeatureExtractor
        from .models.detr import DetrFeatureExtractor
        from .models.dpt import DPTFeatureExtractor
        from .models.glpn import GLPNFeatureExtractor
        from .models.imagegpt import ImageGPTFeatureExtractor
        # NOTE(review): LayoutLMv2Processor/LayoutXLMProcessor are also imported
        # unconditionally above (L3797/L3800); presumably harmless re-imports — confirm.
        from .models.layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2Processor
        from .models.layoutxlm import LayoutXLMProcessor
        from .models.maskformer import MaskFormerFeatureExtractor
        from .models.perceiver import PerceiverFeatureExtractor
        from .models.poolformer import PoolFormerFeatureExtractor
        from .models.segformer import SegformerFeatureExtractor
        from .models.vilt import ViltFeatureExtractor, ViltProcessor
        from .models.vit import ViTFeatureExtractor
        from .models.yolos import YolosFeatureExtractor
# Modeling
    try:
        # DETR modeling requires both the timm and vision optional backends.
        if not (is_timm_available() and is_vision_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Either backend missing: fall back to placeholder objects.
        from .utils.dummy_timm_objects import *
    else:
        from .models.detr import (
            DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            DetrForObjectDetection,
            DetrForSegmentation,
            DetrModel,
            DetrPreTrainedModel,
        )
    try:
        # TAPAS modeling requires the optional torch-scatter backend.
        if not is_scatter_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Backend missing: fall back to placeholder objects.
        from .utils.dummy_scatter_objects import *
    else:
        from .models.tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import *
else:
# Benchmarks
from .benchmark.benchmark import PyTorchBenchmark
from .benchmark.benchmark_args import PyTorchBenchmarkArguments
from .data.datasets import (
GlueDataset,
GlueDataTrainingArguments,
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
SquadDataset,
SquadDataTrainingArguments,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .generation_beam_constraints import (
Constraint,
ConstraintListState,
DisjunctiveConstraint,
PhrasalConstraint,
)
from .generation_beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
from .generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessor,
LogitsProcessorList,
LogitsWarper,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from .generation_stopping_criteria import (
MaxLengthCriteria,
MaxTimeCriteria,
StoppingCriteria,
StoppingCriteriaList,
)
from .generation_utils import top_k_top_p_filtering
from .modeling_utils import PreTrainedModel
# PyTorch model imports
from .models.albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
from .models.auto import (
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
MODEL_FOR_AUDIO_XVECTOR_MAPPING,
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_CTC_MAPPING,
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
MODEL_FOR_OBJECT_DETECTION_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_FOR_VISION_2_SEQ_MAPPING,
MODEL_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoModel,
AutoModelForAudioClassification,
AutoModelForAudioFrameClassification,
AutoModelForAudioXVector,
AutoModelForCausalLM,
AutoModelForCTC,
AutoModelForImageClassification,
AutoModelForImageSegmentation,
AutoModelForInstanceSegmentation,
AutoModelForMaskedImageModeling,
AutoModelForMaskedLM,
AutoModelForMultipleChoice,
AutoModelForNextSentencePrediction,
AutoModelForObjectDetection,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSemanticSegmentation,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForSpeechSeq2Seq,
AutoModelForTableQuestionAnswering,
AutoModelForTokenClassification,
AutoModelForVision2Seq,
AutoModelWithLMHead,
)
from .models.bart import (
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BartForCausalLM,
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
BartPretrainedModel,
PretrainedBartModel,
)
from .models.beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
from .models.bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
from .models.bert_generation import (
BertGenerationDecoder,
BertGenerationEncoder,
BertGenerationPreTrainedModel,
load_tf_weights_in_bert_generation,
)
from .models.big_bird import (
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdLayer,
BigBirdModel,
BigBirdPreTrainedModel,
load_tf_weights_in_big_bird,
)
from .models.bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
from .models.blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
from .models.blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
from .models.camembert import (
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from .models.canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
from .models.clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPVisionModel,
)
from .models.convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
from .models.convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
from .models.ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
from .models.data2vec import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecAudioForAudioFrameClassification,
Data2VecAudioForCTC,
Data2VecAudioForSequenceClassification,
Data2VecAudioForXVector,
Data2VecAudioModel,
Data2VecAudioPreTrainedModel,
Data2VecTextForCausalLM,
Data2VecTextForMaskedLM,
Data2VecTextForMultipleChoice,
Data2VecTextForQuestionAnswering,
Data2VecTextForSequenceClassification,
Data2VecTextForTokenClassification,
Data2VecTextModel,
Data2VecTextPreTrainedModel,
Data2VecVisionForImageClassification,
Data2VecVisionForSemanticSegmentation,
Data2VecVisionModel,
Data2VecVisionPreTrainedModel,
)
from .models.deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
from .models.deberta_v2 import (
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaV2ForMaskedLM,
DebertaV2ForMultipleChoice,
DebertaV2ForQuestionAnswering,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2Model,
DebertaV2PreTrainedModel,
)
from .models.decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
DecisionTransformerGPT2Model,
DecisionTransformerGPT2PreTrainedModel,
DecisionTransformerModel,
DecisionTransformerPreTrainedModel,
)
from .models.deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
from .models.distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
from .models.dpr import (
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPRContextEncoder,
DPRPretrainedContextEncoder,
DPRPreTrainedModel,
DPRPretrainedQuestionEncoder,
DPRPretrainedReader,
DPRQuestionEncoder,
DPRReader,
)
from .models.dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
from .models.electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
from .models.encoder_decoder import EncoderDecoderModel
from .models.flaubert import (
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from .models.fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
from .models.fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
from .models.funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
from .models.glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNModel,
GLPNPreTrainedModel,
)
from .models.gpt2 import (
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2DoubleHeadsModel,
GPT2ForSequenceClassification,
GPT2ForTokenClassification,
GPT2LMHeadModel,
GPT2Model,
GPT2PreTrainedModel,
load_tf_weights_in_gpt2,
)
from .models.gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForSequenceClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
from .models.gptj import (
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTJForCausalLM,
GPTJForQuestionAnswering,
GPTJForSequenceClassification,
GPTJModel,
GPTJPreTrainedModel,
)
from .models.hubert import (
HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
HubertForCTC,
HubertForSequenceClassification,
HubertModel,
HubertPreTrainedModel,
)
from .models.ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
from .models.imagegpt import (
IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
ImageGPTForCausalImageModeling,
ImageGPTForImageClassification,
ImageGPTModel,
ImageGPTPreTrainedModel,
load_tf_weights_in_imagegpt,
)
from .models.layoutlm import (
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
LayoutLMPreTrainedModel,
)
from .models.layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
from .models.led import (
LED_PRETRAINED_MODEL_ARCHIVE_LIST,
LEDForConditionalGeneration,
LEDForQuestionAnswering,
LEDForSequenceClassification,
LEDModel,
LEDPreTrainedModel,
)
from .models.longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
from .models.luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeModel,
LukePreTrainedModel,
)
from .models.lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
from .models.m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel
from .models.maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .models.mbart import (
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
from .models.megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
from .models.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
from .models.mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
from .models.mpnet import (
MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetLayer,
MPNetModel,
MPNetPreTrainedModel,
)
from .models.mt5 import MT5EncoderModel, MT5ForConditionalGeneration, MT5Model
from .models.nystromformer import (
NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerLayer,
NystromformerModel,
NystromformerPreTrainedModel,
)
from .models.openai import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
OpenAIGPTPreTrainedModel,
load_tf_weights_in_openai_gpt,
)
from .models.pegasus import (
PegasusForCausalLM,
PegasusForConditionalGeneration,
PegasusModel,
PegasusPreTrainedModel,
)
from .models.perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
from .models.plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
from .models.poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
from .models.prophetnet import (
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ProphetNetDecoder,
ProphetNetEncoder,
ProphetNetForCausalLM,
ProphetNetForConditionalGeneration,
ProphetNetModel,
ProphetNetPreTrainedModel,
)
from .models.qdqbert import (
QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
QDQBertForMaskedLM,
QDQBertForMultipleChoice,
QDQBertForNextSentencePrediction,
QDQBertForQuestionAnswering,
QDQBertForSequenceClassification,
QDQBertForTokenClassification,
QDQBertLayer,
QDQBertLMHeadModel,
QDQBertModel,
QDQBertPreTrainedModel,
load_tf_weights_in_qdqbert,
)
from .models.rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
from .models.realm import (
REALM_PRETRAINED_MODEL_ARCHIVE_LIST,
RealmEmbedder,
RealmForOpenQA,
RealmKnowledgeAugEncoder,
RealmPreTrainedModel,
RealmReader,
RealmRetriever,
RealmScorer,
load_tf_weights_in_realm,
)
from .models.reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
from .models.regnet import (
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
RegNetForImageClassification,
RegNetModel,
RegNetPreTrainedModel,
)
from .models.rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
from .models.resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
from .models.retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel
from .models.roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
from .models.roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
from .models.segformer import (
SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SegformerDecodeHead,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerLayer,
SegformerModel,
SegformerPreTrainedModel,
)
from .models.sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
from .models.sew_d import (
SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWDForCTC,
SEWDForSequenceClassification,
SEWDModel,
SEWDPreTrainedModel,
)
from .models.speech_encoder_decoder import SpeechEncoderDecoderModel
from .models.speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
from .models.speech_to_text_2 import Speech2Text2ForCausalLM, Speech2Text2PreTrainedModel
from .models.splinter import (
SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST,
SplinterForQuestionAnswering,
SplinterLayer,
SplinterModel,
SplinterPreTrainedModel,
)
from .models.squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
from .models.swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
from .models.t5 import (
T5_PRETRAINED_MODEL_ARCHIVE_LIST,
T5EncoderModel,
T5ForConditionalGeneration,
T5Model,
T5PreTrainedModel,
load_tf_weights_in_t5,
)
from .models.transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
from .models.trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
from .models.unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
from .models.unispeech_sat import (
UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForCTC,
UniSpeechSatForPreTraining,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
UniSpeechSatModel,
UniSpeechSatPreTrainedModel,
)
from .models.van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
from .models.vilt import (
VILT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltLayer,
ViltModel,
ViltPreTrainedModel,
)
from .models.vision_encoder_decoder import VisionEncoderDecoderModel
from .models.vision_text_dual_encoder import VisionTextDualEncoderModel
from .models.visual_bert import (
VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForRegionToPhraseAlignment,
VisualBertForVisualReasoning,
VisualBertLayer,
VisualBertModel,
VisualBertPreTrainedModel,
)
from .models.vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
from .models.vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
from .models.wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForPreTraining,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
from .models.wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
from .models.xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
from .models.xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
from .models.xlm_prophetnet import (
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMProphetNetDecoder,
XLMProphetNetEncoder,
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from .models.xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from .models.xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
from .models.xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
from .models.yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
from .models.yoso import (
YOSO_PRETRAINED_MODEL_ARCHIVE_LIST,
YosoForMaskedLM,
YosoForMultipleChoice,
YosoForQuestionAnswering,
YosoForSequenceClassification,
YosoForTokenClassification,
YosoLayer,
YosoModel,
YosoPreTrainedModel,
)
# Optimization
from .optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pytorch_utils import Conv1D, apply_chunking_to_forward, prune_layer
# Trainer
from .trainer import Trainer
from .trainer_pt_utils import torch_distributed_zero_first
from .trainer_seq2seq import Seq2SeqTrainer
# TensorFlow
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
# Import the same objects as dummies to get them in the namespace.
# They will raise an import error if the user tries to instantiate / use them.
from .utils.dummy_tf_objects import *
else:
from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments
# Benchmarks
from .benchmark.benchmark_tf import TensorFlowBenchmark
from .generation_tf_logits_process import (
TFForcedBOSTokenLogitsProcessor,
TFForcedEOSTokenLogitsProcessor,
TFLogitsProcessor,
TFLogitsProcessorList,
TFLogitsWarper,
TFMinLengthLogitsProcessor,
TFNoBadWordsLogitsProcessor,
TFNoRepeatNGramLogitsProcessor,
TFRepetitionPenaltyLogitsProcessor,
TFTemperatureLogitsWarper,
TFTopKLogitsWarper,
TFTopPLogitsWarper,
)
from .generation_tf_utils import tf_top_k_top_p_filtering
from .keras_callbacks import KerasMetricCallback, PushToHubCallback
from .modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMMainLayer,
TFLayoutLMModel,
TFLayoutLMPreTrainedModel,
)
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list
# TensorFlow model imports
from .models.albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
from .models.auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
TF_MODEL_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForImageClassification,
TFAutoModelForMaskedLM,
TFAutoModelForMultipleChoice,
TFAutoModelForNextSentencePrediction,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForSpeechSeq2Seq,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelForVision2Seq,
TFAutoModelWithLMHead,
)
from .models.bart import TFBartForConditionalGeneration, TFBartModel, TFBartPretrainedModel
from .models.bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
from .models.blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
from .models.blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
from .models.camembert import (
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCamembertForCausalLM,
TFCamembertForMaskedLM,
TFCamembertForMultipleChoice,
TFCamembertForQuestionAnswering,
TFCamembertForSequenceClassification,
TFCamembertForTokenClassification,
TFCamembertModel,
)
from .models.clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
from .models.convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
from .models.convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
from .models.ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
from .models.data2vec import (
TFData2VecVisionForImageClassification,
TFData2VecVisionModel,
TFData2VecVisionPreTrainedModel,
)
from .models.deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
from .models.deberta_v2 import (
TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaV2ForMaskedLM,
TFDebertaV2ForQuestionAnswering,
TFDebertaV2ForSequenceClassification,
TFDebertaV2ForTokenClassification,
TFDebertaV2Model,
TFDebertaV2PreTrainedModel,
)
from .models.distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
from .models.dpr import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDPRContextEncoder,
TFDPRPretrainedContextEncoder,
TFDPRPretrainedQuestionEncoder,
TFDPRPretrainedReader,
TFDPRQuestionEncoder,
TFDPRReader,
)
from .models.electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
from .models.encoder_decoder import TFEncoderDecoderModel
from .models.flaubert import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertPreTrainedModel,
TFFlaubertWithLMHeadModel,
)
from .models.funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
from .models.gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2ForSequenceClassification,
TFGPT2LMHeadModel,
TFGPT2MainLayer,
TFGPT2Model,
TFGPT2PreTrainedModel,
)
from .models.gptj import (
TFGPTJForCausalLM,
TFGPTJForQuestionAnswering,
TFGPTJForSequenceClassification,
TFGPTJModel,
TFGPTJPreTrainedModel,
)
from .models.hubert import (
TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFHubertForCTC,
TFHubertModel,
TFHubertPreTrainedModel,
)
from .models.led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel
from .models.longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
from .models.lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
from .models.marian import TFMarianModel, TFMarianMTModel, TFMarianPreTrainedModel
from .models.mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
from .models.mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
from .models.mpnet import (
TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMPNetForMaskedLM,
TFMPNetForMultipleChoice,
TFMPNetForQuestionAnswering,
TFMPNetForSequenceClassification,
TFMPNetForTokenClassification,
TFMPNetMainLayer,
TFMPNetModel,
TFMPNetPreTrainedModel,
)
from .models.mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
from .models.openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTForSequenceClassification,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTMainLayer,
TFOpenAIGPTModel,
TFOpenAIGPTPreTrainedModel,
)
from .models.pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel
from .models.rag import TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration
from .models.rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
from .models.roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
from .models.roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
from .models.speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
from .models.t5 import (
TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST,
TFT5EncoderModel,
TFT5ForConditionalGeneration,
TFT5Model,
TFT5PreTrainedModel,
)
from .models.tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
from .models.transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
from .models.vision_encoder_decoder import TFVisionEncoderDecoderModel
from .models.vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
from .models.vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
from .models.wav2vec2 import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWav2Vec2ForCTC,
TFWav2Vec2Model,
TFWav2Vec2PreTrainedModel,
)
from .models.xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
from .models.xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
)
from .models.xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
# Optimization
from .optimization_tf import AdamWeightDecay, GradientAccumulator, WarmUp, create_optimizer
# Trainer
from .trainer_tf import TFTrainer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
# Import the same objects as dummies to get them in the namespace.
# They will raise an import error if the user tries to instantiate / use them.
from .utils.dummy_flax_objects import *
else:
from .generation_flax_logits_process import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessor,
FlaxLogitsProcessorList,
FlaxLogitsWarper,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
from .modeling_flax_utils import FlaxPreTrainedModel
# Flax model imports
from .models.albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
from .models.auto import (
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
FLAX_MODEL_FOR_PRETRAINING_MAPPING,
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModel,
FlaxAutoModelForCausalLM,
FlaxAutoModelForImageClassification,
FlaxAutoModelForMaskedLM,
FlaxAutoModelForMultipleChoice,
FlaxAutoModelForNextSentencePrediction,
FlaxAutoModelForPreTraining,
FlaxAutoModelForQuestionAnswering,
FlaxAutoModelForSeq2SeqLM,
FlaxAutoModelForSequenceClassification,
FlaxAutoModelForTokenClassification,
FlaxAutoModelForVision2Seq,
)
from .models.bart import (
FlaxBartDecoderPreTrainedModel,
FlaxBartForCausalLM,
FlaxBartForConditionalGeneration,
FlaxBartForQuestionAnswering,
FlaxBartForSequenceClassification,
FlaxBartModel,
FlaxBartPreTrainedModel,
)
from .models.beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
from .models.bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
from .models.big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
FlaxBigBirdPreTrainedModel,
)
from .models.blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
from .models.blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
from .models.clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
from .models.distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
from .models.electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
from .models.encoder_decoder import FlaxEncoderDecoderModel
from .models.gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel
from .models.gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
from .models.gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel
from .models.marian import FlaxMarianModel, FlaxMarianMTModel, FlaxMarianPreTrainedModel
from .models.mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
from .models.mt5 import FlaxMT5ForConditionalGeneration, FlaxMT5Model
from .models.pegasus import FlaxPegasusForConditionalGeneration, FlaxPegasusModel, FlaxPegasusPreTrainedModel
from .models.roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
from .models.roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
from .models.speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
from .models.t5 import FlaxT5ForConditionalGeneration, FlaxT5Model, FlaxT5PreTrainedModel
from .models.vision_encoder_decoder import FlaxVisionEncoderDecoderModel
from .models.vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
from .models.vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
from .models.wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2Model,
FlaxWav2Vec2PreTrainedModel,
)
from .models.xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
from .models.xlm_roberta import (
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
extra_objects={"__version__": __version__},
)
if not is_tf_available() and not is_torch_available() and not is_flax_available():
logger.warning(
"None of PyTorch, TensorFlow >= 2.0, or Flax have been found. "
"Models won't be available and only tokenizers, configuration "
"and file/data utilities can be used."
)
| 38.773955
| 119
| 0.659787
|
120358f90748f7e1278719ae6b249462e66990ff
| 156
|
py
|
Python
|
config.py
|
infrub/Tensordot
|
f72ed978ba30e231dd393e043f65c2e1664ca4a5
|
[
"MIT"
] | 7
|
2016-08-18T20:59:47.000Z
|
2020-05-02T09:25:16.000Z
|
config.py
|
infrub/Tensordot
|
f72ed978ba30e231dd393e043f65c2e1664ca4a5
|
[
"MIT"
] | 8
|
2016-02-08T07:21:05.000Z
|
2019-06-29T07:11:38.000Z
|
config.py
|
infrub/Tensordot
|
f72ed978ba30e231dd393e043f65c2e1664ca4a5
|
[
"MIT"
] | 7
|
2016-06-07T10:20:20.000Z
|
2022-02-23T07:10:13.000Z
|
import logging
# Default parameters
STYLE = "numpy"
COMMENT_PREFIX = "#"
NUMPY = "np"
INDENT = " "
LOGGING_LEVEL = logging.INFO
DEFAULT_BOND_DIM = 10
| 13
| 28
| 0.698718
|
ff73f005721feb9dd86785bb7153b95760528b2e
| 3,167
|
py
|
Python
|
limix/scripts/iset_analyze.py
|
fpcasale/limix
|
a6bc2850f243fe779991bb53a24ddbebe0ab74d2
|
[
"Apache-2.0"
] | null | null | null |
limix/scripts/iset_analyze.py
|
fpcasale/limix
|
a6bc2850f243fe779991bb53a24ddbebe0ab74d2
|
[
"Apache-2.0"
] | null | null | null |
limix/scripts/iset_analyze.py
|
fpcasale/limix
|
a6bc2850f243fe779991bb53a24ddbebe0ab74d2
|
[
"Apache-2.0"
] | null | null | null |
def entry_point():
    """CLI entry point: fit iSet on windows of variants defined in a .wnd file.

    Loads genotypes (``--bfile``), phenotypes (``--pfile``), fixed effects
    (``--ffile``), window definitions (``--wfile``) and an optional context
    indicator (``--ifile``), fits iSet for each window index in
    ``[--start_wnd, --end_wnd)`` and writes the real and permutation results
    to ``<resdir>/<i0>_<i1>.iSet.{real,perm}``.
    """
    import os
    import time
    from optparse import OptionParser

    import numpy as np
    import pandas as pd

    from ..data import BedReader
    from ..iSet.iset import fit_iSet
    from ..util import unique_variants as f_uni_variants

    parser = OptionParser()
    parser.add_option("--bfile", dest='bfile', type=str, default=None)
    # parser.add_option("--cfile", dest='cfile', type=str, default=None)
    parser.add_option("--pfile", dest='pfile', type=str, default=None)
    parser.add_option("--wfile", dest='wfile', type=str, default=None)
    parser.add_option("--ffile", dest='ffile', type=str, default=None)
    parser.add_option("--ifile", dest='ifile', type=str, default=None)
    parser.add_option("--resdir", dest='resdir', type=str, default='./')
    # start window, end window and permutations
    parser.add_option("--n_perms", type=int, default=10)
    parser.add_option("--start_wnd", dest='i0', type=int, default=None)
    parser.add_option("--end_wnd", dest='i1', type=int, default=None)
    parser.add_option("--factr", dest='factr', type=float, default=1e7)
    parser.add_option(
        "--unique_variants",
        action="store_true",
        dest='unique_variants',
        default=False)
    parser.add_option(
        "--standardize",
        action="store_true",
        dest='standardize',
        default=False)
    (options, args) = parser.parse_args()

    print('importing data')
    # Fix: scipy no longer re-exports numpy functions (sp.loadtxt, sp.newaxis
    # were removed) -- use numpy directly.
    F = np.loadtxt(options.ffile + '.fe')
    Y = np.loadtxt(options.pfile + '.phe')
    if len(Y.shape) == 1:
        Y = Y[:, np.newaxis]

    # Fix: pd.DataFrame.from_csv was removed from pandas; read_csv is the
    # equivalent call here (tab separated, default integer index).
    sets = pd.read_csv(options.wfile + '.wnd', sep='\t')

    reader = BedReader(options.bfile)

    i0 = 1 if options.i0 is None else options.i0
    i1 = sets.shape[0] if options.i1 is None else options.i1

    if options.ifile is None:
        Ie = None
    else:
        # Context indicator: one boolean per sample.
        Ie = np.loadtxt(options.ifile + '.ind').flatten() == 1

    res_dir = options.resdir
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(res_dir, exist_ok=True)

    n_digits = len(str(sets.shape[0]))
    fname = str(i0).zfill(n_digits)
    fname += '_' + str(i1).zfill(n_digits)
    resfile = os.path.join(res_dir, fname)

    # Collect per-window results and concatenate once at the end
    # (DataFrame.append was removed from pandas and was quadratic anyway).
    dfs = []
    dfs0 = []
    for wnd_i in range(i0, i1):
        t0 = time.time()
        # Fix: .ix was removed from pandas; the default RangeIndex makes
        # positional .iloc equivalent here.
        _set = sets.iloc[wnd_i]
        print('.. set %d: %s' % (wnd_i, _set['setid']))
        Xr = reader.getGenotypes(
            pos_start=_set['start'],
            pos_end=_set['end'],
            chrom=_set['chrom'],
            impute=True)
        if options.unique_variants:
            Xr = f_uni_variants(Xr)
        if options.standardize:
            Xr -= Xr.mean(0)
            Xr /= Xr.std(0)
        else:
            # encoding minor as 0
            p = 0.5 * Xr.mean(0)
            Xr[:, p > 0.5] = 2 - Xr[:, p > 0.5]
        Xr /= np.sqrt(Xr.shape[1])
        # --n_perms was previously parsed but ignored (n_nulls hard-coded to
        # its default, 10); presumably it should drive the number of null
        # fits -- TODO confirm against the fit_iSet API.
        _df, _df0 = fit_iSet(Y, F=F, Xr=Xr, Ie=Ie, n_nulls=options.n_perms)
        dfs.append(_df)
        dfs0.append(_df0)
        print('Elapsed:', time.time() - t0)

    df = pd.concat(dfs) if dfs else pd.DataFrame()
    df0 = pd.concat(dfs0) if dfs0 else pd.DataFrame()
    df.to_csv(resfile + '.iSet.real')
    df0.to_csv(resfile + '.iSet.perm')
| 29.877358
| 72
| 0.587307
|
26300fbd3cfa17832d744010a5327d419aa62be4
| 582
|
py
|
Python
|
UPD/app.py
|
RIDCorix/UPD
|
8694d119181a4afffafbfbab510f697399c1ea13
|
[
"MIT"
] | null | null | null |
UPD/app.py
|
RIDCorix/UPD
|
8694d119181a4afffafbfbab510f697399c1ea13
|
[
"MIT"
] | null | null | null |
UPD/app.py
|
RIDCorix/UPD
|
8694d119181a4afffafbfbab510f697399c1ea13
|
[
"MIT"
] | null | null | null |
# UPD application bootstrap script.
#
# NOTE(review): the interleaved imports below presumably matter --
# get_tools() appears to register extensions before the model/UI modules are
# imported -- so the original ordering is preserved as-is.
from PySide6.QtCore import QFile, QTextStream
from PySide6.QtWidgets import QApplication, QWidget
import sys
import threading
from extension.utils import get_tools
from peewee import *
get_tools()
from file_cabinet.models import Drawer
app = QApplication(sys.argv)
from main.ui import MainWindow
from main.settings import tool
window = MainWindow()
window.show()
from upd.conf import settings
# Fix: the original leaked the file handle via a bare open(); use a context
# manager to render the Qt stylesheet template and close the file promptly.
with open("stylesheet/normal.qss", "r") as qss_file:
    rendered = qss_file.read().format(**settings.to_dict())
app.setStyleSheet(rendered)
# Run the (potentially slow) window load on a background thread so the UI
# stays responsive while the event loop starts.
t = threading.Thread(target=window.load)
t.start()
sys.exit(app.exec())
| 21.555556
| 81
| 0.786942
|
5633c380a7dc2a7ca5b35c3366ecc17a080bb1f9
| 3,120
|
py
|
Python
|
lecture5/lecture5/lecture5/settings.py
|
CSUChico-CINS465/CSCI465-Fall2016-Lecture-Examples
|
332df2821aef74c6522c53278e28ceb27cbe2fe6
|
[
"MIT"
] | null | null | null |
lecture5/lecture5/lecture5/settings.py
|
CSUChico-CINS465/CSCI465-Fall2016-Lecture-Examples
|
332df2821aef74c6522c53278e28ceb27cbe2fe6
|
[
"MIT"
] | null | null | null |
lecture5/lecture5/lecture5/settings.py
|
CSUChico-CINS465/CSCI465-Fall2016-Lecture-Examples
|
332df2821aef74c6522c53278e28ceb27cbe2fe6
|
[
"MIT"
] | null | null | null |
"""
Django settings for lecture5 project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '17z=%xm7!wgdx)dukklj5vx0jh%c)%o2vf!4$6)im2!s0u0-7l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'helloworld',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lecture5.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lecture5.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| 25.57377
| 91
| 0.696474
|
58fabe882b4723cd93f0ad8d315ef29264126180
| 1,596
|
py
|
Python
|
meeting/api.py
|
Siddhant6078/meeting
|
6f602f3c5fa68b4aaed863f321639e356238b10e
|
[
"MIT"
] | null | null | null |
meeting/api.py
|
Siddhant6078/meeting
|
6f602f3c5fa68b4aaed863f321639e356238b10e
|
[
"MIT"
] | null | null | null |
meeting/api.py
|
Siddhant6078/meeting
|
6f602f3c5fa68b4aaed863f321639e356238b10e
|
[
"MIT"
] | null | null | null |
import frappe
from frappe import _
from frappe.utils import nowdate, add_days
@frappe.whitelist()
def send_invitation_emails(meeting):
    """Email the invitation message to every attendee of *meeting*.

    Only meetings in the 'Planned' state are sent; on success the meeting is
    moved to 'Invitation Sent'.  The caller must hold the 'email' permission.
    """
    doc = frappe.get_doc("Meeting", meeting)
    doc.check_permission('email')
    # Guard clause: anything other than a planned meeting is rejected.
    if doc.status != 'Planned':
        frappe.msgprint(_("Meeting Status must be 'Planned'"))
        return
    recipients = [row.attendee for row in doc.attendees]
    frappe.sendmail(
        recipients=recipients,
        sender=frappe.session.user,
        subject=doc.title,
        message=doc.invitation_message,
        reference_doctype=doc.doctype,
        reference_name=doc.name,
    )
    doc.status = 'Invitation Sent'
    doc.save()
    frappe.msgprint(_("Invitation Sent"))
@frappe.whitelist()
def get_meetings(start, end):
    """Return meetings between *start* and *end* as calendar event rows.

    Each row carries ``start``/``end`` timestamps built from the meeting's
    date and time fields, plus name, title, status and an ``all_day`` flag.

    :raises frappe.PermissionError: if the user cannot read Meeting.
    """
    if not frappe.has_permission("Meeting", "read"):
        raise frappe.PermissionError
    # Parameterized query: start/end are bound, not interpolated.
    # Fix: dropped the stray ``debug=1`` that dumped the SQL query and plan
    # into the response on every call.
    return frappe.db.sql("""select
        timestamp(date, from_time) as start,
        timestamp(date, to_time) as end,
        name,
        title,
        status,
        0 as all_day
    from `tabMeeting`
    where `date` between %(start)s and %(end)s""", {
        "start": start,
        "end": end
    }, as_dict=True)
def make_orientation_meeting(doc, method):
    """Hook: schedule a 30-minute orientation meeting for a newly added User.

    Runs as a document-event handler, hence the (doc, method) signature.
    """
    orientation = frappe.get_doc({
        "doctype": "Meeting",
        "title": "Orientation for {0}".format(doc.first_name),
        "date": add_days(nowdate(), 1),  # tomorrow
        "from_time": "09:00",
        "to_time": "09:30",
        "attendees": [{"attendee": doc.name}],
    })
    # System Manager might not have permission to create a Meeting
    orientation.flags.ignore_permissions = True
    orientation.insert()
    frappe.msgprint(_("Orientation meeting created"))
| 26.6
| 63
| 0.708647
|
a6a06f44c7356b831c43a7d61b3e45d00997ff10
| 4,049
|
py
|
Python
|
pictures/settings.py
|
Tururocks101/Gallery
|
90826d38505b31a89e7fad65ac7fb9fd70c9798f
|
[
"MIT"
] | 1
|
2020-12-10T20:40:36.000Z
|
2020-12-10T20:40:36.000Z
|
pictures/settings.py
|
Tururocks101/Gallery
|
90826d38505b31a89e7fad65ac7fb9fd70c9798f
|
[
"MIT"
] | 7
|
2021-03-30T14:09:09.000Z
|
2021-09-22T19:32:47.000Z
|
pictures/settings.py
|
Tururocks101/Gallery
|
90826d38505b31a89e7fad65ac7fb9fd70c9798f
|
[
"MIT"
] | 2
|
2020-10-01T15:21:24.000Z
|
2022-03-27T12:38:48.000Z
|
"""
Django settings for gallery project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODE = config('MODE', default='dev')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'picha.apps.PichaConfig',
'bootstrap4',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
if config('MODE')=='dev':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
django_heroku.settings(locals())
| 27.174497
| 91
| 0.6898
|
7b888c78e9013d97c634028ab81ce2517a032cf9
| 9,581
|
py
|
Python
|
build/lib/cryspy_editor/widgets/interactive_graph_mod_mono.py
|
ikibalin/cryspy_editor
|
dbc84518c8e0de61185f9c66586ccc07af16350c
|
[
"MIT"
] | null | null | null |
build/lib/cryspy_editor/widgets/interactive_graph_mod_mono.py
|
ikibalin/cryspy_editor
|
dbc84518c8e0de61185f9c66586ccc07af16350c
|
[
"MIT"
] | null | null | null |
build/lib/cryspy_editor/widgets/interactive_graph_mod_mono.py
|
ikibalin/cryspy_editor
|
dbc84518c8e0de61185f9c66586ccc07af16350c
|
[
"MIT"
] | null | null | null |
__author__ = 'ikibalin'
__version__ = "2019_09_10"
import os
import sys
import numpy
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
import matplotlib
import matplotlib.backends.backend_qt5agg
import matplotlib.figure
import matplotlib.pyplot
class cwind_central(QtWidgets.QMainWindow):
    """Top-level window that hosts the interactive graph widget."""

    def __init__(self, fname):
        super(cwind_central, self).__init__()
        # Window caption is kept on the instance, as in the original API.
        self.title = "program 'Graph'"
        self.setWindowTitle(self.title)
        central = cwidg_central(fname)
        self.setCentralWidget(central)
        self.show()
class cwidg_central(QtWidgets.QWidget):
    """Central widget: a Graph canvas plus a 'values to clipboard' button."""

    def __init__(self, ffig_full=None):
        super(cwidg_central, self).__init__()
        self.init_layout_central(ffig_full)
        self.setLayout(self.layout_central)
        self.ffig_full = ffig_full

    def init_layout_central(self, ffig_full):
        """Build the main layout: graph on the left, controls on the right."""
        lay_main = QtWidgets.QHBoxLayout()
        self.graph = Graph(self, width=5, height=4)
        lay_1 = QtWidgets.QVBoxLayout()
        _b_gm = QtWidgets.QPushButton("values to clipboard")
        _b_gm.clicked.connect(self.give_values)
        lay_1.addStretch(1)
        lay_1.addWidget(_b_gm)
        lay_main.addWidget(self.graph)
        lay_main.addLayout(lay_1)
        self.layout_central = lay_main

    def plot_file(self, x, y_exp, y_sig=None):
        """Clear the axes and plot a single curve (optionally with sigmas)."""
        self.graph.ax_pri.cla()
        self.graph.data_x = x
        self.graph.data_y = y_exp
        self.graph.data_sy = y_sig
        self.graph.set_data_to_graph()

    def plot_lines(self, x, l_y=None, l_y_sig=None):
        """Clear the axes and plot several curves; sigmas are only accepted
        when one sigma sequence is given per curve."""
        self.graph.ax_pri.cla()
        self.graph.data_x = x
        if l_y is not None:
            self.graph.data_l_y = l_y
        if ((l_y_sig is not None) & (self.graph.data_l_y is not None)):
            if (len(l_y_sig) == len(self.graph.data_l_y)):
                self.graph.data_l_y_sig = l_y_sig
        self.graph.set_data_to_graph()

    def give_values(self):
        """Copy the current x / y / sigma columns to the system clipboard.

        Fix: the original tested ``numpy.isnan(_) | (_ is None)``; ``|`` does
        not short-circuit, so ``numpy.isnan(None)`` raised TypeError whenever
        a value was None.  Test for None first with ``or``.
        """
        val_x = self.graph.data_x
        val_y = self.graph.data_y
        val_sy = self.graph.data_sy
        sval_x = [f"{_:.3f}" for _ in val_x]
        sval_y = [" None" if (_ is None or numpy.isnan(_)) else f"{_:.3f}" for _ in val_y]
        if val_sy is None:
            sval_sy = ["" for _ in val_y]
        else:
            sval_sy = [" None" if (_ is None or numpy.isnan(_)) else f"{_:.3f}" for _ in val_sy]
        ls_out = [f"{_1:} {_2:} {_3:}" for _1, _2, _3 in zip(sval_x, sval_y, sval_sy)]
        cb = QtWidgets.QApplication.clipboard()
        cb.clear(mode=cb.Clipboard)
        cb.setText("\n".join(ls_out), mode=cb.Clipboard)
class Graph(matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg):
    """Matplotlib canvas with mouse-driven zoom.

    Right-click twice to define a zoom rectangle; middle-click to rescale to
    the data range; any other button cancels a pending zoom selection.
    """

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        fig = matplotlib.figure.Figure(figsize=(width, height), dpi=dpi)
        fig.subplots_adjust(left=0.07,
                            right=0.97,
                            top=0.97,
                            bottom=0.07,
                            wspace=0.0,
                            hspace=0.0)
        super(Graph, self).__init__(fig)
        # (first_corner_set, second_corner_set) for the right-click zoom box.
        self.info_press = (False, False)
        self.data_x = []
        self.data_y = []
        self.data_sy = []
        self.data_l_y = []
        self.data_l_y_sig = []
        self.control = parent
        self.figure = fig
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        self.ax_pri = fig.add_subplot(111)
        fig.canvas.mpl_connect("button_press_event", self.onclick)

    def set_data_to_graph(self):
        """Draw the stored data on the primary axes (does not clear them)."""
        col_1 = "#000000"
        _x = self.data_x
        _y = self.data_y
        _y_sig = self.data_sy
        l_y = self.data_l_y
        l_y_sig = self.data_l_y_sig
        if (len(l_y) != 0):
            if (len(l_y) == len(l_y_sig)):
                for _y, _y_sig in zip(l_y, l_y_sig):
                    if _y_sig is None:
                        self.ax_pri.plot(_x, _y, "k-", linewidth=1.0)
                    else:
                        self.ax_pri.errorbar(_x, _y, yerr=_y_sig, ecolor=col_1, fmt='o', color=col_1, linewidth=0.5)
            else:
                for _y in l_y:
                    self.ax_pri.plot(_x, _y, "k-", linewidth=1.0)
        else:
            if _y_sig is None:
                self.ax_pri.plot(_x, _y, "k-", linewidth=1.0)
            else:
                # NOTE(review): this plots the sigma values themselves (the
                # errorbar call was commented out in the original) -- looks
                # deliberate but worth confirming.
                self.ax_pri.plot(_x, _y_sig, "b-", linewidth=1.0)
        self.draw()

    def onclick(self, event):
        """Mouse handler: right-click x2 = zoom box; middle-click = autoscale.

        Fix: the original wrote ``self.info_press == (False, False)`` (a
        no-op comparison) in the middle-click and fallback branches where an
        assignment was intended, so the zoom-selection state was never reset.
        """
        if event.button == 3:
            if self.info_press == (False, False):
                # First zoom corner.
                self.info_press = (True, False)
                self.xlim = [event.xdata]
                self.ylim = [event.ydata]
            elif self.info_press == (True, False):
                # Second zoom corner.
                self.info_press = (True, True)
                self.xlim.append(event.xdata)
                self.ylim.append(event.ydata)
            if self.info_press == (True, True):
                self.info_press = (False, False)
                xlim = (min(self.xlim), max(self.xlim))
                ylim = (min(self.ylim), max(self.ylim))
                self.ax_pri.set_xlim(xlim)
                self.ax_pri.set_ylim(ylim)
                self.xlim = []
                self.ylim = []
                self.draw()
        elif event.button == 2:
            self.info_press = (False, False)  # was ``==`` (no-op) in original
            x_min = min(self.data_x)
            x_max = max(self.data_x)
            if len(self.data_l_y) != 0:
                # Extremes over all curves, ignoring NaN / None entries.
                l_y_min, l_y_max = [], []
                for _y in self.data_l_y:
                    if isinstance(_y, numpy.ndarray):
                        _y_min = min(_y[numpy.logical_not(numpy.isnan(_y))])
                        _y_max = max(_y[numpy.logical_not(numpy.isnan(_y))])
                    else:
                        _y_min = min([_1 for _1 in _y if _1 is not None])
                        _y_max = max([_1 for _1 in _y if _1 is not None])
                    l_y_min.append(_y_min)
                    l_y_max.append(_y_max)
                y_min = min(l_y_min)
                y_max = max(l_y_max)
            else:
                y_min = min(self.data_y)
                y_max = max(self.data_y)
            # Pad the data range by 5% on each side.
            x_diff = x_max - x_min
            y_diff = y_max - y_min
            x_lim = (x_min - 0.05 * x_diff, x_max + 0.05 * x_diff)
            y_lim = (y_min - 0.05 * y_diff, y_max + 0.05 * y_diff)
            self.ax_pri.set_xlim(x_lim)
            self.ax_pri.set_ylim(y_lim)
            self.xlim = []
            self.ylim = []
            self.draw()
        else:
            self.info_press = (False, False)  # was ``==`` (no-op) in original
            self.xlim = []
            self.ylim = []
# Standalone demo entry point.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    # NOTE(review): the first assignment is dead -- fname is immediately
    # overwritten on the next line.
    fname = "NaCaAlF_exp.out"
    fname = "oHoTi_p.dat"
    ex = cwind_central(fname)
    sys.exit(app.exec_())
"""
def clean_fig(self):
self.ldata = []
self.ldata_simple = []
self.pcanvas.figure.clf()
self.pcanvas.make_plot()
self.pcanvas.draw()
def replot_fig(self):
lhelp=['#005555','#BB5555','#550055','#55BB55','#555500','#5555BB']
for iddata, ddata in enumerate(self.ldata_simple):
lflags = [ (isinstance(hh1,(float, int))&isinstance(hh2,(float, int))&isinstance(hh3,(float, int))&isinstance(hh4,(float, int))) for hh1,hh2,hh3,hh4 in zip(ddata["IntUP"],ddata["sIntUP"],ddata["IntDOWN"],ddata["sIntDOWN"])]
lx = [hh for hh, flag in zip(ddata["ttheta"], lflags) if flag]
li_u_exp = [hh for hh, flag in zip(ddata["IntUP"], lflags) if flag]
lsi_u_exp = [hh for hh, flag in zip(ddata["sIntUP"], lflags) if flag]
li_d_exp = [hh for hh, flag in zip(ddata["IntDOWN"], lflags) if flag]
lsi_d_exp = [hh for hh, flag in zip(ddata["sIntDOWN"], lflags) if flag]
#li_u_exp = ddata["IntUP"]
#lsi_u_exp = ddata["sIntUP"]
#li_d_exp = ddata["IntDOWN"]
#lsi_d_exp = ddata["sIntDOWN"]
ldiff_exp = [hh1-hh2 for hh1, hh2 in zip(li_u_exp, li_d_exp)]
lsdiff_exp = [(hh1**2+hh2**2)**0.5 for hh1, hh2 in zip(lsi_u_exp, lsi_d_exp)]
lcolors=[lhelp[iddata] for hh in range(3)]
self.pcanvas.plot(lx,[li_u_exp,lsi_u_exp,None],[li_d_exp,lsi_d_exp,None],[ldiff_exp,lsdiff_exp,None],None,lcolors)
xmin = float(self.le_xmin.text())
xmax = float(self.le_xmax.text())
ymin = float(self.le_ymin.text())
ymax = float(self.le_ymax.text())
self.pcanvas.set_limits(xmin,xmax,ymin,ymax)
self.pcanvas.draw()
"""
| 36.568702
| 235
| 0.530007
|
e88136b2f099d19ba422b16ae59f18bde23665b8
| 4,177
|
py
|
Python
|
histomicstk/features/compute_gradient_features.py
|
Leengit/HistomicsTK
|
7105b8341647a465f5fa12c3bd4bf181cb0beed7
|
[
"Apache-2.0"
] | null | null | null |
histomicstk/features/compute_gradient_features.py
|
Leengit/HistomicsTK
|
7105b8341647a465f5fa12c3bd4bf181cb0beed7
|
[
"Apache-2.0"
] | null | null | null |
histomicstk/features/compute_gradient_features.py
|
Leengit/HistomicsTK
|
7105b8341647a465f5fa12c3bd4bf181cb0beed7
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
def compute_gradient_features(im_label, im_intensity,
                              num_hist_bins=10, rprops=None):
    """Calculate per-object gradient features from an intensity image.

    Parameters
    ----------
    im_label : array_like
        Labeled mask: the value of a pixel is the ID of the object it
        belongs to; non-zero values are foreground objects.
    im_intensity : array_like
        Intensity image.
    num_hist_bins : int, optional
        Number of bins of the per-object gradient-magnitude histogram used
        for the energy and entropy features. Default is 10.
    rprops : output of skimage.measure.regionprops, optional
        Pass ``skimage.measure.regionprops(im_label)`` to avoid recomputing
        region properties here.

    Returns
    -------
    fdata : pandas.DataFrame
        One row per object with the columns:
        Gradient.Mag.Mean, Gradient.Mag.Std, Gradient.Mag.Skewness,
        Gradient.Mag.Kurtosis (value is -3 when all values are equal),
        Gradient.Mag.HistEntropy, Gradient.Mag.HistEnergy,
        Gradient.Canny.Sum, Gradient.Canny.Mean.

    References
    ----------
    .. [#] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
       and statistics tables and formulae," Crc Press, 1999.
    """
    import pandas as pd
    import scipy.stats
    from skimage.feature import canny
    from skimage.measure import regionprops

    feature_list = [
        'Gradient.Mag.Mean',
        'Gradient.Mag.Std',
        'Gradient.Mag.Skewness',
        'Gradient.Mag.Kurtosis',
        'Gradient.Mag.HistEntropy',
        'Gradient.Mag.HistEnergy',
        'Gradient.Canny.Sum',
        'Gradient.Canny.Mean',
    ]

    if rprops is None:
        rprops = regionprops(im_label)

    # One all-zero row per labeled object, filled in below.
    fdata = pd.DataFrame(np.zeros((len(rprops), len(feature_list))),
                         columns=feature_list)

    # Gradient magnitude and Canny edge map of the whole image, computed once.
    grad_rows, grad_cols = np.gradient(im_intensity)
    grad_mag = np.sqrt(grad_rows ** 2 + grad_cols ** 2)
    edge_map = canny(im_intensity)

    for idx, region in enumerate(rprops):
        rr = region.coords[:, 0]
        cc = region.coords[:, 1]
        # Sorted gradient magnitudes of this object's pixels.
        grads = np.sort(grad_mag[rr, cc])
        # Histogram -> probabilities for the entropy/energy features.
        hist, _bins = np.histogram(grads, bins=num_hist_bins)
        prob = hist / np.sum(hist, dtype=np.float32)
        edge_sum = np.sum(edge_map[rr, cc]).astype('float')

        fdata.at[idx, 'Gradient.Mag.Mean'] = np.mean(grads)
        fdata.at[idx, 'Gradient.Mag.Std'] = np.std(grads)
        fdata.at[idx, 'Gradient.Mag.Skewness'] = scipy.stats.skew(grads)
        fdata.at[idx, 'Gradient.Mag.Kurtosis'] = scipy.stats.kurtosis(grads)
        fdata.at[idx, 'Gradient.Mag.HistEntropy'] = scipy.stats.entropy(prob)
        fdata.at[idx, 'Gradient.Mag.HistEnergy'] = np.sum(prob ** 2)
        fdata.at[idx, 'Gradient.Canny.Sum'] = edge_sum
        fdata.at[idx, 'Gradient.Canny.Mean'] = edge_sum / len(grads)

    return fdata
| 31.406015
| 79
| 0.644721
|
baa2716054483b4fd526567d27e2bc671a4bd40b
| 777
|
py
|
Python
|
forum/urls.py
|
darkun7/Anonim-Forum
|
515d73535c7156e87adee08b39534b6d36f8162e
|
[
"MIT"
] | null | null | null |
forum/urls.py
|
darkun7/Anonim-Forum
|
515d73535c7156e87adee08b39534b6d36f8162e
|
[
"MIT"
] | null | null | null |
forum/urls.py
|
darkun7/Anonim-Forum
|
515d73535c7156e87adee08b39534b6d36f8162e
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# URL routes for the anonymous forum app: thread CRUD, per-thread comments,
# and an author page.  <str:id> / <str:thread_id> are passed to the views
# as string primary keys.
urlpatterns=[
    path('', views.home, name="home"),
    path('thread/', views.post, name="post"),
    path('thread/create/', views.createPost, name="createpost"),
    path('thread/show/<str:id>', views.showPost, name="showpost"),
    path('thread/edit/<str:id>', views.editPost, name="editpost"),
    path('thread/delete/<str:id>', views.deletePost, name="deletepost"),
    # Comment routes are scoped to their parent thread id.
    path('komentar/create/<str:thread_id>', views.createKomentar, name="createkomentar"),
    path('komentar/edit/<str:thread_id>/<str:id>', views.editKomentar, name="editkomentar"),
    path('komentar/delete/<str:thread_id>/<str:id>', views.deleteKomentar, name="deletekomentar"),
    # Lists every thread written by the given author name.
    path('author/<str:penulis>', views.authorThreads, name="author")
]
| 43.166667
| 98
| 0.687259
|
0625e50f9a518489a144bb85e6ef1dd7a10e9bf5
| 282
|
py
|
Python
|
HACKEREARTH/Data Structures/Arrays/1-D/chargedUpArray.py
|
belikesayantan/daily-problem-solving
|
f6960edeb151838f00c9444d8232b99b48769784
|
[
"MIT"
] | null | null | null |
HACKEREARTH/Data Structures/Arrays/1-D/chargedUpArray.py
|
belikesayantan/daily-problem-solving
|
f6960edeb151838f00c9444d8232b99b48769784
|
[
"MIT"
] | 2
|
2021-08-24T08:45:10.000Z
|
2021-11-11T06:53:12.000Z
|
HACKEREARTH/Data Structures/Arrays/1-D/chargedUpArray.py
|
belikesayantan/daily-problem-solving
|
f6960edeb151838f00c9444d8232b99b48769784
|
[
"MIT"
] | null | null | null |
def solve (A,N):
sum = 0
for i in range(N):
if A[i] >= pow(2, N) / 2:
sum += A[i]
return int(sum % 1000000007)
# Competitive-programming driver: first line is the number of test cases;
# each case supplies N and then N space-separated integers.
num_cases = int(input())
for _case in range(num_cases):
    size = int(input())
    values = list(map(int, input().split()))
    print(solve(values, size))
| 20.142857
| 41
| 0.475177
|
50b28c95c0a694178abdcec113d25cddf565d45c
| 1,475
|
py
|
Python
|
test/test_base.py
|
yangxuan0261/pyecharts
|
31cbd4dcd8c77988d1678a05b7f3bd0d845160bd
|
[
"MIT"
] | null | null | null |
test/test_base.py
|
yangxuan0261/pyecharts
|
31cbd4dcd8c77988d1678a05b7f3bd0d845160bd
|
[
"MIT"
] | null | null | null |
test/test_base.py
|
yangxuan0261/pyecharts
|
31cbd4dcd8c77988d1678a05b7f3bd0d845160bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
from __future__ import unicode_literals
import json
from pyecharts import Bar
def test_embed_option():
    """render_embed() must emit only the chart snippet, not a full HTML page."""
    chart_title = "柱状图数据堆叠示例"
    categories = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    series_a = [5, 20, 36, 10, 75, 90]
    series_b = [10, 25, 8, 60, 20, 80]
    chart = Bar(chart_title)
    chart.add("商家A", categories, series_a, is_stack=True)
    chart.add("商家B", categories, series_b, is_stack=True)
    snippet = chart.render_embed()
    # The title must appear JSON-encoded inside the embedded options ...
    assert json.dumps(chart_title) in snippet
    # ... without the surrounding document skeleton.
    assert "<html>" not in snippet
    assert "<body>" not in snippet
def test_numpy_array():
    """A numpy array must be accepted as series data, like a plain list."""
    import numpy as np
    chart_title = "柱状图数据堆叠示例"
    categories = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    values = np.array([5, 20, 36, 10, 75, 90])
    chart = Bar(chart_title)
    chart.add("商家A", categories, values, is_stack=True)
    assert json.dumps(chart_title) in chart.render_embed()
def test_pandas_dataframe():
    """pandas DatetimeIndex and DataFrame values must be usable as axis data."""
    import pandas as pd
    import numpy as np
    chart_title = 'Bar chart'
    date_index = pd.date_range('3/8/2017', periods=6, freq='M')
    profit_frame = pd.DataFrame(np.random.randn(6), index=date_index)
    loss_frame = pd.DataFrame(np.random.randn(6), index=date_index)
    # Flatten the single-column frames into plain value lists.
    profit_values = [row[0] for row in profit_frame.values]
    loss_values = [row[0] for row in loss_frame.values]
    chart = Bar(chart_title, 'Profit and loss situation')
    chart.add('profit', profit_frame.index, profit_values)
    chart.add('loss', loss_frame.index, loss_values)
    assert chart_title in chart.render_embed()
| 25.877193
| 58
| 0.623729
|
b6c2f30fcf9226c3e38a620a54ca3f340d89c63c
| 2,849
|
py
|
Python
|
examples/scroll-text-in-my-font.py
|
teknolog2000/scroll-phat
|
c5eaf447944deb9b1548e4839751afbe0e009245
|
[
"MIT"
] | 115
|
2015-12-04T13:29:34.000Z
|
2021-11-11T14:17:07.000Z
|
examples/scroll-text-in-my-font.py
|
PDServices/scroll-phat
|
9236c7c6359ed0b23721a8229bdf0211a49e540e
|
[
"MIT"
] | 63
|
2015-11-28T22:30:20.000Z
|
2022-02-09T13:10:59.000Z
|
examples/scroll-text-in-my-font.py
|
PDServices/scroll-phat
|
9236c7c6359ed0b23721a8229bdf0211a49e540e
|
[
"MIT"
] | 76
|
2015-11-26T22:29:05.000Z
|
2022-01-26T18:19:55.000Z
|
#!/usr/bin/env python
import os
import sys
import time
import scrollphat
# -----------------------------------------------------------------------------
# This example uses a custome font set, read from
# the image file my-font, which has the lower-case
# letters replaced with symbols.
# For example:
# a - tick
# b - cross
# f - flag
# h - love heart
# j - smiley face
# u,d,l,r - arrows (up down left and right respectively)
# -------------------------------------------------------------------------------
try:
from PIL import Image
except ImportError:
exit("This script requires the pillow module\nInstall with: sudo pip install pillow")
# -----------------------------------------------------------------------------
# This funtion will read the supplied image
# and convert it into font data for Scoll pHAT.
# The expect format of the image is as follows:
# Each font image contains a 16 x 6 table of squares,
# one for each ASCII character, starting with a space (0x20) and
# incrementing from left to right. Each square is 6x6 box
# boarding the 5x5 image of the individual characters.
def convert_png2font(font_file):
    """Build a Scroll pHAT font dict from a 16x6 character-grid PNG.

    The image holds one 6x6 cell per ASCII character, starting at
    space (0x20) and increasing left-to-right, top-to-bottom.  Each cell
    is a 1px border around a 5x5 glyph.  Returns a mapping of character
    code -> list of 5-bit column masks (bit 0 = top row), with blank
    right-hand columns trimmed off.
    """
    glyphs = {}
    image = Image.open(os.path.join(os.path.dirname(__file__), font_file))
    grid_cols, grid_rows = 16, 6
    margin = 1
    code = 0x20
    for row in range(grid_rows):
        for col in range(grid_cols):
            columns = []
            for x in range(5):
                mask = 0
                for y in range(5):
                    pixel = image.getpixel((margin + col * 6 + x, margin + row * 6 + y))
                    if pixel == 1:
                        mask |= 1 << y
                columns.append(mask)
            # Drop empty columns from the right so glyphs pack tightly.
            while columns and columns[-1] == 0:
                columns.pop()
            glyphs[code] = columns
            code += 1
    return glyphs
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Main program block
# -----------------------------------------------------------------------------
# Configure the display and install the custom font before scrolling.
scrollphat.set_brightness(2)
scrollphat.set_rotate(True)
scrollphat.load_font(convert_png2font('my-font.png'))
# Require exactly one argument (the message to scroll); otherwise show usage.
# NOTE(review): "lettters" in the usage text is a typo, but it is a runtime
# string and left untouched here.
if len(sys.argv) != 2:
    print("""
Usage: python scroll-text-in-my-font.py "MESSAGE"
Press CTRL-C to exit!
In this example the lettters are read from the "my-font.png" file,
where lower-case letters are replaced with fun symbols and icons.
""")
    sys.exit(0)
scrollphat.write_string(sys.argv[1], 11)
# Scroll one column every 0.1s until the user interrupts with CTRL-C,
# then blank the display before exiting.
while True:
    try:
        scrollphat.scroll()
        time.sleep(0.1)
    except KeyboardInterrupt:
        scrollphat.clear()
        sys.exit(-1)
| 30.308511
| 103
| 0.525799
|
aa6eb3bef0eeef2d2f04e3870b9588b839b6ae02
| 14,525
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_08_01/models/network_management_client_enums.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_08_01/models/network_management_client_enums.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_08_01/models/network_management_client_enums.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
# --- Core networking value sets (IP allocation, security rules, routing,
# --- public IP SKUs) followed by Application Gateway enums.  All enums in
# --- this module are AutoRest-generated; members are (python_name, wire
# --- value) pairs and must not be altered by hand.
class IPAllocationMethod(str, Enum):
    static = "Static"
    dynamic = "Dynamic"
class SecurityRuleProtocol(str, Enum):
    tcp = "Tcp"
    udp = "Udp"
    asterisk = "*"
class SecurityRuleAccess(str, Enum):
    allow = "Allow"
    deny = "Deny"
class SecurityRuleDirection(str, Enum):
    inbound = "Inbound"
    outbound = "Outbound"
class RouteNextHopType(str, Enum):
    virtual_network_gateway = "VirtualNetworkGateway"
    vnet_local = "VnetLocal"
    internet = "Internet"
    virtual_appliance = "VirtualAppliance"
    none = "None"
class PublicIPAddressSkuName(str, Enum):
    basic = "Basic"
    standard = "Standard"
class IPVersion(str, Enum):
    ipv4 = "IPv4"
    ipv6 = "IPv6"
class TransportProtocol(str, Enum):
    udp = "Udp"
    tcp = "Tcp"
    all = "All"
# --- Application Gateway enums ---
class ApplicationGatewayProtocol(str, Enum):
    http = "Http"
    https = "Https"
class ApplicationGatewayCookieBasedAffinity(str, Enum):
    enabled = "Enabled"
    disabled = "Disabled"
class ApplicationGatewayBackendHealthServerHealth(str, Enum):
    unknown = "Unknown"
    up = "Up"
    down = "Down"
    partial = "Partial"
    draining = "Draining"
class ApplicationGatewaySkuName(str, Enum):
    standard_small = "Standard_Small"
    standard_medium = "Standard_Medium"
    standard_large = "Standard_Large"
    waf_medium = "WAF_Medium"
    waf_large = "WAF_Large"
    standard_v2 = "Standard_v2"
    waf_v2 = "WAF_v2"
class ApplicationGatewayTier(str, Enum):
    standard = "Standard"
    waf = "WAF"
    standard_v2 = "Standard_v2"
    waf_v2 = "WAF_v2"
class ApplicationGatewaySslProtocol(str, Enum):
    tl_sv1_0 = "TLSv1_0"
    tl_sv1_1 = "TLSv1_1"
    tl_sv1_2 = "TLSv1_2"
class ApplicationGatewaySslPolicyType(str, Enum):
    predefined = "Predefined"
    custom = "Custom"
class ApplicationGatewaySslPolicyName(str, Enum):
    app_gw_ssl_policy20150501 = "AppGwSslPolicy20150501"
    app_gw_ssl_policy20170401 = "AppGwSslPolicy20170401"
    app_gw_ssl_policy20170401_s = "AppGwSslPolicy20170401S"
# TLS cipher suites selectable in an Application Gateway SSL policy.
class ApplicationGatewaySslCipherSuite(str, Enum):
    tls_ecdhe_rsa_with_aes_256_cbc_sha384 = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"
    tls_ecdhe_rsa_with_aes_128_cbc_sha256 = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
    tls_ecdhe_rsa_with_aes_256_cbc_sha = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
    tls_ecdhe_rsa_with_aes_128_cbc_sha = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"
    tls_dhe_rsa_with_aes_256_gcm_sha384 = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"
    tls_dhe_rsa_with_aes_128_gcm_sha256 = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
    tls_dhe_rsa_with_aes_256_cbc_sha = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"
    tls_dhe_rsa_with_aes_128_cbc_sha = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"
    tls_rsa_with_aes_256_gcm_sha384 = "TLS_RSA_WITH_AES_256_GCM_SHA384"
    tls_rsa_with_aes_128_gcm_sha256 = "TLS_RSA_WITH_AES_128_GCM_SHA256"
    tls_rsa_with_aes_256_cbc_sha256 = "TLS_RSA_WITH_AES_256_CBC_SHA256"
    tls_rsa_with_aes_128_cbc_sha256 = "TLS_RSA_WITH_AES_128_CBC_SHA256"
    tls_rsa_with_aes_256_cbc_sha = "TLS_RSA_WITH_AES_256_CBC_SHA"
    tls_rsa_with_aes_128_cbc_sha = "TLS_RSA_WITH_AES_128_CBC_SHA"
    tls_ecdhe_ecdsa_with_aes_256_gcm_sha384 = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
    tls_ecdhe_ecdsa_with_aes_128_gcm_sha256 = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
    tls_ecdhe_ecdsa_with_aes_256_cbc_sha384 = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"
    tls_ecdhe_ecdsa_with_aes_128_cbc_sha256 = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
    tls_ecdhe_ecdsa_with_aes_256_cbc_sha = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
    tls_ecdhe_ecdsa_with_aes_128_cbc_sha = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
    tls_dhe_dss_with_aes_256_cbc_sha256 = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256"
    tls_dhe_dss_with_aes_128_cbc_sha256 = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256"
    tls_dhe_dss_with_aes_256_cbc_sha = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"
    tls_dhe_dss_with_aes_128_cbc_sha = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"
    tls_rsa_with_3_des_ede_cbc_sha = "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
class ApplicationGatewayCustomErrorStatusCode(str, Enum):
    http_status403 = "HttpStatus403"
    http_status502 = "HttpStatus502"
class ApplicationGatewayRequestRoutingRuleType(str, Enum):
    basic = "Basic"
    path_based_routing = "PathBasedRouting"
class ApplicationGatewayRedirectType(str, Enum):
    permanent = "Permanent"
    found = "Found"
    see_other = "SeeOther"
    temporary = "Temporary"
class ApplicationGatewayOperationalState(str, Enum):
    stopped = "Stopped"
    starting = "Starting"
    running = "Running"
    stopping = "Stopping"
class ApplicationGatewayFirewallMode(str, Enum):
    detection = "Detection"
    prevention = "Prevention"
# --- Provisioning, Azure Firewall, ExpressRoute, load balancer and
# --- Network Watcher enums (AutoRest-generated; do not edit members).
class ProvisioningState(str, Enum):
    succeeded = "Succeeded"
    updating = "Updating"
    deleting = "Deleting"
    failed = "Failed"
class AzureFirewallRCActionType(str, Enum):
    allow = "Allow"
    deny = "Deny"
class AzureFirewallApplicationRuleProtocolType(str, Enum):
    http = "Http"
    https = "Https"
class AzureFirewallNatRCActionType(str, Enum):
    snat = "Snat"
    dnat = "Dnat"
class AzureFirewallNetworkRuleProtocol(str, Enum):
    tcp = "TCP"
    udp = "UDP"
    any = "Any"
    icmp = "ICMP"
class AuthorizationUseStatus(str, Enum):
    available = "Available"
    in_use = "InUse"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(str, Enum):
    not_configured = "NotConfigured"
    configuring = "Configuring"
    configured = "Configured"
    validation_needed = "ValidationNeeded"
class Access(str, Enum):
    allow = "Allow"
    deny = "Deny"
class ExpressRoutePeeringType(str, Enum):
    azure_public_peering = "AzurePublicPeering"
    azure_private_peering = "AzurePrivatePeering"
    microsoft_peering = "MicrosoftPeering"
class ExpressRoutePeeringState(str, Enum):
    disabled = "Disabled"
    enabled = "Enabled"
class CircuitConnectionStatus(str, Enum):
    connected = "Connected"
    connecting = "Connecting"
    disconnected = "Disconnected"
class ExpressRouteCircuitPeeringState(str, Enum):
    disabled = "Disabled"
    enabled = "Enabled"
class ExpressRouteCircuitSkuTier(str, Enum):
    standard = "Standard"
    premium = "Premium"
    basic = "Basic"
class ExpressRouteCircuitSkuFamily(str, Enum):
    unlimited_data = "UnlimitedData"
    metered_data = "MeteredData"
class ServiceProviderProvisioningState(str, Enum):
    not_provisioned = "NotProvisioned"
    provisioning = "Provisioning"
    provisioned = "Provisioned"
    deprovisioning = "Deprovisioning"
class ExpressRouteLinkConnectorType(str, Enum):
    lc = "LC"
    sc = "SC"
class ExpressRouteLinkAdminState(str, Enum):
    enabled = "Enabled"
    disabled = "Disabled"
class ExpressRoutePortsEncapsulation(str, Enum):
    dot1_q = "Dot1Q"
    qin_q = "QinQ"
class LoadBalancerSkuName(str, Enum):
    basic = "Basic"
    standard = "Standard"
class LoadDistribution(str, Enum):
    default = "Default"
    source_ip = "SourceIP"
    source_ip_protocol = "SourceIPProtocol"
class ProbeProtocol(str, Enum):
    http = "Http"
    tcp = "Tcp"
    https = "Https"
class NetworkOperationStatus(str, Enum):
    in_progress = "InProgress"
    succeeded = "Succeeded"
    failed = "Failed"
class EffectiveSecurityRuleProtocol(str, Enum):
    tcp = "Tcp"
    udp = "Udp"
    all = "All"
class EffectiveRouteSource(str, Enum):
    unknown = "Unknown"
    user = "User"
    virtual_network_gateway = "VirtualNetworkGateway"
    default = "Default"
class EffectiveRouteState(str, Enum):
    active = "Active"
    invalid = "Invalid"
class AssociationType(str, Enum):
    associated = "Associated"
    contains = "Contains"
class Direction(str, Enum):
    inbound = "Inbound"
    outbound = "Outbound"
class IpFlowProtocol(str, Enum):
    tcp = "TCP"
    udp = "UDP"
class NextHopType(str, Enum):
    internet = "Internet"
    virtual_appliance = "VirtualAppliance"
    virtual_network_gateway = "VirtualNetworkGateway"
    vnet_local = "VnetLocal"
    hyper_net_gateway = "HyperNetGateway"
    none = "None"
# "Pc*" enums below presumably relate to packet capture sessions
# (status/error values match capture semantics) — TODO confirm against
# the Azure Network Watcher REST API docs.
class PcProtocol(str, Enum):
    tcp = "TCP"
    udp = "UDP"
    any = "Any"
class PcStatus(str, Enum):
    not_started = "NotStarted"
    running = "Running"
    stopped = "Stopped"
    error = "Error"
    unknown = "Unknown"
class PcError(str, Enum):
    internal_error = "InternalError"
    agent_stopped = "AgentStopped"
    capture_failed = "CaptureFailed"
    local_file_failed = "LocalFileFailed"
    storage_failed = "StorageFailed"
class Protocol(str, Enum):
    tcp = "Tcp"
    http = "Http"
    https = "Https"
    icmp = "Icmp"
class HTTPMethod(str, Enum):
    get = "Get"
class Origin(str, Enum):
    local = "Local"
    inbound = "Inbound"
    outbound = "Outbound"
class Severity(str, Enum):
    error = "Error"
    warning = "Warning"
class IssueType(str, Enum):
    unknown = "Unknown"
    agent_stopped = "AgentStopped"
    guest_firewall = "GuestFirewall"
    dns_resolution = "DnsResolution"
    socket_bind = "SocketBind"
    network_security_rule = "NetworkSecurityRule"
    user_defined_route = "UserDefinedRoute"
    port_throttled = "PortThrottled"
    platform = "Platform"
class ConnectionStatus(str, Enum):
    unknown = "Unknown"
    connected = "Connected"
    disconnected = "Disconnected"
    degraded = "Degraded"
class ConnectionMonitorSourceStatus(str, Enum):
    # NOTE(review): "Uknown" is misspelled on the wire in this API version;
    # it is the generated value and must be kept as-is.
    uknown = "Uknown"
    active = "Active"
    inactive = "Inactive"
class ConnectionState(str, Enum):
    reachable = "Reachable"
    unreachable = "Unreachable"
    unknown = "Unknown"
class EvaluationState(str, Enum):
    not_started = "NotStarted"
    in_progress = "InProgress"
    completed = "Completed"
class VerbosityLevel(str, Enum):
    normal = "Normal"
    minimum = "Minimum"
    full = "Full"
# --- Virtual network, gateway, VPN/IPsec and Virtual WAN enums
# --- (AutoRest-generated; do not edit members).
class PublicIPPrefixSkuName(str, Enum):
    standard = "Standard"
class VirtualNetworkPeeringState(str, Enum):
    initiated = "Initiated"
    connected = "Connected"
    disconnected = "Disconnected"
class VirtualNetworkGatewayType(str, Enum):
    vpn = "Vpn"
    express_route = "ExpressRoute"
class VpnType(str, Enum):
    policy_based = "PolicyBased"
    route_based = "RouteBased"
class VirtualNetworkGatewaySkuName(str, Enum):
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
    vpn_gw1 = "VpnGw1"
    vpn_gw2 = "VpnGw2"
    vpn_gw3 = "VpnGw3"
    vpn_gw1_az = "VpnGw1AZ"
    vpn_gw2_az = "VpnGw2AZ"
    vpn_gw3_az = "VpnGw3AZ"
    er_gw1_az = "ErGw1AZ"
    er_gw2_az = "ErGw2AZ"
    er_gw3_az = "ErGw3AZ"
# Same value set as VirtualNetworkGatewaySkuName; the API models SKU name
# and tier as separate fields.
class VirtualNetworkGatewaySkuTier(str, Enum):
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
    vpn_gw1 = "VpnGw1"
    vpn_gw2 = "VpnGw2"
    vpn_gw3 = "VpnGw3"
    vpn_gw1_az = "VpnGw1AZ"
    vpn_gw2_az = "VpnGw2AZ"
    vpn_gw3_az = "VpnGw3AZ"
    er_gw1_az = "ErGw1AZ"
    er_gw2_az = "ErGw2AZ"
    er_gw3_az = "ErGw3AZ"
class VpnClientProtocol(str, Enum):
    ike_v2 = "IkeV2"
    sstp = "SSTP"
    open_vpn = "OpenVPN"
class IpsecEncryption(str, Enum):
    none = "None"
    des = "DES"
    des3 = "DES3"
    aes128 = "AES128"
    aes192 = "AES192"
    aes256 = "AES256"
    gcmaes128 = "GCMAES128"
    gcmaes192 = "GCMAES192"
    gcmaes256 = "GCMAES256"
class IpsecIntegrity(str, Enum):
    md5 = "MD5"
    sha1 = "SHA1"
    sha256 = "SHA256"
    gcmaes128 = "GCMAES128"
    gcmaes192 = "GCMAES192"
    gcmaes256 = "GCMAES256"
class IkeEncryption(str, Enum):
    des = "DES"
    des3 = "DES3"
    aes128 = "AES128"
    aes192 = "AES192"
    aes256 = "AES256"
    gcmaes256 = "GCMAES256"
    gcmaes128 = "GCMAES128"
class IkeIntegrity(str, Enum):
    md5 = "MD5"
    sha1 = "SHA1"
    sha256 = "SHA256"
    sha384 = "SHA384"
    gcmaes256 = "GCMAES256"
    gcmaes128 = "GCMAES128"
class DhGroup(str, Enum):
    none = "None"
    dh_group1 = "DHGroup1"
    dh_group2 = "DHGroup2"
    dh_group14 = "DHGroup14"
    dh_group2048 = "DHGroup2048"
    ecp256 = "ECP256"
    ecp384 = "ECP384"
    dh_group24 = "DHGroup24"
class PfsGroup(str, Enum):
    none = "None"
    pfs1 = "PFS1"
    pfs2 = "PFS2"
    pfs2048 = "PFS2048"
    ecp256 = "ECP256"
    ecp384 = "ECP384"
    pfs24 = "PFS24"
    pfs14 = "PFS14"
    pfsmm = "PFSMM"
class BgpPeerState(str, Enum):
    unknown = "Unknown"
    stopped = "Stopped"
    idle = "Idle"
    connecting = "Connecting"
    connected = "Connected"
class ProcessorArchitecture(str, Enum):
    amd64 = "Amd64"
    x86 = "X86"
class AuthenticationMethod(str, Enum):
    eaptls = "EAPTLS"
    eapmscha_pv2 = "EAPMSCHAPv2"
class VirtualNetworkGatewayConnectionStatus(str, Enum):
    unknown = "Unknown"
    connecting = "Connecting"
    connected = "Connected"
    not_connected = "NotConnected"
class VirtualNetworkGatewayConnectionType(str, Enum):
    ipsec = "IPsec"
    vnet2_vnet = "Vnet2Vnet"
    express_route = "ExpressRoute"
    vpn_client = "VPNClient"
class VirtualNetworkGatewayConnectionProtocol(str, Enum):
    ik_ev2 = "IKEv2"
    ik_ev1 = "IKEv1"
class OfficeTrafficCategory(str, Enum):
    optimize = "Optimize"
    optimize_and_allow = "OptimizeAndAllow"
    all = "All"
    none = "None"
class VpnGatewayTunnelingProtocol(str, Enum):
    ike_v2 = "IkeV2"
    open_vpn = "OpenVPN"
class VpnConnectionStatus(str, Enum):
    unknown = "Unknown"
    connecting = "Connecting"
    connected = "Connected"
    not_connected = "NotConnected"
class VirtualWanSecurityProviderType(str, Enum):
    external = "External"
    native = "Native"
class TunnelConnectionStatus(str, Enum):
    unknown = "Unknown"
    connecting = "Connecting"
    connected = "Connected"
    not_connected = "NotConnected"
class HubVirtualNetworkConnectionStatus(str, Enum):
    unknown = "Unknown"
    connecting = "Connecting"
    connected = "Connected"
    not_connected = "NotConnected"
| 20.602837
| 87
| 0.69136
|
b0380758cdb9a9f5ee5ec949a0cf3b752b91fef1
| 538
|
py
|
Python
|
rbacProject/apps/adm/urls_asset.py
|
xeroCBW/testmodel
|
6000d79f91d11bcf6ba76befb3a94a007231ecdc
|
[
"MIT"
] | null | null | null |
rbacProject/apps/adm/urls_asset.py
|
xeroCBW/testmodel
|
6000d79f91d11bcf6ba76befb3a94a007231ecdc
|
[
"MIT"
] | 54
|
2020-06-24T07:12:19.000Z
|
2022-03-12T00:43:57.000Z
|
rbacProject/apps/adm/urls_asset.py
|
xeroCBW/testmodel
|
6000d79f91d11bcf6ba76befb3a94a007231ecdc
|
[
"MIT"
] | null | null | null |
from django.urls import path
from adm import views_asset
# NOTE(review): the brackets in '[adm]' are unusual for a Django app_name;
# reversing uses names like '[adm]:list', so it is kept as-is — changing it
# would break existing reverse() callers.  TODO confirm intent.
app_name='[adm]'
# Asset CRUD routes, all dispatched to function-based views in views_asset.
urlpatterns = [
    path('', views_asset.AssetView, name='asset'),
    path('list/', views_asset.AssetListView, name="list"),
    path('create/', views_asset.AssetCreateView, name="create"),
    path('update/', views_asset.AssetUpdateView, name="update"),
    path('detail/', views_asset.AssetDetailView, name="asset-detail"),
    path('delete/', views_asset.AssetDeleteView, name='delete'),
    path('upload/', views_asset.AssetUploadView, name='upload'),
]
| 29.888889
| 70
| 0.702602
|
aa75b850dea5a387c2849822b8fce802ac7acf64
| 152
|
py
|
Python
|
table/coffee/apps.py
|
deltaGPhys/CNCCoffeeTable
|
412b1d788a86f78ba3ad57885143f8121508c1fb
|
[
"MIT"
] | null | null | null |
table/coffee/apps.py
|
deltaGPhys/CNCCoffeeTable
|
412b1d788a86f78ba3ad57885143f8121508c1fb
|
[
"MIT"
] | null | null | null |
table/coffee/apps.py
|
deltaGPhys/CNCCoffeeTable
|
412b1d788a86f78ba3ad57885143f8121508c1fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class CoffeeConfig(AppConfig):
    """Django application configuration for the `coffee` app."""
    name = 'coffee'
| 16.888889
| 39
| 0.730263
|
b9353348d7bf6e1b928900095f3f2ca497a64599
| 10,933
|
py
|
Python
|
maintenancemanagement/views/views_equipmentType.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | 3
|
2021-03-08T19:14:38.000Z
|
2022-02-01T17:57:31.000Z
|
maintenancemanagement/views/views_equipmentType.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
maintenancemanagement/views/views_equipmentType.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module defines the views corresponding to the equipment types."""
import logging
from drf_yasg.utils import swagger_auto_schema
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from maintenancemanagement.models import EquipmentType, FieldGroup, Field, FieldValue
from maintenancemanagement.serializers import (
EquipmentTypeCreateSerializer,
EquipmentTypeDetailsSerializer,
EquipmentTypeQuerySerializer,
EquipmentTypeSerializer,
EquipmentTypeValidationSerializer,
FieldCreateSerializer,
FieldValidationSerializer,
FieldValueCreateSerializer,
FieldValueValidationSerializer,
)
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
logger = logging.getLogger(__name__)
User = settings.AUTH_USER_MODEL
VIEW_EQUIPMENTTYPE = "maintenancemanagement.view_equipmenttype"
ADD_EQUIPMENTTYPE = "maintenancemanagement.add_equipmenttype"
CHANGE_EQUIPMENTTYPE = "maintenancemanagement.change_equipmenttype"
DELETE_EQUIPMENTTYPE = "maintenancemanagement.delete_equipmenttype"
class EquipmentTypeList(APIView):
    r"""
    \n# List all the equipment types or create a new one.

    Parameter :
    request (HttpRequest) : the request coming from the front-end

    Return :
    response (Response) : the response.

    GET request : list all equipment types and return the data
    POST request :
    - create a new equipment type, send HTTP 201. If the request \
    is not valid, send HTTP 400.
    - If the user doesn't have the permissions, it will send HTTP 401.
    - The request must contain name : the name of the equipment \
    type (String)
    - The request must contain equipment_set : a list (which can \
    be empty) of the equipment id (List<int>)
    """
    # NOTE(review): "Unhauthorized" in the swagger responses below is a typo,
    # but it is a runtime string exposed in the API schema and left untouched.
    @swagger_auto_schema(
        operation_description='Send the list of EquipmentType in the database.',
        query_serializer=None,
        responses={
            200: EquipmentTypeSerializer(many=True),
            401: "Unhauthorized",
        },
    )
    def get(self, request):
        """Send the list of EquipmentType in the database."""
        if request.user.has_perm(VIEW_EQUIPMENTTYPE):
            equipment_types = EquipmentType.objects.all()
            serializer = EquipmentTypeSerializer(equipment_types, many=True)
            return Response(serializer.data)
        else:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
    @swagger_auto_schema(
        operation_description='Add an EquipmentType into the database.',
        query_serializer=EquipmentTypeQuerySerializer(many=False),
        responses={
            201: EquipmentTypeDetailsSerializer(many=False),
            400: "Bad request",
            401: "Unhauthorized",
        },
    )
    def post(self, request):
        """Add an EquipmentType into the database.

        If the payload carries a 'field' list, the fields (and their
        value choices) are validated first, then created inside a new
        FieldGroup that is attached to the equipment type.
        """
        if request.user.has_perm(ADD_EQUIPMENTTYPE):
            # 'field' is removed from the payload: it is handled separately
            # from the EquipmentType serializer itself.
            fields = request.data.pop('field', None)
            equipment_type_validation_serializer = EquipmentTypeValidationSerializer(data=request.data)
            if equipment_type_validation_serializer.is_valid():
                data = request.data
                if fields:
                    # _validate_fields returns a 400 Response on error,
                    # or None when all fields validate.
                    error = self._validate_fields(fields)
                    if error:
                        return error
                    else:
                        field_group = self._create_fields(request, fields)
                        data.update({'fields_groups': [field_group.id]})
                equipment_type_serializer = EquipmentTypeCreateSerializer(data=data)
                if equipment_type_serializer.is_valid():
                    equipment_type = equipment_type_serializer.save()
                    logger.info(
                        "{user} CREATED EquipmentType with {params}".format(user=request.user, params=request.data)
                    )
                    equipment_type_details = EquipmentTypeDetailsSerializer(equipment_type)
                    return Response(equipment_type_details.data, status=status.HTTP_201_CREATED)
                return Response(equipment_type_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            return Response(equipment_type_validation_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    def _validate_fields(self, fields):
        """Validate each field dict and its optional 'value' list.

        Returns a 400 Response describing the first invalid entry,
        or None (implicitly) when everything validates.
        """
        for field in fields:
            field_values = field.get('value', None)
            field_validation_serializer = FieldValidationSerializer(data=field)
            if not field_validation_serializer.is_valid():
                return Response(field_validation_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            if field_values:
                for field_value in field_values:
                    field_value_data = {"value": field_value}
                    field_value_validation_serializer = FieldValueValidationSerializer(data=field_value_data)
                    if not field_value_validation_serializer.is_valid():
                        return Response(field_value_validation_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def _create_fields(self, request, fields):
        """Create a FieldGroup named after the equipment type and populate it.

        Each field dict is created inside the group; its optional 'value'
        list is created as FieldValue rows.  Returns the new FieldGroup.
        """
        field_group = FieldGroup(name=request.data['name'], is_equipment=True)
        field_group.save()
        for field in fields:
            field_values = field.get('value', None)
            field.update({'field_group': field_group.id})
            field_serializer = FieldCreateSerializer(data=field)
            if field_serializer.is_valid():
                field_instance = field_serializer.save()
                logger.info("{user} CREATED Field with {params}".format(user=request.user, params=field))
                if field_values:
                    for field_value in field_values:
                        field_value_data = {"value": field_value}
                        field_value_data.update({'field': field_instance.id})
                        field_value_serializer = FieldValueCreateSerializer(data=field_value_data)
                        if field_value_serializer.is_valid():
                            field_value_serializer.save()
                            logger.info(
                                "{user} CREATED FieldValue with {params}".format(
                                    user=request.user, params=field_value
                                )
                            )
        return field_group
class EquipmentTypeDetail(APIView):
    r"""
    \n# Retrieve, update or delete an equipment type.

    Parameters :
    request (HttpRequest) : the request coming from the front-end
    id (int) : the id of the equipment type

    Return :
    response (Response) : the response.

    GET request : return the equipment type's data.
    PUT request : change the equipment type with the data on the request \
    or if the data isn't well formed, send HTTP 400.
    DELETE request: delete the equipment type and send HTTP 204.

    If the user doesn't have the permissions, it will send HTTP 401.
    If the id doesn't exist, it will send HTTP 404.

    The PUT request can contain one or more of the following fields :
    - name (String): the name of the equipment type
    - equipment_set (List<int>) : a list of equipment's ids
    """
    # NOTE(review): "Unhauthorized" in the swagger responses below is a typo,
    # but it is a runtime string exposed in the API schema and left untouched.
    @swagger_auto_schema(
        operation_description='Send the EquipmentType corresponding to the given key.',
        query_serializer=None,
        responses={
            200: EquipmentTypeDetailsSerializer(many=False),
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def get(self, request, pk):
        """Send the EquipmentType corresponding to the given key."""
        try:
            equipment_type = EquipmentType.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if request.user.has_perm(VIEW_EQUIPMENTTYPE):
            serializer = EquipmentTypeDetailsSerializer(equipment_type)
            return Response(serializer.data)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    @swagger_auto_schema(
        operation_description='Update the EquipmentType corresponding to the given key.',
        query_serializer=EquipmentTypeSerializer(many=False),
        responses={
            200: EquipmentTypeSerializer(many=False),
            400: "Bad request",
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def put(self, request, pk):
        """Update the EquipmentType corresponding to the given key.

        A 'field' list in the payload is used to append new FieldValue
        choices to existing fields before the partial update is applied.
        """
        try:
            equipment_type = EquipmentType.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if request.user.has_perm(CHANGE_EQUIPMENTTYPE):
            field_objects = request.data.get("field", None)
            if field_objects is not None :
                self._update_field_values(field_objects)
            serializer = EquipmentTypeSerializer(equipment_type, data=request.data, partial=True)
            if serializer.is_valid():
                logger.info(
                    "{user} UPDATED {object} with {params}".format(
                        user=request.user, object=repr(equipment_type), params=request.data
                    )
                )
                serializer.save()
                return Response(serializer.data)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    def _update_field_values(self, field_objects):
        """Create FieldValue rows for any new values in each field object.

        Only values not already present on the field are created; existing
        values are never removed here.
        """
        for field_object in field_objects:
            field = Field.objects.get(pk=field_object.get('id'))
            new_values = field_object.get('value')
            old_values = field.value_set.all().values_list('value', flat=True)
            for value in new_values:
                if value not in old_values:
                    FieldValue.objects.create(value=value, field=field)
    @swagger_auto_schema(
        operation_description='Delete the EquipmentType corresponding to the given key.',
        query_serializer=None,
        responses={
            204: "No content",
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def delete(self, request, pk):
        """Delete the EquipmentType corresponding to the given key."""
        try:
            equipment_type = EquipmentType.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if request.user.has_perm(DELETE_EQUIPMENTTYPE):
            logger.info("{user} DELETED {object}".format(user=request.user, object=repr(equipment_type)))
            equipment_type.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
| 43.732
| 117
| 0.647947
|
4c31813dee0de5ea8cc2098e6387d8e8fa21419f
| 2,885
|
py
|
Python
|
lib/concurrence/_unittest.py
|
toymachine/concurrence
|
02f14e90e5f591970f87828c4a6e8f985d67834f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2015-11-05T06:06:33.000Z
|
2015-11-05T06:06:33.000Z
|
lib/concurrence/_unittest.py
|
toymachine/concurrence
|
02f14e90e5f591970f87828c4a6e8f985d67834f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
lib/concurrence/_unittest.py
|
toymachine/concurrence
|
02f14e90e5f591970f87828c4a6e8f985d67834f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
import unittest
import logging
import time
from concurrence import dispatch, Tasklet, Channel, quit, get_version_info
from concurrence.core import EXIT_CODE_TIMEOUT
from concurrence.io import IOStream, Socket
class TestSocket(object):
    """Scripted stand-in for a concurrence socket, driven by a callback.

    The callback is invoked as ``callback(sock, step_index, event, args,
    kwargs)`` for every operation; its return value scripts the fake
    socket's behavior for that step.
    """

    _installed = {}  # address -> TestSocket, consulted by the interceptor

    def __init__(self, callback):
        self._step_count = 0
        self._address = None
        self._callback = callback

    def _step(self, event, args, kwargs):
        """Report one event to the callback and advance the step counter."""
        outcome = self._callback(self, self._step_count, event, args, kwargs)
        self._step_count += 1
        return outcome

    def _connect(self, *args, **kwargs):
        self._address = args[0]
        return self._step("connect", args, kwargs)

    def close(self, *args, **kwargs):
        return self._step("close", args, kwargs)

    def write(self, *args, **kwargs):
        outcome = self._step("write", args, kwargs)
        if outcome is not None:
            return None  # scripted outcome present: report nothing written
        # Default: drain the buffer and report its full length as written.
        return len(args[0].read_bytes(-1))

    def read(self, *args, **kwargs):
        outcome = self._step("read", args, kwargs)
        if outcome is None:
            return 0  # nothing scripted: zero bytes read by default
        args[0].write_bytes(outcome)
        return len(outcome)

    @classmethod
    def install(cls, addr, callback):
        """Register a TestSocket for *addr* and route Socket creation to it."""
        cls._installed[addr] = TestSocket(callback)

        def interceptor(addr):
            return cls._installed[addr]

        Socket.set_interceptor(interceptor)
class TestCase(unittest.TestCase):
    """Base test case that logs lifecycle events and yields between tests."""

    def setUp(self):
        version_summary = '; '.join('%s: %s' % (key, value)
                                    for key, value in get_version_info().items())
        logging.debug("setUp %s, %s", self, version_summary)

    def tearDown(self):
        try:
            # Give everything a chance to exit before the next test starts.
            Tasklet.yield_()
            logging.debug("tearDown %s, tasklet count #%d", self, Tasklet.count())
        except:
            # Deliberately swallow all errors: teardown logging must never
            # fail a test run.
            pass
class _Timer:
    """Context manager that measures the wall-clock time of its block."""

    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, type, value, traceback):
        self._end = time.time()

    def __str__(self):
        # Bug fix: the elapsed time was computed but never returned, so
        # str(timer) raised "TypeError: __str__ returned non-string (None)".
        return '%f' % (self._end - self._start)

    def sec(self, n):
        """Return the rate n / elapsed-seconds over the measured interval."""
        return n / (self._end - self._start)


def timer():
    """Return a fresh :class:`_Timer` context manager."""
    return _Timer()
def main(timeout = None):
    """Configure debug logging and run ``unittest.main`` inside the dispatcher.

    When *timeout* is given, a tasklet is scheduled that force-quits the
    process with EXIT_CODE_TIMEOUT after that many seconds.
    """
    logging.basicConfig()
    logging.root.setLevel(logging.DEBUG)

    if timeout is not None:
        def _quit_on_timeout():
            logging.error("quiting unittest on timeout")
            quit(EXIT_CODE_TIMEOUT)

        logging.debug("test will timeout in %s seconds", timeout)
        watchdog = Tasklet.later(timeout, _quit_on_timeout, name = 'unittest_timeout')()

    # Kept for the (commented-out) profiling hook below.
    from concurrence.core import _profile
    #_profile(unittest.main)
    dispatch(unittest.main)
| 28.284314
| 114
| 0.629116
|
5d6bf1a8c2abdc166dcc91bc68e88bdb8e55099c
| 988
|
py
|
Python
|
admin/migrations/0037_auto_20190202_0956.py
|
rodlukas/UP-admin
|
08f36de0773f39c6222da82016bf1384af2cce18
|
[
"MIT"
] | 4
|
2019-07-19T17:39:04.000Z
|
2022-03-22T21:02:15.000Z
|
admin/migrations/0037_auto_20190202_0956.py
|
rodlukas/UP-admin
|
08f36de0773f39c6222da82016bf1384af2cce18
|
[
"MIT"
] | 53
|
2019-08-04T14:25:40.000Z
|
2022-03-26T20:30:55.000Z
|
admin/migrations/0037_auto_20190202_0956.py
|
rodlukas/UP-admin
|
08f36de0773f39c6222da82016bf1384af2cce18
|
[
"MIT"
] | 3
|
2020-03-09T07:11:03.000Z
|
2020-09-11T01:22:50.000Z
|
# Generated by Django 2.1.5 on 2019-02-02 08:56
import platform
from django.db import migrations
class Migration(migrations.Migration):
    """Create a Czech collation ("cz") and apply it to user-visible name columns.

    The locale spelling differs per platform: ICU ("cs-CZ-x-icu") on Windows,
    glibc ("cs_CZ.utf8") elsewhere.
    """

    dependencies = [("admin", "0036_auto_20190128_1144")]

    # Evaluated at import time on the machine running the migration.
    # NOTE(review): this assumes the Django process and the PostgreSQL server
    # run on the same platform family — confirm.
    if platform.system() == "Windows":
        collation = migrations.RunSQL('CREATE COLLATION cz (locale = "cs-CZ-x-icu")')
    else:
        collation = migrations.RunSQL('CREATE COLLATION cz (locale = "cs_CZ.utf8")')

    operations = [
        collation,
        migrations.RunSQL(
            "ALTER TABLE admin_attendancestate ALTER COLUMN name TYPE VARCHAR COLLATE cz;"
        ),
        migrations.RunSQL("ALTER TABLE admin_client ALTER COLUMN name TYPE VARCHAR COLLATE cz;"),
        migrations.RunSQL("ALTER TABLE admin_client ALTER COLUMN surname TYPE VARCHAR COLLATE cz;"),
        migrations.RunSQL("ALTER TABLE admin_course ALTER COLUMN name TYPE VARCHAR COLLATE cz;"),
        migrations.RunSQL("ALTER TABLE admin_group ALTER COLUMN name TYPE VARCHAR COLLATE cz;"),
    ]
| 36.592593
| 100
| 0.692308
|
59163e671c4dcc82a60f43b1e00b7c802568b805
| 13,525
|
py
|
Python
|
rabbitmq.py
|
summa-platform/summa-deeptagger
|
9effc39541395999c5b3f5df2f5d9ba1401b820f
|
[
"MIT"
] | null | null | null |
rabbitmq.py
|
summa-platform/summa-deeptagger
|
9effc39541395999c5b3f5df2f5d9ba1401b820f
|
[
"MIT"
] | null | null | null |
rabbitmq.py
|
summa-platform/summa-deeptagger
|
9effc39541395999c5b3f5df2f5d9ba1401b820f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Author: Didzis Gosko <didzis.gosko@leta.lv>
#
import sys, asyncio, json, traceback, inspect
from concurrent.futures import CancelledError
from aio_pika import connect, Message, ExchangeType
import pika.exceptions
# Optional project-level exception types: prefer the definitions from the
# local ``task`` module so isinstance checks match the task's own raises;
# fall back to plain Exception subclasses so this module works standalone.
try:
    from task import NoFinalResult
except ImportError:
    class NoFinalResult(Exception): pass
try:
    from task import ErrorMessage
except ImportError:
    class ErrorMessage(Exception): pass
try:
    from task import RejectError
except ImportError:
    class RejectError(Exception): pass
try:
    from task import RejectRequeueError
except ImportError:
    class RejectRequeueError(Exception): pass
try:
    from task import NoReply
except ImportError:
    class NoReply(Exception): pass
# Load the real task implementation; fall back to a dummy echo task so the
# RabbitMQ plumbing can be exercised with the "--dummy" CLI flag.
try:
    import task
except ImportError:
    if __name__ == "__main__":
        print('warning: module task not found, use "--dummy" argument to use dummy test task for RabbitMQ client', file=sys.stderr)
    import sys
    if '--dummy' not in sys.argv[1:]:
        raise

    class DummyTask:
        """Stand-in task that echoes its input and emits timed partial results."""

        name = 'DUMMY-TEST-TASK'

        def setup_argparser(self, parser):
            parser.add_argument('--test', action='store_true', help='test with %s' % self.name)

        def init(self, args=None):
            print('Initializing %s ...' % self.name, file=sys.stderr)
            if args.test:
                # Bug fix: the format argument was missing, so the literal
                # text "%s test mode enabled" was printed.
                print('%s test mode enabled' % self.name)
            self.args = args

        def shutdown(self):
            print('shutting down %s ...' % self.name, file=sys.stderr)

        def reset(self):
            print('restarting %s ...' % self.name, file=sys.stderr)

        async def process_message(self, task_data, loop=None, send_reply=None, **kwargs):
            """Emit five increasingly-delayed partial results, then succeed."""
            print('%s will process input data:' % self.name, task_data)
            if self.args:
                print('Test mode enabled')
            for i in range(5):
                print('Waiting %i seconds for first partial result to be sent' % i)
                await asyncio.sleep(i)
                if send_reply:
                    await send_reply('%i. partial result from %s' % (i, self.name))
            print('%s is complete!' % self.name)
            return 'Final result of %s: SUCCESS' % self.name

    task = DummyTask()
async def on_message(message, reply, loop=None, name=task.name, verbose=True, **kwargs):
    """Handle one incoming AMQP job message.

    Decodes the JSON body, runs ``task.process_message`` and routes partial /
    final / error results back through *reply* using the routing keys the
    sender supplied in the ``replyToRoutingKeys`` header.  Extra *kwargs* are
    forwarded to the task.  Acknowledgement/rejection of *message* depends on
    which exception (if any) the task raises.
    """
    # with message.process(): # with message auto acknowledgement
    routing_keys = message.headers['replyToRoutingKeys']
    body_dict = json.loads(message.body.decode("utf-8"))
    task_data = body_dict['taskData']
    task_metadata = body_dict['taskMetadata']

    async def send_reply(result_data, result_type='partialResult'):
        # Publish a result envelope on the routing key registered for
        # *result_type* ('partialResult', 'finalResult' or 'processingError').
        await reply(
            Message(
                bytes(json.dumps(dict(resultData=result_data, resultType=result_type, taskMetadata=task_metadata)), 'utf8'),
                headers=dict(resultProducerName=name)
            ),
            routing_keys[result_type]
        )

    try:
        item = task_metadata.get('itemId', 'unknown')
        if verbose:
            print('New job request for item %s received!' % item)
        # print(task_metadata)
        # print(task_data)
        result_data = await task.process_message(task_data, loop=loop, send_reply=send_reply, metadata=task_metadata, reject=message.reject, **kwargs)
        if verbose:
            print('Job for item %s completed!' % item)
        await send_reply(result_data, 'finalResult')
        message.ack()
    except NoReply as e:
        # Task finished but explicitly asked for no reply and no ack here.
        print('Job for item %s finished:' % item, e)
    except NoFinalResult:
        # Task already sent everything it wanted via send_reply(); just ack.
        print('Job for item %s completed!' % item)
        message.ack()
    except RejectError as e:
        if verbose:
            print('Job for item %s rejected:' % item, e)
        message.reject(requeue=False)
        return
    except RejectRequeueError as e:
        if verbose:
            print('Job for item %s rejected (and requeued):' % item, e)
        message.reject(requeue=True)
        return
    except CancelledError:
        # stop, do not send reply, requeue incomming message
        if verbose:
            print('Job for item %s cancelled!' % item)
        try:
            message.reject(requeue=True)
        except pika.exceptions.ConnectionClosed:
            # Connection already gone; the broker will requeue on its own.
            pass
        # raise
        return
    except KeyboardInterrupt:
        # stop, do not send reply, requeue incomming message
        if verbose:
            print('Job for item %s cancelled!' % item)
        message.reject(requeue=True)
        # raise
        return
    except ErrorMessage as e:
        # Expected, user-facing failure: forward only the message text.
        if verbose:
            print('Job for item %s failed with error:' % item, e)
        else:
            log(e)
        await send_reply(str(e), 'processingError')
        message.ack()
    except Exception as e:
        # Unexpected failure: forward the full traceback on the error route.
        # traceback.print_exc()
        exception = ''.join(traceback.format_exception(*sys.exc_info()))
        if verbose:
            print('Job for item %s failed with error: %s\n%s' % (item, str(e), exception))
        await send_reply(exception, 'processingError')
        # await send_reply(str(e), 'processingError')
        message.ack()
    # await reply(
    #     Message(
    #         bytes(json.dumps(dict(resultData=result_data, resultType=result_type, taskMetadata=task_metadata)), 'utf8'),
    #         headers=dict(resultProducerName=name)
    #     ),
    #     routing_keys[result_type]
    # )
def log(*messages, **print_kwargs):
    """Write *messages* to stderr using print(); kwargs are forwarded."""
    print(*messages, file=sys.stderr, **print_kwargs)
async def run(url, input_queue, output_exchange, loop=None, num_parallel=1, reconnect_delay=5,
              on_message=None, handle_all_exceptions=True, kwargs={}):
    """Connect to RabbitMQ, consume *input_queue* and publish results to
    *output_exchange*, reconnecting forever after connection loss.

    ``num_parallel`` bounds the unacknowledged-message prefetch; *kwargs*
    is forwarded to every message handler invocation.
    NOTE(review): ``kwargs={}`` is a mutable default — only read here, but
    confirm no caller mutates it.
    """

    async def reconnect(first=True):
        # Retry _connect() until it succeeds, sleeping reconnect_delay
        # seconds between attempts (no sleep before the very first one).
        nonlocal reconnect_delay
        log('Connecting' if first else 'Reconnecting', 'to', url, end=' ', flush=True)
        while True:
            try:
                if not first:
                    await asyncio.sleep(reconnect_delay)
                else:
                    first = False
                log('.', end='', flush=True)
                await _connect()
                break
            except pika.exceptions.IncompatibleProtocolError:
                # keep silent, this happens during rabbitmq startup
                pass
            except ConnectionRefusedError:
                # was unable to connect, will retry
                pass
            except ConnectionError as e:
                log('Connection error:', e)
            # except pika.exceptions.ChannelClosed as e:
            #     pass
            except KeyboardInterrupt:
                log('RECONNECT INTERRUPTED')
                raise
            except Exception as e:
                if handle_all_exceptions:
                    log('Unexpected exception at reconnect()')
                    traceback.print_exc()
                    await connection.close()
                else:
                    # Strict mode: tear down and propagate the error.
                    await connection.close()
                    if hasattr(task, 'shutdown'):
                        task.shutdown()
                    # loop.stop()
                    raise

    def on_connection_closed(future):
        # Close-callback installed on the connection: on a ConnectionError
        # schedule a reconnect and cancel in-flight handlers un-awaited.
        try:
            future.result()
        except ConnectionError as e:
            log('Connection lost, will reconnect')
            # asyncio.sleep(connection.close())
            t = asyncio.ensure_future(reconnect(False))
            # do not await lost messages
            for t in asyncio.Task.all_tasks():
                if hasattr(t, 'must_await'):
                    t.must_await = False
                    t.cancel()
            if hasattr(task, 'reset'):
                task.reset()
        except Exception as e:
            log('Unexpected exception at on_connection_closed()')
            traceback.print_exc()

    def get_on_message():
        # ``global`` deliberately skips the enclosing scope's on_message
        # parameter and reads the module-level handler instead.
        global on_message
        return on_message

    def message_callback(message):
        t = asyncio.ensure_future((on_message or get_on_message())(message, exchange_out.publish, loop=loop, **kwargs))
        t.must_await = True  # hack to identify on_message tasks

    async def _connect():
        # One connection, two channels: prefetch-limited input and an
        # output topic exchange; results are stored in the nonlocals.
        nonlocal connection, queue_in, exchange_out
        connection = await connect(url, loop=loop)
        connection.add_close_callback(on_connection_closed)
        log(' connected!')
        try:
            channel_in = await connection.channel()
            await channel_in.set_qos(prefetch_count=num_parallel)
            channel_out = await connection.channel()
            exchange_out = await channel_out.declare_exchange(output_exchange, ExchangeType.TOPIC, durable=False)
            queue_in = await channel_in.declare_queue(input_queue, passive=False)
            # Support both aio_pika API generations (coroutine vs plain).
            if inspect.iscoroutinefunction(queue_in.consume):
                await queue_in.consume(message_callback)
            else:
                queue_in.consume(message_callback)
            # queue_in.consume(lambda message: asyncio.ensure_future(
            #     (on_message or get_on_message())(message, exchange_out.publish, loop=loop, **kwargs))
            # )
        # except:
        except Exception as e:
            print('_CONNNECT EXCEPTION:', e)
            # await connection.close() # if closed, will not reconnect
            # will be handled at reconnect()
            raise

    connection = None
    queue_in = None
    exchange_out = None
    if not loop:
        loop = asyncio.get_event_loop()
    await reconnect()
def wait_for_incomplete_message_callbacks(loop):
    """Run *loop* until every in-flight on_message task has finished.

    Handler tasks are tagged with ``must_await`` when spawned; only those
    are gathered so shutdown does not hang on unrelated tasks.
    NOTE(review): ``asyncio.Task.all_tasks`` was removed in Python 3.9 and
    ``asyncio.all_tasks()`` needs a running loop — this code targets older
    runtimes; confirm before upgrading Python.
    """
    pending_on_message_tasks = [t for t in asyncio.Task.all_tasks() if hasattr(t, 'must_await') and t.must_await]
    loop.run_until_complete(asyncio.gather(*pending_on_message_tasks))
    # loop.run_until_complete(loop.shutdown_asyncgens()) # Python 3.6
def run_forever(url=None, queue_in=None, exchange_out=None, num_parallel=1, reconnect_delay=5, debug=False,
                on_message=None, handle_all_exceptions=True, **kwargs):
    """Run the RabbitMQ client on the default event loop until interrupted.

    On Ctrl-C all tasks are cancelled and the loop unwound; otherwise, when
    the loop stops, in-flight handlers are awaited and the task shut down.
    Extra **kwargs are forwarded to every on_message invocation.
    NOTE(review): ``loop.set_debug(False)`` ignores the *debug* argument —
    confirm whether that is intentional.
    """
    try:
        loop = asyncio.get_event_loop()
        loop.set_debug(False)
        loop.create_task(run(url, queue_in, exchange_out, loop=loop, num_parallel=num_parallel, reconnect_delay=reconnect_delay,
                             on_message=on_message, handle_all_exceptions=handle_all_exceptions, kwargs=kwargs))
        loop.run_forever()
    except KeyboardInterrupt:
        log('MAIN INTERRUPTED')
        # Cancel everything, then give the loop one more spin to unwind.
        for f in asyncio.Task.all_tasks():
            f.cancel()
        loop.stop()
        loop.run_forever()
        return
    try:
        # Normal shutdown: drain handlers still processing, then let the
        # task shut down (possibly returning awaitables to wait for).
        wait_for_incomplete_message_callbacks(loop)
        if hasattr(task, 'shutdown'):
            wait_for_tasks = task.shutdown()
            if wait_for_tasks:
                loop.run_until_complete(asyncio.gather(*wait_for_tasks))
    except CancelledError:
        pass
    except KeyboardInterrupt:
        log('MAIN INTERRUPTED')
    loop.stop()
def main(task=task):
    """CLI entry point: parse configuration, initialize the task and run the
    RabbitMQ client until interrupted.

    Every option can also come from an environment variable (RABBITMQ_URL,
    QUEUE_IN, EXCHANGE_OUT, PARALLEL, RECONNECT_DELAY, STARTUP_DELAY).
    """
    import os
    import argparse
    parser = argparse.ArgumentParser(description='RabbitMQ Worker', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--parallel', '-n', dest='PARALLEL', type=int, default=os.environ.get('PARALLEL',1),
                        help='messages to process in parallel (or set env variable PARALLEL)')
    parser.add_argument('--reconnect-delay', type=int, default=os.environ.get('RECONNECT_DELAY', 5),
                        help='number of seconds to wait before reconnect attempt (or set env variable RECONNECT_DELAY)')
    parser.add_argument('--startup-delay', type=int, default=os.environ.get('STARTUP_DELAY', 0),
                        help='number of seconds to wait before starting RabbitMQ client (or set env variable STARTUP_DELAY)')
    parser.add_argument('--debug', action='store_true', help='debug mode for asyncio')
    parser.add_argument('--verbose', '-v', action='store_true', help='verbose message processing mode')
    parser.add_argument('--out', dest='EXCHANGE_OUT', type=str, default=os.environ.get('EXCHANGE_OUT'),
                        help='output exchange (or set env variable EXCHANGE_OUT)')
    parser.add_argument('--in', dest='QUEUE_IN', type=str, default=os.environ.get('QUEUE_IN'),
                        help='input queue (or set env variable QUEUE_IN)')
    parser.add_argument('--url', dest='RABBITMQ_URL', type=str, default=os.environ.get('RABBITMQ_URL'),
                        help='RabbitMQ URL (or set env variable RABBITMQ_URL)')
    # Let the task contribute its own options (e.g. DummyTask's --test).
    if hasattr(task, 'setup_argparser'):
        task.setup_argparser(parser)
    args = parser.parse_args()
    # The three connection settings are mandatory.
    if not args.RABBITMQ_URL:
        log("error: RabbitMQ URL is not set")
        sys.exit(1)
    if not args.QUEUE_IN:
        log("error: RabbitMQ input queue is not set")
        sys.exit(1)
    if not args.EXCHANGE_OUT:
        log("error: RabbitMQ output exchange is not set")
        sys.exit(1)
    try:
        if hasattr(task, 'init'):
            task.init(args)
        if args.startup_delay > 0:
            import time
            log("Waiting %i seconds before starting RabbitMQ client ..." % args.startup_delay)
            time.sleep(args.startup_delay)
    except KeyboardInterrupt:
        log("INTERRUPTED")
        if hasattr(task, 'shutdown'):
            task.shutdown()
        sys.exit(1)
    log("Starting RabbitMQ client ...")
    run_forever(args.RABBITMQ_URL, args.QUEUE_IN, args.EXCHANGE_OUT, num_parallel=args.PARALLEL, reconnect_delay=args.reconnect_delay,
                debug=args.debug, verbose=args.verbose)
if __name__ == "__main__":
main()
| 36.653117
| 150
| 0.60976
|
708df51432ec170514aabeae3446f15fd6ab671c
| 5,764
|
py
|
Python
|
pulsar/apps/test/plugins/bench.py
|
goodboy/pulsar
|
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
|
[
"BSD-3-Clause"
] | 1
|
2020-11-30T07:36:57.000Z
|
2020-11-30T07:36:57.000Z
|
pulsar/apps/test/plugins/bench.py
|
goodboy/pulsar
|
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
|
[
"BSD-3-Clause"
] | null | null | null |
pulsar/apps/test/plugins/bench.py
|
goodboy/pulsar
|
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
|
[
"BSD-3-Clause"
] | null | null | null |
'''
:class:`.BenchMark` is a :class:`.TestPlugin` for benchmarking test functions.
To use the plugin follow these three steps:
* Flag a ``unittest.TestCase`` class with the ``__benchmark__ = True``
class attribute::
class MyBenchmark(unittest.TestCase):
__benchmark__ = True
def test_mybenchmark_function1(self):
...
def test_mybenchmark_function2(self):
...
* Run the test suite with the ``--benchmark`` command line option.
Alternatively, you can use the ``bench`` command from your ``setup.py`` file.
Simply add the ``bench`` entry in the ``setup.cfg``.
[bench]
test_modules = tests/bench
The test class can implement additional methods to fine-tune how the
benchmark plugin evaluates performance and displays results:
* When implemented, the ``startUp`` method is invoked before each run
of a test function.
* The time taken to run a test once can be modified by implementing
the ``getTime`` method which receives as only argument the time interval
taken.
By default it returns the same time interval.
.. autoclass:: BenchMark
'''
import sys
import time
import math
from inspect import isawaitable
from unittest import TestSuite
import pulsar
from .base import WrapTest, TestPlugin
# Pick the platform wall clock used to time benchmark runs.
if sys.platform == "win32":  # pragma nocover
    # Bug fix: time.clock() was removed in Python 3.8; time.perf_counter()
    # is its documented replacement (monotonic, highest available resolution).
    default_timer = time.perf_counter
else:
    default_timer = time.time
BENCHMARK_TEMPLATE = ('{0[name]}: repeated {0[repeat]}(x{0[times]}) times, '
'average {0[mean]} secs, stdev {0[std]}')
def simple(info, *_unused):
    """Default get-info/get-summary hook: return *info* unchanged."""
    return info
class BenchTest(WrapTest):
    """Wraps a test so running it measures timings instead of just asserting.

    Runs the wrapped method ``repeat`` rounds of ``number`` calls each and
    stores a summary dict on the test instance as ``bench_info``.
    """

    def __init__(self, test, number, repeat):
        super().__init__(test)
        self.number = number
        self.repeat = repeat

    def update_summary(self, info, repeat, total_time, total_time2):
        # Mean and percentage standard deviation over the `repeat` rounds;
        # total_time2 accumulates the squared per-round times.
        mean = total_time/repeat
        std = math.sqrt((total_time2 - total_time*mean)/repeat)
        std = round(100*std/mean, 2)
        info.update({'repeat': repeat,
                     'times': self.number,
                     'mean': '%.5f' % mean,
                     'std': '{0} %'.format(std)})

    async def _call(self):
        testMethod = self.testMethod
        # Optional per-test hooks with pass-through defaults.
        testStartUp = getattr(self.test, 'startUp', lambda: None)
        testGetTime = getattr(self.test, 'getTime', lambda dt: dt)
        testGetInfo = getattr(self.test, 'getInfo', simple)
        testGetSummary = getattr(self.test, 'getSummary', simple)
        t = 0
        t2 = 0
        info = {'name': '%s.%s' % (self.test.__class__.__name__,
                                   testMethod.__name__)}
        # NOTE(review): the inner loop reuses loop variable ``r`` — harmless
        # here, but it shadows the outer round counter.
        for r in range(self.repeat):
            DT = 0
            for r in range(self.number):
                testStartUp()                 # not included in the timing
                start = default_timer()
                result = testMethod()
                if isawaitable(result):
                    await result
                delta = default_timer() - start
                dt = testGetTime(delta)       # test may rescale the sample
                testGetInfo(info, delta, dt)
                DT += dt
            t += DT
            t2 += DT*DT
        self.update_summary(info, self.repeat, t, t2)
        self.set_test_attribute('bench_info',
                                testGetSummary(info, self.repeat, t, t2))
class BenchMark(TestPlugin):
    '''Benchmarking addon for pulsar test suite.'''
    desc = '''Run benchmarks function flagged with __benchmark__ attribute'''

    # Command-line / config setting: how many rounds each benchmark runs.
    repeat = pulsar.Setting(flags=['--repeat'],
                            type=int,
                            default=10,
                            validator=pulsar.validate_pos_int,
                            desc=('Default number of repetition '
                                  'when benchmarking.'))

    def loadTestsFromTestCase(self, test_cls):
        # Only load classes whose __benchmark__ flag matches the --benchmark
        # switch; an empty TestSuite skips, returning None lets the default
        # loader proceed.
        bench = getattr(test_cls, '__benchmark__', False)
        if self.config.benchmark != bench:  # skip the loading
            return TestSuite()

    def before_test_function_run(self, test, local):
        # Wrap flagged test methods in BenchTest so their execution is timed.
        if self.config.benchmark:
            method_name = getattr(test, '_testMethodName', None)
            if method_name:
                method = getattr(test, method_name, None)
                bench = getattr(test, '__benchmark__', False)
                if not bench and method:
                    bench = getattr(method, '__benchmark__', False)
                if bench:
                    number = getattr(test, '__number__', 1)
                    return BenchTest(test, number, self.config.repeat)

    def addSuccess(self, test):
        # Print the collected bench_info line for a successful benchmark and
        # claim the event (return True) so other plugins skip it.
        if self.config.benchmark and self.stream:
            result = getattr(test, 'bench_info', None)
            # if result and self.stream.showAll:
            if result:
                stream = self.stream.handler('benchmark')
                template = getattr(test, 'benchmark_template',
                                   BENCHMARK_TEMPLATE)
                stream.writeln(template.format(result))
                stream.flush()
            self.result.addSuccess(test)
            return True

    def addError(self, test, err):
        msg = self._msg(test, 'ERROR')
        if msg:
            self.result.addError(test, err)
        return msg

    def addFailure(self, test, err):
        msg = self._msg(test, 'FAILURE')
        if msg:
            self.result.addFailure(test, err)
        return msg

    def addSkip(self, test, reason):
        msg = self._msg(test, 'SKIPPED')
        if msg:
            self.result.addSkip(test, reason)
        return msg

    def _msg(self, test, msg):
        # Write a status line when benchmark output is active; True tells the
        # caller the event was handled, None defers to other plugins.
        if self.config.benchmark and self.stream:
            stream = self.stream.handler('benchmark')
            stream.writeln('%s: %s' % (test, msg))
            stream.flush()
            return True
| 33.317919
| 78
| 0.573387
|
0d598da391f120ef0af2e36496897550617cde53
| 7,441
|
py
|
Python
|
storages/views.py
|
Kuebler-IT/WebVirtNG
|
2412c5ca2bbd06d64db48e0508a4c3f26248f80b
|
[
"Apache-2.0"
] | null | null | null |
storages/views.py
|
Kuebler-IT/WebVirtNG
|
2412c5ca2bbd06d64db48e0508a4c3f26248f80b
|
[
"Apache-2.0"
] | null | null | null |
storages/views.py
|
Kuebler-IT/WebVirtNG
|
2412c5ca2bbd06d64db48e0508a4c3f26248f80b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from storages.forms import AddStgPool, AddImage, CloneImage
from vrtManager.storage import wvmStorage, wvmStorages
from libvirt import libvirtError
def storages(request, host_id):
    """
    Storage pool list view: show every pool on the host and handle creation
    of new pools (dir / netfs / rbd) posted from the UI.
    """
    if not request.user.is_authenticated():
        return redirect('login')
    errors = []
    compute = Compute.objects.get(id=host_id)
    try:
        conn = wvmStorages(compute.hostname,
                           compute.login,
                           compute.password,
                           compute.type)
        storages = conn.get_storages_info()
        secrets = conn.get_secrets()
        if request.method == 'POST':
            if 'create' in request.POST:
                form = AddStgPool(request.POST)
                if form.is_valid():
                    data = form.cleaned_data
                    if data['name'] in storages:
                        msg = _("Pool name already use")
                        errors.append(msg)
                    # rbd pools additionally require a secret and ceph fields.
                    if data['stg_type'] == 'rbd':
                        if not data['secret']:
                            msg = _("You need create secret for pool")
                            errors.append(msg)
                        if not data['ceph_pool'] and not data['ceph_host'] and not data['ceph_user']:
                            msg = _("You need input all fields for creating ceph pool")
                            errors.append(msg)
                    if not errors:
                        if data['stg_type'] == 'rbd':
                            conn.create_storage_ceph(data['stg_type'], data['name'],
                                                     data['ceph_pool'], data['ceph_host'],
                                                     data['ceph_user'], data['secret'])
                        elif data['stg_type'] == 'netfs':
                            conn.create_storage_netfs(data['stg_type'], data['name'],
                                                      data['netfs_host'], data['source'],
                                                      data['source_format'], data['target'])
                        else:
                            conn.create_storage(data['stg_type'], data['name'], data['source'], data['target'])
                        # Bug fix: redirect() takes positional args for URL
                        # reversing; passing args=[...] sends it to reverse()
                        # as a kwarg and raises NoReverseMatch.
                        return redirect('storage', host_id, data['name'])
        conn.close()
    except libvirtError as err:
        errors.append(err)
    return render(request, 'storages.html', locals())
def storage(request, host_id, pool):
    """
    Storage pool detail view: show the pool's state and volumes and handle
    pool/volume management actions posted from the UI (start, stop, delete,
    autostart, volume add/delete/clone, ISO upload).
    """
    if not request.user.is_authenticated():
        return redirect('login')

    def handle_uploaded_file(path, f_name):
        # Stream the uploaded file into <pool path>/<file name>; the context
        # manager closes the handle even if a chunk write fails (the original
        # leaked the handle on error).
        target = path + '/' + str(f_name)
        with open(target, 'wb+') as destination:
            for chunk in f_name.chunks():
                destination.write(chunk)

    errors = []
    compute = Compute.objects.get(id=host_id)
    meta_prealloc = False
    try:
        conn = wvmStorage(compute.hostname,
                          compute.login,
                          compute.password,
                          compute.type,
                          pool)
        storages = conn.get_storages()
        state = conn.is_active()
        size, free = conn.get_size()
        used = (size - free)
        if state:
            percent = (used * 100) / size
        else:
            percent = 0
        status = conn.get_status()
        path = conn.get_target_path()
        type = conn.get_type()
        autostart = conn.get_autostart()
        # Volume listing requires an active (running) pool.
        if state:
            conn.refresh()
            volumes = conn.update_volumes()
        else:
            volumes = None
    except libvirtError as err:
        errors.append(err)
    if request.method == 'POST':
        if 'start' in request.POST:
            try:
                conn.start()
                return redirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'stop' in request.POST:
            try:
                conn.stop()
                return redirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'delete' in request.POST:
            try:
                conn.delete()
                # Bug fix: redirect() takes positional args for URL
                # reversing; passing args=[...] sends it to reverse() as a
                # kwarg and raises NoReverseMatch.
                return redirect('storages', host_id)
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'set_autostart' in request.POST:
            try:
                conn.set_autostart(1)
                return redirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'unset_autostart' in request.POST:
            try:
                conn.set_autostart(0)
                return redirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'add_volume' in request.POST:
            form = AddImage(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                # Metadata preallocation only applies to qcow2 images.
                if data['meta_prealloc'] and data['format'] == 'qcow2':
                    meta_prealloc = True
                try:
                    conn.create_volume(data['name'], data['size'], data['format'], meta_prealloc)
                    return redirect(request.get_full_path())
                except libvirtError as err:
                    errors.append(err)
        if 'del_volume' in request.POST:
            volname = request.POST.get('volname', '')
            try:
                vol = conn.get_volume(volname)
                vol.delete(0)
                return redirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'iso_upload' in request.POST:
            if str(request.FILES['file']) in conn.update_volumes():
                msg = _("ISO image already exist")
                errors.append(msg)
            else:
                handle_uploaded_file(path, request.FILES['file'])
                return redirect(request.get_full_path())
        if 'cln_volume' in request.POST:
            form = CloneImage(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                img_name = data['name'] + '.img'
                meta_prealloc = 0
                if img_name in conn.update_volumes():
                    msg = _("Name of volume name already use")
                    errors.append(msg)
                if not errors:
                    # Optional format conversion during clone.
                    if data['convert']:
                        format = data['format']
                        if data['meta_prealloc'] and data['format'] == 'qcow2':
                            meta_prealloc = True
                    else:
                        format = None
                    try:
                        conn.clone_volume(data['image'], data['name'], format, meta_prealloc)
                        return redirect(request.get_full_path())
                    except libvirtError as err:
                        errors.append(err)
    conn.close()
    return render(request, 'storage.html', locals())
| 38.755208
| 111
| 0.502352
|
39c9c685e26fcfc07eb9c45fbe9035548ed53a1c
| 864
|
py
|
Python
|
Source-codes/VanillaSVDRepresentation.py
|
ad93/FaiRIR
|
c1ad74470a2efe3abef6dc09d6faae583670b6a8
|
[
"MIT"
] | null | null | null |
Source-codes/VanillaSVDRepresentation.py
|
ad93/FaiRIR
|
c1ad74470a2efe3abef6dc09d6faae583670b6a8
|
[
"MIT"
] | null | null | null |
Source-codes/VanillaSVDRepresentation.py
|
ad93/FaiRIR
|
c1ad74470a2efe3abef6dc09d6faae583670b6a8
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from scipy.sparse.linalg import svds
import json

# Load MovieLens-style ratings: one "UserID::MovieID::Rating::Timestamp"
# record per line.  'with' guarantees the file handle is closed (the
# original leaked it).
with open('ratings.dat', 'r') as ratings_file:
    ratings_list = [line.strip().split("::") for line in ratings_file]
ratings_df = pd.DataFrame(ratings_list, columns = ['UserID', 'MovieID', 'Rating', 'Timestamp'], dtype = float)
#print(ratings_df)
# Users x Movies ratings matrix, missing ratings filled with 0.
R_df = ratings_df.pivot(index = 'UserID', columns ='MovieID', values = 'Rating').fillna(0)
R_df.head()
#print(R_df.head())
print(len(list(R_df.head())))
# Persist the ordered movie-id list matching the representation columns.
lists = [int(movie_id) for movie_id in list(R_df.head())]
with open('MovieList.txt', 'w') as movie_list_file:
    json.dump(lists, movie_list_file)
# Bug fix: DataFrame.as_matrix() was removed in pandas 1.0;
# to_numpy() is the documented replacement.
R = R_df.to_numpy()
# Demean each user's ratings, then take a rank-128 truncated SVD.
user_ratings_mean = np.mean(R, axis = 1)
R_demeaned = R - user_ratings_mean.reshape(-1, 1)
u, s, vt = svds(R_demeaned, k = 128)
sigma = np.diag(s)
vt = np.dot(sigma, vt)
print('Learning done')
# Each row of vt.T is a 128-dim movie representation.
df2 = pd.DataFrame(vt.T)
print(vt.T.shape)
df2.to_csv('Movie_Representation.csv')
| 27.870968
| 110
| 0.690972
|
74219837d1e00c956401808df387bf58cb9361a9
| 2,331
|
py
|
Python
|
bw2calc/utils.py
|
brightway-lca/brightway2-calc-copy
|
770cb83954499199888bb7422442cbdc7ef553b3
|
[
"BSD-3-Clause"
] | 4
|
2021-09-21T14:22:49.000Z
|
2022-02-23T12:45:01.000Z
|
bw2calc/utils.py
|
brightway-lca/brightway2-calc
|
796bfe4cd839f1c8014797f1945f305d57f19c19
|
[
"BSD-3-Clause"
] | 30
|
2020-03-03T09:12:48.000Z
|
2022-03-29T15:24:04.000Z
|
bw2calc/utils.py
|
brightway-lca/brightway2-calc-copy
|
770cb83954499199888bb7422442cbdc7ef553b3
|
[
"BSD-3-Clause"
] | 5
|
2020-06-26T19:01:53.000Z
|
2022-02-21T15:40:17.000Z
|
from pathlib import Path
import bw_processing as bwp
import numpy as np
from fs.base import FS
from fs.osfs import OSFS
from fs.zipfs import ZipFS
from .errors import InconsistentGlobalIndex
def get_seed(seed=None):
    """Get valid Numpy random seed value"""
    # https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM
    rng = np.random.RandomState(seed)
    return rng.randint(0, 2147483647)
def consistent_global_index(packages, matrix="characterization_matrix"):
    """Collect ``global_index`` from every indices-resource for *matrix* and
    require them to agree; return the shared value (or None if absent)."""
    global_list = []
    for package in packages:
        filtered = package.filter_by_attribute("matrix", matrix).filter_by_attribute("kind", "indices")
        for resource in filtered.resources:
            global_list.append(resource.get("global_index"))
    if len(set(global_list)) > 1:
        raise InconsistentGlobalIndex(
            f"Multiple global index values found: {global_list}. If multiple LCIA datapackages are present, they must use the same value for ``GLO``, the global location, in order for filtering for site-generic LCIA to work correctly."
        )
    return global_list[0] if global_list else None
def wrap_functional_unit(dct):
    """Transform functional units for effective logging.

    Turns ``Activity`` objects into their keys."""
    entries = []
    for key, amount in dct.items():
        if isinstance(key, int):
            entries.append({"id": key, "amount": amount})
            continue
        try:
            # (database, code) tuple-like keys.
            entries.append({"database": key[0], "code": key[1], "amount": amount})
        except TypeError:
            # Unsubscriptable key: log it verbatim.
            entries.append({"key": key, "amount": amount})
    return entries
def get_datapackage(obj):
    """Coerce *obj* (datapackage, filesystem, Path or path string) into a
    loaded bw_processing datapackage; raise TypeError otherwise."""
    if isinstance(obj, bwp.DatapackageBase):
        return obj
    if isinstance(obj, FS):
        return bwp.load_datapackage(obj)
    if isinstance(obj, Path):
        # Path: a .zip archive or a directory.
        if obj.suffix.lower() == ".zip":
            return bwp.load_datapackage(ZipFS(obj))
        if obj.is_dir():
            return bwp.load_datapackage(OSFS(obj))
    elif isinstance(obj, str):
        # String path: an existing .zip file or a directory.
        if obj.lower().endswith(".zip") and Path(obj).is_file():
            return bwp.load_datapackage(ZipFS(Path(obj)))
        if Path(obj).is_dir():
            return bwp.load_datapackage(OSFS(Path(obj)))
    raise TypeError(
        "Unknown input type for loading datapackage: {}: {}".format(type(obj), obj)
    )
| 35.861538
| 235
| 0.66538
|
866c3815e4d32d0178bc39510ed2913a9b3718a8
| 9,881
|
py
|
Python
|
build/archive.py
|
dzeromsk/goma
|
350f67319eb985013515b533f03f2f95570c37d3
|
[
"BSD-3-Clause"
] | 4
|
2018-12-26T10:54:24.000Z
|
2022-03-31T21:19:47.000Z
|
build/archive.py
|
dzeromsk/goma
|
350f67319eb985013515b533f03f2f95570c37d3
|
[
"BSD-3-Clause"
] | null | null | null |
build/archive.py
|
dzeromsk/goma
|
350f67319eb985013515b533f03f2f95570c37d3
|
[
"BSD-3-Clause"
] | 1
|
2021-05-31T13:27:25.000Z
|
2021-05-31T13:27:25.000Z
|
#!/usr/bin/env python
# Copyright 2012 The Goma Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates goma client release archives."""
from __future__ import print_function
import hashlib
import optparse
import os
import re
import shutil
import subprocess
import sys
import tarfile
import zipfile
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
GOMACC_CMDS = ('g++', 'gcc', 'javac', 'cc', 'c++', 'clang', 'clang++')
CHROMEOS_GOMACC_CMDS = (
'i686-pc-linux-gnu-gcc',
'i686-pc-linux-gnu-g++',
'armv7a-cros-linux-gnueabi-gcc',
'armv7a-cros-linux-gnueabi-g++',
'x86_64-pc-linux-gnu-gcc',
'x86_64-pc-linux-gnu-g++',
'arm-none-eabi-gcc',
'arm-none-eabi-g++',
'x86_64-cros-linux-gnu-gcc',
'x86_64-cros-linux-gnu-g++')
# os.symlink does not exist on Windows: probe for it and install a stub
# that fails loudly if any code path actually tries to create a symlink.
try:
    os.symlink
except AttributeError:
    # no os.symlink on Windows.
    def __fake_symlink(src, dst):
        raise NotImplementedError('symlink %s %s' % (src, dst))
    os.symlink = __fake_symlink
def CreatePlatformGomacc(distname, platform):
  """Creates gomacc symlinks in distname.

  Args:
    distname: distribution directory
    platform: platform name

  Raises:
    NotImplementedError: if |platform| is not supported.
  """
  if platform not in ('linux', 'mac', 'goobuntu', 'chromeos'):
    raise NotImplementedError(platform)
  commands = list(GOMACC_CMDS)
  if platform == 'chromeos':
    commands.extend(CHROMEOS_GOMACC_CMDS)
  for command in commands:
    os.symlink('gomacc', os.path.join(distname, command))
def DeleteSymlinksToGomacc(distname):
  """Deletes symlinks to gomacc in distname.

  Args:
    distname: distribution directory
  """
  # Symlinks are only created on posix, so gomacc.exe needs no handling.
  for entry in os.listdir(distname):
    full_path = os.path.join(distname, entry)
    if not os.path.islink(full_path):
      continue
    if os.readlink(full_path) == 'gomacc':
      os.remove(full_path)
def InstallPlatformFiles(distname, platform):
  """Install platform specific files in distname.

  Copies the chromeos wrapper scripts from the current directory; the other
  supported platforms need no extra files.

  Args:
    distname: distribution directory
    platform: platform name.

  Raises:
    NotImplementedError: if |platform| is not supported.
  """
  if platform in ('linux', 'mac', 'goobuntu'):
    return
  if platform != 'chromeos':
    raise NotImplementedError(platform)
  for wrapper in ('goma-wrapper', 'goma-make'):
    shutil.copy(wrapper, distname)
def CreateAndroidDir(distname, platform):
  """Creates android support directory if necessary.

  For linux/mac/goobuntu, builds an 'android' subdirectory populated with
  symlinks back to the main binaries plus gomacc aliases for the Android
  cross-compiler command names.  No-op for other platforms.
  (Note: despite the old docstring, this returns nothing.)

  Args:
    distname: distribution directory.
    platform: platform name.
  """
  if platform in ('linux', 'mac', 'goobuntu'):
    distname = os.path.join(distname, 'android')
    shutil.rmtree(distname, ignore_errors=True)
    os.mkdir(distname)
    # Main tools live one level up; link rather than copy.
    for cmd in ('gomacc', 'compiler_proxy', 'goma_fetch',
                'goma_auth.py', 'goma_ctl.py'):
      os.symlink(os.path.join('..', cmd), os.path.join(distname, cmd))
    for cmd in GOMACC_CMDS:
      os.symlink('gomacc', os.path.join(distname, cmd))
    # Android NDK/toolchain-prefixed compiler aliases.
    for prefix in ('arm-eabi', 'arm-linux-androideabi',
                   'i686-android-linux', 'i686-linux',
                   'i686-unknown-linux-gnu',
                   'i686-unknown-linux-gnu-i686-unknown-linux-gnu',
                   'sh-linux-gnu'):
      os.symlink('gomacc', os.path.join(distname, '%s-gcc' % prefix))
      os.symlink('gomacc', os.path.join(distname, '%s-g++' % prefix))
def MkTarball(src, dst_tar_file):
  """Make tarball.

  Note: basename of |src| would show up as a file's directory name in
  a tar file.
  e.g.
  If you give "/tmp/foo/bar" that has followings inside as |src|:
    /tmp/foo/bar/gomacc
    /tmp/foo/bar/compiler_proxy
  then, the generated archive would have files with following path names:
    bar/gomacc
    bar/compiler_proxy

  Args:
    src: an absolute path name of the directory to archive.
    dst_tar_file: a filename (with extension) to output tarball.
        A '.tbz' extension selects bzip2 compression; anything else
        produces gzip.
  """
  dirname = os.path.dirname(src)
  # |src| must be absolute: Filter below strips |dirname| (minus its
  # leading '/') from each archived name, which is only correct for
  # absolute paths.  The previous check, `assert os.path.abspath(dirname)`,
  # always evaluated truthy (abspath returns a non-empty string) and
  # therefore never caught a relative path.
  assert os.path.isabs(dirname)
  def Filter(info):
    # tarfile drops the leading '/', so names start at dirname[1:].
    assert info.name.startswith(dirname[1:])
    info.name = info.name[len(dirname):]
    if info.name:
      print('Adding: %s' % info.name)
    return info
  mode = 'w:gz'
  if os.path.splitext(dst_tar_file)[1] == '.tbz':
    mode = 'w:bz2'
  with tarfile.open(dst_tar_file, mode) as tf:
    for path in os.listdir(src):
      tf.add(os.path.join(src, path), filter=Filter)
def MkZip(src, dst_zip_file):
  """Make zip file.

  The basename of |src| becomes the top-level directory name inside the
  archive.
  e.g.
  If you give "c:\\Users\\foo\\bar" containing gomacc and compiler_proxy
  as |src|, the archive holds:
    bar\\gomacc
    bar\\compiler_proxy

  Args:
    src: a full path name of the directory to archive.
    dst_zip_file: an output zip filename.
  """
  prefix_len = len(os.path.dirname(src)) + 1
  with zipfile.ZipFile(dst_zip_file, 'w',
                       compression=zipfile.ZIP_DEFLATED) as zf:
    for root, _, names in os.walk(src):
      for name in names:
        full_path = os.path.join(root, name)
        # Strip everything up to and including dirname's separator so
        # archive names start at basename(src).
        arc_name = full_path[prefix_len:]
        print('Adding: %s' % arc_name)
        zf.write(full_path, arcname=arc_name)
def main():
  """Builds a goma client release archive for the current platform.

  Returns:
    0 on success, non-zero on failure (suitable for sys.exit).
  """
  option_parser = optparse.OptionParser()
  # 'linux' added for Python 3 where sys.platform is 'linux', not 'linux2'.
  option_parser.add_option('--platform',
                           default={'linux': 'linux',
                                    'linux2': 'linux',
                                    'darwin': 'mac',
                                    'win32': 'win',
                                    'cygwin': 'win'}.get(sys.platform, None),
                           choices=('linux', 'mac', 'win',
                                    'goobuntu', 'chromeos', 'win64'),
                           help='platform name')
  option_parser.add_option('--build_dir', default='out',
                           help='directory of build output')
  option_parser.add_option('--target_dir', default='Release',
                           help='subdirectory in build_dir to archive')
  option_parser.add_option('--dist_dir', default='..',
                           help='directory to put tgz')
  option_parser.add_option('--store_in_commit_dir', action='store_true',
                           help='store tgz in commit dir under dist_dir')
  options, args = option_parser.parse_args()
  if args:
    option_parser.error('Unsupported args: %s' % ' '.join(args))

  dist_top_absdir = os.path.abspath(options.dist_dir)
  dist_absdir = dist_top_absdir
  src_dir = os.getcwd()
  if not os.path.isdir(dist_absdir):
    os.makedirs(dist_absdir, 0o755)

  if options.store_in_commit_dir:
    gitproc = subprocess.Popen(['git', 'log', '-1', '--pretty=%H'],
                               shell=(sys.platform == 'win32'),
                               stdout=subprocess.PIPE,
                               cwd=src_dir)
    # decode() so the commit hash is str on Python 3 (Popen yields bytes).
    commit = gitproc.communicate()[0].decode().strip()
    if gitproc.returncode:
      print('ERROR: git failed to get commit. exit=%d' % gitproc.returncode)
      return gitproc.returncode
    if not commit:
      print('ERROR: empty commit hash?')
      return 1
    print('Commit: %s' % commit)
    dist_absdir = os.path.join(dist_absdir, commit)
    shutil.rmtree(dist_absdir, ignore_errors=True)
    os.mkdir(dist_absdir, 0o755)

  os.chdir(os.path.join(src_dir, options.build_dir, options.target_dir))
  distname = 'goma-%s' % options.platform
  shutil.rmtree(distname, ignore_errors=True)
  print('Preparing files in %s in %s...' % (distname, os.getcwd()))
  print('mkdir %s' % distname)
  os.mkdir(distname, 0o755)

  # Stage binaries, symbol files and support scripts per platform.
  if options.platform in ('win', 'win64'):
    for cmd in ('gomacc.exe', 'compiler_proxy.exe', 'vcflags.exe',
                'goma_fetch.exe'):
      shutil.copy(cmd, distname)
      # PDB naming differs between toolchains; try '<stem>.pdb' first,
      # fall back to '<cmd>.pdb'.
      pdb = os.path.splitext(cmd)[0] + '.pdb'
      if not os.path.exists(pdb):
        pdb = cmd + '.pdb'
      shutil.copy(pdb, distname)
    for f in ('.vpython', 'goma_auth.py', 'goma_ctl.py', 'goma_ctl.bat',
              'diagnose_goma_log.py', 'compiler_proxy.sym', 'sha256.json',
              'gomacc.sym', 'LICENSE'):
      shutil.copy(f, distname)
  else:
    for f in ('.vpython', 'gomacc', 'compiler_proxy', 'goma_fetch',
              'report_env.sh', 'diagnose_goma_log.py', 'compiler_proxy.sym',
              'goma_auth.py', 'goma_ctl.py', 'sha256.json', 'gomacc.sym',
              'LICENSE'):
      shutil.copy(f, distname)
    CreatePlatformGomacc(distname, options.platform)
    InstallPlatformFiles(distname, options.platform)
  CreateAndroidDir(distname, options.platform)

  # Create an archive file.
  if options.platform in ('win', 'win64'):
    target_file = os.path.join(dist_absdir, '%s.zip' % distname)
    print('Archiving in %s.zip' % distname)
    MkZip(os.path.realpath(distname), target_file)
    compiler_proxy_path = 'compiler_proxy.exe'
  else:
    target_file = os.path.join(dist_absdir, '%s.tgz' % distname)
    print('Archiving in %s.tgz' % distname)
    MkTarball(os.path.realpath(distname), target_file)
    compiler_proxy_path = os.path.join(distname, 'compiler_proxy')
  # Since CIPD uses this directory for creating CIPD package,
  # we need to remove gomacc symlinks.
  DeleteSymlinksToGomacc(distname)
  print()
  print('%s created.' % target_file)

  # Finds user-agent string (starts with 'compiler-proxy' and ends with 'Z',
  # which is the last letter of timestamp) for compiler_proxy_user_agent.csv
  # e.g. "compiler-proxy built by goma at " +
  #      "9d6775c48911ad1b80624720121a5e0d0c320adf@1330938783 " +
  #      "on 2012-03-05T09:20:30.931701Z"
  # Use a 'with' block so the file is closed even on the error path
  # (previously the handle leaked when the pattern was not found), and a
  # bytes pattern since the file is opened in binary mode (required on
  # Python 3).
  with open(compiler_proxy_path, 'rb') as cp:
    m = re.search(br'(compiler-proxy[- a-zA-Z0-9:.@]*Z)', cp.read())
  if m:
    print('"%s",,%s' % (m.group(1).decode(), options.platform))
  else:
    print('ERROR: user-agent string not found in %s' % compiler_proxy_path)
    return 1
  return 0
# Script entry point: propagate main()'s return code as the process exit
# status so build automation can detect failures.
if __name__ == '__main__':
  sys.exit(main())
| 33.494915
| 77
| 0.637992
|
11a9f9eea0788e34286d6b68f55629bb4808a2a0
| 10,190
|
py
|
Python
|
pretrain/model.py
|
yuningkang/APIRecX
|
aaef5f3f0b669d7a907ddb3273e6658c9267c68a
|
[
"MIT"
] | 49
|
2020-03-17T08:15:49.000Z
|
2022-03-23T08:42:16.000Z
|
pretrain/model.py
|
yuningkang/APIRecX
|
aaef5f3f0b669d7a907ddb3273e6658c9267c68a
|
[
"MIT"
] | 2
|
2020-07-01T08:29:59.000Z
|
2020-08-17T18:53:48.000Z
|
pretrain/model.py
|
yuningkang/APIRecX
|
aaef5f3f0b669d7a907ddb3273e6658c9267c68a
|
[
"MIT"
] | 4
|
2021-01-14T14:13:48.000Z
|
2021-07-06T12:55:53.000Z
|
import torch
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(QK^T / sqrt(d_k)) V with dropout
    on the attention weights."""

    def __init__(self, d_k, attn_pdrop):
        # d_k: per-head key/query dimension (used for the 1/sqrt(d_k) scale).
        # attn_pdrop: dropout probability applied to the attention weights.
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k
        self.dropout = nn.Dropout(attn_pdrop)

    def forward(self, q, k, v, attn_mask):
        # |q| : (batch_size, n_heads, q_len, d_k)
        # |k| : (batch_size, n_heads, k_len, d_k)
        # |v| : (batch_size, n_heads, v_len, d_v)
        # |attn_mask| : (batch_size, n_heads, q_len, k_len); True = masked out
        attn_score = torch.matmul(q, k.transpose(-1, -2)) / (self.d_k ** 0.5)
        # In-place fill with a large negative so softmax drives masked
        # positions to ~0.
        attn_score.masked_fill_(attn_mask, -1e9)
        # |attn_scroe| : (batch_size, n_heads, q_len, k_len)
        attn_weights = nn.Softmax(dim=-1)(attn_score)
        attn_weights = self.dropout(attn_weights)
        # |attn_weights| : (batch_size, n_heads, q_len, k_len)
        output = torch.matmul(attn_weights, v)
        # |output| : (batch_size, n_heads, q_len, d_v)
        return output, attn_weights
class MultiHeadAttention(nn.Module):
    """Multi-head attention: project Q/K/V, split into n_heads, attend per
    head, then concatenate and project back to d_model."""

    def __init__(self, d_model, n_heads, attn_pdrop):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        # d_model must be divisible by n_heads for the reshape below to
        # round-trip — TODO confirm callers guarantee this.
        self.d_k = self.d_v = d_model//n_heads
        self.WQ = nn.Linear(d_model, d_model)
        self.WK = nn.Linear(d_model, d_model)
        self.WV = nn.Linear(d_model, d_model)
        self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k, attn_pdrop)
        self.linear = nn.Linear(n_heads * self.d_v, d_model)

    def forward(self, Q, K, V, attn_mask):
        # |Q| : (batch_size, q_len(=seq_len), d_model)
        # |K| : (batch_size, k_len(=seq_len), d_model)
        # |V| : (batch_size, v_len(=seq_len), d_model)
        # |attn_mask| : (batch_size, q_len, k_len)
        batch_size = Q.size(0)
        # Split the projected tensors into heads: (B, len, d_model) ->
        # (B, n_heads, len, d_k).
        q_heads = self.WQ(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        k_heads = self.WK(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        v_heads = self.WV(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)
        # |q_heads| : (batch_size, n_heads, q_len, d_k), |k_heads| : (batch_size, n_heads, k_len, d_k), |v_heads| : (batch_size, n_heads, v_len, d_v)
        # Broadcast the same mask to every head.
        attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
        # |attn_mask| : (batch_size, n_heads, q_len, k_len)
        attn, attn_weights = self.scaled_dot_product_attn(q_heads, k_heads, v_heads, attn_mask)
        # |attn| : (batch_size, n_heads, q_len, d_v)
        # |attn_weights| : (batch_size, n_heads, q_len, k_len)
        # Merge heads back: (B, n_heads, q_len, d_v) -> (B, q_len, n_heads*d_v).
        attn = attn.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
        # |attn| : (batch_size, q_len, n_heads * d_v)
        outputs = self.linear(attn)
        # |outputs| : (batch_size, q_len, d_model)
        return outputs, attn_weights
class PositionWiseFeedForwardNetwork(nn.Module):
    """Two-layer position-wise feed-forward net with GELU:
    d_model -> d_ff -> d_model, applied independently at each position."""

    def __init__(self, d_model, d_ff):
        super(PositionWiseFeedForwardNetwork, self).__init__()
        self.linear1 = nn.Linear(d_model, d_ff)
        self.linear2 = nn.Linear(d_ff, d_model)
        self.gelu = nn.GELU()
        # GPT-style weight init (std=0.02) on both projections.
        for projection in (self.linear1, self.linear2):
            nn.init.normal_(projection.weight, std=0.02)

    def forward(self, inputs):
        # |inputs| : (batch_size, seq_len, d_model)
        hidden = self.gelu(self.linear1(inputs))
        # |hidden| : (batch_size, seq_len, d_ff)
        return self.linear2(hidden)
class DecoderLayer(nn.Module):
    """One Transformer decoder block: masked self-attention + FFN, each with
    residual connection, dropout, and post-layer-norm."""

    def __init__(self, d_model, n_heads, d_ff, attn_pdrop, resid_pdrop):
        super(DecoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, n_heads, attn_pdrop)
        self.dropout1 = nn.Dropout(resid_pdrop)
        self.layernorm1 = nn.LayerNorm(d_model, eps=1e-5)
        self.ffn = PositionWiseFeedForwardNetwork(d_model, d_ff)
        self.dropout2 = nn.Dropout(resid_pdrop)
        self.layernorm2 = nn.LayerNorm(d_model, eps=1e-5)

    def forward(self, inputs, attn_mask):
        # |inputs| : (batch_size, seq_len, d_model)
        # |attn_mask| : (batch_size, seq_len, seq_len)
        # Self-attention sub-layer (Q = K = V = inputs).
        attn_outputs, attn_weights = self.mha(inputs, inputs, inputs, attn_mask)
        attn_outputs = self.dropout1(attn_outputs)
        # Post-norm residual: LayerNorm(x + Sublayer(x)).
        attn_outputs = self.layernorm1(inputs + attn_outputs)
        # |attn_outputs| : (batch_size, seq_len, d_model)
        # |attn_weights| : (batch_size, n_heads, q_len(=seq_len), k_len(=seq_len))
        ffn_outputs = self.ffn(attn_outputs)
        ffn_outputs = self.dropout2(ffn_outputs)
        ffn_outputs = self.layernorm2(attn_outputs + ffn_outputs)
        # |ffn_outputs| : (batch_size, seq_len, d_model)
        return ffn_outputs, attn_weights
class TransformerDecoder(nn.Module):
    """Stack of DecoderLayers over token + learned position embeddings, with
    causal and padding attention masks."""

    def __init__(self, vocab_size, seq_len, d_model, n_layers, n_heads, d_ff, embd_pdrop, attn_pdrop, resid_pdrop, pad_id):
        super(TransformerDecoder, self).__init__()
        self.pad_id = pad_id
        # layers
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.dropout = nn.Dropout(embd_pdrop)
        # seq_len+1 rows: positions are 1-based; index 0 is reserved for pads.
        self.pos_embedding = nn.Embedding(seq_len+1, d_model)
        self.layers = nn.ModuleList([DecoderLayer(d_model, n_heads, d_ff, attn_pdrop, resid_pdrop) for _ in range(n_layers)])
        nn.init.normal_(self.embedding.weight, std=0.02)

    def forward(self, inputs):
        # |inputs| : (batch_size, seq_len)
        # 1-based position ids; pad tokens get position 0.
        positions = torch.arange(inputs.size(1), device=inputs.device, dtype=inputs.dtype).repeat(inputs.size(0), 1) + 1
        position_pad_mask = inputs.eq(self.pad_id)
        positions.masked_fill_(position_pad_mask, 0)
        # |positions| : (batch_size, seq_len)
        # NOTE(review): dropout is applied to the token embedding only, not
        # to the summed (token + position) embedding — looks intentional but
        # differs from the usual GPT recipe; confirm before changing.
        outputs = self.dropout(self.embedding(inputs)) + self.pos_embedding(positions)
        # |outputs| : (batch_size, seq_len, d_model)
        attn_pad_mask = self.get_attention_padding_mask(inputs, inputs, self.pad_id)
        # |attn_pad_mask| : (batch_size, seq_len, seq_len)
        subsequent_mask = self.get_attention_subsequent_mask(inputs).to(device=attn_pad_mask.device)
        # |subsequent_mask| : (batch_size, seq_len, seq_len)
        # Union of padding mask and causal mask: True where attention is
        # disallowed.
        attn_mask = torch.gt((attn_pad_mask.to(dtype=subsequent_mask.dtype) + subsequent_mask), 0)
        # |attn_mask| : (batch_size, seq_len, seq_len)
        attention_weights = []
        for layer in self.layers:
            outputs, attn_weights = layer(outputs, attn_mask)
            # |outputs| : (batch_size, seq_len, d_model)
            # |attn_weights| : (batch_size, n_heads, seq_len, seq_len)
            attention_weights.append(attn_weights)
        return outputs, attention_weights

    def get_attention_padding_mask(self, q, k, pad_id):
        # True at key positions equal to pad_id, broadcast over query rows.
        attn_pad_mask = k.eq(pad_id).unsqueeze(1).repeat(1, q.size(1), 1)
        # |attn_pad_mask| : (batch_size, q_len, k_len)
        return attn_pad_mask

    def get_attention_subsequent_mask(self, q):
        # Strictly-upper-triangular ones: each position may only attend to
        # itself and earlier positions (causal mask).
        bs, q_len = q.size()
        subsequent_mask = torch.ones(bs, q_len, q_len).triu(diagonal=1)
        # |subsequent_mask| : (batch_size, q_len, q_len)
        return subsequent_mask
class GPT(nn.Module):
    """GPT backbone: a TransformerDecoder producing hidden states and
    per-layer attention weights for a batch of token-id sequences."""

    def __init__(self,
                 vocab_size,
                 seq_len=512,
                 d_model=768,
                 n_layers=12,
                 n_heads=12,
                 d_ff=3072,
                 embd_pdrop=0.1,
                 attn_pdrop=0.1,
                 resid_pdrop=0.1,
                 pad_id=0):
        super(GPT, self).__init__()
        self.decoder = TransformerDecoder(
            vocab_size, seq_len, d_model, n_layers, n_heads, d_ff,
            embd_pdrop, attn_pdrop, resid_pdrop, pad_id)

    def forward(self, inputs):
        # |inputs| : (batch_size, seq_len) token ids.
        # Returns (hidden states (B, seq_len, d_model),
        #          [per-layer attention weights] * n_layers).
        return self.decoder(inputs)
class GPTLMHead(nn.Module):
    """Language-model head over a GPT backbone.

    Projects hidden states to vocabulary logits; the projection weight is
    tied to the backbone's token-embedding matrix.
    """

    def __init__(self, gpt):
        super(GPTLMHead, self).__init__()
        vocab_size, d_model = gpt.decoder.embedding.weight.size()
        self.gpt = gpt
        self.linear = nn.Linear(d_model, vocab_size, bias=False)
        # Weight tying: share the parameter tensor with the input embedding.
        self.linear.weight = gpt.decoder.embedding.weight

    def forward(self, inputs):
        # |inputs| : (batch_size, seq_len) token ids.
        hidden, _ = self.gpt(inputs)
        # |hidden| : (batch_size, seq_len, d_model)
        return self.linear(hidden)
        # -> (batch_size, seq_len, vocab_size) logits
class GPTClsHead(nn.Module):
    """Joint LM + sequence-classification head over a GPT backbone.

    Produces (a) vocabulary logits tied to the token embedding, and
    (b) class logits read off the hidden state at the [CLS] token position.
    """

    def __init__(self, gpt, n_class, cls_token_id, cls_pdrop=0.1):
        super(GPTClsHead, self).__init__()
        vocab_size, d_model = gpt.decoder.embedding.weight.size()
        self.cls_token_id = cls_token_id
        self.gpt = gpt
        # LM
        self.linear1 = nn.Linear(d_model, vocab_size, bias=False)
        # Weight tying with the input embedding.
        self.linear1.weight = gpt.decoder.embedding.weight
        # Classification
        self.linear2 = nn.Linear(d_model, n_class)
        self.dropout = nn.Dropout(cls_pdrop)
        nn.init.normal_(self.linear2.weight, std=0.02)
        nn.init.normal_(self.linear2.bias, 0)

    def forward(self, inputs):
        # |inputs| : (batch_size, seq_len)
        outputs, attention_weights = self.gpt(inputs)
        # |outputs| : (batch_size, seq_len, d_model)
        # |attention_weights| : [(batch_size, n_heads, seq_len, seq_len)] * n_layers
        lm_logits = self.linear1(outputs)
        # |lm_logits| : (batch_size, seq_len, vocab_size)
        # Boolean-mask indexing keeps hidden states at [CLS] positions only.
        # NOTE(review): the (batch_size, d_model) shape below assumes exactly
        # one cls_token_id per sequence — confirm against the data pipeline.
        outputs = outputs[inputs.eq(self.cls_token_id)]
        # |outputs| : (batch_size, d_model)
        cls_logits = self.linear2(self.dropout(outputs))
        # |cls_logits| : (batch_size, n_class)
        return lm_logits, cls_logits
| 41.08871
| 149
| 0.624338
|
4ffa974e1429618cdcee7cca8c4ffc7b04ab1861
| 2,756
|
py
|
Python
|
setup.py
|
just-work/celery-amqp-events
|
a6a2236ceb9ba982bfd733aa0a858da8443a69e9
|
[
"MIT"
] | 1
|
2021-03-05T20:14:49.000Z
|
2021-03-05T20:14:49.000Z
|
setup.py
|
just-work/celery-amqp-events
|
a6a2236ceb9ba982bfd733aa0a858da8443a69e9
|
[
"MIT"
] | 21
|
2020-09-18T07:52:03.000Z
|
2022-03-06T07:29:21.000Z
|
setup.py
|
just-work/celery-amqp-events
|
a6a2236ceb9ba982bfd733aa0a858da8443a69e9
|
[
"MIT"
] | 2
|
2020-10-01T12:29:37.000Z
|
2020-10-31T17:37:07.000Z
|
import os
import re
import subprocess
from setuptools import setup # type: ignore
from pathlib import Path
# Long description for PyPI comes straight from the README.
with open('README.md') as f:
    long_description = f.read()
# Matches the "Version: X" line in an sdist's PKG-INFO file.
version_re = re.compile('^Version: (.+)$', re.M)
package_name = 'celery-amqp-events'
def get_version():
    """
    Reads version from git status or PKG-INFO

    https://gist.github.com/pwithnall/7bc5f320b3bdf418265a

    Returns a PEP 386 compatible version string, or None if the version
    cannot be determined (git failure, or PKG-INFO missing/incomplete).
    """
    d: Path = Path(__file__).absolute().parent
    git_dir = d.joinpath('.git')

    if git_dir.is_dir():
        # Get the version using "git describe".
        cmd = 'git describe --tags --match [0-9]*'.split()
        try:
            version = subprocess.check_output(cmd).decode().strip()
        except subprocess.CalledProcessError:
            return None

        # PEP 386 compatibility: "1.2-3-gabcdef" -> "1.2.post3"
        if '-' in version:
            version = '.post'.join(version.split('-')[:2])

        # Don't declare a version "dirty" merely because a time stamp has
        # changed. If it is dirty, append a ".dev1" suffix to indicate a
        # development revision after the release.
        with open(os.devnull, 'w') as fd_devnull:
            subprocess.call(['git', 'status'],
                            stdout=fd_devnull, stderr=fd_devnull)

        cmd = 'git diff-index --name-only HEAD'.split()
        try:
            dirty = subprocess.check_output(cmd).decode().strip()
        except subprocess.CalledProcessError:
            return None

        if dirty != '':
            version += '.dev1'
    else:
        # Extract the version from the PKG-INFO file.
        try:
            with open('PKG-INFO') as v:
                match = version_re.search(v.read())
            # Guard against a PKG-INFO without a "Version:" line; the
            # previous `.search(...).group(1)` raised AttributeError here
            # instead of honouring the "return None" contract.
            version = match.group(1) if match else None
        except FileNotFoundError:
            version = None

    return version
# Package metadata; version comes from git describe (checkout) or PKG-INFO
# (sdist), falling back to the literal 'dev'.
setup(
    name=package_name,
    version=get_version() or 'dev',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=['amqp_events'],
    url='https://github.com/just-work/celery-amqp-events',
    license='MIT',
    author='Sergey Tikhonov',
    author_email='zimbler@gmail.com',
    description='Distributed event handling on top of Celery',
    install_requires=[
        'Celery',
        # typing_extensions backports typing features used on Python < 3.8.
        'typing_extensions; python_version < "3.8.0"',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Software Development :: Libraries',
        'Topic :: System :: Distributed Computing',
    ]
)
| 31.318182
| 73
| 0.600508
|
750d303f385ca519738672d4216bde25702591fc
| 8,816
|
py
|
Python
|
models/frustum_pointnets_v1.py
|
voidrank/Geo-CNN
|
4e8a7d5cc0d14ffa2a1b8bef854f294ae4e25f8b
|
[
"Apache-2.0"
] | 37
|
2019-04-02T08:25:39.000Z
|
2022-01-16T22:26:17.000Z
|
models/frustum_pointnets_v1.py
|
voidrank/Geo-CNN
|
4e8a7d5cc0d14ffa2a1b8bef854f294ae4e25f8b
|
[
"Apache-2.0"
] | 6
|
2019-05-10T02:26:14.000Z
|
2020-05-06T01:30:11.000Z
|
models/frustum_pointnets_v1.py
|
voidrank/Geo-CNN
|
4e8a7d5cc0d14ffa2a1b8bef854f294ae4e25f8b
|
[
"Apache-2.0"
] | 7
|
2019-06-24T17:29:17.000Z
|
2020-04-30T06:28:24.000Z
|
''' Frsutum PointNets v1 Model.
'''
from __future__ import print_function
import sys
import os
import tensorflow as tf
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER, NUM_OBJECT_POINT
from model_util import point_cloud_masking, get_center_regression_net
from model_util import placeholder_inputs, parse_output_to_tensors, get_loss
def get_instance_seg_v1_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v1 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    # Per-point feature extraction: shared MLP implemented as [1,1] convs
    # over a (B, N, 1, C) tensor.  Scope names ('conv1'..'conv10') are
    # checkpoint-load-bearing — do not rename.
    net = tf.expand_dims(point_cloud, 2)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    # Saved early for visualization/debugging via end_points['vis'].
    vis = net
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net, 64, [1,1],
                                padding='VALID', stride=[1,1],
                                bn=True, is_training=is_training,
                                scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(point_feat, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # Max-pool over all points -> one global feature per frustum, then
    # append the one-hot object-class vector.
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
    # Tile the global feature back to every point and concatenate with the
    # per-point features for segmentation.
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)
    # Final per-point 2-way scores (bkg/clutter vs. object); no activation.
    logits = tf_util.conv2d(net, 2, [1,1],
                            padding='VALID', stride=[1,1], activation_fn=None,
                            scope='conv10')
    logits = tf.squeeze(logits, [2]) # BxNxC
    end_points['vis'] = vis
    return logits, end_points
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
                                 is_training, bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    '''
    num_point = object_point_cloud.get_shape()[1].value

    # Per-point shared MLP via [1,1] convs; scope names are
    # checkpoint-load-bearing — do not rename.
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg4', bn_decay=bn_decay)
    # Aggregate to one global feature per object, append class one-hot,
    # then regress box parameters with fully-connected layers.
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool2')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals
    # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
    output = tf_util.fully_connected(net,
        3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    return output, end_points
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
    ''' Frustum PointNets model. The model predict 3D object masks and
    amodel bounding boxes for objects in frustum point clouds.

    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
    Output:
        end_points: dict (map from name strings to TF tensors)
    '''
    end_points = {}

    # Stage 1: per-point 3D Instance Segmentation PointNet.
    logits, end_points = get_instance_seg_v1_net(\
        point_cloud, one_hot_vec,
        is_training, bn_decay, end_points)
    end_points['mask_logits'] = logits

    # Masking
    # select masked points and translate to masked points' centroid
    object_point_cloud_xyz, mask_xyz_mean, end_points = \
        point_cloud_masking(point_cloud, logits, end_points)

    # Stage 2: T-Net predicts a residual center; combined with the mask
    # centroid it gives the first-stage center estimate.
    center_delta, end_points = get_center_regression_net(\
        object_point_cloud_xyz, one_hot_vec,
        is_training, bn_decay, end_points)
    stage1_center = center_delta + mask_xyz_mean # Bx3
    end_points['stage1_center'] = stage1_center
    # Get object point cloud in object coordinate
    object_point_cloud_xyz_new = \
        object_point_cloud_xyz - tf.expand_dims(center_delta, 1)

    # Stage 3: Amodel Box Estimation PointNet on centered object points.
    output, end_points = get_3d_box_estimation_v1_net(\
        object_point_cloud_xyz_new, one_hot_vec,
        is_training, bn_decay, end_points)

    # Parse output to 3D box parameters
    end_points = parse_output_to_tensors(output, end_points)
    # Final center = box-net residual + stage-1 center.
    end_points['center'] = end_points['center_boxnet'] + stage1_center # Bx3

    return end_points
# Smoke test: builds the graph with zero/one-filled placeholders and prints
# the resulting end_points tensors and the loss op (no session is run).
if __name__=='__main__':
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,4))
        outputs = get_model(inputs, tf.ones((32,3)), tf.constant(True))
        for key in outputs:
            print((key, outputs[key]))
        loss = get_loss(tf.zeros((32,1024),dtype=tf.int32),
            tf.zeros((32,3)), tf.zeros((32,),dtype=tf.int32),
            tf.zeros((32,)), tf.zeros((32,),dtype=tf.int32),
            tf.zeros((32,3)), outputs)
        print(loss)
| 43.860697
| 101
| 0.607645
|
fc6de5031f3077d3a446554baf4681b394ffaf20
| 3,401
|
py
|
Python
|
ome_data/original_files.py
|
fireofearth/ome_seadragon
|
35cab3a18c4ee559390f63c42199e93940ab2079
|
[
"MIT"
] | 1
|
2019-11-14T14:15:19.000Z
|
2019-11-14T14:15:19.000Z
|
ome_data/original_files.py
|
fireofearth/ome_seadragon
|
35cab3a18c4ee559390f63c42199e93940ab2079
|
[
"MIT"
] | null | null | null |
ome_data/original_files.py
|
fireofearth/ome_seadragon
|
35cab3a18c4ee559390f63c42199e93940ab2079
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import omero.model as om
import omero.rtypes as ot
from omero.model.enums import ChecksumAlgorithmSHA1160
from utils import switch_to_default_search_group
import logging
logger = logging.getLogger(__name__)
class DuplicatedEntryError(Exception):
    """Raised when an OriginalFile lookup or insert encounters duplicates."""
    pass
def save_original_file(connection, name, path, mimetype, size, sha1):
    """Create a new OMERO OriginalFile and return its numeric ID.

    Raises DuplicatedEntryError if a file with the same name and mimetype
    already exists.
    """
    of = get_original_file(connection, name, mimetype)
    if of is None:
        of = om.OriginalFileI()
        of.setName(ot.wrap(name))
        of.setPath(ot.wrap(path))
        of.setMimetype(ot.wrap(mimetype))
        of.setSize(ot.rlong(size))
        # The hash is a SHA1-160 digest; record the algorithm alongside it.
        of.setHash(ot.wrap(sha1))
        hasher = om.ChecksumAlgorithmI()
        hasher.setValue(ot.wrap(ChecksumAlgorithmSHA1160))
        of.setHasher(hasher)
        of = connection.getUpdateService().saveAndReturnObject(of)
        return of.getId().getValue()
    else:
        raise DuplicatedEntryError(
            'OriginalFile with name %s and mimetype %s already exists' % (name, mimetype)
        )
def get_original_files(connection, name, mimetype=None):
    """Return all OriginalFile objects matching *name* (and *mimetype*, if given)."""
    switch_to_default_search_group(connection)
    attributes = {'name': name}
    if mimetype:
        attributes['mimetype'] = mimetype
    return list(connection.getObjects('OriginalFile', attributes=attributes))
def get_original_file(connection, name, mimetype):
    """Return the unique OriginalFile for (name, mimetype), or None.

    Raises DuplicatedEntryError when more than one file matches.
    """
    matches = get_original_files(connection, name, mimetype)
    if not matches:
        return None
    if len(matches) == 1:
        return matches[0]
    raise DuplicatedEntryError('File %s with mimetype %s is not unique' %
                               (name, mimetype))
def delete_original_files(connection, name, mimetype=None):
    """Delete all OriginalFile objects matching *name* (and *mimetype*).

    Returns:
        (success, count) tuple: success is False when deletion failed,
        count is the number of files submitted for deletion.
    """
    ofiles = get_original_files(connection, name, mimetype)
    of_ids = []
    if len(ofiles) > 0:
        of_ids = [of.getId() for of in ofiles]
        try:
            connection.deleteObjects('OriginalFile', of_ids, deleteAnns=False,
                                     deleteChildren=False)
        except Exception:
            # The previous bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt and hid the failure entirely; narrow the
            # handler and log the cause before reporting failure.
            logger.exception('failed to delete OriginalFile objects %r', of_ids)
            return False, 0
    return True, len(of_ids)
def get_original_file_infos(connection, name, mimetype):
    """Return {'file_path', 'file_hash'} for the matching OriginalFile.

    Returns None when no file matches (name, mimetype).
    """
    ofile = get_original_file(connection, name, mimetype)
    if ofile:
        return {
            'file_path': ofile.getPath(),
            'file_hash': ofile.getHash()
        }
    return None
| 35.8
| 89
| 0.690679
|
03dd3cfe55a5e5124494d5bc293a7fe6def2da4b
| 14,637
|
py
|
Python
|
tests/helpers/test_config_validation.py
|
boojew/home-assistant
|
697c331903f8a440a4ce324a4fb0788351dc86c3
|
[
"Apache-2.0"
] | 1
|
2019-05-19T08:05:02.000Z
|
2019-05-19T08:05:02.000Z
|
tests/helpers/test_config_validation.py
|
boojew/home-assistant
|
697c331903f8a440a4ce324a4fb0788351dc86c3
|
[
"Apache-2.0"
] | 6
|
2021-02-08T21:02:40.000Z
|
2022-03-12T00:52:16.000Z
|
tests/helpers/test_config_validation.py
|
boojew/home-assistant
|
697c331903f8a440a4ce324a4fb0788351dc86c3
|
[
"Apache-2.0"
] | 1
|
2019-08-13T11:54:30.000Z
|
2019-08-13T11:54:30.000Z
|
"""Test config validators."""
from datetime import timedelta, datetime, date
import enum
import os
from socket import _GLOBAL_DEFAULT_TIMEOUT
from unittest.mock import Mock, patch
import pytest
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
def test_boolean():
    """Test boolean validation."""
    schema = vol.Schema(cv.boolean)

    invalid_values = ('T', 'negative', 'lock')
    for value in invalid_values:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    truthy_values = ('true', 'On', '1', 'YES', 'enable', 1, True)
    for value in truthy_values:
        assert schema(value)

    falsy_values = ('false', 'Off', '0', 'NO', 'disable', 0, False)
    for value in falsy_values:
        assert not schema(value)
def test_latitude():
    """Test latitude validation."""
    schema = vol.Schema(cv.latitude)

    # Latitudes must be numeric and inside [-90, 90].
    out_of_range = ('invalid', None, -91, 91, '-91', '91', '123.01A')
    for value in out_of_range:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    for value in ('-89', 89, '12.34'):
        schema(value)
def test_longitude():
    """Test longitude validation."""
    schema = vol.Schema(cv.longitude)

    # Longitudes must be numeric and inside [-180, 180].
    out_of_range = ('invalid', None, -181, 181, '-181', '181', '123.01A')
    for value in out_of_range:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    for value in ('-179', 179, '12.34'):
        schema(value)
def test_port():
    """Test TCP/UDP network port."""
    schema = vol.Schema(cv.port)

    # Valid ports are 1-65535.
    bad_ports = ('invalid', None, -1, 0, 80000, '81000')
    for value in bad_ports:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    for value in ('1000', 21, 24574):
        schema(value)
def test_isfile():
    """Validate that the value is an existing file."""
    schema = vol.Schema(cv.isfile)

    missing_file = 'this-file-does-not.exist'
    assert not os.path.isfile(missing_file)

    bad_values = ('invalid', None, -1, 0, 80000, missing_file)
    for value in bad_values:
        with pytest.raises(vol.Invalid):
            schema(value)

    # Fake an existing file with write access so the happy path can pass.
    with patch('os.path.isfile', Mock(return_value=True)), \
            patch('os.access', Mock(return_value=True)):
        schema('test.txt')
def test_url():
    """Test URL."""
    schema = vol.Schema(cv.url)

    malformed = ('invalid', None, 100, 'htp://ha.io', 'http//ha.io',
                 'http://??,**', 'https://??,**')
    for value in malformed:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    well_formed = ('http://localhost', 'https://localhost/test/index.html',
                   'http://home-assistant.io', 'http://home-assistant.io/test/',
                   'https://community.home-assistant.io/')
    for value in well_formed:
        assert schema(value)
def test_platform_config():
    """PLATFORM_SCHEMA requires a `platform` key; extras are allowed."""
    for bad in ({}, {'hello': 'world'}):
        with pytest.raises(vol.MultipleInvalid):
            cv.PLATFORM_SCHEMA(bad)
    for good in ({'platform': 'mqtt'},
                 {'platform': 'mqtt', 'beer': 'yes'}):
        cv.PLATFORM_SCHEMA(good)
def test_ensure_list():
    """cv.ensure_list wraps scalars in a list and passes lists through."""
    validator = vol.Schema(cv.ensure_list)
    cases = (
        (None, []),          # None becomes an empty list
        (1, [1]),
        ([1], [1]),
        ('1', ['1']),
        (['1'], ['1']),
        ({'1': '2'}, [{'1': '2'}]),
    )
    for given, expected in cases:
        assert validator(given) == expected
def test_entity_id():
    """cv.entity_id requires a `domain.object` form and lower-cases it."""
    validator = vol.Schema(cv.entity_id)
    with pytest.raises(vol.MultipleInvalid):
        validator('invalid_entity')
    assert validator('sensor.LIGHT') == 'sensor.light'
def test_entity_ids():
    """cv.entity_ids validates lists and comma-separated ID strings."""
    validator = vol.Schema(cv.entity_ids)
    invalid_inputs = (
        'invalid_entity',
        'sensor.light,sensor_invalid',
        ['invalid_entity'],
        ['sensor.light', 'sensor_invalid'],
        ['sensor.light,sensor_invalid'],
    )
    for bad in invalid_inputs:
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    for good in ([], ['sensor.light'], 'sensor.light'):
        validator(good)
    # Comma-separated strings are split, trimmed, and lower-cased.
    assert validator('sensor.LIGHT, light.kitchen ') == [
        'sensor.light', 'light.kitchen'
    ]
def test_entity_domain():
    """Test entity domain validation.

    Entities outside the requested domain (or malformed IDs) must fail;
    valid IDs come back lower-cased.
    """
    schema = vol.Schema(cv.entity_domain('sensor'))
    # Removed a leftover debug `print(value)` that was sitting inside the
    # pytest.raises block.
    for value in ('invalid_entity', 'cover.demo'):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    assert schema('sensor.LIGHT') == 'sensor.light'
def test_entities_domain():
    """cv.entities_domain restricts every listed entity to one domain."""
    validator = vol.Schema(cv.entities_domain('sensor'))
    invalid_inputs = (
        None,
        '',
        'invalid_entity',
        ['sensor.light', 'cover.demo'],     # wrong domain in the list
        ['sensor.light', 'sensor_invalid'],  # malformed ID in the list
    )
    for bad in invalid_inputs:
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    for good in ('sensor.light', ['SENSOR.light'],
                 ['sensor.light', 'sensor.demo']):
        validator(good)
    # Strings are split on commas; everything is trimmed and lower-cased.
    assert validator('sensor.LIGHT, sensor.demo ') == [
        'sensor.light', 'sensor.demo'
    ]
    assert validator(['sensor.light', 'SENSOR.demo']) == [
        'sensor.light', 'sensor.demo'
    ]
def test_ensure_list_csv():
    """cv.ensure_list_csv turns comma-separated strings into lists."""
    validator = vol.Schema(cv.ensure_list_csv)
    for value in (None, 12, [], ['string'], 'string1,string2'):
        validator(value)
    # Items are split on commas and stripped of surrounding whitespace.
    assert validator('string1, string2 ') == ['string1', 'string2']
def test_event_schema():
    """EVENT_SCHEMA needs an `event` key and a mapping for `event_data`."""
    invalid_configs = (
        {}, None,
        {'event_data': {}},                            # missing `event`
        {'event': 'state_changed', 'event_data': 1},   # data not a mapping
    )
    for bad in invalid_configs:
        with pytest.raises(vol.MultipleInvalid):
            cv.EVENT_SCHEMA(bad)
    valid_configs = (
        {'event': 'state_changed'},
        {'event': 'state_changed', 'event_data': {'hello': 'world'}},
    )
    for good in valid_configs:
        cv.EVENT_SCHEMA(good)
def test_icon():
    """cv.icon requires a `prefix:name` string."""
    validator = vol.Schema(cv.icon)
    for bad in (False, 'work'):
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    validator('mdi:work')
    validator('custom:prefix')
def test_time_period():
    """cv.time_period accepts HH:MM[:SS] strings, dicts, and numbers."""
    validator = vol.Schema(cv.time_period)
    invalid_inputs = (None, '', 'hello:world', '12:', '12:34:56:78',
                      {}, {'wrong_key': -10})
    for bad in invalid_inputs:
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    valid_inputs = ('8:20', '23:59', '-8:20', '-23:59:59', '-48:00',
                    {'minutes': 5}, 1, '5')
    for good in valid_inputs:
        validator(good)
    # Bare numeric strings are interpreted as seconds; negative durations
    # are supported.
    assert validator('180') == timedelta(seconds=180)
    assert validator('23:59') == timedelta(hours=23, minutes=59)
    assert validator('-1:15') == -1 * timedelta(hours=1, minutes=15)
def test_service():
    """cv.service requires a `domain.service` string."""
    validator = vol.Schema(cv.service)
    with pytest.raises(vol.MultipleInvalid):
        validator('invalid_turn_on')
    validator('homeassistant.turn_on')
def test_service_schema():
    """SERVICE_SCHEMA needs exactly one service key plus valid data."""
    invalid_configs = (
        {}, None,
        # `service` and `service_template` are mutually exclusive.
        {
            'service': 'homeassistant.turn_on',
            'service_template': 'homeassistant.turn_on'
        },
        # Data alone, without any service reference.
        {'data': {'entity_id': 'light.kitchen'}},
        # `data` must be a mapping.
        {'service': 'homeassistant.turn_on', 'data': None},
        # Templates inside data_template must parse.
        {
            'service': 'homeassistant.turn_on',
            'data_template': {'brightness': '{{ no_end'}
        },
    )
    for bad in invalid_configs:
        with pytest.raises(vol.MultipleInvalid):
            cv.SERVICE_SCHEMA(bad)
    valid_configs = (
        {'service': 'homeassistant.turn_on'},
        {'service': 'homeassistant.turn_on', 'entity_id': 'light.kitchen'},
        {'service': 'light.turn_on', 'entity_id': 'all'},
        {
            'service': 'homeassistant.turn_on',
            'entity_id': ['light.kitchen', 'light.ceiling'],
        },
    )
    for good in valid_configs:
        cv.SERVICE_SCHEMA(good)
def test_slug():
    """cv.slug accepts slug-safe strings and coercible values."""
    validator = vol.Schema(cv.slug)
    for bad in (None, 'hello world'):
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    for good in (12345, 'hello'):
        validator(good)
def test_string():
    """cv.string coerces scalars but rejects None and containers."""
    validator = vol.Schema(cv.string)
    for bad in (None, [], {}):
        with pytest.raises(vol.Invalid):
            validator(bad)
    for good in (True, 1, 'hello'):
        validator(good)
def test_temperature_unit():
    """cv.temperature_unit accepts only 'C' or 'F'."""
    validator = vol.Schema(cv.temperature_unit)
    with pytest.raises(vol.MultipleInvalid):
        validator('K')
    validator('C')
    validator('F')
def test_x10_address():
    """Test x10 address validator.

    Fix: the invalid addresses 'q55' and 'garbage_addr' were validated
    OUTSIDE the pytest.raises block, so the test itself raised when they
    were (correctly) rejected. Each invalid value now gets its own
    raises context so all of them are actually exercised.
    """
    schema = vol.Schema(cv.x10_address)
    for value in ('Q1', 'q55', 'garbage_addr'):
        with pytest.raises(vol.Invalid):
            schema(value)
    # Valid: house code letter followed by a unit number.
    schema('a1')
    schema('C11')
def test_template():
    """Test template validator.

    Fix: `pytest.raises(..., message=...)` used the `message` keyword,
    which was deprecated in pytest 4.x and removed in pytest 5.0 (it
    raises TypeError there). A plain raises check is sufficient.
    """
    schema = vol.Schema(cv.template)
    for value in (None, '{{ partial_print }', '{% if True %}Hello', ['test']):
        with pytest.raises(vol.Invalid):
            schema(value)
    options = (
        1, 'Hello',
        '{{ beer }}',
        '{% if 1 == 1 %}Hello{% else %}World{% endif %}',
    )
    for value in options:
        schema(value)
def test_template_complex():
    """cv.template_complex also validates templates nested in containers."""
    validator = vol.Schema(cv.template_complex)
    for bad in (None, '{{ partial_print }', '{% if True %}Hello'):
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    valid_inputs = (
        1, 'Hello',
        '{{ beer }}',
        '{% if 1 == 1 %}Hello{% else %}World{% endif %}',
        {'test': 1, 'test2': '{{ beer }}'},  # templates inside dict values
        ['{{ beer }}', 1],                   # ... and inside lists
    )
    for good in valid_inputs:
        validator(good)
def test_time_zone():
    """cv.time_zone accepts only known IANA time zone names."""
    validator = vol.Schema(cv.time_zone)
    with pytest.raises(vol.MultipleInvalid):
        validator('America/Do_Not_Exist')
    validator('America/Los_Angeles')
    validator('UTC')
def test_date():
    """cv.date accepts date objects and ISO date strings only."""
    validator = vol.Schema(cv.date)
    for bad in ('Not a date', '23:42', '2016-11-23T18:59:08'):
        with pytest.raises(vol.Invalid):
            validator(bad)
    validator(datetime.now().date())
    validator('2016-11-23')
def test_time():
    """cv.time accepts time objects and HH:MM[:SS] strings only."""
    validator = vol.Schema(cv.time)
    for bad in ('Not a time', '2016-11-23', '2016-11-23T18:59:08'):
        with pytest.raises(vol.Invalid):
            validator(bad)
    validator(datetime.now().time())
    validator('23:42:00')
    validator('23:42')
def test_datetime():
    """cv.datetime accepts datetime objects and ISO timestamps only."""
    validator = vol.Schema(cv.datetime)
    # A bare date (no time part) is not enough.
    for bad in (date.today(), 'Wrong DateTime', '2016-11-23'):
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    validator(datetime.now())
    validator('2016-11-23T18:59:08')
def test_deprecated(caplog):
    """cv.deprecated logs one warning when the deprecated key appears."""
    base_schema = vol.Schema({
        'venus': cv.boolean,
        'mars': cv.boolean
    })
    deprecated_schema = vol.All(cv.deprecated('mars'), base_schema)
    # A config without the deprecated option logs nothing.
    deprecated_schema({'venus': True})
    # pylint: disable=len-as-condition
    assert len(caplog.records) == 0
    # Using the deprecated option emits exactly one warning record.
    deprecated_schema({'mars': True})
    assert len(caplog.records) == 1
    assert caplog.records[0].name == __name__
    assert ("The 'mars' option (with value 'True') is deprecated, "
            "please remove it from your configuration.") in caplog.text
def test_key_dependency():
    """Test key_dependency validator.

    Fix: the invalid-options "tuple" was missing its trailing comma
    (`({'beer': None})` is just a dict), so the loop iterated the dict's
    KEYS and validated the string 'beer' instead of the mapping itself.
    """
    schema = vol.Schema(cv.key_dependency('beer', 'soda'))
    options = (
        {'beer': None},  # has 'beer' but not the required 'soda'
    )
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    options = (
        {'beer': None, 'soda': None},
        {'soda': None}, {}
    )
    for value in options:
        schema(value)
def test_has_at_least_one_key():
    """cv.has_at_least_one_key requires at least one named key present."""
    validator = vol.Schema(cv.has_at_least_one_key('beer', 'soda'))
    for bad in (None, [], {}, {'wine': None}):
        with pytest.raises(vol.MultipleInvalid):
            validator(bad)
    for good in ({'beer': None}, {'soda': None}):
        validator(good)
def test_enum():
    """cv.enum rejects values outside the enum's members."""
    class SampleEnum(enum.Enum):
        """Enum used only inside this test."""

        value1 = "Value 1"
        value2 = "Value 2"

    validator = vol.Schema(cv.enum(SampleEnum))
    with pytest.raises(vol.Invalid):
        validator('value3')
def test_socket_timeout():  # pylint: disable=invalid-name
    """cv.socket_timeout: positive floats pass, None means global default."""
    validator = vol.Schema(cv.socket_timeout)
    for bad in (0.0, -1):
        with pytest.raises(vol.Invalid):
            validator(bad)
    assert validator(None) == _GLOBAL_DEFAULT_TIMEOUT
    assert validator(1) == 1.0
def test_matches_regex():
    """cv.matches_regex: only strings matching the pattern pass through."""
    validator = vol.Schema(cv.matches_regex('.*uiae.*'))
    # Non-strings and non-matching strings are both rejected.
    with pytest.raises(vol.Invalid):
        validator(1.0)
    with pytest.raises(vol.Invalid):
        validator(" nrtd ")
    sample = "This is a test including uiae."
    assert validator(sample) == sample
def test_is_regex():
    """cv.is_regex accepts only compilable regex pattern strings."""
    validator = vol.Schema(cv.is_regex)
    # An unbalanced parenthesis is not a valid pattern.
    with pytest.raises(vol.Invalid):
        validator("(")
    with pytest.raises(vol.Invalid):
        validator({"a dict": "is not a regex"})
    validator(".*")
def test_comp_entity_ids():
    """cv.comp_entity_ids: entity ID lists plus the special 'all' value."""
    validator = vol.Schema(cv.comp_entity_ids)
    valid_values = ('ALL', 'all', 'AlL', 'light.kitchen', ['light.kitchen'],
                    ['light.kitchen', 'light.ceiling'], [])
    for good in valid_values:
        validator(good)
    for bad in (['light.kitchen', 'not-entity-id'], '*', ''):
        with pytest.raises(vol.Invalid):
            validator(bad)
| 24.193388
| 79
| 0.568081
|
b4384fc006c1f86bddf400b66621de5289a72069
| 4,827
|
py
|
Python
|
cv2/document_scanner.py
|
NNDEV1/CV2Stuff
|
fc31a51cdd816e313351f0d83ffb0c7339e73ef3
|
[
"MIT"
] | 3
|
2021-01-09T23:55:34.000Z
|
2021-08-15T22:04:34.000Z
|
cv2/document_scanner.py
|
NNDEV1/CV2Stuff
|
fc31a51cdd816e313351f0d83ffb0c7339e73ef3
|
[
"MIT"
] | null | null | null |
cv2/document_scanner.py
|
NNDEV1/CV2Stuff
|
fc31a51cdd816e313351f0d83ffb0c7339e73ef3
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
##########################################
# Target size of the scanned (warped) output document.
widthImg = 480
heightImg = 640
##########################################
# NOTE(review): frameWidth/frameHeight are not referenced anywhere below —
# the capture properties are set from widthImg/heightImg instead; confirm
# whether these were meant to be used.
frameWidth = 480
frameHeight = 360
cap = cv2.VideoCapture(0)  # default webcam
cap.set(3, widthImg)  # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, heightImg)  # property 4 = CAP_PROP_FRAME_HEIGHT
def stackImages(scale,imgArray):
    """Stack a 1-D or 2-D list of images into one mosaic image.

    scale: resize factor applied to every image.
    imgArray: either a flat list of images (stacked horizontally) or a
        list of rows of images (each row stacked horizontally, rows then
        stacked vertically).

    NOTE: the input images are resized/converted IN PLACE inside imgArray.
    Grayscale images are converted to BGR so they can be stacked with
    color images.
    """
    rows = len(imgArray)
    cols = len(imgArray[0])
    # A list of lists means a 2-D grid of images was supplied.
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range ( 0, rows):
            for y in range(0, cols):
                # Images matching the reference size scale uniformly;
                # others are first forced to the reference size.
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor= np.hstack(imgArray)
        ver = hor
    return ver
def preprocessing(img):
    """Turn a BGR frame into a binary edge map suitable for contour search."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 1)
    edges = cv2.Canny(blurred, 200, 200)
    kernel = np.ones((5, 5))
    # Dilate twice then erode once to close small gaps in the edges.
    dilated = cv2.dilate(edges, kernel, iterations=2)
    return cv2.erode(dilated, kernel, iterations=1)
def get_contours(img):
    """Find the largest 4-point contour (the document) in an edge map.

    Returns the approximated 4-corner polygon of the biggest contour with
    area > 5000, or an empty array if none qualifies.

    NOTE: draws the found corners onto the module-global `imgContour`
    (set each frame in the main loop) as a side effect.
    """
    biggest = np.array([])
    maxArea = 0
    contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area>5000:  # ignore small noise contours
            #cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
            peri = cv2.arcLength(cnt,True)
            # Approximate the contour; a document should reduce to 4 points.
            approx = cv2.approxPolyDP(cnt,0.02*peri,True)
            if area >maxArea and len(approx) == 4:
                biggest = approx
                maxArea = area
            cv2.drawContours(imgContour, biggest, -1, (255, 0, 0), 20)
    return biggest
    #print(len(approx))
    #objCor = len(approx)
    #x, y, w, h = cv2.boundingRect(approx)
def reorder(myPoints):
    """Order 4 corner points as top-left, top-right, bottom-left, bottom-right.

    myPoints: array reshapeable to (4, 2) of (x, y) corner coordinates.
    Returns an int32 array of shape (4, 1, 2) in the order expected by
    getWarp's destination points.

    Fix: removed the unreachable duplicate of the whole body that sat
    after the first `return` statement (dead code).
    """
    myPoints = myPoints.reshape((4, 2))
    myPointsNew = np.zeros((4, 1, 2), np.int32)
    add = myPoints.sum(1)
    myPointsNew[0] = myPoints[np.argmin(add)]   # top-left: smallest x + y
    myPointsNew[3] = myPoints[np.argmax(add)]   # bottom-right: largest x + y
    diff = np.diff(myPoints, axis=1)            # y - x per point
    myPointsNew[1] = myPoints[np.argmin(diff)]  # top-right: smallest y - x
    myPointsNew[2] = myPoints[np.argmax(diff)]  # bottom-left: largest y - x
    return myPointsNew
def getWarp(img, biggest):
    """Perspective-warp the quadrilateral `biggest` to a flat page image.

    Returns a (heightImg x widthImg) top-down view of the document.
    """
    corners = reorder(biggest)
    src = np.float32(corners)
    dst = np.float32([[0, 0], [widthImg, 0],
                      [0, heightImg], [widthImg, heightImg]])
    transform = cv2.getPerspectiveTransform(src, dst)
    # Cropping of the warped border was disabled in the original; kept off.
    return cv2.warpPerspective(img, transform, (widthImg, heightImg))
# Main capture loop: grab a frame, detect the document, warp it flat,
# and show both the workflow view and the scanned result. Press 'q' to quit.
while True:
    success, img = cap.read()
    img = cv2.resize(img,(widthImg,heightImg))
    imgContour = img.copy()  # get_contours draws the found corners onto this
    imgThres = preprocessing(img)
    biggest = get_contours(imgThres)
    #print(biggest)
    if biggest.size !=0:
        # A 4-corner document was found: warp it to a flat page.
        imgWarped=getWarp(img,biggest)
        # imageArray = ([img,imgThres],
        #               [imgContour,imgWarped])
        imageArray = ([imgContour, imgWarped])
        cv2.imshow("ImageWarped", imgWarped)
    else:
        # No document found this frame: show the raw frame and overlay only.
        # imageArray = ([img, imgThres],
        #               [img, img])
        imageArray = ([img, imgContour])
    stackedImages = stackImages(0.6,imageArray)
    cv2.imshow("WorkFlow", stackedImages)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| 34.726619
| 135
| 0.591879
|
493b74fcf53409e0e3da9a1043573b947dfbfb48
| 138
|
py
|
Python
|
junkfile/gui/windows/arrange_window/arrange_model.py
|
IvanFrezzaJr/junkfile2
|
13894955fba1f200bdb6dc8f4175d5320927c55e
|
[
"MIT"
] | null | null | null |
junkfile/gui/windows/arrange_window/arrange_model.py
|
IvanFrezzaJr/junkfile2
|
13894955fba1f200bdb6dc8f4175d5320927c55e
|
[
"MIT"
] | null | null | null |
junkfile/gui/windows/arrange_window/arrange_model.py
|
IvanFrezzaJr/junkfile2
|
13894955fba1f200bdb6dc8f4175d5320927c55e
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
@dataclass
class Model:
    """State for the arrange window: source/target directories and copy flag."""

    directory_in: str = ""   # directory to read files from
    directory_out: str = ""  # directory to arrange files into
    copy: bool = False       # True = copy files; presumably move otherwise — TODO confirm in controller
| 15.333333
| 33
| 0.673913
|
561c13eee14d30a42651e4d73bcd63ddcdf5a6b9
| 39,745
|
py
|
Python
|
ofa/imagenet_codebase/data_providers/roi.py
|
ronjian/once-for-all
|
6d0a76be5007fda31c8b366a51f23b626bb8f24c
|
[
"Apache-2.0"
] | null | null | null |
ofa/imagenet_codebase/data_providers/roi.py
|
ronjian/once-for-all
|
6d0a76be5007fda31c8b366a51f23b626bb8f24c
|
[
"Apache-2.0"
] | null | null | null |
ofa/imagenet_codebase/data_providers/roi.py
|
ronjian/once-for-all
|
6d0a76be5007fda31c8b366a51f23b626bb8f24c
|
[
"Apache-2.0"
] | null | null | null |
# import glob
# import math
# import os
# import random
# import shutil
# import time
# from pathlib import Path
# from threading import Thread
# import cv2
# import numpy as np
# import torch
# from PIL import Image, ExifTags
# from torch.utils.data import Dataset
# from tqdm import tqdm
# # from utils.utils import xyxy2xywh, xywh2xyxy
# help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
# img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
# vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# def xyxy2xywh(x):
# # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
# y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
# y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
# y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
# y[:, 2] = x[:, 2] - x[:, 0] # width
# y[:, 3] = x[:, 3] - x[:, 1] # height
# return y
# def xywh2xyxy(x):
# # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
# y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
# y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
# y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
# y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
# y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
# return y
# # Get orientation exif tag
# for orientation in ExifTags.TAGS.keys():
# if ExifTags.TAGS[orientation] == 'Orientation':
# break
# def get_hash(files):
# # Returns a single hash value of a list of files
# return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
# def exif_size(img):
# # Returns exif-corrected PIL size
# s = img.size # (width, height)
# try:
# rotation = dict(img._getexif().items())[orientation]
# if rotation == 6: # rotation 270
# s = (s[1], s[0])
# elif rotation == 8: # rotation 90
# s = (s[1], s[0])
# except:
# pass
# return s
# def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
# dataset = LoadImagesAndLabels(path, imgsz, batch_size,
# augment=augment, # augment images
# hyp=hyp, # augmentation hyperparameters
# rect=rect, # rectangular training
# cache_images=cache,
# single_cls=opt.single_cls,
# stride=int(stride),
# pad=pad)
# batch_size = min(batch_size, len(dataset))
# nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
# dataloader = torch.utils.data.DataLoader(dataset,
# batch_size=batch_size,
# num_workers=nw,
# pin_memory=True,
# collate_fn=LoadImagesAndLabels.collate_fn)
# return dataloader, dataset
# class LoadImages: # for inference
# def __init__(self, path, img_size=640):
# path = str(Path(path)) # os-agnostic
# files = []
# if os.path.isdir(path):
# files = sorted(glob.glob(os.path.join(path, '*.*')))
# elif os.path.isfile(path):
# files = [path]
# images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
# videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
# nI, nV = len(images), len(videos)
# self.img_size = img_size
# self.files = images + videos
# self.nF = nI + nV # number of files
# self.video_flag = [False] * nI + [True] * nV
# self.mode = 'images'
# if any(videos):
# self.new_video(videos[0]) # new video
# else:
# self.cap = None
# assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
# (path, img_formats, vid_formats)
# def __iter__(self):
# self.count = 0
# return self
# def __next__(self):
# if self.count == self.nF:
# raise StopIteration
# path = self.files[self.count]
# if self.video_flag[self.count]:
# # Read video
# self.mode = 'video'
# ret_val, img0 = self.cap.read()
# if not ret_val:
# self.count += 1
# self.cap.release()
# if self.count == self.nF: # last video
# raise StopIteration
# else:
# path = self.files[self.count]
# self.new_video(path)
# ret_val, img0 = self.cap.read()
# self.frame += 1
# print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
# else:
# # Read image
# self.count += 1
# img0 = cv2.imread(path) # BGR
# assert img0 is not None, 'Image Not Found ' + path
# print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# # Padded resize
# img = letterbox(img0, new_shape=self.img_size)[0]
# # Convert
# img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
# img = np.ascontiguousarray(img)
# # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
# return path, img, img0, self.cap
# def new_video(self, path):
# self.frame = 0
# self.cap = cv2.VideoCapture(path)
# self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
# def __len__(self):
# return self.nF # number of files
# class LoadWebcam: # for inference
# def __init__(self, pipe=0, img_size=640):
# self.img_size = img_size
# if pipe == '0':
# pipe = 0 # local camera
# # pipe = 'rtsp://192.168.1.64/1' # IP camera
# # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
# self.pipe = pipe
# self.cap = cv2.VideoCapture(pipe) # video capture object
# self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
# def __iter__(self):
# self.count = -1
# return self
# def __next__(self):
# self.count += 1
# if cv2.waitKey(1) == ord('q'): # q to quit
# self.cap.release()
# cv2.destroyAllWindows()
# raise StopIteration
# # Read frame
# if self.pipe == 0: # local camera
# ret_val, img0 = self.cap.read()
# img0 = cv2.flip(img0, 1) # flip left-right
# else: # IP camera
# n = 0
# while True:
# n += 1
# self.cap.grab()
# if n % 30 == 0: # skip frames
# ret_val, img0 = self.cap.retrieve()
# if ret_val:
# break
# # Print
# assert ret_val, 'Camera Error %s' % self.pipe
# img_path = 'webcam.jpg'
# print('webcam %g: ' % self.count, end='')
# # Padded resize
# img = letterbox(img0, new_shape=self.img_size)[0]
# # Convert
# img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
# img = np.ascontiguousarray(img)
# return img_path, img, img0, None
# def __len__(self):
# return 0
# class LoadStreams: # multiple IP or RTSP cameras
# def __init__(self, sources='streams.txt', img_size=640):
# self.mode = 'images'
# self.img_size = img_size
# if os.path.isfile(sources):
# with open(sources, 'r') as f:
# sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
# else:
# sources = [sources]
# n = len(sources)
# self.imgs = [None] * n
# self.sources = sources
# for i, s in enumerate(sources):
# # Start the thread to read frames from the video stream
# print('%g/%g: %s... ' % (i + 1, n, s), end='')
# cap = cv2.VideoCapture(0 if s == '0' else s)
# assert cap.isOpened(), 'Failed to open %s' % s
# w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
# h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# fps = cap.get(cv2.CAP_PROP_FPS) % 100
# _, self.imgs[i] = cap.read() # guarantee first frame
# thread = Thread(target=self.update, args=([i, cap]), daemon=True)
# print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
# thread.start()
# print('') # newline
# # check for common shapes
# s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
# self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
# if not self.rect:
# print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
# def update(self, index, cap):
# # Read next stream frame in a daemon thread
# n = 0
# while cap.isOpened():
# n += 1
# # _, self.imgs[index] = cap.read()
# cap.grab()
# if n == 4: # read every 4th frame
# _, self.imgs[index] = cap.retrieve()
# n = 0
# time.sleep(0.01) # wait time
# def __iter__(self):
# self.count = -1
# return self
# def __next__(self):
# self.count += 1
# img0 = self.imgs.copy()
# if cv2.waitKey(1) == ord('q'): # q to quit
# cv2.destroyAllWindows()
# raise StopIteration
# # Letterbox
# img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# # Stack
# img = np.stack(img, 0)
# # Convert
# img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
# img = np.ascontiguousarray(img)
# return self.sources, img, img0, None
# def __len__(self):
# return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
# class LoadImagesAndLabels(Dataset): # for training/testing
# def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
# cache_images=False, single_cls=False, stride=32, pad=0.0):
# try:
# f = [] # image files
# for p in path if isinstance(path, list) else [path]:
# p = str(Path(p)) # os-agnostic
# parent = str(Path(p).parent) + os.sep
# if os.path.isfile(p): # file
# with open(p, 'r') as t:
# t = t.read().splitlines()
# f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# elif os.path.isdir(p): # folder
# f += glob.iglob(p + os.sep + '*.*')
# else:
# raise Exception('%s does not exist' % p)
# self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
# except Exception as e:
# raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
# n = len(self.img_files)
# assert n > 0, 'No images found in %s. See %s' % (path, help_url)
# bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
# nb = bi[-1] + 1 # number of batches
# self.n = n # number of images
# self.batch = bi # batch index of image
# self.img_size = img_size
# self.augment = augment
# self.hyp = hyp
# self.image_weights = image_weights
# self.rect = False if image_weights else rect
# self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# self.mosaic_border = [-img_size // 2, -img_size // 2]
# self.stride = stride
# # Define labels
# self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
# self.img_files]
# # Check cache
# cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
# if os.path.isfile(cache_path):
# cache = torch.load(cache_path) # load
# if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
# cache = self.cache_labels(cache_path) # re-cache
# else:
# cache = self.cache_labels(cache_path) # cache
# # Get labels
# cache_arr = []
# for x in self.img_files:
# cache_arr.append(cache[x])
# labels, shapes = zip(*cache_arr)
# self.shapes = np.array(shapes, dtype=np.float64)
# self.labels = list(labels)
# # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
# if self.rect:
# # Sort by aspect ratio
# s = self.shapes # wh
# ar = s[:, 1] / s[:, 0] # aspect ratio
# irect = ar.argsort()
# self.img_files = [self.img_files[i] for i in irect]
# self.label_files = [self.label_files[i] for i in irect]
# self.labels = [self.labels[i] for i in irect]
# self.shapes = s[irect] # wh
# ar = ar[irect]
# # Set training image shapes
# shapes = [[1, 1]] * nb
# for i in range(nb):
# ari = ar[bi == i]
# mini, maxi = ari.min(), ari.max()
# if maxi < 1:
# shapes[i] = [maxi, 1]
# elif mini > 1:
# shapes[i] = [1, 1 / mini]
# self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# # Cache labels
# create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
# nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
# pbar = tqdm(self.label_files)
# for i, file in enumerate(pbar):
# l = self.labels[i] # label
# if l.shape[0]:
# assert l.shape[1] == 5, '> 5 label columns: %s' % file
# assert (l >= 0).all(), 'negative labels: %s' % file
# assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
# if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
# nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
# if single_cls:
# l[:, 0] = 0 # force dataset into single-class mode
# self.labels[i] = l
# nf += 1 # file found
# # Create subdataset (a smaller dataset)
# if create_datasubset and ns < 1E4:
# if ns == 0:
# create_folder(path='./datasubset')
# os.makedirs('./datasubset/images')
# exclude_classes = 43
# if exclude_classes not in l[:, 0]:
# ns += 1
# # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
# with open('./datasubset/images.txt', 'a') as f:
# f.write(self.img_files[i] + '\n')
# # Extract object detection boxes for a second stage classifier
# if extract_bounding_boxes:
# p = Path(self.img_files[i])
# img = cv2.imread(str(p))
# h, w = img.shape[:2]
# for j, x in enumerate(l):
# f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
# if not os.path.exists(Path(f).parent):
# os.makedirs(Path(f).parent) # make new output folder
# b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
# b[2:] = b[2:] * 1.3 + 30 # pad
# b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
# b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
# b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
# assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
# else:
# ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
# pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
# cache_path, nf, nm, ne, nd, n)
# assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
# # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
# self.imgs = [None] * n
# if cache_images:
# gb = 0 # Gigabytes of cached images
# pbar = tqdm(range(len(self.img_files)), desc='Caching images')
# self.img_hw0, self.img_hw = [None] * n, [None] * n
# for i in pbar: # max 10k images
# self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
# gb += self.imgs[i].nbytes
# pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# def cache_labels(self, path='labels.cache'):
# # Cache dataset labels, check images and read shapes
# x = {} # dict
# pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
# for (img, label) in pbar:
# try:
# l = []
# image = Image.open(img)
# image.verify() # PIL verify
# # _ = io.imread(img) # skimage verify (from skimage import io)
# shape = exif_size(image) # image size
# if os.path.isfile(label):
# with open(label, 'r') as f:
# l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
# if len(l) == 0:
# l = np.zeros((0, 5), dtype=np.float32)
# x[img] = [l, shape]
# except Exception as e:
# x[img] = None
# print('WARNING: %s: %s' % (img, e))
# x['hash'] = get_hash(self.label_files + self.img_files)
# torch.save(x, path) # save for next time
# return x
# def __len__(self):
# return len(self.img_files)
# # def __iter__(self):
# # self.count = -1
# # print('ran dataset iter')
# # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# # return self
# def __getitem__(self, index):
# if self.image_weights:
# index = self.indices[index]
# hyp = self.hyp
# if self.mosaic:
# # Load mosaic
# img, labels = load_mosaic(self, index)
# shapes = None
# else:
# # Load image
# img, (h0, w0), (h, w) = load_image(self, index)
# # print('!!!1', img.shape)
# # Letterbox
# shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
# # print('self.rect', self.rect, 'shape', shape)
# img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
# shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# # print('!!!2', img.shape)
# # Load labels
# labels = []
# x = self.labels[index]
# if x.size > 0:
# # Normalized xywh to pixel xyxy format
# labels = x.copy()
# labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
# labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
# labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
# labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
# if self.augment:
# # Augment imagespace
# if not self.mosaic:
# img, labels = random_affine(img, labels,
# degrees=hyp['degrees'],
# translate=hyp['translate'],
# scale=hyp['scale'],
# shear=hyp['shear'])
# # Augment colorspace
# augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# # Apply cutouts
# # if random.random() < 0.9:
# # labels = cutout(img, labels)
# nL = len(labels) # number of labels
# if nL:
# # convert xyxy to xywh
# labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# # Normalize coordinates 0 - 1
# labels[:, [2, 4]] /= img.shape[0] # height
# labels[:, [1, 3]] /= img.shape[1] # width
# if self.augment:
# # random left-right flip
# lr_flip = True
# if lr_flip and random.random() < 0.5:
# img = np.fliplr(img)
# if nL:
# labels[:, 1] = 1 - labels[:, 1]
# # random up-down flip
# ud_flip = False
# if ud_flip and random.random() < 0.5:
# img = np.flipud(img)
# if nL:
# labels[:, 2] = 1 - labels[:, 2]
# labels_out = torch.zeros((nL, 6))
# if nL:
# labels_out[:, 1:] = torch.from_numpy(labels)
# # Convert
# img = img[:, :, ::-1].transpose(2, 0, 1)
# img = np.ascontiguousarray(img)
# return torch.from_numpy(img), labels_out, self.img_files[index], shapes
# @staticmethod
# def collate_fn(batch):
# img, label, path, shapes = zip(*batch) # transposed
# for i, l in enumerate(label):
# l[:, 0] = i # add target image index for build_targets()
# return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# def load_image(self, index):
# # loads 1 image from dataset, returns img, original hw, resized hw
# img = self.imgs[index]
# if img is None: # not cached
# path = self.img_files[index]
# img = cv2.imread(path) # BGR
# assert img is not None, 'Image Not Found ' + path
# h0, w0 = img.shape[:2] # orig hw
# r = self.img_size / max(h0, w0) # resize image to img_size
# if r != 1: # always resize down, only resize up if training with augmentation
# interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
# img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
# return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
# else:
# return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
# def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
# r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
# hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
# dtype = img.dtype # uint8
# x = np.arange(0, 256, dtype=np.int16)
# lut_hue = ((x * r[0]) % 180).astype(dtype)
# lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
# lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
# img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
# cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# # Histogram equalization
# # if random.random() < 0.2:
# # for i in range(3):
# # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
# def load_mosaic(self, index):
# # loads images in a mosaic
# labels4 = []
# s = self.img_size
# yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
# indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
# for i, index in enumerate(indices):
# # Load image
# img, _, (h, w) = load_image(self, index)
# # place img in img4
# if i == 0: # top left
# img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
# x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
# x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
# elif i == 1: # top right
# x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
# x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
# elif i == 2: # bottom left
# x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
# x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
# elif i == 3: # bottom right
# x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
# x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
# img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
# padw = x1a - x1b
# padh = y1a - y1b
# # Labels
# x = self.labels[index]
# labels = x.copy()
# if x.size > 0: # Normalized xywh to pixel xyxy format
# labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
# labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
# labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
# labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
# labels4.append(labels)
# # Concat/clip labels
# if len(labels4):
# labels4 = np.concatenate(labels4, 0)
# # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
# np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# # Replicate
# # img4, labels4 = replicate(img4, labels4)
# # Augment
# # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
# img4, labels4 = random_affine(img4, labels4,
# degrees=self.hyp['degrees'],
# translate=self.hyp['translate'],
# scale=self.hyp['scale'],
# shear=self.hyp['shear'],
# border=self.mosaic_border) # border to remove
# return img4, labels4
# def replicate(img, labels):
# # Replicate labels
# h, w = img.shape[:2]
# boxes = labels[:, 1:].astype(int)
# x1, y1, x2, y2 = boxes.T
# s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
# for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
# x1b, y1b, x2b, y2b = boxes[i]
# bh, bw = y2b - y1b, x2b - x1b
# yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
# x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
# img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
# labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
# return img, labels
# def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
# shape = img.shape[:2] # current shape [height, width]
# if isinstance(new_shape, int):
# new_shape = (new_shape, new_shape)
# # Scale ratio (new / old)
# r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
# if not scaleup: # only scale down, do not scale up (for better test mAP)
# r = min(r, 1.0)
# # Compute padding
# ratio = r, r # width, height ratios
# new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
# dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
# if auto: # minimum rectangle
# dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
# elif scaleFill: # stretch
# dw, dh = 0.0, 0.0
# new_unpad = (new_shape[1], new_shape[0])
# ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
# dw /= 2 # divide padding into 2 sides
# dh /= 2
# if shape[::-1] != new_unpad: # resize
# print('new_unpad', new_unpad)
# img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
# top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
# left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
# # print('top, bottom, left, right', top, bottom, left, right)
# img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
# return img, ratio, (dw, dh)
# def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
# # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# # targets = [cls, xyxy]
# height = img.shape[0] + border[0] * 2 # shape(h,w,c)
# width = img.shape[1] + border[1] * 2
# # Rotation and Scale
# R = np.eye(3)
# a = random.uniform(-degrees, degrees)
# # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
# s = random.uniform(1 - scale, 1 + scale)
# # s = 2 ** random.uniform(-scale, scale)
# R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# # Translation
# T = np.eye(3)
# T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1] # x translation (pixels)
# T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0] # y translation (pixels)
# # Shear
# S = np.eye(3)
# S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
# S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# # Combined rotation matrix
# M = S @ T @ R # ORDER IS IMPORTANT HERE!!
# if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
# img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# # Transform label coordinates
# n = len(targets)
# if n:
# # warp points
# xy = np.ones((n * 4, 3))
# xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
# xy = (xy @ M.T)[:, :2].reshape(n, 8)
# # create new boxes
# x = xy[:, [0, 2, 4, 6]]
# y = xy[:, [1, 3, 5, 7]]
# xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # # apply angle-based reduction of bounding boxes
# # radians = a * math.pi / 180
# # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# # x = (xy[:, 2] + xy[:, 0]) / 2
# # y = (xy[:, 3] + xy[:, 1]) / 2
# # w = (xy[:, 2] - xy[:, 0]) * reduction
# # h = (xy[:, 3] - xy[:, 1]) * reduction
# # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# # reject warped points outside of image
# xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
# xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# w = xy[:, 2] - xy[:, 0]
# h = xy[:, 3] - xy[:, 1]
# area = w * h
# area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
# ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
# i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)
# targets = targets[i]
# targets[:, 1:5] = xy[i]
# return img, targets
# def cutout(image, labels):
# # https://arxiv.org/abs/1708.04552
# # https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# # https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
# h, w = image.shape[:2]
# def bbox_ioa(box1, box2):
# # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
# box2 = box2.transpose()
# # Get the coordinates of bounding boxes
# b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
# b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# # Intersection area
# inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
# (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# # box2 area
# box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# # Intersection over box2 area
# return inter_area / box2_area
# # create random masks
# scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
# for s in scales:
# mask_h = random.randint(1, int(h * s))
# mask_w = random.randint(1, int(w * s))
# # box
# xmin = max(0, random.randint(0, w) - mask_w // 2)
# ymin = max(0, random.randint(0, h) - mask_h // 2)
# xmax = min(w, xmin + mask_w)
# ymax = min(h, ymin + mask_h)
# # apply random color mask
# image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# # return unobscured labels
# if len(labels) and s > 0.03:
# box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
# ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
# labels = labels[ioa < 0.60] # remove >60% obscured labels
# return labels
# def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# # creates a new ./images_reduced folder with reduced size images of maximum size img_size
# path_new = path + '_reduced' # reduced images path
# create_folder(path_new)
# for f in tqdm(glob.glob('%s/*.*' % path)):
# try:
# img = cv2.imread(f)
# h, w = img.shape[:2]
# r = img_size / max(h, w) # size ratio
# if r < 1.0:
# img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
# fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
# cv2.imwrite(fnew, img)
# except:
# print('WARNING: image failure %s' % f)
# def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# # Save images
# formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# # for path in ['../coco/images/val2014', '../coco/images/train2014']:
# for path in ['../data/sm4/images', '../data/sm4/background']:
# create_folder(path + 'bmp')
# for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
# for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
# cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# # Save labels
# # for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
# for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
# with open(file, 'r') as f:
# lines = f.read()
# # lines = f.read().replace('2014/', '2014bmp/') # coco
# lines = lines.replace('/images', '/imagesbmp')
# lines = lines.replace('/background', '/backgroundbmp')
# for ext in formats:
# lines = lines.replace(ext, '.bmp')
# with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
# f.write(lines)
# def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# # Converts dataset to bmp (for faster training)
# formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for a, b, files in os.walk(dataset):
# for file in tqdm(files, desc=a):
# p = a + '/' + file
# s = Path(file).suffix
# if s == '.txt': # replace text
# with open(p, 'r') as f:
# lines = f.read()
# for f in formats:
# lines = lines.replace(f, '.bmp')
# with open(p, 'w') as f:
# f.write(lines)
# elif s in formats: # replace image
# cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
# if s != '.bmp':
# os.system("rm '%s'" % p)
# def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# # Copies all the images in a text file (list of images) into a folder
# create_folder(path[:-4])
# with open(path, 'r') as f:
# for line in f.read().splitlines():
# os.system('cp "%s" %s' % (line, path[:-4]))
# print(line)
# def create_folder(path='./new_folder'):
# # Create folder
# if os.path.exists(path):
# shutil.rmtree(path) # delete output folder
# os.makedirs(path) # make new output folder
| 43.342421
| 240
| 0.509221
|
38ec7f10464cf22f6d6318e9e4de8f6a3f24a3c9
| 20,160
|
py
|
Python
|
lightlab/equipment/abstract_drivers/configurable.py
|
ZhimuG/lightlab-ZG
|
cceb417be153c31d62b6e07b347ef093b9462b84
|
[
"MIT"
] | null | null | null |
lightlab/equipment/abstract_drivers/configurable.py
|
ZhimuG/lightlab-ZG
|
cceb417be153c31d62b6e07b347ef093b9462b84
|
[
"MIT"
] | null | null | null |
lightlab/equipment/abstract_drivers/configurable.py
|
ZhimuG/lightlab-ZG
|
cceb417be153c31d62b6e07b347ef093b9462b84
|
[
"MIT"
] | null | null | null |
from lightlab import visalogger as logger
from pyvisa import VisaIOError
from contextlib import contextmanager
import dpath.util
import json
from numpy import floor
from pathlib import Path
from lightlab.util.io import lightlabDevelopmentDir
defaultFileDir = lightlabDevelopmentDir / 'savedConfigDefaults/'
from . import AbstractDriver
class AccessException(Exception):
    ''' Raised on attempts to modify protected configuration state,
        such as the reserved 'default' and 'init' config tokens. '''
class TekConfig(object):
    ''' Wraps a dictionary attribute. Uses dpath for operations.

        Commands are defined as tuples (cStr, val). For example (':PATH:TO:CMD', 4).
        Use these by doing scope.write(' '.join(TekConfig.get('PATH:TO:CMD')))
        The val is always a string.

        Internally, a node that is simultaneously a leaf and a subdirectory
        stores its leaf value under the special '&' sibling token (see set/get).

        Todo:
            :transferring subgroup from one instance to another.
            :returning a dictionary representing a subgroup (actually this might currently be happening in error)
            :transferring subgroup values to a different subgroup in the same instance (for example, CH1 to CH2)
    '''
    separator = ':'  # level separator within command path strings, e.g. ':CH1:SCALE'

    def __init__(self, initDict=None):
        ''' Args:
                initDict (dict): initial configuration tree. Shallow-copied so
                    the caller's top-level dict is not mutated by later sets.
        '''
        if initDict is None:
            initDict = dict()
        self.dico = initDict.copy()

    def __str__(self):
        # Human-readable JSON dump of the whole configuration tree
        return json.dumps(self.dico, indent=2, sort_keys=True)

    def print(self, subgroup=''):
        ''' Prints the (sub)tree specified by ``subgroup`` in JSON form. '''
        sub = type(self)()
        sub.transfer(self, subgroup=subgroup)
        print(sub)

    def copy(self, subgroup=''):
        ''' Returns a new TekConfig containing a copy of ``subgroup``
            ('' means everything). '''
        ret = type(self)()
        ret.transfer(self, subgroup=subgroup)
        return ret

    def get(self, cStr, asCmd=True):
        ''' Returns the value only, not a dictionary

            Args:
                asCmd (bool): if true, returns a tuple representing a command. Otherwise returns just the value

            Raises:
                KeyError: if ``cStr`` is not present in this instance
        '''
        try:
            val = dpath.util.get(self.dico, cStr, separator=self.separator)
        except KeyError:
            raise KeyError(cStr + ' is not present in this TekConfig instance')
        # Node is both a leaf and a directory: the leaf value lives under '&'
        if type(val) is dict and '&' in val.keys():
            val = val['&']
        if not asCmd:
            return val
        else:
            return (cStr, str(val))

    def set(self, cStr, val):
        ''' Takes the value only, not a dictionary '''
        # First check that it does not exist as a subdir
        try:
            ex = dpath.util.get(self.dico, cStr, separator=self.separator)
        except KeyError:
            # doesn't exist, we are good to go
            pass
        else:
            if type(ex) is dict:
                # we don't want to overwrite this subdirectory, so put a tag on cmd
                cStr = cStr + self.separator + '&'
        cmd = (cStr, val)
        # dpath.util.set returns the number of existing leaves it modified
        success = dpath.util.set(self.dico, *cmd, separator=self.separator)
        if success != 1:  # it doesn't exist yet
            try:
                dpath.util.new(self.dico, *cmd, separator=self.separator)
            except (ValueError, dpath.exceptions.PathNotFound):
                # We probably have an integer leaf where we would also like to have a directory
                parent = self.separator.join(cmd[0].split(self.separator)[:-1])
                try:
                    oldV = self.get(parent, asCmd=False)
                except KeyError:
                    print('dpath did not take ' + str(cmd))
                    raise
                # Demote the existing leaf value to the '&' sibling token,
                # turning the parent into a directory, then retry the insert
                dpath.util.set(self.dico, parent, {'&': oldV}, separator=self.separator)
                dpath.util.new(self.dico, *cmd, separator=self.separator)

    def getList(self, subgroup='', asCmd=True):
        ''' Deep crawler that goes in and generates a command for every leaf.

            Args:
                subgroup (str): subgroup must be a subdirectory. If '', it is root directory. It can also be a command string, in which case, the returned list has length 1
                asCmd (bool): if false, returns a list of strings that can be sent to scopes

            Returns:
                list: list of valid commands (cstr, val) on the subgroup subdirectory
        '''
        cList = []
        children = dpath.util.search(self.dico, subgroup + '*',
                                     yielded=True, separator=self.separator)
        for cmd in children:
            s, v = cmd
            if type(v) is not dict:
                # Leaf: normalize to an absolute path starting with the separator
                if s[0] != self.separator:
                    s = self.separator + s
                cList += [(s, v)]
            else:
                # Subdirectory: recurse one level deeper
                cList += self.getList(subgroup=cmd[0] + self.separator)
        if asCmd:
            return cList
        else:
            writeList = [None] * len(cList)
            for i, cmd in enumerate(cList):
                cStr, val = cmd
                if cStr[-1] == '&':  # check for tokens
                    # strip separator + '&' so hardware sees the real path
                    cStr = cStr[:-2]
                writeList[i] = cStr + ' ' + str(val)
            return writeList

    def setList(self, cmdList):
        ''' The inverse of getList '''
        for c in cmdList:
            self.set(*c)

    def transfer(self, source, subgroup=''):
        ''' Pulls config from the source TekConfig object. This is useful for subgrouping.

            For example, you might want to load from default only the trigger configuration.

            Args:
                source (TekConfig or dict): the object from which config values are pulled into self
                subgroup (str): subgroup must be a subdirectory. If '', it is root directory.
                    It can also be a command string, in which case, only that parameter is affected
        '''
        if type(source) is dict:
            sCon = type(self)(source)
        elif type(source) is type(self):
            sCon = source
        else:
            raise Exception('Invalid source for transfer. Got ' + str(type(source)))
        commands = sCon.getList(subgroup=subgroup)
        self.setList(commands)
        return self

    @classmethod
    def fromFile(cls, fname, subgroup=''):
        ''' Loads a JSON file into a new TekConfig, optionally restricted to ``subgroup``. '''
        fpath = Path(fname)
        with fpath.open('r') as fx:
            d = json.load(fx)
        full = cls(d)
        ret = cls()
        ret.transfer(full, subgroup=subgroup)
        return ret

    @classmethod
    def __parseShorthand(cls, setResponse):
        ''' Turns shorthand multi-command strings into list of proper command tuples
        '''
        pairs = setResponse.split(';')
        commands = [None] * len(pairs)
        cmdGrp = None
        for i in range(len(pairs)):
            words = pairs[i].split(' ')
            cmdLeaf, val = words[0:2]
            if len(words) > 2:
                print('Warning 2-value returns not handled by TekConfig class. Ignoring...')
                print(*words)
            if cmdLeaf[0] == cls.separator:
                # Absolute path: remember its parent group for the following
                # relative (shorthand) entries
                pat = cmdLeaf[1:]
                cmdGrp = cls.separator.join(pat.split(cls.separator)[:-1])
            else:
                # Relative leaf: attach to the most recent absolute group
                pat = cmdGrp + cls.separator + cmdLeaf
            commands[i] = (pat, val)
        return commands

    @classmethod
    def fromSETresponse(cls, setResponse, subgroup=''):
        ''' setResponse (str): what is returned by the scope in response to query('SET?')

            It will require some parsing for subgroup shorthand
        '''
        commandList = cls.__parseShorthand(setResponse)
        full = cls()
        full.setList(commandList)
        if subgroup == '':
            return full
        else:
            ret = cls()
            ret.transfer(full, subgroup=subgroup)
            return ret

    def save(self, fname, subgroup='', overwrite=False):
        ''' Saves dictionary parameters in json format. Merges if there's something already there, unless overwrite is True.

            Args:
                fname (str): file name
                subgroup (str): groups of commands to write. If '', it is everything.
                overwrite (bool): will make a new file exactly corresponding to this instance, otherwise merges with existing
        '''
        try:
            existingConfig = TekConfig.fromFile(fname)
        except FileNotFoundError:
            # file probably doesn't exist
            existingConfig = None
            overwrite = True
        if overwrite:
            configToSave = type(self)()
            configToSave.transfer(self, subgroup=subgroup)
        else:
            configToSave = existingConfig.transfer(self, subgroup=subgroup)
        fpath = Path(fname)
        with fpath.open('w+') as fx:
            fx.write(str(configToSave))  # __str__ gives nice json format
# pylint: disable=no-member
class Configurable(AbstractDriver):
    ''' Instruments can be configurable to keep track of settings within the instrument

        This class is setup so that the hardware state is reflected exactly in the 'live' config
        **unless somebody changes something in lab**.
        Watch out for that and use ``forceHardware`` if that is a risk

        This class uses query/write methods that are not directly inherited,
        so the subclass or its parents must implement those functions
    '''
    config = None  #: Dictionary of :class:`TekConfig` objects.

    def __init__(self, headerIsOptional=True, verboseIsOptional=False, precedingColon=True, interveningSpace=True, **kwargs):
        ''' Args:
                headerIsOptional (bool): if True, 'HEADER OFF' is sent on first
                    hardware access, so responses come back without command headers
                verboseIsOptional (bool): if True, 'VERBOSE ON' is sent on first hardware access
                precedingColon (bool): whether written commands keep their leading ':'
                interveningSpace (bool): whether a space separates command path and value
        '''
        self._hardwareinit = False  # lazy: performed on first actual hardware access
        self.verboseIsOptional = verboseIsOptional
        self.headerIsOptional = headerIsOptional
        # If the header can be turned off, initHardware will do so,
        # and responses will not need header stripping
        self.header = not headerIsOptional
        self.colon = precedingColon
        self.space = interveningSpace
        self.config = dict()
        self.config['default'] = None  # loaded lazily from file in loadConfig('+default')
        self.config['init'] = TekConfig()  # parameter values as first seen on the hardware
        self.config['live'] = TekConfig()  # mirror of the believed current hardware state
        self.separator = self.config['live'].separator
        super().__init__(**kwargs)

    def initHardware(self):
        ''' Runs upon first hardware access.
            Tells the instrument how to format its commands
        '''
        if not self._hardwareinit:
            if self.verboseIsOptional:
                self.write('VERBOSE ON')
            if self.headerIsOptional:
                self.write('HEADER OFF')
            self._hardwareinit = True
        return self._hardwareinit

    # Simple, individual getter and setter
    def setConfigParam(self, cStr, val=None, forceHardware=False):
        ''' Sets an individual configuration parameter.
            If the value has been read before, and there is no change,
            then it will **not** write to the hardware.

            Args:
                cStr (str): name of the command
                val (any): value to send. Detects type, so if it's an int, it will be stored as int
                forceHardware (bool): will always send to hardware,
                    in case it is critical or if it tends to be changed by pesky lab users

            Returns:
                (bool): Did it require a write to hardware?
        '''
        if val is None:
            val = ''
        try:
            prevVal = self.config['live'].get(cStr, asCmd=False)
        except KeyError:
            prevVal = None
            refresh = True
        else:
            # Compare as strings so that e.g. 2 vs. '2' counts as unchanged
            refresh = (str(val) != str(prevVal))
        if refresh or forceHardware:
            self.config['live'].set(cStr, val)
            if prevVal is None:
                # First touch of this parameter: record it in 'init' for restoring later
                self.config['init'].transfer(self.config['live'], cStr)
            self._setHardwareConfig(cStr)  # send only the one that changed
            return True
        else:
            return False

    def getConfigParam(self, cStr, forceHardware=False):
        ''' Gets a single parameter.
            If the value has been read before, and there is no change,
            then it will **not** query the hardware.

            This is much faster than getting from hardware; however,
            it assumes that nobody in lab touched anything.

            Args:
                cStr (str): name of the command
                forceHardware (bool): will always query from hardware,
                    in case it is critical or if it tends to be changed by pesky lab users

            Returns:
                (any): command value. Detects type, so that ``'2.5'`` will return as ``float``

            If the command is not recognized, attempts to get it from hardware
        '''
        try:
            prevVal = self.config['live'].get(cStr, asCmd=False)
        except KeyError:
            prevVal = None
        if prevVal is None or forceHardware:  # Try getting from hardware
            self._getHardwareConfig(cStr)
            if prevVal is None:  # This is the first time getting, so it goes in 'init'
                self.config['init'].transfer(self.config['live'], cStr)
        return self.config['live'].get(cStr, asCmd=False)

    @contextmanager
    def tempConfig(self, cStr, tempVal, forceHardware=False):
        ''' Changes a parameter within the context of a "with" block.
            Args are same as in :meth:`getConfigParam`.
        '''
        oldVal = self.getConfigParam(cStr, forceHardware)
        try:
            self.setConfigParam(cStr, tempVal)
            yield self
        finally:
            # Restore the previous value even if the block raised
            self.setConfigParam(cStr, oldVal)

    def getDefaultFilename(self):
        r''' Combines the :data:`lightlab.util.io.paths.defaultFileDir`
            with the \*IDN? string of this instrument.

            Returns:
                (str): the default filename
        '''
        info = self.instrID().split(',')
        # Parenthesize the string concatenation: Path's '/' binds tighter than '+',
        # and Path + str raises TypeError
        deffile = defaultFileDir / ('-'.join(info[:3]) + '.json')
        return deffile

    def saveConfig(self, dest='+user', subgroup='', overwrite=False):
        '''
            If you would like to setup a temporary state (i.e. taking some measurements and going back), use a file and `subgroup=`

            Args:
                dest (TekConfig/dict/str): destination object, '+token' string, or file name
                subgroup (str): a group of commands or a single command. If '', it means everything.
                overwrite (bool): passed to :meth:`TekConfig.save` when dest is a file name

            Side effects:
                if dest is object or dict, modifies it
                if dest is token, modifies the config library of self
                if dest is filename, writes that file
        '''
        if type(dest) in [TekConfig, dict]:
            # NOTE(review): a plain dict has no .transfer method, so only the
            # TekConfig case can actually work here -- confirm intended dest types
            dest.transfer(self.config['live'], subgroup=subgroup)
        elif type(dest) is str and dest[0] == '+':  # tokens
            # Guard both reserved tokens. (The original compared against the
            # single typo'd string ['default, init'], so this never fired.)
            if dest[1:] in ('default', 'init'):
                raise AccessException(
                    'You are not allowed to change defaults or initialization history')
            self.config[dest[1:]] = TekConfig()
            self.config[dest[1:]].transfer(
                self.config['live'], subgroup=subgroup)
        elif type(dest) is str:
            self.config['live'].save(dest, subgroup, overwrite)
        else:
            raise Exception(
                'Invalid save destination. It must be a file, token, or TekConfig object')

    def loadConfig(self, source='+user', subgroup=''):
        ''' Loads some configuration parameters from a source which is either:

                * a file name string, or
                * a special token ['+default' or '+init'], or
                * some TekConfig object or dict you have out there

            Args:
                source (str/TekConfig): load source
                subgroup (str): a group of commands or a single command. If '', it means everything.
        '''
        if type(source) in [TekConfig, dict]:
            srcObj = source
        elif type(source) is str and source[0] == '+':  # tokens
            if source[1:] == 'default' and self.config['default'] is None:  # need to load default
                self.config['default'] = TekConfig.fromFile(
                    self.getDefaultFilename())
            srcObj = self.config[source[1:]]
        elif type(source) is str:
            srcObj = TekConfig.fromFile(source)
        else:
            raise Exception(
                'Invalid load source. It must be a file, token, or TekConfig object')
        for liveInit in ['live', 'init']:
            self.config[liveInit].transfer(srcObj, subgroup=subgroup)
        # This writes everything without checking how it is set currently
        self._setHardwareConfig(subgroup)

    def __getFullHardwareConfig(self, subgroup=''):
        ''' Get everything that is returned by the SET? query

            Args:
                subgroup (str): default '' means everything

            Returns:
                TekConfig: structured configuration object
        '''
        self.initHardware()
        logger.info('Querying SET? response of %s', self.instrID())
        try:
            resp = self.query('SET?')
            return TekConfig.fromSETresponse(resp, subgroup=subgroup)
        except VisaIOError as err:  # SET timed out. You are done.
            logger.error('%s timed out on \'SET?\'. \
                         Try resetting with \'*RST\'.', self.instrID())
            raise err

    def _getHardwareConfig(self, cStrList):
        ''' Queries all or a subgroup of commands using the state of the 'live' config.

            This does not return, but it puts it in the config['live'] attribute

            Args:
                cStrList (list or str): list of command strings. Can also be a scalar string
        '''
        self.initHardware()
        if not isinstance(cStrList, list) and isinstance(cStrList, str):
            cStrList = [cStrList]
        for cStr in cStrList:
            if cStr[-1] == '&':  # handle the sibling subdir token
                cStr = cStr[:-2]
            try:
                ret = self.query(cStr + '?')
            except VisaIOError:
                logger.error('Problematic parameter was %s.\n'
                             'Likely it does not exist in this instrument command structure.', cStr)
                raise
            logger.debug('Queried %s, got %s', cStr, ret)
            if self.header:
                # Response looks like 'CMD value'; keep only the value
                val = ret.split(' ')[-1]
            else:
                val = ret
            # Type detection: ints stay int, other numerics become float,
            # anything unparseable remains a string
            try:
                val = float(val)
            except ValueError:
                pass
            else:
                if val == floor(val):
                    val = int(val)
            self.config['live'].set(cStr, val)

    def _setHardwareConfig(self, subgroup=''):
        ''' Writes all or a subgroup of commands using the state of the 'live' config.

            Args:
                subgroup (str): a subgroup of commands. If '', we write everything
        '''
        self.initHardware()
        live = self.config['live'].getList(subgroup, asCmd=False)
        for cmd in live:
            if not self.colon and cmd[0] == self.separator:
                cmd = cmd[1:]
            if not self.space:
                # Remove the space between command and value.
                # (The original discarded the join result, making this a no-op.)
                cmd = ''.join(cmd.split(' '))
            logger.debug('Sending %s to configurable hardware', cmd)
            self.write(cmd)

    def generateDefaults(self, filename=None, overwrite=False):
        ''' Attempts to read every configuration parameter.
            Handles several cases where certain parameters do not make sense and must be skipped

            Generates a new default file which is saved
            in configurable.defaultFileDir

            *This takes a while.*

            Args:
                filename (str): simple name. You can't control the directory.
                overwrite (bool): If False, stops if the file already exists.
        '''
        if filename is None:
            filename = self.getDefaultFilename()
        if Path(filename).exists() and not overwrite:
            logger.warning('%s already exists. '
                           'Use `overwrite` if you really want.', filename)
            return
        allConfig = self.__getFullHardwareConfig()
        allSetCmds = allConfig.getList('', asCmd=True)
        cfgBuild = TekConfig()
        for cmd in allSetCmds:
            if cmd[0][-1] != '&':  # handle the sibling subdir token
                cStr = cmd[0]
            else:
                cStr = cmd[0][:-2]
            try:
                val = self.query(cStr + '?', withTimeout=1000)
                cfgBuild.set(cStr, val)
                # Lazy %-style args; the old positional call logger.info(cStr, '<--', val)
                # crashed the logging formatter (cStr has no placeholders)
                logger.info('%s <-- %s', cStr, val)
            except VisaIOError:
                logger.info('%s X -- skipping', cStr)
        cfgBuild.save(filename)
        logger.info('New default saved to %s', filename)
# pylint: enable=no-member
| 38.326996
| 172
| 0.567659
|
389410478e6f499ac51d7e391fbacc1c42e73e89
| 27,305
|
py
|
Python
|
msoffcrypto/format/ppt97.py
|
lunarobliq/msoffcrypto-tool
|
36d1d0e5abaa57624271ba6344cad3478b25021d
|
[
"MIT"
] | 1
|
2019-12-20T06:17:24.000Z
|
2019-12-20T06:17:24.000Z
|
msoffcrypto/format/ppt97.py
|
lunarobliq/msoffcrypto-tool
|
36d1d0e5abaa57624271ba6344cad3478b25021d
|
[
"MIT"
] | null | null | null |
msoffcrypto/format/ppt97.py
|
lunarobliq/msoffcrypto-tool
|
36d1d0e5abaa57624271ba6344cad3478b25021d
|
[
"MIT"
] | null | null | null |
import logging, io, shutil, tempfile
from struct import pack, unpack
from collections import namedtuple
import olefile
from . import base
from .common import _parse_encryptionheader, _parse_encryptionverifier
from ..method.rc4_cryptoapi import DocumentRC4CryptoAPI
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
RecordHeader = namedtuple('RecordHeader', [
'recVer',
'recInstance',
'recType',
'recLen',
])
def _parseRecordHeader(blob):
# RecordHeader: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/df201194-0cd0-4dfb-bf10-eea353d8eabc
getBitSlice = lambda bits, i, w: (bits & (2 ** w - 1 << i)) >> i
blob.seek(0)
buf, = unpack("<H", blob.read(2))
recVer = getBitSlice(buf, 0, 4)
recInstance = getBitSlice(buf, 4, 12)
recType, = unpack("<H", blob.read(2))
recLen, = unpack("<I", blob.read(4))
rh = RecordHeader(
recVer=recVer,
recInstance=recInstance,
recType=recType,
recLen=recLen,
)
return rh
def _packRecordHeader(rh):
setBitSlice = lambda bits, i, w, v: (bits & ~((2**w - 1) << i)) | ((v & (2**w - 1)) << i)
blob = io.BytesIO()
_buf = 0xffff
_buf = setBitSlice(_buf, 0, 4, rh.recVer)
_buf = setBitSlice(_buf, 4, 12, rh.recInstance)
buf = pack("<H", _buf)
blob.write(buf)
buf = pack("<H", rh.recType)
blob.write(buf)
buf = pack("<I", rh.recLen)
blob.write(buf)
blob.seek(0)
return blob
CurrentUserAtom = namedtuple('CurrentUserAtom', [
    'rh',                    # RecordHeader of this atom
    'size',                  # must be 0x00000014 per the spec
    'headerToken',           # token identifying whether the file is encrypted
    'offsetToCurrentEdit',   # offset of the most recent UserEditAtom
    'lenUserName',
    'docFileVersion',
    'majorVersion',
    'minorVersion',
    'unused',
    'ansiUserName',
    'relVersion',
    'unicodeUserName',
])


def _parseCurrentUserAtom(blob):
    """Parse a CurrentUserAtom record from *blob*.

    Raises AssertionError when the record header or the fixed-size field
    does not match the values mandated by [MS-PPT].
    """
    # CurrentUserAtom: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/940d5700-e4d7-4fc0-ab48-fed5dbc48bc1
    # rh (8 bytes): A RecordHeader structure...
    buf = io.BytesIO(blob.read(8))
    rh = _parseRecordHeader(buf)
    # logger.debug(rh)
    # ...Sub-fields are further specified in the following table.
    assert rh.recVer == 0x0
    assert rh.recInstance == 0x000
    assert rh.recType == 0x0ff6
    size, = unpack("<I", blob.read(4))
    # logger.debug(hex(size))
    # size (4 bytes): ...It MUST be 0x00000014.
    assert size == 0x00000014
    # headerToken (4 bytes): An unsigned integer that specifies
    # a token used to identify whether the file is encrypted.
    headerToken, = unpack("<I", blob.read(4))
    # TODO: Check headerToken value
    offsetToCurrentEdit, = unpack("<I", blob.read(4))
    lenUserName, = unpack("<H", blob.read(2))
    docFileVersion, = unpack("<H", blob.read(2))
    majorVersion, minorVersion, = unpack("<BB", blob.read(2))
    unused = blob.read(2)
    # The ANSI user name is lenUserName bytes; the second copy below uses
    # two bytes per character and is therefore twice as long.
    ansiUserName = blob.read(lenUserName)
    relVersion, = unpack("<I", blob.read(4))
    unicodeUserName = blob.read(2*lenUserName)
    return CurrentUserAtom(
        rh=rh,
        size=size,
        headerToken=headerToken,
        offsetToCurrentEdit=offsetToCurrentEdit,
        lenUserName=lenUserName,
        docFileVersion=docFileVersion,
        majorVersion=majorVersion,
        minorVersion=minorVersion,
        unused=unused,
        ansiUserName=ansiUserName,
        relVersion=relVersion,
        unicodeUserName=unicodeUserName,
    )
def _packCurrentUserAtom(currentuseratom):
    """Serialize a CurrentUserAtom back into its binary stream form.

    Inverse of _parseCurrentUserAtom; fields are written in on-disk order.
    """
    out = io.BytesIO()
    out.write(_packRecordHeader(currentuseratom.rh).read())
    out.write(pack("<III",
                   currentuseratom.size,
                   currentuseratom.headerToken,
                   currentuseratom.offsetToCurrentEdit))
    out.write(pack("<HH",
                   currentuseratom.lenUserName,
                   currentuseratom.docFileVersion))
    out.write(pack("<BB",
                   currentuseratom.majorVersion,
                   currentuseratom.minorVersion))
    out.write(currentuseratom.unused)
    out.write(currentuseratom.ansiUserName)
    out.write(pack("<I", currentuseratom.relVersion))
    out.write(currentuseratom.unicodeUserName)
    out.seek(0)
    return out
CurrentUser = namedtuple('CurrentUser', ['currentuseratom'])


def _parseCurrentUser(blob):
    """Parse the Current User stream, which consists of one CurrentUserAtom."""
    # Current User Stream: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/76cfa657-07a6-464b-81ab-4c017c611f64
    return CurrentUser(currentuseratom=_parseCurrentUserAtom(blob))


def _packCurrentUser(currentuser):
    """Serialize a CurrentUser structure back into stream bytes."""
    out = io.BytesIO(_packCurrentUserAtom(currentuser.currentuseratom).read())
    out.seek(0)
    return out
UserEditAtom = namedtuple('UserEditAtom', [
    'rh',
    'lastSlideIdRef',
    'version',
    'minorVersion',
    'majorVersion',
    'offsetLastEdit',
    'offsetPersistDirectory',
    'docPersistIdRef',
    'persistIdSeed',
    'lastView',
    'unused',
    'encryptSessionPersistIdRef',
])


def _parseUserEditAtom(blob):
    """Parse a UserEditAtom record from *blob*.

    The trailing encryptSessionPersistIdRef field is optional per [MS-PPT]:
    it is present only when rh.recLen is 0x20 (documents carrying a
    CryptSession10Container). For plain documents (recLen 0x1c) the field
    is reported as 0x00000000 and no extra bytes are consumed.
    """
    # UserEditAtom: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/3ffb3fab-95de-4873-98aa-d508fbbac981
    # rh (8 bytes): A RecordHeader structure...
    buf = io.BytesIO(blob.read(8))
    rh = _parseRecordHeader(buf)
    # logger.debug(rh)
    # ...Sub-fields are further specified in the following table.
    assert rh.recVer == 0x0
    assert rh.recInstance == 0x000
    assert rh.recType == 0x0ff5
    # 0x0000001c + len(encryptSessionPersistIdRef) when the field is present
    assert rh.recLen == 0x0000001c or rh.recLen == 0x00000020
    lastSlideIdRef, = unpack("<I", blob.read(4))
    version, = unpack("<H", blob.read(2))
    # NOTE: on-disk order is (minorVersion, majorVersion).
    minorVersion, majorVersion, = unpack("<BB", blob.read(2))
    offsetLastEdit, = unpack("<I", blob.read(4))
    offsetPersistDirectory, = unpack("<I", blob.read(4))
    docPersistIdRef, = unpack("<I", blob.read(4))
    persistIdSeed, = unpack("<I", blob.read(4))
    lastView, = unpack("<H", blob.read(2))
    unused = blob.read(2)
    # encryptSessionPersistIdRef (4 bytes): An optional PersistIdRef
    # that specifies the value to look up in the persist object directory
    # to find the offset of the CryptSession10Container record (section 2.3.7).
    # Previously this was read unconditionally, consuming 4 bytes of the
    # *following* record for unencrypted files; only read it when recLen
    # says it is actually present.
    if rh.recLen == 0x00000020:
        encryptSessionPersistIdRef, = unpack("<I", blob.read(4))
    else:
        encryptSessionPersistIdRef = 0x00000000
    return UserEditAtom(
        rh=rh,
        lastSlideIdRef=lastSlideIdRef,
        version=version,
        minorVersion=minorVersion,
        majorVersion=majorVersion,
        offsetLastEdit=offsetLastEdit,
        offsetPersistDirectory=offsetPersistDirectory,
        docPersistIdRef=docPersistIdRef,
        persistIdSeed=persistIdSeed,
        lastView=lastView,
        unused=unused,
        encryptSessionPersistIdRef=encryptSessionPersistIdRef,
    )
def _packUserEditAtom(usereditatom):
    """Serialize a UserEditAtom back into its binary stream form.

    NOTE: the optional encryptSessionPersistIdRef field is always written,
    even when rh.recLen is 0x1c (i.e. the header claims the field is
    absent). decrypt() relies on this: it splices the resulting 40-byte
    buffer over the original 40-byte region so the old encrypted reference
    bytes are overwritten with zeros.
    """
    blob = io.BytesIO()
    buf = _packRecordHeader(usereditatom.rh).read()
    blob.write(buf)
    buf = pack("<I", usereditatom.lastSlideIdRef)
    blob.write(buf)
    buf = pack("<H", usereditatom.version)
    blob.write(buf)
    # on-disk order is (minorVersion, majorVersion), mirroring the parser
    buf = pack("<BB", usereditatom.minorVersion, usereditatom.majorVersion)
    blob.write(buf)
    buf = pack("<I", usereditatom.offsetLastEdit)
    blob.write(buf)
    buf = pack("<I", usereditatom.offsetPersistDirectory)
    blob.write(buf)
    buf = pack("<I", usereditatom.docPersistIdRef)
    blob.write(buf)
    buf = pack("<I", usereditatom.persistIdSeed)
    blob.write(buf)
    buf = pack("<H", usereditatom.lastView)
    blob.write(buf)
    buf = usereditatom.unused
    blob.write(buf)
    buf = pack("<I", usereditatom.encryptSessionPersistIdRef)
    blob.write(buf)
    blob.seek(0)
    return blob
PersistDirectoryEntry = namedtuple('PersistDirectoryEntry', [
'persistId',
'cPersist',
'rgPersistOffset',
])
def _parsePersistDirectoryEntry(blob):
# PersistDirectoryEntry: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/6214b5a6-7ca2-4a86-8a0e-5fd3d3eff1c9
getBitSlice = lambda bits, i, w: (bits & (2 ** w - 1 << i)) >> i
buf, = unpack("<I", blob.read(4))
persistId = getBitSlice(buf, 0, 20)
cPersist = getBitSlice(buf, 20, 12)
# cf. PersistOffsetEntry: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/a056484a-2132-4e1e-aa54-6e387f9695cf
size_rgPersistOffset = 4 * cPersist
_rgPersistOffset = blob.read(size_rgPersistOffset)
_rgPersistOffset = io.BytesIO(_rgPersistOffset)
rgPersistOffset = []
pos = 0
while pos < size_rgPersistOffset:
persistoffsetentry, = unpack("<I", _rgPersistOffset.read(4))
rgPersistOffset.append(persistoffsetentry)
pos += 4
return PersistDirectoryEntry(
persistId=persistId,
cPersist=cPersist,
rgPersistOffset=rgPersistOffset,
)
def _packPersistDirectoryEntry(directoryentry):
setBitSlice = lambda bits, i, w, v: (bits & ~((2**w - 1) << i)) | ((v & (2**w - 1)) << i)
blob = io.BytesIO()
_buf = 0xffffffff
_buf = setBitSlice(_buf, 0, 20, directoryentry.persistId)
_buf = setBitSlice(_buf, 20, 12, directoryentry.cPersist)
buf = pack("<I", _buf)
blob.write(buf)
for v in directoryentry.rgPersistOffset:
buf = pack("<I", v)
blob.write(buf)
blob.seek(0)
return blob
PersistDirectoryAtom = namedtuple('PersistDirectoryAtom', [
    'rh',
    'rgPersistDirEntry',
])


def _parsePersistDirectoryAtom(blob):
    """Parse a PersistDirectoryAtom: a record header followed by a packed
    array of variable-length PersistDirectoryEntry structures."""
    # PersistDirectoryAtom: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/d10a093d-860f-409c-b065-aeb24b830505
    # rh (8 bytes): A RecordHeader structure...
    buf = io.BytesIO(blob.read(8))
    rh = _parseRecordHeader(buf)
    # logger.debug(rh)
    # ...Sub-fields are further specified in the following table.
    assert rh.recVer == 0x0
    assert rh.recInstance == 0x000
    assert rh.recType == 0x1772
    _rgPersistDirEntry = blob.read(rh.recLen)
    _rgPersistDirEntry = io.BytesIO(_rgPersistDirEntry)
    rgPersistDirEntry = []
    pos = 0
    # Entries are variable-length (4-byte head plus 4 bytes per offset), so
    # advance by each entry's computed size until recLen bytes are consumed.
    while pos < rh.recLen:
        persistdirectoryentry = _parsePersistDirectoryEntry(_rgPersistDirEntry)
        size_persistdirectoryentry = 4 + 4 * len(persistdirectoryentry.rgPersistOffset)
        # logger.debug((persistdirectoryentry, size_persistdirectoryentry))
        rgPersistDirEntry.append(persistdirectoryentry)
        pos += size_persistdirectoryentry
    return PersistDirectoryAtom(
        rh=rh,
        rgPersistDirEntry=rgPersistDirEntry,
    )
def _packPersistDirectoryAtom(directoryatom):
    """Serialize a PersistDirectoryAtom: its RecordHeader followed by every entry."""
    out = io.BytesIO()
    out.write(_packRecordHeader(directoryatom.rh).read())
    for entry in directoryatom.rgPersistDirEntry:
        out.write(_packPersistDirectoryEntry(entry).read())
    out.seek(0)
    return out
def _parseCryptSession10Container(blob):
    """Parse a CryptSession10Container record, which wraps the raw
    encryption header/verifier bytes of an encrypted document.

    The payload (``data``) is parsed later by _parse_header_RC4CryptoAPI.
    """
    # CryptSession10Container: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/b0963334-4408-4621-879a-ef9c54551fd8
    CryptSession10Container = namedtuple('CryptSession10Container', [
        'rh',
        'data',
    ])
    # rh (8 bytes): A RecordHeader structure...
    buf = io.BytesIO(blob.read(8))
    rh = _parseRecordHeader(buf)
    # logger.debug(rh)
    # ...Sub-fields are further specified in the following table.
    assert rh.recVer == 0xf
    # The specified value fails
    # assert rh.recInstance == 0x000
    assert rh.recType == 0x2f14
    data = blob.read(rh.recLen)
    return CryptSession10Container(
        rh=rh,
        data=data,
    )
def construct_persistobjectdirectory(data):
    """Build the persist object directory: a dict mapping each persist
    object identifier to its byte offset in the PowerPoint Document stream.

    *data* must expose ``currentuser`` and ``powerpointdocument`` file-like
    streams. Follows the algorithm spelled out step by step below.
    """
    # PowerPoint Document Stream: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/1fc22d56-28f9-4818-bd45-67c2bf721ccf
    # 1. Read the CurrentUserAtom record (section 2.3.2) from the Current User Stream (section 2.1.1). ...
    data.currentuser.seek(0)
    currentuser = _parseCurrentUser(data.currentuser)
    # logger.debug(currentuser)
    # 2. Seek, in the PowerPoint Document Stream, to the offset specified by the offsetToCurrentEdit field of
    # the CurrentUserAtom record identified in step 1.
    data.powerpointdocument.seek(currentuser.currentuseratom.offsetToCurrentEdit)
    persistdirectoryatom_stack = []
    # The stream MUST contain exactly one UserEditAtom record.
    # https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/b0963334-4408-4621-879a-ef9c54551fd8
    # NOTE(review): because of that, only a single iteration is performed
    # here — spec steps 6-7 (following offsetLastEdit) never actually loop.
    for i in range(1):
        # 3. Read the UserEditAtom record at the current offset. ...
        usereditatom = _parseUserEditAtom(data.powerpointdocument)
        # logger.debug(usereditatom)
        # 4. Seek to the offset specified by the offsetPersistDirectory field of the UserEditAtom record identified in step 3.
        data.powerpointdocument.seek(usereditatom.offsetPersistDirectory)
        # 5. Read the PersistDirectoryAtom record at the current offset. ...
        persistdirectoryatom = _parsePersistDirectoryAtom(data.powerpointdocument)
        # logger.debug(persistdirectoryatom)
        persistdirectoryatom_stack.append(persistdirectoryatom)
        # 6. Seek to the offset specified by the offsetLastEdit field in the UserEditAtom record identified in step 3.
        # 7. Repeat steps 3 through 6 until offsetLastEdit is 0x00000000.
        if usereditatom.offsetLastEdit == 0x00000000:
            break
        else:
            data.powerpointdocument.seek(usereditatom.offsetLastEdit)
    # 8. Construct the complete persist object directory for this file as follows:
    persistobjectdirectory = {}
    # 8a. For each PersistDirectoryAtom record previously identified in step 5,
    # add the persist object identifier and persist object stream offset pairs to
    # the persist object directory starting with the PersistDirectoryAtom record
    # last identified, that is, the one closest to the beginning of the stream.
    # 8b. Continue adding these pairs to the persist object directory for each PersistDirectoryAtom record
    # in the reverse order that they were identified in step 5; that is, the pairs from the PersistDirectoryAtom record
    # closest to the end of the stream are added last.
    # 8c. When adding a new pair to the persist object directory, if the persist object identifier
    # already exists in the persist object directory, the persist object stream offset from
    # the new pair replaces the existing persist object stream offset for that persist object identifier.
    while len(persistdirectoryatom_stack) > 0:
        persistdirectoryatom = persistdirectoryatom_stack.pop()
        for entry in persistdirectoryatom.rgPersistDirEntry:
            # logger.debug("persistId: %d" % entry.persistId)
            # persist ids within an entry are consecutive, one per offset
            for i, offset in enumerate(entry.rgPersistOffset):
                persistobjectdirectory[entry.persistId + i] = offset
    return persistobjectdirectory
def _parse_header_RC4CryptoAPI(encryptionInfo):
    """Extract salt, key size and verifier fields from an RC4 CryptoAPI
    encryption header blob (the payload of a CryptSession10Container,
    minus the leading version word)."""
    encryptionInfo.read(4)  # flags: not used here
    headerSize, = unpack("<I", encryptionInfo.read(4))
    logger.debug(headerSize)
    header = _parse_encryptionheader(io.BytesIO(encryptionInfo.read(headerSize)))
    logger.debug(header)
    verifier = _parse_encryptionverifier(io.BytesIO(encryptionInfo.read()), "RC4")  # TODO: Fix (cf. ooxml.py)
    logger.debug(verifier)
    return {
        'salt': verifier['salt'],
        'keySize': header['keySize'],
        'encryptedVerifier': verifier['encryptedVerifier'],
        'encryptedVerifierHash': verifier['encryptedVerifierHash'],
    }
class Ppt97File(base.BaseOfficeFile):
    """Binary PowerPoint 97-2003 (.ppt) document.

    Supports detecting RC4 CryptoAPI encryption and writing a decrypted
    copy of the file.
    """

    def __init__(self, file):
        """Open *file* (a binary file object) as an OLE compound document
        and keep handles to its 'Current User' and 'PowerPoint Document'
        streams."""
        self.file = file
        ole = olefile.OleFileIO(file)  # do not close this, would close file
        self.ole = ole
        self.format = "ppt97"
        self.keyTypes = ['password']
        self.key = None
        self.salt = None
        # streams closed in destructor:
        currentuser = ole.openstream('Current User')
        powerpointdocument = ole.openstream('PowerPoint Document')
        Data = namedtuple('Data', ['currentuser', 'powerpointdocument'])
        self.data = Data(
            currentuser=currentuser,
            powerpointdocument=powerpointdocument,
        )

    def __del__(self):
        """Destructor, closes opened streams."""
        if hasattr(self, 'data') and self.data:
            if self.data.currentuser:
                self.data.currentuser.close()
            if self.data.powerpointdocument:
                self.data.powerpointdocument.close()

    def load_key(self, password=None):
        """Verify *password* against the file's RC4 CryptoAPI header and
        store the parameters (key, salt, keySize) that decrypt() needs.

        Raises:
            Exception: if the password fails verification.
        """
        persistobjectdirectory = construct_persistobjectdirectory(self.data)
        logger.debug("[*] persistobjectdirectory: {}".format(persistobjectdirectory))
        self.data.currentuser.seek(0)
        currentuser = _parseCurrentUser(self.data.currentuser)
        logger.debug("[*] currentuser: {}".format(currentuser))
        self.data.powerpointdocument.seek(currentuser.currentuseratom.offsetToCurrentEdit)
        usereditatom = _parseUserEditAtom(self.data.powerpointdocument)
        logger.debug("[*] usereditatom: {}".format(usereditatom))
        # cf. Part 2 in https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/1fc22d56-28f9-4818-bd45-67c2bf721ccf
        cryptsession10container_offset = persistobjectdirectory[usereditatom.encryptSessionPersistIdRef]
        logger.debug("[*] cryptsession10container_offset: {}".format(cryptsession10container_offset))
        self.data.powerpointdocument.seek(cryptsession10container_offset)
        cryptsession10container = _parseCryptSession10Container(self.data.powerpointdocument)
        logger.debug("[*] cryptsession10container: {}".format(cryptsession10container))
        encryptionInfo = io.BytesIO(cryptsession10container.data)
        encryptionVersionInfo = encryptionInfo.read(4)
        vMajor, vMinor = unpack("<HH", encryptionVersionInfo)
        logger.debug("[*] encryption version: {} {}".format(vMajor, vMinor))
        assert vMajor in [0x0002, 0x0003, 0x0004] and vMinor == 0x0002  # RC4 CryptoAPI
        info = _parse_header_RC4CryptoAPI(encryptionInfo)
        if DocumentRC4CryptoAPI.verifypw(password, info['salt'], info['keySize'],
                                         info['encryptedVerifier'], info['encryptedVerifierHash']):
            self.type = 'rc4_cryptoapi'
            self.key = password
            self.salt = info['salt']
            self.keySize = info['keySize']
        else:
            raise Exception("Failed to verify password")

    def decrypt(self, ofile):
        """Write a decrypted copy of the document to *ofile*.

        Rewrites the Current User stream (clearing the encryption header
        token), patches the UserEditAtom and PersistDirectoryAtom so they
        no longer reference the CryptSession10Container, then RC4-decrypts
        every persist object of the PowerPoint Document stream in place.
        load_key() must have been called first.
        """
        # Current User Stream
        self.data.currentuser.seek(0)
        currentuser = _parseCurrentUser(self.data.currentuser)
        # logger.debug(currentuser)
        cuatom = currentuser.currentuseratom
        currentuser_new = CurrentUser(
            currentuseratom=CurrentUserAtom(
                rh=cuatom.rh,
                size=cuatom.size,
                # https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/940d5700-e4d7-4fc0-ab48-fed5dbc48bc1
                # 0xE391C05F: The file SHOULD NOT<6> be an encrypted document.
                headerToken=0xe391c05f,
                offsetToCurrentEdit=cuatom.offsetToCurrentEdit,
                lenUserName=cuatom.lenUserName,
                docFileVersion=cuatom.docFileVersion,
                majorVersion=cuatom.majorVersion,
                minorVersion=cuatom.minorVersion,
                unused=cuatom.unused,
                ansiUserName=cuatom.ansiUserName,
                relVersion=cuatom.relVersion,
                unicodeUserName=cuatom.unicodeUserName,
            )
        )
        buf = _packCurrentUser(currentuser_new)
        buf.seek(0)
        currentuser_buf = buf
        # List of encrypted parts: https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/b0963334-4408-4621-879a-ef9c54551fd8
        # PowerPoint Document Stream
        self.data.powerpointdocument.seek(0)
        powerpointdocument_size = len(self.data.powerpointdocument.read())
        logger.debug("[*] powerpointdocument_size: {}".format(powerpointdocument_size))
        self.data.powerpointdocument.seek(0)
        # the decrypted stream is assembled in place over a copy of the raw bytes
        dec_bytearray = bytearray(self.data.powerpointdocument.read())
        # UserEditAtom
        self.data.powerpointdocument.seek(currentuser.currentuseratom.offsetToCurrentEdit)
        # currentuseratom_raw = self.data.powerpointdocument.read(40)
        self.data.powerpointdocument.seek(currentuser.currentuseratom.offsetToCurrentEdit)
        usereditatom = _parseUserEditAtom(self.data.powerpointdocument)
        # logger.debug(usereditatom)
        # logger.debug(["offsetToCurrentEdit", currentuser.currentuseratom.offsetToCurrentEdit])
        rh_new = RecordHeader(
            recVer=usereditatom.rh.recVer,
            recInstance=usereditatom.rh.recInstance,
            recType=usereditatom.rh.recType,
            recLen=usereditatom.rh.recLen - 4,  # Omit encryptSessionPersistIdRef field
        )
        # logger.debug([_packRecordHeader(usereditatom.rh).read(), _packRecordHeader(rh_new).read()])
        usereditatom_new = UserEditAtom(
            rh=rh_new,
            lastSlideIdRef=usereditatom.lastSlideIdRef,
            version=usereditatom.version,
            minorVersion=usereditatom.minorVersion,
            majorVersion=usereditatom.majorVersion,
            offsetLastEdit=usereditatom.offsetLastEdit,
            offsetPersistDirectory=usereditatom.offsetPersistDirectory,
            docPersistIdRef=usereditatom.docPersistIdRef,
            persistIdSeed=usereditatom.persistIdSeed,
            lastView=usereditatom.lastView,
            unused=usereditatom.unused,
            encryptSessionPersistIdRef=0x00000000,  # Clear
        )
        # logger.debug(currentuseratom_raw)
        # logger.debug(_packUserEditAtom(usereditatom).read())
        # logger.debug(_packUserEditAtom(usereditatom_new).read())
        buf = _packUserEditAtom(usereditatom_new)
        buf.seek(0)
        buf_bytes = bytearray(buf.read())
        # splice the patched atom (40 bytes, ref field now zero) over the original
        offset = currentuser.currentuseratom.offsetToCurrentEdit
        dec_bytearray[offset:offset+len(buf_bytes)] = buf_bytes
        # PersistDirectoryAtom
        self.data.powerpointdocument.seek(currentuser.currentuseratom.offsetToCurrentEdit)
        usereditatom = _parseUserEditAtom(self.data.powerpointdocument)
        # logger.debug(usereditatom)
        self.data.powerpointdocument.seek(usereditatom.offsetPersistDirectory)
        persistdirectoryatom = _parsePersistDirectoryAtom(self.data.powerpointdocument)
        # logger.debug(persistdirectoryatom)
        persistdirectoryatom_new = PersistDirectoryAtom(
            rh=persistdirectoryatom.rh,
            rgPersistDirEntry=[
                PersistDirectoryEntry(
                    persistId=persistdirectoryatom.rgPersistDirEntry[0].persistId,
                    # Omit CryptSession10Container
                    cPersist=persistdirectoryatom.rgPersistDirEntry[0].cPersist-1,
                    rgPersistOffset=persistdirectoryatom.rgPersistDirEntry[0].rgPersistOffset,
                ),
            ],
        )
        self.data.powerpointdocument.seek(usereditatom.offsetPersistDirectory)
        buf = _packPersistDirectoryAtom(persistdirectoryatom_new)
        buf_bytes = bytearray(buf.read())
        offset = usereditatom.offsetPersistDirectory
        dec_bytearray[offset:offset+len(buf_bytes)] = buf_bytes
        # Persist Objects
        self.data.powerpointdocument.seek(0)
        persistobjectdirectory = construct_persistobjectdirectory(self.data)
        directory_items = list(persistobjectdirectory.items())
        for i, (persistId, offset) in enumerate(directory_items):
            self.data.powerpointdocument.seek(offset)
            buf = self.data.powerpointdocument.read(8)
            rh = _parseRecordHeader(io.BytesIO(buf))
            logger.debug("[*] rh: {}".format(rh))
            # CryptSession10Container
            if rh.recType == 0x2f14:
                logger.debug("[*] CryptSession10Container found")
                # Remove encryption, pad by zero to preserve stream size
                dec_bytearray[offset:offset+(8+rh.recLen)] = b"\x00" * (8+rh.recLen)
                continue
            # The UserEditAtom record (section 2.3.3) and the PersistDirectoryAtom record (section 2.3.4) MUST NOT be encrypted.
            if rh.recType in [0x0ff5, 0x1772]:
                logger.debug("[*] UserEditAtom/PersistDirectoryAtom found")
                continue
            # TODO: Fix here
            # NOTE(review): record length is inferred from the offset of the
            # *next* directory entry; for the final entry this indexes past
            # the list — presumably the last entry is always one of the
            # skipped record types above. Confirm on real files.
            recLen = directory_items[i+1][1] - offset - 8
            logger.debug("[*] recLen: {}".format(recLen))
            self.data.powerpointdocument.seek(offset)
            enc_buf = io.BytesIO(self.data.powerpointdocument.read(8+recLen))
            blocksize = self.keySize * ((8 + recLen) // self.keySize + 1)  # Undocumented
            dec = DocumentRC4CryptoAPI.decrypt(self.key, self.salt, self.keySize, enc_buf, blocksize=blocksize, block=persistId)
            dec_bytes = bytearray(dec.read())
            dec_bytearray[offset:offset+len(dec_bytes)] = dec_bytes
        # To BytesIO
        dec_buf = io.BytesIO(dec_bytearray)
        dec_buf.seek(0)
        # sanity pass: re-read every record header from the decrypted buffer
        for i, (persistId, offset) in enumerate(directory_items):
            dec_buf.seek(offset)
            buf = dec_buf.read(8)
            rh = _parseRecordHeader(io.BytesIO(buf))
            logger.debug("[*] rh: {}".format(rh))
        dec_buf.seek(0)
        logger.debug("[*] powerpointdocument_size={}, len(dec_buf.read())={}".format(powerpointdocument_size, len(dec_buf.read())))
        dec_buf.seek(0)
        powerpointdocument_dec_buf = dec_buf
        # TODO: Pictures Stream
        # TODO: Encrypted Summary Info Stream
        with tempfile.TemporaryFile() as _ofile:
            self.file.seek(0)
            shutil.copyfileobj(self.file, _ofile)
            outole = olefile.OleFileIO(_ofile, write_mode=True)
            outole.write_stream('Current User', currentuser_buf.read())
            outole.write_stream('PowerPoint Document', powerpointdocument_dec_buf.read())
            # Finalize
            _ofile.seek(0)
            shutil.copyfileobj(_ofile, ofile)
        return

    def is_encrypted(self):
        r'''
        Test if the file is encrypted.

        >>> f = open("tests/inputs/plain.ppt", "rb")
        >>> file = Ppt97File(f)
        >>> file.is_encrypted()
        False
        >>> f = open("tests/inputs/rc4cryptoapi_password.ppt", "rb")
        >>> file = Ppt97File(f)
        >>> file.is_encrypted()
        True
        '''
        self.data.currentuser.seek(0)
        currentuser = _parseCurrentUser(self.data.currentuser)
        logger.debug("[*] currentuser: {}".format(currentuser))
        self.data.powerpointdocument.seek(currentuser.currentuseratom.offsetToCurrentEdit)
        usereditatom = _parseUserEditAtom(self.data.powerpointdocument)
        logger.debug("[*] usereditatom: {}".format(usereditatom))
        # recLen 0x20 means the optional encryptSessionPersistIdRef field is
        # present, i.e. the document carries a CryptSession10Container.
        if usereditatom.rh.recLen == 0x00000020:  # Cf. _parseUserEditAtom
            return True
        else:
            return False
| 35.78637
| 141
| 0.670756
|
e457308233e565cd2db07e56db970fca3960dce8
| 929
|
py
|
Python
|
catalog/dto/inventory.py
|
Guya-LTD/catalog
|
632b3c3766e2600275c0a18db6378b2d38e3c463
|
[
"RSA-MD"
] | null | null | null |
catalog/dto/inventory.py
|
Guya-LTD/catalog
|
632b3c3766e2600275c0a18db6378b2d38e3c463
|
[
"RSA-MD"
] | null | null | null |
catalog/dto/inventory.py
|
Guya-LTD/catalog
|
632b3c3766e2600275c0a18db6378b2d38e3c463
|
[
"RSA-MD"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Copyright Header Details
Copyright
---------
Copyright (C) Guya , PLC - All Rights Reserved (As Of Pending...)
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
LICENSE
-------
This file is subject to the terms and conditions defined in
file 'LICENSE.txt', which is part of this source code package.
Authors
-------
* [Simon Belete](https://github.com/Simonbelete)
Project
-------
* Name:
- Guya E-commerce & Guya Express
* Sub Project Name:
- Catalog Service
* Description
- Catalog Service
"""
from flask_restplus import Namespace, fields
from catalog.blueprint.v1.catalog import namespace
class CatalogDto:
    """Request and Response Data Transfer Object.

    Holds the flask-restplus request/response models registered on the
    catalog namespace.
    """

    # NOTE(review): both model shapes are empty {} — presumably placeholders
    # to be filled in with the catalog fields; confirm against the API spec.
    request = namespace.model('catalog_request', {})
    response = namespace.model('catalog_response', {})
| 22.658537
| 76
| 0.668461
|
8d665c30acd00ae75ea57ccebf4c7e48c21d2949
| 1,851
|
py
|
Python
|
PhysicsTools/HeppyCore/python/framework/config_test.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
PhysicsTools/HeppyCore/python/framework/config_test.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
PhysicsTools/HeppyCore/python/framework/config_test.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from __future__ import absolute_import
import unittest
import os
import shutil
import copy
from .config import *
class ConfigTestCase(unittest.TestCase):
    """Unit tests for heppy's configuration objects (Analyzer, Component, Config)."""

    def test_analyzer(self):
        """Analyzer names must be usable as output directory names."""
        class Ana1(object):
            pass
        ana1 = Analyzer(
            Ana1,
            toto = '1',
            tata = 'a'
        )
        # checking that the analyzer name does not contain a slash,
        # to make sure the output directory name does not contain a subdirectory
        self.assertTrue( '/' not in ana1.name )

    def test_MCComponent(self):
        """MCComponent construction accepts the usual MC metadata keywords."""
        DYJets = MCComponent(
            name = 'DYJets',
            files ='blah_mc.root',
            xSection = 3048.,
            nGenEvents = 34915945,
            triggers = ['HLT_MC'],
            vertexWeight = 1.,
            effCorrFactor = 1 )
        # reaching this point means construction did not raise
        self.assertTrue(True)

    def test_config(self):
        """A Config can be assembled from components, a sequence and services."""
        class Ana1(object):
            pass
        ana1 = Analyzer(
            Ana1,
            toto = '1',
            tata = 'a'
        )
        comp1 = Component(
            'comp1',
            files='*.root',
            triggers='HLT_stuff'
        )
        from PhysicsTools.HeppyCore.framework.chain import Chain as Events
        # construction itself is the assertion: it must not raise
        config = Config( components = [comp1],
                         sequence = [ana1],
                         services = [],
                         events_class = Events )

    def test_copy(self):
        """Copies of an Analyzer recompute their name from the new instance label."""
        class Ana1(object):
            pass
        ana1 = Analyzer(
            Ana1,
            instance_label = 'inst1',
            toto = '1',
        )
        ana2 = copy.copy(ana1)
        ana2.instance_label = 'inst2'
        ana2.toto2 = '2'
        # the name is derived from the defining class and the instance label
        self.assertEqual(ana2.name, '__main__.Ana1_inst2')
        self.assertEqual(ana2.toto2, '2')
# Allow running this test module directly: python config_test.py
if __name__ == '__main__':
    unittest.main()
| 26.826087
| 80
| 0.504592
|
9b61ef01252892818c9b97a43515f3b770dca33b
| 5,931
|
py
|
Python
|
poblacionInicial2Preprocessing.py
|
learsi1911/GAMA_pygmo_SHA
|
8da8c7930f0768492065633c433ce7c811c73e98
|
[
"Apache-2.0"
] | null | null | null |
poblacionInicial2Preprocessing.py
|
learsi1911/GAMA_pygmo_SHA
|
8da8c7930f0768492065633c433ce7c811c73e98
|
[
"Apache-2.0"
] | null | null | null |
poblacionInicial2Preprocessing.py
|
learsi1911/GAMA_pygmo_SHA
|
8da8c7930f0768492065633c433ce7c811c73e98
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 15 18:14:22 2021
@author: 20210595
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 14 21:24:18 2021
@author: 20210595
"""
from gama.genetic_programming.components.individual import Individual
from gama.genetic_programming.compilers.scikitlearn import compile_individual
from gama.genetic_programming.components.primitive_node import PrimitiveNode
from gama.genetic_programming.components.primitive import Primitive
from gama.genetic_programming.components.terminal import Terminal
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import FastICA
import numpy as np
from sklearn.ensemble import (
ExtraTreesClassifier,
RandomForestClassifier,
GradientBoostingClassifier,
)
from sklearn.preprocessing import (
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
Binarizer,
)
# Example pipeline kept for reference:
# pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC(C=1.2,
#                                                            kernel='linear',
#                                                            degree=2,
#                                                            gamma='auto',
#                                                            coef0=0.1))])
# InstanciaClase = Individual(SVC(C=1.2, kernel='linear', degree=2, gamma='auto', coef0=0.1), compile_individual)
# print(InstanciaClase)
# clf_config = {
#     RandomForestClassifier: {
#         "n_estimators": 100,
#         "criterion": "gini",
#         "max_features": 0.05,
#         "min_samples_split": 4,
#         "min_samples_leaf": 3,
#         "bootstrap": False,
#     }
# }
# InstanciaClase = Individual(clf_config, compile_individual)
# print(InstanciaClase)

## Build a main node: a PrimitiveNode, first for preprocessing and then for the classifiers
# pre = PrimitiveNode("Normalizer", "data", ['l1'])
primitiveEjemplo1 = Primitive(['PolynomialFeatures.degree',
                               'PolynomialFeatures.include_bias',
                               'PolynomialFeatures.interaction_only'], "data", PolynomialFeatures)
# print(primitivoEjemplo.output)
# print(primitivoEjemplo.identifier)
## Now create the Terminals matching the Primitive
termi1 = Terminal(2, "PolynomialFeatures.degree", "PolynomialFeatures.degree")
termi2 = Terminal(False, "PolynomialFeatures.include_bias", "PolynomialFeatures.include_bias")
termi3 = Terminal(False, "PolynomialFeatures.interaction_only", "PolynomialFeatures.interaction_only")
## Remember: the order matters
listaTerminals1 = [termi1, termi2, termi3]
## Use the primitive to create a preprocessing PrimitiveNode
primitiveNodePreProcesamiento1 = PrimitiveNode(primitiveEjemplo1, "data", listaTerminals1)
## First create a Primitive; Primitive.output = "data" for preprocessing
## and Primitive.output = "prediction" for classification techniques
primitiveEjemplo = Primitive(['PCA.svd_solver', 'PCA.iterated_power'], "data", PCA)
# print(primitivoEjemplo.output)
# print(primitivoEjemplo.identifier)
## Now create the Terminals matching the Primitive
termiSVD_solver = Terminal("randomized", "PCA.svd_solver", "PCA.svd_solver")
termiITERATED = Terminal(9, "PCA.iterated_power", "PCA.iterated_power")
## Remember: the order matters
listaTerminals = [termiITERATED, termiSVD_solver]
## Use the primitive to create a preprocessing PrimitiveNode
primitiveNodePreProcesamiento = PrimitiveNode(primitiveEjemplo, primitiveNodePreProcesamiento1, listaTerminals)
## With the preprocessing PrimitiveNode we now build the classification PrimitiveNode
## Remember: the first step is creating the Primitive with Primitive.output = prediction
primitiveClassification = Primitive(['RandomForestClassifier.n_estimators',  # Primitive.input
                                     'RandomForestClassifier.criterion',
                                     'RandomForestClassifier.max_features',
                                     'RandomForestClassifier.min_samples_split',
                                     'RandomForestClassifier.min_samples_leaf',
                                     'RandomForestClassifier.bootstrap'],
                                    "prediction",  # Primitive.output
                                    RandomForestClassifier  # Primitive.identifier
                                    )
## Now create the Terminals matching the Primitive
terminal1 = Terminal(100, 'RandomForestClassifier.n_estimators', 'RandomForestClassifier.n_estimators')
terminal2 = Terminal("gini", 'RandomForestClassifier.criterion', 'RandomForestClassifier.criterion')
terminal3 = Terminal(0.05, 'RandomForestClassifier.max_features', 'RandomForestClassifier.max_features')
terminal4 = Terminal(3, 'RandomForestClassifier.min_samples_split', 'RandomForestClassifier.min_samples_split')
terminal5 = Terminal(1, 'RandomForestClassifier.min_samples_leaf', 'RandomForestClassifier.min_samples_leaf')
terminal6 = Terminal(False, 'RandomForestClassifier.bootstrap', 'RandomForestClassifier.bootstrap')
terminalClassification = [terminal1, terminal2, terminal3, terminal4, terminal5, terminal6]
## Use the primitive to create the classification PrimitiveNode
## NOTE: if there is NO preprocessing technique, PrimitiveNode._data_node = "data" (a string);
## otherwise — i.e. when there is a preprocessing PrimitiveNode — it must receive that
## preprocessing PrimitiveNode, as here:
primitiveNodeClassification = PrimitiveNode(primitiveClassification, primitiveNodePreProcesamiento, terminalClassification)
# print(primitiveNodeClassification)
# Now create an Individual
ind = Individual(primitiveNodeClassification, compile_individual)
print(ind)
| 43.291971
| 154
| 0.711179
|
ba28867b51bc9c57df9025487bd12b909edc9d63
| 2,391
|
py
|
Python
|
parsedatetime/pdt_locales/it_IT.py
|
MadChadJack/parsedatetime
|
113886ab2460f4d2f350214b1afa7ad83baae0b2
|
[
"Apache-2.0"
] | null | null | null |
parsedatetime/pdt_locales/it_IT.py
|
MadChadJack/parsedatetime
|
113886ab2460f4d2f350214b1afa7ad83baae0b2
|
[
"Apache-2.0"
] | null | null | null |
parsedatetime/pdt_locales/it_IT.py
|
MadChadJack/parsedatetime
|
113886ab2460f4d2f350214b1afa7ad83baae0b2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import *
# don't use an unicode string
localeID = 'it_IT'
dateSep = ['/']
timeSep = [':', '.']
meridian= ['del mattino', 'di pomeriggio']
usesMeridian = True
uses24 = True
Weekdays = [
'lunedì', 'martedì', 'mercoledì',
'giovedì', 'venerdì', 'sabato', 'domenica',
]
shortWeekdays = [
'lun', 'mar', 'mer',
'gio', 'ven', 'sab', 'dom',
]
Months = [
'gennaio', 'febbraio', 'marzo',
'aprile', 'maggio', 'giugno',
'luglio', 'agosto', 'settembre',
'ottobre', 'novembre', 'dicembre',
]
shortMonths = [
'gen', 'feb', 'mar',
'apr', 'mag', 'giu',
'lug', 'ago', 'set',
'ott', 'nov', 'dic',
]
# use the same formats as ICU by default
dateFormats = {
'full': 'EEEE d MMMM yyyy',
'long': 'd MMMM yyyy',
'medium': 'd MMM yyyy',
'short': 'd/M/yy'
}
timeFormats = {
'full': 'h:mm:ss a z',
'long': 'h:mm:ss a z',
'medium': 'h:mm:ss a',
'short': 'h:mm a',
}
dp_order = ['d', 'm', 'y']
# Used to parse expressions like "in 5 hours"
numbers = {
'zero': 0,
'una': 1,
'uno': 1,
'un\'ora': 1,
'due': 2,
'tre': 3,
'quattro': 4,
'cinque': 5,
'sei': 6,
'sette': 7,
'otto': 8,
'nove': 9,
'dieci': 10,
'undici': 11,
'dodici'
'tredici': 13,
'quattordici': 14,
'quindici': 15,
'sedici': 16,
'diciassette': 17,
'diciotto': 18,
'diciannove': 19,
'venti': 20,
'ventuno': 21,
'ventidue': 22,
'ventre': 23,
'ventiquattro': 24
}
decimal_mark = ','
# this will be added to re_values later
# NOTE(review): the one-letter abbreviations are ambiguous — 's' stands for
# both seconds and weeks (settimane) and 'm' for both minutes and months
# (mesi); which unit wins depends on how the consumer merges these lists.
# Verify against the parser before relying on the short forms.
units = {
    'seconds': ['seconda', 'secondo', 'secondi', 'sec', 'seci', 's'],
    'minutes': ['minuta', 'minuto', 'minuti','min', 'mini', 'm'],
    'hours': ['ora', 'ore', 'o'],
    'days': ['giorno', 'giorni', 'g'],
    'weeks': ['settimana', 'settimane', 's'],
    'months': ['mese', 'mesi', 'm'],
    'years': ['anno', 'anni', 'a'],
}
# text constants to be used by later regular expressions
re_values = {
    'daysuffix': '°',
    'qunits': 'h|m|s|d|w|y',
    'now': ['adesso', 'proprio adesso'],
}
# Used to adjust the returned date before/after the source
# (negative = before the anchor date, positive = after, 0 = same period).
Modifiers = {
    'prima': -1,
    'dopo': 1,
    'fa': -1,
    'lo scorso': -1,
    'scorsa': -1,
    'precedente': -1,
    'questa': 0
}
# Day offsets relative to today: tomorrow / today / yesterday.
dayOffsets = {
    'domani': 1,
    'oggi': 0,
    'ieri': -1,
}
| 20.262712
| 69
| 0.513174
|
f75158b499dfb05fef535b2348c00c185ed59ba4
| 405
|
py
|
Python
|
sales/migrations/0008_auto_20200912_0910.py
|
zhou-en/turf_portal
|
2424ac15248c9f1c0fa4f65ca18fe0ea71d7ff4e
|
[
"MIT"
] | null | null | null |
sales/migrations/0008_auto_20200912_0910.py
|
zhou-en/turf_portal
|
2424ac15248c9f1c0fa4f65ca18fe0ea71d7ff4e
|
[
"MIT"
] | 47
|
2020-10-14T21:54:35.000Z
|
2022-02-25T05:21:39.000Z
|
sales/migrations/0008_auto_20200912_0910.py
|
zhou-en/turf_portal
|
2424ac15248c9f1c0fa4f65ca18fe0ea71d7ff4e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-12 09:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow OrderLine.price to be blank/null with default 0.0."""

    dependencies = [
        ('sales', '0007_order_orderline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orderline',
            name='price',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
    ]
| 21.315789
| 72
| 0.6
|
1ecb21181485e8f306420834a220b195e5562c27
| 294
|
py
|
Python
|
xu/compa/Parapluie/__init__.py
|
sonnts996/XuCompa-Request
|
f343e7bfd1b4263eb76438c96d347c549cc75ce3
|
[
"Apache-2.0"
] | null | null | null |
xu/compa/Parapluie/__init__.py
|
sonnts996/XuCompa-Request
|
f343e7bfd1b4263eb76438c96d347c549cc75ce3
|
[
"Apache-2.0"
] | null | null | null |
xu/compa/Parapluie/__init__.py
|
sonnts996/XuCompa-Request
|
f343e7bfd1b4263eb76438c96d347c549cc75ce3
|
[
"Apache-2.0"
] | null | null | null |
import xu.compa.Parapluie.res as PResource
import xu.compa.Parapluie.src.PFunction as PFunction
import xu.compa.Parapluie.src.Parapluie as Parapluie
from xu.compa.Parapluie.src.ActionWidget import *
from xu.compa.Parapluie.src.Adapter import *
from xu.compa.Parapluie.src.StickyWindow import *
| 42
| 52
| 0.829932
|
56f6cd18a5841aa57d02f1be9ed59c235956a456
| 2,292
|
py
|
Python
|
demo/drawmaps.py
|
zx64/omgifol
|
622737dea233dc653463bd6e9a8544c7aff899f2
|
[
"MIT"
] | 51
|
2015-03-02T14:26:46.000Z
|
2021-11-01T13:52:28.000Z
|
demo/drawmaps.py
|
zx64/omgifol
|
622737dea233dc653463bd6e9a8544c7aff899f2
|
[
"MIT"
] | 31
|
2015-03-18T00:35:23.000Z
|
2022-03-13T07:30:05.000Z
|
demo/drawmaps.py
|
zx64/omgifol
|
622737dea233dc653463bd6e9a8544c7aff899f2
|
[
"MIT"
] | 22
|
2015-02-13T17:08:17.000Z
|
2022-03-31T18:54:33.000Z
|
from __future__ import print_function
from omg import *
import sys
from PIL import Image, ImageDraw
def drawmap(wad, name, filename, width, format):
    """Render map `name` from `wad` to an image file.

    width is the desired output image width in pixels; format is a PIL
    format name such as 'PNG' or 'BMP'.
    """
    xsize = width - 8
    edit = MapEditor(wad.maps[name])
    # Compute the map's bounding box. y is negated throughout because the
    # map's y axis grows upward while the image's y axis grows downward.
    xmin = ymin = 32767
    xmax = ymax = -32768
    for v in edit.vertexes:
        xmin = min(xmin, v.x)
        xmax = max(xmax, v.x)
        ymin = min(ymin, -v.y)
        ymax = max(ymax, -v.y)
    # Scale factor that maps the bounding box onto xsize pixels.
    scale = xsize / float(xmax - xmin)
    xmax = int(xmax * scale)
    xmin = int(xmin * scale)
    ymax = int(ymax * scale)
    ymin = int(ymin * scale)
    # Rescale vertices in place (on the MapEditor copy, not the WAD itself).
    for v in edit.vertexes:
        v.x = v.x * scale
        v.y = -v.y * scale
    # +8 leaves a 4-pixel white margin on every side.
    im = Image.new('RGB', ((xmax - xmin) + 8, (ymax - ymin) + 8), (255,255,255))
    draw = ImageDraw.Draw(im)
    # Draw two-sided lines first so one-sided (wall) lines overdraw them.
    edit.linedefs.sort(key=lambda a: not a.two_sided)
    for line in edit.linedefs:
        p1x = edit.vertexes[line.vx_a].x - xmin + 4
        p1y = edit.vertexes[line.vx_a].y - ymin + 4
        p2x = edit.vertexes[line.vx_b].x - xmin + 4
        p2y = edit.vertexes[line.vx_b].y - ymin + 4
        # Grey for two-sided lines, orange for lines with a special action,
        # black otherwise (action colour takes precedence).
        color = (0, 0, 0)
        if line.two_sided:
            color = (144, 144, 144)
        if line.action:
            color = (220, 130, 50)
        # Five offset strokes approximate a thick (3 px) line.
        draw.line((p1x, p1y, p2x, p2y), fill=color)
        draw.line((p1x+1, p1y, p2x+1, p2y), fill=color)
        draw.line((p1x-1, p1y, p2x-1, p2y), fill=color)
        draw.line((p1x, p1y+1, p2x, p2y+1), fill=color)
        draw.line((p1x, p1y-1, p2x, p2y-1), fill=color)
    del draw
    im.save(filename, format)
#import psyco
#psyco.full()
# CLI: drawmaps.py source.wad pattern width format
if (len(sys.argv) < 5):
    # Not enough arguments — print usage and exit.
    print("\n    Omgifol script: draw maps to image files\n")
    print("    Usage:")
    print("    drawmaps.py source.wad pattern width format\n")
    print("    Draw all maps whose names match the given pattern (eg E?M4 or MAP*)")
    print("    to image files of a given format (PNG, BMP, etc). width specifies the")
    print("    desired width of the output images.")
else:
    print("Loading %s..." % sys.argv[1])
    inwad = WAD()
    inwad.from_file(sys.argv[1])
    width = int(sys.argv[3])
    format = sys.argv[4].upper()
    # Render every map matching the pattern to e.g. "MAP01_map.png".
    for name in inwad.maps.find(sys.argv[2]):
        print("Drawing %s" % name)
        drawmap(inwad, name, name + "_map" + "." + format.lower(), width, format)
| 30.157895
| 86
| 0.569808
|
e8e07d74f19a693d87df99b4ce22393547d47aa5
| 437
|
py
|
Python
|
infomemes/plots/styles.py
|
luiztauffer/infomemes
|
7a54546aa688b703e6e020e53eeabae9dc922619
|
[
"MIT"
] | 6
|
2019-09-05T17:14:20.000Z
|
2021-09-24T10:19:30.000Z
|
infomemes/plots/styles.py
|
luiztauffer/infomemes
|
7a54546aa688b703e6e020e53eeabae9dc922619
|
[
"MIT"
] | 2
|
2019-09-08T16:59:04.000Z
|
2020-02-15T11:51:24.000Z
|
infomemes/plots/styles.py
|
luiztauffer/infomemes
|
7a54546aa688b703e6e020e53eeabae9dc922619
|
[
"MIT"
] | null | null | null |
# Color palettes
# Maps palette name -> list of color strings. 'palette_0' uses CSS rgba()
# strings with 0.5 alpha; 'Tableau10' uses opaque hex colors.
palettes = {}
palettes['palette_0'] = ["rgba(134, 149, 184, 0.5)", "rgba(184, 179, 134, 0.5)",
                         "rgba(184, 134, 134, 0.5)", "rgba(134, 184, 132, 0.5)",
                         "rgba(140, 134, 184, 0.5)", "rgba(184, 134, 176, 0.5)"]
palettes['Tableau10'] = ["#4e79a7", "#f28e2c", "#e15759", "#76b7b2", "#59a14f",
                         "#edc949", "#af7aa1", "#ff9da7", "#9c755f", "#bab0ab"]
| 39.727273
| 80
| 0.469108
|
9c0bc90bb82cfe8889c2ca1acd6d174054ed2f20
| 3,612
|
py
|
Python
|
distributors/urlobject/netloc.py
|
fish2000/django-signalqueue
|
2d98b8e6b7bf2bd131c4d7c18d54d7fb008ed989
|
[
"BSD-3-Clause"
] | 12
|
2015-02-27T19:26:13.000Z
|
2019-04-21T12:16:01.000Z
|
distributors/urlobject/netloc.py
|
Steckelfisch/django-signalqueue
|
851bf3641bd940a49b7a2da74c9809bdfcac8fd3
|
[
"BSD-3-Clause"
] | 1
|
2017-08-25T10:11:47.000Z
|
2017-08-25T10:11:47.000Z
|
distributors/urlobject/netloc.py
|
Steckelfisch/django-signalqueue
|
851bf3641bd940a49b7a2da74c9809bdfcac8fd3
|
[
"BSD-3-Clause"
] | null | null | null |
import urlparse
class Netloc(unicode):
    """
    A netloc string (``username:password@hostname:port``).

    Contains methods for accessing and (non-destructively) modifying those four
    components of the netloc. All methods return new instances.

    NOTE(review): this is Python 2 code — it subclasses ``unicode`` and the
    module uses the Python 2 ``urlparse`` module.
    """

    def __repr__(self):
        return 'Netloc(%r)' % (unicode(self),)

    @classmethod
    def __unsplit(cls, username, password, hostname, port):
        """Put together a :class:`Netloc` from its constituent parts."""
        # A password is only emitted when a username is present; '@' closes
        # the auth section.
        auth_string = u''
        if username:
            auth_string = username
            if password:
                auth_string += u':' + password
            auth_string += '@'
        port_string = u''
        if port is not None:
            port_string = u':%d' % port
        return cls(auth_string + hostname + port_string)

    @property
    def username(self):
        """The username portion of this netloc, or ``None``."""
        return self.__urlsplit.username

    def with_username(self, username):
        """Replace or add a username to this netloc."""
        return self.__replace(username=username)

    def without_username(self):
        """Remove any username (and password) from this netloc."""
        return self.without_password().with_username('')

    @property
    def password(self):
        """The password portion of this netloc, or ``None``."""
        return self.__urlsplit.password

    def with_password(self, password):
        """
        Replace or add a password to this netloc.

        Raises a ``ValueError`` if you attempt to add a password to a netloc
        with no username.
        """
        if password and not self.username:
            raise ValueError("Can't set a password on a netloc with no username")
        return self.__replace(password=password)

    def without_password(self):
        """Remove any password from this netloc."""
        return self.with_password('')

    @property
    def auth(self):
        """The username and password of this netloc as a 2-tuple."""
        return (self.username, self.password)

    def with_auth(self, username, *password):
        """Replace or add a username and password in one method call."""
        # Clear auth first so stale credentials never leak into the result.
        netloc = self.without_auth()
        if password:
            return netloc.with_username(username).with_password(*password)
        return netloc.with_username(username)

    def without_auth(self):
        # Remove both username and password (password first, since a password
        # cannot exist without a username).
        return self.without_password().without_username()

    @property
    def hostname(self):
        """The hostname portion of this netloc."""
        return self.__urlsplit.hostname

    def with_hostname(self, hostname):
        """Replace the hostname on this netloc."""
        return self.__replace(hostname=hostname)

    @property
    def port(self):
        """The port number on this netloc (as an ``int``), or ``None``."""
        return self.__urlsplit.port

    def with_port(self, port):
        """Replace or add a port number to this netloc."""
        return self.__replace(port=port)

    def without_port(self):
        """Remove any port number from this netloc."""
        return self.__replace(port=None)

    @property
    def __urlsplit(self):
        # Delegate component parsing to the stdlib: wrap this string in a
        # SplitResult so urlparse's username/password/hostname/port rules apply.
        return urlparse.SplitResult('', self, '', '', '')

    def __replace(self, **params):
        """Replace any number of components on this netloc."""
        unsplit_args = {'username': self.username,
                        'password': self.password,
                        'hostname': self.hostname,
                        'port': self.port}
        unsplit_args.update(params)
        return self.__unsplit(**unsplit_args)
| 31.684211
| 81
| 0.611019
|
0f5e127b221ad48b4fa1f05caba7446fc471c4c0
| 1,969
|
py
|
Python
|
adv/natalie.py
|
smashwidget/dl-1
|
352a65b9e40a623d7a9e5a693fa2b412f27d8cff
|
[
"Apache-2.0"
] | null | null | null |
adv/natalie.py
|
smashwidget/dl-1
|
352a65b9e40a623d7a9e5a693fa2b412f27d8cff
|
[
"Apache-2.0"
] | null | null | null |
adv/natalie.py
|
smashwidget/dl-1
|
352a65b9e40a623d7a9e5a693fa2b412f27d8cff
|
[
"Apache-2.0"
] | null | null | null |
import adv.adv_test
import adv.adv_test
import adv
from adv import *
from module import energy
from slot.d import *
from slot.a import *
import slot
import random
def module():
    """Entry point used by the adv framework to locate this adventurer class."""
    return Natalie
class Natalie(adv.Adv):
    """Simulation config and skill procs for the adventurer Natalie.

    NOTE(review): this codebase uses `this` instead of `self` for methods —
    kept for consistency with the framework's other adventurer modules.
    """
    comment = 's2 without str buff'
    conf = {}
    conf['slot.a'] = slot.a.HoH() + slot.a.One_with_the_Shadows()
    conf['slot.d'] = Fatalis()
    # Action priority list evaluated by the simulator (top = highest priority).
    conf['acl'] = """
        `s2, pin='prep'
        `s2, seq=5
        `s1
        `s3, fsc
        `s3, seq=5 and s1.charged < s1.sp-200
        `fs, seq=5 and s1.sp-212<=s1.charged and s1.charged<=s1.sp
        `fs, seq=5 and s1.sp > 3000 and s3.charged>=s3.sp
        """
    def d_slots(this):
        # Use a different wyrmprint pair for short (<= 60s) simulations.
        from adv.adv_test import sim_duration
        if sim_duration <= 60:
            this.slots.a = TL()+The_Chocolatiers()
    def init(this):
        random.seed()
        this.hp = 100
        # Swap in the energy-enabled prerun when the 'energy' condition is set.
        if this.condition('energy'):
            this.prerun = this.c_prerun
    def prerun(this):
        # Default: energy tracker with no innate generation.
        this.energy = energy.Energy(this,
            self={} ,
            team={}
            )
    def c_prerun(this):
        # Energy-condition variant: s1 and a1 each grant energy.
        this.energy = energy.Energy(this,
            self={'s1':1,'a1':1} ,
            team={}
            )
    def s1_proc(this, e):
        # s1 damage scales with a crisis (low-HP) modifier; at full energy
        # stacks (5) an additional energized hit is dealt.
        with adv.CrisisModifier('s1', 2, this.hp):
            this.dmg_make('o_s1_crisis', this.conf.s1.dmg)
            if this.energy() == 5:
                dmg = this.conf.s1.dmg * this.energy.get_energy_boost()
                this.dmg_make('o_s1_crisis_energized', dmg)
        # 80% chance to gain an energy stack from a1.
        if random.random() < 0.8:
            this.energy.add_energy('a1')
    def s2_proc(this, e):
        # Above 30% HP: drop to 20 HP to trigger the a3 attack/speed passives.
        if this.hp > 30:
            this.hp = 20
            this.a3atk = Selfbuff('a3atk',0.20,-1,'att','passive').on()
            this.a3spd = Spdbuff('a3spd',0.10,-1).on()
        # else:
        #     Selfbuff('s2', 0.15, 10).on()
if __name__ == '__main__':
    # Run a quick simulation with the default config (verbose=-2, mass=1).
    conf = {}
    adv.adv_test.test(module(), conf, verbose=-2, mass=1)
| 25.907895
| 71
| 0.527171
|
cf0821b35fe876e29a5dc28707fe0804b678aa3b
| 821
|
py
|
Python
|
pythogic/pl/semantics/PLInterpretation.py
|
MarcoFavorito/pythogic
|
aaa74fec41fbf08d96371f62218c462e9a2b69e0
|
[
"MIT"
] | 4
|
2018-02-21T10:43:55.000Z
|
2018-04-13T07:55:04.000Z
|
pythogic/pl/semantics/PLInterpretation.py
|
marcofavorito/pythogic
|
aaa74fec41fbf08d96371f62218c462e9a2b69e0
|
[
"MIT"
] | 34
|
2018-03-04T18:30:12.000Z
|
2018-08-14T21:36:29.000Z
|
pythogic/pl/semantics/PLInterpretation.py
|
marcofavorito/pythogic
|
aaa74fec41fbf08d96371f62218c462e9a2b69e0
|
[
"MIT"
] | 1
|
2018-03-04T18:27:57.000Z
|
2018-03-04T18:27:57.000Z
|
from typing import Dict
from pythogic.base.Alphabet import Alphabet
from pythogic.base.Symbol import Symbol
from pythogic.base.Formula import AtomicFormula, Formula
class PLInterpretation(object):
    """A propositional interpretation: a boolean assignment over an alphabet."""

    def __init__(self, alphabet: Alphabet, symbol2truth: Dict[Symbol, bool]):
        # Every symbol of the alphabet must be mapped, and every value must
        # be a genuine bool.
        assert alphabet.symbols == symbol2truth.keys() and all(type(v) == bool for v in symbol2truth.values())
        self.alphabet = alphabet
        self.symbol2truth = symbol2truth

    def __eq__(self, other):
        # Equal iff same concrete type, same alphabet and same assignment.
        return (
            type(self) == type(other)
            and self.alphabet == other.alphabet
            and self.symbol2truth == other.symbol2truth
        )

    def _members(self):
        # Canonical, order-independent representation used for hashing.
        return self.alphabet, tuple(sorted(self.symbol2truth.items()))

    def __hash__(self):
        return hash(self._members())
| 31.576923
| 108
| 0.694275
|
487b84644c8a287a21128e46fed522cef0f18567
| 1,933
|
py
|
Python
|
examples/ad_manager/v202202/line_item_service/get_line_items_that_need_creatives.py
|
MarkusBordihn/googleads-python-lib
|
09bbcb01f9443f1d140efd8f2d27ef0e4aa74d20
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v202202/line_item_service/get_line_items_that_need_creatives.py
|
MarkusBordihn/googleads-python-lib
|
09bbcb01f9443f1d140efd8f2d27ef0e4aa74d20
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v202202/line_item_service/get_line_items_that_need_creatives.py
|
MarkusBordihn/googleads-python-lib
|
09bbcb01f9443f1d140efd8f2d27ef0e4aa74d20
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all line items that are missing creatives.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Print every line item that is missing creatives, paging through results."""
    # Initialize appropriate service.
    line_item_service = client.GetService('LineItemService', version='v202202')

    # Create a statement to select line items.
    statement = (ad_manager.StatementBuilder(version='v202202')
                 .Where('isMissingCreatives = :isMissingCreatives')
                 .WithBindVariable('isMissingCreatives', True))

    # Retrieve a small amount of line items at a time, paging
    # through until all line items have been retrieved.
    while True:
        response = line_item_service.getLineItemsByStatement(statement.ToStatement(
        ))
        if 'results' in response and len(response['results']):
            for line_item in response['results']:
                # Print out some information for each line item.
                print('Line item with ID "%d" and name "%s" was found.\n' %
                      (line_item['id'], line_item['name']))
            # Advance to the next page.
            statement.offset += statement.limit
        else:
            break

    print('\nNumber of results found: %s' % response['totalResultSetSize'])


if __name__ == '__main__':
    # Initialize client object.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
| 37.173077
| 79
| 0.722193
|
dcaad8742da99cd8f9af8820faf026c4df69231c
| 900
|
py
|
Python
|
python/Data Structures/Linked Lists/removeNthNode.py
|
sinderpl/CodingExamples
|
9bc59a0345589bf51fc74fe9ad527e9498b9b5c9
|
[
"MIT"
] | null | null | null |
python/Data Structures/Linked Lists/removeNthNode.py
|
sinderpl/CodingExamples
|
9bc59a0345589bf51fc74fe9ad527e9498b9b5c9
|
[
"MIT"
] | null | null | null |
python/Data Structures/Linked Lists/removeNthNode.py
|
sinderpl/CodingExamples
|
9bc59a0345589bf51fc74fe9ad527e9498b9b5c9
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def removeNthFromEnd(self, head: "Optional[ListNode]", n: int) -> "Optional[ListNode]":
        """Remove the n-th node from the end of a singly linked list.

        Classic two-pointer gap technique: advance `fast` n nodes ahead, then
        move both pointers until `fast` reaches the tail; `slow` then sits
        just before the node to delete.

        Fixes over the previous version:
        - n == 0 is checked before the emptiness shortcut, so a single-node
          list with n == 0 is returned unchanged instead of being dropped.
        - n greater than the list length no longer raises AttributeError;
          the list is returned unchanged.
        (Annotations are quoted so the method can be defined without
        ListNode/Optional in scope.)
        """
        if not head or n == 0:
            return head

        slow = fast = head

        # Advance `fast` n nodes ahead; bail out if the list is shorter than n.
        for _ in range(n):
            if fast is None:
                return head  # n > length: nothing sensible to remove
            fast = fast.next

        # `fast` is None exactly when the head itself is the n-th from the end.
        if fast is None:
            return head.next

        # Walk both pointers until `fast` sits on the last node.
        while fast.next:
            slow = slow.next
            fast = fast.next

        # `slow` is the predecessor of the target node; unlink it.
        slow.next = slow.next.next
        return head
| 32.142857
| 87
| 0.546667
|
32f2333474e380b71ac2b141778e8ec5c128cdac
| 6,683
|
py
|
Python
|
test_LPRNet.py
|
barrypitman/LPRNet_Pytorch
|
f165e1cd4bc0e116e2c30f1443b48aa0f6f559f5
|
[
"Apache-2.0"
] | null | null | null |
test_LPRNet.py
|
barrypitman/LPRNet_Pytorch
|
f165e1cd4bc0e116e2c30f1443b48aa0f6f559f5
|
[
"Apache-2.0"
] | null | null | null |
test_LPRNet.py
|
barrypitman/LPRNet_Pytorch
|
f165e1cd4bc0e116e2c30f1443b48aa0f6f559f5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# /usr/bin/env/python3
'''
test pretrained model.
Author: aiboy.wei@outlook.com .
'''
from data.load_data import CHARS, CHARS_DICT, LPRDataLoader
from PIL import Image, ImageDraw, ImageFont
from model.LPRNet import build_lprnet
# import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import optim
import torch.nn as nn
import numpy as np
import argparse
import torch
import time
import cv2
import os
def get_parser():
    """Build and evaluate the command-line argument parser for testing.

    NOTE(review): `type=bool` on argparse options is a known footgun —
    bool('False') is True, so any non-empty string enables the flag; verify
    how these flags are passed in practice.
    """
    parser = argparse.ArgumentParser(description='parameters to train net')
    parser.add_argument('--img_size', default=[94, 24], help='the image size')
    parser.add_argument('--test_img_dirs', default="./data/test", help='the test images path')
    parser.add_argument('--dropout_rate', default=0, help='dropout rate.')
    parser.add_argument('--lpr_max_len', default=8, help='license plate number max length.')
    parser.add_argument('--test_batch_size', default=100, type=int, help='testing batch size.')
    parser.add_argument('--phase_train', default=False, type=bool, help='train or test phase flag.')
    parser.add_argument('--num_workers', default=8, type=int, help='Number of workers used in dataloading')
    parser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model')
    parser.add_argument('--show', default=False, type=bool, help='show test image and its predict result or not.')
    parser.add_argument('--pretrained_model', default='./weights/Final_LPRNet_model.pth', help='pretrained base model')
    args = parser.parse_args()

    return args
def collate_fn(batch):
    """Collate (image, label, length) samples into batch tensors.

    Returns a tuple of (stacked image tensor, flat float32 label tensor,
    list of per-sample label lengths). Labels are concatenated into one
    flat tensor; `lengths` records where each sample's labels end.
    """
    image_tensors = [torch.from_numpy(sample[0]) for sample in batch]
    label_lengths = [sample[2] for sample in batch]

    flat_labels = []
    for sample in batch:
        flat_labels.extend(sample[1])
    label_array = np.asarray(flat_labels).flatten().astype(np.float32)

    return (torch.stack(image_tensors, 0), torch.from_numpy(label_array), label_lengths)
def test():
    """Build LPRNet, load pretrained weights and run greedy-decode evaluation."""
    args = get_parser()

    lprnet = build_lprnet(lpr_max_len=args.lpr_max_len, phase=args.phase_train, class_num=len(CHARS), dropout_rate=args.dropout_rate)
    device = torch.device("cuda:0" if args.cuda else "cpu")
    lprnet.to(device)
    print("Successful to build network!")

    # load pretrained model
    if args.pretrained_model:
        lprnet.load_state_dict(torch.load(args.pretrained_model))
        print("load pretrained model successful!")
    else:
        print("[Error] Can't found pretrained mode, please check!")
        return False

    # Comma-separated list of test image directories.
    test_img_dirs = os.path.expanduser(args.test_img_dirs)
    test_dataset = LPRDataLoader(test_img_dirs.split(','), args.img_size, args.lpr_max_len)
    try:
        Greedy_Decode_Eval(lprnet, test_dataset, args)
    finally:
        # Always release any OpenCV windows opened by show().
        cv2.destroyAllWindows()
def Greedy_Decode_Eval(Net, datasets, args):
    """Evaluate `Net` on `datasets` using greedy CTC decoding; print accuracy.

    Counters: Tp = exact matches, Tn_1 = wrong length, Tn_2 = right length
    but wrong characters.

    NOTE(review): the loop variable `i` is reused (shadowed) by the two inner
    loops, and several leftover debug prints remain — both kept as-is here.
    """
    # TestNet = Net.eval()
    epoch_size = len(datasets) // args.test_batch_size
    batch_iterator = iter(DataLoader(datasets, args.test_batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collate_fn))

    Tp = 0
    Tn_1 = 0
    Tn_2 = 0
    t1 = time.time()
    for i in range(epoch_size):
        # load train data
        images, labels, lengths = next(batch_iterator)
        # Split the flat label tensor back into per-sample targets using lengths.
        start = 0
        targets = []
        for length in lengths:
            label = labels[start:start+length]
            targets.append(label)
            start += length
        targets = np.array([el.numpy() for el in targets])
        imgs = images.numpy().copy()

        if args.cuda:
            images = Variable(images.cuda())
        else:
            images = Variable(images)

        # forward
        prebs = Net(images)
        # greedy decode
        prebs = prebs.cpu().detach().numpy()
        print(prebs.shape)
        preb_labels = list()
        for i in range(prebs.shape[0]):
            preb = prebs[i, :, :]
            # Greedy step: take the argmax class at every time step.
            preb_label = list()
            for j in range(preb.shape[1]):
                preb_label.append(np.argmax(preb[:, j], axis=0))
                print('')
                print(preb[:, j])
                print(np.argmax(preb[:, j], axis=0))
            # CTC collapse: drop repeated labels and the blank (last class).
            no_repeat_blank_label = list()
            pre_c = preb_label[0]
            if pre_c != len(CHARS) - 1:
                no_repeat_blank_label.append(pre_c)
            for c in preb_label: # dropout repeate label and blank label
                if (pre_c == c) or (c == len(CHARS) - 1):
                    if c == len(CHARS) - 1:
                        pre_c = c
                    continue
                no_repeat_blank_label.append(c)
                pre_c = c
            preb_labels.append(no_repeat_blank_label)
        for i, label in enumerate(preb_labels):
            # show image and its predict label
            lb = ""
            for x in label:
                lb += CHARS[x]
            tg = ""
            for j in targets[i].tolist():
                tg += CHARS[int(j)]
            print("expected " + tg + ", got " + lb)
            if args.show:
                show(imgs[i], label, targets[i])
            # Wrong length counts as Tn_1; right length but wrong chars as Tn_2.
            if len(label) != len(targets[i]):
                Tn_1 += 1
                continue
            if (np.asarray(targets[i]) == np.asarray(label)).all():
                Tp += 1
            else:
                Tn_2 += 1
    Acc = Tp * 1.0 / (Tp + Tn_1 + Tn_2)
    print("[Info] Test Accuracy: {} [{}:{}:{}:{}]".format(Acc, Tp, Tn_1, Tn_2, (Tp+Tn_1+Tn_2)))
    t2 = time.time()
    print("[Info] Test Speed: {}s 1/{}]".format((t2 - t1) / len(datasets), len(datasets)))
def show(img, label, target):
    """Display one test image with its predicted and target plate strings.

    Blocks on a keypress (cv2.waitKey) before returning.
    """
    # Undo dataloader normalisation: CHW -> HWC, then rescale back to 0..255.
    img = np.transpose(img, (1, 2, 0))
    img *= 128.
    img += 127.5
    img = img.astype(np.uint8)

    # Decode predicted and target label indices back to characters.
    lb = ""
    for i in label:
        lb += CHARS[i]
    tg = ""
    for j in target.tolist():
        tg += CHARS[int(j)]

    # Flag: "T" when the prediction matches the target exactly, else "F".
    flag = "F"
    if lb == tg:
        flag = "T"
    # img = cv2.putText(img, lb, (0,16), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.6, (0, 0, 255), 1)
    img = cv2ImgAddText(img, lb, (0, 0))
    cv2.imshow("test", img)
    print("target: ", tg, " ### {} ### ".format(flag), "predict: ", lb)
    cv2.waitKey()
    cv2.destroyAllWindows()
def cv2ImgAddText(img, text, pos, textColor=(255, 0, 0), textSize=12):
    """Draw `text` onto an image via PIL and return it as an OpenCV (BGR) array.

    Goes through PIL because the bundled CJK-capable TrueType font can render
    glyphs that cv2.putText cannot.
    """
    if (isinstance(img, np.ndarray)):  # detect opencv format or not
        # Convert BGR (OpenCV) to RGB (PIL) before drawing.
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)
    fontText = ImageFont.truetype("data/NotoSansCJK-Regular.ttc", textSize, encoding="utf-8")
    draw.text(pos, text, textColor, font=fontText)
    # Convert back to OpenCV's BGR layout.
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
if __name__ == "__main__":
test()
| 35.359788
| 136
| 0.602574
|
8c39ba45f5d218c4da274872a7140ef2f904521c
| 1,854
|
py
|
Python
|
gitlabform/gitlabform/processors/project/members_processor.py
|
llamasoft/gitlabform
|
bcf4db607209b00845da39d22b668ec5c77f18f2
|
[
"MIT"
] | null | null | null |
gitlabform/gitlabform/processors/project/members_processor.py
|
llamasoft/gitlabform
|
bcf4db607209b00845da39d22b668ec5c77f18f2
|
[
"MIT"
] | 15
|
2020-03-04T19:39:16.000Z
|
2022-03-21T23:01:19.000Z
|
gitlabform/gitlabform/processors/project/members_processor.py
|
grzesuav/gitlabform
|
c62bc4b3459194e28ed78aef807d5737e88d24e7
|
[
"MIT"
] | null | null | null |
import logging
from gitlabform.gitlab import GitLab
from gitlabform.gitlabform.processors.abstract_processor import AbstractProcessor
class MembersProcessor(AbstractProcessor):
    """Processor that applies the "members" section of a project configuration:
    group shares and individual user memberships."""

    def __init__(self, gitlab: GitLab):
        super().__init__("members")
        self.gitlab = gitlab

    def _process_configuration(self, project_and_group: str, configuration: dict):
        """Synchronise the project's shared groups and user members."""
        group_config = configuration.get("members|groups")
        if group_config:
            for group_name, group_settings in group_config.items():
                logging.debug("Setting group '%s' as a member", group_name)
                group_access = group_settings.get("group_access")
                group_expiry = group_settings.get("expires_at", "")

                # Drop and re-add the share so the access level always matches
                # the configured one, even if the group was already shared.
                self.gitlab.unshare_with_group(project_and_group, group_name)
                self.gitlab.share_with_group(
                    project_and_group, group_name, group_access, group_expiry
                )

        user_config = configuration.get("members|users")
        if user_config:
            for user_name, user_settings in user_config.items():
                logging.debug("Setting user '%s' as a member", user_name)
                user_access = user_settings.get("access_level")
                user_expiry = user_settings.get("expires_at", "")

                # Same drop-and-re-add approach as for groups.
                self.gitlab.remove_member_from_project(project_and_group, user_name)
                self.gitlab.add_member_to_project(
                    project_and_group, user_name, user_access, user_expiry
                )
| 39.446809
| 88
| 0.555556
|
abb1ea68260f821a7e273fede6d8d0adef7f027f
| 3,084
|
py
|
Python
|
scripts/psk-frontend.py
|
cclauss/tuya-convert
|
2cf2fa59ca04838da89b0c9ac273c1c4227676d3
|
[
"MIT"
] | null | null | null |
scripts/psk-frontend.py
|
cclauss/tuya-convert
|
2cf2fa59ca04838da89b0c9ac273c1c4227676d3
|
[
"MIT"
] | null | null | null |
scripts/psk-frontend.py
|
cclauss/tuya-convert
|
2cf2fa59ca04838da89b0c9ac273c1c4227676d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import socket
import select
import ssl
import sslpsk
from Crypto.Cipher import AES
from hashlib import md5
from binascii import hexlify, unhexlify
def listener(host, port):
    """Create a TCP server socket bound to (host, port), already listening.

    SO_REUSEADDR is set so the proxy can rebind quickly after a restart.
    """
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(1)
    return srv
def client(host, port):
    """Open a TCP connection to (host, port) and return the connected socket."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((host, port))
    return conn
def gen_psk(identity, hint):
    """Derive the TLS pre-shared key from the client's PSK identity.

    NOTE(review): Python 2 byte-string semantics assumed throughout
    (identity and hint are str). The AES key is MD5 of the hint's last
    16 bytes; the IV is MD5 of the identity minus its first byte.
    """
    print("ID: %s" % hexlify(identity))
    key = md5(hint[-16:]).digest()
    iv = md5(identity[1:]).digest()
    cipher = AES.new(key, AES.MODE_CBC, iv)
    # The PSK is the AES-CBC encryption of identity bytes 1..32.
    psk = cipher.encrypt(identity[1:33])
    print("PSK: %s" % hexlify(psk))
    return psk
class PskFrontend():
    """PSK-TLS terminating proxy: accepts TLS-PSK clients on one port and
    pumps the decrypted bytes to a plain TCP backend host:port."""

    def __init__(self, listening_host, listening_port, host, port):
        self.listening_port = listening_port
        self.listening_host = listening_host
        self.host = host
        self.port = port

        self.server_sock = listener(listening_host, listening_port)
        # Active sessions: list of (client TLS socket, backend plain socket).
        self.sessions = []
        # PSK identity hint: fixed value followed by 16 zero digits
        # (adjacent string literals are concatenated).
        self.hint = '1dHRsc2NjbHltbGx3eWh5' '0000000000000000'

    def readables(self):
        # Sockets select() should watch: the listener plus both ends of every
        # active session.
        readables = [self.server_sock]
        for (s1, s2) in self.sessions:
            readables.append(s1)
            readables.append(s2)
        return readables

    def new_client(self, s1):
        # Wrap the accepted socket in server-side PSK-TLS, then open the
        # matching backend connection; failures are logged and dropped.
        try:
            ssl_sock = sslpsk.wrap_socket(s1,
                server_side = True,
                ssl_version=ssl.PROTOCOL_TLSv1_2,
                ciphers='PSK-AES128-CBC-SHA256',
                psk=lambda identity: gen_psk(identity, self.hint),
                hint=self.hint)
            s2 = client(self.host, self.port)
            self.sessions.append((ssl_sock, s2))
        except Exception as e:
            print("could not establish sslpsk socket:", e)

    def data_ready_cb(self, s):
        """Handle one readable socket: accept a new client or relay data."""
        if s == self.server_sock:
            _s, frm = s.accept()
            print("new client on port %d from %s:%d"%(self.listening_port, frm[0], frm[1]))
            self.new_client(_s)
        for (s1, s2) in self.sessions:
            if s == s1 or s == s2:
                # `c` is the opposite end of the pair from the readable socket.
                c = s1 if s == s2 else s2
                try:
                    buf = s.recv(4096)
                    if len(buf) > 0:
                        c.send(buf)
                    else:
                        # Zero-length read: peer closed — tear down both ends.
                        s1.shutdown(socket.SHUT_RDWR)
                        s2.shutdown(socket.SHUT_RDWR)
                        self.sessions.remove((s1,s2))
                except:
                    # NOTE(review): bare except silently drops the session on
                    # any error (including KeyboardInterrupt) — worth narrowing.
                    self.sessions.remove((s1,s2))
def main():
    """Run both proxies (443 -> 80 and 8886 -> 1883, the standard MQTT port)
    on the gateway address, multiplexing all sockets with select()."""
    gateway = '10.42.42.1'
    proxies = [PskFrontend(gateway, 443, gateway, 80), PskFrontend(gateway, 8886, gateway, 1883)]

    while True:
        readables = []
        for p in proxies:
            readables = readables + p.readables()
        r,_,_ = select.select(readables, [], [])
        for s in r:
            # Each proxy ignores sockets it does not own.
            for p in proxies:
                p.data_ready_cb(s)

if __name__ == '__main__':
    main()
| 29.09434
| 97
| 0.56096
|
ef4bd5b4abec7115157b7e530f849eb55b05eb91
| 746
|
py
|
Python
|
preconvert/output/bson.py
|
timothycrosley/preconvert
|
828a35998908162695543ae7781b0527485cda77
|
[
"MIT"
] | 12
|
2019-08-20T16:01:49.000Z
|
2022-02-09T16:08:36.000Z
|
preconvert/output/bson.py
|
timothycrosley/preconvert
|
828a35998908162695543ae7781b0527485cda77
|
[
"MIT"
] | 4
|
2019-08-21T09:18:09.000Z
|
2021-06-02T00:02:18.000Z
|
preconvert/output/bson.py
|
timothycrosley/preconvert
|
828a35998908162695543ae7781b0527485cda77
|
[
"MIT"
] | 3
|
2019-08-18T11:07:07.000Z
|
2021-02-27T21:34:42.000Z
|
import bson
from bson import *
from preconvert.converters import convert_namedtuple
from preconvert.output import convert
def dumps(content, *args, on_unknown=convert.bson, **kwargs):  # type: ignore
    """Serialize `content` to BSON, preconverting common unserializable types.

    Tuples (including named tuples) are converted first via
    convert_namedtuple; everything else is handed to bson.dumps as-is.
    """
    payload = convert_namedtuple(content) if isinstance(content, tuple) else content
    return bson.dumps(payload, on_unknown=on_unknown, *args, **kwargs)
def dump(content, *args, on_unknown=convert.bson, **kwargs):  # type: ignore
    """Stream-serialize `content` to BSON, preconverting common unserializable types.

    Tuples (including named tuples) are converted first via
    convert_namedtuple; everything else is handed to bson.dump as-is.
    """
    payload = convert_namedtuple(content) if isinstance(content, tuple) else content
    return bson.dump(payload, on_unknown=on_unknown, *args, **kwargs)
| 37.3
| 80
| 0.739946
|
f11e5b6bc023e353c735ed691854ebebc4aedd1f
| 1,728
|
py
|
Python
|
util.py
|
thomasmorgan/LilyLib
|
5b45876d4902379fdfe3295a89b23d01c828b0d5
|
[
"MIT"
] | null | null | null |
util.py
|
thomasmorgan/LilyLib
|
5b45876d4902379fdfe3295a89b23d01c828b0d5
|
[
"MIT"
] | null | null | null |
util.py
|
thomasmorgan/LilyLib
|
5b45876d4902379fdfe3295a89b23d01c828b0d5
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
def flatten(List):
    """Collapse arbitrarily nested lists/tuples in List into one flat list.

    Repeatedly unrolls one level of nesting until no list/tuple elements
    remain; an already-flat list is returned unchanged (same object).
    """
    while any(isinstance(entry, (list, tuple)) for entry in List):
        unrolled = []
        for entry in List:
            if isinstance(entry, (list, tuple)):
                unrolled.extend(entry)
            else:
                unrolled.append(entry)
        List = unrolled
    return List


def split_and_flatten(item):
    """Flatten `item`, split each string element on single spaces, flatten again.

    Raises:
        ValueError: if any flattened element is neither a string nor an int.
    """
    item = flatten([item])
    for element in item:
        if not isinstance(element, (str, int)):
            raise ValueError("Cannot split and flatten {} as it is not a string or int".format(element))
    return flatten([element.split(" ") if isinstance(element, str) else element
                    for element in item])


def select(List, *indexes):
    """Return the elements of List at the given 1-based positions, in List order."""
    indexes = flatten(list(indexes))
    return [entry for position, entry in enumerate(List, start=1) if position in indexes]


def pattern(List, *indexes):
    """Return elements of List picked in the order given by the 1-based indexes."""
    indexes = flatten(list(indexes))
    return [List[position - 1] for position in indexes]


def omit(List, *indexes):
    """Return List with the elements at the given 1-based positions removed."""
    indexes = flatten(list(indexes))
    return [entry for position, entry in enumerate(List, start=1) if position not in indexes]
def subset(List, start, stop):
    """Return List[start..stop] using 1-indexed, inclusive bounds.

    When stop < start the slice is returned in reverse order.
    """
    # Forward slice: plain 1-indexed -> 0-indexed translation.
    if stop >= start:
        return List[start - 1:stop]
    # Reversed slice down to the first element needs the open-ended form,
    # since an explicit end of -1 would mean "last element" instead.
    if stop == 1:
        return List[start - 1::-1]
    return List[start - 1:stop - 2:-1]
def join(*motifs):
    """Merge motif dicts by concatenating their per-key lists.

    Accepts either several motif dicts as positional arguments or a single
    list of them. The key set is taken from the first motif; every other
    motif must contain at least those keys.

    Returns a new dict. The first motif is deep-copied first: the previous
    version aliased it (``joined = motifs[0]``) and therefore silently
    mutated the caller's data while "returning" the merge.
    """
    if isinstance(motifs[0], list):
        motifs = motifs[0]
    # Copy so the caller's first motif is left untouched.
    joined = deepcopy(motifs[0])
    for motif in motifs[1:]:
        for key in joined:
            joined[key] += motif[key]
    return joined
def rep(passage, times=1):
    """Return a new list holding *times* independent deep copies of *passage*.

    Each repetition is deep-copied, so mutating one copy in the result
    does not affect the others (or the original).
    """
    repeated = []
    for _ in range(times):
        repeated.extend(deepcopy(passage))
    return repeated
| 25.411765
| 104
| 0.592593
|
f3935a7fd1208bbf1627594d0e62ed61cbacff1b
| 2,916
|
py
|
Python
|
config/base_settings.py
|
hemor/tursh
|
81be09eb6d74f3234de0341b9fe442ea4cc1001d
|
[
"MIT"
] | null | null | null |
config/base_settings.py
|
hemor/tursh
|
81be09eb6d74f3234de0341b9fe442ea4cc1001d
|
[
"MIT"
] | null | null | null |
config/base_settings.py
|
hemor/tursh
|
81be09eb6d74f3234de0341b9fe442ea4cc1001d
|
[
"MIT"
] | null | null | null |
"""
Django settings for TURSH project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-wide assets live outside the apps, under assets/static and assets/templates.
STATIC_DIR = os.path.join(BASE_DIR, 'assets', 'static')
TEMPLATE_DIR = os.path.join(BASE_DIR, 'assets', 'templates')
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'shortener',
]
# WhiteNoise sits right after SecurityMiddleware so it can serve static
# files without a separate web server (standard WhiteNoise placement).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR, ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'config.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# Compressed + hashed filenames served by WhiteNoise.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'staticfiles')
STATICFILES_DIRS = [STATIC_DIR, ]
STATIC_URL = '/static/'
# Session cookie lifetime in seconds (1 hour).
SESSION_COOKIE_AGE = 3600
| 26.509091
| 91
| 0.706104
|
0779d14ab004983a352e4b21b9b5f2824bab56a7
| 836
|
py
|
Python
|
src/hbcomp/profile/views.py
|
zgoda/hbcomp
|
0b787a05f2cd512c44363daaa560ec74cc9d6261
|
[
"MIT"
] | null | null | null |
src/hbcomp/profile/views.py
|
zgoda/hbcomp
|
0b787a05f2cd512c44363daaa560ec74cc9d6261
|
[
"MIT"
] | null | null | null |
src/hbcomp/profile/views.py
|
zgoda/hbcomp
|
0b787a05f2cd512c44363daaa560ec74cc9d6261
|
[
"MIT"
] | null | null | null |
from flask import render_template, redirect, url_for, flash
from flask_login import login_required
from flask_babel import _
from ..ext import db
from ..profile import profile_bp
from ..profile.forms import ProfileForm
from ..models import User
@profile_bp.route('/<int:user_id>', methods=['POST', 'GET'])
@login_required
def details(user_id):
    """Display and update the profile of the user with *user_id*.

    GET renders the profile form pre-filled from the user record; a valid
    POST saves the user and redirects back here. An invalid POST re-renders
    the form with its validation errors.
    """
    user = User.query.get_or_404(user_id)
    # Bind the form to the user record once. On POST, Flask-WTF gives the
    # submitted form data precedence over ``obj``, and on a failed POST the
    # validation errors stay on this form instance. (The previous version
    # re-created the form from ``obj`` after validation, which discarded the
    # error messages so they never reached the template.)
    form = ProfileForm(obj=user)
    if form.validate_on_submit():
        form.populate_obj(obj=user)
        db.session.add(user)
        db.session.commit()
        flash(_('your profile data has been saved'), category='success')
        return redirect(url_for('profile.details', user_id=user_id))
    ctx = {
        'user': user,
        'form': form,
    }
    return render_template('profile/details.html', **ctx)
| 29.857143
| 72
| 0.687799
|
f6f69db33d11d1b598c6556c9421194e50dc6d4c
| 217
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKeyoTranslations.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKeyoTranslations.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKeyoTranslations.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractKeyoTranslations(item):
    """Parse a feed item title for Keyo Translations.

    Returns None for items with no volume/chapter/fragment structure or
    marked as previews; otherwise returns False (no release is emitted).
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    has_structure = bool(vol or chp or frag)
    if not has_structure or 'preview' in item['title'].lower():
        return None
    return False
| 24.111111
| 74
| 0.705069
|
789e5cf434cdbaa040d0738182f6301cbc1ee3f8
| 513
|
py
|
Python
|
python/find_no_list_divisible_by_two_nos.py
|
codevscolor/codevscolor
|
35ef9042bdc86f45ef87795c35963b75fb64d5d7
|
[
"Apache-2.0"
] | 6
|
2019-04-26T03:11:54.000Z
|
2021-05-07T21:48:29.000Z
|
python/find_no_list_divisible_by_two_nos.py
|
akojif/codevscolor
|
56db3dffeac8f8d76ff8fcf5656770f33765941f
|
[
"Apache-2.0"
] | null | null | null |
python/find_no_list_divisible_by_two_nos.py
|
akojif/codevscolor
|
56db3dffeac8f8d76ff8fcf5656770f33765941f
|
[
"Apache-2.0"
] | 26
|
2019-02-23T14:50:46.000Z
|
2022-02-04T23:44:24.000Z
|
# Read a list of integers from the user, then print the ones divisible by
# both of two user-supplied dividers.
list_size = int(input("How many numbers are in the list : "))

number_list = []
for i in range(0, list_size):
    number_list.append(int(input("Enter list item {} : ".format(i))))

m = int(input("Enter the first divider : "))
n = int(input("Enter the second divider : "))

# Keep only values divisible by both m and n.
final_list = [value for value in number_list if value % m == 0 and value % n == 0]

print("Numbers that are divisible by {} and {} is : ".format(m,n),final_list)
| 28.5
| 77
| 0.643275
|
3df327b63a2f5076a1412d69760118ccb1503a3e
| 320
|
py
|
Python
|
No_0171_Excel Sheet Column Number/No_171_Excel Sheet Column Number_description.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 32
|
2020-01-05T13:37:16.000Z
|
2022-03-26T07:27:09.000Z
|
No_0171_Excel Sheet Column Number/No_171_Excel Sheet Column Number_description.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | null | null | null |
No_0171_Excel Sheet Column Number/No_171_Excel Sheet Column Number_description.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 8
|
2020-06-18T16:17:27.000Z
|
2022-03-15T23:58:18.000Z
|
'''
Description:
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
...
Example 1:
Input: "A"
Output: 1
Example 2:
Input: "AB"
Output: 28
Example 3:
Input: "ZY"
Output: 701
'''
| 10.666667
| 89
| 0.559375
|
9eefae3a8be4fce8211ee5a4de2dd154ef913a88
| 18,789
|
py
|
Python
|
icevision/models/mmseg/models/deeplabv3/backbones/resnet.py
|
lgvaz/mantisshrimp2
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 17
|
2020-07-31T22:09:07.000Z
|
2020-08-30T11:18:36.000Z
|
icevision/models/mmseg/models/deeplabv3/backbones/resnet.py
|
Borda/icevision
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 115
|
2020-08-01T09:19:54.000Z
|
2020-09-04T18:51:28.000Z
|
icevision/models/mmseg/models/deeplabv3/backbones/resnet.py
|
lgvaz/mantisshrimp2
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 1
|
2020-08-25T06:04:34.000Z
|
2020-08-25T06:04:34.000Z
|
__all__ = [
"resnet101_d8",
"resnet101_d16_mg124",
"resnet101b_d8",
"resnet18_d8",
"resnet18b_d8",
"resnet50_d8",
"resnet50b_d8",
]
from icevision.imports import *
from icevision.models.mmseg.utils import *
class MMSegDeeplabBackboneConfig(MMSegBackboneConfig):
    # Backbone config specialised for the DeepLabV3 model family: fixes
    # model_name="deeplabv3" and forwards everything else to the base class.
    def __init__(self, **kwargs):
        """Create a DeepLabV3 backbone config with ``model_name`` preset."""
        super().__init__(model_name="deeplabv3", **kwargs)
base_config_path = mmseg_configs_path / "deeplabv3"
base_weights_url = "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3"
resnet101_d16_mg124 = MMSegDeeplabBackboneConfig(
backbone_type="R101-D16-MG124",
pre_trained_variants=[
{
"config_path": "deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "40k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-67b0c992.pth",
},
{
"default": True,
"config_path": "deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-57bb8425.pth",
},
],
)
resnet101_d8 = MMSegDeeplabBackboneConfig(
backbone_type="R-101-D8",
pre_trained_variants=[
{
"config_path": "deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py",
"crop_size": (480, 480),
"lr_schd": "40k",
"pre_training_dataset": "pascal_context",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context_20200911_204118-1aa27336.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py",
"crop_size": (480, 480),
"lr_schd": "40k",
"pre_training_dataset": "pascal_context_59",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59_20210416_110332-cb08ea46.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py",
"crop_size": (480, 480),
"lr_schd": "80k",
"pre_training_dataset": "pascal_context",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context_20200911_170155-2a21fff3.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py",
"crop_size": (480, 480),
"lr_schd": "80k",
"pre_training_dataset": "pascal_context_59",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59_20210416_113002-26303993.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "40k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth",
},
{
"default": True,
"config_path": "deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py",
"crop_size": (512, 512),
"lr_schd": "160k",
"pre_training_dataset": "ade20k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py",
"crop_size": (512, 512),
"lr_schd": "20k",
"pre_training_dataset": "voc12aug",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py",
"crop_size": (512, 512),
"lr_schd": "40k",
"pre_training_dataset": "voc12aug",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py",
"crop_size": (512, 512),
"lr_schd": "160k",
"pre_training_dataset": "coco-stuff164k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k_20210709_155402-f035acfd.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py",
"crop_size": (512, 512),
"lr_schd": "20k",
"pre_training_dataset": "coco-stuff10k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-c49752cb.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py",
"crop_size": (512, 512),
"lr_schd": "320k",
"pre_training_dataset": "coco-stuff164k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402-3cbca14d.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py",
"crop_size": (512, 512),
"lr_schd": "40k",
"pre_training_dataset": "coco-stuff10k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-636cb433.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py",
"crop_size": (512, 512),
"lr_schd": "80k",
"pre_training_dataset": "coco-stuff164k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k_20210709_201252-13600dc2.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py",
"crop_size": (512, 512),
"lr_schd": "80k",
"pre_training_dataset": "ade20k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "40k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth",
},
],
)
resnet101b_d8 = MMSegDeeplabBackboneConfig(
backbone_type="R-101B-D8",
pre_trained_variants=[
{
"config_path": "deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes_20201226_171821-8fd49503.pth",
},
{
"default": True,
"config_path": "deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes_20201226_190843-9142ee57.pth",
},
],
)
resnet18_d8 = MMSegDeeplabBackboneConfig(
backbone_type="R-18-D8",
pre_trained_variants=[
{
"default": True,
"config_path": "deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes_20201225_021506-23dffbe2.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes_20201225_021506-6452126a.pth",
},
],
)
resnet18b_d8 = MMSegDeeplabBackboneConfig(
backbone_type="R-18B-D8",
pre_trained_variants=[
{
"default": True,
"config_path": "deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes_20201225_094144-46040cef.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes_20201225_094144-fdc985d9.pth",
},
],
)
resnet50_d8 = MMSegDeeplabBackboneConfig(
backbone_type="R-50-D8",
pre_trained_variants=[
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "40k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth",
},
{
"default": True,
"config_path": "deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py",
"crop_size": (512, 512),
"lr_schd": "160k",
"pre_training_dataset": "ade20k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py",
"crop_size": (512, 512),
"lr_schd": "20k",
"pre_training_dataset": "voc12aug",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py",
"crop_size": (512, 512),
"lr_schd": "40k",
"pre_training_dataset": "voc12aug",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py",
"crop_size": (512, 512),
"lr_schd": "160k",
"pre_training_dataset": "coco-stuff164k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k_20210709_163016-49f2812b.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py",
"crop_size": (512, 512),
"lr_schd": "20k",
"pre_training_dataset": "coco-stuff10k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-b35f789d.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py",
"crop_size": (512, 512),
"lr_schd": "320k",
"pre_training_dataset": "coco-stuff164k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k_20210709_155403-51b21115.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py",
"crop_size": (512, 512),
"lr_schd": "40k",
"pre_training_dataset": "coco-stuff10k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-dc76f3ff.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py",
"crop_size": (512, 512),
"lr_schd": "80k",
"pre_training_dataset": "coco-stuff164k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k_20210709_163016-88675c24.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py",
"crop_size": (512, 512),
"lr_schd": "80k",
"pre_training_dataset": "ade20k",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "40k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth",
},
],
)
resnet50b_d8 = MMSegDeeplabBackboneConfig(
backbone_type="R-50B-D8",
pre_trained_variants=[
{
"default": True,
"config_path": "deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py",
"crop_size": (512, 1024),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes_20201225_155148-ec368954.pth",
},
{
"config_path": "deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py",
"crop_size": (769, 769),
"lr_schd": "80k",
"pre_training_dataset": "cityscapes",
"weights_url": "https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes_20201225_155404-87fb0cf4.pth",
},
],
)
| 53.529915
| 219
| 0.670658
|
267037a78db14ab8eb08e4d1608dc32e3024218a
| 4,530
|
py
|
Python
|
modules/gapi/misc/python/test/test_gapi_core.py
|
lefatoum2/opencv
|
f7cab121fe2954c67b343b3b7805e1c092812093
|
[
"Apache-2.0"
] | 2,406
|
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_core.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948
|
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
inference-engine/thirdparty/fluid/modules/gapi/misc/python/test/test_gapi_core.py
|
mmakridi/openvino
|
769bb7709597c14debdaa356dd60c5a78bdfa97e
|
[
"Apache-2.0"
] | 991
|
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
# Plaidml is an optional backend
pkgs = [
('ocl' , cv.gapi.core.ocl.kernels()),
('cpu' , cv.gapi.core.cpu.kernels()),
('fluid' , cv.gapi.core.fluid.kernels())
# ('plaidml', cv.gapi.core.plaidml.kernels())
]
class gapi_core_test(NewOpenCVTests):
    # Each test builds the OpenCV reference result, then runs the same
    # operation through a G-API computation on every backend in ``pkgs``
    # and checks the outputs match (NORM_INF == 0 means identical).
    def test_add(self):
        """gapi.add must match cv.add (values and dtype) on all backends."""
        # TODO: Extend to use any type and size here
        sz = (720, 1280)
        in1 = np.full(sz, 100)
        in2 = np.full(sz, 50)
        # OpenCV
        expected = cv.add(in1, in2)
        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        g_out = cv.gapi.add(g_in1, g_in2)
        comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
        for pkg_name, pkg in pkgs:
            actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg))
            # Comparison
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
                             'Failed on ' + pkg_name + ' backend')
            self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend')
    def test_add_uint8(self):
        """Same as test_add but with explicit uint8 inputs."""
        sz = (720, 1280)
        in1 = np.full(sz, 100, dtype=np.uint8)
        in2 = np.full(sz, 50 , dtype=np.uint8)
        # OpenCV
        expected = cv.add(in1, in2)
        # G-API
        g_in1 = cv.GMat()
        g_in2 = cv.GMat()
        g_out = cv.gapi.add(g_in1, g_in2)
        comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
        for pkg_name, pkg in pkgs:
            actual = comp.apply(cv.gin(in1, in2), args=cv.compile_args(pkg))
            # Comparison
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
                             'Failed on ' + pkg_name + ' backend')
            self.assertEqual(expected.dtype, actual.dtype, 'Failed on ' + pkg_name + ' backend')
    def test_mean(self):
        """gapi.mean over a test image must match cv.mean on all backends."""
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.imread(img_path)
        # OpenCV
        expected = cv.mean(in_mat)
        # G-API
        g_in = cv.GMat()
        g_out = cv.gapi.mean(g_in)
        comp = cv.GComputation(g_in, g_out)
        for pkg_name, pkg in pkgs:
            actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
            # Comparison
            self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF),
                             'Failed on ' + pkg_name + ' backend')
    def test_split3(self):
        """gapi.split3 must produce the same three planes as cv.split."""
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.imread(img_path)
        # OpenCV
        expected = cv.split(in_mat)
        # G-API
        g_in = cv.GMat()
        b, g, r = cv.gapi.split3(g_in)
        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r))
        for pkg_name, pkg in pkgs:
            actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
            # Comparison
            for e, a in zip(expected, actual):
                self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF),
                                 'Failed on ' + pkg_name + ' backend')
                self.assertEqual(e.dtype, a.dtype, 'Failed on ' + pkg_name + ' backend')
    def test_threshold(self):
        """gapi.threshold (TRIANGLE) must match cv.threshold's mat and value."""
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
        maxv = (30, 30)
        # OpenCV
        expected_thresh, expected_mat = cv.threshold(in_mat, maxv[0], maxv[0], cv.THRESH_TRIANGLE)
        # G-API
        g_in = cv.GMat()
        g_sc = cv.GScalar()
        mat, threshold = cv.gapi.threshold(g_in, g_sc, cv.THRESH_TRIANGLE)
        comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(mat, threshold))
        for pkg_name, pkg in pkgs:
            actual_mat, actual_thresh = comp.apply(cv.gin(in_mat, maxv), args=cv.compile_args(pkg))
            # Comparison
            self.assertEqual(0.0, cv.norm(expected_mat, actual_mat, cv.NORM_INF),
                             'Failed on ' + pkg_name + ' backend')
            self.assertEqual(expected_mat.dtype, actual_mat.dtype,
                             'Failed on ' + pkg_name + ' backend')
            self.assertEqual(expected_thresh, actual_thresh[0],
                             'Failed on ' + pkg_name + ' backend')
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
| 34.06015
| 99
| 0.559382
|
2b111004892c19302bfb166baf93f4c9eaa6b3a0
| 721
|
py
|
Python
|
tests/frequency.py
|
cmu-sei/usersim
|
0a90e1c2f32ce27bbb564c7196050c50409989dd
|
[
"BSL-1.0"
] | 10
|
2018-05-07T07:52:51.000Z
|
2021-09-04T05:34:46.000Z
|
tests/frequency.py
|
cmu-sei/usersim
|
0a90e1c2f32ce27bbb564c7196050c50409989dd
|
[
"BSL-1.0"
] | null | null | null |
tests/frequency.py
|
cmu-sei/usersim
|
0a90e1c2f32ce27bbb564c7196050c50409989dd
|
[
"BSL-1.0"
] | 4
|
2018-04-09T17:59:13.000Z
|
2019-11-17T01:33:35.000Z
|
# Copyright 2017 Carnegie Mellon University. See LICENSE.md file for terms.
import api
import usersim
def run_test():
    """Exercise the 'frequency' task until every repetition has stopped.

    Schedules a frequency task that spawns a 'test' task 10 times, then
    cycles the simulator (printing any non-trivial feedback) until the
    task created by the final repetition reports the STOPPED state.
    """
    reps = 10
    inner_task = {'type': 'test', 'config': {}}
    config = {
        'type': 'frequency',
        'config': {
            'frequency': 2000,
            'repetitions': reps,
            'task': inner_task,
        },
    }
    sim = usersim.UserSim(True)
    task_id = api.new_task(config)
    while True:
        result = sim.cycle()
        if len(result) > 1:
            print(result)
        # Break once the final task has been stopped.
        # NOTE(review): assumes task IDs are allocated sequentially, so
        # task_id + reps is the last spawned task -- confirm in the api module.
        if api.status_task(task_id + reps)['state'] == api.States.STOPPED:
            break
if __name__ == '__main__':
run_test()
| 23.258065
| 75
| 0.521498
|
1f3d9ea0f1fcf24d0e18c32ebdc87cd562dbc0f1
| 688
|
py
|
Python
|
LC_problems/954.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/954.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/954.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 954.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/4/1 11:41
------------
"""
import collections
from typing import List
class Solution:
    def canReorderDoubled(self, arr: List[int]) -> bool:
        """Return True if arr can be split into pairs (x, 2x).

        Greedy: pair values in order of increasing absolute value, so each
        value must be matched against its double (this direction is forced
        for both positive and negative numbers). Zeros pair with zeros, so
        their count must be even.
        """
        counts = collections.Counter(arr)
        if counts[0] % 2:
            return False
        for value in sorted(counts, key=abs):
            if counts[value] == 0:
                continue
            # Every remaining `value` needs a distinct `2 * value` partner.
            if counts[value] > counts[2 * value]:
                return False
            counts[2 * value] -= counts[value]
            counts[value] = 0
        return True
if __name__ == '__main__':
s = Solution()
print(s.canReorderDoubled([2, 4, 0, 0, 8, 1]))
| 22.933333
| 56
| 0.482558
|
d184d3c62162a6d403c105290f2ca67fafc309aa
| 2,655
|
py
|
Python
|
webapp/python_utils/blob_upload.py
|
alan-turing-institute/DetectorCheckerWebApp
|
68b802907d6b07ae5154c0979c8adfce65b56348
|
[
"MIT"
] | 2
|
2020-02-11T10:50:05.000Z
|
2020-07-14T16:55:15.000Z
|
webapp/python_utils/blob_upload.py
|
alan-turing-institute/DetectorCheckerWebApp
|
68b802907d6b07ae5154c0979c8adfce65b56348
|
[
"MIT"
] | 10
|
2019-03-29T14:46:12.000Z
|
2020-06-12T16:09:55.000Z
|
webapp/python_utils/blob_upload.py
|
alan-turing-institute/DetectorCheckerWebApp
|
68b802907d6b07ae5154c0979c8adfce65b56348
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
A simple script to upload data to azure as a blob.
"""
import os
import argparse
import smtplib
from azure.storage.blob import BlockBlobService
CONTAINER_NAME = os.environ["AZURE_CONTAINER"]
BLOCK_BLOB_SERVICE = BlockBlobService(
account_name=os.environ["AZURE_STORAGE_ACCOUNT"],
connection_string=os.environ["AZURE_CONNECTION_STRING"],
)
def check_blob_exists(blob_name):
    """Return True if a blob named *blob_name* exists in the container.

    Scans the container's blob listing; stops at the first match.
    """
    for blob in BLOCK_BLOB_SERVICE.list_blobs(CONTAINER_NAME):
        if blob.name == blob_name:
            return True
    return False
def email_confirmation(blob_name, email_to):
    """Send an email notifying *email_to* that *blob_name* was uploaded.

    Credentials and the CC list come from the DC_EMAIL_* environment
    variables. Failures are printed rather than raised, so a broken mail
    setup never blocks an upload.
    """
    mail_user = os.environ["DC_EMAIL_ACC"]
    mail_password = os.environ["DC_EMAIL_PWD"]
    cc_list = os.environ["DC_EMAIL_CC"]
    sent_from = mail_user
    subject = "Data upload notification"
    email_text = """\
From: %s
To: %s
Cc: %s
Subject: %s
Dear DetectorChecker user,
File %s has been uploaded successfully.
- DetectorChecker Team
""" % (sent_from, email_to, cc_list, subject, blob_name)
    # Bug fix: previously ``server`` was only bound inside the try block, so
    # when smtplib.SMTP() itself raised, the finally clause hit an unbound
    # name (NameError) instead of cleaning up.
    server = None
    try:
        # NOTE(review): port 587 normally requires server.starttls() before
        # login -- confirm against the actual mail provider's requirements.
        server = smtplib.SMTP("smtp.mail.com", 587)
        server.login(mail_user, mail_password)
        server.sendmail(sent_from, email_to, email_text)
    except Exception as exception:
        print(exception)
    finally:
        if server is not None:
            server.quit()
def main():
"""
Main upload routine
"""
parser = argparse.ArgumentParser(description="Uploads blobs to Azure")
parser.add_argument("--source", default=None)
parser.add_argument("--target", default=None)
parser.add_argument("--email", default=None)
args = parser.parse_args()
if not args.source or not args.target:
raise RuntimeError("Source file and/or target file were not specified.")
file_path = (args.source).strip()
blob_name = (args.target).strip()
if args.email is not None:
email = (args.email).strip()
else:
email = None
blob_exists = check_blob_exists(blob_name)
if not blob_exists:
try:
BLOCK_BLOB_SERVICE.create_blob_from_path(
CONTAINER_NAME, blob_name, file_path
)
except ValueError as exception:
raise RuntimeError(exception)
else:
raise RuntimeError(
"A blob with the filename of %s already exists!" % (blob_name)
)
# # upload was successful let's send a email notification
# if email is not None:
# email_confirmation(blob_name, email)
if __name__ == "__main__":
main()
| 21.942149
| 80
| 0.656874
|
df9c0b2064a18c7b2377e6b38a3f4264b204c9b9
| 979
|
py
|
Python
|
api/tests/opentrons/commands/test_commands.py
|
faliester/opentrons
|
e945d0f72fed39b0f68c0b30b7afd1981644184f
|
[
"Apache-2.0"
] | null | null | null |
api/tests/opentrons/commands/test_commands.py
|
faliester/opentrons
|
e945d0f72fed39b0f68c0b30b7afd1981644184f
|
[
"Apache-2.0"
] | null | null | null |
api/tests/opentrons/commands/test_commands.py
|
faliester/opentrons
|
e945d0f72fed39b0f68c0b30b7afd1981644184f
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from opentrons.commands import protocol_commands
@pytest.mark.parametrize(
argnames="seconds,"
"minutes,"
"expected_seconds,"
"expected_minutes,"
"expected_text",
argvalues=[
[10, 0, 10, 0, "Delaying for 0 minutes and 10 seconds"],
[10, 9, 10, 9, "Delaying for 9 minutes and 10 seconds"],
[100, 0, 40, 1, "Delaying for 1 minutes and 40 seconds"],
[105, 5.25, 0, 7, "Delaying for 7 minutes and 0 seconds"],
]
)
def test_delay(seconds,
minutes,
expected_seconds,
expected_minutes,
expected_text
):
command = protocol_commands.delay(seconds, minutes)
name = command['name']
payload = command['payload']
assert name == 'command.DELAY'
assert payload['seconds'] == expected_seconds
assert payload['minutes'] == expected_minutes
assert payload['text'] == expected_text
| 30.59375
| 66
| 0.590398
|
e598f975dec6f9ad78ecd34befd188e3ab80d4ff
| 13,053
|
py
|
Python
|
tests/generator/test_compression.py
|
DevMau5x/goldcoin-blockchain-2
|
ed223dd16fa290ea710db7202d6c52a056242cfa
|
[
"Apache-2.0"
] | 17
|
2021-09-08T17:07:54.000Z
|
2022-03-30T04:11:58.000Z
|
tests/generator/test_compression.py
|
DevMau5x/goldcoin-blockchain-2
|
ed223dd16fa290ea710db7202d6c52a056242cfa
|
[
"Apache-2.0"
] | 15
|
2021-09-28T21:09:49.000Z
|
2022-03-22T21:13:23.000Z
|
tests/generator/test_compression.py
|
Pierre21dd/gold2
|
4a35f207ed4c8a7745bfbc73fd3c190bd8b60a3f
|
[
"Apache-2.0"
] | 9
|
2021-09-12T10:03:23.000Z
|
2022-03-15T08:35:11.000Z
|
# flake8: noqa: F501
from dataclasses import dataclass
from typing import List, Any
from unittest import TestCase
from goldcoin.full_node.bundle_tools import (
bundle_suitable_for_compression,
compressed_coin_spend_entry_list,
compressed_spend_bundle_solution,
match_standard_transaction_at_any_index,
simple_solution_generator,
spend_bundle_to_serialized_coin_spend_entry_list,
)
from goldcoin.full_node.generator import run_generator, create_generator_args
from goldcoin.types.blockchain_format.program import Program, SerializedProgram, INFINITE_COST
from goldcoin.types.generator_types import BlockGenerator, CompressorArg, GeneratorArg
from goldcoin.types.spend_bundle import SpendBundle
from goldcoin.util.byte_types import hexstr_to_bytes
from goldcoin.util.ints import uint32
from goldcoin.wallet.puzzles.load_clvm import load_clvm
from tests.core.make_block_generator import make_spend_bundle
from clvm import SExp
import io
from clvm.serialize import sexp_from_stream
from clvm_tools import binutils
TEST_GEN_DESERIALIZE = load_clvm("test_generator_deserialize.clvm", package_or_requirement="goldcoin.wallet.puzzles")
DESERIALIZE_MOD = load_clvm("chialisp_deserialisation.clvm", package_or_requirement="goldcoin.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="goldcoin.wallet.puzzles")
DECOMPRESS_CSE = load_clvm("decompress_coin_spend_entry.clvm", package_or_requirement="goldcoin.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_spend_entry_with_prefix.clvm", package_or_requirement="goldcoin.wallet.puzzles"
)
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="goldcoin.wallet.puzzles")
TEST_MULTIPLE = load_clvm("test_multiple_generator_input_arguments.clvm", package_or_requirement="goldcoin.wallet.puzzles")
Nil = Program.from_bytes(b"\x80")
original_generator = hexstr_to_bytes(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080"
) # noqa
gen1 = b"aaaaaaaaaa" + original_generator
gen2 = b"bb" + original_generator
FAKE_BLOCK_HEIGHT1 = uint32(100)
FAKE_BLOCK_HEIGHT2 = uint32(200)
@dataclass(frozen=True)
class MultipleCompressorArg:
arg: List[CompressorArg]
split_offset: int
def create_multiple_ref_generator(args: MultipleCompressorArg, spend_bundle: SpendBundle) -> BlockGenerator:
"""
Decompress a transaction by referencing bytes from multiple input generator references
"""
compressed_cse_list = compressed_coin_spend_entry_list(spend_bundle)
program = TEST_MULTIPLE.curry(
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
args.arg[0].start,
args.arg[0].end - args.split_offset,
args.arg[1].end - args.split_offset,
args.arg[1].end,
compressed_cse_list,
)
# TODO aqk: Improve ergonomics of CompressorArg -> GeneratorArg conversion
generator_args = [
GeneratorArg(FAKE_BLOCK_HEIGHT1, args.arg[0].generator),
GeneratorArg(FAKE_BLOCK_HEIGHT2, args.arg[1].generator),
]
return BlockGenerator(program, generator_args)
def spend_bundle_to_coin_spend_entry_list(bundle: SpendBundle) -> List[Any]:
r = []
for coin_spend in bundle.coin_spends:
entry = [
coin_spend.coin.parent_coin_info,
sexp_from_stream(io.BytesIO(bytes(coin_spend.puzzle_reveal)), SExp.to),
coin_spend.coin.amount,
sexp_from_stream(io.BytesIO(bytes(coin_spend.solution)), SExp.to),
]
r.append(entry)
return r
class TestCompression(TestCase):
def test_spend_bundle_suitable(self):
sb: SpendBundle = make_spend_bundle(1)
assert bundle_suitable_for_compression(sb)
def test_compress_spend_bundle(self):
pass
def test_multiple_input_gen_refs(self):
start1, end1 = match_standard_transaction_at_any_index(gen1)
start2, end2 = match_standard_transaction_at_any_index(gen2)
ca1 = CompressorArg(FAKE_BLOCK_HEIGHT1, SerializedProgram.from_bytes(gen1), start1, end1)
ca2 = CompressorArg(FAKE_BLOCK_HEIGHT2, SerializedProgram.from_bytes(gen2), start2, end2)
prefix_len1 = end1 - start1
prefix_len2 = end2 - start2
assert prefix_len1 == prefix_len2
prefix_len = prefix_len1
results = []
for split_offset in range(prefix_len):
gen_args = MultipleCompressorArg([ca1, ca2], split_offset)
spend_bundle: SpendBundle = make_spend_bundle(1)
multi_gen = create_multiple_ref_generator(gen_args, spend_bundle)
cost, result = run_generator(multi_gen, INFINITE_COST)
results.append(result)
assert result is not None
assert cost > 0
assert all(r == results[0] for r in results)
def test_compressed_block_results(self):
sb: SpendBundle = make_spend_bundle(1)
start, end = match_standard_transaction_at_any_index(original_generator)
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
s = simple_solution_generator(sb)
assert c != s
cost_c, result_c = run_generator(c, INFINITE_COST)
cost_s, result_s = run_generator(s, INFINITE_COST)
print(result_c)
assert result_c is not None
assert result_s is not None
assert result_c == result_s
def test_spend_byndle_coin_spend(self):
for i in range(0, 10):
sb: SpendBundle = make_spend_bundle(i)
cs1 = SExp.to(spend_bundle_to_coin_spend_entry_list(sb)).as_bin()
cs2 = spend_bundle_to_serialized_coin_spend_entry_list(sb)
assert cs1 == cs2
class TestDecompression(TestCase):
def __init__(self, *args, **kwargs):
super(TestDecompression, self).__init__(*args, **kwargs)
self.maxDiff = None
def test_deserialization(self):
self.maxDiff = None
cost, out = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [bytes(Program.to("hello"))])
assert out == Program.to("hello")
def test_deserialization_as_argument(self):
self.maxDiff = None
cost, out = TEST_GEN_DESERIALIZE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, Nil, bytes(Program.to("hello"))]
)
print(bytes(Program.to("hello")))
print()
print(out)
assert out == Program.to("hello")
def test_decompress_puzzle(self):
cost, out = DECOMPRESS_PUZZLE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, b"\xff", bytes(Program.to("pubkey")), b"\x80"]
)
print()
print(out)
# An empty CSE is invalid. (An empty CSE list may be okay)
# def test_decompress_empty_cse(self):
# cse0 = binutils.assemble("()")
# cost, out = DECOMPRESS_CSE.run_with_cost(INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0])
# print()
# print(out)
def test_decompress_cse(self):
"""Decompress a single CSE / CoinSpendEntry"""
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
cost, out = DECOMPRESS_CSE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0]
)
print()
print(out)
def test_decompress_cse_with_prefix(self):
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
start = 2 + 44
end = start + 238
prefix = original_generator[start:end]
# (deserialize decompress_puzzle puzzle_prefix cse)
cost, out = DECOMPRESS_CSE_WITH_PREFIX.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, prefix, cse0]
)
print()
print(out)
def test_block_program_zero(self):
"Decompress a list of CSEs"
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_spend_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
cost, out = DECOMPRESS_BLOCK.run_with_cost(
INFINITE_COST,
[
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
start,
Program.to(end),
cse2,
DESERIALIZE_MOD,
[bytes(original_generator)],
],
)
print()
print(out)
def test_block_program_zero_with_curry(self):
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_spend_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
p = DECOMPRESS_BLOCK.curry(DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end))
cost, out = p.run_with_cost(INFINITE_COST, [cse2, DESERIALIZE_MOD, [bytes(original_generator)]])
print()
print(p)
print(out)
p_with_cses = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end), cse2, DESERIALIZE_MOD
)
generator_args = create_generator_args([SerializedProgram.from_bytes(original_generator)])
cost, out = p_with_cses.run_with_cost(INFINITE_COST, generator_args)
print()
print(p_with_cses)
print(out)
| 44.247458
| 792
| 0.73914
|
5ce2152bee561b8e9bcca8688e657b30883286e4
| 3,667
|
py
|
Python
|
ooobuild/lo/frame/control_event.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/frame/control_event.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/frame/control_event.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.frame
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
from ..beans.named_value import NamedValue as NamedValue_a37a0af3
from ..util.url import URL as URL_57ad07b9
class ControlEvent(object):
"""
Struct Class
describes a control event sent by extended user interface controls.
**since**
OOo 2.0.3
See Also:
`API ControlEvent <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1frame_1_1ControlEvent.html>`_
"""
__ooo_ns__: str = 'com.sun.star.frame'
__ooo_full_ns__: str = 'com.sun.star.frame.ControlEvent'
__ooo_type_name__: str = 'struct'
typeName: str = 'com.sun.star.frame.ControlEvent'
"""Literal Constant ``com.sun.star.frame.ControlEvent``"""
def __init__(self, aInformation: typing.Optional[typing.Tuple[NamedValue_a37a0af3, ...]] = UNO_NONE, aURL: typing.Optional[URL_57ad07b9] = UNO_NONE, Event: typing.Optional[str] = '') -> None:
"""
Constructor
Arguments:
aInformation (typing.Tuple[NamedValue, ...], optional): aInformation value.
aURL (URL, optional): aURL value.
Event (str, optional): Event value.
"""
super().__init__()
if isinstance(aInformation, ControlEvent):
oth: ControlEvent = aInformation
self.aInformation = oth.aInformation
self.aURL = oth.aURL
self.Event = oth.Event
return
kargs = {
"aInformation": aInformation,
"aURL": aURL,
"Event": Event,
}
if kargs["aInformation"] is UNO_NONE:
kargs["aInformation"] = None
if kargs["aURL"] is UNO_NONE:
kargs["aURL"] = None
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._a_information = kwargs["aInformation"]
self._a_url = kwargs["aURL"]
self._event = kwargs["Event"]
@property
def aInformation(self) -> typing.Tuple[NamedValue_a37a0af3, ...]:
"""
specifies a sequence of named values which are used as additional values for the event.
The number and types of named values depend on the event.
"""
return self._a_information
@aInformation.setter
def aInformation(self, value: typing.Tuple[NamedValue_a37a0af3, ...]) -> None:
self._a_information = value
@property
def aURL(self) -> URL_57ad07b9:
"""
fully parsed URL describing the control that sends this notification.
"""
return self._a_url
@aURL.setter
def aURL(self, value: URL_57ad07b9) -> None:
self._a_url = value
@property
def Event(self) -> str:
"""
specifies the event which has occurred.
"""
return self._event
@Event.setter
def Event(self, value: str) -> None:
self._event = value
__all__ = ['ControlEvent']
| 31.076271
| 195
| 0.638942
|
16f1754e579b0d12de894b3d0751c0eabfdf8e6f
| 9,280
|
py
|
Python
|
Ryven/packages/auto_generated/ctypes.test.test_repr/nodes.py
|
tfroehlich82/Ryven
|
cb57c91d13949712844a4410a9302c4a90d28dcd
|
[
"MIT"
] | 2,872
|
2020-07-01T09:06:34.000Z
|
2022-03-31T05:52:32.000Z
|
Ryven/packages/auto_generated/ctypes.test.test_repr/nodes.py
|
dhf327/Ryven
|
a11e361528d982a9dd3c489dd536f8b05ffd56e1
|
[
"MIT"
] | 59
|
2020-06-28T12:50:50.000Z
|
2022-03-27T19:07:54.000Z
|
Ryven/packages/auto_generated/ctypes.test.test_repr/nodes.py
|
dhf327/Ryven
|
a11e361528d982a9dd3c489dd536f8b05ffd56e1
|
[
"MIT"
] | 339
|
2020-07-05T04:36:20.000Z
|
2022-03-24T07:25:18.000Z
|
from NENV import *
import ctypes.test.test_repr
class NodeBase(Node):
pass
class Array_Node(NodeBase):
"""
"""
title = 'ARRAY'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='len'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.ARRAY(self.input(0), self.input(1)))
class Cfunctype_Node(NodeBase):
"""
CFUNCTYPE(restype, *argtypes,
use_errno=False, use_last_error=False) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in different ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
"""
title = 'CFUNCTYPE'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.CFUNCTYPE(self.input(0)))
class Dllcanunloadnow_Node(NodeBase):
"""
"""
title = 'DllCanUnloadNow'
type_ = 'ctypes.test.test_repr'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.DllCanUnloadNow())
class Dllgetclassobject_Node(NodeBase):
"""
"""
title = 'DllGetClassObject'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='rclsid'),
NodeInputBP(label='riid'),
NodeInputBP(label='ppv'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.DllGetClassObject(self.input(0), self.input(1), self.input(2)))
class Pyfunctype_Node(NodeBase):
"""
"""
title = 'PYFUNCTYPE'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.PYFUNCTYPE(self.input(0)))
class Setpointertype_Node(NodeBase):
"""
"""
title = 'SetPointerType'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='pointer'),
NodeInputBP(label='cls'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.SetPointerType(self.input(0), self.input(1)))
class Winfunctype_Node(NodeBase):
"""
"""
title = 'WINFUNCTYPE'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.WINFUNCTYPE(self.input(0)))
class Winerror_Node(NodeBase):
"""
"""
title = 'WinError'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='code', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='descr', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.WinError(self.input(0), self.input(1)))
class _Calcsize_Node(NodeBase):
"""
Return size in bytes of the struct described by the format string."""
title = '_calcsize'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='format'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr._calcsize(self.input(0)))
class _Check_Size_Node(NodeBase):
"""
"""
title = '_check_size'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='typecode', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr._check_size(self.input(0), self.input(1)))
class _Reset_Cache_Node(NodeBase):
"""
"""
title = '_reset_cache'
type_ = 'ctypes.test.test_repr'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr._reset_cache())
class C_Buffer_Node(NodeBase):
"""
"""
title = 'c_buffer'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.c_buffer(self.input(0), self.input(1)))
class Cast_Node(NodeBase):
"""
"""
title = 'cast'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='obj'),
NodeInputBP(label='typ'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.cast(self.input(0), self.input(1)))
class Create_String_Buffer_Node(NodeBase):
"""
create_string_buffer(aBytes) -> character array
create_string_buffer(anInteger) -> character array
create_string_buffer(aBytes, anInteger) -> character array
"""
title = 'create_string_buffer'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.create_string_buffer(self.input(0), self.input(1)))
class Create_Unicode_Buffer_Node(NodeBase):
"""
create_unicode_buffer(aString) -> character array
create_unicode_buffer(anInteger) -> character array
create_unicode_buffer(aString, anInteger) -> character array
"""
title = 'create_unicode_buffer'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.create_unicode_buffer(self.input(0), self.input(1)))
class String_At_Node(NodeBase):
"""
string_at(addr[, size]) -> string
Return the string at addr."""
title = 'string_at'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.string_at(self.input(0), self.input(1)))
class Wstring_At_Node(NodeBase):
"""
wstring_at(addr[, size]) -> string
Return the string at addr."""
title = 'wstring_at'
type_ = 'ctypes.test.test_repr'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_repr.wstring_at(self.input(0), self.input(1)))
export_nodes(
Array_Node,
Cfunctype_Node,
Dllcanunloadnow_Node,
Dllgetclassobject_Node,
Pyfunctype_Node,
Setpointertype_Node,
Winfunctype_Node,
Winerror_Node,
_Calcsize_Node,
_Check_Size_Node,
_Reset_Cache_Node,
C_Buffer_Node,
Cast_Node,
Create_String_Buffer_Node,
Create_Unicode_Buffer_Node,
String_At_Node,
Wstring_At_Node,
)
| 24.879357
| 116
| 0.613901
|
15e8e7c26c629cbd9f1425f27c9b00a8ea3be94e
| 13,203
|
py
|
Python
|
scripts/community.py
|
communcom/commun.contracts
|
98aaa55d33b91af130013c82780253f39eae47e7
|
[
"MIT"
] | null | null | null |
scripts/community.py
|
communcom/commun.contracts
|
98aaa55d33b91af130013c82780253f39eae47e7
|
[
"MIT"
] | null | null | null |
scripts/community.py
|
communcom/commun.contracts
|
98aaa55d33b91af130013c82780253f39eae47e7
|
[
"MIT"
] | 1
|
2020-10-25T13:58:12.000Z
|
2020-10-25T13:58:12.000Z
|
from deployutils.testnet import *
from deployutils import log_action
from copy import deepcopy
def issueCommunToken(owner, quantity, clientKey, **kwargs):
pushAction('cyber.token', 'issue', 'c.issuer@issue', {
'to':'c.issuer',
'quantity':quantity,
'memo':'issue for '+owner
}, providebw='c.issuer/c@providebw', keys=clientKey, **kwargs)
pushAction('cyber.token', 'transfer', 'c.issuer@issue', {
'from':'c.issuer',
'to':owner,
'quantity':quantity,
'memo':'issue for '+owner
}, providebw='c.issuer/c@providebw', keys=clientKey, **kwargs)
def buyCommunityPoints(owner, quantity, community, ownerKey, clientKey, **kwargs):
issueCommunToken(owner, quantity, clientKey, **kwargs)
return pushAction('cyber.token', 'transfer', owner, {
'from':owner,
'to':'c.point',
'quantity':quantity,
'memo':community
}, providebw=owner+'/c@providebw', keys=[ownerKey, clientKey], **kwargs)
def createCommunityUser(community, creator, creatorKey, clientKey, *, buyPointsOn=None, leaderUrl=None):
(private, public) = createKey()
account = createRandomAccount(public, creator=creator, keys=creatorKey)
openBalance(account, community, creator, keys=creatorKey)
if buyPointsOn:
buyCommunityPoints(account, buyPointsOn, community, private, clientKey)
if leaderUrl:
regLeader(commun_code=community, leader=account, url=leaderUrl,
providebw=account+'/'+creator, keys=[private, creatorKey])
return (account, private)
def recover(account, active_key=None, owner_key=None, provider=None, **kwargs):
args = {'account':account}
if active_key: args['active_key'] = active_key
if owner_key: args['owner_key'] = owner_key
providebw = ['c.recover/'+provider] if provider else None
if active_key and provider: providebw.append(account+'/'+provider)
return pushAction('c.recover', 'recover', 'c.recover@recover', args, providebw=providebw, **kwargs)
def applyOwner(account, **kwargs):
return pushAction('c.recover', 'applyowner', account, {'account': account}, **kwargs)
def cancelOwner(account, **kwargs):
return pushAction('c.recover', 'cancelowner', account, {'account': account}, **kwargs)
def getUnusedPointSymbol():
while True:
point = ''.join(random.choice("ABCDEFGHIJKLMNOPQRSTUVWXYZ") for i in range(6))
if getPointParam(point) == None:
return point
def getPointParam(point):
param = mongoClient["_CYBERWAY_c_point"]["param"].find_one({"max_supply._sym":point})
if param:
param['max_supply'] = Asset.fromdb(param['max_supply'])
param['transfer_fee'] = int(param['transfer_fee'].to_decimal())
return param
def getPointStat(point):
stat = mongoClient["_CYBERWAY_c_point"]["stat"].find_one({"supply._sym":point})
if stat:
stat['supply'] = Asset.fromdb(stat['supply'])
stat['reserve'] = Asset.fromdb(stat['reserve'])
return stat
def getPointBalance(point, account):
res = mongoClient["_CYBERWAY_c_point"]["accounts"].find_one({"balance._sym":point,"_SERVICE_.scope":account})
if res:
res = Asset.fromdb(res['balance'])
return res
def transferPoints(sender, recipient, amount, memo='', **kwargs):
args = {'from':sender, 'to':recipient, 'quantity':amount, 'memo':memo}
return pushAction('c.point', 'transfer', sender, args, providebw=sender+'/c@providebw', **kwargs)
def lockPoints(owner, period, **kwargs):
args = {'owner':owner, 'period':period}
return pushAction('c.point', 'globallock', owner, args, providebw=owner+'/c@providebw', **kwargs)
def enableSafe(owner, unlock, delay, trusted="", **kwargs):
args = {'owner':owner, 'unlock':unlock, 'delay':delay, 'trusted':trusted}
return pushAction('c.point', 'enablesafe', owner, args, providebw=owner+'/c@providebw', **kwargs)
def disableSafe(owner, modId, point, signer=None, **kwargs):
actor = [owner, signer] if signer else owner
args = {'owner':owner, 'mod_id':modId, 'commun_code':point}
return pushAction('c.point', 'disablesafe', actor, args, providebw=owner+'/c@providebw', **kwargs)
def unlockSafe(owner, modId, unlock, signer=None, **kwargs):
actor = [owner, signer] if signer else owner
provide = [owner+'/c@providebw', signer+'/c@providebw'] if signer else owner+'/c@providebw'
args = {'owner':owner, 'mod_id':modId, 'unlock':unlock}
return pushAction('c.point', 'unlocksafe', actor, args, providebw=provide, **kwargs)
def lockSafe(owner, lock, **kwargs):
args = {'owner':owner, 'lock':lock}
return pushAction('c.point', 'locksafe', owner, args, providebw=owner+'/c@providebw', **kwargs)
def modifySafe(owner, modId, point, delay=None, trusted=None, signer=None, **kwargs):
actor = [owner, signer] if signer else owner
args = {'owner':owner, 'mod_id':modId, 'commun_code':point}
if delay is not None: args["delay"] = delay
if trusted is not None: args["trusted"] = trusted
return pushAction('c.point', 'modifysafe', actor, args, providebw=owner+'/c@providebw', **kwargs)
def applySafeMod(owner, modId, signer=None, **kwargs):
actor = [owner, signer] if signer else owner
args = {'owner':owner, 'mod_id':modId}
return pushAction('c.point', 'applysafemod', actor, args, providebw=owner+'/c@providebw', **kwargs)
def cancelSafeMod(owner, modId, **kwargs):
args = {'owner':owner, 'mod_id':modId}
return pushAction('c.point', 'cancelsafemod', owner, args, providebw=owner+'/c@providebw', **kwargs)
def getPointGlobalLock(account):
    # Read the account's global point lock straight from the c.point "lock"
    # table in the chain-state MongoDB mirror; None when no lock row exists.
    return mongoClient["_CYBERWAY_c_point"]["lock"].find_one({"_SERVICE_.scope":account})
def getPointSafe(point, account):
    """Fetch `account`'s safe row for `point` from the MongoDB state mirror.

    The `unlocked` field is converted to an Asset; None when no row exists.
    """
    row = mongoClient["_CYBERWAY_c_point"]["safe"].find_one(
        {"unlocked._sym": point, "_SERVICE_.scope": account})
    if row:
        row["unlocked"] = Asset.fromdb(row['unlocked'])
    return row
def getPointSafeMod(point, account, modId):
    # Look up a pending safe modification by id/point/account in the
    # "safemod" table of the chain-state MongoDB mirror; None when absent.
    return mongoClient["_CYBERWAY_c_point"]["safemod"].find_one({"id":modId,"commun_code":point,"_SERVICE_.scope":account})
def createCommunity(community_name, creator_auth, creator_key, maximum_supply, reserve_amount, *, cw=3333, fee=100, owner_account=None, output=False):
    """Create a full community: owner account, its permissions, points and registration.

    community_name -- human-readable community name registered in c.list
    creator_auth   -- 'actor@permission' authority that signs the setup
    creator_key    -- private key(s) for creator_auth
    maximum_supply -- Asset: maximum point supply; initial supply is 1/1000 of it
    reserve_amount -- Asset: CMN tokens restocked as the point reserve
    cw, fee        -- bancor connector weight and fee passed to c.point::create
    owner_account  -- community owner account name; random one when None
    Returns the owner account name.
    """
    symbol = maximum_supply.symbol
    # Initial supply is a fixed 1/1000 fraction of the maximum supply.
    initial_supply = Asset.fromstr(str(maximum_supply))
    initial_supply.amount //= 1000
    c = parseAuthority(creator_auth)
    (creator_account, creator_permission) = (c['actor'],c['permission'])
    # Re-normalize the authority string from its parsed parts.
    creator_auth = '{acc}@{perm}'.format(acc=creator_account, perm=creator_permission)
    if owner_account is None: owner_account = getRandomAccount()
    with log_action("0. Create <Owner> account '{}'".format(owner_account)):
        createAccount(creator_auth, owner_account, 'c@active', creator_auth,
                providebw=creator_account+'/c@providebw', keys=creator_key, output=output)
        # Set up leadership permissions controlled by c.ctrl and link the
        # ban/unban/transfer actions to them, all in a single transaction.
        trx = Trx()
        for auth in ('lead.smajor', 'lead.major', 'lead.minor'):
            trx.addAction('cyber', 'updateauth', owner_account, {
                    'account': owner_account,
                    'permission': auth,
                    'parent': 'active',
                    'auth': createAuthority([], ['c.ctrl@cyber.code'])})
        trx.addAction('cyber', 'linkauth', owner_account, {
                'account': owner_account,
                'code': 'c.gallery',
                'type': 'ban',
                'requirement': 'lead.minor'})
        trx.addAction('cyber', 'linkauth', owner_account, {
                'account': owner_account,
                'code': 'c.list',
                'type': 'ban',
                'requirement': 'lead.minor'})
        trx.addAction('cyber', 'linkauth', owner_account, {
                'account': owner_account,
                'code': 'c.list',
                'type': 'unban',
                'requirement': 'lead.minor'})
        # transferperm lets c.emit move the owner's points.
        trx.addAction('cyber', 'updateauth', owner_account, {
                'account': owner_account,
                'permission': 'transferperm',
                'parent': 'active',
                'auth': createAuthority([], ['c.emit@cyber.code'])})
        trx.addAction('cyber', 'linkauth', owner_account, {
                'account': owner_account,
                'code': 'c.point',
                'type': 'transfer',
                'requirement': 'transferperm'})
        trx.addAction('cyber', 'providebw', 'c@providebw', {
                'provider': 'c',
                'account': owner_account})
        pushTrx(trx, keys=[creator_key], output=output)
    with log_action('1. Buy some value of CMN tokens (for testing purposes c.issuer@issue)'):
        trx = Trx()
        trx.addAction('cyber.token', 'issue', 'c.issuer@issue', {
                'to':'c.issuer',
                'quantity':reserve_amount,
                'memo':"Reserve for {c}".format(c=community_name)
            })
        trx.addAction('cyber.token', 'transfer', 'c.issuer@issue', {
                'from':'c.issuer',
                'to':owner_account,
                'quantity':reserve_amount,
                'memo':"Reserve for {c}".format(c=community_name)
            })
        trx.addAction('cyber', 'providebw', 'c@providebw', {
                'provider': 'c',
                'account': 'c.issuer'})
        pushTrx(trx, keys=creator_key, output=output)
    with log_action('2. Create community points'):
        pushAction('c.point', 'create', 'c.point@clients', {
                'issuer': owner_account,
                'initial_supply': initial_supply,
                'maximum_supply': maximum_supply,
                'cw': cw,
                'fee': fee
            }, providebw='c.point/c@providebw', keys=creator_key, output=output)
    with log_action('3. Restock CMN tokens for community points'):
        transfer(owner_account, 'c.point', reserve_amount, 'restock: {code}'.format(code=symbol.code),
                providebw=owner_account+'/c@providebw', keys=creator_key, output=output)
    with log_action('4. Open point balance for c.gallery & c.ctrl'):
        trx = Trx()
        for acc in ('c.gallery', 'c.ctrl'):
            trx.addAction('c.point', 'open', 'c@providebw', {
                    "owner": acc,
                    "commun_code": symbol.code,
                    "ram_payer": "c"
                })
        pushTrx(trx, keys=creator_key, output=output)
    with log_action('5. Register community (c.list:create)'):
        pushAction('c.list', 'create', 'c.list@clients', {
                "commun_code": symbol.code,
                "community_name": community_name
            }, providebw=['c.list/c@providebw', 'c.emit/c@providebw', 'c.ctrl/c@providebw', 'c.gallery/c@providebw'],
            keys=creator_key, output=output)
    with log_action('6. Pass account to community'):
        # Hand control to the leaders: active now requires lead.smajor.
        updateAuth(owner_account, 'active', 'owner', [], [owner_account+'@lead.smajor'],
                providebw=owner_account+'/c@providebw', keys=creator_key, output=output)
    return owner_account
def openBalance(owner, commun_code, payer, **kwargs):
    """Open a point balance for `owner` in `commun_code`; RAM paid by `payer`."""
    payload = {'owner': owner, 'commun_code': commun_code, 'ram_payer': payer}
    return pushAction('c.point', 'open', payer, payload, **kwargs)
def regLeader(commun_code, leader, url, **kwargs):
    # Register `leader` as a community leader candidate (c.ctrl::regleader).
    return pushAction('c.ctrl', 'regleader', leader, {
            'commun_code': commun_code,
            'leader': leader,
            'url': url
        }, **kwargs)
def voteLeader(commun_code, voter, leader, pct, **kwargs):
    # `voter` may carry an '@permission' suffix: the full string signs the
    # action, while only the bare actor name goes into the action arguments.
    return pushAction('c.ctrl', 'voteleader', voter, {
            'commun_code': commun_code,
            'voter': parseAuthority(voter)['actor'],
            'leader': leader,
            'pct': pct
        }, **kwargs)
def unvoteLeader(commun_code, voter, leader, **kwargs):
    # Withdraw `voter`'s vote from `leader` (c.ctrl::unvotelead).
    return pushAction('c.ctrl', 'unvotelead', voter, {
            'commun_code': commun_code,
            'voter': voter,
            'leader': leader,
        }, **kwargs)
def createPost(commun_code, author, permlink, category, header, body, **kwargs):
    # Create a top-level post in `category` (c.gallery::create).  The parent
    # id has an empty author, which marks the message as a root post.
    return pushAction('c.gallery', 'create', author, {
            'commun_code':commun_code,
            'message_id':{'author':author, 'permlink':permlink},
            'parent_id':{'author':"", 'permlink':category},
            'header':header,
            'body':body,
            'tags':[],
            'metadata':''
        }, **kwargs)
def upvotePost(commun_code, voter, author, permlink, **kwargs):
    # Upvote the post identified by (author, permlink) (c.gallery::upvote).
    return pushAction('c.gallery', 'upvote', voter, {
            'commun_code':commun_code,
            'voter':voter,
            'message_id':{'author':author, 'permlink':permlink}
        }, **kwargs)
def downvotePost(commun_code, voter, author, permlink, **kwargs):
    """Cast a downvote by `voter` on the post (author, permlink)."""
    message_id = {'author': author, 'permlink': permlink}
    return pushAction('c.gallery', 'downvote', voter,
                      {'commun_code': commun_code,
                       'voter': voter,
                       'message_id': message_id}, **kwargs)
def unvotePost(commun_code, voter, author, permlink, **kwargs):
    # Remove `voter`'s existing vote from the post (c.gallery::unvote).
    return pushAction('c.gallery', 'unvote', voter, {
            'commun_code':commun_code,
            'voter':voter,
            'message_id':{'author':author, 'permlink':permlink}
        }, **kwargs)
| 42.182109
| 150
| 0.613876
|
e92a12277358ecba1fa43e833301dc9a1efc44c2
| 7,574
|
py
|
Python
|
mogp_emulator/benchmarks/benchmark_pivot.py
|
EXAUQ/mogp-emulator
|
9d5772135498bdf5b95b44b4afb065c2c266f899
|
[
"MIT"
] | 21
|
2021-01-20T07:02:12.000Z
|
2022-03-30T21:09:04.000Z
|
mogp_emulator/benchmarks/benchmark_pivot.py
|
EXAUQ/mogp-emulator
|
9d5772135498bdf5b95b44b4afb065c2c266f899
|
[
"MIT"
] | 114
|
2019-04-25T14:53:11.000Z
|
2021-01-06T17:07:41.000Z
|
mogp_emulator/benchmarks/benchmark_pivot.py
|
EXAUQ/mogp-emulator
|
9d5772135498bdf5b95b44b4afb065c2c266f899
|
[
"MIT"
] | 8
|
2021-02-02T08:56:12.000Z
|
2022-02-15T10:03:15.000Z
|
'''This benchmark performs convergence tests using the pivoted
Cholesky routines applied to the 2D Branin function. Details of the 2D
Branin function can be found at
https://www.sfu.ca/~ssurjano/branin.html. The code samples the Branin
function using an increasing number of points, with a duplicate point
added to make the matrix singular. When pivoting is not used, the
algorithm is stabilized by adding a nugget term to the diagonal of the
covariance matrix. This degrades the performance of the emulator
globally, despite the fact that the problem arises from a local
problem in fitting the emulator. Pivoting ignores points that are too
close to one another, ensuring that there is no loss of performance as
the number of points increases.
Note that this benchmark only covers relatively small designs. Tests
have revealed that there are some stability issues when applying
pivoting to larger numbers of inputs -- this appears to be due to the
minimization algorithm, perhaps due to the fact that pivoting computes
the inverse of a slightly different matrix which may influence the
fitting algorithm performance. Care should thus be taken to examine
the resulting performance when applying pivoting in practice. Future
versions may implement other approaches to ensure that pivoting gives
stable performance on a wide variety of input data.
'''
import numpy as np
from mogp_emulator import GaussianProcess, fit_GP_MAP
from mogp_emulator import MonteCarloDesign, LatinHypercubeDesign
from scipy.stats import uniform
try:
import matplotlib.pyplot as plt
makeplots = True
except ImportError:
makeplots = False
def branin_2d(x):
    """2D Branin function, see https://www.sfu.ca/~ssurjano/branin.html for more information

    Accepts either a single point of shape (2,) or an array-like of shape
    (n, 2); returns a scalar or an array of n values respectively.
    """
    # Convert once up front so plain Python sequences (e.g. lists of lists)
    # work in the vectorized branch too; previously only the shape check used
    # the converted array while indexing hit the raw input.
    x = np.asarray(x, dtype=float)
    if x.shape == (2,):
        x1, x2 = x
    else:
        assert x.ndim == 2
        assert x.shape[1] == 2
        x1 = x[:, 0]
        x2 = x[:, 1]
    # Standard Branin constants; the three global minima share f* ~= 0.397887.
    a, b, c, r, s, t = 1., 5.1/4./np.pi**2, 5./np.pi, 6., 10., 1./8./np.pi
    return a*(x2 - b*x1**2 + c*x1 - r)**2 + s*(1. - t)*np.cos(x1) + s
f = branin_2d  # objective evaluated throughout the benchmark
n_dim = 2  # input dimensionality of the Branin function
# Inverse-CDF samplers per input dimension: x1 in [-5, 10], x2 in [0, 15].
design_space = [uniform(loc = -5., scale = 15.).ppf, uniform(loc = 0., scale = 15.).ppf]
# Design sizes at which convergence is measured.
simulations = [5, 10, 15, 20, 25, 30]
def generate_input_data(n_simulations, method = "random"):
    """Draw `n_simulations` design points from the 2D Branin input space.

    `method` picks the experimental design: "random" (Monte Carlo) or
    "lhd" (Latin Hypercube).
    """
    n_simulations = int(n_simulations)
    assert n_simulations > 0
    assert method in ("random", "lhd")
    if method == "lhd":
        design = LatinHypercubeDesign(design_space)
    else:
        design = MonteCarloDesign(design_space)
    return design.sample(n_simulations)
def generate_target_data(inputs):
    """Evaluate the benchmark function on an (n, 2) array of design points."""
    inputs = np.array(inputs)
    assert inputs.ndim == 2
    assert inputs.shape[1] == n_dim
    return f(inputs)
def generate_training_data(n_simulations):
    """Build an LHD design plus targets, appending a duplicate of the first
    point so the resulting covariance matrix is singular."""
    inputs = generate_input_data(n_simulations, method = "lhd")
    targets = generate_target_data(inputs)
    # Repeat the first design point (and its target) at the end.
    inputs_dup = np.concatenate([inputs, inputs[:1, :]], axis=0)
    targets_dup = np.concatenate([targets, targets[:1]])
    return inputs_dup, targets_dup
def generate_test_data(n_testing):
    """Draw `n_testing` random validation points and their function values."""
    testing = generate_input_data(n_testing, method = "random")
    return testing, generate_target_data(testing)
def run_model(n_simulations, n_testing):
    "Generate training data, fit emulator, and test model accuracy on random points, returning RMSE"
    # Fits two GPs on the same singular design -- one stabilized by an
    # adaptive nugget, one using pivoted Cholesky -- and returns normalized
    # (RMSE, mean variance) pairs for both: (rmse_nugget, var_nugget,
    # rmse_pivot, var_pivot).
    print('fitting GPs')
    # run LHD model
    inputs, targets = generate_training_data(n_simulations)
    gp = GaussianProcess(inputs, targets, nugget="adaptive")
    gp = fit_GP_MAP(gp)
    print("fitting pivoted GP")
    gp_pivot = GaussianProcess(inputs, targets, nugget="pivot")
    gp_pivot = fit_GP_MAP(gp_pivot)
    print("making predictions")
    testing, test_targets = generate_test_data(n_testing)
    # Errors are normalized by the range of the test targets (variance by
    # the squared range).
    norm_const = np.max(test_targets)-np.min(test_targets)
    # NOTE(review): predict() is unpacked into (mean, unc, deriv) even with
    # deriv=False -- presumably it always returns a 3-tuple; confirm against
    # the mogp_emulator API.
    test_vals_pivot, unc_pivot, deriv = gp_pivot.predict(testing, deriv = False, unc = True)
    test_vals, unc, deriv = gp.predict(testing, deriv = False, unc = True)
    return (np.sqrt(np.sum((test_vals - test_targets)**2)/float(n_testing))/norm_const,
            np.sqrt(np.sum(unc**2)/float(n_testing))/norm_const**2,
            np.sqrt(np.sum((test_vals_pivot - test_targets)**2)/float(n_testing))/norm_const,
            np.sqrt(np.sum(unc_pivot**2)/float(n_testing))/norm_const**2)
def plot_model_errors(simulation_list, error, unc, error_pivot, unc_pivot, n_testing):
    "Makes plot showing accuracy of emulator as a function of n_simulations"
    # Saves two PNG files in the working directory: pivot_error.png (RMSE)
    # and pivot_unc.png (prediction variance), each comparing the pivoted
    # and nugget-stabilized emulators on a log scale.
    plt.figure(figsize=(4,3))
    plt.semilogy(simulation_list, error_pivot,'-o', label = 'Pivot')
    plt.semilogy(simulation_list, error,'-x', label = 'Nugget')
    plt.xlabel('Number of design points')
    plt.ylabel('Average prediction RMSE')
    plt.legend()
    plt.title('Error for '+str(n_testing)+' predictions')
    plt.savefig('pivot_error.png',bbox_inches='tight')
    plt.figure(figsize=(4,3))
    plt.semilogy(simulation_list, unc_pivot,'-o', label = "Pivot")
    plt.semilogy(simulation_list, unc,'-x', label = 'Nugget')
    plt.xlabel('Number of design points')
    plt.ylabel('Average prediction variance')
    plt.legend()
    plt.title('Uncertainty for '+str(n_testing)+' predictions')
    plt.savefig('pivot_unc.png',bbox_inches='tight')
def run_all_models(n_testing, simulation_list, n_iter = 10):
    "Runs all models, printing out results and optionally making plots"
    # Repeats run_model n_iter times for every design size, averages the
    # normalized metrics over repetitions, prints two comparison tables and
    # (when matplotlib is available) saves the plots.
    n_simtrials = len(simulation_list)
    errors = np.zeros((n_simtrials, n_iter))
    uncs = np.zeros((n_simtrials, n_iter))
    errors_pivot = np.zeros((n_simtrials, n_iter))
    uncs_pivot = np.zeros((n_simtrials, n_iter))
    for iteration in range(n_iter):
        for sim_index in range(n_simtrials):
            print(sim_index, iteration)
            (errors[sim_index, iteration],
             uncs[sim_index, iteration],
             errors_pivot[sim_index, iteration],
             uncs_pivot[sim_index, iteration]) = run_model(simulation_list[sim_index], n_testing)
    # Average over the repetition axis.
    error = np.mean(errors, axis = -1)
    unc = np.mean(uncs, axis = -1)
    error_pivot = np.mean(errors_pivot, axis = -1)
    unc_pivot = np.mean(uncs_pivot, axis = -1)
    print("\n")
    print("Convergence test results:")
    print("Num. design points RMSE Nugget         RMSE Pivot")
    for sim, err, pivot_err in zip(simulation_list, error, error_pivot):
        print('{:19} {:19} {:19}'.format(str(sim), str(err), str(pivot_err)))
    print("\n")
    print("Num. design points Variance Nugget     Variance Pivot")
    for sim, un, pivot_un in zip(simulation_list, unc, unc_pivot):
        print('{:19} {:19} {:19}'.format(str(sim), str(un), str(pivot_un)))
    if makeplots:
        plot_model_errors(simulation_list, error, unc, error_pivot, unc_pivot, n_testing)
if __name__ == '__main__':
    # Full convergence study: 100 test points, 10 repetitions per design size.
    run_all_models(100, [int(x) for x in simulations], n_iter = 10)
| 38.841026
| 100
| 0.687748
|
72bd2d3324384e9e36ab67f1c6e2f46a69b72575
| 3,663
|
py
|
Python
|
photo_rename/copy_metadata.py
|
eigenholser/jpeg_rename
|
bd498fcff87fa94fc29ce85bc9f6d8063bc12095
|
[
"MIT"
] | null | null | null |
photo_rename/copy_metadata.py
|
eigenholser/jpeg_rename
|
bd498fcff87fa94fc29ce85bc9f6d8063bc12095
|
[
"MIT"
] | null | null | null |
photo_rename/copy_metadata.py
|
eigenholser/jpeg_rename
|
bd498fcff87fa94fc29ce85bc9f6d8063bc12095
|
[
"MIT"
] | 1
|
2018-06-24T14:53:28.000Z
|
2018-06-24T14:53:28.000Z
|
import logging
import os
import re
import stat
import sys
import pyexiv2
import photo_rename
from photo_rename import FileMetadata, Harvester
from photo_rename.utils import CustomArgumentParser
logger = logging.getLogger(__name__)
def process_all_files(src_directory, dst_directory, simon_sez=None):
    """
    Manage the entire process of gathering data and copying metadata.

    Pairs files between src_directory and dst_directory via Harvester
    filemaps and copies metadata from each source file to its matching
    destination file.  When simon_sez is falsy, only log what would be
    done (dry run).  Exits with status 1 when either directory is unusable.
    """
    error = False
    if not os.path.exists(src_directory):
        logger.error(
                "Directory {0} does not exist. Exiting.".format(
                    src_directory))
        error = True

    if not os.access(dst_directory, os.W_OK):
        logger.error(
                "Destination directory {0} is not writable. Exiting.".format(
                    dst_directory))
        error = True

    if error:
        # logging's warn() is a deprecated alias; use warning().
        logger.warning("Exiting due to errors.")
        sys.exit(1)

    harvester = Harvester(src_directory, metadata_dst_directory=dst_directory)
    filemaps = harvester["filemaps"]

    count = 0
    for fm in filemaps.get():
        count += 1
        src_fmd = FileMetadata(os.path.join(src_directory, fm.src_fn))
        if simon_sez:
            logger.info(
                    "Copying metadata from {} ==> {}".format(
                        fm.src_fn, fm.dst_fn))
            src_fmd.copy_metadata(os.path.join(dst_directory, fm.dst_fn))
        else:
            logger.info(
                    "DRY RUN: Copying metadata from {} ==> {}".format(
                        fm.src_fn, fm.dst_fn))

    if count == 0:
        logger.warning("No matching files found. Check src and dst.")
def main():
    """
    Parse command-line arguments. Initiate file processing.

    Exits with status 1 when required arguments are missing or do not
    name existing directories.
    """
    parser = CustomArgumentParser()
    parser.add_argument("-s", "--simon-sez",
            help="Really, Simon sez copy the data!", action="store_true")
    parser.add_argument("-r", "--src-directory",
            help="Copy metadata from files in this directory.")
    parser.add_argument("-d", "--dst-directory",
            help="Copy metadata to matching files in this directory.")
    parser.add_argument("-v", "--verbose", help="Log level to DEBUG.",
            action="store_true")
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    error = False

    # Require both directory arguments.  Report every missing one before
    # exiting instead of aborting on the first (removes the duplicated
    # exit path that was flagged XXX).
    for arg in [args.src_directory, args.dst_directory]:
        if not arg:
            logger.error(
                    "Required src or dst directory parameter missing.")
            error = True
    if error:
        logger.error("Exiting due to errors.")
        parser.usage_message()
        sys.exit(1)

    if (os.path.exists(args.src_directory) and
            os.path.isdir(args.src_directory)):
        src_directory = args.src_directory
    else:
        # BUG FIX: this message previously interpolated args.dst_directory.
        logger.error(
            "--src-directory={} does not exist or is not a directory.".format(
                args.src_directory))
        error = True

    if (os.path.exists(args.dst_directory) and
            os.path.isdir(args.dst_directory)):
        dst_directory = args.dst_directory
    else:
        logger.error(
            "--dst-directory={} does not exist or is not a directory.".format(
                args.dst_directory))
        error = True

    if error:
        logger.error("Exiting due to errors.")
        parser.usage_message()
        sys.exit(1)
    else:
        process_all_files(src_directory, dst_directory, simon_sez=args.simon_sez)
if __name__ == '__main__': # pragma: no cover
    # Script entry point: parse CLI args and copy metadata between directories.
    main()
| 31.307692
| 81
| 0.603877
|
f319bae6accce0c48a7049259aa2cd23c7657bb2
| 1,517
|
py
|
Python
|
truestory/datautil.py
|
savvybit/TrueStory
|
4b329ee82b07bdcb79947dded7e6d3ede02bf9e6
|
[
"MIT"
] | 2
|
2021-06-08T20:05:31.000Z
|
2021-12-16T10:41:32.000Z
|
truestory/datautil.py
|
savvybit/TrueStory
|
4b329ee82b07bdcb79947dded7e6d3ede02bf9e6
|
[
"MIT"
] | null | null | null |
truestory/datautil.py
|
savvybit/TrueStory
|
4b329ee82b07bdcb79947dded7e6d3ede02bf9e6
|
[
"MIT"
] | null | null | null |
"""Utilities playing with data files."""
import csv
import functools
import io
import json
import addict
import toml
import pkg_resources
from truestory import settings
# Maps a file-type name to its deserializing callable.  json/toml consume
# the whole stream eagerly; csv wraps the stream in a lazy DictReader.
LOADERS = {
    "json": json.load,
    "toml": toml.load,
    "csv": csv.DictReader,
}
def get_string(path):
    """Return the decoded, stripped contents of the packaged resource at `path`."""
    raw = pkg_resources.resource_string(settings.PROJECT_NAME, path)
    return raw.decode(settings.ENCODING).strip()
def get_stream(path, binary=True):
    """Return a file-like object for the packaged resource at `path`.

    The stream is binary by default; pass binary=False for a text wrapper.
    """
    resource = pkg_resources.resource_stream(settings.PROJECT_NAME, path)
    if binary:
        return resource
    return io.TextIOWrapper(resource)
def get_structured(path, *, file_type):
    """Deserialize a packaged JSON, Toml or CSV resource.

    Args:
        path (str): Path to file on disk.
        file_type (str): One of "json", "toml" or "csv".

    Returns:
        addict.Dict: Dictionary-like object containing the read data
            (CSV rows are exposed under the "reader" key).
    """
    assert file_type in LOADERS, "invalid file type for loading content"
    # json.load accepts a binary stream; toml and csv require text.
    stream = get_stream(path, binary=file_type in ("json",))
    data = LOADERS[file_type](stream)
    if file_type == "csv":
        data = {"reader": data}
    return addict.Dict(data)
# Convenience wrappers with the file type pre-bound.
get_json_data = functools.partial(get_structured, file_type="json")
get_toml_data = functools.partial(get_structured, file_type="toml")
get_csv_data = functools.partial(get_structured, file_type="csv")
| 26.155172
| 72
| 0.709295
|
578ed2e1d6a1bee6874e670bf4e05d56d15c22ce
| 931
|
py
|
Python
|
env/lib/python3.6/site-packages/altair/vegalite/v1/data.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 1,134
|
2015-09-19T05:38:36.000Z
|
2021-09-21T15:15:11.000Z
|
env/lib/python3.6/site-packages/altair/vegalite/v1/data.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 165
|
2015-09-19T05:09:33.000Z
|
2018-10-08T19:42:28.000Z
|
env/lib/python3.6/site-packages/altair/vegalite/v1/data.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 75
|
2015-09-19T03:30:25.000Z
|
2018-11-19T05:37:38.000Z
|
from ..data import (MaxRowsError, curry, default_data_transformer, limit_rows,
pipe, sample, to_csv, to_json, to_values, DataTransformerRegistry)
# ==============================================================================
# VegaLite 1 data transformers
# ==============================================================================
# Entry-point group through which third parties can contribute transformers.
ENTRY_POINT_GROUP = 'altair.vegalite.v1.data_transformer'  # type: str

# Registry of data transformers for the VegaLite v1 API.
data_transformers = DataTransformerRegistry(entry_point_group=ENTRY_POINT_GROUP)  # type: DataTransformerRegistry

# Built-in transformers; 'default' is active unless overridden.
data_transformers.register('default', default_data_transformer)
data_transformers.register('json', to_json)
data_transformers.register('csv', to_csv)
data_transformers.enable('default')

__all__ = (
    'MaxRowsError',
    'curry',
    'default_data_transformer',
    'limit_rows',
    'pipe',
    'sample',
    'to_csv',
    'to_json',
    'to_values',
    'data_transformers'
)
| 29.09375
| 113
| 0.606874
|
fa388732e0a7a30d27ee5e4ef1e0737117d28bcf
| 5,725
|
py
|
Python
|
telethon/client/dialogs.py
|
justinjohnymathew/Telethon
|
fb40e7b50837d67fe5e8df27995a2c80bdc26296
|
[
"MIT"
] | null | null | null |
telethon/client/dialogs.py
|
justinjohnymathew/Telethon
|
fb40e7b50837d67fe5e8df27995a2c80bdc26296
|
[
"MIT"
] | null | null | null |
telethon/client/dialogs.py
|
justinjohnymathew/Telethon
|
fb40e7b50837d67fe5e8df27995a2c80bdc26296
|
[
"MIT"
] | 1
|
2018-09-05T14:59:27.000Z
|
2018-09-05T14:59:27.000Z
|
import itertools
from collections import UserList
from async_generator import async_generator, yield_
from .users import UserMethods
from .. import utils
from ..tl import types, functions, custom
class DialogMethods(UserMethods):
    """Mixin providing dialog- and draft-related client methods."""

    # region Public methods

    @async_generator
    async def iter_dialogs(
            self, limit=None, *, offset_date=None, offset_id=0,
            offset_peer=types.InputPeerEmpty(), ignore_migrated=False,
            _total=None):
        """
        Returns an iterator over the dialogs, yielding 'limit' at most.
        Dialogs are the open "chats" or conversations with other people,
        groups you have joined, or channels you are subscribed to.

        Args:
            limit (`int` | `None`):
                How many dialogs to be retrieved as maximum. Can be set to
                ``None`` to retrieve all dialogs. Note that this may take
                whole minutes if you have hundreds of dialogs, as Telegram
                will tell the library to slow down through a
                ``FloodWaitError``.

            offset_date (`datetime`, optional):
                The offset date to be used.

            offset_id (`int`, optional):
                The message ID to be used as an offset.

            offset_peer (:tl:`InputPeer`, optional):
                The peer to be used as an offset.

            ignore_migrated (`bool`, optional):
                Whether :tl:`Chat` that have ``migrated_to`` a :tl:`Channel`
                should be included or not. By default all the chats in your
                dialogs are returned, but setting this to ``True`` will hide
                them in the same way official applications do.

            _total (`list`, optional):
                A single-item list to pass the total parameter by reference.

        Yields:
            Instances of `telethon.tl.custom.dialog.Dialog`.
        """
        limit = float('inf') if limit is None else int(limit)
        if limit == 0:
            if not _total:
                return
            # Special case, get a single dialog and determine count
            dialogs = await self(functions.messages.GetDialogsRequest(
                offset_date=offset_date,
                offset_id=offset_id,
                offset_peer=offset_peer,
                limit=1
            ))
            _total[0] = getattr(dialogs, 'count', len(dialogs.dialogs))
            return

        # Track yielded peers so pagination overlaps never repeat a dialog.
        seen = set()
        req = functions.messages.GetDialogsRequest(
            offset_date=offset_date,
            offset_id=offset_id,
            offset_peer=offset_peer,
            limit=0
        )
        while len(seen) < limit:
            # Request the remaining amount, capped at 100 per page.
            req.limit = min(limit - len(seen), 100)
            r = await self(req)

            if _total:
                _total[0] = getattr(r, 'count', len(r.dialogs))

            # Index users/chats by marked peer id so dialogs and messages can
            # be resolved locally without extra requests.
            entities = {utils.get_peer_id(x): x
                        for x in itertools.chain(r.users, r.chats)}

            messages = {m.id: custom.Message(self, m, entities, None)
                        for m in r.messages}

            # Happens when there are pinned dialogs
            if len(r.dialogs) > limit:
                r.dialogs = r.dialogs[:limit]

            for d in r.dialogs:
                peer_id = utils.get_peer_id(d.peer)
                if peer_id not in seen:
                    seen.add(peer_id)
                    cd = custom.Dialog(self, d, entities, messages)
                    if cd.dialog.pts:
                        # Remember the dialog's PTS for later update handling.
                        self._channel_pts[cd.id] = cd.dialog.pts

                    if not ignore_migrated or getattr(
                            cd.entity, 'migrated_to', None) is None:
                        await yield_(cd)

            if len(r.dialogs) < req.limit\
                    or not isinstance(r, types.messages.DialogsSlice):
                # Less than we requested means we reached the end, or
                # we didn't get a DialogsSlice which means we got all.
                break

            # Advance the pagination offsets to the last message/dialog seen.
            req.offset_date = r.messages[-1].date
            req.offset_peer = entities[utils.get_peer_id(r.dialogs[-1].peer)]
            if req.offset_id == r.messages[-1].id:
                # In some very rare cases this will get stuck in an infinite
                # loop, where the offsets will get reused over and over. If
                # the new offset is the same as the one before, break already.
                break

            req.offset_id = r.messages[-1].id
            req.exclude_pinned = True

    async def get_dialogs(self, *args, **kwargs):
        """
        Same as :meth:`iter_dialogs`, but returns a list instead
        with an additional ``.total`` attribute on the list.
        """
        total = [0]
        kwargs['_total'] = total
        dialogs = UserList()
        async for x in self.iter_dialogs(*args, **kwargs):
            dialogs.append(x)
        dialogs.total = total[0]
        return dialogs

    @async_generator
    async def iter_drafts(self):
        """
        Iterator over all open draft messages.

        Instances of `telethon.tl.custom.draft.Draft` are yielded.
        You can call `telethon.tl.custom.draft.Draft.set_message`
        to change the message or `telethon.tl.custom.draft.Draft.delete`
        among other things.
        """
        r = await self(functions.messages.GetAllDraftsRequest())
        for update in r.updates:
            # Each update wraps one draft; convert to the friendlier type.
            await yield_(custom.Draft._from_update(self, update))

    async def get_drafts(self):
        """
        Same as :meth:`iter_drafts`, but returns a list instead.
        """
        result = []
        async for x in self.iter_drafts():
            result.append(x)
        return result

    # endregion
| 36.698718
| 78
| 0.559301
|
225b90bdaab50a8bb49152deb0d9af8f7e77e741
| 3,771
|
py
|
Python
|
setup.py
|
tonysimpson/Ni
|
e1fd80852538d7d4b918aa929e48eba8bf503b5b
|
[
"MIT"
] | 1
|
2020-04-21T21:40:41.000Z
|
2020-04-21T21:40:41.000Z
|
setup.py
|
tonysimpson/Ni
|
e1fd80852538d7d4b918aa929e48eba8bf503b5b
|
[
"MIT"
] | null | null | null |
setup.py
|
tonysimpson/Ni
|
e1fd80852538d7d4b918aa929e48eba8bf503b5b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""
Ni aims to provide a module that just makes Python faster.
Ni is based on a fork of Armin Rigo's Psyco, Armin and the other Psyco
developers moved onto PyPy so go check that out if you want a high performance
implementation of Python (http://pypy.org).
"""
from __future__ import print_function
import os, sys
import glob
from distutils.core import setup
from distutils.extension import Extension
####################################################################
# Override defaults using environment variables e.g. dev mode
# install with debug trace points:
# > NI_TRACE=1 pip install -e .
#
# If the following is set to 1, Psyco is compiled by #including all .c
# files into ni.c.
# It provides a version of _psyco.so whose only exported (non-static)
# symbol is init_psyco(). It also seems that the GDB debugger doesn't locate
# too well non-static symbols in shared libraries. Recompiling after minor
# changes is faster if ALL_STATIC=0.
ALL_STATIC = int(os.environ.get('NI_ALL_STATIC', 0))
# Enable debugger trace points and compiler with debug options by
# setting to 1
NI_TRACE = int(os.environ.get('NI_TRACE', 0))
# Extra checks enable with 1
ALL_CHECKS = int(os.environ.get('NI_ALL_CHECKS', 0))
####################################################################

# Preprocessor macros handed to the C compiler; each simply forwards the
# corresponding build-time toggle defined above.
macros = [
    ('ALL_STATIC', str(ALL_STATIC)),
    ('NI_TRACE', str(NI_TRACE)),
    ('ALL_CHECKS', str(ALL_CHECKS)),
]
class ProcessorAutodetectError(Exception):
    """Raised when the host processor cannot be determined or is unsupported."""
    pass
def autodetect():
    """Return the per-processor source directory name for this host.

    Currently only x86_64 is supported (mapped to 'x64').  Raises
    ProcessorAutodetectError when the machine type cannot be determined
    or has no mapping (this includes the Windows 'win' placeholder).
    """
    import platform

    plat = sys.platform.lower()
    if plat.startswith('win'):    # assume an Intel Windows
        mach = 'win'
    else:
        # Prefer stdlib platform.machine() over shelling out; fall back to
        # `uname -m` for exotic hosts where it returns an empty string.
        mach = platform.machine()
        if not mach:
            mach = os.popen('uname -m', 'r').read().strip()
        if not mach:
            raise ProcessorAutodetectError("cannot run 'uname -m'")
    try:
        return {
            'x86_64': 'x64',
            }[mach]
    except KeyError:
        raise ProcessorAutodetectError("unsupported processor '%s'" % mach)
PROCESSOR = autodetect()
def find_sources(processor, all_static):
    """Return the list of C sources to compile for `processor`.

    With all_static, a single translation unit (ni.c, which #includes the
    rest) is returned; otherwise every .c file in the source tree plus the
    processor-specific directory is collected.
    """
    if all_static:
        return ['./ni/ni.c']
    patterns = [
        './ni/*.c',
        './ni/Python/*.c',
        './ni/Modules/*.c',
        './ni/Objects/*.c',
        './ni/%s/*.c' % (processor,),
    ]
    sources = []
    for pattern in patterns:
        sources.extend(glob.glob(pattern))
    return sources
if NI_TRACE:
    # Debug build: no optimization, full debug info for tracing.
    extra_compile_args = ['-O0', '-g3', '-Wall', '-fno-stack-protector']
else:
    extra_compile_args = ['-O3']
extra_link_args = []

sources = find_sources(PROCESSOR, ALL_STATIC==1)
processor_dir = os.path.join('./ni', PROCESSOR)

CLASSIFIERS = [
    'Development Status :: 2 - Pre-Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: C',
    'Topic :: Software Development :: Compilers',
    'Topic :: Software Development :: Interpreters',
]

setup(
    name = "ni",
    version = "0.1a2",
    description = "Plugin JIT for CPython",
    maintainer = "Tony Simpson",
    maintainer_email = "agjasimpson@gmail.com",
    url = "http://github.com/tonysimpson/ni",
    license = "MIT License",
    long_description = __doc__,
    packages = ['ni'],
    # NOTE(review): 'debug' is not a documented Extension kwarg -- distutils
    # appears to ignore unknown keywords with a warning; confirm intent.
    ext_modules=[Extension(name = 'ni',
                           sources = sources,
                           extra_compile_args = extra_compile_args,
                           extra_link_args = extra_link_args,
                           define_macros = macros,
                           debug = True,
                           include_dirs = [processor_dir],
                           libraries = ['ffi'],
    )],
    classifiers=CLASSIFIERS,
)
| 32.508621
| 78
| 0.596924
|
d4f47e8a37f113531f6f718b2275ccf9d42a5e2b
| 534
|
py
|
Python
|
push_me_out/apps/notifier/serializers.py
|
dev-prakhar/push-me-out-server
|
bd5fe94e1f08663fb4059ec2b5d6b4dea5ea2c58
|
[
"MIT"
] | null | null | null |
push_me_out/apps/notifier/serializers.py
|
dev-prakhar/push-me-out-server
|
bd5fe94e1f08663fb4059ec2b5d6b4dea5ea2c58
|
[
"MIT"
] | null | null | null |
push_me_out/apps/notifier/serializers.py
|
dev-prakhar/push-me-out-server
|
bd5fe94e1f08663fb4059ec2b5d6b4dea5ea2c58
|
[
"MIT"
] | 1
|
2020-12-17T10:16:08.000Z
|
2020-12-17T10:16:08.000Z
|
from rest_framework import serializers
from apps.notifier.models import Subscriber, NotificationType
class SubscriberSerializer(serializers.ModelSerializer):
    """Serializes Subscriber records for the notifier API.

    p256dh and auth (presumably the Web Push subscription keys -- confirm
    against the Subscriber model) are accepted on input but never exposed
    in responses (write_only).
    """
    class Meta:
        model = Subscriber
        fields = ('id', 'service_endpoint', 'p256dh', 'auth')
        extra_kwargs = {
            'p256dh': {'write_only': True},
            'auth': {'write_only': True},
        }
class NotificationTypeSerializer(serializers.ModelSerializer):
    """Read/write serializer exposing a NotificationType's id and name."""
    class Meta:
        model = NotificationType
        fields = ('id', 'name', )
| 28.105263
| 62
| 0.649813
|
0a717f699f2aaa6d5e9dfec9c25be7096f65d9fd
| 1,020
|
py
|
Python
|
tests/test_utils.py
|
confuzeus/time-attack
|
d76e4eeb0d7561302d789b701665477f01ca4a68
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
confuzeus/time-attack
|
d76e4eeb0d7561302d789b701665477f01ca4a68
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
confuzeus/time-attack
|
d76e4eeb0d7561302d789b701665477f01ca4a68
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
from time_attack.utils import yes_or_no
class TestUtils(TestCase):
    """Unit tests for time_attack.utils helpers."""

    def test_yes_or_no(self):
        # yes_or_no is expected to append " (y/n) " to the prompt, accept
        # y/Y as True and n/N as False, and re-prompt on invalid input.
        msg = "abcd"
        expected_msg = "abcd (y/n) "
        with patch("builtins.input") as mock_input:
            mock_input.return_value = "y"
            answer = yes_or_no(msg)
            mock_input.assert_called_with(expected_msg)
            self.assertTrue(answer)
            mock_input.return_value = "Y"
            answer = yes_or_no(msg)
            self.assertTrue(answer)
            mock_input.return_value = "n"
            answer = yes_or_no(msg)
            self.assertFalse(answer)
            mock_input.return_value = "N"
            answer = yes_or_no(msg)
            self.assertFalse(answer)
            # An invalid answer ("Nein") must trigger a second prompt.
            mock_input.reset_mock()
            mock_input.side_effect = ("Nein", "y")
            answer = yes_or_no(msg)
            self.assertEqual(mock_input.call_count, 2)
            self.assertTrue(answer)
| 20.4
| 55
| 0.584314
|
605140f91cc829cba9587d6368c9662f2be82375
| 3,777
|
py
|
Python
|
acl18/io.py
|
tzshi/mh4-parser-acl18
|
20234703d6a2824fb6f76d45499db0a6ec6e27fb
|
[
"MIT"
] | 4
|
2018-08-22T13:54:38.000Z
|
2021-05-16T16:22:36.000Z
|
acl18/io.py
|
tzshi/mh4-parser-acl18
|
20234703d6a2824fb6f76d45499db0a6ec6e27fb
|
[
"MIT"
] | null | null | null |
acl18/io.py
|
tzshi/mh4-parser-acl18
|
20234703d6a2824fb6f76d45499db0a6ec6e27fb
|
[
"MIT"
] | 1
|
2019-05-19T16:59:20.000Z
|
2019-05-19T16:59:20.000Z
|
#!/usr/bin/env python
# encoding: utf-8
from .const import ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC
from .graph import DependencyGraph, Word
def read_conll(filename):
    """Parse a CoNLL-U file into a list of DependencyGraph objects.

    Empty nodes (ids containing '.') are skipped.  Multi-word token ranges
    ("i-j" ids) are recorded on the graph and their member word lines are
    consumed inline.  Only the universal part of each deprel (the text
    before ':') is kept; a non-numeric HEAD column becomes head -1.
    """
    def get_word(columns):
        # Build a Word from the standard CoNLL-U columns of one line.
        return Word(columns[FORM], columns[UPOS], lemma=columns[LEMMA], xpos=columns[XPOS], feats=columns[FEATS], misc=columns[MISC])

    def get_graph(graphs, words, tokens, edges, comments):
        # Assemble the accumulated sentence into a graph and append it.
        graph = DependencyGraph(words, tokens)
        for (h, d, r) in edges:
            graph.attach(h, d, r)
        graph.comments = comments
        graphs.append(graph)

    file = open(filename, "r", encoding="utf-8")

    graphs = []
    # Per-sentence accumulators, flushed at each blank line and at EOF.
    words = []
    tokens = []
    edges = []
    comments = []
    sentence_start = False
    while True:
        line = file.readline()
        if not line:
            # EOF: flush any sentence not terminated by a blank line.
            if len(words) > 0:
                get_graph(graphs, words, tokens, edges, comments)
                words, tokens, edges, comments = [], [], [], []
            break
        line = line.rstrip("\r\n")

        # Handle sentence start boundaries
        if not sentence_start:
            # Skip comments
            if line.startswith("#"):
                comments.append(line)
                continue
            # Start a new sentence
            sentence_start = True
        if not line:
            sentence_start = False
            if len(words) > 0:
                get_graph(graphs, words, tokens, edges, comments)
                words, tokens, edges, comments = [], [], [], []
            continue

        # Read next token/word
        columns = line.split("\t")

        # Skip empty nodes
        if "." in columns[ID]:
            continue

        # Handle multi-word tokens to save word(s)
        if "-" in columns[ID]:
            start, end = map(int, columns[ID].split("-"))
            tokens.append((start, end + 1, columns[FORM]))
            # Consume the constituent word lines of this token directly.
            for _ in range(start, end + 1):
                word_line = file.readline().rstrip("\r\n")
                word_columns = word_line.split("\t")
                words.append(get_word(word_columns))

                if word_columns[HEAD].isdigit():
                    head = int(word_columns[HEAD])
                else:
                    head = -1
                edges.append((head, int(word_columns[ID]), word_columns[DEPREL].split(":")[0]))
        # Basic tokens/words
        else:
            words.append(get_word(columns))

            if columns[HEAD].isdigit():
                head = int(columns[HEAD])
            else:
                head = -1
            edges.append((head, int(columns[ID]), columns[DEPREL].split(":")[0]))

    file.close()
    return graphs
def write_conll(filename, graphs, append=False):
if append:
file = open(filename, "a", encoding="utf-8")
else:
file = open(filename, "w", encoding="utf-8")
for j in range(len(graphs)):
graph = graphs[j]
curtoken = 0
for i in range(1, len(graph.nodes)):
if curtoken < len(graph.tokens) and i == graph.tokens[curtoken][0]:
file.write("{}-{}\t{}\t_\t_\t_\t_\t_\t_\t_\t_\n".format(graph.tokens[curtoken][0], graph.tokens[curtoken][1] - 1, graph.tokens[curtoken][2]))
curtoken += 1
file.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t_\t{}\n".format(
i, graph.nodes[i].word, graph.nodes[i].lemma, graph.nodes[i].upos, graph.nodes[i].xpos,
graph.nodes[i].feats, graph.heads[i], graph.rels[i], graph.nodes[i].misc))
file.write("\n")
file.close()
def read_text(filename):
file = open(filename, "r")
text = file.read()
file.close()
documents = text.split("\n\n")
ret = [" ".join(x.split("\n")).strip() for x in documents]
return ret
| 31.739496
| 157
| 0.530845
|
845d086e0175ac3e128640fe5dd42e06f5959913
| 2,059
|
py
|
Python
|
examples/gmm/sine/vi_gmm.py
|
pnickl/mimo
|
81c4bbd2594e2136445009eae752ab8a1602a1cf
|
[
"MIT"
] | 3
|
2020-05-19T12:01:48.000Z
|
2020-10-15T11:51:37.000Z
|
examples/gmm/sine/vi_gmm.py
|
pnickl/mimo
|
81c4bbd2594e2136445009eae752ab8a1602a1cf
|
[
"MIT"
] | null | null | null |
examples/gmm/sine/vi_gmm.py
|
pnickl/mimo
|
81c4bbd2594e2136445009eae752ab8a1602a1cf
|
[
"MIT"
] | null | null | null |
import copy
import operator
import numpy as np
import numpy.random as npr
from matplotlib import pyplot as plt
from mimo.distributions import Dirichlet
from mimo.distributions import CategoricalWithDirichlet
from mimo.distributions import NormalWishart
from mimo.distributions import GaussianWithNormalWishart
from mimo.mixtures import BayesianMixtureOfGaussians
from mimo.util.text import progprint_xrange
npr.seed(1337)
nb_samples = 2500
data = np.zeros((nb_samples, 2))
step = 14. * np.pi / nb_samples
for i in range(data.shape[0]):
x = i * step - 6.
data[i, 0] = x + npr.normal(0, 0.1)
data[i, 1] = 3. * (np.sin(x) + npr.normal(0, .1))
plt.figure()
plt.plot(data[:, 0], data[:, 1], 'kx')
plt.title('data')
nb_models = 25
gating_hypparams = dict(K=nb_models, alphas=np.ones((nb_models, )))
gating_prior = Dirichlet(**gating_hypparams)
components_hypparams = dict(mu=np.zeros((2, )), kappa=0.01,
psi=np.eye(2), nu=3)
components_prior = NormalWishart(**components_hypparams)
gmm = BayesianMixtureOfGaussians(gating=CategoricalWithDirichlet(gating_prior),
components=[GaussianWithNormalWishart(components_prior)
for _ in range(nb_models)])
gmm.add_data(data)
allscores = []
allmodels = []
for superitr in range(5):
# Gibbs sampling to wander around the posterior
print('Gibbs Sampling')
for _ in progprint_xrange(25):
gmm.resample()
# mean field to lock onto a mode
print('Mean Field')
gmm.resample() # sample once to initialize posterior
scores = [gmm.meanfield_update() for _ in progprint_xrange(100)]
allscores.append(scores)
allmodels.append(copy.deepcopy(gmm))
plt.figure()
plt.title('model vlb scores vs iteration')
for scores in allscores:
plt.plot(scores)
models_and_scores = sorted([(m, s[-1]) for m, s in zip(allmodels, allscores)],
key=operator.itemgetter(1), reverse=True)
plt.figure()
plt.title('best model')
gmm.plot()
plt.show()
| 26.74026
| 88
| 0.682856
|
ac16bf3fe447908a896b7bf2e8611e1f09de6309
| 12,475
|
py
|
Python
|
notification_integration/utilities/service_handler_test.py
|
GoogleCloudPlatform/cloud-alerting-notification-forwarding
|
1bd47043de723c92dee3f49ddf4bbe226ba5d106
|
[
"Apache-2.0"
] | 3
|
2021-12-08T02:59:28.000Z
|
2022-02-04T01:14:54.000Z
|
notification_integration/utilities/service_handler_test.py
|
GoogleCloudPlatform/cloud-alerting-notification-forwarding
|
1bd47043de723c92dee3f49ddf4bbe226ba5d106
|
[
"Apache-2.0"
] | null | null | null |
notification_integration/utilities/service_handler_test.py
|
GoogleCloudPlatform/cloud-alerting-notification-forwarding
|
1bd47043de723c92dee3f49ddf4bbe226ba5d106
|
[
"Apache-2.0"
] | 1
|
2021-12-25T17:32:04.000Z
|
2021-12-25T17:32:04.000Z
|
# Copyright 2021 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for service_handler.py."""
import copy
import json
import httplib2
import unittest
from unittest.mock import Mock
from utilities import service_handler
# A valid config map used in the tests.
_SERVICE_NAME = 'google_chat'
_HTTP_METHOD = 'POST'
_CONFIG_PARAMS = {'service_name': _SERVICE_NAME,
'webhook_url': 'https://chat.123.com',
'msg_format': 'card'
}
_BAD_CONFIG_PARAMS = [
{'service': _SERVICE_NAME}, # Bad service name key
{'service_name': 'wrong_xxx'}, # Bad service name value
{'service_name': 'google_chat', 'url': '123.com'}, # Bad url key
{'service_name': 'google_chat', 'webhook_url': 123}, # Bad url value
{'service_name': 'google_chat', 'webhook_url': '123.com', 'format': 'card'}, # Bad format key
{'service_name': 'google_chat', 'webhook_url': '123.com', 'msg_format': 'video'}, # Bad format value
]
# Test notification json object.
_NOTIF = {
"incident": {
"condition": {
"conditionThreshold": {
"aggregations": [
{
"alignmentPeriod": "60s",
"crossSeriesReducer": "REDUCE_SUM",
"perSeriesAligner": "ALIGN_SUM"
}
],
"comparison": "COMPARISON_GT",
"duration": "60s",
"filter": "metric.type=\"compute.googleapis.com/instance/cpu/usage_time\" AND resource.type=\"gce_instance\"",
"trigger": {
"count": 1
}
},
"displayName": "test condition",
"name": "projects/tf-test/alertPolicies/3528831492076541324/conditions/3528831492076543949"
},
"condition_name": "test condition",
"ended_at": 1621359336,
"incident_id": "0.m2d61b3s6d5d",
"metric": {
"displayName": "CPU usage",
"type": "compute.googleapis.com/instance/cpu/usage_time"
},
"policy_name": "test Alert Policy",
"resource": {
"labels": {
"project_id": "tf-test"
},
"type": "gce_instance"
},
"resource_id": "",
"resource_name": "tf-test VM Instance labels {project_id=tf-test}",
"resource_type_display_name": "VM Instance",
"started_at": 1620754533,
"state": "closed",
"summary": "CPU usage for tf-test VM Instance labels {project_id=tf-test} returned to normal with a value of 0.081.",
"url": "https://console.cloud.google.com/monitoring/alerting/incidents/0.m2d61b3s6d5d?project=tf-test"
},
"version": "1.2"
}
class ServiceHandlerTest(unittest.TestCase):
def testAbstactServiceHandlerCanNotBeInitialized(self):
with self.assertRaises(TypeError):
service_handler.ServiceHandler(_SERVICE_NAME) # pylint: disable=abstract-class-instantiated
class HttpRequestBasedHandlerTest(unittest.TestCase):
def testAbstactclassHttpRequestBasedHandlerCanNotBeInitialized(self):
with self.assertRaises(TypeError):
service_handler.HttpRequestBasedHandler(_SERVICE_NAME, _HTTP_METHOD) # pylint: disable=abstract-class-instantiated
class GchatHandlerTest(unittest.TestCase):
def setUp(self):
# To mock the GCS blob returned by bucket.get_blob.
self._http_obj_mock = Mock()
self._http_mock = Mock(return_value=self._http_obj_mock)
httplib2.Http = self._http_mock
def testCheckServiceNameInConfigParamsFailed(self):
handler = service_handler.GchatHandler()
bad_configs = [
{'service': _SERVICE_NAME}, # Bad key
{'service_name': 'wrong_xxx'} # Bad value
]
for bad_config in bad_configs:
with self.assertRaises(service_handler.ConfigParamsError):
handler.CheckServiceNameInConfigParams(bad_config)
def testCheckConfigParamsFailed(self):
handler = service_handler.GchatHandler()
for bad_config in _BAD_CONFIG_PARAMS:
with self.assertRaises(service_handler.ConfigParamsError):
handler.CheckConfigParams(bad_config)
def testSendNotificationFailedDueToBadConfig(self):
handler = service_handler.GchatHandler()
for bad_config in _BAD_CONFIG_PARAMS:
_, status_code = handler.SendNotification(bad_config, _NOTIF)
self.assertEqual(status_code, 400)
def testSendNotificationFailedDueToUnexpectedCheckConfigParamsException(self):
handler = service_handler.GchatHandler()
# Set the config_param to None to cause exception.
_, status_code = handler.SendNotification(None, _NOTIF)
self.assertEqual(status_code, 500)
def testSendNotificationFormatTextFailedDuetoException(self):
handler = service_handler.GchatHandler()
config_params = _CONFIG_PARAMS.copy()
self._http_obj_mock.request.side_effect = Exception('unknown exception')
config_params['msg_format'] = 'text'
_, status_code = handler.SendNotification(config_params, _NOTIF)
self.assertEqual(status_code, 400)
self._http_obj_mock.request.assert_called_once()
def testSendNotificationFormatTextSucceed(self):
handler = service_handler.GchatHandler()
config_params = _CONFIG_PARAMS.copy()
config_params['msg_format'] = 'text'
self._http_obj_mock.request.return_value = httplib2.Response({'status': 200}), b'OK'
_, status_code = handler.SendNotification(config_params, _NOTIF)
self.assertEqual(status_code, 200)
expected_body = (
'{"text": "{\\"incident\\": {\\"condition\\": {\\"conditionThreshold\\": '
'{\\"aggregations\\": [{\\"alignmentPeriod\\": \\"60s\\", '
'\\"crossSeriesReducer\\": \\"REDUCE_SUM\\", \\"perSeriesAligner\\":'
' \\"ALIGN_SUM\\"}], \\"comparison\\": \\"COMPARISON_GT\\", '
'\\"duration\\": \\"60s\\", \\"filter\\": \\"metric.type=\\\\\\'
'"compute.googleapis.com/instance/cpu/usage_time\\\\\\" AND '
'resource.type=\\\\\\"gce_instance\\\\\\"\\", \\"trigger\\": '
'{\\"count\\": 1}}, \\"displayName\\": \\"test condition\\", \\"name\\":'
' \\"projects/tf-test/alertPolicies/3528831492076541324/conditions/'
'3528831492076543949\\"}, \\"condition_name\\": \\"test condition\\",'
' \\"ended_at\\": 1621359336, \\"incident_id\\": \\"0.m2d61b3s6d5d\\",'
' \\"metric\\": {\\"displayName\\": \\"CPU usage\\", \\"type\\": '
'\\"compute.googleapis.com/instance/cpu/usage_time\\"}, \\"policy_name\\":'
' \\"test Alert Policy\\", \\"resource\\": {\\"labels\\": '
'{\\"project_id\\": \\"tf-test\\"}, \\"type\\": \\"gce_instance\\"}, '
'\\"resource_id\\": \\"\\", \\"resource_name\\": \\"tf-test VM Instance '
'labels {project_id=tf-test}\\", \\"resource_type_display_name\\": \\"VM '
'Instance\\", \\"started_at\\": 1620754533, \\"state\\": \\"closed\\", '
'\\"summary\\": \\"CPU usage for tf-test VM Instance labels '
'{project_id=tf-test} returned to normal with a value of 0.081.\\", '
'\\"url\\": \\"https://console.cloud.google.com/monitoring/alerting/'
'incidents/0.m2d61b3s6d5d?project=tf-test\\"}, \\"version\\": \\"1.2\\"}"}'
)
self._http_obj_mock.request.assert_called_once_with(
uri='https://chat.123.com',
method='POST',
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=expected_body,
)
def testSendNotificationFormatCardSucceed(self):
handler = service_handler.GchatHandler()
self._http_obj_mock.request.return_value = httplib2.Response({'status': 200}), b'OK'
http_response, status_code = handler.SendNotification(_CONFIG_PARAMS, _NOTIF)
self.assertEqual(status_code, 200)
self.assertEqual(http_response, 'OK')
expected_body = (
'{"cards": [{"sections": [{"widgets": [{"textParagraph": {"text":'
' "<b><font color=\\"#0000FF\\">Summary:</font></b> CPU usage for '
'tf-test VM Instance labels {project_id=tf-test} returned to normal'
' with a value of 0.081., <br><b><font color=\\"#0000FF\\">'
'State:</font></b> closed"}}, {"textParagraph": {"text": '
'"<b>Condition Display Name:</b> test condition <br><b>Start '
'at:</b> 2021-05-11 17:35:33 (UTC)<br><b>Incident Labels:</b> '
'{\'project_id\': \'tf-test\'}"}}, {"buttons": [{"textButton": '
'{"text": "View Incident Details", "onClick": {"openLink": '
'{"url": "https://console.cloud.google.com/monitoring/alerting/'
'incidents/0.m2d61b3s6d5d?project=tf-test"}}}}]}]}]}]}'
)
self._http_obj_mock.request.assert_called_once_with(
uri='https://chat.123.com',
method='POST',
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=expected_body,
)
def testSendNotificationFormatTextNon200Status(self):
handler = service_handler.GchatHandler()
config_params = _CONFIG_PARAMS.copy()
self._http_obj_mock.request.return_value = httplib2.Response({'status': 500}), b'Server error'
config_params['msg_format'] = 'text'
http_response, status_code = handler.SendNotification(config_params, _NOTIF)
self.assertEqual(status_code, 500)
self.assertEqual(http_response, 'Server error')
self._http_obj_mock.request.assert_called_once()
def testSendNotificationFormatCardFailedDueToMissingField(self):
missing_fields = ['condition', 'resource', 'url', 'state', 'summary']
handler = service_handler.GchatHandler()
self._http_obj_mock.request.return_value = httplib2.Response({'status': 200}), b'OK'
for missing_field in missing_fields:
notif = copy.deepcopy(_NOTIF)
del notif['incident'][missing_field]
_, status_code = handler.SendNotification(_CONFIG_PARAMS, notif)
self.assertEqual(status_code, 400)
def testSendNotificationFormatCardStartedAtMissing(self):
handler = service_handler.GchatHandler()
notif_without_startime = copy.deepcopy(_NOTIF)
del notif_without_startime['incident']['started_at']
self._http_obj_mock.request.return_value = httplib2.Response({'status': 200}), b'OK'
_, status_code = handler.SendNotification(_CONFIG_PARAMS, notif_without_startime)
self.assertEqual(status_code, 200)
expected_body = (
'{"cards": [{"sections": [{"widgets": [{"textParagraph": {"text":'
' "<b><font color=\\"#0000FF\\">Summary:</font></b> CPU usage for '
'tf-test VM Instance labels {project_id=tf-test} returned to normal'
' with a value of 0.081., <br><b><font color=\\"#0000FF\\">'
'State:</font></b> closed"}}, {"textParagraph": {"text": '
'"<b>Condition Display Name:</b> test condition <br><b>Start '
'at:</b> <br><b>Incident Labels:</b> '
'{\'project_id\': \'tf-test\'}"}}, {"buttons": [{"textButton": '
'{"text": "View Incident Details", "onClick": {"openLink": '
'{"url": "https://console.cloud.google.com/monitoring/alerting/'
'incidents/0.m2d61b3s6d5d?project=tf-test"}}}}]}]}]}]}'
)
self._http_obj_mock.request.assert_called_once_with(
uri='https://chat.123.com',
method='POST',
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=expected_body,
)
if __name__ == '__main__':
unittest.main()
| 48.352713
| 128
| 0.611703
|
0a44efe7625cf61a5fd91e20812d25065e30dc7d
| 498
|
py
|
Python
|
mainapp/migrations/0055_rescuecamp_facilities_available.py
|
reyasmohammed/rescuekerala
|
68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c
|
[
"MIT"
] | 1
|
2018-09-22T21:08:38.000Z
|
2018-09-22T21:08:38.000Z
|
mainapp/migrations/0055_rescuecamp_facilities_available.py
|
reyasmohammed/rescuekerala
|
68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c
|
[
"MIT"
] | 1
|
2018-08-23T11:38:02.000Z
|
2018-08-23T11:38:02.000Z
|
mainapp/migrations/0055_rescuecamp_facilities_available.py
|
reyasmohammed/rescuekerala
|
68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c
|
[
"MIT"
] | 5
|
2019-11-07T11:34:56.000Z
|
2019-11-07T11:36:00.000Z
|
# Generated by Django 2.1 on 2018-08-19 08:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0054_auto_20180819_1211'),
]
operations = [
migrations.AddField(
model_name='rescuecamp',
name='facilities_available',
field=models.TextField(blank=True, null=True, verbose_name='Facilities existing, (light, kitchen, toilets), etc - ലഭ്യമായ സൗകര്യങ്ങൾ '),
),
]
| 26.210526
| 148
| 0.63253
|
9f85188b3513563a7444f7a0e908f11af985498b
| 18,179
|
py
|
Python
|
tensorflow/python/ops/math_ops_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 13
|
2018-07-23T18:53:35.000Z
|
2021-11-18T19:56:45.000Z
|
tensorflow/python/ops/math_ops_test.py
|
Iorizy2/tensorflow
|
10878ede0688fbf6d720bef1d132a1537f9ea72c
|
[
"Apache-2.0"
] | 1
|
2018-03-28T23:47:43.000Z
|
2018-03-28T23:47:43.000Z
|
tensorflow/python/ops/math_ops_test.py
|
Iorizy2/tensorflow
|
10878ede0688fbf6d720bef1d132a1537f9ea72c
|
[
"Apache-2.0"
] | 13
|
2018-09-07T13:28:38.000Z
|
2020-07-17T15:06:24.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
@test_util.with_c_api
class ReduceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
y_tf = self.evaluate(math_ops.reduce_sum(x))
self.assertEqual(y_tf, 21)
@test_util.run_in_graph_and_eager_modes()
def testReduceExplicitAxes(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
for axis in (0, -2, (0, 0), (0, -2)):
self.assertAllEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)),
[5, 7, 9])
for axis in (1, -1, (1, 1), (1, -1)):
self.assertAllEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)),
[6, 15])
for axis in (None, (0, 1), (-1, -2), (-2, -1, 0, 1)):
self.assertEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)), 21)
@test_util.run_in_graph_and_eager_modes()
def testReduceInvalidAxis(self):
if context.executing_eagerly():
# The shape check is in run a graph construction time. In eager mode,
# it misses the check, magically return result given wrong shape.
return
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
axis = np.array([[0], [1]])
with self.assertRaisesRegexp(ValueError, "must be at most rank 1"):
math_ops.reduce_sum(x, axis)
@test_util.with_c_api
class LogSumExpTest(test_util.TensorFlowTestCase):
def testReduceLogSumExp(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf_np = math_ops.reduce_logsumexp(x_np).eval()
y_np = log(np.sum(exp(x_np)))
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=[0])
y_np = log(np.sum(exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = y_tf.eval()
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices2(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=0)
y_np = log(np.sum(exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = y_tf.eval()
self.assertAllClose(y_tf_np, y_np)
def testKeepDims(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf_np = math_ops.reduce_logsumexp(x_np, keepdims=True).eval()
self.assertEqual(y_tf_np.ndim, x_np.ndim)
y_np = log(np.sum(exp(x_np), keepdims=True))
self.assertAllClose(y_tf_np, y_np)
def testOverflow(self):
x = [1000, 1001, 1002, 1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegexp(RuntimeWarning,
"overflow encountered in exp"):
out = log(np.sum(exp(x_np)))
if out == np.inf:
raise RuntimeWarning("overflow encountered in exp")
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
y_np = log(np.sum(exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testUnderflow(self):
x = [-1000, -1001, -1002, -1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegexp(RuntimeWarning,
"divide by zero encountered in log"):
out = log(np.sum(exp(x_np)))
if out == -np.inf:
raise RuntimeWarning("divide by zero encountered in log")
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
y_np = log(np.sum(exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testInfinity(self):
with self.test_session(use_gpu=True):
res = math_ops.reduce_logsumexp(-np.inf).eval()
self.assertEqual(-np.inf, res)
@test_util.with_c_api
class RoundTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
def testRounding(self):
x = [0.49, 0.7, -0.3, -0.8]
# TODO(nolivia): Remove this when RoundOp is forwards compatible
# x = np.arange(-5.0, 5.0, .25)
for dtype in [np.float32, np.double, np.int32]:
x_np = np.array(x, dtype=dtype)
with test_util.device(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = self.evaluate(y_tf)
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
@test_util.with_c_api
class ModTest(test_util.TensorFlowTestCase):
def testFloat(self):
x = [0.5, 0.7, 0.3]
for dtype in [np.float32, np.double]:
# Test scalar and vector versions.
for denom in [x[0], [x[0]] * 3]:
x_np = np.array(x, dtype=dtype)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.fmod(x_np, denom)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
def testFixed(self):
x = [5, 10, 23]
for dtype in [np.int32, np.int64]:
# Test scalar and vector versions.
for denom in [x[0], x]:
x_np = np.array(x, dtype=dtype)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.mod(x_np, denom)
self.assertAllClose(y_tf_np, y_np)
@test_util.with_c_api
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
def testSquaredDifference(self):
for dtype in [np.int32, np.float16]:
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
y = np.array([-3, -2, -1], dtype=dtype)
z = (x - y) * (x - y)
with test_util.device(use_gpu=True):
z_tf = self.evaluate(math_ops.squared_difference(x, y))
self.assertAllClose(z, z_tf)
@test_util.with_c_api
class ApproximateEqualTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
def testApproximateEqual(self):
for dtype in [np.float32, np.double]:
x = dtype(1)
y = dtype(1.00009)
z = False
with test_util.device(use_gpu=True):
# Default tolerance is 0.00001
z_tf = self.evaluate(math_ops.approximate_equal(x, y))
self.assertAllEqual(z, z_tf)
for dtype in [np.float32, np.double]:
x = dtype(1)
y = dtype(1.000009)
z = True
with test_util.device(use_gpu=True):
# Default tolerance is 0.00001
z_tf = self.evaluate(math_ops.approximate_equal(x, y))
self.assertAllEqual(z, z_tf)
for dtype in [np.float32, np.double]:
x = np.array([[[[-1, 2.00009999], [-3, 4.01]]]], dtype=dtype)
y = np.array([[[[-1.001, 2], [-3.00009, 4]]]], dtype=dtype)
z = np.array([[[[False, True], [True, False]]]], dtype=np.bool)
with test_util.device(use_gpu=True):
z_tf = self.evaluate(math_ops.approximate_equal(x, y, tolerance=0.0001))
self.assertAllEqual(z, z_tf)
@test_util.with_c_api
class ScalarMulTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
def testAcceptsRefs(self):
if context.executing_eagerly():
var = resource_variable_ops.ResourceVariable(10, name="var")
else:
var = variables.Variable(10)
result = math_ops.scalar_mul(3, var)
init = variables.global_variables_initializer()
with test_util.device(use_gpu=True):
self.evaluate(init)
self.assertEqual(30, self.evaluate(result))
@test_util.run_in_graph_and_eager_modes()
def testAcceptsConstant(self):
const = constant_op.constant(10)
result = math_ops.scalar_mul(3, const)
with test_util.device(use_gpu=True):
self.assertEqual(30, self.evaluate(result))
@test_util.run_in_graph_and_eager_modes()
def testAcceptsTensor(self):
tensor = array_ops.ones([10, 10])
result = math_ops.scalar_mul(3, tensor)
expected = array_ops.ones([10, 10]) * 3
with test_util.device(use_gpu=True):
self.assertAllEqual(self.evaluate(expected), self.evaluate(result))
@test_util.run_in_graph_and_eager_modes()
def testAcceptsIndexedSlices(self):
values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
indices = constant_op.constant([0, 2, 5])
x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
with test_util.device(use_gpu=True):
self.assertAllEqual(self.evaluate(x.values),
[[-6, -9], [-15, -21], [0, 3]])
self.assertAllEqual(self.evaluate(x.indices), [0, 2, 5])
@test_util.with_c_api
class AccumulateNTest(test_util.TensorFlowTestCase):
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6, math_ops.accumulate_n([tf_x[0]] * 6).eval())
@test_util.with_c_api
class AddNTest(test_util.TensorFlowTestCase):
def testPartials(self):
"""Test that previously revealed a bug in buffer forwarding for AddN."""
partials = []
for _ in range(98):
partials.append(math_ops.add_n([constant_op.constant(1)]))
partials.append(
math_ops.add_n([constant_op.constant(1),
constant_op.constant(1)]))
res = math_ops.add_n(partials) + constant_op.constant(0)
with self.test_session(use_gpu=True):
self.assertAllEqual(res.eval(), 100)
def testFloat(self):
np.random.seed(12345)
for num_inputs in range(1, 10):
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.add_n(tf_x).eval())
self.assertAllClose(x[0] * num_inputs,
math_ops.add_n([tf_x[0]] * num_inputs).eval())
def testInt(self):
np.random.seed(54321)
for num_inputs in range(1, 10):
x = [
np.random.randint(-128, 128, (5, 4, 3, 2, 1))
for _ in range(num_inputs)
]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.add_n(tf_x).eval())
self.assertAllEqual(x[0] * num_inputs,
math_ops.add_n([tf_x[0]] * num_inputs).eval())
def testGrad(self):
np.random.seed(42)
for num_inputs in range(1, 10):
with self.test_session(use_gpu=True) as sess:
input_vars = [
variables.Variable(10.0 * np.random.random())
for i in range(0, num_inputs)
]
addn = math_ops.add_n(input_vars)
sess.run(variables.global_variables_initializer())
add_n_grad = gradients.gradients(addn, input_vars)
self.assertAllEqual(np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
[g.eval() for g in add_n_grad])
@test_util.with_c_api
class DivAndModTest(test_util.TensorFlowTestCase):
# TODO(aselle): Test more types before exposing new division operators.
def intTestData(self):
nums = np.arange(-10, 10, 1).reshape(20, 1)
divs = np.arange(-3, 4, 2).reshape(1, 4)
return nums, divs
def floatTestData(self):
nums = np.arange(-10, 10, .25).reshape(80, 1)
divs = np.arange(-3, 0, .25).reshape(1, 12)
return nums, divs
def testFloorModInt(self):
nums, divs = self.intTestData()
with self.test_session():
# TODO(aselle): Change test to use % after switch
# tf_result = math_ops.floor_mod(nums, divs).eval()
tf_result = math_ops.floormod(nums, divs).eval()
np_result = nums % divs
self.assertAllEqual(tf_result, np_result)
def testFloorModFloat(self):
nums, divs = self.floatTestData()
with self.test_session():
tf_result = math_ops.floormod(nums, divs).eval()
np_result = nums % divs
self.assertAllEqual(tf_result, np_result)
# TODO(aselle): put this test in once % switched to floormod
# tf2_result = (array_ops.constant(nums)
# % array_ops.constant(divs)).eval()
# self.assertAllEqual(tf2_result, tf_result)
def testTruncateModInt(self):
nums, divs = self.intTestData()
with self.test_session():
tf_result = math_ops.truncatemod(nums, divs).eval()
np_result = np.fmod(nums, divs)
self.assertAllEqual(tf_result, np_result)
def testTruncateModFloat(self):
nums, divs = self.floatTestData()
with self.test_session():
tf_result = math_ops.truncatemod(nums, divs).eval()
np_result = np.fmod(nums, divs)
self.assertAllEqual(tf_result, np_result)
def testDivideInt(self):
nums, divs = self.intTestData()
with self.test_session():
tf_result = math_ops.floor_div(nums, divs).eval()
np_result = nums // divs
self.assertAllEqual(tf_result, np_result)
# TODO(aselle): Put this test in once // is switched to floordiv
# tf2_result = (array_ops.constant(nums)
# // array_ops.constant(divs)).eval()
# self.assertAllEqual(tf2_result, tf_result)
def testDivideName(self):
with self.test_session():
op = math_ops.divide(
array_ops.constant(3), array_ops.constant(4), name="my_cool_divide")
self.assertEqual(op.name, "my_cool_divide:0")
def testRealDiv(self):
nums, divs = self.floatTestData()
with self.test_session():
tf_result = math_ops.realdiv(nums, divs).eval()
np_result = np.divide(nums, divs)
self.assertAllEqual(tf_result, np_result)
def testComplexDiv(self):
  """Smoke test: divide/div accept complex tensors without raising."""
  foo = array_ops.constant([1. + 3.j])
  with self.test_session():
    # Only checks that evaluation succeeds; results are discarded.
    _ = math_ops.divide(foo, 1.).eval()
    _ = math_ops.div(foo, 2.).eval()
def testFloorDivGrad(self):
  """divide/div are differentiable; floordiv yields None gradients."""
  with self.test_session():
    a = variables.Variable(2.)
    b = variables.Variable(4.)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      # d(a/b)/da = 1/b = .25 and d(a/b)/db = -a/b**2 = -.125
      c_grad = gradients.gradients(math_ops.divide(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
      c_grad = gradients.gradients(math_ops.div(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
      # floordiv is piecewise constant, so both gradients come back as None.
      c_grad = gradients.gradients(math_ops.floordiv(a, b), [a, b])
      self.assertAllEqual([None if x is None else x.eval()
                           for x in c_grad], [None, None])
def testConsistent(self):
  """floor_div/floormod and truncatediv/truncatemod obey the division identity.

  For every pair (n, d): div(n, d) * d + mod(n, d) == n, and the floor forms
  agree with numpy and with the // and % operators.
  """
  nums, divs = self.intTestData()
  with self.test_session():
    tf_result = (math_ops.floor_div(nums, divs) * divs + math_ops.floormod(
        nums, divs)).eval()
    tf_nums = array_ops.constant(nums)
    tf_divs = array_ops.constant(divs)
    tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
    np_result = (nums // divs) * divs + (nums % divs)
    # Consistent with numpy
    self.assertAllEqual(tf_result, np_result)
    # Consistent with two forms of divide
    self.assertAllEqual(tf_result, tf2_result)
    # consistency for truncation form
    tf3_result = (math_ops.truncatediv(nums, divs) * divs +
                  math_ops.truncatemod(nums, divs)).eval()
    # Broadcast nums against divs' columns so the identity's right-hand
    # side has the same (rows of nums) x (cols of divs) shape.
    expanded_nums = np.reshape(
        np.tile(nums, divs.shape[1]), (nums.shape[0], divs.shape[1]))
    # Consistent with desire to get numerator
    self.assertAllEqual(tf3_result, expanded_nums)
    # Consistent with desire to get numerator
    self.assertAllEqual(tf_result, expanded_nums)
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  googletest.main()
| 37.794179
| 80
| 0.651521
|
20c3784d719d8f9a2dc4cbafe409e44f634ba40d
| 10,380
|
py
|
Python
|
sdk/tables/azure-data-tables/tests/test_table_service_properties_async.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 1
|
2020-03-05T18:10:35.000Z
|
2020-03-05T18:10:35.000Z
|
sdk/tables/azure-data-tables/tests/test_table_service_properties_async.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 2
|
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/tables/azure-data-tables/tests/test_table_service_properties_async.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import time
import pytest
from msrest.exceptions import ValidationError # TODO This should be an azure-core error.
from devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer
from azure.core.exceptions import HttpResponseError
from azure.data.tables._models import TableAnalyticsLogging, Metrics, RetentionPolicy, CorsRule
from azure.data.tables.aio import TableServiceClient
from _shared.testcase import TableTestCase
from devtools_testutils import CachedResourceGroupPreparer, CachedStorageAccountPreparer
# ------------------------------------------------------------------------------
class TableServicePropertiesTest(TableTestCase):
    """Live tests for get/set of Table service properties via the async client.

    Helper methods compare service-property model objects field by field;
    test methods round-trip settings through the service. In live mode each
    test sleeps 30s after a set call because property changes propagate
    asynchronously on the service side.
    """

    # --Helpers-----------------------------------------------------------------
    def _assert_properties_default(self, prop):
        # Default properties: empty logging/metrics models and no CORS rules.
        assert prop is not None
        self._assert_logging_equal(prop['analytics_logging'], TableAnalyticsLogging())
        self._assert_metrics_equal(prop['hour_metrics'], Metrics())
        self._assert_metrics_equal(prop['minute_metrics'], Metrics())
        self._assert_cors_equal(prop['cors'], list())

    def _assert_logging_equal(self, log1, log2):
        # Field-wise equality; if either side is None both must be None.
        if log1 is None or log2 is None:
            assert log1 == log2
            return
        assert log1.version == log2.version
        assert log1.read == log2.read
        assert log1.write == log2.write
        assert log1.delete == log2.delete
        self._assert_retention_equal(log1.retention_policy, log2.retention_policy)

    def _assert_delete_retention_policy_equal(self, policy1, policy2):
        if policy1 is None or policy2 is None:
            assert policy1 == policy2
            return
        assert policy1.enabled == policy2.enabled
        assert policy1.days == policy2.days

    def _assert_static_website_equal(self, prop1, prop2):
        if prop1 is None or prop2 is None:
            assert prop1 == prop2
            return
        assert prop1.enabled == prop2.enabled
        assert prop1.index_document == prop2.index_document
        assert prop1.error_document404_path == prop2.error_document404_path

    def _assert_delete_retention_policy_not_equal(self, policy1, policy2):
        # Policies differ when at least one of (enabled, days) differs.
        if policy1 is None or policy2 is None:
            assert policy1 != policy2
            return
        assert not (policy1.enabled == policy2.enabled and policy1.days == policy2.days)

    def _assert_metrics_equal(self, metrics1, metrics2):
        if metrics1 is None or metrics2 is None:
            assert metrics1 == metrics2
            return
        assert metrics1.version == metrics2.version
        assert metrics1.enabled == metrics2.enabled
        assert metrics1.include_apis == metrics2.include_apis
        self._assert_retention_equal(metrics1.retention_policy, metrics2.retention_policy)

    def _assert_cors_equal(self, cors1, cors2):
        # NOTE: rules are compared by list lengths of their attributes, not
        # by the attribute contents themselves.
        if cors1 is None or cors2 is None:
            assert cors1 == cors2
            return
        assert len(cors1) == len(cors2)
        for i in range(0, len(cors1)):
            rule1 = cors1[i]
            rule2 = cors2[i]
            assert len(rule1.allowed_origins) == len(rule2.allowed_origins)
            assert len(rule1.allowed_methods) == len(rule2.allowed_methods)
            assert rule1.max_age_in_seconds == rule2.max_age_in_seconds
            assert len(rule1.exposed_headers) == len(rule2.exposed_headers)
            assert len(rule1.allowed_headers) == len(rule2.allowed_headers)

    def _assert_retention_equal(self, ret1, ret2):
        assert ret1.enabled == ret2.enabled
        assert ret1.days == ret2.days

    # --Test cases per service ---------------------------------------
    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_table_service_properties_async(self, resource_group, location, storage_account, storage_account_key):
        """Setting all-default properties succeeds and reads back as defaults."""
        # Arrange
        url = self.account_url(storage_account, "table")
        tsc = TableServiceClient(url, storage_account_key, logging_enable=True)
        # Act
        resp = await tsc.set_service_properties(
            analytics_logging=TableAnalyticsLogging(),
            hour_metrics=Metrics(),
            minute_metrics=Metrics(),
            cors=list())
        # Assert
        assert resp is None
        if self.is_live:
            time.sleep(30)
        self._assert_properties_default(await tsc.get_service_properties())

    # --Test cases per feature ---------------------------------------
    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_set_logging_async(self, resource_group, location, storage_account, storage_account_key):
        """Analytics logging settings round-trip through the service."""
        # Arrange
        url = self.account_url(storage_account, "table")
        tsc = TableServiceClient(url, storage_account_key)
        logging = TableAnalyticsLogging(read=True, write=True, delete=True, retention_policy=RetentionPolicy(enabled=True, days=5))
        # Act
        await tsc.set_service_properties(analytics_logging=logging)
        # Assert
        if self.is_live:
            time.sleep(30)
        received_props = await tsc.get_service_properties()
        self._assert_logging_equal(received_props['analytics_logging'], logging)

    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_set_hour_metrics_async(self, resource_group, location, storage_account, storage_account_key):
        """Hour metrics settings round-trip through the service."""
        # Arrange
        url = self.account_url(storage_account, "table")
        tsc = TableServiceClient(url, storage_account_key)
        hour_metrics = Metrics(enabled=True, include_apis=True, retention_policy=RetentionPolicy(enabled=True, days=5))
        # Act
        await tsc.set_service_properties(hour_metrics=hour_metrics)
        # Assert
        if self.is_live:
            time.sleep(30)
        received_props = await tsc.get_service_properties()
        self._assert_metrics_equal(received_props['hour_metrics'], hour_metrics)

    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_set_minute_metrics_async(self, resource_group, location, storage_account, storage_account_key):
        """Minute metrics settings round-trip through the service."""
        # Arrange
        url = self.account_url(storage_account, "table")
        tsc = TableServiceClient(url, storage_account_key)
        minute_metrics = Metrics(enabled=True, include_apis=True,
                                 retention_policy=RetentionPolicy(enabled=True, days=5))
        # Act
        await tsc.set_service_properties(minute_metrics=minute_metrics)
        # Assert
        if self.is_live:
            time.sleep(30)
        received_props = await tsc.get_service_properties()
        self._assert_metrics_equal(received_props['minute_metrics'], minute_metrics)

    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_set_cors_async(self, resource_group, location, storage_account, storage_account_key):
        """A minimal and a fully-specified CORS rule both round-trip."""
        # Arrange
        url = self.account_url(storage_account, "table")
        tsc = TableServiceClient(url, storage_account_key)
        cors_rule1 = CorsRule(['www.xyz.com'], ['GET'])
        allowed_origins = ['www.xyz.com', "www.ab.com", "www.bc.com"]
        allowed_methods = ['GET', 'PUT']
        max_age_in_seconds = 500
        exposed_headers = ["x-ms-meta-data*", "x-ms-meta-source*", "x-ms-meta-abc", "x-ms-meta-bcd"]
        allowed_headers = ["x-ms-meta-data*", "x-ms-meta-target*", "x-ms-meta-xyz", "x-ms-meta-foo"]
        cors_rule2 = CorsRule(
            allowed_origins,
            allowed_methods,
            max_age_in_seconds=max_age_in_seconds,
            exposed_headers=exposed_headers,
            allowed_headers=allowed_headers)
        cors = [cors_rule1, cors_rule2]
        # Act
        await tsc.set_service_properties(cors=cors)
        # Assert
        if self.is_live:
            time.sleep(30)
        received_props = await tsc.get_service_properties()
        self._assert_cors_equal(received_props['cors'], cors)

    # --Test cases for errors ---------------------------------------
    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_retention_no_days_async(self, resource_group, location, storage_account, storage_account_key):
        """An enabled retention policy without a day count is rejected client-side."""
        # Assert
        pytest.raises(ValueError,
                      RetentionPolicy,
                      True, None)

    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_too_many_cors_rules_async(self, resource_group, location, storage_account, storage_account_key):
        """The service rejects more than five CORS rules."""
        # Arrange
        tsc = TableServiceClient(self.account_url(storage_account, "table"), storage_account_key)
        cors = []
        for i in range(0, 6):
            cors.append(CorsRule(['www.xyz.com'], ['GET']))
        # Assert
        with pytest.raises(HttpResponseError):
            await tsc.set_service_properties(None, None, None, cors)

    @CachedResourceGroupPreparer(name_prefix="tablestest")
    @CachedStorageAccountPreparer(name_prefix="tablestest")
    async def test_retention_too_long_async(self, resource_group, location, storage_account, storage_account_key):
        """The service rejects retention longer than 365 days."""
        # Arrange
        tsc = TableServiceClient(self.account_url(storage_account, "table"), storage_account_key)
        minute_metrics = Metrics(enabled=True, include_apis=True,
                                 retention_policy=RetentionPolicy(enabled=True, days=366))
        # Assert
        with pytest.raises(HttpResponseError):
            await tsc.set_service_properties(None, None, minute_metrics)
# ------------------------------------------------------------------------------
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
| 42.892562
| 131
| 0.657129
|
7748fe6fda00b90f45509eabcb11c72721f25b7e
| 1,473
|
py
|
Python
|
docs/samples/pipelines/sample-tf-pipeline.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 6
|
2022-02-15T21:54:19.000Z
|
2022-02-16T21:18:54.000Z
|
docs/samples/pipelines/sample-tf-pipeline.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 7
|
2021-08-31T23:55:06.000Z
|
2022-03-02T11:34:58.000Z
|
docs/samples/pipelines/sample-tf-pipeline.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 2
|
2021-12-16T10:32:07.000Z
|
2022-02-28T17:08:52.000Z
|
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.compiler as compiler
import kfp.dsl as dsl
from kfp import components
# Reusable KFServing deployment component; the component.yaml in the Kubeflow
# Pipelines repo defines its inputs (action, model_name, model_uri, ...).
kfserving_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'
                                                  'master/components/kubeflow/kfserving/component.yaml')
@dsl.pipeline(
    name='KFServing pipeline',
    description='A pipeline for KFServing.'
)
def kfservingPipeline(
        action='apply',
        model_name='tensorflow-sample',
        model_uri='gs://kfserving-samples/models/tensorflow/flowers',
        namespace='anonymous',
        framework='tensorflow'):
    """Single-step pipeline that deploys a model through the KFServing component.

    All parameters are forwarded unchanged to kfserving_op; defaults deploy
    the sample TensorFlow flowers model into the 'anonymous' namespace.
    """
    kfserving_op(action=action,
                 model_name=model_name,
                 model_uri=model_uri,
                 namespace=namespace,
                 framework=framework)
if __name__ == '__main__':
    # Compile the pipeline definition into a .tar.gz package next to this file.
    compiler.Compiler().compile(kfservingPipeline, __file__ + '.tar.gz')
| 35.071429
| 105
| 0.692464
|
858f17363a1dccfde714bf60b9abbaab3e1ddc50
| 9,679
|
py
|
Python
|
lib/python2.7/site-packages/networkx/tests/test_convert.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 15
|
2018-04-26T08:17:18.000Z
|
2021-03-05T08:44:13.000Z
|
lib/python2.7/site-packages/networkx/tests/test_convert.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | null | null | null |
lib/python2.7/site-packages/networkx/tests/test_convert.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 6
|
2018-04-12T15:49:27.000Z
|
2022-01-27T12:34:50.000Z
|
#!/usr/bin/env python
from nose.tools import *
from networkx import *
from networkx.convert import *
from networkx.algorithms.operators import *
from networkx.generators.classic import barbell_graph,cycle_graph
class TestConvert():
    """Round-trip tests for networkx conversions to/from dict-of-dicts and
    dict-of-lists representations, across Graph/DiGraph/MultiGraph types."""

    def edgelists_equal(self, e1, e2):
        """True if the two edge lists contain the same undirected edges."""
        return sorted(sorted(e) for e in e1)==sorted(sorted(e) for e in e2)

    def test_simple_graphs(self):
        """Graph -> dod/dol -> Graph preserves nodes and edges; nodelist filters."""
        for dest, source in [(to_dict_of_dicts, from_dict_of_dicts),
                             (to_dict_of_lists, from_dict_of_lists)]:
            G=barbell_graph(10,3)
            dod=dest(G)
            # Dict of [dicts, lists]
            GG=source(dod)
            assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
            assert_equal(sorted(G.edges()), sorted(GG.edges()))
            GW=to_networkx_graph(dod)
            assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
            assert_equal(sorted(G.edges()), sorted(GW.edges()))
            GI=Graph(dod)
            assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
            assert_equal(sorted(G.edges()), sorted(GI.edges()))
            # With nodelist keyword
            P4=path_graph(4)
            P3=path_graph(3)
            dod=dest(P4,nodelist=[0,1,2])
            Gdod=Graph(dod)
            assert_equal(sorted(Gdod.nodes()), sorted(P3.nodes()))
            assert_equal(sorted(Gdod.edges()), sorted(P3.edges()))

    def test_digraphs(self):
        """Same round-trips with create_using=DiGraph preserve directedness."""
        for dest, source in [(to_dict_of_dicts, from_dict_of_dicts),
                             (to_dict_of_lists, from_dict_of_lists)]:
            G=cycle_graph(10)
            # Dict of [dicts, lists]
            dod=dest(G)
            GG=source(dod)
            assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
            assert_equal(sorted(G.edges()), sorted(GG.edges()))
            GW=to_networkx_graph(dod)
            assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
            assert_equal(sorted(G.edges()), sorted(GW.edges()))
            GI=Graph(dod)
            assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
            assert_equal(sorted(G.edges()), sorted(GI.edges()))
            G=cycle_graph(10,create_using=DiGraph())
            dod=dest(G)
            GG=source(dod, create_using=DiGraph())
            assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
            assert_equal(sorted(G.edges()), sorted(GG.edges()))
            GW=to_networkx_graph(dod, create_using=DiGraph())
            assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
            assert_equal(sorted(G.edges()), sorted(GW.edges()))
            GI=DiGraph(dod)
            assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
            assert_equal(sorted(G.edges()), sorted(GI.edges()))

    def test_graph(self):
        """Weighted-graph round-trips; dict-of-lists drops edge data."""
        G=cycle_graph(10)
        e=G.edges()
        source=[u for u,v in e]
        dest=[v for u,v in e]
        ex=zip(source,dest,source)
        G=Graph()
        G.add_weighted_edges_from(ex)
        # Dict of dicts
        dod=to_dict_of_dicts(G)
        GG=from_dict_of_dicts(dod,create_using=Graph())
        assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
        assert_equal(sorted(G.edges()), sorted(GG.edges()))
        GW=to_networkx_graph(dod,create_using=Graph())
        assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
        assert_equal(sorted(G.edges()), sorted(GW.edges()))
        GI=Graph(dod)
        assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
        assert_equal(sorted(G.edges()), sorted(GI.edges()))
        # Dict of lists
        dol=to_dict_of_lists(G)
        GG=from_dict_of_lists(dol,create_using=Graph())
        # dict of lists throws away edge data so set it to none
        enone=[(u,v,{}) for (u,v,d) in G.edges(data=True)]
        assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
        assert_equal(enone, sorted(GG.edges(data=True)))
        GW=to_networkx_graph(dol,create_using=Graph())
        assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
        assert_equal(enone, sorted(GW.edges(data=True)))
        GI=Graph(dol)
        assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
        assert_equal(enone, sorted(GI.edges(data=True)))

    def test_with_multiedges_self_loops(self):
        """Conversions handle self loops and (with multigraph_input) multiedges."""
        G=cycle_graph(10)
        e=G.edges()
        source,dest = list(zip(*e))
        ex=list(zip(source,dest,source))
        XG=Graph()
        XG.add_weighted_edges_from(ex)
        XGM=MultiGraph()
        XGM.add_weighted_edges_from(ex)
        XGM.add_edge(0,1,weight=2) # multiedge
        XGS=Graph()
        XGS.add_weighted_edges_from(ex)
        XGS.add_edge(0,0,weight=100) # self loop
        # Dict of dicts
        # with self loops, OK
        dod=to_dict_of_dicts(XGS)
        GG=from_dict_of_dicts(dod,create_using=Graph())
        assert_equal(sorted(XGS.nodes()), sorted(GG.nodes()))
        assert_equal(sorted(XGS.edges()), sorted(GG.edges()))
        GW=to_networkx_graph(dod,create_using=Graph())
        assert_equal(sorted(XGS.nodes()), sorted(GW.nodes()))
        assert_equal(sorted(XGS.edges()), sorted(GW.edges()))
        GI=Graph(dod)
        assert_equal(sorted(XGS.nodes()), sorted(GI.nodes()))
        assert_equal(sorted(XGS.edges()), sorted(GI.edges()))
        # Dict of lists
        # with self loops, OK
        dol=to_dict_of_lists(XGS)
        GG=from_dict_of_lists(dol,create_using=Graph())
        # dict of lists throws away edge data so set it to none
        enone=[(u,v,{}) for (u,v,d) in XGS.edges(data=True)]
        assert_equal(sorted(XGS.nodes()), sorted(GG.nodes()))
        assert_equal(enone, sorted(GG.edges(data=True)))
        GW=to_networkx_graph(dol,create_using=Graph())
        assert_equal(sorted(XGS.nodes()), sorted(GW.nodes()))
        assert_equal(enone, sorted(GW.edges(data=True)))
        GI=Graph(dol)
        assert_equal(sorted(XGS.nodes()), sorted(GI.nodes()))
        assert_equal(enone, sorted(GI.edges(data=True)))
        # Dict of dicts
        # with multiedges, OK
        dod=to_dict_of_dicts(XGM)
        GG=from_dict_of_dicts(dod,create_using=MultiGraph(),
                              multigraph_input=True)
        assert_equal(sorted(XGM.nodes()), sorted(GG.nodes()))
        assert_equal(sorted(XGM.edges()), sorted(GG.edges()))
        GW=to_networkx_graph(dod,create_using=MultiGraph(),multigraph_input=True)
        assert_equal(sorted(XGM.nodes()), sorted(GW.nodes()))
        assert_equal(sorted(XGM.edges()), sorted(GW.edges()))
        GI=MultiGraph(dod)  # convert can't tell whether to duplicate edges!
        assert_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
        #assert_not_equal(sorted(XGM.edges()), sorted(GI.edges()))
        assert_false(sorted(XGM.edges()) == sorted(GI.edges()))
        GE=from_dict_of_dicts(dod,create_using=MultiGraph(),
                              multigraph_input=False)
        assert_equal(sorted(XGM.nodes()), sorted(GE.nodes()))
        assert_not_equal(sorted(XGM.edges()), sorted(GE.edges()))
        GI=MultiGraph(XGM)
        assert_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
        assert_equal(sorted(XGM.edges()), sorted(GI.edges()))
        GM=MultiGraph(G)
        assert_equal(sorted(GM.nodes()), sorted(G.nodes()))
        assert_equal(sorted(GM.edges()), sorted(G.edges()))
        # Dict of lists
        # with multiedges, OK, but better write as DiGraph else you'll
        # get double edges
        dol=to_dict_of_lists(G)
        GG=from_dict_of_lists(dol,create_using=MultiGraph())
        assert_equal(sorted(G.nodes()), sorted(GG.nodes()))
        assert_equal(sorted(G.edges()), sorted(GG.edges()))
        GW=to_networkx_graph(dol,create_using=MultiGraph())
        assert_equal(sorted(G.nodes()), sorted(GW.nodes()))
        assert_equal(sorted(G.edges()), sorted(GW.edges()))
        GI=MultiGraph(dol)
        assert_equal(sorted(G.nodes()), sorted(GI.nodes()))
        assert_equal(sorted(G.edges()), sorted(GI.edges()))

    def test_edgelists(self):
        """Graph() accepts edge lists as tuples, (u, v, data) triples, and generators."""
        P=path_graph(4)
        e=[(0,1),(1,2),(2,3)]
        G=Graph(e)
        assert_equal(sorted(G.nodes()), sorted(P.nodes()))
        assert_equal(sorted(G.edges()), sorted(P.edges()))
        assert_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
        e=[(0,1,{}),(1,2,{}),(2,3,{})]
        G=Graph(e)
        assert_equal(sorted(G.nodes()), sorted(P.nodes()))
        assert_equal(sorted(G.edges()), sorted(P.edges()))
        assert_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
        e=((n,n+1) for n in range(3))
        G=Graph(e)
        assert_equal(sorted(G.nodes()), sorted(P.nodes()))
        assert_equal(sorted(G.edges()), sorted(P.edges()))
        assert_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))

    def test_directed_to_undirected(self):
        """Converting any directed triangle to undirected collapses to one edge set."""
        edges1 = [(0, 1), (1, 2), (2, 0)]
        edges2 = [(0, 1), (1, 2), (0, 2)]
        assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges1)).edges(),edges1))
        assert_true(self.edgelists_equal(nx.Graph(nx.DiGraph(edges2)).edges(),edges1))
        assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges1)).edges(),edges1))
        assert_true(self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges2)).edges(),edges1))
        assert_true(self.edgelists_equal(nx.MultiGraph(nx.MultiDiGraph(edges1)).edges(),
                                         edges1))
        assert_true(self.edgelists_equal(nx.MultiGraph(nx.MultiDiGraph(edges2)).edges(),
                                         edges1))
        assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(),edges1))
        assert_true(self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(),edges1))
| 44.399083
| 91
| 0.605434
|
a8c75bdcad7859c2cd45cd6a901f3bd8d7805d26
| 290
|
py
|
Python
|
genshin_wishes/standard_wishes.py
|
EYH0602/Genshin_Impact_Wishes_Analyzer
|
ee178cd7fde995a5ca1b979f97a6b77af912a86a
|
[
"MIT"
] | null | null | null |
genshin_wishes/standard_wishes.py
|
EYH0602/Genshin_Impact_Wishes_Analyzer
|
ee178cd7fde995a5ca1b979f97a6b77af912a86a
|
[
"MIT"
] | 4
|
2021-09-08T05:38:09.000Z
|
2021-09-19T16:32:50.000Z
|
genshin_wishes/standard_wishes.py
|
EYH0602/Genshin_Impact_Wishes_Analyzer
|
ee178cd7fde995a5ca1b979f97a6b77af912a86a
|
[
"MIT"
] | null | null | null |
from .wishes_base import WishesBase
class StandardWishes(WishesBase):
    """Wish-history handler configured for the standard (permanent) banner."""

    def init_params(self):
        """Set the banner-specific request parameter and output locations."""
        # Gacha type '200' identifies the standard banner in the API.
        self.params['gacha_type'] = '200'
        # Database table plus CSV/report file names for this banner's data.
        self.table = 'standard_wishes'
        self.file_name = 'genshine_standard_wishes.csv'
        self.rst_file_name = 'standard_analysis.txt'
| 26.363636
| 55
| 0.696552
|
4c76a20add8d869f56e604bc510ab00a49eeaa7b
| 697
|
py
|
Python
|
djstripe/context_managers.py
|
hoopit/dj-stripe
|
726853081cd95be86777492c23fb61de5d35a72a
|
[
"MIT"
] | 2
|
2020-09-01T20:05:28.000Z
|
2021-07-22T08:20:42.000Z
|
djstripe/context_managers.py
|
hoopit/dj-stripe
|
726853081cd95be86777492c23fb61de5d35a72a
|
[
"MIT"
] | null | null | null |
djstripe/context_managers.py
|
hoopit/dj-stripe
|
726853081cd95be86777492c23fb61de5d35a72a
|
[
"MIT"
] | 2
|
2020-01-31T14:26:09.000Z
|
2020-07-14T04:24:15.000Z
|
"""
dj-stripe Context Managers
"""
from contextlib import contextmanager
from . import settings as djstripe_settings
@contextmanager
def stripe_temporary_api_version(version, validate=True):
    """Swap in *version* as the global stripe API version for the duration
    of the ``with`` block.

    The previously configured version is restored on exit, whether the
    block completes normally or raises.
    """
    previous_version = djstripe_settings.get_stripe_api_version()
    try:
        djstripe_settings.set_stripe_api_version(version, validate=validate)
        yield
    finally:
        # Skip validation here: we are merely restoring the earlier value.
        djstripe_settings.set_stripe_api_version(previous_version, validate=False)
| 27.88
| 77
| 0.747489
|
bc9cf1f00b95f02e09bb27f69cd3c05c28c637b5
| 11,954
|
py
|
Python
|
stackalytics/dashboard/helpers.py
|
Mirantis/stackalytics
|
96ec7c6c630a9f2532b808069e045d434bbac200
|
[
"Apache-2.0"
] | 3
|
2015-06-18T14:16:59.000Z
|
2021-02-10T03:41:31.000Z
|
stackalytics/dashboard/helpers.py
|
Mirantis/stackalytics
|
96ec7c6c630a9f2532b808069e045d434bbac200
|
[
"Apache-2.0"
] | 2
|
2015-07-01T08:10:59.000Z
|
2017-05-22T02:19:44.000Z
|
stackalytics/dashboard/helpers.py
|
Mirantis/stackalytics
|
96ec7c6c630a9f2532b808069e045d434bbac200
|
[
"Apache-2.0"
] | 4
|
2015-11-08T12:12:19.000Z
|
2019-06-17T09:44:47.000Z
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import operator
import re
import time
import six
from stackalytics.dashboard import parameters
from stackalytics.dashboard import vault
from stackalytics.processor import utils
INFINITY_HTML = '∞'
def _extend_author_fields(record):
    """Attach HTML author/company links to *record* (mutates in place)."""
    author_options = {'user_id': record['user_id'], 'company': ''}
    record['author_link'] = make_link(record['author_name'], '/', author_options)
    company_options = {'company': record['company_name'], 'user_id': ''}
    record['company_link'] = make_link(record['company_name'], '/',
                                       company_options)
def _extend_record_common_fields(record):
    """Fill in presentation fields shared by every record type (in place)."""
    _extend_author_fields(record)
    record['date_str'] = format_datetime(record['date'])
    module = record['module']
    record['module_link'] = make_link(
        module, '/', {'module': module, 'company': '', 'user_id': ''})
    # Counts of referenced blueprints/bugs; missing keys count as zero.
    record['blueprint_id_count'] = len(record.get('blueprint_id', []))
    record['bug_id_count'] = len(record.get('bug_id', []))
    # Each co-author gets the same author/company links as the main author.
    for person in record.get('coauthor') or []:
        _extend_author_fields(person)
def _extend_by_parent_info(record, parent, prefix='parent_'):
    """Copy an extended *parent* record's fields into *record* under *prefix*."""
    parent = vault.extend_record(parent)
    _extend_record_common_fields(parent)
    for key, value in six.iteritems(parent):
        record[prefix + key] = value
def extend_record(record):
    """Extend a raw record with presentation fields based on its record_type.

    Returns the extended record dict, or None for a 'mark' whose parent
    review/patch cannot be found in memory storage.
    """
    record = vault.extend_record(record)
    _extend_record_common_fields(record)
    if record['record_type'] == 'commit':
        record['branches'] = ','.join(record['branches'])
        if 'correction_comment' not in record:
            record['correction_comment'] = ''
        record['message'] = make_commit_message(record)
        if record['commit_date']:
            record['commit_date_str'] = format_datetime(record['commit_date'])
    elif record['record_type'] == 'mark':
        # A mark hangs off both its review and the specific patch set.
        review = vault.get_memory_storage().get_record_by_primary_key(
            record['review_id'])
        patch = vault.get_memory_storage().get_record_by_primary_key(
            utils.get_patch_id(record['review_id'], record['patch']))
        if not review or not patch:
            return None
        _extend_by_parent_info(record, review, 'parent_')
        _extend_by_parent_info(record, patch, 'patch_')
    elif record['record_type'] == 'patch':
        review = vault.get_memory_storage().get_record_by_primary_key(
            record['review_id'])
        _extend_by_parent_info(record, review, 'parent_')
    elif record['record_type'] == 'email':
        record['email_link'] = record.get('email_link') or ''
        # blueprint_id entries look like 'module:name' — see split below.
        record['blueprint_links'] = []
        for bp_id in record.get('blueprint_id', []):
            bp_module, bp_name = bp_id.split(':')
            record['blueprint_links'].append(
                make_blueprint_link(bp_module, bp_name))
    elif record['record_type'] in ['bpd', 'bpc']:
        # Blueprint drafted / completed.
        record['summary'] = utils.format_text(record['summary'])
        if record.get('mention_count'):
            record['mention_date_str'] = format_datetime(
                record['mention_date'])
        record['blueprint_link'] = make_blueprint_link(record['module'],
                                                       record['name'])
    elif record['record_type'] in ['bugr', 'bugf']:
        # Bug resolved / filed; the Launchpad bug number is the URL's tail.
        record['number'] = record['web_link'].split('/')[-1]
        record['title'] = filter_bug_title(record['title'])
        # Strip whitespace so the status can be used as a CSS class name.
        record['status_class'] = re.sub('\s+', '', record['status'])
    elif record['record_type'] == 'tr':
        record['date_str'] = format_date(record['date'])  # no need for hours
    return record
def get_current_company(user):
    """Return the name of the user's current company.

    Chooses the last affiliation whose end_date is still in the future; if
    every affiliation has already ended, falls back to the final entry.
    """
    now = time.time()
    chosen = -1
    for position, affiliation in enumerate(user['companies']):
        if now < affiliation['end_date']:
            chosen = position
    return user['companies'][chosen]['company_name']
def extend_user(user):
    """Return a copy of *user* augmented with UI fields (id, text, company_link)."""
    extended = user.copy()
    extended['id'] = extended['user_id']
    extended['text'] = extended['user_name']
    if not extended['companies']:
        extended['company_link'] = ''
        return extended
    company_name = get_current_company(extended)
    extended['company_link'] = make_link(
        company_name, '/', {'company': company_name, 'user_id': ''})
    return extended
def extend_module(module_id, project_type, release):
    """Build a UI description of a module group for the given release.

    Returns None when *module_id* is unknown; otherwise a dict with the
    module's id, display name, tag, and sorted child modules (each marked
    visible if it also belongs to *project_type* for this release).
    """
    module_id_index = vault.get_vault()['module_id_index']
    module_id = module_id.lower()
    if module_id not in module_id_index:
        return None
    repos_index = vault.get_vault()['repos_index']
    module = module_id_index[module_id]
    name = module['module_group_name']
    if name[0].islower():
        name = name.capitalize()
    # (module, release) pairs
    own_sub_modules = set(vault.resolve_modules([module_id], [release]))
    visible_sub_modules = own_sub_modules & set(vault.resolve_modules(
        vault.resolve_project_types([project_type]), [release]))
    child_modules = []
    for m, r in own_sub_modules:
        child = {'module_name': m, 'visible': (m, r) in visible_sub_modules}
        if m in repos_index:
            # Attach the repository URI when one is known for the child.
            child['repo_uri'] = repos_index[m]['uri']
        child_modules.append(child)
    child_modules.sort(key=lambda x: x['module_name'])
    return {
        'id': module_id,
        'name': name,
        'tag': module['tag'],
        'modules': child_modules,
    }
def get_activity(records, start_record, page_size, query_message=None):
    """Return up to *page_size* extended records, newest first, starting at
    *start_record*; optionally filtered by a substring of the message."""
    if query_message:
        # note that all records are now dicts!
        key_func = operator.itemgetter('date')
        records = [vault.extend_record(r) for r in records]
        # NOTE(review): str.find returns 0 for a match at the start of the
        # message, so '> 0' silently drops prefix matches (and -1 means no
        # match). This looks like it should be '>= 0' — confirm intent.
        records = [r for r in records
                   if (r.get('message') and
                       r.get('message').find(query_message) > 0)]
    else:
        key_func = operator.attrgetter('date')
    records_sorted = sorted(records, key=key_func, reverse=True)
    result = []
    for record in records_sorted[start_record:]:
        processed_record = extend_record(record)
        if processed_record:
            result.append(processed_record)
        if len(result) == page_size:
            break
    return result
def get_contribution_summary(records):
    """Aggregate contribution counters across *records*.

    Returns a dict of totals: commits and LOC, review marks by value,
    emails, blueprints drafted/completed, bugs filed/resolved, patch sets,
    change requests (with abandoned count), and translated lines.
    """
    mark_totals = dict((m, 0) for m in [-2, -1, 0, 1, 2, 'A', 'WIP', 'x', 's'])
    counters = {
        'drafted_blueprint_count': 0,
        'completed_blueprint_count': 0,
        'commit_count': 0,
        'email_count': 0,
        'loc': 0,
        'filed_bug_count': 0,
        'resolved_bug_count': 0,
        'patch_set_count': 0,
        'change_request_count': 0,
        'abandoned_change_requests_count': 0,
        'translations': 0,
    }

    def classify_mark(rec):
        # Map a review mark record onto its bucket in mark_totals.
        if rec.type == 'Workflow':
            return 'A' if rec.value == 1 else 'WIP'
        if rec.type == 'Code-Review':
            return rec.value
        if rec.type == 'Abandon':
            return 'x'
        if rec.type[:5] == 'Self-':
            return 's'
        return 0

    for rec in records:
        kind = rec.record_type
        if kind == 'commit':
            counters['commit_count'] += 1
            counters['loc'] += rec.loc
        elif kind == 'mark':
            mark_totals[classify_mark(rec)] += 1
        elif kind == 'email':
            counters['email_count'] += 1
        elif kind == 'bpd':
            counters['drafted_blueprint_count'] += 1
        elif kind == 'bpc':
            counters['completed_blueprint_count'] += 1
        elif kind == 'bugf':
            counters['filed_bug_count'] += 1
        elif kind == 'bugr':
            counters['resolved_bug_count'] += 1
        elif kind == 'patch':
            counters['patch_set_count'] += 1
        elif kind == 'review':
            counters['change_request_count'] += 1
            if rec.status == 'ABANDONED':
                counters['abandoned_change_requests_count'] += 1
        elif kind == 'tr':
            counters['translations'] += rec.loc

    counters['marks'] = mark_totals
    return counters
def format_datetime(timestamp):
    """Render a unix *timestamp* as e.g. '01 Jan 1970 00:00:00 UTC'."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return '%s UTC' % moment.strftime('%d %b %Y %H:%M:%S')
def format_date(timestamp):
    """Render a unix *timestamp* as a UTC date string, e.g. '01 Jan 1970'."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return moment.strftime('%d %b %Y')
def format_launchpad_module_link(module):
    """Return an HTML anchor pointing at the module's Launchpad page."""
    template = '<a href="https://launchpad.net/%s">%s</a>'
    return template % (module, module)
def make_link(title, uri=None, options=None):
    """Render an HTML anchor, propagating the currently active dashboard
    filter parameters as query-string arguments.

    NOTE(review): if *uri* is left as None and any filter parameter is
    active, the ``uri +=`` below raises TypeError; with no parameters it
    renders the literal text 'None' as the href. Callers appear to always
    pass a uri — confirm before relying on the default.
    """
    param_names = ('release', 'project_type', 'module', 'company', 'user_id',
                   'metric')
    param_values = {}
    # Collect only the filter parameters that currently have a value.
    for param_name in param_names:
        value = parameters.get_parameter({}, param_name)
        if value:
            param_values[param_name] = ','.join(value)
    if options:
        # Explicitly supplied options override the collected filters.
        param_values.update(options)
    if param_values:
        uri += '?' + '&'.join(['%s=%s' % (n, utils.safe_encode(v))
                               for n, v in six.iteritems(param_values)])
    return '<a href="%(uri)s">%(title)s</a>' % {'uri': uri, 'title': title}
def make_blueprint_link(module, name):
    """Build an HTML anchor for the blueprint report page."""
    href = '/'.join(['/report/blueprint', module, name])
    return '<a href="{0}">{1}</a>'.format(href, name)
def make_commit_message(record):
    """Format a commit message as HTML, linkifying blueprint, Launchpad
    bug and Gerrit Change-Id references found in the text."""
    s = record['message']
    module = record['module']

    s = utils.format_text(s)

    # insert links
    # "blueprint <name>" -> link to the module's Launchpad blueprint page.
    s = re.sub(re.compile('(blueprint\s+)([\w-]+)', flags=re.IGNORECASE),
               r'\1<a href="https://blueprints.launchpad.net/' +
               module + r'/+spec/\2" class="ext_link">\2</a>', s)
    # "bug #NNNNN" (5-7 digits) -> link to the Launchpad bug tracker.
    s = re.sub(re.compile('(bug[\s#:]*)([\d]{5,7})', flags=re.IGNORECASE),
               r'\1<a href="https://bugs.launchpad.net/bugs/\2" '
               r'class="ext_link">\2</a>', s)
    # Gerrit Change-Id ("I" + 40 hex chars) -> link to the review search.
    s = re.sub(r'\s+(I[0-9a-f]{40})',
               r' <a href="https://review.openstack.org/#q,\1,n,z" '
               r'class="ext_link">\1</a>', s)

    s = utils.unwrap_text(s)
    return s
def make_page_title(project_type_inst, release, module_inst, company,
                    user_inst):
    """Compose the human-readable page heading for the stats dashboard.

    The wording varies with the project-type class (OpenStack vs. other),
    whether a company and/or user filter is active, the selected module
    group, and the selected release.
    """
    # Classify by the parent project type when one exists.
    parent = project_type_inst.get('parent')
    pt_class = parent['id'] if parent else project_type_inst['id']
    is_openstack = pt_class == 'all' or pt_class.startswith('openstack')

    personal = bool(company or user_inst)
    if personal:
        if user_inst:
            title = user_inst['user_name']
            if company:
                title = '%s (%s)' % (title, company)
        else:
            title = company
    else:
        community = 'OpenStack' if is_openstack else project_type_inst['title']
        title = community + ' community'

    title += ' contribution'
    if module_inst:
        title = '%s to %s' % (title, module_inst['module_group_name'])

    if is_openstack:
        if release == 'all':
            title += ' in all releases'
        else:
            # Personal pages spell out "OpenStack" before the release name.
            prefix = ' in OpenStack' if personal else ' in'
            title += '%s %s release' % (prefix, release.capitalize())
    elif release != 'all':
        title += ' during OpenStack %s release' % release.capitalize()
    return title
def filter_bug_title(title):
    """Strip a leading 'Bug #NNN ...: ' prefix and the surrounding quotes
    from a Launchpad bug title; other titles pass through unchanged."""
    pattern = re.compile(r'^(?:Bug #\d+.+:\s+)"(.*)"')
    return pattern.sub(r'\1', title)
| 33.578652
| 78
| 0.59821
|
1ca6e49965d2e6e498f370ee92dec0ef087bfde1
| 3,000
|
py
|
Python
|
book_figures/chapter7/fig_PCA_rotation.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | 3
|
2017-02-23T07:59:15.000Z
|
2021-01-16T18:49:32.000Z
|
book_figures/chapter7/fig_PCA_rotation.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | null | null | null |
book_figures/chapter7/fig_PCA_rotation.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | 1
|
2021-01-16T18:49:36.000Z
|
2021-01-16T18:49:36.000Z
|
"""
Schematic Diagram of PCA
------------------------
Figure 7.2
A distribution of points drawn from a bivariate Gaussian and centered on the
origin of x and y. PCA defines a rotation such that the new axes (x' and y')
are aligned along the directions of maximal variance (the principal components)
with zero covariance. This is equivalent to minimizing the square of the
perpendicular distances between the points and the principal components.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set parameters and draw the random sample
np.random.seed(42)  # fixed seed for a reproducible figure

r = 0.9               # half-length of the drawn coordinate axes
sigma1 = 0.25         # std dev along the first principal direction
sigma2 = 0.08         # std dev along the second principal direction
rotation = np.pi / 6  # tilt of the principal axes w.r.t. x/y

s = np.sin(rotation)
c = np.cos(rotation)

# Draw an axis-aligned 2D Gaussian sample (shape (2, 100)), then rotate
# the whole cloud by `rotation` so its principal axes are tilted.
X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T
R = np.array([[c, -s],
              [s, c]])
X = np.dot(R, X)
#------------------------------------------------------------
# Plot the diagram
fig = plt.figure(figsize=(5, 5), facecolor='w')
ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)

# draw axes: double-headed arrows through the origin for x and y
ax.annotate(r'$x$', (-r, 0), (r, 0),
            ha='center', va='center',
            arrowprops=dict(arrowstyle='<->', color='k', lw=1))
ax.annotate(r'$y$', (0, -r), (0, r),
            ha='center', va='center',
            arrowprops=dict(arrowstyle='<->', color='k', lw=1))

# draw rotated axes: the principal-component directions x' and y'
ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s),
            ha='center', va='center',
            arrowprops=dict(color='k', arrowstyle='<->', lw=1))
ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c),
            ha='center', va='center',
            arrowprops=dict(color='k', arrowstyle='<->', lw=1))

# scatter points
ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2)

# draw lines: perpendicular segment from each point to the x' axis
vnorm = np.array([s, -c])  # unit vector normal to the x' direction
for v in (X.T):
    d = np.dot(v, vnorm)
    v1 = v - d * vnorm  # foot of the perpendicular on the x' axis
    ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k')

# draw ellipses: 1-, 2- and 3-sigma contours of the generating Gaussian
for sigma in (1, 2, 3):
    ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2,
                         rotation * 180. / np.pi,
                         ec='k', fc='gray', alpha=0.2, zorder=1))
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plt.show()
| 34.090909
| 79
| 0.583
|
b4fd89bd03784082575be592f19792ca18d0b899
| 6,598
|
py
|
Python
|
tensorflow/python/kernel_tests/qr_op_test.py
|
tycho/tensorflow
|
27767d8e9c1325979cf32ff5b81c10df9006fd57
|
[
"Apache-2.0"
] | 2
|
2020-02-18T04:08:18.000Z
|
2020-05-20T07:14:44.000Z
|
tensorflow/python/kernel_tests/qr_op_test.py
|
tycho/tensorflow
|
27767d8e9c1325979cf32ff5b81c10df9006fd57
|
[
"Apache-2.0"
] | 1
|
2021-05-26T02:25:04.000Z
|
2021-05-26T02:25:31.000Z
|
tensorflow/python/kernel_tests/qr_op_test.py
|
tycho/tensorflow
|
27767d8e9c1325979cf32ff5b81c10df9006fd57
|
[
"Apache-2.0"
] | 3
|
2018-10-26T19:30:26.000Z
|
2021-03-19T19:30:40.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
class QrOpTest(test.TestCase):
  """Hand-written (non-generated) checks for linalg_ops.qr."""

  def testWrongDimensions(self):
    """qr must reject inputs of rank < 2 at graph-construction time."""
    scalar = constant_op.constant(1.)
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be at least rank 2 but is rank 0"):
      linalg_ops.qr(scalar)
    vector = constant_op.constant([1., 2.])
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be at least rank 2 but is rank 1"):
      linalg_ops.qr(vector)

  def testConcurrentExecutesWithoutError(self):
    """Identical inputs decomposed in one session run must agree exactly."""
    with self.test_session(use_gpu=True) as sess:
      pending = []
      for full_matrices_ in True, False:
        for rows_ in 4, 5:
          for cols_ in 4, 5:
            # Same seed => identical random matrices, so the two qr ops
            # must produce bitwise-equal results.
            lhs = random_ops.random_normal([rows_, cols_], seed=42)
            rhs = random_ops.random_normal([rows_, cols_], seed=42)
            q1, r1 = linalg_ops.qr(lhs, full_matrices=full_matrices_)
            q2, r2 = linalg_ops.qr(rhs, full_matrices=full_matrices_)
            pending.extend([q1, r1, q2, r2])
      val = sess.run(pending)
      # pending holds 8 groups of [q1, r1, q2, r2].
      for group in range(8):
        base = 4 * group
        self.assertAllEqual(val[base], val[base + 2])      # q1 == q2
        self.assertAllEqual(val[base + 1], val[base + 3])  # r1 == r2
def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):
  """Build a test method that checks linalg_ops.qr against np.linalg.qr
  for one dtype/shape/full_matrices/static-shape combination."""

  is_complex = dtype_ in (np.complex64, np.complex128)
  is_single = dtype_ in (np.float32, np.complex64)

  def CompareOrthogonal(self, x, y, rank):
    # Single precision gets a much looser tolerance.
    if is_single:
      atol = 5e-4
    else:
      atol = 5e-14
    # We only compare the first 'rank' orthogonal vectors since the
    # remainder form an arbitrary orthonormal basis for the
    # (row- or column-) null space, whose exact value depends on
    # implementation details. Notice that since we check that the
    # matrices of singular vectors are unitary elsewhere, we do
    # implicitly test that the trailing vectors of x and y span the
    # same space.
    x = x[..., 0:rank]
    y = y[..., 0:rank]
    # Q is only unique up to sign (complex phase factor for complex matrices),
    # so we normalize the sign first.
    sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
    phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
    x *= phases
    self.assertAllClose(x, y, atol=atol)

  def CheckApproximation(self, a, q, r):
    if is_single:
      tol = 1e-5
    else:
      tol = 1e-14
    # Tests that a ~= q*r.
    a_recon = math_ops.matmul(q, r)
    self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)

  def CheckUnitary(self, x):
    # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
    xx = math_ops.matmul(x, x, adjoint_a=True)
    identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
    if is_single:
      tol = 1e-5
    else:
      tol = 1e-14
    self.assertAllClose(identity.eval(), xx.eval(), atol=tol)

  def Test(self):
    # Deterministic random input in [-1, 1).
    np.random.seed(1)
    x_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    if is_complex:
      # Add a random imaginary part for the complex dtypes.
      x_np += 1j * np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)

    with self.test_session(use_gpu=True) as sess:
      if use_static_shape_:
        x_tf = constant_op.constant(x_np)
      else:
        # Unknown static shape: feed the value at run time instead.
        x_tf = array_ops.placeholder(dtype_)
      q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices_)

      if use_static_shape_:
        q_tf_val, r_tf_val = sess.run([q_tf, r_tf])
      else:
        q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})

      q_dims = q_tf_val.shape
      np_q = np.ndarray(q_dims, dtype_)
      # Flatten any batch dimensions so numpy's 2-D qr can be applied
      # matrix by matrix.
      np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
      new_first_dim = np_q_reshape.shape[0]

      x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
      for i in range(new_first_dim):
        if full_matrices_:
          np_q_reshape[i,:,:], _ = \
              np.linalg.qr(x_reshape[i,:,:], mode="complete")
        else:
          np_q_reshape[i,:,:], _ = \
              np.linalg.qr(x_reshape[i,:,:], mode="reduced")
      np_q = np.reshape(np_q_reshape, q_dims)
      CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
      CheckApproximation(self, x_np, q_tf_val, r_tf_val)
      CheckUnitary(self, q_tf_val)

  return Test
if __name__ == "__main__":
  # Generate one test method per (dtype, shape, full_matrices, static)
  # combination and attach it to QrOpTest before running.
  for dtype in np.float32, np.float64, np.complex64, np.complex128:
    for rows in 1, 2, 5, 10, 32, 100:
      for cols in 1, 2, 5, 10, 32, 100:
        for full_matrices in False, True:
          # Batched shapes are only exercised for the smaller matrices.
          batch_choices = [(), (3,)]
          if max(rows, cols) < 10:
            batch_choices.append((3, 2))
          for batch_dims in batch_choices:
            for use_static_shape in True, False:
              shape = batch_dims + (rows, cols)
              name = "%s_%s_full_%s_static_%s" % (
                  dtype.__name__, "_".join(map(str, shape)),
                  full_matrices, use_static_shape)
              _AddTest(QrOpTest, "Qr", name,
                       _GetQrOpTest(dtype, shape, full_matrices,
                                    use_static_shape))
  test.main()
| 38.360465
| 80
| 0.625947
|
3743cfdf1eb458fba7e04801a6e1be033e923638
| 4,034
|
py
|
Python
|
broadcast_message_producer.py
|
alifzl/YomKippur
|
2bec1384b7ba20c818678474f5089d6203bdc491
|
[
"MIT"
] | 1
|
2021-11-16T09:41:02.000Z
|
2021-11-16T09:41:02.000Z
|
broadcast_message_producer.py
|
atknin/YomKippur
|
2bec1384b7ba20c818678474f5089d6203bdc491
|
[
"MIT"
] | null | null | null |
broadcast_message_producer.py
|
atknin/YomKippur
|
2bec1384b7ba20c818678474f5089d6203bdc491
|
[
"MIT"
] | 1
|
2020-05-25T09:51:47.000Z
|
2020-05-25T09:51:47.000Z
|
import requests
import json
from configs.readconfig import configp
import logging
import pika
import uuid
import unittest
from appium import webdriver
from time import sleep
from selenium.common.exceptions import NoSuchElementException
import sys, logging, json, re, os
from workers.whatsapp_broadcast_message_worker import WhatsAppBroadcastMessage,BroadcastParametrizedTestCase
class Rabbit:
    """RabbitMQ helper for WhatsApp broadcast-message jobs.

    Publishes broadcast requests to a per-queue topic exchange and runs a
    blocking consumer that executes a parametrized Appium test suite for
    each received job.

    NOTE(review): this module uses Python 2 syntax (print statements).
    """

    def __init__(self):
        # Broker host and credentials come from the [rabbitmq] config section.
        rhost = configp.get('rabbitmq', 'ip')
        username = configp.get('rabbitmq', 'username')
        password = configp.get('rabbitmq', 'password')
        credentials = pika.PlainCredentials(username, password)
        parameters = pika.ConnectionParameters(rhost,5672,'/',credentials)
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()
        # Exclusive auto-named queue used as the RPC reply-to address.
        result = self.channel.queue_declare(exclusive=True)
        self.callback_queue = result.method.queue

    def broadcastmsgproducer(self,queuename,message, emulatername, action):
        """Publish *message* to '<queuename>.exchange' with routing key
        '<queuename>.*'; emulator name and action travel in AMQP headers.

        Returns the correlation id on success, False otherwise.  Note the
        connection is closed after a single publish either way.
        """
        # Get the service resource
        try:
            routingkey = queuename+".*"
            exchange = queuename+".exchange"
            self.response = None
            self.corr_id = str(uuid.uuid4())
            res = self.channel.basic_publish(exchange=exchange,
                routing_key=routingkey,
                body=message,properties = pika.BasicProperties(reply_to = self.callback_queue,
                correlation_id = self.corr_id,headers={'emulator_name':emulatername, 'action': action}))
            if res:
                self.connection.close()
                return self.corr_id
            else:
                self.connection.close()
                return False
        except Exception as e:
            print e
            return False

    def msgworker(self, queuename):
        """Blocking consume loop on *queuename*; never returns normally."""
        while 1:
            self.channel.basic_consume(self.callback, queue=queuename, no_ack=True)
            print "Waiting For Messages"
            self.channel.start_consuming()

    def callback(self,ch, method, properties, body):
        """Dispatch one job based on the 'action' AMQP header.

        For 'send_only' the body is evaluated into a dict carrying
        emulator_name, mobile_number_list and message_body, and a
        parametrized unittest suite performs the broadcast.

        NOTE(review): ``eval(body)`` executes arbitrary queue input —
        switch to ``json.loads`` once producers send JSON.
        """
        print body
        print properties
        if properties.headers['action'] == 'send_only':
            print type(body)
            body = eval(body)
            print body
            #body = json.loads(body)
            emulator_name = body['emulator_name']
            mobile_number_list = body['mobile_number_list']
            message_body = body['message_body']
            print message_body
            try:
                print "entered"
                #obj = WhatsAppBroadcastMessage().var_setup(emulator_name,mobile_number_list,message_body)
                suite = unittest.TestSuite()
                suite.addTest(BroadcastParametrizedTestCase.parametrize(WhatsAppBroadcastMessage, emulator_name=emulator_name,mobile_number_list=mobile_number_list,message_body=message_body))
                #suite.addTest(BroadcastParametrizedTestCase.parametrize(TestOne, param=13))
                result = unittest.TextTestRunner(verbosity=2).run(suite)
                # unittest.TextTestRunner().run(suite)
                print "de-entetred"
            except Exception as e:
                print e
        elif properties.headers['action'] == 'create_and_send':
            print "Not handled"
        else:
            print "default handle"
        print(" [x] Received %r" % body)

    def main(self):
        """Configure file logging and start consuming broadcast jobs."""
        global LOGGER
        FORMAT = '%(levelname)s: %(asctime)-15s: %(filename)s: %(funcName)s: %(module)s: %(message)s'
        logging.basicConfig(filename="/var/log/whatsapp_single_worker.log", level=logging.DEBUG, format=FORMAT)
        LOGGER = logging.getLogger("Broadcastworker")
        broadcast_message_queue_name = configp.get('queue_name', 'broadcast_message')
        try:
            self.msgworker(broadcast_message_queue_name)
        except Exception as e:
            LOGGER.error(e)
if __name__ == '__main__':
    # Entry point: construct the helper and start the blocking consumer.
    rabbitInstance = Rabbit()
    rabbitInstance.main()
| 36.342342
| 191
| 0.630392
|
f7ff04a0d14c05d62eebf33a07be72d3158395ce
| 2,012
|
py
|
Python
|
tests/testKuhn.py
|
1696012928/RoomAI
|
37be09590489ab5f7c85083173e83ea31c40b76c
|
[
"MIT"
] | 1
|
2018-03-02T00:49:31.000Z
|
2018-03-02T00:49:31.000Z
|
tests/testKuhn.py
|
1696012928/RoomAI
|
37be09590489ab5f7c85083173e83ea31c40b76c
|
[
"MIT"
] | null | null | null |
tests/testKuhn.py
|
1696012928/RoomAI
|
37be09590489ab5f7c85083173e83ea31c40b76c
|
[
"MIT"
] | null | null | null |
import unittest
import roomai.kuhn
import roomai.common
class KuhnTester(unittest.TestCase):
    """Smoke tests for the Kuhn poker environment."""

    def testKuhn(self):
        """Play many full games with always-bet players; each must reach a
        terminal state."""
        for _ in range(1000):
            players = ([roomai.kuhn.Example_KuhnPokerAlwaysBetPlayer()
                        for _ in range(2)]
                       + [roomai.kuhn.KuhnPokerChancePlayer()])
            env = roomai.kuhn.KuhnPokerEnv()
            infos, public_state, _, _ = env.init()
            for idx, player in enumerate(players):
                player.receive_info(infos[idx])
            while not public_state.is_terminal:
                turn = infos[-1].public_state.turn
                action = players[turn].take_action()
                infos, public_state, _, _ = env.forward(action)
                for idx, player in enumerate(players):
                    player.receive_info(infos[idx])
            print (env.public_state.scores)

    def testKuhnEnvBackward(self):
        """forward/backward must push and pop the action history."""
        env = roomai.kuhn.KuhnPokerEnv()
        env.init({"backward_enable": True})
        env.forward(roomai.kuhn.KuhnPokerActionChance.lookup("0,2"))

        infos, public_state, person_states, private_state = env.forward(
            roomai.kuhn.KuhnPokerAction("bet"))
        print (public_state.action_history,person_states[public_state.turn].id)
        assert len(public_state.action_history) == 2

        infos, public_state, person_states, private_state = env.forward(
            roomai.kuhn.KuhnPokerAction("bet"))
        print (public_state.action_history,person_states[public_state.turn].id)
        assert len(public_state.action_history) == 3

        infos, public_state, person_states, private_state = env.backward()
        print (public_state.action_history,person_states[public_state.turn].id)
        assert len(public_state.action_history) == 2

    def testCompete(self):
        """The compete helper must run a full match without raising."""
        contenders = [roomai.kuhn.Example_KuhnPokerAlwaysBetPlayer()
                      for _ in range(2)]
        env = roomai.kuhn.KuhnPokerEnv()
        env.compete(env, contenders + [roomai.common.RandomPlayerChance()])
| 36.581818
| 128
| 0.644135
|
7e8f9a9a0306c6f4cda3607aa0d6c0e0e9199755
| 2,856
|
py
|
Python
|
packages/lektor_python_colombia_plugin/lektor_python_colombia_plugin.py
|
sorelyss/sitio-web
|
5c279f7daa3e1270285b7ae947ad4b6a962de014
|
[
"MIT"
] | 9
|
2018-08-09T17:55:18.000Z
|
2021-12-26T20:06:03.000Z
|
packages/lektor_python_colombia_plugin/lektor_python_colombia_plugin.py
|
sorelyss/sitio-web
|
5c279f7daa3e1270285b7ae947ad4b6a962de014
|
[
"MIT"
] | 110
|
2018-02-23T01:18:33.000Z
|
2021-04-02T15:50:12.000Z
|
packages/lektor_python_colombia_plugin/lektor_python_colombia_plugin.py
|
sorelyss/sitio-web
|
5c279f7daa3e1270285b7ae947ad4b6a962de014
|
[
"MIT"
] | 34
|
2017-10-30T17:27:46.000Z
|
2021-10-12T23:49:23.000Z
|
# -*- coding: utf-8 -*-
"""Custom plugin to add extra functionality to the Python Colombia site."""
# Standard library imports
from collections import OrderedDict
import os
import sys
# Third party imports
from PIL import Image as ImagePIL
from jinja2 import Undefined
# Local imports
from lektor.db import Image
from lektor.pluginsystem import Plugin
from lektor.project import Project
from lektor.reporter import reporter
from lektor.utils import portable_popen
PY3 = sys.version_info[0] == 3
PROJECT = Project.discover()
ROOT_PATH = os.path.abspath(os.path.dirname(PROJECT.project_path))
CONTENT_PATH = os.path.join(ROOT_PATH, 'content')
class PythonColombiaPlugin(Plugin):
    """Local Lektor plugin with site-specific build checks and hooks.

    - Warns about user images with wrong sizes or file types after build.
    - Regenerates the community map after a full build.
    - Registers extra helpers/globals in the Jinja environment.
    """

    name = 'Python Colombia Custom Lektor Plugin'
    description = 'This is a custom local plugin to add extra functionality.'

    def on_after_build(self, builder, build_state, source, prog):
        """Warn (via print) about user images with wrong size or type."""
        if isinstance(source, Image) and source.parent['_model'] == 'user':
            w, h = source.width, source.height
            fpath = CONTENT_PATH + source.path
            if isinstance(w, Undefined):
                # Lektor could not determine the size; read it from disk.
                with ImagePIL.open(fpath) as img:
                    w, h = img.size

            # Per the messages below: `image` must be 400x400 and
            # `image_alt` must be 400x200.
            expected = None
            if fpath.endswith(source.parent['image']):
                expected = (400, 400)
            elif fpath.endswith(source.parent['image_alt']):
                expected = (400, 200)
            # BUG FIX: the old check `w != 400 and (h != 200 or h != 400)`
            # had an always-true second clause, so a wrong height with a
            # correct width was never reported.
            if expected is not None and (w, h) != expected:
                print('Size should be {}x{}. Current size is {}x{} ({})'.format(
                    expected[0], expected[1], w, h, source.path))

            if source.parent['type'] == 'persona' and not fpath.lower().endswith(('.jpg', '.jpeg')):
                print('User images should be of type .jpg ({})'.format(source.path))

            if fpath.lower().endswith('.jpeg'):
                print('File extension should be .jpg ({})'.format(source.path))

    def on_after_build_all(self, *args, **kwargs):
        """After a full site build, regenerate the community map artifact."""
        def run_map_generation_script():
            """Run the map generation script located at `/scripts/map.py`."""
            scripts_root = os.path.join(self.env.root_path, 'scripts')
            args = [sys.executable, os.path.join(scripts_root, 'map.py')]
            portable_popen(args, cwd=scripts_root).communicate()

        run_map_generation_script()
        # reporter.report_build_all_failure(10)

    def on_setup_env(self, **extra):
        """Register extra globals and helpers in the Jinja environment."""
        def estimate_reading_time(content):
            """Estimate reading time in minutes for content."""
            words = content.split(' ')
            time = len(words) / 200.0  # Average adult reading speed (wpm)
            return int(round(time, 0))

        self.env.jinja_env.globals.update(dir=dir)
        self.env.jinja_env.globals.update(OrderedDict=OrderedDict)
        self.env.jinja_env.globals.update(estimate_reading_time=estimate_reading_time)
| 39.123288
| 105
| 0.647409
|
5b2b796ab665612c7e31992ad809e61f9fcc99c8
| 697
|
py
|
Python
|
opensanctions/helpers/__init__.py
|
quantumchips/opensanctions
|
56f19dcfea704480e56a311d2a807c8446237457
|
[
"MIT"
] | null | null | null |
opensanctions/helpers/__init__.py
|
quantumchips/opensanctions
|
56f19dcfea704480e56a311d2a807c8446237457
|
[
"MIT"
] | 19
|
2021-12-01T12:04:03.000Z
|
2022-03-01T12:03:40.000Z
|
opensanctions/helpers/__init__.py
|
quantumchips/opensanctions
|
56f19dcfea704480e56a311d2a807c8446237457
|
[
"MIT"
] | null | null | null |
from opensanctions.helpers.gender import clean_gender
from opensanctions.helpers.emails import clean_emails
from opensanctions.helpers.phones import clean_phones
from opensanctions.helpers.addresses import make_address, apply_address
from opensanctions.helpers.sanctions import make_sanction
from opensanctions.helpers.features import apply_feature
from opensanctions.helpers.dates import extract_years, parse_date
from opensanctions.helpers.excel import convert_excel_cell
# Public helper API re-exported by this package; keep in sync with the
# imports above.
__all__ = [
    "clean_gender",
    "clean_emails",
    "clean_phones",
    "make_address",
    "apply_address",
    "make_sanction",
    "extract_years",
    "parse_date",
    "apply_feature",
    "convert_excel_cell",
]
| 31.681818
| 71
| 0.799139
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.