text stringlengths 8 6.05M |
|---|
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, Normalize
from matplotlib.ticker import FormatStrFormatter, StrMethodFormatter
import numpy as np
def heatmap(datas, row_labels, col_labels, ax=None,
            cbar_kw=None, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.

    Parameters
    ----------
    datas
        A 2D numpy array of shape (N, M).
    row_labels
        A list or array of length N with the labels for the rows.
    col_labels
        A list or array of length M with the labels for the columns.
    ax
        A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
        not provided, use current axes or create a new one. Optional.
    cbar_kw
        A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
    cbarlabel
        The label for the colorbar. Optional.
    **kwargs
        All other arguments are forwarded to `imshow`.

    Returns
    -------
    im, cbar
        The created `AxesImage` and colorbar.
    """
    # Avoid the shared-mutable-default-argument pitfall for cbar_kw.
    if cbar_kw is None:
        cbar_kw = {}
    if not ax:
        ax = plt.gca()

    # Plot the heatmap. Forward **kwargs as the docstring promises
    # (the original ignored them here).
    im = ax.imshow(datas, **kwargs)

    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")

    # We want to show all ticks...
    ax.set_xticks(np.arange(datas.shape[1]))
    ax.set_yticks(np.arange(datas.shape[0]))
    # ... and label them with the respective list entries.
    # Columns (M) belong on the x axis and rows (N) on the y axis; the
    # original had these two swapped, which mislabels (or raises for) any
    # non-square matrix.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)

    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")

    # Turn spines off and create a white grid between cells.
    for edge, spine in ax.spines.items():
        spine.set_visible(False)
    ax.set_xticks(np.arange(datas.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(datas.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return im, cbar
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.

    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate. If None, the image's data is used. Optional.
    valfmt
        The format of the annotations inside the heatmap. This should either
        use the string format method, e.g. "$ {x:.2f}", or be a
        `matplotlib.ticker.Formatter`. Optional.
    textcolors
        A pair of color specifications. The first is used for values below
        a threshold, the second for those above. Optional.
    threshold
        Value in data units according to which the colors from textcolors are
        applied. If None (the default) uses the middle of the colormap as
        separation. Optional.
    **textkw
        All other arguments are forwarded to each call to `text` used to
        create the text labels.

    Returns
    -------
    texts
        The list of created `Text` instances.
    """
    # Note: default is a tuple, not a list, to avoid a shared mutable default.
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()

    # Normalize the threshold to the image's color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.

    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)

    # A plain format string is wrapped in a ticker formatter so both forms
    # of `valfmt` are callable below.
    if isinstance(valfmt, str):
        valfmt = StrMethodFormatter(valfmt)

    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
    return texts
class MidpointNormalize(Normalize):
    # Colormap normalization that pins `midpoint` (in data units) to the
    # center (0.5) of the colormap rather than the midpoint of [vmin, vmax].
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        # Data value that should map to colormap position 0.5.
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # Piecewise-linear mapping: vmin -> 0, midpoint -> 0.5, vmax -> 1.
        # NOTE(review): the `clip` argument is ignored and the masked array
        # carries no mask — matches the common recipe, but confirm intent.
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
def plotDecisionBoundary(X_train, y_train, model, param, paramname, subplt):
    """
    Plot a fitted classifier's decision regions over a 2-feature training set.

    Parameters
    ----------
    X_train
        2D array of samples; only columns 0 and 1 are used.
    y_train
        Class labels used to color the training points.
    model
        A fitted estimator exposing `predict`.
    param, paramname
        Hyperparameter value and name, shown in the subplot title.
    subplt
        The `matplotlib.axes.Axes` to draw on.
    """
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
    # Pad the mesh one unit beyond the data range on each side.
    x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
    y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                         np.arange(y_min, y_max, 0.02))
    # Classify every point of the mesh to obtain the decision regions.
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    subplt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    subplt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cmap_bold,
                   edgecolor='k', s=20)
    # Fixed: the original called subplt.axis(x_max=..., x_min=xx.min, ...),
    # which passed an uncalled bound method and keyword names Axes.axis
    # does not accept. Set the limits explicitly instead.
    subplt.set_xlim(xx.min(), xx.max())
    subplt.set_ylim(yy.min(), yy.max())
    subplt.set_xlabel("Alcohol")
    subplt.set_ylabel("Malic acid")
    subplt.set_title("Wine classification ("+paramname+" = "+str(param)+")", pad=10)
|
"""
Modify your own source code with this piece of Python black magic.
When a piece of code calls `replace_me(value)`, that line will be replaced with the given `value`. If you want to insert a comment and keep the line that inserted it, use `insert_comment(value)`.
**ATTENTION**: Calling these functions will modify your source code. Keep backups.
Example:
from replace_me import replace_me, insert_comment
# If you run this program, this source code will change.
# These two lines will become the same:
# Hello World
replace_me("Hello World", as_comment=True)
# Code generation. Creates a hard coded list of 100 numbers.
replace_me('numbers = ' + str(list(range(100))))
import random
# The next comment will be replaced with a random number.
insert_comment(random.randint(1, 10))
# ??
# Pseudo-quine, replaces the line with itself.
quine = 'replace_me(quine)'
replace_me(quine)
"""
import re
import sys
from inspect import getframeinfo, stack
from pprint import pformat
def replace_me(value, as_comment=False):
    """
    ** ATTENTION **
    CALLING THIS FUNCTION WILL MODIFY YOUR SOURCE CODE. KEEP BACKUPS.
    Replaces the current source code line with the given `value`, while keeping
    the indentation level. If `as_comment` is True, then `value` is inserted
    as a Python comment and pretty-printed.
    Because inserting multi-line values changes the following line numbers,
    don't mix multiple calls to `replace_me` with multi-line values.
    """
    # Frame 1 up the stack is the caller; its filename/lineno locate the
    # exact source line to rewrite.
    caller = getframeinfo(stack()[1][0])
    if caller.filename == '<stdin>':
        raise ValueError("Can't use `replace_me` module in interactive interpreter.")
    with open(caller.filename, 'r+', encoding='utf-8') as f:
        lines = f.read().split('\n')
        # Capture the caller line's leading whitespace so every inserted
        # line keeps the same indentation.
        spaces, = re.match(r'^(\s*)', lines[caller.lineno-1]).groups()
        if as_comment:
            if not isinstance(value, str):
                value = pformat(value, indent=4)
            value_lines = value.rstrip().split('\n')
            value_lines = (spaces + '# ' + l for l in value_lines)
        else:
            value_lines = (spaces + l for l in str(value).split('\n'))
        lines[caller.lineno-1] = '\n'.join(value_lines)
        # Rewrite the whole file in place with the modified line(s).
        f.seek(0)
        f.truncate()
        f.write('\n'.join(lines))
def insert_comment(comment):
    """
    ** ATTENTION **
    CALLING THIS FUNCTION WILL MODIFY YOUR SOURCE CODE. KEEP BACKUPS.
    Inserts a Python comment in the next source code line. If a comment already
    exists, it'll be replaced. The current indentation level will be maintained,
    multi-line values will be inserted as multiple comments, and non-str values
    will be pretty-printed.
    Because inserting multi-line comments changes the following line numbers,
    don't mix multiple calls to `insert_comment` with multi-line comments.
    """
    # Locate the line that called us; the comment goes on the line below it.
    caller = getframeinfo(stack()[1][0])
    if caller.filename == '<stdin>':
        raise ValueError("Can't use `replace_me` module in interactive interpreter.")
    line_number = caller.lineno-1
    comment_line = line_number + 1
    with open(caller.filename, 'r+', encoding='utf-8') as f:
        lines = f.read().split('\n')
        # Indentation of the calling line, reused for the inserted comment.
        spaces, = re.match(r'^(\s*)', lines[line_number]).groups()
        # Drop any existing comment block directly below the call so a rerun
        # replaces rather than accumulates comments.
        while comment_line < len(lines) and lines[comment_line].startswith(spaces + '#'):
            lines.pop(comment_line)
        if not isinstance(comment, str):
            comment = pformat(comment, indent=4)
        comment_lines = [spaces + '# ' + l for l in comment.rstrip().split('\n')]
        lines = lines[:comment_line] + comment_lines + lines[comment_line:]
        # Rewrite the whole file in place.
        f.seek(0)
        f.truncate()
        f.write('\n'.join(lines))
# Sentinel meaning "no expected value supplied"; compared with `is`, so any
# caller-provided value (including another empty dict) is distinguishable.
NONE = {}
def test(value, expected=NONE):
    """
    ** ATTENTION **
    CALLING THIS FUNCTION WILL MODIFY YOUR SOURCE CODE. KEEP BACKUPS.
    If `expected` is not given, replaces the current line with an equality
    assertion. This is useful when manually testing side-effect-free code to
    automatically create automated tests.
    """
    # Materialize generators so the value can be compared and repr'd.
    if hasattr(value, '__next__'):
        value = list(value)
    if expected is not NONE:
        # Second run: the expected value is already hardcoded — verify it.
        try:
            assert value == expected
        except AssertionError:
            print('TEST FAILED: expected\n{}\ngot\n{}\n'.format(repr(expected), repr(value)))
            raise
        return value
    # First run: rewrite the calling line, appending repr(value) as the
    # `expected` argument, e.g. `test(expr)` -> `test(expr, <result>)`.
    caller = getframeinfo(stack()[1][0])
    if caller.filename == '<stdin>':
        raise ValueError("Can't use `replace_me` module in interactive interpreter.")
    line_number = caller.lineno-1
    with open(caller.filename, 'r+', encoding='utf-8') as f:
        lines = f.read().split('\n')
        # Split the line into indentation and the call text up to the
        # closing parenthesis, then splice the result in before it.
        spaces, rest = re.match(r'^(\s*)(.+\))', lines[line_number]).groups()
        lines[line_number] = spaces + rest[:-1] + ', {})'.format(repr(value))
        f.seek(0)
        f.truncate()
        f.write('\n'.join(lines))
    return value
def hardcode_me(value):
    """
    ** ATTENTION **
    CALLING THIS FUNCTION WILL MODIFY YOUR SOURCE CODE. KEEP BACKUPS.
    Replaces the call to this function with the hardcoded representation of
    the given value. Limitations: must use the function name "hardcode_me"
    and the call must be a single line.
    assert hardcode_me(1+1) == 2
    becomes
    assert 2 == 2
    This code does a string replacement in a very naive way, so don't try
    tricky situations (e.g. having a string containing "hardcode_me()" in the
    same line).
    """
    import re
    caller = getframeinfo(stack()[1][0])
    if caller.filename == '<stdin>':
        raise ValueError("Can't use `replace_me` module in interactive interpreter.")
    if len(caller.code_context) != 1 or 'hardcode_me' not in caller.code_context[0]:
        raise ValueError("Can only hardcode single-line calls that use the name 'hardcode_me'.")
    line_number = caller.lineno-1
    with open(caller.filename, 'r+', encoding='utf-8') as f:
        lines = f.read().split('\n')
        line = lines[line_number]
        def replace(match):
            # Our goal here is to replace everything inside the matching
            # parenthesis, while ignoring literal strings.
            parens = 1
            index = 0
            string = match.group(1)
            # Scan character by character until the opening parenthesis of
            # the hardcode_me(...) call is balanced again.
            while parens:
                if string[index] == ')':
                    parens -= 1
                elif string[index] == '(':
                    parens += 1
                elif string[index] in '"\'':
                    # Skip over a string literal, honoring escaped quotes.
                    while index is not None:
                        index = string.index(string[index], index+1)
                        if string[index-1] != '\\':
                            # TODO: \\" breaks this
                            break
                if index is None or index >= len(string):
                    raise ValueError('Found unbalaced parenthesis while trying to hardcode value. Did you use line breaks?')
                index += 1
            # Substitute the whole call with the value's repr, keeping
            # whatever followed the closing parenthesis.
            return repr(value) + string[index:]
        modified_line = re.sub(r'(?:replace_me\.)?hardcode_me\((.+)', replace, line)
        lines = lines[:line_number] + [modified_line] + lines[line_number+1:]
        f.seek(0)
        f.truncate()
        f.write('\n'.join(lines))
    return value
if __name__ == '__main__':
    # Demo: running this file rewrites the examples below IN THIS FILE.
    # These two lines will become the same:
    # Hello World
    replace_me("Hello World", as_comment=True)
    # Code generation. Creates a hard coded list of 100 numbers.
    replace_me('numbers = ' + str(list(range(100))))
    import random
    # The next comment will be replaced with a random number.
    insert_comment(random.randint(1, 10))
    # ??
    # Pseudo-quine, replaces the line with itself.
    quine = 'replace_me(quine)'
    replace_me(quine)
    test(1+1)
    # becomes
    test(1+1, 2)
    assert hardcode_me(1+1) == 2
    # becomes
assert 2 == 2
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 20:28:14 2020
@author: Michele Milesi 844682
"""
import re
#Takes the name of a file as parameter and returns
#its lines collected in a list.
def read_file(file_name):
    with open(file_name, 'r') as input_file:
        return input_file.readlines()
#Takes a dictionary, a key and a value to append under that key;
#the list is created on first use.
def update_dict(dictionary, key, value):
    dictionary.setdefault(key, []).append(value)
    return
#Takes a list of strings, strips trailing whitespace from each one,
#removes every comment and discards blank rows.
#Each surviving row is paired with its 1-based position in the file.
#Returns the new list of tuples (line_number, row).
def format_rows(string_list):
    formatted = []
    for line_number, raw_row in enumerate(string_list, start=1):
        # Keep everything up to the first '#' that is not inside doublequotes.
        stripped = re.search('((("[^"]*")|[^#])*)#?', raw_row.rstrip()).group(1)
        if stripped != '':
            formatted.append((line_number, stripped))
    return formatted
#Checks that the attributes field is correct.
#Every attribute must end with a semicolon (;) and be separated from the next by exactly one space.
#Every attribute is a name/value pair whose two parts must be separated by exactly one space.
#Non-numeric values must be enclosed in doublequotes.
#The gene_id and transcript_id attributes are mandatory and must be the first two attributes of every row.
#The transcript_id value of 'intron_CNS' records must differ from "".
#The transcript_id value of 'inter' and 'inter_CNS' records must equal "".
#Note also that attribute names should not be enclosed in doublequotes: if that
#happened, the mandatory attributes would not be recognized.
def check_attributes(row, errors):
    field_list = row[1].split('\t')
    mandatory_attributes = {'transcript_id': False, 'gene_id': False}
    attribute_order = [] #attribute names in order of appearance in the record
    attribute_name_with_dublequotes = False
    feature = field_list[2]
    attribute_list = re.findall(r'((?:(?:\"[^\"]*\")|\w)+(?:(?:\"[^\"]*\")|[\w\.]|\s)*)', field_list[8]) #finds every candidate name/value pair (separators excluded)
    attribute_list = [attribute.strip() for attribute in attribute_list]
    if '; '.join(attribute_list) + ';' != field_list[8]: #each attribute must end with a semicolon and be followed by exactly one space
        update_dict(errors[0], row[0], 'Illegal attribute separator: attributes must end in a semicolon which must then be separated by exactly one space character')
    else:
        for attribute in attribute_list:
            attribute_component = re.findall(r'((?:\"[^\"]*\")|[\w\.]+)', attribute) #splits each candidate pair into its components
            if len(attribute_component) != 2 or ' '.join(attribute_component) != attribute: #more than two parts, or parts not separated by exactly one space
                update_dict(errors[0], row[0], 'Attribute ' + attribute_component[0] + ' has the wrong format: each attribute must be a pair name value separated by exactly one space')
            elif not attribute_component[1].isnumeric() and (attribute_component[1][0] != '\"' or attribute_component[1][-1] != '\"'): #non-numeric values must be doublequoted
                update_dict(errors[0], row[0], 'Textual value of ' + attribute_component[0] + ' must be surrounded by doublequotes')
            if len(attribute_component) >= 2:
                if attribute_component[0][0] == '\"' and attribute_component[0][-1] == '\"': #is the attribute name enclosed in doublequotes?
                    attribute_name_with_dublequotes = True
                if attribute_component[0] in mandatory_attributes: #is it a mandatory attribute?
                    mandatory_attributes[attribute_component[0]] = True
                attribute_order.append(attribute_component[0])
                if attribute_component[0] == 'transcript_id':
                    if feature == 'intron_CNS' and attribute_component[1] == '""':
                        update_dict(errors[0], row[0], "Illegal value of transcript_id: a 'intron_CNS' record musn't have transcript_id equal to \"\"")
                    if feature in ['inter_CNS', 'inter'] and attribute_component[1] != '""':
                        update_dict(errors[0], row[0], "Illegal value of transcript_id: a '" + feature + "' record must have transcript_id equal to \"\"")
        if len(attribute_order) >= 2 and (attribute_order[0] not in mandatory_attributes or attribute_order[1] not in mandatory_attributes): #the first two attributes must be gene_id and transcript_id
            update_dict(errors[0], row[0], "transfert_id and gene_id must be the firsts two attributes")
        for key in mandatory_attributes: #mandatory attributes must all be present
            if not mandatory_attributes[key]:
                update_dict(errors[0], row[0],'Attribute ' + key + ' is required')
    if attribute_name_with_dublequotes: #at least one attribute name in the row was doublequoted
        update_dict(errors[0], row[0], "WARNING: attribute names shouldn't be sourrounded by doublequotes")
    return
#Checks that the value of the frame field is correct.
#For split (non-contiguous) start_codon/stop_codon records the frame must be '0', '1' or '2';
#for contiguous ones it must be '0'.
#For every other feature the frame must be '0', '1', '2' or '.'.
def check_frame(row, errors):
    fields = row[1].split('\t')
    frame = fields[7]
    feature = fields[2]
    if feature == 'start_codon' or feature == 'stop_codon':
        # Only convert start/end when the row is known to hold valid integers
        # (rows with bad coordinates are listed in errors[1]).
        if row[0] not in errors[1]:
            start = int(fields[3])
            end = int(fields[4])
            if end - start > 0 and frame != '0':
                update_dict(errors[0], row[0], "Illegal frame (" + frame + "): contiguous " + feature + " must have frame '0'")
            elif end - start == 0 and frame not in ['0', '1', '2']:
                update_dict(errors[0], row[0], "Illegal frame (" + frame + "): " + feature + " must have a '0' or '1' or '2' in frame field")
    else:
        if frame not in ['0', '1', '2', '.']:
            # Fixed message: '3' was wrongly listed as an accepted value
            # even though the accepted set is {'0', '1', '2', '.'}.
            update_dict(errors[0], row[0], "Illegal frame (" + frame + "): must have '0' or '1' or '2' or '.' in frame field")
#Checks that the value of the strand field is correct:
#it must be either '+' or '-'.
def check_strand(row, errors):
    strand = row[1].split('\t')[6]
    if strand not in ('+', '-'):
        update_dict(errors[0], row[0], "Illegal strand (" + strand + "): must have '+' or '-' in strand field")
#Checks that the value of the score field is correct:
#it must be '.', an integer or a floating point number.
def check_score(row, errors):
    score = row[1].split('\t')[5]
    # fullmatch (instead of match) so values with trailing garbage such as
    # "1.2x" are rejected; match only anchored at the start of the string.
    if score != '.' and not re.fullmatch(r"([0-9]+(\.[0-9]+)?)|(\.[0-9]+)", score):
        update_dict(errors[0], row[0], "Illegal score (" + score + "): must have a number or a dot in score field")
#Checks that the start and end fields are correct:
#both must be integers >= 1 and start <= end must hold.
def check_start_end(row, errors):
    fields = row[1].split('\t')
    start = fields[3]
    end = fields[4]
    line = row[0]
    wrong_start_end = False
    # str.isdecimal() rejects signs, decimals and — unlike the original
    # re.search("[^0-9]+") probe — also the empty string, which previously
    # slipped through and made int(start) raise ValueError.
    if not start.isdecimal():
        update_dict(errors[0], line, "Illegal start (" + start + "): must have an integer >= 1 in start field")
        wrong_start_end = True
    elif int(start) <= 0:
        update_dict(errors[0], line, "Illegal start (" + start + "): must have an integer >= 1 in start field")
        wrong_start_end = True
    if not end.isdecimal():
        update_dict(errors[0], line, "Illegal end (" + end + "): must have an integer >= 1 in end field")
        wrong_start_end = True
    elif int(end) <= 0:
        update_dict(errors[0], line, "Illegal end (" + end + "): must have an integer >= 1 in end field")
        wrong_start_end = True
    if not wrong_start_end and int(start) > int(end):
        update_dict(errors[0], line, "Illegal value of start and end (" + start + ", " + end + "): it must be start <= end")
        wrong_start_end = True
    if wrong_start_end:
        # Remember this line so later checks skip the int() conversions on it.
        errors[1].append(line)
#Checks that the fields of every row are separated by the correct separator and that there are exactly 9 fields.
#Also checks that the mandatory records 'CDS', 'start_codon' and 'stop_codon' are present
#and that the source is unique within the file.
#When these first constraints hold, the single-field validation functions are called for every row.
#Finally returns a dictionary keyed by feature whose values are the lists of rows
#carrying that feature; this dictionary is used by later multi-record checks.
def check_fields(rows, errors):
    required_feature = ['CDS', 'start_codon', 'stop_codon']
    row_dict = {'CDS': [], 'start_codon': [], 'stop_codon': [], '5UTR': [], '3UTR': [], 'inter': [], 'inter_CNS': [], 'intron_CNS': [], 'exon': []} #returned dictionary, also used to verify the mandatory records
    source_set = set()
    for row in rows:
        if re.search('\t', row[1]): #the field separator must be present in the row
            field_list = row[1].split('\t')
            field_number = len(field_list)
            # NOTE(review): field_list[2] raises IndexError when a row has a
            # tab but fewer than 3 fields — confirm inputs never do that.
            feature = field_list[2]
            if field_number != 9: #there must be exactly 9 fields
                update_dict(errors[0], row[0], "Wrong number of fields (" + str(field_number) + ") -> expected 9")
            elif feature in row_dict: #when everything is fine and the feature is valid, validate the single fields
                source_set.add(field_list[1])
                check_start_end(row, errors)
                check_score(row, errors)
                check_strand(row, errors)
                check_frame(row, errors)
                check_attributes(row, errors)
                update_dict(row_dict, feature, row)
        else:
            update_dict(errors[0], row[0], "Illegal field separator")
    for feature in required_feature:
        if len(row_dict[feature]) == 0:
            update_dict(errors[0], 0, "The file needs a "+ feature +" record")
    if len(source_set) > 1:
        update_dict(errors[0], 0, "The source must be unique in the file")
    return row_dict
#Checks that the lengths of all 'start_codon' records sum to <= 3bp.
#Checks that the coordinates of every 'start_codon' record fall inside the coordinates of at least one 'CDS' record.
#Also checks that the coordinates of every 'start_codon' record lie outside the coordinates of all '5UTR' records.
def check_start_codon(row_dict, errors):
    start_codon_length = 0
    start_codon_rows = []
    rows_not_in_cds = [] #line numbers of 'start_codon' rows not contained in the coordinates of any 'CDS' record
    rows_in_5UTR = [] #line numbers of rows contained in the coordinates of at least one '5UTR' record
    for row in row_dict['start_codon']:
        row_in_cds = False
        start_codon_rows.append(str(row[0]))
        if row[0] not in errors[1]: #start/end are only converted when the row is known to hold valid integers
            start = int(row[1].split('\t')[3])
            end = int(row[1].split('\t')[4])
            start_codon_length += end - start + 1
            for cds_row in row_dict['CDS']:
                if cds_row[0] not in errors[1]: #skip CDS rows whose start/end are invalid
                    cds_start = int(cds_row[1].split('\t')[3])
                    cds_end = int(cds_row[1].split('\t')[4])
                    if cds_start <= start and end <= cds_end:
                        row_in_cds = True
                        break
            for row_5UTR in row_dict['5UTR']:
                if row_5UTR[0] not in errors[1]: #skip 5UTR rows whose start/end are invalid
                    start_5UTR = int(row_5UTR[1].split('\t')[3])
                    end_5UTR = int(row_5UTR[1].split('\t')[4])
                    if start in range(start_5UTR, end_5UTR + 1) or end in range(start_5UTR, end_5UTR + 1):
                        rows_in_5UTR.append(str(row[0]))
                        break
            if not row_in_cds:
                rows_not_in_cds.append(str(row[0]))
    if start_codon_length > 3:
        update_dict(errors[0], 0, 'The start_codon feature is up to 3bp long in total, violation at lines: ' + ', '.join(start_codon_rows))
    if len(rows_not_in_cds) > 0:
        update_dict(errors[0], 0, 'The start_codon must be included in coordinates for CDS features, violation at lines: ' + ', '.join(rows_not_in_cds))
    if len(rows_in_5UTR):
        update_dict(errors[0], 0, 'The start_codon must be exluded from the coordinates for the "5UTR" features, violation at lines: ' + ', '.join(rows_in_5UTR))
    return
#Checks that the lengths of all 'stop_codon' records sum to <= 3bp.
#Checks that the coordinates of every 'stop_codon' record are excluded from the coordinates of all 'CDS' and '3UTR' records.
def check_stop_codon(row_dict, errors):
    stop_codon_length = 0
    stop_codon_rows = []
    rows_in_cds = [] #line numbers of 'stop_codon' rows contained in the coordinates of at least one 'CDS' record
    rows_in_3UTR = [] #line numbers of rows contained in the coordinates of at least one '3UTR' record
    for row in row_dict['stop_codon']:
        stop_codon_rows.append(str(row[0]))
        if row[0] not in errors[1]: #start/end are only converted when the row is known to hold valid integers
            start = int(row[1].split('\t')[3])
            end = int(row[1].split('\t')[4])
            stop_codon_length += end - start + 1
            for cds_row in row_dict['CDS']:
                if cds_row[0] not in errors[1]: #skip CDS rows whose start/end are invalid
                    cds_start = int(cds_row[1].split('\t')[3])
                    cds_end = int(cds_row[1].split('\t')[4])
                    if start in range(cds_start, cds_end + 1) or end in range(cds_start, cds_end + 1):
                        rows_in_cds.append(str(row[0]))
                        break
            for row_3UTR in row_dict['3UTR']:
                if row_3UTR[0] not in errors[1]: #skip 3UTR rows whose start/end are invalid
                    start_3UTR = int(row_3UTR[1].split('\t')[3])
                    end_3UTR = int(row_3UTR[1].split('\t')[4])
                    if start in range(start_3UTR, end_3UTR + 1) or end in range(start_3UTR, end_3UTR + 1):
                        rows_in_3UTR.append(str(row[0]))
                        break
    if stop_codon_length > 3:
        update_dict(errors[0], 0, 'The stop_codon feature is up to 3bp long in total: violation at lines: ' + ', '.join(stop_codon_rows))
    if len(rows_in_cds) > 0:
        update_dict(errors[0], 0, 'The stop codon must not be included in the CDS features, violation at lines: ' + ', '.join(rows_in_cds))
    if len(rows_in_3UTR):
        update_dict(errors[0], 0, 'The stop codon must be exluded from the coordinates for the "3UTR" features, violation at lines: ' + ', '.join(rows_in_3UTR))
    return
#Writes the violations (if any) to the output file ./risulato.txt
def print_errors(file_input_name, errors):
    # NOTE(review): "risulato" looks like a typo for "risultato", but the
    # name is kept because other tooling may expect this exact file.
    # A `with` block replaces the manual open/close so the handle is
    # released even if a write fails.
    with open("./risulato.txt", "w") as out_file:
        if errors == {}:
            out_file.write("The file '"+ file_input_name + "' is correct\n")
        else:
            out_file.write('Violations in file \'' + file_input_name + '\':\n\n')
            if 0 in errors:
                # Key 0 collects errors that are not tied to a single line;
                # report them first. ("Gereral" typo in the original fixed.)
                out_file.write("General errors:\n")
                for error in errors[0]:
                    out_file.write("\t" + error + '\n')
                del errors[0]  # NOTE: mutates the caller's dict
                out_file.write('\n')
            # Remaining errors are grouped by line number, ascending.
            for line in dict(sorted(errors.items())):
                out_file.write("At line " + str(line) + ":\n")
                for error in errors[line]:
                    out_file.write("\t" + error + '\n')
                out_file.write('\n')
#`errors` is a tuple made of a dictionary and a list.
#The dictionary holds every violation found: the key is the line number and the value is the list of all violations on that line.
#Key 0 collects errors spanning several lines or not attributable to a single line: for example,
#a "missing start_codon" violation concerns the whole file rather than one line.
#The list holds every line with irregular start/end values (or start > end); it allows a quick check of
#whether the start and end fields can be converted to int when later validations need those values.
errors = ({}, [])
file_input_name = input("File to validate: ")
file_rows = read_file(file_input_name)
format_file_rows = format_rows(file_rows)
row_dict = check_fields(format_file_rows, errors)
check_start_codon(row_dict, errors)
check_stop_codon(row_dict, errors)
print_errors(file_input_name, errors[0])
import views
import unittest
from mock import patch
class TestFlask(unittest.TestCase):
    """Smoke tests for the Flask views: status codes and index content."""

    def setUp(self):
        # Fresh test client per test method.
        self.app = views.app.test_client()

    def testStatus(self):
        # Every public page must answer with HTTP 200.
        for url in ['/', '/sports_form', '/sports_form_ajax']:
            self.assertEqual(self.app.get(url).status, '200 OK')

    def testIndexContents(self):
        # The home page carries both the body text and the footer.
        page = self.app.get('/').get_data()
        self.assertTrue('This is the home page' in page)
        self.assertTrue('This is the footer' in page)
@patch('views.render_template')
@patch('views.request')
class MockTestFlask(unittest.TestCase):
    """Checks views.result() renders the survey template with the form data."""

    def testResult(self, mock_request, mock_render_template):
        views.result()
        form = mock_request.form
        mock_render_template.assert_called_once_with(
            'result.html',
            page_title='Survey results',
            BaskTeam=form['basketball'],
            BaskLevel=form['basketball_level'],
            BaseTeam=form['baseball'],
            BaseLevel=form['baseball_level'],
        )
if __name__ == '__main__':
    # Run the whole suite when this file is executed directly.
    unittest.main()
|
import uuid
from blog import mongo
from flask import jsonify
from werkzeug.security import generate_password_hash
class User:
    """Signup helper: validates registration data and creates the account."""

    def __init__(self, data):
        # Raw signup payload; expected keys: username, email,
        # password, password2.
        self.data = data

    def validate(self, data):
        """Return True when `data` is acceptable, else a {'detail': ...} dict."""
        # Username: required and must be unused.
        username = data.get('username')
        if not username:
            return {'detail': 'Username required!'}
        if mongo.db.user.find_one({'username': data['username']}):
            return {'detail': 'Username already in use!'}
        # Email: required, unused, and roughly well-formed.
        email = data.get('email')
        if not email:
            return {'detail': 'Email required!'}
        if mongo.db.user.find_one({'email': data['email']}):
            return {'detail': 'Email already in use!'}
        if '@' not in email or '.' not in email:
            return {'detail': 'Invalid email format!'}
        # Passwords: both present and identical.
        password = data.get('password')
        password2 = data.get('password2')
        if not (password and password2):
            return {'detail': 'password and password2 required!'}
        if password != password2:
            return {'detail': 'Passwords must match!'}
        return True

    def create(self):
        """Validate and insert the user; return a (json, status) pair."""
        outcome = self.validate(self.data)
        if outcome is True:
            new_user = {
                'public_id': uuid.uuid4().hex,
                'username': self.data['username'],
                'email': self.data['email'],
                'password': generate_password_hash(self.data['password']),
                'posts': []
            }
            mongo.db.user.insert_one(new_user)
            # Strip fields that must not leave the server.
            del new_user['_id']
            del new_user['password']
            return jsonify(new_user), 201
        return jsonify(outcome), 400
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import OneHotEncoder, MultiLabelBinarizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
import pickle
# Train a naive-Bayes gender classifier from "first name + province" strings.
df = pd.read_csv("data1.csv")
# print(bankdata.head())
# mlb = MultiLabelBinarizer()
# mhv = mlb.fit_transform(fbrdata['name'].apply(lambda x: set(x.split(' '))))
# df_out = pd.DataFrame(mhv,columns=mlb.classes_)
# enc = OneHotEncoder(sparse=False)
# ohe_vars = ['name'] # specify the list of columns here
# ohv = enc.fit_transform(fbrdata.loc[:,ohe_vars])
# ohe_col_names = ['%s_%s'%(var,cat) for var,cats in zip(ohe_vars, enc.categories_) for cat in cats]
# df_out.assign(**dict(zip(ohe_col_names,ohv.T)))
# Feature string: lowercased "<first name> <province>".
splitted = df['name'].str.split()
df['first_name'] = splitted.str[0]
df['feature'] = df['first_name']+" "+df['province'].astype(str)
df['feature'] = df['feature'].apply(lambda x: str(x).lower())
df = df[['feature','gender']]
# Drop degenerate one-character features.
df = df[[len(e)>1 for e in df.feature]]
#df = df.drop_duplicates()
#df.to_csv('data2.csv')
# Bag-of-words encoding of the feature strings.
Xfeatures = df["feature"]
cv = CountVectorizer()
X = cv.fit_transform(Xfeatures)
# print(cv.get_feature_names())
y = df["gender"]
# print(fbrdata.head())
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state=42)
clf = MultinomialNB()
clf.fit(X_train,y_train)
# Training accuracy (NOTE(review): the held-out X_test is never scored).
print(clf.score(X_train,y_train))
# Sanity check on a single name.
sample_name = ["Usman"]
vect = cv.transform(sample_name).toarray()
print(clf.predict(vect))
# Persist the model and the fitted vectorizer for later inference.
filename = 'finalized_model1.pkl'
pickle.dump(clf, open(filename, 'wb'))
vectorizer_file = 'vectorizer1.pkl'
pickle.dump(cv, open(vectorizer_file,'wb'))
# svclassifier = SVC(kernel='linear')
# svclassifier.fit(X_train, y_train)
# y_pred = svclassifier.predict(X_test)
#
# print(y_pred)
# print(confusion_matrix(y_test,y_pred))
# print(classification_report(y_test,y_pred))
# |
from user import user
from admin import admin
from membre import membre
# Console login flow: identify the user, then open the member or admin menu
# depending on the role stored in the record returned by user.connexion().
dico = {'id':'','prenom':'','nom':'','role':'','pres':'','mail':'','mdp':''}
u = user(dico)
#print(u.connecte)
print("Bonjour, Veuillez vous identifier")
_id = int(input("Veuillez Saisir votre identifiant: "))
psw = input("Veuillez Saisir votre Mot De Passe: ")
dico = u.connexion(_id,psw)
print(u.connecte)
if u.connecte == False:
    print("Echec de connection")
else:
    if dico['role']=='membre':
        # Member path: optionally update the member's own profile.
        m=membre(dico)
        m.connecte=True
        choix = m.menu()
        if choix =="y":
            _id = m.id
            _prenom = input("Prenom: ")
            _nom = input("Nom: ")
            _role =input("Role: ")
            _presentation =input("Presentation: ")
            _email = input("Mail: ")
            _mdp = input("PSW: ")
            # Persist the update only when the new password passes validation.
            if u.checkMdp(_mdp) == 'ok' :
                dico = {'id':_id,'prenom':_prenom,'nom':_nom,'role':_role,'pres':_presentation,'mail':_email,'mdp':_mdp}
                m.update(dico)
            else:
                print("mdp invalide")
        else :
            print("choix non compris")
    else :
        # Any non-member role gets the admin menu: add or delete users.
        a=admin(dico)
        a.connecte=True
        choix = a.menu()
        if choix == "1":
            a.addUser()
        elif choix == "2":
            a.delUser()
        else :
            print("choix non compris")
|
#!/usr/bin/env python3
from subprocess import check_output, check_call, Popen, PIPE
from tempfile import NamedTemporaryFile
import sys
import json
import codecs
import sys
try:
import ijson
HAS_IJSON = True
except ImportError:
HAS_IJSON = False
print("WARN: ijson not available. I may choke on big mailbox.",
file=sys.stderr)
# basic functions
def cmd_run(cmd):
    """Run *cmd* (argv list) to completion, echoing it first.

    Raises CalledProcessError on a non-zero exit status."""
    print('Running (run): {}'.format(cmd))
    return check_call(cmd)

def cmd_capture(cmd):
    """Run *cmd* and return its captured stdout (bytes)."""
    print('Running (capture): {}'.format(cmd))
    return check_output(cmd)

def cmd_stream(cmd):
    """Start *cmd* with stdout piped; return the Popen for streaming reads."""
    print('Running (stream): {}'.format(cmd))
    p = Popen(cmd, stdout = PIPE)
    return p
# reconstruct responses from thread info
def traverse(msg, ms, parent = None):
    """Walk one message subtree, appending (message_id, parent_id) pairs to *ms*.

    *msg* is a notmuch-style pair: [headers, children]. The root (parent=None)
    contributes no pair itself; every descendant is recorded with its parent.
    """
    assert len(msg) == 2
    head, children = msg
    head_id = head['id']
    if parent is not None:
        ms.append((head_id, parent))
    for child in children:
        traverse(child, ms, parent = head_id)

def get_responses(obj):
    """Collect (message_id, parent_id) reply pairs from every thread in *obj*."""
    pairs = []
    for thread in obj:
        get_responses_from_thread(thread, pairs)
    return pairs

def get_responses_from_thread(thread, ms):
    """Collect reply pairs from each top-level message of a single thread."""
    for top_message in thread:
        traverse(top_message, ms, None)
def get_replied_ids(query):
    """Return the sorted set of message ids that received at least one reply,
    among threads matching *query*, as reported by `notmuch show`."""
    notmuch = [ 'notmuch', 'show', '--format=json',
        '--entire-thread=true', '--body=false', query ]
    out = cmd_stream(notmuch)
    ms = []
    try:
        # Decode notmuch output leniently: bad bytes become U+FFFD.
        reader = codecs.getreader('utf8')
        reader = reader(out.stdout, errors = 'replace')
        if HAS_IJSON:
            # Stream thread-by-thread to bound memory on huge mailboxes.
            threads = ijson.items(reader, 'item')
            for thread in threads:
                get_responses_from_thread(thread, ms)
        else:
            obj = json.load(reader)
            ms = get_responses(obj)
    finally:
        pass
    # ms holds (child, parent) pairs; the parents are the replied-to ids.
    replied = set( x[1] for x in ms )
    return sorted(replied)
# stuff to untag messages
def untag_ids(ids):
    """Batch-retag the given message ids: remove 'noresponse', add 'response'."""
    with NamedTemporaryFile() as batch_file:
        for i in ids:
            line = '-noresponse +response -- id:{}\n'.format(i)
            batch_file.write(line.encode('utf8'))
        # Flush so the notmuch subprocess sees the full batch on disk.
        batch_file.flush()
        notmuch = [ 'notmuch', 'tag',
            '--batch', '--input={}'.format(batch_file.name) ]
        cmd_run(notmuch)
# high-level commands
def index(query = None):
    """Untag replied-to messages among those matching *query* (default: new mail)."""
    # indexes only the new mails
    query = 'tag:new' if query is None else query
    replied = get_replied_ids(query)
    if len(replied) == 0:
        print('Nothing to untag.')
    else:
        print('IDs to untag ({}): {}'.format(len(replied), ', '.join(replied)))
        untag_ids(replied)
def reindex(query = None):
    """Rebuild response tags from scratch for all messages matching *query*."""
    # reindexes messages that match the query, it may take some time
    # 1. tag everything with noresponse
    query = '*' if query is None else query
    cmd_run([ 'notmuch', 'tag', '+noresponse', '-response', query ])
    # 2. get all reply ids
    replied = get_replied_ids(query)
    print('Replies to untag: {}'.format(len(replied)))
    # 3. untag these ids
    untag_ids(replied)
if __name__ == '__main__':
    # CLI: <index|reindex> [query]; exits 1 on bad usage or unknown command.
    if len(sys.argv) not in [ 2, 3 ]:
        print('Usage: {} <index|reindex> [query]'.format(sys.argv[0]))
        sys.exit(1)
    cmd = sys.argv[1]
    query = None if len(sys.argv) == 2 else sys.argv[2]
    if cmd == 'index':
        index(query)
    elif cmd == 'reindex':
        reindex(query)
    else:
        print('Unknown command: {}'.format(cmd))
        sys.exit(1)
|
from pypi_simple import __version__
# Sphinx configuration for the pypi-simple documentation.
project = "pypi-simple"
author = "John T. Wodder II"
copyright = "2018-2023 John T. Wodder II"  # noqa: A001

# Extensions: autodoc for API docs, intersphinx for cross-project links,
# viewcode for source links, and a copy button on code samples.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx_copybutton",
]

# Document all members, including undocumented ones, in source order.
autodoc_default_options = {
    "members": True,
    "undoc-members": True,
}
autodoc_member_order = "bysource"

# Cross-reference targets for the Python and requests documentation.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "requests": ("https://requests.readthedocs.io/en/latest/", None),
}

exclude_patterns = ["_build"]
source_suffix = ".rst"
source_encoding = "utf-8"
master_doc = "index"

# Keep the displayed version in sync with the installed package.
version = __version__
release = __version__
today_fmt = "%Y %b %d"
default_role = "py:obj"
pygments_style = "sphinx"

# HTML output: Read the Docs theme.
html_theme = "sphinx_rtd_theme"
html_theme_options = {
    "collapse_navigation": False,
    "prev_next_buttons_location": "both",
}
html_last_updated_fmt = "%Y %b %d"
html_show_sourcelink = True
html_show_sphinx = True
html_show_copyright = True

# Strip REPL/shell prompts when copying code blocks.
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Title :py_exception.py
# Description :
# Author :Devon
# Date :2018/1/17
# Version :1.0
# Platform : windows
# Usage :python test4.py
# python_version :2.7.14
#==============================================================================
# Explanation of the individual exception-handling clauses
# Demonstrates the full try/except/else/finally chain on integer division.
try:
    x = int(input('input x:'))
    y = int(input('input y:'))
    print('x/y = ',x/y)
except ZeroDivisionError:  # catch division by zero
    print("ZeroDivision")
except (TypeError,ValueError) as e:  # catch several exception types at once
    print(e)
except:  # catch any remaining exception type
    # NOTE(review): a bare except also swallows SystemExit/KeyboardInterrupt.
    print("it's still wrong")
else:  # runs only when no exception was raised
    print('it work well')
finally:  # always runs, whether or not an exception occurred
    print("Cleaning up")
# Ask for two numbers, allowing up to three failed attempts; on success print
# their sum and stop. The original used Python 2 print statements, which are
# syntax errors under Python 3 and inconsistent with the print() calls above;
# they are converted to print() calls here.
i = 0
f = lambda x, y: x + y  # helper: sum of two numbers
# Example: use try..except..else to read numbers, break out after three errors.
while i < 3:
    try:
        num1 = int(input("please enter a number:"))
        num2 = int(input("please enter the second number:"))
        print("这两个数的和为:", f(num1, num2))
    except ValueError:
        # Count the failure and report how many attempts have been used.
        print("You enter the error value.")
        i += 1
        print(i)
        if i >= 3:
            print("失败次数过多。")
            continue
    else:
        # Successful parse: report and leave the loop.
        print("there is no error in the code.")
        break
    finally:
        pass
|
from flask import Flask, render_template
import flask_restful
from core.excel_handler import excel_reader
# Path of the spreadsheet served by the /data endpoint.
EXCEL_FILE = 'datas/test_data.xls'
app = Flask(__name__)
api = flask_restful.Api(app)

@app.route("/")
def index():
    """Serve the front-end page."""
    return render_template('index.html')

class ExcealReader(flask_restful.Resource):
    """REST resource exposing the spreadsheet contents as JSON.

    NOTE(review): the class name is misspelled ("Exceal"); renaming would also
    require updating the add_resource call below.
    """
    def get(self):
        # return only the first sheet data
        return excel_reader(EXCEL_FILE)[0]

api.add_resource(ExcealReader, '/data')

if __name__ == '__main__':
    # NOTE(review): debug mode enabled; disable for production deployments.
    app.run(debug=True)
|
from unittest import mock
from .helpers import ProviderForTesting
class TestHooks:
    """Tests for the `hooks post-commit` command.

    Each test spies on ProviderForTesting.stop/start and checks that a commit
    followed by the post-commit hook stops the running provider (passing the
    commit message and the resolved task) and immediately restarts it.
    """

    def test_basic(self, cmd, mocker, commit):
        # Started repo, no task config: stop() gets the commit message, task=None.
        result, _ = cmd('start', git_inited=True)
        assert result.exit_code == 0
        mocker.spy(ProviderForTesting, 'stop')
        mocker.spy(ProviderForTesting, 'start')
        commit('Some message')
        result, _ = cmd('hooks post-commit')
        assert result.exit_code == 0
        ProviderForTesting.stop.assert_called_once_with(mock.ANY, 'Some message', force=False, task=None)
        ProviderForTesting.start.assert_called_once_with(mock.ANY)

    def test_ignored_non_running_repos(self, cmd, mocker, commit):
        # Repo initialised but never started: the hook must not touch the provider.
        result, _ = cmd('init --no-hook', inited=False, git_inited=True)
        assert result.exit_code == 0
        mocker.spy(ProviderForTesting, 'stop')
        mocker.spy(ProviderForTesting, 'start')
        commit('Some message')
        result, _ = cmd('hooks post-commit')
        assert result.exit_code == 0
        assert ProviderForTesting.stop.call_count == 0
        assert ProviderForTesting.start.call_count == 0

    def test_task_static(self, cmd, mocker, commit):
        # Config supplies a fixed task name, passed through to stop().
        result, _ = cmd('start', git_inited=True)
        assert result.exit_code == 0
        mocker.spy(ProviderForTesting, 'stop')
        mocker.spy(ProviderForTesting, 'start')
        commit('Some message')
        result, _ = cmd('hooks post-commit', config='task_static.config')
        assert result.exit_code == 0
        ProviderForTesting.stop.assert_called_once_with(mock.ANY, 'Some message', force=False, task='some task name')
        ProviderForTesting.start.assert_called_once_with(mock.ANY)

    def test_task_dynamic_branch(self, cmd, mocker, commit):
        # Task id is extracted from the branch name ("#123_...").
        result, _ = cmd('start', git_inited=True)
        assert result.exit_code == 0
        mocker.spy(ProviderForTesting, 'stop')
        mocker.spy(ProviderForTesting, 'start')
        commit('Some message', branch='#123_Some_brunch')
        result, _ = cmd('hooks post-commit', config='task_dynamic_branch.config')
        assert result.exit_code == 0
        ProviderForTesting.stop.assert_called_once_with(mock.ANY, 'Some message', force=False, task=123)
        ProviderForTesting.start.assert_called_once_with(mock.ANY)

    def test_task_dynamic_commit(self, cmd, mocker, commit):
        # Task id is extracted from the commit message ("#321 ...").
        result, _ = cmd('start', git_inited=True)
        assert result.exit_code == 0
        mocker.spy(ProviderForTesting, 'stop')
        mocker.spy(ProviderForTesting, 'start')
        commit('#321 Some message')
        result, _ = cmd('hooks post-commit', config='task_dynamic_commit.config')
        assert result.exit_code == 0
        ProviderForTesting.stop.assert_called_once_with(mock.ANY, '#321 Some message', force=False, task=321)
        ProviderForTesting.start.assert_called_once_with(mock.ANY)
|
import json
from rest_framework import status
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
class SnippetListCreateAPIView(APIView):
    """List all snippets, or create a new one.

    The debug print statements that echoed every request payload (and a dead
    json.dumps/json.loads round-trip demo) have been removed: they leaked
    user-submitted data to stdout on every POST and did no work.
    """

    def get(self, request):
        """Return every snippet, serialized."""
        snippets = Snippet.objects.all()
        serializer = SnippetSerializer(snippets, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create a snippet from the request payload.

        The incoming data (plain Python values) is validated and turned into a
        model instance by the serializer; nothing is persisted until
        serializer.save() is called. Invalid payloads get a 400 with errors.
        """
        serializer = SnippetSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SnippetRetrieveUpdateDestroyAPIView(APIView):
    """Retrieve, partially update, or delete a single snippet by primary key."""

    def get_object(self, pk):
        """Fetch the snippet with primary key *pk*, raising Http404 if absent."""
        return get_object_or_404(Snippet, pk=pk)

    def get(self, request, pk):
        """Return one serialized snippet."""
        return Response(SnippetSerializer(self.get_object(pk)).data)

    def patch(self, request, pk):
        """Apply a partial update; invalid data yields a 400 with the errors."""
        serializer = SnippetSerializer(
            self.get_object(pk), data=request.data, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk):
        """Delete the snippet and answer with 204 No Content."""
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# APIView's dispatch() lower-cases the incoming HTTP verb (GET, PATCH, DELETE)
# and calls the matching handler method if one exists. If a handler is
# misnamed (e.g. "dele" instead of "delete"), DELETE requests are not served.
|
import sys
import os
import time
from twisted.python import log, monkey
from twisted.application import service
from twisted.internet import reactor
from buildslave import bot
# states for use below (using symbols prevents typos)
STOPPED = "stopped"
DISCONNECTED = "disconnected"
CONNECTED = "connected"
class Idleizer(service.Service):
    """
    A service that will monitor a buildslave instance for idleness and
    disconnectedness, and gracefully restart it if either of those goes on for
    longer than a configured time.
    This is intended to be instantiated directly in buildbot.tac.
    """

    def __init__(self, buildslave_svc, max_idle_time=None, max_disconnected_time=None):
        """
        Create a service. C{buildslave_svc} should be the
        L{buildslave.bot.BuildSlave} instance created in C{buildbot.tac}.
        C{max_idle_time} is the maximum time, in seconds, that the slave will
        be allowed to idle before it is restarted. Similarly,
        C{max_disconnected_time} is the maximum time it will be allowed to go
        without a connection to a master.
        "Idle" is defined as not beginning a step. Note that this will *not*
        kill a long-running step, because a graceful restart will wait until
        the build is complete.
        """
        self.buildslave_svc = buildslave_svc
        self.max_idle_time = max_idle_time
        self.max_disconnected_time = max_disconnected_time
        self._timer = None  # pending reactor.callLater for the current state
        self._state = STOPPED
        # don't reboot unless this class requested the stop
        self._reboot_on_shutdown = False
        self._setUpMonkeyPatches()

    def startService(self):
        # Install the monkey-patches and start in DISCONNECTED (arming the
        # disconnect timer) until the slave actually connects.
        self._monkey.patch()
        self.changeState(DISCONNECTED)

    def stopService(self):
        # Cancel any outstanding timer and undo the patches.
        self.changeState(STOPPED)
        self._monkey.restore()

    def _clearTimer(self):
        # Cancel and forget the pending delayed call, if any.
        if self._timer:
            self._timer.cancel()
            self._timer = None

    def _setTimer(self, when):
        # (Re)arm the restart timer to fire in `when` seconds.
        self._clearTimer()
        def fired():
            self._timer = None
            self.doRestart()
        self._timer = reactor.callLater(when, fired)

    def changeState(self, new_state):
        """Move between STOPPED/DISCONNECTED/CONNECTED, arming the timeout
        appropriate to the new state (no timer while STOPPED)."""
        if new_state == self._state:
            return
        self._clearTimer()
        self._state = new_state
        if new_state == CONNECTED:
            self._setTimer(self.max_idle_time)
        elif new_state == DISCONNECTED:
            self._setTimer(self.max_disconnected_time)

    def registerActivity(self):
        # Called (via monkey-patch) when a build step begins; pushes the idle
        # deadline forward.
        if self._state == CONNECTED:
            # just reset the timer
            self._setTimer(self.max_idle_time)
        else:
            log.msg("Idleizer: activity while not connected??")

    def maybeReboot(self):
        # Called on slave shutdown; only reboots if doRestart requested it.
        if self._reboot_on_shutdown:
            self.reboot()
        # note that the reactor.stop() will continue when this method returns

    def _setUpMonkeyPatches(self):
        """Hook slave connection/activity/shutdown events without subclassing,
        by wrapping methods on the bot classes and the slave's factory."""
        self._monkey = monkey.MonkeyPatcher()

        def changeStateOn(obj, name, state):
            # Wrap obj.<name> so the state machine is updated before the
            # original method runs.
            old_fn = getattr(obj, name)
            def new_fn(*args, **kwargs):
                self.changeState(state)
                return old_fn(*args, **kwargs)
            self._monkey.addPatch(obj, name, new_fn)
        # this is probably overkill, but it's good to be sure
        changeStateOn(self.buildslave_svc.bf,
                      'gotPerspective', CONNECTED)
        changeStateOn(self.buildslave_svc.bf,
                      'startedConnecting', DISCONNECTED)
        changeStateOn(self.buildslave_svc.bf,
                      'clientConnectionFailed', DISCONNECTED)
        changeStateOn(self.buildslave_svc.bf,
                      'clientConnectionLost', DISCONNECTED)

        def registerActivityOn(obj, name):
            # Wrap obj.<name> to record activity before delegating.
            old_fn = getattr(obj, name)
            def new_fn(*args, **kwargs):
                self.registerActivity()
                return old_fn(*args, **kwargs)
            self._monkey.addPatch(obj, name, new_fn)
        registerActivityOn(bot.SlaveBuilder, 'activity')

        def maybeRebootOn(obj, name):
            # Wrap obj.<name> to trigger the pending reboot before delegating.
            old_fn = getattr(obj, name)
            def new_fn(*args, **kwargs):
                self.maybeReboot()
                return old_fn(*args, **kwargs)
            self._monkey.addPatch(obj, name, new_fn)
        maybeRebootOn(bot.SlaveBuilder, 'remote_shutdown')
        maybeRebootOn(bot.Bot, 'remote_shutdown')

    def doRestart(self):
        """Request a graceful restart: ask the master to shut us down after
        any running build, then reboot when the shutdown arrives."""
        log.msg("I feel very idle and was thinking of rebooting as soon as "
                "the buildmaster says it's OK")
        self._reboot_on_shutdown = True
        # this is a re-implementation of the gracefulShutdown method from
        # bot.py, with a few tweaks:
        #  - if no perspective, reboot rather than calling reactor.stop
        #  - if the callRemote fails, reboot immediately (this will always fail
        #    until we upgrade the masters to 0.8.3 or higher)
        if not self.buildslave_svc.bf.perspective:
            log.msg("No active connection, rebooting NOW")
            self.reboot()
            return
        log.msg("Telling the master we want to shutdown after any running builds are finished")
        d = self.buildslave_svc.bf.perspective.callRemote("shutdown")
        def _shutdownfailed(err):
            if err.check(AttributeError):
                log.msg("Master does not support slave initiated shutdown. Upgrade master to 0.8.3 or later to use this feature.")
            else:
                log.msg('callRemote("shutdown") failed')
                log.err(err)
                log.msg("rebooting NOW, since the master won't talk to us")
                self.reboot()
        d.addErrback(_shutdownfailed)
        # if this deferred succeeds, then we'll get a call to remote_shutdown,
        # which will call self.reboot.

    def reboot(self):
        """Issue the platform's reboot command, then stop the reactor."""
        log.msg("Invoking platform-specific reboot command")
        if sys.platform in ('darwin', 'linux2'):
            # -S means to accept password from stdin, which we then redirect from
            # /dev/null
            # This results in sudo not waiting forever for a password.  If sudoers
            # isn't set up properly, this will fail immediately
            os.system("sudo -S reboot < /dev/null")
        # Windows
        elif sys.platform == "win32":
            os.system("shutdown -f -r -t 0")
        else:
            log.msg("unknown platform " + sys.platform)
        # After starting the shutdown, we just die. If the shutdown fails, then
        # this should trigger some extra monitoring alerts
        reactor.stop()
|
import os
import click
import io
import shutil
import tarfile
import logging
import sonosco.common.audio_tools as audio_tools
import sonosco.common.path_utils as path_utils
from sonosco.datasets.download_datasets.create_manifest import create_manifest
from sonosco.common.utils import setup_logging
from sonosco.common.constants import *
LOGGER = logging.getLogger(__name__)
AN4_URL = 'http://www.speech.cs.cmu.edu/databases/an4/an4_raw.bigendian.tar.gz'
def try_download_an4(target_dir, sample_rate, min_duration, max_duration):
    """
    Downloads and unpacks the AN4 dataset, converts recordings to wav and
    creates train/val manifest files.
    Args:
        target_dir: directory (relative to the user's home) where data is stored
        sample_rate: sample rate for the converted wav files
        min_duration: manifest prunes samples shorter than this (seconds)
        max_duration: manifest prunes samples longer than this (seconds)
    """
    path_to_data = os.path.join(os.path.expanduser("~"), target_dir)
    if not os.path.exists(path_to_data):
        os.makedirs(path_to_data)
    target_unpacked_dir = os.path.join(path_to_data, "an4_unpacked")
    path_utils.try_create_directory(target_unpacked_dir)
    # Start each run from a clean extraction directory.
    extracted_dir = os.path.join(path_to_data, "An4")
    if os.path.exists(extracted_dir):
        shutil.rmtree(extracted_dir)
    LOGGER.info("Start downloading...")
    file_name = AN4_URL.split("/")[-1]
    target_filename = os.path.join(target_unpacked_dir, file_name)
    path_utils.try_download(target_filename, AN4_URL)
    LOGGER.info("Download complete")
    LOGGER.info("Unpacking...")
    tar = tarfile.open(target_filename)
    tar.extractall(extracted_dir)
    tar.close()
    assert os.path.exists(extracted_dir), f"Archive {file_name} was not properly uncompressed"
    LOGGER.info("Converting files to wav and extracting transcripts...")
    create_wav_and_transcripts(path_to_data, 'train', sample_rate, extracted_dir, 'an4_clstk')
    create_wav_and_transcripts(path_to_data, 'test', sample_rate, extracted_dir, 'an4test_clstk')
    # NOTE(review): both manifests are built from identical inputs apart from
    # the output filename — confirm create_manifest derives the split from it.
    create_manifest(path_to_data, os.path.join(path_to_data,'an4_train_manifest.csv'), min_duration, max_duration)
    create_manifest(path_to_data, os.path.join(path_to_data,'an4_val_manifest.csv'), min_duration, max_duration)
def create_wav_and_transcripts(path, data_tag, sample_rate, extracted_dir, wav_subfolder_name):
    """
    Converts one AN4 split to wav and writes per-recording transcript files.
    Args:
        path: root output directory
        data_tag: split name ('train' or 'test'); also the output subfolder
        sample_rate: sample rate for the converted wav files
        extracted_dir: directory where the AN4 archive was extracted
        wav_subfolder_name: subfolder of an4/wav holding this split's audio
    """
    # Output layout: <path>/<data_tag>/txt and <path>/<data_tag>/wav
    tag_path = os.path.join(path,data_tag)
    transcript_path_new = os.path.join(tag_path, 'txt')
    wav_path_new = os.path.join(tag_path, 'wav')
    path_utils.try_create_directory(transcript_path_new)
    path_utils.try_create_directory(wav_path_new)
    # Source locations inside the extracted archive.
    wav_path_ext = os.path.join(extracted_dir, 'an4/wav')
    file_ids = os.path.join(extracted_dir, f'an4/etc/an4_{data_tag}.fileids')
    transcripts_ext = os.path.join(extracted_dir, f'an4/etc/an4_{data_tag}.transcription')
    path = os.path.join(wav_path_ext, wav_subfolder_name)
    convert_audio_to_wav(path, sample_rate)
    format_files(file_ids, transcript_path_new, wav_path_new, transcripts_ext, wav_path_ext)
def convert_audio_to_wav(train_path, sample_rate):
    """
    Transcodes every .raw recording under *train_path* to a .wav next to it.
    Args:
        train_path: directory searched recursively for .raw files
        sample_rate: sample rate passed to the transcoder
    Returns:
        None
    """
    # `find` lists all raw recordings; each is transcoded in place.
    with os.popen('find %s -type f -name "*.raw"' % train_path) as pipe:
        for line in pipe:
            raw_path = line.strip()
            new_path = line.replace('.raw', '.wav').strip()
            audio_tools.transcode_recordings_an4(raw_path=raw_path, wav_path= new_path, sample_rate=sample_rate)
def format_files(file_ids, new_transcript_path, new_wav_path, transcripts, wav_path):
    """
    Pairs each recording with its transcript: writes one .txt per wav and
    moves the wav into the new directory layout.
    Args:
        file_ids: file listing recording ids, one per line
        new_transcript_path: destination directory for the .txt transcripts
        new_wav_path: destination directory for the .wav files
        transcripts: path to the .transcription file (one line per recording)
        wav_path: source directory containing the converted wav files
    Returns:
        None
    """
    with open(file_ids, 'r') as f:
        with open(transcripts, 'r') as t:
            paths = f.readlines()
            # NOTE: rebinds the `transcripts` parameter to the list of lines.
            transcripts = t.readlines()
            # fileids and transcription files are aligned line-by-line.
            for x in range(len(paths)):
                path = os.path.join(wav_path, paths[x].strip()) + '.wav'
                filename = path.split('/')[-1]
                extracted_transcript = _process_transcript(transcripts, x)
                current_path = os.path.abspath(path)
                new_path = os.path.join(new_wav_path ,filename)
                text_path = os.path.join(new_transcript_path,filename.replace('.wav', '.txt'))
                with io.FileIO(text_path, "w") as file:
                    file.write(extracted_transcript.encode('utf-8'))
                os.rename(current_path, new_path)
def _process_transcript(transcripts, x):
"""
Helper method to split into words.
Args:
transcripts:
x:
Returns:
"""
extracted_transcript = transcripts[x].split('(')[0].strip("<s>").split('<')[0].strip().upper()
return extracted_transcript
@click.command()
@click.option("--target-dir", default="temp/data/an4", type=str, help="Directory to store the dataset.")
@click.option("--sample-rate", default=16000, type=int, help="Sample rate.")
@click.option("--min-duration", default=1, type=int,
              help="Prunes training samples shorter than the min duration (given in seconds).")
@click.option("--max-duration", default=15, type=int,
              help="Prunes training samples longer than the max duration (given in seconds).")
def main(**kwargs):
    """Processes and downloads an4 dataset."""
    try_download_an4(**kwargs)

if __name__ == '__main__':
    # Rebind the module logger to the project-wide logger before running.
    LOGGER = logging.getLogger(SONOSCO)
    setup_logging(LOGGER)
    main()
|
# __init__.py
from .bin_converter import *
from .uni_inter_intergers import *
|
# Read two integers and print 1 if the first is larger, 0 if they are equal,
# and 2 if the second is larger.
a = int(input())
b = int(input())
c = 1 if a > b else (0 if a == b else 2)
print(c)
__author__ = 'Dell'
import csv
from datetime import datetime
# Tab-separated edge list, sorted by date: source node, target node, YYYY-MM-DD.
edgesreader = csv.reader(open("flickr-growth-sorted.txt", "r"), delimiter = '\t')
# node -> list of neighbours, for incoming and outgoing edges respectively.
ingraph = dict()
outgraph = dict()
def write_edge_file(filenames):
    """Dump every node's degree to the two CSV files named in *filenames*.

    filenames[0] receives (node, in-degree) rows from the module-level
    ``ingraph``; filenames[1] receives (node, out-degree) rows from
    ``outgraph``. A warning is printed when a node has duplicate edges.

    Changes from the original: the parameter no longer shadows the builtin
    ``input``; the output files are closed via with-statements (they were
    leaked); and the Python 2 print statements are now single-argument
    print() calls, which behave identically under Python 2 and also parse
    under Python 3.
    """
    with open(filenames[0], "wb") as infile, open(filenames[1], "wb") as outfile:
        inwriter = csv.writer(infile, delimiter='\t')
        outwriter = csv.writer(outfile, delimiter='\t')
        for node in ingraph:
            if len(ingraph[node]) != len(set(ingraph[node])):
                print("Something is wrong with the edges file")
            inwriter.writerow([node, len(ingraph[node])])
        for node in outgraph:
            if len(outgraph[node]) != len(set(outgraph[node])):
                print("Something is wrong with the edges file")
            outwriter.writerow([node, len(outgraph[node])])
# Degrees accumulated before this date are dumped to the "start-link-*" files
# the first time an edge dated after it is seen.
base = datetime.strptime('2006-11-02', "%Y-%m-%d")
# end = datetime.strptime('2007-05-18', "%Y-%m-%d").date()
flag = 0
for row in edgesreader:
    # Once the (date-sorted) stream crosses the base date, write the
    # "start" snapshot exactly once.
    if flag == 0 and datetime.strptime(row[2], "%Y-%m-%d") > base:
        flag = 1
        write_edge_file(["start-link-indegree.csv", "start-link-outdegree.csv"])
    # row[0] -> row[1] edge: record it in both adjacency maps.
    if int(row[1]) in ingraph:
        ingraph[int(row[1])].append(int(row[0]))
    else:
        ingraph[int(row[1])] = [int(row[0])]
    if int(row[0]) in outgraph:
        outgraph[int(row[0])].append(int(row[1]))
    else:
        outgraph[int(row[0])] = [int(row[1])]
# Final snapshot after the whole file has been processed.
write_edge_file(["end-link-indegree.csv", "end-link-outdegree.csv"])
|
#!/usr/bin/python
import sys
import xml.etree.ElementTree as ET
class Layout:
    """One window-layout command for the Alfred workflow.

    Holds the short name, the human-readable title, the argument string
    handed to the workflow, and whether the layout makes sense on another
    screen. The unique id is derived from the name as "layout.<name>".
    """

    def __init__(self, name, title, forOtherScreen, arg):
        self.name = name
        self.uid = "layout.%s" % name
        self.title = title
        self.forOtherScreen = forOtherScreen
        self.arg = arg
# Fixed layouts: halves, quarters, centring, edge moves and resize commands.
# The arg strings use fractional screen coordinates (left,top,right,bottom).
layouts = [
    Layout("togglefullscreen", "Toggle full screen mode", False, "togglefullscreen"),
    Layout("full", "Full", True, "set:0,0,1,1"),
    Layout("left", "Left", True, "set:0,0,0.5,1"),
    Layout("top", "Top", True, "set:0,0,1,0.5"),
    Layout("bottom", "Bottom", True, "set:0,0.5,1,1"),
    Layout("right", "Right", True, "set:0.5,0,1,1"),
    Layout("topleft", "Top left", True, "set:0,0,0.5,0.5"),
    Layout("bottomleft", "Bottom left", True, "set:0,0.5,0.5,1"),
    Layout("topright", "Top right", True, "set:0.5,0,1,0.5"),
    Layout("bottomright", "Bottom right", True, "set:0.5,0.5,1,1"),
    Layout("center", "Center", True, "set:0.1,0.1,0.9,0.9"),
    Layout("movecenter", "Move to center", True, "move:0.5,0.5"),
    Layout("movetopleft", "Move to top left", True, "move:0,0"),
    Layout("movebottomleft", "Move to bottom left", True, "move:0,1"),
    Layout("movetopright", "Move to top right", True, "move:1,0"),
    Layout("movebottomright", "Move to bottom right", True, "move:1,1"),
    Layout("grow", "Grow window", False, "resizeAll:0.1667"),
    Layout("shrink", "Shrink window", False, "resizeAll:-0.1667"),
    Layout("growleft", "Grow left side of window", False, "resize:0.1667,0,0,0"),
    Layout("shrinkleft", "Shrink left side of window", False, "resize:-0.1667,0,0,0"),
    Layout("growtop", "Grow top side of window", False, "resize:0,0.1667,0,0"),
    Layout("shrinktop", "Shrink top side of window", False, "resize:0,-0.1667,0,0"),
    Layout("growright", "Grow right side of window", False, "resize:0,0,0.1667,0"),
    Layout("shrinkright", "Shrink right side of window", False, "resize:0,0,-0.1667,0"),
    Layout("growbottom", "Grow bottom side of window", False, "resize:0,0,0,0.1667"),
    Layout("shrinkbottom", "Shrink bottom side of window", False, "resize:0,0,0,-0.1667")
]
# Generated layouts on a 3x3 grid: "<x><y>" places a single cell,
# "<x1><y1>-<x2><y2>" spans the rectangle between the two cells.
for x1 in range(1, 4):
    for y1 in range(1, 4):
        for x2 in range(x1, 4):
            for y2 in range(y1, 4):
                if x1 == x2 and y1 == y2:
                    layouts.append(Layout("%d%d" % (x1, y1), "(%d %d)" % (x1, y1), True, "set:%f,%f,%f,%f" % ((x1 - 1) / 3.0, (y1 - 1) / 3.0, x1 / 3.0, y1 / 3.0)))
                else:
                    layouts.append(Layout("%d%d-%d%d" % (x1, y1, x2, y2), "(%d %d) - (%d %d)" % (x1, y1, x2, y2), True, "set:%f,%f,%f,%f" % ((x1 -1) / 3.0, (y1 - 1) / 3.0, x2 / 3.0, y2 / 3.0)))
# Filter layouts by the query typed into Alfred and emit them as Alfred
# script-filter XML, shortest names first.
query = sys.argv[1]
if len(query) > 0:
    layouts = [n for n in layouts if query.lower() in n.name.lower()]
layouts = sorted(layouts, key=lambda l: len(l.name))
root = ET.Element('items')
if len(sys.argv) > 2:
    # A second argument selects the "other screen" variant of each layout;
    # only layouts flagged forOtherScreen are offered.
    otherScreen = sys.argv[2]
    for layout in layouts:
        if layout.forOtherScreen:
            ie = ET.Element('item', valid="yes", arg=layout.arg + ":" + otherScreen, uid=layout.uid)
            te = ET.Element('title')
            te.text = layout.title + " on other screen"
            ie.append(te)
            icon = ET.Element('icon')
            icon.text = "icon_%s.png" % layout.name
            ie.append(icon)
            root.append(ie)
else:
    for layout in layouts:
        ie = ET.Element('item', valid="yes", arg=layout.arg, uid=layout.uid)
        te = ET.Element('title')
        te.text = layout.title
        ie.append(te)
        icon = ET.Element('icon')
        icon.text = "icon_%s.png" % layout.name
        ie.append(icon)
        root.append(ie)
# The original used Python 2 print statements, which do not parse under
# Python 3. Each call below takes a single argument, so the converted
# print() form produces identical output under Python 2 as well.
print('<?xml version="1.0"?>')
print(ET.tostring(root))
print("")
|
# coding: utf-8
#question 2
class WordFrequency(object):
    """
    Analyses files to generate a dictionary that has words and their frequencies
    """

    def __init__(self):
        # word -> number of occurrences in the last analysed file
        self.freq_dict = {}
        # Characters removed before counting. BUG FIX: the original literal
        # "\1234567890" contained the octal escape "\123" (the character 'S'),
        # so the digits 1, 2 and 3 were never stripped; the backslash is now
        # escaped so the literal characters are used.
        self.special_character = ",./;<>?:{}[]\\1234567890!@#%^&*()-_=+"

    def getFreq(self, key):
        """Return the frequency of *key*, or False if it never occurred."""
        if key in self.freq_dict:
            return self.freq_dict[key]
        else:
            return False

    def analyzeFile(self, filename):
        """
        Count word frequencies in *filename*, replacing any previous counts.

        Keyword arguments:
        filename : file you want to analyze
        """
        self.freq_dict = {}
        # with-statement closes the file even on error (the original leaked
        # the handle).
        with open(filename, 'r') as file_obj:
            for line in file_obj:
                clean_line = self._clean(line)
                # split() with no argument collapses runs of whitespace, so
                # empty strings are no longer counted as words (the original
                # split(' ') counted '' for consecutive spaces).
                for word in clean_line.split():
                    if word in self.freq_dict:
                        self.freq_dict[word] += 1
                    else:
                        self.freq_dict[word] = 1

    def _clean(self, string):
        """Lower-case *string* and strip newlines, punctuation and digits."""
        string = string.replace("\n", '')
        string = string.lower()
        for character in self.special_character:
            string = string.replace(character, '')
        return string
return string
if __name__=="__main__":
    # Interactive demo: analyze the sample file, then report one word's count.
    calculator = WordFrequency()
    calculator.analyzeFile("test_file.txt")
    print(calculator.getFreq(input("Enter the word for its frequency : ")))
|
from unittest import TestCase
from unittest.mock import patch, Mock, call
from bat.example.cli import (
hello_world,
get_help,
default,
argparse,
Configuration,
)
class ExampleTests(TestCase):
    """Unit tests for the example module's CLI entry points."""

    @patch('builtins.print')
    def test_hello_world(t: TestCase, print: Mock):
        # The patched builtin is injected under the name `print`, deliberately
        # shadowing the builtin inside this test body.
        conf = Mock(Configuration)
        hello_world(conf)
        print.assert_called_with('Hello from the example module!')

    def test_get_help(t: TestCase):
        # get_help returns a closure bound to the parser; invoking it with a
        # configuration must delegate to parser.print_help().
        parser = Mock(argparse.ArgumentParser)
        conf = Mock(Configuration)
        helper = get_help(parser)
        helper(conf)
        parser.print_help.assert_called_with()

    @patch('builtins.print')
    def test_default(t: TestCase, print: Mock):
        # The default handler prints a fixed line plus the repr of the config.
        conf = Mock(Configuration)
        default(conf)
        print.assert_has_calls([
            call('default response from example module CLI'),
            call(f'{conf=}'),
        ])
|
import cupy
from cupy.core import _routines_logic as _logic
from cupy.core import fusion
def all(a, axis=None, out=None, keepdims=False):
    """Tests whether all array elements along a given axis evaluate to True.

    Args:
        a (cupy.ndarray): Input array.
        axis (int or tuple of ints): Axis or axes along which to reduce;
            the flattened array is used when omitted.
        out (cupy.ndarray): Output array.
        keepdims (bool): If ``True``, the reduced axis is kept as an axis of
            size one.

    Returns:
        cupy.ndarray: An array reduced of the input array along the axis.

    .. seealso:: :func:`numpy.all`
    """
    # Outside of fusion, delegate straight to the ndarray method.
    if not fusion._is_fusing():
        assert isinstance(a, cupy.ndarray)
        return a.all(axis=axis, out=out, keepdims=keepdims)
    if keepdims:
        raise NotImplementedError(
            'cupy.all does not support `keepdims` in fusion yet.')
    return fusion._call_reduction(_logic.all, a, axis=axis, out=out)
def any(a, axis=None, out=None, keepdims=False):
    """Tests whether any array elements along a given axis evaluate to True.

    Args:
        a (cupy.ndarray): Input array.
        axis (int or tuple of ints): Axis or axes along which to reduce;
            the flattened array is used when omitted.
        out (cupy.ndarray): Output array.
        keepdims (bool): If ``True``, the reduced axis is kept as an axis of
            size one.

    Returns:
        cupy.ndarray: An array reduced of the input array along the axis.

    .. seealso:: :func:`numpy.any`
    """
    # Outside of fusion, delegate straight to the ndarray method.
    if not fusion._is_fusing():
        assert isinstance(a, cupy.ndarray)
        return a.any(axis=axis, out=out, keepdims=keepdims)
    if keepdims:
        raise NotImplementedError(
            'cupy.any does not support `keepdims` in fusion yet.')
    return fusion._call_reduction(_logic.any, a, axis=axis, out=out)
|
from tkinter import*
from time import*
from random import*
# Main window with a 300x400 game canvas and three one-line text widgets.
window = Tk()
c = Canvas(window, height=400, width=300, bg='black')
l = Text(window, height=1, width=6)
l.pack()
c.pack()
# The player's ship: a 20x20 white square, moved to the bottom centre.
player = c.create_rectangle(0, 380, 20, 400, fill='white', outline='white')
c.move(player, 140, 0)
s = Text(window, height=1, width=6)
s.pack()
b = Text(window, height=1, width=6)
b.pack()
# Menu widgets: PLAY / QUIT buttons, the title, and an in-game PAUSE label.
play = c.create_rectangle(0, 0, 80, 40, fill='white', outline='white')
tlay = c.create_text(150, 220, text='[L]PLAY', fill='black', font=('', 15))
title = c.create_text(150, 90, text='ASTEROID', fill='white', font=('', 30))
Quit = c.create_rectangle(0, 0, 80, 40, fill='white', outline='white')
tuit = c.create_text(150, 270, text='[Q]QUIT', fill='black', font=('', 15))
pause = c.create_rectangle(0, 0, 80, 40, fill='white', outline='white')
tause = c.create_text(40, 30, text='[P]PAUSE', fill='black', font=('', 15))
c.move(play, 110, 200)
c.itemconfig(player, state=HIDDEN)  # hidden until the game starts
c.move(Quit, 110, 250)
c.move(pause, 0, 10)
class tetris:
blocks = []
score = 0
bullet = []
wait = 0.1
lives = 3
bullets = 20
powers = []
interface = 'menu'
def reset_settings():
tetris.score = ''
tetris.lives = ''
tetris.bullets = ''
def game_settings():
tetris.score = 0
tetris.lives = 3
tetris.bullets = 20
def move(d):
if d == "Up" and not c.coords(player)[1] <= 0:
c.move(player, 0, -10)
elif d == "Left" and not c.coords(player)[0] <= 0:
c.move(player, -10, 0)
elif d == "Right" and not c.coords(player)[2] >= 300:
c.move(player, 10, 0)
elif d == "Down" and not c.coords(player)[3] >= 400:
c.move(player, 0, 10)
def ccreate():
return put_block(randint(0, 29), 'pink')
def reload():
if tetris.bullets < 20:
tetris.bullets = tetris.bullets + 1
def move_player(direction):
d = direction.keysym
di = direction
move(d)
if di.char == ' ' and tetris.interface == 'playing':
shoot()
if di.char == 'l' and tetris.interface == 'menu':
tetris.interface = 'playing'
game_settings()
elif di.char == 'q' and tetris.interface == 'menu':
quit()
elif di.char == 'm' and tetris.interface == 'lose' or tetris.interface == 'pause':
tetris.interface = 'menu'
elif di.char == 'p' and tetris.interface == 'playing':
tetris.interface = 'pause'
def event(event):
move_player(event)
c.bind_all('<Key>', event)
def menu(e):
if tetris.interface == 'lose' or tetris.interface == 'pause':
tetris.interface = 'menu'
c.bind_all('<Key-m>', menu)
def resume(e):
if tetris.interface == 'pause':
tetris.interface = 'playing'
game_settings()
c.bind_all('<Key-r>', resume)
def shoot():
if not tetris.bullets == 0:
tetris.bullets = tetris.bullets - 1
p = c.coords(player)
tetris.bullet.append(c.create_rectangle(p[0], p[1], p[2], p[3], fill='white', outline='white'))
def put_block(x, col):
    """Create a 10x10 block on the top row at column *x* (0-29).

    Pink (power-up) blocks get a white outline; every other colour outlines
    itself. Returns the canvas item id.
    """
    x = x * 10
    y = 0
    if col == 'pink':
        out = 'white'
    else:
        out = col
    return c.create_rectangle(x, y, x + 10, y + 10, fill=col, outline=out)
def move_block(num):
    """Advance canvas item *num* one 10px step down the screen."""
    c.move(num, 0, 10)
def create():
    """Spawn an enemy block of a random colour at a random column; return its id."""
    return put_block(randint(0, 29), choice(['green', 'red', 'yellow', 'blue']))
def stringer(List):
    """Concatenate ``str()`` of every element of *List* into one string.

    Uses ``str.join`` (linear) instead of the original ``+=`` loop, which is
    quadratic on long inputs.
    """
    return ''.join(str(i) for i in List)
def ffind(l, o):
    """Return the index of the first occurrence of *o* in *l*.

    Returns ``len(l)`` when *o* is absent (callers use the result with
    ``del list[idx]`` right after deleting the canvas item, so a miss must
    not raise like ``list.index`` would).
    """
    count = 0
    for i in l:
        if i == o:
            # FIX: the original assigned an unused local `re` here, which
            # also shadowed the stdlib module name; the break alone suffices.
            break
        count += 1
    return count
# ---------------------------------------------------------------------------
# Main game loop: one iteration per frame. Dispatches on tetris.interface
# ('playing' / 'menu' / 'lose' / 'pause'), then refreshes the HUD text
# widgets (s, b, l are created earlier in the file).
# ---------------------------------------------------------------------------
while True:
    if tetris.interface == 'playing':
        # show the in-game HUD and player, hide the menu widgets
        c.itemconfig(pause, state=NORMAL)
        c.itemconfig(tause, state=NORMAL)
        c.itemconfig(title, state=HIDDEN)
        c.itemconfig(play, state=HIDDEN)
        c.itemconfig(tlay, state=HIDDEN)
        c.itemconfig(Quit, state=HIDDEN)
        c.itemconfig(tuit, state=HIDDEN)
        c.itemconfig(player, state=NORMAL)
        # random spawns: ~1 in 6 frames an enemy block, else ~1 in 11 a power-up
        if randint(0, 5) == 0:
            tetris.blocks.append(create())
        elif randint(0, 10) == 0:
            tetris.powers.append(ccreate())
        if tetris.lives == 0:
            c.itemconfig(player, state=HIDDEN)
            tetris.interface = 'lose'
        # advance blocks/power-ups; drop those past the bottom edge (y == 410)
        # NOTE(review): entries are deleted from these lists while iterating
        # them, which makes the iterator skip the following element — confirm
        # this is acceptable for the game.
        for i in tetris.blocks:
            if len(c.coords(i)) == 4:  # item still exists on the canvas
                move_block(i)
                if c.coords(i)[3] == 410:
                    c.delete(i)
                    del tetris.blocks[ffind(tetris.blocks, i)]
        for i in tetris.powers:
            if len(c.coords(i)) == 4:
                move_block(i)
                if c.coords(i)[3] == 410:
                    c.delete(i)
                    del tetris.powers[ffind(tetris.powers, i)]
        # move bullets upward; cull them at the top edge or on a block hit
        for i in tetris.bullet:
            c.move(i, 0, -10)
            if c.coords(i)[1] == 0:
                c.delete(i)
                del tetris.bullet[ffind(tetris.bullet, i)]
            else:
                for o in tetris.blocks:
                    if len(c.coords(i)) == 4 and len(c.coords(o)) == 4:
                        op = c.coords(o)
                        ip = c.coords(i)
                        # edge-alignment collision test between bullet and block
                        if ((op[3] == ip[1] or op[1] == ip[1]) and (op[0] == ip[0] or op[2] == ip[2])):
                            c.delete(o)
                            del tetris.blocks[ffind(tetris.blocks, o)]
                            tetris.score = tetris.score + 1
                            del tetris.bullet[ffind(tetris.bullet, i)]
                            c.delete(i)
        # reuse the same collision test with the player itself:
        # a block costs a life, a power-up refills one bullet
        i = player
        for o in tetris.blocks:
            if len(c.coords(i)) == 4 and len(c.coords(o)) == 4:
                op = c.coords(o)
                ip = c.coords(i)
                if ((op[3] == ip[1] or op[1] == ip[1]) and (op[0] == ip[0] or op[2] == ip[2])):
                    c.delete(o)
                    del tetris.blocks[ffind(tetris.blocks, o)]
                    tetris.lives = tetris.lives - 1
        for o in tetris.powers:
            if len(c.coords(i)) == 4 and len(c.coords(o)) == 4:
                op = c.coords(o)
                ip = c.coords(i)
                if ((op[3] == ip[1] or op[1] == ip[1]) and (op[0] == ip[0] or op[2] == ip[2])):
                    c.delete(o)
                    del tetris.powers[ffind(tetris.powers, o)]
                    reload()
    elif tetris.interface == 'menu':
        # show the menu widgets, hide the game HUD
        c.itemconfig(title, state=NORMAL)
        c.itemconfig(play, state=NORMAL)
        c.itemconfig(tlay, state=NORMAL)
        c.itemconfig(Quit, state=NORMAL)
        c.itemconfig(tuit, state=NORMAL)
        c.itemconfig(player, state=HIDDEN)
        c.itemconfig(tlay, text='[L]PLAY')
        c.itemconfig(title, text='ASTEROID')
        c.itemconfig(pause, state=HIDDEN)
        c.itemconfig(tause, state=HIDDEN)
        c.itemconfig(tuit, text='[Q]QUIT')
        if True:  # clear every leftover entity from the previous game
            for o in tetris.powers:
                c.delete(o)
                del tetris.powers[ffind(tetris.powers, o)]
            for o in tetris.blocks:
                c.delete(o)
                del tetris.blocks[ffind(tetris.blocks, o)]
            for i in tetris.bullet:
                c.delete(i)
                del tetris.bullet[ffind(tetris.bullet, i)]
        reset_settings()
    elif tetris.interface == 'lose':
        # game-over screen: reuse the title/play widgets as messages
        c.itemconfig(Quit, state=HIDDEN)
        c.itemconfig(tuit, state=HIDDEN)
        c.itemconfig(title, state=NORMAL)
        c.itemconfig(play, state=NORMAL)
        c.itemconfig(tlay, state=NORMAL)
        c.itemconfig(pause, state=HIDDEN)
        c.itemconfig(tause, state=HIDDEN)
        c.itemconfig(title, text='GAME OVER')
        c.itemconfig(tlay, text='[M]MENU')
    elif tetris.interface == 'pause':
        # pause screen: offer resume and menu
        c.itemconfig(Quit, state=NORMAL)
        c.itemconfig(tuit, state=NORMAL)
        c.itemconfig(title, state=NORMAL)
        c.itemconfig(play, state=NORMAL)
        c.itemconfig(tlay, state=NORMAL)
        c.itemconfig(pause, state=HIDDEN)
        c.itemconfig(tause, state=HIDDEN)
        c.itemconfig(title, text='PAUSED')
        c.itemconfig(tuit, text='[R]RESUME')
        c.itemconfig(tlay, text='[M]MENU')
    # refresh the score / bullets / lives text widgets
    s.delete(0.0, END)
    s.insert(END, tetris.score)
    b.delete(0.0, END)
    b.insert(END, tetris.bullets)
    l.delete(0.0, END)
    l.insert(END, tetris.lives)
    sleep(tetris.wait)
    c.update()
|
import os
import numpy as np
import h5py
from PIL import Image
# Dataset locations (Pascal VOC 2012 layout) and preprocessing constants.
data_dir = '/tempspace/hyuan/DrSleep/VOC2012/VOCdevkit/VOC2012'
train_list = '/tempspace/hyuan/DrSleep/VOC2012/VOCdevkit/VOC2012/dataset/train.txt'
val_list = '/tempspace/hyuan/DrSleep/VOC2012/VOCdevkit/VOC2012/dataset/val.txt'
test_list = '/tempspace/hyuan/DrSleep/VOC2012/VOCdevkit/VOC2012/dataset/test.txt'
size = 256, 256  # (height, width) passed to build_hdf5_image_dataset
# IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) # BGR
IMG_MEAN = np.array((122.67891434,116.66876762,104.00698793), dtype=np.float32) # RGB
def read_labeled_image_list(data_dir, data_list):
    """Reads txt file containing paths to images and ground truth labels.

    Args:
        data_dir: path prefix prepended verbatim to every entry of the list.
        data_list: path to the file with lines of the form
            '/path/to/image /path/to/label'.

    Returns:
        Two lists with all file names for images and labels, respectively.
    """
    images = []
    labels = []
    # 'with' guarantees the list file is closed (the original leaked the handle).
    with open(data_list, 'r') as f:
        for line in f:
            image, label = line.strip("\n").split(' ')
            images.append(data_dir + image)
            labels.append(data_dir + label)
    return images, labels
def load_image(in_image):
    """ Load an image from path *in_image*, returns PIL.Image.

    Note: PIL opens lazily; the pixel data is read on first access.
    """
    img = Image.open(in_image)
    return img
def pil_to_nparray(pil_image):
    """ Convert a PIL.Image to a float32 numpy array.

    load() forces the lazy image data to be read before conversion.
    """
    pil_image.load()
    return np.asarray(pil_image, dtype="float32")
def resize_image(in_image, new_width, new_height, out_image=None,
                 resize_mode=Image.BILINEAR):
    """ Resize an image.
    Arguments:
        in_image: `PIL.Image`. The image to resize.
        new_width: `int`. The image new width.
        new_height: `int`. The image new height.
        out_image: `str`. If specified, save the image to the given path.
        resize_mode: `PIL.Image.mode`. The resizing mode.
    Returns:
        `PIL.Image`. The resize image.
    """
    img = in_image.resize((new_width, new_height), resize_mode)
    if out_image:
        # optionally persist the resized copy to disk
        img.save(out_image)
    return img
def build_hdf5_image_dataset(target_path, image_shape, output_path='dataset.h5', normalize=False):
    """ Build HDF5 Image Dataset.

    Creates an HDF5 file with datasets 'X' (float images, mean-subtracted)
    and 'Y' (float label maps) from a plain text file listing
    'image_path label_path' pairs (see `read_labeled_image_list`).

    Examples:
        ```
        build_hdf5_image_dataset(dataset_file, image_shape=(128, 128),
                                 output_path='dataset.h5', normalize=True)
        h5f = h5py.File('dataset.h5', 'r')
        X = h5f['X']
        Y = h5f['Y']
        ```

    Arguments:
        target_path: `str`. Path of the plain text file.
        image_shape: `tuple (height, width)`. The images shape. Images that
            doesn't match that shape will be resized.
        output_path: `str`. The output path for the hdf5 dataset. Default:
            'dataset.h5'
        normalize: `bool`. If True, normalize all pictures by dividing
            every image array by 255.
    """
    assert image_shape, "Image shape must be defined."
    assert image_shape[0] and image_shape[1], "Image shape error. It must be a tuple of int: ('width', 'height')."
    images, labels = read_labeled_image_list(data_dir, target_path)
    d_imgshape = (len(images), image_shape[0], image_shape[1], 3)
    d_labelshape = (len(images), image_shape[0], image_shape[1])
    # The context manager flushes and closes the HDF5 file even on error
    # (the original never closed it, risking a truncated/locked file).
    with h5py.File(output_path, 'w') as dataset:
        dataset.create_dataset('X', d_imgshape, dtype='f')
        dataset.create_dataset('Y', d_labelshape, dtype='f')
        for i in range(len(images)):
            img = load_image(images[i])
            label = load_image(labels[i])
            # NOTE(review): PIL resize takes (width, height) while image_shape
            # is documented as (height, width); harmless here because `size`
            # is square — confirm before using non-square shapes.
            img = resize_image(img, image_shape[0], image_shape[1])
            # NEAREST keeps label class ids intact (no interpolation)
            label = resize_image(label, image_shape[0], image_shape[1], resize_mode=Image.NEAREST)
            img = pil_to_nparray(img)
            label = pil_to_nparray(label)
            # center by subtracting the RGB dataset mean
            img -= IMG_MEAN
            if normalize:
                img /= 255.
            dataset['X'][i] = img
            dataset['Y'][i] = label
if __name__ == '__main__':
    # Build the three dataset splits; mean subtraction happens inside,
    # so normalize stays False here.
    build_hdf5_image_dataset(train_list, size, output_path='./training.h5', normalize=False)
    build_hdf5_image_dataset(val_list, size, output_path='./validation.h5', normalize=False)
    build_hdf5_image_dataset(test_list, size, output_path='./testing.h5', normalize=False)
#!/usr/bin/env python3
#
# Validator for 'throughput' test spec
#
import pscheduler
from validate import spec_is_valid, MAX_SCHEMA
# Parse the spec from stdin; a malformed document is reported as an invalid
# spec rather than a crash (succeed_json emits the result and exits).
# NOTE: the name `json` shadows the stdlib module here — kept as-is.
try:
    json = pscheduler.json_load()
except ValueError as ex:
    pscheduler.succeed_json({
        "valid": False,
        "error": str(ex)
    })

# Schema validation shared with the other throughput plugins.
valid, message = spec_is_valid(json)

result = {
    "valid": valid
}

if not valid:
    result["error"] = message

# Extra semantic rule: the two modes cannot be combined.
if ('single-ended' in json) and ('loopback' in json):
    pscheduler.succeed_json({
        "valid": False,
        "error": "Single-ended and loopback modes are mutually exclusive"
    })

pscheduler.succeed_json(result)
|
from .predict import Predictor, Prediction
# from .mmdetection import MMTwoStagePredictor
from .kalman import KalmanPredictor
from .ecc import ECCPredictor
|
from enum import Enum
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import MLP, Logit, ConvNet, GatedAttn, MixLogCDF, GatedConv2d, GatedLinear
from .squeeze import (squeeze1d, unsqueeze1d, channel_merge, channel_split, checker_merge,
checker_split)
class AbstractCoupling(nn.Module):
    """
    abstract class for bijective coupling layers

    A coupling layer splits the input z into two halves (z0, z1), transforms
    z0 conditioned on z1 (subclass hook `_transform`), and merges the halves
    back. The log-Jacobian accumulator `log_df_dz` is threaded through both
    directions so flows can compose layers.
    """

    def __init__(self, dims, masking='checkerboard', odd=False):
        """
        Args:
            dims: shape of one sample; length 1 => vector data,
                length 3 => (C, H, W) image data.
            masking: 'checkerboard' or 'channelwise' (image data only).
            odd: flips which half of the split is the transformed one.
        """
        super(AbstractCoupling, self).__init__()
        self.dims = dims
        if len(dims) == 1:
            # 1D data: split/merge along the feature dimension
            self.squeeze = lambda z, odd=odd: squeeze1d(z, odd)
            self.unsqueeze = lambda z0, z1, odd=odd: unsqueeze1d(z0, z1, odd)
        elif len(dims) == 3 and masking == 'checkerboard':
            # spatial checkerboard split of the image
            self.squeeze = lambda z, odd=odd: checker_split(z, odd)
            self.unsqueeze = lambda z0, z1, odd=odd: checker_merge(z0, z1, odd)
        elif len(dims) == 3 and masking == 'channelwise':
            # split along the channel axis
            self.squeeze = lambda z, odd=odd: channel_split(z, dim=1, odd=odd)
            self.unsqueeze = lambda z0, z1, odd=odd: channel_merge(z0, z1, dim=1, odd=odd)
        else:
            raise Exception('unsupported combination of masking and dimension: %s, %s' %
                            (masking, str(dims)))

    def forward(self, z, log_df_dz):
        """Forward pass: returns (transformed z, updated log-Jacobian)."""
        z0, z1 = self.squeeze(z)
        z0, z1, log_df_dz = self._transform(z0, z1, log_df_dz)
        z = self.unsqueeze(z0, z1)
        return z, log_df_dz

    def backward(self, y, log_df_dz):
        """Exact inverse of `forward`."""
        y0, y1 = self.squeeze(y)
        y0, y1, log_df_dz = self._inverse_transform(y0, y1, log_df_dz)
        y = self.unsqueeze(y0, y1)
        return y, log_df_dz

    def _transform(self, z0, z1, log_df_dz):
        # implemented by subclasses
        pass

    def _inverse_transform(self, z0, z1, log_df_dz):
        # implemented by subclasses
        pass
class AdditiveCoupling(AbstractCoupling):
    """
    additive coupling used in NICE

    z0 <- z0 + t(z1). Volume-preserving, so `log_df_dz` passes through
    unchanged in both directions.
    """

    def __init__(self, dims, masking='checkerboard', odd=False):
        super(AdditiveCoupling, self).__init__(dims, masking, odd)
        if len(dims) == 1:
            # 'odd' decides which half is the larger one for odd sizes
            in_chs = dims[0] // 2 if not odd else (dims[0] + 1) // 2
            out_chs = dims[0] - in_chs
            self.net_t = MLP(in_chs, out_chs)
        elif len(dims) == 3:
            if masking == 'checkerboard':
                # checker split preserves the channel count per half
                in_out_chs = dims[0]
            elif masking == 'channelwise':
                in_out_chs = dims[0] // 2
            self.net_t = ConvNet(in_out_chs, in_out_chs)

    def _transform(self, z0, z1, log_df_dz):
        # translation predicted from the untouched half
        t = self.net_t(z1)
        z0 = z0 + t
        return z0, z1, log_df_dz

    def _inverse_transform(self, y0, y1, log_df_dz):
        # exact inverse: subtract the same translation
        t = self.net_t(y1)
        y0 = y0 - t
        return y0, y1, log_df_dz
class AffineCoupling(AbstractCoupling):
    """
    affine coupling used in Real NVP

    z0 <- z0 * exp(s(z1)) + t(z1); the log-Jacobian accumulates sum(s).
    The learned scalars s_log_scale / s_bias rescale tanh(s), keeping the
    raw scale output bounded for stable training.
    """

    def __init__(self, dims, masking='checkerboard', odd=False):
        super(AffineCoupling, self).__init__(dims, masking, odd)
        # learned rescaling of the squashed scale output
        self.register_parameter('s_log_scale', nn.Parameter(torch.randn(1) * 0.01))
        self.register_parameter('s_bias', nn.Parameter(torch.randn(1) * 0.01))
        if len(dims) == 1:
            in_chs = dims[0] // 2 if not odd else (dims[0] + 1) // 2
            self.out_chs = dims[0] - in_chs
            # network emits t and s concatenated along the feature axis
            self.net = MLP(in_chs, self.out_chs * 2)
        elif len(dims) == 3:
            if masking == 'checkerboard':
                in_out_chs = dims[0] * 2
            elif masking == 'channelwise':
                in_out_chs = dims[0] // 2
            self.out_chs = in_out_chs
            self.net = ConvNet(in_out_chs, in_out_chs * 2)

    def _transform(self, z0, z1, log_df_dz):
        params = self.net(z1)
        # first half of the channels is t, second half the raw scale
        t = params[:, :self.out_chs]
        s = torch.tanh(params[:, self.out_chs:]) * self.s_log_scale + self.s_bias
        z0 = z0 * torch.exp(s) + t
        log_df_dz += torch.sum(s.view(z0.size(0), -1), dim=1)
        return z0, z1, log_df_dz

    def _inverse_transform(self, y0, y1, log_df_dz):
        params = self.net(y1)
        t = params[:, :self.out_chs]
        s = torch.tanh(params[:, self.out_chs:]) * self.s_log_scale + self.s_bias
        # inverse affine; log-Jacobian is subtracted symmetrically
        y0 = torch.exp(-s) * (y0 - t)
        log_df_dz -= torch.sum(s.view(y0.size(0), -1), dim=1)
        return y0, y1, log_df_dz
class MixLogAttnCoupling(AbstractCoupling):
    """
    mixture logistic coupling with attention used in Flow++

    z0 is pushed through a mixture-of-logistics CDF, a logit, and finally an
    affine map; all parameters (a, b, logpi, mu, s) are predicted from z1 by
    a gated network with attention.
    """

    def __init__(self, dims, masking='checkerboard', odd=False, base_filters=32, n_mixtures=4):
        """
        Args:
            base_filters: hidden width of the parameter network.
            n_mixtures: number of logistic mixture components per element.
        """
        super(MixLogAttnCoupling, self).__init__(dims, masking, odd)
        self.n_mixtures = n_mixtures
        # learned rescaling for the affine log-scale output `a`
        self.register_parameter('a_log_scale', nn.Parameter(torch.randn(1) * 0.01))
        self.register_parameter('a_bias', nn.Parameter(torch.randn(1) * 0.01))
        if len(dims) == 1:
            in_chs = dims[0] // 2 if not odd else (dims[0] + 1) // 2
            out_chs = dims[0] - in_chs
            mid_shape = (base_filters, ) + tuple(d // 2 for d in dims[1:])
            # output layout: [a, b, logpi, mu, s] split with torch.split below
            self.sections = [out_chs] * 2 + [out_chs * self.n_mixtures] * 3
            self.net = nn.Sequential(
                nn.Linear(in_chs, base_filters),
                GatedLinear(base_filters, base_filters),
                nn.LayerNorm(mid_shape),
                GatedAttn(mid_shape, base_filters),
                nn.LayerNorm(mid_shape),
                nn.Linear(base_filters, sum(self.sections)),
            )
        elif len(dims) == 3:
            if masking == 'checkerboard':
                in_chs = dims[0] * 2
                mid_shape = (base_filters, ) + tuple(d // 2 for d in dims[1:])
                self.sections = [dims[0] * 2] * 2 + [dims[0] * 2 * self.n_mixtures] * 3
            elif masking == 'channelwise':
                in_chs = dims[0] // 2
                mid_shape = (base_filters, ) + dims[1:]
                self.sections = [dims[0] // 2] * 2 + [dims[0] // 2 * self.n_mixtures] * 3
            self.net = nn.Sequential(
                nn.Conv2d(in_chs, base_filters, 3, 1, 1),
                GatedConv2d(base_filters, base_filters),
                nn.LayerNorm(mid_shape),
                GatedAttn(mid_shape, base_filters),
                nn.LayerNorm(mid_shape),
                nn.Conv2d(base_filters, sum(self.sections), 3, 1, 1),
            )
        self.logit = Logit()
        self.mix_log_cdf = MixLogCDF()

    def _transform(self, z0, z1, log_df_dz):
        B = z0.size(0)
        C = z0.size()[1:]
        params = self.net(z1)
        a, b, logpi, mu, s = torch.split(params, self.sections, dim=1)
        a = torch.tanh(a) * self.a_log_scale + self.a_bias
        logpi = F.log_softmax(logpi.view(B, self.n_mixtures, *C), dim=1)
        mu = mu.view(B, self.n_mixtures, *C)
        s = s.view(B, self.n_mixtures, *C)
        # mixture CDF -> logit -> affine, accumulating the log-Jacobian
        z0, log_df_dz = self.mix_log_cdf(z0, logpi, mu, s, log_df_dz)
        z0, log_df_dz = self.logit(z0, log_df_dz)
        z0 = z0 * torch.exp(a) + b
        log_df_dz += torch.sum(a.view(z0.size(0), -1), dim=1)
        return z0, z1, log_df_dz

    def _inverse_transform(self, z0, z1, log_df_dz):
        B = z0.size(0)
        C = z0.size()[1:]
        params = self.net(z1)
        a, b, logpi, mu, s = torch.split(params, self.sections, dim=1)
        a = torch.tanh(a) * self.a_log_scale + self.a_bias
        logpi = F.log_softmax(logpi.view(B, self.n_mixtures, *C), dim=1)
        mu = mu.view(B, self.n_mixtures, *C)
        s = s.view(B, self.n_mixtures, *C)
        # inverse order: undo the affine, then the logit, then the mixture CDF
        z0 = torch.exp(-a) * (z0 - b)
        log_df_dz -= torch.sum(a.view(z0.size(0), -1), dim=1)
        z0, log_df_dz = self.logit.backward(z0, log_df_dz)
        z0, log_df_dz = self.mix_log_cdf.backward(z0, logpi, mu, s, log_df_dz)
        return z0, z1, log_df_dz
|
# Minimal bokeh demo: scatter-plot five points and open the result in the
# browser as graph2.html.
from bokeh.plotting import figure
from bokeh.io import output_file, show

x = [10, 12, 14, 25, 17]
y = [40, 56, 76, 54, 43]

output_file("graph2.html")
f = figure()
f.circle(x, y)  # NOTE: newer bokeh deprecates circle() in favour of scatter()
show(f)
|
from konlpy.tag import Kkma
from konlpy.tag import Hannanum
from konlpy.tag import Komoran
from konlpy.tag import Okt
import sys
# Compare four Korean morphological analyzers on two sentences and report
# a crude morpheme-overlap similarity for each.
kkma = Kkma()
hannanum = Hannanum()
komoran = Komoran()
okt = Okt()

# Analyzers to compare, with display names kept in a parallel list.
analize_list = [kkma, hannanum, komoran, okt]
analize_list_name = ["Kkma_Class", "Hannanum_Class", "Komoran_Class", "Okt_Class"]
count_name = 0

f = open("sync_test2.txt", 'rt')
read1 = f.readline()
read2 = f.readline()

for i in analize_list:
    dic = {}
    count = 0
    # PERF FIX: the original re-ran i.morphs(read1/read2) inside every loop
    # header, index access and condition, repeating the (expensive)
    # morphological analysis dozens of times per tagger. Analyze each
    # sentence exactly once.
    morphs1 = i.morphs(read1)
    morphs2 = i.morphs(read2)
    long_length = max(len(morphs1), len(morphs2))
    # register every morpheme of sentence 1 with a zero count
    for m in morphs1:
        if m not in dic:
            dic[m] = 0
    # count occurrences of sentence-2 morphemes that also appear in sentence 1
    for m in morphs2:
        if m in dic:
            dic[m] += 1
            count += 1
    sorted_dic = sorted(dic.items(), key=(lambda x: x[1]), reverse=True)
    print(analize_list_name[count_name])
    print("중복이 많은 형태소 상위 3개: ", sorted_dic[0], ', ', sorted_dic[1], ', ', sorted_dic[2])
    print("총 중복 형태소 개수: ", count, "개")
    print("긴 문장의 형태소 개수: ", long_length, "개")
    print("유사도(총 중복 형태소 개수/긴 문장의 형태소 개수): ", round(count / long_length * 100, 2), "%")
    print("\n")
    count_name += 1
f.close()
import os
import json
import pandas as pd
from flask import Flask, request, redirect, url_for, render_template, jsonify, Request
from flask_caching import Cache
from werkzeug.utils import secure_filename
app = Flask(__name__)

config = {
    "DEBUG": True,  # some Flask specific configs
    "CACHE_TYPE": "SimpleCache",  # Flask-Caching related configs
    "CACHE_DEFAULT_TIMEOUT": 0
}
app.config.from_mapping(config)
cache = Cache(app)

'''
Create a list with names of matches whose videos are present in the static folder.
Create also a dictionary with names of events
'''
# Make sure the working directories exist before the first request.
if not 'Data' in os.listdir():
    os.mkdir('Data')
if not 'game' in os.listdir('static'):
    os.mkdir('static/game')
if not 'match' in os.listdir('static/game'):
    os.mkdir('static/game/match')
if not 'video' in os.listdir('static/game'):
    os.mkdir('static/game/video')
if not 'extra_tag' in os.listdir('static/game'):
    os.mkdir('static/game/extra_tag')

# Collect match names from the *.mp4 files already uploaded.
files_list = os.listdir("./static/game/video")
matches_value = []
# dict_name = dict()
for file in files_list:
    if file.split(".")[-1] == "mp4":
        matches_value.append(file.split(".")[0])

'''
When this script runs the command line show an address "127.0.0.1:5000/".
When the user copy and paste this link in their browser the init function
checks if csv files corresponding to names of matches in the static folder are present.
If csv files are not present, they will be created.
After that the init function load all the data relative
to the first half of the first match in the static folder (in alphabetical order).
'''
@app.route('/')
def init():
    """Landing page: render home.html in its initial ('first start') state."""
    return render_template("home.html", condition = "False", message = "first start")
    #return redirect("/" + matches_value[0] + "_Events", code=302)
'''
This function runs when a json match file and/or an mp4 match file is uploaded
from the home.html interface.
1. If the upload completes, it shows the alert "upload complete".
2. Otherwise it shows an error message.
'''
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Handle POSTs from home.html.

    A POST carrying mp4/json uploads saves them under static/game/...;
    a POST without such files (the "start game" form) validates the stored
    match.json and redirects to the match page, or re-renders home.html
    with an error message.
    """
    if request.method == 'POST':
        file_temp = request.files.getlist("file")
        if len(file_temp) == 0 or (file_temp[0].content_type != "video/mp4" and file_temp[0].content_type != 'application/json'):
            try:
                file_json = 'static/game/match/match.json'
                # FIX: 'with' closes the handle (the original leaked it) and
                # the narrowed except no longer hides unrelated bugs the way
                # the original bare `except:` did.
                with open(file_json) as fh:
                    data = json.load(fh)
                name_match = data["home_team"]["team_id"] + " " + data["away_team"]["team_id"]
            except (OSError, ValueError, KeyError, TypeError):
                message = "match.json invalid, control if the sintax of json file is correct"
                condition = "True"
                return render_template("home.html", condition = condition, message = message)
            return redirect("/" + name_match , code=302)
        else:
            # Persist each uploaded artefact under a fixed name.
            for elem in file_temp:
                if elem.content_type == "video/mp4":
                    elem.save(os.path.join(os.path.abspath("./static/game/video"), "game.mp4"))
                elif elem.content_type == 'application/json':
                    elem.save(os.path.join(os.path.abspath("./static/game/match"), "match.json"))
            message = "upload complete"
            condition = "True"
            return render_template("home.html", condition = condition, message = message)
'''
This function runs when the start game button is clicked in the home.html interface.
1. It checks whether the video and json files exist in their folders.
2. If both files exist -> it renders the events_tagging.html interface,
otherwise it displays an error message.
'''
@app.route('/<match>')
def matchView(match):
    """Render the event-tagging interface for *match*.

    Verifies that game.mp4's metadata files (match.json, extra_tag.json) are
    present and parseable; on failure it re-renders home.html with the
    message held in `e`.

    NOTE(review): `e` is assigned *after* each risky step succeeds, so a
    failure reports the previous step's message (labels appear shifted by
    one) — confirm intent before relying on the messages.
    """
    e = ""
    try:
        video_link = '/static/game/video/game.mp4'
        e = "game.mp4 not found or invalid"
        file_json = 'static/game/match/match.json'
        data = json.load(open(file_json))  # NOTE(review): handle never closed
        e = "match.json not found or invalid"
        tag_json = 'static/game/extra_tag/extra_tag.json'
        data_tag = json.load(open(tag_json))
        e = "extra_tag.json not found or invalid"
    except:
        print(e)
        condition = True
        return render_template("home.html", condition = condition, message = e)
    date_last_update = ""
    match_data = data["date_utc"]
    match_code = data["home_team"]["team_id"] + "_" + data["away_team"]["team_id"]
    name_team_A = data["home_team"]["team_id"]
    name_team_B = data["away_team"]["team_id"]
    tag = data_tag["extra_tag"]
    key_tag = data_tag["key_tag"]
    result_tag = data_tag["result_tag"]
    # squad of players: shirt number -> name, with ':' sentinel entries
    # separating the starting lineup from the bench
    squad_A = {}
    squad_A["lineup"] = ":"
    for elem in data["home_team"]["formation"]["lineup"]:
        squad_A[elem["shirt_number"]] = elem["name"]
    squad_A["bench"] = ":"
    for elem in data["home_team"]["formation"]["bench"]:
        squad_A[elem["shirt_number"]] = elem["name"]
    squad_B = {}
    squad_B["lineup"] = ":"
    for elem in data["away_team"]["formation"]["lineup"]:
        squad_B[elem["shirt_number"]] = elem["name"]
    squad_B["bench"] = ":"
    for elem in data["away_team"]["formation"]["bench"]:
        squad_B[elem["shirt_number"]] = elem["name"]
    #run the page with all real program
    return render_template('events_tagging.html', match_code=match_code, match=match, date_last_update=date_last_update, match_name=match, data=match_data,
                           video_link=video_link, tag=tag, key_tag=key_tag, result_tag = result_tag, name_team_A=name_team_A,
                           name_team_B=name_team_B, squad_A=squad_A, squad_B=squad_B, name = {'n':match} )
'''
This function runs when the save button is clicked in the events_tagging.html interface.
1. Receives a POST request (with an array of events, in json format).
2. Adds a new file into Data\name_match\json (does not overwrite the preceding files).
3. Reloads the home.html interface.
'''
@app.route('/<match>/update', methods=['POST', 'GET'])
def save_events(match):
    """Persist the events array POSTed by events_tagging.html.

    Writes a new numbered file under Data/<match>/json (never overwriting an
    earlier save) and re-renders home.html with a status message.
    """
    e = "erro save"
    condition = True
    if request.method == "POST":
        # receive the array with events (raw bytes -> text)
        array_of_events = request.get_data()
        # normalise single quotes so the payload parses as JSON
        array_of_events = array_of_events.decode('utf8').replace("'", '"')
        try:
            data = json.loads(array_of_events)
        except ValueError:
            # BUG FIX: the original only printed and fell through, then
            # crashed with a NameError because `data` was never bound.
            # Report the failure (the default "erro save" message) instead.
            print("non valido")
            return render_template("home.html", condition = condition, message = e)
        name_directory = "./Data/" + match
        name_directory_json = "./Data/" + match + "/json"
        name_directory_csv = "./Data/" + match + "/csv"
        if not match in os.listdir("./Data"):
            os.mkdir(name_directory)
        if not "json" in os.listdir("./Data/" + match):
            os.mkdir(name_directory_json)
        if not "csv" in os.listdir("./Data/" + match):
            os.mkdir(name_directory_csv)
        # pick the first unused sequence number for this match
        number_of_files_json = len(os.listdir(name_directory_json)) + 1
        name_file_json = name_directory_json + "/" + match + "_" + str(number_of_files_json) + ".json"
        while os.path.exists(name_file_json):
            number_of_files_json += 1
            name_file_json = name_directory_json + "/" + match + "_" + str(number_of_files_json) + ".json"
        e = "erro save"
        #write json in file
        with open(name_file_json, "w") as outfile:
            json.dump(data, outfile)
        e = "Save complete in Data folder"
        #convert json file in csv
        # number_of_files_csv = len(os.listdir(name_directory_csv))+1
        # name_file_csv = name_directory_csv + "/" + match + "_" + str(number_of_files_csv)+".csv"
        # df = pd.read_json(name_file_json)
        # df.to_csv(name_file_csv, index=None)
    return render_template("home.html", condition = condition, message = e)
if __name__ == '__main__':
    # Development entry point (Werkzeug dev server; not for production use).
    app.debug = True
    app.run()
# Create a program that asks the user to enter their name and their age.
# Print out a message addressed to them that tells them the year that they will turn 100 years old.
def main():
    """Prompt for name and age and print the year the user turns 100."""
    # BUG FIX: raw_input() does not exist on Python 3; input() returns a str
    # on Python 3 exactly as raw_input() did on Python 2.
    name = input("Give me your name: ")
    age = input("Give me your age: ")
    age = int(age)
    current_year = 2015  # NOTE: hard-coded reference year
    age_at_100 = (100 - age) + current_year
    # BUG FIX: message typo "will by" -> "will be".
    print(name + " will be 100 years old on " + str(age_at_100))
if __name__ == "__main__":
    # run only when executed as a script, not when imported
    main()
|
class EngineNotDefined(Exception):
    """Raised when an engine is required but none has been defined."""
    pass
class EngineNotConnected(Exception):
    """Raised when an operation needs an engine connection that is absent."""
    pass
|
#!/usr/bin/env python
import roslib
import rospy
import numpy as np
import cv2
import copy
from geometry_msgs.msg import Vector3Stamped,Point,PointStamped
from sensor_msgs.msg import PointCloud2
from linemod_detector.msg import NamedPoint
from visualization_msgs.msg import Marker
from cv_bridge import CvBridge
from tf import TransformListener
class ray_to_points(object):
    """Intersect a camera-frame ray with an assumed flat ground plane.

    Takes in a ray (in the camera frame) from a monocular camera or a stereo
    region lacking a disparity match, intersects it with a presumed flat
    ground plane, and republishes the intersection as a NamedPoint plus an
    RViz marker. `make_point_cloud` sketches the pitch/yaw uncertainty
    frustum around such a ray.
    """

    def __init__(self):
        rospy.init_node('ray_to_points', log_level=rospy.DEBUG)
        self.named_point_sub = rospy.Subscriber('named_point', NamedPoint, self.handle_named_point)
        self.points_pub = rospy.Publisher('points', PointCloud2)
        self.named_point_pub = rospy.Publisher('point', NamedPoint)
        self.marker_pub = rospy.Publisher('marker', Marker)
        self.tf = TransformListener()
        # geometric uncertainty (radians), overridable via private ROS params
        self.pitch_error = rospy.get_param("~pitch_error", 0.1)
        self.yaw_error = rospy.get_param("~yaw_error", 0.1)

    def handle_named_point(self, point_in):
        """Subscriber callback: project the named point onto the ground
        plane and publish the result plus a visualization marker."""
        rospy.logdebug("handle_named_point x: %s y: %s z: %s",
                       point_in.point.x,
                       point_in.point.y,
                       point_in.point.z)
        point_stamped = PointStamped()
        point_stamped.header = point_in.header
        point_stamped.point = point_in.point
        ground_named_point, odom_named_point = self.cast_ray(point_stamped, self.tf, point_in.name)
        rospy.logdebug("ground_named_point %s", ground_named_point)
        self.named_point_pub.publish(ground_named_point)
        rospy.logdebug("odom_named_point %s", odom_named_point)
        self.send_marker(odom_named_point)

    def send_marker(self, named_pt):
        """Publish a grey cylinder marker at *named_pt* for RViz."""
        m = Marker()
        m.header = copy.deepcopy(named_pt.header)
        m.type = Marker.CYLINDER
        m.pose.position = named_pt.point
        # quaternion (0.707, 0, 0, 0.707): 90 deg about x so the cylinder stands up
        m.pose.orientation.x = 0.707
        m.pose.orientation.y = 0.0
        m.pose.orientation.z = 0.0
        m.pose.orientation.w = 0.707
        m.scale.x = 0.2
        m.scale.y = 0.2
        m.scale.z = 0.2
        m.color.r = 0.8
        m.color.g = 0.8
        m.color.b = 0.8
        m.color.a = 1.0
        #m.text=named_pt.name
        self.marker_pub.publish(m)

    def cast_ray(self, point_in, tf, name):
        """Intersect the camera->point ray with the z=0 ground plane.

        Returns the intersection as a NamedPoint in /base_link along with its
        transform into /odom.

        NOTE(review): divides by (pos[2] - z); a ray parallel to the ground
        plane would divide by zero — confirm upstream guarantees.
        """
        base_link_point = tf.transformPoint('/base_link', point_in)
        t = tf.getLatestCommonTime('/base_link', point_in.header.frame_id)
        pos, quat = tf.lookupTransform('/base_link', point_in.header.frame_id, t)
        height = pos[2]
        x_slope = (base_link_point.point.x - pos[0]) / (pos[2] - base_link_point.point.z)
        y_slope = (base_link_point.point.y - pos[1]) / (pos[2] - base_link_point.point.z)
        ground_point = np.array([0., 0., 0.])
        ground_point[0] = x_slope * height
        ground_point[1] = y_slope * height
        ground_named_point = NamedPoint()
        ground_named_point.point.x = ground_point[0]
        ground_named_point.point.y = ground_point[1]
        ground_named_point.point.z = ground_point[2]
        ground_named_point.header = point_in.header
        ground_named_point.header.frame_id = 'base_link'
        ground_named_point.header.stamp = point_in.header.stamp
        ground_named_point.name = name
        odom_named_point = self.tf.transformPoint('/odom', ground_named_point)
        return ground_named_point, odom_named_point

    def make_point_cloud(self, point):
        """Rotate *point* by +/- the pitch/yaw error to get the four corner
        vectors of the uncertainty frustum around the ray.

        Returns (down_left, down_right, up_left, up_right) numpy vectors.

        BUG FIX: the original signature lacked ``self`` (so ``self.pitch_error``
        raised NameError when called as a method) and built ``vec`` with an
        integer dtype, truncating the coordinates; it also discarded its
        results, which are now returned.

        NOTE(review): negating a whole rotation matrix (``-yaw_mat``) is NOT
        the same as rotating by the negative angle — the original math is
        preserved as-is; confirm intent.
        """
        p = self.pitch_error
        pitch_mat = np.array([[1., 0., 0.],
                              [0., np.cos(p), -np.sin(p)],
                              [0., np.sin(p), np.cos(p)]])
        y = self.yaw_error
        yaw_mat = np.array([[np.cos(y), 0., np.sin(y)],
                            [0., 1., 0.],
                            [-np.sin(y), 0., np.cos(y)]])
        vec = np.array([point.x, point.y, point.z], dtype=float)
        down_left = np.dot(pitch_mat, np.dot(yaw_mat, vec))
        down_right = np.dot(pitch_mat, np.dot(-yaw_mat, vec))
        up_left = np.dot(-pitch_mat, np.dot(yaw_mat, vec))
        up_right = np.dot(-pitch_mat, np.dot(-yaw_mat, vec))
        return down_left, down_right, up_left, up_right
if __name__ == "__main__":
    try:
        ray_to_points()
        rospy.spin()  # block until the node is shut down
    except rospy.ROSInterruptException:
        # normal shutdown path (Ctrl-C / node kill)
        pass
|
from django.conf.urls.defaults import patterns, url,include
from shopback.base.authentication import UserLoggedInAuthentication
from shopback.base.views import InstanceModelView
from shopback.base.permissions import IsAuthenticated, PerUserThrottling
from shopapp.autolist.views import ListItemTaskView,CreateListItemTaskModelView
from shopapp.autolist.resources import ItemListTaskResource
# URL routes for shopapp.autolist (legacy Django string-view `patterns()` API).
# FIX: the regex patterns are now raw strings so sequences like '\d' are not
# treated as (invalid, DeprecationWarning-raising) string escapes on Python 3.
# The pattern bytes are unchanged, so routing behaviour is identical.
urlpatterns = patterns('shopapp.autolist.views',
    url(r'^$', 'pull_from_taobao', name='pull_from_taobao'),
    url(r'itemlist/$', 'list_all_items', name='list_all_items'),
    url(r'timetable/$', 'show_time_table_summary', name='show_time_table_summary'),
    url(r'weektable/(?P<weekday>\d+)/$', 'show_weektable', name='show_weektable'),
    url(r'scheduletime/$', 'change_list_time', name='change_list_time'),
    url(r'timetablecats/$', 'show_timetable_cats', name='show_timetable_cats'),
    url(r'timeslots/$', 'get_timeslots_json', name='get_timeslots'),
    url(r'logs/$', 'show_logs', name='show_logs'),
    url(r'invalid/(?P<num_iid>[^/]+)/$', 'invalid_list_task', name='invalid_list'),
    url(r'^listtask/$', CreateListItemTaskModelView.as_view(resource=ItemListTaskResource, authentication=(UserLoggedInAuthentication,), permissions=(IsAuthenticated,),)),
    url(r'^(?P<pk>[^/]+)/$', InstanceModelView.as_view(resource=ItemListTaskResource, authentication=(UserLoggedInAuthentication,), permissions=(IsAuthenticated,))),
    url(r'^list/self/$', ListItemTaskView.as_view(resource=ItemListTaskResource, authentication=(UserLoggedInAuthentication,), permissions=(IsAuthenticated,),)),
)
|
def insertionSort(li):
    """Sort *li* in place with insertion sort and return the same list."""
    for idx in range(1, len(li)):
        pos = idx
        # slide the new element left until its smaller neighbour is found
        while pos > 0 and li[pos] < li[pos - 1]:
            li[pos - 1], li[pos] = li[pos], li[pos - 1]
            pos -= 1
    return li
#directly from previous assignment
def mergesort(list):
    """Return a new, fully sorted list (classic top-down merge sort).

    BUG FIX: the original recursed but never merged — it returned nested
    tuples of insertion-sorted halves instead of one sorted list (its own
    comment admitted the merge step was missing). This version is also
    self-contained and no longer calls insertionSort on the halves.
    """
    if len(list) < 2:
        return list
    mp = len(list) // 2
    left = mergesort(list[:mp])
    right = mergesort(list[mp:])
    # merge the two sorted halves
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def adapted_sorting(li):
    """Dispatch to mergesort or insertionSort based on the parity of
    len(li)//2 (the midpoint used to halve the list).

    BUG FIX (dead code): the original had four checks; the first two already
    covered both parities of len(li)//2, so the list2 checks could never be
    reached. They are removed — observable behaviour is unchanged.
    """
    mp = len(li) // 2
    if mp % 2 == 0:
        return mergesort(li)
    return insertionSort(li)
l = [1, 3, 5, 2, 9]  # a list to use, can be changed as you want
l1 = [1, 3, 5, 2, 9]
print(mergesort(l))        # performing part 1 of this assignment
print(adapted_sorting(l1)) # part 2 of this assignment
|
import difflib
import os
import sys
from podctl.container import Container
from podctl.build import BuildScript
from podctl.visitors import (
Base,
Copy,
Packages,
Run,
User,
)
from unittest import mock
from podctl.visitors import packages
# Stub out real package installs and pin cache/CI env vars so the generated
# build scripts are deterministic in tests.
packages.subprocess.check_call = mock.Mock()
os.environ['CACHE_DIR'] = '/test'
os.environ['CI'] = '1'
def script_test(name, *visitors):
    """Golden-file helper: render the 'build' script for *visitors* and diff
    it against the fixture tests/test_<name>.sh.

    Setting TEST_REWRITE regenerates an existing fixture; a missing fixture
    is written and the test raises once so the new file can be reviewed.
    """
    result = str(Container(*visitors).script('build'))
    path = os.path.join(
        os.path.dirname(__file__),
        f'test_{name}.sh',
    )
    if os.getenv('TEST_REWRITE') and os.path.exists(path):
        os.unlink(path)
    if not os.path.exists(path):
        with open(path, 'w+') as f:
            f.write(result)
        raise Exception(f'Fixture created test_{name}.sh')
    with open(path, 'r') as f:
        expected = f.read()
    result = difflib.unified_diff(
        expected,
        result,
        fromfile='expected',
        tofile='result'
    )
    # an empty diff means the script matches; on mismatch the diff is
    # printed through the assert message expression
    assert not list(result), sys.stdout.writelines(result)
def test_build_empty():
    # a bare base image yields the minimal build script fixture
    script_test(
        'build_empty',
        Base('alpine'),
    )
def test_build_packages():
    # package installation step appears in the script
    script_test(
        'build_packages',
        Base('alpine'),
        Packages('bash'),
    )
def test_build_user():
    # user creation (name, uid, home) appears in the script
    script_test(
        'build_user',
        Base('alpine'),
        User('app', 1000, '/app'),
    )
def test_build_copy():
    # copy step (src -> dst) appears in the script
    script_test(
        'build_copy',
        Base('alpine'),
        Copy('/test', '/app'),
    )
def test_build_run():
    # Run steps: plain, sudo, sudo with redirection, and a multi-line command
    script_test(
        'build_run',
        Base('alpine'),
        Run('foo'),
        Run('sudo bar'),
        Run('sudo bar > test'),
        Run('''
        bar
        '''),
    )
|
#tfrecord file
# Serializes the raw bytes of every image under disease_photos/ into a single
# TFRecord file. NOTE: uses TensorFlow 1.x APIs (enable_eager_execution,
# tf.read_file) — will not run unchanged on TF 2.x.
import tensorflow as tf

tf.enable_eager_execution()

# to get the path of image and folder
import pathlib
import os

# 1. the path of folder
datadir = pathlib.Path(os.path.join(os.getcwd(), "disease_photos/"))
print(os.getcwd())  # /Users/eunsukkim/Desktop/tutorial1 + disease_photos/
print(datadir)

# 2. the each name of folder
label_names = sorted(item.name for item in datadir.glob("*/") if item.is_dir())
print(label_names)

# 3. each name of folder + number
label_to_index = dict((name, index) for index, name in enumerate(label_names))
print(label_to_index)

# 4. the path of image
all_image_paths = list(datadir.glob("*/*"))
print(all_image_paths)
all_image_paths = [str(path) for path in all_image_paths]
print(all_image_paths)

# 5. each image + number (label index taken from the parent folder name)
all_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_image_paths]
print(all_image_labels)

# dataset of raw (undecoded) image bytes, one element per file
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.read_file)
print(image_ds)

# save tfdata
tfrec = tf.data.experimental.TFRecordWriter("tfrecord.tfrecord")
tfrec.write(image_ds)
|
class Pixel:
    """A single image pixel, built from tuples of colour sources.

    The ``numero_fontes`` attribute is a placeholder that is meant to be
    replaced by a ``TuplaCores`` object (see the commented-out line).
    """

    def __init__(self, n_fontes: str):
        # self.tupla = TuplaCores(int(n_fontes[1]))
        self.numero_fontes = n_fontes
class TuplaCores:
    """The colour tuple composing a Pixel.

    With type P2 the pixels are monochrome, while type P3 means the pixels
    are made of RGB values.
    """

    def __init__(self, tipo_tupla: str):
        # Keep only the digit of the magic number (e.g. 'P3' -> '3').
        self.tipo_tupla = tipo_tupla[1]
class Imagem:
    """An image in .ppm format.

    The format records whether each pixel uses 2 or 3 colour sources
    (monochrome vs. RGB), the dimensions of the pixel array (the
    resolution), the maximum value each colour source may take and,
    finally, the pixel matrix — the image itself.

    The ``pixels`` attribute is meant to be replaced by ``Pixel`` objects
    and the ``tipo`` attribute is slated for removal.
    """

    def __init__(self, tipo: str, dimensao: str, maximo: int, pixels: [int]):
        # self.tipo = Pixel(tipo)
        self.tipo = tipo            # PPM magic number, e.g. 'P2' / 'P3'
        self.dimensao = dimensao    # raw dimension string from the header
        self.maximo = maximo        # maximum value of each colour source
        self.pixels = pixels        # flat list of pixel values
        self.histograma = []        # filled in by gerar_histograma()

    def salvar(self, nome: str):
        """Write the image to *nome* in PPM text format.

        NOTE(review): no separator is written between ``tipo`` and
        ``dimensao`` — confirm those strings carry their own newlines.
        """
        with open(nome, mode='w') as saida:
            saida.write(str(self.tipo))
            saida.write(str(self.dimensao))
            # P1 (bitmap) files carry no maximum-value line.
            if '1' not in str(self.tipo):
                saida.write(str(self.maximo))
                saida.write('\n')
            for pixel in self.pixels:
                # NOTE(review): each pixel is iterated here, so pixels is
                # expected to hold iterables despite the [int] hint above.
                for valor in pixel:
                    saida.write(str(valor) + '\n')

    def mostrar_propriedades(self):
        """Print the image's type, dimensions and maximum value."""
        print('Tipo: {}\nDimensões: {}\nValor máximo: {}'.format(
            self.tipo,
            self.dimensao,
            self.maximo
        ))

    def gerar_histograma(self, normalizar: bool = False, salvar: bool = False):
        """Compute the image histogram h(rk) = nk.

        rk is the k-th grey level and nk the number of pixels holding that
        level. With ``normalizar=True`` the histogram becomes
        p(rk) = nk/n for k in [0, L-1] (L = maximum pixel value): the
        probability of each grey level occurring, so the histogram shape is
        unchanged but values lie between 0 and 1.

        :param normalizar: normalise counts to probabilities
        :param salvar: save a bar chart of the histogram under img/
        :return: void
        """
        from collections import Counter
        histograma = []
        nks = []
        pixels = self.pixels
        # Count every level in one pass instead of list.count() per level.
        contagens = Counter(pixels)
        niveis_cinza = list(set(pixels))
        for nivel in niveis_cinza:
            if normalizar:
                nk = contagens[nivel] / len(pixels)
            else:
                nk = contagens[nivel]
            nks.append(nk)
            histograma.append([nivel, nk])
        self.histograma = histograma
        if salvar:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(figsize=(23, 8))
            ax.bar(niveis_cinza, nks)
            ax.set_title('Histograma da imagem')
            ax.set_xlabel('Níveis de cores')
            ax.set_ylabel('nk')
            if normalizar:
                fig.savefig('img/histograma_norm.png')
            else:
                fig.savefig('img/histograma.png')

    def equalizar(self, salvar: bool = False, verboso: bool = False):
        """Histogram-equalise the image.

        Builds the cumulative distribution of the grey levels and rescales
        it to [0, maximo].

        NOTE(review): self.pixels is replaced by one value per *distinct*
        grey level (the equalisation map), not by remapped pixels, so the
        pixel count changes — confirm this is the intended contract.

        :param salvar: save a bar chart of the equalised histogram
        :param verboso: print the intermediate histograms
        :return: void
        """
        novo_histograma = []
        pixels_img = self.pixels
        niveis_cinza = list(set(pixels_img))
        soma = 0
        if verboso:
            print('Numero de níveis de cinza: {}'.format(len(niveis_cinza)))
            hist_norm = []
            hist_cum = []
            # The normalised histogram gives the probability of each grey
            # level appearing in an arbitrary pixel.
            for nivel in niveis_cinza:
                nk = pixels_img.count(nivel) / len(pixels_img)
                hist_norm.append(nk)
            print('Histograma normalizado:\n{}'.format(hist_norm))
            for i in range(0, len(hist_norm)):
                # Cumulative sum of those probabilities.
                soma += hist_norm[i]
                hist_cum.append(soma)
            print('Histograma cumulativo:\n{}'.format(hist_cum))
            for i in range(0, len(hist_cum)):
                # Leave the probabilistic representation: scale to [0, maximo].
                valor_final = round(hist_cum[i] * self.maximo)
                novo_histograma.append(valor_final)
            # Fixed: a stray second format() argument was silently ignored.
            print('Histograma final:\n{}'.format(novo_histograma))
        # if the verbose option is disabled
        else:
            for nivel in niveis_cinza:
                # probability of this grey level in an arbitrary pixel
                nk = pixels_img.count(nivel) / len(pixels_img)
                # cumulative sum of those probabilities
                soma += nk
                # leave the probabilistic representation
                valor_final = round(soma * self.maximo)
                novo_histograma.append(valor_final)
        print('Fim da operação, alterando pixels da imagem')
        self.pixels = novo_histograma
        if salvar:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(figsize=(23, 8))
            ax.bar(niveis_cinza, novo_histograma)
            ax.set_title('Histograma equalizado da imagem original')
            ax.set_xlabel('Níveis de cores')
            ax.set_ylabel('Valor normalizado')
            fig.savefig('img/histograma_equalizado.png')
|
python简介
版本 www.python.org 2.7.x / 3.5.x
特点、应用场景
'''
高级的、面向对象的、可扩展的、可移植的、易于学习和维护的程序语言
动态语言,程序在运行时可以动态修改对象元数据
弱类型语言,数据由 标量scalar和容器container 表示 ,GC进行内存资源管理
一种胶水语言,依赖第三方软件包,用于业务逻辑的编写和模块的组合
适用 快速交付、原型建模,自动化运维 。。
结合第三方软件包,具有强大的功能,适应于场景: 网络通信、UI、WebService、分布式计算和存储..
除了OS和Driver,几乎没有不能做的!
灵活和松散的特性,入门容易,精通难,坑很多,步步惊心!
'''
相关项目
django,pyqt numpy goagent openstack 豆瓣 spark boost pycrypto pil xlws
实现版本 cpython
安装 pip 市场pypi download
wget https://bootstrap.pypa.io/get-pip.py
运行 idel,ipython,pycharm os运行差异
ipython cmd(? ?? * %run cmd )
ipython qtconsole --pylab=inline
pip install qtconsole
ipython --pylab
ipython notebook
最近的两个输出结果保存在_(一个下划线)和__(两个下划线)变量中
!cmd 执行系统命令
帮助系统 pydoc
文件形式: py pyc pyo pyw pyd
语法介绍
helloworld
面向过程和对象 内建函数 模块级函数 类级函数 对象函数
关键字
False class finally is return
None continue for lambda try
True def from nonlocal while
and del global not with
as elif if or yield
assert else import pass
break except in raise
from import as __main__ dir 包管理 __init__.py
文档缩进 注释
数值类型 int long float complex
布尔 True False
字符串 单双引号
None
数值运算 逻辑运算 位操作
字符串操作相关函数 *
数组 list与tuple 范围取值 数组操作函数 len range crud
字典dict k=v items keys values crud
if else pass
for while in
def 默认参数 *args **kvargs def内嵌
lambda map reduce sort
try except finally
class self inherit super @staticmethod object
has_attr set_attr
对象内建函数 init getitem str
重载: http://www.cnblogs.com/wjoyxt/p/5112537.html
数据库访问
dbi https://www.python.org/dev/peps/pep-0249/
psycopg http://initd.org/psycopg/
conn = psycopg2.connect(dbname,user, password)
cursor
to see : http://www.cnblogs.com/yy3b2007com/p/5724427.html
sqlite3
http://www.runoob.com/sqlite/sqlite-python.html
connect(path) connect("memory:")
docstring helpdoc
sphinx : yum install python-sphinx sphinx-quickstart
pyment: https://github.com/dadadel/pyment
pyment test.py patch -p1 < test.py.patch
doxygen http://www.stack.nl/~dimitri/doxygen/
pdoc https://github.com/BurntSushi/pdoc
装饰器@decorator
内存管理 gc None 引用计数 deepcopy
相关函数
dir type id list tuple open str hex int map reduce list zip
相关模块
os sys os.path time socket json urllib struct pickle sqlite re threading stringIO logging unittest
项目介绍
ctypes https://github.com/adoggie/py-ffmpeg/blob/master/ffmpeg.py
pyqt https://github.com/adoggie/vlc_player/blob/master/video_player.pyw
gevent
django
numpy
scrapy http web爬虫框架
scapy 网络协议解析包
py4j - 实现python与java之间互相调用
pyjnius - python调用java类,据说优于py4j,目前没感觉到!
chardet - 字符集检测
"iconv -f GB2312 -t UTF-8 xxx.txt " " chardetect.py xxx.txt"
paramiko - ssh 自动化
pykafka
zkpython
pysal - 地理空间计算库
numpy - 数值计算 numpy,scipy,matplotlib 在centos7下无法用pip安装,直接 yum install numpy 或者使用 virtualenv安装 ...
scipy - 数值计算
Plotly - 交互式图形化 数据 绘制, Plotly: a graphing library for making interactive, publication-quality graphs. See examples of statistic, scientific, 3D charts, and more here: https://plot.ly/python.
celery - 异步rpc
pep8.py - 代码规格检查
pydot / pygraphviz / networkx / osg (社交网络有向图) http://networkx.github.io
matplotlib.pyplot
pyexcept
poster / requests (http client)
Docutils rst格式转换
Pygments 代码语法高亮
objgraph/pygraph 绘制有向图 objgraph.show_refs([c],filename='/tmp/test.png')
pandas http://pandas.pydata.org/ 数据分析包
mysql-connector https://github.com/sanpingz/mysql-connector
ogr gdal gis工具包
jupyter notebook 基于web的交互式学习环境
pip install jupyter
利用 jupyter 进行绘图练习:
http://matplotlib.org/ ,可通过%matplotlib inline 激活,(https://www.dataquest.io/blog/matplotlib-tutorial/)
google python-fire : pip install fire 函数与shell的集成
zeep - soap wsdl 服务
polygon - 多边形库绘制和计算 https://www.j-raedler.de/projects/polygon/
fire - google command api tools
OpenMPI - 并行计算
cython - python 扩展包
pymunk 物理游戏引擎包 http://www.pymunk.org/ 可以运行在jupyter notebook
微服务
--------
connexion - "https://pypi.python.org/pypi/connexion 基于配置的微型service app, 基本实现功能在djangoframework中都覆盖了。 "
相当多的优秀第三方扩展 库 https://github.com/vinta/awesome-python
Python-Markdown
pip install markdown Pygments
pygmentize -S default -f html > default.css
|
import matplotlib.pyplot as plt
import numpy as np

# Demo: a figure containing two subplots stacked vertically.
plt.figure()

# Dense sampling for smooth curves, sparse sampling for markers.
x_dense = np.linspace(-5, 5, 500)
x_sparse = np.linspace(-5, 5, 20)

# Top cell of a notional 2-row, 1-column grid: sin and cos.
plt.subplot(211)
# A single plot() call draws sin(x) as a blue solid line and cos(x) as a
# red dashed line.
plt.plot(x_dense, np.sin(x_dense), 'b-', x_dense, np.cos(x_dense), 'r--')
plt.legend(["sin", "cos"])

# Bottom cell: sinh/cosh as lines (dense x) plus yellow-triangle and
# green-circle markers (sparse x), drawn as four separate plot() calls.
plt.subplot(212)
plt.plot(x_dense, np.sinh(x_dense), 'y-')
plt.plot(x_sparse, np.sinh(x_sparse), 'y^', label="sinh")
plt.plot(x_dense, np.cosh(x_dense), 'g-')
plt.plot(x_sparse, np.cosh(x_sparse), 'go', label="cosh")

# Shade the region between the curves, semi-transparent.
plt.fill_between(x_dense, np.cosh(x_dense), np.sinh(x_dense), facecolor='green',
                 alpha=0.2)

# Cosmetic settings.
plt.grid(True)
plt.xlabel('x')
plt.ylabel('y')
# Labels were given in the plot calls above, so legend() needs no args.
plt.legend()

# Finally, display the plot.
plt.show()
|
# -*- coding: utf-8 -*-
# @Time : 2021-03-11 16:38
# @Author : sloan
# @Email : 630298149@qq.com
# @File : MulprocessApply.py
# @Software: PyCharm
from concurrent.futures import ProcessPoolExecutor
from MulprocessBased import SampleGeneratorBase
from sloan_utils import Panda_tool
import os
import time
import cv2
import os.path as osp
import glob
import json
import numpy as np
class GenerateTestImage(SampleGeneratorBase):
    '''
    Slide a window of width 4096, height 3500 with stride 2048 over each
    source image; produces the cropped sub-images plus a matching COCO json
    without bbox annotations.
    '''
    def __init__(self,workers=4):
        # NOTE(review): super(SampleGeneratorBase, self) resolves *past*
        # SampleGeneratorBase in the MRO, so its __init__ is skipped; the
        # conventional form is super(GenerateTestImage, self).__init__().
        # Confirm whether skipping the base initializer is intentional.
        super(SampleGeneratorBase,self).__init__()
        self.workers = workers           # process-pool size for batch_sample
        self.threading_num = 0           # returned by process_img as a task tag
        self.save_root_path = None       # set by batch_sample before cropping
        self.overlap = (2048,2048)       # (w, h) overlap subtracted per step
        self.hw = (3500,4096)            # (rows, cols) of each crop window
    def process_img(self,*args):
        """Crop one image into overlapping tiles and write them to disk.

        args[0] is the image path; returns self.threading_num as a tag.
        """
        img_path = args[0]
        img_name = osp.split(img_path)[-1]
        src = cv2.imread(img_path)
        overlap_factor_w, overlap_factor_h = self.overlap
        row_cutshape, col_cutshape = self.hw
        crop_img = src.copy()
        crop_img_h,crop_img_w = crop_img.shape[:2]
        rows,cols = int(np.ceil(crop_img_h/row_cutshape)), int(np.ceil(crop_img_w/col_cutshape))
        # Not past the image border yet: keep adding tiles per axis until
        # the tiled span covers the whole image.
        rows_gap = rows * row_cutshape - (rows - 1) * overlap_factor_h
        cols_gap = cols * col_cutshape - (cols - 1) * overlap_factor_w
        while rows_gap < crop_img_h:
            rows += 1
            rows_gap = rows * row_cutshape - (rows - 1) * overlap_factor_h
        while cols_gap < crop_img_w:
            cols += 1
            cols_gap = cols * col_cutshape - (cols - 1) * overlap_factor_w
        for row in range(rows):
            for col in range(cols):
                # Tile corners; each step advances by (cutshape - overlap).
                x1, y1, x2, y2 = (col * col_cutshape - col * overlap_factor_w), (
                        row * row_cutshape - row * overlap_factor_h), \
                                 ((col + 1) * col_cutshape - col * overlap_factor_w), (
                        (row + 1) * (row_cutshape) - row * overlap_factor_h)
                if x2>crop_img_w:  # tile's right edge passes the image border
                    offset = x2 - crop_img_w
                    x2 = crop_img_w
                    x1 -= offset
                if y2>crop_img_h:  # tile's bottom edge passes the image border
                    offset = y2 - crop_img_h
                    y2 = crop_img_h
                    y1 -= offset
                img_temp = crop_img[y1:y2, x1:x2]
                im_name = img_name[:-4] + '_{}x{}.jpg'.format(row, col)
                save_img_path = osp.join(self.save_root_path, im_name)
                # print("{} is saved!".format(im_name))
                if not osp.exists(save_img_path):
                    cv2.imwrite(save_img_path, img_temp)
        return self.threading_num
    def batch_sample(self,*args):
        """Crop every image under img_root_path in a process pool, then
        emit the bbox-free test-set COCO json.

        args: (img_root_path, save_img_root_path, save_test_json_path,
               overlap, hw)
        """
        img_root_path,save_img_root_path,save_test_json_path,overlap,hw = args
        os.makedirs(save_img_root_path,exist_ok=True)
        self.save_root_path = save_img_root_path
        self.overlap = overlap
        self.hw = hw
        img_path_list = glob.glob(img_root_path+'/*/*jpg')
        s1 = time.time()
        print(len(img_path_list))
        results = []
        task_pool = ProcessPoolExecutor(max_workers=self.workers)
        count = 0
        for img_path in img_path_list:
            # count += 1
            # if count > 2: break
            if self.needed_to_process(img_path):
                # NOTE(review): submits self._process_img but this class
                # defines process_img — presumably the base class wraps it;
                # verify against SampleGeneratorBase.
                rt = task_pool.submit(self._process_img, img_path)
                results.append(rt)
        results = [rt.result() for rt in results if rt]
        print(len(results))
        Panda_tool.gen_test_json(test_image_path=save_img_root_path,
                                 save_json_path=save_test_json_path,
                                 hw=self.hw)
        print("-----finished-------")
        print("cost time:{} s".format(time.time() - s1))
class GenerateTrainImage(SampleGeneratorBase):
    '''
    Slide a window of width 4096, height 3500 with stride 2048 over each
    source image; produces the cropped sub-images plus a matching COCO json
    including the (re-clipped) bbox annotations.
    '''
    def __init__(self,workers=4):
        # NOTE(review): super(SampleGeneratorBase, self) skips the base
        # class initializer (same pattern as GenerateTestImage) — confirm
        # this is intentional.
        super(SampleGeneratorBase,self).__init__()
        self.workers = workers           # process-pool size for batch_sample
        self.threading_num = 0           # returned by process_img as a task tag
        self.save_root_path = None       # set by batch_sample before cropping
        self.overlap = (2048,2048)       # (w, h) overlap subtracted per step
        self.hw = (3500,4096)            # (rows, cols) of each crop window
    def process_img(self,*args):
        """Crop one annotated image into tiles.

        args: (img_path, v) where v['bbox'] holds the image's boxes.
        Returns (self.threading_num, (images, annotations)) with per-image
        COCO records whose ids are local to this call (renumbered later by
        batch_sample).
        """
        img_path,v = args[0],args[1]
        bboxes = v['bbox']
        img_name = osp.split(img_path)[-1]
        src = cv2.imread(img_path)
        src_h,src_w = src.shape[:2]
        # some param and var
        overlap_factor_w, overlap_factor_h = self.overlap
        row_cutshape, col_cutshape = self.hw
        images_dict = {}
        img_idx,anno_idx = 0, 0
        images, annotations = [], []
        crop_img = src.copy()
        crop_img_h,crop_img_w = crop_img.shape[:2]
        rows,cols = int(np.ceil(crop_img_h/row_cutshape)), int(np.ceil(crop_img_w/col_cutshape))
        # Not past the image border yet: add one more tile per axis.
        # if rows * row_cutshape - (rows - 1) * overlap_factor < crop_img_h:
        #     rows += 1
        # if cols * col_cutshape - (cols - 1) * overlap_factor < crop_img_w:
        #     cols += 1
        rows_gap = rows * row_cutshape - (rows - 1) * overlap_factor_h
        cols_gap = cols * col_cutshape - (cols - 1) * overlap_factor_w
        while rows_gap < crop_img_h:
            rows += 1
            rows_gap = rows * row_cutshape - (rows - 1) * overlap_factor_h
        while cols_gap < crop_img_w:
            cols += 1
            cols_gap = cols * col_cutshape - (cols - 1) * overlap_factor_w
        # Crop the image as an n-row by m-column grid of tiles.
        for row in range(rows):
            for col in range(cols):
                cut_x1, cut_y1, cut_x2, cut_y2 = (col * col_cutshape - col * overlap_factor_w), (
                        row * row_cutshape - row * overlap_factor_h), \
                                                 ((col + 1) * col_cutshape - col * overlap_factor_w), (
                        (row + 1) * (row_cutshape) - row * overlap_factor_h)
                if cut_x2 > crop_img_w:  # tile's right edge passes the image border
                    col_offset = cut_x2 - crop_img_w
                    cut_x2 = crop_img_w
                    cut_x1 -= col_offset
                if cut_y2 > crop_img_h:  # tile's bottom edge passes the image border
                    row_offset = cut_y2 - crop_img_h
                    cut_y2 = crop_img_h
                    cut_y1 -= row_offset
                crop_img_cp = crop_img.copy()
                # Decide, per bbox, whether it belongs to this tile.
                for bbox in bboxes:
                    dst_cat, dst_w, dst_h = bbox['category_id'], bbox['w'], bbox['h']
                    dst_x1, dst_y1 = bbox['x1'], bbox['y1']
                    dst_x2, dst_y2 = dst_x1 + dst_w, dst_y1 + dst_h
                    if Panda_tool.judge_saved(src_pos=(cut_x1, cut_y1, cut_x2, cut_y2),
                                              dst_pos=(dst_x1, dst_y1, dst_x2, dst_y2),
                                              iof_thr=0.5):
                        # Cropping may cut a box in two: recompute its
                        # position relative to the tile and clip to borders.
                        dst_x1, dst_y1, dst_x2, dst_y2 = max(dst_x1 - cut_x1, 0), max(dst_y1 - cut_y1, 0), \
                                                         min(dst_x2 - cut_x1, cut_x2), min(dst_y2 - cut_y1, cut_y2)
                        # Emit COCO-format records (tile written lazily the
                        # first time one of its boxes survives the filter).
                        im_name = osp.split(img_path)[-1][:-4] + '_{}x{}.jpg'.format(row, col)
                        if im_name not in images_dict.keys():
                            img_temp = crop_img_cp[cut_y1:cut_y2, cut_x1:cut_x2]
                            img_temp_h, img_temp_w = img_temp.shape[:2]
                            save_img_path = osp.join(self.save_root_path, im_name)
                            if not osp.exists(save_img_path):
                                cv2.imwrite(save_img_path, img_temp)
                            images_dict[im_name] = img_idx
                            image = {}
                            image['file_name'] = im_name
                            image['width'] = img_temp_w
                            image['height'] = img_temp_h
                            image['id'] = img_idx
                            images.append(image)
                            img_idx += 1
                        annotation = {}
                        dst_w, dst_h = dst_x2 - dst_x1, dst_y2 - dst_y1
                        box = [dst_x1, dst_y1, dst_w, dst_h]
                        annotation['bbox'] = box
                        annotation['area'] = dst_w * dst_h
                        annotation['iscrowd'] = 0
                        annotation['image_id'] = images_dict[im_name]
                        annotation['category_id'] = dst_cat
                        annotation['id'] = anno_idx
                        anno_idx += 1
                        annotations.append(annotation)
        return self.threading_num,(images,annotations)
    def batch_sample(self,*args):
        """Crop every annotated image in a process pool, renumber the
        per-worker COCO ids into one global sequence and save the json.

        args: (img_root_path, save_img_root_path, save_anno_path,
               src_anno_json, overlap, hw)
        """
        img_root_path,save_img_root_path,save_anno_path,src_anno_json,overlap,hw = args
        os.makedirs(save_img_root_path,exist_ok=True)
        self.save_root_path = save_img_root_path
        self.overlap = overlap
        self.hw = hw
        all_instance, cla_instance, img_instance, defect, categories = Panda_tool._create_data_dict(
            json_paths=[src_anno_json],
            img_path=[img_root_path])
        s1 = time.time()
        print(len(img_instance.keys()))
        results = []
        task_pool = ProcessPoolExecutor(max_workers=self.workers)
        count = 0
        for img_path,v in img_instance.items():
            # count += 1
            # if count > 3: break
            if self.needed_to_process(img_path):
                # NOTE(review): submits self._process_img but this class
                # defines process_img — presumably wrapped by the base class.
                rt = task_pool.submit(self._process_img, img_path,v)
                results.append(rt)
        meta = {}
        img_idx = 0
        anno_idx = 0
        new_images,new_annotations = [],[]
        for rt in results:
            if rt:
                images,annotations = rt.result()[1]
                # Map each worker-local image id onto the global sequence.
                img_old2new = {}
                for image in images:
                    img_old2new[image['id']] = img_idx
                    image['id'] = img_idx
                    img_idx += 1
                    new_images.append(image)
                for annotation in annotations:
                    annotation['image_id'] = img_old2new[annotation['image_id']]
                    annotation['id'] = anno_idx
                    anno_idx += 1
                    new_annotations.append(annotation)
        # Save in COCO format.
        meta['images'] = new_images
        meta['annotations'] = new_annotations
        meta['categories'] = Panda_tool.CATEGOTIES
        Panda_tool.write2result(meta, save_anno_path)
        print(len(results))
        print("-----finished-------")
        print("cost time:{} s".format(time.time() - s1))
if __name__ == "__main__":
    # Patch (cropped tile) width/height in pixels.
    PATCH_W, PATCH_H = 3000,3000
    # TODO: adjust back to 1500
    PATCH_OVERLAP = (1200,1200)
    # TODO: test data A/B adjust
    # Crop the round-1 test-B images into overlapping patches and emit the
    # bbox-free COCO json describing them.
    GTI_FC = GenerateTestImage(workers=8)
    GTI_FC.batch_sample('../../../tcdata/panda_round1_test_202104_B/',
                        '../../../user_data/tmp_data/panda_round1_test_202104_B_patches_{}_{}'.format(PATCH_W,PATCH_H),
                        '../../../user_data/tmp_data/panda_round1_coco_full_patches_wh_{}_{}_testB.json'.format(PATCH_W,PATCH_H),
                        PATCH_OVERLAP,(PATCH_H,PATCH_W))
    # Equivalent invocation for the annotated training set:
    # GTI_FC = GenerateTrainImage(workers=16)
    # GTI_FC.batch_sample('panda_round1_train_202104/',
    #                     'panda_round1_train_202104_patches_{}_{}/'.format(PATCH_W,PATCH_H),
    #                     'panda_round1_coco_full_patches_wh_{}_{}.json'.format(PATCH_W,PATCH_H),
    #                     'panda_round1_coco_full.json',
    #                     PATCH_OVERLAP,(PATCH_H,PATCH_W)
    #                     )
|
""" The setup script to install Qrtmp as a package in your Python distribution. """
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# consider migrating to setuptools.setup — confirm the project's minimum
# supported Python version first.
from distutils.core import setup

setup(name='Qrtmp',
      version='0.2.0',
      description='Qrtmp - Quick/Simple RTMP Implementation.',
      author='Goel Biju',
      packages=['qrtmp']
      )
|
import re
import requests
import tweepy
import ssl
import time
from botocore.exceptions import ReadTimeoutError
from requests.exceptions import Timeout, ConnectionError
import datetime
from PolitiStats.properties import getConsumerKey, getCivicsKey, getConsumerSecret, getAccessKey, getAccessSecret, \
getNewsKey
def getStateOfficials(state):
    """Fetch the elected officials of a US state from the Google Civic Info API.

    :param state: two-letter state code, e.g. "az"
    :return: list of dicts with "office", "name", "party" and "socials" keys
    """
    request = requests.get(
        "https://civicinfo.googleapis.com/civicinfo/v2/representatives/ocd-division%2Fcountry%3Aus%2Fstate%3A" +
        state + "?key=" + getCivicsKey())
    civics_data = request.json()
    officials = []
    # The API returns parallel 'offices' and 'officials' lists; pair them up
    # positionally. NOTE(review): the original indexed officials by office
    # position and assumed equal lengths; zip() keeps that pairing.
    for office, person in zip(civics_data['offices'], civics_data['officials']):
        officials.append({
            "office": office['name'],
            "name": person['name'],
            "party": person['party'],
            # Some officials expose no social-media channels; the previous
            # bare except fell back to the string "null" — keep that contract
            # without swallowing unrelated errors.
            "socials": person.get('channels', "null"),
        })
    return officials
def clean_tweets(content):
    """Decode HTML character references and strip Twitter clutter from a tweet.

    Converts the named/numeric entities Twitter leaves in tweet text
    (e.g. &amp;amp;, &amp;lt;, &amp;#39;) to their Unicode characters, drops
    non-ASCII characters, and removes handles/hashtags.

    :return: the cleaned text, or None for retweets and tweets with 3+
             mentions or 3+ hashtags
    """
    # Decode entities first (their trailing ';' would otherwise be eaten by
    # the ';' -> ' ' replacement below), then drop stray separators, literal
    # "\u" escapes, ellipsis characters and newlines.
    content = (content.replace('&amp;', '&').replace('&lt;', '<')
               .replace('&gt;', '>').replace('&quot;', '"')
               .replace('&#39;', "'").replace(';', " ")
               .replace(r'\u', " ").replace('\u2026', "")
               .replace('\n', ''))
    # Bug fix: the ASCII round-trip result was previously discarded, so
    # non-ASCII characters were never actually removed.
    content = content.encode('ascii', 'ignore').decode('ascii')
    # Exclude retweets and tweets with too many mentions/hashtags, then strip
    # handles and hashtags from what remains.
    if not any(('RT @' in content, 'RT' in content,
                content.count('@') >= 3, content.count('#') >= 3)):
        content = re.sub(r'@[^\s]+', '', content)
        content = re.sub(r'#[^\s]+', '', content)
        return content
    return None
# Handling authentication with Twitter (module-level: runs on import).
auth = tweepy.OAuthHandler(getConsumerKey(), getConsumerSecret())
auth.set_access_token(getAccessKey(), getAccessSecret())
# Create a wrapper for the API provided by Twitter.
# NOTE(review): wait_on_rate_limit_notify was removed in tweepy v4 — this
# module appears pinned to tweepy 3.x; confirm before upgrading.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# Function for handling pagination in our search
def limit_handled(cursor):
    """Yield items from a tweepy cursor, sleeping through rate-limit errors."""
    while True:
        try:
            item = cursor.next()
        except StopIteration:
            # Cursor exhausted: end the generator cleanly.
            return
        except tweepy.RateLimitError:
            print('Reached rate limit. Sleeping for >15 minutes')
            time.sleep(15 * 61)
            continue
        yield item
# Function to make the search using Twitter API for 7 days of data
def search_tweets_timeline(official_handle):
    """Collect up to 20 cleaned mention-tweets per day for the past 7 days.

    :return: list of 7 lists of cleaned tweet texts (index 0 = today)
    """
    timeline = []
    for days_back in range(0, 7):
        daily = []
        cutoff = datetime.date.today() - datetime.timedelta(days=days_back)
        cursor = tweepy.Cursor(api.search,
                               q="@" + official_handle,
                               count=20,
                               tweet_mode='extended',
                               lang="en",
                               result_type='recent',
                               until=cutoff)
        for tweet in limit_handled(cursor.items(20)):
            try:
                # Extended tweets (>140 characters) carry full_text.
                cleaned = clean_tweets(tweet.full_text)
                if cleaned is not None:
                    daily.append(cleaned)
            except Exception as e:
                print('Encountered Exception:', e)
        timeline.append(daily)
    return timeline
# Fetches url for image of official and description
# Fetches url for image of official and description
def get_official_info(official_name):
    """Look up a politician's portrait URL and short bio on Wikipedia.

    :param official_name: full name, e.g. "Joe Biden"
    :return: dict with "Image" (URL) and "Description" (plain text) keys
    """
    # queries wikipedia to find the title of article
    title = requests.get(
        "https://en.wikipedia.org/w/api.php?action=opensearch&search=" + official_name.replace(' ',
                                                                                               '') + "&limit=1&namespace=0&format=json")
    # Fetches the main image based on the title
    image = requests.get("http://en.wikipedia.org/w/api.php?action=query&titles="
                         + title.json()[3][0][30:] + "&prop=pageimages&format=json&pithumbsize=100")
    # Fetches short description of politician
    description = requests.get(
        "https://en.wikipedia.org/w/api.php?format=json&action=query"
        "&prop=extracts&exintro&explaintext&redirects=1&titles=" +
        title.json()[3][0][30:])
    # Get larger image than thumbnail (rewrite the 100px URL to 512px).
    try:
        image_src = str(list(image.json()['query']['pages'].items())[0][1]['thumbnail']['source'])
        index = image_src.find("px")
        image_src = image_src.replace(image_src[index - 2:], "512" + image_src[index:])
    # If no image exists in wikipedia, fall back to a stock photo.
    # (Narrowed from a bare except so Ctrl-C/SystemExit still propagate.)
    except Exception:
        image_src = "https://images.unsplash.com/photo-1547354142-526457358bb7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=800&q=80"
    try:
        desc_src = list(description.json()['query']['pages'].items())[0][1]['extract']
    except Exception:
        desc_src = "No description exists..."
    # returns a dictionary with description and image
    info = {"Image": image_src,
            "Description": desc_src}
    # Returns the actual image
    return info
# Function to make the search using Twitter API
def search_tweets(official_handle):
    """Fetch up to 20 popular tweets mentioning the handle.

    :return: list of dicts with "Text" and "Day" (day-of-month) keys
    """
    cursor = tweepy.Cursor(api.search,
                           q="@" + official_handle,
                           count=20,
                           tweet_mode='extended',
                           lang="en",
                           result_type='popular',
                           until=datetime.date.today())
    tweets = []
    for tweet in limit_handled(cursor.items(20)):
        try:
            # Extended tweets (>140 characters) carry full_text.
            content = tweet.full_text
            if content is not None:
                tweets.append({"Text": content,
                               "Day": int(tweet.created_at.day)})
        except Exception as e:
            print('Encountered Exception:', e)
    return tweets
def work(official_handle):
    """Search tweets for a handle, retrying once after transient errors.

    :return: list of tweet dicts from search_tweets (empty if all attempts fail)
    """
    # Initializing the Twitter search
    tweets = []
    try:
        tweets = search_tweets(official_handle)
    # Stop temporarily when hitting Twitter rate Limit
    except tweepy.RateLimitError:
        print("RateLimitError...waiting ~15 minutes to continue")
        time.sleep(1001)
        # Bug fix: the retry's result was previously discarded, so every
        # retried call returned [].
        tweets = search_tweets(official_handle)
    # Stop temporarily when getting a timeout or connection error
    except (Timeout, ssl.SSLError, ReadTimeoutError,
            ConnectionError) as exc:
        print("Timeout/connection error...waiting ~15 minutes to continue")
        time.sleep(1001)
        tweets = search_tweets(official_handle)
    # Stop temporarily when getting other errors
    except tweepy.TweepError as e:
        if 'Failed to send request:' in e.reason:
            print("Time out error caught.")
            time.sleep(1001)
            tweets = search_tweets(official_handle)
        elif 'Too Many Requests' in e.reason:
            print("Too many requests, sleeping for 15 min")
            time.sleep(1001)
            tweets = search_tweets(official_handle)
        else:
            print(e)
            print("Other error with this user...passing")
    return tweets
# Fetch last 20 tweets from
# Fetch last 20 tweets from
def user_tweets(official_handle):
    """Return summary dicts for the handle's most recent timeline tweets."""
    return_list = []
    for status in api.user_timeline(official_handle):
        raw = status._json
        return_list.append({
            "Name": raw['user']['name'],
            "Handle": raw['user']['screen_name'],
            "Text": raw['text'],
            "Favorites": raw['favorite_count'],
            "Retweets": raw['retweet_count']
        })
    return return_list
# Get list of news articles about a politician
# Get list of news articles about a politician
def get_news(official_name):
    """Return up to 10 recent articles whose title mentions the official."""
    # NewsAPI API call
    url = ('https://newsapi.org/v2/everything?'
           'apiKey=' + getNewsKey() + '&'
           'qInTitle=\"' + official_name + '\"&'
           'language=en&'
           'sortBy=publishedAt&'
           'pageSize=10')
    articles = requests.get(url).json()['articles']
    # Format each entry: drop carriage returns and NewsAPI's trailing
    # " [+N chars]" marker at the end of the content.
    newsLines = []
    for entry in articles:
        if entry['content'] is None:
            continue
        body = entry['content'].replace("\r\n", '')
        cut = body.find('[')
        if cut != -1:
            body = body[0:cut - 3]
        newsLines.append({"Title": entry['title'],
                          "Content": body,
                          "Source": entry['source']['name'],
                          "URL": entry['url']})
    return newsLines
# Get a 7 day timeline of news
# Get a 7 day timeline of news
def get_news_timeline(official_name):
    """Return 7 daily lists of article snippets mentioning the official."""
    timeline = []
    for days_back in range(0, 7):
        day = (datetime.date.today() - datetime.timedelta(days=days_back)).isoformat()
        # NewsAPI API call restricted to a single day.
        url = ('https://newsapi.org/v2/everything?'
               'apiKey=' + getNewsKey() + '&'
               'qInTitle=\"' + official_name + '\"&'
               'language=en&'
               'from=' + day + '&' +
               'to=' + day + '&' +
               'sortBy=publishedAt&'
               'pageSize=5')
        articles = requests.get(url).json()['articles']
        # Drop carriage returns and the trailing " [+N chars]" marker.
        daily = []
        for entry in articles:
            if entry['content'] is None:
                continue
            body = entry['content'].replace("\r\n", '')
            cut = body.find('[')
            if cut != -1:
                body = body[0:cut - 3]
            daily.append(body)
        timeline.append(daily)
    return timeline
# print(search_tweets_timeline("JoeBiden"))
# print(user_tweets("JoeBiden"))
# getStateOfficials("az")
# work("SenMcSallyAZ")
# print(get_news_timeline("Joe Biden"))
# print(get_official_info("Donald Trump"))
|
import pytest
import pathlib
import nipkg_assembler.create_default_package as nipm
def test_create_default_package():
    """Smoke test: main() creates the target package dir from the default template."""
    nipm.main('tests/temp_package', 'nipkg_assembler/default_package', False)
    # NOTE(review): 'tests/temp_package' is never cleaned up, so reruns
    # exercise the already-exists path — confirm that's intended.
    assert pathlib.Path('tests/temp_package').exists()
|
# Generated by Django 2.1.12 on 2019-11-03 17:17
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: custom User plus Course/Lab and their review models.

    Auto-generated by Django 2.1.12 — do not hand-edit; create a new
    migration for any schema change.
    """

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('year', models.CharField(choices=[('B1', "Bachelor's 1"), ('B2', "Bachelor's 2"), ('B3', "Bachelor's 3"), ('B4', "Bachelor's 4"), ('M1', "Master's 1"), ('M2', "Master's 2"), ('D', 'PhD'), ('gr', 'Graduated'), ('ot', 'Other')], default='B1', max_length=2)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150)),
                ('course_class_code', models.CharField(max_length=20)),
                ('course_code', models.CharField(max_length=10)),
                ('level', models.CharField(max_length=100)),
                ('category', models.CharField(max_length=100)),
                ('eligible_year', models.CharField(max_length=50)),
                ('credits', models.IntegerField()),
                ('main_language', models.CharField(max_length=50)),
                ('school', models.CharField(max_length=100)),
                ('campus', models.CharField(max_length=50)),
                ('year', models.CharField(max_length=10)),
                ('term', models.CharField(max_length=50)),
                ('academic_disciplines', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=100), size=3)),
                ('instructors', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=100), size=43)),
                ('syllabus_urls', django.contrib.postgres.fields.ArrayField(base_field=models.URLField(blank=True), size=5)),
                ('sessions', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
        migrations.CreateModel(
            name='CourseReview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('overall_rating', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),
                ('text', models.CharField(max_length=5000)),
                ('anonymous', models.BooleanField(default=False)),
                ('datetime_created', models.DateTimeField(auto_now_add=True)),
                ('datetime_updated', models.DateTimeField(auto_now=True)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course_rater_app.Course')),
                ('reviewer', models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Lab',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('professor', models.CharField(max_length=100)),
                ('topic', models.CharField(max_length=100)),
                ('website', models.URLField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='LabReview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('overall_rating', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),
                ('text', models.CharField(max_length=5000)),
                ('anonymous', models.BooleanField(default=False)),
                ('datetime_created', models.DateTimeField(auto_now_add=True)),
                ('datetime_updated', models.DateTimeField(auto_now=True)),
                ('lab', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course_rater_app.Lab')),
                ('reviewer', models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn import neighbors
from sklearn.ensemble import RandomForestClassifier
from xlrd import open_workbook
import sys
#vectorize the training set
def vectorize(begin,end):
    """Build TF-IDF vectors for cases id{begin}..id{end-1}.

    Parses files/id<j>.txt with BeautifulSoup, collects the text between an
    'ENFORCEMENT RESULT' <h2> heading and the next <h2>, and fits a
    TfidfVectorizer over the collected snippets.

    Returns (dense tf-idf matrix, fitted vectorizer, list of case ids used).
    NOTE(review): written for Python 2 - snippets are utf-8 encoded bytes.
    """
    context = []
    j_list = []
    for j in range(begin,end):
        if j == 42 or j == 99: #these cases don't exist
            continue
        with open("files/id"+str(j)+".txt", 'r') as f:
            text = f.read()
            f.closed  # no-op attribute access (not close()); 'with' closes the file
        soup = BeautifulSoup(text)
        # Pages that contain a 'msgBody' div use a different template; skip them.
        if len(soup.find_all('div', class_='msgBody')) == 0:
            for i in range(len(soup.find_all('h2'))):
                # NOTE(review): 'in' on a Tag tests equality against its
                # children, not substring containment - confirm headings match exactly.
                if 'ENFORCEMENT RESULT' in soup.find_all('h2')[i]: #result-convict
#                if 'HOW CONDUCT WAS DISCOVERED' in soup.find_all('h2')[i]: #investigation initiated by company
                    next = True  # flag name shadows builtin next(); kept byte-identical
                    n = soup.find_all('h2')[i]
                    #this retrieves all text before the next heading
                    instance = ''
                    while next == True:
                        sib = n.next_sibling
                        if sib.name == 'h2':
                            next = False
                        elif sib.name != 'br' and sib != '\n':
                            instance = instance + sib.lstrip()
                            n = sib
                        else:
                            n = sib
                    #if this case has already been accounted for, break out
                    if j in j_list:
                        break
                    else:
                        j_list.append(j)
                        context.append((instance.encode('utf-8')))
        else:
            continue
    vectorizer = TfidfVectorizer(min_df=1,stop_words='english') #sklearn vectorizer
    X = vectorizer.fit_transform(context)
    return X.toarray(),vectorizer,j_list
def results(j_list,begin,end):
    """Read ground-truth labels for case ids [begin, end) from trace.xlsx.

    For each case id i (restricted to ids present in j_list when j_list is
    non-empty, i.e. training mode), scan sheet 0 for the row whose first cell
    holds the case URL and map its column-28 'Yes'/'No'/other value to 1/0/2.

    Returns (labels, i_list) where i_list holds the case ids actually found.
    """
    wb = open_workbook('trace.xlsx')
    s = wb.sheet_by_index(0)
    labels = []
    i_list = []
    for i in range(begin,end):
        if len(j_list) != 0:
            #make sure we have the vector for this case (since this is the training case)
            if i not in j_list:
                continue
        # Scan every populated row. Fix: the previous hard-coded range(0, 491)
        # raised IndexError on smaller sheets and silently ignored rows past
        # 490 on larger ones; Sheet.nrows is the actual row count.
        for j in range(s.nrows):
            if s.cell(j,0).value == 'https://www.traceinternational2.org/compendium/view.asp?id='+str(i):
                result = s.cell(j,28).value #result-conviction
                # result = s.cell(j,53).value #investigation initiated by company
                if i not in i_list:
                    i_list.append(i)
                #assign label values: 1 = Yes, 0 = No, 2 = anything else
                if result == 'Yes':
                    l = 1
                elif result == 'No':
                    l = 0
                else:
                    l = 2
                labels.append(l)
                break
    return labels,i_list
def train(j_list,i_list,vectors,labels):
    """Fit a random-forest classifier on the vectors whose case id has a label.

    j_list pairs positionally with rows of `vectors`; only rows whose id also
    appears in i_list (ids with ground truth) take part in the fit.
    """
    training_rows = [vec for case_id, vec in zip(j_list, vectors)
                     if case_id in i_list]
    clf = RandomForestClassifier()
    clf.fit(training_rows, labels)
    return clf
def predict_by_word(begin,end):
    """Keyword baseline: label a case 1 if its ENFORCEMENT RESULT text
    contains 'convict', else 0 (result-conviction task only).

    Returns (labels, j_list) with one label per processed case id.
    """
    #used for result-conviction only
    predictions = []
    a = []
    j_list = []
    labels = []
    for j in range(begin,end):
        if j == 42:
            continue
        with open("files/id"+str(j)+".txt", 'r') as f:
            text = f.read()
            f.closed  # no-op; the 'with' block closes the file
        soup = BeautifulSoup(text)
        if len(soup.find_all('div', class_='msgBody')) == 0:
            for i in range(len(soup.find_all('h2'))):
                if 'ENFORCEMENT RESULT' in soup.find_all('h2')[i]:
                    next = True  # shadows builtin next(); kept byte-identical
                    n = soup.find_all('h2')[i]
                    instance = ''
                    inst = []
                    # Accumulate all sibling text until the next <h2>.
                    while next == True:
                        sib = n.next_sibling
                        if sib.name == 'h2':
                            next = False
                        elif sib.name != 'br' and sib != '\n':
                            instance = instance + sib.lstrip()
                            n = sib
                        else:
                            n = sib
                    if j in j_list:
                        break
                    else:
                        j_list.append(j)
        else:
            continue
        # print instance
        # NOTE(review): if no heading matched for this j, 'instance' still
        # holds the previous case's text (or is unbound on the first such j).
        # Also, 'conviction' is redundant: 'convict' is its substring.
        if 'convict' in instance or 'conviction' in instance:
            labels.append(1)
        else:
            labels.append(0)
    return labels, j_list
def predict(clf,vectorizer,begin,end):
    """Classify cases id{begin}..id{end-1} with a fitted classifier.

    Extracts the same ENFORCEMENT RESULT snippet as vectorize(), transforms
    it with the already-fitted `vectorizer`, and appends clf's prediction.

    Returns (labels, j_list) with one predicted label per processed case id.
    """
    #prediction based on classifier
    predictions = []
    a = []
    j_list = []
    labels = []
    for j in range(begin,end):
        if j == 42:
            continue
        with open("files/id"+str(j)+".txt", 'r') as f:
            text = f.read()
            f.closed  # no-op; the 'with' block closes the file
        soup = BeautifulSoup(text)
        if len(soup.find_all('div', class_='msgBody')) == 0:
            for i in range(len(soup.find_all('h2'))):
                if 'ENFORCEMENT RESULT' in soup.find_all('h2')[i]:
#                if 'HOW CONDUCT WAS DISCOVERED' in soup.find_all('h2')[i]:
                    next = True  # shadows builtin next(); kept byte-identical
                    n = soup.find_all('h2')[i]
                    instance = ''
                    inst = []
                    # Accumulate all sibling text until the next <h2>.
                    while next == True:
                        sib = n.next_sibling
                        if sib.name == 'h2':
                            next = False
                        elif sib.name != 'br' and sib != '\n':
                            instance = instance + sib.lstrip()
                            n = sib
                        else:
                            n = sib
                    if j in j_list:
                        break
                    else:
                        j_list.append(j)
                        inst.append(instance.encode('utf-8'))
                        a = vectorizer.transform(inst).toarray()
                        label = clf.predict(a)
                        labels.append(label[0])
        else:
            continue
    return labels,j_list
if __name__ == '__main__':
    # Train on cases 1..299, evaluate on cases 300..464.
    # NOTE: Python 2 script (print statement on the last line).
    train_end = 300
    test_end = 465
    # predicted1,j_list1 = predict_by_word(train_end,test_end) #predict by word
    vectors,vectorizer,j_list = vectorize(1,train_end)
    labels,k_list = results(j_list,1,train_end)
    clf = train(j_list,k_list,vectors,labels)
    predicted,j_list = predict(clf,vectorizer,train_end,test_end)
    real,i_list = results(j_list,train_end,test_end)
    #not all 'real' information was provided on the spreadsheet, so keep only
    #predictions whose case id also has a ground-truth label
    new_p = []
    for j in range(len(j_list)):
        if j_list[j] in i_list:
            new_p.append(predicted[j])
    #predict by word
    # new_p1 = []
    # for j in range(len(j_list1)):
    #     if j_list1[j] in i_list:
    #         new_p1.append(predicted1[j])
    #accuracy: count test-set mismatches against ground truth
    incorrect = 0
    for i in range(len(new_p)):
        if new_p[i] != real[i]:
            incorrect = incorrect + 1
    #predict by word
    # incorrect1 = 0
    # for i in range(len(new_p1)):
    #     if new_p1[i] != real[i]:
    #         incorrect1 = incorrect1 + 1
    # print "Percent correct, predict by word:", 1 - (float(incorrect1)/len(new_p1))
    print "Percent correct, Random Forest:", 1 - (float(incorrect)/len(new_p))
|
#!/usr/bin/env python
"""Archive Now for python"""
# usage: ./archiveNow.py -v mycluster -u myuser -d mydomain.net -j MyJob -r '2019-03-26 14:47:00' [ -k 5 ] [ -t S3 ] [ -f ]
# import pyhesity wrapper module
from pyhesity import *
from datetime import datetime, timedelta
# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)  # cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True)  # username
parser.add_argument('-d', '--domain', type=str, default='local')  # (optional) domain - defaults to local
parser.add_argument('-j', '--jobname', type=str, required=True)  # job name
parser.add_argument('-r', '--rundate', type=str, default=None)  # run date to archive in military format with 00 seconds
parser.add_argument('-k', '--keepfor', type=int, required=True)  # days to keep (required here, despite the usage note calling it optional)
parser.add_argument('-t', '--target', type=str, required=True)  # archive vault name (required here, despite the usage note calling it optional)
parser.add_argument('-f', '--fromtoday', action='store_true')  # (optional) keepfor x days from today instead of from snapshot date
parser.add_argument('-l', '--listruns', action='store_true')  # (optional) only list available run dates, archive nothing
parser.add_argument('-n', '--newestrun', action='store_true')  # (optional) archive the newest run regardless of -r
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
jobname = args.jobname
rundate = args.rundate
keepfor = args.keepfor
target = args.target
fromtoday = args.fromtoday
listruns = args.listruns
newestrun = args.newestrun
# authenticate against the cluster (pyhesity helper)
apiauth(vip, username, domain)
# find protection job (case-insensitive name match)
job = [job for job in api('get', 'protectionJobs') if job['name'].lower() == jobname.lower()]
if not job:
    print("Job '%s' not found" % jobname)
    exit()
else:
    job = job[0]
daysToKeep = None
# resolve the archive vault by name; the matching vault becomes the target spec
vault = [vault for vault in api('get', 'vaults') if vault['name'].lower() == target.lower()]
if len(vault) > 0:
    vault = vault[0]
    target = {
        "vaultId": vault['id'],
        "vaultName": vault['name'],
        "vaultType": "kCloud"
    }
else:
    print('No archive target named %s' % target)
    exit()
if keepfor:
    daysToKeep = keepfor
# find requested run
runs = api('get', 'protectionRuns?jobId=%s' % job['id'])
foundRun = False
for run in runs:
    # zero out seconds for rundate match
    thisrundate = datetime.strptime(usecsToDate(run['copyRun'][0]['runStartTimeUsecs']), "%Y-%m-%d %H:%M:%S")
    thisrundatebase = (thisrundate - timedelta(seconds=thisrundate.second)).strftime("%Y-%m-%d %H:%M:%S")
    if listruns is True:
        print(thisrundatebase)
    else:
        if rundate == thisrundatebase or newestrun:
            # fetch the run detail to obtain the cluster-scoped job uid
            thisrun = api('get', '/backupjobruns?allUnderHierarchy=true&exactMatchStartTimeUsecs=%s&excludeTasks=true&id=%s' % (run['backupRun']['stats']['startTimeUsecs'], run['jobId']))
            jobUid = thisrun[0]['backupJobRuns']['protectionRuns'][0]['backupRun']['base']['jobUid']
            foundRun = True
            currentExpiry = None
            for copyRun in run['copyRun']:
                # resync existing archive run: reuse its target and note its expiry
                if copyRun['target']['type'] == 'kArchival':
                    target = copyRun['target']['archivalTarget']
                    currentExpiry = copyRun.get('expiryTimeUsecs', 0)
            # configure archive task
            archiveTask = {
                "jobRuns": [
                    {
                        "copyRunTargets": [
                            {
                                "archivalTarget": target,
                                "type": "kArchival"
                            }
                        ],
                        "runStartTimeUsecs": run['copyRun'][0]['runStartTimeUsecs'],
                        "jobUid": {
                            "clusterId": jobUid['clusterId'],
                            "clusterIncarnationId": jobUid['clusterIncarnationId'],
                            "id": jobUid['objectId']
                        }
                    }
                ]
            }
            # if fromtoday is not set, calculate days to keep from snapshot date
            if fromtoday is False:
                daysToKeep = daysToKeep - dayDiff(dateToUsecs(datetime.now().strftime("%Y-%m-%d %H:%M:%S")), run['copyRun'][0]['runStartTimeUsecs'])
            # if there's an existing archive and keepfor is specified adjust the retention
            # NOTE(review): the inner 'if' repeats the outer condition verbatim,
            # so it is always true at this point - redundant but harmless.
            if keepfor is not None and currentExpiry != 0 and currentExpiry is not None:
                if currentExpiry != 0 and currentExpiry is not None:
                    daysToKeep = daysToKeep + (dayDiff(run['copyRun'][0]['runStartTimeUsecs'], currentExpiry))
                archiveTask['jobRuns'][0]['copyRunTargets'][0]['daysToKeep'] = int(daysToKeep)
            # if the current archive was deleted (expiry 0), resync it
            if currentExpiry == 0:
                archiveTask['jobRuns'][0]['copyRunTargets'][0]['daysToKeep'] = int(daysToKeep)
            # update run, but only when the resulting expiry is meaningful
            if((daysToKeep > 0 and currentExpiry is None) or (daysToKeep != 0 and currentExpiry is not None)):
                print('archiving snapshot from %s...' % usecsToDate(run['copyRun'][0]['runStartTimeUsecs']))
                result = api('put', 'protectionRuns', archiveTask)
                exit()
            else:
                print('Not archiving because expiry time would be in the past or unchanged')
# report if no run was found
if foundRun is False and listruns is not True:
    print('Could not find a run with the date %s' % rundate)
|
# -*- coding: utf-8 -*-
from email.mime.text import MIMEText
from email import encoders
import email.header
import email.utils
import smtplib
def format_addr(s):
name, addr = email.utils.parseaddr(s)
return email.utils.formataddr((\
email.header.Header(name, 'utf-8').encode(), \
addr.encode('utf-8') if isinstance(addr, unicode) else addr))
# Gather connection details interactively (note: the password is echoed
# to the terminal - input() does not hide it).
mail_content = input('mail content:')
send_from_addr = input("from:")
password = input('password:')
smtp_server = input('SMTP server:')
send_to_addr = input('send to:')
# Build a UTF-8 plain-text message with RFC 2047-encoded display names.
msg = MIMEText(mail_content, "plain", 'utf-8')
msg['From'] = format_addr(u'Python爱好者 <%s>' % send_from_addr)
msg['To'] = format_addr(u'管理员 <%s>' % send_to_addr)
msg['Subject'] = email.header.Header(u'来自SMTP的问候……', 'utf-8').encode()
# Send over plain (unencrypted) SMTP on port 25, with protocol debug output.
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(send_from_addr, password)
server.sendmail(send_from_addr, [send_to_addr], msg.as_string())
server.quit()
|
"""
This is the X-Spider base class
"""
from celery import Task
from rlibs.base import XSession
from w3lib.url import canonicalize_url
from requests_futures.sessions import FuturesSession
class BaseSpider(Task):
    """Base spider class for all the spiders."""

    def __init__(self, **kwargs):
        # One shared XSession carries all of this spider's HTTP traffic.
        self.session = XSession()

    def x_request(self, url, method='get', **kwargs):
        """Issue an HTTP request via the session.

        Returns the session method's result, or None when the method name
        is unknown or the request raises (best-effort contract).
        """
        handler = getattr(self.session, method, None)
        if handler is None:
            return None
        try:
            return handler(url, **kwargs)
        except Exception as exc:
            print(exc)
            return None

    def handle_url(self, url):
        """Normalize a URL into its canonical form."""
        return canonicalize_url(url)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 偏函数
# Python的functools模块提供了很多有用的功能,其中一个就是偏函数(Partial function)。要注意,这里的偏函数和数学意义上的偏函数不一样
print(int('12345'))          # decimal parse -> 12345
print(int('12345', base=8))  # octal parse   -> 5349
print(int('12345', 16))      # hex parse     -> 74565
print(int('11111', base=2))  # binary parse  -> 31
def int2(x, base=2):
    # Hand-written specialization of int() with base fixed to 2.
    return int(x, base)
# functools.partial builds the same specialization without defining int2 by
# hand: it returns a new callable with base pre-bound to 2.
import functools
int2 = functools.partial(int, base=2)
print(int2('1000000'))       # -> 64
|
from openpyxl import Workbook
from openpyxl import load_workbook
from zlib import crc32
import sys
import glob
import logging
import xml.etree.ElementTree as ET
def GetCrc32(filename):  # calculate crc32
    """Return the CRC-32 checksum of the file's entire contents."""
    with open(filename, 'rb') as handle:
        payload = handle.read()
    return crc32(payload)
def strnset(str, ch, n):  # string change
    """Replace everything from index n onward with ch.

    Used to swap a filename suffix, e.g. strnset(name, ".xml", -4).
    (Parameter name `str` shadows the builtin; kept for interface
    compatibility with existing callers.)
    """
    return str[:n] + ch
#log setting
# Log to info.log; every package/CRC pair is recorded there as well.
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(filename='info.log', level=logging.DEBUG, format=LOG_FORMAT)
# if len(sys.argv) < 2:
#     print('You must enter the file')
#     exit(1)
# elif len(sys.argv) > 2:
#     print('Only one file is permitted')
#     exit(1)
#filename = sys.argv[1]
# Search one directory level down for all package artifacts.
logging.info("Start search all files in Packages.")
listFile = []
listuxz = glob.glob("./*/*.uxz")
listbin = glob.glob("./*/*.bin")
listtgz = glob.glob("./*/*.tgz")
listexe = glob.glob("./*/*.exe")
listFile.extend(listuxz)
listFile.extend(listbin)
listFile.extend(listtgz)
listFile.extend(listexe)
logging.info("Finish searching.")
#list sort
logging.info("Sorting the list...")
sortFile = sorted(listFile)
# Create the workbook; only the active "platform" sheet is written below,
# the per-OS sheets are created empty.
wb = Workbook()
ws = wb.active
ws.title = "platform"
ws1 = wb.create_sheet("win2019")
ws2 = wb.create_sheet("win2016")
ws3 = wb.create_sheet("rhel7")
ws4 = wb.create_sheet("suse12")
# For each package: compute its crc32, then read the expected crc from the
# sibling .xml metadata file (same path, .xml extension) and record both.
row = 1
for n in sortFile:
    crcpkg = format(GetCrc32(n), 'x')
    print('{:s} {:8} {:s}'.format( n,' crc32: ', crcpkg))
    logging.info('{:s} {:8} {:s}'.format( n,' crc32: ', crcpkg))
    tmpxml = strnset(n,".xml",-4)
    tree = ET.parse(tmpxml)
    # print(tree.getroot())
    root = tree.getroot()
    # Expected checksum lives in <...NAME='crc'><VALUE> nodes.
    crcxml = root.findall(".//*[@NAME='crc']/VALUE")
    for tmp in crcxml:
        print(tmp.text)
        ws.cell(column=4, row=row, value=tmp.text)
    ws.cell(column=1, row=row, value=n)
    ws.cell(column=2, row=row, value=crcpkg)
    ws.cell(column=3, row=row, value=tmpxml)
    row = row + 1
wb.save("sample.xlsx")
logging.info("Mission Completed!")
# result = 'crc32.txt'
# f = open ('./' + result,'w')
# for n in sortFile:
# str = n;
# crc = format(getCrc32(n), 'x')
# print('{:s} {:8} {:x}'.format( n,' crc32: ', getCrc32(n)))
# f.write(str + ' page_crc32: ' + crc + '\n')
# f.close()
|
# -*- coding: utf-8 -*-
def test_login(app, loggedout):
    """Log in as the administrator and verify the session reports that user.

    `app` and `loggedout` are pytest fixtures from the project's conftest;
    `loggedout` presumably guarantees a logged-out starting state - confirm.
    """
    app.session.login("administrator", "root")
    assert app.session.logged_username == "administrator"
|
from datetime import datetime
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from app import login
@login.user_loader
def load_user(id):
    # Flask-Login callback: resolve the id stored in the session cookie
    # (always a string) back to the User row.
    return User.query.get(int(id))
class User(UserMixin, db.Model):
    """Application account, optionally tied to a facility and/or company."""

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))  # salted hash only, never the raw password
    facility = db.Column(db.String(64), index=True, unique=False)
    is_integrator = db.Column(db.Boolean, default=False)
    is_facility_admin = db.Column(db.Boolean, default=False)
    company = db.Column(db.String(64), index=True, unique=False)
    fname = db.Column(db.String(28), index=True, unique=False)
    lname = db.Column(db.String(28), index=True, unique=False)
    requests = db.relationship('Request', backref='author', lazy='dynamic')

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Hash and store the given password."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True when the password matches the stored hash."""
        return check_password_hash(self.password_hash, password)
class Request(db.Model):
    """Beam-time request submitted by a user for a facility."""
    id = db.Column(db.Integer, primary_key=True)
    facility = db.Column(db.String(64), index = True, unique = False)
    #is_integrator_request = db.Column(db.Boolean, db.ForeignKey('user.is_integrator'))
    #company = db.Column(db.String(64), db.ForeignKey('user.company'), index = True, unique = False)
    # NOTE(review): unique=True limits each user to a single Request ever;
    # the 'author' backref on User implies one-to-many - confirm intent.
    user_id = db.Column(db.String(64), db.ForeignKey('user.id'), index= True, unique = True)
    is_approved= db.Column(db.Boolean, default = False)
    status = db.Column(db.String(16), index=True, unique= False)
    beam = db.Column(db.String(16), index=True, unique = False)
    ion = db.Column(db.Integer, index = True, unique = False)
    energy = db.Column(db.Integer, index = True)
    fluence = db.Column(db.Integer, index = True)
    flux = db.Column(db.Integer, index = True)
    LET = db.Column(db.Integer, index = True)
    hours = db.Column(db.Integer, index = True)
    beam_size = db.Column(db.Integer, index = True)
    #range = db.Column()
    scheduled_start = db.Column(db.DateTime)
    scheduled_end = db.Column(db.DateTime)
    def __repr__(self):
        return '<Request {}>'.format(self.id)
# NOTE(review): placeholder model sketches kept as an inert module-level
# string literal (effectively commented-out code); implement or delete.
'''
class NSRL(db.Model):
    #TODO add timeboard as base_schedule
class LBNL(db.Model):
    #TODO add timeboard as base_schedule
class TAMU(db.Model):
    #TODO add timeboard as base_schedule
class Berkley(db.Model):
    #TODO add timeboard as base_schedule
class scheduleLBNL(db.Model):
class scheduleNSRL(db.Model):
class scheduleTAMU(db.Model):
class scheduleBerkley(db.Model):
'''
|
import pandas as pd
import numpy as np
from flask import Flask, render_template,request
import pickle
import yfinance as yf
# Create an instance of Flask
app = Flask(__name__)
# Load the pre-trained regression model once at startup.
# NOTE(review): pickle.load executes arbitrary code from model.pkl - only
# ship trusted artifacts with the app.
model = pickle.load(open('app/model.pkl', 'rb'))
@app.route('/')
def home():
    # Landing page with the prediction form.
    return render_template('index.html')
# Route that will trigger the predict function
@app.route('/predict',methods=['POST'])
def predict():
    """Render index.html with a price forecast for the POSTed ticker."""
    #For rendering results on HTML GUI
    ticker = request.form['ticker']
    processed_ticker= ticker.upper()
    # Fetching data from Yahoo Finance
    data = yf.download(processed_ticker)
    forecast_out = int(request.form['forecast_out'])
    # Shift Adj Close back by forecast_out rows so each row's "Predictions"
    # column holds the price forecast_out days ahead.
    data['Predictions'] = data['Adj Close'].shift(-forecast_out)
    data = data[['Adj Close', 'Predictions']]
    X = data.drop(['Predictions'], axis=1)
    X = X[:-forecast_out]
    y = data['Predictions']
    y = y[:-forecast_out]
    prediction = model.predict(X)
    # NOTE(review): y[-1]/prediction indexing relies on positional Series
    # indexing, removed in pandas 2 (use .iloc[-1]); newer yfinance also
    # drops 'Adj Close' by default (auto_adjust=True) - confirm pinned versions.
    prediction = 'The price of {} will move from ${} to ${} in {} days. This result is based on a Linear Regression model with an R-squared (R2): 0.9521'.format(processed_ticker, y[-1].round(2), prediction[-1].round(2), forecast_out)
    return render_template('index.html',prediction=prediction)
@app.route('/charts')
def charts():
    # Static charts page.
    return render_template('charts.html')
@app.route('/charts2')
def charts2():
    # Second static charts page.
    return render_template('charts2.html')
@app.route('/index')
def index():
    # Alias for the landing page.
    return render_template('index.html')
@app.route('/tables')
def tables():
    # Static tables page.
    return render_template('tables.html')
if __name__ == "__main__":
    # Development server only; disable debug for production deployments.
    app.run(debug=True)
import sys
class Solution:
    def increasingTriplet(self, nums):
        """Return True iff nums has indices i < j < k with nums[i] < nums[j] < nums[k].

        Greedy O(n)/O(1): track the smallest value seen and the smallest
        value known to have something smaller before it; any element larger
        than both completes a triplet.

        :type nums: List[int]
        :rtype: bool
        """
        smallest = sys.maxsize
        second = sys.maxsize
        for current in nums:
            if current <= smallest:
                smallest = current
            elif current <= second:
                # current has a smaller predecessor (smallest).
                second = current
            else:
                # current > second > some earlier smallest: triplet found.
                return True
        return False
# Ad-hoc smoke checks; expected: True, True, True, False, False.
solution = Solution()
# print(solution.increasingTriplet([1,2,3,4,5]))
# print(solution.increasingTriplet([1,2]))
# print(solution.increasingTriplet([1,2,3]))
# print(solution.increasingTriplet([3,2,1]))
# print(solution.increasingTriplet([1,1,1]))
# print(solution.increasingTriplet([0,0,1,2,3]))
# print(solution.increasingTriplet([4, 2, 3, 3, 1, 4, 3]))
# print(solution.increasingTriplet([0,2,3,0,1,4,3]))
print(solution.increasingTriplet([9, 8, 7, 3, 4, 5, 0, 1, 0]))
print(solution.increasingTriplet([1, 2, 2, 2, 2, 2, 3]))
print(solution.increasingTriplet([9, 1, 8, 2, 7, 3]))
print(solution.increasingTriplet([9, 8, 1]))
print(solution.increasingTriplet([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]))
|
import matplotlib.pyplot as plt
import numpy as np
import PIL
from PIL import Image
import pandas as pd
#(0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral)
kaggle_csv = pd.read_csv('fer2013.csv', header = None)
print (kaggle_csv)
# NOTE(review): header=None assumes the CSV ships without a header row; the
# Kaggle fer2013.csv normally starts with "emotion,pixels,Usage" - confirm.

kgc = []
for i in range(len(kaggle_csv[1])):
    # Column 0 is the emotion label (0..6), column 1 the 48x48 pixel string.
    label = kaggle_csv[0][i]
    if label in (0, 1, 2, 3, 4, 5, 6):
        # Deduplicated: the seven original branches were identical except for
        # the output folder, which is just the label itself.
        pixels = np.array([int(v) for v in kaggle_csv[1][i].split()],
                          dtype=np.int32)
        kgc.append(pixels.reshape(48, 48, 1))
        # Bug fix: the original read back kgc[i], whose index drifts out of
        # sync with i as soon as any row is skipped (IndexError / wrong
        # image); use this row's own pixel data instead.
        img = pixels.reshape(48, 48).astype(np.uint8)
        result = Image.fromarray(img)
        result.save('./{}/{}.jpg'.format(label, i))
    else:
        print("error")
|
from bibliopixel.animation import BaseMatrixAnim
from bibliopixel import log
import numpy as np
import cv2
import os
# Pick the best available screenshot backend, in order of preference:
# desktopmagic (Windows multi-monitor), Pillow's ImageGrab, pyscreenshot.
grab = None
if os.name == 'nt':
    try:
        from desktopmagic.screengrab_win32 import getRectAsImage, getScreenAsImage
        log.debug("Using desktopmagic module")

        def nt_grab(bbox=None):
            # Whole screen when no bounding box is given.
            if bbox is None:
                img = getScreenAsImage()
            else:
                img = getRectAsImage(bbox)
            return img
        grab = nt_grab
    except ImportError:
        # Fall through to the cross-platform backends below.
        pass
if grab is None:
    try:
        # Bug fix: the package name is "PIL" (case-sensitive on most
        # platforms); the original "from pil import ImageGrab" could never
        # resolve and always forced the pyscreenshot fallback.
        from PIL import ImageGrab
        log.info("Using PIL ImageGrab module")
    except ImportError:
        try:
            import pyscreenshot as ImageGrab
            log.info("Using pyscreenshot module")
        except ImportError:
            raise Exception("Unable to find any available screenshot option.")
    grab = ImageGrab.grab
class ScreenGrab(BaseMatrixAnim):
    """Matrix animation that mirrors a region of the desktop onto the LED grid.

    NOTE(review): the geometry below relies on '/' truncating to int
    (Python 2). Under Python 3, _cropX/_cropY/padTB/padLR become floats,
    which breaks array slicing and cv2.copyMakeBorder - confirm interpreter.
    """
    def __init__(self, led, bbox=None, mirror=True, offset=0.0, crop=True):
        super(ScreenGrab, self).__init__(led)
        # An all-zero bbox means "capture the whole screen".
        # NOTE(review): sum(None) raises if the bbox=None default is used;
        # callers are expected to pass a 4-tuple (see MANIFEST default).
        if not sum(bbox):
            bbox = None
        self.bbox = bbox
        self.crop = crop
        self.mirror = mirror
        # Capture one frame up front to learn the source dimensions.
        self.image = frame = self._capFrame()
        self._iw = frame.shape[1]
        self._ih = frame.shape[0]
        self.width = led.width
        self.height = led.height
        # self._scale = (self.height*1.0/self._ih)
        self._cropY = 0
        self._cropX = 0
        # Clamp offset into [-1, 1], then shift into [0, 2] so it can weight
        # how the crop/pad is distributed between the two sides.
        xoffset = yoffset = offset
        if xoffset > 1.0:
            xoffset = 1.0
        elif xoffset < -1.0:
            xoffset = -1.0
        if yoffset > 1.0:
            yoffset = 1.0
        elif yoffset < -1.0:
            yoffset = -1.0
        xoffset += 1.0
        yoffset += 1.0
        # Choose the aspect-preserving scale and the crop needed on the
        # dominant axis.
        if self.height >= self.width:
            self._cropX = (self._iw - int(self.width /
                                          (self.height / float(self._ih)))) / 2
            if self._ih >= self._iw:
                scale = (self.height * 1.0) / self._ih
            else:
                scale = (self.width * 1.0) / self._iw
        else:
            self._cropY = (self._ih - int(self.height /
                                          (self.width / float(self._iw)))) / 2
            if self._ih >= self._iw:
                scale = (self.width * 1.0) / self._iw
            else:
                scale = (self.height * 1.0) / self._ih
        # Padding for the non-crop mode, biased by the offset weighting.
        scaleW = int(self.width / scale)
        scaleH = int(self.height / scale)
        padTB = (scaleH - self._ih) / 2
        padLR = (scaleW - self._iw) / 2
        padYoff = int(round(padTB * yoffset)) - padTB
        padXoff = int(round(padLR * xoffset)) - padLR
        self._pad = (padTB + padYoff, padTB - padYoff,
                     padLR + padXoff, padLR - padXoff)
        self.xoff = int(round(self._cropX * xoffset)) - self._cropX
        self.yoff = int(round(self._cropY * yoffset)) - self._cropY

    def _capFrame(self):
        # Grab one screenshot (backend chosen at import time) as an ndarray.
        img = grab(self.bbox)
        return np.array(img)

    def step(self, amt=1):
        """Capture, crop-or-pad, resize to the matrix, and push the pixels."""
        image = self._capFrame()
        if self.crop:
            # Crop to the matrix aspect ratio, shifted by the offset.
            image = image[self._cropY + self.yoff:self._ih - self._cropY +
                          self.yoff, self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
        else:
            # Letterbox instead of cropping.
            t, b, l, r = self._pad
            image = cv2.copyMakeBorder(
                image, t, b, l, r, cv2.BORDER_CONSTANT, value=[0, 0, 0])
        resized = cv2.resize(image, (self.width, self.height),
                             interpolation=cv2.INTER_LINEAR)
        if self.mirror:
            resized = cv2.flip(resized, 1)
        # Push pixel-by-pixel; drops any alpha channel via [0:3].
        for y in range(self.height):
            for x in range(self.width):
                self._led.set(x, y, tuple(resized[y, x][0:3]))
# BiblioPixel animation manifest: describes ScreenGrab and its UI parameters.
# NOTE(review): the last two bbox controls are labelled "Bottom-Left" but a
# rectangle needs a bottom-RIGHT corner - confirm the intended labels.
MANIFEST = [
    {
        "class": ScreenGrab,
        "controller": "matrix",
        "desc": None,
        "display": "ScreenGrab",
        "id": "ScreenGrab",
        "params": [
            {
                "default": (0, 0, 0, 0),
                "help": "Bounding box of screen area to capture. Leave all 0 to capture whole screen.",
                "id": "bbox",
                "label": "Bounding Box",
                "type": "multi_tuple",
                "controls": [{
                    "label": "Top-Left X",
                    "type": "int",
                    "default": 0
                }, {
                    "label": "Top-Left Y",
                    "type": "int",
                    "default": 0
                }, {
                    "label": "Bottom-Left X",
                    "type": "int",
                    "default": 0
                }, {
                    "label": "Bottom-Left Y",
                    "type": "int",
                    "default": 0
                }]
            },
            {
                "default": False,
                "help": "True crops image to matrix aspect. False resizes input to fit.",
                "id": "crop",
                "label": "Crop",
                "type": "bool"
            },
            {
                "default": False,
                "help": "Mirror output",
                "id": "mirror",
                "label": "Mirror",
                "type": "bool"
            }
        ],
        "type": "animation"
    }
]
|
import numpy as np
import pandas as pd
def sample_X(n, p):
    """Draw an (n, p) design matrix of i.i.d. standard-normal covariates."""
    shape = (n, p)
    return np.random.normal(0.0, 1.0, size=shape)
def sample_epsilon(n):
    """Draw the length-n vector of standard-normal noise terms."""
    return np.random.normal(0.0, 1.0, size=n)
def sample_Z(n):
    """Draw n Bernoulli(1/3) treatment-assignment indicators."""
    success_prob = 1.0 / 3.0
    return np.random.binomial(1, success_prob, size=n)
def sample_Q(n, omega, epsilon):
    """Draw n Bernoulli compliance indicators with logistic probability
    sigmoid(omega * epsilon), coupling compliance to the noise."""
    prob = 1.0 / (1.0 + np.exp(-omega * epsilon))
    return np.random.binomial(1, prob, size=n)
def get_W(Z, Q):
    """Observed treatment: the product of assignment Z and compliance Q."""
    return Q * Z
def get_tau(kappa, additive, p, X):
    """Treatment-effect function tau(X) built from the first kappa covariates.

    additive=True sums the negative parts of each of the first kappa columns;
    additive=False takes the negative part of their sum.
    Raises AssertionError when kappa > p.
    """
    # Fix: the original appended a no-op .format(kappa, p) to an already
    # interpolated f-string; the message is an f-string alone now.
    assert kappa <= p, (
        f"Kappa assumed to be smaller than p. You passed kappa={kappa} and p={p}"
    )
    if additive:
        # Sum of per-covariate negative parts.
        tau = np.sum(np.clip(X[:, 0:kappa], a_min=None, a_max=0), axis=1)
    else:
        # Negative part of the summed covariates.
        tau = np.clip(np.sum(X[:, 0:kappa], axis=1), a_min=None, a_max=0)
    return tau
def get_mu(kappa, additive, p, X, nuisance_terms = (5,6), nuisance_scale=3):
    """Confounding/nuisance component of the outcome mean.

    nuisance_terms: 1-indexed to be in line with the GRF paper.
    Default changed from a mutable list to an equal-valued tuple to avoid
    the shared-mutable-default pitfall; callers passing lists still work.
    Raises AssertionError when a nuisance term overlaps the effect terms.
    """
    idx = np.array(nuisance_terms) - 1  # convert to 0-indexed columns
    assert idx.min() >= kappa, "Nuisance terms overlap with effect terms."
    if additive:
        mu = np.sum(np.clip(X[:, idx], a_min=None, a_max=0), axis=1)
    else:
        mu = np.clip(np.sum(X[:, idx], axis=1), a_min=None, a_max=0)
    return nuisance_scale * mu
def get_Y(mu, W, tau, epsilon):
    """Outcome: baseline mu plus centered-treatment effect plus noise."""
    centered_treatment = W - 0.5
    return mu + centered_treatment * tau + epsilon
def get_sample(p, n, omega, kappa, additive, nuisance, seed):
    """Draw one synthetic dataset (X, Y, W, Z, tau) of n units, p covariates.

    omega controls confounded compliance, kappa the number of effect
    covariates, additive the functional form of tau and mu.
    NOTE(review): the `nuisance` argument is accepted but never used -
    get_mu runs with its default nuisance_terms; confirm intent.
    """
    np.random.seed(seed)
    X = sample_X(n,p)
    epsilon = sample_epsilon(n)
    Q = sample_Q(n,omega,epsilon)
    Z = sample_Z(n)
    W = get_W(Z,Q)
    tau = get_tau(kappa, additive, p, X)
    mu = get_mu(kappa, additive, p, X)
    Y = get_Y(mu, W, tau, epsilon)
    return X, Y, W, Z, tau
# Generated by Django 2.0.5 on 2018-08-10 10:52
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add City.created_date with timezone.now as default."""
    dependencies = [
        ('weather', '0002_auto_20180810_0326'),
    ]
    operations = [
        migrations.AddField(
            model_name='city',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from ImproveDeepNN.TensorFlowTutorial.tf_utils import *
# Fix the RNG so the tutorial exercises below are reproducible.
np.random.seed(1)
#0 example
'''
y_hat = tf.constant(36, name='y_hat')
y = tf.constant(39, name='y')
loss = tf.Variable((y - y_hat) ** 2, name='loss')
init = tf.global_variables_initializer()
x = tf.placeholder(tf.int64, name='x')
with tf.Session() as session:
session.run(init)
print(session.run(loss))
print(session.run(2 * x, feed_dict={x:3}))
session.close()
'''
'''
Writing and running programs in TensorFlow has the following steps:
Create Tensors (variables) that are not yet executed/evaluated.
Write operations between those Tensors.
Initialize your Tensors.
Create a Session.
Run the Session. This will run the operations you’d written above.
'''
#1 linear function
'''
def linear_function():
"""
Implements a linear function:
Initializes W to be a random tensor of shape (4,3)
Initializes X to be a random tensor of shape (3,1)
Initializes b to be a random tensor of shape (4,1)
Returns:
result -- runs the session for Y = WX + b
"""
np.random.seed(1)
X = tf.constant(np.random.randn(3, 1), name='X')
W = tf.constant(np.random.randn(4, 3), name='W')
b = tf.constant(np.random.randn(4, 1), name='b')
Y = tf.add(tf.matmul(W, X), b)
sess = tf.Session()
result = sess.run(Y)
sess.close()
return result
print("result = " + str(linear_function()))
'''
#2 sigmoid computing
'''
method 1:
sess = tf.Session()
# Run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
sess.close() # Close the session
method 2:
with tf.Session() as sess:
# run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
# This takes care of closing the session for you :)
'''
def sigmoid(z):
    """Element-wise sigmoid of z, evaluated through a TF1 graph.

    Arguments:
    z -- input value, scalar or vector

    Returns:
    the sigmoid of z as plain numpy values
    """
    x = tf.placeholder(tf.float32, name='x')
    activation = tf.sigmoid(x)
    with tf.Session() as sess:
        out = sess.run(activation, feed_dict={x: z})
    return out
'''
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
'''
#3 computing the cost
def cost(logits, labels):
    """Sigmoid cross-entropy between `logits` (z) and `labels` (y).

    Note: TensorFlow's "logits"/"labels" correspond to this course's z/y;
    both are fed through placeholders and evaluated in a throwaway session.

    Returns:
    per-element cross-entropy values
    """
    z = tf.placeholder(tf.float32, name='z')
    y = tf.placeholder(tf.float32, name='y')
    xent = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
    with tf.Session() as sess:
        xent_value = sess.run(xent, feed_dict={z: logits, y: labels})
    return xent_value
'''
logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
'''
#4 using one hot encodings
def one_hot_matrix(labels, C):
    """One-hot encode `labels` into a (C, m) matrix.

    Row i corresponds to class i, column j to example j, so entry (i, j)
    is 1 exactly when example j carries label i (classes along axis 0).

    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension
    """
    depth = tf.constant(value = C, name='C')
    encoded = tf.one_hot(labels, depth, axis=0)
    with tf.Session() as sess:
        return sess.run(encoded)
'''
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = " + str(one_hot))
'''
#5 initialize with zeros and ones
def ones(shape):
    """Return a numpy array of ones with the given shape, built via tf.ones.

    Arguments:
    shape -- shape of the array you want to create
    """
    with tf.Session() as sess:
        return sess.run(tf.ones(shape))
'''
print ("ones = " + str(ones([3])))
''' |
def setup():
    # Processing sketch setup: 500x500 white canvas, anti-aliased shapes,
    # no outlines, and draw() runs only once (noLoop).
    size(500, 500)
    smooth()
    background(255)
    noStroke()
    noLoop()
def draw():
    # Draw a 10-column grid of 35px grey squares on a 40px pitch; each column
    # alternates two ramps: darkening with i (i*20) and lightening (160-15*i).
    for i in range(0, 10, 1):
        for k in range(5):
            fill(i*20)
            rect(i*40+50, 75+40*(2*k-1), 35, 35)
            fill(160-15*i)
            rect(i*40+50, 75+40*2*k, 35, 35)
|
import pandas as pd
import numpy as np
import tensorflow as tf

# Load the BMI dataset: height, weight and a label in {thin, normal, fat}.
csv = pd.read_csv('bmi.csv')
# print(csv.head())
# The label column has three classes; they are one-hot encoded below.
# Inspect min/max height:
# print(csv['height'].max())
# print(csv['height'].min())
# Inspect min/max weight:
# print(csv['weight'].max())
# print(csv['weight'].min())
# Normalization: shrink the feature ranges down to values between 0 and 1
# so the optimization is better conditioned.
csv['height'] = csv['height'] / 200
csv['weight'] = csv['weight'] / 100
# Min/max height after scaling:
# print(csv['height'].max(),csv['height'].min())
# Min/max weight after scaling:
# print(csv['weight'].max(),csv['weight'].min())
# One-hot encoding vector for each possible answer.
bcalss = {"thin":[1,0,0],"normal":[0,1,0],"fat":[0,0,1]}
csv['label_fat'] = csv['label'].apply(lambda x: np.array(bcalss[x]))
print(csv.head())
# A new column label_fat now holds the one-hot vector for each row's label.
# Training on ALL the data would overfit: the model only does well on data
# it has seen, and accuracy on new data drops. Common practice is to train
# on ~70-80% and validate on the rest. Of the ~20k rows here, 2/3 are used
# for training and 1/3 for validation.
# Rows 15000..20000 are held out as the validation set.
test_csv = csv[15000:20000]
# Split the validation set into features (test_pat) and answers (test_ans).
test_pat = test_csv[['weight','height']]
test_ans = list(test_csv['label_fat'])
# print(test_pat.head())
# print(test_ans.head())
# Placeholder for the training features (weight, height).
x = tf.placeholder(tf.float32,[None,2])
# Placeholder for the training answers (one-hot over 3 classes).
y_ = tf.placeholder(tf.float32,[None,3])
# Weight matrix: [number of features, number of classes].
W = tf.Variable(tf.zeros([2,3])) # weights
# One bias (threshold) per class; initialized to zero, learned during
# training.
b = tf.Variable(tf.zeros([3])) # biases; one per output class
# Build the softmax regression model: y = softmax(x.W + b).
y = tf.nn.softmax(tf.matmul(x,W)+b) # model output # c
# Loss: cross-entropy between the true answers and the predictions.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) # B
# Optimizer that minimizes the loss with gradient descent (lr = 0.01).
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(cross_entropy) # A
# The train op (A) depends on cross_entropy (B), which depends on y_ and y.
# y_ is the placeholder for the true answers; y depends on the feature
# placeholder x, so both x and y_ must be fed when running the graph.
# Accuracy metric used while training:
# compare predicted class vs true class per example.
predict = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
# Cast the boolean comparison to float and average to get the accuracy.
accuracy = tf.reduce_mean(tf.cast(predict,tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Train on the rows outside the 15000..20000 validation slice, in
# mini-batches of 100 rather than all 15000 at once.
# i cycles over 0, 100, 200, ..., 13900.
for step in range(3500):
    i = (step * 100) % 14000
    rows = csv[1+i : 1+i+100]
    x_pat = rows[['weight','height']]
    y_ans = list(rows['label_fat'])
    fd = {x:x_pat, y_:y_ans}
    sess.run(train,feed_dict=fd)
    if step % 500 == 0:
        # Report loss on the current batch and accuracy on the hold-out.
        cre = sess.run(cross_entropy,feed_dict=fd)
        acc = sess.run(accuracy, feed_dict={x:test_pat,y_:test_ans})
        print("step= ",step,"cre",cre,"acc=",acc)
# Final accuracy on the validation set ("정답률" = accuracy).
acc = sess.run(accuracy,feed_dict={x:test_pat, y_:test_ans})
print("정답률 =", acc)
# for step in range(0,14900,100):
#     print(step)
sess.close()
|
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import sys
import click
from typing import List
import nbformat
import os
from nbconvert import RSTExporter
from nbconvert.preprocessors import ExecutePreprocessor
# Module-level singletons: the RST exporter and the notebook executor
# (10-minute timeout per notebook; widget state is stored in the output).
exporter = RSTExporter()
ep = ExecutePreprocessor(timeout=600, kernel_name="python3", store_widget_state=True)
def open_nb(name: str, src: str) -> nbformat:
    """
    Open notebook file
    Looks for the notebook at <src>/<name>/<name>.ipynb and parses it as
    nbformat version 4.
    :param name: name of notebook to open
    :param src: source directory
    :return: notebook object
    """
    print("Reading...", end=" ")
    path = "{src}/{name}/{name}.ipynb".format(name=name, src=src)
    nb = nbformat.read(path, as_version=4)
    print("OK", end=" ")
    return nb
def execute(nb: nbformat, name: str, src: str) -> nbformat:
    """
    Execute notebook and store widget state.
    Uses the module-level ExecutePreprocessor `ep` (600 s timeout).
    :param nb: notebook object to execute
    :param name: notebook name (for setup directory context purpose)
    :param src: notebook source directory (for setup context)
    :return: notebook object with computed and stored output widget state
    """
    print("Executing...", end=" ")
    # Run with the notebook's own folder as the working directory so the
    # notebook's relative paths resolve.
    ep.preprocess(nb, {"metadata": {"path": "%s/%s/" % (src, name)}})
    print("OK", end=" ")
    return nb
def copy_image(name: str, export: str, src: str):
    """
    Copy images present next to notebook file to exported folder.
    Note: files are moved with os.rename, so they disappear from the
    source directory. Only .png files are transferred.
    :param name: notebook name
    :param export: export directory
    :param src: source directory
    :return: None
    """
    src = "%s/%s" % (src, name)
    dest = "%s/%s" % (export, name)
    for entry in os.listdir(src):
        if entry.split(".")[-1] in ["png"]:
            os.rename("%s/%s" % (src, entry), "%s/%s" % (dest, entry))
def to_export(nb: nbformat, name: str, export: str):
    """
    Export notebook into RST format (via the module-level RSTExporter;
    output is written to <export>/<name>/<name>.rst).
    :param nb: notebook with result state
    :param name: notebook name
    :param export: directory to export
    :return: None
    """
    print("Exporting...", end=" ")
    rst, _ = exporter.from_notebook_node(nb)
    path = "%s/%s" % (export, name)
    # Create the per-notebook output folder on first export.
    if not os.path.exists(path):
        os.makedirs(path)
    with open("%s/%s.rst" % (path, name), "w") as f:
        f.write(rst)
    print("OK", end=" ")
def list_notebook(src: str) -> List[str]:
    """
    List available notebook in directory.
    A directory <d> under *src* counts as a notebook when it contains a
    file <d>/<d>.ipynb.
    :return: names of notebook directories
    """
    found = []
    for entry in os.listdir(src):
        if os.path.isfile("{src}/{name}/{name}.ipynb".format(name=entry, src=src)):
            found.append(entry)
    return found
@click.command("Check and export notebooks")
@click.option("--src", nargs=1, help="Notebook directory")
@click.option("--check", nargs=1, help="check notebook according to result file given")
@click.option("--export", nargs=1, help="export notebooks to directory given")
def main(src: str, check: str, export: str):
    """Execute every notebook under --src; optionally check results and/or
    export each to RST (plus its images) under --export."""
    for name in list_notebook(src):
        print("{:30}".format(name), ":", end="")
        nb = open_nb(name, src)
        nb = execute(nb, name, src)
        if check:
            pass  # TODO: result checking not implemented yet
        if export:
            to_export(nb, name, export)
            copy_image(name, export, src)
        print("")
if __name__ == "__main__":
    main()
|
"""
Author(s): Tushar Sharma <tushar.sharma@ivycomptech.com>
Contains class UserAuth for operations related to user management
and access control
"""
from gms_services.utils.LoadQuery import LoadQuery
from gms_services.utils.Database import Database
class UserAuth:
    """ Class for user Authentication related stuff
    """

    def __init__(self):
        """ Constructor Method
        Instance Variables:
        load_query_obj -- for getting the query string
        db_obj -- to execute the query
        """
        self.load_query_obj = LoadQuery()
        self.db_obj = Database()

    def is_user_logged_in(self, sesskey):
        """ Check if user is already logged in or not
        :param sesskey: str, session key for already maintained session
        :return: `True` if logged in else `False`
        :rtype: bool
        """
        user_logged_in_query = \
            self.load_query_obj.get_query("DQL.check_login_query",
                                          {"<SESSKEY>": sesskey})
        data = self.db_obj.get_result(user_logged_in_query)
        # A non-empty result set means an active session exists; bool()
        # replaces the explicit if/else returning True/False.
        return bool(data)
|
from flask import Flask, request, render_template, send_file, redirect
from scrapper import get_stackoverflow, get_wwr, get_remote
import csv
"""
These are the URLs that will give you remote jobs for the word 'python'
https://stackoverflow.com/jobs?r=true&q=python
https://weworkremotely.com/remote-jobs/search?term=python
https://remoteok.io/remote-dev+python-jobs
Good luck!
"""
app = Flask("Final Assignment")
# In-memory cache: search term -> list of scraped job dicts.
db = {}
@app.route('/')
def main():
    """Render the landing/search page."""
    return render_template('main.html')
@app.route('/search')
def search():
    """Search remote jobs for ?term=..., caching results per term.
    Redirects to "/" when no term is given. Results are scraped once per
    term (StackOverflow, WeWorkRemotely, RemoteOK) and cached in `db`.
    """
    term = request.args.get('term')
    # Guard before lowercasing: request.args.get returns None when the
    # parameter is missing, and None.lower() raised AttributeError.
    if not term:
        return redirect("/")
    term = term.lower()
    if not db.get(term):
        so_list = get_stackoverflow(term)
        wwr_list = get_wwr(term)
        remote_list = get_remote(term)
        db[term] = so_list+wwr_list+remote_list
    return render_template('search.html',
                           term=term,
                           result=db[term],
                           count=len(db[term])
                           )
@app.route('/export')
def export():
    """Export cached results for ?term=... as a CSV attachment.
    Redirects to "/" when the term is missing or has not been searched yet.
    """
    term = request.args.get('term')
    # Guard before lowercasing: a missing parameter yields None and
    # None.lower() raised AttributeError.
    if not term:
        return redirect("/")
    term = term.lower()
    fromDb = db.get(term)
    if not fromDb:
        return redirect("/")
    # newline="" keeps the csv module from emitting blank rows on Windows.
    with open(f"{term}.csv", "w", newline="") as file:
        writer = csv.writer(file)
        writer.writerow(["TITLE", "COMPANY", "LINK"])
        for row in fromDb:
            writer.writerow([
                row.get("title"),
                row.get("company"),
                row.get("href")
            ])
    return send_file(
        f"{term}.csv",
        mimetype="application/x-csv",
        attachment_filename= f"{term}.csv",
        as_attachment=True
    )
app.run(host="0.0.0.0") |
from graphics import *
class Button:
    """A button is a labeled rectangle in a window.
    It is enabled or disabled with the activate()
    and deactivate() methods. The clicked(pt) method
    returns True if and only if the button is enabled and pt is inside it."""

    def __init__(self, win, center, width, height, label):
        """ Creates a rectangular button, eg:
        qb = Button(myWin, centerPoint, width, height, 'Quit')
        :param win: graphics window the button is drawn into
        :param center: Point at the button's center
        :param width: total button width
        :param height: total button height
        :param label: text displayed on the button
        Instance variables: xmin/xmax/ymin/ymax (the clickable bounds),
        rect (drawn Rectangle), label (drawn Text), active (enabled flag).
        """
        # half-extents used to place the corners around the center
        w,h = width/2.0, height/2.0
        x,y = center.getX(), center.getY()
        # x-bounds of the clickable area
        self.xmax, self.xmin = x+w, x-w
        # y-bounds of the clickable area
        self.ymax, self.ymin = y+h, y-h
        # opposite (min and max) corners of the rectangle
        p1 = Point(self.xmin, self.ymin)
        p2 = Point(self.xmax, self.ymax)
        # draw the button body
        self.rect = Rectangle(p1,p2)
        self.rect.setFill('lightgray')
        self.rect.draw(win)
        # draw the label text centered on the button
        self.label = Text(center, label)
        self.label.draw(win)
        self.activate() # start enabled: black text, bold outline

    def getLabel(self):
        """Returns the label string of this button."""
        return self.label.getText()

    def activate(self):
        """Sets this button to 'active'."""
        self.label.setFill('black')  # black text signals "enabled"
        self.rect.setWidth(2)        # bolder outline
        self.active = True           # checked by isClicked()

    def deactivate(self):
        """Sets this button to 'inactive'."""
        self.active = False
        self.label.setFill('darkgray')  # gray text signals "disabled"
        self.rect.setWidth(1)           # thin outline

    def isClicked(self, pt):
        """Returns true if button active and Point pt is inside"""
        if self.active and (pt.getX() <= self.xmax and pt.getX() >= self.xmin
                and pt.getY() <= self.ymax and pt.getY() >= self.ymin):
            return True
        else:
            return False
if __name__ == "__main__":
    # NOTE(review): no main() is defined in this module; running it as a
    # script raises NameError. Presumably a demo main lives elsewhere --
    # confirm before relying on this entry point.
    main()
|
__author__ = 'tk'
import random
def merge(test1, test2):
    """Merge two already-sorted lists into one sorted list."""
    merged = []
    i = j = 0
    n1, n2 = len(test1), len(test2)
    while i < n1 and j < n2:
        if test1[i] < test2[j]:
            merged.append(test1[i])
            i += 1
        else:
            merged.append(test2[j])
            j += 1
    # Exactly one side may have leftovers; append them wholesale.
    if i == n1:
        merged.extend(test2[j:])
    else:
        merged.extend(test1[i:])
    return merged
def mergesort(my_list):
    """Recursively merge-sort my_list, returning a new sorted list.
    Fix: the split index now uses floor division (//). The original used
    plain '/', which is integer division only under Python 2; under
    Python 3 it yields a float and breaks the slice.
    """
    if len(my_list) > 1:
        mid = len(my_list) // 2
        left_sorted = mergesort(my_list[0:mid:1])
        right_sorted = mergesort(my_list[mid::1])
        return merge(left_sorted, right_sorted)
    else:
        # Lists of length 0 or 1 are already sorted.
        return my_list
if __name__ == "__main__":
    # Python 2 demo (xrange, print statements): sample 10 distinct values
    # from 0..999 and eyeball mergesort against the built-in sorted().
    tk_list = random.sample(xrange(1000),10)
    print "my_list" + str(tk_list)
    print mergesort(tk_list)
    print sorted(tk_list)
|
from dxtbx.model.experiment_list import ExperimentListFactory
from cctbx import miller
import cPickle as pickle
import numpy as np
import sys, time, argparse, os
"""
Extract indexing matrices and reflection information from DIALS indexing output so that
contents are accessible without libtbx.python; also, predict all observed positions to
the specified resolution. Contents are saved to matrix_info.pickle, indexed_info.pickle
and predicted_info.pickle in given path.
"""
def parse_commandline():
    """
    Parse command line input.
    :return: dict with keys 'path' (DIALS indexing output directory) and
        'info' (pickled dict specifying mag, res, pixel size and shape)
    """
    parser = argparse.ArgumentParser(description='Convert DIALS indexing output to non-libtbx accessible format.')
    parser.add_argument('-p','--path', help='Directory with output of DIALS indexing', required=True)
    parser.add_argument('-i','--info', help='Dictionary that specifies mag, res, pixel size, and shape', required=True)
    parsed = parser.parse_args()
    return vars(parsed)
def extract_matrices(crystal, savepath):
    """
    Extract orientation matrices and save to pickle file.
    A = setting matrix, U = orientation, B = reciprocal basis; each is
    stored as a 3x3 numpy array under matrix_info.pickle in *savepath*.
    """
    matrices = dict()
    for key, getter in (('A', crystal.get_A), ('U', crystal.get_U), ('B', crystal.get_B)):
        matrices[key] = np.array(getter()).reshape(3,3)
    with open(os.path.join(savepath, "matrix_info.pickle"), "wb") as handle:
        pickle.dump(matrices, handle)
    return matrices
def find_duplicates(millers, qvecs, A_matrix):
    """
    Find duplicate hkl entries and record the entry (noted by value of 1) whose associated
    qvector is more distant from the calculated qvector.
    :param millers: (n, 3) array of Miller indices
    :param qvecs: (n, 3) array of observed q-vectors aligned with millers
    :param A_matrix: 3x3 setting matrix used to compute expected q-vectors
    :return: length-n array; 1 marks an entry to discard, 0 marks valid
    NOTE: Python 2 code -- relies on print statements and on
    Counter.keys()/.values() being indexable lists (views in Python 3).
    """
    from collections import Counter
    # convert from np.array format to list of tuples (hashable for Counter)
    millers_as_tuple = list()
    for hkl in millers:
        millers_as_tuple.append(tuple(hkl))
    # identify duplicate hkl
    counts = Counter(millers_as_tuple)
    suspect_idx = np.where(np.array(counts.values())!=1)[0]
    # track down indices of duplicate hkl that don't match calculated qvector
    discard_idx = list()
    for idx in suspect_idx:
        h,k,l = counts.keys()[idx]
        print "Duplicate found for Miller (%i,%i,%i)" %(h,k,l)
        mult_idx = np.where((millers[:,0]==h) & (millers[:,1]==k) & (millers[:,2]==l))[0]
        # expected q-vector for this hkl from the setting matrix
        q_calc = np.inner(A_matrix, np.array([h,k,l])).T
        q_recorded = qvecs[mult_idx]
        # keep only the observation closest to the calculated q-vector
        delta = np.sum(np.abs(q_calc - q_recorded), axis=1)
        discard_idx.append(mult_idx[np.where(delta!=delta.min())[0]][0])
    # build the flag array: 1 marks entries to discard, 0 marks valid ones
    discard_idx = np.array(discard_idx)
    discard = np.zeros(millers.shape[0])
    if len(discard_idx) > 0:
        discard[discard_idx]=1
    return discard
def extract_rlp_info(indexed, crystal, A_matrix, savepath):
    """
    Extract Miller indices, resolution, and positional information of indexed reflections.
    :param indexed: DIALS reflection table of indexed spots
    :param crystal: DIALS crystal model (supplies the unit cell)
    :param A_matrix: 3x3 setting matrix, used to resolve duplicate hkl
    :param savepath: directory where indexed_info.pickle is written
    :return: dict with keys 'res', 'hkl', 'I', 'sigI', 'xyz', 'qvec'
    NOTE(review): 'sigI' stores intensity.sum.variance, i.e. a variance,
    not a sigma -- confirm what downstream consumers expect.
    """
    idx_info = dict()
    # (0,0,0) marks unindexed reflections; drop them up front
    hkl = indexed.select(indexed['miller_index']!=(0,0,0))['miller_index']
    idx_info['res'] = np.array(crystal.get_unit_cell().d(hkl))
    idx_info['hkl'] = np.array(hkl)
    # extract info from indexed object and remove unindexed Miller indices
    for key,tag in zip(['I', 'sigI', 'xyz', 'qvec'], ['intensity.sum.value', 'intensity.sum.variance', 'xyzobs.px.value', 'rlp']):
        idx_info[key] = np.array(indexed.select(indexed['miller_index']!=(0,0,0))[tag])
    # remove duplicate entries from each data type if any exist
    discard = find_duplicates(idx_info['hkl'], idx_info['qvec'], A_matrix)
    if np.sum(discard) > 0:
        for key in idx_info.keys():
            idx_info[key] = idx_info[key][discard==0]
    # dump to pickle file
    with open(os.path.join(savepath, "indexed_info.pickle"), "wb") as handle:
        pickle.dump(idx_info, handle)
    return idx_info
def missing_wedge_mask(angle, shape):
    """
    Generate a volume of the predicted missing wedge region based on the tomogram's
    shape and maximum tilt angle. A value of zero corresponds to pixels that belong
    to the missing wedge.
    :param angle: maximum tilt angle in degrees
    :param shape: (x, y, z) shape of the tomogram volume
    :return: array of given shape; 1 = valid voxel, 0 = missing wedge
    NOTE: Python 2 code -- the shape[2]/2 expressions rely on integer
    division for int shape entries; behavior differs under Python 3.
    """
    # determine the slope of missing wedge plane
    rise, run = shape[2]/2 * np.tan(np.deg2rad(angle)), shape[2]/2
    # if the plane exits through the top face, clamp the rise and shorten
    # the run accordingly
    if rise > shape[2]/2:
        segment = np.tan(np.deg2rad(90 - angle)) * (rise - shape[2]/2)
        run = shape[2]/2 - segment
        rise = shape[2]/2
    m = float(rise) / float(run)
    # generate octant mask -- 1/8 of total volume for efficiency
    xc, yc, zc = int(shape[0]/2), int(shape[1]/2), int(shape[2]/2)
    c = np.vstack((np.meshgrid(range(xc), range(yc), range(zc)))).reshape(3,-1).T
    # zero out voxels above the wedge plane z > m*x within the octant
    idx1 = np.where((c[:,0]>=0) & (c[:,2]>=0) & (c[:,2] > m * c[:,0]))[0]
    octant = np.ones((xc,yc,zc)).flatten()
    octant[idx1] = 0
    octant = octant.reshape((xc,yc,zc))
    # generate full volume by mirroring the octant into all eight octants
    m_wedge = np.ones(shape)
    m_wedge[xc:,yc:,zc:] = octant
    m_wedge[0:xc,yc:,zc:] = octant
    m_wedge[xc:,0:yc,zc:] = np.fliplr(octant)
    m_wedge[0:xc,0:yc:,zc:] = np.fliplr(octant)
    m_wedge[xc:,yc:,0:zc] = np.flip(m_wedge[xc:,yc:,zc:], 2)
    m_wedge[xc:,0:yc,0:zc] = np.flip(m_wedge[xc:,0:yc,zc:], 2)
    m_wedge[0:xc:,yc:,0:zc] = np.flip(m_wedge[0:xc:,yc:,zc:], 2)
    m_wedge[0:xc:,0:yc:,0:zc] = np.flip(m_wedge[0:xc:,0:yc:,zc:], 2)
    m_wedge = np.transpose(m_wedge, (1,0,2)).T # for experimental tomograms
    #m_wedge = np.transpose(m_wedge, (1,2,0)).T # for simulated tomograms
    return m_wedge
def predict_positions(A, crystal, specs, savepath):
    """
    Predict the locations of Bragg peaks out to the specified resolution, and remove
    any predicted to fall inside the missing wedge region. Output positions and their
    associated Miller indices as separate keys in a dictionary.
    :param A: 3x3 setting matrix
    :param crystal: DIALS crystal model (supplies the crystal symmetry)
    :param specs: dict with keys 'res', 'mag', 'px_size', 'shape', 'angle'
    :param savepath: directory where predicted_info.pickle is written
    :return: dict with keys 'hkl' and 'xyz'
    NOTE: Python 2 code (print statements).
    """
    # generate complete list of Miller indices to given resolution
    ms = miller.build_set(crystal_symmetry = crystal.get_crystal_symmetry(),
                          anomalous_flag=True,
                          d_min = specs['res']).expand_to_p1()
    hkl = np.array(ms.indices())
    # predict the xyz positions of each peak in the FFT of the tomogram
    qvecs = np.inner(A, np.squeeze(hkl)).T
    px_coords = qvecs * 1.0 / specs['mag'] * specs['px_size'] + np.array(specs['shape']) / 2.0
    print "Predicted %i reflections to %.1f resolution" %(len(hkl), specs['res'])
    # remove any Millers located inside the missing wedge
    mwedge = missing_wedge_mask(specs['angle'], specs['shape'])
    # round to the nearest voxel; axes are reversed to match the mask layout
    sel = np.fliplr(px_coords.copy())
    sel = np.around(sel).astype(int)
    valid_idx = mwedge.flatten()[np.ravel_multi_index(sel.T, specs['shape'])]
    hkl_valid, px_valid = hkl[valid_idx==1], px_coords[valid_idx==1]
    print "Retained %i reflection outside missing wedge" %(len(hkl_valid))
    # store in dictionary, save as pickle, and return
    predicted = dict()
    predicted['hkl'], predicted['xyz'] = hkl_valid, px_valid
    with open(os.path.join(savepath, "predicted_info.pickle"), "wb") as handle:
        pickle.dump(predicted, handle)
    return predicted
if __name__ == '__main__':
    start_time = time.time()
    args = parse_commandline()
    # loading command line input (Python 2: cPickle, text-mode open)
    indexed = pickle.load(open(os.path.join(args['path'], "indexed.pickle")))
    exp = ExperimentListFactory.from_json_file(os.path.join(args['path'], "indexed.json"), check_format=False)
    crystal = exp[0].crystal
    # extracting and saving information: matrices first, since the A
    # matrix is needed to de-duplicate the indexed reflections
    matrices = extract_matrices(crystal, args['path'])
    rlp_info = extract_rlp_info(indexed, crystal, matrices['A'], args['path'])
    specs = pickle.load(open(args['info']))
    predicted = predict_positions(matrices['A'], crystal, specs, args['path'])
    print "elapsed time is %.2f minutes" %((time.time() - start_time)/60.0)
|
# Write a function that computes the transpose of an n-by-n matrix.
# It receives an n-by-n matrix and returns that same matrix transposed.
from django.contrib import admin
from . models import Check

# Expose the Check model in the Django admin site.
admin.site.register(Check)
|
import pickle
import os
import numpy as np
import matplotlib.pyplot as plt
from model import classifier
from constant import *
def time_taken(start, end):
    """Human readable time between `start` and `end`
    :param start: time.time()
    :param end: time.time()
    :returns: day:hour:minute:second.millisecond
    """
    elapsed = end - start
    day, rem = divmod(elapsed, 24 * 3600)
    hour, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)
    # Fractional seconds of the total elapsed time, rendered without "0."
    fraction = (end - start) - int(end - start)
    millis = str('%.3f' % fraction)[2:]
    return "%02d:%02d:%02d:%02d.%s" % (
        int(day), int(hour), int(minutes), int(seconds), millis)
def find_modality_bin_behavior(a_path, db_file_name):
    """
    Finds modality, bins, behavior by using `path` and `dataset` file name
    :param a_path: Dataset path (second component encodes "<modality>_<bins>")
    :param db_file_name: Dataset file name ("<behavior>_...")
    :return: modality, bins, behavior
    """
    modality_and_bins = a_path.split(os.sep)[1].split("_")
    modality = modality_and_bins[0].capitalize()
    bins = modality_and_bins[1]
    # Historical rename: proprioception data is reported as Haptic.
    if modality == "Proprioception":
        modality = "Haptic"
    # 'low' and 'crush' are legacy names for the Drop and Press behaviors.
    prefix = db_file_name.split(".")[0].split("_")[0]
    if prefix == 'low':
        behavior = "Drop"
    else:
        behavior = prefix.capitalize()
        if behavior == "Crush":
            behavior = 'Press'
    return modality, bins, behavior
def reshape_full_data(data):
    """
    Reshape data into (Categories, Objects, Trials, features)
    :param data: Dataset list
    :return: reshaped Dataset list
    """
    full_shape = (NUM_OF_CATEGORY, OBJECTS_PER_CATEGORY, TRIALS_PER_OBJECT, -1)
    return data.reshape(full_shape)
def read_dataset(a_path, db_file_name):
    """
    Read dataset
    :param a_path: Dataset path
    :param db_file_name: Dataset file name
    :return: interaction_data, category_labels, object_labels
        (each reshaped to (Categories, Objects, Trials, -1))
    """
    # Context manager closes the file even if unpickling raises; the
    # original explicit close() leaked the handle on error.
    with open(a_path + os.sep + db_file_name, "rb") as bin_file:
        # The file holds three pickled objects back to back, in this order.
        interaction_data = pickle.load(bin_file)
        category_labels = pickle.load(bin_file)
        object_labels = pickle.load(bin_file)
    return reshape_full_data(interaction_data), reshape_full_data(category_labels), reshape_full_data(object_labels)
def repeat_trials(interaction_data_1_train, interaction_data_2_train):
    """
    Repeat trials for both robots
    :param interaction_data_1_train: Source robot dataset
    :param interaction_data_2_train: Target robot dataset
    :return: Repeated source robot dataset, Repeated target robot dataset
    """
    # Source: repeat each trial in place (axis 2) so that every source
    # example lines up with every target example.
    source_repeated = np.repeat(interaction_data_1_train, TRIALS_PER_OBJECT, axis=2)
    # Target: tile the whole trial axis the same number of times
    # (equivalent to the original loop of repeated concatenations).
    target_repeated = np.concatenate(
        [interaction_data_2_train] * TRIALS_PER_OBJECT, axis=2)
    return source_repeated, target_repeated
def object_recognition_classifier(clf, data_train, data_test, label_train, label_test, num_of_features):
    """
    Train a classifier and test it based on provided data
    :param clf: classifier object forwarded to model.classifier
    :param data_train: training examples (flattened to (-1, num_of_features))
    :param data_test: testing examples (flattened the same way)
    :param label_train: training labels (flattened to 1-D)
    :param label_test: testing labels (flattened to 1-D)
    :param num_of_features: feature-vector length used for the reshape
    :return: accuracy, prediction
    """
    # Collapse the (category, object, trial, ...) axes into plain 2-D
    # example matrices and 1-D label vectors.
    train_cats_data = data_train.reshape(-1, num_of_features)
    train_cats_label = label_train.reshape(-1, 1).flatten()
    test_cats_data = data_test.reshape(-1, num_of_features)
    test_cats_label = label_test.reshape(-1, 1).flatten()
    y_acc, y_pred = classifier(clf, train_cats_data, test_cats_data, train_cats_label, test_cats_label)
    return y_acc, y_pred
def print_discretized_data(data, x_values, y_values, modality, behavior, file_path=None):
    """
    Plot one discretized data point as a heatmap and optionally save it.
    :param data: one data point (flattened; reshaped to (x_values, y_values))
    :param x_values: number of temporal bins (x axis)
    :param y_values: number of rows per bin (joints / frequency bins, y axis)
    :param modality: 'Haptic', 'Audio', ... (selects the y-axis label)
    :param behavior: behavior name used in the plot title
    :param file_path: when given, the figure is saved to this path
    :return: None
    """
    data = data.reshape(x_values, y_values)
    plt.imshow(data.T)
    title_name = " ".join([behavior, modality, "Features"])
    plt.title(title_name, fontsize=16)
    plt.xlabel("Temporal Bins", fontsize=16)
    if modality == 'Haptic':
        y_label = "Joints"
    elif modality == 'Audio':
        y_label = "Frequency Bins"
    else:
        y_label = ""
    plt.ylabel(y_label, fontsize=16)
    ax = plt.gca()
    # Tick every bin, labeled 1..n instead of the default 0..n-1.
    ax.set_xticks(np.arange(0, x_values, 1))
    ax.set_yticks(np.arange(0, y_values, 1))
    ax.set_xticklabels(np.arange(1, x_values + 1, 1))
    ax.set_yticklabels(np.arange(1, y_values + 1, 1))
    plt.colorbar()
    # PEP 8: identity comparison with None (was `file_path != None`).
    if file_path is not None:
        plt.savefig(file_path, bbox_inches='tight', dpi=100)
    plt.close()
""" Setting 1 """
# Target Robot never interacts with a few categories
def reshape_data_setting1(num_of_category, data):
    """
    Reshape data into (Categories, Objects, Trials, features)
    :param num_of_category: number of categories present in this setting
    :param data: Dataset list
    :return: reshaped Dataset list
    """
    setting_shape = (num_of_category, OBJECTS_PER_CATEGORY, TRIALS_PER_OBJECT, -1)
    return data.reshape(setting_shape)
def get_data_label_for_given_labels(given_labels, interaction_data, category_labels):
    """
    Get all the examples of the given labels
    :param given_labels: labels to find (indices into the first axis)
    :param interaction_data: examples
    :param category_labels: labels
    :return: Dataset, labels (numpy arrays stacked in given_labels order)
    """
    data = np.array([interaction_data[a_label] for a_label in given_labels])
    label = np.array([category_labels[a_label] for a_label in given_labels])
    return data, label
def train_test_splits(num_of_objects):
    """
    Split the data into object based 5 fold cross validation
    :param num_of_objects: number of objects per category
    :return: dictionary containing train test index of 5 folds
    """
    tt_splits = {}
    for a_fold in range(5):
        # Fold k tests on object index k and trains on all other indices.
        test_index = np.arange(a_fold, a_fold + 1)
        train_index = []
        if a_fold > 0:
            train_index.extend(np.arange(0, a_fold))
        if a_fold < num_of_objects - 1:
            train_index.extend(np.arange(a_fold + 1, num_of_objects))
        fold_key = "fold_" + str(a_fold)
        tt_splits.setdefault(fold_key, {}).setdefault("train", []).extend(train_index)
        tt_splits.setdefault(fold_key, {}).setdefault("test", []).extend(test_index)
    return tt_splits
def object_based_5_fold_cross_validation(clf, data_train, data_test, labels, num_of_features):
    """
    Perform object based 5 fold cross validation and return mean accuracy
    :param clf: classifier
    :param data_train: Training dataset
    :param data_test: Testing dataset
    :param labels: True labels
    :param num_of_features: Number of features of the robot
    :return: mean accuracy of 5 fold validation
    """
    tts = train_test_splits(OBJECTS_PER_CATEGORY)
    my_acc = []
    for a_fold in sorted(tts):
        # Object indices (axis 1) trained on vs held out for this fold.
        train_cats_index = tts[a_fold]["train"]
        test_cats_index = tts[a_fold]["test"]
        # Flatten the (category, object, trial, ...) axes into plain 2-D
        # example matrices and 1-D label vectors.
        train_cats_data = data_train[:, train_cats_index]
        train_cats_label = labels[:, train_cats_index]
        train_cats_data = train_cats_data.reshape(-1, num_of_features)
        train_cats_label = train_cats_label.reshape(-1, 1).flatten()
        test_cats_data = data_test[:, test_cats_index]
        test_cats_label = labels[:, test_cats_index]
        test_cats_data = test_cats_data.reshape(-1, num_of_features)
        test_cats_label = test_cats_label.reshape(-1, 1).flatten()
        y_acc, y_pred = classifier(clf, train_cats_data, test_cats_data, train_cats_label, test_cats_label)
        my_acc.append(y_acc)
    return np.mean(my_acc)
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, get_user_model, login, logout
from .forms import UserLoginForm, UserRegisterForm
from django.http import HttpResponse
from django.contrib.auth.models import User
# Create your views here.
# def login_view(request):
# form = UserLoginForm(request.POST or None)
# print("THIS WORKED")
# if form.is_valid():
# username = form.cleaned_data.get("username")
# password = form.cleaned_data.get("password")
# return render(request, "accounts/login.html", {"form":form})
def login_view(request):
    """Authenticate a user from UserLoginForm; redirect to the store on
    success, back to the login page on bad credentials, and re-render the
    form otherwise (GET or invalid POST)."""
    myform = UserLoginForm(request.POST or None)
    if myform.is_valid():
        print('this was a post')
        data = myform.cleaned_data
        username = data['username']
        password = data['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect("store")
        else:
            # Wrong credentials: send the user back to a fresh login form.
            return redirect("login")
    else:
        return render(request, "accounts/login.html", {"form":myform})
def register_view(request):
    """Create a new user from UserRegisterForm, log them in, and redirect
    to the store; re-render the form when invalid (or on GET).
    Cleanup: removed an unused `Loginform = UserLoginForm()` local and a
    redundant `user.save()` -- create_user already saves the user.
    """
    myform = UserRegisterForm(request.POST or None)
    if myform.is_valid():
        data = myform.cleaned_data
        username = data["username"]
        email = data["email"]
        password = data["password"]
        # create_user hashes the password and persists the row.
        user = User.objects.create_user(username, email, password)
        login(request, user)
        return redirect("store")
    return render(request, "accounts/login.html", {"form":myform})
def logout_view(request):
    """Log the current user out and return to the login page."""
    logout(request)
    return redirect("login")
import requests
from bs4 import BeautifulSoup
def link_func(year):
    """Return match-report link fragments for Real Madrid's <year>/<year+1>
    season from fbref.com's match log table.
    NOTE(review): str.strip()/rstrip() take character SETS, not literal
    prefixes/suffixes -- these calls happen to carve out the href on the
    current markup but are fragile; reading the <a> tag's href attribute
    directly would be safer. Confirm against live fbref markup.
    """
    syear = str(year)
    syear1 = str(year + 1)
    test = requests.get('https://fbref.com/en/squads/53a2f082/' + syear + '-' + syear1 + '/Real-Madrid-Stats').text
    soup = BeautifulSoup(test, 'lxml')
    table = soup.find(id='matchlogs_for')
    links_list = []
    # Match-report cells carry class "left group_start".
    lists = table.find_all('td', class_="left group_start")
    for i in lists:
        links_list.append(i.__str__())
    proper_links = []
    for i in links_list:
        # Peel the surrounding markup off the serialized cell string.
        i = i.strip('''<td class="left group_start" data-stat="match_report"><a href''')
        i = i.rstrip('>Match Report</a></')
        i = i.rstrip('"')
        proper_links.append(i)
    return proper_links
def player_sub_info(player_name, soup2, soup3, soup4, list_times):
    """Return a minutes marker for one player in one match.
    :param player_name: player to look up
    :param soup2: starting-lineup text
    :param soup3: bench/substitutes text
    :param soup4: iterable of match events (stringifiable)
    :param list_times: event times aligned one-to-one with soup4
    :return: '+<time>' if a starter was subbed off, '90' if they played
        through; '-<time>' if a sub came on, '0' if they never entered;
        None when the player is in neither lineup
    Bug fixed: the original attached the '90'/'0' fallback as an `else` on
    the in-loop `if`, so the function returned after inspecting only the
    FIRST event. The fallback now runs only after scanning every event.
    """
    if player_name in soup2:  # player was in the starting lineup
        count = 0
        for i in soup4:
            count += 1
            if player_name in str(i) and "Substitute" in str(i):
                # subbed off at this event's time
                return '+' + list_times[count-1]
        return '90'
    elif player_name in soup3:  # player started on the bench
        count = 0
        for i in soup4:
            count += 1
            if player_name in str(i) and "Substitute" in str(i):
                # subbed on at this event's time
                return '-' + list_times[count-1]
        return '0'
# def players_played():
def headline(soup1, team):
    """Return the home ('event a') or away ('event b') event elements,
    depending on whether *team* appears first in the match <h1> headline.
    Raises ValueError (from str.index) when the team is not in the headline.
    """
    soup2 = soup1.find('h1').text
    if soup2.index(team) == 0:
        return soup1.find_all(class_="event a")
    else:
        return soup1.find_all(class_="event b")
def sub_info(player_name, soup4, sub_in_time, sub_off_time, list_times):
    """Scan substitution events and record the player's sub times.
    Appends to sub_in_time when the player comes on (named before 'for ')
    and to sub_off_time when they go off (named after it); returns after
    the first matching event (always None, via list.append).
    Bug fixed: the original recovered the position with soup4.index(i),
    which returns the FIRST occurrence and mis-times duplicate events;
    enumerate() tracks the true position.
    """
    for position, event in enumerate(soup4):
        event = str(event)
        if "event_icon substitute_in" not in event:
            continue
        parts = event.split('for ')
        if player_name in parts[0]:
            return sub_in_time.append(list_times[position])
        elif player_name in parts[1]:
            return sub_off_time.append(list_times[position])
def g_a_range(sub_in_time, sub_off_time, soup4, list_times):
    """Slice the event list down to the window the player was on the pitch.
    The lower bound comes from the last sub-on time and the upper bound
    from the first sub-off time; a missing bound leaves that side open.
    """
    lower_idx = None
    upper_idx = None
    if sub_in_time:
        lower_idx = list_times.index(sub_in_time[-1])
    if sub_off_time:
        upper_idx = list_times.index(sub_off_time[0])
    if lower_idx is None and upper_idx is None:
        return soup4
    if lower_idx is None:
        return soup4[:upper_idx]
    if upper_idx is None:
        return soup4[lower_idx:]
    return soup4[lower_idx:upper_idx]
def Goal_and_Assist(player_name, soup5):
    """Count goals and assists for *player_name* over the events in soup5.
    An event counts when it contains "event_icon goal" and the player's
    name; the scorer appears before the "Assist" marker, the assister
    after it.
    :return: [goals, assists]
    Cleanup: removed the original's no-op bare `Goals`/`Assists`
    expression statements and redundant trailing `continue`s; counting
    behavior is unchanged.
    """
    goals = 0
    assists = 0
    for event in soup5:
        event = str(event)
        if "event_icon goal" in event and player_name in event:
            parts = event.split("Assist")
            if player_name in parts[0]:
                goals += 1
            elif player_name in parts[1]:
                assists += 1
    return [goals, assists]
def minutes_played(sub_in_time, sub_off_time, soup):
    """Compute minutes played from the recorded sub-on/sub-off times.
    A '990' marker in *soup* means a 90-minute match; otherwise the match
    is treated as 120 minutes (extra time). Stoppage-time suffixes such
    as '45+2' are truncated before the arithmetic.
    """
    full_time = 90 if '990' in soup else 120
    if not sub_in_time and not sub_off_time:
        # Played the whole match.
        return full_time
    if not sub_in_time:
        # Started and was subbed off at the first recorded off-time.
        return int(sub_off_time[0].split('+')[0])
    if not sub_off_time:
        # Came on at the last recorded on-time and played to the end.
        return full_time - int(sub_in_time[-1].split('+')[0])
    # Came on and went off again.
    return int(sub_off_time[0].split('+')[0]) - int(sub_in_time[-1].split('+')[0])
|
from http import HTTPStatus
class BaseApplicationException(Exception):
    """Root of the application's exception hierarchy.
    Subclasses override `_status` to map themselves to an HTTP status;
    each instance exposes it as `self.status`.
    """
    # Default HTTP status for unclassified failures.
    _status = HTTPStatus.INTERNAL_SERVER_ERROR

    def __init__(self, *args):
        super().__init__(*args)
        self.status = self._status
class BadRequest(BaseApplicationException):
    """Raised for malformed client input; maps to HTTP 400."""
    _status = HTTPStatus.BAD_REQUEST
|
from flask import Flask, render_template, request
import md5
def encrypt(mechanism, message):
    """Apply a keyless transform to *message*; returns None for an
    unknown mechanism.
    Python 2 only: the 'rot13'/'base64'/'hex' string codecs and the `md5`
    module were removed in Python 3.
    NOTE(review): MD5 is a hash (not encryption) and is cryptographically
    broken -- fine for this demo, not for anything security-sensitive.
    """
    if mechanism in ['rot13', 'base64', 'hex']:
        return message.encode(mechanism)
    if mechanism == 'md5':
        m = md5.new()
        m.update(message)
        return m.hexdigest()
app = Flask(__name__)
@app.route('/')
@app.route('/encryption')
@app.route('/encryption/')
@app.route('/encryption/<mechanism>')
def enc(mechanism = 'all'):
    """GET handler: render the encryption form with empty results.
    With no <mechanism> in the URL, all keyless mechanisms are offered."""
    if mechanism == 'all':
        encryption = 'Keyless Encryptions'
    else:
        encryption = mechanism
    return render_template('encryption.html', text = '', results=[], mechanism = mechanism, Encryption = encryption)
@app.route('/encryption', methods=['POST'])
@app.route('/encryption/', methods=['POST'])
@app.route('/encryption/<mechanism>', methods=['POST'])
def enc_with_input(mechanism = 'all'):
    """POST handler: run the submitted text through one mechanism, or all
    of them when no <mechanism> is given, and render the results."""
    results = []
    text = request.form['text']
    if mechanism == 'all':
        for m in ['rot13', 'base64', 'hex', 'md5']:
            results.append((m, encrypt(m, text)))
        Encryption = 'Keyless Encryptions:'
    else:
        results.append((mechanism, encrypt(mechanism, text)))
        Encryption = mechanism
    return render_template('encryption.html', text = text, results=results,mechanism=mechanism, Encryption = Encryption)
if __name__ == '__main__':
    # Development settings: debug reloader on, listen on all interfaces.
    app.debug = True;
    app.run(host='0.0.0.0', port = 8000)
|
from onegov.core.orm.abstract import AdjacencyListCollection
from onegov.page.model import Page
class PageCollection(AdjacencyListCollection[Page]):
    """ A collection managing a tree of :class:`Page` records.

    Typical usage::

        from onegov.page import PageCollection
        pages = PageCollection(session)

    """

    __listclass__ = Page

    def copy(self, page: Page, parent: Page | None) -> Page:
        """ Duplicate *page* underneath *parent* and return the new page.

        The target parent may be the page's own parent or a different one.
        Name collisions with existing children are resolved through
        :meth:`get_unique_child_name`.
        """
        attributes = dict(
            parent=parent,
            title=page.title,
            type=page.type,
            meta=page.meta,
            content=page.content,
        )
        return self.add(**attributes)
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
# Redirect the legacy /favicon.ico request to the static asset.
favicon_redirect = RedirectView.as_view(url='/static/favicon.ico', permanent=True)

urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^favicon\.ico$', favicon_redirect),
    url(r'^upload/$', views.upload_image, name='upload_image'),
    url(r'^user/(?P<username>\w+)', views.edit_profile, name='edit_profile'),
    url(r'^accounts/editprofile/', views.editprofile, name='editprofile'),
    url(r'^image/(?P<image_id>\d+)', views.single_image, name='single_image'),
    url(r'^search/', views.search, name='search'),
    url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'),
    url(r'^like/(?P<image_id>\d+)', views.like, name='like'),
    url(r'^follow/(?P<user_id>\d+)', views.follow, name='follow'),
]

# Serve user-uploaded media directly in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
"""The module contains functions and classes for MRI reconstruction.
It provides convenient simulation and sampling functions,
such as the poisson-disc sampling function. It also
provides functions to compute preconditioners,
and density compensation factors.
"""
from sigpy.mri import app, dcf, linop, precond, samp, sim, util
from sigpy.mri.dcf import * # noqa
from sigpy.mri.precond import * # noqa
from sigpy.mri.samp import * # noqa
from sigpy.mri.sim import * # noqa
from sigpy.mri.util import * # noqa
__all__ = ["app", "linop"]
__all__.extend(dcf.__all__)
__all__.extend(precond.__all__)
__all__.extend(samp.__all__)
__all__.extend(sim.__all__)
__all__.extend(util.__all__)
|
import urban_dictionary
if __name__ == '__main__':
    # Entry point: instantiating UrbanDictionary starts the application
    # (presumably its constructor runs the main loop — TODO confirm).
    urban_dictionary.UrbanDictionary()
|
#######################################################################
#
# CSV Readers
#
#######################################################################
import csv
import os
import pandas as pd
from enum import Enum
def not_eol(y):
    """Return True if *y* contains no newline, i.e. is a plain field value."""
    return "\n" not in y
def read_csv(path, filename):
    """Read ``path/filename[.csv]`` and return its rows as a list of lists.

    The ``.csv`` extension is appended when missing. Returns None when the
    file does not exist (matching the original behaviour).
    """
    if filename[-3:] != "csv":
        filename += ".csv"
    f = os.path.join(path, filename)
    if not os.path.exists(f):
        return None
    # newline='' lets the csv module do its own newline handling, as the
    # csv documentation recommends; the with-block guarantees the file
    # is closed even if parsing raises.
    with open(f, newline='') as csvfile:
        return list(csv.reader(csvfile, delimiter=','))
def write_csv(path, filename, data):
    """Write *data* to ``path/filename[.csv]``.

    *data* may be a list of rows (each itself a list) or a single flat
    list, which is written as one row. The ``.csv`` extension is appended
    when missing.
    """
    if filename[-3:] != "csv":
        filename += ".csv"
    fn = os.path.join(path, filename)
    # newline='' stops the text layer from rewriting the terminator, so
    # every row ends with exactly '\n' on all platforms (without it,
    # Windows would emit '\r\n').
    with open(fn, 'w', newline='') as result_file:
        writer = csv.writer(result_file, lineterminator='\n')
        if all(isinstance(elem, list) for elem in data):
            writer.writerows(data)
        else:
            writer.writerow(data)
class CSVFilePanda:
    """CSV access backed by a pandas DataFrame.

    NOTE(review): ``df`` is inconsistent — in read mode it holds a
    DataFrame, but with ``writeaccess=True`` it holds a raw file object,
    so most methods only work after a read-mode open. Confirm intent.
    """
    # Class-level defaults, overwritten per instance in __init__.
    filename = 'default'
    df = None
    path = '.'
    def __init__(self, path='.', filename='default', writeaccess=False):
        self.filename = filename
        self.path = path
        self.open(writeaccess)
    def __del__(self):
        # Best-effort cleanup; close() is currently a no-op (see below).
        self.close()
    def open(self, writeaccess):
        """Open the file: a writable handle, or a parsed DataFrame."""
        try:
            if writeaccess:
                self.df = open(self.path + "/" + self.filename, 'w+')
            else:
                self.df = pd.read_csv(self.path + "/" + self.filename, sep=',')
                #print(self.df)
        except IOError as err:
            # Errors are reported, not raised; df stays None.
            print("I/O error({0}): {1}".format(err.errno, err.strerror))
    def close(self):
        # Intentionally a no-op for DataFrames; the commented call would
        # only apply to the write-mode file handle.
        if self.df is not None:
            pass
            #self.df.close()
    def print_df(self):
        """Dump the DataFrame (or handle) to stdout for debugging."""
        print(self.df)
    def read_rows_into_list(self):
        """Return the iteration items of df, floats where convertible.

        NOTE(review): iterating a DataFrame yields its column labels, not
        its rows — verify this is the intended behaviour.
        """
        rows = []
        for item in self.df:
            if item != '':
                try:
                    item = float(item)
                except ValueError:
                    pass
                rows.append(item)
        return rows
    def get_column_as_series(self, columnname):
        """Return the named column as a pandas Series."""
        return self.df[columnname]
    def get_column_as_list(self, columnname):
        """Return the named column as a plain Python list."""
        return self.get_column_as_series(columnname).tolist()
    def get_column_length(self):
        """Return the number of rows in the DataFrame."""
        return len(self.df.index)
    def get_column_max(self, columnname):
        """Return the maximum value of the named column."""
        return self.df[columnname].max()
    def get_column_min(self, columnname):
        """Return the minimum value of the named column."""
        return self.df[columnname].min()
    def find_closest_matching_index(self, columnname, value):
        """Return the row index whose value is closest to *value*.

        Assumes the column is sorted ascending — TODO confirm.
        """
        index = 0
        for item in self.df[columnname]:
            if item > value:
                break
            index += 1
        # Check if you are at the end of the column index
        if index >= len(self.df.index):
            index -= 1
        # Check if the previous value is actually closer to your target
        if abs(value-self.df[columnname][index-1]) < abs(value-self.df[columnname][index]):
            index -= 1
        return index
class CSVFile:
    """Thin line-oriented wrapper around a raw text file.

    Opens ``path/filename`` for reading (default) or writing and exposes
    simple row/line helpers. Open errors are printed, not raised.
    """

    # Class-level defaults, overwritten per instance in __init__.
    filename = 'default'
    path = '.'
    handle = None  # underlying file object; None if open() failed

    def __init__(self, path='.', filename='default', writeaccess=False):
        self.filename = filename
        self.path = path
        self.open(writeaccess)

    def __del__(self):
        self.close()

    def open(self, writeaccess):
        """Open the file: 'w+' for write access, text read mode otherwise."""
        # os.path.join instead of a literal backslash so the class works
        # on every platform, not just Windows.
        target = os.path.join(self.path, self.filename)
        try:
            if writeaccess:
                self.handle = open(target, 'w+')
            else:
                # Text mode, not 'rb': read_rows_into_list() strips '\n'
                # from str lines, which raises TypeError on bytes.
                self.handle = open(target, 'r')
        except IOError as err:
            print("I/O error({0}): {1}".format(err.errno, err.strerror))

    def close(self):
        if self.handle:
            self.handle.close()

    def write_row(self, rowdata):
        """Write one pre-formatted line, adding the newline."""
        self.handle.write(rowdata + '\n')

    def write_file(self, filedata):
        """Write each item of *filedata* as its own line."""
        for item in filedata:
            self.handle.write(item + '\n')

    def read_rows_into_list(self):
        """Return the file's lines with trailing newlines removed."""
        return [line.rstrip('\n') for line in self.handle]
class CSVFile2:
    """CSV file wrapper built on the stdlib ``csv`` module.

    ``f`` is the underlying file object; ``handle`` is a ``csv.reader``
    over it when opened for reading. Open errors are printed, not raised.
    """

    # Class-level defaults, overwritten per instance in __init__.
    filename = 'default'
    f = None        # underlying file object
    path = '.'
    handle = None   # csv.reader over self.f (read mode only)

    def __init__(self, path='.', filename='default', writeaccess=False):
        self.filename = filename
        self.path = path
        self.open(writeaccess)

    def __del__(self):
        self.close()

    def open(self, writeaccess):
        """Open the file; in read mode also attach a csv reader."""
        # os.path.join instead of a literal backslash for portability.
        target = os.path.join(self.path, self.filename)
        try:
            if writeaccess:
                self.f = open(target, 'w+')
            else:
                self.f = open(target)
                self.handle = csv.reader(self.f)
        except IOError as err:
            print("I/O error({0}): {1}".format(err.errno, err.strerror))

    def close(self):
        # Fix: csv.reader objects have no close(); closing the file
        # object itself is both necessary and sufficient.
        if self.f:
            self.f.close()

    def write_row(self, rowdata):
        # Fix: write through the file object — ``handle`` is only set in
        # read mode, so the old ``self.handle.write`` crashed on None.
        self.f.write(rowdata + '\n')

    def write_file(self, filedata):
        """Write each pre-formatted CSV line of *filedata*."""
        for item in filedata:
            self.f.write(item + '\n')

    def read_rows_into_list(self):
        """Return parsed rows, converting numeric fields to float."""
        rows = []
        for line in self.handle:
            row = []
            for item in line:
                if item != '':
                    try:
                        item = float(item)
                    except ValueError:
                        pass  # non-numeric fields stay as strings
                row.append(item)
            rows.append(row)
        return rows
|
import logging
import uuid
from django.contrib import auth
from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib import messages
from django.conf import settings
from apiclient import errors
from smtplib import SMTPException
from .models import User
from mysite import const
from mysite import helper
# Shared module logger under the project-wide logger name.
logger = logging.getLogger(const.LOGGER_NAME)
def login(request):
    """Authenticate a user from the posted email/password.

    GET (or an empty POST) renders the login form; a failed attempt
    re-renders it with an error message, a successful one redirects on.
    """
    if request.user.is_authenticated:
        return redirect(settings.LOGIN_REDIRECT_URL)
    if not request.POST:
        return render(request, 'login.html', {'type': 'login'})

    username = request.POST.get('email', '')
    password = request.POST.get('password', '')
    user = auth.authenticate(username=username, password=password)
    account_exists = User.objects.filter(username=username).count() > 0

    if not account_exists:
        error = "Don't have this account."
    elif user is None:
        error = "Your password was incorrect."
    elif not user.verified:
        error = "The account not verified yet."
    elif not user.is_active:
        error = "The account has been disabled."
    else:
        auth.login(request, user)
        logger.info("User name: {username} login successful".format(username=username))
        return redirect(settings.LOGIN_REDIRECT_URL)

    messages.error(request, error)
    return render(request, 'login.html', {'type': 'login'})
def signup(request):
    """Create an account from the posted email/password pair.

    On success a verification email is sent, the unverified user is
    stored, and the login page is rendered. Validation errors re-render
    the signup form; mail/API failures render the login page with an
    error message.
    """
    username = request.POST.get('email', '')
    password = request.POST.get('password', '')
    confirm_password = request.POST.get('confirm_password', '')
    has_user = User.objects.filter(username=username).count() > 0
    if has_user:
        error_msg = "Already had this account."
    elif password != confirm_password:
        error_msg = "Password not matching."
    else:
        try:
            verify_uuid = str(uuid.uuid4())
            verify_url = "{scheme}://{host}{verify_url}?verify_uuid={uuid}".format(
                scheme=request.scheme,
                host=request.get_host(),
                verify_url=settings.VERIFY_URL,
                uuid=verify_uuid
            )
            mail_content = settings.EMAIL_CONTEXT.format(
                verify_url=verify_url
            )
            gmail_service = helper.get_gmail_service()
            message = helper.create_message(
                sender=settings.EMAIL_HOST_USER,
                to=username,
                subject=settings.EMAIL_SUBJECT,
                msgplain=mail_content
            )
            helper.send_message_internal(
                service=gmail_service,
                user_id="me",
                message=message
            )
            messages.success(request, "Sent verify email success.")
            user = User.objects.create_user(
                username=username,
                email=username,
                password=password,
                verified=False,
                verify_uuid=verify_uuid
            )
            user.save()
        except (SMTPException, errors.HttpError) as err:
            logger.error("Username: {username}, Error: {err}".format(
                username=username,
                err=err
            ))
            messages.error(request, "Sent verify email fail.")
        # Fix: the original fell through to messages.error(error_msg) after
        # a successful signup, raising UnboundLocalError on error_msg.
        return render(request, 'login.html', {'type': 'login'})
    messages.error(request, error_msg)
    return render(request, 'login.html', {'type': 'signup'})
def logout(request):
    """End the current session and return to the landing page."""
    auth.logout(request)
    return redirect(settings.LOGIN_REDIRECT_URL)
def verify(request):
    """Confirm a signup from the emailed verification link."""
    verify_uuid = request.GET.get('verify_uuid', '')
    user = User.objects.filter(verify_uuid=verify_uuid).first()
    if user is None:
        logger.warning("Don't have this account. verify_uuid: {verify_uuid}".format(
            verify_uuid=verify_uuid
        ))
        return redirect(settings.LOGIN_URL)
    if user.verified:
        # Already confirmed earlier; nothing to update.
        return redirect(settings.LOGIN_URL)
    user.verified = True
    user.save()
    # auth.login() needs to know which backend authenticated this user.
    user.backend = 'django.contrib.auth.backends.ModelBackend'
    auth.login(request, user)
    return redirect(settings.LOGIN_REDIRECT_URL)
def success(request):
    """Render the static success page."""
    return render(request, 'success.html')
|
import numpy as np
import random
import matplotlib.pyplot as plt
import math
import tensorflow as tf
# Light-intensity field: z has shape (999, 999) — arange(1, 1000) yields
# 999 values — with every cell in row i equal to i+1, so intensity grows
# from 1 in the top row to 999 in the bottom row.
x = np.arange(1,1000,1)
y = np.arange(1,1000,1)
xx, yy = np.meshgrid(x,y, sparse = True)
z = np.tile(yy, (1, 999))
def positionGeneration(d):
    """Randomly place the robot; return np.array([x_l, x_r, y_l, y_r]).

    d -- distance between the left and right sensors (robot diameter).
    theta is the robot's heading in degrees; 0 faces the direction of
    maximum intensity. The right sensor is placed *d* away from the left
    one along the robot's lateral axis.
    """
    theta = np.random.randint(0, 360)
    x_l = np.random.randint(1 + d, 1000 - d)
    y_l = np.random.randint(1 + d, 1000 - d)
    # Fix: math.cos/math.sin expect radians; the original passed the raw
    # degree value, so the sensors were not separated by d.
    x_r = int(x_l + math.cos(math.radians(90 - theta)) * d)
    y_r = int(y_l - math.sin(math.radians(90 - theta)) * d)
    return (np.array([x_l, x_r, y_l, y_r]))
def zvalue(position, z):
    """Return the light intensities [z_left, z_right] at the sensor cells."""
    x_left, x_right, y_left, y_right = (int(v) for v in position)
    return np.array([z[x_left, y_left], z[x_right, y_right]])
def step(position, action):
    """Rotate the robot one degree about its centre; return the new pose.

    action == 0 rotates by +1 degree, anything else by -1 degree.
    position is [x_l, x_r, y_l, y_r]; the same layout is returned.
    """
    angle = math.pi / 180 if action == 0 else -(math.pi / 180)
    c, s = np.cos(angle), np.sin(angle)
    rotation = np.array(((c, -s), (s, c)))
    x_l, x_r, y_l, y_r = position[0], position[1], position[2], position[3]
    centre = np.array([(x_l + x_r) / 2, (y_l + y_r) / 2])
    # Rotate each sensor as a row vector about the centre point.
    left_new = np.matmul(np.array([x_l, y_l]) - centre, rotation) + centre
    right_new = np.matmul(np.array([x_r, y_r]) - centre, rotation) + centre
    return (np.array([left_new[0], right_new[0], left_new[1], right_new[1]]))
def rewardDone(state):
if (abs(state[0] - state[1]) < 5):
reward = 50
done = True
else:
# Giving negative reward when it is not done
reward = -1
done = False
return(reward, done)
# Q-network: a single linear layer mapping the two sensor intensities to
# Q-values for the two turn actions (TensorFlow 1.x graph-mode API).
learning_rate = 0.2
input_size = 2
output_size = 2
action_space = np.array([0, 1])
X = tf.placeholder( tf.float32, [None, input_size], name = 'input_x')
W1 = tf.get_variable("W1", shape = [input_size, output_size], initializer = tf.contrib.layers.xavier_initializer())
Qpred = tf.matmul(X, W1)
Y = tf.placeholder( tf.float32, [None, output_size])
loss = tf.reduce_sum(tf.square(Y-Qpred))
train = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)
# Hyperparameters
num_episodes = 3000
dis = 0.9  # discount factor
rList = [] # per-episode (steps / initial angle difference); lower is better
# Q-learning
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)
# About robot
d = 50  # sensor separation / robot diameter
for i in range( num_episodes):
    e = 1./((i+1)/10) # e-greedy value, decays as episodes progress
    rAll = 0
    done = False
    step_count = 0
    position = positionGeneration(d)
    state = zvalue(position, z)
    #initial_z_diff = abs(state[0] - state[1])
    # Initial heading error in degrees, derived from the sensors' vertical
    # offset; presumably (y_l - y_r)/d is the cosine of the heading — TODO confirm.
    if position[2] > position[3]:
        initial_angle_diff = math.acos((position[2]-position[3])/d) * 180/(math.pi)
    else:
        initial_angle_diff = 360 - (math.acos((position[2]-position[3])/d) * 180/(math.pi))
    while not done:
        step_count += 1
        x = np.reshape(state, [1, input_size]) # pre-processing the input data
        # Choose an action greedily, with epsilon-random exploration
        Qs = sess.run(Qpred, feed_dict={X: x})
        if np.random.rand(1) < e:
            # NOTE(review): np.random.randint(1, size = 0) returns an EMPTY
            # array, so this exploration branch effectively does nothing —
            # np.random.randint(2) was probably intended.
            action = np.random.randint(1, size = 0)
        else:
            action = np.argmax(Qs)
        # Result from the action
        new_position = step(position, action)
        new_state = zvalue(new_position, z)
        reward, done = rewardDone(new_state)
        # Updating Q-network with the one-step TD target
        x1 = np.reshape(new_state, [1, input_size])
        Qs1 = sess.run(Qpred, feed_dict={X: x1})
        Qs[0, action] = reward + dis * np.max(Qs1) # check
        rAll += reward
        sess.run(train, feed_dict = {X: x, Y: Qs})
        position = new_position
        state = new_state
    # Record steps normalised by the (shorter) angular distance to goal;
    # near-aligned starts (< 1 or > 359 degrees) are recorded as 0.
    if (initial_angle_diff > 1 and initial_angle_diff < 359):
        if (initial_angle_diff < 180):
            rList.append(step_count/initial_angle_diff)
        else:
            rList.append(step_count/( 360 - initial_angle_diff))
    else:
        rList.append(0)
    print("Episodes: {}, steps: {}, initial angle difference: {}".format(i, step_count, initial_angle_diff))
plt.bar(range(len(rList)), rList, color="blue")
plt.show()
# Import the Flask module that has been installed.
from flask import Flask, jsonify
# Createing a "games" JSON / dict to emulate data coming from a database.
games = [
{
"id": 0,
"name": "Scrabble",
"editor": "mattel",
"year_published": "1978",
"description": "descp",
"category": "family",
"time": "60min",
"number_player": "2-5"
},
{
"id": 1,
"name": "Aventuriers du rail",
"editor": "asmodee",
"year_published": "2006",
"description": "descp",
"category": "family",
"time": "45min",
"number_player": "2-5"
}
]
# Creating a new "app" by using the Flask constructor. Passes __name__ as a parameter.
app = Flask(__name__)
# Root route: a plain-text greeting.
@app.route("/")
def index():
    """Return a static greeting."""
    return "Hello world!"
@app.route("/bordgames/v1.0/games", methods=["GET"])
def get_games():
    """List every game as JSON under the "games" key."""
    return jsonify({"games": games})
@app.route("/bordgames/v1.0/games", methods=["POST"])
# Function that will run when the endpoint is hit.
def getpost_games():
# Returns a JSON of the games defined above. jsonify is a Flask function that serializes the object for us.
return jsonify({"games": games})
@app.route("/bordgames/v1.0/games/<int:game_id>", methods=["GET"])
def get_game(game_id):
    """Return the game with the given id as a JSON response.

    Falls back to an empty dict when no game matches; a production API
    should answer 404 here instead (kept as-is to preserve behaviour).
    """
    result = {}
    for game in games:
        if game["id"] == game_id:
            result = jsonify({"game": game})
            break  # ids are unique; no need to scan the remaining games
    return result
# Start the development server only when executed directly, not on import.
if __name__ == "__main__":
    app.run(host='0.0.0.0')
import urllib
from bs4 import BeautifulSoup
# Posting to http://egov1.co.gaston.nc.us/website/ParcelDataSite/viewer.htm
# Query is for '4A017' in 'Neighborhood Code by Number'
# Post data (Python 2 urllib API).
data = urllib.urlencode(dict(ArcXMLRequest='''<?xml version="1.0" encoding="UTF-8" ?><ARCXML version="1.1">
<REQUEST>
<GET_FEATURES outputmode="xml" geometry="false" envelope="true" featurelimit="1000" beginrecord="1">
<LAYER id="121212" /><SPATIALQUERY subfields="PUBLICGIS.GISADMIN.PARCELCAMA.PID PUBLICGIS.GISADMIN.PARCELCAMA.CURR_NAME1 PUBLICGIS.GISADMIN.PARCELCAMA.CURR_NAME2 PUBLICGIS.GISADMIN.PARCELCAMA.CURR_ADDR1 PUBLICGIS.GISADMIN.PARCELCAMA.CURR_ADDR2 PUBLICGIS.GISADMIN.PARCELCAMA.CURR_CITY PUBLICGIS.GISADMIN.PARCELCAMA.CURR_STATE PUBLICGIS.GISADMIN.PARCELCAMA.CURR_ZIPCODE PUBLICGIS.GISADMIN.PARCELCAMA.PHYSSTRADD PUBLICGIS.GISADMIN.PARCELCAMA.SALESAMT PUBLICGIS.GISADMIN.PARCELCAMA.SALEDATE PUBLICGIS.GISADMIN.PARCELCAMA.YEARBLT PUBLICGIS.GISADMIN.PARCELCAMA.SQFT PUBLICGIS.GISADMIN.PARCELCAMA.XBEDRM PUBLICGIS.GISADMIN.PARCELCAMA.XBATHS PUBLICGIS.GISADMIN.PARCELCAMA.CALCAC PUBLICGIS.GISADMIN.PARCELCAMA.TOTVAL PUBLICGIS.GISADMIN.PARCELCAMA.IMAGEPATH" where="( NBHDNUM like '%4A017%' )" /></GET_FEATURES></REQUEST></ARCXML>''', BgColor='#000000', FormCharset='ISO-8859-1', RedirectURL='', HeaderFile='', FooterFile=''))
post = urllib.urlopen("http://egov1.co.gaston.nc.us/servlet/com.esri.esrimap.Esrimap?ServiceName=MS2003Parcel1&CustomService=Query&ClientVersion=3.1&Form=True&Encode=False", data)
result = post.read()
# Parse the result, and get the contents of the (only) <script> tag.
# Requires BeautifulSoup, lxml, and html5lib.
soup = BeautifulSoup(result)
js = soup.script.get_text()
# Make the JavaScript return the XML. Fix encoding.
# NOTE(review): replace('&', '&') is a no-op — likely meant to unescape
# '&amp;' back to '&'; confirm against the fetched payload.
fixed = js.replace('null(XMLResponse)', 'return XMLResponse').replace('#SHAPE#', 'shape').replace('&', '&')
# Fix: the original ended with ``outfile.close`` (attribute access, never
# called), leaking the handle; the with-block guarantees it is closed.
with open('output.js', 'w+') as outfile:
    outfile.write(fixed)
from models import db, Pet
from app import app
# Reset the schema and seed it with a handful of sample pets.
db.drop_all()
db.create_all()

seed_pets = [
    Pet(name='Kilo', species='Dog', photo_url='https://images.unsplash.com/photo-1587790311640-50b019663f01?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=800&q=80', age=3),
    Pet(name='Reggie', species='Dog', photo_url='https://images.unsplash.com/photo-1508948956644-0017e845d797?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=832&q=80', age=14, available=False),
    Pet(name='Bowser', species='Cat', photo_url='https://images.unsplash.com/photo-1588000152938-fae9cedc61ce?ixid=MnwxMjA3fDB8MHxzZWFyY2h8MTZ8fGNhdHxlbnwwfDJ8MHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=500&q=60', age=14, notes='Sweet, Calm, Loving'),
    Pet(name='Onyx', species='Cat', photo_url='https://images.unsplash.com/photo-1615903629900-be1d6190c5a3?ixid=MnwxMjA3fDB8MHxzZWFyY2h8MTN8fGNhdHxlbnwwfDJ8MHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=500&q=60', age=4, notes='FIV positive'),
    Pet(name='Morty', species='Cat', age=1),
]
db.session.add_all(seed_pets)
db.session.commit()
|
from flask import jsonify
from service.student_questions_services import StudentQuestionsServices
def route(app):
    """Register the student-question endpoints on *app*."""
    @app.route("/studentquestions", methods=['GET'])
    def get_all_student_questions():
        # 200: successful retrieval of every stored student question.
        return jsonify(StudentQuestionsServices.get_all_student_questions()), 200
|
#!/usr/bin/env python3
# encoding: utf-8
"""
quick_sort.py
Created by Jakub Konka on 2011-11-01.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
import sys
import random as rnd
# Deep recursion guard: quick_sort recurses once per partition level and
# can exceed the default ~1000-frame limit on adversarial inputs.
sys.setrecursionlimit(10000)
def quick_sort(array):
    '''Sort *array* of integers with the classic quick sort algorithm.

    The last element is taken as the pivot; strictly smaller elements are
    sorted recursively on the left, the rest on the right.

    Keyword arguments:
    array -- input array of integers
    Return: a sorted array
    '''
    if len(array) <= 1:
        return array
    pivot = array[-1]
    smaller = [v for v in array[:-1] if v < pivot]
    larger_or_equal = [v for v in array[:-1] if v >= pivot]
    return quick_sort(smaller) + [pivot] + quick_sort(larger_or_equal)
if __name__ == '__main__':
    # Array length comes from the command line; contents are random ints.
    size = int(sys.argv[1])
    array = quick_sort([rnd.randint(1, 100) for _ in range(size)])
|
# Number of test cases to process.
t = int(input())
# Python program to compute sum of pairwise bit differences
def sumBitDifferences(arr, n):
    """Sum of bit differences over all ordered pairs of the first n values.

    For each of the 32 bit positions, the k elements with that bit set
    differ from the (n - k) elements without it, contributing k*(n-k)*2
    to the total (the factor 2 counts both pair orderings).
    """
    total = 0
    for bit in range(32):
        ones = sum(1 for j in range(n) if arr[j] & (1 << bit))
        total += ones * (n - ones) * 2
    return total
for _ in range(t):
    # Each case: element count on one line, the elements on the next.
    n = int(input())
    values = list(map(int, input().split()))
    print(sumBitDifferences(values, n))
# Read the rectangle's dimensions and report its area.
alt = float(input('Digite a altura: '))
lar = float(input('Digite a largura: '))
area = lar * alt
print(f"A área é: {area}")
import os
import requests
from flask import Flask, jsonify, render_template, request, session
from flask_socketio import SocketIO, emit
from flask_session import Session
from datetime import datetime, date
from flask_socketio import join_room, leave_room
# Flask + socket.io application with server-side (filesystem) sessions.
app = Flask(__name__)
app.config["SECRET_KEY"] = os.getenv("SECRET_KEY")
app.config['TEMPLATES_AUTO_RELOAD'] = True
socketio = SocketIO(app)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# In-memory state: channel name -> {timestamp -> message dict}, plus the
# list of currently connected user names. "AllInfo" is a pseudo-channel
# used as a registry of known channel names.
channel = {}
onlineUsers = []
channel["Public"] = {}
channel["AllInfo"] = {}
AllInfo = channel["AllInfo"]
AllInfo["Public"] = "Public"
# Placeholder message that seeds the Public channel; removed once a real
# message arrives (see the "message" handler).
initChannel = {
    "ChannelName": "Public",
    "name": "default",
    "content": "default"
}
initTime = "_1_"
channel["Public"][initTime] = initChannel
@app.route("/")
def index():
return render_template("index.html")
@app.route("/duckflame", methods=['POST'])
def duckflame():
name = request.form.get('name')
if name in onlineUsers:
render_template("error.html", userName=name)
userTemp = onlineUsers[:]
onlineUsers.append(name)
return render_template("mainPage.html", onlineUsers=userTemp, name=name, slf=name)
@app.route("/channelInfomation")
def getChannel():
return jsonify(channel)
@app.route("/channelInfo/<string:ChannelName>")
def changeChannel(ChannelName):
name = ChannelName.split()
return channel[name[0]]
@socketio.on("privateMesSend")
def sendPrivate(data):
username = data["username"]
room = data["room"]
content = data["content"]
emit("privateMesSend", {"content": content, "username": username}, room=room);
@socketio.on("sub name")
def enter(data):
name = data["name"]
emit("announce name", {"name": name}, broadcast=True)
@socketio.on("connection request")
def conRequest(data):
room = data["room"]
receiver = data["receiver"]
sender = data["sender"]
emit("connect Request", {"room": room, "receiver": receiver, "sender": sender}, broadcast=True)
@socketio.on("message")
def send(data):
"""
TODO: Channel can sorted by data/time.
"""
content = data["message"].split()
name = data["name"].split()
time = data["date"]
ChannelName = data["ChannelName"].replace(" ", "").split()
userMes = channel[ChannelName[0]]
if time not in userMes:
userMes[time] = {}
userMes[time]["name"] = name
userMes[time]["content"] = content
userMes[time]["ChannelName"] = ChannelName[0]
if initTime in userMes:
del userMes[initTime]
emit("announce message", {"name": name, "content": content, "ChannelName": ChannelName[0]}, broadcast=True)
@socketio.on('join')
def on_join(data):
    """Subscribe the requesting client to a socket.io room."""
    _ = data['username']  # payload must carry a username, even if unused
    join_room(data['room'])
@socketio.on('leave')
def on_leave(data):
    """Unsubscribe the requesting client from a socket.io room."""
    _ = data['username']  # payload must carry a username, even if unused
    leave_room(data['room'])
@socketio.on("logout")
def logout(data):
onlineUsers.remove(date.name)
Session.remove(data.name)
return render_template("index.html")
@socketio.on("Create channel")
def createC(data):
channelN = data["ChannelName"].replace(" ", "").split()
channel[channelN[0]] = {}
AllInfo[channelN[0]] = channelN
emit("announce new Channel", { "ChannelName": channelN }, broadcast=True)
@socketio.on("newUserOnline")
def newUserOnlineNotice(data):
emit("newUserOnlineNotice", { "name": data["name"] }, broadcast=True)
|
def sum(a, b):
    """Return the sum of *a* and *b*. (Shadows the builtin ``sum``.)"""
    return a + b
# Read two integers and print their sum.
a = int(input('Enter first number '))
b = int(input('Enter second number '))
print("Sum of number is " + str(sum(a, b)))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 10:21:35 2018
@author: ddeng
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import pdb
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC, LinearSVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import RFECV
from sklearn.linear_model import RandomizedLogisticRegression, LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
import time
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import to_categorical
import itertools
from keras.optimizers import SGD
def _frame_to_arrays(frame):
    """Convert one dataframe into (features, target) numpy arrays.

    Feature columns are everything from the 8th column on; the target is
    the 'ManiaLevel' column.
    """
    n_sample = len(frame)
    n_feature = len(frame.keys()[7:])
    data = np.empty((n_sample, n_feature), dtype=np.float64)
    # np.int was removed in NumPy 1.24; use the explicit fixed-width type.
    target = np.empty((n_sample,), dtype=np.int64)
    for index, _row in frame.iterrows():
        data[index] = np.asarray(frame.loc[index, frame.keys()[7:]], dtype=np.float64)
        target[index] = np.asarray(frame.loc[index, 'ManiaLevel'])
    return data, target

def load_my_data():
    """Load the hand-crafted VA feature CSV.

    Returns a pair:
      * {'data', 'target'} over all rows, and
      * {'train': [data, target], 'dev': [data, target]} split on the
        CSV's 'Partition' column.
    """
    df = pd.read_csv('hand_crafted_features_va.csv')
    data, target = _frame_to_arrays(df)
    full_dict = {'data': data, 'target': target}
    train_df = df.loc[df['Partition'] == 'train']
    train_df.reset_index(drop=True, inplace=True)
    dev_df = df.loc[df['Partition'] == 'dev']
    dev_df.reset_index(drop=True, inplace=True)
    train_set = list(_frame_to_arrays(train_df))
    dev_set = list(_frame_to_arrays(dev_df))
    return full_dict, {'train': train_set, 'dev': dev_set}
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix *cm* with one
    tick per entry of *classes*.
    Normalization can be applied by setting `normalize=True` (each row
    is divided by its sum, i.e. per-true-class rates).
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate every cell; switch the text colour at half the maximum so
    # labels stay readable on both light and dark cells.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def main():
    """Binary remission-vs-mania SVM pipeline: scale, select features
    with an L1-ish LinearSVC, grid-search an SVC, and report results."""
    dataset, _= load_my_data()
    X, y = dataset['data'], dataset['target']
    X = preprocessing.scale(X)
    # binary classification: merge class 3 into class 2
    y[y==3]=2
    # split for cross validation
    #random_state=409,
    # stratify = np.zeros((X.shape[0]//2, ))
    # stratify = np.concatenate((stratify, np.ones(X.shape[0] - X.shape[0]//2, )))
    X_train, X_test, y_train, y_test = train_test_split(
        X,y, test_size = 0.4, random_state=10, shuffle=True, stratify=y)
    # feature normalization
    ticks = time.time()
    estimator = SVC(kernel='linear')
    # Earlier RFECV-based selection, kept for reference:
    # selector = RFECV(estimator=estimator, step=1, cv=StratifiedKFold(5),
    # scoring ='recall_macro')
    # selector.fit(X_train, y_train)
    # num_f_bef = X_train.shape
    # #transform on two sets
    # X_train = selector.transform(X_train)
    # X_test = selector.transform(X_test)
    # Feature selection: keep features the linear SVC weights above the
    # default threshold; fitted on train only, applied to both sets.
    lsvc = LinearSVC(C=10, dual=False).fit(X_train, y_train)
    model = SelectFromModel(lsvc, prefit=True)
    num_f_bef = X_train.shape
    X_train = model.transform(X_train)
    X_test = model.transform(X_test)
    print('before selecting,{}; after selecting: {}'.format(num_f_bef[1], X_train.shape[1]))
    # classifications: grid over linear and RBF kernels
    param_grid = [
        {'C': [ 1, 10, 100, 1000, 10000], 'kernel': ['linear']},
        {'C': [1, 10, 100, 1000, 10000], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001, 0.00001, 1e-6, 1e-7], 'kernel': ['rbf']},
    ]
    score = 'recall'
    class_names = ['remission','mania']
    print("# Tuning hyper-parameters for %s" % score)
    print()
    clf = GridSearchCV(estimator, param_grid, cv=5, scoring='%s_macro' % score)
    clf.fit(X_train, y_train)
    print('Time Elapse: {}'.format(time.time()-ticks))
    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Best scores found on development set:")
    print()
    print(clf.best_score_)
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    print()
    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred, target_names=class_names))
    print()
    # confusion matrix: raw counts, then row-normalized
    plt.figure()
    plot_confusion_matrix(confusion_matrix(y_test, y_pred), classes=class_names,
                          title = 'Confusion matrix without normalization')
    plt.figure()
    plot_confusion_matrix(confusion_matrix(y_test, y_pred), classes=class_names, normalize=True,
                          title = 'Confusion matrix')
def create_model(dropout_rate=0.0, neurons=300, optimizer='adam', learn_rate=0.01, momentum=0.9):
    """Build the 1535-input, 3-class softmax MLP used by the grid search.

    NOTE(review): the ``optimizer`` argument is ignored — it is always
    overwritten by the SGD instance built from ``learn_rate``/``momentum``
    below. Confirm whether honouring it was intended.
    """
    # create model
    model = Sequential()
    model.add(Dense(neurons, input_dim=1535, activation='relu', kernel_initializer='uniform'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    optimizer = SGD(lr = learn_rate, momentum=momentum)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
def cv_on_deep_nn():
    """Grid-search the Keras MLP (create_model) over batch size, epochs,
    dropout, width, learning rate and momentum; report CV and test scores."""
    # fix random seed for reproducibility
    seed = 7
    np.random.seed(seed)
    dataset, _= load_my_data()
    X, y = dataset['data'], dataset['target']
    # One-hot encoding experiment, kept for reference:
    # encoder = LabelEncoder()
    # encoder.fit(y)
    # y = encoder.transform(y)
    # y = to_categorical(y)
    X = preprocessing.scale(X)
    #random_state=409,
    # Synthetic three-way stratification labels (thirds of the data);
    # NOTE(review): this stratifies on row position, not on y — confirm.
    stratify = np.zeros((X.shape[0]//3, ))
    stratify = np.concatenate((stratify, np.ones(X.shape[0]//3, )))
    stratify = np.concatenate((stratify, np.ones(X.shape[0] - 2*(X.shape[0]//3), )+1))
    X_train, X_test, y_train, y_test = train_test_split(
        X,y, test_size = 0.4, random_state=409, shuffle=True, stratify=stratify)
    score = 'recall'
    class_names = ['remission','hypomania','mania']
    # create model
    model = KerasClassifier(build_fn=create_model, verbose=0)
    # define the grid search parameters
    # batch_size = [10, 20, 40, 60, 80, 100]
    # epochs = [10, 50, 100]
    batch_size = [20, 40]
    epochs = [ 50, 100]
    dropout_rate = [ 0.3, 0.5, 0.6, 0.7]
    neurons = [300, 600]
    learn_rate = [0.001, 0.01]
    momentum = [0.0,0.3, 0.6, 0.8, 0.9]
    param_grid = dict(batch_size=batch_size, epochs=epochs,
                      neurons=neurons, dropout_rate = dropout_rate,
                      learn_rate=learn_rate, momentum=momentum)
    grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring='%s_macro' % score, n_jobs=-1)
    grid_result = grid.fit(X_train, y_train)
    # summarize results
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
    # test on the held-out split
    y_true, y_pred = y_test, grid.predict(X_test)
    print(classification_report(y_true, y_pred, target_names=class_names))
    print()
    # confusion matrix: raw counts, then row-normalized
    plt.figure()
    plot_confusion_matrix(confusion_matrix(y_test, y_pred), classes=class_names,
                          title = 'Confusion matrix without normalization')
    plt.figure()
    plot_confusion_matrix(confusion_matrix(y_test, y_pred), classes=class_names, normalize=True,
                          title = 'Confusion matrix')
    print(classification_report(y_test, y_pred))
if __name__ == "__main__":
    # Fixed: removed a leftover `pdb.set_trace()` debugging breakpoint that
    # dropped every run of the script into the interactive debugger.
    main()
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
import vtk
from vtk.util import numpy_support
from scipy.ndimage import map_coordinates
from fury.colormap import line_colors
def set_input(vtk_object, inp):
    """Connect *inp* as the input of *vtk_object* (VTK 5/6 compatible).

    Parameters
    ----------
    vtk_object: vtk object
    inp: vtkPolyData or vtkImageData or vtkAlgorithmOutput

    Returns
    -------
    vtk_object
        The same object, updated, so calls can be chained::

            from fury.utils import set_input
            poly_mapper = set_input(vtk.vtkPolyDataMapper(), poly_data)
    """
    # Data objects use SetInputData; pipeline connections use
    # SetInputConnection.
    if isinstance(inp, (vtk.vtkPolyData, vtk.vtkImageData)):
        vtk_object.SetInputData(inp)
    elif isinstance(inp, vtk.vtkAlgorithmOutput):
        vtk_object.SetInputConnection(inp)
    vtk_object.Update()
    return vtk_object
def numpy_to_vtk_points(points):
    """Convert a numpy array of 3D points into a vtkPoints object.

    Parameters
    ----------
    points : ndarray

    Returns
    -------
    vtk_points : vtkPoints()
    """
    data_array = numpy_support.numpy_to_vtk(np.asarray(points), deep=True)
    result = vtk.vtkPoints()
    result.SetData(data_array)
    return result
def numpy_to_vtk_colors(colors):
    """Convert a numpy color array to a VTK unsigned-char color array.

    Parameters
    ----------
    colors: ndarray

    Returns
    -------
    vtk_colors : vtkDataArray

    Notes
    -----
    If colors are not already in UNSIGNED_CHAR you may need to multiply by 255.

    Examples
    --------
    >>> import numpy as np
    >>> from fury.utils import numpy_to_vtk_colors
    >>> rgb_array = np.random.rand(100, 3)
    >>> vtk_colors = numpy_to_vtk_colors(255 * rgb_array)
    """
    color_data = np.asarray(colors)
    return numpy_support.numpy_to_vtk(color_data, deep=True,
                                      array_type=vtk.VTK_UNSIGNED_CHAR)
def map_coordinates_3d_4d(input_array, indices):
    """Sample *input_array* at *indices* using trilinear interpolation.

    Parameters
    ----------
    input_array : ndarray,
        3D or 4D array
    indices : ndarray
        One row of coordinates per sample point.

    Returns
    -------
    output : ndarray
        1D array for 3D input; 2D array (points x channels) for 4D input.
    """
    if input_array.ndim <= 2 or input_array.ndim >= 5:
        raise ValueError("Input array can only be 3d or 4d")
    if input_array.ndim == 3:
        return map_coordinates(input_array, indices.T, order=1)
    # 4D case: interpolate each trailing channel independently, then stack
    # them as columns.
    per_channel = [map_coordinates(input_array[..., ch], indices.T, order=1)
                   for ch in range(input_array.shape[-1])]
    return np.ascontiguousarray(np.array(per_channel).T)
def lines_to_vtk_polydata(lines, colors=None):
    """ Create a vtkPolyData with lines and colors

    Parameters
    ----------
    lines : list
        list of N curves represented as 2D ndarrays
    colors : array (N, 3), list of arrays, tuple (3,), array (K,), None
        If None then a standard orientation colormap is used for every line.
        If one tuple of color is used. Then all streamlines will have the same
        colour.
        If an array (N, 3) is given, where N is equal to the number of lines.
        Then every line is coloured with a different RGB color.
        If a list of RGB arrays is given then every point of every line takes
        a different color.
        If an array (K, 3) is given, where K is the number of points of all
        lines then every point is colored with a different RGB color.
        If an array (K,) is given, where K is the number of points of all
        lines then these are considered as the values to be used by the
        colormap.
        If an array (L,) is given, where L is the number of streamlines then
        these are considered as the values to be used by the colormap per
        streamline.
        If an array (X, Y, Z) or (X, Y, Z, 3) is given then the values for the
        colormap are interpolated automatically using trilinear interpolation.

    Returns
    -------
    poly_data : vtkPolyData
    is_colormap : bool, true if the input color array was a colormap
    """
    # Get the 3d points_array
    points_array = np.vstack(lines)
    nb_lines = len(lines)
    nb_points = len(points_array)
    lines_range = range(nb_lines)
    # Get lines_array in vtk input format:
    # [len_0, ids..., len_1, ids..., ...] (vtkCellArray layout)
    lines_array = []
    # Using np.intp (instead of int64), because of a bug in numpy:
    # https://github.com/nipy/dipy/pull/789
    # https://github.com/numpy/numpy/issues/4384
    points_per_line = np.zeros([nb_lines], np.intp)
    current_position = 0
    for i in lines_range:
        current_len = len(lines[i])
        points_per_line[i] = current_len
        end_position = current_position + current_len
        lines_array += [current_len]
        lines_array += range(current_position, end_position)
        current_position = end_position
    lines_array = np.array(lines_array)
    # Set Points to vtk array format
    vtk_points = numpy_to_vtk_points(points_array)
    # Set Lines to vtk array format
    vtk_lines = vtk.vtkCellArray()
    vtk_lines.GetData().DeepCopy(numpy_support.numpy_to_vtk(lines_array))
    vtk_lines.SetNumberOfCells(nb_lines)
    is_colormap = False
    # Get colors_array (reformat to have colors for each points)
    # - if/else tested and work in normal simple case
    if colors is None:  # set automatic rgb colors
        cols_arr = line_colors(lines)
        colors_mapper = np.repeat(lines_range, points_per_line, axis=0)
        vtk_colors = numpy_to_vtk_colors(255 * cols_arr[colors_mapper])
    else:
        cols_arr = np.asarray(colors)
        # Fixed: compare against the builtin ``object`` dtype — the
        # ``np.object`` alias was deprecated in NumPy 1.20 and removed in
        # 1.24, so the old comparison raises AttributeError there.
        if cols_arr.dtype == object:  # colors is a list of colors
            vtk_colors = numpy_to_vtk_colors(255 * np.vstack(colors))
        else:
            if len(cols_arr) == nb_points:
                if cols_arr.ndim == 1:  # values for every point
                    vtk_colors = numpy_support.numpy_to_vtk(cols_arr,
                                                            deep=True)
                    is_colormap = True
                elif cols_arr.ndim == 2:  # map color to each point
                    vtk_colors = numpy_to_vtk_colors(255 * cols_arr)
            elif cols_arr.ndim == 1:
                if len(cols_arr) == nb_lines:  # values for every streamline
                    # Expand one scalar per line to one scalar per point.
                    cols_arrx = []
                    for (i, value) in enumerate(colors):
                        cols_arrx += lines[i].shape[0]*[value]
                    cols_arrx = np.array(cols_arrx)
                    vtk_colors = numpy_support.numpy_to_vtk(cols_arrx,
                                                            deep=True)
                    is_colormap = True
                else:  # the same colors for all points
                    vtk_colors = numpy_to_vtk_colors(
                        np.tile(255 * cols_arr, (nb_points, 1)))
            elif cols_arr.ndim == 2:  # map color to each line
                colors_mapper = np.repeat(lines_range, points_per_line, axis=0)
                vtk_colors = numpy_to_vtk_colors(255 * cols_arr[colors_mapper])
            else:  # colormap
                # get colors for each vertex by sampling the volume at the
                # point coordinates
                cols_arr = map_coordinates_3d_4d(cols_arr, points_array)
                vtk_colors = numpy_support.numpy_to_vtk(cols_arr, deep=True)
                is_colormap = True
    vtk_colors.SetName("Colors")
    # Create the poly_data
    poly_data = vtk.vtkPolyData()
    poly_data.SetPoints(vtk_points)
    poly_data.SetLines(vtk_lines)
    poly_data.GetPointData().SetScalars(vtk_colors)
    return poly_data, is_colormap
def get_polydata_lines(line_polydata):
    """Extract the polylines of a vtkPolyData as a list of 2D ndarrays.

    Parameters
    ----------
    line_polydata : vtkPolyData

    Returns
    -------
    lines : list
        List of N curves represented as 2D ndarrays
    """
    vertices = numpy_support.vtk_to_numpy(line_polydata.GetPoints().GetData())
    connectivity = numpy_support.vtk_to_numpy(line_polydata.GetLines().GetData())
    lines = []
    cursor = 0
    total = len(connectivity)
    # vtkCellArray layout: [count_0, ids..., count_1, ids..., ...]
    while cursor < total:
        count = connectivity[cursor]
        stop = cursor + count + 1
        point_ids = connectivity[cursor + 1: stop]
        lines.append(vertices[point_ids])
        cursor = stop
    return lines
def get_polydata_triangles(polydata):
    """Get triangles (ndarray Nx3 int) from a vtk polydata.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        triangles
    """
    cells = numpy_support.vtk_to_numpy(polydata.GetPolys().GetData())
    # Every cell must announce exactly 3 vertex ids (i.e. be a triangle).
    assert (cells[::4] == 3).all()
    return np.column_stack([cells[1::4], cells[2::4], cells[3::4]])
def get_polydata_vertices(polydata):
    """Get vertices (ndarray Nx3) from a vtk polydata.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        points, represented as 2D ndarrays
    """
    vtk_array = polydata.GetPoints().GetData()
    return numpy_support.vtk_to_numpy(vtk_array)
def get_polydata_normals(polydata):
    """Get vertex normals (ndarray Nx3) from a vtk polydata.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        Normals, represented as 2D ndarrays (Nx3). None if there are no
        normals in the vtk polydata.
    """
    vtk_normals = polydata.GetPointData().GetNormals()
    if vtk_normals is None:
        return None
    return numpy_support.vtk_to_numpy(vtk_normals)
def get_polydata_colors(polydata):
    """Get point colors (ndarray Nx3) from a vtk polydata.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    output : array (N, 3)
        Colors. None if the vtk polydata has no point scalars.
    """
    vtk_colors = polydata.GetPointData().GetScalars()
    if vtk_colors is None:
        return None
    return numpy_support.vtk_to_numpy(vtk_colors)
def set_polydata_triangles(polydata, triangles):
    """Set polydata triangles from a numpy (N, 3) int array.

    Parameters
    ----------
    polydata : vtkPolyData
    triangles : array (N, 3)
        triangles, represented as 2D ndarrays (Nx3)

    Returns
    -------
    polydata : vtkPolyData
        The same object, updated in place.
    """
    # Prepend the vertex count (3) to each triangle, matching the
    # vtkCellArray connectivity layout [3, i0, i1, i2, 3, ...].
    # Fixed: use the builtin ``int`` — the ``np.int`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so the old code raises AttributeError
    # on modern NumPy.  ``int`` resolves to the same platform integer dtype.
    vtk_triangles = np.hstack(np.c_[np.ones(len(triangles)).astype(int) * 3,
                                    triangles])
    vtk_triangles = numpy_support.numpy_to_vtkIdTypeArray(vtk_triangles,
                                                          deep=True)
    vtk_cells = vtk.vtkCellArray()
    vtk_cells.SetCells(len(triangles), vtk_triangles)
    polydata.SetPolys(vtk_cells)
    return polydata
def set_polydata_vertices(polydata, vertices):
    """Set polydata vertices from a numpy (N, 3) array.

    Parameters
    ----------
    polydata : vtkPolyData
    vertices : vertices, represented as 2D ndarrays (Nx3)
    """
    points = vtk.vtkPoints()
    points.SetData(numpy_support.numpy_to_vtk(vertices, deep=True))
    polydata.SetPoints(points)
    return polydata
def set_polydata_normals(polydata, normals):
    """Set polydata point normals from a numpy (N, 3) array.

    Parameters
    ----------
    polydata : vtkPolyData
    normals : normals, represented as 2D ndarrays (Nx3) (one per vertex)
    """
    normals_array = numpy_support.numpy_to_vtk(normals, deep=True)
    polydata.GetPointData().SetNormals(normals_array)
    return polydata
def set_polydata_colors(polydata, colors):
    """Set polydata point colors from a numpy (N, 3) uint8 array.

    Parameters
    ----------
    polydata : vtkPolyData
    colors : colors, represented as 2D ndarrays (Nx3)
        colors are uint8 [0,255] RGB for each points
    """
    scalars = numpy_support.numpy_to_vtk(colors, deep=True,
                                         array_type=vtk.VTK_UNSIGNED_CHAR)
    scalars.SetName("RGB")
    scalars.SetNumberOfComponents(3)
    polydata.GetPointData().SetScalars(scalars)
    return polydata
def update_polydata_normals(polydata):
    """Generate point/cell normals for *polydata* and store the point
    normals back onto it, in place.

    Parameters
    ----------
    polydata : vtkPolyData
    """
    normals_gen = set_input(vtk.vtkPolyDataNormals(), polydata)
    normals_gen.ComputePointNormalsOn()
    normals_gen.ComputeCellNormalsOn()
    # Splitting would duplicate points along sharp edges; keep it off so the
    # output point count matches the input.
    normals_gen.SplittingOff()
    # normals_gen.FlipNormalsOn()
    # normals_gen.ConsistencyOn()
    # normals_gen.AutoOrientNormalsOn()
    normals_gen.Update()
    # Copy the computed point normals back onto the original polydata.
    vtk_normals = normals_gen.GetOutput().GetPointData().GetNormals()
    polydata.GetPointData().SetNormals(vtk_normals)
def get_polymapper_from_polydata(polydata):
    """Build a vtkPolyDataMapper wired to *polydata*.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    poly_mapper : vtkPolyDataMapper
    """
    poly_mapper = set_input(vtk.vtkPolyDataMapper(), polydata)
    poly_mapper.ScalarVisibilityOn()
    # Interpolate scalars before mapping for smoother per-vertex coloring.
    poly_mapper.InterpolateScalarsBeforeMappingOn()
    poly_mapper.Update()
    # StaticOn presumably tells the mapper its input will not change, so
    # pipeline update checks can be skipped — confirm against the VTK docs.
    poly_mapper.StaticOn()
    return poly_mapper
def get_actor_from_polymapper(poly_mapper):
    """Wrap a vtkPolyDataMapper in a vtkActor with backface culling and
    Phong shading enabled.

    Parameters
    ----------
    poly_mapper : vtkPolyDataMapper

    Returns
    -------
    actor : vtkActor
    """
    actor = vtk.vtkActor()
    actor.SetMapper(poly_mapper)
    actor.GetProperty().BackfaceCullingOn()
    actor.GetProperty().SetInterpolationToPhong()
    return actor
def get_actor_from_polydata(polydata):
    """Get a ready-to-render vtkActor for a vtkPolyData.

    Parameters
    ----------
    polydata : vtkPolyData

    Returns
    -------
    actor : vtkActor
    """
    # Compose the two helpers: polydata -> mapper -> actor.
    return get_actor_from_polymapper(get_polymapper_from_polydata(polydata))
def apply_affine(aff, pts):
    """Apply affine matrix `aff` to points `pts`.

    `aff` is an (N, N) homogeneous affine (4x4 for the 3D case) and `pts`
    is any array whose last dimension holds the N-1 coordinates of each
    point.  For 3D this is equivalent to::

        res = np.dot(aff[:3, :3], pts.T) + aff[:3, 3:4]
        transformed_pts = res.T

    Parameters
    ----------
    aff : (N, N) array-like
        Homogenous affine, for 3D points, will be 4 by 4. Contrary to first
        appearance, the affine will be applied on the left of `pts`.
    pts : (..., N-1) array-like
        Points, where the last dimension contains the coordinates of each
        point. For 3D, the last dimension will be length 3.

    Returns
    -------
    transformed_pts : (..., N-1) array
        transformed points

    Notes
    -----
    Copied from nibabel to remove dependency.

    Examples
    --------
    >>> aff = np.array([[0,2,0,10],[3,0,0,11],[0,0,4,12],[0,0,0,1]])
    >>> pts = np.array([[1,2,3],[2,3,4],[4,5,6],[6,7,8]])
    >>> apply_affine(aff, pts) #doctest: +ELLIPSIS
    array([[14, 14, 24],
           [16, 17, 28],
           [20, 23, 36],
           [24, 29, 44]]...)
    """
    aff = np.asarray(aff)
    pts = np.asarray(pts)
    original_shape = pts.shape
    # Flatten all leading dimensions so each row is one point.
    flat_pts = pts.reshape((-1, original_shape[-1]))
    linear = aff[:-1, :-1]       # rotations, zooms, shears
    translation = aff[:-1, -1]
    transformed = np.dot(flat_pts, linear.T) + translation[None, :]
    return transformed.reshape(original_shape)
def asbytes(s):
    """Coerce *s* to ``bytes`` on Python 3 (latin-1 encoding a str), or to
    ``str`` on Python 2."""
    if sys.version_info[0] < 3:
        return str(s)
    return s if isinstance(s, bytes) else s.encode('latin1')
|
from collections import deque # doubly ended queue
"""Breadth-First Search."""
"""Can be used for Dijikstra's algorithm, Edmonds-Karp algorithm,
Cheyen's algorithm or for AI surroundings exploration."""
class Node:
    """A graph vertex holding a payload, its neighbors, and a visited flag."""

    def __init__(self, data, *neighbors):
        # Fixed: dropped the invalid ``*neighbors: []`` annotation — a list
        # literal is not a type; *neighbors simply collects any number of
        # Node arguments into a tuple.
        self.data = data
        self.adjacency_list = neighbors  # tuple of neighboring Node objects
        self.visited = False             # used by graph traversals (e.g. BFS)
def breath_first_search(start_node):
    """Breadth-first traversal from *start_node*, printing each node's data.

    Fixed: nodes are now marked visited when they are *enqueued* instead of
    when they are dequeued.  The original marked (and printed) on dequeue
    only, so a node reachable through two different neighbors could be
    enqueued — and printed — more than once.
    """
    queue = deque([start_node])
    start_node.visited = True
    while queue:
        # Remove first item of queue (FIFO gives level-by-level order).
        current = queue.popleft()
        print(current.data)
        for neighbor in current.adjacency_list:
            if not neighbor.visited:
                neighbor.visited = True
                queue.append(neighbor)
if __name__ == "__main__":
node5 = Node("E")
node3 = Node("C")
node4 = Node("D", node5)
node2 = Node("B", node4)
node1 = Node("A", node2, node3)
breath_first_search(node1)
|
import torch
from torch import nn
class NPairsLoss(nn.Module):
    """N-Pairs loss module: pulls matching (diagonal) representation pairs
    together while pushing apart non-matching pairs via a softmax-style
    log-sum-exp normalization over each row of the similarity matrix."""

    def __init__(self, name):
        super(NPairsLoss, self).__init__()
        self.name = name

    def forward(self, r1, r2):
        """
        Computes the N-Pairs Loss between the r1 and r2 representations.
        :param r1: Tensor of shape (batch_size, representation_size)
        :param r2: Tensor of shape (batch_size, representation_size)
        :return: the scalar loss
        """
        similarity = r1 @ r2.t()
        positives = torch.diag(similarity).mean()
        normalizer = torch.logsumexp(similarity, dim=1).mean()
        return normalizer - positives
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def inorderTraversal(self, root):
        """Iterative in-order traversal of a binary tree.

        :type root: TreeNode
        :rtype: List[int]

        Uses the standard explicit-stack algorithm: descend left pushing
        ancestors, emit the node, then move to its right subtree.

        Fixes over the original iterative version: no throwaway TreeNode
        copies are allocated (the old code called ``TreeNode(curr.val)``,
        relying on an externally defined class and doing O(n) extra
        allocations), and ``is None`` replaces ``== None`` comparisons.
        """
        result = []
        stack = []
        node = root
        while node is not None or stack:
            # Walk as far left as possible, remembering the path.
            while node is not None:
                stack.append(node)
                node = node.left
            node = stack.pop()
            result.append(node.val)  # visit between left and right subtrees
            node = node.right
        return result
|
# Print the product of 8 and 9 (i.e. 72).
print(8 * 9)
from torch import nn as nn
import torch
from models.bert_modules.embedding import BERTEmbedding
from models.bert_modules.transformer import TransformerBlock
from utils import fix_random_seed_as
import pickle
class BERT(nn.Module):
    """BERT-style transformer encoder over item sequences, concatenating a
    user embedding — and, when ``args.kg`` is set, knowledge-graph
    director/actor embeddings — onto each position before the transformer
    blocks."""
    def __init__(self, args):
        super().__init__()
        #fix_random_seed_as(args.model_init_seed)
        # self.init_weights()
        max_len = args.bert_max_len
        num_items = args.num_items
        n_layers = args.bert_num_blocks
        heads = args.bert_num_heads
        # +2 presumably reserves ids for padding and a mask token — TODO confirm
        vocab_size = num_items + 2
        hidden = args.bert_hidden_units
        self.hidden = hidden
        dropout = args.bert_dropout
        # NOTE(review): pickle.load on a path taken from args — only load
        # trusted files; also the file handle is never closed explicitly.
        self.meta = pickle.load(open(args.meta, 'rb'))
        # embedding for BERT, sum of positional, segment, token embeddings
        self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=self.hidden, max_len=max_len, dropout=dropout)
        # User embedding at hidden/4 width.  6040 looks like the
        # MovieLens-1M user count — TODO confirm.
        self.ue = BERTEmbedding(vocab_size=6040, embed_size=int(self.hidden/4), max_len=max_len, dropout=dropout)
        if args.kg:
            # Knowledge-graph side information: director and actor
            # embeddings, each at hidden/8 width.
            self.dir = BERTEmbedding(vocab_size=args.dire_size+1, embed_size=int(self.hidden/8), max_len=max_len, dropout=dropout)
            self.act = BERTEmbedding(vocab_size=args.acto_size+1, embed_size=int(self.hidden/8), max_len=max_len, dropout=dropout)
            # multi-layers transformer blocks, deep network
            # Input width = hidden + 4*(hidden/8) [dir + 3 actors] + hidden/4
            # [user] = hidden*7/4, matching the forward() concatenation.
            self.transformer_blocks = nn.ModuleList(
                [TransformerBlock(int(hidden*7/4), heads, hidden * 4, dropout) for _ in range(n_layers)])
        else:
            # Input width = hidden + hidden/4 = hidden*5/4 (item + user only).
            self.transformer_blocks = nn.ModuleList(
                [TransformerBlock(int(hidden*5/4), heads, hidden * 4, dropout) for _ in range(n_layers)])

    def forward(self, x, user, dire, ac1, ac2, ac3, ac4):
        # Attention mask: True where the token id is non-zero (0 = padding).
        mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
        # embedding the indexed sequence to sequence of vectors
        x = self.embedding(x)
        # Broadcast the single user id across every sequence position.
        user = user.unsqueeze(1)
        user = user.expand(user.shape[0], x.shape[1])
        u = self.ue(user)
        if dire is None:
            f_in = torch.cat((x, u), axis=2)
        else:
            # NOTE(review): ac4 is accepted but never used — only ac1..ac3
            # are embedded below; confirm whether this is intentional.
            dire = self.dir(dire)
            ac1 = self.act(ac1)
            ac2 = self.act(ac2)
            ac3 = self.act(ac3)
            f_in = torch.cat((x,dire), axis=2)
            f_in = torch.cat((f_in,ac1), axis=2)
            f_in = torch.cat((f_in,ac2), axis=2)
            f_in = torch.cat((f_in,ac3), axis=2)
            f_in = torch.cat((f_in,u), axis=2)
        # running over multiple transformer blocks
        for transformer in self.transformer_blocks:
            f_in = transformer.forward(f_in, mask)
        return f_in

    def init_weights(self):
        # Intentionally a no-op; initialization is left to the submodules.
        pass
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the minimumSwaps function below.
def minimumSwaps(arr):
    """Return the minimum number of swaps needed to sort *arr*, a
    permutation of 1..n, into ascending order (sorts *arr* in place).

    Cyclic placement: repeatedly swap arr[pos] into its final slot until
    position ``pos`` holds the value ``pos + 1``.
    """
    swaps = 0
    n = len(arr)
    for pos in range(n - 1):
        while arr[pos] != pos + 1:
            # The value at `pos` belongs at index value-1; send it home.
            target = arr[pos] - 1
            arr[pos], arr[target] = arr[target], arr[pos]
            swaps += 1
    return swaps
if __name__ == '__main__':
    # HackerRank-style driver: read the array size and the array from stdin,
    # write the minimum number of swaps to out.txt.
    fptr = open("out.txt", 'w')
    # NOTE(review): n is read per the input protocol but never validated
    # against len(arr).
    n = int(input())
    arr = list(map(int, input().rstrip().split()))
    res = minimumSwaps(arr)
    fptr.write(str(res) + '\n')
    fptr.close()
|
# Generated by Django 2.1.5 on 2019-03-19 22:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace FlowchartQuestion.solution with an optional image field and
    constrain the answer/difficulty fields to fixed choice sets."""

    dependencies = [
        ('flowchart', '0001_initial'),
    ]

    operations = [
        # Drop the old solution field entirely (its data is not migrated).
        migrations.RemoveField(
            model_name='flowchartquestion',
            name='solution',
        ),
        # Optional illustration, uploaded under flowchart/<year>/<month>/<day>/.
        migrations.AddField(
            model_name='flowchartquestion',
            name='image',
            field=models.ImageField(blank=True, upload_to='flowchart/%Y/%m/%d/'),
        ),
        # Restrict answer to one of four numbered options, defaulting to '1'.
        migrations.AlterField(
            model_name='flowchartquestion',
            name='answer',
            field=models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4')], default='1', max_length=6),
        ),
        # Restrict difficulty to easy/medium/hard, defaulting to 'medium'.
        migrations.AlterField(
            model_name='flowchartquestion',
            name='difficulty',
            field=models.CharField(choices=[('easy', 'Easy'), ('medium', 'Medium'), ('hard', 'Hard')], default='medium', max_length=6),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.