blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11ffda3f83c07b96012a29d6f0df3a67e7760664 | c15d1e6e8396278aaf495a8f6949514791b6b2cb | /clonality.py | 5dea01b243347bfdddede61957f680797167d117 | [] | no_license | KnightsDiagnosticsLab/PeakFinder | c27615ac1179010e273511d4d4663f1966a06745 | cc9ea1e0e6b0d7c1eacde482fa72c965049f2e09 | refs/heads/master | 2020-09-04T17:12:55.374919 | 2020-01-29T01:07:24 | 2020-01-29T01:07:24 | 219,824,905 | 1 | 0 | null | 2020-01-28T19:22:52 | 2019-11-05T18:36:57 | Python | UTF-8 | Python | false | false | 27,235 | py | #!/usr/bin/env python3
# Importing Packages
import os
import sys
import re
import pandas as pd
import numpy as np
from scipy.signal import find_peaks, peak_prominences, peak_widths
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
from itertools import combinations
from outliers import smirnov_grubbs as grubbs
from bokeh.io import output_file, show, save
from bokeh.layouts import column
from bokeh.plotting import figure
from bokeh.models import BoxAnnotation, Label, Range1d, WheelZoomTool, ResetTool, PanTool, LegendItem, Legend
from bokeh.core.validation.warnings import FIXED_SIZING_MODE
from bokeh.core.validation import silence
import easygui
from convert_fsa_to_csv import convert_folder
# pandas console-display settings used while debugging DataFrames.
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 50)
# Hover tooltip shown on every bokeh figure: cursor (x, y) position.
TOOLTIPS = [("(x,y)", "($x{1.1}, $y{int})")]
silence(FIXED_SIZING_MODE, True)
# Dye channel of interest per assay tube; value is the plot color.
# '_repeat' keys are the duplicate runs of the same tube.
channels_of_interest = {
    'IGH-A_channel_1': 'blue',
    'IGH-B_channel_1': 'blue',
    'IGH-C_channel_2': 'green',
    'IGK-A_channel_1': 'blue',
    'IGK-B_channel_1': 'blue',
    'TCRB-A_channel_1': 'blue',
    'TCRB-A_channel_2': 'green',
    'TCRB-B_channel_1': 'blue',
    'TCRB-C_channel_1': 'blue',
    'TCRB-C_channel_2': 'green',
    'TCRB-C_channel_3': 'orange',
    'TCRG-A_channel_1': 'blue',
    'TCRG-A_channel_2': 'green',
    'TCRG-B_channel_1': 'blue',
    'TCRG-B_channel_2': 'green',
    'SCL_channel_1': 'black',
    'IGH-A_channel_1_repeat': 'blue',
    'IGH-B_channel_1_repeat': 'blue',
    'IGH-C_channel_2_repeat': 'green',
    'IGK-A_channel_1_repeat': 'blue',
    'IGK-B_channel_1_repeat': 'blue',
    'TCRB-A_channel_1_repeat': 'blue',
    'TCRB-A_channel_2_repeat': 'green',
    'TCRB-B_channel_1_repeat': 'blue',
    'TCRB-C_channel_1_repeat': 'blue',
    'TCRB-C_channel_2_repeat': 'green',
    'TCRG-A_channel_1_repeat': 'blue',
    'TCRG-A_channel_2_repeat': 'green',
    'TCRG-B_channel_1_repeat': 'blue',
    'TCRG-B_channel_2_repeat': 'green',
    'SCL_channel_1_repeat': 'black'
}
# Clonality regions of interest per channel.
# Each entry: (start bp, end bp, target/primer-set name, shade color).
roi_clonality = {
    'IGH-A_channel_1': [(310, 360, 'FR1-JH', 'blue')],
    'IGH-B_channel_1': [(250, 295, 'FR2-JH', 'blue')],
    'IGH-C_channel_2': [(100, 170, 'FR3-JH', 'blue')],
    'IGK-A_channel_1': [(120, 160, 'Vκ-Jκ-1', 'blue'), (190, 210, 'Vκ-Jκ-2', 'green'), (260, 300, 'Vκ-Jκ-3', 'red')],
    'IGK-B_channel_1': [(210, 250, 'Vκ-Kde-1', 'blue'), (270, 300, 'Vκ-Kde-2', 'green'), (350, 390, 'Vκ-Kde-3', 'red')],
    'TCRB-A_channel_1': [(240, 285, 'Vβ_Jβ_Jβ2.X', 'blue')],
    'TCRB-A_channel_2': [(240, 285, 'Vβ_Jβ_Jβ1.X', 'blue')],
    'TCRB-B_channel_1': [(240, 285, 'Vβ_Jβ2', 'blue')],
    'TCRB-C_channel_1': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRB-C_channel_2': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRG-A_channel_1': [(175, 195, 'Vγ10_Jγ1.1_2.1', 'blue'), (230, 255, 'Vγ1-8_Jγ1.1_2.1', 'green')],
    'TCRG-A_channel_2': [(145, 175, 'Vγ10_Jγ1.3_2.3', 'blue'), (195, 230, 'Vγ1-8_Jγ1.3_2.3', 'green')],
    'TCRG-B_channel_1': [(110, 140, 'Vγ11_Jγ1.1_2.1', 'blue'), (195, 220, 'Vγ9_Jγ1.1_2.1', 'green')],
    'TCRG-B_channel_2': [(80, 110, 'Vγ11_Jγ2.1_2.3', 'blue'), (160, 195, 'Vγ9_Jγ1.3_2.3', 'green')],
    'IGH-A_channel_1_repeat': [(310, 360, 'FR1-JH', 'blue')],
    'IGH-B_channel_1_repeat': [(250, 295, 'FR2-JH', 'blue')],
    'IGH-C_channel_2_repeat': [(100, 170, 'FR3-JH', 'blue')],
    'IGK-A_channel_1_repeat': [(120, 160, 'Vκ-Jκ-1', 'blue'), (190, 210, 'Vκ-Jκ-2', 'green'), (260, 300, 'Vκ-Jκ-3', 'red')],
    'IGK-B_channel_1_repeat': [(210, 250, 'Vκ-Kde-1', 'blue'), (270, 300, 'Vκ-Kde-2', 'green'), (350, 390, 'Vκ-Kde-3', 'red')],
    'TCRB-A_channel_1_repeat': [(240, 285, 'Vβ_Jβ_Jβ2.X', 'blue')],
    'TCRB-A_channel_2_repeat': [(240, 285, 'Vβ_Jβ_Jβ1.X', 'blue')],
    'TCRB-B_channel_1_repeat': [(240, 285, 'Vβ_Jβ2', 'blue')],
    'TCRB-C_channel_1_repeat': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRB-C_channel_2_repeat': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRG-A_channel_1_repeat': [(175, 195, 'Vγ10_Jγ1.1_2.1', 'blue'), (230, 255, 'Vγ1-8_Jγ1.1_2.1', 'green')],
    'TCRG-A_channel_2_repeat': [(145, 175, 'Vγ10_Jγ1.3_2.3', 'blue'), (195, 230, 'Vγ1-8_Jγ1.3_2.3', 'green')],
    'TCRG-B_channel_1_repeat': [(110, 140, 'Vγ11_Jγ1.1_2.1', 'blue'), (195, 220, 'Vγ9_Jγ1.1_2.1', 'green')],
    'TCRG-B_channel_2_repeat': [(80, 110, 'Vγ11_Jγ2.1_2.3', 'blue'), (160, 195, 'Vγ9_Jγ1.3_2.3', 'green')],
}
# Plot line color for each dye channel ('SCL' is plotted black).
channel_colors = {
    'channel_1': 'blue',
    'channel_2': 'green',
    'channel_3': 'purple',
    'channel_4': 'red',
    'channel_5': 'darkgoldenrod',
    'SCL': 'black'
}
def pretty_name(c, t):
if 'channel' in c:
channel = re.findall(r'channel_\d', c)[0]
if 'repeat' in c:
pc = '_'.join([t, channel, 'repeat'])
else:
pc = '_'.join([t, channel])
else:
pc = c
return pc
def organize_clonality_files(path):
    """Group the CSV files in *path* into Case objects keyed by case name.

    Case names are extracted with the pattern r'(\\d+KD-\\d+M\\d+)(-R)*'
    (the optional '-R' suffix marks repeat runs).  Returns a dict mapping
    case name -> Case, where ``Case.files`` maps each test name to the list
    of CSV file names containing both the case name and the test name.
    """
    tests = [
        'IGH-A', 'IGH-B', 'IGH-C', 'IGK-A', 'IGK-B',
        'TCRB-A', 'TCRB-B', 'TCRB-C', 'TCRG-A', 'TCRG-B',
        'SCL'
    ]
    # construct case list
    csv_list = [f for f in os.listdir(path) if f.endswith('.csv')]
    case_names_as_llt = [
        re.findall(
            r'(\d+KD-\d+M\d+)(-R)*',
            x) for x in csv_list]  # 'llt' is 'list of lists of tuple'
    case_names_as_ll = [list(lt[0]) for lt in case_names_as_llt if len(
        lt) > 0]  # ll is 'list of lists'
    # finally we have a set of unique strings
    case_names = {''.join(x) for x in case_names_as_ll}
    # make a dictionary of case names to case files
    cd = {case_name: {t: [f for f in csv_list if case_name in f and t in f]
                      for t in tests} for case_name in case_names}
    cases = {case_name: Case() for case_name in case_names}
    for case_name, c in cases.items():
        c.name = case_name
        c.files = cd[case_name]
    return cases
class Case(object):
    """Mutable container for one clonality case's data and analysis state.

    Attributes are filled in progressively by the processing pipeline:

    name: case identifier parsed from the CSV file names.
    files: dict mapping test name -> list of CSV file names.
    ladder: dict mapping size-standard channel -> ladder peak indices.
    rox500: list of expected ROX500 fragment sizes (set by size_standard).
    index_of_peaks_to_annotate, index_of_artifactual_peaks,
    index_of_replicate_peaks: dicts mapping channel -> peak indices.
    allelic_ladder: reserved; never assigned by this module.
    plot_labels, widths, abberant_peaks, some_peaks,
    some_upside_down_peaks: per-channel scratch dicts.
    """

    def __init__(self):
        self.name = None
        self.files = {}
        self.ladder = {}
        self.rox500 = []
        self.index_of_peaks_to_annotate = {}
        self.index_of_artifactual_peaks = {}
        self.index_of_replicate_peaks = {}
        self.allelic_ladder = None
        self.plot_labels = {}
        self.widths = {}
        self.abberant_peaks = {}
        self.some_peaks = {}
        self.some_upside_down_peaks = {}
def gather_case_data(case, case_name, path):
    """Read every CSV belonging to *case* and concatenate the relevant
    columns column-wise into ``case.df``.

    Columns are renamed via pretty_name() so each test/channel pair is
    unique; only TCR*/IG*/SCL columns are kept.
    """
    df = pd.DataFrame()
    for t, files in case.files.items():
        for f in files:
            df_t = pd.read_csv(os.path.join(path, f))
            # prefix every column with the test name so channels stay unique
            df_t.columns = [pretty_name(c, t) for c in df_t.columns]
            columns_to_drop = [c for c in df_t.columns if not (
                c.startswith('TCR') or c.startswith('IG') or c.startswith('SCL'))]
            df_t = df_t.drop(columns_to_drop, axis=1)
            df = pd.concat([df, df_t], axis=1, sort=False)
    df.name = case_name
    case.df = df
    return case
def local_southern(case, order=2):
    """Convert scan indices to fragment sizes using the local Southern method.

    For each size-standard channel, fit two overlapping polynomials through
    three consecutive ladder points each, average the two fits over the
    interval between ladder points, and append the fitted sizes to
    ``case.df`` as a new 'x_fitted_<channel>' column.
    """
    for ch_ss, ladder in case.ladder.items():
        x_fitted = np.array([])
        for i in range(2, len(ladder) - 1):
            # polynomial through the three ladder points ending at index i
            x1 = ladder[i - 2:i + 1]
            y1 = case.rox500[i - 2:i + 1]
            polyx1 = np.poly1d(np.polyfit(x1, y1, deg=order))
            # polynomial through the three ladder points starting at i - 1
            x2 = ladder[i - 1:i + 2]
            y2 = case.rox500[i - 1:i + 2]
            polyx2 = np.poly1d(np.polyfit(x2, y2, deg=order))
            if i == 2:
                # first interval extends back to the start of the trace
                x = range(case.df.index.tolist()[0], ladder[i])
            elif i == len(ladder) - 2:
                # last interval extends forward to the end of the trace
                x = range(ladder[i - 1], case.df.index.tolist()[-1] + 1)
            else:
                x = range(ladder[i - 1], ladder[i])
            # size for each scan index = mean of the two local fits
            y = np.average(np.array([polyx1(x), polyx2(x)]), axis=0)
            x_fitted = np.concatenate((x_fitted, y), axis=0)
        x_df = pd.DataFrame(x_fitted)
        col_name = '_'.join(['x_fitted', ch_ss])
        x_df.columns = [col_name]
        case.df = pd.concat([case.df, x_df], axis=1, sort=False)
    return case
def pick_peak_one(case):
    """Locate 'peak one' of the SCL trace and all peaks to its right.

    Peak one is the farther (on the x axis) of the two tallest peaks in the
    scan-index range 1500-2300.  Sets ``case.peak_one`` and ``case.peaks``;
    skips the case (with a message) if the SCL column length is inconsistent.
    """
    case.ladder_success = False
    scldf = case.df['SCL_channel_1']
    # Goal is to return the farther (on x axis) of the two tallest peaks.
    # This range was determined by looking at 250+ cases.
    mask = scldf.index.isin(range(1500, 2300))
    min_dist = 20
    if mask.size == scldf.size:
        peaks_x, _ = find_peaks(scldf.where(mask, 0), distance=min_dist)
        peaks_2tallest = sorted(
            [(x, scldf[x]) for x in peaks_x], key=lambda coor: coor[1], reverse=True)[:2]
        peak_farther_of_2tallest = sorted(
            peaks_2tallest, key=lambda coor: coor[0], reverse=True)[0]
        case.peak_one = peak_farther_of_2tallest
        # collect every peak to the right of peak one
        mask = scldf.index.isin(range(case.peak_one[0], scldf.size))
        peaks_x, _ = find_peaks(scldf.where(mask, 0), distance=min_dist)
        case.peaks = [(x, scldf[x]) for x in sorted(peaks_x, reverse=False)]
    else:
        # length mismatch: more than one file contributed to this column
        print(
            '\tSkipping {} due to size mismatch, likely due to multiple files being added to the same column in the case DataFrame column'.format(
                case.name))
        for f in case.files['SCL']:
            print('\t\t{}'.format(f))
    return case
def make_decay_curve(case):
    """Fit an exponential decay curve anchored at peak one.

    The decay rate is increased in steps of 0.1 until evaluate_SCL() finds
    an SCL ladder above the curve whose linear fit residual is <= 10, or
    until the rate reaches 20.  Stores the curve in ``case.df['decay']``
    and the final rate in ``case.decay_value``.
    """
    a = case.peak_one[1]
    b = 0.5
    x_decay = np.array(range(case.peak_one[0], len(case.df.index.tolist())))
    i = 0
    while i < 20:
        i += 0.1
        # exponential decay from peak one's height; steeper as i grows
        y_decay = a * b**(i * (x_decay - case.peak_one[0]) / case.peak_one[0])
        decay = pd.Series(data=y_decay, index=x_decay)
        decay.name = 'decay'
        if decay.name not in case.df.columns:
            case.df = pd.concat([case.df, decay], axis=1, sort=False)
        else:
            case.df[decay.name] = decay
        case = evaluate_SCL(case, decay)
        if case.residual <= 10:
            case.ladder_success = True
            break
    case.decay_value = i
    return case
def evaluate_SCL(case, decay):
    """Search for the SCL ladder among peaks above the decay curve.

    Tries every 3-peak combination (plus peak one) as the 100/200/300/400 bp
    ladder and keeps the combination with the smallest linear-fit residual
    in ``case.ladder_SCL`` / ``case.residual``.
    """
    qualifying_peaks = [(x, y) for x, y in case.peaks if y > decay[x]]
    combos = [list(c) for c in combinations(qualifying_peaks, 3)]
    combos.sort(key=lambda coor: coor[0])
    case.ladder_SCL = [400, 100, 300, 200]  # placeholder until a fit is found
    case.residual = 1000000
    for combo in combos:
        ladder_SCL = [case.peak_one[0]] + [x for x, y in combo]
        poly_current, res_current, rank, singular_values, rcond = np.polyfit(
            ladder_SCL, [100, 200, 300, 400], 1, full=True)
        res_current = res_current[0]
        if res_current < case.residual:
            case.residual = res_current
            case.ladder_SCL = ladder_SCL
    return case
def build_ladder(df, size_standard, label_name):
    """Pick the peak combination that best matches the expected size standard.

    Every len(size_standard)-sized combination of candidate peaks is fitted
    linearly against the expected sizes; combinations are tried from highest
    to lowest R^2 and the first whose peak heights pass a Grubbs outlier
    test is returned.

    NOTE(review): if no combination passes the Grubbs test, the worst-R^2
    combination tried is returned; if there are fewer candidates than
    expected sizes the combination set is empty and ``ladder`` is never
    bound (NameError) — confirm inputs always yield enough peaks.
    """
    choices, std = reduce_choices(df, label_name)
    ss = np.array(size_standard)
    if len(choices) < len(size_standard):
        print(
            '\tWARNING: len(choices) = {}, k = {}'.format(
                len(choices),
                len(size_standard)))
    X = np.array([sorted(list(c))
                  for c in combinations(choices, len(size_standard))])
    # vectorized linear fit of every combination against the size standard
    pfit_zx = np.polyfit(ss, X.T, deg=1, full=True)
    residuals_zx = pfit_zx[1]
    X_mean = np.expand_dims(np.mean(X, axis=1), axis=1)
    R_sq_zx = 1.0 - (np.square(residuals_zx) / np.sum(np.square(X - X_mean)))
    # walk combinations from best to worst R^2
    ranked_R_sq, indices = np.unique(R_sq_zx, return_index=True)
    indices = indices.tolist()
    indices.reverse()
    for i in indices:
        ladder = X[i]
        Y = df[ladder]
        # accept the first ladder whose peak heights have no Grubbs outlier
        Ygrubb = grubbs.test(Y.tolist(), alpha=0.05)
        if len(Y) == len(Ygrubb):
            break
    return ladder
def reduce_choices(ds, label_name):
    """Select candidate size-standard peak positions from a ladder trace.

    Finds restricted peaks (20-1000 RFU) to the right of the tallest
    unrestricted peak (the primer/dye front), fits a linear trend through
    their heights, and keeps only peaks whose height lies within
    +/- t standard deviations of that trend.

    ds: pandas Series of the size-standard trace (integer index).
    label_name: title used only for the debug plot on failure.
    Returns (choices_x, std): candidate peak indices and the height
    standard deviation.
    """
    t = 2.0  # half-width of the acceptance band, in standard deviations
    try:
        peaks_x_restricted, _ = find_peaks(
            ds, height=[20, 1000], distance=30, width=2)
    except Exception:
        # Debug aid: render the raw trace so a failing ladder can be
        # inspected, then re-raise.  (The original code swallowed the error
        # with a bare `except:` and fell through to a NameError on
        # `peaks_x_restricted` below.)
        p = figure(tools='pan,wheel_zoom,reset', tooltips=TOOLTIPS, title=label_name)
        p.line(ds.index.to_list(), ds, line_width=0.5, color='blue')
        show(p)
        raise
    peaks_x, _ = find_peaks(ds)
    coor = [(x, ds[x]) for x in peaks_x]
    # The tallest unrestricted peak marks the front; only peaks to its right
    # are usable ladder candidates.
    tallest = sorted(coor, key=lambda x: x[1])[-1]
    choices_x = [x for x in peaks_x_restricted if x > tallest[0]]
    choices_y = [ds[x] for x in choices_x]
    # Linear trend through the candidate heights; accept peaks within the
    # +/- t*std band around it.
    polyxy = np.poly1d(np.polyfit(choices_x, choices_y, deg=1))
    std = np.std(choices_y)
    std2_below = polyxy(ds.index.to_list()) - t * std
    std2_above = polyxy(ds.index.to_list()) + t * std
    peaks_x, _ = find_peaks(
        ds, height=[
            std2_below, std2_above], prominence=20, width=2)
    choices_x = [x for x in peaks_x if x > tallest[0]]
    return choices_x, std
def size_standard(case, ch_ss_num=4):
    """Build a size-standard ladder for every ladder channel of the case.

    Uses the ROX500 75-400 bp subset as the expected fragment sizes, stores
    a copy on ``case.rox500``, and fills ``case.ladder`` with the peak
    indices chosen by build_ladder() for each 'channel_<ch_ss_num>' column
    (excluding already-fitted 'x_fitted' columns).
    """
    # ROX500 ladder variants observed across runs; only the 75-400 bp
    # subset is currently used for sizing.
    rox500_16 = [35, 50, 75, 100, 139, 150, 160, 200,
                 250, 300, 340, 350, 400, 450, 490, 500]
    rox500_14 = rox500_16[:14]
    rox500_13 = rox500_16[1:14]
    rox500_75_400 = rox500_16[2:13]
    rox500_75_450 = rox500_16[2:14]
    rox500 = rox500_75_400
    case.rox500 = list(rox500)
    ch_ss = 'channel_' + str(ch_ss_num)
    ladder_channels = [
        ch for ch in case.df.columns if ch_ss in ch and 'x_fitted' not in ch]
    for ch in ladder_channels:
        label_name = '_'.join([case.name, ch])
        case.ladder[ch] = build_ladder(case.df[ch], rox500, label_name)
    return case
def baseline_correction_simple(case, ch_list=None, ch_ss_num=4):
    """Subtract a straight-line baseline from each non-size-standard channel.

    The line is fitted (deg=1) through the trace values at scan indices
    >= 1000 and subtracted over the full index range.
    """
    if ch_list is None:
        ch_list = case.df.columns.to_list()
    else:
        ch_list = list(set(case.df.columns.to_list()) & set(ch_list))
    ch_ss = 'channel_' + str(ch_ss_num)
    ch_list = [ch for ch in ch_list if ch_ss not in ch]
    for ch in ch_list:
        # NOTE(review): these peaks are computed but never used below —
        # possibly a leftover from the commented-out exclusion logic.
        peaks_i, props = find_peaks(case.df[ch], prominence=50)
        I = case.df.index.to_list()
        I_1k = I[1000:]
        x_baseline = case.df[ch][I_1k].to_list()
        # linear fit through the trace tail approximates the baseline drift
        polyxy = np.poly1d(np.polyfit(I_1k, x_baseline, deg=1))
        case.df[ch] = case.df[ch] - polyxy(case.df.index.to_list())
    return case
def baseline_correction_upside_down(
        case,
        ch_list=None,
        ch_ss_num=4,
        iterations=3,
        prominence=1,
        distance=20):
    """Baseline-correct by working on the inverted (upside-down) trace.

    Repeatedly fits a spline through the inverted trace's peaks (i.e. the
    original troughs) and subtracts it, then flips the result back.  Peaks
    present after correction but not before are recorded per channel in
    ``case.abberant_peaks``.
    """
    if ch_list is None:
        ch_list = case.df.columns.to_list()
    else:
        ch_list = list(set(case.df.columns.to_list()) & set(ch_list))
    ch_ss = 'channel_' + str(ch_ss_num)
    ch_list = [ch for ch in ch_list if ch_ss not in ch]
    for ch in ch_list:
        # NOTE(review): this first result is immediately overwritten below,
        # so peaks_start is measured on the INVERTED trace while
        # peaks_finish is measured on the corrected upright trace —
        # confirm that comparing the two is intended.
        peaks_start, _ = find_peaks(
            case.df[ch], prominence=prominence, distance=distance)
        df = case.df[ch] * -1
        peaks_start, _ = find_peaks(
            df, prominence=prominence, distance=distance)
        all_your_base = set()
        for i in range(0, iterations):
            bases, props = find_peaks(
                df, prominence=prominence, distance=distance)
            # spline through the flipped trace's peaks; bbox limits the
            # fit window to the first half of the detected bases
            spl = InterpolatedUnivariateSpline(
                bases, df[bases], bbox=[bases[0], bases[int(len(bases) / 2)]])
            spl_df = pd.Series(spl(case.df.index.tolist()))
            df = df - spl_df
        case.df[ch] = df * -1
        peaks_finish, _ = find_peaks(
            case.df[ch], prominence=prominence, distance=distance)
        abberant_peaks = set(peaks_finish) - set(peaks_start)
        case.abberant_peaks[ch] = abberant_peaks
    return case
def baseline_correction_advanced(
        case,
        ch_list=None,
        ch_ss_num=4,
        iterations=3,
        prominence=1,
        distance=20):
    """Iteratively flatten each channel's baseline using peak-base splines.

    On each iteration, detect peaks, collect their left/right bases, fit an
    interpolating spline through the trace values at those bases, and
    subtract it.  Size-standard channels are excluded.
    """
    if ch_list is None:
        ch_list = case.df.columns.to_list()
    else:
        ch_list = list(set(case.df.columns.to_list()) & set(ch_list))
    ch_ss = 'channel_' + str(ch_ss_num)
    ch_list = [ch for ch in ch_list if ch_ss not in ch]
    for ch in ch_list:
        # NOTE(review): peaks_start and all_your_base are accumulated but
        # no longer consumed (the consuming code is commented out upstream).
        peaks_start, _ = find_peaks(
            case.df[ch], prominence=prominence, distance=distance)
        all_your_base = set()
        for i in range(0, iterations):
            peaks_current, props = find_peaks(
                case.df[ch], prominence=prominence, distance=distance)
            # spline knots = the bases (edges) of every detected peak
            bases = set(np.concatenate(
                [props['left_bases'], props['right_bases']]))
            all_your_base = all_your_base | bases
            bases = sorted(list(bases))
            # ext=1 returns 0 outside the knot range instead of extrapolating
            spl = InterpolatedUnivariateSpline(
                bases, case.df[ch][bases], ext=1)
            spl_df = pd.Series(spl(case.df.index.tolist()))
            case.df[ch] = case.df[ch] - spl_df
    return case
def index_of_peaks_to_annotate(case):
    """Find annotatable peaks inside each channel's clonality ROIs.

    A peak qualifies if it has prominence >= 100 and height >= 300 and its
    fitted fragment size falls inside one of the channel's ROIs; at most
    the 5 tallest peaks per ROI are kept.  Results are stored per channel
    in ``case.index_of_peaks_to_annotate``.
    """
    for ch in case.df.columns:
        # fitted sizes come from the matching channel-4 size standard
        x_col_name = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', ch)
        if ch in roi_clonality.keys():
            peaks_x, _ = find_peaks(case.df[ch], prominence=100, height=300)
            peaks_in_all_roi = []
            for x_start, x_end, _, _ in roi_clonality[ch]:
                peaks_in_current_roi = [
                    x for x in peaks_x if case.df[x_col_name][x] >= x_start and case.df[x_col_name][x] <= x_end]
                # order the ROI's peaks tallest-first
                peaks_y = case.df[ch][peaks_in_current_roi].to_list()
                peaks_in_current_roi = [x for y, x in sorted(
                    zip(peaks_y, peaks_in_current_roi), reverse=True)]
                if len(peaks_in_current_roi) > 5:
                    peaks_in_all_roi.extend(peaks_in_current_roi[0:5])
                else:
                    peaks_in_all_roi.extend(peaks_in_current_roi)
            case.index_of_peaks_to_annotate[ch] = peaks_in_all_roi[:]
    return case
def find_artifactual_peaks(case):
    """Record tall peaks on each (normally empty) channel-3 trace.

    Peaks of height >= 500 RFU whose scan index falls within the matching
    channel-4 ladder range are stored per channel in
    ``case.index_of_artifactual_peaks``; these mark likely dye artifacts.
    """
    for ch in case.df.columns:
        if 'channel_3' in ch and 'SCL' not in ch:
            # the matching channel-4 ladder bounds the usable x-range
            ch_4 = re.sub(r'channel_\d', 'channel_4', ch)
            ladder = case.ladder[ch_4]
            peaks_temp, _ = find_peaks(case.df[ch], height=500)
            peaks_i = [i for i in peaks_temp
                       if ladder[0] <= i <= ladder[-1]]
            case.index_of_artifactual_peaks[ch] = peaks_i[:]
    return case
def plot_scl(case, ch, plot_dict, w, h):
    """Add a bokeh line plot of the SCL (specimen control) channel.

    Only acts when *ch* is an SCL channel of interest; the plot's x axis is
    in raw scan indices starting at 1000.  Returns the (possibly updated)
    plot_dict.
    """
    if ch in channels_of_interest.keys() and 'SCL' in ch:
        ch_num = re.findall(r'channel_\d', ch)[0]
        label_name = case.name + '_' + ch
        # NOTE(review): computed but unused in this function
        x_col_name = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', ch)
        x = case.df[ch].index.to_list()
        y = case.df[ch].to_list()
        p = figure(
            tools='pan,wheel_zoom,reset',
            title=label_name,
            x_axis_label='fragment size',
            y_axis_label='RFU',
            width=w,
            height=h,
            x_range=(
                1000,
                max(x)),
            tooltips=TOOLTIPS)
        p.line(x, y, line_width=0.5, color=channel_colors.get(ch_num, 'blue'))
        plot_dict[ch] = p
    return plot_dict
def plot_channels_of_interest(case, ch, plot_dict, w, h, ch_ss_num=4):
    """Add a bokeh line plot (RFU vs fitted fragment size) for a non-SCL
    channel of interest, with the x axis limited to 75-400 bp.

    Returns the (possibly updated) plot_dict.
    """
    if ch in channels_of_interest.keys() and 'SCL' not in ch:
        ch_num = re.findall(r'channel_\d', ch)[0]
        label_name = case.name + '_' + ch
        # x values come from the fitted-size column of the matching
        # size-standard channel
        x_col_name = 'x_fitted_' + \
            re.sub(r'channel_\d', 'channel_' + str(ch_ss_num), ch)
        p = figure(
            tools='pan,wheel_zoom,reset',
            title=label_name,
            x_axis_label='fragment size',
            y_axis_label='RFU',
            width=w,
            height=h,
            x_range=(
                75,
                400),
            tooltips=TOOLTIPS)
        x = case.df[x_col_name].to_list()
        y = case.df[ch].to_list()
        p.line(x, y, line_width=0.5, color=channel_colors.get(ch_num, 'blue'))
        plot_dict[ch] = p
    return plot_dict
def highlight_roi_clonality(case, ch, plot_dict, w, h):
    """Shade each clonality ROI on the channel's plot and add a legend.

    Each ROI is drawn as a translucent BoxAnnotation; an invisible dummy
    line gives the legend entry a renderer to reference (BoxAnnotations
    cannot appear in legends directly).
    """
    if ch in roi_clonality.keys():
        p = plot_dict[ch]
        legends = []
        for x_left, x_right, roi_name, roi_color in roi_clonality[ch]:
            # invisible renderer used only to populate the legend entry
            dummy_dot = p.line([0, 0], [1, 1], line_width=20,
                               color=roi_color, alpha=0.10)
            roi = BoxAnnotation(
                left=x_left,
                right=x_right,
                fill_color=roi_color,
                fill_alpha=0.05)
            p.add_layout(roi)
            legends.append(LegendItem(label=roi_name, renderers=[dummy_dot]))
        p.add_layout(Legend(items=legends, location='top_right'))
        plot_dict[ch] = p
    return plot_dict
def plot_peaks_of_interest(
        case,
        ch,
        plot_dict,
        w,
        h,
        replicate_only,
        ch_ss_num=4):
    """Annotate a channel's plot with fragment-size labels at its peaks.

    Uses replicate-confirmed peaks when *replicate_only* is true, otherwise
    all peaks selected for annotation; also rescales the y axis to fit the
    tallest annotated peak.
    """
    if ch in roi_clonality.keys():
        x_col_name = 'x_fitted_' + \
            re.sub(r'channel_\d', 'channel_' + str(ch_ss_num), ch)
        p = plot_dict[ch]
        if replicate_only:
            peaks_index = case.index_of_replicate_peaks[ch]
        else:
            peaks_index = case.index_of_peaks_to_annotate[ch]
        x_peaks = case.df[x_col_name][peaks_index].to_list()
        y_peaks = case.df[ch][peaks_index].to_list()
        # y axis spans from slightly below zero to 1.3x the tallest peak
        p.y_range.start = -100
        if len(y_peaks) > 0:
            p.y_range.end = 1.3 * max(y_peaks)
        else:
            p.y_range.end = 1000
        for x, y in zip(x_peaks, y_peaks):
            mytext = Label(
                angle=1,
                x=x,
                y=int(y),
                text='{:.1f}'.format(x),
                x_offset=0,
                y_offset=2,
                text_font_size='8pt')
            p.add_layout(mytext)
    return plot_dict
def plot_size_standard(case, ch, plot_dict, w, h, ch_ss_num=4):
    """Add a half-height plot of the size-standard trace for *ch*'s matching
    ladder channel, marking each ladder peak and labeling it with its
    expected ROX500 fragment size.
    """
    ch_ss = re.sub(r'channel_\d', 'channel_' + str(ch_ss_num), ch)
    ch_num = re.findall(r'channel_\d', ch)[0]
    if ch_ss in case.ladder.keys():
        label_name = case.name + '_' + ch_ss
        x = case.df[ch_ss].index.to_list()
        y = case.df[ch_ss].to_list()
        x_ladder = case.ladder[ch_ss]
        y_ladder = case.df[ch_ss][x_ladder].to_list()
        p = figure(tools='pan,wheel_zoom,reset',
                   title=label_name,
                   x_axis_label='size standard',
                   y_axis_label='RFU',
                   width=w,
                   height=int(h / 2.0),
                   x_range=(0,
                            max(x)),
                   y_range=(-200,
                            max(y_ladder) + 200),
                   tooltips=TOOLTIPS)
        p.line(x, y, line_width=0.5, color=channel_colors.get(ch_num, 'blue'))
        p.ygrid.visible = False
        # mark ladder peaks with 'x' glyphs, then label with expected sizes
        p.x(x_ladder, y_ladder)
        for x, y, label in zip(x_ladder, y_ladder, case.rox500):
            mytext = Label(
                angle=1,
                x=x,
                y=y,
                text=str(label),
                x_offset=0,
                y_offset=2,
                text_font_size='8pt')
            p.add_layout(mytext)
        plot_dict[ch_ss] = p
    return plot_dict
def plot_empty_channel_3(case, ch, plot_dict, w, h):
    """Add a half-height plot of the (normally empty) channel-3 trace for
    *ch*, annotating any artifactual peaks with their fitted sizes.
    """
    if ch in channels_of_interest.keys() and 'SCL' not in ch:
        ch_3 = re.sub(r'channel_\d', 'channel_3', ch)
        label_name = case.name + '_' + ch_3
        x = case.df[ch_3].index.to_list()
        y = case.df[ch_3].to_list()
        x_ladder = case.index_of_artifactual_peaks[ch_3]
        y_ladder = case.df[ch_3][x_ladder].to_list()
        if len(y_ladder) > 0:
            # y axis scaled to the tallest artifactual peak
            p = figure(tools='pan,wheel_zoom,reset',
                       title=label_name,
                       x_axis_label='channel of artifactual peaks',
                       y_axis_label='RFU',
                       width=w,
                       height=int(h / 2.0),
                       x_range=(0,
                                max(x)),
                       y_range=(-200,
                                1.5 * max(y_ladder)),
                       tooltips=TOOLTIPS)
        else:
            # no artifactual peaks: let bokeh autoscale the y axis
            p = figure(
                tools='pan,wheel_zoom,reset',
                title=label_name,
                x_axis_label='channel of artifactual peaks',
                y_axis_label='RFU',
                width=w,
                height=int(
                    h / 2.0),
                x_range=(
                    0,
                    max(x)),
                tooltips=TOOLTIPS)
        p.line(
            x,
            y,
            line_width=0.5,
            color=channel_colors.get(
                'channel_3',
                'blue'))
        p.ygrid.visible = False
        # label each artifactual peak with its fitted fragment size
        x_col_name = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', ch)
        x_fitted = case.df[x_col_name][x_ladder].to_list()
        for x, y, label in zip(x_ladder, y_ladder, x_fitted):
            mytext = Label(angle=1, x=x, y=y, text='{:.1f}'.format(
                label), x_offset=0, y_offset=2, text_font_size='8pt')
            p.add_layout(mytext)
        plot_dict[ch_3] = p
    return plot_dict
def sync_axes(plot_dict):
    """Share toolbars across all plots and link each channel's axis ranges
    with its '_repeat' plot, adopting whichever of the pair has the larger
    y-range end so both plots zoom/pan together.
    """
    sorted_keys = sorted(plot_dict.keys())
    p1 = plot_dict[sorted_keys[0]]
    p1.toolbar.active_scroll = p1.select_one(WheelZoomTool)
    for ch, p in plot_dict.items():
        # every plot reuses the first plot's tool instances
        p.tools = p1.tools
        p.toolbar.logo = None
        ch_repeat = ch + '_repeat'
        if ch_repeat in plot_dict.keys():
            if p.y_range.end is not None and plot_dict[ch_repeat].y_range.end is not None:
                if p.y_range.end >= plot_dict[ch_repeat].y_range.end:
                    plot_dict[ch_repeat].x_range = p.x_range
                    plot_dict[ch_repeat].y_range = p.y_range
                else:
                    p.x_range = plot_dict[ch_repeat].x_range
                    p.y_range = plot_dict[ch_repeat].y_range
    return plot_dict
def plot_clonality_case(case, replicate_only, w=1100, h=350):
    """Build every plot for a case and write them to '<case name>.html'.

    Plots are ordered SCL first, then each channel with its repeat,
    followed by their size standards.  The file is both shown in the
    browser and saved.
    """
    silence(FIXED_SIZING_MODE, True)
    plot_dict = {}
    for ch in sorted(case.df.columns):
        plot_dict = plot_scl(case, ch, plot_dict, w, h)
        plot_dict = plot_channels_of_interest(case, ch, plot_dict, w, h)
        plot_dict = highlight_roi_clonality(case, ch, plot_dict, w, h)
        plot_dict = plot_empty_channel_3(case, ch, plot_dict, w, h)
        plot_dict = plot_size_standard(case, ch, plot_dict, w, h)
        plot_dict = plot_peaks_of_interest(
            case, ch, plot_dict, w, h, replicate_only)
    plot_dict = sync_axes(plot_dict)
    # sort the plots. SCL first, channel + repeat after, followed by their
    # size standards.
    plot_keys = sorted([key for key in plot_dict.keys() if 'SCL' not in key])
    scl_keys = sorted([key for key in plot_dict.keys() if 'SCL' in key])
    plot_keys = [*scl_keys, *plot_keys]
    plots = column([plot_dict[ch] for ch in plot_keys], sizing_mode='fixed')
    case_html = case.name + '.html'
    output_file(case_html)
    show(plots)
    save(plots)
    print('Saved {}'.format(case_html))


# Module-level flag; not read anywhere in this file.
debug = False
def replicate_peaks(case):
    """Keep only peaks reproduced in both the original and repeat runs.

    A peak replicates when its fitted fragment size in the original channel
    is within 1 bp of a peak's fitted size in the '_repeat' channel.
    Results go into ``case.index_of_replicate_peaks`` for both channels.
    """
    for ch in case.index_of_peaks_to_annotate.keys():
        if ch not in case.index_of_replicate_peaks.keys():
            case.index_of_replicate_peaks[ch] = []
        if 'repeat' not in ch:
            # fitted-size columns come from the channel-4 size standards
            x_ch = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', ch)
            ch_repeat = ch + '_repeat'
            x_ch_repeat = 'x_fitted_' + \
                re.sub(r'channel_\d', 'channel_4', ch_repeat)
            p1 = case.index_of_peaks_to_annotate[ch]
            p2 = case.index_of_peaks_to_annotate[ch_repeat]
            peaks1 = set()
            peaks2 = set()
            for i in p1:
                i_re = case.df[x_ch][i]
                for j in p2:
                    j_re = case.df[x_ch_repeat][j]
                    # replicate if fitted sizes agree within 1 bp
                    if abs(i_re - j_re) < 1.0:
                        peaks1.add(i)
                        peaks2.add(j)
            case.index_of_replicate_peaks[ch] = sorted(list(peaks1))
            case.index_of_replicate_peaks[ch_repeat] = sorted(list(peaks2))
    return case
def main():
    """Prompt for a data folder, convert its .fsa files to CSV, then process
    and plot every clonality case found in the folder.
    """
    owd = os.getcwd()  # original working directory (not restored afterwards)
    path = easygui.diropenbox()
    os.chdir(path)
    convert_folder(path)
    cases = organize_clonality_files(path)
    for case_name in sorted(cases.keys()):
        case = cases[case_name]
        print('Processing {}'.format(case_name))
        case = gather_case_data(case, case_name, path)
        case = size_standard(case, ch_ss_num=4)
        case = find_artifactual_peaks(case)
        case = baseline_correction_advanced(
            case, ch_list=channels_of_interest.keys(), distance=10)
        case = local_southern(case)
        case = index_of_peaks_to_annotate(case)
        case = replicate_peaks(case)
        plot_clonality_case(case, replicate_only=False, w=1050, h=350)


if __name__ == '__main__':
    main()
| [
"43114068+DocSupport@users.noreply.github.com"
] | 43114068+DocSupport@users.noreply.github.com |
ddf50e75e79b2fdf8f47933f714c83b2eaa89e66 | 09d3b183035824f990946cdd8faa11e8bd729e6f | /geo-data/osmgeojson.py | cc3bfcb2ff891030f189c4724e3ddec70e74dbe7 | [] | no_license | srravya/data-greed | 78d20066acef11c2a56f03fca18975227102832d | 566d2c5ad521fd9ffd01df4fd77476bd3cc18c79 | refs/heads/master | 2021-01-11T09:27:46.965503 | 2016-06-22T17:11:28 | 2016-06-22T17:11:28 | 57,985,117 | 0 | 0 | null | 2016-06-08T05:19:22 | 2016-05-03T16:44:09 | Python | UTF-8 | Python | false | false | 2,349 | py | from geojson import Point
from geojson import Feature, FeatureCollection
from geojson import dump, load
from osmapi import OsmApi
import os
def degree_decimal(dms_list):
    """Convert a [degrees, minutes, seconds] sequence to decimal degrees."""
    degrees = dms_list[0]
    minutes = dms_list[1]
    seconds = dms_list[2]
    return degrees + minutes / 60.0 + seconds / 3600.0
# Output targets: GEODATAFILE selects which file subsequent writes go to.
DATAFILE='libraries_new.geojson'
TESTFILE='libraries_test.geojson'
# Change the value to switch between test data and actual data
GEODATAFILE=DATAFILE
# COORD_SYSTEM='degree'
COORD_SYSTEM='decimal'
# NOTE(review): mixed Python 2/3 idioms in this file — raw_input() below is
# Python 2 only, while input() on Python 3 returns a string (not a number).
# Confirm the intended interpreter before running.
if COORD_SYSTEM == 'decimal':
    lat = input('lat: ')
    lon = input('lon: ')
elif COORD_SYSTEM == 'degree':
    # degree/minute/second entry, converted to decimal degrees
    lat_dms = raw_input('deg,min,sec: ')
    lon_dms = raw_input('deg,min,sec: ')
    lat = degree_decimal([float(x.strip()) for x in lat_dms.split(',')])
    lon = degree_decimal([float(y.strip()) for y in lon_dms.split(',')])
def prompt():
    """Print the interactive menu of available actions."""
    menu = (
        "Select Option",
        "0. Exit",
        "1. Add a node",
        "2. Get node(s)",
    )
    for line in menu:
        print(line)
def add_to_osm():
    """Interactively collect a library's details and append it as a GeoJSON
    Feature to GEODATAFILE.

    Reads module-level ``lat``/``lon`` gathered at import time.
    NOTE(review): OSM_EP is not defined anywhere in this file — confirm it
    is provided elsewhere, otherwise the OsmApi() call raises NameError.
    The ``connection`` is created but never used below.
    """
    connection = OsmApi(passwordfile=u'', api=OSM_EP)
    # GeoJSON point is (Easting, Northing) / (Long, Lat) order!
    my_point = Point((lon,lat))
    # Properties recorded per library:
    #   Name: Name of the library
    #   Operator: Directorate of Public Libraries
    #   Opening Hours: Open hours in OSM format
    #   Address: Door number if available and street
    name = raw_input('Name: ')
    timings = raw_input('Time: ')
    street = raw_input('Street: ')
    housenumber = raw_input('Door: ')
    postcode = raw_input('PINCODE: ')
    my_feature = Feature(geometry=my_point, properties={
        'amenity':'library',
        'name':name,
        'operator':'Directorate of Public Libraries',
        'opening_hours':timings,
        'addr:country':'IN',
        'addr:city':'Chennai',
        'addr:street':street,
        'addr:housenumber':housenumber,
        'address:postcode':postcode,
        'marker-color': '#00ff00',
        'marker-symbol': 'library'
        } )
    # empty file -> start a new FeatureCollection instead of appending
    if os.stat(GEODATAFILE).st_size == 0:
        FILE_EMPTY = True
    else:
        FILE_EMPTY = False
    if not FILE_EMPTY:
        with open(GEODATAFILE,'r') as data:
            current = load(data)
        featureSet = current['features']
        featureSet.append(my_feature)
        print("Total libraries: %d" % len(featureSet))
        libraries = FeatureCollection(featureSet)
    else:
        libraries = FeatureCollection([my_feature])
    # Write data to file
    with open(GEODATAFILE,'w+') as data:
        dump(libraries, data, indent=4, sort_keys=True)
| [
"eternaltyro@gmail.com"
] | eternaltyro@gmail.com |
28e33303b4a8e6d06e0a3ae120f751b62b91b62b | e6a3835a1d1f4d7f6318dfd7047c3b527e994537 | /src/utils/utils.py | b353b1889ce8b210b94356a55dc40562aad8e40d | [] | no_license | MMichels/DeepCars | 9f8faec7b547c585888469202859d317e5d28526 | 327a604faa80d476cafb438b82af6537443670e0 | refs/heads/master | 2023-04-13T03:58:01.503567 | 2019-12-17T20:50:44 | 2019-12-17T20:50:44 | 228,690,108 | 0 | 0 | null | 2023-03-25T00:21:00 | 2019-12-17T19:48:14 | Python | UTF-8 | Python | false | false | 471 | py | import os
from pygame import image, error
from pygame.locals import RLEACCEL
def load_image(path, colorkey=None):
    """Load an image with pygame and return (surface, rect).

    path: filesystem path to the image file.
    colorkey: transparency color for set_colorkey(); pass -1 to sample the
        pixel at (0, 0), or None (default) for no color key.
    Raises SystemExit if pygame cannot load the image.
    """
    try:
        img = image.load(path)
    except error as message:
        print('Não foi possivel abrir a imagem: ', path)
        raise SystemExit(message)
    img = img.convert_alpha()
    # `is not None` rather than truthiness: a falsy colorkey value (e.g. 0)
    # would otherwise be silently ignored.
    if colorkey is not None:
        if colorkey == -1:
            colorkey = img.get_at((0, 0))
        img.set_colorkey(colorkey, RLEACCEL)
    return img, img.get_rect()
"michels09@hotmail.com"
] | michels09@hotmail.com |
4748aa5750dba7b48af7c65f6b08a0be79ebbcb4 | 563c1d3093a047d7185c34557345eadf60d0dcd1 | /reservoir-id/classifier_apply.py | b886278192ca1926597017c7d814da1eb2ac04a2 | [
"GPL-3.0-only"
] | permissive | kysolvik/reservoir-id | ea930cbd93199bf6f3bcda58fd5971d3402eb8bc | f3a25d0750d96f369a699547584d7db97b2cb43d | refs/heads/master | 2021-01-19T03:30:41.006479 | 2018-01-17T14:38:42 | 2018-01-17T14:38:42 | 87,315,930 | 0 | 0 | MIT | 2018-01-07T16:53:26 | 2017-04-05T13:59:18 | Python | UTF-8 | Python | false | false | 3,065 | py | #!/usr/bin/env python
"""
Apply classifier exported by classifier_train.py
Inputs: Classifier pkl path, small area cutoff
Outputs: CSV with classified regions
Notes:
1. Make sure that all columns in the apply csv match the train_csv
2. exclude_att_patterns must match
@authors: Kylen Solvik
Date Create: 5/27/17
"""
# Load libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.externals import joblib
import xgboost as xgb
import numpy as np
import sys
import argparse
# Parse arguments
# Command-line interface: three positional paths (input table, trained model,
# output csv) plus the area cutoff used at training time and a path prefix.
parser = argparse.ArgumentParser(description='Apply Random Forest classifier to prop_csv.',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('prop_csv',
                    help='Path to attribute table (from build_att_table.py).',
                    type=str)
parser.add_argument('xgb_pkl',
                    help='Path to pkl with xgb model.',
                    type=str)
parser.add_argument('class_csv_out',
                    help='Path for output classified csv',
                    type=str)
parser.add_argument('--area_lowbound',
                    help='Lower area bound. Must match trained model. All regions <= in size will be ignored',
                    default=2,
                    type=int)
parser.add_argument('--path_prefix',
                    help='To be placed at beginnings of all other path args',
                    type=str,default='')
args = parser.parse_args()
def main():
    """Load the attribute table, drop excluded/all-NaN columns and small
    regions, apply the pickled XGBoost classifier, and write the classified
    table to CSV.  All paths and thresholds come from the module-level `args`.
    """
    # Set any attributes to exclude for this run
    exclude_att_patterns = []
    # Load dataset
    dataset = pd.read_csv(args.path_prefix + args.prop_csv,header=0)
    # Keep only regions strictly larger than the area cutoff.
    dataset_acut = dataset.loc[dataset['area'] > args.area_lowbound]
    # Exclude attributes matching user input patterns, or if they are all nans
    exclude_atts = []
    for pattern in exclude_att_patterns:
        col_list = [col for col in dataset_acut.columns if pattern in col]
        exclude_atts.extend(col_list)
    # NOTE(review): this scans the uncut `dataset` and assumes every column
    # past the first is numeric so np.isfinite works — confirm with the
    # output of build_att_table.py.
    for att in dataset.columns[1:]:
        if sum(np.isfinite(dataset[att])) == 0:
            exclude_atts.append(att)
    for att in list(set(exclude_atts)):
        del dataset_acut[att]
    (ds_y,ds_x) = dataset_acut.shape
    print(ds_y,ds_x)
    # Convert dataset to array
    array = dataset_acut.values
    # Columns 2..end are features; column 1 holds the label used at training.
    X = array[:,2:ds_x].astype(float)
    Y = array[:,1].astype(int)
    # Set nans to 0
    X = np.nan_to_num(X)
    # Export classifier trained on full data set
    clf = joblib.load(args.path_prefix + args.xgb_pkl)
    clf_pred = clf.predict(X)
    dataset_out = dataset_acut
    dataset_out["clf_pred"] = clf_pred
    print(str(sum(clf_pred == 1)) + " classified as positive")
    print(str(sum(clf_pred == 0)) + " classified as negative")
    dataset_out.to_csv(args.path_prefix + args.class_csv_out,index=False)
if __name__ == '__main__':
    main()
| [
"kysolvik@gmail.com"
] | kysolvik@gmail.com |
bb35ccd3ccfc92a049807e3711182d740eb677b8 | eab2dc435028b2548554d97b24eb7b7e3576b953 | /iblrig/check_sync_pulses.py | b53097729443914a5879f7b454f1900b4316e049 | [
"MIT"
] | permissive | k1o0/iblrig | 35edd8570215ca591b1f1e26e47439e633aa587a | 9177b852b344a9bbc26e4a4aeb5f0182bd8a9b25 | refs/heads/master | 2021-05-24T12:58:47.552912 | 2020-02-25T20:19:59 | 2020-02-25T20:19:59 | 253,573,669 | 0 | 0 | MIT | 2020-04-06T17:48:28 | 2020-04-06T17:48:28 | null | UTF-8 | Python | false | false | 2,875 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Monday, February 25th 2019, 2:10:38 pm
import logging
import sys
from pathlib import Path
import ibllib.io.raw_data_loaders as raw
import matplotlib.pyplot as plt
import numpy as np
from iblrig.misc import get_port_events
log = logging.getLogger("iblrig")
def sync_check(tph):
    """Log a warning when any of the three sync channels produced no events.

    tph: trial-parameter handler exposing ``behavior_data`` with Bpod's
    "Events timestamps" dict (BNC1 = visual stim, BNC2 = sound,
    Port1 = camera).
    """
    timestamps = tph.behavior_data["Events timestamps"]
    bnc1 = get_port_events(timestamps, name="BNC1")
    bnc2 = get_port_events(timestamps, name="BNC2")
    port1 = get_port_events(timestamps, name="Port1")
    missing = "COULD NOT FIND DATA ON {}"
    bnc1_msg = "OK" if bnc1 else missing.format("BNC1")
    bnc2_msg = "OK" if bnc2 else missing.format("BNC2")
    port1_msg = "OK" if port1 else missing.format("Port1")
    warn_msg = f"""
    ##########################################
              NOT FOUND: SYNC PULSES
    ##########################################
    VISUAL STIMULUS SYNC: {bnc1_msg}
    SOUND SYNC: {bnc2_msg}
    CAMERA SYNC: {port1_msg}
    ##########################################"""
    if not (bnc1 and bnc2 and port1):
        log.warning(warn_msg)
if __name__ == "__main__":
    # CLI entry point: takes one session path (or .jsonable file) and plots
    # the frame2ttl / sound / camera sync pulses for every trial.
    if len(sys.argv) == 1:
        print("I need a file name...")
    session_data_file = Path(sys.argv[1])
    if not session_data_file.exists():
        raise FileNotFoundError(f"{session_data_file}")
    # A .jsonable path points two levels below the session folder.
    if session_data_file.name.endswith(".jsonable"):
        data = raw.load_data(session_data_file.parent.parent)
    else:
        try:
            data = raw.load_data(session_data_file)
        except Exception:
            print("Not a file or a valid session folder")
    # Accumulate event timestamps per channel over all trials.
    unsynced_trial_count = 0
    frame2ttl = []
    sound = []
    camera = []
    trial_end = []
    for trial_data in data:
        tevents = trial_data["behavior_data"]["Events timestamps"]
        ev_bnc1 = get_port_events(tevents, name="BNC1")
        ev_bnc2 = get_port_events(tevents, name="BNC2")
        ev_port1 = get_port_events(tevents, name="Port1")
        # A trial counts as unsynced when any channel produced no events.
        if not ev_bnc1 or not ev_bnc2 or not ev_port1:
            unsynced_trial_count += 1
        frame2ttl.extend(ev_bnc1)
        sound.extend(ev_bnc2)
        camera.extend(ev_port1)
        trial_end.append(trial_data["behavior_data"]["Trial end timestamp"])
    print(f"Found {unsynced_trial_count} trials with bad sync data")
    # Raster-style plot: one row per channel, vertical lines at trial ends.
    f = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
    ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
    ax.plot(camera, np.ones(len(camera)) * 1, "|")
    ax.plot(sound, np.ones(len(sound)) * 2, "|")
    ax.plot(frame2ttl, np.ones(len(frame2ttl)) * 3, "|")
    [ax.axvline(t, alpha=0.5) for t in trial_end]
    ax.set_ylim([0, 4])
    ax.set_yticks(range(4))
    ax.set_yticklabels(["", "camera", "sound", "frame2ttl"])
    plt.show()
| [
"nbonacchi@gmail.com"
] | nbonacchi@gmail.com |
8b0d58ef495a25ef7a5bac1d8320f8430110b81a | 4bdb484b1aaf38f38e512042e249c26bb8cb181c | /v-server/shopspider/diy/configs.py | 3e57d1d8796addaa9191b063104920b91f3dcb92 | [] | no_license | fan1018wen/scrapy-spider | 593ec2b6e02724e185e135ecc107400eeb7aec37 | 97d7ea1ce63d6c84ef9e01fb55e9376dbd7b8e83 | refs/heads/master | 2021-01-15T22:14:57.787123 | 2013-09-27T03:59:55 | 2013-09-27T03:59:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | ##coding=utf-8
# Define some diy functions here
table_prefix = 'P1_WJ_TEST_LANG' # table-name prefix used by the pipeline, e.g. TEST --> TEST_SHOP, TEST_PRODUCT, TEST_PRODUCT_IMAGE
show_messages = True # whether to print debug/progress messages: True / False
# --- database settings --- to change the port, edit the pipeline module
db_type = 'oracle' # database type: oracle / mysql (read by the pipeline)
db_host = '172.16.4.211' # database host (read by the pipeline)
db_user = 'spider' # database user name
db_pass = 'spider' # database password
db_name = 'spider' # database name (used when db_type is mysql)
db_sid = 'xe' # Oracle service name (e.g. jlproject_primary)
handle_image = True # whether to process images: True / False (pipeline; rarely changed — source image paths are absolute http URLs)
download_image = False # whether to download images: True / False (pipeline; rarely changed)
image_dir = '/picdir/php' # image root directory; on Windows e.g. 'D:\\7788\\picdir\\php' (rarely changed)
# NOTE(review): `global` at module level is a no-op; `conf` would be a
# module global anyway.
global conf
conf = {
    'table_prefix' : table_prefix,
    'show_messages' : show_messages,
    'db_type' : db_type,
    'db_host' : db_host,
    'db_user' : db_user,
    'db_pass' : db_pass,
    'db_name' : db_name,
    'db_sid' : db_sid,
    'handle_image' : handle_image,
    'download_image' : download_image,
    'image_dir' : image_dir
}
#if conf['show_messages'] : | [
"wj922@qq.com"
] | wj922@qq.com |
0abd56daa2dfc8f450f36161ccbb0d4530572899 | 13d384f7eb991b7fe901468f1967f7b2952499a6 | /day-23 turtle-crossing-start/car_manager.py | 1f23d9f92ae79bb4124d80476d54b7f7eac0db84 | [] | no_license | miloscomplex/100_Days_of_Python | f31638fc5a3913dc32850b61c51d2cecac7cdbdf | 6ac67472627867d8bf9cccb496e6395d979b8c89 | refs/heads/main | 2023-08-25T03:00:49.216040 | 2021-10-07T03:32:19 | 2021-10-07T03:32:19 | 395,512,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | import random
from turtle import Turtle
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
TOP_MAX = 250
BOTTOM_MAX = -250
LEFT_DISTANCE = -320


class CarManager(Turtle):
    """Spawns, moves and accelerates the car obstacles of the crossing game."""

    def __init__(self):
        super().__init__()
        self.all_cars = []
        self.car_speed = STARTING_MOVE_DISTANCE
        self.hideturtle()

    def create_car(self):
        """Add one randomly coloured car at a random off-screen spawn point."""
        car = Turtle("square")
        car.color(random.choice(COLORS))
        car.shapesize(stretch_wid=1, stretch_len=2)
        car.penup()
        spawn_y = random.randint(BOTTOM_MAX, TOP_MAX)
        spawn_x = random.randint(300, 890)
        car.goto(spawn_x, spawn_y)
        self.all_cars.append(car)

    def move_cars(self):
        """Advance every car leftwards, recycling ones that left the screen."""
        for car in self.all_cars:
            car.backward(self.car_speed)
            if car.xcor() >= LEFT_DISTANCE:
                continue
            respawn_y = random.randint(BOTTOM_MAX, TOP_MAX)
            respawn_x = random.randint(300, 890)
            car.goto(respawn_x, respawn_y)

    def level_up(self):
        """Make all cars faster for the next level."""
        self.car_speed += MOVE_INCREMENT
| [
"hicallmesutton@gmail.com"
] | hicallmesutton@gmail.com |
9b2f6cdd33b203db499cf006e77db48474b4b153 | 2b240306722b3fba53caf25fc62fd599bb70f082 | /lectures/cs532-s19/assignments/A6/toPush/Python/driver.py | d42194c508f4364eb0d9d53e7a3c25d83dddcea9 | [] | no_license | bayardd/anwala.github.io | cac62b5d13a3e57106aff60f846a2a322938ceaf | 3d3b23f78813aff39760232f68d0b2043722a342 | refs/heads/master | 2020-04-20T04:09:07.304978 | 2019-04-30T17:27:35 | 2019-04-30T17:27:35 | 168,619,026 | 0 | 0 | null | 2019-02-01T00:38:51 | 2019-02-01T00:38:51 | null | UTF-8 | Python | false | false | 5,014 | py | import recommendations
# Analysis driver: appends a MovieLens recommendation report for users
# 368, 81 and 135 to data.txt using the helpers in recommendations.py.
# NOTE(review): `file` shadows the builtin name and is never closed; the
# script relies on interpreter exit to flush data.txt.
allSimilar = []
file = open("data.txt", 'a')
newline = '\n'
tab = '\t'
file.write(f'First User Chosen: {tab} 368{newline}')
file.write(f'Second User Chosen: {tab} 81 {newline}')
file.write(f'Third User Chosen: {tab} 135 {newline}{newline}')
# pref maps user id -> {movie title: rating}.
pref = recommendations.loadMovieLens()
# Get sorted list of user ratings
userRatings1 = (sorted(pref['368'].items(), key =
    lambda kv:(kv[1], kv[0])))
userRatings2 = (sorted(pref['81'].items(), key =
    lambda kv:(kv[1], kv[0])))
userRatings3 = (sorted(pref['135'].items(), key =
    lambda kv:(kv[1], kv[0])))
# Get top 5 for each user
userRatings1.reverse()
userRatings2.reverse()
userRatings3.reverse()
# Formatted File output
file.write(f'First User Rating: {newline}')
file.write(f'ID 368 Top 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings1[x][0]
    rating = userRatings1[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}ID 368 Bottom 3 Rated Movies: {newline}')
userRatings1.reverse()
for x in range(0,3):
    name = userRatings1[x][0]
    rating = userRatings1[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}Second User Rating: {newline}')
file.write(f'ID 81 Top 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings2[x][0]
    rating = userRatings2[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
userRatings2.reverse()
file.write(f'{newline}ID 81 Bottom 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings2[x][0]
    rating = userRatings2[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}Third User Rating: {newline}')
file.write(f'ID 135 Top 3 Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings3[x][0]
    rating = userRatings3[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
userRatings3.reverse()
file.write(f'{newline}ID 135 Bottom 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
    name = userRatings3[x][0]
    rating = userRatings3[x][1]
    file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}{newline}Substitute User ID: 368 {newline}{newline}')
# Find most correlated users
closest_5 = recommendations.topMatches(pref, '368')
# Find least correlated users
furthest_5 = recommendations.worstMatches(pref, '368')
# Output for least and most correlated users
file.write(f'Five other users with highest correlation: {newline}{newline}')
for x in closest_5:
    correlationValue = round(x[0])
    tempId = x[1]
    file.write(f'User ID:{tempId} {tab}Correlation Value: {correlationValue}{newline}')
file.write(f'{newline}Five other users with lowest correlation: {newline}')
for y in furthest_5:
    correlationValue = round(y[0])
    tempId = y[1]
    file.write(f'User ID:{tempId} {tab}Correlation Value: {correlationValue}{newline}')
# User-based recommendations for user 368 (list of (rating, title) pairs).
recommendedMovies = recommendations.getRecommendations(pref, '368')
file.write(f'{newline}Computed Top 5 Movies to be Watched: {newline}')
for x in range(0,5):
    rating = recommendedMovies[x][0]
    name = recommendedMovies[x][1]
    file.write(f'Name of Movie: {name}{tab} Calculated Rating: {rating}{newline}')
file.write(f'{newline}Computed Bottom 5 Movies to be Watched: {newline}')
recommendedMovies.reverse()
for y in range(0,5):
    rating = recommendedMovies[y][0]
    name = recommendedMovies[y][1]
    file.write(f'Name of Movie: {name}{tab} Calculated Rating: {rating}{newline}')
file.write(f'{newline}{newline}Favorite Movie: {tab} Jurassic Park (1993){newline}')
file.write(f'Least Favorite Movie: {tab} Children of the Corn: The Gathering (1996){newline}{newline}')
# Item-based similarity tables keyed by movie title.
similarMovies = recommendations.calculateSimilarItems(pref)
notSimilarMovies = recommendations.calculateLeastSimilarItems(pref)
file.write(f'Top Recommended Movies to be Watched for Jurassic Park: {newline}')
# print(similarMovies['Jurassic Park (1993)'])
for x in similarMovies['Jurassic Park (1993)']:
    name = x[1]
    rating = x[0]
    file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Bottom Recommended Movies to be Watched for Jurassic Park{newline}')
for x in notSimilarMovies['Jurassic Park (1993)']:
    name = x[1]
    rating = x[0]
    file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Top Recommended Movies to be Watched for Children of the Corn: {newline}')
for x in similarMovies['Children of the Corn: The Gathering (1996)']:
    name = x[1]
    rating = x[0]
    file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Bottom Recommended Movies to be Watched for Children of the Corn{newline}')
for x in notSimilarMovies['Children of the Corn: The Gathering (1996)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}') | [
"dbaya001@odu.edu"
] | dbaya001@odu.edu |
fb7248f9ab1b81c3bee297715b6eed6deb7193f3 | b2f6b65cba891f3a86e507d4dd312936517ab139 | /utils/modelsize.py | 213406ce9a9a0c028c54e6939f32b41239f2d85d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | leeesangwon/CGNet | 2822d288355e8a535a780c4a6e850608467465dc | d07c0e84d252bed9cbc28e66da4b85bdcc4c6293 | refs/heads/master | 2020-04-14T04:48:48.532572 | 2019-05-09T13:08:26 | 2019-05-09T13:08:26 | 163,646,131 | 1 | 0 | MIT | 2019-05-09T13:08:28 | 2018-12-31T06:45:11 | Python | UTF-8 | Python | false | false | 2,602 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class SizeEstimator(object):
    """Estimate the memory footprint of a PyTorch model for a given input.

    The estimate covers parameters, forward+backward activations and the
    input tensor, assuming every scalar is stored with ``bits`` bits.

    NOTE(review): ``get_output_sizes`` feeds the sample input through
    ``model.modules()`` sequentially, so the activation estimate is only
    meaningful for flat feed-forward models (e.g. a single ``nn.Sequential``);
    confirm before relying on it for branched architectures.
    """

    def __init__(self, model, input_size=(1, 1, 32, 32), bits=32):
        """
        model: the ``nn.Module`` to measure.
        input_size: shape of a sample input tensor.
        bits: bits used to store one scalar (32 for float32).
        """
        self.model = model
        self.input_size = input_size
        # Bug fix: ``bits`` used to be ignored (hard-coded to 32).
        self.bits = bits
        return

    def get_parameter_sizes(self):
        '''Record the shape of every parameter of `model` (as numpy arrays).'''
        # Iterating model.parameters() once counts each parameter exactly one
        # time; the previous per-module loop double-counted parameters of
        # nested containers because Module.parameters() is recursive.
        self.param_sizes = [np.array(p.size()) for p in self.model.parameters()]
        return

    def get_output_sizes(self):
        '''Run a zero sample input through each layer to get output sizes.'''
        out_sizes = []
        # torch.no_grad() replaces the removed Variable(..., volatile=True);
        # zeros give a deterministic pass (only the sizes matter here).
        with torch.no_grad():
            input_ = torch.zeros(*self.input_size)
            mods = list(self.model.modules())
            for m in mods[1:]:
                out = m(input_)
                out_sizes.append(np.array(out.size()))
                input_ = out
        self.out_sizes = out_sizes
        return

    def calc_param_bits(self):
        '''Calculate total number of bits to store `model` parameters'''
        total_bits = 0
        for s in self.param_sizes:
            total_bits += np.prod(np.array(s)) * self.bits
        self.param_bits = total_bits
        return

    def calc_forward_backward_bits(self):
        '''Calculate bits to store forward and backward pass'''
        total_bits = 0
        for s in self.out_sizes:
            total_bits += np.prod(np.array(s)) * self.bits
        # multiply by 2 for both forward AND backward
        self.forward_backward_bits = (total_bits * 2)
        return

    def calc_input_bits(self):
        '''Calculate bits to store input'''
        self.input_bits = np.prod(np.array(self.input_size)) * self.bits
        return

    def estimate_size(self):
        '''Estimate model size in memory in megabytes and bits'''
        self.get_parameter_sizes()
        self.get_output_sizes()
        self.calc_param_bits()
        self.calc_forward_backward_bits()
        self.calc_input_bits()
        total = self.param_bits + self.forward_backward_bits + self.input_bits
        total_megabytes = (total / 8) / (1024 ** 2)
        return total_megabytes, total
| [
"874314714@qq.com"
] | 874314714@qq.com |
c43dee062a7499d04b64507171d861b11b09912e | df3c8c521a51f2b412118bd9d0e477da06a3b7cc | /build/view_environments/post_create_/create_post/create_post.py | 2a6a13f8a1551a30e01dd4e643e8f14b345f9bfd | [] | no_license | bharatmudragada/fb_post | c30b900731db5844df6b438e5d38a0dfb607412a | c5e7bb185a561bdcfcd7b2e30264554b07106044 | refs/heads/master | 2020-06-21T04:05:22.296755 | 2019-07-17T07:48:22 | 2019-07-17T07:48:22 | 197,339,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | from django_swagger_utils.drf_server.decorators.request_response import request_response
from django_swagger_utils.drf_server.default.parser_mapping import PARSER_MAPPING
from django_swagger_utils.drf_server.default.renderer_mapping import RENDERER_MAPPING
from fb_post.build.serializers.definitions.PostContent.PostContentSerializer import PostContentSerializer
from fb_post.build.serializers.definitions.PostId.PostIdSerializer import PostIdSerializer
# Swagger operation spec consumed by the request_response decorator below:
# HTTP method, parser/renderer classes and per-status response serializers
# for the "create_post" API (generated by django_swagger_utils).
options = {
    'METHOD': 'POST',
    'REQUEST_WRAPPING_REQUIRED': True,
    'REQUEST_ENCRYPTION_REQUIRED': False,
    'REQUEST_IS_PARTIAL': False,
    'PARSER_CLASSES': [
        PARSER_MAPPING["application/json"]
    ],
    'RENDERER_CLASSES': [
        RENDERER_MAPPING["application/json"]
    ],
    'REQUEST_QUERY_PARAMS_SERIALIZER': None,
    'REQUEST_HEADERS_SERIALIZER': None,
    'REQUEST_SERIALIZER': PostContentSerializer,
    'REQUEST_SERIALIZER_MANY_ITEMS': False,
    'RESPONSE': {
        '201' : {
            'RESPONSE_SERIALIZER': PostIdSerializer,
            'RESPONSE_SERIALIZER_MANY_ITEMS': False,
            'HEADERS_SERIALIZER': None,
        }
        ,
        '400' : {
            'RESPONSE_SERIALIZER': None,
            'RESPONSE_SERIALIZER_MANY_ITEMS': False,
            'HEADERS_SERIALIZER': None,
        }
    },
    "SECURITY":{
        "oauth" : [
            "write"
        ]
    }
}
# Identifiers used by the swagger wrapper to route this view.
app_name = "fb_post"
operation_id = "create_post"
group_name = ""
@request_response(options=options, app_name=app_name, operation_id=operation_id, group_name=group_name)
def create_post(request, *args, **kwargs):
    """Generated view shim: forwards the request to the view-environment
    implementation of the "create_post" operation."""
    args = (request,) + args
    from django_swagger_utils.drf_server.wrappers.view_env_wrapper import view_env_wrapper
    return view_env_wrapper(app_name, "create_post", group_name, *args, **kwargs)
| [
"bharathmudragada123@gmail.com"
] | bharathmudragada123@gmail.com |
4f2cdd1eb56bda921db71669d39b4bbdaf4062e4 | 82dafd9b89abdf334420e50f9d7562984aed8a7d | /cifar10_models/senet.py | a6f47305812f4ead441c3208f43d2a499c2c5841 | [] | no_license | mostafaelhoushi/tensor-decompositions | 844aaed58abeb1e17923860a5e9aebed64465030 | 8c3186dfc4d5d2eb22b0a673e3eaf1bcaa872feb | refs/heads/master | 2020-07-09T03:51:30.214582 | 2020-05-02T12:46:00 | 2020-05-02T12:46:00 | 203,867,675 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,086 | py | '''SENet in PyTorch.
SENet is the winner of ImageNet-2017. The paper is not released yet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['senet18']
class BasicBlock(nn.Module):
    """Residual basic block with a Squeeze-and-Excitation (SE) channel gate."""

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Projection shortcut when the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

        # SE layers: 1x1 convs act as per-channel fully connected layers
        # (use nn.Conv2d instead of nn.Linear).
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Squeeze: global average pool to a (N, C, 1, 1) channel descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid replaces the deprecated F.sigmoid.
        w = torch.sigmoid(self.fc2(w))
        # Excitation: rescale each channel by its learned gate.
        out = out * w
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    """Pre-activation residual block (BN+ReLU before conv) with an SE gate."""

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # Only create a projection shortcut when shapes change; forward()
        # falls back to the identity when the attribute is absent.
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )

        # SE layers (1x1 convs acting as per-channel fully connected layers).
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))

        # Squeeze: global average pool to a (N, C, 1, 1) channel descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid replaces the deprecated F.sigmoid.
        w = torch.sigmoid(self.fc2(w))
        # Excitation: rescale each channel by its learned gate.
        out = out * w
        out += shortcut
        return out
class SENet(nn.Module):
    """SENet backbone for 32x32 inputs: conv stem, four block stages,
    4x4 average pooling and a linear classification head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one may downsample."""
        stage = []
        for index in range(num_blocks):
            stage.append(block(self.in_planes, planes, stride if index == 0 else 1))
            self.in_planes = planes
        return nn.Sequential(*stage)

    def forward(self, x):
        features = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            features = stage(features)
        pooled = F.avg_pool2d(features, 4)
        flat = pooled.view(pooled.size(0), -1)
        return self.linear(flat)
def SENet18():
    """Build the standard SENet-18: PreActBlocks with [2, 2, 2, 2] stages."""
    return SENet(PreActBlock, [2,2,2,2])
def senet18():
    """Lowercase factory alias for SENet18() (exported via __all__)."""
    return SENet18()
def test():
    """Smoke test: run one random CIFAR-sized batch through SENet-18."""
    net = SENet18()
    y = net(torch.randn(1,3,32,32))
    print(y.size())

# test()
| [
"m.elhoushi@ieee.org"
] | m.elhoushi@ieee.org |
bec7c5ea5c678a589efad67a06df92c0335711e2 | dc29b57b9a025287574117a4e7c7fc27663d6063 | /pydemo/src/wxdemo/gridbagdemo.py | 3dc34973c575305cf8cc3a71ddc85a57d34b5233 | [] | no_license | bspeng922/pyutils | e4d0e988d5c168a3a9e97da2d09c6b714faa2c9a | 4fa6c75a7159e03383c0f89d67d1ca37f3d0f0a5 | refs/heads/master | 2020-04-11T09:59:19.089455 | 2017-01-06T07:42:20 | 2017-01-06T07:42:20 | 7,434,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import wx
class Example(wx.Frame):
    """Demo frame showing a label, a text box and two buttons laid out
    with a wx.GridBagSizer."""

    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, "", size=(320,130))
        self.InitUI()

    def InitUI(self):
        """Build the widget hierarchy and the grid-bag layout."""
        panel = wx.Panel(self)
        caption = wx.StaticText(panel, label="Rename To")
        name_box = wx.TextCtrl(panel)
        ok_button = wx.Button(panel, label="OK", size=(90,28))
        close_button = wx.Button(panel, label="Close", size=(90,28))
        layout = wx.GridBagSizer(4,4)
        layout.Add(caption, pos=(0,0), flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)
        layout.Add(name_box, pos=(1,0), span=(1,5), flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=5)
        layout.Add(ok_button, pos=(3,3))
        layout.Add(close_button, pos=(3,4), flag=wx.RIGHT|wx.BOTTOM, border=5)
        layout.AddGrowableCol(1)
        layout.AddGrowableRow(2)
        panel.SetSizer(layout)
if __name__ == "__main__":
    # Standard wxPython bootstrap: create the app, show the frame, run the loop.
    app = wx.App()
    Example(None, -1).Show()
    app.MainLoop()
| [
"bspeng922@gmail.com"
] | bspeng922@gmail.com |
a4eb444e3bee4d492386c1d33f6ce720fe415054 | c862c18ea1097ec54df04e09debae9e68d0c9897 | /edit_note_dialog.py | 38cc02deab7901e90daae048cc7d898d15833112 | [] | no_license | YoungTeurus/Organiser_Qt | 605e8428e15f155c77edeb036d23133e22104365 | 499fcb9259f496adbecfc21730bdc9de33dc04dd | refs/heads/master | 2021-02-05T16:30:57.451874 | 2020-03-01T17:43:14 | 2020-03-01T17:43:14 | 243,803,353 | 0 | 0 | null | 2020-03-01T17:43:16 | 2020-02-28T16:12:47 | Python | UTF-8 | Python | false | false | 2,775 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Work\Organiser_Qt\edit_note_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated layout class for the note-editing dialog.

    NOTE: this class is regenerated from edit_note_dialog.ui; hand edits
    will be lost (see the file header warning).
    """
    def setupUi(self, Dialog):
        """Create and position all widgets on `Dialog` and wire its slots."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 278)
        self.title_line = QtWidgets.QLineEdit(Dialog)
        self.title_line.setGeometry(QtCore.QRect(120, 10, 261, 20))
        self.title_line.setObjectName("title_line")
        self.note_text = QtWidgets.QTextEdit(Dialog)
        self.note_text.setGeometry(QtCore.QRect(10, 40, 371, 201))
        self.note_text.setObjectName("note_text")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(20, 10, 91, 16))
        self.label.setObjectName("label")
        # Bottom button row: spacer pushes the two buttons to the right.
        self.horizontalLayoutWidget = QtWidgets.QWidget(Dialog)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 243, 371, 31))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.save_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.save_button.setEnabled(False)
        self.save_button.setCheckable(False)
        self.save_button.setAutoRepeatDelay(298)
        self.save_button.setObjectName("save_button")
        self.horizontalLayout.addWidget(self.save_button)
        self.delete_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.delete_button.setObjectName("delete_button")
        self.horizontalLayout.addWidget(self.delete_button)
        self.note_text.raise_()
        self.title_line.raise_()
        self.label.raise_()
        self.horizontalLayoutWidget.raise_()
        self.retranslateUi(Dialog)
        # Buttons call back into the Dialog's save/delete slots.
        self.save_button.clicked.connect(Dialog.save)
        self.delete_button.clicked.connect(Dialog.delete)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Apply the (Russian) translated captions to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "Название заметки"))
        self.save_button.setText(_translate("Dialog", "Сохранить изменения"))
        self.delete_button.setText(_translate("Dialog", "Удалить заметку"))
| [
"ilya.elfimow@yandex.ru"
] | ilya.elfimow@yandex.ru |
f312f96e09ae162f71d13541059405e61729ea52 | 34d99bff51f26c03fcf05141589f51abeae2ff98 | /HTJK/venv/Lib/site-packages/wqrfnium/wqrfnium.py | 11297b7b76430aef3371b426153664074192804d | [] | no_license | zmbhza/appui | d5b31c60122eabe4d8d484d0d15e333b46a9d46f | 7a5b1072245c53b5a227943b41ef0b54420c7107 | refs/heads/master | 2022-12-21T14:00:41.509390 | 2020-09-27T03:34:15 | 2020-09-27T03:34:15 | 297,602,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,096 | py | # -*- coding: utf-8 -*-
import os,sys
import re,time
import Levenshtein
import xlrd,xlwt
from xlutils.copy import copy
import os,platform
import configparser
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#----------------------------------
# diy your elements_xls_path
def create_xls(elements_xls_path):
    """Create an empty elements workbook (one 'Sheet1') unless it exists."""
    if os.path.exists(elements_xls_path):
        return
    workbook = xlwt.Workbook(encoding='utf-8',style_compression=0)
    workbook.add_sheet('Sheet1',cell_overwrite_ok=True)
    workbook.save(elements_xls_path)
def get_elements(icon):
    """Look up the locator row for `icon` in the elements workbook.

    Returns [find_method, find_value, index, html_snapshot, row_number].
    Exits the process (after printing usage help) when the workbook cannot
    be opened or the icon is not present.
    """
    try:
        Data = xlrd.open_workbook(elements_xls_path)
    except Exception:
        print('Please put the element into the elements.xls first!')
        print('First column:icon,Second column:tmp_find_method,Third column:tmp_find_value,Fourth column:index,Fifth column:html_element')
        print('For example:seachinput,id,kw,0,<input type="text" class="s_ipt" name="wd" id="kw" maxlength="100" autocomplete="off">')
        exit(0)
    table = Data.sheet_by_name("Sheet1")
    nrows = table.nrows
    # Linear scan for the row whose first column matches the icon name.
    for i in range(nrows):
        element_tmp = table.cell(i,0).value
        if element_tmp == icon:
            # Column 4 (the HTML snapshot) may be missing on a fresh row.
            try:
                html_element = table.cell(i,4).value
            except:
                html_element = ''
            return [table.cell(i,1).value,table.cell(i,2).value,int(table.cell(i,3).value),html_element,i]
    print('not fonund the element: [ %s ],please fixed it by yourself...'%icon)
    exit(0)
def update_elements(id,html,tmp,tmp_value,index):
    """Rewrite row `id` of the elements workbook with a new locator
    (method, value, index) and the element's current HTML snapshot."""
    workbook = copy(xlrd.open_workbook(elements_xls_path))
    sheet = workbook.get_sheet(0)
    for column, value in ((1, tmp), (2, tmp_value), (3, index), (4, html)):
        sheet.write(id, column, value)
    os.remove(elements_xls_path)
    workbook.save(elements_xls_path)
def input_html_element(id,html):
    """Store the element's outerHTML snapshot into column 4 of row `id`."""
    writable = copy(xlrd.open_workbook(elements_xls_path))
    writable.get_sheet(0).write(id, 4, html)
    os.remove(elements_xls_path)
    writable.save(elements_xls_path)
def likescore(oldstr,newstr):
    """Similarity in [0, 1] between two values, compared as strings."""
    return Levenshtein.ratio(str(oldstr), str(newstr))
def search_new(driver,old_html):
    """Self-healing locator: find the live element most similar to a stale one.

    Extracts the attributes of `old_html` (the stale element's outerHTML),
    scores every element with the same tag on the current page by summed
    Levenshtein similarity of id/name/class/text/value/onclick/style/
    placeholder/href/type, and returns the best match as
    [element, new_outerHTML, 'tag name', tag, index].
    """
    # Pull each attribute of the stale snapshot; default to None/'' when absent.
    try:old_id = re.findall(r'id="(.*?)"',old_html)[0]
    except:old_id = None
    try:old_name = re.findall(r'name="(.*?)"',old_html)[0]
    except:old_name=None
    try:old_class = re.findall(r'class="(.*?)"',old_html)[0]
    except:old_class=None
    try:old_text = re.findall(r'>(.*?)<',old_html)[0]
    except:old_text=''
    try:old_value = re.findall(r'value="(.*?)"',old_html)[0]
    except:old_value=''
    try:old_onclick = re.findall(r'onclick="(.*?)"',old_html)[0]
    except:old_onclick=None
    try:old_style = re.findall(r'style="(.*?)"',old_html)[0]
    except:old_style=''
    try:old_placeholder = re.findall(r'placeholder="(.*?)"', old_html)[0]
    except:old_placeholder=None
    try:old_href = re.findall(r'href="(.*?)"',old_html)[0]
    except:old_href=None
    try:old_type = re.findall(r'type="(.*?)"',old_html)[0]
    except:old_type = None
    #--------------------------------------------------------get all par
    # Tag name: text between '<' and the first space, or '>' for bare tags.
    try:
        bq = re.findall(r'<(.+?) ',old_html)[0]
    except:
        bq = re.findall(r'<(.+?)>',old_html)[0]
    new_elements = driver.find_elements_by_tag_name(bq)
    end_element = new_elements[0]
    end_index = 0
    tmp_score = 0
    # Score every candidate; keep the highest-scoring one.
    for i in range(len(new_elements)):
        score = 0
        new_id = new_elements[i].get_attribute("id")
        new_name = new_elements[i].get_attribute("name")
        new_class = new_elements[i].get_attribute("class")
        new_text = new_elements[i].text
        new_value = new_elements[i].get_attribute("value")
        new_onclick = new_elements[i].get_attribute("onclick")
        new_style = new_elements[i].get_attribute("style")
        new_placeholder = new_elements[i].get_attribute("placeholder")
        new_href = new_elements[i].get_attribute("href")
        try:new_type = re.findall(r'type="(.*?)"',new_elements[i].get_attribute("outerHTML"))[0]
        except:new_type = None
        score += likescore(old_id, new_id)
        score += likescore(old_name, new_name)
        score += likescore(old_class, new_class)
        score += likescore(old_text, new_text)
        score += likescore(old_value, new_value)
        score += likescore(old_onclick, new_onclick)
        # Styles are compared with whitespace stripped so reformatting
        # by the browser does not lower the score.
        score += likescore(str(old_style).replace(' ',''), str(new_style).replace(' ',''))
        score += likescore(old_placeholder, new_placeholder)
        score += likescore(old_href, new_href)
        score += likescore(old_type,new_type)
        if score > tmp_score:
            end_element = new_elements[i]
            end_index = i
            tmp_score = score
    new_html = end_element.get_attribute("outerHTML")
    new_tmp = 'tag name' #use id,name
    new_tmp_value = bq
    new_index = end_index
    return [end_element,new_html,new_tmp,new_tmp_value,new_index]
def getelement(driver,icon):
    """Locate the element registered under *icon*, self-healing on failure.

    Looks the element up in the elements sheet, tries the stored locator
    first, and on failure falls back to search_new() to re-identify the
    element by similarity and persist the refreshed locator.
    NOTE(review): `element` is a positional record from get_elements();
    indices used here look like [0]=strategy, [1]=value, [2]=index,
    [3]=stored outerHTML, [4]=sheet row -- confirm against get_elements().
    """
    time1 = time.time()
    element = get_elements(icon)
    if element == 'error':
        raise Exception
    print('find: %s ...'%icon)
    old_html = element[3]
    try:
        # Normalise locator-strategy aliases to Selenium's By names.
        if element[0] == 'link_text': element[0] = 'link text'
        if element[0] == 'class' or element[0] == 'class_name': element[0] = 'class name'
        el = driver.find_elements(element[0],element[1])[element[2]]
        print('success in %s s'%str(time.time()-time1)[:5])
        if old_html == '':
            # First successful find: remember the element's HTML for future healing.
            html_element = el.get_attribute("outerHTML")
            input_html_element(element[-1],html_element)
        return el
    except Exception:
        print('find_faild,begin fix....')
        if element[-2] == '':
            # No stored HTML to heal from -- the very first locator was wrong.
            print('we find this element:%s are you first set,but set wrong.Please set right in first time.'%icon)
            exit(0)
        # Re-identify the element by similarity scoring and persist the new locator.
        newel_detail = search_new(driver,old_html)
        newel = newel_detail[0]
        new_html = newel_detail[1]
        new_tmp = newel_detail[2]
        new_tmp_value = newel_detail[3]
        new_index = newel_detail[4]
        update_elements(element[4],html=new_html,tmp=new_tmp,tmp_value=new_tmp_value,index=new_index)
        print('find success in %s s'%str(time.time()-time1)[:5])
        return newel
# Load the configured elements.xls path; create a fresh wqrfnium.ini with an
# empty path on first run (or when the file is unreadable).
try:
    cfp = configparser.ConfigParser()
    cfp.read('wqrfnium.ini')
    elements_xls_path = cfp.get('Excel','elements_xls_path')
except: # create wqrfnium.ini
    cfp = configparser.ConfigParser()
    cfp["Excel"] = {"elements_xls_path":""}
    with open('wqrfnium.ini','w') as fp:
        cfp.write(fp)
    elements_xls_path = cfp.get('Excel','elements_xls_path')
def begin_wqrf(path):
    """Point wqrfnium at a custom elements.xls location.

    If *path* has no xls-like extension it is treated as a directory and
    'elements.xls' is appended. When the path actually changes, a new
    workbook is created there and wqrfnium.ini is rewritten.
    """
    global elements_xls_path
    if 'xls' not in path.split('.')[-1]:
        # Treat the argument as a directory, not a workbook file.
        if path[-1] == '/':
            path += 'elements.xls'
        else:
            path += '/elements.xls'
    if elements_xls_path != path:
        print("----------------------------------")
        print("You are changeing the elements_xls_path,the new path is %s now!"%path)
        print("你正在自定义元素表elements.xls的存放路径,新路径为:%s"%path)
        print("You'd better handle the old elements_xls : %s by yourself."%elements_xls_path)
        print("你最好处理掉旧的元素表:%s"%elements_xls_path)
        create_xls(path)
        # Persist the new location so future runs pick it up.
        cfp.set("Excel","elements_xls_path",path)
        with open("wqrfnium.ini","w+") as f:
            cfp.write(f)
        elements_xls_path = path
# First run (empty path in the ini): pick a platform default location,
# create the workbook and persist the path. Otherwise just report whether
# the configured path is the default or a custom one.
if elements_xls_path == '': #no path
    # begin to set the elements
    # 'arwin'/'inux' match Darwin (macOS) and Linux regardless of case.
    if 'arwin' in platform.system() or 'inux' in platform.system() :
        elements_xls_path =os.environ['HOME']+"/elements.xls"
    else:
        elements_xls_path = "C:\\elements.xls"
    print('You are first use wqrfnium,it is creating elements.xls,you must edit elements.xls and play wqrfnium after!')
    print('这是您第一次使用wqrfnium,它正在自动创建元素表elements.xls,您必须在这次启动后再去使用wqrfnium和添加元素到elements.xls等操作!')
    print('Your elements.xls tmp path is %s' % elements_xls_path)
    print('你的元素表elements.xls的临时路径是 %s'%elements_xls_path)
    print("First colum is element's icon,second is element's tmp_find_method,third is element's tmp_find_value,forth is element's index,the last is element's html_element")
    print("元素表:第一列为元素的标识,第二列为元素的临时定位方式,第三列为元素的临时定位值,第四列为元素的下标,最后一列元素的html标签源码")
    print("You can also read the README to get help or wirte email to 1074321997@qq.com")
    print("你也可以去阅读README.md来获取更多帮助,或者发送邮件到1074321997@qq.com联系作者")
    print('You can use code [begin_wqrf("your diy new elements_xls_path ")] to diy your elements_xls_path!')
    print('你可以在文件开头添加代码[begin_wqrf("你的元素表elements.path的自定义存放路径")] 来 自定义 你的元素表存放路径!')
    create_xls(elements_xls_path)
    cfp.set("Excel", "elements_xls_path", elements_xls_path)
    with open("wqrfnium.ini", "w+") as f:
        cfp.write(f)
else:
    if 'arwin' in platform.system() or 'inux' in platform.system() :
        if elements_xls_path == os.environ['HOME']+"/elements.xls": # default path
            print('Your elements.xls tmp path is default : %s'%elements_xls_path)
            print('你的elements.xls 的临时存放路径为默认:%s'%elements_xls_path)
        else:
            print('Your elements.xls tmp path is diy by yourself : %s' % elements_xls_path)
            print('你的elements.xls 的自定义存放路径为:%s' % elements_xls_path)
    else:
        if elements_xls_path == "C:\\elements.xls": # default path
            print('Your elements.xls tmp path is default : %s'%elements_xls_path)
            print('你的elements.xls 的临时存放路径为默认:%s' % elements_xls_path)
        else:
            print('Your elements.xls tmp path is diy by yourself : %s' % elements_xls_path)
            print('你的elements.xls 的自定义存放路径为:%s' % elements_xls_path)
| [
"847160625@qq.com"
] | 847160625@qq.com |
4ee39fb041156b51bf7fa191a298758ceaab2ef0 | bcda171a045e86f8437c9dd5f37a0a1ac2316063 | /anonymization/newtest.py | 1ed85056501ce83aeffe09c6b85218895595e2aa | [] | no_license | blackfeathering/CommunityDeception-master | f1127a9d22869a3bbc8db40ca99c89c0e98279d5 | c49dafd8774e029c0d57aa4f63ad192aacafa07f | refs/heads/master | 2023-04-03T03:41:13.651533 | 2021-03-15T06:16:28 | 2021-03-15T06:16:28 | 255,219,882 | 0 | 0 | null | 2021-03-29T22:52:54 | 2020-04-13T03:13:20 | Python | UTF-8 | Python | false | false | 4,824 | py | import logging.config
import sys
import cmath
from typing import List
from settings import master
from igraph import Graph
from igraph.clustering import VertexClustering
from utils.counter_pre import count_security_index_by_pre
from utils.pre_counter import count_pre_security_index
from utils.counter import count_security_index
from utils.timer import time_mark
import time
logging.config.dictConfig(master.LOGGING_SETTINGS)
logger = logging.getLogger('normal')
class NewtestCommunityCombine(object):
    def __init__(self, graph, edges_sum, detection_func, func_args, interval, partitions=None,
                 path=None, index0=2, index1=0, **kwargs):
        """Record the graph, the community-detection settings and the two
        community ids (index0, index1) that this combine run will merge."""
        self.__graph = graph
        self.__edges_sum = edges_sum
        self.__detection_func = detection_func
        self.__func_args = func_args
        self.__interval = interval
        self.__partitions = partitions
        self.__path = path
        self.__community_index_0 = index0
        self.__community_index_1 = index1
        # Working state; populated by __preprocess / __set_necessary_info.
        self.__edge_set = None
        self.__degree_list = None
        self.__vertex_list = None
        self.__vertex_part = None
        self.__edge_added_list = None
        self.__partitions_expected = None
        self.__partitions_expected_degree: List[int] = list()
        self.__partitions_expected_volume: List[int] = list()
        self.__sorted_partitions_expected: List[List[int]] = list()
        self.__degree_distribute: List[int] = list()
        # Wall-clock bookkeeping; __end_time is set by __quit.
        self.__start_time = time.time()
        self.__end_time = None
    def __start(self):
        """Log a run header: graph stats, detection settings and the sizes of
        the two communities selected for merging."""
        logger.info("CommunityCombine")
        logger.info(f'Time : {time_mark(self.__start_time)}')
        logger.info(f'Graph: {self.__path}')
        logger.info(f'Info : {self.__graph.vcount()} {self.__graph.ecount()}')
        logger.info(f'Edges: {self.__edges_sum}')
        logger.info(f'Func : {self.__detection_func.__name__}')
        logger.info(f'Args : {self.__func_args}')
        logger.info(f'Gap : {self.__interval}')
        logger.info(f'Parts: {len(self.__partitions)}')
        logger.info("Community1")
        subgraph0 = self.__partitions.subgraph(self.__community_index_0)
        logger.info(f'Community index: {self.__community_index_0}, '
                    f'Info : {subgraph0.vcount()} {subgraph0.ecount()}')
        logger.info("Community2")
        subgraph1 = self.__partitions.subgraph(self.__community_index_1)
        logger.info(f'Community index: {self.__community_index_1}, '
                    f'Info : {subgraph1.vcount()} {subgraph1.ecount()}')
        logger.info("=" * 60)
    def __quit(self):
        """Stamp the end time and log the run's total wall-clock duration."""
        self.__end_time = time.time()
        logger.info("=" * 60)
        logger.info(f'Time : {time_mark(self.__end_time)}')
        logger.info(f'Total: {(self.__end_time - self.__start_time):10.4f} s')
        logger.info("=" * 60)
        logger.info("\n\n")
    def __preprocess(self):
        """Cache the graph's edge set, run community detection if no partition
        was supplied, then derive the per-vertex working lists."""
        self.__edge_set = set(self.__graph.get_edgelist())
        if not self.__partitions:
            self.__partitions = self.__detection_func(self.__graph, **self.__func_args)
        self.__set_necessary_info()
    def __set_necessary_info(self):
        """Collect degree/index/side lists for the two target communities and
        build the expected post-merge partition.

        NOTE(review): relies on igraph VertexClustering private fields
        (_membership, _graph, _len) -- verify against the igraph version in use.
        """
        v_degree = list()
        v_index = list()
        v_partation = list()
        memberships = self.__partitions._membership
        # Ensure index_0 < index_1 so later relabeling is deterministic.
        if self.__community_index_0 > self.__community_index_1:
            a = self.__community_index_1
            self.__community_index_1 = self.__community_index_0
            self.__community_index_0 = a
        for index in range(len(memberships)):
            if memberships[index] == self.__community_index_0:
                v_index.append(index)
                v_degree.append(self.__graph.degree(index))
                v_partation.append(0)
            if memberships[index] == self.__community_index_1:
                v_index.append(index)
                v_degree.append(self.__graph.degree(index))
                v_partation.append(1)
        self.__degree_list = v_degree
        self.__vertex_list = v_index
        self.__vertex_part = v_partation
        # The merged community's final id is self.__community_index_1.
        partation_expected = VertexClustering(graph=self.__partitions._graph, membership=list(self.__partitions._membership))
        # Fold community index_1 into index_0 ...
        for i in range(len(partation_expected._membership)):
            if partation_expected._membership[i] == self.__community_index_1:
                partation_expected._membership[i] = self.__community_index_0
        # ... then reuse the freed id index_1 for the former last community,
        # keeping community ids contiguous after the merge.
        for i in range(len(partation_expected._membership)):
            if partation_expected._membership[i] == partation_expected._len - 1:
                partation_expected._membership[i] = self.__community_index_1
        partation_expected._len -= 1
        #print(partation_expected._membership)
        self.__partitions_expected = partation_expected
| [
"1960554271@qq.com"
] | 1960554271@qq.com |
13a4f3ce6cf13557eb0b81be5c554c8af70bd323 | 6984724d0466d477635b23d073affa9b00f01f67 | /Tasks/Ramanenka_Tasks/HT6/app_Calc.py | 139762ac73cc6b004c125c7310934ab7e8c2ccb9 | [] | no_license | RomanPutsilouski/M-PT1-37-21 | 202414fac782e6c68f741e55f9b7697f0c974f45 | ceef9b4e6bcff2a9033615ec761f0e2e73c9467e | refs/heads/main | 2023-05-30T21:10:22.404817 | 2021-06-30T00:26:29 | 2021-06-30T00:26:29 | 348,462,785 | 1 | 0 | null | 2021-06-05T15:44:27 | 2021-03-16T19:06:57 | Python | UTF-8 | Python | false | false | 257 | py | from ht6_calculator_with_brackets import recurs
"""Enter the expression or continue with default expression"""
expression = '(25 -(5- (1-2))/(5-8))'
# equation = input('Expression is: \n')
results = float(recurs(expression))
print(f'Result is: {results}') | [
"margoroma2010@gmail.com"
] | margoroma2010@gmail.com |
8608678850cf6031586f8b1bce7e8531244232c5 | 7869035b72807394154285d307e0597ee16f11d8 | /src/data_loader.py | 2a23407ac8c03daa931088d7b07b81b5ff04a48b | [] | no_license | tiffany70072/TokenPositioning | cb74edae92e19c16f8ca763935e56b0f2e698b85 | a2ab63640a2aff1abfccaa1c1486d8a97026ef0b | refs/heads/master | 2022-07-19T11:21:04.716882 | 2020-04-17T06:02:18 | 2020-04-17T06:02:18 | 254,995,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | import numpy as np
import os
from sklearn.model_selection import train_test_split
def load_data(task, data_name, data_type):
    """Load encoder/decoder arrays for *task* from ../data/<data_name>.

    Args:
        task: one of "autoenc-last" or "token-posi".
        data_name: dataset directory name under ../data.
        data_type: "train" or "valid"; selects which .npy split to read.

    Returns:
        (encoder_data, decoder_data, decoder_output), where decoder_output is
        decoder_data shifted one time step for teacher forcing.

    Raises:
        ValueError: if *task* is not a supported task.
    """
    if task in ("autoenc-last", "token-posi"):
        assert data_type in ("train", "valid"), "no this data type."
        data_path = os.path.join("../data", data_name)
        encoder_data = np.load(os.path.join(data_path, "encoder_%s.npy" % data_type))
        decoder_data = np.load(os.path.join(data_path, "decoder_%s.npy" % data_type))
        assert encoder_data.shape[0] == decoder_data.shape[0], "data size not match."
        decoder_output = set_decoder_output_data(decoder_data)
        return encoder_data, decoder_data, decoder_output
    else:
        # `raise <str>` was a TypeError at runtime in Python 3; raise a real
        # exception so callers actually see the intended message.
        raise ValueError("No this task for load_data.")
def set_decoder_output_data(decoder_input):
    """Build decoder targets from decoder inputs.

    Each row is shifted one step to the left (the leading start token drops
    out), the vacated last position is zero-filled, and a trailing singleton
    axis is appended so Keras sees shape (N, T, 1). The input is not mutated.
    """
    shifted = np.zeros_like(decoder_input)
    shifted[:, :-1] = decoder_input[:, 1:]
    return shifted[:, :, np.newaxis]
"""
def cut_validation(self):
# TODO: cut training, validation and testing
split_result = data_reader.data_split(self.encoder_in, self.decoder_in, self.decoder_out)
self.encoder_in = split_result[0]
self.decoder_in = split_result[1]
self.decoder_out = split_result[2]
self.encoder_in_valid = split_result[3][:50000] # TODO: Deal with too many data.
self.decoder_in_valid = split_result[4][:50000]
self.decoder_out_valid = split_result[5][:50000]
self.encoder_in_test = split_result[6]
self.decoder_in_test = split_result[7]
self.decoder_out_test = split_result[8]
self.encoder_in = split_result[0]#[:3000]
self.decoder_in = split_result[1]#[:3000]
self.decoder_out = split_result[2]#[:3000]
print("(Cut validation) training size:", self.encoder_in.shape)
print("(Cut validation) validation size:", self.encoder_in_valid.shape)
print("(Cut validation) testing size:", self.encoder_in_test.shape)
""" | [
"tiffany70072@gmail.com"
] | tiffany70072@gmail.com |
d2f27c55bbc9eed109b72828c5be2aad86fb4cd3 | 3cd680e0372f942affeb948eedca8e08d9bfb743 | /22.py | c9a9f60726386d6ffe5ecf4bcdc7f5f02fe04839 | [] | no_license | ug2454/PythonPractice | cb507e380b32ecba14b355a3bd60769a4682b4ab | cbf7211e00d46f166246d5932661a6f110cc1cf0 | refs/heads/master | 2022-11-09T03:52:57.971095 | 2020-06-14T12:11:51 | 2020-06-14T12:11:51 | 272,194,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | import max
numbers=[19,20,30]
print(max.max(numbers)) | [
"u.garg14@gmail.com"
] | u.garg14@gmail.com |
84fdc9040b3bcc55c94270233da3cce4c9b669d5 | babc56e88a3b5f5038be70ad676d5bd8f1bbf0d2 | /wind_direction_byo.py | 94bc6600dd5986d16cb2cf6d96ba20ac2a7f7738 | [] | no_license | VicenteYago/CustomWeatherStation | 873405ca16aa0b6f4f291cbc0068a6ea10aef745 | c655f947cca2cd0f8827c18f6f7a7c4c11ef4d43 | refs/heads/master | 2022-11-13T06:48:05.736830 | 2020-06-30T00:43:07 | 2020-06-30T00:43:07 | 269,812,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | from gpiozero import MCP3008
import time
import math
# Wind-vane ADC on MCP3008 channel 0.
adc = MCP3008(channel=0)
# NOTE(review): `count` and `values` are never used below -- confirm they can
# be removed.
count = 0
values = []
# Voltages the vane's resistor network can produce (3.3 V reference).
volts = [0.4, 1.4, 1.2, 2.8,
         2.9, 2.2, 2.5, 1.8,
         2.0, 0.7, 0.8, 0.1,
         0.3, 0.2, 0.6, 2.7]
# Voltage -> compass bearing in degrees (16 sectors of 22.5 degrees).
# NOTE(review): 90.5 breaks the 22.5-degree grid -- likely a typo for 90.0.
volts_dic = {
    0.4: 0.0,
    1.4: 22.5,
    1.2: 45.0,
    2.8: 67.5,
    2.7: 90.5,
    2.9: 112.5,
    2.2: 135.0,
    2.5: 157.5,
    1.8: 180.0,
    2.0: 202.5,
    0.7: 225.0,
    0.8: 247.5,
    0.1: 270.0,
    0.3: 292.5,
    0.2: 315.0,
    0.6: 337.5
}
def get_average(angles):
sin_sum = 0.0
cos_sum = 0.0
for angle in angles:
r = math.radians(angle)
sin_sum += math.sin(r)
cos_sum += math.cos(r)
flen = float(len(angles))
s = sin_sum / flen
c = cos_sum / flen
arc = math.degrees(math.atan(s / c))
average = 0.0
if s > 0 and c > 0:
average = arc
elif c < 0:
average = arc + 180
elif s < 0 and c > 0:
average = arc + 360
return 0.0 if average == 360 else average
def get_value(length = 5):
    """Sample the vane for *length* seconds and return the circular-mean
    wind direction in degrees; readings with unknown voltages are skipped."""
    data = []
    print("Measuring wind direction for %d seconds..." % length)
    start_time = time.time()
    while time.time() - start_time <= length:
        # Scale the 0..1 ADC reading to volts and round to match the table keys.
        wind = round(adc.value*3.3,1)
        if not wind in volts_dic:
            print("Unknown value :", str(wind))
        else:
            data.append(volts_dic[wind])
    return get_average(data)
# Sample forever, printing a fresh 5-second averaged direction each pass.
while True:
    print(get_value())
| [
"="
] | = |
3d6c10f42425778b851063b600ddb7ceddf3622d | 161e4fad71b23ac5514f8cc8c04b97ff29039cf2 | /Array/Buy_Sell_Stock.py | 7ca3171b5ee36527ea4e438f7ffb002bbdda2c3b | [] | no_license | yash872/PyDsa | 726d43a0730e9143593327f180fab3eb3367d281 | a3046231c466f2ec5cae94129d2c15d21a082b86 | refs/heads/main | 2023-03-06T12:12:49.731899 | 2021-02-04T17:14:28 | 2021-02-04T17:14:28 | 332,211,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | '''
Best Time to Buy and Sell Stock
You are given an array prices where prices[i] is the price of a given stock on the ith day.
You want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock.
Return the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.
Example 1:
Input: prices = [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.
Note that buying on day 2 and selling on day 1 is not allowed because you must buy before you sell.
'''
#------------------------------
# Time-> O(N) | Space-> O(1)
#------------------------------
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Best profit from one buy followed by one sell; 0 if none exists.

        Single pass: track the cheapest price seen so far and the best
        spread achievable by selling at the current price.
        """
        best = 0
        cheapest = float('inf')
        for current in prices:
            if current < cheapest:
                cheapest = current
            elif current - cheapest > best:
                best = current - cheapest
        return best
| [
"noreply@github.com"
] | yash872.noreply@github.com |
46b305d71e12ec7393424848fdb3b864a16ff25c | c2a168ec9e91415eeadd53ba6042e614c3e8460c | /benchmark_features/hpopt_1/hpop_test_1/ht_13.py | c6733fb7f930bc4ee0b82563d4b43470ae436f78 | [] | no_license | LiYanChalmers/BoschProductionLine | 530098a9de0d08332511b24a31cdd4b4ec5473fb | de864e55be0e8cd174ccacb06afc77e3dc9ec42a | refs/heads/master | 2020-03-21T20:29:14.134812 | 2018-09-03T08:10:08 | 2018-09-03T08:10:08 | 139,010,159 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | # -*- coding: utf-8 -*-
"""
Template for CV parameter search
Tasks:
1. CV
2. Train model
3. Predict on test set
4. Save
a. CV results
b. models trained in CV
c. model trained on the whole train set
d. predictions on test set
To-do:
1. Use models in CV to predict on test set, and save the predictions
a. Rewrite the CV function
b. Overhead of prediction should be small
c. RAM requirement should be small if #columns is not too large
d. In some cases, may need many columns, RAM requirement may be high.
So not implementing this idea now.
"""
import sys
sys.path.insert(0, 'bosch_helper')
from bosch_helper import *
#%% Set parameter
param_id = 13
random_state = 90859
# XGBoost hyperparameters for this sweep point (id 13).
param = {'subsample': 0.95, 'silent': 1, 'objective': 'binary:logistic', 'nthread': 20, 'min_child_weight': 5.5, 'max_depth': 15, 'lambda': 4, 'eta': 0.025, 'colsample_bytree': 0.5, 'booster': 'gbtree', 'base_score': 0.0058, 'alpha': 0}
np.random.seed(random_state)
#%% Load data
# Feature matrix holds train and test stacked under a two-level index.
x = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'x')
y_train = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'y_train')
x_train = x.loc['train']
x_test = x.loc['test']
#%%
# Repeated stratified CV (5 folds x 3 repeats) with the chosen parameters.
cv_results, clfs, running_time = \
    cross_val_predict_skf_rm_xgb(param, x_train, y_train, 
        num_boost_round=80, 
        n_splits=5, 
        n_repeats=3, 
        random_state=np.random.randint(10**6), 
        verbose_eval=True)
results = {'clfs_cv': clfs, 'results_cv': cv_results, 'running_time_cv': running_time}
#%% Train on model
# Refit a single model on the full training set.
dtrain = xgb.DMatrix(x_train, label=y_train)
param['seed'] = np.random.randint(10**6)
clf = xgb.train(param, dtrain, 
                num_boost_round=60, 
                feval=mcc_eval, evals=[(dtrain, 'train')])
y_train_pred = clf.predict(dtrain)
# Find best threshold 
# Grid-search the probability cutoff that maximises MCC on the train set.
thresholds = np.linspace(0.01, 0.99, 400)
mcc = np.array([matthews_corrcoef(y_train, y_train_pred>thr) for thr in thresholds])
best_threshold = thresholds[mcc.argmax()]
results['best_threshold_train'] = best_threshold
results['mcc_max_train'] = mcc.max()
results['clf_train'] = clf
#%% Predict on test set
dtest = xgb.DMatrix(x_test)
y_test_pred = clf.predict(dtest)
y_test_pred_int = (y_test_pred>best_threshold).astype(int)
# Write the Kaggle submission and archive every artefact of the run.
sub = pd.read_csv("sample_submission.csv.zip", index_col=0)
sub["Response"] = y_test_pred_int
sub.to_csv('ht_13.csv.gz', compression='gzip')
results['y_test_pred_prob'] = y_test_pred
results['y_test_pred_int'] = y_test_pred_int
save_pickle(results, 'ht_13.pickle')
| [
"li.yan.chalmers@gmail.com"
] | li.yan.chalmers@gmail.com |
5a3a47716a461cf0fbff4da09be385c1328fc34e | 66d915e0d9c0016d5bbb22946539b81866fecb45 | /Soma de numeros1.py | 75775660ac4254c3804a653dfe04dfded9325d39 | [
"MIT"
] | permissive | SricardoSdSouza/Curso-da-USP | 62e00a820b218cce24fb46ef89debd8f786ce66a | 6198c509c52bf6132f904cded2e12ae941f2b973 | refs/heads/main | 2023-06-02T00:19:53.006210 | 2021-06-14T19:57:30 | 2021-06-14T19:57:30 | 376,927,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | import math
# Read an integer and print the sum of its decimal digits.
# NOTE(review): input 0 is rejected as invalid even though its digit sum is 0.
numero = int(input('numero: '))
n=int(numero)
if numero > 0:
    soma = 0
    while numero != 0:
        # Peel off the least-significant digit and accumulate it.
        resto = numero % 10
        numero = (numero - resto) // 10
        soma = soma + resto
    print("A soma dos números(",n,")é = ",soma)
else:
    print('Número invalido...')
| [
"SricardoSdSouza@yahoo.com.br"
] | SricardoSdSouza@yahoo.com.br |
6d27c8039a8ce6ca14e65e11999fb3c5304f2563 | ef4a4c8de95516700134a45800238de9298e1485 | /zadacha3.py | ccb6d7317053767af297787dfcc42f5ddf4e9f3a | [] | no_license | nikolaj74-hub/lessons | a45d67d380982d245f5950fe6eef3041c7ffbd2e | 54437b8e8063668017d7e29612c0623adb8fce94 | refs/heads/master | 2023-01-23T19:11:18.680790 | 2020-12-04T13:46:02 | 2020-12-04T13:46:02 | 311,939,032 | 1 | 0 | null | 2020-12-04T13:42:39 | 2020-11-11T10:38:48 | Python | UTF-8 | Python | false | false | 1,624 | py | # Реализовать базовый класс Worker (работник), в котором определить атрибуты: name,
# surname, position (должность), income (доход). Последний атрибут должен быть
# защищенным и ссылаться на словарь, содержащий элементы: оклад и премия, например,
# {"wage": wage, "bonus": bonus}. Создать класс Position (должность) на базе класса Worker.
# В классе Position реализовать методы получения полного имени сотрудника (get_full_name) и
# дохода с учетом премии (get_total_income). Проверить работу примера на реальных данных
# (создать экземпляры класса Position, передать данные, проверить значения атрибутов,
# вызвать методы экземпляров
class Worker:
    """Base employee record: name, surname, position and an income dict."""

    def __init__(self, n, sn, pos, w, b):
        """Store personal data plus income split into wage and bonus."""
        self.position = pos
        self.name = n
        self.surname = sn
        self.incom = dict(wage=w, bonus=b)
class Position(Worker):
    """Worker specialisation that reports full name and total income."""

    def get_full_name(self):
        """Print the employee's full name ("name surname")."""
        print(f'{self.name} {self.surname}')

    def get_full_incom(self):
        """Print the total income (wage plus bonus)."""
        total = self.incom['wage'] + self.incom['bonus']
        print(f'доход ={total} тугр.')
# Smoke test: build one Position, inspect its attributes and call both methods.
a = Position('коля', 'трофимов', 'слесарь', 30000, 300)
print(a.name)
print(a.incom)
print(a.surname)
print(a.position)
a.get_full_name()
a.get_full_incom()
| [
"noreply@github.com"
] | nikolaj74-hub.noreply@github.com |
b676c5cba48c2e1efd64286543f5f6aadfef51fd | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/common/wotdecorators.py | 1554469a75cbd2eab8d57565f8457da484b5051a | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,832 | py | # 2017.08.29 21:52:48 Střední Evropa (letní čas)
# Embedded file name: scripts/common/wotdecorators.py
import inspect
from functools import update_wrapper
from debug_utils import LOG_WRAPPED_CURRENT_EXCEPTION, CRITICAL_ERROR
from time_tracking import LOG_TIME_WARNING
import time
import time_tracking
def noexcept(func):
    """Decorator: run *func*, logging and swallowing any exception it raises."""

    def wrapper(*args, **kwArgs):
        try:
            result = func(*args, **kwArgs)
        except:
            LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
        else:
            return result

    return wrapper
def nofail(func):
    """Decorator: any exception from *func* is logged and treated as fatal."""

    def wrapper(*args, **kwArgs):
        try:
            result = func(*args, **kwArgs)
        except:
            LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
            CRITICAL_ERROR('Exception in no-fail code')
        else:
            return result

    return wrapper
def exposedtoclient(func):
    """Decorator for client-exposed handlers: times each call, warns when it
    exceeds the tracking limit, and logs (swallowing) any exception."""

    def wrapper(*args, **kwArgs):
        try:
            started = time.time()
            result = func(*args, **kwArgs)
            elapsed = time.time() - started
            if elapsed > time_tracking.DEFAULT_TIME_LIMIT:
                LOG_TIME_WARNING(elapsed, context=(getattr(args[0], 'id', 0), func.__name__, args, kwArgs))
            return result
        except:
            LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)

    return wrapper
def singleton(cls):
    """Class decorator: instantiate *cls* immediately, binding the class name
    to its single instance."""
    instance = cls()
    return instance
def decorate(func, dec):
    """Wrap *func* with callable *dec* while preserving its exact signature.

    Generates (via compile/eval) a shim `def name(<original signature>):
    return __dec(<same args>)` so introspection on the wrapped function still
    shows the original argument spec.
    NOTE(review): inspect.formatargspec was removed in Python 3.11; this
    helper only works on older interpreters -- confirm the target runtime.
    """
    argspec = inspect.getargspec(func)
    name = func.__name__
    signature = inspect.formatargspec(*argspec)
    # Same arg list but with default values stripped, for the call site.
    params = inspect.formatargspec(formatvalue=(lambda value: ''), *argspec)
    source = 'def %s%s: return __dec%s\n' % (name, signature, params)
    code = compile(source, '<decorator-gen>', 'single')
    env = {'__dec': dec}
    eval(code, env)
    return update_wrapper(env[name], func)
def decorator(dec):
    """Turn *dec* (a wrapper factory) into a signature-preserving decorator."""

    def wrapper(func):
        return decorate(func, dec(func))

    return wrapper
def condition(attributeName, logFunc = None, logStack = True):
    """Method decorator: run the method only while self.<attributeName> is
    truthy; otherwise optionally log the rejected call and return None."""

    def decorator(func):

        def wrapper(*args, **kwargs):
            # args[0] is self; the guard attribute is re-read on every call.
            attribute = getattr(args[0], attributeName)
            if not bool(attribute):
                if logFunc:
                    logFunc('Method condition failed', args, kwargs, stack=logStack)
                return
            return func(*args, **kwargs)

        return decorate(func, wrapper)

    return decorator
return decorator
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\wotdecorators.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:48 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
c177f0da14bb7731c15a9e25ad35b2bb78f5ca63 | 3d2192385e65889d20b74742755f5369d0d09161 | /stock_colis/models/__init__.py | da8dece232489928427446f10dfd1d1af8ea259d | [] | no_license | FIDINGSARL/audoune | 9ba746a9d7424a41f8775a6e30f42f2a97224edf | 39cecd44497d5fa227cc594a6bf5807eb14976d3 | refs/heads/main | 2023-06-18T09:49:13.778878 | 2021-06-30T15:06:51 | 2021-06-30T15:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | # -*- coding: utf-8 -*-
from . import stock_colis, stock_colis_request
| [
"macbook@MacBook-Pro-de-MacBook.local"
] | macbook@MacBook-Pro-de-MacBook.local |
016e33094e39966281d2775ad6be6442e4a27330 | 63e06ef221242c2c614750df02b4283989e13052 | /projeto_da_roca/users/migrations/0002_auto_20210521_1213.py | b49e9079706612918bcb18961c11420541017361 | [] | no_license | amandacl/Da_Roca | 97ada3b6abe6df25258a34f82954c07c597daae6 | b6187d62b91f06e0afb523a84194ad12467a89b4 | refs/heads/master | 2023-06-21T11:59:14.891738 | 2021-06-02T02:13:02 | 2021-06-02T02:13:02 | 368,898,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | # Generated by Django 3.2.3 on 2021-05-21 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter user/address fields: unique email and CPF, nullable house number."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='address',
            name='house_number',
            # NOTE(review): max_length has no effect on IntegerField (Django
            # ignores it) -- confirm whether a CharField or a validator was
            # intended. Left untouched: this migration is already committed.
            field=models.IntegerField(blank=True, max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='cpf',
            field=models.CharField(blank=True, max_length=11, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
    ]
| [
"matheus.noronha@solyd.com.br"
] | matheus.noronha@solyd.com.br |
864f6c8e44747b438bdd00945bd88e7a810108db | 6cd4d2923292004390a1b23dc26d0a7a4a7df223 | /DjangoRedis/manage.py | 9a25fcc6109e9b625d9a5bb7fcfab9c54f637263 | [] | no_license | Lyle101/docker_redis | 4cc85b6c5c5784c3d032d129810ce49a0e4b09cc | f3b9db02ce65794d84220286c805ba799c0e79dd | refs/heads/master | 2020-04-09T07:11:30.999829 | 2018-12-03T08:07:50 | 2018-12-03T08:07:50 | 160,144,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django entry point: select the settings module, then hand the
    # command-line arguments to Django's management command runner.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoPrj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"Chris.Lyle101@gmail.com"
] | Chris.Lyle101@gmail.com |
18d5a691ca86297e0db6536e331fc046f0aedd4b | 9d53da8fbd6d6760fb652e84687cf73ef1f3034d | /model/EventPointNetpp/nets.py | b5fdb7fc10cad171466eb6ce22481815099f0d63 | [] | no_license | HowoongJun/localfeature | 8a944256738e7f10f5e0564c499bf88afaf006ba | 0d17fca75d2f67c33652710250c3d0f07d7c8970 | refs/heads/main | 2023-08-27T19:57:10.071631 | 2021-10-28T06:53:30 | 2021-10-28T06:53:30 | 340,907,081 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,301 | py | ###
#
# @Brief nets.py
# @Details EventPointNetPP network
# @Org Robot Learning Lab(https://rllab.snu.ac.kr), Seoul National University
# @Author Howoong Jun (howoong.jun@rllab.snu.ac.kr)
# @Date Sep. 01, 2021
# @Version v0.1
#
###
import torch
class CEventPointNetPP(torch.nn.Module):
    """EventPointNet++: a VGG-style CNN with a keypoint head (65 channels)
    and an L2-normalised 128-d descriptor head.

    Input is a single-channel image tensor; three 2x2 max-pools reduce the
    spatial resolution by 8 before the heads.
    NOTE(review): the 65-channel keypoint head looks SuperPoint-style
    (8x8 cells + dustbin) -- confirm against the decoding code.
    """

    def __init__(self):
        super(CEventPointNetPP, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        # Shared backbone: 1 -> 32 -> 64 -> 128 -> 128 channels.
        self.conv1_1 = torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = torch.nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.conv3_1 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv4_1 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        # Descriptor head (128 channels) and keypoint head (65 channels).
        self.convDsc1 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convDsc2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convKp1 = torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.convKp2 = torch.nn.Conv2d(256, 65, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        """Return (keypoint logits, unit-norm descriptors) for image batch x."""
        x = self.relu(self.conv1_1(x))
        x = self.relu(self.conv1_2(x))
        x = self.pool(x)
        x = self.relu(self.conv2_1(x))
        x = self.relu(self.conv2_2(x))
        x = self.pool(x)
        x = self.relu(self.conv3_1(x))
        x = self.relu(self.conv3_2(x))
        x = self.pool(x)
        x = self.relu(self.conv4_1(x))
        x = self.relu(self.conv4_2(x))
        kpt = self.relu(self.convKp1(x))
        kpt = self.convKp2(kpt)
        desc = self.relu(self.convDsc1(x))
        desc = self.convDsc2(desc)
        # L2-normalise descriptors over the channel dimension.
        descNorm = torch.norm(desc, p=2, dim=1)
        desc = desc.div(torch.unsqueeze(descNorm, 1))
        return kpt, desc
| [
"prestoxic@gmail.com"
] | prestoxic@gmail.com |
e05f09d686cf4fc1af26ff93dd112cabeaac5381 | 60e2b0f728bf7b497e241afdacffaa8ee9203213 | /breast_cancer/breast_cancer_load.py | c7e6f6f48a1be53f7a7d856378b2b85efd42ffca | [] | no_license | yamadayoshi/deep_learning | 43897d59dc3f89ecd4820050b96acacbf653408e | 78bbf5b12011a5d17375b50b75203251003cb3d0 | refs/heads/master | 2021-02-19T01:02:57.934801 | 2020-03-10T20:02:45 | 2020-03-10T20:02:45 | 245,260,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import numpy as np
from keras.models import model_from_json
#read json file
file = open('breast_model.json', 'r')
network = file.read()
file.close()
#load model from json and weights
model = model_from_json(network)
model.load_weights('breast_weights.h5')
novo = np.array([[10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15, 10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15, 10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15]])
previsao = model.predict(novo) > 0.8 | [
"andre.yamada@digiage.com"
] | andre.yamada@digiage.com |
ecb41fb56f8890f13f0b34b3b3a1c309800192b5 | a4957a563bbd3ce322e0cd0fec8e0a37650b5092 | /calculatorv2.py | 289ec6ac8e829cd174995e3ee1cb013560bce9ea | [] | no_license | CodingFluent/Simple-CalculatorV2-Py | 66632717a94d0b27a5c1994b6d5eaf062ee793f7 | 3af99215b4eb8b40cabdc840172506825e27f4e0 | refs/heads/master | 2022-12-10T19:00:53.607598 | 2020-08-31T06:01:54 | 2020-08-31T06:01:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | a = float(input("Enter First Number => "))
op = str(input("Enter Operation (+, -, *, /, %) => "))
b = float(input("Enter Second Number => "))
if op == "+":
sum = a + b
total = str(f"The sum of {a} + {b} is {sum}")
elif op == "-":
diff = a - b
total = str(f"The difference of {a} - {b} is {diff}")
elif op == "*":
mul = a * b
total = str(f"The multiplication of {a} * {b} is {mul}")
elif op == "/":
div = a / b
total = str(f"The division of {a} / {b} is {div}")
elif op == "%":
mod = a % b
total = str(f"The module of {a} % {b} is {mod}")
else:
total = str("Please Enter an Valid Operation.......")
print (total) | [
"noreply@github.com"
] | CodingFluent.noreply@github.com |
c8fc1b630938f22c3762d543e169f25db756d2bd | fb23a842c99f9a5238a9c6dfb3ffa6eee5c3e47d | /Salt-api/python版示例/V2/diaoyong.py | f2f32adde67640cdb991d2d8e8fc1ff6f921dc29 | [] | no_license | nanzhushan/Saltstack | 45a492855860a5664f1c0a2099935ae95a17d0de | d9fc85a7be1861b13e6de55de9b6951e405fffb7 | refs/heads/master | 2021-05-31T16:37:09.928023 | 2016-04-11T07:04:10 | 2016-04-11T07:04:10 | 39,339,839 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | #!/usr/bin/python
#coding:utf8
from saltapi import *
#import saltapi
# Run a shell command on all salt minions ('*') through the salt-api wrapper.
sapi = saltAPI()
#params = {'client':'local', 'fun':'test.ping', 'tgt':'*'}
#params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'hello'}
# 'arg1' can also be written as 'arg'.
#params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'hostname'}
params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'touch /root/cc.txt;touch cc1.txt'}
test = sapi.saltCmd(params)
#test = sapi.saltCmd()
print test
| [
"624867243@qq.com"
] | 624867243@qq.com |
425be2dac09edaf397a3412fc17709976e67201f | de7a39129bf471d4d4be25c65174916a505146e6 | /book/examples/weave_examples_simple.py | 1dc25d425bcf85bc9a527aca248b38e6572a0caa | [] | no_license | jdh2358/py4science | a6da01de9cb16709828bfd801bf7faf847f346bb | a56c742ec2e0a31c2251468d9947ebaf707346d7 | refs/heads/master | 2016-09-05T22:18:38.520426 | 2009-12-05T17:47:26 | 2009-12-05T17:47:26 | 1,418,846 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | """Some simple examples of weave.inline use"""
from weave import inline,converters
import Numeric as nx
from pylab import rand
#-----------------------------------------------------------------------------
# Returning a scalar quantity computed from a Numeric array.
def trace(mat):
"""Return the trace of a matrix.
"""
nrow,ncol = mat.shape
code = \
"""
double tr=0.0;
for(int i=0;i<nrow;++i)
tr += mat(i,i);
return_val = tr;
"""
return inline(code,['mat','nrow','ncol'],
type_converters = converters.blitz)
# In-place operations on arrays in general work without any problems
def in_place_mult(num,mat):
"""In-place multiplication of a matrix by a scalar.
"""
nrow,ncol = mat.shape
code = \
"""
for(int i=0;i<nrow;++i)
for(int j=0;j<ncol;++j)
mat(i,j) *= num;
"""
inline(code,['num','mat','nrow','ncol'],
type_converters = converters.blitz)
def main():
zz = nx.zeros([10,10])
print 'tr(zz)=',trace(zz)
oo = nx.ones([4,4],nx.Float)
print 'tr(oo)=',trace(oo)
aa = rand(128,128)
print 'tr(aa)=',trace(aa)
print 'oo:',oo
in_place_mult(3,oo)
print '3*oo:',oo
if __name__=='__main__':
main()
| [
"jdh2358@gmail.com"
] | jdh2358@gmail.com |
8da0a0f25fb1f42f41d710abf1ca39dc617b67dc | 5e4a1d08d199722fde585723d06644e9999c144e | /input.py | af348c0972728af30a24ce077b2d8f0d4bcd81bf | [] | no_license | JustDoIT83/CTI110 | ca30948cd5dc4e30103a4adfb681f5090363462d | 3817c2b935eb166f0086026f0cf73c7e96b2bb8d | refs/heads/master | 2020-04-02T10:14:06.081690 | 2018-10-23T13:25:33 | 2018-10-23T13:25:33 | 154,330,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # get the users name, age, and income
name = input('What is your name?: ')
age = input('What is your age?: ')
income = input('What is your income?: ')
# display the date
print('here is the data you entered')
print('Name:', name)
print('Age:', age)
print('Income:', income)
| [
"noreply@github.com"
] | JustDoIT83.noreply@github.com |
badbe251c1d5142ea01e96e916591f5b6330a6ca | 202b1b82a2b7a70250415ba5d9bd1f6b277a6e84 | /share/qt/extract_strings_qt.py | acf54d0b19bbf49be33497e58552501d9f56933d | [
"MIT"
] | permissive | cmkcoin/cmkcore | 92cc4dcaf63b1d282ea2c2aa15ede822c9c7b0e7 | 5c2a3222ef901d1c6d9315177ba79e3f5094f2a6 | refs/heads/master | 2020-03-15T04:26:42.979962 | 2019-10-19T03:55:45 | 2019-10-19T03:55:45 | 131,965,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/dashstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *dash_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("cmk-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| [
"cmkdev@vps.cmk.io"
] | cmkdev@vps.cmk.io |
2e9d8f40ea73bf3323400de1ac413068f242e213 | 313978a9a5a1f0824a6f2bfb948e1a4ec0225213 | /4-iteração/lazy iterable e iterator.py | 7337513a1d77423de94a8c51d7d35f8de1e0a3f6 | [] | no_license | wallacex19/python | 71ae310a6a6ec2f1c8c80d4ad2bee7db2d391d13 | 99f11249fec5e001e10b2a155c2608e9b8b420ec | refs/heads/master | 2023-04-08T22:41:34.259091 | 2021-04-23T20:58:18 | 2021-04-23T20:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | # O objeto range em Python 3 (xrange em Python 2) pode ser executado em loop como qualquer outro iterável:
for n in range(3):
print(n)
# E como o range é iterável, podemos obter um iterador a partir dele:
iter(range(3))
# R:<range_iterator object at 0x7fe173542ed0>
# mas objetos range não sao 6-iteradores por si mesmos, nos nao podemos chamar next em um objeto range
next(range(3))
# R:Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: 'range' object is not an iterator
# E, ao contrário de um iterador, podemos fazer um loop em um objeto de intervalo sem consumi-lo:
numbers = range(3)
tuple(numbers)
# R:(0, 1, 2)
tuple(numbers)
# R:(0, 1, 2)
# Se fizéssemos isso com um iterador, não obteríamos nenhum elemento na segunda vez em que fizemos o loop:
numbers = iter(range(3))
tuple(numbers)
# R:(0, 1, 2)
tuple(numbers)
#R:()
# Ao contrário dos objetos zip, enumerate ou generator, os objetos range não são 6-iteradores.
#-- ENTÃO O QUE É O RANGE? --##
# O objeto range é "lazy" em certo sentido, porque não gera todos os números que "contém" quando o criamos. Em vez disso, ele nos fornece esses números conforme precisamos deles ao fazer um loop sobre ele.
#
# Aqui está um objeto range e um generator (que é um tipo de iterador):
numbers = range(1_000_000)
square = (n**2 for n in numbers) | [
"pedromadureira000@gmail.com"
] | pedromadureira000@gmail.com |
218046a18f59c8cc6a566f6a16807e74d5250298 | a4e502e9487cf17c53f9f931ec0dbc12168fea52 | /packages/pyre/platforms/PackageManager.py | 0877270914d7a2f1326787f57abfbb1ac0125b31 | [
"BSD-3-Clause"
] | permissive | bryanvriel/pyre | bdc5dd59c46d53ff81f2ece532b9073ac3b65be1 | 179359634a7091979cced427b6133dd0ec4726ea | refs/heads/master | 2021-09-28T00:10:26.454282 | 2018-11-11T16:42:07 | 2018-11-11T16:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# the framework
import pyre
# declaration
class PackageManager(pyre.protocol, family='pyre.platforms.packagers'):
"""
Encapsulation of host specific information
"""
# requirements
@pyre.provides
def prefix(self):
"""
The package manager install location
"""
@pyre.provides
def installed(self):
"""
Retrieve available information for all installed packages
"""
@pyre.provides
def packages(self, category):
"""
Provide a sequence of package names that provide compatible installations for the given
package {category}. If the package manager provides a way for the user to select a
specific installation as the default, care should be taken to rank the sequence
appropriately.
"""
@pyre.provides
def info(self, package):
"""
Return information about the given {package}
The type of information returned is determined by the package manager. This method
should return success if and only if {package} is actually fully installed.
"""
@pyre.provides
def contents(self, package):
"""
Generate a sequence of the contents of {package}
The type of information returned is determined by the package manager. Typically, it
contains the list of files that are installed by this package, but it may contain other
filesystem entities as well. This method should return a non-empty sequence if and only
if {pakage} is actually fully installed
"""
@pyre.provides
def configure(self, packageInstance):
"""
Dispatch to the {packageInstance} configuration procedure that is specific to the
particular implementation of this protocol
"""
# framework obligations
@classmethod
def pyre_default(cls, **kwds):
"""
Build the preferred host implementation
"""
# the host should specify a sensible default; if there is nothing there, this is an
# unmanaged system that relies on environment variables and standard locations
from .Bare import Bare
# return the support for unmanaged systems
return Bare
# end of file
| [
"michael.aivazis@orthologue.com"
] | michael.aivazis@orthologue.com |
defbb28049ad7d422477ecaaabdf790640d21b17 | c5e6a4e0264409f4dc5db9993c8c0cc058d4365a | /8_juego_ahorcado.py | c36c4f69dcc49dcd6f1cc0a09e02d34d9823de2c | [] | no_license | carlosafdz/programacion_python | 05c91eb858ce12b9fd2e9e3fd4e902c66ea2ee2d | 17b0db4dcf923d6de3fdfd9c9e78b1d1a50651ea | refs/heads/master | 2023-05-24T20:32:22.614224 | 2020-03-21T18:26:30 | 2020-03-21T18:26:30 | 248,345,937 | 0 | 0 | null | 2023-05-22T23:22:23 | 2020-03-18T21:22:31 | Python | UTF-8 | Python | false | false | 2,486 | py | import random
IMAGENES = [
'''
+=======+
| |
|
|
|
|
======
''',
'''
+=======+
| |
O |
|
|
|
======
''',
'''
+=======+
| |
O |
| |
|
|
======
''',
'''
+=======+
| |
O |
/| |
|
|
======
''',
'''
+=======+
| |
O |
/|\ |
|
|
======
''',
'''
+=======+
| |
O |
/|\ |
/ |
|
======
''',
'''
+=======+
| |
O |
/|\ |
/ \ |
|
======
''',
''' '''
]
PALABRAS = ["lavadora","secadora","pepel","computadora"]
def palabra_random():
idx = random.randint(0,len(PALABRAS)-1)
return PALABRAS[idx]
def mostrar_tablero(palabra_escondida,intentos):
print(IMAGENES[intentos])
print('')
print(palabra_escondida)
print("*---**---**---**---**---**---**---**---**---**---*")
def main():
palabra = palabra_random()
palabra_escondida = ["_"] * len(palabra)
intentos = 0
while True:
mostrar_tablero(palabra_escondida,intentos)
letra = input("escoge una letra: ")
indice_letras = []
for i in range(len(palabra)):
if palabra[i] == letra:
indice_letras.append(i)
if len(indice_letras) == 0:
intentos = intentos + 1
if intentos == 7:
mostrar_tablero(palabra_escondida,intentos)
print(f'Perdiste..... la palabra correcta era {palabra}')
break
else:
for i in indice_letras:
palabra_escondida[i] = letra
indice_letras = []
try:
palabra_escondida.index("_")
except ValueError:
print(" ")
print("ganaste!!!")
break
def pruebas_tablero():
mostrar_tablero("palabra",0)
mostrar_tablero("palabra",1)
mostrar_tablero("palabra",2)
mostrar_tablero("palabra",3)
mostrar_tablero("palabra",4)
mostrar_tablero("palabra",5)
mostrar_tablero("palabra",6)
if __name__ == "__main__":
main()
#pruebas_tablero() | [
"carlos.afdzf@hotmail.com"
] | carlos.afdzf@hotmail.com |
3ff18915969da0e6505bd95f4d68b34cfdb72eb5 | e2cb95d74ff13247a706a4a949e22fb397efe7b7 | /A2 - Digital Makeup Transfer/src/faceWarp.py | 9a20045a0b4934f6294b0a14c9d6558b1da7a672 | [] | no_license | Aditi-Singla/Digital-Image-Analysis | 945beb48bfbd1f7bb75d76059d5faafcfe88881f | 8fc08ee86c5a168e3dc6d3b22c4be5bf2195458d | refs/heads/master | 2020-04-01T00:36:28.232484 | 2018-07-18T18:45:20 | 2018-07-18T18:45:20 | 152,704,480 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | #!/usr/bin/env python
import numpy as np
import cv2
import sys
import scipy.spatial
# Read points from text file
def readPoints(path) :
points = [];
with open(path) as file :
for line in file :
x, y = line.split()
points.append((np.float32(x), np.float32(y)))
return points
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def applyAffineTransform(src, srcTri, dstTri, size) :
# Given a pair of triangles, find the affine transform.
warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )
# Apply the Affine Transform just found to the src image
dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )
return dst
def warpTriangle(img1, img, t1, t) :
# Find bounding rectangle for each triangle
r1 = cv2.boundingRect(np.float32([t1]))
r = cv2.boundingRect(np.float32([t]))
# Offset points by left top corner of the respective rectangles
t1Rect = []
tRect = []
for i in xrange(0, 3):
tRect.append(((t[i][0] - r[0]),(t[i][1] - r[1])))
t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
# Get mask by filling triangle
mask = np.zeros((r[3], r[2], 3), dtype = np.float32)
cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0);
# Apply warpImage to small rectangular patches
img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
size = (r[2], r[3])
warpImage = applyAffineTransform(img1Rect, t1Rect, tRect, size)
# Alpha blend rectangular patches
imgRect = warpImage
# Copy triangular region of the rectangular patch to the output image
img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] = img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] * ( 1 - mask ) + imgRect * mask
if __name__ == '__main__' :
filename1 = sys.argv[1]
filename2 = sys.argv[2]
# Read images
img1 = cv2.imread(filename1);
img2 = cv2.imread(filename2);
# Convert Mat to float data type
img1 = np.float32(img1)
img2 = np.float32(img2)
# Read array of corresponding points
points1 = readPoints(filename1 + '.txt')
points2 = readPoints(filename2 + '.txt')
tri = scipy.spatial.Delaunay(np.array(points1))
# Allocate space for final output
imgMorph = np.zeros(img2.shape, dtype = img2.dtype)
np.savetxt('tri.txt', np.uint8(tri.vertices), fmt='%d')
for l in tri.vertices :
x = int(l[0])
y = int(l[1])
z = int(l[2])
t1 = [points1[x], points1[y], points1[z]]
t2 = [ points2[x], points2[y], points2[z] ]
# Morph one triangle at a time.
warpTriangle(img1, imgMorph, t1, t2)
# Display Result
cv2.imwrite('warped.jpg', np.uint8(imgMorph))
| [
"aditisksingla@gmail.com"
] | aditisksingla@gmail.com |
d5408abdee9094c62381748340a424756eef3c8c | 9d61daee8ec86d1c3b85ab577c4d0ffc5c4c4a7c | /code kata/summm.py | d8927006714e70f2f8448e2ce4032b3d9075ff48 | [] | no_license | Bhuvaneswaribai/guvi | ec3d2a922059859c778b78920d52936a44edbca8 | ab6bb1193af49dbc431d5eb7ae19050d11aa622c | refs/heads/master | 2020-06-03T00:11:14.636796 | 2019-07-04T11:30:00 | 2019-07-04T11:30:00 | 191,355,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py |
nuumber=int(input())
a=list(map(int,input().split()))
sum=0
for i in a:
sum+=i
print(sum)
| [
"noreply@github.com"
] | Bhuvaneswaribai.noreply@github.com |
71c917f941655f147f642dba17548ed3889df18d | 3328e95f5a8498ab366aec380f0e1822826ba7a9 | /puppy.py | 5ecb1fddd03ca00ec9d69d3d7ed91e3934b08270 | [] | no_license | Abhiram1214/opencv | 6e9dd53cc08c54a8e1ce6f0c297fda451ddb7c31 | 653a9ccddbc188679bc9afe8f83d98a93b47cf3d | refs/heads/main | 2022-12-26T12:03:41.308652 | 2020-10-11T12:01:53 | 2020-10-11T12:01:53 | 301,957,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | import cv2
import numpy as np
'''
img = cv2.imread(r'C:\Users\cvenkatanagasatya\Pictures\Open CV\Computer-Vision-with-Python\DATA\puppy.jpg')
while True:
cv2.imshow('puppy', img)
#if we waited for milli second and we pressed the esc key
if cv2.waitKey(1) & 0xFF == 27:
break
cv2.destroyAllWindows()
'''
######################
#####Function#########
#####################
def draw_circle(event, x,y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
cv2.namedWindow(winname='Images') #this is connecting the below window to callback function
cv2.setMouseCallback('Images', draw_circle) #windows name with draw_circle
######################################
##### Showing images in OpenCV#########
#######################################
img = np.zeros((512,512,3), np.int8)
while True:
cv2.imshow("Images", img)
if cv2.waitKey(20) & 0xFF==27:
break
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | Abhiram1214.noreply@github.com |
c514c9650b93f135aac41cc8d73c464420d4b318 | f7e1ada65e270fe2961df46179798ba522949e5c | /main1.py | 37d1622998e259cd937474a1130d59c95377e6c3 | [] | no_license | fabian6768/WebsiteManager | 36fad06af38298f25592fd2680837c6a1eb6a9b9 | d10148e83e5533bbb3ece9018fd75db33a036138 | refs/heads/master | 2021-01-12T08:29:55.355610 | 2016-12-15T21:49:31 | 2016-12-15T21:49:31 | 76,597,511 | 0 | 1 | null | 2020-10-01T11:34:14 | 2016-12-15T21:28:28 | Python | UTF-8 | Python | false | false | 3,578 | py | #This Is A Program
from csv import *
from tkinter import *
from tkinter import messagebox
import webbrowser as wb
a=1
class Second(object):
def __init__(self):
self.t = Tk()
self.t.title("Website Library")
self.t.geometry("500x350")
self.t.configure(background="#ddaf7e")
self.book = []
self.urls = []
self.button = []
self.i = 0
self.j = 0
with open("website.csv", newline="") as csv:
self.csvf = reader(csv)
for row in self.csvf:
self.book.append(row[0])
self.urls.append(row[1])
for name in self.book:
self.button.append(Button(self.t, text=name, font="Verdana 15", width=16))
self.button[self.i].pack(pady=2)
self.i += 1
self.i = 0
for url in self.urls:
self.button[self.i].configure(command=lambda url=url: self.openwww(url))
self.i += 1
self.t.mainloop()
def openwww(self, url):
wb.open(url)
class Third(object):
def __init__(self):
self.t = Tk()
self.t.title("Website Library")
self.t.geometry("500x250")
self.t.configure(background="#ddaf7e")
self.first = Label(self.t, text="Name Of BookMark and second text box URL Of bookmark", font="Calibri 15", bg="#ddaf7e")
self.name = Label(self.t, text="Name :", font="Calibri 15", bg="#ddaf7e")
self.url = Label(self.t, text="URL :", font="Calibri 15", bg="#ddaf7e")
self.entry1 = Entry(self.t)
self.entry2 = Entry(self.t)
self.first.grid(row=0, columnspan=2)
self.name.grid(row=1, column=0, sticky=E)
self.url.grid(row=2, column=0, sticky=E)
self.entry1.grid(row=1, column=1, sticky=W)
self.entry2.grid(row=2, column=1, sticky=W)
self.getitall = Button(self.t, text="Get It All", font="Calibri 12", command=lambda: self.getit())
self.getitall.grid(row=3, column=1, sticky=W, padx=20)
self.t.mainloop()
def getit(self):
with open("website.csv", "a", newline="") as csv:
w = writer(csv)
w.writerow([self.entry1.get(), self.entry2.get()])
self.entry1.delete(0, END)
self.entry2.delete(0, END)
class WebsiteManager(object):
def __init__(self):
"""Creating The First Window That Holds Buttons"""
self.r = Tk()
self.r.title("Website Library 123")
self.r.geometry("500x250")
self.r.configure(background="#ddaf7e")
'''Configuring So that the First Window holds buttons'''
self.title = Label(self.r, text="Website Library", bg="#ddaf7e", font="Calibri 26").pack()
self.divider = Label(self.r, text=" "*100, bg="#ddaf7e").pack()
self.saved = Button(self.r, text="View Saved Websites", font="Verdana 15", command=lambda: self.newwind(1)).pack(pady=10)
self.addnew = Button(self.r, text="Add New Websites", font="Verdana 15", command=lambda: self.newwind(2)).pack(pady=10)
self.r.protocol("WM_DELETE_WINDOW", self.on_closing)
self.r.mainloop()
def on_closing(self):
global a
if messagebox.askokcancel("Quit", "Do you want to quit?"):
self.r.destroy()
a = 0
def newwind(self, option):
if option == 1:
self.r.destroy()
Second()
elif option == 2:
self.r.destroy()
Third()
def main():
while a == 1:
WebsiteManager()
if __name__ == "__main__":
main()
| [
"fabian6768@yahoo.com"
] | fabian6768@yahoo.com |
68caed12611a8b789a1964a22fb49575eca70c7f | 76d388b5d2e74ff0eda748c7868fadf0704cf700 | /tensorpack/utils/develop.py | 496de1dd245db766c3e4ba256ddb638d5e621b48 | [
"Apache-2.0"
] | permissive | jooyounghun/tensorpack | eebf0867e5a82ffd52660dccfbd34879b8d0f5af | 90cdae380c40a1e91f627520c4a739bd6ee3f18b | refs/heads/master | 2020-03-23T23:24:41.651089 | 2018-07-27T02:57:19 | 2018-07-27T02:57:19 | 142,232,523 | 1 | 0 | Apache-2.0 | 2018-07-25T01:45:06 | 2018-07-25T01:45:05 | null | UTF-8 | Python | false | false | 4,773 | py | # -*- coding: utf-8 -*-
# File: develop.py
# Author: tensorpack contributors
""" Utilities for developers only.
These are not visible to users (not automatically imported). And should not
appeared in docs."""
import os
import functools
from datetime import datetime
import importlib
import types
import six
from . import logger
def create_dummy_class(klass, dependency):
"""
When a dependency of a class is not available, create a dummy class which throws ImportError when used.
Args:
klass (str): name of the class.
dependency (str): name of the dependency.
Returns:
class: a class object
"""
class _DummyMetaClass(type):
# throw error on class attribute access
def __getattr__(_, __):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
@six.add_metaclass(_DummyMetaClass)
class _Dummy(object):
# throw error on constructor
def __init__(self, *args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
return _Dummy
def create_dummy_func(func, dependency):
"""
When a dependency of a function is not available, create a dummy function which throws ImportError when used.
Args:
func (str): name of the function.
dependency (str or list[str]): name(s) of the dependency.
Returns:
function: a function object
"""
if isinstance(dependency, (list, tuple)):
dependency = ','.join(dependency)
def _dummy(*args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, func))
return _dummy
def building_rtfd():
"""
Returns:
bool: if tensorpack is being imported to generate docs now.
"""
return os.environ.get('READTHEDOCS') == 'True' \
or os.environ.get('DOC_BUILDING')
def log_deprecated(name="", text="", eos=""):
"""
Log deprecation warning.
Args:
name (str): name of the deprecated item.
text (str, optional): information about the deprecation.
eos (str, optional): end of service date such as "YYYY-MM-DD".
"""
assert name or text
if eos:
eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
if name:
if eos:
warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
else:
warn_msg = "%s was deprecated. %s" % (name, text)
else:
warn_msg = text
if eos:
warn_msg += " Legacy period ends %s" % eos
logger.warn("[Deprecated] " + warn_msg)
def deprecated(text="", eos=""):
"""
Args:
text, eos: same as :func:`log_deprecated`.
Returns:
a decorator which deprecates the function.
Example:
.. code-block:: python
@deprecated("Explanation of what to do instead.", "2017-11-4")
def foo(...):
pass
"""
def get_location():
import inspect
frame = inspect.currentframe()
if frame:
callstack = inspect.getouterframes(frame)[-1]
return '%s:%i' % (callstack[1], callstack[2])
else:
stack = inspect.stack(0)
entry = stack[2]
return '%s:%i' % (entry[1], entry[2])
def deprecated_inner(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
name = "{} [{}]".format(func.__name__, get_location())
log_deprecated(name, text, eos)
return func(*args, **kwargs)
return new_func
return deprecated_inner
def HIDE_DOC(func):
func.__HIDE_SPHINX_DOC__ = True
return func
# Copied from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/lazy_loader.py
class LazyLoader(types.ModuleType):
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(LazyLoader, self).__init__(name)
def _load(self):
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
820708161506216faa57b389f2f0890d60afef5d | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible/modules/cron.py | 2424f5c065543ddd96be359b69a92e58495389fd | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 26,537 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cron
short_description: Manage cron.d and crontab entries
description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.
- When using symbols such as %, they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if env is set, the name of environment variable.
- Required if I(state=absent).
- Note that if name is not set and I(state=present), then a
new crontab entry will always be created, regardless of existing ones.
- This parameter will always be required in future releases.
type: str
user:
description:
- The specific user whose crontab should be modified.
- When unset, this parameter defaults to the current user.
type: str
job:
description:
- The command to execute or, if env is set, the value of environment variable.
- The command should not contain line breaks.
- Required if I(state=present).
type: str
aliases: [ value ]
state:
description:
- Whether to ensure the job or environment variable is present or absent.
type: str
choices: [ absent, present ]
default: present
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
- If it is absolute, it will typically be C(/etc/crontab).
- Many linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- To use the I(cron_file) parameter you must specify the I(user) as well.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
type: bool
default: no
minute:
description:
- Minute when the job should run (C(0-59), C(*), C(*/2), and so on).
type: str
default: "*"
hour:
description:
- Hour when the job should run (C(0-23), C(*), C(*/2), and so on).
type: str
default: "*"
day:
description:
- Day of the month the job should run (C(1-31), C(*), C(*/2), and so on).
type: str
default: "*"
aliases: [ dom ]
month:
description:
- Month of the year the job should run (C(1-12), C(*), C(*/2), and so on).
type: str
default: "*"
weekday:
description:
- Day of the week that the job should run (C(0-6) for Sunday-Saturday, C(*), and so on).
type: str
default: "*"
aliases: [ dow ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use I(special_time).
version_added: "1.0"
type: bool
default: no
special_time:
description:
- Special time specification nickname.
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
disabled:
description:
- If the job should be disabled (commented out) in the crontab.
- Only has effect if I(state=present).
type: bool
default: no
version_added: "2.0"
env:
description:
- If set, manages a crontab's environment variable.
- New variables are added on top of crontab.
- I(name) and I(value) parameters are the name and the value of environment variable.
type: bool
default: false
version_added: "2.1"
insertafter:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted after the declaration of specified environment variable.
type: str
version_added: "2.1"
insertbefore:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted before the declaration of specified environment variable.
type: str
version_added: "2.1"
requirements:
- cron (or cronie on CentOS)
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
- Patrick Callahan (@dirtyharrycallahan)
- Evan Kaufman (@EvanK)
- Luca Berruti (@lberruti)
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null"
ansible.builtin.cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
ansible.builtin.cron:
name: "an old job"
state: absent
- name: Creates an entry like "@reboot /some/job.sh"
ansible.builtin.cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
- name: Creates an entry like "PATH=/opt/bin" on top of crontab
ansible.builtin.cron:
name: PATH
env: yes
job: /opt/bin
- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
ansible.builtin.cron:
name: APP_HOME
env: yes
job: /srv/app
insertafter: PATH
- name: Creates a cron file under /etc/cron.d
ansible.builtin.cron:
name: yum autoupdate
weekday: "2"
minute: "0"
hour: "12"
user: root
job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
- name: Removes a cron file from under /etc/cron.d
ansible.builtin.cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
- name: Removes "APP_HOME" environment variable from crontab
ansible.builtin.cron:
name: APP_HOME
env: yes
state: absent
'''
RETURN = r'''#'''
import os
import platform
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.six.moves import shlex_quote
class CronTabError(Exception):
    """Raised when the user's crontab (or cron file) cannot be read or written."""
    pass
class CronTab(object):
    """
    CronTab object to write time based crontab file
    user - the user of the crontab (defaults to current user)
    cron_file - a cron file under /etc/cron.d, or an absolute path
    """
    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user
        self.root = (os.getuid() == 0)
        # Parsed crontab lines; populated by self.read() below.
        self.lines = None
        # Marker comment that tags jobs managed by this module.
        self.ansible = "#Ansible: "
        # Raw crontab text as found on the system (used for diff output).
        self.n_existing = ''
        self.cron_cmd = self.module.get_bin_path('crontab', required=True)
        if cron_file:
            if os.path.isabs(cron_file):
                self.cron_file = cron_file
                self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
            else:
                # Relative cron_file names are placed under /etc/cron.d.
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
                self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
        else:
            self.cron_file = None
        self.read()
    def read(self):
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                f = open(self.b_cron_file, 'rb')
                self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
                self.lines = self.n_existing.splitlines()
                f.close()
            except IOError:
                # cron file does not exist
                return
            except Exception:
                raise CronTabError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronTabError("Unable to read crontab")
            self.n_existing = out
            lines = out.splitlines()
            count = 0
            # Strip the informational header lines some cron implementations
            # prepend to `crontab -l` output (first three lines only).
            for l in lines:
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
                                 not re.match(r'# \(/tmp/.*installed on.*\)', l) and
                                 not re.match(r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                else:
                    # Also drop the header line from the raw text used for diffs.
                    pattern = re.escape(l) + '[\r\n]?'
                    self.n_existing = re.sub(pattern, '', self.n_existing, 1)
                count += 1
    def is_empty(self):
        # True when the parsed crontab holds no lines at all.
        if len(self.lines) == 0:
            return True
        else:
            return False
    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.
        """
        if backup_file:
            fileh = open(backup_file, 'wb')
        elif self.cron_file:
            fileh = open(self.b_cron_file, 'wb')
        else:
            # Per-user crontab: write to a temp file, then install it below.
            filed, path = tempfile.mkstemp(prefix='crontab')
            os.chmod(path, int('0644', 8))
            fileh = os.fdopen(filed, 'wb')
        fileh.write(to_bytes(self.render()))
        fileh.close()
        # return if making a backup
        if backup_file:
            return
        # Add the entire crontab back to the user crontab
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls. FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)
            if rc != 0:
                self.module.fail_json(msg=err)
        # set SELinux permissions
        if self.module.selinux_enabled() and self.cron_file:
            self.module.set_default_selinux_context(self.cron_file, False)
    def do_comment(self, name):
        # Build the "#Ansible: <name>" marker comment for a managed job.
        return "%s%s" % (self.ansible, name)
    def add_job(self, name, job):
        # Add the comment
        self.lines.append(self.do_comment(name))
        # Add the job
        self.lines.append("%s" % (job))
    def update_job(self, name, job):
        # Replace the job tagged with `name` (see _update_job).
        return self._update_job(name, job, self.do_add_job)
    def do_add_job(self, lines, comment, job):
        # Callback for _update_job: re-emit the marker plus the new job line.
        lines.append(comment)
        lines.append("%s" % (job))
    def remove_job(self, name):
        # Drop the job tagged with `name` together with its marker comment.
        return self._update_job(name, "", self.do_remove_job)
    def do_remove_job(self, lines, comment, job):
        # Callback for _update_job: emit nothing, removing the tagged job.
        return None
    def add_env(self, decl, insertafter=None, insertbefore=None):
        # Insert an environment declaration, optionally anchored relative to
        # another existing declaration.
        if not (insertafter or insertbefore):
            self.lines.insert(0, decl)
            return
        if insertafter:
            other_name = insertafter
        elif insertbefore:
            other_name = insertbefore
        other_decl = self.find_env(other_name)
        if len(other_decl) > 0:
            if insertafter:
                index = other_decl[0] + 1
            elif insertbefore:
                index = other_decl[0]
            self.lines.insert(index, decl)
            return
        self.module.fail_json(msg="Variable named '%s' not found." % other_name)
    def update_env(self, name, decl):
        return self._update_env(name, decl, self.do_add_env)
    def do_add_env(self, lines, decl):
        # Callback for _update_env: emit the replacement declaration.
        lines.append(decl)
    def remove_env(self, name):
        return self._update_env(name, '', self.do_remove_env)
    def do_remove_env(self, lines, decl):
        # Callback for _update_env: emit nothing, removing the declaration.
        return None
    def remove_job_file(self):
        # Delete the whole cron file; returns False when it does not exist.
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            # cron file does not exist
            return False
        except Exception:
            raise CronTabError("Unexpected error:", sys.exc_info()[0])
    def find_job(self, name, job=None):
        # attempt to find job by 'Ansible:' header comment
        comment = None
        for l in self.lines:
            if comment is not None:
                # Previous line was a marker; this line is its job.
                if comment == name:
                    return [comment, l]
                else:
                    comment = None
            elif re.match(r'%s' % self.ansible, l):
                comment = re.sub(r'%s' % self.ansible, '', l)
        # failing that, attempt to find job by exact match
        if job:
            for i, l in enumerate(self.lines):
                if l == job:
                    # if no leading ansible header, insert one
                    if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
                        self.lines.insert(i, self.do_comment(name))
                        # Third element True signals the caller a header was adopted.
                        return [self.lines[i], l, True]
                    # if a leading blank ansible header AND job has a name, update header
                    elif name and self.lines[i - 1] == self.do_comment(None):
                        self.lines[i - 1] = self.do_comment(name)
                        return [self.lines[i - 1], l, True]
        return []
    def find_env(self, name):
        # Return [index, line] of the "name=..." declaration, or [] if absent.
        for index, l in enumerate(self.lines):
            if re.match(r'^%s=' % name, l):
                return [index, l]
        return []
    def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
        # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
        job = job.strip('\r\n')
        if disabled:
            disable_prefix = '#'
        else:
            disable_prefix = ''
        if special:
            if self.cron_file:
                # /etc/cron.d entries carry an explicit user field.
                return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
            else:
                return "%s@%s %s" % (disable_prefix, special, job)
        else:
            if self.cron_file:
                return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
            else:
                return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)
    def get_jobnames(self):
        # Names of all managed jobs (text after each "#Ansible: " marker).
        jobnames = []
        for l in self.lines:
            if re.match(r'%s' % self.ansible, l):
                jobnames.append(re.sub(r'%s' % self.ansible, '', l))
        return jobnames
    def get_envnames(self):
        # Names of all environment declarations ("NAME=..." lines).
        envnames = []
        for l in self.lines:
            if re.match(r'^\S+=', l):
                envnames.append(l.split('=')[0])
        return envnames
    def _update_job(self, name, job, addlinesfunction):
        # Rebuild self.lines, delegating the tagged job to addlinesfunction
        # (which may re-emit it, replace it, or drop it).
        ansiblename = self.do_comment(name)
        newlines = []
        comment = None
        for l in self.lines:
            if comment is not None:
                addlinesfunction(newlines, comment, job)
                comment = None
            elif l == ansiblename:
                comment = l
            else:
                newlines.append(l)
        self.lines = newlines
        if len(newlines) == 0:
            return True
        else:
            return False  # TODO add some more error testing
    def _update_env(self, name, decl, addenvfunction):
        # Rebuild self.lines, delegating the matching declaration to
        # addenvfunction (replace or drop).
        newlines = []
        for l in self.lines:
            if re.match(r'^%s=' % name, l):
                addenvfunction(newlines, decl)
            else:
                newlines.append(l)
        self.lines = newlines
    def render(self):
        """
        Render this crontab as it would be in the crontab.
        """
        crons = []
        for cron in self.lines:
            crons.append(cron)
        result = '\n'.join(crons)
        if result:
            # cron requires the file to end with a newline.
            result = result.rstrip('\r\n') + '\n'
        return result
    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations for listing another user's crontab.
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, '-l')
    def _write_execute(self, path):
        """
        Return the command line for writing a crontab
        """
        user = ''
        if self.user:
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                # These platforms require installing the file as the target user.
                return "chown %s %s ; su '%s' -c '%s %s'" % (
                    shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
def main():
    """Module entry point: reconcile the requested cron job or environment
    variable with the system crontab (or a /etc/cron.d file) and exit via
    module.exit_json / fail_json."""
    # The following example playbooks:
    #
    # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
    #
    # - name: do the job
    #   cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
    #
    # - name: no job
    #   cron: name="an old job" state=absent
    #
    # - name: sets env
    #   cron: name="PATH" env=yes value="/bin:/usr/bin"
    #
    # Would produce:
    # PATH=/bin:/usr/bin
    # # Ansible: check dirs
    # * * 5,2 * * ls -alh > /dev/null
    # # Ansible: do the job
    # * * 5,2 * * /some/dir/job.sh
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str'),
            user=dict(type='str'),
            job=dict(type='str', aliases=['value']),
            cron_file=dict(type='str'),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            backup=dict(type='bool', default=False),
            minute=dict(type='str', default='*'),
            hour=dict(type='str', default='*'),
            day=dict(type='str', default='*', aliases=['dom']),
            month=dict(type='str', default='*'),
            weekday=dict(type='str', default='*', aliases=['dow']),
            reboot=dict(type='bool', default=False),
            special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
            disabled=dict(type='bool', default=False),
            env=dict(type='bool', default=False),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['reboot', 'special_time'],
            ['insertafter', 'insertbefore'],
        ],
    )
    name = module.params['name']
    user = module.params['user']
    job = module.params['job']
    cron_file = module.params['cron_file']
    state = module.params['state']
    backup = module.params['backup']
    minute = module.params['minute']
    hour = module.params['hour']
    day = module.params['day']
    month = module.params['month']
    weekday = module.params['weekday']
    reboot = module.params['reboot']
    special_time = module.params['special_time']
    disabled = module.params['disabled']
    env = module.params['env']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    do_install = state == 'present'
    changed = False
    res_args = dict()
    warnings = list()
    if cron_file:
        # Warn on cron.d filenames that some cron daemons will ignore.
        cron_file_basename = os.path.basename(cron_file)
        if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
            warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
                            ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    crontab = CronTab(module, user, cron_file)
    module.debug('cron instantiated - name: "%s"' % name)
    if not name:
        module.deprecate(
            msg="The 'name' parameter will be required in future releases.",
            version='2.12', collection_name='ansible.builtin'
        )
    if reboot:
        module.deprecate(
            msg="The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
            version='2.12', collection_name='ansible.builtin'
        )
    if module._diff:
        # Snapshot the pre-change crontab for --diff output.
        diff = dict()
        diff['before'] = crontab.n_existing
        if crontab.cron_file:
            diff['before_header'] = crontab.cron_file
        else:
            if crontab.user:
                diff['before_header'] = 'crontab for user "%s"' % crontab.user
            else:
                diff['before_header'] = 'crontab'
    # --- user input validation ---
    if env and not name:
        module.fail_json(msg="You must specify 'name' while working with environment variables (env=yes)")
    if (special_time or reboot) and \
            (True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
        module.fail_json(msg="You must specify time and date fields or special time.")
    # cannot support special_time on solaris
    if (special_time or reboot) and platform.system() == 'SunOS':
        module.fail_json(msg="Solaris does not support special_time=... or @reboot")
    if cron_file and do_install:
        if not user:
            module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
    if job is None and do_install:
        module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
    if (insertafter or insertbefore) and not env and do_install:
        module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
    if reboot:
        # Legacy alias for special_time=reboot.
        special_time = "reboot"
    # if requested make a backup before making a change
    if backup and not module.check_mode:
        (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
        crontab.write(backup_file)
    if crontab.cron_file and not do_install:
        # state=absent with a cron_file removes the whole file and exits early.
        if module._diff:
            diff['after'] = ''
            diff['after_header'] = '/dev/null'
        else:
            diff = dict()
        if module.check_mode:
            changed = os.path.isfile(crontab.cron_file)
        else:
            changed = crontab.remove_job_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
    if env:
        # Manage an environment declaration rather than a job line.
        if ' ' in name:
            module.fail_json(msg="Invalid name for environment variable")
        decl = '%s="%s"' % (name, job)
        old_decl = crontab.find_env(name)
        if do_install:
            if len(old_decl) == 0:
                crontab.add_env(decl, insertafter, insertbefore)
                changed = True
            if len(old_decl) > 0 and old_decl[1] != decl:
                crontab.update_env(name, decl)
                changed = True
        else:
            if len(old_decl) > 0:
                crontab.remove_env(name)
                changed = True
    else:
        if do_install:
            for char in ['\r', '\n']:
                if char in job.strip('\r\n'):
                    warnings.append('Job should not contain line breaks')
                    break
            job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
            old_job = crontab.find_job(name, job)
            if len(old_job) == 0:
                crontab.add_job(name, job)
                changed = True
            if len(old_job) > 0 and old_job[1] != job:
                crontab.update_job(name, job)
                changed = True
            if len(old_job) > 2:
                # find_job adopted/renamed an existing header; rewrite the job.
                crontab.update_job(name, job)
                changed = True
        else:
            old_job = crontab.find_job(name)
            if len(old_job) > 0:
                crontab.remove_job(name)
                changed = True
    # no changes to env/job, but existing crontab needs a terminating newline
    if not changed and crontab.n_existing != '':
        if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
            changed = True
    res_args = dict(
        jobs=crontab.get_jobnames(),
        envs=crontab.get_envnames(),
        warnings=warnings,
        changed=changed
    )
    if changed:
        if not module.check_mode:
            crontab.write()
        if module._diff:
            diff['after'] = crontab.render()
            if crontab.cron_file:
                diff['after_header'] = crontab.cron_file
            else:
                if crontab.user:
                    diff['after_header'] = 'crontab for user "%s"' % crontab.user
                else:
                    diff['after_header'] = 'crontab'
            res_args['diff'] = diff
    # retain the backup only if crontab or cron file have changed
    if backup and not module.check_mode:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)
    if cron_file:
        res_args['cron_file'] = cron_file
    module.exit_json(**res_args)
    # --- should never get here
    module.exit_json(msg="Unable to execute cron task.")
# Invoke the module entry point only when executed as a script (as Ansible does).
if __name__ == '__main__':
    main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
941b70169ea0201bf4913ade211f0567886e5ca5 | 4c85452e12ad3d8ca08f91df21ff4c6812a9e3b7 | /tests/invalid_boards.py | 7ca7cb9830cd75f57154384786df9870880d65b6 | [
"MIT"
] | permissive | lesander/takuzu | 452ad7b0b8abc76647b8542118c91be6e3cb8ee7 | d0a913ce57a3234eaf17afd3c858f17c3f1e31e5 | refs/heads/master | 2022-07-05T17:01:48.117658 | 2020-05-21T23:00:25 | 2020-05-21T23:00:25 | 265,910,685 | 1 | 0 | MIT | 2022-06-22T02:06:48 | 2020-05-21T17:28:17 | Python | UTF-8 | Python | false | false | 299 | py | from takuzu import Takuzu
# Every board below violates a Takuzu structural invariant (empty grid,
# non-list rows, ragged rows, ...) and must be rejected by the constructor.
boards = [[], [None], [1, 0, None], [[], []], [[1, 0]], [[1, 0], [1]]]

for candidate in boards:
    try:
        Takuzu(board=candidate, debug=True)
    except AssertionError:
        # Rejected as expected; move on to the next invalid board.
        continue
    raise Exception('board={} should throw AssertionError'.format(candidate))
| [
"lesander@users.noreply.github.com"
] | lesander@users.noreply.github.com |
bcded7ca3347b631cb06ccb49aa49c5ef2291909 | 6cb18c62758bfbf783d3fabe851d1c4d9f323483 | /setup.py | 9319f44e05f51de89cc40224949e07be98a9e018 | [
"MIT"
] | permissive | bruinxiong/performer-pytorch | 68e505ff5e59d35e339b23661feef377795fd2df | c368b5e4efd46f72e2abaa655dc813021f911014 | refs/heads/main | 2023-01-04T02:25:42.898296 | 2020-10-26T22:41:09 | 2020-10-26T22:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from setuptools import setup, find_packages
# Search keywords shown on PyPI.
KEYWORDS = [
    'artificial intelligence',
    'attention mechanism',
    'efficient attention',
    'transformers',
]

# Runtime dependencies.
REQUIREMENTS = [
    'pytorch-fast-transformers>=0.3.0',
    'torch>=1.6',
    'einops>=0.3',
]

# Trove classifiers describing project maturity, audience, and license.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
]

# Package metadata for the performer-pytorch distribution.
setup(
    name='performer-pytorch',
    packages=find_packages(exclude=['examples']),
    version='0.1.4',
    license='MIT',
    description='Performer - Pytorch',
    author='Phil Wang',
    author_email='lucidrains@gmail.com',
    url='https://github.com/lucidrains/performer-pytorch',
    keywords=KEYWORDS,
    install_requires=REQUIREMENTS,
    classifiers=CLASSIFIERS,
)
"lucidrains@gmail.com"
] | lucidrains@gmail.com |
21a7d146b5d95f1fee3c58b4e611dd502e854c74 | 83fb26fc9fe96c5821c7a13468f205ca6eb4fcda | /ICP exercise and assignment/A01/A01_exercise1.py | 2662b1fc38669910f481aa07bc1481af8bf91817 | [] | no_license | zc2214/Introduction-to-Computer-Programming | e58355fc732a2eacf29aa5141573e64ef1c3f27e | 95f5e36f102c5ebeeb628b61c3fdad416082ab4f | refs/heads/main | 2023-08-11T23:44:54.766836 | 2021-09-22T14:45:54 | 2021-09-22T14:45:54 | 323,836,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # PROGRAMMING ASSIGNMENT 01
# Filename: 'exercise1.py'
#
# Write a program that does the following (in the specified order):
# 1. asks the user to input his family name
# 2. asks the user to input his given name
# 3. then, prints the message Hello <given name> <family name> !!!
#
# WRITE YOUR CODE AFTER THIS LINE
# Step 1 of the assignment: the family name is asked for first.
family_name = input("Please enter your family name: ")
# Step 2: then the given name.
given_name = input("Please enter your given name: ")
# Step 3: greet exactly as specified: "Hello <given name> <family name> !!!"
# (the original version omitted the trailing "!!!" and asked in the wrong order).
print("Hello", given_name, family_name, "!!!")
| [
"noreply@github.com"
] | zc2214.noreply@github.com |
9cc2c3e325d074bfd93da7cd26d488883eadd91a | dd83f3a356278cd5ede9efa5ab25a93e258ef6b7 | /slowfast/models/vit_helper.py | afa96024b9244b5160c7ff9fba7708ce3beda16c | [
"Apache-2.0"
] | permissive | XrosLiang/Motionformer | 9debfcaed5c68cce27ec3d1f5ebc409ae81066c5 | 890bded4139dc4b17e344ea9c090bf2de4dd2678 | refs/heads/main | 2023-06-02T16:50:06.222720 | 2021-06-12T11:38:24 | 2021-06-12T11:38:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,425 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright 2020 Ross Wightman
# Modified Model definition
"""Video models."""
from einops import rearrange, repeat
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _quadruple
from torch import einsum
from functools import partial
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from torch.hub import load_state_dict_from_url
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.registry import register_model
from . import performer_helper
from . import orthoformer_helper
from . import nystrom_helper
# Download URLs for pretrained ViT-B/16 and ViT-L/16 checkpoints hosted on the
# timm (pytorch-image-models) release page; consumed by load_pretrained() below.
default_cfgs = {
    'vit_1k': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
    'vit_1k_large': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth',
}
def qkv_attn(q, k, v):
    """Plain softmax attention: softmax(q @ k^T) @ v.

    No scaling, masking, or dropout is applied here; callers pre-scale q
    when 1/sqrt(d) scaling is wanted.
    """
    scores = einsum('b i d, b j d -> b i j', q, k)
    weights = scores.softmax(dim=-1)
    return einsum('b i j, b j d -> b i d', weights, v)
class JointSpaceTimeAttention(nn.Module):
    """Multi-head self-attention applied jointly over all space-time tokens.

    Every token (including CLS) attends to every other token in one dense
    attention matrix; no space/time factorization is performed.
    """

    def __init__(
        self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # 1/sqrt(d) scaling for dot-product attention.
        self.scale = head_dim ** -0.5
        self.head_dim = head_dim
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        """Return (output, attention); seq_len/num_frames/approx/num_landmarks
        are accepted for interface parity with the other attention modules and
        are not used here."""
        batch, tokens, channels = x.shape
        # Single projection, then split into per-head q, k, v:
        # (3, B, heads, N, head_dim).
        projected = self.qkv(x).reshape(
            batch, tokens, 3,
            self.num_heads,
            channels // self.num_heads
        ).permute(2, 0, 3, 1, 4)
        q, k, v = projected[0], projected[1], projected[2]
        # Dense attention over every (frame, patch) token pair.
        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = scores.softmax(dim=-1)
        attn = self.attn_drop(attn)
        out = (attn @ v).transpose(1, 2).reshape(batch, tokens, channels)
        out = self.proj(out)
        out = self.proj_drop(out)
        return out, attn
class DividedAttention(nn.Module):
    """Attention over one axis (space OR time) selected by einops patterns.

    The caller passes `einops_from`/`einops_to` patterns (see
    DividedSpaceTimeBlock) that fold the other axis into the batch dimension,
    so each call attends only along space or only along time. The CLS token
    always attends to all tokens.
    """
    def __init__(
        self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        # init to zeros (qkv) and ones (proj weight); with zero q/k/v the
        # module presumably starts as a near-zero contribution -- NOTE(review):
        # confirm this initialization is intentional for fine-tuning.
        self.qkv.weight.data.fill_(0)
        self.qkv.bias.data.fill_(0)
        self.proj.weight.data.fill_(1)
        self.proj.bias.data.fill_(0)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x, einops_from, einops_to, **einops_dims):
        # num of heads variable
        h = self.num_heads
        # project x to q, k, v values
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        # fold heads into the batch dimension: 'b n (h d) -> (b h) n d'
        q, k, v = map(lambda t: rearrange(
            t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
        # Scale q (qkv_attn itself applies no scaling)
        q *= self.scale
        # Take out cls_q, cls_k, cls_v (token 0 of each sequence)
        (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(
            lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
        # let CLS token attend to key / values of all patches across time and space
        cls_out = qkv_attn(cls_q, k, v)
        # rearrange across time or space
        q_, k_, v_ = map(
            lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims),
            (q_, k_, v_)
        )
        # expand CLS token keys and values across time or space and concat
        r = q_.shape[0] // cls_k.shape[0]
        cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v))
        k_ = torch.cat((cls_k, k_), dim=1)
        v_ = torch.cat((cls_v, v_), dim=1)
        # attention along the selected axis only
        out = qkv_attn(q_, k_, v_)
        # merge back time or space
        out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)
        # concat back the cls token
        out = torch.cat((cls_out, out), dim=1)
        # merge back the heads
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        # output projection
        x = self.proj(out)
        x = self.proj_drop(x)
        return x
class TrajectoryAttention(nn.Module):
    """Trajectory attention: spatial attention per frame followed by temporal
    attention over the per-frame aggregated ("trajectory") tokens.

    `approx` selects an approximation for the spatial stage: 'nystrom',
    'orthoformer', 'performer', or exact attention otherwise.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        # Projections for the spatial stage (qkv) and the temporal stage (proj_q/proj_kv).
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj_q = nn.Linear(dim, dim, bias=qkv_bias)
        self.proj_kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        # x: (B, 1 + F*P, C) with a leading CLS token, P patches per frame.
        B, N, C = x.shape
        P = seq_len
        F = num_frames
        h = self.num_heads
        # project x to q, k, v values
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        # Reshape: 'b n (h d) -> (b h) n d'
        q, k, v = map(
            lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
        # remove CLS token from q, k, v
        (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(
            lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
        # let CLS token attend to key / values of all patches across time and space
        cls_out = qkv_attn(cls_q * self.scale, k, v)
        cls_out = rearrange(cls_out, f'(b h) f d -> b f (h d)', f=1, h=h)
        if approx == "nystrom":
            ## Shared spatial landmarks
            q_, k_, v_ = map(
                lambda t: rearrange(t, f'b h p d -> (b h) p d', h=h), (q_, k_, v_))
            x = nystrom_helper.nystrom_spatial_attn(
                q_, k_, v_,
                landmarks=num_landmarks,
                num_frames=F,
                inv_iters=6,
                use_spatial_landmarks=True
            )
            x = rearrange(x, f'(b h) p f d -> b h p f d', f=F, h=h)
        elif approx == "orthoformer":
            x = orthoformer_helper.orthoformer(
                q_, k_, v_,
                num_landmarks=num_landmarks,
                num_frames=F,
            )
        elif approx == "performer":
            # Form random projection matrices:
            m = 256  # r = 2m, m <= d
            d = self.head_dim
            # Data-dependent seed so the projection is deterministic per input.
            seed = torch.ceil(torch.abs(torch.sum(q_) * performer_helper.BIG_CONSTANT))
            seed = torch.tensor(seed)
            projection_matrix = performer_helper.create_projection_matrix(
                m, d, seed=seed, device=q_.device, dtype=q_.dtype)
            q_, k_ = map(lambda t: rearrange(t, f'b h p d -> b p h d'), (q_, k_))
            q_prime = performer_helper.softmax_kernel_transformation(
                q_,
                is_query=True,
                projection_matrix=projection_matrix
            )
            k_prime = performer_helper.softmax_kernel_transformation(
                k_,
                is_query=False,
                projection_matrix=projection_matrix
            )
            q_prime, k_prime = map(
                lambda t: rearrange(t, f'b p h r -> b h p r'), (q_prime, k_prime))
            # Linear-attention style aggregation per frame, then normalization.
            k_prime = rearrange(k_prime, 'b h (f n) r -> b h f n r', f=F)
            v_ = rearrange(v_, 'b h (f n) d -> b h f n d', f=F)
            kv = torch.einsum('b h f n r, b h f n d -> b h f r d', k_prime, v_)
            qkv = torch.einsum('b h p r, b h f r d -> b h p f d', q_prime, kv)
            normaliser = torch.einsum('b h f n r -> b h f r', k_prime)
            normaliser = torch.einsum('b h p r, b h f r -> b h p f', q_prime, normaliser)
            x = qkv / normaliser.unsqueeze(-1)
        else:
            # Using full attention: each query attends to the patches of every
            # frame separately (softmax over patches within a frame).
            q_dot_k = q_ @ k_.transpose(-2, -1)
            q_dot_k = rearrange(q_dot_k, 'b q (f n) -> b q f n', f=F)
            space_attn = (self.scale * q_dot_k).softmax(dim=-1)
            attn = self.attn_drop(space_attn)
            v_ = rearrange(v_, 'b (f n) d -> b f n d', f=F, n=P)
            x = torch.einsum('b q f n, b f n d -> b q f d', attn, v_)
        # Temporal attention: query is the similarity-aggregated patch
        x = rearrange(x, '(b h) s f d -> b s f (h d)', b=B)
        # Take the diagonal (each token's aggregation at its own frame) as queries.
        x_diag = rearrange(x, 'b (g n) f d -> b g n f d', g=F)
        x_diag = torch.diagonal(x_diag, dim1=-4, dim2=-2)
        x_diag = rearrange(x_diag, f'b n d f -> b (f n) d', f=F)
        q2 = self.proj_q(x_diag)
        k2, v2 = self.proj_kv(x).chunk(2, dim=-1)
        q2 = rearrange(q2, f'b s (h d) -> b h s d', h=h)
        x, k2, v2 = map(
            lambda t: rearrange(t, f'b s f (h d) -> b h s f d', f=F, h=h), (x, k2, v2))
        q2 *= self.scale
        attn = torch.einsum('b h s d, b h s f d -> b h s f', q2, k2)
        attn = attn.softmax(dim=-1)
        # NOTE(review): v2 is computed but the weighted sum uses x, not v2 --
        # this matches the released implementation; confirm it is intentional.
        x = torch.einsum('b h s f, b h s f d -> b h s d', attn, x)
        x = rearrange(x, f'b h s d -> b s (h d)')
        # concat back the cls token
        x = torch.cat((cls_out, x), dim=1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn
def get_attention_module(
    attn_type='joint', dim=768, num_heads=12, qkv_bias=False,
    attn_drop=0., proj_drop=0.
):
    """Factory for the per-block attention module.

    Args:
        attn_type: 'joint' for JointSpaceTimeAttention or 'trajectory' for
            TrajectoryAttention.
        dim / num_heads / qkv_bias / attn_drop / proj_drop: forwarded to the
            chosen attention module's constructor.

    Returns:
        The constructed attention nn.Module.

    Raises:
        ValueError: if attn_type is not one of 'joint' or 'trajectory'.
    """
    if attn_type == 'joint':
        attn = JointSpaceTimeAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=proj_drop)
    elif attn_type == 'trajectory':
        attn = TrajectoryAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=proj_drop)
    else:
        # Previously an unknown attn_type fell through and surfaced as a
        # confusing UnboundLocalError; fail fast with an explicit message.
        raise ValueError(f"Unsupported attn_type: {attn_type!r}")
    return attn
class Block(nn.Module):
    """Transformer encoder block: pre-norm attention and pre-norm MLP, each
    wrapped in a residual connection with optional stochastic depth."""

    def __init__(
        self, dim=768, num_heads=12, attn_type='trajectory',
        mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
        drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = get_attention_module(
            attn_type=attn_type, dim=dim, num_heads=num_heads,
            qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop
        )
        # Stochastic depth on the residual branches; identity when rate is 0.
        if drop_path > 0.:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_width = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=hidden_width, act_layer=act_layer, drop=drop)

    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        """Apply attention then MLP, each as a drop-path residual branch.
        The attention module returns (output, attention); only the output
        feeds the residual."""
        attn_out = self.attn(
            self.norm1(x),
            seq_len=seq_len,
            num_frames=num_frames,
            approx=approx,
            num_landmarks=num_landmarks
        )[0]
        x = x + self.drop_path(attn_out)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class DividedSpaceTimeBlock(nn.Module):
    """Encoder block with divided space-time attention: a temporal attention
    pass (norm3 + timeattn) followed by a spatial attention pass (norm1 +
    attn), then a pre-norm MLP; each stage is residual."""
    def __init__(
        self, dim=768, num_heads=12, attn_type='divided',
        mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
        drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm
    ):
        super().__init__()
        # einops patterns: fold frames into batch for spatial attention, and
        # fold patches into batch for temporal attention (see DividedAttention).
        self.einops_from_space = 'b (f n) d'
        self.einops_to_space = '(b f) n d'
        self.einops_from_time = 'b (f n) d'
        self.einops_to_time = '(b n) f d'
        self.norm1 = norm_layer(dim)
        self.attn = DividedAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.timeattn = DividedAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on residual branches; identity when rate is 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.norm3 = norm_layer(dim)
    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        # Temporal attention first; note its residual is NOT drop-path'd,
        # while the spatial and MLP residuals are.
        time_output = self.timeattn(self.norm3(x),
                                    self.einops_from_time, self.einops_to_time, n=seq_len)
        time_residual = x + time_output
        # Spatial attention on the time-attended features.
        space_output = self.attn(self.norm1(time_residual),
                                 self.einops_from_space, self.einops_to_space, f=num_frames)
        space_residual = time_residual + self.drop_path(space_output)
        x = space_residual
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class Mlp(nn.Module):
    """Two-layer feed-forward block: linear -> activation -> dropout ->
    linear -> dropout. Hidden and output widths default to the input width."""

    def __init__(
        self, in_features, hidden_features=None,
        out_features=None, act_layer=nn.GELU, drop=0.
    ):
        super().__init__()
        # Fall back to the input width when widths are unset (or falsy).
        if not out_features:
            out_features = in_features
        if not hidden_features:
            hidden_features = in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply the feed-forward stack to x (last dim = in_features)."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding

    Splits an image into non-overlapping patches and linearly embeds each
    patch via a strided convolution (kernel == stride == patch size).
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        # Accept either an int or an explicit (H, W) tuple for both sizes.
        img_size = img_size if type(img_size) is tuple else to_2tuple(img_size)
        # BUG FIX: this line previously read
        #   `patch_size = img_size if type(patch_size) is tuple else ...`,
        # silently replacing a tuple patch_size with the image size (giving
        # num_patches == 1). Keep the given patch_size instead.
        patch_size = patch_size if type(patch_size) is tuple else to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
    def forward(self, x):
        """Embed x of shape (B, C, H, W) into patch tokens (B, N, embed_dim)."""
        B, C, H, W = x.shape
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
class PatchEmbed3D(nn.Module):
    """Video to patch embedding: a 3D convolution projects non-overlapping
    (z_block_size x patch_size x patch_size) tubelets to embed_dim."""
    def __init__(
        self, img_size=224, temporal_resolution=4, in_chans=3,
        patch_size=16, z_block_size=2, embed_dim=768, flatten=True
    ):
        super().__init__()
        # Patch-grid extent along each axis.
        spatial_grid = img_size // patch_size
        self.height = spatial_grid
        self.width = spatial_grid
        self.frames = temporal_resolution // z_block_size
        self.num_patches = self.height * self.width * self.frames
        # Non-overlapping tubelet projection: kernel == stride.
        self.proj = nn.Conv3d(in_chans, embed_dim,
                              kernel_size=(z_block_size, patch_size, patch_size),
                              stride=(z_block_size, patch_size, patch_size))
        self.flatten = flatten
    def forward(self, x):
        """Embed video x of shape (B, C, T, H, W). Returns (B, N, embed_dim)
        when flatten is True, otherwise the raw conv map (B, D, T', H', W')."""
        batch, chans, frames, height, width = x.shape
        embedded = self.proj(x)
        if not self.flatten:
            return embedded
        return embedded.flatten(2).transpose(1, 2)
class HeadMLP(nn.Module):
    """Classification head: a dropout + linear probe when n_hidden is None,
    otherwise a one-hidden-layer MLP with batch norm and ReLU."""
    def __init__(self, n_input, n_classes, n_hidden=512, p=0.1):
        super(HeadMLP, self).__init__()
        self.n_input = n_input
        self.n_classes = n_classes
        self.n_hidden = n_hidden
        if n_hidden is None:
            # use linear classifier
            layers = [
                nn.Dropout(p=p),
                nn.Linear(n_input, n_classes, bias=True),
            ]
        else:
            # use simple MLP classifier
            layers = [
                nn.Dropout(p=p),
                nn.Linear(n_input, n_hidden, bias=True),
                nn.BatchNorm1d(n_hidden),
                nn.ReLU(inplace=True),
                nn.Dropout(p=p),
                nn.Linear(n_hidden, n_classes, bias=True),
            ]
        self.block_forward = nn.Sequential(*layers)
        print(f"Dropout-NLP: {p}")
    def forward(self, x):
        """Map features (B, n_input) to logits (B, n_classes)."""
        return self.block_forward(x)
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
def adapt_input_conv(in_chans, conv_weight, agg='sum'):
    """Adapt a pretrained stem-conv weight of shape (O, I, J, K) to a
    different number of input channels by summing/averaging (to 1 channel)
    or tiling (to >3 channels) the channel axis. Returns a tensor in the
    original dtype. Raises NotImplementedError for unsupported layouts."""
    conv_type = conv_weight.dtype
    # Work in float for the reductions, restore the original dtype at the end.
    conv_weight = conv_weight.float()
    O, I, J, K = conv_weight.shape
    if in_chans == 1:
        if I > 3:
            assert conv_weight.shape[1] % 3 == 0
            # For models with space2depth stems
            conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
            conv_weight = conv_weight.sum(dim=2, keepdim=False)
        else:
            if agg == 'sum':
                print("Summing conv1 weights")
                conv_weight = conv_weight.sum(dim=1, keepdim=True)
            else:
                print("Averaging conv1 weights")
                conv_weight = conv_weight.mean(dim=1, keepdim=True)
    elif in_chans != 3:
        if I != 3:
            raise NotImplementedError('Weight format not supported by conversion.')
        else:
            if agg == 'sum':
                print("Summing conv1 weights")
                # Tile the 3 channels up to in_chans, then rescale so the
                # total response magnitude is preserved.
                repeat = int(math.ceil(in_chans / 3))
                conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
                conv_weight *= (3 / float(in_chans))
            else:
                print("Averaging conv1 weights")
                conv_weight = conv_weight.mean(dim=1, keepdim=True)
                conv_weight = conv_weight.repeat(1, in_chans, 1, 1)
    conv_weight = conv_weight.to(conv_type)
    return conv_weight
def load_pretrained(
    model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False
):
    """Load pretrained ViT weights named by ``cfg.VIT.PRETRAINED_WEIGHTS`` into `model`.

    Adapts the patch-embed input conv when ``in_chans != 3`` and drops/trims
    the classifier head when ``num_classes`` differs from the 1000-class
    pretrain.  Matching tensors are copied into the model's state in place;
    every key that could not be loaded is printed.
    """
    # Load state dict
    # NOTE(review): asserting a non-empty f-string is always True -- this check
    # validates nothing; it probably meant something like
    #   assert cfg.VIT.PRETRAINED_WEIGHTS in default_cfgs, "..."
    assert(f"{cfg.VIT.PRETRAINED_WEIGHTS} not in [vit_1k, vit_1k_large]")
    state_dict = torch.hub.load_state_dict_from_url(url=default_cfgs[cfg.VIT.PRETRAINED_WEIGHTS])
    if filter_fn is not None:
        state_dict = filter_fn(state_dict)   # e.g. _conv_filter for patchify weights
    input_convs = 'patch_embed.proj'
    if input_convs is not None and in_chans != 3:
        if isinstance(input_convs, str):
            input_convs = (input_convs,)
        for input_conv_name in input_convs:
            weight_name = input_conv_name + '.weight'
            try:
                # Fold/replicate the 3-channel stem weights to in_chans channels.
                state_dict[weight_name] = adapt_input_conv(
                    in_chans, state_dict[weight_name], agg='avg')
                print(
                    f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
            except NotImplementedError as e:
                # Unsupported layout: drop the weight and let it init randomly.
                del state_dict[weight_name]
                strict = False   # NOTE(review): `strict` is never consumed after this
                print(
                    f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
    classifier_name = 'head'
    label_offset = cfg.get('label_offset', 0)
    pretrain_classes = 1000
    if num_classes != pretrain_classes:
        # completely discard fully connected if model num_classes doesn't match pretrained weights
        del state_dict[classifier_name + '.weight']
        del state_dict[classifier_name + '.bias']
        strict = False
    elif label_offset > 0:
        # special case for pretrained weights with an extra background class in pretrained weights
        classifier_weight = state_dict[classifier_name + '.weight']
        state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
        classifier_bias = state_dict[classifier_name + '.bias']
        state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
    # Manually copy every tensor whose name (modulo a "module." prefix) and
    # shape match the model; report everything else below.
    loaded_state = state_dict
    self_state = model.state_dict()
    all_names = set(self_state.keys())
    saved_names = set([])
    for name, param in loaded_state.items():
        param = param   # NOTE(review): no-op assignment
        if 'module.' in name:
            name = name.replace('module.', '')   # strip DataParallel prefixes
        if name in self_state.keys() and param.shape == self_state[name].shape:
            saved_names.add(name)
            self_state[name].copy_(param)   # in-place copy into the live model
        else:
            print(f"didnt load: {name} of shape: {param.shape}")
    print("Missing Keys:")
    # NOTE(review): the trailing "| [" below is dataset-extraction residue.
    print(all_names - saved_names) | [
"mandelapatrick@devfair0297.h2.fair"
] | mandelapatrick@devfair0297.h2.fair |
acc5c7355bf61f8fbde46568884e95f5b124e22c | 4cfb9d75361f3c7f50744878e645073e3a8fc8d4 | /sinx+sinx fft.py | ab1696136d9a7c9f87d523e156f07203ab760d85 | [] | no_license | mychenyoke/gwwave1 | ac99c982b5037e8afff42e3055de366ddd8543dd | 7520846ab848ac2434db11ceb66a271d5ab68270 | refs/heads/master | 2020-03-18T13:47:02.888171 | 2018-05-28T15:18:36 | 2018-05-28T15:18:36 | 134,808,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import numpy as np
import matplotlib.pyplot as plt
# Angular frequencies (radians per integer sample) of the two test tones.
omega1=0.1
omega2=0.2
sample_rate=20
# NOTE(review): samples below are spaced 1 apart, so passing 1/sample_rate to
# fftfreq mislabels the frequency axis -- confirm the intended scale.
a=np.arange(0,100)                       # 100 integer sample points
sina=np.sin(omega1*a)                    # single tone
sinb=np.sin(omega2*a)+np.sin(omega1*a)   # two-tone mixture
# Time-domain plots (subplots 1 and 2 of a 4-row figure).
plt.figure(figsize=(10,24))
plt.subplot(4,1,1)
plt.title("sinax")
plt.plot(a,sina)
plt.savefig("sinax")
plt.subplot(4,1,2)
plt.title("sinax+sinbx")
plt.plot(a,sinb)
plt.savefig("sinax+sinbx")
aa=[]
fft_frequency=np.fft.fftfreq(len(a),1/sample_rate)   # frequency bin labels
fft_sina=np.fft.fft(sina)
#print(abs(fft_sina))
aa=abs(fft_sina)          # magnitude spectrum of the single tone
for ab in aa:
    print(ab)
fft_sinb=np.fft.fft(sinb)
# Frequency-domain plots (subplots 3 and 4).
plt.subplot(4,1,3)
plt.title("FFT_Frequency_sinax")
plt.plot(fft_frequency,abs(fft_sina))
plt.savefig("FFT_Frequency_sinax")
plt.subplot(4,1,4)
plt.title("FFT_Frequency_sinax+sinbx")
# NOTE(review): fft_sinb is complex -- matplotlib plots only the real part and
# warns; abs(fft_sinb) was probably intended (cf. subplot 3).
plt.plot(fft_frequency,fft_sinb)
plt.savefig("FFT_Frequency_sinax+sinbx") | [
"noreply@github.com"
] | mychenyoke.noreply@github.com |
45b76c5185d0e6d5434ffd0717722d4e1b9aa0c1 | c744b20f4d5f4035dd81bf515f6e969a67299309 | /lists/migrations/0006_auto_20150825_1407.py | 34243587e4fd8a04e03184790c2e99036ba5781f | [] | no_license | jian-en/flyingjay-superlists-project | 14c94e16658e6aef76019847423b6fd0ac01eebe | 2c8ad9dfd26d68237b065797f3132872eb0cdaa5 | refs/heads/master | 2021-01-02T09:43:37.631559 | 2015-11-03T03:55:03 | 2015-11-03T03:55:03 | 40,744,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: change ``Item.text`` to a ``TextField``."""

    dependencies = [
        ('lists', '0005_auto_20150823_0227'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='text',
            field=models.TextField(),
        ),
    ]
| [
"fujian_en@126.com"
] | fujian_en@126.com |
a4c71809c35378bb39dbbce97d55d2a122ab4dcd | f51c6d0cebb27c377ce9830deec4b727b9b2ee90 | /AI/05_tictactoe/02grid_plot.py | b2fb6cbc7f65ddac4fc048c6664f6bdd82dfb227 | [] | no_license | dbbudd/Python-Experiments | 1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8 | b6d294bf11a5c92b8578d16aa2f63cc27fc47b07 | refs/heads/master | 2020-04-17T02:21:36.693593 | 2019-01-17T00:18:34 | 2019-01-17T00:18:34 | 166,130,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | #!/usr/bin/env python
import numpy as np
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class gameboard(object):
    """3x3 tic-tac-toe board.  Cell codes: 0 empty, 1 player one, 2 player two."""

    def __init__(self):
        # Starting position; player 1 puts an "X" (1), player 2 puts an "O" (2).
        self.g = [[1,0,1],[0,0,2],[0,2,0]]
        self.grid = np.array(self.g)
        print(self.grid)

    def drawGrid(self):
        """Render the board: red cells for player 1, blue for player 2."""
        fig = plt.figure()
        ax = fig.add_subplot(111, xlim=(0, 3), ylim=(0, 3))
        # All nine cell coordinates, row-major.
        self.myCells = [(row, col) for row in range(3) for col in range(3)]
        colours = {1: "red", 2: "blue"}
        for cell_pos in self.myCells:
            face = colours.get(int(self.grid[cell_pos]), "none")
            patch = mpatches.Rectangle(cell_pos, 1, 1, alpha=1, facecolor=face)
            ax.add_patch(patch)
        plt.show()
board = gameboard()
board.drawGrid() | [
"dbbudd@gmail.com"
] | dbbudd@gmail.com |
1697ff12097d074fe9a08b7e8cfbf1ecd1348016 | cca89a7bbe2da907a38eb00e9a083f57597273f0 | /162. 寻找峰值/pythonCode.py | ecfc5d414241c3d0b4d2b4aac3531e9ced628696 | [] | no_license | xerprobe/LeetCodeAnswer | cc87941ef2a25c6aa1366e7a64480dbd72750670 | ea1822870f15bdb1a828a63569368b7cd10c6ab8 | refs/heads/master | 2022-09-23T09:15:42.628793 | 2020-06-06T16:29:59 | 2020-06-06T16:29:59 | 270,215,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | from typing import List
class Solution:
    def findPeakElement(self, nums: List[int]) -> int:
        """Return the index of any peak element in O(log n).

        A peak is strictly greater than its neighbours; nums[-1] and nums[n]
        are treated as -infinity, so a peak always exists.
        """
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] > nums[mid + 1]:
                # Descending at mid: a peak lies at mid or to its left.
                hi = mid
            else:
                # Ascending at mid: a peak lies strictly to the right.
                lo = mid + 1
        return lo
# 峰值元素是指其值大于左右相邻值的元素。
# 给定一个输入数组 nums,其中 nums[i] ≠ nums[i+1],找到峰值元素并返回其索引。
# 数组可能包含多个峰值,在这种情况下,返回任何一个峰值所在位置即可。
# 你可以假设 nums[-1] = nums[n] = -∞。
# 示例 1:
# 输入: nums = [1,2,3,1]
# 输出: 2
# 解释: 3 是峰值元素,你的函数应该返回其索引 2。
# 示例 2:
# 输入: nums = [1,2,1,3,5,6,4]
# 输出: 1 或 5
# 解释: 你的函数可以返回索引 1,其峰值元素为 2;
# 或者返回索引 5, 其峰值元素为 6。
# 说明:
# 你的解法应该是 O(logN) 时间复杂度的。
# 链接:https://leetcode-cn.com/problems/find-peak-element/ | [
"changwenhao1@qq.com"
] | changwenhao1@qq.com |
96eb58da2807780f7f78eb49453cd03e2e4a57bb | 33f30925224a7db3e3bf6948c6c569ad850e9c76 | /Server/bin/rst2xml.py | 6a7fab179644d60c2959331900cdea30a7350337 | [] | no_license | duelle/CTT | 2bc64fffaf4b2eb3976fedd7aea231a51da8fbe9 | e2da2ab9c599833cc8409728b456a9e37825986b | refs/heads/master | 2022-04-06T15:25:06.747919 | 2020-02-19T14:04:37 | 2020-02-19T14:04:37 | 237,939,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #!/home/duelle/Repositories/git/RadonCTT/Server/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
    import locale
    # Best effort: honour the user's locale settings for I/O.
    locale.setlocale(locale.LC_ALL, '')
except:
    # Locale configuration is optional; any failure is deliberately ignored.
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)
# Hand control to the Docutils command-line publisher using the XML writer.
publish_cmdline(writer_name='xml', description=description)
| [
"duellmann@iste.uni-stuttgart.de"
] | duellmann@iste.uni-stuttgart.de |
311ba855cf35a4765fce0410377fb7f5eb4aa8a4 | c56448aa3553d1a5ab71099e741fa71c15d539cb | /stations/urls.py | 817356c4760a4af8560f60d4abb533fc1d2a9d3e | [] | no_license | Jack11709/django-underground | 8591cba5fbcd9e2202fbaefa1a95057d4258477d | 60b868ce5dcb5001761c5207cfd764474ec8f19a | refs/heads/master | 2022-06-04T04:11:14.667519 | 2019-10-31T09:50:46 | 2019-10-31T09:50:46 | 218,318,167 | 0 | 0 | null | 2022-05-25T03:24:00 | 2019-10-29T15:19:03 | Python | UTF-8 | Python | false | false | 588 | py | from django.urls import path
from .views import StationList, StationDetail, ZoneList, ZoneDetail, LineList, LineDetail # import our DRF views
urlpatterns = [
    # Station list/detail endpoints.
    path('stations', StationList.as_view(), name='stations-list'),
    path('stations/<int:pk>/', StationDetail.as_view(), name='stations-detail'),
    # Zone endpoints; `name` kwargs added for consistency with the station
    # routes so these URLs can be reversed too (backward compatible).
    path('zones', ZoneList.as_view(), name='zones-list'),
    path('zones/<int:pk>/', ZoneDetail.as_view(), name='zones-detail'),
    # Line endpoints.
    path('lines', LineList.as_view(), name='lines-list'),
    path('lines/<int:pk>/', LineDetail.as_view(), name='lines-detail')
] # registering all our urls for this project, the route url for this project is in /project/urls.py
"jack.may@generalassemb.ly"
] | jack.may@generalassemb.ly |
98d80763957c0adf4a839f4d123400647c1b2d7f | 950fd350aba8c7584b8f362b2e5079b5010a1f6a | /lib/Sockets.py | aeb577b91be8e75da756909611e728e080dff370 | [] | no_license | entr0pist/fakeircd | 96814755b0b2041bc14db8f942680c47f5ea56b0 | 43a88be91aa6337e1eacaeadaa20dcdb2bccd3a2 | refs/heads/master | 2020-06-07T10:34:36.562878 | 2015-11-10T04:02:38 | 2015-11-10T04:02:38 | 42,418,758 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | from lib import config
from lib import linechat
from lib.User import User
class Sockets:
    """Facade over a single shared linechat.Serve() reactor.

    Keeps the reactor's listening sockets in sync with the ``listen`` entries
    of the configuration (see spawn_all).
    """
    def __init__(self):
        self.server = linechat.Serve()
    def add_sock(self, sock):
        # Delegate: register an extra socket with the reactor.
        self.server.add_sock(sock)
    def rm_sock(self, sock):
        # Delegate: remove a socket from the reactor.
        self.server.rm_sock(sock)
    def serve(self):
        # Delegate: enter the reactor's serve loop.
        self.server.serve()
    def spawn_all(self):
        """Reconcile listeners with config: create missing ones, drop stale ones."""
        # Pass 1: bind a listener for every configured address not already bound.
        for server in config.get(None, 'listen'):
            if self.server.sock_by_address(server['bind_address'], server['bind_port']):
                continue
            ssl = False
            if 'ssl' in server:
                ssl = server['ssl']
            s = linechat.Server(User, port=server['bind_port'],
                hostname=server['bind_address'], ssl=ssl)
            self.server.add_sock(s)
        # Pass 2: remove listeners whose address no longer appears in config.
        for server in self.server.socks:
            try:
                sock = server.sock.getsockname()
            except:
                # NOTE(review): this aborts the whole sweep on the first failed
                # getsockname() instead of skipping that socket -- confirm intent.
                return
            if not config.get_listen_by_host_port(sock):
                self.server.rm_sock_by_address(*sock)
    def shutdown_all(self):
        # Close every socket owned by the reactor.
        self.server.close_all()
sockets = Sockets()  # module-level singleton shared by all importers
| [
"entr0pist@users.noreply.github.com"
] | entr0pist@users.noreply.github.com |
d86cb55284f9ec406e508cb0da30cb1564736a7e | 919fd48a34ca200086f51905d64c21c3b31b6739 | /CodeMixed-Text-Generator/cm_text_generator/grammar_inference.py | 0449999d7ffce757d92333a845762acfcc6197a9 | [
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"BSD-2-Clause",
"MIT",
"Python-2.0",
"PSF-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | mohdsanadzakirizvi/CodeMixed-Text-Generator | e89b758ad88a622c058bf1465003ae3c23a55b88 | 47740eeff3ecb46f5294711f4fe5d3a03a6e0b54 | refs/heads/main | 2023-06-15T22:43:21.578533 | 2021-04-13T09:16:24 | 2021-04-27T12:46:19 | 384,061,885 | 0 | 0 | MIT | 2021-07-08T08:54:14 | 2021-07-08T08:54:14 | null | UTF-8 | Python | false | false | 1,853 | py | ###GRAMMAR INFERENCE
from .data_structure_definitions import *
def ruleEnlister(root, grammar):
if root.token=="XXXXX":
cond=False
for rule in grammar: ##check false/true
if (rule.lhs.nonTerminal==root.label and len(rule.rhs)==len(root.children)):
#print "Using old rule!"
cond=True
for counter in range(len(rule.rhs)):
if(rule.rhs[counter].nonTerminal!=root.children[counter].label or rule.rhs[counter].index!=root.children[counter].repeatIndex):
cond=False
if cond==True:
root.ruleNum=rule.ruleNum
if(root.ruleNum==-1):
#print "Making new rule!", str(len(grammar))
lhs=grammarPoint(root.label, -1, -1)
rhs=[]
for child in root.children:
rhs.append(grammarPoint(child.label, child.repeatIndex, root.children.index(child)))
grammar.append(grammarRule(len(grammar), lhs, rhs))
root.ruleNum=len(grammar)-1
for child in root.children:
ruleEnlister(child, grammar)
def projectHindiRules(hinRoot, grammar):
if hinRoot.token=="XXXXX":
# print "\nLABEL: ", hinRoot.label, " ", str(hinRoot.ruleNum)
for child in hinRoot.children:
for count in range(len(grammar[hinRoot.ruleNum].rhs)):
#print "(", child.label, grammar[hinRoot.ruleNum].rhs[count].nonTerminal, child.repeatIndex, grammar[hinRoot.ruleNum].rhs[count].index, ")",
if child.label==grammar[hinRoot.ruleNum].rhs[count].nonTerminal and \
child.repeatIndex==grammar[hinRoot.ruleNum].rhs[count].index:
#print "index assigned: ", ind
grammar[hinRoot.ruleNum].rhs[count].hinRank=hinRoot.children.index(child)
#print "incrementing..."
for child in hinRoot.children:
projectHindiRules(child, grammar) | [
"mohdsanadzakirizvi@gmail.com"
] | mohdsanadzakirizvi@gmail.com |
e8f79267ba52969b4af0a0f02f9340977750ba24 | 5002ec313e12d6e5f58d5ef41ea265084ff96373 | /信息收集工具/modular/Subdomain_name_query.py | ff3d469ff26e6418b763ef974be8e1beb300a2bd | [] | no_license | IVorder/python | 9a8dc46d69fb9b5c3d65509348595623b8d47a8a | 6b60a13dda471ed3f1380b6bf014a33f185e6033 | refs/heads/master | 2020-06-21T22:43:41.838924 | 2019-07-18T10:21:28 | 2019-07-18T10:21:28 | 197,569,599 | 10 | 4 | null | 2019-07-18T10:55:49 | 2019-07-18T10:55:47 | null | UTF-8 | Python | false | false | 2,369 | py | # @author:九世
# @time:2019/7/2
# @file:mian.py
from gevent import monkey;monkey.patch_all()
import requests
import config.config
import warnings
import gevent
from multiprocessing import Process
import dns.resolver
from bs4 import BeautifulSoup
from gevent.lock import RLock
# Silence noisy library UserWarnings emitted during scraping/resolution.
warnings.simplefilter("ignore", category=UserWarning)
domains=[]        # shared accumulator for every domain found (passive + brute force)
lock=RLock()      # guards wordlist reads across gevent greenlets
def domain_query():
    """Decorator factory: passive subdomain lookup.

    Before invoking the wrapped function, scrapes site.ip138.com for domain
    names associated with the target (first positional argument) and appends
    every match to the module-level ``domains`` list.
    """
    def wrater(func):
        def query(*args,**kwargs):
            print('\033[1;32m[+]\033[0m 域名查询:')
            # Spoof a desktop-browser UA so the lookup page serves normal HTML.
            headers={'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
            url='http://site.ip138.com/{}/domain.htm'.format(*args)
            rqt=requests.get(url=url,headers=headers)
            # Candidate domains appear as <a target="_blank"> links on the page.
            rgt=BeautifulSoup(rqt.text,'html.parser').find_all('a',target='_blank')
            for c in rgt:
                if str(*args) in str(c):   # keep only links mentioning the target
                    domains.append(c.get_text())
            return func(*args,**kwargs)
        return query
    return wrater
def domain_baopo():
    """Decorator factory: wordlist expansion.

    Reads dict/domain.txt and builds ``<word>.<target>`` candidates, then calls
    the wrapped function with that candidate list instead of the raw target.
    """
    def wrter(func):
        def bp(*args,**kwargs):
            lock.acquire()          # serialise wordlist reads across greenlets
            path=r'dict/domain.txt'
            dp=[]
            dk=open(path,'r',encoding='utf-8')   # NOTE(review): file is never closed
            for d in dk.readlines():
                # Strip the trailing newline and prefix the word to the target.
                dp.append("{}.{}".format("".join(d.split('\n')),*args))
            lock.release()
            return func(dp,**kwargs)
        return bp
    return wrter
@domain_query()
def run(url):
    # Entry point for the passive ip138 lookup; all the work happens in the
    # decorator, so the body is intentionally empty.
    pass
def dns_b(domain):
    """Resolve ``domain``'s A record; on success record it in ``domains``."""
    try:
        querys=dns.resolver.query(domain,'A')
        for q in querys:
            # Appended once per answer record; duplicates are removed in cat().
            domains.append(domain)
    except:
        # Unresolvable candidates are silently skipped (best-effort brute force).
        pass
def xc(rg):
    """Resolve a batch of candidate domains concurrently with gevent."""
    rt=[]
    try:
        for r in rg:
            rt.append(gevent.spawn(dns_b,r))
        gevent.joinall(rt)    # wait for the whole batch to finish
    except:
        pass
@domain_baopo()
def run2(url):
    """Brute-force the wordlist candidates in batches of config.SUBDOMAIN,
    spawning one worker process (of gevent greenlets) per batch."""
    print('\033[1;32m[+]\033[0m 字典爆破域名开始')
    rw=[]
    calc=0
    for c in url:
        if calc==config.config.SUBDOMAIN:   # batch is full: hand it to a worker
            p=Process(target=xc,args=(rw,))
            p.start()
            calc=0
            # NOTE(review): clearing right after start() relies on the child
            # holding its own copy of the list (fork semantics) -- confirm.
            rw.clear()
        rw.append(c)
        calc+=1
    if len(rw)>0:                           # flush the final partial batch
        p = Process(target=xc, args=(rw,))
        p.start()
def cat():
    """Print every unique discovered domain."""
    qc=list(set(domains))
    for q in qc:
        # NOTE(review): the trailing "| [" below is dataset-extraction residue.
        print(q) | [
"noreply@github.com"
] | IVorder.noreply@github.com |
d732b74a12857a9cfedd5615c35c20fd705c8355 | b05e271e498ab231c8e6fd650826cb98a1887c5f | /main.py | 59838bcf3d74bddadd669b317a56301dacea99a9 | [
"MIT"
] | permissive | tian409/joint-computation-offloading-and-resource-allocation | 1074e6bee92303757561a0b6a6dfee8663584f3f | 13e68b71c8e9ae7347a82294a355266c3ce28a81 | refs/heads/master | 2023-04-03T15:08:49.180165 | 2021-04-04T05:37:46 | 2021-04-04T05:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,472 | py | # -*- coding: utf-8 -*-
import copy, json, argparse
import torch
from scenario import Scenario
from agent import Agent
from dotdic import DotDic
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # prefer GPU when available
def create_agents(opt, sce, scenario, device):
    """Build one Agent per configured slot (each creates its own network)."""
    return [Agent(opt, sce, scenario, index=i, device=device)
            for i in range(opt.nagents)]
def run_episodes(opt, sce, agents, scenario):
    """Main training loop.

    For each episode, runs up to opt.nsteps joint steps: every agent selects
    an action (with a linearly increasing exploration threshold), receives its
    QoS and reward, stores the transition and trains; target networks refresh
    every opt.nupdate steps.  The episode ends early once every agent meets
    the QoS target.  Per-episode step counts are appended to DDPG.csv.
    """
    global_step = 0   # NOTE(review): never used
    nepisode = 0
    action = torch.zeros(opt.nagents,dtype=int)
    reward = torch.zeros(opt.nagents)
    QoS = torch.zeros(opt.nagents)
    state_target = torch.ones(opt.nagents) # The QoS requirement
    # NOTE(review): consider `with open(...)` so the file closes on exceptions.
    f= open("DDPG.csv","w+")
    f.write("This includes the running steps:\n")
    while nepisode < opt.nepisodes:
        state = torch.zeros(opt.nagents) # Reset the state
        next_state = torch.zeros(opt.nagents) # Reset the next_state
        nstep = 0
        while nstep < opt.nsteps:
            eps_threshold = opt.eps_min + opt.eps_increment * nstep * (nepisode + 1)
            if eps_threshold > opt.eps_max:
                eps_threshold = opt.eps_max # Linear increasing epsilon
            # eps_threshold = opt.eps_min + (opt.eps_max - opt.eps_min) * np.exp(-1. * nstep * (nepisode + 1)/opt.eps_decay)
            # Exponential decay epsilon
            for i in range(opt.nagents):
                action[i] = agents[i].Select_Action(state, scenario, eps_threshold) # Select action
            for i in range(opt.nagents):
                QoS[i], reward[i] = agents[i].Get_Reward(action, action[i], state, scenario) # Obtain reward and next state
                next_state[i] = QoS[i]
            for i in range(opt.nagents):
                agents[i].Save_Transition(state, action[i], next_state, reward[i], scenario) # Save the state transition
                agents[i].Optimize_Model() # Train the model
                if nstep % opt.nupdate == 0: # Update the target network for a period
                    agents[i].Target_Update()
            state = copy.deepcopy(next_state) # State transits
            if torch.all(state.eq(state_target)): # If QoS is satisified, break
                break
            nstep += 1
        print('Episode Number:', nepisode, 'Training Step:', nstep)
        # print('Final State:', state)
        f.write("%i \n" % nstep)
        nepisode += 1
    f.close()
def run_trial(opt, sce):
    """Execute a single trial: build the scenario and agents, then train."""
    env = Scenario(sce)
    run_episodes(opt, sce, create_agents(opt, sce, env, device), env)
if __name__ == '__main__':
    # Command line: scenario config, options config, and number of trials.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c1', '--config_path1', type=str, help='path to existing scenarios file')
    parser.add_argument('-c2', '--config_path2', type=str, help='path to existing options file')
    parser.add_argument('-n', '--ntrials', type=int, default=1, help='number of trials to run')
    args = parser.parse_args()
    # NOTE(review): the config files are opened but never closed.
    sce = DotDic(json.loads(open(args.config_path1, 'r').read()))
    opt = DotDic(json.loads(open(args.config_path2, 'r').read())) # Load the configuration file as arguments
    for i in range(args.ntrials):
        trial_result_path = None   # NOTE(review): assigned but never used
        # Each trial works on its own deep copies of the configuration.
        trial_opt = copy.deepcopy(opt)
        trial_sce = copy.deepcopy(sce)
        run_trial(trial_opt, trial_sce)
| [
"fenghao2018@bupt.edu.cn"
] | fenghao2018@bupt.edu.cn |
d5e7ae3bd1017599518278f12c78a1b1a2662ff3 | 4138376af721c583944882b68235746cd9637fd6 | /7/sunjiayin/cpNbnet.py | 305e2c1c4681006598eb80310af7c334d54f7acb | [] | no_license | hulaoan/homework-arch-5 | 9df792281b7ac92abc166ad80e69a5c2a59b2c9e | 1c1b07f8ebb1b2f9906c0cd29cef8227fed3c7fd | refs/heads/master | 2021-01-14T13:58:05.883628 | 2015-12-25T05:05:16 | 2015-12-25T05:05:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,361 | py | #!/usr/bin/env python
# coding:utf-8
import socket
import select
import time
import pdb
__all__ = ["nbNet"]
from nbNetUtils import *
class STATE:
    """Per-connection bookkeeping for the non-blocking protocol state machine."""

    def __init__(self):
        self.state = "accept"    # current state-machine state
        self.have_read = 0       # bytes read so far
        self.need_read = 10      # bytes still expected (the header is 10 bytes)
        self.have_write = 0      # bytes written so far
        self.need_write = 0      # bytes still to write
        self.buff_read = ""      # inbound buffer
        self.buff_write = ""     # outbound buffer
        self.sock_obj = ""       # underlying socket object

    def printState(self):
        """Dump the whole state to the debug log (no-op unless DEBUG is set)."""
        if not DEBUG:
            return
        dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
        for line in (" - - state: %s" % self.state,
                     " - - have_read: %s" % self.have_read,
                     " - - need_read: %s" % self.need_read,
                     " - - have_write: %s" % self.have_write,
                     " - - need_write: %s" % self.need_write,
                     " - - buff_read: %s" % self.buff_read,
                     " - - buff_write: %s" % self.buff_write,
                     " - - sock_obj: %s" % self.sock_obj):
            dbgPrint(line)
dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
tmp_state = STATE() #实例化类
tmp_state.sock_obj = sock #定义类中sock
self.conn_state[sock.fileno()] = tmp_state #把sock加入到字典中
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
sock_state = self.conn_state[fd] #取出fd对应连接
sock = sock_state.sock_obj #取出fd的sock
conn, addr = sock.accept() #取出连接请求
conn.setblocking(0) #设置非阻塞模式
return conn #返回连接
def close(self, fd):
try:
sock = self.conn_state[fd].sock_obj #取出fd的sock
sock.close()#关闭sock
except:
dbgPrint("Close fd: %s" % fd)
finally:
self.epoll_sock.unregister(fd) #将fd重epoll中注销
self.conn_state.pop(fd) #踢出字典
def read(self, fd):
try:
sock_state = self.conn_state[fd] #取出fd对应连接
conn= sock_state.sock_obj #取出fd连接请求
if sock_state.need_read <= 0: #需要读取字节为空报错
raise socket.error
one_read = conn.recv(sock_state.need_read) #读取传输的字符
dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %(fd, one_read, sock_state.need_read))
if len(one_read) == 0: #读取数据为0报错
raise socket.error
sock_state.buff_read += one_read #把读取数据存到读缓存中
sock_state.have_read += len(one_read) #已经读取完的数据量
sock_state.need_read -= len(one_read) #还需要读取数据的量
sock_state.printState()
if sock_state.have_read == 10: #10字节为头文件处理
header_said_need_read = int(sock_state.have_read) #读取数据的量
if header_said_need_read <= 0: #如果还需读0字节报错
raise socket.error
sock_state.need_read += header_said_need_read #还需读取数量变化
sock_state.buff_read = '' #读缓存清空
sock_state.printState()
return "readcontent" #还需读取数据
elif sock_state.need_read == 0:
return "process" #读取数据完成,转换状态
else:
return "readmore" #还需读取数据
except (socket.error, ValueError), msg:
try:
if msg.errno == 11: #errno等于11,尝试进行一次读取
dbgPrint("11" + msg)
return "retry"
except:
pass
return "closing"
def write(self, fd):
sock_state = self.conn_state[fd] #取出fd对应的连接构造体
conn = sock_state.sock_obj #取出fd对于连接
last_have_send = sock_state.have_write #已经写数据的量
try:
have_send = conn.send(sock_state.buff_write[last_have_send:]) #发送剩下的数据
sock_state.have_write += have_send #已经写的数据量
sock_state.need_write -= have_send #还需写的数据量
if sock_state.need_write == 0 and sock_state.have_write !=0: #写数据完成
sock_state.printState()
dbgPrint("\n write date end")
return "writecomplete" #返回写入完成
else:
return "writemore" #返回计算写入
except socket.error, msg:
return "closing"
def run(self):
while True:
epoll_list = self.epoll_sock.poll() #定义poll()事件发生的list
for fd, events in epoll_list:
sock_state = self.conn_state[fd] #取出fd构造体
if select.EPOLLHUP & events: #文件描述符挂断
dbgPrint("EPOLLHUP")
sock_state.state = "closing" #fd状态设置为closing
elif select.EPOLLERR & events:
dbgPrint("EPOLLERR") #文件描述符出错
sock_state.state = "closing" #对应fd状态为closing
self.state_machine(fd) #状态机调用
def state_machine(self, fd):
sock_state = self.conn_state[fd] #fd构造体
self.sm[sock_state.state](fd) #通过sm字典调用对应状态的函数
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
self.conn_state = {} #定义字典保存每个连接状态
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_sock.bind((addr, port))
self.listen_sock.listen(10) # 排队长度
self.setFd(self.listen_sock) #定义listen socket 放入字典conn_state
self.epoll_sock = select.epoll() #初始化fd的epoll
self.epoll_sock.register(self.listen_sock.fileno(), select.EPOLLIN ) #linten可以读的描述符
self.logic = logic #业务处理
self.sm = {
"accept" : self.accept2read,
"read" : self.read2process,
"write" : self.write2read,
"process": self.process,
"closing": self.close,
} #状态调用机的字典
dbgPrint('\n__init__: end, register no: %s' %self.listen_sock.fileno() )
def process(self, fd):
sock_state = self.conn_state[fd]
response = self.logic(sock_state.buff_read) #业务函数处理
sock_state.buff_write = "%010d%s" % (len(response), response) #发送的数据
sock_state.need_write = len(sock_state.buff_write) #需要发送的长度
sock_state.state = "write" #fd对应的状态
self.epoll_sock.modify(fd, select.EPOLLOUT) #fd对应的epoll为改写模式
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(conn.fileno(), select.EPOLLIN) #发送数据后重新将fd的epoll改成读
self.setFd(conn) #fd生成构造体
self.conn_state[conn.fileno()].state = "read" #fd状态为read
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
#状态转换
try:
read_ret = self.read(fd) #read函数返回值
except (Exception), msg:
dbgPrint(msg)
read_ret = "closing"
if read_ret == "process":# 读取完成,转换到process
self.process(fd)
elif read_ret == "readcontent":# readcontent、readmore、retry 继续读取
pass
elif read_ret == "readmore":
pass
elif read_ret == "retry":
pass
elif read_ret == "closing":
self.conn_state[fd].state = 'closing' #状态为closing关闭连接
self.state_machine(fd)
else:
raise Exception("impossible state returned by self.read")
def write2read(self, fd):
try:
write_ret = self.write(fd) #函数write返回值
except socket.error, msg: #出错关闭连接
write_ret = "closing"
if write_ret == "writemore": #继续写
pass
elif write_ret == "writecomplete":#写完成
sock_state = self.conn_state[fd]
conn = sock_state.sock_obj
self.setFd(conn) #重置见连接fd构造体
self.conn_state[fd].state = "read" #将fd状态设置为read
self.epoll_sock.modify(fd, select.EPOLLIN) #epoll状态为可读
elif write_ret == "closing":# 发生错误关闭
dbgPrint(msg)
self.conn_state[fd].state = 'closing'
self.state_machine(fd)
if __name__ == '__main__':
    # Demo: an echo-reverse server; each request payload is returned reversed.
    def logic(d_in):
        return(d_in[::-1])
    reverseD = nbNet('0.0.0.0', 9060, logic)
    reverseD.run()   # blocks forever in the epoll loop
| [
"sunjiayin@teach.works"
] | sunjiayin@teach.works |
622914c9a6c8f38dd5339009d187c1a23ea57bf5 | 6bd1aa6b80fd93fd65f3e3f9c6b4cc743fabc076 | /Laboratorios-Big-Data/MOOC/KMeans/KMeansHackers.py | 94bd8290cb1a8e974ee767a073e4064bc5d47159 | [] | no_license | RAricardo/Laboratorios-Big-Data | 617a7adc5531d29653b65af0a3a3e885a0aa42e8 | 04ebc65ae83007407e9e14f38774ef77a21cbe31 | refs/heads/master | 2020-04-29T05:09:51.189057 | 2019-04-08T17:00:46 | 2019-04-08T17:00:46 | 175,872,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # Databricks notebook source
from pyspark.sql import SparkSession
# COMMAND ----------
# Exported Databricks notebook; "# COMMAND ----------" lines separate cells.
spark = SparkSession.builder.appName("Kmeans").getOrCreate()
# COMMAND ----------
# Load the session-level features (schema inferred from the CSV header).
data = spark.read.csv("/FileStore/tables/hack_data.csv", inferSchema=True, header=True)
# COMMAND ----------
data.printSchema()
# COMMAND ----------
from pyspark.ml.clustering import KMeans
# COMMAND ----------
from pyspark.ml.feature import VectorAssembler
# COMMAND ----------
data.columns
# COMMAND ----------
# Pack the numeric feature columns into a single "features" vector column.
assembler = VectorAssembler(inputCols=['Session_Connection_Time',
                                       'Bytes Transferred',
                                       'Kali_Trace_Used',
                                       'Servers_Corrupted',
                                       'Pages_Corrupted',
                                       'WPM_Typing_Speed'], outputCol="features")
# COMMAND ----------
final_data = assembler.transform(data)
# COMMAND ----------
final_data.printSchema()
# COMMAND ----------
from pyspark.ml.feature import StandardScaler
# COMMAND ----------
# Scale features to unit variance so no single feature dominates the distances.
scaler = StandardScaler(inputCol="features", outputCol="Scaled Features")
# COMMAND ----------
scaler_model = scaler.fit(final_data)
# COMMAND ----------
cluster_final_data = scaler_model.transform(final_data)
# COMMAND ----------
# Fit candidate models with k=2 and k=3 on the scaled features.
kmeans2 = KMeans(featuresCol="Scaled Features", k=2)
# COMMAND ----------
kmeans3 = KMeans(featuresCol="Scaled Features", k=3)
# COMMAND ----------
model_k2 = kmeans2.fit(cluster_final_data)
model_k3 = kmeans3.fit(cluster_final_data)
# COMMAND ----------
# Compare cluster occupancy for each k (presumably to pick the k whose
# clusters are evenly sized -- confirm against the exercise description).
model_k3.transform(cluster_final_data).groupBy("prediction").count().show()
# COMMAND ----------
model_k2.transform(cluster_final_data).groupBy("prediction").count().show()
# COMMAND ----------
| [
"rrazopardc@eafit.edu.co"
] | rrazopardc@eafit.edu.co |
55c5e4126f52501d3ab1f9cd4f9c49c47dc30d18 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/ZXR10-MACPING-MIB.py | 805cbd59b0fb3a90dcafa3b37ef03e6abdf405d0 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 12,798 | py | #
# PySNMP MIB module ZXR10-MACPING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZXR10-MACPING-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:42:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
iso, Bits, ModuleIdentity, Gauge32, Unsigned32, enterprises, IpAddress, Counter32, experimental, ObjectIdentity, MibIdentifier, NotificationType, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, mgmt, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Bits", "ModuleIdentity", "Gauge32", "Unsigned32", "enterprises", "IpAddress", "Counter32", "experimental", "ObjectIdentity", "MibIdentifier", "NotificationType", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "mgmt", "Counter64")
TruthValue, DisplayString, RowStatus, MacAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "RowStatus", "MacAddress", "TextualConvention")
zxr10L2vpn, = mibBuilder.importSymbols("ZXR10-SMI", "zxr10L2vpn")
zxr10MacPingMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4))
class DisplayString(OctetString):
    # Generated by pysmi: local textual-convention stub that shadows the
    # DisplayString imported from SNMPv2-TC; behaves like a plain OctetString.
    pass
class OptionType(Integer32):
    # Enumerated INTEGER restricted to {0, 1}; names suggest the usual
    # L2VPN roles -- presumably 0 = customer edge, 1 = provider edge
    # (verify against the original ZXR10-MACPING-MIB ASN.1 source).
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
    namedValues = NamedValues(("ce", 0), ("pe", 1))
zxr10MacPingTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1), )
if mibBuilder.loadTexts: zxr10MacPingTable.setStatus('current')
zxr10MacPingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacSerial"))
if mibBuilder.loadTexts: zxr10MacPingEntry.setStatus('current')
zxr10PingMacSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacSerial.setStatus('current')
zxr10PingMacDestMac = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 2), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacDestMac.setStatus('current')
zxr10PingMacControlOutEtherIf = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlOutEtherIf.setStatus('current')
zxr10PingMacIfOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("none", 0), ("option", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfOption.setStatus('current')
zxr10PingMacPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPacketCount.setStatus('current')
zxr10PingMacTimeOut = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTimeOut.setStatus('current')
zxr10PingMacHops = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacHops.setStatus('current')
zxr10PingMacControlResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlResultType.setStatus('current')
zxr10PingMacTrapOncompletion = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 9), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTrapOncompletion.setStatus('current')
zxr10PingMacRosStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("not-active", 1), ("start-ping", 2), ("ping-processing", 3), ("ping-completed", 4))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacRosStatus.setStatus('current')
zxr10PingMacEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacEntryOwner.setStatus('current')
zxr10PingMacIfPeOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 12), OptionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfPeOption.setStatus('current')
zxr10PingMacVfiName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 13), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacVfiName.setStatus('current')
zxr10PingMacPeerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 14), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPeerAddress.setStatus('current')
zxr10PingMacResultTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2), )
if mibBuilder.loadTexts: zxr10PingMacResultTable.setStatus('current')
zxr10pingMacResultEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"))
if mibBuilder.loadTexts: zxr10pingMacResultEntry.setStatus('current')
zxr10PingMacResultSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSerial.setStatus('current')
zxr10PingMacResultSentPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSentPkts.setStatus('current')
zxr10PingMacResultRcvPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRcvPkts.setStatus('current')
zxr10PingMacResultRoundTripMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMinTime.setStatus('current')
zxr10PingMacResultRoundTripMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMaxTime.setStatus('current')
zxr10PingMacResultRoundTripAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripAvgTime.setStatus('current')
zxr10PingMacResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultType.setStatus('current')
zxr10PingMacExtResultDestIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestIfName.setStatus('current')
zxr10PingMacExtResultDestHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestHostName.setStatus('current')
zxr10PingMacExtResultSourceIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceIfName.setStatus('current')
zxr10PingMacExtResultSourceHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceHostName.setStatus('current')
zxr10PingMacExtResultOutVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultOutVlanId.setStatus('current')
zxr10PingMacExtResultInVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultInVlanId.setStatus('current')
zxr10PingMacResultEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultEntryOwner.setStatus('current')
zxr10PingMacResultRoundWobbleMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMinTime.setStatus('current')
zxr10PingMacResultRoundWobbleMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMaxTime.setStatus('current')
zxr10PingMacResultRoundWobbleAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleAvgTime.setStatus('current')
macpingNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3))
macpingTrapResult = NotificationType((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3, 1)).setObjects(("ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultSentPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRcvPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMinTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMaxTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripAvgTime"))
if mibBuilder.loadTexts: macpingTrapResult.setStatus('current')
mibBuilder.exportSymbols("ZXR10-MACPING-MIB", zxr10PingMacResultRoundTripAvgTime=zxr10PingMacResultRoundTripAvgTime, zxr10MacPingMIB=zxr10MacPingMIB, zxr10PingMacPeerAddress=zxr10PingMacPeerAddress, zxr10PingMacTimeOut=zxr10PingMacTimeOut, macpingNotifications=macpingNotifications, zxr10PingMacEntryOwner=zxr10PingMacEntryOwner, zxr10PingMacRosStatus=zxr10PingMacRosStatus, zxr10PingMacIfOption=zxr10PingMacIfOption, zxr10PingMacResultRoundWobbleAvgTime=zxr10PingMacResultRoundWobbleAvgTime, zxr10PingMacResultTable=zxr10PingMacResultTable, OptionType=OptionType, zxr10MacPingTable=zxr10MacPingTable, zxr10PingMacPacketCount=zxr10PingMacPacketCount, zxr10PingMacResultRcvPkts=zxr10PingMacResultRcvPkts, zxr10PingMacSerial=zxr10PingMacSerial, zxr10pingMacResultEntry=zxr10pingMacResultEntry, zxr10PingMacResultRoundWobbleMinTime=zxr10PingMacResultRoundWobbleMinTime, zxr10PingMacResultRoundTripMinTime=zxr10PingMacResultRoundTripMinTime, zxr10MacPingEntry=zxr10MacPingEntry, zxr10PingMacHops=zxr10PingMacHops, zxr10PingMacIfPeOption=zxr10PingMacIfPeOption, zxr10PingMacResultSerial=zxr10PingMacResultSerial, DisplayString=DisplayString, zxr10PingMacExtResultSourceHostName=zxr10PingMacExtResultSourceHostName, zxr10PingMacResultEntryOwner=zxr10PingMacResultEntryOwner, zxr10PingMacControlOutEtherIf=zxr10PingMacControlOutEtherIf, zxr10PingMacResultSentPkts=zxr10PingMacResultSentPkts, zxr10PingMacResultType=zxr10PingMacResultType, zxr10PingMacResultRoundWobbleMaxTime=zxr10PingMacResultRoundWobbleMaxTime, zxr10PingMacResultRoundTripMaxTime=zxr10PingMacResultRoundTripMaxTime, zxr10PingMacExtResultDestIfName=zxr10PingMacExtResultDestIfName, zxr10PingMacExtResultDestHostName=zxr10PingMacExtResultDestHostName, macpingTrapResult=macpingTrapResult, zxr10PingMacVfiName=zxr10PingMacVfiName, zxr10PingMacExtResultOutVlanId=zxr10PingMacExtResultOutVlanId, zxr10PingMacExtResultSourceIfName=zxr10PingMacExtResultSourceIfName, zxr10PingMacControlResultType=zxr10PingMacControlResultType, 
zxr10PingMacExtResultInVlanId=zxr10PingMacExtResultInVlanId, zxr10PingMacDestMac=zxr10PingMacDestMac, zxr10PingMacTrapOncompletion=zxr10PingMacTrapOncompletion)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
aafbc6488301d7e48ce363affc42a6a4fdd24a02 | 5fa4b8a36eec770bd740b6016030d2843cac8329 | /trial_scripts/do_multiprocessing.py | e3269fc1eac7ab4e43440377e0b0e23ed103b1c8 | [] | no_license | sysang/word-prepresentation-training | 79ffe4355b2f66dfd7c09625cc430dd65815c937 | 79565d8f69c31f4938f079517db7ff7c53ec54aa | refs/heads/master | 2022-12-22T10:22:52.649259 | 2020-10-03T17:04:08 | 2020-10-03T17:04:08 | 293,590,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | from multiprocessing import Process
from multiprocessing.sharedctypes import RawValue
import ctypes
def f(n):
    """Child-process worker: overwrite the shared object's ``value`` slot.

    n -- an object exposing a writable ``value`` attribute
         (here a multiprocessing RawValue).
    """
    greeting = 'hello!!'
    n.value = greeting
if __name__ == '__main__':
    # Shared raw value holding a ctypes wide-char *pointer*, initial 'abc'.
    # NOTE(review): the multiprocessing docs warn that c_char_p/c_wchar_p
    # store a pointer, so a string assigned in the child may reference the
    # child's address space -- this trial script may only appear to work;
    # confirm before relying on it (an Array('u', ...) is the safe form).
    num = RawValue(ctypes.c_wchar_p, 'abc')
    p = Process(target=f, args=(num,))
    p.start()
    p.join()  # wait for the child before reading the shared value
    print(num.value)
| [
"daosysang@gmail.com"
] | daosysang@gmail.com |
7ef2579880b9b7ec614ed66ecd323b2e3604e749 | 6eaca1b3ada96264bdad964652c19365f982025a | /QPainter/__init__.py | 0a9a28d278c61ebd50c91b5166dc7748582e2115 | [] | no_license | RahulARanger/My_Qt-Py_Book | 4c7e4dfc9a1d1ec8a587d3bbb722fc64f6de1008 | 396280e9110d11c9c297bf83f332411b98c98453 | refs/heads/master | 2023-08-15T01:42:33.415854 | 2021-10-01T19:44:50 | 2021-10-01T19:44:50 | 320,230,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | import RashSetup.__RashModules__.Rash.ApplicationManager
from .MemeGen import *
class UTIL(TabWindow):
    """Tab window hosting the meme generator inside the Rash application."""

    def __init__(self, shared: dict):
        # The shared registry carries the running Rash application under "RASH".
        rash_app: RashSetup.__RashModules__.Rash.ApplicationManager.RashMain = shared["RASH"]
        super().__init__(rash_app)
        self.Generator = MemeGenerator(self)
        self.easeAdd(self.Generator, "SpongeBob")
| [
"saihanumarahul66@gmail.com"
] | saihanumarahul66@gmail.com |
672f47dbc06ff7e663a43bfdf34432fe9a92e2f4 | 5875c68d4e34193b9e565a6f34469612cfdc649c | /pyMap_0.9.4/pyCursors.py | a63f9c2bdf12abc465b5df4d587e61b1599a645e | [] | no_license | Naxs-me/Software_development_tycoon | 59d7059fb21b1655b05ad0057e17033603ec7377 | b8a6166589a6231e607001ef84f927d2d15792c0 | refs/heads/master | 2020-12-15T00:13:25.496993 | 2020-01-19T16:01:03 | 2020-01-19T16:01:03 | 234,924,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | #31 lines of code (7/21/2012)
import pygame
import os
#the images size must be a multiple of 8
#the image must contain only 3 colors
#(0,0,0)black, (255,255,255)white, (255,0,255)tranparent(pink)
def set_cursor_from_image(image, hotspot=(0, 0)):
    """Load *image* and install it as the pygame mouse cursor.

    The image's width and height must both be multiples of 8 (a pygame
    cursor requirement) and it may contain only three colors:
    white (255,255,255) -> 'X' pixel, black (0,0,0) -> '.' pixel,
    pink (255,0,255) -> transparent.  Images failing the size check are
    silently ignored, matching the original behavior.

    image   -- path to the cursor image file
    hotspot -- (x, y) click point inside the cursor
    """
    img = pygame.image.load(image).convert()
    w, h = img.get_size()
    strings = []
    size = (w, h)
    if w % 8 == 0 and h % 8 == 0:
        black = pygame.Color(0, 0, 0, 255)
        white = pygame.Color(255, 255, 255, 255)
        trans = pygame.Color(255, 0, 255, 255)
        img.lock()
        try:
            # range (not py2-only xrange) keeps this working on Python 2 and 3.
            for r in range(w):
                pix_str = ""
                for c in range(h):
                    color = img.get_at((r, c))
                    # Colors are mutually exclusive, so elif avoids re-testing.
                    if color == white:
                        pix_str += 'X'
                    elif color == black:
                        pix_str += '.'
                    elif color == trans:
                        pix_str += ' '
                strings.append(pix_str)
        finally:
            # Always release the surface lock, even if get_at raises.
            img.unlock()
        # NOTE(review): rows are built along x (r) with pixels along y (c);
        # for non-square cursors verify the orientation matches what
        # pygame.cursors.compile expects.
        new_cursor = pygame.cursors.compile(strings)
        pygame.mouse.set_cursor(size, hotspot, *new_cursor)
| [
"naxs.me@gmail.com"
] | naxs.me@gmail.com |
eba5e24cb7ae539f05831d88b27d99b2346a8f0a | ec9129d3eb1880df9f0b54c76510352a7e004b0c | /tools/make_vps_tarball.py | b03537feaa59ec1a6a93c522cfd621963bf12eba | [] | no_license | eugen-don/vps | 4057e6ddb1db274dbd8d78fa926376cfc3a40aa7 | 6a16569868241b35d8137b7f2b2f8db0cf67ff55 | refs/heads/master | 2021-01-11T16:29:53.109075 | 2014-05-14T09:20:33 | 2014-05-14T09:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
import sys
import os
import _env
import ops.os_init as os_init
import conf
assert conf.OS_IMAGE_DIR and os.path.isdir(conf.OS_IMAGE_DIR)
def usage():
print """usage: \n%s [image_path/partion_path] [tarball_dir]
""" % (sys.argv[0])
def main():
if len(sys.argv) < 3:
usage()
os._exit(0)
img_path = sys.argv[1]
tarball_dir = sys.argv[2]
if not os.path.exists(img_path):
print "%s not exists" % (img_path)
os._exit(1)
if not os.path.isdir(tarball_dir):
print '%s is not a directory' % (tarball_dir)
os._exit(1)
tarball_path = os_init.pack_vps_fs_tarball(img_path, tarball_dir)
print "%s packed in %s" % (img_path, tarball_path)
if "__main__" == __name__:
main()
| [
"frostyplanet@gmail.com"
] | frostyplanet@gmail.com |
246ec729ab0710529af7fd9594413b7242ed91fb | aba0b5002c040fa1b20bae5d7ac81c601395901f | /vistrails/packages/pandas/identifiers.py | 63685ce6e7907a39552e23085e227ce9fd8bac89 | [
"BSD-3-Clause"
] | permissive | skylogic004/VisTrails | 2673ca04160e776db17811d98b070f70e1d2e385 | bc0d95ceac6e75d6ffb083e8cdab8c62a90d4b00 | refs/heads/master | 2021-06-23T01:16:16.697903 | 2017-08-24T21:28:33 | 2017-08-24T21:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from __future__ import division, print_function
# Package metadata constants -- presumably consumed by the VisTrails
# package registry (exact consumer not visible in this file; verify).
identifier = 'org.vistrails.vistrails.pandas'
name = 'pandas'
version = '0.0.1'
"matt@skylogic.ca"
] | matt@skylogic.ca |
f716de44a80a10f01bfaa8b3a8d58b4ec092c945 | dbe1f4110921a08cb13e22ea325d503bd5627195 | /chuhuo_2.71/bluedon/monitor/sbin/checkproc.py | cd3521785adb14ce48baf65ec961b05655ab0e50 | [] | no_license | Hehouhua/waf_branches | 92dc1b1cbecba20f24ef6c7372dde7caa43f9158 | ca76f3a1ed8150b423474c9e37aee37841a5ee35 | refs/heads/main | 2023-01-07T11:33:31.667688 | 2020-11-03T06:58:33 | 2020-11-03T06:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import os, re, sys
rexplogstart = re.compile(r'grep logstart.pl')
rexpwebvisit = re.compile(r'grep webvisit.pl')
def checklogstart():
    """Ensure the WAF log directories exist and start logstart.pl if it
    is not already running.

    The process check counts `ps ax` lines matching 'grep logstart.pl';
    when every returned line is the grep itself (matches >= line count),
    the real daemon is assumed absent and is launched.
    """
    if not os.path.exists("/usr/local/bdwaf/logs_bridge/data"):
        os.popen("mkdir -p /usr/local/bdwaf/logs_bridge/data")
    if not os.path.exists("/usr/local/bdwaf/logs_proxy/data"):
        os.popen("mkdir -p /usr/local/bdwaf/logs_proxy/data")
    ps_lines = os.popen('ps ax | grep logstart.pl').readlines()
    match_count = sum(1 for ps_line in ps_lines if rexplogstart.search(ps_line))
    if match_count >= len(ps_lines):
        os.system('/usr/local/bluedon/monitor/sbin/logstart.pl')
def checkwebvisit():
    """Start webvisit.pl when it is not already running.

    Bug fix: the original searched each `ps` line with rexplogstart
    (a copy-paste from checklogstart), so the compiled rexpwebvisit
    pattern (module level, 'grep webvisit.pl') was never used and the
    check inspected the wrong process name.
    """
    flag = 0
    pfp = os.popen('ps ax | grep webvisit.pl')
    lines = pfp.readlines()
    for line in lines:
        # Count lines that are the grep itself; if all lines are grep,
        # the real daemon is not running.
        match = rexpwebvisit.search(line)
        if match:
            flag += 1
    if flag >= len(lines):
        os.system('/usr/local/bluedon/monitor/sbin/webvisit.pl')
if __name__ == '__main__':
    # Run both health checks once per invocation (intended for cron).
    checklogstart()
    checkwebvisit()
| [
"hanson_wong@qq.com"
] | hanson_wong@qq.com |
fb2e193a24ae586d0c3d286e0fec5f4ca52eaf14 | 674f1ecdd8a196b5a271b556ed7e4d274fde63a1 | /article/migrations/0002_auto_20161129_2304.py | 65e17784a9ab696ab9749961108d38c587c88ee8 | [] | no_license | baby5/Django-Blog | fc57c06bac110c56662bcea20eb9c18579d20827 | 1e2f1a8b0589d87dea023d7e6d78376d0880ca27 | refs/heads/master | 2021-01-13T13:19:13.836449 | 2016-12-20T10:49:23 | 2016-12-20T10:49:23 | 72,647,232 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 15:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('created_time', models.DateTimeField(auto_now_add=True)),
('last_modified_time', models.DateTimeField(auto_now=True)),
],
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-last_modified_time']},
),
migrations.RenameField(
model_name='article',
old_name='date_time',
new_name='created_time',
),
migrations.AddField(
model_name='article',
name='abstract',
field=models.CharField(blank=True, help_text=b'arbitrary', max_length=54, null=True),
),
migrations.AddField(
model_name='article',
name='last_modified_time',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='article',
name='likes',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='article',
name='status',
field=models.CharField(choices=[(b'd', b'Draft'), (b'p', b'Published')], default=b'd', max_length=1),
),
migrations.AddField(
model_name='article',
name='topped',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='article',
name='views',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='article',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='article.Category'),
),
]
| [
"zxnzysj@163.com"
] | zxnzysj@163.com |
185a3393a192094de5e11ae5133799e98d58a651 | 9b04206109e36d5f4f7cc4820546546ac239c5e0 | /greedy/ATM_problem.py | 39cc9ea015a03ed7d3442b6e7512c88cda49fc4d | [] | no_license | joon3007/Algorithm | 28417fffde40a79aac54375b57b31071dcf6bc4d | e45b6379f67272db0997156deca5713aa2113348 | refs/heads/master | 2022-12-14T01:33:25.050675 | 2020-09-09T12:36:02 | 2020-09-09T12:36:02 | 291,960,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | '''
description
인하은행에는 ATM이 1대밖에 없다. 지금 이 ATM앞에 N명의 사람들이 줄을 서있다.
사람은 1번부터 N번까지 번호가 매겨져 있으며, i번 사람이 돈을 인출하는데 걸리는 시간은 Pi분이다.
사람들이 줄을 서는 순서에 따라서, 돈을 인출하는데 필요한 시간의 합이 달라지게 된다.
예를 들어, 총 5명이 있고, P1 = 3, P2 = 1, P3 = 4, P4 = 3, P5 = 2 인 경우를 생각해보자.
[1, 2, 3, 4, 5] 순서로 줄을 선다면, 1번 사람은 3분만에 돈을 뽑을 수 있다.
2번 사람은 1번 사람이 돈을 뽑을 때 까지 기다려야 하기 때문에, 3+1 = 4분이 걸리게 된다.
3번 사람은 1번, 2번 사람이 돈을 뽑을 때까지 기다려야 하기 때문에, 총 3+1+4 = 8분이 필요하게 된다.
4번 사람은 3+1+4+3 = 11분, 5번 사람은 3+1+4+3+2 = 13분이 걸리게 된다.
이 경우에 각 사람이 돈을 인출하는데 필요한 시간의 합은 3+4+8+11+13 = 39분이 된다.
줄을 [2, 5, 1, 4, 3] 순서로 줄을 서면, 2번 사람은 1분만에, 5번 사람은 1+2 = 3분,
1번 사람은 1+2+3 = 6분, 4번 사람은 1+2+3+3 = 9분, 3번 사람은 1+2+3+3+4 = 13분이 걸리게 된다.
각 사람이 돈을 인출하는데 필요한 시간의 합은 1+3+6+9+13 = 32분이다.
이 방법보다 더 필요한 시간의 합을 최소로 만들 수는 없다.
줄을 서 있는 사람의 수 N과 각 사람이 돈을 인출하는데 걸리는 시간 Pi가 주어졌을 때,
각 사람이 돈을 인출하는데 필요한 시간의 합의 최솟값을 구하는 프로그램을 작성하시오.
input
첫째 줄에 사람의 수 N(1 ≤ N ≤ 1,000)이 주어진다. 둘째 줄에는 각 사람이 돈을 인출하는데 걸리는 시간 Pi가 주어진다. (1 ≤ Pi ≤ 1,000)
output
첫째 줄에 각 사람이 돈을 인출하는데 필요한 시간의 합의 최솟값을 출력한다.
'''
num = int(input())
times = list(map(int, input().split()))
times.sort()
result = 0
time = 0
for i in times:
time += i
result += time
print(result) | [
"joon4141@gmail.com"
] | joon4141@gmail.com |
39042a14dedf3d1a3d6e06d5f15a0915493b8514 | 66a967fac0bc5dfdfe28ad0fd5464ed9113429bd | /HobbyCoding/src/ListPermutation.py | 6e6d6a9ef2a7a7f0a2211dc22bed93437611220c | [
"Apache-2.0"
] | permissive | inspectorG4dget/Hobby-Coding | a37430320e7a74805bc7740933e217d004fa9714 | 41e82dbcc73e328b43bebd037b2df414f0837ca6 | refs/heads/master | 2020-12-24T17:17:37.589058 | 2012-07-10T05:18:56 | 2012-07-10T05:18:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | '''
Created on Oct 4, 2010
@author: ashwin
Licensed to Ashwin Panchapakesan under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
Ashwin licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def permute(L):
    """Return a list of all permutations of L, each as a new list.

    Bug fix: the original returned from inside the first loop iteration,
    never looped over the remaining pivots, and its length-1 case fell
    through without a return -- so any input of length >= 1 eventually
    produced ``[x] + None`` and raised TypeError.  This version is a
    standard recursive permutation generator.  The empty-list contract
    (``permute([]) == []``) is preserved.
    """
    if not L:
        return []
    if len(L) == 1:
        return [L[:]]
    perms = []
    for i in range(len(L)):
        pivot = L[i]
        rest = L[:i] + L[i + 1:]
        for tail in permute(rest):
            perms.append([pivot] + tail)
    return perms
def includeMembers(L):
    # NOTE(review): broken as written.  The else-branch has no return, so
    # a non-empty list makes the recursive call return None (TypeError on
    # [-1]); and once the base case returns [], indexing [][-1] raises
    # IndexError.  The intended behavior cannot be determined from this
    # file -- needs the author before it can be fixed.
    if not L:
        return L
    else:
        for i in L[0]:
            includeMembers(L[1:])[-1] += i
if __name__ == "__main__":
    # Python 2 print statement.  This demo call raises IndexError, because
    # includeMembers recurses to the empty-list base case and then indexes
    # [][-1].
    print includeMembers(['asdf', 'jkl;'])
"topgunzurhero@gmail.com"
] | topgunzurhero@gmail.com |
ea6bb392af9c9e6b8d6c5ecb56a68b0cb11577a6 | 7040d642877f70360ca88a065ccf92b3c63dfd7b | /剑指 Offer 18. 删除链表的节点.py | f351503d1cc241f162b76a62e9ddfe892195285b | [
"BSD-2-Clause"
] | permissive | YuLili-git/leetcode_offer | 077fb1864f1c8e3258f5b9f065b7c0e71c8ccf8f | 268940aa4e57a02fe635b7d6f6038f2b204ca968 | refs/heads/main | 2023-08-24T19:07:37.650616 | 2021-10-13T16:07:28 | 2021-10-13T16:07:28 | 370,324,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | #给定单向链表的头指针和一个要删除的节点的值,定义一个函数删除该节点。
#返回删除后的链表的头节点。
#注意:此题对比原题有改动
#示例 1:
#输入: head = [4,5,1,9], val = 5
#输出: [4,1,9]
#解释: 给定你链表中值为 5 的第二个节点,那么在调用了你的函数之后,该链表应变为 4 -> 1 -> 9.
#示例 2:
#输入: head = [4,5,1,9], val = 1
#输出: [4,5,9]
#解释: 给定你链表中值为 1 的第三个节点,那么在调用了你的函数之后,该链表应变为 4 -> 5 -> 9.
#说明:
#题目保证链表中节点的值互不相同
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteNode(self, head: ListNode, val: int) -> ListNode:
        """Remove the first node whose value equals ``val`` and return the
        (possibly new) head.  The problem guarantees node values are
        distinct and that ``val`` is present.
        """
        # Deleting the head needs no predecessor bookkeeping.
        if head.val == val:
            return head.next
        # Walk prev until the node *after* it is the one to remove.
        prev = head
        while prev.next is not None and prev.next.val != val:
            prev = prev.next
        if prev.next is not None:
            prev.next = prev.next.next
        return head
| [
"noreply@github.com"
] | YuLili-git.noreply@github.com |
3acd601e6d39cf8b48f57ba59897836edd48fc79 | 59812860bc22356059bc5bf59a784c8535978b25 | /utils.py | 26243da58bf66dabbe19372cb62d5a0fae473788 | [] | no_license | Folifolo/backprop | 049c1f07b839e0f939903da601c11a31938a8cd5 | afe938aac37cf3e86778a33e17469dbf74a7961e | refs/heads/master | 2020-08-18T01:18:54.810518 | 2019-11-10T10:37:05 | 2019-11-10T10:37:05 | 215,731,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | import numpy as np
def relu(X):
    """Element-wise rectified linear unit: X where X > 0, else 0."""
    positive_mask = X > 0
    return positive_mask * X
def reluD(X):
    """Subgradient of relu: boolean mask, True where X is positive."""
    return np.greater(X, 0)
#=====================
def calculate_E(label, prediction, size):
    """Cross-entropy loss averaged over ``size`` samples.

    label      -- one-hot targets, shape (N, C)
    prediction -- class probabilities, shape (C, N) (transposed to match)
    size       -- divisor, normally N
    """
    log_probs = np.log(prediction.T)
    return -np.sum(label * log_probs) / size
def calculate_acc(label, prediction):
    """Classification accuracy.

    label      -- one-hot targets, shape (N, C) (argmax over axis 1)
    prediction -- scores, shape (C, N) (argmax over axis 0)
    """
    predicted_classes = np.argmax(prediction, axis=0)
    true_classes = np.argmax(label, axis=1)
    return np.mean(predicted_classes == true_classes)
| [
"Folifolo@yandex.ru"
] | Folifolo@yandex.ru |
dc95cfc1d53773ef74245ed5c8a5b6bbbf3ce933 | 65e076e4fcc00a67faa0932b3f3a3d3a3a11e2aa | /sdk/python/pulumi_google_native/datastore/v1/_enums.py | 15df09472641b2ebbeb23bd87aeab08fb357fbf9 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | TheJaySmith-Google/pulumi-google-native | 816babe5c7316724e02d5b8b9d789df00262bb8e | 566c295a39fe8c3dd16e4a7894ff6de72423e5da | refs/heads/master | 2023-06-05T06:45:19.979837 | 2021-06-23T11:42:27 | 2021-06-23T11:42:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'GoogleDatastoreAdminV1IndexedPropertyDirection',
'IndexAncestor',
]
class GoogleDatastoreAdminV1IndexedPropertyDirection(str, Enum):
    """
    Required. The indexed property's direction. Must not be DIRECTION_UNSPECIFIED.
    """
    # Generated by the Pulumi SDK generator (see file header) -- do not edit
    # by hand.  DIRECTION_UNSPECIFIED is the rejected sentinel per the
    # docstring above.
    DIRECTION_UNSPECIFIED = "DIRECTION_UNSPECIFIED"
    ASCENDING = "ASCENDING"
    DESCENDING = "DESCENDING"
class IndexAncestor(str, Enum):
    """
    Required. The index's ancestor mode. Must not be ANCESTOR_MODE_UNSPECIFIED.
    """
    # Generated by the Pulumi SDK generator (see file header) -- do not edit
    # by hand.  ANCESTOR_MODE_UNSPECIFIED is the rejected sentinel per the
    # docstring above.
    ANCESTOR_MODE_UNSPECIFIED = "ANCESTOR_MODE_UNSPECIFIED"
    NONE = "NONE"
    ALL_ANCESTORS = "ALL_ANCESTORS"
| [
"noreply@github.com"
] | TheJaySmith-Google.noreply@github.com |
ef168493665590dfa9c2c362d6e87e14550a7162 | 1e1ab6aba8ab3d05fe61df3b6a5fabbcdd00676a | /e_commerce_app/api/migrations/0002_remove_event_redundancy.py | 86c973a2de49034f1f646a2664d9eaf5bda0ec1e | [] | no_license | Batuhanipekci/E-Commerce | 4f548f3e59cfa68c422f91419a53dadf175dcad3 | 45350d74e344686f619c1f9c50dac08e8c6eebe2 | refs/heads/master | 2023-06-02T01:05:44.647508 | 2021-06-22T20:19:13 | 2021-06-22T20:19:13 | 378,535,014 | 1 | 0 | null | 2021-06-22T20:19:14 | 2021-06-20T01:26:24 | Python | UTF-8 | Python | false | false | 533 | py | # Generated by Django 3.0.7 on 2021-06-20 22:41
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the redundant ``event`` field from
    the KRDetailsView, KRTransaction and KRCounter models."""

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='krdetailsview',
            name='event',
        ),
        migrations.RemoveField(
            model_name='krtransaction',
            name='event',
        ),
        migrations.RemoveField(
            model_name='krcounter',
            name='event',
        ),
    ]
| [
"batuhanipekci@hotmail.com"
] | batuhanipekci@hotmail.com |
04ae589706bee6d73d70525a05dd97e1c16387fc | bf45d6fe3d0c6ee6e74c0c63c4206eee72361383 | /sketchit/draw.py | 58ebbb85db158fb5ff66bb82afb2a06c4ddb2b3d | [
"MIT"
] | permissive | tambibhavika2000/sketchme | 00d6273b5b4523dc8a1e5f3d22fd58790af80896 | 00c7ccff4531d48fb5ef2c403c4bb0e0b1c749bd | refs/heads/main | 2023-07-13T06:32:13.071137 | 2021-09-01T12:58:01 | 2021-09-01T12:58:01 | 402,060,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import cv2
def sketchit(path):
    """Render a pencil-sketch version of the image at *path*.

    The result is written to 'sketch.png' in the current directory.

    Raises IOError if the image cannot be read (the original crashed
    later inside cvtColor with a cryptic OpenCV error, because
    cv2.imread returns None instead of raising on failure).
    """
    image = cv2.imread(path)
    if image is None:
        raise IOError("could not read image: %s" % path)
    grey_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    invert = cv2.bitwise_not(grey_img)
    blur = cv2.GaussianBlur(invert, (21, 21), 0)
    inverted_blur = cv2.bitwise_not(blur)
    # Color-dodge blend: dividing grey by the inverted blur brightens
    # everything except strong edges, producing the sketch look.
    sketch = cv2.divide(grey_img, inverted_blur, scale=256.0)
    cv2.imwrite('sketch.png', sketch)
if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on input().
    path = input("Enter Path of Image: ")
    sketchit(path)
| [
"noreply@github.com"
] | tambibhavika2000.noreply@github.com |
aa1a467cc3e72429fddfc6663939baa04bc9e374 | bc073560803464da166d661e916d21ad51b2c80e | /files/scripts/contact_detector.py | 5ac2e00abc742896c576349cf11dd4b994ec5bc7 | [] | no_license | SDU-Embedded/event_processors | 680edb4a8107a2661407f43be933795ef0a1e987 | bdea5bbcab7d39f7b1746d1f391c494ffa0fd39d | refs/heads/master | 2021-07-26T21:41:26.831474 | 2020-05-04T07:03:53 | 2020-05-04T07:03:53 | 165,830,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from event_processors import EventProcessor
from event_listeners import PerchEventListener
from state_monitors import LinearStateMonitor
from metric_processors import ProbabilityProcessor
from thresholders import Thresholder
from event_builders import EventBuilder
from event_emitters import EventEmitter
if __name__ == "__main__":
    # --- Event listener (only cage/bird 1 is active in this deployment) ---
    cage1_event_listener = PerchEventListener('manna,hou,bisnap', 'ats_perch', bird=1)
    #cage2_event_listener = PerchEventListener('manna,hou,bisnap', 'ats_perch', bird=2)

    # --- State monitor: low-pass filters the perch on/off signal ---
    cage1_state_monitor = LinearStateMonitor(period=0.1, upwards_gain=0.1, downwards_gain=0.5)
    #cage2_state_monitor = LinearStateMonitor(period=0.1, upwards_gain=0.1, downwards_gain=0.5)
    cage1_event_listener.stateTransitionCallback = cage1_state_monitor.setState

    # --- Metric processor: aggregates monitor probabilities ---
    metric_processor = ProbabilityProcessor(period=0.1)
    metric_processor.getters.append(cage1_state_monitor.getProbability)

    # --- Thresholder with hysteresis ---
    thresholder = Thresholder(upwards_threshold=0.45, downwards_threshold=0.15)
    metric_processor.setters.append(thresholder.evaluate)

    # --- Event builder ---
    builder = EventBuilder(bird="1", type="ats_contact")
    thresholder.emitEvent = builder.evaluate

    # --- Event emitter ---
    emitter = EventEmitter('manna,hou,bisnap', 'ats_contact')
    builder.send = emitter.send

    # --- Assemble and run the event processor ---
    # Bug fix: the original also appended cage2_event_listener and
    # cage2_state_monitor, whose constructors are commented out above,
    # which raised NameError at startup.  Dead commented example code at
    # the end of the file was removed as well.
    event_processor = EventProcessor()
    event_processor.tasks.append(cage1_event_listener)
    event_processor.tasks.append(cage1_state_monitor)
    event_processor.tasks.append(metric_processor)
    event_processor.run()
| [
"lelar09@student.sdu.dk"
] | lelar09@student.sdu.dk |
29688ecf8b3300c70dbfd3ba0946cd5fffb4b583 | 843798667698d041a0097cc3d08847a27d9ec08f | /transaction/forms.py | 0c8ea7a6761f65784fa2c37bb85381cd3f50a348 | [] | no_license | jaredtmartin/jade | d1faa6bd657a3c9ee8726e8178ee53a5687c1e7d | f627d4a3939c50443e7643909b036a9d9e283b9e | refs/heads/master | 2021-01-18T14:03:08.906498 | 2011-05-06T20:07:07 | 2011-05-06T20:07:07 | 901,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,952 | py | from jade.transaction.models import *
from jade.common.widgets import AutoCompleteField
from django.utils.safestring import mark_safe
from django import forms
from jade.common.widgets import CalanderInput
from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.translation import ugettext as _
def modelformset_factory(*args, **kwargs):
    """
    Returns a FormSet class for the given Django model class.
    Change its as_table function to show the forms as rows
    """
    # 'prefix' is popped here so it is NOT forwarded to Django's factory; it
    # is instead baked into the generated class via get_default_prefix below.
    prefix=kwargs.pop('prefix',None)
    can_delete=kwargs.get('can_delete',False)
    def get_default_prefix(cls): return prefix
    def as_table(self):
        "Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
        form_list = u' '.join([form.as_row() for form in self.forms])
        # Use an unbound form to render the header row of labels; add the
        # DELETE column header when the formset allows deletion.
        header_form=self.form()
        if can_delete: header_form.fields[forms.formsets.DELETION_FIELD_NAME] = forms.fields.BooleanField(label=_(u'Delete'), required=False)
        header=header_form.as_header_row()
        return mark_safe(u'\n'.join([unicode(self.management_form),header, form_list]))
    def _construct_form(self, i, **kwargs):
        """
        Instantiates and returns the i-th form instance in a formset.
        """
        # Extends Django's stock _construct_form by also passing formset_id
        # and group, so each row form knows its index and owning formset
        # (consumed by RowForm.__init__ / as_row()).
        defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i), 'formset_id':i, 'group':self.prefix}
        if self.data or self.files:
            defaults['data'] = self.data
            defaults['files'] = self.files
        if self.initial:
            try:
                defaults['initial'] = self.initial[i]
            except IndexError:
                pass
        # Allow extra forms to be empty.
        if i >= self.initial_form_count():
            defaults['empty_permitted'] = True
        defaults.update(kwargs)
        form = self.form(**defaults)
        self.add_fields(form, i)
        return form
    # Build the stock FormSet class, then monkey-patch the overrides in.
    FormSet = forms.models.modelformset_factory(*args, **kwargs)
    FormSet._construct_form=_construct_form
    FormSet.as_table=as_table
    FormSet.get_default_prefix=get_default_prefix
    return FormSet
class RowForm(forms.ModelForm):
    """ Adds four features to the ModelForms.
    1. Adds .as_row method that renders the form as a table row, appropriate for a formset
    2. Adds .default_prefix method as well as its hook in init so a default prefix can be specified in subclasses
    3. Adds formset_id and group attributes to be set by a formset
    4. Adds arguments to put html at the beginning and end of the html_output... This is important when working
    with formsets
    """
    def get_default_prefix(self): return 'rowform'
    def __init__(self, *args, **kwargs):
        # formset_id/group are injected by the patched _construct_form in
        # modelformset_factory; pop them before ModelForm sees the kwargs.
        self.formset_id=kwargs.pop('formset_id',None)
        self.group=kwargs.pop('group',None)
        super(RowForm, self).__init__(*args, **kwargs)
        if not self.prefix: self.prefix=self.get_default_prefix()
    def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row, start='', end=''):
        "Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
        # NOTE(review): appears to mirror Django's BaseForm._html_output with
        # the added start/end wrapper strings (feature 4 in the class docstring)
        # -- keep in sync with the Django version in use.
        top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
        output, hidden_fields = [start], []
        for name, field in self.fields.items():
            html_class_attr = ''
            bf = forms.forms.BoundField(self, field, name)
            bf_errors = self.error_class([conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
            if bf.is_hidden:
                if bf_errors:
                    top_errors.extend([u'(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
                hidden_fields.append(unicode(bf))
            else:
                # Create a 'class="..."' atribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes
                if errors_on_separate_row and bf_errors:
                    output.append(error_row % force_unicode(bf_errors))
                if bf.label:
                    label = conditional_escape(force_unicode(bf.label))
                    # Only add the suffix if the label does not end in
                    # punctuation.
                    if self.label_suffix:
                        if label[-1] not in ':?.!':
                            label += self.label_suffix
                    label = bf.label_tag(label) or ''
                else:
                    label = ''
                if field.help_text:
                    help_text = help_text_html % force_unicode(field.help_text)
                else:
                    help_text = u''
                output.append(normal_row % {
                    'errors': force_unicode(bf_errors),
                    'label': force_unicode(label),
                    'field': unicode(bf),
                    'help_text': help_text,
                    'html_class_attr': html_class_attr
                })
        if top_errors:
            output.insert(0, error_row % force_unicode(top_errors))
        if hidden_fields: # Insert any hidden fields in the last row.
            str_hidden = u''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    last_row = (normal_row % {'errors': '', 'label': '',
                                              'field': '', 'help_text':'',
                                              'html_class_attr': html_class_attr})
                    output.append(last_row)
                output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        output.append(end)
        return mark_safe(u'\n'.join(output))
    def as_row(self):
        "Returns this form rendered as a row in a table."
        # Tag the <tr> with group/formset_id attributes so client-side code
        # can locate and address individual rows of a formset.
        row_attr=''
        if self.group: row_attr+=' group="%s" ' % self.group
        if not self.formset_id==None: row_attr+=' formset_id="%s" ' % self.formset_id
        return self._html_output(
            normal_row = u'<td%(html_class_attr)s>%(errors)s%(field)s%(help_text)s</td>',
            error_row = u'<td colspan="2">%s</td>',
            row_ender = u'</td>',
            help_text_html = u'<br />%s',
            errors_on_separate_row = False,
            start=u'<tr%s>' % row_attr,
            end=u'</tr>',
        )
    def as_header_row(self):
        "Returns this form rendered as a row in a table."
        # Labels only (<th> cells); used once per formset by as_table().
        return self._html_output(
            normal_row = u'<th>%(label)s</th>',
            error_row = u'<td colspan="2">%s</td>',
            row_ender = u'</td>',
            help_text_html = u'<br />%s',
            errors_on_separate_row = False,
            start=u'<tr>',
            end=u'</tr>',
        )
class GroupForm(RowForm):
    """RowForm variant that stamps every rendered cell with a group
    attribute derived from the form prefix."""
    def __init__(self, *args, **kwargs):
        self.group=kwargs.pop('group',None)
        # NOTE(review): super(RowForm, self) skips RowForm.__init__ and goes
        # straight to ModelForm.__init__, so RowForm's formset_id/prefix
        # handling never runs here -- confirm this is intentional.
        super(RowForm, self).__init__(*args, **kwargs)
    def as_row(self):
        "Returns this form rendered as a row in a table with the specified group."
        # The group attribute is the first dash-separated token of the prefix.
        group_spec=' group="%s" ' % self.prefix.split('-')[0]
        # NOTE(review): the 'aa'/'bb'/'cc'/'dd' prefixes below end up in the
        # rendered HTML and look like leftover debug markers -- confirm
        # whether they should be removed (left untouched here).
        return self._html_output(
            normal_row = u'aa<td'+group_spec+u' %(html_class_attr)s>%(errors)s%(field)s%(help_text)s</td>',
            error_row = u'bb<td'+group_spec+u' colspan="2">%s</td>',
            row_ender = u'cc</td>',
            help_text_html = u'dd<br />%s',
            errors_on_separate_row = False)
class SaleForm(RowForm):
    """Row form for Sale documents; 'account' is an autocomplete field
    keyed by the client's name instead of the raw foreign-key value."""
    def __init__(self, *args, **kwargs):
        super(SaleForm, self).__init__(*args, **kwargs)
        # Pre-fill the autocomplete with the client's name when editing an
        # existing instance.  (dict.has_key exists in Python 2 only.)
        if kwargs.has_key('instance'):
            instance = kwargs['instance']
            self.initial['account'] = instance.account.name
    def get_default_prefix(self): return 'saleform'
    class Meta:
        model = Sale
    date = forms.DateField(widget=CalanderInput())
    account=AutoCompleteField(model=Client, url="/accounting/ajax-client-list/", required=False, label='Client')
class TransactionForm(RowForm):
    """Row form for Transaction objects (rendered via TransactionFormSet)."""
    class Meta:
        fields=('date', 'value', 'active','inventorytransaction')
        model = Transaction
# Formset of TransactionForms rendered as table rows; one blank extra form.
TransactionFormSet = modelformset_factory(Transaction, form=TransactionForm, extra=1)
class SaleLineForm(RowForm):
    """Row form for a single SaleLine; 'item' is an autocomplete field
    keyed by the item's name instead of the raw foreign-key value."""
    def __init__(self, *args, **kwargs):
        super(SaleLineForm, self).__init__(*args, **kwargs)
        # Pre-fill the autocomplete with the item's name when editing an
        # existing instance.  (dict.has_key exists in Python 2 only.)
        if kwargs.has_key('instance'):
            instance = kwargs['instance']
            if instance.item: self.initial['item'] = instance.item.name
    item=AutoCompleteField(model=Item, url='/inventory/ajax-item-list/', required=False)
    date = forms.DateField(widget=CalanderInput())
    # Rendered as a hidden input inside the row.
    document = forms.ModelChoiceField(Document, widget=forms.HiddenInput())
    def get_default_prefix(self): return 'salelineform'
    class Meta:
        fields=('document','date', 'value', 'quantity', 'item', 'serial', 'active', 'delivered')
        model = SaleLine
# Formset of existing sale lines: no blank extras, rows can be deleted.
SaleLineFormSet = modelformset_factory(SaleLine, form=SaleLineForm, extra=0, prefix='salelineform', can_order=False, can_delete=True)
class NewSaleLineForm(forms.Form):
    """Plain (non-model) form for adding a new line; formset_id ties the
    new row to its client-side formset."""
    formset_id = forms.IntegerField()
    item=AutoCompleteField(model=Item, url='/inventory/ajax-item-list/', required=False)
| [
"jaredtmartin@gmail.com"
] | jaredtmartin@gmail.com |
91984d48b3742244adf93f8e7500b8c3efa80728 | 68bbf3faecfdae707909647dce9a1dcffcb3491a | /searchNodeInBST.py | af52f8523b1f404f8de89e507130ec104cd462e8 | [] | no_license | Aniket-1/leetcode | d58c4b8e92888d7af000552292477e36c9a503cf | 3cb3274888c4f182f44d9eba513f92a669f9d11b | refs/heads/main | 2023-03-19T03:34:16.064981 | 2021-03-05T05:49:34 | 2021-03-05T05:49:34 | 334,960,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | //You are given the root of a binary search tree (BST) and an integer val.
# Find the node in the BST that the node's value equals val and return the subtree rooted with that node. If such a node does not exist, return null.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def searchBST(self, root: "TreeNode", val: int) -> "TreeNode":
        """Iterative BST search.

        Walks down from root using BST ordering and returns the subtree
        rooted at the node whose value equals val, or None when absent.

        The annotations are quoted (string forward references) because the
        TreeNode definition above is commented out; unquoted annotations
        would raise NameError the moment this module is imported.
        """
        while root:
            if root.val > val:
                root = root.left    # target is smaller -> descend left
            elif root.val < val:
                root = root.right   # target is larger -> descend right
            else:
                return root         # exact match: return this subtree
        return root                 # None: val not present in the tree
| [
"noreply@github.com"
] | Aniket-1.noreply@github.com |
ad6320700a9871fd710ca5dc3b06b8878292f571 | 45a5c06c89d84e689b528ebd05f982914dc9f0f2 | /rl_bolts/buffers.py | a53f82d1a6403bd000f4ecf561fe9bcbc8924a79 | [
"Apache-2.0"
] | permissive | jfpettit/rl_bolts | be0f2e56af3bab2effd5c0a0723b5eb13050fa2a | c3c3b3f91ee192048912fd48f2655b46526918a7 | refs/heads/master | 2022-11-30T15:53:32.316481 | 2020-08-14T05:45:47 | 2020-08-14T05:45:47 | 285,760,715 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,576 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_buffers.ipynb (unless otherwise specified).
__all__ = ['PGBuffer', 'ReplayBuffer']
# Cell
import numpy as np
from scipy.signal import lfilter
from typing import Optional, Any, Union
import torch
import gym
# Cell
class PGBuffer:
    """
    A buffer for storing trajectories experienced by an agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.
    This class was written by Joshua Achaim at OpenAI. It was adapted to use PyTorch Tensors instead of NumPy arrays for the
    observations and actions.
    Args:
    - obs_dim (tuple or int): Dimensionality of input feature space.
    - act_dim (tuple or int): Dimensionality of action space.
    - size (int): buffer size.
    - gamma (float): reward discount factor.
    - lam (float): Lambda parameter for GAE-Lambda advantage estimation
    """
    def __init__(
        self,
        obs_dim: Union[tuple, int],
        act_dim: Union[tuple, int],
        size: int,
        gamma: Optional[float] = 0.99,
        lam: Optional[float] = 0.95,
    ):
        # Observations/actions live in torch tensors; per-step scalars
        # (advantage, reward, return, value, log-prob) stay in numpy arrays.
        self.obs_buf = torch.zeros(self._combined_shape(size, obs_dim), dtype=torch.float32)
        self.act_buf = torch.zeros(self._combined_shape(size, act_dim), dtype=torch.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        # ptr: next write slot; path_start_idx: first slot of current trajectory.
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size
    def store(
        self,
        obs: torch.Tensor,
        act: torch.Tensor,
        rew: Union[int, float, np.array],
        val: Union[int, float, np.array],
        logp: Union[float, np.array],
    ):
        """
        Append one timestep of agent-environment interaction to the buffer.
        Args:
        - obs (torch.Tensor): Current observation to store.
        - act (torch.Tensor): Current action.
        - rew (int or float or np.array): Current reward from environment.
        - val (int or float or np.array): Value estimate for the current state.
        - logp (float or np.array): log probability of chosen action under current policy distribution.
        """
        assert self.ptr < self.max_size  # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.ptr += 1
    def finish_path(self, last_val: Optional[Union[int, float, np.array]] = 0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.
        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        Args:
        - last_val (int or float or np.array): Estimate of rewards-to-go. If trajectory ended, is 0.
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        # Append the bootstrap value so indices t and t+1 line up below.
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)
        # the next two lines implement GAE-Lambda advantage calculation
        # (deltas are the one-step TD residuals r_t + gamma*V(s_{t+1}) - V(s_t))
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = self._discount_cumsum(deltas, self.gamma * self.lam)
        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = self._discount_cumsum(rews, self.gamma)[:-1]
        self.path_start_idx = self.ptr
    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.
        Returns:
        - obs_buf (torch.Tensor): Buffer of observations collected.
        - act_buf (torch.Tensor): Buffer of actions taken.
        - adv_buf (torch.Tensor): Advantage calculations.
        - ret_buf (torch.Tensor): Buffer of earned returns.
        - logp_buf (torch.Tensor): Buffer of log probabilities of selected actions.
        """
        assert self.ptr == self.max_size  # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the line implement the advantage normalization trick
        # (the 1e-8 epsilon guards against division by zero when std is 0)
        adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
        self.adv_buf = (self.adv_buf - adv_mean) / (adv_std + 1e-8)
        return [
            self.obs_buf,
            self.act_buf,
            torch.as_tensor(self.adv_buf, dtype=torch.float32),
            torch.as_tensor(self.ret_buf, dtype=torch.float32),
            torch.as_tensor(self.logp_buf, dtype=torch.float32)
        ]
    def _combined_shape(
        self, length: Union[int, np.array], shape: Optional[Union[int, tuple]] = None
    ):
        """
        Return tuple of combined shapes from input length and tuple describing shape.
        Args:
        - length (int or np.array): Length of resultant shape.
        - shape (int or tuple): Other shape dimensions to combine.
        Returns:
        - tuple of shape dimensions
        """
        if shape is None:
            return (length,)
        return (length, shape) if np.isscalar(shape) else (length, *shape)
    def _discount_cumsum(self, x: np.array, discount: float):
        """
        magic from rllab for computing discounted cumulative sums of vectors.
        input:
        vector x,
        [x0,
        x1,
        x2]
        output:
        [x0 + discount * x1 + discount^2 * x2,
        x1 + discount * x2,
        x2]
        """
        # lfilter on the reversed vector computes the discounted running sum
        # in C; reversing again restores the original time order.
        return lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
# Cell
class ReplayBuffer(PGBuffer):
    """
    A replay buffer for off-policy RL agents.
    This class is borrowed from OpenAI's SpinningUp package: https://spinningup.openai.com/en/latest/
    Args:
    - obs_dim (tuple or int): Dimensionality of input feature space.
    - act_dim (tuple or int): Dimensionality of action space.
    - size (int): buffer size.
    """
    def __init__(
        self, obs_dim: Union[tuple, int], act_dim: Union[tuple, int], size: int
    ):
        # obs1 = observation before the step, obs2 = observation after it.
        self.obs1_buf = torch.zeros(self._combined_shape(size, obs_dim), dtype=torch.float32)
        self.obs2_buf = torch.zeros(self._combined_shape(size, obs_dim), dtype=torch.float32)
        self.act_buf = torch.zeros(self._combined_shape(size, act_dim), dtype=torch.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        # ptr: next write slot; size: current fill level (<= max_size).
        self.ptr, self.size, self.max_size = 0, 0, size
    def store(
        self,
        obs: torch.Tensor,
        act: Union[float, int, torch.Tensor],
        rew: Union[float, int],
        next_obs: torch.Tensor,
        done: bool,
    ):
        """
        Append one timestep of agent-environment interaction to the buffer.
        Args:
        - obs (torch.Tensor): Current observations.
        - act (float or int or torch.Tensor): Current action.
        - rew (float or int): Current reward
        - next_obs (torch.Tensor): Observations from next environment step.
        - done (bool): Whether the episode has reached a terminal state.
        """
        self.obs1_buf[self.ptr] = obs
        self.obs2_buf[self.ptr] = next_obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.done_buf[self.ptr] = done
        # Ring buffer: wrap around and overwrite the oldest entries once full.
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
    def sample_batch(self, batch_size: Optional[int] = 32):
        """
        Sample a batch of agent-environment interaction from the buffer.
        Args:
        - batch_size (int): Number of interactions to sample for the batch.
        Returns:
        - tuple of batch tensors
        """
        # Uniform random indices over the filled portion (sampling with
        # replacement); the tuple follows dict insertion order:
        # (obs, obs2, act, rew, done).
        idxs = np.random.randint(0, self.size, size=batch_size)
        batch = dict(
            obs=self.obs1_buf[idxs],
            obs2=self.obs2_buf[idxs],
            act=self.act_buf[idxs],
            rew=self.rew_buf[idxs],
            done=self.done_buf[idxs],
        )
        return tuple(torch.as_tensor(v, dtype=torch.float32) for _, v in batch.items())
    def get(self):
        """
        Get all contents of the batch.
        Returns:
        - list of PyTorch Tensors; full contents of the buffer.
        """
        return [
            torch.as_tensor(self.obs1_buf, dtype=torch.float32),
            torch.as_tensor(self.obs2_buf, dtype=torch.float32),
            torch.as_tensor(self.act_buf, dtype=torch.float32),
            torch.as_tensor(self.rew_buf, dtype=torch.float32),
            torch.as_tensor(self.done_buf, dtype=torch.float32)
] | [
"jfpettit@gmail.com"
] | jfpettit@gmail.com |
a6ab4f744773dd3b24e1bb3cec4fe14a538e8c0e | 5cb6b9b654ced936aa9d7dfc665b83a1fdd19ab6 | /pyqt/first.py | 81310a8f90620ce0dc80de2b269edbbed409581a | [] | no_license | guoabyss/LearnMore | 6ed32006719ed0023d32d91af7254d1ed85457e7 | 3cc39fedd5cb5cd915721ee313526213c81ced6d | refs/heads/master | 2022-10-12T15:56:17.240679 | 2020-06-14T14:24:19 | 2020-06-14T14:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import sys
from PyQt5.QtWidgets import QApplication, QWidget
if __name__ == "__main__":
    # Minimal PyQt5 demo: a bare window with a fixed size and title.
    app = QApplication(sys.argv)
    # Create the window
    w = QWidget()
    w.resize(400, 150)
    w.move(300, 300)
    # Set the window title
    w.setWindowTitle("第一个GUI")
    w.show()
    # Run the Qt event loop; exit the process with its return code.
    sys.exit(app.exec_())
| [
"836463194@qq.com"
] | 836463194@qq.com |
441d362c54f38d41048090be65997b9096bd1c3e | 567c75c7801a475c26b81f94bd7b91986933d99b | /a3/sdp/raw/media.py | eee9efeaad77fae0b6d188487ef07d4bc33269fb | [] | no_license | rcslabs/a3-media-controller | 443c79bf0c341c45eeb7734d058de052f8e5d54f | 4457fe10a2d432d0e57cc2b3a914d4e4556b9695 | refs/heads/master | 2021-02-13T08:00:42.996741 | 2020-03-03T15:54:37 | 2020-03-03T15:54:37 | 244,678,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | #!/usr/bin/env python
"""
sdp.raw.Media object
"""
import attribute
from entity import MediaDescription, MediaDescriptionProto, ConnectionData
from a3.media import MediaType
class Media(object):
    """One SDP media section (the m= line plus its section-level lines:
    i=, c=, b=, k= and a= attributes)."""
    def __init__(self, media_type=MediaType.AUDIO, media_description=None):
        assert type(media_type) is MediaType
        assert media_description is None or type(media_description) is MediaDescription
        # Use the supplied m= description, or build a default one
        # (port 0, RTP/AVP, empty format list) for the given media type.
        if media_description:
            self.__media_description = media_description
        else:
            self.__media_description = MediaDescription(media_type,
                                                        0,
                                                        MediaDescriptionProto.RTP_AVP,
                                                        [])
        self.__media_title = None
        self.__connection_data = None
        self.__bandwidths = []
        self.__encryption_key = None
        self.__attributes = attribute.AttributeCollection()
    @property
    def media_type(self):
        # Derived from the media description; there is no independent setter.
        return self.__media_description.media_type
    @property
    def media_description(self):
        """
        :rtype : MediaDescription
        """
        return self.__media_description
    @media_description.setter
    def media_description(self, media_description):
        assert type(media_description) is MediaDescription
        self.__media_description = media_description
    @property
    def media_title(self):
        # i= line; read-only (only set internally).
        return self.__media_title
    @property
    def connection_data(self):
        return self.__connection_data
    @connection_data.setter
    def connection_data(self, connection_data):
        assert connection_data is None or type(connection_data) is ConnectionData
        self.__connection_data = connection_data
    @property
    def attributes(self):
        return self.__attributes
    @attributes.setter
    def attributes(self, attributes):
        assert type(attributes) is attribute.AttributeCollection
        self.__attributes = attributes
    def add_attribute(self, str_name, value=None):
        # Returns whatever AttributeCollection.append returns.
        return self.add_attribute_impl(str_name, value) if False else self.__attributes.append(attribute.Attribute(str_name, value))
    def remove_attribute(self, attribute):
        # NOTE(review): the parameter shadows the module-level 'attribute'
        # import inside this method -- harmless here, but easy to trip over.
        return self.__attributes.remove(attribute)
    def to_str_list(self):
        lines = []
        lines.append("m=" + str(self.__media_description)) # m= (media name and transport address)
        if self.__media_title: lines.append("i=" + str(self.__media_title)) # i=* (media title)
        if self.__connection_data: lines.append("c=" + str(self.__connection_data)) # c=* (connection information)
        for b in self.__bandwidths: lines.append("b=" + str(b)) # b=* (zero or more bandwidth information lines)
        if self.__encryption_key: lines.append("k=" + str(self.__encryption_key)) # k=* (encryption key)
        lines += self.__attributes.to_str_list() # a=* (zero or more media attribute lines)
        return lines
    def __str__(self):
        # SDP uses CRLF line endings, including a trailing one.
        return "\r\n".join(self.to_str_list()) + "\r\n"
| [
"yury.krikun@44e08e39-4b91-0410-a4d4-833ecb1b66d7"
] | yury.krikun@44e08e39-4b91-0410-a4d4-833ecb1b66d7 |
90f284e04501a00ff62afab5f4d11a2ad546a865 | 54dbbf0b3dd9ace6e3b51cb2632ae1d9302ea529 | /编程小白的第一本 Python 入门书/类.py | d34cba3071a14e5a5166c402a9777084329ebe7a | [] | no_license | zzxmona/pythontrain | c42f0bb89f31fea3149b21db38f74f03f3872946 | afcfa9ba533b52adef86d51e98cc96abb3a627d5 | refs/heads/master | 2023-04-30T20:28:44.239500 | 2021-05-31T01:27:49 | 2021-05-31T01:27:49 | 364,789,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | # 类的实例化最好加()以免报错
# --- Class attributes and instantiation ---
class z:
    con = [1, 2, 3, 4]
    name = 'zzx'
    # Leading double underscore triggers name mangling (stored as _z__name).
    __name = 'zzx'
abc = z()
print(abc.con)
print(z.name)
# --- Instance attribute set in __init__, plus simple methods ---
class two:
    def __init__(self, final):
        self.x = final
    def name2(self):
        print('zzx', '22')
    def name3(self):
        return 'zzx'
    def name4(self, name5):
        print(name5)
x = two('xxx')
print(x.x)
x.name2()
x.name4('zzx name5')
class three():
    def __init__(self):
        self.name = 'zzx'
    def age(self):
        return '22'
test3 = three()
print(test3.name)
print(test3.age())
# --- __init__ can run arbitrary code (prints each formula element) ---
class CocaCola:
    formula = ['caffeine', 'sugar', 'water', 'soda']
    def __init__(self):
        for element in self.formula:
            print('Coke has {}!'.format(element))
    def drink(self):
        print('Energy!')
coke = CocaCola()
# --- Constructor argument stored per-instance; class attribute shared ---
class CocaCola2():
    formula = ['caffeine', 'sugar', 'water', 'soda']
    def __init__(self, logo_name):
        self.local_logo = logo_name
    def drink(self):
        print('Energy!')
coke2 = CocaCola2('可口可乐')
print(coke2.local_logo)
print(coke2.formula)
# --- Mixing class attributes with an instance attribute ---
class five():
    name = 'zzx'
    age = '22'
    sex = '男'
    def __init__(self, id):
        self.id = id
    def lie(self):
        print('{} {} {} {}'.format(self.id, self.name, self.age, self.sex))
f = five(201732110226)
f.lie()
# --- Inheritance: jcfive gets __init__/lie from five ---
class jcfive(five):
    test = 'test'
    def five2(self):
        print(self.test)
jcfive1 = jcfive('zjnu')
jcfive1.lie()
jcfive1.five2()
class te1():
    def tes1(self):
        return 'tes1'
class te2(te1):
    def tes2(self):
        print('tes2')
t2 = te2()
print(t2.tes1())
# --- Instance attributes shadow class attributes of the same name ---
class TestA:
    attr = 1
    def __init__(self):
        self.name = 'zzx'
        self.attr = 33
    def rename(self):
        name2 = 'zzx'
        return name2
obj_a = TestA()
print(obj_a.attr)
obj_a.attr = 42
obj_a.name = 'zx'
print(obj_a.attr, obj_a.name)
print(obj_a.rename())
| [
"2577625924@qq.com"
] | 2577625924@qq.com |
cef9a68afdddd61d9d2c7d5510d7a38174bc8f1c | 4b68243d9db908945ee500174a8a12be27d150f9 | /pogoprotos/networking/requests/messages/update_fitness_metrics_message_pb2.py | 522382d168f4fe3adab53afbb40fe730c7070bd9 | [] | no_license | ykram/pogoprotos-py | 7285c86498f57dcbbec8e6c947597e82b2518d80 | a045b0140740625d9a19ded53ece385a16c4ad4a | refs/heads/master | 2020-04-20T10:19:51.628964 | 2019-02-02T02:58:03 | 2019-02-02T02:58:03 | 168,787,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 2,937 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/update_fitness_metrics_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.data.fitness import fitness_sample_pb2 as pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/update_fitness_metrics_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nLpogoprotos/networking/requests/messages/update_fitness_metrics_message.proto\x12\'pogoprotos.networking.requests.messages\x1a,pogoprotos/data/fitness/fitness_sample.proto\"^\n\x1bUpdateFitnessMetricsMessage\x12?\n\x0f\x66itness_samples\x18\x01 \x03(\x0b\x32&.pogoprotos.data.fitness.FitnessSampleb\x06proto3')
,
dependencies=[pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2.DESCRIPTOR,])
_UPDATEFITNESSMETRICSMESSAGE = _descriptor.Descriptor(
name='UpdateFitnessMetricsMessage',
full_name='pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fitness_samples', full_name='pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage.fitness_samples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=167,
serialized_end=261,
)
_UPDATEFITNESSMETRICSMESSAGE.fields_by_name['fitness_samples'].message_type = pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2._FITNESSSAMPLE
DESCRIPTOR.message_types_by_name['UpdateFitnessMetricsMessage'] = _UPDATEFITNESSMETRICSMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateFitnessMetricsMessage = _reflection.GeneratedProtocolMessageType('UpdateFitnessMetricsMessage', (_message.Message,), dict(
DESCRIPTOR = _UPDATEFITNESSMETRICSMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.update_fitness_metrics_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage)
))
_sym_db.RegisterMessage(UpdateFitnessMetricsMessage)
# @@protoc_insertion_point(module_scope)
| [
"mark@noffle.net"
] | mark@noffle.net |
7bf8347897e39eb95aac73a02b6b6f56d93586c6 | d2fb817130e9d8f40dc25fec5e8e5e7d42f91ec7 | /scons_gbd_docs/Gbd/Docs/Mkdocs/MkdocsBuild.py | a54edcf9ea65abd0a9e048337b5f47f23b444f26 | [
"MIT"
] | permissive | ASoftTech/Scons.Gbd.Docs | 1d8a32aed7a4b43186ea661baee6fef1832eb266 | 4d9fb7585d9565f57306774efb4342fe9b8822f2 | refs/heads/master | 2020-03-08T12:58:35.290077 | 2018-05-28T20:48:23 | 2018-05-28T20:48:23 | 128,145,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | """
This tool will generate the documentation output as html
using markdown files as an input via mkdocs to an output directory
"""
from __future__ import (division, print_function,
absolute_import, unicode_literals)
import SCons.Script
from SCons.Environment import Environment
import os
import sys
import os.path as path
from scons_gbd_docs.Gbd.Docs.Mkdocs.Common import MkdocsCommon
from scons_gbd_docs.Gbd.Docs.Mkdocs.Common.MkdocsConfig import MkdocsConfig
from SCons.Script import Builder
def exists(env):
    """Check if we're okay to load this builder"""
    # Delegates tool detection to MkdocsCommon.detect.
    return MkdocsCommon.detect(env)
def generate(env):
    """Called when the tool is loaded into the environment at startup of script"""
    assert(exists(env))
    # Attach a shared MkdocsConfig to the environment on first load only.
    if 'Mkdocs_Config' not in env:
        env['Mkdocs_Config'] = MkdocsConfig(env)
        env['Mkdocs_Config'].set_defaults()
    # Scanner discovers implicit dependencies of the mkdocs config source.
    scanner = env.Scanner(
        MkdocsCommon.scanner,
        name='MkdocsScanner'
    )
    # Register the MkdocsBuild builder backed by __Build_func below.
    bld = Builder(
        action=__Build_func,
        emitter=MkdocsCommon.emitter,
        source_scanner=scanner,
    )
    env.Append(BUILDERS={'MkdocsBuild': bld})
def __Build_func(target, source, env):
    """Actual builder that does the work after the SConstruct file is parsed.

    Assembles the ``mkdocs build`` command line from the environment's
    MkdocsConfig and executes it in the configured working directory.
    """
    cfg = env['Mkdocs_Config']
    assert isinstance(cfg, MkdocsConfig)

    # Base command: the mkdocs executable plus the config file (first source).
    cmd = [cfg.Exe, 'build', '--config-file=' + str(source[0])]

    # --clean wipes the site dir first; --dirty keeps previously built pages.
    cmd.append('--clean' if cfg.CleanBuild else '--dirty')
    if cfg.Strict:
        cmd.append('--strict')
    if cfg.Theme:
        cmd.append('--theme=$Mkdocs_Theme')
    if cfg.CustomDir:
        cmd.append('--theme-dir=$Mkdocs_CustomDir')
    if env['Mkdocs_SiteDir'] is not None:
        cmd.append('--site-dir=$Mkdocs_SiteDir')
    if cfg.Quiet:
        cmd.append('--quiet')
    if cfg.Verbose:
        cmd.append('--verbose')
    cmd += cfg.ExtraArgs

    print('Building MkDocs Documentation:')
    env.Execute(env.Action([cmd], chdir=cfg.WorkingDir))
| [
"garlicbready@googlemail.com"
] | garlicbready@googlemail.com |
044fd67886bf5e38dd991c48b38f1dc4f3bfd6a5 | 885569925c4c564b18121c17a85e03419ffbc308 | /app.py | 73c9aa7f38dc5fa7c33045e29a73bf8bfa579657 | [] | no_license | lluidesia/facial-keypoint-detection | 711cc9d7a7dd49f74a57779553a27fccc36731f8 | d2c042ca3532c646e7d7bc9557907f235e5ab072 | refs/heads/master | 2020-09-05T11:24:40.404776 | 2019-11-06T21:36:11 | 2019-11-06T21:36:11 | 220,089,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | import io
import os
from flask import Flask, render_template, send_file, request, redirect, url_for
from PIL import Image
# Flask application implementing a minimal upload-and-view image workflow.
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config.update(
    # Uploaded files are stored next to this module, under ./uploads
    UPLOADED_PATH=os.path.join(basedir, 'uploads'),
)
@app.route('/', methods=['GET'])
def index():
    # Landing page; renders templates/index.html.
    return render_template('index.html')
@app.route('/upload', methods=['POST'])
def upload():
    """Accept the multipart 'file' field and store it under UPLOADED_PATH.

    Redirects to the index when no file was submitted, otherwise redirects
    to the viewer route for the stored file.
    """
    f = request.files.get('file')
    # Guard: the request may arrive without a file part or with an empty name
    # (the original crashed with AttributeError on f=None).
    if f is None or f.filename == '':
        return redirect(url_for('index'))
    # SECURITY: strip any directory components from the client-supplied
    # filename so it cannot escape UPLOADED_PATH (path traversal).
    filename = os.path.basename(f.filename)
    # Ensure the upload directory exists before saving.
    os.makedirs(app.config['UPLOADED_PATH'], exist_ok=True)
    f.save(os.path.join(app.config['UPLOADED_PATH'], filename))
    return redirect(url_for('go_to_image', file_name=filename))
@app.route('/go_to_image', methods=['GET'])
def go_to_image():
    """Serve a previously uploaded image from memory, re-encoded as PNG."""
    # SECURITY: basename() confines the lookup to UPLOADED_PATH; the
    # file_name query parameter is untrusted input (path traversal).
    file_name = os.path.basename(request.args.get('file_name', ''))
    file_object = io.BytesIO()
    img = Image.open(os.path.join(app.config['UPLOADED_PATH'], file_name))
    img.save(file_object, 'PNG')
    file_object.seek(0)
    return send_file(file_object, mimetype='image/PNG')
if __name__ == '__main__':
    # Development server only; debug=True must not be enabled in production.
    app.run(debug=True)
| [
"liudaprysiazhna@gmail.com"
] | liudaprysiazhna@gmail.com |
37307f0abd5565002723b66dd7bdb750cebcbf2a | 69a4e83cad7b3d5e5f35761e7223002a6940d061 | /2/2.py | 98627f4f26b66f99efa3bfbffdaddc29b90b2d8d | [] | no_license | c0mr4d3/adventofcode2020 | 408d01863b1b94872c77ab1b75e210c7b975574c | 6e506d4b170e045643ffdbd095b4a209721670ec | refs/heads/main | 2023-01-21T15:25:22.486170 | 2020-12-04T07:38:13 | 2020-12-04T07:38:13 | 317,858,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | arr = [x[:-1] for x in open("/home/comrade/Funstuff/adventofcode2020/2/input.txt").readlines()]
count = 0
for s in arr:
    # Each line looks like "1-3 a: abcde": two 1-based positions, a letter,
    # and the password.
    maxm = int(s[s.index("-")+1:s.index(" ")])
    minm = int(s[:s.index("-")])
    chrr = s[s.index(" ")+1]
    pas = s[s.index(": ")+2:]
    # Valid iff the letter appears at exactly one of the two positions;
    # "!=" on the two booleans acts as XOR (AoC 2020 day 2, part 2).
    if (pas[minm-1]==chrr) != (pas[maxm-1]==chrr):
        count+=1
print(count)
| [
"siddharthsingh.17june@gmail.com"
] | siddharthsingh.17june@gmail.com |
ce978aea403ff050f84bd8c5e869fff0a69f22c8 | fc22d8e8178aa4a47d360f1c83990ee8be1fc20e | /tools/md5_function.py | d2ce3e93b1ac9467b50883af0188b3663e7af8bb | [] | no_license | moujiangliu/interface | a13b5ebe86439f2bae55cbecd02ab5e65a77288b | b6e968271cb9bd1287a9b4950a6ccb69a7720036 | refs/heads/master | 2023-02-03T08:56:43.205534 | 2020-12-25T17:05:02 | 2020-12-25T17:05:02 | 323,383,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | # -*- coding:utf-8 -*-
import base64
import hashlib
class Secret(object):
'''
实现各种加密方式
'''
def __init__(self, string):
self._string = string.encode('utf-8')
def md5(self):
'''
md5加密方法
:return:
'''
try:
sign = hashlib.md5(self._string).hexdigest()
return sign
except:
return False
def sha1(self):
'''
实现sha1的加密方法
:return:
'''
try:
sign = hashlib.sha1(self._string).hexdigest()
return sign
except:
return False
def base64encode(self):
'''
实现一个base64 encode的方法封装
'''
try:
sign = base64.b64encode(self._string).decode('utf-8')
return sign
except:
return False
def base64decode(self):
'''
base64 decode的方法封装 (解码)
:return:
'''
try:
sign = base64.b64decode(self._string).decode('utf-8')
return sign
except:
return False
| [
"moujiang.liu@aliyun.com"
] | moujiang.liu@aliyun.com |
22ffc7c4ae1f6b16b2ece3c70722f0a2d0ec48c5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2480/59018/262642.py | 80da0919c460c290863470859367203af1d15933 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | def even_odd(N,a):
b=[]
for j in a:
if j%2==0:
b.append(j)
a.pop(j)
c=b+a
return c
T=int(input())
for i in range(T):
N=int(input())
info=input().split(' ')
a=[int(y) for y in info]
print(even_odd(N,a))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
20dcb6e05c6420b481455112a093bca40a513956 | a219c9b0f3ccd1b35c3bb7bb3c7b50e1d9d8ef93 | /arasınav_tbb_s4.py | ce88476ccc8238735b3aadf7d040888c661fa98e | [] | no_license | f0xmulder/python_ornekleri | 3293541b5d4e594dc39e6df623e47ecd4e5e94c2 | d1ebbcefdd7390a4e20a61864b150097f9919e29 | refs/heads/master | 2022-11-04T07:12:20.766931 | 2017-06-22T13:30:45 | 2017-06-22T13:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | # Soru 4
element = ""
tur = -1
cikti = ""
def turOgren(deger):#okunan karakterlerin büyük,küçük veya sayı olup olmadığını bu fonksiyon saysinde anlıyoruz.
if ord(deger) >= 65 and ord(deger) < 91:#karakterin ascii kodu bu değer aralığındaysa büyük harf
return 2
elif ord(deger) >= 97 and ord(deger) < 123:#karakterin ascii kodu bu değer aralığındaysa küçük harf
return 1
elif ord(deger) >= 49 and ord(deger) < 58:#karakterin ascii kodu bu değer aralığındaysa sayı
return 0
def elementAyristir(element):#bileşikten ayırdığımız her elementi bu fonksiyonda ayrıştırıyoruz.
transElement = ""
adet = ""
for j in element:
tur = turOgren(j)
if tur == 2 or tur == 1:
transElement = transElement + j
elif tur == 0:
adet = adet + j
if adet == "":#eğer elementten 1 tane varsa bunu if şartı ile kontrol ediyoruz.
adet = "1"
print transElement,"elementinden",adet,"tane var"
while (True):
giris=raw_input("element giriniz: ")
for i in giris:
tur = turOgren(i)
if tur == 2:#buyuk harf
if element == "":
element = i
else:
elementAyristir(element)
element = i
elif tur == 1 :#kucuk harf
element = element + i
elif tur == 0:#sayi
element = element + i
elementAyristir(element)
element = ""
tur = -1
| [
"noreply@github.com"
] | f0xmulder.noreply@github.com |
ed551b6f6b71ee37ff9df69bd2107696845fb278 | d7e68dadcab9933d1ceb89c4ac4d96993721ce07 | /PCA/pca.py | cce7c2cc767b931bd8143c336a9cd22b97f0c4d1 | [] | no_license | syedroshanzameer/Data-Mining | ff83faaffd07cf8b61783f7e160af06b65be31ae | d5dcdf9b04e76ec6b3d22e3349da933b6bfa8632 | refs/heads/master | 2021-09-10T16:34:37.349758 | 2018-03-29T11:24:00 | 2018-03-29T11:24:00 | 105,457,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | # author : Roshan Zameer Syed
# id:99999-2920
# description: Principal Component Analysis of the data set "arrhythmia.data"
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn.decomposition import PCA
data = pd.read_csv('arrhythmia.data', header=None) # Read data from the file
data.isnull().sum().sum()
data = data.replace('?', np.NaN) # Replace missing data with NaN
imp = Imputer(missing_values='NaN', strategy='mean', axis=0) # Fill missing values with "Mean"
imp.fit(data)
data_clean = imp.transform(data) # Transform the data
#print(data_clean)
pca = PCA(n_components=80)
pca.fit(data_clean)
data_red = pca.transform(data_clean)
print("Eigen Values: ", pca.explained_variance_) # Printing Eigen Values
print("Eigen Vectors: ", pca.components_) # Printing Eigen Vectors
# print(data_red)
# print (data.shape)
# print(data_clean.shape)
# print(data_red.shape)
print("Variance Ratio: ", pca.explained_variance_ratio_) # Printing Variance Ratio
print("Sum of the ratio's: ", pca.explained_variance_ratio_.sum()) # Sum of ratio's : 0.996325978866 = 99.6% | [
"RSyed9564@muleriders.saumag.edu"
] | RSyed9564@muleriders.saumag.edu |
d8329d3ce6551bc43f12339119f7cc1a1dc10936 | 93465443f6cb0bfe98c46efa9ad61383fc183470 | /demo/HelloWord.py | 6547b68d76cbf8be0825bebd881bba37727a3a7f | [] | no_license | zhangli1229/gy-1906A | 3b1352d82a715d83a8fbc15aeb1ae8fb510739ed | 54aeb5a3788afce9ecb67fcb84faa86a635c74d0 | refs/heads/master | 2020-06-22T20:41:08.829994 | 2019-07-23T09:45:22 | 2019-07-23T09:45:22 | 198,394,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | a="fjgkfgj"
print(type(a))
| [
"1208269415@qq.com"
] | 1208269415@qq.com |
f6a756d8901c7c8cdf61ca05ec0781a2c12777a4 | 427a148400c529d9bce48933605ded8aa0fbf015 | /Buble_Sort.py | 2f503758366bba48c6c13388ad92fc2a14618bea | [] | no_license | selimbd91/General_Python | 9491838472c28d267f2a026497b5360878b3d22e | 0154a86c205a7ddef127e43a8cefe1b66016bcd1 | refs/heads/master | 2020-12-21T11:35:34.009225 | 2020-07-21T22:41:49 | 2020-07-21T22:41:49 | 236,419,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | import random
class Sorting:
def __init__(self, n):
self.lists = [random.randint(1,20) for i in range(n)]
print(self.lists)
def buble_sort(self):
for i in range(len(self.lists)):
for j in range(i+1,len(self.lists)):
if self.lists[i] > self.lists[j]:
temp = self.lists[i]
self.lists[i] = self.lists[j]
self.lists[j] = temp
print(self.lists)
def selection_sort(self):
for i in range(len(self.lists)):
min_pos = i
for j in range(i, len(self.lists)):
if self.lists[min_pos] > self.lists[j]:
min_pos = j
temp = self.lists[i]
self.lists[i] = self.lists[min_pos]
self.lists[min_pos] = temp
print(self.lists)
obj = Sorting(10)
#obj.buble_sort()
obj.selection_sort() | [
"noreply@github.com"
] | selimbd91.noreply@github.com |
24067e7967bb71a2a6e31c1d0ec61bb2845bfd63 | 46d68965d76de48d1cee28f6218c9de60526eb83 | /scheduler/migrations/0005_auto__add_field_schedule_paused_at.py | e23cc371afc2a2e13ed4a981792f081a6767e89c | [] | no_license | bradmenezes/reminderapp | e6b99dbb0be6df10100223f567376599317fafc6 | 7b1b57f61fd1df4bc9d6a8d8afb2b28a04d24935 | refs/heads/master | 2020-05-04T21:11:25.182515 | 2014-08-23T23:30:42 | 2014-08-23T23:30:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,170 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Schedule.paused_at'
db.add_column(u'scheduler_schedule', 'paused_at',
self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Schedule.paused_at'
db.delete_column(u'scheduler_schedule', 'paused_at')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'scheduler.schedule': {
'Meta': {'object_name': 'Schedule'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_week': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'ONE_OFF'", 'max_length': '10'}),
'hour': ('django.db.models.fields.IntegerField', [], {'default': '12'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '150', 'blank': 'True'}),
'minute': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'paused_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Custom'", 'max_length': '15'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['scheduler'] | [
"bradmenezes10@gmail.com"
] | bradmenezes10@gmail.com |
c234418751a55af246126450f10af864edf22721 | e7aa98a1d9dfb60a38f192c2168734255376197d | /soup_test.py | 9327a002d1411d39016e6664d4366c53d1da14e5 | [] | no_license | mksvdmtr/python_csv_learning | e22cc97d662bf204fa46fbc325d76e08999aba92 | c54739635526c0286c8d1c0ed19f11d8c3b8d7a3 | refs/heads/master | 2023-05-31T14:25:34.270688 | 2020-04-29T09:14:07 | 2020-04-29T09:14:07 | 259,878,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from bs4 import BeautifulSoup
import requests
import csv
resp = requests.get("http://quotes.toscrape.com/")
html_data = BeautifulSoup(resp.text, 'html.parser')
quotes = html_data.find_all(class_='quote')
with open("quotes.csv", "w") as file:
field_names = ['Author', 'Quote', 'Tegs']
writer = csv.DictWriter(file, field_names, delimiter=";")
writer.writeheader()
for q in quotes:
writer.writerow({'Author': q.find(class_='author').get_text(), 'Quote': q.find(class_='text').get_text(), 'Tegs': q.find(class_='keywords')['content']})
| [
"mksvdmtr@yandex.ru"
] | mksvdmtr@yandex.ru |
66e5e2cd1dd250b00922b3b3211b1c0c1c510d35 | 53565e19de1d345552f5f469f4e4ea311a421bb8 | /app/artist/models/artist.py | de30a6078bcfde1cf589a711184a2c568c8bfd52 | [] | no_license | standbyme227/fc-melon | 18e17aa8b85906a62e1631e54a70ff85d72ea435 | 8f0f4d40021f75a025e91fa6aebea143bccb6ce3 | refs/heads/master | 2021-05-03T18:59:13.495171 | 2018-03-20T02:32:02 | 2018-03-20T02:32:02 | 120,418,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,632 | py | from django.conf import settings
from django.db import models
from django.forms import model_to_dict
from django.http import JsonResponse, HttpResponse
from .artist_youtube import ArtistYouTube
from .managers import ArtistManager
__all__ = (
'Artist',
)
class Artist(models.Model):
BLOOD_TYPE_A = 'a'
BLOOD_TYPE_B = 'b'
BLOOD_TYPE_O = 'o'
BLOOD_TYPE_AB = 'c'
BLOOD_TYPE_OTHER = 'x'
CHOICES_BLOOD_TYPE = (
(BLOOD_TYPE_A, 'A형'),
(BLOOD_TYPE_B, 'B형'),
(BLOOD_TYPE_O, 'O형'),
(BLOOD_TYPE_AB, 'AB형'),
(BLOOD_TYPE_OTHER, '기타'),
)
melon_id = models.CharField('멜론 Artist ID', max_length=20, blank=True, null=True, unique=True)
image = models.ImageField('프로필 이미지', upload_to='artist', blank=True)
# upload_to는 media폴더를 기준으로 그안의 경로를 지정
name = models.CharField('이름', max_length=50, )
real_name = models.CharField('본명', max_length=30, blank=True, default='')
nationality = models.CharField('국적', max_length=50, blank=True, )
birth_date = models.DateField(max_length=50, blank=True, null=True, )
constellation = models.CharField('별자리', max_length=30, blank=True, null=True)
blood_type = models.CharField('혈액형', max_length=50, blank=True, choices=CHOICES_BLOOD_TYPE)
# choices를 넣어야지만 위의 선택을 이용할 수 있다.
intro = models.TextField('소개', blank=True)
# likes = models.IntegerField(default=0)
like_users = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='ArtistLike',
related_name='like_artists',
blank=True,
)
youtube_videos = models.ManyToManyField(
ArtistYouTube,
related_name='artists',
blank=True,
)
objects = ArtistManager()
def __str__(self):
return self.name
def toggle_like_user(self, user):
# 자신이 'artist이며 user가 주어진 user인 ArtistLike를 가져오거나 없으면 생성
like, like_created = self.like_user_info_list.get_or_create(user=user)
# 만약 이미 잇엇을 경우 (새로 생성 X)
if not like_created:
# Like를 지워줌
like.delete()
# 생성여부를 반환
return like_created
# if self.like_users.filter(user=user).exists():
# self.like_users.filter(user).delete()
# else:
# self.like_users.create(user=user)
# # 자신이 artist이며, 주어진 user와의 ArtistLike의 QuerySet
# query = ArtistLike.objects.filter(artist=self, user=user)
# # QuerySet이 존재할 졍우
# if query.exists():
# query.delete()
# return False
# # QuerySet이 존재하지 않을 경우
# else:
# ArtistLike.objects.create(artist=self, user=user)
# return True
def to_json(self):
from django.db.models.fields.files import FieldFile
from django.contrib.auth import get_user_model
user_class = get_user_model()
ret = model_to_dict(self)
# model_to_dict의 결과가 dict
# 해당 dict의 item을 순회하며
# JSON Serialize할때 에러나는 타입의 value를
# 적절히 변환해서 value에 다시 대입
def convert_value(value):
if isinstance(value, FieldFile):
return value.url if value else None
elif isinstance(value, user_class):
return value.pk
elif isinstance(value, ArtistYouTube):
return value.pk
return value
def convert_obj(obj):
"""
객체 또는 컨테이너 객체에 포함된 객체들 중
직렬화가 불가능한 객체를 가능하도록 형태를 변환해주는 함수
:param obj:
:return: convert_value()를 거친 객체
"""
if isinstance(obj, list):
# list타입일 경우 각 항목을 순회하며 index에 해당하는 값을 변환
for index, item in enumerate(obj):
obj[index] = convert_obj(item)
elif isinstance(obj, dict):
# dict타입일 경우 각 항목을 순회하며 key에 해당하는 값을 변환
for key, value in obj.items():
obj[key] = convert_obj(value)
# list나 dict가 아닐 경우, 객체 자체를 변환한 값을 리턴
return convert_value(obj)
convert_obj(ret)
return ret | [
"standbyme227@gmail.com"
] | standbyme227@gmail.com |
af5b1cb49d6bdd5100e5a537254e5e9d02155253 | 399466b75d2427be9ef5efdb4fe6ed21af438872 | /project_4/handlers/json_trans.py | 566e9c36271909500c7e7b0e4b7ff5b3dd2f2670 | [] | no_license | MessiLeo919/Flowpp | a3a20a1fdad54d9f73916ad4c7cc99e096ad808f | 187d8a09e15e7ab2628976ecd73e06339e421f92 | refs/heads/master | 2020-03-21T14:48:56.801827 | 2018-06-29T03:20:28 | 2018-06-29T03:20:28 | 138,367,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,143 | py | import json
import os
import time
def json_transfer(file):
try:
with open(file,"r") as f:
load_dict = json.load(f) #转化为字典
# print(type(load_dict)) #转化为字典
print("加载文件完成...")
print('----------------')
List_date=[]
List_date_1 = []
List_date_2 = []
List_date_3 = []
for list_class in load_dict.keys():
# print(list_class)
# List_class.append(list_class) #获取四大类
if list_class in ["action","service"]:
app_L_1 = []
dict_All_1 = []
for i in load_dict[list_class]: #i为大类下面每一个字典
a=i['name_cn'].split("_")[0]
if a not in app_L_1:
app_L_1.append(a)
dict_name={a:[i['name_cn']]}
dict_All_1.append(dict_name)
else:
dict_All_1[app_L_1.index(a)][a].append(i['name_cn'])
if list_class =="action":
List_date_1.append(app_L_1)
List_date_1.append(dict_All_1)
else:
List_date_2.append(app_L_1)
List_date_2.append(dict_All_1)
elif list_class =="application":
app_L_2 = []
dict_All_2 = []
for i in load_dict[list_class]: #i为大类下面每一个字典
# print(i)
b = i['domain']
# print(b)
if b not in app_L_2:
app_L_2.append(b)
dict_name = {b: [i['name_cn']]}
dict_All_2.append(dict_name)
else:
dict_All_2[app_L_2.index(b)][b].append(i['name_cn'])
List_date_3.append(app_L_2)
List_date_3.append(dict_All_2)
List_date=List_date_1+List_date_2+List_date_3
# print("List_date_1:length",len(List_date_1),"-----------------\n")
# print(List_date_1)
# print("List_date_2:length",len(List_date_2),"-----------------\n")
# print(List_date_2)
# print("List_date_3:length",len(List_date_3),"-----------------\n")
# print(List_date_3)
print(len(List_date),"-----------------")
return List_date
except FileNotFoundError:
return [[],[],[],[],[],[]]
def json_modified(file,List_results):
print("转换开始...")
with open(file,"r") as f:
load_dict = json.load(f) #转化为字典
print("type-load_dict\n")
print(type(load_dict))
print('----------------')
with open("handlers/download/[Finished]meta.json", "w", encoding='utf-8') as fp:
print("新建...")
for list_class in load_dict.keys():
# print(list_class)
# List_class.append(list_class) #获取四大类
if list_class in ["action","service","application"]:
for i in load_dict[list_class]: #i为大类下面每一个字典
if i['name_cn'] in List_results:
i["selection"] = True
else:
i["selection"] = False
# i.update({"selection": false})
json.dump(load_dict, fp, indent=4,ensure_ascii=False)
print("转换完成")
def deletefile():
"""删除小于minSize的文件(单位:K)"""
files = os.listdir(os.getcwd()+'/handlers/upload') # 列出目录下的文件
# for file in files:
os.remove(file) # 删除文件
print(file + " deleted")
# time.sleep(30)
deletefile()
| [
"690995749@qq.com"
] | 690995749@qq.com |
ab32585b9e7c9dd55c0620ab746825b726ad0590 | 885291f5c66f242bb84effc27c400b8a1a5e5284 | /diary/management/commands/backup_diary.py | b27fb50578bc733591e384472544174b780f0074 | [] | no_license | maataro/django_app | 175e6aa153606cae7af79d98e4c43ea16ed0fe08 | e3f75ea44856a4aa800f997e5aa8d6d90dac46f3 | refs/heads/master | 2023-01-05T06:46:55.516279 | 2020-10-05T17:13:41 | 2020-10-05T17:13:41 | 298,172,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | import csv
import datetime
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from ...models import Diary
class Command(BaseCommand):
help = "Backup Diary data"
def handle(self, *args, **options):
# 実行時のYYYYMMDDを取得
date = datetime.date.today().strftime("%Y%m%d")
# 保存ファイルの相対パス
file_path = settings.BACKUP_PATH + 'diary_' + date + '.csv'
# 保存ディレクトリが存在しなければ作成
os.makedirs(settings.BACKUP_PATH, exist_ok=True)
# バックアップファイルの作成
with open(file_path, 'w') as file:
writer = csv.writer(file)
# ヘッダーの書き込み
header = [field.name for field in Diary._meta.fields]
writer.writerow(header)
# Diaryテーブルの全データを取得
diaries = Diary.objects.all()
# データ部分の書き込み
for diary in diaries:
writer.writerow([str(diary.user),
diary.title,
diary.content,
str(diary.photo1),
str(diary.photo2),
str(diary.photo3),
str(diary.created_at),
str(diary.updated_at)])
# 保存ディレクトリのファイルリストを取得
files = os.listdir(settings.BACKUP_PATH)
# ファイルが設定数以上あったら一番古いファイルを削除
if len(files) >= settings.NUM_SAVED_BACKUP:
files.sort()
os.remove(settings.BACKUP_PATH + files[0])
| [
"masahiro.infinite77@gmail.com"
] | masahiro.infinite77@gmail.com |
c0789528e506bd6d2e3b0093a98b9dc59f8a4f48 | 2c39c8b1ad251db6c616c14460db7a431c7550da | /09_sql.py | ee88e94738afa3006b6a58d9cd2f131c0f40de53 | [] | no_license | tendaitt/sql-with-python | 876ca2d1cb24d120835c87fef77ba8252ca4d137 | 3b499e952f704eb77e8b02f50e7c7ecce791a052 | refs/heads/master | 2020-08-29T13:14:22.518835 | 2019-10-31T13:00:17 | 2019-10-31T13:00:17 | 218,042,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # JOINing data from multiple tables
import sqlite3
with sqlite3.connect("new.db") as connection:
c = connection.cursor()
c.execute("""SELECT DISTINCT population.city, population.population,
regions.region FROM population, regions WHERE
population.city = regions.city ORDER by population.city ASC""")
rows = c.fetchall()
for r in rows:
print(f"City: {r[0]}")
print(f"Population: {r[1]}")
print(f"Region: {r[2]}")
print("") | [
"970497+tendaitt@users.noreply.github.com"
] | 970497+tendaitt@users.noreply.github.com |
915530cd328d21c79b8adaeb11cafbd1d868abb7 | d79aa2d186ed54ef786094bb2608eae911b4527c | /backend/bayfieldopen/bayfieldopen/auth/migrations/0001_initial.py | 0f71f7f216a6879502cc03f641ca582050e57286 | [] | no_license | JordanRClark/BayfieldOpen | 23b6a4cb80a34964d90f8d9d32ac0b01f2e2d4ea | 1bdefdaaad5372f92913afafd10c0242f5fb514e | refs/heads/main | 2023-03-07T07:00:19.776300 | 2021-02-15T05:52:13 | 2021-02-15T05:52:13 | 338,867,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | # Generated by Django 3.1.6 on 2021-02-14 23:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('first_name', models.CharField(blank=True, max_length=256)),
('middle_name', models.CharField(blank=True, max_length=256)),
('last_name', models.CharField(blank=True, max_length=256)),
('email', models.EmailField(max_length=254, null=True, unique=True)),
('handicap', models.DecimalField(blank=True, decimal_places=2, max_digits=2, null=True)),
('is_superuser', models.BooleanField(default=False)),
('is_staff', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"jordan@Jordans-MacBook-Pro.local"
] | jordan@Jordans-MacBook-Pro.local |
ac071ec7c195c0c7838f31cdd9f41fe37a46ad9c | a44a9279258ace54be0ea6d410e6ddb5a2d72bcb | /project-addons/custom_reports/models/product.py | 719faf154fd24aa8c981b08a03877ad3b5b456aa | [] | no_license | athlontado/PXGO_00064_2014_PHA | 346f33185a07c2e1766a7cc79cd300252d9b2480 | 3086baba490e47a5dcc7942c7c5fee9fc047ddcd | refs/heads/master | 2020-04-06T03:56:15.828784 | 2016-04-18T12:24:53 | 2016-04-18T12:24:53 | 59,216,028 | 0 | 0 | null | 2016-05-19T14:50:54 | 2016-05-19T14:50:54 | null | UTF-8 | Python | false | false | 1,240 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Pharmadus. All Rights Reserved
# $Óscar Salvador <oscar.salvador@pharmadus.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
class ProductCategory(models.Model):
_inherit = 'product.category'
commissions_parent_category = fields.Boolean('Commissions parent category',
default=False)
| [
"oscar.salvador@pharmadus.com"
] | oscar.salvador@pharmadus.com |
0e029895d75465efd99006fba963cce56d4204ed | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pandas-dev_pandas/pandas-master/pandas/tests/test_nanops.py | 937c20d009b6bfb2143c62b9aa96a110e0d6c71f | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 43,023 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from functools import partial
import warnings
import numpy as np
from pandas import Series, isnull
from pandas.types.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
# Remember whether bottleneck acceleration was enabled at import time so that
# setUp can force it off (for deterministic pure-numpy paths) and tearDown can
# restore the original setting for the rest of the test session.
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(tm.TestCase):
    def setUp(self):
        """Build the fixture arrays shared by every nanops test.

        Creates 3-D arrays of each supported dtype (float, int, bool,
        complex, str/unicode, datetime64, timedelta64), plus variants
        contaminated with NaN and/or inf, and then 2-D and 1-D slices of
        each so that every test can exercise all dimensionalities.
        """
        # Fixed seed => deterministic fixtures; disable bottleneck so the
        # pure-numpy nanops code paths are the ones under test.
        np.random.seed(11235)
        nanops._USE_BOTTLENECK = False
        self.arr_shape = (11, 7, 5)
        # Base 3-D arrays, one per dtype family.
        self.arr_float = np.random.randn(*self.arr_shape)
        self.arr_float1 = np.random.randn(*self.arr_shape)
        self.arr_complex = self.arr_float + self.arr_float1 * 1j
        self.arr_int = np.random.randint(-10, 10, self.arr_shape)
        self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0
        self.arr_str = np.abs(self.arr_float).astype('S')
        self.arr_utf = np.abs(self.arr_float).astype('U')
        self.arr_date = np.random.randint(0, 20000,
                                          self.arr_shape).astype('M8[ns]')
        self.arr_tdelta = np.random.randint(0, 20000,
                                            self.arr_shape).astype('m8[ns]')
        # NaN-contaminated variants: stacking along axis 0 doubles/triples the
        # first dimension, mixing valid data with all-NaN planes.
        self.arr_nan = np.tile(np.nan, self.arr_shape)
        self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
        self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
        self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
        self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
        # Inf-contaminated variants (and NaN+inf combinations).
        self.arr_inf = self.arr_float * np.inf
        self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
        self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf])
        self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1])
        self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf])
        self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
        self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan,
                                            self.arr_inf])
        self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf,
                                             self.arr_nan])
        self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan,
                                          self.arr_inf])
        # Heterogeneous object-dtype stack covering every dtype family above.
        self.arr_obj = np.vstack([self.arr_float.astype(
            'O'), self.arr_int.astype('O'), self.arr_bool.astype(
                'O'), self.arr_complex.astype('O'), self.arr_str.astype(
                    'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'),
            self.arr_tdelta.astype('O')])
        # Complex NaN/inf variants; nan*1j and inf*1j emit invalid-value
        # warnings, which we deliberately silence here.
        with np.errstate(invalid='ignore'):
            self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
            self.arr_complex_nan = np.vstack([self.arr_complex,
                                              self.arr_nan_nanj])
            self.arr_nan_infj = self.arr_inf * 1j
            self.arr_complex_nan_infj = np.vstack([self.arr_complex,
                                                   self.arr_nan_infj])
        # 2-D slices (first plane of the trailing axis) of each fixture.
        self.arr_float_2d = self.arr_float[:, :, 0]
        self.arr_float1_2d = self.arr_float1[:, :, 0]
        self.arr_complex_2d = self.arr_complex[:, :, 0]
        self.arr_int_2d = self.arr_int[:, :, 0]
        self.arr_bool_2d = self.arr_bool[:, :, 0]
        self.arr_str_2d = self.arr_str[:, :, 0]
        self.arr_utf_2d = self.arr_utf[:, :, 0]
        self.arr_date_2d = self.arr_date[:, :, 0]
        self.arr_tdelta_2d = self.arr_tdelta[:, :, 0]
        self.arr_nan_2d = self.arr_nan[:, :, 0]
        self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
        self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
        self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
        self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0]
        self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0]
        self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0]
        self.arr_inf_2d = self.arr_inf[:, :, 0]
        self.arr_float_inf_2d = self.arr_float_inf[:, :, 0]
        self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0]
        self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0]
        self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0]
        # 1-D slices of each fixture (note arr_inf_1d flattens the whole
        # array rather than taking a single column).
        self.arr_float_1d = self.arr_float[:, 0, 0]
        self.arr_float1_1d = self.arr_float1[:, 0, 0]
        self.arr_complex_1d = self.arr_complex[:, 0, 0]
        self.arr_int_1d = self.arr_int[:, 0, 0]
        self.arr_bool_1d = self.arr_bool[:, 0, 0]
        self.arr_str_1d = self.arr_str[:, 0, 0]
        self.arr_utf_1d = self.arr_utf[:, 0, 0]
        self.arr_date_1d = self.arr_date[:, 0, 0]
        self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0]
        self.arr_nan_1d = self.arr_nan[:, 0, 0]
        self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
        self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
        self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
        self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0]
        self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0]
        self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0]
        self.arr_inf_1d = self.arr_inf.ravel()
        self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0]
        self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0]
        self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
        self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
def tearDown(self):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view('i8')
return targ, res
try:
if axis != 0 and hasattr(
targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except:
# handle timedelta dtypes
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == 'O':
if targ.dtype.kind != 'O':
res = res.astype(targ.dtype)
else:
try:
res = res.astype('c16')
except:
res = res.astype('f8')
try:
targ = targ.astype('c16')
except:
targ = targ.astype('f8')
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == 'O':
raise
tm.assert_almost_equal(targ.real, res.real,
check_dtype=check_dtype)
tm.assert_almost_equal(targ.imag, res.imag,
check_dtype=check_dtype)
    def check_fun_data(self, testfunc, targfunc, testarval, targarval,
                       targarnanval, check_dtype=True, **kwargs):
        """Compare ``testfunc`` (a nanops function) against ``targfunc``
        (the reference implementation) on one set of arrays.

        Runs every axis (plus axis=None) crossed with both skipna settings,
        then recurses on a lower-dimensional slice of the same data until
        the arrays are 1d.

        testarval    : array passed to testfunc
        targarval    : array passed to targfunc when skipna is True
        targarnanval : array passed to targfunc when skipna is False
        """
        for axis in list(range(targarval.ndim)) + [None]:
            for skipna in [False, True]:
                # NaN-stripped target for skipna=True, raw target otherwise.
                targartempval = targarval if skipna else targarnanval
                try:
                    targ = targfunc(targartempval, axis=axis, **kwargs)
                    res = testfunc(testarval, axis=axis, skipna=skipna,
                                   **kwargs)
                    self.check_results(targ, res, axis,
                                       check_dtype=check_dtype)
                    # skipna=True is the default; omitting the keyword must
                    # behave identically.
                    if skipna:
                        res = testfunc(testarval, axis=axis, **kwargs)
                        self.check_results(targ, res, axis,
                                           check_dtype=check_dtype)
                    # axis=None is the default as well.
                    if axis is None:
                        res = testfunc(testarval, skipna=skipna, **kwargs)
                        self.check_results(targ, res, axis,
                                           check_dtype=check_dtype)
                    # Both defaults at once.
                    if skipna and axis is None:
                        res = testfunc(testarval, **kwargs)
                        self.check_results(targ, res, axis,
                                           check_dtype=check_dtype)
                except BaseException as exc:
                    # Tag the failure with the parameter combination so the
                    # traceback identifies the failing case.
                    exc.args += ('axis: %s of %s' % (axis, testarval.ndim - 1),
                                 'skipna: %s' % skipna, 'kwargs: %s' % kwargs)
                    raise
        if testarval.ndim <= 1:
            return
        # Recurse with one dimension peeled off; np.take keeps the three
        # arrays aligned.  ValueError means no further slicing is possible.
        try:
            testarval2 = np.take(testarval, 0, axis=-1)
            targarval2 = np.take(targarval, 0, axis=-1)
            targarnanval2 = np.take(targarnanval, 0, axis=-1)
        except ValueError:
            return
        self.check_fun_data(testfunc, targfunc, testarval2, targarval2,
                            targarnanval2, check_dtype=check_dtype, **kwargs)
def check_fun(self, testfunc, targfunc, testar, targar=None,
targarnan=None, **kwargs):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(testfunc, targfunc, testarval, targarval,
targarnanval, **kwargs)
except BaseException as exc:
exc.args += ('testar: %s' % testar, 'targar: %s' % targar,
'targarnan: %s' % targarnan)
raise
    def check_funs(self, testfunc, targfunc, allow_complex=True,
                   allow_all_nan=True, allow_str=True, allow_date=True,
                   allow_tdelta=True, allow_obj=True, **kwargs):
        """Run check_fun across every fixture dtype the nanop supports.

        The ``allow_*`` flags disable dtypes a function is undefined for.
        ``allow_obj='convert'`` wraps ``targfunc`` so object arrays are
        cast to a numeric dtype before the reference computation.
        """
        self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
        # NaN-laden input (with skipna) must match the clean-float target.
        self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
                       **kwargs)
        self.check_fun(testfunc, targfunc, 'arr_int', **kwargs)
        self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs)
        # Object-dtype variants accumulate here; checked at the end.
        objs = [self.arr_float.astype('O'), self.arr_int.astype('O'),
                self.arr_bool.astype('O')]
        if allow_all_nan:
            self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
        if allow_complex:
            self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
            self.check_fun(testfunc, targfunc, 'arr_complex_nan',
                           'arr_complex', **kwargs)
            if allow_all_nan:
                self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs)
            objs += [self.arr_complex.astype('O')]
        if allow_str:
            self.check_fun(testfunc, targfunc, 'arr_str', **kwargs)
            self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs)
            objs += [self.arr_str.astype('O'), self.arr_utf.astype('O')]
        if allow_date:
            # Only exercise datetimes when the reference function accepts
            # them at all.
            try:
                targfunc(self.arr_date)
            except TypeError:
                pass
            else:
                self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
                objs += [self.arr_date.astype('O')]
        if allow_tdelta:
            # Likewise for timedeltas.
            try:
                targfunc(self.arr_tdelta)
            except TypeError:
                pass
            else:
                self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs)
                objs += [self.arr_tdelta.astype('O')]
        if allow_obj:
            self.arr_obj = np.vstack(objs)
            # some nanops handle object dtypes better than their numpy
            # counterparts, so the numpy functions need to be given something
            # else
            if allow_obj == 'convert':
                targfunc = partial(self._badobj_wrap, func=targfunc,
                                   allow_complex=allow_complex)
            self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs)
def check_funs_ddof(self,
testfunc,
targfunc,
allow_complex=True,
allow_all_nan=True,
allow_str=True,
allow_date=False,
allow_tdelta=False,
allow_obj=True, ):
for ddof in range(3):
try:
self.check_funs(testfunc, targfunc, allow_complex,
allow_all_nan, allow_str, allow_date,
allow_tdelta, allow_obj, ddof=ddof)
except BaseException as exc:
exc.args += ('ddof %s' % ddof, )
raise
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == 'O':
if allow_complex:
value = value.astype('c16')
else:
value = value.astype('f8')
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(nanops.nanany, np.any, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanall(self):
self.check_funs(nanops.nanall, np.all, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
self.check_funs(nanops.nansum, np.sum, allow_str=False,
allow_date=False, allow_tdelta=True, check_dtype=False)
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean, allow_complex=False,
allow_obj=False, allow_str=False, allow_date=False,
allow_tdelta=True)
def test_nanmean_overflow(self):
# GH 10155
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy
# numpy < 1.9.0 is not computing this correctly
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.9.0':
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
self.assertEqual(result, a)
self.assertEqual(result, np_result)
self.assertTrue(result.dtype == np.float64)
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np, 'float128'):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ['mean', 'std', 'var', 'skew', 'kurt']
group_b = ['min', 'max']
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
self.assertTrue(
result.dtype == np.float64,
"return dtype expected from %s is np.float64, "
"got %s instead" % (method, result.dtype))
else:
self.assertTrue(
result.dtype == dtype,
"return dtype expected from %s is %s, "
"got %s instead" % (method, dtype, result.dtype))
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
self.check_funs(nanops.nanmedian, np.median, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanstd(self):
self.check_funs_ddof(nanops.nanstd, np.std, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nansem(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import sem
self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
if res.dtype.kind == 'm':
res = np.atleast_1d(res)
return res
def test_nanmin(self):
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
def test_nanmax(self):
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isnull(nans)
if res.ndim:
res[nullnan] = -1
elif (hasattr(nullnan, 'all') and nullnan.all() or
not hasattr(nullnan, 'all') and nullnan):
res = -1
return res
def test_nanargmax(self):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func, allow_str=False,
allow_obj=False, allow_date=True, allow_tdelta=True)
def test_nanargmin(self):
func = partial(self._argminmax_wrap, func=np.argmin)
if tm.sys.version_info[0:2] == (2, 6):
self.check_funs(nanops.nanargmin, func, allow_date=True,
allow_tdelta=True, allow_str=False,
allow_obj=False)
else:
self.check_funs(nanops.nanargmin, func, allow_str=False,
allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.
return result
def test_nanskew(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
self.check_funs(nanops.nanskew, func, allow_complex=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nankurt(self):
tm.skip_if_no_package('scipy.stats')
tm._skip_if_scipy_0_17()
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
self.check_funs(nanops.nankurt, func, allow_complex=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod, allow_str=False,
allow_date=False, allow_tdelta=False)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
**kwargs)
res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
**kwargs)
res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
res25 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
**kwargs)
res11 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
**kwargs)
res24 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
res25 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='pearson')
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_kendall(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='kendall')
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='kendall')
def test_nancorr_spearman(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='spearman')
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='spearman')
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
    def check_nancomp(self, checkfun, targ0):
        """Check a nanops comparison against the plain numpy operator result
        ``targ0``, peeling one axis per iteration until the data is 0d.

        NaN in either operand must appear as NaN in the result.
        """
        arr_float = self.arr_float
        arr_float1 = self.arr_float1
        arr_nan = self.arr_nan
        arr_nan_nan = self.arr_nan_nan
        arr_float_nan = self.arr_float_nan
        arr_float1_nan = self.arr_float1_nan
        arr_nan_float1 = self.arr_nan_float1
        while targ0.ndim:
            try:
                res0 = checkfun(arr_float, arr_float1)
                tm.assert_almost_equal(targ0, res0)
                # Extend the target with the all-NaN block that the
                # NaN-padded fixtures append.
                if targ0.ndim > 1:
                    targ1 = np.vstack([targ0, arr_nan])
                else:
                    targ1 = np.hstack([targ0, arr_nan])
                res1 = checkfun(arr_float_nan, arr_float1_nan)
                tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
                # NaN compared against NaN yields the all-NaN target.
                targ2 = arr_nan_nan
                res2 = checkfun(arr_float_nan, arr_nan_float1)
                tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
            except Exception as exc:
                # Tag failures with the dimensionality that failed.
                exc.args += ('ndim: %s' % arr_float.ndim, )
                raise
            # Drop the last axis on every operand in lockstep; a ValueError
            # means no further reduction is possible.
            try:
                arr_float = np.take(arr_float, 0, axis=-1)
                arr_float1 = np.take(arr_float1, 0, axis=-1)
                arr_nan = np.take(arr_nan, 0, axis=-1)
                arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
                arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
                arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
                arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
                targ0 = np.take(targ0, 0, axis=-1)
            except ValueError:
                break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
    def check_bool(self, func, value, correct, *args, **kwargs):
        """Assert ``func(value)`` is truthy/falsy per ``correct`` at every
        dimensionality, reducing ``value`` one axis at a time.

        ``getattr(value, 'ndim', True)`` keeps looping for scalars that lack
        an ndim attribute until the explicit ``break`` below fires.
        """
        while getattr(value, 'ndim', True):
            try:
                res0 = func(value, *args, **kwargs)
                if correct:
                    self.assertTrue(res0)
                else:
                    self.assertFalse(res0)
            except BaseException as exc:
                # Tag failures with the dimensionality (or scalar value).
                exc.args += ('dim: %s' % getattr(value, 'ndim', value), )
                raise
            # Scalars cannot be reduced further.
            if not hasattr(value, 'ndim'):
                break
            # Peel off the last axis; a ValueError means we are done.
            try:
                value = np.take(value, 0, axis=-1)
            except ValueError:
                break
def test__has_infs(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', False),
('arr_nan_nanj', False), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', False),
('arr_float_nan', False), ('arr_nan_nan', False),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype('f4'), correct)
self.check_bool(nanops._has_infs, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__isfinite(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', True),
('arr_nan_nanj', True), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', True),
('arr_float_nan', True), ('arr_nan_nan', True),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
# TODO: unused?
# func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype('f4'), correct)
self.check_bool(func1, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__bn_ok_dtype(self):
self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_date.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test'))
class TestEnsureNumeric(tm.TestCase):
    """Tests for nanops._ensure_numeric coercion of scalars and ndarrays."""

    def test_numeric_values(self):
        # Numeric scalars pass through unchanged.
        # Test integer
        self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int')
        # Test float
        self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float')
        # Test complex
        self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j,
                         'Failed for complex')

    def test_ndarray(self):
        # Numeric and object arrays coerce to the same numeric values;
        # numeric-looking string arrays are parsed, others raise.
        # Test numeric ndarray
        values = np.array([1, 2, 3])
        self.assertTrue(np.allclose(nanops._ensure_numeric(values), values),
                        'Failed for numeric ndarray')
        # Test object ndarray
        o_values = values.astype(object)
        self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values),
                        'Failed for object ndarray')
        # Test convertible string ndarray
        s_values = np.array(['1', '2', '3'], dtype=object)
        self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values),
                        'Failed for convertible string ndarray')
        # Test non-convertible string ndarray
        s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
        self.assertRaises(ValueError, lambda: nanops._ensure_numeric(s_values))

    def test_convertable_values(self):
        # Numeric-looking string scalars parse to int/float/complex.
        self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0),
                        'Failed for convertible integer string')
        self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1),
                        'Failed for convertible float string')
        self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j),
                        'Failed for convertible complex string')

    def test_non_convertable_values(self):
        # Non-numeric strings and containers raise TypeError (arrays of
        # non-numeric strings raise ValueError, see test_ndarray above).
        self.assertRaises(TypeError, lambda: nanops._ensure_numeric('foo'))
        self.assertRaises(TypeError, lambda: nanops._ensure_numeric({}))
        self.assertRaises(TypeError, lambda: nanops._ensure_numeric([]))
class TestNanvarFixedValues(tm.TestCase):
    """Fixed-value accuracy tests for nanvar/nanstd.

    xref GH10242.

    Fix: ``test_nanstd_nans`` previously called ``nanops.nanvar`` for its
    skipna=False branch (copy-paste from ``test_nanvar_nans``), so
    ``nanstd(skipna=False)`` was never exercised.  Both calls return NaN
    there, so the expected values are unchanged.
    """

    def setUp(self):
        # Samples from a normal distribution.
        self.variance = variance = 3.0
        self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)

    def test_nanvar_all_finite(self):
        samples = self.samples
        actual_variance = nanops.nanvar(samples)
        tm.assert_almost_equal(actual_variance, self.variance,
                               check_less_precise=2)

    def test_nanvar_nans(self):
        # Interleave NaNs: skipna=True must recover the variance,
        # skipna=False must propagate NaN.
        samples = np.nan * np.ones(2 * self.samples.shape[0])
        samples[::2] = self.samples
        actual_variance = nanops.nanvar(samples, skipna=True)
        tm.assert_almost_equal(actual_variance, self.variance,
                               check_less_precise=2)
        actual_variance = nanops.nanvar(samples, skipna=False)
        tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)

    def test_nanstd_nans(self):
        samples = np.nan * np.ones(2 * self.samples.shape[0])
        samples[::2] = self.samples
        actual_std = nanops.nanstd(samples, skipna=True)
        tm.assert_almost_equal(actual_std, self.variance ** 0.5,
                               check_less_precise=2)
        # Was nanops.nanvar (copy-paste bug); the result is NaN either way.
        actual_std = nanops.nanstd(samples, skipna=False)
        tm.assert_almost_equal(actual_std, np.nan,
                               check_less_precise=2)

    def test_nanvar_axis(self):
        # Generate some sample data.
        samples_norm = self.samples
        samples_unif = self.prng.uniform(size=samples_norm.shape[0])
        samples = np.vstack([samples_norm, samples_unif])
        actual_variance = nanops.nanvar(samples, axis=1)
        # Var of U(0, 1) is 1/12.
        tm.assert_almost_equal(actual_variance, np.array(
            [self.variance, 1.0 / 12]), check_less_precise=2)

    def test_nanvar_ddof(self):
        n = 5
        samples = self.prng.uniform(size=(10000, n + 1))
        samples[:, -1] = np.nan  # Force use of our own algorithm.
        variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
        variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
        variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
        # The unbiased estimate.
        var = 1.0 / 12
        tm.assert_almost_equal(variance_1, var,
                               check_less_precise=2)
        # The underestimated variance.
        tm.assert_almost_equal(variance_0, (n - 1.0) / n * var,
                               check_less_precise=2)
        # The overestimated variance.
        tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var,
                               check_less_precise=2)

    def test_ground_truth(self):
        # Test against values that were precomputed with Numpy.
        samples = np.empty((4, 4))
        samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287
                                     ], [0.72980153, 0.03109364, 0.99155171],
                                    [0.09317602, 0.60078248, 0.15871292]])
        samples[3] = samples[:, 3] = np.nan
        # Actual variances along axis=0, 1 for ddof=0, 1, 2
        variance = np.array([[[0.13762259, 0.05619224, 0.11568816
                               ], [0.20643388, 0.08428837, 0.17353224],
                              [0.41286776, 0.16857673, 0.34706449]],
                             [[0.09519783, 0.16435395, 0.05082054
                               ], [0.14279674, 0.24653093, 0.07623082],
                              [0.28559348, 0.49306186, 0.15246163]]])
        # Test nanvar.
        for axis in range(2):
            for ddof in range(3):
                var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
                tm.assert_almost_equal(var[:3], variance[axis, ddof])
                # The all-NaN row/column stays NaN.
                self.assertTrue(np.isnan(var[3]))
        # Test nanstd.
        for axis in range(2):
            for ddof in range(3):
                std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
                tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
                self.assertTrue(np.isnan(std[3]))

    def test_nanstd_roundoff(self):
        # Regression test for GH 10242 (test data taken from GH 10489). Ensure
        # that variance is stable.
        data = Series(766897346 * np.ones(10))
        for ddof in range(3):
            result = data.std(ddof=ddof)
            self.assertEqual(result, 0.0)

    @property
    def prng(self):
        # Fresh, identically-seeded generator per access keeps every test
        # deterministic and independent.
        return np.random.RandomState(1234)
class TestNanskewFixedValues(tm.TestCase):
    """Fixed-value tests for nanops.nanskew (xref GH 11974)."""

    def setUp(self):
        # Test data + skewness value (computed with scipy.stats.skew)
        self.samples = np.sin(np.linspace(0, 1, 200))
        self.actual_skew = -0.1875895205961754

    def test_constant_series(self):
        # xref GH 11974: constant input must give exactly zero skew,
        # not NaN from a 0/0.
        for val in [3075.2, 3075.3, 3075.5]:
            data = val * np.ones(300)
            skew = nanops.nanskew(data)
            self.assertEqual(skew, 0.0)

    def test_all_finite(self):
        # The sign of the skew must match the tail direction of a beta
        # sample.
        alpha, beta = 0.3, 0.1
        left_tailed = self.prng.beta(alpha, beta, size=100)
        self.assertLess(nanops.nanskew(left_tailed), 0)
        alpha, beta = 0.1, 0.3
        right_tailed = self.prng.beta(alpha, beta, size=100)
        self.assertGreater(nanops.nanskew(right_tailed), 0)

    def test_ground_truth(self):
        skew = nanops.nanskew(self.samples)
        self.assertAlmostEqual(skew, self.actual_skew)

    def test_axis(self):
        # An all-NaN row yields NaN for that row only.
        samples = np.vstack([self.samples,
                             np.nan * np.ones(len(self.samples))])
        skew = nanops.nanskew(samples, axis=1)
        tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))

    def test_nans(self):
        # skipna=False propagates a single NaN to the result.
        samples = np.hstack([self.samples, np.nan])
        skew = nanops.nanskew(samples, skipna=False)
        self.assertTrue(np.isnan(skew))

    def test_nans_skipna(self):
        # skipna=True ignores the appended NaN.
        samples = np.hstack([self.samples, np.nan])
        skew = nanops.nanskew(samples, skipna=True)
        tm.assert_almost_equal(skew, self.actual_skew)

    @property
    def prng(self):
        # Fresh, identically-seeded generator per access keeps every test
        # deterministic and independent.
        return np.random.RandomState(1234)
class TestNankurtFixedValues(tm.TestCase):
    """Fixed-value tests for nanops.nankurt (xref GH 11974)."""

    def setUp(self):
        # Test data + kurtosis value (computed with scipy.stats.kurtosis)
        self.samples = np.sin(np.linspace(0, 1, 200))
        self.actual_kurt = -1.2058303433799713

    def test_constant_series(self):
        # xref GH 11974: constant input must give exactly zero kurtosis,
        # not NaN from a 0/0.
        for val in [3075.2, 3075.3, 3075.5]:
            data = val * np.ones(300)
            kurt = nanops.nankurt(data)
            self.assertEqual(kurt, 0.0)

    def test_all_finite(self):
        # Sign checks on skewed beta samples.
        alpha, beta = 0.3, 0.1
        left_tailed = self.prng.beta(alpha, beta, size=100)
        self.assertLess(nanops.nankurt(left_tailed), 0)
        alpha, beta = 0.1, 0.3
        right_tailed = self.prng.beta(alpha, beta, size=100)
        self.assertGreater(nanops.nankurt(right_tailed), 0)

    def test_ground_truth(self):
        kurt = nanops.nankurt(self.samples)
        self.assertAlmostEqual(kurt, self.actual_kurt)

    def test_axis(self):
        # An all-NaN row yields NaN for that row only.
        samples = np.vstack([self.samples,
                             np.nan * np.ones(len(self.samples))])
        kurt = nanops.nankurt(samples, axis=1)
        tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))

    def test_nans(self):
        # skipna=False propagates a single NaN to the result.
        samples = np.hstack([self.samples, np.nan])
        kurt = nanops.nankurt(samples, skipna=False)
        self.assertTrue(np.isnan(kurt))

    def test_nans_skipna(self):
        # skipna=True ignores the appended NaN.
        samples = np.hstack([self.samples, np.nan])
        kurt = nanops.nankurt(samples, skipna=True)
        tm.assert_almost_equal(kurt, self.actual_kurt)

    @property
    def prng(self):
        # Fresh, identically-seeded generator per access keeps every test
        # deterministic and independent.
        return np.random.RandomState(1234)
| [
"659338505@qq.com"
] | 659338505@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.