content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
class Solution:
"""
@param matrix: the given matrix
@return: True if and only if the matrix is Toeplitz
"""
def isToeplitzMatrix(self, matrix):
# Write your code here
col=len(matrix[0])
row=len(matrix)
for i in range(1, row):
for j in range(1, col):
if matrix[i][j] != matrix[i-1][j-1]:
return False
return True | class Solution:
"""
@param matrix: the given matrix
@return: True if and only if the matrix is Toeplitz
"""
def is_toeplitz_matrix(self, matrix):
col = len(matrix[0])
row = len(matrix)
for i in range(1, row):
for j in range(1, col):
if matrix[i][j] != matrix[i - 1][j - 1]:
return False
return True |
_base_ = './retinanet_r50_fpn_1x_cityscapes.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='checkpoints/resnet101-63fe2227.pth')))
# load_from="checkpoints/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth" | _base_ = './retinanet_r50_fpn_1x_cityscapes.py'
model = dict(backbone=dict(depth=101, init_cfg=dict(type='Pretrained', checkpoint='checkpoints/resnet101-63fe2227.pth'))) |
# Title : Multiply all odd number
# Author : Kiran Raj R.
# Date : 06:11:2020
def multiply_odd(num):
""" Return sum of multiple of all odd number
below user specified range """
result = 1
for i in range(1,num, 2):
result*=i
return result
print(multiply_odd(10))
def multiply_even(num):
""" Return sum of multiple of all even number
below user specified range """
result = 1
for i in range(2,num, 2):
result*=i
return result
print(multiply_even(11)) | def multiply_odd(num):
""" Return sum of multiple of all odd number
below user specified range """
result = 1
for i in range(1, num, 2):
result *= i
return result
print(multiply_odd(10))
def multiply_even(num):
""" Return sum of multiple of all even number
below user specified range """
result = 1
for i in range(2, num, 2):
result *= i
return result
print(multiply_even(11)) |
class Player:
def __init__(self, name, life_value, attack_value):
self.name = name
self.life_value = life_value
self.attack_value = attack_value
def attack(self, enemy_player: 'Player'):
enemy_player.life_value = enemy_player.life_value - self.attack_value
def is_alive(self):
return self.life_value > 0 | class Player:
def __init__(self, name, life_value, attack_value):
self.name = name
self.life_value = life_value
self.attack_value = attack_value
def attack(self, enemy_player: 'Player'):
enemy_player.life_value = enemy_player.life_value - self.attack_value
def is_alive(self):
return self.life_value > 0 |
# Define physical constants
P0 = 1000. # Ground pressure level. Unit: hPa
SCALE_HEIGHT = 7000. # Unit: m
CP = 1004. # specific heat at constant pressure for air (cp) = 1004 J/kg-K
DRY_GAS_CONSTANT = 287.
EARTH_RADIUS = 6.378e+6 # Unit: m
EARTH_OMEGA = 7.29e-5
| p0 = 1000.0
scale_height = 7000.0
cp = 1004.0
dry_gas_constant = 287.0
earth_radius = 6378000.0
earth_omega = 7.29e-05 |
with open('./input.txt') as input:
lines = [int(s.strip()) for s in input.readlines()]
last = 999999999999
counter = 0
for (a, b, c) in zip(lines[0:-2], lines[1:-1], lines[2:]):
current = a + b + c
if (current > last):
counter += 1
last = current
print(counter) # 1378
| with open('./input.txt') as input:
lines = [int(s.strip()) for s in input.readlines()]
last = 999999999999
counter = 0
for (a, b, c) in zip(lines[0:-2], lines[1:-1], lines[2:]):
current = a + b + c
if current > last:
counter += 1
last = current
print(counter) |
fileName = input("What's the name of the file? ../logs/")
results = list(map(lambda e: e.split(" "), open(
'../logs/' + fileName, 'r').readlines()))
"""
Interesting statistics:
- Average score of all runs
- Worst score
- Best score
- Average of worst 20% of scores
- Average of best 20% of scores
- Win %
"""
results = sorted(results, key=lambda r: int(r[0]))
scores = [int(r[0]) for r in results]
wins = [int(r[2].strip()) for r in results]
print("Average score:", sum(scores) / len(scores))
print("Best score:", scores[-1])
print("Worst score:", scores[0])
worstTwenty = scores[:(len(scores) // 5)]
print("Average of bottom 20%:", sum(worstTwenty) / len(worstTwenty))
bestTwenty = scores[int(len(scores) * 0.8):]
print("Average of top 20%:", sum(bestTwenty) / len(bestTwenty))
winrate = sum(wins) / len(wins)
print("Winrate: ", winrate * 100, "%", sep="")
| file_name = input("What's the name of the file? ../logs/")
results = list(map(lambda e: e.split(' '), open('../logs/' + fileName, 'r').readlines()))
'\nInteresting statistics:\n - Average score of all runs\n - Worst score\n - Best score\n - Average of worst 20% of scores\n - Average of best 20% of scores\n - Win %\n'
results = sorted(results, key=lambda r: int(r[0]))
scores = [int(r[0]) for r in results]
wins = [int(r[2].strip()) for r in results]
print('Average score:', sum(scores) / len(scores))
print('Best score:', scores[-1])
print('Worst score:', scores[0])
worst_twenty = scores[:len(scores) // 5]
print('Average of bottom 20%:', sum(worstTwenty) / len(worstTwenty))
best_twenty = scores[int(len(scores) * 0.8):]
print('Average of top 20%:', sum(bestTwenty) / len(bestTwenty))
winrate = sum(wins) / len(wins)
print('Winrate: ', winrate * 100, '%', sep='') |
# This program saves a list of numbers to a file.
def main():
# Create a list of numbers.
numbers = [1, 2, 3, 4, 5, 6, 7]
# Open a file for writing.
outfile = open('numberlist.txt', 'w')
# Write the list to the file.
for item in numbers:
outfile.write(str(item) + '\n')
# Close the file.
outfile.close()
# Call the main function.
main()
| def main():
numbers = [1, 2, 3, 4, 5, 6, 7]
outfile = open('numberlist.txt', 'w')
for item in numbers:
outfile.write(str(item) + '\n')
outfile.close()
main() |
x = int(input('Enter your Age: '))
print('****************')
for i in range(0, 1):
if x >= 18:
print('You can watch content with R-rating')
elif x >= 13:
print('You can watch movies under parental guidance ')
else:
print('Cartoons permitted')
print(' Thanks! ')
| x = int(input('Enter your Age: '))
print('****************')
for i in range(0, 1):
if x >= 18:
print('You can watch content with R-rating')
elif x >= 13:
print('You can watch movies under parental guidance ')
else:
print('Cartoons permitted')
print(' Thanks! ') |
def internal_consistency_check(Reports_dict, reportnos=None):
return_dict = {}
if reportnos:
search_list = reportnos
else:
search_list = list(Reports_dict.keys())
for reportno in search_list:
rdf = pd.DataFrame()
rdf = Reports_dict[reportno].copy()
print('REPORT', reportno)
if not rdf.empty:
if reportno == '1':
add_down_dont_match = check_add_down(rdf=rdf, tot_col='GEO', columns_to_add=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'], groupby_vars=['YEAR', 'DRUG_CODE', 'STATE'])
add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
return_dict[reportno] = (add_down_dont_match, add_across_dont_match)
assert return_dict[reportno] == ({}, {})
elif reportno == '2':
add_down_dont_match = check_add_down(rdf=rdf, tot_col='GEO', columns_to_add=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'], groupby_vars=['YEAR', 'DRUG_CODE'])
add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
return_dict[reportno] = (add_down_dont_match, add_across_dont_match)
assert return_dict[reportno] == ({}, {})
elif reportno == '3':
add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
return_dict[reportno] = (add_across_dont_match)
assert return_dict[reportno] == {}
elif reportno == '4':
# 4 is the only report with unexplainable internal inconsistencies, for American Samoa in 2002
add_down_dont_match = check_add_down(rdf=rdf, tot_col='STATE', columns_to_add=['TOTAL GRAMS'], groupby_vars=['YEAR', 'DRUG_CODE'])
rdf['POP'] = np.nan
if '2000 POP' in list(rdf.columns):
rdf['POP'][rdf['2000 POP'].notnull()] = rdf['2000 POP']
if '2010 POP' in list(rdf.columns):
rdf['POP'][rdf['2010 POP'].notnull()] = rdf['2010 POP']
divisor_dont_match = check_divide(rdf, 'TOTAL GRAMS', 'POP', 'GRAMS/100K POP', 100000)
assert all(divisor_dont_match[('TOTAL GRAMS', 'POP', 'GRAMS/100K POP')]['STATE'] == 'AMERICAN SAMOA')
return_dict[reportno] = (add_down_dont_match, divisor_dont_match)
elif reportno == '5' or reportno == '7':
divisor_dont_match = check_divide(rdf, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS')
return_dict[reportno] = (divisor_dont_match)
assert return_dict[reportno] == {}
return return_dict
def across_consistency_check(Reports_dict, reportlist):
returndict = {}
if reportlist == ['5', '7']:
# There are some errors, almost entirely in 2011 but a few in buyers in 2014
rdf5, rdf7 = pd.DataFrame(), pd.DataFrame()
rdf5, rdf7 = Reports_dict['5'].copy(), Reports_dict['7'].copy()
assert check_divide(rdf5, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS') == {}
assert check_divide(rdf7, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS') == {}
returndict[('5', '7')] = groupby_across_sheets(big_df=rdf5[list(set(rdf5.columns) - {'AVG GRAMS'})], small_df=rdf7[list(set(rdf7.columns) - {'AVG GRAMS'})], groupby_vars=['YEAR', 'DRUG CODE', 'BUSINESS ACTIVITY'], compare_cols=['TOTAL GRAMS', 'BUYERS'])
returndict2, returnlist_unmatch = returndict[('5', '7')]
assert all(returnlist_unmatch['YEAR'] == '2011')
for key in returndict2:
assert sum(returndict2[key]['YEAR'] == '2011') + sum(returndict2[key]['YEAR'] == '2014') == len(returndict2[key])
if reportlist == ['2', '3', '4']:
# All errors are in US totals only. But there
rdf2, rdf3, rdf4 = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
rdf2, rdf3, rdf4 = Reports_dict['2'].copy(), Reports_dict['3'].copy(), Reports_dict['4'].copy()
target_us_row_title = list(set(rdf3['GEO']) - set(statelist))[0]
for item in set(rdf2['GEO']) - set(statelist):
rdf2['GEO'].loc[rdf2['GEO'] == item] = target_us_row_title
for item in set(rdf3['GEO']) - set(statelist):
rdf3['GEO'].loc[rdf3['GEO'] == item] = target_us_row_title
for item in set(rdf4['STATE']) - set(statelist):
rdf4['STATE'].loc[rdf4['STATE'] == item] = target_us_row_title
assert all(rdf3.columns == rdf2.columns)
for col in ['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL']:
rdf3[col] = rdf3[col].apply(lambda x: float(str(x).replace(',', '')))
rdf3[col][rdf3['YEAR'] == '2011'] = rdf3[col] / 1000
rdf4['POP'] = np.nan
if '2000 POP' in list(rdf4.columns):
rdf4['POP'][rdf4['2000 POP'].notnull()] = rdf4['2000 POP']
if '2010 POP' in list(rdf4.columns):
rdf4['POP'][rdf4['2010 POP'].notnull()] = rdf4['2010 POP']
df_pop = pd.DataFrame([(float(str(x[0]).replace(',', '')), x[1], x[2]) for x in list(set([tuple(x) for x in rdf4[['POP', 'YEAR', 'STATE']].values]))])
df_pop.columns = ['POP', 'YEAR', 'GEO']
rdf2 = pd.merge(rdf2, df_pop, on=['GEO', 'YEAR'])
merged = pd.merge(rdf2, rdf3, how='outer', on=['DRUG_CODE', 'GEO', 'YEAR'], indicator=True)
missing_entries = merged[merged['_merge'] == 'right_only']
returndict[('5', '7', 'missing_entries')] = missing_entries
merged2 = pd.merge(rdf2, rdf3, how='inner', on=['DRUG_CODE', 'GEO', 'YEAR'], )
for s1 in range(1, 5):
d = check_divide(merged2, 'Q%s_x' % s1, 'POP', 'Q%s_y' % s1, 100000)
assert set(list(d.values())[0]['GEO']) == {target_us_row_title}
returndict[('5', '7', 'Q%s' % s1)] = d
if reportlist == ['1', '2']:
rdf1, rdf2 = pd.DataFrame(), pd.DataFrame()
rdf1, rdf2 = Reports_dict['1'].copy(), Reports_dict['2'].copy()
for tot in [x for x in set(rdf1['GEO']) if x.isdigit() is False]:
rdf1 = rdf1.loc[rdf1['GEO'] != tot]
for us in [x for x in set(rdf2['GEO']) if x not in statelist]:
rdf2 = rdf2.loc[rdf2['GEO'] != us]
rdf2['STATE'] = rdf2['GEO']
returndict[('1', '2')] = groupby_across_sheets(big_df=rdf1, small_df=rdf2, groupby_vars=['YEAR', 'DRUG_CODE', 'STATE'], compare_cols=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'])
if reportlist == ['2', '5']:
rdf2, rdf5 = pd.DataFrame(), pd.DataFrame()
rdf2, rdf5 = Reports_dict['2'].copy(), Reports_dict['5'].copy()
rdf2 = rdf2.loc[rdf2['GEO'] != 'UNITED STATES']
rdf2['STATE'] = rdf2['GEO']
rdf2['TOTAL GRAMS'] = rdf2['TOTAL']
rdf2['DRUG CODE'] = rdf2['DRUG_CODE']
returndict[('2', '5')] = groupby_across_sheets(big_df=rdf5, small_df=rdf2, groupby_vars=['YEAR', 'DRUG CODE', 'STATE'], compare_cols=['TOTAL GRAMS'])
return returndict
def groupby_across_sheets(bigdf, smalldf, groupby_vars, compare_cols):
big_df = bigdf.copy()
small_df = smalldf.copy()
returndict2 = {}
for col in compare_cols:
big_df[col] = big_df[col].apply(lambda x: float(str(x).replace(',', '')))
small_df[col] = small_df[col].apply(lambda x: float(str(x).replace(',', '')))
big_df_test = big_df.groupby(groupby_vars).sum()
merged_rdf = pd.merge(big_df_test, small_df, right_on=groupby_vars, left_index=True, how='outer', indicator=True)
returnlist_unmatch = merged_rdf[merged_rdf['_merge'] != 'both']
merged_rdf = pd.merge(big_df_test, small_df, right_on=groupby_vars, left_index=True, how='inner', indicator=True)
for col in compare_cols:
colx = col + '_x'
coly = col + '_y'
df_nonmatch = merged_rdf[merged_rdf.apply(lambda x: are_close(x[colx], x[coly], 0.015) is False, axis=1)]
if len(df_nonmatch) > 0:
returndict2[col] = df_nonmatch
return returndict2, returnlist_unmatch
def check_add_down(rdf, tot_col, columns_to_add, groupby_vars):
rdfa = pd.DataFrame()
rdfa = rdf.copy()
tot_loc = {tot_col: [x for x in list(set(rdfa[tot_col].tolist())) if x in totallist]}
add_down_dont_match = {}
tot_strings = list(tot_loc.values())[0]
for col in columns_to_add:
rdfa[col] = rdfa[col].apply(lambda x: float(x.replace(',', '')))
rdfa['bin'] = rdfa[list(tot_loc.keys())[0]].apply(lambda x: x in tot_strings)
rdf_test = rdfa.groupby(groupby_vars + ['bin']).sum()
pctc = pd.DataFrame(round(abs(rdf_test.groupby(groupby_vars).pct_change())))
totdiv = 0
for year in set(rdfa['YEAR']):
div = len(set(rdfa['bin'][rdfa['YEAR'] == year]))
for v in groupby_vars:
div = div * len(set(rdfa[v][rdfa['YEAR'] == year]))
totdiv = totdiv + div
entries = len(pctc) / totdiv
assert 0.6 <= entries <= 1
for column_to_add in columns_to_add:
r = rdf_test.loc[pctc[column_to_add].notnull() & pctc[column_to_add] != 0]
if len(r) > 0:
add_down_dont_match[column_to_add] = r
return add_down_dont_match
def check_add_across(rdf, columns_to_add, col_tot):
rdfa = pd.DataFrame()
rdfa = rdf.copy()
add_across_dont_match = {}
for col in columns_to_add + col_tot:
rdfa[col] = rdfa[col].apply(lambda x: float(x.replace(',', '')))
r = rdfa.loc[round(rdfa[columns_to_add].sum(axis=1) - rdfa[col_tot[0]], 1) != 0]
if len(r) > 0:
add_across_dont_match[[tuple(columns_to_add + col_tot)]] = r
return add_across_dont_match
def check_divide(rdf, top_divisor, bot_divisor, equals_to, multiplier=1, tolerance=0.02):
rdfa = pd.DataFrame()
rdfa = rdf.copy()
divide_dont_match = {}
for col in [top_divisor, bot_divisor, equals_to]:
rdfa[col] = rdfa[col].apply(lambda x: float(str(x).replace(',', '')))
rdfa['CALCULATED'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, 'calc'), axis=1)
rdfa['BOOL'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, 'bool'), axis=1)
rdfa['CLOSE'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, tolerance), axis=1)
if len(rdfa.loc[-rdfa['CLOSE']]) > 0:
columns_list = [x for x in list(rdfa.columns) if ('_x' not in x and '_y' not in x) or x in [top_divisor, bot_divisor, equals_to]]
divide_dont_match[(top_divisor, bot_divisor, equals_to)] = rdfa[columns_list].loc[-rdfa['CLOSE']]
return divide_dont_match
def custom_lambda(df, top_divisor, bot_divisor, equals_to, multiplier, returntype):
calculated = multiplier * df[top_divisor] / df[bot_divisor]
res = are_equal(calculated, df[equals_to])
if returntype == 'bool':
return res[0]
if type(returntype) is float:
return are_close(calculated, df[equals_to], returntype)
return res[1]
def are_equal(val_compare, reference_val):
r = return_round(reference_val)
val = round(val_compare, r)
comp = round(reference_val, r)
val1 = round(val_compare, r + 1)
comp1 = round(reference_val, r + 1)
return (val == comp or val1 == comp1), (val, comp, val1, comp1)
def are_close(val_compare, reference_val, tolerance):
b, (val, comp, val1, comp1) = are_equal(val_compare, reference_val)
if not b:
if reference_val != 0:
return (abs(val_compare - reference_val) / reference_val <= tolerance) or (abs(val - comp) <= tolerance)
else:
return (abs(val - comp) <= tolerance)
return b
def return_round(x):
if x > 0:
if int(math.log10(x)) == math.log10(x) and int(math.log10(x)) < 0:
magn = int(math.log10(x)) + 1
else:
magn = int(math.log10(x))
if magn < 0:
return -(magn - 1)
elif magn == 0 or magn == 1 or magn == 2:
return 2
elif magn == 3 or magn == 4:
return 1
else:
return 0
elif x == 0:
return 2
else:
raise Exception("shouldn't be less than zero")
| def internal_consistency_check(Reports_dict, reportnos=None):
return_dict = {}
if reportnos:
search_list = reportnos
else:
search_list = list(Reports_dict.keys())
for reportno in search_list:
rdf = pd.DataFrame()
rdf = Reports_dict[reportno].copy()
print('REPORT', reportno)
if not rdf.empty:
if reportno == '1':
add_down_dont_match = check_add_down(rdf=rdf, tot_col='GEO', columns_to_add=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'], groupby_vars=['YEAR', 'DRUG_CODE', 'STATE'])
add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
return_dict[reportno] = (add_down_dont_match, add_across_dont_match)
assert return_dict[reportno] == ({}, {})
elif reportno == '2':
add_down_dont_match = check_add_down(rdf=rdf, tot_col='GEO', columns_to_add=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'], groupby_vars=['YEAR', 'DRUG_CODE'])
add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
return_dict[reportno] = (add_down_dont_match, add_across_dont_match)
assert return_dict[reportno] == ({}, {})
elif reportno == '3':
add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
return_dict[reportno] = add_across_dont_match
assert return_dict[reportno] == {}
elif reportno == '4':
add_down_dont_match = check_add_down(rdf=rdf, tot_col='STATE', columns_to_add=['TOTAL GRAMS'], groupby_vars=['YEAR', 'DRUG_CODE'])
rdf['POP'] = np.nan
if '2000 POP' in list(rdf.columns):
rdf['POP'][rdf['2000 POP'].notnull()] = rdf['2000 POP']
if '2010 POP' in list(rdf.columns):
rdf['POP'][rdf['2010 POP'].notnull()] = rdf['2010 POP']
divisor_dont_match = check_divide(rdf, 'TOTAL GRAMS', 'POP', 'GRAMS/100K POP', 100000)
assert all(divisor_dont_match['TOTAL GRAMS', 'POP', 'GRAMS/100K POP']['STATE'] == 'AMERICAN SAMOA')
return_dict[reportno] = (add_down_dont_match, divisor_dont_match)
elif reportno == '5' or reportno == '7':
divisor_dont_match = check_divide(rdf, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS')
return_dict[reportno] = divisor_dont_match
assert return_dict[reportno] == {}
return return_dict
def across_consistency_check(Reports_dict, reportlist):
returndict = {}
if reportlist == ['5', '7']:
(rdf5, rdf7) = (pd.DataFrame(), pd.DataFrame())
(rdf5, rdf7) = (Reports_dict['5'].copy(), Reports_dict['7'].copy())
assert check_divide(rdf5, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS') == {}
assert check_divide(rdf7, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS') == {}
returndict['5', '7'] = groupby_across_sheets(big_df=rdf5[list(set(rdf5.columns) - {'AVG GRAMS'})], small_df=rdf7[list(set(rdf7.columns) - {'AVG GRAMS'})], groupby_vars=['YEAR', 'DRUG CODE', 'BUSINESS ACTIVITY'], compare_cols=['TOTAL GRAMS', 'BUYERS'])
(returndict2, returnlist_unmatch) = returndict['5', '7']
assert all(returnlist_unmatch['YEAR'] == '2011')
for key in returndict2:
assert sum(returndict2[key]['YEAR'] == '2011') + sum(returndict2[key]['YEAR'] == '2014') == len(returndict2[key])
if reportlist == ['2', '3', '4']:
(rdf2, rdf3, rdf4) = (pd.DataFrame(), pd.DataFrame(), pd.DataFrame())
(rdf2, rdf3, rdf4) = (Reports_dict['2'].copy(), Reports_dict['3'].copy(), Reports_dict['4'].copy())
target_us_row_title = list(set(rdf3['GEO']) - set(statelist))[0]
for item in set(rdf2['GEO']) - set(statelist):
rdf2['GEO'].loc[rdf2['GEO'] == item] = target_us_row_title
for item in set(rdf3['GEO']) - set(statelist):
rdf3['GEO'].loc[rdf3['GEO'] == item] = target_us_row_title
for item in set(rdf4['STATE']) - set(statelist):
rdf4['STATE'].loc[rdf4['STATE'] == item] = target_us_row_title
assert all(rdf3.columns == rdf2.columns)
for col in ['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL']:
rdf3[col] = rdf3[col].apply(lambda x: float(str(x).replace(',', '')))
rdf3[col][rdf3['YEAR'] == '2011'] = rdf3[col] / 1000
rdf4['POP'] = np.nan
if '2000 POP' in list(rdf4.columns):
rdf4['POP'][rdf4['2000 POP'].notnull()] = rdf4['2000 POP']
if '2010 POP' in list(rdf4.columns):
rdf4['POP'][rdf4['2010 POP'].notnull()] = rdf4['2010 POP']
df_pop = pd.DataFrame([(float(str(x[0]).replace(',', '')), x[1], x[2]) for x in list(set([tuple(x) for x in rdf4[['POP', 'YEAR', 'STATE']].values]))])
df_pop.columns = ['POP', 'YEAR', 'GEO']
rdf2 = pd.merge(rdf2, df_pop, on=['GEO', 'YEAR'])
merged = pd.merge(rdf2, rdf3, how='outer', on=['DRUG_CODE', 'GEO', 'YEAR'], indicator=True)
missing_entries = merged[merged['_merge'] == 'right_only']
returndict['5', '7', 'missing_entries'] = missing_entries
merged2 = pd.merge(rdf2, rdf3, how='inner', on=['DRUG_CODE', 'GEO', 'YEAR'])
for s1 in range(1, 5):
d = check_divide(merged2, 'Q%s_x' % s1, 'POP', 'Q%s_y' % s1, 100000)
assert set(list(d.values())[0]['GEO']) == {target_us_row_title}
returndict['5', '7', 'Q%s' % s1] = d
if reportlist == ['1', '2']:
(rdf1, rdf2) = (pd.DataFrame(), pd.DataFrame())
(rdf1, rdf2) = (Reports_dict['1'].copy(), Reports_dict['2'].copy())
for tot in [x for x in set(rdf1['GEO']) if x.isdigit() is False]:
rdf1 = rdf1.loc[rdf1['GEO'] != tot]
for us in [x for x in set(rdf2['GEO']) if x not in statelist]:
rdf2 = rdf2.loc[rdf2['GEO'] != us]
rdf2['STATE'] = rdf2['GEO']
returndict['1', '2'] = groupby_across_sheets(big_df=rdf1, small_df=rdf2, groupby_vars=['YEAR', 'DRUG_CODE', 'STATE'], compare_cols=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'])
if reportlist == ['2', '5']:
(rdf2, rdf5) = (pd.DataFrame(), pd.DataFrame())
(rdf2, rdf5) = (Reports_dict['2'].copy(), Reports_dict['5'].copy())
rdf2 = rdf2.loc[rdf2['GEO'] != 'UNITED STATES']
rdf2['STATE'] = rdf2['GEO']
rdf2['TOTAL GRAMS'] = rdf2['TOTAL']
rdf2['DRUG CODE'] = rdf2['DRUG_CODE']
returndict['2', '5'] = groupby_across_sheets(big_df=rdf5, small_df=rdf2, groupby_vars=['YEAR', 'DRUG CODE', 'STATE'], compare_cols=['TOTAL GRAMS'])
return returndict
def groupby_across_sheets(bigdf, smalldf, groupby_vars, compare_cols):
big_df = bigdf.copy()
small_df = smalldf.copy()
returndict2 = {}
for col in compare_cols:
big_df[col] = big_df[col].apply(lambda x: float(str(x).replace(',', '')))
small_df[col] = small_df[col].apply(lambda x: float(str(x).replace(',', '')))
big_df_test = big_df.groupby(groupby_vars).sum()
merged_rdf = pd.merge(big_df_test, small_df, right_on=groupby_vars, left_index=True, how='outer', indicator=True)
returnlist_unmatch = merged_rdf[merged_rdf['_merge'] != 'both']
merged_rdf = pd.merge(big_df_test, small_df, right_on=groupby_vars, left_index=True, how='inner', indicator=True)
for col in compare_cols:
colx = col + '_x'
coly = col + '_y'
df_nonmatch = merged_rdf[merged_rdf.apply(lambda x: are_close(x[colx], x[coly], 0.015) is False, axis=1)]
if len(df_nonmatch) > 0:
returndict2[col] = df_nonmatch
return (returndict2, returnlist_unmatch)
def check_add_down(rdf, tot_col, columns_to_add, groupby_vars):
rdfa = pd.DataFrame()
rdfa = rdf.copy()
tot_loc = {tot_col: [x for x in list(set(rdfa[tot_col].tolist())) if x in totallist]}
add_down_dont_match = {}
tot_strings = list(tot_loc.values())[0]
for col in columns_to_add:
rdfa[col] = rdfa[col].apply(lambda x: float(x.replace(',', '')))
rdfa['bin'] = rdfa[list(tot_loc.keys())[0]].apply(lambda x: x in tot_strings)
rdf_test = rdfa.groupby(groupby_vars + ['bin']).sum()
pctc = pd.DataFrame(round(abs(rdf_test.groupby(groupby_vars).pct_change())))
totdiv = 0
for year in set(rdfa['YEAR']):
div = len(set(rdfa['bin'][rdfa['YEAR'] == year]))
for v in groupby_vars:
div = div * len(set(rdfa[v][rdfa['YEAR'] == year]))
totdiv = totdiv + div
entries = len(pctc) / totdiv
assert 0.6 <= entries <= 1
for column_to_add in columns_to_add:
r = rdf_test.loc[pctc[column_to_add].notnull() & pctc[column_to_add] != 0]
if len(r) > 0:
add_down_dont_match[column_to_add] = r
return add_down_dont_match
def check_add_across(rdf, columns_to_add, col_tot):
rdfa = pd.DataFrame()
rdfa = rdf.copy()
add_across_dont_match = {}
for col in columns_to_add + col_tot:
rdfa[col] = rdfa[col].apply(lambda x: float(x.replace(',', '')))
r = rdfa.loc[round(rdfa[columns_to_add].sum(axis=1) - rdfa[col_tot[0]], 1) != 0]
if len(r) > 0:
add_across_dont_match[[tuple(columns_to_add + col_tot)]] = r
return add_across_dont_match
def check_divide(rdf, top_divisor, bot_divisor, equals_to, multiplier=1, tolerance=0.02):
rdfa = pd.DataFrame()
rdfa = rdf.copy()
divide_dont_match = {}
for col in [top_divisor, bot_divisor, equals_to]:
rdfa[col] = rdfa[col].apply(lambda x: float(str(x).replace(',', '')))
rdfa['CALCULATED'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, 'calc'), axis=1)
rdfa['BOOL'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, 'bool'), axis=1)
rdfa['CLOSE'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, tolerance), axis=1)
if len(rdfa.loc[-rdfa['CLOSE']]) > 0:
columns_list = [x for x in list(rdfa.columns) if '_x' not in x and '_y' not in x or x in [top_divisor, bot_divisor, equals_to]]
divide_dont_match[top_divisor, bot_divisor, equals_to] = rdfa[columns_list].loc[-rdfa['CLOSE']]
return divide_dont_match
def custom_lambda(df, top_divisor, bot_divisor, equals_to, multiplier, returntype):
calculated = multiplier * df[top_divisor] / df[bot_divisor]
res = are_equal(calculated, df[equals_to])
if returntype == 'bool':
return res[0]
if type(returntype) is float:
return are_close(calculated, df[equals_to], returntype)
return res[1]
def are_equal(val_compare, reference_val):
r = return_round(reference_val)
val = round(val_compare, r)
comp = round(reference_val, r)
val1 = round(val_compare, r + 1)
comp1 = round(reference_val, r + 1)
return (val == comp or val1 == comp1, (val, comp, val1, comp1))
def are_close(val_compare, reference_val, tolerance):
(b, (val, comp, val1, comp1)) = are_equal(val_compare, reference_val)
if not b:
if reference_val != 0:
return abs(val_compare - reference_val) / reference_val <= tolerance or abs(val - comp) <= tolerance
else:
return abs(val - comp) <= tolerance
return b
def return_round(x):
if x > 0:
if int(math.log10(x)) == math.log10(x) and int(math.log10(x)) < 0:
magn = int(math.log10(x)) + 1
else:
magn = int(math.log10(x))
if magn < 0:
return -(magn - 1)
elif magn == 0 or magn == 1 or magn == 2:
return 2
elif magn == 3 or magn == 4:
return 1
else:
return 0
elif x == 0:
return 2
else:
raise exception("shouldn't be less than zero") |
N = int(input())
X = 1
K = 0
while X <= N:
X *= 2
K += 1
print(max(0, K - 1)) | n = int(input())
x = 1
k = 0
while X <= N:
x *= 2
k += 1
print(max(0, K - 1)) |
data_in = [3.0,
1.0,
0.0,
0.0,
1.0,
6.0,
1.0,
0.0,
1.0,
0.0,
3280.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
1.0] | data_in = [3.0, 1.0, 0.0, 0.0, 1.0, 6.0, 1.0, 0.0, 1.0, 0.0, 3280.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0] |
# Parsers
# Parse initial fields name to normalized form.
parse_name = lambda name: str(name).replace(' ', '_').lower()
| parse_name = lambda name: str(name).replace(' ', '_').lower() |
"""
# IMPLEMENT POW(X, N)
Implement pow(x, n), which calculates x raised to the power n (i.e. xn).
Example 1:
Input: x = 2.00000, n = 10
Output: 1024.00000
Example 2:
Input: x = 2.10000, n = 3
Output: 9.26100
Example 3:
Input: x = 2.00000, n = -2
Output: 0.25000
Explanation: 2-2 = 1/22 = 1/4 = 0.25
Constraints:
-100.0 < x < 100.0
-231 <= n <= 231-1
-104 <= xn <= 104
"""
def myPow(x, n):
if n == 0:
return 1
elif n < 0:
return 1 / myPow(x, abs(n))
elif n % 2 != 0:
return x * myPow(x, n-1)
else:
return myPow(x*x, n/2) | """
# IMPLEMENT POW(X, N)
Implement pow(x, n), which calculates x raised to the power n (i.e. xn).
Example 1:
Input: x = 2.00000, n = 10
Output: 1024.00000
Example 2:
Input: x = 2.10000, n = 3
Output: 9.26100
Example 3:
Input: x = 2.00000, n = -2
Output: 0.25000
Explanation: 2-2 = 1/22 = 1/4 = 0.25
Constraints:
-100.0 < x < 100.0
-231 <= n <= 231-1
-104 <= xn <= 104
"""
def my_pow(x, n):
if n == 0:
return 1
elif n < 0:
return 1 / my_pow(x, abs(n))
elif n % 2 != 0:
return x * my_pow(x, n - 1)
else:
return my_pow(x * x, n / 2) |
# Oct 2021
# Class for extraction.py
class FoundExpression:
def __init__(self, expression: str,
file: str,
language: str,
line_no: int):
self.expression = expression
self.language = language
self.file = file
self.line_no = line_no | class Foundexpression:
def __init__(self, expression: str, file: str, language: str, line_no: int):
self.expression = expression
self.language = language
self.file = file
self.line_no = line_no |
"""
@author: magician
@date: 2019/12/24
@file: rotate_array.py
"""
def rotate(nums, k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
# nums = nums[k + 1:] + nums[:k + 1]
for i in range(k):
nums.insert(0, nums[-1])
nums.pop()
return nums
if __name__ == '__main__':
assert rotate([1,2,3,4,5,6,7], 3) == [5,6,7,1,2,3,4]
| """
@author: magician
@date: 2019/12/24
@file: rotate_array.py
"""
def rotate(nums, k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
for i in range(k):
nums.insert(0, nums[-1])
nums.pop()
return nums
if __name__ == '__main__':
assert rotate([1, 2, 3, 4, 5, 6, 7], 3) == [5, 6, 7, 1, 2, 3, 4] |
class Solution:
def minOperations(self, nums: List[int]) -> int:
n = len(nums)
ans = n
nums = sorted(set(nums))
for i, start in enumerate(nums):
end = start + n - 1
index = bisect_right(nums, end)
uniqueLength = index - i
ans = min(ans, n - uniqueLength)
return ans
| class Solution:
def min_operations(self, nums: List[int]) -> int:
n = len(nums)
ans = n
nums = sorted(set(nums))
for (i, start) in enumerate(nums):
end = start + n - 1
index = bisect_right(nums, end)
unique_length = index - i
ans = min(ans, n - uniqueLength)
return ans |
"""
git-flow -- A collection of Git extensions to provide high-level
repository operations for Vincent Driessen's branching model.
"""
#
# This file is part of `gitflow`.
# Copyright (c) 2010-2011 Vincent Driessen
# Copyright (c) 2012 Hartmut Goebel
# Distributed under a BSD-like license. For full terms see the file LICENSE.txt
#
VERSION = (0, 6, 3)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Vincent Driessen, Hartmut Goebel"
__contact__ = "vincent@datafox.nl, h.goebel@goebel-consult.de"
__homepage__ = "http://github.com/nvie/gitflow/"
__docformat__ = "restructuredtext"
__copyright__ = "2010-2011 Vincent Driessen; 2012 Hartmut Goebel"
__license__ = "BSD"
| """
git-flow -- A collection of Git extensions to provide high-level
repository operations for Vincent Driessen's branching model.
"""
version = (0, 6, 3)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Vincent Driessen, Hartmut Goebel'
__contact__ = 'vincent@datafox.nl, h.goebel@goebel-consult.de'
__homepage__ = 'http://github.com/nvie/gitflow/'
__docformat__ = 'restructuredtext'
__copyright__ = '2010-2011 Vincent Driessen; 2012 Hartmut Goebel'
__license__ = 'BSD' |
data = (
'jun', # 0x00
'junj', # 0x01
'junh', # 0x02
'jud', # 0x03
'jul', # 0x04
'julg', # 0x05
'julm', # 0x06
'julb', # 0x07
'juls', # 0x08
'jult', # 0x09
'julp', # 0x0a
'julh', # 0x0b
'jum', # 0x0c
'jub', # 0x0d
'jubs', # 0x0e
'jus', # 0x0f
'juss', # 0x10
'jung', # 0x11
'juj', # 0x12
'juc', # 0x13
'juk', # 0x14
'jut', # 0x15
'jup', # 0x16
'juh', # 0x17
'jweo', # 0x18
'jweog', # 0x19
'jweogg', # 0x1a
'jweogs', # 0x1b
'jweon', # 0x1c
'jweonj', # 0x1d
'jweonh', # 0x1e
'jweod', # 0x1f
'jweol', # 0x20
'jweolg', # 0x21
'jweolm', # 0x22
'jweolb', # 0x23
'jweols', # 0x24
'jweolt', # 0x25
'jweolp', # 0x26
'jweolh', # 0x27
'jweom', # 0x28
'jweob', # 0x29
'jweobs', # 0x2a
'jweos', # 0x2b
'jweoss', # 0x2c
'jweong', # 0x2d
'jweoj', # 0x2e
'jweoc', # 0x2f
'jweok', # 0x30
'jweot', # 0x31
'jweop', # 0x32
'jweoh', # 0x33
'jwe', # 0x34
'jweg', # 0x35
'jwegg', # 0x36
'jwegs', # 0x37
'jwen', # 0x38
'jwenj', # 0x39
'jwenh', # 0x3a
'jwed', # 0x3b
'jwel', # 0x3c
'jwelg', # 0x3d
'jwelm', # 0x3e
'jwelb', # 0x3f
'jwels', # 0x40
'jwelt', # 0x41
'jwelp', # 0x42
'jwelh', # 0x43
'jwem', # 0x44
'jweb', # 0x45
'jwebs', # 0x46
'jwes', # 0x47
'jwess', # 0x48
'jweng', # 0x49
'jwej', # 0x4a
'jwec', # 0x4b
'jwek', # 0x4c
'jwet', # 0x4d
'jwep', # 0x4e
'jweh', # 0x4f
'jwi', # 0x50
'jwig', # 0x51
'jwigg', # 0x52
'jwigs', # 0x53
'jwin', # 0x54
'jwinj', # 0x55
'jwinh', # 0x56
'jwid', # 0x57
'jwil', # 0x58
'jwilg', # 0x59
'jwilm', # 0x5a
'jwilb', # 0x5b
'jwils', # 0x5c
'jwilt', # 0x5d
'jwilp', # 0x5e
'jwilh', # 0x5f
'jwim', # 0x60
'jwib', # 0x61
'jwibs', # 0x62
'jwis', # 0x63
'jwiss', # 0x64
'jwing', # 0x65
'jwij', # 0x66
'jwic', # 0x67
'jwik', # 0x68
'jwit', # 0x69
'jwip', # 0x6a
'jwih', # 0x6b
'jyu', # 0x6c
'jyug', # 0x6d
'jyugg', # 0x6e
'jyugs', # 0x6f
'jyun', # 0x70
'jyunj', # 0x71
'jyunh', # 0x72
'jyud', # 0x73
'jyul', # 0x74
'jyulg', # 0x75
'jyulm', # 0x76
'jyulb', # 0x77
'jyuls', # 0x78
'jyult', # 0x79
'jyulp', # 0x7a
'jyulh', # 0x7b
'jyum', # 0x7c
'jyub', # 0x7d
'jyubs', # 0x7e
'jyus', # 0x7f
'jyuss', # 0x80
'jyung', # 0x81
'jyuj', # 0x82
'jyuc', # 0x83
'jyuk', # 0x84
'jyut', # 0x85
'jyup', # 0x86
'jyuh', # 0x87
'jeu', # 0x88
'jeug', # 0x89
'jeugg', # 0x8a
'jeugs', # 0x8b
'jeun', # 0x8c
'jeunj', # 0x8d
'jeunh', # 0x8e
'jeud', # 0x8f
'jeul', # 0x90
'jeulg', # 0x91
'jeulm', # 0x92
'jeulb', # 0x93
'jeuls', # 0x94
'jeult', # 0x95
'jeulp', # 0x96
'jeulh', # 0x97
'jeum', # 0x98
'jeub', # 0x99
'jeubs', # 0x9a
'jeus', # 0x9b
'jeuss', # 0x9c
'jeung', # 0x9d
'jeuj', # 0x9e
'jeuc', # 0x9f
'jeuk', # 0xa0
'jeut', # 0xa1
'jeup', # 0xa2
'jeuh', # 0xa3
'jyi', # 0xa4
'jyig', # 0xa5
'jyigg', # 0xa6
'jyigs', # 0xa7
'jyin', # 0xa8
'jyinj', # 0xa9
'jyinh', # 0xaa
'jyid', # 0xab
'jyil', # 0xac
'jyilg', # 0xad
'jyilm', # 0xae
'jyilb', # 0xaf
'jyils', # 0xb0
'jyilt', # 0xb1
'jyilp', # 0xb2
'jyilh', # 0xb3
'jyim', # 0xb4
'jyib', # 0xb5
'jyibs', # 0xb6
'jyis', # 0xb7
'jyiss', # 0xb8
'jying', # 0xb9
'jyij', # 0xba
'jyic', # 0xbb
'jyik', # 0xbc
'jyit', # 0xbd
'jyip', # 0xbe
'jyih', # 0xbf
'ji', # 0xc0
'jig', # 0xc1
'jigg', # 0xc2
'jigs', # 0xc3
'jin', # 0xc4
'jinj', # 0xc5
'jinh', # 0xc6
'jid', # 0xc7
'jil', # 0xc8
'jilg', # 0xc9
'jilm', # 0xca
'jilb', # 0xcb
'jils', # 0xcc
'jilt', # 0xcd
'jilp', # 0xce
'jilh', # 0xcf
'jim', # 0xd0
'jib', # 0xd1
'jibs', # 0xd2
'jis', # 0xd3
'jiss', # 0xd4
'jing', # 0xd5
'jij', # 0xd6
'jic', # 0xd7
'jik', # 0xd8
'jit', # 0xd9
'jip', # 0xda
'jih', # 0xdb
'jja', # 0xdc
'jjag', # 0xdd
'jjagg', # 0xde
'jjags', # 0xdf
'jjan', # 0xe0
'jjanj', # 0xe1
'jjanh', # 0xe2
'jjad', # 0xe3
'jjal', # 0xe4
'jjalg', # 0xe5
'jjalm', # 0xe6
'jjalb', # 0xe7
'jjals', # 0xe8
'jjalt', # 0xe9
'jjalp', # 0xea
'jjalh', # 0xeb
'jjam', # 0xec
'jjab', # 0xed
'jjabs', # 0xee
'jjas', # 0xef
'jjass', # 0xf0
'jjang', # 0xf1
'jjaj', # 0xf2
'jjac', # 0xf3
'jjak', # 0xf4
'jjat', # 0xf5
'jjap', # 0xf6
'jjah', # 0xf7
'jjae', # 0xf8
'jjaeg', # 0xf9
'jjaegg', # 0xfa
'jjaegs', # 0xfb
'jjaen', # 0xfc
'jjaenj', # 0xfd
'jjaenh', # 0xfe
'jjaed', # 0xff
)
| data = ('jun', 'junj', 'junh', 'jud', 'jul', 'julg', 'julm', 'julb', 'juls', 'jult', 'julp', 'julh', 'jum', 'jub', 'jubs', 'jus', 'juss', 'jung', 'juj', 'juc', 'juk', 'jut', 'jup', 'juh', 'jweo', 'jweog', 'jweogg', 'jweogs', 'jweon', 'jweonj', 'jweonh', 'jweod', 'jweol', 'jweolg', 'jweolm', 'jweolb', 'jweols', 'jweolt', 'jweolp', 'jweolh', 'jweom', 'jweob', 'jweobs', 'jweos', 'jweoss', 'jweong', 'jweoj', 'jweoc', 'jweok', 'jweot', 'jweop', 'jweoh', 'jwe', 'jweg', 'jwegg', 'jwegs', 'jwen', 'jwenj', 'jwenh', 'jwed', 'jwel', 'jwelg', 'jwelm', 'jwelb', 'jwels', 'jwelt', 'jwelp', 'jwelh', 'jwem', 'jweb', 'jwebs', 'jwes', 'jwess', 'jweng', 'jwej', 'jwec', 'jwek', 'jwet', 'jwep', 'jweh', 'jwi', 'jwig', 'jwigg', 'jwigs', 'jwin', 'jwinj', 'jwinh', 'jwid', 'jwil', 'jwilg', 'jwilm', 'jwilb', 'jwils', 'jwilt', 'jwilp', 'jwilh', 'jwim', 'jwib', 'jwibs', 'jwis', 'jwiss', 'jwing', 'jwij', 'jwic', 'jwik', 'jwit', 'jwip', 'jwih', 'jyu', 'jyug', 'jyugg', 'jyugs', 'jyun', 'jyunj', 'jyunh', 'jyud', 'jyul', 'jyulg', 'jyulm', 'jyulb', 'jyuls', 'jyult', 'jyulp', 'jyulh', 'jyum', 'jyub', 'jyubs', 'jyus', 'jyuss', 'jyung', 'jyuj', 'jyuc', 'jyuk', 'jyut', 'jyup', 'jyuh', 'jeu', 'jeug', 'jeugg', 'jeugs', 'jeun', 'jeunj', 'jeunh', 'jeud', 'jeul', 'jeulg', 'jeulm', 'jeulb', 'jeuls', 'jeult', 'jeulp', 'jeulh', 'jeum', 'jeub', 'jeubs', 'jeus', 'jeuss', 'jeung', 'jeuj', 'jeuc', 'jeuk', 'jeut', 'jeup', 'jeuh', 'jyi', 'jyig', 'jyigg', 'jyigs', 'jyin', 'jyinj', 'jyinh', 'jyid', 'jyil', 'jyilg', 'jyilm', 'jyilb', 'jyils', 'jyilt', 'jyilp', 'jyilh', 'jyim', 'jyib', 'jyibs', 'jyis', 'jyiss', 'jying', 'jyij', 'jyic', 'jyik', 'jyit', 'jyip', 'jyih', 'ji', 'jig', 'jigg', 'jigs', 'jin', 'jinj', 'jinh', 'jid', 'jil', 'jilg', 'jilm', 'jilb', 'jils', 'jilt', 'jilp', 'jilh', 'jim', 'jib', 'jibs', 'jis', 'jiss', 'jing', 'jij', 'jic', 'jik', 'jit', 'jip', 'jih', 'jja', 'jjag', 'jjagg', 'jjags', 'jjan', 'jjanj', 'jjanh', 'jjad', 'jjal', 'jjalg', 'jjalm', 'jjalb', 'jjals', 'jjalt', 'jjalp', 'jjalh', 'jjam', 'jjab', 'jjabs', 'jjas', 'jjass', 'jjang', 'jjaj', 'jjac', 'jjak', 'jjat', 'jjap', 'jjah', 'jjae', 'jjaeg', 'jjaegg', 'jjaegs', 'jjaen', 'jjaenj', 'jjaenh', 'jjaed') |
def calculate_area(side_length=10):
print(f"The area of a square with sides of length {side_length} is {side_length**2}.")
length=int(input("Enter side length: "))
if length<=0:
calculate_area(10)
else:
calculate_area(length) | def calculate_area(side_length=10):
print(f'The area of a square with sides of length {side_length} is {side_length ** 2}.')
length = int(input('Enter side length: '))
if length <= 0:
calculate_area(10)
else:
calculate_area(length) |
def find_max(num1, num2):
max_num=-1
if num2> num1:
data = range(num1,num2+1)
main_list = []
for x in data:
b = str(x)
if x < 0:
b = str(x*-1)
sx = list(map(int,list(b)))
if len(sx)==2 and sum(sx)%3==0 and x%5==0:
main_list.append(x)
if len(main_list)!=0:
return max(main_list)
return max_num
#Provide different values for num1 and num2 and test your program.
max_num=find_max(10,100)
print(max_num) | def find_max(num1, num2):
max_num = -1
if num2 > num1:
data = range(num1, num2 + 1)
main_list = []
for x in data:
b = str(x)
if x < 0:
b = str(x * -1)
sx = list(map(int, list(b)))
if len(sx) == 2 and sum(sx) % 3 == 0 and (x % 5 == 0):
main_list.append(x)
if len(main_list) != 0:
return max(main_list)
return max_num
max_num = find_max(10, 100)
print(max_num) |
def calc():
numOne = int(input("What is the first number of your problem?"))
numTwo = int(input("What is the second number of your problem?"))
numThree = input("What type of Math Problem is it, Addition, Subtraction, Multiplication, Division, Remainder, or Exponents? Type exactly.")
if numThree == 'Addition':
print(numOne + numTwo)
elif numThree == 'Subtraction':
print(numOne - numTwo)
elif numThree == 'Multiplication':
print(numOne * numTwo)
elif numThree == 'Division':
print(numOne / numThree)
elif numThree == 'Remainder':
print(numOne % numTwo)
elif numThree == 'Exponents':
print(numOne ** numTwo)
else:
print("Not acceptable format. Restart program and run again.")
while True:
calc()
| def calc():
num_one = int(input('What is the first number of your problem?'))
num_two = int(input('What is the second number of your problem?'))
num_three = input('What type of Math Problem is it, Addition, Subtraction, Multiplication, Division, Remainder, or Exponents? Type exactly.')
if numThree == 'Addition':
print(numOne + numTwo)
elif numThree == 'Subtraction':
print(numOne - numTwo)
elif numThree == 'Multiplication':
print(numOne * numTwo)
elif numThree == 'Division':
print(numOne / numThree)
elif numThree == 'Remainder':
print(numOne % numTwo)
elif numThree == 'Exponents':
print(numOne ** numTwo)
else:
print('Not acceptable format. Restart program and run again.')
while True:
calc() |
class Solution:
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
primes, indices = [2, 3, 5], [0, 0, 0]
ugly_numbers = [1]
for _ in range(n):
next_numbers = list(map(lambda x: x[0] * x[1], zip(primes, map(lambda x: ugly_numbers[x], indices))))
min_num = min(next_numbers)
for index in range(len(indices)):
if next_numbers[index] == min_num:
indices[index] += 1
ugly_numbers.append(min_num)
return ugly_numbers[n - 1]
if __name__ == "__main__":
print(Solution().nthUglyNumber(10))
print(Solution().nthUglyNumber(11))
| class Solution:
def nth_ugly_number(self, n):
"""
:type n: int
:rtype: int
"""
(primes, indices) = ([2, 3, 5], [0, 0, 0])
ugly_numbers = [1]
for _ in range(n):
next_numbers = list(map(lambda x: x[0] * x[1], zip(primes, map(lambda x: ugly_numbers[x], indices))))
min_num = min(next_numbers)
for index in range(len(indices)):
if next_numbers[index] == min_num:
indices[index] += 1
ugly_numbers.append(min_num)
return ugly_numbers[n - 1]
if __name__ == '__main__':
print(solution().nthUglyNumber(10))
print(solution().nthUglyNumber(11)) |
"""Codewars problem to find even index."""
def find_even_index(arr):
"""Return the index where sum of both sides are equal."""
if len(arr) == 0:
return 0
for i in range(0, len(arr)):
sum1 = 0
sum2 = 0
for j in range(0, i):
sum1 += arr[j]
for k in range(i + 1, len(arr)):
sum2 += arr[k]
if sum1 == sum2:
return i
return -1
| """Codewars problem to find even index."""
def find_even_index(arr):
"""Return the index where sum of both sides are equal."""
if len(arr) == 0:
return 0
for i in range(0, len(arr)):
sum1 = 0
sum2 = 0
for j in range(0, i):
sum1 += arr[j]
for k in range(i + 1, len(arr)):
sum2 += arr[k]
if sum1 == sum2:
return i
return -1 |
def read_pwscf_in(filepath):
"""
Note: read parameters from pwscf input template
"""
with open(filepath, 'r') as fin:
lines = fin.readlines()
control = {}
system = {}
electrons = {}
ions = {}
cell = {}
for i in range(len(lines)):
if lines[i].split()[0].lower() == "&control":
j = 1
while lines[i+j].split()[0] != "/":
if len(lines[i+j].split()) == 0:
pass
if len(lines[i+j].split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value &control variable
contorl[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
control[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()
j += 1
if lines[i].split()[0].lower() == "&system":
j = 1
while lines[i+j].split()[0] != "/":
if len(lines[i+j].split()) == 0:
pass
if len(lines[i+j].split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value &control variable
system[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
system[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()
j += 1
if lines[i].split()[0].lower() == "&electrons":
j = 1
while lines[i+j].split()[0] != "/":
if len(lines[i+j].split()) == 0:
pass
if len(lines[i+j].split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value &control variable
electrons[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
electrons[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()
j += 1
if lines[i].split()[0].lower() == "&ions":
j = 1
while lines[i+j].split()[0] != "/":
if len(lines[i+j].split()) == 0:
pass
if len(lines[i+j].split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value &control variable
ions[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
ions[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()
j += 1
if lines[i].split()[0].lower() == "&cell":
j = 1
while lines[i+j].split()[0] != "/":
if len(lines[i+j].split()) == 0:
pass
if len(lines[i+j].split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value &control variable
cell[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
cell[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()
j += 1
return control, system, electrons, ions, cell
def read_neb_in(filepath):
"""
Note: read parameters from neb.x input template
"""
with open(filepath, 'r') as fin:
lines = fin.readlines()
path = {}
for i in range(len(lines)):
if lines[i].split()[0].lower() == "&path":
j = 1
while lines[i+j].split()[0] != "/":
if len(lines[i+j].split()) == 0:
pass
if len(lines[i+j].split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value &PATH variable
path[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
path[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()
j += 1
return path
def read_ph_in(filepath):
"""
Note: read parameters from neb.x input template
"""
with open(filepath, 'r') as fin:
lines = fin.readlines()
ph = {}
for i in range(len(lines)):
if lines[i].split()[0].lower() == "&inputph":
j = 1
while lines[i+j].split()[0] != "/":
if len(lines[i+j].split()) == 0:
pass
if len(lines[i+j].split("\n")[0].split("#")[0].split("=")) == 2:
# in case of single value &INPUTPH variable
ph[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()[0]
else:
ph[lines[i+j].split("=")[0].split()[0]] = lines[i+j].split("\n")[0].split("#")[0].split("=")[1].split()
j += 1
return ph | def read_pwscf_in(filepath):
"""
Note: read parameters from pwscf input template
"""
with open(filepath, 'r') as fin:
lines = fin.readlines()
control = {}
system = {}
electrons = {}
ions = {}
cell = {}
for i in range(len(lines)):
if lines[i].split()[0].lower() == '&control':
j = 1
while lines[i + j].split()[0] != '/':
if len(lines[i + j].split()) == 0:
pass
if len(lines[i + j].split('\n')[0].split('#')[0].split('=')) == 2:
contorl[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()[0]
else:
control[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()
j += 1
if lines[i].split()[0].lower() == '&system':
j = 1
while lines[i + j].split()[0] != '/':
if len(lines[i + j].split()) == 0:
pass
if len(lines[i + j].split('\n')[0].split('#')[0].split('=')) == 2:
system[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()[0]
else:
system[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()
j += 1
if lines[i].split()[0].lower() == '&electrons':
j = 1
while lines[i + j].split()[0] != '/':
if len(lines[i + j].split()) == 0:
pass
if len(lines[i + j].split('\n')[0].split('#')[0].split('=')) == 2:
electrons[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()[0]
else:
electrons[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()
j += 1
if lines[i].split()[0].lower() == '&ions':
j = 1
while lines[i + j].split()[0] != '/':
if len(lines[i + j].split()) == 0:
pass
if len(lines[i + j].split('\n')[0].split('#')[0].split('=')) == 2:
ions[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()[0]
else:
ions[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()
j += 1
if lines[i].split()[0].lower() == '&cell':
j = 1
while lines[i + j].split()[0] != '/':
if len(lines[i + j].split()) == 0:
pass
if len(lines[i + j].split('\n')[0].split('#')[0].split('=')) == 2:
cell[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()[0]
else:
cell[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()
j += 1
return (control, system, electrons, ions, cell)
def read_neb_in(filepath):
"""
Note: read parameters from neb.x input template
"""
with open(filepath, 'r') as fin:
lines = fin.readlines()
path = {}
for i in range(len(lines)):
if lines[i].split()[0].lower() == '&path':
j = 1
while lines[i + j].split()[0] != '/':
if len(lines[i + j].split()) == 0:
pass
if len(lines[i + j].split('\n')[0].split('#')[0].split('=')) == 2:
path[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()[0]
else:
path[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()
j += 1
return path
def read_ph_in(filepath):
"""
Note: read parameters from neb.x input template
"""
with open(filepath, 'r') as fin:
lines = fin.readlines()
ph = {}
for i in range(len(lines)):
if lines[i].split()[0].lower() == '&inputph':
j = 1
while lines[i + j].split()[0] != '/':
if len(lines[i + j].split()) == 0:
pass
if len(lines[i + j].split('\n')[0].split('#')[0].split('=')) == 2:
ph[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()[0]
else:
ph[lines[i + j].split('=')[0].split()[0]] = lines[i + j].split('\n')[0].split('#')[0].split('=')[1].split()
j += 1
return ph |
x_min = -2
y_min = (-(modelparams['weights'][0] * x_min) / modelparams['weights'][1] -
(modelparams['bias'][0] / model_params['weights'][1]))
x_max = 2
y_max = (-(modelparams['weights'][0] * x_max) / modelparams['weights'][1] -
(modelparams['bias'][0] / modelparams['weights'][1]))
fig, ax = plt.subplots(1, 2, sharex=True, figsize=(7, 3))
ax[0].plot([x_min, x_max], [y_min, y_max])
ax[1].plot([x_min, x_max], [y_min, y_max])
ax[0].scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1],
label='class 0', marker='o')
ax[0].scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1],
label='class 1', marker='s')
ax[1].scatter(X_test[y_test == 0, 0], X_test[y_test == 0, 1],
label='class 0', marker='o')
ax[1].scatter(X_test[y_test == 1, 0], X_test[y_test == 1, 1],
label='class 1', marker='s')
ax[1].legend(loc='upper left')
plt.show()
# The TensorFlow model performs better on the test set just by random chance.
# Remember, the perceptron algorithm stops learning as soon as it classifies
# the training set perfectly.
# Possible explanations why there is a difference between the NumPy and
# TensorFlow outcomes could thus be numerical precision, or slight differences
# in our implementation.
| x_min = -2
y_min = -(modelparams['weights'][0] * x_min) / modelparams['weights'][1] - modelparams['bias'][0] / model_params['weights'][1]
x_max = 2
y_max = -(modelparams['weights'][0] * x_max) / modelparams['weights'][1] - modelparams['bias'][0] / modelparams['weights'][1]
(fig, ax) = plt.subplots(1, 2, sharex=True, figsize=(7, 3))
ax[0].plot([x_min, x_max], [y_min, y_max])
ax[1].plot([x_min, x_max], [y_min, y_max])
ax[0].scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], label='class 0', marker='o')
ax[0].scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], label='class 1', marker='s')
ax[1].scatter(X_test[y_test == 0, 0], X_test[y_test == 0, 1], label='class 0', marker='o')
ax[1].scatter(X_test[y_test == 1, 0], X_test[y_test == 1, 1], label='class 1', marker='s')
ax[1].legend(loc='upper left')
plt.show() |
for i in range(0, 201, 2):
print(i)
for i in range(0, 100, 3):
print(i)
| for i in range(0, 201, 2):
print(i)
for i in range(0, 100, 3):
print(i) |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
r"""A defined list of constants available to QuickRelease users.
The are some important difference between QuickRelease's L{config items<quickrelease.config>} and C{constants}:
1. C{constants} may be accessed without a L{ConfigSpec<quickrelease.config.ConfigSpec>} reference. This makes them useful in places where it may be difficult to obtain such a reference.
2. C{constants} can be overriden by the environment. This can be useful, but should be used sparingly, since the override is not yet logged anywhere. It's mostly intended to redefine paths to executables in different situations. For instance, if you have a debug version of the C{unzip} utility that you would like to have a L{Process<quickrelease.process.Process>} use. You can set the C{UNZIP} environment variable, and if your process is using a constant, it will be picked up. (This is similar to L{ConfigSpec<quickrelease.config.ConfigSpec>}'s overrides, but cannot currently be disabled.)
3. C{constant} can return complex Python types (lists, dictionaries, etc.)
"""
QUICKRELEASE_CONSTANTS = {
'ANT' : 'ant',
# This has the _PROG suffix because bzip2 uses the BZIP/BZIP2 env
# variables as another way to read its arguments (!!?)
'BZIP_PROG': 'bzip2',
'GIT' : 'git',
'GPG' : 'gpg',
'JAR' : 'jar',
'MAKE' : 'make',
'MD5SUM' : 'md5sum',
'MV' : 'mv',
'PERL' : 'perl',
'S3CURL' : 's3curl.pl',
'SVN' : 'svn',
'RSYNC' : 'rsync',
'TAR' : 'tar',
'UNZIP' : 'unzip',
'WGET' : 'wget',
'ZIP' : 'zip',
'BUILD_PLATFORMS_MAP': { 'Windows-i686': 'win32',
'Windows-AMD64': 'win64',
'Darwin-i686': 'mac',
'Darwin-x86_64': 'mac',
'Linux-i686': 'linux',
'Linux-x86_64': 'linux-x64',
},
'BUILD_PLATFORM_EXTENSIONS': { 'win32': 'exe',
'mac': 'dmg',
'linux': 'tar.gz',
'linux-x64': 'tar.gz',
},
# in seconds, so five minutes
'RUN_SHELL_COMMAND_DEFAULT_TIMEOUT': 60 * 5,
# A way to increase the default via the environment for instances
# where you're running in slow(er) environments, e.g. virtualization
'RUN_SHELL_COMMAND_TIMEOUT_FACTOR': 1,
# Number of output lines, by default, for quickrelease.command to store
# in memory before dumping to a file backing-store.
'RUN_SHELL_COMMAND_IN_MEM_LINES': 20000,
# in seconds, so 10 mintues.
'S3_PUSH_TIMEOUT': 60 * 10,
'S3_MIME_TYPES': { 'asc' : 'text/plain',
'bz2' : 'application/x-bzip2',
'dmg' : 'application/x-apple-diskimage',
'exe' : 'application/octet-stream',
'mar' : 'application/octet-stream',
'md5' : 'text/plain',
'tar.gz' : 'application/x-gzip',
'txt': 'text/plain',
'zip': 'application/zip',
},
}
"""
Various constants that can be useful for QuickRelease L{Process<quickrelease.process.Process>}es.
"""
QUICKRELEASE_CONSTANTS['BUILD_PLATFORMS'] = QUICKRELEASE_CONSTANTS['BUILD_PLATFORMS_MAP'].values()
CONSTANTS_FROM_ENV_HANDLERS = {
'BUILD_PLATFORMS': lambda val: tuple(val.split()),
'RUN_SHELL_COMMAND_DEFAULT_TIMEOUT': lambda val: int(val),
'RUN_SHELL_COMMAND_TIMEOUT_FACTOR': lambda val: int(val),
'RUN_SHELL_COMMAND_IN_MEM_LINES': lambda val: int(val),
'S3_PUSH_TIMEOUT': lambda val: int(val),
'BUILD_PLATFORM_EXTENSIONS': lambda val: NotImplementedError("Need to turn BUILD_PLATFORM_EXTENSIONS overloads into a dict!"),
'S3_MIME_TYPES': lambda val: NotImplementedError("Need to turn S3_MIME_TYPES overloads into a dict!"),
}
"""A dictionary of named constants -> handlers to convert an environment
variable string into the expected Python type. The type should match
what the named constant in L{QUICKRELEASE_CONSTANTS<quickrelease.constants.QUICKRELEASE_CONSTANTS>} returns.
"""
#
# Application-related constants; probably not a good idea to change these
#
_PIPE_STDOUT = 1
_PIPE_STDERR = 2
| """A defined list of constants available to QuickRelease users.
The are some important difference between QuickRelease's L{config items<quickrelease.config>} and C{constants}:
1. C{constants} may be accessed without a L{ConfigSpec<quickrelease.config.ConfigSpec>} reference. This makes them useful in places where it may be difficult to obtain such a reference.
2. C{constants} can be overriden by the environment. This can be useful, but should be used sparingly, since the override is not yet logged anywhere. It's mostly intended to redefine paths to executables in different situations. For instance, if you have a debug version of the C{unzip} utility that you would like to have a L{Process<quickrelease.process.Process>} use. You can set the C{UNZIP} environment variable, and if your process is using a constant, it will be picked up. (This is similar to L{ConfigSpec<quickrelease.config.ConfigSpec>}'s overrides, but cannot currently be disabled.)
3. C{constant} can return complex Python types (lists, dictionaries, etc.)
"""
quickrelease_constants = {'ANT': 'ant', 'BZIP_PROG': 'bzip2', 'GIT': 'git', 'GPG': 'gpg', 'JAR': 'jar', 'MAKE': 'make', 'MD5SUM': 'md5sum', 'MV': 'mv', 'PERL': 'perl', 'S3CURL': 's3curl.pl', 'SVN': 'svn', 'RSYNC': 'rsync', 'TAR': 'tar', 'UNZIP': 'unzip', 'WGET': 'wget', 'ZIP': 'zip', 'BUILD_PLATFORMS_MAP': {'Windows-i686': 'win32', 'Windows-AMD64': 'win64', 'Darwin-i686': 'mac', 'Darwin-x86_64': 'mac', 'Linux-i686': 'linux', 'Linux-x86_64': 'linux-x64'}, 'BUILD_PLATFORM_EXTENSIONS': {'win32': 'exe', 'mac': 'dmg', 'linux': 'tar.gz', 'linux-x64': 'tar.gz'}, 'RUN_SHELL_COMMAND_DEFAULT_TIMEOUT': 60 * 5, 'RUN_SHELL_COMMAND_TIMEOUT_FACTOR': 1, 'RUN_SHELL_COMMAND_IN_MEM_LINES': 20000, 'S3_PUSH_TIMEOUT': 60 * 10, 'S3_MIME_TYPES': {'asc': 'text/plain', 'bz2': 'application/x-bzip2', 'dmg': 'application/x-apple-diskimage', 'exe': 'application/octet-stream', 'mar': 'application/octet-stream', 'md5': 'text/plain', 'tar.gz': 'application/x-gzip', 'txt': 'text/plain', 'zip': 'application/zip'}}
'\nVarious constants that can be useful for QuickRelease L{Process<quickrelease.process.Process>}es.\n'
QUICKRELEASE_CONSTANTS['BUILD_PLATFORMS'] = QUICKRELEASE_CONSTANTS['BUILD_PLATFORMS_MAP'].values()
constants_from_env_handlers = {'BUILD_PLATFORMS': lambda val: tuple(val.split()), 'RUN_SHELL_COMMAND_DEFAULT_TIMEOUT': lambda val: int(val), 'RUN_SHELL_COMMAND_TIMEOUT_FACTOR': lambda val: int(val), 'RUN_SHELL_COMMAND_IN_MEM_LINES': lambda val: int(val), 'S3_PUSH_TIMEOUT': lambda val: int(val), 'BUILD_PLATFORM_EXTENSIONS': lambda val: not_implemented_error('Need to turn BUILD_PLATFORM_EXTENSIONS overloads into a dict!'), 'S3_MIME_TYPES': lambda val: not_implemented_error('Need to turn S3_MIME_TYPES overloads into a dict!')}
'A dictionary of named constants -> handlers to convert an environment \nvariable string into the expected Python type. The type should match\nwhat the named constant in L{QUICKRELEASE_CONSTANTS<quickrelease.constants.QUICKRELEASE_CONSTANTS>} returns.\n'
_pipe_stdout = 1
_pipe_stderr = 2 |
def reverse_number(n: int) -> int:
""" This function takes in input 'n' and returns 'n' with all digits reversed. """
if len(str(n)) == 1:
return n
k = abs(n)
reversed_n = []
while k != 0:
i = k % 10
reversed_n.append(i)
k = (k - i) // 10
return int(''.join(map(str, reversed_n))) if n > 0 else -int(''.join(map(str, reversed_n)))
| def reverse_number(n: int) -> int:
""" This function takes in input 'n' and returns 'n' with all digits reversed. """
if len(str(n)) == 1:
return n
k = abs(n)
reversed_n = []
while k != 0:
i = k % 10
reversed_n.append(i)
k = (k - i) // 10
return int(''.join(map(str, reversed_n))) if n > 0 else -int(''.join(map(str, reversed_n))) |
# This is all about using strings
stg_1 = "this is the first message without a tab"
print(stg_1)
stg_2 = "\t this is the second message with a tab"
print(stg_2)
stg_3 = "this is another message with a newline\n"
print(stg_3)
| stg_1 = 'this is the first message without a tab'
print(stg_1)
stg_2 = '\t this is the second message with a tab'
print(stg_2)
stg_3 = 'this is another message with a newline\n'
print(stg_3) |
#!/usr/bin/python
# unicode.py
text = u'\u041b\u0435\u0432 \u041d\u0438\u043a\u043e\u043b\u0430\
\u0435\u0432\u0438\u0447 \u0422\u043e\u043b\u0441\u0442\u043e\u0439: \n\
\u0410\u043d\u043d\u0430 \u041a\u0430\u0440\u0435\u043d\u0438\u043d\u0430'
print (text)
| text = u'Лев Николаевич Толстой: \nАнна Каренина'
print(text) |
#to have some interaction
#we need an loop to look for actions
def setup():
size(400, 400)
#executed once
println("This is the setup. Executed once. Initiate things here")
#executed all the time waiting for infos
def draw():
#do the bakcground color transformation
noStroke()
fill(map(mouseX, width, 0, 0, width), 100)
rect(0, 0, width, height)
#do the circle color transformation
fill(mouseX)
#draw an ellipse
ellipse(mouseX, mouseY, 10, 10)
#print some infos
println("Frame number: " + frameCount)
print("mouse x: " + mouseX )
println(" mouse y: " + mouseY )
| def setup():
size(400, 400)
println('This is the setup. Executed once. Initiate things here')
def draw():
no_stroke()
fill(map(mouseX, width, 0, 0, width), 100)
rect(0, 0, width, height)
fill(mouseX)
ellipse(mouseX, mouseY, 10, 10)
println('Frame number: ' + frameCount)
print('mouse x: ' + mouseX)
println(' mouse y: ' + mouseY) |
##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
options = (
("documentation_folder", {
"type": "string",
"default": None,
"help": "the folder containing the documentation of the project.",
"group": "piws",
"level": 1,
}),
("show_user_status", {
"type": "yn",
"default": True,
"help": "Show or not the user status link on the website.",
"group": "piws",
"level": 1,
}),
("ldap_groups_dn", {
"type": "string",
"default": None,
"help": "LDAP groups dn for LDAP groups synchronisation in CW <= 3.20,"
"otherwise not required.",
"group": "piws",
"level": 1,
}),
('apache-cleanup-session-time',
{'type': 'time',
'default': None,
'help': ('Duration of inactivity after which an apache de-authentication'
'will be triggered'),
'group': 'piws',
'level': 1,
}),
('deauth-redirect-url',
{'type': 'string',
'default': None,
'help': 'Redirection url after apache deauthentication occured.',
'group': 'piws',
'level': 1,
}),
("enable-cwusers-watcher", {
"type": "string",
"default": 'no',
"help": ("If 'yes', an email is sent (this email address has to be "
"set in the [MAIL] all-in-one section) when a CW user is "
"created or deleted."),
"group": "piws",
"level": 1,
}),
('enable-apache-logout',
{'type': 'yn',
'default': False,
'help': 'Enable Apache logout',
'group': 'piws',
'level': 1,
}),
('logo',
{'type': 'string',
'default': 'images/nsap.png',
'help': 'Navigation bar logo',
'group': 'piws',
'level': 1,
}),
('enable-upload',
{'type' : 'yn',
'default': False,
'help': ('If true enable the upload, ie relax security on user and '
'group entities. The database must be regenerated if this '
'option is modified.'),
'group': 'piws',
'level': 1,
}),
('authorized-upload-groups',
{'type': 'csv',
'default': 'users',
'help': 'A list of groups that will be able to upload data.',
'group': 'piws',
'level': 1,
}),
('share_group_uploads',
{'type': 'yn',
'default': False,
'help': 'If true, share uploads between the memebers of a group.',
'group': 'piws',
'level': 1,
}),
("metagen_url", {
"type": "string",
"default": None,
"help": "the URL to the metagen bioresource.",
"group": "piws",
"level": 1,
}),
("allow-inline-relations", {
"type": "yn",
"default": True,
"help": ("if False remove inline relations from the schema: inline "
"relations are not compatible with the massive store."),
"group": "piws",
"level": 1,
}),
)
| options = (('documentation_folder', {'type': 'string', 'default': None, 'help': 'the folder containing the documentation of the project.', 'group': 'piws', 'level': 1}), ('show_user_status', {'type': 'yn', 'default': True, 'help': 'Show or not the user status link on the website.', 'group': 'piws', 'level': 1}), ('ldap_groups_dn', {'type': 'string', 'default': None, 'help': 'LDAP groups dn for LDAP groups synchronisation in CW <= 3.20,otherwise not required.', 'group': 'piws', 'level': 1}), ('apache-cleanup-session-time', {'type': 'time', 'default': None, 'help': 'Duration of inactivity after which an apache de-authenticationwill be triggered', 'group': 'piws', 'level': 1}), ('deauth-redirect-url', {'type': 'string', 'default': None, 'help': 'Redirection url after apache deauthentication occured.', 'group': 'piws', 'level': 1}), ('enable-cwusers-watcher', {'type': 'string', 'default': 'no', 'help': "If 'yes', an email is sent (this email address has to be set in the [MAIL] all-in-one section) when a CW user is created or deleted.", 'group': 'piws', 'level': 1}), ('enable-apache-logout', {'type': 'yn', 'default': False, 'help': 'Enable Apache logout', 'group': 'piws', 'level': 1}), ('logo', {'type': 'string', 'default': 'images/nsap.png', 'help': 'Navigation bar logo', 'group': 'piws', 'level': 1}), ('enable-upload', {'type': 'yn', 'default': False, 'help': 'If true enable the upload, ie relax security on user and group entities. The database must be regenerated if this option is modified.', 'group': 'piws', 'level': 1}), ('authorized-upload-groups', {'type': 'csv', 'default': 'users', 'help': 'A list of groups that will be able to upload data.', 'group': 'piws', 'level': 1}), ('share_group_uploads', {'type': 'yn', 'default': False, 'help': 'If true, share uploads between the memebers of a group.', 'group': 'piws', 'level': 1}), ('metagen_url', {'type': 'string', 'default': None, 'help': 'the URL to the metagen bioresource.', 'group': 'piws', 'level': 1}), ('allow-inline-relations', {'type': 'yn', 'default': True, 'help': 'if False remove inline relations from the schema: inline relations are not compatible with the massive store.', 'group': 'piws', 'level': 1})) |
class Command:
def __init__(self, name, desc="", args=[]):
self.name = name
self.desc = desc
self.args = args
| class Command:
def __init__(self, name, desc='', args=[]):
self.name = name
self.desc = desc
self.args = args |
"""
Return nth catalan number.
Recursive Formula of Catalan Numbers says:
C of (n+1) = summation of C of i* C of n-i, for range i=0 to i=n
Therefore, for C of n formula becomes
C of (n) = summation of C of i* C of n-1-i, for range i=0 to i=n-1
"""
def getCatalan(n,dp_arr):
# Lookup
if (dp_arr[n] is not None):
return dp_arr[n]
#Base Case
if (n==0):
return 1
#Rec Case
Cn = 0
for i in range(0,n):
Cn = Cn + ( getCatalan(i, dp_arr) * getCatalan(n-1-i, dp_arr) )
dp_arr[n] = Cn
return Cn
#Driver Code
if __name__ == '__main__':
dp_arr : list = [None] * 100
n = 5
returned = getCatalan(n, dp_arr)
print(returned)
| """
Return nth catalan number.
Recursive Formula of Catalan Numbers says:
C of (n+1) = summation of C of i* C of n-i, for range i=0 to i=n
Therefore, for C of n formula becomes
C of (n) = summation of C of i* C of n-1-i, for range i=0 to i=n-1
"""
def get_catalan(n, dp_arr):
if dp_arr[n] is not None:
return dp_arr[n]
if n == 0:
return 1
cn = 0
for i in range(0, n):
cn = Cn + get_catalan(i, dp_arr) * get_catalan(n - 1 - i, dp_arr)
dp_arr[n] = Cn
return Cn
if __name__ == '__main__':
dp_arr: list = [None] * 100
n = 5
returned = get_catalan(n, dp_arr)
print(returned) |
class SSLUnavailable(Exception):
"""If you haven't verified a CNAME zone within the grace period (a week),
it can't be verified any more.
"""
pass
class CustomHostnameNotFound(Exception):
pass
| class Sslunavailable(Exception):
"""If you haven't verified a CNAME zone within the grace period (a week),
it can't be verified any more.
"""
pass
class Customhostnamenotfound(Exception):
pass |
n1,n2=map(int,input().split())
a=[]
for i in range(n2):
a.append(list(map(float,input().split())))
for i in zip(*a):
print(sum(i)/n2) | (n1, n2) = map(int, input().split())
a = []
for i in range(n2):
a.append(list(map(float, input().split())))
for i in zip(*a):
print(sum(i) / n2) |
def read_txt_file_str(filename):
f=open('text_files/'+filename, "r")
contents=f.read()
f.close()
return contents
def read_txt_file_list(filename):
f=open('text_files/'+filename, "r")
contents=f.readlines()
f.close()
return contents | def read_txt_file_str(filename):
f = open('text_files/' + filename, 'r')
contents = f.read()
f.close()
return contents
def read_txt_file_list(filename):
f = open('text_files/' + filename, 'r')
contents = f.readlines()
f.close()
return contents |
# Author: Jocelino F.G.
n = int(input())
vetor = [n]
dobro = n
for i in range(0, 10):
dobro = dobro * 2
vetor.append(dobro)
print("N[{}] = {}".format(i, vetor[i]))
| n = int(input())
vetor = [n]
dobro = n
for i in range(0, 10):
dobro = dobro * 2
vetor.append(dobro)
print('N[{}] = {}'.format(i, vetor[i])) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rule_engine/errors.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
class _UNDEFINED(object):
def __bool__(self):
return False
__name__ = 'UNDEFINED'
__nonzero__ = __bool__
def __repr__(self):
return self.__name__
UNDEFINED = _UNDEFINED()
"""
A sentinel value to specify that something is undefined. When evaluated, the
value is falsy.
.. versionadded:: 2.0.0
"""
class EngineError(Exception):
"""
The base exception class from which other exceptions within this package
inherit.
"""
def __init__(self, message=''):
"""
:param str message: A text description of what error occurred.
"""
self.message = message
"""A text description of what error occurred."""
class EvaluationError(EngineError):
"""
An error raised for issues which occur while the rule is being evaluated.
This can occur at parse time while AST nodes are being evaluated during
the reduction phase.
"""
pass
class SyntaxError(EngineError):
"""A base error for syntax related issues."""
class DatetimeSyntaxError(SyntaxError):
"""
An error raised for issues regarding the use of improperly formatted
datetime expressions.
"""
def __init__(self, message, value):
"""
:param str message: A text description of what error occurred.
:param str value: The datetime value which contains the syntax error which caused this exception to be raised.
"""
super(DatetimeSyntaxError, self).__init__(message)
self.value = value
"""
The datetime value which contains the syntax error which caused this
exception to be raised.
"""
class RuleSyntaxError(SyntaxError):
"""
An error raised for issues identified in while parsing the grammar of the
rule text.
"""
def __init__(self, message, token=None):
"""
:param str message: A text description of what error occurred.
:param token: The PLY token (if available) which is related to the syntax error.
"""
if token is None:
position = 'EOF'
else:
position = "line {0}:{1}".format(token.lineno, token.lexpos)
message = message + ' at: ' + position
super(RuleSyntaxError, self).__init__(message)
self.token = token
"""The PLY token (if available) which is related to the syntax error."""
class RegexSyntaxError(SyntaxError):
"""
An error raised for issues regarding the use of improper regular expression
syntax.
"""
def __init__(self, message, error, value):
"""
:param str message: A text description of what error occurred.
:param error: The :py:exc:`re.error` exception from which this error was triggered.
:type error: :py:exc:`re.error`
:param str value: The regular expression value which contains the syntax error which caused this exception to be raised.
"""
super(RegexSyntaxError, self).__init__(message)
self.error = error
"""The :py:exc:`re.error` exception from which this error was triggered."""
self.value = value
"""
The regular expression value which contains the syntax error which
caused this exception to be raised.
"""
class AttributeResolutionError(EvaluationError):
"""
An error raised with an attribute can not be resolved to a value.
..versionadded:: 2.0.0
"""
def __init__(self, attribute_name, object_, thing=UNDEFINED):
"""
:param str attribute_name: The name of the symbol that can not be resolved.
:param object_: The value that *attribute_name* was used as an attribute for.
:param thing: The root-object that was used to resolve *object*.
"""
self.attribute_name = attribute_name
"""The name of the symbol that can not be resolved."""
self.object = object_
"""The value that *attribute_name* was used as an attribute for."""
self.thing = thing
"""The root-object that was used to resolve *object*."""
super(AttributeResolutionError, self).__init__("unknown attribute: {0!r}".format(attribute_name))
class AttributeTypeError(EvaluationError):
"""
An error raised when an attribute with type information is resolved to a
Python value that is not of that type.
"""
def __init__(self, attribute_name, object_type, is_value, is_type, expected_type):
"""
:param str attribute_name: The name of the symbol that can not be resolved.
:param object_type: The value that *attribute_name* was used as an attribute for.
:param is_value: The native Python value of the incompatible attribute.
:param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible attribute.
:param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this attribute.
"""
self.attribute_name = attribute_name
"""The name of the attribute that is of an incompatible type."""
self.object_type = object_type
"""The object on which the attribute was resolved."""
self.is_value = is_value
"""The native Python value of the incompatible attribute."""
self.is_type = is_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible attribute."""
self.expected_type = expected_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this attribute."""
message = "attribute {0!r} resolved to incorrect datatype (is: {1}, expected: {2})".format(attribute_name, is_type.name, expected_type.name)
super(AttributeTypeError, self).__init__(message)
class SymbolResolutionError(EvaluationError):
"""
An error raised when a symbol name is not able to be resolved to a value.
"""
def __init__(self, symbol_name, symbol_scope=None, thing=UNDEFINED):
"""
:param str symbol_name: The name of the symbol that can not be resolved.
:param str symbol_scope: The scope of where the symbol should be valid for resolution.
:param thing: The root-object that was used to resolve the symbol.
.. versionchanged:: 2.0.0
Added the *thing* parameter.
"""
self.symbol_name = symbol_name
"""The name of the symbol that can not be resolved."""
self.symbol_scope = symbol_scope
"""The scope of where the symbol should be valid for resolution."""
self.thing = thing
"""The root-object that was used to resolve the symbol."""
super(SymbolResolutionError, self).__init__("unknown symbol: {0!r}".format(symbol_name))
class SymbolTypeError(EvaluationError):
"""
An error raised when a symbol with type information is resolved to a Python
value that is not of that type.
"""
def __init__(self, symbol_name, is_value, is_type, expected_type):
"""
:param str symbol_name: The name of the symbol that is of an incompatible type.
:param is_value: The native Python value of the incompatible symbol.
:param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible symbol.
:param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this symbol.
"""
self.symbol_name = symbol_name
"""The name of the symbol that is of an incompatible type."""
self.is_value = is_value
"""The native Python value of the incompatible symbol."""
self.is_type = is_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible symbol."""
self.expected_type = expected_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this symbol."""
message = "symbol {0!r} resolved to incorrect datatype (is: {1}, expected: {2})".format(symbol_name, is_type.name, expected_type.name)
super(SymbolTypeError, self).__init__(message)
| class _Undefined(object):
def __bool__(self):
return False
__name__ = 'UNDEFINED'
__nonzero__ = __bool__
def __repr__(self):
return self.__name__
undefined = _undefined()
'\nA sentinel value to specify that something is undefined. When evaluated, the\nvalue is falsy.\n\n.. versionadded:: 2.0.0\n'
class Engineerror(Exception):
"""
The base exception class from which other exceptions within this package
inherit.
"""
def __init__(self, message=''):
"""
:param str message: A text description of what error occurred.
"""
self.message = message
'A text description of what error occurred.'
class Evaluationerror(EngineError):
"""
An error raised for issues which occur while the rule is being evaluated.
This can occur at parse time while AST nodes are being evaluated during
the reduction phase.
"""
pass
class Syntaxerror(EngineError):
"""A base error for syntax related issues."""
class Datetimesyntaxerror(SyntaxError):
"""
An error raised for issues regarding the use of improperly formatted
datetime expressions.
"""
def __init__(self, message, value):
"""
:param str message: A text description of what error occurred.
:param str value: The datetime value which contains the syntax error which caused this exception to be raised.
"""
super(DatetimeSyntaxError, self).__init__(message)
self.value = value
'\n\t\tThe datetime value which contains the syntax error which caused this\n\t\texception to be raised.\n\t\t'
class Rulesyntaxerror(SyntaxError):
"""
An error raised for issues identified in while parsing the grammar of the
rule text.
"""
def __init__(self, message, token=None):
"""
:param str message: A text description of what error occurred.
:param token: The PLY token (if available) which is related to the syntax error.
"""
if token is None:
position = 'EOF'
else:
position = 'line {0}:{1}'.format(token.lineno, token.lexpos)
message = message + ' at: ' + position
super(RuleSyntaxError, self).__init__(message)
self.token = token
'The PLY token (if available) which is related to the syntax error.'
class Regexsyntaxerror(SyntaxError):
"""
An error raised for issues regarding the use of improper regular expression
syntax.
"""
def __init__(self, message, error, value):
"""
:param str message: A text description of what error occurred.
:param error: The :py:exc:`re.error` exception from which this error was triggered.
:type error: :py:exc:`re.error`
:param str value: The regular expression value which contains the syntax error which caused this exception to be raised.
"""
super(RegexSyntaxError, self).__init__(message)
self.error = error
'The :py:exc:`re.error` exception from which this error was triggered.'
self.value = value
'\n\t\tThe regular expression value which contains the syntax error which\n\t\tcaused this exception to be raised.\n\t\t'
class Attributeresolutionerror(EvaluationError):
"""
An error raised with an attribute can not be resolved to a value.
..versionadded:: 2.0.0
"""
def __init__(self, attribute_name, object_, thing=UNDEFINED):
"""
:param str attribute_name: The name of the symbol that can not be resolved.
:param object_: The value that *attribute_name* was used as an attribute for.
:param thing: The root-object that was used to resolve *object*.
"""
self.attribute_name = attribute_name
'The name of the symbol that can not be resolved.'
self.object = object_
'The value that *attribute_name* was used as an attribute for.'
self.thing = thing
'The root-object that was used to resolve *object*.'
super(AttributeResolutionError, self).__init__('unknown attribute: {0!r}'.format(attribute_name))
class Attributetypeerror(EvaluationError):
"""
An error raised when an attribute with type information is resolved to a
Python value that is not of that type.
"""
def __init__(self, attribute_name, object_type, is_value, is_type, expected_type):
"""
:param str attribute_name: The name of the symbol that can not be resolved.
:param object_type: The value that *attribute_name* was used as an attribute for.
:param is_value: The native Python value of the incompatible attribute.
:param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible attribute.
:param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this attribute.
"""
self.attribute_name = attribute_name
'The name of the attribute that is of an incompatible type.'
self.object_type = object_type
'The object on which the attribute was resolved.'
self.is_value = is_value
'The native Python value of the incompatible attribute.'
self.is_type = is_type
'The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible attribute.'
self.expected_type = expected_type
'The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this attribute.'
message = 'attribute {0!r} resolved to incorrect datatype (is: {1}, expected: {2})'.format(attribute_name, is_type.name, expected_type.name)
super(AttributeTypeError, self).__init__(message)
class Symbolresolutionerror(EvaluationError):
"""
An error raised when a symbol name is not able to be resolved to a value.
"""
def __init__(self, symbol_name, symbol_scope=None, thing=UNDEFINED):
"""
:param str symbol_name: The name of the symbol that can not be resolved.
:param str symbol_scope: The scope of where the symbol should be valid for resolution.
:param thing: The root-object that was used to resolve the symbol.
.. versionchanged:: 2.0.0
Added the *thing* parameter.
"""
self.symbol_name = symbol_name
'The name of the symbol that can not be resolved.'
self.symbol_scope = symbol_scope
'The scope of where the symbol should be valid for resolution.'
self.thing = thing
'The root-object that was used to resolve the symbol.'
super(SymbolResolutionError, self).__init__('unknown symbol: {0!r}'.format(symbol_name))
class Symboltypeerror(EvaluationError):
"""
An error raised when a symbol with type information is resolved to a Python
value that is not of that type.
"""
def __init__(self, symbol_name, is_value, is_type, expected_type):
"""
:param str symbol_name: The name of the symbol that is of an incompatible type.
:param is_value: The native Python value of the incompatible symbol.
:param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible symbol.
:param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this symbol.
"""
self.symbol_name = symbol_name
'The name of the symbol that is of an incompatible type.'
self.is_value = is_value
'The native Python value of the incompatible symbol.'
self.is_type = is_type
'The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible symbol.'
self.expected_type = expected_type
'The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this symbol.'
message = 'symbol {0!r} resolved to incorrect datatype (is: {1}, expected: {2})'.format(symbol_name, is_type.name, expected_type.name)
super(SymbolTypeError, self).__init__(message) |
def is_even(number):
return number % 2 == 0
| def is_even(number):
return number % 2 == 0 |
class Solution(object):
def matrixReshape(self, nums, r, c):
"""
:type nums: List[List[int]]
:type r: int
:type c: int
:rtype: List[List[int]]
"""
if len(nums)*len(nums[0]) != r*c:
return nums
kek = []
nums = [item for sublist in nums for item in sublist]
k = 0
for i in range(r):
kek.append([])
for j in range(c):
kek[i].append(nums[i*c + j])
return kek
| class Solution(object):
def matrix_reshape(self, nums, r, c):
"""
:type nums: List[List[int]]
:type r: int
:type c: int
:rtype: List[List[int]]
"""
if len(nums) * len(nums[0]) != r * c:
return nums
kek = []
nums = [item for sublist in nums for item in sublist]
k = 0
for i in range(r):
kek.append([])
for j in range(c):
kek[i].append(nums[i * c + j])
return kek |
class InvalidOperationError(BaseException):
pass
class Node():
def __init__(self, value, next=None):
self.value = value
self.next = next
class Stack():
def __init__(self, node=None):
self.top = node
def __len__(self):
count = 0
curr = self.top
while curr:
count += 1
curr = curr.next
return count
def push(self, value):
node = Node(value)
node.next = self.top
self.top = node
def pop(self):
if self.is_empty():
raise InvalidOperationError("Method not allowed on empty collection")
else:
node = self.top.value
self.top = self.top.next
# node.next = None
return node
def peek(self):
if self.is_empty():
raise InvalidOperationError("Method not allowed on empty collection")
return self.top.value
def is_empty(self):
return self.top is None
class Queue():
def __init__(self):
self.front = None
self.rear = None
def enqueue(self, value):
node = Node(value)
if not self.front:
self.front, self.rear = node, node
else:
self.rear.next = node
self.rear = node
def dequeue(self):
if self.is_empty():
raise InvalidOperationError("Method not allowed on empty collection")
node = self.front
self.front = self.front.next
return node.value
def peek(self):
if self.is_empty():
raise InvalidOperationError("Method not allowed on empty collection")
return self.front.value
def is_empty(self):
if not self.front:
return True
| class Invalidoperationerror(BaseException):
pass
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
class Stack:
def __init__(self, node=None):
self.top = node
def __len__(self):
count = 0
curr = self.top
while curr:
count += 1
curr = curr.next
return count
def push(self, value):
node = node(value)
node.next = self.top
self.top = node
def pop(self):
if self.is_empty():
raise invalid_operation_error('Method not allowed on empty collection')
else:
node = self.top.value
self.top = self.top.next
return node
def peek(self):
if self.is_empty():
raise invalid_operation_error('Method not allowed on empty collection')
return self.top.value
def is_empty(self):
return self.top is None
class Queue:
def __init__(self):
self.front = None
self.rear = None
def enqueue(self, value):
node = node(value)
if not self.front:
(self.front, self.rear) = (node, node)
else:
self.rear.next = node
self.rear = node
def dequeue(self):
if self.is_empty():
raise invalid_operation_error('Method not allowed on empty collection')
node = self.front
self.front = self.front.next
return node.value
def peek(self):
if self.is_empty():
raise invalid_operation_error('Method not allowed on empty collection')
return self.front.value
def is_empty(self):
if not self.front:
return True |
"""
The file provides default secret parameters used as a reference for creating your
own secret.py or in testing.
Make sure to create your own secret.py (in the same folder) with appropriate values for when deploying
the website!
"""
SECRET_KEY = "2r4-$a^!rs=^glu=a8m=e5a$5*wg2uxjjob!diff-z*wzdx+4y"
"""
Set these if mysql is used as a database backend
"""
MYSQL_USERNAME = ""
MYSQL_PASSWORD = ""
"""
Set these if you're sending e-mail through Gmail using Google's API
"""
SECRET_GMAIL_API_CLIENT_ID = 'google_assigned_id'
SECRET_GMAIL_API_CLIENT_SECRET = 'google_assigned_secret'
SECRET_GMAIL_API_REFRESH_TOKEN = 'google_assigned_token'
"""
Set these if you're sending e-mail through SMTP
"""
SECRET_EMAIL_HOST_USER = 'username'
SECRET_EMAIL_HOST_PASSWORD = 'password'
| """
The file provides default secret parameters used as a reference for creating your
own secret.py or in testing.
Make sure to create your own secret.py (in the same folder) with appropriate values for when deploying
the website!
"""
secret_key = '2r4-$a^!rs=^glu=a8m=e5a$5*wg2uxjjob!diff-z*wzdx+4y'
'\nSet these if mysql is used as a database backend\n'
mysql_username = ''
mysql_password = ''
"\nSet these if you're sending e-mail through Gmail using Google's API\n"
secret_gmail_api_client_id = 'google_assigned_id'
secret_gmail_api_client_secret = 'google_assigned_secret'
secret_gmail_api_refresh_token = 'google_assigned_token'
"\nSet these if you're sending e-mail through SMTP\n"
secret_email_host_user = 'username'
secret_email_host_password = 'password' |
# Given
x = 10000.0
y = 3.0
print(x / y)
print(10000 / 3)
# What is happening?
# Given
print(x - 1 / y)
print((x - 1) / y)
# What is happening?
# Given
x = 'foo'
y = 'bar'
# Create 'foobar' using x and y
s = x + y
print(s)
# Create 'foo -> bar' using x and y
print(x + " -> " + y)
# Given
x = 'hello world'
# from x create 'HELLO WORLD'
print(x.upper())
# from x create 'hellX wXrld'
print(x.replace('o', 'X'))
# Given
x = 10000.0
y = 3.0
# print "10000 / 3 = 3333" using x and y
print("{x} / {y} = {z}".format(x=x, y=y, z=x/y))
# Given
s = ['hello', 'world']
# print 'helloworld'
print(s[0] + s[1])
# print 'hello world'
print(s[0] , s[1])
# print 'hello
print(s[0])
# world'
print(s[1])
# Given
x = "Monty Python and the Holy Grail"
# create the list ['Monty', 'Python', 'and', 'the', 'Holy', 'Grail']
print(x.split())
y = "one,two,three,four"
# create the list ['one', 'two', 'three', 'four'
print(y.split(','))
| x = 10000.0
y = 3.0
print(x / y)
print(10000 / 3)
print(x - 1 / y)
print((x - 1) / y)
x = 'foo'
y = 'bar'
s = x + y
print(s)
print(x + ' -> ' + y)
x = 'hello world'
print(x.upper())
print(x.replace('o', 'X'))
x = 10000.0
y = 3.0
print('{x} / {y} = {z}'.format(x=x, y=y, z=x / y))
s = ['hello', 'world']
print(s[0] + s[1])
print(s[0], s[1])
print(s[0])
print(s[1])
x = 'Monty Python and the Holy Grail'
print(x.split())
y = 'one,two,three,four'
print(y.split(',')) |
class Config:
def __init__(self):
self.data_dir = './data/'
self.data_path = self.data_dir + 'peot.txt'
self.pickle_path = self.data_dir + 'tang.npz'
self.load_path = './checkpoints/peot9.pt'
self.save_path = './checkpoints/peot9.pt'
self.do_train = False
self.do_test = False
self.do_predict = True
self.do_load_model = True
self.num_epoch = 40
self.batch_size = 128
self.lr = 1e-3
self.weight_decay = 1e-4
self.max_gen_len = 200
self.max_len = 125
self.embedding_dim = 300
self.hidden_dim = 256
| class Config:
def __init__(self):
self.data_dir = './data/'
self.data_path = self.data_dir + 'peot.txt'
self.pickle_path = self.data_dir + 'tang.npz'
self.load_path = './checkpoints/peot9.pt'
self.save_path = './checkpoints/peot9.pt'
self.do_train = False
self.do_test = False
self.do_predict = True
self.do_load_model = True
self.num_epoch = 40
self.batch_size = 128
self.lr = 0.001
self.weight_decay = 0.0001
self.max_gen_len = 200
self.max_len = 125
self.embedding_dim = 300
self.hidden_dim = 256 |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Indexer objects.
The choice to add the bug to the function rather than to the object was that
the indexer may be run on many bugs/items/etc so I didn't want the object to
become dependent on the bug it was manipulating.
"""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
class Error(Exception):
pass
class IndexerBase(object):
"""Indexer base class
Indexer are responsible creating search indices for bug from a specific
provider.
"""
def __init__(self):
pass
def Index(self, bug):
raise NotImplementedError
| """Base class for Indexer objects.
The choice to add the bug to the function rather than to the object was that
the indexer may be run on many bugs/items/etc so I didn't want the object to
become dependent on the bug it was manipulating.
"""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
class Error(Exception):
pass
class Indexerbase(object):
"""Indexer base class
Indexer are responsible creating search indices for bug from a specific
provider.
"""
def __init__(self):
pass
def index(self, bug):
raise NotImplementedError |
'''
@Author: Ofey Chan
@Date: 2020-03-03 19:23:15
@LastEditors: Ofey Chan
@LastEditTime: 2020-03-03 20:07:31
@Description: General permutation group class.
@Reference:
'''
| """
@Author: Ofey Chan
@Date: 2020-03-03 19:23:15
@LastEditors: Ofey Chan
@LastEditTime: 2020-03-03 20:07:31
@Description: General permutation group class.
@Reference:
""" |
# Forcing recursion for no good reason. But it passed so....
def solution_r(n):
if n <= 0:
return n
else:
if not n%3 or not n%5:
return n + solution_r(n-1)
else:
return solution_r(n-1)
def solution(number):
if not number:
return 0
return solution_r(number-1)
assert solution(10) == 23, "Oops, recursion is the devil"
| def solution_r(n):
if n <= 0:
return n
elif not n % 3 or not n % 5:
return n + solution_r(n - 1)
else:
return solution_r(n - 1)
def solution(number):
if not number:
return 0
return solution_r(number - 1)
assert solution(10) == 23, 'Oops, recursion is the devil' |
class Solution:
def minJumps(self, arr: List[int]) -> int:
graph = defaultdict(list)
for i in range(len(arr)):
graph[arr[i]].append(i)
visited = set()
src, dest = 0, len(arr) - 1
queue = deque()
queue.append((src, 0))
visited.add(src)
while queue:
node, dist = queue.popleft()
if node == dest:
return dist
for child in [node - 1, node + 1] + graph[arr[node]][::-1]:
if 0 <= child < len(arr) and child != node and child not in visited:
visited.add(child)
if child == dest:
return dist + 1
queue.append((child, dist + 1))
return -1
| class Solution:
def min_jumps(self, arr: List[int]) -> int:
graph = defaultdict(list)
for i in range(len(arr)):
graph[arr[i]].append(i)
visited = set()
(src, dest) = (0, len(arr) - 1)
queue = deque()
queue.append((src, 0))
visited.add(src)
while queue:
(node, dist) = queue.popleft()
if node == dest:
return dist
for child in [node - 1, node + 1] + graph[arr[node]][::-1]:
if 0 <= child < len(arr) and child != node and (child not in visited):
visited.add(child)
if child == dest:
return dist + 1
queue.append((child, dist + 1))
return -1 |
def transitions(y,x):
yield y+1,x
yield y,x+1
yield y-1,x
yield y,x-1
def valid_transitions(arr):
# print(arr)
Y = len(arr)
X = len(arr[0])
def _f(y0,x0):
for y,x in transitions(y0,x0):
if 0 <= y < Y and 0 <= x < X and arr[y][x] != "-":
yield y,x
return _f
def opp(player):
if player == "W":
return "B"
else:
return "W"
def dfs(board, init, visited, tran_fn, ans):
q = [(init, 'W')]
while q:
(y,x), player = q.pop()
if (y,x) not in visited:
visited.add((y,x))
ans[y][x] = player
for yn,xn in tran_fn(y,x):
# print((y,x), (yn,xn))
item = (yn,xn), opp(player)
q.append(item)
Y,X = [int(x) for x in input().split()]
board = []
for y in range(Y):
s = input()
board.append(s)
def run(board):
tran_fn = valid_transitions(board)
Y = len(board)
X = len(board[0])
ans = [["-" for _ in range(X)] for _ in range(Y)]
visited = set()
for y in range(Y):
for x in range(X):
if board[y][x] == '.':
dfs(board, (y,x), visited, tran_fn, ans)
ans = ["".join(xs) for xs in ans]
print("\n".join(ans))
# print(board)
run(board)
# print(list(valid_transitions(board)(0,0)))
| def transitions(y, x):
yield (y + 1, x)
yield (y, x + 1)
yield (y - 1, x)
yield (y, x - 1)
def valid_transitions(arr):
y = len(arr)
x = len(arr[0])
def _f(y0, x0):
for (y, x) in transitions(y0, x0):
if 0 <= y < Y and 0 <= x < X and (arr[y][x] != '-'):
yield (y, x)
return _f
def opp(player):
if player == 'W':
return 'B'
else:
return 'W'
def dfs(board, init, visited, tran_fn, ans):
q = [(init, 'W')]
while q:
((y, x), player) = q.pop()
if (y, x) not in visited:
visited.add((y, x))
ans[y][x] = player
for (yn, xn) in tran_fn(y, x):
item = ((yn, xn), opp(player))
q.append(item)
(y, x) = [int(x) for x in input().split()]
board = []
for y in range(Y):
s = input()
board.append(s)
def run(board):
tran_fn = valid_transitions(board)
y = len(board)
x = len(board[0])
ans = [['-' for _ in range(X)] for _ in range(Y)]
visited = set()
for y in range(Y):
for x in range(X):
if board[y][x] == '.':
dfs(board, (y, x), visited, tran_fn, ans)
ans = [''.join(xs) for xs in ans]
print('\n'.join(ans))
run(board) |
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Mandatory Common Configuration required
sharedWs = "{buildDir}/shared_ws"
XSCT_BUILD_SOURCE = "" # build source type whether to be used XSCT default or git source (i.e. XSCT_BUILD_SOURCE="git")
version = "2020.2" # Vitis version installed and to be used
vitisPath = "" # user needs to mentioned Vitispath path where vitis is installed in user's system
outoftreebuild = True
# Parallel Threads
parallel_make = 20
# delpoy artifacts
deploy_artifacts = "{buildDir}/{machine}/deploy/"
# Run test configuration
rootfs_path = "{ROOT}/build/{machine}/deploy/rootfs.cpio.gz.u-boot"
boot_scr_path = ""
deployDir = "{ROOT}/build/{machine}/deploy"
# local board configuration
# Serial communication configurations
"""
These below configurations will used to communicate,
with board which was connected to your host machine by using serial uart
"""
board_interface = "host_target"
com = "/dev/ttyUSB0" # Allocate proper com port(ttyUSB0/ttyUSB1/ttyUSB2/ttyUSB3)
baudrate = "115200"
# Remote host configuration
"""
This below configuration need to enable if target connected to remote host machine.
remote_host = ""
"""
| shared_ws = '{buildDir}/shared_ws'
xsct_build_source = ''
version = '2020.2'
vitis_path = ''
outoftreebuild = True
parallel_make = 20
deploy_artifacts = '{buildDir}/{machine}/deploy/'
rootfs_path = '{ROOT}/build/{machine}/deploy/rootfs.cpio.gz.u-boot'
boot_scr_path = ''
deploy_dir = '{ROOT}/build/{machine}/deploy'
'\nThese below configurations will used to communicate,\nwith board which was connected to your host machine by using serial uart\n'
board_interface = 'host_target'
com = '/dev/ttyUSB0'
baudrate = '115200'
'\nThis below configuration need to enable if target connected to remote host machine.\nremote_host = ""\n' |
def insertShiftArray(arr, value):
mid = len(arr) // 2
new_arr = []
for i in range(0, mid):
new_arr.append(arr[i])
new_arr.append(value)
for i in range(mid, len(arr)):
new_arr.append(arr[i])
return new_arr
test = [1, 2, 3, 4, 5]
print(test)
print(insertShiftArray(test, 8))
test2 = [1,2,3,4]
print(test2, 8)
print(insertShiftArray(test2, 8))
| def insert_shift_array(arr, value):
mid = len(arr) // 2
new_arr = []
for i in range(0, mid):
new_arr.append(arr[i])
new_arr.append(value)
for i in range(mid, len(arr)):
new_arr.append(arr[i])
return new_arr
test = [1, 2, 3, 4, 5]
print(test)
print(insert_shift_array(test, 8))
test2 = [1, 2, 3, 4]
print(test2, 8)
print(insert_shift_array(test2, 8)) |
"""
callfunc.py
The Frog Programming Language Operation & Keyword: call (func)
Development Leader: @RedoC
"""
class CALLFUNC:
"""
CALLFUNC is the multi class
>> example
run foo(boo)
run print("")
"""
def __init__(self, funcname: str, param: list):
self.funcname = funcname
self.param = param
def getCommandcode(self):
"""
getCommandcode(self)
get commandcode of function calling
:return:
"""
commandcode = "CALL ({funcname})({param})".format(funcname=self.funcname, param=",".join([str(e) for e in self.param])) # commandcode
return commandcode
if __name__ == '__main__':
# unit test
assign_ = CALLFUNC("print", ["hello, world!"])
print(assign_.getCommandcode())
| """
callfunc.py
The Frog Programming Language Operation & Keyword: call (func)
Development Leader: @RedoC
"""
class Callfunc:
"""
CALLFUNC is the multi class
>> example
run foo(boo)
run print("")
"""
def __init__(self, funcname: str, param: list):
self.funcname = funcname
self.param = param
def get_commandcode(self):
"""
getCommandcode(self)
get commandcode of function calling
:return:
"""
commandcode = 'CALL ({funcname})({param})'.format(funcname=self.funcname, param=','.join([str(e) for e in self.param]))
return commandcode
if __name__ == '__main__':
assign_ = callfunc('print', ['hello, world!'])
print(assign_.getCommandcode()) |
class Hamming:
def distance(self, first, second):
num_of_errors = 0
if type(first) != str or type(second) != str:
return "Wrong type of strands"
if len(first) != len(second):
return "Strands should be the same length"
for i in range(len(first)):
if first[i] != second[i]:
num_of_errors += 1
return num_of_errors
| class Hamming:
def distance(self, first, second):
num_of_errors = 0
if type(first) != str or type(second) != str:
return 'Wrong type of strands'
if len(first) != len(second):
return 'Strands should be the same length'
for i in range(len(first)):
if first[i] != second[i]:
num_of_errors += 1
return num_of_errors |
conditons = True
alcool = 0
gas = 0
disel = 0
while conditons :
T = int(input())
if T == 4:
conditons = False;
else:
if T == 1:
alcool +=1
if T == 2:
gas +=1
if T == 3:
disel +=1
print("MUITO OBRIGADO")
print(f"Alcool: {alcool}")
print(f"Gasolina: {gas}")
print(f"Diesel: {disel}") | conditons = True
alcool = 0
gas = 0
disel = 0
while conditons:
t = int(input())
if T == 4:
conditons = False
else:
if T == 1:
alcool += 1
if T == 2:
gas += 1
if T == 3:
disel += 1
print('MUITO OBRIGADO')
print(f'Alcool: {alcool}')
print(f'Gasolina: {gas}')
print(f'Diesel: {disel}') |
def estimator(data):
output = {'data':data, 'impact': {}, 'severeImpact': {}}
output['impact']['currentlyInfected'] = data['reportedCases'] * 10
output['severeImpact']['currentlyInfected'] = data['reportedCases'] * 50
if data['periodType'] == 'weeks':
data['timeToElapse'] = data['timeToElapse'] * 7
elif data['periodType'] == 'months':
data['timeToElapse'] = data['timeToElapse'] * 30
output['impact']['infectionsByRequestedTime'] = output['impact']['currentlyInfected'] * (2 ** (data['timeToElapse']//3))
output['severeImpact']['infectionsByRequestedTime'] = output['severeImpact']['currentlyInfected'] * (2 ** (data['timeToElapse']//3))
output['impact']['severeCasesByRequestedTime'] = int(15/100 * (output['impact']['infectionsByRequestedTime']))
output['severeImpact']['severeCasesByRequestedTime'] = int(15/100 * (output['severeImpact']['infectionsByRequestedTime']))
output['impact']['hospitalBedsByRequestedTime'] = int((35/100 * (data['totalHospitalBeds'])) - output['impact']['severeCasesByRequestedTime'])
output['severeImpact']['hospitalBedsByRequestedTime'] = int((35/100 * (data['totalHospitalBeds'])) - output['severeImpact']['severeCasesByRequestedTime'])
output['impact']['casesForICUByRequestedTime'] = int(5/100 * output['impact']['infectionsByRequestedTime'])
output['severeImpact']['casesForICUByRequestedTime'] = int(5/100 * output['severeImpact']['infectionsByRequestedTime'])
output['impact']['casesForVentilatorsByRequestedTime'] = int(2/100 * output['impact']['infectionsByRequestedTime'])
output['severeImpact']['casesForVentilatorsByRequestedTime'] = int(2/100 * output['severeImpact']['infectionsByRequestedTime'])
output['impact']['dollarsInFlight'] = int((output['impact']['infectionsByRequestedTime'] * data['region']['avgDailyIncomeInUSD'] * data['region']['avgDailyIncomePopulation']) /data['timeToElapse'])
output['severeImpact']['dollarsInFlight'] = int((output['severeImpact']['infectionsByRequestedTime'] * data['region']['avgDailyIncomeInUSD'] * data['region']['avgDailyIncomePopulation'])/data['timeToElapse'])
return output
| def estimator(data):
output = {'data': data, 'impact': {}, 'severeImpact': {}}
output['impact']['currentlyInfected'] = data['reportedCases'] * 10
output['severeImpact']['currentlyInfected'] = data['reportedCases'] * 50
if data['periodType'] == 'weeks':
data['timeToElapse'] = data['timeToElapse'] * 7
elif data['periodType'] == 'months':
data['timeToElapse'] = data['timeToElapse'] * 30
output['impact']['infectionsByRequestedTime'] = output['impact']['currentlyInfected'] * 2 ** (data['timeToElapse'] // 3)
output['severeImpact']['infectionsByRequestedTime'] = output['severeImpact']['currentlyInfected'] * 2 ** (data['timeToElapse'] // 3)
output['impact']['severeCasesByRequestedTime'] = int(15 / 100 * output['impact']['infectionsByRequestedTime'])
output['severeImpact']['severeCasesByRequestedTime'] = int(15 / 100 * output['severeImpact']['infectionsByRequestedTime'])
output['impact']['hospitalBedsByRequestedTime'] = int(35 / 100 * data['totalHospitalBeds'] - output['impact']['severeCasesByRequestedTime'])
output['severeImpact']['hospitalBedsByRequestedTime'] = int(35 / 100 * data['totalHospitalBeds'] - output['severeImpact']['severeCasesByRequestedTime'])
output['impact']['casesForICUByRequestedTime'] = int(5 / 100 * output['impact']['infectionsByRequestedTime'])
output['severeImpact']['casesForICUByRequestedTime'] = int(5 / 100 * output['severeImpact']['infectionsByRequestedTime'])
output['impact']['casesForVentilatorsByRequestedTime'] = int(2 / 100 * output['impact']['infectionsByRequestedTime'])
output['severeImpact']['casesForVentilatorsByRequestedTime'] = int(2 / 100 * output['severeImpact']['infectionsByRequestedTime'])
output['impact']['dollarsInFlight'] = int(output['impact']['infectionsByRequestedTime'] * data['region']['avgDailyIncomeInUSD'] * data['region']['avgDailyIncomePopulation'] / data['timeToElapse'])
output['severeImpact']['dollarsInFlight'] = int(output['severeImpact']['infectionsByRequestedTime'] * data['region']['avgDailyIncomeInUSD'] * data['region']['avgDailyIncomePopulation'] / data['timeToElapse'])
return output |
# -*- coding: utf-8 -*-
"""Top-level package for Temp Monitor."""
__author__ = """Goncalo Magno"""
__email__ = 'goncalo@gmagno.dev'
__version__ = '0.4.0'
| """Top-level package for Temp Monitor."""
__author__ = 'Goncalo Magno'
__email__ = 'goncalo@gmagno.dev'
__version__ = '0.4.0' |
class TwitterSearchException(Exception):
"""
This class handles all exceptions directly based on TwitterSearch.
"""
# HTTP status codes are stored in TwitterSearch.exceptions due to possible on-the-fly modifications
_error_codes = {
1000 : 'Neither a list nor a string',
1001 : 'Not a list object',
1002 : 'No ISO 6391-1 language code',
1003 : 'No valid result type',
1004 : 'Invalid number',
1005 : 'Invalid unit',
1006 : 'Invalid callback string',
1007 : 'Not a date object',
1008 : 'Invalid boolean',
1009 : 'Invalid string',
1010 : 'Not a valid TwitterSearchOrder object',
1011 : 'No more results available',
1012 : 'No meta data available',
1013 : 'No tweets available',
1014 : 'No results available',
1015 : 'No keywords given',
1016 : 'Invalid dict',
}
def __init__(self, code, msg = None):
self.code = code
if msg:
self.message = msg
else:
self.message = self._error_codes.get(code)
def __str__(self):
return "Error %i: %s" % (self.code, self.message)
| class Twittersearchexception(Exception):
"""
This class handles all exceptions directly based on TwitterSearch.
"""
_error_codes = {1000: 'Neither a list nor a string', 1001: 'Not a list object', 1002: 'No ISO 6391-1 language code', 1003: 'No valid result type', 1004: 'Invalid number', 1005: 'Invalid unit', 1006: 'Invalid callback string', 1007: 'Not a date object', 1008: 'Invalid boolean', 1009: 'Invalid string', 1010: 'Not a valid TwitterSearchOrder object', 1011: 'No more results available', 1012: 'No meta data available', 1013: 'No tweets available', 1014: 'No results available', 1015: 'No keywords given', 1016: 'Invalid dict'}
def __init__(self, code, msg=None):
self.code = code
if msg:
self.message = msg
else:
self.message = self._error_codes.get(code)
def __str__(self):
return 'Error %i: %s' % (self.code, self.message) |
# -*- python -*-
load("@drake//tools/workspace:os.bzl", "determine_os")
def _impl(repository_ctx):
os_result = determine_os(repository_ctx)
if os_result.error != None:
fail(os_result.error)
if os_result.is_macos:
repository_ctx.symlink(
"/usr/local/opt/double-conversion/include",
"include",
)
repository_ctx.symlink(
Label(
"@drake//tools/workspace/double_conversion:package-macos.BUILD.bazel", # noqa
),
"BUILD.bazel",
)
elif os_result.is_ubuntu:
repository_ctx.symlink(
"/usr/include/double-conversion",
"include/double-conversion",
)
repository_ctx.symlink(
Label(
"@drake//tools/workspace/double_conversion:package-ubuntu.BUILD.bazel", # noqa
),
"BUILD.bazel",
)
else:
fail("Operating system is NOT supported", attr = os_result)
double_conversion_repository = repository_rule(
local = True,
configure = True,
implementation = _impl,
)
| load('@drake//tools/workspace:os.bzl', 'determine_os')
def _impl(repository_ctx):
os_result = determine_os(repository_ctx)
if os_result.error != None:
fail(os_result.error)
if os_result.is_macos:
repository_ctx.symlink('/usr/local/opt/double-conversion/include', 'include')
repository_ctx.symlink(label('@drake//tools/workspace/double_conversion:package-macos.BUILD.bazel'), 'BUILD.bazel')
elif os_result.is_ubuntu:
repository_ctx.symlink('/usr/include/double-conversion', 'include/double-conversion')
repository_ctx.symlink(label('@drake//tools/workspace/double_conversion:package-ubuntu.BUILD.bazel'), 'BUILD.bazel')
else:
fail('Operating system is NOT supported', attr=os_result)
double_conversion_repository = repository_rule(local=True, configure=True, implementation=_impl) |
def linear_search(array, y):
for i in range(len(array)):
if array[i] == y:
return i
return -1
arrSize=int(input("Enter Array Size"))
array=[]
print("Enter Array Elements")
for i in range(arrSize):
array.append(int(input()))
y = int(input("Enter Number you want to find =:-"))
result = linear_search(array, y)
if(result == -1):
print("Element not found")
else:
print("Element found at index: ", result)
| def linear_search(array, y):
for i in range(len(array)):
if array[i] == y:
return i
return -1
arr_size = int(input('Enter Array Size'))
array = []
print('Enter Array Elements')
for i in range(arrSize):
array.append(int(input()))
y = int(input('Enter Number you want to find =:-'))
result = linear_search(array, y)
if result == -1:
print('Element not found')
else:
print('Element found at index: ', result) |
""" Driver args """
data_path = '/Users/aa56927-admin/Desktop/NLP_Done_Right/sentiment_classification/data/Rotten_Tomatoes/'
output_path = 'test-blind.output.txt'
model = 'RNN' # RNN, FFNN
run_on_test_flag = True
run_on_manual_flag = True
seq_max_len = 60 # also can be computed more systematically looking at length distribution in corpus
model_path = './model.pt'
if model == 'FFNN':
# training config
no_classes = 2
epochs = 5
batch_size = 64
lr_schedule = 'None' # None / CLR / CALR
optimizer = 'adam' # adagrad
initial_lr = 0.001
weight_decay = 1e-4
word_dropout_rate = 0.3
# network config
input_dim = 300
hidden_1 = 150
hidden_2 = 75
hidden_3 = 50
dropout = 0.2
elif model == 'RNN':
# training config
no_classes = 2
rec_unit = 'LSTM' # GRU
epochs = 30
batch_size = 64
lr_schedule = 'None' # None / CLR / CALR
optimizer = 'adam' # adagrad
initial_lr = 0.01
lr_decay = 0.1
weight_decay = 1e-4
dropout = 0.2
# Stacked RNN units
no_of_rec_units = 2
# inside RNN unit
hidden_size = 100
rnn_dropout = 0.05
""" ElMo Config """
""" BERT Config """
""" CNN Config """
| """ Driver args """
data_path = '/Users/aa56927-admin/Desktop/NLP_Done_Right/sentiment_classification/data/Rotten_Tomatoes/'
output_path = 'test-blind.output.txt'
model = 'RNN'
run_on_test_flag = True
run_on_manual_flag = True
seq_max_len = 60
model_path = './model.pt'
if model == 'FFNN':
no_classes = 2
epochs = 5
batch_size = 64
lr_schedule = 'None'
optimizer = 'adam'
initial_lr = 0.001
weight_decay = 0.0001
word_dropout_rate = 0.3
input_dim = 300
hidden_1 = 150
hidden_2 = 75
hidden_3 = 50
dropout = 0.2
elif model == 'RNN':
no_classes = 2
rec_unit = 'LSTM'
epochs = 30
batch_size = 64
lr_schedule = 'None'
optimizer = 'adam'
initial_lr = 0.01
lr_decay = 0.1
weight_decay = 0.0001
dropout = 0.2
no_of_rec_units = 2
hidden_size = 100
rnn_dropout = 0.05
' ElMo Config '
' BERT Config '
' CNN Config ' |
# https://atcoder.jp/contests/abc194/tasks/abc194_b
N = int(input())
job_list = []
a_min_idx, b_min_idx = 0, 0
a_2nd, b_2nd = 0, 0
for i in range(N):
a, b = list(map(int, input().split()))
job_list.append([a, b])
if job_list[a_min_idx][0] > a:
a_2nd = a_min_idx
a_min_idx = i
if job_list[b_min_idx][1] > b:
b_2nd = b_min_idx
b_min_idx = i
ans = 0
if a_min_idx == b_min_idx:
ans = min(max(job_list[a_min_idx][0], job_list[b_2nd][1]), max(job_list[a_2nd][0], job_list[b_min_idx][1]),
job_list[a_min_idx][0] + job_list[b_min_idx][1])
print(ans)
exit()
ans = max(job_list[a_min_idx][0], job_list[b_min_idx][1])
print(ans)
| n = int(input())
job_list = []
(a_min_idx, b_min_idx) = (0, 0)
(a_2nd, b_2nd) = (0, 0)
for i in range(N):
(a, b) = list(map(int, input().split()))
job_list.append([a, b])
if job_list[a_min_idx][0] > a:
a_2nd = a_min_idx
a_min_idx = i
if job_list[b_min_idx][1] > b:
b_2nd = b_min_idx
b_min_idx = i
ans = 0
if a_min_idx == b_min_idx:
ans = min(max(job_list[a_min_idx][0], job_list[b_2nd][1]), max(job_list[a_2nd][0], job_list[b_min_idx][1]), job_list[a_min_idx][0] + job_list[b_min_idx][1])
print(ans)
exit()
ans = max(job_list[a_min_idx][0], job_list[b_min_idx][1])
print(ans) |
# https://practice.geeksforgeeks.org/problems/get-minimum-element-from-stack/1#
# Approach is to store an array containing stack elements and minEle in separate variable
# For push
# if minEle is None add element to s and assign minEle - element
# if minEle <= element add element to s
# else add 2*x-minEle in s and assign minEle the element value
# For pop
# if s is empty return -1
# if last element of s is >= minEle return and remove last element of s
# else return minEle assign minEle = 2*minEle - last value of S, and also remove last element
# assign minEle None if size of s is 0
# For getMin
# return minEle if not None else -1
class Stack:
def __init__(self):
self.s = []
self.minEle = None
def push(self, x):
if self.minEle is None:
self.minEle = x
self.s.append(x)
else:
if self.minEle <= x:
self.s.append(x)
else:
self.s.append(2 * x - self.minEle)
self.minEle = x
def pop(self):
val = -1
if len(self.s) != 0:
if self.s[-1] >= self.minEle:
val = self.s[-1]
else:
val = self.minEle
self.minEle = 2 * self.minEle - self.s[-1]
del self.s[-1]
if len(self.s) == 0:
self.minEle = None
return val
def getMin(self):
return self.minEle if self.minEle is not None else -1
if __name__ == '__main__':
t = int(input())
for _ in range(t):
q = int(input())
arr = [int(x) for x in input().split()]
stk = Stack()
qi = 0
qn = 1
while qn <= q:
qt = arr[qi]
if qt == 1:
stk.push(arr[qi + 1])
qi += 2
elif qt == 2:
print(stk.pop(), end=' ')
qi += 1
else:
print(stk.getMin(), end=' ')
qi += 1
qn += 1
print()
| class Stack:
def __init__(self):
self.s = []
self.minEle = None
def push(self, x):
if self.minEle is None:
self.minEle = x
self.s.append(x)
elif self.minEle <= x:
self.s.append(x)
else:
self.s.append(2 * x - self.minEle)
self.minEle = x
def pop(self):
val = -1
if len(self.s) != 0:
if self.s[-1] >= self.minEle:
val = self.s[-1]
else:
val = self.minEle
self.minEle = 2 * self.minEle - self.s[-1]
del self.s[-1]
if len(self.s) == 0:
self.minEle = None
return val
def get_min(self):
return self.minEle if self.minEle is not None else -1
if __name__ == '__main__':
t = int(input())
for _ in range(t):
q = int(input())
arr = [int(x) for x in input().split()]
stk = stack()
qi = 0
qn = 1
while qn <= q:
qt = arr[qi]
if qt == 1:
stk.push(arr[qi + 1])
qi += 2
elif qt == 2:
print(stk.pop(), end=' ')
qi += 1
else:
print(stk.getMin(), end=' ')
qi += 1
qn += 1
print() |
class ListaMultimedia():
archivos = []
contar = 0
def __init__(self,archivos=[]):
self.archivos = archivos
def agregar(self,p):
self.archivos.append(p)
self.contar += 1
def mostrar(self):
for p in self.archivos:
print(p)
def cantidad(self):
return"""Total de objetos en la lista: {}""".format(self.contar) | class Listamultimedia:
archivos = []
contar = 0
def __init__(self, archivos=[]):
self.archivos = archivos
def agregar(self, p):
self.archivos.append(p)
self.contar += 1
def mostrar(self):
for p in self.archivos:
print(p)
def cantidad(self):
return 'Total de objetos en la lista: {}'.format(self.contar) |
# MIT License
#
# Copyright (c) 2017 Matt Boyer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
VALID_PAGE_SIZES = (1, 512, 1024, 2048, 4096, 8192, 16384, 32768)
SQLITE_TABLE_COLUMNS = {
'sqlite_master': ('type', 'name', 'tbl_name', 'rootpage', 'sql',),
'sqlite_sequence': ('name', 'seq',),
'sqlite_stat1': ('tbl', 'idx', 'stat',),
'sqlite_stat2': ('tbl', 'idx', 'sampleno', 'sample'),
'sqlite_stat3': ('tbl', 'idx', 'nEq', 'nLt', 'nDLt', 'sample'),
'sqlite_stat4': ('tbl', 'idx', 'nEq', 'nLt', 'nDLt', 'sample'),
}
# These are the integers used in ptrmap entries to designate the kind of page
# for which a given ptrmap entry holds a notional "child to parent" pointer
BTREE_ROOT_PAGE = 1
FREELIST_PAGE = 2
FIRST_OFLOW_PAGE = 3
NON_FIRST_OFLOW_PAGE = 4
BTREE_NONROOT_PAGE = 5
PTRMAP_PAGE_TYPES = (
BTREE_ROOT_PAGE,
FREELIST_PAGE,
FIRST_OFLOW_PAGE,
NON_FIRST_OFLOW_PAGE,
BTREE_NONROOT_PAGE,
)
OVERFLOW_PAGE_TYPES = (
FIRST_OFLOW_PAGE,
NON_FIRST_OFLOW_PAGE,
)
# These are identifiers used internally to keep track of page types *before*
# specialised objects can be instantiated
FREELIST_TRUNK_PAGE = 'freelist_trunk'
FREELIST_LEAF_PAGE = 'freelist_leaf'
PTRMAP_PAGE = 'ptrmap_page'
UNKNOWN_PAGE = 'unknown'
FREELIST_PAGE_TYPES = (
FREELIST_TRUNK_PAGE,
FREELIST_LEAF_PAGE,
)
NON_BTREE_PAGE_TYPES = (
FREELIST_TRUNK_PAGE,
FIRST_OFLOW_PAGE,
NON_FIRST_OFLOW_PAGE,
PTRMAP_PAGE,
)
| valid_page_sizes = (1, 512, 1024, 2048, 4096, 8192, 16384, 32768)
sqlite_table_columns = {'sqlite_master': ('type', 'name', 'tbl_name', 'rootpage', 'sql'), 'sqlite_sequence': ('name', 'seq'), 'sqlite_stat1': ('tbl', 'idx', 'stat'), 'sqlite_stat2': ('tbl', 'idx', 'sampleno', 'sample'), 'sqlite_stat3': ('tbl', 'idx', 'nEq', 'nLt', 'nDLt', 'sample'), 'sqlite_stat4': ('tbl', 'idx', 'nEq', 'nLt', 'nDLt', 'sample')}
btree_root_page = 1
freelist_page = 2
first_oflow_page = 3
non_first_oflow_page = 4
btree_nonroot_page = 5
ptrmap_page_types = (BTREE_ROOT_PAGE, FREELIST_PAGE, FIRST_OFLOW_PAGE, NON_FIRST_OFLOW_PAGE, BTREE_NONROOT_PAGE)
overflow_page_types = (FIRST_OFLOW_PAGE, NON_FIRST_OFLOW_PAGE)
freelist_trunk_page = 'freelist_trunk'
freelist_leaf_page = 'freelist_leaf'
ptrmap_page = 'ptrmap_page'
unknown_page = 'unknown'
freelist_page_types = (FREELIST_TRUNK_PAGE, FREELIST_LEAF_PAGE)
non_btree_page_types = (FREELIST_TRUNK_PAGE, FIRST_OFLOW_PAGE, NON_FIRST_OFLOW_PAGE, PTRMAP_PAGE) |
def test_login_redirect(client):
"""
Test that all requests redirect to the login page
"""
URLS = [
"/web-ui/overview/",
"/web-ui/rq/create_sip",
"/api/list-frozen-objects"
]
for url in URLS:
result = client.get(url)
assert result.status_code == 302
assert "/web-ui/login?" in result.headers["Location"]
| def test_login_redirect(client):
"""
Test that all requests redirect to the login page
"""
urls = ['/web-ui/overview/', '/web-ui/rq/create_sip', '/api/list-frozen-objects']
for url in URLS:
result = client.get(url)
assert result.status_code == 302
assert '/web-ui/login?' in result.headers['Location'] |
# #### We create a function cleanQ so we can do the cleaning and preperation of our data
# #### INPUT: String
# #### OUTPUT: Cleaned String
def cleanQ(query):
query = query.lower()
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(query)
stemmer=[ps.stem(i) for i in tokens]
filtered_Q = [w for w in stemmer if not w in stopwords.words('english')]
return filtered_Q
# #### We create a function computeTF so we can calculate the tf
# #### INPUT: Dictionary where the keys are the terms_id and the values are the frequencies of this term Id in the document
# #### OUTPUT: TF of the specific Term_id in the corresponding document
def computeTF(doc_words):
bow = 0
for k, v in doc_words.items():
bow = bow + v
tf_word = {}
for word, count in doc_words.items():
tf_word[word] = count / float(bow)
return tf_word
| def clean_q(query):
query = query.lower()
tokenizer = regexp_tokenizer('\\w+')
tokens = tokenizer.tokenize(query)
stemmer = [ps.stem(i) for i in tokens]
filtered_q = [w for w in stemmer if not w in stopwords.words('english')]
return filtered_Q
def compute_tf(doc_words):
bow = 0
for (k, v) in doc_words.items():
bow = bow + v
tf_word = {}
for (word, count) in doc_words.items():
tf_word[word] = count / float(bow)
return tf_word |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# OpneWinchPy : a library for controlling the Raspberry Pi's Winch
# Copyright (c) 2020 Mickael Gaillard <mick.gaillard@gmail.com>
__version__ = "0.1.0"
| __version__ = '0.1.0' |
lst = []
count_of_elements = int(input("How many elements want to store in list?"))
for i in range(count_of_elements):
element = input("Enter the element:")
lst.append(element)
print(lst)
| lst = []
count_of_elements = int(input('How many elements want to store in list?'))
for i in range(count_of_elements):
element = input('Enter the element:')
lst.append(element)
print(lst) |
"""
ende nose tests
project : Ende
version : 0.1.0
status : development
modifydate : 2015-05-06 19:30:00 -0700
createdate : 2015-05-05 05:36:00 -0700
website : https://github.com/tmthydvnprt/ende
author : tmthydvnprt
email : tmthydvnprt@users.noreply.github.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2015, project
credits :
"""
__all__ = ['test_data', 'test_file']
| """
ende nose tests
project : Ende
version : 0.1.0
status : development
modifydate : 2015-05-06 19:30:00 -0700
createdate : 2015-05-05 05:36:00 -0700
website : https://github.com/tmthydvnprt/ende
author : tmthydvnprt
email : tmthydvnprt@users.noreply.github.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2015, project
credits :
"""
__all__ = ['test_data', 'test_file'] |
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
ans = []
one_hot = {}
for word in strs:
mapping = [0 for _ in range(26)]
for char in word:
representation = ord(char)
mapping[representation % 26] += 1
mapping = tuple(mapping)
if mapping not in one_hot:
one_hot[mapping] = []
one_hot[mapping].append(word)
for key, values in one_hot.items():
ans.append(values)
return ans
| class Solution:
def group_anagrams(self, strs: List[str]) -> List[List[str]]:
ans = []
one_hot = {}
for word in strs:
mapping = [0 for _ in range(26)]
for char in word:
representation = ord(char)
mapping[representation % 26] += 1
mapping = tuple(mapping)
if mapping not in one_hot:
one_hot[mapping] = []
one_hot[mapping].append(word)
for (key, values) in one_hot.items():
ans.append(values)
return ans |
# -*- coding: utf-8 -*-
__author__ = 'Tommy Stallings'
__email__ = 'tommy.stallings2@gmail.com'
__version__ = '1.0'
| __author__ = 'Tommy Stallings'
__email__ = 'tommy.stallings2@gmail.com'
__version__ = '1.0' |
def GetChargeLevel():
return {'data': 42, 'error': 'NO_ERROR'}
def GetBatteryTemperature():
return {'data': 25.4, 'error': 'NO_ERROR'}
def GetBatteryVoltage():
return {'data': 3111, 'error': 'NO_ERROR'}
def GetBatteryCurrent():
return {'data': 800, 'error': 'NO_ERROR'}
def GetIoVoltage():
return {'data': 5432, 'error': 'NO_ERROR'}
def GetIoCurrent():
return {'data': 300, 'error': 'NO_ERROR'}
def GetStatus():
return {'data': {
'powerInput': 'adapter connected',
'powerInput5vIo': 'powered'
}, 'error': 'NO_ERROR'}
| def get_charge_level():
return {'data': 42, 'error': 'NO_ERROR'}
def get_battery_temperature():
return {'data': 25.4, 'error': 'NO_ERROR'}
def get_battery_voltage():
return {'data': 3111, 'error': 'NO_ERROR'}
def get_battery_current():
return {'data': 800, 'error': 'NO_ERROR'}
def get_io_voltage():
return {'data': 5432, 'error': 'NO_ERROR'}
def get_io_current():
return {'data': 300, 'error': 'NO_ERROR'}
def get_status():
return {'data': {'powerInput': 'adapter connected', 'powerInput5vIo': 'powered'}, 'error': 'NO_ERROR'} |
def message_replier(messages):
for message in messages:
userid = message.from_user.id
banlist = redisserver.sismember('zigzag_banlist', '{}'.format(userid))
if banlist:
return
if userid in messanger_list:
bot.reply_to(message, MESSANGER_LEAVE_MSG, parse_mode="Markdown")
messanger_list.remove(userid)
bot.send_message("-" + str(SUPPORT_GP), "New feedback!:")
bot.forward_message("-" + str(SUPPORT_GP), message.chat.id, message.message_id)
return
if REPLIER:
if message.text in reply_message_list:
bot.reply_to(message, reply_message_list.get(message.text), parse_mode="Markdown")
if message.text == "Send feedback":
bot.reply_to(message, MESSANGER_JOIN_MSG, parse_mode="Markdown")
messanger_list.append(userid)
return
if userid in in_chat_with_support:
bot.forward_message("-" + str(SUPPORT_GP), message.chat.id, message.message_id)
return
if message.from_user.id in ADMINS_IDS:
if message.chat.id == -SUPPORT_GP:
try:
bot.forward_message(message.reply_to_message.forward_from.id, message.chat.id, message.message_id)
bot.reply_to(message, "REPLY SENT")
except:
bot.reply_to(message, "ERROR SENDING?")
| def message_replier(messages):
for message in messages:
userid = message.from_user.id
banlist = redisserver.sismember('zigzag_banlist', '{}'.format(userid))
if banlist:
return
if userid in messanger_list:
bot.reply_to(message, MESSANGER_LEAVE_MSG, parse_mode='Markdown')
messanger_list.remove(userid)
bot.send_message('-' + str(SUPPORT_GP), 'New feedback!:')
bot.forward_message('-' + str(SUPPORT_GP), message.chat.id, message.message_id)
return
if REPLIER:
if message.text in reply_message_list:
bot.reply_to(message, reply_message_list.get(message.text), parse_mode='Markdown')
if message.text == 'Send feedback':
bot.reply_to(message, MESSANGER_JOIN_MSG, parse_mode='Markdown')
messanger_list.append(userid)
return
if userid in in_chat_with_support:
bot.forward_message('-' + str(SUPPORT_GP), message.chat.id, message.message_id)
return
if message.from_user.id in ADMINS_IDS:
if message.chat.id == -SUPPORT_GP:
try:
bot.forward_message(message.reply_to_message.forward_from.id, message.chat.id, message.message_id)
bot.reply_to(message, 'REPLY SENT')
except:
bot.reply_to(message, 'ERROR SENDING?') |
"""
import time
import redis
from flask import render_template, request, current_app, jsonify, redirect, session
from init import app
from utils.interceptors import loginOptional, jsonRequest, loginRequiredJSON
from utils.jsontools import *
from utils.logger import log
from scraper.video import dispatch
from scraper.video.Twitter import Twitter
@app.route('/helper/get_twitter_info.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
def ajax_helper_get_twitter_info(rd, user, data):
log(obj = {'url': data.url})
obj, cleanURL = dispatch(data.url)
if obj.NAME != 'twitter' :
log(obj = {'msg': 'NOT_TWITTER'})
return makeResponseFailed('NOT_TWITTER')
info = obj.get_metadata(obj, cleanURL)
if info["status"] != 'SUCCEED' :
log(obj = {'msg': 'FETCH_FAILED', 'info': info})
return makeResponseFailed('FETCH_FAILED')
return info
@app.route('/helper/get_ytb_info.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
def ajax_helper_get_ytb_info(rd, user, data):
log(obj = {'url': data.url})
obj, cleanURL = dispatch(data.url)
if obj.NAME != 'youtube' :
log(obj = {'msg': 'NOT_YOUTUBE'})
return makeResponseFailed('NOT_YOUTUBE')
info = obj.get_metadata(obj, cleanURL)
if info["status"] != 'SUCCEED' :
log(obj = {'msg': 'FETCH_FAILED', 'info': info})
return makeResponseFailed('FETCH_FAILED')
return info
"""
| """
import time
import redis
from flask import render_template, request, current_app, jsonify, redirect, session
from init import app
from utils.interceptors import loginOptional, jsonRequest, loginRequiredJSON
from utils.jsontools import *
from utils.logger import log
from scraper.video import dispatch
from scraper.video.Twitter import Twitter
@app.route('/helper/get_twitter_info.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
def ajax_helper_get_twitter_info(rd, user, data):
log(obj = {'url': data.url})
obj, cleanURL = dispatch(data.url)
if obj.NAME != 'twitter' :
log(obj = {'msg': 'NOT_TWITTER'})
return makeResponseFailed('NOT_TWITTER')
info = obj.get_metadata(obj, cleanURL)
if info["status"] != 'SUCCEED' :
log(obj = {'msg': 'FETCH_FAILED', 'info': info})
return makeResponseFailed('FETCH_FAILED')
return info
@app.route('/helper/get_ytb_info.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
def ajax_helper_get_ytb_info(rd, user, data):
log(obj = {'url': data.url})
obj, cleanURL = dispatch(data.url)
if obj.NAME != 'youtube' :
log(obj = {'msg': 'NOT_YOUTUBE'})
return makeResponseFailed('NOT_YOUTUBE')
info = obj.get_metadata(obj, cleanURL)
if info["status"] != 'SUCCEED' :
log(obj = {'msg': 'FETCH_FAILED', 'info': info})
return makeResponseFailed('FETCH_FAILED')
return info
""" |
def maior_E_menor(x, y):
if x > y:
return x, y
return y, x
x = int(input())
y = int(input())
if(x == y):
print("0")
else:
maior, menor = maior_E_menor(x, y)
soma = 0
menor +=1
while(menor < maior):
if menor % 2 != 0:
soma += menor
menor += 1
print(soma) | def maior_e_menor(x, y):
if x > y:
return (x, y)
return (y, x)
x = int(input())
y = int(input())
if x == y:
print('0')
else:
(maior, menor) = maior_e_menor(x, y)
soma = 0
menor += 1
while menor < maior:
if menor % 2 != 0:
soma += menor
menor += 1
print(soma) |
def match(key, value):
return {"match": {key: value}}
def exists(field):
return {"exists": {"field": field}}
def add_to_dict(dict, key, value):
dict.update({key: value})
def build_more_like_this_query(count, content, language):
query_body = {"size": count, "query": {"bool": {}}} # initial empty query
must = []
if language:
more_like_this = {}
add_to_dict(more_like_this, "fields", ["content", "title"])
add_to_dict(more_like_this, "like", content)
add_to_dict(more_like_this, "min_term_freq", 1)
add_to_dict(more_like_this, "max_query_terms", 25)
must.append({"more_like_this": more_like_this})
must.append(match("language", language.name))
query_body["query"]["bool"].update({"must": must})
return query_body
def build_elastic_query(
count,
search_terms,
topics,
unwanted_topics,
user_topics,
unwanted_user_topics,
language,
upper_bounds,
lower_bounds,
es_scale="3d",
es_decay=0.8,
es_weight=4.2,
):
"""
Builds an elastic search query.
Does this by building a big JSON object.
Example of a final query body:
{'size': 20.0, 'query':
{'bool':
{
'filter':
{
'range':
{
'fk_difficulty':
{
'gt': 0,
'lt': 100
}
}
},
'should': [
{'match': {'topics': 'Sport'}},
{'match': {'content': 'soccer ronaldo'}},
{'match': {'title': 'soccer ronaldo'}}
],
'must': [
{'match': {'language': 'English'}}
],
'must_not': [
{'match': {'topics': 'Health'}},
{'match': {'content': 'messi'}},
{'match': {'title': 'messi'}}
]
}
}
}
"""
# must = mandatory, has to occur
# must not = has to not occur
# should = nice to have (extra points if it matches)
must = []
must_not = []
should = []
bool_query_body = {"query": {"bool": {}}} # initial empty bool query
if language:
must.append(match("language", language.name))
if topics:
should.append(match("topics", topics))
if not search_terms:
search_terms = ""
if not user_topics:
user_topics = ""
if search_terms or user_topics:
search_string = search_terms + " " + user_topics
should.append(match("content", search_string))
should.append(match("title", search_string))
if unwanted_topics:
must_not.append(match("topics", unwanted_topics))
if unwanted_user_topics:
must_not.append(match("content", unwanted_user_topics))
must_not.append(match("title", unwanted_user_topics))
must.append(exists("published_time"))
# add the must, must_not and should lists to the query body
bool_query_body["query"]["bool"].update(
{
"filter": {
"range": {"fk_difficulty": {"gt": lower_bounds, "lt": upper_bounds}}
}
}
)
bool_query_body["query"]["bool"].update({"should": should})
bool_query_body["query"]["bool"].update({"must": must})
bool_query_body["query"]["bool"].update({"must_not": must_not})
full_query = {"size": count, "query": {"function_score": {}}}
function1 = {
# original parameters by Simon & Marcus
# "gauss": {"published_time": {"scale": "365d", "offset": "7d", "decay": 0.3}},
# "weight": 1.2,
"gauss": {
"published_time": {"origin": "now", "scale": es_scale, "decay": es_decay}
},
"weight": es_weight,
}
full_query["query"]["function_score"].update({"functions": [function1]})
full_query["query"]["function_score"].update(bool_query_body)
print(full_query)
return full_query
| def match(key, value):
return {'match': {key: value}}
def exists(field):
return {'exists': {'field': field}}
def add_to_dict(dict, key, value):
dict.update({key: value})
def build_more_like_this_query(count, content, language):
query_body = {'size': count, 'query': {'bool': {}}}
must = []
if language:
more_like_this = {}
add_to_dict(more_like_this, 'fields', ['content', 'title'])
add_to_dict(more_like_this, 'like', content)
add_to_dict(more_like_this, 'min_term_freq', 1)
add_to_dict(more_like_this, 'max_query_terms', 25)
must.append({'more_like_this': more_like_this})
must.append(match('language', language.name))
query_body['query']['bool'].update({'must': must})
return query_body
def build_elastic_query(count, search_terms, topics, unwanted_topics, user_topics, unwanted_user_topics, language, upper_bounds, lower_bounds, es_scale='3d', es_decay=0.8, es_weight=4.2):
"""
Builds an elastic search query.
Does this by building a big JSON object.
Example of a final query body:
{'size': 20.0, 'query':
{'bool':
{
'filter':
{
'range':
{
'fk_difficulty':
{
'gt': 0,
'lt': 100
}
}
},
'should': [
{'match': {'topics': 'Sport'}},
{'match': {'content': 'soccer ronaldo'}},
{'match': {'title': 'soccer ronaldo'}}
],
'must': [
{'match': {'language': 'English'}}
],
'must_not': [
{'match': {'topics': 'Health'}},
{'match': {'content': 'messi'}},
{'match': {'title': 'messi'}}
]
}
}
}
"""
must = []
must_not = []
should = []
bool_query_body = {'query': {'bool': {}}}
if language:
must.append(match('language', language.name))
if topics:
should.append(match('topics', topics))
if not search_terms:
search_terms = ''
if not user_topics:
user_topics = ''
if search_terms or user_topics:
search_string = search_terms + ' ' + user_topics
should.append(match('content', search_string))
should.append(match('title', search_string))
if unwanted_topics:
must_not.append(match('topics', unwanted_topics))
if unwanted_user_topics:
must_not.append(match('content', unwanted_user_topics))
must_not.append(match('title', unwanted_user_topics))
must.append(exists('published_time'))
bool_query_body['query']['bool'].update({'filter': {'range': {'fk_difficulty': {'gt': lower_bounds, 'lt': upper_bounds}}}})
bool_query_body['query']['bool'].update({'should': should})
bool_query_body['query']['bool'].update({'must': must})
bool_query_body['query']['bool'].update({'must_not': must_not})
full_query = {'size': count, 'query': {'function_score': {}}}
function1 = {'gauss': {'published_time': {'origin': 'now', 'scale': es_scale, 'decay': es_decay}}, 'weight': es_weight}
full_query['query']['function_score'].update({'functions': [function1]})
full_query['query']['function_score'].update(bool_query_body)
print(full_query)
return full_query |
# Want to extract domain hotmail.com
data = 'From ritchie_ng@hotmail.com Tues May 31'
at_position = data.find('@')
print(at_position)
space_position = data.find(' ', at_position)
# Starting from at_position, where's the next space
print(space_position)
host = data[at_position + 1: space_position]
print(host) | data = 'From ritchie_ng@hotmail.com Tues May 31'
at_position = data.find('@')
print(at_position)
space_position = data.find(' ', at_position)
print(space_position)
host = data[at_position + 1:space_position]
print(host) |
def stable_sorted_copy(alist, _indices=xrange(sys.maxint)):
# the 'decorate' step: make a list such that each item
# is the concatenation of sort-keys in order of decreasing
# significance -- we'll sort this auxiliary-list
decorated = zip(alist, _indices)
# the 'sort' step: just builtin-sort the auxiliary list
decorated.sort()
# the 'undecorate' step: extract the items from the
# decorated, and now correctly sorted, auxiliary list
return [ item for item, index in decorated ]
def stable_sort_inplace(alist):
# if "inplace" sorting is desired, simplest is to assign
# to a slice-of-all-items of the original input list
alist[:] = stable_sorted_copy(alist)
| def stable_sorted_copy(alist, _indices=xrange(sys.maxint)):
decorated = zip(alist, _indices)
decorated.sort()
return [item for (item, index) in decorated]
def stable_sort_inplace(alist):
alist[:] = stable_sorted_copy(alist) |
def wellbracketed(s):
c=0
for i in range(0, len(s)):
if s[i] == "(":
c = c + 1
elif s[i] == ")":
c = c - 1
if c == 0:
return(True)
else:
return(False) | def wellbracketed(s):
c = 0
for i in range(0, len(s)):
if s[i] == '(':
c = c + 1
elif s[i] == ')':
c = c - 1
if c == 0:
return True
else:
return False |
class UCOMIMoniker:
""" Use System.Runtime.InteropServices.ComTypes.IMoniker instead. """
def BindToObject(self, pbc, pmkToLeft, riidResult, ppvResult):
"""
BindToObject(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,riidResult: Guid) -> (Guid,object)
Uses the moniker to bind to the object it identifies.
pbc: A reference to the IBindCtx interface on the bind context object used in this binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
riidResult: The interface identifier (IID) of the interface the client intends to use to communicate with
the object that the moniker identifies.
"""
pass
def BindToStorage(self, pbc, pmkToLeft, riid, ppvObj):
"""
BindToStorage(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,riid: Guid) -> (Guid,object)
Retrieves an interface pointer to the storage that contains the object identified by the moniker.
pbc: A reference to the IBindCtx interface on the bind context object used during this binding
operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
riid: The interface identifier (IID) of the storage interface requested.
"""
pass
def CommonPrefixWith(self, pmkOther, ppmkPrefix):
"""
CommonPrefixWith(self: UCOMIMoniker,pmkOther: UCOMIMoniker) -> UCOMIMoniker
Creates a new moniker based on the common prefix that this moniker shares with another moniker.
pmkOther: A reference to the IMoniker interface on another moniker to compare with this for a common
prefix.
"""
pass
def ComposeWith(self, pmkRight, fOnlyIfNotGeneric, ppmkComposite):
"""
ComposeWith(self: UCOMIMoniker,pmkRight: UCOMIMoniker,fOnlyIfNotGeneric: bool) -> UCOMIMoniker
Combines the current moniker with another moniker,creating a new composite moniker.
pmkRight: A reference to the IMoniker interface on the moniker to compose onto the end of this moniker.
fOnlyIfNotGeneric: If true,the caller requires a nongeneric composition,so the operation proceeds only if
pmkRight is a moniker class that this moniker can compose with in some way other than forming a
generic composite. If false,the method can create a generic composite if necessary.
"""
pass
def Enum(self, fForward, ppenumMoniker):
"""
Enum(self: UCOMIMoniker,fForward: bool) -> UCOMIEnumMoniker
Supplies a pointer to an enumerator that can enumerate the components of a composite moniker.
fForward: If true,enumerates the monikers from left to right. If false,enumerates from right to left.
"""
pass
def GetClassID(self, pClassID):
"""
GetClassID(self: UCOMIMoniker) -> Guid
Retrieves the class identifier (CLSID) of an object.
"""
pass
def GetDisplayName(self, pbc, pmkToLeft, ppszDisplayName):
"""
GetDisplayName(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker) -> str
Gets the display name,which is a user-readable representation of this moniker.
pbc: A reference to the bind context to use in this operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
"""
pass
def GetSizeMax(self, pcbSize):
"""
GetSizeMax(self: UCOMIMoniker) -> Int64
Returns the size in bytes of the stream needed to save the object.
"""
pass
def GetTimeOfLastChange(self, pbc, pmkToLeft, pFileTime):
"""
GetTimeOfLastChange(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker) -> FILETIME
Provides a number representing the time the object identified by this moniker was last changed.
pbc: A reference to the bind context to be used in this binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
"""
pass
def Hash(self, pdwHash):
"""
Hash(self: UCOMIMoniker) -> int
Calculates a 32-bit integer using the internal state of the moniker.
"""
pass
def Inverse(self, ppmk):
"""
Inverse(self: UCOMIMoniker) -> UCOMIMoniker
Provides a moniker that,when composed to the right of this moniker or one of similar structure,
composes to nothing.
"""
pass
def IsDirty(self):
"""
IsDirty(self: UCOMIMoniker) -> int
Checks the object for changes since it was last saved.
Returns: An S_OKHRESULT value if the object has changed; otherwise,an S_FALSEHRESULT value.
"""
pass
def IsEqual(self, pmkOtherMoniker):
"""
IsEqual(self: UCOMIMoniker,pmkOtherMoniker: UCOMIMoniker)
Compares this moniker with a specified moniker and indicates whether they are identical.
pmkOtherMoniker: A reference to the moniker to be used for comparison.
"""
pass
def IsRunning(self, pbc, pmkToLeft, pmkNewlyRunning):
"""
IsRunning(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,pmkNewlyRunning: UCOMIMoniker)
Determines whether the object that is identified by this moniker is currently loaded and running.
pbc: A reference to the bind context to be used in this binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker if this moniker is part of a composite.
pmkNewlyRunning: A reference to the moniker most recently added to the Running Object Table.
"""
pass
def IsSystemMoniker(self, pdwMksys):
"""
IsSystemMoniker(self: UCOMIMoniker) -> int
Indicates whether this moniker is of one of the system-supplied moniker classes.
"""
pass
def Load(self, pStm):
"""
Load(self: UCOMIMoniker,pStm: UCOMIStream)
Initializes an object from the stream where it was previously saved.
pStm: Stream from which the object is loaded.
"""
pass
def ParseDisplayName(self, pbc, pmkToLeft, pszDisplayName, pchEaten, ppmkOut):
"""
ParseDisplayName(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,pszDisplayName: str) -> (int,UCOMIMoniker)
Reads as many characters of the specified display name as it understands and builds a moniker
corresponding to the portion read.
pbc: A reference to the bind context to be used in this binding operation.
pmkToLeft: A reference to the moniker that has been built out of the display name up to this point.
pszDisplayName: A reference to the string containing the remaining display name to parse.
"""
pass
def Reduce(self, pbc, dwReduceHowFar, ppmkToLeft, ppmkReduced):
"""
Reduce(self: UCOMIMoniker,pbc: UCOMIBindCtx,dwReduceHowFar: int,ppmkToLeft: UCOMIMoniker) -> (UCOMIMoniker,UCOMIMoniker)
Returns a reduced moniker which is another moniker that refers to the same object as this
moniker but can be bound with equal or greater efficiency.
pbc: A reference to the IBindCtx interface on the bind context to be used in this binding operation.
dwReduceHowFar: Specifies how far this moniker should be reduced.
ppmkToLeft: A reference to the moniker to the left of this moniker.
"""
pass
def RelativePathTo(self, pmkOther, ppmkRelPath):
"""
RelativePathTo(self: UCOMIMoniker,pmkOther: UCOMIMoniker) -> UCOMIMoniker
Supplies a moniker that,when appended to this moniker (or one with a similar structure),yields
the specified moniker.
pmkOther: A reference to the moniker to which a relative path should be taken.
"""
pass
def Save(self, pStm, fClearDirty):
"""
Save(self: UCOMIMoniker,pStm: UCOMIStream,fClearDirty: bool)
Saves an object to the specified stream.
pStm: The stream into which the object is saved.
fClearDirty: Indicates whether to clear the modified flag after the save is complete.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
| class Ucomimoniker:
""" Use System.Runtime.InteropServices.ComTypes.IMoniker instead. """
def bind_to_object(self, pbc, pmkToLeft, riidResult, ppvResult):
"""
BindToObject(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,riidResult: Guid) -> (Guid,object)
Uses the moniker to bind to the object it identifies.
pbc: A reference to the IBindCtx interface on the bind context object used in this binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
riidResult: The interface identifier (IID) of the interface the client intends to use to communicate with
the object that the moniker identifies.
"""
pass
def bind_to_storage(self, pbc, pmkToLeft, riid, ppvObj):
"""
BindToStorage(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,riid: Guid) -> (Guid,object)
Retrieves an interface pointer to the storage that contains the object identified by the moniker.
pbc: A reference to the IBindCtx interface on the bind context object used during this binding
operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
riid: The interface identifier (IID) of the storage interface requested.
"""
pass
def common_prefix_with(self, pmkOther, ppmkPrefix):
"""
CommonPrefixWith(self: UCOMIMoniker,pmkOther: UCOMIMoniker) -> UCOMIMoniker
Creates a new moniker based on the common prefix that this moniker shares with another moniker.
pmkOther: A reference to the IMoniker interface on another moniker to compare with this for a common
prefix.
"""
pass
def compose_with(self, pmkRight, fOnlyIfNotGeneric, ppmkComposite):
"""
ComposeWith(self: UCOMIMoniker,pmkRight: UCOMIMoniker,fOnlyIfNotGeneric: bool) -> UCOMIMoniker
Combines the current moniker with another moniker,creating a new composite moniker.
pmkRight: A reference to the IMoniker interface on the moniker to compose onto the end of this moniker.
fOnlyIfNotGeneric: If true,the caller requires a nongeneric composition,so the operation proceeds only if
pmkRight is a moniker class that this moniker can compose with in some way other than forming a
generic composite. If false,the method can create a generic composite if necessary.
"""
pass
def enum(self, fForward, ppenumMoniker):
"""
Enum(self: UCOMIMoniker,fForward: bool) -> UCOMIEnumMoniker
Supplies a pointer to an enumerator that can enumerate the components of a composite moniker.
fForward: If true,enumerates the monikers from left to right. If false,enumerates from right to left.
"""
pass
def get_class_id(self, pClassID):
"""
GetClassID(self: UCOMIMoniker) -> Guid
Retrieves the class identifier (CLSID) of an object.
"""
pass
def get_display_name(self, pbc, pmkToLeft, ppszDisplayName):
"""
GetDisplayName(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker) -> str
Gets the display name,which is a user-readable representation of this moniker.
pbc: A reference to the bind context to use in this operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
"""
pass
def get_size_max(self, pcbSize):
"""
GetSizeMax(self: UCOMIMoniker) -> Int64
Returns the size in bytes of the stream needed to save the object.
"""
pass
def get_time_of_last_change(self, pbc, pmkToLeft, pFileTime):
"""
GetTimeOfLastChange(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker) -> FILETIME
Provides a number representing the time the object identified by this moniker was last changed.
pbc: A reference to the bind context to be used in this binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker,if the moniker is part of a composite
moniker.
"""
pass
def hash(self, pdwHash):
"""
Hash(self: UCOMIMoniker) -> int
Calculates a 32-bit integer using the internal state of the moniker.
"""
pass
def inverse(self, ppmk):
"""
Inverse(self: UCOMIMoniker) -> UCOMIMoniker
Provides a moniker that,when composed to the right of this moniker or one of similar structure,
composes to nothing.
"""
pass
def is_dirty(self):
"""
IsDirty(self: UCOMIMoniker) -> int
Checks the object for changes since it was last saved.
Returns: An S_OKHRESULT value if the object has changed; otherwise,an S_FALSEHRESULT value.
"""
pass
def is_equal(self, pmkOtherMoniker):
"""
IsEqual(self: UCOMIMoniker,pmkOtherMoniker: UCOMIMoniker)
Compares this moniker with a specified moniker and indicates whether they are identical.
pmkOtherMoniker: A reference to the moniker to be used for comparison.
"""
pass
def is_running(self, pbc, pmkToLeft, pmkNewlyRunning):
"""
IsRunning(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,pmkNewlyRunning: UCOMIMoniker)
Determines whether the object that is identified by this moniker is currently loaded and running.
pbc: A reference to the bind context to be used in this binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker if this moniker is part of a composite.
pmkNewlyRunning: A reference to the moniker most recently added to the Running Object Table.
"""
pass
def is_system_moniker(self, pdwMksys):
"""
IsSystemMoniker(self: UCOMIMoniker) -> int
Indicates whether this moniker is of one of the system-supplied moniker classes.
"""
pass
def load(self, pStm):
"""
Load(self: UCOMIMoniker,pStm: UCOMIStream)
Initializes an object from the stream where it was previously saved.
pStm: Stream from which the object is loaded.
"""
pass
def parse_display_name(self, pbc, pmkToLeft, pszDisplayName, pchEaten, ppmkOut):
"""
ParseDisplayName(self: UCOMIMoniker,pbc: UCOMIBindCtx,pmkToLeft: UCOMIMoniker,pszDisplayName: str) -> (int,UCOMIMoniker)
Reads as many characters of the specified display name as it understands and builds a moniker
corresponding to the portion read.
pbc: A reference to the bind context to be used in this binding operation.
pmkToLeft: A reference to the moniker that has been built out of the display name up to this point.
pszDisplayName: A reference to the string containing the remaining display name to parse.
"""
pass
def reduce(self, pbc, dwReduceHowFar, ppmkToLeft, ppmkReduced):
"""
Reduce(self: UCOMIMoniker,pbc: UCOMIBindCtx,dwReduceHowFar: int,ppmkToLeft: UCOMIMoniker) -> (UCOMIMoniker,UCOMIMoniker)
Returns a reduced moniker which is another moniker that refers to the same object as this
moniker but can be bound with equal or greater efficiency.
pbc: A reference to the IBindCtx interface on the bind context to be used in this binding operation.
dwReduceHowFar: Specifies how far this moniker should be reduced.
ppmkToLeft: A reference to the moniker to the left of this moniker.
"""
pass
def relative_path_to(self, pmkOther, ppmkRelPath):
"""
RelativePathTo(self: UCOMIMoniker,pmkOther: UCOMIMoniker) -> UCOMIMoniker
Supplies a moniker that,when appended to this moniker (or one with a similar structure),yields
the specified moniker.
pmkOther: A reference to the moniker to which a relative path should be taken.
"""
pass
def save(self, pStm, fClearDirty):
"""
Save(self: UCOMIMoniker,pStm: UCOMIStream,fClearDirty: bool)
Saves an object to the specified stream.
pStm: The stream into which the object is saved.
fClearDirty: Indicates whether to clear the modified flag after the save is complete.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass |
# https://www.tutorialspoint.com/python_data_structure/python_binary_tree.htm
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def insert(self, data):
if self.data:
if data < self.data:
if not self.left:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
if not self.right:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
def print_tree(self):
if self.left:
self.left.print_tree()
print(self.data)
if self.right:
self.right.print_tree()
def in_order_traversal(self, root):
"""left -> root -> right"""
res = []
if root:
res = self.in_order_traversal(root.left)
res.append(root.data)
res += self.in_order_traversal(root.right)
return res
def pre_order_traversal(self, root):
"""root -> left -> right"""
res = []
if root:
res.append(root.data)
res += self.pre_order_traversal(root.left)
res += self.pre_order_traversal(root.right)
return res
def post_order_traversal(self, root):
"""left -> right -> root"""
res = []
if root:
res = self.post_order_traversal(root.left)
res += self.post_order_traversal(root.right)
res.append(root.data)
return res
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
root.insert(20)
print("Print tree:")
root.print_tree()
print("In order traversal:")
print(root.in_order_traversal(root))
print("Pre order traversal:")
print(root.pre_order_traversal(root))
print("Post order traversal:")
print(root.post_order_traversal(root))
| class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def insert(self, data):
if self.data:
if data < self.data:
if not self.left:
self.left = node(data)
else:
self.left.insert(data)
elif data > self.data:
if not self.right:
self.right = node(data)
else:
self.right.insert(data)
else:
self.data = data
def print_tree(self):
if self.left:
self.left.print_tree()
print(self.data)
if self.right:
self.right.print_tree()
def in_order_traversal(self, root):
"""left -> root -> right"""
res = []
if root:
res = self.in_order_traversal(root.left)
res.append(root.data)
res += self.in_order_traversal(root.right)
return res
def pre_order_traversal(self, root):
"""root -> left -> right"""
res = []
if root:
res.append(root.data)
res += self.pre_order_traversal(root.left)
res += self.pre_order_traversal(root.right)
return res
def post_order_traversal(self, root):
"""left -> right -> root"""
res = []
if root:
res = self.post_order_traversal(root.left)
res += self.post_order_traversal(root.right)
res.append(root.data)
return res
root = node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
root.insert(20)
print('Print tree:')
root.print_tree()
print('In order traversal:')
print(root.in_order_traversal(root))
print('Pre order traversal:')
print(root.pre_order_traversal(root))
print('Post order traversal:')
print(root.post_order_traversal(root)) |
#!/usr/bin/env python3
# Get superior triangular matrix a)
n = 3
A = [[1, 1/2, 1/3], [1/2, 1/3, 1/4], [1/3, 1/4, 1/5]]
b = [-1, 1, 1]
for i in range(n):
pivot = A[i][i]
b[i] /= pivot
for j in range(n):
A[i][j] /= pivot
for j in range(i+1, n):
times = A[j][i]
b[j] -= times * b[i]
for k in range(n):
A[j][k] -= times * A[i][k]
print("A:", A)
print("b:", b)
# Get x b)
x = [0, 0, 0]
x[2] = b[2]
x[1] = b[1] - A[1][2] * x[2]
x[0] = b[0] - A[0][2] * x[2] - A[0][1] * x[1]
print("x:", x)
# EXTERNAL STABILITY
# A.dx = db - dA.x
# don't feel like importing copy
A = [[1, 1/2, 1/3], [1/2, 1/3, 1/4], [1/3, 1/4, 1/5]]
# calculate new b
dA = 0.05
db = 0.05
nA = [[dA, dA, dA], [dA, dA, dA], [dA, dA, dA]]
b = [db, db, db]
for i in range(n):
for j in range(n):
b[i] -= nA[i][j] * x[j]
# solve new system
for i in range(n):
pivot = A[i][i]
b[i] /= pivot
for j in range(n):
A[i][j] /= pivot
for j in range(i+1, n):
times = A[j][i]
b[j] -= times * b[i]
for k in range(n):
A[j][k] -= times * A[i][k]
print("\nA:", A)
print("b:", b)
# Get x (external stability)
x = [0, 0, 0]
x[2] = b[2]
x[1] = b[1] - A[1][2] * x[2]
x[0] = b[0] - A[0][2] * x[2] - A[0][1] * x[1]
print("stablity:", x)
# The one with the most sensitivity to erros is x3 (aka x[2] or z) d)
# abs(x[0] < x[1] < x[2])
print("most sensitive:", max(abs(i) for i in x))
| n = 3
a = [[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]]
b = [-1, 1, 1]
for i in range(n):
pivot = A[i][i]
b[i] /= pivot
for j in range(n):
A[i][j] /= pivot
for j in range(i + 1, n):
times = A[j][i]
b[j] -= times * b[i]
for k in range(n):
A[j][k] -= times * A[i][k]
print('A:', A)
print('b:', b)
x = [0, 0, 0]
x[2] = b[2]
x[1] = b[1] - A[1][2] * x[2]
x[0] = b[0] - A[0][2] * x[2] - A[0][1] * x[1]
print('x:', x)
a = [[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]]
d_a = 0.05
db = 0.05
n_a = [[dA, dA, dA], [dA, dA, dA], [dA, dA, dA]]
b = [db, db, db]
for i in range(n):
for j in range(n):
b[i] -= nA[i][j] * x[j]
for i in range(n):
pivot = A[i][i]
b[i] /= pivot
for j in range(n):
A[i][j] /= pivot
for j in range(i + 1, n):
times = A[j][i]
b[j] -= times * b[i]
for k in range(n):
A[j][k] -= times * A[i][k]
print('\nA:', A)
print('b:', b)
x = [0, 0, 0]
x[2] = b[2]
x[1] = b[1] - A[1][2] * x[2]
x[0] = b[0] - A[0][2] * x[2] - A[0][1] * x[1]
print('stablity:', x)
print('most sensitive:', max((abs(i) for i in x))) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###
# Name: Benjamin Seeley
# Student ID: 2262810
# Email: seele105@mail.chapman.edu
# Course: PHYS220/MATH220/CPSC220 Fall 2018
# Assignment: CW03
###
"""Contains helper functions that return lists of sequences.
"""
def fibonacci(n):
"""Returns n fibonacci numbers
Args:
n: number of fibonacci numbers to return
Returns:
List that contains n fibonacci numbers
Raises:
ValueError when n is not a positive integer
"""
if n < 1:
raise ValueError("n must be a positive integer")
output = []
f1 = 0
f2 = 1
fn = 1
count = 0
for i in range(n):
output.append(fn)
fn = f1 + f2
f1 = f2
f2 = fn
return output
| """Contains helper functions that return lists of sequences.
"""
def fibonacci(n):
"""Returns n fibonacci numbers
Args:
n: number of fibonacci numbers to return
Returns:
List that contains n fibonacci numbers
Raises:
ValueError when n is not a positive integer
"""
if n < 1:
raise value_error('n must be a positive integer')
output = []
f1 = 0
f2 = 1
fn = 1
count = 0
for i in range(n):
output.append(fn)
fn = f1 + f2
f1 = f2
f2 = fn
return output |
class CustomerAddWebsitePermissionDenied(Exception):
pass
class ObjectDoesNotExist(Exception):
pass
| class Customeraddwebsitepermissiondenied(Exception):
pass
class Objectdoesnotexist(Exception):
pass |
"""
Handle
A wrapper meant to collect the python object belonging to a blizzard 'handle'.
"""
class Handle:
handles = {}
def __init__(self, handle):
# constructorfunc can be a function that returns a handle, or a handle directly
self._handle = handle
if Handle.get(handle) != None:
print("Warning: secondary object created for handle ", handle, "object", self)
Handle.handles[self._handle] = self
@staticmethod
def get(handle):
if handle in Handle.handles:
return Handle.handles[handle]
return None
def destroy(self):
del Handle.handles[self._handle]
self._handle = None
@property
def handle(self):
return self._handle
| """
Handle
A wrapper meant to collect the python object belonging to a blizzard 'handle'.
"""
class Handle:
handles = {}
def __init__(self, handle):
self._handle = handle
if Handle.get(handle) != None:
print('Warning: secondary object created for handle ', handle, 'object', self)
Handle.handles[self._handle] = self
@staticmethod
def get(handle):
if handle in Handle.handles:
return Handle.handles[handle]
return None
def destroy(self):
del Handle.handles[self._handle]
self._handle = None
@property
def handle(self):
return self._handle |
#!/bin/python3
def main(person_list):
users = []
for name, email in person_list:
if email.endswith('@gmail.com'):
users.append(name)
print(*sorted(users), sep='\n')
if __name__ == '__main__':
N = int(input())
persons = []
for N_itr in range(N):
firstName, emailID = input().split()
persons.append((firstName, emailID))
main(persons)
| def main(person_list):
users = []
for (name, email) in person_list:
if email.endswith('@gmail.com'):
users.append(name)
print(*sorted(users), sep='\n')
if __name__ == '__main__':
n = int(input())
persons = []
for n_itr in range(N):
(first_name, email_id) = input().split()
persons.append((firstName, emailID))
main(persons) |
# basic example
class MetaSpam(type):
# notice how the __new__ method has the same arguments
# as the type function we used earlier.
def __new__(metaclass, name, bases, namespace):
name = 'SpamCreateByMeta'
bases = (int,) + bases
namespace['eggs'] = 1
return type.__new__(metaclass, name, bases, namespace)
# regular Spam
class Spam(object):
pass
print(Spam.__name__)
print(issubclass(Spam, int))
try:
Spam.eggs
except Exception as e:
print(e)
# meta Spam
class Spam(object, metaclass=MetaSpam):
pass
print(Spam.__name__)
print(issubclass(Spam, int))
print(Spam.eggs)
| class Metaspam(type):
def __new__(metaclass, name, bases, namespace):
name = 'SpamCreateByMeta'
bases = (int,) + bases
namespace['eggs'] = 1
return type.__new__(metaclass, name, bases, namespace)
class Spam(object):
pass
print(Spam.__name__)
print(issubclass(Spam, int))
try:
Spam.eggs
except Exception as e:
print(e)
class Spam(object, metaclass=MetaSpam):
pass
print(Spam.__name__)
print(issubclass(Spam, int))
print(Spam.eggs) |
"""
232. Implement Queue using Stacks
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Example:
MyQueue queue = new MyQueue();
queue.push(1);
queue.push(2);
queue.peek(); // returns 1
queue.pop(); // returns 1
queue.empty(); // returns false
Notes:
You must use only standard operations of a stack -- which means only push to top, peek/pop from top, size, and is empty operations are valid.
Depending on your language, stack may not be supported natively. You may simulate a stack by using a list or deque (double-ended queue), as long as you use only standard operations of a stack.
You may assume that all operations are valid (for example, no pop or peek operations will be called on an empty queue).
"""
class MyQueue:
def __init__(self):
"""
Initialize your data structure here.
"""
self.queue = []
def push(self, x: int):
"""
Push element x to the back of queue.
"""
self.queue.insert(0,x)
def pop(self):
"""
Removes the element from in front of queue and returns that element.
"""
return self.queue.pop()
def peek(self):
"""
Get the front element.
"""
return self.queue[-1]
def empty(self):
"""
Returns whether the queue is empty.
"""
return self.queue == []
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty() | """
232. Implement Queue using Stacks
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Example:
MyQueue queue = new MyQueue();
queue.push(1);
queue.push(2);
queue.peek(); // returns 1
queue.pop(); // returns 1
queue.empty(); // returns false
Notes:
You must use only standard operations of a stack -- which means only push to top, peek/pop from top, size, and is empty operations are valid.
Depending on your language, stack may not be supported natively. You may simulate a stack by using a list or deque (double-ended queue), as long as you use only standard operations of a stack.
You may assume that all operations are valid (for example, no pop or peek operations will be called on an empty queue).
"""
class Myqueue:
def __init__(self):
"""
Initialize your data structure here.
"""
self.queue = []
def push(self, x: int):
"""
Push element x to the back of queue.
"""
self.queue.insert(0, x)
def pop(self):
"""
Removes the element from in front of queue and returns that element.
"""
return self.queue.pop()
def peek(self):
"""
Get the front element.
"""
return self.queue[-1]
def empty(self):
"""
Returns whether the queue is empty.
"""
return self.queue == [] |
S = input()
for i in range(len(S)):
print(i + 1)
| s = input()
for i in range(len(S)):
print(i + 1) |
def c_to_f(c):
Farhenheite=(c*9/5)+32
return Farhenheite
n=int(input("Celcius="))
f=c_to_f(n)
print(f,"'F")
| def c_to_f(c):
farhenheite = c * 9 / 5 + 32
return Farhenheite
n = int(input('Celcius='))
f = c_to_f(n)
print(f, "'F") |
"""
Write a function that returns the lesser of two given numbers if both numbers are even,
but returns the greater if one or both numbers are odd.
Example 1:
lesser_of_two_evens(2, 4) output: 2
explanation:
the two parameters 2 and 4 are even numbers, therefore, we'll return the smallest even number
Example 2:
lesser_of_two_evens(2, 5) output: 5
explanation:
the first parameter 2 is even, but the second parameter 5 is odd, therefore, we'll return the greatest number
Example 3:
lesser_of_two_evens(7, 5) output: 7
explanation:
the two parameters are odd, therefore, we'll return the greatest number
"""
# write your code here
# don't forget to test your code
def lesser_of_two_evens(a, b):
pass | """
Write a function that returns the lesser of two given numbers if both numbers are even,
but returns the greater if one or both numbers are odd.
Example 1:
lesser_of_two_evens(2, 4) output: 2
explanation:
the two parameters 2 and 4 are even numbers, therefore, we'll return the smallest even number
Example 2:
lesser_of_two_evens(2, 5) output: 5
explanation:
the first parameter 2 is even, but the second parameter 5 is odd, therefore, we'll return the greatest number
Example 3:
lesser_of_two_evens(7, 5) output: 7
explanation:
the two parameters are odd, therefore, we'll return the greatest number
"""
def lesser_of_two_evens(a, b):
pass |
class QueueOverflow(BaseException):
pass
# Node of a doubly linkedlist
class Node:
# constructor
def __init__(self, data=None):
self.data = data
self.next = None
self.prev = None
# method for setting the data field of the node
def setData(self, data):
self.data = data
# method for getting the data field of the node
def getData(self):
return self.data
# method for setting the next field of the node
def setNext(self, nextOne):
self.next = nextOne
# method for getting the next field of the node
def getNext(self):
return self.next
# return True if the node has a pointer to the next node
def hasNext(self):
return self.next is not None
# method for setting the next field of the node
def setPrev(self, prevOne):
self.prev = prevOne
# method for getting the prev field of the node
def getPrev(self):
return self.prev
# return True if the node has a pointer to the previous node
def hasPrev(self):
return self.prev is not None
'''
returns a copy of the current Node's data
if include_pointers is set to True, the pointers
next and prev will be added to the returned node
'''
def copy(self, include_pointers=False):
if include_pointers:
to_return = Node(self.data)
to_return.next = self.next
to_return.prev = self.prev
return to_return
return Node(self.data)
# Linked Queue
class Queue:
'''
Initialize method, creates Stack
:param limit, max amount of elements in stack
'''
def __init__(self, limit=None):
self.limit = limit
self.head = None
self.tail = None
'''
Used to convert data to Node if data is not already Node
:param data, the data converted
'''
def __toNode(self, data):
if isinstance(data, Node):
return data.copy()
return Node(data)
'''
Checks if stack has too many elements, and if it does, it raises
StackOverflow
'''
def __isError(self):
if self.limit is not None and self.limit <= self.Size():
raise QueueOverflow
'''
Add elements to end of stack
:param data, element to add to stack
'''
def Push(self, data):
self.__isError()
data = self.__toNode(data)
if self.head is None:
self.head = data
self.tail = data
else:
data.prev = self.tail
self.tail.next = data
self.tail = data
'''
Removes last element in stack, returns the element popped
'''
def Pop(self):
if self.head == self.tail:
to_return = self.head
self.head = None
self.tail = None
else:
to_return = self.head
self.head = self.head.next
self.head.prev = None
return to_return
'''
Returns the last element in stack
'''
def Front(self):
return self.head
'''
Returns the size of the stack
'''
def Size(self):
self.current = self.head
currentNum = 0
if self.current is not None:
while self.current.getNext() is not None:
currentNum += 1
self.current = self.current.getNext()
return currentNum+1
return currentNum
'''
Returns a boolean value if the stack is empty or not
'''
def isEmptyQueue(self):
return self.head is None
'''
Returns a boolean value depending on if the stack is full
'''
def isFullQueue(self):
if isinstance(self.limit, int):
if self.limit == self.Size():
return True
return False
'''
Do not use this, testing only
'''
def showAll(self):
self.current = self.head
currentNum = 0
if self.current is not None:
while self.current.getNext() is not None:
currentNum += 1
yield self.current.data
self.current = self.current.getNext()
yield self.current.data
'''
Returns a copy of the stack
'''
def copy(self):
newStack = Queue()
self.current = self.head
currentNum = 0
if self.current is not None:
while self.current.getNext() is not None:
currentNum += 1
newStack.Push(self.current.data)
self.current = self.current.getNext()
newStack.Push(self.current.data)
return newStack
def load_from_iterable(self, iterable):
for item in iterable:
self.Push(item)
'''
Returns length of Stack
'''
def __len__(self):
return self.Size()
def __repr__(self):
return self
def __str__(self):
return str(list(self.showAll()))
| class Queueoverflow(BaseException):
pass
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
self.prev = None
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
def set_next(self, nextOne):
self.next = nextOne
def get_next(self):
return self.next
def has_next(self):
return self.next is not None
def set_prev(self, prevOne):
self.prev = prevOne
def get_prev(self):
return self.prev
def has_prev(self):
return self.prev is not None
"\n returns a copy of the current Node's data\n if include_pointers is set to True, the pointers\n next and prev will be added to the returned node\n "
def copy(self, include_pointers=False):
if include_pointers:
to_return = node(self.data)
to_return.next = self.next
to_return.prev = self.prev
return to_return
return node(self.data)
class Queue:
"""
Initialize method, creates Stack
:param limit, max amount of elements in stack
"""
def __init__(self, limit=None):
self.limit = limit
self.head = None
self.tail = None
'\n Used to convert data to Node if data is not already Node\n :param data, the data converted\n '
def __to_node(self, data):
if isinstance(data, Node):
return data.copy()
return node(data)
'\n Checks if stack has too many elements, and if it does, it raises\n StackOverflow\n '
def __is_error(self):
if self.limit is not None and self.limit <= self.Size():
raise QueueOverflow
'\n Add elements to end of stack\n :param data, element to add to stack\n '
def push(self, data):
self.__isError()
data = self.__toNode(data)
if self.head is None:
self.head = data
self.tail = data
else:
data.prev = self.tail
self.tail.next = data
self.tail = data
'\n Removes last element in stack, returns the element popped\n '
def pop(self):
if self.head == self.tail:
to_return = self.head
self.head = None
self.tail = None
else:
to_return = self.head
self.head = self.head.next
self.head.prev = None
return to_return
'\n Returns the last element in stack\n '
def front(self):
return self.head
'\n Returns the size of the stack\n '
def size(self):
self.current = self.head
current_num = 0
if self.current is not None:
while self.current.getNext() is not None:
current_num += 1
self.current = self.current.getNext()
return currentNum + 1
return currentNum
'\n Returns a boolean value if the stack is empty or not\n '
def is_empty_queue(self):
return self.head is None
'\n Returns a boolean value depending on if the stack is full\n '
def is_full_queue(self):
if isinstance(self.limit, int):
if self.limit == self.Size():
return True
return False
'\n Do not use this, testing only\n '
def show_all(self):
self.current = self.head
current_num = 0
if self.current is not None:
while self.current.getNext() is not None:
current_num += 1
yield self.current.data
self.current = self.current.getNext()
yield self.current.data
'\n Returns a copy of the stack\n '
def copy(self):
new_stack = queue()
self.current = self.head
current_num = 0
if self.current is not None:
while self.current.getNext() is not None:
current_num += 1
newStack.Push(self.current.data)
self.current = self.current.getNext()
newStack.Push(self.current.data)
return newStack
def load_from_iterable(self, iterable):
for item in iterable:
self.Push(item)
'\n Returns length of Stack\n '
def __len__(self):
return self.Size()
def __repr__(self):
return self
def __str__(self):
return str(list(self.showAll())) |
#!/usr/bin/python
'''
Copyright 2016 Aaron Stephens <aaron@icebrg.io>, ICEBRG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class CodeDirectory(object):
# Constructor
def __init__(self, version=None, flags=None, hash_offset=None,
ident_offset=None, n_special_slots=None, n_code_slots=None,
code_limit=None, hash_size=None, hash_type=None,
platform=None, page_size=None, scatter_offset=None,
team_id_offset=None, identity=None, team_id=None):
self.version = version
self.flags = flags
self.hash_offset = hash_offset
self.ident_offset = ident_offset
self.n_special_slots = n_special_slots
self.n_code_slots = n_code_slots
self.code_limit = code_limit
self.hash_size = hash_size
self.hash_type = hash_type
self._platform = platform
self.page_size = page_size
self._scatter_offset = scatter_offset
self._team_id_offset = team_id_offset
self.identity = identity
self._team_id = team_id
self.hashes = []
# Properties
@property
def platform(self):
if self.version >= 0x20200:
return self._platform
return None
@platform.setter
def platform(self, platform):
self._platform = platform
@property
def scatter_offset(self):
if self.version >= 0x20100:
return self._scatter_offset
return None
@scatter_offset.setter
def scatter_offset(self, scatter_offset):
self._scatter_offset = scatter_offset
@property
def team_id_offset(self):
if self.version >= 0x20200:
return self._team_id_offset
@team_id_offset.setter
def team_id_offset(self, team_id_offset):
self._team_id_offset = team_id_offset
@property
def team_id(self):
if self.version >= 0x20200 and self.team_id_offset != 0:
return self._team_id
return None
@team_id.setter
def team_id(self, team_id):
self._team_id = team_id
# Generators
def gen_hashes(self):
for i in self.hashes:
yield i
# Functions
def add_hash(self, hash): self.hashes.append(hash)
| """
Copyright 2016 Aaron Stephens <aaron@icebrg.io>, ICEBRG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Codedirectory(object):
def __init__(self, version=None, flags=None, hash_offset=None, ident_offset=None, n_special_slots=None, n_code_slots=None, code_limit=None, hash_size=None, hash_type=None, platform=None, page_size=None, scatter_offset=None, team_id_offset=None, identity=None, team_id=None):
self.version = version
self.flags = flags
self.hash_offset = hash_offset
self.ident_offset = ident_offset
self.n_special_slots = n_special_slots
self.n_code_slots = n_code_slots
self.code_limit = code_limit
self.hash_size = hash_size
self.hash_type = hash_type
self._platform = platform
self.page_size = page_size
self._scatter_offset = scatter_offset
self._team_id_offset = team_id_offset
self.identity = identity
self._team_id = team_id
self.hashes = []
@property
def platform(self):
if self.version >= 131584:
return self._platform
return None
@platform.setter
def platform(self, platform):
self._platform = platform
@property
def scatter_offset(self):
if self.version >= 131328:
return self._scatter_offset
return None
@scatter_offset.setter
def scatter_offset(self, scatter_offset):
self._scatter_offset = scatter_offset
@property
def team_id_offset(self):
if self.version >= 131584:
return self._team_id_offset
@team_id_offset.setter
def team_id_offset(self, team_id_offset):
self._team_id_offset = team_id_offset
@property
def team_id(self):
if self.version >= 131584 and self.team_id_offset != 0:
return self._team_id
return None
@team_id.setter
def team_id(self, team_id):
self._team_id = team_id
def gen_hashes(self):
for i in self.hashes:
yield i
def add_hash(self, hash):
self.hashes.append(hash) |
text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "Geometry/Dimension.hh"
#include "GSPH/Limiters/SuperbeeLimiter.cc"
namespace Spheral {
template class SuperbeeLimiter<Dim< %(ndim)s > >;
}
"""
| text = '\n//------------------------------------------------------------------------------\n// Explicit instantiation.\n//------------------------------------------------------------------------------\n#include "Geometry/Dimension.hh"\n#include "GSPH/Limiters/SuperbeeLimiter.cc"\n\nnamespace Spheral {\n template class SuperbeeLimiter<Dim< %(ndim)s > >;\n}\n' |
class ProfilePage:
BACK_TO_USERS = "Back to members"
EDIT_USER_BUTTON = "Change role"
USER_EMAIL = "Email"
USER_FIRST_NAME = "First name"
USER_LAST_NAME = "Last name"
USER_ROLE = "Role"
USER_STATUS = "Status"
USER_PENDING = "Pending"
USER_DEACTIVATE = "Deactivate member"
USER_REACTIVATE = "Reactivate member"
USER_NOT_ACTIVATED_YET = "This member hasn't signed in to their export control account."
class UsersPage:
MANAGE_ORGANISATIONS_MEMBERS_TAB = "Members"
USER_EMAIL = "Email"
USER_NAME = "Name"
USER_PENDING = "Pending"
USER_ROLE = "Role"
USER_STATUS = "Status"
class AddUserForm:
USER_ROLE_QUESTION = "Role"
USER_ADD_TITLE = "Add a member"
USER_EMAIL_QUESTION = "Email"
USER_ADD_FORM_BACK_TO_USERS = "Back to members"
ASSIGN_USER_QUESTION = "Assigned sites"
class EditUserForm:
USER_ROLE_QUESTION = "Role"
USER_EDIT_TITLE = "Change role"
USER_EDIT_FORM_BACK_TO_USER = "Back to member"
USER_EDIT_FORM_SAVE = "Save"
class AssignToSitesForm:
ASSIGN_USER_TO_SITES_TITLE = "Assign member to sites"
ASSIGN_USER_TO_SITES_DESCRIPTION = ""
| class Profilepage:
back_to_users = 'Back to members'
edit_user_button = 'Change role'
user_email = 'Email'
user_first_name = 'First name'
user_last_name = 'Last name'
user_role = 'Role'
user_status = 'Status'
user_pending = 'Pending'
user_deactivate = 'Deactivate member'
user_reactivate = 'Reactivate member'
user_not_activated_yet = "This member hasn't signed in to their export control account."
class Userspage:
manage_organisations_members_tab = 'Members'
user_email = 'Email'
user_name = 'Name'
user_pending = 'Pending'
user_role = 'Role'
user_status = 'Status'
class Adduserform:
user_role_question = 'Role'
user_add_title = 'Add a member'
user_email_question = 'Email'
user_add_form_back_to_users = 'Back to members'
assign_user_question = 'Assigned sites'
class Edituserform:
user_role_question = 'Role'
user_edit_title = 'Change role'
user_edit_form_back_to_user = 'Back to member'
user_edit_form_save = 'Save'
class Assigntositesform:
assign_user_to_sites_title = 'Assign member to sites'
assign_user_to_sites_description = '' |
'''Program to find the factorial of a number using recursion'''
def factorial_of_a_number(n):
if n<=1:
return 1
else:
return n * factorial_of_a_number(n-1)
#Taking number from user and passing it to the function
n = int(input())
print(factorial_of_a_number(n)) | """Program to find the factorial of a number using recursion"""
def factorial_of_a_number(n):
if n <= 1:
return 1
else:
return n * factorial_of_a_number(n - 1)
n = int(input())
print(factorial_of_a_number(n)) |
class EN:
START_TEXT = """
Hello {},
I am ROBOT.
"""
| class En:
start_text = '\nHello {},\nI am ROBOT.\n' |
def dec_to_bin(n):
if n < 0:
return bin(n * -1)[2:]
return bin(n)[2:]
def trim_to(number, length):
zeroes = ''
for i in range(int(length) - len(number)):
zeroes += '0'
return zeroes + number
def bit_not(number):
negated = ''
for bit in number:
if bit == '1':
negated += '0'
elif bit == '0':
negated += '1'
return negated
def add_one(number, length):
addone = ''
carryover = ''
if number[len(number) - 1] == '0':
addone += number[:-1]
addone += '1'
return addone
else: #Not the best way to do a bitwise addition but it works
carryover = '1' #Because the number we start with is 1 and 1 + 1 = 0 with carryover 1
#We iterate through the number but in reversed order sind we start the addition from the end.
for bit in number[::-1]:
if bit == '0':
if carryover == '1':
addone += '1'
carryover = '0'
else:
addone += '0'
if bit == '1':
if carryover == '1':
addone += '0'
else:
addone += '1'
carryover = '0'
return trim_to(addone[::-1], length)
if __name__ == '__main__':
number = input('Enter the (decimal) number you want to convert to the Two\'s Complement: ')
bitlength = len(dec_to_bin(int(number)))
length = input('Enter the length to which your number should be trimmed to: ')
if bitlength > int(length):
print('You should use at least a length equal or greater then the binary number itself!')
else:
if int(number) >= 0:
bit = trim_to(dec_to_bin(int(number)), length)
print('\nConverting ' + number + ' using the bit length ' + length + ' to -> ' + bit)
print('Finally we get...')
print(trim_to(dec_to_bin(int(number)), length))
else:
bit = trim_to(dec_to_bin(int(number)), length)
not_bit = bit_not(bit)
print('\nConverting the positive number of ' + number + ' using the bit length ' + length + ' to -> ' + bit)
print('Negating ' + bit + ' to -> ' + not_bit)
print('Finally add 1 to the negated number and we get...')
print (add_one(not_bit, length))
print('\nEnd...') | def dec_to_bin(n):
if n < 0:
return bin(n * -1)[2:]
return bin(n)[2:]
def trim_to(number, length):
zeroes = ''
for i in range(int(length) - len(number)):
zeroes += '0'
return zeroes + number
def bit_not(number):
negated = ''
for bit in number:
if bit == '1':
negated += '0'
elif bit == '0':
negated += '1'
return negated
def add_one(number, length):
addone = ''
carryover = ''
if number[len(number) - 1] == '0':
addone += number[:-1]
addone += '1'
return addone
else:
carryover = '1'
for bit in number[::-1]:
if bit == '0':
if carryover == '1':
addone += '1'
carryover = '0'
else:
addone += '0'
if bit == '1':
if carryover == '1':
addone += '0'
else:
addone += '1'
carryover = '0'
return trim_to(addone[::-1], length)
if __name__ == '__main__':
number = input("Enter the (decimal) number you want to convert to the Two's Complement: ")
bitlength = len(dec_to_bin(int(number)))
length = input('Enter the length to which your number should be trimmed to: ')
if bitlength > int(length):
print('You should use at least a length equal or greater then the binary number itself!')
elif int(number) >= 0:
bit = trim_to(dec_to_bin(int(number)), length)
print('\nConverting ' + number + ' using the bit length ' + length + ' to -> ' + bit)
print('Finally we get...')
print(trim_to(dec_to_bin(int(number)), length))
else:
bit = trim_to(dec_to_bin(int(number)), length)
not_bit = bit_not(bit)
print('\nConverting the positive number of ' + number + ' using the bit length ' + length + ' to -> ' + bit)
print('Negating ' + bit + ' to -> ' + not_bit)
print('Finally add 1 to the negated number and we get...')
print(add_one(not_bit, length))
print('\nEnd...') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.