blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a523fb24383778cfa759ebc418562c3a9384aab5
|
Python
|
mjbouvet/Linear-Regression-Modeling-for-Oliver-Wyman
|
/Python Code/EDA_NY.py
|
UTF-8
| 13,612
| 2.9375
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import seaborn as sns #visualization
import matplotlib.pyplot as plt #visualization
sns.set(color_codes=True)
#Import Files
# Load the four raw CSV extracts from local disk (absolute Windows paths --
# this script only runs on the author's machine as written).
casesData = pd.read_csv("H:/Documents/Oliver Wyman Interview/COVID Cases.csv")
mobilityIndex = pd.read_csv("H:/Documents/Oliver Wyman Interview/Mobility Index.csv")
smallBusinessData = pd.read_csv("H:/Documents/Oliver Wyman Interview/Small Business Data.csv")
unemploymentData = pd.read_csv("H:/Documents/Oliver Wyman Interview/Unemployment.csv")
#Get Types of Data
# Print each frame's column dtypes to spot columns that were read in as
# object/strings (several are imputed and converted with pd.to_numeric below).
print("CasesData:", '\n', casesData.dtypes, '\n')
print("MobilityIndex:", '\n', mobilityIndex.dtypes, '\n')
print("smallBusinessData:", '\n', smallBusinessData.dtypes, '\n')
print("unemploymentData:", '\n', unemploymentData.dtypes, '\n')
#Limit to entries from New York.
#(The original comment said "North Carolina", but the filter below clearly
#selects State == "NY"; the comment was the leftover, not the code.)
#.copy() avoids SettingWithCopyWarning on the later in-place imputation.
casesData_NY = casesData[casesData["State"] == "NY"].copy()
mobilityIndex_NY = mobilityIndex[mobilityIndex["State"] == "NY"].copy()
smallBusinessData_NY = smallBusinessData[smallBusinessData["State"]=="NY"].copy()
unemploymentData_NY = unemploymentData[unemploymentData["State"]=="NY"].copy()
#Check for Duplicate Rows
# For each frame: print its shape, then the number of fully-duplicated rows.
# Fix: the original printed the duplicate frame's whole .shape tuple
# (rows, cols); .shape[0] is the row COUNT the message actually promises.
print("CasesData Shape:", casesData_NY.shape)
duplicate_rows_casesData = casesData_NY[casesData_NY.duplicated()]
print("number of duplicate rows: ", duplicate_rows_casesData.shape[0], '\n')
print("MobilityIndex Shape:",mobilityIndex_NY.shape)
duplicate_rows_mobilityIndex = mobilityIndex_NY[mobilityIndex_NY.duplicated()]
print("number of duplicate rows: ", duplicate_rows_mobilityIndex.shape[0], '\n')
print("smallBusinessData Shape:", smallBusinessData_NY.shape)
duplicate_rows_smallBusinessData = smallBusinessData_NY[smallBusinessData_NY.duplicated()]
print("number of duplicate rows: ", duplicate_rows_smallBusinessData.shape[0], '\n')
print("unemploymentData Shape", unemploymentData_NY.shape)
duplicate_rows_unemploymentData = unemploymentData_NY[unemploymentData_NY.duplicated()]
print("number of duplicate rows: ", duplicate_rows_unemploymentData.shape[0], '\n')
#Check for Null Values
print(casesData_NY.head(10))
# The raw file encodes missing values as the string '.'; replace with NaN
# so pandas null-handling works.
casesData_NY.loc[casesData_NY.case_rate == '.', 'case_rate'] = np.nan
casesData_NY.loc[casesData_NY.new_case_rate == '.', 'new_case_rate'] = np.nan
print(casesData_NY.isnull().sum())
print(casesData_NY.head(10))
# Impute with the mean of new_case_rate (coerced to numeric first, since the
# column still holds strings at this point).
mean_value = pd.to_numeric(casesData_NY['new_case_rate']).mean()
# NOTE(review): frame-wide fillna fills NaNs in EVERY column (including
# case_rate) with new_case_rate's mean -- confirm this is intended.
# NOTE(review): the _NC suffix looks like a leftover from a North Carolina
# version of this script; the frame holds NY data and is used below.
casesData_NC = casesData_NY.fillna(mean_value)
print(casesData_NC.isnull().sum())
print(casesData_NC.head(10))
# The mobility file also encodes missing values as '.'; convert all seven
# GPS columns to NaN first.
mobilityIndex_NY.loc[mobilityIndex_NY.gps_retail_and_recreation == '.', 'gps_retail_and_recreation'] = np.nan
mobilityIndex_NY.loc[mobilityIndex_NY.gps_grocery_and_pharmacy == '.', 'gps_grocery_and_pharmacy'] = np.nan
mobilityIndex_NY.loc[mobilityIndex_NY.gps_parks == '.', 'gps_parks'] = np.nan
mobilityIndex_NY.loc[mobilityIndex_NY.gps_transit_stations == '.', 'gps_transit_stations'] = np.nan
mobilityIndex_NY.loc[mobilityIndex_NY.gps_workplaces == '.', 'gps_workplaces'] = np.nan
mobilityIndex_NY.loc[mobilityIndex_NY.gps_residential == '.', 'gps_residential'] = np.nan
mobilityIndex_NY.loc[mobilityIndex_NY.gps_away_from_home == '.', 'gps_away_from_home'] = np.nan
print(mobilityIndex_NY.isnull().sum())
# Impute four of the columns with their own column mean.
mean_value = pd.to_numeric(mobilityIndex_NY['gps_retail_and_recreation']).mean()
mobilityIndex_NY['gps_retail_and_recreation'] = mobilityIndex_NY['gps_retail_and_recreation'].fillna(mean_value)
mean_value = pd.to_numeric(mobilityIndex_NY['gps_grocery_and_pharmacy']).mean()
mobilityIndex_NY['gps_grocery_and_pharmacy'] = mobilityIndex_NY['gps_grocery_and_pharmacy'].fillna(mean_value)
mean_value = pd.to_numeric(mobilityIndex_NY['gps_parks']).mean()
mobilityIndex_NY['gps_parks'] = mobilityIndex_NY['gps_parks'].fillna(mean_value)
mean_value = pd.to_numeric(mobilityIndex_NY['gps_transit_stations']).mean()
mobilityIndex_NY['gps_transit_stations'] = mobilityIndex_NY['gps_transit_stations'].fillna(mean_value)
# NOTE(review): gps_workplaces, gps_residential and gps_away_from_home are
# set to NaN above but never imputed -- confirm that is intentional.
print(mobilityIndex_NY.isnull().sum())
# Same '.'-to-NaN conversion for the small-business and unemployment frames.
smallBusinessData_NY.loc[smallBusinessData_NY['RevenueChange'] == '.', 'RevenueChange'] = np.nan
print(smallBusinessData_NY.isnull().sum())
unemploymentData_NY.loc[unemploymentData_NY['initclaims_rate'] == '.', 'initclaims_rate'] = np.nan
unemploymentData_NY.loc[unemploymentData_NY['contclaims_rate'] == '.', 'contclaims_rate'] = np.nan
print(unemploymentData_NY.isnull().sum())
# Impute only contclaims_rate with its column mean.
mean_value = pd.to_numeric(unemploymentData_NY['contclaims_rate']).mean()
unemploymentData_NY['contclaims_rate'] = unemploymentData_NY['contclaims_rate'].fillna(mean_value)
# NOTE(review): RevenueChange and initclaims_rate are left with NaNs here --
# confirm downstream code tolerates that.
print(unemploymentData_NY.isnull().sum())
#Looking for Outliers
def _boxplot_and_iqr(series, label):
    """Show a boxplot of *series* and print its interquartile range.

    Factors out the boxplot/quantile boilerplate that the original repeated
    verbatim for each of the twelve columns below. Returns the IQR.
    """
    sns.boxplot(x = series)
    plt.show()
    q1 = series.quantile(0.25)
    q3 = series.quantile(0.75)
    iqr = q3 - q1
    print("THE IQR FOR " + label + " IS:", iqr)
    return iqr

# NOTE: the original left module-level Q1/Q3/IQR variables behind; nothing
# later in the script uses them, so the helper keeps them local.
#Cases Data (case_rate was plotted without pd.to_numeric in the original and
#is kept as-is; the remaining columns still hold strings, hence the
#conversion at each call site)
_boxplot_and_iqr(casesData_NY['case_rate'], "CASE_RATE")
_boxplot_and_iqr(pd.to_numeric(casesData_NY['new_case_rate']), "NEW_CASE_RATE")
#Mobility Data
_boxplot_and_iqr(pd.to_numeric(mobilityIndex_NY['gps_retail_and_recreation']), "GPS_RETAIL_AND_RECREATION")
_boxplot_and_iqr(pd.to_numeric(mobilityIndex_NY['gps_grocery_and_pharmacy']), "GPS_GROCERY_AND_PHARMACY")
_boxplot_and_iqr(pd.to_numeric(mobilityIndex_NY['gps_parks']), "GPS_PARKS")
_boxplot_and_iqr(pd.to_numeric(mobilityIndex_NY['gps_transit_stations']), "GPS_TRANSIT_STATIONS")
_boxplot_and_iqr(pd.to_numeric(mobilityIndex_NY['gps_workplaces']), "GPS_WORKPLACES")
_boxplot_and_iqr(pd.to_numeric(mobilityIndex_NY['gps_residential']), "GPS_RESIDENTIAL")
_boxplot_and_iqr(pd.to_numeric(mobilityIndex_NY['gps_away_from_home']), "GPS_AWAY_FROM_HOME")
#Small Business Data
_boxplot_and_iqr(pd.to_numeric(smallBusinessData_NY['RevenueChange']), "REVENUE CHANGE")
#Unemployment Data
_boxplot_and_iqr(pd.to_numeric(unemploymentData_NY['initclaims_rate']), "INITCLAIMS_RATE CHANGE")
_boxplot_and_iqr(pd.to_numeric(unemploymentData_NY['contclaims_rate']), "CONTCLAIMS_RATE CHANGE")
#PLOTTING AGAINST REVENUE CHANGE
# Build simple 0..n-1 positional x-axes (one per frame) for the line plots
# below. list(range(...)) replaces the original manual append loops; the
# 'year' column is only used to obtain the row count.
indexingCaseData = list(range(casesData_NC['year'].size))
indexingRevenueChange = list(range(smallBusinessData_NY['year'].size))
indexingMobilityIndex = list(range(mobilityIndex_NY['year'].size))
indexingUnemploymentData = list(range(unemploymentData_NY['year'].size))
def _plot_vs_revenue(x_values, y_values, title, ylabel, xlabel='Day'):
    """Plot one metric (left panel) beside small-business RevenueChange (right).

    Factors out the two-subplot boilerplate the original repeated verbatim
    for each of the twelve metrics below. The right panel is always the same
    RevenueChange series for visual comparison.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    ax1.plot(x_values, y_values)
    ax2.plot(indexingRevenueChange, smallBusinessData_NY['RevenueChange'])
    ax1.set_title(title)
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    ax2.set_title('Revenue Change')
    ax2.set_xlabel('Day')
    ax2.set_ylabel('Percent Change in Net Revenue')
    plt.show()

#CASE RATE
_plot_vs_revenue(indexingCaseData, casesData_NC['case_rate'],
                 'Case Rate', 'Confirmed Cases per 100k People')
#NEW CASE DATA
_plot_vs_revenue(indexingCaseData, casesData_NC['new_case_rate'],
                 'New Case Rate', 'New Confirmed Cases per 100k People')
#MOBILITY METRICS (columns still hold strings; convert before plotting, as
#the original did)
_plot_vs_revenue(indexingMobilityIndex, pd.to_numeric(mobilityIndex_NY['gps_retail_and_recreation']),
                 'gps_retail_and_recreation', 'Time spent at retail and recreation locations')
_plot_vs_revenue(indexingMobilityIndex, pd.to_numeric(mobilityIndex_NY['gps_grocery_and_pharmacy']),
                 'gps_grocery_and_pharmacy', 'Time spent at grocery and pharmacy')
_plot_vs_revenue(indexingMobilityIndex, pd.to_numeric(mobilityIndex_NY['gps_parks']),
                 'gps_parks', 'Time spent at Parks')
_plot_vs_revenue(indexingMobilityIndex, pd.to_numeric(mobilityIndex_NY['gps_transit_stations']),
                 'gps_transit_stations', 'Time spent at Transit Stations')
_plot_vs_revenue(indexingMobilityIndex, pd.to_numeric(mobilityIndex_NY['gps_workplaces']),
                 'gps_workplaces', 'Time spent at Workplaces')
_plot_vs_revenue(indexingMobilityIndex, pd.to_numeric(mobilityIndex_NY['gps_residential']),
                 'gps_residential', 'Time spent at Residential Locations')
_plot_vs_revenue(indexingMobilityIndex, pd.to_numeric(mobilityIndex_NY['gps_away_from_home']),
                 'gps_away_from_home', 'Time Spent Away From Home')
#UNEMPLOYMENT METRICS (weekly series, hence the 'Week' x-label; the
#'Inital' typo is preserved from the original axis label)
_plot_vs_revenue(indexingUnemploymentData, pd.to_numeric(unemploymentData_NY['initclaims_rate']),
                 'initclaims_rate', 'Weekly Inital Unemployment Insurance Claims per 100 People', xlabel='Week')
_plot_vs_revenue(indexingUnemploymentData, pd.to_numeric(unemploymentData_NY['contclaims_rate']),
                 'contclaims_rate', 'Weekly Continuing Unemployment Insurance Claims per 100 People', xlabel='Week')
| true
|
427974cd5b1e0cf36cddb4536349f8bb8e9496cd
|
Python
|
guomulian/caregivers
|
/cleaning/utils.py
|
UTF-8
| 2,229
| 3.453125
| 3
|
[] |
no_license
|
import pandas as pd
from pathlib import Path
def get_project_root() -> Path:
    """Return the repository root: two levels above this module's file."""
    return Path(__file__).parents[1]
def get_cols_with_na_values(df):
    """Return the labels of every column in *df* containing at least one NaN."""
    has_na = df.isna().any()
    return df.columns[has_na]
def print_cols_with_na_values(df):
    """Print NaN counts for every column that has NaNs, then return *df*.

    Returning the frame unchanged lets this be dropped into a ``.pipe`` chain.
    """
    na_cols = df.columns[df.isna().any()]
    counts = df[na_cols].isna().sum()
    print("Columns with NaN values:\n\n{}".format(counts))
    return df
def remove_constant_cols(df):
    """Returns the given dataframe with the constant columns removed."""
    # A column is kept if any of its values differs from the first row's value.
    varies = (df != df.iloc[0]).any()
    return df.loc[:, varies]
def handle_datetime_types(df):
    """Attempts to automatically convert datetime columns to the correct type.

    Every object-dtype column is tentatively parsed with ``pd.to_datetime``;
    columns that cannot be parsed are left unchanged. Mutates *df* in place
    and returns it.
    """
    for col in df.columns:
        if df[col].dtype == "object":
            try:
                df[col] = pd.to_datetime(df[col])
            except (ValueError, TypeError):
                # ValueError: strings that are not dates.
                # TypeError: objects pandas refuses to interpret at all
                # (the original caught only ValueError and would crash here).
                pass
    return df
def handle_datetime_cols(df, datetime_cols):
    """Convert each named column that actually exists in *df* to datetime.

    Columns listed in *datetime_cols* but absent from the frame are silently
    skipped. Mutates *df* in place and returns it.
    """
    present = (name for name in datetime_cols if name in df.columns)
    for name in present:
        df[name] = pd.to_datetime(df[name])
    return df
def is_constant_col(df, group_by=None, dropna=True):
    """Returns a list of columns with constant value across all rows.
    group_by : Group the DataFrame by group_by first.
    dropna : Don't include NaN in the counts.
    """
    # Truthiness test (not `is None`) preserved: a falsy group_by label means
    # "no grouping", exactly as before.
    if not group_by:
        return list(df.columns[df.nunique(dropna=dropna) <= 1])
    grouped = df.groupby(group_by)
    constant = []
    for col in df.columns:
        if col == group_by:
            continue
        per_group = grouped[col].agg('nunique', dropna=dropna).values
        if all(n == 1 for n in per_group):
            constant.append(col)
    return constant
def squish_dataframe(df, group_by, dropna=True):
    """Aggregates by list for columns with more than one distinct value.
    For constant columns it simply takes that value.
    """
    # Columns whose value never varies within each group_by group.
    constant_cols = is_constant_col(df, group_by, dropna)
    # NOTE(review): for constant columns the lambda returns values.mode(),
    # which is a Series rather than a scalar -- confirm groupby.agg handles
    # this as intended on the pandas version in use (a scalar such as
    # values.iloc[0] may have been the intent).
    aggfuncs = {col: (lambda values: values.mode()) if col in constant_cols
                else list for col in df.columns if col != group_by}
    return df.groupby(group_by).agg(aggfuncs).reset_index()
def squish(s: pd.Series):
    """Collapse a Series down to its single distinct value.

    Returns None for an empty Series. Raises IndexError when the Series
    contains more than one distinct value.
    """
    if len(s) == 0:
        return None
    distinct = s.unique()
    if len(distinct) == 1:
        return distinct[0]
    raise IndexError("Values not unique, can't squish.")
| true
|
c355580824477bdfb1f09db8640d0fb23c7f16b2
|
Python
|
aayc/code-contest-problems
|
/codeforces/678B/solution.py
|
UTF-8
| 974
| 4.09375
| 4
|
[] |
no_license
|
'''
B. The Same Calendar
The girl Taylor has a beautiful calendar for the year y. In the calendar all days are given with their days of week: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
The calendar is so beautiful that she wants to know what is the next year after y when the calendar will be exactly the same. Help Taylor to find that year.
Note that leap years has 366 days. The year is leap if it is divisible by 400 or it is divisible by 4, but not by 100 (https://en.wikipedia.org/wiki/Leap_year).
Input
The only line contains integer y (1000 ≤ y < 100'000) — the year of the calendar.
Output
Print the only integer y' — the next year after y when the calendar will be the same. Note that you should find the first year after y with the same calendar.
Tests
(In this directory), 'cf test 678 B solution.py'
'''
def is_leap (y):
    """Return True if year y is a leap year.

    Rule (as stated in the problem): divisible by 400, OR divisible by 4
    but not by 100. The original expression
    ``(y % 400 == 0 or y % 4 == 0) and not y % 100 == 0`` wrongly rejected
    every century year, including leap centuries such as 2000.
    """
    return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
y_p = 0
y_i = y + 1
while y_p == 0:
| true
|
cc45171806546703a8557721c49cd38ce7f0b37a
|
Python
|
siddhantdeep/MachineLearning
|
/Polynomial Regression/polynomial_regression_me.py
|
UTF-8
| 1,646
| 3.625
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 17:44:39 2019
@author: 764958
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import dataset as DataFrames and split them into independent and dependent variables
dataset = pd.read_csv('Position_Salaries.csv')
# iloc[:, 1:2] keeps level as a 2-D (n, 1) array, the shape sklearn's fit()
# expects for the feature matrix; iloc[:, -1] gives salary as a 1-D target.
level = dataset.iloc[:, 1:2].values
salary = dataset.iloc[:, -1].values
#The data set is small so no need for splitting into train/test sets
from sklearn.linear_model import LinearRegression
# Baseline: plain linear fit of salary on level.
lin_reg = LinearRegression()
lin_reg.fit(level,salary)
#for polynomial regression
from sklearn.preprocessing import PolynomialFeatures
#increase or decrease degree to improve the accuracy of the model
poly_set = PolynomialFeatures(degree = 4)
#fit_transform learns the feature expansion and transforms level in one step
level_poly = poly_set.fit_transform(level)
#now fit a linear regression on the polynomial feature set
poly_reg = LinearRegression()
poly_reg.fit(level_poly,salary)
#Visualise the simple linear model
plt.scatter(level,salary, color = 'red')
#represent our model's finding as a line on the graph
plt.plot(level, lin_reg.predict(level), color = 'blue')
#Label the graph for better understanding
plt.title('Comapre salary')  # NOTE(review): title typo kept as-is (user-visible string)
plt.xlabel('position level')
plt.ylabel('Salary')
plt.show()
#for polynomial prediction
plt.scatter(level,salary, color = 'red')
#represent our model's finding as a line on the graph
#(level is re-expanded through the same PolynomialFeatures transform)
plt.plot(level, poly_reg.predict(poly_set.fit_transform(level)), color = 'blue')
#Label the graph for better understanding
plt.title('Comapre salary')  # NOTE(review): title typo kept as-is (user-visible string)
plt.xlabel('position level')
plt.ylabel('Salary')
plt.show()
| true
|
ebe2d1eec9a595b66dde8e04fcd077d19e1e4995
|
Python
|
quanaimaxiansheng/1805-2.py
|
/03day/6-私有公有.py
|
UTF-8
| 177
| 2.59375
| 3
|
[] |
no_license
|
class wo():
    """Holds a private secret string and exposes read-only access to it.

    The double-underscore attribute is name-mangled (``_wo__mimi``) so it is
    not trivially reachable from outside the class.
    """

    def __init__(self, mimi):
        # mimi: the secret string to store.
        self.__mimi = mimi

    @property
    def mimi(self):
        """The stored secret (read-only, Pythonic accessor)."""
        return self.__mimi

    def getmimi(self):
        """Return the stored secret.

        Kept for backward compatibility with existing callers; new code
        should prefer the ``mimi`` property.
        """
        return self.__mimi
# Prompt the user for a secret (the Chinese prompt means "please enter your
# secret"), wrap it in a wo instance, and echo it back via the getter.
mimi=input("请输入你的秘密")
yuxing=wo(mimi)
print(yuxing.getmimi())
| true
|
34285608be053d54a240f38590098d6b4196e8b4
|
Python
|
Carlos123b/X-Serv-Python-Multiplica
|
/mult.py
|
UTF-8
| 222
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Print the multiplication tables for 1 through 10, one table per block,
# separated by a blank line.
for base in range(1, 11):
    print("Tabla del " + str(base) + ":")
    print("---------------------------")
    for factor in range(1, 11):
        print(base, "por", factor, "es", base * factor)
    print("\n")
| true
|
ac0f777fa1bd9b17564c8341b538f5e9a05a9036
|
Python
|
Redpike/advent-of-code
|
/2015/d11/d11.py
|
UTF-8
| 1,464
| 3.453125
| 3
|
[] |
no_license
|
def check(password_string):
    """Return 1 if the password satisfies all three Day-11 rules, else 0."""
    # Rule 1: the confusing letters i, o and l are forbidden outright.
    if any(bad in password_string for bad in 'iol'):
        return 0
    # Rule 2: at least two pairs of different letters (aa, bb, ...).
    # Track which letters already formed a pair so 'aaaa' counts only once.
    seen_pair_letters = ''
    pair_count = 0
    for left, right in zip(password_string, password_string[1:]):
        if left == right and left not in seen_pair_letters:
            pair_count += 1
            seen_pair_letters += left
    # Rule 3: at least one increasing straight of three letters (abc, ...).
    has_straight = 0
    for i in range(len(password_string) - 2):
        a, b, c = password_string[i:i + 3]
        if ord(b) == ord(a) + 1 and ord(c) == ord(b) + 1:
            has_straight = 1
    if pair_count >= 2 and has_straight == 1:
        return 1
    else:
        return 0
def genenarate_next_password(password_string):
    """Return the password one 'increment' later (an a-z odometer).

    The last character is bumped by one; a 'z' rolls over to 'a' and the
    carry propagates into the prefix recursively. (Function name spelling
    is preserved from the original for compatibility.)
    """
    prefix = password_string[:-1]
    last = password_string[-1]
    if last == 'z':
        # Carry: roll this position to 'a' and increment the prefix.
        return genenarate_next_password(prefix) + 'a'
    return prefix + chr(ord(last) + 1)
def get_new_password(password_string):
    """Keep incrementing the password until check() accepts it; return it.

    The starting password itself is never returned -- the search always
    begins at the next candidate.
    """
    while True:
        password_string = genenarate_next_password(password_string)
        if check(password_string):
            return password_string
def main():
    """Solve both parts of AoC 2015 Day 11 for the fixed puzzle input."""
    puzzle_input = 'vzbxkghb'
    part_one = get_new_password(puzzle_input)
    print('Day 11 Part 1:', part_one)
    # Part 2 continues the search from the part-1 answer.
    print('Day 11 Part 2:', get_new_password(part_one))


if __name__ == '__main__':
    main()
| true
|
e76f50ee968bbce8175547a7e4a0715bc5f79c9b
|
Python
|
joose1983/answer-for-python-crush-course-2ndE
|
/Chapter 9/making_pizzas.py
|
UTF-8
| 166
| 3.359375
| 3
|
[] |
no_license
|
from pizza import *
def make_pizza(size, *toppings):
    """Stub pizza-maker: accepts a size and any number of topping strings,
    but currently ignores both and only prints a greeting."""
    print("hello")
# Exercise the function: one positional size plus varargs toppings.
make_pizza(16, 'pepperoni')
make_pizza(1, 'mushrooms', 'green peppers', 'extra cheese')
| true
|
c93d11531c35b18761e0297712e7099153f6c9ee
|
Python
|
joel-roland/joel-test-commit
|
/Databricks Bootcamp/Data Engineering Workshop/Delta Lake Workshop - Delta Lake Primer.py
|
UTF-8
| 21,619
| 2.625
| 3
|
[] |
no_license
|
# Databricks notebook source
# MAGIC %md
# MAGIC
# MAGIC # Ensuring Consistency with ACID Transactions with Delta Lake (Loan Risk Data)
# MAGIC
# MAGIC <img src="https://pages.databricks.com/rs/094-YMS-629/images/delta-lake-logo-whitebackground.png" width=200/>
# MAGIC
# MAGIC This is a companion notebook to provide a Delta Lake example against the Lending Club data.
# MAGIC * This notebook has been tested with *DBR 5.4 ML Beta, Python 3*
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## The Data
# MAGIC
# MAGIC The data used is public data from Lending Club. It includes all funded loans from 2012 to 2017. Each loan includes applicant information provided by the applicant as well as the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. For a full view of the data please view the data dictionary available [here](https://resources.lendingclub.com/LCDataDictionary.xlsx).
# MAGIC
# MAGIC
# MAGIC 
# MAGIC
# MAGIC https://www.kaggle.com/wendykan/lending-club-loan-data
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Delta Lake
# MAGIC
# MAGIC Optimization Layer a top blob storage for Reliability (i.e. ACID compliance) and Low Latency of Streaming + Batch data pipelines.
# COMMAND ----------
# MAGIC %md ## Import Data and create pre-Delta Lake Table
# MAGIC * This will create a lot of small Parquet files emulating the typical small file problem that occurs with streaming or highly transactional data
# COMMAND ----------
# DBTITLE 0,Import Data and create pre-Databricks Delta Table
# -----------------------------------------------
# Uncomment and run if this folder does not exist
# -----------------------------------------------
# Configure location of loanstats_2012_2017.parquet
lspq_path = "/databricks-datasets/samples/lending_club/parquet/"
# Read loanstats_2012_2017.parquet
data = spark.read.parquet(lspq_path)
# Reduce the amount of data (to run on DBCE); seed makes the 10% sample
# reproducible across runs.
(loan_stats, loan_stats_rest) = data.randomSplit([0.10, 0.90], seed=123)
loan_stats_rest.createOrReplaceTempView("loan_data_full")
# Select only the columns needed
loan_stats = loan_stats.select("addr_state", "loan_status")
# Create loan counts by state
loan_by_state = loan_stats.groupBy("addr_state").count()
# Register as a temp view so later SQL cells can query it
loan_by_state.createOrReplaceTempView("loan_by_state")
# Display loans by state
display(loan_by_state)
# COMMAND ----------
# MAGIC %md ##  Easily Convert Parquet to Delta Lake format
# MAGIC With Delta Lake, you can easily transform your Parquet data into Delta Lake format.
# COMMAND ----------
# Configure Delta Lake Silver Path
DELTALAKE_SILVER_PATH = "/ml/loan_by_state_delta"
# Remove folder if it exists; recurse=True deletes the whole directory tree
# so the CREATE TABLE ... LOCATION cell below starts from a clean slate.
dbutils.fs.rm(DELTALAKE_SILVER_PATH, recurse=True)
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Current example is creating a new table instead of in-place import so will need to change this code
# MAGIC DROP TABLE IF EXISTS loan_by_state_delta;
# MAGIC
# MAGIC CREATE TABLE loan_by_state_delta
# MAGIC USING delta
# MAGIC LOCATION '/ml/loan_by_state_delta'
# MAGIC AS SELECT * FROM loan_by_state;
# MAGIC
# MAGIC -- View Delta Lake table
# MAGIC SELECT * FROM loan_by_state_delta
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE DETAIL delta.`/ml/loan_by_state_delta`
# COMMAND ----------
# MAGIC %md
# MAGIC ## Stop the notebook before the streaming cell, in case of a "run all"
# COMMAND ----------
# Abort a "Run All" at this point: the cells below start a streaming query
# that is meant to be run interactively.
dbutils.notebook.exit("stop")
# COMMAND ----------
# MAGIC %fs ls /ml/loan_by_state_delta/_delta_log/
# COMMAND ----------
# MAGIC %md ##  Unified Batch and Streaming Source and Sink
# MAGIC
# MAGIC These cells showcase streaming and batch concurrent queries (inserts and reads)
# MAGIC * This notebook will run an `INSERT` every 10s against our `loan_stats_delta` table
# MAGIC * We will run two streaming queries concurrently against this data
# MAGIC * Note, you can also use `writeStream` but this version is easier to run in DBCE
# COMMAND ----------
# Read the insertion of data as a stream from the Delta table, and register
# it as a temp view so the following SQL cell can query the live stream.
loan_by_state_readStream = spark.readStream.format("delta").load(DELTALAKE_SILVER_PATH)
loan_by_state_readStream.createOrReplaceTempView("loan_by_state_readStream")
# COMMAND ----------
# MAGIC %sql
# MAGIC select addr_state, sum(`count`) as loans from loan_by_state_readStream group by addr_state
# COMMAND ----------
# MAGIC %md **Wait** until the stream is up and running before executing the code below
# COMMAND ----------
import time

# Periodically insert a row so the concurrent streaming query above can be
# observed picking up new data; six inserts, five seconds apart.
for attempt in range(1, 7):
    spark.sql("INSERT INTO loan_by_state_delta VALUES ('IA', 4500)")
    print('loan_by_state_delta: inserted new row of data, loop: [%s]' % attempt)
    time.sleep(5)
# COMMAND ----------
# MAGIC %fs ls /ml/loan_by_state_delta/_delta_log/
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC Describe history loan_by_state_delta
# COMMAND ----------
# MAGIC %md
# MAGIC **Note**: Once the previous cell is finished and the state of Iowa is fully populated in the map (in cell 15), click *Cancel* in Cell 15 to stop the `readStream`.
# COMMAND ----------
# MAGIC %md
# MAGIC Let's review our current set of loans using our map visualization.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Review current loans within the `loan_by_state_delta` Delta Lake table
# MAGIC select addr_state, sum(`count`) as loans from loan_by_state_delta group by addr_state
# COMMAND ----------
# MAGIC %md Observe that the Iowa (middle state) has the largest number of loans due to the recent stream of data. Note that the original `loan_by_state_delta` table is updated as we're reading `loan_by_state_readStream`.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Full DML Support
# MAGIC
# MAGIC **Note**: Full DML Support is a feature that will be coming soon to Delta Lake; the preview is currently available in Databricks.
# MAGIC
# MAGIC Delta Lake supports standard DML including UPDATE, DELETE and MERGE INTO providing developers more controls to manage their big datasets.
# COMMAND ----------
# MAGIC %md Let's start by creating a traditional Parquet table
# COMMAND ----------
# Snapshot the current Delta table so we can materialize a plain Parquet
# copy and contrast DML support between the two formats.
delta_snapshot = sql("select * from loan_by_state_delta")

# Write the snapshot out as Parquet, replacing any previous copy.
delta_snapshot.write.mode("overwrite").parquet("/loan_by_state.parquet")

# Read the Parquet data back and expose it to SQL as a temp view.
parquet_copy = spark.read.parquet("/loan_by_state.parquet")
parquet_copy.createOrReplaceTempView("loan_by_state_pq")

# Sanity-check the contents of the new view.
display(sql("select * from loan_by_state_pq"))
# COMMAND ----------
# MAGIC %md ### DELETE Support
# MAGIC
# MAGIC The data was originally supposed to be assigned to `WA` state, so let's `DELETE` those values assigned to `IA`
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Attempting to run `DELETE` on the Parquet table
# MAGIC DELETE FROM loan_by_state_pq WHERE addr_state = 'IA'
# COMMAND ----------
# MAGIC %md **Note**: This command fails because the `DELETE` statements are not supported in Parquet, but are supported in Delta Lake.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Running `DELETE` on the Delta Lake table
# MAGIC DELETE FROM loan_by_state_delta WHERE addr_state = 'IA'
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Review current loans within the `loan_by_state_delta` Delta Lake table
# MAGIC select addr_state, sum(`count`) as loans from loan_by_state_delta group by addr_state
# COMMAND ----------
# MAGIC %md ### UPDATE Support
# MAGIC The data was originally supposed to be assigned to `WA` state, so let's `UPDATE` those values
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Attempting to run `UPDATE` on the Parquet table
# MAGIC UPDATE loan_by_state_pq SET `count` = 27000 WHERE addr_state = 'WA'
# COMMAND ----------
# MAGIC %md **Note**: This command fails because the `UPDATE` statements are not supported in Parquet, but are supported in Delta Lake.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Running `UPDATE` on the Delta Lake table
# MAGIC UPDATE loan_by_state_delta SET `count` = 27000 WHERE addr_state = 'WA'
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Review current loans within the `loan_by_state_delta` Delta Lake table
# MAGIC select addr_state, sum(`count`) as loans from loan_by_state_delta group by addr_state
# COMMAND ----------
# MAGIC %md ### MERGE INTO Support
# MAGIC
# MAGIC #### INSERT or UPDATE parquet: 7-step process
# MAGIC
# MAGIC With a legacy data pipeline, to insert or update a table, you must:
# MAGIC 1. Identify the new rows to be inserted
# MAGIC 2. Identify the rows that will be replaced (i.e. updated)
# MAGIC 3. Identify all of the rows that are not impacted by the insert or update
# MAGIC 4. Create a new temp based on all three insert statements
# MAGIC 5. Delete the original table (and all of those associated files)
# MAGIC 6. "Rename" the temp table back to the original table name
# MAGIC 7. Drop the temp table
# MAGIC
# MAGIC 
# MAGIC
# MAGIC
# MAGIC #### INSERT or UPDATE with Delta Lake
# MAGIC
# MAGIC 2-step process:
# MAGIC 1. Identify rows to insert or update
# MAGIC 2. Use `MERGE`
# COMMAND ----------
# Build a small source table for the MERGE demo: one row that updates an
# existing state ('IA'), one that inserts a new state ('CA'), and one with a
# NULL count ('OR').
upsert_rows = [('IA', 1000000), ('CA', 25), ('OR', None)]
merge_table = spark.createDataFrame(upsert_rows, ['addr_state', 'count'])
merge_table.createOrReplaceTempView("merge_table")
display(merge_table)
# COMMAND ----------
# MAGIC %md Instead of writing separate `INSERT` and `UPDATE` statements, we can use a `MERGE` statement.
# COMMAND ----------
# MAGIC %sql
# MAGIC MERGE INTO loan_by_state_delta as d
# MAGIC USING merge_table as m
# MAGIC on d.addr_state = m.addr_state
# MAGIC WHEN MATCHED THEN
# MAGIC UPDATE SET *
# MAGIC WHEN NOT MATCHED
# MAGIC THEN INSERT *
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Review current loans within the `loan_by_state_delta` Delta Lake table
# MAGIC select addr_state, sum(`count`) as loans from loan_by_state_delta group by addr_state
# COMMAND ----------
# MAGIC %md
# MAGIC ## Schema Evolution
# MAGIC With the `mergeSchema` option, you can evolve your Delta Lake table schema
# COMMAND ----------
# Generate new loans with dollar amounts -- the extra `amount` column makes
# this DataFrame's schema wider than the table's, setting up the schema
# enforcement / evolution demo below.
loans = sql("select addr_state, cast(rand(10)*count as bigint) as count, cast(rand(10) * 10000 * count as double) as amount from loan_by_state_delta")
display(loans)
# COMMAND ----------
# Let's write this data out to our Delta table.
# NOTE: this append is EXPECTED to fail -- Delta enforces the table schema
# and rejects writes whose schema does not match (see markdown below).
loans.write.format("delta").mode("append").save(DELTALAKE_SILVER_PATH)
# COMMAND ----------
# MAGIC %md **Note**: This command fails because the schema of our new data does not match the schema of our original data
# COMMAND ----------
# Add the mergeSchema option: opt in to schema evolution so the new
# `amount` column is merged into the table schema and the append succeeds.
loans.write.option("mergeSchema","true").format("delta").mode("append").save(DELTALAKE_SILVER_PATH)
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC select * from loan_by_state_delta
# COMMAND ----------
# MAGIC %md **Note**: With the `mergeSchema` option, we can merge these different schemas together.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Review current loans within the `loan_by_state_delta` Delta Lake table
# MAGIC select addr_state, sum(`amount`) as amount from loan_by_state_delta group by addr_state order by sum(`amount`) desc limit 10
# COMMAND ----------
# MAGIC %md ##  Let's Travel back in Time!
# MAGIC Databricks Delta’s time travel capabilities simplify building data pipelines for the following use cases.
# MAGIC
# MAGIC * Audit Data Changes
# MAGIC * Reproduce experiments & reports
# MAGIC * Rollbacks
# MAGIC
# MAGIC As you write into a Delta table or directory, every operation is automatically versioned.
# MAGIC
# MAGIC You can query by:
# MAGIC 1. Using a timestamp
# MAGIC 1. Using a version number
# MAGIC
# MAGIC using Python, Scala, and/or Scala syntax; for these examples we will use the SQL syntax.
# MAGIC
# MAGIC For more information, refer to [Introducing Delta Time Travel for Large Scale Data Lakes](https://databricks.com/blog/2019/02/04/introducing-delta-time-travel-for-large-scale-data-lakes.html)
# COMMAND ----------
# MAGIC %md ###  Review Delta Lake Table History
# MAGIC All the transactions for this table are stored within this table including the initial set of insertions, update, delete, merge, and inserts with schema modification
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE HISTORY loan_by_state_delta
# COMMAND ----------
# MAGIC %md ###  Time Travel via Version Number
# MAGIC Below are SQL syntax examples of Delta Time Travel by using a Version Number
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM loan_by_state_delta VERSION AS OF 0
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM loan_by_state_delta VERSION AS OF 9
# COMMAND ----------
# MAGIC %md ###  Delta Optimizations
# MAGIC How Databricks Delta optimizes for Performance
# COMMAND ----------
# MAGIC %md
# MAGIC ## VACUUM
# MAGIC
# MAGIC To save on storage costs you should occasionally clean up invalid files using the `VACUUM` command.
# MAGIC
# MAGIC Invalid files are small files compacted into a larger file with the `OPTIMIZE` command.
# MAGIC
# MAGIC The syntax of the `VACUUM` command is
# MAGIC >`VACUUM name-of-table RETAIN number-of HOURS;`
# MAGIC
# MAGIC The `number-of` parameter is the <b>retention interval</b>, specified in hours.
# MAGIC
# MAGIC <img alt="Caution" title="Caution" style="vertical-align: text-bottom; position: relative; height:1.3em; top:0.0em" src="https://files.training.databricks.com/static/images/icon-warning.svg"/> Databricks does not recommend you set a retention interval shorter than seven days because old snapshots and uncommitted files can still be in use by concurrent readers or writers to the table.
# MAGIC
# MAGIC The scenario here is:
# MAGIC 0. User A starts a query off uncompacted files, then
# MAGIC 0. User B invokes a `VACUUM` command, which deletes the uncompacted files
# MAGIC 0. User A's query fails because the underlying files have disappeared
# MAGIC
# MAGIC Invalid files can also result from updates/upserts/deletions.
# MAGIC
# MAGIC More details are provided here: <a href="https://docs.databricks.com/delta/optimizations.html#garbage-collection" target="_blank"> Garbage Collection</a>.
# MAGIC
# MAGIC Check the number of files before we vacuum.
# COMMAND ----------
# List the table's data files; VACUUM below deletes the ones no longer
# referenced by the transaction log.
display(dbutils.fs.ls('/ml/loan_by_state_delta'))
# COMMAND ----------
# Disable the 7-day retention safety check so the demo can run VACUUM with
# RETAIN 0 HOURS. Do NOT do this in production: concurrent readers/writers
# may still be using the files that get deleted.
spark.conf.set("spark.databricks.delta.retentionDurationCheck.enabled", False)
# COMMAND ----------
# MAGIC %sql
# MAGIC VACUUM loan_by_state_delta RETAIN 0 HOURS
# COMMAND ----------
# MAGIC %sh ls /dbfs/ml/loan_by_state_delta | wc -l
# COMMAND ----------
# MAGIC %md
# MAGIC ## Other Optimizations
# MAGIC ### Let's See How Databricks Delta Makes Spark Queries Faster!
# MAGIC
# MAGIC In this example, we will see how Databricks Delta can optimize query performance. We create a standard table using Parquet format and run a quick query to observe its latency. We then run a second query over the Databricks Delta version of the same table to see the performance difference between standard tables versus Databricks Delta tables.
# MAGIC
# MAGIC Simply follow these 4 steps below:
# MAGIC * __Step 1__ : Create a standard Parquet based table using data from US based flights schedule data
# MAGIC * __Step 2__ : Run a query to to calculate number of flights per month, per originating airport over a year
# MAGIC * __Step 3__ : Create the flights table using Databricks Delta and optimize the table.
# MAGIC * __Step 4__ : Rerun the query in Step 2 and observe the latency.
# MAGIC
# MAGIC __Note:__ _Throughout the example we will be building few tables with a 10s of million rows. Some of the operations may take a few minutes depending on your cluster configuration._
# COMMAND ----------
# MAGIC %sql
# MAGIC DROP TABLE IF EXISTS flights;
# MAGIC
# MAGIC -- Create a standard table and import US based flights for year 2008
# MAGIC -- USING Clause: Specify parquet format for a standard table
# MAGIC -- PARTITIONED BY clause: Organize data based on "Origin" column (Originating Airport code).
# MAGIC -- FROM Clause: Import data from a csv file.
# MAGIC CREATE TABLE flights
# MAGIC USING parquet
# MAGIC PARTITIONED BY (Origin)
# MAGIC SELECT _c0 as Year, _c1 as Month, _c2 as DayofMonth, _c3 as DayOfWeek, _c4 as DepartureTime, _c5 as CRSDepartureTime, _c6 as ArrivalTime,
# MAGIC _c7 as CRSArrivalTime, _c8 as UniqueCarrier, _c9 as FlightNumber, _c10 as TailNumber, _c11 as ActualElapsedTime, _c12 as CRSElapsedTime,
# MAGIC _c13 as AirTime, _c14 as ArrivalDelay, _c15 as DepartureDelay, _c16 as Origin, _c17 as Destination, _c18 as Distance,
# MAGIC _c19 as TaxiIn, _c20 as TaxiOut, _c21 as Cancelled, _c22 as CancellationCode, _c23 as Diverted, _c24 as CarrierDelay,
# MAGIC _c25 as WeatherDelay, _c26 as NASDelay, _c27 as SecurityDelay, _c28 as LateAircraftDelay
# MAGIC FROM csv.`dbfs:/databricks-datasets/asa/airlines/2008.csv`
# COMMAND ----------
# MAGIC %md
# MAGIC Once step 1 completes, the standard "flights" table contains details of US flights for a year.
# MAGIC
# MAGIC Next in Step 2, we run a query that get top 20 cities with highest monthly total flights on first day of week.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Get top 20 cities with highest monthly total flights on first day of week. & observe the latency!
# MAGIC -- This query may take over a minute in certain cluster configurations.
# MAGIC SELECT Month, Origin, count(*) as TotalFlights
# MAGIC FROM flights
# MAGIC WHERE DayOfWeek = 1
# MAGIC GROUP BY Month, Origin
# MAGIC ORDER BY TotalFlights DESC
# MAGIC LIMIT 20;
# COMMAND ----------
# MAGIC %md
# MAGIC Once step 2 completes, you can observe the latency with the standard "flights" table.
# MAGIC
# MAGIC In step 3 and step 4, we do the same with a Databricks Delta table. This time, before running the query, we run the `OPTIMIZE` command with `ZORDER` to ensure data is optimized for faster retrieval.
# COMMAND ----------
# MAGIC %sql
# MAGIC DROP TABLE IF EXISTS flights;
# MAGIC
# MAGIC -- Create a standard table and import US based flights for year 2008
# MAGIC -- USING Clause: Specify "delta" format instead of the standard parquet format
# MAGIC -- PARTITIONED BY clause: Organize data based on "Origin" column (Originating Airport code).
# MAGIC -- FROM Clause: Import data from a csv file.
# MAGIC CREATE TABLE flights
# MAGIC USING delta
# MAGIC PARTITIONED BY (Origin)
# MAGIC SELECT _c0 as Year, _c1 as Month, _c2 as DayofMonth, _c3 as DayOfWeek, _c4 as DepartureTime, _c5 as CRSDepartureTime, _c6 as ArrivalTime,
# MAGIC _c7 as CRSArrivalTime, _c8 as UniqueCarrier, _c9 as FlightNumber, _c10 as TailNumber, _c11 as ActualElapsedTime, _c12 as CRSElapsedTime,
# MAGIC _c13 as AirTime, _c14 as ArrivalDelay, _c15 as DepartureDelay, _c16 as Origin, _c17 as Destination, _c18 as Distance,
# MAGIC _c19 as TaxiIn, _c20 as TaxiOut, _c21 as Cancelled, _c22 as CancellationCode, _c23 as Diverted, _c24 as CarrierDelay,
# MAGIC _c25 as WeatherDelay, _c26 as NASDelay, _c27 as SecurityDelay, _c28 as LateAircraftDelay
# MAGIC FROM csv.`dbfs:/databricks-datasets/asa/airlines/2008.csv`;
# COMMAND ----------
# MAGIC %md
# MAGIC OPTIMIZE consolidates files and orders the Databricks Delta table data by DayofWeek under each partition for faster retrieval
# COMMAND ----------
# MAGIC %sql
# MAGIC OPTIMIZE flights ZORDER BY (DayofWeek);
# COMMAND ----------
# MAGIC %md
# MAGIC Step 4 : Rerun the query from Step 2 and observe the latency
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Get top 20 cities with highest monthly total flights on first day of week. & observe the latency!
# MAGIC -- This query may take over a minute in certain cluster configurations.
# MAGIC SELECT Month, Origin, count(*) as TotalFlights
# MAGIC FROM flights
# MAGIC WHERE DayOfWeek = 1
# MAGIC GROUP BY Month, Origin
# MAGIC ORDER BY TotalFlights DESC
# MAGIC LIMIT 20;
# COMMAND ----------
# MAGIC %md
# MAGIC The query over the Databricks Delta table runs much faster after `OPTIMIZE` is run. How much faster the query runs can depend on the configuration of the cluster you are running on, however should be **5-10X** faster compared to the standard table.
| true
|
0a130263d8e40df7bfaca48d84613f8c8a66b64e
|
Python
|
alyoshinaarina/vpl19
|
/chistyakova.py
|
UTF-8
| 70
| 3.484375
| 3
|
[] |
no_license
|
# Ask for the user's name and greet them.
n = input('What is your name? ')
# FIX: the original comma-separated print produced a double space after
# "meet you," (literal trailing space + print's separator); an f-string
# yields the intended single spacing.
print(f'Nice to meet you, {n} :)')
| true
|
19245158710b3bd7d512910287461abe3692094f
|
Python
|
Semeriuss/LiberNet
|
/hub/routes.py
|
UTF-8
| 5,806
| 2.734375
| 3
|
[] |
no_license
|
from flask import render_template, url_for, flash, redirect, request, session
from hub import app, db, bcrypt
from hub.forms import RegistrationForm, LoginForm, ReviewForm
from hub.functions import authorize, is_authorized, api_search
from flask_login import login_user, current_user, logout_user, login_required
# Route for home page
@app.route("/", methods=["GET", "POST"])
@app.route("/index")
def index():
if request.method == "GET":
return render_template("index.html", user=session.get("user"))
# Route for search page
@app.route("/search", methods=['POST', 'GET'])
@authorize
def search():
# Return just the search page if GET request
if request.method == 'GET':
return render_template('search.html')
# Get data from search bar
string = request.form.get("search_value", None)
if string == None or string == "":
flash("You must provide a term to search", 'danger')
return redirect(url_for('search'))
# format string for database search
string = "%{}%".format(string)
books = db.execute("SELECT * FROM books WHERE (isbn LIKE :isbn OR LOWER(title) LIKE LOWER(:title) OR LOWER(author) LIKE LOWER(:author) OR year LIKE :year)", {"isbn":string, "title":string, "author":string, "year":string}).fetchall()
if not len(books):
flash("Not found. Check your input and try again.", 'danger')
redirect(url_for('search'))
# For POST (searching files) return results of user's search on the same page
flash("The search results are below. Click the ISBN numbers to check the books in detail.", 'info')
return render_template("search.html", books=books)
# Route for explore page
@app.route("/explore", methods=['GET', 'POST'])
@authorize
def explore():
# Search database for top ten results from database based on average rating and in ascending order
displays = db.execute("SELECT * FROM books ORDER BY average_rating ASC FETCH FIRST 10 ROWS ONLY").fetchall()
# Lists and a function to store image links and isbn
myList = []
myIsbn = []
for display in displays:
book_info = api_search(display[3])
book_img = book_info[3]
isbn = display[3]
myList.append(book_img)
myIsbn.append(isbn)
# Return top ten rated books for GET request
if request.method == 'GET':
return render_template('explore.html', myList=myList, myIsbn=myIsbn )
# Helper routing function for getting details about a book from search page or explore page
@app.route("/about/<string:isbn>", methods=['GET', "POST"])
@authorize
def about(isbn):
# Review form setup
form = ReviewForm()
book = db.execute("SELECT * FROM books WHERE isbn=:isbn;", {"isbn": isbn}).fetchone()
if book is None:
flash("ISBN Not found on the database.", 'warning')
redirect(url_for('search'))
reviews = db.execute("SELECT * FROM reviews WHERE book_id= :book_id",{"book_id":book[0]}).fetchall()
# Check if user has already reviewed a book
current_user_review = db.execute("SELECT * FROM reviews WHERE book_id = :book_id and user_id = :user_id",{"book_id":book[0],"user_id":session.get("user")[0] }).fetchone()
# bool value to check if user has reviewed book
hasNotReviewed = False if current_user_review!=None else True
current_user = session.get("user")[1]
book_info = api_search(isbn) #returns a list containing page count, average rating, image, and description
# if user can reveiw add to database
if form.validate_on_submit():
rating = int(form.rating.data)
desc = form.description.data
flash("You've successfully posted a review.", 'success')
db.execute("INSERT INTO reviews (user_id,book_id,rating, description) VALUES (:user_id, :book_id, :rating, :description)",
{"user_id": session.get("user_id"), "book_id": book[0], "rating": rating, "description": desc})
db.commit()
return redirect(url_for('index'))
return render_template("explore.html", book=book, book_info=book_info, reviews=reviews, form=form, hasNotReviewed=hasNotReviewed, current_user=current_user)
# Route for sign up
@app.route("/register", methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode("utf-8")
db.execute("INSERT INTO users (username, email, password) VALUES (:uname, :email, :passkey)",
{"uname": form.username.data, "email": form.email.data, "passkey": hashed_password})
db.commit()
flash('Your account has been created! You are now able to log in!', 'success')
return redirect(url_for('login'))
return render_template("register.html", title='Register', form=form)
# Route for signing in
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = db.execute("SELECT * FROM users WHERE email = :email;",{"email":form.email.data}).fetchone()
if user and bcrypt.check_password_hash(user["password"], form.password.data):
session["user"] = user
session["user_id"] = user["id"]
next_page = request.args.get('next')
flash('You have successfully Logged In!', 'success')
return redirect(next_page) if next_page else redirect(url_for('search'))
else:
flash('Login Unsuccessful. Please Check Email and Password', 'danger')
return render_template("login.html", title="Login", form=form)
# Route for signing out
@app.route("/logout")
def logout():
session.pop("user", None)
flash('You have successfully Logged out!', 'success')
return redirect(url_for("index"))
| true
|
ba3823993a47dd73b6bc998e6529107a0d56e11a
|
Python
|
j2kun/rote
|
/rote/rote.py
|
UTF-8
| 2,600
| 2.828125
| 3
|
[] |
no_license
|
import sys
class Rote(object):
    """Tiny lifecycle runner: handlers are registered via decorator methods
    (setup, newdata, skipif, describe, foreach, teardown) and executed in
    order by run(). Failures and Ctrl-C trigger teardown before exiting.
    """

    def __init__(self):
        # One optional handler per lifecycle stage, filled in by the
        # decorator methods below.
        self.handlers = {
            'setup': None,
            'newdata': None,
            'foreach': None,
            'skipif': None,
            'describe': None,
            'teardown': None,
        }
        self.accumulator = []
        self.new_data = []

    def wrap_in_try(self, f, with_teardown=True):
        """Wrap *f* so that Ctrl-C exits cleanly and any other failure runs
        teardown (when requested) before re-raising."""
        def wrapped(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except KeyboardInterrupt:
                # FIX: the message had a pointless no-placeholder .format().
                print('\n\nQuitting after teardown...')
                # FIX: guard against no teardown handler being registered.
                if with_teardown and self.handlers['teardown']:
                    self.handlers['teardown'](self.accumulator)
                sys.exit(0)
            except:
                # Bare except is intentional: run teardown, then re-raise
                # whatever happened (including SystemExit).
                print('{} failed!'.format(f.__name__))
                if with_teardown and self.handlers['teardown']:
                    self.handlers['teardown'](self.accumulator)
                raise
        return wrapped

    def setup(self, f):
        """Register *f* to build the initial accumulator; returns *f* unchanged."""
        self.handlers['setup'] = self.wrap_in_try(f, with_teardown=False)
        return f

    def newdata(self, f):
        """Register *f* to produce the iterable of items to process."""
        self.handlers['newdata'] = self.wrap_in_try(f)
        return f

    def foreach(self, f):
        """Register *f* to be called as f(item, accumulator) for each item."""
        self.handlers['foreach'] = self.wrap_in_try(f)
        return f

    def teardown(self, f):
        """Register *f* to run once at the end (and on failure/interrupt)."""
        self.handlers['teardown'] = self.wrap_in_try(f, with_teardown=False)
        return f

    def skipif(self, f):
        """Register a predicate; items for which it is truthy are skipped."""
        self.handlers['skipif'] = self.wrap_in_try(f)
        return f

    def describe(self, f):
        """Register *f* to render an item for log messages.

        BUG FIX: the handler was stored under the misspelled key 'decribe',
        so run() never found it and describe() had no effect.
        """
        self.handlers['describe'] = self.wrap_in_try(f)
        return f

    def run(self):
        """Execute the registered lifecycle: setup -> newdata -> per-item
        skipif/foreach -> teardown."""
        setup = self.handlers['setup']
        newdata = self.handlers['newdata']
        foreach = self.handlers['foreach']
        skipif = self.handlers['skipif']
        describe = self.handlers['describe']
        teardown = self.handlers['teardown']
        self.accumulator = setup() if setup else []
        self.new_data = newdata(self.accumulator) if newdata else []
        for item in self.new_data:
            if skipif and skipif(item):
                if describe:
                    print('Skipping {}'.format(describe(item)))
                else:
                    print('Skipping {}'.format(item))
                # BUG FIX: previously fell through and still ran foreach on
                # the item it had just announced as skipped.
                continue
            if foreach:
                foreach(item, self.accumulator)
        if teardown:
            teardown(self.accumulator)
| true
|
1ec663808c13b421ebbe0af6cbbfb37278e47b26
|
Python
|
jsz14897502/eclass
|
/home/robot/robot_campus_news_info.py
|
UTF-8
| 1,372
| 2.75
| 3
|
[] |
no_license
|
"""
Version: 0.1
Author: lvtoo
e-mail: o@oouul.com
Date: 2018/11/25
"""
import requests
from bs4 import BeautifulSoup
from home.models import New
from datetime import datetime
def del_start_blank(str1):
    """Strip leading whitespace padding from *str1* and return the result.

    NOTE(review): each iteration tests for a single leading space but then
    drops SEVEN characters (str1[7:]); presumably the original literal was a
    multi-character padding run (e.g. full-width spaces or "&nbsp;") that got
    mangled in transit -- confirm before reusing this helper.
    """
    for i in range(100):  # hard cap so a pathological input cannot loop forever
        if str1.startswith(' '):
            str1 = str1[7:]
        else:
            return str1
times = 0  # number of news items inserted during this run
url = 'https://www.shmtu.edu.cn'
# Fetch the campus news index page and collect every list entry.
r = requests.get(url + '/news')
soup = BeautifulSoup(r.content, 'lxml')
div_tag = soup.find('div', class_='view-content')
all_li = div_tag.find_all('li')
for li in all_li:
    # Title and absolute URL of the article.
    a = li.find('a')
    title = a.string
    source = url + a['href']
    # Publication date: first 10 chars of the ISO timestamp ("YYYY-MM-DD").
    pub_date = li.find('span', class_='date-display-single')['content'][:10]
    pub_date = datetime.strptime(pub_date, '%Y-%m-%d')
    # Fetch the article page itself; index [5] picks the body container
    # (position-dependent on the site's markup -- fragile if layout changes).
    r = requests.get(source)
    soup = BeautifulSoup(r.content, 'lxml')
    div_tag = soup.find_all('div', class_='content')[5]
    text = div_tag.text
    text = del_start_blank(text)
    describe = text[:70]  # short preview/summary
    # find('img') returns None when there is no image; indexing None raises
    # TypeError, which we treat as "no image".
    try:
        img_src = div_tag.find('img')['src']
    except TypeError:
        img_src = ''
    # Skip articles already stored (deduplicate by title).
    obj = New.objects.filter(title=title)
    if not obj:
        new = New(title=title, public='SMU', source=source, text=text, type='news', pub_date=pub_date,
                  describe=describe, img_url=img_src)
        new.save()
        times += 1
print("已更新" + str(times) + "条校园动态。")
| true
|
45198c99be41613fbfca6d562f2bc3c2988c9879
|
Python
|
zeibou/Tapiocas
|
/tapiocas/text_reco.py
|
UTF-8
| 1,960
| 2.625
| 3
|
[] |
no_license
|
from enum import Enum
import pytesseract
import logging
# OCR ENGINE MODE: https://tesseract.patagames.com/help/html/T_Patagames_Ocr_Enums_OcrEngineMode.htm
class OEM(Enum):
    """Tesseract OCR engine mode, passed to the `--oem` CLI flag."""
    TESSERACT_ONLY = 0  # Run Tesseract only - fastest
    CUBE_ONLY = 1  # Run Cube only - better accuracy, but slower
    TESSERACT_CUBE_COMBINED = 2  # Run both and combine results - best accuracy
    DEFAULT = 3  # automatic
# PAGE SEG MODE: https://tesseract.patagames.com/help/html/T_Patagames_Ocr_Enums_PageSegMode.htm
class PSM(Enum):
    """Tesseract page segmentation mode, passed to the `--psm` CLI flag."""
    OSD_ONLY = 0  # Orientation and script detection only
    AUTO_OSD = 1  # Automatic page segmentation with orientation and script detection
    AUTO_ONLY = 2  # Automatic page segmentation, but no OSD, or OCR
    AUTO = 3  # Fully automatic page segmentation, but no OSD
    SINGLE_COLUMN = 4  # Assume a single column of text of variable sizes
    SINGLE_BLOCK_VERT_TEXT = 5  # Assume a single uniform block of vertically aligned text
    SINGLE_BLOCK = 6  # Assume a single uniform block of text
    SINGLE_LINE = 7  # Treat the image as a single text line
    SINGLE_WORD = 8  # Treat the image as a single word
    CIRCLE_WORD = 9  # Treat the image as a single word in a circle
    SINGLE_CHAR = 10  # Treat the image as a single character
    SPARSE_TEXT = 11  # Find as much text as possible in no particular order
    SPARSE_TEXT_OSD = 12  # Sparse text with orientation and script det
    RAW_LINE = 13  # Treat the image as a single text line, bypassing hacks that are Tesseract-specific
def find_text(image, oem=OEM.DEFAULT, psm=PSM.SPARSE_TEXT, whitelist=None):
    """Run Tesseract OCR on *image* and return the recognized text.

    oem/psm select the engine and page-segmentation modes; *whitelist*
    optionally restricts recognition to the given characters. Returns an
    empty string if Tesseract fails (the error is logged, not raised).
    """
    config = f"--oem {oem.value} --psm {psm.value}"
    if whitelist:
        config += f" -c tessedit_char_whitelist={whitelist}"
    logging.debug(f"looking for text with tesseract config: '{config}'")
    try:
        return pytesseract.image_to_string(image, config=config)
    except Exception as e:
        logging.exception(e)
        return ""
| true
|
453bf60182b49345cce2b7641aa881525b4ebce3
|
Python
|
wolegeyun/python_data_analyst
|
/python统计分析/2假设检验1 3种t检验.py
|
UTF-8
| 2,892
| 3.40625
| 3
|
[] |
no_license
|
#coding:utf8
# 2. Hypothesis testing
# A method in mathematical statistics for drawing inferences about a
# population from a sample:
#   - state a null hypothesis H0 about the population under study;
#   - pick a statistic whose distribution is known when H0 holds;
#   - compute the statistic from the observed sample and, at a preset
#     significance level, decide whether to reject or accept H0.
# Common tests: u-test, t-test, chi-square test, F-test, rank-sum test, etc.

# 1. t-test: is the difference between a sample mean and a population mean
#    significant? Requirements:
#    (1) a known population mean;
#    (2) a sample mean and that sample's standard deviation;
#    (3) the sample comes from a (near-)normal population.
# refs: https://wenku.baidu.com/view/7d36c04b2e3f5727a5e9620d.html
#       https://blog.csdn.net/m0_37777649/article/details/74938120

# 1.1 One-sample t-test: H0 is that the population mean equals 1 (or 2).
from scipy import stats
import numpy as np
np.random.seed(7654567)
# mean 5, std 10
rvs=stats.norm.rvs(loc=5,scale=10,size=(50,2))
#print stats.ttest_1samp(rvs, [1,2])

# 2.1 Two independent-sample t-test: H0 is that both population means are
#     equal. Check homogeneity of variance first, then run the test.
np.random.seed(12345678)
# two continuous random samples
rvs1=stats.norm.rvs(loc=5,scale=10,size=500)
rvs2=stats.norm.rvs(loc=5,scale=10,size=500)
print stats.ttest_ind(rvs1,rvs2)
# Ttest_indResult(statistic=0.26833823296238857, pvalue=0.788494433695651) -> do not reject H0
# Levene's test when unsure whether the variances are equal
stats.levene(rvs1,rvs2)
# If the variances are unequal (no homogeneity), set equal_var=False
print stats.ttest_ind(rvs1,rvs2,equal_var=False)
# For different means/variances/sample sizes set equal_var=False (Welch's
# t-test); it is never harmful to do so.

# 2.2 Paired (related) two-sample t-test
print stats.ttest_rel(rvs1,rvs2)
# One-sample test: whether the mean of a normal population matches the value
# stated in the null hypothesis.
# Two-sample test: H0 is that two normal populations share the same mean --
# Student's t-test when the variances are equal, Welch's test otherwise;
# called "unpaired"/"independent-sample" when the samples do not overlap.
# Paired / repeated-measures test: whether the difference between two
# measurements of the same quantity (e.g. tumour size before and after
# treatment) is zero.
# Also used to test whether a regression slope differs significantly from 0.
|
6ad3546ad7ba931824358a637a75089c0864de76
|
Python
|
yudh1232/Codeup-Algorithm
|
/1126 정수 계산기.py
|
UTF-8
| 260
| 3.65625
| 4
|
[] |
no_license
|
# Read two integers and print the result of each basic arithmetic operation
# (note: "/" is integer division, per the problem statement).
left, right = map(int, input().split())
print("{0} + {1} = {2}".format(left, right, left + right))
print("{0} - {1} = {2}".format(left, right, left - right))
print("{0} * {1} = {2}".format(left, right, left * right))
print("{0} / {1} = {2}".format(left, right, left // right))
print("{0} % {1} = {2}".format(left, right, left % right))
| true
|
b337a306a4162105d029a1d275c42ee3a219d0c9
|
Python
|
Zidoing/learning
|
/老男孩/xxx.py
|
UTF-8
| 628
| 3.171875
| 3
|
[] |
no_license
|
import re
# findall: all non-overlapping matches, returned as a list of strings
print re.findall('w\w{2}l', 'hello world')
print re.findall('[a-z]', 'adfdasf')
# negated character class: any char NOT in "com"
print re.findall('[^com]', 'com')
print re.findall('\w', 'com')
# search: first match only; .group() returns the matched text
print re.search('\w', 'com').group()
# alternation binds loosely: this matches 'f(as)' or 'ff'
print re.search('f(as)|ff', 'sdjkfasasff').group()
# named groups: (?P<name>...) are accessible via group('name')
ret = re.search('(?P<id>\d{3})(?P<name>\w{3})', 'dsafsf33343fda')
print ret.group()
print ret.group('id')
print ret.group('name')
# split on a literal separator
print re.split('1', '1sdjksal')
# set comprehension
print {i for i in range(10)}
class A(object):
    # computed attribute: reading a.per calls this getter
    @property
    def per(self):
        return 1
    # setter: assigning to a.per routes here instead of storing a value
    @per.setter
    def per(self, val):
        print val
a = A()
print a.per
a.per = 2
### python singleton pattern
| true
|
95ba3b99b1a055e3fa3a79ae5a91eadd0fde7193
|
Python
|
nyush-se-spring2021-forum/OurTieba
|
/ourtieba/configs/functions.py
|
UTF-8
| 2,468
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
import datetime
import functools
from contextlib import contextmanager
from flask import session, redirect
# For now, we just assume that all the sessions are not tampered with.
# Forgery may be possible, but it's the CSRF token's job to find it out
# (which we haven't implemented yet).
def login_required(f):
    """Decorate a view so it only runs for logged-in users.

    If the session carries no user id, the request is redirected to the
    login page instead of executing the view.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Guard clause: bounce anonymous visitors to the login page.
        if not session.get("Uid"):
            return redirect("/login")
        return f(*args, **kwargs)
    return wrapper
def admin_login_required(f):
    """Decorate a view so it only runs for logged-in admins.

    Same idea as login_required, but checks the admin id ("Aid") and
    redirects to the admin login page instead.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Guard clause: bounce non-admins to the admin login page.
        if not session.get("Aid"):
            return redirect("/admin/login")
        return f(*args, **kwargs)
    return wrapper
@contextmanager
def auto_scope(_session):
    """Context manager around a SQLAlchemy session with auto-commit/rollback.

    On a clean exit the session is committed; on any exception it is rolled
    back and the exception is PRINTED rather than re-raised (stdout is
    implicitly redirected to the logger file), so callers never see errors.

    :param _session: SQLAlchemy database session; must be truthy.
    :raises Exception: if no session was provided (not connected yet).
    """
    if not _session:
        raise Exception("Please connect to database first!")
    try:
        # Note the commit is INSIDE the try: a failing commit is also rolled
        # back and swallowed, same as a failure in the managed block.
        yield _session
        _session.commit()
    except Exception as e:
        _session.rollback()
        print(e)
def convert_time(ts: datetime.datetime):
    """Format a timestamp for display, relative to the current UTC time.

    Different year  -> "YYYY-MM-DD"
    Same year, different day -> "MM-DD"
    Same day        -> "Today HH:MM"
    """
    if ts.strftime("%Y") != datetime.datetime.utcnow().strftime("%Y"):
        return ts.strftime("%Y-%m-%d")
    month_day = ts.strftime("%m-%d")
    if month_day != datetime.datetime.utcnow().strftime("%m-%d"):
        return month_day
    return "Today " + ts.strftime("%H:%M")
if __name__ == '__main__':
    # Ad-hoc smoke check: format a fixed past timestamp.
    new = datetime.datetime(2021, 5, 9, 15, 0, 0)
    print(convert_time(new))
| true
|
29ca184d7dcda029a8824ce4b775fa047aff72d8
|
Python
|
shulhannur/Chat-App-Using-Parallel-and-Distributed-System
|
/client.py
|
UTF-8
| 1,734
| 2.65625
| 3
|
[] |
no_license
|
import socket
import threading
import tkinter
import tkinter.scrolledtext
from tkinter import simpledialog
# Address/port of the chat server; localhost for testing.
HOST = '127.0.0.1'
PORT = 5005
class Client:
    """Tkinter chat client: connects to the server, asks for a nickname, then
    runs the GUI and a receiver loop on separate threads.

    NOTE(review): self.receive, self.write, and self.stop are referenced
    below but not defined in this portion of the file -- the source appears
    truncated; confirm those methods exist before running.
    """
    def __init__(self,host,port):
        # TCP connection to the chat server.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        # Hidden root window so the nickname dialog can be shown alone.
        msg = tkinter.Tk()
        msg.withdraw()
        self.nickname = simpledialog.askstring("Nickname","Please Choose a nickname", parent=msg)
        # gui_done flags that widgets exist; running gates the receive loop.
        self.gui_done = False
        self.running = True
        # GUI and network receive run concurrently on their own threads.
        gui_thread = threading.Thread(target=self.gui_loop)
        receive_thread = threading.Thread(target=self.receive)
        gui_thread.start()
        receive_thread.start()
    def gui_loop(self):
        """Build the chat window and run the Tk main loop (blocks)."""
        self.win = tkinter.Tk()
        self.win.configure(bg="lightblue")
        self.chat_label = tkinter.Label(self.win, text = (self.nickname+" - Chat Windows"), bg="lightblue")
        self.chat_label.config(font=("Arial", 14))
        self.chat_label.pack(padx=25, pady=5) #padding
        # Scrollable chat history.
        self.text_area = tkinter.scrolledtext.ScrolledText(self.win)
        self.text_area.pack(padx=25, pady=5)
        self.text_area.config(state='disabled') # read-only: user cannot edit the chat history
        self.msg_label = tkinter.Label(self.win, text="Input Message:", bg="lightblue")
        self.msg_label.config(font=("Arial", 14))
        self.msg_label.pack(padx=25, pady=5)
        # Message composition box.
        self.input_area = tkinter.Text(self.win, height=5)
        self.input_area.pack(padx=20, pady=5)
        self.send_button = tkinter.Button(self.win, text="Send", command=self.write)
        self.send_button.config(font=("Arial", 14))
        self.send_button.pack(padx=25, pady=5)
        self.gui_done = True
        # Route window-close through self.stop for a clean shutdown.
        self.win.protocol("WM_DELETE_WINDOW", self.stop)
        self.win.title("Tubes SISTER - Chat Application *User: "+self.nickname+"*")
        self.win.mainloop()
#run
# Module-level side effect: constructing Client opens the TCP connection,
# prompts for a nickname, and starts the GUI and receiver threads.
client = Client(HOST,PORT)
| true
|
50151ec2201bc0cfddc017b57c73cb5096ee79c1
|
Python
|
Suchi-M/object-storage-extension-samples
|
/vmware-ose-common-test-suites/framework/libs/clients/s3_api_client.py
|
UTF-8
| 2,150
| 2.640625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
import boto3
import botocore
class S3APIClient(object):
    '''
    Thin wrapper around a boto3 S3 client whose credentials are derived from
    a vCloud-style auth_info dictionary.

    auth_info{source, type, username, password, tenant, token, app_id, api_key}
    type=basic:   username, password, tenant
    type=token:   token
    type=api_key: app_id, api_key
    '''

    def __init__(self,
                 endpoint,
                 auth_info,
                 verify=False,
                 retries=0,
                 region=None,
                 other_configs=None):
        retry_cfg = dict(max_attempts=retries)
        # Merge any extra botocore settings into the client Config.
        if isinstance(other_configs, dict):
            config = botocore.config.Config(retries=retry_cfg, **other_configs)
        else:
            config = botocore.config.Config(retries=retry_cfg)

        # Unless explicit keys are supplied, the generated auth key doubles
        # as both access key and secret key.
        key = self.gen_key(auth_info)
        self.__client = boto3.client(
            's3',
            aws_access_key_id=auth_info.get('access_key', key),
            aws_secret_access_key=auth_info.get('secret_key', key),
            endpoint_url=endpoint,
            region_name=region,
            verify=verify,
            config=config
        )

    @property
    def client(self):
        """The underlying boto3 S3 client."""
        return self.__client

    @staticmethod
    def gen_key(auth_info):
        """Build the credential string encoded from *auth_info* (a dict)."""
        if not isinstance(auth_info, dict):
            raise Exception("Wrong auth_info: %s" % auth_info)
        auth_type = auth_info.get('type', 'basic')
        default_source = 'API_KEY' if auth_type == 'api_key' else 'VCD'
        auth_source = auth_info.get('source', default_source)
        if auth_type == 'basic':
            return "%s/%s@%s:%s" % (auth_source, auth_info['username'],
                                    auth_info['tenant'], auth_info['password'])
        if auth_type == 'token':
            return "%s/%s" % (auth_source, auth_info['token'])
        if auth_type == 'api_key':
            return "%s/%s %s" % (auth_source, auth_info['app_id'], auth_info['api_key'])
        raise Exception("Auth type %s not supported!" % auth_type)
| true
|
f9d695a0f80854f156421e719997a874006846f6
|
Python
|
StBogdan/PythonWork
|
/Leetcode/1254.py
|
UTF-8
| 1,805
| 3.609375
| 4
|
[] |
no_license
|
from typing import List
class Solution:
    def closedIsland(self, grid: List[List[int]]) -> int:
        """Count islands of 0-cells fully enclosed by water (1-cells),
        i.e. islands that never touch the grid border.

        The grid is modified in place: visited land cells are flooded to 1.
        """
        rows, cols = len(grid), len(grid[0])

        def flood(r0: int, c0: int) -> bool:
            # Iterative flood fill; returns True when no filled cell
            # touches the border.
            enclosed = True
            stack = [(r0, c0)]
            while stack:
                r, c = stack.pop()
                if grid[r][c] != 0:
                    continue
                if r == 0 or c == 0 or r == rows - 1 or c == cols - 1:
                    enclosed = False
                grid[r][c] = 1
                for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 0:
                        stack.append((nr, nc))
            return enclosed

        count = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 0:
                    on_border = (r == 0 or c == 0 or
                                 r == rows - 1 or c == cols - 1)
                    if not on_border and flood(r, c):
                        count += 1
        return count
if __name__ == "__main__":
grid = [
[0, 0, 1, 1, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 1, 1, 0, 1, 1, 1, 0],
[1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 1, 1],
[1, 0, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 1, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 0, 1, 1, 0],
]
sol = Solution()
print(f"Ans: {sol.closedIsland(grid)}")
| true
|
d95a46d9c71f2b0dfc295f4ff048e6a329a1fbc2
|
Python
|
owkin/FLamby
|
/flamby/datasets/fed_isic2019/dataset_creation_scripts/color_constancy.py
|
UTF-8
| 1,223
| 2.953125
| 3
|
[
"CC-BY-NC-4.0",
"MIT"
] |
permissive
|
from __future__ import division
import cv2
import numpy
def color_constancy(img, power=6, gamma=None):
    """
    Preprocessing step to make sure that the images appear with similar
    brightness and contrast (Shades-of-Gray color constancy).

    See this [link](https://en.wikipedia.org/wiki/Color_constancy) for an
    explanation. Thank you to [Aman Arora](https://github.com/amaarora) for
    this [implementation](https://github.com/amaarora/melonama)

    Parameters
    ----------
    img: 3D numpy array, the original image
    power: int, degree of norm
    gamma: float, value of gamma correction (skipped when None)
    """
    original_dtype = img.dtype

    if gamma is not None:
        # Gamma-correct via a per-intensity lookup table on uint8 values.
        table = numpy.zeros((256, 1), dtype="uint8")
        for level in range(256):
            table[level][0] = 255 * pow(level / 255, 1 / gamma)
        img = cv2.LUT(img.astype("uint8"), table)

    img = img.astype("float32")
    # Minkowski p-norm of each color channel across the image.
    channel_means = numpy.mean(numpy.power(img, power), (0, 1))
    rgb_vec = numpy.power(channel_means, 1 / power)
    # Normalize the illuminant estimate and derive per-channel gains.
    rgb_vec = rgb_vec / numpy.sqrt(numpy.sum(numpy.power(rgb_vec, 2.0)))
    gains = 1 / (rgb_vec * numpy.sqrt(3))
    return numpy.multiply(img, gains).astype(original_dtype)
| true
|
66ee9d5bf9f1d0b87893ef76ce16b31cf8c37465
|
Python
|
ubercareerprep2019/Uber-Career-Prep-Homework-Ammon
|
/Assignment-2/part3.py
|
UTF-8
| 2,259
| 3.6875
| 4
|
[] |
no_license
|
# Graphs - Ex1
from collections import deque
class GraphNode:
    """A single vertex holding its payload value."""
    def __init__(self, data):
        self.data = data
class GraphWithAdjacencyList:
    """Undirected graph keyed by integers, stored as adjacency lists of
    GraphNode objects."""
    def __init__(self):
        self.__adj_nodes = {}   # GraphNode -> list of neighbouring GraphNodes
        self.__nodes = {}       # key -> GraphNode
    def add_node(self, key: int):
        """Create a vertex for *key* with an empty adjacency list."""
        g_key = GraphNode(key)
        self.__nodes[key] = g_key
        self.__adj_nodes[g_key] = []
    def remove_node(self, key: int):
        """Remove *key* and every edge referencing it.

        Bug fix: the original only deleted the node's own entries, leaving
        dangling GraphNode references inside the neighbours' adjacency lists.
        """
        if key in self.__nodes:
            g_key = self.__nodes.pop(key)
            for neighbour in self.__adj_nodes.pop(g_key):
                adj = self.__adj_nodes.get(neighbour)
                if adj is not None:
                    # Drop every occurrence (parallel edges are possible).
                    self.__adj_nodes[neighbour] = [n for n in adj if n is not g_key]
    def add_edge(self, node1: int, node2: int):
        """Add an undirected edge between two existing keys (no-op otherwise)."""
        if node1 in self.__nodes and node2 in self.__nodes:
            g_node1 = self.__nodes[node1]
            g_node2 = self.__nodes[node2]
            self.__adj_nodes[g_node1].append(g_node2)
            self.__adj_nodes[g_node2].append(g_node1)
    def remove_edge(self, node1: int, node2: int):
        """Remove one undirected edge between two existing keys."""
        if node1 in self.__nodes and node2 in self.__nodes:
            g_node1 = self.__nodes[node1]
            g_node2 = self.__nodes[node2]
            self.__adj_nodes[g_node1].remove(g_node2)
            self.__adj_nodes[g_node2].remove(g_node1)
    def get_adj_nodes(self, key: int):
        """Return the list of GraphNodes adjacent to *key*."""
        g_key = self.__nodes[key]
        if g_key not in self.__adj_nodes:
            return []
        return self.__adj_nodes[g_key]
# Module-level demo graph shared by get_dfs/get_bfs below.
graph = GraphWithAdjacencyList()
# Tests for the Graph.
# graph.add_node(5)
# graph.add_node(19)
# graph.add_node(12)
# graph.remove_node(19)
# graph.add_edge(5,12)
# graph.add_edge(18, 7)
# graph.remove_edge(19,2)
# graph.remove_node(901)
# [print(i.data) for i in graph.get_adj_nodes(5)]
# graph.remove_edge(12,5)
# Build a small demo graph: edges 2-0, 2-3, 2-1 and 0-1.
graph.add_node(2)
graph.add_node(0)
graph.add_node(1)
graph.add_node(3)
graph.add_edge(2,0)
graph.add_edge(2,3)
graph.add_edge(2,1)
graph.add_edge(0,1)
# Graphs - Ex2
def get_dfs(start_node_data, visited=None):
    """Depth-first traversal of the module-level ``graph``, printing each
    reachable node once.

    Bug fix: the original used a mutable default argument (``visited=set()``)
    which is shared across top-level calls — a second call would skip nodes
    visited by the first. The recursive call also relied on that shared
    default instead of passing ``visited`` along; it is now passed explicitly.
    """
    if visited is None:
        visited = set()
    print(start_node_data)
    visited.add(start_node_data)
    for node in graph.get_adj_nodes(start_node_data):
        if node.data not in visited:
            get_dfs(node.data, visited)
# print("DFS starting at 2")
# get_dfs(2)
# Graphs - Ex3
def get_bfs(start_node_data):
    """Breadth-first traversal of the module-level ``graph``, printing each
    reachable node exactly once in FIFO discovery order."""
    pending = deque([start_node_data])
    seen = set()
    while pending:
        current = pending.popleft()
        if current in seen:
            continue
        seen.add(current)
        print(current)
        pending.extend(neighbour.data for neighbour in graph.get_adj_nodes(current))
# print("BFS starting at 2")
# get_bfs(2)
| true
|
7f6cea04f09b257972330d75b3d9baac9711c26d
|
Python
|
jonathangriffiths/Euler
|
/Page1/EvenFibonacciSum.py
|
UTF-8
| 345
| 3.421875
| 3
|
[] |
no_license
|
__author__ = 'Jono'
def getEvenFibSum(max_number):
    """
    Return the sum of the even Fibonacci numbers not exceeding max_number
    (Project Euler problem 2).

    Bug fix: the original checked the loop bound *before* generating the next
    two terms, so a freshly generated even term larger than max_number could
    still be added (e.g. getEvenFibSum(7) returned 10 instead of 2). Every
    term is now bounds-checked before it is accumulated.
    """
    a = 1
    b = 1
    total = 0
    while True:
        # Advance one Fibonacci step: a takes the next term.
        a, b = b, a + b
        if a > max_number:
            break
        if a % 2 == 0:
            total += a
    return total
print getEvenFibSum(4000000)
| true
|
6467ba560234b906c89be46448d8eb917688af98
|
Python
|
PDXostc/rvi_big_data
|
/data_logger.py
|
UTF-8
| 8,757
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/python
#
# Copyright (C) 2014, Jaguar Land Rover
#
# This program is licensed under the terms and conditions of the
# Mozilla Public License, version 2.0. The full text of the
# Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
#
#
# A generic logger / reporter
#
import sqlite3
import Queue
import threading
import time
import dbus
# Command codes for the Logger worker-thread queue protocol (see Logger.run).
CMD_ADD_SUBSCRIPTION = 1
CMD_DELETE_SUBSCRIPTION = 2
# NOTE(review): CMD_ADD_SUBSCRIPTION is defined twice; this second binding (3)
# is the one actually used, so the value 1 above is dead — probably a
# copy/paste slip where the first line was meant to be a different command.
CMD_ADD_SUBSCRIPTION = 3
CMD_ADD_SAMPLE = 4
CMD_RETRIEVE_NEXT_SAMPLE = 5
CMD_DELETE_SAMPLE = 6
CMD_DELETE_ALL_SAMPLES = 7
CMD_DUMP_DATABASE = 8
CMD_SHUTDOWN = 9
class Logger(threading.Thread):
def __init__(self, db_file = '/var/tmp/big_data_demo.sql'):
threading.Thread.__init__(self)
self.db_file = db_file
self.queue = Queue.Queue()
self.subscriptions_loaded = False
self.subscriptions = {}
# Sqlite commands can only be used from the same thread that
# created the database connection to begin with.
# Hence the stupid thread solution
def run(self):
self.dbc = sqlite3.connect(self.db_file)
print "Starting logger at {}".format(self.db_file)
# Create the table that stores log data and index it on its timestamps
self.dbc.execute('''CREATE TABLE IF NOT EXISTS log (timestamp, channel, value)''')
self.dbc.execute('''CREATE INDEX IF NOT EXISTS ts_index on log (timestamp ASC)''')
# Create a table to store all our subscriptions so that they survive a
# system restert.
self.dbc.execute('''CREATE TABLE IF NOT EXISTS subscriptions (channel, interval)''')
# Retrieve all our subscriptions so that they are easily accessible
for subscription in self.dbc.execute('''SELECT channel, interval FROM subscriptions'''):
(channel, interval) = subscription
# Interval is the sample interval in sec.
# 0 is when the UTC of when last sample was made.
print "Adding subscription {}. Interval {}".format(channel, interval)
self.subscriptions[channel] = ( interval, 0 )
self.subscriptions_loaded = True
while True:
# Try to get a command sent from a member function
# call invoked by another thread.
elem = self.queue.get()
( command, arg ) = elem
if command == CMD_ADD_SUBSCRIPTION:
(channel, sample_interval) = arg
self.__add_subscription(channel, sample_interval)
elif command == CMD_DELETE_SUBSCRIPTION:
self.__delete_subscription(arg)
elif command == CMD_ADD_SAMPLE:
self.__add_sample(arg)
elif command == CMD_RETRIEVE_NEXT_SAMPLE:
# Arg is a queue object to send back the result over
self.__retrieve_next_sample(arg)
elif command == CMD_DELETE_SAMPLE:
# Arg is timestamp to delete
self.__delete_sample(arg)
elif command == CMD_DELETE_ALL_SAMPLES:
self.__delete_all_sample()
elif command == CMD_DUMP_DATABASE:
self.__dump_db()
elif command == CMD_SHUTDOWN:
print "Logger:run(): Exiting thread"
return True
else:
print "Logger.run(): Unknown command: {} ignored".format(command)
def shutdown(self):
self.queue.put((CMD_SHUTDOWN, True))
self.join()
def get_subscriptions(self):
while self.subscriptions_loaded == False:
sleep (0.1)
res = []
for channel in self.subscriptions:
(interval, tmp) = self.subscriptions[channel]
res.append((channel, interval))
return res
def add_subscription(self, channel, sample_interval):
self.queue.put((CMD_ADD_SUBSCRIPTION, (channel, sample_interval)))
def __add_subscription(self, channel, sample_interval):
if channel in self.subscriptions:
print "Called {} already in subscriptions. Ignored".format(channel)
return False
print "Adding {} to subscriptions. Interval {}".format(channel, sample_interval)
# Setup a new channel in the dictionary
self.subscriptions[channel] = (sample_interval, 0)
try:
self.dbc.execute('''INSERT INTO subscriptions VALUES (?, ?)''', (channel, sample_interval))
self.dbc.commit()
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
print "3"
return True
def delete_subscription(self, channel):
self.queue.put((CMD_DELETE_SUBSCRIPTION, channel))
def __delete_subscription(self, channel):
if not channel in self.subscriptions:
print "unsubscribe(): Channel {} not in subscriptions. Ignored".format(channel)
return False
# Remove from subscriptions
del self.subscriptions[channel]
self.dbc.execute('''DELETE FROM subscriptions WHERE channel=?''', (channel,))
return True
def add_sample(self, values):
self.queue.put((CMD_ADD_SAMPLE, values))
def __add_sample(self, values):
# If the channel is not among our subscriptions, then ignore.
# [ind for ind, elem in enumerate(self.subscriptions) if v[0] == 53]
# If it is not time for us to sample the given channel yet, then ignore
c_time = int(time.time())
print "add_sample({})".format(values)
for (channel, value) in values:
if not channel in self.subscriptions:
# print "add_sample({}): Not subscribed to. Ignored".format(channel)
continue
( sample_interval, last_sample_ts ) = self.subscriptions[channel]
# Skip if we have previously received a sample and
# the interval to the next sample has yet to elapse.
if last_sample_ts > 0 and c_time < last_sample_ts + sample_interval:
# print "add_sample({}): c_time < last_sample_ts={} + sample_interval={}. Skipped".format(c_time, last_sample_ts, sample_interval)
continue
print "add_sample({}): {}".format(channel, value)
# Store the sample
# Convert the value dictionary to a string.
self.dbc.execute('''INSERT INTO log VALUES (?, ?, ?)''', (c_time, channel, str(value)))
self.dbc.commit()
# Update the last sample timestamp
# print "Updating subscriptions[{}] with ({}, {})".format(channel, sample_interval, c_time)
self.subscriptions[channel] = ( sample_interval, c_time)
return True
# Retrieve all samples for the oldest time stamp in the database
# Return:
# False - no samples
# (timestamp, [ ( channel, value), ... ]) - Samples for the given timestamp
#
def retrieve_next_sample(self):
q = Queue.Queue()
self.queue.put((CMD_RETRIEVE_NEXT_SAMPLE, q))
# Wait for a reply to come back and return whatever it was
return q.get()
def __retrieve_next_sample(self, queue):
# Get the oldest timestamp that we have stored.
(ts, ) = self.dbc.execute('''SELECT min(timestamp) FROM log''').fetchone()
# If no timestamp, then we have no data in db.
if ts == None:
queue.put(False)
return False
res = []
# Retrieve all rows with a matching timestamp[
for row in self.dbc.execute('''SELECT channel, value FROM log where timestamp=?''', (ts,)):
# Convert value from string back to dict
res.append((row[0], eval(row[1])))
queue.put((ts, res))
return True
def delete_sample(self, timestamp):
self.queue.put((CMD_DELETE_SAMPLE, timestamp))
# Delete samples older than the given time stamp.
def __delete_sample(self, timestamp):
self.dbc.execute('''DELETE FROM log WHERE timestamp <= ?''', (timestamp,))
def delete_all_samples(self):
self.queue.put((CMD_DELETE_SAMPLE, True))
# Delete allsamples with the given timestamp.
def __delete_all_samples(self):
self.dbc.execute('''DELETE FROM log''')
def dump_db(self):
self.queue.put((CMD_DUMP_DATABASE, True))
def __dump_db(self):
print "LOG dump:"
for row in self.dbc.execute('''SELECT timestamp, channel, value FROM log'''):
print row
print "---"
print "Subscription dump:"
for row in self.dbc.execute('''SELECT * FROM subscriptions'''):
print row
print "---"
| true
|
0d0770d77de5cad2ad8f2d256589a558404303ef
|
Python
|
AthaG/Kata-Tasks
|
/5kyu/BestTravel_5kyu.py
|
UTF-8
| 1,933
| 3.9375
| 4
|
[] |
no_license
|
'''John and Mary want to travel between a few towns A, B, C ... Mary has on a sheet of paper
a list of distances between these towns. ls = [50, 55, 57, 58, 60]. John is tired of driving
and he says to Mary that he doesn't want to drive more than t = 174 miles and he will visit only 3 towns.
Which distances, hence which towns, they will choose so that the sum of the distances is the
biggest possible to please Mary and John?
Example:
With list ls and 3 towns to visit they can make a choice between: [50, 55, 57], [50, 55, 58],
[50, 55, 60], [50, 57, 58], [50, 57, 60], [50, 58, 60], [55, 57, 58], [55, 57, 60], [55, 58, 60],
[57, 58, 60].
The sums of distances are then: 162, 163, 165, 165, 167, 168, 170, 172, 173, 175.
The biggest possible sum taking a limit of 174 into account is then 173 and the distances of
the 3 corresponding towns is [55, 58, 60].
The function chooseBestSum ( or choose_best_sum or ... depending on the language) will take as
parameters t (maximum sum of distances, integer >= 0), k (number of towns to visit, k >= 1) and
ls (list of distances, all distances are positive or zero integers and this list has at least one
element). The function returns the "best" sum ie the biggest possible sum of k distances less than
or equal to the given limit t, if that sum exists, or otherwise nil, null, None, Nothing, depending
on the language.
With C++, C, Rust, Swift, Go, Kotlin, Dart return -1.
Examples:
ts = [50, 55, 56, 57, 58] choose_best_sum(163, 3, ts) -> 163
xs = [50] choose_best_sum(163, 3, xs) -> nil ( or null or ... or -1 (C++, C, Rust, Swift, Go)
ys = [91, 74, 73, 85, 73, 81, 87] choose_best_sum(230, 3, ys) -> 228
Note:
don't modify the input list ls'''
from itertools import combinations
def choose_best_sum(t, k, ls):
    """Return the largest sum of exactly *k* distances from *ls* that does
    not exceed *t*, or None when no k-combination fits.

    Bug fix: the original ended with ``return closest or None``, which mapped
    a legitimate best sum of 0 (all-zero distances) to None. A None sentinel
    is now used instead, so 0 is returned correctly. *ls* is not modified.
    """
    best = None
    for combo in combinations(ls, k):
        total = sum(combo)
        if total <= t and (best is None or total > best):
            best = total
    return best
| true
|
d139c1d4d3672813da9f0a4b6a504fa0ee8aec79
|
Python
|
zqy1/pythonCookbook
|
/thread/locals.py
|
UTF-8
| 465
| 3.34375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import threading
# Demo of threading.local(): each thread sees its own value for local.name.
local = threading.local()
local.name = "main"
def func():
    # Runs in the child thread; rebinds only that thread's copy of .name.
    local.name = "local"
    print local.name
t1 = threading.Thread(target=func)
t1.start()
t1.join()
print local.name
# The child thread first prints its own value ("local"); after join() the
# main thread prints its unchanged value ("main").
# threading.local ensures attributes set in one thread never replace those
# set in another — here, the main thread vs. the func thread.
| true
|
148804342781f073c197e6b8e477d60006b14556
|
Python
|
ginnyyang/MyPython
|
/learning_process/coroutine_test.py
|
UTF-8
| 1,112
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# A coroutine looks like a subroutine, but its execution can be suspended
# part-way through, control can switch to other subroutines, and it can later
# resume exactly where it left off.
def consumer():
    """Generator-based consumer: receives items via send(), prints each one,
    and yields the acknowledgement "200 OK" back. Ends on a falsy item."""
    reply = ''
    while True:
        item = yield reply
        if not item:
            return
        print('[CONSUMER] Consuming %s...' % item)
        reply = '200 OK'
def produce(c):
    """Driver: primes consumer *c*, feeds it the integers 1..5, prints each
    acknowledgement, then closes the generator."""
    c.send(None)
    for item in range(1, 6):
        print('[PRODUCER] Producing %s...' % item)
        ack = c.send(item)
        print('[PRODUCER] Consumer return: %s' % ack)
    c.close()
# Wire the pipeline together: create the consumer generator and drive it.
c=consumer()
produce(c)
#The consumer function is a generator. After passing a consumer into produce:
#
#First, c.send(None) starts the generator;
#
#Then, once something is produced, c.send(n) switches execution to consumer;
#
#consumer receives the message via yield, processes it, and sends the result back via yield;
#
#produce receives consumer's result and continues producing the next message;
#
#When produce decides to stop producing, it closes consumer via c.close() and the whole process ends.
#
#The whole flow is lock-free and runs on a single thread, with produce and consumer
#cooperating to finish the task — hence "coroutine" rather than preemptive threading.
| true
|
3e8a4f5b8b226f5349188528e87d6a5ae04b11c1
|
Python
|
sonivaidehi/IoTPracticals
|
/motion_test.py
|
UTF-8
| 381
| 3.078125
| 3
|
[] |
no_license
|
import RPi.GPIO as GPIO
import time
# Poll a motion sensor and pulse an output when motion is seen.
GPIO.setmode(GPIO.BCM)
GPIO.setup(15, GPIO.IN)    # sensor input on BCM pin 15 (presumably a PIR -- confirm wiring)
GPIO.setup(24, GPIO.OUT)   # indicator output on BCM pin 24 (e.g. an LED)
while True:
    input_state = GPIO.input(15)
    if input_state == True:
        print("Motion Detected")
        GPIO.output(24,True)   # pulse the output high for 1 second
        time.sleep(1)
        GPIO.output(24,False)
        time.sleep(4)          # then hold off for 4 seconds before re-polling
    else:
        print("No Motion Detected")
| true
|
1628259188b938fd0a7e580dea26f49283c4cd84
|
Python
|
IncubatorShokuhou/cpol_processing
|
/cpol_processing/filtering.py
|
UTF-8
| 6,682
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
"""
Codes for creating and manipulating gate filters.
@title: filtering
@author: Valentin Louf <valentin.louf@monash.edu>
@institutions: Monash University and the Australian Bureau of Meteorology
@date: 20/11/2017
.. autosummary::
:toctree: generated/
texture
do_gatefilter_cpol
do_gatefilter
filter_hardcoding
velocity_texture
"""
# Libraries
import pyart
import netCDF4
import numpy as np
def texture(data):
    """
    Compute the texture of data.

    Compares each value with its 3x3 neighborhood (wrap-around via np.roll,
    based on :cite:`Gourley2007`). NaN values in the original array have NaN
    textures; NaN neighbours are ignored in the averaging. (Wradlib function)

    Parameters:
    ==========
    data : :class:`numpy:numpy.ndarray`
        multi-dimensional array with shape (..., number of beams, number
        of range bins)

    Returns:
    =======
    texture : :class:`numpy:numpy.ndarray`
        array of textures with the same shape as data
    """
    # All 8 neighbour offsets (rows axis -2, columns axis -1), wrap-around.
    offsets = [(dr, dc)
               for dr in (-1, 0, 1)
               for dc in (-1, 0, 1)
               if (dr, dc) != (0, 0)]
    stacked = np.array([np.roll(np.roll(data, dr, -2), dc, -1)
                        for dr, dc in offsets])

    # Number of non-NaN neighbours per pixel (NaN diffs contribute nothing).
    valid_counts = np.sum(~np.isnan(stacked), axis=0)

    total = np.zeros(data.shape)
    for layer in stacked:
        delta = data - layer
        # A difference involving NaN is zeroed so it does not affect the sum.
        delta[np.isnan(delta)] = 0
        total += delta ** 2

    # Reinforce that NaN input values get NaN textures.
    total[np.isnan(data)] = np.nan
    return np.sqrt(total / valid_counts)
def do_gatefilter_cpol(radar, refl_name='DBZ', phidp_name="PHIDP", rhohv_name='RHOHV_CORR',
                       zdr_name="ZDR", snr_name='SNR'):
    """
    Filtering function adapted to CPOL.

    Parameters:
    ===========
        radar:
            Py-ART radar structure.
        refl_name: str
            Reflectivity field name.
        rhohv_name: str
            Cross correlation ratio field name.
        ncp_name: str
            Name of the normalized_coherent_power field.
        zdr_name: str
            Name of the differential_reflectivity field.

    Returns:
    ========
        gf_despeckeld: GateFilter
            Gate filter (excluding all bad data).
    """
    radar_start_date = netCDF4.num2date(radar.time['data'][0], radar.time['units'].replace("since", "since "))

    r = radar.range['data']
    azi = radar.azimuth['data']
    R, A = np.meshgrid(r, azi)

    refl = radar.fields[refl_name]['data'].copy()
    rho_corr = radar.fields[rhohv_name]['data']
    # Range-dependent RHOHV threshold: linear ramp from 0.8 at range 0 down
    # to 0.2 at 140 km; gates below it are blanked in a temporary field.
    fcut = -0.6 / 140e3 * R + 0.8
    refl[rho_corr < fcut] = np.NaN
    radar.add_field_like(refl_name, 'NDBZ', refl)

    gf = pyart.filters.GateFilter(radar)
    gf.exclude_invalid('NDBZ')
    gf.exclude_below(snr_name, 9)
    # Physically plausible bounds for ZDR and reflectivity.
    gf.exclude_outside(zdr_name, -3.0, 7.0)
    gf.exclude_outside(refl_name, -20.0, 80.0)

    # dphi = texture(radar.fields[phidp_name]['data'])
    # radar.add_field_like(phidp_name, 'PHITXT', dphi)
    # gf.exclude_above('PHITXT', 20)

    # NOTE(review): the RHOHV floor is only applied after 2007 — presumably a
    # data-quality change in the archive; confirm against processing notes.
    if radar_start_date.year > 2007:
        gf.exclude_below(rhohv_name, 0.5)

    # Remove rings in march 1999.
    if radar_start_date.year == 1999 and radar_start_date.month == 3:
        radar.add_field_like(refl_name, 'RRR', R)
        gf.exclude_above('RRR', 140e3)
        radar.fields.pop('RRR')

    gf_despeckeld = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gf)

    # Remove tmp fields.
    try:
        radar.fields.pop('NDBZ')
        # radar.fields.pop('PHITXT')
    except Exception:
        pass

    return gf_despeckeld
def do_gatefilter(radar, refl_name='DBZ', phidp_name="PHIDP", rhohv_name='RHOHV_CORR', zdr_name="ZDR", snr_name='SNR'):
    """
    Basic filtering function for dual-polarisation data.

    Parameters:
    ===========
        radar:
            Py-ART radar structure.
        refl_name: str
            Reflectivity field name.
        rhohv_name: str
            Cross correlation ratio field name.
        zdr_name: str
            Name of the differential_reflectivity field.
        snr_name: str
            Name of the signal-to-noise field (unused here, kept for a
            signature consistent with do_gatefilter_cpol).

    Returns:
    ========
        gf_despeckeld: GateFilter
            Gate filter (excluding all bad data).
    """
    gate_filter = pyart.filters.GateFilter(radar)

    # Drop physically implausible values first.
    gate_filter.exclude_outside(zdr_name, -6.0, 7.0)
    gate_filter.exclude_outside(refl_name, -20.0, 80.0)

    # Noise screening: high PHIDP texture and low cross-correlation.
    phidp_texture = texture(radar.fields[phidp_name]['data'])
    radar.add_field_like(phidp_name, 'PHITXT', phidp_texture)
    gate_filter.exclude_above('PHITXT', 20)
    gate_filter.exclude_below(rhohv_name, 0.6)

    despeckled = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gate_filter)

    # Drop the temporary PHIDP-texture field (best effort).
    try:
        radar.fields.pop('PHITXT')
    except Exception:
        pass

    return despeckled
def filter_hardcoding(my_array, nuke_filter, bad=-9999):
    """
    Hardcode a GateFilter into an array.

    Parameters:
    ===========
        my_array: array
            Array we want to clean out.
        nuke_filter: gatefilter
            Filter we want to apply to the data.
        bad: float
            Fill value.

    Returns:
    ========
        masked array: same as my_array but with every gate excluded by the
        filter masked out (the mask is baked in via the fill value).
    """
    # Mask the excluded gates, bake the mask in as `bad`, then re-mask on
    # that fill value so the result is a plain masked array.
    hard = np.ma.masked_where(nuke_filter.gate_excluded, my_array.copy())
    hard = hard.filled(fill_value=bad)
    return np.ma.masked_where(hard == bad, hard)
def velocity_texture(radar, vel_name='VEL'):
    """
    Compute velocity texture using the Bobby Jackson function in Py-ART.

    Parameters:
    ===========
    radar:
        Py-ART radar structure.
    vel_name: str
        Name of the (original) Doppler velocity field.

    Returns:
    ========
    vel_dict: dict
        Velocity texture field dictionary.
    """
    try:
        nyquist = radar.instrument_parameters['nyquist_velocity']['data'][0]
    except Exception:
        # Instrument parameters missing: fall back to the largest observed
        # Doppler speed as the Nyquist velocity estimate.
        nyquist = np.max(np.abs(radar.fields[vel_name]['data']))
    return pyart.retrieve.calculate_velocity_texture(radar, vel_name, nyq=nyquist)
| true
|
f1b30ceb0b8ad29855b06e01dbad48cf6b22736f
|
Python
|
amanuel43/chemgymrl
|
/tests/demo_chemistrygym.py
|
UTF-8
| 7,179
| 3.078125
| 3
|
[] |
no_license
|
'''
ChemistryGym Demo
:title: demo_chemistrygym.py
:author: Chris Beeler and Mitchell Shahen
:history: 2020-07-03
'''
# pylint: disable=invalid-name
# pylint: disable=protected-access
# pylint: disable=unused-import
# pylint: disable=wrong-import-order
# pylint: disable=wrong-import-position
# import all the required external modules
import gym
import numpy as np
import os
import pickle
import sys
from time import sleep
# ensure all necessary modules can be found
sys.path.append("../") # to access chemistrylab
sys.path.append("../chemistrylab/reactions") # to access all reactions
# import all local modules
import chemistrylab
'''
# -------------------- # REACTION BENCH DEMO # -------------------- #
__ = input("PRESS ENTER TO START REACTION BENCH.")
# Use a perfect policy
perfect = False
# Initialize the environment
r_env = gym.make('WurtzReact-v0')
render_mode = "human"
# Reset the environment to get initial state
# State [0] is seconds since the start of reaction
# State [1] is the thermostat temperature of the system
# State [2 and beyond] is the remaining amounts of each reactant
__ = r_env.reset()
r_env.render(mode=render_mode)
__ = input('PRESS ENTER TO CONTINUE REACTION BENCH.')
done = False
i = 0
# Game will play until: 20 steps are completed
total_reward = 0.0
while not done:
# Select random actions
# Actions:
# a[0] changes the temperature between -dT (a[0] = 0.0) and +dT (a[0] = 1.0)
# a[1] changes the Volume between -dV (a[1] = 0.0) and +dV (a[1] = 1.0)
# a[2:] adds between none (a[2:] = 0.0) and all (a[2:] = 1.0) of each reactant
if perfect:
if i == 0:
action = np.ones(r_env.action_space.shape)
action[1] = 0.0
else:
action = np.zeros(r_env.action_space.shape)
action[0] = 1.0
else:
action = r_env.action_space.sample()
# perform the action and update the reward
state, reward, done, __ = r_env.step(action)
total_reward += reward
# render the plot and wait before continuing
r_env.render(mode=render_mode)
sleep(2)
i += 1
show_stats = input("Show Reaction Vessel Stats ('Y'/'N') >>> ")
if show_stats.lower() in ["y", "yes"]:
# open and check the material dict
vessel_path = os.path.join(os.getcwd(), "react_vessel.pickle")
with open(vessel_path, 'rb') as open_file:
v = pickle.load(open_file)
print("")
print("---------- VESSEL ----------")
print("Label: {}".format(v.label))
print("")
print("---------- THERMODYNAMIC VARIABLES ----------")
print("Temperature (in K): {:e}".format(v.temperature))
print("Volume (in L): {:e}".format(v.volume))
print("Pressure (in kPa): {:e}".format(v.pressure))
print("")
print("---------- MATERIAL_DICT ----------")
for material, value_list in v._material_dict.items():
print("{} : {}".format(material, value_list))
print("")
print("---------- SOLUTE_DICT ----------")
for solute, value_list in v._solute_dict.items():
print("{} : {}".format(solute, value_list))
__ = input("PRESS ENTER TO CONTINUE")
# -------------------- # EXTRACT BENCH DEMO # -------------------- #
__ = input('PRESS ENTER TO START EXTRACT BENCH.')
# create a registered environment
e_env = gym.make('WurtzExtract-v1')
e_env.reset()
# render the initial state
e_env.render()
# queue and perform the Extraction Vessel's pour by volume action with a multiplier of 0.5
action = np.array([4, 2])
__, __, __, __ = e_env.step(action)
# render the resulting state
e_env.render()
__ = input('PRESS ENTER TO CONTINUE EXTRACT BENCH')
done = False
step_num = 0
total_reward = 0.0
while not done:
# select and perform a random action
# actions consist of arrays of two elements
# action[0] is a number indicating the event to take place
# action[1] is a number representing a multiplier for the event
# Actions and multipliers are included below:
# 0: Valve (Speed multiplier, relative to max_valve_speed)
# 1: Mix ExV (mixing coefficient, *-1 when passed into mix function)
# 2: Pour B1 into ExV (Volume multiplier, relative to max_vessel_volume)
# 3: Pour B2 into ExV (Volume multiplier, relative to max_vessel_volume)
# 4: Pour ExV into B2 (Volume multiplier, relative to default vessel volume)
# 5: Pour S1 into ExV (Volume multiplier, relative to max_vessel_volume)
# 6: Pour S2 into ExV (Volume multiplier, relative to max_vessel_volume)
# 7: Done (Multiplier doesn't matter)
action_space = e_env.action_space
action = action_space.sample()
# ensure atleast 5 steps are completed before the done exit action
if step_num < 5:
while action[0] == 7:
action_space = e_env.action_space
action = action_space.sample()
# perform the random action and update the reward
state, reward, done, __ = e_env.step(action)
total_reward += reward
# render each of the three plots
e_env.render()
sleep(2)
step_num += 1
__ = input("PRESS ENTER TO CONTINUE.")
'''
# -------------------- # DISTILLATION BENCH DEMO # -------------------- #
# Interactive demo: steps a registered distillation environment with random
# actions until the episode terminates, rendering after each step.
__ = input('PRESS ENTER TO START DISTILLATION BENCH.')
# create the registered distillation environment
d_env = gym.make('Distillation-v0')
d_env.reset()
# render the initial state
d_env.render()
# queue and perform the Boil Vessel's change heat action but add no heat
action = np.array([0, 5])
__, __, __, __ = d_env.step(action)
# render the results of the manual action
d_env.render()
__ = input('PRESS ENTER TO CONTINUE DISTILLATION BENCH')
done = False
step_num = 0
total_reward = 0.0
while not done:
    # select and perform a random action
    # actions consist of arrays of two elements
    # action[0] is a number indicating the event to take place
    # action[1] is a number representing a multiplier for the event
    # Actions and multipliers are included below:
    # 0: Add/Remove Heat (Heat value multiplier, relative of maximal heat change)
    # 1: Pour BV into B1 (Volume multiplier, relative to max_vessel_volume)
    # 2: Pour B1 into B2 (Volume multiplier, relative to max_vessel_volume)
    # 3: Pour B1 into BV (Volume multiplier, relative to max_vessel_volume)
    # 4: Pour B2 into BV (Volume multiplier, relative to max_vessel_volume)
    # 5: Done (Value doesn't matter)
    action_space = d_env.action_space
    action = action_space.sample()
    # ensure atleast 5 steps are completed before the done exit action
    if step_num < 5:
        while action[0] == 5:
            action_space = d_env.action_space
            action = action_space.sample()
    # perform the random action and update the reward
    print(action)
    state, reward, done, __ = d_env.step(action)
    print(reward)
    total_reward += reward
    # render each of the vessel plots
    d_env.render()
    sleep(2)
    step_num += 1
__ = input("PRESS ENTER TO EXIT")
| true
|
51f93becc7639b2805bcf946edfa206bca3afe49
|
Python
|
MFrassek/MyKoAutomationSuite
|
/utils/weekendParticipationPopulator.py
|
UTF-8
| 1,039
| 2.96875
| 3
|
[] |
no_license
|
from tablePopulator import TablePopulator
from weekendParticipation import WeekendParticipation
import os
import csv
class WeekendParticipationPopulator(TablePopulator):
    """Populates the WeekendParticipation table from per-weekend CSV files."""

    @classmethod
    def populate_table(cls):
        """Insert one WeekendParticipation row per (weekend, participant)."""
        for weekend_id, participant_name in cls.get_data_from_file():
            WeekendParticipation(
                weekend_id=weekend_id,
                participant_name=participant_name).add_to_db()

    @classmethod
    def get_data_from_file(cls):
        """Return [weekend_id, participant_name] pairs from every CSV in
        <data_path>/participants.

        The weekend id is the filename prefix before the first underscore;
        the participant name is CSV column index 8.  The first CSV row is
        treated as a header and skipped.
        """
        participants_dir = f"{cls.data_path}/participants"
        pairs = []
        for file_name in os.listdir(participants_dir):
            weekend_id = file_name.split("_")[0]
            with open(f"{participants_dir}/{file_name}",
                      encoding='latin1') as csv_file:
                rows = csv.reader(csv_file)
                next(rows)  # skip header row
                for row in rows:
                    pairs.append([weekend_id, row[8]])
        return pairs
| true
|
79c3e451b4d8c68f7e9d495eaec54febc99bc4f5
|
Python
|
shunyooo/MAZE
|
/GridMDP.py
|
UTF-8
| 5,748
| 2.984375
| 3
|
[] |
no_license
|
from MDP import MDP
from utils import argmax, vector_add, print_table # noqa
from grid import orientations, turn_right, turn_left
from pprint import pprint
import numpy as np, pandas as pd
import random
class GridMDP(MDP):
    # Differs from the base MDP by taking a `grid` argument.
    # grid: the reward for each state -- essentially map data, e.g.
    #   GridMDP([[-0.04, -0.04, -0.04, +1],
    #            [-0.04, None,  -0.04, -1],
    #            [-0.04, -0.04, -0.04, -0.04]],
    #           terminals=[(3, 2), (3, 1)])
    # A cell holding None is a wall.
    def __init__(self, grid, terminals, init=(0, 0), gamma=.9):
        MDP.__init__(self, init, actlist=orientations,
                     terminals=terminals, gamma=gamma)
        self.grid = grid
        self.rows = len(grid)
        self.cols = len(grid[0])
        # Register every non-wall cell as a state and record its reward.
        for x in range(self.cols):
            for y in range(self.rows):
                self.reward[y, x] = grid[y][x]
                if self.state_check((y, x)):
                    self.states.add((y, x))

    def state_check(self, state):
        """Return True iff `state` lies on the grid and is not a wall."""
        y, x = state
        if y < 0 or x < 0 or self.rows-1 < y or self.cols-1 < x:
            return False
        if self.grid[y][x] is None:  # None marks a wall
            return False
        return True

    # For state `state` and action `action`, return the list of
    # (probability, next_state) transition tuples.
    def T(self, state, action):
        if action is None:
            # No action taken: stay in place.
            return [(0.0, state)]
        else:
            # With probability 0.8 move in the intended direction,
            # 0.1 each for the directions to its left and right.
            # Transitions leading into walls/off-grid are dropped.
            list1 = []
            acts = [action, turn_right(action), turn_left(action)]
            pros = [0.8, 0.1, 0.1]
            for (a, p) in zip(acts, pros):
                if self.state_check([x+y for (x, y) in zip(state, a)]):
                    list1.append((p, self.go(state, a)))
            return list1

    # Return the state reached by moving in `direction`; if the target is
    # not a valid state, stay in place.
    def go(self, state, direction):
        state1 = vector_add(state, direction)
        return state1 if state1 in self.states else state

    # Reward for taking `action` in `state`: the reward of the reached
    # state, or 0.0 when the move does not change the state.
    def R(self, state, action):
        n_state = self.go(state, action)
        if n_state == state:
            return 0.0
        return self.reward[n_state]

    # Actions available in `state` (overridden for this problem so that
    # actions leading into walls are excluded; terminals allow only None).
    def actions(self, state):
        if state not in self.states:
            print("状態値が不正です。:{0}\n{1}にあるべきです".format(state, self.states))
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError here -- consider raising ValueError explicitly.
            raise
        if state in self.terminals:
            return [None]
        else:  # exclude actions that would move into a wall
            return [a for a in self.actlist if self.state_check([x+y for (x, y) in zip(state, a)])]
# ________________________________________________________________________________
# ______________________________________________________________________________
def printGrid(grid, mark=None):
    """Print a maze: ``■`` for wall cells (None), ``□`` for open cells.

    `mark` is accepted for interface compatibility but is currently unused.
    """
    for row in grid:
        line = "".join("■" if cell is None else "□" for cell in row)
        print(line)
def printGridByStates(tuples, rows, cols, mark=None):
    """Print a rows x cols maze from a collection of valid states.

    Cells whose (y, x) appears in `tuples` are open (□); all others are
    walls (■).  The cell equal to `mark` is highlighted instead: ☆ when
    it is an open cell, ★ when it is a wall.
    """
    wall = "■"
    road = "□"
    star_road = "☆"
    star_wall = "★"
    for y in range(rows):
        for x in range(cols):
            if (y, x) in tuples:
                print(star_road if (y, x) == mark else road, end="")
            else:
                # BUG FIX: the original referenced the undefined name
                # `wall_road` here (NameError whenever `mark` was a wall
                # cell); the highlighted-wall glyph is `star_wall`.
                print(star_wall if (y, x) == mark else wall, end="")
        print()
def toArrow(action):
    """Map a unit move vector to its arrow glyph; None maps to "☆".

    Unknown actions yield None, matching the original fall-through.
    """
    arrows = {
        (1, 0): "↓",
        (0, 1): "→",
        (-1, 0): "↑",
        (0, -1): "←",
        None: "☆",
    }
    return arrows.get(action)
def printPi(pi, rows=13, cols=10):
    """Print a policy as an arrow grid.

    `pi` maps (y, x) -> action vector (rendered via toArrow); cells with
    no policy entry are drawn as walls (■).

    Cleanup: removed the unused `road` local, the dead commented-out
    header-printing code, and the redundant `.keys()` membership test.
    """
    wall = "■"
    for y in range(rows):
        for x in range(cols):
            if (y, x) in pi:
                print(toArrow(pi[(y, x)]), end="")
            else:
                print(wall, end="")
        print()
| true
|
d6a2aa3b1fa34c97b94551707d8e25f8c05ba217
|
Python
|
AlexandervVugt/DarkTimesMississippi
|
/gui/dice.py
|
UTF-8
| 1,496
| 3.3125
| 3
|
[] |
no_license
|
import main
def setup():
    # Processing entry point: load the six face images and reset state.
    global dice1, dice2, cube, active
    one = loadImage("one.png")
    two = loadImage("two.png")
    three = loadImage("three.png")
    four = loadImage("four.png")
    five = loadImage("five.png")
    six = loadImage("six.png")
    # Both dice start showing a six.
    dice1 = six
    dice2 = six
    cube = [one, two, three, four, five, six]
    # active == True means the player may still roll.
    active = True
def draw():
    # Processing frame callback: draws a green background, the two dice
    # face frames/images, and the roll/continue button.
    global dice1, dice2
    background(0, 255, 0)
    square(width/2 - 200, height/2 - 75, 150)
    square(width/2 + 50, height/2 - 75, 150)
    rect(width/2 - 225, 3*height/4 - 50, 450, 80, 10)
    fill(0)
    image(dice1, width/2 - 200, height/2 - 75, 150, 150)
    image(dice2, width/2 + 50, height/2 - 75, 150, 150)
    textAlign(CENTER)
    textSize(32)
    # Button label depends on whether the player has rolled yet.
    buttonText = "Click here to roll the dices" if active else "Click here to continue"
    text(buttonText, width/2, 3*height/4)
    fill(255)
def roll():
    """Roll both dice: draw two uniform faces in 1..6, update the face
    images, and store their sum in the global `result`."""
    global dice1, dice2, cube, active, result
    active = False
    # BUG FIX: the original used int(random(len(cube) + 1)), i.e. a value
    # in [0, 7), so int() could yield 0 (indexing cube[-1] while adding 0
    # to the total) and the faces were not uniformly distributed.
    # Processing's random(high) returns a float in [0, high); drawing an
    # index in [0, len(cube)) and adding 1 gives a fair 1..6 die.
    res1 = int(random(len(cube))) + 1
    res2 = int(random(len(cube))) + 1
    dice1 = cube[res1 - 1]
    dice2 = cube[res2 - 1]
    result = res1 + res2
def mousePressed():
    # Button hit-test; the first click rolls, the second click commits the
    # rolled `result` to the game controller and leaves the dice scene.
    # NOTE(review): range(width/2 - 225, ...) relies on integer division,
    # i.e. Processing.py's Python 2 runtime -- confirm the target runtime.
    if mouseX in range(width/2 - 225, width/2 + 225) and mouseY in range(3*height/4 - 50, 3*height/4 + 30):
        if active:
            roll()
        else:
            setup()
            main.gameController.getTurnInfo().setSteps(result)
            main.currentScene.pop()
def keyTyped():
    # Keyboard input is ignored in this scene.
    return
def keyPressed():
    # Keyboard input is ignored in this scene.
    return
| true
|
5b3dab18f8bd2a7969bc678b88b5945acbc07ae5
|
Python
|
sfull14/bshares
|
/flaskblog.py
|
UTF-8
| 2,206
| 2.9375
| 3
|
[] |
no_license
|
from flask import Flask, render_template, url_for, flash, redirect # import Flask class
from forms import RegistrationForm, LoginForm
app = Flask(__name__) # setting app = instance of Flask class
app.config['SECRET_KEY'] = 'b54e04d10d8a6dadfec52f3671b9e0c6'
#Key: the code blocks in the HTML code get evaluated but are not included in the page source
#Key: template inheritance -- blocks are portions that child templates (home, about) can override in parent template (layout)
posts = [
{
'author': 'Sean Fuller',
'title': 'Post 1',
'content': 'First post content',
'date_posted': '9/1/2019'
},
{
'author': 'SFuller',
'title': 'Post 2',
'content': 'Second post content',
'date_posted': '9/1/2019'
}
]
@app.route("/") # handles complicated back end stuff...this allows us to write a function that returns the text.../ is just home page
@app.route("/home") # creates the home page at http://localhost:5000/
def home():
# return "<h1>Home Page</h1>" #h1 tasks are HTML heading tags
return render_template('home.html', posts=posts) #has to be in a folder called 'Templates'!!
@app.route("/about") # creates an about page at homepage/about (i.e. http://localhost:5000/about)
def about():
# return "<h1>About Page</h1>" #h1 tasks are HTML heading tags
return render_template('about.html', title='About') #has to be in a folder called 'Templates'!!
@app.route("/register", methods=["GET", "POST"]) # needs to accept post requests
def register():
form = RegistrationForm()
if form.validate_on_submit():
flash(f'Account created for {form.username.data}!', 'success') #bootstrap method
return redirect(url_for('home')) #this is the NAME of THE FUNCTION
return render_template('register.html', title='Register', form=form) #form keyword renders RegistrationForm class instance from forms.py
@app.route("/login") # creates registration page at register/about (i.e. http://localhost:5000/register)
def login():
form = LoginForm()
return render_template('login.html', title='Login', form=form) #form keyword renders RegistrationForm class instance from forms.py
if __name__ == '__main__': # only True if we run this script directly...can't import
app.run(debug=True)
| true
|
aa4692b3eabd0dddf0cf2df1fd7ce4b0c7822767
|
Python
|
Hironobu-Kawaguchi/atcoder
|
/atcoder/iroha2019_day3_e.py
|
UTF-8
| 439
| 3.015625
| 3
|
[] |
no_license
|
# E - 「く」
# https://atcoder.jp/contests/iroha2019-day3/tasks/iroha2019_day3_e
# Read N lines, run-length encode the sequence of lines, then count pairs
# of run lengths (taken two at a time from the front) that are equal.
N = int(input())
C = [input() for _ in range(N)]
ans = 0
nums = []
# Run-length encode C.  The '/' sentinel never matches a real line, so
# nums[0] is always the initial 0 -- it participates in the pairing below.
tmpc = '/'
tmpn = 0
for c in C:
    if c == tmpc:
        tmpn += 1
    else:
        nums.append(tmpn)
        tmpn = 1
        tmpc = c
nums.append(tmpn)
ans = 0
# Compare runs pairwise: (nums[0], nums[1]), (nums[2], nums[3]), ...
for i in range(0, len(nums)//2 * 2, 2):
    if nums[i] == nums[i+1]:
        ans += 1
print(ans)
| true
|
a2c3631679b5065af05ca23a9365f912df4246e6
|
Python
|
wpli/dialog
|
/tbh_info_datetime/src/tbh_info_datetime.py
|
UTF-8
| 1,427
| 3.296875
| 3
|
[] |
no_license
|
## Datetime Information
## * functions:
## o get_current_time: returns datetime.time object
## o get_current_date: returns datetime.date object
## o get_current_datetime: returns datetime.datetime object
import sys
import datetime
sys.path.append( "../../tbh_api/src" )
from tbh_api import *
#=========================================================================
##
# Description:
# Weather object
# This represents the weather on a day
@tbh_api_callable_class
class date_time_t( object ):
    ##
    # Description:
    #    Wrapper holding a single datetime.datetime value so it can be
    #    exposed through the tbh_api callable interface.
    @tbh_api_callable_method
    def __init__( self, date_time=None ):
        ##
        # Description:
        #    The wrapped datetime.datetime instance (None until assigned).
        self.date_time = date_time
@tbh_api_callable
def get_current_datetime():
    # Return the current local time as a datetime.datetime, wrapped
    # through date_time_t for the API layer and then unwrapped.
    date_time = date_time_t()
    date_time.date_time = datetime.datetime.now()
    return date_time.date_time
@tbh_api_callable
def get_current_date():
    # Current local date (datetime.date).
    return get_current_datetime().date()
@tbh_api_callable
def get_current_time():
    # Current local time of day (datetime.time).
    return get_current_datetime().time()
#=========================================================================
if __name__ == "__main__":
print "datetime: %s" % get_current_datetime()
print "date: %s" % get_current_date()
print "time: %s" % get_current_time()
| true
|
0ceaf215b1ae7ced6c270c95c2900c15f6289f5a
|
Python
|
lzc978/digraph
|
/logger_manage.py
|
UTF-8
| 688
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
import logging
loggers = []
class LogProxy:
    """Logging proxy so the library's logger can be swapped at runtime.

    The wrapped logger's level methods are re-exposed directly as
    instance attributes (info/debug/warning/error).
    """

    def __init__(self, logger):
        self.logger = logger
        self.info = logger.info
        self.debug = logger.debug
        self.warning = logger.warning
        self.error = logger.error
def _get_logger(logger_name):
    """Build a LogProxy for the named logger and register it in the
    module-level `loggers` list (so logger_unify can retarget it later)."""
    proxy = LogProxy(logging.getLogger(logger_name))
    loggers.append(proxy)
    return proxy
def logger_unify(logger_proxy):
    """Point every registered proxy at `logger_proxy`'s underlying logger."""
    target = logger_proxy.logger
    for proxy in loggers:
        proxy.logger = target
        proxy.info = target.info
        proxy.debug = target.debug
        proxy.warning = target.warning
        proxy.error = target.error
| true
|
8609502b8f94b1fb256655af4f033a1520acbdfe
|
Python
|
andremgbr/Sudoku_Selenium
|
/main.py
|
UTF-8
| 982
| 2.875
| 3
|
[] |
no_license
|
import numpy as np
board = np.array( [[0,7,0,3,0,0,0,0,9],
[5,0,0,0,0,0,8,0,0],
[3,0,1,0,4,9,0,5,0],
[0,0,0,0,0,0,0,0,0],
[9,3,0,2,0,0,4,0,0],
[1,8,0,6,0,0,9,0,7],
[8,4,0,9,0,0,0,0,0],
[0,2,0,0,0,4,0,1,0],
[0,0,0,0,0,0,5,0,0] ])
def encontra_zero(board):
    """Return the (row, col) of the first empty cell (value 0) scanning
    the 9x9 board row-major, or None when the board is full."""
    for linha in range(9):
        for coluna in range(9):
            if board[linha][coluna] == 0:
                return linha, coluna
    return None
def resolve(board):
    """Solve the sudoku `board` in place by backtracking.

    Returns the (mutated) board when a solution is found, or None when
    the current partial assignment cannot be completed.

    BUG FIX: the original ignored the recursive call's result and
    unconditionally reset the cell afterwards, so any solution found was
    immediately undone and never reported (it only printed a debug 'z').
    Success must be propagated up without backtracking.
    """
    pos = encontra_zero(board)
    if pos is None:
        return board  # no empty cell left: solved
    linha, coluna = pos
    for num in range(1, 10):
        if valido(board, linha, coluna, num):
            board[linha][coluna] = num
            if resolve(board) is not None:
                return board  # propagate success without undoing
            board[linha][coluna] = 0  # backtrack
    return None
def valido(board, linha, coluna, num):
    """True iff `num` may legally be placed at (linha, coluna): it must
    not already appear in that row, that column, or the 3x3 box."""
    if num in board[linha]:
        return False
    if any(board[r][coluna] == num for r in range(9)):
        return False
    box_r = (linha // 3) * 3
    box_c = (coluna // 3) * 3
    for r in range(box_r, box_r + 3):
        for c in range(box_c, box_c + 3):
            if board[r][c] == num:
                return False
    return True
| true
|
34b66a7615421013977bcfab7078861dff8f4db9
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02927/s887960331.py
|
UTF-8
| 284
| 3.171875
| 3
|
[] |
no_license
|
M,D = map(int, input().split())
def seki(n):
    """For a number whose first two decimal digits are d1 and d2, return
    d1 * d2 when both digits are >= 2, else -1 (used to match a "Product
    Day" day value against the month)."""
    digits = str(n)
    d1, d2 = int(digits[0]), int(digits[1])
    return d1 * d2 if d1 >= 2 and d2 >= 2 else -1
res = 0
# Count "Product Days": dates (m, d) with 1 <= m <= M and 11 <= d <= D
# whose day digits are both >= 2 and whose product equals the month.
for m in range(1,M+1):
    for d in range(11,D+1):
        if m==seki(d):
            res += 1
print(res)
| true
|
b3eea8030bb9916d710c16e7d77635b9a91e537a
|
Python
|
thoppe/DeepMDMA
|
/src/build_descriptions.py
|
UTF-8
| 911
| 3.0625
| 3
|
[] |
no_license
|
import glob, os
import pandas as pd
images_per_line = 4
F_JPG = glob.glob("../results/images/*.jpg")
data = []
# Index every result image: channel name is the filename prefix before
# the first underscore, unit number `n` is the trailing integer.
for f in sorted(F_JPG):
    name = os.path.basename(f)
    item = {}
    item['f'] = f
    item['channel'] = name.split('_')[0]
    item['n'] = int(name.split('_')[-1].split('.')[0])
    data.append(item)
df = pd.DataFrame(data).sort_values("n")
# Emit one markdown gallery page per channel, images_per_line per row.
for channel,dfx in df.groupby("channel"):
    f_save = f'display_{channel}.md'
    with open(f_save, 'w') as FOUT:
        FOUT.write(f"# {channel}\n")
        for k in range(dfx.n.min(), dfx.n.max(), images_per_line):
            FOUT.write(f"{channel}:{k} - {channel}{k+images_per_line}\n\n")
            for n in range(k, k+images_per_line):
                f_image = f'../results/images/{channel}_3x3_pre_relu_{n}.jpg'
                # NOTE(review): `f_image` is computed but never used, and
                # the literal below is a lone space -- this looks like a
                # markdown image embed lost in transit; verify against the
                # original repository before relying on the output.
                img = f" "
                FOUT.write(img)
            FOUT.write('\n\n')
| true
|
2c664f55ba4eef2ddf190eb23dd34ed996677749
|
Python
|
sanyam-dev/hello-world
|
/Expense.py
|
UTF-8
| 2,345
| 3.734375
| 4
|
[] |
no_license
|
global expense
expense = []
Months = ["January" ,"February", "March" ,"April", "May","June", "July", "August","September", "October", "Novemeber", "December"]
response = ["Y","Yes","YES","yes","y"]
n = int(input("No. of Months:"))
if n in range(13):
pass
else:
print("ERROR MESSAGE!")
def add_data(month,exp_data):
    # Append a [month, expense] record to the global list and return it.
    # NOTE(review): exp_data arrives as a raw input() string elsewhere in
    # this script and is stored unconverted -- confirm intended type.
    expense.append([month,exp_data])
    return(expense)
def track_input(num1):
    """Search the global `expense` list for records whose amount equals
    `num1` and print the month of every match.

    BUG FIX: the original printed "Expense NOT Found" once per
    NON-matching record (via a dead `int1 = 1903; if int1 == 1903` guard
    that was always true) instead of printing it once, and only when no
    record matched at all.
    """
    found = False
    for month, amount in expense:
        if num1 == amount:
            print("Expense Found for Month:", month)
            found = True
    if not found:
        print("Expense NOT Found")
def Refund_data(num1,month1):
    # Subtract refund amount `num1` from the stored expense of `month1`.
    # NOTE(review): several suspect behaviours to confirm --
    #  * returns after the FIRST matching record only;
    #  * prints "NO ENTRY FOUND!" for every non-matching record it scans
    #    before a match, not just when nothing matched;
    #  * both operands come from input() strings elsewhere in the script,
    #    so the subtraction raises TypeError unless converted first.
    for i in range(len(expense)):
        if month1 == expense[i][0]:
            expense[i][1] = expense[i][1] - num1
            return(expense)
        else:
            print("NO ENTRY FOUND!")
def exp_input(n):
    # Prompt for an expense amount for each of the first n calendar
    # months, filling the global `expense` list with
    # [month_name, amount_string] records (amounts stay as strings).
    for i in range(n):
        expense.append([])
        for j in [0,1]:
            if j == 0:
                expense[i].append(Months[i])
                print("Month:",expense[i][0])
            else:
                expense[i].append(input("Expense:"))
exp_input(n)
def compare(month1,month2):
    # Print the spending difference between two months.
    # NOTE(review): extra = exp1 - exp2 (month1 minus month2), but the
    # message says the extra was spent "in month2" -- the wording appears
    # inverted relative to the computation; confirm intent.
    exp1 = 0
    exp2 = 0
    for i in range(len(expense)):
        if month1 == expense[i][0]:
            exp1 = int(expense[i][1])
        if month2 == expense[i][0]:
            exp2 = int(expense[i][1])
    extra = exp1 - exp2
    print("You Spent",extra,"extra in",month2,"as compared to",month1)
if input("Do You wanna compare expenses?") in response:
month1 = input("Month1 =")
month2 = input("Month2 =")
compare(month1,month2)
else:
pass
if input("Do You Want to Append The Expense List ?") in response:
month = input("Month:")
exp_data = input("Expense:")
add_data(month,exp_data)
else:
pass
if input("Do You Want to Track Expense") in response:
data = input("Enter tracking data:")
track_input(data)
if input("Did You Get Any Refund?") in response:
Refund_input = input("Refund Amount:")
Refund_month = input("Month:")
Refund_data(Refund_month,Refund_input)
if n >= 5 and input("Do You Want to know your quareterly expense ") in response:
sum = 0
for i in range(len(expense)):
sum = sum + int(expense[i][1])
print("Rs.",sum,"is your quarterly expense.")
else:
pass
| true
|
86d5c80246a6d758ade689c0aecdc23769d71a0a
|
Python
|
ashishtrehan/market_data
|
/stock_data.py
|
UTF-8
| 831
| 3.0625
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup as bs
def get_historical_data(name, number_of_days):
    # Scrape the Yahoo Finance price-history table for ticker `name` and
    # return at most `number_of_days` of the most recent rows, each as
    # {TICKER: {date, open, high, low, adj_close, volume}}.
    def scrap(number:int):
        # Column 0 is the date string; other columns are numeric and may
        # contain thousands separators.
        if number == 0:
            return divs[number].span.text
        else:
            return float(divs[number].span.text.replace(',',''))
    keys = ['date','open','high','low','adj_close','volume']
    data = []
    url = "https://finance.yahoo.com/quote/" + name + "/history/"
    response = requests.get(url)
    rows = bs(response.content,"html.parser").findAll('table')[0].tbody.findAll('tr')
    for each_row in rows:
        divs = each_row.findAll('td')
        # Dividend rows carry no OHLC data; skip them.
        if divs[1].span.text != 'Dividend':
            vals = [scrap(x) for x in range(len(keys))]
            data.append({'{0}'.format(name.upper()):dict(zip(keys,vals))
            })
    return data[:number_of_days]
| true
|
062bfa15a4f1148087f952e2ca818f575b3b618f
|
Python
|
KevinJeon/The-Tragedy-of-the-commons
|
/models/RuleBasedAgent.py
|
UTF-8
| 3,322
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
import random
import numpy as np
from tocenv.components.agent import Action
from models.Agent import Agent
from tocenv.components.observation import NumericObservation
def softmax(x):
    """Numerically stable softmax over the ndarray `x`.

    BUG FIX: the original normalized the max-shifted numerator by
    sum(exp(x)) (UNshifted), so every output was scaled by exp(-max(x))
    and the result did not sum to 1.  Numerator and denominator must use
    the same shifted exponentials.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
class RuleBasedAgent(Agent):
    """Hand-coded agent that walks toward nearby apples using a
    distance-discounted weight map computed over its observation."""

    def __init__(self, agent_type):
        super(RuleBasedAgent, self).__init__()
        self.color = agent_type
        # Fixed (y, x) position of the agent within its own observation.
        self.agent_pos = (9, 5)
        # Preference weights for own-colour vs other-colour apples.
        self.main_favorable = 3
        self.sub_favorable = 1

    def act(self, obs) -> np.ndarray:
        """Sample a move from the softmax of the four directional scores;
        when almost no weight is visible, rotate/wander instead."""
        weight = self.get_weight_matrix(obs=obs)
        score = self.get_direction_score(weight=weight)
        score_softmax = softmax(score)
        choice = random.choices([Action.Move_Up, Action.Move_Down, Action.Move_Left, Action.Move_Right],
                                weights=score_softmax, k=1)
        if abs(np.sum(score)) < 0.05:
            choice = random.choices([Action.Rotate_Right, Action.Rotate_Left, Action.Move_Down], weights=[0.5, 0.5, 0.3], k=1)
        return choice[0]

    def get_weight_matrix(self, obs) -> np.ndarray:
        """Weight each apple cell by main_favorable * 0.5**manhattan_dist
        from the agent's fixed position."""
        weight = np.zeros(shape=np.array(obs).shape, dtype=np.float32)
        for y, row in enumerate(obs):
            for x, data in enumerate(row):
                dist = abs(self.agent_pos[0] - y) + abs(self.agent_pos[1] - x)
                if data in [NumericObservation.BlueApple, NumericObservation.RedApple]:
                    weight[y][x] += self.main_favorable * (0.5 ** dist)
        return weight

    def get_direction_score(self, weight) -> np.ndarray:
        """Sum the weight mass above/below/left/right of the agent.

        Returns [UP, DOWN, LEFT, RIGHT].  BUG FIX: the LEFT entry used
        `score[2] = data` (plain assignment), so it held only the last
        weight seen instead of accumulating like the other directions;
        it now uses `+=`.
        """
        score = np.zeros(shape=4, dtype=np.float32)
        for y, row in enumerate(weight):
            for x, data in enumerate(row):
                if y < self.agent_pos[0]:
                    score[0] += data
                if y > self.agent_pos[0]:
                    score[1] += data
                if x < self.agent_pos[1]:
                    score[2] += data
                if x > self.agent_pos[1]:
                    score[3] += data
        return score

    def get_favorable(self, item):
        """Preference weight for an apple type given this agent's colour."""
        if self.color == 'red':
            if item == NumericObservation.RedApple:
                return self.main_favorable
            else:
                return self.sub_favorable
        elif self.color == 'blue':
            if item == NumericObservation.BlueApple:
                return self.main_favorable
            else:
                return self.sub_favorable
class RuleBasedAgentGroup(object):
    # Multi-agent wrapper exposing the act/train interface expected by the
    # training loop, backed by independent RuleBasedAgent instances.
    def __init__(self,
                 name,
                 obs_dim,
                 action_dim,
                 device,
                 batch_size):
        super(RuleBasedAgentGroup, self).__init__()
        self.name = name
        self.batch_size = batch_size
        self.device = device
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        # NOTE(review): iterating the string 'blue' creates one agent per
        # CHARACTER ('b', 'l', 'u', 'e'), i.e. four agents whose
        # agent_type is a single letter -- confirm this is intended.
        self.agents = [RuleBasedAgent(
            agent_type,
        ) for agent_type in 'blue']

    def act(self, obses, sample=False):
        # Query each agent with its own observation; return the joint
        # action as an array (`sample` is accepted but unused here).
        joint_action = []
        for iter_obs, agent in zip(obses, self.agents):
            joint_action.append(agent.act(iter_obs))
        return np.array(joint_action)

    def train(self, memory, total_step, logger=None):
        # Rule-based agents do not learn.
        pass
| true
|
19bac33768f960bb16898b47b98095ec3241f4ba
|
Python
|
swkaen/Raspberry_pi_RC_using_bottle
|
/morse_code.py
|
UTF-8
| 2,173
| 2.84375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
class Morse_Code:
morse_code_jp = {u"イ":"il",u"ロ":"ilil",u"ハ":"lii",u"ニ":"lili",u"ホ":"lii",u"ヘ":"i",u"ト":"iilii",
u"チ":"iili",u"リ":"lli",u"ヌ":"iiii",u"ル":"lilli",u"ヲ":"illl",
u"ワ":"lil",u"カ":"ilii",u"ヨ":"ll",u"タ":"li",u"レ":"lll",u"ソ":"llli",
u"ツ":"illi",u"ネ":"llil",u"ナ":"ili",u"ラ":"iii",u"ム":"l",
u"ウ":"iil",u"ヰ":"iliil",u"ノ":"iill",u"オ":"iliii",u"ク":"iiil",u"ヤ":"ill",u"マ":"liil",
u"ケ":"lill",u"フ":"llii",u"コ":"llll",u"エ":"lilll",u"テ":"ilill",
u"ア":"llill",u"サ":"lilil",u"キ":"lilii",u"ユ":"liill",u"メ":"liiil",u"ミ":"iilil",u"シ":"llili",
u"ヱ":"illii",u"ヒ":"lliil",u"モ":"liili",u"セ":"illli",u"ス":"lllil",
u"ン":"ilili",u"゛":"ii",u"゜":"iilli",
u"一":"illll",u"二":"iilll",u"三":"iiill",u"四":"iiiil",u"五":"iiiii",u"六":"liiii",u"七":"lliii",u"八":"lllii",u"九":"lllli",
u"〇":"lllll",u"ー":"illil"}
    def __init__(self,led,span):
        # led: GPIO pin number driving the LED (board numbering is set by
        # the caller); span: dot duration in seconds.
        self.led = led
        self.span = span
    def led_setup(self):
        # Configure the LED pin as an output.
        GPIO.setup(self.led, GPIO.OUT)
    def morse_test(self):
        # Debug helper: print the code string for a single character.
        a = self.morse_code_jp[u"ネ"]
        print a
    def tanten(self):
        # Dot: LED on for one span, then off for one span.
        GPIO.output(self.led, GPIO.HIGH)
        time.sleep(self.span)
        GPIO.output(self.led, GPIO.LOW)
        time.sleep(self.span)
    def tyouten(self):
        # Dash: LED on for three spans, then off for one span.
        GPIO.output(self.led, GPIO.HIGH)
        time.sleep(self.span * 3)
        GPIO.output(self.led, GPIO.LOW)
        time.sleep(self.span)
    def morse_convert(self,kotoba):
        # Blink `kotoba` (a katakana string) in Wabun code: 'i' in the
        # table means dot, 'l' means dash.  An extra 2-span gap separates
        # letters; a space in the input produces a 6-span word gap.
        for w_f in kotoba:
            if w_f != u" ":
                mo = self.morse_code_jp[w_f]
                for w_s in mo:
                    if w_s == "i":
                        self.tanten()
                    elif w_s == "l":
                        self.tyouten()
                time.sleep(self.span * 2)
            elif w_f == u" ":
                time.sleep(self.span * 6)
if __name__ == '__main__':
    # Demo: blink a short katakana message on board pin 37, 0.2 s per dot.
    m = Morse_Code(37, 0.2)
    GPIO.setmode(GPIO.BOARD)
    m.led_setup()
    m.morse_convert(u"コ゛メンネ")
    GPIO.cleanup()
| true
|
d39ff4ca4b119afaa55c84447ce95d879fd0ceb7
|
Python
|
sayan82/python-programs
|
/Question_2.py
|
UTF-8
| 429
| 4.4375
| 4
|
[] |
no_license
|
#Q2.Write a program to accept ‘n’ numbers from user , store these numbers into an array. Find out maximum and minimum number from an Array.¶
list=[]
z=True
while(z):
n=int(input("Enter a number:\n"))
list.append(n)
a=input("do want to enter a number(y/n):\n")
if a!="y":
z=False
print("your list is:\n")
for i in list:
print(i)
print("max is :\n",max(list),"\n min is:\n",min(list))
| true
|
dc4f9c8c83de916e14f2dfb699a6933afae60dbf
|
Python
|
zhuifengshen/xmind-utils
|
/testcase/main.py
|
UTF-8
| 3,306
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import logging
import sys
from testcase.testlink import xmind_to_testlink_json_file, xmind_to_testlink_xml_file
from testcase.utils import get_absolute_path
from testcase.zentao import xmind_to_zentao_csv_file
"""
A tool to parse xmind file into testcase file, which will help you
generate a testlink recognized xml file or a zentao recognized cvs file,
then you can import it into testlink as test suites or zentao as testcases.
Usage:
xmind2testcase [path_to_xmind_file] [-csv] [-xml]
Example:
xmind2testcase /path/to/testcase.xmind => output testcase.csv、testcase.xml
xmind2testcase /path/to/testcase.xmind -csv => output testcase.csv
xmind2testcase /path/to/testcase.xmind -xml => output testcase.xml
"""
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)s %(levelname)s [%(module)s - %(funcName)s]: %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
def main():
    # Demo entry point: convert the bundled template XMind file into all
    # three supported output formats (zentao csv, testlink xml/json).
    # NOTE(review): the csv conversion receives the RELATIVE path while
    # the xml/json conversions receive the absolute path -- confirm.
    xmind_file = 'doc/xmind_testcase_template.xmind'
    xmind_file_path = get_absolute_path(xmind_file)
    logging.info('Start to convert XMind file: %s', xmind_file_path)
    zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
    logging.info('Convert XMind file to zentao csv file successfully: %s', zentao_csv_file)
    testlink_xml_file = xmind_to_testlink_xml_file(xmind_file_path)
    logging.info('Convert XMind file to testlink xml file successfully: %s', testlink_xml_file)
    testlink_json_file = xmind_to_testlink_json_file(xmind_file_path)
    logging.info('Convert XMind file to testlink json file successfully: %s', testlink_json_file)
def cli_main():
    """Command-line entry: xmind2testcase <file.xmind> [-json|-xml|-csv].

    With no format flag, all three outputs are generated.  Anything else
    prints the module usage text.
    """
    if len(sys.argv) > 1 and sys.argv[1].endswith('.xmind'):
        xmind_file = sys.argv[1]
        xmind_file_path = get_absolute_path(xmind_file)
        logging.info('Start to convert XMind file: %s', xmind_file_path)
        if len(sys.argv) == 3 and sys.argv[2] == '-json':
            testlink_json_file = xmind_to_testlink_json_file(xmind_file_path)
            logging.info('Convert XMind file to testlink json file successfully: %s', testlink_json_file)
        elif len(sys.argv) == 3 and sys.argv[2] == '-xml':
            testlink_xml_file = xmind_to_testlink_xml_file(xmind_file_path)
            logging.info('Convert XMind file to testlink xml files successfully: %s', testlink_xml_file)
        elif len(sys.argv) == 3 and sys.argv[2] == '-csv':
            # NOTE(review): this branch passes the raw argument while the
            # others pass the absolute path -- confirm intended.
            zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
            logging.info('Convert XMind file to zentao csv file successfully: %s', zentao_csv_file)
        else:
            testlink_json_file = xmind_to_testlink_json_file(xmind_file_path)
            testlink_xml_file = xmind_to_testlink_xml_file(xmind_file_path)
            zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
            logging.info('Convert XMind file successfully: \n'
                         '1、 testlink json file(%s)\n'
                         '2、 testlink xml file(%s)\n'
                         '3、 zentao csv file(%s)',
                         testlink_json_file,
                         testlink_xml_file,
                         zentao_csv_file)
    else:
        print(__doc__)
        logging.error('%s', __doc__)
if __name__ == '__main__':
main()
| true
|
37170923a38cd2a28eef571a2ad585178a9b7239
|
Python
|
MTamPham/dublin-bikes
|
/db-all-exploration.py
|
UTF-8
| 3,615
| 2.921875
| 3
|
[] |
no_license
|
'''
Author: Tam M Pham
Created date: 22/11/2018
Modified date: 03/01/2019
Description:
Plotting distribution of activity throughout the week
Finding the most 10th busy and least 10th busy stations
'''
import os
import numpy as np
import pandas as pd
import calendar
import time
from common import Common
import matplotlib.pyplot as plt
start = time.time()
Common.create_folder(Common.PLOTS_DIR)
# get the relative path of the prepared (cleaned) data file
rel_path = os.path.relpath(Common.CLEAN_DATA_FILE_FULL_PATH)
# read the CSV using pandas, parsing the Date column as datetimes
df = pd.read_csv(rel_path, delimiter = ",", parse_dates=["Date"])
# Top-10 stations by total check-ins / check-outs (merged back onto the
# full frame; computed for reference, not printed).
top_check_ins = pd.DataFrame(df.groupby(df["Address"])["Check In"].sum().sort_values(ascending=False).head(10))
top_check_ins = pd.merge(top_check_ins, df, on="Address")
top_check_outs = pd.DataFrame(df.groupby(df["Address"])["Check Out"].sum().sort_values(ascending=False).head(10))
top_check_outs = pd.merge(top_check_outs, df, on="Address")
# Total activity (check-ins + check-outs) per station.
total_activity = df.copy()
total_activity["Total Activity"] = total_activity["Check In"] + total_activity["Check Out"]
total_activity = total_activity.groupby(total_activity["Address"])["Total Activity"].sum()
top_activity = total_activity.copy().sort_values(ascending=False).head(10)
print("Top 10 busiest stations:")
print(top_activity)
bot_activity = total_activity.copy().sort_values().head(10)
print("Top 10 quiest stations:")
print(bot_activity)
##############################################################
################# FIND AVERAGE USAGE PER DAY #################
##############################################################
# Box plots of mean check-ins / check-outs per weekday across stations.
avg_ci_usage_day = df.copy()
avg_ci_usage_day = avg_ci_usage_day.groupby(["Number", "Name", "Weekday"])["Check In"].mean()
avg_ci_usage_day = avg_ci_usage_day.unstack()
avg_ci_usage_day.boxplot(column=Common.SHORT_WEEKDAY_ORDER)
plt.title("")
plt.suptitle("") # get rid of the default title of box plotting
plt.ylabel("avg_cin")
plt.savefig(Common.PLOTS_DIR + "/avg_ci_usage_day.png")
plt.gcf().clear()
avg_co_usage_day = df.copy()
avg_co_usage_day = avg_co_usage_day.groupby(["Number", "Name", "Weekday"])["Check Out"].mean()
avg_co_usage_day = avg_co_usage_day.unstack()
avg_co_usage_day.boxplot(column=Common.SHORT_WEEKDAY_ORDER)
plt.title("")
plt.suptitle("") # get rid of the default title of box plotting
plt.ylabel("avg_cout")
plt.savefig(Common.PLOTS_DIR + "/avg_co_usage_day.png")
plt.gcf().clear()
##############################################################
##################### FIND USAGE PER DAY #####################
##############################################################
# Box plot of total activity per weekday across stations.
usage_day = df.copy()
usage_day["Total Activity"] = usage_day["Check In"] + usage_day["Check Out"]
usage_day = usage_day.groupby(["Number", "Name", "Weekday"])["Total Activity"].sum()
usage_day = usage_day.unstack()
usage_day.boxplot(column=Common.SHORT_WEEKDAY_ORDER)
plt.title("Distribution of activity throughout the week")
plt.suptitle("") # get rid of the default title of box plotting
plt.ylabel("Activity")
plt.savefig(Common.PLOTS_DIR + "/usage_day.png")
plt.gcf().clear()
end = time.time()
print("Done exploration after {} seconds".format((end - start)))
| true
|
5695e3a3b24c4063aeab9046688531a615a383b4
|
Python
|
sksam-Encoder/pythonCourse
|
/35Later/SumOfN.py
|
UTF-8
| 108
| 3.515625
| 4
|
[] |
no_license
|
def sumOf(n):
    """Return the sum 1 + 2 + ... + n (0 when n < 1).

    BUG FIX / robustness: the original recursed with no guard for n < 1,
    so sumOf(0) or a negative argument hit the recursion limit; the
    closed-form Gauss formula also removes the depth limit for large n.
    """
    if n < 1:
        return 0
    return n * (n + 1) // 2
print(sumOf(5))
| true
|
766bcbabdb67d6ce59a04fa64c1ed75c55a98f2a
|
Python
|
nolfonzo/rebrained
|
/tree.py
|
UTF-8
| 497
| 3.265625
| 3
|
[] |
no_license
|
def traverse(node):
    """Pre-order traversal: print each node's value, root before children.

    >>> node1=Node(1)
    >>> node2=Node(2)
    >>> node3=Node(3,node1,node2)
    >>> node4=Node(4)
    >>> node5=Node(5,node3,node4)
    >>> traverse(node5)
    5
    3
    1
    2
    4
    """
    # BUG FIX: the doctest block above had lost its triple-quote
    # delimiters (leaving bare `>>>` lines at statement level -- a syntax
    # error), and the Python 2 `print` statement is now a py2/py3-
    # compatible call.
    if node is None:
        return
    print(node.value)
    traverse(node.left)
    traverse(node.right)

class Node:
    """Binary tree node holding a value and optional left/right children."""
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

if __name__ == "__main__":
    import doctest
    doctest.testmod()
| true
|
8e88dbbfcd84f340c802c6c135623d8962adc50c
|
Python
|
amirnafisa/decon4lindt
|
/detection_unit/drawing_tools.py
|
UTF-8
| 681
| 3.109375
| 3
|
[] |
no_license
|
from PIL import ImageDraw
colour = {}
colour[0] = (255, 255, 255)
colour[1] = (255, 0, 0)
colour[2] = (0, 255, 0)
colour[3] = (0, 0, 255)
def drawrect(drawcontext, xy, outline=None, width=0):
    # Draw a rectangle outline on a PIL ImageDraw context as a closed
    # polyline of the four corners, allowing a line `width`.
    # xy: [(x0, y0), (x1, y1)] opposite corners.
    [(x0,y0),(x1,y1)] = xy
    points = (x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)
    drawcontext.line(points, fill=outline, width=width)
def draw_image_with_boxes(image, boxes, labels, show_image=False):
    # Draw one coloured bounding box per (box, label) pair on the PIL
    # image, using the module-level `colour` table, and return the image.
    for [x0, y0, x1, y1], label in zip(boxes, labels):
        drawimage = ImageDraw.Draw(image)
        drawrect(drawimage, [(x0,y0),(x1,y1)], outline=colour[label], width=10)
    if show_image:
        # NOTE(review): `cv2` and `np` are never imported in this module,
        # and cv2.imshow expects a window-name first argument -- this
        # branch raises NameError as written; confirm intended behaviour.
        cv2.imshow(cv2.resize(np.asarray(image),(416,416)))
    return image
| true
|
dbd206c7779b63c76bf266998be7da17ace40cb2
|
Python
|
RahulRavishankar/Oblivious_Transfer
|
/ot.py
|
UTF-8
| 4,025
| 3.125
| 3
|
[] |
no_license
|
from hashlib import sha256
from cryptography.fernet import Fernet
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import random
class Alice:
    """Sender in the 1-of-2 oblivious transfer demo.

    Holds two messages and derives two candidate shared secrets from
    Bob's blinded value B; Bob can reproduce exactly one of them.
    """

    def __init__(self):
        self.a = -1                # Alice's secret exponent
        self.A = -1                # public value g**a
        self.messages = ["Coupon1", "Coupon2"]
        self.keys = ["", ""]       # Fernet keys, one per message
        self.hashcodes = ["", ""]  # hex digests of the candidate secrets

    def choose_a(self):
        # Pick the secret exponent (toy-sized range for the demo).
        self.a = random.randint(1, 10)

    def get_a(self):
        return self.a

    def getA(self):
        return self.A

    def calculateHashcodes(self, B, g, b):
        # Derive the two candidate shared secrets from Bob's B.
        # These digests act as key commitments, not the actual Fernet
        # encryption keys.
        self.hashcodes[0] = sha256(str(pow(B, self.a)).encode()).hexdigest()
        self.hashcodes[1] = sha256(str(pow(int((B / self.A)), self.a)).encode()).hexdigest()

    def getHashCodes(self):
        # BUG FIX: this accessor (and getKeys below) was defined TWICE
        # with identical bodies; the dead duplicate definitions have been
        # collapsed into single ones.
        return self.hashcodes

    def getKeys(self):
        return self.keys

    def getEncryptedMessages(self):
        """Derive one Fernet key per message (PBKDF2-HMAC-SHA256 over the
        plaintext with a fresh random salt) and return both ciphertexts."""
        salt1 = os.urandom(16)
        kdf1 = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt1,
            iterations=1000,
            backend=default_backend()
        )
        self.keys[0] = base64.urlsafe_b64encode(kdf1.derive(self.messages[0].encode()))
        salt2 = os.urandom(16)
        kdf2 = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt2,
            iterations=1000,
            backend=default_backend()
        )
        self.keys[1] = base64.urlsafe_b64encode(kdf2.derive(self.messages[1].encode()))
        f1 = Fernet(self.keys[0])
        f2 = Fernet(self.keys[1])
        return [f1.encrypt(self.messages[0].encode()), f2.encrypt(self.messages[1].encode())]
class Bob:
    # Receiver in the oblivious transfer: the choice bit c selects which
    # of Alice's two messages he will be able to decrypt.
    def __init__(self):
        self.b=-1             # Bob's secret exponent
        self.c=-1             # choice bit (0 or 1)
        self.B=-1             # blinded public value sent to Alice
        self.hashcode=""      # digest of Bob's derived shared secret
        self.decryptionKey=-1
    def choose_b(self):
        self.b=random.randint(1,10)
    def choose_c(self):
        self.c=random.randint(0,1)
    def calulateB(self,A):
        # B = g^b when c == 0, else A * g^b, so Alice cannot infer c.
        # NOTE(review): `g` is read from module scope rather than passed
        # in; the method name keeps the original (misspelled) public
        # spelling to avoid breaking callers.
        if(self.c ==0):
            self.B=pow(g,self.b)
        elif(self.c==1):
            self.B=A*pow(g,self.b)
    def getB(self):
        return self.B
    def getc(self):
        return self.c
if __name__ == "__main__":
    # Demo of a 1-out-of-2 oblivious transfer built on modular exponentiation:
    # Alice holds two messages; Bob learns exactly one, selected by his bit c.
    alice=Alice()
    bob=Bob()
    # Public generator shared by both parties.
    g=7
    print("Value of g:",g)
    alice.choose_a()
    a=alice.get_a()
    # A = g^a is Alice's public value.
    alice.A=pow(g,a)
    print("Alice chose a=",a)
    print("Value of A=",alice.getA())
    bob.choose_b()
    print("Bob chose b=",bob.b)
    bob.choose_c()
    print("Bob chose c=",bob.c)
    A=alice.getA()
    # B = g^b (c == 0) or A * g^b (c == 1); B hides which message Bob wants.
    bob.calulateB(A)
    print("Value of B=",bob.B)
    # Bob's key is the hash of A^b; it matches exactly one of Alice's keys.
    bob.hashcode=sha256(str(pow(A,bob.b)).encode()).hexdigest()
    print("Decryption key present with Bob:",bob.hashcode)
    B=bob.getB()
    # NOTE(review): passing bob.b directly to Alice would leak Bob's secret in
    # a real protocol — confirm this is intentional for the demo.
    alice.calculateHashcodes(B,g,bob.b)
    hashcodes=alice.getHashCodes()
    print("Encryption keys present with Alice: "+str(hashcodes[0])+" and "+str(hashcodes[1]))
    keys=alice.getKeys()
    print("\nEncrypting Messages...........")
    EncryptedMessages=alice.getEncryptedMessages()
    print("Encrypted messages sent by Alice: "+str(EncryptedMessages[0])+" and "+str(EncryptedMessages[1]))
    print("\nDecrypting Messages...........")
    c=bob.getc()
    #comparing hashcodes present with Alice and Bob instead of the encrypted messages
    if(hashcodes[c]==bob.hashcode):
        #decrypt message
        f=Fernet(keys[c])
        print("Message Received: "+f.decrypt(EncryptedMessages[c]).decode())
    else:
        print("Error Detected! Invalid message")
| true
|
c73afcc553001c11021fee5dc3f50e4e79ee9dd3
|
Python
|
okbengii/scrapydemo
|
/miao/spiders/textproxy.py
|
UTF-8
| 993
| 2.53125
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
# import requests
# try:
# requests.get('http://wenshu.court.gov.cn/', proxies={"http":"http://222.85.50.168:808"})
# except:
# print "failed"
# else:
# print "success"
# import re
# con = '链接:<a href="http://git.ghostscript.com/?p=ghostpdl.git;h=8210a2864372723b49c526e2b102fdc00c9c4699" target="_blank" rel="nofollow">http://git.ghostscript.com/?p=ghostpdl.git;h=8210a2864372723b49c526e2b102fdc00c9c4699</a>'
# aa = '.+?(?<=href=\").+?(?=\")'
# pattern = '(.+?)<a href=\"(.+?)\"'
# rel = re.findall(pattern,con)
# print rel
# for aa in rel[0]:
# print aa
# for i in range(1,50):
# url = 'http://www.cnnvd.org.cn/vulnerability/index/p/%s' %i
# print url
# a = []
# if len(a):
# print 'true'
# else:
# print 'false'
# NOTE(review): Python 2 syntax (print statements) — this class will not run
# under Python 3.
class Test():
    def __init__(self,name=None):
        # Echo the name when one is given; otherwise report that it is missing.
        if name is not None:
            self.name = name
            print self.name
        else:
            print 'name is null'
    def parse(self):
        # Placeholder method; just prints a marker string.
        print "ssss"
if __name__ == '__main__':
    # Smoke test: construct Test with a non-None value so the name branch runs.
    a = 1
    test = Test(a)
    #test.parse()
| true
|
aa1d94e3329ae35c60e1993448e6f770c495c9c4
|
Python
|
RaymondLZhou/uplifting-news
|
/src/visualize.py
|
UTF-8
| 782
| 3.21875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
import matplotlib.pyplot as plt
def display_dataset(train_dataset):
    """Print the first three texts and labels from one batch of the dataset."""
    for batch_texts, batch_labels in train_dataset.take(1):
        print("Texts: ", batch_texts.numpy()[:3])
        print()
        print("Labels: ", batch_labels.numpy()[:3])
def display_results(test_loss, test_acc, history):
    """Print test metrics and plot accuracy/loss curves side by side.

    `history.history` is read for 'accuracy', 'loss' and their 'val_'-prefixed
    counterparts, one value per epoch (Keras-style History object — TODO
    confirm against the caller).
    """
    def plot_graphs(metric):
        # Plot the training curve and its validation counterpart on the
        # currently-selected subplot.
        plt.plot(history.history[metric])
        plt.plot(history.history["val_" + metric], '')
        plt.xlabel("Epochs")
        plt.ylabel(metric)
        plt.legend([metric, "val_" + metric])
    print("Test Loss: {}".format(test_loss))
    print("Test Accuracy: {}".format(test_acc))
    plt.figure(figsize=(16, 8))
    plt.subplot(1, 2, 1)
    plot_graphs("accuracy")
    plt.ylim(None, 1)  # accuracy is capped at 1
    plt.subplot(1, 2, 2)
    plot_graphs("loss")
    plt.ylim(0, None)  # loss is bounded below by 0
| true
|
1af5282a19fb1c7f5209fe8cd0d69e7573da0154
|
Python
|
jackwong95/MMURandomStuff
|
/TDS3651 - Visual Information Processing/Assignment/Part 2/precisionCurve.py
|
UTF-8
| 2,393
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
import os
from SIFT import computeDistances as sift
from RGB import computeDistances as rgb
# Food database information
dbSize = 1000  # total number of images in the database
retrieves = [i for i in range(0, 1000, 10)]  # retrieval depths K to evaluate (0, 10, ..., 990)
nPerCat = 100  # images per category
nC = 10  # number of categories
labels = ('AK','BL','CD','CL','DR','MG','NL','PG','RC','ST')
# Database path
dbpath = os.getcwd() + "/fooddb/"
# Function to return a tuple of precisions and recalls.
#
# Input: pickle file name
# Output: a tuple of a list of precision and a list of recall
#
def get_results(picklefile, func):
    """Compute one (precision, recall) pair per retrieval depth in `retrieves`.

    picklefile: pickled feature vectors for all dbSize images.
    func: distance function mapping those features to a pairwise matrix
    D where D[i, j] is the distance between image i and image j.
    Returns (precisions, recalls), each a list with len(retrieves) entries.
    """
    fv = pickle.load(open(picklefile, "rb"))
    D = func(fv)
    avg_prec = np.zeros(dbSize)
    precisions = []
    recalls = []
    for retrive in retrieves:
        for c in range(nC):
            for i in range(nPerCat):
                # Query image index: category block c, offset i.
                idx = (c*nPerCat) + i;
                # access distances of all images from query image, sort them asc
                nearest_idx = np.argsort(D[idx, :]);
                # quick way of finding category label for top K retrieved images
                # (skips index 0, which is the query itself)
                retrievedCats = np.uint8(np.floor((nearest_idx[1:retrive+1])/nPerCat));
                # find matches
                hits = (retrievedCats == np.floor(idx/nPerCat))
                # calculate average precision of the ranked matches
                if np.sum(hits) != 0:
                    avg_prec[idx] = np.sum(hits*np.cumsum(hits)/(np.arange(retrive)+1)) / np.sum(hits)
                else:
                    avg_prec[idx] = 0.0
        mean_avg_prec = np.mean(avg_prec)
        # NOTE(review): `recall` is computed from the *last* query's hits only
        # (the final idx of the loops above), not averaged over all queries —
        # confirm whether this is intended.
        recall = np.sum(hits) / nPerCat
        precisions = precisions + [mean_avg_prec]
        recalls = recalls + [recall]
    return (precisions, recalls)
# Get the benchmark precision and recall
# NOTE(review): `rgb` was imported via `from RGB import computeDistances as rgb`,
# so `rgb.computeDistances` only resolves if computeDistances is itself a
# module/object exposing a computeDistances attribute — verify the RGB/SIFT
# package layout.
precisions, recalls = get_results("RGB/feat.pkl", rgb.computeDistances)
# Get the SIFT method precision and recall
precisions_sift, recalls_sift = get_results("SIFT/feat.pkl", sift.computeDistances)
plt.clf()
plt.plot(recalls, precisions, lw=2, color='navy', label='RGB Color Histogram')
plt.plot(recalls_sift, precisions_sift, lw=2, color='red', label='SIFT')
plt.legend(loc="upper right")
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.savefig("Precision Recall.png")
# plt.show()
# plt.close()
| true
|
772052abdee8092765dfa09a459aefa3a753813f
|
Python
|
baidw/languages
|
/python/exam/exam_0103_comment.py
|
UTF-8
| 310
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__='baidw'
#one comment
#two comment
def mymaxnum(a,b):
    "this is a compare b ,if a>b return a,else return b "
    # Conditional expression replaces the original if/else statement; the
    # docstring above is kept byte-identical because it is printed at runtime.
    return a if a > b else b
# NOTE(review): Python 2 print statements; the second line demonstrates
# accessing a function's docstring via __doc__.
print "max number:",mymaxnum(10,20)
print "文档字符串注释[用于在线文档]:",mymaxnum.__doc__
| true
|
d9b4bc752d7d8a9ab8af1620088996c45d9832ed
|
Python
|
kannera/ocr_repair
|
/fix_tools.py
|
UTF-8
| 6,835
| 2.875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import json
import re
import operator
import dbm
# Index constants: correction pairs are [input side, output side] ...
INPUT = 0
OUTPUT = 1
# ... and character_combinations() splits are [prefix, middle, suffix].
START = 0
MIDDLE = 1
END = 2
def parse_data_row(line):
    """Split a tab-separated row (first field dropped) into consecutive
    [input, output] pairs."""
    fields = re.split("\t", line)[1:]
    pair_count = int(len(fields)/2)
    return [fields[2*k:2*k + 2] for k in range(pair_count)]
def parse_corrections_to_list(corrections):
    """Flatten a list of correction rows into a single flat list of pairs."""
    return [pair for row in corrections for pair in row]
def character_combinations(count, word):
    """Return every [prefix, middle, suffix] split of word whose middle is
    exactly `count` characters long, left to right."""
    splits = []
    for start in range(len(word) - count + 1):
        splits.append([word[:start], word[start:start + count], word[start + count:]])
    return splits
def get_trigram_factor(fragment, table):
    """Look up the probability of fragment's final trigram in `table`.

    Fragments of length 1 or 2 are left-padded with '#'; longer fragments use
    their last three characters. Unseen trigrams fall back to table['nonce'].
    """
    length = len(fragment)
    if length in (1, 2):
        key = "#" * (3 - length) + fragment
    else:
        key = fragment[-3:]
    return table[key] if key in table else table["nonce"]
def get_character_frequency_table(word_list):
    """Count occurrences of every 1- and 2-character substring across word_list.

    Returns {substring: count}. (The middle-slice extraction formerly done via
    character_combinations() is inlined here.)
    """
    counts = {}
    for word in word_list:
        for width in (1, 2):
            for start in range(len(word) - width + 1):
                middle = word[start:start + width]
                counts[middle] = counts.get(middle, 0) + 1
    return counts
def build_replacement_probability_table(correction_list, character_frequency_list):
    #return a dict of dicts, where keys are characters and values are key-value pairs of characters possible replacements and their respective probabilities
    # Start each character off mapping to itself with its raw corpus count.
    table = { x : { x : character_frequency_list[x] } for x in character_frequency_list }
    # Each observed correction shifts one count from "kept as-is" to the
    # replacement character.
    for pair in correction_list:
        if pair[INPUT] in table:
            table[pair[INPUT]][pair[INPUT]] -= 1
            if pair[OUTPUT] in table[pair[INPUT]]: table[pair[INPUT]][pair[OUTPUT]] += 1
            else: table[pair[INPUT]].update( { pair[OUTPUT] : 1 })
        else:
            table.update( { pair[INPUT] : { pair[INPUT] : 0, pair[OUTPUT] : 1 } } )
    # Normalise every row into probabilities.
    for row in table:
        row_sum = sum(table[row].values())
        for col in table[row]:
            table[row][col] = table[row][col] / row_sum
    # Damp multi-character rows by the probability that each of their
    # individual characters would have stayed unchanged.
    for row in table:
        if len(row) > 1:
            for col in table[row]:
                factor=1
                for r in row:
                    factor = factor*table[r][r]
                table[row][col] = table[row][col]*(1-factor)
    # Fallback row for characters never seen in the data: a flat distribution
    # over the lowercase alphabet.
    unknown_c = "abcdefghijklmnopqrstuvxyzäö"
    unknown = { c : 1/(len(unknown_c)+1) for c in unknown_c }
    table.update( { "unknown" : unknown })
    # NOTE(review): `1/len(unknown)+1` parses as (1/len(unknown)) + 1, giving
    # the "unknown"->"unknown" entry a value > 1; 1/(len(unknown)+1) was
    # probably intended — confirm before changing.
    table["unknown"].update( { "unknown" : 1/len(unknown)+1 } )
    return table
def add_split_marks(fragment, count):
    """Append count-1 '<+>' placeholder markers to fragment (none when count <= 1)."""
    return fragment + "<+>" * (count - 1)
def get_split_list(correction_list):
    """Keep only corrections whose input side spans multiple characters."""
    multi_char = []
    for pair in correction_list:
        # index 0 is the INPUT side of each correction pair (INPUT == 0)
        if len(pair[0]) > 1:
            multi_char.append(pair)
    return multi_char
def get_word_combinations(word):
    """Return all [prefix, middle, suffix] splits of word for middle lengths
    1 through 3, grouped by length. (character_combinations() is inlined.)"""
    splits = []
    for width in range(1, 4):
        for start in range(len(word) - width + 1):
            splits.append([word[:start], word[start:start + width], word[start + width:]])
    return splits
def check_split_list(split_list, fragment):
    """Return the input sides (index 0 == INPUT) of split_list entries that
    are prefixes of fragment."""
    return [entry[0] for entry in split_list if fragment.startswith(entry[0])]
def run_through_matrix(word, correction_matrix, table):
    """Beam-search candidate corrections position by position.

    correction_matrix: one dict per position mapping candidate substitution ->
    probability. Candidates are extended left to right, weighted by the
    substitution probability times a trigram factor, and pruned to the 100
    best (get_top_100_fragments). Returns the surviving list of
    {"fragment", "prob"} dicts.
    NOTE(review): the `word` parameter is unused in this body.
    """
    guesses = dict()  # NOTE(review): never used — appears to be dead code
    fragments = dict()  # empty seed; replaced by a list after the first position
    for i in range(0, len(correction_matrix)):
        new_fragments = []
        for j in correction_matrix[i]:
            j_sub = j
            j_prob = correction_matrix[i][j]
            if len(fragments) > 0:
                for f in fragments:
                    if f["fragment"].endswith("<+>"):
                        # A pending '<+>' marker means this position was already
                        # consumed by an earlier multi-character substitution:
                        # strip one marker instead of appending a candidate.
                        new_fragments.append( { "fragment" : re.sub("<\+>", "", f["fragment"], count=1), "prob" : f["prob"] } )
                    else:
                        k = f["fragment"]
                        nf = k+j_sub
                        trigram_factor = get_trigram_factor(nf, table)
                        prob = f["prob"]*j_prob*trigram_factor
                        new_fragments.append( { "fragment" : nf , "prob" : prob} )
            else:
                # First position: seed the beam with each candidate alone.
                trigram_factor = get_trigram_factor(j_sub, table)
                new_fragments.append( { "fragment" : j_sub, "prob" : j_prob*trigram_factor } )
        # Prune the beam to the 100 most probable unique fragments.
        new_fragments = get_top_100_fragments(new_fragments)
        fragments = new_fragments
    return fragments
# Token/type counts per decade slice of the KLK Finnish 1-gram corpus
# (file names below); used to normalise frequencies in get_word_probability().
corpus_sizes = { "182x" : {"tokens" : 575179, "types" : 65941 },
    "183x" : {"tokens" : 1377160, "types" : 128880 },
    "184x" : {"tokens" : 2998726, "types" : 197429 },
    "185x" : {"tokens" : 17038824,"types" : 525143 },
    "186x" : {"tokens" : 37430663, "types": 916087 },
    "187x" : {"tokens" : 79244434, "types": 1413128 },
    "188x" : {"tokens" : 276140381, "types": 2824262 },
    "189x" : {"tokens" : 732014562, "types": 4849579 }}
def ensure_dbs():
    """Build a dbm word->count database per decade slice if not already present.

    Each source file 'OF_klk_fi_1grams_<decade>-20140905' holds tab-separated
    "word<TAB>count" lines; the .db file mirrors them for fast lookup.
    """
    for corpus in corpus_sizes:
        try:
            # Succeeds only when the db file already exists; otherwise raises
            # dbm.error and we build it below.
            dbm.open("resources/grams/OF_klk_fi_1grams_"+corpus+"-20140905.db")
        except dbm.error:
            with dbm.open("resources/grams/OF_klk_fi_1grams_"+corpus+"-20140905.db","c") as db:
                with open("resources/grams/OF_klk_fi_1grams_"+corpus+"-20140905", "r", encoding="utf-8" ) as f:
                    for line in f:
                        w = re.split("\t", line)
                        db[w[0]]=w[1]
def get_word_probability(word):
    """Average the add-one-smoothed relative frequency of `word` across all
    decade-slice databases."""
    freqs = []
    for corpus in corpus_sizes:
        with dbm.open("resources/grams/OF_klk_fi_1grams_"+corpus+"-20140905.db") as db:
            # Add-one smoothing: words absent from the db get count 1.
            freq = int(db.get(word,'0'))+1
            # Normalise by corpus size (tokens + types).
            freq = freq/(corpus_sizes[corpus]["types"]+corpus_sizes[corpus]["tokens"])
            freqs.append(freq)
    return sum(freqs)/len(freqs)
def run_list(f, word):
    """Scan tab-separated lines of f for `word`; return its count + 1
    (add-one smoothing), or 1 when absent."""
    for line in f:
        fields = re.split("\t", line)
        if fields[0] == word:
            return int(fields[1]) + 1
    return 1
def get_new_frag(pos):
    """Return pos['sub'] padded with one '<+>' marker per extra character of
    pos['orig'] beyond the first."""
    return pos["sub"] + "<+>" * (len(pos["orig"]) - 1)
def get_top_100_fragments(fragments):
    """Deduplicate fragments (first occurrence wins) and return at most the
    100 highest-'prob' entries, sorted descending."""
    deduped = []
    for frag in fragments:
        # Dicts are unhashable, so membership is a linear scan.
        if frag not in deduped:
            deduped.append(frag)
    ranked = sorted(deduped, key=lambda entry: entry["prob"], reverse=True)
    return ranked[:100]
| true
|
728eae480cbacb206eefec64b9b487e95589a4ed
|
Python
|
marcosdaniel0616/Curso-de-Python---Geek-university
|
/counter.py
|
UTF-8
| 1,868
| 4.15625
| 4
|
[] |
no_license
|
"""
Módulo Collections - Counter (Contador)
https://docs.python.org/3/library/collections.html#collections.Counter
Collections -> High-Performance Container Datetypes
Counter -> Recebe um iterável como parâmetro e cria um objeto do tipo Collections Counter que é parecido
com um dicionário, contendo como chave o elemento da lista passada como parâmetro e como valor a quantidade
de ocorrências desse elemento.
# Realizando o import
from collections import Counter
# Exemplo 1
# Podemos utilizar qualquer iterável, aqui usamos uma lista
lista = [1, 1, 1, 2, 2, 3, 3, 3, 3, 1, 1, 2, 2, 4, 4, 4, 5, 5, 5, 5, 3, 45, 45, 66, 66, 43, 34]
# Utilizando o counter
res = Counter(lista)
print(type(res))
print(res)
# Counter({1: 5, 3: 5, 2: 4, 5: 4, 4: 3, 45: 2, 66: 2, 43: 1, 34: 1})
# Veja que, para cada elemento da lista, o Counter criou uma chave e colocou como valor a quantidade de ocorrências.
# Exemplo 2
print(Counter('Geek University'))
# Counter({'e': 3, 'i': 2, 'G': 1, 'k': 1, ' ': 1, 'U': 1, 'n': 1, 'v': 1, 'r': 1, 's': 1, 't': 1, 'y': 1})
"""
from collections import Counter
# Example 3: counting word frequencies in a longer text.
texto = """
Christina María Aguilera é uma cantora, compositora e atriz norte-americana. Referida como a "Voz da Geração",
é creditada como uma das responsáveis por reavivar o teen pop no final da década de 1990 e somar sua habilidade vocal
para discursar sobre temas como a sexualidade e o feminismo. Ao passo em que continuamente reinventava sua imagem,
tornou-se conhecida por seus visuais extravagantes e não convencionais. Além de provocar polêmica, seus trabalhos foram
elogiados pela crítica especializada, pelos quais tem sido citada como influência para diversos artistas.
"""
# Split on whitespace to get the word list.
palavras = texto.split()
# print(palavras)
res = Counter(palavras)
print(res)
# The 5 most frequent words in the text.
print(res.most_common(5))
| true
|
46657cd5071c8885586ba669ce572ecec33cc217
|
Python
|
Pastilhas/IART-FEUP-2
|
/q_learning/zhed_bot.py
|
UTF-8
| 6,662
| 2.9375
| 3
|
[] |
no_license
|
import sys
import q_learning
from levels import get_level
class Game:
    """One Zhed board plus the Q-learning episode state.

    Board cells are *strings*: a positive number is a playable tile with that
    many moves, '0' is empty, '-1' is the goal, '-2' is a filled/used cell.
    """
    def __init__(self):
        self.run = True
        self.win = False
        self.level = 6  # level index passed to levels.get_level()
        self.board = []
        self.lastState = None  # action-id sequence before the latest move
        self.currState = []    # action-id sequence taken this episode
        self.generate_board()
        # The action space is fixed from the initial board: one action per
        # (playable tile, direction) pair.
        self.actions = self.getActions()
        self.ai = q_learning.Qlearning(len(self.actions), 0.2, 0.9)
        self.file = open("result.txt", "w")
    def update(self):
        """Play one AI move and learn from it.

        Returns True on win, False on loss (episode reset in both cases),
        or None while the episode continues.
        """
        move = self.getAIMove()
        self.make_move(move)
        last_action_id = self.currState[-1]
        last_action = self.actions[last_action_id]
        self.file.write(" id(" + str(last_action_id) + ") => [" + str(last_action[0]) + "," + str(last_action[1]) + "] - " + str(last_action[2]) + "\n")
        if self.win:
            reward = 100
            self.ai.update_table(tuple(self.lastState), move['action'], reward, tuple(self.currState))
            #self.print_board()
            #print('Win - New board')
            self.file.write("Win\n")
            # Reset the episode.
            self.generate_board()
            self.lastState = None
            self.currState = []
            self.win = False
            return True
        elif self.fail():
            reward = -50
            self.ai.update_table(tuple(self.lastState), move['action'], reward, tuple(self.currState))
            #self.print_board()
            #print('Lose - New board')
            self.file.write("Lose\n")
            self.generate_board()
            self.lastState = None
            self.currState = []
            return False
        else:
            # Small step penalty to encourage shorter solutions.
            reward = -1
            self.ai.update_table(tuple(self.lastState), move['action'], reward, tuple(self.currState))
            return None
    def generate_board(self):
        """(Re)build the 8x8 board from the flat level definition."""
        self.board = [[0]*8 for i in range(8)]
        level = get_level(self.level)
        pos = 0
        for i in range(0, 8):
            for j in range(0, 8):
                self.board[i][j] = level[pos]
                pos += 1
    def make_move(self, move):
        """Apply `move`, spreading the tile's moves across the board.

        Directions: 0 = right, 1 = up, 2 = left, 3 = down. Reaching the goal
        ('-1') sets self.win; running off the board discards remaining moves.
        """
        self.lastState = self.currState[:]
        self.currState.append(move['action'])
        coords = move['coords']
        direction = move['direction']
        #print('Move: ', coords, ' - ', direction)
        moves = int(self.board[coords[1]][coords[0]])
        # Mark the played tile itself as used.
        self.board[coords[1]][coords[0]] = '-2'
        # NOTE: `tile` aliases `coords` (same list object), so the
        # board[coords[1]][coords[0]] checks below actually track the tile as
        # it advances — not the starting square.
        tile = coords
        while moves > 0:
            if direction == 0:
                tile[0] += 1
                if tile[0] > 7:
                    return
                else:
                    if self.board[tile[1]][tile[0]] == '-1':
                        self.win = True
                    elif self.board[coords[1]][coords[0]] == '0':
                        # Fill an empty cell and consume one move; already
                        # filled cells are passed over without cost.
                        self.board[coords[1]][coords[0]] = '-2'
                        moves -= 1
            if direction == 1:
                tile[1] -= 1
                if tile[1] < 0:
                    return
                else:
                    if self.board[tile[1]][tile[0]] == '-1':
                        self.win = True
                    elif self.board[coords[1]][coords[0]] == '0':
                        self.board[coords[1]][coords[0]] = '-2'
                        moves -= 1
            if direction == 2:
                tile[0] -= 1
                if tile[0] < 0:
                    return
                else:
                    if self.board[tile[1]][tile[0]] == '-1':
                        self.win = True
                    elif self.board[coords[1]][coords[0]] == '0':
                        self.board[coords[1]][coords[0]] = '-2'
                        moves -= 1
            if direction == 3:
                tile[1] += 1
                if tile[1] > 7:
                    return
                else:
                    if self.board[tile[1]][tile[0]] == '-1':
                        self.win = True
                    elif self.board[coords[1]][coords[0]] == '0':
                        self.board[coords[1]][coords[0]] = '-2'
                        moves -= 1
    def print_board(self):
        """Render the board as ASCII: X = used, F = goal, digits = tiles."""
        print('+---+---+---+---+---+---+---+---+')
        for i in self.board:
            line = '| '
            for j in i:
                if j == '-2':
                    line += 'X'
                elif j == '-1':
                    line += 'F'
                elif j == '0':
                    line += ' '
                else:
                    line += str(j)
                line += ' | '
            print(line)
        print('+---+---+---+---+---+---+---+---+')
    def fail(self):
        """Return True when no playable (positive-valued) tile remains."""
        for i in self.board:
            for j in i:
                if int(j) > 0:
                    return False
        return True
    def getActions(self):
        """Enumerate every (x, y, direction) action on the current board."""
        actions = []
        for y,line in enumerate(self.board):
            for x,cell in enumerate(line):
                if int(cell) > 0:
                    for d in range(4):
                        actions.append((x,y,d)) # each action is a tuple(x,y,d) x,y are coords of cell and d indicates direction
        return actions
    def getAIMove(self):
        """Ask the Q-learner for an action among those currently playable."""
        possible_actions = []
        for i,line in enumerate(self.board):
            for j,cell in enumerate(line):
                if int(cell) > 0:
                    for k in range(4):
                        possible_actions.append(self.actions.index((j,i,k)))
        action_index = self.ai.take_action(tuple(self.currState), possible_actions)
        action = list(self.actions[action_index])
        move = {
            'action': action_index,
            'coords': [action[0],action[1]],
            'direction': action[2]
        }
        return move
# NOTE(review): this rebinds the class name — after this line `Game` is the
# instance, so no further Game instances can be created.
Game = Game()
game_try = 0
total_tries = 10000
# Phase 1: exploration (high epsilon = mostly random actions).
Game.ai.epsilon = 0.8 # Exploring phase high epsilon
Game.file.write("Game " + str(game_try) + "\n")
while game_try < total_tries:
    result = Game.update()
    # update() returns True/False when an episode ended, None mid-episode.
    if result is not None:
        game_try += 1
        if game_try == total_tries:
            break
        Game.file.write("Game " + str(game_try) + "\n")
game_try = 0
# Phase 2: exploitation (low epsilon = mostly greedy actions).
Game.ai.epsilon = 0.2 # Exploiting phase low epsilon
Game.file.write("Game " + str(game_try) + "\n")
while game_try < total_tries:
    result = Game.update()
    if result is not None:
        game_try += 1
        if game_try == total_tries:
            break
        Game.file.write("Game " + str(game_try) + "\n")
# Persist the learned Q-table with the run parameters of both phases.
Game.ai.saveTo("results/txt_result.txt", [(10000, 0.2, 0.9, 0.8), (10000, 0.2, 0.9, 0.2)])
Game.ai.csvTo("results/csv_result.csv", [(10000, 0.2, 0.9, 0.8), (10000, 0.2, 0.9, 0.2)])
Game.file.close()
| true
|
3a014882742ae31f75108bef8cc83be2e0d678b5
|
Python
|
awwad/depresolve
|
/depresolve/resolver/rbtpip_recheck_satisfied.py
|
UTF-8
| 2,224
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
"""
Convenience script, one-time.
"""
import depresolve
import depresolve.depdata as depdata
import depresolve.resolver.resolvability as ry
def recheck_all_unsatisfied():
    """Re-evaluate rbtpip solutions that installed but were marked unsatisfied.

    Each solutions entry is [installed, satisfied, solution, errstring].
    Returns the (possibly updated) solutions dict.
    """
    depdata.ensure_data_loaded(CONFLICT_MODELS=[3], include_edeps=True)
    solutions = depdata.load_json_db('data/resolved_via_rbtpip.json')
    installed = [d for d in solutions if solutions[d][0]]
    satisfied = [d for d in solutions if solutions[d][1]]
    installed_but_unsatisfied = [d for d in installed if d not in satisfied]
    # We re-run this last set, to see if they're in fact unsatisfied.
    for distkey in installed_but_unsatisfied:
        # NOTE: rebinds `satisfied` (previously a list) — harmless here since
        # the list is no longer needed past this point.
        satisfied, errstring = recheck_satisfied(distkey, solutions[distkey][2])
        if satisfied or errstring != solutions[distkey][3]:
            print('Updating satisfied-ness!: ' + distkey)
            solutions[distkey][1] = satisfied
            solutions[distkey][3] = errstring
        else:
            print('Still unsatisfied: ' + distkey + '. Error: ' + errstring)
    return solutions
def recheck_satisfied(distkey, solution):
    """Check whether install set `solution` fully satisfies its dependencies.

    Returns (satisfied, errstring): satisfied is True/False, or '' (falsy but
    not False) when dependency info was missing and the answer is unknown.
    """
    satisfied = False
    installed = distkey in [d.lower() for d in solution] # sanitize old data
    errstring = ''
    assert installed, 'Expecting solutions with distkey installed.'
    # Check to see if the solution is fully satisfied.
    # (Note that because virtual environments start off with pip,
    # wheel, and setuptools, we can't tell when a solution includes them,
    # don't store those as part of the solution, and so disregard them in this
    # dependency check. ):
    try:
        (satisfied, errstring) = ry.are_fully_satisfied(solution,
            depdata.elaborated_dependencies, depdata.versions_by_package,
            disregard_setuptools=True, report_issue=True)
    except depresolve.MissingDependencyInfoError as e:
        errstring = 'Unable to determine if satisfied: missing dep info for ' + \
            str(e.args[1])
        satisfied = '' # evaluates False but is not False
        print(' ERROR! ' + errstring + '. Resolution for ' + distkey +
            ' unknown. Full exception:' + str(e))
    # Return the updated satisfied-ness of this distkey:
    # - whether or not the install set is fully satisfied and conflict-less
    # - error string if there was an error
    return (satisfied, errstring)
| true
|
c78f6c134330b3c2ff214371e10c8cf5ce3adea3
|
Python
|
farrellsc/zAutoPilot
|
/auto_pilot/data/world_map.py
|
UTF-8
| 629
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
from auto_pilot.data.coordinates import Coordinates
import numpy as np
class WorldMap:
    """
    This class describes a grid cell world map.
    """
    def __init__(self, mat: np.matrix):
        self.map = mat
        # Heuristic grid for A*; populated by make_heuristic().
        self.heuristic = None
        self.shape = self.map.shape

    def from_file(self, path_to_map: str):
        # Loading a map from disk is not implemented yet.
        pass

    def __getitem__(self, tup):
        # Support world_map[x, y] indexing into the underlying matrix.
        row, col = tup
        return self.map[row, col]

    def make_heuristic(self, heuristic_type: str, goal: Coordinates):
        """
        make a heuristic map for A*, stored in self.heuristic
        :return:
        """
        raise NotImplementedError
| true
|
e52a669dbe2b0257a4bac624df31ab12efef6176
|
Python
|
shubhammishra277/Utilities
|
/Fileutility.py
|
UTF-8
| 1,857
| 3.046875
| 3
|
[] |
no_license
|
import glob
import subprocess
import os
class datamovement(object):
    """Sort the files of a user-chosen folder into subfolders by extension."""
    def __init__(self):
        # Folder whose files will be segregated, read interactively.
        self.datapath=input("Enter the folder path for which you want to seggregate:")
    def extensiongetter(self):
        """Return the distinct file extensions present directly under datapath."""
        distinctextenstions=[i.split(".")[-1] for i in glob.glob("%s/*"%self.datapath) if ((len(i.split(".")[-1])>0) and ( "." in i))]
        return list(set(distinctextenstions))
    def filemover(self):
        """Create one subfolder per extension and move matching files into it."""
        extensions=self.extensiongetter()
        print("extensions",extensions)
        for i in extensions:
            # mkdir -p: succeeds even when the folder already exists.
            status,output=subprocess.getstatusoutput("mkdir -p %s/%s"%(self.datapath,i))
            if status!=0:
                print("Not able to make directory for following extension %s with following error %s"%(i,output))
            else:
                print("i",i)
                filenames=[k for k in glob.glob("%s/*.%s"%(self.datapath,i))]
                print("filenames",filenames)
                for y in filenames:
                    # NOTE(review): paths are interpolated into a shell string;
                    # names with quotes/special characters could break or be
                    # misinterpreted — consider shutil.move instead.
                    status,output=subprocess.getstatusoutput('mv "%s" %s/%s/'%(y,self.datapath,i))
                    if status!=0:
                        print("unable to move the following file %s with error %s"%(y,output))
                    else:
                        print("following file %s moved successfully"%y)
if __name__=="__main__":
    # Prompt for a folder and sort its files by extension.
    t1=datamovement()
    t1.filemover()
| true
|
a0c3c1c0abd33059dd71cb78ddcb496e99c1ad23
|
Python
|
rafaelparente/pa-nests
|
/nestParser.py
|
UTF-8
| 9,690
| 2.859375
| 3
|
[] |
no_license
|
#!python3
#-*- coding: utf-8 -*-
import os
import sys
import json
import webbrowser
import operator
from math import ceil
from geopy.distance import vincenty
def dump_poke_groups(poke_groups, pokeGroups):
    """Persist every nest's info dict (groupNode[1]) to data/locs.json."""
    nest_infos = [poke_groups['@' + str(num)][1] for num in range(pokeGroups)]
    with open('data/locs.json', 'w') as locs:
        json.dump(nest_infos, locs, indent=2)
def add_nest(poke_groups, pokeGroups):
    """Interactively create a new nest, persist it, and return the updated
    (poke_groups, pokeGroups)."""
    print('\n- New Nest -')
    name = input('Name: ')
    coords = None
    # Loop until the user supplies parseable "lat,lng" (optionally embedded in
    # a Google Maps URL after '@').
    while (coords == None):
        coords = input('Lat,Lng (or Google Maps link): ')
        if ('@' in coords):
            coords = coords.split('@')[1]
        coords = coords.split(',')
        try:
            lat, lng = float(coords[0]), float(coords[1])
        except:
            coords = None
    # Loop until a valid integer radius (meters) is given.
    rad = None
    while (rad == None):
        try:
            rad = int(input('Radius: '))
        except:
            pass
    # Optional comma-separated list of pokemon considered common at this nest.
    aux = input('Common list (Pokemon1, Pokemon2, ...): ')
    if (aux != ''):
        common = aux.split(',')
        for i in range(len(common)):
            common[i] = common[i].strip()
    else:
        common = []
    # A group node is [list of spawns, nest info dict].
    node = [[],{}]
    groupId = '@'+str(pokeGroups)
    poke_groups[groupId] = node
    pokeGroups += 1
    # update_nest fills the info dict and writes data/locs.json.
    update_nest(poke_groups, pokeGroups, groupId, lat, lng, name, rad, common)
    return poke_groups, pokeGroups
def update_nest(poke_groups, pokeGroups, groupId, lat=None, lng=None, name=None, rad=None, common=None):
    """Update the given nest's info fields (None = leave unchanged) and
    persist all nests to disk. Returns poke_groups."""
    groupNode = poke_groups[groupId]
    groupInfo = groupNode[1]
    if (lat != None):
        groupInfo['lat'] = lat
    if (lng != None):
        groupInfo['lng'] = lng
    if (name != None):
        groupInfo['name'] = name
    if (rad != None):
        groupInfo['rad'] = rad
    if (common != None):
        groupInfo['common'] = common
    # Persist every change immediately.
    dump_poke_groups(poke_groups, pokeGroups)
    return poke_groups
def add_spawn(spawnInfo, poke_groups, pokeGroups):
    """Attach a spawn to every nest whose radius covers it.

    While no nest covers the spawn, interactively either grow the closest
    nest's radius or create a new nest, then retry. Returns the updated
    (poke_groups, pokeGroups).
    """
    joined = spawnInfo['joined']  # NOTE(review): unused local
    closerId = None
    closerDist = None
    while (spawnInfo['joined'] == 0):
        for groupNum in range(pokeGroups):
            groupId = '@'+str(groupNum)
            groupNode = poke_groups[groupId]
            groupInfo = groupNode[1]
            point1 = (spawnInfo['lat'],spawnInfo['lng'])
            point2 = (groupInfo['lat'],groupInfo['lng'])
            # Geodesic distance between spawn and nest center (geopy).
            dist = vincenty(point1, point2)
            if (dist.meters <= groupInfo['rad']):
                spawnInfo['joined'] += 1
                # Store the spawn together with its distance to the center.
                groupNode[0].append((spawnInfo,dist.meters))
            else:
                # Track the nearest non-covering nest as an enlargement candidate.
                if (closerId == None) or (dist.meters < closerDist):
                    closerId = groupId
                    closerDist = dist.meters
        if spawnInfo['joined'] == 0:
            closerInfo = poke_groups[closerId][1]
            pos = str(spawnInfo['lat']) + ',' + str(spawnInfo['lng'])
            print ('\nSpawn at [' + pos + '] outside of any nest range.')
            # Open the spawn location in the browser so the user can decide.
            url = 'https://www.google.com/maps/?q=' + pos
            webbrowser.open(url)
            print('Closer nest identified: ' + closerInfo['name'] + ' (Radius: ' + str(closerInfo['rad']) + 'm)')
            print('Increase range of \'' + closerInfo['name'] + '\' to ' + str(ceil(closerDist)) + 'm?')
            choice = ''
            while (choice == ''):
                choice = input('y or n: ')
                choice = choice.lower()
                if (choice == 'y'):
                    # Grow the closest nest just enough to cover this spawn.
                    poke_groups = update_nest(poke_groups, pokeGroups, closerId, rad=ceil(closerDist))
                elif (choice == 'n'):
                    poke_groups, pokeGroups = add_nest(poke_groups, pokeGroups)
                else:
                    choice = ''
    return poke_groups, pokeGroups
def parse_groups(nest_locs, poke_spawns):
    """Build nest group nodes from saved locations and assign every spawn to
    a nest. Returns (poke_groups, pokeGroups)."""
    poke_groups = {}
    pokeGroups = 0
    for loc in nest_locs:
        # Node layout: [list of (spawnInfo, distance) tuples, nest info dict].
        node = [[],loc]
        poke_groups['@'+str(pokeGroups)] = node
        pokeGroups += 1
    for spawnInfo in poke_spawns:
        spawnInfo.pop('time', None)  # field not needed for nest analysis
        spawnInfo['joined'] = 0      # number of nests this spawn belongs to
        poke_groups, pokeGroups = add_spawn(spawnInfo, poke_groups, pokeGroups)
    return poke_groups, pokeGroups
def eval_nests(poke_groups, pokeGroups):
    """Yield (groupNode, sorted (pokemonId, count) list) per non-zero-radius nest.

    A pokemon's count is negated when all of its spawns in this nest are
    shared with other nests (likely misplaced), so those sort to the tail.
    """
    for groupNum in range(pokeGroups):
        poke_count = {}
        dup_check = {}
        groupId = '@'+str(groupNum)
        groupNode = poke_groups[groupId]
        # Nests with radius 0 are skipped entirely.
        if (groupNode[1]['rad'] == 0):
            continue
        all_dup = True
        for spawnNode in groupNode[0]:
            spawnInfo = spawnNode[0]
            pokeId = spawnInfo['pokemonId']
            if pokeId in poke_count:
                poke_count[pokeId] += 1
            else:
                poke_count[pokeId] = 1
            if (spawnInfo['joined'] > 1):
                # Spawn shared with at least one other nest.
                if pokeId in dup_check:
                    dup_check[pokeId] += 1
                else:
                    dup_check[pokeId] = 1
            else:
                all_dup = False
        for dup_key in dup_check:
            # Negate counts where every spawn of that pokemon is a duplicate —
            # unless the whole nest consists of duplicates.
            if (not all_dup and poke_count[dup_key] <= dup_check[dup_key]):
                poke_count[dup_key] *= -1
        yield groupNode, sorted(poke_count.items(), key=operator.itemgetter(1), reverse=True)
def print_nest(groupNode, nestInfo, poke_list, global_common):
    """Render one nest's spawn statistics to the terminal.

    nestInfo is the sorted (pokemonId, count) list from eval_nests(); negative
    counts (at the tail) mark likely misplaced spawns.
    """
    nest_common = set(global_common + groupNode[1]['common'])
    os.system('cls' if os.name == 'nt' else 'clear')
    # Split nestInfo into the positive-count prefix and negative-count suffix.
    pos_len = len(nestInfo)
    neg_len = 0
    i = pos_len - 1
    while (i >= 0 and nestInfo[i][1] < 0):
        pos_len -= 1
        neg_len += 1
        i -= 1
    print ('- ' + groupNode[1]['name'] + ' -')
    # Rates are computed over the spawns excluding likely-misplaced ones.
    total_len = len(groupNode[0]) - neg_len
    i = 0
    # Report the first non-common pokemon taking >10% of spawns as a nest candidate.
    while (i < pos_len and (nestInfo[i][1] / total_len) > 0.1):
        id = nestInfo[i][0] - 1  # pokemonId is 1-based; poke_list is 0-based
        name = poke_list[id]
        if name not in nest_common:
            print('Possible nest of:', name)
            i = pos_len
        else:
            i += 1
    print('\nUncommon spawning rate:')
    for i in range(0, pos_len):
        id = nestInfo[i][0] - 1
        name = poke_list[id]
        if name not in global_common:
            spawnCount = nestInfo[i][1]
            print('%-12s' % (name), '\t-\t' + '%2.f' % ((spawnCount / total_len)*100) + '% (' + str(spawnCount) + ' out of ' + str(total_len) + ')')
    print('\nCommon spawning rate:')
    for i in range(0, pos_len):
        id = nestInfo[i][0] - 1
        name = poke_list[id]
        if name in global_common:
            spawnCount = nestInfo[i][1]
            print('%-12s' % (name), '\t-\t' + '%2.f' % ((spawnCount / total_len)*100) + '% (' + str(spawnCount) + ' out of ' + str(total_len) + ')')
    if (neg_len > 0):
        print('\nLikely misplaced spawns rate:')
        # Walk the negative-count tail backwards from the end of nestInfo.
        for i in range(-1, (-1*neg_len)-1, -1):
            id = nestInfo[i][0] - 1
            name = poke_list[id]
            spawnCount = nestInfo[i][1] * -1
            # Misplaced rates use the full spawn count as denominator.
            total_len = len(groupNode[0])
            print('%-12s' % (name), '\t-\t' + '%2.f' % ((spawnCount / total_len)*100) + '% (' + str(spawnCount) + ' out of ' + str(total_len) + ')')
    input('\nPress any key to continue . . . ')
def load_data():
    """Load the four JSON data files, creating each from its '*-example' twin
    when missing. Returns (nest_locs, poke_spawns, poke_list, global_common)."""
    i = 0
    # i counts successfully loaded files; after creating a missing file from
    # its example we restart from scratch (i = 0).
    while (i < 4):
        try:
            file = 'data/locs.json'
            with open(file, 'r') as locs:
                nest_locs = json.load(locs)
            i += 1
            file = 'data/pokealert_spawn_points.json'
            with open(file, 'r') as spawns:
                poke_spawns = json.load(spawns)
            i += 1
            file = 'data/common_pokemon.json'
            with open(file, 'r') as commons:
                global_common = json.load(commons)
            i += 1
            file = 'data/pokemon_list.json'
            with open(file, 'r') as pokemons:
                poke_list = json.load(pokemons)
            i += 1
        except FileNotFoundError:
            print('\nFile', '\"' + file + '\"', 'not found.')
            # Derive "<name>-example.json" from the missing path.
            exampleFile = file.rstrip('json')[:-1] + '-example' + '.json'
            try:
                with open(exampleFile, 'r') as example:
                    data = json.load(example)
                print('Creating it with the contents of', '\"' + exampleFile + '\"' + '.')
                with open(file, 'w+') as out:
                    json.dump(data, out, indent=2)
            except:
                raise
            i = 0
        except:
            # Report which file held malformed data, then re-raise.
            if (i == 0):
                print('Incorrect data on \'locs.json\' file.',)
            elif (i == 1):
                print('Incorrect data on \'pokealert_spawn_points.json\' file.',)
            elif (i == 2):
                print('Incorrect data on \'common_pokemon.json\' file.',)
            else:
                print('Incorrect data on \'pokemon_list.json\' file.')
            raise
    return nest_locs, poke_spawns, poke_list, global_common
if __name__ == '__main__':
    os.system('cls' if os.name == 'nt' else 'clear')
    try:
        nest_locs, poke_spawns, poke_list, global_common = load_data()
    except:
        # NOTE(review): swallows the actual exception after load_data already
        # printed a diagnostic; consider re-raising or printing str(e).
        print('bug')
        sys.exit()
    if (len(nest_locs) == 0):
        print('No nest location found on \'locs.json\'')
        sys.exit()
    if (len(poke_spawns) == 0):
        print('No spawn point found on \'pokealert_spawn_points.json\'')
        sys.exit()
    poke_groups, pokeGroups = parse_groups(nest_locs, poke_spawns)
    input('\nNests parsed! Press any key to start . . . ')
    # Show each nest's statistics, one screen at a time.
    for groupNode, nestInfo in eval_nests(poke_groups, pokeGroups):
        print_nest(groupNode, nestInfo, poke_list, global_common)
    os.system('cls' if os.name == 'nt' else 'clear')
    input('Press any key to continue . . . ')
| true
|
7afcb16d117f96f15ad717373a0943417841d36a
|
Python
|
surajsoni2/python-basic
|
/prime_number.py
|
UTF-8
| 271
| 4.1875
| 4
|
[] |
no_license
|
# Python program to check whether a number is Prime or not
num = int(input("enter a number to check"))
if num < 2:
    # Fix: 0, 1 and negatives are not prime; the original loop printed
    # nothing at all for them.
    print(num, "is not prime")
elif num == 2:
    # Fix: 2 is prime, but range(2, 2) is empty so the original printed nothing.
    print(num, "is prime")
else:
    # Trial division: the first divisor found proves compositeness; reaching
    # x == num - 1 without one proves primality.
    for x in range(2, num):
        if num % x == 0:
            print(num, "is not prime")
            break
        elif x == num - 1:
            print(num, "is prime")
            break
| true
|
2331251b0491a45ee90098c1caa0554dc86d3fcd
|
Python
|
AdracarysW/Kombat-Simulator
|
/player.py
|
UTF-8
| 2,925
| 3.1875
| 3
|
[] |
no_license
|
from textTools import *
from description import *
class Player():
    """A playable character with class-dependent combat stats."""
    def __init__(self, playerClass, name):
        self.ourClass = playerClass
        self.name = name
        self.hp = 0
        self.maxHp = 0
        self.atk = 0
        self.spd = 0
        self.defn = 0
        # Mana starts at 0 until playerPerks() assigns the class loadout.
        # (The original assigned self.mana twice — 3 then 0; the surviving
        # value, 0, is kept.)
        self.mana = 0
        # Fix: gold was never initialized, so canAfford(), printInfo() and
        # datadict() raised AttributeError before anything else set it.
        self.gold = 0
        # Column width used when rendering the player data panel.
        self.dataWidth = 40
    def characters(self):
        # NOTE(review): relies on a module-level `description`
        # (from description import *).
        print(description)
    def canAfford(self, val):
        """Return True when the player holds at least val gold."""
        return self.gold >= val
    def enoughMana(self, val):
        """Return True when the player has at least val mana; otherwise warn
        the user and return False."""
        if self.mana >= val:
            return True
        centerprint("YOU DON'T HAVE ENOUGH MANA!")
        return False
    def isAlive(self):
        """Return True while hp is positive."""
        return self.hp > 0
    def takeDamage(self, amount):
        """Subtract amount from hp (may drop below zero)."""
        self.hp -= amount
    def playerPerks(self):
        """Assign base stats according to self.ourClass."""
        # Per-class loadout: (hp, atk, spd, defn); maxHp mirrors hp and every
        # class starts with 5 mana.
        perks = {
            'Arcstrider': (300, 70, 50, 40),
            'Dawnblade': (450, 100, 20, 20),
            'Gunslinger': (330, 60, 50, 40),
            'Sentinel': (500, 50, 10, 60),
            'Voidwalker': (410, 80, 30, 50),
            'Warlock': (350, 90, 40, 40),
        }
        if self.ourClass in perks:
            self.hp, self.atk, self.spd, self.defn = perks[self.ourClass]
            self.maxHp = self.hp
            self.mana = 5
    def printInfo(self):
        """Render the player data panel via the textTools helpers."""
        marqueeprint('[PLAYER DATA]')
        centerprint(lr_justify('Class:', str(self.ourClass), self.dataWidth))
        centerprint(lr_justify('Name:', str(self.name), self.dataWidth))
        centerprint(lr_justify('HP:', str(self.hp) + '/' + str(self.maxHp), self.dataWidth))
        centerprint(lr_justify('Gold:', str(self.gold), self.dataWidth))
        centerprint(lr_justify('Atk:', str(self.atk), self.dataWidth))
        centerprint(lr_justify('Defense:', str(self.defn), self.dataWidth))
        print('')
    def datadict(self):
        """Return the displayable stats as a dict of strings."""
        return {
            'Class': str(self.ourClass),
            'Name': str(self.name),
            'HP': str(str(self.hp) + '/' + str(self.maxHp)),
            'Gold': str(self.gold),
            'Atk': str(self.atk),
            'Def': str(self.defn)
        }
| true
|
fc4763cf963de9cbbdfb6db2230965bd2fcbc82d
|
Python
|
JoshuaRabiu/String-Marionette
|
/app.py
|
UTF-8
| 1,979
| 3.71875
| 4
|
[] |
no_license
|
from tkinter import *
root = Tk()
#Creates Title
title = Label(root, text="String Marionette", bg="#336E7B", fg="white")
title.pack(side=TOP)
#Creates Input Field & Sets Cursor Focus inside the field
usr_input = Entry(root)
usr_input.pack()
usr_input.focus_set()
#Creates Output Frame
outputFrame = Frame(root)
outputFrame.pack()
def print_reversed(event):
    """Show the Entry text reversed, appended to the output frame.

    Bound to the "Reverse!" button; *event* is the Tk event (unused).
    The original `global reversedText` declaration was dropped: the
    label was never read anywhere else, so no module-level binding
    is needed.
    """
    string = usr_input.get()
    reversed_label = Label(outputFrame, text=string + " reversed is: " + string[::-1])
    reversed_label.pack()
def run_piglatin(event):
    """Show a naive Pig Latin conversion (first letter moved to the end + "ay").

    Fix: the original raised IndexError on `string[0]` when the Entry
    was empty; an empty input is now silently ignored.
    """
    string = usr_input.get()
    if not string:
        return
    piglatinText = Label(outputFrame, text=string + " converted to Pig Latin is: " + string[1:] + string[0] + "ay")
    piglatinText.pack()
def palindrome_check(event):
    """Report whether the Entry text reads the same forwards and backwards."""
    string = usr_input.get()
    if string == string[::-1]:
        message = string + " is indeed a palindrome!"
    else:
        message = string + " is not a palindrome..."
    Label(outputFrame, text=message).pack()
def count_vowels(event):
    """Count the vowels (a/e/i/o/u, case-insensitive) in the Entry text and display the total."""
    string = usr_input.get()
    total = sum(1 for ch in string if ch.lower() in 'aeiou')
    Label(outputFrame, text=string + " contains %d vowel(s)" % total).pack()
def text_clear(event):
    """Destroy every widget in the output frame, clearing all previous results."""
    for widget in outputFrame.winfo_children():
        widget.destroy()
# Row of action buttons along the bottom; each fires its handler on left-click
buttonFrame = Frame(root)
buttonFrame.pack(side=BOTTOM)
b = Button(buttonFrame, text="Reverse!")
b.bind('<Button-1>', print_reversed)
b.pack(side = LEFT)
b2 = Button(buttonFrame, text="Convert to Pig Latin")
b2.bind('<Button-1>', run_piglatin)
b2.pack(side=LEFT)
b3 = Button(buttonFrame, text = "Check if Palindrome")
b3.bind('<Button-1>', palindrome_check)
b3.pack(side=LEFT)
b4 = Button(buttonFrame, text = "Count Vowels")
b4.bind('<Button-1>', count_vowels)
b4.pack(side=LEFT)
b5 = Button(buttonFrame, text="Clear")
b5.bind('<Button-1>', text_clear)
b5.pack(side=LEFT)
# Enter the Tk event loop; blocks until the window is closed
root.mainloop()
| true
|
d70cce19a18a1b8e5626d5f73aecea18a5a4fa13
|
Python
|
ohhhhmy/OhGongCo
|
/0314/베스트앨범.py
|
UTF-8
| 1,214
| 3.375
| 3
|
[] |
no_license
|
def solution(g, p):
    """Best-album problem: pick up to two most-played tracks per genre.

    g -- genre name of each track (index = track id)
    p -- play count of each track
    Returns track indices: genres ordered by total plays (descending),
    and within a genre the two highest play counts, ties broken by
    the lower track index.

    Fix: removed the leftover debug `print(only_number)`.
    """
    # play count -> ascending list of track indices with that count.
    # NOTE(review): keyed across ALL genres, so equal play counts in
    # different genres share a bucket — confirm against the full test set.
    only_number = {}
    for i, plays in enumerate(p):
        only_number.setdefault(plays, []).append(i)
    # genre -> its play counts, sorted descending
    playlist = {}
    for genre, plays in zip(g, p):
        playlist.setdefault(genre, []).append(plays)
    for counts in playlist.values():
        counts.sort(reverse=True)
    # genres ordered by total plays, descending (sort is stable)
    genre_counts = sorted(playlist.values(), key=sum, reverse=True)
    answer = []
    for counts in genre_counts:
        top = counts[:2]
        if len(top) == 1:
            answer.append(only_number[top[0]][0])
        else:
            for j in range(2):
                candidates = only_number[top[j]]
                # unique count -> its only index; duplicated count ->
                # the j-th index (first, then second occurrence)
                if len(candidates) == 1:
                    answer.append(candidates[0])
                else:
                    answer.append(candidates[j])
    return answer
# Smoke tests; expected output: [4, 1, 3, 0] and [4, 5, 1, 3]
print(solution(["classic", "pop", "classic", "classic", "pop"], [500, 600, 150, 800, 2500]))
print(solution(["classic", "pop", "classic", "pop", "classic", "classic"], [400,600,150,600,500,500]))
| true
|
5d4895a276a0208914d68dc43950c69719c312d2
|
Python
|
davjs/ai-programmer
|
/tests/generatortests2.py
|
UTF-8
| 2,520
| 3.109375
| 3
|
[] |
no_license
|
import main.expressions
from main.function import Function
from main.programgenerator2 import ProgramIntention, Parameter
from main.expressions import get_computed_boolean_expressions_using_all_variables
__author__ = 'David'
import unittest
from main import programgenerator2
class GeneratorTests(unittest.TestCase):
    """Tests for programgenerator2.

    Uses assertTrue/assertIn/assertEqual: the failUnless alias was
    deprecated since Python 2.7/3.1 and removed in Python 3.12.
    """
    # SIMPLE X EXPRESSIONS (disabled pending ProgramGenerator rework)
    """
    def testExpressionX(self):
        generator = programgenerator2.ProgramGenerator("x", length=1)
        self.failUnless("x" in generator.get_expressions())
    def testExpressionXPlus1(self):
        generator = programgenerator2.ProgramGenerator("x", length=1)
        self.failUnless("x + 1" in generator.get_expressions())
    """
    # SIMPLE RETURN STATEMENTS
    def test_get_intention_reduce_list(self):
        f = Function("foo", [Parameter("list_to_sum", list)], int)
        generator = programgenerator2.ProgramGenerator2(f)
        self.assertEqual(generator.intention, ProgramIntention.reduce_list)
    def test_get_intention_combine_booleans(self):
        f = Function("foo", [Parameter("x", bool), Parameter("y", bool)], bool)
        generator = programgenerator2.ProgramGenerator2(f)
        self.assertEqual(generator.intention, ProgramIntention.combine_booleans)
    def testBooleanCombineNotxOrY(self):
        expressions = get_computed_boolean_expressions_using_all_variables(["x", "y"])
        self.assertIn("(not x) or y", expressions)
    def testBooleanCombineXEqY(self):
        expressions = get_computed_boolean_expressions_using_all_variables(["x", "y"])
        self.assertIn("x == y", expressions)
    def testStatementReturn1(self):
        f = Function("foo", [Parameter("list_to_sum", list)], int)
        generator = programgenerator2.ProgramGenerator2(f)
        programs = list(generator.get_codes(4))
        self.assertTrue(any("y = 0" in codes for codes in programs))
        self.assertTrue(any("for i in list_to_sum:" in codes for codes in programs))
        self.assertTrue(any("\ty = y + i" in codes for codes in programs))
        self.assertTrue(any("return y" in codes for codes in programs))
        self.assertIn(["y = 0",
                       "for i in list_to_sum:",
                       "\ty = y + i",
                       "return y"], programs)
    def test_get_computed_expressions_using_both_variables_y_plus_i(self):
        # (dropped an unused local that aliased the module)
        self.assertIn("y + i", main.expressions.get_computed_expressions_using_both_variables("y", "i"))
| true
|
7c4fef5fb91d1842806e04ccd51177309456bdc2
|
Python
|
victorarodri/probprog
|
/scripts/model.py
|
UTF-8
| 1,991
| 2.84375
| 3
|
[] |
no_license
|
# Imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from edward.models import (
Dirichlet, Categorical, ParamMixture)
def mc_lda_model(D, S, V, K, w_obs):
    """Defines multi-channel LDA model in Edward.
    Args:
        D - Number of individuals.
        S - Number of data sources.
        V - List of vocabulary size for each data source.
        K - Number of phenotypes to model.
        w_obs - List of lists of 1D NumPy arrays containing
            tokens for each individual and data source.
    Returns:
        alpha - Dirichlet hyperparameters for prior on theta.
        beta - Dirichlet hyperparameters for prior on phi.
        theta - List of categorical parameters for individual
            phenotype distributions.
        phi - List of categorical parameters for phenotype token distributions.
        z - List of phenotype assignments for each observed token.
        w - List of observed tokens modeled as parameter mixtures.
    """
    # Symmetric Dirichlet prior over the K phenotype proportions.
    alpha = tf.zeros(K) + 0.01
    beta, phi = [None] * S, [None] * S
    for s in range(S):
        # Per-source symmetric Dirichlet prior over that source's vocabulary;
        # phi[s] draws K rows, one token distribution per phenotype.
        beta[s] = tf.zeros(V[s]) + 0.01
        phi[s] = Dirichlet(concentration=beta[s],
                           sample_shape=K)
    theta = [None] * D
    w = [[None] * S for d in range(D)]
    z = [[None] * S for d in range(D)]
    for d in range(D):
        theta[d] = Dirichlet(concentration=alpha)
        for s in range(S):
            # Each observed token is a mixture: choose a phenotype from
            # theta[d], then a token from that phenotype's phi[s] row;
            # sample_shape matches the number of observed tokens.
            w[d][s] = ParamMixture(mixing_weights=theta[d],
                                   component_params={'probs': phi[s]},
                                   component_dist=Categorical,
                                   sample_shape=len(w_obs[d][s]),
                                   validate_args=True)
            # .cat exposes the per-token mixture assignments (the z's).
            z[d][s] = w[d][s].cat
    return alpha, beta, theta, phi, z, w
def main():
    """Placeholder entry point; succeeds trivially."""
    return True


if __name__ == '__main__':
    main()
| true
|
ecf31f842d63b2a6acc474e1133e99daffc1cca2
|
Python
|
developyoun/AlgorithmSolve
|
/solved/15059.py
|
UTF-8
| 184
| 3.265625
| 3
|
[] |
no_license
|
# Read current counts (line 1) and required counts (line 2) for three
# item kinds; print the total shortfall (only deficits count).
a1, b1, c1 = map(int, input().split())
a2, b2, c2 = map(int, input().split())
res = sum(max(0, need - have) for have, need in ((a1, a2), (b1, b2), (c1, c2)))
print(res)
| true
|
c8a0a78a693b8c9e692b12d000e263f349633a8e
|
Python
|
ksavietta/PythonExercises
|
/6.00.1x Files/probset2-1.py
|
UTF-8
| 853
| 3.015625
| 3
|
[] |
no_license
|
# Python 2 script (print statements): 12-month credit-card schedule
# paying only the 4% monthly minimum, with 20% annual interest applied
# to the unpaid remainder each month.
balance = 4842
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
monthlyInterestRate = 0
minimumMonthlyPayment = 0
monthlyUnpaidPayment = 0
updatedBalanceEachMonth = balance
total = 0
for month in range(1,13):
    balance = updatedBalanceEachMonth
    monthlyInterestRate = annualInterestRate/12.0
    minimumMonthlyPayment = monthlyPaymentRate*balance
    # interest accrues on the balance left after the minimum payment
    monthlyUnpaidPayment = balance - minimumMonthlyPayment
    updatedBalanceEachMonth = monthlyUnpaidPayment + (monthlyInterestRate*monthlyUnpaidPayment)
    total += minimumMonthlyPayment
    print "Month: " + str(month)
    print "Minimum monthly payment: " + str(round(monthlyPaymentRate*balance, 2))
    print "Remaining balance: " + str(round(updatedBalanceEachMonth, 2))
print "Total paid: " + str(round(total, 2))
print "Remaining balance: " + str(round(updatedBalanceEachMonth, 2))
| true
|
8f943c05476abceaf8e874360daf646cd3ffb700
|
Python
|
Antonio24ch/Bedu-sesion141120
|
/e02_imc.py
|
UTF-8
| 748
| 3.640625
| 4
|
[] |
no_license
|
def clasificar_imc(imc):
    """Return the Spanish weight-category label for a BMI value.

    Fix: the original compared `imc >= 10.5` for "peso saludable",
    which made the `>= 16` (delgadez) and `>= 15` (delgadez severa)
    branches unreachable; the standard healthy-weight lower bound
    is 18.5.
    """
    if imc >= 40:
        return 'obesidad muy severa.'
    elif imc >= 35:
        return 'obesidad severa.'
    elif imc >= 30:
        return 'obesidad moderada.'
    elif imc >= 25:
        return 'sobrepeso.'
    elif imc >= 18.5:
        return 'peso saludable.'
    elif imc >= 16:
        return 'delgadez.'
    elif imc >= 15:
        return 'delgadez severa.'
    else:
        return 'delgadez muy severa.'


if __name__ == "__main__":
    # Interactive flow under a main guard so the module can be imported
    # (e.g. by tests) without prompting; behaviour as a script is unchanged.
    nombre = input('cual es tu nombre?\n')
    peso = float(input(f'{nombre}, cual es tu peso?'))
    altura = float(input(f'{nombre}, cual es tu altura?'))
    imc = peso / (altura * altura)
    grado_obesidad = clasificar_imc(imc)
    resultado = f'{nombre} tu IMC es {imc:.2f}, quiere decir que tu tienes {grado_obesidad}'
    print(resultado)
| true
|
0f3cba64cd8116dc85cd747b0c874528604499fb
|
Python
|
ABenxj/leetcode
|
/31 nextPermutation.py
|
UTF-8
| 936
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env pyhton
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 , Inc. All Rights Reserved
#
"""
Authors: jufei
Date: 2021/4/7 5:34 PM
"""
from typing import List
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """Rearrange nums into the next lexicographic permutation, in place.

        Wraps around to the smallest permutation when nums is already
        the largest one.
        """
        # Rightmost position whose value is smaller than its successor.
        pivot = len(nums) - 2
        while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
            pivot -= 1
        if pivot < 0:
            # Fully non-increasing: wrap to ascending order.
            nums[:] = nums[::-1]
            return None
        # Walk right while values stay larger than the pivot; the element
        # just before the stop point is the rightmost value > nums[pivot].
        step = 1
        while pivot + step < len(nums) and nums[pivot] < nums[pivot + step]:
            step += 1
        nums[pivot], nums[pivot + step - 1] = nums[pivot + step - 1], nums[pivot]
        # The suffix is non-increasing; reverse it to get the minimal tail.
        nums[pivot + 1:] = nums[pivot + 1:][::-1]
        return None
| true
|
d0ba017f3dd46ca4606197aa2fcc230409786079
|
Python
|
JKrysztofiak/single-layer-neural-network
|
/Perceptron.py
|
UTF-8
| 2,915
| 2.921875
| 3
|
[] |
no_license
|
from random import randint
import math
def vector_length(v: list) -> float:
    """Euclidean (L2) norm of vector v."""
    return math.sqrt(sum(component ** 2 for component in v))
def multiply_vector(v: list, m: float) -> list:
    """Return v scaled by m; every element is coerced to float first."""
    return [float(component) * m for component in v]
def add_vectors(v1: list, v2: list) -> list:
    """Element-wise sum of v1 and v2 (iterates over v1's length), as floats."""
    return [float(v1[i]) + float(v2[i]) for i in range(len(v1))]
def calculate_net(w: list, p: list) -> float:
    """Dot product of weight vector w and input vector p (iterates over w's length)."""
    return sum(float(p[i]) * float(w[i]) for i in range(len(w)))
def delta_rule(w: list, d: int, y: int, alfa: float, x: list, t: float) -> list:
    """One delta-rule update; returns the updated augmented vector [w', t'].

    The threshold t is appended to the weights and a constant -1 input is
    appended to x, so the bias is trained together with the weights.
    d is the desired output, y the actual output, alfa the learning rate.
    """
    augmented_w = w + [t]
    augmented_x = x + [-1]
    correction = multiply_vector(augmented_x, (d - y) * alfa)
    return add_vectors(augmented_w, correction)
class Perceptron:
    """Single perceptron trained with the delta rule.

    Uses the module-level vector helpers; the threshold self.t acts as
    the (trainable) bias.
    """
    # def __init__(self, k: int, group: str):
    #     self.weights = [randint(-10,10) for x in range(0,k)]
    #     self.t = randint(-10,10)
    #     self.group = group
    def __init__(self, w: list, t: float, group: str):
        """Store a copy of the initial weights, threshold t, and class label."""
        self.weights = w.copy()
        self.t = t
        self.group = group
    def training(self, trainSet: list, trueRes: str, alfa: float):
        """Delta-rule updates on one example until it is classified correctly.

        trueRes is the example's true class label, alfa the learning rate.
        NOTE(review): the while loop has no iteration cap — it may not
        terminate if the example cannot be separated; confirm with callers.
        """
        net = calculate_net(self.weights, trainSet)
        # net = calculate_net(multiply_vector(self.weights, (1.0/vector_length(self.weights))),multiply_vector(trainSet, (1.0/vector_length(trainSet))))
        res = 0
        if net > self.t:
            res = 1
        # Repeat while the prediction disagrees with the true label.
        while (res == 0 and trueRes == self.group) or (res == 1 and trueRes != self.group):
            # uczenie (training step)
            # print(f"UCZENIE {i}")
            # i+=1
            d = 1
            y = 0
            if res == 1:
                d = 0
                y = 1
            # delta_rule returns the augmented vector [w', t']; split it back.
            wprim = delta_rule(self.weights, d, y, alfa, trainSet, self.t)
            # wprim = delta_rule(multiply_vector(self.weights, (1.0/vector_length(self.weights))), d, y, alfa, multiply_vector(trainSet, (1.0/vector_length(trainSet))), self.t)
            self.t = float(wprim[-1])
            self.weights = wprim[:-1]
            net = calculate_net(self.weights, trainSet)
            # net = calculate_net(multiply_vector(self.weights, (1.0/vector_length(self.weights))),multiply_vector(trainSet, (1.0/vector_length(trainSet))))
            res = 0
            if net > self.t:
                res = 1
        print(f"[{self.group}] t={self.t} ") #| weights {self.weights}
    def testing(self, testSet: list) -> int:
        """Return the net activation for testSet (normalized dot product).

        NOTE(review): despite the `-> int` annotation, this returns the
        float `net`, not the 0/1 decision `res` — confirm intended API.
        """
        net = calculate_net(multiply_vector(self.weights, (1.0/vector_length(self.weights))),multiply_vector(testSet, (1.0/vector_length(testSet))))
        #net = calculate_net(self.weights,testSet)
        res = 0
        if net > self.t:
            res = 1
        return net
        # return res
| true
|
882fee7f1fb0d22af6ea3fb9bd8838b9b0fd2a25
|
Python
|
apalala/exercism
|
/python/robot-name/robot_name.py
|
UTF-8
| 700
| 3.109375
| 3
|
[] |
no_license
|
import string
from datetime import datetime
class Robot():
    """Robot whose name is derived from the current wall-clock time."""

    def __init__(self):
        self.name = None
        self.reset()

    def reset(self):
        """Regenerate the name: two letters from day-of-year x half-day,
        plus three digits from the microsecond clock.

        NOTE(review): day_half is 0 before noon, so the letter part
        collapses to "AA" for half of every day — confirm this is intended.
        """
        now = datetime.now()
        day_of_year = now.timetuple().tm_yday
        day_half = now.hour // 12
        letters = i2text(day_of_year * day_half, 2)[:2]
        digits = '%03d' % (now.microsecond // 10 % 1000)
        self.name = letters + digits
def i2text(value, width=1, alphabet=string.ascii_uppercase):
    """Encode value in base len(alphabet), least-significant digit first,
    left-padded with alphabet[0] to at least *width* characters."""
    base = len(alphabet)
    digits = []
    while value != 0:
        value, remainder = divmod(value, base)
        digits.append(alphabet[remainder])
    pad_count = max(0, width - len(digits))
    return ''.join([alphabet[0]] * pad_count + digits)
| true
|
057e318db4a9bbd1e9007ca03f46418c01dce7ae
|
Python
|
upsidedownio/coding-club-hr
|
/season001/20180829-week2/gted/basic_bst.py
|
UTF-8
| 2,093
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/python3
class Node(object):
    """Binary-tree node holding *data*, child links, and its depth level."""

    def __init__(self, data):
        self.left = None    # left child
        self.right = None   # right child
        self.data = data
        self.level = 0      # depth; assigned when inserted into a tree

    def __str__(self):
        return str(self.data)
def _get_next(sub_root, data):
if data < sub_root.data:
return sub_root, sub_root.left
else:
return sub_root, sub_root.right
class BasicBST(object):
    """Minimal binary search tree supporting insertion and an ASCII dump."""

    def __init__(self, root):
        self._root = root

    def add_node(self, data):
        """Insert *data* as a new leaf."""
        self._add_node(Node(data))

    def add_node_and_print(self, data):
        """Insert *data*, then dump the tree."""
        self.add_node(data)
        self.print_tree()

    def _add_node(self, node):
        # Walk down until the slot node.data belongs in is empty,
        # then attach the node there and record its depth.
        parent, child = _get_next(self._root, node.data)
        while child is not None:
            parent, child = _get_next(child, node.data)
        node.level = parent.level + 1
        if node.data < parent.data:
            parent.left = node
        else:
            parent.right = node

    def print_tree(self):
        """Print a breadth-first dump: one '[left|right]' cell per visited
        node, one text line per tree level, root value on its own line."""
        queue = [self._root]
        out = "[" + str(self._root.data) + "]\n"
        level = self._root.level
        while queue:
            current = queue.pop(0)
            if current.level > level:
                # Entered the next tree level: start a new text line.
                level = current.level
                out += "\n"
            if current.left:
                queue.append(current.left)
                out += "[" + str(current.left.data)
            else:
                out += "[-"
            out += "|"
            if current.right:
                queue.append(current.right)
                out += str(current.right.data) + "]"
            else:
                out += "-]"
        print(out)
if __name__ == '__main__':
    # Demo: build a small tree and dump it after every insertion.
    root = Node(6)
    bst = BasicBST(root)
    bst.print_tree()
    bst.add_node_and_print(2)
    bst.add_node_and_print(1)
    bst.add_node_and_print(10)
    bst.add_node_and_print(8)
    bst.add_node_and_print(9)
    bst.add_node_and_print(7)
    bst.add_node_and_print(0)
    bst.add_node_and_print(5)
| true
|
90eaf15eb0922042f91d484104711b72565e22c6
|
Python
|
Alexfordrop/Basics
|
/анотация_типов.py
|
UTF-8
| 254
| 3.515625
| 4
|
[] |
no_license
|
from typing import List, Dict
numbers: Dict[str, int] = {'one': 1, 'two': 2}
values: List[int] = numbers.values()
def _sum(a: int, b: int) -> int:
return a + b
_sum(*values)
_sum('Rock ', 'Hard')
print(_sum(*values))
print(_sum('Rock ', 'Hard'))
| true
|
e90a12aa8649f83366d8ae0b0f9638eb21538fed
|
Python
|
fbserkov/praxis
|
/robert_martin/payroll_system/change_classification_transaction.py
|
UTF-8
| 2,148
| 2.765625
| 3
|
[] |
no_license
|
from change_employee_transaction import ChangeEmployeeTransaction
from employee import EmpId, Employee
from payment_classification import (
CommissionedClassification, HourlyClassification,
PaymentClassification, SalariedClassification)
from payment_schedule import (
BiweeklySchedule, MonthlySchedule, PaymentSchedule, WeeklySchedule)
class ChangeClassificationTransaction(ChangeEmployeeTransaction):
    """Base transaction that swaps an employee's payment classification
    and schedule; subclasses supply the concrete pair via the two getters."""

    def __init__(self, emp_id: EmpId):
        super().__init__(emp_id)

    def change(self, e: Employee):
        """Apply the new classification and schedule to *e*."""
        e.set_classification(self.get_classification())
        e.set_schedule(self.get_schedule())

    def get_classification(self) -> PaymentClassification:
        # Overridden by subclasses; the base returns a neutral classification.
        return PaymentClassification()

    def get_schedule(self) -> PaymentSchedule:
        # Overridden by subclasses; the base returns a neutral schedule.
        return PaymentSchedule()
class ChangeSalariedTransaction(ChangeClassificationTransaction):
    """Switch an employee to a monthly salary."""

    def __init__(self, emp_id: EmpId, salary):
        super().__init__(emp_id)
        self._salary = salary

    def get_classification(self) -> SalariedClassification:
        return SalariedClassification(self._salary)

    def get_schedule(self) -> MonthlySchedule:
        return MonthlySchedule()
class ChangeHourlyTransaction(ChangeClassificationTransaction):
    """Switch an employee to hourly pay on a weekly schedule."""

    def __init__(self, emp_id: EmpId, hourly_rate):
        super().__init__(emp_id)
        self._hourly_rate = hourly_rate

    def get_classification(self) -> HourlyClassification:
        return HourlyClassification(self._hourly_rate)

    def get_schedule(self) -> WeeklySchedule:
        return WeeklySchedule()
class ChangeCommissionedTransaction(ChangeClassificationTransaction):
    """Switch an employee to base salary plus commission, paid biweekly."""

    def __init__(self, emp_id: EmpId, salary, commission_rate):
        super().__init__(emp_id)
        self._salary = salary
        self._commission_rate = commission_rate

    def get_classification(self) -> CommissionedClassification:
        return CommissionedClassification(self._salary, self._commission_rate)

    def get_schedule(self) -> BiweeklySchedule:
        return BiweeklySchedule()
| true
|
c9859d933b31cd3038f60ed4999278e2b02d656d
|
Python
|
ZeroNetto/Dictionaries
|
/Modules/ordered_array.py
|
UTF-8
| 3,161
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
import sys
import utils
class Ordered_array:
    """A list kept sorted, restricted to elements of one declared type.

    Type validation/coercion is delegated to the project's ``utils`` module.

    Fixes: (1) ``index_of`` raised IndexError on an empty array (it
    computed index 0 and dereferenced it before any bounds check);
    (2) the type-error message always reported ``NoneType`` because the
    original value was overwritten with the failed coercion result before
    being formatted. The repeated coercion pattern is factored into
    ``_coerce``.
    """

    def __init__(self, elements_type):
        self.ord_array = []
        (ok, msg) = utils.check_type(elements_type)
        if not ok:
            raise ValueError(msg)
        # On success, check_type returns the normalized type in msg.
        self.elements_type = msg

    def _coerce(self, element):
        """Coerce *element* to the declared type or raise TypeError."""
        coerced = utils.try_lead_to(element, self.elements_type)
        if coerced is None:
            # Report the caller's original type, not NoneType.
            raise TypeError('Unacceptable type of element: {0}\n'
                            'Should be: {1}'.format(type(element),
                                                    self.elements_type))
        return coerced

    def append(self, element):
        """Insert *element*, keeping the array sorted."""
        self.ord_array.append(self._coerce(element))
        self.ord_array.sort()

    def element(self, index):
        """Return the element at 0-based *index*."""
        index = utils.parse_int(index)
        if index is None:
            raise TypeError('Index should be integer!')
        if len(self.ord_array) <= index:
            raise ValueError('Index should be in range of length of array!')
        return self.ord_array[index]

    def index_of(self, element):
        """Binary-search for *element*; return its index, or -1 if absent."""
        element = self._coerce(element)
        if not self.ord_array:
            # Guard: the search below would dereference position 0 of an
            # empty list and raise IndexError.
            return -1
        left_border = 0
        right_border = len(self.ord_array) - 1
        index = int(right_border / 2)
        while self.ord_array[index] != element and left_border < right_border:
            if element > self.ord_array[index]:
                left_border = index + 1
            else:
                right_border = index - 1
            index = int((left_border + right_border) / 2)
        if left_border > right_border or element != self.ord_array[index]:
            return -1
        return index

    def contains(self, element):
        """True if *element* is present."""
        return self.index_of(element) != -1

    def delete(self, element):
        """Remove one occurrence of *element*; no-op when absent."""
        element = self._coerce(element)
        index = self.index_of(element)
        if index < 0:
            return
        self.ord_array.pop(index)

    def insert(self, index, element):
        """Replace the element at *index* with *element*, then re-sort.

        NOTE(review): despite the name this overwrites rather than
        inserts — confirm the intended semantics with callers.
        """
        index = utils.parse_int(index)
        element = self._coerce(element)
        if index is None:
            raise TypeError('Index should be integer!')
        if len(self.ord_array) <= index:
            raise ValueError('Index should be in range of length of array!')
        self.ord_array[index] = element
        self.ord_array.sort()

    def count(self):
        """Number of stored elements."""
        return len(self.ord_array)

    def clear(self):
        """Remove all elements."""
        self.ord_array = []
| true
|
10da4ca624ae902f917fd4071e0a76edce4a5ea8
|
Python
|
evbolokhov/PythonPY100
|
/Занятие1/Лабораторные задания/task3/main.py
|
UTF-8
| 139
| 3.421875
| 3
|
[] |
no_license
|
if __name__ == "__main__":
# Write your solution here
a = float(input("a :"))
n = int(input("n: "))
print("a^n: ", a ** n)
| true
|
e4127ca88fd4629bf59bc80f6e806e2aa8e9bbbe
|
Python
|
xenron/sandbox-da-python
|
/book/packt/Python.Data.Visualization.Cookbook/3367OS_06_Code/ch06/ch06_rec01_02_pil_thumbnails.py
|
UTF-8
| 2,234
| 3.328125
| 3
|
[] |
no_license
|
import os
import sys
from math import floor
from PIL import Image
class Thumbnailer(object):
    """Creates scaled-down copies of every file in a folder (Python 2 / PIL).

    Thumbnails go into a subfolder of *src_folder* (default "thumbnails");
    *ratio* scales the original width and height.
    """
    def __init__(self, src_folder=None):
        self.src_folder = src_folder
        self.ratio = .3                        # scale factor for both dimensions
        self.thumbnail_folder = "thumbnails"   # output subfolder name
    def _create_thumbnails_folder(self):
        """Create the output subfolder if it does not exist yet."""
        thumb_path = os.path.join(self.src_folder, self.thumbnail_folder)
        if not os.path.isdir(thumb_path):
            os.makedirs(thumb_path)
    def _build_thumb_path(self, image_path):
        """Map an input path to '<dir>/<thumb_folder>/<name>.thumbnail<ext>'."""
        root = os.path.dirname(image_path)
        name, ext = os.path.splitext(os.path.basename(image_path))
        suffix = ".thumbnail"
        return os.path.join(root, self.thumbnail_folder, name + suffix + ext)
    def _load_files(self):
        """Return the set of absolute paths of regular files in src_folder."""
        files = set()
        for each in os.listdir(self.src_folder):
            each = os.path.abspath(self.src_folder + '/' + each)
            if os.path.isfile(each):
                files.add(each)
        return files
    def _thumb_size(self, size):
        """Scale a (width, height) pair by self.ratio, truncating to int."""
        return (int(size[0] * self.ratio), int(size[1] * self.ratio))
    def create_thumbnails(self):
        """Resize every file; non-image files surface as caught IOErrors."""
        self._create_thumbnails_folder()
        files = self._load_files()
        for each in files:
            print "Processing: " + each
            try:
                img = Image.open(each)
                thumb_size = self._thumb_size(img.size)
                resized = img.resize(thumb_size, Image.ANTIALIAS)
                savepath = self._build_thumb_path(each)
                resized.save(savepath)
            except IOError as ex:
                # Best-effort: skip unreadable/non-image files, keep going.
                print "Error: " + str(ex)
if __name__ == "__main__":
# Usage:
# ch06_rec01_02_pil_thumbnails.py my_images
assert len(sys.argv) == 2
src_folder = sys.argv[1]
if not os.path.isdir(src_folder):
print "Error: Path '{0}' does not exits.".format((src_folder))
sys.exit(-1)
thumbs = Thumbnailer(src_folder)
# optionally set the name of thumbnail folder inside *src_folder*.
thumbs.thumbnail_folder = "THUMBS"
# define ratio to resize image to
# 0.1 means the original image will be resized to 10% of its size
thumbs.ratio = 0.1
# will create set of images in temporary folder
thumbs.create_thumbnails()
| true
|
26353da2fa7f18d20e01babe4404a96a44d0c6a3
|
Python
|
Hackerfarm/python-workshops
|
/LivePygame/game02.py
|
UTF-8
| 5,629
| 2.96875
| 3
|
[
"Unlicense"
] |
permissive
|
import pygame, types, threading, traceback
class Game:
    """Top-level pygame loop: owns the display, a scene list of objects with
    update(dt)/render(disp) methods, and an event-dispatch table keyed by
    tuples of (event-attribute, required-value) pairs."""
    def __init__(self):
        self.size = (500,500)
        self.running = True
        self.scene = list()            # drawable/updatable objects
        self.event_handlers = dict()
        # Quit on window close, 'q', or Escape.
        self.event_handlers[(('type',pygame.QUIT),)] = self.on_quit
        self.event_handlers[(('type',pygame.KEYDOWN), ('key',pygame.K_q))] = self.on_quit
        self.event_handlers[(('type',pygame.KEYDOWN), ('key',pygame.K_ESCAPE))] = self.on_quit
        self.flipdelay=16              # ms pause per frame (~60 fps cap)
        self.tickcounter=0             # last tick timestamp for dt computation
    def render(self):
        """Clear the display, render every scene object, then flip buffers.

        An object whose render() raises is evicted from the scene.
        NOTE(review): removing from self.scene while iterating it can skip
        the following object — confirm acceptable."""
        self.disp.fill((0,0,0))
        for obj in self.scene:
            try:
                obj.render(self.disp)
            except Exception:
                traceback.print_exc()
                self.scene.remove(obj)
                print("Exception during render: Object "+str(obj)+" removed from the scene")
        pygame.display.flip()
    def update(self):
        """Advance every scene object by the elapsed ticks; evict on error."""
        dt=pygame.time.get_ticks()- self.tickcounter
        for obj in self.scene:
            try:
                obj.update(dt)
            except Exception:
                traceback.print_exc()
                self.scene.remove(obj)
                print("Exception during update: Object "+str(obj)+" removed from the scene")
        self.tickcounter=pygame.time.get_ticks()
        pygame.time.delay(self.flipdelay)
    def on_quit(self, event):
        """Handler: stop the main loop."""
        self.running = False
    def process_events(self):
        """Dispatch pending pygame events.

        A handler fires when the event has every attribute named in the
        handler's key tuple with exactly the required value."""
        for event in pygame.event.get():
            dire = dir(event)
            for eh in self.event_handlers.keys():
                callit=True
                for (attrname,attrvalue) in eh:
                    if (not attrname in dire) or (event.__getattribute__(attrname)!=attrvalue):
                        callit=False
                        break
                if callit:
                    self.event_handlers[eh](event)
    def refresh(self):
        """Rebuild the scene from scratch: fresh Map and player Character."""
        self.scene = list()
        self.scene.append(Map())
        self.player = Character(100,100)
        self.scene.append(self.player)
        self.event_handlers[(('type',pygame.KEYDOWN),)] = self.player.handle_keydown
    def mainloop(self):
        """Init pygame, then run render/events/update until running is False.

        Any per-frame exception is printed and followed by a 10 s pause
        (crude debugging aid), then the loop continues."""
        pygame.init()
        self.disp=pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)
        self.tickcounter=pygame.time.get_ticks()
        while( self.running ):
            try:
                self.render()
                self.process_events()
                self.update()
            except Exception:
                traceback.print_exc()
                pygame.time.delay(10000)
        pygame.quit()
class Map:
    """Static tiled grass background; keeps an EMA of its own blit time."""

    def __init__(self):
        # We will need to load some images and make a better Tile class later
        # but for now a single 32x32 grass tile is blitted across the window.
        self.tile = pygame.image.load('art/castle/grass.png')
        self.avg_time = 0.0

    def update(self, dt):
        # Static background: nothing to advance.
        return

    def render(self, disp):
        start_ticks = pygame.time.get_ticks()
        for row in range(0, 500, 32):
            for col in range(0, 500, 32):
                disp.blit(self.tile, (col, row))
        # Exponential moving average of the render cost, in ticks.
        self.avg_time = 0.9 * self.avg_time + 0.1 * float(pygame.time.get_ticks() - start_ticks)
class Character:
    """Animated walking character driven by arrow-key direction changes."""

    def __init__(self, x, y):
        self.img = pygame.image.load("art/LPC/walk.png")
        # Movement vector (dx, dy) applied on every update tick, per facing.
        self.dirs = {"up": (0, -0.5), "down": (0, 0.5), "right": (1, 0), "left": (-1, 0)}
        # anim maps direction -> [ticks_per_frame, frame0, frame1, ...];
        # each direction is one row of 8 frames in the sprite sheet.
        self.anim = dict()
        self.cycle_index = 0
        self.cycle_tick = 0
        for direction, row in (("up", 0), ("left", 64), ("down", 128), ("right", 192)):
            frames = [80]  # 80 ticks per animation frame
            for i in range(8):
                frames.append(self.img.subsurface((64 + i * 64, row, 64, 64)))
            self.anim[direction] = frames
        self.current_anim = "up"
        self.current_frames = self.anim[self.current_anim]
        self.pos = [x, y]

    def update(self, dt):
        """Advance the animation clock by dt ticks and move along self.dirs."""
        frames = self.anim[self.current_anim]
        dx, dy = self.dirs[self.current_anim]
        self.cycle_tick = (self.cycle_tick + dt) % ((len(frames) - 1) * frames[0])
        self.cycle_index = int(self.cycle_tick / frames[0])
        self.pos[0] += dx
        self.pos[1] += dy

    def handle_keydown(self, evt):
        """Switch the facing (and thus movement) on arrow-key presses."""
        if evt.key == pygame.K_LEFT:
            self.current_anim = "left"
        elif evt.key == pygame.K_RIGHT:
            self.current_anim = "right"
        if evt.key == pygame.K_UP:
            self.current_anim = "up"
        if evt.key == pygame.K_DOWN:
            self.current_anim = "down"

    def render(self, display):
        """Blit the current animation frame at the character's position."""
        frames = self.anim[self.current_anim]
        display.blit(frames[1 + self.cycle_index], (self.pos))
if __name__ == "__main__":
game = Game()
game.scene.append(Map())
game.player = Character(100,100)
game.scene.append(game.player)
game.event_handlers[(('type',pygame.KEYDOWN),)] = game.player.handle_keydown
th = threading.Thread(target = game.mainloop)
th.start()
| true
|
4c6d685cccc6536c531fcf12f6a518d822e2cac4
|
Python
|
amiraliakbari/sharif-mabani-python
|
/by-subject/mathematics/integration.py
|
UTF-8
| 502
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
def drange(start, stop, step):
    """Yield start, start+step, start+2*step, ... while the value is < stop.

    Float analogue of range(); beware of floating-point accumulation for
    non-dyadic steps.
    """
    current = start
    while current < stop:
        yield current
        current += step
def simpleIntegration(lower, upper, a, b, c, delta):
    """Left-rectangle approximation of the integral of a*x**2 + b*x + c
    over [lower, upper], with step width delta."""
    total = 0
    for x in drange(lower, upper, delta):
        total += (a * x ** 2 + b * x + c) * delta
    return total
# Python 2 print statement; the exact integral of 3x^2+2x+1 over [0,1] is 3.
print simpleIntegration(0,1,3,2,1,0.0001)
| true
|
d8dbc2e47e3a6c39eeedc207d2763c5d17f353a4
|
Python
|
parsd/fastapi-login
|
/test/mocks.py
|
UTF-8
| 830
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
"""Mock objects for testing."""
from typing import Optional
from pydantic import BaseModel
class User(BaseModel):
    """Minimal pydantic user payload stored by the mock session store."""
    name: str
class SessionStore:
    """In-memory mapping of session id -> User, for use in tests."""

    def __init__(self):
        self._ids = {}

    def __contains__(self, item):
        return item in self._ids

    def __len__(self):
        return len(self._ids)

    def new(self, session_id: str, user: User) -> None:
        """Register a fresh session; the id must not already exist."""
        assert session_id not in self._ids
        self._ids[session_id] = user

    def remove(self, session_id: str) -> None:
        """Drop a session; KeyError if the id is unknown."""
        del self._ids[session_id]

    def get(self, session_id: str) -> Optional[User]:
        """Return the session's user, or None when the id is unknown."""
        return self._ids.get(session_id, None)

    def keys(self):
        """List of the active session ids."""
        return list(self._ids.keys())
| true
|
3396a0d9626b6bd09bff30ffdd9611a1ad42fca8
|
Python
|
orenlivne/euler
|
/rosalind/fib/rosalind_fib.py
|
UTF-8
| 2,322
| 3.75
| 4
|
[] |
no_license
|
'''
============================================================
http://rosalind.info/problems/fib/
Problem
A sequence is an ordered collection of objects (usually
numbers), which are allowed to repeat. Sequences can be finite or
infinite. Two examples are the finite sequence (PI,-SQRT(2),0,PI)
and the infinite sequence of odd numbers (1,3,5,7,9,,...). We use the notation an to represent the n-th term of a sequence.
A recurrence relation is a way of defining the terms of a sequence with respect to the values of previous terms. In the case of Fibonacci's rabbits from the introduction, any given month will contain the rabbits that were alive the previous month, plus any new offspring. A key observation is that the number of offspring in any month is equal to the number of rabbits that were alive two months prior. As a result, if Fn represents the number of rabbit pairs alive after the n-th month, then we obtain the Fibonacci sequence having terms Fn that are defined by the recurrence relation Fn=F(n-1)+F(n-2) (with F1=F2=1 to initiate the sequence). Although the sequence bears Fibonacci's name, it was known to Indian mathematicians over two millennia ago.
When finding the n-th term of a sequence defined by a recurrence relation, we can simply use the recurrence relation to generate terms for progressively larger values of n. This problem introduces us to the computational technique of dynamic programming, which successively builds up solutions by using the answers to smaller cases.
Given: Positive integers n<=40 and k<=5.
Return: The total number of rabbit pairs that will be present after n months if we begin with 1 pair and in each generation, every pair of reproduction-age rabbits produces a litter of k rabbit pairs (instead of only 1 pair).
============================================================
'''
from rosalind.rosutil import read_ints_str
import itertools as it
def fib(k, f1=1L, f2=1L):
    '''Generalized Fibonacci generator (Python 2: 1L long-literal defaults).

    Yields f1, f2, then each next term k*f1 + f2 — i.e. every mature
    rabbit pair produces k new pairs per month.'''
    yield f1; yield f2
    while True:
        f1, f2 = f2, k * f1 + f2
        yield f2
def nth_fib(file_name):
    '''Read (n, k) from file_name and print the n-th generalized Fibonacci
    number (Python 2 print statement; islice picks out term n-1).'''
    n, k = read_ints_str(file_name)
    print '%d' % (it.islice(fib(k), n - 1, n).next(),)

if __name__ == "__main__":
    # Run on the sample dataset, then on the graded dataset.
    nth_fib('rosalind_fib_sample.dat')
    nth_fib('rosalind_fib.dat')
| true
|
9bbe9d24a557a31965d4646d738e75c5e01b15e4
|
Python
|
pedroceciliocn/intro-a-prog-py-livro
|
/Cap_6_Listas_dicionários_tuplas_e_conjuntos/prog_6_9_pesquisa_sequencial.py
|
UTF-8
| 302
| 4.03125
| 4
|
[] |
no_license
|
# Programa 6.9 - Pesquisa sequencial (sequential search)
L = [15, 7, 27, 39]
p = int(input("Digite o valor a procurar: "))
x = 0
achou = False
# Scan until found or the list is exhausted; when found, x ends as the
# 1-based position (index + 1) because it is incremented after the hit.
# (idiom fix: `while not achou` instead of `while achou == False`)
while not achou and x < len(L):
    if L[x] == p:
        achou = True
    x += 1
if achou:
    print(f"{p} achado na posição {x}")
else:
    print(f"{p} não encontrado")
| true
|
10e06d1684c7cec84027455660e1d36f4080ac49
|
Python
|
MathPlus/Information-Theory
|
/info_gain_ratio_contour_plots.py
|
UTF-8
| 2,139
| 2.65625
| 3
|
[] |
no_license
|
# Plot information-gain-ratio contour curves c1(c0) for several values of
# g, over a grid of (a, b) parameter pairs; one PNG per (a, b).
import numpy as np
from info_theory_functions import prescribed_igr_fn101
import matplotlib.pyplot as plt
dirname_fig = 'info_gain_ratio_contour_plots'
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['figure.titleweight'] = 'bold'
plt.rcParams['axes.titleweight'] = 'bold'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.grid'] = True
fontsize = 12
list_a = [ 0.02 , 0.1 , 0.25 , 0.5 ]
list_b = [ 0.05 , 0.5 ]
list_g = [ 0.1 , 0.25 , 0.5 , 0.75 , 0.9 ]
# c0 sampled on [0, 0.5]; the mirror half (0.5, 1] is filled by symmetry below.
list_c0 = np.linspace( 0.0 , 0.5 , 51 ).tolist()
list_color = [ 'red' , 'magenta' , 'green' , 'blue' , 'black' ]
assert len(list_g) == len(list_color)
n = 2 * len(list_c0) - 1
label_x = 'c0'
label_y = 'c1'
for b in list_b :
    for a in list_a :
        label_title = 'a = %s , b = %s' % ( a , b )
        # NOTE(review): the space before ".png" looks accidental — confirm.
        filename_fig = '%s/%s .png' % ( dirname_fig , label_title )
        fig , ax = plt.subplots()
        plt.title( label_title )
        for j, g in enumerate(list_g) :
            x = [ np.nan for _ in range(n) ]
            y1 = x.copy()
            y2 = x.copy()
            for i, c0 in enumerate(list_c0) :
                c1 = prescribed_igr_fn101( g , a , b , c0 )
                if not np.isnan(c1) :
                    x[i] = c0
                    y1[i] = c1
                    y2[i] = 1.0 - c1
                    # Fill the mirrored point at 1 - c0 (curve symmetry).
                    ii = n - 1 - i
                    x[ii] = 1.0 - x[i]
                    y1[ii] = y1[i]
                    y2[ii] = y2[i]
            color = list_color[j]
            label_plot = 'g = %s' % g
            # Both the c1 branch and its complement 1 - c1, same color.
            plt.plot( x , y1 , color = color , label = label_plot )
            plt.plot( x , y2 , color = color )
        plt.xlim(0.0,1.0)
        plt.ylim(0.0,1.0)
        plt.tick_params( axis = 'both' , which = 'major' )
        plt.xlabel( label_x , fontsize = fontsize )
        plt.ylabel( label_y , fontsize = fontsize )
        ax.legend( loc = 'lower left' )
        fig.savefig( filename_fig , dpi = fig.dpi )
        #plt.close(fig)
| true
|
cdde86e164130b9c7240da96da4fea14508245e8
|
Python
|
lew18/practicepython.org-mysolutions
|
/ex32-hangman.py
|
UTF-8
| 3,438
| 4.5625
| 5
|
[
"MIT"
] |
permissive
|
"""
https://www.practicepython.org
Exercise 32: Hangman
2 chilis
This exercise is Part 3 of 3 of the Hangman exercise series. The other exercises are: Part 1 and Part 2.
You can start your Python journey anywhere, but to finish this exercise you will have to have finished
Parts 1 and 2 or use the solutions (Part 1 and Part 2).
In this exercise, we will finish building Hangman. In the game of Hangman, the player only has 6
incorrect guesses (head, body, 2 legs, and 2 arms) before they lose the game.
In Part 1, we loaded a random word list and picked a word from it. In Part 2, we wrote the logic for
guessing the letter and displaying that information to the user. In this exercise, we have to put it
all together and add logic for handling guesses.
Copy your code from Parts 1 and 2 into a new file as a starting point. Now add the following features:
Only let the user guess 6 times, and tell the user how many guesses they have left.
Keep track of the letters the user guessed. If the user guesses a letter they already guessed, don’t
penalize them - let them guess again.
Optional additions:
When the player wins or loses, let them start a new game.
Rather than telling the user "You have 4 incorrect guesses left", display some picture art for the
Hangman. This is challenging - do the other parts of the exercise first!
Your solution will be a lot cleaner if you make use of functions to help you!
she also has this print("You have {} guesses left".format(6 - num_guesses))
"""
import random
def get_word_list(file_to_read):
    """Read words (one per line) from *file_to_read*.

    Reading stops at the first blank line (or EOF), matching the
    original readline-based loop's behavior.
    """
    words = []
    with open(file_to_read, 'r') as word_file:
        for raw_line in word_file:
            word = raw_line.strip()
            if not word:
                break  # blank line (or stripped EOF sentinel) ends the list
            words.append(word)
    return words
def display_gallows(misses):
    """Print the ASCII-art gallows for the given number of wrong guesses (0-6)."""
    rows = [" ____", " | |"]
    # Head appears after the first miss.
    rows.append(" | o" if misses > 0 else " | ")
    # Torso and arms: body at 2 misses, one arm at 3, both arms at 4+.
    if misses >= 4:
        rows.append(" | -|- ")
    elif misses == 3:
        rows.append(" | -| ")
    elif misses == 2:
        rows.append(" | | ")
    else:
        rows.append(" | ")
    # Legs: one at 5 misses, both at 6.
    if misses == 6:
        rows.append(" | / \\ ")
    elif misses == 5:
        rows.append(" | / ")
    else:
        rows.append(" | ")
    rows.append("__|____")
    for row in rows:
        print(row)
def play_hangman(word):
    """Play one interactive hangman round for *word*; at most 6 wrong guesses."""
    answer = ['_'] * len(word)
    misses = set()
    while '_' in answer and len(misses) < 6:
        display_gallows(len(misses))
        print("\nanswer is " + "".join(answer) + " (" + str(len(answer)) + " letters)")
        if misses:
            print(str(len(misses)) + " mistakes so far " + str(misses) + ", max # of mistakes is 6")
        letter = input('Guess a letter ').upper()
        # Repeated guesses (right or wrong) are not penalized.
        if letter in answer or letter in misses:
            print("you already guessed " + letter)
            continue
        hits = [i for i, ch in enumerate(word) if ch == letter]
        for i in hits:
            answer[i] = letter
        if not hits:
            misses.add(letter)
    # Loop exits either with the word complete (win) or 6 misses (loss).
    if len(misses) < 6:
        print('\nYou won, you figured out ' + word)
    else:
        display_gallows(len(misses))
        print('\nThe word was ' + word + ", better luck next time.")
if __name__ == '__main__':
    # Pick a uniformly random word from the SOWPODS list and play a round.
    word_list = get_word_list('sowpods.txt')
    word = word_list[random.randint(0, len(word_list) - 1)]
    play_hangman(word)
| true
|
5812bba02fd2fbadd736898256932a3b46ba4df9
|
Python
|
Heinhtet612/Sandbox_SP52
|
/assignment1.py
|
UTF-8
| 5,316
| 3.890625
| 4
|
[] |
no_license
|
"""
Replace the contents of this module docstring with your own details
Name: Hein Htet Ko
Student ID: 13807190
Date started: 06/August/2021
GitHub URL: https://github.com/Heinhtet612/SP52_Assignment01
"""
# NOTE(review): this constant is never used below — every function
# hard-codes the 'places.csv' filename instead.
name = "places.csv"
def main():
    """Run the Travel Tracker menu loop until the user chooses Q."""
    print("Travel Tracker 1.0 - by <Hein Htet Ko>")
    print(readfile(), "Places loaded from places.csv")
    choice = menu()
    while choice != "Q":
        if choice == "L":
            open1()
        elif choice == "A":
            add()
        elif choice == "M":
            # Only offer marking when at least one unvisited place exists.
            if readfile1() > 0:
                open1()
                visit()
            else:
                print("No Unvisited Place")
        else:
            print("Invalid menu choice")
        choice = menu()
    print(readfile(), "Places saved in places.csv")
    print("Have a nice day :) ")
    sortcsvfile()
def menu():
    """Display the main menu and return the user's choice, uppercased."""
    prompt = """Menu:
L - List Places
A - Add new place
M - Mark a place as visited
Q - Quit
>>>"""
    return input(prompt).upper()
def open1():
    # open csv file and display
    # Lists every place sorted by visited flag ('n' before 'v') then by
    # visit count; unvisited rows are prefixed with '*'.
    import csv
    with open('places.csv', 'r') as csvFile:
        reader = csv.reader(csvFile)
        # Row layout: [name, country, times, flag] where flag is 'n'/'v'.
        datasort = sorted(reader, key=lambda row: (row[3], (int(row[2]))))
        count = 0
        row_count = sum(1 for row in datasort)
        row_count1 = 0  # number of still-unvisited ('n') places
        for row in datasort:
            if row[3] == 'n':
                row_count1 = row_count1 + 1
            count = count + 1
            # 'n' renders as '*', 'v' as a blank, to flag unvisited rows.
            notvis = row[3].replace('n', '*').replace('v', ' ')
            print(notvis, '{:>1}'.format(count), '{:>0}'.format('.'), '{:<10}'.format(row[0]), "in",
                  '{:<20}'.format(row[1]), "Times", '{:<10}'.format(row[2]))
        if row_count1 == 0:
            print(row_count, "Places, No places left to visit. Why not add a new place?")
        else:
            print(row_count, "Places, you still want to visit", row_count1, "places")
        # Redundant: the with-statement already closes the file.
        csvFile.close()
def add():
    """Prompt for a new place (name, country, visit count) and append it
    to places.csv flagged as unvisited ('n').

    Fixes over the original: the `x.isalpha() or ''` conditions had a
    no-op `or ''` (removed — truthiness is unchanged); the typo
    "blankl" in the error message is corrected; the exception class is
    defined once at the top instead of mid-function; the redundant
    close() after the with-block is dropped.
    """
    import csv

    class NotPositiveError(UserWarning):
        # Raised internally when the visit count is zero or negative.
        pass

    # NOTE(review): isalpha() also rejects names containing spaces or
    # digits (e.g. "New York"), not just blank input — confirm intent.
    while True:
        x = input("Name: ")
        if x.isalpha():
            break
        print("Input can not be blank")
    while True:
        y = input("Country: ")
        if y.isalpha():
            break
        print("Input can not be blank")
    while True:
        z = input("Times: ")
        try:
            number = int(z)
            if number <= 0:
                raise NotPositiveError
            break
        except ValueError:
            print("Invalid input; enter a valid number")
        except NotPositiveError:
            print("Number must be > 0")
    vn = "n"  # new places start unvisited
    print(x, "in", y, ("Times", z), "Has been added to travel tracker")
    newrow = [x, y, z, vn]
    with open('places.csv', 'a', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(newrow)
def visit():
    """Ask for a place number (1-based, in the sorted listing) and mark
    that place as visited in places.csv.

    BUG FIX: the original compared the loop counter to the literal
    string "x" (`if num == "x":`), which is never true for an int, so
    no place could ever be marked visited.  It now compares to the
    user's chosen number.
    """
    import csv
    with open('places.csv', 'r') as csvFile:
        reader = csv.reader(csvFile)
        # Same sort order as open1() so the numbers the user saw line up.
        datasort = sorted(reader, key=lambda row: (row[3], (int(row[2]))))
    # Validate the place number (integer, within 1..len).
    while True:
        try:
            x = int(input("Enter the number of a place to mark as visited"))
        except ValueError:
            print("Invalid! Enter a valid number")
            continue
        if x > len(datasort):
            print("Invalid place number!")
        elif x <= 0:
            print("Number must be > 0")
        else:
            break
    with open('places.csv', 'w', newline='') as csvFile1:
        writer = csv.writer(csvFile1)
        for num, row in enumerate(datasort, start=1):
            if num == x:  # was `num == "x"` — always False
                if row[3] == "v":
                    print("Place is already visited")
                else:
                    row[3] = "v"
                    print(row[0], "in", row[1], "is visited")
            writer.writerow(row)
def readfile():
    """Return the number of CSV rows in places.csv.

    The original had an unreachable close() after the return; the
    with-statement already closes the file.
    """
    import csv
    with open('places.csv', 'r') as csvfile2:
        return sum(1 for _ in csv.reader(csvfile2))
def readfile1():
    """Return how many places in places.csv are still unvisited (flag 'n')."""
    import csv
    with open('places.csv', 'r') as csvfile2:
        return sum(1 for row in csv.reader(csvfile2) if row[3] == 'n')
def sortcsvfile():
    """Rewrite places.csv sorted by visited flag ('n' first) then visit count.

    The original opened the file for writing while the read handle was
    still open (inside the same with-block) and then called close() on
    both already-closed handles; the two phases are now separated.
    """
    import csv
    with open("places.csv", "r") as csvfile3:
        sortedlist = sorted(csv.reader(csvfile3),
                            key=lambda row: (row[3], int(row[2])))
    with open("places.csv", "w", newline='') as f:
        csv.writer(f).writerows(sortedlist)
# BUG FIX: main() was also called unconditionally at module level right
# before this guard, so running the script executed the entire program
# twice (and merely importing the module ran it once).  Only the
# guarded call remains.
if __name__ == '__main__':
    main()
| true
|
1c855dc02d20a1596d6f231c742a60a66b716cc1
|
Python
|
potsbo/nlp100
|
/030.py
|
UTF-8
| 1,374
| 3.734375
| 4
|
[] |
no_license
|
# 30. 形態素解析結果の読み込み
# 形態素解析結果(neko.txt.mecab)を読み込むプログラムを実装せよ.ただし,各形態素は表層形(surface),基本形(base),品詞(pos),品詞細分類1(pos1)をキーとするマッピング型に格納し,1文を形態素(マッピング型)のリストとして表現せよ.第4章の残りの問題では,ここで作ったプログラムを活用せよ.
import re
def divide_into_sentences(lines):
    """Group MeCab output lines into sentences, splitting on 'EOS' markers.

    Empty sentences (consecutive 'EOS') are skipped.  Unlike the
    original, a trailing sentence whose 'EOS' marker is missing at EOF
    is no longer silently dropped.
    """
    sentences = []
    sentence = []
    for line in lines:
        if line == 'EOS':
            if sentence:
                sentences.append(sentence)
                sentence = []
            continue
        sentence.append(line)
    if sentence:  # flush a trailing sentence missing its EOS marker
        sentences.append(sentence)
    return sentences
def parse_word(word):
    """Parse one MeCab output line into a morpheme mapping.

    MeCab's default (IPAdic) line layout is:
    表層形\\t品詞,品詞細分類1,品詞細分類2,品詞細分類3,活用型,活用形,原形,読み,発音
    """
    elements = word.split('\t')
    surface = elements[0]
    fields = elements[1].split(',')
    return {
        'surface': surface,
        'base': fields[6],
        'pos': fields[0],
        # BUG FIX: pos1 (品詞細分類1, per the exercise statement) is
        # feature index 1; the original read index 2 (品詞細分類2).
        'pos1': fields[1],
    }
def sentences(path='./data/neko.txt.mecab'):
    """Read MeCab output from *path* and return it grouped into sentences.

    Uses a context manager so the file handle is closed (the original
    leaked it via a bare open() inside the comprehension).
    """
    with open(path) as f:
        lines = [re.sub('\n', '', line) for line in f]
    return divide_into_sentences(lines)
def analyzed_sentences():
    """Return every sentence as a list of parsed morpheme mappings."""
    parsed = []
    for sentence in sentences():
        parsed.append([parse_word(word) for word in sentence])
    return parsed
def run():
    """Exercise entry point for problem 30: the raw sentence list."""
    return sentences()
if __name__ == '__main__':
    print(run())
| true
|
03fc58778bfaf5cdf1f5e2abccc2b838b7410007
|
Python
|
Srishti14/ex_06_03_coursera_py4e
|
/ex_06_03.py
|
UTF-8
| 198
| 3.28125
| 3
|
[] |
no_license
|
def counter(string, the_letter):
    """Print the number of occurrences of *the_letter* in *string*."""
    total = sum(1 for ch in string if ch == the_letter)
    print(total)


counter("srishti", "s")
counter("grapes", "j")
| true
|
4ed2adce02c2341d013d6f4d6729648232198fa5
|
Python
|
limteng-rpi/ir_final
|
/src/ner/util.py
|
UTF-8
| 3,270
| 2.65625
| 3
|
[] |
no_license
|
import torch
from src.ner.model import LstmCnnOutElmo
def load_lstm_cnn_elmo_model(model_file, elmo_option, elmo_weight,
                             gpu=False, device=0):
    """Load the LSTM-CNN+ELMo model from file.
    :param model_file: Path to the model file.
    :param elmo_option: ELMo option file.
    :param elmo_weight: ELMo weight file.
    :param gpu: Use GPU.
    :param device: GPU device index.
    :return: Model object and a dict of vocabs.
    """
    # map_location lets a checkpoint saved on GPU be loaded onto CPU
    # (or onto a different GPU index).
    map_location = 'cuda:{}'.format(device) if gpu else 'cpu'
    state = torch.load(model_file, map_location=map_location)
    # 'params' holds the training-time hyperparameters saved alongside
    # the weights; they are needed to rebuild an identical architecture.
    params = state['params']
    model = LstmCnnOutElmo(
        vocabs=state['vocabs'],
        elmo_option=elmo_option,
        elmo_weight=elmo_weight,
        lstm_hidden_size=params['lstm_size'],
        parameters=state['model_params'],
        output_bias=not params['no_out_bias']
    )
    model.load_state_dict(state['model'])
    if gpu:
        # Select the device before moving the model so .cuda() lands
        # on the requested index.
        torch.cuda.set_device(device)
        model.cuda(device=device)
    return model, state['vocabs']
def convert_result(results, to_bio=True, separator=' ', conf=True):
    """Convert model output to BIO format.

    :param results: Model output.
    :param to_bio: Convert BIOES to BIO.
    :param separator: Delimiter character.
    :param conf: Confidence value
    :return: BIO formatted string.
    """
    def fix_tag(tag):
        # Rewrite BIOES prefixes to their BIO equivalents when requested.
        if to_bio:
            if tag.startswith('S-'):
                return 'B-' + tag[2:]
            if tag.startswith('E-'):
                return 'I-' + tag[2:]
        return tag

    out_lines = []
    if conf:
        for p_b, t_b, l_b, s_b, c_b in results:
            for p_s, t_s, l_s, s_s, c_s in zip(p_b, t_b, l_b, s_b, c_b):
                # Truncate predictions/confidences to the real sequence length.
                for p, t, s, c in zip(p_s[:l_s], t_s, s_s, c_s[:l_s]):
                    out_lines.append(separator.join(
                        str(i) for i in [t, s, c.item(), fix_tag(p)]))
                out_lines.append('')  # blank line between sentences
    else:
        for p_b, t_b, l_b, s_b in results:
            for p_s, t_s, l_s, s_s in zip(p_b, t_b, l_b, s_b):
                for p, t, s in zip(p_s[:l_s], t_s, s_s):
                    out_lines.append(separator.join(
                        str(i) for i in [t, s, fix_tag(p)]))
                out_lines.append('')
    if not out_lines:
        return ''
    return '\n'.join(out_lines) + '\n'
def plain2bio(input_str):
    """Convert plain text to BIO format.

    :param input_str: Input data string.
    :return: BIO formatted string.
    """
    bio_sents = []
    nonempty = (s for s in input_str.strip().splitlines() if s)
    for sent_idx, sent in enumerate(nonempty):
        tokens = [t for t in sent.split(' ') if t]
        # Each token becomes "<token> <sent>-<pos> O".
        rows = ['{} {}-{} O'.format(tok, sent_idx, tok_idx)
                for tok_idx, tok in enumerate(tokens)]
        bio_sents.append('\n'.join(rows))
    return '\n\n'.join(bio_sents)
def restore_order(items, indices):
    """Undo a sort: reorder each sequence in *items* back to original order.

    *indices* gives, for each position, the element's original index;
    every sequence in *items* is rearranged accordingly.
    """
    restored = []
    for seq in items:
        pairs = sorted(zip(indices, seq), key=lambda pair: pair[0])
        restored.append([value for _, value in pairs])
    return restored
| true
|
a1a8f3c0511b7152814afb69579434be4aef8f5f
|
Python
|
msaqibdani/LeetCode-Practice
|
/LC523_ContinuousSubArraySum.py
|
UTF-8
| 566
| 2.859375
| 3
|
[] |
no_license
|
class Solution:
    """LeetCode 523: does *nums* contain a subarray of length >= 2 whose
    sum is a multiple of k?

    Prefix-sum remainders: if two prefixes share the same remainder
    mod k and are more than one index apart, the elements between them
    sum to a multiple of k.
    """

    def checkSubarraySum(self, nums: list[int], k: int) -> bool:
        # BUG FIX: the original annotated with `List[int]` but never
        # imported `List` from typing, so the module raised NameError
        # at class-definition time; the builtin generic (3.9+) is used.
        total = 0
        # remainder -> earliest prefix index; 0 maps to -1 so a
        # qualifying subarray starting at index 0 is detected.
        seen = {0: -1}
        for i, num in enumerate(nums):
            total += num
            if k != 0:
                total %= k
            if total in seen:
                if i - seen[total] > 1:  # subarray must span >= 2 elements
                    return True
            else:
                seen[total] = i
        return False
| true
|
ed4ff7e25e638ff0a27cd13c1658505b576bc6e6
|
Python
|
hoteldelluna/AlgoStudy
|
/TaeJuneJoung/COD/lv3.TapeEquilibrium.py
|
UTF-8
| 236
| 2.65625
| 3
|
[] |
no_license
|
def solution(A):
    """Codility TapeEquilibrium: minimal |sum(left) - sum(right)| over
    all splits of A into two non-empty halves."""
    best = 9876543210  # sentinel larger than any achievable difference
    left = 0
    right = sum(A)
    # The last element can never start the right half, hence A[:-1].
    for value in A[:-1]:
        left += value
        right -= value
        best = min(best, abs(left - right))
    return best
| true
|
937172f717d4e5da0785beeacaa0b2e5012800f1
|
Python
|
orestis/pyobjc
|
/pyobjc/pyobjc-framework-DictionaryServices/PyObjCTest/test_dictionaryservices.py
|
UTF-8
| 746
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
'''
Some simple tests to check that the framework is properly wrapped.
'''
import objc
from PyObjCTools.TestSupport import *
from DictionaryServices import *
class TestDictionaryServices (TestCase):
    # Smoke tests that the DictionaryServices framework is wrapped.
    # NOTE(review): legacy Python-2-era code (u"" literals, `unicode`,
    # failUnless*); it will not run unmodified on Python 3.
    def testClasses(self):
        # DCSDictionaryRef must be a proper subclass of NSCFType,
        # not the NSCFType class itself.
        self.failUnless( issubclass(DCSDictionaryRef, objc.lookUpClass('NSCFType')) )
        self.failUnless( DCSDictionaryRef is not objc.lookUpClass('NSCFType') )
    def testFunctions(self):
        # Index 5 falls inside "hello", which starts at 4 and is 5
        # characters long — hence the expected range (4, 5).
        r = DCSGetTermRangeInString(None, u"the hello world program", 5)
        self.failUnlessIsInstance(r, CFRange)
        self.failUnlessEqual(r, (4, 5))
        # Looking up the definition for that range should return text.
        r = DCSCopyTextDefinition(None, u"the hello world program", r)
        self.failUnlessIsInstance(r, unicode)
if __name__ == "__main__":
    main()
| true
|
2cf71c7a1fbc2be3d90c346c9e0e62e13c8c35ef
|
Python
|
phg98/SnakeGame
|
/test_apple.py
|
UTF-8
| 1,199
| 3.125
| 3
|
[] |
no_license
|
from unittest import TestCase
from apple import Apple
import pygame
from pygame.locals import *
class TestApple(TestCase):
    # Verifies the Apple constructor stores its arguments and derives
    # board_size from the screen and tile sizes.
    def test_apple(self):
        # create test apple
        SCREEN_WIDTH = 480
        SCREEN_HEIGHT = 320
        screen_size = (SCREEN_WIDTH, SCREEN_HEIGHT)
        TILE_SIZE_X = 10
        TILE_SIZE_Y = 10
        tile_size = (TILE_SIZE_X, TILE_SIZE_Y)
        # NOTE(review): opens a real pygame window — presumably Apple
        # needs a display surface; confirm, and consider a dummy driver
        # so the test can run headless.
        screen = pygame.display.set_mode(screen_size, DOUBLEBUF)
        TAIL_INCREASE = 3
        apples_left = 3
        apple = Apple(screen, screen_size, tile_size, TAIL_INCREASE, apples_left)
        self.assertEqual(apple.screen, screen)
        self.assertEqual(apple.screen_size, screen_size)
        self.assertEqual(apple.tile_size, tile_size)
        # (48, 32) matches (480 // 10, 320 // 10) — apparently
        # screen_size divided element-wise by tile_size.
        self.assertEqual(apple.board_size, (48, 32))
        self.assertEqual(apple.tail_increase, TAIL_INCREASE)
        self.assertEqual(apple.apples_left, apples_left)
'''
def test_add_apple(self):
apple = Apple()
#apple.add_apple(apples, wall, snake):
self.fail()
def test_get_new_apple(self):
self.fail()
def test_draw_apple(self):
self.fail()
def test_check_apple(self):
self.fail()
'''
| true
|
4627727fc7f604582ab21d861e4d2637e0895988
|
Python
|
kobiturgeman/ReconDetect
|
/ClientSide/PacketCounting.py
|
UTF-8
| 6,280
| 2.609375
| 3
|
[] |
no_license
|
from collections import deque
class PacketCounting:
    """Aggregate sniffed TCP packets into per-(TimePeriod, Day) count buckets.

    Buckets live in a deque, newest at index 0.  When a packet for a new
    period arrives, the finished bucket is persisted via
    ``conn.insertCountsTenSecond`` before a fresh one is started.

    Fixes over the original:
    - The new-bucket branches incremented the keys 'counterOfReset' /
      'counterOfResetAck', which do not exist in the dict (its keys are
      'counterOfR' / 'counterOfRA'), so every 'R'/'RA' packet opening a
      new period raised KeyError; the retry block then repeated the same
      failure, double-incremented TenSeconds and dropped the packet.
      With correct keys the try/except/retry scaffolding is unnecessary.
    - endTimeUnixMillisec is now initialized on every new bucket (the
      original left it 0 for non-first buckets until a second packet
      arrived).
    """

    # TCP flag string -> name of the counter it increments.
    _FLAG_COUNTERS = {
        'S': 'counterOfSyn',
        'A': 'counterOfAck',
        'PA': 'counterOfPa',
        'R': 'counterOfR',
        'RA': 'counterOfRA',
        'FA': 'counterOfFin',
    }

    def __init__(self):
        self.TenSeconds = 0   # running bucket sequence number
        self.table = deque()  # buckets, most recent first

    def _count_flags(self, bucket, flags):
        # Bump the counter matching this packet's TCP flags, if any.
        key = self._FLAG_COUNTERS.get(flags)
        if key is not None:
            bucket[key] += 1

    def _new_bucket(self, packet):
        # Build a fresh bucket seeded with this packet's data.
        self.TenSeconds += 1
        bucket = {
            'TimePeriod': packet['TimePeriod'],
            'startTimeUnixMillisec': packet['UnixTimeMillisec'],
            'endTimeUnixMillisec': packet['UnixTimeMillisec'],
            'TenSeconds': self.TenSeconds,
            'Day': packet['Day'],
            'packetCount': 1,
            'counterOfSyn': 0,
            'counterOfAck': 0,
            'counterOfPa': 0,
            'counterOfR': 0,
            'counterOfRA': 0,
            'counterOfFin': 0,
            'packetsTotalSize': packet['IPLen'],
        }
        self._count_flags(bucket, packet['Flags'])
        return bucket

    def addPacket(self, packet, conn):
        """Fold one parsed packet into the current bucket.

        Packets to or from port 1433 (SQL Server traffic) are ignored.
        Returns the bucket table (deque) in all cases.
        """
        if packet['dstPort'] == 1433 or packet['srcPort'] == 1433:
            return self.table
        current = self.table[0] if self.table else None
        if (current is not None
                and current['TimePeriod'] == packet['TimePeriod']
                and current['Day'] == packet['Day']):
            # Same period: accumulate into the newest bucket.
            current['endTimeUnixMillisec'] = packet['UnixTimeMillisec']
            current['packetCount'] += 1
            self._count_flags(current, packet['Flags'])
            current['packetsTotalSize'] += packet['IPLen']
        else:
            if current is not None:
                # Period changed: persist the finished bucket first.
                conn.insertCountsTenSecond(current)
            self.table.appendleft(self._new_bucket(packet))
        return self.table

    def returnTable(self):
        # Debug helper: dump every bucket to stdout.
        for row in self.table:
            print(row)
| true
|
ac03367eb16823c818ee81a398bd55c9fd137f0f
|
Python
|
PiotrDabkowski/ask.fm
|
/HideFox.py
|
UTF-8
| 2,376
| 2.890625
| 3
|
[] |
no_license
|
import subprocess
try:
    import win32gui
    # win32gui present: flag so the real class below gets defined.
    HideFox = False
except:
    # Non-Windows fallback: a stub whose every attribute is a callable
    # returning [], so importers don't crash without win32gui.
    class dummy:
        def __init__(self, *args):
            pass
        def __getattr__(self, a):
            def f(*a):
                return []
            return f
    HideFox = dummy
if not HideFox:
    # Only reached when win32gui imported (HideFox is False); the dummy
    # class is truthy, so on other platforms this branch is skipped.
    class HideFox:
        # Hide/show a top-level window located by process exe name or by
        # exact window title, via the Win32 ShowWindow API.
        def __init__(self, exe='firefox.exe', window_name=False):
            self.exe = exe
            if window_name:
                self.hwnd = win32gui.FindWindow(0,window_name)
            elif exe:
                self.get_hwnd()
            else:
                raise ValueError('You need to give either exe name or window name. Please!')
        def get_hwnd(self):
            # Resolve the window handle from the process executable name.
            win_name = self.get_win_name(self.exe)
            self.hwnd = win32gui.FindWindow(0,win_name)
            return self.hwnd
        def hide(self):
            # ShowWindow codes: 6 = SW_MINIMIZE, 0 = SW_HIDE.
            win32gui.ShowWindow(self.hwnd, 6)
            win32gui.ShowWindow(self.hwnd, 0)
        def show(self):
            # ShowWindow codes: 5 = SW_SHOW, 3 = SW_MAXIMIZE.
            win32gui.ShowWindow(self.hwnd, 5)
            win32gui.ShowWindow(self.hwnd, 3)
        def add(self, iterable):
            # Concatenate a sequence of lists into one (like sum(seq, [])).
            res=iterable[0]
            iterable=iterable[1:]
            for e in iterable:
                res+=e
            return res
        def get_win_name(self, exe):
            '''simple function that gets the window name of the process with the given name'''
            # Run `tasklist` without flashing a console window.
            info = subprocess.STARTUPINFO()
            info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            # NOTE(review): Python-2-era code — on Python 3 check_output
            # returns bytes, so .split('\n') would raise TypeError.
            raw=subprocess.check_output('tasklist /v /fo csv', startupinfo=info).split('\n')[1:-1]
            data={}
            for proc in raw:
                try:
                    # NOTE(review): eval on tasklist CSV output is fragile
                    # (and unsafe for hostile window titles); csv.reader
                    # would be the safer parser.
                    proc=eval('['+proc+']')
                    if proc[0] in data:
                        data[proc[0]].append(proc[8])
                    else:
                        data[proc[0]]=[proc[8]]
                except:
                    pass
            # Heuristic: the most frequent "title" is treated as the
            # placeholder used by windowless processes; any other title
            # belonging to `exe` is returned as the real window name.
            freqs=[]
            values=data.values()
            # NOTE(review): self.add slices values[0] — works on a py2
            # list, fails on a py3 dict_values view.
            values= self.add(values)
            for w in values:
                freqs.append([len([e for e in values if e==w]),w])
            no_window=sorted(freqs)[-1][-1]
            try:
                win_names=data[exe]
                for win_name in win_names:
                    if win_name!=no_window:
                        return win_name
                raise ValueError('Process with the given name does not have any windows!')
            except KeyError:
                raise ValueError('Could not find a process with name '+exe)
| true
|
d84b8a797900273bfc0cde0b552945658e543fd0
|
Python
|
hyeokjinson/algorithm
|
/ps프로젝트/BF/알파벳.py
|
UTF-8
| 626
| 2.671875
| 3
|
[] |
no_license
|
import sys
# The board DFS can recurse deeply; raise the default limit.
sys.setrecursionlimit(10**6)
# Rebind input to the faster sys.stdin.readline (shadows the builtin).
input=sys.stdin.readline
def dfs(x, y, cnt):
    # Depth-first walk over the letter board; cnt is the number of
    # distinct letters on the current path.  Uses the module globals
    # arr, r, c, dx, dy, ch and updates the global maximum `res`.
    global res
    for step in range(4):
        nx, ny = x + dx[step], y + dy[step]
        if not (0 <= nx < r and 0 <= ny < c):
            continue
        letter = ord(arr[nx][ny]) - 65
        if ch[letter] == 0:
            ch[letter] = 1           # claim the letter for this path
            dfs(nx, ny, cnt + 1)
            ch[letter] = 0           # backtrack
    res = max(res, cnt)
if __name__ == '__main__':
    # Movement deltas: down, right, up, left.
    dx=[1,0,-1,0]
    dy=[0,1,0,-1]
    r,c=map(int,input().split())
    arr=[list(map(str,input().strip()))for _ in range(r)]
    # ch[i] == 1 marks letter chr(i + 65) as used on the current path.
    ch=[0]*26
    res=-2147000000
    # The starting cell's letter is consumed before the walk begins.
    ch[ord(arr[0][0])-65]=1
    dfs(0,0,1)
    print(res)
| true
|
eccfeb5db1b7e0fa2dce40490ea3cc483efb77bb
|
Python
|
preetmishra/competitive-programming
|
/codechef/long/jan19/HP18.py
|
UTF-8
| 470
| 2.984375
| 3
|
[] |
no_license
|
# CodeChef HP18: per test case, Bob scores numbers divisible by y,
# Alice scores numbers divisible by z; numbers divisible by both are
# tallied separately and credited below.
t = int(input())
for _ in range(t) :
    # x is presumably the list length (unused); y and z are the divisors.
    x = input().split()
    x = [int(i) for i in x]
    x, y, z = x
    l = input().split()
    l = [int(i) for i in l]
    both = bob = alice = 0
    for i in l :
        if i % y == 0 and i % z == 0 :
            both += 1
        elif i % y == 0 :
            bob += 1
        elif i % z == 0 :
            alice += 1
    # NOTE(review): exactly one shared number is credited (to Bob) no
    # matter how many are divisible by both y and z — confirm against
    # the problem statement before changing.
    if both >= 1 :
        bob += 1
    # Ties go to Alice.
    if bob > alice :
        print("BOB")
    else :
        print("ALICE")
| true
|