blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a0a16341b5fde56b70b2ef2340bf37bb74a44dc7
|
Python
|
nnizh13/WikiSource
|
/WikiSource/spiders/text_urls.py
|
UTF-8
| 2,095
| 2.859375
| 3
|
[] |
no_license
|
import scrapy
from WikiSource.items import WorkUrl
class TextUrls(scrapy.Spider):
    """Spider that collects the URL of every work listed on English
    Wikisource, starting from the four top-level "Works by ..." category
    trees and recursing through their subcategories.
    """

    name = 'get_text_urls'
    # Entry points: the four top-level category trees on en.wikisource.org.
    start_urls = ['https://en.wikisource.org/wiki/Category:Works_by_era',
                  'https://en.wikisource.org/wiki/Category:Works_by_type',
                  'https://en.wikisource.org/wiki/Category:Works_by_genre',
                  'https://en.wikisource.org/wiki/Category:Works_by_subject']
    # Class-level list of every work URL yielded so far; used to avoid
    # emitting duplicates across overlapping category pages.
    # NOTE(review): membership tests on a list are O(n) per check; a set
    # would scale better for large crawls -- confirm before changing.
    works_url = []

    def parse(self, response):
        """Send requests for every Work's subcategories."""
        for category_url in response.css('a.CategoryTreeLabelCategory::attr("href")').getall():
            # Resolve the (relative) category link before following it.
            yield response.follow(response.urljoin(category_url), callback=self.parse_works)

    def parse_works(self, response):
        """Parse responses for each request."""
        # Anchors of every work listed in this category page's member list.
        works = response.css('div#mw-pages > div.mw-content-ltr > '
                             ' div.mw-category > div.mw-category-group ul li a::attr("href")').getall()
        for work in works:
            work_url = response.urljoin(work)
            if work_url not in self.works_url:
                self.works_url.append(work_url)
                w_url = WorkUrl()
                w_url['url'] = work_url
                yield w_url
        # Crawls next page if it exists
        # The second pagination anchor is the "next page" link; an
        # IndexError from [1] means there is no further page.
        try:
            next_page = response.css('div#mw-pages > a::attr("href")').getall()[1]
            if next_page is not None:
                next_page_url = response.urljoin(next_page)
                yield scrapy.Request(response.urljoin(next_page_url), callback=self.parse_works)
        except IndexError:
            pass
        # Crawls subcategories
        subcategories = response.css('div.CategoryTreeItem a::attr("href")').getall()
        for cat in subcategories:
            yield scrapy.Request(response.urljoin(cat), callback=self.parse_works)
# def save_as_csv(self, works, path):
# """write the list to csv file"""
# with open(path, "w") as outfile:
# for entries in works:
# outfile.write(entries)
# outfile.write("\n")
| true
|
b7e8124d75ec7aafd020bf365c08a0a901203aac
|
Python
|
pranayyelugam/Online-Book-Store
|
/tests/cacheMiss_2.py
|
UTF-8
| 1,194
| 2.546875
| 3
|
[] |
no_license
|
import time, os, sys
import requests
import datetime
scriptDir = os.path.dirname(__file__)
def testForCacheMiss():
    """Issue a fixed sequence of requests against the local book-store
    server and log the wall-clock latency of each one in milliseconds.

    Response bodies are appended to averageTimeCacheMissOutput.txt and the
    per-request timings to averageTimeCacheMiss.txt (both under ./outputs).
    """
    log = os.path.join(scriptDir, './outputs/averageTimeCacheMiss.txt')
    averageTimeCacheOutput = os.path.join(scriptDir, './outputs/averageTimeCacheMissOutput.txt')
    local = "http://0.0.0.0:8081"
    queryList = ["/lookup/1", "/lookup/1", "/buy/1", "/lookup/1", "/search/Distributed Systems", "/search/Distributed Systems", "/buy/1", "/search/Distributed Systems"]
    for query in queryList:
        requestStart = datetime.datetime.now()
        resp = requests.get(local + query)
        # FIX: stop the clock immediately after the request returns; the
        # original captured the end time after writing the response to
        # disk, inflating the measured latency with file I/O.
        request_end = datetime.datetime.now()
        requestTime = request_end - requestStart
        # FIX: timedelta.microseconds only holds the sub-second component,
        # so any request slower than one second was under-reported.
        # total_seconds() covers the whole duration; * 1000 converts to ms.
        totalRequestTime = requestTime.total_seconds() * 1000
        # Context managers guarantee the handles close even if a write fails.
        with open(averageTimeCacheOutput, 'a+') as output:
            output.write(resp.text)
            output.write('\n')
        with open(log, 'a+') as averageFile:
            averageFile.write("Average time for {} requests is: {}\n".format(query.split('/')[1], totalRequestTime))
# Run the benchmark only when executed as a script (not on import).
if __name__ == "__main__":
    testForCacheMiss()
| true
|
a3629f9b8b5d40744ebba698a56de0894f7945d4
|
Python
|
avaltechinova/smartusAiModeling
|
/modeling_config.py
|
UTF-8
| 4,673
| 2.609375
| 3
|
[] |
no_license
|
import numpy as np
class CrossValidationConfig:
    """Read-only bundle of cross-validation settings.

    The batch size is held internally and only surfaces through
    ``validation_steps``.
    """

    def __init__(self, cv_type='k_fold', nr_splits=5, shuffle=False, batch_size=32):
        self._cv_type = cv_type
        self._nr_splits = nr_splits
        self._shuffle = shuffle
        self._batch_size = batch_size

    @property
    def cross_validation_type(self):
        """Name of the cross-validation scheme (e.g. 'k_fold')."""
        return self._cv_type

    @property
    def nr_splits(self):
        """Number of cross-validation splits."""
        return self._nr_splits

    @property
    def shuffle(self):
        """Whether samples are shuffled before splitting."""
        return self._shuffle

    def validation_steps(self, nr_samples):
        """Number of validation batches needed to cover *nr_samples*."""
        return np.ceil(nr_samples / self._batch_size)
class TrainingConfig:
    """Read-only bundle of training-loop settings."""

    def __init__(self, batch_size=32, nr_epochs=10, data_augmentation=False, outlier_detect=False, fine_tuning=False):
        self._batch_size = batch_size
        self._nr_epochs = nr_epochs
        self._data_augmentation = data_augmentation
        self._outlier_detect = outlier_detect
        self._fine_tuning = fine_tuning

    @property
    def batch_size(self):
        """Mini-batch size used during training."""
        return self._batch_size

    @property
    def nr_epochs(self):
        """Total number of training epochs."""
        return self._nr_epochs

    @property
    def data_augmentation(self):
        """Whether input augmentation is enabled."""
        return self._data_augmentation

    @property
    def outlier_detect(self):
        """Whether outlier detection is applied to the training data."""
        return self._outlier_detect

    @property
    def fine_tuning(self):
        """Whether the convolutional base is fine-tuned."""
        return self._fine_tuning

    def steps_per_epoch(self, nr_samples):
        """Batches needed to cover *nr_samples* once per epoch."""
        return np.ceil(nr_samples / self._batch_size)
class ConvNetConfig:
    """Read-only container for the CNN architecture hyper-parameters.

    Parameters:
        conv_base: pretrained convolutional base to reuse, or None.
        nr_hidden_neurons: width of the dense hidden layer.
        activation: activation function name (e.g. 'relu').
        drop_out: dropout rate, or None to disable dropout.
        learning_rate: optimizer learning rate.
        regularizers: dict of regularizer settings; None means empty.
    """

    def __init__(self, conv_base=None,
                 nr_hidden_neurons=256,
                 activation='relu',
                 drop_out=None,
                 learning_rate=1e-4,
                 regularizers=None):
        self.__conv_base = conv_base
        self.__nr_hidden_neurons = nr_hidden_neurons
        self.__activation = activation
        self.__drop_out = drop_out
        # FIX: attribute was misspelled '__learning_hate' (harmless only
        # because the property hid it, but confusing to maintain).
        self.__learning_rate = learning_rate
        # FIX: the old default 'regularizers=dict()' was evaluated once at
        # definition time, so every instance constructed with the default
        # shared the SAME dict -- a mutation through one instance leaked
        # into all others.  The None sentinel gives each instance its own
        # fresh empty dict while keeping the observable default value {}.
        self.__regularizers = regularizers if regularizers is not None else {}

    @property
    def conv_base(self):
        """The pretrained convolutional base (or None)."""
        return self.__conv_base

    @property
    def nr_hidden_neurons(self):
        """Width of the dense hidden layer."""
        return self.__nr_hidden_neurons

    @property
    def activation(self):
        """Activation function name."""
        return self.__activation

    @property
    def drop_out(self):
        """Dropout rate, or None."""
        return self.__drop_out

    @property
    def learning_rate(self):
        """Optimizer learning rate."""
        return self.__learning_rate

    @property
    def regularizers(self):
        """Dict of regularizer settings (empty dict by default)."""
        return self.__regularizers
class DataConfig:
    """Flags controlling which auxiliary data is fed to the model."""

    def __init__(self, animal_weight=False):
        # Whether animal-weight data should be included as an input.
        self._animal_weight = animal_weight

    @property
    def animal_weight(self):
        """True when animal-weight data is added to the model inputs."""
        return self._animal_weight
def save_configuration(path, validation_config, train_config, cnn_config, data_config):
    """Write every configuration object to <path>/modeling_config.txt.

    Each config is rendered as a titled section delimited by dashed rules,
    one 'name: value' line per setting.
    """
    rule = '---------------------------------------------------------'
    sections = [
        ('Validation', [
            f'type: {validation_config.cross_validation_type}',
            f'number of splits: {validation_config.nr_splits}',
            f'shuffle: {validation_config.shuffle}',
        ]),
        ('Training', [
            f'data augmentation: {train_config.data_augmentation}',
            f'fine tuning: {train_config.fine_tuning}',
            f'outlier detection: {train_config.outlier_detect}',
            f'batch size: {train_config.batch_size}',
            f'number of epochs: {train_config.nr_epochs}',
        ]),
        ('CNN', [
            f'convolutional base: {cnn_config.conv_base}',
            f'number of hidden neurons: {cnn_config.nr_hidden_neurons}',
            f'activation function: {cnn_config.activation}',
            f'drop out: {cnn_config.drop_out}',
            f'learning rate: {cnn_config.learning_rate}',
            f'regularizers: {cnn_config.regularizers}',
        ]),
        ('Data', [
            f'add animal data: {data_config.animal_weight}',
        ]),
    ]
    with open(path + '/modeling_config.txt', 'w') as f:
        for index, (title, lines) in enumerate(sections):
            if index:
                # Blank separator between sections (matches original output).
                print('\n', file=f)
            print(rule, file=f)
            print(title, file=f)
            print(rule, file=f)
            for line in lines:
                print(line, file=f)
| true
|
5b42bcb078355d2448975b9309709f27143ee1c4
|
Python
|
RecluseXU/learning_spider
|
/example/0_Basic_usage_of_the_library/python/7_统计文件行数.py
|
UTF-8
| 1,501
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# -*- encoding: utf-8 -*-
'''
@Time : 2023-05-19
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc : 计算大文件行数
4核8G python3.6 测算速度 单位: 秒
计算方法 100M 500M 1G 10G
readline 0.13 0.85 1.58 13.53
buffer_count 0.13 0.62 1.18 10.21
buffer_count_iter 0.08 0.42 0.83 8.33
'''
# here put the import lib
def count_by_readline(filename):
    """Count lines by iterating over the file object line by line."""
    with open(filename, 'r') as handle:
        return sum(1 for _ in handle)
def count_by_wc(filename):
    """Count lines with the external ``wc -l`` tool (POSIX systems only).

    FIX: the command is passed as an argument list instead of an
    interpolated shell string, so filenames containing spaces or shell
    metacharacters are handled safely (no shell injection).
    """
    import subprocess
    result = subprocess.run(['wc', '-l', filename],
                            capture_output=True, text=True, check=True)
    # ``wc -l`` prints "<count> <filename>" (possibly with leading
    # whitespace); take the leading integer field.
    return int(result.stdout.split()[0])
def count_by_buffer_count(filename):
    """Count lines by scanning fixed-size binary chunks for newline bytes."""
    chunk_size = 1024 * 1024
    total = 0
    with open(filename, 'rb') as handle:
        # iter() with a b'' sentinel stops exactly where the original
        # while-loop did: on the first empty read at EOF.
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            total += chunk.count(b'\n')
    return total
def count_by_buffer_count_iter(filename):
    """Count lines via itertools: lazily read chunks until an empty read,
    summing the newline bytes found in each chunk."""
    from itertools import takewhile, repeat
    chunk_size = 1024 * 1024
    with open(filename, 'rb') as handle:
        reads = (handle.read(chunk_size) for _ in repeat(None))
        total = 0
        # takewhile(bool, ...) stops at the first empty (falsy) read.
        for chunk in takewhile(bool, reads):
            total += chunk.count(b'\n')
        return total
| true
|
08f33ad0fd8e94e2552fbb5f050bd1eed8dda856
|
Python
|
bangbao/wsweb
|
/lib/db/mysqldb.py
|
UTF-8
| 2,092
| 2.625
| 3
|
[] |
no_license
|
# coding: utf-8
import datetime
import hashlib
import MySQLdb

# Hex MD5 digest of a byte string (Python 2 str); used below to shard
# keys across tables.
md5 = lambda x: hashlib.md5(x).hexdigest()
# SQL string escaping taken from the underlying MySQL C binding.
escape_string = MySQLdb._mysql.escape_string
def force_str(text, encoding="utf-8", errors='strict'):
    """Coerce *text* to a byte string (Python 2 ``str``).

    unicode values are encoded with *encoding*/*errors*; anything else is
    passed through ``str()``.  NOTE: this module is Python 2 only -- it
    relies on the ``unicode`` builtin.
    """
    t_type = type(text)
    if t_type == str:
        return text
    elif t_type == unicode:
        return text.encode(encoding, errors)
    return str(text)
def _smart(v):
    """Render a value as a string suitable for embedding in SQL.

    datetimes are formatted as 'YYYY-MM-DD HH:MM:SS'; numbers and any
    other object fall back to ``str``.  (Python 2 only: uses the
    ``unicode`` and ``long`` builtins.)
    """
    t = type(v)
    if t == str:
        return v
    elif t == unicode:
        return force_str(v)
    elif (t == int) or (t == long) or (t == float):
        return str(v)
    elif t == datetime.datetime:
        return v.strftime("%Y-%m-%d %H:%M:%S")
    return str(v)
def sql_value(dict_data):
    """Build the "col1='v1',col2='v2'" fragment of an INSERT/UPDATE SET.

    Each value is escaped via ``escape_string`` before interpolation.
    NOTE(review): a value of None renders as the quoted string 'null',
    not the SQL keyword NULL -- confirm this is intended before reuse.
    """
    return ','.join(map(
        lambda x: """%s='%s'""" % (
            x[0], escape_string(_smart(x[1])) if x[1] is not None else 'null'
        ),
        dict_data.iteritems()
    ))
class MySQLConnect(object):
    """MySQL connection wrapper with key-based table sharding.

    Expects a host_config dict with 'host', 'user', 'passwd', 'db' and
    'table_prefix' keys.  Rows are spread over 16 tables named
    '<table_prefix>_<0..15>', chosen by the MD5 of each record's key.
    """

    def __init__(self, host_config):
        self.mysql_host = host_config
        self.table_prefix = host_config['table_prefix']
        self.conn = MySQLdb.connect(
            host=host_config['host'],
            user=host_config['user'],
            passwd=host_config['passwd'],
            db=host_config['db'],
            charset="utf8"
        )
        # DictCursor: rows come back as dicts keyed by column name.
        self.cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)

    def __enter__(self,):
        return self

    def __exit__(self, _type, value, trace):
        # Deliberately leaves the connection open on context exit; cleanup
        # is deferred to __del__ (see commented-out lines below).
        pass
        # self.cursor.close()
        # self.conn.close()

    def __del__(self,):
        # NOTE(review): the connection is closed before its cursor; closing
        # a cursor of an already-closed connection may raise -- confirm the
        # intended order.
        self.conn.close()
        self.cursor.close()

    def get_table_by_key(self, key):
        """Return the name of the shard table that stores *key*.

        The key's MD5 digest (as an int, mod 16) selects one of the 16
        tables named '<table_prefix>_<0..15>'.
        """
        sid = int(md5(str(key)), 16)
        table = '%s_%s' % (self.table_prefix, sid % 16)
        return table

    def execute(self, sql, key):
        """Interpolate key's shard table into *sql* (via '%s'), run the
        statement and commit."""
        table = self.get_table_by_key(key)
        sql = sql % table
        self.cursor.execute(sql)
        self.conn.commit()

    def insert_data(self, data, key):
        """INSERT *data* (dict of column -> value) into key's shard table."""
        sql = """INSERT INTO %s SET """ + sql_value(data)
        return self.execute(sql, key)
| true
|
25c79ef0a8907530e4802a6f53731f4cb49742f9
|
Python
|
QMIND-Team/Voting-Optimization
|
/Reducing_STD_Model.py
|
UTF-8
| 6,352
| 2.71875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sat, Nov 17, 2018
@author: Caelum Kamps, Sean Kato, Dan, Denis, Ali
"""
import pandas as pd
import getData
#%% Preliminary initializations and imports
data = pd.read_pickle('OurSortedData') # Pickled data
data = data.drop(5219) #This was the row giving us trouble
data = data.drop(13988)
civic_addy = getData.get_data()[1]['addy']
# Keep the address list aligned with the two rows dropped from `data`.
del civic_addy[5219]
del civic_addy[13988]
#data.dropna(inplace=True)
#data.reset_index(drop=True, inplace=True)
voting_loc, _ = getData.get_data() # Voting locations
voting_loc = voting_loc.sort_values('num') # Sorted voting locations
ideal_num = (len(data))/(len(voting_loc)) #Ideal number of addresses per location
ratio_tolerance = 1.1 #Arbitrary distance ratio tolerance (voting_loc_n+1/voting_loc_n)
quot = pd.DataFrame(columns = ['civic addy', 'quot1', 'quot2'])
quot['civic addy'] = civic_addy
# Indices of voting locations assigned more than `ideal_num` addresses.
too_many = []
quot1 = [0]*len(data) #array for first ratio, initialize to zero
quot2 = [0]*len(data) #array for second ratio, initialize to zero
# Column for each voting location
model_output = pd.DataFrame(columns = ['location '+str(i) for i in range(54)])
# Model statistics
model_statistics = pd.DataFrame(columns = ['num of addys', 'mean', 'std', 'median', 'max'])
placeholder_column = [None for i in range(54)]
for column in model_statistics.columns:
    model_statistics[column] = placeholder_column
# Placeholder to get voting locations
columns = [[] for i in range(54)]
#%% Basic function to map civic addresses to voting locations
# Bucket each address's distance into its first-choice location's list.
# v_loc1 appears to be a (location_id, distance) pair -- TODO confirm
# against getData's output format.
for i in range(len(data)):
    try:
        columns[int(data['v_loc1'].iloc[i][0]) - 1].append(float(data['v_loc1'].iloc[i][1]))
    # This except statement is to handle any shitty or missing data
    except:
        # No bueno
        continue
#%%Expanded function to seek equal distribution (reduce standard deviation)
for i in range(len(voting_loc)):
    if (len(columns[i]) > ideal_num):
        too_many.append(i) #which locations have too many people
#calculating ratio between civic addy's first&second voting locations, and second&third voting locations
for i in range(len(data)):
    try:
        quot1[i] = float(float(data['v_loc2'].iloc[i][1])/float(data['v_loc1'].iloc[i][1]))
        quot2[i] = float(float(data['v_loc3'].iloc[i][1])/float(data['v_loc2'].iloc[i][1]))
    # This except statement is to handle any shitty or missing data
    except:
        #No bueno
        continue
quot['quot1'] = quot1
quot['quot2'] = quot2
#now we must find the associated civic addies with the voting locations that have too many people
#v_loc1_dict = dict(data['v_loc1']) #for easy searching of values in column
# Table pairing each overloaded voting location with its civic addresses.
too_many_vloc_with_civic_addies = pd.DataFrame(columns = ['voting loc', 'civic addies'])
too_many_vloc_with_civic_addies['voting loc'] = too_many
#i = 0
##instantiate the civic addty list of "too many" vlocs
#addy_list_big = [[] for j in range(len(too_many))]
#
#
#for j in range(len(too_many)):
# i = 0
# while (i < 5219):
# if data['v_loc1'].iloc[i][0] == too_many[j]:
# addy_list_big[j].append(data['addy'][i])
# #add all associated civic addies to too_many_vloc_with_civic_addies['civic addies']
# i += 1
# i += 1
# while (i < 13988):
# if data['v_loc1'].iloc[i][0] == too_many[j]:
# addy_list_big[j].append(data['addy'][i])
# i += 1
# i += 1
# while (i < 38407):
# if data['v_loc1'].iloc[i][0] == too_many[j]:
# addy_list_big[j].append(data['addy'][i])
# i += 1
# i = 38410
# while (i < 59160):
# if data['v_loc1'].iloc[i][0] == too_many[j]:
# addy_list_big[j].append(data['addy'][i])
# i += 1
# i = 59174
# while (i < len(data)):
# if data['v_loc1'].iloc[i][0] == too_many[j]:
# addy_list_big[j].append(data['addy'][i])
# i += 1
#for j in range(len(too_many)):
# too_many_vloc_with_civic_addies['civic addies'].iloc[j] = addy_list_big[j]
# The commented-out loop above built this mapping once; it is now loaded
# from a pickle for speed.
too_many_vloc_with_civic_addies = pd.read_pickle('too_many_vloc_with_civic_addies')
#Now we have everything we need: quot (which has the ratios of each civic addy's top choices),
#and too_many_vloc_with_civic_addies (which has the voting locations that have too
#many addresses, with the corresponding civic address to that location). We now must iterate
#through these civic addies and see which ones have a quot less than the "threshold". If quot
#is less than threshold, then send that civic addy to its next best choice.
for j in range(len(too_many)):
    for i in range(len(too_many_vloc_with_civic_addies['civic addies'][j])):
        for k in range(len(quot)):
            if too_many_vloc_with_civic_addies['civic addies'][j][i] == quot['civic addy'][k]:
                if quot['quot1'][k] < ratio_tolerance:
                    row = data[data.addy == too_many_vloc_with_civic_addies['civic addies'][j][i]]
                    # NOTE(review): `row` is a copy of the matching rows, and
                    # the columns are named 'v_loc1'/'v_loc2' elsewhere in this
                    # script, so this assignment never reaches `data` --
                    # confirm the intended reassignment behavior.
                    row['vloc1'] = row['vloc2']
#%% Storing function output in a pandas dataframe
# Calculate the max column length so pandas doesnt get mad
length = max([len(columns[i]) for i in range(54)])
# Making all of the columns the same length so pandas doesnt get mad
# (pad every per-location list with None up to the longest list).
for column in columns:
    for i in range(length):
        try:
            a = column[i]
        except:
            column.append(None)
for i in range(54):
    model_output['location '+str(i)] = columns[i]
#%% Calculating Model Statistics for each voting location
# One summary row per voting location: count / mean / std / median / max
# of the distance values collected in model_output.
# FIX: the original recomputed and reassigned 'std' and 'max' a second
# time inside the loop; the duplicate (idempotent) assignments are removed.
for i in range(54):
    loc = 'location ' + str(i)
    model_statistics.loc[i, 'num of addys'] = model_output[loc].count()
    model_statistics.loc[i, 'mean'] = model_output[loc].mean()
    model_statistics.loc[i, 'std'] = model_output[loc].std()
    model_statistics.loc[i, 'max'] = model_output[loc].max()
    model_statistics.loc[i, 'median'] = model_output[loc].median()
# Some examples of things we might care about
print('Std # of ppl per voting location = ', model_statistics['num of addys'].std())
print('Mean mean per voting location = ', model_statistics['mean'].mean())
print('Worst std per voting location = ', model_statistics['std'].max())
| true
|
1a6e78feebca4eccab68432b0def073bf1e5ed1d
|
Python
|
NGC-6543/Seattle_Airbnb
|
/listings.py
|
UTF-8
| 19,191
| 2.625
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Created on 20l20-10-22
Authhor: NGC-6543
"""
import pandas as pd
import datetime # convert dates to timespan since 2016-01-04 (scrape date)
import math # check for NaN's
import os
os.getcwd()
# os.chdir('./')
# os.getcwd()
###############################################################################
## Listings file data cleaning
## Import listings.csv
## Import listings.csv
listings_import = pd.read_csv('./source_data/listings.csv')
## keep only the desired fields
listingsDF = pd.DataFrame(listings_import, columns=[
    'id'
    ,'host_id'
    ,'host_since'
    ,'host_location'
    ,'host_response_time'
    ,'host_response_rate'
    ,'host_is_superhost'
    ,'host_neighbourhood'
    ,'host_has_profile_pic'
    ,'host_identity_verified'
    ,'neighbourhood'
    ,'neighbourhood_group_cleansed'
    ,'zipcode'
    ,'latitude'
    ,'longitude'
    ,'property_type'
    ,'room_type'
    ,'accommodates'
    ,'bathrooms'
    ,'bedrooms'
    ,'beds'
    ,'bed_type'
    ,'price'
    ,'weekly_price'
    ,'monthly_price'
    ,'security_deposit'
    ,'cleaning_fee'
    ,'guests_included'
    ,'extra_people'
    ,'minimum_nights'
    ,'maximum_nights'
    ,'calendar_updated'
    ,'availability_30'
    ,'availability_60'
    ,'availability_90'
    ,'availability_365'
    ,'number_of_reviews'
    ,'first_review'
    ,'last_review'
    ,'review_scores_rating'
    ,'review_scores_accuracy'
    ,'review_scores_cleanliness'
    ,'review_scores_checkin'
    ,'review_scores_communication'
    ,'review_scores_location'
    ,'review_scores_value'
    ,'instant_bookable'
    ,'cancellation_policy'
    ,'require_guest_profile_picture'
    ,'require_guest_phone_verification'
    ,'calculated_host_listings_count'
    ,'reviews_per_month'
    ])
# drop the unused DF to free memory
del listings_import
## remove records that were dropped in the calendar file due to incorrect entry
listingsDF = listingsDF.loc[ ~listingsDF['id'].isin(['3308979','2715623','7733192','2459519','4825073']) ]
## drop these two records because they have mostly null values
listingsDF = listingsDF.loc[ ~listingsDF['id'].isin(['8354452','10235014']) ]
# replace bad zipcode containing newline character with corrected zipcode
listingsDF.loc[listingsDF['id'] == 9448215,'zipcode'] = '98122'
###############################################################################
## Listings file data transformation
## replace all 't/f' columns with 1/0
#listingsDF.info()
## check if 't' then replace with 1 else 0
#--- Function def
def item_replace(xstr):
    """Map the string 't' to 1; every other value (including 'f' and NaN)
    maps to 0."""
    return 1 if xstr == 't' else 0
#---
# Convert every t/f flag column to 1/0 via item_replace.
listingsDF['host_is_superhost'] = listingsDF['host_is_superhost'].map(item_replace)
listingsDF['host_has_profile_pic'] = listingsDF['host_has_profile_pic'].map(item_replace)
listingsDF['host_identity_verified'] = listingsDF['host_identity_verified'].map(item_replace)
listingsDF['instant_bookable'] = listingsDF['instant_bookable'].map(item_replace)
listingsDF['require_guest_profile_picture'] = listingsDF['require_guest_profile_picture'].map(item_replace)
listingsDF['require_guest_phone_verification'] = listingsDF['require_guest_phone_verification'].map(item_replace)
## Update dates to time intervals in days by
## determining the time elapsed since 2016-01-04 (scrape date)
## ignore empty values (nan's)
#--- Function def
def date_replace(xstr):
    """Convert a 'YYYY-MM-DD' string into the number of days before the
    2016-01-04 scrape date; float (NaN) values pass through unchanged."""
    if type(xstr) == float:
        # missing values arrive as float NaN
        return xstr
    scrape_date = datetime.datetime(2016, 1, 4)
    parsed = datetime.datetime.strptime(xstr, "%Y-%m-%d")
    return int((scrape_date - parsed).days)
#---
# Convert the three date columns to day offsets before the scrape date.
listingsDF['host_since'] = listingsDF['host_since'].map(date_replace)
listingsDF['first_review'] = listingsDF['first_review'].map(date_replace)
listingsDF['last_review'] = listingsDF['last_review'].map(date_replace)
# check to make sure pandas functions ignore missing values
#listingsDF.to_csv('./listingsDF_check.csv', index=False)
#listingsDF['first_review'].mean() # mean ignores NaNs.
#listingsDF['first_review'].count() # even count ignores NaNs.
## check if host_location is in seattle, ignore nan's
#--- Function def
def test_str(xstr):
if type(xstr)!=float:
if 'seattle' in xstr.lower():
xstr = 1
else:
xstr = 0
return xstr
#---
listingsDF['host_location'] = listingsDF['host_location'].map(test_str)
## check if host neighbourhood matches neighbourhood, if yes then 1 else 0 (nan's in either or both will be false)
listingsDF.loc[listingsDF['host_neighbourhood'] == listingsDF['neighbourhood'],'host_neighbourhood'] = 1
listingsDF.loc[listingsDF['host_neighbourhood'] != 1,'host_neighbourhood'] = 0
## drop neighbourhood column since it is not needed anymore
del listingsDF['neighbourhood']
## check if bed_type matches 'Real Bed', if yes then 1 else 0
listingsDF.loc[listingsDF['bed_type'] == 'Real Bed','bed_type'] = 1
listingsDF.loc[listingsDF['bed_type'] != 1,'bed_type'] = 0
## check property type matches, condense to 3 possible choices
listingsDF.loc[ (listingsDF['property_type'] == 'House') | (listingsDF['property_type'] == 'Townhouse') ,'property_type'] = 'House'
listingsDF.loc[ (listingsDF['property_type'] == 'Apartment') | (listingsDF['property_type'] == 'Condominium') ,'property_type'] = 'Apartment'
listingsDF.loc[ (listingsDF['property_type'] != 'House') & (listingsDF['property_type'] != 'Apartment') ,'property_type'] = 'Other'
## check when calendar last updated, condense to two possible choices
## (1 = updated within the past week, 0 = anything older)
listingsDF.loc[ (listingsDF['calendar_updated'] == 'today')
                | (listingsDF['calendar_updated'] == 'yesterday')
                | (listingsDF['calendar_updated'] == '2 days ago')
                | (listingsDF['calendar_updated'] == '3 days ago')
                | (listingsDF['calendar_updated'] == '4 days ago')
                | (listingsDF['calendar_updated'] == '5 days ago')
                | (listingsDF['calendar_updated'] == '6 days ago')
                , 'calendar_updated'] = 1
listingsDF.loc[ listingsDF['calendar_updated'] != 1 , 'calendar_updated'] = 0
## if host_response_time is 'N/A' replace with 'unknown'
listingsDF.loc[ (listingsDF['host_response_time'] == 'N/A') , 'host_response_time'] = 'unknown'
## replace string currency with float values
#--- Function def
def replace_currency(xstr):
    """Parse a '$1,234.56'-style string into a float; float (NaN) values
    pass through unchanged."""
    if type(xstr) == float:
        return xstr
    cleaned = xstr.replace('$', '').replace(',', '')
    return float(cleaned)
#---
# Convert every currency-formatted column to plain floats.
listingsDF['price'] = listingsDF['price'].map(replace_currency)
listingsDF['weekly_price'] = listingsDF['weekly_price'].map(replace_currency)
listingsDF['monthly_price'] = listingsDF['monthly_price'].map(replace_currency)
listingsDF['security_deposit'] = listingsDF['security_deposit'].map(replace_currency)
listingsDF['cleaning_fee'] = listingsDF['cleaning_fee'].map(replace_currency)
listingsDF['extra_people'] = listingsDF['extra_people'].map(replace_currency)
## replace string percentages with float values
#--- Function def
def replace_pct(xstr):
    """Parse a '95%'-style string into a fraction (0.95); float (NaN)
    values pass through unchanged."""
    if type(xstr) == float:
        return xstr
    return float(xstr.replace('%', '')) * .01
#---
# Convert the percentage-formatted response rate to a 0..1 fraction.
listingsDF['host_response_rate'] = listingsDF['host_response_rate'].map(replace_pct)
## replace with missing values in host_response_rate bedrooms bathrooms and beds with mean
def replaceNaN(mean, value):
    """Return *mean* when *value* is NaN, otherwise *value* itself."""
    return mean if math.isnan(value) else value
#---
# Fill missing numeric values with each column's (rounded) mean.
listingsDF['host_response_rate'] = listingsDF['host_response_rate'].apply(lambda x: replaceNaN(listingsDF['host_response_rate'].mean(),x))
listingsDF['bathrooms'] = listingsDF['bathrooms'].apply(lambda x: replaceNaN(round(listingsDF['bathrooms'].mean(),2),x))
listingsDF['bedrooms'] = listingsDF['bedrooms'].apply(lambda x: replaceNaN(round(listingsDF['bedrooms'].mean(),2),x))
listingsDF['beds'] = listingsDF['beds'].apply(lambda x: replaceNaN(round(listingsDF['beds'].mean(),2),x))
## replace missing zipcodes with the most common zipcode for that neighborhood
listingsDF.loc[ (listingsDF['zipcode'] == '') & (listingsDF['neighbourhood_group_cleansed'] == 'Queen Anne') ,'zipcode'] = '98109'
listingsDF.loc[ (listingsDF['zipcode'] == '') & (listingsDF['neighbourhood_group_cleansed'] == 'Ballard') ,'zipcode'] = '98107'
listingsDF.loc[ (listingsDF['zipcode'] == '') & (listingsDF['neighbourhood_group_cleansed'] == 'Interbay') ,'zipcode'] = '98119'
listingsDF.loc[ (listingsDF['zipcode'] == '') & (listingsDF['neighbourhood_group_cleansed'] == 'Capitol Hill') ,'zipcode'] = '98102'
listingsDF.loc[ (listingsDF['zipcode'] == '') & (listingsDF['neighbourhood_group_cleansed'] == 'Central Area') ,'zipcode'] = '98122'
listingsDF.loc[ (listingsDF['zipcode'] == '') & (listingsDF['neighbourhood_group_cleansed'] == 'Downtown') ,'zipcode'] = '98101'
###############################################################################
## Calendar file data cleaning
## Import calendar.csv
calendar_import = pd.read_csv('./source_data/calendar.csv')
## remove records that were coded incorrectly
calendar_import = calendar_import.loc[ ~calendar_import['listing_id'].isin(['3308979','2715623','7733192','2459519','4825073']) ]
# remove any rows in cal_sum that have the following listing ids (based on analysis of listings file)
calendar_import = calendar_import.loc[ ~calendar_import['listing_id'].isin(['8354452','10235014']) ]
## check if 't' then replace with 1 else 0
#--- Function def
def item_replace(xstr):
    """Map the string 't' to 1 and any other value to 0.

    NOTE(review): re-definition of the identical helper declared earlier
    in this script; the two could be consolidated.
    """
    return 1 if xstr == 't' else 0
#---
# Convert the calendar availability flag to 1/0.
calendar_import['available'] = calendar_import['available'].map(item_replace)
#calendar_import.info()
## get the sum of available days for each listing for the year and put in new dataframe
df1 = calendar_import.groupby('listing_id')['available'].sum()
## use replace currency function (above) to replace string values with float
calendar_import['price'] = calendar_import['price'].map(replace_currency)
## get the mean of price for each listing for the year and put in new dataframe
df2 = calendar_import.groupby('listing_id')['price'].mean()
## round the mean price to two decimals
## round the mean price to two decimals
#--- Function def
def round_currency(xstr):
    """Round a price value to two decimal places."""
    return round(xstr, 2)
#---
df2 = df2.map(round_currency)
## merge the two summary dataframes
df1 = df1.reset_index()
df2 = df2.reset_index()
calendarDF = pd.merge(df1,
                      df2,
                      how='inner',
                      on='listing_id')
calendarDF = calendarDF.rename(
    columns={"listing_id":"id", "price":"price_avg","available":"avail"})
del df1,df2,calendar_import
## merge with calendar file (must have run calendar file first)
listingsDF = pd.merge(listingsDF,
                      calendarDF,
                      how='inner',
                      on='id')
del calendarDF
###############################################################################
## Create two fields with bins for categorizing availability and avg_price
# Quartile-style manual bins; 0 is kept for values outside every range.
listingsDF['AvailCat'] = 0
listingsDF.loc[ (listingsDF['avail'] >= 0) & (listingsDF['avail'] <=124), 'AvailCat' ] = 1
listingsDF.loc[ (listingsDF['avail'] >= 125) & (listingsDF['avail'] <=308), 'AvailCat' ] = 2
listingsDF.loc[ (listingsDF['avail'] >= 309) & (listingsDF['avail'] <=360), 'AvailCat' ] = 3
listingsDF.loc[ (listingsDF['avail'] >= 361) & (listingsDF['avail'] <=365), 'AvailCat' ] = 4
listingsDF['PriceCat'] = 0
listingsDF.loc[ (listingsDF['price_avg'] >= 20) & (listingsDF['price_avg'] <=76), 'PriceCat' ] = 1
listingsDF.loc[ (listingsDF['price_avg'] >= 76.06) & (listingsDF['price_avg'] <=109), 'PriceCat' ] = 2
listingsDF.loc[ (listingsDF['price_avg'] >= 109.29) & (listingsDF['price_avg'] <=163.14), 'PriceCat' ] = 3
listingsDF.loc[ (listingsDF['price_avg'] >= 163.25) & (listingsDF['price_avg'] <=1071), 'PriceCat' ] = 4
###########################################################################
## remove 'other'-coded neighbourhoods and drop all rows with empty values
listingsDF = listingsDF.loc[ listingsDF['neighbourhood_group_cleansed'] != 'Other neighborhoods' ]
###############################################################################
## Create summary data tables and visualizations
#import matplotlib.rcsetup as rcsetup
#print(rcsetup.all_backends) # looking into rendering issues
#import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
## Count and availability of properties
# get tables
nb_count = listingsDF.groupby('neighbourhood_group_cleansed')['zipcode'].count()
nb_avail = listingsDF.groupby('neighbourhood_group_cleansed')['avail'].mean()
# convert index to column
nb_count = nb_count.reset_index()
nb_avail = nb_avail.reset_index()
# sorting can be done by value:
nb_count = nb_count.sort_values(by='zipcode', ascending=False)
nb_avail = nb_avail.sort_values(by='avail', ascending=False)
# round decimals on available:
nb_avail['avail'] = round(nb_avail['avail'],1)
# rename columns
nb_count = nb_count.rename(columns={"neighbourhood_group_cleansed":"neighborhood","zipcode":"count"})
nb_avail = nb_avail.rename(columns={"neighbourhood_group_cleansed":"neighborhood","avail":"avg days avail"})
# write out csv
#nb_count.to_csv('nb_count.csv', columns=['neighborhood', 'count'], sep=',', index=False)
#nb_avail.to_csv('nb_avail.csv', columns=['neighborhood', 'avg days avail'], sep=',', index=False)
# create visual: nb_avail
objects = tuple(nb_avail['neighborhood'])
y_pos = np.arange(len(objects))
plt.bar(y_pos, list(nb_avail['avg days avail']), align='center', alpha=0.5)
plt.xticks(y_pos, objects, rotation=90)
plt.ylabel('avg days available')
plt.title('Availability by Neighborhood')
plt.tight_layout()
fig1 = plt.gcf()
#plt.savefig('test2')
plt.show()
plt.draw()
fig1.savefig('./images/nb_avail.png')
# create visual: nb_count
objects = tuple(nb_count['neighborhood'])
y_pos = np.arange(len(objects))
plt.bar(y_pos, list(nb_count['count']), align='center', alpha=0.5)
plt.xticks(y_pos, objects, rotation=90)
plt.ylabel('Count')
plt.title('Listings by Neighborhood')
plt.tight_layout()
fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('./images/nb_count.png')
###############################################################################
## lowest, average, and highest price properties
# get tables: per-neighborhood min / mean / max of the price_avg column
nb_min_price = listingsDF.groupby('neighbourhood_group_cleansed')['price_avg'].min()
nb_mean_price = listingsDF.groupby('neighbourhood_group_cleansed')['price_avg'].mean()
nb_max_price = listingsDF.groupby('neighbourhood_group_cleansed')['price_avg'].max()
# reset index so the neighborhood becomes a normal column for merging
nb_min_price = nb_min_price.reset_index()
nb_mean_price = nb_mean_price.reset_index()
nb_max_price = nb_max_price.reset_index()
# merge tables; pd.merge default suffixes make the first two price columns
# 'price_avg_x' (min) and 'price_avg_y' (mean), the third keeps 'price_avg' (max)
nb_price = pd.merge(nb_min_price,nb_mean_price,how='inner',on='neighbourhood_group_cleansed')
nb_price = pd.merge(nb_price,nb_max_price,how='inner',on='neighbourhood_group_cleansed')
# drop unused
del nb_min_price,nb_mean_price,nb_max_price
# rename cols (relies on the _x/_y suffixes produced by the merges above)
nb_price = nb_price.rename(columns={"neighbourhood_group_cleansed":"neighborhood","price_avg_x":"min","price_avg_y":"avg","price_avg":"max"})
# sorting can be done by value:
nb_price = nb_price.sort_values(by='avg', ascending=False)
# round decimals on available:
nb_price['min'] = round(nb_price['min'],1)
nb_price['avg'] = round(nb_price['avg'],1)
nb_price['max'] = round(nb_price['max'],1)
# print csv
#nb_price.to_csv('nb_price.csv', columns=['neighborhood', 'min', 'avg', 'max'], sep=',', index=False)
# create visual: nb_price — grouped bars (min/avg/max per neighborhood);
# index is doubled so the three bars of a group do not overlap the next group
objects = tuple(nb_price['neighborhood'])
n_groups = len(objects)
price_mins = tuple(nb_price['min'])
price_avgs = tuple(nb_price['avg'])
price_maxs = tuple(nb_price['max'])
fig, ax = plt.subplots()
index = np.arange(n_groups)
index = index*2
bar_width = 0.5
opacity = 0.8
rects1 = plt.bar(index - bar_width, price_mins, bar_width,
alpha=opacity,
color='b',
label='min')
rects2 = plt.bar(index, price_avgs, bar_width,
alpha=opacity,
color='g',
label='avg')
rects3 = plt.bar(index + bar_width, price_maxs, bar_width,
alpha=opacity,
color='r',
label='max')
plt.ylabel('Price')
plt.title('Prices by Neighborhood')
plt.xticks(index, objects, rotation=90)
plt.legend()
plt.tight_layout()
fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('./images/nb_price.png')
###############################################################################
## Count by property types in each neighborhood
# group by neighborhood and property type; counting 'zipcode' is used as a
# non-null row counter here — presumably every listing has a zipcode (verify)
nb_count_property_type = listingsDF.groupby(['neighbourhood_group_cleansed','property_type'])['zipcode'].count()
# reset index
nb_count_property_type = nb_count_property_type.reset_index()
# pivot the table so each property type becomes its own column
nb_count_property_type = nb_count_property_type.pivot(index = 'neighbourhood_group_cleansed'
,columns = 'property_type'
,values = 'zipcode')
# reset the index again
nb_count_property_type = nb_count_property_type.reset_index()
# sort by number of apartments
nb_count_property_type = nb_count_property_type.sort_values(by='Apartment', ascending=False)
# rename cols (Apartment/House/Other map to themselves; only the index column changes)
nb_count_property_type = nb_count_property_type.rename(columns={"neighbourhood_group_cleansed":"neighborhood","Apartment":"Apartment","House":"House","Other":"Other"})
# print csv
#nb_count_property_type.to_csv('nb_count_property_type.csv', columns=['neighborhood', 'Apartment', 'House', 'Other'], sep=',', index=False)
# create visual nb_count_property_type — grouped bars, one group per neighborhood
objects = tuple(nb_count_property_type['neighborhood'])
n_groups = len(objects)
means_apt = tuple(nb_count_property_type['Apartment'])
means_house = tuple(nb_count_property_type['House'])
means_other = tuple(nb_count_property_type['Other'])
fig, ax = plt.subplots()
index = np.arange(n_groups)
index = index*2
bar_width = 0.5
opacity = 0.8
rects1 = plt.bar(index - bar_width, means_apt, bar_width,
alpha=opacity,
color='b',
label='Apartment')
rects2 = plt.bar(index, means_house, bar_width,
alpha=opacity,
color='g',
label='House')
rects3 = plt.bar(index + bar_width, means_other, bar_width,
alpha=opacity,
color='r',
label='Other')
plt.ylabel('Count')
plt.title('Property type by Neighborhood')
plt.xticks(index, objects, rotation=90)
plt.legend()
plt.tight_layout()
fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('./images/nb_count_property_type.png')
###############################################################################
## User ratings in each neighborhood
# group: mean review score per neighborhood
nb_mean_rating = listingsDF.groupby('neighbourhood_group_cleansed')['review_scores_rating'].mean()
# reset index
nb_mean_rating = nb_mean_rating.reset_index()
# rename (aliases the same DataFrame object; no copy is made)
nb_rating = nb_mean_rating
# sort by mean rating, highest first
nb_rating = nb_rating.sort_values(by='review_scores_rating', ascending=False)
# rename cols
nb_rating = nb_rating.rename(columns={"neighbourhood_group_cleansed":"neighborhood","review_scores_rating":"Avg Rating"})
# round decimals:
nb_rating['Avg Rating'] = round(nb_rating['Avg Rating'],1)
# print csv
#nb_rating.to_csv('nb_rating.csv', columns=['neighborhood', 'Avg Rating'], sep=',', index=False)
# create visual nb_rating — bar chart of mean rating per neighborhood
objects = tuple(nb_rating['neighborhood'])
y_pos = np.arange(len(objects))
plt.bar(y_pos, list(nb_rating['Avg Rating']), align='center', alpha=0.5)
plt.xticks(y_pos, objects, rotation=90)
plt.ylabel('Rating')
plt.title('Mean Rating by Neighborhood')
plt.tight_layout()
fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('./images/nb_rating.png')
| true
|
0dbac94eee0c28d934f6e619f21ee28b5976be12
|
Python
|
saikatsk/PythonJIRA
|
/createIssue.py
|
UTF-8
| 643
| 2.78125
| 3
|
[] |
no_license
|
# Developed by Koushik - Apr 2020
# Purpose: Create a JIRA issue via the jira Python client.
from jira import JIRA
import getpass
# login to JIRA using username and password; getpass reads the password
# without echoing it to the terminal
print("Enter credentials to login to JIRA:")
user = input("Username: ")
pw = getpass.getpass()
# NOTE(review): auth= uses HTTP basic authentication against a hard-coded
# server URL — confirm this is intended for the target environment.
jira = JIRA(auth=(user, pw), options={'server': 'https://jira.kfplc.com'})
# ---- Create JIRA issue by passing values (project key and fields are hard-coded)
new_issue = jira.create_issue(project='DRRR', summary="Test story title from JIRA-Python automation script", description="Test story description from JIRA Python automation script", issuetype={'name': 'Story'}, priority={'name': 'High'})
print('\nNew issue created: ', new_issue)
| true
|
73a4b8e30a34fdb14377ed8b60d9d5e48ecdc9ce
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02918/s874728961.py
|
UTF-8
| 269
| 2.53125
| 3
|
[] |
no_license
|
import sys
# Competitive-programming stdin helpers: read a line / an int / a list of ints.
sr = lambda: sys.stdin.readline().rstrip()
ir = lambda: int(sr())
lr = lambda: list(map(int, sr().split()))
N, K = lr()
S = input()
# Count adjacent equal pairs in S; each of the K allowed operations can add
# at most 2 such pairs, and the total can never exceed N - 1.
score = 0
for i in range(N - 1):
    if S[i + 1] == S[i]:
        score += 1
score = min(score + 2 * K, N - 1)
print(score)
| true
|
54f1c1f94d297925a53084dd2e8cf6dc2034fefd
|
Python
|
StacyFelix/hw_ORM_Mongo
|
/ticket.py
|
UTF-8
| 3,406
| 3.09375
| 3
|
[] |
no_license
|
import csv
import re
from pymongo import MongoClient
client = MongoClient()
tickbd = client['tickbd']
def load_tickets(bd, collection='ticket', csv_file='ticket.csv'):
    """
    Load rows from a CSV file into the ticket collection.

    The CSV stores 1-based concert indexes in 'concert_id'; they are remapped
    to the Mongo ObjectIds returned by load_concerts.  Returns the inserted
    ticket ObjectIds.
    """
    # NOTE(review): load_concerts is called with the module-global `tickbd`,
    # not the `bd` parameter — confirm this asymmetry is intentional.
    concert_ids_list = load_concerts(tickbd, 'concert', 'concert.csv')
    with open(csv_file, encoding='utf8') as csvfile:
        data_list = []
        reader = csv.DictReader(csvfile)
        for row in reader:
            data = dict(row)
            data['concert_id'] = concert_ids_list[int(data['concert_id'])-1]
            # print(data)
            data_list.append(data)
        ticket_list = bd[collection].insert_many(data_list)
    return ticket_list.inserted_ids
def load_concerts(bd, collection='concert', csv_file='concert.csv'):
    """
    Load rows from a CSV file into the concert collection.

    Reference collections (artist/location/town/country) are loaded first;
    the CSV's 1-based foreign keys are remapped to the resulting ObjectIds.
    Returns the inserted concert ObjectIds.
    """
    # NOTE(review): read_data is called with the module-global `tickbd`,
    # not the `bd` parameter — confirm this asymmetry is intentional.
    artist_ids_list = read_data(tickbd, 'artist', 'artist.csv')
    location_ids_list = read_data(tickbd, 'location', 'location.csv')
    town_ids_list = read_data(tickbd, 'town', 'town.csv')
    country_ids_list = read_data(tickbd, 'country', 'country.csv')
    with open(csv_file, encoding='utf8') as csvfile:
        data_list = []
        reader = csv.DictReader(csvfile)
        for row in reader:
            data = dict(row)
            data['artist_id'] = artist_ids_list[int(data['artist_id'])-1]
            data['location_id'] = location_ids_list[int(data['location_id'])-1]
            data['town_id'] = town_ids_list[int(data['town_id'])-1]
            data['country_id'] = country_ids_list[int(data['country_id'])-1]
            # print(data)
            data_list.append(data)
        concert_list = bd[collection].insert_many(data_list)
    return concert_list.inserted_ids
def read_data(bd, collection, csv_file):
    """
    Load rows from a CSV file into one of the remaining (flat) collections.

    Returns the list of inserted ObjectIds, in CSV row order.
    """
    with open(csv_file, encoding='utf8') as csvfile:
        data_list = []
        reader = csv.DictReader(csvfile)
        for row in reader:
            data = dict(row)
            # print(data)
            data_list.append(data)
        row_list = bd[collection].insert_many(data_list)
    return row_list.inserted_ids
def find_cheapest(db):
    """
    Return all tickets sorted by ascending price.
    """
    return list(db.ticket.find().sort("price"))
def find_by_name(name, db):
    """
    Find artists whose title contains *name* as a case-insensitive substring.

    Returns the matching artist documents as a list.
    """
    # !!! searches only the artist collection via a regex, with no join
    # against the ticket collection (so results are not sorted by price).
    # Fix: local variable was named `str`, shadowing the builtin.
    pattern_text = ".*({}).*".format(name)
    regex = re.compile(pattern_text, re.IGNORECASE)
    return list(db.artist.find({"title_artist": {"$regex": regex}}))
if __name__ == '__main__':
    # Tickets, concerts, venues, towns and countries are loaded from separate
    # CSV files into separate collections, with _id remapping:
    # load_tickets(tickbd, 'ticket', 'ticket.csv')
    # How to join collections by _id remains unresolved (see find_by_name note):
    print(find_cheapest(tickbd))
    print(find_by_name('cri', tickbd))
| true
|
5e7b39b8a527cc147b9adbf717ebe49782fc7c45
|
Python
|
Weidaoqin/Pythom.work
|
/ex2-4.py
|
UTF-8
| 130
| 2.828125
| 3
|
[] |
no_license
|
# 2.4 Equilateral triangle (turtle graphics exercise)
from turtle import *
# 650x350 window positioned at (200, 200)
setup(650,350,200,200)
pendown()
# Draw three 100-pixel sides, turning the heading by 120 degrees each time.
fd(100)
seth(120)
fd(100)
seth(240)
fd(100)
|
2d2c0448381719652a6650dce221b98156c22b9f
|
Python
|
Lanceolata/code
|
/algorithms/algorithms-python/leetcode_easy/Question_214_Rotated_Digits.py
|
UTF-8
| 401
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
    def rotatedDigits(self, N):
        """
        :type N: int
        :rtype: int

        Count the "good" numbers in [1, N]: every digit must survive a
        180-degree rotation (so no 3, 4 or 7) and at least one digit must
        change under rotation (a 2, 5, 6 or 9).
        """
        good_count = 0
        for num in range(1, N + 1):
            text = str(num)
            if any(ch in '347' for ch in text):
                continue  # an unrotatable digit disqualifies the number
            if any(ch in '2569' for ch in text):
                good_count += 1  # rotates to a *different* valid number
        return good_count
| true
|
cc36729df0d4386c1398668a8a1329e6e10b7651
|
Python
|
Mschnuff/FlaskWebSpiel
|
/gothonweb/ork.py
|
UTF-8
| 508
| 2.8125
| 3
|
[] |
no_license
|
class Ork(object):
    """An ork with 100 hit points that can take damage and eventually die."""

    def __init__(self, name):
        self.name = name
        self.hitpoints = 100
        # hp_inverse mirrors the damage taken so far (100 - hitpoints).
        self.hp_inverse = 100 - self.hitpoints
        self.checked = False
        self.amleben = True

    def erleideSchaden(self, schaden):
        """Subtract *schaden* hit points; at 0 or below the ork dies."""
        self.hitpoints -= schaden
        self.hp_inverse = 100 - self.hitpoints
        if self.hitpoints <= 0:
            self.amleben = False
            print(self.name + " stirbt.")
| true
|
53325875f6faf57de762a261d9083f7cfa3c3c3b
|
Python
|
yousukeayada/TicTacToe-RL
|
/TicTacToe.py
|
UTF-8
| 1,454
| 3.09375
| 3
|
[] |
no_license
|
from itertools import product
from enum import IntEnum, auto
import numpy as np
from Board import *
class Turn(IntEnum):
    """Move order of the two players; usable directly as an integer index."""
    FIRST = 0
    SECOND = 1
class TicTacToe:
    """Gym-style tic-tac-toe environment wrapping a Board.

    NOTE(review): Board, Winner, Piece and logger come from the Board module
    (via `from Board import *` at the top of the file) — confirm.
    """
    def __init__(self, size=3):
        # size: board edge length; num_squares: total cells.
        self.size = size
        self.num_squares = self.size * self.size
        self.board = Board(size=self.size)
    def reset(self):
        """Clear the board and return the initial state index (0)."""
        self.board.reset_stage()
        state = 0
        return state
    def step(self, action, piece):
        """Place *piece* at flat cell index *action*.

        Returns (next_state, reward, done, winner): reward 1 when the move
        wins, 0 on a draw or while the game continues.  If the board raises
        (presumably an illegal move) the exception is logged and
        (None, nan, False, None) is returned.
        """
        # Flat action index -> (x, y) grid coordinates.
        x, y = action % self.size, int(action / self.size)
        try:
            winner = self.board.put_piece(x, y, piece)
            next_state = self.convert_to_state(self.board.stage)
            if winner:
                done = True
                if winner == Winner.DRAW:
                    reward = 0
                else:
                    reward = 1
            else:
                reward, done = 0, False
            return next_state, reward, done, winner
        except Exception as e:
            logger.info(e)
            return None, np.nan, False, None
    def check(self, action):
        """Return whether the cell addressed by *action* accepts a piece."""
        x, y = action % self.size, int(action / self.size)
        return self.board.can_put(x, y)
    def convert_to_state(self, stage):
        """Encode the 2-D board as a single integer.

        The flattened cell values (minus 1) are treated as digits of a
        base-len(Piece) number, most-significant cell first.
        """
        s = [stage[i][j] for i in range(self.size) for j in range(self.size)]
        index = 0
        for i in range(self.num_squares):
            index += (s[i]-1) * (len(Piece) ** (self.num_squares-i-1))
        return index
| true
|
7e9f8b41359a98bfb01f3edc1b480e0c8899625a
|
Python
|
erfanian/AsciiFree
|
/engine/game_engine.py
|
UTF-8
| 4,352
| 2.96875
| 3
|
[] |
no_license
|
#! /usr/bin/env python3.0
##########################################################################
## AsciiFree project #####################################################
## An open-source, ASCII-graphics version of SkiFree #####################
## Spring 2013 ###########################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
##########################################################################
## GameEngine.py contributors: #
## Chris Cornelius #
## Eric Erfanian #
##########################################################################
import curses
import math
import ascii_rendering_manager
import screen_manager
import input_manager
class GameEngine(object):
    """The main engine for a game - implement your game by making a subclass.

    Manages I/O, events processing, screen drawing, and the run loop for the
    game.  Subclassers should override iteration(); start()/run_loop() are
    fixed plumbing and must not be overridden.
    """

    def __init__(self):
        # Wire up the rendering pipeline and start the input reader.
        self._screen_manager = ascii_rendering_manager.AsciiRenderingManager(screen_manager.Screen())
        self._input_man = input_manager.Input(self._screen_manager._screen.get_screen())
        self._input_man.start()
        self._should_keep_running = True
        self._show_title = True

    def iteration(self):
        """Run one pass of the game loop: poll input, update, redraw.

        Called on every trip through run_loop(); must return quickly.
        """
        eval_char = self._input_man.get_input()
        self._screen_manager.screen_refresh()
        if eval_char == 10 and self._show_title:
            # Enter (keycode 10) dismisses the title screen.
            self._show_title = False
            self._screen_manager._drawable_objects.pop("start_screen")
        if eval_char == 113:
            # a 'q' means quit!
            self._should_keep_running = False  # TODO: make a helper method for this assignment
        else:
            pass
        if not self._show_title:
            # curses arrow-key codes: 258 down, 259 up, 260 left, 261 right.
            if eval_char == 258:
                self._screen_manager.set_object_payload("cursor", "\/")
                self._screen_manager.checkBounds(0, 1)  # down
            elif eval_char == 259:
                self._screen_manager.set_object_payload("cursor", "/\\")
                self._screen_manager.checkBounds(0, -1)  # up
            elif eval_char == 260:
                self._screen_manager.set_object_payload("cursor", "<")
                self._screen_manager.checkBounds(-1, 0)  # left
            elif eval_char == 261:
                self._screen_manager.set_object_payload("cursor", ">")
                self._screen_manager.checkBounds(1, 0)  # right
        if self._show_title:
            self._screen_manager.screen_clear()
            self._screen_manager.draw()
        elif eval_char is not None:
            # Only redraw on actual input once the title is gone.
            self._screen_manager.screen_clear()
            self._screen_manager.draw()
        self._screen_manager.screen_refresh()

    # public - call but do not override!
    def start(self):
        """Public entry point: begin the game loop."""
        self.run_loop()

    def set_active_drawing_context(self, new_context):
        """Replace the active drawing context.

        Bug fix: the original assigned the misspelled name `new_nontext`,
        which raised NameError on every call.
        """
        self._drawing_context = new_context

    # private - do not touch!
    def run_loop(self):
        """Drive iteration() until the quit flag clears, then tear down curses."""
        self._screen_manager.screen_clear()
        self._screen_manager.screen_refresh()
        while (self._should_keep_running):
            # here is where we call self.iteration() and then redraw the UI
            self.iteration()
            self._screen_manager.draw()
        self._screen_manager.stop_screen()
if __name__ == '__main__':
    # Run the engine directly for a quick manual smoke test.
    engine = GameEngine()
    engine.start()
| true
|
c19cd2dcd12853db4cb509f3c8edefd03cb3bfe1
|
Python
|
keszybz/minima
|
/minima.py
|
UTF-8
| 438
| 3.40625
| 3
|
[] |
no_license
|
def minima(arr):
    """Return the indices of strict local minima of *arr*.

    Index i is a local minimum when arr[i-1] > arr[i] < arr[i+1].
    Endpoints are never reported.

    Bug fix: the original tested arr[i-1] < arr[i] > arr[i+1] (a local
    *maximum*), and started at i = 0 where arr[i-1] silently wrapped
    around to the last element.
    """
    return [i for i in range(1, len(arr) - 1)
            if arr[i - 1] > arr[i] < arr[i + 1]]
def maxima(arr):
    """Return the indices of strict local maxima of *arr*.

    Index i is a local maximum when arr[i-1] < arr[i] > arr[i+1].
    Endpoints are never reported.

    Bug fix: the original tested arr[i-1] > arr[i] < arr[i+1] (a local
    *minimum*), and started at i = 0 where arr[i-1] silently wrapped
    around to the last element.
    """
    return [i for i in range(1, len(arr) - 1)
            if arr[i - 1] < arr[i] > arr[i + 1]]
class Fake:
    """Minimal stand-in object exposing a single two-argument operation."""

    def __init__(self):
        # Nothing to initialise; the class only provides behaviour.
        pass

    def func(self, x, y):
        """Return the sum of the two arguments."""
        return x + y
| true
|
b34cc53e7deba762869b0875296f58a452b25d74
|
Python
|
austin-niemann/Add
|
/AddStudents.py
|
UTF-8
| 3,452
| 2.984375
| 3
|
[] |
no_license
|
# Build an Excel workbook ("Load.xlsx") of Active Directory user rows from a
# CSV of student names.  Expected CSV layout (no header): column 0 = first
# name, column 1 = last name, column 2 = username — presumably; verify.
import pandas as pd
import xlsxwriter as xlsxwriter
import subprocess, sys
filePath = input("Enter File Path")
user = input("enter first.last name")
# import student name csv file
# NOTE(review): [1:-1] strips the first and last character — assumes the
# path was pasted with surrounding quotes; an unquoted path gets mangled.
new = filePath[1:-1]
df = pd.read_csv(r"%s" % new, header=None, encoding='UTF-8')
# drop blank lines
df.dropna(axis=0, how="any", thresh=None, subset=None, inplace=True)
# find the number of lines without blanks
size = df.shape
print(new)
# number of students to load (determines how many times the while loops run)
# NOTE(review): max(shape) assumes there are more rows than columns — verify.
students = max(size)
# write new excel document (hard-coded Windows desktop path for `user`)
docName = "Load.xlsx"
newPath = r'C:\Users\%s\Desktop\Load.xlsx' % user
workbook = xlsxwriter.Workbook(newPath)
worksheet = workbook.add_worksheet()
# column headers
worksheet.write('A1', "FIRSTNAME")
worksheet.write('B1', "LASTNAME")
worksheet.write('C1', "USERNAME")
worksheet.write('D1', "PASSWORD")
worksheet.write('E1', "OU")
worksheet.write('F1', "DESCRIPTION")
worksheet.write('G1', "Principal Name")
# constant variables (same OU, description and default password for everyone)
OU = "OU=WLC,OU=1st Battalion,DC=rti,DC=loc"
Description = "BLC Student"
Password = "password"
# while loop to input students first names (column A, starting at row 2)
condition_First = 0
intRow_First = 0
intColumn_First = 0
intCell_First = 2
firstName = (df.iloc[intRow_First, intColumn_First])
print(firstName)
while condition_First < students:
    firstName = (df.iloc[intRow_First, intColumn_First])
    worksheet.write('A%d' % intCell_First, firstName)
    intCell_First += 1
    intRow_First += 1
    intColumn_First += 0
    condition_First += 1
# while loop to input students last names, OU, Password, and Description
condition_last = 0
intRow_last = 0
intColumn_last = 1
intCell_last = 2
intCell_OU = 2
intCell_Desc = 2
intCell_Pass = 2
while condition_last < students:
    lastName = (df.iloc[intRow_last, intColumn_last])
    worksheet.write('B%d' % intCell_last, lastName)
    worksheet.write('E%d' % intCell_OU, OU)
    worksheet.write('F%d' % intCell_Desc, Description)
    worksheet.write('D%d' % intCell_Pass, Password)
    condition_last += 1
    intRow_last += 1
    intColumn_last += 0
    intCell_last += 1
    intCell_OU += 1
    intCell_Desc += 1
    intCell_Pass += 1
# while loop to input students username and principal name (user@rti.loc)
condition_user = 0
intRow_user = 0
intColumn_user = 2
intCell_user = 2
intCell_PN = 2
while condition_user < students:
    username = (df.iloc[intRow_user, intColumn_user])
    worksheet.write('C%d' % intCell_user, username)
    worksheet.write('G%d' % intCell_PN, "%s@rti.loc" % username)
    print("added %s" % username)
    condition_user += 1
    intRow_user += 1
    intColumn_user += 0
    intCell_user += 1
    intCell_PN += 1
# while loop to input password — superseded by the constant Password above
#condition_pass = 0
#intRow_pass = 0
#intColumn_pass = 1
#intCell_pass = 2
#while condition_pass < students:
    #password = (df.iloc[intRow_pass, intColumn_pass])
    #worksheet.write('D%d' % intCell_pass, password)
    #condition_pass += 1
    #intRow_pass += 1
    #intColumn_pass += 0
    #intCell_pass += 1
# end of while loops and finishes writing excel document
workbook.close()
print("File saved to desktop as %s" % docName)
# Optional PowerShell step to actually create the AD accounts (disabled):
#p = subprocess.Popen(['powershell.exe', r"C:\Users\austin.niemann\Desktop\powershelladmin.ps1"], stdout=sys.stdout)
#p.communicate()
print("Added %d new students to Active Directory!" % students)
| true
|
e10b13ba4de4379fa9ba313c87205366665504a4
|
Python
|
siddhism/leetcode
|
/tree/check_full_binary_tree.py
|
UTF-8
| 988
| 3.859375
| 4
|
[] |
no_license
|
class Node:
    """Binary-tree node: a payload plus left/right child links."""

    def __init__(self, data):
        # Children start empty; the caller wires up the tree explicitly.
        self.data = data
        self.left = self.right = None

    def __repr__(self):
        # Show just the payload when the node is printed.
        return str(self.data)
def inorder(node):
    # In-order traversal (left subtree, node, right subtree), printing each
    # payload.  NOTE: Python 2 print statement — this file is Python 2 only.
    if not node:
        return
    inorder(node.left)
    print node.data
    inorder(node.right)
# Build a complete 3-level tree:
#            1
#          /   \
#         2     3
#        / \   / \
#       4   5 6   7
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
def is_leaf(node):
    """Return True when *node* has neither a left nor a right child."""
    return not node.left and not node.right
def is_full_binary(node):
    """Return True if every node in the tree has either zero or two children."""
    if not node:
        # An empty tree is vacuously full.
        return True
    left, right = node.left, node.right
    if not left and not right:
        # A leaf is trivially full (inlines the is_leaf check).
        return True
    if not left or not right:
        # Exactly one child breaks the full-binary property.
        return False
    # Both children present: recurse into each subtree.
    return is_full_binary(left) and is_full_binary(right)
# Demo: print the traversal, then report whether the tree is full (Python 2).
inorder(root)
print ('\n')
is_full_binary_tree = is_full_binary(root)
print ('is full binary tree ', is_full_binary_tree)
| true
|
ca5d95b55b8aa6f78de69b332c621bceed846d68
|
Python
|
YiseBoge/CompetitiveProgramming
|
/LeetCode/Sorting/sort_by_distance.py
|
UTF-8
| 876
| 3.1875
| 3
|
[] |
no_license
|
import sys
def sort_by_distance(R: int, C: int, r0: int, c0: int):
    """Return every cell [r, c] of an R x C grid, ordered by Manhattan
    distance from (r0, c0).

    Python's sort is stable, so cells at equal distance keep their
    row-major generation order — matching the original bucket-by-distance
    implementation exactly.
    """
    cells = [[r, c] for r in range(R) for c in range(C)]
    return sorted(cells, key=lambda cell: abs(cell[0] - r0) + abs(cell[1] - c0))
def solution(l1, l2, l3, l4):
    # Thin wrapper giving the judge-style harness a uniform entry point.
    return sort_by_distance(l1, l2, l3, l4)
def main():
    # Inputs are hard-coded for local testing; stdin parsing is left commented.
    # inp1 = sys.stdin.readline().split()
    # inp2 = sys.stdin.readline().split()
    inp1 = 89
    inp2 = 90
    inp3 = 21
    inp4 = 65
    sys.stdout.write(str(solution(inp1, inp2, inp3, inp4)))
if __name__ == '__main__':
    main()
| true
|
f6cf9f67323e6de795b25423bfa3d87b4909c4b8
|
Python
|
barcern/python-crash-course
|
/chapter8/c8_16_imports.py
|
UTF-8
| 1,196
| 3.34375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 12:52:52 2020
@author: barbora

Exercise: store a previously written function (make_car) in a module and
import it using each of Python's import options in turn.
"""
# Using a previously used function, store it in a module, and import the
# function using each import option.
# Using c8_14_cars
# Import full module
import c8_16_imports_functions
this_car = c8_16_imports_functions.make_car('renault',
                                            'clio', year=2005, colour='silver')
print("\nFull module")
print(this_car)
# Import specific function
from c8_16_imports_functions import make_car
this_car = make_car('renault', 'clio', year=2005, colour='silver')
print("\nSpecific function")
print(this_car)
# Import specific function with alias
from c8_16_imports_functions import make_car as mc
this_car = mc('renault', 'clio', year=2005, colour='silver')
print("\nSpecific function with alias")
print(this_car)
# Import full module with alias
import c8_16_imports_functions as ic
this_car = ic.make_car('renault', 'clio', year=2005, colour='silver')
print("\nFull module with alias")
print(this_car)
# Import all functions (star import — discouraged outside exercises)
from c8_16_imports_functions import *
this_car = make_car('renault', 'clio', year=2005, colour='silver')
print("\nAll functions")
print(this_car)
| true
|
565dc9d8cf838852bf920ec697e15825e5af2f21
|
Python
|
ieee-saocarlos/desafios-cs
|
/Victor Macedo/Produtos.py
|
UTF-8
| 488
| 3.234375
| 3
|
[] |
no_license
|
# Units per pack of each product: milk (L), eggs (O), pegs (P), soap (S),
# yoghurt (I).  Prompts and output are in Portuguese and left verbatim.
L=12
O=12
P=24
S=5
I=6
# Ask how many packs of each product were bought.
Leite=int(input("Digite a quantidade de conjutos de Leite:" ))
Ovos=int(input("Digite a quantidade de conjutos de Ovos: "))
Prend=int(input("Digite a quantidade de conjutos de Prendedores: "))
Sabão=int(input("Digite a quantidade de conjutos de Sabão: "))
Ior=int(input("Digite a quantidade de conjutos de Iorgurte: "))
# Report the total number of individual items per product.
print("Há",L*Leite, "caixas de leite,",O*Ovos, "ovos,",P*Prend," prendedores,",S*Sabão,"barras de sabão e",I*Ior,"copinhos de iogurte")
| true
|
882e3be3601729364193437c89166ecad0b948f0
|
Python
|
luckycontrol/Algorithm_with_python
|
/BubbleSort.py
|
UTF-8
| 342
| 3.46875
| 3
|
[] |
no_license
|
def bubble(list):
    """Sort *list* in place with bubble sort and return it.

    Passes repeat until a full sweep performs no swap.
    """
    upper = len(list) - 1
    swapped = True
    while swapped:
        swapped = False
        for idx in range(upper):
            if list[idx] > list[idx + 1]:
                list[idx], list[idx + 1] = list[idx + 1], list[idx]
                swapped = True
    return list
# Read whitespace-separated integers from stdin and print them sorted.
lst = list(map(int, input().split()))
print(bubble(lst))
| true
|
9310cffcb7cef5dfb6ffd065844aeb24e644057a
|
Python
|
rokmokorel/Programiranje_1
|
/03_pisanje funkcij/08_napadalne kraljice.py
|
UTF-8
| 3,311
| 2.875
| 3
|
[] |
no_license
|
import random
# Example starting layout and the board coordinate alphabets (columns a-h,
# rows 1-8); squares are two-character strings like "a4".
razpored = ["a4", "c7", "d2"]
stolpci = "abcdefgh"
vrstice = "12345678"
# ********************************* FOR GRADE 6 *********************************
def stolpec_prost(stolpec, razpored):
    # True when no queen in `razpored` stands in column `stolpec`.
    for i in razpored:
        if i[0] == stolpec:
            return False
    return True
print(stolpec_prost('c', razpored))
def prosti_stolpci(raz):
    # List every column that currently holds no queen.
    pr_stolpci = []
    for i in stolpci:
        if stolpec_prost(i, raz):
            pr_stolpci.append(i)
    return pr_stolpci
print(prosti_stolpci(razpored))
def prost_stolpec(raz):
    # Return the first free column (None if all are taken).
    for i in stolpci:
        if stolpec_prost(i, raz):
            return i
            # NOTE(review): this `break` is unreachable (after `return`).
            break
print(prost_stolpec([]))
# ********************************* REŠEVANJE ZA 7 *********************************
def napada(polje1, polje2):
    """Return True when queens on squares *polje1* and *polje2* (e.g. 'a4')
    attack each other: same column, same row, or same diagonal."""
    same_column = polje1[0] == polje2[0]
    same_row = polje1[1] == polje2[1]
    # Diagonal: column distance equals row distance (digits compare fine
    # via their character codes).
    same_diagonal = abs(ord(polje1[0]) - ord(polje2[0])) == abs(ord(polje1[1]) - ord(polje2[1]))
    return same_column or same_row or same_diagonal
print('a4 napada a7: ', napada("a4", "a7"))
def napadajo(polje, razpored):
    # Collect every queen from `razpored` that attacks square `polje`.
    sez_napadajo = []
    for i in razpored:
        if napada(polje, i):
            sez_napadajo.append(i)
    return sez_napadajo
print(napadajo("g8", ["a4", "c7", "d2"]))
def napadeno(polje, razpored):
    # True when at least one placed queen attacks `polje`.
    if napadajo(polje, razpored) == []:
        return False
    else:
        return True
print(napadeno("g8", ["a4", "c7", "d2"]))
# ********************************* SOLVING FOR 8 *********************************
def prosto_v_stolpcu(stolpec, postavitev):
    # All squares in column `stolpec` not attacked by any placed queen.
    prosto = []
    for vrstica in vrstice:
        if not napadeno(stolpec + vrstica, postavitev):
            prosto.append(stolpec + vrstica)
    return prosto
print(prosto_v_stolpcu("a", ["b4", "c7", "d2"]))
def prosto_polje(postavitev):
    # First unattacked square scanning column by column; implicitly returns
    # None when every square is attacked.
    for stolpec in stolpci:
        for vrstica in vrstice:
            if not napadeno(stolpec + vrstica, postavitev):
                return stolpec+vrstica
print(prosto_polje(["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"]))
# ********************************* SOLVING FOR 9 *********************************
def napadajoce_se(razpored):
    # All unordered pairs of queens in `razpored` that attack each other
    # (each pair reported once, later queen first).
    napadajoci_pari = []
    for i, kraljica1 in enumerate(razpored):
        for kraljica2 in razpored[:i]:
            if napada(kraljica1,kraljica2):
                napadajoci_pari.append((kraljica1,kraljica2))
    return napadajoci_pari
print(napadajoce_se(["a4", "b1", "b7"]))
def legalna(postavitev):
    # A layout is legal when it has exactly 8 queens and no attacking pair.
    if len(postavitev) == 8:
        if napadajoce_se(postavitev) == []:
            return True
        else:
            return False
    else:
        return False
print(legalna(["a4", "b1", "c5", "d8", "e2", "f7", "g3", "h3"]))
# ********************************* SOLVING FOR 10 *********************************
def sestavi_sam():
    # Try to build a layout of 4 mutually non-attacking queens, starting
    # from one random square.
    # NOTE(review): the x/y cursors advance permanently and never reset, so
    # the scan can exhaust the board and then the while loop spins forever;
    # the call below is commented out — confirm intent before enabling.
    razpored = [stolpci[random.randint(0,7)]+vrstice[random.randint(0,7)]]
    print(razpored)
    x = y = 0
    while len(razpored) < 4:
        for stolpec in stolpci[x:]:
            x += 1
            for vrstica in vrstice[y:]:
                y += 1
                spr = stolpec + vrstica
                switch = False
                for i in razpored:
                    if napada(i, spr):
                        switch = True
                if switch == False:
                    razpored.append(spr)
                    print('bum')
    return razpored
#print(sestavi_sam())
| true
|
8ea441a6644887576ba337a960ebd98c6e0a3706
|
Python
|
PulHome/infosystem
|
/pylint/tests/regPhoneNumbers/my.py
|
UTF-8
| 334
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
# Using regular expressions, find words repeated later in the input string,
# remove the repeats, and print the string without them.
# NOTE(review): the pattern `\b(\w+)\b(.+)\b(\1)\b` also drops everything
# BETWEEN the two occurrences (group 2 is not kept) and matches non-adjacent
# repeats — confirm this is the intended behaviour.
import re
s = input()
print(re.sub(r'\b(\w+)\b(.+)\b(\1)\b', r'\1', s))
| true
|
7ed429d4e8a8efe210c44f055b24d1e1fedfe345
|
Python
|
virusrussia/MTS-scrapping
|
/Regoin.py
|
UTF-8
| 4,602
| 2.671875
| 3
|
[] |
no_license
|
# Scrape MTS mobile tariffs for every region/city using Selenium; helper
# functions (regionsMenuOpen, regionsMenuClick, extractCites, tarifs, ...)
# come from the local MTS module via the star import.
from MTS import *
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
from bs4 import element
import pandas as pd
import json
import re
from time import sleep
import logging
logger = logging.getLogger(__name__)
fhandler = logging.FileHandler(filename='selen.log', mode='a')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
logger.setLevel(logging.INFO)
# Per-region data (column names are Russian and used at runtime — keep as-is)
regionsDF = pd.DataFrame(columns={"Регион", "class"})
# cities — every scraped city is accumulated here
cities = pd.DataFrame()
# Data for every tariff
df = pd.DataFrame(columns={"Название",
                           "Тип",
                           "Цена",
                           "Регион",
                           "Город",
                           "Описание",
                           "Опции"})
# Launch the browser
driver = webdriver.Chrome(executable_path="/Applications/chromedriver")
driver.get("https://mts.ru/personal/mobilnaya-svyaz/tarifi/vse-tarifi")
# Open the menu with regions and cities
regionsMenuOpen(driver)
# Fetch the list of all regions so we can later walk through them,
# open every city and inspect the tariffs
jsObj = BeautifulSoup(driver.page_source, features="lxml")
regions = jsObj.findAll("div", {"class": "mts16-popup-regions__group"})
for i in regions:
    names = i.findAll("a", {"class": ["mts16-popup-regions__link mts16-popup-regions__subregions-opener",
                                      "mts16-popup-regions__link mts16-popup-regions__subregions-opener is-active"]})
    for name in names:
        regionsDF.loc[len(regionsDF)] = {"Регион": name.get_text(),
                                         "class": name.attrs["class"][0]}
logger.info(f"Список регионов получен.\n {regionsDF}")
# We now have the list of regions. Reload the page
# and start visiting every region and city
driver.refresh()
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "js-user-region-title")))
try:
    # NOTE(review): range(3) processes only the first 3 regions — looks like
    # a debug leftover; the full loop is in the trailing comment.
    for region in range(3):#range(len(regionsDF)):
        regionsMenuOpen(driver)
        # Locate the region being processed in the opened menu and click it
        regionsMenuClick(driver, regionsDF.loc[region]["Регион"], regionsDF.loc[region]['class'])
        cities = pd.concat([cities, extractCites(driver,
                                                 regionsDF.loc[region]["Регион"])],
                           ignore_index=True)
        logger.info(f'Города в регионе:\n {cities[cities["Регион"]==regionsDF.loc[region]["Регион"]]}')
        regionsMenuClose(driver)
        # Iterate over every city in the region and collect its tariffs
        for i in range(len(cities[cities["Регион"] == regionsDF.loc[region]["Регион"]])):
            regionsMenuOpen(driver)
            regionsMenuClick(driver, regionsDF.loc[region]["Регион"],
                             regionsDF.loc[region]['class'])
            citesWebDriver = driver.find_elements(By.CLASS_NAME,
                                                  "mts16-popup-regions__link")
            for j in citesWebDriver:
                city = cities[cities["Регион"] == regionsDF["Регион"].loc[region]]["Город"].values[i]
                if j.get_attribute("innerText") == city:
                    logger.info(f'Изучаем город: {city}')
                    ActionChains(driver).move_to_element(j).click(j).perform()
                    break
            showMoreClick(driver)
            t = tarifs(driver, city, regionsDF.loc[region]["Регион"])
            logger.info(f'Для города {city} обнаружено {len(t)} тарифов')
            df = pd.concat([df, t])
finally:
    # NOTE(review): reset_index() without inplace/assignment is a no-op here.
    df.reset_index()
    df.to_excel("tarifs1.xlsx")
    driver.close()
| true
|
6c7733ce8093dcadd75adfe8739737a40339be0c
|
Python
|
rogerhoward/barb
|
/plugins/whoami/__init__.py
|
UTF-8
| 378
| 2.71875
| 3
|
[] |
no_license
|
import config
# Whoami plugin
def consider(message):
    """Respond to 'whoami' messages.

    Returns a greeting string containing the asker's username, or False
    when the message text does not contain 'whoami'.
    """
    if config.log:
        print('whoami considered')
    if 'whoami' not in message['text']:
        return False
    if config.log:
        print('whoami triggered')
    return 'you are {}'.format(message['user_name'])
| true
|
6b130fa2b59303805880b6e2cb7a5d3f4a2fefca
|
Python
|
thommms/hacker_rank
|
/algorithms/warmup/python/birthday_cake_candles.py
|
UTF-8
| 108
| 3.296875
| 3
|
[] |
no_license
|
# Birthday cake candles: print how many candles share the maximum height.
n = int(input())  # candle count line — consumed but otherwise unused
candles = [int(c) for c in input().strip().split(' ')]
print(candles.count(max(candles)))
| true
|
84277dde39dc9b9ba32d48ee811154c9ca1bf363
|
Python
|
kelraf/ifelif
|
/learn python 3/ifelif.py
|
UTF-8
| 872
| 4.375
| 4
|
[] |
no_license
|
# The program asks the user to input an integer value (student marks)
# and prints the corresponding letter grade.
# The values provided must be between 0 and 100; marks above 100 fall
# through to the inner "invalid" branch, negatives to the outer one.
# Caution: non-integer input raises ValueError at the int() conversion.
marks=int(input("Please enter Students marks to Grade:"))
if marks >= 0:
    if marks >= 0 and marks <=20:
        print('The grade is E')
    elif marks >= 21 and marks <= 45:
        print('The grade is D')
    elif marks >= 46 and marks <= 55:
        print('The grade is C')
    elif marks >= 56 and marks <= 80:
        print('The grade is B')
    elif marks >=81 and marks <=100:
        print('The grade is A')
    else:
        print('The value you entered is invalid')
else:
    print('The value you entered is invalid')
print('Done. Thank you')
| true
|
b5ea14cff8da127898c7f780610c79a11781dfe0
|
Python
|
BarryZM/zoubo
|
/3_DataAnalysis/0_Feature_engineering/101_My_Learn/99_My_Ensemble/2_Bagging/4_RandomForestRegressor.py
|
UTF-8
| 810
| 2.9375
| 3
|
[] |
no_license
|
# NOTE(review): load_boston was removed in recent scikit-learn releases —
# this script requires an older version; confirm the pinned dependency.
from sklearn.datasets import load_boston # a regression dataset (continuous target)
from sklearn.model_selection import cross_val_score # cross-validation helper
from sklearn.ensemble import RandomForestRegressor # random-forest regressor
boston = load_boston()
regressor = RandomForestRegressor(n_estimators=100, random_state=0, oob_score=True) # instantiate
regressor.fit(boston.data, boston.target)
regressor.score(boston.data, boston.target) # R^2 on the training data (result discarded)
# Without scoring='neg_mean_squared_error', regression scoring defaults to R^2
regressor = RandomForestRegressor(n_estimators=100, random_state=0, oob_score=True) # instantiate
scores = cross_val_score(regressor, boston.data, boston.target, cv=10
                         , scoring="neg_mean_squared_error" # negative mean squared error
                         )
| true
|
b50bf3c45edd0235b3729a1205d5cb5d7de8aa47
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03068/s848020132.py
|
UTF-8
| 139
| 3.4375
| 3
|
[] |
no_license
|
# Print S with every character that differs from its K-th character
# (1-based) replaced by '*'.
input()  # first line (the length N) is not needed
S = input()
K = int(input())
c = S[K -1]  # the reference character; K is 1-based
for i in S:
    if i != c:
        print('*', end='')
    else:
        print(i, end='')
| true
|
046bd214c938e63595f3d9fdeed82ecf1327b6b7
|
Python
|
ayushtiwari7112001/Rolling-_dice
|
/Dice_Roll_Simulator.py
|
UTF-8
| 640
| 4.0625
| 4
|
[] |
no_license
|
#importing module
import random
#range of the values of a die
min_val = 1
max_val = 6
#loop control: any answer other than yes/y ends the session
roll_again = "yes"
#main loop
while roll_again == "yes" or roll_again == "y":
    print("Roll the dices...")
    print("** The values are **")
    #generating and printing 1st random integer from 1 to 6
    print(random.randint(min_val,max_val))
    #generating and printing 2nd random integer from 1 to 6
    print(random.randint(min_val, max_val))
    #asking user to roll the dice again. Any input other than yes or y will terminate the loop
    roll_again=input("Roll the dices again (yes/no) or (y/n) ")
| true
|
5aa1c6d8f54a45977e9b897f2077c10e402b0459
|
Python
|
dengl11/Leetcode
|
/problems/longest_word_in_dictionary_through_deleting/solution.py
|
UTF-8
| 460
| 2.84375
| 3
|
[] |
no_license
|
class Solution:
    def findLongestWord(self, s: str, d: list[str]) -> str:
        """Return the longest word in d that is a subsequence of s.

        Ties are broken lexicographically (smallest wins). Returns "" when
        no word in d is a subsequence of s. Note: d is sorted in place.

        BUG FIX: the original annotated d as List[str] without importing
        List from typing, raising NameError at class-definition time; the
        builtin generic list[str] needs no import.
        """
        # Longest first, then alphabetical, so the first match is the answer.
        d.sort(key=lambda w: (-len(w), w))

        def is_subsequence(word):
            # Greedy two-pointer scan of s for the characters of word.
            i = 0
            for ch in word:
                while i < len(s) and s[i] != ch:
                    i += 1
                if i >= len(s):
                    return False
                i += 1
            return True

        for word in d:
            if is_subsequence(word):
                return word
        return ""
| true
|
25fcdd3fe5461ca410b97d24a6f73769f2e141e0
|
Python
|
TREMA-UNH/lstm-car
|
/src/data_preproc_qa.py
|
UTF-8
| 3,329
| 2.71875
| 3
|
[] |
no_license
|
import itertools
from utils import *
import csv
prefixlen = 40 # prefixlen must be >= maxlen!
def get_training_seqs(f: typing.io.BinaryIO, lines: int) -> Iterator[Tuple[List[Word], List[Word]]]:
    """Yield (query words, paragraph text) training pairs read from f.

    lines < 0 means read everything; otherwise only the first `lines` pairs.
    """
    if lines<0:
        return read_train_query_paras(f)
    else:
        return itertools.islice(read_train_query_paras(f), 0, lines)
# def get_test_seqs_next_word(f: typing.io.BinaryIO, lines: int) -> List[TestSeq]:
# 'Returns a list of ( sequences of words, next word )'
# paras = [para for para in read_test_query_paras(lines)]
# # Todo change from next word to next seq of words
#
# result = []
# for seq, truth, negatives in paras[0:lines]:
# result.append(TestSeq(sequence=seq,
# truth=truth[0],
# candidates=set([truth[0]]+[negtext[0]
# for negtext in negatives])))
# return result
# def read_query_paras_with_negatives(f, lines: int = None) -> Iterator[Tuple[List[Word], List[Word], List[List[Word]]]]:
# """ Read text of TREC-CAR paragraphs """
#
# rows = csv.reader(f, delimiter='\t')
# if lines is not None:
# rows = itertools.islice(rows, 0, lines)
# for row in rows:
# page, sectionpath, text = row[0:3]
# negtexts = row[4:]
# sectionpath = filter_field(sectionpath)
# text = filter_field(text)
# negtexts = list(map(filter_field, negtexts))
# if len(sectionpath) == 0 or len(text) == 0 or len(negtexts) == 0: continue
# yield (sectionpath, text, negtexts)
def tokenize(text):
    """Lower-case and word-tokenize text, keeping only tokens accepted by
    is_good_token (imported from utils)."""
    text = nltk.tokenize.word_tokenize(text.lower())
    return list(filter(is_good_token, text))
def read_test_qa(f, lines:int) -> Iterator[TestSeq]:
    """Read test queries from f; lines < 0 reads all, otherwise the first `lines`."""
    if lines<0:
        return read_test_qa_(f)
    else:
        return itertools.islice(read_test_qa_(f), 0, lines)
def read_test_qa_(f) -> Iterator[TestSeq]:
    """Read TREC-CAR paragraphs in wikistein test format, grouped by query.

    Yields one (query_id, query_tokens, candidates) tuple per query, where
    candidates is the list of TestCandidate rows seen for that query.
    NOTE(review): the annotation says Iterator[TestSeq] but tuples are
    yielded — confirm downstream expectations.
    """
    old_query_id = ""
    old_query_text = list()
    candidates = []
    for row in csv.reader(f, delimiter='\t'):
        query_id, page, sectionpath, paragraph_id, text, judgment = row
        if len(query_id) == 0 or len(text) == 0: continue
        if old_query_id == "":
            old_query_id = query_id
            old_query_text = tokenize(" ".join([page, sectionpath]))
        if query_id == old_query_id:
            candidates.append(TestCandidate(paragraph_id, tokenize(text)))
        else:
            print(old_query_id)
            yield (old_query_id, old_query_text, candidates)
            old_query_id = query_id
            old_query_text = tokenize(" ".join([page, sectionpath]))
            # BUG FIX: start a fresh candidate list holding only the current
            # row; previously candidates kept accumulating across queries.
            candidates = [TestCandidate(paragraph_id, tokenize(text))]
    if len(candidates) > 0:
        yield (old_query_id, old_query_text, candidates)
def read_train_query_paras(f) -> Iterator[Tuple[List[Word], List[Word]]]:
    """ Read text of TREC-CAR paragraphs from wikistein cluster format"""
    # Each TSV row: query_id, page, sectionpath, paragraph_id, text.
    for row in csv.reader(f, delimiter='\t'):
        query_id, page, sectionpath, paragraph_id, text = row
        if len(query_id) == 0 or len(text) == 0: continue
        query_text = tokenize(" ".join([page, sectionpath]))
        # NOTE(review): text is yielded untokenized here, unlike the test
        # readers — confirm downstream expects the raw string.
        yield (query_text, text)
| true
|
b0435ba38c12d26d345c491a7d22792a58c142e1
|
Python
|
niuyaning/PythonProctice
|
/06/19/test2.py
|
UTF-8
| 135
| 3.09375
| 3
|
[] |
no_license
|
# Positional arguments: call sites must pass values in declaration order.
def hobby(type, name):
    """Print a sentence naming the favourite `name` of kind `type`."""
    message = f"my favorite {type} is {name}"
    print(message)


hobby("旅行", "北京")
| true
|
8fcd777e805b18282876e064a55795041f7742ab
|
Python
|
davereid98133/acq4
|
/acq4/devices/LightSource/LightSource.py
|
UTF-8
| 2,138
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from acq4.devices.Device import *
from PyQt4 import QtCore, QtGui
import acq4.util.Mutex as Mutex
class LightSource(Device):
    """Simple device which reports information of current illumination source."""
    sigLightChanged = QtCore.Signal(object) # to be used upstream

    def __init__(self, dm, config, name):
        Device.__init__(self, dm, config, name)
        # self.lightsourceconfig = config.get('sources')
        # NOTE(review): self.lightsourceconfig is read by describe() and
        # describeAll() but its assignment is commented out — confirm where
        # it is actually set.
        self.sourceState = {}   # latest reported state; expected to hold a "leds" list
        self.lock = Mutex.Mutex()

    def describe(self):
        """Build descriptions of the configured sources that are currently ON.

        Python 2 code (iteritems / basestring).
        """
        self.description = []
        for name, conf in self.lightsourceconfig.iteritems():
            if not isinstance(conf, basestring):
                for x in range(len(self.sourceState["leds"])):
                    # Only LEDs whose reported state is 1 (on) and whose name matches.
                    if ((self.sourceState["leds"][x]["state"] == 1) and (self.sourceState["leds"][x]["name"] == name)):
                        desc = {}
                        desc['name'] = name
                        desc['state'] = 1
                        sourceDescription = []  # NOTE(review): built but never used in this method
                        for k, v in conf.iteritems():
                            desc[k] = v
                        self.description.append(desc)
        return self.description

    def getLightSourceState(self):
        # Raw per-LED state list as last reported.
        return self.sourceState["leds"]

    def describeAll(self):
        """Describe every configured source plus a trailing status entry."""
        self.descriptionAll = []
        for name, conf in self.lightsourceconfig.iteritems():
            if not isinstance(conf, basestring):
                desc = {}
                desc['name'] = name
                sourceDescription = []
                for k, v in conf.iteritems():
                    name = k
                    desc = {}
                    desc['name'] = k
                    for key, value in v.iteritems():
                        desc[key] = value
                    sourceDescription.append(desc)
                desc["description"] = sourceDescription
                self.descriptionAll.append(desc)
        statusItem = {"status": self.sourceState}
        self.descriptionAll.append(statusItem)
        return self.descriptionAll
| true
|
7f5919a77b44f46aabf086df3d3f448769651886
|
Python
|
coloneljuhziz/tceh_homeworks
|
/blog/blog.py
|
UTF-8
| 2,663
| 2.578125
| 3
|
[] |
no_license
|
from flask import Flask, render_template, request, redirect, url_for, abort
# from time import strftime
import json, datetime, re
# Single Flask application object shared by all routes below.
app = Flask(__name__)
class Post():
    """A single blog entry with author name, header, body text and timestamp."""

    def __init__(self, name, header, text, time):
        self.name = name
        self.header = header
        self.text = text
        self.id = None          # assigned on first save
        self.time = time

    def _own_attrs(self):
        # Instance attributes only: everything on self that the class itself
        # does not define.
        class_members = dir(self.__class__)
        return {key: getattr(self, key) for key in dir(self) if key not in class_members}

    def __repr__(self):
        return str(self._own_attrs())

    def to_dict(self):
        """Return the instance attributes as a plain dict (for JSON dumping)."""
        return self._own_attrs()

    def show_preview(self, num):
        """Return the first num characters of the body followed by '...'."""
        return self.text[:num] + '...'
last_id = 1  # next id to assign to a post that has none


def write():
    """Serialize all posts in posts_db to database.json, assigning ids to new posts."""
    posts_dump = []
    global last_id
    for post in posts_db:
        if post.id is None:
            post.id = last_id
            last_id += 1
        post_dict = post.to_dict()
        posts_dump.append(post_dict)
    print(posts_dump)
    j = json.dumps(posts_dump)
    # BUG FIX: use a context manager so the handle is flushed and closed
    # (the original left the file open).
    with open('database.json', 'w') as f:
        f.write(j)


def read():
    """Load posts from database.json and return them as a list of Post objects.

    Also advances the module-level last_id past the largest persisted id.
    """
    global last_id
    # BUG FIX: close the file after reading (original leaked the handle).
    with open('database.json') as f:
        j = f.read()
    posts_dump = json.loads(j)
    postz = []
    for d in posts_dump:
        p = Post(name=d['name'], text=d['text'], header=d['header'], time=d['time'])
        p.id = d['id']
        print(p)
        # Keep last_id strictly greater than any persisted id.
        if p.id >= last_id:
            last_id = p.id + 1
        postz.append(p)
    print(postz)
    return postz


posts_db = read()
@app.route('/')
def main():
    """Front page: render all posts, newest first."""
    return render_template('main.html', posts = reversed(posts_db))
@app.route('/post_entry', methods=['POST'])
def post_add():
    """Validate the submitted form and append a new Post to posts_db.

    Renders post_entry.html with an error message, or with
    error_message=None on success.
    """
    post_header = request.form['post_header']
    post_body = request.form['post_body']
    post_author = request.form['post_author']
    now = datetime.datetime.now()
    post_time = now.strftime("%Y-%m-%d %H:%M:%S")
    # BUG FIX: raw string — '\w' and '\s' are regex escapes, not Python
    # string escapes (SyntaxWarning on modern Python otherwise).
    # Author must be exactly two whitespace-separated words.
    m = re.match(r'^\w+\s\w+$', post_author)
    if post_body is None or post_body == '':
        error_message = 'No text = No post'
    elif m is None:
        error_message = 'Invalid name format'
    else:
        post_entry = Post(header=post_header, text=post_body, name=post_author, time=post_time)
        posts_db.append(post_entry)
        write()
        error_message = None
    return render_template('post_entry.html', error_message = error_message)
@app.route('/post/<int:id>')
def post_render(id):
    """Render a single post looked up by id; respond 404 when absent."""
    post_found = next((post for post in posts_db if post.id == id), None)
    if post_found is None:
        abort(404)
    return render_template('post.html', post = post_found)


if __name__ == '__main__':
    app.run(debug=True)
| true
|
c771dacb5e82a6723ff5acc8266cc3b9e4ae5f28
|
Python
|
BambooFlower/Math-Scripts
|
/Code/Monte Carlo Simulations/inverse_transform_sampling.py
|
UTF-8
| 677
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
from scipy.stats import expon
import matplotlib.pyplot as plt
import numpy as np
def Inverse_transform_sampling_Exponential(M, lambda_):
    """Draw M samples from an Exponential(rate=lambda_) distribution.

    Inverse transform sampling: U ~ Unif(0, 1) is pushed through the
    exponential inverse CDF (scipy's percent-point function).
    Returns a numpy array of M non-negative samples.
    """
    expon_x = []
    for i in range(M):
        u = np.random.uniform(0, 1)
        # BUG FIX: the rate parameter enters scipy's expon through
        # scale = 1/lambda_. The original passed lambda_ as the *loc*
        # argument and subtracted 1, which is only accidentally correct
        # when lambda_ == 1.
        x = expon.ppf(u, scale=1.0 / lambda_)
        expon_x.append(x)
    return (np.array(expon_x))
# Draw 10k samples and show their normalized histogram; it should match the
# Exponential(1) density.
exponential_random_samples = Inverse_transform_sampling_Exponential(
    M = 10000, lambda_ = 1)
counts, bins, ignored = plt.hist(
    exponential_random_samples,
    25,
    density = True,
    color = 'purple')
plt.title("""Inverse Transform Sampling from Exponential Distribution with
Unif(0,1) and Inverse CDF""")
plt.ylabel("Probability")
plt.show()
| true
|
b4610112170767ea891f390c0708495563cbcdc3
|
Python
|
kajibutest/gascrape
|
/sample_last_names.py
|
UTF-8
| 1,612
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/python
import argparse
import json
import os
import random
# Logic:
# 1) if word count < 2 => not cn
# 2) if word count = 2 => check against list
# 3) if word count > 2 => check last and second last + last against list
def classify(name, names):
    """Return True when name's surname (its last word, or last two words)
    appears in the lowercase set `names`.

    Single-word names never match.
    """
    parts = name.split()
    if len(parts) <= 1:
        return False
    surname = parts[-1].lower()
    if surname in names:
        return True
    if len(parts) == 2:
        return False
    # Try a two-word surname built from the last two tokens.
    compound = '%s %s' % (parts[-2].lower(), surname)
    return compound in names
def sample(args):
    """Randomly sample JSON records and split names into positive/negative files.

    Python 2 code (uses `print >>`). Walks input_dir, keeps each record with
    probability args.rate, classifies its 'name' against the surname set and
    appends it to the positive or negative output file.
    """
    with open(args.name_file, 'r') as fp:
        names = set(fp.read().splitlines())
    with open(args.positive_file, 'w') as pfp:
        with open(args.negative_file, 'w') as nfp:
            for dirpath, dirnames, filenames in os.walk(args.input_dir):
                for filename in filenames:
                    # Bernoulli subsampling at rate args.rate.
                    if random.random() > args.rate:
                        continue
                    item = json.load(open(os.path.join(dirpath, filename)))
                    if 'name' not in item or item['name'] is None:
                        continue
                    name = item['name']
                    is_cn = classify(name, names)
                    if is_cn:
                        print >> pfp, name.encode('utf-8')
                    else:
                        print >> nfp, name.encode('utf-8')
def main():
    """Parse command-line flags and run the sampler."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', required=True)
    parser.add_argument('--name_file', required=True)
    parser.add_argument('--rate', type=float, default=1)  # keep-probability per record
    parser.add_argument('--positive_file', required=True)
    parser.add_argument('--negative_file', required=True)
    sample(parser.parse_args())


if __name__ == '__main__':
    main()
| true
|
867008153fae7fb87ef449de504f9b8d0277fece
|
Python
|
aepuripraveenkumar/Data-structures-and-algorithms-in-python-by-michael-goodrich
|
/R-1.3.py
|
UTF-8
| 206
| 3.546875
| 4
|
[] |
no_license
|
'''Python program to find minmax using built-in functions'''
def minmax(*data):
    """Return (min, max) of the arguments; a single argument pairs with itself."""
    if len(data) == 1:
        return data + data
    return (min(data), max(data))


if __name__=='__main__':
    print(minmax(4,100,10,1))
| true
|
f0b78b0ff2c5a6f11514df4b7bb66c87db2b9009
|
Python
|
yasin-esfandiari/InvertedPendulumRobotBalancing
|
/utils.py
|
UTF-8
| 105
| 2.90625
| 3
|
[] |
no_license
|
def find_slope(up_rect, down_rect):
    """Return the slope of the line through two (x, y) points.

    Each argument is an indexable pair: index 0 is x, index 1 is y.
    Raises ZeroDivisionError when both points share the same x.
    """
    rise = up_rect[1] - down_rect[1]
    run = up_rect[0] - down_rect[0]
    return rise / run
| true
|
d18c543f2378b0c2eb66a21f91bf417f3042ba8a
|
Python
|
xiangcao/Leetcode
|
/Python_leetcode/222_count_complete_tree_nodes.py
|
UTF-8
| 2,729
| 3.375
| 3
|
[] |
no_license
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    # NOTE(review): three methods named countNodes are defined below; in
    # Python the last definition wins, so only the final one is active.
    # Python 2 code (print statements, integer '/').

    def countNodes(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Version 1: when the left-spine depth equals the right-spine depth
        the tree is perfect (2^d - 1 nodes); otherwise recurse on children.
        """
        left = right = root
        ldepth = rdepth = 0
        while left:
            ldepth += 1
            left = left.left
        while right:
            rdepth += 1
            right = right.right
        if ldepth == rdepth:
            # Perfect tree of depth ldepth.
            return (1 << ldepth) -1
        else:
            return 1 + self.countNodes(root.left) + self.countNodes(root.right)

    def countNodes(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Version 2: binary-search the leaf row for the FIRST empty leaf,
        using the bits of the leaf index as a root-to-leaf path.
        """
        def getNode(root, path, depth):
            # Follow `depth` bits of `path` (MSB first): 1 -> right, 0 -> left.
            while depth and root:
                depth -= 1
                if path & (1 << depth):
                    root = root.right
                else:
                    root = root.left
            #depth -= 1
            return root
        left = root
        depth = 0
        while left:
            depth += 1
            left = left.left
        if depth == 0 or depth == 1:
            return depth
        print "depth is ", depth
        begin, end = 0, (1 << (depth-1)) - 1
        # find the first empty leaf
        while begin < end:
            mid = begin + (end-begin)/2
            if getNode(root, mid, depth-1):
                begin = mid + 1
            else:
                end = mid
        print "begin is ", begin
        # if there is no empty leaf, begin will be the last non-empty leaf element
        if getNode(root, begin, depth-1):
            return (1 << (depth-1))-1 + begin + 1
        else:
            return (1 << (depth-1))-1 + begin

    def countNodes(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Version 3 (active): binary-search for the LAST non-empty leaf instead.
        """
        def getNode(root, path, depth):
            # Same bit-path descent as version 2.
            while depth and root:
                depth -= 1
                if path & (1 << depth):
                    root = root.right
                else:
                    root = root.left
            #depth -= 1
            return root
        left = root
        depth = 0
        while left:
            depth += 1
            left = left.left
        if depth == 0 :
            return 0
        print "depth is ", depth
        begin, end = 0, (1 << (depth-1)) - 1
        # find the last non-empty leaf
        while begin < end:
            mid = begin + (end-begin+1)/2
            if getNode(root, mid, depth-1):
                begin = mid
            else:
                end = mid - 1
        return (1 << (depth-1))-1 + begin + 1
| true
|
bff5ce19f00f0e7ee69d475423d9b74444e81281
|
Python
|
lydiaq233/MovieTheaterSeating
|
/launch.py
|
UTF-8
| 3,661
| 3.234375
| 3
|
[] |
no_license
|
import sys
from helper import *
class Seating:
    """10x20 movie-theater seat map; assigns seats greedily per request.

    seat[row][col] == 1 marks a seat that is occupied OR blocked as a
    distancing buffer; result maps request id -> list of seat labels.
    """

    def __init__(self,request,sum):
        self.smallest_available_row=0   # lowest row that may still have room
        # self.smallest_available_size=20
        self.seat = [ [0]*20 for i in range(10)]   # 10 rows x 20 seats, 0 = free
        self.request=request    # request id -> (party size - 1)
        self.total_n=sum        # total accepted head count
        self.result=dict()      # request id -> assigned seat labels

    def print_result(self):
        print(self.result)
        return self.result

    def print_request(self):
        print(self.request)

    def _check_first_available_seat(self,cur_row,n):
        """Find the first row/column from cur_row onward with n+1 free seats.

        Returns (row, col), or (-1, -1) when the theater is full.
        """
        # Skip completely full rows.
        while 0 not in self.seat[cur_row]:
            cur_row+=1
            if cur_row>9:
                return -1,-1
        s = self.seat[cur_row].index(0)
        temp_s= s
        # Slide right (or drop to the next row) until the group fits.
        while not self._check_no_unavailable_seat(cur_row,s,s+n):
            if s+ n >19:
                self.smallest_available_row=cur_row
                cur_row+=1
                # NOTE(review): no bounds check here; cur_row could reach 10
                # and raise IndexError on a pathological layout — confirm.
                s=self.seat[cur_row].index(0)
            else:
                s = self.seat[cur_row][temp_s:].index(0)
        if s+ n==19:
            self.smallest_available_row+=1
        return cur_row,s

    def _check_no_unavailable_seat(self,cur_row,start, end):
        """True when seats [start, end] in cur_row are in bounds and all free."""
        if end > 19:
            return False
        if 1 not in self.seat[cur_row][start:end+1]:
            return True
        return False

    #place the larger group first.
    def greedy_alg(self):
        """Assign seats largest party first, marking a one-seat buffer around
        each placed group (including adjacent rows)."""
        cur_row=0
        for r,n in sorted(self.request.items(), key = lambda item :item[1], reverse = True):
            cur_row, first_empty= self._check_first_available_seat(cur_row,n)
            if cur_row == -1:
                # NOTE(review): r is an int key, so "..."+r would raise
                # TypeError if this branch runs — confirm intent.
                print("Warning: Total number of customer exceeds the room capacity. Ignoring "+r+" and all the requests after")
                break
            # Prefer the lower row when it starts further left.
            if cur_row+1<10 and 0 in self.seat[cur_row+1]:
                first_empty_s = self.seat[cur_row+1].index(0)
                if first_empty>first_empty_s:
                    cur_row+=1
                    first_empty = self.seat[cur_row].index(0)
            # Seat labels are row letter (A-J) + 1-based column number.
            self.result[r] = [ chr(cur_row + 65) + str(first_empty+i) for i in range(1,n+2)]
            # Mark the group plus a surrounding buffer as unavailable.
            for i in range(-1,n+3):
                if first_empty+i>=0:
                    if first_empty+i>=20:
                        break
                    self.seat[cur_row][first_empty+i]=1
                    if cur_row-1>=0 and i not in [n+2,n+3]:
                        self.seat[cur_row-1][first_empty + i] = 1
                    if cur_row + 1 <= 9 and (i !=n+2 and i!=n+3):
                        self.seat[cur_row + 1][first_empty + i] = 1
            cur_row=self.smallest_available_row
def store_input(file):
    """Parse a request file ('Rxxx N' per line) into a Seating instance.

    Stops reading once the cumulative head count exceeds capacity
    (exceed_capacity / is_valid_amount come from the helper module).
    """
    temp_request=dict()
    sum = 0
    with open(file, 'r') as f:
        while True:
            contents = f.readline()
            if not contents or contents=='\n':
                break
            s = contents.split(" ")
            n= int(s[1])
            r = int(s[0][1:])   # numeric part of the 'Rxxx' request id
            sum+= n
            if exceed_capacity(sum,s[0]):
                break
            if is_valid_amount(n,s[0]):
                # Store size-1: the algorithm counts seats beyond the first.
                temp_request[r]= n-1
    return Seating(temp_request,sum)
def output_in_text(seating, file):
    """Write the seating assignment to output_<file>, one request per line.

    Lines are ordered by request number and formatted as
    'R<zero-padded id> <seat> <seat> ...'.
    """
    out_path = "output_" + file
    with open(out_path, 'w') as out:
        for request_id, seats in sorted(seating.result.items(), key=lambda item: item[0]):
            line = "R" + str(request_id).zfill(3) + " " + " ".join(seats)
            out.write(line + "\n")
def testing1():
    """Smoke test against test_input1.txt: run the algorithm end to end."""
    seating = store_input("test_input1.txt")
    seating.print_request()
    seating.greedy_alg()
    seating.print_result()
    output_in_text(seating,"test_input1.txt")


if __name__ == '__main__':
    # Input path from argv; results go to output_<input name>. testing1()
    # additionally reruns the fixed test file afterwards.
    seating = store_input(sys.argv[1])
    seating.print_request()
    seating.greedy_alg()
    seating.print_result()
    output_in_text(seating,sys.argv[1])
    testing1()
| true
|
e601e058040d9468a5c105bbbaae10cc1a147b16
|
Python
|
oykuykaya/atom_project
|
/main.py
|
UTF-8
| 88
| 3.734375
| 4
|
[] |
no_license
|
# Minimal branching demo: x is 5, so only the first condition holds.
x = 5
if x < 10:
    print ('Smaller')
if x > 20:
    print ('Bigger')
print ('Finis')
| true
|
7512854f6755ed841308f7008d8b682c9d89b8bf
|
Python
|
GeraldNDA/Advent-Of-Code-2019
|
/day20/day20_1.py
|
UTF-8
| 7,065
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# Imports
from mapping import Directions, Point
from aoc import AdventOfCode
# Input Parse
puzzle = AdventOfCode(year=2019, day=20)
puzzle_input = puzzle.get_input(raw=True)  # raw lines keep the map's leading spaces
# puzzle_input = [
# " A ",
# " A ",
# " #################.############# ",
# " #.#...#...................#.#.# ",
# " #.#.#.###.###.###.#########.#.# ",
# " #.#.#.......#...#.....#.#.#...# ",
# " #.#########.###.#####.#.#.###.# ",
# " #.............#.#.....#.......# ",
# " ###.###########.###.#####.#.#.# ",
# " #.....# A C #.#.#.# ",
# " ####### S P #####.# ",
# " #.#...# #......VT",
# " #.#.#.# #.##### ",
# " #...#.# YN....#.# ",
# " #.###.# #####.# ",
# "DI....#.# #.....# ",
# " #####.# #.###.# ",
# "ZZ......# QG....#..AS",
# " ###.### ####### ",
# "JO..#.#.# #.....# ",
# " #.#.#.# ###.#.# ",
# " #...#..DI BU....#..LF",
# " #####.# #.##### ",
# "YN......# VT..#....QG",
# " #.###.# #.###.# ",
# " #.#...# #.....# ",
# " ###.### J L J #.#.### ",
# " #.....# O F P #.#...# ",
# " #.###.#####.#.#####.#####.###.# ",
# " #...#.#.#...#.....#.....#.#...# ",
# " #.#####.###.###.#.#.#########.# ",
# " #...#.#.....#...#.#.#.#.....#.# ",
# " #.###.#####.###.###.#.#.####### ",
# " #.#.........#...#.............# ",
# " #########.###.###.############# ",
# " B J C ",
# " U P P ",
# ]
# puzzle_input = [
# " A ",
# " A ",
# " #######.######### ",
# " #######.........# ",
# " #######.#######.# ",
# " #######.#######.# ",
# " #######.#######.# ",
# " ##### B ###.# ",
# "BC...## C ###.# ",
# " ##.## ###.# ",
# " ##...DE F ###.# ",
# " ##### G ###.# ",
# " #########.#####.# ",
# "DE..#######...###.# ",
# " #.#########.###.# ",
# "FG..#########.....# ",
# " ###########.##### ",
# " Z ",
# " Z ",
# ]
class MazeObject(object):
    """Base class for everything occupying a position in the maze."""

    def __init__(self, pos=None):
        assert pos is not None
        self.pos = pos
        self.adj = set()

    def set_neightbours(self, adj):
        # (sic) original spelling kept — callers use this name.
        self.adj = set(adj)

    def next_pos(self):
        """All adjacent objects one may step onto (walls excluded)."""
        return [neighbour for neighbour in self.adj if not isinstance(neighbour, Wall)]

    def __repr__(self):
        return f"{type(self).__name__}(pos={self.pos})"

    @staticmethod
    def to_maze_object(pos, text):
        """Build the maze object matching a map character/label at pos."""
        if text == "#":
            return Wall(pos=pos)
        if text == ".":
            return Passage(pos=pos)
        if text.isalpha():
            special = {"AA": Entrance, "ZZ": Exit}
            if text in special:
                return special[text](pos=pos)
            return WarpPoint(text, pos=pos)
        raise ValueError(f"No maze object for {pos, text}")
class Wall(MazeObject):
    # Impassable: a wall never offers a next position.
    def next_pos(self):
        return []


class Passage(MazeObject):
    # Ordinary walkable tile; inherits default adjacency behaviour.
    pass


class WarpPoint(MazeObject):
    """A labelled portal tile; stepping on it continues from its twin."""
    def __init__(self, name, **kwargs):
        assert name is not None
        self.name = name
        self.warp_to = None   # twin portal, linked after the full maze is parsed
        super().__init__(**kwargs)

    def set_warp_to(self, other):
        # Twin must be the other portal with the same label.
        assert isinstance(other, WarpPoint) and other.name == self.name
        self.warp_to = other

    def next_pos(self):
        assert self.warp_to is not None, self
        # Continue from the twin's neighbours; skip walls and portal tiles.
        return [pos for pos in self.warp_to.adj if not isinstance(pos, (Wall, WarpPoint))]

    def __repr__(self):
        return f"WarpPoint(name={self.name}, pos={self.pos})"


class Entrance(MazeObject):
    # 'AA' tile: the search starts here.
    pass


class Exit(MazeObject):
    # 'ZZ' tile: terminal, offers no further moves.
    def next_pos(self):
        return []
# Actual Code
class Maze(object):
def __init__(self, maze_map):
self.maze, self.entrance = Maze.parse_maze(maze_map)
def path_to_exit(self):
paths = [(self.entrance,)]
while paths:
curr_path = paths.pop(0)
# added = 0
for pos in curr_path[-1].next_pos():
if isinstance(pos, Exit):
return curr_path + (pos,)
if pos not in curr_path:
paths.append(curr_path + (pos,))
# added += 1
# if not added:
# print(curr_path[-1], curr_path[-1].next_pos())
return tuple()
@staticmethod
def parse_maze(maze_map):
temp_maze = {}
warp_points = {}
for row_idx, row in enumerate(puzzle_input):
for col_idx, elem in enumerate(row):
curr = Point(x=col_idx, y=row_idx)
above_letter = Directions.NORTH + curr
beside_letter = Directions.WEST + curr
if elem.isalpha():
if above_letter in warp_points:
text = warp_points[above_letter] + elem
temp_maze[curr] = MazeObject.to_maze_object(curr, text)
temp_maze[above_letter] = MazeObject.to_maze_object(above_letter, text)
elif beside_letter in warp_points:
text = warp_points[beside_letter] + elem
temp_maze[curr] = MazeObject.to_maze_object(curr, text)
temp_maze[beside_letter] = MazeObject.to_maze_object(beside_letter, text)
else:
warp_points[curr] = elem
elif elem in "#.":
temp_maze[curr] = MazeObject.to_maze_object(curr, elem)
# Remove duplicates
maze = {}
warp_points = {}
entrance = None
for pos, elem in temp_maze.items():
valid_neighbours = [temp_maze.get(d + pos) for d in Directions]
valid_neighbours = [neighbour for neighbour in valid_neighbours if neighbour is not None]
if isinstance(elem, (WarpPoint, Entrance, Exit)):
# only neighbour is self
if len(valid_neighbours) == 1:
continue
maze[pos] = elem
elem.set_neightbours(valid_neighbours)
if isinstance(elem, WarpPoint):
if elem.name not in warp_points:
warp_points[elem.name] = elem
else:
other = warp_points[elem.name]
elem.set_warp_to(other)
other.set_warp_to(elem)
if isinstance(elem, Entrance):
entrance = elem
# print(warp_points, "AA" in warp_points)
return maze, entrance
# Result
maze = Maze(puzzle_input)
step_count = 0
for idx, pos in enumerate(maze.path_to_exit()):
    # Portal/entrance/exit tiles are not counted as walking steps.
    if isinstance(pos, (WarpPoint, Entrance, Exit)):
        print(pos)
        continue
    step_count += 1
    # print(idx, pos)
# Remove stepping on warp point and stepping off
print(step_count - 1)
| true
|
c2d3a908f2317dd85339147dc6342f96d696b33e
|
Python
|
pranay2063/PY
|
/Selenium/Element.py
|
UTF-8
| 348
| 2.953125
| 3
|
[] |
no_license
|
# search any element in a html page
from selenium import webdriver
browser = webdriver.Firefox()
type(browser)
browser.get('https://gabrielecirulli.github.io/2048/')
try:
    elem = browser.find_element_by_class_name('game-explanation')
    print('found <%s> element with this class name!' %(elem.tag_name))
except:
    # NOTE(review): bare except swallows every failure; catching
    # NoSuchElementException would be more precise.
    print('no such element')
| true
|
b1b47d8f1d1014302cc46196bbaf270902e03469
|
Python
|
amyfranz/Problem-2-fibonacci-
|
/main.py
|
UTF-8
| 387
| 3.515625
| 4
|
[] |
no_license
|
def fib(n, maxNum):
    """Return the Fibonacci-like sequence 1, 2, 3, 5, ... of length up to n,
    stopping early once the next term would exceed maxNum."""
    if n == 1:
        return [1]
    if n == 2:
        return [1, 2]
    seq = fib(n - 1, maxNum)
    nxt = seq[-1] + seq[-2]
    if nxt > maxNum:
        return seq
    seq.append(nxt)
    return seq


def getSum(n, maxNum):
    """Sum the even terms of fib(n, maxNum)."""
    return __builtins__['sum'](term for term in fib(n, maxNum) if term % 2 == 0) if isinstance(__builtins__, dict) else sum(term for term in fib(n, maxNum) if term % 2 == 0)


print(getSum(100, 4000000))
| true
|
b34ee689ed03bf5c1ea851798d758413dfd91c08
|
Python
|
vishnutejakandalam/python_learning_2020
|
/first.py
|
UTF-8
| 145
| 3.171875
| 3
|
[] |
no_license
|
# Read two values as strings, convert to int and print their sum.
a = input("Enter the value of a: ")
b = input("Enter the value of nonsense: ")
# input scanf()
c = int(a)+int(b)
print("hello world! ",c)
| true
|
251a2dd167b6f4c5f49048eea2a77f962b5d6bf2
|
Python
|
AsiganTheSunk/GerardoElMagias
|
/core/game/battle/enemy/set_generator.py
|
UTF-8
| 790
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from random import choice
from core.units.constants.unit_type import UnitType
class EnemySetGenerator:
    """Builds random enemy line-ups scaled to the current boss level."""

    @staticmethod
    def generate_set(group_size, enemy_pool):
        """Return group_size enemies drawn (with replacement) from enemy_pool."""
        # enemy_pool = [EnemyType.BANDIT]
        return [choice(enemy_pool) for _ in range(group_size)]

    def get_enemy_set(self, boss_level, group_size):
        """Pick the enemy pool for the given boss level and generate the group."""
        if boss_level > 3:
            return self.generate_set(group_size, [UnitType.LIZARD, UnitType.BONE_WIZARD])
        if boss_level > 1:
            return self.generate_set(group_size, [UnitType.BONE_WIZARD, UnitType.BONE_WIZARD])
        return self.generate_set(group_size, [UnitType.BANDIT, UnitType.BANDIT])
| true
|
61adb8d17c51be1c7388b625dc806af8fef4743b
|
Python
|
alecone/VNCC_Server_Python
|
/prova_server.py
|
UTF-8
| 1,125
| 2.921875
| 3
|
[] |
no_license
|
import socket
from threading import Thread
from socketserver import ThreadingMixIn
import os
import json
import sys
import errno
ip = '192.168.0.18'   # address the server binds to
port = 2018

class ClientThread(Thread):
    """One thread per accepted client connection."""
    def __init__(self,ip,port,sock):
        Thread.__init__(self)
        self.ip = ip
        self.port = port
        self.sock = sock
        print ("New ClientThread started for ",ip,":",str(port))

    def run(self):
        # Placeholder: the thread does no real work yet.
        print('Thread succefully started. Now i will shut down')


if __name__ == '__main__':
    tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick rebinds of the port after a restart.
    tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    tcpsock.bind((ip, port))
    while True:
        tcpsock.listen(5)
        print ("Waiting for incoming connections... on IP/PORT = ", ip, "/", port)
        (conn, (ip_client,port_client)) = tcpsock.accept()
        print('Got connection from ', ip_client, ', ',port_client)
        new_client = ClientThread(ip_client, port_client, conn)
        new_client.start()
        # NOTE(review): shutting down the listening socket inside the accept
        # loop stops further writes after the first client — confirm intent.
        print('Shutting down socket')
        tcpsock.shutdown(socket.SHUT_WR)
        print('Socket disconnection from server')
| true
|
807076049d0cfdfc2d8bc86b4ee2b0135a06a36e
|
Python
|
sauravp/snippets
|
/python/plus_one.py
|
UTF-8
| 703
| 3.375
| 3
|
[] |
no_license
|
# https://www.interviewbit.com/problems/add-one-to-number
class Solution:
    # @param A : list of integers
    # @return a list of integers
    def plusOne(self, A):
        """Add one to the number whose decimal digits are stored in A
        (most significant first) and return the digits without leading zeros.

        Note: A is modified in place during the computation.
        """
        idx = len(A) - 1
        carried = False
        # Walk from the least significant digit, propagating the carry.
        while idx >= 0:
            if A[idx] == 9:
                A[idx] = 0
                carried = True
            else:
                A[idx] += 1
                carried = False
                break
            idx -= 1
        if idx == -1 and carried:
            # Every digit was 9: grow the number by one digit.
            A = [1] + A
        return self._strip(A)

    def _strip(self, A):
        # Drop leading zeros left over from the original input.
        limit = len(A)
        step = 0
        trimmed = A
        while step < limit and trimmed[step] == 0:
            trimmed = trimmed[1:]
        return trimmed
| true
|
b0fc6db5cd7b9c6283c6f66aaf1485adac0bf3a2
|
Python
|
gonzaponte/Python
|
/funny/turtle.py
|
UTF-8
| 2,155
| 3.15625
| 3
|
[] |
no_license
|
from check import *
from math import *
import swampy.TurtleWorld as tw
def maketurtle():
    ''' This is a function to establish the turtle and its properties.

    Python 2 code (print statement, swampy.TurtleWorld).
    Returns the (world, turtle) pair.
    '''
    w = tw.TurtleWorld()
    t = tw.Turtle()
    w.minsize(1000,1000)
    t.set_color('orange')
    t.set_pen_color('purple')
    t.delay= 0.00001
    # Pen up, shift the start point 300 px back, pen down.
    tw.pu(t)
    tw.lt(t)
    tw.bk(t,300)
    tw.rt(t)
    tw.pd(t)
    print '\n\nType quit() to exit.\n\n'
    return w,t
def regular(t,n):
    ''' This is a function to make any regular polygon with a turtle and the number of sides.'''
    # Reject non-integers and polygons with fewer than 3 sides
    # (isint/wrong come from the check module).
    if n<3 or not isint(n):
        wrong(regular)
    angle = 360./n
    # Side length proportional to the turn angle in radians (radius ~100).
    step = 100*angle*(2*pi/360)
    for i in range(n):
        t.fd(step)
        t.lt(angle)
    #t.die()
def square(t):
    # 4-sided regular polygon.
    regular(t,4)

def circle(t):
    # Approximate a circle with a 1000-gon.
    regular(t,1000)
    #arc( t, 50, 360 )

def pentagon(t):
    regular(t,5)
def arc( t, r=10, theta=30 ):
    """Draw an arc of radius r spanning theta degrees with turtle t.

    The arc is approximated by one straight segment per degree.
    """
    length = (2*pi/360)*r*theta
    # n = int(length/3) + 1
    # step = length / n
    # angle = float(theta) / n
    angle = 1
    # BUG FIX: force an integer segment count — theta/angle is a float
    # under Python 3 true division and range() rejects floats.
    n = int(theta/angle)
    step = length/n
    for i in range(n):
        # BUG FIX: advance one segment (step) per iteration; the original
        # called t.fd(length), drawing the whole arc length n times.
        t.fd(step)
        t.lt(angle)
def petal( t, r=10, theta=30 ):
    # Two mirrored arcs joined tip-to-tip form one petal.
    for i in range(2):
        arc( t, r, theta )
        t.lt(180-theta)

def flower( t, n=6, r=10, theta=30):
    # n petals evenly rotated around the current heading.
    for i in range(1,n+1):
        petal(t,r,theta)
        t.lt(360/n)
def snowflake(t,step0=64,min=8):
    """Draw a Koch-style snowflake: three recursive sides of initial length
    step0, subdividing segments until they reach length `min`.

    NOTE(review): the error branch calls sys.exit but sys is never imported
    in this module, and the parameter `min` shadows the builtin.
    """
    def side(step):
        if step==min:
            # Base case: draw the four-segment Koch motif.
            t.fd(step)
            t.rt(60)
            t.fd(step)
            t.lt(120)
            t.fd(step)
            t.rt(60)
            t.fd(step)
        elif step<min:
            sys.exit('Turtle step began lower than minimum step')
        else:
            # Recurse on each of the four half-length sub-segments.
            step = step/2
            side(step)
            t.rt(60)
            side(step)
            t.lt(120)
            side(step)
            t.rt(60)
            side(step)
    if step0<min:
        wrong(snowflake)
    for i in range(3):
        side(step0/2)
        t.lt(120)
def spiral(t,r=0.01,N=1000):
    # Half-circle arcs whose radius doubles each time.
    for i in range(N):
        arc(t,r,180)
        r *= 2

def spiral2(t,r=0.01,N=100000):
    # 1-degree arcs with a slowly growing radius: a smooth spiral.
    for i in range(N):
        arc(t,r,1)
        r *= 1 + 1./N
| true
|
9c3d9dbb6b1467834108668de8547bd69e6f823f
|
Python
|
xwmtp/reverse-bot-adventure
|
/Bot/Logger.py
|
UTF-8
| 1,045
| 2.65625
| 3
|
[] |
no_license
|
from Bot.Config import Configs
import logging
import os
def initalize_logger():
    """(sic) Configure the root logger: a console handler at the configured
    level plus ERROR.log (warnings and up) and INFO.log file handlers
    under logs/."""
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.handlers.clear()   # make re-initialization idempotent
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    if not os.path.exists('logs'):
        os.mkdir('logs')
    def add_logging_handler(handler, level):
        # Attach a handler with the shared formatter at the given level.
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # console handler
    add_logging_handler(logging.StreamHandler(), Configs.get('console_logging_level'))
    # file handler (errors)
    add_logging_handler(logging.FileHandler("logs/ERROR.log", "a"), logging.WARNING)
    add_logging_handler(logging.FileHandler("logs/INFO.log", "a"), logging.INFO)
def update_logging_levels(new_level):
    """Set the first two root-logger handlers (console + first file handler)
    to new_level; log an error instead of raising when they are missing."""
    try:
        for handler_index in (0, 1):
            logging.getLogger().handlers[handler_index].setLevel(new_level)
    except Exception as e:
        logging.error(f"Could not update logging level: {repr(e)}")
| true
|
d007ddffaa695beb846b05a4a6cfe1a5ef12c18b
|
Python
|
CPFrog/AI_innovation_Lecture
|
/Assignments/02_Jupyter.py
|
UTF-8
| 6,697
| 4.28125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# ### 문제 1 - input, float()
# * '나' 는 통계학과 재학중이다.
# * 학점은 140학점을 이수해야 하며, 평점은 2.5이상이 되어야 졸업이 가능하다.
# * if문과 A and B 연산자를 이용하여 졸업이 가능한지, 졸업이 안되는지 확인해 보자.
# * 학점과 평점을 입력받는다.
# * 140이상, 2.5이상 졸업
# * 그외 조건 졸업이 힙듭니다.
# * 학점과 평점은 정수가 아니므로 float( input() )의 형태로 입력받아야 한다.
# In[1]:
# Problem 1: graduation requires >= 140 completed credits AND GPA >= 2.5.
credit = float(input("이수한 학점을 입력하세요 : "))
avg = float(input("평점을 입력해 주세요 : "))
# BUG FIX: the original printed "graduation is hard" for students who DO
# meet both requirements; the commented-out inverted condition in the
# source shows the intended logic.
if credit >= 140 and avg >= 2.5:
    print("졸업 가능합니다.")
else:
    print("졸업이 힘듭니다.")
# ### 문제 2 - 클래스
# * 아래 계산기 클래스에 곱하기 기능을 추가하시오.
# In[4]:
class CalFnc2 :
    """A tiny accumulator calculator: keeps a running result and mutates it."""

    def __init__(self, result):
        self.result = result   # running total

    def plus(self, num):
        """Add num to the running result and return it."""
        self.result = self.result + num
        return self.result

    def sub(self, num):
        """Subtract num from the running result and return it."""
        self.result = self.result - num
        return self.result

    # --------------------------------------
    def mul(self, num):
        """Multiply the running result by num and return it."""
        self.result = self.result * num
        return self.result
# In[5]:
a = CalFnc2(0) # one calculator
## initial value (result)
print(a.result)
## addition
print(a.plus(5))
# multiplication
print(a.mul(4))
# ### 문제 3 - while문을 이용하여 로그인
# * while문을 이용하여 5번까지만 id가 있는지 확인하는 프로그램을 작성하시오.
# * 초기의 id는 사용자가 정한다.
# * 매번 id를 입력받는다.
# * 있으면 있어요. 없으면 id가 없어요. 매번 출력한다.
# * id가 있는지 확인이 되면 break를 이용하여 벗어난다.
# In[6]:
# Problem 3: give the user up to 5 attempts to enter a known id.
ori_id = 'toto'
num=0
while num != 5 :
    input_id = input("ID를 입력해주세요 : ")
    if ori_id==input_id :
        print("ID가 있습니다.")
        break;
    else :
        num += 1
        print("ID가 없습니다.")
# ### 문제 4
# * 세 개의 단어를 입력 받아, 맨 마지막줄에 '각각의 단어의 뒤에서 두번째 알파벳'을 연결하여 출력하는 프로그램을 작성하시오.
# In[1]:
# Problem 4: read three words and join each word's second-to-last letter.
word1 = input("첫번째 단어를 입력해 주세요. ")
word2 = input("두번째 단어를 입력해 주세요. ")
word3 = input("세번째 단어를 입력해 주세요. ")
ac = word1[-2] + word2[-2] + word3[-2]
print(ac)
# ### 문제 5
# * 세개의 상품과 가격을 아래와 같이 입력하여 text파일을 만들자.
# * mydata.txt
# * 상품1, 5000
# * 상품2, 10000
# * 상품3, 100000
# * 파일을 불러와서 전체 내용을 출력하시오.
# * open() 함수 이용
# In[2]:
# Problem 5: write three product lines to mydata.txt, then print the file back.
w = open('mydata.txt', 'w')
w.write('상품1. 5000\n')
w.write('상품2. 10000\n')
w.write('상품3. 1000\n')
w.close()
r = open('mydata.txt', 'r')
print(r.read())
r.close()
# ### 문제 6
# * 위의 파일에 상품 4와 가격을 입력받아 추가하는 프로그램을 작성하시오.
# * (hint) 'a' 모드 이용
# In[3]:
# Problem 6: append a user-supplied product line ('a' mode), then reprint the file.
a = open('mydata.txt', 'a')
d=input('추가할 상품과 가격을 입력하세요 : ')
a.write(d+'\n')
a.close()
r = open('mydata.txt', 'r')
print(r.read())
r.close()
# ### 문제 7
# * 하나의 이미지를 복사하는 프로그램을 작성하시오.
# * 복사된 이미지에 대한 파일을 올리도록 한다.
# In[11]:
# List the files in the current working directory.
import os
path_dir=os.getcwd()
f_list = os.listdir(path_dir)
f_list
# In[10]:
# Problem 7: binary-copy one file to another in 1 KiB chunks.
file1 = input("원본 파일 입력 : ")
file2 = input("복사 파일 입력 : ")
infile = open(file1, 'rb')
outfile = open(file2, 'wb')
while True :
    copy_buffer = infile.read(1024)
    if not copy_buffer:
        # EOF reached.
        break
    outfile.write(copy_buffer)
infile.close()
outfile.close()
print("복사 완료")
# ### 문제 8
# * 방문하고 싶은 url 5개를 리스트로 만들고,
# * 희망하는 사이트를 선택지에서 선택하여 해당 사이트를 열어주는 프로그램을 작성하시오.
# * ex) 희망하는 웹페이지를 선택하세요.
# * 1. 네이버 2. 다음 3. 구글 4. lms 5. 구글원격데스크톱
# * 입력은 모두 숫자로만 이뤄진다고 가정한다.
# In[27]:
# Problem 8: open one of five bookmarked sites chosen by number.
import webbrowser
url_list = ['https://naver.com', 'https://daum.net', 'https://google.com',
            'http://lms.ictcog.kr', 'https://remotedesktop.google.com/support']
url_name = ['네이버', '다음', '구글', 'lms', '구글 원격 데스크톱']
print('방문하고자 하는 웹 사이트를 선택하세요.')
print(' 1. 네이버 2. 다음 3. 구글 4. lms 5. 구글 원격 데스크톱')
select=int(input("희망하는 웹사이트 : "))
webbrowser.open(url_list[select-1])
# ## Advanced problem.
# ### Extend problem 8 so the code below runs correctly with these rules:
#
# * The site can be chosen either by menu number or by site name
#   (never both at once).
#   (valid input) 1 , 다음 // (invalid input) 1. 네이버
#
#
# * If the wanted site is not listed, let the user type a URL directly
#   and open that URL in the browser.
#   (hint) what about adding menu entry 6..??
# In[4]:
import webbrowser
url_list = ['https://naver.com', 'https://daum.net', 'https://google.com',
            'http://lms.ictcog.kr', 'https://remotedesktop.google.com/support']
url_name = ['네이버', '다음', '구글', 'lms', '구글 원격 데스크톱']
print('방문하고자 하는 웹 사이트를 선택하세요.')
print(' 1. 네이버 2. 다음 3. 구글 4. lms 5. 구글 원격 데스크톱 6. 주소 직접 입력')
select=input("희망하는 웹사이트 : ")
# NOTE(review): this lexicographic string comparison is only a rough
# "is it numeric" test — e.g. '3a' also passes and then int() raises
# ValueError; str.isdigit() would be the robust check.
if select>='0' and select<='9' :
    select=int(select)
    if select>6 or select<1 : print('번호가 잘못 입력되었습니다.')
    elif select==6 :
        # Option 6: user supplies the URL directly.
        url=input("희망하는 웹사이트 주소를 입력하세요 : ")
        webbrowser.open(url)
    else:
        webbrowser.open(url_list[select-1])
else:
    # Non-numeric input: match against the site names; index 6 acts as
    # the "nothing matched" sentinel reached only when no break fired.
    for i in range(0,7) :
        if i==6 : print('해당 이름의 웹사이트는 선택지에 없습니다.')
        elif select=='주소 직접 입력' :
            url=input("주소를 입력하세요 : ")
            webbrowser.open(url)
            break
        elif i<5 and url_name[i]==select:
            webbrowser.open(url_list[i])
            break
| true
|
7dc74d390921e0f7fba67479da4e9341ccfe2821
|
Python
|
WalterKahn4/Python
|
/Les7/Lesnotes.py
|
UTF-8
| 838
| 3.65625
| 4
|
[] |
no_license
|
# Count up from 7 in steps of 7; the loop exits with i == 42, the first
# multiple of 7 greater than 37.
i = 7
while i <= 37:
    i += 7
def hello():
    """A greeting service: repeatedly ask for a name, then greet that user.

    Loops forever; interrupt the process (Ctrl-C) to stop.
    """
    while True:
        print('hello {}'.format(input('What is your name?')))
def cities2():
    """Collect city names from the user until a blank line is entered.

    Returns the list of entered cities (possibly empty).
    """
    collected = []
    while True:
        entry = input('Enter city: ')
        if entry == '':
            return collected
        collected.append(entry)
def before0():
    """Print, for each row of the module-level ``table``, the numbers that
    come before the first 0 in that row (one output line per row).

    NOTE(review): relies on a global ``table`` that is not defined in this
    file chunk — presumably a list of number rows; confirm before use.
    """
    for row in table:
        for num in row:
            if num == 0:
                break  # stop this row at the first 0
            print(num, end=' ')
        print()  # newline after each row
def sum():
    """Read integers until the user types 'quit', then print their total.

    NOTE: once defined, this shadows the built-in sum(); the name is kept
    for compatibility with existing callers.
    """
    total = 0
    while True:
        entry = input('next int: ')
        if entry == 'quit':
            break
        total += int(entry)
    print(total)
# Build a small list of employee names.
employee = []
employee.append('Yin')
employee.append('Waad')
# Bare indexing expressions: they only display a value in a REPL/notebook
# and have no effect when run as a script.
employee[0]
employee[1]
# NOTE(review): 'emplyee' looks like a typo of 'employee'; as written it
# creates a new, unused empty dict.
emplyee = {}
| true
|
5c94844229fb13119f3f606cbaa2169cf03f2cbe
|
Python
|
Frendyuyu/Python
|
/Mate.py
|
UTF-8
| 1,954
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class Materials(object):
    """Base record for a stock material.

    Attributes:
        mate_code: material code (identifier).
        mate_type: material type/category.
        mate_quantity: quantity on hand.

    The stock-movement operations below are placeholders; each takes the
    receiving department (depa_in) and the issuing department (depa_out).
    """
    def __init__(self, mate_code, mate_type, mate_quantity):
        """Store the code, type and quantity of the material."""
        self.mate_code = mate_code          # material code
        self.mate_type = mate_type          # material type
        self.mate_quantity = mate_quantity  # material quantity
    def warehousing(self, depa_in, depa_out):
        """Receive material into the warehouse (not yet implemented)."""
        pass
    def retreating(self, depa_in, depa_out):
        """Return material to stock/supplier (not yet implemented)."""
        pass
    def rework(self, depa_in, depa_out):
        """Send material for repair/rework (not yet implemented)."""
        pass
    def balance(self, depa_in, depa_out):
        """Report the remaining balance (not yet implemented)."""
        pass
# Driver power supply (Materials subclass)
class Driver(Materials):
    """A driver power-supply material, tracked per production batch."""
    def __init__(self, mate_code, mate_type, mate_quantity, batch):
        """Initialise the base material fields, then record the batch."""
        super().__init__(mate_code, mate_type, mate_quantity)
        self.batch = batch  # production batch/lot identifier
# Light bar (Materials subclass)
class LightBar(Materials):
    # Placeholder subclass: inherits all behaviour from Materials.
    pass
# Lamp beads (Materials subclass)
class LampBed(Materials):
    # Placeholder subclass: inherits all behaviour from Materials.
    pass
# Powder tube (Materials subclass)
class PowderTub(Materials):
    # Placeholder subclass: inherits all behaviour from Materials.
    pass
# End plug (Materials subclass)
class Plugging(Materials):
    # Placeholder subclass: inherits all behaviour from Materials.
    pass
| true
|
66f8c3af79661da45c33d0fe5b97f3133d276928
|
Python
|
x31eq/lumatone_utils
|
/apply_scheme.py
|
UTF-8
| 2,358
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
"""
Apply a color scheme to a Lumatone mapping file
"""
import argparse, sys
# Lumatone geometry: 5 key boards of 56 keys each.
N_BOARDS = 5
KEYS_PER_BOARD = 56
parser = argparse.ArgumentParser(
    description='Apply a color scheme to a Lumatone .ltn mapping')
parser.add_argument('-t', '--tonic', type=int, nargs='?', default=0,
    help='MIDI reference pitch')
parser.add_argument('-g', '--gap', type=int, nargs='?', default=0,
    help='Notes to offset from one channel to another')
parser.add_argument('-i', '--input', nargs='?',
    help='file to read the color scheme from')
parser.add_argument('-o', '--output', nargs='?',
    help='file to write the altered mapping to')
parser.add_argument('mapping_filename')
args = parser.parse_args()
# Color scheme: one color per scale step, read from -i or from stdin.
# Blank lines are dropped.
if args.input:
    with open(args.input) as scheme_file:
        scheme = list(filter(None,
            (line.strip() for line in scheme_file)))
else:
    scheme = list(filter(None, (line.strip() for line in sys.stdin)))
# The scheme repeats every `period` notes.
period = len(scheme)
# NOTE(review): mapping_filename is a required positional, so this else
# branch (mapping from stdin) is unreachable as the parser is configured.
if args.mapping_filename:
    with open(args.mapping_filename) as mapping:
        lines = list(filter(None, (line.strip() for line in mapping)))
else:
    lines = list(filter(None,
        (line.strip() for line in sys.stdin)))
# First pass: collect each key's pitch and MIDI channel per board.
pitches = [[0] * KEYS_PER_BOARD for _ in range(N_BOARDS)]
channels = [[0] * KEYS_PER_BOARD for _ in range(N_BOARDS)]
board = 0
for line in lines:
    if line.startswith('[Board'):
        # NOTE(review): assumes a single-digit board index at position 6,
        # e.g. "[Board3]" — fine for the 5-board Lumatone.
        board = int(line[6])
    elif line.startswith('Key_') and line.count('=') == 1:
        key, pitch = line[4:].strip().split('=')
        pitches[board][int(key)] = int(pitch)
    elif line.startswith('Chan_') and line.count('=') == 1:
        key, channel = line[5:].strip().split('=')
        channel = int(channel)
        if channel:
            channels[board][int(key)] = channel
# Second pass: rewrite only the Col_ lines, copying everything else.
board = 0
# NOTE(review): the output handle is never closed explicitly; the process
# exit flushes it, and sys.stdout must not be closed anyway.
output = open(args.output, 'w') if args.output else sys.stdout
for line in lines:
    if line.startswith('[Board'):
        board = int(line[6])
    if line.startswith('Col_') and line.count('=') == 1:
        key, _color = line[4:].strip().split('=')
        key = int(key)
        # Channel numbers are 1-based in the file; 0 means "unset" and
        # maps to -1 here, offsetting the pitch by -gap.
        channel = channels[board][key] - 1
        pitch = pitches[board][key] + (channel * args.gap)
        # Wrap the (tonic-relative) pitch into the repeating scheme.
        color = scheme[(pitch - args.tonic) % period]
        line = "Col_{}={}".format(key, color)
    output.write(line + '\n')
| true
|
7b784f260f46c5b3ca9fbde0f27ad4bd927d6fb8
|
Python
|
abhikushwaha/Hacktoberfest2019
|
/Python/nth_fibonacci.py
|
UTF-8
| 297
| 4.40625
| 4
|
[
"MIT"
] |
permissive
|
#Python Program to calculate the nth Fibonacci Number
from math import sqrt
def fibonacci(n):
    """Return the n-th Fibonacci number exactly (F(0)=0, F(1)=1).

    Bug fix: the original used Binet's closed form with floating point,
    which returns wrong values once n is around 71 because a double only
    carries ~15-16 significant digits.  Iterating with Python's arbitrary
    precision integers is exact for any n, still O(n).
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Interactive driver: read n and print F(n).
your_number=int(input("Enter the value so that we can calculate its corresponding Fibonacci Number:"))
print(fibonacci(your_number))
| true
|
e994adcaa456fb9fe2687e39b5c8d691b221502d
|
Python
|
HaberkornJonas/Travel-Order-Resolver_Web_T-AIA-901
|
/backend/infrastructure/LanguageProcessing.py
|
UTF-8
| 16,126
| 2.84375
| 3
|
[] |
no_license
|
# Imports
import spacy
from enum import Enum
from spacy.symbols import PROPN, NOUN, CCONJ, ADP, VERB
import numpy as np
class RelationDirection(Enum):
    """Which end of the trip a cue word assigns to a place name."""
    NONE = 1
    START = 2
    DEST = 3
class RelationStrength(Enum):
    """Confidence of a direction cue; STRONG cues override WEAK ones."""
    NONE = 1
    WEAK = 2
    STRONG = 3
class LanguageProcessing:
    """Extract an ordered list of cities from a French travel request.

    Uses spaCy's French NER to find place names, then hand-written
    dependency-tree heuristics (the *_Relation tables below) to decide,
    for each place, whether it is a trip origin (START) or destination
    (DEST) and how confident that cue is (WEAK/STRONG).
    """
    class WordSense:
        """A cue word with the direction and strength it implies."""
        def __init__(self, word: str, direction: RelationDirection, strength: RelationStrength):
            self.word = word
            self.direction = direction
            self.strength = strength
        def __str__(self):
            return f"Word '{self.word}' has a direction of {self.direction.name} and a {self.strength.name} strength."
        def __repr__(self):
            return f"Word '{self.word}' has a direction of {self.direction.name} and a {self.strength.name} strength."
    class LinkedWordSense:
        """A two-word cue (word + its 'fixed' dependent), e.g. "à partir"."""
        def __init__(self, word: str, fixedWord: str, direction: RelationDirection, strength: RelationStrength):
            self.word = word
            self.fixedWord = fixedWord
            self.direction = direction
            self.strength = strength
        def __str__(self):
            return f"Words '{self.word}' fixed with '{self.fixedWord}' has a direction of {self.direction.name} and a {self.strength.name} strength."
        def __repr__(self):
            return f"Words '{self.word}' fixed with '{self.fixedWord}' has a direction of {self.direction.name} and a {self.strength.name} strength."
    # CCONJ links: 'cc'_child
    CCONJ_Relation = [
        # Start
        WordSense("depuis", RelationDirection.START, RelationStrength.STRONG),
        # Destination
        WordSense("puis", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("et", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("enfin", RelationDirection.DEST, RelationStrength.STRONG)
    ]
    # NOUN links: 'nmod'_parent
    NOUN_Relation = [
        # Start
        WordSense("provenance", RelationDirection.START, RelationStrength.STRONG),
        # Destination
        WordSense("direction", RelationDirection.DEST, RelationStrength.WEAK),
        WordSense("destination", RelationDirection.DEST, RelationStrength.WEAK)
    ]
    # ADP_FIXED has the priority
    # ADP links: 'case'_child, 'dep'_parent
    ADP_FIXED_Relation = [
        # Start
        LinkedWordSense("à","partir", RelationDirection.START, RelationStrength.STRONG),
        LinkedWordSense("en", "partant", RelationDirection.START, RelationStrength.STRONG),
        # Destination
        LinkedWordSense("à","destination", RelationDirection.DEST, RelationStrength.STRONG),
        LinkedWordSense("en","direction", RelationDirection.DEST, RelationStrength.WEAK)
    ]
    ADP_Relation = [
        # Start
        WordSense("de", RelationDirection.START, RelationStrength.STRONG),
        WordSense("du", RelationDirection.START, RelationStrength.STRONG),
        WordSense("des", RelationDirection.START, RelationStrength.STRONG),
        WordSense("depuis", RelationDirection.START, RelationStrength.STRONG),
        # Destination
        WordSense("à", RelationDirection.DEST, RelationStrength.WEAK),
        WordSense("au", RelationDirection.DEST, RelationStrength.WEAK),
        WordSense("aux", RelationDirection.DEST, RelationStrength.WEAK),
        WordSense("dans", RelationDirection.DEST, RelationStrength.WEAK),
        WordSense("en", RelationDirection.DEST, RelationStrength.WEAK),
        WordSense("par", RelationDirection.DEST, RelationStrength.WEAK) # par : "passer par Paris"
    ]
    # VERB links: 'obl:arg'_parent, 'obl:mod'_parent
    # "partir" is ambiguous: "partir de ..." "partir à ..."
    VERB_MARK_Relation = [
        WordSense("après", RelationDirection.START, RelationStrength.WEAK),
        WordSense("avant", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("de", RelationDirection.START, RelationStrength.STRONG),
    ]
    VERB_Relation = [
        # Start
        WordSense("décoller", RelationDirection.START, RelationStrength.STRONG),
        WordSense("passer", RelationDirection.START, RelationStrength.WEAK),
        WordSense("être", RelationDirection.START, RelationStrength.STRONG),
        # Destination
        WordSense("arriver", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("aller", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("visiter", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("atterrir", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("découvrir", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("voyager", RelationDirection.DEST, RelationStrength.STRONG),
        WordSense("rendre", RelationDirection.DEST, RelationStrength.STRONG)
    ]
    def analyseRequest(self, request):
        """Parse a French sentence and return the ordered trip city list.

        Returns [] when fewer than two locations are detected.  Note that
        spaCy's large French model is (re)loaded on every call, which is
        slow — consider caching the nlp object if this becomes a hot path.
        """
        print(f"Request: {request}")
        nlp = spacy.load("fr_core_news_lg")
        doc = nlp(request)
        locations = []
        fullTrip = []
        # Extract locations
        for i in doc.ents:
            if i.label_ == 'LOC' or i.label_ == 'GPE':
                locations.append(i.text)
        print(f"Locations found: {locations}")
        if len(locations) <= 1:
            print("Cannot parse request or invalid request.")
        else:
            # Get token for each locations
            # (each location string is matched to one, not-yet-used token)
            tokens = np.zeros(len(locations), dtype=object)
            for i in range(len(locations)):
                tokenFound = False
                # Priority: PROPN
                for token in doc:
                    if token.pos == PROPN:
                        isUsable = True
                        for tokenSelected in tokens:
                            if type(tokenSelected) != int and tokenSelected == token:
                                isUsable = False
                        if isUsable:
                            if token.text in locations[i]:
                                tokens[i] = token
                                tokenFound = True
                                break
                # Secondary: NOUN
                if tokenFound == False:
                    for token in doc:
                        if token.pos == NOUN:
                            isUsable = True
                            for tokenSelected in tokens:
                                if type(tokenSelected) != int and tokenSelected == token:
                                    isUsable = False
                            if isUsable:
                                if token.text in locations[i]:
                                    tokens[i] = token
                                    tokenFound = True
                                    break
                # Failsafe: any (e.g in "Je veux faire Paris Gare De l'Est Marseille": Marseille is parsed as a VERB)
                if tokenFound == False:
                    for token in doc:
                        isUsable = True
                        for tokenSelected in tokens:
                            if type(tokenSelected) != int and tokenSelected == token:
                                isUsable = False
                        if isUsable:
                            if token.text in locations[i]:
                                tokens[i] = token
                                tokenFound = True
                                break
                # None
                if tokenFound == False:
                    print(f"Localization {locations[i]} not found")
                    tokens[i] = None
            # Remove None tokens
            tmpTokens = tokens
            tokens = []
            for token in tmpTokens:
                if token != None:
                    tokens.append(token)
            # Weight tokens to prepare ordering
            # (for each location token, scan the cue tables in priority order)
            weighedTokens = np.zeros(len(tokens), dtype=object)
            for i in range(len(tokens)):
                print(f"Token #{i + 1} : {tokens[i].lemma_}")
                foundWeight = []
                parent = tokens[i].head
                # CCONJ
                for child in tokens[i].children:
                    if child.pos == CCONJ:
                        for ref in self.CCONJ_Relation:
                            if ref.word == child.lemma_:
                                print(
                                    f"Found CCONJ: {ref.word} - {ref.strength.name}")
                                foundWeight.append(ref)
                                break
                # NOUN
                if len(foundWeight) <= 0: # Not prioritary over CCONJ
                    if parent.pos == NOUN:
                        for ref in self.NOUN_Relation:
                            if ref.word == parent.lemma_:
                                print(
                                    f"Found NOUN: {ref.word} - {ref.strength.name}")
                                foundWeight.append(ref)
                                break
                # ADP_FIXED
                if len(foundWeight) <= 0: # Not prioritary over CCONJ and NOUN
                    for child in tokens[i].children:
                        if child.pos == ADP:
                            for subChild in child.children:
                                if subChild.dep_ == 'fixed':
                                    for ref in self.ADP_FIXED_Relation:
                                        if ref.word == child.lemma_ and ref.fixedWord == subChild.lemma_:
                                            print(
                                                f"Found ADP_FIXED: {ref.word} {ref.fixedWord}")
                                            foundWeight.append(ref)
                                            break
                # ADP
                if len(foundWeight) <= 0: # Not prioritary over CCONJ, NOUN and ADP_FIXED
                    for child in tokens[i].children:
                        for ref in self.ADP_Relation:
                            if ref.word == child.lemma_:
                                print(
                                    f"Found ADP: {ref.word} - {ref.strength.name}")
                                foundWeight.append(ref)
                                break
                # VERB_MARK
                if len(foundWeight) <= 1: # Prioritary over CCONJ, NOUN and ADP_FIXED
                    if parent.pos == VERB:
                        for child in parent.children:
                            if child.dep_ == 'mark' and child.pos == ADP:
                                for ref in self.VERB_MARK_Relation:
                                    if ref.word == child.lemma_:
                                        print(
                                            f"Found VERB_MARK: {ref.word} - {ref.strength.name}")
                                        foundWeight.append(ref)
                                        break
                # VERB
                if len(foundWeight) <= 1: # Prioritary over CCONJ, NOUN, ADP_FIXED and VERB_MARK
                    for ref in self.VERB_Relation:
                        if ref.word == parent.lemma_:
                            print(
                                f"Found VERB: {ref.word} - {ref.strength.name}")
                            foundWeight.append(ref)
                            break
                # Default - Keep position
                if len(foundWeight) == 0: # Fallback
                    print(f"Using default weight")
                    foundWeight.append(self.WordSense(
                        "default", RelationDirection.DEST, RelationStrength.WEAK))
                # Extract first strong relation
                selectedWeight = None
                for j in range(len(foundWeight)):
                    if foundWeight[j].strength == RelationStrength.STRONG:
                        selectedWeight = foundWeight[j]
                        break
                if selectedWeight is None:
                    selectedWeight = foundWeight[0]
                print(f"Using: {selectedWeight.word}")
                print("---------------")
                weighedTokens[i] = (tokens[i], selectedWeight)
            # Order tokens
            # STRONG cues pin a token to the head (START) or tail (DEST);
            # WEAK cues keep the sentence order within their group.
            orderedTokens = []
            # First pass for direction: START
            numberOfStrongStrength = 0
            for i in range(len(weighedTokens)):
                token, weight = weighedTokens[i]
                if weight.direction == RelationDirection.START:
                    if weight.strength == RelationStrength.STRONG:
                        orderedTokens.insert(numberOfStrongStrength, token)
                        numberOfStrongStrength = numberOfStrongStrength + 1
                    else:
                        orderedTokens.append(token)
            # Second pass for direction: DEST
            numberOfStrongStrength = 0
            for i in range(len(weighedTokens)):
                token, weight = weighedTokens[i]
                if weight.direction == RelationDirection.DEST:
                    if weight.strength == RelationStrength.STRONG:
                        orderedTokens.append(token)
                        numberOfStrongStrength = numberOfStrongStrength + 1
                    else:
                        if numberOfStrongStrength == 0:
                            orderedTokens.append(token)
                        else:
                            orderedTokens.insert(
                                len(orderedTokens) - numberOfStrongStrength, token)
            # Populate full trip cities list
            for token in orderedTokens:
                fullTrip.append(token.text)
        print(f"Result trip: {fullTrip}")
        # DEBUG
        # for token in doc:
        #     print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_, token.shape_, token.is_alpha, token.is_stop)
        # displacy.serve(doc, style="dep")
        return fullTrip
    # TESTS
    # Each entry is (request sentence, expected ordered city list).
    requests = [
        ("Je veux partir de Mulhouse et visiter Paris depuis Strasbourg", ["Mulhouse", "Strasbourg", "Paris"]),
        ("J'aimerais aller d'Orléans à Paris puis dans les Vosges", ["Orléans", "Paris", "Vosges"]),
        ("Je veux aller à Marseille à partir de Lyon", ["Lyon", "Marseille"]),
        ("Je veux visiter Paris en partant de Bordeaux et en passant par Nantes", ["Bordeaux", "Nantes", "Paris"]),
        ("Je veux prendre le train à Mulhouse à destination de Strasbourg", ["Mulhouse", "Strasbourg"]),
        ("Strasbourg en provenance de Mulhouse", ["Mulhouse", "Strasbourg"]),
        ("Je veux aller de Mulhouse à Strasbourg", ["Mulhouse", "Strasbourg"]),
        ("Je veux faire Paris Gare De l'est Marseille", ["Paris", "Marseille"]),
        ("Je veux aller à Paris après être allé à Mulhouse depuis Lyon", ["Lyon", "Mulhouse", "Paris"]),
        ("Paris-Marseille", ["Paris", "Marseille"]),
        ("Je suis à Paris et je veux aller à Strasbourg avec mon amis Frank que je récupère à Mulhouse", ["Paris", "Mulhouse", "Strasbourg"]),
        ("Je veux voyager de Mulhouse pour visiter Paris en passant par Strasbourg", ["Mulhouse", "Strasbourg", "Paris"]),
        ("Je veux partir de Mulhouse et visiter Paris depuis la destination de Strasbourg", ["Mulhouse", "Strasbourg", "Paris"]),
        ("Je veux prendre le train de Mulhouse à destination de Colmar et Strasbourg", ["Mulhouse", "Colmar", "Strasbourg"]),
        ("Je souhaite une pizza napolitaine à Rome", []),
        ("Je veux aller à Lyon", [])
    ]
    def testNLP(self):
        """Run analyseRequest over the fixtures above, printing result vs
        expectation for manual inspection (no assertions are made)."""
        for index in range(len(self.requests)):
            sentence, expectedResult = self.requests[index]
            result = self.analyseRequest(sentence)
            print(
                f"\n\n\n*************************** # {index} ***************************")
            print(f"result: {result}")
            print(f"exprected: {expectedResult}")
            print(
                "*****************************************************************\n\n\n")
| true
|
47ae0acd069fbc2fe67f091490d697ad32f70356
|
Python
|
ZDawang/leetcode
|
/462_Minimum_Moves_to_Equal_Array_Elements_II.py
|
UTF-8
| 3,921
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#author : zhangdawang
#data: 2018-1
#difficulty degree:
#problem: 462_Minimum_Moves_to_Equal_Array_Elements_II.py
#time_complecity:
#space_complecity:
#beats:
import heapq
class Solution(object):
    """LeetCode 462: minimum moves to make all array elements equal.

    One move increments or decrements a single element by 1, so the
    cheapest meeting point is a median of the array (for even length,
    any value between the two middle elements costs the same).  The
    problem therefore reduces to finding a median:

    * minMoves2 - sort and index the middle: O(n log n)
    * minMoves3 - min-heap of the n//2+1 smallest values: O(n log n)
    * minMoves4 - in-place quickselect: O(n) average, O(n^2) worst case
      (was reported to TLE on adversarial inputs)
    """
    def minMoves2(self, nums):
        """Sort, take the (upper) median, and sum the distances to it."""
        ordered = sorted(nums)
        median = ordered[len(ordered) // 2]
        total = 0
        for value in nums:
            total += abs(value - median)
        return total
    def minMoves3(self, nums):
        """Median via a min-heap holding the n//2+1 largest values seen.

        Seed the heap with the first half of the array; for every later
        value that beats the heap minimum, push it and drop the minimum,
        so the heap root ends up at the median position.
        """
        size = len(nums)
        half = nums[:size // 2 + 1]
        heapq.heapify(half)
        for value in nums[size // 2 + 1:]:
            if value > half[0]:
                # push-then-pop keeps the heap at a fixed size
                heapq.heappushpop(half, value)
        median = half[0]
        return sum(abs(value - median) for value in nums)
    def minMoves4(self, nums):
        """Median via in-place quickselect (mutates nums while it runs).

        Partition around a pivot, then recurse into whichever side still
        contains index len(nums)//2 until the pivot lands exactly there.
        """
        def select(lo, hi):
            pivot = nums[lo]
            left, right = lo, hi
            while left < right:
                # sweep from the right for a value smaller than the pivot
                while left < right and nums[right] >= pivot:
                    right -= 1
                nums[left] = nums[right]
                # sweep from the left for a value larger than the pivot
                while left < right and nums[left] <= pivot:
                    left += 1
                nums[right] = nums[left]
            # drop the pivot into its final position
            nums[left] = pivot
            # recurse toward the median index
            if left < len(nums) // 2:
                return select(left + 1, hi)
            elif left > len(nums) // 2:
                return select(lo, left - 1)
            else:
                return nums[left]
        median = select(0, len(nums) - 1)
        return sum(abs(value - median) for value in nums)
# Ad-hoc smoke run of the quickselect variant on a tiny input
# (result is computed but never printed).
nums = [1, 2, 3]
solute = Solution()
res = solute.minMoves4(nums)
| true
|
a6502639bb77987d2a9d68aa40764d6a16a8b4aa
|
Python
|
AshBringer47/kpi_labs
|
/Lab 9/Lab 9.py
|
UTF-8
| 81
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Read a whitespace-separated string, sort the words by length (list.sort
# is stable, so equal-length words keep their input order), and print.
array = input("Enter the string here: ").split()
array.sort(key=len)
print(array)
| true
|
d4fa1be17098909fb72be30e436d66e6712ff0b1
|
Python
|
SafonovMikhail/python_000577
|
/001113StepikPyGEK/StepikPyGEK001113сh03p01st07C01_20200408.py
|
UTF-8
| 65
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Read two real numbers, one per line, and print the absolute value of
# their difference.
x_1 = float(input())
x_2 = float(input())
print(abs(x_1 - x_2))
| true
|
5d27f539c00628d37fd12dcf2d24d9a31caa7724
|
Python
|
monaghrp/600ocwHW2
|
/ps2_hangman.py
|
UTF-8
| 3,096
| 4.25
| 4
|
[] |
no_license
|
# 6.00 Problem Set 3
#
# Hangman
#
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.
    Depending on the size of the word list, this function may
    take a while to finish.
    """
    # NOTE(review): Python 2 only — print statement, 3-argument open()
    # with buffering=0, and string.split(); expects the whole word list
    # on the first line of words.txt.
    print "Loading word list from file..."
    # inFile: file
    inFile = open(WORDLIST_FILENAME, 'r', 0)
    # line: string
    line = inFile.readline()
    # wordlist: list of strings
    wordlist = string.split(line)
    print "  ", len(wordlist), "words loaded."
    return wordlist
def choose_word(wordlist):
    """Pick and return one word uniformly at random from *wordlist*.

    wordlist (list): list of words (strings); must be non-empty.
    """
    picked = random.choice(wordlist)
    return picked
def concatenateletters(str_input):
    """Concatenate the elements of *str_input* into a single string.

    Accepts a string (returns it unchanged) or any sequence of strings.
    Improvement: the original indexed loop built the result with repeated
    ``+=``, which is quadratic and used Python-2-only ``xrange``;
    ``str.join`` is linear and version-portable with identical output.
    """
    return ''.join(str_input)
# end of helper code
# -----------------------------------
# actually load the dictionary of words and point to it with
# the wordlist variable so that it can be accessed from anywhere
# in the program
# NOTE(review): Python 2 only (print statements, raw_input, xrange).
wordlist = load_words()
letters=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# your code begins here!
##initialize variables
# Rebinds the name choose_word from the helper function to the chosen
# word itself, so the helper cannot be called again afterwards.
choose_word=choose_word(wordlist)
done=0
# Player gets twice as many guesses as there are letters in the word.
guesses=2*len(choose_word)
solution=['_',]
for i in xrange(1,len(choose_word)):
    solution.append('_')
print 'Welcome to the game, Hangman!'
print 'I am thinking of a word that is ' + str(len(choose_word)) + ' letters long'
print '-------------'
##main program loop is
while done !=1:
    ##Prompt input
    print 'You have guesses ' + str(guesses) +' left.'
    print 'Available letters: ' + concatenateletters(letters)
    guess=raw_input('Please guess a letter: ')
    ##Check if letter is available and remove from list
    if letters.count(guess)>0:
        letters.remove(guess)
        ##Check if letter is in word
        if choose_word.count(guess)>0:
            ##loop through all letters to check for matches and replace in solution
            for i in xrange(0,len(choose_word)):
                if choose_word[i]==guess:
                    solution[i]=guess
            print 'Good guess: ' + concatenateletters(solution)
            print '-------------'
            ##check to see if solution matches chosen word
            if choose_word==''.join(solution):
                print 'Congratulations, you won!'
                done=1
        ##if not in word decrease guesses
        else:
            print 'That letter is not in my word: ' + concatenateletters(solution)
            print '-------------'
            guesses -=1
    else:
        print 'That letter is not available. Please enter another'
        print '-------------'
    if guesses==0:
        ##Exit main loop after the user has run out of guesses
        # NOTE(review): the game ends silently on a loss — no "you lost"
        # message (and no reveal of the word) is printed.
        done=1
| true
|
deb45e83b718d14f6773827c80a4ad9f242fcc23
|
Python
|
juliaguida/learning_python
|
/week2/sum_multiply.py
|
UTF-8
| 307
| 4.28125
| 4
|
[] |
no_license
|
# Write a program that takes two inputs from the user and display the sum and multiplication result of the two numbers.
numb_one = int(input(' Please enter a number: '))
numb_two = int(input(' Please enter another number: '))
# Print the sum, then the product, each on its own line.
total = numb_one + numb_two
print(total)
mult = numb_one * numb_two
print(mult)
| true
|
be72091129be8d7022c43f8e3aa4ffb878c53bf1
|
Python
|
yuyeh1212/University
|
/python/34.py
|
UTF-8
| 120
| 3.84375
| 4
|
[] |
no_license
|
# Read whitespace-separated numbers on one line and print their sum.
# Fix: the original bound the input line to the name `str`, shadowing the
# built-in str type for the rest of the module; use a neutral name.
text = input("請輸入數字:")
num_list = [int(num) for num in text.split()]
print(sum(num_list))
| true
|
d821400aa14f7310ffdff67850ff2ce60f283aba
|
Python
|
PanosRCng/just_war
|
/justwar/data/Room.py
|
UTF-8
| 810
| 3.015625
| 3
|
[] |
no_license
|
import pygame
from justwar.data.Config import Config
from justwar.data.GameElement import GameElement
from justwar.data.Background import Background
from justwar.data.Maze import Maze
from justwar.data.Gate import Gate
from justwar.data.Stone import Stone
# Module-level shared state: every Room instance reuses these same
# containers (they are cleared in Room.__init__), so only one room's
# gates and stones exist at any time.
gates = {}
stoneList = []
class Room(GameElement):
    """A game room: a background field, gates on some sides, and stones.

    NOTE(review): uses the module-level ``gates``/``stoneList`` containers
    above rather than instance attributes, so constructing a new Room
    discards the previous room's contents.
    """
    def __init__(self, pathWays):
        """Build the room with gates at the indices listed in *pathWays*.

        pathWays: iterable of gate positions (0->up, 1->right, 2->down,
        3->left, per the mapping comment below).
        """
        GameElement.__init__(self)
        self.Field = Background("field.png", (0,0))
        # Reset the shared containers in place for the new room.
        stoneList[:] = []
        gates.clear()
        # gate mapping: 0->up, 1->right, 2->down, 3->left
        for pathWay in pathWays:
            gates[pathWay] = Gate(pathWay)
        # NOTE(review): Config.NUMBER_OF_STONES is defined elsewhere;
        # presumably a small positive int — confirm.
        for i in range(0, Config.NUMBER_OF_STONES):
            stoneList.append( Stone() )
    def Show(self, surface):
        """Draw the field, then every gate and stone, onto *surface*."""
        self.Field.Show(surface)
        for gate in gates:
            gates[gate].Show(surface)
        for stone in stoneList:
            stone.Show(surface)
| true
|
8d6d9167feb45b84b0502b76d4838f917897c15b
|
Python
|
kgashok/GE_8151-unit-programs
|
/unit1/minList.py
|
UTF-8
| 1,019
| 4.0625
| 4
|
[] |
no_license
|
def find_min(a, b):
    """Return the smaller of the two arguments (b when they are equal)."""
    return a if a < b else b
# Interactive driver: read two ints and report the smaller one.
print("Enter two values :")
a = int(input())
b = int(input())
print("Minimum number is ", find_min(a, b))
def min_of_three(a, b, c):
    """Return the smallest of the three arguments.

    Improvement: uses the built-in min() directly instead of chaining the
    two-argument helper — same result, no dependency on find_min.
    """
    return min(a, b, c)
# Interactive driver: read three ints and report the smallest.
print("Enter three numbers: ")
a = int(input())
b = int(input())
c = int(input())
print("Minimum number is ", min_of_three(a, b, c))
def min_of_list(aList):
    """Return the smallest element of *aList*, or None for an empty list.

    Improvement: the built-in min() with default=None replaces the manual
    scan; identical behaviour, including the None-on-empty contract.
    """
    return min(aList, default=None)
# Interactive driver: read `limit` ints into a list, then report the
# smallest (None is printed for limit == 0).
myList = []
limit = int(input("Enter the limit: "))
print("Enter the elements:\n")
for i in range(limit):
    element = int(input())
    myList.append(element)
print("Minimum of list is ", min_of_list(myList))
| true
|
4ffcfc6231d712a5e9f698046fb246dbf4ae628b
|
Python
|
Ketupat-Development-Studios/lumos-api
|
/models/triggers/trigger.py
|
UTF-8
| 953
| 2.734375
| 3
|
[] |
no_license
|
from api.lumos_exception import LumosException
class Trigger:
    """Base automation trigger, built from a plain-dict payload.

    The class constants name the supported trigger types and are matched
    against ``trigger_data['type']`` by the factory below.
    """
    CLOCK = 'clock'
    WEATHER = 'weather'
    TEMPERATURE = 'temperature'
    def __init__(self, trigger_data):
        """Copy id/type/data out of *trigger_data*; missing keys become None."""
        self.id = trigger_data.get('id')
        self.type = trigger_data.get('type')
        self.data = trigger_data.get('data')
def create_trigger(trigger_data):
    """Factory: build the concrete Trigger subclass named by trigger_data['type'].

    Raises:
        LumosException: if the type is not one of the Trigger constants.
    """
    # Imports are local — presumably to avoid a circular import with the
    # subclass modules; confirm against the package layout.
    from models.triggers.clock_trigger import ClockTrigger
    from models.triggers.weather_trigger import WeatherTrigger
    from models.triggers.temperature_trigger import TemperatureTrigger
    trigger = None
    trigger_type = trigger_data.get('type')
    if trigger_type == Trigger.CLOCK:
        trigger = ClockTrigger(trigger_data)
    elif trigger_type == Trigger.WEATHER:
        trigger = WeatherTrigger(trigger_data)
    elif trigger_type == Trigger.TEMPERATURE:
        trigger = TemperatureTrigger(trigger_data)
    else:
        raise LumosException("invalid trigger type")
    return trigger
| true
|
a68cc63050607bf817a285c46893db35732af736
|
Python
|
Waqar-107/Codeforces
|
/B-set/108B. Datatypes.py
|
UTF-8
| 230
| 3.28125
| 3
|
[] |
no_license
|
# from dust i have come, dust i will be
# Codeforces 108B: answer YES iff among the distinct values there exist
# x < y with y < 2x (checked on adjacent pairs of the sorted set).
# NOTE(review): n is read to consume the first input line but is unused.
n = int(input())
a = list(map(int, input().split()))
a = set(a)       # drop duplicates
a = sorted(a)    # sorted() on a set returns a sorted list
for i in range(len(a) - 1):
    # a[i] + a[i] > a[i+1]  <=>  a[i+1] < 2 * a[i]
    if a[i] + a[i] > a[i + 1]:
        print('YES')
        exit(0)
print('NO')
| true
|
62776b9689acea3980582260db6ccbfb3b40b691
|
Python
|
SachinthaHG/VTG
|
/LandmarksCSV.py
|
UTF-8
| 699
| 2.953125
| 3
|
[] |
no_license
|
import csv
from Connector import Connector
class LandmarksCSV:
    """Export landmark locations from the database into a CSV file."""
    def CreateCSV(self):
        """Append a header row plus one row per landmark to
        landmark_locations.csv, pulling rows via Connector.

        Bug fix: the original passed an open() handle straight to
        csv.writer and never closed it; a context manager now guarantees
        the file is flushed and closed.  newline='' is the csv-module
        requirement that prevents blank rows on Windows.
        NOTE(review): the header (including the 'Latitiude' typo) is kept
        byte-for-byte for output compatibility, and — as before — it is
        re-appended on every run because the file is opened in 'a+' mode.
        """
        with open("landmark_locations.csv", 'a+', newline='') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(['Landmark', 'Longitude', 'Latitiude'])
            connection = Connector()
            connection.makeConnection()
            results = connection.retriveLandmarkLocation()
            # One row per result: name, then the two location fields.
            for result in results:
                writer.writerow([result.Name, result.location, result.location2])
            connection.closeConnection()
# Script entry: build/extend the CSV immediately when the module runs.
a = LandmarksCSV()
a.CreateCSV()
| true
|
1f24125dab9f40de8969bf2083ba344610781372
|
Python
|
Jaydeep-07/Python-Automation
|
/Assignment13/DuplicateFileRemoval.py
|
UTF-8
| 5,518
| 2.828125
| 3
|
[] |
no_license
|
import time
import os
import sys
import hashlib
import datetime
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import schedule
def MailSendWithAttachment(file1,receiver):
    """Email the log file *file1* to *receiver* via Gmail SMTP (TLS).

    file1: path of the file to attach.
    receiver: destination email address.
    SECURITY(review): the sender address and password are hard-coded
    below ("sender pswd" placeholder); move them to environment
    variables or a secrets store before real use.
    """
    fromaddr = "jaydeepvpatil225@gmail.com"
    toaddr =receiver
    # instance of MIMEMultipart
    msg = MIMEMultipart()
    # storing the senders email address
    msg['From'] = fromaddr
    # storing the receivers email address
    msg['To'] = toaddr
    # storing the subject
    msg['Subject'] = "Duplicates Files"
    # string to store the body of the mail
    body = "Duplicate Files Detector"
    # attach the body with the msg instance
    msg.attach(MIMEText(body, 'plain'))
    # open the file to be sent
    # NOTE(review): this handle is never closed; a `with` block would be safer.
    filename = file1
    attachment = open(filename, "rb")
    # instance of MIMEBase and named as p
    p = MIMEBase('application', 'octet-stream')
    # To change the payload into encoded form
    p.set_payload((attachment).read())
    # encode into base64
    encoders.encode_base64(p)
    p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    # attach the instance 'p' to instance 'msg'
    msg.attach(p)
    # creates SMTP session
    s = smtplib.SMTP('smtp.gmail.com', 587)
    # start TLS for security
    s.starttls()
    # Authentication
    s.login(fromaddr, "sender pswd")
    # Converts the Multipart msg into a string
    text = msg.as_string()
    # sending the mail
    s.sendmail(fromaddr, toaddr, text)
    # terminating the session
    print("mail Send ")
    s.quit()
def hashFile(path, blocksize=1024):
    """Return the hex MD5 digest of the file at *path*.

    Reads in *blocksize*-byte chunks so large files are never loaded into
    memory at once.
    Bug fix: the original opened the file and closed it manually, leaking
    the handle if a read raised; a `with` block closes it on all paths.
    """
    hasher = hashlib.md5()
    with open(path, 'rb') as afile:
        buf = afile.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(blocksize)
    return hasher.hexdigest()
def DeleteFiles(Dict1):
    """Delete all but the first file in every duplicate group of *Dict1*.

    Dict1: mapping of content hash -> list of file paths (as produced by
    DuplicateFiles below).
    Returns (deleted_paths, deleted_count).
    WARNING(review): destructive — os.remove() is called on every path
    after the first in each group; only the first-walked copy survives.
    """
    DuplicateFiles1 = []
    DuplicatesFileCounter = 0
    # Keep only hash groups that actually contain more than one file.
    results = list(filter(lambda x: len(x) > 1, Dict1.values()))
    if len(results) > 0:
        for result in results:
            icnt = 0
            for subresult in result:
                icnt += 1
                # Skip the first path (icnt == 1); remove the rest.
                if icnt >= 2:
                    DuplicatesFileCounter += 1
                    DuplicateFiles1.append(subresult)
                    print(subresult)
                    os.remove(subresult)
    else:
        print("No Duplicates Found")
    return DuplicateFiles1,DuplicatesFileCounter
def DuplicateFiles(Directoryname):
    """Walk *Directoryname* and map MD5 digest -> list of file paths.

    Files with the same digest end up in the same list, so any list of
    length > 1 is a group of duplicates.  Prints each visited folder.
    """
    digest_map = {}
    for folder, _subfolders, filenames in os.walk(Directoryname):
        print(folder)
        for name in filenames:
            full_path = os.path.join(folder, name)
            digest_map.setdefault(hashFile(full_path), []).append(full_path)
    return digest_map
def DuplicatesFilesWithMail(Dir,Receiver):
    """Run one duplicate scan of *Dir*, log the deletions, email the log.

    Dir: directory to scan; Receiver: email address for the log report.
    NOTE(review): the log is written into a "Marvellous" subdirectory of
    the current working directory — open() fails if it does not exist.
    """
    dir2 = "Marvellous"
    # Timestamped log file name, e.g. Log08-10-2021_07-30-00_PM.txt
    filename = os.path.join(dir2, "Log%s.txt" % datetime.datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p"))
    line = "_" * 60
    fobj = open(filename, "w")
    fobj.write(line + "\n")
    fobj.write("Starting Time Of File Scanning at :")
    fobj.write(time.ctime())
    # Scan, then delete every duplicate beyond the first in each group.
    dups = DuplicateFiles(Dir)
    print("___________________________________________________________")
    DuplicatesFileNames,deletecounter = DeleteFiles(dups)
    fobj.write("\n Total Duplicates Files Are :" + str(deletecounter))
    fobj.write("\nDeleted Duplicate Files Are !!!!!!")
    fobj.write("\n" + line)
    if len(DuplicatesFileNames) > 0:
        for i in DuplicatesFileNames:
            fobj.write("\n" + i + "\n")
    else:
        fobj.write("\nNo Duplicates File Found")
    fobj.close()
    # Mail the finished log to the receiver.
    MailSendWithAttachment(filename,Receiver)
def main():
    """Entry point: validate arguments, then schedule the periodic scan.

    Usage: python DuplicateFileRemoval.py <Folder> <IntervalMinutes> <ReceiverEmail>
    """
    print("This Script Is Used For Delete Duplicates File from The Directory And Send Mail Of the Deleted Duplicate "
          "Files")
    # sys.argv[1] is read below, so require at least one argument as well
    # as rejecting more than three (the original only checked '> 4' and
    # crashed with IndexError when run with no arguments).
    if len(sys.argv) < 2 or len(sys.argv) > 4:
        print("Invalid Number Of Arguments ")
        print("Please use -h or -u for help and usage ")
        exit()
    if sys.argv[1].lower() == "-h":
        print("This Script Is Used For Delete Duplicates File from The Directory And Send Mail Of the Deleted "
              "Duplicate Files ")
        print("Example :")
        print("python Filename Folder1 Timeinterval EmailOfReceiver")
        print("python DuplicateFileRemoval.py Demo 5 abc@gmail.com")
        print("DuplicateFileRemoval.py : Name Of The file")
        print("Demo : Name of the Folder ")
        print("5 : time interval in minutes")
        print("abc@gmail.com : Email Id OF the Receiver to Send the Mail OF Deleted File")
        exit()
    if sys.argv[1].lower() == "-u":
        print("This Script Is Used For Delete Duplicates File from The Directory And Send Mail Of the Deleted "
              "Duplicate Files ")
        exit()
    # A normal run needs exactly: folder, interval and receiver address.
    if len(sys.argv) != 4:
        print("Invalid Number Of Arguments ")
        print("Please use -h or -u for help and usage ")
        exit()
    Directoryname = sys.argv[1]
    # Normalise a relative path to an absolute one.
    if not os.path.isabs(Directoryname):
        Directoryname = os.path.abspath(Directoryname)
    if os.path.isfile(Directoryname):
        print("It Is File Please Enter Directory Name !!!")
        exit()
    if not os.path.exists(Directoryname):
        print("Directory ", Directoryname, "Does Not Exits ")
        exit()
    # Run the scan every N minutes until the process is killed.
    schedule.every(int(sys.argv[2])).minutes.do(DuplicatesFilesWithMail, Dir=Directoryname, Receiver=sys.argv[3])
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == "__main__":
    main()
| true
|
e5f97ddce5664684e6d405ba3078b7033a39e913
|
Python
|
LikeStrangersDo/Gongda_Python_summary
|
/5.0_object_oritented_programming.py
|
UTF-8
| 3,698
| 4.40625
| 4
|
[
"MIT"
] |
permissive
|
#######################################################################################################################################
# Object-oriented programming (OOP) is a different coding style from functional programming, which you probably started with.
# OOP can be very useful when you have a fixed set of operations on things, and as your code evolves, you primarily add new things.
# This can be accomplished by adding new classes which implement existing methods, and the existing classes are left alone.
# There is a discussion on OOP vs functional programming: https://medium.com/@shaistha24/functional-programming-vs-object-oriented-programming-oop-which-is-better-82172e53a526
#######################################################################################################################################
# Here I just provide some simple examples of "class", "object", "__init__", "methods" in Python
# By defining a class of objects, you can save infomation as different attributes of this object
# so that later you can access the data fields immediately
# For applications of OOP in research, you can check my codes for TROPOMI datasest.
# Example 1
# Here we define a class of objects, named "User". Then we can save some information of interest as the attributes.
class User:
    """Simple record of a user's name, birthday and favourite language."""

    def __init__(self, full_name, birthday, language):
        """Create a User and derive first/last name from *full_name*.

        *birthday* is expected as a 'YYYYMMDD' string (parsed in age()).
        """
        self.name = full_name
        name_pieces = full_name.split(" ")
        self.first_name = name_pieces[0]
        self.last_name = name_pieces[-1]
        self.birthday = birthday
        self.favourite_language = language

    # Build a function to process data fields associated with this object;
    # call user.age() to get the result (it is not stored as an attribute).
    def age(self):
        """Return the user's age in years on the fixed date 2020-08-17."""
        # Imported here because the original 'import datetime' sat in the
        # class body: class scope is not visible from inside a method, so
        # age() would have raised NameError on 'datetime'.
        import datetime
        today = datetime.date(2020, 8, 17)
        yyyy = int(self.birthday[0:4])
        mm = int(self.birthday[4:6])
        dd = int(self.birthday[6:8])
        dob = datetime.date(yyyy, mm, dd)
        age_in_days = (today - dob).days
        # Approximate conversion; leap days are ignored.
        age_in_years = age_in_days / 365
        return age_in_years
# An example input: build one User instance.
user = User("Michael Jordan", "19910101","Python")
# Check the results: every attribute set in __init__ is accessible directly.
print(user.name)
print(user.first_name)
print(user.last_name)
print(user.birthday)
print(user.favourite_language)
# age() must be called to compute the value (it is not saved as an attribute)
print(user.age())
# Example 2
# Here we define a class of objects, called "Rectangle". Then we perform some calculations based on its data fields (attributes).
# All functions rely on what has been already provided. For a nested function (e.g. "calculate cost"), it requires the inner functions (e.g. "get_area") to be recognized.
class Rectangle:
    """An axis-aligned rectangle with an optional per-unit-area cost."""

    def __init__(self, length, breadth, unit_cost=0):
        self.length = length
        self.breadth = breadth
        self.unit_cost = unit_cost

    def get_perimeter(self):
        """Return the perimeter: twice the sum of the two sides."""
        side_sum = self.length + self.breadth
        return 2 * side_sum

    def get_area(self):
        """Return the area: length times breadth."""
        return self.length * self.breadth

    def calculate_cost(self):
        """Return the total cost: area multiplied by the unit cost."""
        return self.get_area() * self.unit_cost
# Example usage: breadth = 120 cm, length = 160 cm, 1 cm^2 = Rs 2000
r = Rectangle(160, 120, 2000)
print("Area of Rectangle: %s cm^2" % (r.get_area()))
print("Cost of rectangular field: Rs. %s " %(r.calculate_cost()))
# End
#######################################################################################################################################
| true
|
f0b1ecaf12f5e3d7655f16cc727196ef54944f27
|
Python
|
Nimrod-Galor/selfpy
|
/624.py
|
UTF-8
| 138
| 3.4375
| 3
|
[] |
no_license
|
def extend_list_x(list_x, list_y):
    """Return a new list with the items of *list_y* placed before *list_x*.

    Neither input list is modified.
    """
    combined = list_y + list_x
    return combined


x = [4, 5, 6]
y = [1, 2, 3]
print(extend_list_x(x, y))
| true
|
2e3e64fac69f8986f7a9bdf9223547b84d0f3ceb
|
Python
|
lizenghui1121/DS_algorithms
|
/leetcode 100/62.不同的路径.py
|
UTF-8
| 648
| 3.734375
| 4
|
[] |
no_license
|
"""
一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。
机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。
@Author: Li Zenghui
@Date: 2020-06-30 14:47
"""
def uniquePaths(m, n):
    """Count monotone lattice paths across an m x n grid (LeetCode 62).

    The robot starts at the top-left corner and may only move right or
    down; return the number of distinct routes to the bottom-right corner.
    """
    # Cells in the first row/column admit exactly one path each.
    grid = [[1] * n for _ in range(m)]
    for row in range(1, m):
        for col in range(1, n):
            # Each interior cell is reached from above or from the left.
            grid[row][col] = grid[row - 1][col] + grid[row][col - 1]
    return grid[m - 1][n - 1]


if __name__ == '__main__':
    print(uniquePaths(3, 2))
| true
|
0c21646830756485e28a90e6faca269be24e87d7
|
Python
|
lnestor/ckt_tools
|
/ckt_tools/helpers/logger.py
|
UTF-8
| 683
| 3.78125
| 4
|
[] |
no_license
|
class Logger:
    """Generic logging class with varying logging levels.

    Two flags decide what reaches the screen:
      detailed -- human readable messages when True, csv-style otherwise
      debug    -- extra messages useful for debugging when True
    """

    def __init__(self, detailed, debug):
        self.detailed = detailed
        self.debug = debug

    def log_detailed(self, text):
        """Print *text* only in detailed mode."""
        if not self.detailed:
            return
        print(text)

    def log_debug(self, text):
        """Print *text* only in debug mode."""
        if not self.debug:
            return
        print(text)

    def log_terse(self, text):
        """Print *text* only when NOT in detailed mode."""
        if not self.detailed:
            print(text)
| true
|
c55d93de1b87753634891ae0b5f0ed2f25c145f1
|
Python
|
miikko/Multi-Monitor-Window-Controller
|
/monitor_manager.py
|
UTF-8
| 766
| 3.015625
| 3
|
[] |
no_license
|
from win32 import win32api
from cursor_tracker import get_cursor_pos
def get_monitors():
    """Return the attached monitors as reported by the Win32 API.

    Each entry is an EnumDisplayMonitors tuple; its last element is the
    monitor rectangle used by cursor_is_in_monitor() below.
    """
    return win32api.EnumDisplayMonitors()
def cursor_is_in_monitor(monitor):
    """Return True when the current cursor position lies inside *monitor*.

    *monitor* is one entry from get_monitors(); its last element holds the
    (left, top, right, bottom) rectangle of the monitor.
    """
    cursor_x, cursor_y = get_cursor_pos()
    left, top, right, bottom = monitor[-1:][0]
    inside_horizontally = left <= cursor_x < right
    inside_vertically = top <= cursor_y < bottom
    return inside_horizontally and inside_vertically
def get_active_monitor_name():
    """Return "Monitor N" (1-based) for the monitor containing the cursor.

    Raises:
        Exception: when no detected monitor contains the cursor.
    """
    for monitor_number, monitor in enumerate(get_monitors(), 1):
        if cursor_is_in_monitor(monitor):
            return f"Monitor {monitor_number}"
    raise Exception("Cursor was not inside any of the detected monitors")
| true
|
98813cd15f49040417a53ecc2c291d0862ec3873
|
Python
|
Mountan327/Mountan327.github.io
|
/try.py
|
UTF-8
| 734
| 3.1875
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt

# Sample grids: x for the sinusoid subplot, x1 for the exponential subplot.
x=np.linspace(0,5,20)
x1=np.linspace(0,10,10)
# One period of the periodic sequence, repeated 8 times below.
X=[2,1.5,1,0.5]
y1=[]
for i in range(32):
    y1.append(X[i%4])
fig=plt.figure('16010140048')
# Subplot 1: periodic sequence ("zhouqi" = periodic).
plt.subplot(321)
plt.stem(list(y1))
plt.grid(True)
plt.title('zhouqi')
# Subplot 2: sampled sinusoid ("zhengxian" = sine).
plt.subplot(322)
y2=2*np.sin(0.5*np.pi*x+2)
plt.title('zhengxian')
plt.grid(True)
plt.stem(x,y2)
# Subplot 3: unit impulse ("chongji" = impulse).
plt.subplot(323)
y3=[1,0,0,0,0,0,0,0,0]
plt.stem(y3)
plt.grid(True)
plt.title('chongji')
# Subplot 4: unit step ("jieyue" = step).
plt.subplot(324)
y4=[0,0,0,1,1,1,1]
plt.stem(y4)
plt.grid(True)
plt.title('jieyue')
# Subplot 5: real exponential A*a**n ("shizhishu" = real exponential).
plt.subplot(325)
A=2
a=0.6
y5=A*a**x1
plt.grid(True)
plt.title('shizhishu')
plt.stem(x1,y5)
# Subplot 6: arbitrary sequence ("renyi"); note y5 is rebound here.
plt.subplot(326)
y5=[8,3.4,1.8,5.6,2.9,0.7]
plt.grid(True)
plt.title('renyi')
plt.stem(y5)
plt.show()
| true
|
517c46ef24b9051e278118f60e1dc389a8853ff6
|
Python
|
Teoroo-CMC/PiNN
|
/tests/test_bpnn.py
|
UTF-8
| 4,404
| 2.546875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
"""unit tests for bpnn implementation"""
import pytest
import numpy as np
import tensorflow as tf
def _manual_sfs():
lambd = 1.0
zeta = 1.0
eta = 0.01
Rc = 12.0
Rs = 0.5
a = np.array([0., 0., 0.])
b = np.array([1., 0., 0.])
c = np.array([1., 1., 0.])
ab = b-a
ac = c-a
bc = c-b
Rab = np.linalg.norm(ab)
Rac = np.linalg.norm(ac)
Rbc = np.linalg.norm(bc)
cosabc = np.dot(ab, ac)/(Rab*Rac)
def fcut(R, Rcut):
return 0.5*(np.cos(np.pi*R/Rcut)+1)
abc = np.arccos(cosabc) * 180/np.pi
g2_a = np.exp(-eta*(Rab-Rs))*fcut(Rab, Rc) +\
np.exp(-eta*(Rac-Rs))*fcut(Rac, Rc)
g3_a = 2**(1-zeta) *\
(1+lambd*cosabc)**zeta*np.exp(-eta*(Rab**2+Rac**2+Rbc**2)) *\
fcut(Rab, Rc)*fcut(Rac, Rc)*fcut(Rbc, Rc)
g4_a = 2**(1-zeta) *\
(1+lambd*cosabc)**zeta*np.exp(-eta*(Rab**2+Rac**2)) *\
fcut(Rab, Rc)*fcut(Rac, Rc)
return g2_a, g3_a, g4_a
@pytest.mark.forked
def test_sfs():
    """Compare the BP symmetry functions against the manual reference.

    Units in the original RUNNER format are Bohr, hence the unit
    conversions on eta, Rs and the cutoff radius.
    """
    from helpers import get_trivial_runner_ds
    from pinn.networks.bpnn import BPNN
    from pinn.io import sparse_batch

    bohr2ang = 0.5291772109
    dataset = get_trivial_runner_ds().apply(sparse_batch(1))
    sf_spec = [
        {'type': 'G2', 'i': 1, 'j': 'ALL',
         'eta': [0.01 / (bohr2ang**2)], 'Rs': [0.5 * bohr2ang]},
        {'type': 'G3', 'i': 1, 'j': 8, 'k': 1,
         'eta': [0.01 / (bohr2ang**2)], 'lambd': [1.0], 'zeta': [1.0]},
        {'type': 'G4', 'i': 1, 'j': 8, 'k': 1,
         'eta': [0.01 / (bohr2ang**2)], 'lambd': [1.0], 'zeta': [1.0]},
    ]
    nn_spec = {8: [35, 35], 1: [35, 35]}
    batch = next(iter(dataset))
    network = BPNN(sf_spec=sf_spec, nn_spec=nn_spec, rc=12 * bohr2ang)
    batch = network.preprocess(batch)
    g2_ref, g3_ref, g4_ref = _manual_sfs()
    assert np.allclose(batch['fp_0'][0], g2_ref, rtol=5e-3)
    assert np.allclose(batch['fp_1'][0], g3_ref, rtol=5e-3)
    assert np.allclose(batch['fp_2'][0], g4_ref, rtol=5e-3)
@pytest.mark.forked
def test_jacob_bpnn():
    """Check BPNN jacobian calculation.

    Forces obtained via autodiff through the network built with the
    analytic jacobian (use_jacobian=True, the default) must match those
    from the network built without it.
    """
    from ase.collections import g2
    from pinn.networks.bpnn import BPNN
    # Define the test case: a broad mix of G2/G3/G4 symmetry functions
    # covering specific elements and the "ALL" wildcard.
    sf_spec = [
        {'type': 'G2', 'i': 1, 'j': 1, 'Rs': [1., 2.], 'eta': [0.1, 0.5]},
        {'type': 'G2', 'i': 8, 'j': 1, 'Rs': [1., 2.], 'eta': [0.1, 0.5]},
        {'type': 'G2', 'i': "ALL", 'j': "ALL",
         'Rs': [1., 2.], 'eta': [0.1, 0.5]},
        {'type': 'G2', 'i': "ALL", 'j': 1, 'Rs': [1.], 'eta': [0.01]},
        {'type': 'G3', 'i': 1, 'j': 8, 'lambd': [
            0.5, 1.], 'zeta': [1., 2.], 'eta': [0.1, 0.2]},
        {'type': 'G3', 'i': "ALL", 'j': 8, 'lambd': [
            0.5, 1.], 'zeta': [1., 2.], 'eta': [0.1, 0.2]},
        {'type': 'G4', 'i': 8, 'j': 8, 'lambd': [
            0.5, 1.], 'zeta': [1., 2.], 'eta': [0.1, 0.2]},
        {'type': 'G4', 'i': 8, 'j': 8, 'k': 1, 'lambd': [
            0.5, 1.], 'zeta': [1., 2.], 'eta': [0.1, 0.2]}
    ]
    nn_spec = {8: [32, 32], 1: [32, 32]}
    # Periodic 2x2x2 water supercell with randomly perturbed positions.
    water = g2['H2O']
    water.set_cell([3.1, 3.1, 3.1])
    water.set_pbc(True)
    water = water.repeat([2, 2, 2])
    pos = water.get_positions()
    water.set_positions(pos+np.random.uniform(0, 0.2, pos.shape))
    tensors = {
        "coord": tf.constant(water.positions, tf.float32),
        "ind_1": tf.zeros_like(water.numbers[:, np.newaxis], tf.int32),
        "elems": tf.constant(water.numbers, tf.int32),
        "cell": tf.constant(water.cell[np.newaxis, :, :], tf.float32)
    }
    # Forces from the jacobian-based network; the seed is reset before each
    # forward pass so both networks get identical random initial weights.
    bpnn = BPNN(sf_spec, nn_spec)
    with tf.GradientTape() as g:
        g.watch(tensors['coord'])
        tf.random.set_seed(0)
        en = bpnn(tensors)
    frc_jacob = - g.gradient(en, tensors['coord'])
    # Fresh input dict for the second pass -- presumably the first forward
    # pass augments/consumes the tensors (NOTE(review): confirm).
    tensors = {
        "coord": tf.constant(water.positions, tf.float32),
        "ind_1": tf.zeros_like(water.numbers[:, np.newaxis], tf.int32),
        "elems": tf.constant(water.numbers, tf.int32),
        "cell": tf.constant(water.cell[np.newaxis, :, :], tf.float32)
    }
    bpnn = BPNN(sf_spec, nn_spec, use_jacobian=False)
    with tf.GradientTape() as g:
        g.watch(tensors['coord'])
        tf.random.set_seed(0)
        en = bpnn(tensors)
    frc_no_jacob = - g.gradient(en, tensors['coord'])
    assert np.allclose(frc_jacob, frc_no_jacob, rtol=5e-3)
| true
|
a2fe3f9371d0002f4db548da1f1d71a9bdbe12e9
|
Python
|
yidaiweiren/Python
|
/study/day8/全局变量.py
|
UTF-8
| 178
| 3
| 3
|
[] |
no_license
|
# Demonstration of module-level (global) variables:
# both functions below read the same global name 'a'.
a = 100


def test1():
    """Print the global 'a' as seen from test1."""
    print ("test1_a=%d"%a)


def test2():
    """Print the global 'a' as seen from test2."""
    print ("test2_a=%d"%a)


test1()
test2()
'''
test1_a=100
test2_a=100
'''
| true
|
70fdc9e4a5390976b033e832ab5a5e8bfab0f579
|
Python
|
hyschive/gamer-fork
|
/example/test_problem/Hydro/ClusterMerger/gamer_cluster_ics.py
|
UTF-8
| 6,054
| 2.828125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Generate GAMER initial conditions for a binary galaxy-cluster merger:
# hydrostatic profiles, a tangled magnetic field and DM/star particles.
import cluster_generator as cg
import unyt as u
from numpy.random import RandomState
import numpy as np

# Note that cluster_generator does not use unyt units for speed and simplicity,
# so mass units are Msun, length units are kpc, and time units are Myr

# Put the two clusters at a redshift z = 0.1
z = 0.1

# M200 for both clusters (3:1 mass ratio)
M200_1 = 6.0e14  # in Msun
M200_2 = 2.0e14  # in Msun

conc = 4.0  # A good approximation to the concentration parameter for both clusters

# Find r200 for both clusters
r200_1 = cg.find_overdensity_radius(M200_1, 200.0, z=z)
r200_2 = cg.find_overdensity_radius(M200_2, 200.0, z=z)

# Scale radii to be used for the sNFW profiles
a1 = r200_1/conc
a2 = r200_2/conc

# For the total mass density profile, we will use a "super-NFW" profile, which
# is very similar to the NFW profile but falls off slightly faster (Lilley, E. J.,
# Wyn Evans, N., & Sanders, J.L. 2018, MNRAS)

# Determine the total mass for each sNFW profile
M1 = cg.snfw_total_mass(M200_1, r200_1, a1)
M2 = cg.snfw_total_mass(M200_2, r200_2, a2)

# Use this total mass to construct total mass profiles for each cluster
Mt1 = cg.snfw_mass_profile(M1, a1)
Mt2 = cg.snfw_mass_profile(M2, a2)

# Use the total mass profiles to determine r500/M500 and r2500/M2500 for
# each cluster
r500_1, M500_1 = cg.find_radius_mass(Mt1, z=z, delta=500.0)
r2500_1, M2500_1 = cg.find_radius_mass(Mt1, z=z, delta=2500.0)
r500_2, M500_2 = cg.find_radius_mass(Mt2, z=z, delta=500.0)
r2500_2, M2500_2 = cg.find_radius_mass(Mt2, z=z, delta=2500.0)

# Total mass density profiles for each cluster
rhot1 = cg.snfw_density_profile(M1, a1)
rhot2 = cg.snfw_density_profile(M2, a2)

# Sprinkle some stars in--2% of the total mass for each cluster
rhos1 = 0.02*rhot1
rhos2 = 0.02*rhot2

# Find the gas mass fraction within R500 (using the relationship between
# M500 and fgas from Vikhlinin, A., et al. 2009, ApJ, 692, 1033
f_g1 = cg.f_gas(M500_1)
f_g2 = cg.f_gas(M500_2)

# This sets the gas density profile using the functional form from Vikhlinin, A.,
# Kravtsov, A., Forman, W., et al. 2006, ApJ, 640, 691 for the first cluster. We
# set the scale density to 1.0 first and will rescale it in the next line by the
# gas mass within r500
rhog1 = cg.vikhlinin_density_profile(1.0, 0.2*r2500_1, 0.67*r200_1, 1.0, 0.67, 3.0)
rhog1 = cg.rescale_profile_by_mass(rhog1, f_g1*M500_1, r500_1)

# Same as above for the second cluster
rhog2 = cg.vikhlinin_density_profile(1.0, 0.2*r2500_2, 0.67*r200_2, 1.0, 0.67, 3.0)
rhog2 = cg.rescale_profile_by_mass(rhog2, f_g2*M500_2, r500_2)

# This is the plasma beta parameter for the ratio of the thermal pressure to the
# magnetic pressure
beta = 100.0

# This sets up the profiles for the first cluster assuming hydrostatic equilibrium,
# taking the gas density, total mass density, and stellar density as input
# (the two numbers are the min/max radii of the profile grid in kpc)
hse1 = cg.ClusterModel.from_dens_and_tden(0.1, 20000.0, rhog1, rhot1,
                                          stellar_density=rhos1)

# This sets a radial magnetic field strength profile using the beta parameter and
# the pressure in the profile, assuming p_B = B^2/s (thus gaussian=False)
hse1.set_magnetic_field_from_beta(beta, gaussian=False)

# These lines are the same as above for the second cluster
hse2 = cg.ClusterModel.from_dens_and_tden(0.1, 20000.0, rhog2, rhot2,
                                          stellar_density=rhos2)
hse2.set_magnetic_field_from_beta(beta, gaussian=False)

# Write the profiles for each cluster to files
hse1.write_model_to_h5("profile1.h5", overwrite=True)
hse2.write_model_to_h5("profile2.h5", overwrite=True)

# Set a random number generator for the generation of the magnetic field
# vector potential in 3D
prng = RandomState(24)

# This is the width of the GAMER simulation box and its center
w = 15000.0  # in kpc
center = np.array([0.5*w]*3)

# This determines the centers of the clusters, assuming a distance of
# 3 Mpc and zero impact parameter, centered on the box center
d = 3000.0  # in kpc
b = 0.0  # in kpc
center1, center2 = cg.compute_centers_for_binary(center, d, b)

# This sets up a 3D magnetic vector potential which GAMER will take the curl
# of on the AMR grid to get the initial B-field. It is a tangled field which
# uses a Kolmogorov spectrum with a large-scale cutoff of 500 kpc, a
# small-scale cutoff of 10 kpc, and is proportional on average to the pressure
# everywhere (given by the magnetic field profile of the clusters from above).
# Outside of r_max = 5000.0 kpc from each cluster center the average B-field
# is constant
left_edge = center-0.5*w
right_edge = center+0.5*w
dims = (256,)*3
bfield = cg.RadialRandomMagneticVectorPotential(left_edge, right_edge, dims,
                                                10.0, 500.0, center1,
                                                "profile1.h5", ctr2=center2,
                                                profile2="profile2.h5", r_max=5000.0)

# Write the 3D vector potential to the B_IC file
bfield.write_to_h5("B_IC", overwrite=True, length_unit="Mpc",
                   field_unit="sqrt(1e14*Msun/Mpc**3)*Mpc/(10*Gyr)")

# We now set up the velocities of the two clusters. Assume 1500 km/s
# relative velocity, and then use the M200 of the two clusters to
# set velocity vectors in roughly the CM frame. The velocity is in
# the x-direction only
velocity = (1500.0*u.km/u.s).to_value("kpc/Myr")
velocity1 = np.array([velocity*M200_2/(M200_1+M200_2), 0.0, 0.0])
velocity2 = np.array([-velocity*M200_1/(M200_1+M200_2), 0.0, 0.0])

# Now we set up the cluster initial conditions. use 2e6 DM particles,
# 4e4 star particles. At r_max = 5000.0 kpc, the profiles of each cluster
# are constant
num_particles = {"dm": 2_000_000, "star": 40_000}
ics = cg.ClusterICs("1to3_b0.0", 2, ["profile1.h5", "profile2.h5"],
                    [center1, center2], [velocity1, velocity2],
                    num_particles=num_particles, mag_file="B_IC", r_max=5000.0)

# This writes the GAMER-specific IC files that are needed, generates
# the particles, and prints out the contents of the Input__TestProb
# file which should be used
cg.setup_gamer_ics(ics)
| true
|
07c08d4aad0571b64756c86d855fd6e343f5f6aa
|
Python
|
vug/coding-moding
|
/problems/uva/272_TEX_Quotes/main2.py
|
UTF-8
| 262
| 3.21875
| 3
|
[] |
no_license
|
import sys

if __name__ == "__main__":
    # TeX wants opening quotes as `` and closing quotes as ''; the input
    # uses plain " for both, so alternate between the two forms.
    expecting_open = True
    for ch in sys.stdin.read():
        if ch == '"':
            sys.stdout.write("``" if expecting_open else "''")
            expecting_open = not expecting_open
        else:
            sys.stdout.write(ch)
| true
|
c7a33419fe9f6ec9b39e4ee311ed4bd0e18cb25a
|
Python
|
bus1029/HackerRank
|
/Interview Preparation Kit/Sorting/Sorting_MergeSort.py
|
UTF-8
| 1,675
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/python3
import math
import os
import random
import re
import sys
def mergeSort(a):
    """Sort list *a* in place using recursive merge sort.

    Splits the list in half, sorts each half recursively, then merges the
    two sorted halves back into *a* from smallest to largest.
    """
    if len(a) <= 1:
        return
    half = len(a) // 2
    left, right = a[:half], a[half:]
    mergeSort(left)
    mergeSort(right)
    i = j = k = 0
    # Merge: repeatedly take the smaller head of the two halves.
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            a[k] = left[i]
            i += 1
        else:
            a[k] = right[j]
            j += 1
        k += 1
    # Exactly one half still has elements left; append its remainder.
    a[k:] = left[i:] if i < len(left) else right[j:]
# Complete the countSwaps function below.
def countSwaps(a):
    """
    1. Number of swaps
    2. First Element
    3. Last Element
    """
    """
    폰 노이만이 개발했으며, 두 부분으로 쪼개는 작업을 재귀적으로 반복한 뒤,
    쪼갠 순서의 반대로 작은 값부터 병합해나가는 분할 정복 알고리즘의 일종이다.
    두 부분으로 쪼개는데 O(logn)이고, 데이터 병합이 O(n)이므로, 정렬 상태와
    무관하게 언제나 O(nlogn)이다. 데이터 크기만한 메모리가 더 필요한게 단점이다.
    """
    # NOTE(review): swap_count is never incremented -- mergeSort does not
    # report swaps, so "0 swaps" is always printed. Confirm whether the
    # problem's expected output needs a real (bubble-sort style) swap count.
    swap_count = 0
    # Sort in place using the merge sort defined above
    # (the original comment said "Shell Sort", which is incorrect).
    mergeSort(a)
    print("Array is sorted in " + str(swap_count) + " swaps.")
    print("First Element:", a[0])
    print("Last Element:", a[-1])
    print("Array:", a)
if __name__ == '__main__':
    # Read the element count (only part of the input format; unused
    # beyond consuming the first line), then the array itself.
    n = int(input())
    a = list(map(int, input().rstrip().split()))
    countSwaps(a)
| true
|
212e0473e90a327dd8c86c8c2cd1c63705374bfa
|
Python
|
piotrkumala-zz/ProjektGrafy
|
/Shared/Generator.py
|
UTF-8
| 1,171
| 2.78125
| 3
|
[] |
no_license
|
from Shared.CheckSeries import CheckSeries
from Shared.Graph import Graph
def GenerateGraph(a: list, n: int):
    """Construct a simple graph realising degree sequence *a* on *n* vertices.

    Havel-Hakimi-style construction: repeatedly connect the vertex with the
    highest residual degree to the next a[0] vertices, then re-sort.
    Returns the Graph on success, or False when the sequence is not
    graphical. NOTE: *a* is mutated in place by the construction.
    """
    # CheckSeries receives a copy because the loop below consumes 'a'.
    if CheckSeries(a[:], n):
        a.sort(reverse=True)
        # b tracks original vertex labels through the re-sorting below.
        b = list(range(0, n))
        g = Graph(n, 0.0, 1)
        while True:
            empty = True
            negative = False
            for x in a:
                if x != 0:
                    empty = False
                if x < 0:
                    negative = True
            # All residual degrees satisfied: construction complete.
            if empty:
                return g
            # Impossible residual state: the sequence is not graphical.
            elif a[0] < 0 or a[0] >= n or negative:
                return False
            else:
                i = 1
                # Connect the highest-degree vertex to the next a[0]
                # vertices, consuming one residual degree from each.
                while i <= a[0]:
                    a[i] -= 1
                    g.addEdge(b[0], b[i])
                    i += 1
                a[0] = 0
                # Bubble-sort degrees (and labels in lockstep) descending.
                for i in range(n - 1):
                    for j in range(n - 1):
                        if a[j] < a[j + 1]:
                            a[j], a[j + 1] = a[j + 1], a[j]
                            b[j], b[j + 1] = b[j + 1], b[j]
    else:
        return False
def GenerateKGraph(n: int, k: int):
    """Build a k-regular graph on n vertices (every vertex has degree k).

    Returns the Graph on success, or False when the constant degree
    sequence is not graphical (same contract as GenerateGraph).
    """
    # A k-regular graph is just the degree sequence [k, k, ..., k];
    # list repetition replaces the original append loop.
    return GenerateGraph([k] * n, n)
| true
|
c89aa066aee7f7e2e70b6ba3b9f4be942d2a3608
|
Python
|
JNWED/git_test
|
/automation/testcase/iOS/test06_search.py
|
UTF-8
| 4,955
| 2.59375
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
import sys
import time
import re
from common.basetestcase import BaseTestCase
sys.path.append('../..')
class SearchTest(BaseTestCase):
    """Appium UI tests for the iOS app's search feature (tags, songs,
    artists and videos)."""

    @classmethod
    def setUpClass(cls):
        pass

    def setUp(self):
        # Log which device starts which case, then give the UI time to settle.
        self.tester.logger.info("Device: %s Start case: %s" % (
            self.tester.device.deviceName, self._testMethodName))
        time.sleep(4)

    def tearDown(self):
        # Screenshot on failure, then navigate back to the start screen.
        self.tester.addfailscreenshot(self._testMethodName)
        self.tester.back_to_start()

    # Tag search returns correct, clickable results.
    '''
    搜索功能正确
    '''
    def test_SearchTest_01_searchTag(self):
        try:
            self.tester.find_element_by_xpath_and_click('//XCUIElementTypeStaticText[1]')
            self.tester.find_element_by_xpath_and_send_keys('//XCUIElementTypeStaticText[1]', "baby", timeout=20)
            self.tester.find_element_by_xpath_and_click('// XCUIElementTypeButton[@name="Search"]')
            self.tester.swipe_ios('down')
            time.sleep(2)
            # Result-tab buttons in the scroll view; click each in turn.
            list = self.tester.driver.find_elements_by_xpath('//XCUIElementTypeScrollView/XCUIElementTypeButton')
            for element in list:
                element.click()
                time.sleep(2)
                self.tester.logger.info("设备: %s 点击 %s" % ((self.tester.device.deviceName),
                                                           (element.get_attribute("name"))))
            self.tester.logger.info("左滑")
            # Swipe left once per remaining tab. NOTE(review): j never equals
            # len(list) in range(1, len(list)), so the break is dead code.
            for j in range(1, len(list)):
                self.tester.swipe_ios("left")
                time.sleep(2)
                if j == len(list):
                    break
        except Exception:
            self.fail("设备: %s 搜索功能异常" % (self.tester.device.deviceName))

    # Song search result is correct and the song can play/pause.
    '''
    单曲搜索结果正确
    '''
    def test_SearchTest_02_song(self):
        self.tester.find_element_by_xpath_and_click('//XCUIElementTypeStaticText[1]')
        self.tester.find_element_by_xpath_and_send_keys('//XCUIElementTypeStaticText[1]', "baby", timeout=20)
        self.tester.find_element_by_xpath_and_click('// XCUIElementTypeButton[@name="Search"]')
        time.sleep(2)
        self.assertTrue(self.tester.is_element_exist("Baby"),
                        "设备: %s 单曲结果错误" % (self.tester.device.deviceName))
        self.tester.find_element_by_xpath_and_click('(//XCUIElementTypeStaticText[@name="Baby"])[1]')
        time.sleep(2)
        # "暂停" = pause button visible -> playback started; tap it and
        # expect the "播放" (play) button to appear.
        if self.tester.is_element_exist("暂停"):
            self.tester.logger.info("设备: %s 歌曲成功播放" % (self.tester.device.deviceName))
            self.tester.find_element_by_xpath_and_click('//XCUIElementTypeButton[@name="暂停"]')
            if self.tester.is_element_exist("播放"):
                self.tester.logger.info("设备: %s 歌曲成功暂停" % (self.tester.device.deviceName))
        else:
            self.fail(" 设备: %s 单曲未播放" % (self.tester.device.deviceName))

    # Artist search result is correct and the artist page opens.
    '''
    歌手搜索结果正确
    '''
    def test_SearchTest_03_singer(self):
        self.tester.find_element_by_xpath_and_click('//XCUIElementTypeStaticText[1]')
        self.tester.find_element_by_xpath_and_send_keys('//XCUIElementTypeStaticText[1]', "baby", timeout=20)
        self.tester.find_element_by_xpath_and_click('// XCUIElementTypeButton[@name="Search"]')
        time.sleep(2)
        '''点击进入"歌手"界面'''
        self.tester.find_element_by_xpath_and_click('//XCUIElementTypeButton[@name="歌手 未选定"]')
        singer = self.tester.driver.find_element_by_xpath('//XCUIElementTypeStaticText[@name="Justin Bieber (贾斯汀.比伯)"]')
        self.assertIsNotNone(singer, "歌手搜索结果错误")
        singer.click()
        time.sleep(2)
        singe_info = self.tester.driver.find_element_by_xpath('//XCUIElementTypeButton[@name="艺人信息 未选定"]')
        self.assertIsNot(singe_info, "设备: %s 进入艺人信息界面失败" % (self.tester.device.deviceName))

    # Video search result is correct and the first video opens.
    '''
    视频搜索结果正确
    '''
    def test_SearchTest_04_video(self):
        try:
            self.tester.find_element_by_xpath_and_click('//XCUIElementTypeStaticText[1]')
            self.tester.find_element_by_xpath_and_send_keys('//XCUIElementTypeStaticText[1]', "baby", timeout=20)
            self.tester.find_element_by_xpath_and_click('// XCUIElementTypeButton[@name="Search"]')
            time.sleep(2)
            '''点击进入"视频"界面'''
            self.tester.find_element_by_xpath_and_click('//XCUIElementTypeButton[@name="视频 未选定"]')
            time.sleep(2)
            video = self.tester.driver.find_element_by_xpath('//XCUIElementTypeCell[1]')
            self.assertIsNotNone(video, "设备: %s 视频搜索结果错误" % (self.tester.device.deviceName))
            video.click()
            time.sleep(3)
        except Exception:
            self.fail("设备: %s 搜索视频异常" % (self.tester.device.deviceName))

    @classmethod
    def tearDownClass(cls):
        pass
| true
|
6e64220414843cde7453ff3c5f6fc6ba7d42ea1c
|
Python
|
OKoop/NURHandinKoop2
|
/Problem6/functions26.py
|
UTF-8
| 2,097
| 3.578125
| 4
|
[] |
no_license
|
import numpy as np
#This function scales a given array to a region where the logarithmic
#regression will work better. It calculates the mean and variance
#and then subtracts the mean from the array and divides by sqrt(sigma).
def Scalefeat(arr):
    """Standardise *arr*: subtract the mean and divide by the square root
    of the (population) variance, so logistic regression fits better."""
    count = len(arr)
    mean = sum(arr) / count
    variance = sum((arr - mean) ** 2.) / count
    return (arr - mean) / (variance ** (1. / 2.))
#The sigmoid activation function to use for the logistic regression.
def sigmoid(x):
    """Logistic activation: map any real input (scalar or array) into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
#This defines the standard cost function for lgistic regression.
def cost(labels, yhat):
    """Mean binary cross-entropy between 0/1 *labels* and predictions *yhat*."""
    per_sample = labels * np.log(yhat) + (1. - labels) * np.log(1. - yhat)
    return sum(-per_sample) / len(labels)
#This function returns the predicted values for each data-point, using a linear
#combination of the data-columns with the parameters theta as vector.
def ht(data, theta):
    """Hypothesis: sigmoid of the affine combination theta . [1, data].

    theta[0] is the intercept; theta[i+1] weights column i of *data*.
    Returns the per-row predicted probabilities.
    """
    z = theta[0]
    for col in range(data.shape[1]):
        z += theta[col + 1] * data[:, col]
    return sigmoid(z)
#A logistic regression algorithm with a first-order it takes the data, the
#known labels, a learning parameter, a target accuracy and maximal amount of
#iterations.
def logreg1storder(data, labels, alph=.1, tareps=10**-6., maxit=100):
    """Logistic regression via first-order (batch) gradient descent.

    data   : (n, k) feature matrix; labels : length-n array of 0/1.
    alph   : learning rate; tareps : stop when the cost change drops
             below this; maxit : maximum number of iterations.

    Returns (theta, eps, i, costs): the fitted parameters (intercept
    first), the final cost change, the iteration count, and the cost at
    every iteration (zeros beyond the last one performed).
    """
    # Initialize the needed arrays.
    n = len(data[:, 0])
    no_of_parms = len(data[0, :]) + 1
    # NOTE(review): parameters start at [0, 1, 2, ...] rather than zeros --
    # presumably arbitrary starting values; confirm this is intended.
    theta = [i for i in range(no_of_parms)]
    # Find the initial cost-function
    yhat = ht(data, theta)
    c = cost(labels, yhat)
    i = 0
    eps = 1000000000.
    costs = np.zeros(maxit)
    # For each iteration, update the parameters using the difference
    # between the predicted values and the labels (0 or 1).
    while eps > tareps and i < maxit:
        # b is the design matrix with a leading column of ones (intercept).
        b = np.ones((n, no_of_parms))
        b[:, 1:] = data
        for j in range(no_of_parms):
            update = (yhat - labels) * b[:, j]
            theta[j] -= alph * sum(update)/n
        # Find the new predicted values and the new accuracy.
        yhat = ht(data, theta)
        cn = cost(labels, yhat)
        eps = abs(cn-c)
        c = cn
        costs[i] = c
        i += 1
    return theta, eps, i, costs
| true
|
e2d88cbf9ceed9fde6b2996be7e11d8016ee4ea7
|
Python
|
Savanthravi/BMI-CALCULATOR
|
/bmi2.py
|
UTF-8
| 741
| 3.46875
| 3
|
[] |
no_license
|
# Interactive BMI calculator supporting SI (m, kg) and US (inches, pounds)
# units, followed by a WHO-style weight classification.
name = input("How do you want to enter details si or us: ")
if (name=="si"):
    Height=float(input("enter your Height in m: "))
    Weight=float(input("enter your Weight in kg: "))
    BMI=Weight/(Height*Height)
    print(BMI)
else:
    Height=float(input("enter your Height in inches: "))
    Weight=float(input("enter your Weight in pounds: "))
    # 703 converts lb/in^2 onto the metric BMI scale.
    BMI=703*Weight/(Height*Height)
    print(BMI)
# Classify the BMI. Fixed: the original last branch was "BMI <= 40", so any
# value above 40 fell through to "enter valid details"; obesity class 3 is
# BMI >= 40, so the final else now catches everything above 39.9.
if(BMI <= 18.4):
    print("you are under weight.")
elif (BMI <= 24.9):
    print("you are healthy.")
elif (BMI <= 29.9):
    print("you are over weight.")
elif (BMI <= 34.9):
    print("you are obesity class1.")
elif (BMI <= 39.9):
    print("you are obesity class2.")
else:
    print("you are obesity class3.")
| true
|
4bfcbef91b238bd15be7438bec2a864e9362244c
|
Python
|
slimpotatoes/FEM_InAsP_strain
|
/generate_elastic_stiffness_tensor.py
|
UTF-8
| 1,782
| 3.265625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Code to generate elastic stiffness tensor (Cijkl) adapted for FEniCS
# Sigma_ij = C_ijkl * epsilon_kl (4th order rank tensor in 3D space)
import numpy as np
class Generator(object):
    """Builds, rotates and exports a rank-4 elastic stiffness tensor C_ijkl."""

    def __init__(self):
        # Crystal symmetry name ('cubic' is the only supported value).
        self.symmetry = None
        # The 3x3x3x3 stiffness tensor, filled by import_elements().
        self.stiff_tensor = np.zeros((3, 3, 3, 3))
        # The independent constants, in rising-index order.
        self.cijkl_list = []

    def import_elements(self, cijkl_list, symmetry):
        """Put the cijkl in proper order (rising indices).

        For cubic symmetry, cijkl_list must hold exactly three constants
        (C11, C12, C44 in Voigt notation).
        """
        self.symmetry = symmetry
        self.cijkl_list = cijkl_list
        # Fixed: the original compared strings with 'is', which tests
        # object identity and is not guaranteed for equal string values
        # (CPython even emits a SyntaxWarning for it).
        if self.symmetry == 'cubic':
            if len(self.cijkl_list) != 3:
                print('Improper number of coefficient for cubic symmetry')
            else:
                for i in range(0, 3):
                    self.stiff_tensor[i, i, i, i] = self.cijkl_list[0]
                    for j in range(0, 3):
                        if j != i:
                            self.stiff_tensor[i, i, j, j] = self.cijkl_list[1]
                            self.stiff_tensor[i, j, i, j] = self.cijkl_list[2]
                            self.stiff_tensor[i, j, j, i] = self.cijkl_list[2]
                print('Elastic stiffness tensor imported')
        else:
            print('Non-cubic material not supported')

    def rotation_stiffness_tensor(self, P):
        """Rotate the stiffness tensor in place by the 3x3 rotation matrix *P*.

        Taken from
        https://stackoverflow.com/questions/4962606/fast-tensor-rotation-with-numpy
        user : Philipp
        P : Rotation matrix = np.array(3x3)
        """
        PP = np.outer(P, P)
        PPPP = np.outer(PP, PP).reshape(4 * P.shape)
        axes = ((0, 2, 4, 6), (0, 1, 2, 3))
        self.stiff_tensor = np.tensordot(PPPP, self.stiff_tensor, axes)

    def export_tensor(self, filename):
        """Save the tensor to *filename* in NumPy .npy format."""
        np.save(filename, self.stiff_tensor)
        print("Tensor exported ==> ", filename)
| true
|
0d5f68dd11b561f9e1d3035a2b5fd3493d0359e7
|
Python
|
nickmcadden/Kaggle
|
/NCAA/2016-17/code/dixoncoles.py
|
UTF-8
| 5,442
| 2.546875
| 3
|
[] |
no_license
|
import sys
import pandas as pd
import numpy as np
import time
import data_dixoncoles as data
import argparse
from sklearn.metrics import log_loss
from scipy.stats import norm
from scipy.special import factorial
from scipy.optimize import minimize
from scipy.stats import skellam
# Command-line options: random seed, cross-validation flag, season range.
parser = argparse.ArgumentParser(description='Dixon Coles Model')
parser.add_argument('-r','--r_seed', help='Set random seed', type=int, default=1)
parser.add_argument('-cv','--cv', action='store_true')
parser.add_argument('-sy','--start_year', type=int, default=2017)
parser.add_argument('-ey','--end_year', type=int, default=2017)
m_params = vars(parser.parse_args())

# Stage 1 spans several historical seasons; stage 2 only the current one.
if m_params["start_year"] < m_params["end_year"]:
    stage_1 = True
    stage_2 = False
else:
    stage_2 = True
    stage_1 = False

print("NCAA Machine Learning Mania 2016-17: MLE optimisation via Dixon-Coles method...\n")
def oddspredict(fixtures, att_params, def_params, hmean, amean):
    """Predict the win probability of the first-listed team in each fixture.

    Args:
        fixtures: integer array of shape (n, 2) with (team1_id, team2_id) rows.
        att_params, def_params: per-team attack/defence strengths, indexable
            arrays keyed by team id.
        hmean, amean: league-average home and away scores; their mean is
            used as the neutral-court baseline.

    Returns:
        np.ndarray of length n with P(team1 wins), counting half of the
        draw probability for team1.
    """
    neutralscore = (hmean + amean) / 2
    team1 = fixtures[:, 0]
    team2 = fixtures[:, 1]
    # Expected Poisson scores under the Dixon-Coles model, for all fixtures
    # at once -- skellam.cdf/pmf broadcast over arrays, replacing the
    # original per-fixture Python loop.
    lamda = neutralscore * att_params[team1] * def_params[team2]
    mu = neutralscore * att_params[team2] * def_params[team1]
    # P(team1 loses) and P(draw) from the Skellam score-difference law.
    px = skellam.cdf(-1, lamda, mu)
    p0 = skellam.pmf(0, lamda, mu)
    return np.asarray(1 - (px + p0 * 0.5))
def get_vec(teams, params):
    """Look up the parameter for every team id in *teams* as a NumPy array."""
    return np.array([params[team_id] for team_id in teams])
'''
def objective(params, hmean, amean):
attparams = params[:364]
defparams = params[364:]
f=0
for i in range(len(X)):
x = X[i,4] # home score
y = X[i,5] # away score
h = X[i,2] # home team
a = X[i,3] # away team
lamda = hmean * attparams[h] * defparams[a]
mu = amean * attparams[a] * defparams[h]
p = ((np.power(lamda,x)*np.exp(-lamda)) / factorial(x, exact=False)) * ((np.power(mu,y)*np.exp(-mu)) / factorial(y, exact=False))
f -= np.log(p)
return f
'''
def objective_vectorized(params, hmean, amean):
    """Negative Poisson log-likelihood of the observed scores (vectorised).

    params packs 364 attack strengths followed by 364 defence strengths;
    hmean/amean are the per-game baseline score vectors. Reads the
    module-level game array X: columns 2/3 hold home/away team ids and
    columns 4/5 the scores. Constant terms (log-factorials) are dropped
    since they do not affect the minimiser.
    """
    # attack and defense params
    attparams = params[:364]
    defparams = params[364:728]
    # distance coefficient
    #dcf = params[728]
    home_teams = X[:,2]
    away_teams = X[:,3]
    home_team_scores = X[:,4]
    away_team_scores = X[:,5]
    #travel_distances = X[:,7].astype(np.float32)
    ht_att_vec = get_vec(home_teams, attparams)
    ht_def_vec = get_vec(home_teams, defparams)
    at_att_vec = get_vec(away_teams, attparams)
    at_def_vec = get_vec(away_teams, defparams)
    # Expected scores: attack of the scoring team times defence of the
    # opponent, scaled by the baseline means.
    lamda = hmean * ht_att_vec * at_def_vec
    mu = amean * at_att_vec * ht_def_vec # - (travel_distances * dcf)
    p = np.sum(lamda) + np.sum(mu) - np.sum(home_team_scores*np.log(lamda)) - np.sum(away_team_scores*np.log(mu))
    return p
submission_probs = []
# Main driver: for each requested season, fit team attack/defence
# strengths by maximum likelihood, rank the teams, and collect win
# probabilities for the submission file.
for year in range(m_params['start_year'], m_params['end_year']+1):
    print("year:", year)
    # Load data: X = regular-season games, X_val = tournament games for
    # validation, X_sub = all potential matchups, Teams = id/name table.
    X, X_val, X_sub, Teams = data.load(year, stage_1)
    # 364 attack + 364 defence parameters, all initialised to 1.
    initparams = np.ones(728).astype(np.float32)
    # Neutral-court assumption: both sides score at the pooled mean rate;
    # the "home" side gets a flat +2 bump.
    meanhomescore = np.mean(X[:,4])
    meanawayscore = np.mean(X[:,5])
    meanhomescore_vec = np.array([(meanhomescore+meanawayscore)/2] * len(X)) + 2
    meanawayscore_vec = np.array([(meanhomescore+meanawayscore)/2] * len(X))
    print("Optimising attack and defense parameters")
    t0 = time.time()
    optim = minimize(objective_vectorized, initparams, args=(meanhomescore_vec, meanawayscore_vec), method="Powell")
    t1 = time.time()
    print(t1-t0, "seconds")
    attparams = optim['x'][:364]
    defparams = optim['x'][364:728]
    # Rank teams by attack/defence ratio for inspection.
    attparams_df = pd.DataFrame({'teamid': range(364), 'attack': attparams})
    defparams_df = pd.DataFrame({'teamid': range(364), 'defence': defparams})
    Teams = pd.merge(Teams, attparams_df, left_on=["Team_Id"], right_on=["teamid"])
    Teams = pd.merge(Teams, defparams_df, left_on=["Team_Id"], right_on=["teamid"])
    Teams['strength'] = Teams['attack'] / Teams['defence']
    # Fix: DataFrame.sort() and .ix were deprecated and later removed from
    # pandas; sort_values() and .loc are the supported equivalents.
    Teams = Teams.sort_values('strength', ascending=False)
    print(Teams.loc[:, ['Team_Id','Team_Name','attack','defence','strength']])
    if stage_1:
        print("Predicting odds based on optimised parameters")
        # Score the held-out tournament games with log loss.
        fixtures = X_val[:,2:4]
        probs = oddspredict(fixtures, attparams, defparams, meanhomescore, meanawayscore)
        X_val = np.concatenate((X_val, np.round(probs[:, None] ,2)), axis=1)
        print(X_val)
        y_val = np.array(X_val[:,4] > X_val[:,5])
        print("logloss", log_loss(y_val, probs))
    # Odds for every potential matchup this year (one submission row each).
    fixtures = X_sub[:,1:3]
    probs = oddspredict(fixtures, attparams, defparams, meanhomescore, meanawayscore)
    submission_probs.extend(probs)

print("Saving Results.")
if stage_1:
    preds = pd.read_csv("../input/sample_submission_stage1.csv")
    preds["pred"] = submission_probs
    preds.to_csv("../output/dixoncoles_stage1" + '.csv', index=False)
else:
    preds = pd.read_csv("../input/sample_submission_stage2.csv")
    preds["pred"] = submission_probs
    preds.to_csv("../output/dixoncoles_stage2" + '.csv', index=False)
| true
|
f4538e7edefa2203f5f9193e4109d18d5fa9bbac
|
Python
|
taliaa10/DigitalCrafts
|
/Assignments/6-25/test.py
|
UTF-8
| 293
| 4.03125
| 4
|
[] |
no_license
|
total = input("What is your total? $")
tip_pct = input("What percentage would you like to tip? ")


def tip_calc(total, tip_pct):
    """Return the tip amount: *tip_pct* percent of *total*."""
    return float(total) * float(tip_pct) / 100.0


total_amt = tip_calc(total, tip_pct)
print(f'The amount you should tip is ${total_amt:,.2f}')
| true
|
305c1d97c369eab96ad67877b0fc2b71a462358d
|
Python
|
Iagoakiosaito/IA-ChatBot-UFMS
|
/ChatBot/telegram_bot.py
|
UTF-8
| 2,600
| 2.609375
| 3
|
[] |
no_license
|
# Telegram ordering chatbot: wires the NLP pipeline (main_function) to
# python-telegram-bot handlers.
from token import set_token
# NOTE(review): the local "token" module shadows the stdlib token module — confirm.
from dicts import getDict_ent, getDict_price
from configs_chatbot import main_function
import logging
from telegram import Update, ForceReply
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext

# Running order (a list of order-item lists) shared by the handlers below.
# NOTE: "global" at module level is a no-op; the assignment alone suffices.
global comanda
comanda = []
# Log: timestamped INFO-level logging for the bot process.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
def start(update: Update, context: CallbackContext) -> None:
    """Handle /start: greet the user by name and prompt for an order."""
    person = update.effective_user
    greeting = fr'Olá {person.mention_markdown_v2()}, o que deseja?'
    update.message.reply_markdown_v2(greeting, reply_markup=ForceReply(selective=True))
def intention(update: Update, context: CallbackContext) -> None:
    """Route a free-text message through the NLP pipeline and reply.

    Greets on a greeting intent, appends order items to the module-global
    ``comanda`` on ordering intents, and on the "Finalizar" intent sends an
    itemised summary followed by the total price.
    """
    global preco, msg_comanda, msg_comanda_fim
    # detalhes = (reply text, detected intent, extracted order items)
    detalhes = main_function(update.message.text)
    if detalhes[1] == "Saudação":
        user = update.effective_user
        update.message.reply_markdown_v2(
            fr'Olá {user.mention_markdown_v2()}, o que deseja?',
            reply_markup=ForceReply(selective=True),
        )
    if (detalhes[1] != "Finalizar" and detalhes[1] != "Saudação"):
        update.message.reply_text(detalhes[0])
        comanda.append(detalhes[2])
    elif detalhes[1] == "Finalizar":
        dict_prec = getDict_price()
        preco = 0
        msg_comanda = ("\nCerto! \nO pedido de: ")
        # Single pass over the pending order: build the summary line and
        # accumulate the price together.  (The original looped over the
        # order twice and carried an unused counter variable.)
        for pedido in comanda:
            for item in pedido:  # item = (quantity, product name)
                msg_comanda += ("\n• {} {}".format(item[0], item[1]))
                preco += item[0] * dict_prec[item[1]]
        msg_comanda_fim = ("Preço final: R${}".format(preco))
        update.message.reply_text(msg_comanda)
        update.message.reply_text(msg_comanda_fim)
def main() -> None:
    """Start the Telegram bot and block until interrupted."""
    updater = Updater(set_token)
    dispatcher = updater.dispatcher
    # /start command -> greeting handler.
    dispatcher.add_handler(CommandHandler("start", start))
    # Any plain-text (non-command) message -> intent handler.
    dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, intention))
    updater.start_polling()
    # Keep the process alive until Ctrl-C / SIGTERM.
    updater.idle()


if __name__ == '__main__':
    main()
| true
|
85d962d972848a0de4fe6e4d2c529df6076a3a03
|
Python
|
akotek/data_final_project
|
/main.py
|
UTF-8
| 4,674
| 2.75
| 3
|
[] |
no_license
|
from data_parsing.similarity import *
import data_parsing.clustering as clustering
import data_parsing.visualization as visualizer
import matplotlib.pyplot as plt
from data_parsing.clustering import determine_num_of_clusters
from predictor.predictor import make_predictions
NUM_OF_CLUSTERS = 4
# Similarity:
# ------------------------------------------
def plot_similarity(df):
    """Render a tag cloud of similar players, weighted by distance."""
    sim, queried = run_similarity(df)
    weights = sim.set_index('Name').to_dict()['distance']
    # The queried players themselves get full weight in the cloud.
    weights.update({name: 1 for name in queried})
    visualizer.plot_tag_clouds(weights)
def run_similarity(df):
    """Prompt for players and a metric, then print and return the most
    similar players.

    :return: (similarity DataFrame, list of queried player names)

    Fixes: the user's chosen distance function was being discarded by a
    leftover ``eval_func = eval_cosine_dist`` debug override, and the
    deprecated ``DataFrame.is_copy = False`` is replaced by an explicit
    ``.copy()``.
    """
    pd.set_option('display.expand_frame_repr', False)
    eval_func, players = get_user_input()
    original_df = pd.DataFrame(df).set_index('ID')
    original_df = original_df.drop_duplicates(subset=['Name'])
    # Goalkeepers are compared on a different feature vector.
    gk_players, other_players = split_player_type(original_df, players)
    sim = pd.DataFrame()
    if len(gk_players):
        player_type_df = df[df['Position'] == 'GK'].copy()
        sim = find_similar_players(player_type_df, gk_players, original_df,
                                   GK_PLAYER_FEATURES_VECTOR, eval_func)
    if len(other_players):
        player_type_df = df[df['Position'] != 'GK'].copy()
        # NOTE(review): this overwrites the goalkeeper result when both
        # groups are non-empty — confirm whether results should be combined.
        sim = find_similar_players(player_type_df, other_players, original_df,
                                   PLAYER_FEATURES_VECTOR, eval_func)
    print(sim)
    return sim, players
def get_user_input():
    """Prompt for player names and a distance metric.

    :return: (distance function, list of stripped player names);
             unrecognised metric input falls back to cosine distance.
    """
    raw_names = input("Enter player/s name you want to compute, spare them by comma\n")
    players = [part.strip() for part in raw_names.split(",")]
    choice = input("which distance function you want to use: Cosine,"
                   " Manhattan or Euclidean?\n")
    choice = choice.strip().lower()
    if choice == 'manhattan':
        print("you chose Manhattan")
        return eval_manhatan_dist, players
    if choice == 'euclidean':
        print("you chose Euclidean")
        return eval_euclidean_dist, players
    print("you chose Cosine")
    return eval_cosine_dist, players
def split_player_type(original_df, players):
    """Partition *players* into goalkeepers and outfield players.

    A player counts as a goalkeeper when any row of *original_df* with a
    matching ``Name`` has ``Position == 'GK'``.

    :return: (goalkeeper names, other player names)
    """
    goalkeepers, outfield = [], []
    for name in players:
        positions = original_df.loc[original_df['Name'] == name, 'Position']
        bucket = goalkeepers if positions.eq('GK').any() else outfield
        bucket.append(name)
    return goalkeepers, outfield
# Clustering
# ------------------------------------------
def run_pca(df):
    """Pre-process *df* for clustering and project it onto 2 principal components.

    :return: (pre-processed DataFrame, 2-D PCA projection)
    """
    cleaned = clustering.pre_process(df)  # clustering-specific pre-processing
    projected = clustering.pca(clustering.normalize(cleaned), 2)
    return cleaned, projected
def plot_pca(df):
    """Visualise the 2-component PCA projection of *df*."""
    visualizer.plot_pca(*run_pca(df))
def run_clustering(df):
    """Cluster the PCA-projected data and attach a 'Cluster' column.

    :return: (pre-processed DataFrame, projected DataFrame with clusters)
    """
    processed_df, transformed_df = run_pca(df)
    _labels, _centroids, assignments = clustering.cluster(transformed_df, NUM_OF_CLUSTERS)
    transformed_df['Cluster'] = assignments
    print(transformed_df.head())
    return processed_df, transformed_df
def plot_clustering(df):
    """Render the cluster assignments produced by run_clustering."""
    _, clustered = run_clustering(df)
    visualizer.plot_clustering(clustered)
def clusters_distribution(df: pd.DataFrame):
    """Plot, for each playing position, a histogram of cluster membership.

    Fixes: the position list was recomputed three times and the loop
    carried an unused counter; the plot title typo ("of for") is corrected.
    """
    processed_df, transformed_df = run_clustering(df)
    clustered_df = pd.merge(transformed_df['Cluster'], processed_df,
                            left_index=True, right_index=True, how='inner')
    positions = df['Position'].dropna().unique()
    for position_name in positions:
        subset = clustered_df[clustered_df['Position'] == position_name]
        subset['Cluster'].value_counts().plot(kind='bar', rot=0)
        plt.title('Histogram of clusters for position ' + position_name + ':')
        plt.xlabel('Cluster Number')
        plt.xticks(rotation=0)
        plt.ylabel('Number of Players')
        plt.show()
# ------------------------------------------
if __name__ == "__main__":
    # Entry point: load the edited FIFA 19 player dataset, run the
    # interactive similarity search, then the match predictor.
    fifa_df = pd.read_csv(utils.relpath('csv/players_f19_edited.csv'))
    run_similarity(fifa_df)
    # Alternative analyses kept for convenience (enable as needed):
    # plot_similarity(fifa_df)
    # plot_pca(fifa_df)
    # plot_clustering(fifa_df)
    # run_clustering(fifa_df)
    # clusters_distribution(fifa_df)
    # determine_num_of_clusters(fifa_df)
    make_predictions()
| true
|
a62efcd438da500c37657f34872bc7490d540f54
|
Python
|
burak-karakus/pygta5
|
/pygta5-4.py
|
UTF-8
| 990
| 2.609375
| 3
|
[] |
no_license
|
import numpy as np
from PIL import ImageGrab
import cv2
import time
from directkeys_mac import KeyPress,KeyDown,KeyUp
def roi(img, vertices):
    """Mask *img* so only the polygon region given by *vertices* remains."""
    stencil = np.zeros_like(img)
    cv2.fillPoly(stencil, vertices, 255)  # white = keep
    return cv2.bitwise_and(img, stencil)
def process_img(org_img):
    """Convert a captured frame to Canny edges masked to the road region."""
    gray = cv2.cvtColor(np.float32(org_img), cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(np.uint8(gray), threshold1=200, threshold2=300)
    # Trapezoid covering the road ahead in the 800x600 capture window.
    region = np.array(([10,500],[10,300],[300,200],[500,200],[800, 300],[800,500]))
    return roi(edges, [region])
def main():
    """Grab the game window in a loop, show processed edges, quit on 'q'."""
    last_time = time.time()
    while True:
        frame = ImageGrab.grab(bbox=(0, 40, 800, 640))
        processed = process_img(frame)
        print('loop took {} seconds'.format(time.time()-last_time))
        last_time = time.time()
        cv2.imshow('window', processed)
        #cv2.imshow('window', cv2.cvtColor(np.array(frame), cv2.COLOR_BGR2RGB))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break


if __name__ == "__main__":
    main()
| true
|
3c27cde857082c692b55641d7995cb1c7306be97
|
Python
|
0x6b7966/mytools
|
/evaluate.py
|
UTF-8
| 516
| 3.296875
| 3
|
[] |
no_license
|
import pdb

# Score a fill on a 0-100 scale: how much of the day's high/low range the
# trade captured (100 = perfect fill).
mode = input("buy or sell? default [1]\n1.buy\n2.sell\n:>")
if mode == '2':
    # Selling: the closer to the day's high, the better.
    fill = float(input("please input your sell price:\n"))
    day_high = float(input("please input high price:\n"))
    day_low = float(input("please input low price:\n"))
    score = (fill - day_low) / (day_high - day_low) * 100
else:
    # Buying (default): the closer to the day's low, the better.
    fill = float(input("please input your buy price:\n"))
    day_high = float(input("please input high price:\n"))
    day_low = float(input("please input low price:\n"))
    score = (day_high - fill) / (day_high - day_low) * 100
print("得分:" + str(round(score)))
| true
|
ffdc08a6ca565f6acc96c188c86261065f95c1f0
|
Python
|
yoophi/sample-posts-api
|
/tests/core/domain/test_comment.py
|
UTF-8
| 550
| 2.78125
| 3
|
[] |
no_license
|
from app.core.domain.comment import Comment
def test_comment_model_init():
    """Constructing a Comment stores id and body verbatim."""
    comment = Comment(1, body="body text")
    assert comment.id == 1
    assert comment.body == "body text"
def test_comment_model_from_dict():
    """from_dict builds a Comment from a plain mapping."""
    payload = {"id": 1, "body": "body text"}
    comment = Comment.from_dict(payload)
    assert comment.id == payload["id"]
    assert comment.body == payload["body"]
def test_comment_model_to_dict():
    """to_dict round-trips the mapping the Comment was built from."""
    payload = {"id": 1, "body": "body text"}
    assert Comment.from_dict(payload).to_dict() == payload
| true
|
f40cd8939626e63bd9e5ab432812570a8e46f4d1
|
Python
|
psavine42/finaccview
|
/src/discrete/automota.py
|
UTF-8
| 1,975
| 3.03125
| 3
|
[] |
no_license
|
import numpy as np
from copy import deepcopy
class Automaton(object):
    """A walker on a 2-D integer grid.

    Tracks a position and the set of moves it may still take; each step
    removes the reverse of the move just taken so a walker cannot
    immediately backtrack.  Subclasses define the move set in _MOVES.
    """
    # Class-level move set; overridden by 4- and 8-connected subclasses.
    _MOVES = []
    def __init__(self, pos, moves=None):
        # pos: [row, col] coordinate; moves: optional per-instance move
        # set (defaults to the class-level _MOVES).
        self._pos = pos
        self._matrix = None
        # self._step_every = step_every
        self._moves = moves if moves else self._MOVES
    @property
    def pos(self):
        """Position as a numpy array view of the internal list."""
        return np.asarray(self._pos)
    def next_states(self):
        """Yield a successor automaton for each currently-allowed move.

        Each successor receives a move set with the reverse of the taken
        move removed.
        NOTE(review): the copy is taken from the class-level _MOVES, not
        from self._moves, so restrictions applied via constrain() do NOT
        propagate to successors — confirm whether that is intended.
        """
        for m in self._moves:
            new_moves = deepcopy(self._MOVES)
            new_moves.remove( (np.array(m)*-1).tolist() )
            new_pos = [self._pos[0] + m[0], self._pos[1] + m[1]]
            yield self.__class__(new_pos, moves=new_moves)
    def constrain(self, constraints):
        """Remove each move in *constraints* from this instance's move set."""
        for constraint in constraints:
            if constraint in self._moves:
                self._moves.remove(constraint)
    def __lt__(self, other):
        # Ordering by lexicographic list comparison of positions; any
        # non-Automaton operand compares False.
        if isinstance(other, self.__class__):
            if other._pos > self._pos:
                return True
        return False
    def __gt__(self, other):
        if isinstance(other, self.__class__):
            if other._pos < self._pos:
                return True
        return False
    def __eq__(self, other):
        # Equality is position-only; the move sets are ignored.
        if isinstance(other, self.__class__):
            if other._pos == self._pos:
                return True
        return False
    def __str__(self):
        return '<{}> at {}'.format(self.__class__.__name__,
                                   self._pos)
    def __hash__(self):
        # Hash of the position tuple, consistent with __eq__.  Note that
        # _pos is a mutable list, so hashing is only safe while the
        # position does not change.
        return tuple(self._pos).__hash__()
class Automaton4(Automaton):
    """4-connected automaton: moves one step up, down, right or left.

    Fix: the previous ``__init__`` override only forwarded its arguments
    unchanged to the base class, so it was removed; construction is
    inherited from Automaton.
    """
    _MOVES = [
        [0, 1],   # up
        [0, -1],  # down
        [1, 0],   # right
        [-1, 0],  # left
    ]
class Automaton8(Automaton):
    """8-connected automaton: the four cardinal moves plus diagonals.

    Fix: the previous ``__init__`` override only forwarded its arguments
    unchanged to the base class, so it was removed; construction is
    inherited from Automaton.
    """
    _MOVES = [
        [0, 1], [0, -1], [1, 0], [-1, 0],
        [1, 1], [-1, -1], [1, -1], [-1, 1]
    ]
| true
|
e039c9f711b61418c918ee93281b476c1d0ab95c
|
Python
|
ino-shan/catwellbeing
|
/users/tests/test_models.py
|
UTF-8
| 921
| 2.5625
| 3
|
[] |
no_license
|
from django.contrib.auth.models import User
from django.test import TestCase
# python manage.py test users/tests
class UserTest(TestCase):
    """Exercises creation, lookup and update of django.contrib.auth users."""

    def setUp(self):
        # Fixture user for every test (the unused local binding was dropped).
        User.objects.create_user(username="inoshan", email="inoshan@yahoo.com", password="BA3bfuf3S", first_name="Inoshan", last_name="Inoshan")

    def tearDown(self):
        # Wipe all users so tests stay independent.
        User.objects.all().delete()

    def test_user_variables(self):
        """The fixture user's fields round-trip through the database."""
        the_user = User.objects.get(username="inoshan")
        self.assertEqual(the_user.email, "inoshan@yahoo.com")
        self.assertEqual(the_user.first_name, "Inoshan")
        self.assertEqual(the_user.last_name, "Inoshan")

    def test_user_exist(self):
        """A queryset filter finds the fixture user."""
        self.assertTrue(User.objects.filter(username="inoshan").exists())

    def test_update_db(self):
        """Saving a modified field persists the change."""
        the_user = User.objects.get(username="inoshan")
        the_user.first_name = "Bruce"
        the_user.save()
        self.assertEqual(User.objects.get(username="inoshan").first_name, "Bruce")
| true
|
5df95695b9b9e200322abefe7fd73e3060026ba2
|
Python
|
batzzinga/Curso-Python
|
/Ejemplo12.py
|
UTF-8
| 294
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#! /usr/bin/python
# -*- coding: iso-8859-15 -*-
"""Dice-guessing game.

Fixes: the original mixed Python-2 print statements with tuple-style
print calls (which printed tuples), rolled the die twice per turn
(printing one roll but comparing the guess against a different one),
shadowed the function parameter, and used a wildcard import.
"""
from random import randint


def adivina(n):
    """Return a random die roll in [1, 6] (*n* kept for interface compat)."""
    return randint(1, 6)


a = int(input("Adivina el numero: "))
for i in range(a):
    roll = adivina(i)  # roll once per turn and reuse the value
    print(roll)
    if a == roll:
        print("Acertaste: ", a, i)
    else:
        print("Fallo: ", a, i)
| true
|
0f049f09728a5180d6dba89445c652a9a63b256a
|
Python
|
Ramesh1589/PythonPrograms
|
/PythonPrograms/10_dictonary_search_name.py
|
UTF-8
| 646
| 4.1875
| 4
|
[] |
no_license
|
# Build a name -> marks dictionary for N students, then serve lookups
# until the user declines to continue.
count = int(input('Enter number of students ::'))
records = {}
for _ in range(count):
    student = input('Enter student name :: ')
    score = input('Enter student marks :: ')
    records[student] = score
while True:
    student = input('Enter student name to be search :: ')
    score = records.get(student, -1)
    if score == -1:
        print('sorry Student not found....')
    else:
        print("Marks of", student, "is", score)
    answer = input(' Do you want to continue [ Yes | No ] ::')
    if answer == 'No':
        break
print('Thank you using Application...')
| true
|
9e69543637f8c4a078a27ff8ec3e823ba70218d5
|
Python
|
Manish1094/Udacity--Data_Warehouse_Project
|
/create_tables.py
|
UTF-8
| 1,432
| 3.078125
| 3
|
[] |
no_license
|
# Import Libraries
import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
# Drop Tables function will call the queries to drop the fact & dimension tables
def drop_tables(cur, conn):
    """Execute every DROP TABLE statement, committing after each one.

    INPUTS:
    * cur   database cursor
    * conn  open database connection
    """
    for statement in drop_table_queries:
        cur.execute(statement)
        conn.commit()
# Create Table function will call the queries to create the fact & dimension
def create_tables(cur, conn):
    """Execute every CREATE TABLE statement, committing after each one.

    INPUTS:
    * cur   database cursor
    * conn  open database connection
    """
    for statement in create_table_queries:
        cur.execute(statement)
        conn.commit()
def main():
    """Connect to the Redshift cluster from dwh.cfg and rebuild the schema.

    Fix: the connection is now closed in a ``finally`` block so it is not
    leaked when a drop/create statement raises.
    """
    config = configparser.ConfigParser()
    config.read('dwh.cfg')
    conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    try:
        cur = conn.cursor()
        drop_tables(cur, conn)
        create_tables(cur, conn)
    finally:
        conn.close()


if __name__ == "__main__":
    main()
| true
|
7c3b0f867637c28e29c21e9255ca4b199caaf43a
|
Python
|
kunalt4/ProblemSolvingDSandAlgo
|
/LeetCode/find_all_anagrams.py
|
UTF-8
| 322
| 3.125
| 3
|
[] |
no_license
|
class Solution:
    """LeetCode 438 — Find All Anagrams in a String."""

    def findAnagrams(self, s: str, p: str) -> 'List[int]':
        """Return the start indices of every anagram of *p* inside *s*.

        Fixes: removed leftover debug print() calls, quoted the `List`
        annotation (it was never imported, causing a NameError at import
        time), and replaced the per-window sort — O(n * m log m) — with a
        sliding character-count window, O(n).
        """
        from collections import Counter

        m, n = len(p), len(s)
        if m > n:
            return []
        need = Counter(p)
        window = Counter(s[:m])
        hits = [0] if window == need else []
        for i in range(m, n):
            window[s[i]] += 1
            out_ch = s[i - m]
            window[out_ch] -= 1
            if window[out_ch] == 0:
                del window[out_ch]  # keep the counter comparable to `need`
            if window == need:
                hits.append(i - m + 1)
        return hits
| true
|
cdd345a1377e43ee2d7c7b8a34e06db822a062e6
|
Python
|
liuw123/leetcode
|
/Q2_Add_Two_Numbers/solution.py
|
UTF-8
| 582
| 3.34375
| 3
|
[] |
no_license
|
# Definition for singly-linked list.
class ListNode(object):
    """Node of a singly linked list holding one decimal digit."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    """LeetCode 2 — Add Two Numbers (digits stored least-significant first)."""

    def addTwoNumbers(self, l1, l2):
        """Return the sum of the two numbers as a new reversed-digit list.

        Fixes: ``== None`` comparisons replaced with ``is None``; the
        original recursion allocated a fresh Solution per node and could
        exhaust the stack on long inputs — this iterative version walks
        both lists once with an explicit carry.
        """
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        dummy = ListNode(0)  # sentinel head so appends are uniform
        tail = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| true
|