blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
98e0ad4b2689f09c35d283536cf3c2c08e95e4ae
|
Python
|
9vinny/cw_api
|
/utils/donation.py
|
UTF-8
| 1,758
| 2.609375
| 3
|
[] |
no_license
|
from classes.sql_conn import SqlConn
import logging
def calculate_donor_month_total():
    """Calculate and save a donor's accumulated change amount.

    Stub — not implemented yet; returns None. Kept so callers can already
    import and wire it up.
    """
    pass
def make_donations():
    """Make donations for users who have selected drives.

    Stub — not implemented yet; returns None. Planned steps (from the
    original notes):
    """
    # get user_id and amts for active donors
    # make donation entry
    # reset donation cycle date as curr date, update accumulated amount zero, update ltd amount
    pass
# charities=[]
# try:
# db_obj=SqlConn()
# if uid==0:
# #get all charities
# print("all")
# query="Select * from charity"
# data=None
# else:
# query="Select * from charity where charity_id \
# in (Select distinct charity_id from donor_drive \
# where donor_id=%s and status = %s)"
# data = (uid,True,)
#
# result=db_obj.get_query(query,data)
# if len(result)>0:
# print("db queried")
# for record in result:
# print(record)
# result2=db_obj.get_query(query,data)
# r={'charity_id':record[0],'charityName':record[1],
# 'charityAbout': record[2],
# 'charityImageURL':record[3],
# 'charityAddress':record[4],
# 'charityCity':record[5],
# 'charityState':record[6],
# 'charityActiveDrives':record[11],
# 'charityCauses':record[12].split(",")
# }
# charities.append(r)
# else:
# raise Exception("No charities found")
# return charities
# except Exception as e:
# logging.info(e)
# raise
# finally:
# db_obj.close_conn()
| true
|
4596f01883cc85093f11ce30f373423da6cf3b41
|
Python
|
terrenceliu01/python1
|
/src/languageBasics/operator/comparison.py
|
UTF-8
| 384
| 4.0625
| 4
|
[] |
no_license
|
"""
Comparison Operators
== Equal
!= NotEqual
> Greater than
< Less than
>= Greater than or equal to
<= Less than or equal to
"""
a, b = 10, 20
f = a==b
# comparison operator always return True or False bool type.
print(type(f))
print(f)
print(a == b)
print(a != b)
print(a <= b)
print(a >= b)
print(a > b)
print(a < b)
print()
# combine comparison operator with logical operator
| true
|
18546f4f5d60c372a459ac8bb997e304717c2fb4
|
Python
|
helcerion/T1000
|
/src/t1000/infrastructure/persistence/events_in_memory_repo.py
|
UTF-8
| 3,466
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
from datetime import datetime
from ...domain.entity import Event, Events
class EventsInMemoryRepo():
    """In-memory Events repository seeded with fixed fixture rows.

    Each stored row is a dict with 'uuid', 'date' (formatted '%Y-%m-%d')
    and 'time'. When rows are read back they become Event entities whose
    event_type alternates 'entrada'/'salida' within each day.
    """

    DATE_FORMAT = '%Y-%m-%d'

    def __init__(self):
        # Fixture data; save() appends to this list.
        self._events = [
            {'uuid': 'asdf', 'date': '2019-10-01', 'time': '07:20:00'},
            {'uuid': 'qwer', 'date': '2019-10-01', 'time': '14:35:00'},
            {'uuid': 'zxcv', 'date': '2019-10-15', 'time': '07:05:30'},
            {'uuid': 'zxcv', 'date': '2019-10-15', 'time': '08:05:30'},
            {'uuid': 'zxcv', 'date': '2019-10-15', 'time': '09:05:30'},
            {'uuid': 'zxcv', 'date': '2019-10-15', 'time': '09:15:30'},
            {'uuid': 'zxcv', 'date': '2019-10-15', 'time': '10:05:30'},
            {'uuid': 'zxcv', 'date': '2019-10-16', 'time': '07:05:30'},
        ]

    def get_from_date(self, date: str) -> Events:
        """Return the Events recorded exactly on *date*."""
        return self.__find(init=date, end=date)

    def get_from_interval(self, init: str = '', end: str = '') -> Events:
        """Return Events in [init, end]; an empty string leaves that bound open."""
        init = None if init == '' else init
        end = None if end == '' else end
        return self.__find(init=init, end=end)

    def find_all(self):
        """Return every stored Event."""
        return self.__find(all=True)

    def save(self, event: Event):
        """Append *event*'s uuid/date/time to the store; always returns True."""
        self._events.append(
            {'uuid': event.uuid, 'date': event.date, 'time': event.time}
        )
        return True

    def __find(self, init: str=None, end: str=None, all: bool=False):
        """Build Event entities for every stored row matching the date filter.

        The three __date_* helpers cover mutually exclusive cases (both
        bounds set / only end / only init); `all` bypasses filtering.
        NOTE(review): the parameter name `all` shadows the builtin.
        """
        events = []
        # Alternation pair: even positions -> 'entrada', odd -> 'salida'.
        event_type = ('entrada', 'salida')
        event_num = 0
        last_event = None
        for event in self._events:
            if self.__date_between_dates(init, end, event['date']) or \
               self.__date_before(init, end, event['date']) or \
               self.__date_after(init, end, event['date']) or all is True:
                # Restart the in/out alternation whenever the day changes.
                # NOTE(review): only the day-of-month is compared, so equal
                # day numbers in different months would not reset — confirm.
                if last_event is None or \
                   last_event != self.__get_day(event['date']):
                    event_num = 0
                event_entity = Event(
                    uuid=event['uuid'],
                    date=event['date'],
                    time=event['time'],
                    event_type=event_type[event_num % 2]
                )
                event_num += 1
                last_event = self.__get_day(event['date'])
                events.append(event_entity)
        return Events(events)

    @classmethod
    def __date_between_dates(cls, init: str, end: str, date: str):
        """True when both bounds are set and init <= date <= end."""
        date_between_dates = False
        if init is not None and end is not None and \
           cls.__get_date(date) >= cls.__get_date(init)\
           and cls.__get_date(date) <= cls.__get_date(end):
            date_between_dates = True
        return date_between_dates

    @classmethod
    def __date_before(cls, init: str, end: str, date: str):
        """True when only `end` is set and date <= end."""
        date_before = False
        if init is None and end is not None and \
           cls.__get_date(date) <= cls.__get_date(end):
            date_before = True
        return date_before

    @classmethod
    def __date_after(cls, init: str, end: str, date: str):
        """True when only `init` is set and date >= init."""
        date_after = False
        if init is not None and end is None and \
           cls.__get_date(date) >= cls.__get_date(init):
            date_after = True
        return date_after

    @classmethod
    def __get_date(cls, date: str):
        """Parse *date* into a datetime.date."""
        return datetime.strptime(date, cls.DATE_FORMAT).date()

    @classmethod
    def __get_day(cls, date: str):
        """Return the day-of-month of *date*."""
        return datetime.strptime(date, cls.DATE_FORMAT).day
| true
|
cf605449ce61daf7a1553117138e91b2e70cd37f
|
Python
|
juanmatg/practica
|
/6-kyu/denumerate string.py
|
UTF-8
| 439
| 3.34375
| 3
|
[] |
no_license
|
def denumerate(enum_list):
    """Rebuild a string from (index, character) pairs.

    Returns the reconstructed string, or False when the pairs do not form a
    complete 0..n-1 enumeration of single alphanumeric characters (or the
    input is malformed / empty).
    """
    res = ''
    try:
        # Sort by index so the characters can be concatenated in order.
        enum_list = sorted(enum_list, key=lambda x: x[0])
        # bug fix: `xrange` is Python-2-only (NameError on Python 3);
        # enumerate gives the position directly.
        for i, pair in enumerate(enum_list):
            # Each entry must be exactly (i, <single alphanumeric char>).
            if pair[0] != i or len(pair) != 2 or not pair[1].isalnum() or len(pair[1]) != 1:
                return False
            res += pair[1]
        return res if res != '' else False
    except Exception:
        # Narrowed from a bare `except:`; malformed input (non-iterable
        # entries, non-string characters, ...) still maps to False.
        return False
| true
|
141df951d7139f7ed6f25a38d6301ff578329aa1
|
Python
|
josephramsay/anzlic-validator
|
/setup.py
|
UTF-8
| 3,153
| 2.96875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
anzlic-validator setup.py run by
'sudo python setup.py'
Will Check the current system has the correct versions:
- QGIS v2.18
- PyQT v4
- Koordinates python module is installed, if not will install.
- Python v2.7
- An Api file exists, .apikey
Then move the anzlic-validator directory to the correct
HOME/.qgis2/python/plugins directory.
"""
import pip
import os
import sys
import subprocess
# Import guards: warn on the console (instead of crashing at import time)
# when the environment lacks the required QGIS / PyQt versions.
try:
    import qgis.utils
except Exception as e:
    print ("Error Incorrect QGIS Version {}".format(e))
try:
    import PyQt4
except Exception as e:
    print ("Error Incorrect PyQt Version {}".format(e))
def install():
    """
    Check Python, QGIS Version, create .apikey file if doesn't already exist,
    move anzlic-validator from current directory to the QGIS Plugin Directory.

    Raises Exception with a descriptive message on any failed step.
    :return: None
    """
    # Python Version
    if '2.7' not in sys.version:
        raise Exception("Error Incorrect Python Version {}".format(sys.version))
    # QGIS Version
    try:
        if '2.18' not in qgis.utils.QGis.QGIS_VERSION:
            raise Exception(
                "Got Version {}".format(qgis.utils.QGis.QGIS_VERSION))
    except Exception as er:
        raise Exception("Error Incorrect QGIS Version {}".format(er))
    # Koordinates Module (Exists or is installed)
    try:
        packages = [package.project_name for package in
                    pip.get_installed_distributions()]
        if 'koordinates' not in packages:
            print ('Installing Koordinates')
            subprocess.call("sudo python2.7 -m pip install koordinates",
                            shell=True)
    except Exception as er:
        raise Exception("Error Installing Koordinates Module: {}".format(er))
    # .apikey File (Exists or is created)
    home = None
    try:
        home = os.getenv('HOME')
        if not os.path.isfile(home+'/.apikey'):
            print ('Creating File .apikey in {}'.format(home))
            # bug fix: the file was opened in the default *read* mode and then
            # written to, which raises an error before the key template is
            # ever created; open for writing instead.
            with open(home+'/.apikey', 'w') as f:
                f.write('key0=API_KEY')
            print ('Remember to change text "API_KEY" in ' +
                   '{} to your LDS API KEY'.format(home+'/.apikey'))
    except Exception as er:
        # message fix: './n' was a typo for the '\n' newline escape
        raise Exception(
            "Error Creating .apikey file in {}.\n{}".format(home, er))
    # Move anzlic-validator from current directory to QGIS Plugin Directory
    try:
        cwd = os.getcwd()
        home = os.getenv('HOME')
        name = cwd.split('/')[len(cwd.split('/'))-1]
        from_dir = cwd
        to_dir = '{}/.qgis2/python/plugins/{}'.format(home, name)
        if from_dir != to_dir:
            print ('Moving From "{}" To "{}"'.format(from_dir, to_dir))
            # Copy as the invoking (non-root) user, then remove the original.
            os.system(
                'sudo -u "$SUDO_USER" cp -rf {} {}'.format(from_dir, to_dir))
            os.system('sudo rm -r {}'.format(from_dir))
    except Exception as er:
        raise Exception("Error Moving anzlic-validator to qgis2 plugin " +
                        "directory.\n{}".format(er))
if __name__ == "__main__":
try:
install()
print ("Setup Complete")
except Exception as e:
print ("Setup Incomplete")
print (e)
| true
|
d6332035dd99f8beca6ca7ffb5472a45150ffb05
|
Python
|
alex-i-git/LearnPython
|
/bot.py
|
UTF-8
| 4,800
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# version 0.1
# Добавляем функцию подсчета слов
# Usage: /wcount word1 word2 ...
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from datetime import date, datetime
import ephem
import csv
#Updater - the connection to Telegram
#CommandHandler - handles bot commands
#MessageHandler - handles plain messages
#Filters - filters out everything except text messages
def start(bot, update):  # logs to the console when /start is used in the Telegram client
    """Handle /start: log the call and greet the user."""
    print("Вызван /start")
    bot.sendMessage(update.message.chat_id, text="Привет, человек! Я бот, который помогает")
def get_answer(user_key, user_dict):
    """Return the canned reply stored under *user_key* (KeyError if absent)."""
    reply = user_dict[user_key]
    return reply
# Canned replies for free-text messages; keys are normalized user input.
dialog = {"привет": "И тебе привет!", "как дела": "Лучше всех", "пока": "Увидимся",
          "добрый день": "здравствуйте"}

def talk_to_me(bot, update):
    """Reply to a plain-text message via the `dialog` lookup table.

    Input is lower-cased and stripped; an unknown phrase raises an
    unhandled KeyError inside get_answer.
    """
    # print('Incoming message: %s' % update.message.text)
    user_input = (((update.message.text).lower()).rstrip()).lstrip()
    bot.sendMessage(update.message.chat_id, get_answer(user_input, dialog))
def word_count(bot, update, args):
    """Handle /wcount: reply with the number of words given as arguments."""
    total = len(args)
    bot.sendMessage(update.message.chat_id, "В фразе " + str(total) + " слов")
def division(a, b):
    """Return a/b; on division by zero return the string "Division by zero"."""
    try:
        result = a / b
    except ZeroDivisionError:
        return "Division by zero"
    return result
# Arithmetic command with a division-by-zero check. Assumes exactly two
# operands and one operator, all separated by spaces.
def calc(bot, update, args):
    """Handle /calc: evaluate "arg1 op arg2" where op is one of + - * /.

    Replies with the numeric result, the division-by-zero message, or a
    usage hint for empty/unknown input.
    """
    if len(args) == 0:
        bot.sendMessage(update.message.chat_id, "Usage: /calc arg1 operation arg2 =")
        # bug fix: previously fell through and crashed on args[0]
        return
    a = float(args[0])
    b = float(args[2])
    op = str(args[1])
    if op == '+':
        c = a + b
    elif op == '-':
        c = a - b
    elif op == '*':
        c = a * b
    elif op == '/':
        c = division(a, b)
    else:
        # bug fix: an unknown operator previously left `c` unbound and
        # raised UnboundLocalError on the send below
        c = "Usage: /calc arg1 operation arg2 ="
    bot.sendMessage(update.message.chat_id, c)
# Calculator that understands numbers spelled out as Russian words.
def wicalc(bot, update, args):
    """Handle /wicalc: evaluate "<number-word> <operation-word> <number-word>".

    Knows the Russian words for 1..10 and the operations минус/плюс/
    умножить/разделить. NOTE(review): if the arguments contain no known
    operation word, `math_operation` is never bound and the first `if`
    raises UnboundLocalError; likewise `result` if fewer than two number
    words are found — confirm whether a usage message should be sent.
    """
    nums = {"один":1, "два":2, "три":3, "четыре":4, "пять":5, "шесть":6, "семь":7, "восемь":8, "девять":9, "десять":10}
    operations = ["минус", "плюс", "умножить", "разделить"]
    l=list()
    # Collect recognized operands in order; remember the last operation word.
    for i in args:
        if i in nums.keys():
            l.append(nums[i])
        if i in operations:
            math_operation = i
    if math_operation == "минус":
        result = l[0]-l[1]
    elif math_operation == "плюс":
        result = l[0]+l[1]
    elif math_operation == "умножить":
        result = l[0]*l[1]
    elif math_operation == "разделить":
        result = division(l[0],l[1])
    bot.sendMessage(update.message.chat_id, result)
def fullmoon(bot, update, args):
    """Handle /fullmoon: reply with the next full moon after the given date.

    Uses the last argument as the ephem-parseable start date and logs both
    the request and the reply via logger().
    """
    logger(update.message.chat.username, update.message.text)
    bot.sendMessage(update.message.chat_id, str(ephem.next_full_moon(args[-1])))
    logger(update.message.chat.username, str(ephem.next_full_moon(args[-1])))
def hmdays(bot, update, args):
    """Handle /hmdays: reply with the day count to 2017-01-01, with the
    grammatically correct Russian plural of "day".

    NOTE(review): the target date is in the past, so `ng - now` is negative
    and the printed count carries a minus sign — confirm whether the target
    year should be updated. Also, `quantity` stays unbound (and int(i)
    raises ValueError) if the count's last character is not a digit.
    """
    ng = datetime(2017, 1, 1)
    now = datetime.now()
    logger(update.message.chat.username, update.message.text)
    # Russian plural form of "day", keyed by the last digit of the count.
    num = {1: "день", 2: "дня", 3: "дня", 4: "дня", 5: "дней", 6: "дней", 7: "дней", 8: "дней", 9: "дней", 0: "дней"}
    # str(timedelta) starts with "N days, ..." — take the leading number.
    days = ((str(ng - now)).split())[0]
    i = days[-1]
    if int(i) in num.keys():
        quantity = str(num[int(i)])
    bot.sendMessage(update.message.chat_id, days + ' ' + quantity)
    log_data = days + ' ' + quantity
    logger(update.message.chat.username, log_data)
def logger(username, log_data):
    """Append one semicolon-separated line (date;time;username;data) to bot.log."""
    log_file = 'bot.log'
    # bug fix: the format was '%H:%m:%S' — %m is the *month*; %M is minutes.
    now = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
    with open(log_file, 'a', encoding='utf-8') as f:
        date_part, time_part = now.split(' ')
        # bug fix: DictWriter.writeheader() was abused to emit the data as
        # "field names"; write an ordinary row instead (same ';'-joined
        # output). The redundant f.close() inside `with` is also gone.
        writer = csv.writer(f, delimiter=';')
        writer.writerow([date_part, time_part, username, log_data])
def run_bot():
    """Wire up the Telegram handlers and start long polling (blocks)."""
    # SECURITY NOTE(review): the bot token is hard-coded and committed; it
    # should be loaded from an environment variable and the token rotated.
    updater = Updater("195034229:AAG8LDc4Q-O0NL991wza6ovbwQKVZ1zT2Rk")
    dp = updater.dispatcher
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("wcount", word_count, pass_args=True))
    dp.add_handler(CommandHandler("calc", calc, pass_args=True))
    # Plain (non-command) text messages go to the dialog responder.
    dp.add_handler(MessageHandler([Filters.text], talk_to_me))
    dp.add_handler(CommandHandler("wicalc", wicalc, pass_args=True))
    dp.add_handler(CommandHandler("fullmoon", fullmoon, pass_args=True))
    dp.add_handler(CommandHandler("hmdays", hmdays, pass_args=True))
    updater.start_polling()  # poll Telegram for incoming messages
    updater.idle()

if __name__ == '__main__':
    run_bot()
| true
|
d822042380f54ef9f0d596163048b5cac479cc4c
|
Python
|
ashleymcnamara/LearnPythonTheHardWay
|
/ex36.py
|
UTF-8
| 3,819
| 3.65625
| 4
|
[] |
no_license
|
from sys import exit
def joe():
print "Joe has a beautiful woman in his sights, but she is diseased!"
print "which disease does she have? herpes, aids, or the clap?"
next = raw_input()
if next == "herpes":
std_treatment()
elif next == "aids":
dead("Joe is DEAD!")
if next == "clap":
std_treatment()
else:
dead("Joe is DEAD!")
def welcome_back():
print "Joe has overcome his disease but can he do it again? Don't underestimate how dumb Joe really is."
print "Joe hires a hooker but she is also diseased, what disease does the hooker have??"
print "aids, warts, or plague"
next = raw_input()
if next == "aids":
dead("Joe contracts AIDS and dies a slow, painful, death.")
elif next == "warts":
std_treatment()
if next == "plague":
dead("Joe contracts the plague and dies.")
else:
std_treatment()
def std_treatment():
print "Joe has contracted a non-life threatening disease. How is his disease treated?"
print "Pills, Creams, Or Dick Scraping??"
next = raw_input()
if next == "creams":
std_clinic()
elif next == "pills":
std_clinic()
if next == "dick scraping":
std_clinic()
else:
dead("Joe goes untreated and dies a painful death.")
def std_clinic():
    """Ask how long the treatment lasts and branch to the matching scene.

    Substring match, so e.g. "maybe a week" also counts.
    """
    print "Joe is being treated for his STD. How long will he be out of commission?"
    print "a week, a month, or a year?"
    next = raw_input()
    if "a week" in next:
        week_treatment()
    elif "a month" in next:
        month_treatment()
    elif "a year" in next:
        year_treatment()
    else:
        dead("Joe decided to go right back to being a whore and dies from infection.")
def month_treatment():
    """One-month recovery: does Joe decide to use condoms?"""
    print "Joe is out of commission for a month and is already making plans to get back to fucking."
    print "Joe considers using condoms to prevent further spreading but isn't sure he cares."
    print "Doe he decide to use the condoms? Yes or No?"
    next = raw_input()
    # Substring match, so e.g. "yes please" also counts as yes.
    if "yes" in next:
        safety_zone()
    elif "no" in next:
        danger_zone()
    else:
        dead("Joe dies from indecision, his brain is just too small.")
def week_treatment():
print "Joe is out of commission for a week but can't stiffle that sex-aholic feeling so he masterbates furiously!"
print "How many times a day does Joe masterbate?"
next = raw_input()
if next > 100:
safety_zone()
elif next < 50:
dead("Joe masterbates himself to death.")
else:
danger_zone()
def year_treatment():
    """One-year recovery: keep masturbating to stop the spread?"""
    print "Joe is out of commission for an entire year and the thought of having to masterbate for that long makes him furious!"
    print "Does Joe continue to masterbate in order to stop the spread of infection?"
    print "Yes or No"
    next = raw_input()
    if next == "yes":
        safety_zone()
    elif next == "no":
        danger_zone()
    else:
        dead("Joe is proving he isn't capeable of making decisions and dies.")
def danger_zone():
print "Even though the Dr's at the clinic suggested that he take a fuck break, Joe thinks he knows better."
print "How many others will Joe infect?"
next = raw_input()
if next > 100:
safety_zone()
elif next < 50:
dead("Joe causes an STD outbreak that spreads world wide. Killing millions, including himself")
else:
start()
def safety_zone():
    """Joe is healing: wait it out (new round) or relapse (death)."""
    print "Joe is making some rare wise decisions and his infection is clearing up!"
    print "Does Joe wait to fully heal? Yes or No?"
    next = raw_input()
    if next == "yes":
        welcome_back()
    elif next == "no":
        dead("Joe refuses to wait until he is fully healed and dies from infection.")
    else:
        dead("Joe dies from indecision.")
def dead(why):
    """Print the reason for the death and terminate the game."""
    print why, "Because Joe is an idiot."
    exit(0)
def start():
print "Joe has an STD, It's either life treatening or not"
print "Which one is it, life of death?"
next = raw_input()
if next == "life":
joe()
elif next == "death":
dead("Joe contracted a deadly disease.")
else:
joe("Joe is confused about his condition.")
start()
| true
|
dd18762ec144c39d18ea7b5eb4d74f7a07fb2ae3
|
Python
|
Quynhbh122/BuiHuongQuynh-Fundermantals-C4E32
|
/L1/Homework/area.py
|
UTF-8
| 55
| 3.296875
| 3
|
[] |
no_license
|
import math

# Read the circle's radius and report its area.
r = int(input('radius?'))
# Use math.pi instead of the hand-written 3.14 approximation.
a = r * r * math.pi
print('area=', a)
| true
|
eb5923f1aad3c6d502880415f1c5a6b8c3d90cc4
|
Python
|
ngodoy/asoc_members
|
/website/members/management/commands/import_members.py
|
UTF-8
| 2,430
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
import csv
import os
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from members.models import Person, Member, Category
class Command(BaseCommand):
    """Import members from a CSV exported from the Google Spreadsheet data.

    Each row yields (or updates) a Person, its Category and the Member that
    links them, keyed by the member's legal_id.
    """
    help = "Import members from csv generated with data from Google Spreadsheet"

    def add_arguments(self, parser):
        """Register the positional path of the CSV file to import."""
        parser.add_argument('filename', type=str)

    def handle(self, *args, **options):
        """Read the CSV and upsert one Member (plus Person/Category) per row.

        Raises CommandError when the filename is missing or does not exist.
        """
        if options['filename'] is None:  # idiom fix: was `== None`
            raise CommandError("You must specify the path of file.")
        # make sure file path resolves
        if not os.path.isfile(options['filename']):
            # message fix: was the broken "File has no exists."
            raise CommandError("File does not exist.")
        # fix: close the file deterministically instead of leaking the handle
        with open(options["filename"]) as csv_file:
            data_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
            for member_value in data_reader:
                person = self.get_or_create_person(member_value)
                category = self.get_or_create_category(member_value)
                member, created = Member.objects.update_or_create(
                    legal_id=member_value[0].strip(),
                    defaults={
                        # bug fix: %M parses *minutes*; %m is the month in a
                        # day/month/year date
                        'registration_date': datetime.strptime(member_value[1].strip(), "%d/%m/%Y"),
                        'category': category,
                        'person': person,
                        'has_student_certificate': category.name == 'Estudiante',
                        'has_subscription_letter': True
                    })
                self.stdout.write("Member imported: {}".format(member))

    def get_or_create_category(self, values):
        """Return the Category named in column 5, creating it with fee 0."""
        category, created = Category.objects.get_or_create(
            name=values[5].strip(),
            defaults={
                'description': values[5].strip(),
                'fee': 0
            })
        return category

    def get_or_create_person(self, values):
        """Upsert the Person identified by first/last name and email."""
        person, created = Person.objects.update_or_create(
            first_name=values[3].strip(),
            last_name=values[4].strip(),
            email=values[7].strip(),
            defaults={
                'document_number': values[6].strip(),
                'nickname': values[8].strip(),
                'nationality': values[10].strip(),
                'marital_status': values[11].strip(),
                'occupation': values[12].strip(),
                # bug fix: %M -> %m, same month/minute confusion as above
                'birth_date': datetime.strptime(values[13].strip(), "%d/%m/%Y"),
                'street_address': values[14].strip()
            })
        return person
| true
|
449b16c129c391575332415d49df7f0c80e46423
|
Python
|
BilalQadar/Data_visualization
|
/main.py
|
UTF-8
| 3,507
| 3.234375
| 3
|
[] |
no_license
|
from framework import *
import numpy as np
if __name__ == "__main__":
parameters = ("#763dff",'-',"#000000",'--', 3)
destination_folder = "/Users/bilalqadar/Documents/GitHub/Data_visualization/saved_figures/"
# x_bully = [2007,2009,2010,2011,2013,2014,2015,2016,2019]
# y_bully = [18.8,21.5,20.8,29.2,24.1,34.6,34.0,33.6,36.5]
# bullying_title = "Cyberbullying Victimization Rates (2007-2019)"
# bullying_plot = DataVis(bullying_title)
# bullying_plot.add_caption("Figure 1: Percentage of Toronto students who identify with being bullied each year. Dataset is of random Toronto middle and highschool students (N=3000).")
# bullying_plot.add_data(x_bully,y_bully)
# bullying_plot.add_label("x","Year")
# bullying_plot.add_label("y", "Bullying Rate (%)")
# bullying_plot.plot(0,parameters,3,False,destination_folder)
# cyber_title = "Cyberbullying Related Google Searches (2004-2019)"
# file_location ="/Users/bilalqadar/Documents/GitHub/Data_visualization/csv_files/cyberbullying.csv"
# cyber_trend = DataVis(cyber_title)
# cyber_trend.add_caption("Figure 2: Candian Google searches related to cyberbullying since 2004. Youth sometimes don’t feel comfortable identifying being bullied. The data is normalized with 100 being the peak number of searches and all other data points scaled appropriately.")
# cyber_trend.add_data_csv(file_location, 2, "Month", "Cyberbullying")
# cyber_trend.add_label("y", "Normalized Interest")
# cyber_trend.plot(0,parameters,3,False,destination_folder)
insta_title = "Instagram Bullying Related Google Searches (2006-2019)"
file_location ="/Users/bilalqadar/Documents/GitHub/Data_visualization/csv_files/instaBully.csv"
insta_trend = DataVis(insta_title)
insta_trend.add_caption("Figure 3: Canadian Google searches related to instagram bullying since 2006. The data is normalized with 100 being the peak number of searches and all other data points scaled appropriately.")
insta_trend.add_data_csv(file_location, 2, "Month", "instagram bullying: (Worldwide)")
insta_trend.add_label("y", "Normalized Interest (%)")
insta_trend.add_label("x","Time (Years)")
insta_trend.plot(48,parameters,3,False,destination_folder)
# y_vc = [1147.0,2613.0,3297.0,4093.0,5425.0,9334.0]
# x_vc = [2013,2014,2015,2016,2017,2018]
# vc_title = "Total VC funding for AI & ML startups (2013-2018)"
# vc_plot = DataVis(vc_title)
# vc_plot.add_caption("Figure 4: Amount of VC funding for AI & Machine learning startups over time. As trendline suggest VC's investing in machine learning is growing linearly with respect to time.")
# vc_plot.add_data(x_vc,y_vc)
# vc_plot.add_label("x","Year")
# vc_plot.add_label("y", "VC funding (Millions)")
# vc_plot.plot(0,parameters,1,False,destination_folder)
# howML_title = "How to Learn Machine Learning Related Google Searches (2004-2019)"
# file_location ="/Users/bilalqadar/Documents/GitHub/Data_visualization/csv_files/howML.csv"
# howML = DataVis(howML_title)
# howML.add_caption("Figure 5: anadian Google searches related to learning about machine learning since 2006. The data is normalized with 100 being the peak number of searches and all other data points scaled appropriately.")
# howML.add_data_csv(file_location, 2, "Month", "how to learn machine learning: (Worldwide)")
# howML.add_label("y", "Normalized Interest")
# howML.plot(0,parameters,3,False,destination_folder)
| true
|
534b82ac77f309ae4ee9060a9305aa3725639cd2
|
Python
|
Erniejie/PYTHON---multiply-two-matrices-using-nested-loops
|
/2021-08-20_Python_Program to Multiply two Matrices using Nested Loops.py
|
UTF-8
| 619
| 4.0625
| 4
|
[] |
no_license
|
# Program to multiply two matrices using nested loops
"Computer Programmming Tutor, 17th August 2021"

# 3x3 matrix (left operand)
A = [[5, 3, 2],
     [2, 3, 4],
     [3, 4, 3]]

# 3x4 matrix (right operand)
B = [[2, 4, 1, 2],
     [3, 2, 3, 0],
     [3, 2, 5, 1]]

# The product of a 3x3 and a 3x4 matrix is a 3x4 matrix: each entry is the
# dot product of a row of A with a column of B.
result = [[sum(A[row][k] * B[k][col] for k in range(len(B)))
           for col in range(len(B[0]))]
          for row in range(len(A))]

# Print the product one row per line.
for row in result:
    print(row)
| true
|
86fbeadb29247b0b99e509ae8701937379c52b0b
|
Python
|
VenishaDias/Leaf-Classification
|
/hog.py
|
UTF-8
| 1,353
| 2.828125
| 3
|
[] |
no_license
|
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import imutils
import math
from scipy import ndimage
def foreground(image):
    """Keep only pixels inside the configured BGR colour boundaries.

    Masks the image against the (dark-to-green) boundary range and returns
    the masked copy; everything outside the range becomes black.
    """
    boundaries = [([0, 0, 0], [100, 255, 100])]
    # loop over the boundaries
    for (lower, upper) in boundaries:
        # create NumPy arrays from the boundaries
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")
        # find the colors within the specified boundaries and apply the mask
        # bug fix: OpenCV is imported as `cv` in this module, but these two
        # calls used the undefined name `cv2`, so every call raised NameError
        mask = cv.inRange(image, lower, upper)
        output = cv.bitwise_and(image, image, mask=mask)
    return output
# construct the argument parse and parse the arguments
MIN_AREA=200  # NOTE(review): unused in this script
test=5        # NOTE(review): unused in this script
hog = cv.HOGDescriptor()
image_path = "test/" + "23" + ".jpg"
# load the image
image = cv.imread(image_path)
fixed_size = tuple((500, 500))
image = cv.resize(image, fixed_size)
image = foreground(image)
#boundaries = [([0,0,0],[100,255,100])]
#grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#h = hog.compute(grey)
# NOTE(review): `cnt` is the whole image array, not a contour — the argmin/
# argmax lookups below index pixel *values*, so these "extreme points" are
# unlikely to be the geometric extremes the names suggest; confirm intent.
cnt=image
leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
# Show the masked image until a key is pressed.
cv.imshow("hog", cnt)
cv.waitKey(0)
cv.destroyAllWindows()
'''
print(h)
print(len(h))
'''
| true
|
e3c8278858c088dc20afb97d74f3dd5df253c62c
|
Python
|
Zahidsqldba07/codefights-2
|
/intro/throughTheFog/stringsRearrangement.py
|
UTF-8
| 877
| 4.21875
| 4
|
[] |
no_license
|
"""
Given an array of equal-length strings, check if it is possible to rearrange the strings in such a way that after the rearrangement the
strings at consecutive positions would differ by exactly one character.
Example
For inputArray = ["aba", "bbb", "bab"], the output should be
stringsRearrangement(inputArray) = false;
All rearrangements don't satisfy the description condition.
For inputArray = ["ab", "bb", "aa"], the output should be
stringsRearrangement(inputArray) = true.
Strings can be rearranged in the following way: "aa", "ab", "bb".
"""
from itertools import permutations
def diff(w1, w2):
    """Return True when w1 and w2 differ in exactly one aligned position."""
    mismatches = 0
    for c1, c2 in zip(w1, w2):
        if c1 != c2:
            mismatches += 1
    return mismatches == 1


def stringsRearrangement(inputArray):
    """Check whether some ordering of the strings makes every consecutive
    pair differ by exactly one character (brute force over permutations)."""
    needed = len(inputArray) - 1
    for ordering in permutations(inputArray):
        good_pairs = sum(diff(a, b) for a, b in zip(ordering, ordering[1:]))
        if good_pairs == needed:
            return True
    return False
| true
|
fa052383097c3b66bd82a24959675196fb7636a0
|
Python
|
nadjacarolyneckert/basictrack_2021
|
/Week 4/Homework Part 1/4.9.4.py
|
UTF-8
| 380
| 3.75
| 4
|
[] |
no_license
|
import turtle

# Set up the drawing window and a pink pen.
paper = turtle.Screen()
leonardo = turtle.Turtle()
leonardo.color("pink")
def draw_poly(draw_turtle, number_of_sides, size):
    """Draw a regular polygon: `number_of_sides` sides of length `size`."""
    turn = 360 / number_of_sides
    for _side in range(number_of_sides):
        draw_turtle.forward(size)
        draw_turtle.left(turn)
# Spirograph effect: draw 20 squares, rotating 18 degrees between each.
for _ in range(20):
    draw_poly(leonardo, 4, 150)
    leonardo.left(360 / 20)
paper.exitonclick()  # keep the window open until it is clicked
| true
|
e50475089e1c42e98c41aac74792adc85fe71f7d
|
Python
|
linlin547/UI_25_11
|
/Base/data.py
|
UTF-8
| 478
| 2.75
| 3
|
[] |
no_license
|
import os, json
class Data:
    """Parse test-data files from the project's Data directory."""

    @classmethod
    def get_json_data(cls, file):
        """
        Parse a JSON file.
        :param file: file name inside the project's Data directory
        :return: the decoded JSON data
        """
        # Open the JSON file (path is relative to the working directory).
        with open("./Data" + os.sep + file, "r", encoding="utf-8") as f:
            # Decode with the json library.
            return json.load(f)

    def get_csv_data(self):
        """Parse a CSV file. Stub — not implemented; returns None."""
|
a8602b820d3ec6b200fbc0fb1d8a793e743ce7cb
|
Python
|
tikhomirovd/2sem_Python3
|
/rk/rk_python.py
|
UTF-8
| 2,038
| 2.765625
| 3
|
[] |
no_license
|
import sys, pygame
import time
pygame.init()

def smoke():
    """Animate a puff of smoke drifting up and out of the chimney.

    Draws grey ellipses on the module-level `screen`, sleeping 0.25 s per
    step (~50 steps), so a single call blocks for roughly 12 seconds.
    """
    i = 0
    x = 170
    y = 380
    while i < 10:
        # Two puffs spreading sideways while rising.
        pygame.draw.ellipse(screen, grey, [x-i, y-3*i, 15, 20])
        pygame.draw.ellipse(screen, grey, [x+i, y-3*i, 15, 20])
        x, y = x-i, y-3*i
        time.sleep(0.25)
        pygame.draw.ellipse(screen, grey, [x+10, y-8, 15, 20])
        y -= 8
        pygame.display.update()
        i += 0.2
    pygame.display.flip()
##def UFO():
## x01 = 650
## y01 = 120
## x02 = 665
## y02 = 100
##
## while i < 30:
## x_1 = 3(x - x01)+x01
## y_1 = 3(y - y01)+y01
## x_2 = 3(x - x02)+x02
## x_2 = 3(y - y01)+y02
## pygame.draw.ellipse(screen, light_green, [x_1, y_1, 60, 30])
## pygame.draw.ellipse(screen, light_green, [x_2, y_2, 30, 40])
##
# Window size and the RGB colour palette used by the scene.
size = 800, 680
red = 200, 26, 65
white = 255, 255, 255
roof = 255, 0, 0
light_green = 200, 255, 200
grey = 83, 80, 89
sky = 204, 204, 255
green = 0, 128, 0
house = 215, 110, 0
yellow = 247, 242, 42
# Main window surface plus a clock/timer pair (currently unused).
screen = pygame.display.set_mode(size)
clock = pygame.time.Clock()
milli = seconds = 0.0
# Main loop: redraw the static house scene, then run the (blocking) smoke
# animation once per frame; with time.sleep(1) each frame takes ~13 s.
# Runs forever until the window's close button fires pygame.QUIT.
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
    screen.fill(sky)
    # Grass, house body, roof, two windows, door and chimney.
    pygame.draw.polygon(screen, green, [(0, 680), (0, 580), (800, 580), (800, 680)])
    pygame.draw.polygon(screen, house, [(150, 580), (150, 450), (350, 450), (350, 580)])
    pygame.draw.polygon(screen, roof, [(150, 450), (350, 450), (250, 400)])
    pygame.draw.polygon(screen, sky, [(170, 500), (170, 470), (200, 470), (200, 500)])
    pygame.draw.polygon(screen, yellow, [(220, 580), (220, 510), (250, 510), (250, 580)])
    pygame.draw.polygon(screen, sky, [(270, 500), (270, 470), (300, 470), (300, 500)])
    pygame.draw.polygon(screen, roof, [(170, 450), (170, 400), (200, 400), (200, 450)])
    time.sleep(1)
    smoke()
    pygame.display.update()
    pygame.display.flip()
| true
|
6b892aa8254b7301d5aa53f01589e0cde60f572e
|
Python
|
binbinErices/python_crawler
|
/maoyan_top100/code/test.py
|
UTF-8
| 916
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@author:binbinzhang
@file: test.py
@time: 2018/04/27
@email:binbin_Erices@163.com
@function:测试使用webdriver拿网页数据
"""
import datetime
from selenium import webdriver
from openpyxl import load_workbook
# bug fix: list.append() returns None, so `service_args` was always None and
# the option below was silently dropped.
service_args = ['--load-images=false']  # disable image loading
driver = webdriver.PhantomJS(executable_path="./phantomjs", service_args=service_args)
driver.get('http://maoyan.com/films/246433')
# text2 = driver.find_element_by_class_name("dra").text
# print(text2)
# actor = driver.find_elements_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/div[1]/div[2]/div[2]/div/div/ul/li/div/a')
# actors =""
# for i in range(len(actor)):
#     str = actor[i].text +" "
#     actors += str
#     print(actors)
# print(actors)
# Grab the poster image URL (renamed from `str`, which shadowed the builtin).
src = driver.find_element_by_xpath('/html/body/div[3]/div/div[1]/div/img[@class="avatar"]').get_attribute('src')
print(src)
| true
|
ec24623d44580908bb849e0c562c67df204c60df
|
Python
|
XuLongjia/PyTorchLearning
|
/start4.py
|
UTF-8
| 2,511
| 3.578125
| 4
|
[] |
no_license
|
#1. Learn how to do regression with PyTorch
#2. Learn the two ways of saving a model
#3. Learn DataLoader
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Training data: 100 points of y = x^2 plus Gaussian noise.
x = torch.linspace(-1, 1, 100)
# torch.squeeze() removes size-1 dimensions:
# squeeze(a) drops every size-1 dim; a.squeeze(N) / torch.squeeze(a, N)
# drop only dimension N.
# torch.unsqueeze(x, dim=1) inserts a new dimension.
x = torch.unsqueeze(x, dim=1)  # shape (100,) -> (100, 1)
y = x.pow(2) + 0.2 * torch.randn(x.size())
# Variable is the legacy autograd wrapper (a no-op on modern tensors).
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy(), y.data.numpy())
# plt.show()
class Net(nn.Module):
    """Minimal one-hidden-layer MLP: linear -> ReLU -> linear."""

    def __init__(self, n_features, n_hidden, n_output):
        super(Net, self).__init__()
        # Attribute names are part of the saved state_dict, so keep them.
        self.hidden = nn.Linear(n_features, n_hidden)
        self.predict = nn.Linear(n_hidden, n_output)

    def forward(self, x):
        hidden_activation = F.relu(self.hidden(x))
        return self.predict(hidden_activation)
net = Net(1, 10, 1)
print(net)  # print the network architecture

plt.ion()  # interactive mode so the plot updates live during training
plt.show()

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_fn = nn.MSELoss()
# 100 SGD steps of plain regression on the noisy y = x^2 data.
for t in range(100):
    prediction = net(x)
    loss = loss_fn(prediction, y)  # mean-squared-error loss
    print(t, loss.item())
    optimizer.zero_grad()  # clear the accumulated gradients
    loss.backward()        # backpropagate
    optimizer.step()       # SGD parameter update
    if t % 5 == 0:
        # Redraw the fit every 5 steps.
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
plt.ioff()
plt.show()
# Two ways of saving a model.
# 1) Save the entire model object:
torch.save(net, 'net.pkl')
# ...and load it back:
net2 = torch.load('net.pkl')
# 2) Save only the parameters (state dict):
torch.save(net.state_dict(), 'net_para.pkl')
# To load the parameters, first build an identically-structured network:
net3 = Net(1, 10, 1)
net3.load_state_dict(torch.load('net_para.pkl'))  # more efficient; recommended
import torch.utils.data as Data
# NOTE(review): the data_tensor/target_tensor keyword arguments were removed
# from TensorDataset in modern PyTorch (modern API: TensorDataset(x, y));
# confirm the installed version, otherwise this raises TypeError.
torch_data = Data.TensorDataset(data_tensor=x, target_tensor=y)
loader = Data.DataLoader(
    dataset=torch_data,
    batch_size=10,
    shuffle=True,
    num_workers=2,
)
| true
|
dc70a866afc0e6d3cdc8701b1b68e13a65cea99e
|
Python
|
TeBeau/AlgorithmsExamples
|
/longest_palindromic_sequene.py
|
UTF-8
| 812
| 3.796875
| 4
|
[] |
no_license
|
import random
def palindrome(s):
    """Return the length of the longest palindromic subsequence of *s*.

    Works on any indexable sequence (string, list, ...). Classic O(n^2)
    interval dynamic programming.
    """
    n = len(s)
    # dp[i][j] holds the answer for the inclusive slice s[i..j].
    dp = [[0] * n for _ in range(n)]
    # Every single element is a palindrome of length 1.
    for i in range(n):
        dp[i][i] = 1
    # Fill the table by increasing window length.
    for window in range(2, n + 1):
        for i in range(n - window + 1):
            j = i + window - 1
            if s[i] == s[j] and window == 2:
                dp[i][j] = 2
            elif s[i] == s[j]:
                dp[i][j] = dp[i + 1][j - 1] + 2
            else:
                dp[i][j] = max(dp[i][j - 1], dp[i + 1][j])
    return dp[0][n - 1]
# Demo: a small fixed list, then a 1000-element random list.
s = [7, 2, 4, 6, 9, 11, 2, 6, 10, 6, 15, 6, 14, 2, 7, 5, 13, 9, 12, 15]
print("small palindrome len is... ", palindrome(s))
# NOTE: `long` was a builtin type name in Python 2; harmless here in Python 3.
long = [random.randrange(1, 100, 1) for n in range(1000)]
print("long palindrome len is... ", palindrome(long))
# turns out this is typically 170-180
| true
|
10ca0d73627b8b9ff0780ac49e49bd86646aa381
|
Python
|
slavkoBV/solved-tasks-SoftGroup-course
|
/rock_scissors_paper.py
|
UTF-8
| 709
| 4.03125
| 4
|
[] |
no_license
|
import random
items = {1: 'rock', 2: 'paper', 3: 'scissors'}
def select_winner(user_choice):
    """Play one rock/paper/scissors round against the computer and print the outcome.

    *user_choice* is the raw user input: '1' (rock), '2' (paper) or
    '3' (scissors); anything else is rejected.  Always returns None.
    """
    if user_choice not in ('1', '2', '3'):
        return print('Make correct choice!')
    # BUG FIX: randrange(1, 3, 1) only ever yields 1 or 2, so the computer
    # could never pick scissors.  randint(1, 3) covers all three moves.
    computer_choice = random.randint(1, 3)
    print('Computer choice: ', items[computer_choice])
    # (user - computer) mod 3: 0 = draw, 1 = user wins, 2 = user loses.
    who_win = (int(user_choice) - computer_choice) % 3
    if who_win == 0:
        print('draw')
    elif who_win == 1:
        print('You won!')
    else:
        print('You lost')
# Main loop: keep playing rounds until the user declines another one.
while True:
    select_winner(input('Make your choice (1 - rock, 2 - paper, 3 - scissors): '))
    if input('Want you more? Y, y or N, n: ') not in ('Y', 'y'):
        break
print('Bye!')
| true
|
8d40b566d0dc612995ed67873ac10b196a1df2be
|
Python
|
wbl1996/python
|
/spiders/beautifulsoup_urlretrive_spider.py
|
UTF-8
| 758
| 2.734375
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
import urllib.request
url = "https://tieba.baidu.com/p/5639915974"
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
def get_html(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    A browser-like User-Agent is sent because some servers reject the
    default urllib client string.
    """
    # Consistency fix: use the module-level user_agent constant instead of
    # repeating the same literal here (the constant was previously unused).
    req = urllib.request.Request(url, headers={'User-Agent': user_agent})
    res = urllib.request.urlopen(req)
    return res.read().decode("utf-8")
def download(html):
    """Extract every post image URL from *html* and save each to F:\\ as <index>.jpg."""
    soup = BeautifulSoup(html, 'lxml')
    images = soup.select("div.post_bubble_middle_inner img.BDE_Image")
    for index, image in enumerate(images):
        urllib.request.urlretrieve(image['src'], "f:\\%s.jpg" % index)
if __name__ == '__main__':
    # Fetch the page, then download every image it contains.
    page = get_html(url)
    print("正在下载图片...")
    download(page)
    print("图片下载完成!")
| true
|
0b58000dd482b8cec8d7372c65a661ab665fb359
|
Python
|
bmuller/readembedability
|
/readembedability/parsers/oembed.py
|
UTF-8
| 2,906
| 2.609375
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
import json
from bs4 import BeautifulSoup
from robostrippy.utils import absolute_url
from readembedability.parsers.base import BaseParser
from readembedability.io import get_page
class OEmbedParser(BaseParser):
    """Enrich a parse result with metadata from the page's oEmbed endpoint."""

    async def enrich(self, result):
        """Fetch the page's oEmbed document and fold its fields into *result*.

        Returns *result* unchanged when the page has no parsed soup or no
        oEmbed endpoint.  The integer passed to ``result.set`` is a
        confidence value — higher appears to mean more authoritative
        (NOTE(review): confirm against BaseParser's contract).
        """
        if self.soup is None:
            return result
        oembed = await get_embed_from_content(self.response)
        if oembed is None:
            return result
        if 'author_name' in oembed:
            result.set('authors', [oembed['author_name']], 3)
        result.set_if('title', oembed.get('title'))
        result.set_if('primary_image', oembed.get('thumbnail_url'))
        # Don't trust oembed articles because they're probably crap, like
        # NYTimes oembeded articles that are just iframes.
        # isarticle is True when the asset is *not* flagged as an article.
        isarticle = oembed.get('asset_type', '').lower() != 'article'
        if 'html' in oembed and isarticle:
            # if this is a wordpress embed, then let's not call it
            # embedded and use the actual content
            if "Embedded WordPress Post" not in oembed['html']:
                result.set('embed', True)
            # only lock if the html field actually contains html
            lock = ">" in oembed['html'] and "<" in oembed['html']
            conf = 3 if lock else 2
            result.set('content', oembed['html'], conf)
        elif 'url' in oembed and oembed['type'] == 'photo':
            # Photo-type oEmbed: synthesise an <img> tag as the content.
            result.set('embed', True)
            result.set('content', "<img src='%s' />" % oembed['url'], 3)
            result.set('title', oembed.get('title', result.get('title')))
            result.set('primary_image', oembed['url'], 3)
        return result
def _parse_xml(xml):
    """Parse an XML oEmbed payload into a plain dict keyed by element name."""
    # bs4 can't handle a nested <html> child element, so rename it first.
    xml = xml.replace('<html>', '<content>').replace('</html>', '</content>')
    parsed = {}
    for child in BeautifulSoup(xml, "html.parser").oembed.children:
        # Map the renamed element back to the oEmbed field name 'html'.
        key = 'html' if child.name == 'content' else child.name
        parsed[key] = child.text
    return parsed
def _parse_json(jsonstr):
    """Decode *jsonstr* as an oEmbed dict; return None when invalid or not an object."""
    try:
        decoded = json.loads(jsonstr)
    except ValueError:
        return None
    # Per the spec at oembed.com the payload must be an object; some bad
    # websites return lists of gobbligook.
    if isinstance(decoded, dict):
        return decoded
    return None
async def get_embed_from_content(response):
    """Discover and fetch the oEmbed document advertised by *response*.

    Looks for a ``<link>`` of type application/json+oembed or
    text/xml+oembed, downloads the linked document and returns it parsed
    into a dict.  Returns None when the body is missing/too short, no
    endpoint is advertised, or the fetch fails.
    """
    page = response.body
    if page is None or len(page) < 10:  # too short to contain a <link> element
        return None
    soup = BeautifulSoup(page, "html.parser")
    types = ('application/json+oembed', 'text/xml+oembed')
    link = soup.find('link', type=types, href=True)
    # Pick the parser that matches the advertised content type.
    if link and link['type'] == 'application/json+oembed':
        parser = _parse_json
    elif link:
        parser = _parse_xml
    else:
        return None
    # The href may be relative; resolve it against the page's own URL.
    url = absolute_url(response.url, link['href'])
    page = await get_page(url)
    return parser(page.body) if page else None
| true
|
dc006bf341e02e633ae01dec54de94eff555dbc3
|
Python
|
SebastianRatanczuk/Uczelnia
|
/main.py
|
UTF-8
| 1,808
| 2.859375
| 3
|
[] |
no_license
|
# Sebastian Ratańczuk 44476
import numpy as np, math, random
from sklearn import datasets
from sklearn.model_selection import train_test_split
class MLP:
    """Minimal one-hidden-layer perceptron (work in progress).

    Parameters
    ----------
    hidden : int
        Number of hidden units.
    epochs, eta : int, float
        Training epochs and learning rate — accepted but not yet used by
        ``fit`` (training proper is not implemented).
    shuffle : bool
        Shuffle the training set (features and labels together) in ``fit``.
    """

    def __init__(self, hidden=100, epochs=100, eta=0.1, shuffle=True):
        self.hidden = hidden
        self.epochs = epochs
        self.eta = eta
        self.shuffle = shuffle
        # BUG FIX: the original computed 1 / (1 + e**x), i.e. sigmoid(-x).
        # The logistic sigmoid is 1 / (1 + e**-x); np.exp also broadcasts
        # over arrays, unlike math.exp.
        self._sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))

    def _forward(self, x):
        """Propagate *x* through the hidden and output layers.

        Returns (hidden_activation, output_activation), both sigmoid-activated.
        """
        hidden_net = x.dot(self.w_h) + self.b_h
        hidden_act = self._sigmoid(hidden_net)
        out_net = hidden_act.dot(self.w_out) + self.b_out
        out_act = self._sigmoid(out_net)
        return hidden_act, out_act

    def _compute_cost(self, y, output):
        """Cross-entropy cost summed over samples and classes (in bits).

        BUG FIX: the original negated only the ``y*log2(output)`` term;
        both terms of the cross-entropy must be negated.
        """
        return np.sum(-(y * np.log2(output) + (1 - y) * np.log2(1 - output)))

    def fit(self, x_train, y_train):
        """Initialise weights, optionally shuffle, and print the initial cost.

        NOTE(review): epochs/eta/backpropagation are not implemented yet —
        this performs a single diagnostic forward pass only.
        """
        self.w_h = np.random.normal(0, 0.1, (x_train.shape[1], self.hidden))
        self.b_h = np.zeros(self.hidden)
        self.w_out = np.random.normal(0, 0.1, (self.hidden, y_train.shape[1]))
        self.b_out = np.zeros(y_train.shape[1])
        if self.shuffle:
            # Shuffle features and one-hot labels together so rows stay aligned.
            combined = np.hstack((x_train, y_train))
            np.random.shuffle(combined)
            x_train = combined[:, :x_train.shape[1]]
            y_train = combined[:, x_train.shape[1]:]
        _, out = self._forward(x_train)
        print(self._compute_cost(y_train, out))

    def predict(self, x):
        """Return the network's output activations for *x* (was an empty stub)."""
        _, out = self._forward(x)
        return out
# Demo: load the iris data set, one-hot encode the labels and run MLP.fit.
irys = datasets.load_iris()
irys_data = irys.data
irys_labels = irys.target
# One-hot encode the three class labels into an (n_samples, 3) matrix.
irys_labels_coded = np.zeros((irys_labels.size, 3))
for i in range(irys_labels.size):
    irys_labels_coded[i, irys_labels[i]] = 1
# Fixed random_state keeps the train/test split reproducible.
x_train, x_test, y_train, y_test = train_test_split(irys_data, irys_labels_coded, random_state=13)
mlp = MLP()
mlp.fit(x_train, y_train)
| true
|
ce7d33321d5f78514e2c3edc4d9e026cc7e0b6fa
|
Python
|
ksdivesh/python-test-project
|
/console-app/main.py
|
UTF-8
| 786
| 3.390625
| 3
|
[] |
no_license
|
from models.Item import Item
from models.ItemCategory import ItemCategory
# category = ItemCategory(category_id=1)
# result = category.get()
# print(result)
# category_name = input("Enter category name")
#
# itemCategory = ItemCategory(category_name = category_name)
# itemCategory.insert()
#
# print("Category inserted successfully {}".format(itemCategory.category_id))
#
# item_name = input('Please enter item name')
# item_price = input('Please enter price')
#
# item = Item(category_id=itemCategory.category_id, item_name=item_name, item_price=item_price)
# item.insert()
#
# print("Item created successfully with ID {}" .format(item.item_id))
# item1 = Item(item_name='Item 1', item_price=200)
# item1.insert()
#
# print('Item inserted with ID {}'.format(item1.item_id))
| true
|
949f0cf006561d56751e95d73cc2cfbe5d8986ce
|
Python
|
DanielDworakowski/flot
|
/rasp/blimp_ws/src/blimp_control/src/PID.py
|
UTF-8
| 2,161
| 3.046875
| 3
|
[] |
no_license
|
import time
class PID:
    """Simple PID controller with output clamping and integral anti-windup.

    Typical use: construct with gains and output limits, call ``setRef``
    with the target value, then call ``getCmd(measurement)`` periodically;
    it returns the clamped control command.
    """

    def __init__(self, P=0.0, I=0.0, D=0.0, min_=0.0, max_=0.0):
        self.k_p = P
        self.k_i = I
        self.k_d = D
        self.min_ = min_
        self.max_ = max_
        self.current_time = time.time()
        self.last_time = self.current_time
        self.clear()

    def clear(self):
        """Clears PID computations and coefficients."""
        self.ref = 0.0
        self.p_error = 0.0
        self.i_error = 0.0
        self.d_error = 0.0
        self.last_error = 0.0
        self.command = 0.0

    def getCmd(self, meas):
        """Compute and return the clamped PID command for measurement *meas*.

        Raises ValueError when no time has elapsed since the previous call
        (the derivative term would divide by zero).

        Bug fixes relative to the original:
        - ``k_i``, ``i_error`` and ``self.output`` were referenced without
          ``self.`` or did not exist, so every call raised NameError;
        - the integral accumulated ``k_i * error`` while the command also
          multiplied by ``k_i``, applying the gain twice — the integral now
          stores the plain error integral;
        - the anti-windup clamp used ``+=`` instead of ``=``, which would
          have added the clamped value on top of the integral;
        - the computed command is now returned (it was computed and dropped).
        """
        error = self.ref - meas
        self.current_time = time.time()
        delta_time = self.current_time - self.last_time
        delta_error = error - self.last_error
        # Prevent division by zero in the derivative term.
        if delta_time <= 0.0:
            raise ValueError('delta_time less than or equal to 0')
        # Integrate the error, then clamp the integral (anti-windup).
        self.i_error += error * delta_time
        self.i_error = min(max(self.i_error, self.min_), self.max_)
        # Proportional and derivative terms.
        self.p_error = error
        self.d_error = delta_error / delta_time
        # Combine the three terms and clamp the output to [min_, max_].
        self.command = (self.k_p * self.p_error) + (self.k_i * self.i_error) + (self.k_d * self.d_error)
        self.command = min(max(self.command, self.min_), self.max_)
        # Remember last time and last error for next calculation.
        self.last_time = self.current_time
        self.last_error = error
        return self.command

    def setKp(self, proportional_gain):
        """Proportional Gain"""
        self.k_p = proportional_gain

    def setKi(self, integral_gain):
        """Integral Gain"""
        self.k_i = integral_gain

    def setKd(self, derivative_gain):
        """Derivative Gain"""
        self.k_d = derivative_gain

    def setMinMax(self, min_value, max_value):
        """Set the output (and integral) clamping limits."""
        self.min_ = min_value
        self.max_ = max_value

    def setRef(self, reference):
        """Set the reference (setpoint) value."""
        self.ref = reference
| true
|
ab902b0602b621f20153bb94ffbe4e6db5f965eb
|
Python
|
michaelkerr/influence_api
|
/tests/api_test_system.py
|
UTF-8
| 16,898
| 2.75
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# api_test_system.py
""" System functional testing """
#from itertools import combinations
import json
from types import *
import unittest
from urllib2 import Request, urlopen
server_ip = '127.0.0.1'
server_port = '5000'
base_url = 'http://' + server_ip + ':' + server_port + '/metrics/centrality?'
api_key = '45fd499-07be-4d92-93b3-d47f4607506d'
base_params = {'start_date': '20140301', 'end_date': '20140301', 'network': 'twitter.com',
'metric': 'degree', 'key': '45fd499-07be-4d92-93b3-d47f4607506d'}
def build_query(query_url, query_dict):
    """Append the entries of *query_dict* to *query_url* as ``&key=value`` pairs.

    Entries with empty values are skipped.  Values for keys containing
    'project' or 'topic' are percent-encoded via ``html_encode``; all
    other values are appended verbatim.
    """
    query = query_url
    # .items() instead of .iteritems(): works on both Python 2 and 3.
    for key, value in query_dict.items():
        if len(value) > 0:
            if ('project' in key) or ('topic' in key):
                query += '&' + key + '=' + html_encode(value)
            else:
                query += '&' + key + '=' + value
    return query
def html_encode(to_encode):
    """Percent-encode the reserved/unsafe characters of *to_encode*.

    Fixes relative to the original mapping:
    - '#' now encodes to %23 (was %22) and '"' to %22 (was %27);
    - "'" encodes to %27;
    - the original dict listed '[' and '-' twice, silently dropping two
      entries — ']' (%5D) and '_' (%5F) are now mapped correctly.
    """
    # TODO handle the backslash
    replace_dict = {
        ' ': '%20', '!': '%21', '"': '%22', '#': '%23', '$': '%24',
        '%': '%25', '&': '%26', "'": '%27', '(': '%28', ')': '%29',
        '*': '%2A', '+': '%2B', ',': '%2C', '-': '%2D', '.': '%2E',
        '/': '%2F', '[': '%5B', ']': '%5D', '^': '%5E', '_': '%5F',
        '`': '%60', '{': '%7B', '|': '%7C', '}': '%7D', '~': '%7E',
    }
    encoded = ''
    for entry in to_encode:
        # Characters outside the table pass through unchanged.
        encoded += replace_dict.get(entry, entry)
    return encoded
def query_api(query):
    """Issue an HTTP GET for *query*.

    Returns the open response on success; on failure returns the raised
    exception object itself (callers compare ``str(...)`` against HTTP
    error strings).
    """
    try:
        return urlopen(Request(query))
    except Exception as error:
        return error
# Tests for all
class TestBasicQuery(unittest.TestCase):
    """General API centrality query check for correct responses."""

    def setUp(self):
        self.url = str(base_url)
        self.params = dict(base_params)

    def test_no_error(self):
        """A valid base query must not be rejected as unauthorized."""
        response = query_api(build_query(self.url, self.params))
        self.assertNotEqual(str(response), 'HTTP Error 401: UNAUTHORIZED')

    def test_valid_response(self):
        """A valid base query must return the standard JSON envelope."""
        payload = json.load(query_api(build_query(self.url, self.params)))
        # Metric values vary between runs; only the envelope is compared.
        payload['result']['metrics'] = {}
        self.assertEqual(payload, {u'result': {u'metrics': {}}})
class TestAPIKey(unittest.TestCase):
    """API key tests, check for correct response."""

    def setUp(self):
        self.url = str(base_url)
        self.params = dict(base_params)

    def test_key_missing(self):
        """A request without a key must be rejected as unauthorized."""
        del self.params['key']
        response = query_api(build_query(self.url, self.params))
        self.assertEqual(str(response), 'HTTP Error 401: UNAUTHORIZED')

    def test_empty_key(self):
        """A request with an empty key must be rejected as unauthorized."""
        self.params['key'] = ''
        response = query_api(build_query(self.url, self.params))
        self.assertEqual(str(response), 'HTTP Error 401: UNAUTHORIZED')
class TestRequiredParameters(unittest.TestCase):
    """Required parameter tests, check for correct response."""

    def setUp(self):
        self.url = str(base_url)
        self.params = dict(base_params)

    def _assert_bad_request(self):
        """Issue the current query and assert it is rejected with HTTP 400."""
        response = query_api(build_query(self.url, self.params))
        self.assertEqual(str(response), 'HTTP Error 400: BAD REQUEST')

    def test_missing_start(self):
        """Checking API response for missing start_date."""
        del self.params['start_date']
        self._assert_bad_request()

    def test_missing_end(self):
        """Checking API response for missing end_date."""
        del self.params['end_date']
        self._assert_bad_request()

    def test_missing_network(self):
        """Checking API response for missing network."""
        del self.params['network']
        self._assert_bad_request()

    def test_missing_metric(self):
        """Checking API response for missing metric."""
        del self.params['metric']
        self._assert_bad_request()

    def test_invalid_metric(self):
        """Checking API response for an unrecognised metric name."""
        self.params['metric'] = 'test_metric'
        self._assert_bad_request()

    def test_reversed_dates(self):
        """Checking API response for end_date before start_date."""
        self.params['start_date'] = '20140302'
        self.params['end_date'] = '20140301'
        self._assert_bad_request()
class TestOptional(unittest.TestCase):
    """Centrality, optional parameter tests.

    Every combination of the optional filters (matched_project,
    matched_topic, scored_project, scored_topic) is exercised twice:
    once asserting the API does not reject the request, and once
    asserting a well-formed JSON payload comes back.  The repeated
    request/assert boilerplate lives in two private helpers.

    Fixes relative to the original:
    - two tests set 'matched_toppic' (typo) instead of 'matched_topic';
    - a stray Python-2 ``print`` statement (a py3 syntax error) was removed.
    """

    def setUp(self):
        self.params = dict(base_params)
        self.url = str(base_url)

    def _assert_not_rejected(self, rejection='HTTP Error 400: BAD REQUEST'):
        """Issue the current query; assert it is not refused with *rejection*."""
        response = query_api(build_query(self.url, self.params))
        self.assertNotEqual(str(response), rejection)

    def _assert_valid_json(self):
        """Issue the current query; assert the standard result envelope comes back."""
        json_data = json.load(query_api(build_query(self.url, self.params)))
        # Metric values vary between runs; only the envelope is compared.
        json_data['result']['metrics'] = {}
        self.assertEqual(json_data, {u'result': {u'metrics': {}}})

    def test_matched_project_HTTP(self):
        """Valid matched project, expect no HTTP 400."""
        self.params['matched_project'] = 'CBW'
        self._assert_not_rejected()

    def test_matched_project_JSON(self):
        """Valid matched project, expect valid JSON."""
        self.params['matched_project'] = 'CBW'
        self._assert_valid_json()

    def test_matched_topic_HTTP(self):
        """Valid matched topic, expect no HTTP 400."""
        # TODO Pick GCC topic to test against
        self.params['matched_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_matched_topic_JSON(self):
        """Valid matched topic, expect valid JSON."""
        # TODO Pick GCC topic to test against
        self.params['matched_topic'] = 'test'
        self._assert_valid_json()

    def test_scored_project_HTTP(self):
        """Valid scored project, expect no HTTP 400."""
        # TODO Pick GCC project to test against
        self.params['scored_project'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_scored_project_JSON(self):
        """Valid scored project, expect valid JSON."""
        # TODO Pick GCC project to test against
        self.params['scored_project'] = 'test'
        self._assert_valid_json()

    def test_scored_topic_HTTP(self):
        """Valid scored topic, expect no HTTP 400."""
        self.params['scored_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_scored_topic_JSON(self):
        """Valid scored topic, expect valid JSON."""
        self.params['scored_topic'] = 'test'
        self._assert_valid_json()

    def test_mp_mt_HTTP(self):
        self.params['matched_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mp_mt_JSON(self):
        self.params['matched_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self._assert_valid_json()

    def test_mp_sp_HTTP(self):
        self.params['matched_project'] = 'test'
        self.params['scored_project'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mp_sp_JSON(self):
        self.params['matched_project'] = 'test'
        self.params['scored_project'] = 'test'
        self._assert_valid_json()

    def test_mp_st_HTTP(self):
        # Note: this test historically asserted against 401, not 400.
        self.params['matched_project'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_not_rejected('HTTP Error 401: UNAUTHORIZED')

    @unittest.skip('no data, returns 416')
    def test_mp_st_JSON(self):
        self.params['matched_project'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_valid_json()

    def test_mt_sp_HTTP(self):
        self.params['matched_topic'] = 'test'
        self.params['scored_project'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mt_sp_JSON(self):
        self.params['matched_topic'] = 'test'
        self.params['scored_project'] = 'test'
        self._assert_valid_json()

    def test_mt_st_HTTP(self):
        # Typo fix: the original set 'matched_toppic'.
        self.params['matched_topic'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mt_st_JSON(self):
        # Typo fix: the original set 'matched_toppic'.
        self.params['matched_topic'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_valid_json()

    def test_sp_st_HTTP(self):
        self.params['scored_project'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_sp_st_JSON(self):
        self.params['scored_project'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_valid_json()

    def test_mp_mt_sp_HTTP(self):
        self.params['matched_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self.params['scored_project'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mp_mt_sp_JSON(self):
        self.params['matched_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self.params['scored_project'] = 'test'
        self._assert_valid_json()

    def test_mp_mt_st_HTTP(self):
        self.params['matched_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mp_mt_st_JSON(self):
        self.params['matched_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_valid_json()

    def test_mp_sp_st_HTTP(self):
        self.params['matched_project'] = 'test'
        self.params['scored_project'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mp_sp_st_JSON(self):
        self.params['matched_project'] = 'test'
        self.params['scored_project'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_valid_json()

    def test_mp_sp_mt_st_HTTP(self):
        self.params['matched_project'] = 'test'
        self.params['scored_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_not_rejected()

    @unittest.skip('no data, returns 416')
    def test_mp_sp_mt_st_JSON(self):
        self.params['matched_project'] = 'test'
        self.params['scored_project'] = 'test'
        self.params['matched_topic'] = 'test'
        self.params['scored_topic'] = 'test'
        self._assert_valid_json()
# Tests for Metrics, Centrality
# All Degree metrics - test normalized and not
# Closeness, Betweenness, Pagerank test normalized and not
# Skip Eigenvector
# Check for error 416 - no results
'''
{'start_date': '20140301', 'end_date': '20140301', 'network': 'twitter.com', 'metric': 'in_degree'},
#{'start_date': '20140301', 'end_date': '20140301', 'network': 'mturkgrind.com', 'metric': 'in_degree'},
{'start_date': '20140301', 'end_date': '20140301', 'network': 'twitter.com', 'metric': 'out_degree'},
{'start_date': '20140301', 'end_date': '20140301', 'network': 'twitter.com', 'metric': 'closeness'},
{'start_date': '20140301', 'end_date': '20140301', 'network': 'twitter.com', 'metric': 'betweenness'},
#{'start_date': '20140301', 'end_date': '20140301', 'network': 'twitter.com', 'metric': 'eigenvector'},
{'start_date': '20140301', 'end_date': '20140301', 'network': 'twitter.com', 'metric': 'pagerank'}
]
'''
# Tests for Metrics, Other
# Test Twitter
# Test Forums
# Tests for Graph
| true
|
cfe6258989b562229da14ff5454d043bcefb5908
|
Python
|
emilmanolov/holidayextras
|
/models/user.py
|
UTF-8
| 2,462
| 3.234375
| 3
|
[] |
no_license
|
import re
class User(object):
    """A user record: email address plus forename and surname.

    ``id`` and ``created`` start as None; they are filled in later by
    whatever persists the record.
    """

    def __init__(self, email='', forename='', surname=''):
        self.id = None
        self.created = None
        self.email = email
        self.forename = forename
        self.surname = surname

    def __str__(self):
        return '<User %s>' % (self.id,)

    __repr__ = __str__
class UserValidator(object):
    """Validate a User by delegating to injected email- and name-validators."""

    def __init__(self, email_validator, name_validator):
        self.email_validator = email_validator
        self.name_validator = name_validator

    def validate(self, user):
        """True when the user's email, forename and surname all pass."""
        return (self.validate_email(user.email)
                and self.validate_forename(user.forename)
                and self.validate_surname(user.surname))

    def validate_email(self, email):
        return self.email_validator.validate(email)

    def validate_forename(self, forename):
        return self.name_validator.validate(forename)

    def validate_surname(self, surname):
        return self.name_validator.validate(surname)

    @classmethod
    def factory(cls):
        """Build a validator wired with the default email/name validators."""
        return cls(EmailAddressValidator(DomainValidator()), NameValidator())
class NameValidator(object):
    """A name is valid when, stripped of whitespace, it is 3-50 characters long."""

    def validate(self, name):
        stripped = name.strip()
        return 3 <= len(stripped) <= 50
class DomainValidator(object):
    """ Domain names may be formed from the set of alphanumeric ASCII
    characters (a-z, A-Z, 0-9), but characters are case-insensitive.
    In addition the hyphen is permitted if it is surrounded by characters
    or digits, i.e. it is not the start or end of a label. Labels are always
    separated by the full stop (period) character in the textual name representation.
    """

    # Each label is either plain alphanumeric or hyphenated with
    # alphanumeric endpoints; the final label is a 2-63 letter TLD.
    _PATTERN = r'^(([a-z0-9]{1,63}\.)|([a-z0-9][a-z0-9\-]{1,61}[a-z0-9]\.))+[a-z]{2,63}$'

    def validate(self, domain):
        if len(domain) > 253:
            return False
        return re.search(self._PATTERN, domain, re.IGNORECASE) is not None
class EmailAddressValidator(object):
    """Validate e-mail addresses of the form mailbox@domain."""

    def __init__(self, domain_validator):
        self.domain_validator = domain_validator

    def validate(self, email):
        """True when both the mailbox and the domain part are acceptable."""
        match = re.search(r'^([a-z0-9.\-_]+)@([a-z0-9.\-]+)$', email, re.IGNORECASE)
        if match is None:
            return False
        mailbox, domain = match.group(1), match.group(2)
        return self.validate_mailbox(mailbox) and self.validate_domain(domain)

    def validate_domain(self, domain):
        return self.domain_validator.validate(domain)

    def validate_mailbox(self, mailbox):
        # The local part of an address is limited to 64 characters.
        return len(mailbox) <= 64
| true
|
8981340c78e4635a3950c3aac54511d9f9697062
|
Python
|
bioCKO/lpp_Script
|
/HTseq_Count.py
|
UTF-8
| 1,240
| 2.53125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2014/11/1
"""
import collections,sys,HTSeq
from optparse import OptionParser
# Command-line interface ------------------------------------------------
usage = '''usage: python2.7 %prog [options] Kmer
Kmer is a list of K value you want,e.g [ 1, 2, 3, 4 ]'''
parser = OptionParser(usage =usage )
parser.add_option("-s", "--SAM", action="store",
                  dest="sam",
                  help="Sam File")
parser.add_option("-o", "--OUT", action="store",
                  dest="out",
                  help="output")
parser.add_option("-p", "--Pair",
                  dest="pair",
                  action="store_true",
                  default = False,
                  help="is paried")
(options, args) = parser.parse_args()
sam = options.sam
out = options.out
pair = options.pair
print(pair)  # debug: echo the paired-end flag
# Count aligned reads per reference sequence.
counts = collections.Counter( )
# Choose the reader by file extension; HTSeq provides both SAM and BAM readers.
if sam.endswith("bam"):
    almnt_file = HTSeq.BAM_Reader( sam)
else:
    almnt_file = HTSeq.SAM_Reader( sam)
for almnt in almnt_file:
    if almnt.aligned:
        if pair:
            # In paired mode only count properly paired alignments.
            if not almnt.proper_pair:
                continue
        # Key the count by the chromosome/reference name of the alignment interval.
        gene_name = almnt.iv
        gene_name=gene_name.chrom
        counts[ gene_name ] += 1
# Write one "<name>\t<count>" line per reference sequence.
END = open(out,'w')
for gene_id in counts:
    END.write('%s\t%s\n'%(gene_id, counts[ gene_id ]) )
| true
|
b96cc8b400309f0f5a8411c5df8f800dba885b9d
|
Python
|
jamiejamiebobamie/pythonPlayground
|
/tictactoe2.py
|
UTF-8
| 18,573
| 3.78125
| 4
|
[] |
no_license
|
# Design a Tic-tac-toe game that is played between two players on a n x n grid.
#
# You may assume the following rules:
#
# A move is guaranteed to be valid and is placed on an empty block.
# Once a winning condition is reached, no more moves is allowed.
# A player who succeeds in placing n of their marks in a horizontal, vertical, or diagonal row wins the game.
# Example:
# Given n = 3, assume that player 1 is "X" and player 2 is "O" in the board.
#
# TicTacToe toe = new TicTacToe(3);
#
# toe.move(0, 0, 1); -> Returns 0 (no one wins)
# |X| | |
# | | | | // Player 1 makes a move at (0, 0).
# | | | |
#
# toe.move(0, 2, 2); -> Returns 0 (no one wins)
# |X| |O|
# | | | | // Player 2 makes a move at (0, 2).
# | | | |
#
# toe.move(2, 2, 1); -> Returns 0 (no one wins)
# |X| |O|
# | | | | // Player 1 makes a move at (2, 2).
# | | |X|
#
# toe.move(1, 1, 2); -> Returns 0 (no one wins)
# |X| |O|
# | |O| | // Player 2 makes a move at (1, 1).
# | | |X|
#
# toe.move(2, 0, 1); -> Returns 0 (no one wins)
# |X| |O|
# | |O| | // Player 1 makes a move at (2, 0).
# |X| |X|
#
# toe.move(1, 0, 2); -> Returns 0 (no one wins)
# |X| |O|
# |O|O| | // Player 2 makes a move at (1, 0).
# |X| |X|
#
# toe.move(2, 1, 1); -> Returns 1 (player 1 wins)
# |X| |O|
# |O|O| | // Player 1 makes a move at (2, 1).
# |X|X|X|
# Follow up:
# Could you do better than O(n2) per move() operation?
#
# Hint:
#
# Could you trade extra space such that move() operation can be done in O(1)?
# You need two arrays: int rows[n], int cols[n], plus two variables: diagonal, anti_diagonal.
import random as rand
# def tictac(n):
# array = list('1'*n)
# turn = 0 #X = 1, O = 0
# turns = 0
# while turns < n:
# array[turns], array[rand.randint(0,n-turns)] = turn^turn, array[rand.randint(0,n-turns)]
# turn = 1
# turns += 1
# return array
#
# print(tictac(8))
#learn numpy...
#
# #build____
# n= 3
# array = []
# for i in range(n):
# array.append(list(" "*n))
# # print(array)
#
# unhash = {}
#
# for i, row in enumerate(array):
# for j, column in enumerate(row):
# # print(i,j,3*i+j*2)
# # dict[3*i+j*2] = " "
# unhash[3*i+j*2] = [i, j, " "]
# # print(len(unhash))
#
# #build____
#
#
# #choose____
#
#
# iter = 0
# keys = list(unhash.keys())
# print(keys)
# for key in keys:
# rando = rand.randint(iter,len(keys)-1)
# # print(rando)
# keys[rando], keys[iter] = keys[iter], keys[rando]
# iter += 1
# print(keys)
# print(keys)
# #choose____
#
# #play___
# value = True
# for key in keys:
# # print(unhash[key])
# if value:
# unhash[key][2] = "X"
# else:
# unhash[key][2] = "O"
# value = not value
# iter += 1
#play___
# 0 2 4
# 3 5 7
# 6 8 10
# X X O
# X O X
# O X O
# _ X O
# _ O X
# O X _
#ended at 4 after 6 moves:
# [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]
# (0, 0, 0)
# (0, 1, 2)
# (0, 2, 4)
# (1, 0, 3)
# (1, 1, 5) #hash function: 3*i+j*2
# (1, 2, 7)
# (2, 0, 6)
# (2, 1, 8)
# (2, 2, 10)
# d[0] = (0, 0)
# d[2] = (0, 1)
# d[4] = (0, 2)
# d[3] = (1, 0)
# d[5] = (1, 1)
# d[7] = (1, 2)
# d[6] = (2, 0)
# d[8] = (2, 1)
# d[10] = (2, 2)
# print(unhash)
# [0, 2, 3, 4, 5, 6, 7, 8, 10]
# [7, 5, 2, 6, 8, 4, 0, 10, 3]
# {0: 'X', 2: 'X', 3: 'X', 4: 'O', 5: 'O', 6: 'O', 7: 'X', 8: 'X', 10: 'O'}
# fisher-yates shuffle
# [0, 2, 3, 4, 5, 6, 7, 8, 10]
# [3, 2, 0, 4, 5, 6, 7, 8, 10]
# [3, 0, 2, 4, 5, 6, 7, 8, 10]
# [3, 0, 2, 4, 5, 6, 7, 8, 10]
# [3, 0, 2, 4, 5, 6, 7, 8, 10]
# [3, 0, 2, 4, 8, 6, 7, 5, 10]
# [3, 0, 2, 4, 8, 6, 7, 5, 10]
# [3, 0, 2, 4, 8, 6, 5, 7, 10]
# [3, 0, 2, 4, 8, 6, 5, 10, 7]
# [3, 0, 2, 4, 8, 6, 5, 10, 7]
# [3, 0, 2, 4, 8, 6, 5, 10, 7]
"""
must span 'n' to win.
(i,j)
rows have repeat j's
(a row's key would be j)
columns have repeat i's
(a column's key would be i)
no one cheats in this game so we don't need to keep track of if the move is valid
only if there's a winner or if all spaces have been filled.
the key insight is that we don't need to worry about rows and columns the moment
they contain both an X and O, because it can't be a winner.
make bins out of all rows and columns.
at each bin have a linked list of nodes with the properties:
value: index of space in multidimensional array,
player: X/O,
next: next node in linked list,
#ALL NODES HAVE THESE PROPERTIES, BUT THEY'RE
ONLY CHANGED ON THE FIRST NODE OF THE LIST:
homogenous: True #True if this list contains only X or O elements,
count: # of nodes in list
Do not append to list if 'homogenous' is False.
diagonals??
X X O X X O
X O X X O X
O X O O X O
X X O X X O
X O X X O X
O X O O X O
(0,5)(1,4)(2,3)(3,2)(4,1)(5,0)
X X O
X O X
O X O
(0,2)(1,1)(2,0) = 6
O X X
X O X
O X O
(0,0)(1,1)(2,2) = 6
O X O X
X O X X
O X O O
X O O O
(0,0)(1,1)(2,2)(3,3) = 12
(0,3)(1,2)(2,1)(3,0) = 12
O X
X O
(0,0)(1,1)
(0,1)(1,0)
diagonal indices are a pallindrome or 'stepwise' ascending
diagDict keys: 'pal' and 'step'
' value = 0 #O
player = {0: O, 1: X}
spacesCount = 0
dictRow, dictCol, dictDiag = {}, {}, {}
while spacesCount < n**2:
value = not value #X starts
if key in dictRow:
if dictRow[key].homogenous:
dictRow[key].addNode(i,j,player[value])
dictRow[key].count += 1
if dictRow[key].count == n:
return dictRow[key].player
if player[value] != dictRow[key].peek(): #peek(), in at the first node and return it's player attribute
dictRow[key].homogenous = False
else:
dictRow[key].addNode(i,j,player[value])
dictRow[key].count += 1
if key in dictRow:
if dictCol[key].homogenous:
dictCol[key].addNode(i,j,player[value])
dictCol[key].count += 1
if dictCol[key].count == n:
return dictCol[key].player
if player[value] != dictCol[key].peek(): #peek(), in at the first node and return it's player attribute
dictCol[key].homogenous = False
else:
dictCol[key].addNode(i,j,player[value])
dictCol[key].count += 1
#look at the indices and add them to the appropriate diagonal bin:
if i == j:
if len(dictDiag) != 0:
if dictDiag['step'].homogenous:
dictDiag['step'].addNode(i,j,player[value])
dictDiag[key].count += 1
if dictDiag['step'].count == n:
return dictDiag['step'].player
if player[value] != dictDiag['step'].peek():
dictDiag['step'].homogenous = False
else:
dictDiag['step'].addNode(i,j,player[value])
dictDiag[key].count += 1
if i + j == n-1:
if len(dictDiag) != 0:
if dictDiag['pal'].homogenous:
dictDiag['pal'].addNode(i,j,player[value])
dictDiag[key].count += 1
if dictDiag['pal'].count == n:
return dictDiag['pal'].player
if player[value] != dictDiag['pal'].peek():
dictDiag['pal'].homogenous = False
else:
dictDiag['pal'].addNode(i,j,player[value])
dictDiag[key].count += 1
spaceCount += 1 #update move count each turn.
else:
return "Tie"
'
"""
class TicTacToe:
    """Simulates a fully random game on an n x n tic-tac-toe board.

    Builds the board as (row, col) index pairs plus lookup dicts seeded
    for rows, columns and the two diagonals, then plays every tile in a
    random order.  NOTE(review): play() does not detect winners yet --
    it always walks all n*n tiles and reports "Tie" (original draft
    behaviour, preserved).
    """

    def __init__(self, n=None):
        self.n = n
        # Lookup tables; buildBoard only seeds the keys with None.
        self.rows = {}
        self.cols = {}
        self.diags = {}
        self.board = self.buildBoard(n)            # (i, j) pairs, row-major
        self.order = self.randomOrder(self.board)  # shuffled play sequence

    def buildBoard(self, n):
        """Return the row-major list of (i, j) index pairs for an n x n board.

        Side effect: seeds self.rows (keyed by column index j), self.cols
        (keyed by row index i) and self.diags (keyed by the (i, j) pairs
        lying on either diagonal) with None placeholders.
        NOTE(review): rows keyed by j and cols by i looks swapped, but is
        preserved from the original.
        """
        board = []
        for i in range(n):
            for j in range(n):
                if i == j or i + j == n - 1:
                    self.diags[i, j] = None
                if j not in self.rows:
                    self.rows[j] = None
                if i not in self.cols:
                    self.cols[i] = None
                board.append((i, j))
        return board

    def randomOrder(self, array):
        """Return a Fisher-Yates-shuffled *copy* of `array`.

        Fix: the original did ``order = array`` and shuffled in place,
        which also scrambled self.board through the shared reference.
        Operating on a copy keeps the board in row-major order.  (Also
        renamed the loop counter, which shadowed the builtin ``iter``.)
        """
        order = list(array)
        for idx in range(len(order)):
            swap = rand.randint(idx, len(order) - 1)
            order[swap], order[idx] = order[idx], order[swap]
        return order

    def play(self):
        """Play every tile in self.order, alternating X/O (X starts).

        Prints one line per move; since no winner detection exists the
        while-loop always completes and the else-clause returns "Tie".
        """
        value = 0  # 0 -> 'O'; toggled before each move so 'X' goes first
        player = {0: 'O', 1: 'X'}
        spaces_played = 0
        turn_index = 0
        while spaces_played < self.n ** 2:
            value = not value
            turn = player[value]
            key = self.order[turn_index]
            turn_index += 1
            spaces_played += 1
            print(turn, key, turn_index, spaces_played, self.n)
        else:
            return "Tie"
# for order in self.order):
#
# class LinkedList:
# def __init__(self):
# self.heads = {}
#
# class LL_Node:
# def __init__(self, value_i=None, value_j=None, player=None):
# """new idea: the first tile in either a row or column or diagonal
# becomes the head of the list you add to the node by setting
# the row, column, or diag attributes to the next node in the row,
# column, or diagonal, keeping the count (of each...)
# before you add a node you check a node's type(?)
# and if it conflicts with the current type of the node you set the
# row, column, or diagonal to "Nope" to say 'don't add more nodes...'
# The tile becomes a 'blocker' for that row, column, or diagonal.
# """
#
# self.row = None # does this become a graph if you have multiple pointers / next nodes?
# self.column = None # how do I keep track of their respective heads?
# self.diag = None #
#
# self.value = (value_i, value_j) # (i,j)
# self.player = player # X or O
#
# self.next = None # the chonological order of the tiles played / chosen
# self.homogenous = True
# self.count = 0 # n-1 to end game
# def addNode(value_i, value_j, player): # must account for a node not being present.
# # if value_i in self.heads:
# # self.board[value_i, value_j].column
# # else:
# # self.board[value_i, value_j] = LL_Node(value_i, value_j, player)
# # self.heads[self.board[value_i, value_j]]
# # if value_j in self.heads:
# # pass
#
# if key[0] in self.rows:
# if self.rows[key[0].row!="Nope":
# self.rows[key[0]].row = LL_Node(i,j,player[value])
#
# # this is getting kind of convoluted.
# # you don't need to add a new node for each turn.
# # just keep track of count and homogenity
# # at each 'starter' tile at the given row/column/diagonal.
#
# dictRow[key].count += 1
# if dictRow[key].count == n:
# return dictRow[key].player
# if player[value] != dictRow[key].peek(): #peek(), in at the first node and return it's player attribute
# dictRow[key].homogenous = False
# else:
# self.rows[key[0]].addNode(i,j,player[value])
# self.rows[key[0]].count += 1
# if key in dictRow:
# if dictCol[key].homogenous:
# dictCol[key].addNode(i,j,player[value])
# dictCol[key].count += 1
# if dictCol[key].count == n:
# return dictCol[key].player
# if player[value] != dictCol[key].peek(): #peek(), in at the first node and return it's player attribute
# dictCol[key].homogenous = False
# else:
# dictCol[key].addNode(i,j,player[value])
# dictCol[key].count += 1
# #look at the indices and add them to the appropriate diagonal bin:
# if i == j:
# if len(dictDiag) != 0:
# if dictDiag['step'].homogenous:
# dictDiag['step'].addNode(i,j,player[value])
# dictDiag[key].count += 1
# if dictDiag['step'].count == n:
# return dictDiag['step'].player
# if player[value] != dictDiag['step'].peek():
# dictDiag['step'].homogenous = False
# else:
# dictDiag['step'].addNode(i,j,player[value])
# dictDiag[key].count += 1
# if i + j == n-1:
# if len(dictDiag) != 0:
# if dictDiag['pal'].homogenous:
# dictDiag['pal'].addNode(i,j,player[value])
# dictDiag[key].count += 1
# if dictDiag['pal'].count == n:
# return dictDiag['pal'].player
# if player[value] != dictDiag['pal'].peek():
# dictDiag['pal'].homogenous = False
# else:
# dictDiag['pal'].addNode(i,j,player[value])
# dictDiag[key].count += 1
# Demo: build a 4x4 board and play one fully random game.  Always
# reports "Tie" for now -- TicTacToe.play does not detect winners yet.
new = TicTacToe(4)
new.play()
# values = []
# for i, order in enumerate(new.order):
# print(i, order)
# print(new.rows.keys(),new.cols.keys(),new.diags.keys())
# X X O
# X O X
# O X O
#
# #choose____
# def makeTurnPositionsArray(board)
#
# iter = 0
# keys = list(unhash.keys())
# print(keys)
# for key in keys:
# rando = rand.randint(iter,len(keys)-1)
# # print(rando)
# keys[rando], keys[iter] = keys[iter], keys[rando]
# iter += 1
# print(keys)
# print(keys)
# #choose____
#
# #play___
# value = True
# for key in keys:
# # print(unhash[key])
# if value:
# unhash[key][2] = "X"
# else:
# unhash[key][2] = "O"
# value = not value
#
#
# value = 0 #O
# player = {0: O, 1: X}
# spacesCount = 0
# dictRow, dictCol, dictDiag = {}, {}, {}
#
# while spacesCount < n**2:
#
# value = not value #X starts
#
# key =
#
# if key in dictRow:
# if dictRow[key].homogenous:
# dictRow[key].addNode(i,j,player[value])
# dictRow[key].count += 1
# if dictRow[key].count == n:
# return dictRow[key].player
# if player[value] != dictRow[key].peek(): #peek(), in at the first node and return it's player attribute
# dictRow[key].homogenous = False
# else:
# dictRow[key].addNode(i,j,player[value])
# dictRow[key].count += 1
#
# if key in dictRow:
# if dictCol[key].homogenous:
# dictCol[key].addNode(i,j,player[value])
# dictCol[key].count += 1
# if dictCol[key].count == n:
# return dictCol[key].player
# if player[value] != dictCol[key].peek(): #peek(), in at the first node and return it's player attribute
# dictCol[key].homogenous = False
# else:
# dictCol[key].addNode(i,j,player[value])
# dictCol[key].count += 1
#
# #look at the indices and add them to the appropriate diagonal bin:
# if i == j:
# if len(dictDiag) != 0:
# if dictDiag['step'].homogenous:
# dictDiag['step'].addNode(i,j,player[value])
# dictDiag[key].count += 1
# if dictDiag['step'].count == n:
# return dictDiag['step'].player
# if player[value] != dictDiag['step'].peek():
# dictDiag['step'].homogenous = False
# else:
# dictDiag['step'].addNode(i,j,player[value])
# dictDiag[key].count += 1
#
# if i + j == n-1:
# if len(dictDiag) != 0:
# if dictDiag['pal'].homogenous:
# dictDiag['pal'].addNode(i,j,player[value])
# dictDiag[key].count += 1
# if dictDiag['pal'].count == n:
# return dictDiag['pal'].player
# if player[value] != dictDiag['pal'].peek():
# dictDiag['pal'].homogenous = False
# else:
# dictDiag['pal'].addNode(i,j,player[value])
# dictDiag[key].count += 1
#
# spaceCount += 1 #update move count each turn.
#
# else:
# return "Tie"
| true
|
c4082c30e187ba2de5ef99bfc4931da860e46071
|
Python
|
iam-vignesh/network-monitoring-scripts
|
/ping.py
|
UTF-8
| 856
| 3.09375
| 3
|
[] |
no_license
|
import os
import csv
print("+------------------------------------------------------------------+")
# Ping every host listed in the first column of the input CSV (header
# row skipped) and append unreachable addresses to 'down iplist.csv'.
# NOTE(review): the "Received = 4" match is Windows-ping output; on
# Linux `ping` runs forever with no such text -- confirm target OS.
with open('PATH TO FILE\\filename.csv', newline='') as csvfile:
    filereader = csv.reader(csvfile)
    next(filereader)
    for row in filereader:
        ips = (row[0])
        print(f"Pinging....{ips}")
        # Blocking shell call; output captured as text for the check below.
        ping_response = os.popen(f"ping {ips}").read()
        if "Received = 4" in ping_response:
            print(f"{ips} is UP. Ping Successful!")
        else:
            print(f"Ping to {ips} failed. Log entry created!")
            down_ip = []
            down_ip.append(ips)
            # Re-opened in append mode per failure so each down host is
            # written as its own CSV row.
            with open('down iplist.csv', 'a' , newline='') as writefile:
                w = csv.writer(writefile)
                w.writerow(down_ip)
print("+------------------------------------------------------------------+")
| true
|
2e16588e115c8bca74bdc14aaf66f92b21bd826f
|
Python
|
herolibra/PyCodeComplete
|
/Others/Modules/kafka/access_kafka.py
|
UTF-8
| 884
| 2.5625
| 3
|
[] |
no_license
|
# coding=utf-8
from pykafka import KafkaClient
import codecs
import logging
logging.basicConfig(level=logging.INFO)
# create kafka data, string format
def produce_kafka_data(kafka_topic):
    # Publish four test messages ('test message 0/1/4/9') to the given
    # pykafka topic using a synchronous producer (blocks until acked).
    with kafka_topic.get_sync_producer() as producer:
        for i in range(4):
            producer.produce('test message ' + str(i ** 2))
# consume data
def consume_simple_kafka(kafka_topic, timeout):
    # Drain the topic, printing each message's offset and payload.
    # `timeout` (ms) makes the consumer stop instead of blocking forever.
    # NOTE(review): Python 2 print syntax -- this file targets Python 2.
    consumer = kafka_topic.get_simple_consumer(consumer_timeout_ms=timeout)
    for message in consumer:
        if message is not None:
            print message.offset, message.value
# Demo wiring (Python 2): connect to a single broker, publish four test
# messages to the "test" topic, then read everything back and count it.
client = KafkaClient(hosts = "192.168.253.147:6667")
topic = client.topics["test"]
produce_kafka_data(topic)
# The consumer gives up after 1s of silence instead of blocking forever.
consumer = topic.get_simple_consumer(consumer_timeout_ms=1000)
cnt = 0
for message in consumer:
    if message is not None:
        print message.offset, message.value
        cnt += 1
print cnt
| true
|
70075756a020a68f98df3f7f28ca59a01ceb8f48
|
Python
|
shivduttbharadwaj/data_structures_algorithms
|
/guess_find_binary_search.py
|
UTF-8
| 621
| 4.65625
| 5
|
[] |
no_license
|
def binary_search(sorted_list, item):
    """Return the index of `item` in ascending `sorted_list`, else None.

    Classic iterative binary search: O(log n) comparisons, no copies.
    Fixes: debug prints removed from the library routine; the midpoint
    now uses floor division instead of float division wrapped in int().
    """
    low = 0
    high = len(sorted_list) - 1
    while low <= high:
        mid = (low + high) // 2
        guess = sorted_list[mid]
        if guess == item:
            return mid
        if guess > item:
            high = mid - 1  # target can only be in the lower half
        else:
            low = mid + 1   # target can only be in the upper half
    return None  # range exhausted without a match
# Demo: index of 3 in the sorted sample list (prints 2).
my_list = [1, 2, 3, 4, 5, 7, 8, 10]
print(binary_search(my_list, 3))
| true
|
b3d36a94ef39addfe5c7abd613a5b3159d272ece
|
Python
|
nekromant8/Jet_projects
|
/Banking/banking.py
|
UTF-8
| 5,928
| 3.125
| 3
|
[] |
no_license
|
import random
import string
import sqlite3
# Module-level state shared (via `global`) by every menu handler below.
conn = sqlite3.connect('card.s3db')
cur = conn.cursor()
first_6 = 400000        # issuer identification number (card prefix)
count = 0               # number of accounts created this session
card = 0                # last generated card number (set by card_generate)
pin = 0                 # last generated PIN (set by card_generate)
card_number = None      # card of the currently logged-in user
pin_code = None         # PIN of the currently logged-in user
n = None                # last menu choice read from stdin
user_card = None        # unused placeholder
cur.execute('CREATE TABLE IF NOT EXISTS card(id INTEGER PRIMARY KEY, number TEXT, pin TEXT,balance INTEGER DEFAULT 0)')
conn.commit()
def info():
    """Print the main menu and store the user's choice in global `n`."""
    print("1. Create an account")
    print("2. Log into account")
    print("0. Exit")
    global n
    n = input()
def create_account():
    """Generate a new card/PIN, show them, then re-enter the main menu.

    Relies on card_generate() setting the module globals `card`/`pin`.
    NOTE(review): control flow recurses back through info()/user_input()
    instead of returning, so very long sessions grow the call stack.
    """
    global n, count
    count += 1
    card_generate()
    print("""Your card has been created
Your card number:""")
    print(card)
    print("Your card PIN:")
    print(pin)
    info()
    user_input()
def card_generate():
    """Create a 16-digit Luhn-valid card number and a 4-digit PIN,
    insert them into the card table, and return (card, pin).

    Layout: 6-digit prefix (first_6) + 9 random digits + Luhn check digit.
    NOTE(review): random.sample(range(9), 9) permutes 0..8, so the digit
    9 can never appear in positions 7-15 -- confirm whether intended.
    NOTE(review): the INSERT interpolates unquoted values via f-string;
    values are generated internally (digits only), but parameterized
    queries would be safer and would preserve leading zeros in TEXT.
    """
    global first_6, card, pin
    size = 4
    chars = string.digits
    card_no = [int(i) for i in str(first_6)]   # working copy for the Luhn sum
    card_num = [int(i) for i in str(first_6)]  # digits actually emitted
    seventh_15 = random.sample(range(9), 9)
    for i in seventh_15:
        card_no.append(i)
        card_num.append(i)
    # Luhn: double every other digit (even indices of the 15-digit body)...
    for t in range(0, 15, 2):
        card_no[t] = card_no[t] * 2
    # ...and collapse doubled values above 9 by subtracting 9.
    for i in range(len(card_no)):
        if card_no[i] > 9:
            card_no[i] -= 9
    s = sum(card_no)
    mod = s % 10
    check_sum = 0 if mod == 0 else (10 - mod)  # digit that makes sum % 10 == 0
    card_num.append(check_sum)
    card_num = [str(i) for i in card_num]
    card = ''.join(card_num)
    pin = ''.join(random.choice(chars) for _i in range(size))
    cur.execute(f"INSERT INTO card(number, pin) VALUES ({card}, {pin})")
    conn.commit()
    return card, pin
def sum_digits(digit):
    """Collapse a doubled Luhn digit (0-18) to a single digit.

    Values below 10 pass through unchanged; two-digit values return the
    sum of their tens and ones digits (e.g. 14 -> 5).
    """
    return digit if digit < 10 else digit // 10 + digit % 10
def pass_luhn(recipient):
    """Return True when the card-number string passes the Luhn check.

    Right-to-left walk: every digit at a 1-based even position is
    doubled, and a doubled value above 9 collapses by subtracting 9
    (equivalent to summing its two digits); the number is valid when
    the grand total is a multiple of 10.
    """
    total = 0
    for position, ch in enumerate(reversed(recipient), start=1):
        digit = int(ch)
        if position % 2 == 0:
            digit *= 2
            if digit > 9:
                digit -= 9  # same as sum of the two digits of 10..18
        total += digit
    return total % 10 == 0
def log_in():
    """Prompt for card number + PIN; on a match, store them in the
    globals `card_number`/`pin_code` and enter the account menu,
    otherwise return to the main menu.

    NOTE(review): the trailing `n = input()` runs after the recursive
    menu calls unwind and swallows one extra line of input -- confirm
    whether intended.
    """
    global n, card_number, pin_code
    print('Enter your card number:')
    user_card_number = input('> ')
    print('Enter your PIN:')
    user_pin_code = input('> ')
    # EXISTS query returns '1'/'0' as text; parameterized, so safe.
    cur.execute('SELECT CAST(EXISTS (SELECT number || pin FROM card WHERE number = ? AND pin = ?) AS VARCHAR(2));',
                (user_card_number, user_pin_code))
    account_check = cur.fetchone()
    if '1' in account_check:
        print('You have successfully logged in!')
        card_number = user_card_number
        pin_code = user_pin_code
        successful_login()
    else:
        print('Wrong card number or PIN!')
        info()
        user_input()
    n = input()
def balance():
    """Print the balance of the logged-in card (globals card_number/pin_code)."""
    # Local `balance` shadows this function's name inside the body.
    balance = cur.execute('SELECT balance FROM card WHERE number = ? AND pin = ?', (card_number, pin_code)).fetchone()
    print("Balance: {}".format(balance[0]))
    conn.commit()  # no-op after a SELECT, kept from the original
def add_income():
    """Prompt for an integer amount and add it to the logged-in card's
    balance (globals `cur`/`conn`/`card_number`/`pin_code`).

    Fix: user-facing prompt typo "Inter income" -> "Enter income".
    NOTE(review): int(input()) raises ValueError on non-numeric input.
    """
    print("Enter income:")
    income = int(input())
    cur.execute("UPDATE card SET balance = (balance + ?) WHERE number = ? AND pin = ?", (income, card_number, pin_code))
    conn.commit()
    print("Income was added!")
def do_transfer(card_number):
    """Interactive transfer from `card_number` to a recipient card.

    Validates: not the same account, recipient passes the Luhn check,
    recipient exists, and the sender's balance covers the amount.
    NOTE(review): `amount < row[-1]` is strict, so the full balance can
    never be transferred -- confirm whether `<=` was intended.
    NOTE(review): the SELECT concatenates `recipient` into SQL; it has
    passed pass_luhn (digits only) so injection is blocked, but a
    parameterized query would be more robust.
    """
    row = get_account_info(card_number)
    print("Transfer")
    recipient = input("Enter card number:")
    if recipient == card_number:
        print("You can't transfer money to the same account!")
        successful_login()
    elif pass_luhn(recipient):
        cur.execute('SELECT * FROM card WHERE number=' + recipient)
        conn.commit()
        rec_row = cur.fetchone()
        if rec_row:  # recipient record exists
            amount = int(input("Enter how much money you want to transfer:"))
            if amount < row[-1]:  # sender balance covers the amount
                cur.execute("UPDATE card SET balance=(balance - ?) WHERE number = ? AND pin = ?", (amount, card_number, pin_code))
                conn.commit()
                cur.execute("UPDATE card SET balance=(balance + ?) WHERE number = ? ", (amount, recipient))
                conn.commit()
                print("Success!")
            else:  # balance is not enough
                print("Not enough money!")
        else:  # no such card in the db
            print("Such a card does not exist.")
    else:  # Luhn test failed
        print("Probably you made a mistake in the card number. Please try again!")
    successful_login()
def get_account_info(card_number):
    """Return the full card row (id, number, pin, balance) for
    `card_number`, or None when no such card exists.

    Fix: parameterized query instead of str.format -- the card number
    comes from user input, so string-built SQL risked injection.  (The
    commit after a read-only SELECT was a no-op and is dropped.)
    """
    cur.execute('SELECT * FROM card WHERE number = ?', (card_number,))
    return cur.fetchone()
def close_account(card_number):
    """Delete the card row for `card_number` and confirm to the user.

    Fix: parameterized DELETE instead of str.format -- the card number
    is user-supplied, so string-built SQL risked injection.
    """
    cur.execute('DELETE FROM card WHERE number = ?', (card_number,))
    conn.commit()
    print("The account has been closed!")
def successful_login():
    """Account menu loop for a logged-in user (dispatches on global `n`).

    NOTE(review): options 1-3 recurse into successful_login() instead of
    looping, so the `n = input()` after each call only runs while the
    recursion unwinds; long sessions deepen the call stack.
    """
    global n
    print("""1. Balance
2. Add income
3. Do transfer
4. Close account
5. Log out
0. Exit""")
    n = input()
    if n == "1":
        balance()
        successful_login()
        n = input()
    elif n == "2":
        add_income()
        successful_login()
        n = input()
    elif n == "3":
        do_transfer(card_number)
        successful_login()
        n = input()
    elif n == "4":
        close_account(card_number)
        info()
        user_input()
    elif n == "5":
        print("You have successfully logged out!")
        info()
        user_input()
    elif n == "0":
        print("Bye!")
        exit()
def user_input():
    """Dispatch on the main-menu choice stored in global `n` by info().

    Any choice other than "1"/"2"/"0" falls through and returns silently.
    """
    global n
    if n == "1":
        create_account()
    elif n == "2":
        log_in()
    elif n == "0":
        print("Bye!")
        exit()
# Entry point: show the main menu, then dispatch on the user's choice.
info()
user_input()
| true
|
568e372a82c35dcbf7b5247df871ff7ab4335400
|
Python
|
Vincannes/TradeFinance
|
/app/libs/widgets/combobox.py
|
UTF-8
| 1,637
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
from PySide2 import QtGui, QtCore, QtWidgets
class ComboBox(QtWidgets.QComboBox):
    """Thin project-wide base class over QComboBox.

    NOTE(review): *args/**kwargs are accepted but not forwarded to the
    Qt constructor -- confirm whether intended.
    """
    def __init__(self, parent=None, *args, **kwargs):
        super(ComboBox, self).__init__(parent)
class StyleComboBox(ComboBox):
    """Combobox used to define line style in the setting indicator dialog"""
    def __init__(self, parent=None, *args, **kwargs):
        super(StyleComboBox, self).__init__(parent, *args, **kwargs)
        # Item payloads in insertion order; index here == combobox index.
        self._items = []
    def build(self, line_styles: dict):
        """Build items of the combobox
        :param line_styles: Available line styles
        :type line_styles: dict
        """
        # Each value is a dict; its "icon" entry becomes the item icon
        # and the whole dict is attached as the item's userData.
        for name, line_style in line_styles.items():
            icon = QtGui.QIcon(line_style.get("icon"))
            data = line_style
            self.addItem(icon, name, data)
            self._items.append(data)
    def set_current_style(self, line_style):
        """Set the current item of the combobox
        :param line_style: The line style
        :type line_style: QtCore.Qt.PenStyle
        """
        # No break: if several items share the style, the last one wins.
        for index, item in enumerate(self._items):
            if item.get("style") != line_style:
                continue
            self.setCurrentIndex(index)
class InputComboBox(ComboBox):
    """Combobox used to define choices in the Input setting indicator dialog"""
    def __init__(self, parent=None):
        super(InputComboBox, self).__init__(parent)
    def build(self, choices):
        """Build items of the combobox
        :param choices: List of all available choices
        :type choices: list or tuple
        """
        # Plain text items, one per choice, in the given order.
        for choice in choices:
            self.addItem(choice)
| true
|
3238e76cf0fb368b4bed5a7dcf4a8813ba0227c4
|
Python
|
soh516/cmcdata
|
/plotspeed
|
UTF-8
| 1,717
| 2.65625
| 3
|
[] |
no_license
|
#! /usr/bin/python
import datetime
from time import sleep
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as md
# Python 2 script: parse a data-migration log and plot migrated size
# (GB) against elapsed time (hours) with matplotlib.
pauseDur = 10.0 #pause in min (currently unused below)
index = 0
# First pass: count lines to size the numpy arrays (two lines per sample).
with open('/home/tst023/physics/cmc-home-migration/datamigrationspeed_2017_11_07_10_39_17') as file:
    for line in file:
        index = index + 1
file.closed  # NOTE(review): attribute access, not close(); `with` already closed it
iteration = index/2
timePlotIndex = np.zeros(iteration)
storageArray = np.zeros(iteration)
print iteration
#for i in range(1, iteration):
#    timeIndex[i] = timeIndex[i-1] + pauseDur
#timeIndex = timeIndex / float(60)
index = 0
# Second pass: timestamp lines (contain "2017") give the x value, the
# following "copy" lines give the migrated size for the same sample.
with open('/home/tst023/physics/cmc-home-migration/datamigrationspeed_2017_11_07_10_39_17') as file:
    for line in file:
        if (line.find("2017") != -1):
            timeForData = map(int, re.findall('\d+', line))
            timeForDay = timeForData[0]
            timeForHour = timeForData[1]
            timeForMin = timeForData[2]
            # NOTE(review): timeForMin/60 is integer division in Python 2,
            # so minutes are truncated to 0 -- confirm intended.
            timeForPlot = (timeForDay*24) + timeForHour + (timeForMin/60)
            timePlotIndex[index] = timeForPlot
        else:
            if (line.find("copy") != -1):
                storage = map(float, re.findall(r"[-+]?\d*\.\d+|\d+", line))
                storageArray[index] = storage[0]
                print index
                index = index + 1
file.closed  # NOTE(review): no-op, see above
# Shift the time axis so the first sample is t = 0 hours.
timePlotIndex = timePlotIndex - timePlotIndex[0]
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(timePlotIndex,storageArray, 'ro--', linewidth=2)
#ax2.plot(timeIndex,fileNumArray, 'bo--', linewidth=2)
ax1.set_xlabel('Time in hours', fontsize=18)
ax1.set_ylabel('Size in GB', color='r', fontsize=18)
#ax2.set_ylabel('Number of files', color='b', fontsize=18)
#plt.title('Pangea Data Migration', fontsize=22)
plt.show()
| true
|
ee9d15b07dc4ad0d00dd7e5a664a8425b6c13356
|
Python
|
dineshram/PythonModule1
|
/Day9/Classwork/ReadFromDatabase.py
|
UTF-8
| 291
| 2.75
| 3
|
[] |
no_license
|
import sqlite3
def readTableItems(dbName):
    """Print and return every row of the Customer table in `dbName`.

    Fixes: the rows are now returned so callers can use them (the
    original only printed and returned None), and the connection is
    explicitly closed -- sqlite3's context manager only commits/rolls
    back, it never closes the handle.  The commit after a read-only
    SELECT was a no-op and is dropped.
    """
    db = sqlite3.connect(dbName)
    try:
        cursor = db.cursor()
        cursor.execute('SELECT * FROM Customer')
        items = cursor.fetchall()
        print(items)
        return items
    finally:
        db.close()
readTableItems("EspressoHouse.db")
| true
|
99bbf6781c28bcc59c79890939c5d7638c5b5e58
|
Python
|
Taiji-pipeline/Taiji-utils
|
/python/taiji-utils/taiji_utils/Knee.py
|
UTF-8
| 836
| 2.59375
| 3
|
[] |
no_license
|
import numpy as np
import math
from csaps import csaps
def fitSpline(X, Y):
    """Fit a smoothing spline to (X, Y) and return the inflection point
    (zero of the 2nd derivative) with the largest local "width" score.

    Used for knee detection on a log-log barcode rank curve; relies on
    the third-party `csaps` cubic smoothing spline package.
    """
    def getW(xs):
        # Width score per candidate: spread of neighbouring spline values
        # (endpoints fall back to the raw Y endpoints as neighbours).
        ds = []
        for i in range(len(xs)):
            if i == 0:
                d = (Y[0] - xs[0]) + (xs[0] - xs[1])
            elif i == len(xs)-1:
                d = (xs[-2] - xs[-1]) + (xs[-1] - Y[-1])
            else:
                d = xs[i-1] - xs[i+1]
            ds.append(d)
        return ds
    sp = csaps(X, Y, smooth=0.5)
    ddsp = sp.spline.derivative(2)
    # Keep only roots where the 2nd derivative actually changes sign.
    ddx = [x for x in ddsp.solve() if ddsp(x-0.01) * ddsp(x+0.01) < 0]
    i = np.argmax(getW(sp(ddx)))
    return ddx[i]
def selectBarcode(args):
    """Read per-barcode counts (2nd tab-separated column of args.input),
    fit the knee of the log10(rank)-vs-log10(count) curve, and print the
    count threshold (10 ** knee) for selecting real barcodes.

    Assumes the input file is already sorted by descending count --
    TODO confirm against the caller.
    """
    with open(args.input, 'r') as fl:
        Y = [float(l.strip().split('\t')[1]) for l in fl]
    X = np.log10(np.array(list(range(1, len(Y)+1))))
    Y = np.log10(np.array(Y))
    print(10**fitSpline(X, Y))
| true
|
22476310666034c5047909479bf75168425bb949
|
Python
|
dolejska-daniel/fit_vutbr-dp2020
|
/python/other/point_cluster.py
|
UTF-8
| 2,184
| 3.359375
| 3
|
[] |
no_license
|
import itertools
import functools
import math
import operator
class Cluster:
def __init__(self):
self.points = set()
def __hash__(self) -> int:
return functools.reduce(operator.xor, map(hash, self.points))
def __len__(self) -> int:
return len(self.points)
def __repr__(self) -> str:
return "Cluster(points=%s)" % (self.points,)
def join(self, point):
self.points.add(point)
return self
def leave(self, point):
self.points.remove(point)
def center(self) -> tuple[float, float]:
return sum([pt.x for pt in self.points]) / len(self.points), sum([pt.y for pt in self.points]) / len(
self.points)
def distance(self, point) -> float:
x, y = self.center()
return math.sqrt((x - point.x) ** 2 + (y - point.y) ** 2)
class Point:
    """A 2-D point that always belongs to exactly one Cluster."""

    cluster: Cluster
    cluster_dist = float("inf")  # distance to the current cluster's centroid

    def __init__(self, x: float, y: float, cluster: Cluster = None):
        self.x = x
        self.y = y
        # No (truthy, i.e. non-empty) cluster given -> start in a fresh one.
        owner = cluster if cluster else Cluster()
        self.cluster = owner.join(self)

    def __repr__(self) -> str:
        return "Point(x=%s, y=%s)" % (self.x, self.y)

    def distance(self, point) -> float:
        """Euclidean distance between this point and `point`."""
        dx = self.x - point.x
        dy = self.y - point.y
        return math.sqrt(dx * dx + dy * dy)

    def try_clusterize(self, point, distance_limit: float):
        """Migrate into `point`'s cluster when `point` is both within
        `distance_limit` and at least as close as the current cluster."""
        gap = self.distance(point)
        if gap <= distance_limit and gap <= self.cluster_dist:
            self.cluster.leave(self)
            self.cluster = point.cluster.join(self)
            self.cluster_dist = point.cluster.distance(self)
def clusterize(points: list[tuple[float, float]], distance_limit: float = 30) -> set[Cluster]:
    """Greedily group (x, y) pairs into clusters; a point migrates to a
    neighbour's cluster only when they lie within `distance_limit`."""
    wrapped: list[Point] = [Point(*coords, cluster=Cluster()) for coords in points]
    for first, second in itertools.permutations(wrapped, 2):
        first.try_clusterize(second, distance_limit)
    return {pt.cluster for pt in wrapped}
def largest_cluster(points: list[tuple[float, float]], distance_limit: float = 30) -> Cluster:
    """Cluster `points` and return the cluster holding the most points."""
    return max(clusterize(points, distance_limit), key=len)
| true
|
1d757cdb3140326bc143dd70876d9c14b52c0c70
|
Python
|
dmitrikuksik/GermanCredit
|
/random_forest.py
|
UTF-8
| 6,957
| 2.953125
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import sys
from Node import Node
def numeric2categoric(df, attr, q):
    """Discretize the continuous column `attr` of `df` in place into `q`
    equal-frequency (quantile) bins labelled 0..q-1."""
    binned = pd.qcut(df[attr], q=q, labels=False)
    df[attr] = binned
def split_dataframe(df, split_at):
    """Randomly split `df` into [train, test]: `split_at` rows are drawn
    without replacement for training; the remaining rows form the test set."""
    train = df.sample(n=split_at)
    rest = df.drop(train.index)
    return [train, rest]
def get_predicted_value(prediction, index):
    """Return the class predicted for the sample at dataframe index
    `index`, given `prediction` as a sequence of [index, class] pairs;
    None when no pair matches (implicit fall-through)."""
    hits = (entry[1] for entry in prediction if entry[0] == index)
    return next(hits, None)
# ustawia wagi (wieksze wagi dla tych, na ktorych dotychczasowy model sie mylil)
# dodaje delta do elementow
# pozniej funckja sample w bagging normalizuje te wagi
def set_weights(df, weights, predictions, delta):
    """Boosting-style weight update: add `delta` to the weight of every
    row the most recent model (predictions[-1]) misclassified.

    Returns `weights` unchanged when no predictions exist yet (first
    round).  Mutates and returns the same `weights` list; the weights
    are later normalized implicitly by DataFrame.sample in bagging().
    """
    wrong_predicts_indx = []
    if len(predictions) == 0:
        return weights
    else:
        for i in range(df.shape[0]):
            pr_val = get_predicted_value(predictions[-1],df.iloc[[i]].index[0])
            if df.iloc[[i]]['decision'].values[0] != pr_val:
                weights[i] += delta  # bump this misclassified sample's weight
                wrong_predicts_indx.append(df.iloc[[i]].index[0])
        return weights
def bagging(df, weights):
    """Bootstrap sample: draw len(df) rows from `df` with replacement,
    weighted by `weights` (emphasizes previously misclassified rows)."""
    size = df.shape[0]
    return df.sample(n=size, replace=True, weights=weights)
# Entropy and Information Gain for ID3
def calculate_entropy(df):
    """Shannon entropy (bits) of the binary `decision` column, where
    class 1 = positive and class 2 = negative; a pure (or empty) set
    yields 0."""
    pos = df[df['decision'] == 1].shape[0]
    neg = df[df['decision'] == 2].shape[0]
    if pos == 0 or neg == 0:
        return 0
    total = pos + neg
    p = pos / total
    q = neg / total
    return -p * np.log2(p) - q * np.log2(q)
def select_node_infgain(df, attributes):
    """ID3 split selection: return the attribute with the highest
    information gain (parent entropy minus weighted child entropy).

    Ties resolve to the first maximal attribute via list.index.
    """
    infgain_sums = []
    for attr in attributes:
        attr_entropy = 0
        for ctg in df[attr].unique():
            inf_gain = calculate_entropy(df[df[attr]==ctg])
            # Weight each category's entropy by its share of the rows.
            attr_entropy += (df[df[attr]==ctg].shape[0]/df.shape[0])*inf_gain
        infgain_sums.append(calculate_entropy(df)-attr_entropy)
    return attributes[infgain_sums.index(max(infgain_sums))]
# GINI for CART
def calculate_gini(df):
    """Gini impurity of the binary `decision` column (1 vs 2): 0 for a
    pure node, 0.5 at an even split.  Unlike calculate_entropy this
    raises ZeroDivisionError on an empty frame (original behaviour)."""
    pos = df[df['decision'] == 1].shape[0]
    neg = df[df['decision'] == 2].shape[0]
    total = pos + neg
    return 1 - (np.square(pos / total) + np.square(neg / total))
def select_node_gini(df, attributes):
    """CART split selection: return the attribute whose categories give
    the lowest weighted Gini impurity.

    Ties resolve to the first minimal attribute via list.index.
    """
    gini_sums = []
    for attr in attributes:
        sum = 0  # NOTE(review): shadows the builtin `sum` inside this loop
        for ctg in df[attr].unique():
            sum += (df[df[attr]==ctg].shape[0]/df[attr].shape[0])*calculate_gini(df[df[attr]==ctg])
        gini_sums.append(sum)
    return attributes[gini_sums.index(min(gini_sums))]
def random_attributes(amount, attributes):
    """Return `amount` distinct attributes drawn uniformly at random
    (random-forest per-node feature subsampling).

    Fix: sample over range(len(attributes)) -- the original used
    range(0, len(attributes) - 1), which could never select the last
    attribute and raised ValueError when amount == len(attributes).
    """
    chosen = random.sample(range(len(attributes)), amount)
    return [attributes[i] for i in chosen]
def check_leaves(df):
    """Classify a node's purity: 2 when no class-1 rows remain, 1 when
    no class-2 rows remain, 0 when the node is still mixed.  (An empty
    frame reports 2 because the class-1 test is checked first.)"""
    has_good = not df[df['decision'] == 1].empty
    has_bad = not df[df['decision'] == 2].empty
    if not has_good:
        return 2
    return 1 if not has_bad else 0
def decision_tree(df_tree,amount,attributes): # create the root and start building the tree
    """Grow one random-forest tree over `df_tree`: pick the root split
    from `amount` randomly sampled attributes (Gini), then recurse with
    split_tree for every category of the chosen attribute.  Returns the
    root Node.
    """
    attrs = random_attributes(amount,attributes)
    root = select_node_gini(df_tree,attrs)
    categories = df_tree[root].unique()
    root_node = Node(root,categories)
    for i,ctg in enumerate(categories): # branch on every value of the root attribute
        split_tree(df_tree[df_tree[root]==ctg],
                    ctg,root_node,
                    amount,attributes)
    return root_node
def split_tree(df,ctg,parent_node,amount,attributes): # recursive tree construction
    """Attach either a leaf (pure node: class 1 or 2) or a new child
    split under `parent_node` for branch value `ctg`, then recurse.

    Each level re-samples `amount` candidate attributes and splits on
    the one with minimal Gini impurity.
    """
    if check_leaves(df) == 2:
        parent_node.set_leave(ctg,2)
        return
    if check_leaves(df) == 1:
        parent_node.set_leave(ctg,1)
        return
    attrs = random_attributes(amount,attributes)
    node_attr = select_node_gini(df,attrs)
    child_node = Node(node_attr,df[node_attr].unique())
    parent_node.set_child(ctg,child_node)
    # NOTE(review): the loop variable reuses the parameter name `ctg`.
    for ctg in child_node.values:
        split_tree(df[df[node_attr]==ctg],
                    ctg,child_node,
                    amount,attributes)
def test_tree(df,root_node): # evaluate one tree on a dataframe
    """Run every row of `df` through the tree rooted at `root_node`;
    returns the [index, predicted_class] pairs that walk_tree collected
    (rows reaching no leaf produce no entry)."""
    predictions = []
    for i,row in df.iterrows():
        walk_tree(root_node,row,predictions,i)
    return predictions
def walk_tree(node,row,predictions,i): # recursively descend the tree for one row
    """Descend from `node` following the row's value of node.attr; when
    a leaf class is found, append [i, class] to `predictions`.

    Returns the class found at this node (None when descending), which
    the recursion itself ignores -- only `predictions` carries results.
    """
    check_leave = node.get_class(row[node.attr])
    if check_leave == None:
        for val in node.values:
            if row[node.attr] == val:
                # Only recurse when a child actually exists for this value;
                # otherwise the row silently yields no prediction.
                if node.next_node(row[node.attr]) != None:
                    walk_tree(node.next_node(row[node.attr]),row,predictions,i)
    else:
        predictions.append([i,check_leave])
    return check_leave
def ensemble_voting(df,predictions): # collect each tree's vote per test row
    """For every row of `df`, gather the class each tree predicted
    (skipping trees that produced no prediction for that row).  Returns
    one vote list per row, aligned with df's row order."""
    votes_table = []
    for i,row in df.iterrows():
        votes = []
        for pr in predictions:
            pr_val = get_predicted_value(pr,i)
            if pr_val != None:
                votes.append(pr_val)
        votes_table.append(votes)
    return votes_table
def count_votes(votes_table):
    """Majority vote per sample: 1 when class-1 votes outnumber class-2
    votes, 2 for the reverse.  NOTE: exact ties append nothing, so the
    result can be shorter than `votes_table` (original behaviour)."""
    decisions = []
    for ballot in votes_table:
        ones = ballot.count(1)
        twos = ballot.count(2)
        if ones != twos:
            decisions.append(1 if ones > twos else 2)
    return decisions
def main(argv):
    """Train and evaluate a boosted random forest on the German Credit
    data ('data.csv', space-separated, no header).

    argv[0] = forest size (number of trees), argv[1] = boosting delta
    added to the sample weight of each misclassified training row.
    Prints the correct-classification count, the classification error,
    and the asymmetric misclassification cost (good->bad costs 1,
    bad->good costs 5).
    """
    columns = ['account_status','duration_month','credit_history',
            'purpose','credit_amount','savings',
            'employment_time','installment_rate','sex:status',
            'guarantor','residence_since','property','age',
            'installment_plan','housing','existing_credits',
            'job','maintenance_people','telephone', 'foreign','decision']
    attributes = columns[:-1]
    split_at = 900          # training rows; remainder becomes the test set
    forest_size = [int(argv[0])]
    classification_error = []
    delta = float(argv[1])
    attr_amount = 4         # attributes sampled per tree node
    df = pd.read_csv('data.csv',sep=' ',names=columns,header=None)
    # Continuous columns are quantile-binned into 4 categories each.
    numeric2categoric(df,'duration_month',4)
    numeric2categoric(df,'credit_amount',4)
    numeric2categoric(df,'age',4)
    [df_train,df_test] = split_dataframe(df, split_at)
    for fs in forest_size:
        train_predictions = []
        test_predictions = []
        votes_table = []
        forest_classification = []
        roots = []
        correct_classification = 0
        cost = 0
        # Uniform initial weights; boosted after each tree via set_weights.
        weights = [1/df_train.shape[0] for _ in range(df_train.shape[0])]
        print("Building tree's models...")
        for _ in range(fs):
            weights = set_weights(df_train,weights,train_predictions,delta)
            df_bootstrap = bagging(df_train,weights)
            root_node = decision_tree(df_bootstrap,attr_amount,attributes)
            roots.append(root_node)
            train_predictions.append(test_tree(df_train,root_node))
        print("Testing random forest...")
        for root in roots:
            test_predictions.append(test_tree(df_test,root))
        votes_table = ensemble_voting(df_test,test_predictions)
        forest_classification = count_votes(votes_table)
        for i,fc in enumerate(forest_classification):
            if df_test.iloc[[i]]['decision'].values[0] == fc:
                correct_classification += 1
            elif df_test.iloc[[i]]['decision'].values[0] == 1 and fc == 2:
                cost += 1
            elif df_test.iloc[[i]]['decision'].values[0] == 2 and fc == 1:
                cost += 5
        print("Amount of correct classifications:",correct_classification)
        classification_error.append(1 - correct_classification/df_test.shape[0])
        print("Classification error:",classification_error[-1])
        print("Cost:",cost)
# Script entry point: forest size and boosting delta come from the
# command line (see main's docstring-free argv contract above... argv[0]
# = forest size, argv[1] = delta).
if __name__ == '__main__':
    main(sys.argv[1:])
| true
|
0fffc05dacf2646bb493626eb4359404be5c53eb
|
Python
|
kren1504/Training_codewars_hackerrank
|
/sumaDigitosRecursiva.py
|
UTF-8
| 609
| 3.609375
| 4
|
[] |
no_license
|
def sumOfDigits(num):
    """Return the sum of the decimal digits of a non-negative integer."""
    total = 0
    while num:
        total += num % 10
        num //= 10
    return total
def sumarDigitosDeNumero(num):
    """Sum the digits of *num* by stripping one trailing digit per iteration."""
    digit_count = len(str(num))
    total = 0
    for _ in range(digit_count):
        num, last = divmod(num, 10)
        total += last
    return total
def back(num, tamNum):
    """Collapse *num* to a single digit by repeated digit-summing.

    tamNum is the current digit count of num; once it reaches 1 the value
    is returned as-is.
    """
    while tamNum != 1:
        num = sumOfDigits(num)
        tamNum = len(str(num))
    return num
def superDigit(num, n):
    """Return the "super digit" (recursive digit sum) of the string *num*
    repeated *n* times.

    Bug fix: the digit count handed to back() was computed from the
    ORIGINAL string, so a single-character input short-circuited back()
    before any summing happened (superDigit("5", 3) returned 555 instead
    of 6). The count is now taken from the repeated number itself.
    """
    repeated = int(num * n)
    return back(repeated, len(str(repeated)))
if __name__ == "__main__":
    # Demo: super digit of "123" repeated 3 times (123123123 -> 18 -> 9).
    print(superDigit( "123",3))
| true
|
f6237d1167464c1c3a3ebb9bcd540771d220a0fe
|
Python
|
Al153/Programming
|
/Python/Cryptography/Julian's Code/Key Generator 1.0.py
|
UTF-8
| 418
| 2.8125
| 3
|
[] |
no_license
|
import time
# Python 2 script: brute-force enumerator that counts through every
# base-26 key of the chosen length, stored little-endian as digits 0-25.
while True:
    try:
        keylen = int(raw_input("Enter the keylength: "))
        break
    except ValueError:
        pass
start = time.time()
# One extra slot acts as an overflow flag: once it becomes 1, every
# combination of `keylen` digits has been printed.
cddkey = (keylen + 1)*[0]
while cddkey[keylen] != 1:
    print cddkey
    # Increment the least-significant digit, then ripple carries upward.
    cddkey[0] += 1
    for n in range(keylen):
        if cddkey[n] == 26:
            cddkey[n] = 0
            cddkey[n+1] += 1
        else:
            break
# Total enumeration time in seconds.
print time.time() - start
| true
|
2131f5597d72898eacd65bf22e88bb6790b4f710
|
Python
|
alexelt/linkedin
|
/linkedin_scraper.py
|
UTF-8
| 10,480
| 2.5625
| 3
|
[] |
no_license
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException as Nosuch
from bs4 import BeautifulSoup
from random import randint
import time
import csv
import ast
def login():
    """Launch a Chrome WebDriver and sign in to LinkedIn.

    NOTE(review): the credentials below are hard-coded empty, and the
    driver created here is a *local* variable -- scrape() uses the separate
    module-level driver. Confirm which instance is actually intended.
    """
    opts = Options()
    # Spoof a desktop Chrome user agent.
    opts.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36")
    driver = webdriver.Chrome("C:/Users/alexel_t91/Downloads/chromedriver_win32/chromedriver.exe", chrome_options=opts)
    driver.get("https://linkedin.com")  # open the LinkedIn landing page
    agent = driver.execute_script("return navigator.userAgent")
    print(agent)
    user_name = ""  # give username
    password = ""  # give password
    n = randint(0, 10)
    time.sleep(n)  # random pause to appear less bot-like
    driver.find_element_by_xpath('//*[@id="login-email"]').send_keys(user_name)
    driver.find_element_by_xpath('//*[@id="login-password"]').send_keys(password)
    driver.find_element_by_xpath('//*[@id="login-submit"]').click()
    time.sleep(n)
def scrape():
    """Read LinkedIn usernames from a CSV, visit each profile with the
    module-level Selenium driver, and print the parsed profile sections.

    NOTE(review): `driver.browser.find_elements_by_css` below is not a
    WebDriver API (and runs *before* driver.get) -- it would raise
    AttributeError, which the `except Nosuch` clause does not catch.
    NOTE(review): `header` is a bs4 Tag but is compared to plain strings,
    so the Skills branches below likely never match -- verify intent.
    """
    # Load the list of usernames; the first CSV cell is expected to hold a
    # Python-literal list of usernames.
    with open('C:/Users/alexel_t91/Desktop/users/csvusers11.csv', 'r') as userfile:
        userfilereader = csv.reader(userfile)
        for col in userfilereader:
            userlist.append(col)
        user_list = ast.literal_eval(str(userlist[0]))
        user_list = list(set(user_list))  # de-duplicate
        userfile.close()  # redundant: the `with` block already closes it
    exp_list = []
    for user in user_list:
        # Long randomized delay between profiles to avoid rate limiting.
        k = randint(80, 120)
        n = randint(70, k)
        time.sleep(n)
        try:
            buttons = driver.browser.find_elements_by_css('.pv-profile-section__see-more-inline.pv-profile-section__text-truncate-toggle.link')
            for button in buttons:
                button.click()
        except Nosuch:
            pass
        driver.get('https://www.linkedin.com/in/' + user)
        html = driver.page_source
        source = BeautifulSoup(html, 'html.parser')
        sections = source.find_all('section')
        skills = []
        name = None
        headline = None
        location = None
        connections = None
        first_text = None
        work_name = None
        school_name = None
        # Dispatch on the section's CSS class string to pick a parser.
        for section in sections:
            section_class = section.get('class')
            section_class = ' '.join(section_class)
            if section_class == 'pv-profile-section pv-top-card-section artdeco-container-card ember-view': # Name
                print('name section --------------------')
                n = randint(4, 14)
                time.sleep(n)
                try:
                    name = section.find('h1', {'class': 'pv-top-card-section__name Sans-26px-black-85%'}).text
                except:
                    pass
                try:
                    headline = section.find('h2', {'class': 'pv-top-card-section__headline mt1 Sans-19px-black-85%'}).text
                except:
                    pass
                try:
                    location = section.find('h3', {
                        'class': 'pv-top-card-section__location Sans-17px-black-55%-dense mt1 inline-block'}).text
                except:
                    pass
                try:
                    connections = section.find('span', {
                        'class': 'pv-top-card-v2-section__entity-name pv-top-card-v2-section__connections ml2 Sans-15px-black-85%'}).text
                except:
                    pass
                try:
                    first_text = section.find('p', {
                        'class': 'pv-top-card-section__summary-text text-align-left mt4 ember-view'}).text
                except:
                    pass
                print(name, headline, location, connections, first_text)
            elif section_class == 'pv-profile-section experience-section ember-view':
                print('experience section --------------------')
                n = randint(4, 14)
                time.sleep(n)
                try:
                    exp_divs = section.find_all('div', {'class': 'pv-entity__summary-info'})
                except:
                    pass
                for exp_div in exp_divs:
                    try:
                        work_name = exp_div.find('h3', {'class': 'Sans-17px-black-85%-semibold'}).text
                    except:
                        pass
                    try:
                        exp_div_h4 = exp_div.find_all('h4')
                    except:
                        pass
                    print(work_name)
                    for exp_div1 in exp_div_h4:
                        print(exp_div1.text)
            elif section_class == 'pv-profile-section education-section ember-view':
                print('education section --------------------')
                n = randint(4, 14)
                time.sleep(n)
                try:
                    edu_divs = section.find_all('div', {'class': 'pv-entity__summary-info'})
                except:
                    pass
                for edu_div in edu_divs:
                    try:
                        school_name = edu_div.find('h3',
                                                   {'class': 'pv-entity__school-name Sans-17px-black-85%-semibold'}).text
                    except:
                        pass
                    try:
                        edu_div_h4s = edu_div.find_all('span', {'class': 'pv-entity__comma-item'})
                    except:
                        pass
                    print(school_name)
                    for edu_div_h4 in edu_div_h4s:
                        print(edu_div_h4.text)
            elif section_class == 'pv-profile-section volunteering-section ember-view':
                print('volunteer section --------------------')
                n = randint(4, 14)
                time.sleep(n)
                try:
                    vol_uls = section.find_all('ul', {
                        'class': 'pv-profile-section__section-info section-info pv-profile-section__section-info--has-no-more ember-view'})
                except:
                    pass
                for vol_ul in vol_uls:
                    try:
                        vol_name = vol_ul.find('h3', {'class': 'Sans-17px-black-85%-semibold'}).text
                    except:
                        pass
                    print(vol_name)
                    try:
                        vol_div_h4 = vol_ul.find_all('h4')
                    except:
                        pass
                    for vol_div1 in vol_div_h4:
                        print(vol_div1.text)
            elif section_class == 'pv-profile-section pv-skill-categories-section artdeco-container-card ember-view':
                print('Skills section --------------------')
                n = randint(4, 14)
                time.sleep(n)
                try:
                    header = section.find('h2', {'class': 'pv-profile-section__card-heading Sans-21px-black-85%'})
                except:
                    pass
                # NOTE(review): `header` is a Tag; compare header.text instead?
                if header == 'Skills & Endorsements':
                    try:
                        spans = section.find_all('span', {'class': 'Sans-17px-black-100%-semibold'})
                    except:
                        pass
                    for span in spans:
                        skills.append(span.text)
                    print(*skills)
                elif header == 'Skills':
                    try:
                        ps = section.find_all('p',
                                              {'class': 'pv-skill-category-entity__name Sans-17px-black-100%-semibold'})
                    except:
                        pass
                    for par in ps:
                        skills.append(par.text)
                    print(*skills)
            elif section_class == 'pv-profile-section pv-accomplishments-section artdeco-container-card ember-view':
                print('accomplishments section --------------------')
                try:
                    acc_h3s = list(section.find_all('h3', {'class': 'pv-entity__summary-info'}))
                except:
                    pass
                try:
                    acc_h3s_no = list(
                        section.find_all('h3', {'class': 'pv-accomplishments-block__count Sans-34px-black-100% pr3'}))
                except:
                    pass
                try:
                    ul_list = list(section.find('ul', {
                        'class': 'pv-accomplishments-block__summary-list Sans-15px-black-70% pv-accomplishments-block__summary-list-more'}))
                except:
                    pass
                for i in range(0, len(acc_h3s)):
                    print(acc_h3s[i].text)
                    print(acc_h3s_no[i].text)
                    print(ul_list[i].text)
            elif section_class == 'pv-profile-section pv-interests-section artdeco-container-card ember-view':
                print('asdf section --------------------')
                # NOTE(review): section_class is a *string* here, so
                # .find_all raises AttributeError (swallowed by bare except)
                # and spans_interests stays unbound -> NameError below.
                try:
                    spans_interests = section_class.find_all('span', {'class': 'pv-entity__summary-title-text'})
                except:
                    pass
                for span_interest in spans_interests:
                    span_interest.text
            elif section_class == 'pv-profile-section pv-recommendations-section artdeco-container-card ember-view':
                print('recommendations section --------------------')
                n = randint(4, 14)
                time.sleep(n)
                try:
                    rec_lis = section.find_all('li', {'class': 'pv-recommendation-entity ember-view'})
                except:
                    pass
                for rec_li in rec_lis:
                    try:
                        person = rec_li.find('div', {'class': 'pv-recommendation-entity__header'})
                    except:
                        pass
                    try:
                        letter = rec_li.find('div', {'class': 'pv-recommendation-entity__highlights'})
                    except:
                        pass
                    print(person)
                    print(letter)
# Module-level setup: create the shared driver used by scrape(), then run.
opts = Options()
opts.add_argument(
    "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36")
driver = webdriver.Chrome("C:/Users/alexel_t91/Downloads/chromedriver_win32/chromedriver.exe", chrome_options=opts)
userlist = []  # filled by scrape() with raw CSV rows
scrape()
| true
|
71532dba09e974748d322ee91ac1187f7ef9bd77
|
Python
|
MrNewaz/Learning-Dgango
|
/Done/lesson 2/integers.py
|
UTF-8
| 57
| 2.890625
| 3
|
[] |
no_license
|
# Basic literal/printing demo from a tutorial exercise.
items = 4  # int literal (unused below)
price = 5.12  # float literal (unused below)
print('Na' * 10)  # string repetition: prints "Na" ten times
print('Batman')
| true
|
af055cbd2759be60833605b54bc459637d15fb2a
|
Python
|
nikolkam/Automated-Plant-Watering-System
|
/water.py
|
UTF-8
| 2,197
| 3.328125
| 3
|
[] |
no_license
|
#initialization
import RPi.GPIO as GPIO
import time
import schedule
import datetime
GPIO.setmode(GPIO.BCM)  # use Broadcom pin numbering
GPIO.setwarnings(False)
soil_in = 21 #PIN for reading soil moisture
soil_out = 18 #PIN for turning on/off soil moisture sensor
motor = 20 #PIN for water pump
#setting up PINS
GPIO.setup(soil_out,GPIO.OUT)
GPIO.setup(soil_in,GPIO.IN)
GPIO.setup(motor,GPIO.OUT)
GPIO.output(motor,1)  # pump is active-low: 1 = off
#logging
print("---Starting the program---")
temp_time = datetime.datetime.now() #current time
temp_str = temp_time.strftime('%Y/%m/%d %H:%M:%S') #time to string
with open('water_log.txt','a') as file_object:
    file_object.write("Program Started: "+temp_str+"\n")
#User input
lim = int(input("What is the limit for sucsessive watering?"))
freq = int(input("How often do you want to check the soil?(Hour):"))
#reads soil moisture sensor Dry:1 Wet:0
def read_moisture():
    # Power the sensor, wait for it to settle, sample, then power it off
    # again to limit electrode corrosion.
    GPIO.output(soil_out,1)
    time.sleep(5)
    moisture = GPIO.input(soil_in)
    #for sensor testing
    """
    if(moisture): #soil is dry when sensor is 1 wet when 0
        print('Soil is dry.')
    else:
        print('Soil is wet.')
    """
    GPIO.output(soil_out,0)
    return moisture
#gives water for 10 seconds (default)
def water(seconds=10):
    # Active-low pump: 0 switches it on, 1 switches it off.
    GPIO.output(motor,0)
    time.sleep(seconds)
    GPIO.output(motor,1)
#process to be repeatedly executed
#limit: maximum successive watering
def process(limit = lim):
    # NOTE(review): `limit` default is bound once at definition time from
    # the `lim` read above.
    moisture =read_moisture()
    temp_time = datetime.datetime.now()
    temp_str = temp_time.strftime('%Y/%m/%d %H:%M:%S')
    if(moisture):
        with open('water_log.txt', 'a') as file_object:
            file_object.write("WATERED:"+temp_str+"\n")
        count = 0 #counts number of consecutive watering
        while(moisture):
            if(count>=limit):
                time.sleep(15)
                return
            # NOTE(review): dead branch -- the while condition already
            # guarantees `moisture` is truthy here.
            elif (not moisture):
                return
            water()
            moisture = read_moisture()
            count = count + 1
#program starting time
temp_time = datetime.datetime.today()
print(temp_time)
# Check the soil every `freq` hours.
schedule.every(freq).hours.do(process)
while True:
    try:
        schedule.run_pending()
        time.sleep(1)
    except KeyboardInterrupt:
        GPIO.cleanup()
GPIO.cleanup()
| true
|
9db388d616413c6f04cd29d85f0b2790dfba873f
|
Python
|
ns-m/netology_ad_py_Tests
|
/API_yandex/main.py
|
UTF-8
| 569
| 2.546875
| 3
|
[] |
no_license
|
import requests
# Yandex Translate REST endpoint.
URL = "https://translate.yandex.net/api/v1.5/tr.json/translate"
def translate_to_file(text, lang):
    """Translate *text* via the Yandex API and return the joined result.

    SECURITY(review): the API key is hard-coded below; it should be moved
    to configuration / an environment variable.
    """
    resp_translate = requests.post(URL, params={
        "key": "trnsl.1.1.20191128T170444Z.c56d359e1889b3b7.8fccca1aa4fe51ff1bb52de2213efc89f26608ff",
        "text": text,
        "lang": lang})
    # The API returns {"text": [...]}; join the fragments with spaces.
    resp_translate = resp_translate.json()["text"]
    resp_translate = ' '.join(resp_translate)
    return resp_translate
def translate_to_file_status_code(text, lang):
    """Same request as translate_to_file() but return the HTTP status code
    (used by the tests to assert the API is reachable)."""
    resp_translate = requests.post(URL, params={
        "key": "trnsl.1.1.20191128T170444Z.c56d359e1889b3b7.8fccca1aa4fe51ff1bb52de2213efc89f26608ff",
        "text": text,
        "lang": lang})
    return resp_translate.status_code
| true
|
6fb82edd6333569362acbdf0df8f890fdcac5986
|
Python
|
ChrisEngelhardt/flambda-sentim
|
/lambdas/sentim-batch/sentim-batch.py
|
UTF-8
| 785
| 2.765625
| 3
|
[] |
no_license
|
import json
import math
def batch(arr, n):
    """Split *arr* into consecutive chunks of at most *n* items (n floored at 1)."""
    size = max(1, n)
    return [arr[start:start + size] for start in range(0, len(arr), size)]
def main(j):
    """Split the tweets in *j* into roughly `splitNumber` batches.

    Returns a dict holding the keyed sub-arrays plus batch/tweet counts.
    """
    tweets = j['arrayToSplit']
    target_batches = j['splitNumber']
    # Each batch holds ceil(total / target) tweets.
    per_batch = math.ceil(len(tweets) / target_batches)
    chunks = batch(tweets, per_batch)
    return {
        'subArrays': [{'tweets': chunk} for chunk in chunks],
        'num_batches': len(chunks),
        'num_tweets_total': len(tweets),
    }
if __name__ == "__main__":
    import sys
    import json  # NOTE(review): json is already imported at module level
    # read the json
    f = open("jsonInput.json")  # NOTE(review): never closed; prefer `with`
    j = json.loads(f.read())
    result = main(j)
    # write to std out
    print (json.dumps(result))
| true
|
584cad7a09a5637e1451d0fbfd2fb3d88a7a1ffc
|
Python
|
Tayuba/Data-MInning-From-Webscrapping
|
/exams.py
|
UTF-8
| 612
| 3.03125
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
# Scratch/practice script: numpy, pandas, then a small IMDb scrape.
num = [0,1,2,3,4,5,6,7,8]
# NOTE(review): this shadows the builtin `dict` for the rest of the module.
dict = {
    "name": num
}
let = [ "a", "b", "c", "d", "e", "f"]  # unused
array_num = np.array(num)  # unused
df = pd.DataFrame(num, columns=["list"])
print(df)
half = []  # unused
# Print the first half of the list.
for i in range(len(num)//2):
    print(num[i])
mat =np.array([[1,2,3],[4,6,7]])
print(mat)
# Scrape the top-rated movies page and print part of the first <h3>.
page = "https://www.imdb.com/search/title/?groups=top_100&sort=user_rating,desc"
response = requests.get(page)
print(response)
soup = BeautifulSoup(response.content, "html.parser")
title_soup = soup.find_all("h3")
print(list(title_soup[0])[3].text)
| true
|
fea58c2ea0adca5242f7b77fe4edd0d239b1656f
|
Python
|
quekdylan/Assignment1
|
/RatVenture/RatVenture.py
|
UTF-8
| 8,724
| 3.265625
| 3
|
[] |
no_license
|
from RatVenture_functions import *
from RatVenture_classes import *
import sys
#Default values
v_filename = "save.txt"          # save-game file
v_location="0,0"                 # player position as "x,y"
v_day = 1                        # in-game day counter
v_rat_encounter = False          # True once the first open-field rat is beaten
v_town_locations = ["0,0", "3,1", "5,2", "1,3", "4,6"]
v_orb_location = setOrbLocation(v_town_locations)  # random non-town tile
v_rat_king_alive = True
#Display Main Menu
while(True):
    mainMenu()
    option = int(input("Enter your option: "))
    if(option == 1):
        #Creates a new game using newGame() function and receives player object
        player = newGame()
        break
    elif(option == 2):
        player, v_location, v_day = resumeGame(v_filename)
        break
    elif(option == 3):
        print("The game will now exit.")
        exitGame()
    else:
        print("Invalid option. Please enter again.")
#Main program
# Game loop: branch on the player's location (town / rat fight / Rat King /
# open field) until the Rat King is slain or the player dies.
while(True):
    # If player is in a town
    if(checkLocation(v_location, v_town_locations) == "You are in a town"):
        # If orb in town
        if (v_location == v_orb_location and player.orb == False):
            player = pickOrb(player)
        townMenu(v_day)
        choice = int(input("Enter your choice: "))
        # View Character
        if(choice == 1):
            viewCharacter(player)
            continue
        # View Map
        elif(choice == 2):
            print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
            continue
        # Move
        elif(choice == 3):
            while(True):
                print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                print("W = up; A = left; S = down; D = right")
                direction = input("Your Move: ")
                # move() returns 0 for an illegal move; retry until legal.
                if(move(v_location, direction, v_day) != 0):
                    v_location, v_day = move(v_location, direction, v_day)
                    print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                    print("Day " + str(v_day) + " " + checkLocation(v_location, v_town_locations))
                    break
        # Rest
        elif(choice == 4):
            v_day, player.health = rest(v_day, player.health)
            continue
        # Save Game
        elif(choice == 5):
            saveGame(player.health, v_location, v_day)
            continue
        # Exit Game
        elif(choice == 6):
            exitGame()
        #User inputs invalid option
        else:
            print("Invalid option")
            continue
    # Rat encounter
    elif(checkLocation(v_location, v_town_locations) == "You are in the open" and v_rat_encounter == False):
        enemy = Entity('Rat', 10, '1-3', 1)
        in_combat = True
        while(in_combat):
            combatMenu(enemy)
            combatChoice = input("Enter Choice: ")
            # Attack
            if(combatChoice == '1'):
                # attack() status codes: 0 = enemy dead, 1 = player dead,
                # 2 = combat continues.
                player, enemy, status = attack(player, enemy, player.orb)
                if(status == 2):
                    continue
                elif(status == 0):
                    print('The rat is dead! You are victorious!')
                    in_combat = False
                    v_rat_encounter = True
                elif(status == 1):
                    print('You died. Game over.')
                    exitGame()
            # Run
            elif(combatChoice == '2'):
                run()
                outdoorMenu()
                outdoorChoice = input("Enter choice: ")
                # View Character
                if(outdoorChoice == '1'):
                    viewCharacter(player)
                    # Rat encounter (Health is reset)
                    enemy = Entity('Rat', 10, '1-3', 1)
                # View Map
                elif(outdoorChoice == '2'):
                    print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                    # Rat encounter (Health is reset)
                    enemy = Entity('Rat', 10, '1-3', 1)
                # Move
                elif(outdoorChoice == '3'):
                    in_combat = False
                    while(True):
                        print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                        print("W = up; A = left; S = down; D = right")
                        direction = input("Your Move: ")
                        if(move(v_location, direction, v_day) != 0):
                            v_location, v_day = move(v_location, direction, v_day)
                            print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                            print("Day " + str(v_day) + ". " + checkLocation(v_location, v_town_locations))
                            break
                # Exit Game
                elif(outdoorChoice == '4'):
                    exitGame()
                else:
                    print("Invalid option. Please try again.")
            else:
                print("Invalid option. Please try again.")
        continue
    # Rat King encounter
    elif(checkLocation(v_location, v_town_locations) == "You see the Rat King!" and v_rat_king_alive == True):
        enemy = Entity('Rat King', 25, '8-12', 5)
        in_combat = True
        while(in_combat):
            combatMenu(enemy)
            combatChoice = input("Enter Choice: ")
            # Attack
            if(combatChoice == '1'):
                player, enemy, status = attack(player, enemy, player.orb)
                if(status == 2):
                    continue
                elif(status == 0):
                    print('The Rat King is dead! You are victorious!')
                    in_combat = False
                    v_rat_king_alive = False
                    exitGame()
                elif(status == 1):
                    print('You died. Game over.')
                    exitGame()
            # Run
            elif(combatChoice == '2'):
                run()
                outdoorMenu()
                outdoorChoice = input("Enter choice: ")
                # View Character
                if(outdoorChoice == '1'):
                    viewCharacter(player)
                    # Rat encounter (Health is reset)
                    enemy = Entity('Rat King', 25, '8-12', 5)
                # View Map
                # BUG(review): copy-paste from the rat branch -- this tests
                # combatChoice (always '2' here, so this branch fires for any
                # outdoorChoice != '1') instead of outdoorChoice, and
                # viewMap() is called with the wrong arity.
                elif(combatChoice == '2'):
                    print(viewMap(v_location))
                    # Rat encounter (Health is reset)
                    enemy = Entity('Rat King', 25, '8-12', 5)
                # Move
                # BUG(review): should test outdoorChoice == '3'; unreachable
                # because the branch above always matches first.
                elif(combatChoice == '3'):
                    in_combat = False
                    while(True):
                        print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                        print("W = up; A = left; S = down; D = right")
                        direction = input("Your Move: ")
                        if(move(v_location, direction, v_day) != 0):
                            v_location, v_day = move(v_location, direction, v_day)
                            print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                            print("Day " + str(v_day) + ". " + checkLocation(v_location, v_town_locations))
                            break
                # Exit Game
                elif(outdoorChoice == '4'):
                    exitGame()
                else:
                    print("Invalid option. Please try again.")
            else:
                print("Invalid option. Please try again.")
        continue
    # If player is in the open and has already encountered a rat
    elif(checkLocation(v_location, v_town_locations) == "You are in the open"):
        outdoorMenu()
        outdoorChoice = input("Enter choice: ")
        # View Character
        if(outdoorChoice == '1'):
            viewCharacter(player)
        # View Map
        elif(outdoorChoice == '2'):
            print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
        # Move
        elif(outdoorChoice == '3'):
            while(True):
                print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                print("W = up; A = left; S = down; D = right")
                direction = input("Your Move: ")
                if(move(v_location, direction, v_day) != 0):
                    v_location, v_day = move(v_location, direction, v_day)
                    print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
                    print("Day " + str(v_day) + ". " + checkLocation(v_location, v_town_locations))
                    break
        # Exit Game
        elif(outdoorChoice == '4'):
            exitGame()
        else:
            print("Invalid option. Please try again.")
| true
|
89d0fb0b31a8b320e1392551ed74bff4b3f28fbd
|
Python
|
s70c3/searchmachine
|
/service/models/nomeclature_recognition/utils.py
|
UTF-8
| 2,100
| 3.125
| 3
|
[] |
no_license
|
import pdf2image
import cv2
import numpy as np
from PIL import Image
def pil2cv(pil_img):
    # Convert a PIL RGB image to a single-channel grayscale OpenCV array.
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2GRAY)
def cv2pil(cv_img):
    # Wrap an OpenCV/numpy array as a PIL Image (no copy of semantics).
    return Image.fromarray(cv_img)
def stats(arr, unique=False):
    """Print quick debug statistics for a numpy array; optionally also its
    unique values."""
    print(f'shape={arr.shape}, type={arr.dtype}, min={arr.min()}, max={arr.max()}')
    if unique:
        print('unique values: ', np.unique(arr))
def read_pdf(pt):
    # Rasterize only the FIRST page of the PDF and return it as grayscale.
    img = pdf2image.convert_from_path(pt)[0]
    return pil2cv(img)
def threshold(img, th):
    """Binarize *img*: pixels strictly above *th* become 255, others 0 (uint8)."""
    return np.where(img > th, np.uint8(255), np.uint8(0))
def identity(x):
    # No-op transform, used where a pipeline slot expects a callable.
    return x
def invert(img):
    """Photometric negative: map each pixel value v to 255 - v."""
    return np.subtract(255, img)
# Expand a single-channel grayscale image to 3-channel RGB.
def gray2rgb(i): return cv2.cvtColor(i, cv2.COLOR_GRAY2RGB)
def extract_contours(img):
    """Extract the table grid (horizontal + vertical ruling lines) from a
    grayscale page image and return it as a binary uint8 mask."""
    H, W = img.shape
    #thresholding the image to a binary image
    thresh,img_bin = cv2.threshold(img,254,255,cv2.THRESH_BINARY |cv2.THRESH_OTSU)
    #inverting the image
    img_bin = invert(img_bin)
    # Length(width) of kernel as 100th of minimal dimension
    kernel_len = min(H, W)//100
    # # Length(width) of kernel as 100th of height
    # kernel_len = H//100
    # Defining a vertical kernel to detect all vertical lines of image
    ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_len))
    # Defining a horizontal kernel to detect all horizontal lines of image
    hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_len, 1))
    # A kernel of 2x2
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    #Use vertical kernel to detect and save the vertical lines in a jpg
    # (erode removes everything shorter than the kernel; dilate restores it)
    image_1 = cv2.erode(img_bin, ver_kernel, iterations=3)
    vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=3)
    #Use horizontal kernel to detect and save the horizontal lines in a jpg
    image_2 = cv2.erode(img_bin, hor_kernel, iterations=3)
    horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=3)
    # Combine horizontal and vertical lines in a new third image, with both having same weight.
    img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)
    t_vh = threshold(img_vh, 10)
    return t_vh
| true
|
3d9ce669d1007b93732b21c4a3ee1a666b955662
|
Python
|
ebebbington/denv-creator
|
/denv-creator/test.py
|
UTF-8
| 1,990
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
import os
def get_current_directory():
    # Print the directory containing THIS source file (not the cwd).
    current_dir = os.path.dirname(os.path.realpath(__file__))
    print(current_dir)
def add_white_spaces_at_start_of_string(text, numberOfSpaces) -> str:
    """Return *text* prefixed with `numberOfSpaces` spaces (no-op if <= 0,
    matching str.rjust semantics)."""
    pad = " " * max(numberOfSpaces, 0)
    return pad + text
# Demo call: prints "hello world" indented by five spaces.
spaces = add_white_spaces_at_start_of_string('hello world', 5)
print(spaces)
def get_current_file_name():
    # Print the path of this module as recorded at import time.
    print(__file__)
def write_array_to_file():
    """Write a sample docker-compose nginx service definition, one line per
    list entry, to ./test.txtttt.

    Fix: the output file is now closed deterministically with a context
    manager; the original opened it and never closed it.
    """
    docker_compose_content = [
        "  nginx:",
        "    container_name: {}".format('nginx'),
        "    build:",
        "      context: .",
        "      dockerfile: .docker/{}".format('nginx.dockerfile'),
        "    volumes:",
        "      - ./src:/var/www/src",
        "    working_dir: /var/www/src",
        "    ports:",
        "      - '{}:{}'".format(3001, 3001),
        "    networks:",
        "      - {}-network".format('test')
    ]
    with open('./test.txtttt', 'w') as output_file:
        for line in docker_compose_content:
            output_file.write(line + '\n')
write_array_to_file()
def val_is_set(val):
    # Validate that *val* is non-empty.
    # NOTE(review): `Response` is not defined/imported in this file -- this
    # raises NameError when the check fails; confirm the intended import.
    if len(val) < 1:
        Response.show_error('You did not specify a value')
def check_is_array(vals):
    # Validate that *vals* is a list.
    # NOTE(review): `Response` is not defined/imported in this file.
    if not isinstance(vals, list):
        Response.show_error('The given containers is not a list')
def contains_only_one_web_server(container_list):
    """Error out if the container list names more than one web server.

    NOTE(review): `Response` is not defined/imported in this file.
    """
    possible_web_servers = [
        'nginx',
        'apache'
    ]
    count = 0
    for container in container_list:
        for possible_web_server in possible_web_servers:
            # Create the count
            if possible_web_server == container:
                count = count + 1
            # Check if more thn 2 servers are defined
            if count > 1:
                Response.show_error('You have defined more than one web server')
# Leftover experiments, kept commented out by the original author.
tabs = 'some text:' + '\tsome text part of the above text'
#print(tabs)
# Get directory of where the user was when they executed the script
#print(os.getcwd() + '/../')
| true
|
ead39756db3a23879feb6c9d6cd9bb83b79faef4
|
Python
|
TaylorBoyd/Pente
|
/PenteMainCode.py
|
UTF-8
| 8,219
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
class Board(object):
    """Pente board: a width x height grid of spaces, where "+" marks an
    empty space and any other string is a player's stone."""
    def __init__(self, height, width):
        self.width = width
        self.height = height
        self.winner = False          # set True once someone wins
        self.spaces = {}             # (x, y) -> "+" or stone string
        for x in range(self.width):
            for y in range(self.height):
                self.spaces[(x, y)] = "+"
    def is_stone_at_position(self, x, y):
        """Return True if any stone occupies (x, y)."""
        if self.spaces[(x, y)] == "+":
            return False
        else:
            return True
    def clear_board(self):
        """Reset every space to empty and clear the winner flag."""
        for x in range(self.width):
            for y in range(self.height):
                self.spaces[(x, y)] = "+"
        self.winner = False
    def is_winner(self):
        """Return True once a player has won."""
        return self.winner
    def set_winner(self):
        """Latch the game-over flag."""
        self.winner = True
    def is_on_board(self, x, y):
        """Return True if (x, y) lies within the grid."""
        return (0 <= x <= self.width - 1) and (0 <= y <= self.height - 1)
    def place_stone(self, x, y, stone):
        """Put *stone* at (x, y) if the space is empty; otherwise complain."""
        if not self.is_stone_at_position(x, y):
            self.spaces[(x, y)] = stone
        else:
            print("Already a stone at that position")
    def show_board(self):
        """Print the board with y increasing upward (row y=height-1 first)."""
        for y in range(self.height - 1, -1, -1):
            board_line = ""
            for x in range(self.width):
                board_line = board_line + self.spaces[(x, y)] + " "
            print(board_line)
    def remove_stone(self, x, y):
        """Empty the space at (x, y)."""
        self.spaces[(x, y)] = "+"
    def capture_stones(self, x, y, stone):
        """Takes in a position and a players stone
        Looks first to see if a capture is even possible by a player having a stone 3 spaces away
        Then checks the space in between for possible captures
        Removes the stones and returns the number of captures made

        Each of the eight try-blocks below checks one direction; KeyError
        means the probed space is off the board, which simply skips that
        direction.
        """
        captures = 0
        try:
            if self.spaces[(x+3, y+0)] == stone:
                if self.spaces[(x+1, y+0)] != stone and self.is_stone_at_position(x+1, y+0):
                    if self.spaces[(x+2, y+0)] != stone and self.is_stone_at_position(x+2, y+0):
                        captures += 1
                        self.remove_stone(x+1, y)
                        self.remove_stone(x+2, y)
        except KeyError:
            pass
        try:
            if self.spaces[(x-3, y+0)] == stone:
                if self.spaces[(x-1, y+0)] != stone and self.is_stone_at_position(x-1, y+0):
                    if self.spaces[(x-2, y+0)] != stone and self.is_stone_at_position(x-2, y+0):
                        captures += 1
                        self.remove_stone(x-1, y)
                        self.remove_stone(x-2, y)
        except KeyError:
            pass
        try:
            if self.spaces[(x+0, y+3)] == stone:
                if self.spaces[(x+0, y+1)] != stone and self.is_stone_at_position(x+0, y+1):
                    if self.spaces[(x+0, y+2)] != stone and self.is_stone_at_position(x+0, y+2):
                        captures += 1
                        self.remove_stone(x, y+1)
                        self.remove_stone(x, y+2)
        except KeyError:
            pass
        try:
            if self.spaces[(x+0, y-3)] == stone:
                if self.spaces[(x+0, y-1)] != stone and self.is_stone_at_position(x+0, y-1):
                    if self.spaces[(x+0, y-2)] != stone and self.is_stone_at_position(x+0, y-2):
                        captures += 1
                        self.remove_stone(x, y-1)
                        self.remove_stone(x, y-2)
        except KeyError:
            pass
        try:
            if self.spaces[(x+3, y+3)] == stone:
                if self.spaces[(x+1, y+1)] != stone and self.is_stone_at_position(x+1, y+1):
                    if self.spaces[(x+2, y+2)] != stone and self.is_stone_at_position(x+2, y+2):
                        captures += 1
                        self.remove_stone(x+1, y+1)
                        self.remove_stone(x+2, y+2)
        except KeyError:
            pass
        try:
            if self.spaces[(x+3, y-3)] == stone:
                if self.spaces[(x+1, y-1)] != stone and self.is_stone_at_position(x+1, y-1):
                    if self.spaces[(x+2, y-2)] != stone and self.is_stone_at_position(x+2, y-2):
                        captures += 1
                        self.remove_stone(x+1, y-1)
                        self.remove_stone(x+2, y-2)
        except KeyError:
            pass
        try:
            if self.spaces[(x-3, y+3)] == stone:
                if self.spaces[(x-1, y+1)] != stone and self.is_stone_at_position(x-1, y+1):
                    if self.spaces[(x-2, y+2)] != stone and self.is_stone_at_position(x-2, y+2):
                        captures += 1
                        self.remove_stone(x-1, y+1)
                        self.remove_stone(x-2, y+2)
        except KeyError:
            pass
        try:
            if self.spaces[(x-3, y-3)] == stone:
                if self.spaces[(x-1, y-1)] != stone and self.is_stone_at_position(x-1, y-1):
                    if self.spaces[(x-2, y-2)] != stone and self.is_stone_at_position(x-2, y-2):
                        captures += 1
                        self.remove_stone(x-1, y-1)
                        self.remove_stone(x-2, y-2)
        except KeyError:
            pass
        return captures
    def five_in_a_row(self, x, y, stone):
        """ Takes in the stone position and a player stone
        Checks in each direction for 5 in a row
        Returns True if there is a Pente or False otherwise

        Scans a 9-space window centred on (x, y) along the row, column and
        both diagonals; each counter resets on a gap or opposing stone.
        """
        count_vertical = 0
        count_horizontal = 0
        count_diagonal_1 = 0
        count_diagonal_2 = 0
        for i in range(-4, 5):
            if self.is_on_board(x+i, y):
                if self.spaces[(x+i, y)] == stone:
                    count_vertical += 1
                else:
                    count_vertical = 0
                if count_vertical >= 5:
                    return True
            if self.is_on_board(x, y+i):
                if self.spaces[(x, y+i)] == stone:
                    count_horizontal += 1
                else:
                    count_horizontal = 0
                if count_horizontal >= 5:
                    return True
            if self.is_on_board(x+i, y+i):
                if self.spaces[(x+i, y+i)] == stone:
                    count_diagonal_1 += 1
                else:
                    count_diagonal_1 = 0
                if count_diagonal_1 >= 5:
                    return True
            if self.is_on_board(x+i, y-i):
                if self.spaces[(x+i, y-i)] == stone:
                    count_diagonal_2 += 1
                else:
                    count_diagonal_2 = 0
                if count_diagonal_2 >= 5:
                    return True
        return False
class Player(object):
    """A Pente player: owns a stone marker, a capture count and a reference
    to the shared board."""
    def __init__(self, board, stone):
        self.captures = 0            # pairs captured so far
        self.player_stone = stone    # string drawn on the board
        self.board = board           # shared Board instance
    def __str__(self):
        return("The player has {} captures and is using stone {}".format(self.captures, self.player_stone))
    def take_turn(self, x, y):
        """Place a stone at (x, y), resolve captures, and flag a win on
        either 5 captures or five-in-a-row."""
        self.board.place_stone(x, y, self.player_stone)
        self.captures += self.board.capture_stones(x, y, self.player_stone)
        if self.captures >= 5 or self.board.five_in_a_row(x, y, self.player_stone):
            self.board.set_winner()
    def reset(self):
        """Zero the capture count for a new game."""
        self.captures = 0
if __name__ == "__main__":
    """Interactive two-player game driver.

    Fixes: the original called players[player_turn].take_turn() with no
    arguments (take_turn requires x, y -> TypeError on the first move), and
    announced the winner with `turn % num_players` AFTER the increment,
    naming the wrong player.
    """
    num_players = 2
    standard_board = Board(19, 19)
    players = []
    turn = 0
    for i in range(num_players):
        players.append(Player(standard_board, str(i)))
    while not standard_board.is_winner():
        player_turn = turn % num_players
        standard_board.show_board()
        # take_turn() requires board coordinates; read them from the player.
        try:
            x, y = (int(v) for v in input(
                "Player{} enter move as 'x y': ".format(player_turn + 1)).split())
        except ValueError:
            print("Invalid input. Enter two integers separated by a space.")
            continue
        if not standard_board.is_on_board(x, y):
            print("Position is off the board.")
            continue
        if standard_board.is_stone_at_position(x, y):
            print("Already a stone at that position.")
            continue
        players[player_turn].take_turn(x, y)
        turn += 1
    # `turn` was incremented after the winning move, so step back one.
    winner = (turn - 1) % num_players
    print("")
    print("")
    print("Player{} WINS!!!".format(winner + 1))
    print("")
    print("")
| true
|
d04e1b55cf756e5a542139098e28d21059387bcd
|
Python
|
MiKoronjoo/Finglish2Farsi-telegram-bot
|
/finglish2farsi.py
|
UTF-8
| 1,792
| 2.828125
| 3
|
[] |
no_license
|
import telepot
from telepot.loop import MessageLoop
from telepot.namedtuple import InlineQueryResultArticle, InputTextMessageContent
from config import TOKEN
from time import sleep
from finglish import f2p
def f2f(text):
    """Transliterate Finglish *text* to Farsi, token by token.

    Non-alphabetic characters are first isolated into their own tokens,
    then every whitespace-separated token is converted with finglish.f2p().
    The result deliberately keeps a leading space, matching the original
    accumulation style.
    """
    spaced = ''.join(ch if ch.isalpha() else ' %c ' % ch for ch in text)
    return ''.join(' ' + f2p(token) for token in spaced.split())
def handle(msg):
    """Telegram chat handler: translate private text messages directly; in
    groups, translate the replied-to message when '/f2f' is sent."""
    content_type, chat_type, chat_id = telepot.glance(msg)
    if chat_type == u'private':
        if content_type == 'text':
            if msg['text'] == '/start':
                bot.sendMessage(chat_id, '*WELCOME* 🙂\nI translate Finglish to Farsi', 'Markdown')
            else:
                bot.sendMessage(chat_id, f2f(msg['text']))
    elif chat_type in [u'group', u'supergroup']:
        if content_type == 'text':
            if msg['text'].lower() == '/f2f':
                try:
                    # KeyError when '/f2f' was not sent as a reply.
                    bot.sendMessage(chat_id, f2f(msg['reply_to_message']['text']))
                except KeyError:
                    bot.sendMessage(chat_id, '`/f2f` should reply to a message that you want to translate it', 'Markdown')
def on_inline_query(msg):
    """Inline-mode handler: answer any inline query with one article whose
    content is the translated query string."""
    query_id, from_id, query_string = telepot.glance(msg, flavor='inline_query')
    articles = [InlineQueryResultArticle(
        id='f2f',
        title='Finglish 2 Farsi',
        input_message_content=InputTextMessageContent(
            message_text=f2f(query_string)
        )
    )]
    bot.answerInlineQuery(query_id, articles)
# Create the bot and start the long-polling message loop in a daemon thread.
bot = telepot.Bot(TOKEN)
MessageLoop(bot, {'chat': handle,
                  'inline_query': on_inline_query}).run_as_thread()
# Keep the program running ...
while 1:
    sleep(10)
| true
|
07ecadc07c1410b8a3091ebb3e52a3ab1c87ef79
|
Python
|
rlecaro2/uni-operating-systems-and-networks-homeworks
|
/verPosicion.py
|
UTF-8
| 481
| 2.703125
| 3
|
[] |
no_license
|
from proceso import Proceso
from fileManager import fileManager
from time import localtime, strftime
class verPosicion(Proceso):
    """Process that represents checking a position for a given duration and
    logs the action on completion."""
    def __init__(self, args):
        Proceso.__init__(self,args)
        # Duration in seconds, taken from the fifth CLI/process argument.
        self.duracion = int(args[4])
    def imprimir(self):
        # Build the log line: "<timestamp> - Posicion revisada durante N segundos."
        return strftime("%Y-%m-%d %H:%M:%S", localtime()) + " - Posicion revisada durante " + str(self.duracion) + " segundos."
    def finish(self):
        # Append the log line to the shared log file when the process ends.
        fileManager.appendToFile("Log.txt", self.imprimir())
| true
|
b19edcee688801cd11b85ac499851ab22411bf3a
|
Python
|
Sebastian-Torres-Matrix/mysql-project
|
/mysql-from-python.py
|
UTF-8
| 805
| 2.859375
| 3
|
[] |
no_license
|
import os
import datetime
import pymysql
# Get username from workspace
# modify this variable if running on another environment
username = os.getenv('C9_USER')
# Connect to database
connection = pymysql.connect(host='localhost', user=username, password='', db='Chinook')
try:
    # Run a query
    with connection.cursor(pymysql.cursors.DictCursor) as cursor:
        # Create the demo table first. Note that this will still display a
        # warning (not error) if the table already exists.
        cursor.execute("""CREATE TABLE IF NOT EXISTS
            Friends(name char(20), age int, DOB datetime);""")
        # Bug fix: `sql` was built but never executed, so the loop below
        # iterated over the (empty) result of the CREATE TABLE statement
        # instead of the Genre rows.
        sql = "SELECT * FROM Genre;"
        cursor.execute(sql)
        for row in cursor:
            print(row)
finally:
    # Close the connection, regardless of whether the above was successful
    connection.close()
| true
|
6115ff39e9a25d7df13f1432813edde4ff522d06
|
Python
|
ppuetsch/tankstellen_crawler
|
/tankstellen_crawler.py
|
UTF-8
| 1,196
| 3.078125
| 3
|
[] |
no_license
|
"""
Ermittelt links zu individuellen Tankstellen von mehr-tanken.de
"""
import requests_html
import re
from multiprocessing.pool import ThreadPool
def get_links_for_plz(plz):
    """Return the detail-page URLs of all petrol stations near postcode *plz*."""
    # Detail pages follow the pattern .../tankstelle/<8-char id>/
    pattern = re.compile("https://mehr-tanken.de/tankstelle/......../")
    with requests_html.HTMLSession() as session:
        print("Crawling Tankstellen Links für PLZ {}".format(plz))
        response = session.get("https://mehr-tanken.de/tankstellen",
                               params={"searchText": plz})
        # Keep only links pointing at an individual station's detail page.
        return {url for url in response.html.absolute_links if pattern.match(url)}
def get_links_for_plz_set(plz_set=None, max_concurrent_requests=8):
    """Return detail-page URLs for all petrol stations near any postcode in *plz_set*.

    The postcodes are crawled concurrently (at most *max_concurrent_requests*
    in flight) and the per-postcode result sets are merged into one set.

    BUG FIX: calling this with the advertised default ``plz_set=None``
    previously crashed with ``TypeError: object of type 'NoneType' has no
    len()``; a ``None`` or empty input now simply yields an empty set.
    """
    if not plz_set:
        return set()
    print("Crawling {} Postleitahlen nach Tankstellen".format(len(plz_set)))
    link_set = set()
    link_sets = ThreadPool(max_concurrent_requests).map(get_links_for_plz, plz_set)
    for per_plz_links in link_sets:
        link_set.update(per_plz_links)
    return link_set
| true
|
a9f7c958fd140736520b299d4c028a093f992c00
|
Python
|
uzin1021/pyworks
|
/ch01/indent.py
|
UTF-8
| 491
| 4.21875
| 4
|
[] |
no_license
|
# Indentation demo - 4-space indents replace the braces ({}) of other languages.
n = 10  # type (int) is inferred; no semicolon needed
if n % 2 == 0:  # statements such as `if` end with a colon (:) and indent their body
    print("짝수입니다")
else:
    print("홀수입니다")
"""
# 예시
print('a')
print('b')
print('c') #붙여서 쓰기 들여쓰기 주의!
"""
# Writing comments
"""주석 달기 """  # mind where the closing quotes go
msg = '''
사과
귤
감
'''  # a triple-quoted string keeps its line breaks
print(msg)
| true
|
ee3e56c9dbbb6bd1d0083c69cf30bcf8438c9f00
|
Python
|
half-potato/loopclosure
|
/models/finetune_contrast.py
|
UTF-8
| 1,698
| 2.546875
| 3
|
[] |
no_license
|
import tensorflow as tf
import model_util
slim = tf.contrib.slim
# Top half of the network that computes the features of each image
# layer_cutoff: The layer at which to cutoff the frozen mobilenet
# graph_path: The filepath to the frozen mobilenet
# is_training: Whether or not the network is training
# Returns:
# l_output: left output of network
# r_output: right output of network
def top(is_training, ckpt_dir, layer_cutoff):
    """Build the shared (siamese) top half of the network.

    Both images go through the same truncated-MobileNet branch (variables are
    reused for the right branch) and the flattened features are L2-normalized.
    Returns (l_inputs, r_inputs, l_output, r_output, post), where ``post`` is
    a callback that restores the latest checkpoint from *ckpt_dir* into a
    session.

    NOTE(review): ``graph_path`` used below is not defined in this function
    nor in the visible module scope - it appears it was meant to be a
    parameter (the original header comment documents one). As written this
    raises NameError unless a module-level ``graph_path`` is injected - confirm.
    """
    def branch(name):
        # Load the frozen MobileNet up to `layer_cutoff` and continue from it.
        inputs, output = model_util.load_mobilenet_and_continue(name, graph_path, layer_cutoff)
        flat = tf.contrib.layers.flatten(output)
        # Normalize features to unit L2 length.
        norm = tf.nn.l2_normalize(flat, 1, name="Norm")
        return inputs, norm
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
        with tf.variable_scope("mobilenet") as scope:
            l_inputs, l_output = branch("net")
        # Reuse the same variables so both branches share weights.
        with tf.variable_scope(scope, reuse=True):
            r_inputs, r_output = branch("net")
    def post(sess):
        # Restore the most recent checkpoint's variables into `sess`.
        ckpt = tf.train.latest_checkpoint(ckpt_dir)
        rest_var = slim.get_variables_to_restore()
        saver = tf.train.Saver(rest_var)
        saver.restore(sess, ckpt)
    return l_inputs, r_inputs, l_output, r_output, post
# Bottom half of the network that takes the top half outputs and produces a score
# l_output: left output of top network
# r_output: right output of top network
# is_training: Whether or not the network is training
# Returns:
# logits: The raw output of the network
# result: The true output of the network
def bot(l_output, r_output):
    """Score a pair of feature tensors by squared Euclidean distance.

    Returns a column tensor of per-pair squared L2 distances and logs the
    batch mean as a TensorBoard summary.
    """
    diff = l_output - r_output
    sq_dist = tf.reduce_sum(tf.square(diff), 1)
    distances = tf.expand_dims(sq_dist, 1)
    tf.summary.scalar('mean_output', tf.reduce_mean(distances))
    return distances
| true
|
d139fa492feb37d83bc8309ab3186d26a589131e
|
Python
|
tommymcglynn/samsa
|
/samsa/topics.py
|
UTF-8
| 3,785
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
__license__ = """
Copyright 2012 DISQUS
Copyright 2013 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from samsa.exceptions import NoAvailablePartitionsError
from samsa.partitioners import random_partitioner
from samsa.partitions import PartitionMap
from samsa.consumer import Consumer
from samsa.utils import attribute_repr
logger = logging.getLogger(__name__)
class TopicMap(object):
    """
    Provides a dictionary-like interface to :class:`~samsa.topics.Topic`
    instances within a cluster.

    :param cluster: The cluster this topic mapping is associated with.
    :type cluster: :class:`samsa.cluster.Cluster`
    """
    def __init__(self, cluster):
        self.cluster = cluster
        self.__topics = {}

    def __getitem__(self, key):
        """
        Dict-style access; a thin proxy to :meth:`~TopicMap.get`.
        """
        return self.get(key)

    def get(self, name):
        """
        Return the :class:`samsa.topics.Topic` registered under *name*,
        creating and registering a new topic on first access.
        """
        try:
            return self.__topics[name]
        except KeyError:
            topic = self.__topics[name] = Topic(self.cluster, name)
            logger.info('Registered new topic: %s', topic)
            return topic
class Topic(object):
    """
    A topic within a Kafka cluster.

    :param cluster: The cluster that this topic is associated with.
    :type cluster: :class:`samsa.cluster.Cluster`
    :param name: The name of this topic.
    :param partitioner: callable ``(partitions, key) -> Partition`` that picks
        the single partition a message is published to.
    :type partitioner: any callable type
    """
    def __init__(self, cluster, name, partitioner=random_partitioner):
        self.cluster = cluster
        self.name = name
        self.partitions = PartitionMap(self.cluster, self)
        self.partitioner = partitioner

    __repr__ = attribute_repr('name')

    def latest_offsets(self):
        """Return ``(broker id, latest offset)`` pairs for every partition."""
        return [(partition.broker.id, partition.latest_offset())
                for partition in self.partitions]

    def publish(self, data, key=None):
        """
        Publishes one or more messages to a partition of this topic chosen
        by the configured partitioner.

        :param data: message(s) to be sent to the broker.
        :type data: ``str`` or sequence of ``str``.
        :param key: a key to be used for semantic partitioning
        :type key: implementation-specific
        """
        if not len(self.partitions):
            raise NoAvailablePartitionsError('No partitions are available to '
                'accept a write for this message. (Is your Kafka broker '
                'running?)')
        target = self.partitioner(self.partitions, key)
        return target.publish(data)

    def subscribe(self, group):
        """
        Return a new :class:`samsa.consumer.consumer.Consumer` reading from
        this topic on behalf of the consumer group *group*.
        """
        return Consumer(self.cluster, self, group)
| true
|
f3446133e004cc3d7c7ffdededed5eef1fb92355
|
Python
|
Esantomi/ygl
|
/hg-python/format01.py
|
UTF-8
| 355
| 4.3125
| 4
|
[] |
no_license
|
# Converting numbers into strings with the format() function
format_a="{}만 원".format(5000)
format_b="파이썬 열공하여 첫 연봉 {}만 원 만들기 ".format(5000)
format_c="{} {} {}".format(3000, 4000, 5000)
format_d="{} {} {}".format(1, "문자열", True)
# Print the formatted results
print(format_a)
print(format_b)
print(format_c)
print(format_d)
| true
|
4d13996d26a3061d87231b2eaea467d4e264ec4c
|
Python
|
Simplon-IA-Bdx-1/the-movie-predictor-nicoOkie
|
/utils.py
|
UTF-8
| 250
| 3.5625
| 4
|
[] |
no_license
|
def split_name(name):
    """Split a full name into ``(firstname, lastname)``.

    Everything before the last space-separated word is the first name; the
    last word is the last name. A single-word name yields an empty first name.

    BUG FIX: the original recomputed the same join/index inside a pointless
    loop over every word; the final result is identical without the loop.
    """
    parts = name.split(" ")
    firstname = " ".join(parts[:-1])
    lastname = parts[-1]
    return (firstname, lastname)
| true
|
f75542e2c3a39f7b4bac1bafca5070c025578d84
|
Python
|
cyy0523xc/pytorch_yolov1
|
/util/loss.py
|
UTF-8
| 3,387
| 2.59375
| 3
|
[] |
no_license
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import *
from shapely.geometry import Polygon
import numpy as np
class YoloLoss(Module):
def __init__(self, num_class=20):
super(YoloLoss, self).__init__()
self.lambda_coord = 5
self.lambda_noobj = 0.5
self.S = 7
self.B = 2
self.C = num_class
def conver_box(self, box, index):
x, y, w, h = box
i, j = index
step = 1 / self.S
x = (x + j) * step
y = (y + i) * step
# x, y, w, h = x.item(), y.item(), w.item(), h.item()
a, b, c, d = [x - w / 2, y - h / 2, x + w / 2, y + h / 2]
return [max(a.item(), 0), max(b.item(), 0), w, h]
def compute_iou(self, box1, box2, index):
box1 = self.conver_box(box1, index)
box2 = self.conver_box(box2, index)
x1, y1, w1, h1 = box1
x2, y2, w2, h2 = box2
# 获取相交
inter_w = (w1 + w2) - (max(x1 + w1, x2 + w2) - min(x1, x2))
inter_h = (h1 + h2) - (max(y1 + h1, y2 + h2) - min(y1, y2))
if inter_h <= 0 or inter_w <= 0: # 代表相交区域面积为0
return 0
# 往下进行应该inter 和 union都是正值
inter = inter_w * inter_h
union = w1 * h1 + w2 * h2 - inter
return (inter / union).item()
def forward(self, pred, target):
batch_size = pred.size(0)
mask = target[:, :, :, 4] > 0
# noobj_mask = target[:, :, :, 4] == 0
target_cell = target[mask]
pred_cell = pred[mask]
obj_loss = 0
arry = mask.cpu().numpy()
indexs = np.argwhere(arry == True)
for i in range(len(target_cell)):
box = target_cell[i][:4]
index = indexs[i][1:]
pbox1, pbox2 = pred_cell[i][:4], pred_cell[i][5:9]
iou1, iou2 = (
self.compute_iou(box, pbox1, index),
self.compute_iou(box, pbox2, index),
)
if iou1 > iou2:
target_cell[i][4] = iou1
target_cell[i][9] = 0
else:
target_cell[i][9] = iou2
target_cell[i][4] = 0
tc = target_cell[..., :10].contiguous().view(-1, 5)
pc = pred_cell[..., :10].contiguous().view(-1, 5)
noobj_mask = tc[..., 4] == 0
noobj_pred = pred[:, :10].contiguous().view(-1, 5)[noobj_mask]
noobj_target = target[:, :10].contiguous().view(-1, 5)[noobj_mask]
mask = tc[..., 4] != 0
tc = tc[mask]
pc = pc[mask]
noobj_loss = F.mse_loss(noobj_target[:, 4], noobj_pred[:, 4], reduction="sum")
obj_loss = F.mse_loss(tc[:, 4], pc[:, 4], reduction="sum")
xy_loss = F.mse_loss(tc[:, :2], pc[:, :2], reduction="sum")
wh_loss = F.mse_loss(
torch.sqrt(tc[:, 2:4]), torch.sqrt(pc[:, 2:4]), reduction="sum"
)
class_loss = F.mse_loss(pred_cell[:, 10:], target_cell[:, 10:], reduction="sum")
loss = [
obj_loss,
self.lambda_noobj * noobj_loss,
self.lambda_coord * xy_loss,
self.lambda_coord * wh_loss,
class_loss,
]
loss = [ele / batch_size for ele in loss]
return loss
a = [0.8980, 0.0853, 0.0400, 0.1333]
b = [1.3521e-02, 8.1162e-01, 9.1178e-03, 3.3432e-04]
| true
|
07b36690c90020eca963ff0d589799accecb5797
|
Python
|
recuraki/PythonJunkTest
|
/atcoder/_codeforces/1658_b.py
|
UTF-8
| 1,690
| 2.65625
| 3
|
[] |
no_license
|
import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
"""
TLEのポイント:
- 入力高速化(*dat)
- グラフをsetでたどろうとしていませんか?
REの時のポイント
- inputしきっていますか?
"""
def resolve():
    """Answer q stdin queries: for even n print ((n//2)!)^2 mod 998244353,
    for odd n print 0 (Codeforces 1658B)."""
    #import pypyjit
    #pypyjit.set_param('max_unroll_recursion=-1')
    import math
    INF = 1 << 63
    ceil = lambda a, b: (((a) + ((b) - 1)) // (b))
    p = 998244353
    N = 20000 # precompute only as many factorials as needed
    # Factorial / inverse-factorial tables mod p via the standard O(N) recurrences.
    fact = [1, 1]
    factinv = [1, 1]
    inv = [0, 1]
    for i in range(2, N + 1):
        fact.append((fact[-1] * i) % p)
        inv.append((-inv[p % i] * (p // i)) % p)
        factinv.append((factinv[-1] * inv[-1]) % p)
    def do():
        # Answer a single query read from stdin.
        mod = 998244353
        n = int(input())
        if n%2 == 1:
            print(0)
            return
        ans = fact[n//2] **2
        ans %= mod
        print(ans)
    # n questions
    q = int(input())
    for _ in range(q):
        do()
class TestClass(unittest.TestCase):
    """stdin/stdout regression test: feeds the sample input to resolve()
    and compares the captured output against the expected answers."""
    def assertIO(self, input, output):
        # Swap std streams for in-memory buffers while resolve() runs.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the trailing newline
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_input_1(self):
        print("test_input_1")
        input = """7
1
2
3
4
5
6
1000"""
        output = """0
1
0
4
0
36
665702330"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
| true
|
086237b63f7f05b35b5a46acbe807c677b601aaf
|
Python
|
naisargidave/PageRank
|
/PageRank.py
|
UTF-8
| 6,662
| 3.046875
| 3
|
[] |
no_license
|
# homework 3
# goal: ranked retrieval, PageRank, crawling
# exports:
# student - a populated and instantiated cs525.Student object
# PageRankIndex - a class which encapsulates the necessary logic for
# indexing and searching a corpus of text documents and providing a
# ranked result set
# ########################################
# first, create a student object
# ########################################
import cs525
import re
import requests
import numpy as np
from urllib.parse import urljoin
from collections import defaultdict
import bs4 as BeautifulSoup # you will want this for parsing html documents
# our index class definition will hold all logic necessary to create and search
# an index created from a web directory
#
# NOTE - if you would like to subclass your original Index class from homework
# 1 or 2, feel free, but it's not required. The grading criteria will be to
# call the index_url(...) and ranked_search(...) functions and to examine their
# output. The index_url(...) function will also be examined to ensure you are
# building the index sanely.
class PageRankIndex(object):
def __init__(self):
# you'll want to create something here to hold your index, and other
# necessary data members
self.index_url_dict = {}
self._documents = []
self._inverted_index = {}
# index_url( url )
# purpose: crawl through a web directory of html files and generate an
# index of the contents
# preconditions: none
# returns: num of documents indexed
# hint: use BeautifulSoup and urllib
# parameters:
# url - a string containing a url to begin indexing at
def index_url(self, url):
# ADD CODE HERE
req = requests.get(url)
data = BeautifulSoup.BeautifulSoup(req.text, "lxml")
children = data.find_all("a")
self.all_url = [ child.get("href") for child in children]
i = 0
n = len(self.all_url)
self._inverted_index = defaultdict(list)
while(i < n):
if self.all_url[i] not in self.index_url_dict:
url_i = urljoin(url, children[i].string)
req_i = requests.get(url_i)
data_i = BeautifulSoup.BeautifulSoup(req_i.text, "lxml")
children_i = data_i.find_all("a")
text_i = str(data_i.find_all(text=True)[-1])
text_i = re.split('[^a-zA-Z0-9]', text_i)
token_s = []
for j in range(len(text_i)):
tokens = self.tokenize(text_i[j])
token_s.append(tokens)
for idx, text in enumerate(token_s):
self._inverted_index[text[0]].append(i)
for Key in (self._inverted_index):
self._inverted_index[Key] = list(set(self._inverted_index[Key]))
self.index_url_dict[self.all_url[i]] = [child.get("href") for child in children_i]
i += 1
self.mapped_url = {}
for i, url in enumerate(self.all_url):
self.mapped_url[url] = i
n = len(self.mapped_url)
transition_matrix = np.zeros((n,n))
teleporting_matrix = np.full((n, n), 1/n)
for key in self.index_url_dict:
for value in self.index_url_dict[key]:
transition_matrix[self.mapped_url[key]][self.mapped_url[value]] = 1
transition_matrix = transition_matrix/ np.sum(transition_matrix, axis = 1, keepdims = True)
alpha = 0.9
self.P = (alpha * transition_matrix) + ((1 - alpha) * teleporting_matrix)
self.x = np.full((1,n), 1/n)
while(True):
a = np.dot(self.x, self.P)
if np.linalg.norm(a-self.x) < 1e-8:
break
else:
self.x = a
num_files_indexed = len(self._documents)
return num_files_indexed
# tokenize( text )
# purpose: convert a string of terms into a list of terms
# preconditions: none
# returns: list of terms contained within the text
# parameters:
# text - a string of terms
def tokenize(self, text):
# ADD CODE HERE
text = text.lower()
tokens = re.split('[^a-zA-Z0-9]', text)
return tokens
# ranked_search( text )
# purpose: searches for the terms in "text" in our index and returns
# AND results for highest 10 ranked results
# preconditions: .index_url(...) has been called on our corpus
# returns: list of tuples of (url,PageRank) containing relevant
# search results
# parameters:
# text - a string of query terms
def ranked_search(self, text):
# ADD CODE HERE
query_tokens = self.tokenize(text)
token_url = set(range(len(self.all_url)))
for token in query_tokens:
# print(self._inverted_index[token])
if token in self._inverted_index:
token_url = token_url & set(self._inverted_index[token])
token_url = list(token_url)
# print(token_url)
page_name = []
score = []
for url in token_url:
score.append(self.x[:, url])
page_name.append(self.all_url[url])
# print('\n')
top_10 = sorted(list(zip(page_name, score)), reverse=True)[:10]
top_10_pages = list(map(lambda x : (x[0], x[1][0]), top_10))
return top_10_pages
# now, we'll define our main function which actually starts the indexer and
# does a few queries
def main(args):
    """Index the course's test corpus, then run and print a few sample queries."""
    index = PageRankIndex()
    url = 'http://web.cs.wpi.edu/~kmlee/cs525/new10/index.html'
    num_files = index.index_url(url)
    search_queries = (
        'palatial', 'college ', 'palatial college', 'college supermarket', 'famous aggie supermarket'
    )
    for q in search_queries:
        results = index.ranked_search(q)
        print("searching: %s -- results: %s" % (q, results))
# this little helper will call main() if this file is executed from the command
# line but not call main() if this file is included as a module
if __name__ == "__main__":
    import sys
    main(sys.argv)
# NOTE(review): the two lines below run unconditionally at import time, which
# defeats the __main__ guard above and triggers a full network crawl whenever
# this module is imported - they look like leftover debugging; confirm.
I = PageRankIndex()
r = I.index_url('http://web.cs.wpi.edu/~kmlee/cs525/new10/index.html')
| true
|
13edb85fecc36fd76fec818051231480de1e3292
|
Python
|
SwagLyrics/SwSpotify
|
/SwSpotify/__main__.py
|
UTF-8
| 253
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
from SwSpotify import spotify, SpotifyNotRunning
def main():
    """Print the current Spotify track as "title - artist", or print the
    error message when Spotify is not running."""
    try:
        now_playing = spotify.current()
    except SpotifyNotRunning as err:
        print(err)
        return
    title, artist = now_playing
    print(f"{title} - {artist}")
if __name__ == "__main__":
main()
| true
|
b28fc6c37f98f92333bcc9abd1d9fbc6f5319a65
|
Python
|
Julymycin/codes
|
/traditional_features/rootsift_test.py
|
UTF-8
| 827
| 2.78125
| 3
|
[] |
no_license
|
# import the necessary packages
from traditional_features.rootsift import RootSIFT
import cv2
# load the image we are going to extract descriptors from and convert
# it to grayscale
# NOTE(review): cv2.imread returns None when the file is missing, in which
# case cvtColor below fails - confirm example1.jpg exists next to the script.
# image = cv2.imread("example.png")
image = cv2.imread("example1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
# detect Difference of Gaussian keypoints in the image
# (older FeatureDetector_create/DescriptorExtractor_create API kept below
# for reference; detectAndCompute covers both steps)
# detector = cv2.FeatureDetector_create("SIFT")
# kps = detector.detect(gray)
# extract normal SIFT descriptors
# extractor = cv2.DescriptorExtractor_create("SIFT")
(kps, descs) = sift.detectAndCompute(gray, None)
print("SIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))
# extract RootSIFT descriptors from the same keypoints for comparison
rs = RootSIFT()
(kps, descs) = rs.compute(gray, kps)
print("RootSIFT: kps=%d, descriptors=%s " % (len(kps), descs.shape))
| true
|
d4c283f71492760a37534ea34325520a28474e0d
|
Python
|
Yin-dihe/Python
|
/07-格式化字符串.py
|
UTF-8
| 181
| 4.09375
| 4
|
[] |
no_license
|
name = 'TOM'
age = 18
# Goal: print "my name is X, I am Y years old"
print('我的名字是%s, 今年%s岁' % (name, age))
# f-string syntax: f'{expression}'
print(f'我的名字是{name}, 今年{age}岁')
| true
|
9914538eca0bb029b011f6fe8b98ba6f4e891924
|
Python
|
SurenderHarsha/CogModSudkou
|
/Brain_class.py
|
UTF-8
| 14,451
| 2.953125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 17:41:03 2020
@author: battu
"""
import threading
import time
import numpy as np
from Strategy_selection import *
## A function to get all data in the focus
def get_focus_data(matrix,focus):
data = []
data.append(focus)
square = (int(focus[0]/3)*3,int(focus[1]/3)*3)
sqr = {}
for i in range(square[0],square[0]+3):
for j in range(square[1],square[1]+3):
if matrix[i][j]!=0:
sqr[matrix[i][j]]=(i,j)
data.append(sqr)
row1 = matrix[square[0]]
row2 = matrix[square[0]+1]
row3 = matrix[square[0]+2]
col1 = [matrix[x][square[1]] for x in range(0,9)]
col2 = [matrix[x][square[1]+1] for x in range(0,9)]
col3 = [matrix[x][square[1]+2] for x in range(0,9)]
rw1 = {}
for i in range(len(row1)):
if row1[i]!=0:
rw1[row1[i]]=(square[0],i)
rw2 = {}
for i in range(len(row2)):
if row2[i]!=0:
rw2[row2[i]]=(square[0]+1,i)
rw3 = {}
for i in range(len(row3)):
if row3[i]!=0:
rw3[row3[i]]=(square[0]+2,i)
co1 = {}
for i in range(len(col1)):
if col1[i]!=0:
co1[col1[i]]=(i,square[1])
co2 = {}
for i in range(len(col2)):
if col2[i]!=0:
co2[col2[i]]=(i,square[1]+1)
co3 = {}
for i in range(len(col3)):
if col3[i]!=0:
co3[col3[i]]=(i,square[1]+2)
data.append(rw1)
data.append(rw2)
data.append(rw3)
data.append(co1)
data.append(co2)
data.append(co3)
empty_sqr= []
for i in range(square[0],square[0]+3):
for j in range(square[1],square[1]+3):
if matrix[i][j]==0 and i!=focus[0] and j!=focus[1]:
empty_sqr.append((i,j))
data.append(empty_sqr)
return data
'''
Template for strategy class
class strategy_a():
def __init__(self,matrix):
#Content for initialization
self.matrix = matrix
self.lock = 0 #This decides if the simulation should be paused
self.focus = (0,0) #The current cordinate that is being focused on
self.answer = (0,(0,0)) # The number and the cordinate it should be placed at
#-----------------------------Do any extra initialization here --------------------------------------------------
#-----------------------------Stop Initialization --------------------------------------------------------------
t1 = threading.Thread(target=self.think)
t1.start()
Any other functions you want for your strategies you can write here.
def think(self):
#This is your main thinking strategy function, here you will write logic for your code
#Always update the focus when you are looking at a cell. This will run in a thread, so it will happen parallely
pass
'''
# The basic brain class
class Basic():
def __init__(self,matrix):
self.matrix = matrix
self.focus = (0,0) #The current cordinate that is being focused on
self.answer = (5,(0,0)) # The number and the cordinate it should be placed at, not implemented
self.lock = 0
#Variables initialization
self.wait_time = 1
self.thread_break = 1
#Movespeed between cells
self.focus_wait = np.random.uniform(1/50,1/20)
self.stack = []
self.current_place = (0,0)
self.inserted = []
self.solved =[]
self.cells = []
self.correct_solved = 0
self.total_solved = 0
self.total_empty = 0
#Starting the thread
self.t1 = threading.Thread(target=self.think)
self.t1.start()
pass
#This is the function that moves the focus from current place to destination cell-by-cell
def find_focus_path(self,i,j,x,y):
a = x - i
b = y - j
c_x = i
c_y = j
if a<0:
a_sign = -1
else:
a_sign = 1
if b<0:
b_sign = -1
else:
b_sign = 1
for i in range(abs(a)):
c_x += a_sign
self.focus = (c_x,c_y)
time.sleep(self.focus_wait)
for i in range(abs(b)):
c_y += b_sign
self.focus = (c_x,c_y)
time.sleep(self.focus_wait)
return
#Buggy function, not used.
def solve(self):
self.solved = [x[:] for x in self.matrix]
return
i = 0
j = 0
track = []
for i in range(9):
for j in range(9):
if self.solved[i][j]==0:
track.append((i,j))
current_index = 0
numbers_track = [0 for x in range(len(track))]
sol = False
iterations = 0
while not sol:
iterations+=1
if current_index >= len(track):
sol = True
break
try:
numbers_track[current_index] +=1
except:
return
numbers_track[current_index] +=1
if numbers_track[current_index] > 9:
numbers_track[current_index] = 0
current_index -= 1
continue
self.solved[track[current_index][0]][track[current_index][1]] = numbers_track[current_index]
result = self.solve_check(track[current_index][0],track[current_index][1])
if result == -1:
continue
else:
current_index += 1
continue
pass
#Buggy function, not used.
def solve_check(self,x,y):
#check row
for j in range(0,9):
if y==j:
continue
if self.solved[x][j] == self.solved[x][y]:
return -1
#check column
for i in range(0,9):
if i==x:
continue
if self.solved[i][y] == self.solved[x][y]:
return -1
#check square
c_x = int(x/3)*3
c_y = int(y/3)*3
for i in range(3):
for j in range(3):
if i+c_x == x and j+c_y==y:
continue
if self.solved[i+c_x][j+c_y] == self.solved[x][y]:
return -1
return 1
#Checking if a solution is correct or not. cell-by-cell movement.
def perform_check(self):
time.sleep(self.focus_wait)
#Check part
check_x = self.focus[0]
check_y = self.focus[1]
digit = self.answer[0]
self.find_focus_path(check_x,check_y,check_x,0)
#Check row
for j in range(0,9):
self.focus = (check_x,j)
time.sleep(self.focus_wait)
if j == check_y:
continue
if self.matrix[check_x][j] == self.matrix[check_x][check_y]:
self.find_focus_path(check_x,j,check_x,check_y)
return -1
self.find_focus_path(self.focus[0],self.focus[1],check_x,check_y)
#Check column
for i in range(0,9):
self.focus = (i,check_y)
time.sleep(self.focus_wait)
if i == check_x:
continue
if self.matrix[i][check_y] == self.matrix[check_x][check_y]:
self.find_focus_path(i,check_y,check_x,check_y)
return -1
self.find_focus_path(self.focus[0],self.focus[1],check_x,check_y)
t_x = int(check_x/3)*3
t_y = int(check_y/3)*3
self.find_focus_path(check_x,check_y,t_x,t_y)
#Check square
for i in range(t_x,t_x+3):
for j in range(t_y,t_y+3):
self.focus = (i,j)
time.sleep(self.focus_wait)
if i==check_x and j == check_y:
continue
if self.matrix[i][j] == self.matrix[check_x][check_y]:
self.find_focus_path(i,j,check_x,check_y)
return -1
return 1
#Calculate probability distribution of all empty cells(Higher density of numbers = higher probability)
def calc_dist(self,empty,focus):
dist= []
for i in empty:
focus = i
row = self.matrix[focus[0]]
col = [self.matrix[x][focus[1]] for x in range(9)]
a,b = int(focus[0]/3)*3,int(focus[1]/3)*3
square = []
for j in range(3):
for k in range(3):
square.append(self.matrix[j+a][k+b])
#print(square,row,col,square.count(0))
s = (9-square.count(0) + 9-row.count(0) + 9-col.count(0))
ss = 9-square.count(0)
rs = 9-row.count(0)
cs = 9-col.count(0)
dist.append(max(s,ss,rs,cs)**5)
return [x/sum(dist) for x in dist]
#Functions to pause or resume that can lock the thread.
def pause(self):
self.lock = 1
return
def resume(self):
self.lock=0
#The main think function
def think(self):
#Unimplemented function, the function is run to satisfy dependency for another variable
self.solve()
#Programatically countin empty cells, can be used for a future update
for i in range(9):
for j in range(9):
if self.matrix[i][j]==0:
self.cells.append((i,j))
self.total_empty = len(self.cells)
time.sleep(self.wait_time)
counter = 1
done = False
temp_matrix = self.matrix
#Storing empty cells
empty_cells = []
for i in range(9):
for j in range(9):
if self.matrix[i][j]==0:
empty_cells.append((i,j))
if len(empty_cells)==0:
done = True
x = empty_cells[0][0]
y = empty_cells[0][1]
#Run solving until done
while not done:
#This is implemented to stop the thread by setting thread_break to 0.
try:
if 1/self.thread_break:
pass
except:
return
#If the lock is active, the program waits.
if self.lock !=0 :
continue
#Not important, can be implemented in a future update
self.correct_solved = 0
self.total_solved = 0
for i in self.cells:
if self.matrix[i[0]][i[1]] == self.solved[i[0]][i[1]]:
self.correct_solved +=1
for i in self.cells:
if self.matrix[i[0]][i[1]] != 0 :
self.total_solved +=1
if len(empty_cells)==0:
done = True
continue
#Choosing an emtpy cell based on the probability distribution
choice = np.random.choice(list(range(len(empty_cells))),p = self.calc_dist(empty_cells,(x,y)))
new_cell = empty_cells[choice]
#Move to the new focus/empty cell
self.find_focus_path(self.focus[0],self.focus[1],new_cell[0],new_cell[1])
self.focus = new_cell
#Obtain focus data
dt = get_focus_data(self.matrix,self.focus)
time.sleep(0.1)
#Call strategy selection and solve cell
n,s,name = strategy_cycle(dt[0],dt[1],dt[2],dt[3],dt[4],dt[5],dt[6],dt[7],dt[8])
#Wait time for strategies and fatigue implemented
if name=='simple_strategies':
time.sleep(np.random.randint(1,10))
self.focus_wait += 1/500
if name=='medium_strategies':
time.sleep(np.random.randint(3,20))
self.focus_wait += 1/400
#If solution is wrong, try again
if n==False:
continue
x = self.focus[0]
y= self.focus[1]
#Set solution in the matrix
self.matrix[self.focus[0]][self.focus[1]] = s
empty_cells.remove(self.focus)
time.sleep(0.2)
#Perform a check if the solution fits the cell.
result = self.perform_check()
#If solution doesnt fit, that means the strategy function has failed. Retry.
if result == -1:
self.find_focus_path(self.focus[0],self.focus[1],x,y)
self.matrix[x][y] = 0
empty_cells.append((x,y))
print("Wrong Answer!",x,y,"Number:",s)
self.focus_wait += 1
continue
return
#Placeholder for better understanding
def communicate(self):
return self.focus,self.correct_solved,self.total_solved,self.total_empty
#For now same brain class is used for both levels, can be improved in future updates
class Strategy():
    """Factory that maps a difficulty name to a solver brain instance."""

    def __init__(self, name):
        # Difficulty label, e.g. 'Easy' or 'Medium'.
        self.name = name

    def return_strategy(self, matrix):
        """Return a solver brain for *matrix* according to the difficulty.

        BUG FIX: an unrecognised difficulty previously fell through to
        ``return obj`` with ``obj`` unbound (UnboundLocalError); it now
        fails fast with a descriptive ValueError instead.
        """
        if self.name == 'Easy':
            return Basic(matrix.copy())
        if self.name == "Medium":
            # Both levels currently share the same brain; a dedicated
            # medium-difficulty class can be plugged in here later.
            return Basic(matrix.copy())
        raise ValueError("Unknown difficulty: {!r}".format(self.name))
| true
|
55500cf02422e6ae50bd3506e8188c83824476e0
|
Python
|
Akimyou/onigiri_jp_dict
|
/tmp.py
|
UTF-8
| 787
| 2.59375
| 3
|
[
"BSD-3-Clause",
"MIT",
"CC-BY-3.0"
] |
permissive
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import json
import codecs
tmp_path = './_tmp'
tmp_file_path = tmp_path + '/.tmp'
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
def set (tmp):
    """Serialize *tmp* (a dict) into the temp file as UTF-8 JSON (Python 2 code).

    NOTE(review): if json.dumps()/.decode() fails, the error is swallowed and
    an empty string is written, silently discarding the data - confirm this
    best-effort behavior is intended.
    """
    tmp_file = codecs.open(tmp_file_path, 'w+', 'utf-8')
    try:
        # Python 2: un-escape non-ASCII characters in the serialized str.
        tmp_file_w_con = json.dumps(tmp).decode('unicode-escape')
    except Exception as e:
        tmp_file_w_con = ''
    tmp_file.write(tmp_file_w_con)
    tmp_file.close()
def get ():
    """Load and return the dict stored in the temp file (Python 2 code).

    Returns {} when the file is missing, empty, or holds invalid JSON.
    """
    try:
        tmp_file = codecs.open(tmp_file_path, 'r', 'utf-8')
    except Exception as e:
        # File does not exist yet - create it so the read below succeeds.
        tmp_file = codecs.open(tmp_file_path, 'w+', 'utf-8')
    tmp_file_con = tmp_file.read()
    tmp_file.close()
    try:
        tmp = json.loads(tmp_file_con)
    except Exception as e:
        # Empty or corrupt content falls back to an empty mapping.
        tmp = {}
    return tmp
| true
|
d098793fa090609d27661f1feb64d228b35ccaa6
|
Python
|
PouringRain/Algorithmn
|
/39.py
|
UTF-8
| 435
| 3.1875
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
class Solution:
    """Find any duplicated value in a list, reporting it via duplication[0]."""

    def duplicate(self, numbers, duplication):
        """Return True and set duplication[0] to the first repeated value seen
        while scanning left to right; return False if all values are distinct.
        """
        # A set gives O(1) membership tests; this also replaces the
        # Python-2-only dict.has_key(), so the code now runs on Python 3.
        seen = set()
        for num in numbers:
            if num in seen:
                duplication[0] = num
                return True
            seen.add(num)
        return False
| true
|
bf92398599a96da99884ee898271fe04e193c5ea
|
Python
|
Patrick-Ali/PythonLearning
|
/Writing_To_Pickling.py
|
UTF-8
| 466
| 2.921875
| 3
|
[] |
no_license
|
import pickle

# Sample translation dictionaries and a dice-roll lookup table.
eng2sp = { "one": "uno", "four": "cuatro", "three": "tres", "two": "dos"}
sp2eng = { "cinco": "five", "seis": "six", "siete": "seven", "ocho": "eight"}
DiceCombos = { "12": "6+6", "11": "6+5", "10": "5+5", "9": "5+4",
               "7": "4+3", "6": "3+3", "5": "3+2", "8": "4+4",
               "4": "2+2", "3": "2+1", "2": "1+1"}

# FIX: use a context manager so the file is closed even if a dump fails
# (the original left the handle open on error).
# NOTE(review): eng2sp is defined but never pickled - confirm whether that
# is intentional before relying on the file's contents.
with open("DataStore.txt", "wb") as f:
    pickle.dump(sp2eng, f)
    pickle.dump(DiceCombos, f)
| true
|
141e23c912f0c8ca03873b4784485294f9b0a32c
|
Python
|
20Mhz/copperv
|
/scripts/namespace.py
|
UTF-8
| 5,579
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
from typing import List
import dataclasses
import string
from collections.abc import Iterable
import logging
class Template(string.Template):
def __init__(self,template,*args, **kwargs):
super().__init__(str(template),*args, **kwargs)
self.names = self.get_var_names()
def get_var_names(self):
var_names= []
for i in self.pattern.finditer(self.template):
i = i.groupdict()
if i['named'] is not None:
var_names.append(i['named'])
elif i['braced'] is not None:
var_names.append(i['braced'])
return var_names
def substitute(self, **kws):
mapping = kws
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
if named in mapping:
return str(mapping[named])
else:
return mo[0]
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
class Namespace:
    """A collection of named template variables that may reference each
    other (e.g. ``a='${b}/x'``) and can be resolved to plain strings."""
    def __init__(self, **variables: str):
        # Each variable becomes a Node; dependencies are linked later by
        # process_deps()/set_deps().
        self._nodes = [Node(k,self.process_input(v),None) for k,v in variables.items()]
        self.logger = logging.getLogger(__name__)
    def process_input(self, v):
        """Normalize a value: join non-string iterables with spaces."""
        r = v
        if not isinstance(v, str) and isinstance(v, Iterable):
            r = ' '.join([str(i) for i in v])
        return r
    @staticmethod
    def collect(*args):
        """Merge several mappings into one Namespace, rejecting duplicates.

        NOTE(review): each arg must support ``.items()`` (dict-like);
        Namespace itself has no ``items`` method — verify callers pass dicts.
        """
        collected = {}
        for ns in args:
            for k,v in ns.items():
                if k in collected:
                    raise KeyError(f'Duplicated variable "{k}"')
                else:
                    collected[k] = v
        return Namespace(**collected)
    def resolve(self):
        """Resolve all cross-references and return a plain name->str dict."""
        self.logger.debug(f'before {self=}')
        self.process_deps()
        self.logger.debug(f'process {self=}')
        self.substitute_deps()
        self.logger.debug(f'substitute {self=}')
        r = self.to_dict()
        self.logger.debug(f'dict {r=}')
        return r
    def process_deps(self):
        # Link every node to the nodes its template references.
        for node in self._nodes:
            node.set_deps(self)
    def eval(self,value):
        """Resolve this namespace, then expand *value* against it."""
        self.resolve()
        self.logger.debug(f'resolve {self=}')
        node = Node('key',value,None)
        node.set_deps(self)
        self.logger.debug(f'set_deps {node=}')
        r = node.substitute_deps().value
        self.logger.debug(f'substitute_deps {node=}')
        self.logger.debug(f'substitute_deps {node.substitute_deps()=}')
        return r
    def substitute_deps(self):
        # Replace each non-leaf node with its fully substituted version.
        self._nodes = [node.substitute_deps() if not node.is_leaf else node for node in self._nodes]
    def __contains__(self, item):
        return item in self.to_dict()
    def __getitem__(self, key):
        # Linear scan; implicitly returns None when the key is absent.
        for n in self._nodes:
            if n.name == key:
                return n
    def to_dict(self):
        return {i.name:i.value for i in self._nodes}
    def __str__(self):
        return str(self.to_dict())
    def __iter__(self):
        return self._nodes.__iter__()
    def append(self, node):
        self._nodes.append(node)
    def __repr__(self):
        return 'Namespace<'+str(self._nodes)+'>'
    @staticmethod
    def from_list(_nodes):
        """Build a Namespace directly from an existing list of Nodes."""
        ns = Namespace()
        ns._nodes = _nodes
        return ns
    def __len__(self):
        return len(self._nodes)
class Node:
    """One named variable: its raw (template) value plus the Nodes it
    references via ``$name``/``${name}`` placeholders (``deps``)."""
    def __init__(self, name, value, deps = None):
        self.name = name
        self.value = value
        self.deps = deps
        if deps is None:
            self.deps = Namespace()
    def __str__(self):
        return f'Node<{self.name},{self.value},{self.deps}>'
    def __repr__(self):
        return f'Node({repr(self.name)},{repr(self.value)},{repr(self.deps)})'
    @property
    def template(self):
        # Fresh Template wrapping the node's current raw value.
        return Template(self.value)
    @property
    def is_leaf(self):
        # A leaf references no other variables.
        return len(self.deps) == 0
    def sanity_check(self):
        """Raise KeyError on self-references or circular dependencies."""
        Node._sanity_check(self, [])
    @staticmethod
    def _sanity_check(root: 'Node', stack: 'List[Node]'):
        # Depth-first walk; `stack` records the path already visited.
        for dep in root.deps:
            if dep == root:
                raise KeyError(f'Variable depends on itself: {root.name}={root.value}')
            if dep in stack:
                #culprit = next(i for i in stack if i == dep)
                raise KeyError(f'Circular dependency in variable: {root.name}={root.value}')
            stack.append(root)
            Node._sanity_check(dep,stack)
    def substitute_deps(self) -> "Node":
        """Return a new Node whose value has all dependencies expanded."""
        self.sanity_check()
        substituted_deps = Namespace()
        for dep in self.deps:
            temp = dep
            if not dep.is_leaf:
                # Expand transitive dependencies first (bottom-up).
                temp = dep.substitute_deps()
            substituted_deps.append(temp)
        new_value = None
        if self.value is not None:
            new_value = self.template.substitute(**substituted_deps.to_dict())
        return Node(
            self.name,
            new_value,
            substituted_deps,
            )
    def set_deps(self, namespace: Namespace):
        """Resolve this node's placeholder names against *namespace*.

        Unknown names become empty leaf Nodes so substitution still works.
        """
        dep_list = []
        for name in self.template.names:
            if name in namespace:
                dep_list.append(namespace[name])
            else:
                # NOTE(review): deps=[] here is a plain list rather than a
                # Namespace — harmless for a leaf, but inconsistent.
                dep_list.append(Node(name,"",deps=[]))
        self.deps = Namespace.from_list(dep_list)
| true
|
949b73aaa56ea106fe0519b45e7540b42d74797b
|
Python
|
dujiacheng1994/python
|
/NLP/Tfidf_test_3.py
|
UTF-8
| 672
| 3.171875
| 3
|
[] |
no_license
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
# Minimal demo of sklearn's TfidfVectorizer on a two-document corpus.
document = ["I have a pen.", "I have an apple."]  # list of str; each element is one document
tfidf_model = TfidfVectorizer().fit(document)  # fit the tf-idf model; vocabulary_ maps term -> column
# NOTE(review): this calls transform() on a brand-new, UNFITTED vectorizer —
# sklearn raises NotFittedError here; presumably tfidf_model.transform was meant.
sparse_result2 = TfidfVectorizer().transform(document)
sparse_result = tfidf_model.transform(document)  # sparse tf-idf matrix for the corpus
print(sparse_result)
print(sparse_result.todense())  # densify for a more readable matrix view
print(tfidf_model.vocabulary_)  # mapping of term -> column index
| true
|
607bc1b571de3b2ea30b061d87657fb14f240ca5
|
Python
|
tanlei0/land_use_modified_FLUS
|
/simulation_ca.py
|
UTF-8
| 14,549
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020/10/28 9:19
# @Author : cmk
# @Email : litaoyo@163.com
# @File : simulation_ca.py
# @Software: PyCharm
import multiprocessing
import time
import numpy as np
from numpy.ma import MaskedArray
from typing import List
import random
from tqdm import tqdm
from Raster import Raster
np.seterr(divide='ignore', invalid='ignore')
# ############# Tips ######################
# all values which represent the type of the land start from 0, while start from 1 in the land matrix
# ###########################################
class LandCell(object):
    """One valid pixel of the land-use raster.

    Holds the cell's coordinates, its current land type (0-based), its
    neighbors' coordinates, and the cached neighborhood-effect and
    combined-probability vectors used by the CA transition step.
    """
    def __init__(self, row: int, col: int, land_type: int, nei_offset: np.ndarray):
        # cell's coordinate
        self.row = row
        self.col = col
        self.land_type = land_type
        # the coordinate of cell's neighbor
        self.nei_coores = nei_offset + [self.row, self.col]
        # neighbor's effect
        self.nei_effect = None
        # combine probability
        self.combine_prob = None
    def update_neis(self, nTypes: int, nei_window: float, land_mask_data: MaskedArray, wnb: np.ndarray) -> np.ndarray:
        """
        update the neighbor's effect of the cell
        :param nTypes: The total number of land types
        :param nei_window: the window of neighbor (N*N - 1)
        :param land_mask_data: landUse data (1-based class codes, masked)
        :param wnb: the weight of each neighbor class
        :return: per-class neighborhood effect vector (also cached on self)
        """
        nei_count = np.zeros((nTypes,))
        nrows, ncols = land_mask_data.shape
        for nei_coor in self.nei_coores:
            # Ignore neighbors outside the raster or on NoData pixels.
            if 0 <= nei_coor[0] < nrows and 0 <= nei_coor[1] < ncols:
                if land_mask_data[nei_coor[0], nei_coor[1]] != land_mask_data.fill_value:
                    # Raster classes are 1-based; internal types are 0-based.
                    nei_type = land_mask_data[nei_coor[0], nei_coor[1]] - 1
                    if 0 <= nei_type < nTypes:
                        nei_count[nei_type] += 1
        self.nei_effect = nei_count / nei_window * wnb
        return self.nei_effect
    def cal_com_probs(self, nTypes: int, nei_window: float, wnb: np.ndarray, land_mask_data: MaskedArray,
                      probs_data: np.ndarray,
                      cost_matrix: np.ndarray) -> None:
        """
        calculate the combine probability but the land inertia
        (combined prob = suitability * neighborhood effect * conversion cost)
        :param nTypes: total number of land types
        :param nei_window: neighbor window size (N*N - 1)
        :param wnb: per-class neighborhood weights
        :param land_mask_data: masked land-use raster
        :param probs_data: per-class occurrence probabilities, shape (nTypes, rows, cols)
        :param cost_matrix: conversion cost/permission matrix (from-type x to-type)
        :return:
        """
        # 1. get probability
        probs = probs_data[:, self.row, self.col]
        # 2. nei effect
        self.update_neis(nTypes=nTypes, nei_window=nei_window, land_mask_data=land_mask_data, wnb=wnb)
        # 3. get cost
        change_cost = cost_matrix[self.land_type, :]
        self.combine_prob = probs * self.nei_effect * change_cost
def get_nei_offset(nei_nums):
    """Return the (row, col) offsets of every cell in an
    ``nei_nums`` x ``nei_nums`` window centred on (0, 0), excluding the
    centre cell itself.

    :param nei_nums: odd window side length (e.g. 3 for a Moore neighborhood)
    :return: np.ndarray of shape (nei_nums**2 - 1, 2), rows ordered
             top-to-bottom, left-to-right (same order as the original
             nested-loop implementation)
    """
    s = (nei_nums - 1) // 2
    # Comprehension over the window, skipping the centre (0, 0).
    offsets = [[r, c]
               for r in range(-s, s + 1)
               for c in range(-s, s + 1)
               if not (r == 0 and c == 0)]
    return np.asarray(offsets)
def initial_land_cell(nei_offset: np.ndarray, land_data: MaskedArray, restricted_data: np.ndarray = None) -> List[
    LandCell]:
    """Build a LandCell for every valid (unmasked, unrestricted) pixel.

    :param nei_offset: neighbor coordinate offsets from get_nei_offset
    :param land_data: masked land-use raster (1-based class codes)
    :param restricted_data: optional raster; 0 marks pixels excluded
        from the simulation (protected/restricted areas)
    :return: list of LandCell objects
    """
    # get the coordinate of valid pixel
    # rows: [...]
    # cols: [...]
    valid_coors = np.where(land_data.mask == False)
    valid_nums = len(valid_coors[0])
    land_cells = []
    for i in range(valid_nums):
        row = valid_coors[0][i]
        col = valid_coors[1][i]
        # skip the restricted area
        if restricted_data is not None and restricted_data[row][col] == 0:
            continue
        # Convert the raster's 1-based class code to a 0-based type.
        land_type = land_data[row][col] - 1
        lc = LandCell(valid_coors[0][i], valid_coors[1][i], land_type, nei_offset)
        land_cells.append(lc)
    return land_cells
def func_update_com_prob(cells: List[LandCell], nTypes: int, nei_window: float, wnb: np.ndarray,
                         land_mask_data: MaskedArray,
                         probs_data: np.ndarray,
                         cost_matrix: np.ndarray) -> None:
    """Recompute the combined transition probability for every cell in *cells*.

    NOTE(review): the first parameter is a LIST of LandCell objects —
    make sure call sites pass the whole list, not an index plus a single
    cell (the extra-argument form raises TypeError against this signature).
    """
    t_start = time.time()
    for cell in cells:
        cell.cal_com_probs(nTypes=nTypes, nei_window=nei_window, wnb=wnb, land_mask_data=land_mask_data,
                           probs_data=probs_data, cost_matrix=cost_matrix)
    t_stop = time.time()
    # Runtime log message (kept verbatim): "finished, elapsed %.2f s".
    print("执行完成,耗时%.2f" % (t_stop - t_start))
def start_simulation(config: dict):
    """Run the FLUS-style cellular-automaton land-use simulation.

    Each iteration (up to ``maxIterNum``): update per-class inertia from
    the gap between demand and current counts, recompute every cell's
    combined probability (suitability x neighborhood x conversion cost),
    then convert cells via roulette selection. Stops early when total
    remaining demand is within 0.01% of all pixels. Writes the simulated
    raster to ``config['saveSimPath']``.

    config keys: landUsePath, probsPath, saveSimPath, restrictedPath
    (optional), simConfig{maxIterNum, neighboorhoodOdd}, landUseDemand,
    costMatrix, weightOfNeighborhood, degree.
    """
    num_process = multiprocessing.cpu_count()  # kept for the (disabled) parallel path
    # Neighborhood window: N x N cells minus the centre.
    N = config['simConfig']['neighboorhoodOdd']
    nei_window = N * N - 1
    max_iter_nums = config['simConfig']['maxIterNum']
    nei_offset = get_nei_offset(N)
    lanuse_demand = np.asarray(config['landUseDemand'])
    cost_matrix = np.asarray(config['costMatrix'])
    wnb = np.asarray(config['weightOfNeighborhood'])
    degree = config['degree']
    # Load the required rasters.
    land_raster = Raster(config['landUsePath'])
    land_mask_data = land_raster.maskedData
    nrows, ncols = land_mask_data.shape
    num_classes = land_raster.vmax
    if 'restrictedPath' in config:
        restricted_data = Raster(config['restrictedPath']).data
    else:
        restricted_data = None
    probs_raster = Raster(config['probsPath'])
    probs_data = np.asarray(probs_raster.data)
    # get valid land cells
    land_cells = initial_land_cell(nei_offset, land_mask_data, restricted_data)
    # Current pixel count per class (raster classes are 1-based).
    save_count = np.asarray([len(np.where(land_mask_data == i + 1)[0]) for i in range(num_classes)])
    sumPixels = save_count.sum()
    # difference between demand and current land in the beginning
    initialDist = np.copy(save_count)
    # t - 2 difference
    dynaDist = np.copy(save_count)
    # the minimum difference
    best_dist = np.copy(save_count)
    # t - 1 difference
    mIminDis2goal = np.copy(save_count)
    # 1 means resist transition
    oppo_trans = np.zeros_like(save_count)
    # land inertia
    adjustment = np.zeros_like(save_count)
    adjustment_effect = np.ones_like(save_count)
    # roulette
    mroulette = np.zeros((num_classes + 1,))
    print("start simulation: ")
    print("landuse demand: ", lanuse_demand)
    print("initial count: ", save_count)
    st = time.time()
    for k in range(max_iter_nums):
        print("=====================================")
        print("interation: ", k)
        print("curr land use: ", save_count)
        print("diff: ", lanuse_demand - save_count)
        # ============= update inertia ======================
        stui = time.time()
        print("-----------------------------")
        print("update inertia...")
        for i in range(num_classes):
            mIminDis2goal[i] = lanuse_demand[i] - save_count[i]
            if k == 0:
                initialDist[i] = mIminDis2goal[i]
                dynaDist[i] = initialDist[i] * 1.01
                best_dist[i] = initialDist[i]
            # Bug fix: the original wrote abs(best_dist[i] > abs(...)) --
            # abs() of a boolean; the comparison belongs outside abs().
            if abs(best_dist[i]) > abs(mIminDis2goal[i]):
                best_dist[i] = mIminDis2goal[i]
            else:
                # Bug fix: the original divided by abs(initialDist[i] > 0.05)
                # (a 0/1 boolean); the > 0.05 test belongs on the quotient.
                # (Division by zero when initialDist[i] == 0 is suppressed
                # by the module-level np.seterr call.)
                if abs(mIminDis2goal[i] - abs(best_dist[i])) / abs(initialDist[i]) > 0.05:
                    oppo_trans[i] = 1
            adjustment[i] = mIminDis2goal[i] / dynaDist[i] if dynaDist[i] != 0 else 1
            if 0 < adjustment[i] < 1:
                dynaDist[i] = mIminDis2goal[i]
            if initialDist[i] > 0 and adjustment[i] > 1 - degree:
                # Bug fix: the original multiplied by (adjustment + degree)
                # -- the whole array -- which raises when assigned to a
                # scalar slot; adjustment[i] is clearly intended (all
                # sibling branches use the indexed form).
                adjustment_effect[i] = adjustment_effect[i] * (adjustment[i] + degree)
            if initialDist[i] < 0 and adjustment[i] > 1 - degree:
                adjustment_effect[i] = adjustment_effect[i] * (1.0 / (adjustment[i] + degree))
            if initialDist[i] > 0 and adjustment[i] > 1:
                adjustment_effect[i] = adjustment_effect[i] * adjustment[i] * adjustment[i]
            if initialDist[i] < 0 and adjustment[i] > 1:
                adjustment_effect[i] = adjustment_effect[i] * (1.0 / adjustment[i]) * (1.0 / adjustment[i])
        # Bug fix: elapsed time is now - start (the original printed the negation).
        print("update inertia end!!! Time used: ", time.time() - stui)
        # ===================================================
        # ============= cal combine probability =============
        stui = time.time()
        print("--------------------------")
        print("cal combine probability...")
        # A multiprocessing fan-out was attempted here, but workers mutate
        # copies of the cells so results never propagate back; keep the
        # single-process path.
        # Bug fix: the original called func_update_com_prob(0, cell, ...)
        # once per cell -- an extra leading index plus a single LandCell,
        # which does not match the function's (cells, nTypes, ...)
        # signature and raises TypeError. Pass the whole list once.
        func_update_com_prob(land_cells, num_classes, nei_window, wnb,
                             land_mask_data, probs_data, cost_matrix)
        print("cal combine probability end!!!, Time used: ", time.time() - stui)
        # ===================================================
        # =================== do transition =================
        stui = time.time()
        for land_cell in land_cells:
            i = land_cell.row
            j = land_cell.col
            old_type = land_cell.land_type
            # get land inertia: boost the probability of staying put.
            land_inertia = 10 * num_classes * adjustment_effect[old_type]
            land_cell.combine_prob[old_type] *= land_inertia
            # Cumulative-probability roulette wheel over target classes.
            mroulette[0] = 0
            for ii in range(num_classes):
                mroulette[ii + 1] = mroulette[ii] + land_cell.combine_prob[ii]
            # get a random float number and do roulette choice
            temp_rand = random.random()
            isConvert = False
            for ii in range(num_classes):
                new_type = ii
                if mroulette[ii] < temp_rand <= mroulette[ii + 1]:
                    # Conversion must be permitted and actually change type.
                    if cost_matrix[old_type][new_type] != 0 and new_type != old_type:
                        isConvert = True
                    else:
                        isConvert = False
                    _disChangeFrom = mIminDis2goal[old_type]
                    _disChangeTo = mIminDis2goal[new_type]
                    # Veto conversions once a class has met its demand, or
                    # when its inertia flag resists further change.
                    if initialDist[new_type] >= 0 and _disChangeTo == 0:
                        adjustment_effect[new_type] = 1
                        isConvert = False
                    if initialDist[old_type] <= 0 and _disChangeFrom == 0:
                        adjustment_effect[old_type] = 1
                        isConvert = False
                    if initialDist[old_type] >= 0 and oppo_trans[old_type] == 1:
                        isConvert = False
                    if initialDist[new_type] <= 0 and oppo_trans[new_type] == 1:
                        isConvert = False
                    if isConvert:
                        # update datas
                        land_cell.land_type = new_type
                        land_mask_data[i][j] = new_type + 1
                        save_count[new_type] += 1
                        save_count[old_type] -= 1
                        mIminDis2goal[new_type] = lanuse_demand[new_type] - save_count[new_type]
                        mIminDis2goal[old_type] = lanuse_demand[old_type] - save_count[old_type]
                        break
        # NOTE(review): these resets use old_type/new_type leaked from the
        # last cell processed above -- confirm this is intended.
        oppo_trans[old_type] = 0
        oppo_trans[new_type] = 0
        sumDis = np.fabs(mIminDis2goal).sum()
        # Early exit once demand is met (or within 0.01% of all pixels).
        if sumDis == 0 or sumDis < sumPixels * 0.0001:
            break
        print("-------------------------------------")
        print("Time used: ", time.time() - stui)
    print("--------------------------------------------------")
    # the NoDataValue in the simulation file should be zero
    land_raster.NoDataValue = 0
    land_raster.data = land_mask_data.data
    land_raster.write(config['saveSimPath'])
    print("simulation end! The file is saved to ", config['saveSimPath'])
    print("Time used: ", time.time() - st)
if __name__ == '__main__':
    # Example configuration (5-class Dongguan dataset). Defined but
    # unused: superseded by `params` below.
    params1 = {
        "landUsePath": "./dg2001coor.tif",
        "probsPath": "./Probability-of-occurrence.tif",
        "saveSimPath": "./sim_file/test_sim.tif",
        "restrictedPath": "./restriction/restrictedarea.tif",
        "simConfig": {
            "maxIterNum": 35,
            "neighboorhoodOdd": 3
        },
        "landUseDemand": [80016, 54427, 43599, 42433, 28446],
        "costMatrix": [
            [1, 0, 0, 0, 0],
            [0, 1, 0, 0, 0],
            [1, 1, 1, 1, 0],
            [1, 0, 1, 1, 0],
            [1, 0, 1, 0, 1],
        ],
        "weightOfNeighborhood": [1, 0.9, 0.5, 1, 0.1],
        'degree': 0.1
    }
    # Active configuration: 8-class dataset (no restricted area).
    params = {
        "landUsePath": "../test/hgs/hgs_datas/12n.tif",
        "probsPath": "../test/hgs/hgs_datas/sim_file/hgs_probs.tif",
        "saveSimPath": "../test/hgs/hgs_datas/sim_file/黄瓜山my_sim.tif",
        "simConfig": {
            "maxIterNum": 35,
            "neighboorhoodOdd": 3
        },
        "landUseDemand": [337942, 131006, 203277, 11992, 67340, 19631, 458, 4894],
        "costMatrix": [
            [1, 1, 1, 0, 0, 0, 0, 0],
            [1, 1, 1, 0, 1, 1, 0, 1],
            [0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0, 0],
            [1, 1, 1, 0, 1, 1, 0, 1],
            [1, 1, 1, 1, 1, 1, 0, 1],
            [0, 0, 0, 0, 0, 0, 1, 0],
            [1, 1, 1, 0, 0, 0, 0, 1]
        ],
        "weightOfNeighborhood": [0.2, 0.1, 0.6, 0.2, 0.5, 0.2, 0, 0.1],
        'degree': 0.1
    }
    start_simulation(params)
| true
|
370a80ec0f8931cb827f92fa64ef73ca4c9a48d4
|
Python
|
Ilovelibrary/Leetcode-Problems
|
/682-Baseball-Game.py
|
UTF-8
| 656
| 2.9375
| 3
|
[] |
no_license
|
class Solution(object):
    def calPoints(self, ops):
        """Return the sum of valid baseball-game record points.

        Each op is an integer literal (score), 'D' (double the previous
        valid score), '+' (sum of the previous two valid scores), or
        'C' (cancel the previous valid score).

        Bug fix: the original iterated with ``xrange``, which is
        Python-2-only and raises NameError on Python 3; the awkward
        try/except int-check helper is also gone -- anything that is not
        one of the three command letters is parsed as an int.

        :type ops: List[str]
        :rtype: int
        """
        stack = []
        for op in ops:
            if op == 'D':
                stack.append(stack[-1] * 2)
            elif op == '+':
                stack.append(stack[-1] + stack[-2])
            elif op == 'C':
                stack.pop()
            else:
                # Plain (possibly negative) integer score.
                stack.append(int(op))
        return sum(stack)
| true
|
5a98ddb1def614a4d4e3989b0b3e9134c38f1fe3
|
Python
|
Hackin7/Programming-Crappy-Solutions
|
/School Exercises/3. H2 Computing Notes/Searching and Sorting/test1.py
|
UTF-8
| 2,304
| 3.59375
| 4
|
[] |
no_license
|
import random
# Test fixture: 10 random integers in [0, 1000]; the in-place sorts
# below mutate this list.
array = [random.randint(0,1000) for i in range(10)]
print(array)
def bubblesort(array):
    """Sort ``array`` in place, ascending, via bubble sort.

    Repeatedly sweeps the list swapping adjacent out-of-order pairs,
    stopping after the first sweep that makes no swap.
    """
    swapped = True
    while swapped:
        swapped = False
        for idx in range(len(array) - 1):
            if array[idx] > array[idx + 1]:
                # Tuple assignment swaps without a temp variable.
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
def insertionSort(array):
    """Sort ``array`` in place, ascending, via insertion sort.

    Bug fix: the original never wrote ``key`` back when it was smaller
    than every element before it (the while loop fell off the front
    without the final placement), leaving a duplicated element -- e.g.
    [2, 1] became [2, 2]. The standard form below always places key.
    """
    for i in range(1, len(array)):
        key = array[i]
        j = i - 1
        # Shift larger elements one slot right to open a gap for key.
        while j >= 0 and array[j] > key:
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = key
def mergeSort(arr):
    """Return a new ascending-sorted list built from ``arr`` by merge sort.

    Uses the module-level ``merge`` helper to combine sorted halves;
    lists of length 0 or 1 are returned unchanged.
    """
    if len(arr) <= 1:
        # Already sorted by definition.
        return arr
    mid = len(arr) // 2
    # Recursively sort each half, then merge the two sorted halves.
    return merge(mergeSort(arr[:mid]), mergeSort(arr[mid:]))
def merge(left, right):
    """Merge two ascending-sorted lists into one ascending-sorted list."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            # On ties the right element is taken first, matching the
            # original implementation's ordering.
            merged.append(right[j])
            j += 1
    # One side is exhausted; append whatever remains of the other.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# mergeSort returns a NEW sorted list; `array` itself is left as-is here.
arr = mergeSort(array)
print(arr)
print(array)
def quicksort(array, low=0, high=0):
    """Sort array[low..high] in place (inclusive bounds) via quicksort.

    Uses array[low] as the pivot; `l` and `r` scan inward, swapping
    elements that sit on the wrong side of the pivot, then the pivot is
    swapped into its final slot at `r`.
    """
    if low >= high:
        return
    pivot = array[low]
    l = low+1
    r = high
    while l <= r:
        if (array[l]> pivot and array[r]<pivot):
            # Both scanners point at misplaced elements: swap them.
            temp = array[l]
            array[l] = array[r]
            array[r] = temp
        elif array[l]<= pivot and array[r] <= pivot:
            l += 1
        elif array[l]> pivot and array[r] > pivot:
            r -= 1
        elif array[l]<= pivot and array[r] > pivot:
            # Both already on the correct side: advance both scanners.
            l += 1
            r -= 1
    #Swap in pivot
    mid = r
    array[low]= array[mid]
    array[mid] = pivot
    # NOTE(review): the left recursion includes index mid (the pivot
    # slot); it still terminates but re-examines the placed pivot --
    # confirm (low, mid-1) was not intended.
    quicksort(array, low, mid)
    quicksort(array, mid+1, high)

# Sort the shared demo list over its full range and show the result.
quicksort(array, 0, len(array)-1)
print(array)
| true
|
f73cc4d8040adff861da601097478ddb368baa1a
|
Python
|
aliciatoshima/python-exercises
|
/9-4.py
|
UTF-8
| 599
| 3.578125
| 4
|
[] |
no_license
|
# Write a program to read through a mail log, build a histogram using a
# dictionary to count how many messages have come from each email address,
# and print the sender with the most messages.
# Fixes over the original: the file is now closed (with-block), and the
# builtins `list`, `dict` and `max` are no longer shadowed by locals.
with open('mbox-short.txt') as mbox:
    senders = []
    for line in mbox:
        this_line = line.split()
        # Skip blank lines and anything that is not a "From:" header.
        if len(this_line) < 1:
            continue
        if this_line[0] == 'From:':
            senders.append(this_line[1])

# Histogram: email address -> message count.
counts = {}
for email in senders:
    counts[email] = counts.get(email, 0) + 1

# Find and report the most prolific sender.
top_count = None
top_key = None
for k, v in counts.items():
    if top_count is None or top_count < v:
        top_count = v
        top_key = k
print(top_key, top_count)
| true
|
eae3578a81d39a16fd4fd06274c3e96502f79270
|
Python
|
haiyang1013/PythonSpiderEast
|
/EastSpider/spiders/eastspider.py
|
UTF-8
| 1,945
| 2.515625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from EastSpider.items import EastspiderItem
class eastspider(scrapy.spiders.CrawlSpider):
    """Crawls eastbay.com's sale listing and scrapes ASICS Tiger
    product pages into EastspiderItem records."""
    name = 'eastspider'
    allowed_domains = ['eastbay.com']
    start_urls = ['https://www.eastbay.com/category/sale.html?query=sale%3Arelevance%3AstyleDiscountPercent%3ASALE%3Agender%3AMen%27s%3Abrand%3AASICS+Tiger']
    rules = (
        # Follow only ASICS Tiger product pages; parse each with parseContent.
        Rule(LinkExtractor(allow='/product/asics-tiger.+.html$'), callback='parseContent', follow=True),
    )
    def parseContent(self, response):
        """Extract one product page's fields into an EastspiderItem."""
        # Runtime log line (kept verbatim): "crawled successfully".
        print("----------已爬取成功--------------")
        print(response.url)
        # Create the item object used to hold the scraped fields.
        item = EastspiderItem()
        title = response.xpath("//span[@class='ProductName-primary']/text()").extract_first()
        price_final = response.xpath("//span[@class='ProductPrice-final']/text()").extract_first()
        price_original = response.xpath("//span[@class='ProductPrice-original']/text()").extract_first()
        color = response.xpath("//p[@class='ProductDetails-form__label']/text()").extract_first()
        size = response.xpath("//div[@class='ProductSize']/label/span/text()").extract()
        sku = response.xpath("//div[@id='ProductDetails-tabs-details-panel']/text()").extract()
        details = response.xpath("//div[@class='ProductDetails-description']").extract()
        img_urls = response.xpath("//span[@class='product']/span/img/@src").extract()
        item['title'] = title
        item['price_final'] = price_final
        item['price_original'] = price_original
        item['color'] = color
        item['size'] = size
        # sku[1] holds the SKU text; prefix '#' to match site formatting.
        item['sku'] = "#"+sku[1]
        item['details'] = details
        item['img_urls'] = img_urls
        # Yield the extracted item to the pipeline; crawling continues.
        yield item
| true
|
963c154d5cfa4d212ac4d68162f95a7e1983452a
|
Python
|
allanlealluz/Python-First-tests
|
/ex074.py
|
UTF-8
| 456
| 3.796875
| 4
|
[] |
no_license
|
from random import randint
# Draw five random integers in [1, 10], then report the largest and the
# smallest, echoing each new extreme as it is found during the scan.
n1 = randint(1,10)
n2 = randint(1,10)
n3 = randint(1,10)
n4 = randint(1,10)
n5 = randint(1,10)
lista = (n1,n2,n3,n4,n5)
print(lista)
c = 0
menor = lista[0]  # smallest seen so far
maior = lista[0]  # largest seen so far
while c < len(lista):
    if(lista[c] < menor):
        menor = lista[c]
        print(lista[c])  # echo the new minimum
    if(lista[c] > maior):
        maior = lista[c]
        print(lista[c])  # echo the new maximum
    c += 1
print(f'o maior numero é {maior}')
print(f'o menor numero é {menor}')
| true
|
343b236f37f0fe21083dc1c3f44173f3e9182fae
|
Python
|
Dancing-in-air/Bd-bar-sp
|
/百度贴吧/baidu_tieba01.py
|
UTF-8
| 753
| 3.25
| 3
|
[] |
no_license
|
import requests
import re
class BaiduTieba():
    """Downloads one listing page of a Baidu Tieba forum and saves the
    raw HTML to a local file."""
    def __init__(self, name, page):
        # name: forum ("bar") name; page: 1-based page number.
        self.name = name
        self.page = page
        # Tieba paginates by post offset: 50 posts per page.
        self.url = "https://tieba.baidu.com/f?kw={}&ie=utf-8&pn={}".format(self.name, (self.page - 1) * 50)
        self.header = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36"}
    def run(self):
        """Fetch the page and write the decoded HTML to disk."""
        response = requests.get(self.url, headers=self.header)
        ret = response.content.decode()
        # pat = re.compile("<!--|-->")
        # result = pat.sub("", ret)
        with open("{}贴吧--第{}页.html".format(self.name, self.page), "w") as f:
            f.write(ret)
cat = BaiduTieba("毛泽东", 1)
cat.run()
| true
|
28005fdbfed6c22d828f1e21f1e623c77edcb41a
|
Python
|
chetakks/SV_SDA
|
/test_load_data.py
|
UTF-8
| 1,566
| 2.671875
| 3
|
[] |
no_license
|
# import pickle
# import cPickle
# data_path = '/home/aditya/store/Datasets/pickled/bbbc+feat/'
# f1 = open(data_path+'bbbc+feat_valid.pkl', 'rb')
# valid_set = cPickle.load(f1)
# print 'done'
# print 'nr validation instances: ', len(valid_set[0])
# print 'nr features: ',len(valid_set[0][0])
# print 'nr targets: ', len(list(set(valid_set[1])))
# f2 = open(data_path+'bbbc+feat_test.pkl', 'rb')
# test_set = cPickle.load(f2)
# print 'nr test instances: ', len(test_set[0])
# print 'nr features: ',len(test_set[0][0])
# print 'nr targets: ', len(list(set(test_set[1])))
# NOTE: Python 2 script (cPickle, print statements).
import cPickle
import gzip
def save(object, filename, protocol = -1):
    """Save an object to a compressed disk file.
       Works well with huge objects.
       By Zach Dwiel.
    """
    file = gzip.GzipFile(filename, 'wb')
    cPickle.dump(object, file, protocol)
    file.close()
def load(filename):
    """Loads a compressed object from disk.
       By Zach Dwiel.
    """
    file = gzip.GzipFile(filename, 'rb')
    object = cPickle.load(file)
    file.close()
    return object
# NOTE(review): the second assignment immediately overrides the first,
# and `filename` is then unused (the open() below repeats the path).
filename = '/home/aditya/store/Datasets/pickled/bbbc+feat/bbbc+feat_gzip_train.pkl'
filename = '/media/883E0F323E0F1938/Chetak/Dataset/pickled/mnist2_train.pkl'
f = open('/media/883E0F323E0F1938/Chetak/Dataset/pickled/mnist2_train.pkl', 'rb')
train_set = cPickle.load(f)
f.close()
#train_set = load(filename)
# Expects train_set == (features, targets): report basic dataset stats.
print 'nr training instances: ', len(train_set[0])
print 'nr features: ',len(train_set[0][0])
print 'nr targets: ', len(list(set(train_set[1])))
print
|
792f4979c72e54c2e36448af2736b94c0fc660b5
|
Python
|
xiciluan/YelpDataAnalysis
|
/Code/DataCleaning_Business/biz_resturants.py
|
UTF-8
| 1,619
| 2.5625
| 3
|
[] |
no_license
|
import csv
import json
from collections import defaultdict
# Pass 1: collect the business_ids of everything categorized 'Restaurants'.
with open('category_biz.csv') as f:
    reader = csv.reader(f)
    restaurants_id = set()
    for row in reader:
        if row[0] == 'Restaurants':
            restaurants_id.add(row[1])
    f.close()
# print(restaurants_id)
# business_id -> {feature name -> value}; missing features default to "".
id_features = defaultdict(lambda: defaultdict(str))
features = ["BusinessParking", "RestaurantsDelivery", "RestaurantsReservations",
            "NoiseLevel", "RestaurantsTakeOut", "RestaurantsPriceRange2", "WiFi"]
head = ['business_id', 'stars'] + features
# Pass 2: scan the line-delimited Yelp business dump for restaurant rows.
with open('business.json') as f:
    iter_f = iter(f)
    # NOTE(review): this readline() consumes (skips) the file's first
    # line before iteration starts -- confirm that is intended.
    line = f.readline()
    for line in iter_f:
        d = json.loads(line)
        id = d['business_id']
        attributes = d['attributes']
        if id not in restaurants_id:
            continue
        id_features[id]['stars'] = d['stars']
        for feature in features:
            if attributes and feature in attributes:
                if feature != "BusinessParking":
                    # Strip the u'...' repr wrapper some values carry.
                    id_features[id][feature] = attributes[feature].strip('u').strip('\'')
                else:
                    # BusinessParking is a dict-like repr string; reduce it
                    # to True/False: does any parking option say True?
                    info = attributes[feature]
                    for c in '\'\".{}()[]:;,.!?':
                        info = info.replace(c, ' ')
                    lst = info.split()
                    id_features[id][feature] = str('True' in lst)
    f.close()
# print(id_features)
# Pass 3: write one CSV row per restaurant with the selected features.
with open('biz_resturants.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(head)
    for id in id_features:
        writer.writerow([id, id_features[id]['stars']]+[id_features[id][feature] for feature in features])
| true
|
b3d25474a838e378f7543dda980737ff1b5e1013
|
Python
|
ankurs/Game-On
|
/Game-On-toy_car/proxy.py
|
UTF-8
| 257
| 2.515625
| 3
|
[] |
no_license
|
# NOTE: Python 2 script (print statement below).
# One-way TCP proxy: accept a single client on port 9092 and forward
# every byte it sends to 192.168.122.163:9091, one byte at a time.
import socket
s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s1.bind(("",9092))
s1.listen(1)
# Block until one client connects; b is the client's (host, port).
a,b = s1.accept()
print b
s2.connect(("192.168.122.163",9091))
# Relay loop: 1-byte reads keep latency low but throughput is poor.
while True:
    s2.send(a.recv(1))
| true
|
e01011c1cc91f58ad950e80adcc71c9da235320a
|
Python
|
EDA2021-1-SEC05-G8/Reto1-G08
|
/App/controller.py
|
UTF-8
| 2,432
| 2.515625
| 3
|
[] |
no_license
|
"""
* Copyright 2020, Departamento de sistemas y Computación,
* Universidad de Los Andes
*
*
* Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along withthis program. If not, see <http://www.gnu.org/licenses/>.
"""
import config as cf
import model
import csv
"""
El controlador se encarga de mediar entre la vista y el modelo.
"""
# Inicialización del Catálogo de videos
def initCatalog(TypeList):
    """
    Calls the model's catalog-initialization function with the chosen
    list representation (e.g. ARRAY_LIST / LINKED_LIST).
    """
    catalog = model.newCatalog(TypeList)
    return catalog
# Funciones para la carga de datos
def loadData(catalog, TypeList):
    # Load videos and categories from CSV files, then sort the videos.
    loadVideos(catalog, TypeList)
    loadCategory(catalog)
    sortVideos(catalog)
def loadVideos(catalog, TypeList):
    """Read videos-small.csv and add each row to the catalog."""
    videosfile = cf.data_dir + "videos-small.csv"
    input_file = csv.DictReader(open(videosfile, encoding='utf-8'))
    for video in input_file:
        model.addVideo(catalog, video, TypeList)
def loadCategory(catalog):
    """Read the tab-separated category-id.csv and add each category."""
    categoryfile = cf.data_dir + "category-id.csv"
    input_file = csv.DictReader(open(categoryfile, encoding="utf-8"), delimiter='\t')
    for category in input_file:
        model.addCategory(catalog, category)
# Funciones de ordenamiento
def sortVideos(catalog):
    """
    Sorts the catalog's videos (delegates to model.sortVideos).
    The original note said "sorts books by average_rating" -- likely a
    copy-paste remnant from another lab.
    """
    return model.sortVideos(catalog)
def sortVideosLikes(catalog):
    # Sort the videos by like count (delegates to the model).
    return model.sortVideosLikes(catalog)
# Funciones de consulta sobre el catálogo
def VidByCatPais(catalog, cat, pais, number):
    # Presumably: top `number` videos for a category within a country.
    return model.VidByCatPais(catalog, cat, pais, number)
def VidByPais(catalog, pais):
    # Presumably: videos filtered by country (pais).
    return model.VidByPais(catalog, pais)
def VidbyCat(catalog, cat):
    # Presumably: videos filtered by category.
    return model.VidbyCat(catalog, cat)
def VidBytagPais(catalog, tag, pais, number):
    # Re-sort by likes before querying by tag + country.
    sortVideosLikes(catalog)
    return model.VidBytagPais(catalog, tag, pais, number)
| true
|
18f77329505ef51bc95e9b5586f408a43686104e
|
Python
|
semipumpkin/BOJ
|
/15686_Chicken.py
|
UTF-8
| 1,364
| 2.9375
| 3
|
[] |
no_license
|
import sys
sys.stdin = open('input.txt', 'r')
def chicken_distance():
    """Sum, over all houses, the Manhattan distance to the nearest
    selected chicken shop; prune as soon as the running total exceeds
    the best answer found so far.

    Reads module globals: houses, sels, answer.
    """
    global answer
    total = 0
    for house in houses:
        min_distance = 9999999  # sentinel larger than any board distance
        for sel in sels:
            distance = abs(sel[0] - house[0]) + abs(sel[1] - house[1])
            if distance < min_distance:
                min_distance = distance
        total += min_distance
        if total > answer:
            # Prune: already worse than the best known selection.
            return answer
    return total
def power_set(idx, n, k):
    """Enumerate all k-element subsets of the n chicken shops by
    recursively excluding/including chickens[idx], keeping the smallest
    total house-to-shop distance in the global `answer`.
    """
    global answer
    if len(sels) > k:
        # Prune: already selected more than k shops.
        return
    if idx == n:
        # return
        if len(sels) == k:
            # print(*sel)
            total = chicken_distance()
            if total < answer:
                answer = total
            return
        return
    # Branch 1: skip chickens[idx].
    power_set(idx+1, n, k)
    # Branch 2: take chickens[idx] (undo after recursing).
    sels.append(chickens[idx])
    power_set(idx+1, n, k)
    sels.pop()
# Process 4 test cases (stdin is redirected from input.txt above).
for _ in range(4):
    n, m = map(int, input().split())
    city = [list(map(int, input().split())) for _ in range(n)]
    # print(city)
    chickens = []
    houses = []
    answer = 9999999
    # Classify every cell: 2 = chicken shop, any other nonzero = house.
    for i in range(n):
        for j in range(n):
            if city[i][j] == 2:
                chickens.append([i, j])
            elif city[i][j]:
                houses.append([i, j])
    k = len(chickens)
    sels = []
    # Try every way of keeping m of the k shops; best total is printed.
    power_set(0, k, m)
    # print(sel)
    # print(houses)
    # print(chickens)
    print(answer)
| true
|
ee6c95d9315f7c16764b8d4ba73ebb47f112b06f
|
Python
|
ashleyliangzy/maya-parallel-samelength
|
/maya_average_parallel.py
|
UTF-8
| 3,346
| 2.78125
| 3
|
[] |
no_license
|
import pymel.core as pm
import pymel.util as pmu
import pymel.core.datatypes as dt
# Maya UI tool (Python 2 / pymel): aligns selected edges so each has the
# reference length and the reference direction, moving each edge's
# non-start ("free") vertex.
window=pm.window(title="align edge length",widthHeight=(350,350))
pm.columnLayout(rowSpacing=10)
# Module-level selection state filled in by the buttons below.
startVertsList=[]    # vertices treated as the fixed end of each edge
edgesList=[]         # edges whose free end will be moved
directionList = []   # edge(s) defining the target direction
standardEdgeLen=0    # target length taken from the reference edge
#def OnChoosePlane(*args):
#    selectObjList=pm.ls(selection=True)
#    selectObj=selectObjList[0]
#    return selectObj
#pm.button(label='choose a plane',command=OnChoosePlane)
pm.text(label='1 choose some vertices as start')
def OnChooseStartVert(*args):
    # Record the current vertex selection as the fixed start points.
    global startVertsList
    startVertsList=pm.ls(selection=True,flatten=True)
pm.button(label='choose start vertex',command=OnChooseStartVert)
pm.text(label='2 choose some edges')
def OnChooseEdge(*args):
    # Record the edges to be aligned.
    global edgesList
    edgesList=pm.ls(selection=True,flatten=True)
    print edgesList
pm.button(label='choose edges',command=OnChooseEdge)
pm.text(label='3 choose an edge as reference length')
def OnChooseStandardEdge(*args):
    # Take the world-space length of the (last) selected edge as target.
    standardEdge=pm.ls(selection=True,flatten=True)
    global standardEdgeLen
    for v in standardEdge:
        standardEdgeLen=v.getLength('world')
    print edgesList
pm.button(label='choose standard edge',command=OnChooseStandardEdge)
pm.text(label='4 choose an edge as reference direction')
def OnChooseDirectionEdge(*args):
    # Record the edge(s) whose direction the aligned edges should copy.
    global directionList
    directionList=pm.ls(selection=True,flatten=True)
    print directionList
pm.button(label='choose direction edge',command=OnChooseDirectionEdge)
pm.text(label='5 align edges')
def OnAlignEdge(*args):
    """For every chosen edge: keep its start vertex fixed and move its
    other vertex to start + direction * standardEdgeLen."""
    print startVertsList
    print edgesList
    print standardEdgeLen
    for e in edgesList:
        connectVerts=e.connectedVertices()
        vIndex_end=0
        i=0
        startV=startVertsList[0]
        endV=startVertsList[0]
        print 'edge:',e
        # Classify the edge's two vertices into start (fixed) and end (moved).
        for v in connectVerts:
            if v in startVertsList:
                startV=v
                print 'start:',startV
            if v not in startVertsList:
                endV=v
                vIndex_end=i
                print 'endV:',endV
            i=i+1
        startV_pos=startV.getPosition('world')
        endV_pos=endV.getPosition('world')
        # Derive the unit direction from the reference direction edge(s);
        # the last one in directionList wins.
        for ed in directionList:
            dirconnectVerts=ed.connectedVertices()
            dirvIndex_end=0
            a=0
            dirstartV=startVertsList[0]
            direndV=startVertsList[0]
            for vd in dirconnectVerts:
                if vd in startVertsList:
                    dirstartV=vd
                    print 'dirstart:',dirstartV
                if vd not in startVertsList:
                    direndV=vd
                    dirvIndex_end=a
                    print 'direndV:',direndV
                a=a+1
            dirstartV_pos=dirstartV.getPosition('world')
            direndV_pos=direndV.getPosition('world')
            orientation=direndV_pos-dirstartV_pos
            orientation=orientation.normal()
            # New free-vertex position: start + unit direction * length.
            endV_pos_change=orientation*standardEdgeLen+startV_pos
            e.setPoint(endV_pos_change,vIndex_end,'world')
pm.button(label='align edge',command=OnAlignEdge)
pm.showWindow(window)
| true
|
aefccdfe4067cd1229f4b2e63a2195a6281b06b4
|
Python
|
hzengin/ArchiveOrganizer
|
/NameFinder.py
|
UTF-8
| 3,528
| 2.765625
| 3
|
[] |
no_license
|
#-*-coding: utf-8-*-
from xml.dom.expatbuilder import parseString
__author__ = 'hzengin'
import re
import urllib.request
import os
from Movie import *
from xml.dom.minidom import parse
class NameFinder:
    """Cleans a movie directory name and looks up its title on the
    mymovieapi XML endpoint.

    The "toDelete" file (one phrase per line) lists release-group labels
    and other noise phrases stripped from directory names.
    """
    # Kept for backward compatibility with code that reads the class
    # attribute; __init__ now assigns a fresh per-instance list so that
    # instances no longer share (and re-append to) one mutable list.
    texts = []

    def __init__(self):
        # Bug fix: the original appended into the shared class-level list
        # on every instantiation and never closed the file handle.
        self.texts = []
        with open("toDelete", "r") as to_delete:
            for satir in to_delete.read().splitlines():
                self.texts.append(satir)

    def stripName(self, dirname):
        """Normalize *dirname* into a '+'-separated search query."""
        text = dirname
        text = self.stripBrackets(text)
        text = self.char2space(text)
        text = self.stripYears(text)
        text = self.stripLabels(text)
        text = self.spaceStrip(text)
        text = self.text2url(text)
        print(text)
        return text

    def FindTitle(self, dirname):
        """Return a Movie for *dirname*, or "" when nothing matches."""
        name = self.stripName(dirname)
        return self._searchTitles(name)

    def stripLabels(self, dirname):
        """Remove every noise phrase loaded from the "toDelete" file."""
        result = dirname
        for text in self.texts:
            result = result.replace(text, "")
        return result

    def stripBrackets(self, dirname):
        """Replace any [..], (..) or {..} group with a single space."""
        result = re.sub("\[.+\]", " ", dirname)
        result = re.sub("\(.+\)", " ", result)
        result = re.sub("\{.+\}", " ", result)
        return result

    def char2space(self, dirname):
        """Turn '.', '_' and '-' separators into spaces."""
        result = dirname.replace(".", " ")
        result = result.replace("_", " ")
        result = result.replace("-", " ")
        return result

    def stripYears(self, dirname):
        """Drop standalone four-digit numbers (release years)."""
        return re.sub("\\b\d\d\d\d\\b", "", dirname)

    def text2url(self, text):
        """Transliterate Turkish characters and make the text URL-friendly
        (spaces become '+')."""
        # One translate() pass replaces the chain of 12 .replace() calls.
        trans = str.maketrans("şçğıüöŞÇĞİÜÖ", "scgiuoSCGIUO")
        text = text.translate(trans)
        return text.replace(" ", "+")

    def spaceStrip(self, text):
        """Collapse runs of spaces and trim the ends."""
        text = re.sub(" +", " ", text)
        return text.strip()

    def _searchTitles(self, title, limit="1", iteration=0):
        """Query the mymovieapi XML endpoint for *title*.

        Returns a Movie built from the first result document, or "" on
        failure; falls back by dropping the last word when no result
        document comes back.
        """
        if len(title) < 2:
            return ""
        req = urllib.request.Request(
            "http://mymovieapi.com/?type=xml&plot=none&episode=0&yg=0&mt=M&lang=en-US&offset=&aka=simple&release=simple&business=0&tech=0&title=" + title + "&limit=" + limit,
            headers={'User-Agent': ""})
        con = urllib.request.urlopen(req)
        try:
            result = con.read()
        finally:
            # Bug fix: the original leaked the connection.
            con.close()
        # Bug fix: `sonuc` was previously unbound (NameError) when the
        # document list contained no <title> elements.
        sonuc = ""
        i = 0
        dom = parseString(result)
        for node in dom.getElementsByTagName("IMDBDocumentList"):
            for node2 in node.getElementsByTagName("title"):
                movie = Movie()
                movie.title = node.getElementsByTagName("title")[i].firstChild.nodeValue
                movie.id = node.getElementsByTagName("imdb_id")[i].firstChild.nodeValue
                movie.rating = node.getElementsByTagName("rating")[i].firstChild.nodeValue
                movie.year = node.getElementsByTagName("year")[i].firstChild.nodeValue
                sonuc = movie
                i = i + 1
            # Only the first result document is examined.
            return sonuc
        # No result document at all: retry with a shortened title.
        return self.titleManipulateRight(title, limit, iteration + 1)

    def titleManipulateRight(self, title, limit, iteration):
        """Drop the last '+'-separated word and retry, a few times at most."""
        if iteration > 2:
            return ""
        title = title.rsplit('+', 1)[0]
        return self._searchTitles(title, limit, iteration + 1)
def sub_dirs(dir):
    """Return the names of the immediate subdirectories of *dir*.

    Order matches os.listdir(); regular files are filtered out.
    """
    result = []
    for entry in os.listdir(dir):
        if os.path.isdir(os.path.join(dir, entry)):
            result.append(entry)
    return result
| true
|
03f8b927f43f39c1bcee159db2294f0f373b02dd
|
Python
|
donno/warehouse51
|
/basic/py/lexer.py
|
UTF-8
| 7,527
| 3.28125
| 3
|
[] |
no_license
|
"""Parses (tokenizers) a stream of characters into tokens (lexeme) for a BASIC
language.
Copyright (C) 2019 Sean Donnellan.
SPDX-License-Identifier: MIT
"""
import enum
import functools
import string
class LexemeType(enum.Enum):
    """Identifies which kind of lexeme the tokenizer is building or emitting."""
    Unknown = 0        # no token in progress
    Integer = 1        # decimal integer literal
    IntegerBase16 = 2  # hexadecimal literal (&H... form)
    Real = 3           # floating point literal
    String = 4         # double-quoted string literal
    Identifier = 5     # keyword or variable name (may carry a type sigil)
    Symbol = 6         # single operator/punctuation character
    Comment = 7        # apostrophe comment running to end of line
class Integer:
    """Token for an integer literal; ``base`` records the source radix."""

    def __init__(self, value, base=10):
        # Numeric value plus the base (10 or 16) it was written in.
        self.value = value
        self.base = base

    def __str__(self):
        return 'Integer(%d)' % (self.value,)
class Real:
    """Token for a floating-point literal."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return 'Real(%f)' % (self.value,)
class Identifier:
    """Token for a keyword or variable name."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return 'Identifier(%s)' % (self.value,)
class String:
    """Token for a double-quoted string literal (quotes not stored)."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return '"%s"' % (self.value,)
class Comment:
    """Token for a BASIC comment (text after the leading apostrophe)."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Added for consistency: every other token class (Integer, Real,
        # Identifier, String, Symbol) provides a readable __str__, so the
        # driver's `print(token)` produced a bare object repr only here.
        return 'Comment(%s)' % (self.value,)
class Symbol:
    """Token for one punctuation or operator character."""

    # The complete set of single-character symbols the lexer recognises.
    Tokens = ('=', '+', '-', '*', '/', '(', ')', ';', ',', ':', '.', '>', '<')

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value
def parse(reader):
    """Yield token objects (Integer/Real/Identifier/String/Symbol/Comment)
    read one character at a time from *reader* (a file-like object).

    Implemented as a character-driven state machine: ``lexeme_type`` is the
    current state and ``tokens`` accumulates the characters of the lexeme
    currently being built.
    """
    lexeme_type = LexemeType.Unknown
    tokens = []
    # Convert the accumulated characters into the token object for the
    # current state; raises ValueError for states with nothing to emit.
    def _terminate():
        if lexeme_type == LexemeType.Comment:
            return Comment(''.join(tokens))
        elif lexeme_type == LexemeType.Integer:
            value = int(''.join(tokens))
            return Integer(value)
        elif lexeme_type == LexemeType.IntegerBase16:
            value = int(''.join(tokens), 16)
            return Integer(value, base=16)
        # NOTE(review): plain `if` instead of `elif` below; harmless because
        # every branch above returns, but `elif` would be consistent.
        if lexeme_type == LexemeType.Real:
            value = float(''.join(tokens))
            return Real(value)
        elif lexeme_type == LexemeType.Identifier:
            return Identifier(''.join(tokens))
        elif lexeme_type == LexemeType.Symbol:
            return Symbol(''.join(tokens))
        elif lexeme_type == LexemeType.String:
            return String(''.join(tokens))
        else:
            raise ValueError('Can not terminate %s' % lexeme_type)
    # iter(callable, sentinel): read single characters until EOF ('').
    for ch in iter(functools.partial(reader.read, 1), ''):
        if lexeme_type == LexemeType.Unknown:
            # Try to figure out the type.
            if ch == '&':
                lexeme_type = LexemeType.IntegerBase16
                tokens = []
            elif ch.isdigit():
                # Integer or real, lets assume integer until proven otherwise
                lexeme_type = LexemeType.Integer
                tokens.append(ch)
            elif ch.isalpha():
                lexeme_type = LexemeType.Identifier
                tokens.append(ch)
            elif ch in Symbol.Tokens:
                lexeme_type = LexemeType.Symbol
                tokens.append(ch)
            elif ch == "'":
                lexeme_type = LexemeType.Comment
                tokens = []
            elif ch == '"':
                lexeme_type = LexemeType.String
                tokens = []
            elif ch.isspace():
                continue
            else:
                raise NotImplementedError('Unknown char "%s"' % ch)
        elif lexeme_type == LexemeType.Integer:
            if ch.isdigit():
                tokens.append(ch)
            elif ch.isspace():
                yield _terminate()
                lexeme_type = LexemeType.Unknown
                tokens = []
            # NOTE(review): '.' is a member of Symbol.Tokens, so this branch
            # captures it and the `ch == '.'` branch below is unreachable;
            # "12.5" therefore lexes as Integer(12), Symbol('.'), Integer(5)
            # instead of Real(12.5).  Likely a bug — the '.' test should
            # probably come before the Symbol.Tokens test.
            elif ch in Symbol.Tokens:
                yield _terminate()
                lexeme_type = LexemeType.Symbol
                tokens = [ch]
            elif ch == '.':
                lexeme_type = LexemeType.Real
                tokens.append(ch)
            else:
                # If this is a . then we go from Integer to Real.
                raise NotImplementedError('Unknown char "%s"' % ch)
        elif lexeme_type == LexemeType.IntegerBase16:
            # Expect '&' already consumed; an optional 'H' precedes the
            # hex digits (&HFF style literals).
            if not tokens:
                if ch in string.hexdigits:
                    tokens.append(ch)
                elif ch == 'H':
                    pass
                else:
                    # NOTE(review): the comma makes ch a second exception
                    # argument rather than %-formatting the message.
                    raise ValueError('Expected & followed by H got %s', ch)
            elif ch in string.hexdigits:
                tokens.append(ch)
            elif ch.isspace():
                yield _terminate()
                lexeme_type = LexemeType.Unknown
                tokens = []
            else:
                raise NotImplementedError('Unknown char "%s"' % ch)
        elif lexeme_type == LexemeType.Real:
            if ch.isdigit():
                tokens.append(ch)
            elif ch.isspace():
                yield _terminate()
                lexeme_type = LexemeType.Unknown
                tokens = []
            elif ch in Symbol.Tokens:
                yield _terminate()
                lexeme_type = LexemeType.Symbol
                tokens = [ch]
            else:
                raise NotImplementedError('Unknown char "%s" for %s' % (ch, lexeme_type))
        elif lexeme_type == LexemeType.Identifier:
            if ch.isalpha() or ch.isdigit():
                tokens.append(ch)
            elif ch.isspace():
                yield _terminate()
                lexeme_type = LexemeType.Unknown
                tokens = []
            elif ch in ('$', '%', '#'):
                # Sigils on the end of variable name that mean types in
                # QBasic. Where $ means string, % means integer, & means long,
                # ! is float and # is float.
                tokens.append(ch)
            elif ch in Symbol.Tokens:
                yield _terminate()
                # Start the next one.
                lexeme_type = LexemeType.Symbol
                tokens = [ch]
            else:
                raise NotImplementedError('Unknown char "%s" for %s (%s)' % (ch, lexeme_type, tokens))
        elif lexeme_type == LexemeType.Symbol:
            # Symbols are always a single character: the next character
            # terminates the current symbol, so "<=" yields two tokens.
            if ch.isspace():
                yield _terminate()
                lexeme_type = LexemeType.Unknown
                tokens = []
            elif ch.isdigit():
                yield _terminate()
                lexeme_type = LexemeType.Integer
                tokens = [ch]
            elif ch == '"':
                yield _terminate()
                lexeme_type = LexemeType.String
                tokens = []
            elif ch.isalpha():
                yield _terminate()
                lexeme_type = LexemeType.Identifier
                tokens = [ch]
            elif ch in Symbol.Tokens:
                yield _terminate()
                lexeme_type = LexemeType.Symbol
                tokens = [ch]
            else:
                raise NotImplementedError('Unknown char "%s" for %s (%s)' % (ch, lexeme_type, tokens))
        elif lexeme_type == LexemeType.String:
            # No escape handling: a double quote always ends the string.
            if ch == '"':
                yield _terminate()
                lexeme_type = LexemeType.Unknown
                tokens = []
            else:
                tokens.append(ch)
        elif lexeme_type == LexemeType.Comment:
            # Comments run to the end of the line.
            if ch == '\n':
                yield _terminate()
                lexeme_type = LexemeType.Unknown
                tokens = []
            else:
                tokens.append(ch)
        else:
            raise NotImplementedError('Unknown char "%s" for %s' % (ch, lexeme_type))
    # Flush whatever token was still being built when EOF was reached.
    if lexeme_type != LexemeType.Unknown:
        yield _terminate()
if __name__ == '__main__':
with open('../tests/lexer_example.bas') as reader:
for token in parse(reader):
print(token)
| true
|
e42ea9cd7f5f8e8a1a7b7f848b2430d08d65f02e
|
Python
|
muskan-aggarwal/PYTHON
|
/pattern_twochardownandup.py
|
UTF-8
| 656
| 3.65625
| 4
|
[] |
no_license
|
def printTwoCharUp(symbol, n):
    """Print the widening (top) half of a diamond of *symbol*, dot-padded.

    Rows grow 1, 3, 5, ... symbols wide; each side is padded with '.' so
    successive rows shift inwards.  Prints n//2 - 1 rows for even n, n//2
    rows for odd n.
    """
    dot = '.'
    pad = n // 2                       # dots each side of the first row
    width = 1                          # symbols in the current row
    rows = n // 2 if n % 2 == 0 else n // 2 + 1
    offset = 0
    for _ in range(1, rows):
        print(dot * (pad - offset), symbol * width, dot * (pad - offset))
        offset += 1
        width += 2
    return
def printTwoCharDown(symbol, n):
    """Print the narrowing (bottom) half of a diamond of *symbol*, dot-padded.

    The first row is n symbols wide; each subsequent row loses two symbols
    and gains one dot of padding on each side.
    """
    dot = '.'
    rows = n // 2 if n % 2 == 0 else n // 2 + 1
    width = n
    for offset in range(rows):
        print(dot * offset, symbol * width, dot * offset)
        width -= 2
    return
printTwoCharUp('*',8)
printTwoCharDown('*',8)
| true
|
07e04e8d0f7a66517a5c96b61870689c8ca2bca5
|
Python
|
CVxTz/taxi_duration
|
/read_data.py
|
UTF-8
| 3,965
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
import math
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from geopy.distance import vincenty
def read_data_base():
    """Load the raw train/test CSVs and return baseline model features.

    Returns a 5-tuple: (train_features, test_features, train_ids, test_ids,
    train_target).  The target is the 'trip_duration' column (presumably
    seconds — TODO confirm against the dataset documentation).
    """
    train = pd.read_csv("data/train.csv", parse_dates=['pickup_datetime', 'dropoff_datetime'])
    test = pd.read_csv("data/test.csv", parse_dates=['pickup_datetime'])
    # Columns used as model inputs; store_and_fwd_flag is categorical but
    # is mapped to 0/1 below so it can be treated as numeric.
    numeric_variables = ['vendor_id', 'passenger_count', 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude',
                        'dropoff_latitude', 'store_and_fwd_flag' ]
    train["store_and_fwd_flag"] = train["store_and_fwd_flag"].map({"N":0, "Y":1})
    test["store_and_fwd_flag"] = test["store_and_fwd_flag"].map({"N":0, "Y":1})
    target = "trip_duration"
    return train[numeric_variables], test[numeric_variables], train["id"], test["id"], train[target]
#Create some distance related columns
def compute_distance(x):
    """Vincenty distance in miles between a row's pickup and dropoff points.

    *x* is a row-like mapping with pickup/dropoff latitude and longitude
    columns (as produced by DataFrame.apply with axis=1).
    """
    pickup = (x["pickup_latitude"], x["pickup_longitude"])
    dropoff = (x["dropoff_latitude"], x["dropoff_longitude"])
    return vincenty(pickup, dropoff).miles
#Compute bearing from https://gist.github.com/jeromer/2005586
def calculate_initial_compass_bearing(pointA, pointB):
    """Initial compass bearing, in degrees [0, 360), from pointA to pointB.

    Both points are (latitude, longitude) tuples in decimal degrees.
    Raises TypeError when either argument is not a tuple.
    Formula from https://gist.github.com/jeromer/2005586.
    """
    if type(pointA) != tuple or type(pointB) != tuple:
        raise TypeError("Only tuples are supported as arguments")

    lat_a = math.radians(pointA[0])
    lat_b = math.radians(pointB[0])
    delta_lon = math.radians(pointB[1] - pointA[1])

    x = math.sin(delta_lon) * math.cos(lat_b)
    y = (math.cos(lat_a) * math.sin(lat_b)
         - math.sin(lat_a) * math.cos(lat_b) * math.cos(delta_lon))

    bearing = math.degrees(math.atan2(x, y))
    # atan2 yields (-180, 180]; normalise to a compass range.
    return (bearing + 360) % 360
def compute_bearing(x):
    """Compass bearing (degrees) from a row's pickup point to its dropoff.

    *x* is a row-like mapping with pickup/dropoff latitude and longitude
    columns; delegates to calculate_initial_compass_bearing.
    """
    pickup = (x["pickup_latitude"], x["pickup_longitude"])
    dropoff = (x["dropoff_latitude"], x["dropoff_longitude"])
    return calculate_initial_compass_bearing(pickup, dropoff)
def _add_time_and_geo_features(df):
    """Add datetime-derived and geographic feature columns to *df* in place.

    Shared by the train and test frames so both receive identical feature
    engineering (the original duplicated this block verbatim for each).
    """
    df["day_of_week"] = df.pickup_datetime.dt.dayofweek
    df["month"] = df.pickup_datetime.dt.month
    df["day_of_month"] = df.pickup_datetime.dt.day
    df["day_of_year"] = df.pickup_datetime.dt.dayofyear
    df["hour"] = df.pickup_datetime.dt.hour
    df["minute"] = df.pickup_datetime.dt.minute
    # Row-wise apply is slow but matches the original behaviour exactly.
    df["distance"] = df.apply(lambda x: compute_distance(x), axis=1)
    df["bearing"] = df.apply(lambda x: compute_bearing(x), axis=1)


def read_data_add_features():
    """Load train/test CSVs and return engineered features plus ids/target.

    Returns a 5-tuple: (train_features, test_features, train_ids, test_ids,
    train_target) where the feature frames contain the base numeric columns
    plus the derived time/distance/bearing columns.
    """
    train = pd.read_csv("data/train.csv", parse_dates=['pickup_datetime', 'dropoff_datetime'])
    test = pd.read_csv("data/test.csv", parse_dates=['pickup_datetime'])
    numeric_variables = ['vendor_id', 'passenger_count', 'pickup_longitude', 'pickup_latitude',
                         'dropoff_longitude', 'dropoff_latitude', 'store_and_fwd_flag']
    # Encode the categorical flag as 0/1 so it can be used numerically.
    train["store_and_fwd_flag"] = train["store_and_fwd_flag"].map({"N": 0, "Y": 1})
    test["store_and_fwd_flag"] = test["store_and_fwd_flag"].map({"N": 0, "Y": 1})
    target = "trip_duration"

    _add_time_and_geo_features(train)
    _add_time_and_geo_features(test)

    add_features = ["day_of_week", "month", "day_of_month", "day_of_year",
                    "hour", "minute", "distance", "bearing"]
    return (train[numeric_variables + add_features], test[numeric_variables + add_features],
            train["id"], test["id"], train[target])
| true
|
a374a88220dc7dc135726bfc7164ca7dfcf2135e
|
Python
|
Aye-Theingi/learningpython
|
/learningPython/learningPython/OOP_methods.py
|
UTF-8
| 803
| 3.75
| 4
|
[] |
no_license
|
'''
Created on Jun 8, 2020
@author: isgm137
'''
class Dog():
    """A simple dog with a class-wide species and per-instance breed/name."""

    # Shared by every Dog instance.
    species = 'mammal'

    def __init__(self, breed, name):
        self.breed = breed
        self.name = name

    def bark(self):
        """Print a greeting that includes this dog's name."""
        print("Wolf,My name is {}".format(self.name))
my_dog=Dog('Lab','Frankie')
print('Species',my_dog.species)
print('Breed',my_dog.breed)
print('Name',my_dog.name)
my_dog.bark()
class Circle():
    """Circle with a radius, a precomputed area, and a circumference helper."""

    pi = 3.14  # class-level approximation of pi

    def __init__(self, radius=1):
        self.radius = radius
        # Area is computed once at construction time (evaluation order kept
        # identical to the original: radius * radius * pi).
        self.area = radius * radius * self.pi

    def get_circumference(self):
        """Return the circumference, radius * pi * 2."""
        return self.radius * self.pi * 2
my_circle=Circle()
print('PI',my_circle.pi)
print('get_circumference',my_circle.get_circumference())
print('area',my_circle.area)
| true
|
d4c190cd841cc1ee38fc7fba2e69ca472009a639
|
Python
|
mengjie514/Twitter-notes
|
/notes_file_objects.py
|
UTF-8
| 1,176
| 3.3125
| 3
|
[] |
no_license
|
# File Objects
f = open('pre-SCOvENG.json', 'r')
print(f.name)
print(f.mode)
f.close()
# Open file with context manager (small file)
with open('pre-SCOvENG.json', 'r') as f:
print(f.read())
with open('pre-SCOvENG.json', 'r') as f:
f_contents = f.read()
print(f_contents)
# Open large file
with open('pre-SCOvENG.json', 'r') as f:
# grab the first line
f_contents = f.readline()
print(f_contents)
with open('pre-SCOvENG.json', 'r') as f:
# simply iterate over the lines in file and read all
for line in f:
print(line)
with open('pre-SCOvENG.json', 'r') as f:
# with f.read() to specify the amount of data want to read at a time
# passing in a 100 and print out the first 100 characters of our file
f_contents = f.read(100)
print(f_contents)
with open('pre-SCOvENG.json', 'r') as f:
size_to_read = 100
f_contents = f.read(size_to_read)
print(f_contents)
with open('pre-SCOvENG.json', 'r') as f:
size_to_read = 100
f_contents = f.read(size_to_read)
while len(f_contents) > 0:
print(f_contents, end='')
f_contents = f.read(size_to_read)
| true
|
91884b4cbcfe68308ccfb804f864fc116c5c0790
|
Python
|
meizhaohui/flask_web
|
/usesqlalchemy.py
|
UTF-8
| 6,425
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
"""
@Author : 梅朝辉 Meizhaohui
@Email : mzh.whut@gmail.com
@Time : 2018/11/1 23:54
@File : usesqlalchemy.py
@Version : 1.0
@Interpreter: Python3.6.2
@Software: PyCharm
@Description: 使用sqlalchemy操作数据库
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Boolean
from sqlalchemy import Text
from sqlalchemy import and_
from sqlalchemy import or_
from sqlalchemy import ForeignKey
# create_engine() 会返回一个数据库引擎
engine = create_engine(
"mysql+pymysql://root:root@localhost:3306/flask?charset=utf8mb4",
echo=False)
# sessionmaker()会生成一个数据库会话类。这个类的实例可以当成一个数据库连接
# 它同时还记录了一些查询的数据,并决定什么时候执行SQL语句
DBSession = sessionmaker(bind=engine)
# 实例化数据库会话类,建立与数据库的连接
session = DBSession()
# 创建对象的基类,declarative_base()创建一个Base类,这个类的子类可以自动与一个表关联
Base = declarative_base()
# 定义表
class User(Base):
# 表名user,用户表
__tablename__ = 'user'
# 字段id,主键,默认自增长
id = Column(Integer, primary_key=True, autoincrement=True)
# 字段username,用户名,最大50位变长字符串,非空
username = Column(String(50), nullable=False)
# 字段email,邮箱
email = Column(String(50))
# 字段is_active,是否激活,
is_active = Column(Boolean, default=True)
class Article(Base):
# 表名article,文章表
__tablename__ = 'article'
# 字段id,主键,默认自增长
id = Column(Integer, primary_key=True, autoincrement=True)
# 字段title,标题,最大50位变长字符串,非空
title = Column(String(50), nullable=False)
# 字段content,文章内容,长文本
content = Column(Text)
# 字段tag,文章标签
tag = Column(String(50))
# 字段is_active,是否激活
is_active = Column(Boolean, server_default='1')
# 删除所有数据表
Base.metadata.drop_all(engine)
# 创建数据表
# Base.metadata.create_all(engine)会找到Base的所有子类,并在数据库中建立这些表
Base.metadata.create_all(engine)
# # 向数据表中添加数据
user1 = User(username='user1', email='user1@flask.com')
user2 = User(username='user2', email='user2@flask.com')
user3 = User(username='user3', email='user3@flask.com')
user4 = User(username='user4', email='user4@flask.com')
user5 = User(username='user5', email='user5@flask.com')
# 增加一个用户到session中
session.add(user1)
# 增加一组用户到session中
session.add_all([user2, user3, user4])
session.add(user5)
article1 = Article(title='标题1', content='正文1', tag='Python')
article2 = Article(title='标题2', content='正文2', tag='Java')
article3 = Article(title='标题3', content='正文3', tag='Python')
article4 = Article(title='标题4', content='正文4', tag='Java')
session.add(article1)
session.add_all([article2, article3, article4])
# 提交即保存到数据库中
session.commit()
# 查询数据库数据
print('EQUAL:')
print(session.query(User).filter(User.username == 'user1').one().username) # equal
print('NOT EQUAL:')
print(session.query(User).filter(User.username != 'user1').all()[0].username) # not equal
print('LIKE:')
print(session.query(User).filter(User.username.like('%2')).one().username) # LIKE
print('IN:')
for user in session.query(User).filter(User.username.in_(['user1', 'user2', 'user3'])).all(): # in
print(user.id, user.username)
print('NOT IN:')
for user in session.query(User).filter(~User.username.in_(['user1', 'user2', 'user3'])).all(): # not in
print(user.id, user.username)
print('AND:')
print(session.query(User).filter(User.username.like('user%'), User.id == '2').one().username) # AND
print(session.query(User).filter(and_(User.username.like('user%'), User.id == '2')).one().username) # AND
print(session.query(User).filter(User.username.like('user%')).filter(User.id == '2').one().username) # AND
print('OR:')
for user in session.query(User).filter(or_(User.username.like('user%'), User.id == '3')).all():
print(user.id, user.username)
print('COUNT QUERY')
print(session.query(User).filter(User.username.like('user%')).count())
print([i.username for i in session.query(User).order_by(User.username.desc()).all()])
print([i.username for i in session.query(User).order_by(User.username.asc()).all()])
# 更新数据表
# 更新一条数据
user1 = session.query(User).filter(User.username == 'user1').first()
print(user1.username, user1.email)
user1.email = user1.username + '@python.org'
session.flush()
print(user1.username, user1.email)
session.commit()
print(user1.username, user1.email)
# 更新多条数据
user4 = session.query(User).filter(User.username == 'user4').first()
user5 = session.query(User).filter(User.username == 'user5').first()
print(user4.username, user4.email)
print(user5.username, user5.email)
# synchronize_session='fetch'在更新操作之前,先发一条sql到数据库中进行查询符合条件的记录
session.query(User).filter(User.id > 3).update(
{User.email: User.username + '@python.org'}, synchronize_session='fetch')
# flush就是把客户端尚未发送到数据库服务器的SQL语句发送过去,此时数据库未生效,flush之后你才能在这个Session中看到效果
session.flush()
print(user4.username, user4.email)
print(user5.username, user5.email)
# commit就是告诉数据库服务器提交事务,commit之后你才能从其它Session中看到效果,数据库才真正生效
session.commit()
# 查询所有数据
print([(user.id, user.username, user.email, user.is_active) for user in
session.query(User).all()])
# 删除数据
user5 = session.query(User).filter(User.username == 'user5').first()
# 删除一条数据
session.delete(user5)
session.flush()
session.commit()
# 删除多条数据
session.query(User).filter(User.id > 2).delete(synchronize_session='fetch')
session.flush()
session.commit()
print([(user.id, user.username, user.email, user.is_active) for user in
session.query(User).all()])
| true
|
ee82866e7a0e684fd785138da27a8eeea1500c33
|
Python
|
thehimel/data-structures-and-algorithms-udacity
|
/m04c02-graph-algorithms/i13e00_heapq.py
|
UTF-8
| 1,433
| 4.9375
| 5
|
[] |
no_license
|
"""
Python's inbuilt heapq
With heapq module, you can convert a list into a min-heap.
The following two functionalities can be very handy for this task:
heappush(heap, item) — add item to the heap
heappop(heap) — remove the smallest item from the heap
Let's look at the above methods in action. We start by creating a list of int.
"""
import heapq
"""heappush"""
# initialize an empty list
min_heap = list()
heapq.heappush(min_heap, 6)
heapq.heappush(min_heap, 6)
heapq.heappush(min_heap, 2)
heapq.heappush(min_heap, 1)
heapq.heappush(min_heap, 9)
print("After pushing, heap: {}".format(min_heap))
""" heappop """
# pop and return smallest element from the heap
smallest = heapq.heappop(min_heap)
print("Smallest element: {}".format(smallest))
print("After popping, heap: {}".format(min_heap))
"""
heappush and heappop for items with multiple entries
Note: If you insert a tuple inside the heap,
the element at 0th index of the tuple is used for comparision
"""
min_heap = list()
heapq.heappush(min_heap, (0, 1))
heapq.heappush(min_heap, (-1, 5))
heapq.heappush(min_heap, (2, 0))
heapq.heappush(min_heap, (5, -1))
heapq.heappush(min_heap, (-1, -1))
heapq.heappush(min_heap, (2, -1))
print("After pushing, heap: {}".format(min_heap))
# pop and return smallest element from the heap
smallest = heapq.heappop(min_heap)
print("Smallest element: {}".format(smallest))
print("After popping, heap: {}".format(min_heap))
| true
|
39854d8c0a7b8b44d1f1074145a1424617863618
|
Python
|
victornjr/SoftwareQuality-Testing
|
/UnitTests/test_hello.py
|
UTF-8
| 723
| 3.546875
| 4
|
[] |
no_license
|
import unittest
import hello
class TestHello(unittest.TestCase):
    """Demonstration test case for the hello module.

    NOTE: test_add is intentionally written to fail (see the author's
    comments inside it) to demonstrate a failing assertion.
    """
    # First test case -> returning Hello World!
    def test_SayHello(self):
        # If the method returns "Hello World!", then the test will pass
        self.assertEqual(hello.sayHello(),"Hello World!")
    def test_add(self):
        # For this test case, I will have two asserts
        # if one fails, then all the test case fails.
        self.assertEqual(hello.add(3,5),8)
        # This second assert will fail,
        # because I'm saying that the resukt will be 3 when the result is 15
        # if we change the value to 15, then it will pass
        self.assertEqual(hello.add(10,5),3)
if __name__ == '__main__':
unittest.main()
| true
|
1014a1e8f6b4110c2e5b8c8a0c853429a8f371cb
|
Python
|
igortereshchenko/amis_python
|
/km73/Mirniy_Nickolay/5/task5.py
|
UTF-8
| 343
| 3.5
| 4
|
[] |
no_license
|
# Read n points and test pairs for a "queen attack" relation: two points
# attack each other when they share a row, a column, or a diagonal.
x = []
y = []
n = 8
for i in range(n) :
    # Prompts are in Russian ("Введите" = "Enter"); kept verbatim.
    x.append(int(input('Введите x'+str(i+1) + ':' )))
    y.append(int(input('Введите y'+str(i+1)+':')))
for i in range(n) :
    for j in range(i+1 , n) :
        # Same diagonal (|dx| == |dy|), same column, or same row.
        if (abs(x[i]-x[j])) == (abs(y[j]-y[i])) or (x[i] == x[j]) or (y[i] == y[j]):
            answer = 'Yes'
        else :
            answer = 'No'
# NOTE(review): `answer` is overwritten on every pair, so this prints the
# result for the LAST pair only (i=n-2, j=n-1), not "any pair attacks".
# Likely a bug — a break on 'Yes' (or accumulating with `or`) was probably
# intended.  TODO confirm against the assignment statement.
print(answer)
| true
|
2ca2a51c3248075131085edc1b068ba563801820
|
Python
|
itsintheletterbox/Racing
|
/Racing.py
|
UTF-8
| 9,621
| 2.609375
| 3
|
[] |
no_license
|
#Imports
import time
import datetime
import urllib2
from lxml import etree
from multiprocessing import Pool
## Parameters
valid_meets = {"AR"} #, "BR", "MR", "SR", "PR", "NR", "QR", "VR"}
## Date
curDate = datetime.datetime.today()
year = curDate.year
month = curDate.month
day = curDate.day
## Daylight savings time adjustment
dstAdj = time.localtime().tm_isdst
# Collection times (minutes before/after the race)
collectTimes = [-30,-25,-20,-15,-10,-5,-4,-3,-2,-1,1,3,5,10]
collectTimes = [-5,-4,-3,-2,-1,1,3,5]
#Function to load xml feed into etree object
def load_tree(url):
    """Fetch *url* and parse the response body as XML with lxml.

    Returns an lxml ElementTree.  Python 2 code (urllib2); the socket is
    closed before returning.
    """
    #Open url
    xml_data = urllib2.urlopen(url)
    #Parse xml
    tree = etree.parse(xml_data)
    #Close url
    xml_data.close()
    del xml_data
    return tree
### Function to check a field exists, then assign the value
def get_val(var, attr):
    """Return ``var[attr]`` when the key exists, otherwise ``None``.

    Safe lookup for attribute dictionaries (e.g. lxml ``.attrib`` maps).
    """
    return var[attr] if attr in var.keys() else None
### Function to get race data
def get_race_data(race_tree):
#Track details
meetingInfo = race_tree.findall("//Meeting")[0].attrib
venueName = meetingInfo["VenueName"]
trackDesc = get_val(meetingInfo,"TrackDesc")
trackCond = get_val(meetingInfo,"TrackCond")
trackRating = get_val(meetingInfo,"TrackRating")
## Race details
raceInfo = race_tree.findall("//Race")[0].attrib
raceDate = raceInfo["RaceTime"].split("T")[0]
raceTime = raceInfo["RaceTime"].split("T")[1]
raceName = raceInfo["RaceName"]
raceDist = raceInfo["Distance"]
raceFav = get_val(raceInfo,"SubFav")
## Tipsters
tipsterList = race_tree.findall("//Tipster")
tipsterTipList = race_tree.findall("//TipsterTip")
tipsters = [tipster.attrib["TipsterName"] for tipster in tipsterList]
tips = ["x-"+tip.attrib["Tips"].replace("*","") for tip in tipsterTipList]
## Results
resultList = race_tree.findall("//Result")
poolList = race_tree.findall("//PoolResult")
winPlace = [get_val(result.attrib,"RunnerNo") for result in resultList]
poolDivCode = [get_val(pool.attrib,"PoolType") for pool in poolList]
poolDividend = [get_val(pool.attrib,"Dividend") for pool in poolList]
## Exotics
exoticsList = race_tree.findall("//Pool")
dividendList = race_tree.findall("//Dividend")
exoticType = [get_val(exotic.attrib, "PoolType") for exotic in exoticsList]
exoticPool = [get_val(exotic.attrib, "PoolTotal") for exotic in exoticsList]
divAmt = [get_val(div.attrib, "DivAmount") for div in dividendList]
divID = [get_val(div.attrib, "DivId") for div in dividendList]
return {"Date":raceDate, "Meet":meet, "VenueName":venueName, "TrackDesc":trackDesc, \
"TrackCond":trackCond, "TraclRating": trackRating, "RaceNo":raceNo,\
"Time":raceTime, "RaceName":raceName, "RaceDist":raceDist, \
"RaceFav":raceFav, "Tipsters":tipsters, "Tips":tips, \
"WinPlace":winPlace, "PoolDivCode":poolDivCode, "PoolDividend":poolDividend, \
"ExoticType":exoticType, "ExoticPool":exoticPool, "DivAmt":divAmt, "DivID":divID}
### Function to get field data
def get_field_data(race_tree):
#Get data
runnerList = race_tree.findall("//Runner")
winOddsList = race_tree.findall("//WinOdds")
placeOddsList = race_tree.findall("//PlaceOdds")
fixedOddsList = race_tree.findall("//FixedOdds")
#Get details for each runner for each race
fieldDict = {}
for i in range(0,len(runnerList)):
#Grab details for the race
runnerInfo = runnerList[i].attrib
winOddsInfo = winOddsList[i].attrib
placeOddsInfo = placeOddsList[i].attrib
fixedOddsInfo = fixedOddsList[i].attrib
#Get horse details
runnerNo = get_val(runnerInfo,"RunnerNo")
runnerName = get_val(runnerInfo,"RunnerName")
runnerWeight = get_val(runnerInfo,"Weight")
runnerJockey = get_val(runnerInfo,"Rider")
runnerForm = get_val(runnerInfo, "LastResult")
runnerChanged = get_val(runnerInfo,"RiderChanged")
runnerBarrier = get_val(runnerInfo,"Barrier")
runnerScratched = get_val(runnerInfo,"Scratched")
runnerRtng = get_val(runnerInfo,"Rtng")
runnerHandicap = get_val(runnerInfo,"Handicap")
#Odds
oddsWin = get_val(winOddsInfo,"Odds")
oddsWinLost = get_val(winOddsInfo,"Lastodds")
oddsPlace = get_val(placeOddsInfo,"Odds")
oddsPlaceLast = get_val(placeOddsInfo,"Lastodds")
fixWin = get_val(fixedOddsInfo, "Odds")
retailWin = get_val(fixedOddsInfo,"RetailWinOdds")
fixPlace = get_val(fixedOddsInfo,"PlaceOdds")
retailPlace = get_val(fixedOddsInfo,"RetailPlaceOdds")
#Timestamp
calcTime = get_val(winOddsInfo,"CalcTime")
lastCalcTime = get_val(winOddsInfo,"LastCalcTime")
if calcTime is not None:
calcTime = calcTime.split("T")[1]
if lastCalcTime is not None:
lastCalcTime = lastCalcTime.split("T")[1]
#Compile all details into dict
fieldDict[runnerNo] = {"Name":runnerName, "Weight":runnerWeight, "Jockey":runnerJockey,\
"Form":runnerForm, "Changed":runnerChanged, "Barrier":runnerBarrier, \
"Scratched":runnerScratched, "Rating":runnerRtng, "Handicap":runnerHandicap, \
"OddsWin":oddsWin, "OddsWinLost":oddsWinLost, "OddsPlace":oddsPlace,\
"OddsPlaceLast":oddsPlaceLast, "FixWin":fixWin, "RetailWin":retailWin, \
"FixPlace":fixPlace, "RetailPlace":retailPlace, \
"CalcTime":calcTime, "LastCalcTime":lastCalcTime}
#Return dict holding details for all runners in the race
return fieldDict
def raceLoop(meet, collectTimes):
nraces = int(race_meets[meet])
for race in range(1,nraces):
raceKey = meet+str(race)
raceTimeStr = raceDict[raceKey]['Time'].split(":")
raceTime = datetime.datetime(year,month,day,int(raceTimeStr[0]),int(raceTimeStr[1]), int(raceTimeStr[2]))
if (raceTime + datetime.timedelta(hours = dstAdj, minutes = min(collectTimes))) < datetime.datetime.now():
None ## If missed the first collection time skip the race
else:
for wait in collectTimes:
curTime = datetime.datetime.now()
colTime = raceTime + datetime.timedelta(hours = dstAdj, minutes = wait)
waitTime = (colTime-curTime).total_seconds()
print "Waiting for %s%s at %s in %i minutes" %(meet,race,raceTime,waitTime/60)
time.sleep(waitTime)
fieldDict[raceKey,wait] = get_field_data(race_tree)
return fieldDict
def main():
for k in range(nMeets):
meet = race_meets.keys()[k]
print "*** Meet = " + meet + " ***"
raceLoop(meet, collectTimes)
#Create multiprocessing pool - one stream for each race meet
pool = Pool(processes = nMeets)
for k in range(nMeets):
meet = race_meets.keys()[k]
print "*** Meet = " + meet + " ***"
try:
pools[meet] = pool.apply_async(raceLoop, [meet, collectTimes])
except:
pool.close()
pool.close()
pool.join()
for k in range(nMeets):
meet = race_meets.keys()[k]
results[meet] = pools[meet].get()
########################################
## First get the race meets for the day
########################################
url = "https://tatts.com/pagedata/racing/%s/%s/%s/RaceDay.xml" %(year,month,day)
tree = load_tree(url)
## Build dict mapping race meets to number of races in each
race_meets = {}
for meet in tree.iterfind("Meeting"):
code = meet.attrib["MeetingCode"]
if code in valid_meets and meet.attrib["Abandoned"] == "N":
race_meets[code] = meet.attrib["HiRaceNo"]
nMeets = len(race_meets.keys())
######################################
## Next loop through each of the races
######################################
# Column lookup key for raceDict and fieldDict
# raceCols: "Date", "Code", "Venue", "TrackDescription", "TrackCondition", \
# "TrackRating", "RaceNo", "Time", "RaceName", "Distance", \
# "Favourite", "Tipster", "Tip", "WinPlace", "DivCode", \
# "Dividend", "Exotic", "Pool", "ExoticDividend", "DividendId"
#
# fieldCols: "Number", "Name", "Weight", "Jockey", "Form", "RiderChange", \
# "Barrier", "Scratched", "Rating", "Handicap", "WinLast", "Win", \
# "PlaceLast", "Place", "FixWin", "RetailWin", "FixPlace", \
# "RetailPlace", "LastTime", "Time"
'''
raceDict = {}
for meet in race_meets.keys():
last_race = int(race_meets[meet])
for raceNo in range(1,last_race+1):
race_url = "http://tatts.com/pagedata/racing/%s/%s/%s/%s%s.xml" %(year, month, day, meet, raceNo)
print race_url
race_xml = urllib2.urlopen(race_url)
race_tree = etree.parse(race_xml)
race_xml.close()
# Race data
raceDict[meet+str(raceNo)] = get_race_data(race_tree)
print "Races:"
print sorted(raceDict.keys())
'''
## Loop over race meets
fieldDict = {}
pools = {}
results = {}
#raceLoop('AR',[-60])
if __name__ == '__main__':
main()
#sys.exit()
| true
|
f3e2a3b0a0fa1a7281d756556025219860480d6a
|
Python
|
yuribyk/library-assistant
|
/Library_Manager/DatabaseInteractor/DatabaseUtilities.py
|
UTF-8
| 1,329
| 2.546875
| 3
|
[] |
no_license
|
import pymysql
from DatabaseInteractor.DTO.DataPacker import DataPacker
class DataBaseUtilities:
    """Thin helper around pymysql for the LibrarySystem database.

    SECURITY NOTE(review): credentials are hard-coded below and queries are
    executed as raw strings — callers must not build queries from untrusted
    input (SQL-injection risk); consider parameterized queries and moving
    credentials to configuration.
    """
    def __init__(self):
        pass
    @staticmethod
    def get_data_base_connection():
        # Opens a new connection per call; callers are expected to close it
        # via close_data_base_connection.
        try:
            db_connection = pymysql.connect("localhost", "root", "Peacer1490!", "LibrarySystem")
            return db_connection
        except Exception as e:
            # NOTE(review): try/except that only re-raises adds nothing.
            raise e
    @staticmethod
    def close_data_base_connection(bd_connect):
        try:
            bd_connect.close()
        except Exception as e:
            raise e
    def update_data(self, query):
        # Execute a write (INSERT/UPDATE/DELETE) statement and commit.
        db_connection = self.get_data_base_connection()
        try:
            cursor = db_connection.cursor()
            cursor.execute(query)
            db_connection.commit()
            #print("Data update was successful.")
        except Exception as e:
            raise e
        finally:
            # Connection is always released, even on failure.
            self.close_data_base_connection(db_connection)
    def get_info(self, query):
        # Execute a SELECT and wrap all fetched rows in a DataPacker.
        db_connection = self.get_data_base_connection()
        try:
            cursor = db_connection.cursor()
            cursor.execute(query)
            records = cursor.fetchall()
            return DataPacker(records)
        except Exception as e:
            raise e
        finally:
            self.close_data_base_connection(db_connection)
| true
|
1ef4eba1ca70947cdedad1336f8117ad9c254497
|
Python
|
alb7979s/boj
|
/삼성기출/17136_색종이붙이기.py
|
UTF-8
| 1,320
| 2.625
| 3
|
[] |
no_license
|
#시간초과 뜸 수정하기
from sys import*
input = stdin.readline
def check(x, y, pos):
if colored_paper[pos] <= 0: return 0
for i in range(x, x+pos+1):
for j in range(y, y+pos+1):
if i>9 or j>9 or not a[i][j]: return 0
for i in range(x, x+pos+1): #색종이 덮기
for j in range(y, y+pos+1):
a[i][j]=0
return 1
def solve(pos, cnt, covered):
    """Backtracking search for the minimum number of sheets.

    Args:
        pos: index into one_list of the next 1-cell to consider.
        cnt: number of sheets used so far on this path.
        covered: number of 1-cells covered so far on this path.

    Returns the best (minimum) sheet count found; mutates the global `res`.
    """
    global a, res
    # All 1-cells covered: this path used cnt sheets.
    if covered == one_cnt: return cnt
    # Exhausted cells, or this path already can't beat the best: prune.
    if pos == one_cnt or res<=cnt: return res
    x, y = one_list[pos]
    if a[x][y]:
        # Cell still uncovered: try every sheet size 1x1 .. 5x5 at (x, y).
        for i in range(5):
            if check(x,y,i):
                colored_paper[i]-=1
                res = min(res, solve(pos+1, cnt+1, covered+(i+1)**2))
                colored_paper[i]+=1
                # Undo the covering done by check() before trying the next size.
                for n in range(x, x+i+1):
                    for m in range(y, y+i+1):
                        a[n][m]=1
        #res = min(res, solve(pos+1, cnt, covered))  # unnecessary branch; it caused the time-limit-exceeded
    else: res=min(res, solve(pos+1, cnt, covered))
    return res
INF=1e9
a=[]  # 10x10 board of 0/1 cells read from stdin
colored_paper=[5]*5  # stock: five sheets of each size 1x1 .. 5x5
one_cnt=0  # total number of 1-cells that must be covered
one_list=[]  # coordinates of every 1-cell, in row-major order
res=INF
for i in range(10):
    a.append(list(map(int,input().split())))
    for j in range(10):
        if a[i][j]:
            one_cnt+=1
            one_list.append((i,j))
ans=solve(0,0,0)
# INF means no combination of sheets covers every 1-cell.
print(ans if ans!=INF else -1)
| true
|
bcb9909ba0d0b9ad850c78fa624147be4e4fde3e
|
Python
|
samparkewolfe/mido_extension_classes
|
/MidiSaver.py
|
UTF-8
| 2,573
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
import mido_extension_classes.MidiNote as MidiNote
from mido import Message, MidiFile, MidiTrack, MetaMessage, second2tick
import copy
"""
MidiSaver Class
Details:
This object converts a list of custom MidiNote objects back to the mido midi message format and writes it to file.
Usage:
MidiSaver(notesToWrite, ticks_per_beat = 480)
Constructor copys a list of MidiNote objects to write and sets the ticks_per_beat the user wants to write the midi file to.
Args:
notesToWrite: a list of MidiNote objects to write to file.
ticks_per_beat: (see https://mido.readthedocs.io/en/latest/midi_files.html)
write(fileName):
Writes a list of MidiNote objects to file.
Args:
fileName: the absolute path to write the midi file to (not including the .mid extension)
"""
class MidiSaver:
    """Converts a list of MidiNote objects back to mido messages and writes
    them to a .mid file.

    Usage:
        MidiSaver(notesToWrite, ticks_per_beat=480).write("/path/to/out")
    """

    def __init__(self, notesToWrite, ticks_per_beat = 480):
        # Deep-copy so write() can clamp/convert in place without mutating
        # the caller's note objects. (The previous extra `= []` assignment
        # was dead code — it was immediately overwritten.)
        self.notesToWrite = copy.deepcopy(notesToWrite)
        self.ticks_per_beat = ticks_per_beat
        print("New Midi Saver Made")

    def write(self, fileName):
        """Clamp note values, convert times to ticks, and write the file.

        Args:
            fileName: absolute path without the .mid extension.
        """
        self.restrictNoteRanges(self.notesToWrite)
        self.convertTimesFromSecondsToTicks(self.notesToWrite)
        self.writeMidiFile(self.notesToWrite, fileName)
        print("MidiNotes have been written to file")

    def restrictNoteRanges(self, notes):
        """Round pitch/velocity to ints and clamp to valid MIDI ranges in place."""
        for note in notes:
            # MIDI pitch and velocity are 7-bit: 0..127.
            note.pitch = min(max(int(round(note.pitch)), 0), 127)
            note.velocity = min(max(int(round(note.velocity)), 0), 127)
            # Times must be non-negative (no upper bound).
            note.time = max(note.time, 0)

    def convertTimesFromSecondsToTicks(self, notes):
        """Convert each note's time from seconds to integer ticks in place.

        Uses a fixed tempo of 500000 microseconds per beat (120 BPM).
        """
        for note in notes:
            note.time = int(second2tick(note.time, self.ticks_per_beat, 500000))

    def writeMidiFile(self, notes, fileName):
        """Build a single-track type-1 MIDI file from `notes` and save it.

        NOTE(review): only note_on events are emitted — notes are never
        released with a note_off/velocity-0 event; confirm this is intended.
        """
        midiOut = MidiFile(type=1, ticks_per_beat = self.ticks_per_beat)
        track = MidiTrack()
        for note in notes:
            track.append (Message ('note_on',
                                   note = note.pitch,
                                   velocity = note.velocity,
                                   time = note.time))
        track.append (MetaMessage ('end_of_track', time=0))
        midiOut.tracks.append(track)
        midiOut.save(fileName + '.mid')
| true
|
99483e4c7a9ff7e7b21851089586ac733fab638f
|
Python
|
jvanvugt/advent-of-code-2019
|
/day19.py
|
UTF-8
| 1,670
| 3.03125
| 3
|
[] |
no_license
|
from intcode import Computer
from utils import neighbours
from collections import deque
import itertools
class BeamComputer(Computer):
    """Intcode computer that probes one (x, y) coordinate of the beam.

    Feeds x then y as the program's inputs and records the single output
    (0 or 1) in `self.res`.
    """
    def __init__(self, program, x, y):
        self.inputs = iter((x, y))
        self.res = float("nan")  # NaN until the program produces output
        super().__init__(program)

    def process_output(self, o):
        self.res = o

    def get_input(self):
        return next(self.inputs)
def probe_location(program, x, y):
    """Return 1 if the tractor beam covers (x, y), else 0."""
    probe = BeamComputer(program, x, y)
    probe.run()
    assert probe.res in (0, 1)
    return probe.res
def a(program):
    """Part one: count beam-covered points in the 50x50 area and print a map."""
    size = 50
    grid = [["" for _ in range(size)] for _ in range(size)]
    total = 0
    # Probe column by column, matching the (x, y) argument order of probe_location.
    for col in range(size):
        for row in range(size):
            hit = probe_location(program, col, row)
            total += hit
            grid[row][col] = ".#"[hit]
    print("\n".join("".join(line) for line in grid))
    return total
def check_square(program, right, top):
    """Return True when a 100x100 square fits in the beam.

    (right, top) is the square's top-right corner; the other three corners
    are derived from it and probed (top-right itself is assumed in-beam by
    the caller).
    """
    offset = 100 - 1
    corners = (
        (right - offset, top),
        (right, top + offset),
        (right - offset, top + offset),
    )
    return all(probe_location(program, cx, cy) for cx, cy in corners)
def b(program):
    """Part two: follow the beam's edge down until a 100x100 square fits.

    Returns x * 10000 + y of the square's top-left corner.
    """
    x, y = 6, 8  # known in-beam starting point below the near-origin gap
    assert probe_location(program, x, y)
    while not check_square(program, x, y):
        # Step down one row, preferring the diagonal move, to stay on the
        # beam's right-hand edge.
        for nx, ny in ((x + 1, y + 1), (x, y + 1)):
            if probe_location(program, nx, ny) == 1:
                x, y = nx, ny
                break
        else:
            raise ValueError()
    # (x, y) is the top-right corner; shift left 99 to get the top-left x.
    return (x - 99) * 10_000 + y
def main():
    """Entry point: read the Intcode program and print the part-two answer."""
    # Program is a single comma-separated line of integers.
    program = list(map(int, open("input19.txt").read().split(",")))
    print(b(program))


if __name__ == "__main__":
    main()
| true
|