blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ed95b83ff040d1bc71b374c36530b901b70f0bca | Python | ptrucinskas/BullyShield | /web/py/BullyShield.py | UTF-8 | 1,982 | 2.796875 | 3 | [] | no_license | import MySQLdb
import re
import sys
import time
from HelperClass import MySQLConnect
from HelperClass import Bays_accurate
from HelperClass import ProcessMessage
from HelperClass import Email
from POS import Pos
# Score threshold: once a user's cumulative score exceeds this, they are
# flagged, logged, and an alert email is sent.
SCORE_TO_TRIGGER_FLAG = 8
# Command-line arguments: the user and the message to analyse.
userID = int(sys.argv[1])
messageID = int(sys.argv[2])
def bully_shield(userID, messageID):
    """Main entry point: given a userId and messageId, retrieve the message,
    run the blacklist scan, Bayes classifier and POS detector, adjust the
    user's score, and flag/notify once the score threshold is exceeded.
    """
    # Wait 0.2s to ensure the message row has been committed, then fetch it.
    time.sleep(0.2)
    message = MySQLConnect.get_message(messageID)
    # True when the message contains a blacklisted word.
    isSuspect = ProcessMessage.scan_message(message)
    # Suspect messages go to the Bayes classifier: positive score => bullying
    # (+1 to the user's score), otherwise the user's score decreases.
    if isSuspect:
        score = float(Bays_accurate.startPoint(message))
        if score > 0.0:
            MySQLConnect.increase_score(userID, 1)
        else:
            MySQLConnect.increase_score(userID, -1)
    else:
        MySQLConnect.increase_score(userID, -1)
    # Part-of-speech based detector contributes an extra point when it fires.
    if Pos.main1(message):
        MySQLConnect.increase_score(userID, 1)
    time.sleep(0.2)
    # Flag, log and email once the user's score passes the trigger threshold.
    current_score = MySQLConnect.get_score(userID)
    if current_score > SCORE_TO_TRIGGER_FLAG:
        MySQLConnect.set_flag(userID, 'flag')
        log_flag(userID, messageID)
        Email.sendEmail(userID, messageID)
#Prints to log.txt. Should be called everytime a message is flagged
def log_flag(userID, messageID):
try:
file = open('log.txt', 'a')
file.write("UserID: " + str(userID) + " was flagged by messageID: "
+ str(messageID) + "\n")
file.close()
has_Flagged = False
except:
print "Something went wrong in writing to the file"
file.close()
bully_shield(userID, messageID)
| true |
fd81e8bd6ec95fe9242c8edcb09ed40a918c5b74 | Python | TashanM/Programs | /Time Converter.py | UTF-8 | 1,017 | 3.921875 | 4 | [] | no_license | # Time Converter
# Tashan Maniyalaghan
# 693776
# ICS3U0-B
# 29 November 2017
# Mr. Veera
def hours_to_minutes():
    """Prompt for a number of hours and display the equivalent minutes."""
    hour_count = int(input('Enter the number of hours: '))
    minute_count = 60 * hour_count
    print('%i hours is %i minutes' % (hour_count, minute_count))
def days_to_hours():
    """Prompt for a number of days and display the equivalent hours."""
    day_count = int(input('Enter the number of days: '))
    hour_count = 24 * day_count
    print('%i days is %i hours' % (day_count, hour_count))
def minutes_to_hours():
    """Prompt for a number of minutes and display the equivalent hours.

    Bug fix: the format string has two placeholders but the original passed
    only `hours`, raising TypeError at runtime; both values are now supplied.
    """
    minutes = int(input('Enter the number of minutes: '))
    hours = minutes/60
    print ('%i minutes is %.2f hours' %(minutes, hours))
def hours_to_days():
    """Prompt for a number of hours and display the equivalent days.

    Bug fix: the format string has two placeholders but the original passed
    only `days`, raising TypeError at runtime; both values are now supplied.
    """
    hours = int(input('Enter the number of hours: '))
    days = hours/24
    print ('%i hours is %.2f days' %(hours, days))
# Menu: map the user's numeric choice to a conversion routine.  Any value
# other than 1-3 (including invalid input) falls through to hours-to-days.
method = int(input('Enter:\n1 for Hours to Minutes\n2 for Days to Hours\n3 for Minutes to Hours\n4 for Hours to Days\nChoose:'))
print ()
if (method == 1):
    hours_to_minutes()
elif (method == 2):
    days_to_hours()
elif (method == 3):
    minutes_to_hours()
else:
    hours_to_days()
| true |
ce6b846061f7cb7d8fd69d6cbd2e68fc83c90b5e | Python | Vrinda112001/WAD_Team_B_Glover | /glover/choices.py | UTF-8 | 14,888 | 3.015625 | 3 | [] | no_license |
class CourseChoices:
    """ Courses to populate the Course table with and let users choose from """
    ACCOUNTING = "Accounting"
    ANATOMY = "Anatomy"
    ARCHAEOLOGY = "Archaeology"
    ASTRONOMY = "Astronomy"
    BIOCHEMISTRY = "Biochemistry"
    BUSINESS = "Business"
    CELTIC_STUDIES = "Celtic Studies"
    CHEMISTRY = "Chemistry"
    CIVIL_ENGINEERING = "Civil Engineering"
    COMPUTING_SCIENCE = "Computing Science"
    DENTISTRY = "Dentistry"
    EARTH_SCIENCE = "Earth Science"
    ECONOMICS = "Economics"
    ENGINEERING = "Engineering"
    ENGLISH_LITERATURE = "English Literature"
    FILM = "Film and Television Studies"
    FINANCE = "Finance"
    FRENCH = "French"
    GAELIC = "Gaelic"
    GENETICS = "Genetics"
    GEOGRAPHY = "Geography"
    GEOLOGY = "Geology"
    GERMAN = "German"
    GREEK = "Greek"
    HISTORY = "History"
    HISTORY_OF_ART = "History of Art"
    HUMAN_BIOLOGY = "Human Biology"
    IMMUNOLOGY = "Immunology"
    ITALIAN = "Italian"
    LATIN = "Latin"
    MARINE_BIOLOGY = "Marine Biology"
    MATHS = "Mathematics"
    MEDICINE = "Medicine"
    MICROBIOLOGY = "Microbiology"
    MUSIC = "Music"
    NEUROSCIENCE = "Neuroscience"
    NURSING = "Nursing"
    PHARMACOLOGY = "Pharmacology"
    PHILOSOPHY = "Philosophy"
    PHYSICS = "Physics"
    PHYSIOLOGY = "Physiology"
    POLITICS = "Politics"
    PORTUGUESE = "Portuguese"
    PSYCHOLOGY = "Psychology"
    RUSSIAN = "Russian"
    SOCIOLOGY = "Sociology"
    SOFTWARE_ENGINEERING = "Software Engineering"
    SPANISH = "Spanish"
    STATISTICS = "Statistics"
    THEOLOGY = "Theology"
    VET_MEDICINE = "Veterinary Medicine"
    ZOOLOGY = "Zoology"

    # Canonical list of every course label above; used both to seed the
    # Course table and to build form choices.
    ALL_CHOICES = [ACCOUNTING, ANATOMY, ARCHAEOLOGY, ASTRONOMY, BIOCHEMISTRY, BUSINESS, CELTIC_STUDIES,
                   CHEMISTRY, CIVIL_ENGINEERING, COMPUTING_SCIENCE, DENTISTRY, EARTH_SCIENCE, ECONOMICS, ENGINEERING,
                   ENGLISH_LITERATURE, FILM, FINANCE, FRENCH, GAELIC, GENETICS, GEOGRAPHY, GEOLOGY, GERMAN, GREEK,
                   HISTORY, HISTORY_OF_ART, HUMAN_BIOLOGY, IMMUNOLOGY, ITALIAN, LATIN, MARINE_BIOLOGY, MATHS, MEDICINE,
                   MICROBIOLOGY, MUSIC, NEUROSCIENCE, NURSING, PHARMACOLOGY, PHILOSOPHY, PHYSICS, PHYSIOLOGY, POLITICS,
                   PORTUGUESE, PSYCHOLOGY, RUSSIAN, SOCIOLOGY, SOFTWARE_ENGINEERING, SPANISH, STATISTICS, THEOLOGY,
                   VET_MEDICINE, ZOOLOGY]

    @staticmethod
    def get_choices():
        """ Static method to get all course choices above. """
        # Django-style (value, label) pairs, alphabetised by value.
        return sorted([(v,v) for v in CourseChoices.ALL_CHOICES], key=lambda x: x[0])
class SocietyChoices:
    """Societies to populate the Society table with and let users choose from.

    Bug fixes vs the original:
      * ``GIST`` was assigned twice ("Glasgow International Student Theatre"
        then "Glasgow Insight into Science and Technology"); the second
        assignment silently overwrote the first, so ALL_CHOICES contained the
        science society twice and the theatre society not at all.  The theatre
        society now has its own attribute ``GIST_THEATRE``; ``GIST`` keeps the
        value it effectively had ("Glasgow Insight...") for compatibility.
      * Misspelled user-facing labels corrected: "Createive Writing" ->
        "Creative Writing", "Glagow Students for Choice" -> "Glasgow Students
        for Choice", "Mounteneering" -> "Mountaineering".  NOTE(review): if a
        database was already seeded with the misspelled labels it will need a
        data migration.
    """
    ACAPELLA = "Acapella"
    ART_LIFE_DRAWING = "Art and Life Drawing"
    ART_APPRECIATION = "Art Appreciation"
    ASTROLOGY = "Astrology"
    ASTRONOMY = "Astronomy"
    BAD_MOVIE = "Bad Movie"
    BAKING = "Baking"
    BALLROOM_LATIN = "Ballroom and Latin Dancing"
    BEEKEEPING = "Beekeeping"
    BHAKTI_YOGA = "Bhakti Yoga"
    BIG_BAND = "Big Band"
    GUBES = "Bridging Education"
    BUDDHIST = "Buddhist"
    BUSINESS = "Business Club"
    FBP = "Food and Body Positivity"
    BHF = "Friends of BHF"
    GUCVS = "Cardiovascular"
    CATHOLIC = "Catholic Association"
    CECILIAN = "Cecilian"
    CHICKEN = "Chicken Wing"
    CHINESE = "Chinese Students Community"
    CHOCOLATE = "Chocolate"
    CHRISTIAN = "Christian Union"
    COMIC = "Comic Creators Club"
    COMMUNIST = "Communist"
    COMP_PROGRAMMING = "Competitive Programming"
    CRAFTS = "Crafts"
    CHOICE = "Glasgow Students for Choice"
    OPEN_CAGES = "Open Cages"
    GUSCDC = "Scottish Country Dance Club"
    CLIMATE = "Students Against Climate Change"
    DANCE4WATER = "Dance4Water Glasgow"
    DANCEMANIA = "Dancemania"
    DISNEY = "Disney"
    DOC_WHO = "Doctor Who"
    DOCUMENTARY = "Documentary"
    DRAG = "Drag"
    POLE_DANCING = "Pole Dancing Club"
    GSDC = "Student Dance Company"
    SELF_DEFENCE = "Self Defence"
    EUROPEAN = "European"
    EUROVISION = "Eurovision"
    EXPLORATION = "Exploration"
    X_REBELION = "Extinction Rebellion"
    FILM = "Film"
    WAR_FILM = "War Film"
    GAMING = "Gaming"
    GIN = "Gin"
    GIST_THEATRE = "Glasgow International Student Theatre"
    GUDEV = "Game Design and Development"
    GREENS = "Scottish Greens"
    GIST = "Glasgow Insight into Science and Technology"
    HARM = "Harm Reduction"
    HARRY_POTTER = "Harry Potter"
    HISTORY = "History"
    JAPAN = "Japan"
    JEWISH = "Jewish"
    JUGGLING = "Juggling at GU"
    JANE_AUSTEN = "Students of a Jane Austen Persuasion"
    KPOP = "K-Pop"
    KOREAN = "Korean"
    GULGBTQ = "GULGBTQ+"
    MANGA = "Manga and Anime"
    MARXISTS = "Marxists"
    MATURE = "Mature Students Association"
    MORGUL = "GU Rock and Metal"
    MUSIC = "Music Club"
    MUSLIM = "Muslim Student Association"
    OPERA = "Opera"
    ONEKIND = "Onekind"
    PHILOSOPHY = "Philosophy"
    PHYSICS = "Physics"
    PLASTIC_SURGERY = "Plastic Surgery"
    POLITICS = "Politics"
    QUIZ = "Quiz"
    REAL_ALE = "Real Ale"
    ROBOTICS = "Robotics"
    SCREENWRITING = "Screenwriting"
    SEWING = "Sewing"
    SEXPRESSION = "Sexpression"
    SHAKESPEARE = "Shakespeare"
    SHREK = "Shrek"
    SIGN_LANGUAGE = "Sign Language"
    SOCIALIST = "Socialist"
    WOMEN_TECH = "Society for Women in Tech"
    STAG = "Student Theatre at Glasgow"
    SWAG = "Successful Women at Glasgow"
    SURGICAL = "Surgical"
    GUSTS = "Sustainable Technologies"
    IMPROV = "Improv Teatime"
    TEA = "Tea"
    TECH = "Tech"
    TEDX = "Tedx"
    TENNENTS = "Tennents Lager Appreciation"
    RACING = "UGRacing"
    VEGAN = "Vegan"
    WALKING = "Walking"
    WINE = "Wine"
    WISTEM = "Women in Science, Tech, Engineering and Maths"
    CREATIVE_WRITING = "Creative Writing"
    RUNNING = "Hares and Hounds Running Club"
    KARATE = "Karate"
    HOCKEY = "Hockey"
    RUGBY = "Rugby Football"
    SAILING = "Sailing Club"
    SKI_SNOWBOARD = "Ski & Snowboard Club"
    AIKIDO = "Aikido"
    AMERICAN_FOOTBALL = "American Football"
    ATHLETICS = "Athletics"
    BADMINTON = "Badminton"
    BASKETBALL = "Basketball"
    ROWING = "Boat/Rowing"
    BOXING = "Boxing"
    CANOE = "Canoe"
    CHEERLEADING = "Cheerleading"
    CRICKET = "Cricket"
    CURLING = "Curling"
    CYCLING = "Cycling"
    FENCING = "Fencing"
    FOOTBALL = "Football"
    GAELIC_FOOTBALL = "Gaelic Football"
    GOLF = "Golf"
    GYMNASTICS = "Gymnastics"
    JUDO = "Judo"
    KENDO = "Kendo"
    LACROSSE = "Lacrosse"
    MOUNTENEERING = "Mountaineering"
    MUAY_THAI = "Muay Thai Boxing"
    NETBALL = "Netball"
    POTHOLING = "Potholing (Caving)"
    RIDING = "Riding/Equestrian"
    SHINTY = "Shinty"
    SHORINJI = "Shorinji Kempo"
    SKYDIVE = "Skydive"
    SQUASH = "Squash"
    SURF = "Surf"
    SWIMMING_WATERPOLO = "Swimming and Waterpolo"
    TAEKWONDO = "Taekwondo"
    TRAMPOLINE = "Trampoline"
    TRIATHLON = "Triathlon"
    FRISBEE = "Ultimate Frisbee"
    VOLLEYBALL = "Volleyball"
    WAKEBOARDING = "Wakeboarding"
    WEIGHTLIFTING = "Weightlifting"
    YOGA = "Yoga"
    TABLE_TENNIS = "Table Tennis"
    TENNIS = "Tennis"

    # Canonical list of every society label above (each exactly once); used to
    # seed the Society table and to build form choices.
    ALL_CHOICES = [ACAPELLA, ART_LIFE_DRAWING, ART_APPRECIATION, ASTROLOGY, ASTRONOMY, BAD_MOVIE, BAKING,
                   BALLROOM_LATIN, BEEKEEPING, BHAKTI_YOGA, BIG_BAND, GUBES, BUDDHIST, BUSINESS, FBP, BHF, GUCVS, CATHOLIC,
                   CECILIAN, CHICKEN, CHINESE, CHOCOLATE, CHRISTIAN, COMIC, COMMUNIST, COMP_PROGRAMMING, CRAFTS, CHOICE,
                   OPEN_CAGES, GUSCDC, CLIMATE, DANCE4WATER, DANCEMANIA, DISNEY, DOC_WHO, DOCUMENTARY, DRAG, POLE_DANCING,
                   GSDC, SELF_DEFENCE, EUROPEAN, EUROVISION, EXPLORATION, X_REBELION, FILM, WAR_FILM, GAMING, GIN,
                   GIST_THEATRE, GUDEV, GREENS, GIST, HARM, HARRY_POTTER, HISTORY, JAPAN, JEWISH, JUGGLING, JANE_AUSTEN,
                   KPOP, KOREAN, GULGBTQ, MANGA, MARXISTS, MATURE, MORGUL, MUSIC, MUSLIM, OPERA, ONEKIND, PHILOSOPHY,
                   PHYSICS, PLASTIC_SURGERY, POLITICS, QUIZ, REAL_ALE, ROBOTICS, SCREENWRITING, SEWING, SEXPRESSION,
                   SHAKESPEARE, SHREK, SIGN_LANGUAGE, SOCIALIST, WOMEN_TECH, STAG, SWAG, SURGICAL, GUSTS, IMPROV, TEA,
                   TECH, TEDX, TENNENTS, RACING, VEGAN, WALKING, WINE, WISTEM, CREATIVE_WRITING, RUNNING, KARATE, HOCKEY,
                   RUGBY, SAILING, SKI_SNOWBOARD, AIKIDO, AMERICAN_FOOTBALL, ATHLETICS, BADMINTON, BASKETBALL, ROWING,
                   BOXING, CANOE, CHEERLEADING, CRICKET, CURLING, CYCLING, FENCING, FOOTBALL, GAELIC_FOOTBALL, GOLF,
                   GYMNASTICS, JUDO, KENDO, LACROSSE, MOUNTENEERING, MUAY_THAI, NETBALL, POTHOLING, RIDING, SHINTY,
                   SHORINJI, SKYDIVE, SQUASH, SURF, SWIMMING_WATERPOLO, TAEKWONDO, TRAMPOLINE, TRIATHLON, FRISBEE,
                   VOLLEYBALL, WAKEBOARDING, WEIGHTLIFTING, YOGA, TABLE_TENNIS, TENNIS]

    @staticmethod
    def get_choices():
        """ Static method to get all society choices above. """
        # Django-style (value, label) pairs, alphabetised by value.
        return sorted([(v, v) for v in SocietyChoices.ALL_CHOICES], key=lambda x: x[0])
class InterestChoices:
    """ Interests to populate the Interest table with and let users choose from """
    YOGA = "Yoga"
    FOOTBALL = "Football"
    TENNIS = "Tennis"
    ART = "Art"
    PHOTOGRAPHY = "Photography"
    SWIMMING = "Swimming"
    ACTING = "Acting"
    ANIMATION = "Animation"
    BAKING = "Baking"
    BLOGGING = "Blogging"
    BOWLING = "Bowling"
    CAR_FIXING = "Car Fixing"
    CARS = "Cars"
    CHEESEMAKING = "Cheesemaking"
    STAMP_COLLECTING = "Stamp Collecting"
    PROGRAMMING = "Programming"
    COOKING = "Cooking"
    CRAFT = "Craft"
    DANCE = "Dance"
    DRAWING = "Drawing"
    PAINTING = "Painting"
    FASHION = "Fashion"
    FLOWER_ARRANGING = "Flower Arranging"
    LANGUAGES = "Languages"
    GAMING = "Gaming"
    HACKING = "Hacking"
    KARAOKE = "Karaoke"
    KNITTING = "Knitting"
    MUSIC = "Music"
    FILMS = "Films"
    MAKEUP = "Makeup"
    PODCASTS = "Podcasts"
    POTTERY = "Pottery"
    RAPPING = "Rapping"
    SOAPMAKING = "Soapmaking"
    VIDEO_EDITING = "Video Editing"
    WRITING = "Writing"
    POETRY = "Poetry"
    SPORTS = "Sports"
    GYM = "Gym"
    BIRDWATCHING = "Birdwatching"
    BODYBUILDING = "Bodybuilding"
    CAMPING = "Camping"
    CANOEING = "Canoeing"
    CYCLING = "Cycling"
    TRAVEL = "Travel"
    GRAFFITI = "Graffiti"
    HIKING = "Hiking"
    HORSEBACK_RIDING = "Horseback Riding"
    HUNTING = "Hunting"
    MARTIAL_ARTS = "Martial Arts"
    PARKOUR = "Parkour"
    SCUBA_DIVING = "Scuba Diving"
    SHOPPING = "Shopping"
    SKATEBOARDING = "Skateboarding"
    SKIING = "Skiing"
    SKYDIVING = "Skydiving"
    SURVIVALISM = "Survivalism"
    FARMING = "Farming"
    KNIFE_COLLECTING = "Knife Collecting"
    BOXING = "Boxing"
    FOOD = "Food"
    EATING = "Eating"
    DEBATE = "Debate"
    POKER = "Poker"
    FISHING = "Fishing"
    MEDITATION = "Meditation"
    READING = "Reading"
    LEARNING = "Learning"
    PEOPLE_WATCHING = "People Watching"
    GARDENING = "Gardening"
    ANIMAL_CARE = "Animal Care"
    SINGING = "Singing"
    JEWELRY_MAKING = "Jewelry Making"
    SOCIAL_MEDIA = "Social Media"
    INSTAGRAM = "Instagram"
    TWITTER = "Twitter"
    YOUTUBE = "Youtube"
    TIKTOK = "TikTok"
    GOLF = "Golf"
    HEALTH_FITNESS = "Health and Fitness"
    WINE_TASTING = "Wine Tasting"
    PET_TRAINING = "Pet Training"
    PARTYING = "Partying"
    HOSTING_PARTIES = "Hosting Parties"
    ALCOHOL = "Alcohol"
    CANNABIS = "Cannabis"
    EXTREME_SPORTS = "Extreme Sports"
    SOCIAL_WORK = "Social Work"
    NAIL_ART = "Nail Art"
    INTERIOR_DESIGN = "Interior Design"
    DIET_NUTRITION = "Diet and Nutrition"
    SCULPTURE = "Sculpture"
    ASTROLOGY = "Astrology"
    TAROT_CARD_READING = "Tarot Card Reading"
    CANDLE_MAKING = "Candle Making"
    COMIC_BOOKS = "Comic Books"
    VOLUNTEERING = "Volunteering"
    VEGAN = "Vegan"
    POLITICS = "Politics"
    ENVIRONMENT = "Environment"
    ANIMALS = "Animals"

    # Canonical list of every interest label above; used to seed the
    # Interest table and to build form choices.
    ALL_CHOICES = [YOGA, FOOTBALL, TENNIS, ART, PHOTOGRAPHY, SWIMMING, ACTING, ANIMATION, BAKING, BLOGGING,
                   BOWLING, CAR_FIXING, CARS, CHEESEMAKING, STAMP_COLLECTING, PROGRAMMING, COOKING, CRAFT, DANCE, DRAWING,
                   PAINTING, FASHION, FLOWER_ARRANGING, LANGUAGES, GAMING, HACKING, KARAOKE, KNITTING, MUSIC, FILMS, MAKEUP,
                   PODCASTS, POTTERY, RAPPING, SOAPMAKING, VIDEO_EDITING, WRITING, POETRY, SPORTS, GYM, BIRDWATCHING, BODYBUILDING,
                   CAMPING, CANOEING, CYCLING, TRAVEL, GRAFFITI, HIKING, HORSEBACK_RIDING, HUNTING, MARTIAL_ARTS, PARKOUR,
                   SCUBA_DIVING, SHOPPING, SKATEBOARDING, SKIING, SKYDIVING, SURVIVALISM, FARMING, KNIFE_COLLECTING, BOXING,
                   FOOD, EATING, DEBATE, POKER, FISHING, MEDITATION, READING, LEARNING, PEOPLE_WATCHING, GARDENING, ANIMAL_CARE,
                   SINGING, JEWELRY_MAKING, SOCIAL_MEDIA, INSTAGRAM, TWITTER, YOUTUBE, TIKTOK, GOLF, HEALTH_FITNESS, WINE_TASTING,
                   PET_TRAINING, PARTYING, HOSTING_PARTIES, ALCOHOL, CANNABIS, EXTREME_SPORTS, SOCIAL_WORK, NAIL_ART, INTERIOR_DESIGN,
                   DIET_NUTRITION, SCULPTURE, ASTROLOGY, TAROT_CARD_READING, CANDLE_MAKING, COMIC_BOOKS, VOLUNTEERING, VEGAN,
                   POLITICS, ENVIRONMENT, ANIMALS]

    @staticmethod
    def get_choices():
        """ Static method to get all interest choices above. """
        # Django-style (value, label) pairs, alphabetised by value.
        return sorted([(v,v) for v in InterestChoices.ALL_CHOICES], key=lambda x: x[0])
class GenderChoices:
    """Gender options a user can select, stored as one-letter codes."""
    FEMALE = "F"
    MALE = "M"
    NON_BINARY = "N"

    # Stored codes, in display order.
    ALL_CHOICES = [FEMALE, MALE, NON_BINARY]

    @staticmethod
    def get_choices():
        """Return (code, display label) pairs for every gender option."""
        display_names = ("Female", "Male", "Non-Binary")
        return list(zip(GenderChoices.ALL_CHOICES, display_names))
class LibraryFloorChoices:
    """Favourite library floor options a user can pick from."""
    LEVEL_1 = "Level 1"
    LEVEL_2 = "Level 2"
    LEVEL_3 = "Level 3"
    LEVEL_4 = "Level 4"
    LEVEL_5 = "Level 5"
    LEVEL_6 = "Level 6"
    LEVEL_7 = "Level 7"
    LEVEL_8 = "Level 8"
    LEVEL_9 = "Level 9"
    LEVEL_10 = "Level 10"
    LEVEL_11 = "Level 11"
    LEVEL_12 = "Level 12"
    READINGROOM = "Round Reading Room"
    OTHER = "Other"

    # Every floor option in display order (deliberately not alphabetised).
    ALL_CHOICES = [LEVEL_1, LEVEL_2, LEVEL_3, LEVEL_4, LEVEL_5, LEVEL_6, LEVEL_7,
                   LEVEL_8, LEVEL_9, LEVEL_10, LEVEL_11, LEVEL_12, READINGROOM, OTHER]

    @staticmethod
    def get_choices():
        """Return (value, label) pairs for every floor, preserving list order."""
        return [(floor, floor) for floor in LibraryFloorChoices.ALL_CHOICES]
class YearInChoices:
    """Year-of-study options, from access courses through to PhD."""
    ACCESS = "Access"
    YEAR1 = "1st Year"
    YEAR2 = "2nd Year"
    YEAR3 = "3rd Year"
    YEAR4 = "4th Year"
    MASTERS = "Masters"
    MPHIL = "MPhil"
    PHD = "PHD"

    # Options in ascending order of study stage.
    ALL_CHOICES = [ACCESS, YEAR1, YEAR2, YEAR3, YEAR4, MASTERS, MPHIL, PHD]

    @staticmethod
    def get_choices():
        """Return (value, label) pairs for every study year, preserving order."""
        return [(year, year) for year in YearInChoices.ALL_CHOICES]
257cafd65e928a708dd8f5cac861e66d2afceeb9 | Python | vin-lz/udacity-full-stack-web-developer-nanodegree-projects | /FSND-Virtual-Machine/vagrant/logs_analysis/logs_analysis.py | UTF-8 | 2,940 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python3
import psycopg2
DBNAME = "news"
def get_popular_articles():
    """Print the three most popular articles with view counts from 'news',
    in descending order of views.

    Fix: the connection is now closed in a finally block, so a failing query
    no longer leaks the connection.
    """
    db = psycopg2.connect(database=DBNAME)
    try:
        c = db.cursor()
        # Match log request paths against article slugs and count hits.
        c.execute("""SELECT articles.title, COUNT(*) AS num FROM articles, log
                  WHERE log.path LIKE '%' || articles.slug
                  GROUP BY articles.title
                  ORDER BY num DESC LIMIT 3;""")
        popular_articles = c.fetchall()
    finally:
        db.close()
    print("=============================================================")
    print("1. What are the most popular three articles of all time?")
    for item in popular_articles:
        print(u"\u2022 " + item[0].title() + " — " + str(item[1]) + " views")
    print("=============================================================")
def get_popular_authors():
    """Print authors ranked by total views across their articles from 'news',
    in descending order.

    Fixes: the connection is closed in a finally block, and the misleading
    local name `popular_articles` is renamed to `popular_authors`.
    """
    db = psycopg2.connect(database=DBNAME)
    try:
        c = db.cursor()
        # Sum per-article view counts per author via a subquery of article views.
        c.execute("""SELECT authors.name, SUM(popular_articles.num) AS t_num
                  FROM (SELECT articles.title, COUNT(*) AS num FROM articles, log
                  WHERE log.path LIKE '%' || articles.slug
                  GROUP BY articles.title) as popular_articles, articles, authors
                  WHERE popular_articles.title = articles.title
                  AND authors.id = articles.author
                  GROUP BY authors.name
                  ORDER BY t_num DESC;""")
        popular_authors = c.fetchall()
    finally:
        db.close()
    print("=============================================================")
    print("2. Who are the most popular article authors of all time?")
    for item in popular_authors:
        print(u"\u2022 " + item[0] + " — " + str(item[1]) + " views")
    print("=============================================================")
def get_error_dates():
    """Print each day on which more than 1% of requests led to errors,
    together with that day's error rate.

    Fixes: the connection is closed in a finally block, and the misleading
    local name `popular_articles` is renamed to `error_days`.
    """
    db = psycopg2.connect(database=DBNAME)
    try:
        c = db.cursor()
        # Compare per-day error counts (status != '200 OK') to per-day totals.
        c.execute("""SELECT total.dates,
                  ROUND(CAST(errors.num::Float / total.num * 100 AS NUMERIC),2) FROM
                  (SELECT DATE(time) as dates, COUNT(*) AS num FROM log
                  WHERE status!='200 OK' GROUP BY DATE(time)) AS errors,
                  (SELECT DATE(time) as dates, COUNT(*) AS num FROM log
                  GROUP BY DATE(time)) AS total
                  WHERE errors.dates = total.dates
                  AND errors.num::float / total.num > 0.01;""")
        error_days = c.fetchall()
    finally:
        db.close()
    print("=============================================================")
    print("3. On which days did more than 1% of requests lead to errors?")
    for item in error_days:
        print(u"\u2022 " + str(item[0]) + " — " + str(item[1]) + "% errors")
    print("=============================================================")
# Run all three reports in sequence when the script is executed.
get_popular_articles()
get_popular_authors()
get_error_dates()
| true |
6ee74a2ba3060647fa0aec393107e9927da775ea | Python | arigemini/postman | /utils/copy_emails_for_training.py | UTF-8 | 2,119 | 2.78125 | 3 | [] | no_license | import collections
import os
import shutil
def mkdir_p(directory):
    """Create *directory* (including parents), doing nothing if it exists."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
class CopyEmails:
    """Copies emails addressed to selected recipients into per-recipient
    directories for training.

    The input file is a CSV of (email_file_path, from, to) rows; each matching
    email file is copied to <output_dir>/<to with @ -> _at_>/<sequence number>.
    """
    def __init__(self, email_from_to_file):
        # Path to the (email,from,to) CSV index file.
        self.email_from_to_file = email_from_to_file

    def process(self, email_ids, output_dir):
        """Copy every email whose recipient is in `email_ids` into output_dir.

        :param email_ids: set of recipient addresses to keep
        :param output_dir: root directory for the per-recipient folders
        """
        # Per-directory sequence counter; copied files are named 0, 1, 2, ...
        # in the order they appear in the index file.
        self.directory_to_counter = collections.defaultdict(int)
        with open(self.email_from_to_file, 'r') as f:
            for idx, line in enumerate(f):
                # Progress report every 1000 index lines (Python 2 print).
                if idx % 1000 == 0:
                    print "{0}: {1}".format(idx, line)
                # NOTE(review): assumes exactly three comma-separated fields
                # and no commas inside them — confirm against the index format.
                email, _, to = line.strip().split(',')
                if to in email_ids:
                    to_dir = os.path.join(output_dir, to.replace("@", "_at_"))
                    to_file = os.path.join(to_dir, str(self.directory_to_counter[to_dir]))
                    self.directory_to_counter[to_dir] += 1
                    mkdir_p(to_dir)
                    shutil.copy(email, to_file)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Copy emails for training into a different directory")
parser.add_argument("--email_from_to_file", type=str, required=True, help="input (email,from,to) file")
parser.add_argument("--output_dir", type=str, required=True, help="directory to put the emails into")
args = parser.parse_args()
# Sorted in decreasing number of incoming emails
email_ids = ['gerald.nemec@enron.com',
'kenneth.lay@enron.com',
'sara.shackleton@enron.com',
'jeff.skilling@enron.com',
'jeff.dasovich@enron.com',
'tana.jones@enron.com',
'rick.buy@enron.com',
'barry.tycholiz@enron.com',
'lcampbel@enron.com',
'tracy.geaccone@enron.com',
'joe.parks@enron.com',
'sally.beck@enron.com',
'mark.whitt@enron.com',
'matt.smith@enron.com',
'kay.mann@enron.com',
'j.kaminski@enron.com',
'elizabeth.sager@enron.com',
'don.baughman@enron.com',
'kam.keiser@enron.com',
'jason.wolfe@enron.com']
copy_emails = CopyEmails(args.email_from_to_file)
copy_emails.process(set(email_ids), args.output_dir)
| true |
1522e2635bbaa2b91f28ab268298afb85219aac5 | Python | mzhang367/dcwh | /loss.py | UTF-8 | 1,700 | 2.71875 | 3 | [] | no_license | import torch
import torch.nn as nn
class ClassWiseLoss(nn.Module):
    '''
    Normalized gaussian-based cross-entropy loss.

    Each class is represented by a center vector; a sample's loss is the
    negative log of a per-sample softmax over gaussian kernels of its squared
    distances to all class centers, evaluated at its own class.
    '''
    def __init__(self, num_classes, bit_length, inv_var=1, update_grad=False, use_gpu=True):
        """
        :param num_classes: number of target classes (rows of the center matrix)
        :param bit_length: dimensionality of the hash/feature space
        :param inv_var: inverse variance (scale) of the gaussian kernel
        :param update_grad: if True, centers are learnable Parameters updated by
            gradient descent; if False, centers must be passed to forward()
        :param use_gpu: whether tensors created here and in forward() go to CUDA
        """
        super(ClassWiseLoss, self).__init__()
        self.num_classes = num_classes
        self.bits = bit_length
        self.use_gpu = use_gpu
        self.sigma = inv_var
        self.update = update_grad  # update by intra-class hashing outputs or gradient descent
        if update_grad:
            # Bug fix: honour use_gpu instead of unconditionally calling
            # .cuda(), which crashed on CPU-only machines.
            centers = torch.randn(self.num_classes, self.bits)
            if use_gpu:
                centers = centers.cuda()
            self.centers = nn.Parameter(centers)

    def forward(self, x, labels, centroids=None):
        """
        Args:
            x: batch_size * feat_dim
            labels: (batch_size, )
            centroids: num_classes * feat_dim class centers; required (and only
                used) when update_grad=False
        """
        if not self.update:
            self.centers = centroids
        batch_size = x.size(0)
        # Squared euclidean distances ||x||^2 + ||c||^2 - 2*x.c, shape
        # (batch_size, num_classes); the cross term is added in-place below.
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
        # Gaussian kernel, normalised per sample (softmax over classes);
        # the 1e-6 epsilons guard against division by / log of zero.
        numer = torch.exp(-0.5 * self.sigma * distmat)
        denumer = numer.sum(dim=1, keepdim=True)
        dist_div = numer / (denumer + 1e-6)
        classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            classes = classes.cuda()
        # One-hot mask selecting each sample's own class column.
        labels = labels.view(-1, 1).expand(batch_size, self.num_classes)
        mask = labels.eq(classes.expand(batch_size, self.num_classes))
        dist_log = torch.log(dist_div + 1e-6) * mask.float()
        loss = -dist_log.sum() / batch_size
        return loss
| true |
f217f16c88304bda5179d9076647ed592875a589 | Python | kennykat/CourseWork-TTA | /Python/pythonCourse/PythonInADay2/CSVdrill.py | UTF-8 | 652 | 3.3125 | 3 | [] | no_license | # import os
import os, csv
# the path to the script
currentPath = os.path.dirname(os.path.abspath(__file__))
print currentPath
# make the spreadsheet path
outputCsv = currentPath + '/spreadsheet.csv'
print outputCsv
# open the file
csvFile = open(outputCsv, "wb")
# # inproper way of writing to a file
# csvFile.write('Testing')
# csvFile.close()
# create writer object
writer = csv.writer(csvFile, delimiter=',')
# remember to use module csv
# data to go in csv
row_1 = [1, "Row 1", 123]
row_2 = [2, "Row 2", 456]
row_3 = [3, "Row 3", 789]
# .writerow()
# write rows to csv
writer.writerow(row_1)
writer.writerow(row_2)
writer.writerow(row_3)
| true |
fe963cbf9ba43b08175c63a3981e5d15c34e815d | Python | detectAna/detectAna | /exploratory_analysis/TweetAnalyzer.py | UTF-8 | 7,309 | 3.015625 | 3 | [] | no_license | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import json
import jsonlines
from TweetPreprocessor import TweetPreprocessor
from timeit import default_timer as timer
# Default JSON-lines file of tweets loaded when no tweets are passed in.
TWEETS_FILE = 'user_tweets.jsonl'
# When True, prints dataframe heads and per-model topic dumps.
DEBUG = True
#Building features from raw data
class TweetAnalyzer:
    """Loads and cleans tweets into a DataFrame, builds TF-IDF features and
    fits LDA / NMF / LSI topic models over them.

    Typical usage: construct, then call vectorize() before top_n(),
    topic_model() or plot_topic_model_SVD().
    """
    def __init__(self, tweets=None):
        """Load tweets from TWEETS_FILE (JSON lines) unless an iterable of
        tweet dicts is supplied, then build and clean the DataFrame.

        NOTE(review): if loading fails, self.tweets is never set and the
        `columns = [*self.tweets[0]]` line below raises AttributeError.
        """
        if not tweets:
            try:
                with jsonlines.open(TWEETS_FILE) as reader:
                    self.tweets = [tweet for tweet in reader]
                print('Loaded {} tweets fron {}'.format(
                    len(self.tweets), TWEETS_FILE))
            except FileNotFoundError:
                print("Can't find the tweets file")
            except Exception as e:
                print(e)
        else:
            self.tweets = tweets
        # Extract the keys from the first tweet and spread them into a list
        columns = [*self.tweets[0]]
        # Filled in by vectorize(); top_n() checks these for None.
        self.tfidf_result = None
        self.feature_names = None
        self.df = pd.DataFrame(self.tweets, columns=columns)
        self.clean_tweets()
        if DEBUG:
            print(self.df.head())

    def clean_tweets(self):
        """Strip links, mentions, hashtags, RT markers and special characters
        from the text column, in place, and report elapsed time."""
        start = timer()
        self.df.text = self.df.text.apply(TweetPreprocessor.strip_links)
        self.df.text = self.df.text.apply(TweetPreprocessor.strip_mentions)
        self.df.text = self.df.text.apply(TweetPreprocessor.strip_hashtags)
        self.df.text = self.df.text.apply(TweetPreprocessor.strip_rt)
        self.df.text = self.df.text.apply(
            TweetPreprocessor.remove_special_characters)
        end = timer()
        print('Cleaned tweets in {}'.format(end - start))

    def vectorize(self):
        """Fit a TF-IDF vectorizer (English stop words) over the cleaned text,
        storing the sparse matrix and the vocabulary for later use."""
        self.vectorizer = TfidfVectorizer(stop_words='english')
        self.tfidf_result = self.vectorizer.fit_transform(self.df['text'])
        self.feature_names = self.vectorizer.get_feature_names()

    def top_n(self, top=100):
        """Print the `top` highest-scoring TF-IDF terms and save a bar chart
        of their scores to 'Top_<top>'. Requires vectorize() first."""
        if self.feature_names is None or self.tfidf_result is None:
            print('Must run vectorize() first before calling top_n')
            return
        # Total TF-IDF mass per term across all documents.
        scores = zip(self.feature_names,
                     np.asarray(self.tfidf_result.sum(axis=0)).ravel())
        sorted_scores = sorted(scores, key=lambda x: x[1], reverse=True)
        labels, scores = [], []
        # Get the scores and labels of the top 100 tweets
        for item in sorted_scores[:top]:
            print("{0:50} Score: {1}".format(item[0], item[1]))
            # sns.distplot(item[1], label=item[0])
            labels.append(item[0])
            scores.append(item[1])
        index = np.arange(len(scores))
        plt.bar(index, scores)
        plt.xlabel('Word', fontsize=12)
        plt.ylabel('TFIDF Score', fontsize=12)
        plt.xticks(index, labels, fontsize=8, rotation=90)
        plt.title('Top {} features'.format(top))
        plt.savefig('Top_{}'.format(top))

    def topic_model(self, num_topics=10):
        """Fit LDA, NMF and LSI models with `num_topics` topics over the
        TF-IDF matrix; with DEBUG, dump each model's top topic terms.

        NOTE(review): `n_topics` is the legacy sklearn LDA keyword (renamed
        to `n_components` in newer releases) — confirm the pinned version.
        """
        if DEBUG:
            print('Performing topic modeling with {} topics'.format(num_topics))
        # Build a Latent Dirichlet Allocation Model
        self.lda_model = LatentDirichletAllocation(n_topics=num_topics, max_iter=10, learning_method='online')
        lda_Z = self.lda_model.fit_transform(self.tfidf_result)
        print('LDA shape: ')
        print(lda_Z.shape)  # (NO_DOCUMENTS, NO_TOPICS)
        # Build a Non-Negative Matrix Factorization Model
        self.nmf_model = NMF(n_components=num_topics)
        nmf_Z = self.nmf_model.fit_transform(self.tfidf_result)
        print('NMF shape: ')
        print(nmf_Z.shape)  # (NO_DOCUMENTS, NO_TOPICS)
        # Build a Latent Semantic Indexing Model
        self.lsi_model = TruncatedSVD(n_components=num_topics)
        lsi_Z = self.lsi_model.fit_transform(self.tfidf_result)
        print('LSI shape: ')
        print(lsi_Z.shape)  # (NO_DOCUMENTS, NO_TOPICS)
        if DEBUG:
            # Let's see how the first document in the corpus looks like in different topic spaces
            print("LDA Model:")
            self.print_topics(self.lda_model)
            print("=" * 20)
            print("NMF Model:")
            self.print_topics(self.nmf_model)
            print("=" * 20)
            print("LSI Model:")
            self.print_topics(self.lsi_model)
            print("=" * 20)

    # Helper function to print topics
    def print_topics(self, model, top_n=10):
        """Print each topic's `top_n` strongest (term, weight) pairs."""
        for idx, topic in enumerate(model.components_):
            print("Topic %d:" % (idx))
            # argsort ascending, so take the last top_n entries in reverse.
            print([(self.vectorizer.get_feature_names()[i], topic[i])
                   for i in topic.argsort()[:-top_n - 1:-1]])

    def plot_topic_model_SVD(self):
        """Project the TF-IDF term space to 2D with truncated SVD and render
        an interactive labelled scatter plot via bokeh (notebook output)."""
        from bokeh.io import push_notebook, show, output_notebook
        from bokeh.plotting import figure
        from bokeh.models import ColumnDataSource, LabelSet
        output_notebook()
        self.svd = TruncatedSVD(n_components=2)
        # Transpose: rows become terms so each word gets a 2D coordinate.
        words_2d = self.svd.fit_transform(self.tfidf_result.T)
        df = pd.DataFrame(columns=['x', 'y', 'word'])
        df['x'], df['y'], df['word'] = words_2d[:,0], words_2d[:,1], self.feature_names
        source = ColumnDataSource(ColumnDataSource.from_df(df))
        labels = LabelSet(x="x", y="y", text="word", y_offset=8,
                          text_font_size="8pt", text_color="#555555",
                          source=source, text_align='center')
        plot = figure(plot_width=600, plot_height=600)
        plot.circle("x", "y", size=12, source=source, line_color="black", fill_alpha=0.8)
        plot.add_layout(labels)
        show(plot, notebook_handle=True)
# Script entry: load + clean tweets, build TF-IDF, fit and plot topic models.
ta = TweetAnalyzer()
ta.vectorize()
# ta.top_n(100)
ta.topic_model()
ta.plot_topic_model_SVD()
# best_threshold = -1
# best_accuracy = -1
# best_index = -1
# for index, result in enumerate(results):
# # df, features, threshold = result['df'], result['features'], result['threshold']
# features, threshold = result['features'], result['threshold']
# print('Fitting for threshold = {}'.format(threshold))
# # K-fold construction
# folds = 10
# kf = model_selection.KFold(n_splits=folds, shuffle=True)
# # K-fold cross validation and performance evaluation
# foldid = 0
# totacc = 0.
# ytlog = []
# yplog = []
# for train_index, test_index in kf.split(df.airline_sentiment):
# foldid += 1
# print("Starting Fold %d" % foldid)
# print("\tTRAIN:", len(train_index), "TEST:", len(test_index))
# X_train, X_test = features[train_index], features[test_index]
# y_train, y_test = df.airline_sentiment[train_index], df.airline_sentiment[test_index]
# clf.fit(X_train, y_train)
# y_pred = clf.predict(X_test)
# acc = accuracy_score(y_pred, y_test)
# totacc += acc
# ytlog += list(y_test)
# yplog += list(y_pred)
# print('\tAccuracy:', acc)
# print("Average Accuracy: %0.3f" % (totacc / folds,))
# if (totacc / folds) > best_accuracy:
# best_accuracy = totacc / folds
# best_threshold = threshold
# best_index = index
# print(classification_report(ytlog, yplog, target_names=df.airline_sentiment))
# print('\n\n The best accuracy was {} using a threshold of {}'.format(best_accuracy, best_threshold))
| true |
4c88c32b701c241e2ac98bc7130a85559d96d6fd | Python | zb5003/simulacra | /src/simulacra/utils.py | UTF-8 | 25,697 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | """
Simulacra utility sub-package.
Copyright 2017 Josh Karpel
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import datetime
import functools
import itertools
import multiprocessing
import subprocess
import os
import sys
import time
import logging
from typing import Optional, Union, NamedTuple, Callable, Iterable
import numpy as np
import psutil
from . import core
from .units import uround
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
LOG_FORMATTER = logging.Formatter('%(asctime)s [%(levelname)s] - %(message)s', datefmt = '%y/%m/%d %H:%M:%S') # global log format specification
key_value_arrays = collections.namedtuple('key_value_arrays', ('key_array', 'value_array'))
def field_str(obj, *fields, digits: int = 3):
    """
    Build a repr-like string from selected attributes of *obj*.

    Each entry in *fields* is either a plain attribute name or an
    ('attribute_name', 'unit_name') pair; for a pair, ``uround`` formats the
    value in the given unit.

    :param obj: the object to read attributes from
    :param fields: attribute names or (attribute, unit) pairs
    :param digits: the number of digits passed to ``uround``
    :return: a string of the form ``ClassName(a = ..., b = ...)``
    """
    def render(entry):
        # Try the (attribute, unit) pair form first; fall back to the
        # bare-attribute form when unpacking or unit formatting fails.
        try:
            attr_name, unit = entry
            try:
                return '{} = {} {}'.format(attr_name, uround(getattr(obj, attr_name), unit, digits = digits), unit)
            except TypeError:
                return '{} = {}'.format(attr_name, getattr(obj, attr_name))
        except (ValueError, TypeError):
            return '{} = {}'.format(entry, getattr(obj, entry))

    return '{}({})'.format(obj.__class__.__name__, ', '.join(render(field) for field in fields))
def dict_to_arrays(dct: dict):
    """
    Convert a dictionary to a pair of numpy arrays, in key-sorted order.

    :param dct: the dictionary to convert
    :type dct: dict
    :return: a ``key_value_arrays`` namedtuple of (key_array, value_array)
    """
    pairs = sorted(dct.items())
    keys = [key for key, _ in pairs]
    values = [value for _, value in pairs]

    return key_value_arrays(np.array(keys), np.array(values))
def get_now_str() -> str:
    """Return the current local time formatted as ``YY-MM-DD_HH-MM-SS``."""
    now = datetime.datetime.now()
    return now.strftime('%y-%m-%d_%H-%M-%S')
class LogManager:
    """
    A context manager to easily set up logging.

    Within a managed block, logging messages are intercepted if their
    highest-level logger is named in `logger_names`.  The object returned by
    the ``with`` statement can be used as a logger, with name given by
    `manual_logger_name`.
    """

    def __init__(self,
                 *logger_names,
                 manual_logger_name: str = 'simulacra',
                 stdout_logs: bool = True,
                 stdout_level = logging.DEBUG,
                 file_logs: bool = False,
                 file_level = logging.DEBUG,
                 file_name: Optional[str] = None,
                 file_dir: Optional[str] = None,
                 file_mode: str = 'a',
                 disable_level = logging.NOTSET):
        """
        Parameters
        ----------
        logger_names
            The names of loggers to intercept/modify.
        manual_logger_name
            The name of the logger returned by the ``with`` statement.
        stdout_logs : :class:`bool`
            If ``True``, log messages are echoed to stdout.
        stdout_level
            The lowest level for stdout log messages.
        file_logs : :class:`bool`
            If ``True``, log messages are written to a file.
        file_level
            The lowest level for file log messages.
        file_name : :class:`str`
            The filename for the log file, defaults to ``log__{timestamp}``.
            A ``.log`` suffix is appended if missing.
        file_dir : :class:`str`
            The directory for the log file, defaults to the current working directory.
        file_mode : :class:`str`
            The file mode to open the log file with, defaults to ``'a'`` (append).
        disable_level
            Log level to disable; short-circuits propagation of logs <= this level.
        """
        # NOTE: the original carried a second, contradictory docstring here as a
        # dead string statement; the two have been merged into the one above.
        self.logger_names = list(logger_names)
        if manual_logger_name is not None and manual_logger_name not in self.logger_names:
            self.logger_names = [manual_logger_name] + self.logger_names

        self.stdout_logs = stdout_logs
        self.stdout_level = stdout_level

        self.file_logs = file_logs
        self.file_level = file_level
        if file_name is None:
            file_name = f'log__{get_now_str()}'
        self.file_name = file_name
        if not self.file_name.endswith('.log'):
            self.file_name += '.log'
        if file_dir is None:
            file_dir = os.getcwd()
        self.file_dir = os.path.abspath(file_dir)
        self.file_mode = file_mode

        self.disable_level = disable_level

        self.logger = None

    def __enter__(self):
        """Replace the named loggers' handlers and levels, returning the manual logger."""
        logging.disable(self.disable_level)

        self.loggers = {name: logging.getLogger(name) for name in self.logger_names}

        new_handlers = [logging.NullHandler()]

        if self.stdout_logs:
            stdout_handler = logging.StreamHandler(sys.stdout)
            stdout_handler.setLevel(self.stdout_level)
            stdout_handler.setFormatter(LOG_FORMATTER)
            new_handlers.append(stdout_handler)

        if self.file_logs:
            log_file_path = os.path.join(self.file_dir, self.file_name)
            # The log message emitted here will not be included in the logger
            # being created by this context manager.
            ensure_dir_exists(log_file_path)
            file_handler = logging.FileHandler(log_file_path, mode = self.file_mode)
            file_handler.setLevel(self.file_level)
            file_handler.setFormatter(LOG_FORMATTER)
            new_handlers.append(file_handler)

        # Remember each logger's pre-existing configuration so that __exit__
        # can restore it exactly.
        self.old_levels = {name: logger.level for name, logger in self.loggers.items()}
        self.old_handlers = {name: logger.handlers for name, logger in self.loggers.items()}

        for logger in self.loggers.values():
            logger.setLevel(logging.DEBUG)
            logger.handlers = new_handlers

        return self.loggers[self.logger_names[0]]

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Restore the loggers to their pre-context state."""
        logging.disable(logging.NOTSET)

        for name, logger in self.loggers.items():
            logger.level = self.old_levels[name]
            logger.handlers = self.old_handlers[name]
ILLEGAL_FILENAME_CHARACTERS = ['<', '>', ':', '"', '/', '\\', '|', '?', '*']  # these characters should be stripped from file names before use


def strip_illegal_characters(string: str) -> str:
    """Remove any characters that cannot appear in file names from *string*."""
    kept = (char for char in string if char not in ILLEGAL_FILENAME_CHARACTERS)
    return ''.join(kept)
NearestEntry = collections.namedtuple('NearestEntry', ('index', 'value', 'target'))


def find_nearest_entry(array: np.ndarray, target: Union[float, int]) -> NamedTuple:
    """
    Find the entry of `array` closest to `target`.

    Parameters
    ----------
    array : :class:`numpy.ndarray`
        The array to search for `target` in.
    target
        The value to search for in `array`.

    Returns
    -------
    :class:`NearestEntry`
        A namedtuple ``(index, value, target)`` for the entry nearest the target.
    """
    arr = np.array(array)  # accept any array-like input
    nearest_index = np.argmin(np.abs(arr - target))

    return NearestEntry(nearest_index, arr[nearest_index], target)
def ensure_dir_exists(path):
    """
    Ensure that the directory tree to the path exists.

    If `path` has a file extension it is treated as a path to a file, and the
    directory containing that file is created; otherwise it is treated as a
    path to a directory, which is created along with its parents.

    Parameters
    ----------
    path : :class:`str`
        A path to a file or directory.

    Returns
    -------
    :class:`str`
        The path to the directory that was created (or that already existed).
    """
    # NOTE: the original had a second, dead docstring (a no-op string
    # statement) after this one; they have been merged.
    split_path = os.path.splitext(path)
    if split_path[0] != path:  # there was an extension, so path is a file
        path_to_make = os.path.dirname(split_path[0])
    else:  # no extension, so path is a directory
        path_to_make = split_path[0]
    os.makedirs(path_to_make, exist_ok = True)

    logger.debug('Ensured dir {} exists'.format(path_to_make))

    return path_to_make
def downsample(dense_x_array: np.ndarray,
               sparse_x_array: np.ndarray,
               dense_y_array: np.ndarray):
    """
    Downsample (dense_x_array, dense_y_array) to (sparse_x_array, sparse_y_array).

    The downsampling is performed by matching points from sparse_x_array to dense_x_array using find_nearest_entry. Use with caution!

    Parameters
    ----------
    dense_x_array : :class:`numpy.ndarray`
        A dense array of x values.
    sparse_x_array : :class:`numpy.ndarray`
        A sparse array of x values.
    dense_y_array : :class:`numpy.ndarray`
        A dense array of y values corresponding to `dense_x_array`.

    Returns
    -------
    :class:`numpy.ndarray`
        The sparsified y array.
    """
    # Initialize with NaN so unassigned entries are visibly missing.
    sparse_y_array = np.zeros(len(sparse_x_array), dtype = dense_y_array.dtype) * np.NaN

    for sparse_index, x in enumerate(sparse_x_array):
        # Nearest-neighbor match: one O(len(dense_x_array)) scan per sparse point.
        dense_index, _, _ = find_nearest_entry(dense_x_array, x)
        sparse_y_array[sparse_index] = dense_y_array[dense_index]

    return sparse_y_array
def run_in_process(func, args = (), kwargs = None):
    """
    Run `func` in a single-worker process pool and return its result.

    :param func: the function to run (must be picklable)
    :param args: positional arguments for the function
    :param kwargs: keyword arguments for the function
    :return: whatever ``func(*args, **kwargs)`` returns
    """
    kwargs = kwargs if kwargs is not None else {}

    with multiprocessing.Pool(processes = 1) as pool:
        result = pool.apply(func, args, kwargs)

    return result
def find_or_init_sim(spec, search_dir: Optional[str] = None, file_extension = '.sim'):
    """
    Try to load a :class:`simulacra.Simulation` by looking for a pickled :class:`simulacra.core.Simulation` named ``{search_dir}/{spec.file_name}.{file_extension}``.
    If that fails, create a new Simulation from `spec`.

    Parameters
    ----------
    spec : :class:`simulacra.core.Specification`
        The specification to load/build a simulation for.
    search_dir : str
        The directory to look in; defaults to the current working directory.
    file_extension : str
        The extension of the pickled simulation file.

    Returns
    -------
    :class:`simulacra.core.Simulation`
    """
    try:
        if search_dir is None:
            search_dir = os.getcwd()
        path = os.path.join(search_dir, spec.file_name + file_extension)
        sim = core.Simulation.load(file_path = path)
    except FileNotFoundError:
        # No pickled simulation on disk -- build a fresh one from the spec.
        sim = spec.to_simulation()

    return sim
def multi_map(function, targets, processes = None, **kwargs):
    """
    Map `function` over `targets` using a multiprocessing pool.

    `function` must take a single positional argument (an element of
    `targets`) and any number of keyword arguments, which must be the same
    for each target.

    Parameters
    ----------
    function : a callable
        The callable to apply to each of the `targets` (must be picklable).
    targets : an iterable
        An iterable of inputs.
    processes : :class:`int`
        The number of worker processes; defaults to roughly half the cores.
    kwargs
        Keyword arguments are passed to :func:`multiprocessing.pool.Pool.map`.

    Returns
    -------
    :class:`tuple`
        The results of applying the function to the targets, in order.
    """
    if processes is None:
        processes = max(int(multiprocessing.cpu_count() / 2) - 1, 1)

    with multiprocessing.Pool(processes = processes) as pool:
        results = pool.map(function, targets, **kwargs)

    return tuple(results)
class cached_property:
    """
    A property that is only computed once per instance and then replaces
    itself with an ordinary attribute. Deleting the attribute resets the
    property.

    Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
    """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')  # mirror the wrapped function's docstring
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself, not an instance -- return the descriptor.
            return self
        # Compute once and store the result in the instance dict under the
        # function's name; since this is a non-data descriptor, the instance
        # attribute shadows the descriptor on all later accesses.
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value
def method_dispatch(func):
    """Works the same as :func:`functools.singledispatch`, but uses the second argument instead of the first so that it can be used for instance methods."""
    dispatcher = functools.singledispatch(func)

    def wrapper(*args, **kw):
        # args[0] is self; dispatch on the type of the first real argument.
        return dispatcher.dispatch(args[1].__class__)(*args, **kw)

    wrapper.register = dispatcher.register  # expose the registration decorator
    functools.update_wrapper(wrapper, func)
    return wrapper
def hash_args_kwargs(*args, **kwargs):
    """Return a hash combining the positional and keyword arguments."""
    combined = args + tuple(kwargs.items())
    return hash(combined)
def memoize(func: Callable):
    """Memoize `func` by caching a dictionary of {input hash: output}."""
    cache = {}

    @functools.wraps(func)
    def memoized(*args, **kwargs):
        key = hash_args_kwargs(*args, **kwargs)
        try:
            return cache[key]
        except KeyError:
            cache[key] = func(*args, **kwargs)
            return cache[key]

    return memoized
def watcher(watch):
    """
    Returns a decorator that memoizes the result of a method call until the watcher function returns a different value.

    The watcher function is passed the instance that the original method is bound to.

    :param watch: a function which is called to check whether to recompute the wrapped function
    :return: a Watcher decorator
    """

    class Watcher:
        __slots__ = ('func', 'cached', 'watched', '__doc__')

        def __init__(self, func):
            self.func = func
            self.cached = {}   # {instance: last computed result}
            self.watched = {}  # {instance: watch(instance) at last computation}
            # NOTE(review): both dicts hold strong references to instances,
            # keeping them alive for the lifetime of the decorated method.
            self.__doc__ = func.__doc__

        def __str__(self):
            return 'Watcher wrapper over {}'.format(self.func.__name__)

        def __repr__(self):
            return 'watcher({})'.format(repr(self.func))

        def __call__(self, instance, *args, **kwargs):
            check = watch(instance)
            # Recompute only when the watched value changed for this instance.
            if self.watched.get(instance) != check:
                self.cached[instance] = self.func(instance, *args, **kwargs)
                self.watched[instance] = check
            return self.cached[instance]

        def __get__(self, instance, cls):
            # support instance methods
            return functools.partial(self.__call__, instance)

    return Watcher
def timed(func: Callable):
    """Decorator that logs (at ``DEBUG``) how long each call to `func` takes."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        started = datetime.datetime.now()
        result = func(*args, **kwargs)
        elapsed = datetime.datetime.now() - started

        logger.debug(f'Execution of {func} took {elapsed}')

        return result

    return wrapper
class BlockTimer:
    """A context manager that times the code in the ``with`` block. Print the :class:`BlockTimer` after exiting the block to see the results."""

    __slots__ = (
        'wall_time_start', 'wall_time_end', 'wall_time_elapsed',
        'proc_time_start', 'proc_time_end', 'proc_time_elapsed'
    )

    def __init__(self):
        # Wall-clock times (datetime objects / timedelta).
        self.wall_time_start = None
        self.wall_time_end = None
        self.wall_time_elapsed = None

        # Process times (floats, in seconds).
        self.proc_time_start = None
        self.proc_time_end = None
        self.proc_time_elapsed = None

    def __enter__(self):
        self.wall_time_start = datetime.datetime.now()
        self.proc_time_start = time.process_time()

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.wall_time_end = datetime.datetime.now()
        self.proc_time_end = time.process_time()

        self.wall_time_elapsed = self.wall_time_end - self.wall_time_start
        self.proc_time_elapsed = self.proc_time_end - self.proc_time_start

    def __str__(self):
        # Before __exit__ runs, only the start time is known.
        if self.wall_time_end is None:
            return 'Timer started at {}, still running'.format(self.wall_time_start)
        else:
            return 'Timer started at {}, ended at {}, elapsed time {}. Process time: {}.'.format(self.wall_time_start, self.wall_time_end, self.wall_time_elapsed, datetime.timedelta(seconds = self.proc_time_elapsed))
class Descriptor:
    """
    A generic descriptor that implements default descriptor methods for easy overriding in subclasses.

    The data is stored in the instance dictionary.
    """

    __slots__ = ('name',)

    def __init__(self, name):
        self.name = name  # key under which the value is stored in instance.__dict__

    def __get__(self, instance, cls):
        if instance is None:
            # Accessed on the class itself -- return the descriptor object.
            return self
        else:
            return instance.__dict__[self.name]

    def __set__(self, instance, value):
        instance.__dict__[self.name] = value

    def __delete__(self, instance):
        del instance.__dict__[self.name]
class RestrictedValues(Descriptor):
    """
    A descriptor that forces the attribute to have a certain set of possible values.

    If the value is not in the set of legal values a ValueError is raised.
    """

    __slots__ = ('name', 'legal_values')

    def __init__(self, name, legal_values = set()):
        # The mutable default argument is harmless here: set(legal_values)
        # below always makes a fresh copy, and the default is never mutated.
        self.legal_values = set(legal_values)

        super().__init__(name)

    def __set__(self, instance, value):
        if value not in self.legal_values:
            raise ValueError('Expected {} to be from {}'.format(value, self.legal_values))
        else:
            super().__set__(instance, value)
class Typed(Descriptor):
    """
    A descriptor that forces the attribute to have a certain type.

    If the value does not match the provided type a TypeError is raised.
    """

    __slots__ = ('name', 'legal_type')

    def __init__(self, name, legal_type = str):
        self.legal_type = legal_type  # type (or tuple of types) accepted by isinstance

        super().__init__(name)

    def __set__(self, instance, value):
        if not isinstance(value, self.legal_type):
            raise TypeError('Expected {} to be a {}'.format(value, self.legal_type))
        else:
            super().__set__(instance, value)
class Checked(Descriptor):
    """
    A descriptor that only allows setting with values that return True from a provided checking function.

    If the value does not pass the check a ValueError is raised.
    """

    __slots__ = ('name', 'check')

    def __init__(self, name, check = None):
        # Default check accepts every value.
        if check is None:
            check = lambda value: True
        self.check = check

        super().__init__(name)

    def __set__(self, instance, value):
        if not self.check(value):
            raise ValueError(f'Value {value} did not pass the check function {self.check} for attribute {self.name} on {instance}')
        else:
            super().__set__(instance, value)
def bytes_to_str(num: Union[float, int]) -> str:
    """
    Return a number of bytes as a human-readable string.

    The value is scaled to the largest unit that keeps it below 1024,
    up to petabytes.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, unit)
        num /= 1024.0
    # Fixed: the original fell off the loop and returned None for >= 1024 TB.
    return "%3.1f %s" % (num, 'PB')
def get_file_size(file_path: str):
    """Return the size (in bytes) of the file at `file_path`."""
    return os.path.getsize(file_path)
def get_file_size_as_string(file_path: str) -> str:
    """Return the size of the file at `file_path` as a human-readable string (or None if it is not a file)."""
    if os.path.isfile(file_path):
        return bytes_to_str(os.stat(file_path).st_size)
def try_loop(*functions_to_run,
             wait_after_success: datetime.timedelta = datetime.timedelta(hours = 1),
             wait_after_failure: datetime.timedelta = datetime.timedelta(minutes = 1),
             begin_text: str = 'Beginning loop',
             complete_text: str = 'Completed loop'):
    """
    Run the given functions in a constant loop.  This function never returns.

    :param functions_to_run: call these functions in order during each loop
    :param wait_after_success: a datetime.timedelta object specifying how long to wait after a loop completes
    :param wait_after_failure: a datetime.timedelta object specifying how long to wait after a loop fails (i.e., raises an exception)
    :param begin_text: a string to print at the beginning of each loop
    :type begin_text: str
    :param complete_text: a string to print at the end of each loop
    :type complete_text: str
    """
    while True:
        logger.info(begin_text)

        with BlockTimer() as timer:
            failed = False
            for f in functions_to_run:
                try:
                    f()
                except Exception as e:
                    # One failing function does not prevent the others from running.
                    logger.exception(f'Exception encountered while executing loop function {f}')
                    failed = True

        logger.info(complete_text + '. Elapsed time: {}'.format(timer.wall_time_elapsed))

        # A failure anywhere in the cycle shortens the wait before retrying.
        if failed:
            wait = wait_after_failure
            logger.info(f'Loop cycle failed, retrying in {wait_after_failure.total_seconds()} seconds')
        else:
            wait = wait_after_success
            logger.info(f'Loop cycle succeeded, next cycle in {wait_after_success.total_seconds()} seconds')

        time.sleep(wait.total_seconds())
def grouper(iterable: Iterable, n: int, fill_value = None) -> Iterable:
    """
    Collect data from `iterable` into fixed-length chunks of length `n`.

    The final chunk is padded with `fill_value` if the iterable runs out
    before filling it.
    See https://docs.python.org/3/library/itertools.html#itertools-recipes

    :param iterable: an iterable to chunk
    :param n: the size of the chunks
    :param fill_value: padding for an incomplete final chunk
    :return: an iterator of n-tuples
    """
    # n references to one shared iterator advance it in lockstep.
    shared_iterator = iter(iterable)
    return itertools.zip_longest(*([shared_iterator] * n), fillvalue = fill_value)
class SubprocessManager:
    """Context manager that opens a subprocess on entry and waits for it to finish on exit."""

    def __init__(self, cmd_string, **subprocess_kwargs):
        self.cmd_string = cmd_string            # argument list handed to Popen
        self.subprocess_kwargs = subprocess_kwargs
        self.name = self.cmd_string[0]          # executable name, used in log messages
        self.subprocess = None

    def __enter__(self):
        self.subprocess = subprocess.Popen(self.cmd_string,
                                           **self.subprocess_kwargs)
        logger.debug(f'Opened subprocess {self.name}')

        return self.subprocess

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            # Wait for the process to terminate and let it flush its pipes.
            self.subprocess.communicate()
            logger.debug(f'Closed subprocess {self.name}')
        except AttributeError:
            # self.subprocess is still None -- Popen never succeeded.
            logger.warning(f'Exception while trying to close subprocess {self.name}, possibly not closed')
def get_processes_by_name(process_name: str) -> Iterable[psutil.Process]:
    """
    Return an iterable of processes that match the given name.

    :param process_name: the name to search for
    :type process_name: str
    :return: an iterable of psutil Process instances
    """
    # NOTE(review): p.name() may raise if a process exits while iterating --
    # presumably acceptable for this tool; verify against psutil docs.
    return [p for p in psutil.process_iter() if p.name() == process_name]
def suspend_processes(processes: Iterable[psutil.Process]):
    """
    Suspend a list of processes, logging each one.

    Parameters
    ----------
    processes : iterable of psutil.Process
        The processes to suspend.
    """
    for p in processes:
        p.suspend()
        logger.info('Suspended {}'.format(p))
def resume_processes(processes: Iterable[psutil.Process]):
    """
    Resume a list of (previously suspended) processes, logging each one.

    Parameters
    ----------
    processes : iterable of psutil.Process
        The processes to resume.
    """
    for p in processes:
        p.resume()
        logger.info('Resumed {}'.format(p))
def suspend_processes_by_name(process_name: str):
    """Suspend every process whose name matches `process_name`."""
    processes = get_processes_by_name(process_name)

    suspend_processes(processes)
def resume_processes_by_name(process_name: str):
    """Resume every process whose name matches `process_name`."""
    processes = get_processes_by_name(process_name)

    resume_processes(processes)
class SuspendProcesses:
    """Context manager that suspends the given processes on entry and resumes them on exit."""

    def __init__(self, *processes):
        """
        Parameters
        ----------
        processes
            :class:`psutil.Process` objects or strings to search for using
            :func:`get_processes_by_name`
        """
        self.processes = []
        for process in processes:
            # isinstance (rather than exact type comparison, as the original
            # used) so that subclasses of str/psutil.Process are accepted.
            if isinstance(process, str):
                self.processes += get_processes_by_name(process)
            elif isinstance(process, psutil.Process):
                self.processes.append(process)

    def __enter__(self):
        suspend_processes(self.processes)

    def __exit__(self, exc_type, exc_val, exc_tb):
        resume_processes(self.processes)
| true |
8911cd7195b426e3b4097427c40a136d1d970d83 | Python | ronythakur/ML-IPl-predictor | /Trained_Model.py | UTF-8 | 3,420 | 3.328125 | 3 | [] | no_license | import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from sklearn import linear_model
# Taking Data-Frame
df=pd.read_csv("IPL Data.csv")
#Label Encoding for Team-name
from sklearn.preprocessing import LabelEncoder
le_team1=LabelEncoder()
le_team2=LabelEncoder()
le_winner=LabelEncoder()
le_toss=LabelEncoder()
#Creating a new data-frame
# NOTE(review): this is a reference, not a copy -- dfle and df are the same
# object, so the encodings below also mutate df.
dfle=df
dfle['Team1']=le_team1.fit_transform(dfle['Team1'])
dfle['Team2']=le_team2.fit_transform(dfle['Team2'])
dfle['Toss Winner']=le_toss.fit_transform(dfle['Toss Winner'])
dfle['Winner']=le_winner.fit_transform(dfle['Winner'])
#Extracting columns from Data-Frame
#Team1
Team1=dfle['Team1']
P1=dfle['Team1 Potential']
S1=dfle['Team1 Standing']
#Team2
Team2=dfle['Team2']
P2=dfle['Team2 Potential']
S2=dfle['Team2 Standing']
#Toss-Winner and Match-Winner
Toss=dfle['Toss Winner']
Win=dfle['Winner']
#Length of dataframe
total=len(Toss)
#Potential-difference and Standing difference
Pdif=P1-P2
Sdif=S2-S1
#Creating new dataframe
# Each row: [team, opponent, standing diff, potential diff, toss flag, winner label]
Data=[[]]
Data.clear()
for i in range(total):
    # NOTE(review): p starts at 1 and both branches only ever reassign it to 1,
    # so the 'Toss' feature is constant for every row -- presumably one branch
    # (or the initialization) was meant to use a different value; verify intent.
    p=1
    if(i%3!=0):
        if(Team1[i]==Toss[i]):
            p=1
        Data.insert(i+1,[int(Team1[i]),int(Team2[i]),int(Sdif[i]),int(Pdif[i]),p,1])
    else:
        if(Team2[i]==Toss[i]):
            p=1
        Data.insert(i+1,[int(Team2[i]),int(Team1[i]),int(-Sdif[i]),int(-Pdif[i]),p,2])
#Creating Dataframes
New=pd.DataFrame(Data,columns=['T-1','T-2','S-dif','P-dif','Toss','Winner'])
New_Data=New.drop(['T-1','T-2'],axis='columns')
#Printing Dataframes
print(New_Data)
print(New)
Win=New['Winner']
New=New.drop(['Winner'],axis='columns')
Win1=New_Data['Winner']
New_Data=New_Data.drop(['Winner'],axis='columns')
cnt=0
cnt1=0
P_dif=New['P-dif']
S_dif=New['S-dif']
Toss=New['Toss']
for i in range(total):
if(P_dif[i]>=0 and S_dif[i]>=0):
cnt+=1
if(Win[i]==1):
cnt1+=1
labels="Team winning after high potential and standing","Team losing with high potential and standing"
sizes=[cnt1,cnt-cnt1]
colors=['green','red']
plt.pie(sizes,labels=labels,colors=colors,autopct='%1.1f%%',startangle=0,shadow=True)
plt.axis('equal')
#To show the pie
#plt.show()
#Train-Test Split
from sklearn.model_selection import train_test_split
#1.New-Dataframe Train Test
X_train,X_test,Y_train,Y_test=train_test_split(New,Win,test_size=0.2)
#2.New-Data-Dataframe Train Test
X_train1,X_test1,Y_train1,Y_test1=train_test_split(New_Data,Win,test_size=0.2)
#Logistic Regression
#1.New-Dataframe
reg=linear_model.LogisticRegression()
reg.fit(X_train,Y_train)
print(reg.score(X_test,Y_test))
#2.New-Data Dataframe
reg1=linear_model.LogisticRegression()
reg1.fit(X_train1,Y_train1)
print(reg1.score(X_test1,Y_test1))
#Dumping into File
#2.New-Data Dataframe
# Fixed: this line was "2New-Data Dataframe" without the leading '#',
# which is a SyntaxError and prevented the script from running at all.
with open("Logistic-Regression.pickle",'wb') as f:
    pickle.dump(reg1,f)
f.close()  # redundant after the with-block, but harmless (close on a closed file is a no-op)
#Random Forest Classfier
from sklearn.ensemble import RandomForestClassifier
#1.New-Dataframe
clf=RandomForestClassifier(max_depth=2,random_state=1)
clf.fit(X_train,Y_train)
print(clf.score(X_test,Y_test))
#2.New-Data Dataframe
clf1=RandomForestClassifier(max_depth=2,random_state=1)
clf1.fit(X_train1,Y_train1)
print(clf1.score(X_test1,Y_test1))
#Dumping File
#1.New-Dataframe
with open("Random1.pickle",'wb') as f:
pickle.dump(clf,f)
f.close()
#2.New-Data-Dataframe
with open("Random-Forest.pickle",'wb') as f:
pickle.dump(clf1,f)
f.close()
| true |
c4f62807ccdda076cc8f2145db5a864ee85a5335 | Python | Aasthaengg/IBMdataset | /Python_codes/p02744/s286757972.py | UTF-8 | 265 | 3.421875 | 3 | [] | no_license | N = int(input())
a_num = 97
def dfs(s, n):
    """Print all "normal form" strings that extend `s` with `n` more letters.

    Each appended letter ranges from 'a' up to one past the largest letter
    already present, so e.g. "ab" may be followed by 'a', 'b' or 'c'.
    """
    if n == 0:
        print(s)
        return
    candidate_codes = range(ord("a"), ord(max(s)) + 2)
    for code in candidate_codes:
        dfs(s + chr(code), n - 1)
dfs("a", N-1)
| true |
63d2bfb63ac02f77ae6144a5541725481a148cd8 | Python | ChrisMichael11/ds_from_scratch | /clustering.py | UTF-8 | 6,777 | 3.265625 | 3 | [] | no_license | from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pdb
class KMeans:
    """
    Class for K-Means Clustering
    """

    def __init__(self, k):
        """
        Initialize with # of clusters you want
        """
        self.k = k           # Number o' clusters
        self.means = None    # Means of clusters

    def classify(self, input):
        """
        Return index of cluster closest to input
        """
        return min(range(self.k),
                   key=lambda i: squared_distance(input, self.means[i]))

    def train(self, inputs):
        """
        Choose K random points as initial means, then iterate Lloyd's
        algorithm until the assignments stop changing.
        """
        self.means = random.sample(inputs, self.k)
        assignments = None

        while True:
            # Find new assignments
            # NOTE(review): relies on Python 2 `map` returning a list; under
            # Python 3 this returns an iterator, the comparison below is
            # always False, and the loop never terminates.
            new_assignments = map(self.classify, inputs)

            # If no assignments have changed, Done!!!
            if assignments == new_assignments:
                return

            # Otherwise, keep the new assignments
            assignments = new_assignments

            for i in range(self.k):
                i_points = [p for p, a in zip(inputs, assignments) if a == i]
                # Avoid division by zero if i_points is empty
                if i_points:
                    self.means[i] = vector_mean(i_points)
def squared_clustering_errors(inputs, k):
    """
    Find the total squared error from k-means clustering `inputs` into `k` clusters.
    """
    clusterer = KMeans(k)  # fixed: was misspelled `clusteerer`, making the next line a NameError
    clusterer.train(inputs)
    means = clusterer.means
    assignments = map(clusterer.classify, inputs)

    return sum(squared_distance(input, means[cluster])
               for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
    """
    Plot total squared clustering error as a function of k.

    NOTE(review): `inputs` is read from the global scope (it is defined in
    the __main__ block), not passed as a parameter -- consider adding it as
    an argument.
    """
    ks = range(1, len(inputs) + 1)
    errors = [squared_clustering_errors(inputs, k) for k in ks]

    plt.plot(ks, errors)
    plt.xticks(ks)
    plt.xlabel("K")
    plt.ylabel("Total Squared Error")
    plt.show()
# USING CLUSTERING TO RECOLOR IMAGE
def recolor_image(input_file, k=5):
    """
    Load the image at `input_file`, cluster its pixels into `k` colors,
    and display the image with every pixel replaced by its cluster's mean.
    """
    img = mpimg.imread(input_file)  # fixed: was `path_to_png_file`, an undefined name
    pixels = [pixel for row in img for pixel in row]
    clusterer = KMeans(k)
    clusterer.train(pixels)  # Can be slow

    def recolor(pixel):
        cluster = clusterer.classify(pixel)  # Index of closest cluster
        return clusterer.means[cluster]      # fixed: was means[clusterer], a TypeError

    new_img = [[recolor(pixel) for pixel in row] for row in img]

    plt.imshow(new_img)  # fixed: was `plot.imshow`, an undefined name
    plt.axis('off')
    plt.show()
## HIERARCHICAL CLUSTERING
def is_leaf(cluster):
    """A cluster is a leaf if it contains exactly one element."""
    return len(cluster) == 1
def get_children(cluster):
    """
    Return the two children of a merged cluster.

    Raises a TypeError if `cluster` is a leaf cluster, which has no children.
    """
    if is_leaf(cluster):
        raise TypeError("A leaf cluster has no children")
    return cluster[1]
def get_values(cluster):
    """
    Returns value in this cluster (if it is a leaf cluster) or all the values
    in the leaf clusters below it (if it is not a leaf cluster)
    """
    if is_leaf(cluster):
        return cluster  # Already a 1-tuple containing value
    else:
        # Recursively flatten the values of both children.
        return [value
                for child in get_children(cluster)
                for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
    """
    Find aggregate distance between elements of cluster1 and elements of
    cluster2.

    `distance_agg` selects the linkage: ``min`` aggregates to single-linkage,
    ``max`` to complete-linkage.
    """
    # O(len(cluster1) * len(cluster2)) pairwise distance computations.
    return distance_agg([distance(input1, input2)
                         for input1 in get_values(cluster1)
                         for input2 in get_values(cluster2)])
def get_merge_order(cluster):
    """
    Smaller number = later merge. I.E. when unmerge, do so from lowest merge
    order to highest. Leaf clusters never merged (and we don't want to
    unmerge them), assign to infinity
    """
    if is_leaf(cluster):
        return float("inf")
    else:
        return cluster[0]  # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
    """Agglomeratively merge `inputs` into one nested cluster tuple.

    NOTE(review): the tuple-unpacking lambda below is Python 2-only syntax;
    this module targets Python 2 (see the print statements in __main__).
    """
    # Start w/ every input in a leaf cluster / 1-tuple
    clusters = [(input,) for input in inputs]

    # As long as more than one cluster remains
    while len(clusters) > 1:
        # Find two closest clusters
        c1, c2 = min([(cluster1, cluster2)
                      for i, cluster1 in enumerate(clusters)
                      for cluster2 in clusters[:i]],
                     key=lambda (x, y): cluster_distance(x, y, distance_agg))

        # Remove them from the list of clusters
        clusters = [c for c in clusters if c != c1 and c != c2]

        # Merge clusters, using merge_order = # of clusters left
        merged_cluster = (len(clusters), [c1, c2])

        # Add the merge
        clusters.append(merged_cluster)

    # When only 1 cluster left, return that
    return clusters[0]
def generate_clusters(base_cluster, num_clusters):
    """Unmerge `base_cluster` (most recent merges first) until `num_clusters` remain."""
    # Start with list with just base cluster
    clusters = [base_cluster]

    # As long as we don't have enough clusters yet
    while len(clusters) < num_clusters:
        # Choose the last-merged of our clusters (lowest merge order).
        next_cluster = min(clusters, key=get_merge_order)
        # Remove it from the list
        clusters = [c for c in clusters if c != next_cluster]
        # Add its children to the list (i.e. unmerge it)
        clusters.extend(get_children(next_cluster))

    # Once we have enough clusters, return it
    return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],
[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],
[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(11)
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-Means: "
print clusterer.means
print
random.seed(11)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-Means: "
print clusterer.means
print
print "Errors as a function of K: "
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "Bottom Up Hierarchical Clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "Three Clusters, MIN:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "Three Clusters, MAX:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
| true |
28a18e4f2b6a2599ca36a2715b60eab99d1f0088 | Python | HimNG/CrackingTheCodeInterview | /2.1/2.1/_2.1.py | UTF-8 | 1,906 | 3.59375 | 4 | [] | no_license | class Node:
def __init__(self, data):
    """Create a singly-linked-list node holding `data`, with no successor."""
    self.data = data
    self.next = None
class linkedlist:
    """Singly linked list with insertion, printing, and duplicate-removal utilities."""

    def __init__(self):
        self.head = None

    def insert_at_begin(self, data):
        """Prepend a new node holding `data`."""
        node = Node(data)
        node.next = self.head
        self.head = node

    def insert_at_end(self, data):
        """Append a new node holding `data` (O(n) walk to the tail)."""
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        temp = self.head
        while temp.next:
            temp = temp.next
        temp.next = node

    # remove duplicates with hashing
    def removeDup_with_hash(self):
        """Remove every duplicate value in O(n) time using a set of seen values.

        Fixes two bugs in the original: it added the *current* node's value
        (instead of the next node's) to the seen-set, so non-adjacent
        duplicates such as 1->2->2 survived, and it raised AttributeError on
        an empty list.
        """
        if self.head is None:
            return
        seen = {self.head.data}
        temp = self.head
        while temp.next:
            if temp.next.data in seen:
                temp.next = temp.next.next  # unlink the duplicate node
            else:
                seen.add(temp.next.data)
                temp = temp.next

    def print_ll(self):
        """Print the list values on one line, separated by spaces."""
        ll = self.head
        while ll is not None:
            print(ll.data, end=" ")
            ll = ll.next

    # remove Duplicates without extra space
    def removeDup_without_hash(self):
        """Remove every duplicate value in O(n^2) time and O(1) extra space.

        The original only removed *adjacent* duplicates; this uses the
        standard current/runner two-pointer technique so every later
        occurrence of a value is removed, and it handles the empty list.
        """
        current = self.head
        while current:
            runner = current
            while runner.next:
                if runner.next.data == current.data:
                    runner.next = runner.next.next
                else:
                    runner = runner.next
            current = current.next
if __name__ == '__main__' :
    # Build the list by prepending 10, 14, 10, 10, 10 then appending 11,
    # producing: 10 -> 10 -> 10 -> 14 -> 10 -> 11
    llist=linkedlist()
    llist.insert_at_begin(10)
    llist.insert_at_begin(14)
    llist.insert_at_begin(10)
    llist.insert_at_begin(10)
    llist.insert_at_begin(10)
    llist.insert_at_end(11)
    llist.print_ll()
    # Remove duplicates (set-based variant) and print the result.
    llist.removeDup_with_hash()
    #llist.removeDup_without_hash()
    print("")
    llist.print_ll()
| true |
15e12e1f15fac0e458826be8ed5650073309a0bd | Python | bwz3891923/LearningLog | /Python 第八周作业/20171031 m5.3.0.py | UTF-8 | 273 | 3.234375 | 3 | [] | no_license | from datetime import *
# Demo of datetime construction, current time, and formatting.
someday=datetime(2016,9,16,22,33,32,47)  # fixed datetime: 2016-09-16 22:33:32.000047
print(someday)
today=datetime.now()  # current local time
print(today)
today1=datetime.utcnow()  # current naive UTC time
print(today1)
print(someday.isoformat())  # ISO-8601 string
print(someday.isoweekday())  # day of week: 1=Monday .. 7=Sunday
print(someday.strftime("%Y-%m-%d %H:%M:%S"))  # custom format, microseconds dropped
| true |
eb2d12db77ae84cfc4ecd0333079e2fd6a7d0089 | Python | Hunter1235/Project-2 | /messsage.py | UTF-8 | 10,763 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 13:41:18 2021
@author: Hunter Stiles
"""
import tkinter as tk
from tkinter import ttk
from twilio.rest import Client
import sqlite3
import pandas as pd
import logging
import os
# Log level comes from the environment when set; note the env value is a
# string (e.g. "INFO") while the fallback is the int logging.DEBUG —
# Logger.setLevel accepts both forms.
try:
    logLevel = os.environ['MESSAGE_LOG_LEVEL']
except:
    # NOTE(review): bare except — a KeyError is what is expected here.
    logLevel = logging.DEBUG
# Console format (time only) plus a file handler with a fuller format.
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%I:%M:%S %p')
logging.getLogger().setLevel(logLevel)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filehandler = logging.FileHandler(r'messagelog.txt')
filehandler.setFormatter(formatter)
logging.getLogger().addHandler(filehandler)
logging.warning('messaging application started.')
# Contact directory lives in a local SQLite database.
con = sqlite3.connect('directory.db')
cur = con.cursor()

# Twilio credentials come from the environment when available, with
# placeholder fallbacks.  Exceptions narrowed to KeyError (what a missing
# environment variable actually raises).
try:
    account_sid = os.environ['TWILIO_ACCOUNT_SID']
except KeyError:
    logging.warning("Unable to get twilio account_sid from environment")
    account_sid = 'a valid twilio sid'
try:
    # BUG FIX: this previously assigned to `account_sid`, clobbering the
    # SID and leaving `auth_token` undefined (NameError at Client())
    # whenever TWILIO_ACCOUNT_TOKEN was present in the environment.
    auth_token = os.environ['TWILIO_ACCOUNT_TOKEN']
except KeyError:
    logging.warning("Unable to get twilio auth_token from environment")
    auth_token = 'a valid twilio token'
client = Client(account_sid, auth_token)
def checkNumber(telephoneNumber):
    """Validate that telephoneNumber is a 10-digit numeric string.

    Returns True when valid; otherwise shows an error dialog, logs the
    rejected value, and returns False.
    """
    if telephoneNumber.isnumeric() and len(telephoneNumber) == 10:
        return True
    # BUG FIX: `tkinter.messagebox` is a submodule that `import tkinter
    # as tk` does NOT load, so the original `tk.messagebox.showerror`
    # raised AttributeError; import the submodule explicitly instead.
    from tkinter import messagebox
    messagebox.showerror("Invalid Phone number", "Number must be 10 digits")
    logging.info("Invalid phone number entered " + telephoneNumber)
    return False
# Callback functions for the keypad buttons.  Each one logs the press and
# appends its digit to the phone-number display.  The ten original
# copy-pasted functions are generated from a single factory; the public
# names button1..button0 and their no-argument call signature are
# unchanged.  (Later in the file these names are rebound to the tk.Button
# widgets themselves, after being passed as each Button's `command=`.)
def _make_digit_handler(digit):
    """Build a button callback that logs and appends `digit` to display_number."""
    def handler():
        logging.debug("pressed " + digit)
        display_number.insert('end', digit)
    return handler

button1 = _make_digit_handler('1')
button2 = _make_digit_handler('2')
button3 = _make_digit_handler('3')
button4 = _make_digit_handler('4')
button5 = _make_digit_handler('5')
button6 = _make_digit_handler('6')
button7 = _make_digit_handler('7')
button8 = _make_digit_handler('8')
button9 = _make_digit_handler('9')
button0 = _make_digit_handler('0')
def button_dir():
    """Open the Contacts pop-up: a combobox of directory names plus Submit/Cancel.

    Submitting copies the selected contact's number into the main
    phone-number display and closes the pop-up.
    """
    def button_Submit():
        """Look up the chosen name in the dataframe and fill display_number."""
        logging.debug("pressed Submit button in directory window")
        logging.debug("selected " + dirCombBox.get())
        # Row index (or indices) whose 'name' matches the combobox choice.
        ind=df.index[(df['name'] == dirCombBox.get())].tolist()
        display_number.delete(0, "end")
        display_number.insert(0, df.at[ind[0], 'number'])
        windowDirectory.destroy()
    logging.debug("pressed directoy button")
    windowDirectory = tk.Toplevel(root)
    windowDirectory.geometry("250x250")
    windowDirectory.title("Directory")
    instructionLabel = tk.Label(windowDirectory, text="select entry from directory")
    instructionLabel.pack()
    # Load the whole directory, sorted by name, for the dropdown.
    df = pd.read_sql_query("SELECT name, number FROM PhoneNumbers ORDER BY name", con)
    logging.debug(df)
    dirCombBox = ttk.Combobox(windowDirectory, values=df['name'].tolist())
    dirCombBox.pack()
    frameA = tk.Frame(windowDirectory)
    frameA.pack(side="bottom")
    Button_sendText = tk.Button(frameA, text="Submit", command=button_Submit)
    Button_sendText.grid(row = 1, column = 1)
    Button_cancel = tk.Button(frameA, text="Cancel", command=windowDirectory.destroy)
    Button_cancel.grid(row = 1, column = 3)
def button_ad():
    """Open the Add/Delete pop-up for maintaining the contacts table."""
    logging.debug("pressed add/delete")
    def button_Add():
        """Insert the entered name/number into PhoneNumbers after validation."""
        logging.debug("pressed Add button in add/delete window")
        name=nameEntry.get()
        number=numberEntry.get()
        if len(name)==0:
            # NOTE(review): tk.messagebox is used without importing the
            # tkinter.messagebox submodule anywhere in this file — confirm
            # this attribute actually resolves at runtime.
            tk.messagebox.showerror("Invalid name", "You must enter a name")
            return
        if checkNumber(number):
            # Parameterized insert — safe against SQL injection.
            cur.execute('INSERT INTO PhoneNumbers VALUES (?,?)', (name, number))
            con.commit()
            logging.info("added " + name + "to the directory")
            tk.messagebox.showinfo("Added", "Added " + name + " to contacts")
            windowAD.destroy()
    def button_Delete():
        """Delete the contact currently selected in the combobox."""
        logging.debug("pressed Delete button in add/delete window")
        name = dirCombBox.get();
        # NOTE(review): this confirmation dialog is shown twice — once here,
        # before the delete, and again after the commit below; one of the
        # two calls is almost certainly redundant.
        tk.messagebox.showinfo("Deleted", "Deleted " + name + " from contacts")
        cur.execute('DELETE FROM PhoneNumbers WHERE name=?', (name,))
        con.commit()
        logging.info("deleted " + name + "from the directory")
        tk.messagebox.showinfo("Deleted", "Deleted " + name + " from contacts")
        windowAD.destroy()
    logging.debug("pressed directoy button")
    windowAD = tk.Toplevel(root)
    windowAD.geometry("250x300")
    windowAD.title("Add/Delete")
    # --- delete section: combobox of existing contacts ---
    instructionLabel = tk.Label(windowAD, text="select entry to delete")
    instructionLabel.pack()
    df = pd.read_sql_query("SELECT name, number FROM PhoneNumbers ORDER BY name", con)
    logging.debug(df)
    dirCombBox = ttk.Combobox(windowAD, values=df['name'].tolist())
    dirCombBox.pack()
    frameA = tk.Frame(windowAD)
    frameA.pack()
    Button_Delete = tk.Button(frameA, text="Delete", command=button_Delete)
    Button_Delete.grid(row = 1, column = 1)
    Button_cancel = tk.Button(frameA, text="Cancel", command=windowAD.destroy)
    Button_cancel.grid(row = 1, column = 3)
    # --- add section: name/number entries plus Add/Cancel buttons ---
    frameB = tk.Frame(windowAD)
    frameB.pack(side="bottom")
    instructionLabel1 = tk.Label(frameB, text="Enter name and number to add")
    instructionLabel1.pack()
    nameLabel = tk.Label(frameB, text="Name")
    nameLabel.pack()
    nameEntry= tk.Entry(frameB, width = 30)
    nameEntry.pack()
    numberLabel = tk.Label(frameB, text="Number")
    numberLabel.pack()
    numberEntry= tk.Entry(frameB, width = 30)
    numberEntry.pack()
    frameC = tk.Frame(frameB)
    frameC.pack(side="bottom")
    Button_Add = tk.Button(frameC, text="Add", command=button_Add)
    Button_Add.grid(row = 1, column = 1)
    Button_cancel1 = tk.Button(frameC, text="Cancel", command=windowAD.destroy)
    Button_cancel1.grid(row = 1, column = 3)
def button_send():
    """Open the Send pop-up: number entry, message text area, Text/Voice buttons."""
    def button_sendText():
        """Send the message text as an SMS via Twilio after validating the number."""
        logging.debug("pressed Send Text in send popup window")
        phoneNum = display_number1.get()
        if (checkNumber(phoneNum)):
            logging.debug("sending text to " + phoneNum )
            message = client.messages \
                .create(
                     body=display_MSG.get("1.0","end"),
                     to=phoneNum,
                     from_='+19082244077'
                 )
            logging.info("sent the following text to " + phoneNum)
            logging.info(display_MSG.get("1.0","end"))
    def button_sendVoice():
        """Place a Twilio voice call that speaks the message text (TwiML <Say>)."""
        logging.debug("pressed Send Voice in send popup window")
        phoneNum = display_number1.get()
        if (checkNumber(phoneNum)):
            logging.debug("sending voice message to " + phoneNum )
            # NOTE(review): the message text is interpolated into TwiML XML
            # without escaping — characters such as '<' or '&' would break
            # the markup.
            call = client.calls.create(
                    twiml='<Response><Say>' + display_MSG.get("1.0","end") + '</Say></Response>',
                    to=phoneNum,
                    from_='+19082244077'
                )
            logging.info("sent the following text to " + phoneNum)
            logging.info(display_MSG.get("1.0","end"))
    logging.debug("pressed send message")
    logging.debug(display_number.get())
    windowSendMsg = tk.Toplevel(root)
    windowSendMsg.geometry("250x250")
    windowSendMsg.title("Send")
    # Pre-fill the destination number from the main keypad display.
    display_number1 = tk.Entry(windowSendMsg, width = 30)
    display_number1.insert("end", display_number.get())
    display_number1.pack()
    # Scrollable, word-wrapped message body.
    scrollbar = tk.Scrollbar(windowSendMsg)
    display_MSG = tk.Text(windowSendMsg, width = 30, height = 10, yscrollcommand=scrollbar.set, wrap='word')
    scrollbar.config(command=display_MSG.yview)
    scrollbar.pack(side="right")
    display_MSG.pack(side="top")
    frameA = tk.Frame(windowSendMsg)
    frameA.pack()
    Button_sendText = tk.Button(frameA, text="Send Text", command=button_sendText)
    Button_sendText.grid(row = 1, column = 1)
    Button_cancel = tk.Button(frameA, text="Cancel", command=windowSendMsg.destroy)
    Button_cancel.grid(row = 1, column = 2)
    Button_sendVoice = tk.Button(frameA, text="Send Voice", command=button_sendVoice)
    Button_sendVoice.grid(row = 1, column = 3)
# --- main window: phone display, 0-9 keypad grid, action buttons ---
root = tk.Tk()
root.title("Phone")
root.geometry("250x300")
frame1 = tk.Frame(root)
frame1.pack()
display_number = tk.Entry(frame1, width = 30)
display_number.pack()
frame2 = tk.Frame(root)
frame2.pack()
# Each keypad Button takes the handler function of the same name as its
# command, then REBINDS that name to the widget — command= is evaluated
# first, so the function is captured before the rebinding.
button1 = tk.Button(frame2, text = "1", height = 3, width = 6, command = button1)
button1.grid(row = 1, column = 1)
button2 = tk.Button(frame2, text = "2", height = 3, width = 6, command = button2)
button2.grid(row = 1, column = 2)
button3 = tk.Button(frame2, text = "3", height = 3, width = 6, command = button3)
button3.grid(row = 1, column = 3)
button4 = tk.Button(frame2, text = "4", height = 3, width = 6, command = button4)
button4.grid(row = 2, column = 1)
button5 = tk.Button(frame2, text = "5", height = 3, width = 6, command = button5)
button5.grid(row = 2, column = 2)
button6 = tk.Button(frame2, text = "6", height = 3, width = 6, command = button6)
button6.grid(row = 2, column = 3)
button7 =tk. Button(frame2, text = "7", height = 3, width = 6, command = button7)
button7.grid(row = 3, column = 1)
button8 = tk.Button(frame2, text = "8", height = 3, width = 6, command = button8)
button8.grid(row = 3, column = 2)
button9 = tk.Button(frame2, text = "9", height = 3, width = 6, command = button9)
button9.grid(row = 3, column = 3)
button0 = tk.Button(frame2, text = "0", height = 3, width = 6 , command = button0)
button0.grid(row = 4, column = 2)
# Bottom row: directory, add/delete and send-message pop-ups.
frame3 = tk.Frame(root)
frame3.pack()
buttonDir = tk.Button(frame3, text = "Contacts", height = 3, width = 10, command = button_dir)
buttonDir.pack(side="left")
buttonConnect = tk.Button(frame3, text = "Add\nDelete", height = 3, width = 10, command = button_ad)
buttonConnect.pack(side="left")
buttonSend = tk.Button(frame3, text = "Send\nMessage", height = 3, width = 10, command = button_send)
buttonSend.pack(side="right")
root.mainloop()
4631c5ad196d30c8b23f475772d8b233363dc4be | Python | s0ap/tex | /delta_debugging/decider.py | UTF-8 | 1,423 | 2.625 | 3 | [] | no_license | import DD
class decider:
    """Classify a LaTeX delta-debugging run as PASS/FAIL/UNRESOLVED.

    Reference error messages and output ("reference") are captured once
    from a no-delta run (PASS baseline) and an all-deltas run (FAIL
    baseline); later runs are compared against both.  Python 2 code.
    """
    def __init__(self):
        # PASS/FAIL baselines, filled in by extract_master_errors().
        self.pass_errors = ''
        self.pass_extra = ''
        self.fail_errors = ''
        self.fail_extra = ''
    def extract_master_errors(self, dd_obj):
        """Record baseline errors/reference from no-delta and all-delta runs.

        Aborts (assert) if both baselines are identical, since then no run
        could ever be distinguished.
        """
        ccode = dd_obj.test_with_no_deltas()
        # We don't have master errors yet
        # assert 'PASS' == ccode, "No-delta run should bring 'PASS', got: " + ccode
        rl = dd_obj.get_last_run()
        self.pass_errors = rl.get_errors()
        self.pass_extra = rl.get_reference()
        ccode = dd_obj.test_with_all_deltas()
        rl = dd_obj.get_last_run()
        self.fail_errors = rl.get_errors()
        self.fail_extra = rl.get_reference()
        if (self.pass_errors == self.fail_errors) and (self.pass_extra == self.fail_extra):
            self.print_master_errors()
            assert 0, "PASS and FAIL errors and references are the same"
    def print_master_errors(self):
        """Dump both baselines, bracketed for easy visual delimiting."""
        print '[[[PASS error messages:[[[' + self.pass_errors + ']]]]]]'
        print '[[[PASS reference:[[[' + self.pass_extra + ']]]]]]'
        print '[[[FAIL error messages:[[[' + self.fail_errors + ']]]]]]'
        print '[[[FAIL reference:[[[' + self.fail_extra + ']]]]]]'
    def get_result(self, latex_run):
        """Map a run onto DD.DD.PASS / DD.DD.FAIL / DD.DD.UNRESOLVED.

        A run must match a baseline on BOTH errors and reference to count.
        """
        errors = latex_run.get_errors()
        ref = latex_run.get_reference()
        if (errors == self.pass_errors) and (ref == self.pass_extra):
            return DD.DD.PASS
        if (errors == self.fail_errors) and (ref == self.fail_extra):
            return DD.DD.FAIL
        return DD.DD.UNRESOLVED
| true |
c6bc16d8061189e32703a69a917d468ddc8ae99e | Python | TomasHalko/pythonSchool | /School exercises/Exercise_IV_Dictionaries.py | UTF-8 | 289 | 3.390625 | 3 | [] | no_license | dictionary_example = {}
# Populate the dict created above and print it whole, then entry by entry.
# NOTE(review): the naming is inverted — "value_N" strings are the KEYS and
# "keynN" strings are the VALUES.
dictionary_example["value_1"] = "keyn1"
dictionary_example["value_2"] = "keyn2"
dictionary_example["value_3"] = "keyn3"
dictionary_example["value_4"] = "keyn4"
print(dictionary_example)
# Iterating a dict yields its keys.
for i in dictionary_example:
    print(f"{i} ---> {dictionary_example[i]}")
a5e4349e36dfcc24bd654f8d6b5818d6c75899b7 | Python | NachivelKishoreKumar/Nachivel | /pattern3.py | UTF-8 | 85 | 3.65625 | 4 | [] | no_license | n=6
# For each row i (n defined above): print n-i padding chunks, then i stars —
# a right-leaning triangle whose first row (i=0) is blank.
for i in range(0,n):
    for j in range(n,i,-1):
        print("  ",end=" ")
    print("*"*i)
| true |
974cfb17a3acdfb4ea7d7b5c97026d82b5972010 | Python | dehaineb/mopsi | /Tdlog_TP1.py | UTF-8 | 3,239 | 2.90625 | 3 | [] | no_license | groups = {
1: { "Den", "Eng", "F", "Sweden" },
2: { "CIS", "Germany", "Netherlands", "Scotland" }
}
# Played matches as (team1, goals1, goals2, team2).
matches = [
    ("Sweden", 1, 1, "France"),
    ("Denmark", 0, 0, "England"),
    ("Netherlands", 1, 0, "Scotland"),
    ("CIS", 1, 1, "Germany"),
    ("France", 0, 0, "England"),
    ("Sweden", 1, 0, "Denmark"),
]
# Print each group's standings table (2-point win system: win=2, draw=1),
# followed by the played matches and the fixtures still to come.
for i in range(1,len(groups)+1):
    print("")
    print("Group "+str(i))
    print("-------")
    # classement maps team -> [points, goal difference, goals scored].
    classement={pays:[0,0,0] for pays in groups[i]}
    # Longest team name, used to align the printed columns.
    nombrecaractèremax=0
    for pays in groups[i]:
        if(len(pays)>nombrecaractèremax):
            nombrecaractèremax=len(pays)
        # Accumulate this team's results over all played matches.
        for k in range(len(matches)):
            if (pays in matches[k]):
                if (matches[k][1]==matches[k][2]):
                    # Draw: 1 point, goals scored credited.
                    classement[pays][0]+=1;
                    classement[pays][2]+=matches[k][1];
                elif(pays==matches[k][0] and matches[k][1]>matches[k][2]):
                    # Home win: 2 points; update goal diff/scored for both sides.
                    classement[pays][0]+=2;
                    classement[pays][1]+=matches[k][1]-matches[k][2]
                    classement[matches[k][3]][1]-=matches[k][1]-matches[k][2]
                    classement[pays][2]+=matches[k][1];
                    classement[matches[k][3]][2]+=matches[k][2]
                elif(pays==matches[k][3] and matches[k][2]>matches[k][1]):
                    # Away win: 2 points; symmetric bookkeeping.
                    classement[pays][0]+=2;
                    classement[pays][1]+=matches[k][2]-matches[k][1]
                    classement[matches[k][3]][1]-=matches[k][2]-matches[k][1]
                    classement[pays][2]+=matches[k][2];
                    classement[matches[k][3]][2]+=matches[k][1]
    # classementfinal maps rank (1-based) -> team name.
    classementfinal={i:"" for i in range(1,len(groups[i])+1)}
    # egalite counts ties already resolved, to break exact ties deterministically.
    egalite={pays:0 for pays in groups[i]}
    for pays1 in groups[i]:
        # Rank = 1 + number of teams strictly better (points, then goal
        # difference, then goals scored, then tie-break counter).
        a=1;
        for pays2 in groups[i]:
            if pays2!=pays1:
                if (classement[pays1][0]<classement[pays2][0]):
                    a=a+1
                if ((classement[pays1][0]==classement[pays2][0]) and (classement[pays1][1]<classement[pays2][1])):
                    a=a+1
                if((classement[pays1][0]==classement[pays2][0]) and (classement[pays1][1]==classement[pays2][1]) and (classement[pays1][2]<classement[pays2][2])):
                    a=a+1
                if((classement[pays1][0]==classement[pays2][0]) and (classement[pays1][1]==classement[pays2][1]) and (classement[pays1][2]==classement[pays2][2]) and pays1!=pays2):
                    a=a+egalite[pays1]
                    egalite[pays1]+=1
                    egalite[pays2]+=1
        classementfinal[a]=pays1;
    # Standings lines: name padded with dots, points, signed goal difference.
    for k in range(1,len(groups[i])+1):
        if(classement[classementfinal[k]][1]>=0):
            print(classementfinal[k]+"."*(3+nombrecaractèremax-len(classementfinal[k]))+"  "+ str(classement[classementfinal[k]][0])+" pts   "+ "+" +str(classement[classementfinal[k]][1]))
        else:
            print(classementfinal[k]+"."*(3+nombrecaractèremax-len(classementfinal[k]))+"  "+ str(classement[classementfinal[k]][0])+" pts   " +str(classement[classementfinal[k]][1]))
    print(" ")
    # Played matches involving this group's teams.
    for k in range(len(matches)):
        if(matches[k][0] in groups[i]):
            print(matches[k][0]+" "*(nombrecaractèremax+2-len(matches[k][0]))+ str(matches[k][1])+" - " + str(matches[k][2])+"   " +matches[k][3])
    print("")
    # Remaining fixtures: every pair that has no recorded match yet;
    # `doublon` stores pair-sets so each fixture prints once.
    doublon=[]
    for pays1 in groups[i]:
        for pays2 in groups[i]:
            matchavenir=True
            for k in range(len(matches)):
                if((pays1 in matches[k]) and (pays2 in matches[k])):
                    matchavenir=False
            if (matchavenir and {pays1,pays2} not in doublon):
                print(pays1+" "*(nombrecaractèremax+2-len(pays1))+"vs  "+pays2)
                doublon+=[{pays1,pays2}]
| true |
13f848650dfa019c7198e831706b7b5d12bac303 | Python | novalis78/prelude-engine | /py/deeplearning/doc2vecprelude.py | UTF-8 | 1,237 | 2.703125 | 3 | [] | no_license | import pandas as pd
# Import various modules for string cleaning
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import nltk.data
# Import the built-in logging module and configure it so that Word2Vec
# creates nice output messages
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',\
level=logging.INFO)
import gensim
class LabeledLineSentence(object):
    """Iterable of labelled sentences, one per line of a text file.

    Each line is whitespace-tokenized and labelled 'SENT_<line number>'.
    """
    def __init__(self, filename):
        self.filename = filename
    def __iter__(self):
        # BUG FIX: the original iterated over the *global* name `filename`
        # instead of the instance attribute, and never closed the file.
        with open(self.filename) as handle:
            for uid, line in enumerate(handle):
                # NOTE(review): `LabeledSentence` is not imported anywhere
                # in this module (only `gensim` is) — confirm the intended
                # gensim class and import it explicitly.
                yield LabeledSentence(words=line.split(), labels=['SENT_%s' % uid])
# Build the labelled corpus from the sentence file and echo each item.
# NOTE(review): this uses gensim's own LabeledLineSentence rather than the
# (now fixed) local class above — confirm which one is intended; the
# gensim name has been renamed/removed in newer gensim releases.
#sentences = LabeledLineSentence("mind-sentences.txt")
sentences = gensim.models.doc2vec.LabeledLineSentence("mind-sentences.txt")
for a in sentences:
    print a
# model = gensim.models.Doc2Vec(alpha=0.025, min_alpha=0.025) # use fixed learning rate
# model.build_vocab(sentences)
# for epoch in range(10):
# model.train(sentences)
# model.alpha -= 0.002 # decrease the learning rate
# model.min_alpha = model.alpha # fix the learning rate, no decay
# # store the model to mmap-able files
# model.save('mindmodel2.doc2vec')
# print model.most_similar("SENT_0")
| true |
2f9de940c2a61abba2418cd16e1dbce8aa764261 | Python | nathanesau/advent_of_code_2020 | /solutions/day13/test_solution.py | UTF-8 | 1,361 | 2.828125 | 3 | [] | no_license | import os
from .solution import (
solution_1,
solution_2
)
BASEDIR = os.path.dirname(os.path.realpath(__file__))
def read_input():
    """Return the full text of assets/input.txt under BASEDIR."""
    input_path = BASEDIR + "/assets/input.txt"
    with open(input_path) as handle:
        return handle.read()
def test_solution_1_example():
    """
    provided example
    """
    data = """
    939
    7,13,x,x,59,x,31,19
    """
    # First non-blank line is the timestamp, second the bus schedule.
    stripped = (raw.strip() for raw in data.splitlines())
    lines = [entry for entry in stripped if entry]
    timestamp, stops = int(lines[0]), lines[1].split(',')
    assert solution_1(timestamp, stops) == 295
def test_solution_1():
    """
    use input.txt
    """
    raw = read_input()
    # First non-blank line is the timestamp, second the bus schedule.
    stripped = (chunk.strip() for chunk in raw.splitlines())
    lines = [entry for entry in stripped if entry]
    timestamp, stops = int(lines[0]), lines[1].split(',')
    assert solution_1(timestamp, stops) == 3966
def test_solution_2_example():
    """
    provided example
    """
    data = """
    939
    7,13,x,x,59,x,31,19
    """
    # Part 2 only needs the schedule line (the timestamp is ignored).
    stripped = (raw.strip() for raw in data.splitlines())
    lines = [entry for entry in stripped if entry]
    assert solution_2(lines[1].split(',')) == 1068781
def test_solution_2():
    """
    use input.txt
    """
    raw = read_input()
    # Part 2 only needs the schedule line (the timestamp is ignored).
    stripped = (chunk.strip() for chunk in raw.splitlines())
    lines = [entry for entry in stripped if entry]
    assert solution_2(lines[1].split(',')) == 800177252346225
| true |
6a41bb18748610f28bb74c885351bd6a55003de6 | Python | migueldelafuente1/Brink_Boeker_TBME | /BM_brackets.py | UTF-8 | 7,912 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 17:55:30 2018
@author: miguel
"""
from __future__ import division, print_function
import numpy as np
from sympy.physics.wigner import racah
from sympy import evalf
def fact_build(max_order_fact):
    """Populate the module-level `fact` table with log-factorials.

    After the call, fact[k] == ln(k!) for k in [0, max_order_fact),
    accumulated incrementally as ln(k!) = ln((k-1)!) + ln(k).
    """
    global fact
    fact = [np.log(1)]
    for k in range(1, max_order_fact):
        fact.append(fact[-1] + np.log(k))
def matrix_r2(n,l,N,L, n_q,l_q,N_q,L_q, lamda):
    """Matrix element connecting oscillator states (n,l,N,L) and the primed
    state (n_q,l_q,N_q,L_q) at total angular momentum `lamda`.

    Only six primed combinations are non-zero (the six "rel" branches
    below); anything else returns 0, as does any negative primed quantum
    number.  Relations 3-6 involve a Racah W coefficient via sympy's
    racah(); the AttributeError guard covers the case where racah()
    returns a plain number with no .evalf() method.
    """
    if((n_q<0) or (l_q<0) or (N_q<0) or(L_q<0)):
        return 0
    if(n_q == (n-1)):
        # relations 1,3,4
        if(l_q == l):
            if(N_q == N):
                if(L_q == L):
                    # rel 1: (n-1, l, N, L)
                    return 0.5*np.sqrt(n*(n+l+0.5))
                else:
                    return 0
            else:
                return 0
        elif(l_q == (l+1)):
            if(N_q == (N-1)):
                if(L_q == (L+1)):
                    # rel 3: (n-1, l+1, N-1, L+1)
                    try:
                        racah_aux = (racah(l,(l+1),L,(L+1),1,lamda)).evalf()
                    except AttributeError:
                        racah_aux = 0
                    return (((-1)**(lamda+L+l))*
                            np.sqrt(n*N*(l+1)*(L+1)) * racah_aux)
                else:
                    return 0
            elif(N_q == N):
                if(L_q == (L-1)):
                    # rel 4: (n-1, l+1, N, L-1)
                    try:
                        racah_aux = (racah(l,(l+1),L,(L-1),1,lamda)).evalf()
                    except AttributeError:
                        racah_aux = 0
                    return (((-1)**(lamda+L+l))*
                            np.sqrt(n*L*(l+1)*(N+L+0.5)) * racah_aux)
                else:
                    return 0
            else:
                return 0
        else:
            return 0
    elif(n_q == n):
        # relations 2,5,6
        if(l_q == (l-1)):
            if(N_q == (N-1)):
                if(L_q == (L+1)):
                    # rel 5: (n, l-1, N-1, L+1)
                    try:
                        racah_aux = (racah(l,(l-1),L,(L+1),1,lamda)).evalf()
                    except AttributeError:
                        racah_aux = 0
                    return (((-1)**(lamda+L+l))*
                            np.sqrt(N*l*(n+l+0.5)*(L+1))*racah_aux)
                else:
                    return 0
            elif(N_q == N):
                if(L_q == (L-1)):
                    # rel 6: (n, l-1, N, L-1)
                    try:
                        racah_aux = (racah(l,(l-1),L,(L-1),1,lamda)).evalf()
                    except AttributeError:
                        racah_aux = 0
                    return (((-1)**(lamda+L+l))*
                            np.sqrt(L*l*(n+l+0.5)*(N+L+0.5))*racah_aux)
                else:
                    return 0
            else:
                return 0
        elif(l_q == l):
            if(N_q == (N-1)):
                if(L_q == L):
                    # rel 2: (n, l, N-1, L)
                    return 0.5*np.sqrt(N*(N+L+0.5))
                else:
                    return 0
            else:
                return 0
        else:
            return 0
    else:
        return 0
    # NOTE(review): unreachable — every branch above returns; also this
    # would *return* the ValueError class rather than raise an instance.
    return ValueError
def A_coeff(l1,l,l2,L, x):
    """A coefficient entering BM_Bracket00's sum over x.

    Uses the module-level `fact` table of log-factorials (fact_build must
    have been called first); the prefactor and each summand are assembled
    in log space and exponentiated to avoid factorial overflow.
    """
    # Coefficient for the BMB_00
    const = (0.5 * (fact[l1+l+x+1]+fact[l1+l-x]+fact[l1+x-l]-
                        fact[l+x-l1]))
    const += (0.5 * (fact[l2+L+x+1]+fact[l2+L-x]+fact[l2+x-L]-
                         fact[L+x-l2]))
    const = np.exp(const)
    aux_sum = 0.
    # limits for non negative factorials (q itself is non negative):
    # every fact[...] index below must stay >= 0 over the q range.
    c1 = l1 - l
    c2 = l2 - L
    c3 = -x - 1
    c4 = l + l1
    c5 = L + l2
    major = min(c4,c5);
    minor = max(max(max(max(c1,c2),c3),x),0)
    for q in range(minor, major+1):
        # Only even l+q-l1 contributes (half-integer factorials excluded).
        if( ((l+q-l1)%2) == 0 ):
            numerator = (fact[l+q-l1] + fact[L+q-l2])
            denominator = ((fact[int((l+q-l1)/2)]
                        + fact[int((l+l1-q)/2)] + fact[q-x] + fact[q+x+1] +
                        fact[int((L+q-l2)/2)] + fact[int((L+l2-q)/2)]))
            # (l+q-l1)/2 is a whole number here, so the float exponent of
            # (-1) is safe (yields +/-1.0 under true division).
            aux_sum += (((-1)**((l+q-l1)/2)) *
                            np.exp(numerator - denominator))
    return const * aux_sum
def BM_Bracket00(n,l,N,L, l1,l2, lamda):
    """Base case of the bracket recursion: both relative radial quantum
    numbers n1 = n2 = 0.

    Combines a log-space prefactor with a sum over x of
    (2x+1) * A_coeff * W(l L l1 l2; lamda x); x runs over the triangle
    range allowed by (l,l1) and (L,l2).
    """
    # Limit of the recurrence relation
    const = ((fact[l1] + fact[l2] + fact[n+l] + fact[N+L]) -
             (fact[2*l1] + fact[2*l2] + fact[n] + fact[N] +
              fact[2*(n+l)+1] + fact[2*(N+L)+1]))
    const += ((np.log(2*l+1)+np.log(2*L+1)) - ((l+L)*np.log(2)))
    aux_sum = 0.
    major = min((l+l1),(L+l2))
    minor = max(abs(l-l1),abs(L-l2))
    for x in range(minor, major+1):
        # Guard for racah() results that lack .evalf() (plain numbers).
        try:
            racah_aux = racah(l,L,l1,l2, lamda,x).evalf()
        except AttributeError:
            racah_aux = 0
        aux_sum += ((2*x+1)*A_coeff(l1,l,l2,L, x)*racah_aux)
    return np.exp(0.5*const) * aux_sum * ((-1)**(n+l+L-lamda))
def BM_Bracket(n,l,N,L, n1,l1,n2,l2, lamda):
    """Moshinsky bracket <n l, N L; lamda | n1 l1, n2 l2; lamda>.

    Computed by recursion on n1 via matrix_r2 (six non-zero connecting
    states), with BM_Bracket00 as the n1 = n2 = 0 base case; when n1 == 0
    but n2 > 0, the two single-particle labels are swapped with the phase
    (-1)**(L-lamda).  Returns 0 for any negative quantum number or when a
    selection rule fails.
    """
    # Non-negative conditions over the quantum numbers.
    if((n<0) or (l<0) or (N<0) or(L<0)):
        return 0
    # BUG FIX: the original tested (l1<0) twice and never checked l2.
    if((n1<0) or (l1<0) or (n2<0) or (l2<0) or (lamda<0)):
        return 0
    # Energy condition: 2(n1+n2)+l1+l2 == 2(n+N)+l+L.
    if((2*(n1+n2)+l1+l2) != (2*(n+N)+l+L)):
        return 0
    # Angular momentum conservation (triangle rules with lamda).
    if((abs(l1-l2) > lamda) or ((l1+l2) < lamda)):
        return 0
    if((abs(l-L) > lamda) or ((l+L) < lamda)):
        return 0
    # RECURRENCE RELATIONS
    # there are only 6 non-zero combinations of n'l'N'L'
    if(n1 == 0):
        if(n2 == 0):
            # Base case: BMB00.
            return BM_Bracket00(n,l,N,L, l1,l2, lamda)
        else:
            # Permute (n1,l1) with (n2,l2), picking up the exchange phase.
            fase = ((-1)**(L-lamda))
            aux_sum = 0.
            aux_sum += fase * (matrix_r2(n,l,N,L ,n-1,l,N,L, lamda)*
                           BM_Bracket(n-1,l,N,L, n2-1,l2,n1,l1, lamda))
            aux_sum += fase * (matrix_r2(n,l,N,L ,n,l,N-1,L, lamda)*
                           BM_Bracket(n,l,N-1,L, n2-1,l2,n1,l1, lamda))
            aux_sum += fase * (matrix_r2(n,l,N,L, n-1,l+1,N-1,L+1, lamda)*
                           BM_Bracket(n-1,l+1,N-1,L+1, n2-1,l2,n1,l1, lamda))
            aux_sum += fase * (matrix_r2(n,l,N,L, n-1,l+1,N,L-1, lamda)*
                           BM_Bracket(n-1,l+1,N,L-1, n2-1,l2,n1,l1, lamda))
            aux_sum += fase * (matrix_r2(n,l,N,L, n,l-1,N-1,L+1, lamda)*
                           BM_Bracket(n,l-1,N-1,L+1, n2-1,l2,n1,l1, lamda))
            aux_sum += fase * (matrix_r2(n,l,N,L, n,l-1,N,L-1, lamda)*
                           BM_Bracket(n,l-1,N,L-1, n2-1,l2,n1,l1, lamda))
            return np.sqrt(1./(n2*(n2+l2+0.5))) * aux_sum
    else:
        # Normal case: lower n1 by one across the six connecting states.
        aux_sum = 0.
        aux_sum += (matrix_r2(n,l,N,L, n-1,l,N,L, lamda)*
                    BM_Bracket(n-1,l,N,L, n1-1,l1,n2,l2, lamda))
        aux_sum += (matrix_r2(n,l,N,L, n,l,N-1,L, lamda)*
                    BM_Bracket(n,l,N-1,L, n1-1,l1,n2,l2, lamda))
        aux_sum += (matrix_r2(n,l,N,L, n-1,l+1,N-1,L+1, lamda)*
                    BM_Bracket(n-1,l+1,N-1,L+1, n1-1,l1,n2,l2, lamda))
        aux_sum += (matrix_r2(n,l,N,L, n-1,l+1,N,L-1, lamda)*
                    BM_Bracket(n-1,l+1,N,L-1, n1-1,l1,n2,l2, lamda))
        aux_sum += (matrix_r2(n,l,N,L, n,l-1,N-1,L+1, lamda)*
                    BM_Bracket(n,l-1,N-1,L+1, n1-1,l1,n2,l2, lamda))
        aux_sum += (matrix_r2(n,l,N,L, n,l-1,N,L-1, lamda)*
                    BM_Bracket(n,l-1,N,L-1, n1-1,l1,n2,l2, lamda))
        return np.sqrt(1./(n1*(n1+l1+0.5))) * aux_sum
| true |
2fef7df3fd9baba54518a4e7ca2bb893b6930dc9 | Python | wangqi1996/leetcode | /target-sum.py | UTF-8 | 839 | 2.625 | 3 | [] | no_license | class Solution(object):
def findTargetSumWays(self, nums, S):
"""
:type nums: List[int]
:type S: int
:rtype: int
"""
if S > 1000:
return 0
_len = len(nums)
dp = [[0 for _ in range(_len)] for _ in range(2002)]
pre = [nums[0] + 1000, 1000 - nums[0]]
for p in pre:
dp[p][0] = 1
for j, n in enumerate(nums[1:]):
new = []
for p in pre:
new.extend([p + n, p - n])
dp[p + n][j + 1] += dp[p][j]
dp[p - n][j + 1] += dp[p][j]
pre = set(new)
return dp[S + 1000][_len - 1]
if __name__ == '__main__':
    # Smoke test: count sign assignments of the 20-element list summing to 47.
    print(Solution().findTargetSumWays(
        [43, 9, 26, 24, 39, 40, 20, 11, 18, 13, 14, 30, 48, 47, 37, 24, 32, 32, 2, 26],
        47))
| true |
063ffe0dae73b7a74bb48a7b0611a358cef9f8f7 | Python | thruc/practice_python | /sandbox/browser.py | UTF-8 | 441 | 2.609375 | 3 | [] | no_license | import sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEngineView
# Create the Qt application (required before any widget is constructed).
my_app = QApplication(sys.argv)
# Page to load on startup.
initurl = 'https://www.google.co.jp'
# Configure a standalone web-engine view: load the URL, size and place it.
browser = QWebEngineView()
browser.load(QUrl(initurl))
browser.resize(1000,600)
browser.move(100,100)
browser.show()
# Run the Qt event loop and propagate its exit code to the shell.
sys.exit(my_app.exec_())
9ae39e626beeb1e769b1916563d688b3b0a9dce6 | Python | Rivarrl/leetcode_python | /leetcode/1501-1800/1512.py | UTF-8 | 747 | 3.265625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# ======================================
# @File : 1512.py
# @Time : 2021/1/7 23:42
# @Author : Rivarrl
# ======================================
from algorithm_utils import *
class Solution:
    """
    [1512. Number of Good Pairs](https://leetcode-cn.com/problems/number-of-good-pairs/)

    A pair (i, j) is "good" when nums[i] == nums[j] and i < j; a value
    occurring c times contributes c*(c-1)/2 such pairs.
    """
    @timeit
    def numIdenticalPairs(self, nums: List[int]) -> int:
        from collections import Counter
        tally = Counter(nums)
        return sum(count * (count - 1) // 2 for count in tally.values())
if __name__ == '__main__':
    # Examples from the problem statement; expected counts are 4, 6 and 0.
    a = Solution()
    a.numIdenticalPairs(nums = [1,2,3,1,1,3])
    a.numIdenticalPairs(nums = [1,1,1,1])
    a.numIdenticalPairs(nums = [1,2,3])
cdec8daf0c739a9c5c4acacc556130b0a8112518 | Python | pingsoli/python | /python_cookbook_3rd/chap03/05.py | UTF-8 | 985 | 3.484375 | 3 | [] | no_license | # 3.5 Packing and Unpacking Large Integers from Bytes
# Decode a 16-byte blob into an integer; the byte order flips the result.
data = b'\x00\x124V\x00x\x90\xab\x00\xcd\xef\x01\x00#\x004'
#print(len(data))                        # 16
#print(int.from_bytes(data, 'little'))   # 69120565665751139577663547927094891008
#print(int.from_bytes(data, 'big'))      # 94522842520747284487117727783387188
# Round-trip: the same integer packs back into the original bytes.
x = 94522842520747284487117727783387188
#print(x.to_bytes(16, 'big'))            # b'\x00\x124V\x00x\x90\xab\x00\xcd\xef\x01\x00#\x004'
#print(x.to_bytes(16, 'little'))         # b'4\x00#\x00\x01\xef\xcd\x00\xab\x90x\x00V4\x12\x00'
# Big endian vs. little endian on a small 4-byte value.
x = 0x01020304
#print(x.to_bytes(4, 'big'))             # b'\x01\x02\x03\x04'
#print(x.to_bytes(4, 'little'))          # b'\x04\x03\x02\x01'
# For an arbitrarily large integer, size the buffer from bit_length():
# ceil(bits / 8) bytes are needed.
x = 523 ** 23
#print(x)                                # 335381300113661875107536852714019056160355655333978849017944067
#print(x.bit_length())                   # 208
nbytes, rem = divmod(x.bit_length(), 8)
if rem:
    nbytes += 1
print(x.to_bytes(nbytes, 'little'))
| true |
b90332e6c9ca988f190250060876d9447b121900 | Python | Rossbj92/relationship-advice-recommendations | /flask/recommender.py | UTF-8 | 2,617 | 2.671875 | 3 | [] | no_license | from sqlalchemy import create_engine
import load_models as load
import en_core_web_sm
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
import preprocessing as prep
from sklearn.metrics import pairwise_distances
lda, bert, encoder, pretrained_vecs = load.load_models()
def clean_input(text):
    """Preprocess raw form text into lemmas.

    Wraps the text in a one-document preprocessing.NlpPipe pipeline and
    lemmatizes it; the actual cleaning steps (case-folding, stop-word and
    punctuation removal, etc.) live inside NlpPipe — confirm there.

    Args:
        text (str): User entered form text.

    Returns:
        The output of NlpPipe.lemmatize() — presumably a list of token
        lists, one per document (callers index it as result[0]).
    """
    lemmas = prep.NlpPipe([text]).lemmatize()
    return lemmas
def lda_vec(lda, text):
    """Project lemmatized text onto the LDA topic space.

    Args:
        lda: trained LDA model exposing id2word.doc2bow and
            get_document_topics (gensim-style interface).
        text: output of clean_input; indexed as text[0], so presumably a
            list holding one token list — TODO confirm.

    Returns:
        numpy array of shape (1, 20) with topic probabilities; topics the
        model does not report remain 0.  The topic count 20 is hard-coded
        and must match the trained model.
    """
    bow_input = lda.id2word.doc2bow(text[0])
    topics_probs = lda.get_document_topics(bow_input)
    lda_preds = np.zeros((1, 20)) # matrix of topic probabilities
    for topic, prob in topics_probs:
        lda_preds[0, topic] = prob
    return lda_preds
def bert_vec(bert, text):
    """Encode the lemmas with a sentence-transformer model.

    The token list text[0] is re-joined into one space-separated string
    before encoding; the encoder output is returned as a numpy array.
    """
    encoded_preds = np.array(bert.encode(' '.join(word for word in text[0])))
    return encoded_preds
def autoencode_vecs(lda_preds, encoded_preds, encoder):
    """Fuse the LDA topic vector with the BERT embedding and compress them.

    The topic vector is scaled by 15 before column-wise concatenation —
    presumably to balance its magnitude against the BERT features; confirm
    against the autoencoder's training code.  Returns encoder.predict()
    over the combined row.
    """
    bert_lda_combo = np.c_[lda_preds * 15, encoded_preds]
    final_encoding = encoder.predict(bert_lda_combo)
    return final_encoding
def recommend(text, lda=lda, bert=bert, encoder=encoder, pretrained_vecs=pretrained_vecs):
    """Return the five posts most similar to `text` as [{'title', 'url'}, ...].

    Pipeline: lemmatize -> LDA topic vector -> BERT embedding ->
    autoencoder fusion -> cosine distance against every pretrained vector
    -> fetch the five nearest rows from Postgres.

    NOTE(review): database credentials/host are hard-coded below — move
    them to configuration.  The IN-clause is built by f-string; the values
    are integer indices produced locally (not user input), so injection is
    not possible here, but a parameterized query would still be cleaner.
    NOTE(review): the blanket `except TypeError: return` silently turns
    any TypeError anywhere in the pipeline into a None result — confirm
    which failure it was meant to absorb.
    """
    try:
        engine = create_engine('postgresql+psycopg2://postgres:@34.94.44.13:5432/')
        engine.connect()
        processed_text = clean_input(text)
        lda_input = lda_vec(lda, processed_text)
        bert_input = bert_vec(bert, processed_text)
        encoded_vecs = autoencode_vecs(lda_input, bert_input, encoder)
        # Cosine distance from the encoded query to every catalogue vector.
        dists = np.zeros(len(pretrained_vecs))
        for i in range(len(pretrained_vecs)):
            dists[i] = pairwise_distances(encoded_vecs.reshape(1,-1), pretrained_vecs[i].reshape(1,-1), metric = 'cosine')
        # Indices of the five smallest distances (closest posts).
        top_five = dists.argsort()[:5]
        indexes_to_query = tuple(top_five.tolist())
        query = f'SELECT index,title,url FROM data WHERE index IN {indexes_to_query}'
        df = pd.read_sql(query, engine)
        recommended_posts = []
        for idx,post in df.iterrows():
            info = {
                'title': post['title'],
                'url': post['url']
            }
            recommended_posts.append(info)
        return recommended_posts
    except TypeError:
        return
| true |
3dc615366e00d7859b2690c618a5985b5341f57b | Python | legutierr/django-classy-tags | /performance.py | UTF-8 | 1,719 | 3.015625 | 3 | [
"BSD-3-Clause"
] | permissive | """
Tests the performance of django builtin tags versus classytags implementations
of them.
"""
from testdata import pool, Benchmark
import sys
def format_num(num):
    """Render a number with three decimal places; values that %-formatting
    rejects (e.g. header strings) fall back to plain str()."""
    try:
        rendered = "%0.3f" % num
    except TypeError:
        rendered = str(num)
    return rendered
def get_max_width(table, index):
    """Width of the widest formatted cell in column `index` of `table`."""
    widths = (len(format_num(row[index])) for row in table)
    return max(widths)
col_paddings = []
for i in range(len(table[0])):
col_paddings.append(get_max_width(table, i))
for row in table:
# left col
print >> out, row[0].ljust(col_paddings[0] + 1),
# rest of the cols
for i in range(1, len(row)):
col = format_num(row[i]).rjust(col_paddings[i] + 2)
print >> out, col,
print >> out
def run(prnt, iterations):
    """Benchmark every registered tag pair for `iterations` rounds.

    Builds a table of [tagname, django time, classytags time, ratio]
    rows; with prnt=True the table is pretty-printed to stdout, otherwise
    it is returned.  Python 2 print syntax.
    """
    print
    print "Performance of django tags versus classytags. %s iterations." % iterations
    print
    # Populate the tag pool before iterating it.
    pool.autodiscover()
    table = []
    table.append(["Tagname", "Django", "Classytags", "Ratio"])
    for tagname, data in pool:
        bench = Benchmark(data['tag'])
        django = bench.django(iterations)
        classy = bench.classy(iterations)
        # ratio > 1 means the classytags version is slower.
        ratio = classy / django
        # Strip the classytags naming prefix for display.
        if tagname.startswith('ct_'):
            tagname = tagname[3:]
        table.append([tagname, django, classy, ratio])
    if prnt:
        pprint_table(sys.stdout, table)
    else:
        return table
if __name__ == '__main__':
    # Optional single positional argument: the iteration count.
    import optparse
    parser = optparse.OptionParser()
    options, args = parser.parse_args()
    if len(args):
        try:
            iterations = int(args[0])
        # BUG FIX: int() raises ValueError (not TypeError) on a
        # non-numeric string, so the original fallback never triggered
        # and bad input crashed the script.
        except ValueError:
            iterations = 10000
    else:
        iterations = 10000
    run(True, iterations)
8c4392ac9732dc617488c53004c297bace1f3feb | Python | amufti12/adventofcode2020 | /Day3.py | UTF-8 | 467 | 3.546875 | 4 | [] | no_license | with open('day03.input', 'r') as file:
text = file.read().splitlines()
def tree(slopeRow, slopeCol, rows):
    """Count '#' cells hit while descending the grid at the given slope.

    From the top-left corner, advance `slopeRow` rows down and `slopeCol`
    columns right per step; columns wrap (the pattern repeats
    horizontally).  Returns the number of '#' characters encountered.
    """
    hits = 0
    col = 0
    for r in range(0, len(rows), slopeRow):
        if rows[r][col % len(rows[0])] == "#":
            hits += 1
        col += slopeCol
    return hits
# Slope right-3 / down-1 over the puzzle grid.
print(tree(1, 3, text))
# Product of the tree counts over the five listed slopes.
print(tree(1, 1, text) * tree(1, 3, text) * tree(1, 5, text) *
      tree(1, 7, text) * tree(2, 1, text))
256add7c7bbf65778da9103503a35ea054400a4e | Python | SabinaBeisembayeva/WEB-dev | /lab_python_django/python_codingbat/Warmup-1/parrot_trouble.py | UTF-8 | 187 | 3.046875 | 3 | [] | no_license | def parrot_trouble(talking, hour):
if talking and (hour < 7 or hour > 20):
return True
return False
print(
parrot_trouble(True, 6),
parrot_trouble(True, 7),
parrot_trouble(False, 6))
| true |
d0a6852086ebe895f65d54ac72fb27b8e654fa46 | Python | sujiny-tech/preparing-for-coding-test | /leetcode/Range_Sum_of_BST.py | UTF-8 | 738 | 3.640625 | 4 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def rangeSumBST(self, root: TreeNode, low: int, high: int) -> int:
        """Sum the node values falling inside [low, high].

        Walks the whole tree with an explicit stack (iterative DFS),
        adding each value that lies within the inclusive range.
        """
        total = 0
        pending = [root]
        while pending:
            node = pending.pop()
            if node is None:
                continue
            if low <= node.val <= high:
                total += node.val
            pending.append(node.left)
            pending.append(node.right)
        return total
| true |
8b21dd7d09a6d194c03237e34cba8964a5ca889e | Python | jhuseman/CryptoTwitPredict | /ConcatArrayData.py | UTF-8 | 1,843 | 3.03125 | 3 | [] | no_license | #! /usr/bin/env python
"""Concatenates data from different encodings"""
import sys
import json
from PlotData import get_by_keys
def load_arr_from_file(fname):
    """Return the JSON-decoded contents of the file `fname`."""
    with open(fname) as handle:
        return json.loads(handle.read())
def write_arr_to_file(fname, data):
    """Serialize `data` as JSON into the file `fname` (overwriting it)."""
    with open(fname, 'w') as handle:
        handle.write(json.dumps(data))
def concat_two_arrs(arr1, arr2):
    """Concatenate two ``[headers, rows]`` arrays, keeping only shared headers.

    When the header rows already match, the data rows are simply appended;
    otherwise each side is trimmed (via ``get_by_keys``) down to the headers
    present in both before concatenation.
    """
    head1, rows1 = arr1[0], arr1[1]
    head2, rows2 = arr2[0], arr2[1]
    if head1 == head2:
        return [head1, rows1 + rows2]
    # Intersection of headers, preserving arr1's ordering.
    shared = [h for h in head1 if h in head2]
    trimmed1 = rows1 if shared == head1 else get_by_keys(shared, head1, rows1)
    trimmed2 = rows2 if shared == head2 else get_by_keys(shared, head2, rows2)
    return [shared, trimmed1 + trimmed2]
def concat_arr_data_files(out_file, in_files):
    """
    Concatenate the data from every file in ``in_files`` and write the
    result to the single file ``out_file``.

    Only works with arr.json format files (or sent.arr.json).
    """
    # Parenthesised print works under both Python 2 and 3; the original
    # bare ``print`` statements were Python-2 only.
    print("Loading from file {f}".format(f=in_files[0]))
    data = load_arr_from_file(in_files[0])
    # Fold the remaining files into the accumulated data, pairwise.
    for infi in in_files[1:]:
        print("Concatenating data from file {f}".format(f=infi))
        data = concat_two_arrs(data, load_arr_from_file(infi))
    print("Writing to file {f}".format(f=out_file))
    write_arr_to_file(out_file, data)
def get_kwargs():
    """Split ``sys.argv`` into input files (all but the last) and output file.

    CLI contract: ``script.py in1 [in2 ...] out``.  As in the original,
    calling with no arguments raises ``IndexError``.
    """
    cli_args = sys.argv[1:]
    return {"out": cli_args[-1], "in": cli_args[:-1]}
if __name__ == "__main__":
FILEINFO = get_kwargs()
concat_arr_data_files(FILEINFO["out"], FILEINFO["in"])
| true |
ea776aeb32ce099b64b7cd068ec715e7927b0455 | Python | yeastpro/ExpressYeaself | /expressyeaself/tests/test_encode_sequences.py | UTF-8 | 3,414 | 3.296875 | 3 | [
"MIT"
] | permissive | """
A script containing unit tests for the functions in the
encode_sequences.py script.
"""
import expressyeaself.tests.context as context
import numpy as np
import os
test = context.encode_sequences
organize = context.organize_data
def test_encode_sequences_with_method():
    """
    Tests the wrapper function that encodes all promoter sequences
    in an input file by a specified method.
    """
    # Test case 1: homogeneous sequences, no extra padding
    trial_path = 'trial_file.txt'
    oligos = ['AAAA', 'TTTT', 'GGGG', 'CCCC']
    # Write tab-separated (sequence, expression level) lines; the levels form
    # the arithmetic series -0.67 + 5.5, -0.67 + 11.0, ...
    with open(trial_path, 'w') as f:
        el = - 0.67
        num = 5.5
        for oligo in oligos:
            el += num
            f.write(oligo + '\t' + str(el) + '\n')
    organize.write_num_and_len_of_seqs_to_file(trial_path)
    seqs, els, abs_max = test.encode_sequences_with_method(trial_path)
    assert isinstance(seqs, np.ndarray)
    assert isinstance(els, np.ndarray)
    assert isinstance(abs_max, float)
    assert len(seqs) == len(oligos)
    # Expression levels are expected to be rescaled into [-1, 1].
    assert max(els) <= 1
    assert min(els) >= -1
    # Largest raw level written above.  Exact float equality works because the
    # same arithmetic is repeated; fragile if the encoding pipeline changes.
    assert abs_max == - 0.67 + (4 * 5.5)
    # NOTE(review): the temp file is not cleaned up if an assert above fails.
    os.remove(trial_path)
    return
def test_one_hot_encode_sequence():
    """
    Tests the function that encodes the string representation of a
    nucleotide sequence using the 'One-Hot' encoding method.

    Column order of the encoding (established by the expected arrays
    below): A, T, G, N, C.
    """
    # Test case 1: valid upper-case input
    seq = 'AAA'
    one_hot_seq = test.one_hot_encode_sequence(seq)
    assert np.allclose(one_hot_seq, np.array([[1, 0, 0, 0, 0],
                                              [1, 0, 0, 0, 0],
                                              [1, 0, 0, 0, 0]]))
    # Test case 2: valid characters but lower case
    seq = 'acgnt'
    out_seq = test.one_hot_encode_sequence(seq)
    assert np.allclose(out_seq, np.array([[1, 0, 0, 0, 0],
                                          [0, 0, 0, 1, 0],
                                          [0, 0, 1, 0, 0],
                                          [0, 0, 0, 0, 1],
                                          [0, 1, 0, 0, 0]]))
    # Test case 3: invalid characters must be rejected.
    # The original version swallowed the exception with ``pass`` and also
    # passed silently when no exception was raised at all, so this branch
    # could never fail; assert that an exception actually occurs.
    seq = 'XYZXYZXYZ'
    try:
        test.one_hot_encode_sequence(seq)
    except Exception:
        pass
    else:
        raise AssertionError(
            'one_hot_encode_sequence should reject invalid characters')
    return
# def test_resize_array():
# """
# Tests the function that resizes a 2D array to a specified
# desired length by adding or removing vectors from the front
# or end of the array.
# """
# trial_seq = [[1],[2],[3],[4],[5],[6]]
# # Test 1
# out_1 = test.resize_array(trial_seq, resize_to=6, edit_front=True)
# assert isinstance(out_1, list), 'Function should be outputting a list'
# assert out_1 == trial_seq, 'Max length same as length of input list \
# so function should return list unchanged'
# # Test 2
# out_2 = test.resize_array(trial_seq, resize_to=5, edit_front=True)
# assert len(out_2) == 5
# assert out_2 == [[2],[3],[4],[5],[6]]
# # Test 3
# out_3 = test.resize_array(trial_seq, resize_to=7, edit_front=True)
# assert len(out_3) == 7
# assert out_3 == [[0],[1],[2],[3],[4],[5],[6]]
# # Test 4
# trial_seq = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]
# out_4 = test.resize_array(trial_seq_2, resize_to=5, edit_front=False)
# assert out_4 == [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1],[0,0,0,0]]
# # Test 5
# trial_seq = [[1,0,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]]
# out_5
#
# return
| true |
92b525dad6d0bb26396b7c543bc4d8e00092ca87 | Python | WangJS2002/TJCS-Courses | /数据结构课程设计-沈坚/排序算法可视化/SourceCode/ShellSort.py | UTF-8 | 1,119 | 2.609375 | 3 | [
"MIT"
] | permissive | # 希尔排序
from Visual import DataSeq
from math import ceil
# 不稳定排序
def ShellSort(v, ck):
    """Shell-sort the visualised sequence ``v`` in place.

    ``v`` exposes ``data``/``color`` lists, ``length``, ``SetVal`` and the
    ``StartTimer``/``StopTimer``/``Visualize`` hooks.  ``ck == 1`` sorts
    ascending, any other value descending.  The two directions previously
    lived in two near-identical copies of the whole algorithm; they differ
    only in the comparison, so that is parameterised instead.
    """
    # out_of_order(a, b) is True when a must be shifted past b for the order.
    if ck == 1:
        out_of_order = lambda a, b: a > b
    else:
        out_of_order = lambda a, b: a < b
    v.StartTimer()
    n = v.length
    D = ceil(n / 2)  # initial gap; halved after every pass
    while D > 0:
        i = 0
        while i < n:
            tmp = v.data[i]
            tmp_c = v.color[i]
            j = i
            # Gap-insertion step: shift out-of-order elements D slots right.
            while j >= 1 and out_of_order(v.data[j - D], tmp):
                v.SetVal(j, v.data[j - D], v.color[j - D])
                j -= D
            v.SetVal(j, tmp, tmp_c)
            i += D
        D //= 2
    v.Visualize()
    v.StopTimer()
| true |
65ea8e904b24d92ba57705c3493110bec36a34e9 | Python | tammoippen/kongcli | /src/kongcli/_raw.py | UTF-8 | 2,755 | 2.6875 | 3 | [
"MIT"
] | permissive | from typing import Any, Dict, Optional, Tuple, Union
import click
from ._session import LiveServerSession
from ._util import dict_from_dot, json_dumps
@click.command()
@click.option(
    "--dry-run", is_flag=True, help="Only create the request without sending it."
)
@click.option("--header", "-H", type=(str, str), multiple=True, help="Add headers.")
@click.option(
    "--data",
    "-d",
    type=(str, str),
    multiple=True,
    help="Add key-value data points to the payload.",
)
@click.argument("method")
@click.argument("url")
@click.pass_context
def raw(
    ctx: click.Context,
    method: str,
    url: str,
    header: Tuple[Tuple[str, str], ...],
    data: Tuple[Tuple[str, str], ...],
    dry_run: bool,
) -> None:
    """Perform raw http requests to kong.
    You can provide headers using the --header / -H option:
    \b
    - to get the header 'Accept: application/json' use
    -H Accept application/json
    - to get the header 'Content-Type: application/json; charset=utf-8' use
    -H Content-Type "application/json; charset=utf-8"
    \b
    You can provide a json body using the --data / -d option
    -d foo bar # => {"foo": "bar"}
    -d foo true # => {"foo": true}
    -d foo '"true"' # => {"foo": "true"}
    -d foo.bar.baz 2.3 # => {"foo": {"bar": {"baz": 2.3}}}
    -d name bar -d config.methods '["GET", "POST"]'
    # => {"name": "bar", "config": {"methods": ["GET", "POST"]}}
    The first argument to `--data / -d` is the key. It is split by dots
    and sub-dictionaries are created. The second argument is assumed to be
    valid JSON; if it cannot be parsed, we assume it is a string. Multiple
    usages of `--data / -d` will merge the dictionary.
    """
    session: LiveServerSession = ctx.obj["session"]
    headers_dict = {h[0]: h[1] for h in header}
    # Echo a curl-style preview of the outgoing request on stderr, so that
    # stdout stays reserved for the response body only.
    click.echo(f"> {method} {session.prefix_url}{url}", err=True)
    for k, v in {**session.headers, **headers_dict}.items():
        click.echo(f"> {k}: {v}", err=True)
    click.echo(">", err=True)
    payload: Optional[Union[str, Dict[str, Any]]] = None
    if data:
        # Expand dotted keys ("a.b.c") into nested dictionaries.
        payload = dict_from_dot(data)
    if payload:
        # A JSON body implies a JSON content type.
        payload = json_dumps(payload)
        headers_dict["content-type"] = "application/json"
        click.echo("> Body:", err=True)
        click.echo(f"> {payload}", err=True)
    if dry_run:
        click.echo("---<<== Done with dry-run. ==>>---")
        return
    resp = session.request(method, url, headers=headers_dict, data=payload)
    # Mirror the response: status line and headers on stderr, body on stdout.
    click.echo(
        f"\n< http/{resp.raw.version} {resp.status_code} {resp.reason}", err=True
    )
    for k, v in resp.headers.items():
        click.echo(f"< {k}: {v}", err=True)
    click.echo(err=True)
    click.echo(resp.text)
| true |
771e7abeaa581b9544e01d0bb070242667dd1e09 | Python | mailme/mailme.io | /src/mailme/web/forms.py | UTF-8 | 3,175 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | from django import forms
from django.core import validators
from django.utils.translation import ugettext_lazy as _
from mailme.models.user import User
class UserDetailsForm(forms.ModelForm):
    """Profile form exposing the user's editable account fields."""

    class Meta:
        model = User
        fields = ('username', 'email')
class RegistrationForm(forms.ModelForm):
    """Sign-up form: username and email plus a double password entry."""

    password1 = forms.CharField(
        label=_('Password'),
        widget=forms.PasswordInput,
        validators=[validators.MinLengthValidator(5)],
        help_text=_(
            'Your password should not contain your username. '
            'Please use special signs too.'
        )
    )
    password2 = forms.CharField(
        label=_('Password confirm'),
        widget=forms.PasswordInput,
        validators=[validators.MinLengthValidator(5)]
    )
    # Centralised, translatable messages referenced by the clean_* methods.
    error_messages = {
        'username_in_use': _('The username is already in use.'),
        'password_mismatch': _('Passwords don\'t match.')
    }

    class Meta:
        model = User
        fields = ('username', 'email')

    def __init__(self, *args, **kwargs):
        super(RegistrationForm, self).__init__(*args, **kwargs)
        # Reuse each field's label as its input placeholder text.
        for field in ('username', 'email', 'password1', 'password2'):
            self.fields[field].widget.attrs['placeholder'] = self.fields[field].label

    def clean_username(self):
        # Enforce case-insensitive uniqueness of the chosen username.
        username = self.cleaned_data.get('username')
        if User.objects.filter(username__iexact=username).exists():
            raise forms.ValidationError(
                self.error_messages['username_in_use'], code='username_in_use')
        return username

    def clean_password2(self):
        # Both passwords must be present and identical.
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch')
        return password2
class LoginForm(forms.Form):
    """Username/password login form with combined credential validation."""

    username = forms.CharField(
        label=_('Username'),
        widget=forms.TextInput(attrs={'placeholder': _('Username')}))
    password = forms.CharField(
        label=_('Password'),
        widget=forms.PasswordInput(attrs={'placeholder': _('Password')}))
    # Single generic message: does not reveal whether the username exists.
    error_messages = {
        'authentication_mismatch': _(
            u'Please enter a correct username and password. '
            u'Note that the password is case-sensitive.'
        )
    }

    def clean(self):
        # Validate the credential pair as a whole; on success the resolved
        # user object is stashed into cleaned_data for the view to use.
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        user = self.authenticate(username, password)
        if not user:
            raise forms.ValidationError(
                self.error_messages['authentication_mismatch'],
                code='authentication_mismatch')
        self.cleaned_data['user'] = user
        return self.cleaned_data

    def authenticate(self, username, password):
        """Return the matching user on success, otherwise None."""
        if not username or not password:
            return
        try:
            user = User.objects.get(username__iexact=username)
            if user.check_password(password):
                return user
        except User.DoesNotExist:
            # Unknown username: fall through and implicitly return None.
            pass
| true |
6ffc42d01733beeec76d3e9f6849e8a573485ab8 | Python | nvisal1/Python-Algorithms-1 | /decimalToBinary.py | UTF-8 | 635 | 4.46875 | 4 | [] | no_license | """
Name: Nicholas Visalli
Assignment number: 1
Purpose: Convert decimal to binary
"""
"""Recursive function to convret a given decimal to binary.
:param decimal: decimal to convert to binary
:type decimal: str
:returns: error string or binary string
:rtype: str
"""
def decimalToBinary(decimal):
    """Recursively convert a non-negative integer to its binary string.

    :param decimal: non-negative integer to convert
    :type decimal: int
    :returns: binary representation, or an error string for negative input
    :rtype: str
    """
    if decimal < 0:
        return 'Decimal must be a positive integer'
    # Base cases 0 and 1 map directly to a single digit.  The original
    # recursed all the way down to 0, which prepended a spurious leading
    # '0' to every non-zero result (e.g. 5 -> '0101' instead of '101').
    if decimal < 2:
        return str(decimal)
    return decimalToBinary(decimal // 2) + str(decimal % 2)
"""
Orchestrate the program and print result
"""
number = input('Enter a decimal')
result = decimalToBinary(number)
print('Binary representation: ' + result) | true |
0a3670b3fb8ea47273978c2b6f789df64eb7e5eb | Python | ji-eun-k/Algorithm-Study | /스택/17298_오큰수.py | UTF-8 | 637 | 3.28125 | 3 | [] | no_license | import sys
from collections import deque
input = sys.stdin.readline  # faster stdin reads for competitive-programming input
N = int(input())
A = list(map(int, input().split()))
# Initialise "next greater element" answers to -1 (the default when none exists)
NGE = [-1]*N
stack = deque()
# Scan array A from the front, one element at a time
for i in range(N):
    # While the stack is non-empty and the value on top (some earlier A[i-k])
    # is smaller than A[i], pop it and record A[i] as its next greater element
    while stack and (stack[-1][0] < A[i]):
        tmp, idx = stack.pop()
        NGE[idx] = A[i]
    # Push [value, index] when the stack is empty or its top value is >= A[i]
    stack.append([A[i], i])
print(*NGE)
| true |
8df879a5ed4f0aea96416a4764fca07586e9e040 | Python | elenagiraldo3/TFG_Informatica | /main.py | UTF-8 | 3,703 | 2.640625 | 3 | [
"MIT"
] | permissive | """MIT License
Copyright (c) 2021 Elena Giraldo del Viejo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import os
import pathlib
import time
import tensorflow as tf
from PIL import Image, ImageDraw
from object_detection.utils import label_map_util
from object_detection.utils import ops as utils_ops
from utils import show_inference, load_model, filter_vehicles, gap_detection, valid_gaps
# patch tf1 into `utils.ops`
utils_ops.tf = tf.compat.v1
# Patch the location of gfile
tf.gfile = tf.io.gfile
if __name__ == "__main__":
start_time = time.time()
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'mscoco_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
# 1. Detection model
MODEL_NAME = 'efficientdet_d0_coco17_tpu-32'
# MODEL_NAME = 'ssd_mobilenet_v2_320x320_coco17_tpu-8'
# MODEL_NAME = 'efficientdet_d7_coco17_tpu-32'
detection_model = load_model(MODEL_NAME)
# 2. Input of images
# If you want to test the code with your images, just add path of the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = pathlib.Path('images')
TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob("*.jpg")))
threshold = 0.5
cont_vehicles = 0
cont_gaps = 0
for i, image_path in enumerate(TEST_IMAGE_PATHS):
im = Image.open(image_path)
width, height = im.size
draw = ImageDraw.Draw(im)
car_color = (0, 255, 255) # Yellow
gap_color = (248, 0, 0) # Red
# 3. Object detection
objects = show_inference(detection_model, image_path)
# 4. Filter vehicles
boxes = filter_vehicles(
output_dict=objects,
width=width,
height=height,
threshold=threshold
)
cont_vehicles += len(boxes)
# 5. Gap detection
gaps = gap_detection(
boxes=boxes,
width=width,
draw=draw,
car_color=car_color
)
# 6. Filter only valid gaps
solution = valid_gaps(
gaps=gaps,
height=height,
width=width,
draw=draw,
gap_color=gap_color
)
cont_gaps += len(solution)
# 7. Output image
string_path = os.path.basename(image_path)
print(f"Image: {string_path}")
print(f"Number of vehicles: {len(boxes)}")
print(f"Number of gaps: {len(solution)}")
im.save(f"outputs/{string_path}")
print(f"Number of totals vehicles: {cont_vehicles}")
print(f"Number of totals gaps: {cont_gaps}")
print(f"Total time: {(time.time() - start_time)}")
| true |
7a91f367a25d286811cfe41bb3298188ae59ac3b | Python | lebrice/pop-based-training | /epbt/operators/crossover.py | UTF-8 | 3,218 | 2.90625 | 3 | [] | no_license | import copy
import dataclasses
import logging
import random
from collections.abc import MutableMapping, MutableSequence
from dataclasses import Field
from functools import singledispatch
from typing import Any, Dict, List, Tuple, TypeVar, Union, overload
from ..candidate import Candidate
from ..hyperparameters import HyperParameters
from ..utils import Dataclass, T, field_dict
logger = logging.getLogger(__file__)
P = TypeVar("P", bound=List[Candidate])
class CrossoverOperator:
    """Callable crossover operator.

    Wraps the ``crossover`` singledispatch family, taking care of deep-copying
    the inputs (unless ``inplace=True``) and of forwarding the configured swap
    probability to the dispatched implementation.
    """

    def __init__(self, swap_p: float=0.5):
        # Probability with which each attribute/element pair is swapped.
        self.swap_p = swap_p

    @overload
    def __call__(self, obj1: P, inplace:bool=False) -> P:
        pass

    @overload
    def __call__(self, obj1: T, obj2: T, inplace:bool=False) -> Tuple[T, T]:
        pass

    def __call__(self, obj1, obj2=None, inplace=False):
        obj1 = obj1 if inplace else copy.deepcopy(obj1)
        obj2 = obj2 if inplace else copy.deepcopy(obj2)
        if obj2 is not None:
            assert type(obj1) == type(obj2), "Can only crossover between objects of the same type (for now)"
        # BUG FIX: the configured swap probability was previously dropped,
        # so the dispatch functions always fell back to their 0.5 default.
        crossover(obj1, obj2, self.swap_p)
        if obj2 is None:
            return obj1
        return obj1, obj2
@singledispatch
def crossover(obj1: object, obj2: object, swap_p: float=0.5) -> Tuple[object, object]:
    """ Most General case: Randomly swap the attributes on two objects """
    # Fallback for unregistered types: crossover semantics are unknown, so fail loudly.
    raise RuntimeError(f"Cannot perform crossover between objects {obj1} and {obj2}.")
@crossover.register
def crossover_hparam(obj1: HyperParameters, obj2: HyperParameters, swap_p: float=0.5) -> None:
    """ Performs crossover between two dataclass instances in-place.

    Each dataclass field is swapped between the two objects independently
    with probability ``swap_p``.
    """
    ## TODO: unused for now:
    # obj1_fields: Dict[str, dataclasses.Field] = field_dict(obj1)
    # obj2_fields: Dict[str, dataclasses.Field] = field_dict(obj2)
    for field in dataclasses.fields(obj1):
        v1 = getattr(obj1, field.name)
        # Fall back to obj1's value so a field missing on obj2 leaves obj1 unchanged.
        v2 = getattr(obj2, field.name, v1)
        if random.random() <= swap_p:
            setattr(obj1, field.name, v2)
            setattr(obj2, field.name, v1)
@crossover.register
def crossover_pop(pop1: list, pop2: list=None, swap_p: float=0.5) -> None:
    """ Performs crossover either within one or between two `Population` instances in-place.

    When ``pop2`` is omitted, ``pop1`` is split into its even- and odd-indexed
    halves, which are then crossed with each other pairwise.
    """
    if not pop2:
        pop2 = pop1[1::2]
        pop1 = pop1[0::2]
    # NOTE(review): ``assert`` disappears under ``python -O``; raising a
    # ValueError would be a sturdier guard for an empty second half.
    assert pop2, f"pop2 should not be empty or None: {pop1}, {pop2}"
    # zip stops at the shorter population; any trailing candidates are untouched.
    for c1, c2 in zip(pop1, pop2):
        crossover(c1, c2, swap_p)
@crossover.register
def crossover_candidate(candidate1: Candidate, candidate2: Candidate, swap_p: float=0.5) -> None:
    """ Performs crossover between two `Candidate` instances in-place.

    Only the candidates' ``hparams`` are exchanged; no other attribute is touched.
    """
    crossover(candidate1.hparams, candidate2.hparams, swap_p)
@crossover.register
def crossover_dicts(obj1: dict, obj2: dict, swap_p: float=0.5) -> None:
    """ Performs crossover between two `dict` instances in-place.

    Keys present in only one dict are left untouched; values that are
    themselves dicts are crossed recursively rather than swapped wholesale.
    """
    for key, v1 in obj1.items():
        if key not in obj2:
            continue
        v2 = obj2[key]
        # TODO: also crossover the nested dicts?
        if isinstance(v1, dict):
            # NOTE(review): swap_p is not forwarded here, so nested dicts
            # always use the 0.5 default — confirm whether that is intended.
            crossover(obj1[key], obj2[key])
        else:
            if random.random() <= swap_p:
                obj2[key] = v1
                obj1[key] = v2
| true |
462c92358ae2b17216f99214915c56930f8d7b36 | Python | ProgramistaZaDyche/TragedyOfTheCommonsModel | /main.py | UTF-8 | 7,021 | 3.265625 | 3 | [] | no_license | # glowny plik modelu
import funkcje
import wartosci
if __name__ == "__main__":
zadowolenie_populusu = []
zadowolenie_gracza = []
flota_autobusow = []
n = wartosci.liczba_uczestnikow - 1 # liczba pasazerow do poczatkowego rozsadzenia po autobusach
# rozsadzenie graczy po autobusach
for i in range(wartosci.liczba_autobusow):
pojemnosc_autobus = wartosci.pojemnosc_autobus # optymalizacja
if n >= pojemnosc_autobus:
flota_autobusow.append(pojemnosc_autobus)
n -= pojemnosc_autobus
else:
flota_autobusow.append(n)
n = 0
flota_autobusow.sort() # pasazerowie wchodza do najbardziej zapelnionego (ale niepelnego) autobusu)
# logika jest w tym taka, ze autobus ktory jest w czesci zapelniony nadjechal troszeczke szybciej od pustego
# przypisywanie zadowolenia
ostatnia_iteracja = wartosci.liczba_uczestnikow - n - 1 # zeby nie wywolywac dodatkowego len() co obrot petli
autobus_do_oproznienia = 0
autobus_do_odwiedzenia = -1 # autobus do ktorego wsiadzie wylosowany gracz
for i in range(wartosci.liczba_uczestnikow - n):
ilosc_pasazerow = 0
for iterator, autobus in enumerate(flota_autobusow):
if not autobus: # autobus jest pusty, rowny zero
continue
if autobus == wartosci.pojemnosc_autobus: # jesli pierwszy niepusty autobus jest pelny
ilosc_pasazerow = 0
autobus_do_odwiedzenia = iterator - 1
else:
ilosc_pasazerow = autobus
autobus_do_odwiedzenia = iterator
autobus_do_oproznienia = iterator # sprawdzamy, z ktorego autobusu nalezy usunac pasazera
break # usuwamy tylko jednego pasazera z tylko najmniej zapelnionego (niepustego) busa
if autobus_do_odwiedzenia == -1:
zadowolenie_gracza.append((funkcje.zadowolenieSamochod(n + i + 1),wartosci.INT_MIN))
else:
zadowolenie_gracza.append((funkcje.zadowolenieSamochod(n+i+1),
funkcje.zadowolenieAutobus(n+i, ilosc_pasazerow)))
# przypisywanie wartosci zadowolenia populusu
if not i == ostatnia_iteracja:
laczne_zadowolenie_autobusow = 0
for iterator, autobus in enumerate(flota_autobusow):
if not autobus:
continue
# zadowolenie autobusa_do_odwiedzenia mozna obliczyc dopiero przy decyzji wylosowanego gracza
if not iterator == autobus_do_odwiedzenia:
laczne_zadowolenie_autobusow += autobus * funkcje.zadowolenieAutobus(n + i, autobus-1)
zadowolenie_populusu.append((laczne_zadowolenie_autobusow + (flota_autobusow[autobus_do_odwiedzenia] * funkcje.zadowolenieAutobus(n + i + 1, flota_autobusow[autobus_do_odwiedzenia]-1)) + (n+i+1) * funkcje.zadowolenieSamochod(n+i+1),
laczne_zadowolenie_autobusow + ((flota_autobusow[autobus_do_odwiedzenia]+1) * funkcje.zadowolenieAutobus(n + i, flota_autobusow[autobus_do_odwiedzenia])) + (n+i) * funkcje.zadowolenieSamochod(n+i)))
flota_autobusow[autobus_do_oproznienia] -= 1 # odjecie pasazera do nastepnej strategii
# zadowolenie populusu dla ostatniej strategii - tylko wylosowany gracz wchodzi do autobusu
zadowolenie_populusu.append((wartosci.liczba_uczestnikow * zadowolenie_gracza[-1][0],
(wartosci.liczba_uczestnikow - 1) * zadowolenie_gracza[-2][0] + zadowolenie_gracza[-1][1]))
print("Zadowolenie gracza i populusu w zaleznosci od sytuacji na drodze (strategii kolumny):")
for i, s in enumerate(zadowolenie_gracza):
print(f"{i+1}: ({round(s[0], 3)}, {round(s[1], 3)})\t({round(zadowolenie_populusu[i][0], 3)})({round(zadowolenie_populusu[i][1], 3)})")
# tworzenie macierzy wyplat
macierz_wyplat = [[], [], []]
for i, s in enumerate(zadowolenie_gracza):
macierz_wyplat[0].append((float(s[0]), float(zadowolenie_populusu[i][0])))
macierz_wyplat[1].append((float(s[1]), float(zadowolenie_populusu[i][1])))
macierz_wyplat[2].append(i+1)
# usuwanie strategii zdominowanych
maks_index = len(macierz_wyplat[0])
i = 0
while i < maks_index:
j = i + 1
while j < maks_index:
if (macierz_wyplat[0][i][0] >= macierz_wyplat[0][j][0] and macierz_wyplat[0][i][1] >= macierz_wyplat[0][j][1]) and (macierz_wyplat[1][i][0] >= macierz_wyplat[1][j][0] and macierz_wyplat[1][i][1] >= macierz_wyplat[1][j][1]):
del(macierz_wyplat[0][j])
del(macierz_wyplat[1][j])
del(macierz_wyplat[2][j])
j -= 1
maks_index -= 1
elif (macierz_wyplat[0][i][0] <= macierz_wyplat[0][j][0] and macierz_wyplat[0][i][1] <= macierz_wyplat[0][j][1]) and (macierz_wyplat[1][i][0] <= macierz_wyplat[1][j][0] and macierz_wyplat[1][i][1] <= macierz_wyplat[1][j][1]):
del (macierz_wyplat[0][i])
del (macierz_wyplat[1][i])
del (macierz_wyplat[2][i])
maks_index -= 1
j -= 1
j += 1
i += 1
#zaokraglona macierz dla czytelnosci wynikow
zaokraglona_macierz = [[(round(wyplata[0], 3), round(wyplata[1], 3)) for wyplata in macierz_wyplat[0]],
[(round(wyplata[0], 3), round(wyplata[1], 3)) for wyplata in macierz_wyplat[1]]]
print("\nMacierz wyplat po redukcji zdominowanych strategii kolumny")
print("Pierwszy wiersz przedstawia numery strategii kolumny z poczatkowej macierzy wyplat")
print(macierz_wyplat[2])
for wiersz in zaokraglona_macierz:
print(wiersz)
# obliczenie stosunkow zadowolen uzytkownikow samochodow do zadowolen populusu
stosunki_zadowolen = []
for indeks, wyplata in enumerate(macierz_wyplat[0]):
stosunki_zadowolen.append(abs((wyplata[0] * (macierz_wyplat[2][indeks]-1 + n)) / (abs(wyplata[1]) + abs(wyplata[0])*(macierz_wyplat[2][indeks]-1))))
stosunki_zadowolen = [round(stosunek, 3) for stosunek in stosunki_zadowolen]
print("\nWartosci stosunkow zadowolen uzytkownikow samochodow do zadowolen populusu")
print(stosunki_zadowolen)
print("\nindeks: wyplata -- stosunek")
for i in range(len(stosunki_zadowolen)):
print(f"{macierz_wyplat[2][i]}:\t{zaokraglona_macierz[0][i]}\t--\t{stosunki_zadowolen[i]}")
# indeksy
najwyzsze_zadowolenie_ogolu = macierz_wyplat[0].index(max(macierz_wyplat[0], key=lambda x: x[1]))
lista_najwyzszych = []
for wyplata in macierz_wyplat[0]:
if wyplata[1] == macierz_wyplat[0][najwyzsze_zadowolenie_ogolu][1]:
lista_najwyzszych.append(macierz_wyplat[0].index(wyplata))
max_zadowolenie_min_stosunek = stosunki_zadowolen.index(min([stosunki_zadowolen[x] for x in lista_najwyzszych]))
print(f"\nOptymalna strategia jest strategia numer: {macierz_wyplat[2][max_zadowolenie_min_stosunek]}.") | true |
de1242c1adc8601282f269b57714a6bda63a5fb4 | Python | halkua/competiciones--kaggle | /titanic/scripts/titanic_objetivo.8-3-quitaroutliers.py | UTF-8 | 34,625 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 19:45:09 2021
@author: albertomengual
"""
"""
En este script voy a repetir el desarrollo del modelo de clasificación para el
conjunto de datos de la competición de kaggle del titanic.
La puntuación más alta obtenida hasta ahora ha sido aprximadamente 0.78. El primer
objetivo de este script es llegar a una puntuación de 0.8.
Dentro de los objetivos se encuentran también analizar la tecnicas de ingeniería
de variables y asentar las habilidades de programción necesarias.
Para conseguir dichos objetivos voy a seguir las tecnicas del blog de Ahmed Besbes.
"""
"""
RESUMEN DE PASOS A SEGUIR
Será necesario ampliar la descripción de los pasos a medida que se desarrolle el
análisis.
1. Exploratory Data Analysis (DATA EXPLORATION AND VISUALIZATION)
* Data Extraction
* Cleaning (DATA CLEANING)
* Plotting
* Assumptions
2. Ingenieria de Variables (FEATURE ENGINEERING)
* Append test set
* Extracting the passengers titles
* Processing the ages
* Processing Fare
* Processing Embarked
* Processing Cabin
* Processing Sex
* Processing Pclass
* Processing Ticket
* Processing Family
3. Modelling
* Break de combined data set in train and test set
* Use the train set to build a predictive model
+ Feature Selection (FEATURE SELECTION)
+ Trying different models
+ Hyperparameters tuning (HYPERPARAMETERS TUNING)
* Evaluate de model using de train set
* Generate an output file for the submission (SUBMISSION)
* Blending different models
"""
# 0.Importar las librerias
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
# import pylab as plot
from statsmodels.graphics.mosaicplot import mosaic
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest, SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.svm import SVC
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import confusion_matrix, accuracy_score
# 1. Exploratory Data Analysis
## Extract the data
train = pd.read_csv('../datasets/train.csv', index_col='PassengerId')
test = pd.read_csv('../datasets/test.csv')
df = train.copy()
dt = test.copy()
### Basic analysis of the datasets
print(train.shape)
print(test.shape)
# BUG FIX: ``train.head`` printed the bound method object; call it to
# actually show the first rows.
print(train.head())
print(train.describe())
"""
La variable objetivo a predecir es Survived
"""
### Translate the variables and categories (to Spanish column names)
df.columns = ['Superviviente', 'clase', 'nombre', 'genero', 'edad', 'SibSp', 'Parch',
              'tique','tarifa', 'cabina', 'puerto']
dt.columns = ['PassengerId','clase', 'nombre', 'genero', 'edad', 'SibSp', 'Parch', 'tique',
              'tarifa', 'cabina', 'puerto']
df.genero = df.genero.map({'female' : 'mujer',
                           'male' : 'hombre'}, na_action = None)
dt.genero = dt.genero.map({'female' : 'mujer',
                           'male' : 'hombre'}, na_action = None)
dt.set_index("PassengerId", inplace=True)
#dt.drop("PassengerId", axis=1, inplace=True)
### Count the NaNs
print (df.isna().sum())
print (dt.isna().sum())
## Make some plots (Plotting)
### Visualise the survivors by gender
"""
Aqui se pueden sacar las frecuencias relativas con la media de las variables categoricas
porque se trata de una categoria lógica y creando una categoria extra nos basta
para incluirla en la groupby con la media agregada.
"""
# Derived 0/1 column: complement of Superviviente, so sums/means per group
# give absolute counts / relative frequencies of deaths.
df["fallecido"] = 1 - df.Superviviente
#### absolute frequencies
df.groupby("genero").agg("sum")[["Superviviente","fallecido"]].plot(kind="bar",
                                                                    figsize=(25,7),
                                                                    stacked=True,
                                                                    color= ['g','darkorange'],
                                                                    table = True
                                                                    )
#### relative frequencies
df.groupby("genero").agg("mean")[["Superviviente","fallecido"]].plot(kind="bar",
                                                                     figsize=(25,7),
                                                                     stacked=True,
                                                                     color= ['g','darkorange'],
                                                                     table = True
                                                                     )
"""
Queda claro que el genero es una categoría discriminatoria. Las mujeres tienen una
tasa de superviviencia más alta con diferencia.
"""
### Relate survival and gender to the age variable
fig = plt.figure(figsize=(15,15))
sb.set_theme(style="whitegrid")
# Split violins: survivors vs. deceased per gender, scaled by observation count.
sb.violinplot(x="genero", y="edad",
              hue="Superviviente", data = df,
              split = True,
              palette = {0: "darkorange", 1: "g"},
              scale = "count", scale_hue = False,
              inner="stick",
              cut=0
              )
#plt.yticks()
plt.show()
"""
YA NO ME DISGUSTAN LOS DIAGRAMAS DE VIOLIN: me salen valores negativos
con las escalas mejoran un poco.
TODAVIA NO HE TRATADO LOS NAN DE LA EDAD, es posible que haya supervivientes o
fallecidos de algún genero que no aparezcan en los gráficos de violin. Da igual
porque la estrategia de tratamiento de NAN se orienta a no modificar la proporcion
actual.
"""
# =============================================================================
# Se confirma a simple vista que las mujeres tienen una tasa de supervivencia más
# alta que los hombres. Es decir, la mayoría de los supervienvientes son femeninos
# superando a las fallecidas del mismo genero y a los supervivientes masculinos.
# La mayoría de las supervivientes femeninas se encuentran entre los 13 y los 45 años.
# Se mantiene una alta tasa de supervivencia en la mujeres menores de 8 años así
# como en las mayores de 50.
# No exiten fallecidas menores de un año.
# En el genero masculino destaca una tasa de mortalidad muy elevada entre los 14
# y los 50 años.
# Teniendo en cuenta la baja tasa de supervivencia del genero masculino en todas
# las edades se aprecia un incremento relativo de la superviviencia en los niños.
# En particular no fallecen los bebes menores de un año.
# MUJERES Y NIÑOS PRIMERO
# NOTA: Estaría bien comprobar si las familias que se salvaron tenian bebes.
# Es decir, comprobar los familiares de los menores.
# =============================================================================
# =============================================================================
# familia_Baclini = df[df.nombre.str.contains("Baclini")]
#
# familia_Allison = df[df.nombre.str.contains("Allison")]
#
# familia_Hamalainen = df[df.nombre.str.contains("Hamalainen")]
#
# familia_Caldwell = df[df.nombre.str.contains("Caldwell")]
#
# familia_Thomas = df[df.tique == "2625"]
#
# familia_Richards = df[df.tique == "29106"]
#
# =============================================================================
# =============================================================================
# La mayoría de los familiares de los bebes sobrevivieron. Solo en la familia
# Allison, extrañamente fallecieron dos mujeres de 2 y 25 años.
# Las personas de la misma familia tienen el mismo n de tique.
# Cabría hacer la misma comprobación para los niños que sobrevivieron.
# Y para las mujeres que sobrevivieron.
# =============================================================================
"""
¿Cómo lo hago en modo Big Data? ¿Creando clusters?
"""
### Relacionar la supervivencia con la variable tarifa
figure = plt.figure(figsize=(25,7))
plt.hist([df.tarifa[df.Superviviente == 1],df.tarifa[df.Superviviente == 0]],
stacked=True, color = ["g", "darkorange"],
bins = 100, label = ["superviviente", "fallecido"])
plt.xlabel("Tarifa")
plt.ylabel("N de pasajeros")
plt.legend()
plt.show()
# =============================================================================
# La mayoría de los pasajeros se encuentran en unos rangos de tarifa inferiores
# a los 50. Hay es donde están la mayoria de los fallecidos y de los supervivientes.
# En especial se observa una tasa elevada de fallecidos entre las tarifas más bajas,
# por debajo incluso de 25.
# =============================================================================
### La tarifa en función de la edad clasificado por la supervivencia
plt.figure(figsize=(25,15))
ax = plt.subplot()
ax.scatter(df.edad[df.Superviviente == 1], df.tarifa[df.Superviviente == 1],
c='g', s=df.tarifa[df.Superviviente == 1])
ax.scatter(df.edad[df.Superviviente == 0], df.tarifa[df.Superviviente == 0],
c='darkorange', s=df.tarifa[df.Superviviente == 0])
plt.title("Tarifa en funcion de la edad ordenado por supervivencia")
plt.xlim(0,85)
plt.ylim(0,550)
plt.show()
### La tarifa en función de la edad clasificado por la genero
plt.figure(figsize=(25,15))
ax = plt.subplot()
ax.scatter(df.edad[df.genero == "mujer"], df.tarifa[df.genero == "mujer"],
c='fuchsia', s=df.tarifa[df.genero == "mujer"])
ax.scatter(df.edad[df.genero == "hombre"], df.tarifa[df.genero == "hombre"],
c='b', s=df.tarifa[df.genero == "hombre"])
plt.title("Tarifa en funcion de la edad ordenado por genero")
plt.xlim(0,85)
plt.ylim(0,550)
plt.show()
#### La tarifa en función de la edad clasificado por la supervivencia separado por generos
##### Mujeres
plt.figure(figsize=(25,15))
ax = plt.subplot()
ax.scatter(df.edad[df.Superviviente == 1][df.genero == "mujer"],
df.tarifa[df.Superviviente == 1][df.genero == "mujer"],
c='g', s=df.tarifa[df.Superviviente == 1][df.genero == "mujer"])
ax.scatter(df.edad[df.Superviviente == 0][df.genero == "mujer"],
df.tarifa[df.Superviviente == 0][df.genero == "mujer"],
c='darkorange', s=df.tarifa[df.Superviviente == 0][df.genero == "mujer"])
plt.title("Tarifa en funcion de la edad por supervivencia: FEMENINO")
plt.xlim(0,85)
plt.ylim(0,550)
plt.show()
##### Hombres
plt.figure(figsize=(25,15))
ax = plt.subplot()
ax.scatter(df.edad[df.Superviviente == 1][df.genero == "hombre"],
df.tarifa[df.Superviviente == 1][df.genero == "hombre"],
c='g', s=df.tarifa[df.Superviviente == 1][df.genero == "hombre"])
ax.scatter(df.edad[df.Superviviente == 0][df.genero == "hombre"],
df.tarifa[df.Superviviente == 0][df.genero == "hombre"],
c='darkorange', s=df.tarifa[df.Superviviente == 0][df.genero == "hombre"])
plt.title("Tarifa en funcion de la edad por supervivencia: MASCULINO")
plt.xlim(0,85)
plt.ylim(0,550)
plt.show()
"""
Estos graficos están bien para analizar lo que pasa.
Si busco un cluster para crear una nueva variable habria que evitar la variable
Supervivencia.
En la ingenieria de variables Habría que hacer un clustering con las variables:
edad, tarifa, genero y clase.
"""
### La tarifa en funcion de la edad clasificada por clase
plt.figure(figsize=(25,15))
ax = plt.subplot()
ax.set_facecolor("whitesmoke")
ax.scatter(df.edad[df.clase == 1], df.tarifa[df.clase == 1],
c='gold', s=df.tarifa[df.clase == 1])
ax.scatter(df.edad[df.clase == 2], df.tarifa[df.clase == 2],
c='turquoise', s=df.tarifa[df.clase == 2])
ax.scatter(df.edad[df.clase == 3], df.tarifa[df.clase == 3],
c='maroon', s=df.tarifa[df.clase == 3])
plt.title("Tarifa en funcion de la edad ordenado por clase")
plt.xlim(0,85)
plt.ylim(0,550)
plt.show()
#### La tarifa en funcion de la edad clasificada por agrupado por generos
##### Mujeres
plt.figure(figsize=(25,15), facecolor="lightgrey")
ax = plt.subplot()
ax.scatter(df.edad[df.clase == 1][df.genero == "mujer"],
df.tarifa[df.clase == 1][df.genero == "mujer"],
c='gold', s=df.tarifa[df.clase == 1][df.genero == "mujer"])
ax.scatter(df.edad[df.clase == 2][df.genero == "mujer"],
df.tarifa[df.clase == 2][df.genero == "mujer"],
c='turquoise', s=df.tarifa[df.clase == 2][df.genero == "mujer"])
ax.scatter(df.edad[df.clase == 3][df.genero == "mujer"],
df.tarifa[df.clase == 3][df.genero == "mujer"],
c='maroon', s=df.tarifa[df.clase == 3][df.genero == "mujer"])
plt.title("Tarifa en funcion de la edad ordenado por clase: FEMENINO")
plt.xlim(0,85)
plt.ylim(0,550)
plt.show()
##### Hombres
plt.figure(figsize=(25,15), facecolor="lightgrey")
ax = plt.subplot()
ax.scatter(df.edad[df.clase == 1][df.genero == "hombre"],
df.tarifa[df.clase == 1][df.genero == "hombre"],
c='gold', s=df.tarifa[df.clase == 1][df.genero == "hombre"])
ax.scatter(df.edad[df.clase == 2][df.genero == "hombre"],
df.tarifa[df.clase == 2][df.genero == "hombre"],
c='turquoise', s=df.tarifa[df.clase == 2][df.genero == "hombre"])
ax.scatter(df.edad[df.clase == 3][df.genero == "hombre"],
df.tarifa[df.clase == 3][df.genero == "hombre"],
c='maroon', s=df.tarifa[df.clase == 3][df.genero == "hombre"])
plt.title("Tarifa en funcion de la edad ordenado por clase: MASCULINO")
plt.xlim(0,85)
plt.ylim(0,550)
plt.show()
# =============================================================================
# No se aprecia una relación clara entre la edad y la tarifa.
# Se aprecian varios clusters:
# El cluster de los niños menores de 14 años.
# Hay un cluster de mujeres mayores de 14 años que pagan una tarifa superior
# aproximadamente a los 50 donde se agrupa una tasa muy alta de supervivencia.
# En este cluster destaca con mayoria la primera clase.
# De hecho aparece un outlier claro con 25 años y 150 de tarifa aprox.
# Se puede indicar otro cluster de mujeres con tarifas inferiores a los 50 y
# edades superiores a los 13 -14 años donde empieza a aumentar las tasa de
# mortalidad y se hace significativo pertenecer a la 3ª clase.
# En los hombres hay un cluster muy diferenciado en tarifas bajas, inferiores
# a 50 desde los 14 hasta los 40 años sobre todo (y se extiende hasta los 75
# años) donde se concentra la mayor tasa de mortalidad.
# Se podría indicar otro cluster de hombres con edades superiores a los 15
# años y tarifas aproximadamente superiores a 50 donde empiezan a observarse
# ciertas tasas de supervivencia. En este cluster es donde se hace significativa
# la clase: por debajo de la barrera proxima a los 50 los supervivientes masculinos
# son de primera clase. Por encima de esa barrera, pocos son de 3ª clase.
# =============================================================================
### Relacionar la tarifa con las clases
plt.figure()
ax = plt.subplot()
ax.set_ylabel("Tarifa Media")
df.groupby("clase").mean()["tarifa"].plot(kind = "bar", ax = ax)
plt.show()
# =============================================================================
# Obviamente la tarifa y la clase están relacionadas. A mejor clase, mayor tarifa
# =============================================================================
### Relacionar los puertos con la supervivencia
plt.figure(figsize=(15,15))
sb.violinplot(x="puerto", y="tarifa", hue="Superviviente", data = df, split=True,
scale = "count", scale_hue=False, cut=0, inner= "stick",
palette = {0:"darkorange", 1:"g"})
plt.title("Tarifa por puerto según supervivencia")
plt.show()
#### Relacionar las tarifas con los puertos y el genero
plt.figure(figsize=(15,15))
sb.violinplot(x="puerto", y="tarifa", hue="genero",
data = df,
split=True,
scale = "count", scale_hue=False, cut=0, inner= "stick",
palette = {"hombre":"b", "mujer":"m"})
plt.title("Tarifa por puerto según genero")
plt.show()
"""
Hacer diagrama puerto-clase
"""
# =============================================================================
# La mayoría de los pasajeros embarcaron en el puerto S.
# En este puerto la curva de fallecidos y hombres embarcados son muy semejantes.
# Aqui es donde embarcaron la mayoria de los pasajeros de 3ª clase o con tarifas
# inferiores a 50.
# Se observa un grupo de pasajeros masculinos en el puerto S que no pagan por
# su billete y de los cuales solo sobrevive uno (de tercera). DATOS FALTANTES??.
# Sobreviven aproximadamente tantas personas como mujeres embarcan.
# En el puerto C el rango de tarifas es más alto. Aunque embarcan menos pasajeros,
# se observa que sobreviven más pasajeros que el numero de mujeres embarcadas.
# Comparando la curva de los hombres y los fallecidos, se observa un grupo de
# hombres supervivientes cuya tarifa es superior a los 50.
# El puerto Q embarcan solo 77 pasajeros. No se observa ningún patron especial.
# La tasa de supervivencia es inferior a la de fallecidos y embarcan aproximadamente
# el mismo numero de mujeres que de hombres.
# =============================================================================
# =============================================================================
# pasajero_gratis = df[df.tarifa == 0]
#
# pijos_C = df[df.genero == "hombre"][df.tarifa >=50][df.puerto == "C"]
#
# df[df.puerto == "C"][df.genero == "hombre"].groupby("clase").sum()
#
# familias = df[df.duplicated("tique")]
#
# fami_m_1 = familias[df.genero == "mujer"][df.clase == 1]
#
# capitan = df[df.nombre.str.contains("Capt")]
# =============================================================================
# familia_Fortune = df[df.nombre.str.contains("Fortune")]
# #df[df.tique == "19950"]
# familia_Lurette = df[df.tique == "PC 17569"]
# familia_Ryerson = df[(df.tique == "PC 17608") | (df.nombre.str.contains("Ryerson"))]
# familia_Carter = df[(df.tique == "113760")]
# # hay otros carter de segunda que cascan
# famila_shutes = df[df.tique == "PC 17582"]
# familia_Taussig = df[df.tique == "110413"]
# familia_china = df[df.tique == "1601"]
# =============================================================================
# A excepción de los chinos, no se observa que al ser familiar de una mujer de
# primera clase tengas que sobrevivir
# =============================================================================
"""
Despues de descubrir algunas relaciones interesantes entre los datos, vamos a
transformar los datos para que sean manejables por un algoritmo de ML
"""
# 2. INGENIERIA DE VARIABLES
## Cargar los datos
"""
Vamos a unir las matrices de variables del conjunto de entrenamineto y test
"""
# Target vector and feature matrices. Train and test features are stacked so
# that every feature-engineering step below is applied to both consistently.
y = df.Superviviente
X = df.drop(["Superviviente", "fallecido"], axis=1)
x_t = dt.copy()
# DataFrame.append was deprecated and removed in pandas 2.0; concat is the
# drop-in equivalent (row-wise, keeps each part's original index).
X_combi = pd.concat([X, x_t])
## Extraer y simplificar los titulos de los nombres
"""
Esto se hace para calcular las medias de las edades.
¿Se pueden sacar los apellidos?
"""
tratamiento = set()
# apellido = set()
for name in df.nombre:
tratamiento.add(name.split(',')[1].split('.')[0].strip())
# apellido.add(name.split(',')[0].strip())
"""
un set es una lista cuyos elementos no se pueden repetir
split divide la cadena de caracteres por un elemento y devuelve una lista
strip elimina los espacios en blanco
"""
# apellido = sorted(apellido)
tratamiento = sorted(tratamiento)
tratamiento_dict = {
'Capt' : "Oficial",
'Col' : "Oficial",
'Don' : "Noble",
'Dr' : "Oficial",
'Jonkheer' : "Noble",
'Lady' : "Noble",
'Major' : "Oficial",
'Master' : "Chavea",
'Miss' : "Srta",
'Mlle' : "Srta",
'Mme' : "Sra",
'Mr' : "Sr",
'Mrs' : "Sra",
'Ms' : "Sra",
'Rev' : "Oficial",
'Sir' : "Noble",
'the Countess' : "Noble"
}
X_combi["tratamiento"] = X_combi.nombre.map(lambda nombre: nombre.split(',')[1].split('.')[0].strip())
X_combi["tratamiento"] = X_combi.tratamiento.map(tratamiento_dict)
X_combi.tratamiento.isna().sum()
# 0
# 1
## Crear la variable bebé
X_combi["bebe"] = X_combi.edad.map(lambda e: 1 if e <=1 else 0)
## Crear la variable familia de bebe
tiques_bebeDf = set(df.tique[df.edad <= 1])
tiques_bebeDt = set(X_combi.tique[X_combi.edad <= 1])
X_combi["fam_bebe"] = ""
"""
tengo que crear una variable que se llama fam_bebe
si el tique esta en el set de bebe tienes un 1 Y si no es un bebe
"""
def condiFamB_train (fila):
    """Return True when the row shares a ticket with a training-set baby
    (age <= 1) but is not a baby itself; otherwise fall through to None."""
    es_familiar = fila["tique"] in tiques_bebeDf and fila["bebe"] == 0
    if es_familiar:
        return True
def condiFamB_test (fila):
    """Return True when the row shares a ticket with a baby from the combined
    data but is not a baby itself; otherwise fall through to None."""
    es_familiar = fila["tique"] in tiques_bebeDt and fila["bebe"] == 0
    if es_familiar:
        return True
def procesaFam_bebe ():
    """Fill X_combi['fam_bebe'] in place: 1 for passengers who share a ticket
    with a baby but are not babies themselves, 0 otherwise.

    The first len(df) rows (training part) are matched against tickets of
    training-set babies; the remaining rows use tickets from the combined data.
    """
    global X_combi
    # NOTE(review): chained indexing (["fam_bebe"].iloc[...] = ...) triggers
    # pandas' SettingWithCopyWarning. Positional iloc is used deliberately:
    # the train and test halves have overlapping index labels, so label-based
    # .loc slicing would be ambiguous here.
    X_combi["fam_bebe"].iloc[:len(df)] = X_combi.iloc[:len(df)].apply(lambda fila: 1 if condiFamB_train(fila) else 0, axis = 1)
    # X_combi["fam_bebe"].iloc[:len(df)] = X.tique.map(lambda t: 1 if t == data.tique[data.bebe == 1])
    X_combi.fam_bebe.iloc[len(df):] = X_combi.iloc[len(df):].apply(lambda fila: 1 if condiFamB_test(fila) else 0, axis = 1)
procesaFam_bebe()
## Procesar Pijos_C
"""
¿se puede hacer con map?
"""
def condiPijo_C (fila):
    """Return True for male passengers embarked at port C paying more than 55;
    otherwise return None (falsy), so callers can use it as a boolean flag."""
    es_pijo = (
        fila["puerto"] == "C"
        and fila["genero"] == "hombre"
        and fila["tarifa"] > 55
    )
    return True if es_pijo else None
def procesaPijo_C():
    """Add binary column 'pijo_C' flagging wealthy male Cherbourg passengers."""
    global X_combi
    X_combi["pijo_C"] = X_combi.apply(
        lambda fila: 1 if condiPijo_C(fila) else 0, axis=1
    )
## Procesar las edades
df.edad.isna().sum()
# 177
dt.edad.isna().sum()
# 86
ent_ag = X_combi.iloc[:len(df)].groupby(["genero","clase","tratamiento"])
"""
esto crea un objeto pandas
"""
ent_ag_ana = ent_ag.median()
"""
esto ya crea un dataFrame con los valores de las medianas
"""
ent_ag_ana = ent_ag_ana.reset_index()[["genero", "clase", "tratamiento", "edad"]]
"""
Esto ya selecciona las variables que forman parte del DataFrame agrupado.
se acaba de crear un data frame que permite imputar las edades faltantes de acuerdo
con el tratamiento, el genero y la clase.
"""
"""
¿Para que sirve la función reset_index? para seleccionar las variables del
dataFrame agrupado
"""
# =============================================================================
# OJO VIENE LA FUNCION **LAMBDA** PARA RELLENAR LAS NAN DE LAS EDADES
# =============================================================================
def rellena_edad(x):
    """Median age of the (genero, tratamiento, clase) group row *x* belongs to.

    Looks the group up in the pre-aggregated frame `ent_ag_ana`.
    """
    grupo = ent_ag_ana[
        (ent_ag_ana.genero == x["genero"])
        & (ent_ag_ana.tratamiento == x["tratamiento"])
        & (ent_ag_ana.clase == x["clase"])
    ]
    # .values[0]: the filter selects exactly one aggregated row; extract the
    # scalar median age from it.
    return grupo["edad"].values[0]
"""
¿para que el [0]?
"""
def procesado_edad():
    """Impute missing ages in X_combi in place (group medians) and return it."""
    global X_combi
    # Row-wise: keep the known age, otherwise fill from the group median.
    X_combi.edad = X_combi.apply(
        lambda fila: rellena_edad(fila) if np.isnan(fila["edad"]) else fila["edad"],
        axis=1,
    )
    return X_combi
procesado_edad()
"""
ANTES DE ESTO TENGO QUE TENER LAS FAMILIAS DE LOS BEBES (al final lo he hecho con
los tiquets) OJO!!
"""
## Procesar los nombres
X_combi.drop("nombre", axis = 1, inplace=True)
X_combi = pd.get_dummies(X_combi, prefix="tratamiento", columns=["tratamiento"])
## Procesar las tarifas
X_combi.tarifa.fillna(X_combi.iloc[:len(df)].tarifa.mean(), inplace = True)
## Procesar puertos
X_combi.puerto.fillna(X_combi.iloc[:len(df)].puerto.mode()[0], inplace = True)
"""
el metodo mode() crea una serie
"""
X_combi = pd.get_dummies(X_combi, prefix = "puerto", columns = ["puerto"])
## Procesar la cabina
### Crear listas con las letras de las cabinas en los conjuntos de entrenamiento
### y test
cabina_ent, cabina_test = set(), set()
for c in X_combi.cabina.iloc[:len(df)]:
try:
cabina_ent.add(c[0])
except:
cabina_ent.add("U")
for c in X_combi.cabina.iloc[len(df):]:
try:
cabina_test.add(c[0])
except:
cabina_test.add("U")
cabina_ent = sorted(cabina_ent)
cabina_test = sorted(cabina_test)
"""
No aparece ninguna cabina en el conjunto de test que no aparezca en el de entrenamiento.
Si al reves. Yo creo que es un outlier...
"""
### Rellenar los NAN con el valor U
X_combi.cabina.fillna("U", inplace = True)
### Mapear las cabinas por la primera letra
X_combi.cabina = X_combi.cabina.map(lambda x: x[0])
### Obtener las variables dummy
X_combi = pd.get_dummies(X_combi, prefix="cabina", columns=["cabina"])
## Procesar genero
X_combi.genero = X_combi.genero.map({"mujer":1, "hombre": 0})
## Procesar clase
X_combi = pd.get_dummies(X_combi, prefix="clase", columns=["clase"])
## Procesado de tique
# =============================================================================
# Antes de hacer el procesado de tique ¿hay que sacar el cluster de los familiares
# de bebes? Lo dejo para la siguiente versión, voy a terminar según el ejemplo
# a ver que pasa
# =============================================================================
def limpiatique(tique):
    """Return the alphabetic prefix of a ticket code, or 'XXX' if there is none.

    Examples: 'PC 17608' -> 'PC', 'A/5. 2151' -> 'A5', '347082' -> 'XXX'.
    Dots and slashes are stripped first so spelling variants such as 'A/5'
    and 'A5' collapse to the same token.
    """
    # Normalise punctuation, then split on whitespace. split() with no
    # arguments already discards surrounding blanks, so the original
    # per-token strip() pass was redundant.
    palabras = tique.replace('.', '').replace('/', '').split()
    # Keep only tokens that are not purely numeric; the first survivor is the
    # ticket's prefix. Purely numeric tickets have no prefix -> 'XXX'.
    prefijos = [p for p in palabras if not p.isdigit()]
    return prefijos[0] if prefijos else 'XXX'
"""
la función filter trabaja con funciones condicionales y devuelve objetos
booleanos. En este caso le estamos metiendo un map object de una lista.
Para obtener el valor hay que castearlo con la función list:
+ si un elemento de la lista mapeada no es un digito (por completo),
lo devuelve a dicha lista de la variable tique.
Entonce tiene longitud y devuelve el primer elemento de la lista.
+ si es un digito integro (dentro de los strings que pertenecen a la lista
mapeada) no lo mete en la lista de la variable tique. Como la lista
no tiene longitud devuelve XXX.
"""
tiques = set()
for t in X_combi.tique:
tiques.add(limpiatique(t))
print (len(tiques))
X_combi.tique = X_combi.tique.map(limpiatique)
X_combi = pd.get_dummies(X_combi, prefix="tique", columns=["tique"])
## Procesado Familia
df["familia"] = df.Parch + df.SibSp + 1
df.familia = pd.Categorical(df.familia, ordered=True)
plt.figure(figsize=(20,20))
mosaic(df,["familia", "Superviviente"])
plt.show()
# =============================================================================
# Las familias de 2,3 y 4 miembros tienen una tasa de supervivencia mayor
# =============================================================================
X_combi["familia"] = X_combi.Parch + X_combi.SibSp + 1
def procesarFamilia():
    """Derive binary family-size indicators from X_combi.familia:
    solitario (=1), familia_peq (2-4 members), familia_num (5 or more)."""
    global X_combi
    tam = X_combi.familia
    X_combi["solitario"] = tam.map(lambda f: 1 if f == 1 else 0)
    X_combi["familia_peq"] = tam.map(lambda f: 1 if 2 <= f <= 4 else 0)
    X_combi["familia_num"] = tam.map(lambda f: 1 if f >= 5 else 0)
procesarFamilia()
# 3. Modelado
# =============================================================================
# PASOS
# 1. Dividir el dataset combinado
# 2. Usar el conjunto de entrenamiento para construir un modelo predictivo
# 3. Evaluar el modelo con el conjunto de entrenamiento
# 4. Comprobar el modelo usando el conjunto de test y generar el output de envio
# Habra que iterar los puntos 2 y 3 hasta conseguir una evaluación aceptable
# =============================================================================
## Para evaluar el modelo usaremos la cross validation
def procesaResultado (classifier, X_train, y_train, scoring='accuracy'):
    """Mean 5-fold cross-validation score of *classifier* on the training data."""
    resultados = cross_val_score(classifier, X_train, y_train, cv=5, scoring=scoring)
    # cross_val_score returns one score per fold; average them.
    return resultados.mean()
"""
esto YA lo entiendo. ¿return np.mean(xval)? ¿Qué está devolviendo esta función
para hacerle la media? ¿no se puede pedir la media directamente como argumento
de la función?
OK
xval es un array con cinco resultados para cada uno de los kfolds
"""
## Dividir X_combi
# Split the combined feature matrix back into its parts: the first 891 rows
# are the training set (len of the original train.csv), the rest is the
# Kaggle test set.
X = X_combi.iloc[:891]
x_t = X_combi.iloc[891:]
## Selección de variables
# =============================================================================
# BENEFICIOS
# 1. Disminuye la redundancia entre los datos
# 2. Acelera el proceso de entrenamiento
# 3. Reduce el "overfitting"
# Podemos usar el modelo de Random Forest procesar la importancia de las variables.
# Y a su vez descartar variables irrelevantes.
# =============================================================================
classifier = RandomForestClassifier(n_estimators=50, max_features='sqrt')
classifier = classifier.fit(X,y)
### Hacer grafico con las variables y su importancia
variables = pd.DataFrame()
variables["variable"] = X.columns
variables["importancia"] = classifier.feature_importances_
variables.sort_values(by=["importancia"], ascending=True, inplace=True)
variables.set_index("variable", inplace=True)
variables.plot(kind="barh", figsize=(25,25))
plt.show()
### Comprimir los datasets
modelo = SelectFromModel(classifier, prefit=True)
"""
Sigo sin entender que es el prefit
"""
X_redu = modelo.transform(X)
x_t_redu = modelo.transform(x_t)
print(X_redu.shape)
print(x_t_redu.shape)
# Queda por comprobar si usaremos el reducido o el grande
## Probar distintos modelos base
ksvm = SVC(kernel = "rbf", random_state = 0)
logreg = LogisticRegression()
xgb = XGBClassifier(booster = "gbtree", objective='binary:logistic')
rf = RandomForestClassifier()
modelos = [ksvm, logreg, xgb, rf]
for m in modelos:
print("Cross-validation de: {0}".format(m.__class__))
score=procesaResultado(classifier = m,
X_train = X_redu, y_train=y, scoring ="accuracy")
print("resultado CV = {0}".format(score))
print("****")
"""
sigo sin entender los __
"""
# =============================================================================
# Cross-validation de: <class 'sklearn.svm._classes.SVC'>
# resultado CV = 0.6757454020463248
# ****
# Cross-validation de: <class 'sklearn.linear_model._logistic.LogisticRegression'>
# resultado CV = 0.8181721172556651
# ****
# Cross-validation de: <class 'xgboost.sklearn.XGBClassifier'>
# resultado CV = 0.8091833532107211
# ****
# Cross-validation de: <class 'sklearn.ensemble._forest.RandomForestClassifier'>
# resultado CV = 0.8204255853367647
# ****
#
# =============================================================================
## Tuneado de hiperparametros
# =============================================================================
# Vamos a tunear el modelo de RandomForest con el conjunto sin reducir
# =============================================================================
run_gs = False
if run_gs:
parameter_grid = {
'criterion' : ["entropy", "gini"],
'max_depth' : [4,6,8,10,12],
'n_estimators' : [10,50,100,500,1000],
'max_features' : ['sqrt', 'auto', 'log2'],
'min_samples_split' : [2,3,10],
'min_samples_leaf' : [1,2,3,10],
'bootstrap' : [True, False],
'random_state' : [0]
}
bosque = RandomForestClassifier()
cv = StratifiedKFold(n_splits=5)
grid_search = GridSearchCV(bosque,
scoring='accuracy',
param_grid = parameter_grid,
cv = cv,
verbose = 1,
n_jobs = -1
)
grid_search.fit(X,y)
modelo = grid_search
parametros = grid_search.best_params_
print("Mejor resultado: {}".format(grid_search.best_score_))
print("Mejores parametros: {}".format(parametros))
# 0.8383842
else:
parametros = {'bootstrap': True,
'criterion' : 'gini',
'min_samples_leaf' : 1,
'n_estimators' : 10,
'min_samples_split' : 2,
'max_features' : 'log2',
'max_depth' : 6,
'random_state' : 0}
modelo = RandomForestClassifier(**parametros)
modelo.fit(X,y)
print(modelo.get_params())
## Generar el output
output = modelo.predict(x_t).astype(int)
df_output = pd.DataFrame()
df_output["PassengerId"] = test.PassengerId
df_output["Survived"] = output
df_output.set_index("PassengerId", inplace = True)
df_output.to_csv("../datasets/subm_am_py_RF.8+opt_190821.csv")
# 0.78708 el mejor resultado hasta ahora dentro del 3,5% mejor
# 0.77751 al añadir más variables y dejarlo con los mismos parametros lo he estropeado
# 0.77033 con random forest optimizado
# Mezclado de modelos
# =============================================================================
# Hacerlo como una función, no?
# =============================================================================
modelos_entrenados = []
for m in modelos:
m.fit(X,y)
modelos_entrenados.append(m)
predicciones = []
for m in modelos_entrenados:
predicciones.append(m.predict_proba(x_t)[:,1])
"""
esto no lo entiendo ¿que es predict_proba? Supongo que no devuelve 0 o 1, si no
su probabilidad.
AttributeError: predict_proba is not available when probability=False
Hacerlo modelo por modelo
"""
df_predicciones = pd.DataFrame(predicciones).T
"""
tampoco entiendo la .T
"""
df_predicciones["out"] = df_predicciones.mean(axis=1)
"""
no entiendo de que está haciendo la media
"""
df_predicciones["PassengerId"] = test.PassengerId
df_predicciones.out = df_predicciones.out.map(lambda p: 1 if p>= 0.5 else 0)
df_predicciones = df_predicciones[["PassengerId","out"]]
df_predicciones.columns = ["PassengerId", "Survived"]
df_predicciones.set_index("PassengerId", inplace=True)
| true |
392688cc63054184669812117551e9976b7a3206 | Python | c4rl0sFr3it4s/Python_Basico_Avancado | /script_python/ordem_apresentacao_020.py | UTF-8 | 638 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | '''entre no sistema o nome de quatro alunos e saia com uma ordem
aleatória escolhida pelo sistema'''
from random import shuffle
print('{:-^40}'.format('APRESENTAÇÃO ALEATÓRIA'))
aluno_01 = str(input('Digite o nome do Aluno 01: ')).strip().capitalize()
aluno_02 = str(input('Digite o nome do Aluno 02: ')).strip().capitalize()
aluno_03 = str(input('Digite o nome do Aluno 03: ')).strip().capitalize()
aluno_04 = str(input('Digite o nome do Aluno 04: ')).strip().capitalize()
alunos = list([aluno_01, aluno_02, aluno_03, aluno_04])
shuffle(alunos)
print(f'\033[1;34mOrdem dos Alunos.\n{alunos}\033[m')
print('{:-^40}'.format('FIM'))
| true |
776c92763366bae394d0d3ef124aa1ec48b5ccc7 | Python | RYNO8/chess-AI | /backups/3/pygame gui.py | UTF-8 | 17,396 | 2.921875 | 3 | [
"MIT"
] | permissive | from possibleMoves import initBoard, findPiece, movePiece, possibleMoves, evaluateScore, idenPiece, getMove, distance, getKingPos, getRook, protecting, attacking, threatening, inCheck
#from possibleMoves import pawn, knight, bishop, rook, queen, king
import time
import pygame
from copy import deepcopy
from pygame.locals import *
def printBoard(board):
    """Draw the chequered 8x8 board and every piece image onto windowSurface."""
    # Tiles first: even (col+row) squares are WHITE, odd ones GREEN.
    for col in range(8):
        for row in range(8):
            colour = WHITE if (col + row) % 2 == 0 else GREEN
            rect = (col * tileLength + spacing, row * tileLength + spacing,
                    tileLength - spacing, tileLength - spacing)
            pygame.draw.rect(windowSurface, colour, rect)
    # Then blit each piece's sprite on top of its square.
    piece_images = {"p": wP, "P": bP, "n": wN, "N": bN, "b": wB, "B": bB,
                    "r": wR, "R": bR, "q": wQ, "Q": bQ, "k": wK, "K": bK}
    for r, fila in enumerate(board):
        for c, pieza in enumerate(fila):
            if pieza != " ":
                windowSurface.blit(piece_images[pieza],
                                   (c * tileLength + spacing * 2,
                                    r * tileLength + spacing * 2))
def Print(board):
    """Dump the board to stdout, one rank per line, '.' for empty squares."""
    for fila in board:
        print(" ".join(celda.replace(" ", ".") for celda in fila))
    print()
def rateboard(board, turn): #more discenment
    """Static evaluation of *board* from the perspective of *turn*
    (True = white). Higher is better for the side to move.

    The score aggregates, per piece: material weight, mobility, threats vs.
    defences, undefended ("free") attackable pieces, centre occupation and
    reachability, minor-piece development off the back rank, and a bonus for
    a king still on its home square. A won position adds a flat 10000.
    """
    #takes 0.013s to 0.015s
    #instead of playing your turn (turn being “turn”), return rating
    rating = 0
    state = evaluateScore(board, turn)
    if state == "won":
        rating += 10000
    whiteMoves = possibleMoves(board, whiteMove=True) #doing this here saves computational time
    whiteMoveValues = []
    [whiteMoveValues.extend(i) for i in whiteMoves.values()]
    blackMoves = possibleMoves(board, whiteMove=False)
    blackMoveValues = []
    [blackMoveValues.extend(i) for i in blackMoves.values()]
    # Classic material weights; king weighted very high so losing it dominates.
    weightings = {"p":1, "n":3, "b":3, "r":5, "q":9, "k":200}
    #mobilityBias = {"p":1, "n":1, "b":0.25, "r":0.75, "q": 0.1, "k":1} #edit biases
    # Total material per side (lowercase = white, uppercase = black).
    whitePieces = sum([sum([weightings[i.lower()] for i in row if i.replace(" ", "").islower()]) for row in board])
    blackPieces = sum([sum([weightings[i.lower()] for i in row if i.replace(" ", "").isupper()]) for row in board])
    #print(time.time())
    # func maps a piece letter to "own side's" case: identity test below
    # (func(cell) == cell) then means "this piece belongs to *turn*".
    if turn:
        func = lambda x: x.lower()
    elif not turn:
        func = lambda x: x.upper()
    center4 = [(3, 3), (3, 4), (4, 3), (4, 4)]
    for r, row in enumerate(board):
        for c, cell in enumerate(row):
            cellrating = []
            isWhite = False if cell.isupper() else True
            if cell == " ":
                pass
            else:
                defences = protecting(board, r, c)
                attacks = attacking(board, r, c)
                threats = threatening(board, r, c)
                freePieces = {i:attacks[i] for i in attacks if len(protecting(board, i[0], i[1])) == 0} #attacking and not being defended
                attacks = {i:attacks[i] for i in attacks if i not in freePieces} #attacking and not free
                if len(threats) > len(defences):
                    #print(r, c)
                    cellrating.append(-weightings[cell.lower()] * 2)
                cellrating.append(len(attacks) * 1) #this makes queen advance early on
                if threats and min([weightings[i.lower()] for i in threats.values()]) < weightings[cell.lower()]:
                    #piece attackers are worth less than you (will be bad trade)
                    #print(r, c, cell, attacking)
                    cellrating.append(-weightings[cell.lower()]*2)
                #0.1 for doubled, blocked and isolated pawns instead of 1
                #0.1 for pawn past half way line in early game
                cellrating.append(weightings[cell.lower()]*1) #piece is not captured
                if ((r != 7 and cell.islower()) or (r != 0 and cell.isupper())) and cell.lower() in ["b", "n"]: #back row pieces moving forwards
                    #print(cell, r)
                    cellrating.append(2)
                if (cell == "k" and r==7 and c==4) or (cell == "K" and r==0 and c==4): #king hasnt moved
                    cellrating.append(15)
                if freePieces and isWhite == turn: #its your turn, can take free piece
                    # NOTE(review): this loop variable shadows the outer `cell`;
                    # every later use of `cell` in this iteration (including the
                    # final func(cell) side test) then refers to the last free
                    # piece, not the square's own piece — looks like a bug,
                    # confirm intended behaviour before changing.
                    for cell in freePieces.values():
                        cellrating.append(weightings[cell.lower()]*0.5)
                elif freePieces and isWhite != turn: #not your turn
                    #forcing trade (is this good? depends on no. pieces)
                    yourPieces = whitePieces if turn else blackPieces
                    oppPieces = blackPieces if turn else whitePieces
                    # NOTE(review): compares against blackPieces rather than
                    # oppPieces — presumably oppPieces was intended; verify.
                    if yourPieces >= blackPieces: #therefore trading is good
                        cellrating.append(weightings[cell.lower()]*2)
                    else: #your down on pieces
                        pass
                allMoves = whiteMoves if isWhite else blackMoves
                if (r, c) in allMoves: #this piece can move
                    cellrating.append(len(allMoves[(r, c)])*0.1) #mobility
                    #in opening: mobility of bishops, knights is more important than that of the rooks
                    #forward mobility is scored higher than backward mobility
                    #rooks: vertical mobility gets priority over horizontal mobility
                    # NOTE(review): allMoves[(r, c)] is a move *list*, so
                    # `in center4` compares the whole list to single tuples and
                    # can never be True; `any(m in center4 for m in ...)` was
                    # probably intended — confirm.
                    if allMoves[(r, c)] in center4: #piece can move to center
                        cellrating.append(2)
                if (r, c) in center4: #piece in centre
                    cellrating.append(5)
                #print(r, c, cell, cellrating)
                if func(cell) == cell: #your piece
                    rating += sum(cellrating)
                else:
                    rating -= sum(cellrating)
    #castled
    """rookCols = [i[1] for i in findPiece(board, func("r"))]
    try:
        kingCol = findPiece(board, func("k"))[0][1]
    except: #king doesnt exist
        rating -= 100
    else:
        if inCheck(board, not turn):
            rating += 100
        if kingCol < min(rookCols) or kingCol > max(rookCols):
            rating += 4 #castled"""
    #development - early game
    """backCol = 7 if turn else 0
    for row in [1, 2, 5, 6]:
        if board[backCol][row] == " ":
            rating += 3"""
    #king safety
    # Normalise negative zero so the caller never sees -0.0.
    if rating == -0.0: #change -0.0 to 0
        rating = 0
    return rating
def rateBoth(board, turn):
    """Rate *board* for *turn*, short-circuiting when a king has been captured.

    Returns the heuristic rating rounded to 3 digits while both kings are
    on the board, otherwise a huge +/- sentinel deciding the game.
    """
    lowerKing = findPiece(board, "k")
    upperKing = findPiece(board, "K")
    if lowerKing and upperKing:
        # Normal position: defer to the full heuristic evaluation.
        return round(rateboard(board, turn), ndigits=3)
    # At least one king is missing -- the game is decided.
    assert not (lowerKing and upperKing)
    if not lowerKing:
        return -100000000
    if not upperKing:
        return 100000000
def Minimax(board, turn, depth):
    """Pick the best successor board for *turn* via depth-limited minimax.

    Preferred depth is 2: at depth==2 Max runs, at depth==1 Min, and at
    depth==0 the position is rated.  Only the chosen board is returned;
    the score that Max also produces is deliberately discarded.
    """
    newBoard, _ = Max(board, turn, depth, 10000, -10000)
    return newBoard
def Max(board, turn, depth, high, low): #maximise through increasing minimum
    """Maximising half of the alpha-beta search.

    Returns (board, score): the successor of *board* (for the side *turn*)
    with the highest score obtained by recursing into Min, pruning once a
    score exceeds *high*.
    """
    # Leaf: depth exhausted or the game is already decided -- rate in place.
    if depth == 0 or evaluateScore(board, turn) in ["won", "tie"]:
        #print(board, rateBoth(board, turn))
        return board, rateBoth(board, turn)
    moves = possibleMoves(board, whiteMove=turn)
    bestScore = low
    bestBoard = None
    allBoards = []
    # Generate every legal successor position, applying the rook move when
    # the king move is a castle.
    for startPos in moves:
        for endPos in moves[startPos]:
            newboard = movePiece(board, startPos, endPos)
            if getKingPos(newboard, turn) == endPos and distance(startPos, endPos) == 2: #this is castle
                rookPrev, rookNew = getRook(endPos)
                newboard = movePiece(newboard, rookPrev, rookNew)
            threats = threatening(newboard, endPos[0], endPos[1])
            defences = protecting(newboard, endPos[0], endPos[1])
            #print(threats, defences)
            # Forward-prune moves that leave the moved piece under-defended.
            if len(threats) <= len(defences): #its a reasonable move
                allBoards.append((rateBoth(newboard, not turn), newboard))
    possibleBoards = []
    if len(allBoards) > 5:
        cutoff = len(allBoards)//2
    else:
        # NOTE(review): cutoff = -1 makes the [:cutoff] slice below drop the
        # last candidate whenever there are <= 5 boards; Min uses 1000 in the
        # same place -- confirm this asymmetry is intended.
        cutoff = -1
    for sortingScore, boardMoved in sorted(allBoards, key=lambda x:x[0])[:cutoff]: #sort by rating (do best first)
        tempBoard, score = Min(boardMoved, not turn, depth-1, high, bestScore)
        #Print(boardMoved)
        #Print(tempBoard)
        #print(sortingScore, score) #first score pertains to first board
        if not bestBoard: #this helps find the best move when no score is better than low
            possibleBoards.append((score, boardMoved))
        if score > bestScore:
            bestScore = score
            bestBoard = boardMoved
        if score > high: #this is pruning, dont have to search after finding this
            return bestBoard, high
    if bestBoard:
        return bestBoard, bestScore #this doesnt exist, no score is higher than "low"
    else:
        # Fallback when nothing beat *low*: take the highest-scoring candidate.
        # NOTE(review): bestScore (still == low) is returned rather than the
        # fallback's own score -- confirm that is intentional.
        bestBoard, lowerScore = max(possibleBoards, key=lambda x: x[0]) #get the best board based on the score
        return bestBoard, bestScore
def Min(board, turn, depth, high, low): #minimise through decreasing maximum (search space decresaes)
    """Minimising half of the alpha-beta search.

    Returns (board, score): the successor of *board* (for the side *turn*)
    with the lowest score obtained by recursing into Max, pruning once a
    score drops below *low*.
    """
    # Leaf: depth exhausted or the game is already decided -- rate in place.
    if depth == 0 or evaluateScore(board, turn) in ["won", "tie"]:
        return board, rateBoth(board, turn)
    moves = possibleMoves(board, whiteMove=turn)
    bestScore = high
    bestBoard = None
    allBoards = []
    # Generate every legal successor position, applying the rook move when
    # the king move is a castle.
    for startPos in moves:
        for endPos in moves[startPos]:
            newboard = movePiece(board, startPos, endPos)
            if getKingPos(newboard, turn) == endPos and distance(startPos, endPos) == 2: #this is castle
                rookPrev, rookNew = getRook(endPos)
                newboard = movePiece(newboard, rookPrev, rookNew)
            allBoards.append((rateBoth(newboard, not turn), newboard))
    possibleBoards = []
    if len(allBoards) > 5:
        cutoff = len(allBoards)//2
    else:
        cutoff = 1000
    for sortingScore, boardMoved in sorted(allBoards, key=lambda x:x[0])[:cutoff]: #sort by rating (do best first)
        _, score = Max(boardMoved, not turn, depth-1, bestScore, low)
        if not bestBoard:
            # BUG FIX: store tuples as (score, board) so the min() fallback
            # below compares scores; the original appended (board, score) and
            # therefore selected by comparing raw board lists instead.
            possibleBoards.append((score, boardMoved))
        if score < bestScore:
            bestScore = score
            bestBoard = boardMoved
        if score < low: #this is pruning, dont have to search after finding this
            return bestBoard, low
    if bestBoard:
        return bestBoard, bestScore #this doesnt exist, no score is lower than "high"
    else:
        # Fallback when nothing dropped below *high*: take the lowest-scoring
        # candidate (BUG FIX: unpack as (score, board) to match the append).
        lowerScore, bestBoard = min(possibleBoards, key=lambda x: x[0])
        return bestBoard, bestScore
def test():
    """Ad-hoc benchmark: run Minimax once on a fixed midgame position.

    NOTE(review): the bare name `wat` at the end raises NameError on purpose,
    halting the program after the timing run (see the "will terminate program"
    comment where test() is invoked) -- confirm this is intended.
    """
    board = [['R', ' ', ' ', 'Q', 'K', 'B', 'N', 'R'],
            ['P', 'P', 'P', ' ', 'P', 'P', 'P', 'P'],
            [' ', ' ', ' ', ' ', 'B', ' ', ' ', ' '],
            [' ', ' ', ' ', 'P', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', 'N', ' ', 'b', ' ', ' '],
            [' ', ' ', 'n', ' ', ' ', 'n', ' ', ' '],
            ['p', 'p', 'p', ' ', 'p', 'p', 'p', 'p'],
            ['r', ' ', ' ', 'q', 'k', 'b', ' ', 'r']] #shouldn't move the king
    #print(rateBoth(board, False)) #look worse
    #print(rateBoth(board, True))
    start = time.time()
    board = Minimax(board, False, 2)
    Print(board)
    #print(score)
    #print(rateBoth(board, True))
    print(time.time()-start)
    wat
def posToCell(pos):
    """Translate a pixel position (x, y) into the (row, col) board cell it covers."""
    assert type(pos) == tuple
    x_px, y_px = pos[0], pos[1]
    row = int(y_px / tileLength)
    col = int(x_px / tileLength)
    return row, col
def renderText(text, x, y, textColour=(255, 255, 255), textSize=48):
    """Draw *text* centred at pixel (x, y) on the window surface."""
    font = pygame.font.SysFont(None, textSize)
    # Pad with spaces and render over an opaque BACKGROUND fill -- presumably
    # so a re-render at the same spot covers the previous text (confirm).
    text = font.render(" " + text + " ", True, textColour, BACKGROUND)
    textRect = text.get_rect()
    textRect.centerx, textRect.centery = x, y
    windowSurface.blit(text, textRect)
#test() #will terminate program wunce finished
# --- colours and board geometry ---
WHITE = (255, 255, 255)
GREEN = (0, 255, 0) #(165, 42, 42)
BLACK = (0, 0, 0)
BACKGROUND = BLACK
tileLength = 70
spacing = 5
running = True
imgLength = 55
#load all pieces
pygame.init()
wP = pygame.transform.scale(pygame.image.load('white-pawn.png'), [imgLength, imgLength])
bP = pygame.transform.scale(pygame.image.load('black-pawn.png'), [imgLength, imgLength])
wN = pygame.transform.scale(pygame.image.load('white-knight.png'), [imgLength, imgLength])
bN = pygame.transform.scale(pygame.image.load('black-knight.png'), [imgLength, imgLength])
wB = pygame.transform.scale(pygame.image.load('white-bishop.png'), [imgLength, imgLength])
bB = pygame.transform.scale(pygame.image.load('black-bishop.png'), [imgLength, imgLength])
wR = pygame.transform.scale(pygame.image.load('white-rook.png'), [imgLength, imgLength])
bR = pygame.transform.scale(pygame.image.load('black-rook.png'), [imgLength, imgLength])
wQ = pygame.transform.scale(pygame.image.load('white-queen.png'), [imgLength, imgLength])
bQ = pygame.transform.scale(pygame.image.load('black-queen.png'), [imgLength, imgLength])
wK = pygame.transform.scale(pygame.image.load('white-king.png'), [imgLength, imgLength])
bK = pygame.transform.scale(pygame.image.load('black-king.png'), [imgLength, imgLength])
dot = pygame.transform.scale(pygame.image.load('dot.png'), [imgLength, imgLength])
icon = pygame.image.load("icon.png")
# --- window setup and initial render ---
windowSurface = pygame.display.set_mode((900, 600), pygame.RESIZABLE)
pygame.display.set_caption("Chess")
pygame.display.set_icon(icon)
windowSurface.fill(BACKGROUND)
renderText("CHESS: By Ryan Ong", 900, 100, textSize=60)
board = initBoard()
# Alternative starting position kept for debugging (bare string literal,
# has no runtime effect).
"""board = [['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],
        ['P', 'P', 'P', ' ', 'P', 'P', 'P', 'P'],
        [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
        [' ', ' ', ' ', 'P', ' ', ' ', ' ', ' '],
        [' ', ' ', ' ', 'p', ' ', 'b', ' ', ' '],
        [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
        ['p', 'p', 'p', ' ', 'p', 'p', 'p', 'p'],
        ['r', 'n', ' ', 'q', 'k', 'b', 'n', 'r']]"""
printBoard(board)
renderText(str(rateBoth(board, True)), 700, 300)
pygame.display.update()
# --- game-flow state ---
prevMove = (None, None)
whiteMove = True #start on white
onePlayer = True
compStarts = False
# When the computer opens the game, let it move once before the event loop.
if onePlayer and compStarts:
    board = Minimax(board, whiteMove, 2)
    whiteMove = not whiteMove
    printBoard(board)
    renderText(str(rateBoth(board, False)), 700, 300)
    pygame.display.update()
# --- main event loop: handle clicks, apply moves, let the AI reply ---
while running:
    for event in pygame.event.get():
        if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            pos = pygame.mouse.get_pos()
            currentMove = posToCell(pos)
            moves = possibleMoves(board, whiteMove=whiteMove)
            if currentMove == prevMove:
                # Clicking the selected piece again deselects it.
                printBoard(board)
                prevMove = (None, None)
            elif currentMove in moves:
                # Selecting a movable piece: highlight its destinations.
                printBoard(board)
                for nR, nC in moves[currentMove]:
                    windowSurface.blit(dot, (nC*tileLength+2*spacing, nR*tileLength+2*spacing))
                prevMove = currentMove
            else:
                #this might be where the user wants to move the piece
                if (prevMove in moves and currentMove in moves[prevMove]): #valid move
                    board = movePiece(board, prevMove, currentMove)
                    if getKingPos(board, whiteMove) == currentMove and distance(prevMove, currentMove) == 2: #this is castle
                        rookPrev, rookNew = getRook(currentMove)
                        board = movePiece(board, rookPrev, rookNew)
                    prevMove = (None, None)
                    printBoard(board)
                    renderText(str(rateBoth(board, whiteMove)), 700, 300)
                    if evaluateScore(board, whiteMove) in ["won", "lost", "stalemate"]:
                        pygame.quit()
                    whiteMove = not whiteMove #done turn, change to other player
                    print(board)
                    if onePlayer:
                        # Show the human move, then let the AI answer.
                        pygame.display.update()
                        board = Minimax(board, whiteMove, 2)
                        whiteMove = not whiteMove
                        printBoard(board)
                        renderText(str(rateBoth(board, whiteMove)), 700, 300)
                        if evaluateScore(board, whiteMove) in ["won", "lost", "stalemate"]:
                            pygame.quit()
            pygame.display.update()
        if event.type in [QUIT,K_ESCAPE]:
            pygame.quit()
            #sys.exit()"""
#oneplayer(False)
| true |
3a083d0bbce6496884ab166989d99c97903a8216 | Python | kemeng1417/spider_study | /day05_异步爬虫/1_线程池操作.py | UTF-8 | 1,149 | 3.296875 | 3 | [] | no_license | import time
from multiprocessing.dummy import Pool
import requests
# 普通的同步请求
# urls = [
# 'http://127.0.0.1:5000/jay',
# 'http://127.0.0.1:5000/jj',
# 'http://127.0.0.1:5000/hh',
# ]
#
#
# def get_request(url):
# page_text = requests.get(url=url).text
# print(len(page_text))
#
#
# if __name__ == '__main__':
# start = time.time()
# for url in urls:
# get_request(url)
# end = time.time()
# print(end - start)
# 基于线程池的异步请求
# Endpoints of the local test server used for the thread-pool demo; each one
# simulates a slow response.
urls = [
    'http://127.0.0.1:5000/jay',
    'http://127.0.0.1:5000/jj',
    'http://127.0.0.1:5000/hh',
]
def get_request(url):
    """Fetch *url* and return the length of the response body text."""
    response_text = requests.get(url=url).text
    return len(response_text)
if __name__ == '__main__':
    start = time.time()
    pool = Pool(3)  # start a pool of three worker threads
    # arg 1: the callback function
    # arg 2: an iterable (the url list)
    # effect: each element of the iterable is handed to the callback in turn,
    # and the callbacks run asynchronously across the pool's threads
    # map() returns the list of all values produced by the callback
    print(pool.map(get_request, urls))
    print(time.time()-start)
| true |
74083211112f3d560babb2b0ee768fb171d0283b | Python | Letractively/simulation-modeling | /interface.py | UTF-8 | 2,768 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'Модуль интерфейса'
from lxml import etree
import os
def serialize_input(values, errors=None, node=None):
    '''Serialize the input data *values* into an XML tree rooted at *node*,
    annotating elements with the matching *errors*.'''
    # Avoid a mutable default argument; treat "no errors" as an empty dict.
    if errors is None:
        errors = {}
    # If the parent element was not supplied, create the root <input> element.
    if node is None:
        node = etree.Element('input')
    if isinstance(values, dict):
        # The current node is a group of nodes: recurse into every child.
        for child, value in values.items():
            try:
                error = errors[child]
            except (KeyError, TypeError):
                # No error recorded for this child, or *errors* is a leaf
                # value rather than a mapping.
                error = {}
            serialize_input(value, error, etree.SubElement(node, child))
    else:
        # The current node is a scalar value.
        node.text = unicode(values)
        if errors:
            node.set('error', unicode(errors))
    return node
def serialize_output(results):
    'Prepare the output: mirror the *results* structure as an XML tree.'
    def spider(variable, parent):
        # Depth-first walk: dicts become named children, sequences become
        # repeated <item> elements, anything else becomes text content.
        if isinstance(variable, dict):
            for key, value in variable.items():
                spider(value, etree.SubElement(parent, key))
        elif isinstance(variable, (tuple, list)):
            for value in variable:
                spider(value, etree.SubElement(parent, 'item'))
        else:
            parent.text = unicode(variable)
        return parent
    return spider(results, etree.Element('output'))
def notfound(model):
    'Return a plain "not found" message for the requested *model*.'
    message = 'NOT FOUND: ' + model
    return message
def view(root, model=None, accepts=None, input=None, output=None, errors=None, fatal=None, path=''):
    '''Main interface procedure: generates the page content.

    *root* is the template directory root, *model* the model module whose
    docstring supplies the page title.  *accepts* is currently unused.
    '''
    # Fatal error: short-circuit with a plain-text report.
    if fatal is not None:
        return 'FATAL: ' + str(fatal)
    # Avoid mutable default arguments; coerce missing mappings to empty dicts.
    input = {} if input is None else input
    output = {} if output is None else output
    errors = {} if errors is None else errors
    tree = etree.Element('model')
    tree.set('title', model.__doc__)
    tree.set('sitename', u'Имитатор')
    tree.set('path', path)
    input = serialize_input(input, errors=errors)
    tree.append(input)
    if output:
        output = serialize_output(output)
        tree.append(output)
    # Apply the model-specific XSLT stylesheet to the assembled tree.
    xsl = etree.parse(root + '/templates/' + model.__name__ + '.xsl')
    transform = etree.XSLT(xsl)
    return etree.tostring(transform(tree))
def presenter(**basic_args):
    'Curry default keyword arguments for a more convenient interface.view() call.'
    def viewer(**args):
        # Merge per-call arguments over the defaults WITHOUT mutating
        # basic_args: the original updated the shared dict in place, so
        # arguments from one viewer() call leaked into every later call.
        merged = dict(basic_args)
        merged.update(args)
        return view(**merged)
    return viewer
| true |
1a2eebb2525e23309f315bbca378449531769593 | Python | FZWYGX/DingDianScrapy | /DDXiaoShuo/DDXiaoShuo/items.py | UTF-8 | 906 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ChapterItem(scrapy.Item):
    """
    Stores a novel together with its chapter listing (novel -> chapters).
    """
    # URL of the novel's chapter-index page
    url = scrapy.Field()
    # genre / category of the novel
    style = scrapy.Field()
    # title of the novel
    name = scrapy.Field()
    # author of the novel
    author = scrapy.Field()
    # list of all chapter titles of the novel
    chapters = scrapy.Field()
    # dict combining the chapter titles with their URLs
    chapters_urls = scrapy.Field()
class ContentItem(scrapy.Item):
    """
    Stores a single chapter and its text (chapter -> content).
    """
    # URL of this specific chapter of the novel
    url = scrapy.Field()
    # title of the novel
    name = scrapy.Field()
    # title of this chapter
    chapter = scrapy.Field()
    # text content of this chapter
    content = scrapy.Field()
3319207ebd358cdfb27569b084fcdb9cb797dcd5 | Python | TheCDC/NSWC_Spring2018_Team1 | /flask_webapp/views.py | UTF-8 | 2,303 | 2.609375 | 3 | [] | no_license | """Class based views for the webapp."""
import datetime
import flask
from flask.views import MethodView
from werkzeug import secure_filename
from flask_webapp import forms
from flask_webapp import ocr
import os
class IndexView(MethodView):
    """The home page view: renders the upload form and OCRs uploaded images."""
    # Monotonically increasing id used to de-collide uploaded file names.
    # NOTE(review): a plain class-attribute increment is not thread-safe under
    # a multi-threaded WSGI server -- confirm this is acceptable here.
    count = 1

    def get_template_name(self):
        """Return a string containing the name of this view's template."""
        return 'index.html'

    def get_context(self, request, **kwargs):
        """Build the template context from the request, merging any overrides."""
        # instantiate the form. It gets what it needs from flask.request
        form = forms.UploadForm()
        context = dict(form=form)
        # overwrite context with any keyword arguments
        context.update(**kwargs)
        return context

    def get(self, **kwargs):
        """Handle GET requests by rendering the index template."""
        # get_context already merges kwargs, so no second update is needed
        # (the original applied the same kwargs twice).
        context = self.get_context(flask.request, **kwargs)
        return flask.render_template(self.get_template_name(), context=context)

    def post(self, **kwargs):
        """Handle an uploaded image: save it, OCR it, extract the serial number."""
        form = forms.UploadForm()
        if form.validate_on_submit():
            unique_id = IndexView.count
            IndexView.count += 1
            # Renamed from `file` to avoid shadowing the builtin.
            upload = flask.request.files[form.file.name]
            name = str(unique_id) + secure_filename(upload.filename)
            old_path = os.path.join(os.path.dirname(__file__), 'uploads', name)
            # save image to file
            upload.save(old_path)
            # get OCR output from saved image file
            output = ocr.ocr_file(old_path)
            # process ocr output to extract serial number
            serial_number = ocr.filter_serial(output)
            if not serial_number:
                serial_number = 'NOSERIAL'
            # rename the saved file to include the extracted serial number and
            # the date
            new_name = str(serial_number) + ' ' + \
                str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S'))
            new_path = os.path.join(
                os.path.dirname(__file__), 'uploads', new_name)
            os.rename(old_path, new_path)
            return self.get(successful_upload=True, data=upload, serial_number=serial_number, ocr_output=output)
        return self.get(successful_upload=False)
| true |
b63254f6393adb0a465bbda6ccbab5dcb9439582 | Python | wujeevan/grade_prediction_svm | /step1.py | UTF-8 | 660 | 2.640625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import xlrd
# Columns of interest in the spreadsheet: student id, course name, score.
colnames = ('NO_', 'COURE_NAME', 'ZHSCORE')
filename = 'train.xls'
book = xlrd.open_workbook(filename, 'r')
sheet = book.sheet_by_index(0)
result = dict()
# Collect the data rows (row 1 onward) of every wanted header column.
for i in range(sheet.ncols):
    if sheet.cell_value(0,i) in colnames:
        result[sheet.cell_value(0,i)] = sheet.col_values(i,1)
result = pd.DataFrame(result)
# Treat empty cells as missing values and drop incomplete rows.
result = result.replace('', np.nan)
result = result.dropna()
# Pivot to a student x course score matrix, 0 where no score exists.
result = result.pivot_table(index=colnames[0], columns=colnames[1], values=colnames[2], fill_value=0)
# Drop courses graded for fewer than half of the students.
t = result[result > 0].count()
idx = t[t < len(result)/2].index
result = result.drop(idx, 1)
# NOTE(review): scores above 100 appear to encode an offset of +60 --
# confirm the intended meaning before relying on this correction.
result[result > 100] = result - 60
25f74fa4390f99b7483ae0e69d9a1881564d059e | Python | HebbaleLabs/python-manipulate-string-into-anagram-assessment | /stringstoanagrams.py | UTF-8 | 676 | 3.546875 | 4 | [] | no_license |
def anagram(input_string):
    """Return the minimum number of character changes needed to make the
    first half of *input_string* an anagram of the second half.

    Returns -1 when the string has odd length, since it cannot be split
    into two equal halves.
    """
    #
    # IMPORTANT:
    # 1. Write your solution in this function
    # 2. Do not make changes to the function signature
    # 3. Use the 'pip-install' command in the IDE, to install any requirements. This takes a few seconds.
    # 4. Use the 'run' command in the IDE, to run the main method. You can also invoke the main method from the terminal.
    # 5. Use the 'run-tests' command in the IDE, to run unit tests. The 'pip-install' command must be run before this.
    from collections import Counter  # local import keeps the module dependency-free

    if len(input_string) % 2 != 0:
        # An odd-length string has no equal split, so no anagram is possible.
        return -1
    half = len(input_string) // 2
    first, second = input_string[:half], input_string[half:]
    # Characters over-represented in the first half relative to the second
    # must each be changed; Counter subtraction keeps only positive surpluses.
    surplus = Counter(first) - Counter(second)
    return sum(surplus.values())
def main():
    """Demo entry point: report the anagram distance for a sample word."""
    changes = anagram('anna')
    print('Number of changes for anagram: {0}\n'.format(changes))


if __name__ == '__main__':
    main()
e7fea0f48902f111fa9ea349ba4331cbeb66e2f1 | Python | nicolair/IPT2 | /resolsystlin_cours.py | UTF-8 | 2,873 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 6 18:18:40 2014
@author: remy
"""
import numpy
def chercherIndPiv(A, i):
    """Return the index (>= i) of the row whose entry in column *i* has the
    largest absolute value -- the partial-pivoting row for elimination step i."""
    size = len(A)  # matrix size
    best = i  # provisional index of the largest value
    for row in range(i + 1, size):
        if abs(A[row][i]) > abs(A[best][i]):
            best = row
    #print("indice pivot", i, best)
    return best
def permuter(A, i, j):
    """Swap rows *i* and *j* of matrix A in place, column by column."""
    width = len(A[0])
    for col in range(width):
        A[i][col], A[j][col] = A[j][col], A[i][col]
def ajouter(A, i, j, coeff):
    """In place, add coeff times row *j* of A to row *i* (Li += coeff * Lj).

    Note: iterates len(A) columns, so A is assumed square.
    """
    size = len(A)
    for col in range(size):
        A[i][col] += coeff * A[j][col]
def resol(A, Y):
    """Solve the linear system A.X = Y (Y a column vector) by Gaussian
    elimination with partial pivoting.

    Both A and Y are mutated in place; the solution is returned as a flat
    list X.
    """
    size = len(A)
    # Forward elimination with row pivoting.
    for step in range(size):
        pivot = chercherIndPiv(A, step)
        if pivot > step:
            permuter(A, step, pivot)
            Y[step][0], Y[pivot][0] = Y[pivot][0], Y[step][0]
        # Zero out the entries below the pivot in this column.
        for row in range(step + 1, size):
            factor = -A[row][step] / A[step][step]
            ajouter(A, row, step, factor)
            Y[row][0] += factor * Y[step][0]
    # Back substitution on the (now) upper-triangular system.
    X = [0.] * size
    X[size - 1] = Y[size - 1][0] / A[size - 1][size - 1]
    for row in range(size - 2, -1, -1):
        X[row] = (Y[row][0] - sum(A[row][k] * X[k] for k in range(row + 1, size))) / A[row][row]
    return X
#A =[[2., 2., -3.],[-2., -1., -3.],[6.,4.,4.]]
#Y = [[2], [-5.], [16.]]
#A=[[7., 1.,11.,10.],[2.,6.,5.,2.],[8.,11.,3.,8.],[6.,9.,3.,6.]]
#Y=[[29.001],[15.],[30.],[24.]]
#A=[[4.,-2.,1.],[-2.,4.,-2.],[1.,-2.,4.]]
#Y=[[11.],[-16.],[17.]]
#print("test resol",resol(A,Y))
#A =[[2., 2., -3.],[-2., -1., -3.],[6.,4.,4.]]
#Y = [[2], [-5.], [16.]]
#A=[[7., 1.,11.,10.],[2.,6.,5.,2.],[8.,11.,3.,8.],[6.,9.,3.,6.]]
#Y=[[29.001],[15.],[30.],[24.]]
#A=[[4.,-2.,1.],[-2.,4.,-2.],[1.,-2.,4.]]
#Y=[[11.],[-16.],[17.]]
#print("test numpy",numpy.linalg.solve(A,Y))
# random generation of 5 points in [1, 10) x [1, 10)
import random as rdm
M = [ (1+rdm.random()*9 , 1+rdm.random()*9) for i in range(5) ]
# build the system matrix (one row of quadratic terms per sample point)
A = [0]*5
B = [0]*5
for i in range(5):
    x,y = M[i]
    A[i] = [x*x , y*y, x*y, x, y]
    B[i] = [x*x , y*y, x*y, x, y]
#print(A)
#print(B)
# right-hand side of the system
Y = [[-1.],[-1.],[-1.],[-1.],[-1.]]
Z = [[-1.],[-1.],[-1.],[-1.],[-1.]]
#print(Y)
# solve once with the home-made solver, once with numpy for comparison
E = resol(A,Y)
EE = numpy.linalg.solve(B,Z)
print("maison",E)
print(Y)
print("numpy",EE)
print(Z)
def phi(E, P, t):
    """Step from point P along the direction (1, t) by the scale dictated by
    the quadratic coefficients E = (a, b, c, d, e); returns the new point."""
    a, b, c, d, e = E
    x, y = P
    numerator = 2.*a*x + c*y + d + (2.*b*y + c*x + e)*t
    denominator = a + (b*t + c)*t
    scale = -numerator/denominator
    return x + scale, y + t*scale
def F(E, P):
    """Evaluate a*x**2 + b*y**2 + c*x*y + d*x + e*y + 1 for coefficients E
    at point P (zero when P lies on the fitted curve)."""
    x, y = P
    a, b, c, d, e = E
    value = a*x*x + b*y*y + c*x*y + d*x + e*y + 1
    return value
#t = (M[1][1] - M[0][1])/(M[1][0] - M[0][0])
#print(M[1],phi(E,M[0],t))
import matplotlib.pyplot as plt
plt.clf()
# plot the five sample points; F(E, P) should print ~0 for fitted points
for P in M:
    plt.plot(P[0],P[1],'bo')
    print(F(E,P))
# slopes of the lines joining M[0] to each of the other points
t = [(M[i+1][1] - M[0][1])/(M[i+1][0] - M[0][0]) for i in range(4)]
tmin = min(t)
tmax = max(t)
#print(tmin, tmax)
# sweep the slope range, padded by p steps on each side, tracing the curve
n = 100
p = 5
pas = (tmax - tmin)/n
tt = tmin - p*pas
for i in range(n+2*p):
    P = phi(E,M[0],tt)
    plt.plot(P[0],P[1],'ro')
    tt += pas
| true |
deb98a33567378503870f44ef44518e050431c9e | Python | ricardokirkner/beehive | /samples/basic.py | UTF-8 | 1,740 | 2.765625 | 3 | [] | no_license | import pygame
from beehive.core import Agent, SimpleVehicle
from beehive.geometry import Vector3
from beehive.steering import Seek, Flee
from honeycomb import Game
from honeycomb.controller import GameController
from honeycomb.event import Event
from honeycomb.model import GameModel
from honeycomb.view import GameView
# String identifiers for the custom events exchanged between the
# controller, model and view defined below.
SET_TARGET = 'set_target'
MODEL_UPDATE_START = 'model_update_start'
MODEL_UPDATE_COMPLETE = 'model_update_complete'
class DemoController(GameController):
    """Translates raw mouse input into game events."""
    # Maps mouse button number -> handler method name; button 1 is the
    # left button.
    MOUSEBUTTONDOWN = {
        1: 'set_target',
    }
    def set_target(self, event):
        """Emit a SET_TARGET event carrying the clicked position as a Vector3."""
        return Event(SET_TARGET, target=Vector3(*event.pos))
class DemoView(GameView):
    """Draws the agent as a filled circle in a 640x480 window."""
    def __init__(self, manager):
        super(DemoView, self).__init__(manager, 640, 480)
    def on_model_update_complete(self, event):
        """Redraw the agent after the model has finished updating."""
        model = event.model
        # position is a 3-component vector; the third component is ignored.
        x, y, _ = map(int, model.position)
        pygame.draw.circle(self.window, model.color, (x, y), model.radius)
class AgentModel(GameModel):
    """Owns the steering agent and advances it once per tick."""
    def on_init(self, event):
        """Create the agent with Seek/Flee behaviours and default visuals."""
        self.agent = Agent(body=SimpleVehicle())
        # Weighted steering behaviours: seeking outweighs fleeing.
        self.agent.learn(0.2, Seek)
        self.agent.learn(0.1, Flee)
        # Start with the agent targeting its own position (i.e. at rest).
        self.agent.target = self.agent.position
        self.agent.color = (255, 0, 0)
        self.agent.radius = 10
    def on_tick(self, event):
        """Advance the agent one step, bracketed by start/complete events."""
        self.dispatcher.dispatch(Event(MODEL_UPDATE_START))
        self.agent.update()
        self.dispatcher.dispatch(Event(MODEL_UPDATE_COMPLETE, model=self.agent))
    def on_set_target(self, event):
        """Point the agent at the position carried by a SET_TARGET event."""
        target = event.target
        self.agent.target = target
def main():
    """Wire the controller, view and model into a Game and run it."""
    game = Game('Demo')
    game.add_controller(DemoController)
    game.add_view(DemoView)
    game.add_model(AgentModel)
    game.run()
if __name__ == '__main__':
    main()
| true |
2151734eae6a6ed8aff3056a356eea7c37ab6d3b | Python | artheadsweden/PythonFundamentalsMar18 | /Day2/03 Exceptions.py | UTF-8 | 497 | 3.53125 | 4 | [] | no_license | class ArgumentValueError(ValueError):
pass
def myfunc(x, y):
    """Return x divided by y; raise ArgumentValueError when y is zero."""
    if y == 0:
        raise ArgumentValueError("Parameter y can't be 0")
    quotient = x / y
    return quotient
def main():
    """Demonstrate try/except/else/finally with a custom exception."""
    try:
        print(myfunc(10, 0))
    except ArgumentValueError as e:
        # The subclass must be caught before its ValueError base class,
        # otherwise this branch would be unreachable.
        print("ArgumentValueError", e)
    except ValueError as e:
        print("ValueError", e)
    else:
        # Runs only when the try block raised nothing.
        print("No error")
    finally:
        # Runs regardless of whether an exception occurred.
        print("Will always go here")
    print("Done")
if __name__ == '__main__':
    main()
e9535541d83448acb9f51b65653586fd9568140b | Python | orosz-usgs/waterdataui | /wdfn-server/waterdata/services/nwis.py | UTF-8 | 3,704 | 3 | 3 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | """
Classes and functions for calling NWIS services and working with
the returned data.
"""
from ..utils import execute_get_request, parse_rdb
class NwisWebServices:
    """
    Access to NWISWeb services required by the waterdataui application
    to render pages.
    """

    def __init__(self, service_root, path='/nwis/site/', data_format='rdb'):
        """
        Constructor method.

        :param str service_root: the scheme and host of the NWISWeb service
        :param str path: the service path to be queried, defaults to `/nwis/site/`
        :param str data_format: the data format to be returned from the service call, defaults to `rdb`
        """
        self.service_root = service_root
        self.path = path
        self.data_format = data_format

    def get_site(self, site_no, agency_cd):
        """
        Get the sites owned by an agency.

        :param str site_no: site identifier
        :param str agency_cd: identifier for the agency that owns the site
        :return: an object containing the sites requested
        :rtype: requests.Response
        """
        # The raw response is returned so callers can inspect the status code.
        return execute_get_request(
            self.service_root,
            path=self.path,
            params={
                'site': site_no,
                'agencyCd': agency_cd,
                'siteOutput': 'expanded',
                'format': self.data_format
            }
        )

    def get_site_parameters(self, site_no, agency_cd):
        """
        Get the parameters measured at a site.

        :param str site_no: site identifier
        :param str agency_cd: identifier for the agency that owns the site
        :return: parameters that have been measured at a site
        :rtype: list
        """
        resp = execute_get_request(
            self.service_root,
            path=self.path,
            params={
                'sites': site_no,
                'format': self.data_format,
                'seriesCatalogOutput': True,
                'siteStatus': 'all',
                'agencyCd': agency_cd
            }
        )
        # Materialize the parsed rows so callers always receive a list;
        # any non-200 response yields an empty list.
        if resp.status_code == 200:
            return list(parse_rdb(resp.iter_lines(decode_unicode=True)))
        return []

    def get_huc_sites(self, huc_cd):
        """
        Get all sites within a hydrologic unit as identified by its
        hydrologic unit code (HUC).

        :param str huc_cd: hydrologic unit code
        :return: all sites in the specified HUC
        :rtype: iterator, either a list or generator
        """
        resp = execute_get_request(
            self.service_root,
            path=self.path,
            params={
                'format': self.data_format,
                'huc': huc_cd
            }
        )
        monitoring_locations = []
        if resp.status_code == 200:
            # NOTE: returned lazily (unlike get_site_parameters) -- callers
            # receive a generator on success, an empty list on failure.
            monitoring_locations = parse_rdb(resp.iter_lines(decode_unicode=True))
        return monitoring_locations

    def get_county_sites(self, state_county_cd):
        """
        Get all sites within a county.

        :param str state_county_cd: FIPS ID for a county
        :return: all sites within the specified county
        :rtype: iterator, either a list or generator
        """
        resp = execute_get_request(
            self.service_root,
            path=self.path,
            params={
                'format': self.data_format,
                'countyCd': state_county_cd
            }
        )
        monitoring_locations = []
        if resp.status_code == 200:
            monitoring_locations = parse_rdb(resp.iter_lines(decode_unicode=True))
        return monitoring_locations
| true |
0611a4a0273924836c56d2a35c2c2b7322d940ec | Python | rabu67/python_basics_part4 | /count_no_of_words.py | UTF-8 | 61 | 3.171875 | 3 | [] | no_license | a=[]
# Read a line of text and print how many whitespace-separated words it has.
x = input("enter input")
# str.split() with no separator collapses runs of whitespace and ignores
# leading/trailing spaces; the original split(' ') counted empty tokens
# between consecutive spaces as words.
words = x.split()
print(len(words))
| true |
84f1e182a6d5e84bab5ee4a33ed81bf8f30eb560 | Python | fvictor/noesis-python | /noesis/nodes.py | UTF-8 | 1,927 | 2.640625 | 3 | [
"MIT"
] | permissive | import javabridge as jb
from .utils import get_class_wrapper, java_collection_to_numpy
class NodeScorer():
    """This class implements the interface for node scorers. These algorithms
    compute a score for each node according to certain specific rules.

    Parameters
    ----------
    scorer : string
        Technique used to compute node scores. Currently supported techniques are
        'AdjustedBetweenness', 'AdjustedCloseness', 'AveragePathLength', 'Betweenness', 'BetweennessScore',
        'Closeness', 'ClusteringCoefficient', 'ConnectedComponents', 'Decay', 'Degree', 'DegreeAssortativity',
        'DiffusionCentrality', 'Eccentricity', 'EigenvectorCentrality', 'FreemanBetweenness',
        'HITS', 'InDegree', 'KatzCentrality', 'LinkBetweenness', 'LinkBetweennessScore',
        'LinkEmbeddedness', 'LinkNeighborhoodOverlap', 'LinkNeighborhoodSize', 'LinkRays',
        'NormalizedBetweenness', 'NormalizedDecay', 'NormalizedDegree', 'NormalizedInDegree',
        'NormalizedOutDegree', 'OutDegree', 'PageRank', 'PathLength', and 'UnbiasedDegreeAssortativity'.
    args: parameters
        Parameters for the node scorer. These parameters are specific
        for each node scorer and more details are provided in NOESIS documentation.
    """
    def __init__(self, scorer, *args):
        # Only the technique name and its arguments are stored here;
        # resolution of the Java class is deferred until compute().
        self.scorer = scorer
        self.args = args
    def compute(self, network):
        """Compute scores for each node in a given network.

        Parameters
        ----------
        network : Network
            Network for which the node scores will be computed.

        Returns
        -------
        scores : ndarray, shape (num_nodes,)
            Vector of scores for each node.
        """
        # Resolve the Java class implementing the requested technique in the
        # noesis.analysis.structure package.
        class_wrapper = get_class_wrapper(self.scorer, ['noesis.analysis.structure'])
        # Instantiate the Java scorer against the wrapped Java network object.
        scorer = class_wrapper(network.__o__, *self.args)
        # Execute the scorer and convert the Java result to a float ndarray.
        return java_collection_to_numpy(scorer.call(), float)
2165ecfa5cb4410554e638d872f13576e9e940e1 | Python | Fuabioo/Proyecto-2-Relaciones-Etimologia | /src/tec/ic/ia/p2/g08_logic_w_w_test.py | UTF-8 | 12,259 | 2.703125 | 3 | [
"MIT"
] | permissive | """
Pytest for Logic Word-Word Module
"""
from tec.ic.ia.p2.g08_model import Model
from tec.ic.ia.p2 import g08_logic_w_w
class Dummy:
    """Stand-in console so the logic_w_w code can be exercised unmodified."""

    def __init__(self):
        # Holds whatever was last "printed"; starts out as an empty string.
        self.dummy = ""

    def print(self, *args):
        """Record the arguments instead of writing them anywhere."""
        self.dummy = args
class TestLogicWW(object):
""" Test object validation for the logic"""
def test_brothers_1(self):
"""Test Case: tio(esp) is brother of abuelo(esp)? -> No"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.brother(
"tio",
"esp",
"abuelo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_brothers_2(self):
"""Test Case: tio(esp) is brother of padre(esp)? -> Yes"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.brother(
"padre",
"esp",
"tio",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = True
assert result == expected_result
def test_child_1(self):
"""Test Case: padre(esp) is child of abuelo(esp)? -> Yes"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.child(
"padre",
"esp",
"abuelo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = True
assert result == expected_result
def test_child_2(self):
"""Test Case: tio(esp) is child of yo(esp)? -> No"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.child(
"tio",
"esp",
"yo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_uncle_1(self):
"""Test Case: tio(esp) is uncle of primo(esp)? -> No"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.uncle(
"tio",
"esp",
"primo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_uncle_2(self):
"""Test Case: primo(esp) is uncle of yo(esp)? -> No"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.uncle(
"primo",
"esp",
"yo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_uncle_3(self):
"""Test Case: "tio" is uncle of "sobrino"? -> Yes"""
model_test = Model()
data_pool = [
'+ etymological_origin_of(" abuelo",spa," padre",spa)',
'+ etymological_origin_of(" abuelo",spa," tio",spa)',
'+ etymological_origin_of(" padre",spa," sobrino",spa)']
model_test.append_data(data_pool)
model_test.load(Dummy())
relations = {
'derived': False,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': True,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.uncle(
"tio",
"spa",
"sobrino",
"spa",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = True
assert result == expected_result
def test_cousin_1(self):
"""Test Case: tio(esp) is cousin of primo(esp)? -> No"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.cousin(
"tio",
"esp",
"primo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_cousin_2(self):
"""Test Case: tio(esp) is cousin of yo(esp)? -> No"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.cousin(
"tio",
"esp",
"yo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_cousin_3(self):
"""Test Case: hijo is cousin of primo? -> Yes"""
model_test = Model()
data_pool = [
'+ etymological_origin_of(" abuelo",spa," padre",spa)',
'+ etymological_origin_of(" abuelo",spa," tio",spa)',
'+ etymological_origin_of(" padre",spa," hijo",spa)',
'+ etymological_origin_of(" tio",spa," primo",spa)']
model_test.append_data(data_pool)
model_test.load(Dummy())
relations = {
'derived': False,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': True,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.cousin(
"hijo",
"spa",
"primo",
"spa",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = True
assert result == expected_result
def test_cousin_level_1(self):
"""Test Case: tio(esp) cousin level of abuelo(esp)? -> Not cousins"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.cousin_level(
"tio",
"esp",
"abuelo",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_cousin_level_2(self):
"""Test Case: yo(esp) cousin level of tio(esp)? -> Not cousins"""
thread = "esp_derived_esp.cl"
model_test = Model()
model_test.set_cl_file(thread)
model_test.parse_file()
model_test.load(Dummy())
relations = {
'derived': True,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': False,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.cousin_level(
"yo",
"esp",
"tio",
"esp",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = False
assert result == expected_result
def test_cousin_level_3(self):
"""Test Case: hijo cousin level of primo? -> 1"""
model_test = Model()
data_pool = [
'+ etymological_origin_of(" abuelo",spa," padre",spa)',
'+ etymological_origin_of(" abuelo",spa," tio",spa)',
'+ etymological_origin_of(" padre",spa," hijo",spa)',
'+ etymological_origin_of(" tio",spa," primo",spa)']
model_test.append_data(data_pool)
model_test.load(Dummy())
relations = {
'derived': False,
'etymologically': False,
'etymology': False,
'etymologically_related': False,
'has_derived_form': False,
'etymological_origin_of': True,
'is_derived_from': False,
'variant': False}
result = g08_logic_w_w.cousin_level(
"hijo",
"spa",
"primo",
"spa",
Dummy(),
model_test.get_logic(), # data
relations)
expected_result = True
assert result == expected_result
| true |
9805cb0fa295f3b3fea8d1cf1551d7be76def218 | Python | ttyskg/ProgrammingCompetition | /AtCoder/ABC/133/a.py | UTF-8 | 165 | 2.53125 | 3 | [
"MIT"
] | permissive | import sys
def main():
    """Read ``N A B`` from stdin; return the cheaper of N unit fares (A each)
    and the flat ticket price B."""
    reader = sys.stdin.readline
    n, unit_cost, flat_price = map(int, reader().split())
    return min(unit_cost * n, flat_price)


if __name__ == '__main__':
    print(main())
| true |
1aab1eacf68293bcd8c33889e477ed1f1b590630 | Python | rdiaz21129/rdiaz | /dev/old/dev_python_dictionary_ver_1.02.py | UTF-8 | 5,834 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python3.5
# By: Ricardo Diaz
# Date: 20181002
# Python3.5
# Name: dev_python_dictionary_ver_1.02.py
# Referance: https://www.programiz.com/python-programming/nested-dictionary
# ~~~~~~~~~~
# Import modules
# ~~~~~~~~~~
import re
import os
# ~~~~~~~~~~
# Testing area (may need to move in order to test)
# ~~~~~~~~~~
# ~~~~~~~~~~
# Define variables
# ~~~~~~~~~~
cnt = 0  # NOTE(review): defined but never incremented in the visible code
test01 = '--------------RICARDO TEST----------------'  # debug banner (only used in commented-out code)
dict_interface = {}  # interface name -> {setting label: value}, filled by def_re_out_interface()
dict_switchport_mode = {}  # NOTE(review): only written by the commented-out helper below
# ~~~~~~~~~~
# Define regex
# ~~~~~~~~~~
#re_interface = r"^interface\s([A-Za-z]*\d{1,3}\/\d{1,3}\/\d{1,3})" # Capture group ex. [GigabitEthernet2/2/44]
re_interface = r"^interface\s([A-Za-z]*-*[A-Za-z].*)"  # group(1): interface name, e.g. GigabitEthernet1/0/2
#re_switchport_access_vlan = r"\sswitchport\saccess\svlan\s(\d*)" # Capture group vlan number ex. [20]
re_switchport_access_vlan = r"(\sswitchport\saccess\svlan\s)(\d*)" # Capture group vlan number ex. [20]
re_switchport_mode = r"(\sswitchport\smode\s)(\w*)" # Capture group (2): [ switchport mode ][access or trunk]
# ~~~~~~~~~~
# Define functions
# ~~~~~~~~~~
def def_open_file():
    """Read the file named by the module-global ``ui_filename`` and cache its
    lines (newline-stripped) in the module-global ``var_open_ui_filename``."""
    global var_open_ui_filename
    with open(ui_filename, 'r') as handle:
        var_open_ui_filename = [row.rstrip('\n') for row in handle]
# TESTING WITH THIS FUNCTION
'''
def def_re_out_switchport_mode_and_vlan():
for line in var_open_ui_filename: # iterate through every line (for loop)
if re.search(re_switchport_mode, line): # every line will be matched to see if the line matches if statement/regex | \sswitchport\smode\s(\w*)
re_match_switchport_mode = re.search(re_switchport_mode, line)
#print (re_match_switchport_mode) # prints <_sre.SRE_Match object; span=(0, 23), match=' switchport mode access'>
var_re_match_switchport_mode_g0 = re_match_switchport_mode.group(0)
var_re_match_switchport_mode_g1 = re_match_switchport_mode.group(1)
var_re_match_switchport_mode_g2 = re_match_switchport_mode.group(2)
#print (var_re_match_switchport_mode_g0) #prints switchport mode access
#print (var_re_match_switchport_mode_g1) #prints switchport mode
#print (var_re_match_switchport_mode_g2) #prints access
dict_switchport_mode.update({var_re_match_switchport_mode_g1: var_re_match_switchport_mode_g2})
#var_def_re_out_switchport_mode_and_vlan = (def_re_out_switchport_mode_and_vlan)#TESTING
'''
def def_re_out_interface():
    """Parse the cached config lines (``var_open_ui_filename``) and populate
    the module-global ``dict_interface``::

        {interface_name: {' switchport mode ': 'access'|'trunk',
                          ' switchport access vlan ': '<vlan>'}}

    Bug fixes versus the original:
    - an interface's dict is initialized exactly once, when its
      ``interface ...`` line is seen, instead of being reset (and wiping the
      previously stored mode) every time a mode/vlan line matched;
    - mode/vlan lines that appear before any interface line are skipped
      instead of raising NameError on an unbound interface variable.
    """
    current_interface = None  # name of the interface block we are inside
    for line in var_open_ui_filename:
        match_interface = re.search(re_interface, line)
        if match_interface:
            current_interface = match_interface.group(1)
            # Initialize this interface's settings dict exactly once.
            dict_interface[current_interface] = {}
        match_mode = re.search(re_switchport_mode, line)
        if match_mode and current_interface is not None:
            # group(1) = ' switchport mode ', group(2) = 'access'/'trunk'
            dict_interface[current_interface][match_mode.group(1)] = match_mode.group(2)
        match_vlan = re.search(re_switchport_access_vlan, line)
        if match_vlan and current_interface is not None:
            # group(1) = ' switchport access vlan ', group(2) = vlan number
            dict_interface[current_interface][match_vlan.group(1)] = match_vlan.group(2)
            print(dict_interface)
    print('OUT OF THE FOR LOOP')
# ~~~~~~~~~~
# Start program here
# User input
# ~~~~~~~~~~
ui_filename = input('Enter filename: ')  # path of the switch config file to parse
# ~~~~~~~~~~
# Call upon functions
# ~~~~~~~~~~
def_open_file()  # cache the file's lines in var_open_ui_filename
def_re_out_interface()  # build dict_interface from the cached lines
#def_re_out_switchport_mode_and_vlan()
| true |
bb332402534d28bb637b3ebf52fad8dd115a7f7d | Python | Flavius1996/CAMERA-UIT-multi | /load_camera.py | UTF-8 | 1,117 | 3.328125 | 3 | [] | no_license | '''
CAMERA information:
+ camera name (unique for each camera)
+ camera link
'''
import xml.etree.ElementTree as ET
CAMERA_LIST = []  # module-level registry of camera dicts, filled by load_camera_from_file()
def check_validname():
    """Check that every camera 'name' in CAMERA_LIST is unique.

    Returns:
        True when all names are unique.

    Raises:
        ValueError: listing the duplicated names otherwise.
    """
    from collections import Counter  # stdlib; local import keeps module deps unchanged
    # Counter is O(n), replacing the original O(n^2) list.count() scan;
    # the original's "duplicate is not None" guard was dead code (a set is
    # never None).
    counts = Counter(cam['name'] for cam in CAMERA_LIST)
    duplicate = {name for name, seen in counts.items() if seen > 1}
    if not duplicate:
        return True
    raise ValueError('All camera \'name\' in the file must be unique. '
                     '\nThese name is duplicated: {}'.format(duplicate))
def load_camera_from_file(filepath):
    """Parse *filepath* (XML) and append one attribute dict per camera
    element to the module-global CAMERA_LIST, then validate name uniqueness.

    Each camera dict merges the element's XML attributes with its child
    tags (tag name -> text).  Raises KeyError when a camera element lacks
    the mandatory 'name' attribute.
    """
    global CAMERA_LIST
    root = ET.parse(filepath).getroot()
    for node in root:
        if 'name' not in node.attrib:
            raise KeyError('Camera does\'t have attribute: \'name\'')
        record = node.attrib
        for child in node:
            record[child.tag] = child.text
        CAMERA_LIST.append(record)
    check_validname()
# Debug
# load_camera_from_file('./camera_info/test.xml')
# print(CAMERA_LIST) | true |
1a809dce2b4d819f4aec4fba5ce78e6f98f49332 | Python | WongWai95/LeetCodeSolution | /118. 杨辉三角/Solution.py | UTF-8 | 403 | 2.796875 | 3 | [] | no_license | class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if numRows == 0: return []
res = [[1]]
for _ in range(1, numRows):
last_line = res[-1]
temp = [1]
for idx in range(1, len(last_line)):
temp.append(last_line[idx-1]+last_line[idx])
temp.append(1)
res.append(temp)
return res | true |
18271853e519bde5887e4c9b8a6b48c19a1951da | Python | aguzma22/CIS-2348 | /5.19.py | UTF-8 | 2,398 | 3.375 | 3 | [] | no_license | # Anthony Guzman 5.19 CIS2348
def automobile():
    """Print Davy's auto shop service menu with prices (exact text matters
    for the graded output of this lab)."""
    print("Davy's auto shop services")
    print("Oil change -- $35")
    print("Tire rotation -- $19")
    print("Car wash -- $7")
    print("Car wax -- $12\n")
# Service prices (USD).
changeOil = 35
rotationTire = 19
washCar = 7
waxCar = 12
t = 0          # running invoice total
service1 = 0   # price of the first selected service (0 = none)
service2 = 0   # price of the second selected service (0 = none)

automobile()

# Bug fixes versus the original:
# - the second-service chain compared against " Tire rotation"/" tire rotation"
#   (stray leading spaces) and could never match real input;
# - "-" set first_ser to "No service" but secondService to " No service",
#   and the invoice branches compared against a mix of the two spellings,
#   so the both-services-skipped branch was unreachable.
# All sentinels are now the single spelling "No service".
first_ser = input("Select first service:\n")
if first_ser in ("Oil change", "oil change"):
    service1 = changeOil
elif first_ser in ("Tire rotation", "tire rotation"):
    service1 = rotationTire
elif first_ser in ("Car wash", "car wash"):
    service1 = washCar
elif first_ser in ("Car wax", "car wax"):
    service1 = waxCar
elif first_ser == "-":
    first_ser = "No service"
t = t + service1

secondService = input("Select second service:\n")
if secondService in ("Oil change", "oil change"):
    service2 = changeOil
elif secondService in ("Tire rotation", "tire rotation"):
    service2 = rotationTire
elif secondService in ("Car wash", "car wash"):
    service2 = washCar
elif secondService in ("Car wax", "car wax"):
    service2 = waxCar
elif secondService == "-":
    secondService = "No service"
t = t + service2

print()
print("Davy's auto shop invoice")
print()
# Print a price only for services that were actually selected.
if first_ser == "No service" and secondService == "No service":
    print("Service 1: " + first_ser)
    print("Service 2: " + secondService)
elif secondService == "No service":
    print("Service 1: " + first_ser + ", $" + str(service1))
    print("Service 2: " + secondService)
elif first_ser == "No service":
    print("Service 1: " + first_ser)
    print("Service 2: " + secondService + ", $" + str(service2))
else:
    print("Service 1: " + first_ser + ", $" + str(service1))
    print("Service 2: " + secondService + ", $" + str(service2))
print()
print("Total: $" + str(t))
15180ca8e85e2b8aecc75cceeaabd2679ee01a38 | Python | Keerthana-Muruganantham/Training-projects-python | /14._list_product.py | UTF-8 | 337 | 3.34375 | 3 | [] | no_license | num=int(input("Enter the list length : "))
a = []  # the input list
b = []  # scratch copy with one element replaced by 1
c = []  # result: c[i] = product of all elements of a except a[i]
print("Enter the elements")
for i in range(0, num):
    element = int(input())
    a.append(element)
print("The list is ", a)
# For each index, replace that element with 1 and multiply everything.
for i in range(0, num) :
    b = a.copy()
    b[i] = 1
    res = 1
    # NOTE(review): the inner loop reuses the name `i`, shadowing the outer
    # index; this works here only because the outer loop reassigns `i` from
    # its own iterator on the next pass.
    for i in range(0, num):
        res = res*b[i]
    c.append(res)
print (c)
6fc2b711d248931b23ae4cbdddd4dddc329ac915 | Python | icoz/icdb | /icdb/storage/file_storage.py | UTF-8 | 11,214 | 2.796875 | 3 | [] | no_license | # -------------------------------#
# Written by icoz, 2013 #
# email: icoz.vt at gmail.com #
# License: GPL v3 #
# -------------------------------#
"""
Struct of FileStorage
---------------------
FileStorage keeps data in 3 types of files: .idx, .key, .value
.idx - index for keeping hash, key-offset, value-offset
.key - file to save keys (binary)
.value - file to save values (binary)
Limits
------
Key size is max 2^32 bytes
Value size is max 2^40 bytes
.idx struct
-----------
Header:
magic, 4 byte = 0x720AE06A, b'\x6a\xe0\x0a\x72'
version id, 4 byte = 0x00000001
records count, 4 byte
reserved, 4 byte
records, (records count)*sizeof(Record), sorted by hash
Record:
#hash, 16 bytes, md5(key)
key_offset, 4 bytes
key_size, 4 bytes = count of bytes
value_offset, 4 bytes = count of blocks(256-bytes) to skip
value_size, 4 bytes = count of 256-bytes
.key struct
-----------
Contents records of variable size
Record:
magic, 8 byte = b'\x50\x0a\x6f\x70\xf2\x52\x55\xad'
key_size, 4 bytes = count of bytes
flags, 4 byte = 0 - ok, non 0 - deleted
value_offset, 4 bytes = count of blocks(256-bytes) to skip
value_size, 4 bytes = count of 256-bytes
key, bytes[]
.value struct
-------------
Contents records of variable size
Record:
value, bytes[], aligned to 256 bytes
"""
from hashlib import md5
from io import SEEK_END, SEEK_SET
import os
import struct
from unittest import TestCase
from icdb.memcache.hashcache import HashCache
def hash_md5(info):
    """Return the 16-byte MD5 digest of str(info)."""
    return md5(str(info).encode()).digest()
class FileStorage(object):
    """
    Append-only key/value store backed by three files:

    ``<name>.key``   -- key records: magic(8) | key_size(4) | flags(4)
                        | value_offset(4) | value_size(4) | key bytes
    ``<name>.value`` -- raw values, each starting on a 256-byte boundary
    ``<name>.idx``   -- serialized snapshot of the in-memory index

    Deletion is logical: a record's ``flags`` byte is set non-zero and the
    record is skipped on later scans; :meth:`compress` rewrites the data
    files to reclaim the space.

    Bug fixes versus the original: ``delete`` unpacked the index payload in
    the wrong field order and wrote an ``int`` to a binary file;
    ``compress`` referenced an undefined name; ``__getitem__`` raised
    NameError for a key absent from both index and file; ``__keys__``
    yielded the key-data offset (record start + 24) while every consumer
    expects the record start, so flag updates corrupted key bytes;
    ``load_index`` populated the index with ``bytes`` keys while lookups
    use ``str``; value padding was computed from character counts instead
    of encoded byte counts.
    """
    KEY_MAGIC_NUMBER = b'\x50\x0a\x6f\x70\xf2\x52\x55\xad'
    IDX_MAGIC_NUMBER = b'\x6a\xe0\x0a\x72'

    def __init__(self, filename='test.icdb'):
        self.filename = filename
        self.index = HashCache()
        self.value_file = open(filename + '.value', 'ab')
        self.key_file = open(filename + '.key', 'ab')
        # Reuse a previously saved index when available; otherwise rebuild
        # it by scanning the key file.
        if os.path.exists(self.filename + '.idx'):
            self.load_index()
        else:
            self.build_index()

    def __del__(self):
        # NOTE(review): __del__ may run during interpreter shutdown when
        # globals are already torn down; an explicit close() method would
        # be more reliable. Kept for interface compatibility.
        self.value_file.close()
        self.key_file.close()
        self.build_index()
        self.save_index()

    def __setitem__(self, key, value):
        """Store ``value`` under ``key``; any existing record for the key
        is flagged as deleted before the new one is indexed."""
        value_offset, value_size = self.__save_value_record__(value)
        key_offset = self.__save_key_record__(key, value_offset, value_size)
        # The index still points at the old record at this moment.
        old_key_offset, _old_voff, _old_vsize = self.__get_from_index__(key)
        if old_key_offset is not None:
            self.index.delete(key)
            # flags byte sits 12 bytes into the record (magic 8 + key_size 4)
            self.key_file.seek(old_key_offset + 12, SEEK_SET)
            self.key_file.write(b'\x01')
            self.key_file.flush()
        self.__put_to_index__(key, key_offset, value_offset, value_size)

    def __getitem__(self, key):
        """Return the stored value (as ``str``) or ``None`` when absent."""
        key_offset, value_offset, value_size = self.__get_from_index__(key)
        if key_offset is None:
            # Index miss: fall back to a linear scan of the key file.
            value_offset = None
            for _flags, _ksize, v_off, v_size, k, _k_off in self.__keys__():
                if k == key:
                    value_offset, value_size = v_off, v_size
                    break
            if value_offset is None:
                return None
        with open(self.filename + '.value', 'rb') as vfile:
            vfile.seek(value_offset * 256)
            value = vfile.read(value_size)
        return value.decode()

    def delete(self, key):
        """Flag ``key``'s record as deleted and drop it from the index.

        No-op when the key is not present in the index.
        """
        key_offset, _value_offset, _value_size = self.__get_from_index__(key)
        if key_offset is not None:
            # flags byte sits 12 bytes into the record (magic 8 + key_size 4)
            self.key_file.seek(key_offset + 12, SEEK_SET)
            self.key_file.write(b'\x01')
            self.key_file.flush()
            self.index.delete(key)

    def compress(self):
        """
        Rewrite the key and value files, dropping records flagged deleted,
        then rebuild the in-memory index from the surviving records.
        """
        # Snapshot every live (key, value) pair before truncating.
        live = []
        with open(self.filename + '.value', 'rb') as vfile:
            for _flags, _ksize, value_offset, value_size, key, _k_off in self.__keys__():
                vfile.seek(value_offset * 256)
                live.append((key, vfile.read(value_size).decode()))
        # Truncate both data files and start over with an empty index.
        self.key_file.close()
        self.value_file.close()
        open(self.filename + '.key', 'wb').close()
        open(self.filename + '.value', 'wb').close()
        self.key_file = open(self.filename + '.key', 'ab')
        self.value_file = open(self.filename + '.value', 'ab')
        self.index = HashCache()
        for key, value in live:
            self[key] = value

    def build_index(self):
        """Rebuild the in-memory index by scanning the key file."""
        self.index = HashCache()
        for _flags, _ksize, value_offset, value_size, key, key_offset in self.__keys__():
            self.__put_to_index__(key, key_offset, value_offset, value_size)

    def __keys__(self):
        """Generator over live (non-deleted) key records.

        Yields ``(flags, key_size, value_offset, value_size, key,
        key_offset)`` where ``key_offset`` is the offset of the record's
        magic number -- the value expected by __get_key_record__ and by the
        flag-update code in __setitem__/delete.
        """
        with open(self.filename + '.key', 'rb') as fin:
            b = fin.read()  # read whole file; records are found by magic
        if len(b) > 32:
            pos = 0
            while True:
                pos = b.find(self.KEY_MAGIC_NUMBER, pos)
                if pos == -1:
                    break
                key_size, flags, value_offset, value_size = struct.unpack_from('iiii', b, pos + 8)
                key = b[pos + 24:pos + 24 + key_size].decode()
                key_offset = pos  # record start, not key-data start
                if flags == 0:
                    yield (flags, key_size, value_offset, value_size, key, key_offset)
                pos += 1

    def __save_value_record__(self, value):
        """Append ``str(value)`` to the value file, padded to a 256-byte
        boundary.

        Returns ``(value_offset, value_size)``: offset in 256-byte blocks
        and the encoded byte length of the value.
        """
        data = str(value).encode()
        self.value_file.seek(0, SEEK_END)
        # Re-align the file end in case a previous write was interrupted.
        misalign = self.value_file.tell() % 256
        if misalign:
            self.value_file.write(b'\x00' * (256 - misalign))
        value_offset = self.value_file.tell() // 256
        self.value_file.write(data)
        # Pad the record out to the next 256-byte boundary.
        pad = (256 - len(data) % 256) % 256
        self.value_file.write(b'\x00' * pad)
        self.value_file.flush()
        return value_offset, len(data)

    def __save_key_record__(self, key, value_offset, value_size):
        """Append a key record for ``str(key)``; return the record's start
        offset in the key file."""
        data = str(key).encode()
        self.key_file.seek(0, SEEK_END)
        key_offset = self.key_file.tell()
        self.key_file.write(self.KEY_MAGIC_NUMBER)
        # header: key_size, flags (0 = live), value_offset, value_size
        self.key_file.write(struct.pack('iiii', len(data), 0, value_offset, value_size))
        self.key_file.write(data)
        self.key_file.flush()
        return key_offset

    def __get_key_record__(self, key_offset):
        """Read the key record starting at ``key_offset``.

        Returns ``(key, flags, value_offset, value_size)``; the key is
        decoded to ``str`` so it matches keys used by lookups.  On an
        invalid offset the index is rebuilt and IndexError is raised.
        """
        with open(self.filename + '.key', 'rb') as f_key:
            f_len = f_key.seek(0, SEEK_END)
            if f_len < key_offset:
                raise IndexError
            f_key.seek(key_offset, SEEK_SET)
            magic = f_key.read(8)
            if magic == self.KEY_MAGIC_NUMBER:
                k_s = f_key.read(4 * 4)
                key_size, flags, value_offset, value_size = struct.unpack('iiii', k_s)
                key = f_key.read(key_size).decode()
                return (key, flags, value_offset, value_size)
        # Magic mismatch: the stored index pointed at garbage -- rebuild.
        self.build_index()
        raise IndexError

    def __put_to_index__(self, key, key_offset, value_offset, value_size):
        """Store the packed (key_offset, value_offset, value_size) triple
        for ``key`` in the in-memory index."""
        self.index[key] = struct.pack('iii', key_offset, value_offset, value_size)

    def __get_from_index__(self, key):
        """Return ``(key_offset, value_offset, value_size)`` for ``key``,
        or ``(None, None, None)`` when the key is not indexed."""
        index_info = self.index[key]
        if index_info is not None:
            return struct.unpack("iii", index_info)
        return None, None, None

    def load_index(self):
        """Load a previously saved index from the .idx file.

        Raises FileNotFoundError when the file's magic or version does not
        match (caller falls back to build_index on a fresh store).
        """
        with open(self.filename + ".idx", 'rb') as idx:
            magic = idx.read(4)
            if magic != self.IDX_MAGIC_NUMBER:
                raise FileNotFoundError
            ver_id, rec_count, _reserved = struct.unpack("iii", idx.read(4 * 3))
            if ver_id != 1:
                raise FileNotFoundError
            for _ in range(rec_count):
                k_off, _k_size, v_off, v_size = struct.unpack("iiii", idx.read(4 * 4))
                key, *_rest = self.__get_key_record__(k_off)
                self.__put_to_index__(key, k_off, v_off, v_size)

    def save_index(self):
        """Persist the in-memory index so it can be loaded on next start."""
        with open(self.filename + ".idx", 'wb') as idx:
            # header: magic, version 1, record count, reserved
            idx.write(self.IDX_MAGIC_NUMBER)
            idx.write(b'\x01\x00\x00\x00')
            idx.write(struct.pack('i', self.index.ht_count))
            idx.write(b'\x00\x00\x00\x00')
            # NOTE(review): assumes HashCache exposes ht / ht_count and that
            # slots 0..ht_count-1 all hold populated (hash, key, payload)
            # triples -- confirm against HashCache's implementation.
            for i in range(self.index.ht_count):
                _hash, key, index_info = self.index.ht[i]
                key_offset, value_offset, value_size = struct.unpack('iii', index_info)
                rec = struct.pack('iiii', key_offset, len(key), value_offset, value_size)
                idx.write(rec)
class FileStorageTest(TestCase):
    """Round-trip tests for FileStorage against real files in the CWD."""
    def setUp(self):
        # Start every test from a clean slate: remove any leftover store
        # files from a previous run.
        try:
            os.unlink('test.icdb.key')
            os.unlink('test.icdb.value')
            os.unlink('test.icdb.idx')
        except FileNotFoundError:
            pass
        self.fs = FileStorage()
    def test_set_get(self):
        # Values are stringified on write, so 123 reads back as '123'.
        self.fs['123'] = 123
        print("value of 123 = ", self.fs['123'])
        self.assertEqual('123', self.fs['123'])
        # Overwriting the same key must return the newest value.
        self.fs['123'] = 'some'
        print("value of 123 = ", self.fs['123'])
        self.assertEqual('some', self.fs['123'])
        self.fs['311'] = "some text"
        self.assertEqual('some text', self.fs['311'])
    def test_load_index(self):
        # Save an index, write another key, then reload the stale index:
        # looking up '111' should go through the fallback path.
        self.fs['123'] = 123
        self.fs.save_index()
        self.fs['111'] = '111'
        self.fs.load_index()
        # NOTE(review): unittest ignores return values from test methods,
        # so these returns never affect pass/fail; assertions would be the
        # conventional way to express the expectation.
        try:
            print(self.fs['111'])
        except IndexError:
            return True
        else:
            return False
| true |
6e0c7c270b27bb7dc09f547553db714ec95fbcd1 | Python | Nix41/Music-Notes | /GetLines.py | UTF-8 | 3,933 | 2.671875 | 3 | [] | no_license |
import cv2
import numpy as np
def process(image):
    """Detect staff lines in a sheet-music image.

    Runs edge detection, tries a perspective-corrected version of the page
    (for photos) and the raw image (for clean scans), then picks whichever
    yields Hough lines. Writes intermediate debug images under output/.

    Returns (line_ys, lines_image, result_image) as produced by
    detect_lines()/straight().
    """
    gray = cv2.cvtColor(image.copy(), cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (11,11), 0)
    edged = cv2.Canny(blur, 0, 50)
    cv2.imwrite("output/1canny.jpg", edged)
    contours, _ = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    # Largest contour first: photo() picks the first 4-corner one (the page).
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    dst = photo(image.copy(),contours)
    # NOTE(review): cop/cop2 are unused copies left over from development.
    cop = gray.copy()
    cop2 = gray.copy()
    e, t, res = straight(dst, image.copy(),contours)
    e1,t2,res2 = straight(image.copy(), image.copy(),contours)
    h2 = cv2.HoughLines(e, 1, np.pi/150, 200)
    # Maybe the picture is a clean image of a partiture
    h = cv2.HoughLines(e1, 1, np.pi/ 150, 200)
    if h2 is None: # if it is not a photo
        a, b = detect_lines(h, gray, 80)
        r = res2
    else: # if it is
        a, b = detect_lines(h2, t, 80)
        r = res
    return a, b, r
def distance(point1, point2):
    """Euclidean distance between two 2-D points (indexable as [0], [1])."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return np.sqrt(dx ** 2 + dy ** 2)
# Binarization range used by straight(): pixels >= THRESHOLD_MIN map to
# THRESHOLD_MAX, the rest to 0.
THRESHOLD_MIN = 160
THRESHOLD_MAX = 255
def photo(image, contours):
    """Perspective-correct a photographed page.

    Finds the first contour that simplifies to 4 corners (assumed to be the
    sheet of paper), orders its corners, and warps the quadrilateral into an
    axis-aligned rectangle.

    NOTE(review): if no contour simplifies to 4 points, `sheet` is never
    bound and this raises NameError -- confirm callers guarantee a page
    outline is present.
    """
    for cnt in contours:
        # Douglas Pecker algorithm - reduces the number of points in a curve
        epsilon = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * epsilon, True)
        if len(approx) == 4:
            sheet = approx
            break
    # Flatten cv2's (N, 1, 2) point array to (N, 2) floats.
    approx = np.asarray([x[0] for x in sheet.astype(dtype=np.float32)])
    # top_left has the smallest sum, bottom_right has the biggest
    top_left = min(approx, key=lambda t: t[0] + t[1])
    bottom_right = max(approx, key=lambda t: t[0] + t[1])
    top_right = max(approx, key=lambda t: t[0] - t[1])
    bottom_left = min(approx, key=lambda t: t[0] - t[1])
    # Output size: the longer of each pair of opposite edges.
    max_width = int(max(distance(bottom_right, bottom_left), distance(top_right, top_left)))
    max_height = int(max(distance(top_right, bottom_right), distance(top_left, bottom_left)))
    # Destination rectangle corners, in the same order as `rectangle`.
    arr = np.array([
        [0, 0],
        [max_width - 1, 0],
        [max_width - 1, max_height - 1],
        [0, max_height - 1]], dtype="float32")
    rectangle = np.asarray([top_left, top_right, bottom_right, bottom_left])
    m = cv2.getPerspectiveTransform(rectangle, arr)
    dst = cv2.warpPerspective(image, m, (max_width, max_height))
    return dst
# NOTE(review): LINES_DISTANCE_THRESHOLD is unused in the visible code;
# it may be referenced elsewhere -- confirm before removing.
LINES_DISTANCE_THRESHOLD = 50
# Max |y2 - y1| for a Hough line to count as horizontal in detect_lines().
LINES_ENDPOINTS_DIFFERENCE = 10
def detect_lines(hough, image, nlines):
    """Convert the first `nlines` Hough results into horizontal line rows.

    For each (rho, theta) result, the line is extended across the image;
    lines whose endpoint y-coordinates differ by less than
    LINES_ENDPOINTS_DIFFERENCE (i.e. near-horizontal) are kept, recorded by
    their mean y. Writes a debug overlay to output/5lines.png.

    Returns (set of line y-positions, BGR image with lines drawn).
    """
    all_lines = set()
    # NOTE(review): image.shape is (rows, cols), so these names are swapped;
    # harmless here because only their sum is used as a line-length scale.
    width, height = image.shape
    # convert to color image so that you can see the lines
    lines_image_color = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    for result_arr in hough[:nlines]:
        rho = result_arr[0][0]
        theta = result_arr[0][1]
        a = np.cos(theta)
        b = np.sin(theta)
        # Point on the line closest to the origin.
        x0 = a * rho
        y0 = b * rho
        # Extend the line well beyond the image in both directions.
        shape_sum = width + height
        x1 = int(x0 + shape_sum * (-b))
        y1 = int(y0 + shape_sum * a)
        x2 = int(x0 - shape_sum * (-b))
        y2 = int(y0 - shape_sum * a)
        start = (x1, y1)
        end = (x2, y2)
        diff = y2 - y1
        if abs(diff) < LINES_ENDPOINTS_DIFFERENCE:
            all_lines.add(int((start[1] + end[1]) / 2))
            cv2.line(lines_image_color, start, end, (0, 0, 255), 2)
    cv2.imwrite("output/5lines.png", lines_image_color)
    return all_lines, lines_image_color
def straight(dst1, image,contours):
    """Binarize `dst1` and extract its edges; also writes debug images.

    Draws the contours onto `image` (mutating it) for output/2..., Otsu-
    thresholds the grayscale page, re-thresholds with the fixed range,
    erodes with a 3x3 kernel, then runs Canny.

    Returns (edges, eroded_binary, otsu_result).
    """
    cv2.drawContours(image, contours, -1, (0, 255, 0), 2)
    cv2.imwrite("output/2with_contours.png", image)
    dst = cv2.cvtColor(dst1, cv2.COLOR_BGR2GRAY)
    # Otsu picks the global threshold automatically (the 0 is ignored).
    _, result = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imwrite("output/3adjusted_photo.png", result)
    rcop = result.copy()
    _, thresholded1 = cv2.threshold(rcop, THRESHOLD_MIN, THRESHOLD_MAX, cv2.THRESH_BINARY)
    element = np.ones((3, 3))
    thresholded2 = cv2.erode(thresholded1, element)
    edges = cv2.Canny(thresholded2, 10, 100, apertureSize=3)
    return edges, thresholded2, result
| true |
0883928c27e9415499a312cf8c9415f7580c042f | Python | LeiYangGH/pyhomework | /selenium/sinapic/sinapic/sinapic.py | UTF-8 | 1,214 | 2.515625 | 3 | [] | no_license | # coding = utf-8
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
def sina_pic_tabs(browser='Chrome'):
    """Scrape photo.sina.com.cn: click each sub-show tab and collect its
    <h3> headlines into {tab text: [headline, ...]}, printed at the end.

    browser: 'Chrome' (default) or anything else for Firefox; driver
    executables are expected at hard-coded C:\\Installs paths.

    NOTE(review): find_elements_by_* / executable_path are the old
    Selenium 3 API -- Selenium 4 removed them in favor of find_elements(By...)
    and Service objects; confirm the pinned selenium version.
    """
    if browser == 'Chrome':
        driver = webdriver.Chrome(executable_path=r'C:\Installs\chromedriver.exe')
        print("Chrome")
    else:
        driver = webdriver.Firefox(executable_path=r'C:\Installs\geckodriver.exe')
        print("Firefox")
    driver.get(r'http://photo.sina.com.cn/')
    if browser == 'Chrome':
        driver.maximize_window()
    else:
        driver.set_window_size("1024", "768")
    # Scroll halfway down so the tab strip is in view before clicking.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
    tabs = driver.find_elements_by_class_name("subshow-tab-item")
    dic = {}
    # Skip the first and last tab items (non-content entries).
    for tab in tabs[1:len(tabs) - 1]:
    ##for tab in tabs[1:2]:
        tab.click()
        time.sleep(1)  # wait for the tab's content panel to refresh
        #subshow_cont = driver.find_element_by_id("column_subshow_cont_like")
        subshow_cont = driver.find_element_by_id("column_subshow_cont")
        #print(subshow_cont.get_attribute('outerHTML'))
        h3 = subshow_cont.find_elements_by_tag_name('h3')
        dic[tab.text] = [h.text for h in h3]
    print(dic)
    driver.quit()
sina_pic_tabs()
#sina_pic_tabs("Firefox")
0254f51d4593fd739102d26f7db3d3c51493664e | Python | VasilyRum/Hometask_from_Lessons | /Lesson VII - Decorators/task_7_3.py | UTF-8 | 861 | 3.765625 | 4 | [] | no_license | """
Convert a call of a function with a fixed number of positional
arguments, f(a, b, c, d), into the curried form
f(a)(b)(c)(d), using a decorator.
"""
def carry(func, amount_arg=0):
    """
    Curry *func*: let its positional arguments be supplied across several
    successive calls, e.g. f(a, b, c, d) as f(a)(b)(c)(d) or f(a, b)(c)(d).
    """
    remaining = amount_arg or func.__code__.co_argcount

    def wrap(*collected):
        """
        Collect arguments; invoke func once enough have arrived.
        """
        if len(collected) == remaining:
            return func(*collected)

        def wrapper(*more):
            """
            Prepend the already-collected arguments and call through.
            """
            return func(*(collected + more))

        return carry(wrapper, remaining - len(collected))
    return wrap
@carry
def foo(a, b, c, d):
    """Sum four values; curried via ``carry``."""
    total = a + b + c + d
    return total


print(foo(1)(2)(3)(5))
| true |
f0611233107070ac57ddd9b8c15d00e013c33855 | Python | feast-dev/feast | /sdk/python/feast/infra/key_encoding_utils.py | UTF-8 | 2,688 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import struct
from typing import List, Tuple
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.protos.feast.types.Value_pb2 import ValueType
def _serialize_val(
    value_type, v: ValueProto, entity_key_serialization_version=1
) -> Tuple[bytes, int]:
    """Serialize one ValueProto field to bytes plus its ValueType tag.

    value_type is the name of the set oneof field ("string_val",
    "bytes_val", "int32_val" or "int64_val"); anything else raises
    ValueError.

    NOTE(review): for serialization versions <= 1, int64 values are packed
    with "<l", which in struct's standard (little-endian) mode is 4 bytes --
    i.e. legacy keys truncate int64 to 32 bits; version >= 2 uses the full
    8-byte "<q". Presumably intentional for backward compatibility with
    already-written keys -- confirm against the serialization-version docs.
    """
    if value_type == "string_val":
        return v.string_val.encode("utf8"), ValueType.STRING
    elif value_type == "bytes_val":
        return v.bytes_val, ValueType.BYTES
    elif value_type == "int32_val":
        return struct.pack("<i", v.int32_val), ValueType.INT32
    elif value_type == "int64_val":
        if 0 <= entity_key_serialization_version <= 1:
            return struct.pack("<l", v.int64_val), ValueType.INT64
        return struct.pack("<q", v.int64_val), ValueType.INT64
    else:
        raise ValueError(f"Value type not supported for Firestore: {v}")
def serialize_entity_key_prefix(entity_keys: List[str]) -> bytes:
    """
    Serialize keys to a bytestring, so it can be used to prefix-scan through items stored in the online store
    using serialize_entity_key.

    This encoding is a partial implementation of serialize_entity_key, only operating on the keys of entities,
    and not the values.
    """
    # Each key contributes its STRING type tag followed by its UTF-8 bytes,
    # in sorted key order (matching serialize_entity_key).
    encoded = [
        struct.pack("<I", ValueType.STRING) + name.encode("utf8")
        for name in sorted(entity_keys)
    ]
    return b"".join(encoded)
def serialize_entity_key(
    entity_key: EntityKeyProto, entity_key_serialization_version=1
) -> bytes:
    """
    Serialize entity key to a bytestring so it can be used as a lookup key in a hash table.

    We need this encoding to be stable; therefore we cannot just use protobuf serialization
    here since it does not guarantee that two proto messages containing the same data will
    serialize to the same byte string[1].

    [1] https://developers.google.com/protocol-buffers/docs/encoding
    """
    # Sort join_keys (with their values kept in lockstep) so the encoding is
    # independent of the order fields were populated in the proto.
    sorted_keys, sorted_values = zip(
        *sorted(zip(entity_key.join_keys, entity_key.entity_values))
    )
    output: List[bytes] = []
    # Key names first: STRING type tag + UTF-8 bytes each.
    for k in sorted_keys:
        output.append(struct.pack("<I", ValueType.STRING))
        output.append(k.encode("utf8"))
    # Then the values: type tag + byte length + payload, so the encoding is
    # unambiguous to parse back.
    for v in sorted_values:
        val_bytes, value_type = _serialize_val(
            v.WhichOneof("val"),
            v,
            entity_key_serialization_version=entity_key_serialization_version,
        )
        output.append(struct.pack("<I", value_type))
        output.append(struct.pack("<I", len(val_bytes)))
        output.append(val_bytes)
    return b"".join(output)
| true |
6e4d1e16263277d3ba75b5d334ffbeb473491615 | Python | Jane11111/Leetcode2021 | /Huawei01.py | UTF-8 | 802 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2021-08-18 19:04
# @Author : zxl
# @FileName: Huawei01.py
import sys
def solve(X, N, lst):
    """Bounded knapsack.

    dp[cap][j] is the best total score using only the first j items with a
    budget of cap; item j-1 is (price, amount, score): each copy costs
    `price`, at most `amount` copies may be taken, each worth `score`.
    Returns dp[X][N].
    """
    dp = [[0] * (N + 1) for _ in range(X + 1)]
    for cap in range(1, X + 1):
        for j, (price, amount, score) in enumerate(lst[:N], start=1):
            # Best without taking any copy of this item.
            best = dp[cap][j - 1]
            copies = 1
            while copies <= amount and copies * price <= cap:
                best = max(best, dp[cap - copies * price][j - 1] + score * copies)
                copies += 1
            dp[cap][j] = best
    return dp[-1][-1]
if __name__ == "__main__":
    # First line: budget X and the number of items N.
    line = sys.stdin.readline().strip()
    X,N = list(map(int, line.split()))
    # Next N lines: one item per line as "price amount score".
    lst = []
    for i in range(N):
        line = sys.stdin.readline().strip()
        arr = list(map(int, line.split()))
        lst.append(arr)
    ans = solve(X,N,lst)
    print(ans)
fe3e4bde96ae4b3906a1a2d01e01c92ad67f7e7e | Python | luis-mueller/drep | /library/estimator.py | UTF-8 | 891 | 2.671875 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
class LabelMap:
    """Pixel-wise 'estimator' backed by a pre-rendered RGB label image.

    Mimics a scikit-learn classifier (``predict`` / ``score``): each queried
    pixel's RGB value is looked up in the label image and matched against the
    class colors loaded from ``data/classes_rgb.txt`` (one RGB row per class,
    row index == class index).
    """

    def __init__(self, path):
        self.name = path
        self.img = cv2.imread(path)
        self.classes = np.loadtxt('data/classes_rgb.txt', dtype=np.uint8)
        # Cache of the most recent prediction so back-to-back calls with the
        # same coordinates (e.g. predict() followed by score()) are free.
        self.oneHotPredictions = None
        self.lastData = None

    def predict(self, x, collapse=False):
        """Classify pixel coordinates.

        :param x: int array of shape (n, 2) with (row, col) pixel coordinates.
        :param collapse: if True return 1-based class indices, shape (n,);
            otherwise a 0/1 one-hot matrix of shape (n_classes, n).
        """
        if self.oneHotPredictions is None or self.lastData is None or (not np.array_equal(self.lastData, x)):
            imgValue = np.expand_dims(self.img[x[:, 0], x[:, 1]], axis=1)
            # (n, 1, 3) vs (n_classes, 3) broadcasts to (n, n_classes, 3).
            self.oneHotPredictions = np.all(self.classes == imgValue, axis=2)
            # Bug fix: store a *copy* of the coordinates. Keeping a reference
            # to the caller's array made the cache check compare the array
            # against itself, so an in-place mutation of `x` silently
            # returned stale predictions.
            self.lastData = np.array(x, copy=True)
        if collapse:
            return np.argmax(self.oneHotPredictions, axis=1) + 1
        return np.transpose(self.oneHotPredictions.astype(np.int32))

    def score(self, X, y):
        """Return pixel accuracy of the label map against 1-based labels y."""
        return np.mean(y == self.predict(X, collapse=True).flatten())
80d3d743c27bf1534062126c6b294d519e6af0ba | Python | PavlosTsoumpariotis/cs1340-lab-2 | /Lab_2.py | UTF-8 | 2,079 | 3.859375 | 4 | [] | no_license | filename = "C:/Lab2/GOOG.csv"
def get_data(file_name):
    """ Read the data from the file_name and save them into a 2-D list or any other data structure for processing
    :param file_name: <str> - the file's name you saved for the stock's prices
    :return: a list of lists <list> (2-D list), or any other data structure you think more efficient
    """
    # Bug fix: the original opened the module-level `filename` constant and
    # silently ignored the `file_name` parameter, so the function could never
    # read any other file.
    data = []
    with open(file_name, "r") as f:
        next(f)  # skip the CSV header row
        for row in f:
            data.append(row.strip("\n").split(","))
    return data
def get_averages_by_month(list, month):
    """
    :param data_list: <list> - the list that you will process
    :param month: <int> - from which month you want the statistics
    :return: a float which is the average price of that month
    """
    # Volume-weighted average of the closing price (column 5, weighted by the
    # volume in column 6) over all rows whose date column ("M/D/Y") falls in
    # the requested month.
    total_sale = 0.0
    total_volume = 0.0
    for record in list:
        if int(record[0].split("/")[0]) == month:
            total_sale += float(record[5]) * float(record[6])
            total_volume += float(record[6])
    return total_sale / total_volume
def get_highest_by_month(list, month):
    """
    :param data_list: <list> - the list that you will process
    :param month: <int> - from which month you want the statistics
    :return: a float which is the highest price of that month
    """
    # Highest closing price (column 5) seen in `month`; 0 when no row matches.
    highest = 0
    for record in list:
        month_field = record[0].split("/")[0]
        if int(month_field) == month and float(record[5]) > highest:
            highest = float(record[5])
    return highest
# Demo driver: loads the CSV named by the module-level `filename` constant at
# import time and prints it; the remaining calls are left commented out.
data_list = get_data(filename)
print(data_list)
#print("This is the data from the csv file: Goog.csv")
#avg_aug = get_averages_by_month(data_list, 8)
#avg_jun = get_averages_by_month(data_list, 6)
#print(avg_aug)
#print("June's Average Stock price")
#print(avg_jun)
#print("August's Average Stock price")
#jun_highest = get_highest_by_month(data_list, 6)
#print(jun_highest)
#print("June's Highest Closing Price")
2e914400ada3f4704b53baeff883411125cfb51b | Python | ymsk-sky/atcoder | /abc121/d.py | UTF-8 | 934 | 3.65625 | 4 | [] | no_license | """a~bの排他的論理和F(a,b)を求めよ
n XOR n = 0 の特性より
F(a,b) = F(0,a-1) XOR F(0,b)
--
F(0,b) =0,1,2,...,a-2,a-1,a,a+1,a+2,...,b-2,b-1,b
F(0,a-1)=0,1,2,...,a-2,a-1
--
となるので,直接F(a,b)を求めるよりF(0,n)を求めることを考える.
任意の偶数nについて
n XOR (n+1) = 1
(最下位ビットのみが1違うことから上記はわかる)
よって
F(0,6)
= 0 XOR 1 XOR 2 XOR 3 XOR 4 XOR 5 XOR 6
= (0 XOR 1) XOR (2 XOR 3) XOR (4 XOR 5) XOR 6
= 1 XOR 1 XOR 1 XOR 6
ここで1 XOR 1 ... XOR 1については1が偶数個なら0,奇数個なら1となる
"""
def xor_upto(n):
    """Return F(0, n) = 0 XOR 1 XOR ... XOR n (and 0 for n < 0).

    Each consecutive pair (2k) XOR (2k+1) equals 1, so the cumulative XOR
    depends only on n % 4: it is n, 1, n + 1, 0 for remainders 0..3.
    This replaces the original's duplicated even/odd case analysis for both
    endpoints with one testable helper.
    """
    return (n, 1, n + 1, 0)[n % 4]


if __name__ == "__main__":
    a, b = map(int, input().split())
    # F(a, b) = F(0, a-1) XOR F(0, b): the shared prefix 0..a-1 cancels out.
    print(xor_upto(a - 1) ^ xor_upto(b))
| true |
3e978a5adbac23a99d9f715fccbf262bff4d3d7f | Python | rheadlam/Textbook-related-projects | /PythonProjects/BookAnalysis/ZipfyWordPlot.py | UTF-8 | 2,028 | 3.703125 | 4 | [] | no_license | """This module relates to a Think Python, 2nd Edition case study.
It plots or prints frequency against rank to demonstrate zipf's law. That is -
given some corpus of natural language utterances, the frequency of any
word is inversely proportional to its rank in the frequency table. By default
Flatland is analysed once again, and is flagged to plot. Running as a script
allows filename and flag arguments to be entered by user.
"""
from __future__ import print_function, division
import sys
import matplotlib.pyplot as plt #needs installation
from WordHistogram import process_file
def rank_freq(hist):
    """Returns (rank, freq) tuples in a list.

    hist: histogram mapping words to corresponding frequency
    returns: list of (rank, freq) tuples; rank 1 is the highest frequency
    """
    descending = sorted(hist.values(), reverse=True)
    return list(enumerate(descending, start=1))
def print_ranks(hist):
    """Prints rank against frequency, one "rank freq" pair per line.

    hist: histogram mapping words to corresponding frequency
    """
    for pair in rank_freq(hist):
        print(*pair)
def plot_ranks(hist, scale='log'):
    """Plots frequency against rank.

    hist: histogram mapping words to corresponding frequency
    scale: string 'linear' or 'log'
    """
    # On log-log axes, Zipf's power law appears as a straight line.
    # NOTE(review): zip(*t) raises on an empty histogram — confirm callers
    # never pass one.
    t = rank_freq(hist)
    rs, fs = zip(*t)
    plt.clf()
    plt.xscale(scale)
    plt.yscale(scale)
    plt.title('Zipf Word plot')
    plt.xlabel('Rank')
    plt.ylabel('Frequency')
    plt.plot(rs, fs, 'r-', linewidth=3)
    plt.show()
def main(script, filename='flatland.txt', flag='plot'):
    """Build the word histogram for `filename` and print or plot its ranks.

    script: argv[0], ignored (kept so main(*sys.argv) unpacks cleanly)
    filename: text file to analyse
    flag: 'print' for a rank/frequency listing, 'plot' for a Zipf plot
    """
    hist = process_file(filename, skip_header=True)
    # prints or plots
    if flag == 'print':
        print_ranks(hist)
    elif flag == 'plot':
        plot_ranks(hist)
    else:
        print('Usage: ZipfyWordPlot.py filename [print|plot]') #remove square brackets.
if __name__ == '__main__':
    main(*sys.argv)
| true |
768dcf03c79c071285ab7030ec82b5eed1ffd085 | Python | Rivarrl/leetcode_python | /leetcode/LCP173+/5339.py | UTF-8 | 1,299 | 3.3125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# ======================================
# @File : 5339.py
# @Time : 2020/3/7 23:28
# @Author : Rivarrl
# ======================================
from algorithm_utils import *
class Solution:
    """
    [1373. 二叉搜索子树的最大键值和](https://leetcode-cn.com/problems/maximum-sum-bst-in-binary-tree/)
    """
    @timeit
    def maxSumBST(self, root: TreeNode) -> int:
        """Return the largest key sum over all subtrees that are valid BSTs.

        Bug fix: the original only compared each node against its immediate
        children, so a deeper violation (e.g. a grandchild in the left
        subtree larger than the node) went undetected and an invalid subtree
        could be counted. We now propagate each subtree's (min, max) and
        require left_max < node.val < right_min.
        """
        res = 0
        def dfs(node):
            # Returns (is_bst, subtree_sum, subtree_min, subtree_max);
            # the empty tree is a valid BST with neutral bounds.
            if not node:
                return True, 0, float('inf'), float('-inf')
            l_ok, l_sum, l_min, l_max = dfs(node.left)
            r_ok, r_sum, r_min, r_max = dfs(node.right)
            total = l_sum + r_sum + node.val
            if l_ok and r_ok and l_max < node.val < r_min:
                nonlocal res
                res = max(res, total)
                return True, total, min(l_min, node.val), max(r_max, node.val)
            # Bounds of an invalid subtree are never consulted upstream.
            return False, total, 0, 0
        dfs(root)
        return res
if __name__ == '__main__':
    # Ad-hoc smoke runs against the LeetCode sample trees;
    # construct_tree_node and `null` come from algorithm_utils.
    a = Solution()
    x = construct_tree_node([1,4,3,2,4,2,5,null,null,null,null,null,null,4,6])
    a.maxSumBST(x)
    x = construct_tree_node([4,3,null,1,2,null,null])
    a.maxSumBST(x)
    x = construct_tree_node([-4,-2,-5])
    a.maxSumBST(x)
    x = construct_tree_node([2,1,3])
    a.maxSumBST(x)
    x = construct_tree_node([5,4,8,3,null,6,3])
    a.maxSumBST(x)
b364f7e05a3131b7d2f6dc4d6a5da008f3385151 | Python | nlaanait/stemdl | /utils/pre_augment_images.py | UTF-8 | 1,815 | 2.78125 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
import h5py
import numpy as np
def pre_process_hdf5_only(h5_raw, h5_aug, params):
    """
    Adds noise + glimpse for a set of images in a HDF5 dataset and writes results back to the h5_aug dataset.
    One image is generated per input image.
    :param h5_raw: 3D h5py.Dataset object structured as [example, rows, cols] containing the clean images
    :param h5_aug: 3D h5py.Dataset object structured as [example, rows, cols] that will hold the augmented image
    :param params: dict
    :return:
    """
    # Uses the TF1 graph-mode API (placeholders + Session); the graph is
    # built once and executed per example below.
    for det in [h5_raw, h5_aug]:
        assert isinstance(det, h5py.Dataset)
    assert isinstance(params, dict)
    assert h5_raw.shape == h5_aug.shape
    assert h5_raw.dtype == h5_aug.dtype
    assert h5_raw.dtype == np.float16
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    raw_image = tf.placeholder(tf.float16, shape=(h5_raw.shape[1], h5_raw.shape[2]), name='raw_image')
    # upcast to float32 since noise addition only works on float32
    # NOTE(review): this rebinding means the feed_dict below targets the cast
    # tensor rather than the placeholder; TF1 permits feeding arbitrary
    # tensors, but feeding the original placeholder was probably intended —
    # confirm.
    raw_image = tf.cast(raw_image, tf.float32)
    # add noise to 2D image
    # NOTE(review): add_noise_image and get_glimpses are not defined in this
    # module — presumably imported elsewhere in the file; verify.
    noisy_image = add_noise_image(raw_image, params)
    # make it a 4D tensor:
    # fake batch size of 1
    noisy_image_3d = tf.expand_dims(noisy_image, axis=0)
    # fake depth of 1:
    noisy_image_4d = tf.expand_dims(noisy_image_3d, axis=-1)
    # glimpse this 3D image:
    # NOTE: mutates the caller's params dict.
    params.update({'batch_size': 1})
    glimpsed_image = get_glimpses(noisy_image_4d, params)
    with tf.Session() as session:
        session.run(init_op)
        for eg_ind in range(h5_raw.shape[0]):
            aug_image = session.run(glimpsed_image, feed_dict={raw_image: h5_raw[eg_ind]})
            if eg_ind % 100 == 0 and eg_ind > 0:
                print(eg_ind)
            # Strip the fake batch and channel dims before writing back.
            h5_aug[eg_ind] = aug_image[0, :, :, 0]
3633fa2d10a65c07f72963640c80602ea7e2cc7a | Python | ArchieDash/RealPython | /time/timer_decorator.py | UTF-8 | 557 | 2.796875 | 3 | [] | no_license | import functools
import time
from reader import feed
def timer(func):
    """Decorator that reports how long each call to `func` takes."""
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        print(f"Elapsed time: {elapsed:0.4f} seconds")
        return result
    return timed
if __name__ == "__main__":
    # Demo: time how long fetching and printing the latest tutorial takes
    # (`feed` comes from the project-local `reader` package).
    @timer
    def latest_tutorial():
        tutorial = feed.get_article(0)
        print(tutorial)
    latest_tutorial()
f81a8fb022111520e98aab6e4babc09e9a7bbafb | Python | Pewgun/Flappy_birds_NEAT_algorithm | /gene.py | UTF-8 | 999 | 2.625 | 3 | [] | no_license | import random
from constants import *
class Gene():
    """A NEAT connection gene: a weighted, enable-able link between two nodes,
    tagged with an innovation number. All *_BOUND / USE_GAUSS / PROB_* values
    come from constants.py (imported with a star import)."""
    def __init__(self, fromNode, toNode, inno):
        self.fromNode = fromNode
        self.toNode = toNode
        # Initial weight: Gaussian around 0 (clamped) or uniform in
        # [LOWER_WEIGHT_BOUND, UPPER_WEIGHT_BOUND], depending on USE_GAUSS.
        if USE_GAUSS:
            self.weight = random.gauss(0.0,WEIGHT_CHANGE_POWER)
            self.clamp()
        else:
            self.weight = random.random()*abs(LOWER_WEIGHT_BOUND-UPPER_WEIGHT_BOUND)+LOWER_WEIGHT_BOUND
        self.enabled = True
        self.inno = inno
    def clamp(self):
        """Clip the weight into [LOWER_WEIGHT_BOUND, UPPER_WEIGHT_BOUND]."""
        self.weight = min(max(LOWER_WEIGHT_BOUND, self.weight), UPPER_WEIGHT_BOUND)
    def mutateWeight(self):
        """With probability PROB_CHANGE_WEIGHT reassign the weight from
        scratch; otherwise perturb it with clamped Gaussian noise."""
        if random.random() < PROB_CHANGE_WEIGHT:
            if USE_GAUSS:
                self.weight = random.gauss(0.0,WEIGHT_CHANGE_POWER)
                self.clamp()
            else:
                # Uniform draw is already in range, so no clamp needed.
                self.weight = random.random()*abs(LOWER_WEIGHT_BOUND-UPPER_WEIGHT_BOUND)+LOWER_WEIGHT_BOUND
        else:
            self.weight += random.gauss(0.0,WEIGHT_CHANGE_POWER)
            self.clamp()
    def clone(self):
        """Return a copy with the same endpoints, weight, enabled flag and
        innovation number (node objects are shared, not copied)."""
        g = Gene(self.fromNode, self.toNode, self.inno)
        g.weight = self.weight
        g.enabled = self.enabled
        return g
37ee2ccc42bdbbb544291b2587f49a00ea4d061d | Python | StetsenTech/rolodex-reader | /rolodex/utils/process.py | UTF-8 | 2,320 | 3.140625 | 3 | [] | no_license | """Module that adds methods to help with processing input"""
import re
import phonenumbers
# Regex validators for file input
# Format 1: "Last, First, (NNN)-NNN-NNNN, color, ZIP"
# NOTE(review): the character class [A-z] also matches the punctuation
# between 'Z' and 'a' in ASCII ([\]^_`) — [A-Za-z] was probably intended.
VALID_ONE = re.compile((
    r'(?P<last>[A-z]+),\s(?P<first>[A-z. ]+),\s'
    r'(?P<phone>\([0-9]{3}\)-[0-9]{3}-[0-9]{4}),\s'
    r'(?P<color>[A-z ]+),\s(?P<zip>[0-9]{5})'
))
# Format 2: "First Last, color, ZIP, NNN NNN NNNN"
VALID_TWO = re.compile((
    r'(?P<first>[A-z. ]+)\s(?P<last>[A-z]+),\s'
    r'(?P<color>[A-z ]+),\s(?P<zip>[0-9]{5}),\s'
    r'(?P<phone>[0-9]{3}\s[0-9]{3}\s[0-9]{4})'
))
# Format 3: "First, Last, ZIP, NNN NNN NNNN, color"
VALID_THREE = re.compile((
    r'(?P<first>[A-z. ]+),\s(?P<last>[A-z]+),\s'
    r'(?P<zip>[0-9]{5}),\s(?P<phone>[0-9]{3}\s[0-9]{3}\s[0-9]{4}),\s'
    r'(?P<color>[A-z ]+)'
))
def process_entries(entries, p_format="{}-{}-{}"):
    """Validate and normalize rolodex entries.

    Args:
        entries(list): List of personal-information strings, one entry each.
        p_format(basestring): Output format for the phone number (passed
            through to ``_format_phone_number``).

    Returns:
        list, list: List of entry dictionaries and a list of invalid indices.
    """
    valid_entries = []  # Tracks valid entries
    errors = []  # Tracks invalid entry indices
    for i, entry in enumerate(entries):
        # Try each known line format exactly once. (The original matched
        # every pattern twice — once to test and once to capture — and its
        # docstring advertised a `p_region` parameter that never existed.)
        for validator in (VALID_ONE, VALID_TWO, VALID_THREE):
            entry_match = validator.match(entry)
            if entry_match:
                break
        else:
            # No format matched: record the invalid index and move on.
            errors.append(i)
            continue

        # Convert phone number
        # @ TODO: Handle in marshmallow schema
        phone = _format_phone_number(entry_match.group("phone"), p_format)

        valid_entries.append({
            "first_name": entry_match.group("first"),
            "last_name": entry_match.group("last"),
            "phone_number": phone,
            "color": entry_match.group("color"),
            "zipcode": entry_match.group("zip"),
        })
    return valid_entries, errors
def _format_phone_number(phone, p_format):
    """Converts phone number to desired format.

    The region is hard-coded to "US".
    NOTE(review): phonenumbers.format_number expects a PhoneNumberFormat
    enum value, but the caller's default is the string "{}-{}-{}" — confirm
    what format_number actually does with that argument.
    """
    phone_object = phonenumbers.parse(phone, "US")
    return phonenumbers.format_number(phone_object, p_format)
| true |
9f340287b13eb00718b54aa618a01ff5c5fc29c8 | Python | salkinium/bachelor | /experiment_control/box.py | UTF-8 | 3,262 | 2.609375 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Niklas Hauser
# All rights reserved.
#
# The file is part of my bachelor thesis and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import logging
import os
from periodic_timer import PeriodicTimer
from temperature_control import TemperatureControl
from mote_control import MoteControl
class Box(object):
    """ TemperatureBox
    Allows access to the controller and node in the styrofoam box.
    """
    def __init__(self, identifier, mote, temperature, log_path=''):
        super(Box, self).__init__()
        # NOTE(review): handlers are added unconditionally, so constructing
        # two Box objects with the same identifier duplicates log output.
        self.logger = logging.getLogger('Box.({})'.format(identifier))
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # console logging
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)
        # file logging
        fh = logging.FileHandler(os.path.join(log_path, 'box-{}.log'.format(identifier)))
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        self.id = int(identifier)
        # Serial links to the mote and (optionally) the temperature controller.
        self.mote_control = MoteControl(mote, os.path.join(log_path, 'box_raw-{}.log'.format(identifier)))
        if temperature:
            self.temperature_control = TemperatureControl(temperature, os.path.join(log_path, 'box_raw-{}.log'
                                                                                    .format(identifier)))
        else:
            self.temperature_control = None
        # Log the environment every 10 seconds for the lifetime of the box.
        self.environment_timer = PeriodicTimer(10, self._report_environment)
        self.environment_timer.start()
    @property
    def air_temperature(self):
        """Air temperature from the controller; falls back to the mote sensor."""
        if self.temperature_control:
            return self.temperature_control.temperature
        else:
            return self.mote_temperature
    @property
    def mote_temperature(self):
        """Temperature measured by the mote's on-board sensor."""
        return self.mote_control.temperature
    @property
    def mote_humidity(self):
        """Relative humidity measured by the mote's on-board sensor."""
        return self.mote_control.humidity
    def temperature_target_reached(self):
        """True once the controller reports the mote is at the target
        temperature; trivially True when there is no controller."""
        if self.temperature_control:
            return (self.temperature_control.target_reached(self.mote_temperature))
        else:
            return True
    def set_air_temperature(self, value):
        """Set the controller's target air temperature (no-op without one)."""
        if not self.temperature_control:
            return
        self.logger.info("Setting air temperature to {}C".format(value))
        self.temperature_control.temperature = value
    def broadcast(self, msg):
        """Forward `msg` to the mote for radio broadcast."""
        self.mote_control.broadcast(msg)
    def _report_environment(self):
        """Periodic timer callback: log one environment snapshot.
        Returns True so the PeriodicTimer keeps rescheduling it."""
        if self.temperature_control:
            power = self.temperature_control.power
        else:
            power = -1
        self.logger.info("Environment: Tair={:.1f}C Tmote={:.1f}C Hmote={:.1f}% Tpower={}%"
                         .format(self.air_temperature, self.mote_temperature,
                                 self.mote_humidity, power))
        return True
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "Box( {} )".format(self.id)
| true |
578eef06fe6ac22f18a8acae7d0996b592138dad | Python | liupei-git/test1 | /fmz/python/[VNC] SVM Test (Copy).py | UTF-8 | 3,264 | 2.9375 | 3 | [] | no_license | import numpy as np
import pandas as pd
data = DataAPI.MktIdxdGet(indexID=u"",ticker=u"000300",tradeDate=u"",beginDate=u"20100101",endDate=u"20180501",exchangeCD=u"XSHE,XSHG",field=u"",pandas="1")
data.set_index('tradeDate', inplace = True)
# Fetch daily price data for the HS300 index (DataAPI is supplied by the
# quant platform, not defined in this file).
for i in range(1, 21, 1):
    data['close-' + str(i) + 'd'] = data['closeIndex'].shift(i)
# Add 20 columns to `data`: the closing price lagged by 1 through 20 days.
hs_close = data[[x for x in data.columns if 'close' in x]].iloc[20:]
# Keep today's close plus the 1-20 day lags; iloc[20:] drops the leading
# rows whose lags are invalid (NaN).
hs_close = hs_close.iloc[:, ::-1]
# Reverse the column order of the new DataFrame.
################################################################################################
from sklearn import svm
# Import the SVM algorithms from sklearn.
days = 1500
# Global split point between training and test sets; 1500 rows is roughly
# 75% of the data above.
clf_close = svm.SVR(kernel='linear')
# Use SVR from the svm module; 'linear' selects a linear kernel.
f_close_train = hs_close[:days]
# Training-set features.
l_close_train = hs_close['closeIndex'].shift(-1)[:days]
# Training-set labels; shift(-1) on the close so the model predicts the
# next day's closing price.
f_close_test = hs_close[days:]
# Test-set features.
l_close_test = hs_close['closeIndex'].shift(-1)[days:]
# Test-set labels; shift(-1) again means the next day's close.
clf_close.fit(f_close_train, l_close_train)
# Fit the model.
######################################################################################
p_close_train = clf_close.predict(f_close_train)
# Predict on the training-set features to generate predicted closes.
df_close_train = pd.DataFrame(l_close_train)
# New DataFrame holding the training labels, i.e. the next day's close.
df_close_train.columns = ['next close']
# Rename the column to 'next close'.
df_close_train['predicted next close'] = p_close_train
# Add a column with the predicted closing prices.
df_close_train['next open'] = data['openIndex'][20:20 + days].shift(-1)
# Add the next day's opening price, taken from `data` (not hs_close),
# hence the explicit slicing.
trigger = 1.0
df_close_train['position'] = np.where(df_close_train['predicted next close'] > df_close_train['next open'] * trigger, 1, 0)
# np.where: take a long position (1) when the predicted next close exceeds
# the next open scaled by `trigger`, otherwise stay flat (0).
df_close_train['PL'] = np.where(df_close_train['position'] == 1, (df_close_train['next close'] - df_close_train['next open']) / df_close_train['next open'], 0)
# When long: buy at the next open and sell at the next close, recording that
# day's return; otherwise the return is 0.
df_close_train['strategy'] = (df_close_train['PL'].shift(1) + 1).cumprod()
# Cumulative strategy return; shift(1) because the return booked on a given
# day is only realised the following day.
df_close_train['return'] = (df_close_train['next close'].pct_change() + 1).cumprod()
# Cumulative benchmark return.
df_close_train[['strategy', 'return']].dropna().plot()
# Plot the cumulative returns of the strategy against the benchmark.
##########################################################################
def main():
    """FMZ platform entry point; `Log` and `exchange` are globals injected
    by the platform runtime (not defined in this file)."""
    Log(exchange.GetAccount())
| true |
84f076d84e4bcf437bd63d50982806f00724c756 | Python | lindo-zy/python-100 | /python-075.py | UTF-8 | 330 | 3.171875 | 3 | [] | no_license | '''
Check whether today is Valentine's Day (Feb 14) and print a reminder.
'''
import time
if __name__=='__main__':
    # Current local date formatted as "MM-DD".
    date=time.strftime('%m-%d',time.localtime())
    if date=='02-14':
        # "It's Valentine's Day — time to buy your girlfriend a rose!!"
        print ('情人节是时候给你女朋友买支玫瑰花了!!')
    else:
        # "Don't forget to send a red envelope!!" / "Haha, this is a test!!"
        print ('这时候你不要忘记发个红包!!')
        print ('哈哈,这是一个测试题!!')
31bca7d313e1dafd00daa8f671d41d3f9c0f1c69 | Python | anshiquanshu66/CSKB-Population | /aser/relation.py | UTF-8 | 2,406 | 2.546875 | 3 | [
"MIT"
] | permissive | import hashlib
try:
import ujson as json
except:
import json
import pprint
from aser.base import JsonSerializedObject
# Discourse-style relation senses tracked between eventualities (plus the
# generic Co_Occurrence sense); these strings are the keys used in
# Relation.relations.
relation_senses = [
    'Precedence', 'Succession', 'Synchronous',
    'Reason', 'Result',
    'Condition', 'Contrast', 'Concession',
    'Conjunction', 'Instantiation', 'Restatement',
    'ChosenAlternative', 'Alternative', 'Exception',
    'Co_Occurrence']
class Relation(JsonSerializedObject):
    """A directed relation between two eventualities (hid -> tid), holding a
    numeric count per relation sense in ``self.relations``."""

    def __init__(self, hid=None, tid=None, relations=None):
        self.hid = hid if hid else ""
        self.tid = tid if tid else ""
        self.rid = Relation.generate_rid(self.hid, self.tid)
        self.relations = dict()
        self.update(relations)

    @staticmethod
    def generate_rid(hid, tid):
        """Deterministic relation id: SHA-1 of "hid$tid"."""
        key = hid + "$" + tid
        return hashlib.sha1(key.encode('utf-8')).hexdigest()

    def to_triples(self):
        """Expand the counted senses into (hid, sense, tid) triples, repeating
        each triple `count` times (counts are truncated to int)."""
        triples = []
        for r in sorted(self.relations.keys()):
            triples.extend([(self.hid, r, self.tid)] * int(self.relations[r]))
        return triples

    def _merge_counts(self, counts):
        # Accumulate (sense, count) pairs into self.relations. This replaces
        # three near-identical loops in the original update().
        for r, cnt in counts:
            self.relations[r] = self.relations.get(r, 0) + cnt

    def update(self, x):
        """Merge counts from a dict {sense: count}, an iterable of senses
        (each worth 1.0), or another Relation with matching endpoints.
        Any other input (including None) is silently ignored, as before."""
        if x is None:
            return
        if isinstance(x, dict):
            self._merge_counts(x.items())
        elif isinstance(x, (list, tuple)):
            self._merge_counts((r, 1.0) for r in x)
        elif isinstance(x, Relation) and self.hid == x.hid and self.tid == x.tid:
            self._merge_counts(x.relations.items())

    def __str__(self):
        repr_dict = {
            "rid": self.rid,
            "hid": self.hid,
            "tid": self.tid,
            "relations": self.relations.__str__()
        }
        return pprint.pformat(repr_dict)

    def __repr__(self):
        return "(%s, %s, %s)" % (self.hid, self.tid, self.relations)
d5e98d1ac90fc24699c6017cc19b86a3738f954a | Python | rebeccahhh/Berea | /Data Structures/Python/hunterr-csc236-A12/hunterr-a12/war.py | UTF-8 | 4,648 | 3.609375 | 4 | [] | no_license | from Stack import Stack
import Queue
from Queue import Queue
import random
class War:
    """Card game 'War' skeleton. NOTE(review): this is unfinished Python 2
    code (print statements without parentheses) and several methods below are
    flagged as broken; the intended semantics (see the placeholder message in
    make_move) need to be pinned down before fixing."""
    def __init__(self):
        # possibly useful instance variables
        self.myCurrent = None # my currently displayed card
        self.otherCurrent = None # other currently displayed card
        self.currentState = 0 # keeps track of the state of play
        # NOTE(review): every other method reads/writes `self.state`, not
        # `self.currentState` — one of the two names is wrong.
        self.dealingPile = Stack() # queue or stack
        self.myHand = Stack() # queue or stack
        self.myStorage = Queue() # queue or stack
        self.compHand = Stack() # queue or stack
        self.compStorage = Queue() # queue or stack
        self.lootPile = Queue() # queue or stack
    #def War():??
    # Constructor initializes all instance variables
    def add_dealingPile(self):
        # Builds and shuffles a 52-card pile (4 copies of each rank).
        suit=["A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K"]
        for i in range(4):
            for i in suit:
                self.dealingPile.push(i)
        random.shuffle(self.dealingPile.items)
        print self.dealingPile.items
        return self.dealingPile
    def deal(self):
        # deals out 25 cards from to each player's playing pile from shuffled dealers pile
        count = 0
        while count < 25:
            x = self.dealingPile.pop()
            self.myHand.push(x)
            y = self.dealingPile.pop()
            self.compHand.push(y)
            # NOTE(review): `count + 1` is a no-op, so `count` never advances
            # and this loop drains the whole dealing pile (and then pops from
            # an empty Stack); should be `count += 1`.
            count + 1
        self.state = 1
        # self.myPlayingPile.push(self, object)
    def make_move(self):
        # initiates a round of play and communicates play-by-play during the round
        # returns true when the game is still in play
        # returns false when the game is over
        # Communicates an appropriate message about whether the user beat the computer
        while self.state == 1:
            while self.myHand.size() != 0 and self.compHand.size() != 0:
                my = self.myHand.pop()
                comp = self.compHand.pop()
                self.compare_cards(my, comp)
            if self.myHand.size() == 0:
                self.move_my_storage()
            elif self.compHand.size() == 0:
                self.move_comp_storage()
            else:
                self.state = 0
        if self.state == 0:
            #compare total cards
            # NOTE(review): end-of-game scoring was never implemented.
            print("You need to do something here Becca")
            print()
    def remove_my_card(self):
        # Precondition: myPlayingPile is not empty
        # If it is not empty, the function removes a card from myPlayingPile,
        # returning the stored value
        # NOTE(review): `self.myHand != None` is always true (the attribute is
        # a Stack); this does not test for emptiness as the comment implies.
        if self.myHand != None:
            card = self.myHand.pop()
            self.lootPile.enqueue(card)
            #return self.lootPile()
        else:
            print("What in tarnation???")
    def remove_other_card(self):
        # Precondition: compHand is not empty
        # If it is not empty, the function removes a card from compHand,
        # returning the stored value
        # NOTE(review): same always-true check as remove_my_card above.
        if self.compHand != None:
            card = self.compHand.pop()
            self.lootPile.enqueue(card)
            #return self.lootPile()
        else:
            print("dag-flabbit")
    def compare_cards(self, my, comp):
        # compares myCurrent to otherCurrent and behaves appropriately
        # NOTE(review): cards are strings, so `>`/`<` compare
        # lexicographically and e.g. "10" < "2"; a rank-order mapping is
        # needed for correct comparison.
        my = my
        comp = comp
        print str(my)
        print str(comp)
        if my > comp:
            self.lootPile.enqueue(my)
            self.lootPile.enqueue(comp)
            self.move_my_loot()
        elif my < comp:
            self.lootPile.enqueue(my)
            self.lootPile.enqueue(comp)
            self.move_other_loot()
        elif my == comp:
            self.state = 1
        else:
            print ("HELP MEEEE")
    def move_my_loot(self):
        # moves everything from lootPile to myStorage
        # NOTE(review): the dequeued card is discarded and the loop index `i`
        # is enqueued instead; should enqueue the dequeue() result.
        loot = self.lootPile.size()
        for i in range(loot):
            self.lootPile.dequeue()
            self.myStorage.enqueue(i)
        # does this work????
    def move_other_loot(self):
        # moves everything from lootPile to compStorage
        # NOTE(review): same index-instead-of-card bug as move_my_loot.
        loot = self.lootPile.size()
        for i in range(loot):
            self.lootPile.dequeue()
            self.compStorage.enqueue(i)
    def move_my_storage(self):
        # moves everything from myStorage to myPlayingPile
        # NOTE(review): pushes the loop index instead of the dequeued card.
        stuff = self.myStorage.size()
        for i in range(stuff):
            self.myStorage.dequeue()
            self.myHand.push(i)
    def move_comp_storage(self):
        # moves everything from compStorage to compHand
        # NOTE(review): `self.compStorage()` *calls* the Queue object, which
        # raises TypeError; this should mirror move_my_storage's size()/range
        # loop (with the card, not the index, being pushed).
        for i in self.compStorage():
            self.compStorage.dequeue()
            self.compHand.push(i)
43be4215d61347376ab2678b71f6e11410b65a77 | Python | Polarisru/pyuavcan | /pyuavcan/transport/serial/_stream_parser.py | UTF-8 | 6,870 | 2.796875 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
import typing
from pyuavcan.transport import Timestamp
from ._frame import SerialFrame
class StreamParser:
    """
    A stream parser is fed with bytes received from the channel.
    The parser maintains internal parsing state machine; whenever the machine detects that a valid frame is received,
    the callback is invoked.
    When the state machine identifies that a received block of data cannot possibly
    contain or be part of a valid frame, the raw bytes are delivered into the callback as-is for optional later
    processing; such data is called "out-of-band" (OOB) data. An empty sequence of OOB bytes is never reported.
    The OOB data reporting can be useful if the same serial port is used both for UAVCAN and as a text console.
    The OOB bytes may or may not be altered by the COBS decoding logic.
    """
    def __init__(
        self,
        callback: typing.Callable[[Timestamp, memoryview, typing.Optional[SerialFrame]], None],
        max_payload_size_bytes: int,
    ):
        """
        :param callback: Invoked when a new frame is parsed or when a block of data could not be recognized as a frame.
        In the case of success, an instance of the frame class is passed in the last argument, otherwise it's None.
        In either case, the raw buffer is supplied as the second argument for capture/diagnostics or OOB handling.
        :param max_payload_size_bytes: Frames containing more than this many bytes of payload
        (after escaping and not including the header, CRC, and delimiters) may be considered invalid.
        This is to shield the parser against OOM errors when subjected to an invalid stream of bytes.
        """
        if not (callable(callback) and max_payload_size_bytes > 0):
            raise ValueError("Invalid parameters")
        self._callback = callback
        # Worst-case on-the-wire frame size: COBS expansion of the payload
        # plus fixed overhead, plus 2 for the starting/ending delimiters.
        self._max_frame_size_bytes = (
            SerialFrame.calc_cobs_size(
                max_payload_size_bytes + SerialFrame.NUM_OVERHEAD_BYTES_EXCEPT_DELIMITERS_AND_ESCAPING
            )
            + 2
        )
        self._buffer = bytearray() # Entire frame including all delimiters.
        # None means "between frames"; set to the arrival time of the first
        # byte of the current (potential) frame.
        self._timestamp: typing.Optional[Timestamp] = None
    def process_next_chunk(self, chunk: typing.Union[bytes, bytearray, memoryview], timestamp: Timestamp) -> None:
        # TODO: PERFORMANCE WARNING: DECODE COBS ON THE FLY TO AVOID EXTRA COPYING
        for b in chunk:
            self._buffer.append(b)
            if b == SerialFrame.FRAME_DELIMITER_BYTE:
                # A delimiter terminates the current frame, or flushes
                # accumulated OOB noise if we were not inside a frame.
                self._finalize(known_invalid=self._outside_frame)
            else:
                if self._timestamp is None:
                    self._timestamp = timestamp # https://github.com/UAVCAN/pyuavcan/issues/112
                # Oversized data cannot be a valid frame: flush it as OOB.
                if self._outside_frame or (len(self._buffer) > self._max_frame_size_bytes):
                    self._finalize(known_invalid=True)
    @property
    def _outside_frame(self) -> bool:
        return self._timestamp is None
    def _finalize(self, known_invalid: bool) -> None:
        if not self._buffer or (len(self._buffer) == 1 and self._buffer[0] == SerialFrame.FRAME_DELIMITER_BYTE):
            # Avoid noise in the OOB output during normal operation.
            # TODO: this is a hack in place of the proper on-the-fly COBS parser.
            return
        buf = memoryview(self._buffer)
        self._buffer = bytearray() # There are memoryview instances pointing to the old buffer!
        ts = self._timestamp or Timestamp.now()
        self._timestamp = None
        # known_invalid short-circuits the COBS parse for data we have
        # already rejected (OOB); parse_from_cobs_image may still return None.
        parsed: typing.Optional[SerialFrame] = None
        if (not known_invalid) and len(buf) <= self._max_frame_size_bytes:
            parsed = SerialFrame.parse_from_cobs_image(buf)
        self._callback(ts, buf, parsed)
def _unittest_stream_parser() -> None:
    # Exercises the OOB path, the happy path (valid frame), the oversize
    # path, and an abruptly-truncated frame followed by a valid one.
    from pytest import raises
    from pyuavcan.transport import Priority, MessageDataSpecifier
    ts = Timestamp.now()
    outputs: typing.List[typing.Tuple[Timestamp, memoryview, typing.Optional[SerialFrame]]] = []
    # max_payload_size_bytes must be positive.
    with raises(ValueError):
        sp = StreamParser(lambda *_: None, 0)
    sp = StreamParser(lambda ts, buf, item: outputs.append((ts, buf, item)), 4)
    print("sp._max_frame_size_bytes:", sp._max_frame_size_bytes) # pylint: disable=protected-access
    def proc(
        b: typing.Union[bytes, memoryview]
    ) -> typing.Sequence[typing.Tuple[Timestamp, memoryview, typing.Optional[SerialFrame]]]:
        # Feed one chunk and drain everything the parser emitted.
        sp.process_next_chunk(b, ts)
        out = outputs[:]
        outputs.clear()
        for i, (t, bb, f) in enumerate(out):
            print(f"output {i + 1} of {len(out)}: ", t, bytes(bb), f)
        return out
    assert not outputs
    # Unrecognizable data terminated by a delimiter is reported as OOB (None).
    ((tsa, buf, a),) = proc(b"abcdef\x00")
    assert ts.monotonic_ns <= tsa.monotonic_ns <= Timestamp.now().monotonic_ns
    assert ts.system_ns <= tsa.system_ns <= Timestamp.now().system_ns
    assert a is None
    assert memoryview(b"abcdef\x00") == buf
    assert [] == proc(b"")
    # Valid frame.
    f1 = SerialFrame(
        priority=Priority.HIGH,
        source_node_id=SerialFrame.FRAME_DELIMITER_BYTE,
        destination_node_id=SerialFrame.FRAME_DELIMITER_BYTE,
        data_specifier=MessageDataSpecifier(2345),
        transfer_id=1234567890123456789,
        index=1234567,
        end_of_transfer=True,
        payload=memoryview(b"ab\x9E\x8E"),
    ) # 4 bytes of payload.
    ((tsa, buf, a),) = proc(f1.compile_into(bytearray(100)))
    assert tsa == ts
    assert isinstance(a, SerialFrame)
    assert SerialFrame.__eq__(f1, a)
    assert buf[-1] == 0 # Frame delimiters are in place.
    # Second valid frame is too long.
    f2 = SerialFrame(
        priority=Priority.HIGH,
        source_node_id=SerialFrame.FRAME_DELIMITER_BYTE,
        destination_node_id=SerialFrame.FRAME_DELIMITER_BYTE,
        data_specifier=MessageDataSpecifier(2345),
        transfer_id=1234567890123456789,
        index=1234567,
        end_of_transfer=True,
        payload=memoryview(bytes(f1.compile_into(bytearray(1000))) * 2),
    )
    assert len(f2.payload) == 43 * 2 # Cobs escaping
    ((tsa, buf, a),) = proc(f2.compile_into(bytearray(1000)))
    assert tsa == ts
    assert a is None
    assert buf[-1] == 0 # Frame delimiters are in place.
    # Create new instance with much larger frame size limit; feed both frames but let the first one be incomplete.
    sp = StreamParser(lambda ts, buf, item: outputs.append((ts, buf, item)), 10 ** 6)
    assert [] == proc(f1.compile_into(bytearray(200))[:-2]) # First one is ended abruptly.
    (tsa, _, a), (tsb, _, b), = proc(
        f2.compile_into(bytearray(200))
    ) # Then the second frame begins.
    assert tsa == ts
    assert tsb == ts
    assert a is None
    assert isinstance(b, SerialFrame)
| true |
38f13f799d818001a209f6d1786fae47bae2b7f2 | Python | DiepHenry/underwriting_example | /backend/src/serve_model.py | UTF-8 | 606 | 2.65625 | 3 | [] | no_license | from fastapi import FastAPI
import uvicorn
from joblib import load
from pydantic import BaseModel
clf = load(r"d_tree.joblib")
def get_prediction(gender, age, bmi):
    """Run the decision-tree model on one (gender, age, bmi) sample.

    Returns a dict with the predicted class and the per-class probability
    list. The original computed the probabilities via predict_proba and then
    discarded them; including them in the response is additive, so existing
    consumers of the "prediction" key are unaffected.
    """
    features = [[gender, age, bmi]]
    label = clf.predict(features)[0]
    prob = clf.predict_proba(features)[0].tolist()
    return {"prediction": label, "probability": prob}
app = FastAPI()
@app.get("/")
def read_root():
    """Health/landing endpoint."""
    return {"message": "Welcome from the API"}
# Request body schema for /predict.
class ModelParams(BaseModel):
    gender: int
    age: int
    bmi: float
@app.post("/predict")
def predict(params: ModelParams):
    """Score one sample with the loaded decision tree."""
    pred = get_prediction(params.gender, params.age, params.bmi)
    return pred
| true |
16d3e29e458773c4b6ab663d322428a6efbdb95d | Python | stawary/LeetCode | /LeetCode/Reverse_Integer.py | UTF-8 | 2,208 | 4.375 | 4 | [] | no_license | '''
Given a 32-bit signed integer, reverse digits of an integer.
Example 1:
Input: 123
Output: 321
Example 2:
Input: -123
Output: -321
Example 3:
Input: 120
Output: 21
Note:
Assume we are dealing with an environment which could only hold integers within the 32-bit signed integer range.
For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
'''
class Solution:
    def reverse(self, x):
        """
        :type x: int
        :rtype: int

        Reverse the decimal digits of x, returning 0 when the result does
        not fit a signed 32-bit integer (Python's // is floor division).

        Bug fix: the original accepted results up to 2147483648, but the
        valid range is [-2**31, 2**31 - 1], so the positive bound was off by
        one. Working on abs(x) also removes the duplicated positive/negative
        branches of the first draft.
        """
        sign = 1 if x >= 0 else -1
        magnitude = abs(x)
        result = 0
        while magnitude > 0:
            result = result * 10 + magnitude % 10
            magnitude //= 10
        result *= sign
        # Simulate the 32-bit overflow semantics required by the problem.
        if -2**31 <= result <= 2**31 - 1:
            return result
        return 0
# Optimized version of the positive/negative branching in the first solution.
class Solution:
    def reverse(self, x):
        """
        :type x: int
        :rtype: int
        """
        result = 0
        s = (x > 0) - (x < 0) # sign of x: 1, -1, or 0
        m = x*s # m is always non-negative
        while m > 0:
            result = result*10 + m%10
            m//=10
        # NOTE(review): the positive 32-bit bound should be 2**31 - 1;
        # <= 2**31 is off by one (latent for inputs that are themselves
        # 32-bit, but still the wrong boundary).
        if result<=2**31:
            return result*s
        else:
            return 0
#The versions above work on the integer arithmetically; an alternative idea is
#to treat the number as a string and reverse it.
class Solution:
    def reverse(self, x):
        """Reverse the digits of ``x`` via string slicing; 0 on overflow."""
        # Sign of x: -1, 0 or +1.
        if x < 0:
            sign = -1
        elif x > 0:
            sign = 1
        else:
            sign = 0
        # Reverse the decimal digits of |x| with a slice.
        reversed_value = int(str(abs(x))[::-1])
        # Values at or above 2**31 are treated as overflow and collapse to 0.
        if reversed_value < 2 ** 31:
            return sign * reversed_value
        return 0
#This version is the most concise.
#Things learned:
#1. seq[::-1] reverses a sequence.
#2. s = (x > 0) - (x < 0) yields the sign of x (+1, 0 or -1).
#3. A signed 32-bit int ranges from -2^31 to 2^31-1 (unsigned: 0 to 2^32-1).
#   Positive numbers are stored directly; negatives use two's complement
#   (ones' complement + 1), which is why the minimum is -2^31.
2baa86c46c30317911aed0de14e8161d74d7b29d | Python | octo-weilian/SargassumSensing | /Python/mlc.py | UTF-8 | 8,816 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import rasterio as rio
from rasterio.plot import reshape_as_raster, reshape_as_image
# In[408]:
class mlClassifier:
    """
    A simple ML classifier class. No a priori probability (equal probability for each class).
    Adapted from:https://gist.github.com/jgomezdans/8276704#file-ml_test-ipynb
    Credits to J Gomez-Dans
    """
    def __init__(self,train,label_column):
        """
        Takes in a training dataset as panda dataframe, n_features*n_samples
        (i.e. columns * rows), column name of the labels and image stack to predict (optional).
        Pre computes log determinant of the training dataset.
        """
        self.train_labels = train[label_column].unique()
        self.label_column = label_column
        self.train = train
        # Per-class Gaussian statistics: mean vectors, inverse covariance
        # matrices and covariance determinants, aligned with train_labels.
        self.mus,self.i_covs,self.det_covs = self.compute_density()
    def compute_density(self):
        """Compute per-class Gaussian statistics.

        Returns a tuple (means, inverse covariance matrices,
        covariance determinants), one entry per label in train_labels.
        """
        mus = []
        i_covs= []
        det_covs = []
        for label in self.train_labels:
            # Rows of this class only; drop the label column so only the
            # feature columns remain, then transpose to n_features x n_samples.
            train_subset = self.train[self.train[self.label_column]==label]
            train_subset = train_subset.drop(columns=[self.label_column])
            train_subset = np.transpose(train_subset.to_numpy())
            # NOTE(review): n_samples / n_features are computed but unused.
            n_samples = train_subset.shape[1]
            n_features = train_subset.shape[0]
            cov_m = np.cov(train_subset)
            i_cov_m = np.linalg.inv(cov_m)
            det_cov = np.linalg.det(cov_m)
            mu = train_subset.mean(axis=1)
            mus.append(mu)
            i_covs.append(i_cov_m)
            det_covs.append(det_cov)
        return (mus,i_covs,det_covs)
    def calc_prob(self,x_test,mu,i_cov_m):
        """
        Method to compute the (class) conditional probability density
        """
        s = x_test - mu
        # Unnormalized Gaussian log-density: -0.5 * (x-mu)' C^-1 (x-mu).
        log_prob = -0.5*(np.dot(s,i_cov_m)*s).sum(1)
        prob = np.exp(log_prob)
        return prob
    def calc_prob_gx(self,x_test,mu,i_cov_m,det_cov,threshold):
        """
        Method to compute the gaussian discrimnant function with threshold
        """
        s = x_test - mu
        gx = -np.log(det_cov)-(np.dot(s,i_cov_m)*s).sum(1)
        if threshold :
            # Values below the (Chi-Square based) threshold are replaced by
            # the sentinel 99, marking pixels as unclassified.
            t = -threshold - np.log(det_cov)
            gx = np.where(gx>t,gx,99)
            return gx
        else:
            return gx
    def img2predict(self,stack_img):
        """
        Method to load an image as rasterio object
        """
        with rio.open(stack_img) as src:
            img = src.read()
            profile = src.profile
        return (img,profile)
    def prob_rasters(self,stack_img,out_file):
        """
        Method to compute the calculate the probability of a raster image.
        """
        img,profile = self.img2predict(stack_img)
        # One output band per class, float32, nodata -999.
        profile.update({'driver': 'GTiff','interleave': 'band','compress': 'lzw',
                        'width':img.shape[2],'height':img.shape[1],
                        'nodata': -999,'dtype': rio.float32,'count':len(self.train_labels)
                       })
        reshaped_img = reshape_as_image(img)
        raster_pred = reshaped_img.reshape(-1,len(img))
        stack=[]
        for i in range(len(self.train_labels)):
            mu = self.mus[i]
            i_cov_m = self.i_covs[i]
            prob_img = self.calc_prob(raster_pred,mu,i_cov_m)
            prob_reshaped_img = prob_img.reshape(reshaped_img[:,:,0].shape)
            stack.append([prob_reshaped_img])
        out_img = np.vstack(stack)
        with rio.open(out_file,'w',**profile) as dst:
            for i in range(len(out_img)):
                dst.write_band(i+1,out_img[i].astype(rio.float32))
    def classify_raster_gx(self,stack_img,out_file=None,threshold=None):
        """
        Method to compute the discrimnant function, find the max likelihood and assign classes to a raster image.
        Threshold is based on the N-degrees of freedom (N = number of predictors) and Chi-Square
        """
        # Accept either a path to a raster file or an in-memory array.
        if type(stack_img)==str:
            img,profile = self.img2predict(stack_img)
            reshaped_img = reshape_as_image(img)
            raster_pred = reshaped_img.reshape(-1,len(img))
            profile.update({'driver': 'GTiff','interleave': 'band','compress': 'lzw',
                        'width':img.shape[2],'height':img.shape[1],
                        'nodata': 99,'dtype': rio.uint8,'count':1})
        elif type(stack_img)==np.ndarray:
            reshaped_img = reshape_as_image(stack_img)
            raster_pred = reshaped_img.reshape(-1,len(stack_img))
        if threshold is None:
            # No threshold: class = argmax of the discriminant over classes
            # (+1 so class ids start at 1).
            stack=[]
            for i in range(len(self.train_labels)):
                mu = self.mus[i]
                i_cov_m = self.i_covs[i]
                det_cov = self.det_covs[i]
                prob_img = self.calc_prob_gx(raster_pred,mu,i_cov_m,det_cov,threshold)
                stack.append([prob_img])
            vstack = np.vstack(stack)
            class_stack = np.argmax(vstack,axis=0)+1
            class_image = class_stack.reshape(reshaped_img[:,:,0].shape)
        else:
            stack=[]
            for i in range(len(self.train_labels)):
                mu = self.mus[i]
                i_cov_m = self.i_covs[i]
                det_cov = self.det_covs[i]
                prob_img = self.calc_prob_gx(raster_pred,mu,i_cov_m,det_cov,threshold)
                stack.append([prob_img])
            vstack = np.vstack(stack)
            # Build an extra "unclassified" layer from pixels flagged with the
            # 99 sentinel in every class; argmax over the augmented stack then
            # maps fully-rejected pixels to the extra class id.
            mask= np.where(vstack==99,vstack,0)
            mask = np.sum(mask,axis=0)
            mask = np.where(mask==mask.max(),mask,0)
            new_vstack = np.where(vstack!=99,vstack,0)
            new_vstack = np.vstack((new_vstack,mask))
            class_stack = np.argmax(new_vstack,axis=0)+1
            class_image = class_stack.reshape(reshaped_img[:,:,0].shape)
        if out_file is None:
            return class_image
        else:
            with rio.open(out_file,'w',**profile) as dst:
                dst.write(class_image.astype(rio.uint8),1)
    def classify_testdata(self,x_test,label_column,threshold=None):
        """
        Method for accuracy assessment.
        Return oa,kappa,acc_df,con_mat
        """
        test_df = x_test.copy()
        all_columns = test_df.columns
        predictors = [i for i in x_test.columns if i != label_column]
        x_test = x_test[predictors]
        x_test = x_test.to_numpy()
        # One discriminant column ("<label>_gx") per class.
        for i,label in enumerate(self.train_labels):
            mu = self.mus[i]
            i_cov_m = self.i_covs[i]
            det_cov = self.det_covs[i]
            prob_data = self.calc_prob_gx(x_test,mu,i_cov_m,det_cov,threshold)
            test_df[label+"_gx"] = prob_data
        labels_gx = np.setdiff1d(test_df.columns,all_columns)
        if threshold is None:
            test_df['MLC predicted'] = test_df[labels_gx].idxmax(axis=1).str.split('_').str[0]
        else:
            # Mask the 99 sentinel before taking the per-row argmax.
            test_df['MLC predicted'] = test_df[labels_gx].where(test_df[labels_gx]!=99).idxmax(axis=1).str.split('_').str[0]
        test_df = test_df.dropna()[[label_column,'MLC predicted']]
        #create confusion matrix
        con_mat = pd.crosstab(test_df[label_column],test_df['MLC predicted'])
        con_mat.columns.name = 'MLC'
        #compute user, producer and overall accuracies
        row_sum = con_mat.sum(axis=1)
        col_sum = con_mat.sum(axis=0)
        omitted = np.setdiff1d(col_sum.index,row_sum.index)
        col_sum = col_sum.drop(omitted)
        ua = np.diag(con_mat)/row_sum
        pa = np.diag(con_mat)/col_sum
        f1 = (2 * pa*ua) /(pa+ua)
        oa = sum(np.diag(con_mat))/len(test_df)
        acc_df = round(pd.DataFrame({'Label':col_sum.index,'PA':pa.values,'UA':ua.values,'F1-score':f1.values}),2)
        #compute kappa score (khat)
        p = row_sum/ len(test_df)
        q = col_sum/len(test_df)
        exp_acc = sum(p*q)
        kappa = (oa-exp_acc)/(1-exp_acc)
        # Assemble the final accuracy table: marginals, per-class PA/UA/F1,
        # with OA and Kappa in the bottom-right corner.
        con_mat = con_mat.append(pd.Series(con_mat.sum(0),name='Observed'))
        con_mat['Classified'] = pd.Series(con_mat.sum(1))
        con_mat = con_mat.append(acc_df.set_index('Label')["PA"])
        con_mat["UA"] = acc_df.set_index('Label')["UA"]
        con_mat["F1-score"] = acc_df.set_index('Label')["F1-score"]
        con_mat.iloc[-1,-2] = f'{round(oa,2)} (OA)'
        con_mat.iloc[-1,-1] = f'{round(kappa,2)} (Kappa)'
        con_mat = con_mat.fillna('-')
        accuracy_table = con_mat[con_mat!=0.0].fillna('-')
        return accuracy_table
| true |
93e7937ff7a229be4a7fa487583ac63fc927e635 | Python | shreya-singh-tech/yelp-scrapping | /yelp_bs_Final.py | UTF-8 | 1,549 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
from lxml import html
import csv
import requests
from time import sleep
import re
import argparse
import sys
import pandas as pd
import time as t
# Browser-like User-Agent header so Yelp does not reject the request outright.
headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36'}
# Module-level accumulators shared between parse_url() and clean_urls().
links_with_text = []
final_city_links =[]
info_scraped = {}
def parse_url(url) :
    """Fetch *url* and append the hrefs of result anchors (class
    'css-166la90') to the module-level `links_with_text` list.

    Returns None; results are collected via the global list.
    """
    response=requests.get(url,headers=headers)
    soup=BeautifulSoup(response.content,'lxml')
    # Crude rate limiting between requests.
    t.sleep(3)
    #for a in soup.find_all('a', href=True):
        #print (a['href'])
    # NOTE(review): the CSS class below is specific to Yelp's current markup
    # and will silently match nothing if Yelp renames it -- verify periodically.
    for a in soup.find_all('a', href=True, class_ = 'css-166la90'):
        if a.text:
            links_with_text.append(a['href'])
    #for item in soup.select('[class*=container]'):
        #try:
            #print(item)
        #except Exception as e:
            #raise e
        #print('')
def clean_urls(links_with_text):
    """Keep only business-page links (those starting with "/biz/"), prefix
    them with the Yelp domain and return them as a DataFrame.

    Args:
        links_with_text: iterable of href strings scraped from a page.

    Returns:
        pandas.DataFrame with a single 'URL' column of absolute links.
    """
    # Fix: build the result locally instead of routing every URL through the
    # module-level `info_scraped` dict (whose single 'URL' slot was pointlessly
    # overwritten per link) and the ever-growing `final_city_links` global.
    business_urls = ["https://www.yelp.com" + link
                     for link in links_with_text
                     if link.startswith("/biz/")]
    print(business_urls)
    return pd.DataFrame({'URL': business_urls})
if __name__ == "__main__":
    # CLI entry point: scrape one Yelp results page and dump the links to CSV.
    parser = argparse.ArgumentParser()
    parser.add_argument('page_no')
    cli_args = parser.parse_args()
    start_offset = cli_args.page_no
    yelp_url = ("https://www.yelp.com/search?cflt=restaurants"
                "&find_loc=Chicago&start={}".format(start_offset))
    parse_url(yelp_url)
    clean_urls(links_with_text).to_csv("url_yelp.csv")
| true |
07d3b541bb4eafa2e5e39ad13dab0b2c2ec3e982 | Python | flaxandteal/tablespy | /src/tablespy/inspector.py | UTF-8 | 6,037 | 2.75 | 3 | [
"MIT"
] | permissive | from .inspection import Inspection
import numpy as np
import skimage
import skimage.measure
from tabulator import Stream, exceptions
import pandas as p
import xlrd
# Heuristic thresholds used by Inspector.region_split to decide whether a
# connected block of non-empty cells counts as a table.
MIN_COLS = 2                    # regions narrower than this are discarded
MAX_TITLE_COLS = 3              # longest run of cells still treated as a title
MIN_ROWS = 2                    # regions shorter than this are discarded
MAX_ROWS_TO_HEADINGS = 3        # max rows scanned before a (near-)full header row
MAX_HEADING_GAP_TOLERANCE = 1   # empty cells allowed in a row counted as "full"
MIN_HEADING_CONTENT = 3         # rows with fewer filled cells push the header down
class Inspector:
    """Heuristically locates rectangular table regions (header row, body,
    optional title) in a spreadsheet-like file read via tabulator."""
    def __init__(self, infile=None):
        # Path/stream handed to tabulator.Stream on each inspect() call.
        self.infile = infile
    def region_iter(self, **kwargs):
        """Yield (region_df, region, sheet) for every detected table region;
        the detected header row becomes the DataFrame's columns."""
        for inspection, df, sheet in self.inspect_iter(return_df=True, **kwargs):
            for region in inspection.regions.values():
                # Body: rows after the header boundary, columns of the region.
                region_df = df.iloc[
                    region['header-boundary'][0] + 1:region['upper-right'][0],
                    region['lower-left'][1]:region['upper-right'][1]
                ]
                region_df.columns = df.iloc[
                    region['header-boundary'][0],
                    region['lower-left'][1]:region['header-boundary'][1]
                ]
                yield region_df, region, sheet
    def inspect_iter(self, return_df=False, **kwargs) -> Inspection:
        """Inspect sheet 1, 2, ... until tabulator raises SourceError or the
        format has no multiple sheets."""
        i = 0
        while True:
            i += 1
            try:
                inspection = self.inspect(sheet=i, return_df=return_df, **kwargs)
            except exceptions.SourceError:
                break
            yield inspection
            if return_df:
                inspection, df, sheet = inspection
            # Only spreadsheet formats can have further sheets.
            if inspection.format not in ('ods', 'xls', 'xlsx'):
                break
    def inspect(self, sheet=None, return_df=False, **kwargs) -> Inspection:
        """Read one sheet of infile and return its Inspection (optionally
        together with the raw DataFrame and sheet number)."""
        # sheet == 1 should default, as it's what we expect for non-sheet formats
        if sheet is not None and sheet != 1:
            kwargs['sheet'] = sheet
        else:
            sheet = 1
        with Stream(self.infile, **kwargs) as stream:
            data = [row for row in stream]
            fmt = stream.format
        df = p.DataFrame(data)
        inspection = self.inspect_df(df, fmt)
        if return_df:
            return inspection, df, sheet
        else:
            return inspection
    def inspect_df(self, df, fmt='df'):
        """Detect table regions in *df* and return an Inspection describing
        each region's corners, header boundary and title."""
        inspection = Inspection(infile=self.infile)
        # Blank-looking cells become NaN; trim fully-empty border rows/columns.
        df = df.replace(r'^\s*$', np.nan, regex=True).reset_index()
        df.columns = range(len(df.columns))
        df.index = range(len(df.index))
        ia, iz = df.first_valid_index(), df.last_valid_index()
        dft = df.transpose()
        ca, cz = dft.first_valid_index(), dft.last_valid_index()
        df = df.loc[ia:iz, ca:cz]
        # Binary occupancy image: 1 where a cell has content.
        image = df.notna().to_numpy(dtype=int)
        regions = self.region_split(image, offset=(ia, ca))
        embedded_regions = []
        for region in regions:
            for region2 in regions:
                if region == region2:
                    continue
                # Is top left of this region within the bounds of another region?
                # If so, then nested tables are a problem beyond this routine, we assume
                # that this is just a coincidentally isolated set of values in a bigger table
                if region[0][0] >= region2[0][0] and region[0][0] <= region2[2][0] and \
                   region[0][1] >= region2[0][1] and region[0][1] <= region2[2][1]:
                    embedded_regions.append(region)
        regions = set(regions) - set(embedded_regions)
        region_dict = {}
        for ix, region in enumerate(regions):
            title = region[-1]
            if title:
                # title is (row, col, width); join the cells into one string.
                title_text = '\n'.join([str(c) for c in df.loc[title[0], title[1]:title[1] + title[2] - 1]])
            else:
                title_text = None
            region_dict[ix + 1] = {
                'lower-left': region[0],
                'header-boundary': region[1],
                'upper-right': region[2],
                'title': {
                    'loc': title,
                    'text': title_text
                }
            }
        inspection.regions = region_dict
        inspection.shape = image.shape
        inspection.format = fmt
        return inspection
    def region_split(self, image, offset=(0, 0)):
        """Find 4-connected blocks of filled cells in the 0/1 *image* and
        return tuples (lower-left, header-boundary, upper-right, title),
        all in absolute coordinates shifted by *offset*."""
        labelled_image, labels = skimage.measure.label(image, connectivity=1, return_num=True)
        regions = []
        for i in range(1, labels + 1):
            region = np.nonzero(labelled_image == i)
            lr, lc, ur, uc = min(region[0]), min(region[1]), max(region[0]), max(region[1])
            width = uc - lc + 1
            # Discard components too small to be tables.
            if width < MIN_COLS or ur - lr < MIN_ROWS or (width == MIN_COLS and ur - lr == MIN_ROWS):
                continue
            header_start = lr
            header_end = lr
            for i in range(ur - lr):
                row = labelled_image[lr + i]
                # A (nearly) full row marks the end of the header band.
                if np.count_nonzero(row) > width - MAX_HEADING_GAP_TOLERANCE:
                    header_end = lr + i
                    if i > MAX_ROWS_TO_HEADINGS:
                        # Header appears too late: rows above it are split off
                        # and re-examined recursively as their own regions.
                        header_start = lr + i
                        upper_regions = self.region_split(image[lr:lr+i, lc:uc], (offset[0] + lr, offset[1] + lc))
                        regions += upper_regions
                    break
                if np.count_nonzero(row) < MIN_HEADING_CONTENT:
                    header_start = lr + i + 1
            title = None
            if lr < header_start:
                # Scan the first row for a short run of cells to use as title.
                in_a_row = 0
                for i in range(lc, uc):
                    if labelled_image[lr, i]:
                        if not in_a_row:
                            title = (lr + offset[0], i + offset[1], 1)
                        if in_a_row > MAX_TITLE_COLS:
                            break
                        in_a_row += 1
                    elif in_a_row:
                        title = (lr + offset[0], i - in_a_row + offset[1], in_a_row)
                        break
            regions.append((
                (header_start + offset[0], lc + offset[1]),
                (header_end + offset[0], uc + offset[1]),
                (ur + offset[0], uc + offset[1]),
                title
            ))
        return regions
| true |
4585c42a2e6c46c5177480d8f39af8802687811d | Python | HemanthShetty/CS6200Project | /Stopping.py | UTF-8 | 895 | 2.859375 | 3 | [] | no_license | __author__ = 'Anupam'
# Path to the newline-separated stop-word list, relative to the working dir.
COMMON_WORDS_FILE = "../data/common_words"
def StopList(unigramIndex, queryTerms):
    """Remove stop words from a unigram index and a query-term list.

    Args:
        unigramIndex: dict mapping terms to postings.
        queryTerms: list of query terms.

    Returns:
        Tuple (newIndex, newQuery) with every stop word removed; the
        inputs are not modified.
    """
    with open(COMMON_WORDS_FILE, "r") as ins:
        stop_words = {line.rstrip() for line in ins}
    # Performance fix: the old loop called remove_values_from_list() and
    # remove_key() once per stop word, rebuilding the whole list and copying
    # the whole dict each time (O(stopwords * terms)). A single pass with
    # O(1) set membership produces the same result.
    newQuery = [term for term in queryTerms if term not in stop_words]
    newIndex = {key: value for key, value in unigramIndex.items()
                if key not in stop_words}
    return (newIndex, newQuery)
def isStopWord(word):
    """Return True if *word* appears in the common-words stop list.

    Reads COMMON_WORDS_FILE on every call (as before), but streams the
    file and exits early on the first match instead of materializing the
    whole list and scanning it afterwards.
    """
    with open(COMMON_WORDS_FILE, "r") as ins:
        for line in ins:
            if line.rstrip() == word:
                return True
    return False
def remove_values_from_list(the_list, val):
    """Return a copy of *the_list* with every occurrence of *val* removed."""
    filtered = []
    for item in the_list:
        if item != val:
            filtered.append(item)
    return filtered
def remove_key(d, key):
    """Return a shallow copy of *d* without *key* (no-op copy if absent)."""
    return {k: v for k, v in d.items() if k != key}
| true |
a89c6dd24821f67dee7dee82a4337ef25047ccb6 | Python | DerHunger/telingo | /telingo/__init__.py | UTF-8 | 20,477 | 2.796875 | 3 | [
"MIT"
] | permissive | """
The telingo module contains functions to translate and solve temporal logic
programs.
Classes:
Solver -- Solver class.
Application -- Main application class.
Functions:
imain -- Function to run the incremetal solving loop.
smain -- Function to run the incremetal solving loop scheduled.
main -- Main function starting an extended clingo application.
"""
from . import transformers as _tf
from . import theory as _ty
from . import scheduler as _sd
import sys as _sys
import clingo as _clingo
import textwrap as _textwrap
from time import clock
class Solver:
    """
    Solver object containing the logic to ground and solve scheduled lengths.
    """
    def __init__(self, ctl, theory, restarts_per_solve, conflicts_per_restart, move_final, verbose):
        """
        Initializes the solver.
        Arguments:
        ctl -- Control object holding the program.
        theory -- telingo theory.
        restarts_per_solve -- restarts per solve iteration.
        conflicts_per_restart -- number of conflicts before restart.
        move_final -- move final to current solving length, instead of maximum.
        verbose -- verbosity level.
        """
        self.__ctl = ctl
        self.__length = 0
        self.__last_length = 0
        self.__verbose = verbose
        self.__result = None
        self.__theory = theory
        self.__time0 = clock()
        # set solving and restart policy
        self.__ctl.configuration.solve.solve_limit = "umax,"+str(restarts_per_solve)
        if int(conflicts_per_restart) != 0:
            self.__ctl.configuration.solver[0].restarts = "F,"+str(conflicts_per_restart)
        self.__move_final = move_final
    def __verbose_start(self):
        """
        Starts the verbose timer.
        """
        self.__time0 = clock()
    def __verbose_end(self, string):
        """
        Ends the verbose timer and prints the time with a given string.
        Arguments:
        string -- Output prefix.
        """
        _sys.stdout.write(string+" Time:\t {:.2f}s\n".format(clock()-self.__time0))
    def solve(self, length, future_sigs, program_parts, on_model):
        """
        Grounds and solves the scheduler length.
        Arguments:
        length -- length to ground and solve.
        program_parts -- program parts to ground and solve.
        on_model -- callback for intercepting models.
        """
        if self.__verbose: _sys.stdout.write("Grounded Until:\t {}\n".format(self.__length))
        # previous length < new length
        if self.__length < length:
            # Collect the program parts for every newly added time point.
            parts = []
            for t in range(self.__length+1, length+1):
                for root_name, part_name, rng in program_parts:
                    for i in rng:
                        if ((t - i >= 0 and root_name == "always") or
                            (t - i > 0 and root_name == "dynamic") or
                            (t - i == 0 and root_name == "initial")):
                            parts.append((part_name, [t - i, t]))
            if length > 0:
                if not self.__move_final:
                    # Retire the previous horizon's __final marker.
                    self.__ctl.release_external(_clingo.Function("__final", [self.__length]))
                self.__ctl.cleanup()
            if self.__verbose:
                _sys.stdout.write("Grounding...\t "+str(parts)+"\n")
                self.__verbose_start()
            self.__ctl.ground(parts)
            if self.__verbose: self.__verbose_end("Grounding")
            self.__theory.translate(length, self.__ctl)
            if not self.__move_final:
                self.__ctl.assign_external(_clingo.Function("__final", [length]), True)
            self.__length = length
        # blocking or unblocking actions
        if length < self.__last_length:
            if self.__verbose: _sys.stdout.write("Blocking actions...\n")
            for t in range(length+1, self.__last_length+1):
                self.__ctl.assign_external(_clingo.Function("skip", [t]), True)
        elif self.__last_length < length:
            if self.__verbose: _sys.stdout.write("Unblocking actions...\n")
            for t in range(self.__last_length+1, length+1):
                self.__ctl.assign_external(_clingo.Function("skip", [t]), False)
        # solve
        if self.__verbose: self.__verbose_start()
        if self.__move_final:
            # Move the __final marker to the currently solved length.
            if length > 0:
                self.__ctl.assign_external(_clingo.Function("__final", [self.__last_length]), False)
            self.__ctl.assign_external(_clingo.Function("__final", [length]), True)
        # Assume all atoms referring to time points beyond `length` false.
        assumptions = []
        for name, arity, positive in future_sigs:
            for atom in self.__ctl.symbolic_atoms.by_signature(name, arity, positive):
                if atom.symbol.arguments[-1].number > length:
                    assumptions.append(-atom.literal)
        self.__result = self.__ctl.solve(on_model=on_model, assumptions=assumptions)
        if self.__verbose:
            self.__verbose_end("Solving")
            _sys.stdout.write(str(self.__result)+"\n\n")
        self.__last_length = length
        # return
        return self.__result
def imain(prg, future_sigs, program_parts, on_model, imin = 0, imax = None, istop = "SAT"):
    """
    Take a program object and runs the incremental main solving loop.
    For each pair (name, arity) in future_sigs all atoms in the program base
    with the time parameter referring to the future are set to false. For
    example, given (p, 2) and atoms p(x,1) in step 0, the atom would p(x,1)
    would be set to false via an assumption. In the following time steps, it
    would not be set to False.
    The list program_parts contains all program parts appearing in the program
    in form of triples (root, name, range) where root is either "initial" (time
    step 0), "always" (time steps >= 0), or "dynamic" (time steps > 0) and
    range is a list of integers for which the part has to be grounded
    backwards. Given range [0, 1] and root "always", at each iteration the
    program part would be grounded at horizon and horizon-1. The latter only if
    the horizon is greater than 0.
    Arguments:
    prg           -- Control object holding the program.
    future_sigs   -- Signatures of predicates whose future incarnations have to
                     be set to False.
    program_parts -- Program parts to ground.
    imin          -- Minimum number of iterations.
    imax          -- Maximum number of iterations.
    istop         -- When to stop.
    """
    f = _ty.Theory()
    step, ret = 0, None
    # Iterate until imax is reached or, past imin, the istop criterion holds.
    while ((imax is None or step < imax) and
           (step == 0 or step < imin or (
              (istop == "SAT" and not ret.satisfiable) or
              (istop == "UNSAT" and not ret.unsatisfiable) or
              (istop == "UNKNOWN" and not ret.unknown)))):
        # Program parts to ground for the current step.
        parts = []
        for root_name, part_name, rng in program_parts:
            for i in rng:
                if ((step - i >= 0 and root_name == "always") or
                    (step - i > 0 and root_name == "dynamic") or
                    (step - i == 0 and root_name == "initial")):
                    parts.append((part_name, [step - i, step]))
        if step > 0:
            # Retire the previous step's __final marker before extending.
            prg.release_external(_clingo.Function("__final", [step-1]))
            prg.cleanup()
        prg.ground(parts)
        f.translate(step, prg)
        prg.assign_external(_clingo.Function("__final", [step]), True)
        # Assume atoms referring to time points beyond the current step false.
        assumptions = []
        for name, arity, positive in future_sigs:
            for atom in prg.symbolic_atoms.by_signature(name, arity, positive):
                if atom.symbol.arguments[-1].number > step:
                    assumptions.append(-atom.literal)
        # solve() runs before step is incremented, so the lambda sees the
        # pre-increment step value while models are intercepted.
        ret, step = prg.solve(on_model=lambda m: on_model(m, step), assumptions=assumptions), step+1
def smain(prg, future_sigs, program_parts, on_model, imin=0, imax=None, istop="SAT", scheduler_options=None):
    """
    Take a program object and runs the incremental scheduled main solving loop.
    For each pair (name, arity) in future_sigs all atoms in the program base
    with the time parameter referring to the future are set to false. For
    example, given (p, 2) and atoms p(x,1) in step 0, the atom would p(x,1)
    would be set to false via an assumption. In the following time steps, it
    would not be set to False.
    The list program_parts contains all program parts appearing in the program
    in form of triples (root, name, range) where root is either "initial" (time
    step 0), "always" (time steps >= 0), or "dynamic" (time steps > 0) and
    range is a list of integers for which the part has to be grounded
    backwards. Given range [0, 1] and root "always", at each iteration the
    program part would be grounded at horizon and horizon-1. The latter only if
    the horizon is greater than 0.
    Arguments:
    prg               -- Control object holding the program.
    future_sigs       -- Signatures of predicates whose future incarnations have to
                         be set to False.
    program_parts     -- Program parts to ground.
    imin              -- Minimum number of iterations.
    imax              -- Maximum number of iterations.
    istop             -- When to stop.
    scheduler_options -- options of the schedule to use; defaults to a fresh
                         _sd.Scheduler_Config().
    """
    # Bug fix: the default used to be `scheduler_options=_sd.Scheduler_Config()`,
    # a mutable default argument created once at import time and therefore
    # shared (and mutable) across all calls. Use the None-sentinel idiom.
    if scheduler_options is None:
        scheduler_options = _sd.Scheduler_Config()
    theory = _ty.Theory()
    step, ret = 0, None
    # ground initial
    parts = []
    for root_name, part_name, rng in program_parts:
        for i in rng:
            if ((step - i >= 0 and root_name == "always") or
                (step - i > 0 and root_name == "dynamic") or
                (step - i == 0 and root_name == "initial")):
                parts.append((part_name, [step - i, step]))
    prg.ground(parts)
    theory.translate(step, prg)
    prg.assign_external(_clingo.Function("__final", [step]), True)
    #solver
    solver = Solver(prg, theory, scheduler_options.restarts_per_solve, scheduler_options.conflicts_per_restart, scheduler_options.move_final, scheduler_options.verbose)
    #scheduler
    scheduler = scheduler_options.build_scheduler()
    # incremental scheduled main solving loop
    max_length = 0
    print_length = 0
    length = 0
    i = 1
    while ((imax is None or step < imax) and
           (step == 0 or step < imin or (
              (istop == "SAT" and not ret.satisfiable) or
              (istop == "UNSAT" and not ret.unsatisfiable) or
              (istop == "UNKNOWN" and not ret.unknown)))):
        if scheduler_options.verbose:
            _sys.stdout.write("Iteration "+str(i)+"\n")
            time0 = clock()
        i += 1
        # get current solve length from scheduler
        length = scheduler.next(ret)
        if length is None:
            _sys.stdout.write("PLAN NOT FOUND\n")
            break
        # solve given length
        if scheduler_options.move_final or length > print_length: print_length = length
        ret, step = solver.solve(length, future_sigs, program_parts, on_model=lambda m: on_model(m, print_length)), step+1
        if ret is not None and length > max_length: max_length = length
        if ret is not None and ret.satisfiable and step >= imin: break
        if scheduler_options.verbose: _sys.stdout.write("Iteration Time:\t {:.2f}s\n".format(clock()-time0)+"\n")
class Application:
    """
    Application object as accepted by clingo.clingo_main().
    Rewrites the incoming temporal logic programs into incremental ASP programs
    and solves them.
    """
    def __init__(self, name):
        """
        Initializes the application setting the program name.
        See clingo.clingo_main().
        """
        self.program_name = name
        self.version = "1.0.1"
        self.__imin = 0
        self.__imax = None
        self.__istop = "SAT"
        self.__horizon = 0
        self.__scheduler_config = _sd.Scheduler_Config()
    def __on_model(self, model, horizon):
        """
        Prints the atoms in a model grouped by state.
        Arguments:
        model -- The model to print.
        horizon -- The number of states.
        """
        # Only records the horizon; the actual printing happens in
        # print_model(), which clingo invokes with the model.
        self.__horizon = horizon
    def __parse_imin(self, value):
        """
        Parse imin argument.
        """
        self.__imin = int(value)
        return self.__imin >= 0
    def __parse_imax(self, value):
        """
        Parse imax argument (empty string means unbounded).
        """
        if len(value) > 0:
            self.__imax = int(value)
            return self.__imax >= 0
        else:
            self.__imax = None
            return True
    def __parse_istop(self, value):
        """
        Parse istop argument.
        """
        self.__istop = value.upper()
        return self.__istop in ["SAT", "UNSAT", "UNKNOWN"]
    def __parse_scheduler_greater_equal(self, value, argument, minimum=0):
        """
        Parse argument with value greater than a minimum.
        """
        setattr(self.__scheduler_config, argument, int(value))
        return getattr(self.__scheduler_config, argument) >= minimum
    def __parse_scheduler_boolean(self, value, argument):
        """
        Parse argument with boolean value.
        """
        if value.upper() in ['TRUE', '1', 'T', 'Y', 'YES']:
            setattr(self.__scheduler_config, argument, True)
            return True
        elif value.upper() in ['FALSE', '0', 'F', 'N', 'NO']:
            setattr(self.__scheduler_config, argument, False)
            return True
        else:
            return False
    def __parse_scheduler(self, value):
        """
        Parse the --scheduler argument: "<A|B|C>,<n>[,<S>][,<M>]".
        Returns True on success, False on any out-of-range value.
        """
        # select scheduler
        arg = value.split(",")
        if arg[0] == 'A':
            if int(arg[1]) >= 0 and int(arg[1]) <= 50:
                self.__scheduler_config.A = int(arg[1])
            else: return False
        elif arg[0] == 'B':
            if float(arg[1]) >= 0.1 and float(arg[1]) <= 0.9999:
                self.__scheduler_config.B = float(arg[1])
            else: return False
        elif arg[0] == 'C':
            if float(arg[1]) >= 1.0 and float(arg[1]) <= 2.0:
                self.__scheduler_config.C = float(arg[1])
                self.__scheduler_config.inc = 1
            else: return False
        else:
            _sys.stdout.write("First scheduler parameter is wrong!\n")
            return False
        # Optional increment <S> (A and B only).
        if len(arg) > 2 and (arg[0] == 'A' or arg[0] == 'B'):
            if int(arg[2]) > 0:
                self.__scheduler_config.inc = int(arg[2])
            else:
                return False
        # Optional process count <M> (B only).
        if len(arg) > 3 and arg[0] == 'B':
            if int(arg[3]) >= 1:
                self.__scheduler_config.processes = int(arg[3])
            else:
                return False
        return True
    def print_model(self, model, printer):
        """Print the model's shown atoms grouped by time step (state),
        dropping internal atoms whose names start with '__'."""
        # Group symbols by their last argument (the time point), stripping it.
        table = {}
        for sym in model.symbols(shown=True):
            if sym.type == _clingo.SymbolType.Function and len(sym.arguments) > 0:
                table.setdefault(sym.arguments[-1].number, []).append(_clingo.Function(sym.name, sym.arguments[:-1], sym.positive))
        for step in range(self.__horizon+1):
            symbols = table.get(step, [])
            _sys.stdout.write(" State {}:".format(step))
            sig = None
            for sym in sorted(symbols):
                if not sym.name.startswith('__'):
                    # Start a new line whenever the predicate signature changes.
                    if (sym.name, len(sym.arguments), sym.positive) != sig:
                        _sys.stdout.write("\n ")
                        sig = (sym.name, len(sym.arguments), sym.positive)
                    _sys.stdout.write(" {}".format(sym))
            _sys.stdout.write("\n")
        return True
    def register_options(self, options):
        """
        See clingo.clingo_main().
        """
        group = "Telingo Options"
        options.add(group, "imin", "Minimum number of solving steps [0]", self.__parse_imin, argument="<n>")
        options.add(group, "imax", "Maximum number of solving steps []", self.__parse_imax, argument="<n>")
        options.add(group, "istop", _textwrap.dedent("""\
            Stop criterion [sat]
            <arg>: {sat|unsat|unknown}"""), self.__parse_istop)
        # Scheduler algorithms
        group = "Scheduler Options"
        options.add(group, "scheduler", _textwrap.dedent("""\
            Configure scheduler settings
            <sched>: <type {A,B,C}>,<n>[,<S {1..umax}>][,<M {1..umax}>]
            A,<n> : Run algorithm A with parameter <n>{1..50}
            B,<n> : Run algorithm B with parameter <n>{0.1..0.9999}
            C,<n> : Run algorithm C with parameter <n>{1.0..2.0}
            ...,<S> : Increase horizon lengths 0, <S>, 2<S>, 3<S>, ... [5] (A and B only)
            ...,<M> : Maximum number of processes [20] (B only)""")
            , self.__parse_scheduler, argument="<sched>")
        # Scheduler options
        options.add(group, "scheduler-start,F", "Starting horizon length [0]", lambda val: self.__parse_scheduler_greater_equal(val, "start"), argument="<n>")
        options.add(group, "scheduler-end,T", "Ending horizon length [3000]", lambda val: self.__parse_scheduler_greater_equal(val, "limit"), argument="<n>")
        options.add(group, "scheduler-verbose", "Set verbosity level to <n>", lambda val: self.__parse_scheduler_greater_equal(val, "verbose"), argument="<n>")
        options.add(group, "conflicts-per-restart,i", "Short for -r F,<n> (see restarts)", lambda val: self.__parse_scheduler_greater_equal(val, "conflicts_per_restart"), argument="<n>")
        options.add(group, "keep-after-unsat", "After finding n to be UNSAT, do keep runs with m<n [t]", lambda val: self.__parse_scheduler_boolean(val, "propagate_unsat"), argument="<b>")
        # Solving options
        options.add(group, "final-at-last", "Fix query always at the last (grounded) time point [t]", lambda val: self.__parse_scheduler_boolean(val, "move_final"), argument="<b>")
        options.add(group, "forbid-actions", _textwrap.dedent("""Forbid actions at time points after current plan length,
                              using the predicate occurs/1 [f]""")
            , lambda val: self.__parse_scheduler_boolean(val, "forbid_actions"), argument="<b>")
        options.add(group, "force-actions", _textwrap.dedent("""Force at least one action at time points before current plan length,
                              using the predicate occurs/1 [f]""")
            , lambda val: self.__parse_scheduler_boolean(val, "force_actions"), argument="<b>")
    def main(self, prg, files):
        """
        Implements the incremental solving loop.
        This function implements the Application.main() function as required by
        clingo.clingo_main().
        """
        is_scheduler = self.__scheduler_config.single_scheduler()
        with prg.builder() as b:
            files = [open(f) for f in files]
            if len(files) == 0:
                files.append(_sys.stdin)
            program = [f.read() for f in files]
            # additional programs for scheduler
            if is_scheduler:
                externals_program = """
                #program dynamic. #external skip(t).
                """
                forbid_actions_program = """
                #program dynamic.
                :- occurs(A), skip(t). % no action
                """
                force_actions_program = """
                #program dynamic.
                :- not occurs(_), not skip(t). % some action
                """
                program.append(externals_program)
                if getattr(self.__scheduler_config, "forbid_actions", False):
                    program.append(forbid_actions_program)
                if getattr(self.__scheduler_config, "force_actions", False):
                    program.append(force_actions_program)
            future_sigs, program_parts = _tf.transform(program, b.add)
        # Scheduled loop when exactly one scheduler is configured, otherwise
        # the plain incremental loop.
        if is_scheduler:
            smain(prg, future_sigs, program_parts, self.__on_model, self.__imin, self.__imax, self.__istop, self.__scheduler_config)
        else:
            imain(prg, future_sigs, program_parts, self.__on_model, self.__imin, self.__imax, self.__istop)
def main():
    """
    Run the telingo application.
    """
    application = Application("telingo")
    exit_code = _clingo.clingo_main(application, _sys.argv[1:])
    _sys.exit(int(exit_code))
| true |
1ef2bd9a2aa8ce42d2a563b35fe54c1331229389 | Python | Rajeshkumark26/Algorithms_coding_practice | /palindromeString.py | UTF-8 | 233 | 3.46875 | 3 | [] | no_license | def isPalindrome(string):
return string==string[::-1]
def isPalindrome(string):
    """Return True iff *string* reads the same forwards and backwards.

    Uses a two-pointer scan: O(n) time, O(1) extra space, early exit on
    the first mismatch. (The previous version built the reversed string
    with repeated `+=`, which is worst-case quadratic, and always
    processed the whole string.)
    """
    left, right = 0, len(string) - 1
    while left < right:
        if string[left] != string[right]:
            return False
        left += 1
        right -= 1
    return True
0f9d7f7c9f31a0a40435f85f7fd80b3873a96930 | Python | olabiyi/16s_pathogen_analysis | /pathogenAnalysisFunctions.py | UTF-8 | 19,659 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 24 21:12:23 2018
@author: Biyi
In order to install and use modules like ecopy on Windows you'll need to
download and install visual studio 2015
1. From this link https://stackoverflow.com/questions/44290672/
how-to-download-visual-studio-community-edition-2015-not-2017
Download the web installer or ISO for Windows.
After the installation has finished (it can take a few hours):
2. Then run the command
a. 'pip install msgpack' in the Anaconda prompt
b. 'pip install ecopy' in the Anaconda prompt
To get a work around version of scikit bio for Windows follow the instructions
below:
1. Install Windows Visual Studio 2015 Community Edition with C++ SDK enabled to
meet the requirements of the compiler for installing the package(Scikit-Bio)
from the link https://go.microsoft.com/fwlink/?LinkId=532606&clcid=0x409
(web installer)
2. Download the latest source from the Scikit-bio Github repository
https://github.com/biocore/scikit-bio/archive/master.zip.
3. Using a tool like 7-zip, unpack it into your python packages directory
(C:\Users\user\Anaconda3\pkgs).
3. Open a command line client (cmd) and navigate to the source directory
i.e. the uzipped folder.
cd C:\Users\user\Anaconda3\pkgs\scikit-bio-master
4. Using Notepad++ edit the setup.py file
"C:\Program Files (x86)\Notepad++\notepad++" setup.py
Find the line in which the variable ssw_extra_compile_args
is defined and change it. You can comment on the previous version and
redefine the variable in the new line:
#ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
ssw_extra_compile_args = []
5. Save the changes, close the editor and start the installation with
this command:
python setup.py install
I hope you do not receive error messages.
6. Exit from the directory that you insatlled skbio into
cd C:\Users\user\Documents
7. Open an Anaconda Python session (using the command python) and check if
Scikit-Bio was installed correctly using print(skbio.art).
test insatllation
import skbio
print(skbio.art)
"""
import numpy as np
import pandas as pd
import os
import ecopy as ep
import random
def pathogen_analysis(file):
    """
    Function to perform pathogen analysis from files generated
    after running the analysis in bash.

    Reads one taxonomy string per line from `file` (param name shadows the
    builtin, kept for interface compatibility), splits each string into
    whitespace-separated tokens and tallies occurrences at three levels.

    Returns a dict with keys 'genera', 'species' and 'strains', each mapping
    to a DataFrame with the taxon name column and its frequency ('freq').
    For an empty input file, 'non_detected' placeholder rows with freq 0
    are returned instead.
    """
    # Check if the file is empty (zero bytes => nothing detected).
    if os.stat(file).st_size > 0:
        # Read in the file: one taxonomy string per line, no header.
        sample = pd.read_csv(file,
                             header=None)
        # Rename the single column.
        sample.columns= ['strains']
        # NOTE(review): this value_counts() result is discarded -- appears
        # to be dead code left over from exploration.
        sample['strains'].value_counts()
        ####### Get the strain names from the strain column #####
        # Row indices of the strain column.
        index = [x for x in range(len(sample.loc[:,'strains']))]
        # Will hold the whitespace-split tokens of each taxonomy string.
        strain_names = []
        # Split every row into its name tokens.
        for i in index:
            strain_names.append((sample.loc[:,'strains'][i]).split())
        # One column per token; rows are padded with NaN where names are short.
        split_names = pd.DataFrame(strain_names)
        # Replace the literal string 'None' with NaN ...
        split_names = split_names.replace(to_replace='None',
                                          value=np.nan)
        # ... and NaN with an empty string so concatenation below is safe.
        split_names = split_names.fillna(value='')
        # Genus = first token only.
        genera_df = split_names.iloc[:,0]
        # Species = first two tokens.
        species_df = split_names.iloc[:,0] + ' ' \
        + split_names.iloc[:,1]
        # Strain = first four tokens.
        # NOTE(review): assumes at least one name has >= 4 tokens so that
        # columns 0..3 exist; otherwise iloc[:,3] raises IndexError -- confirm
        # against the upstream file format.
        strains_df = split_names.iloc[:,0] + ' ' + \
        split_names.iloc[:,1] + ' ' + split_names.iloc[:,2] + \
        ' ' + split_names.iloc[:,3] #+ ' ' + split_names.iloc[:,4]
        taxon_df = [genera_df,species_df,strains_df]
        taxon_names = ['Genus', 'Species', 'Strains']
        taxon = ['genera','species', 'strains']
        for j in range(len(taxon_df)):
            #### Frequency table for this taxonomic level. ###
            taxon_freq= pd.DataFrame(taxon_df[j].value_counts())
            # Replace the placeholder string with a two-column DataFrame
            # holding the taxon names and their frequencies.
            taxon_names[j] = pd.DataFrame()
            # Taxon names come from the value_counts index ...
            taxon_names[j][taxon[j]] = taxon_freq.index.values
            # ... and the counts become the 'freq' column.
            taxon_names[j]['freq'] = taxon_freq.iloc[:,0].values
        # Package the three levels into one result dict.
        final = {taxon[0]: taxon_names[0],
                 taxon[1]: taxon_names[1],
                 taxon[2]: taxon_names[2]}
    else:
        # Empty file: emit 'non_detected' placeholders with zero frequency
        # so downstream code can treat all samples uniformly.
        taxon = ['genera','species', 'strains']
        taxon_names = ['Genus', 'Species', 'Strains']
        taxon_names[0] = pd.DataFrame({'genera': 'non_detected','freq': 0},
                                      index=[0],columns= ['genera','freq'])
        taxon_names[1] = pd.DataFrame({'species': 'non_detected','freq': 0},
                                      index=[0],columns= ['species','freq'])
        taxon_names[2] = pd.DataFrame({'strains': 'non_detected','freq': 0},
                                      index=[0],columns= ['strains','freq'])
        final = {taxon[0]: taxon_names[0],
                 taxon[1]: taxon_names[1],
                 taxon[2]: taxon_names[2]}
    return final
def combined_results(files):
    """Run pathogen_analysis on every input file and collect the results.

    The sample name is taken from the second underscore-separated field of
    each file name. Returns a (dict mapping sample -> analysis result,
    list of sample names) tuple.
    """
    samples = []
    result_together = {}
    for path in files:
        # Sample identifier sits between the first two underscores.
        sample_name = path.split(sep='_')[1]
        samples.append(sample_name)
        result_together[sample_name] = pathogen_analysis(file=path)
    return (result_together, samples)
def make_taxon_table(result_together, samples):
    """Build a pathogen (species) x sample frequency table.

    Parameters
    ----------
    result_together : dict
        sample name -> output of pathogen_analysis (a dict whose 'species'
        entry is a DataFrame with 'species' and 'freq' columns).
    samples : list of str
        Sample names (keys of ``result_together``).

    Returns
    -------
    pandas.DataFrame
        Rows: unique detected species (the 'non_detected' placeholder is
        dropped); columns: samples; cells: detection frequency, 0 when a
        pathogen was not seen in a sample.
    """
    # Collect every species seen in any sample. pd.concat replaces the
    # previous Series.append call, which was removed in pandas 2.0.
    per_sample_species = [result_together[sample]['species']['species']
                          for sample in samples]
    if per_sample_species:
        pathogens = pd.concat(per_sample_species).unique()
    else:
        pathogens = np.array([])
    taxon_table = pd.DataFrame({'pathogens': pathogens})
    # Drop the placeholder emitted for empty input files.
    taxon_table = taxon_table[taxon_table['pathogens'] != 'non_detected']
    # Start every sample column at zero; observed frequencies are filled below.
    zeros_dataframe = pd.DataFrame(data=0, index=np.arange(len(taxon_table.index)),
                                   columns=samples)
    zeros_dataframe.index = taxon_table.index
    taxon_table = pd.concat([taxon_table, zeros_dataframe], axis=1)
    taxon_table = taxon_table.set_index('pathogens')
    # Fill in each sample's detection frequencies.
    for sample in samples:
        detect = result_together[sample]['species']['species']
        frequency = result_together[sample]['species']['freq']
        for i in detect.index.tolist():
            pathogen = detect[i]
            # 'non_detected' rows fall through here because they are
            # not in the table index.
            if pathogen in taxon_table.index:
                taxon_table.loc[pathogen, sample] = frequency[i]
    return taxon_table
# Remember to modify function for any grouping vaiable besides treatment
def presence_abscence(taxon_table_t, independent_variables, group='Treatment'):
    """Count, per group, in how many samples each pathogen was detected.

    Parameters
    ----------
    taxon_table_t : pandas.DataFrame
        Abundance table with samples as rows and pathogens as columns.
    independent_variables : pandas.DataFrame
        Metadata indexed like ``taxon_table_t``; must contain column ``group``.
    group : str, optional
        Metadata column to group by. Defaults to ``'Treatment'``, matching
        the previously hard-coded behaviour (this addresses the old
        "modify function for any grouping variable" TODO).

    Returns
    -------
    pandas.DataFrame
        One row per group: a 'number of samples' column followed by the
        per-pathogen detection counts.
    """
    # A pathogen counts as "detected" in a sample when its count is positive.
    detect_table = taxon_table_t > 0
    detect_table2 = pd.concat([independent_variables, detect_table], axis=1)
    # Summing booleans per group yields the number of samples with a detection.
    detect = detect_table2.groupby(by=[group]).sum()
    number_of_samples_per_treat = detect_table2.groupby(by=[group]).count().iloc[:, 0]
    detect_df = pd.concat([number_of_samples_per_treat, detect], axis=1)
    # The first column carries the name of the first pathogen; relabel it.
    columns = detect_df.columns.tolist()
    columns[0] = 'number of samples'
    detect_df.columns = columns
    return detect_df
def join_tables(table1, table2, how='inner'):
    """Join two dataframes on their (sample) indices.

    Parameters
    ----------
    table1, table2 : pandas.DataFrame
        Tables indexed by sample name. Column names are assumed disjoint.
    how : str, optional
        Join type passed to ``DataFrame.join`` (default 'inner').

    Returns
    -------
    tuple
        (table1 part of the join, table2 part of the join, full joined table).
    """
    table1_col_names = table1.columns
    # Join directly on the indices. The previous implementation copied the
    # index into a helper 'samples' column in both tables; the join renamed
    # that overlapping column to 'samples_i'/'samples_t', so the subsequent
    # drop(columns=['samples', ...]) raised a KeyError.
    joined_table = table1.join(other=table2,
                               how=how,
                               lsuffix='_i',
                               rsuffix='_t')
    table1_copy = joined_table[table1_col_names]
    table2_copy = joined_table.drop(columns=table1_col_names)
    return (table1_copy, table2_copy, joined_table)
def remove_zero_samples(taxon_table_t):
    """Drop samples (rows) whose counts sum to zero.

    Removing all-zero samples avoids division-by-zero problems in the
    downstream normalisation and diversity calculations.
    """
    row_totals = taxon_table_t.sum(axis=1)
    # Samples in which nothing at all was detected.
    empty_samples = list(row_totals[row_totals == 0].index)
    return taxon_table_t.drop(index=empty_samples)
def estimate_diversity(taxon_table_t):
    """ Function to estimate common diversity metrics from a taxon table
    with samples as rows and observations/otus/pathogens as columns.
    The output of this function is a dataframe of the diversity metrics per
    sample.

    Shannon, Simpson, species richness and evenness are computed with the
    third-party ecopy package (`ep`); the observed-species count uses the
    local observed_otus() helper.
    """
    # Sample names are the row index of the transposed taxon table.
    table_samples = taxon_table_t.index.tolist()
    # Number of observed otus / pathogens per sample.
    observed_pathogens = [] #initialize an empty list
    # Count the observed species in each sample.
    for sample in table_samples:
        observed_pathogens.append(observed_otus(table= taxon_table_t,
                                                sample_id= sample))
    # Estimate the diversity indices (one value per sample/row).
    shannon = ep.diversity(x = taxon_table_t, method = 'shannon')
    simpson = ep.diversity(x = taxon_table_t, method = 'simpson')
    species_richness = ep.diversity(x = taxon_table_t, method = 'spRich')
    eveness = ep.diversity(x = taxon_table_t, method = 'even')
    # Convert the estimates to Pandas series indexed by sample name.
    shannon = pd.Series(data = shannon, index = table_samples)
    simpson = pd.Series(data = simpson, index = table_samples)
    observed_pathogens = pd.Series(data=observed_pathogens, index= table_samples)
    species_richness = pd.Series(data = species_richness, index = table_samples)
    eveness = pd.Series(data = eveness, index = table_samples)
    # Assemble one column per metric.
    diversity_dict = {'observed_species': observed_pathogens, 'species_richness': species_richness,
                      'eveness': eveness, 'shannon':shannon, 'simpson': simpson}
    diversity_table = pd.DataFrame(data = diversity_dict)
    return(diversity_table)
def stack_data(abund_table, ind_var_df, variable):
    """
    Stack an abundance dataframe into long format along with one
    independent variable.

    Parameters
    ----------
    abund_table : pandas.DataFrame
        Relative-abundance table, samples as rows and pathogens as columns.
    ind_var_df : pandas.DataFrame or Series
        Independent variables, one row per sample, in the same order as
        the samples of ``abund_table``.
    variable : str
        Column of ``ind_var_df`` to carry into the stacked table.

    Returns
    -------
    pandas.DataFrame
        Columns [variable, 'pathogens', 'relative_abundance'], one row per
        (sample, pathogen) pair. The sample index repeats once per pathogen.
    """
    import pandas as pd
    import numpy as np
    # Ensure that ind_var_df is a dataframe (a Series is promoted).
    ind_var_df = pd.DataFrame(ind_var_df)
    ind_var = ind_var_df[variable]
    indvar_label = []
    pathogens_label = []
    relative_abundance = []
    col_names = list(abund_table)
    # One vertical stack per pathogen column: the independent variable, the
    # repeated pathogen name, and that pathogen's abundance values.
    # (The previous version also kept an unused `count` accumulator.)
    for column in col_names:
        indvar_label.append(ind_var)
        pathogens_label.append(
            pd.Series(np.repeat(column, len(ind_var)), name='pathogens'))
        relative_abundance.append(abund_table[column])
    ind_var_stacked = pd.concat(indvar_label)
    pathogens = pd.concat(pathogens_label)
    # Re-index so all three stacks align on the (repeated) sample index.
    pathogens.index = ind_var_stacked.index
    rel_abund_stacked = pd.concat(relative_abundance)
    stacked_table = pd.concat([ind_var_stacked, pathogens, rel_abund_stacked], axis=1)
    stacked_table.columns = [variable, 'pathogens', 'relative_abundance']
    return stacked_table
def remove_low_abundance_pathogens(abun_table, threshold=0.05, group_low_abund=True):
    """Remove or pool pathogens that never reach `threshold` abundance.

    A pathogen (column) is "low abundance" when its maximum value across
    all samples is below ``threshold``. Such columns are dropped; when
    ``group_low_abund`` is True their row-wise totals are appended as a
    final column labelled 'Rare'.
    """
    col_max = abun_table.max()
    # Columns whose peak abundance never reaches the threshold.
    rare_columns = list(col_max[col_max < threshold].index)
    # Row-wise total of the low-abundance pathogens, kept for pooling.
    rare_totals = abun_table[rare_columns].sum(axis=1)
    abund_pathogens_table = abun_table.drop(columns=rare_columns)
    if group_low_abund:
        abund_pathogens_table = pd.concat([abund_pathogens_table, rare_totals], axis=1)
        # The pooled column arrives unnamed; relabel the last column 'Rare'.
        abund_pathogens_table.columns.values[len(abund_pathogens_table.columns) - 1] = 'Rare'
    return abund_pathogens_table
def get_treatment_abund_table(independent_variable_df, taxon_table, group="Treatment"):
    """Sum pathogen counts per group and convert each row to percentages."""
    # Keep only the grouping column, aligned with the taxon table by sample.
    group_df = independent_variable_df[group].to_frame()
    _, _, merged = join_tables(table1=group_df,
                               table2=taxon_table)
    # Total counts per group ...
    per_group_counts = merged.groupby([group]).sum()
    # ... normalised so that each group's row sums to 100 percent.
    return per_group_counts.apply(func=lambda row: row / sum(row), axis=1) * 100
##################### these set of function are courtesy of Greg Carapaso in
#################### the applied bioinformatics tutorial
def observed_otus(table, sample_id):
    """
    Count the features with non-zero abundance in one sample, where
    samples are rows and features are columns.
    """
    counts = table.loc[sample_id, :]
    # Booleans sum to the number of True entries.
    flags = [count > 0 for count in counts]
    return sum(flags)
def get_observed_nodes(tree, table, sample_id, verbose=False):
    """
    Collect the tree nodes (tips plus their ancestors) covered by one
    sample: every OTU with a positive count in `sample_id`, and every
    ancestor of such an OTU that has a branch length (the root, whose
    length is None, is excluded).
    """
    present_otus = [obs_id for obs_id in table.index if table[sample_id][obs_id] > 0]
    observed_nodes = set()
    for otu_id in present_otus:
        tip = tree.find(otu_id)
        observed_nodes.add(tip)
        if verbose:
            print(tip.name, tip.length, end=' ')
        for ancestor in tip.ancestors():
            if ancestor.length is None:
                # We've hit the root: close the verbose line and move on.
                if verbose:
                    print('')
                continue
            if verbose and ancestor not in observed_nodes:
                print(ancestor.length, end=' ')
            observed_nodes.add(ancestor)
    return observed_nodes
def phylogenetic_diversity(tree, table, sample_id, verbose=False):
    """
    Phylogenetic diversity of one sample: the sum of branch lengths over
    all tree nodes observed in that sample.
    """
    nodes = get_observed_nodes(tree, table, sample_id, verbose=verbose)
    return sum(node.length for node in nodes)
def bray_curtis_distance(table, sample1_id, sample2_id):
    """
    Bray-Curtis dissimilarity between two samples (columns) of `table`:
    sum of absolute count differences over the sum of all counts.
    """
    counts_a = table[sample1_id]
    counts_b = table[sample2_id]
    diff_sum = 0
    total_sum = 0
    for a, b in zip(counts_a, counts_b):
        diff_sum += abs(a - b)
        total_sum += a + b
    return diff_sum / total_sum
def table_to_distances(table, pairwise_distance_fn):
    """
    Build a skbio DistanceMatrix by applying `pairwise_distance_fn` to
    every pair of samples (columns) of `table`.
    """
    from skbio.stats.distance import DistanceMatrix
    from numpy import zeros
    sample_ids = table.columns
    num_samples = len(sample_ids)
    data = zeros((num_samples, num_samples))
    for i, id_i in enumerate(sample_ids):
        # Distances are symmetric, so compute the lower triangle only and
        # mirror each value into the upper triangle.
        for j, id_j in enumerate(sample_ids[:i]):
            distance = pairwise_distance_fn(table, id_i, id_j)
            data[i, j] = distance
            data[j, i] = distance
    return DistanceMatrix(data, sample_ids)
def unweighted_unifrac(tree, table, sample_id1, sample_id2, verbose=False):
    """
    Unweighted UniFrac distance between two samples: the fraction of the
    total observed branch length that is unique to one of the samples.
    """
    nodes1 = get_observed_nodes(tree, table, sample_id1, verbose=verbose)
    nodes2 = get_observed_nodes(tree, table, sample_id2, verbose=verbose)
    # Union = all observed branches; intersection = branches shared by both.
    observed_branch_length = sum(node.length for node in nodes1 | nodes2)
    shared_branch_length = sum(node.length for node in nodes1 & nodes2)
    unique_branch_length = observed_branch_length - shared_branch_length
    return unique_branch_length / observed_branch_length
##############################################################################
def generate_colors(n):
    """
    Generate n visually distinct RGB colours.

    Starts from a random colour and advances each channel by 256/n per
    step (wrapping modulo 256) so the colours are spread over the space.

    Returns a (rgb_values, hex_values) pair: a list of (r, g, b) integer
    tuples and a list of '#rrggbb' strings.
    """
    rgb_values = []
    hex_values = []
    r = int(random.random() * 256)
    g = int(random.random() * 256)
    b = int(random.random() * 256)
    step = 256 / n
    for _ in range(n):
        r = int(r + step) % 256
        g = int(g + step) % 256
        b = int(b + step) % 256
        # %02x keeps every channel two digits. The previous hex(x)[2:]
        # formatting emitted one digit for channel values < 16, producing
        # malformed colour strings shorter than '#rrggbb'.
        hex_values.append('#%02x%02x%02x' % (r, g, b))
        rgb_values.append((r, g, b))
    return rgb_values, hex_values
| true |
305c51fa004d11042cef0c23fb4b5812682469a0 | Python | timcnicholls/aoc_2019 | /day_6.py | UTF-8 | 3,369 | 3.484375 | 3 | [] | no_license | # AOC Day 6
import logging
import sys
class UniversalOrbitMap(object):
    """Map of orbital relationships (AoC 2019 day 6).

    The map is stored as a child -> parent dictionary built from entries
    of the form "PARENT)CHILD"; every body orbits exactly one parent.
    """

    def __init__(self, orbit_map=None):
        # child body -> the body it directly orbits
        self.bodies = {}
        if orbit_map:
            self.parse_map(orbit_map)

    def load_file(self, file_name):
        """Read an orbit map (one PARENT)CHILD entry per line) from a file."""
        with open(file_name, 'r') as f:
            orbit_map = f.readlines()
        self.parse_map(orbit_map)

    def parse_map(self, orbit_map):
        """Parse orbit entries into the child -> parent dictionary.

        Any previous map content is discarded. A warning is logged when a
        body appears more than once (the last parent wins).
        """
        self.bodies = {}
        for orbit in orbit_map:
            (parent, body) = orbit.strip().split(')')
            # Bug fix: the duplicate check used to read `if body in parent`,
            # a substring test on the parent's *name*; it must look up the
            # map built so far (the old code would also have raised KeyError
            # when formatting the warning).
            if body in self.bodies:
                logging.warning("Body {} already exists in map with parent {}".format(
                    body, self.bodies[body]
                ))
            self.bodies[body] = parent
        logging.debug("Parsed orbit map of length {} containing {} objects".format(
            len(orbit_map), len(self.bodies)
        ))

    def calculate_orbits(self):
        """Return the total number of direct and indirect orbits (part 1)."""
        total_orbits = 0
        for body in self.bodies:
            # Walk from each body up to the root, counting every hop.
            while body in self.bodies:
                total_orbits += 1
                body = self.bodies[body]
        logging.debug("Total number of orbits : {}".format(total_orbits))
        return total_orbits

    def get_path(self, body):
        """Return the chain of bodies from `body` up to (excluding) the root."""
        path = []
        while body in self.bodies:
            path.append(body)
            body = self.bodies[body]
        return path

    def calculate_transfer(self, body_1, body_2):
        """Number of orbital transfers between two bodies' parents (part 2).

        Equals the size of the symmetric difference of the two root paths,
        minus the two bodies themselves.
        """
        path_elems_1 = set(self.get_path(body_1))
        path_elems_2 = set(self.get_path(body_2))
        transfer = len(path_elems_1.symmetric_difference(path_elems_2)) - 2
        return transfer

    def self_test_part1(self):
        """Verify calculate_orbits against the part-1 worked example."""
        logging.info("Running UOM self test part 1")
        test_map = [
            "COM)B",
            "B)C",
            "C)D",
            "D)E",
            "E)F",
            "B)G",
            "G)H",
            "D)I",
            "E)J",
            "J)K",
            "K)L",
        ]
        test_num_orbits = 42
        self.parse_map(test_map)
        num_orbits = self.calculate_orbits()
        assert test_num_orbits == num_orbits

    def self_test_part2(self):
        """Verify calculate_transfer against the part-2 worked example."""
        logging.info("Running UOM self test part 2")
        test_map = [
            "COM)B",
            "B)C",
            "C)D",
            "D)E",
            "E)F",
            "B)G",
            "G)H",
            "D)I",
            "E)J",
            "J)K",
            "K)L",
            "K)YOU",
            "I)SAN",
        ]
        test_transfer = 4
        self.parse_map(test_map)
        num_orbits = self.calculate_orbits()
        transfer = self.calculate_transfer("YOU", "SAN")
        assert test_transfer == transfer
def main():
    """CLI entry point: configure logging, run self-tests, solve both parts.

    An optional first command-line argument that parses as a non-zero int
    enables debug logging; a missing or non-integer argument is ignored.
    """
    log_level = logging.INFO
    try:
        if int(sys.argv[1]):
            log_level = logging.DEBUG
    except (ValueError, IndexError):
        pass
    logging.basicConfig(
        level=log_level, format='%(levelname)-8s: %(message)s', datefmt='%H:%M:%S'
    )
    uom = UniversalOrbitMap()
    # Sanity-check the implementation against the worked examples first.
    uom.self_test_part1()
    uom.self_test_part2()
    # Solve with the real puzzle input file.
    uom.load_file('input_6.txt')
    total_orbits = uom.calculate_orbits()
    logging.info("Part 1: total number of orbits: {}".format(total_orbits))
    transfer_len = uom.calculate_transfer("YOU", "SAN")
    logging.info("Part 2: orbital transfer length = {}".format(transfer_len))
# Standard script entry point guard.
if __name__ == '__main__':
    main()
8408c47dcd9151f1b258c98097cafd653b9aab2c | Python | DorotaOrzeszek/SPOJpl | /SPOJ-3456.py | UTF-8 | 716 | 3.515625 | 4 | [] | no_license | litery = map(chr, range(97,123)) + map(chr, range(65,91))
def kodowanie(litera):  # litera -> str
    """Encode one character: ROT13 for ASCII letters, ROT5 for digits.

    Any other character is returned unchanged.
    """
    code = ord(litera)
    if 97 <= code <= 109 or 65 <= code <= 77:
        # a-m / A-M: shift forward by 13.
        return chr(code + 13)
    if 110 <= code <= 122 or 78 <= code <= 90:
        # n-z / N-Z: shift back by 13.
        return chr(code - 13)
    if 48 <= code <= 52:
        # 0-4: shift forward by 5.
        return chr(code + 5)
    if 53 <= code <= 57:
        # 5-9: shift back by 5.
        return chr(code - 5)
    return litera
# Python 2 script body: read lines until EOF, ROT-encode each character
# with kodowanie() and print the encoded line.
while True:
    try:
        wejscie = raw_input()
    except EOFError:
        # End of input stream: stop processing.
        break
    wyjscie = ''
    # Build the encoded line character by character.
    for char in wejscie:
        wyjscie += kodowanie(char)
    print wyjscie
| true |
e5787143224985e0d5b59680040dbc883f4fd161 | Python | judangyee/ddd | /dino.py | UTF-8 | 900 | 3.53125 | 4 | [] | no_license | import random
# Valid moves (scissors, rock, paper) and the outcome messages keyed by
# game state: 0 = user wins, 1 = user loses, 2 = draw.
sel = ['가위', '바위', '보']
result = {0: '승리했습니다.', 1: '패배했습니다.', 2: '비겼습니다.'}
def checkWin(user, com):
    """Validate the user's move, print the round outcome, return validity.

    Returns False (caller should re-prompt) for an invalid move, True once
    a round was actually played. The win/lose/draw message comes from the
    module-level `result` dict.
    """
    if user not in sel:
        print('잘못입력하였습니다. 다시 입력하세요')
        return False
    print(f'사용자 ( {user} vs {com} ) 컴퓨터')
    # For each user move, the computer move that beats it.
    losing_counter = {'가위': '바위', '바위': '보', '보': '가위'}
    if user == com:
        state = 2
    elif losing_counter[user] == com:
        state = 1
    else:
        state = 0
    print(result[state])
    return True
# One round per iteration: keep prompting until the user enters a valid
# move (checkWin returns True once a round was actually played).
print('\n-------------------------------------------')
while True:
    user = input("가위, 바위, 보 : ")
    # Computer picks one of the three moves at random.
    com = sel[random.randint(0, 2)]
    if checkWin(user, com):
        break
print('-------------------------------------------\n')
d27234036f158cfe74dd50e65b2222467e8c39ec | Python | mybatete/Python | /Quick Sort/quick_sort.py | UTF-8 | 2,132 | 3.5625 | 4 | [] | no_license | MINSIZE=4
def main():
    """Demo driver (Python 2): quicksort a sample list, print before/after."""
    myList=[11,5,78,3,14,67,22,37,19,25,99]
    #myList=[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
    print "Before Sort: ",myList
    left = 0
    right = len(myList)-1
    myList = quickSort(myList,left,right)
    print "\n\nAfter Sort: ",myList
    print "\n\n"
def quickInsertion(myList, first, last):
    """Insertion-sort myList[first..last] (inclusive) in place.

    Returns the same list object for convenience.
    """
    for current in range(first + 1, last + 1):
        value = myList[current]
        position = current - 1
        # Shift larger elements one slot right until value's place is found.
        while position >= first and value < myList[position]:
            myList[position + 1] = myList[position]
            position = position - 1
        myList[position + 1] = value
    return myList
def medianLeft(myList, left, right):
    """Median-of-three pivot selection for quicksort.

    Sorts the values at indices `left`, the midpoint, and `right`, then
    swaps the median into position `left` so the caller can use it as the
    pivot. Operates in place and returns None.
    """
    # Bug fix: use floor division. The original `(left + right)/2` was
    # Python 2 integer division; under Python 3 it yields a float and
    # list indexing raises TypeError.
    mid = (left + right) // 2
    # Order the three probe positions with pairwise tuple swaps.
    if myList[left] > myList[mid]:
        myList[left], myList[mid] = myList[mid], myList[left]
    if myList[left] > myList[right]:
        myList[left], myList[right] = myList[right], myList[left]
    if myList[mid] > myList[right]:
        myList[mid], myList[right] = myList[right], myList[mid]
    # The median now sits at mid; move it into the left slot.
    myList[left], myList[mid] = myList[mid], myList[left]
    return
def quickSort(myList,left, right):
    """Recursively quicksort myList[left..right] in place; returns the list.

    Partitions around a median-of-three pivot (moved to index `left` by
    medianLeft) and falls back to insertion sort for ranges of MINSIZE
    elements or fewer. NOTE(review): this module is Python 2 code (the
    demo `main` uses print statements).
    """
    if (right - left) > MINSIZE:
        # Pick the pivot as the median of first/middle/last values.
        medianLeft(myList,left,right)
        pivot = myList[left]
        sortLeft = left + 1
        sortRight = right
        while sortLeft <= sortRight:
            # Advance past elements already on the correct side ...
            while myList[sortLeft] < pivot :
                sortLeft += 1
            while myList[sortRight] >= pivot :
                sortRight -= 1
            # ... then swap the out-of-place pair, if the pointers
            # have not crossed yet.
            if sortLeft <= sortRight :
                #Swap:
                tmp = myList[sortLeft]
                myList[sortLeft] = myList[sortRight]
                myList[sortRight] = tmp
        # Put the pivot between the two partitions.
        myList[left] = myList[sortLeft -1]
        myList[sortLeft -1 ] = pivot
        # Recurse into each non-trivial partition.
        if left < sortRight:
            quickSort(myList, left, sortRight-1)
        if sortLeft < right:
            quickSort(myList, sortLeft, right)
    else:
        # Small range: insertion sort is cheaper than recursion.
        quickInsertion(myList,left, right)
    return myList
# Run the demonstration when the module is executed.
main()
| true |
61cbb4d5f9159aa0e501e2a305c4e859763d4862 | Python | hasculdr/py_study_new | /examples/19_ssh_telnet/2_telnetlib.py | UTF-8 | 847 | 2.671875 | 3 | [] | no_license | import telnetlib
import time
import getpass
import sys
# Gather the command to run and the credentials up front; everything is
# encoded to bytes because telnetlib works on byte strings.
command = sys.argv[1].encode('ascii')
user = input('Username: ').encode('ascii')
password = getpass.getpass().encode('ascii')
enable_pass = getpass.getpass(prompt='Enter enable password: ').encode('ascii')
devices_ip = ['192.168.100.1', '192.168.100.2', '192.168.100.3']
for ip in devices_ip:
    print('Connection to device {}'.format(ip))
    with telnetlib.Telnet(ip) as t:
        # Log in at the username/password prompts.
        t.read_until(b'Username:')
        t.write(user + b'\n')
        t.read_until(b'Password:')
        t.write(password + b'\n')
        # Send 'enable' plus its password (privileged mode on
        # Cisco-style devices).
        t.write(b'enable\n')
        t.read_until(b'Password:')
        t.write(enable_pass + b'\n')
        # Disable paging so the whole command output arrives unprompted.
        t.write(b'terminal length 0\n')
        t.write(command + b'\n')
        # Brief pause to let output accumulate before the non-blocking read.
        time.sleep(1)
        output = t.read_very_eager().decode('ascii')
        print(output)
| true |
cd3bb5b646f0de60ef94ed1e4481bb0b58d4aaa3 | Python | fhansmann/coding-challenges | /c__41.py | UTF-8 | 88 | 3.203125 | 3 | [
"MIT"
] | permissive | tl = (1,2,3,4,5,6,7,8,9,10)
# Split the 10-element tuple `tl` (defined above) into two halves of five
# and print them.
tl_one = tl[:5]
tl_two = tl[5:]
print(tl_one)
print(tl_two)
1134525c87af23ada2861ff535148381f4b26c70 | Python | hacors/fusion | /FingerPrint/code/data_preprocessing/smiles/smiles_to_tfrecord.py | UTF-8 | 3,051 | 3.140625 | 3 | [] | no_license | import argparse, os, time
import numpy as np
from code.data_preprocessing.smiles.smiles_parser import SMILESParser
from code.data_preprocessing.graph_dataset import write_graphs_to_tfrecord
def parse_input_arguments():
    """Define and parse this script's command-line interface.

    Positional arguments: smiles_file, output_file, config_file.
    Optional: --targets_file. Returns the parsed argparse Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("smiles_file", type=str,
                        help="File with one SMILES string per line.")
    parser.add_argument("output_file", type=str,
                        help="Output file where TFRecords representing the graphs will be stored.")
    parser.add_argument("config_file", type=str,
                        help="JSON file with the configuration for the SMILES parser.")
    parser.add_argument("--targets_file", type=str,
                        help="File with a set of comma-separated targets per line. Number of lines must "
                             "match the number of lines in smiles_file.")
    return parser.parse_args()
def main():
    """Convert a file of SMILES strings (plus optional targets) to TFRecords.

    NOTE(review): this script is Python 2 (print statements) and depends on
    the project-local SMILESParser and graph_dataset modules.
    """
    # PARSE INPUT ARGUMENTS
    args = parse_input_arguments()
    # READ SMILES STRINGS (one per line, stripped of whitespace)
    print 'Reading SMILES strings...'
    tic = time.time()
    smiles_array = []
    with open(args.smiles_file, 'r') as f:
        for line in f:
            smiles_array.append(line.strip())
    n_smiles = len(smiles_array)
    toc = time.time()
    print 'Read %d SMILES strings in %0.3f seconds.\n' % (n_smiles, toc - tic)
    # READ TARGETS (IF ANY) as a float32 array, comma-separated per line
    targets_array, n_targets = None, None
    if args.targets_file is not None:
        print 'Reading targets...'
        tic = time.time()
        targets_array = np.loadtxt(args.targets_file, delimiter=',', dtype=np.float32)
        n_targets = len(targets_array)
        toc = time.time()
        print 'Read %d targets in %0.3f seconds.\n' % (n_targets, toc - tic)
    # Verify that there is one set of targets for each input SMILES string
    if targets_array is not None and n_smiles != n_targets:
        raise ValueError("smiles_file must have the same number of lines as targets_file")
    # CREATE GRAPH REPRESENTATION OF DATASET
    print 'Parsing SMILES strings...'
    tic = time.time()
    # Create SMILESParser object configured from the JSON file
    smiles_parser = SMILESParser(config=args.config_file)
    # Create a list of graph objects; unparseable strings are dropped,
    # hence n_graphs may be smaller than n_smiles.
    graphs = smiles_parser.parse_smiles(smiles_array=smiles_array, targets_array=targets_array)
    n_graphs = len(graphs)
    toc = time.time()
    print 'Parsed %d SMILES strings in %0.3f seconds.' % (n_smiles, toc - tic)
    print 'Parsing failed for %d/%d SMILES strings.\n' % (n_smiles - n_graphs, n_smiles)
    # CREATE OUTPUT DIRECTORY (IF IT DOES NOT EXIST)
    output_dir = os.path.split(args.output_file)[0]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # CONVERT GRAPH REPRESENTATION TO TFRECORDS AND WRITE TO DISK
    print 'Writing graph data to TFRecords file...'
    tic = time.time()
    # NOTE(review): single-argument os.path.join is a no-op here.
    write_graphs_to_tfrecord(graphs, os.path.join(args.output_file))
    toc = time.time()
    print 'Wrote graph data to TFRecords file %s in %0.3f seconds.\n' % (args.output_file, toc - tic)
# Standard script entry point guard.
if __name__ == "__main__":
    main()
| true |