blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
edb9c157e487b58dd50ef6ceb9a56ee5950ec77c | Python | manuelmhtr/algortithms-course | /karatsuba/solution.py | UTF-8 | 610 | 3.296875 | 3 | [] | no_license | import sys
import math
def multiply(m1, m2):
    """Multiply two non-negative integers with Karatsuba's algorithm.

    Args:
        m1 (int): First non-negative factor.
        m2 (int): Second non-negative factor.

    Returns:
        int: The product ``m1 * m2``.
    """
    largest = max(len(str(m1)), len(str(m2)))
    if largest <= 1:
        # Base case: single-digit operands multiply directly.
        return m1 * m2
    mid = int(math.ceil(largest / 2))
    shift = 10 ** mid
    # Split each operand into high/low halves around 10**mid. This replaces
    # the original zfill/slice/int string round-trips with pure arithmetic,
    # which is both faster and not dependent on decimal-string formatting.
    a, b = divmod(m1, shift)
    c, d = divmod(m2, shift)
    s1 = multiply(a, c)
    s2 = multiply(b, d)
    # Karatsuba trick: (a+b)(c+d) - ac - bd == ad + bc, saving one recursion.
    s4 = multiply(a + b, c + d) - (s1 + s2)
    return s1 * shift * shift + s4 * shift + s2
# Script entry point: read the two integer factors from the command line,
# e.g. `python solution.py 1234 5678`, and print their product.
first = int(sys.argv[1])
second = int(sys.argv[2])
print(multiply(first, second))
| true |
fd9a553132c94e5dbbac5061262dcad454396467 | Python | jgold189/RandomImageADay | /randomImage.py | UTF-8 | 312 | 2.703125 | 3 | [] | no_license | from PIL import Image
import numpy as np
from datetime import datetime
# Output image dimensions in pixels.
width, height = 512, 512
# One independent random byte per channel per pixel (uniform RGB noise).
pixels = np.random.randint(256, size=(height, width, 3), dtype=np.uint8)
image = Image.fromarray(pixels, "RGB")
# Name the file after today's date, e.g. images/01-31-2024.png.
out_path = "images/" + str(datetime.today().strftime("%m-%d-%Y")) + ".png"
image.save(out_path)
995a9c87a2806ef5597ac73c6d06161abe962eee | Python | zanoni/python_proway | /exercicios/marcello/array/ex_ar_3.py | UTF-8 | 552 | 4.0625 | 4 | [] | no_license | '''
n = [4, 7, 2, 3]
maior =0
for i in range(len(n)):
if n[i]> maior:
maior = n[i]
print(maior)
'''
# --- input ---
# How many non-negative numbers the user wants to enter.
qtd_pos = int(input('Quantidade de posições: '))
q = []
num_maior = 0
# --- processing ---
# Keep prompting until the requested count of non-negative numbers is stored;
# negative entries are rejected with a message and do not count.
while len(q) < qtd_pos:
    num = float(input('Número: '))
    if num < 0:
        print('O número tem que ser positivo')
    else:
        q.append(num)
# Track the largest value seen (starts at 0, matching the original logic,
# which is safe because only non-negative values are ever stored).
for valor in q:
    if valor > num_maior:
        num_maior = valor
# --- output ---
# q.index() reports the position of the first occurrence of the maximum.
print('O maior número é {} e está na posição {}' .format(num_maior, q.index(num_maior)))
| true |
2156c4c8c21b63ef52d982473d960e98d5be5ad1 | Python | PengchengAi/udp_ctl | /led_test.py | UTF-8 | 716 | 2.875 | 3 | [] | no_license | from gpiozero import LED
from time import sleep
# Three GPIO output pins driving a 3-bit pattern (led[0]=pin 17 ... led[2]=pin 22).
led = [LED(17), LED(27), LED(22)]
en = LED(10) # on(): enable. off(): disable.
# Cycle forever through all 8 on/off combinations of the three LEDs.
while True:
    for i in range(2):
        for j in range(2):
            for k in range(2):
                # i/j/k each take 0 or 1 and act as the three bits of the
                # current pattern (i -> led[2], j -> led[1], k -> led[0]).
                if i % 2:
                    led[2].on()
                else:
                    led[2].off()
                if j % 2:
                    led[1].on()
                else:
                    led[1].off()
                if k % 2:
                    led[0].on()
                else:
                    led[0].off()
                # Pulse the enable line 5 times (0.1s on / 0.1s off) so each
                # pattern is held for about one second before advancing.
                for _ in range(5):
                    en.on()
                    sleep(0.1)
                    en.off()
                    sleep(0.1)
| true |
09aa2b8a198bc841bcd4c0af44ad6dc46d920caa | Python | subho781/MCA-python-Assignment5 | /Q2.py | UTF-8 | 200 | 3.609375 | 4 | [] | no_license | '''build a dictionary with two keys, 'a'
and 'b', each having an associated
value of 0 (using two method)'''
# Method 1: a plain dict literal.
D1 = {'a': 0, 'b': 0}
print(D1)
# Method 2: build the same dict from a list of (key, value) pairs.
L = [('a', 0), ('b', 0)]
D2 = dict(L)
print(D2)
b6f21402f425e62f359fb875ce6d89059c3e300f | Python | RolfSievert/dotfiles | /home/.scripts/split-pdf.py | UTF-8 | 2,943 | 2.984375 | 3 | [] | no_license | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
import argparse
from PyPDF2 import PdfFileReader, PdfFileWriter
import pathlib
from copy import deepcopy
# Example of the expected index-file format: a pdf-name line followed by one
# or more `start-end` page ranges.
# NOTE(review): this constant is never referenced below — presumably it was
# meant to be interpolated into the argparse description; confirm intent.
index_example = \
"""
Name of new pdf
2-202
Second pdf name
100-200
150-300
"""
# Command-line interface: the source pdf plus the ranges/index file (-r).
parser = argparse.ArgumentParser(description=f'Set bookmarks to a pdf according to index file. Index entries start with page number followed by bookmark name, where children are defined with spaces.')
parser.add_argument('pdf_path', type=pathlib.Path, help='path to the pdf')
parser.add_argument('-r', required=True, type=pathlib.Path, help='path to file containing ranges')
args = parser.parse_args()
# Source document shared by the helpers below (read-only).
reader = PdfFileReader(args.pdf_path)
def addOutlines(writer, outlines, old_to_new_page_map, parent):
    """Recursively copy outline (bookmark) entries into *writer*.

    Only entries whose original page number appears in *old_to_new_page_map*
    are copied; their page numbers are remapped to the new document.
    *outlines* is consumed (popped) as it is processed.
    """
    while outlines:
        entry = outlines.pop(0)
        child_parent = None
        # A non-list entry immediately followed by a list is a parent node:
        # emit it first, then descend into its children with it as parent.
        followed_by_children = (
            len(outlines) > 0
            and type(entry) is not list
            and type(outlines[0]) is list
        )
        if followed_by_children and entry.page in old_to_new_page_map:
            child_parent = writer.add_outline_item(
                title=entry.title,
                pagenum=old_to_new_page_map[entry.page],
                parent=parent,
            )
            entry = outlines.pop(0)
        if type(entry) is list:
            addOutlines(writer, entry, old_to_new_page_map, child_parent)
        elif entry.page in old_to_new_page_map:
            writer.add_outline_item(
                title=entry.title,
                pagenum=old_to_new_page_map[entry.page],
                parent=parent,
            )
def writePdf(writer, pdf_name, old_to_new_page_map):
    """Attach remapped bookmarks to *writer* and save it as `<pdf_name>.pdf`.

    Works on a deep copy of the source outline so the module-level *reader*
    is never mutated.
    """
    addOutlines(writer, deepcopy(reader.outline), old_to_new_page_map, parent=None)
    # Ask viewers to open the document with the bookmarks panel visible.
    writer.page_mode = "/UseOutlines"
    out_name = pdf_name + '.pdf'
    print(f"Saving '{out_name}'...\n")
    with open(out_name, "wb") as f:
        writer.write(f)
# Walk the index file line by line. A `start-end` range line copies those
# pages into the current output pdf; any other non-empty line starts a new
# output pdf (flushing the previous one first).
with open(args.r, 'r') as index:
    pdf_name = ''
    writer = PdfFileWriter()
    old_to_new_page_map = {}  # original page number -> page number in output
    page = 0  # next page index in the output document
    for line in index:
        title = line.strip()
        range_split = line.strip().split('-')
        if len(range_split) == 2 and range_split[0].isdigit() and range_split[1].isdigit():
            print(f"Adding range {range_split} to '{pdf_name}'...")
            # add range to pdf (index file is 1-based, reader.pages 0-based)
            start = int(range_split[0]) - 1
            end = int(range_split[1])
            for page_num in range(start, end):
                old_to_new_page_map[page_num] = page
                page = page + 1
            for page_num in range(start, end):
                writer.add_page(reader.pages[page_num])
        elif title != '':
            # save previous pdf range
            if not pdf_name.isspace() and pdf_name != '':
                writePdf(writer, pdf_name, old_to_new_page_map)
            # assign new pdf: reset writer, page map and counter
            writer = PdfFileWriter()
            old_to_new_page_map = {}
            page = 0
            pdf_name = title
    # save last pdf (the loop only flushes when a *new* name is seen)
    if not pdf_name.isspace() and pdf_name != '':
        writePdf(writer, pdf_name, old_to_new_page_map)
print("Done.")
| true |
09daf5c586e83c92068dc609309e92e97d335578 | Python | aiborra11/Geoquery-project | /source/modules/acquisition.py | UTF-8 | 970 | 2.84375 | 3 | [] | no_license | from pymongo import MongoClient
import pandas as pd
#Connecting the database with the queried data (companies_cb)
def mongo_connect(host):
    """Connect to MongoDB and return the `companies_cb` collection.

    Args:
        host: MongoDB connection string or host name.

    Returns:
        The `companies_cb` collection from the `DBcompanies_cb` database.
    """
    client = MongoClient(host)
    return client.DBcompanies_cb.companies_cb
# Query using Pymongo to receive all the required data for my analysis. (I know we should query everything at the same place (pymongo or mongodb compass), but was interested in trying both to learn.).
def mongo_query(data, min_employee=10, max_employee=51):
    """Fetch companies with one office, a geolocation and an employee count in range.

    Args:
        data: MongoDB collection to query.
        min_employee: Minimum number of employees (inclusive).
        max_employee: Maximum number of employees (inclusive).

    Returns:
        pandas.DataFrame: One row per matching company document.
    """
    conditions = [
        {'offices': {'$exists': True}},
        {'offices': {'$ne': None}},
        {'number_of_employees': {'$gte': min_employee}},
        {'number_of_employees': {'$lte': max_employee}},
        {'offices.latitude': {'$ne': None}},
        {'offices.longitude': {'$ne': None}},
        {'offices.latitude': {'$exists': True}},
        {'offices.longitude': {'$exists': True}},
    ]
    cursor = data.find({'$and': conditions})
    return pd.DataFrame(cursor)
| true |
22064b13580015727a2c0bb82295521cbf4b8fff | Python | naiveHobo/PostOCR | /PostOCR/helpbox.py | UTF-8 | 2,410 | 2.515625 | 3 | [
"MIT"
] | permissive | import os
from tkinter import *
from PIL import Image, ImageTk
from .config import ROOT_PATH, BACKGROUND_COLOR
class HelpBox(Frame):
    '''Help/about panel: renders the text from ./help.txt with toolbar icons.'''

    def __init__(self, master, **kw):
        '''Builds the complete help UI inside this frame.

        Args:
            master: Parent Tk widget.
            **kw: Extra options forwarded to the Frame constructor.
        '''
        Frame.__init__(self, master, **kw)
        # One stretching column; three rows: title / scrollable body / footer.
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=0)
        self.rowconfigure(1, weight=0)
        self.rowconfigure(2, weight=0)
        Label(self, text="Meet PostOCR!", anchor='nw', width=100,
              font="OpenSans 22 bold", fg='white', bg=BACKGROUND_COLOR, bd=2).grid(row=0, column=0, padx=20, pady=20)
        Label(self, text="Made with ❤ by naiveHobo", anchor='nw', width=100,
              font="OpenSans 10 bold", fg='white', bg=BACKGROUND_COLOR, bd=2).grid(row=2, column=0, padx=20, pady=20)
        # Fixed-size container so the Text widget cannot resize the window.
        text_frame = Frame(self, height=440, width=550, bg=BACKGROUND_COLOR, bd=2, relief=SUNKEN)
        text_frame.grid(row=1, column=0)
        text_frame.grid_propagate(False)
        text_frame.grid_rowconfigure(0, weight=1)
        text_frame.grid_columnconfigure(0, weight=1)
        text_box = Text(text_frame, borderwidth=3, relief="sunken", bg=BACKGROUND_COLOR,
                        fg='white', font="OpenSans 12", wrap='word')
        # NOTE(review): help.txt and ./widgets resolve against the CWD while
        # the toolbar image below uses ROOT_PATH — confirm the mix is intended.
        with open('./help.txt', 'r') as infile:
            texts = infile.read()
        # Split on blank-line separators, re-appending the separator to each chunk.
        texts = [text + '\n\n\n' for text in texts.split('\n\n\n')]
        text_box.insert('1.0', texts[0])
        texts = texts[1:]
        paths = ['open_file.png', 'clear.png', 'search.png', 'extract.png', 'ocr.png', 'find.png', 'fix.png']
        # Keep the PhotoImage references on self: Tkinter images vanish from
        # the widget if no Python reference keeps them alive.
        self.images = [ImageTk.PhotoImage(Image.open(os.path.join('./widgets', path))) for path in paths]
        # Interleave one toolbar icon with each help paragraph.
        for text, image in zip(texts, self.images):
            text_box.image_create(END, image=image)
            text_box.insert(END, ' ' + text)
        # Last chunk: first paragraph, then the toolbar screenshot, then the rest.
        text_box.insert(END, texts[-1].split('\n\n')[0] + '\n')
        self.images.append(ImageTk.PhotoImage(Image.open(os.path.join(ROOT_PATH, 'widgets/toolbar.png'))))
        text_box.image_create(END, image=self.images[-1])
        text_box.insert(END, '\n\n' + '\n\n'.join(texts[-1].split('\n\n')[1:]))
        text_box.config(state=DISABLED)  # read-only for the user
        text_box.grid(row=0, column=0, sticky="nsew", padx=2, pady=2)
        scroll_bar = Scrollbar(text_frame, command=text_box.yview, bg=BACKGROUND_COLOR)
        scroll_bar.grid(row=0, column=1, sticky='nsew')
        text_box['yscrollcommand'] = scroll_bar.set
| true |
e4012b9a8fbb536f921d0d50a2d6ed49c1b38392 | Python | dipakdash/python | /python_socratica/10_lists.py | UTF-8 | 599 | 4.5 | 4 | [] | no_license | #!/usr/bin/python3
# Lists can contain duplicates, different data types
# Demo lists; lists can contain duplicates and mixed data types.
numbers = [1,3,5,7,9,17]
letters = ['a', 'b', 'c']
print(f'list numbers = {numbers}')
print(f'list letters = {letters}')
# Indexing: 0 is the first element, -1 the last.
print(f'first element of list numbers is numbers[0] = {numbers[0]}')
print(f'last element of list numbers is numbers[-1] = {numbers[-1]}')
# + concatenates lists into a new list.
print(f'list numbers + letters = {numbers + letters}')
# reverse() mutates the list in place (returns None).
letters.reverse()
print(f'list letters reversed in place {letters}')
# Slicing: [start:stop] excludes stop; a negative step walks backwards.
print(f'sliced list numbers, numbers[1:4] = {numbers[1:4]}')
print(f'sliced list numbers reversed, numbers[4:1:-1] = {numbers[4:1:-1]}')
| true |
830a951e037eab94ec4c5292426ab97ba35cf0e9 | Python | dcasasmol/goz | /goz/dbapi/models.py | UTF-8 | 30,818 | 2.53125 | 3 | [] | no_license | # dbapi/models.py
import datetime
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User as djangoUser
from .exceptions import UserNotSaved
from utils.views import generate_password, is_valid_password
class User(models.Model):
    '''This class models the *Game of Zones* user model.

    Note:
      This class uses a Django User object to authenticate itself in Django
      authentication system.

    Attributes:
      MALE (str): Male database token.
      FEMALE (str): Female database token.
      NOT_AVAILABLE: (str): Gender not available database token.
      GENDER_CHOICES (tuple): Database gender choices.
      id (int): User id.
      user (djangoUser): Django User object.
      username (str): Django User username (*Foursquare* user id).
      password (str): Django User password.
      first_name (str): User first name.
      last_name (str): User last name.
      gender (str): User gender, select one of `GENDER_CHOICES`,
        default to `NOT_AVAILABLE`.
      birth_date (date, optional): User birth date, default None.
      photo (str): User photo slug.
      city (str, optional): User city.
      bio (str, optional): User biography.
      email (str, optional): User email.
      facebook (str, optional): User `Facebook` slug.
      twitter (str, optional): User `Twitter` slug.
      friends (list of User, optional): User friends (another User objects).
      creation_date (datetime): User creation datetime.
      last_update (datetime): User last update datetime.
    '''
    MALE = 'ma'
    FEMALE = 'fe'
    NOT_AVAILABLE = 'na'
    GENDER_CHOICES = (
        (NOT_AVAILABLE, 'TOKEN_NA'),
        (MALE, 'TOKEN_MALE'),
        (FEMALE, 'TOKEN_FEMALE'),
    )
    id = models.AutoField(primary_key=True)
    user = models.OneToOneField(djangoUser, related_name='goz_user')
    username = models.CharField(max_length=255, unique=True)
    password = models.CharField(max_length=255)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    gender = models.CharField(max_length=2,
                              choices=GENDER_CHOICES,
                              default=NOT_AVAILABLE)
    birth_date = models.DateField(blank=True, null=True, default=None)
    photo = models.SlugField(max_length=255)
    city = models.CharField(max_length=255, blank=True)
    bio = models.TextField(blank=True)
    email = models.EmailField(blank=True)
    facebook = models.SlugField(max_length=255, blank=True)
    twitter = models.SlugField(max_length=255, blank=True)
    friends = models.ManyToManyField('self', blank=True, symmetrical=True)
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)

    def change_password(self, new_pw):
        '''Changes the current password by a given new one.

        Only changes the password if the user is saved (`user` attribute has a
        Django User object instance) and `new_pw` is a valid password.

        Note:
          Uses `set_password` methods of Django User model.

        Args:
          new_pw (str): New password.

        Raises:
          UserNotSaved: If the user is not saved (`user` attribute has a Django
            User object instance).
          ValidationError: If `new_pw` is not a valid password.
        '''
        if self.user_id:
            if is_valid_password(new_pw):
                old_pw = self.password
                try:
                    self.user.set_password(new_pw)
                    self.password = new_pw
                    self.save()
                except Exception as e:
                    # Best-effort rollback: restore the previous password and
                    # swallow the error (deliberate — callers see no failure).
                    self.user.set_password(old_pw)
                    self.user.save()
            else:
                raise ValidationError
        else:
            raise UserNotSaved

    def enable(self):
        '''Sets the `is active` attribute of Django User model to True.

        Only enables the user if is saved (`user` attribute has a Django User
        object instance).

        Note:
          Uses `is_active` attribute of Django User model.

        Raises:
          UserNotSaved: If the user is not saved (`user` attribute has a Django
            User object instance).
        '''
        if self.user_id and not self.is_active:
            self.user.is_active = True
            self.user.save()
        elif not self.user_id:
            raise UserNotSaved

    def disable(self):
        '''Sets the `is active` attribute of Django User model to False.

        Only disables the user if is saved (`user` attribute has a Django User
        object instance).

        Note:
          Uses `is_active` attribute of Django User model.

        Raises:
          UserNotSaved: If the user is not saved (`user` attribute has a Django
            User object instance).
        '''
        if self.user_id and self.is_active:
            self.user.is_active = False
            self.user.save()
        elif not self.user_id:
            raise UserNotSaved

    @property
    def full_name(self):
        '''Gets the full name of the user.

        Returns:
          str: The first_name plus the last_name, separated by a space.
        '''
        return ' '.join([self.first_name, self.last_name])

    @property
    def is_active(self):
        '''Checks if the user is active.

        Note:
          Uses `is_active` attribute of Django User model.

        Returns:
          bool: True if successful, False otherwise.
        '''
        return self.user.is_active if self.user_id else False

    @property
    def is_logged(self):
        '''Checks if the user is logged in the system.

        Note:
          Uses `is_authenticated` method of Django User model.

        Returns:
          bool: True if successful, False otherwise.
        '''
        return self.user.is_authenticated() if self.user_id else False

    @property
    def is_female(self):
        '''Checks if the user is female.

        Returns:
          bool: True if successful, False otherwise.
        '''
        return self.gender == self.FEMALE

    def is_friend(self, user):
        '''Checks if a given User is friend or not.

        Args:
          user (User): User object to check.

        Returns:
          bool: True if successful, False otherwise.
        '''
        # BUGFIX: `user in self.friends` fails on a related manager (it is not
        # iterable and defines no __contains__); query the relation instead.
        return self.friends.filter(pk=user.pk).exists()

    @property
    def is_male(self):
        '''Checks if the user is male.

        Returns:
          bool: True if successful, False otherwise.
        '''
        return self.gender == self.MALE

    @property
    def last_login(self):
        '''Gets the datetime of the user's last login.

        Note:
          Uses `last_login` attribute of Django User model.

        Returns:
          datetime: User's last login.
        '''
        return self.user.last_login

    @property
    def num_badges(self):
        '''Gets the badges number of the user.

        Returns:
          int: Badges number.
        '''
        return self.badges.filter(user=self).count()

    @property
    def num_checkins(self):
        '''Gets the checkins number of the user.

        Returns:
          int: Checkins number.
        '''
        return self.checkins.filter(user=self).count()

    @property
    def num_friends(self):
        '''Gets the friends number of the user.

        Returns:
          int: Friends number.
        '''
        return self.friends.count()

    @property
    def num_kingdoms(self):
        '''Gets the kingdoms number of the user.

        Returns:
          int: Kingdoms number.
        '''
        return self.kingdoms.count()

    @property
    def num_purchases(self):
        '''Gets the purchases number of the user.

        Returns:
          int: Purchases number.
        '''
        return self.purchases.filter(user=self).count()

    def _process_gender(self, raw_gender):
        '''Processes a raw gender to store it in databse.

        Args:
          raw_gender (str): Raw gender.

        Returns:
          str: Processed gender.
        '''
        if raw_gender.lower() in ['male', 'masculine', 'man']:
            gender = self.MALE
        elif raw_gender.lower() in ['female', 'feminine', 'woman']:
            gender = self.FEMALE
        else:
            gender = self.NOT_AVAILABLE
        return gender

    def reset_password(self):
        '''Resets the current password by a random one.

        Only resets the password if the user is saved (`user` attribute has a
        Django User object instance).

        Note:
          Uses `set_password` methods of Django User model.

        Raises:
          UserNotSaved: If the user is not saved (`user` attribute has a Django
            User object instance).
        '''
        if self.user_id:
            old_pw = self.password
            try:
                random_pw = generate_password(size=8)
                self.user.set_password(random_pw)
                self.password = random_pw
                self.save()
            except Exception as e:
                # Best-effort rollback, mirroring change_password().
                self.user.set_password(old_pw)
                self.user.save()
        else:
            raise UserNotSaved

    def save(self, *args, **kwargs):
        '''Saves the User object creating/updating the Django User model.

        Args:
          *args: Variable length argument list.
          **kwargs: Arbitrary keyword arguments.
        '''
        if not self.user_id:
            # If User not has Django user object, creates a new one.
            self.password = generate_password(size=8)
            hashed_password = make_password(self.password)
            self.user = djangoUser.objects.create(username=self.username,
                                                  password=hashed_password)
        # Updates Django user attributes.
        self.user.first_name = self.first_name
        self.user.last_name = self.last_name
        self.user.email = self.email
        self.user.save()
        self.gender = self._process_gender(self.gender)
        # Finally, saves User object.
        super(User, self).save(*args, **kwargs)

    def __str__(self):
        '''Displays a human-readable representation of the User object.

        Returns:
          str: Human-readable representation of the User object.
        '''
        return '[%s] %s %s' % (self.username,
                               self.first_name,
                               self.last_name)

    @property
    def total_score(self):
        '''Gets the total score of the user.

        Returns:
          int: Total score (None if the user has no scores yet).
        '''
        # BUGFIX: `Sum` was referenced bare but never imported (NameError);
        # use the qualified name from the already-imported `models` module.
        return self.scores.filter(user=self).aggregate(
            sum=models.Sum('points'))['sum']
class Categorie(models.Model):
    '''Models a *Foursquare* categorie.

    Attributes:
      id (int): Categorie id.
      name (str): Categorie name.
      icon (str): Categorie icon slug.
      creation_date (datetime): Categorie creation datetime.
      last_update (datetime): Categorie last update datetime.
      active (bool): Whether the Categorie is active, default True.
    '''
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    icon = models.SlugField(max_length=255)
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)

    def enable(self):
        '''Activates the Categorie, persisting the change (no-op if active).'''
        if self.is_active:
            return
        self.active = True
        self.save()

    def disable(self):
        '''Deactivates the Categorie, persisting the change (no-op if inactive).'''
        if not self.is_active:
            return
        self.active = False
        self.save()

    @property
    def is_active(self):
        '''bool: True if the Categorie is active, False otherwise.'''
        return self.active

    @property
    def num_venues(self):
        '''int: Number of venues belonging to this Categorie.'''
        return self.venues.count()

    def __str__(self):
        '''Human-readable representation: the categorie name.'''
        return self.name
class Zone(models.Model):
    '''This class models a *Game of Zones* zone.

    A zone is a part of a town/city where you can make checkins to get points.
    The user with the highest score in a zone is called king.

    Attributes:
      id (int): Zone id.
      name (str): Zone name.
      king (User): Zone king.
      stroke_colour (str): Zone border colour, default '#008800'.
      stroke_weight (str): Zone border weight, default '4',
      stroke_opacity (str): Zone border opacity, default '1',
      fill_colour (str): Zone fill colour, default '#008800',
      fill_opacity (str): Zone fill opacity, default '0.2',
      points (str): A list of points which mark off the zone.
      scores (list of Score): Scores in the Zone for many users.
      creation_date (datetime): Zone creation datetime.
      last_update (datetime): Zone last update datetime.
      active (bool): If the Zone is active or not, default True.
    '''
    id = models.AutoField(primary_key=True)
    # NOTE(review): no on_delete is given, so this relies on the historical
    # CASCADE default (Django < 2.0) — confirm before upgrading Django.
    king = models.ForeignKey(User,
                             related_name='kingdoms',
                             related_query_name='kingdom')
    name = models.CharField(max_length=255)
    stroke_colour = models.CharField(max_length=7, default='#008800')
    stroke_weight = models.CharField(max_length=1, default='4')
    stroke_opacity = models.CharField(max_length=1, default='1')
    fill_colour = models.CharField(max_length=7, default='#008800')
    fill_opacity = models.CharField(max_length=3, default='0.2')
    points = models.TextField(blank=True)
    scores = models.ManyToManyField(User, through='Score')
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)

    class Meta:
        '''Zone model metadata.

        Attributes:
          ordering (list of str): Fields to order by in queries.
        '''
        ordering = ['id']

    def enable(self):
        '''Sets the `active` attribute of the Zone to True.
        '''
        if not self.is_active:
            self.active = True
            self.save()

    def disable(self):
        '''Sets the `active` attribute of the Zone to False.
        '''
        if self.is_active:
            self.active = False
            self.save()

    @property
    def is_active(self):
        '''Checks if the Zone is active.

        Returns:
          bool: True if active, False otherwise.
        '''
        return self.active

    @property
    def num_venues(self):
        '''Gets the venues number of the Zone.

        Returns:
          int: Venues number.
        '''
        return self.venues.count()

    def __str__(self):
        '''Displays a human-readable representation of the Zone object.

        Returns:
          str: Human-readable representation of the Zone object.
        '''
        return self.name

    @property
    def total_score(self):
        '''Gets the total score of the Zone.

        Returns:
          int: Total score (None if the Zone has no scores yet).
        '''
        # BUGFIX: `Sum` was referenced bare but never imported (NameError);
        # use the qualified name from the already-imported `models` module.
        return self.scores.filter(zone=self).aggregate(
            sum=models.Sum('points'))['sum']
class Venue(models.Model):
    '''Models a *Foursquare* venue.

    Attributes:
      id (int): Venue id.
      name (str): Venue name.
      lat (str): Venue latitude.
      lng (str): Venue longitude.
      foursquare_url (str): Venue *Foursquare* url slug.
      categorie (Categorie): Venue categorie.
      zone (Zone): Venue zone.
      checkins (list of Checkin): Checkins in the Venue for many users.
      creation_date (datetime): Venue creation datetime.
      last_update (datetime): Venue last update datetime.
      active (bool): Whether the Venue is active, default True.
    '''
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    lat = models.CharField(max_length=255)
    lng = models.CharField(max_length=255)
    foursquare_url = models.SlugField()
    categorie = models.ForeignKey(Categorie,
                                  related_name='venues',
                                  related_query_name='venue')
    zone = models.ForeignKey(Zone,
                             related_name='venues',
                             related_query_name='venue')
    checkins = models.ManyToManyField(User, through='Checkin')
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)

    def enable(self):
        '''Activates the Venue, persisting the change (no-op if active).'''
        if self.is_active:
            return
        self.active = True
        self.save()

    def disable(self):
        '''Deactivates the Venue, persisting the change (no-op if inactive).'''
        if not self.is_active:
            return
        self.active = False
        self.save()

    @property
    def is_active(self):
        '''bool: True if the Venue is active, False otherwise.'''
        return self.active

    @property
    def num_checkins(self):
        '''int: Number of checkins registered at this Venue.'''
        return self.checkins.filter(venue=self).count()

    def __str__(self):
        '''Human-readable representation: the venue name.'''
        return self.name
class Item(models.Model):
    '''Models a *Game of Zones* item.

    An item is an object that can be bought spending points and can be used
    to attack other users zones or to defend yours.

    Attributes:
      id (int): Item id.
      name (str): Item name.
      description (str): Item description.
      attack (int): Item attack value, default 0.
      defense (int): Item defense value, default 0.
      speed (int): Item speed value, default 0.
      reach (int): Item reach value, default 0.
      price (int): Item price, default 0.
      duration (int): Item duration, default 0.
      icon (str): Item icon slug.
      purchasers (list of Purchase): Purchases of the Item for many users.
      creation_date (datetime): Item creation datetime.
      last_update (datetime): Item last update datetime.
      active (bool): Whether the Item is active, default True.
    '''
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    description = models.CharField(max_length=255)
    attack = models.SmallIntegerField(default=0)
    defense = models.SmallIntegerField(default=0)
    speed = models.SmallIntegerField(default=0)
    reach = models.PositiveSmallIntegerField(default=0)
    price = models.PositiveSmallIntegerField(default=0)
    duration = models.PositiveSmallIntegerField(default=0)
    icon = models.SlugField(max_length=255)
    purchasers = models.ManyToManyField(User, through='Purchase')
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)

    def enable(self):
        '''Activates the Item, persisting the change (no-op if active).'''
        if self.is_active:
            return
        self.active = True
        self.save()

    def disable(self):
        '''Deactivates the Item, persisting the change (no-op if inactive).'''
        if not self.is_active:
            return
        self.active = False
        self.save()

    @property
    def is_active(self):
        '''bool: True if the Item is active, False otherwise.'''
        return self.active

    @property
    def num_purchases(self):
        '''int: Number of purchases of this Item.'''
        return self.purchasers.filter(item=self).count()

    def __str__(self):
        '''Human-readable representation: the item name.'''
        return self.name
class Badge(models.Model):
    '''Models a *Game of Zones* badge.

    A badge is a reward that an user earns after completing different
    objectives.

    Attributes:
      id (int): Badge id.
      name (str): Badge name.
      description (str): Badge description.
      unlock_message (str): Badge unlock message.
      level (int): Badge level, default 0.
      icon (str): Badge icon slug.
      unlockings (list of Unlocking): Unlockings of the Badge for many users.
      creation_date (datetime): Badge creation datetime.
      last_update (datetime): Badge last update datetime.
      active (bool): Whether the Badge is active, default True.
    '''
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    description = models.CharField(max_length=255)
    unlock_message = models.CharField(max_length=255)
    level = models.PositiveSmallIntegerField(default=0)
    icon = models.SlugField(max_length=255)
    unlockings = models.ManyToManyField(User, through='Unlocking')
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)

    class Meta:
        '''Badge model metadata.

        Attributes:
          ordering (list of str): Fields to order by in queries.
        '''
        ordering = ['level']

    def enable(self):
        '''Activates the Badge, persisting the change (no-op if active).'''
        if self.is_active:
            return
        self.active = True
        self.save()

    def disable(self):
        '''Deactivates the Badge, persisting the change (no-op if inactive).'''
        if not self.is_active:
            return
        self.active = False
        self.save()

    @property
    def is_active(self):
        '''bool: True if the Badge is active, False otherwise.'''
        return self.active

    @property
    def num_unlockings(self):
        '''int: Number of times this Badge has been unlocked.'''
        return self.unlockings.filter(badge=self).count()

    def __str__(self):
        '''Human-readable representation: the badge name.'''
        return self.name
class Score(models.Model):
    '''This class is an intermediate model between User and Zone.

    Store the points earned by an user in a zone.

    Attributes:
      id (int): Score id.
      user (User): User who earns points.
      zone (Zone): Zone where the User earns points.
      points (int): Score earned, default 0.
      creation_date (datetime): Score creation datetime.
      last_update (datetime): Score last update datetime.
    '''
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User,
                             related_name='scores',
                             related_query_name='score')
    zone = models.ForeignKey(Zone)
    points = models.PositiveIntegerField(default=0)
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)

    class Meta:
        '''Score model metadata.

        Attributes:
          unique_together (tuple): Tuple of fields which must be unique.
          ordering (list of str): Fields to order by in queries.
        '''
        unique_together = ('user', 'zone')
        ordering = ['user', '-points']

    @property
    def is_king(self):
        '''Check if the related user is the king of the related zone.

        Returns:
          bool: True if is the king, False otherwise.
        '''
        # BUGFIX: identity (`is`) is wrong here — the ORM returns distinct
        # Python objects for the same row, so `is` is almost always False.
        # Model equality (`==`) compares primary keys, which is what we want.
        return self.user == self.zone.king

    def __str__(self):
        '''Displays a human-readable representation of the Score object.

        Returns:
          str: Human-readable representation of the Score object.
        '''
        return '%s:%s [%s]' % (self.user, self.zone, self.points)
class Checkin(models.Model):
    '''Intermediate model between User and Venue.

    Stores how many checkins a user has made at a venue.

    Attributes:
      id (int): Checkin id.
      user (User): User who made checkins.
      venue (Venue): Venue where the checkin is made.
      number (int): Checkins number made, default 0.
      process (bool): Whether the Checkin has been processed, default True.
      creation_date (datetime): Checkin creation datetime.
      last_update (datetime): Checkin last update datetime.
    '''
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User,
                             related_name='checkins',
                             related_query_name='checkin')
    venue = models.ForeignKey(Venue)
    number = models.PositiveIntegerField(default=0)
    process = models.BooleanField(default=True)
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)

    class Meta:
        '''Checkin model metadata.

        Attributes:
          unique_together (tuple): Tuple of fields which must be unique.
          ordering (list of str): Fields to order by in queries.
        '''
        unique_together = ('user', 'venue')
        ordering = ['user', '-number']

    @property
    def is_processed(self):
        '''bool: True if this Checkin has been processed, False otherwise.'''
        return self.process

    def __str__(self):
        '''Human-readable representation: "user:venue [number]".'''
        return '%s:%s [%s]' % (self.user, self.venue, self.number)
class Purchase(models.Model):
    '''This class is an intermediate model between User and Item.

    Store the number of items bought by an user.

    Attributes:
      id (int): Purchase id.
      user (User): User who purchase.
      item (Item): Item purchased.
      number (int): Number of items purchased, default 0.
      creation_date (datetime): Purchase creation datetime.
      last_update (datetime): Purchase last update datetime.
    '''
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User,
                             related_name='purchases',
                             related_query_name='purchase')
    item = models.ForeignKey(Item)
    number = models.PositiveIntegerField(default=0)
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)

    class Meta:
        '''Purchase model metadata.

        Attributes:
          unique_together (tuple): Tuple of fields which must be unique.
          ordering (list of str): Fields to order by in queries.
        '''
        unique_together = ('user', 'item')
        ordering = ['user', '-number']

    @property
    def is_expired(self):
        '''Checks if the Purchase has expired or not.

        Returns:
          bool: True if has expired, False otherwise. None if the Purchase
            has not been saved yet (no creation date).
        '''
        if not self.creation_date:
            return None
        # BUGFIX: datetime.today() is naive; under Django's USE_TZ the stored
        # creation_date is timezone-aware and comparing the two raises
        # TypeError. Build "now" with the same awareness as creation_date.
        now = datetime.datetime.now(tz=self.creation_date.tzinfo)
        delta = datetime.timedelta(days=self.item.duration)
        return (self.creation_date + delta) < now

    def __str__(self):
        '''Displays a human-readable representation of the Purchase object.

        Returns:
          str: Human-readable representation of the Purchase object.
        '''
        return '%s:%s [%s]' % (self.user, self.item, self.number)
class Unlocking(models.Model):
    '''This class is an intermediate model between User and Badge.
    Records which badge was unlocked by which user and when.
    Attributes:
        id (int): Unlocking id.
        user (User): User who unlock.
        badge (Badge): Badge unlocked.
        creation_date (datetime): Unlocking creation datetime.
        last_update (datetime): Unlocking last update datetime.
    '''
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User,
                             related_name='badges',
                             related_query_name='badge')
    badge = models.ForeignKey(Badge)
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    class Meta:
        '''Unlocking model metadata.
        Attributes:
            unique_together (tuple): Tuple of fields which must be unique.
            ordering (list of str): Fields to order by in queries.
        '''
        unique_together = ('user', 'badge')
        ordering = ['user', '-creation_date']
    def __str__(self):
        '''Displays a human-readable representation of the Unlocking object.
        Format: "user:badge".
        Returns:
            str: Human-readable representation of the Unlocking object.
        '''
        # Fixed: previously returned '%s:%s' % (self.user, self.item); this
        # model has no `item` field (copy-paste from Purchase), so __str__
        # raised AttributeError. Use the actual `badge` field.
        return '%s:%s' % (self.user, self.badge)
class Event(models.Model):
    '''This class models a *Game of Zones* event.
    An event is an offer for a certain time that can affect venues, zones, items,
    categories or users.
    Attributes:
        ON_EVENT (str): On event status database token.
        OFF_EVENT (str): Off event status database token.
        STATUS_CHOICES (tuple): Event status choices.
        id (int): Event id.
        name (str): Event name.
        description (str): Event description.
        start_date (date): Event start date.
        end_date (date): Event end date.
        status (str): Event status, select one of `STATUS_CHOICES`.
        venue (Venue, optional): Venue affected by the Event.
        zone (Zone, optional): Zone affected by the Event.
        item (Item, optional): Item affected by the Event.
        categorie (Categorie, optional): Categorie affected by the Event.
        user (User, optional): User affected by the Event.
        creation_date (datetime): Event creation datetime.
        last_update (datetime): Event last update datetime.
        active (bool): If the Event is active or not, default True.
    '''
    ON_EVENT = 'on'
    OFF_EVENT = 'off'
    STATUS_CHOICES = (
        (ON_EVENT, 'TOKEN_ON'),
        (OFF_EVENT, 'TOKEN_OFF'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    description = models.CharField(max_length=255)
    start_date = models.DateField()
    end_date = models.DateField()
    status = models.CharField(max_length=3,
                              choices=STATUS_CHOICES,
                              default=OFF_EVENT)
    # NOTE(review): these ForeignKeys have no on_delete (pre-Django-2.0 style)
    # and use blank=True without null=True, which makes the DB column
    # non-nullable despite the "optional" docs -- confirm intent.
    venue = models.ForeignKey(Venue, blank=True,
                              related_name='events',
                              related_query_name='event')
    zone = models.ForeignKey(Zone, blank=True,
                             related_name='events',
                             related_query_name='event')
    item = models.ForeignKey(Item, blank=True,
                             related_name='events',
                             related_query_name='event')
    categorie = models.ForeignKey(Categorie, blank=True,
                                  related_name='events',
                                  related_query_name='event')
    user = models.ForeignKey(User, blank=True,
                             related_name='events',
                             related_query_name='event')
    creation_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)
    class Meta:
        '''Event model metadata.
        Attributes:
            ordering (list of str): Fields to order by in queries.
        '''
        ordering = ['-status', 'end_date']
    @property
    def is_active(self):
        '''Checks if the Event is active.
        Returns:
            bool: True if active, False otherwise.
        '''
        return self.active
    @property
    def is_expired(self):
        '''Checks if the Event has expired or not.
        Note the `<=` comparison: an event counts as expired starting on
        its end_date itself.
        Returns:
            bool: True if has expired, False otherwise; None if end_date unset.
        '''
        is_expired = None
        if self.end_date:
            is_expired = self.end_date <= datetime.datetime.today().date()
        return is_expired
    @property
    def is_started(self):
        '''Checks if the Event has started or not.
        Returns:
            bool: True if has started, False otherwise; None if start_date unset.
        '''
        is_started = None
        if self.start_date:
            is_started = self.start_date <= datetime.datetime.today().date()
        return is_started
    @property
    def is_live(self):
        '''Checks if the Event is live or not (started and not yet expired).
        Returns:
            bool: True if is live, False otherwise.
        '''
        return self.is_started and not self.is_expired
    def __str__(self):
        '''Displays a human-readable representation of the Event object.
        Returns:
            str: Human-readable representation of the Event object.
        '''
        return self.name
| true |
6beb56d9675836d5662230b9c70d84289061b9cb | Python | lochwg/AI-Big-data-Training-class | /cv2 - 模糊處理.py | UTF-8 | 684 | 3.015625 | 3 | [] | no_license | import cv2
from matplotlib import pyplot as plt
# Load the same image twice: once in BGR (OpenCV default) and once grayscale.
img_bgr = cv2.imread('Nikola_Tesla.jpg')
img_gray = cv2.imread('Nikola_Tesla.jpg', cv2.IMREAD_GRAYSCALE)
# The larger the kernel size, the blurrier the result.
img_gauss = cv2.GaussianBlur(img_bgr, (3, 3), 0)
# Convert BGR -> RGB so matplotlib displays the colors correctly.
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
plt.figure()
# =============================================================================
# subplot(2,2,1) means: split the figure into a 2x2 grid, use cell 1
# =============================================================================
plt.subplot(2,2,1)
plt.axis('off')
plt.imshow(img_gray, cmap='gray')
plt.subplot(2,2,2)
plt.imshow(img_rgb)
plt.subplot(2,2,3)
plt.imshow(img_gauss)
plt.show()
4e90210ffb48e493b3e8d468ac1e12f4ea3b35e3 | Python | afeldman1/SSW555Team4Project | /gedcom/tests/test_family.py | UTF-8 | 912 | 3.03125 | 3 | [] | no_license | """
SSW 555
2016 Spring
Team 4
"""
import unittest
from gedcom.family import Family
class GEDEntitiesTest(unittest.TestCase):
    """Unit tests for the gedcom Family entity."""
    def test_Family(self):
        """Family keeps uid, spouses and dates, and tracks added children."""
        entity = Family(uid = '@F01@',
                        husband = 'Mr.',
                        wife = 'Mrs.',
                        marriage_date = '20 OCT 1983',
                        divorce_date = None)
        entity.add_child('Kid1')
        entity.add_child('Kid2')
        self.assertEqual(entity.uid, '@F01@')
        self.assertEqual(entity.husband, 'Mr.')
        self.assertEqual(entity.wife, 'Mrs.')
        self.assertTrue('Kid1' in entity.children)
        self.assertTrue('Kid2' in entity.children)
        # 'Kid3' was never added, so membership must be False.
        self.assertFalse('Kid3' in entity.children)
        self.assertEqual(entity.marriage_date, '20 OCT 1983')
        self.assertTrue(entity.divorce_date is None)
if __name__ == '__main__':
unittest.main()
| true |
f679f0e035f999d7c36c932c3ccad985ab01b7b1 | Python | liq07lzucn/Scripts-RayStation-4.7.2 | /assigner_types_poi.py | UTF-8 | 1,380 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Ce script tente d'auto-assigner le bon type de POI pour chacun des POI
présentement définis pour le patient.
.. rubric::
EMPLACEMENT :
- *Patient Modeling*
- *Scripting*
- *Module specific scripts*
Le script se fie au nom des POI. Il ignore la casse.
Ainsi, *Iso scan* et *ISO SCAN* sont équivalents.
Le script assigne le type de *Localization Point* au point qui contient
la chaîne de caractères *SCAN*. Si plusieurs points avec cette chaîne de
caractères sont trouvés, une erreur est lancée.
Le script assigne le type *Isocenter* au POI qui a été déterminé comme étant
le plus probable d'être l'isocentre, toujours selon son nom (sauf dans le cas
où l'isocentre est déjà le point de localisation). Il ne lance **PAS** d'erreur
si plusieurs points avec la chaîne de caractère *ISO* sont trouvés.
Tous les autres POI sont assignés au type *Marker* par défaut.
.. rubric::
PRÉ-REQUIS :
- Nom du point de référence au CT-Sim contient la chaîne de caractères *SCAN*. Ex. : *ISO SCAN*, *REF SCAN*.
- Nom du point isocentre contenant la chaîne de caractère *ISO*. Ex. : *ISO*, *ISO SCAN*.
.. seealso::
fonction :py:func:`hmrlib.poi.auto_assign_poi_types`
"""
import setpath
import hmrlib.lib as lib
import hmrlib.poi as poi
with lib.RSScriptWrapper(__file__):
poi.auto_assign_poi_types()
| true |
a714b395c18cf4bd60c011d28a8ad67a9094638b | Python | vishalydv23/HackerRankSolution | /cracking the coding interview/Data_Structure_Stacks_Balanced_Brackets.py | UTF-8 | 668 | 3.65625 | 4 | [] | no_license | def is_matched(expression):
lefty = "({[" # opening delimiters
righty = ")}]" # respective closing delims
S = []
for c in expression:
if c in lefty:
S.append(c) # push left delimiter on stack
elif c in righty:
if not S:
return False # nothing to match with
if righty.index(c) != lefty.index(S.pop( )):
return False # mismatched
if not S:
return True
else:
return False
# HackerRank harness: first line is the number of test cases, then one
# bracket expression per line; print "YES"/"NO" for each.
t = int(input().strip())
for a0 in range(t):
    expression = input().strip()
    if is_matched(expression) == True:
        print("YES")
    else:
        print("NO")
| true |
a6b50e96397e20589fa54d2fc7c7eb5ad944fb1a | Python | rembrandtqeinstein/learningPy | /c_ex_12_5.py | UTF-8 | 919 | 3.109375 | 3 | [] | no_license | # To run this, download the BeautifulSoup zip file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter - ')
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the <span> tags (NOTE: earlier comment said "anchor tags",
# but the code selects spans on purpose for this exercise)
tags = soup('span')
count = list()
#countt = 0
#total = 0
for tag in tags:
    for v in tag:
        # Keep only span contents that parse as integers.
        try:
            v = int(v)
        except:
            continue
        count.append(v)
        #countt = countt + 1
        #total = total + v
tot = sum(count)
leng = len(count)
print("Sum:",tot, "Amount of N°:", leng, "Average:",tot/leng)
#print(leng)
#print(tot/leng)
#print(countt)
#print(total)
| true |
35b781450c873e448b79a617bf4f4e82c4e75071 | Python | yogeshjadhav22/HackerRank_Code | /Beautiful_Days_at_the_Movies.py | UTF-8 | 1,094 | 3.234375 | 3 | [] | no_license | #https://www.hackerrank.com/challenges/beautiful-days-at-the-movies/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the beautifulDays function below.
def beautifulDays(i, j, k):
d=i
arr=[]
for i in range(i,j+1):
arr.append(i)
print(arr)
for p in range(0,len(arr)):
p1=str(arr[p])
p4=p1[::-1]
print("ch",p1)
#p3=0
#p1=arr[p]%10
#p2=p1*10
#p3=arr[p]/10
#p4=p2+p3
arr[p]=int(p4)
#print(arr)
count=0
p=0
for i in range(d,j+1):
p1=i-arr[p]
p=p+1
if(p1%k==0):
count+=1
return(count)
if __name__ == '__main__':
    # HackerRank harness: read "i j k" from stdin, write the result to the
    # file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    ijk = input().split()
    i = int(ijk[0])
    j = int(ijk[1])
    k = int(ijk[2])
    result = beautifulDays(i, j, k)
    fptr.write(str(result) + '\n')
    fptr.close()
| true |
84e9a825af29b1b7cf02b9bfd57c5055cc7bfee7 | Python | jesslattif/Hackbright-Curriculum | /04 Test List Operations/ex34.py | UTF-8 | 87 | 3.015625 | 3 | [] | no_license | animal = ["bear", "tiger", "penguin", "zebra", "quail", "goldfish"]
# Python 2 print statement: slice drops the first and last element of `animal`.
print animal[1:-1]
2516b5287474505839fe0d65c333f5eec0524fda | Python | asifhossain2k20/Python-Basic-Codes | /13_String_slincing.py | UTF-8 | 160 | 3.109375 | 3 | [] | no_license | a='joyful'
print("a[1:4] ",a[1:4])
print("a[:4] ",a[:5])
print("a[3:] ",a[3:])
print("a[0:6:2]",a[0:6:2])
print("a[::-1]",a[::-1])
print("a[-1:-5]",a[-1:-5]) | true |
c79e730cc9e5bac1520c4d32dfb2bcb61d78b94c | Python | radtek/MultiverseClientServer | /Media/common/Interface/FrameXML/MarsTarget.py | UTF-8 | 911 | 2.65625 | 3 | [
"MIT"
] | permissive | import MarsUnit
def TargetFrame_OnLoad(frame):
    # Subscribe the target frame to health changes and target switches.
    # NOTE(review): `frame` presumably comes from the game UI engine -- confirm.
    frame.RegisterEvent("PROPERTY_health")
    frame.RegisterEvent("PLAYER_TARGET_CHANGED")
def TargetFrame_Update(frame):
    # Show and refresh the frame only while a target exists; otherwise hide it.
    # UnitFrame_Update is defined elsewhere in the UI scripts.
    if MarsUnit.UnitExists("target"):
        frame.Show()
        UnitFrame_Update(frame)
        TargetFrame_CheckDead()
    else:
        frame.Hide()
def TargetFrame_OnEvent(frame, event):
    # Delegate to the shared unit-frame handler first, then react to the
    # two events registered in TargetFrame_OnLoad.
    UnitFrame_OnEvent(frame, event)
    if event.eventType == "PROPERTY_health":
        TargetFrame_CheckDead()
    elif event.eventType == "PLAYER_TARGET_CHANGED":
        TargetFrame_Update(frame)
def TargetFrame_CheckDead():
    # Toggle the "dead" label over the target frame.
    # NOTE(review): TargetDeadText is presumably a global injected by the UI
    # framework -- confirm.
    if MarsUnit.UnitIsDead("target"):
        TargetDeadText.Show()
    else:
        TargetDeadText.Hide()
# Module-level initialization: hide the target frame until a target exists
# and set up the default appearance of the player/target widgets.
TargetFrame.Hide()
PlayerName.SetJustifyH("CENTER")
TargetName.SetJustifyH("CENTER")
PlayerFrameHealthBar.Show()
PlayerFrameManaBar.Show()
TargetFrameHealthBar.Show()
TargetFrameManaBar.Show()
| true |
a215ac2e4162cca5298d186a53866095471d180b | Python | lswzw/python | /huaban-img.py | UTF-8 | 1,909 | 2.703125 | 3 | [] | no_license | import requests
import os
import re
from selenium import webdriver
from time import sleep
from lxml import etree
from multiprocessing.dummy import Pool
def get_date(num):
    """Fetch the rendered HTML of huaban board `c`, scrolling `num` times to
    trigger lazy-loading of more pins.

    NOTE(review): despite the name, this returns page *source*, not a date,
    and it relies on the module-level global `c` (board id) set in __main__
    -- confirm before reuse.
    """
    option = webdriver.FirefoxOptions()
    option.add_argument('--headless')  # run Firefox without a visible window
    browser = webdriver.Firefox(options=option)
    #browser = webdriver.Firefox()
    browser.get('https://huaban.com/boards/'+str(c)+'/')
    if num == 0:
        date = browser.page_source
        browser.quit()
        return(date)
    else:
        for i in range(num):
            # Scroll to the bottom so the infinite-scroll page loads more pins.
            browser.execute_script('window.scrollTo(0,10240)')
            sleep(1)
        date = browser.page_source
        browser.quit()
        return(date)
def get_url(url):
    """Resolve a pin path to its full-size image URL and download it."""
    pin_page = requests.get('https://huaban.com' + url)
    # The image key is embedded in inline JSON on the pin page.
    key = re.findall('"key":"(.*?)", "type":"image/jpeg", "height":', pin_page.text)[0]
    save_img('http://hbimg.huabanimg.com/' + key)
def save_img(url):
    """Download one image and write it into the module-level directory `b`."""
    name = url[-30:-13]  # slice of the CDN key used as the local filename
    print(url)
    response = requests.get(url)
    with open(b+'/'+name+'.jpg', 'wb') as f:
        f.write(response.content)
#### start ####
if __name__ == '__main__':
    # Interactive configuration: board id, number of scroll pages, output dir.
    c = int(input('输入链接码: '))
    a = int(input('输入下载的页数: '))
    b = input('创建目录名: ')
    os.makedirs(b)
    list_old=[]
    # NOTE(review): `list` shadows the builtin of the same name.
    list=[]
    for i in range(a):
        date = get_date(i)
        soup = etree.HTML(date)
        list_old += soup.xpath('//*[@id="waterfall"]/div/a/@href')
    # De-duplicate pin links while preserving order.
    for i in list_old:
        if i not in list:
            list.append(i)
    # Download up to 4 pins concurrently.
    pool = Pool(4)
    pool.map(get_url,list)
    pool.close()
    pool.join()
    print('\n'+'....下载完成....')
| true |
30897dca8b03329923f660136427f9fb5e3b4b49 | Python | FDUCxz/Coffee | /main.py | UTF-8 | 1,602 | 3.40625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 17:58:49 2020
@author: CXZ
"""
from beveragebase import Beverage, CondimentDecorator
from cuptype import Midcup, Bigcup, Superbigcup
from milktype import Quanzhi, Yanmai, Tuozhi
from condimentdecorator import *
if __name__ == "__main__":
cuptypes = {"中杯":Midcup(), "大杯":Bigcup(), "超大杯":Superbigcup()}
milktypes = {"全脂":Quanzhi, "燕麦":Yanmai, "脱脂":Tuozhi}
condimenttypes = {"原味糖浆": Yuanwei,"香草糖浆":Xiangcao, "焦糖糖浆":Jiaotang, "榛果糖浆":Zhenguo, "摩卡糖浆":Moka }
try:
print("欢迎来到咖啡店!请选购咖啡!")
cuptype = input("请输入杯型(中杯/大杯/超大杯):")
coffee = cuptypes[cuptype]
temperature = input("请输入温度(冰/常温/热):")
coffee.get_temperature(temperature)
milktype = input("请输入牛奶类型(全脂/燕麦/脱脂):")
coffee = milktypes[milktype](coffee)
key = None
while key not in ["N","n"]:
condimenttype = input("情输入加入的酱料(原味糖浆/香草糖浆/焦糖糖浆/榛果糖浆/摩卡糖浆):")
coffee = condimenttypes[condimenttype](coffee)
key = input("还需要添加酱料吗[Y/N]")
print("购买商品为:" + temperature + coffee.get_description() + "咖啡一杯"+ "¥"+str(coffee.cost()))
except Exception as e:
print("输入有误,请重新输入!")
finally:
print("欢迎下次光临!")
| true |
9990c8b64273d8ecd9dfcb4afe444334529c3679 | Python | PacketImpact/lcoreapi | /lcoreapi/api.py | UTF-8 | 8,032 | 2.59375 | 3 | [
"MIT"
] | permissive | from datetime import datetime, timedelta
import json
import requests
from requests import exceptions as rexc
from urllib.parse import quote as _quote
__all__ = ['API', 'APIError', 'APIServerError', 'APIAuthError',
'APINotFoundError', 'APIMethodNotAllowedError',
'APIBadRequestError', 'BASE_URL', 'Resource']
BASE_URL = 'https://core.lambdavpn.net/v1/'
def parse_date(s):
    """ Parse API returned datetimes, handling multiple formats and
        compatibility. It's ISO 8601, with or without microseconds.
        It used to have no TZ, now has UTC "Z".
    """
    if not s:
        return None
    # Normalize the explicit UTC offset to the "Z" suffix first.
    normalized = s.replace('+00:00', 'Z')
    candidate_formats = (
        "%Y-%m-%dT%H:%M:%S.%fZ",  # UTC, with microseconds
        "%Y-%m-%dT%H:%M:%SZ",     # UTC
        "%Y-%m-%dT%H:%M:%S.%f",   # with microseconds
        "%Y-%m-%dT%H:%M:%S",
    )
    for fmt in candidate_formats:
        try:
            return datetime.strptime(normalized, fmt)
        except ValueError:
            continue
    raise ValueError("Unknown date format: %r" % normalized)
def dumps(data, **kwargs):
    """json.dumps wrapper that serializes datetimes as ISO 8601 strings."""
    def _encode_default(obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        # Anything else is unsupported, as with plain json.dumps.
        raise TypeError()
    return json.dumps(data, default=_encode_default, **kwargs)
def quote(v):
    """Render a query-string value for append_qs.

    bool -> '1'/'0'; int/float -> str(); str/bytes -> percent-encoded;
    datetime -> ISO 8601 (deliberately left unquoted, as before);
    any other object falls back to percent-encoding its str() form.
    """
    # bool must be tested before int/float: bool is an int subclass.
    if isinstance(v, bool):
        return '1' if v else '0'
    if isinstance(v, (int, float)):
        return str(v)
    if isinstance(v, (str, bytes)):
        return _quote(v)
    if isinstance(v, datetime):
        # isoformat() already returns str; the old str() wrapper was redundant.
        return v.isoformat()
    # Fixed: previously fell off the end and returned None for unsupported
    # types, which made append_qs crash with a confusing TypeError during
    # string concatenation.
    return _quote(str(v))
def append_qs(url, **filters):
    """Append the given filters to *url* as a query string, sorted by key."""
    if not filters:
        return url
    # Start a query string or extend the existing one.
    separator = '&' if '?' in url else '?'
    pairs = [key + '=' + quote(value) for key, value in sorted(filters.items())]
    return url + separator + '&'.join(pairs)
class APIError(Exception):
    """Base class for all lcore API client errors."""
    pass
class APIServerError(APIError):
    """Raised for 5xx responses."""
    pass
class APIAuthError(APIError):
    """Raised for 401/403 responses."""
    pass
class APINotFoundError(APIError):
    """Raised for 404 responses."""
    pass
class APIMethodNotAllowedError(APIError):
    """Raised for 405 responses."""
    pass
class APIBadRequestError(APIError):
    """Raised for 400 responses."""
    pass
class ListIter:
    """Lazy iterator over a paginated API "list" object.

    Walks `obj['items']`, following `obj['next']` pages as needed, while
    __len__ reports the server-side `total_count` of the whole listing.
    """
    def __init__(self, obj):
        self.obj = obj
        self.it = self._walk()

    def _walk(self):
        # Generator body runs lazily: self.obj is read on the first next().
        page = self.obj
        while page and page['items']:
            yield from page['items']
            page = page.get('next')

    def __iter__(self):
        return self

    def __len__(self):
        return self.obj['total_count']

    def __next__(self):
        return next(self.it)

    def __repr__(self):
        return repr(self.obj)

    def __str__(self):
        return str(self.obj)
class Resource(dict):
    """A dict-like API object that parses dates, wraps nested dicts, and
    lazily fetches its full representation from `href` on first miss."""
    def __init__(self, api, data):
        self.api = api
        # Flag to prevent repeating 404's
        self.__loaded = False
        # Response preprocessing: note that `data` is mutated in place.
        for k, v in data.items():
            if k.endswith('_date') or k == 'date':
                data[k] = parse_date(v)
                continue
            if isinstance(v, dict):
                data[k] = Resource(api, v)
                continue
            if isinstance(v, list):
                for n, item in enumerate(v):
                    if isinstance(item, dict):
                        v[n] = Resource(api, item)
                continue
        super().__init__(data)
    @property
    def id(self):
        # None when the payload carries no 'id' key.
        return self.get('id')
    def __getitem__(self, key):
        """dict lookup that expands a stub ({object, href, id}) by GETting
        its href once before giving up on a missing key."""
        try:
            return super().__getitem__(key)
        except KeyError:
            # May need loading
            if self.__loaded:
                raise
            if set(self.keys()).issubset({'object', 'href', 'id'}) and self.get('href'):
                self.update(self.api.get(self['href']))
                self.__loaded = True
            return super().__getitem__(key)
    def list_iter(self):
        """ Iterator for "list" objects """
        assert self.get('object') == 'list'
        return ListIter(self)
    def __str__(self):
        cn = self.__class__.__name__
        return "<{cn}#{self.id} on API({self.api!s})>".format(self=self, cn=cn)
    def __repr__(self):
        cn = self.__class__.__name__
        # Pretty-printed JSON body (datetimes serialized via dumps()).
        encoded = dumps(self, sort_keys=True, indent=2)
        return "<{cn}#{self.id} on API({self.api!s}) {encoded}>" \
            .format(self=self, cn=cn, encoded=encoded)
class API:
    """Authenticated client for the lcore REST API.

    Wraps `requests` with HTTP basic auth, JSON (de)serialization into
    Resource objects, status-code -> APIError mapping, and a small
    in-memory GET cache with a 5-minute TTL.
    """
    def __init__(self, key_id, secret, base_url=BASE_URL, timeout=10):
        self.auth = (key_id, secret)  # basic-auth pair handed to requests
        self.base_url = base_url
        self.timeout = timeout
        self._cache = dict()  # url -> (fetched_at, Resource); GETs only
        self._cache_ttl = timedelta(minutes=5)
    @property
    def public_key(self):
        # The key id is the public half of the credential pair.
        return self.auth[0]
    @property
    def info(self):
        """Server/account info endpoint."""
        return self.get('/meow')
    def _query(self, method, url, data=None):
        """Perform one HTTP call via `method` (a requests function) and map
        the response: Resource on 200/201, APIError subclass otherwise."""
        r_kwargs = {}
        r_kwargs['auth'] = self.auth
        # POST data= args as JSON instead of form data.
        # the API should accept both, but JSON is better for nested stuff.
        if data is not None:
            r_kwargs['data'] = dumps(data)
            r_kwargs['headers'] = {'Content-Type': 'application/json'}
        r_kwargs['timeout'] = self.timeout
        try:
            req = method(url, **r_kwargs)
            data = Resource(self, req.json())
        except ValueError as e:
            # Body was not valid JSON; show it for debugging before raising.
            try:
                print(req.text)
            except:
                pass
            raise APIError("Invalid response content from %r on %r" % (self, url)) from e
        except rexc.RequestException as e:
            raise APIError("Error connecting to %r" % self) from e
        if req.status_code == 200 or req.status_code == 201:
            return data
        # Error responses still carry a JSON body with a 'message' field.
        if req.status_code == 400:
            raise APIBadRequestError(data.get('message', "Bad request"))
        if req.status_code == 401:
            raise APIAuthError(data.get('message', "Unauthorized"))
        if req.status_code == 403:
            raise APIAuthError(data.get('message', "Forbidden"))
        if req.status_code == 404:
            raise APINotFoundError(data.get('message', "Not found"))
        if req.status_code == 405:
            raise APIMethodNotAllowedError(data.get('message', "Method not allowed"))
        if req.status_code >= 500 and req.status_code <= 599:
            raise APIServerError(data.get('message', "Unknown server error"))
        err_type = data.get('error')
        err_msg = data.get('message')
        raise APIError("Unknown error {}: {} ({})"
                       .format(req.status_code, err_type, err_msg))
    def build_url(self, url, **kwargs):
        """Resolve a '/path' against base_url and append kwargs as a query string."""
        if url.startswith('/'):
            url = self.base_url + url[1:]
        url = append_qs(url, **kwargs)
        return url
    def get(self, url, **kwargs):
        """GET a resource, serving from and filling the TTL cache."""
        url = self.build_url(url, **kwargs)
        if self._cache is not None:
            # Clear cache
            cache_limit = datetime.now() - self._cache_ttl
            for k, v in list(self._cache.items()):
                if v[0] < cache_limit:
                    del self._cache[k]
            if url in self._cache:
                return self._cache[url][1]
        data = self._query(requests.get, url)
        if self._cache is not None:
            self._cache[url] = (datetime.now(), data)
        return data
    def post(self, url, data, **kwargs):
        """POST `data` (JSON-encoded) to the given path."""
        url = self.build_url(url, **kwargs)
        return self._query(requests.post, url, data=data)
    def put(self, url, data, **kwargs):
        """PUT `data` (JSON-encoded) to the given path."""
        url = self.build_url(url, **kwargs)
        return self._query(requests.put, url, data=data)
    def patch(self, url, data, **kwargs):
        """PATCH `data` (JSON-encoded) to the given path."""
        url = self.build_url(url, **kwargs)
        return self._query(requests.patch, url, data=data)
    def delete(self, url, **kwargs):
        """DELETE the resource at the given path."""
        url = self.build_url(url, **kwargs)
        return self._query(requests.delete, url)
    def __str__(self):
        return "{self.public_key} on {self.base_url}".format(self=self)
    def __repr__(self):
        cn = self.__class__.__name__
        return "<{cn}({self!s})>".format(cn=cn, self=self)
| true |
4618cc4c1197a363bc5fae8a7cbc2876579f7478 | Python | Xanonymous-GitHub/main | /python/191213.py | UTF-8 | 304 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | line = list()
with open("index.txt") as f:
for x in f:
tmp = x.replace("\n", "").split(":")
line.append([tmp[0], list(map(int, tmp[1].split()))])
result = [[] for x in range(6)]
for x in line:
for y in x[1]:
result[y-1].append(x[0])
for x in result:
print(" ".join(x)) | true |
20bac10768af642f2cf507009a7214ba7077b5e8 | Python | uvacw/inca | /inca/rssscrapers/news_scraper.py | UTF-8 | 116,931 | 2.796875 | 3 | [] | no_license | import datetime
from lxml.html import fromstring
from inca.core.scraper_class import Scraper
from inca.scrapers.rss_scraper import rss
from inca.core.database import check_exists
import feedparser
import re
import logging
logger = logging.getLogger("INCA")
def polish(textstring):
    """Normalize article text: the lead (first line) is separated from the
    body by ' ||| ', and the remaining non-empty lines are joined by '||'."""
    lines = textstring.strip().split("\n")
    lead = lines[0].strip()
    body_parts = [line.strip() for line in lines[1:] if line.strip()]
    if body_parts:
        return (lead + " ||| " + "||".join(body_parts)).strip()
    return lead
class ad(rss):
    """Scrapes ad.nl"""

    def __init__(self):
        self.doctype = "ad (www)"
        self.rss_url = "http://www.ad.nl/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys
        Parameters
        ----
        htmlsource: string
            html retrived from RSS feed
        yields
        ----
        title    the title of the article
        category    sth. like economy, sports, ...
        teaser    the intro to the artcile
        text    the plain text of the article
        byline    the author, e.g. "Bob Smith"
        byline_source    sth like ANP
        image: images included in the article
        paywall_na    whether the text is behind a paywall
        """
        try:
            tree = fromstring(htmlsource)
        except:
            # Fixed: the old message referenced the undefined name `doc`
            # (NameError) and passed raw positional args to logger.warning.
            logger.warning("Could not parse HTML tree of type %s", type(htmlsource))
            # Fixed: return an (empty) dict, consistent with the success path,
            # instead of a 4-tuple.
            return {}
        paywall = tree.xpath(
            '//*[@class ="fjs-paywall--personal"] | //*[@class="photo__logo--paying"]'
        )
        if paywall:
            paywall_na = True
        else:
            paywall_na = False
        try:
            title = tree.xpath('//*/h1[@class="article__title"]//text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            category = tree.xpath('//*/a[@class="sub-nav__link"]//text()')[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        # 1. path: regular intro
        # 2. path: intro when in <b>; found in a2014 04 130
        try:
            teaser = tree.xpath(
                '//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro"]//span//text() | //*/p[@class="article__intro"]/span[@class="tag"]//text() | //*/p[@class="article__intro"]//b//text()'
            )[0]
        except:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # 1. path: regular text
        # 2. path: text with link behind (shown in blue underlined); found in 2014 12 1057
        # 3. path: second hadings found in 2014 11 1425
        try:
            text = " ".join(
                tree.xpath(
                    '//*/p[@class="article__paragraph"]//text() | //*/h2[@class="article__subheader"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except:
            # Fixed: `text` was previously left undefined here, which raised
            # NameError at polish(text) below.
            text = ""
            logger.debug("Could not parse article text")
        try:
            author_door = (
                tree.xpath('//*[@class="author"]/text()')[0]
                .strip()
                .lstrip("Bewerkt")
                .lstrip(" door:")
                .lstrip("Door:")
                .strip()
            )
        except:
            author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="author"]/a/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                # Fixed: was `author_door == ""` (a no-op comparison) instead
                # of an assignment.
                author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="article__source"]/span/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                author_door = ""
                logger.debug("Could not parse article author")
        try:
            brun_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", brun_text)[0]
        except:
            author_bron = ""
        text = polish(text)

        images = ad._extract_images(self, tree)

        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
            "paywall_na": paywall_na,
        }

        return extractedinfo

    def _extract_images(self, dom_nodes):
        # NOTE(review): the xpath below is absolute ('//'), so every child of
        # `dom_nodes` yields the same result set; kept as-is to preserve
        # existing scraping behavior.
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__figure"]//img')
            if len(img_list) > 0:
                img = img_list[0]
                image = {"url": img.attrib["src"]}
                #'height' : img.attrib['height'],
                #'width' : img.attrib['width'],
                #'caption' : _fon(element.xpath('.//p[@Class="imageCaption"]/text()'))
                # 'alt' : img.attrib['alt']}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        link = re.sub("/$", "", link)
        link = "http://www.ad.nl//cookiewall/accept?url=" + link
        return link
class nu(rss):
    """Scrapes nu.nl """

    def __init__(self):
        self.doctype = "nu"
        self.rss_url = "http://www.nu.nl/rss"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys
        Parameters
        ----
        htmlsource: string
            html retrived from RSS feed
        yields
        ----
        title    the title of the article
        category    sth. like economy, sports, ...
        teaser    the intro to the artcile
        text    the plain text of the article
        byline    the author, e.g. "Bob Smith"
        byline_source    sth like ANP
        image: images included in the article
        """
        tree = fromstring(htmlsource)
        # Fixed: the old code first stored the raw xpath result *list* in
        # `category` (missing [0]) and then re-extracted it correctly further
        # down; extract once, correctly.
        try:
            category = tree.xpath(
                '//*/li[@class=" active"]/a[@class="trackevent"]//text()'
            )[0]
        except:
            category = ""
            logger.debug("Could not parse article category.")
        try:
            teaser = tree.xpath('//*[@class="item-excerpt"]//text()')[0]
        except:
            logger.debug("Could not parse article teaser.")
            teaser = ""
        try:
            text = "||".join(
                tree.xpath(
                    '//*[@class="block-wrapper"]/div[@class="block-content"]/p//text()'
                )
            ).strip()
        except:
            text = ""
            logger.warning("Could not parse article text")
        try:
            # regular author-xpath:
            author_door = (
                tree.xpath('//*[@class="author"]/text()')[0]
                .strip()
                .lstrip("Door:")
                .strip()
            )
            if author_door == "":
                # xpath if link to another hp is embedded in author-info
                try:
                    author_door = (
                        tree.xpath('//*[@class="author"]/a/text()')[0]
                        .strip()
                        .lstrip("Door:")
                        .strip()
                    )
                except:
                    author_door = ""
                    logger.debug("Could not parse article author.")
        except:
            author_door = ""
            logger.debug("Could not parse article author.")
        author_bron = ""
        text = polish(text)
        try:
            title = tree.xpath("//h1/text()")[0].strip()
        except:
            # Fixed: was `title = None`, which crashed on title.strip() in
            # extractedinfo below.
            title = ""
            logger.warning("Could not parse article title.")

        images = nu._extract_images(self, tree)

        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }

        return extractedinfo

    def _extract_images(self, dom_nodes):
        # NOTE(review): absolute '//' xpath per child node; kept as-is to
        # preserve existing scraping behavior.
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//div[@class="particle headerimage"]//img')
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    #'height' : img.attrib['height'],
                    #'width' : img.attrib['width'],
                    #'caption' : _fon(element.xpath('.//p[@Class="imageCaption"]/text()'))
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
class nos(rss):
    """Scrapes nos.nl """

    def __init__(self):
        self.doctype = "nos (www)"
        self.rss_url = "http://feeds.nos.nl/nosnieuwsalgemeen"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys
        Parameters
        ----
        htmlsource: string
            html retrived from RSS feed
        yields
        ----
        title    the title of the article
        category    sth. like economy, sports, ...
        teaser    the intro to the artcile
        text    the plain text of the article
        byline    the author, e.g. "Bob Smith"
        byline_source    sth like ANP
        image: images included in the article
        """
        try:
            tree = fromstring(htmlsource)
        except:
            logger.error("HTML tree cannot be parsed")
            # Fixed: previously execution continued with `tree` undefined and
            # crashed with NameError; bail out with an empty result instead.
            return {}
        try:
            # `.text` can be None for an <h1> with only child elements.
            title = tree.xpath("//h1")[0].text or ""
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            category = "".join(tree.xpath('//*/a[@id="link-grey"]//text()'))
        except:
            category = ""
            # Fixed: log message said "title" for the category extraction.
            logger.debug("Could not parse article category")
        if category == "":
            try:
                category = "".join(
                    tree.xpath(
                        '//*[@id="content"]/article/header/div/div/div/div/div/div/span/a/text()'
                    )
                )
            except:
                category = ""
                logger.debug("Could not parse article category")
        try:
            teaser = tree.xpath('//*[@class="article_textwrap"]/p/em//text()')[0]
        except:
            logger.debug("Could not parse article teaser")
            teaser = ""
        try:
            text = " ".join(
                tree.xpath('//*[@class="article_textwrap"]/p//text()')
            ).strip()
        except:
            text = ""
            logger.warning("Could not parse article text")
        try:
            author_door = tree.xpath(
                '//*[@id="content"]/article/section/div/div/div/span/text()'
            )[0]
        except:
            author_door = ""
            logger.debug("Could not parse article source")
        author_bron = ""
        text = polish(text)

        images = nos._extract_images(self, tree)

        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }

        return extractedinfo

    def _extract_images(self, dom_nodes):
        images = []
        for element in dom_nodes:
            try:
                img = element.xpath(
                    '//figure[@class="article_head_image block_largecenter"]//img'
                )[0]
                image = {
                    "url": img.attrib["src"],
                    #'height' : img.attrib['height'],
                    #'width' : img.attrib['width'],
                    #'caption' : element.xpath(element.xpath('.//div[@Class="caption_content"]/text()')),
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            except IndexError:
                pass
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        # link=re.sub("/$","",link)
        # link="http://www.nos.nl//cookiesv2.publiekeomroep.nl/consent/all"+link
        # currently, there is no cookiewall in place, so we just return the link as it is
        return link
class volkskrant(rss):
    """Scrapes volkskrant.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "volkskrant (www)"
        self.rss_url = "http://www.volkskrant.nl/nieuws/rss.xml"
        self.version = ".1"
        # Date this scraper version was written.
        self.date = datetime.datetime(year=2016, month=8, day=2)
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth. like ANP
        images          images included in the article
        paywall_na      whether the text is behind a paywall

        NOTE(review): the bare ``except:`` clauses throughout silently
        mask *all* errors; kept as-is to preserve behavior.
        """
        tree = fromstring(htmlsource)
        # Presence of either paywall element marks the article as paywalled.
        paywall = tree.xpath(
            '//*[@class ="fjs-paywall--personal"] | //*[@class ="tm-paywall-overlay"]'
        )
        if paywall:
            paywall_na = True
        else:
            paywall_na = False
        try:
            # Several header layouts are tried in one xpath union.
            title = tree.xpath(
                '//*/h1[@class="artstyle__header-title artstyle__header-title--white artstyle__header-title--light"]//text() | //*/h1[@class="artstyle__header-title"]//text() | //*/h1[@class="artstyle__header-title artstyle__header-title--white"]//text() | //*[@class="artstyle__header-title artstyle__header-title--hero-bleed artstyle__header-title--light"]/text()'
            )[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            category = tree.xpath(
                '//*/span[@class="artstyle__labels__section"]//text()'
            )[0]
        except:
            category = ""
        if category == "":
            # Fallback for an alternative page layout.
            try:
                category = tree.xpath('//*[@class="action-bar__primary"]/a/text()')[0]
            except:
                category = ""
                logger.debug("Could not parse article category")
        try:
            teaser = tree.xpath(
                '//*/p[@class="artstyle__intro artstyle__intro--center"]//text() | //*/p[@class="artstyle__intro artstyle__intro--center"]/span//text() | //*/p[@class="artstyle__intro artstyle__intro--center"]/a//text() | //*/p[@class="artstyle__intro"]//text() | //*/p[@class="artstyle__intro"]//text()'
            )[0]
        except:
            logger.debug("Could not parse article teaser")
            teaser = ""
        try:
            # 1. path: regular textrest
            # 2. path: textrest version found in 2014 11 16
            # 3. path: second heading found in 2014 11 50
            # 4. path: text with link behind; found in 2014 10 2455(html-file-nr)
            # 5. path: old design regular text
            # 6. path: old design second heading
            # 7. path:old design text with link
            textrest = tree.xpath(
                '//*/p[@class="artstyle__text artstyle__text--drop-cap"]//text() | //*/p[@class="artstyle__text"]//text() | //*/h3[@class="artstyle__title"]//text()'
            )
        except:
            logger.warning("Could not parse article text")
            textrest = ""
        text = "\n".join(textrest)
        try:
            author_door = tree.xpath('//*/a[@class="artstyle__byline__author"]/text()')[
                0
            ]
        except:
            author_door = ""
        if author_door == "":
            # Fallback: strip the "Bewerkt door:" / "Door:" prefix from
            # the meta field of the newer layout.
            try:
                author_door = (
                    " ".join(
                        tree.xpath(
                            '//*[@class="article__meta--v2"]/span/span[2]/text()'
                        )
                    )
                    .strip()
                    .lstrip("Bewerkt")
                    .lstrip(" door:")
                    .lstrip("Door:")
                )
            except:
                logger.debug("Could not parse article author")
        try:
            author_bron = (
                " ".join(tree.xpath('//*/span[@class="article__meta"][*]/text()'))
                .strip()
                .lstrip("Bron:")
                .strip()
            )
            # yields the second field, e.g. "Bron: ANP"
        except:
            author_bron = ""
        if author_bron == "":
            try:
                author_bron = (
                    " ".join(
                        tree.xpath('//*/span[@class="author-info__source"]/text()')
                    )
                    .strip()
                    .lstrip("- ")
                    .lstrip("Bron: ")
                    .strip()
                )
            except:
                author_bron = ""
        if author_bron == "":
            # Older layout: source is embedded in the second "time_post" text.
            try:
                bron_text = tree.xpath('//*[@class="time_post"]/text()')[1].replace(
                    "\n", ""
                )
                author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
            except:
                author_bron = ""
        if author_bron == "":
            try:
                bron_text = tree.xpath('//*[@class="time_post"]/text()')[0].replace(
                    "\n", ""
                )
                author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
            except:
                author_bron = ""
        if author_bron == "":
            try:
                bron_text = tree.xpath(
                    '//*[@class="article__meta--v2"]/span/text()'
                )[0].replace("\n", "")
                author_bron = re.findall(".*?Bron:(.*)", bron_text)[0]
            except:
                author_bron = ""
                logger.debug("Could not parse article byline source")
        if author_door == "" and author_bron == "" and category == "Opinie":
            # Opinion pieces without a recognised byline get a marker value.
            author_door = "OPINION PIECE OTHER AUTHOR"
        text = polish(text)
        images = volkskrant._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
            "paywall_na": paywall_na,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url', 'alt'} dicts for article/gallery photos.

        NOTE(review): the ``else: images = []`` branch resets the whole
        list whenever an element yields no match; since the xpath is
        absolute this appears harmless here, but it looks unintended.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath(
                '//figure[@class="article-photo fjs-gallery-item"]//img | //figure[@class="top-media--back fjs-gallery-item"]//img'
            )
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    #'height' : img.attrib['height'],
                    #'width' : img.attrib['width'],
                    #'caption' : _fon(element.xpath('.//p[@Class="imageCaption"]/text()'))}
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
    def getlink(self, link):
        """Strip the cookie-wall wrapper from the link, if present."""
        if "cookiewall" in link:
            link = link.split("url=", 1)[1]
        else:
            link = link
        return link
class nrc(rss):
    """Scrapes nrc.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "nrc (www)"
        self.rss_url = "http://www.nrc.nl/rss.php?n=np"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=9, day=10)
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth. like ANP (always empty for this scraper)
        images          images included in the article
        """
        tree = fromstring(htmlsource)
        try:
            title = tree.xpath(
                '//*[@class="center-block intro-col article__header"]/h1/text() | //*[@class="liveblog__header__inner"]/h1/text()'
            )[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            category = tree.xpath(
                '//*[@id="broodtekst"]/a[1]/text() | //*[@class="article__flag"]//text() | //*[@class="keyword"]//text()'
            )[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        if category == "":
            # Fallback for the newer section-branding layout.
            try:
                category = tree.xpath('//*[@class="article__section-branding"]/text()')[
                    0
                ]
            except:
                category = ""
        try:
            teaser = tree.xpath(
                '//*[@class="intro article__intro"]/p//text() | //*[@class="intro article__intro"]//text()'
            )[0]
        except:
            logger.info("OOps - geen eerste alinea?")
            teaser = ""
        text = " ".join(
            tree.xpath(
                '//*[@class="content article__content"]/p//text() | //*[@class="content article__content"]/h2//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # BUG FIX: the cleaned string used to be assigned to `textnew` and
        # then discarded, so "Follow @nrc_opinie" leaked into the stored
        # article text. Apply the substitution to `text` itself.
        text = re.sub("Follow @nrc_opinie", "", text)
        try:
            author_door = tree.xpath('//*[@class="author"]/span/a/text()')[0]
        except:
            author_door = ""
        if author_door == "":
            try:
                author_door = tree.xpath('//*[@class="auteur"]/span/a/text()')[0]
            except:
                author_door = ""
        if author_door == "":
            try:
                author_door = tree.xpath('//*[@class="authors"]/ul/li/text()')[0]
            except:
                author_door = ""
        if author_door == "":
            try:
                author_door = tree.xpath(
                    '//*[@class="article__byline__author-and-date"]/a/text()'
                )[0]
            except:
                author_door = ""
        if author_door == "":
            try:
                author_door = tree.xpath(
                    '//*[@class="content article__content"]/span[@class="byline"]//text()'
                )[0]
            except:
                author_door = ""
        author_bron = ""
        if text == "" and category == "" and author_door == "":
            # Nothing was extracted at all: probably not an article page,
            # or a subscribers-only teaser page.
            logger.debug("No article-page?")
            try:
                if (
                    tree.xpath('//*[@class="kies show clearfix"]/h2/text()')[0]
                    == "Lees dit hele artikel"
                ):
                    text = "THIS SEEMS TO BE AN ARTICLE ONLY FOR SUBSCRIBERS"
                    logger.warning("This seems to be a subscribers-only article")
            except:
                text = ""
                logger.warning("Could not parse article text")
        text = polish(text)
        images = nrc._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url', 'alt'} dicts for responsive article images.

        NOTE(review): the ``else: images = []`` branch resets the list when
        an element has no match; kept as-is to preserve behavior.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath(
                '//*[@class="responsive-img-div img-b1bc3f75894aebe980b93536058622c9 loaded"]//img | //*[@class="responsive-img-div__click-catcher"]//img'
            )
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
class parool(rss):
    """Scrapes parool.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "parool (www)"
        self.rss_url = "http://www.parool.nl/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)
    def parseurl(self, url):
        """Extract the article category from the (cookie-wall-wrapped) URL.

        Returns {'category': str}; empty string when no category segment
        can be recognised in the URL path.
        """
        # BUG FIX: str.lstrip() strips a *character set*, not a prefix, so
        # the old code also ate leading letters of the real URL (e.g. the
        # category "economie" became "mie..."). Remove the cookie-wall
        # prefix explicitly instead.
        prefix = "http://www.parool.nl///cookiewall/accept?url="
        if url.startswith(prefix):
            link = url[len(prefix):]
        else:
            link = url
        try:
            # Single-word category segment, e.g. "/sport/".
            category = re.findall("/+[a-z]+/", link)[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        if category == "":
            # Hyphenated three-word category segment, e.g. "/a-b-c/".
            try:
                category = re.findall("/+[a-z]+-+[a-z]+-+[a-z]+/", link)[0]
            except:
                category = ""
                logger.debug("Could not parse article category")
        category = category.replace("/", "")
        return {"category": category}
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title           the title of the article
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth. like ANP
        images          images included in the article
        paywall_na      whether the text is behind a paywall
        """
        tree = fromstring(htmlsource)
        paywall = tree.xpath('//*[@class ="fjs-paywall--personal"]')
        if paywall:
            paywall_na = True
        else:
            paywall_na = False
        try:
            title = tree.xpath('//*/h1[@class="article__title"]//text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        category = ""
        try:
            teaser = (
                tree.xpath('//*/p[@class="article__intro"]')[0].text_content().strip()
            )
        except:
            teaser = ""
            logger.debug("Could not parse article teaser")
        text = " ".join(
            tree.xpath(
                '//*/p[@class="article__body__paragraph first"]//text() | //*/p[@class="article__body__paragraph"]//text() | //*/h2[@class="article__body__title"]//text()'
            )
        ).strip()
        author_text = tree.xpath('//*[@class=" article__author"]//text()')
        try:
            # Author field looks like "(Door: Some Name)".
            author_door = (
                [e for e in author_text if e.find("Door") >= 0][0]
                .strip()
                .replace("(", "")
                .replace(")", "")
                .replace("Door:", "")
            )
        except:
            author_door = ""
        if author_door == "":
            try:
                author_door = (
                    [e for e in author_text if e.find("Bewerkt door:") >= 0][0]
                    .strip()
                    .replace("(", "")
                    .replace(")", "")
                    .replace("Bewerkt door:", "")
                )
            except:
                author_door = ""
                logger.debug("Could not parse article author")
        try:
            bron_text = tree.xpath(
                '//*[@id="page-main-content"]//*[@class="article__footer"]/span/span/text()'
            )[0]
            author_bron = re.findall(".*?Bron:(.*)", bron_text)[0]
        except:
            author_bron = " "
        if author_bron == "":
            try:
                bron_text = tree.xpath('//*/span[@class="author-info__source"]/text()')[
                    0
                ]
                author_bron = re.findall(".*?Bron:(.*)", bron_text)[0]
            except:
                author_bron = ""
                logger.debug("Could not parse byline source")
        text = polish(text)
        images = parool._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "teaser": teaser,
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
            "paywall_na": paywall_na,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url', 'alt'} dicts for article photos.

        NOTE(review): the ``else`` branch resets the list when an element
        has no match; kept as-is to preserve behavior.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath(
                '//figure[@class="article-photo fjs-gallery-item"]//img'
            )
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
    def getlink(self, link):
        """Wrap the article link so the request passes the cookie wall."""
        link = re.sub("/$", "", link)
        link = "http://www.parool.nl///cookiewall/accept?url=" + link
        return link
class trouw(rss):
    """Scrapes trouw.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "trouw (www)"
        self.rss_url = "http://www.trouw.nl/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth. like ANP
        images          images included in the article
        paywall_na      whether the text is behind a paywall

        NOTE(review): on failure, teaser/author fall back to " " (a single
        space), unlike most sibling scrapers which use ""; kept as-is.
        """
        tree = fromstring(htmlsource)
        paywall = tree.xpath('//*[@class ="paywall-notice__body"]')
        if paywall:
            paywall_na = True
        else:
            paywall_na = False
        try:
            title = tree.xpath('//*/h1[@class="article__header__title"]/text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            teaser = tree.xpath(
                '//*/p[@class="article__introduction__text"]//text() | //*/section[@class="article__introduction layout__stage--center"]//text()'
            )[0]
        except:
            teaser = " "
            logger.debug("Could not parse article teaser")
        try:
            category = tree.xpath(
                '//*[@id="subnav_nieuws"]/li/a/span/text() | //*/a[@class="article__header__meta__section-link"]//text()'
            )[0]
        except:
            category = ""
        if category == "":
            # Fallback: dossier pages (film).
            try:
                category = tree.xpath(
                    '//*[@id="str_cntr2"]//*[@class="dos_default dos_film"]/h2/text()'
                )[0]
            except:
                category = ""
        if category == "":
            # Fallback: dossier pages (vluchtelingen).
            try:
                category = tree.xpath(
                    '//*[@id="str_cntr2"]//*[@class="dos_default dos_vluchtelingen"]/span/text()'
                )[0]
            except:
                category = ""
                logger.debug("Could not parse article category")
        try:
            # 1. Regular text - intro
            # 2. Bold text - subtitles
            # 3. Regular text
            # 4. Extra box title
            # 5. Extra box text
            # 6. Link text
            # 7. Explanantion box text
            # 8. italics
            textrest = tree.xpath(
                '//*[@class="article__section-title__text heading-3"]/text() | //*/p[@class="article__paragraph"]//text() | //*/figcaption[@class="article__photo__caption"]//text() | //*[@class="article__paragraph"]/text() | //*[@class="article__quote__text"]/text() | //*[@class="article__framed-text__title"]/text() | //*[@id="art_box2"]/section/p/text() | //*[@id="art_box2"]/p/a/text() | //*[@id="art_box2"]//*[@class="embedded-context embedded-context--inzet"]/text() | //*[@id="art_box2"]/p/em/text()'
            )
        except:
            textrest = " "
            logger.warning("Could not parse article text")
        text = "\n".join(textrest)
        try:
            author_door = tree.xpath(
                '//*[@class="author"]/text() | //*/strong[@class="article__header__meta__author"]/text()'
            )[0]
        except:
            author_door = " "
            logger.debug("Could not parse article author")
        try:
            # Source is embedded in the second "time_post" text node.
            bron_text = tree.xpath('//*[@class="time_post"]/text()')[1].replace(
                "\n", ""
            )
            author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
        except:
            author_bron = ""
        if author_bron == "":
            try:
                bron_text = tree.xpath('//*[@class="time_post"]/text()')[0].replace(
                    "\n", ""
                )
                author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
            except:
                author_bron = ""
                logger.debug("Could not parse byline source")
        text = polish(text)
        images = trouw._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
            "paywall_na": paywall_na,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url'} dicts for the article cover image.

        NOTE(review): the ``else: images = []`` branch resets the list when
        an element has no match; kept as-is to preserve behavior.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath(
                '//figure[@class="article__cover layout__stage--center"]//img'
            )
            if len(img_list) > 0:
                img = img_list[0]
                image = {"url": img.attrib["src"]}
                #'height' : img.attrib['height'],
                #'width' : img.attrib['width'],
                #'caption' : _fon(element.xpath('.//p[@Class="imageCaption"]/text()'))}
                # 'alt' : img.attrib['alt']}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
    def getlink(self, link):
        """Wrap the article link so the request passes the cookie wall."""
        link = re.sub("/$", "", link)
        link = "http://www.trouw.nl/cookiewall/accept?url=" + link
        return link
class telegraaf(rss):
    """Scrapes telegraaf.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "telegraaf (www)"
        self.rss_url = "http://www.telegraaf.nl/rss"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article (paragraphs joined by "||")
        byline          the author, e.g. "Bob Smith"
        byline_source   sth. like ANP (always empty for this scraper)
        images          images included in the article
        paywall_na      whether the text is behind a paywall
        """
        tree = fromstring(htmlsource)
        paywall = tree.xpath(
            '//*[@class ="bg-premium all-paddings-6"] | //*/div[@class = "MeteringNotification"] | //*/div[@class = "PremiumLabelWithLine__body TextArticlePage__premiumLabel"]'
        )
        if paywall:
            paywall_na = True
        else:
            paywall_na = False
        try:
            # Union of title xpaths across several site redesigns.
            title = tree.xpath(
                '//*/h1[@class="article-title playfair-bold-l no-top-margin no-bottom-margin gray1"]/text()|//*/h1[@class="article-title playfair-bold-l playfair-bold-xl--m playfair-bold-g--l no-top-margin no-bottom-margin gray1"]/text()|//*/h2[@class="ui-tab-gothic-bold ui-text-medium"]/text() | //*/h1[@class="ui-stilson-bold ui-text-large ui-break-words ui-dark3 ui-no-top-margin ui-bottom-margin-2 ui-top-padding-2"]/text()|//*/h2[@class="no-top-margin bottom-margin-3 bottom-margin-4--l roboto-black-l roboto-black-xl--l gray2"]/text() | //*/h1[@class="ArticleTitle__title"]/text() | //*/h1[@class="ArticleTitleBlock__title"]/text() | //*/h1[@class="videoTopBlock__titleWrapper"]/text()'
            )[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            category = tree.xpath(
                '//*/a[@class="inline-block gray1 roboto-black-s uppercase-text no-underline bottom-padding-1 bottom-border-thin"]/text()'
            )[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        try:
            # One intro xpath per section variant (nieuws, sport, ...).
            teaser = tree.xpath(
                '//*/p[@class="abril-bold no-top-margin"]//text() | //*/p[@class="ArticleIntroBlock__paragraph ArticleIntroBlock__paragraph--nieuws"]/span/text() | //*/p[@class="ArticleIntroBlock__paragraph ArticleIntroBlock__paragraph--entertainment"]/span/text() | //*/p[@class="ArticleIntroBlock__paragraph ArticleIntroBlock__paragraph--financieel"]/span/text() | //*/p[@class="ArticleIntroBlock__paragraph ArticleIntroBlock__paragraph--lifestyle"]/span/text() | //*/p[@class="ArticleIntroBlock__paragraph ArticleIntroBlock__paragraph--vrouw"]/span/text() | //*/p[@class="ArticleIntroBlock__paragraph ArticleIntroBlock__paragraph--sport"]/span/text() | //*/p[@class="ArticleIntroBlock__paragraph ArticleIntroBlock__paragraph--watuzegt"]/span/text()'
            )[0]
        except:
            logger.debug("Could not parse article teaser")
            teaser = ""
        try:
            # Paragraphs are joined with "||" as a separator.
            text = "||".join(
                tree.xpath(
                    '//*/p[@class="false bottom-margin-6"]//text() | //*/p[@class="false bottom-margin-6"]/span[class="bold"]//text() | //*[@class="ArticleBodyHtmlBlock__body"]/text() | //*/p[@class="ArticleBodyBlocks__paragraph ArticleBodyBlocks__paragraph--nieuws"]/text() | //*/p[@class="ArticleBodyBlocks__paragraph ArticleBodyBlocks__paragraph--sport"]/text() | //*/p[@class="ArticleBodyBlocks__paragraph ArticleBodyBlocks__paragraph--entertainment"]/text() | //*/p[@class="ArticleBodyBlocks__paragraph ArticleBodyBlocks__paragraph--financieel"]/text() | //*/p[@class="ArticleBodyBlocks__paragraph ArticleBodyBlocks__paragraph--lifestyle"]/text() | //*/p[@class="ArticleBodyBlocks__paragraph ArticleBodyBlocks__paragraph--vrouw"]/text() | //*/p[@class="ArticleBodyBlocks__paragraph ArticleBodyBlocks__paragraph--watuzegt"]/text()'
                )
            ).strip()
        except:
            text = ""
            logger.warning("Could not parse article text")
        # if text.strip() == "":
        #    logger.warning("Trying alternative method....")
        # htmlsource has text included like so: "articleBody":"HERE IS THE TEXT.","author":
        try:
            author_door = (
                tree.xpath(
                    '//*[@class="auteur"]/text() | //*[@class="ui-table ui-gray3"]/span[2]/text()'
                )[0]
                .strip()
                .lstrip("Van ")
                .lstrip("onze")
                .lstrip("door")
                .strip()
            )
        except:
            author_door = ""
            logger.debug("Could not parse article source")
        author_bron = ""
        text = polish(text)
        images = telegraaf._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url'} dicts for article images.

        URLs are taken from the responsive ``srcset`` attribute, truncated
        at the first .jpg/.png, and prefixed with the site base URL
        (``self.rss_url`` minus its trailing "/rss").

        NOTE(review): both failure branches reset the *whole* list, not
        just the current element; kept as-is to preserve behavior.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath(
                '//*[@class="__picture picture height-100 absolute top-left-corner width-100 no-borders"]//img | //div[@class="FluidImage__contentWrapper FluidImage__contentWrapper--placeholder"]/img'
            )
            if len(img_list) > 0:
                img = img_list[0]
                try:
                    image_source = img.attrib["srcset"]
                    if ".jpg" in image_source:
                        image_source = image_source.split(".jpg", 1)[0] + ".jpg"
                    elif "png" in image_source:
                        image_source = image_source.split(".png", 1)[0] + ".png"
                    else:
                        pass
                    image = {
                        # rss_url[:-4] drops the trailing "/rss" to get the site root.
                        "url": self.rss_url[:-4] + image_source,
                        #'height' : img.attrib['height'],
                        #'width' : img.attrib['width'],
                        #'caption' : _fon(element.xpath('.//p[@Class="imageCaption"]/text()'))
                        #'alt' : img.attrib['alt']
                    }
                    if image["url"] not in [i["url"] for i in images]:
                        images.append(image)
                except:
                    images = []
            else:
                images = []
        return images
class metro(rss):
    """Scrapes metronieuws.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "metro (www)"
        self.rss_url = "http://www.metronieuws.nl/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title           the title of the article
        category        sth. like economy, sports, ...
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth. like ANP (always empty for this scraper)
        images          images included in the article
        """
        tree = fromstring(htmlsource)
        try:
            title = tree.xpath(
                '//*[@class="row"]/h1/text() | //*[@class="css-1edvzmo css-b21mdf"]/text()'
            )[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            category = tree.xpath(
                '//*[@class="active"]/text() | //*/a[@title class="active"]/text()'
            )[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        # fix: xpath for category in new layout leads to a sentence in old layout:
        if len(category.split(" ")) > 1:
            category = ""
        try:
            # 1. path: regular text
            # 2. path: text with link behind, fi 2014 12 646
            # 3. path: italic text, fi 2014 12 259
            # 4. path: second headings, fi 2014 12 222
            # 5. path: another version of regualr formated text, fi 2014 12 1558
            # 6. path: another version a second heading, fi 2014 12 1923
            # 7. path: italic text with link behind in span environment, fi 2014 11 540
            # 8. path: italic text with link behind, not in span evir, fi 2014 10 430
            # --until here code is just copied from spits
            # 10. path: bold and italic text, fi 2014 12 04
            # 11. path: bold text, fi 2014 12 04
            # 12. path: second headings
            # 13. path: regular text
            # 14. path: new layout. TO DO: -- also includes "invisible" text after 'bekijk ook'
            textrest = tree.xpath(
                '//*[@class="field-item even"]/p/text() | //*[@class="field-item even"]/p/a/text() | //*[@class="field-item even"]/p/em/text() | //*[@class="field-item even"]/h2/text() | //*[@class="field-item even"]/p/span/text() | //*[@class="field-item even"]/h2/span/text() | //*[@class="field-item even"]/p/span/em/a/text() | //*[@class="field-item even"]/p/em/a/text() | //*[@class="field-item even"]/p/em/strong/text() | //*[@class="field-item even"]/p/b/text() | //*[@class="field-item even"]/div/text() | //*[@class="field-item even"]/p/strong/text() | //*[@class="css-1uqapas "]/p[@class="css-1qs30e6"]/descendant-or-self::text()'
            )
        except:
            logger.debug("Could not parse article text")
            textrest = ""
        text = "\n".join(textrest)
        # Strip the "Lees ook:" ("read also") cross-reference marker.
        text = re.sub("Lees ook:", " ", text)
        try:
            # new layout author:
            author_door = (
                tree.xpath('//*[@class="username"]/text()')[0]
                .strip()
                .lstrip("door ")
                .lstrip("© ")
                .lstrip("2014 ")
                .strip()
            )
        except:
            author_door = ""
            logger.debug("Could not parse article source")
        if author_door == "":
            # try old layout author
            try:
                author_door = (
                    tree.xpath('//*[@class="article-options"]/text()')[0]
                    .split("|")[0]
                    .replace("\n", "")
                    .replace("\t", "")
                    .strip()
                )
            except:
                author_door = ""
        author_bron = ""
        text = polish(text)
        images = metro._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url', 'alt'} dicts for article images.

        NOTE(review): the ``else: images = []`` branch resets the list when
        an element has no match; kept as-is to preserve behavior.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//*[@class="image row"]//img')
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    #'height' : img.attrib['height'],
                    #'width' : img.attrib['width'],
                    #'caption' : _fon(element.xpath('.//p[@Class="imageCaption"]/text()'))
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
class geenstijl(rss):
    """Scrapes geenstijl.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "geenstijl"
        self.rss_url = "https://www.geenstijl.nl/feeds/recent.atom"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=9, day=15)
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title     the title of the article
        teaser    the intro to the article
        text      the plain text of the article
        byline    the author, e.g. "Bob Smith"
        images    images included in the article
        """
        tree = fromstring(htmlsource)
        textrest = tree.xpath(
            '//*[@class="article_content"]/p//text() | //*[@class="article_content"]/p/strong//text() | //*[@class="article_content"]/p/em//text() | //*/h2[@class="content-title"]//text()'
        )
        # BUG FIX: xpath() returns a list, so the old `textrest == ""`
        # comparison was always False and the warning never fired; test
        # for an empty result instead.
        if not textrest:
            logger.warning("Could not parse article text")
        text = "\n".join(textrest)
        try:
            title = tree.xpath('//*[@class="col-xs-12"]/h1/text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            teaser = tree.xpath('//*[@class="article-intro"]/p/text()')[0]
        except:
            teaser = ""
            logger.warning("Could not parse article teaser")
        try:
            author_door = tree.xpath(
                '//*[@class="col-xs-12 col-sm-7"]/a[@rel="author"]//text()'
            )[0].replace("|", "")
        except:
            author_door = ""
            logger.warning("Could not parse article source")
        text = polish(text)
        images = geenstijl._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "images": images,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url', 'alt'} dicts for article images.

        NOTE(review): the ``else: images = []`` branch resets the list when
        an element has no match; kept as-is to preserve behavior.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//*[@class="article_img_container"]//img')
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
    def getlink(self, link):
        """Wrap the article link so the request passes the cookie wall."""
        link = re.sub("/$", "", link)
        link = "https://www.geenstijl.nl%2Fsetcookie.php?t=" + link
        return link
class fok(rss):
    """Scrapes fok.nl """
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "fok"
        self.rss_url = "http://rss.fok.nl/feeds/nieuws"
        self.version = ".1"
        self.date = datetime.datetime(year=2016, month=8, day=2)
    def parsehtml(self, htmlsource):
        """Parse the article HTML for fields that are not in the RSS keys.

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        Returns a dict with:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth. like ANP
        images          images included in the article
        """
        tree = fromstring(htmlsource)
        try:
            # BUG FIX: the xpath result list was stored as-is; take the
            # first hit so `title` is a string like in the sibling scrapers.
            title = tree.xpath('//*/header[@class="hasHidden"]/h1/text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # BUG FIX: XPath positions are 1-based, so the old p[0] could
            # never match; p[1] selects the first paragraph as intended,
            # and [0] makes `teaser` a string instead of a list.
            teaser = tree.xpath('//*/article[@class="single"]/p[1]//text()')[0]
        except:
            teaser = ""
            logger.debug("Could not parse article teaser")
        try:
            category = "".join(tree.xpath('//*[@id="crumbs"]/ul/li/a/text()'))
        except:
            category = ""
            # BUG FIX: was logger.category(...), which raised AttributeError
            # inside this except handler.
            logger.debug("Could not parse article category")
        # A multi-word "category" means the xpath matched a sentence, not a label.
        if len(category.split(" ")) > 1:
            category = ""
        try:
            textrest = tree.xpath(
                '//*/article[@class="single"]/p//text() | //*/article[@class="single"]/p/em//text() | //*[@role="main"]/article/p//text() | //*[@role="main"]/article/p/strong//text() | //*[@role="main"]/article/p/strong/a//text() | //*[@role="main"]/article/p/a//text() | //*[@role="main"]/article/p/em//text() | //*[@id="mainContent"]//*[@role="main"]/article/p//text() | //*[@id="mainContent"]/div[5]/main/article/p//text()'
            )
        except:
            logger.warning("Could not parse article text")
            textrest = ""
        text = "\n".join(textrest)
        try:
            author_door = tree.xpath('//*[@class="mainFont"]/text()')[0].strip()
        except:
            author_door = ""
            logger.debug("Could not parse article source")
        if author_door == "":
            # Fallback: old layout puts the author before the first "|".
            try:
                author_door = (
                    tree.xpath('//*[@class="article-options"]/text()')[0]
                    .split("|")[0]
                    .replace("\n", "")
                    .replace("\t", "")
                    .strip()
                )
            except:
                author_door = ""
        try:
            author_bron = tree.xpath('//*[@class="bron"]/strong/text()')[0]
        except:
            author_bron = ""
        if author_bron == "":
            try:
                author_bron = tree.xpath('//*[@class="bron"]/strong/a/text()')[0]
            except:
                author_bron = ""
                logger.debug("Could not parse article source byline")
        # BUG FIX: was `textnew = polish(textnew)` — `textnew` was never
        # defined, so every call crashed with a NameError before returning.
        text = polish(text)
        images = fok._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Return unique {'url', 'alt'} dicts for images in the first column.

        NOTE(review): the ``else: images = []`` branch resets the list when
        an element has no match; kept as-is to preserve behavior.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//*[@class="col-4 first"]//img')
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            else:
                images = []
        return images
# NOTE(review): this __main__ guard sits mid-file; the class definitions
# below it (e.g. destentor) are still defined on import, but when the module
# is executed directly this message prints before they are reached.
if __name__ == "__main__":
    print("Please use these scripts from within inca. EXAMPLE: BLA BLA BLA")
class destentor(rss):
"""Scrapes destentor.nl"""
    def __init__(self):
        # Identifier stored with every scraped document.
        self.doctype = "destentor (www)"
        # RSS feed listing the articles to fetch.
        self.rss_url = "http://www.destentor.nl/home/rss.xml"
        self.version = ".1"
        # Date this scraper version was written.
        self.date = datetime.datetime(year=2017, month=5, day=3)
def parsehtml(self, htmlsource):
"""
Parses the html source to retrieve info that is not in the RSS-keys
Parameters
----
htmlsource: string
html retrived from RSS feed
yields
----
title the title of the article
category sth. like economy, sports, ...
teaser the intro to the artcile
text the plain text of the article
byline the author, e.g. "Bob Smith"
byline_source sth like ANP
image: images included in the article
"""
try:
tree = fromstring(htmlsource)
except:
logger.warning("Could not parse HTML tree", type(doc), len(doc))
# print(doc)
return ("", "", "", "")
try:
title = tree.xpath('//*/h1[@class="article__title"]/text()')[0]
except:
title = ""
logger.warning("Could not parse article title")
try:
category = tree.xpath(
'//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
)[0]
except:
category = ""
logger.debug("Could not parse article category")
try:
teaser = " ".join(
tree.xpath(
'//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
)
).strip()
# teaser = tree.xpath('//*/p[@class="article__intro"]/text() | //*/p[@class="article__intro"]/span[@class="tag"]/text() | //*/p[@class="article__intro"]/span/text() | //*/p[@class="article__intro"]/span/b/text() | //*/p[@class="article__intro"]/b/text() | //*/p[@class="article__intro video"]/text() | //*/p[@class="article__intro video"]/span/text() | //*/p[@class="article__intro video"]/span/a/text()')[0]
except:
teaser = ""
logger.debug("Could not parse article teaser")
# 1. path: regular text
# 2. path: text with link behind (shown in blue underlined);
# 3. path: second headings
# 4. path: span paragraphs
# 5. path: bold paragraph headings
# 6. path: live blogs time
# 7. path: live blogs intro or heading
# 8. path: live blogs body text
# 9. path: live blogs strong body text
# 10. path: live blogs link body text
text = " ".join(
tree.xpath(
'//*/p[@class="article__paragraph"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
)
).strip()
# text = tree.xpath('//*/p[@class="article__paragraph"]/text() | //*/p[@class="article__paragraph"]/a/text() | //*/p[@class="article__paragraph"]/h2/text() | //*/p[@class="article__paragraph"]/span/text() | //*/p[@class="article__paragraph"]/b/text() | //*/time[@class="liveblog__time-text"]/text() | //*/p[@class="liveblog__intro"]/text() | //*/p[@class="liveblog__paragraph"]/text() | //*/p[@class="liveblog__paragraph"]/strong/text() | //*/p[@class="liveblog__paragraph"]/a/text()')
if text == "":
logger.warning("Could not parse article text")
try:
author_door = tree.xpath(
'//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()'
)[0]
except:
author_door = ""
logger.debug("Could not parse article source")
if author_door == "":
try:
author_door = (
tree.xpath('//*[@class="author"]/a/text()')[0]
.strip()
.lstrip("Door:")
.strip()
)
except:
author_door == ""
if author_door == "":
try:
author_door = (
tree.xpath('//*[@class="article__source"]/span/text()')[0]
.strip()
.lstrip("Door:")
.strip()
)
except:
author_door = ""
logger.debug("Could not parse article source")
try:
brun_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
author_bron = re.findall(".*?bron:(.*)", brun_text)[0]
except:
author_bron = ""
images = destentor._extract_images(self, tree)
extractedinfo = {
"title": title.strip(),
"category": category.strip(),
"teaser": teaser.strip(),
"text": text.strip(),
"byline": author_door.replace("\n", " "),
"byline_source": author_bron.replace("\n", " ").strip(),
"images": images,
}
return extractedinfo
def _extract_images(self, dom_nodes):
images = []
for element in dom_nodes:
img_list = element.xpath('//figure[@class="article__figure"]//img')
if len(img_list) > 0:
img = img_list[0]
image = {"url": img.attrib["src"]}
#'height' : img.attrib['height'],
#'width' : img.attrib['width'],
#'caption' : _fon(element.xpath('.//p[@Class="imageCaption"]/text()'))
# 'alt' : img.attrib['alt']}
if image["url"] not in [i["url"] for i in images]:
images.append(image)
else:
images = []
return images
def getlink(self, link):
"""modifies the link to the article to bypass the cookie wall"""
link = re.sub("/$", "", link)
link = "http://www.destentor.nl///cookiewall/accept?url=" + link
return link
# Local newspapers
class bd(rss):
    """Scrapes bd.nl"""

    def __init__(self):
        self.doctype = "bd (www)"
        self.rss_url = "http://www.bd.nl/home/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=9)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from RSS feed

        Returns
        ----
        dict with keys:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the artcile
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article

        On an unparsable HTML source a 4-tuple of empty strings is
        returned (kept for backward compatibility with existing callers).
        """
        try:
            tree = fromstring(htmlsource)
        except Exception:
            # BUGFIX: the original logged and printed the undefined name
            # `doc`, raising a NameError instead of the intended warning.
            logger.warning("Could not parse HTML tree (len=%s)", len(htmlsource))
            return ("", "", "", "")
        try:
            title = tree.xpath('//*/h1[@class="article__title"]/text()')[0]
        except Exception:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1st path: normal articles; 2nd path: video articles;
            # 3rd path: articles that are tagged 'Home'.
            category = tree.xpath(
                '//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
            )[0]
        except Exception:
            category = ""
            logger.debug("Could not parse article category")
        try:
            # Intro paragraph of regular and of video articles.
            teaser = " ".join(
                tree.xpath(
                    '//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except Exception:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Regular article paragraphs plus live-blog timestamps, intros and
        # paragraphs, joined into one plain-text string.
        text = " ".join(
            tree.xpath(
                '//*/p[@class="article__paragraph"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Try the author ("door") locations in order of specificity; later
        # fallbacks carry a "Door:" prefix that must be removed.
        # BUGFIX: the original's `except: author_door == ""` *compared*
        # instead of assigning, and `lstrip("Door:")` stripped a character
        # set (mangling names such as "Doornbos").
        author_door = ""
        for query, has_prefix in (
            (
                '//*/span[@class="article__source"]/b/text() | //*/span[@class="article__source"]/span/text()| //*/p[@class="article__paragraph"]/b/i/text()',
                False,
            ),
            ('//*[@class="author"]/a/text()', True),
            ('//*[@class="article__source"]/span/text()', True),
        ):
            try:
                candidate = tree.xpath(query)[0]
            except Exception:
                continue
            if has_prefix:
                candidate = re.sub(r"^\s*Door:\s*", "", candidate).strip()
            if candidate:
                author_door = candidate
                break
        if author_door == "":
            logger.debug("Could not parse article author")
        try:
            bron_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
        except Exception:
            author_bron = ""
            logger.debug("Could not parse article source byline")
        images = self._extract_images(tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo

    def _extract_images(self, dom_nodes):
        """Collect unique article images (by URL) from the parsed tree.

        BUGFIX: the original reset the accumulated list to [] whenever an
        element without a matching <img> was seen, discarding images that
        had already been collected.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__figure"]//img')
            if img_list:
                image = {"url": img_list[0].attrib["src"]}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        link = re.sub("/$", "", link)
        link = "http://www.bd.nl///cookiewall/accept?url=" + link
        return link
class gelderlander(rss):
    """Scrapes gelderlander.nl"""

    def __init__(self):
        self.doctype = "gelderlander (www)"
        self.rss_url = "http://www.gelderlander.nl/home/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=10)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from RSS feed

        Returns
        ----
        dict with keys:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the artcile
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article

        On an unparsable HTML source a 4-tuple of empty strings is
        returned (kept for backward compatibility with existing callers).
        """
        try:
            tree = fromstring(htmlsource)
        except Exception:
            # BUGFIX: the original logged the undefined name `doc`, raising
            # a NameError instead of the intended warning.
            logger.warning("Could not parse HTML tree (len=%s)", len(htmlsource))
            return ("", "", "", "")
        try:
            title = tree.xpath('//*/h1[@class="article__title"]/text()')[0]
        except Exception:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1st path: normal articles; 2nd path: video articles;
            # 3rd path: articles that are tagged 'Home'.
            category = tree.xpath(
                '//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
            )[0]
        except Exception:
            category = ""
            logger.debug("Could not parse article category")
        try:
            # Intro paragraph of regular and of video articles.
            teaser = " ".join(
                tree.xpath(
                    '//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except Exception:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Regular article paragraphs plus live-blog timestamps, intros and
        # paragraphs, joined into one plain-text string.
        text = " ".join(
            tree.xpath(
                '//*/p[@class="article__paragraph"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Try the author ("door") locations in order of specificity; later
        # fallbacks carry a "Door:" prefix that must be removed.
        # BUGFIX: the original's `except: author_door == ""` *compared*
        # instead of assigning, and `lstrip("Door:")` stripped a character
        # set (mangling names such as "Doornbos").
        author_door = ""
        for query, has_prefix in (
            (
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()',
                False,
            ),
            ('//*[@class="author"]/a/text()', True),
            ('//*[@class="article__source"]/span/text()', True),
        ):
            try:
                candidate = tree.xpath(query)[0]
            except Exception:
                continue
            if has_prefix:
                candidate = re.sub(r"^\s*Door:\s*", "", candidate).strip()
            if candidate:
                author_door = candidate
                break
        if author_door == "":
            logger.debug("Could not parse article source")
        try:
            bron_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
        except Exception:
            author_bron = ""
            logger.debug("Could not parse article byline source")
        images = self._extract_images(tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo

    def _extract_images(self, dom_nodes):
        """Collect unique article images (by URL) from the parsed tree.

        BUGFIX: the original reset the accumulated list to [] whenever an
        element without a matching <img> was seen, discarding images that
        had already been collected.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__figure"]//img')
            if img_list:
                image = {"url": img_list[0].attrib["src"]}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        link = re.sub("/$", "", link)
        link = "http://www.gelderlander.nl///cookiewall/accept?url=" + link
        return link
class ed(rss):
    """Scrapes ed.nl"""

    def __init__(self):
        self.doctype = "ed (www)"
        self.rss_url = "http://www.ed.nl/home/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=10)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from RSS feed

        Returns
        ----
        dict with keys:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the artcile
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article

        On an unparsable HTML source a 4-tuple of empty strings is
        returned (kept for backward compatibility with existing callers).
        """
        try:
            tree = fromstring(htmlsource)
        except Exception:
            # BUGFIX: the original logged the undefined name `doc`, raising
            # a NameError instead of the intended warning.
            logger.warning("Could not parse HTML tree (len=%s)", len(htmlsource))
            return ("", "", "", "")
        try:
            title = tree.xpath('//*/h1[@class="article__title"]/text()')[0]
        except Exception:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1st path: normal articles; 2nd path: video articles;
            # 3rd path: articles that are tagged 'Home'.
            category = tree.xpath(
                '//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
            )[0]
        except Exception:
            category = ""
            logger.debug("Could not parse article category")
        try:
            # Intro paragraph of regular and of video articles.
            teaser = " ".join(
                tree.xpath(
                    '//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except Exception:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Regular article paragraphs plus live-blog timestamps, intros and
        # paragraphs, joined into one plain-text string.
        text = " ".join(
            tree.xpath(
                '//*/p[@class="article__paragraph"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Try the author ("door") locations in order of specificity; later
        # fallbacks carry a "Door:" prefix that must be removed.
        # BUGFIX: the original's `except: author_door == ""` *compared*
        # instead of assigning, and `lstrip("Door:")` stripped a character
        # set (mangling names such as "Doornbos").
        author_door = ""
        for query, has_prefix in (
            (
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()',
                False,
            ),
            ('//*[@class="author"]/a/text()', True),
            ('//*[@class="article__source"]/span/text()', True),
        ):
            try:
                candidate = tree.xpath(query)[0]
            except Exception:
                continue
            if has_prefix:
                candidate = re.sub(r"^\s*Door:\s*", "", candidate).strip()
            if candidate:
                author_door = candidate
                break
        if author_door == "":
            logger.debug("Could not parse article source")
        try:
            bron_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
        except Exception:
            author_bron = ""
            logger.debug("Could not parse article source byline")
        images = self._extract_images(tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo

    def _extract_images(self, dom_nodes):
        """Collect unique article images (by URL) from the parsed tree.

        BUGFIX: the original reset the accumulated list to [] whenever an
        element without a matching <img> was seen, discarding images that
        had already been collected.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__figure"]//img')
            if img_list:
                image = {"url": img_list[0].attrib["src"]}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        link = re.sub("/$", "", link)
        link = "http://www.ed.nl///cookiewall/accept?url=" + link
        return link
class bndestem(rss):
    """Scrapes bndestem.nl"""

    def __init__(self):
        self.doctype = "bndestem (www)"
        self.rss_url = "http://www.bndestem.nl/home/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=17)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from RSS feed

        Returns
        ----
        dict with keys:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the artcile
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article

        On an unparsable HTML source a 4-tuple of empty strings is
        returned (kept for backward compatibility with existing callers).
        """
        try:
            tree = fromstring(htmlsource)
        except Exception:
            # BUGFIX: the original logged the undefined name `doc`, raising
            # a NameError instead of the intended warning.
            logger.warning("Could not parse HTML tree (len=%s)", len(htmlsource))
            return ("", "", "", "")
        try:
            title = tree.xpath('//*/h1[@class="article__title"]/text()')[0]
        except Exception:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1st path: normal articles; 2nd path: video articles;
            # 3rd path: articles that are tagged 'Home'.
            category = tree.xpath(
                '//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
            )[0]
        except Exception:
            category = ""
            logger.debug("Could not parse article category")
        try:
            # Intro paragraph of regular and of video articles.
            teaser = " ".join(
                tree.xpath(
                    '//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except Exception:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Regular article paragraphs plus live-blog timestamps, intros and
        # paragraphs, joined into one plain-text string.
        text = " ".join(
            tree.xpath(
                '//*/p[@class="article__paragraph"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Try the author ("door") locations in order of specificity; later
        # fallbacks carry a "Door:" prefix that must be removed.
        # BUGFIX: the original's `except: author_door == ""` *compared*
        # instead of assigning, and `lstrip("Door:")` stripped a character
        # set (mangling names such as "Doornbos").
        author_door = ""
        for query, has_prefix in (
            (
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()',
                False,
            ),
            ('//*[@class="author"]/a/text()', True),
            ('//*[@class="article__source"]/span/text()', True),
        ):
            try:
                candidate = tree.xpath(query)[0]
            except Exception:
                continue
            if has_prefix:
                candidate = re.sub(r"^\s*Door:\s*", "", candidate).strip()
            if candidate:
                author_door = candidate
                break
        if author_door == "":
            logger.debug("Could not parse article source")
        try:
            bron_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
        except Exception:
            author_bron = ""
            logger.debug("Could not parse article source byline")
        images = self._extract_images(tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo

    def _extract_images(self, dom_nodes):
        """Collect unique article images (by URL) from the parsed tree.

        BUGFIX: the original reset the accumulated list to [] whenever an
        element without a matching <img> was seen, discarding images that
        had already been collected.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__figure"]//img')
            if img_list:
                image = {"url": img_list[0].attrib["src"]}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        link = re.sub("/$", "", link)
        link = "http://www.bndestem.nl///cookiewall/accept?url=" + link
        return link
class pzc(rss):
    """Scrapes pzc.nl"""

    def __init__(self):
        self.doctype = "pzc (www)"
        self.rss_url = "http://www.pzc.nl/home/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=17)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from RSS feed

        Returns
        ----
        dict with keys:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the artcile
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article

        On an unparsable HTML source a 4-tuple of empty strings is
        returned (kept for backward compatibility with existing callers).
        """
        try:
            tree = fromstring(htmlsource)
        except Exception:
            # BUGFIX: the original logged the undefined name `doc`, raising
            # a NameError instead of the intended warning.
            logger.warning("Could not parse HTML tree (len=%s)", len(htmlsource))
            return ("", "", "", "")
        try:
            title = tree.xpath('//*/h1[@class="article__title"]/text()')[0]
        except Exception:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1st path: normal articles; 2nd path: video articles;
            # 3rd path: articles that are tagged 'Home'.
            category = tree.xpath(
                '//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
            )[0]
        except Exception:
            category = ""
            logger.debug("Could not parse article category")
        try:
            # Intro paragraph of regular and of video articles.
            teaser = " ".join(
                tree.xpath(
                    '//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except Exception:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Regular article paragraphs plus live-blog timestamps, intros and
        # paragraphs, joined into one plain-text string.
        text = " ".join(
            tree.xpath(
                '//*/p[@class="article__paragraph"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Try the author ("door") locations in order of specificity; later
        # fallbacks carry a "Door:" prefix that must be removed.
        # BUGFIX: the original's `except: author_door == ""` *compared*
        # instead of assigning, and `lstrip("Door:")` stripped a character
        # set (mangling names such as "Doornbos").
        author_door = ""
        for query, has_prefix in (
            (
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()',
                False,
            ),
            ('//*[@class="author"]/a/text()', True),
            ('//*[@class="article__source"]/span/text()', True),
        ):
            try:
                candidate = tree.xpath(query)[0]
            except Exception:
                continue
            if has_prefix:
                candidate = re.sub(r"^\s*Door:\s*", "", candidate).strip()
            if candidate:
                author_door = candidate
                break
        if author_door == "":
            logger.debug("Could not parse article source")
        try:
            bron_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
        except Exception:
            author_bron = ""
            logger.debug("Could not parse article source byline")
        images = self._extract_images(tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo

    def _extract_images(self, dom_nodes):
        """Collect unique article images (by URL) from the parsed tree.

        BUGFIX: the original reset the accumulated list to [] whenever an
        element without a matching <img> was seen, discarding images that
        had already been collected.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__figure"]//img')
            if img_list:
                image = {"url": img_list[0].attrib["src"]}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        link = re.sub("/$", "", link)
        link = "http://www.pzc.nl///cookiewall/accept?url=" + link
        return link
class tubantia(rss):
    """Scrapes tubantia.nl"""

    def __init__(self):
        self.doctype = "tubantia (www)"
        self.rss_url = "http://www.tubantia.nl/home/rss.xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=17)

    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from RSS feed

        Returns
        ----
        dict with keys:
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the artcile
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article

        On an unparsable HTML source a 4-tuple of empty strings is
        returned (kept for backward compatibility with existing callers).
        """
        try:
            tree = fromstring(htmlsource)
        except Exception:
            # BUGFIX: the original logged the undefined name `doc`, raising
            # a NameError instead of the intended warning.
            logger.warning("Could not parse HTML tree (len=%s)", len(htmlsource))
            return ("", "", "", "")
        try:
            title = tree.xpath('//*/h1[@class="article__title"]/text()')[0]
        except Exception:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1st path: normal articles; 2nd path: video articles;
            # 3rd path: articles that are tagged 'Home'.
            category = tree.xpath(
                '//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
            )[0]
        except Exception:
            category = ""
            logger.debug("Could not parse article category")
        try:
            # Intro paragraph of regular and of video articles.
            teaser = " ".join(
                tree.xpath(
                    '//*/p[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except Exception:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Regular article paragraphs plus live-blog timestamps, intros and
        # paragraphs, joined into one plain-text string.
        text = " ".join(
            tree.xpath(
                '//*/p[@class="article__paragraph"]//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Try the author ("door") locations in order of specificity; later
        # fallbacks carry a "Door:" prefix that must be removed.
        # BUGFIX: the original's `except: author_door == ""` *compared*
        # instead of assigning, and `lstrip("Door:")` stripped a character
        # set (mangling names such as "Doornbos").
        author_door = ""
        for query, has_prefix in (
            (
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()',
                False,
            ),
            ('//*[@class="author"]/a/text()', True),
            ('//*[@class="article__source"]/span/text()', True),
        ):
            try:
                candidate = tree.xpath(query)[0]
            except Exception:
                continue
            if has_prefix:
                candidate = re.sub(r"^\s*Door:\s*", "", candidate).strip()
            if candidate:
                author_door = candidate
                break
        if author_door == "":
            logger.debug("Could not parse article source")
        try:
            bron_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", bron_text)[0]
        except Exception:
            author_bron = ""
            logger.debug("Could not parse article source byline")
        images = self._extract_images(tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo

    def _extract_images(self, dom_nodes):
        """Collect unique article images (by URL) from the parsed tree.

        BUGFIX: the original reset the accumulated list to [] whenever an
        element without a matching <img> was seen, discarding images that
        had already been collected.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__figure"]//img')
            if img_list:
                image = {"url": img_list[0].attrib["src"]}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
        return images

    def getlink(self, link):
        """modifies the link to the article to bypass the cookie wall"""
        link = re.sub("/$", "", link)
        link = "http://www.tubantia.nl///cookiewall/accept?url=" + link
        return link
class limburger(rss):
    """Scrapes limburger.nl"""
    def __init__(self):
        self.doctype = "limburger (www)"
        self.rss_url = "http://feeds.feedburner.com/Limburgernl-nieuws?format=xml"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=17)
    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        returns a dict with
        ----
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article
        """
        try:
            tree = fromstring(htmlsource)
        except:
            # Fixed: the old handler referenced the undefined name `doc`,
            # so the except clause itself raised a NameError.
            logger.warning("Could not parse HTML tree")
            return ("", "", "", "")
        try:
            title = tree.xpath('//*/h1[@itemprop="name"]/text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1. path = normal articles
            # 2. path = video articles
            # 3. path = articles that are tagged 'Home'
            category = tree.xpath(
                '//*[@class="container"]/ul/li[@class="sub-nav__list-item active"]/a/text() | //*[@class="article__section-text"]/a/text() | //*/span[@class="mobile-nav__list-text"]/text()'
            )[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        # Intro paragraphs: both the regular and the video variants.
        try:
            teaser = " ".join(
                tree.xpath(
                    '//*[@class="article__intro"]//text() | //*/p[@class="article__intro video"]//text()'
                )
            ).strip()
        except:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Body paragraphs plus the live-blog time stamps, intros and
        # paragraphs.
        text = " ".join(
            tree.xpath(
                '//*[@class="article__body"]/p//text() | //*/p[@class="liveblog_time-text"]//text() | //*/time[@class="liveblog__time-text"]//text() | //*/p[@class="liveblog__intro"]//text() | //*/p[@class="liveblog__paragraph"]//text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Author: article-source block first, then two fallbacks.
        # NOTE(review): str.lstrip("Door:") strips any of the characters
        # "Dor:" from the left, not the literal prefix -- confirm intent.
        try:
            author_door = tree.xpath(
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()'
            )[0]
        except:
            author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="author"]/a/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                # Fixed: was `author_door == ""`, a no-op comparison
                # instead of an assignment.
                author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="article__source"]/span/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                author_door = ""
                logger.debug("Could not parse article source")
        try:
            brun_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", brun_text)[0]
        except:
            author_bron = ""
            logger.debug("Could not parse article byline source")
        images = limburger._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text.strip(),
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Collect unique images (url + alt) from the article figures."""
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//figure[@class="article__image"]//img')
            if len(img_list) > 0:
                img = img_list[0]
                image = {
                    "url": img.attrib["src"],
                    "alt": img.attrib["alt"],
                }
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            # Fixed: the old 'else: images = []' reset discarded earlier
            # matches; the dead trailing 'return extractedinfo' that
            # followed this method was removed as unreachable.
        return images
class frieschdagblad(rss):
    """Scrapes frieschdagblad.nl"""
    def __init__(self):
        self.doctype = "frieschdagblad (www)"
        self.rss_url = "http://www.frieschdagblad.nl/nieuws.asp"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=10)
    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        returns a dict with
        ----
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        """
        try:
            tree = fromstring(htmlsource)
        except:
            # Fixed: the old handler referenced the undefined name `doc`,
            # so the except clause itself raised a NameError.
            logger.warning("Could not parse HTML tree")
            return ("", "", "", "")
        try:
            title = tree.xpath('//*[@class="ArtKopStd"]/b/text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1. path = normal articles
            category = tree.xpath('//*/span[@class="rubriek"]/text()')[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        # NOTE(review): these intro selectors look copy-pasted from the
        # limburger scraper; confirm they actually exist on this site.
        try:
            teaser = tree.xpath(
                '//*/p[@class="article__intro"]/span[@class="tag"]/text() | //*/p[@class="article__intro"]/text() | //*/p[@class="article__intro"]/span/text() | //*/p[@class="article__intro"]/b/text() | //*/p[@class="article__intro video"]/text() | //*/p[@class="article__intro video"]/span/text() | //*/p[@class="article__intro video"]/span/a/text()'
            )[0]
        except:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Fixed: xpath() returns a list, so the old `text == ""` check
        # could never fire; join into a plain string, consistent with the
        # other scrapers in this module.
        text = " ".join(tree.xpath('//*[@class="ArtTekstStd"]/text()')).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Author: article-source block first, then two fallbacks.
        try:
            author_door = tree.xpath(
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()'
            )[0]
        except:
            author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="author"]/a/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                # Fixed: was `author_door == ""`, a no-op comparison
                # instead of an assignment.
                author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="article__source"]/span/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                author_door = ""
                logger.debug("Could not parse article source")
        try:
            brun_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", brun_text)[0]
        except:
            author_bron = ""
            logger.debug("Could not parse article byline_source")
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text,
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
        }
        return extractedinfo
class zwartewaterkrant(rss):
    """Scrapes zwartewaterkrant.nl"""
    def __init__(self):
        self.doctype = "zwartewaterkrant (www)"
        self.rss_url = "http://www.zwartewaterkrant.nl/rss.php"
        self.version = ".1"
        self.date = datetime.datetime(year=2017, month=5, day=10)
    def parsehtml(self, htmlsource):
        """
        Parses the html source to retrieve info that is not in the RSS-keys

        Parameters
        ----
        htmlsource: string
            html retrieved from the RSS feed

        returns a dict with
        ----
        title           the title of the article
        category        sth. like economy, sports, ...
        teaser          the intro to the article
        text            the plain text of the article
        byline          the author, e.g. "Bob Smith"
        byline_source   sth like ANP
        images          images included in the article
        """
        try:
            tree = fromstring(htmlsource)
        except:
            # Fixed: the old handler referenced the undefined name `doc`,
            # so the except clause itself raised a NameError.
            logger.warning("Could not parse HTML tree")
            return ("", "", "", "")
        try:
            title = tree.xpath('//*[@id="containerContent"]/h2/text()')[0]
        except:
            title = ""
            logger.warning("Could not parse article title")
        try:
            # 1. path = normal articles
            category = tree.xpath('//*/span[@class="rubriek"]/text()')[0]
        except:
            category = ""
            logger.debug("Could not parse article category")
        try:
            teaser = tree.xpath('//*/span[@class="blackbold"]/text()')[0]
        except:
            teaser = ""
            logger.debug("Could not parse article teaser")
        # Fixed: xpath() returns a list, so the old `text == ""` check
        # could never fire; join into a plain string, consistent with the
        # other scrapers in this module.
        text = " ".join(
            tree.xpath(
                '//*[@id="containerContent"]/p/text() | //*[@id="containerContent"]/p/a/text()'
            )
        ).strip()
        if text == "":
            logger.warning("Could not parse article text")
        # Author: article-source block first, then two fallbacks.
        try:
            author_door = tree.xpath(
                '//*/span[@class="article__source"]/b/text() | //*/p[@class="article__paragraph"]/b/i/text()'
            )[0]
        except:
            author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="author"]/a/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                # Fixed: was `author_door == ""`, a no-op comparison
                # instead of an assignment.
                author_door = ""
        if author_door == "":
            try:
                author_door = (
                    tree.xpath('//*[@class="article__source"]/span/text()')[0]
                    .strip()
                    .lstrip("Door:")
                    .strip()
                )
            except:
                author_door = ""
                logger.debug("Could not parse article source")
        try:
            brun_text = tree.xpath('//*[@class="author"]/text()')[1].replace("\n", "")
            author_bron = re.findall(".*?bron:(.*)", brun_text)[0]
        except:
            author_bron = ""
            logger.debug("Could not parse article byline source")
        images = zwartewaterkrant._extract_images(self, tree)
        extractedinfo = {
            "title": title.strip(),
            "category": category.strip(),
            "teaser": teaser.strip(),
            "text": text,
            "byline": author_door.replace("\n", " "),
            "byline_source": author_bron.replace("\n", " ").strip(),
            "images": images,
        }
        # Fixed: parsehtml previously built extractedinfo and then fell
        # off the end without returning it, so every parse yielded None
        # (a stray `return extractedinfo` sat unreachable after
        # _extract_images' own return).
        return extractedinfo
    def _extract_images(self, dom_nodes):
        """Collect unique images from the article content container.

        NOTE(review): this selector matches @class="containerContent"
        while the text/title selectors above use @id="containerContent";
        confirm which attribute the site actually uses.
        """
        images = []
        for element in dom_nodes:
            img_list = element.xpath('//*[@class="containerContent"]//img')
            if len(img_list) > 0:
                img = img_list[0]
                image = {"url": img.attrib["src"]}
                if image["url"] not in [i["url"] for i in images]:
                    images.append(image)
            # Fixed: the old 'else: images = []' reset discarded earlier
            # matches.
        return images
| true |
d414204633f4ea5cdd1236453007cea4f5f5eb98 | Python | KushRohra/PythonProjects | /Machine Learning/Part 1/8. Logistic Regression/logistic_regression.py | UTF-8 | 1,294 | 3.09375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score
# Logistic regression on the Social Network Ads dataset: every column
# but the last is a feature, the last column is the 0/1 label.
dataset = pd.read_csv('../datasets/Social_Network_Ads.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=0)
# Feature Scaling
# Fixed: the scaler must be fitted on the training split only;
# calling fit_transform on X_test re-estimated mean/std from the test
# data, leaking test statistics and mis-scaling the test set relative
# to the fitted classifier.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
# Predicting a new result
predicted_value = classifier.predict(sc.transform([[30, 87000]]))
print("Predicting a new result : ", predicted_value[0])
print()
# Predicting the test set results (true label next to prediction)
y_pred = classifier.predict(X_test)
print("Predicting the test set results : ")
print(np.concatenate((y_test.reshape(len(y_test), 1), y_pred.reshape(len(y_pred), 1)), 1))
print()
# Making the confusion matrix
cm = confusion_matrix(y_test, y_pred)
print("Confusion Matrix is : ", cm)
print()
# Accuracy Score
score = accuracy_score(y_test, y_pred)
print("Accuracy score of the model is : ", score)
print()
ed18f18192b8926023fa567108ae4bd8e067032e | Python | mysqlbin/python_note | /2020-03-24-Python-ZST-base/2020-08-28-条件循环控制/2020-04-19-02.py | GB18030 | 450 | 2.71875 | 3 | [] | no_license | #!/usr/bin/ptyhon
#coding=gbk
"""
"""
# Countdown demo: prints a status message (the original GBK comment and
# message text were garbled by a later encoding conversion) while
# count > 0, then prints 'debug' once after the loop exits.
count = 2
while count > 0:
    print('ݿͱÿһ,count: {}'.format(count))
    count = count - 1
print('debug')
"""
count = 2    0   ݿͱÿһ,count: 2
count = 1    0  ݿͱÿһ,count: 1
count = 0   0ѭ ݿͱÿһ......
..
"""
| true |
cd8c62b0a6894888c97177d3160a11cc8bc25fb6 | Python | searchspring/vaurien | /vaurien/protocols/memcache.py | UTF-8 | 2,148 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | import re
from vaurien.protocols.base import BaseProtocol
from vaurien.util import chunked
# NOTE(review): RE_LEN and RE_KEEPALIVE are not referenced by the
# Memcache class below; presumably shared with other protocol handlers.
RE_LEN = re.compile('Content-Length: (\d+)', re.M | re.I)
RE_KEEPALIVE = re.compile('Connection: Keep-Alive')
# First CRLF-terminated line of a memcache exchange (the command line).
RE_MEMCACHE_COMMAND = re.compile('(.*)\r\n')
# End-of-headers marker and the protocol's line terminator.
EOH = '\r\n\r\n'
CRLF = '\r\n'
class Memcache(BaseProtocol):
    """Memcache protocol.
    """
    name = 'memcache'
    def _handle(self, source, dest, to_backend, on_between_handle):
        # Proxies one memcache request/response cycle between the client
        # socket (`source`) and the backend socket (`dest`).  Protocol:
        # https://github.com/memcached/memcached/blob/master/doc/protocol.txt
        # Sending the query
        buffer = self._get_data(source)
        if not buffer:
            # Nothing readable from the client: abandon this exchange.
            self._abort_handling(to_backend, dest)
            return
        # sending the first packet
        dest.sendall(buffer)
        on_between_handle()
        # finding the command we sent.
        cmd = RE_MEMCACHE_COMMAND.search(buffer)
        if cmd is None:
            # wat ?  (no CRLF-terminated command line found)
            self._abort_handling(to_backend, dest)
            return
        # looking at the command
        cmd = cmd.groups()[0]
        buffer_size = self.option('buffer')
        cmd_parts = cmd.split()
        mcmd = cmd_parts[0]
        if mcmd in ('set', 'add', 'replace', 'append'):
            # Storage commands carry a data block; its size is the last
            # token of the command line, so forward any remaining payload
            # bytes that did not arrive with the first read.
            cmd_size = len(cmd) + len(CRLF)
            data_size = int(cmd_parts[-1])
            total_size = cmd_size + data_size
            # grabbing more data if needed
            left_to_read = total_size - len(buffer) + len(CRLF)
            if left_to_read > 0:
                for chunk in chunked(left_to_read, buffer_size):
                    data = source.recv(chunk)
                    buffer += data
                    dest.sendall(data)
        # Receiving the response now
        buffer = self._get_data(dest, buffer_size)
        source.sendall(buffer)
        if buffer.startswith('VALUE'):
            # we're getting back a value: retrieval responses terminate
            # with an 'END\r\n' marker rather than a bare CRLF.
            EOW = 'END' + CRLF
        else:
            EOW = CRLF
        # Relay backend data until the end-of-response marker is seen.
        while not buffer.endswith(EOW):
            data = self._get_data(dest, buffer_size)
            buffer += data
            source.sendall(data)
        # we're done
        return True # keeping connected
| true |
d8e0242df900667c9f730a133c2054ff4e6c1dab | Python | FiveEyes/ml-notebook | /dlp/ch8_1_text_generation.py | UTF-8 | 2,442 | 2.59375 | 3 | [
"MIT"
] | permissive | import numpy as np
import keras as ks
from keras import layers
def reweight_dist(org_dist, temp=0.5):
    """Sharpen (temp < 1) or flatten (temp > 1) a probability
    distribution via temperature scaling, renormalised to sum to 1."""
    scaled = np.exp(np.log(org_dist) / temp)
    return scaled / np.sum(scaled)
# Download (cached by Keras) and lowercase the Nietzsche corpus.
path = ks.utils.get_file(
    'nietzsche.txt',
    origin='http://s3.amazonaws.com/text-datasets/nietzsche.txt')
text=open(path).read().lower()
print('Corpus length:', len(text))
# Cut the corpus into overlapping `maxlen`-character sequences advancing
# `step` characters at a time; the character after each sequence is its
# prediction target.
maxlen = 60
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i:i+maxlen])
    next_chars.append(text[i+maxlen])
print('Number of seq:', len(sentences))
chars = sorted(list(set(text)))
print('unique char:', len(chars))
char_indices = dict((char, chars.index(char)) for char in chars)
# One-hot encode inputs (x) and next-char targets (y).
# NOTE(review): np.bool is removed in NumPy >= 1.24 -- this only runs on
# older NumPy versions.
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        x[i,t,char_indices[char]] = 1
    y[i,char_indices[next_chars[i]]] = 1
# Single-LSTM character model with a softmax over the vocabulary.
model = ks.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
optimizer = ks.optimizers.RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temp=0.1):
    """Draw one class index from `preds` after temperature scaling:
    low temp concentrates mass on the most likely character."""
    logits = np.log(np.asarray(preds).astype('float64')) / temp
    weights = np.exp(logits)
    distribution = weights / np.sum(weights)
    draw = np.random.multinomial(1, distribution, 1)
    return np.argmax(draw)
import random
import sys
# Resume training from the checkpoint written by a previous run
# (NOTE(review): "text_gen.h5" must already exist on disk).
model = ks.models.load_model("text_gen.h5")
for epoch in range(1,60):
    print('epoch', epoch)
    model.fit(x, y, batch_size=128, epochs=1)
    model.save("text_gen.h5")
    # Pick a random seed window from the corpus and generate 400
    # characters at several sampling temperatures.
    start_index = random.randint(0, len(text) - maxlen - 1)
    generated_text = text[start_index:start_index+ maxlen]
    print(generated_text)
    for temp in [0.2, 0.5, 1.0, 1.2]:
        print('temp:', temp)
        generated_text = text[start_index:start_index+ maxlen]
        sys.stdout.write(generated_text)
        for i in range(400):
            # One-hot encode the current window.
            # Fixed: the original wrote `sampled[0,t, ...] - 1.0`, a
            # no-op expression indexed by the stale loop variable `t`
            # from the vectorisation pass above, so the model always
            # received an all-zero input; it also shadowed the outer
            # loop's `i`.
            sampled = np.zeros((1,maxlen, len(chars)))
            for j, char in enumerate(generated_text):
                sampled[0, j, char_indices[char]] = 1.0
            preds = model.predict(sampled, verbose=0)[0]
            next_index = sample(preds, temp)
            next_char = chars[next_index]
            generated_text += next_char
            generated_text = generated_text[1:]
            sys.stdout.write(next_char)
    print('')
df30ea3a483dbee9503b3b69804a646c502e2770 | Python | NikaTiunkina/cdv | /programowanie_strukturalne/10.rekurencja.py | UTF-8 | 1,050 | 3.78125 | 4 | [] | no_license |
import time
def find_fib_num_rec(num):
    """Return the num-th Fibonacci number (1-indexed), recursively."""
    if num in (1, 2):
        return 1
    return find_fib_num_rec(num - 1) + find_fib_num_rec(num - 2)
def find_fib_num_loop(num):
    """Return the num-th Fibonacci number (1-indexed), iteratively.

    Fixed: the original computed the value but never returned it, so
    every call produced None (which is what the timing code printed).
    """
    a, b = 0, 1
    for _ in range(num):
        a, b = b, a + b
    return a
# Time the recursive vs. iterative Fibonacci implementations (the
# f-string messages are Polish: "execution time of ... is").
start = time.process_time()
print(find_fib_num_rec(10))
stop = time.process_time()
print(f'Czas wykonywania funkcji "find_fib_num_rec" wynosi: {stop - start}')
start1 = time.process_time()
print(find_fib_num_loop(35))
stop1 = time.process_time()
print(f'Czas wykonania funkcji "find_fib_num_loop wynosi : {stop1 - start1}"')
def potenga (podstawa, wykladnik):
    """Return podstawa (base) raised to wykladnik (exponent), recursively.

    Fixed: the parameter was declared as `wykladniki` while the body
    used `wykladnik`, so every call raised NameError.
    """
    if wykladnik == 0:
        return 1
    else:
        return podstawa * potenga(podstawa, wykladnik - 1)
print(potenga(3,3))
'''
potenga(3,3) --->
potenga(3,2) --->9
potenga(3,1) --->3
potenga(3,0) --->1
potenga(3,3) ---> 3*9 =27
potenga(3,2) ---> 3*3=9
potenga(3,1) ---> 3*1=3
'''
| true |
8affbb45ec6a6c8214a94d08c954db01ef290ec2 | Python | pmorin2/dslr | /check_script.py | UTF-8 | 485 | 2.625 | 3 | [] | no_license | from sklearn.metrics import accuracy_score
import argparse
import pandas
if __name__ == '__main__':
    # Compare predicted labels against the ground truth: both CSVs are
    # read and their second column is used as the label vector.
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_house", type=str, help="input dataset")
    # NOTE(review): the help text says "input weights" but this argument
    # is the ground-truth CSV.
    parser.add_argument("dataset_truth", type=str, help="input weights")
    args = parser.parse_args()
    predict = pandas.read_csv(args.dataset_house).values[:, 1]
    real = pandas.read_csv(args.dataset_truth).values[:, 1]
    print(f'Accuracy: {accuracy_score(predict, real):.2f}')
3bebde20baa05e8a45c6b29bcf6134cffd5d3809 | Python | curtislb/ProjectEuler | /py/problem_036.py | UTF-8 | 1,676 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""problem_036.py
Problem 36: Double-base palindromes
The decimal number, 585 = 1001001001_2 (binary), is palindromic in both bases.
Find the sum of all numbers, less than LIMIT, which are palindromic in base
BASE_A and base BASE_B.
(Please note that the palindromic number, in either base, may not include
leading zeros.)
"""
__author__ = 'Curtis Belmonte'
import common.digits as digs
# PARAMETERS ##################################################################
# Exclusive search bound and the two bases in which a number must read
# as a palindrome.
LIMIT = 1000000 # default: 1000000
BASE_A = 10 # default: 10
BASE_B = 2 # default: 2
# SOLUTION ####################################################################
def solve() -> int:
    """Sum all numbers below LIMIT that are palindromic in both BASE_A
    and BASE_B, by generating BASE_A palindromes directly instead of
    testing every integer."""
    # Even- and odd-length palindromes are disjoint sets, so the two
    # partial sums simply add.  (The original duplicated the whole loop
    # for each parity; it is factored into one helper.)
    return (_palindrome_sum(odd_length=False)
            + _palindrome_sum(odd_length=True))


def _palindrome_sum(odd_length: bool) -> int:
    """Sum the BASE_A palindromes below LIMIT with the given length
    parity that are also palindromic in BASE_B."""
    # NOTE(review): assumes make_palindrome's odd_length default is
    # False, matching the original's even-length calls -- confirm in
    # common.digits.
    total = 0
    n = 1
    palindrome = digs.make_palindrome(n, BASE_A, odd_length=odd_length)
    while palindrome < LIMIT:
        if digs.is_palindrome(palindrome, BASE_B):
            total += palindrome
        n += 1
        palindrome = digs.make_palindrome(n, BASE_A, odd_length=odd_length)
    return total
if __name__ == '__main__':
print(solve())
| true |
b48631ea343df95af358fac50d1e23001fadb1b5 | Python | doraemon1293/Leetcode | /archive/573SquirrelSimulation.py | UTF-8 | 788 | 3.296875 | 3 | [] | no_license | class Solution(object):
def minDistance(self, height, width, tree, squirrel, nuts):
"""
:type height: int
:type width: int
:type tree: List[int]
:type squirrel: List[int]
:type nuts: List[List[int]]
:rtype: int
"""
mini = float("inf")
total_nut_to_tree = 0
for nut in nuts:
nut_to_tree = abs(nut[0] - tree[0]) + abs(nut[1] - tree[1])
total_nut_to_tree += nut_to_tree
nut_to_squrrel = abs(nut[0] - squirrel[0]) + abs(nut[1] - squirrel[1])
if (nut_to_squrrel - nut_to_tree) < mini:
mini = nut_to_squrrel - nut_to_tree
return mini + total_nut_to_tree * 2
# Python 2 style demo invocation (print statement); with this sample
# grid the expected result is 12.
print Solution().minDistance(5,
                             7,
                             [2, 2],
                             [4, 4],
                             [[3, 0], [2, 5]])
| true |
147cb9142b20af5d48c6147be2a12debd365b42d | Python | jgmatu/PythonST | /ST05/practica05/exchange.py | UTF-8 | 7,321 | 2.875 | 3 | [] | no_license | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
import sys
import flask
import json
import requests
import os
import time
app = flask.Flask(__name__)
FAIL = "Amount must be a number to know the amount of bitcoins conversion..."
def openfile (fileName , mode) :
    """Open fileName in the given mode; on IOError report the failure on
    stderr and abort the whole program with SystemExit."""
    try :
        return open(fileName , mode)
    except IOError :
        message = "Error open file " + fileName + " mode : " + mode.upper() + '\n'
        sys.stderr.write(message)
        raise SystemExit
def oldCache (name) :
    """Return True when the cache file `name` is stale: older than five
    minutes for the BTC/CNY ticker cache ("cny.json"), older than one
    day for the fixer.io currency caches."""
    fiveMins = 300
    oneDay = 3600*24
    btcEx = "cny.json"
    cacheF = openfile(name , "r")
    cacheD = cacheF.read()
    cacheF.close()
    cacheJ = json.loads(cacheD)
    # "date" is the epoch timestamp stamped by getReq when it wrote the
    # cache file.
    if btcEx in name :
        cacheTime = (time.time() - cacheJ["date"]) - fiveMins
    else :
        cacheTime = (time.time() - cacheJ["date"]) - oneDay
    return cacheTime > 0
def isCache (name) :
    """True when the cache file `name` exists and is still fresh (see oldCache)."""
    return os.path.exists(name) and not oldCache(name)
def checkJson (data) :
    """Render a Python dict/list as a JSON-ish string by swapping single
    quotes for double quotes and dropping Python 2 u-string prefixes.
    (Naive: breaks on values that themselves contain quotes.)"""
    rendered = str(data).replace("'", '"')
    return rendered.replace('u"', '"')
def getReq (url , name) :
    """Fetch JSON from `url`, using local file `name` as a cache.

    Fresh cache hits are served from disk; otherwise the server response
    is stamped with the current time under 'date' and written back to
    the cache file.  Any failure aborts the program with SystemExit.
    NOTE(review): the timestamp is stamped and the cache rewritten even
    when the HTTP status is not 200, caching an empty payload."""
    data = {}
    try :
        if isCache(name) :
            # Get information exchange from local cache.
            print "Data get from cache..." + name
            cacheF = openfile(name , "r")
            cacheD = cacheF.read()
            data = json.loads(cacheD)
        else :
            print "Data get from server..." + name
            # Get information exchange from server...
            r = requests.get(url)
            if r.status_code == 200 :
                data = r.json()
            else :
                print "Error : " + str(r.status_code)
            # Update file with cache value in json format and actual time.
            data["date"] = time.time()
            dataS = checkJson(data)
            cache = openfile(name , "w+")
            cache.write(dataS)
            cache.close()
        return data
    except :
        sys.stderr.write("Error in request server or cache file...." + '\n')
        raise SystemExit
@app.route('/')
def index () :
    """Trivial liveness endpoint."""
    return "Hello World"
def isAmount (query_string) :
    """Return True when the first (and, per isPars, only) query-string
    parameter is named 'amount'.  Raises IndexError on an empty mapping,
    as before."""
    # Fixed: dict.keys() is not indexable on Python 3; materialise it
    # first (list() behaves identically on Python 2).
    keys = list(query_string.keys())
    return keys[0] == 'amount'
def isPars (query_string) :
    """True when the query string holds exactly one parameter and that
    parameter is 'amount'."""
    return len(query_string) == 1 and isAmount(query_string)
def getCny () :
    """Return the current BTC buy price in Chinese yuan (CNY) from the
    BTCChina ticker, via the cached getReq helper; abort the program on
    a malformed ticker value."""
    try :
        url = "https://data.btcchina.com/data/ticker?market=btccny"
        btcCny = getReq(url , "cny.json")
        return float(btcCny["ticker"]["buy"])
    except ValueError :
        print FAIL + "GET XBTCNT EXCHANGE"
        raise SystemExit
def _xbt_price (currency) :
    """Price of one bitcoin in `currency`: the BTC/CNY ticker divided by
    the fixer.io CNY rate quoted against that currency.  The fixer.io
    response is cached in '<currency>.json' (e.g. rub.json)."""
    btcCny = getCny()
    url = "http://api.fixer.io/latest?base=" + currency
    jsonExchanges = getReq(url , currency.lower() + ".json")
    return btcCny / jsonExchanges["rates"]["CNY"]
def _xbt_exchange (currency , label) :
    """Shared endpoint body: the bitcoin price in `currency`, optionally
    multiplied by the 'amount' query parameter; `label` names the
    endpoint in the error message returned for a malformed amount."""
    query_string = flask.request.args
    try :
        value = _xbt_price(currency)
        if isPars(query_string) :
            value *= float(query_string["amount"])
        return json.dumps(str(value))
    except ValueError :
        return FAIL + label
# The nine endpoints below previously duplicated the same body verbatim,
# each with copy-pasted (often wrong) docstrings; they now delegate to
# the helpers above.  Routes, function names, cache file names and error
# strings are unchanged.
@app.route ('/exchange/XBTRUB')
def xbtRub () :
    """Bitcoin price in Russian roubles."""
    return _xbt_exchange("RUB" , "XBTRUB")
@app.route ('/exchange/XBTCHF')
def xbtChf () :
    """Bitcoin price in Swiss francs."""
    return _xbt_exchange("CHF" , "XBTCHF")
@app.route ('/exchange/XBTAUD')
def xbtAud () :
    """Bitcoin price in Australian dollars."""
    return _xbt_exchange("AUD" , "XBTAUD")
@app.route ('/exchange/XBTJPY')
def xbtJpy () :
    """Bitcoin price in Japanese yen."""
    return _xbt_exchange("JPY" , "XBTJPY")
@app.route ('/exchange/XBTCAD')
def xbtCad () :
    """Bitcoin price in Canadian dollars."""
    return _xbt_exchange("CAD" , "XBTCAD")
@app.route ('/exchange/XBTUSD')
def xbtUsd () :
    """Bitcoin price in US dollars."""
    return _xbt_exchange("USD" , "XBTUSD")
@app.route ('/exchange/XBTGBP')
def xbtGbp () :
    """Bitcoin price in pounds sterling."""
    return _xbt_exchange("GBP" , "XBTGBP")
@app.route ('/exchange/XBTEUR')
def xbtEur () :
    """Bitcoin price in euros."""
    return _xbt_exchange("EUR" , "XBTEUR")
@app.route('/exchange/XBTCNY')
def xbtCny () :
    """Bitcoin price in Chinese yuan, taken directly from the ticker."""
    query_string = flask.request.args
    try :
        btcCny = getCny()
        if isPars (query_string) :
            btcCny *= float(query_string["amount"])
        return json.dumps(str(btcCny))
    except ValueError :
        return FAIL + "XBTCNY"
if __name__ == "__main__" :
app.run(debug=True)
| true |
377b0b2105acba14860aae744dcc0665b5b05c32 | Python | zoukun120/DangDang_Scrapy | /DangDang_Scrapy/dbhelper.py | UTF-8 | 1,560 | 2.515625 | 3 | [] | no_license | import pymysql
from twisted.enterprise import adbapi
from scrapy.utils.project import get_project_settings # 导入seetings配置
import time
# 读取settings中的配置
class DBHelper:
    """Twisted adbapi connection-pool helper for writing scraped DangDang
    book items into a MySQL table."""
    def __init__(self):
        settings = get_project_settings()  # fetch the Scrapy project settings we need
        dbparams = dict(
            host=settings['MYSQL_HOST'],  # connection values come from settings.py
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',  # required, otherwise Chinese text may come out garbled
            cursorclass=pymysql.cursors.DictCursor,
            use_unicode=False,
        )
        dbpool = adbapi.ConnectionPool('pymysql', **dbparams)  # ** expands the dict into keyword args (host=xxx, db=yyy, ...)
        self.dbpool = dbpool
    # Write one record to the database.
    # The parameter xxx can be named anything; it just needs .execute()
    # (presumably the cursor/transaction handed in by adbapi -- confirm
    # against the pipeline that calls this).
    def insert_to_db(self,xxx,item):
        # NOTE(review): `rank` is a reserved word in MySQL 8.0+; this SQL
        # may need backticks there.
        sql_insert = 'insert into dangdang(time,name,author,price_n,price_s,discount,comment_num,detail,rank,publish_time ) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
        params = (
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
            item['book_name'],
            item['author'],
            item['price_n'],
            item['price_s'],
            item['discount'],
            item['comment_num'],
            item['detail'],
            item['rank'],
            item['release']
        )
        xxx.execute(sql_insert, params)
| true |
792538da7343d71bdff8ea8334d70c8449ef3a9d | Python | comptech-winter-school/online-store-redirects | /utils/make_sample.py | UTF-8 | 2,854 | 3.125 | 3 | [
"MIT"
] | permissive | import pandas as pd
def preprocessing_true_redirects(true_redirs):
    """Prepare the genuine-redirects frame: drop bookkeeping columns,
    rename 'category' to 'category_name' and mark every row as a
    redirect (is_redirect = 1).

    :param true_redirs: pd.DataFrame of genuine redirects to the correct category.
    :return: the processed pd.DataFrame.
    """
    cleaned = true_redirs.drop(
        columns=['redir_id', 'rule_id', 'start_date', 'parent_id'])
    cleaned = cleaned.rename(columns={'category': 'category_name'})
    cleaned['is_redirect'] = 1
    return cleaned
def preprocessing_false_redirects(false_redirs, categories):
    """Prepare the generated wrong-category frame: look up each wrong
    category's name, drop id bookkeeping columns, rename
    'category_id_not_redir' to 'category_id' and mark rows with
    is_redirect = 0.

    :param false_redirs: pd.DataFrame of generated wrong-category redirects.
    :param categories: pd.DataFrame with 'id' and 'name' columns.
    :return: the processed pd.DataFrame.
    """
    lookup_name = lambda cid: categories.loc[categories['id'] == cid, 'name'].values[0]
    false_redirs['category_name'] = false_redirs['category_id_not_redir'].apply(lookup_name)
    trimmed = false_redirs.drop(columns=['external_id', 'product_id', 'category_id'])
    trimmed = trimmed.rename(columns={'category_id_not_redir': 'category_id'})
    trimmed['is_redirect'] = 0
    return trimmed
def make_sample(path_to_data):
    """Build the final train/test sample from the genuine redirects
    (420_redirects.csv) and the generated wrong-category redirects
    (negative_examples.csv), using categories.csv for name lookups.

    :param path_to_data: str - directory holding the CSV files
        (e.g. "./data/redir")
    :return sample: pd.DataFrame with columns
        [query, category_id, category_name, is_redirect]
    """
    true_redirs = pd.read_csv(path_to_data + '/420_redirects.csv', index_col=0)
    false_redirs = pd.read_csv(path_to_data + '/negative_examples.csv')
    categories = pd.read_csv(path_to_data + '/categories.csv', index_col=0)
    true_redirs = preprocessing_true_redirects(true_redirs)
    false_redirs = preprocessing_false_redirects(false_redirs, categories)
    sample = pd.concat([true_redirs, false_redirs], ignore_index=True)
    return sample
f13a4384a35f72d081a94579c980747fa7547c4d | Python | theatul/practice_problems | /btree.py | UTF-8 | 2,883 | 4.09375 | 4 | [] | no_license | # binary tree
class Node:
    """A single binary-search-tree node: a value and two child links."""
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
class Btree:
    """Binary search tree with insert, traversal printing and delete."""
    def __init__(self):
        self.head = None
    def add(self, data):
        """Insert data; duplicate values are ignored.

        Fixed: the original spun forever on a duplicate because neither
        comparison branch was taken inside `while True`.
        """
        temp = Node(data)
        if self.head == None:
            self.head = temp
            return
        tnode = self.head
        while True:
            if data < tnode.data:
                if tnode.left == None:
                    tnode.left = temp
                    return
                else:
                    tnode = tnode.left
                    continue
            if data > tnode.data:
                if tnode.right == None:
                    tnode.right = temp
                    return
                else:
                    tnode = tnode.right
                    continue
            # data == tnode.data: value already present, nothing to do.
            return
    def print(self, node):
        """Pre-order print of the subtree rooted at node (recursive)."""
        # To do this witout recursion, a stack needs to be used. https://www.geeksforgeeks.org/iterative-preorder-traversal/
        if node == None:
            return
        print(node.data)
        self.print(node.left)
        self.print(node.right)
    def printTree(self):
        """Pre-order print of the whole tree."""
        self.print(self.head)
    def printcolumn(self):
        """Breadth-first (level-by-level) print, one line per level."""
        if not self.head:
            return
        queue = []
        queue.append(self.head)
        while queue:
            length = len(queue)
            for i in range(length):
                item = queue.pop(0)
                print(item.data, end = ' ')
                if item.left:
                    queue.append(item.left)
                if item.right:
                    queue.append(item.right)
            print("")
    def getMin(self, root):
        """Leftmost (minimum) node of the subtree rooted at root."""
        temp = root
        while temp.left:
            temp = temp.left
        return temp
    def delNode(self, root, data):
        """Delete data from the subtree rooted at root; return the new
        subtree root (standard BST delete with in-order successor)."""
        if not root:
            return None
        if data < root.data:
            root.left = self.delNode(root.left, data)
        elif data > root.data:
            root.right = self.delNode(root.right, data)
        else:
            if root.left == None and root.right == None:
                #leaf node
                return None
            elif root.left == None:
                return root.right
            elif root.right == None:
                return root.left
            else:
                # both nodes are available, take in order successor (minimum node in right tree)
                temp = self.getMin(root.right)
                root.data = temp.data
                root.right = self.delNode(root.right, temp.data)
        return root
    def deleteNode(self, data):
        """Delete data from the tree.

        Fixed: the original discarded delNode's returned subtree root,
        so deleting the root node never updated self.head.
        """
        self.head = self.delNode(self.head, data)
# Quick demo: build a small tree, show it level by level, delete one node
# with two children, then show the tree again.
tree = Btree()
for value in (10, 5, 15, 4, 6, 11, 16, 3):
    tree.add(value)
tree.printcolumn()
tree.deleteNode(5)
print(" ")
tree.printcolumn()
| true |
cf7eb03cee2db7f5af64540e933974af5712e863 | Python | hguuuu/MML | /modality_weighting/unimodal_text_model.py | UTF-8 | 4,841 | 2.609375 | 3 | [] | no_license | from unimodal_dataset import UnimodalDataset
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
import pickle
import sklearn.metrics as metrics
class UnimodalTextModel(nn.Module):
    """Text emotion classifier over sentence embeddings (7 output classes).

    A bidirectional LSTM (hidden_dim1 units total across both directions)
    feeds a two-layer head.  `forward` returns class logits; `forward2`
    returns the intermediate hidden_dim2-sized representation taken before
    the ReLU.
    """

    def __init__(self, embedding_dim, hidden_dim1, hidden_dim2, num_layers, dropout):
        super(UnimodalTextModel, self).__init__()
        # hidden_size is halved so the bidirectional output is hidden_dim1 wide.
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=hidden_dim1 // 2,
                            num_layers=num_layers,
                            dropout=dropout,
                            bidirectional=True)
        self.linear1 = nn.Linear(in_features=hidden_dim1, out_features=hidden_dim2)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(in_features=hidden_dim2, out_features=7)

    def forward(self, x):
        logits = self.linear2(self.relu(self._encode(x)))
        return torch.flatten(logits, 1, 2)

    def forward2(self, x):
        return self._encode(x)

    def _encode(self, x):
        # Shared trunk: BiLSTM followed by the first projection (no ReLU).
        lstm_out, _ = self.lstm(x)
        return self.linear1(lstm_out)

    def get_prob(self, embedding):
        """Return the raw class scores (logits; no softmax applied here)."""
        return self.forward(embedding)

    def get_embedding(self, embedding):
        """Return the intermediate representation for `embedding`."""
        return self.forward2(embedding)
def train(model, epochs, batch_size, optimizer):
    """Train `model` on the pickled training embeddings.

    Uses cross-entropy loss over the 7 emotion classes and prints the
    average loss every 10 epochs.  Moves batches to CUDA when available;
    the caller is responsible for placing `model` on the same device.
    """
    dataset = UnimodalDataset('unimodal_text_train_embeddings.pkl')
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    criterion = nn.CrossEntropyLoss()
    use_cuda = torch.cuda.is_available()
    for epoch in range(epochs):
        epoch_loss = 0
        losses = []
        for batch in loader:
            optimizer.zero_grad()
            inputs = batch["bert_embedding"].view(batch_size, 1, -1)
            targets = batch["emotion"]
            if use_cuda:
                inputs = inputs.cuda()
                targets = targets.cuda()
            preds = model(inputs)
            batch_loss = criterion(preds, targets)
            epoch_loss += batch_loss.item()
            losses.append(batch_loss.item())
            batch_loss.backward()
            optimizer.step()
        if epoch % 10 == 0:
            print("Epoch: ", epoch, ", Loss: ", epoch_loss/len(dataset))
def evaluate(model, batch_size):
    """Score `model` on the dev set; return the weighted F1 over all batches."""
    model.eval()
    dataset = UnimodalDataset('unimodal_text_dev_embeddings.pkl')
    loader = DataLoader(dataset, batch_size=batch_size, drop_last=True)
    softmax = nn.Softmax(dim=1)
    predictions = []
    labels = []
    for batch in loader:
        logits = model(batch["bert_embedding"].view(batch_size, 1, -1))
        # Argmax over softmax-ed logits gives the predicted class index.
        probs = softmax(logits)
        predictions.extend(torch.argmax(probs, dim=1).tolist())
        labels.extend(batch["emotion"].tolist())
    return metrics.f1_score(labels, predictions, average='weighted')
if __name__=="__main__":
torch.manual_seed(25)
#hidden_d1 = [1000, 768, 384]
#hidden_d2 = [300, 256, 128]
#dropout = [0.1, 0.2, 0.3, 0.4]
#epochs = [20, 50]
#layers = [1]
#lr = [0.0001, 0.0005, 0.001, 0.005, 0.01]
hidden_d1 = [768]
hidden_d2 = [256]
dropout = [0.4]
epochs = [20]
layers = [1]
lr = [0.0001]
best_score = 0
best_metrics = []
for a in hidden_d1:
for b in hidden_d2:
for c in dropout:
for d in epochs:
for e in layers:
for f in lr:
print(", hidden d1: ", a, ", hidden d2: ", b, ", dropout: ", c, ", epochs: ", d, ", lstm layers: ", e, ", lr: ", f)
model = UnimodalTextModel(768*3, a, b, e, c)
if torch.cuda.is_available():
model.cuda()
train(model, d, 100, optim.Adam(model.parameters(), lr=f, weight_decay=0.0003))
# pickle.dump(model, open('models/text_model_' + str(a) + '_' + str(b) + '_' + str(c) + '_' + str(d) + '_' + str(e) + '_' + str(f) + '.pkl', 'wb'))
score = evaluate(model.cpu(), 100)
print(score)
if score > best_score:
torch.save(model, 'models/text_model.pt')
best_metrics = [a, b, c, d, e, f]
best_score = score
print(best_score)
print(best_metrics)
# model = UnimodalTextModel(768*3, 768, 300, 1, 0.2)
# if torch.cuda.is_available():
# model.cuda()
# train(20, 2, optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.0003))
# pickle.dump(model, open('text_model.pkl', 'wb'))
# model = pickle.load(open('text_model.pkl', 'rb'))
# evaluate(model.cpu(), 2)
| true |
b9ef5de462298fcb713ca6ff2397ab57816ac0b3 | Python | Drawiin/algoritmos-basicos | /OnibusFlexivel/routeCalculator.py | UTF-8 | 6,336 | 3.703125 | 4 | [
"MIT"
] | permissive | from math import sqrt
from math import factorial
from itertools import permutations
import os
def calculateDistance(pointA, pointB):
    """Return the Euclidean distance between two (x, y) points."""
    dx = pointA[0] - pointB[0]
    dy = pointA[1] - pointB[1]
    return sqrt(float(dx * dx) + float(dy * dy))
def calculateRouteLength(route):
    """Return the total length of `route`: the sum of the distances between
    each consecutive pair of points (0.0 for an empty or single-point route).
    """
    return sum(
        (calculateDistance(start, end) for start, end in zip(route, route[1:])),
        float(0),
    )
def calculateBestRoute(route):
    """Exhaustively try every ordering of `route` and return the shortest.

    Brute force: O(n!) permutations.  Returns a (best_route_list, length)
    pair; on ties the first minimal permutation wins, matching a strict
    less-than scan.
    """
    best = min(permutations(route), key=calculateRouteLength)
    return (list(best), calculateRouteLength(best))
def readPoint(index):
    """Prompt for point number `index`+1 and return it as an (x, y) int tuple."""
    raw = input('ponto {} entre com x e y separados por espaço: '.format(index + 1))
    return tuple(int(token) for token in raw.split(' '))
if __name__ == "__main__":
print('')
print('+----------------------------------------------+')
print('| |')
print('| Pressione Enter para começar |')
print('| |')
print('+----------------------------------------------+')
input('')
print('+----------------------------------------------+')
print('| |')
print('| Quantos pontos o caminho do ônibus tem ? |')
print('| |')
print('+----------------------------------------------+')
pathLength = int(input('>'))
os.system('cls' if os.name == 'nt' else 'clear')
print('+----------------------------------------------+')
print('| |')
print('| Quantos pontos de passageiros tem ? |')
print('| |')
print('+----------------------------------------------+')
passangersLength = int(input('>'))
os.system('cls' if os.name == 'nt' else 'clear')
print('+----------------------------------------+')
print('| |')
print('| Entre com os pontos da rota do ônibus |')
print('| |')
print('+----------------------------------------+')
print('| | | | | | | | | | | | | | | | | | | | | ')
print('V V V V V V V V V V V V V V V V V V V V V ')
print('')
path = [readPoint(i) for i in range(pathLength)]
os.system('cls' if os.name == 'nt' else 'clear')
print('+----------------------------------------------+')
print('| |')
print('| Entre com os pontos da rota dos passageiros |')
print('| |')
print('+----------------------------------------------+')
print('| | | | | | | | | | | | | | | | | | | | | | | | ')
print('V V V V V V V V V V V V V V V V V V V V V V V V ')
print('')
passangers = [readPoint(i) for i in range(passangersLength)]
os.system('cls' if os.name == 'nt' else 'clear')
print('+----------------------------------------------+')
print('| |')
print('| pontos do caminho do ônibus |')
print('| |')
print('+----------------------------------------------+')
print('| | | | | | | | | | | | | | | | | | | | | | | | ')
print('V V V V V V V V V V V V V V V V V V V V V V V V ')
print('')
print(path)
print('')
print('+----------------------------------------------+')
print('| |')
print('| pontos de passageiros |')
print('| |')
print('+----------------------------------------------+')
print('| | | | | | | | | | | | | | | | | | | | | | | | ')
print('V V V V V V V V V V V V V V V V V V V V V V V V ')
print('')
print(passangers)
print('')
print('+----------------------------------------------+')
print('| |')
print('| Pressione Enter para continuar |')
print('| |')
print('+----------------------------------------------+')
input('')
route = path + passangers
if(len(route) > 8):
os.system('cls' if os.name == 'nt' else 'clear')
print('+--------------------------------------------------------------------+')
print('| |')
print('| A rota que você está prestes a calcular contem vários pontos |')
print('| isso pode levar vários minutos........ou anos |')
print('| pressione Enter para continuar |')
print('| |')
print('+--------------------------------------------------------------------+')
input('')
os.system('cls' if os.name == 'nt' else 'clear')
print('calculando todas as', factorial(len(route)), 'rotas possiveis.................')
result = calculateBestRoute(route)
print('+----------------------------------------------+')
print('| |')
print('| A melhor rota encontrada foi |')
print('| |')
print('+----------------------------------------------+')
print('| | | | | | | | | | | | | | | | | | | | | | | | ')
print('V V V V V V V V V V V V V V V V V V V V V V V V ')
print('')
print(result[0])
print('')
print('Com custo de ')
print(result[1])
print('')
print('+----------------------------------------------+')
print('| |')
print('| Pressione Enter para Para Encerrar |')
print('| |')
print('+----------------------------------------------+')
input('')
| true |
e6ee1f8b94b9aa9efc19c62b22747268b4aa63d3 | Python | masa-k0101/Self-Study_python | /Dfz/Cp5/c5_7_gradient_check.py | UTF-8 | 1,009 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os, sys
import numpy as np
from c5_6_two_layer_net import TwoLayerNet
sys.path.append(os.pardir) # パスに親ディレクトリ追加
from c3_2_data_mnist import load_mnist
# MNISTの訓練データとテストデータ読み込み
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
# 2層のニューラルワーク生成
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
# 検証用のデータ準備
x_batch = x_train[:3]
t_batch = t_train[:3]
# 数値微分と誤差逆伝播法で勾配算出
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)
# 各重みの差を確認
for key in grad_numerical.keys():
# 差の絶対値の算出
diff = np.abs(grad_backprop[key] - grad_numerical[key])
# 平均と最大値を表示
print(f"{key}: [差の平均]{np.average(diff):.10f} [最大の差]{np.max(diff):.10f}") | true |
315be8a39eb0c8d360671a4f6b3b12a9d17673e8 | Python | sriniketh28/Python-DSA | /DSA-Questions/queue-using-stacks.py | UTF-8 | 681 | 3.640625 | 4 | [] | no_license | class Queue:
def __init__(self):
self.stack1 = []
self.stack2 = []
def isEmpty(self):
return True if len(self.stack2)==0 and len(self.stack1)==0 else False
def enQueue(self, data):
self.stack1.append(data)
def deQueue(self):
if not self.stack2:
while self.stack1:
self.stack2.append(self.stack1.pop())
if self.stack2:
return self.stack2.pop()
else:
return -1
queue1 = Queue()
print(queue1.deQueue())
print(queue1.isEmpty())
queue1.enQueue(10)
queue1.enQueue(20)
queue1.enQueue(30)
queue1.enQueue(40)
print(queue1.deQueue())
print(queue1.isEmpty())
| true |
4404d158daaee6e9e719e872fb4cdf0a84c6ee62 | Python | FlogFr/hospital | /tests/loading.py | UTF-8 | 2,156 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""Tests for :py:mod:`hospital.loading` module."""
import os
import unittest
try:
from unittest import mock
except ImportError: # Python 2.x fallback.
import mock
from hospital import HealthCheck
from hospital.loading import HealthCheckLoader
class HealthCheckLoaderTestCase(unittest.TestCase):
"""Tests around :py:class:`hospital.loading.HealthCheckLoader`."""
def test_is_health_check(self):
"""HealthCheckLoader.is_health_check checks ``is_healthcheck`` attr."""
loader = HealthCheckLoader()
self.assertTrue(loader.is_healthcheck(mock.Mock(is_healthcheck=True)))
class FakeHealthCheck(HealthCheck):
def test_fake(self):
pass
self.assertTrue(loader.is_healthcheck(FakeHealthCheck('test_fake')))
class FakeTestCase(unittest.TestCase):
def test_fake(self):
pass
self.assertFalse(loader.is_healthcheck(FakeTestCase('test_fake')))
def test_discovery_by_python_path(self):
"""HealthCheckLoader discovers healthchecks in Python packages."""
loader = HealthCheckLoader()
suite = loader.discover('hospital.healthchecks.predictable')
self.assertEqual(suite.countTestCases(), 2)
def test_discovery_of_module_by_python_path_in_stdlib(self):
"""HealthCheckLoader can scan locations in stdlib."""
for location in ['datetime', 'xml']: # A module and a package.
loader = HealthCheckLoader()
suite = loader.discover(location)
self.assertEqual(suite.countTestCases(), 0)
def test_discovery_of_module_by_python_path_outside_project(self):
"""HealthCheckLoader can scan locations outside working directory."""
original_dir = os.getcwd()
try:
# Move to a place that is not parent of
# 'hospital.healthchecks.predictable'.
os.chdir(os.path.dirname(__file__))
loader = HealthCheckLoader()
suite = loader.discover('hospital')
self.assertTrue(suite.countTestCases() > 0)
finally:
os.chdir(original_dir)
| true |
4ba4ea600d6ce301a352b7b1a945d24518fc700c | Python | JohnOyster/ComputerVision | /HOG/svm_train.py | UTF-8 | 5,536 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""CIS 693 - Project 2.
Author: John Oyster
Date: June 6, 2020
Description:
DISCLAIMER: Comment text is taken from course handouts and is copyright
2020, Dr. Almabrok Essa, Cleveland State University,
Objectives:
2. Write a program to train and test the linear Support Vector
Machine (SVM) classifier for pedestrian detection using the extracted
features from part 1.
a) Train the SVM classifier with HOGfeatures of the training
set (use built-in function/library (e.g. from sklearn.svm import SVC)).
b) Classify the HOGfeatures
of the testing images (both positive and negatives samples)using
the trained SVM model (use built-in function/library).
c) Compute the accuracy, false positive rate, and the miss rate.
3. Repeat the experiment in part 2 for training the SVM classifier
with different set of kernel functions (e. g. rbf, polynomial, etc.).
Assumptions:
1. Unless this statement is remove, 8-bit pixel values
"""
# Copyright (c) 2020. John Oyster in agreement with Cleveland State University.
import os.path
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import cv2
from sklearn.svm import SVC
import hog
def get_good_train_set(directory="./NICTA/TrainSet/PositiveSamples"):
test_files = [join(directory, image) for image in listdir(directory) if isfile(join(directory, image))]
return test_files
def get_bad_train_set(directory="./NICTA/TrainSet/NegativeSamples"):
test_files = [join(directory, image) for image in listdir(directory) if isfile(join(directory, image))]
return test_files
def get_good_test_set(directory="./NICTA/TestSet/PositiveSamples"):
test_files = [join(directory, image) for image in listdir(directory) if isfile(join(directory, image))]
return test_files
def get_bad_test_set(directory="./NICTA/TestSet/NegativeSamples"):
test_files = [join(directory, image) for image in listdir(directory) if isfile(join(directory, image))]
return test_files
def get_hog_descriptor(image):
"""The magic from hog.py.
:param image: Input Grayscale Image
:type: np.ndarray
:return: Output HOG Descriptor
:rtype: np.ndarray
"""
image = cv2.resize(image, (64, 128))
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = hog.gamma_correction(image, gamma_value)
gradient = hog.compute_gradients(image)
cell_histograms, _ = hog.compute_weighted_vote(gradient)
hog_blocks, _ = hog.normalize_blocks(cell_histograms)
return hog_blocks.ravel()
if __name__ == '__main__':
gamma_value = 1.0
good_set = get_good_train_set()
image_count = len(good_set)
good_set_hog = np.empty((image_count, 3780))
image_index = 0
for image_file in good_set:
test_image = cv2.imread(image_file)
good_set_hog[image_index] = get_hog_descriptor(test_image)
image_index += 1
good_set_tag = np.ones(image_count)
bad_set = get_bad_train_set()
image_count = len(bad_set)
bad_set_hog = np.empty((image_count, 3780))
image_index = 0
for image_file in bad_set:
test_image = cv2.imread(image_file)
bad_set_hog[image_index] = get_hog_descriptor(test_image)
image_index += 1
bad_set_tag = np.zeros(image_count)
good_test_set = get_good_test_set()
good_test_image_count = len(good_test_set)
good_test_set_hog = np.empty((good_test_image_count, 3780))
image_index = 0
for image_file in good_test_set:
test_image = cv2.imread(image_file)
good_test_set_hog[image_index] = get_hog_descriptor(test_image)
image_index += 1
bad_test_set = get_bad_test_set()
bad_test_image_count = len(bad_test_set)
bad_test_set_hog = np.empty((bad_test_image_count, 3780))
image_index = 0
for image_file in bad_test_set:
test_image = cv2.imread(image_file)
bad_test_set_hog[image_index] = get_hog_descriptor(test_image)
image_index += 1
train_data = np.concatenate((good_set_hog, bad_set_hog))
tag_data = np.concatenate((good_set_tag, bad_set_tag))
C = 1.0 # SVM regularization parameter
lin_svc = SVC(kernel='linear', C=C).fit(train_data, tag_data)
rbf_svc = SVC(kernel='rbf', C=C).fit(train_data, tag_data)
poly_svc = SVC(kernel='poly', C=C, degree=2).fit(train_data, tag_data)
# title for the classifiers
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial kernel']
for i, clf in enumerate((lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
#plt.subplot(2, 2, i + 1)
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
good_test_results = clf.predict(good_test_set_hog)
#print(good_test_results)
bad_test_results = clf.predict(bad_test_set_hog)
#print(bad_test_results)
print("Results for {}".format(titles[i]))
print("Accuracy for Positive Cases: {}".format(np.sum(good_test_results) / good_test_image_count * 100))
print("Accuracy for Negative Cases: {}".format(100 - (np.sum(bad_test_results) / bad_test_image_count * 100)))
del good_test_results, bad_test_results
| true |
f2f7bd53eaec8f1d2403118a2f8df1e9bc68518d | Python | alchan/COVID-visualization-Chan-Harwell | /code/CovidChoropleth.py | UTF-8 | 12,855 | 2.546875 | 3 | [] | no_license | import dash #required: pip install dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output, State
import io
import json
from math import inf, ceil
import numpy as np
import os
import pandas as pd
import plotly.express as px #version 5.3.1 used
import requests
from urllib.request import urlopen
# load U.S. counties and fips code data
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
counties = json.load(response)
# Download csv from NY Times raw github
url = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"
download = requests.get(url).content
# Read data into pandas
dict_dtypes = {'county' : str,
'fips' : str,
'state' : str
}
df = pd.read_csv(io.StringIO(download.decode('utf-8')), dtype=dict_dtypes)
# ____Geographic Exceptions ____
# -------New York City
# Makes list of new rows for NYC Counties, then convert it to dataframe and appends to df
# Not directly appended to df for performance optimization
ny = df[df['county'] == 'New York City']
nyList = []
for row in ny.itertuples():
day, state, cases, deaths = row[1], row[3], row[5], row[6]
nyCounties = {'New York': '36061', 'Kings': '36047', 'Queens': '36081', 'Bronx': '36005', 'Richmond': '36085'}
for county, fips in nyCounties.items():
nyList.append((day, county, state, fips, cases, deaths))
df = df.append(pd.DataFrame(nyList, columns=['date', 'county', 'state', 'fips', 'cases', 'deaths']), ignore_index=True)
# -------Alaska
# Note: Wade Hampton Census Area County has no reported cases to date
# Makes new entries for Yakutat and Hoonah-Angoon Separately, as well as Bristol Bay and Lake and Peninsula Borough counties
al = df[(df['county'] == 'Yakutat plus Hoonah-Angoon') | (df['county'] == 'Bristol Bay plus Lake and Peninsula')]
alaskaList = []
for row in al.itertuples():
day, initCounty, state, cases, deaths = row[1], row[2], row[3], row[5], row[6]
yakHoonah = {'Yakutat City and Borough': '02282', 'Hoonah-Angoon Census Area': '02105'}
bristolLake = {'Bristol Bay Borough': '02060', 'Lake and Peninsula Borough': '02164'}
chosen = (yakHoonah, bristolLake) [initCounty == 'Bristol Bay plus Lake and Peninsula']
for county, fips in chosen.items():
alaskaList.append((day, county, state, fips, cases, deaths))
df = df.append(pd.DataFrame(alaskaList, columns=['date', 'county', 'state', 'fips', 'cases', 'deaths']), ignore_index=True)
#_____Format Data_____
df['cases'].fillna(0)
df['deaths'].fillna(0)
df['logcases'] = df['cases']
df['logdeaths'] = df['deaths']
df['logcases'] = df['logcases'].apply(lambda x: x if x is not np.NaN else 0)
df['logcases'] = df['logcases'].apply(lambda x: np.log10(x))
df['logdeaths'] = df['logdeaths'].apply(lambda x: x if x is not np.NaN else 0)
df['logdeaths'] = df['logdeaths'].apply(lambda x: np.log10(x))
pd.set_option('precision', 0)
# Fill in missing data for Oglala Lakota county
daterange = pd.date_range('2020-03-15', df['date'].max())
missingList = []
for dte in daterange:
day = dte.strftime('%Y-%m-%d')
county= 'Oglala Lakota'
fip = '46113'
state = 'South Dakota'
missingList.append((day, county, state, fip, 0, 0, 0, 0))
df = df.append(pd.DataFrame(missingList,
columns=['date', 'county', 'state', 'fips', 'cases', 'deaths', 'logcases', 'logdeaths']), ignore_index=True)
# Function to create and save image of choropleth to assets folder
def save_frame(year_slctd, data_slctd, url):
dff = df[df["date"] == year_slctd]
log = ('logdeaths', 'logcases') [data_slctd == 'cases']
maxColor = dff[log].max() + 1
hoverBool = (True, False) if data_slctd == 'cases' else (False,True)
fig = px.choropleth(
data_frame=dff,
geojson=counties,
locations='fips',
scope="usa",
color=log,
hover_data={'county':True, data_slctd:True, 'state':True, 'logcases':hoverBool[0], 'logdeaths':hoverBool[1]},
color_continuous_scale="Oryel",
range_color= (0, maxColor),
labels={'fips': 'County Code', 'county': 'County Name',
'cases': 'Number of Cases', 'deaths': 'Reported Deaths',
'logcases': 'Log Scaled Cases', 'logdeaths': 'Log Scaled Deaths',
'state': 'State'},
template='plotly_dark',
width=1800,
height=690
)
annotation = {
'xref': 'paper',
'yref': 'paper',
'x': 0.01,
'y': 0.99,
'text': str(year_slctd),
'showarrow': False,
'font': {'size': 24, 'color': 'white'}
}
fig.add_annotation(annotation)
tickV = [dff[log].min(), dff[log].quantile(0.5), dff[log].quantile(0.9), dff[log].max()]
for i,v in enumerate(tickV):
if np.isnan(tickV[i]) or tickV[i] == -inf:
tickV[i] = 0
tickT = [int(10**tickV[0]), int(10**tickV[1]),
int(10**tickV[2]), int(10 **tickV[3])]
titleText = ("Reported Deaths", "Reported Cases") [log == 'logcases']
fig.update_layout(coloraxis_colorbar=dict(
title=titleText,
tickvals=tickV,
ticktext=tickT
),
geo = dict(
landcolor = '#a794b9',
lakecolor = '#04DCF6'
),
title={
'text': titleText,
'y':0.91,
'x':0.5,
'xanchor':'center',
'yanchor':'top',
'font': {
'size':27,
'color':"white"}
}
)
fig.write_image(url)
# Populate any missing images since time of last data pull and create dateIndexer dict for Date Slider
dirname = os.path.dirname(__file__)
assets_path = os.path.join(dirname, 'assets')
if not os.path.exists(assets_path):
os.mkdir(assets_path)
os.mkdir(assets_path + '/Cases')
os.mkdir(assets_path + '/Deaths')
dateIndexer = {}
daterange, i = pd.date_range(df['date'].min(), df['date'].max()), 0
for dte in daterange:
day = dte.strftime('%Y-%m-%d')
dateIndexer[i] = day
i += 1
urlc, urld = assets_path + '/Cases/' +'c' + day + '.png', assets_path + '/Deaths/' + 'd' + day + '.png'
if not os.path.exists(urlc):
save_frame(day, 'cases', urlc)
if not os.path.exists(urld):
save_frame(day, 'deaths', urld)
# ------------------------------------------------App Layout-------------------------------------------------------------
app = dash.Dash(__name__)
bg = 'white'
mx = max(dateIndexer, key=int)
app.layout = html.Div([
dcc.Interval(
id='frame-interval',
interval=200, # Animation speed in milliseconds (lower is faster)
n_intervals=0,
disabled=True
),
html.H1("U.S. COVID-19 Statistics by County", style={'text-align': 'center', 'backgroundColor': bg}),
html.Div(children=[
dcc.RadioItems(id="slct_data", options=[
{'label': 'Show Cases', 'value': 'cases'},
{'label': 'Show Deaths', 'value': 'deaths'}],
value='cases',
labelStyle={'display': 'inline-block', 'backgroundColor': 'yellow', 'fontSize':20},
inputStyle={"margin-left": "20px"})],
style = {'width' : '100%', 'display' : 'flex', 'align-items': 'center', 'justify-content' : 'center'}),
html.Br(),
html.Div(children=[
dcc.DatePickerSingle(
id='slct_day',
min_date_allowed= df['date'].min(),
max_date_allowed= df['date'].max(),
date=df['date'].min())],
style = {'width' : '100%', 'display' : 'flex', 'align-items': 'center', 'justify-content' : 'center', 'background':bg}),
html.Br(),
html.Div(children=[
dcc.Graph(id='covid_map', figure={})],
style = {'width' : '100%', 'display' : 'flex', 'align-items': 'center', 'justify-content' : 'center', 'background':bg}),
html.Div(children=(
html.H1("Choropleth Frame Slider", style={'text-align': 'center', 'backgroundColor': 'Yellow'})
)
),
html.Div('Manually Drag Slider or Play as Animation', style={'fontSize': 20, 'color': 'gray', 'text-align':'center'}),
html.Br(),
html.Div(children=[
html.Button('Play/Pause Animation', id='play-anim', n_clicks=0)],
style = {'width' : '100%', 'display' : 'flex', 'align-items': 'center', 'justify-content' : 'center', 'background':bg}),
html.Br(),
# Slider only accepts integers. The dateIndexer dict is used to convert each int to a date
dcc.Slider(
id='frameSlider',
min=0,
max=max(dateIndexer, key=int),
value=0,
marks = {0:dateIndexer[0], ceil(mx/2):dateIndexer[ceil(mx/2)], mx: dateIndexer[mx]},
updatemode = 'drag'
),
html.Div(id='frameBox', children=[
html.Img(
src='/assets/Cases/c2020-01-21.png'
)],
style = {'width' : '100%', 'display' : 'flex', 'align-items': 'center', 'justify-content' : 'center', 'background':bg}),
html.Br()
#END OF PARENT DIV
], style = {'background':bg})
# -------------------------------Application Callback Functions---------------------------------------------
# Update Main choropleth when date or data is changed
@app.callback(
Output(component_id='covid_map', component_property='figure'),
[Input(component_id='slct_day', component_property='date'),
Input(component_id='slct_data', component_property='value')]
)
def update_graph(year_slctd, data_slctd):
dff = df[df["date"] == year_slctd]
# Choose log column to use for color scale
log = ('logdeaths', 'logcases') [data_slctd == 'cases']
maxColor = dff[log].max() + 1
hoverBool = (True, False) if data_slctd == 'cases' else (False,True)
fig = px.choropleth(
data_frame=dff,
geojson=counties,
locations='fips',
scope="usa",
color=log,
hover_data={'county':True, data_slctd:True, 'state':True, 'logcases':hoverBool[0], 'logdeaths':hoverBool[1]},
color_continuous_scale="Oryel",
range_color= (0, maxColor),
labels={'fips': 'County Code', 'county': 'County Name',
'cases': 'Number of Cases', 'deaths': 'Reported Deaths',
'logcases': 'Log Scaled Cases', 'logdeaths': 'Log Scaled Deaths',
'state': 'State'},
template='plotly_dark',
width=1800,
height=690
)
annotation = {
'xref': 'paper',
'yref': 'paper',
'x': 0.01,
'y': 0.99,
'text': str(year_slctd),
'showarrow': False,
'font': {'size': 24, 'color': 'white'}
}
fig.add_annotation(annotation)
tickV = [dff[log].min(), dff[log].quantile(0.5), dff[log].quantile(0.9), dff[log].max()]
for i,v in enumerate(tickV):
if np.isnan(tickV[i]) or tickV[i] == -inf:
tickV[i] = 0
tickT = [int(10**tickV[0]), int(10**tickV[1]),
int(10**tickV[2]), int(10 **tickV[3])]
titleText = ("Reported Deaths", "Reported Cases") [log == 'logcases']
fig.update_layout(coloraxis_colorbar=dict(
title=titleText,
tickvals=tickV,
ticktext=tickT,
),
geo = dict(
landcolor = '#a794b9',
lakecolor = '#04DCF6'
),
title={
'text': titleText,
'y':0.91,
'x':0.5,
'xanchor':'center',
'yanchor':'top',
'font': {
'size':27,
'color':"white"}
},
)
return fig
# Update Map Frame Shown when slider is moved
@app.callback(
Output(component_id='frameBox', component_property='children'),
[Input(component_id='frameSlider', component_property='value'),
Input(component_id='slct_data', component_property='value')]
)
def update_frame(sliderKey, data_slct):
day = dateIndexer[sliderKey]
url = ('/assets/Deaths/' + 'd' + day + '.png', '/assets/Cases/' +'c' + day + '.png') [data_slct == 'cases']
frame = html.Img(src=url)
return frame
# Increments Frame Slider whenever interval is enabled via Play/Pause button
@app.callback(
Output(component_id='frameSlider', component_property='value'),
Input(component_id='frame-interval', component_property='n_intervals'),
State('frameSlider', 'value')
)
def playFrames(n, slideVal):
if slideVal < max(dateIndexer, key=int):
return slideVal + 1
return 0
# Enable/Disable Interval used for animating image frames when Play/Pause button is pressed
@app.callback(
Output('frame-interval', 'disabled'),
Input('play-anim', 'n_clicks'),
State('frame-interval', 'disabled')
)
def start_stop_interval(button_clicks, disabled_state):
if button_clicks is not None and button_clicks > 0:
return not disabled_state
else:
return disabled_state
# ----------------------------------------------------------------------------
if __name__ == '__main__':
#set debug=True for development
app.run_server(debug=False) | true |
7a0e5e06e46ed4a585d3bf0dbe1ac6c549dfea3f | Python | jamtot/DailyChallenge | /IRC connecting (14mar2016)/Connection.py | UTF-8 | 979 | 2.71875 | 3 | [
"MIT"
] | permissive | input = """chat.freenode.net:6667
carrot_chompa
carrot_chompa
Ed Sheeran"""
import socket
def make_connection(input):
server, nick, user, name = input.splitlines()
server, port = server.split(":")
server_name = "*"
user_mode = 0
s = socket.socket()
s.connect((server, int(port)))
print "connected"
nickmsg = "NICK %s" % nick
usermsg = "USER %s %d %s :%s\r\n" % (user, user_mode, server_name, name)
print nickmsg
print usermsg
s.send(nickmsg+"\r\n")
s.send(usermsg+"\r\n")
print "initial details sent"
received = ""
while True:
if "\r\n" not in received:
received += s.recv(512)
lines, received = received.split("\r\n", 1)
print lines
#print received
if lines.startswith("PING"):
pong = list(lines)
pong[1] = "O"
pong = ("").join(pong)
print pong
s.send(pong+"\r\n")
# Start the client with the hard-coded connection details defined above.
make_connection(input)
| true |
bdb6d79e3e208dd9111459b1912827c2306ec33d | Python | Genionest/My_python | /Zprogram3/my_pygame/all_draw.py | UTF-8 | 1,924 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-18 20:22:32
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
import pygame
from pygame.locals import *
from sys import exit
from random import *
from math import pi
pygame.init()
screen = pygame.display.set_mode((640,480),0,32)
points = []  # mouse positions recorded so far (the "trail")
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            exit()
        if event.type == KEYDOWN:
            # Press any key to clear the screen and reset the points
            points = []
            screen.fill((255,255,255))
        if event.type == MOUSEBUTTONDOWN:
            screen.fill((255,255,255))
            # Draw a rectangle with random colour, position and size
            rc = (randint(0,255),randint(0,255),randint(0,255))
            rp = (randint(0,639),randint(0,479))
            rs = (639-randint(rp[0],639),479-randint(rp[1],479))
            pygame.draw.rect(screen,rc,Rect(rp,rs))
            # Draw a circle with random colour, centre and radius
            rc = (randint(0,255),randint(0,255),randint(0,255))
            rp = (randint(0,639),randint(0,479))
            rr = (randint(1,200))
            pygame.draw.circle(screen,rc,rp,rr)
    # Get the current mouse position
    x,y = pygame.mouse.get_pos()
    points.append((x,y))
    # Draw an arc whose sweep follows the mouse's x position:
    # x relative to the window width maps to a fraction of a full turn
    angle = (x/639.)*pi*2.
    pygame.draw.arc(screen,(0,0,0),(0,0,639,479),0,angle,3)
    # Draw an ellipse from the top-left corner to the mouse position
    pygame.draw.ellipse(screen,(0,255,0),(0,0,x,y))
    # Draw two lines, from the top-left and bottom-right corners to the mouse
    pygame.draw.line(screen,(0,0,255),(0,0),(x,y))
    pygame.draw.line(screen,(255,0,0),(640,480),(x,y))
    # Draw the trail through the recorded mouse positions
    if len(points) > 1:
        pygame.draw.lines(screen,(155,155,0),False,points,2)
    # Mostly the same as the trail above, but closed; commented out here
    # because it would overdraw the trail.
    #if len(points) >= 3:
    #    pygame.draw.polygon(screen,(0,155,155),points,2)
    # Make each recorded point a little more visible
    for p in points:
        pygame.draw.circle(screen,(155,155,155),p,3)
    pygame.display.update()
6b40327b60b1c2e733614860b31ea9bf266bae16 | Python | finleysg/image-processor | /image_processing.py | UTF-8 | 3,749 | 2.984375 | 3 | [] | no_license | # coding=utf-8
import os, sys
from PIL import Image, ImageDraw, ImageFont, ImageEnhance
from pilkit.processors import ResizeToFit
# def add_watermark(self, image):
# # Code for adding the watermark goes here.
# return self.watermark(image, "© 2016 Zoomdoggy Design", "arial black.ttf", None, 36)
def resize(image, *size):
    """Scale *image* to fit inside the bounding box given by *size*.

    *size* is a (max_width, max_height) pair, e.g. ``resize(img, 600, 600)``.
    """
    return ResizeToFit(*size).process(image)
def reduce_opacity(image, opacity):
    """
    Return a copy of *image* with its alpha channel scaled by *opacity*.

    opacity must be in [0, 1]; 0 makes the image fully transparent and 1
    leaves the alpha channel unchanged. The input image is not modified.

    Adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879
    """
    # Validate explicitly: an `assert` would be stripped under `python -O`.
    if not (0 <= opacity <= 1):
        raise ValueError("opacity must be between 0 and 1")
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    else:
        # Copy so the caller's image is left untouched by putalpha() below.
        image = image.copy()
    alpha = image.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    image.putalpha(alpha)
    return image
def calculate_position(image, position, textsize, margin):
    """Return the (x, y) anchor for text of *textsize* drawn on *image*.

    position codes: 0 centered, 1 upper left, 2 upper right,
    3 lower right, 4 lower left. *margin* is the (x, y) distance kept
    from the nearest edges (ignored when centering).

    Raises:
        ValueError: if *position* is not one of the codes above. (The
        previous behaviour was to silently return None, which only
        failed later inside PIL.)
    """
    if position == 0:  # centered
        return [(image.size[i] / 2) - (textsize[i] / 2) for i in [0, 1]]
    elif position == 1:  # upper left
        return margin
    elif position == 2:  # upper right
        return [image.size[0] - margin[0] - textsize[0], margin[1]]
    elif position == 3:  # lower right
        return [image.size[i] - margin[i] - textsize[i] for i in [0, 1]]
    elif position == 4:  # lower left
        return [margin[0], image.size[1] - margin[1] - textsize[1]]
    raise ValueError("unknown position code: %r" % (position,))
def add_watermark(image, text, font_path, position, font_scale=None, font_size=36, color=(255,255,255), opacity=0.6, margin=(30, 30)):
    """Draw *text* over *image* and return the composited result.

    image      - PIL Image instance
    text       - watermark text
    font_path  - truetype font file to render with
    position   - placement code understood by calculate_position()
    font_scale - if given, the font size starts at this fraction of the
                 image height (overrides font_size)
    font_size  - starting point size; shrunk until the text fits
    color      - RGB fill colour for the text
    opacity    - 0..1 opacity applied to the text layer
    margin     - (x, y) margin kept from the image edges
    """
    if image.mode != "RGBA":
        image = image.convert("RGBA")
    overlay = Image.new("RGBA", image.size, (0, 0, 0, 0))
    draw = ImageDraw.Draw(overlay)
    if font_scale:
        width, height = image.size
        font_size = int(font_scale * height)
    # Shrink the font until the text plus both margins fits horizontally.
    while True:
        font = ImageFont.truetype(font_path, font_size)
        textsize = draw.textsize(text, font=font)
        if textsize[0] + margin[0] * 2 < image.size[0]:
            break
        font_size -= 1
    textpos = calculate_position(image, position, textsize, margin)
    draw.text(textpos, text, font=font, fill=color)
    if opacity != 1:
        overlay = reduce_opacity(overlay, opacity)
    # The overlay doubles as its own mask: transparent pixels keep the image.
    return Image.composite(overlay, image, overlay)
def main():
    """Watermark and resize every image found under sys.argv[1].

    Usage: script.py <image_dir> <watermark_text>

    Processed copies are written to <image_dir>/processed. Files that
    cannot be opened or processed as images are reported and skipped.
    """
    if len(sys.argv) < 2:
        print("Image path is required")
        return
    if len(sys.argv) < 3:
        print("Watermark text is required")
        return
    processed_path = os.path.join(sys.argv[1], "processed")
    if not os.path.exists(processed_path):
        os.makedirs(processed_path)
    for dirpath, _dirnames, filenames in os.walk(sys.argv[1]):
        # Skip the output directory so already-processed files
        # are not processed again.
        if dirpath.endswith("processed"):
            continue
        for filename in filenames:
            try:
                image = Image.open(os.path.join(dirpath, filename))
                image = add_watermark(image, sys.argv[2], "arial.ttf", 0, font_scale=.08, margin=(30, 30))
                image = resize(image, 400, 400)
                image.save(os.path.join(processed_path, filename), quality=80, optimize=True)
            except Exception as e:
                # Deliberately broad (any unreadable file is skipped), but not
                # a bare `except:`, which would also swallow KeyboardInterrupt
                # and SystemExit.
                print(e)


if __name__ == "__main__":
    main()
1a695545a498151e06b95a2a34c760dfb2fb8104 | Python | mlyzhong/Kofiko | /MonkeyVR/PythonUDPDebugging/python_udp_listener.py | UTF-8 | 686 | 2.71875 | 3 | [] | no_license | import socket
import random
UDP_IP = "127.0.0.1"
UDP_PORT = 12345
UDP_SEND = 1111  # NOTE(review): defined but never used -- confirm before removing
print "reading IP address: " + UDP_IP + " port: " + str(UDP_PORT)
sock = socket.socket(socket.AF_INET, # Internet
                     socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
# Debug helper: answer every "getAnalog" request with four random
# comma-separated floats, simulating analog sensor readings.
while True:
    data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
    if data == "getAnalog":
        message = \
            str(random.random()) + "," + str(random.random()) + "," + \
            str(random.random()) + "," + str(random.random())
        # NOTE(review): the reply is sent to the fixed (UDP_IP, UDP_PORT) we
        # are bound to, not to `addr` (the requester) -- confirm intended.
        sock.sendto(message, (UDP_IP, UDP_PORT))
        print "getAnalog", message
    #else:
    #    print "received message:", data
| true |
92f7f2b99962fc799a618984d2177febb894758b | Python | tazle/ruuvitag-sensor | /ruuvitag_sensor/decoder.py | UTF-8 | 4,421 | 2.90625 | 3 | [
"MIT"
] | permissive | from __future__ import division
import base64
import math
import logging
log = logging.getLogger(__name__)
def get_decoder(data_type):
    '''
    Return the decoder matching the RuuviTag data type.

    Returns:
        object: UrlDecoder for data type 2 (Eddystone-URL format),
        Df3Decoder for everything else.
    '''
    return UrlDecoder() if data_type == 2 else Df3Decoder()
class UrlDecoder(object):
    '''
    Decodes data from RuuviTag url
    Protocol specification:
    https://github.com/ruuvi/sensor-protocol-for-eddystone-url

    Decoder operations are ported from:
    https://github.com/ruuvi/sensor-protocol-for-eddystone-url/blob/master/index.html

    Payload layout (before the URL-safe base64 encoding):
    0:   uint8_t     format;          // (0x01 = realtime sensor readings)
    1:   uint8_t     humidity;        // one lsb is 0.5%
    2-3: uint16_t    temperature;     // Signed 8.8 fixed-point notation.
    4-5: uint16_t    pressure;        // (-50kPa)
    6-7: uint16_t    time;            // seconds (now from reset)
    The bytes for temperature, pressure and time are swaped during the encoding
    '''

    def _get_temperature(self, decoded):
        '''Return temperature in celsius'''
        # Low 7 bits of byte 2 are the integer part, byte 3 is hundredths;
        # the high bit of byte 2 carries the sign.
        temp = (decoded[2] & 127) + decoded[3] / 100
        sign = (decoded[2] >> 7) & 1
        if sign == 0:
            return round(temp, 2)
        return round(-1 * temp, 2)

    def _get_humidity(self, decoded):
        '''Return humidity % (one lsb is 0.5%)'''
        return decoded[1] * 0.5

    def _get_pressure(self, decoded):
        '''Return air pressure hPa (stored with a -50 kPa offset)'''
        pres = ((decoded[4] << 8) + decoded[5]) + 50000
        return pres / 100

    def decode_data(self, encoded):
        '''
        Decode sensor data.

        Returns:
            dict: Sensor values, or None if the payload is not valid.
        '''
        try:
            identifier = None
            if len(encoded) > 8:
                # Anything beyond the 8 data characters is a tag identifier.
                identifier = encoded[8:]
                encoded = encoded[:8]
            # URL-safe alphabet: '-' and '_' stand in for '+' and '/'.
            decoded = bytearray(base64.b64decode(encoded, '-_'))
            return {
                'temperature': self._get_temperature(decoded),
                'humidity': self._get_humidity(decoded),
                'pressure': self._get_pressure(decoded),
                'identifier': identifier
            }
        except Exception:
            # Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; only data errors should be logged here.
            log.exception('Encoded value: %s not valid', encoded)
            return None
class Df3Decoder(object):
    '''
    Decodes data from RuuviTag with Data Format 3
    Protocol specification:
    https://github.com/ruuvi/sensor-protocol-for-eddystone-url
    '''

    def _get_temperature(self, data):
        '''Return temperature in celsius'''
        # Byte 2 holds sign bit + integer part, byte 3 holds hundredths.
        magnitude = (data[2] & ~(1 << 7)) + (data[3] / 100)
        is_negative = ((data[2] >> 7) & 1) == 1
        if is_negative:
            return round(-1 * magnitude, 2)
        return round(magnitude, 2)

    def _get_humidity(self, data):
        '''Return humidity %'''
        return data[1] * 0.5

    def _get_pressure(self, data):
        '''Return air pressure hPa'''
        raw = (data[4] << 8) + data[5]
        return (raw + 50000) / 100

    def _twos_complement(self, value, bits):
        '''Interpret *value* as a signed two's-complement number of *bits* bits.'''
        sign_bit = 1 << (bits - 1)
        if value & sign_bit:
            return value - (1 << bits)
        return value

    def _get_acceleration(self, data):
        '''Return acceleration mG as an (x, y, z) tuple'''
        words = ((data[i] << 8) + data[i + 1] for i in (6, 8, 10))
        return tuple(self._twos_complement(word, 16) for word in words)

    def _get_battery(self, data):
        '''Return battery mV'''
        return (data[12] << 8) + data[13]

    def decode_data(self, data):
        '''
        Decode sensor data.

        Returns:
            dict: Sensor values, or None when *data* is not a valid payload.
        '''
        try:
            raw = bytearray.fromhex(data)
            acc_x, acc_y, acc_z = self._get_acceleration(raw)
            return {
                'humidity': self._get_humidity(raw),
                'temperature': self._get_temperature(raw),
                'pressure': self._get_pressure(raw),
                'acceleration': math.sqrt(acc_x * acc_x + acc_y * acc_y + acc_z * acc_z),
                'acceleration_x': acc_x,
                'acceleration_y': acc_y,
                'acceleration_z': acc_z,
                'battery': self._get_battery(raw)
            }
        except Exception:
            log.exception('Value: %s not valid', data)
            return None
| true |
c3ed155c5d31cb9638c66fd9d76980777f6c4246 | Python | urentia/haphpipe | /haphpipe/stages/assemble_scaffold.py | UTF-8 | 5,256 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
from haphpipe.utils import sysutils
from haphpipe.utils import sequtils
from haphpipe.utils import alignutils
__author__ = 'Matthew L. Bendall'
__copyright__ = "Copyright (C) 2019 Matthew L. Bendall"
def stageparser(parser):
    """Register assemble_scaffold's command-line arguments on *parser*.

    Arguments are grouped as Input/Output, Scaffold options and Settings;
    the stage entry point is attached via ``parser.set_defaults(func=...)``
    so ``args.func(**...)`` dispatches here from console().
    """
    group1 = parser.add_argument_group('Input/Output')
    group1.add_argument('--contigs_fa', type=sysutils.existing_file,
                        required=True,
                        help='Fasta file with assembled contigs')
    group1.add_argument('--ref_fa', type=sysutils.existing_file,
                        required=True,
                        help='''Fasta file with reference genome to scaffold
                                against''')
    group1.add_argument('--outdir', type=sysutils.existing_dir, default='.',
                        help='Output directory')
    group2 = parser.add_argument_group('Scaffold options')
    group2.add_argument('--seqname', default='sample01',
                        help='Name to append to scaffold sequence.')
    group3 = parser.add_argument_group('Settings')
    group3.add_argument('--keep_tmp', action='store_true',
                        help='Additional options')
    group3.add_argument('--quiet', action='store_true',
                        help='''Do not write output to console
                                (silence stdout and stderr)''')
    group3.add_argument('--logfile', type=argparse.FileType('a'),
                        help='Append console output to this file')
    group3.add_argument('--debug', action='store_true',
                        help='Print commands but do not run')
    parser.set_defaults(func=assemble_scaffold)
def assemble_scaffold(
        contigs_fa=None, ref_fa=None, outdir='.',
        seqname='sample01',
        keep_tmp=False, quiet=False, logfile=None, debug=False
    ):
    """ Pipeline step to assemble contigs to reference scaffold

    Args:
        contigs_fa (str): Path to fasta file with assembled contigs
        ref_fa (str): Path to reference fasta file
        outdir (str): Path to output directory
        seqname (str): Name to append to scaffold sequence
        keep_tmp (bool): Do not delete temporary directory
        quiet (bool): Do not write output to console
        logfile (file): Append console output to this file
        debug (bool): Print commands but do not run

    Returns:
        out_scaffold (str): Path to scaffold FASTA. Reference positions that
            were not covered have 'n'
        out_imputed (str): Path to imputed FASTA. Reference positions that
            were not covered have reference base.
        out_aln (str): Path to FASTA alignment between scaffold and
            reference.
        out_padded (str): Path to output with all contigs aligned to
            reference.
    """
    # Check that the required MUMmer executables are on PATH
    sysutils.check_dependency('nucmer')
    sysutils.check_dependency('delta-filter')
    sysutils.check_dependency('show-tiling')

    # Output paths (all inside outdir)
    out_scaffold = os.path.join(outdir, 'scaffold_assembly.fa')
    out_imputed = os.path.join(outdir, 'scaffold_imputed.fa')
    out_aln = os.path.join(outdir, 'scaffold_aligned.fa')
    out_padded = os.path.join(outdir, 'scaffold_padded.out')

    # Temporary directory (removed at the end unless keep_tmp)
    tempdir = sysutils.create_tempdir(
        'assemble_scaffold', None, quiet, logfile
    )

    # Create fasta file with sequence IDs only (remove description)
    tmp_contigs_fa = sequtils.clean_seqnames_file(contigs_fa, tempdir)

    # Align contigs to the reference; padded per-contig alignments are
    # streamed to out_padded while the assembly runs.
    with open(out_padded, 'w') as pad_fh:
        scaffolds = alignutils.assemble_to_ref(
            tmp_contigs_fa, ref_fa, tempdir, pad_fh=pad_fh,
            quiet=quiet, logfile=logfile, debug=debug
        )

    # Output scaffolds as FASTA (uncovered reference positions are 'n')
    with open(out_scaffold, 'w') as outh:
        for ref in sorted(scaffolds.keys()):
            n = '%s.%s' % (ref.split('.')[0], seqname)
            s = scaffolds[ref].scaffold()
            print('>%s\n%s' % (n, sequtils.wrap(s)), file=outh)

    # Output imputed as FASTA (uncovered positions filled with the reference base)
    with open(out_imputed, 'w') as outh:
        for ref in sorted(scaffolds.keys()):
            n = '%s.%s' % (ref.split('.')[0], seqname)
            s = scaffolds[ref].imputed()
            print('>%s\n%s' % (n, sequtils.wrap(s)), file=outh)

    # Output alignments for other pipeline stages (reference row then query row)
    with open(out_aln, 'w') as outh:
        for ref in sorted(scaffolds.keys()):
            n = '%s.%s' % (ref.split('.')[0], seqname)
            print('>REF|%s\n%s' % (n, scaffolds[ref].raln()), file=outh)
            print('>%s\n%s' % (n, scaffolds[ref].qaln()), file=outh)

    if not keep_tmp:
        sysutils.remove_tempdir(tempdir, 'assemble_scaffold', quiet, logfile)

    return out_scaffold, out_imputed, out_aln, out_padded
def console():
    """Command-line entry point: build the parser, register this stage's
    arguments and dispatch to the configured stage function.

    Returns:
        None
    """
    arg_parser = argparse.ArgumentParser(
        description='Assemble contigs to genome.',
        formatter_class=sysutils.ArgumentDefaultsHelpFormatterSkipNone,
    )
    stageparser(arg_parser)
    parsed = arg_parser.parse_args()
    parsed.func(**sysutils.args_params(parsed))


if __name__ == '__main__':
    console()
| true |
2cf334787a84d64e41ea8184951f9ac77ca77eaf | Python | keep999Inchina/stm32-hello-world | /ai&ml/helloml-iamaidiot/有限复仇者.py | UTF-8 | 2,393 | 2.75 | 3 | [
"MIT"
] | permissive | # type善人=0,复仇者=1,恶人=2,行标为成员,列标1-extent,2-life,3-memory,4-type
from numpy import random
import matplotlib.pyplot as plt
from pandas import DataFrame
# Iterated game over 9 agents; a pandas DataFrame holds per-agent state
# (see the header comment for the intended type/column meanings).
dic0 = {'life': [5] * 9, 'type': [0] * 9,
        'memory': [[]] * 9, 'extent': [0] * 9}
# NOTE(review): [[]] * 9 creates nine references to the SAME list, so any
# in-place change to one agent's 'memory' is shared by all -- confirm.
frame = DataFrame(dic0)
frame.name = '有限复仇者'
frame.index.name = '编号'
# Assign each agent a random type; this is numpy.random.randint, whose
# upper bound is exclusive, so values are only 0 or 1.
for x in range(9):
    frame.iat[x, 3] = random.randint(0, 2)
c = 0
d = 0
while d == 0:
    while c < 10:
        counter = frame['type'].value_counts()
        counter.name = c
        recorder = DataFrame(counter)
        a = random.randint(0, 8)
        b = random.randint(0, 8)
        # NOTE(review): precedence makes this
        # `frame.iat[a, 3] or (frame.iat[b, 3] < 0)`, i.e. the loop breaks
        # whenever agent a's value is non-zero -- probably not intended.
        if frame.iat[a, 3] or frame.iat[b, 3] < 0:
            break
        if a != b:
            # Pairwise interaction; the payoff branch is chosen by the sum
            # of both agents' values in column 3.
            # NOTE(review): the .iat column positions used below (2, 3, 4)
            # do not obviously match the dict insertion order
            # (life, type, memory, extent) -- verify before relying on this.
            if frame.iat[a,
                         3] + frame.iat[b,
                                        3] == 0 or frame.iat[a,
                                                             3] + frame.iat[b,
                                                                            3] == 1:
                frame.iat[a, 2] += [1]
                frame.iat[b, 2] += [1]
            if frame.iat[a, 3] + frame.iat[b, 3] == 2:
                if frame.iat[a, 2] - frame.iat[b, 2] == 0:
                    pass
                elif frame.iat[a, 2] < frame.iat[b, 2]:
                    frame.iat[a, 2] -= [2]
                    frame.iat[b, 2] += [2]
                else:
                    frame.iat[a, 2] += [2]
                    frame.iat[b, 2] -= [2]
            if frame.iat[a, 3] + frame.iat[b, 3] == 3:
                if frame.iat[a, 2] < frame.iat[b, 2]:
                    frame.iat[a, 2] -= [1]
                else:
                    frame.iat[b, 2] -= [1]
            if frame.iat[a, 3] + frame.iat[b, 3] == 4:
                frame.iat[a, 2] -= [2]
                frame.iat[b, 2] -= [2]
        # Agents with life < 0 are replaced using the best-ranked agents.
        cgn = frame[frame['life'] < 0]
        pgn = frame.sort_values(by='life', ascending=False)
        if len(cgn.index) == 0:
            pass
        else:
            # NOTE(review): this iterates over the 2-tuple
            # (cgn.index, pgn.index) and unpacks each Index object into
            # x, y -- it does not pair indices element-wise; also iat[x, 4]
            # is out of range for a 4-column frame. Confirm the intent.
            for x, y in cgn.index, pgn.index:
                frame.iat[x, 4] = pgn.iat[y, 4]
                frame.iat[x, 2] = 10
        c += 1
        counter = frame['type'].value_counts()
        counter.name = c
        recorder[c] = counter
    if c == 10:
        d = 1
# Plot the recorded type counts per round.
recorder.plot()
plt.show()
| true |
e1df6170780ec0ffb777f5cf9d78ff663e72136b | Python | jankapusta/milan-sessions | /two_lists.py | UTF-8 | 1,204 | 4.21875 | 4 | [] | no_license |
def readInteger():
    # Read one line from stdin and return it as an int (Python 2 raw_input).
    # NOTE(review): on invalid input this prints a "Skipping" message but
    # then calls exit(), so nothing is actually skipped -- confirm intent.
    try:
        sNumber = raw_input()
        return int(sNumber)
    except ValueError:
        print "Skipping value '",sNumber,"'. Not a number"
        exit()
# Interactive driver: read two already-sorted integer lists, then merge them.
print "This will combine two sorted lists into one."
print "Enter size of the 1st list:"
iSizeM = readInteger()
print "Enter numbers from the 1st sorted list:"
iNumbersM = []
for iN in range(iSizeM):
    iNumbersM.append(readInteger())
print "Enter size of the 2nd list:"
iSizeN = readInteger()
print "Enter numbers from the 2nd sorted list:"
iNumbersN = []
for iN in range(iSizeN):
    iNumbersN.append(readInteger())
print "First list: ", iNumbersM
print "Second list: ", iNumbersN
# Classic merge step of merge sort: repeatedly take the smaller head
# element; ties (and an exhausted second list) fall through to the first.
iResultNumbers = []
indexM = 0
indexN = 0
while (indexM < iSizeM) or (indexN < iSizeN):
    if (indexN < iSizeN) and ((indexM >= iSizeM) or (iNumbersN[indexN] < iNumbersM[indexM])):
        print "Take", iNumbersN[indexN], "from second array"
        iResultNumbers.append(iNumbersN[indexN])
        indexN = indexN+1
    else:
        print "Take", iNumbersM[indexM], "from first array"
        iResultNumbers.append(iNumbersM[indexM])
        indexM = indexM+1
print "Two lists merged into one: ", iResultNumbers
print "Bye"
| true |
f9630eff57e85d9fcf973551dda494890c623e40 | Python | yuispeuda/pro-lang | /python/calısma-soruları/karekok_bulma.py | UTF-8 | 217 | 2.765625 | 3 | [] | no_license | # -*- coding: cp1254 -*-
#!/usr/bin/python
def karekok_hesapla(sayi):
if sayi>=0:
x=sayi**1.0/2
print x
elif sayi<0:
w=((-sayi)**1.0/2)
print w,"i"
| true |
a561f15aeeaee62e3d2ef2e73c07cd509c2bd719 | Python | renerwijn/MEC-downscaling-example | /step3_projection_onto_surface/calc_projection_weights.py | UTF-8 | 4,882 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
interpolation of 3D gridded data to destination grid
"""
import sys
import os, os.path
import numpy as np
import netCDF4
from netCDF4 import Dataset, default_fillvals
from scipy.interpolate import InterpolatedUnivariateSpline
# Output file
outfile = 'projection_weights.nc'

# Choose target elevation -- DIMENSIONS OBVIOUSLY NEED TO MATCH TARGET GRID!
# Exactly one of the following blocks should be enabled (True); the others
# are kept as a record of previously used target grids.
if (False):
    elev_file = '/gpfs/p/work/lvank/SMBmip/MARv3.9-yearly-ERA-Interim-1980.nc' # contains target elevation
    elev_varname = 'SRF_GIMP'
    elev_lat = 'LAT'
    elev_lon = 'LON'

if (False): # ISMIP6-1km
    elev_file = '/gpfs/fs1/work/lvank/SMBmip/1km-ISMIP6.nc'
    elev_varname = 'SRF'
    elev_lat = 'lat'
    elev_lon = 'lon'

if (False): # CISM 4km
    elev_file = '/gpfs/fs1/work/lvank/SCRIP/cism_4km_scrip/cism_topography.nc'
    elev_varname = 'topg'
    elev_lat = 'lat'
    elev_lon = 'lon'

if (False): # RACMO topo
    elev_file = "/glade/work/lvank/racmo/racmo23p2_GRN_monthly/elev.nc"
    elev_varname = "Elevation"
    elev_lat = 'lat'
    elev_lon = 'lon'

if (True): # 1 km grid 2700x1496
    # NOTE(review): this value is an instruction, not a real path -- the
    # file must be supplied locally before running.
    elev_file = "CHA: find this file here: aux/Icemask_Topo_Iceclasses_lon_lat_average_1km.nc"
    elev_varname = "Topography"
    elev_lat = 'LAT'
    elev_lon = 'LON'

with Dataset(elev_file,'r') as fid:
    target_srf = fid.variables[elev_varname][:].squeeze() # Two dimensional
    lat2d = fid.variables[elev_lat][:]
    lon2d = fid.variables[elev_lon][:]

# construct mask with invalid values, which will be used during output phase
topo_dst_masked = np.ma.masked_greater(target_srf, 4000.) # removes all values of 9999
topo_dst_masked = np.ma.masked_less(topo_dst_masked, 0.) # removes negative values

# standard numpy array for calculating weights; invalid values are treated as if they
# were at sea level (z=0) and will be masked later.
topo_dst = topo_dst_masked.filled(fill_value = 0)

#print(topo_dst.min())
#print(topo_dst.max())
#print(topo_dst.shape)

nlat, nlon = topo_dst.shape
print(topo_dst.shape)
fname_topo_mec = 'input/TOPO_COL_196002-197001_b.e21.BHIST.f09_g17.CMIP6-historical.003b.nc'

with Dataset(fname_topo_mec,'r') as fid:
    topo_mec = fid.variables['TOPO_COL'][0].squeeze() # Three dimensional, nLEV, nlat, nlon
    print(topo_mec.shape)

nlev = len(topo_mec)

# -------------------
# determine interpolation weights
# -------------------
# For each destination cell, find the two MEC levels whose column
# topographies bracket the target elevation and assign linear
# interpolation weights; each cell's weights sum to 1 across levels.
wgt = np.ma.zeros((nlev,nlat,nlon), dtype=np.float64)

# SPECIAL CASE: below lowest MEC topo
foo = np.where(topo_dst <= topo_mec[0], 1, 0)
wgt[0] += 1 * foo

# SPECIAL CASE: above highest MEC topo
foo = np.where(topo_dst > topo_mec[nlev-1], 1, 0)
wgt[nlev-1] += 1 * foo

for ilev in range(0,nlev-1):
    print(ilev)
    if (ilev < nlev-1): # always true inside this range(); kept for safety
        foo = np.where( np.logical_and(topo_mec[ilev] <= topo_dst, topo_mec[ilev+1] > topo_dst), 1, 0)

        # compute weights for each level (linear interpolation)
        dH = topo_mec[ilev+1] - topo_mec[ilev]
        wgt_A = (topo_mec[ilev+1] - topo_dst) / dH
        wgt_B = (topo_dst - topo_mec[ilev]) / dH

        wgt[ilev] += wgt_A * foo
        wgt[ilev+1] += wgt_B * foo

# Sanity check: the per-cell weights must form a partition of unity.
wgt_sum = wgt.sum(axis=0)
assert np.allclose(wgt_sum, 1), 'Interpolation weights do not add to 1.0!'
#assert np.allclose(np.sum(wgt,axis=2), 1.0, rtol=1e-05, atol=1e-08)

# Set mask to account for invalid values
wgt.mask = False
for ilev in range(nlev):
    #wgt.mask[:,:,ilev] = topo_dst_masked.mask
    wgt.mask[ilev] = topo_mec[ilev].mask

# reorder dimensions to (nlev, nlat, nlon)
#wgt2 = wgt.transpose((2,0,1))
wgt2 = wgt # already (nlev, nlat, nlon); the commented transpose is historical
# -------------------
# write output file
# -------------------
print("INFO: writing %s" % outfile)
ncfile = Dataset(outfile, 'w', format='NETCDF4')

# Global attributes
ncfile.title = 'Linear interpolation weights for projecting levelled CLM output onto target elevation '
ncfile.elev_file = elev_file
ncfile.institute = "NCAR / Utrecht University"
ncfile.contact = "L.vankampenhout@uu.nl"
ncfile.netcdf = netCDF4.__netcdf4libversion__

# Create dimensions
ncfile.createDimension('y', nlat)
ncfile.createDimension('x', nlon)
ncfile.createDimension('lev',nlev)

# Define the coordinate var
lons = ncfile.createVariable('lon', 'f8', ('y','x'))
lats = ncfile.createVariable('lat', 'f8', ('y','x'))
levs = ncfile.createVariable('lev', 'i4', ('lev',))

# Assign units attributes to coordinate var data
# NOTE(review): the axis attributes look swapped (lon -> "Y", lat -> "X");
# CF convention associates longitude with axis X -- confirm.
lons.standard_name = "longitude" ;
lons.long_name = "longitude" ;
lons.units = "degrees_east"
lons.axis = "Y"

lats.standard_name = "latitude" ;
lats.long_name = "latitude" ;
lats.units = "degrees_north"
lats.axis = "X"

levs.units = "MEC level number"

# Write data to coordinate var
lons[:] = lon2d[:]
lats[:] = lat2d[:]
levs[:] = range(0,nlev)

# Masked weight cells are stored as the default f4 fill value.
var = ncfile.createVariable('weights','f4',('lev','y','x',), fill_value=default_fillvals['f4'])
var.units = "-"
var.long_name = "interpolation weights"
var[:] = wgt2

ncfile.close()
print("INFO: done")
| true |
c0dbfc850b4a7a4c50a71396960e0079603ad032 | Python | arbc139/IMA | /weka_analysis/FPGrowth/csv_manager.py | UTF-8 | 234 | 2.984375 | 3 | [] | no_license |
import csv
class CsvManager():
  """Thin wrapper around csv.DictWriter that emits the header on creation."""

  def __init__(self, csvfile, fieldnames):
    """Create a DictWriter over *csvfile* (an open, writable file object)
    and immediately write the header row built from *fieldnames*."""
    self.writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    self.writer.writeheader()

  def write_row(self, row):
    """Write one data row; *row* maps fieldnames to values."""
    self.writer.writerow(row)
d999dd84f5282214061cc8272ea16744fd64f77a | Python | Cairnica/django-allauth | /allauth/socialaccount/providers/other/dataporten/provider.py | UTF-8 | 5,313 | 2.578125 | 3 | [
"MIT"
] | permissive | import requests
from allauth.socialaccount.providers.base import ProviderAccount, ProviderException
from allauth.socialaccount.providers.core.oauth2.provider import OAuth2Provider
class DataportenAccount(ProviderAccount):
    def get_avatar_url(self):
        '''
        Returns a valid URL to an 128x128 .png photo of the user
        '''
        # Profile photo documentation:
        # https://docs.dataporten.no/docs/oauth-authentication/
        photo_id = self.account.extra_data['profilephoto']
        return 'https://api.dataporten.no/userinfo/v1/user/media/' + photo_id

    def to_str(self):
        '''
        Returns a string representation of the social account, including
        the user's name.
        '''
        base = super(DataportenAccount, self).to_str()
        name = self.account.extra_data.get('name', '')
        return '%s (%s)' % (name, base)
class DataportenProvider(OAuth2Provider):
    """OAuth2 provider for Dataporten (Feide); see https://docs.dataporten.no/."""
    id = 'dataporten'
    name = 'Dataporten'
    account_class = DataportenAccount

    access_token_url = 'https://auth.dataporten.no/oauth/token'
    authorize_url = 'https://auth.dataporten.no/oauth/authorization'
    profile_url = 'https://auth.dataporten.no/userinfo'
    groups_url = 'https://groups-api.dataporten.no/groups/'

    def extract_uid(self, data):
        '''
        Returns the primary user identifier, an UUID string
        See: https://docs.dataporten.no/docs/userid/
        '''
        return data['userid']

    def extract_extra_data(self, data):
        '''
        Extracts fields from `data` that will be stored in
        `SocialAccount`'s `extra_data` JSONField.

        All the necessary data extraction has already been done in the
        complete_login()-view, so we can just return the data.
        PS: This is default behaviour, so we did not really need to define
        this function, but it is included for documentation purposes.

        Typical return dict:
        {
            "userid": "76a7a061-3c55-430d-8ee0-6f82ec42501f",
            "userid_sec": ["feide:andreas@uninett.no"],
            "name": "Andreas \u00c5kre Solberg",
            "email": "andreas.solberg@uninett.no",
            "profilephoto": "p:a3019954-902f-45a3-b4ee-bca7b48ab507",
        }
        '''
        return data

    def extract_common_fields(self, data):
        '''
        This function extracts information from the /userinfo endpoint which
        will be consumed by allauth.socialaccount.adapter.populate_user().
        Look there to find which key-value pairs that should be saved in the
        returned dict.

        Typical return dict:
        {
            "userid": "76a7a061-3c55-430d-8ee0-6f82ec42501f",
            "userid_sec": ["feide:andreas@uninett.no"],
            "name": "Andreas \u00c5kre Solberg",
            "email": "andreas.solberg@uninett.no",
            "profilephoto": "p:a3019954-902f-45a3-b4ee-bca7b48ab507",
            "username": "andreas",
        }
        '''
        # Make shallow copy to prevent possible mutability issues
        data = dict(data)

        # If a Feide username is available, use it. If not, use the "username"
        # of the email-address
        # NOTE(review): assumes 'userid_sec' (and, in the fallback, 'email')
        # is always present; data.get() returning None would make this
        # raise TypeError/AttributeError -- confirm upstream guarantees.
        for userid in data.get('userid_sec'):
            usertype, username = userid.split(':')
            if usertype == 'feide':
                data['username'] = username.split('@')[0]
                break
        else:
            # Only entered if break is not executed above
            data['username'] = data.get('email').split('@')[0]

        return data

    def complete_login(self, request, app, token, **kwargs):
        '''
        Arguments:
            request - The get request to the callback URL
                /accounts/dataporten/login/callback.
            app - The corresponding SocialApp model instance
            token - A token object with access token given in token.token
        Returns:
            Should return a dict with user information intended for parsing
            by the methods of the DataportenProvider view, i.e.
            extract_uid(), extract_extra_data(), and extract_common_fields()
        '''
        # The authentication header
        headers = {'Authorization': 'Bearer ' + token.token}

        # Userinfo endpoint, for documentation see:
        # https://docs.dataporten.no/docs/oauth-authentication/
        userinfo_response = requests.get(
            self.get_profile_url(request),
            headers=headers,
        )
        # Raise exception for 4xx and 5xx response codes
        userinfo_response.raise_for_status()

        # The endpoint returns json-data and it needs to be decoded
        extra_data = userinfo_response.json()['user']

        # Finally test that the audience property matches the client id
        # for validation reasons, as instructed by the Dataporten docs
        # if the userinfo-response is used for authentication
        if userinfo_response.json()['audience'] != app.client_id:
            raise ProviderException(
                'Dataporten returned a user with an audience field \
                which does not correspond to the client id of the \
                application.'
            )

        return self.sociallogin_from_response(
            request,
            extra_data,
        )


provider_classes = [DataportenProvider]
| true |
e8c8a7f9a018e86d2c0a9e203ba78784686d45fa | Python | pqrkseohyeon/IOT | /Raspberry Pi/Flask (1)/app.py | UTF-8 | 1,484 | 2.640625 | 3 | [] | no_license | from flask import Flask, render_template
import RPi.GPIO as GPIO
import Adafruit_BMP.BMP085 as BMP085
app = Flask(__name__)
GPIO.setmode(GPIO.BOARD) # BOARD = address pins by physical header number
GPIO.setwarnings(False)
# LED pins exposed in the UI: physical pin -> display name + cached state.
pins = {
   10: { 'name':'YELLOW', 'state':GPIO.LOW},
   11: { 'name':'BLUE', 'state':GPIO.LOW},
   12: { 'name':'RED', 'state':GPIO.LOW},
   }
sensor = BMP085.BMP085()
# Last sensor readings rendered by the template; only 'temp' is ever
# updated in this module ('humi' stays 0.0).
bmp = { 'temp' : 0.0, 'humi' : 0.0}
# Configure every pin as an output and start with the LEDs off.
for pin in pins:
   GPIO.setup(pin, GPIO.OUT)
   GPIO.output(pin, GPIO.LOW)
@app.route("/")
def main():
   # Refresh the cached pin states from the hardware before rendering.
   for pin in pins:
      pins[pin]['state'] = GPIO.input(pin)
   context = {
      'pins' : pins,
      'bmp' : bmp
      }
   return render_template('app.html', **context)
@app.route("/<changePin>/<action>")
def action(changePin, action):
   # Drive the requested GPIO pin high or low, then re-render the page.
   target = int(changePin)
   if action == 'on':
      GPIO.output(target, GPIO.HIGH)
   if action == 'off':
      GPIO.output(target, GPIO.LOW)
   # Refresh every cached pin state from the hardware.
   for pin in pins:
      pins[pin]['state'] = GPIO.input(pin)
   context = {
      'pins' : pins,
      'bmp' : bmp
      }
   return render_template('app.html', **context)
@app.route("/test")
def readSensor():
   # Read temperature, pressure and altitude from the BMP085 and store them.
   # NOTE(review): pressure and altitude are read but never used or shown;
   # only the temperature reaches the template via bmp['temp'] -- confirm.
   temp = sensor.read_temperature()
   pressure = sensor.read_pressure()
   altitude = sensor.read_altitude()
   bmp['temp']=temp
   # `temp` is reused as the template context from here on.
   temp = {
      'pins' : pins,
      'bmp' : bmp
      }
   return render_template('app.html',**temp)
if __name__ == "__main__":
   # Listen on all interfaces; debug=True enables Flask's reloader/debugger.
   app.run(host='0.0.0.0', port=8080, debug=True)
492228a885f224a69f0844f4feaf1bf6d86677bc | Python | ranBernstein/GaitKinect | /Fourier/utils/misc/animate.py | UTF-8 | 545 | 2.703125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def animate(data):
    """Show *data* (a 1-D sequence) as a growing-line animation.

    Frame ``num`` plots the first ``num`` samples against their indices.
    Blocks in plt.show() until the window is closed.
    """
    def update_line(num, data, line):
        # BUG FIX: `xrange` is Python-2 only; `range` behaves identically
        # here and works on both Python 2 and 3.
        line.set_data(range(num), data[:num])
        return line,
    fig = plt.figure()
    line, = plt.plot([], [], 'r-')
    plt.xlim(0, len(data))
    plt.ylim(np.min(data), np.max(data))
    # Keep a reference: the animation stops if the FuncAnimation object is
    # garbage-collected.
    line_ani = animation.FuncAnimation(fig, update_line, len(data), fargs=(data, line), interval=50, blit=True)
    #line_ani.save('my_animation.mp4')
    plt.show()
63a1efccbe01cb58423ae4ce65119bc950485726 | Python | Ayushmanglani/competitive_coding | /leetcode/October/7_RotateList.py | UTF-8 | 578 | 2.984375 | 3 | [] | no_license | class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if k == 0 or not head or not head.next:
return head
curr = head
a = []
while curr:
a.append(curr.val)
curr = curr.next
l = len(a)
r = [0]*l
for i in range(l):
p = (i+k)%l
r[p] = a[i]
head = ListNode(r[0],None)
curr = head
for i in range(1,l):
new = ListNode(r[i],None)
curr.next = new
curr = curr.next
return(head) | true |
63114d8981f39dced8b74f4bbd9838434cce53e6 | Python | EzequielGuillen/Inteligencia-Articial-1 | /Agentes Racionales/enviroment.py | UTF-8 | 1,443 | 3.515625 | 4 | [] | no_license | import random
class Enviroment:
    """Grid world for a vacuum-cleaner style agent.

    The grid is stored as ``tablero[x][y]`` with 0 = clean and 1 = dirt;
    cells holding 5 or 10 are rendered as diamond markers by Print().
    """

    def __init__(self,size_x,size_y):
        self.sizex=size_x
        self.sizey=size_y
        # BUG FIX: every accessor indexes tablero[x][y], so there must be
        # size_x rows of size_y columns. The old code built size_y rows of
        # size_x columns, which raised IndexError on non-square grids.
        self.tablero=[[0]*self.sizey for i in range(self.sizex)]

    def CreateDirt(self,cantDirt):
        """Scatter `cantDirt` units of dirt on distinct random clean cells."""
        while cantDirt>0:
            x=random.randint(0,self.sizex-1)
            y=random.randint(0,self.sizey-1)
            if self.tablero[x][y] == 0 :
                self.tablero[x][y]=1
                cantDirt-=1

    def ValidMove(self,posX,posY):
        """Return True when (posX, posY) lies inside the grid."""
        if posX < self.sizex and posX >-1 and posY < self.sizey and posY > -1:
            return True
        return False

    def isDirty(self,posX,posY):
        """Return True when the cell at (posX, posY) holds dirt."""
        if self.tablero[posX][posY] == 1:
            return True
        return False

    def Clean(self,posX,posY):
        """Reset the cell at (posX, posY) to clean (0)."""
        self.tablero[posX][posY]=0

    def SizeX(self):
        return self.sizex

    def SizeY(self):
        return self.sizey

    def Print(self):
        """Render the grid row by row: white = clean, black = dirt,
        diamonds for cell values 5 and 10."""
        for y in range (0,self.sizey):
            print("[",end=" ")
            for x in range (0,self.sizex):
                if self.tablero[x][y]==0:
                    print("⬜",end=" ")
                if self.tablero[x][y]==1:
                    print("⬛",end=" ")
                if self.tablero[x][y]==5:
                    print("🔷",end=" ")
                if self.tablero[x][y]==10:
                    print("🔶",end=" ")
            print("]")
957b36d625297237eb2dd1d416f95203358903f5 | Python | FranciscoRZ/NeuralNetForOptionPricing | /ANNPricer_v1.py | UTF-8 | 3,065 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 17:13:23 2018
@author: Cisco
"""
import os
os.chdir("/Users/Cisco/Desktop/M1 EIF/S2/Mémoire/DataCACPaul")

# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Import data
# CSV uses ';' as separator and ',' as decimal mark (French locale);
# rows are reversed so the series runs in the opposite file order.
data = pd.read_csv('DataK5200.csv', sep = ';', decimal = ',').dropna(axis = 0, how = 'all')
data = data.reindex(index = data.index[::-1]).dropna(axis = 1, how = 'all')
# Columns 1 and 2 feed the model; column 4 is the observed put price target.
X = data.iloc[:, [1,2]].values
#y_call = data.iloc[:, 3].values
y_put = data.iloc[:, 4].values

# Splitting into Training and Test sets
from sklearn.model_selection import train_test_split
#X_train, X_test, y_call_train, y_call_test = train_test_split(X, y_call, test_size = 0.2)
X_train, X_test, y_put_train, y_put_test = train_test_split(X, y_put, test_size = 0.2)

# Feature scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# @@@@@@@ Building the ANN Model @@@@@@@@@@
import keras
from keras.models import Sequential
from keras.layers import Dense

# Initializing the ANN
pricer = Sequential()

# Adding a first hidden layer and the input layer
# (output_dim/init are the Keras 1.x argument names)
pricer.add(Dense(output_dim = 3, init = 'uniform', activation = 'relu', input_dim = 2))

# Adding a second input layer
pricer.add(Dense(output_dim = 3, init = 'uniform', activation = 'relu'))

# Adding the output layer
pricer.add(Dense(output_dim = 1, init = 'uniform', activation = 'linear'))

# compiling
pricer.compile(optimizer = 'adam', loss = 'mean_squared_error')

# Fitting the ANN to the training set
#pricer.fit(X_train, y_call_train, batch_size = 10, epochs = 100)
pricer.fit(X_train, y_put_train, batch_size = 10, epochs = 100)

# @@@@@@@ Making the predictions and evaluating the model @@@@@@@@
y_pred = pricer.predict(X_test)

# NOTE(review): plt.clf is never *called* (missing parentheses), so the
# figure is not actually cleared -- probably meant plt.clf().
plt.clf
#plt.plot(y_call_test, color = 'red')
plt.plot(y_put_test, color = 'red')
plt.plot(y_pred, color = 'blue')

#pricer.evaluate(X_test, y_call_test)
pricer.evaluate(X_test, y_put_test)

# Predicting the entire set
# NOTE(review): fit_transform re-fits the scaler on the full data set,
# which applies a different scaling than the one used during training --
# confirm that scaler.transform(X) was not intended instead.
Predictors = scaler.fit_transform(X)
PredictCall = pricer.predict(Predictors)
plt.clf
plt.plot(PredictCall, color = 'blue', label = 'Predicted put prices')
#plt.plot(y_call, color = 'red', label = 'Market call prices')
plt.plot(y_put, color = 'red', label = 'Market put prices')
plt.legend()
#plt.title('Predicted call prices vs. observed call prices')
plt.title('Predicted call prices vs. observed call prices')
plt.savefig(fname = '/Users/Cisco/Desktop/M1 EIF/S2/Mémoire/Graphiques/Model25200_put.jpeg')

#pricer.evaluate(Predictors, y_call)
pricer.evaluate(Predictors, y_put)
output = pd.DataFrame(PredictCall)
#output.to_csv("/Users/Cisco/Desktop/Mémoire/PredictionCallModel2", sep = ",", index = False)

# Saving results
import csv
csvfile = '/Users/Cisco/Desktop/M1 EIF/S2/Mémoire/Predictions/Model2_put52Predictions'
# Undo the earlier row reversal so predictions match the original file order.
PredictCall = PredictCall[::-1]
with open(csvfile, 'w') as output:
    writer = csv.writer(output, lineterminator = '\n')
    writer.writerows(PredictCall)
| true |
0b91aa683884351391fd46d3c63aecea94507d28 | Python | brownplt/insta-model | /conformance_suite/test_list_of_dynamic.py | UTF-8 | 475 | 2.859375 | 3 | [] | no_license | # test_list_of_dynamic.py
# This should pass.
from threading import Thread
from typing import List
def f(threads: List[Thread]) -> int:
    """Return how many Thread objects are in *threads*."""
    count = len(threads)
    return count
# def test_list_of_dynamic(self):
# codestr = """
# from threading import Thread
# from typing import List
# def f(threads: List[Thread]) -> int:
# return len(threads)
# """
# f = self.find_code(self.compile(codestr), "f")
# self.assertInBytecode(f, "FAST_LEN")
| true |
6bd9e8103693131457d1bf585b31373e549b077d | Python | arthurtomas/Codes_git | /desafio096.py | UTF-8 | 201 | 4.15625 | 4 | [] | no_license | def area(larg, comp):
    """Print the area (m², 2 decimals) of a larg x comp metre plot."""
    print(f'A área de um terreno {larg}m x {comp}m = {larg*comp:.2f}m²')
# Main program: read width and length in metres, then print the plot area.
l = float(input('Largura(m): '))
c = float(input('Comprimento(m): '))
area(l, c)
| true |
18152e73c5081301660d167f111b026906f4920f | Python | YorkFish/learning_notes | /Python3/Qizi/set/set_01.py | UTF-8 | 400 | 4.09375 | 4 | [] | no_license | """
intersection, union set, difference set
tips: 有时候,对于列表,可以先 set(list),再用上面的“交、并、差”
"""
set_a = {1, 2, 3}
set_b = {2, 3, 4}
print(">>> set_a & set_b =", set_a & set_b)  # intersection
print(">>> set_a | set_b =", set_a | set_b)  # union
print(">>> set_a - set_b =", set_a - set_b)  # difference
print(">>> set_a ^ set_b =", set_a ^ set_b)  # symmetric difference
| true |
36dc8acdc39cccf2e345bfb35fb2ebbda20f54ab | Python | qdonnellan/lessonwell | /tests/view_tests/sign_up_page_test.py | UTF-8 | 3,013 | 2.578125 | 3 | [] | no_license | from tests.main_test_handler import TestBase
from models.user import User
import json
import unittest
class SignUpPageTest(TestBase):
    """
    test that the sign up page is working correctly @ "/sign_up"
    """
    def test_sign_up_page_view_not_authenticated(self):
        """
        test initial view of '/sign_up', should be redirected
        if the user is not authenticated
        """
        response = self.testapp.get('/sign_up')
        # 302 = redirect (presumably to a login page -- TODO confirm target)
        self.assertEqual(response.status_int, 302)
    def test_sign_up_page_view_with_google_authentication(self):
        """
        test views of '/sign_up' with a user authenticated to google
        """
        self.create_google_user()
        response = self.testapp.get('/sign_up')
        self.assertEqual(response.status_int, 200)
    def test_sign_up_page_post_without_authentication(self):
        """
        a post request from a non autenticated user should be aborted
        """
        # status=401 tells webtest to expect (and not raise on) the 401 abort.
        response = self.testapp.post('/sign_up', status=401)
        self.assertEqual(response.status_int, 401)
    def test_sign_up_page_post_request_without_token(self):
        """
        a post request without the correct stripe token should fail
        """
        self.create_google_user()
        data = { 'username' : 'legitimateUsername' }
        response = self.testapp.post('/sign_up', data)
        # The error message is rendered on the page the client is redirected to.
        self.assertIn(
            'Invalid Payment Information',
            response.follow().body
        )
    def test_sign_up_page_with_protected_username(self):
        """
        a post request using a protected username should be rejected
        """
        self.create_google_user()
        data = { 'username' : 'admin' }
        response = self.testapp.post('/sign_up', data)
        self.assertIn(
            'That username is protected, choose a different one',
            response.follow().body
        )
    def test_sign_up_page_with_non_unique_username(self):
        """
        a post request using a username that is already taken
        should be rejected
        """
        self.create_and_return_local_user(username='helloworld')
        self.create_google_user()
        data = { 'username' : 'helloworld' }
        response = self.testapp.post('/sign_up', data)
        self.assertIn(
            'That username is already taken',
            response.follow().body
        )
    @unittest.skip('creates a new user on stripe - takes a while!')
    def test_valid_sign_up_post(self):
        # End-to-end signup: talks to the real Stripe test API, hence skipped.
        self.create_google_user()
        self.assertEqual(User.query().count(), 0)
        data = {
            'username' : 'helloworld',
            'stripeToken' : self.generate_sample_token().id,
        }
        response = self.testapp.post('/sign_up', data)
        # there should now be a new user in the database!
        self.assertEqual(User.query().count(), 1)
        self.assertIn(
            'You have successfully created a lessonwell account',
            response.follow().body
        )
| true |
3107c7fbc79d79de1cd3a8eb19ca0ae9c6d71298 | Python | ichejun/coding | /leetcode/206. 反转链表.py | UTF-8 | 1,319 | 4.03125 | 4 | [] | no_license | '''
反转一个单链表。
示例:
输入: 1->2->3->4->5->NULL
输出: 5->4->3->2->1->NULL
进阶:
你可以迭代或递归地反转链表。你能否用两种方法解决这道题?
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/reverse-linked-list
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse a singly linked list in place and return the new head.

        Iterative pointer reversal: O(n) time, O(1) extra space.
        """
        prev = None
        node = head
        while node is not None:
            nxt = node.next   # remember the rest of the list
            node.next = prev  # point the current node backwards
            prev = node       # advance the reversed-prefix head
            node = nxt        # step into the remaining suffix
        return prev
| true |
bcd23dea79ab7f4703de8d150287b93c1cb6e4bc | Python | aman-ku/Machine-Learning-Algorithms | /Classification/8.DTC/Decision_tree_classification.py | UTF-8 | 2,870 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 16:56:36 2020
@author: amankumar
"""
import pandas as pd
import numpy as np
# Training data: the classic "play golf" toy dataset (last column is the label).
df = pd.read_csv('play_golf.csv')
# Tiny epsilon used to avoid log2(0) / division-by-zero in entropy computations.
eps = np.finfo(float).eps
def entropy_last(df):
    """Return the Shannon entropy (in bits) of the target column of *df*.

    The target is taken to be the last column. Fixed: a leftover debug
    `print(entropy)` ran on every call (very noisy while building the tree);
    it has been removed.
    """
    target = df.keys()[-1]                      # last column = class label
    unique_values = df[target].unique()
    entropy = 0
    for value in unique_values:
        # P(class == value) over all rows
        prob = df[target].value_counts()[value] / len(df[target])
        entropy += -prob * np.log2(prob)
    return entropy
def attribute_entropy(df,attribute):
    """Return the expected (weighted) entropy of the target column after
    splitting *df* on *attribute* -- i.e. the information of the split.

    Relies on the module-level `eps` for numerical smoothing.
    """
    unique_classes=df[attribute].unique()
    Class=df.keys()[-1]
    unique_last=df[Class].unique()
    information=0
    for i in unique_classes:
        entropy=0
        for j in unique_last:
            # rows where attribute == i AND label == j / rows where attribute == i
            num=len(df[attribute][df[attribute]==i][df[df.keys()[-1]]==j])
            den=len(df[attribute][df[attribute]==i])
            # NOTE(review): `np.log2(num/den+eps)` parses as log2((num/den)+eps);
            # smoothing inside the log was presumably intended -- confirm,
            # although for num==0 the whole term is 0 either way.
            entropy+=-num/(den+eps)*np.log2(num/den+eps)
        # `den` leaks out of the inner loop; its final value is the subset size,
        # which is the same for every j, so the weight den/len(df) is correct.
        information+=den/len(df)*entropy
    return abs(information)
def best_for_split(df):
    """Return the attribute (non-target column) with the highest information gain."""
    attributes = df.keys()[:-1]
    gains = [entropy_last(df) - attribute_entropy(df, attribute)
             for attribute in attributes]
    return attributes[np.argmax(gains)]
def get_subtable(df, node, value):
    """Return the rows of *df* whose column *node* equals *value*, re-indexed from 0."""
    mask = df[node] == value
    return df[mask].reset_index(drop=True)
def build_tree(df, tree=None):
    """Recursively build an ID3 decision tree as nested dicts.

    The tree maps attribute -> {attribute value -> subtree or leaf label}.
    Fixed: the label column was hard-coded as 'Play Golf' even though the
    target column (`df.keys()[-1]`) was already computed, which broke the
    function on any other dataset; the computed target is now used.
    """
    target = df.keys()[-1]           # label column (was computed but unused)
    node = best_for_split(df)        # attribute with the highest information gain
    attr_values = df[node].unique()
    if tree is None:
        tree = {}
    tree[node] = {}
    for value in attr_values:
        subtable = get_subtable(df, node, value)
        class_values, counts = np.unique(subtable[target], return_counts=True)
        if len(counts) == 1:         # pure subset -> leaf with the single label
            tree[node][value] = class_values[0]
        else:
            tree[node][value] = build_tree(subtable)  # recurse on impure subset
    return tree
# Build the decision tree from the full dataset and pretty-print it.
tree=build_tree(df)
import pprint
pprint.pprint(tree)
def predict(inst, tree):
    """Classify *inst* (a mapping of attribute -> value) with a built tree.

    Follows the branch selected by the instance's value for the tree's root
    attribute, recursing until a leaf label is reached.
    """
    root = next(iter(tree))            # a tree level has a single attribute key
    branch = tree[root][inst[root]]
    if isinstance(branch, dict):
        return predict(inst, branch)   # internal node: keep descending
    return branch                      # leaf: the predicted label
# Classify one hand-written instance with the tree built above.
data = {'Outlook':'Sunny','Temperature':'Cool','Humidity':'High','Windy':True}
inst = pd.Series(data)
prediction = predict(inst,tree)
print(prediction)
| true |
cd26fcf2812fccda02a7c81568aca5b41d4918a6 | Python | DustyQ5/CTI110 | /P4LAB2_Sawyer.py | UTF-8 | 635 | 3.296875 | 3 | [] | no_license | # Turtle named Bit draws my first and last initial
# 10/9/2018
# CTI-110 P4T1b: Initials
# Chazz Sawyer
#
import turtle
# NOTE(review): missing parentheses -- this binds the Turtle *class*, not an
# instance; `wn` is also never used afterwards.
wn = turtle.Turtle
# `bit` draws the author's two initials in red.
bit=turtle.Turtle()
bit.color("red")
# NOTE(review): pensize is given a string; a number (pensize(3)) is the
# documented argument type -- confirm this works on the targeted turtle version.
bit.pensize("3")
# First initial (drawn with pen-up jumps between strokes).
bit.forward(90)
bit.left(90)
bit.penup()
bit.forward(90)
bit.pendown()
bit.left(90)
bit.forward(90)
bit.left(90)
bit.forward(90)
bit.left(90)
bit.penup()
# Move over to draw the second initial.
bit.forward(135)
bit.left(90)
bit.forward(90)
bit.right(90)
bit.forward(90)
bit.left(180)
bit.pendown()
bit.forward(90)
bit.left(90)
bit.forward(45)
bit.left(90)
bit.forward(90)
bit.right(90)
bit.forward(45)
bit.right(90)
bit.forward(90)
| true |
2486b8ffd9ff51eb264df6b92794696e90f02b96 | Python | MechaMonk/can_booster_pack | /decode/receive.py | UTF-8 | 1,671 | 2.703125 | 3 | [] | no_license | from serial import *
# Serial port settings for the CAN booster pack (Windows COM port, 1 Mbaud).
port_name = 'COM35'
port_baudrate = 1000000
def decode_message(line):
    """Parse one serial CAN frame line into (timestamp, type, id, dlc, data).

    Frame format (after a 1-byte prefix): "<ts-hex>.<id-hex>.<data-hex>".
    The id field's width selects the frame type: 3 hex digits -> 'A'
    (standard id), anything else -> 'B' (extended id).
    """
    fields = line[1:].strip().split(b'.')
    timestamp = int(fields[0], 16)
    frame_type = 'A' if len(fields[1]) == 3 else 'B'
    frame_id = int(fields[1], 16)
    byte_count = len(fields[2]) // 2
    payload = [int(fields[2][2 * k:2 * k + 2], 16) for k in range(byte_count)]
    return timestamp, frame_type, frame_id, byte_count, payload
# Use the user's private decoder if present; otherwise install no-op defaults
# that label every frame and filter nothing out.
try:
    from secret_decoder import message_content_note, message_filter
except ImportError:
    def message_content_note(mtype, id, data):
        """Fallback: return a generic note for the frame type."""
        # ----- fill your content resolving here ----
        if mtype == 'B':
            return "CAN message type B"
        else:
            return "CAN message type A"
        #return ''
    def message_filter(mtype, id, data):
        """Fallback: accept every frame."""
        return True
# Main receive loop: read frames from the serial port forever and pretty-print
# every frame that passes message_filter.
with Serial(port_name, port_baudrate, timeout=0.1) as port:
    while True:
        line = b''  # keep defined so the error path below can always report it
        try:
            line = port.readline()
            if line != b'':
                (tStamp, mType, mId, mLen, mData) = decode_message(line)
                if message_filter(mType, mId, mData):
                    note = message_content_note(mType, mId, mData)
                    # Standard ids print as " NNN", extended ids as 8 hex digits.
                    print('{:5d} {} {} '.format(tStamp, mType,
                        (" {:03X}" if mType == 'A' else "{:08X}").format(mId)), end='')
                    dlen = len(mData)
                    # Always print 8 byte columns, padding short payloads.
                    for i in range(8):
                        print(' {:02X}'.format(mData[i]) if i < dlen else '   ', end='')
                    print(' {}'.format(note))
        except KeyboardInterrupt:
            # Fixed: the original bare `except:` swallowed Ctrl+C and reported
            # it as a decode error; let it terminate the loop instead.
            raise
        except Exception:
            print('Decode ERROR: {}'.format(line))
3b911f647fd79ec9deac419723d61b803e574625 | Python | slash-segmentation/segtools | /pysegtools/images/io/_handler_manager.py | UTF-8 | 8,747 | 2.578125 | 3 | [] | no_license | """
Defines the handler-manager which manages handlers for a class. This is used by the FileImageStack
and FileImageSource classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta
from io import open #pylint: disable=redefined-builtin
from ...imstack import Help
from ...general.utils import all_subclasses
class _HandlerManagerMeta(ABCMeta):
    """
    The meta-class for the handler-manager, which extends ABCMeta to call Help.register if
    applicable.
    """
    def __new__(cls, clsname, bases, dct):
        # Create the class normally, then register its help page under its
        # handler name, its Python class name, and each recognized extension.
        c = super(_HandlerManagerMeta, cls).__new__(cls, clsname, bases, dct)
        n = c.name()
        # A None name means the handler opted out of help pages (see name()).
        if n is not None:
            names = (n,c.__name__) + tuple(ext.lstrip('.').lower() for ext in c.exts())
            Help.register(names, c.print_help)
        return c
class HandlerManager(object):
    """
    Base class that manages format handlers (its subclasses) for opening and
    creating image files. Handlers are discovered via all_subclasses(); a
    handler is selected either explicitly by name or by probing each handler's
    _openable/_creatable hooks.
    """
    __metaclass__ = _HandlerManagerMeta
    @classmethod
    def is_handler(cls, handler, read=True):
        """Checks that the given string is a valid handler for this type."""
        #pylint: disable=protected-access
        assert cls != HandlerManager
        return any(handler == sub.name() and (read and sub._can_read() or not read and sub._can_write())
                   for sub in all_subclasses(cls))
    @classmethod
    def handlers(cls, read=True):
        """Get a list of all handlers of this type."""
        #pylint: disable=protected-access
        assert cls != HandlerManager
        handlers = []
        for sub in all_subclasses(cls):
            h = sub.name()
            if h is not None and (read and sub._can_read() or not read and sub._can_write()):
                handlers.append(h)
        return handlers
    @classmethod
    def __openable_by(cls, filename, readonly=False, handler=None, **options):
        # Find the handler that can open `filename`, either by explicit name or
        # by probing each candidate with the first bytes of the file.
        #pylint: disable=protected-access
        handlers = (h for h in all_subclasses(cls) if h._can_read() and (readonly or h._can_write()))
        if handler is not None:
            for h in handlers:
                if handler == h.name(): return h
            raise ValueError('No handler named "%s"' % handler)
        for h in handlers:
            with open(filename, 'rb') as f:
                try:
                    if h._openable(filename, f, readonly, **options): return h
                except StandardError: pass
        raise ValueError('Unable to find handler for opening file "%s"' % filename)
    @classmethod
    def open(cls, filename, readonly=False, handler=None, **options):
        """
        Opens an existing image file. Extra options are only supported by some file handlers.
        """
        assert cls != HandlerManager
        return cls.__openable_by(filename, readonly, handler, **options). \
            open(filename, readonly, **options)
    @classmethod
    def openable(cls, filename, readonly=False, handler=None, **options):
        """
        Checks if an existing image file can be opened with the given arguments. Extra options are
        only supported by some file handlers.
        """
        assert cls != HandlerManager
        try: cls.__openable_by(filename, readonly, handler, **options); return True
        except StandardError: return False
    @classmethod
    def __creatable_by(cls, filename, writeonly=False, handler=None, **options):
        # Find the handler that can create `filename`; selection is purely by
        # file extension and options (mirrors __openable_by above).
        #pylint: disable=protected-access
        from os.path import splitext
        ext = splitext(filename)[1].lower()
        handlers = (h for h in all_subclasses(cls) if h._can_write() and (writeonly or h._can_read()))
        if handler is not None:
            for h in handlers:
                # Fixed: this compared `handler == cls.name()` (the manager's own
                # name) instead of each candidate's name, so selecting a creation
                # handler by name could never work; now consistent with
                # __openable_by.
                if handler == h.name(): return h
            raise ValueError('No image source handler named "'+handler+'" for creating files')
        for h in handlers:
            try:
                if h._creatable(filename, ext, writeonly, **options): return h
            except StandardError: pass
        raise ValueError('Unable to find image source handler for creating file "'+filename+'"')
    @classmethod
    def _create_trans(cls, im):
        """Hook for subclasses to transform the image before creation (identity here)."""
        return im
    @classmethod
    def create(cls, filename, im, writeonly=False, handler=None, **options):
        """
        Creates an image file. Extra options are only supported by some file handlers.
        Selection of a handler and format is purely on file extension and options given.
        Note that the "writeonly" flag is only used for optimization and may not always been
        honored. It is your word that you will not use any functions that get data from the
        stack.
        """
        assert cls != HandlerManager
        return cls.__creatable_by(filename, writeonly, handler, **options). \
            create(filename, cls._create_trans(im), writeonly, **options)
    @classmethod
    def creatable(cls, filename, writeonly=False, handler=None, **options):
        """
        Checks if a filename can written to as a new image file. Extra options are only supported by
        some file handlers.
        """
        assert cls != HandlerManager
        try: cls.__creatable_by(filename, writeonly, handler, **options); return True
        except StandardError: return False
    @classmethod
    def _openable(cls, filename, f, readonly, **opts): #pylint: disable=unused-argument
        """
        [To be implemented by handler, default is nothing is openable]
        Return if a file is openable given the filename, file object, and dictionary of options. If
        this returns True then the class must provide a static/class method like:
            `open(filename, readonly, **options)`
        Option keys are always strings, values can be either strings or other values (but strings
        must be accepted for any value and you must convert, if possible). While _openable should
        return False if there any unknown option keys or option values cannot be used, open should
        throw exceptions.
        """
        return False
    @classmethod
    def _creatable(cls, filename, ext, writeonly, **opts): #pylint: disable=unused-argument
        """
        [To be implemented by handler, default is nothing is creatable]
        Return if a filename/ext (ext always lowercase and includes .) is creatable as given the
        dictionary of options. If this returns True then the class must provide a static/class
        method like:
            `create(filename, IMAGE, writeonly, **options)`
        Option keys are always strings, values can be either strings or other values (but strings
        must be accepted for any value and you must convert, if possible). While _creatable should
        return False if there any unknown option keys or option values cannot be used, create should
        throw exceptions.
        The IMAGE is either an ImageSource for source handlers or an ImageStack for stack handlers.
        Note that the "writeonly" flag is only used for optimization and may not always been
        honored. It is the word of the caller they will not use any functions that get data from
        the stack. The handler may ignore this and treat it as read/write.
        """
        return False
    @classmethod
    def _can_read(cls):
        """
        [To be implemented by handler, default is readable]
        Returns True if this handler can, under any circumstances, read images.
        """
        return True
    @classmethod
    def _can_write(cls):
        """
        [To be implemented by handler, default is writable]
        Returns True if this handler can, under any circumstances, write images.
        """
        return True
    @classmethod
    def name(cls):
        """
        [To be implemented by handler, default causes the handler to not have a help page, be
        unusable by name, and not be listed, but still can handle things]
        Return the name of this image handler to be displayed in help outputs.
        """
        return None
    @classmethod
    def exts(cls):
        """
        [To be implemented by handler, default returns empty tuple the handler to not have any extra
        help page names]
        Return a tuple of lower-case exts including the . that this image handler recongnizes for
        writing (and common extensions for readable types). These are added as help pages. In some
        cases it may make to not return any extensions even if extensions are used to determine if
        a file can be created.
        """
        return ()
    @classmethod
    def print_help(cls, width):
        """
        [To be implemented by handler, default prints nothing]
        Prints the help page of this image handler.
        """
        pass
| true |
7ac642f38db14c7dfc26c06a46d3023d360444db | Python | dav009/pythia | /src/pipelines/features_and_labels.py | UTF-8 | 1,139 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# coding: utf-8
import numpy as np
from collections import namedtuple
def get_data(scores, features):
    """Build parallel feature vectors and labels from scored documents.

    scores   -- iterable of objects carrying per-document scores
                (bagwordsScore, tfidfScore, bog, skipthoughts, ldavector,
                novelty).
    features -- object of booleans selecting which score components to
                include in each feature vector.
    Returns (data, labels): a list of 1-D numpy arrays and a list of 0/1
    novelty labels.

    Fixed: removed the dead `length = 0.8 * len(scores)` (computed, never
    used anywhere).
    """
    data = list()
    labels = list()
    for score in scores:
        feature = list()
        if features.cos_similarity:
            feature.append(np.array([score.bagwordsScore]))
        if features.tfidf_sum:
            feature.append(np.array([score.tfidfScore]))
        if features.bag_of_words:
            feature.append(score.bog)
        if features.skipthoughts:
            feature.append(score.skipthoughts)
        if features.lda:
            feature.append(score.ldavector)
        # Concatenate the selected components into a single flat vector.
        feature = np.concatenate(feature, axis=0)
        data.append(feature)
        labels.append(1 if score.novelty else 0)
    return data, labels
def main(argv):
    """Entry point: argv = (scores, features); returns (data, labels)."""
    return get_data(argv[0], argv[1])
if __name__ == '__main__':
    # Fixed: `sys` was never imported anywhere in this file, so running the
    # script raised NameError on the argv check below.
    import sys
    if len(sys.argv) < 3:
        print ("Usage: features_and_labels.py scores features\n\ngenerates features and labels for scores given the defined features")
    else:
        main(sys.argv[1:])
| true |
f192c195a612a86fd663f743a3a2e283ad868e46 | Python | OleksiiBondarr/evo2018_2 | /start.py | UTF-8 | 3,693 | 3.453125 | 3 | [] | no_license | import random
class ServerSimulation:
    """Simulate placing two copies of data chunks on servers and measure
    how often killing two arbitrary servers loses data.

    type_id        -- placement strategy: 1 = random placement,
                      0 = mirrored full copy.
    servers_amount -- number of servers.
    data_amount    -- number of data chunks.
    data_chunks    -- dict: server index -> list of chunk ids stored there.
    """

    def __init__(self, type_id, servers_amount=10, data_amount=100):
        self.servers_amount = servers_amount
        self.data_amount = data_amount
        # Fixed: data_chunks used to be a CLASS-level dict, silently shared
        # between every instance; it is now per-instance.
        self.data_chunks = {i: [] for i in range(servers_amount)}
        # Two full copies spread evenly over all servers (was hard-coded 20).
        self._capacity = 2 * data_amount // servers_amount
        if type_id:
            self.init_random_placement()
        else:
            self.init_full_copy_placement()

    def init_random_placement(self):
        """Shuffle the chunk ids and write one copy sequentially over the
        first half of the servers, then repeat with a fresh shuffle for the
        second copy on the remaining servers."""
        chunk_ids = list(range(0, self.data_amount))
        server = 0
        for _ in range(2):
            random.shuffle(chunk_ids)
            for chunk in chunk_ids:
                if len(self.data_chunks[server]) >= self._capacity:
                    server += 1
                self.data_chunks[server].append(chunk)

    def init_full_copy_placement(self):
        """Write one shuffled copy over the first half of the servers, then
        mirror those servers onto the second half."""
        chunk_ids = list(range(0, self.data_amount))
        random.shuffle(chunk_ids)
        server = 0
        for chunk in chunk_ids:
            if len(self.data_chunks[server]) >= self._capacity:
                server += 1
            self.data_chunks[server].append(chunk)
        # Fixed: integer division -- `/ 2` produced float dict keys (5.0 ...)
        # that only worked by the accident of hash(5.0) == hash(5).
        half = self.servers_amount // 2
        for key in range(server + 1):
            self.data_chunks[key + half] = self.data_chunks[key]

    def test_servers(self):
        """For every pair of servers, count the pairs that share at least one
        chunk (killing both loses data) and report the loss percentage:
        lost / (lost + safe) over all C(n, 2) pairs."""
        servers = list(self.data_chunks.keys())
        lost_data = 0
        not_lost_data = 0
        for i in range(0, len(servers)):
            for j in range(i + 1, len(servers)):
                # Set intersection replaces the original O(n^2) scan per pair.
                if set(self.data_chunks[i]) & set(self.data_chunks[j]):
                    lost_data += 1
                else:
                    not_lost_data += 1
        return 'Killing 2 arbitrary servers results in data loss in ' + \
               str(round(100*lost_data/(not_lost_data+lost_data), 2)) + '% cases'
# Demo run: mirrored (full-copy) placement with the default 10 servers.
ex = ServerSimulation(0)
print(ex.test_servers())
| true |
c7ad22506fffc6b6162070f3200c92fb379e03ae | Python | Grudz/Python_Course_Instructor | /Homework/voltage_divider_calculator2.py | UTF-8 | 696 | 4 | 4 | [] | no_license | # Homework 4 - Ben Grudzien
# ENG 1503 - Voltage Divider Calculator
#import sys (Gets rid of "none" for other script)
def voltage_divider(Vin, R1, R2):
    """Return Vout = Vin * R2 / (R1 + R2).

    If R1 + R2 is zero, print an error and return None (matching the
    original ZeroDivisionError handling).
    """
    total_resistance = R1 + R2
    if total_resistance == 0:
        print("Error: Can't divide by 0")
        return None
    return (Vin * R2) / total_resistance
print("--- VOLTAGE DIVIDER CALCULATOR ---\n")
input('Press ENTER to begin')
# Interactive loop: prompt for Vin, R1 and R2, print Vout, repeat until Ctrl+C.
while True:
    print("What is the input voltage, Vin?")
    Vin = int(input())
    print("What is R1's value?")
    R1 = int(input())
    print("What is R2's value?")
    R2 = int(input())
    print("The output voltage = ", voltage_divider(Vin, R1, R2))
    input("\nPress ENTER to restart (CTRL+C to exit)\n")
| true |
81e05d3531505ea85231432a22cac27580457970 | Python | Sstark97/Linux-Commands | /Python-Commands/translate.py | UTF-8 | 4,940 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python3
from google_trans_new import google_translator
import argparse
from pathlib import Path
from sys import stderr, stdout
class CpError(Exception):
    """Application-specific error raised by this translate CLI."""
    # NOTE(review): the name looks copied from a cp-command template -- confirm
    # whether it should be renamed (renaming would break existing catchers).
    pass
class Logger:
    """Tiny console logger; `log` output is gated by a verbosity flag."""

    def __init__(self, verbosity=False):
        self.verbose = verbosity

    def set_verbosity(self, verbosity):
        """Enable or disable verbose `log` output."""
        self.verbose = verbosity

    def log(self, message, file=stdout):
        """Print *message* to *file*, but only in verbose mode."""
        if not self.verbose:
            return
        print(message, file=file)

    def warning(self, message, file=stderr):
        """Unconditionally print a WARNING-prefixed message."""
        print(f'WARNING: {message}', file=file)

    def error(self, message, file=stderr):
        """Unconditionally print an ERROR-prefixed message."""
        print(f'ERROR: {message}', file=file)
# Module-level singletons: quiet-by-default logger and the Google translator.
logger = Logger()
translator = google_translator()
def translateControl(sentence: str, spanish=False, english=False, deutch=False, french=False, chinese=False, japanese=False):
    """Dispatch *sentence* to the translator selected by the first truthy flag.

    If no language flag is set, log an error instead.
    """
    dispatch = (
        (spanish, translate_spanish),
        (english, translate_english),
        (deutch, translate_deutch),
        (french, translate_french),
        (chinese, translate_chinese),
        (japanese, translate_japanese),
    )
    for flag, handler in dispatch:
        if flag:
            handler(sentence, flag)
            return
    logger.error(f'Use any command for translate {sentence}')
def _translate_to(sentence: str, lang_code: str, lang_name: str, localize_notice=True):
    """Shared worker for the translate_* wrappers below.

    If *sentence* is already in the target language (Google code *lang_code*),
    print an "is already in <lang_name>" notice -- itself translated into the
    target language unless localize_notice is False. Otherwise log the action
    and print the translation.

    Fixed: the six translate_* functions were near-identical copy-paste; their
    bodies now share this helper (which also makes the Chinese verbose log
    consistent -- it used to read 'Translate Chinese' instead of
    'Translate to Chinese').
    """
    if translator.detect(sentence)[0] == lang_code:
        notice = 'is already in ' + lang_name
        if localize_notice:
            notice = translator.translate(notice, lang_tgt=lang_code)
        print(f'{sentence} ' + f'{notice}')
    else:
        logger.log(f'Translate to {lang_name} -> {sentence}')
        print(translator.translate(sentence, lang_tgt=lang_code))

def translate_spanish(sentence: str, spanish):
    """Print *sentence* in Spanish (flag argument unused, kept for compatibility)."""
    _translate_to(sentence, 'es', 'Spanish')

def translate_english(sentence: str, english):
    """Print *sentence* in English (the notice is not localized, as before)."""
    _translate_to(sentence, 'en', 'English', localize_notice=False)

def translate_deutch(sentence: str, deutch):
    """Print *sentence* in German ('Deutch' spelling kept from the original UI)."""
    _translate_to(sentence, 'de', 'Deutch')

def translate_french(sentence: str, french):
    """Print *sentence* in French."""
    _translate_to(sentence, 'fr', 'French')

def translate_chinese(sentence: str, chinese):
    """Print *sentence* in (simplified) Chinese."""
    _translate_to(sentence, 'zh-CN', 'Chinese')

def translate_japanese(sentence: str, japanese):
    """Print *sentence* in Japanese."""
    _translate_to(sentence, 'ja', 'Japanese')
def mi_translate() -> argparse.Namespace:
    """Build the CLI argument parser and parse the process arguments.

    Exactly one language flag may be chosen (mutually exclusive group);
    `source` is the required sentence to translate.
    """
    parser = argparse.ArgumentParser(
        prog='translate',
        description='translate command implementation in Python'
    )
    language_flags = (
        ('-s', '--spanish', 'Spanish'),
        ('-e', '--english', 'English'),
        ('-d', '--deutch', 'Deutch'),
        ('-f', '--french', 'French'),
        ('-c', '--chinese', 'Chinese'),
        ('-j', '--japanese', 'Japanese'),
    )
    group = parser.add_mutually_exclusive_group()
    for short_flag, long_flag, language in language_flags:
        group.add_argument(
            short_flag, long_flag,
            action='store_true',
            help=f'Translate the sentence to {language}'
        )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Give details about actions being performed'
    )
    parser.add_argument(
        'source',
        type=str,
        help='Source String'
    )
    return parser.parse_args()
def main():
    """CLI entry point: parse args, set verbosity, run the translation."""
    args = mi_translate()
    try:
        logger.set_verbosity(args.verbose)
        translateControl(args.source,args.spanish,args.english,args.deutch,args.french,args.chinese,args.japanese)
    except CpError as e:
        # Report the domain error and exit with a non-zero status.
        logger.error(e)
        exit(1)
if __name__ == '__main__':
    main()
| true |
3cc060ae4761dc71a97441aa9b8cb2b9f5fcd269 | Python | Pavana24/myPython | /ex24print.py | UTF-8 | 925 | 3.6875 | 4 | [] | no_license | print " Let's practise everything."
# Python 2 print statements demonstrating escape sequences.
print 'you\'d need to know \'but escapes with \\ that do \n newlines and \t tabs.'
# Multiline string with embedded tab/newline escapes (typos are part of the text).
poem = """\t The lovely world
with logic so firmed planted
cannot discern \n the needs of love
nor comprehend passion from intution
and recquires an explaination
\n\twhere there is more
"""
print "------------------------"
print poem
print "---------------------------------"
# 10 - 2 + 3 - 6 evaluates to 5 despite the variable's name promising it.
five = 10-2+3-6
print "this five is : %s " %five
def secret_formula(started):
    """Return (jelly_beans, jars, crates) derived from a starting count.

    beans = started * 500; jars = beans / 1000; crates = jars / 1000.
    Division follows the running interpreter (integer under Python 2,
    true division under Python 3).
    """
    beans = started * 500
    jars = beans / 1000
    crates = jars / 1000
    return beans, jars, crates
start_point = 1000
beans, jars, crates = secret_formula(start_point)
# NOTE(review): "od" is a typo for "of" in the printed text (left as-is here).
print "With the starting point od %d:" %start_point
print "we'd have %dbeans, %d jars and %d crates" %(beans, jars,crates)
# Python 2 integer division here: 1000 / 10 == 100.
start_point = start_point/10
print "We can also do that this way:"
# %-formatting applied directly to the returned 3-tuple.
print "we'd have %d beans , %d jars, %d crates."%secret_formula(start_point)
| true |
112216ccc9090d627f9307729e86f1ad14f0bc4a | Python | caominhduy/DeepRuby | /test_dataset.py | UTF-8 | 3,907 | 2.640625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """ Run this module to generate the dataset for testing, randomly. """
import numpy as np
import copy
import csv
import os
from datetime import datetime
from dependencies import *
from basics import *
import random as rd
from time import time
from shutil import copy as shucopy
__author__ = 'Duy Cao'
__version__ = '2020.5.5'
TEST_DATASET_SIZE = 100 # notice that the final dataset may be smaller
                        # after duplicate_removal
# Move alphabet for the random walk; presumably '2' = half turn and trailing
# 'r' = reverse turn -- confirm against basics.turn().
AVAILABLE_ROTATIONS = ['l', 'u', 'r', 'b', 'f', 'd', \
                       'l2', 'u2', 'r2', 'b2', 'f2', 'd2', \
                       'lr', 'ur', 'rr', 'br', 'fr', 'dr']
def datetime_():
    """Return the current local time formatted as YYMMDDhhmm (used to name dataset files)."""
    return datetime.now().strftime("%y%m%d%H%M")
def touch(path, path2, filename):
    """(Re)create an empty CSV at datasets/<path>/<path2>/<filename>.csv.

    path  -- dataset split folder ('test' or 'train');
    path2 -- sub-folder (the cube size as a string).
    Missing parent directories are created; an existing file is replaced
    with a fresh empty one.

    Simplified from a chain of exists()/mkdir() checks with two duplicated
    create-file branches to a single makedirs + create.
    """
    target_dir = os.path.join('datasets', path, path2)
    os.makedirs(target_dir, exist_ok=True)
    target = os.path.join(target_dir, filename + '.csv')
    if os.path.exists(target):
        os.remove(target)
    # 'x' keeps the original exclusive-create semantics after the removal.
    with open(target, 'x'):
        pass
def generator(path, path2, filename): # rotating randomly & write combinations into dataset
    """Apply TEST_DATASET_SIZE random moves to a fresh cube, writing one CSV
    row per move: the flattened cube state followed by the encoded move."""
    touch(path, path2, filename)
    cube = init_cube(RUBIK_SIZE)
    # NOTE(review): `path` is rebound from a folder name to the full file path,
    # and `generator` below shadows this function's own name -- confusing but
    # harmless here.
    path = 'datasets/' + path + '/' + path2 + '/' + filename + '.csv'
    with open(path, 'w', newline='') as dataset:
        generator = csv.writer(dataset)
        for i in range(TEST_DATASET_SIZE):
            move = rd.choice(AVAILABLE_ROTATIONS)
            cube = turn(move, cube)
            row = rubik_to_array(cube, RUBIK_SIZE)
            row = np.append(row, rotating_notations(move))
            generator.writerow(row)
def duplicate_removal(path, path2, filename): # remove duplicate in datasets
    """Rewrite the dataset CSV keeping only the first occurrence of each
    full row; return how many duplicate rows were dropped."""
    old_path = 'datasets/' + path + '/' + path2 + '/' + filename + '.csv'
    new_path = 'datasets/' + path + '/' + path2 + '/' + filename + '_cached.csv'
    # Move the data aside, recreate an empty target, then copy unique lines back.
    shucopy(old_path, new_path)
    os.remove(old_path)
    touch(path, path2, filename)
    with open(new_path, 'r') as clipboard, open (old_path, 'w') as final:
        cached = set()
        counter = 0
        for line in clipboard:
            if line in cached:
                counter+=1
            if line not in cached:
                cached.add(line)
                final.write(line)
    os.remove(new_path)
    return(counter)
def duplicate_removal_v2(path, path2, filename, size): # check only combinations, not full row
    """Like duplicate_removal, but two rows count as duplicates when their
    first size**2 * 6 characters (the flattened cube state) match, ignoring
    the trailing move label; return the number of rows dropped."""
    old_path = 'datasets/' + path + '/' + path2 + '/' + filename + '.csv'
    new_path = 'datasets/' + path + '/' + path2 + '/' + filename + '_cached.csv'
    # One character per sticker: size^2 stickers on each of the 6 faces.
    num_of_cols = size**2*6
    shucopy(old_path, new_path)
    os.remove(old_path)
    touch(path, path2, filename)
    with open(new_path, 'r') as clipboard, open (old_path, 'w') as final:
        cached = set()
        counter = 0
        for line in clipboard:
            if line[:num_of_cols] in cached:
                counter+=1
            if line[:num_of_cols] not in cached:
                cached.add(line[:num_of_cols])
                final.write(line)
    os.remove(new_path)
    return(counter)
if __name__ == '__main__':
    # Generate one timestamped test dataset, then deduplicate by cube state.
    start = time()
    filename = datetime_()
    # NOTE(review): `dir` shadows the builtin of the same name (local to this
    # guard, so harmless here).
    dir = 'test'
    dir2 = str(RUBIK_SIZE)
    generator(dir, dir2, filename)
    print("Done! \nTime elapsed = " + str(round((time() - start), 3)) + " seconds")
    duplicates = duplicate_removal_v2(dir, dir2, filename, RUBIK_SIZE)
    print(str(duplicates) + "/" + str(TEST_DATASET_SIZE) + " were duplicates " + \
          "and automatically removed.")
| true |
ed651cc49ce1bb25adadace4be077c6fd330b42b | Python | xiangbaloud/okgo_py | /getvolinfo.py | UTF-8 | 777 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python
import sys
import requests
def usage():
    """Print the command-line usage text and exit the process successfully."""
    print('[-] Usage: ./getvolinfo.py <uuid>, if you want to display all vols, use <all>')
    exit(0)
def main():
    """Fetch and print one volume (by uuid) or all volumes from the REST API."""
    url = "http://10.144.7.2/api/share/volumes"
    # NOTE(review): API key is hard-coded in source and sent over plain HTTP --
    # it should be moved to configuration/environment and the transport reviewed.
    key = {'X-AccelStor-API-Key':'4e40139db3e56759fd28ec3f542065eb9048020a'}
    if len(sys.argv) > 1:
        argv = str(sys.argv[1])
        if argv == "all":
            url_vol = url
        else:
            vol_uuid = argv
            url_vol = "http://10.144.7.2/api/share/volumes/" + vol_uuid
    else:
        # usage() exits the process, so url_vol is always bound below this point.
        usage()
    # trust_env=False ignores any proxy settings from the environment.
    session = requests.Session()
    session.trust_env = False
    response = session.get(url_vol, headers=key)
    session.close()
    # Python 2 print statements (this script is Python 2).
    print response
    print response.content
# Script entry point.
if __name__ == '__main__':
    main()
| true |
7f5b4470998e2d395a49b9e3d0765bdb52f133ae | Python | gil9red/SimplePyScripts | /world_seed_in_binary_2D.py | UTF-8 | 1,821 | 3.140625 | 3 | [
"CC-BY-4.0"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import hashlib
import random
import string
from itertools import cycle
def get_random_seed(length: int = 8) -> str:
    """Return a random alphanumeric seed string of the given length."""
    alphabet = string.ascii_letters + string.digits
    picked = random.choices(alphabet, k=length)
    return "".join(picked)
def get_bits_seed(seed: str) -> str:
    """Hash *seed* with SHA-256 and return the digest as a 256-char bit string."""
    digest = hashlib.sha256(seed.encode("utf-8")).digest()
    return "".join(format(byte, "08b") for byte in digest)
def create_world(rows: int, cols: int) -> list[list[int]]:
    """Build a rows x cols grid of zeros; each row is an independent list."""
    grid = []
    for _ in range(rows):
        grid.append([0 for _ in range(cols)])
    return grid
def print_world(world: list[list[int]]):
    """Print the grid, one space-separated row per line."""
    lines = [" ".join(str(cell) for cell in row) for row in world]
    print("\n".join(lines))
def fill_world(world: list[list[int]], seed: str):
    """Fill *world* in place with the seed's hash bits, cycling them as needed."""
    bit_stream = cycle(get_bits_seed(seed))
    for cells in world:
        # Width is taken from the first row, as in a rectangular grid.
        for col in range(len(world[0])):
            cells[col] = int(next(bit_stream))
if __name__ == "__main__":
    # The requested seed length must be honoured for every size.
    for length in range(8, 64 + 1):
        assert len(get_random_seed(length=length)) == length
    print("Random seed:", get_random_seed())
    print()
    # Known SHA-256 bit strings pin down the hashing/encoding behaviour.
    expected_one = "0110101110000110101100100111001111111111001101001111110011100001100111010110101110000000010011101111111101011010001111110101011101000111101011011010010011101010101000100010111100011101010010011100000000011110010100101101110110110111100001110101101101001011"
    expected_one_two_three = "1010011001100101101001000101100100100000010000100010111110011101010000010111111001001000011001111110111111011100010011111011100010100000010010100001111100111111111111110001111110100000011111101001100110001110100001101111011111110111101000100111101011100011"
    assert get_bits_seed("1") == expected_one
    assert get_bits_seed("123") == expected_one_two_three
    world = create_world(rows=5, cols=10)
    print_world(world)
    print()
    fill_world(world, seed="123")
    print_world(world)
| true |
6d576c38f510673ec84c79c8c78345eeead54949 | Python | AnneJoJo/Algorithm | /Algorithm2019/Math/waterAndJar.py | UTF-8 | 1,887 | 4.1875 | 4 | [] | no_license | # 这是一道脑筋急转弯题,我想很多人以前应该听过这道题目,有一个容量为3升和一个容量为5升的水罐,问我们如何准确的称出4升的水。我想很多人都知道怎么做,先把5升水罐装满水,倒到3升水罐里,这时5升水罐里还有2升水,
# 然后把3升水罐里的水都倒掉,把5升水罐中的2升水倒入3升水罐中,这时候把5升水罐解满,然后往此时有2升水的3升水罐里倒水,这样5升水罐倒出1升后还剩4升即为所求。这个很多人都知道,但是这道题随意给我们了三个参数,
# 问有没有解法,这就比较难了。这里我就照搬网上大神的讲解吧:
#
# 这道问题其实可以转换为有一个很大的容器,我们有两个杯子,容量分别为x和y,问我们通过用两个杯子往里倒水,和往出舀水,问能不能使容器中的水刚好为z升。那么我们可以用一个公式来表达:
#
# z = m * x + n * y
#
# 其中m,n为舀水和倒水的次数,正数表示往里舀水,负数表示往外倒水,那么题目中的例子可以写成: 4 = (-2) * 3 + 2 * 5,即3升的水罐往外倒了两次水,5升水罐往里舀了两次水。
# 那么问题就变成了对于任意给定的x,y,z,存不存在m和n使得上面的等式成立。根据裴蜀定理,ax + by = d的解为 d = gcd(x, y),那么我们只要只要z % d == 0,上面的等式就有解,
# 所以问题就迎刃而解了,我们只要看z是不是x和y的最大公约数的倍数就行了,别忘了还有个限制条件x + y >= z,因为x和y不可能称出比它们之和还多的水,参见代码如下;\
def gcd(x, y):
    """Greatest common divisor via Euclid's algorithm.

    Bug fix: the original discarded the result of the recursive call, so
    every invocation with y != 0 fell through and returned None.
    """
    if y == 0:
        return x
    return gcd(y, x % y)  # e.g. (5,3) -> (3,2) -> (2,1) -> (1,0) -> 1
def waterAndJar(x, y, z):
    """LeetCode 365: can exactly z litres be measured with jugs of size x and y?

    By Bezout's identity z is measurable iff z <= x + y and z is a multiple
    of gcd(x, y).

    Bug fixes vs. the original: the ``z == 0`` check came after the gcd
    branch and was unreachable (x + y >= 0 always holds), and the case
    x == y == 0 crashed with ZeroDivisionError via ``z % gcd(0, 0)``.
    """
    if z == 0:
        return True  # measuring nothing is always possible (avoids gcd(0, 0))
    if x + y < z:
        return False  # both jugs together cannot hold z litres
    return z % gcd(x, y) == 0
| true |
0960915d5596c257b91a8dad992ff0fed66260a2 | Python | HJReachability/learning_feedback_linearization | /ros/src/quads/src/system_identifier.py | UTF-8 | 2,902 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python
import rospy
import sys
import numpy as np
from quads_msgs.msg import Control
from quads_msgs.msg import State
class SystemIdentifier(object):
    """ROS node helper (Python 2) that buffers interleaved state/control
    samples and least-squares fits the inverse inertial parameters
    [1/m, 1/Ix, 1/Iy, 1/Iz] of a quadrotor from finite-difference dynamics."""
    def __init__(self):
        # Topic subscriptions feed the two callbacks below.
        self._state_sub = rospy.Subscriber("/state", State, self.state_callback)
        self._control_sub = rospy.Subscriber("/control/raw", Control, self.control_callback)
        # Paired sample buffers; the length comparisons in the callbacks keep
        # states and controls interleaved one-to-one.
        self._states = []
        self._controls = []
        self._dt = []
        self._last_time = None
    def state_callback(self, msg):
        """Buffer one 14-entry state vector plus the wall-clock step since
        the previous accepted state."""
        x = np.array([msg.x, msg.y, msg.z, msg.theta,
                      msg.phi, msg.psi, msg.dx, msg.dy,
                      msg.dz, msg.zeta, msg.xi, msg.q, msg.r, msg.p])
        # Only accept a state when we are not already ahead of the controls.
        if len(self._states) <= len(self._controls):
            self._states.append(x)
            t = rospy.Time.now().to_sec()
            if self._last_time is not None:
                self._dt.append(t - self._last_time)
            self._last_time = t
    def control_callback(self, msg):
        """Buffer one 4-entry control vector (message fields named *dot2)."""
        u = np.array([msg.thrustdot2, msg.pitchdot2, msg.rolldot2, msg.yawdot2])
        if len(self._controls) <= len(self._states):
            self._controls.append(u)
    def solve(self):
        """Assemble six equations per state transition and least-squares
        solve A x = b for x = [1/m, 1/Ix, 1/Iy, 1/Iz]; prints the recovered
        m, Ix, Iy, Iz.  No-op when fewer than two states were collected."""
        num_transitions = len(self._states) - 1
        num_equations = num_transitions * 6
        if num_transitions <= 0:
            return
        A = np.zeros((num_equations, 4))
        b = np.zeros((num_equations, 1))
        # Local aliases for readability in the rotation terms below.
        sin = np.sin
        cos = np.cos
        g = 9.81
        for ii in range(num_transitions):
            current_x = self._states[ii]
            current_u = self._controls[ii]
            next_x = self._states[ii + 1]
            # Attitude angles and the thrust-related state entry (zeta).
            theta = current_x[3]
            phi = current_x[4]
            psi = current_x[5]
            zeta = current_x[9]
            # Unknowns are [1/m, 1/Ix, 1/Iy, 1/Iz].
            A[6 * ii, 0] = (sin(phi) * sin(psi) + cos(phi) * cos(psi) * sin(theta)) * zeta
            A[6 * ii + 1, 0] = (-cos(psi) * sin(phi) + cos(phi) * sin(psi) * sin(theta)) * zeta
            A[6 * ii + 2, 0] = (cos(phi) * cos(theta)) * zeta
            A[6 * ii + 3, 1] = current_u[1]
            A[6 * ii + 4, 2] = current_u[2]
            A[6 * ii + 5, 3] = current_u[3]
            # Finite-difference accelerations; gravity is added on the z row.
            b[6 * ii, 0] = (next_x[6] - current_x[6]) / self._dt[ii]
            b[6 * ii + 1, 0] = (next_x[7] - current_x[7]) / self._dt[ii]
            b[6 * ii + 2, 0] = (next_x[8] - current_x[8]) / self._dt[ii] + g
            b[6 * ii + 3, 0] = (next_x[11] - current_x[11]) / self._dt[ii]
            b[6 * ii + 4, 0] = (next_x[12] - current_x[12]) / self._dt[ii]
            b[6 * ii + 5, 0] = (next_x[13] - current_x[13]) / self._dt[ii]
        # Solve A x = b.
        soln, _, _, _ = np.linalg.lstsq(A, b)
        print "Solved!"
        print "-- m = ", 1.0 / soln[0]
        print "-- Ix = ", 1.0 / soln[1]
        print "-- Iy = ", 1.0 / soln[2]
        print "-- Iz = ", 1.0 / soln[3]
| true |
c3cec82c8b415deb05005d50af19ef20296f788a | Python | lshi0335/MSITM6341 | /Assignments/homework_assignment_3/working_with_lists.py | UTF-8 | 816 | 4.3125 | 4 | [] | no_license | # Lei Shi
# 0985491
# 9/15/2019
# MSITM 6341
# Assignment 3
# Shopping-list demo: indexing, appending, deleting and updating parallel lists.
grocery_items = ["apple", "banana", "carrot", "dill", "eggplant"]
prices = [1.99, 0.64, 1.00, 0.50, 1.49]

# 1. Third item with its price
print(f"{grocery_items[2]}: ${prices[2]:,.2f}")

# 2. Last item with its price
print(f"{grocery_items[-1]}: ${prices[-1]:,.2f}")

# 3. Append a sixth item and its price
grocery_items.append("fish")
prices.append(8.99)

# 4./5. Show both lists
print(grocery_items)
print(prices)

# 6. Drop the first item and its price
grocery_items.pop(0)
prices.pop(0)

# 7. Double the price of the (new) second item
prices[1] *= 2

# 8./9. Show both lists again
print(grocery_items)
print(prices)
| true |
53c5cae635d9a99ff438e2f4addd1b60b321a1b8 | Python | yanxurui/keepcoding | /python/algorithm/cracking/18.9.py | UTF-8 | 1,311 | 3.34375 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import heapq
class MedianFinder:
    """Running-median container.

    The lower half of the stream lives in a negated max-heap, the upper half
    in a min-heap; the upper heap may hold one extra element, so the (lower)
    median is always at a heap root.
    """
    def __init__(self):
        self._lower = []  # negated values: max-heap of the lower half
        self._upper = []  # min-heap of the upper half
    def addNum(self, num):
        """Insert *num*, keeping both halves ordered and balanced."""
        if len(self._upper) == len(self._lower):
            # Route through the lower half so its maximum moves up.
            promoted = heapq.heappushpop(self._lower, -num)
            heapq.heappush(self._upper, -promoted)
        else:
            # Upper half is ahead by one: push the upper minimum down.
            demoted = heapq.heappushpop(self._upper, num)
            heapq.heappush(self._lower, -demoted)
    def findMedian(self):
        """Return the current median (the lower one for even counts)."""
        if len(self._lower) != len(self._upper):
            return self._upper[0]
        return -self._lower[0]
class Middle:
    """Streaming-median wrapper used by the test harness below."""
    def getMiddle(self, A, n):
        """Return the running median after each prefix of A (n is unused)."""
        def running(values):
            tracker = MedianFinder()
            for value in values:
                tracker.addNum(value)
                yield tracker.findMedian()
        return list(running(A))
if __name__ == '__main__':
    # Ad-hoc test harness; testfunc/common are project-local modules.
    from testfunc import test
    from common import TreeNode, ListNode
    # Each case is ((args...), expected): the running medians of the stream.
    test_data = [
        (
            (
                [1,2,3,4,5,6],6
            ),
            [1,1,2,2,3,3]
        ),
        (
            (
                [6,5,4,3,2,1],6
            ),
            [6,5,5,4,4,3]
        ),
    ]
    test(Middle().getMiddle, test_data)
| true |
5480474fc98ccd05dd04213fe8909ac6c00430b5 | Python | woodymit/millstone_accidental_source | /genome_designer/utils/bam_utils.py | UTF-8 | 1,949 | 3.0625 | 3 | [
"MIT"
] | permissive | """
Utility functions for working with bam files.
"""
import os
import shutil
import subprocess
from django.conf import settings
def filter_bam_file_by_row(input_bam_path, filter_fn, output_bam_path):
    """Filters rows out of a bam file that don't pass a given filter function.

    This function keeps all header lines.

    Args:
        input_bam_path: Absolute path to input bam file.
        filter_fn: Function applied to each row of the input bam and returns a
            Boolean. If True, keeps the row.
        output_bam_path: Absolute path to the output bam file.

    Raises:
        subprocess.CalledProcessError: If either samtools invocation fails.
            (The original ignored samtools' exit status, which could silently
            produce a truncated or empty output bam.)
    """
    output_root = os.path.splitext(output_bam_path)[0]
    initial_sam_intermediate = output_root + '.sam'
    filtered_sam_intermediate = output_root + '.filtered.sam'
    final_bam = output_root + '.filtered.bam'

    # Convert to SAM (preserve header with -h option).
    with open(initial_sam_intermediate, 'w') as output_fh:
        subprocess.check_call(
                [settings.SAMTOOLS_BINARY, 'view', '-h', input_bam_path],
                stdout=output_fh)

    # Filter: always keep header lines ('@' prefix); keep other rows only
    # when filter_fn accepts them.
    with open(filtered_sam_intermediate, 'w') as output_fh, \
            open(initial_sam_intermediate) as input_fh:
        for line in input_fh:
            if line.startswith('@') or filter_fn(line):
                output_fh.write(line)

    # Write final bam.
    with open(final_bam, 'w') as fh:
        subprocess.check_call(
                [settings.SAMTOOLS_BINARY, 'view', '-bS',
                 filtered_sam_intermediate],
                stdout=fh)

    # Move temp file to the original file location.
    shutil.move(final_bam, output_bam_path)

    # Delete intermediate files.
    os.remove(initial_sam_intermediate)
    os.remove(filtered_sam_intermediate)
| true |
52bb878dcd77b7644cd196dededb86848c151d69 | Python | JasonKessler/sklearn-porter | /examples/classifier/BernoulliNB/java/basics.py | UTF-8 | 2,205 | 3.015625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from sklearn.datasets import load_iris
from sklearn.naive_bayes import BernoulliNB
from sklearn_porter import Porter
# Fit a BernoulliNB classifier on Iris and transpile it to Java source.
iris_data = load_iris()
X, y = iris_data.data, iris_data.target
clf = BernoulliNB()
clf.fit(X, y)
# Cheese!
# Porter defaults to Java output; the commented call makes that explicit.
result = Porter().port(clf)
# result = Porter(language='java').port(clf)
print(result)
# The module-level string below is sample output of the transpiler, kept as
# inline documentation of what print(result) emits.
"""
class Tmp {
    public static int predict(double[] atts) {
        if (atts.length != 4) {
            return -1;
        }
        int i, j;
        double[] priors = {-1.0986122886681096, -1.0986122886681096, -1.0986122886681096};
        double[][] negProbs = {{-3.9512437185814138, -3.9512437185814138, -3.9512437185814138, -3.9512437185814138}, {-3.9512437185814138, -3.9512437185814138, -3.9512437185814138, -3.9512437185814138}, {-3.9512437185814138, -3.9512437185814138, -3.9512437185814138, -3.9512437185814138}};
        double[][] delProbs = {{3.931825632724312, 3.931825632724312, 3.931825632724312}, {3.931825632724312, 3.931825632724312, 3.931825632724312}, {3.931825632724312, 3.931825632724312, 3.931825632724312}, {3.931825632724312, 3.931825632724312, 3.931825632724312}};
        double[] jll = new double[3];
        for (i = 0; i < 3; i++) {
            double sum = 0.;
            for (j = 0; j < 4; j++) {
                sum += atts[i] * delProbs[j][i];
            }
            jll[i] = sum;
        }
        for (i = 0; i < 3; i++) {
            double sum = 0.;
            for (j = 0; j < 4; j++) {
                sum += negProbs[i][j];
            }
            jll[i] += priors[i] + sum;
        }
        double highestLikeli = Double.NEGATIVE_INFINITY;
        int classIndex = -1;
        for (i = 0; i < 3; i++) {
            if (jll[i] > highestLikeli) {
                highestLikeli = jll[i];
                classIndex = i;
            }
        }
        return classIndex;
    }
    public static void main(String[] args) {
        if (args.length == 4) {
            double[] atts = new double[args.length];
            for (int i = 0, l = args.length; i < l; i++) {
                atts[i] = Double.parseDouble(args[i]);
            }
            System.out.println(Tmp.predict(atts));
        }
    }
}
"""
1681ab059da8c66bd880a1b6d32463e498d9dc33 | Python | vaguely-right/Baseball | /Retrosheet/Old/6-retrosheetlogistic.py | UTF-8 | 9,423 | 2.578125 | 3 | [] | no_license | import pandas as pd
import numpy as np
from tqdm import tqdm
import numpy.linalg as la
import seaborn as sns
from sklearn.linear_model import LogisticRegression
import seaborn as sns
# Wider console output so the multi-column frames below print on one screen.
pd.set_option('display.width',150)
pd.set_option('display.max_columns',16)
#%%
# Read the constants: seasonal wOBA/FIP weights (w1B, wHR, cFIP, ...) keyed
# by Season; used by pivot_events below.
fg = pd.read_csv('fgconstants.csv')
fg.set_index('Season',inplace=True)
def get_events(year):
    """Load one season's Retrosheet game/event files and return a cleaned
    event-level DataFrame with gamesite, event class, times-through-the-order
    and combined pitcher/batter handedness columns.

    Fix vs. the original: the sacrifice-flag and timesthrough overrides used
    chained indexing (``ev.event[mask] = ...``), which pandas may apply to a
    temporary copy and silently drop; they now use ``.loc`` as the pandas
    docs require for assignment (a ``.copy()`` after the boolean filter makes
    the frame unambiguously our own).
    """
    if isinstance(year, int):
        year = str(year)
    #Define the files
    gmfile = 'Data\\'+year+'games.txt'
    evfile = 'Data\\'+year+'events.txt'
    idfile = 'retroID.csv'
    #Read the data
    gm = pd.read_csv(gmfile)
    ev = pd.read_csv(evfile)
    # Player-ID lookup; currently only used to build a display name.
    pid = pd.read_csv(idfile,index_col=False)
    pid['Name'] = pid.First+' '+pid.Last
    #Get the gamesite from the game dataframe
    ev = ev.merge(gm[['gameid','gamesite']],how='left',on='gameid')
    #Get just the end of batter events
    ev = ev[ev.battereventflag=='T'].copy()
    #Create a dictionary for the eventtype codes
    eventdict = {0 : 'UNKNOWN',
                 1 : 'NOBAT',
                 2 : 'BIPOUT',
                 3 : 'K',
                 4 : 'NOBAT',
                 5 : 'NOBAT',
                 6 : 'NOBAT',
                 7 : 'NOBAT',
                 8 : 'NOBAT',
                 9 : 'NOBAT',
                 10 : 'NOBAT',
                 11 : 'NOBAT',
                 12 : 'NOBAT',
                 13 : 'NOBAT',
                 14 : 'BB',
                 15 : 'OTHER',
                 16 : 'BB',
                 17 : 'OTHER',
                 18 : 'OTHER',
                 19 : 'BIPOUT',
                 20 : 'SNGL',
                 21 : 'XBH',
                 22 : 'XBH',
                 23 : 'HR',
                 24 : 'NOBAT'}
    eventdf = pd.DataFrame.from_dict(eventdict,orient='index')
    eventdf.columns=['event']
    #Assign event abbreviations to every event
    ev = ev.merge(eventdf,how='left',left_on='eventtype',right_index=True)
    #Specify sacrifice hit and fly events
    ev.loc[ev.shflag=='T', 'event'] = 'OTHER'
    ev.loc[ev.sfflag=='T', 'event'] = 'BIPOUT'
    # A pitcher's 10th..18th batter faced is the 2nd time through, etc.,
    # capped at 2 (third time or later).
    ev['timesthrough'] = ev.groupby(['gameid','pitcher']).cumcount()//9
    ev.loc[ev.timesthrough>2, 'timesthrough'] = 2
    ev['pitbathand'] = ev.pitcherhand+ev.batterhand
    return ev
def pivot_events(year,split,minpa=0):
    """Pivot one season's events into per-split outcome rates plus derived
    AVG/OBP/wOBA/FIP columns, using the seasonal constants in module-level fg.
    NOTE(review): minpa is accepted but never applied -- confirm intended.
    Returns None (after printing a message) for an unsupported split."""
    if split not in ['batter','pitcher','gamesite','batterhand','pitcherhand','timesthrough','pitbathand']:
        print('Invalid split index')
        print('Currently supported: batter, pitcher, gamesite, batterhand, pitcherhand, pitbathand, timesthrough')
        return
    ev = get_events(year)
    # New in this version: drop OTHER events
    ev = ev[ev.event!='OTHER']
    # Count events per split value; margins=True appends an 'All' totals
    # row/column, and [:-1] drops the totals row again.
    ptable = pd.pivot_table(ev[[split,'event']],index=[split],columns=['event'],aggfunc=len,fill_value=0,margins=True)
    ptable = ptable[:-1]
    ptable = ptable.rename(columns={'All':'PA'})
#    ptable = ptable[['PA','SNGL','XBH','HR','BB','K','BIPOUT','OTHER']]
    ptable = ptable[['PA','SNGL','XBH','HR','BB','K','BIPOUT']]
    # Convert raw counts to per-PA rates.
    ptable.SNGL = ptable.SNGL/ptable.PA
    ptable.XBH = ptable.XBH/ptable.PA
    ptable.HR = ptable.HR/ptable.PA
    ptable.BB = ptable.BB/ptable.PA
    ptable.K = ptable.K/ptable.PA
    ptable.BIPOUT = ptable.BIPOUT/ptable.PA
#    ptable.OTHER = ptable.OTHER/ptable.PA
#    ptable['AVG'] = (ptable.SNGL+ptable.XBH+ptable.HR)/(ptable.SNGL+ptable.XBH+ptable.HR+ptable.K+ptable.BIPOUT)
#    ptable['OBP'] = (ptable.SNGL+ptable.XBH+ptable.HR+ptable.BB)/(ptable.SNGL+ptable.XBH+ptable.HR+ptable.K+ptable.BIPOUT+ptable.BB)
#    ptable['WOBA'] = (ptable.SNGL*0.89+ptable.XBH*1.31+ptable.HR*2.10+ptable.BB*0.70)/(1-ptable.OTHER)
#    ptable['FIP'] = (ptable.HR*13+ptable.BB*3-ptable.K*2)/(ptable.K+ptable.BIPOUT)*3+3.05
    # Rate stats from the per-PA probabilities, with season constants from fg.
    ptable['AVG'] = (ptable.SNGL+ptable.XBH+ptable.HR)/(1-ptable.BB)
    ptable['OBP'] = ptable.SNGL+ptable.XBH+ptable.HR+ptable.BB
    # NOTE(review): get_events() stringifies year but fg is indexed by Season;
    # confirm the index dtype matches the type passed here.
    c = fg.loc[year]
    ptable['WOBA'] = ptable.SNGL*c.w1B + ptable.XBH*(c.w2B*0.9+c.w3B*0.1) + ptable.HR*c.wHR + ptable.BB*(c.wBB*0.9+c.wHBP*0.1)
    ptable['FIP'] = (ptable.HR*13+ptable.BB*3-ptable.K*2)/(ptable.K+ptable.BIPOUT)*3+c.cFIP
    return ptable
#%%
# Get the events for a specified year
year = 2013
cols = ['SNGL','XBH','HR','BB','K','BIPOUT']
splits = ['batter','pitcher','gamesite','timesthrough','pitbathand']
ev = get_events(year)
ev = ev[ev.event!='OTHER']
ev = ev[['batter','pitcher','gamesite','timesthrough','pitbathand','event']]
# Constant indicator used as the pivot value below (1.0 wherever a row exists).
ev['ind'] = 1.0
# Calculate the mean probabilities, ratios, and logratios
pbar = ev.event.value_counts(normalize=True).to_frame().transpose()
pbar = pbar[['SNGL','XBH','HR','BB','K','BIPOUT']]
rbar = pbar / (1-pbar)
logrbar = np.log(rbar)
# Pivot to get the indicators (one-hot columns per split level; NaN -> 0)
xbatter = ev.pivot(columns='batter',values='ind').fillna(0)
xpitcher = ev.pivot(columns='pitcher',values='ind').fillna(0)
xgamesite = ev.pivot(columns='gamesite',values='ind').fillna(0)
xtimesthrough = ev.pivot(columns='timesthrough',values='ind').fillna(0)
xpitbathand = ev.pivot(columns='pitbathand',values='ind').fillna(0)
# Concatenate the indicators for the array, tagging each with its split name
xbatter.columns = pd.MultiIndex.from_product([['batter'],xbatter.columns])
xpitcher.columns = pd.MultiIndex.from_product([['pitcher'],xpitcher.columns])
xgamesite.columns = pd.MultiIndex.from_product([['gamesite'],xgamesite.columns])
xtimesthrough.columns = pd.MultiIndex.from_product([['timesthrough'],xtimesthrough.columns])
xpitbathand.columns = pd.MultiIndex.from_product([['pitbathand'],xpitbathand.columns])
x = pd.concat([xbatter,xpitcher,xgamesite,xtimesthrough,xpitbathand],axis=1)
x.columns.names=['split','ID']
#x['intercept','intercept'] = 1.0
#%% Try categorical logistic regression with just two predictors
reg = LogisticRegression()
X = x[['batter','pitcher']].to_numpy()
Y = ev[['event']]
reg.fit(X,Y)
#%% Get the means in logit, probit, and probability space
bbar = pd.DataFrame(reg.intercept_).transpose()
bbar.columns = reg.classes_.transpose()
bbar.index = ['mean']
rbar = np.exp(bbar)
pbar = rbar/(1+rbar)
pbar['SUM'] = pbar.sum(axis=1)
# Bare expression: displays only in a REPL/notebook cell, no effect in a script.
pbar
#%% ALTERNATE: Go the other way with it, start with the "true" pbar
pbar = ev.event.value_counts(normalize=True).to_frame().transpose()[['BB','BIPOUT','HR','K','SNGL','XBH']]
rbar = pbar / (1-pbar)
bbar = np.log(rbar)
#%% Transform the estimates
bhat = pd.DataFrame(reg.coef_.transpose())
bhat.index = x[['batter','pitcher']].columns
bhat.columns = reg.classes_
rhat = np.exp(np.add(bhat,bbar))
phat = rhat/(1+rhat)
phat['SUM'] = phat.sum(axis=1)
#%%
# NOTE(review): 'p' is only defined in a later cell ("base values" below);
# these plots assume that cell was already run interactively.
s='batter'
o='HR'
sns.scatterplot(x=p[o].loc[s],y=phat[o].loc[s],hue=p.loc[s].PA)
p[o].loc[s].hist(weights=p.loc[s].PA)
phat[o].loc[s].hist(weights=p.loc[s].PA)
#%%
#%%
#%%
#%% Now try categorical logistic regression for all five predictors
#reg = LogisticRegression()
#reg = LogisticRegression(class_weight=truepbar.transpose().event.to_dict())
#reg = LogisticRegression(class_weight='balanced')
reg = LogisticRegression(class_weight=truepbar.transpose().event.apply(lambda x: np.sqrt(x/(1-x))).to_dict())
X = x.to_numpy()
Y = ev[['event']]
reg.fit(X,Y)
#%% Get the means in logit, probit, and probability space
bbar = pd.DataFrame(reg.intercept_).transpose()
bbar.columns = reg.classes_.transpose()
bbar.index = ['mean']
rbar = np.exp(bbar)
pbar = rbar/(1+rbar)
#pbar['SUM'] = pbar.sum(axis=1)
#pbar
#%% ALTERNATE: Go the other way with it, start with the "true" pbar
truepbar = ev.event.value_counts(normalize=True).to_frame().transpose()[['BB','BIPOUT','HR','K','SNGL','XBH']]
rbar = truepbar / (1-truepbar)
bbar = np.log(rbar)
#%% Transform the estimates
bhat = pd.DataFrame(reg.coef_.transpose())
bhat.index = x.columns
bhat.columns = reg.classes_
rhat = np.exp(np.add(bhat,bbar))
phat = rhat/(1+rhat)
#phat['SUM'] = phat.sum(axis=1)
#SUM is way off now, normalize
#phat = np.divide(phat,np.sum(phat,axis=1).to_frame())
#phat.sum(axis=1)
#%%
# Inspection lines (display-only in a REPL) plus a calibration scatter plot.
phat.groupby('split').mean()
pbar
truepbar
sns.scatterplot(truepbar.transpose().event.to_numpy(),np.divide(pbar,truepbar).transpose()['mean'].to_numpy())
phat['SUM'] = phat.sum(axis=1)
#%%
# Get some base values to compare to
pbatter = pivot_events(year,'batter')
pbatter = pbatter[cols+['PA']]
#pbatter.columns = pd.MultiIndex.from_product([['batter'],pbatter.columns])
#pbatter.index = pd.MultiIndex.from_product([['bat'],pbatter.index])
ppitcher = pivot_events(year,'pitcher')
ppitcher = ppitcher[cols+['PA']]
#ppitcher.columns = pd.MultiIndex.from_product([['pitcher'],ppitcher.columns])
pgamesite = pivot_events(year,'gamesite')
pgamesite = pgamesite[cols+['PA']]
#pgamesite.columns = pd.MultiIndex.from_product([['gamesite'],pgamesite.columns])
ptimesthrough = pivot_events(year,'timesthrough')
ptimesthrough = ptimesthrough[cols+['PA']]
ppitbathand = pivot_events(year,'pitbathand')
ppitbathand = ppitbathand[cols+['PA']]
p = pd.concat([pbatter,ppitcher,pgamesite,ptimesthrough,ppitbathand],axis=0)
p.index = x.columns
#%%
s='pitbathand'
o='K'
sns.scatterplot(x=p[o].loc[s],y=phat[o].loc[s],hue=p.loc[s].PA)
sns.scatterplot(x=p[cols].loc[s],y=phat[cols].loc[s])
p[o].loc[s].hist(weights=p.loc[s].PA)
phat[o].loc[s].hist(weights=p.loc[s].PA)
# Long-format merge of observed vs fitted rates for an overall calibration plot.
plong = p.melt(ignore_index=False).reset_index()
phatlong = phat.melt(ignore_index=False).rename(columns={'variable':'event'}).reset_index()
comp = plong.merge(phatlong,how='inner',on=['split','ID','event']).merge(p.PA,left_on=['split','ID'],right_index=True)
sns.scatterplot(x='value_x',y='value_y',data=comp,hue='PA')
phat
| true |
ba59ebba2e068546face3a92c586930dc6c334c9 | Python | hsuanhauliu/html-compiler | /html_compiler/compiler.py | UTF-8 | 1,204 | 3.046875 | 3 | [
"MIT"
] | permissive | """
Compiler module.
"""
import os
from bs4 import BeautifulSoup
def compile(path):
    """Recursively inline every <div class="m_component"> into its page.

    Each placeholder's id names a sibling HTML file ("<id>.html") that is
    compiled the same way and spliced in place of the placeholder.
    """
    directory, filename = _separate_dir_and_file(path)
    with cd(directory):
        with open(filename, "r") as source:
            page = BeautifulSoup(source, 'html.parser')
        for placeholder in page.findAll("div", {"class": "m_component"}):
            tag_id = placeholder.get("id")
            subtree = compile(tag_id + ".html")
            page.find(id=tag_id).replaceWith(subtree)
    return page
def _separate_dir_and_file(path):
""" Helper function for separating file directory and the file """
temp = path.rfind("/")
if temp == -1:
return ".", path
return path[:temp], path[temp + 1:]
class cd:
    """Context manager that chdirs into a directory on entry and restores the
    previous working directory on exit."""
    def __init__(self, newPath):
        # Expand "~" up front so entering the context is a plain chdir.
        self._target = os.path.expanduser(newPath)
    def __enter__(self):
        self._previous = os.getcwd()
        os.chdir(self._target)
    def __exit__(self, etype, value, traceback):
        # Always hop back, even when the body raised.
        os.chdir(self._previous)
| true |
340556fd0862d6b4d83f1e78923171b353d08f04 | Python | hermetique/holpy | /imperative/parser.py | UTF-8 | 5,054 | 2.671875 | 3 | [
"BSD-3-Clause"
] | permissive | # Author: Bohua Zhan
import json, os
from lark import Lark, Transformer, v_args, exceptions
from kernel.type import TFun, NatType
from kernel.term import Term, Var, Not, And, Or, Implies, Eq, Lambda, true, Nat
from kernel.report import ProofReport
from kernel import theory
from logic import basic
from logic import logic
from data import nat
from data.function import mk_const_fun, mk_fun_upd
from imperative import imp
from kernel.proofterm import ProofTerm
from syntax import json_output
"""Parsing for simple imperative programs."""
# Lark grammar for the toy imperative language: arithmetic expressions,
# boolean conditions, and commands (skip/assign/if/while/seq).  The bracketed
# while form carries an explicit loop invariant.
grammar = r"""
?expr: CNAME -> var_expr
| INT -> num_expr
| expr "+" expr -> plus_expr
| expr "*" expr -> times_expr
?cond: expr "==" expr -> eq_cond
| expr "!=" expr -> ineq_cond
| expr "<=" expr -> less_eq_cond
| expr "<" expr -> less_cond
| cond "&" cond -> conj_cond
| cond "|" cond -> disj_cond
| "true" -> true_cond
?cmd: "skip" -> skip_cmd
| CNAME ":=" expr -> assign_cmd
| "if" "(" cond ")" "then" cmd "else" cmd -> if_cmd
| "while" "(" cond ")" "{" cmd "}" -> while_cmd
| "while" "(" cond ")" "{" "[" cond "]" cmd "}" -> while_cmd_inv
| cmd ";" cmd -> seq_cmd
%import common.CNAME
%import common.WS
%import common.INT
%ignore WS
"""
# Program state is modelled as a function nat => nat; st is the state variable
# threaded through all lambda abstractions below.
natFunT = TFun(NatType, NatType)
st = Var("s", natFunT)
def str_to_nat(s):
    """Map a single lower-case variable name to its 0-based index ('a' -> 0)."""
    offset = ord("a")
    return ord(s) - offset
@v_args(inline=True)
class HoareTransformer(Transformer):
    """Lark transformer that builds HOL terms/commands from the parse tree.
    Lower-case names become state lookups s(n); upper-case names become free
    nat variables."""
    def __init__(self):
        pass
    def var_expr(self, s):
        # Lower-case: state cell s(str_to_nat(s)); upper-case: logical variable.
        if ord(s) >= ord('a') and ord(s) <= ord('z'):
            return st(Nat(str_to_nat(s)))
        elif ord(s) >= ord('A') and ord(s) <= ord('Z'):
            return Var(s, NatType)
        else:
            raise NotImplementedError
    def num_expr(self, n):
        return Nat(int(n))
    def plus_expr(self, e1, e2):
        return nat.plus(e1, e2)
    def times_expr(self, e1, e2):
        return nat.times(e1, e2)
    def eq_cond(self, e1, e2):
        return Eq(e1, e2)
    def ineq_cond(self, e1, e2):
        return Not(Eq(e1, e2))
    def conj_cond(self, b1, b2):
        return And(b1, b2)
    def disj_cond(self, b1, b2):
        return Or(b1, b2)
    def true_cond(self):
        return true
    def less_eq_cond(self, e1, e2):
        return nat.less_eq(e1, e2)
    def less_cond(self, e1, e2):
        return nat.less(e1, e2)
    def skip_cmd(self):
        return imp.Skip(natFunT)
    def assign_cmd(self, v, e):
        # Assignment stores the value of e (as a function of the state st).
        Assign = imp.Assign(NatType, NatType)
        return Assign(Nat(str_to_nat(v)), Lambda(st, e))
    def if_cmd(self, b, c1, c2):
        Cond = imp.Cond(natFunT)
        return Cond(Lambda(st, b), c1, c2)
    def while_cmd(self, b, c):
        # Without an explicit invariant, use the trivial invariant "true".
        While = imp.While(natFunT)
        return While(Lambda(st, b), Lambda(st, true), c)
    def while_cmd_inv(self, b, inv, c):
        While = imp.While(natFunT)
        return While(Lambda(st, b), Lambda(st, inv), c)
    def seq_cmd(self, c1, c2):
        Seq = imp.Seq(natFunT)
        return Seq(c1, c2)
# Two LALR parsers over the same grammar, differing only in the start symbol;
# both emit HOL terms directly via the transformer.
cond_parser = Lark(grammar, start="cond", parser="lalr", transformer=HoareTransformer())
com_parser = Lark(grammar, start="cmd", parser="lalr", transformer=HoareTransformer())
def parse_cond(s):
    """Parse a boolean condition string into a HOL term."""
    return cond_parser.parse(s)
def parse_com(s):
    """Parse a command string into a HOL command term."""
    return com_parser.parse(s)
def process_file(input, output):
    """Run the example runs in examples/<input>.json and export the resulting
    theorems as a JSON theory named *output*.

    'eval' runs check a big-step semantics judgement Sem(com, st1, st2);
    'vcg' runs discharge a Hoare triple Valid(pre, com, post) via VCG.
    NOTE(review): only the first 5 runs are processed (content[:5]) -- confirm
    this truncation is intentional.
    """
    basic.load_theory('hoare')
    dn = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(dn, 'examples/' + input + '.json'), encoding='utf-8') as a:
        data = json.load(a)
    # Rebinds the parameter: from here on, output is the JSONTheory object.
    output = json_output.JSONTheory(output, ["hoare"], "Generated from " + input)
    content = data['content']
    eval_count = 0
    vcg_count = 0
    for run in content[:5]:
        if run['ty'] == 'eval':
            com = parse_com(run['com'])
            # Build initial/final states as zero-everywhere functions with
            # explicit updates for the listed cells.
            st1 = mk_const_fun(NatType, nat.zero)
            for k, v in sorted(run['init'].items()):
                st1 = mk_fun_upd(st1, Nat(str_to_nat(k)), Nat(v))
            st2 = mk_const_fun(NatType, nat.zero)
            for k, v in sorted(run['final'].items()):
                st2 = mk_fun_upd(st2, Nat(str_to_nat(k)), Nat(v))
            Sem = imp.Sem(natFunT)
            goal = Sem(com, st1, st2)
            prf = ProofTerm("eval_Sem", goal, []).export()
            rpt = ProofReport()
            th = theory.check_proof(prf, rpt)
            output.add_theorem("eval" + str(eval_count), th, prf)
            eval_count += 1
        elif run['ty'] == 'vcg':
            com = parse_com(run['com'])
            pre = Lambda(st, parse_cond(run['pre']))
            post = Lambda(st, parse_cond(run['post']))
            Valid = imp.Valid(natFunT)
            goal = Valid(pre, com, post)
            prf = imp.vcg_solve(goal).export()
            rpt = ProofReport()
            th = theory.check_proof(prf, rpt)
            output.add_theorem("vcg" + str(vcg_count), th, prf)
            vcg_count += 1
        else:
            raise TypeError
    output.export_json()
| true |
fc3c87f34e09a7cd047961e8b779ab60109740ea | Python | zhix9767/Leetcode | /code/Longest Valid Parentheses.py | UTF-8 | 1,872 | 3.296875 | 3 | [] | no_license | class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0:
return 0
maxLength = 0
stack = []
stack.append(-1)
for i in range(len(s)):
if s[i] == '(':
stack.append(i)
else:
j = stack.pop()
if not stack:
stack.append(i)
else:
maxLength = max(maxLength, i - stack[-1])
return maxLength
def longestValidParentheses2(self, s):
if len(s) == 0:
return 0
dp = [0] * len(s)
for i in range(1,len(s)):
if s[i] == '(':
dp[i] == 0
else:
if dp[i-1] == '(':
dp[i] = (dp[i-2] if i > 2 else 0) + 2
elif i-dp[i-1]>0 and s[i-dp[i-1]-1] == '(':
dp[i] = dp[i-1] + (dp[i-dp[i-1]-2] if i-dp[i-1]-2 >= 0 else 0) + 2
return max(dp)
def longestValidParentheses3(self, s):
if len(s) == 0:
return 0
left = 0
right = 0
maxLength = 0
for i in range(len(s)):
if s[i] == '(':
left += 1
else:
right += 1
if right == left:
maxLength = max(maxLength, 2 * left)
elif right > left:
left, right = 0, 0
left = 0
right = 0
for i in range(len(s)-1, -1, -1):
if s[i] == ')':
right += 1
else:
left += 1
if left == right:
maxLength = max(maxLength, 2* left)
elif left > right:
left, right = 0, 0
return maxLength
| true |
961ec6d1a23dcfde73235475003d4431fd0ff4cd | Python | daumie/dominic-motuka-bc17-week-1 | /day_1/sieve_of_eratosthenes.py | UTF-8 | 817 | 4.5 | 4 | [
"MIT"
] | permissive | """Finds prime numbers using sieve of eratosthenes"""
def sieve(num):
    """create a boolean array "prime[0...n]" and initialize all entries as True.
    A value in prime[i] will finally be false if i is not prime, else True

    Bug fixes vs. the original: the while loop never incremented p, which was
    an infinite loop for num >= 4, and the final print used range(2, num),
    skipping num itself even though the driver promises primes "below or
    equal to" num.
    """
    prime = [True for i in range(num + 1)]
    p = 2
    while p * p <= num:
        # If prime[p] is not changed then it is a prime number
        if prime[p]:
            # update all multiples of p
            for i in range(p * 2, num + 1, p):
                prime[i] = False
        p += 1
    # print all prime numbers (2..num inclusive)
    for p in range(2, num + 1):
        if prime[p]:
            print(p)
# Driver program
# if __name__ == '__main__':
# num = 30
# print("The list of prime numbers below or equal to", end=" ")
# print(num, end=" is: \n")
# sieve(num)
| true |
49e5ae35f29f8c8d2701468c84613339be239574 | Python | JovoM/Gis-programiranje | /Zadatak5-Vezba2.py | UTF-8 | 116 | 3.28125 | 3 | [] | no_license | # coding=utf-8
# Python 2 exercise: read a sentence from the user ("Unesi neku recenicu" =
# "Enter a sentence") and print it one character per line.
rec=raw_input("Unesi neku recenicu: ")
# Manual index walk over the string (a for-loop would also work).
i=0
while i< len(rec):
    print rec[i]
    i=i+1
| true |
641597cae3183ea491d807d5b9b4906ae95af068 | Python | bsautrey/logistic-regression | /logistic_regression.py | UTF-8 | 4,855 | 3.421875 | 3 | [
"MIT"
] | permissive | # Implement logistic regression from Andrew Ng's CS229 course: http://cs229.stanford.edu/notes/cs229-notes1.pdf. Batch gradient ascent is used to learn the parameters, i.e. maximize the likelihood.
import random
from copy import copy
from math import exp
import numpy as np
import matplotlib.pyplot as plot
# alpha - The learning rate.
# dampen - Factor by which alpha is dampened on each iteration. Default is no dampening, i.e. dampen = 1.0
# tol - The stopping criteria
# theta - The parameters to be learned.
class LogisticRegression():
def __init__(self):
self.X = None
self.Y = None
self.alpha = None
self.dampen = None
self.tol = None
self.theta = None
self.percents = None
def set_X(self,X):
self.X = X
def set_Y(self,Y):
self.Y = Y
def set_alpha(self,alpha=0.001,dampen=1.0):
self.alpha = alpha
self.dampen = dampen
def set_tao(self,tao=1.0):
self.tao = tao
def set_tolerance(self,tol=0.001):
self.tol = tol
def initialize_theta(self,theta=None):
if not theta:
number_of_parameters = self.X.shape[1]
theta = copy(self.X[0,:])
theta.resize((1,number_of_parameters))
self.theta = theta
def _initialize_percents(self):
self.percents = []
def run_BGA(self,max_iterations=5000):
self._initialize_percents()
old_theta = copy(self.theta)
iterations = 0
number_of_rows = self.X.shape[0]
number_of_columns = self.X.shape[1]
while True:
for i in xrange(number_of_rows):
x = self.X[i,:]
y = self.Y[i,:][0]
x.resize((number_of_columns,1))
for j in xrange(number_of_columns):
theta_j = self.theta[0][j]
x_j = x[j][0]
dot = np.dot(self.theta,x)[0][0]
logistic = 1.0/(1 + exp(-dot))
new_theta_j = theta_j + self.alpha*(y - logistic)*x_j
self.theta[0][j] = new_theta_j
iterations = iterations + 1
percent = self._calculate_convergence(old_theta)
self.percents.append((iterations,percent))
old_theta = copy(self.theta)
self.alpha = self.alpha*self.dampen
print iterations,percent,self.alpha,self.theta
if percent < self.tol or iterations > max_iterations:
return
def _calculate_convergence(self,old_theta):
diff = old_theta - self.theta
diff = np.dot(diff,diff.T)**0.5
length = np.dot(old_theta,old_theta.T)**0.5
percent = 100.0*diff/length
return percent
def generate_example(self,sample_size_per_class=1000):
# class_1
mean = np.array([3,3])
cov = np.array([[1,-0.6],[-0.6,1]])
res_1 = np.random.multivariate_normal(mean,cov,sample_size_per_class)
# class_2
mean = np.array([5,5])
cov = np.array([[1,-0.75],[-0.75,1]])
res_2 = np.random.multivariate_normal(mean,cov,sample_size_per_class)
# assemble data
X = np.row_stack((res_2,res_1))
intercept = np.ones((2*sample_size_per_class))
X = np.column_stack((X,intercept))
Y = []
for i in xrange(2*sample_size_per_class):
if i < sample_size_per_class:
class_1 = 1.0
Y.append(class_1)
else:
class_2 = 0.0
Y.append(class_2)
Y = np.array(Y)
Y.resize(2*sample_size_per_class,1)
# initialize
self.set_X(X)
self.set_Y(Y)
self.set_alpha(alpha=0.001,dampen=1.0)
self.set_tolerance(0.05)
self.initialize_theta()
self.run_BGA()
# decision boundry
theta_0 = self.theta[0][2]
theta_1 = self.theta[0][1]
theta_2 = self.theta[0][0]
x_1_1 = 0.0
x_1_2 = 8.0
x_2_1 = -(theta_0 + theta_1*x_1_1)/theta_2
x_2_2 = -(theta_0 + theta_1*x_1_2)/theta_2
# plot
plot.scatter(self.X[0:sample_size_per_class,0],self.X[0:sample_size_per_class,1],s=0.5,color='orange')
plot.scatter(self.X[sample_size_per_class:,0],self.X[sample_size_per_class:,1],s=0.5,color='green')
plot.plot([x_1_1,x_2_1],[x_1_2,x_2_2])
plot.show()
def plot_convergence(self,start_index,end_index=None):
if end_index:
X,Y = zip(*self.percents[start_index:end_index])
else:
X,Y = zip(*self.percents[start_index:])
plot.plot(X,Y)
plot.show()
| true |
8f5c7f4a94492f7df91b0c30fe92a73596320a94 | Python | nagmat1/INT-Edge | /modules/backup-metrics/metrics-jan12/sink/conf.py | UTF-8 | 443 | 2.640625 | 3 | [] | no_license | import json
class Configuration:
    """Thin wrapper around a JSON configuration file, loaded once at creation."""
    def __init__(self, file='conf.json'):
        self.conf = None
        self.file = file
        self.__load()
    def __load(self):
        # Parse the whole file eagerly; accessors below just index into it.
        with open(self.file) as handle:
            self.conf = json.load(handle)
    def _section(self, name):
        return self.conf[name]
    def getListenConf(self):
        """Return the 'listen' section of the configuration."""
        return self._section('listen')
    def getMetaServerConf(self):
        """Return the 'metaserver' section of the configuration."""
        return self._section('metaserver')
# Module-level singleton: loads conf.json from the working directory at import time.
conf = Configuration()
894963a7f4f5fb707f84106c81eb17ed95acbe01 | Python | harsh9451036849/text-FileCompare | /textFileCompare.py | UTF-8 | 270 | 3.078125 | 3 | [] | no_license | f = open("file1.txt")
# Second input file; 'f' (file1.txt) is opened earlier in this script.
f1 = open("file2.txt")
i = 0
n = []
# Read the first 100 lines of file2, removing the spaces inside each line
# (split on ' ' then re-join), and remember the normalized lines.
# NOTE(review): assumes both files have at least 100 lines - confirm.
for i in range(100):
    p = f1.readline().strip().split(' ')
    p = ''.join(p)
    n.append(p)
# Print every normalized line of file1 that does not appear in file2.
for i in range(100):
    st = f.readline().strip().split(' ')
    st = ''.join(st)
    if st in n :
        continue
    else :
        print(st)
b9793505cb7afeaae0337370183b950ad8e72df3 | Python | javs9708/PGP | /apps/usuario/funciones/validadores.py | UTF-8 | 1,188 | 2.625 | 3 | [] | no_license | import re
from dateutil.relativedelta import relativedelta
from datetime import datetime, date, time, timedelta
# Oldest accepted birth date: exactly 100 years before today.
LIMITE = (date.today() - relativedelta(years=100))
# Personal names: letters (incl. Spanish accents/ñ) and spaces only.
patron_nombre_apellido = re.compile('([A-ZÁÉÍÓÚ a-zñáéíóú]{1}[a-zñáéíóú A-ZÁÉÍÓÚ ]+[\s]*)+$')
# Identity document (cédula): 6 to 14 digits.
patron_cc = re.compile('[\d]{6,14}$')
# Password: at least 9 digits (one leading digit plus 8 or more).
patron_password = re.compile('[\d][0-9]{8,}$')
#patron_email = re.compile('[\w]+@{1}[\w]+(\.[\w]+)*\.[a-z]{2,3}$')
def validar_nombre(nombre):
    """Return True when *nombre* FAILS the name pattern (i.e. True == invalid)."""
    # Same pattern as the module-level patron_nombre_apellido, inlined.
    return re.match('([A-ZÁÉÍÓÚ a-zñáéíóú]{1}[a-zñáéíóú A-ZÁÉÍÓÚ ]+[\s]*)+$', nombre) is None
def validar_apellido(apellido):
    """Return True when *apellido* FAILS the surname pattern (True == invalid)."""
    # Same pattern as the module-level patron_nombre_apellido, inlined.
    return re.match('([A-ZÁÉÍÓÚ a-zñáéíóú]{1}[a-zñáéíóú A-ZÁÉÍÓÚ ]+[\s]*)+$', apellido) is None
def validar_cc(cc):
    """Return True when *cc* is NOT 6-14 digits (True == invalid)."""
    # Same pattern as the module-level patron_cc, inlined.
    return re.match('[\d]{6,14}$', cc) is None
def validar_password(password):
    """Return True when *password* is NOT at least 9 digits (True == invalid)."""
    # Same pattern as the module-level patron_password, inlined.
    return re.match('[\d][0-9]{8,}$', password) is None
def validar_fecha(fecha):
    """Return True when the 'YYYY-MM-DD' birth date is invalid.

    Invalid means the person would be under 18 today, or born before the
    module-level LIMITE (100 years ago).
    """
    nacimiento = datetime.strptime(fecha, '%Y-%m-%d').date()
    es_menor_de_edad = nacimiento + relativedelta(years=18) > date.today()
    return es_menor_de_edad or nacimiento < LIMITE
"""def validar_email(email):
if re.match(patron_email,email) is None:
return True
else:
return False
"""
| true |
eff6462c59329c55932a78f1e681312856518ef2 | Python | connorcodes/thetrailproject | /transform_data.py | UTF-8 | 10,441 | 2.53125 | 3 | [] | no_license | from typing import *
from enum import Enum, IntFlag, unique
import struct
import json
import os
import errno
@unique
class Activity(IntFlag):
    """Bit flags for the activities a trail supports; combine with ``|``."""
    Backpacking = 1 << 0
    XCountrySkiing = 1 << 1
    HorsebackRiding = 1 << 2
    OffRoadDriving = 1 << 3
    RockClimbing = 1 << 4
    SnowShoeing = 1 << 5
    Walking = 1 << 6
    Birding = 1 << 7
    Fishing = 1 << 8
    MountainBiking = 1 << 9
    PaddleSports = 1 << 10
    ScenicDriving = 1 << 11
    Surfing = 1 << 12
    Camping = 1 << 13
    Hiking = 1 << 14
    NatureTrips = 1 << 15
    RoadBiking = 1 << 16
    Skiiing = 1 << 17  # (sic) spelling kept - renaming would break references
    TrailRunning = 1 << 18
@unique
class View(IntFlag):
    """Bit flags for scenery / points of interest along a trail."""
    Beach = 1 << 0
    CityWalk = 1 << 1
    HistoricSite = 1 << 2
    Lake = 1 << 3
    River = 1 << 4
    Waterfall = 1 << 5
    Wildlife = 1 << 6
    Cave = 1 << 7
    Forest = 1 << 8
    HotSprings = 1 << 9
    RailsTrails = 1 << 10
    Views = 1 << 11
    Wildflowers = 1 << 12
@unique
class Suitability(IntFlag):
    """Bit flags for who/what a trail is suitable for."""
    Dog = 1 << 0
    Kid = 1 << 1
    Wheelchair = 1 << 2
@unique
class RouteType(Enum):
    """Shape of a trail's route (parsed from the 'L'/'O'/'P' JSON codes)."""
    # BUG FIX: trailing commas in the original made the first two member
    # values tuples ((1,) and (2,)) while PointToPoint was the int 3; all
    # three are now plain ints, matching the Difficulty enum.
    OutAndBack = 1
    Loop = 2
    PointToPoint = 3
@unique
class TrailTraffic(Enum):
    """Traffic bucket derived from the 'popularity' JSON field."""
    # BUG FIX: trailing commas in the original made Light and Moderate tuple
    # values ((1,) and (2,)) while Heavy was the int 3; all three are ints now.
    Light = 1
    Moderate = 2
    Heavy = 3
@unique
class Difficulty(Enum):
    """Coarse difficulty bucket derived from the numeric rating (0-7)."""
    Easy = 1
    Moderate = 2
    Hard = 3
class GeographicLocation(object):
    """A latitude/longitude pair in decimal degrees."""

    def __init__(self, latitude: float, longitude: float):
        """Validate and store the coordinates.

        Generalized to accept ints as well as floats (JSON numbers may parse
        as either); both are normalized to float. Raises ValueError for None
        or any non-numeric value (bool is rejected explicitly since it is a
        subclass of int).
        """
        if latitude is None or longitude is None:
            raise ValueError("Latitude or Longitude was None.")
        for value in (latitude, longitude):
            if isinstance(value, bool) or not isinstance(value, (int, float)):
                raise ValueError("Latitude or Longitude aren't numbers.")
        self.latitude: float = float(latitude)
        self.longitude: float = float(longitude)
class Trail(object):
    """In-memory representation of one trail record parsed from JSON."""

    def __init__(self):
        # Naming / location
        self.name: str = None
        self.area_name: str = None
        self.country_name: str = None
        self.city_name: str = None
        self.state_name: str = None
        # Ratings and metrics
        self.review_count: int = 0
        self.difficulty: Difficulty = 0
        self.geographic_location: GeographicLocation = None
        self.length: float = 0.0
        self.rating: float = 0
        # Bit-flag attribute sets
        self.activities: Activity = 0
        self.views: View = 0
        self.suitability: Suitability = 0
        self.routetype: RouteType = 0
        self.trail_traffic: int = 0
        self.elevation_gain: float = 0.0

    @staticmethod
    def from_json(json: Dict[str, Any]):
        """Build a Trail from one parsed JSON record.

        Raises ValueError when a required field is missing the expected type.
        (The parameter keeps its historical name ``json``, which shadows the
        json module inside this method.)
        """
        trail = Trail()
        if type(json['name']) is not str:
            raise ValueError("name isn't a string.")
        trail.name = json['name']
        if type(json['length']) is not float:
            raise ValueError("length isn't a float.")
        trail.length = json['length']
        if type(json['difficulty_rating']) is not str:
            raise ValueError("difficulty_rating isn't a string.")
        difficulty = int(json['difficulty_rating'])
        # Bucket the 0-7 numeric rating into three coarse levels.
        if 0 <= difficulty < 3:
            trail.difficulty = Difficulty.Easy
        elif 3 <= difficulty < 5:
            trail.difficulty = Difficulty.Moderate
        elif 5 <= difficulty <= 7:
            trail.difficulty = Difficulty.Hard
        else:
            raise ValueError("unknown difficulty value.")
        if type(json['route_type']) is not str:
            # BUG FIX: the original raised "length isn't a float." here.
            raise ValueError("route_type isn't a string.")
        route_types = {'L': RouteType.Loop,
                       'O': RouteType.OutAndBack,
                       'P': RouteType.PointToPoint}
        if json['route_type'] not in route_types:
            raise ValueError("unknown route_type value.")
        # BUG FIX: __init__ declares `routetype` but the original assigned a
        # brand-new `route_type` attribute; set both so existing readers of
        # either name keep working.
        trail.routetype = route_types[json['route_type']]
        trail.route_type = trail.routetype
        if type(json['area_name']) is not str:
            raise ValueError("area_name isn't a string.")
        trail.area_name = json['area_name']
        if type(json['country_name']) is not str:
            raise ValueError("country_name isn't a string.")
        trail.country_name = json['country_name']
        if type(json['city_name']) is not str:
            raise ValueError("city_name isn't a string.")
        # BUG FIX: the original overwrote country_name with the city name.
        trail.city_name = json['city_name']
        if type(json['num_reviews']) is not int:
            raise ValueError("num_reviews isn't an integer.")
        trail.review_count = json['num_reviews']
        if type(json['avg_rating']) is not float and type(json['avg_rating']) is not int:
            raise ValueError("avg_rating isn't an integer or float.")
        trail.rating = float(json['avg_rating'])
        if type(json['elevation_gain']) is not float and type(json['elevation_gain']) is not int:
            raise ValueError("elevation_gain isn't a float.")
        trail.elevation_gain = json['elevation_gain']

        features = json['features']
        if "kids" in features:
            trail.suitability |= Suitability.Kid
        # BUG FIX: dog-friendly tags previously set the Kid flag.
        if "dogs" in features or "dogs-leash" in features:
            trail.suitability |= Suitability.Dog
        # BUG FIX: paved tags previously set the Kid flag instead of Wheelchair.
        if "partially-paved" in features or "paved" in features:
            trail.suitability |= Suitability.Wheelchair
        # feature tag -> View flag.
        # BUG FIX: "rails-trails" previously mapped to View.CityWalk.
        view_tags = {
            "beach": View.Beach,
            "cave": View.Cave,
            "city-walk": View.CityWalk,
            "forest": View.Forest,
            "historic-site": View.HistoricSite,
            "hot-springs": View.HotSprings,
            "lake": View.Lake,
            "rails-trails": View.RailsTrails,
            "river": View.River,
            "views": View.Views,
            "waterfall": View.Waterfall,
            "wild-flowers": View.Wildflowers,
            "wildlife": View.Wildlife,
        }
        for tag, flag in view_tags.items():
            if tag in features:
                trail.views |= flag
        # activity tag -> Activity flag (several tags map to the same flag).
        activity_tags = {
            "backpacking": Activity.Backpacking,
            "birding": Activity.Birding,
            "camping": Activity.Camping,
            "fishing": Activity.Fishing,
            "fly-fishing": Activity.Fishing,
            "horseback-riding": Activity.HorsebackRiding,
            "mountain-biking": Activity.MountainBiking,
            "nature-trips": Activity.NatureTrips,
            "off-road-driving": Activity.OffRoadDriving,
            "paddle-sports": Activity.PaddleSports,
            "sea-kayaking": Activity.PaddleSports,
            "road-biking": Activity.RoadBiking,
            "rock-climbing": Activity.RockClimbing,
            "scenic-driving": Activity.ScenicDriving,
            "skiing": Activity.Skiiing,
            "snow-shoeing": Activity.SnowShoeing,
            "surfing": Activity.Surfing,
            "trail-running": Activity.TrailRunning,
            "walking": Activity.Walking,
            "x-country-skiing": Activity.XCountrySkiing,
        }
        activities = json['activities']
        for tag, flag in activity_tags.items():
            if tag in activities:
                trail.activities |= flag
        if type(json['popularity']) is not float:
            raise ValueError("popularity isn't a float.")
        popularity = json['popularity']
        # Bucket popularity into traffic levels; negative values (if any)
        # leave the default of 0 in place, matching the original behavior.
        if 0 <= popularity < 25:
            trail.trail_traffic = TrailTraffic.Light
        if 25 <= popularity < 50:
            trail.trail_traffic = TrailTraffic.Moderate
        if popularity >= 50:
            trail.trail_traffic = TrailTraffic.Heavy
        trail.geographic_location = GeographicLocation(json['_geoloc']['lat'],
                                                       json['_geoloc']['lng'])
        return trail
def make_sure_path_exists(path):
    """Create directory *path* (including parents) if it does not exist.

    An OSError meaning "already exists" is swallowed; anything else
    (permissions, bad path, ...) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return  # already present - nothing to do
        raise
if __name__ == "__main__":
with open('trail_data.json', 'r') as trail_data_file:
trails_data = json.load(trail_data_file)
trails = []
for trail_data in trails_data['hits']:
try:
trail = Trail.from_json(trail_data)
trails.append(trail)
except Exception as e:
print(e)
make_sure_path_exists("trail_data")
for trail in trails:
#need to make sure trail.name is sanatized
make_sure_path_exists("trail_data/%s" % trail.name)
with open("trail_data/%s/data" % trail.name, 'w') as output:
| true |
24afe5e478b7811ea044687a69bbcb4dee855eaa | Python | Significant-Gravitas/Auto-GPT-Plugins | /run_pylint.py | UTF-8 | 425 | 2.625 | 3 | [
"MIT"
] | permissive | """
https://stackoverflow.com/questions/49100806/
pylint-and-subprocess-run-returning-exit-status-28
"""
import subprocess
# Shell command (shell=True below lets the shell expand the ** glob).
cmd = " pylint src\\**\\*"
try:
    # check=True makes a non-zero exit raise CalledProcessError; pylint exits
    # non-zero whenever it reports any message, so the except branch is the
    # common path. Both streams are captured via PIPE.
    subprocComplete = subprocess.run(
        cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    print(subprocComplete.stdout.decode("utf-8"))
except subprocess.CalledProcessError as err:
    # err.output is the captured stdout of the failed command.
    print(err.output.decode("utf-8"))
| true |
0b7f5e678a3b5a6ed747a77d7d7c52a3ce84f78f | Python | zingp/webstudy | /studyFlask/test_flask_session.py | UTF-8 | 658 | 2.515625 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: "liuyouyuan"
# Date: 2018/6/1
"""
pip install redis
pip install flask-session
"""
from flask import Flask, session, redirect
from flask.ext.session import Session
# Demo app: store Flask sessions server-side in Redis via flask-session.
app = Flask(__name__)
app.debug = True
# NOTE(review): hard-coded secret key - replace before any real deployment.
app.secret_key = 'asdfasdfasd'
app.config['SESSION_TYPE'] = 'redis'
from redis import Redis
# NOTE(review): host is a placeholder and the port is passed as a string -
# confirm the Redis client accepts it before deploying.
app.config['SESSION_REDIS'] = Redis(host='****',port='6379')
Session(app)
@app.route('/login')
def login():
    # Store a demo username in the Redis-backed session, then go to /index.
    session['username'] = 'alex'
    return redirect('/index')
@app.route('/index')
def index():
    # Read the username set by /login; missing key (never logged in) raises.
    name = session['username']
    return name
if __name__ == '__main__':
    app.run()
| true |
a3ce227e25dc6c85e3b3dbc39ec16a8441a99e35 | Python | badonfai/Python-packages | /OutlierIdentifiers/test/test_outlieridentifier.py | UTF-8 | 6,986 | 2.5625 | 3 | [] | no_license | """
Test script for outlieridentifier.py
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from OutlierIdentifier.Outlieridentifier.outlieridentifier import *
import numpy as np
import unittest
import json
# Fixed seed so the synthetic data (and therefore every test) is deterministic.
np.random.seed(2342)
# Bulk of the data: 200 points over [-10, 100).
randData1 = np.random.uniform(low=-10, high=100, size=200)
# 19 high values in [140, 160) - candidates for top outliers.
randData2 = np.random.uniform(low=140, high=160, size=19)
# 12 low values in [-50, -30) - candidates for bottom outliers.
randData3 = np.random.uniform(low=-50, high=-30, size=12)
randData = np.concatenate((randData1, randData2, randData3), axis=0)
# Resample to mix the groups. NOTE(review): np.random.choice samples WITH
# replacement by default, so this duplicates some points and drops others
# rather than shuffling - confirm that is intended.
randData = np.random.choice(randData, size=len(randData))
class Tests(unittest.TestCase):
    """Equivalence tests for outlieridentifier.py.

    Each convenience wrapper (values or positions; full/top/bottom) must
    agree with filtering randData directly against the thresholds produced
    by each of the three parameter estimators.
    """

    def _parameter_functions(self):
        # The three threshold estimators every equivalence test runs against.
        return (SPLUSQuartileIdentifierParameters,
                QuartileIdentifierParameters,
                HampelIdentifierParameters)

    def _check_values(self, wrapper, mask_fn):
        """Assert wrapper(randData, params) equals direct mask filtering."""
        for params in self._parameter_functions():
            lowerThreshold, upperThreshold = params(randData)
            expected = randData[mask_fn(randData, lowerThreshold, upperThreshold)]
            actual = wrapper(randData, params)
            self.assertEqual(expected.tolist(), actual.tolist())

    def _check_positions(self, wrapper, mask_fn):
        """Assert wrapper(randData, params) equals the mask's indices."""
        for params in self._parameter_functions():
            lowerThreshold, upperThreshold = params(randData)
            expected = np.where(mask_fn(randData, lowerThreshold, upperThreshold))[0]
            actual = wrapper(randData, params)
            self.assertEqual(expected.tolist(), actual.tolist())

    @staticmethod
    def _both_mask(data, lower, upper):
        return (data <= lower) | (data >= upper)

    @staticmethod
    def _top_mask(data, lower, upper):
        return data >= upper

    @staticmethod
    def _bottom_mask(data, lower, upper):
        return data <= lower

    def test_OutlierIdentifier_equal(self):
        """OutlierIdentifier equivalences"""
        self._check_values(OutlierIdentifier, self._both_mask)

    def test_TopOutlierIdentifier_equal(self):
        """TopOutlierIdentifier equivalences"""
        # BUG FIX: the original's first case called OutlierIdentifier instead
        # of TopOutlierIdentifier (copy-paste), comparing the wrong function.
        self._check_values(TopOutlierIdentifier, self._top_mask)

    def test_BottomOutlierIdentifier_equal(self):
        """BottomOutlierIdentifier equivalences"""
        # BUG FIX: the original's first case called OutlierIdentifier instead
        # of BottomOutlierIdentifier (copy-paste).
        self._check_values(BottomOutlierIdentifier, self._bottom_mask)

    def test_OutlierPosition_equal(self):
        """OutlierPosition equivalences"""
        self._check_positions(OutlierPosition, self._both_mask)

    def test_TopOutlierPosition_equal(self):
        """TopOutlierPosition equivalences"""
        self._check_positions(TopOutlierPosition, self._top_mask)

    def test_BottomOutlierPosition_equal(self):
        """BottomOutlierPosition equivalences"""
        self._check_positions(BottomOutlierPosition, self._bottom_mask)
if __name__ == '__main__':
    # Build a suite from the Tests case and run it with verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(*[Tests])
    testResult = unittest.TextTestRunner(verbosity=2).run(suite)
    total = testResult.testsRun
    if total == 0:
        # Nothing ran: report a perfect score with empty output.
        res = {'score': 1, 'output': []}
    else:
        # Score = fraction of tests that neither errored nor failed.
        errors = [x[1] for x in testResult.errors]
        failures = [x[1] for x in testResult.failures]
        score = 1 - 1.0 * (len(errors) + len(failures)) / total
        res = {'score': score, 'test_output': errors + failures}
    # Emit the grading result as a single JSON object on stdout.
    print(json.dumps(res))
3454eb981643922ee54827741acf728627ae4023 | Python | RonakKhandelwal/Python | /Learn Python The Hard Way/ex15.py | UTF-8 | 278 | 3.109375 | 3 | [] | no_license | from sys import argv
# Python 2 script: print a file's contents, then ask for the filename again
# and print it a second time.
script,filename=argv
txt=open(filename)
print"Here's your file %r:"%filename
print txt.read()
print "Type the filename again :"
file_again=raw_input('>')
txt_again=open(file_again)
print "Here's the new file %r :"%file_again
print txt_again.read()
# NOTE(review): neither file handle is ever closed.
4ddd00b34d6cb24e6575d86975a5632914fd9a53 | Python | nitinnat/Tweet-Prejudice-Detection | /make_splits.py | UTF-8 | 1,517 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu May 10 16:07:18 2018
@author: Nitin
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Fixed seed for reproducible sampling.
np.random.seed(42) #Set seed
# Input feature matrices, with and without named-entity-recognition features.
filepath_not_NER = "./FeatureVectorNotNER.csv"
filepath_NER = "./FeatureVectorNER.csv"
# Output name templates; {} is filled with the train percentage (90/80/70/60).
output_file_train_NER = "train-Strat-NER-{}.csv"
output_file_test_NER = "test-Strat-NER-{}.csv"
output_file_train_not_NER = "train-Strat-NotNER-{}.csv"
output_file_test_not_NER = "test-Strat-NotNER-{}.csv"
#90-10, 80-20,70-30 and 60-40.
def read_data(filepath):
    """Load a CSV file (first row is the header) into a DataFrame."""
    return pd.read_csv(filepath, header=0)
def make_split(df, train_split):
    """Randomly split *df* into (train, test); *train_split* is the train fraction."""
    # random_state=0 keeps the split reproducible across runs.
    train_part, test_part = train_test_split(df, test_size=1 - train_split, random_state=0)
    return train_part, test_part
if __name__ == "__main__":
    # Load both feature matrices once; every split reuses them.
    X_not_NER = read_data(filepath_not_NER)
    X_NER = read_data(filepath_NER)
    # Produce 90/10, 80/20, 70/30 and 60/40 train/test splits of each matrix,
    # writing each part without index or header.
    for split in [0.9,0.8,0.7,0.6]:
        X_train, X_test = make_split(X_not_NER,split)
        print(X_train.shape, X_test.shape)
        X_train.to_csv(output_file_train_not_NER.format(int(split*100)), index = None, header = None)
        X_test.to_csv(output_file_test_not_NER.format(int(split*100)), index = None,header = None)
        #NER
        X_train, X_test = make_split(X_NER,split)
        print(X_train.shape, X_test.shape)
        X_train.to_csv(output_file_train_NER.format(int(split*100)), index = None, header = None)
        X_test.to_csv(output_file_test_NER.format(int(split*100)), index = None,header = None)
| true |
24b42a36d3249def982ed249d216077883bcbf4a | Python | wasv/scrap-code | /python/Goddard.py | UTF-8 | 604 | 3.109375 | 3 | [
"MIT"
] | permissive | import Tkinter as tk
import serial
# Keypress -> robot command string sent over the serial port.
commands = { 'W' : 'FG', 'A' : 'RG', 'S' : 'RT', 'D' : 'FT' }
def onKeyPress(event):
    # Tk delivers the typed character on the event; normalise to upper case.
    key = event.char.upper()
    print key  # Python 2 print statement; this file is Python 2
    if key in ('W', 'A', 'S', 'D' ):
        com = commands.get( key )
        # 'ser' is the module-level serial port opened below this function.
        ser.write( com )
        print key, com
# Open the robot's serial link at 9600 baud.
#ser = serial.Serial('/dev/ttyUSB0',9600)
ser = serial.Serial('COM4',9600) # Windows port; use the ttyUSB0 line above on Linux
root = tk.Tk()
root.geometry('300x200')
# Console-style text widget; gives the window something to display and focus.
text = tk.Text(root, background='black', foreground='green', font=('Console', 12))
text.pack()
# Route every keypress in the window to the serial handler above.
root.bind('<KeyPress>', onKeyPress)
root.mainloop()
| true |