| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
| string, length 40 | string, 1 class | string, length 5-133 | string, length 2-333 | string, 30 classes | int64, 18-5.47M | float64, 2.52-5.81 | int64, 3-5 | list, length 0-67 | string, 2 classes | string, length 12-5.47M | bool, 1 class |
| 1bc3dc0f345df16baf94a40c43b5e1843555e357 | Python | a01747686/TC2008-I21-Eq2 | /hamburguesas.py | UTF-8 | 1,394 | 2.96875 | 3 | [] | no_license | (text below) | true |
import threading
import time

# create the semaphores
MG = threading.Semaphore(1)
MH = threading.Semaphore(1)
MP = threading.Semaphore(1)
DMG = threading.Semaphore()
DMH = threading.Semaphore()
DMP = threading.Semaphore()
OMG = threading.Semaphore(0)
OMH = threading.Semaphore(0)
OMP = threading.Semaphore(0)

def despachador():
    while True:
        DMG.release()
        print(DMG)
        MG.release()
        print(MG)
        # place the order
        MG.acquire()
        print(MG)
        OMG.acquire()
        print(OMG)

def cocinero():
    while True:
        DMH.release()
        MH.release()
        # place the burgers
        MH.acquire()
        OMH.acquire()

def empacador():
    while True:
        DMP.release()
        OMH.release()
        OMG.release()
        MP.release()
        # fill the order
        MP.acquire()
        MH.release()
        # place a burger
        MH.acquire()
        MP.release()
        # place a burger
        MG.acquire()
        DMG.acquire()
        DMH.acquire()
        OMP.acquire()

def cajero():
    while True:
        OMP.release()
        MP.release()
        # place a burger
        MP.release()
        DMP.release()

def main():
    # create and start the four worker threads
    hilo1 = threading.Thread(target=despachador)
    hilo2 = threading.Thread(target=cocinero)
    hilo3 = threading.Thread(target=empacador)
    hilo4 = threading.Thread(target=cajero)
    for hilo in (hilo1, hilo2, hilo3, hilo4):
        hilo.start()
| d0cbca3c7dc1d0c134b21dab20d923623d448012 | Python | MorisMa18/Arcade_Sapce_Invader | /Space_Invader.py | UTF-8 | 7,617 | 3.21875 | 3 | [] | no_license | (text below) | true |
import pygame
import random
import math

# Initialize Pygame
pygame.init()

# player class
class player:
    image = pygame.image.load('player.png')
    x_coord = 370
    y_coord = 480
    delta_x = 0

# enemy class
class enemy:
    image = pygame.image.load('ufo.png')
    x_coord = 0
    y_coord = 0
    delta_x = 6
    delta_y = 40

    def __init__(self, x_value, y_value):
        self.x_coord = x_value
        self.y_coord = y_value

# bullet class
class bullet:
    image = pygame.image.load('bullet.png')
    x_coord = 0
    y_coord = 480
    delta_x = 0
    delta_y = 50
    state = "ready"

# -- SET UP THE GAME --
# Window
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption("Space Invader")

# Score
score_value = 0
score_text_x = 10
score_text_y = 10

# font (Score)
font = pygame.font.Font('freesansbold.ttf', 32)
# font (Game Over)
over_font = pygame.font.Font('freesansbold.ttf', 64)

# Function which renders the score text
def render_show_score(x, y):
    score = font.render("Score:" + str(score_value), True, (255, 255, 255))
    screen.blit(score, (x, y))

# Function which renders the Game Over text
def render_game_over_text():
    gg_text = over_font.render("GAME OVER", True, (255, 255, 255))
    screen.blit(gg_text, (200, 250))

# Creating player object
player_Obj = player()
# Creating bullet object
bullet_Obj = bullet()

# Creating enemy objects
# TODO: Adjust the number of enemies depending on level of difficulty
num_of_enemies = 6
# Array of enemy objects
enemy_objs = []
for i in range(0, num_of_enemies):
    enemy_objs.append(enemy(random.randint(0, 800), random.randint(50, 150)))

# Function that renders the player image
def render_player(x, y):
    screen.blit(player_Obj.image, (x, y))

# Function that renders the enemy image
def render_enemy(x, y, i):
    screen.blit(enemy_objs[i].image, (x, y))

# Function that renders the bullet image
def render_bullet(x, y):
    # global bullet_Obj.state
    bullet_Obj.state = "fire"
    screen.blit(bullet_Obj.image, (x + 16, y + 10))

# Collision detection between bullet & enemy
def isCollision(enemy_x, enemy_y, bullet_x, bullet_y):
    distance = math.sqrt(math.pow(enemy_x - bullet_x, 2) + math.pow(enemy_y - bullet_y, 2))
    if distance < 27:
        return True
    else:
        return False

# Declare control parameters for the game while loops
running = False
intro = True

# Initialize color RGB values for easy passing
dark_green = (0, 255, 0)
dark_red = (255, 0, 0)
bright_green = (0, 200, 0)
bright_red = (200, 0, 0)

# -- RUNNING CODE --
# INTRO Screen
# Provide functionality for the buttons
def button_func(button_msg, x_pos, y_pos, width, height, inactive, active, function):
    # TODO: Add button texts on the buttons
    # Position of the mouse
    mouse = pygame.mouse.get_pos()
    # State of the mouse buttons
    click = pygame.mouse.get_pressed()
    # Check whether the mouse position is within the rectangle
    if x_pos + width > mouse[0] > x_pos and y_pos + height > mouse[1] > y_pos:
        pygame.draw.rect(screen, active, (x_pos, y_pos, width, height))
        if click[0] == 1 and function is not None:
            if function == "Start_the_game":
                global running
                running = True
                global intro
                intro = False
            else:
                pygame.quit()
                quit()
    else:
        pygame.draw.rect(screen, inactive, (x_pos, y_pos, width, height))

# Executing intro screen
while intro:
    # TODO: Better intro screen
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            intro = False
    screen.fill((0, 0, 0))
    # Displaying the title of the game
    start_screen_font = pygame.font.Font('freesansbold.ttf', 64)
    intro_text = start_screen_font.render("Space Invader", True, (255, 255, 255))
    screen.blit(intro_text, (200, 250))
    # Making the buttons (rectangles)
    # Green - Play the game
    pygame.draw.rect(screen, (0, 200, 0), (150, 450, 100, 50))
    # Red - Quit the game
    pygame.draw.rect(screen, (200, 0, 0), (550, 450, 100, 50))
    # Adding functionality to the buttons
    button_func("Start", 150, 450, 100, 50, dark_green, bright_green, "Start_the_game")
    button_func("Quit", 550, 450, 100, 50, dark_red, bright_red, "Quit_the_game")
    pygame.display.update()

# Game window, runs continuously
while running:
    screen.fill((0, 0, 0))
    # Check if the user is closing the window
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        # Logic when a key is pressed
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                player_Obj.delta_x = -20
            if event.key == pygame.K_RIGHT:
                player_Obj.delta_x = 20
            if event.key == pygame.K_SPACE:
                if bullet_Obj.state == "ready":
                    # Ensure that once fired, the bullet does not follow the player's movement
                    bullet_Obj.x_coord = player_Obj.x_coord
                    render_bullet(player_Obj.x_coord, bullet_Obj.y_coord)
        # Logic when a key is released
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                player_Obj.delta_x = 0

    # Player movement calculation
    player_Obj.x_coord += player_Obj.delta_x
    # State 1: Make sure the spaceship does not go out of bounds
    if player_Obj.x_coord <= 0:
        player_Obj.x_coord = 0
    elif player_Obj.x_coord >= 736:  # 800 - 64 (64x64 image)
        player_Obj.x_coord = 736

    # Enemy movement (automatic)
    for i in range(0, num_of_enemies):
        # State 1: When an enemy reaches y_coord > 240, it touches the player
        if enemy_objs[i].y_coord > 240:
            for j in range(0, num_of_enemies):
                enemy_objs[j].y_coord = 20000
            render_game_over_text()
            break
        # Making enemies move
        enemy_objs[i].x_coord += enemy_objs[i].delta_x
        # State 2: When an enemy touches the border, bounce back
        if enemy_objs[i].x_coord <= 0:
            enemy_objs[i].delta_x *= -1
            enemy_objs[i].y_coord += enemy_objs[i].delta_y
        elif enemy_objs[i].x_coord >= 736:  # 800 - 64 (64x64 image)
            enemy_objs[i].delta_x *= -1
            enemy_objs[i].y_coord += enemy_objs[i].delta_y
        # State 3: When an enemy collides with the bullet
        collision = isCollision(enemy_objs[i].x_coord, enemy_objs[i].y_coord, bullet_Obj.x_coord, bullet_Obj.y_coord)
        if collision:
            bullet_Obj.y_coord = 480
            bullet_Obj.state = "ready"
            score_value += 1
            # If the enemy collided with the bullet, its position changes
            enemy_objs[i].x_coord = random.randint(0, 800)
            enemy_objs[i].y_coord = random.randint(50, 150)
        render_enemy(enemy_objs[i].x_coord, enemy_objs[i].y_coord, i)

    # Bullet movement
    # Return to the original position, ready to shoot
    if bullet_Obj.y_coord <= 0:
        bullet_Obj.y_coord = 480
        bullet_Obj.state = "ready"
    # When the spacebar is hit
    if bullet_Obj.state == "fire":
        render_bullet(bullet_Obj.x_coord, bullet_Obj.y_coord)
        bullet_Obj.y_coord -= bullet_Obj.delta_y

    render_player(player_Obj.x_coord, player_Obj.y_coord)
    render_show_score(score_text_x, score_text_y)
    pygame.display.update()
| f39ab35ce9b8e5191aef0a39383912dbad549d80 | Python | pombredanne/genutility | /genutility/win/file.py | UTF-8 | 2,926 | 2.65625 | 3 | ["ISC"] | permissive | (text below) | true |
from __future__ import generator_stop

from ctypes import FormatError, GetLastError, WinError, byref, sizeof
from errno import EACCES

from cwinsdk.um.handleapi import INVALID_HANDLE_VALUE
from cwinsdk.um.winnt import FILE_SHARE_READ, FILE_SHARE_WRITE
from cwinsdk.windows import ERROR_SHARING_VIOLATION  # structs; enums
from cwinsdk.windows import (FILE_ID_DESCRIPTOR, FILE_ID_INFO, FILE_ID_TYPE, FILE_INFO_BY_HANDLE_CLASS, GENERIC_READ,
                             OPEN_EXISTING, CreateFileW, GetFileInformationByHandleEx, OpenFileById)

from .handle import WindowsHandle, _mode2access

class SharingViolation(OSError):
    pass

class WindowsFile(WindowsHandle):

    def __init__(self, handle):
        # type: (int, ) -> None
        WindowsHandle.__init__(self, handle, doclose=True)

    @classmethod
    def from_path(cls, path, mode="r", shared=False):
        # type: (str, str, bool) -> WindowsFile
        """ Create a Windows file object from `path`.
            If `shared` is True, other processes are allowed both read and write access;
            if False, they are only allowed read access.
        """
        DesiredAccess = _mode2access[mode]
        if shared:
            ShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE
        else:
            ShareMode = FILE_SHARE_READ
        SecurityAttributes = None
        CreationDisposition = OPEN_EXISTING
        FlagsAndAttributes = 0
        handle = CreateFileW(path, DesiredAccess, ShareMode, SecurityAttributes,
                             CreationDisposition, FlagsAndAttributes, None)

        if handle == INVALID_HANDLE_VALUE:
            winerror = GetLastError()
            if winerror == ERROR_SHARING_VIOLATION:
                errno = EACCES
                strerror = FormatError(winerror)
                raise SharingViolation(errno, strerror, path, winerror)
            else:
                raise WinError()

        return cls(handle)

    @classmethod
    def from_fileid(cls, volume, fileid):
        VolumeHint = None  # open volume handle here
        FileId = FILE_ID_DESCRIPTOR(Size=..., Type=FILE_ID_TYPE.ExtendedFileIdType)
        DesiredAccess = GENERIC_READ
        ShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE
        lpSecurityAttributes = None
        FlagsAndAttributes = 0
        handle = OpenFileById(VolumeHint, byref(FileId), DesiredAccess, ShareMode,
                              lpSecurityAttributes, FlagsAndAttributes)
        return cls(handle)

    def info(self):
        FileInformation = FILE_ID_INFO()
        GetFileInformationByHandleEx(self.handle, FILE_INFO_BY_HANDLE_CLASS.FileIdInfo,
                                     byref(FileInformation), sizeof(FileInformation))
        return FileInformation

def is_open_for_write(path):
    # type: (str, ) -> bool
    """ Tests whether the file is already open for write elsewhere
        by trying to open it in exclusive read mode.
    """
    try:
        with WindowsFile.from_path(path, mode="r", shared=False):
            return False
    except SharingViolation:
        return True

if __name__ == "__main__":
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("path")
    args = parser.parse_args()

    with WindowsFile.from_path(args.path, mode="r", shared=False) as wf:
        print("Volume serial number:", wf.info().VolumeSerialNumber)
        print("File id:", bytes(wf.info().FileId).hex())
| f3b3e02f4fee7b239a2016dbbce8f245bcb6a45f | Python | dmdekf/algo | /Algorithem_my/05_01list2/4843_special_sort.py | UTF-8 | 464 | 2.765625 | 3 | [] | no_license | (text below) | true |
import sys

sys.stdin = open('input.txt')

T = int(input())
for tc in range(1, T + 1):
    N = int(input())
    d = list(map(int, input().split()))
    for i in range(N - 1):
        for j in range(i, N):
            if d[i] > d[j]:
                d[i], d[j] = d[j], d[i]
    result = []
    for i in range(N // 2):
        result.append(d[-1 - i])
        result.append(d[i])
    # result = print(*result)
    print('#{}'.format(tc), end=' ')
    print(*result[:10])
| eea007b2fb90200b44dee5ebd5b06ea310c1bd15 | Python | webdev2145/web-scraping | /main.py | UTF-8 | 492 | 3.046875 | 3 | [] | no_license | (text below) | true |
from bs4 import BeautifulSoup
import lxml
import requests

url = 'https://www.empireonline.com/movies/features/best-movies-2/'
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
movies = soup.find_all(name='h3', class_='title')

output = ''
my_movies = movies
# for movie in movies:
my_movies.reverse()
for movie in my_movies:
    output += f'{movie.getText()}\n'

with open("100 Best Movies.txt", "w") as movie_handle:
    movie_handle.writelines(output)
| 544e4a80e8f3f2b61c9683a0d21c684f387d552e | Python | SmartTeleMax/iktomi | /tests/web/url_template.py | UTF-8 | 4,456 | 2.703125 | 3 | ["MIT"] | permissive | (text below) | true |
# -*- coding: utf-8 -*-

__all__ = ['UrlTemplateTests']

import unittest
from iktomi.web.url_templates import UrlTemplate, construct_re
from iktomi.web.url_converters import Converter

class UrlTemplateTests(unittest.TestCase):

    def test_empty_match(self):
        'UrlTemplate match method with empty template'
        ut = UrlTemplate('')
        self.assertEqual(ut.match(''), ('', {}))
        self.assertEqual(ut.match('/'), (None, {}))

    def test_match_without_params(self):
        'UrlTemplate match method without params'
        ut = UrlTemplate('simple')
        self.assertEqual(ut.match('simple'), ('simple', {}))
        self.assertEqual(ut.match('/simple'), (None, {}))

    def test_match_with_params(self):
        'UrlTemplate match method with params'
        ut = UrlTemplate('/simple/<int:id>')
        self.assertEqual(ut.match('/simple/2'), ('/simple/2', {'id': 2}))
        self.assertEqual(ut.match('/simple'), (None, {}))
        self.assertEqual(ut.match('/simple/d'), (None, {}))

    def test_match_from_begining_without_params(self):
        'UrlTemplate match method without params (from beginning of str)'
        ut = UrlTemplate('simple', match_whole_str=False)
        self.assertEqual(ut.match('simple'), ('simple', {}))
        self.assertEqual(ut.match('simple/sdffds'), ('simple', {}))
        self.assertEqual(ut.match('/simple'), (None, {}))
        self.assertEqual(ut.match('/simple/'), (None, {}))

    def test_match_from_begining_with_params(self):
        'UrlTemplate match method with params (from beginning of str)'
        ut = UrlTemplate('/simple/<int:id>', match_whole_str=False)
        self.assertEqual(ut.match('/simple/2'), ('/simple/2', {'id': 2}))
        self.assertEqual(ut.match('/simple/2/sdfsf'), ('/simple/2', {'id': 2}))
        self.assertEqual(ut.match('/simple'), (None, {}))
        self.assertEqual(ut.match('/simple/d'), (None, {}))
        self.assertEqual(ut.match('/simple/d/sdfsdf'), (None, {}))

    def test_builder_without_params(self):
        'UrlTemplate builder method (without params)'
        ut = UrlTemplate('/simple')
        self.assertEqual(ut(), '/simple')

    def test_builder_with_params(self):
        'UrlTemplate builder method (with params)'
        ut = UrlTemplate('/simple/<int:id>/data')
        self.assertEqual(ut(id=2), '/simple/2/data')

    def test_only_converter_is_present(self):
        ut = UrlTemplate('<int:id>')
        self.assertEqual(ut(id=2), '2')

    def test_default_converter(self):
        ut = UrlTemplate('<message>')
        self.assertEqual(ut(message='hello'), 'hello')

    def test_redefine_converters(self):
        from iktomi.web.url_converters import Integer

        class DoubleInt(Integer):
            def to_python(self, value, env=None):
                return Integer.to_python(self, value, env) * 2

            def to_url(self, value):
                return str(value // 2)

        ut = UrlTemplate('/simple/<int:id>',
                         converters={'int': DoubleInt})
        self.assertEqual(ut(id=2), '/simple/1')
        self.assertEqual(ut.match('/simple/1'), ('/simple/1', {'id': 2}))

    def test_var_name_with_underscore(self):
        ut = UrlTemplate('<message_uid>')
        self.assertEqual(ut(message_uid='uid'), 'uid')

    def test_trailing_delimiter(self):
        self.assertRaises(ValueError, UrlTemplate, '<int:id:>')

    def test_empty_param(self):
        self.assertRaises(ValueError, UrlTemplate, '<>')

    def test_delimiter_only(self):
        self.assertRaises(ValueError, UrlTemplate, '<:>')

    def test_type_and_delimiter(self):
        self.assertRaises(ValueError, UrlTemplate, '<int:>')

    def test_empty_type(self):
        self.assertRaises(ValueError, UrlTemplate, '<:id>')

    def test_no_delimiter(self):
        self.assertRaises(ValueError, UrlTemplate, '<any(x,y)slug>')

    def test_anonymous(self):
        class SimpleConv(Converter):
            regex = '.+'

        convs = {'string': SimpleConv}
        ut = UrlTemplate('/simple/<id>')
        regexp = construct_re(ut.template,
                              converters=convs,
                              anonymous=True)[0]
        self.assertEqual(regexp.pattern, r'^\/simple\/.+')
        regexp = construct_re(ut.template,
                              converters=convs,
                              anonymous=False)[0]
        self.assertEqual(regexp.pattern, r'^\/simple\/(?P<id>.+)')
| 26eee6fde2e9207dcecfaf99de295d2d7e4058b2 | Python | myf-algorithm/Leetcode | /PAT_B/1024.科学计数法.py | UTF-8 | 1,459 | 2.734375 | 3 | [] | no_license | (text below) | true |
# Convert a number given in scientific notation (e.g. "+1.23400E-03") to plain
# decimal form. fu: mantissa sign; zheng: integer part; xiao: fractional part;
# zhi_fu / zhi_shu: exponent sign and absolute value.
a, b = input().split('E')
fu = a[0]
zheng, xiao = a[1:].split('.')
zhi_fu, zhi_shu = b[0], b[1:]
res = ""
if zhi_fu == '-':
    if len(zheng) > int(zhi_shu):
        # shift the decimal point left within the integer part
        zheng_lt = [i for i in zheng]
        zheng_lt.insert(len(zheng) - int(zhi_shu), '.')
        zheng = ''.join(zheng_lt)
        if fu == '-':
            res += '-'
            res += zheng
            res += xiao
        elif fu == '+':
            res += zheng
            res += xiao
    elif len(zheng) <= int(zhi_shu):
        # pad with leading zeros after "0."
        if fu == '-':
            res += '-'
            res += '0.'
            res += '0' * (int(zhi_shu) - len(zheng))
            res += zheng
            res += xiao
        elif fu == '+':
            res += '0.'
            res += '0' * (int(zhi_shu) - len(zheng))
            res += zheng
            res += xiao
elif zhi_fu == '+':
    if len(xiao) > int(zhi_shu):
        # shift the decimal point right within the fractional part
        xiao_lt = [i for i in xiao]
        xiao_lt.insert(int(zhi_shu), '.')
        xiao = ''.join(xiao_lt)
        if fu == '-':
            res += '-'
            res += zheng
            res += xiao
        elif fu == '+':
            res += zheng
            res += xiao
    elif len(xiao) <= int(zhi_shu):
        # pad with trailing zeros
        if fu == '-':
            res += '-'
            res += zheng
            res += xiao
            res += '0' * (int(zhi_shu) - len(xiao))
        elif fu == '+':
            res += zheng
            res += xiao
            res += '0' * (int(zhi_shu) - len(xiao))
print(res)
| 3a7c5fa60dfffe8aacc8aabf631a3ffbb6e96022 | Python | anastasiev/Arc2 | /services/matchesService.py | UTF-8 | 1,784 | 3.1875 | 3 | [] | no_license | (text below) | true |
from models.model import Match
from views.view import ConsoleView

class MatchesService(object):
    """
    Class implementing actions on matches
    """

    def getMatchByCountry(self, matches, countryName):
        """
        Find all matches played in the selected country
        :param matches:
        :param countryName:
        :return:
        """
        res = []
        for m in matches:
            if m.country == countryName:
                res.append(m)
        return res

    def getMatchByTeam(self, matches, teamName):
        """
        Find all matches involving the selected team
        :param matches:
        :param teamName:
        :return:
        """
        res = []
        for m in matches:
            if m.team1 == teamName or m.team2 == teamName:
                res.append(m)
        return res

    def addMatch(self, matches):
        """
        Reads match data from the console and adds the match to the list of matches
        :param matches:
        :return:
        """
        view = ConsoleView()
        view.printMessage("Enter country: ")
        country = view.inputFromConsole()
        view.printMessage("Enter first team name: ")
        team1 = view.inputFromConsole()
        view.printMessage("Enter second team name: ")
        team2 = view.inputFromConsole()
        view.printMessage("Enter first team score: ")
        res1 = view.inputFromConsole()
        view.printMessage("Enter second team score: ")
        res2 = view.inputFromConsole()
        view.printMessage("Enter day: ")
        day = view.inputFromConsole()
        view.printMessage("Enter month: ")
        month = view.inputFromConsole()
        view.printMessage("Enter year: ")
        year = view.inputFromConsole()
        matches.append(Match(country, team1, team2, res1, res2, [day, month, year]))
| 80418917954021802747bfb4793e83ffa93ddb7d | Python | Krisz-tina/MouseDynamics | /table_generation/measure_time.py | UTF-8 | 2,692 | 2.890625 | 3 | [] | no_license | (text below) | true |
import csv
from utils import settings

def main(file_name):
    with open(file_name, 'r') as csv_file:
        data_reader = csv.reader(csv_file, delimiter=',')
        user_ids = ['7', '9', '12', '15', '16', '20', '21', '23', '29', '35']
        row = next(data_reader)
        row = next(data_reader)
        data = []
        k = 10
        for i in range(0, len(user_ids)):
            user_id = user_ids[i]
            counter = 0
            elapsed_time = 0
            counter_k = 0
            avg = 0
            avgavg = 0
            while row[-1] == user_id:
                if counter_k < k:
                    avg += float(row[0])
                    # print(row[0])
                else:
                    counter_k = 0
                    # avg /= k
                    # print('avg ' + str(avg))
                    avgavg += avg
                    avg = 0
                    counter += 1
                counter_k += 1
                # counter += 1
                # elapsed_time += float(row[0])
                try:
                    row = next(data_reader)
                except StopIteration:
                    data_row = [user_id, counter, elapsed_time / counter]
                    data.append(data_row)
                    print('USER' + str(user_id))
                    # print(counter)
                    # print(avgavg)
                    print(avgavg / counter)
                    return data
            print('USER' + str(user_id))
            # print(counter)
            # print(avgavg)
            print(avgavg / counter)
            data_row = [user_id, counter, elapsed_time / counter]
            data.append(data_row)
        return data

def main2(file_name):
    with open(file_name, 'r') as csv_file:
        data_reader = csv.reader(csv_file, delimiter=',')
        user_ids = ['7', '9', '12', '15', '16', '20', '21', '23', '29', '35']
        row = next(data_reader)
        row = next(data_reader)
        data = []
        for i in range(0, len(user_ids)):
            user_id = user_ids[i]
            sum = 0
            while row[-1] == user_id:
                sum += float(row[0])
                try:
                    row = next(data_reader)
                except StopIteration:
                    data_row = [user_id, sum]
                    data.append(data_row)
                    print('USER' + str(user_id))
                    print(sum)
                    return data
            print('USER' + str(user_id))
            print(sum)
            data_row = [user_id, sum]
            data.append(data_row)
        return data

main2('D:/Sapientia EMTE/final exam/softwares/MouseDynamics/output/Book1.csv')
| e26adeef007d3f7c873484ebaee55341bde180f9 | Python | arman2766/Stackoverflow-Survey-2019 | /job_satisfaction.py | UTF-8 | 3,323 | 3 | 3 | [] | no_license | (text below) | true |
import csv
from collections import defaultdict, Counter

with open('developer_survey_2019/survey_results_public.csv') as f:
    csv_reader = csv.DictReader(f)

    total = 0
    satisfaction_info = {}
    sat_mapper = {
        'Very dissatisfied': 0,
        'Slightly dissatisfied': 0.25,
        'Neither satisfied nor dissatisfied': 0.5,
        'Slightly satisfied': 0.75,
        'Very satisfied': 1,
    }
    gender_type = {'Man', 'Woman', 'Others'}

    for lines in csv_reader:
        jobSats = lines['JobSat']
        carSats = lines['CareerSat']
        countries = lines['Country']
        genders = lines['Gender']
        trans = lines['Trans']

        if countries == 'NA':
            continue
        satisfaction_info.setdefault(countries, {
            'sat_m': 0,
            'sat_w': 0,
            'sat_o': 0,
            'total_m': 0,
            'total_w': 0,
            'total_o': 0
        })
        if (jobSats == 'NA') or (carSats == 'NA'):
            continue
        # a respondent counts as "satisfied" if the combined job + career score exceeds 1.0
        temp = sat_mapper[jobSats] + sat_mapper[carSats]
        if genders == 'Man':
            if temp > 1.0:
                satisfaction_info[countries]['sat_m'] += 1
            satisfaction_info[countries]['total_m'] += 1
        elif genders == 'Woman':
            if temp > 1.0:
                satisfaction_info[countries]['sat_w'] += 1
            satisfaction_info[countries]['total_w'] += 1
        else:
            if temp > 1.0:
                satisfaction_info[countries]['sat_o'] += 1
            satisfaction_info[countries]['total_o'] += 1

    for country, info in satisfaction_info.items():
        print(f'{country}:')
        print('\tMan :')
        if info['total_m'] == 0:
            print('\tNo result!')
        else:
            score = round((info['sat_m'] / info['total_m']) * 100, 1)
            print(f'\t{score}%')
        print('\tWoman :')
        if info['total_w'] == 0:
            print('\tNo result!')
        else:
            score = round((info['sat_w'] / info['total_w']) * 100, 1)
            print(f'\t{score}%')
        print('\tOthers :')
        if info['total_o'] == 0:
            print('\tNo result!')
        else:
            score = round((info['sat_o'] / info['total_o']) * 100, 1)
            print(f'\t{score}%')
        print('\n')
| 9330ae16039857ea32213ec0fa77691b8c65120b | Python | CurtisJohansen/time-series-exercises | /acquire.py | UTF-8 | 3,226 | 3.1875 | 3 | [] | no_license | (text below) | true |
#################### IMPORTS ####################

import pandas as pd
import numpy as np
import requests
import os

# base url of the api, shared by the acquire functions below
base_url = 'https://python.zgulde.net'

######################## ACQUIRE FUNCTIONS #################################

def get_items():
    '''
    returns a dataframe of all items, either from the local csv cache or via the api
    '''
    if os.path.isfile('items.csv'):
        df = pd.read_csv('items.csv')
        return df
    else:
        items_list = []
        response = requests.get(base_url + '/api/v1/items')
        data = response.json()
        n = data['payload']['max_page']
        for i in range(1, n + 1):
            url = base_url + '/api/v1/items?page=' + str(i)
            response = requests.get(url)
            data = response.json()
            page_items = data['payload']['items']
            items_list += page_items
        df = pd.DataFrame(items_list)
        df.to_csv('items.csv', index=False)
        return df

#################### GERMANY ENERGY FUNCTION #####################

def get_germany():
    '''
    This function creates a csv of Germany energy data if one does not exist;
    if one already exists, it uses the existing csv
    and brings it into pandas as a dataframe
    '''
    if os.path.isfile('opsd_germany_daily.csv'):
        df = pd.read_csv('opsd_germany_daily.csv', index_col=0)
    else:
        url = 'https://raw.githubusercontent.com/jenfly/opsd/master/opsd_germany_daily.csv'
        df = pd.read_csv(url)
        df.to_csv('opsd_germany_daily.csv')
    return df

############################# ACQUIRE DATA FUNCTION #########################

def get_df(name):
    """
    This function takes in the string 'items', 'stores', or 'sales' and
    returns a df containing all pages and creates a .csv file for future use.
    """
    api_url = base_url + '/api/v1/'
    response = requests.get(api_url + name)
    data = response.json()

    file_name = name + '.csv'
    if os.path.isfile(file_name):
        return pd.read_csv(file_name)
    else:
        # create list from 1st page
        my_list = data['payload'][name]
        # loop through the pages and add to list
        while data['payload']['next_page'] is not None:
            response = requests.get(base_url + data['payload']['next_page'])
            data = response.json()
            my_list.extend(data['payload'][name])
        # Create DataFrame from list
        df = pd.DataFrame(my_list)
        # Write DataFrame to csv file for future use
        df.to_csv(name + '.csv')
        return df

############################# MERGE DATA FUNCTION #########################

def combine_df(items, sales, stores):
    '''
    This function takes in the three dataframes, items, sales, and stores, and merges them.
    '''
    # rename columns to have a primary key
    items.rename(columns={'item_id': 'item'}, inplace=True)
    stores.rename(columns={'store_id': 'store'}, inplace=True)
    # merge the dataframes together
    items_sales = items.merge(sales, how='right', on='item')
    df = items_sales.merge(stores, how='left', on='store')
    return df
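# A usage sketch (not part of acquire.py), combining the functions above to
# pull the three tables and merge them into one dataframe:
items = get_df('items')
sales = get_df('sales')
stores = get_df('stores')
df = combine_df(items, sales, stores)
print(df.shape)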
| 117085f3cea213a6349a50bd6f617d8191a4276e | Python | goddessofpom/ife | /practice/BinTree.py | UTF-8 | 811 | 3.46875 | 3 | [] | no_license | (text below) | true |
class BinTNode:
    def __init__(self, dat, left=None, right=None):
        self.data = dat
        self.left = left
        self.right = right

def count_BinTNodes(t):
    if t is None:
        return 0
    else:
        return 1 + count_BinTNodes(t.left) + count_BinTNodes(t.right)

def sum_BinTNodes(t):
    if t is None:
        return 0
    else:
        return t.data + sum_BinTNodes(t.left) + sum_BinTNodes(t.right)

# Breadth-first (level-order) traversal of a binary tree.
# Queue is assumed to be a FIFO helper class defined elsewhere (see sketch below).
def levelorder(t, proc):
    qu = Queue()
    qu.enqueue(t)
    while not qu.is_empty():
        n = qu.dequeue()
        if n is None:
            continue
        else:
            qu.enqueue(n.left)
            qu.enqueue(n.right)
            proc(n.data)

# Non-recursive preorder traversal.
# Stack is assumed to be a LIFO helper class defined elsewhere (see sketch below).
def preorder_nonrec(t, proc):
    s = Stack()
    while t is not None or not s.is_empty():
        while t is not None:
            proc(t.data)
            s.push(t.right)
            yield t.data
            t = t.left
        t = s.pop()
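# The Queue and Stack helpers above are not defined in this file; a minimal
# sketch matching the method names used above, backed by collections.deque
# and a plain list, could be:
from collections import deque

class Queue:
    def __init__(self):
        self._items = deque()

    def is_empty(self):
        return not self._items

    def enqueue(self, item):
        self._items.append(item)

    def dequeue(self):
        return self._items.popleft()

class Stack:
    def __init__(self):
        self._items = []

    def is_empty(self):
        return not self._items

    def push(self, item):
        self._items.append(item)

    def pop(self):
        return self._items.pop()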
| 5730e706415e336a99f953bc43478f4a9367a0cd | Python | bineeshpc/data_science | /tutorials/lstm/summarize.py | UTF-8 | 496 | 2.71875 | 3 | [] | no_license | (text below) | true |
from pandas import DataFrame
from pandas import read_csv
from matplotlib import pyplot

# load results into a dataframe
filenames = ['experiment_timesteps_1.csv', 'experiment_timesteps_2.csv',
             'experiment_timesteps_3.csv', 'experiment_timesteps_4.csv', 'experiment_timesteps_5.csv']
results = DataFrame()
for name in filenames:
    results[name[11:-4]] = read_csv(name, header=0)
# describe all results
print(results.describe())
# box and whisker plot
results.boxplot()
pyplot.show()
| 6cdb03626102efa1e76057d89cd19dce3bcbefda | Python | nirkog/AI-ML-Stuff | /Insertion Sort/main.py | UTF-8 | 639 | 3.921875 | 4 | [] | no_license | (text below) | true |
def Swap(items, i, j):
    temp = items[i]
    items[i] = items[j]
    items[j] = temp

def InsertionSort(items):
    sortedItems = []
    i = 0
    for item in items:
        sortedItems.append(item)
        j = len(sortedItems) - 2
        itemIndex = i
        while j >= 0:
            if sortedItems[j] > item:
                Swap(sortedItems, itemIndex, j)
                itemIndex -= 1
            else:
                break
            j -= 1
        i += 1
    return sortedItems

def main():
    items = [3, 1, -923, 87, 3, 6, 28, -21]
    items = InsertionSort(items)
    print(items)

if __name__ == '__main__':
    main()
| 4c9eb037c2027cda98e45aa44aa2c4426ba79ce9 | Python | Costadoat/Informatique | /TP/TP06 Algorithmes dichotomiques/TP06.py | UTF-8 | 1,620 | 3.40625 | 3 | [] | no_license | (text below) | true |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 15:19:30 2021

@author: jg
"""
from math import atan, exp
import matplotlib.pyplot as plt

L = [1, 7.6, 8, 10.1]
a = 7

def recherche_naive(L, a):
    i = 0
    while L[i] < a:
        i = i + 1
    if L[i] == a:
        return i
    else:
        return False

# print(recherche_naive(L, a))

def dichotomie(L, a):
    debut = 0
    fin = len(L) - 1
    while debut <= fin:
        m = (debut + fin) // 2
        if L[m] == a:
            return m
        elif L[m] < a:
            debut = m + 1
        else:
            fin = m - 1
    return False

# print(dichotomie(L, a))

def dichotomie_comparatif(L, a):
    debut = 0
    fin = len(L) - 1
    compteur = 0
    while debut <= fin:
        compteur = compteur + 1
        m = (debut + fin) // 2
        if L[m] == a:
            return m, compteur
        elif L[m] < a:
            debut = m + 1
        else:
            fin = m - 1
    return False, compteur

L = [1, 7.6, 8, 10, 12, 13, 14, 15, 16]
a = 15
# print(dichotomie_comparatif(L, a))

def dicho_zero(L):
    debut = 0
    fin = len(L) - 1
    while debut <= fin:
        m = (debut + fin) // 2
        if L[m] == 0:
            return m
        elif L[m] < 0:
            debut = m + 1
        else:
            fin = m - 1
    return m

L = [-2, -1, 0.0001, 2, 3]

def f(x):
    return exp(x) + x

X = []
x = -1
for i in range(1000):
    x = x + 2 / 1000.
    X.append(x)
L = []
for x in X:
    L.append(f(x))
print(dicho_zero(L))
print(X[dicho_zero(L)])

plt.plot(X, L)
plt.plot(X, [0] * len(X))
plt.savefig('graphe_f.pdf')
plt.show()
| 4d9d7c2fdf11367ba45eff32d6134e6ceabbd064 | Python | zauberzeug/nicegui | /nicegui/elements/splitter.py | UTF-8 | 1,818 | 2.984375 | 3 | ["MIT"] | permissive | (text below) | true |
from typing import Any, Callable, Optional, Tuple

from .mixins.disableable_element import DisableableElement
from .mixins.value_element import ValueElement

class Splitter(ValueElement, DisableableElement):

    def __init__(self, *,
                 horizontal: Optional[bool] = False,
                 reverse: Optional[bool] = False,
                 limits: Optional[Tuple[float, float]] = (0, 100),
                 value: Optional[float] = 50,
                 on_change: Optional[Callable[..., Any]] = None,
                 ) -> None:
        """Splitter

        The `ui.splitter` element divides the screen space into resizable sections,
        allowing for flexible and responsive layouts in your application.

        Based on Quasar's Splitter component:
        `Splitter <https://quasar.dev/vue-components/splitter>`_

        It provides three customizable slots, ``before``, ``after``, and ``separator``,
        which can be used to embed other elements within the splitter.

        :param horizontal: Whether to split horizontally instead of vertically
        :param limits: Two numbers representing the minimum and maximum split size of the two panels
        :param value: Size of the first panel (or second if using reverse)
        :param reverse: Whether to apply the model size to the second panel instead of the first
        :param on_change: callback which is invoked when the user releases the splitter
        """
        super().__init__(tag='q-splitter', value=value, on_value_change=on_change, throttle=0.05)
        self._props['horizontal'] = horizontal
        self._props['limits'] = limits
        self._props['reverse'] = reverse

        self.before = self.add_slot('before')
        self.after = self.add_slot('after')
        self.separator = self.add_slot('separator')
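# A usage sketch (not part of splitter.py): the slots created above are
# typically filled as context managers in NiceGUI user code. The labels here
# are illustrative only.
from nicegui import ui

with ui.splitter(value=30) as splitter:
    with splitter.before:
        ui.label('left of the splitter')
    with splitter.after:
        ui.label('right of the splitter')

ui.run()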
| 46735b85ae9d85d92aafff1e09b0facc48b358ef | Python | Hassibayub/Apache-Spark | /fakeFrindsSpark.py | UTF-8 | 817 | 2.9375 | 3 | [] | no_license | (text below) | true |
from pyspark import SparkConf, SparkContext
from time import perf_counter

start = perf_counter()

conf = SparkConf().setMaster("local").setAppName("FakeFriends")
sc = SparkContext(conf=conf)

raw = sc.textFile(r"G:\Shared drives\Unlimited\Python Scripts\Apache Spark\fakefriends.csv")
datapair = raw.map(lambda x: (int(x.split(",")[2]), int(x.split(",")[3])))
# datapair: (age, friends)
aggData = datapair.mapValues(lambda x: (x, 1)).reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))
# aggData: (eachAge, (totalfriends, counter))
avgData = aggData.mapValues(lambda x: x[0] / x[1])
# avgData: (age, avgFriends)

dataCollected = avgData.collect()
for age, avgFrinds in dataCollected:
    print("At Age {}, Avg Friends {}".format(age, int(avgFrinds)))
print("Time elapsed: ", perf_counter() - start, " secs")
| ea7425df8a9e3ed3158c3810bda1cf58be89b8ac | Python | goodmorningdata/nps | /nps_viz_size.py | UTF-8 | 11,721 | 3.375 | 3 | [] | no_license | (text below) | true |
'''
This script creates a map of the United States with NPS sites marked
with a circle corresponding to the site's size. The command line
argument, "designation", set by the flag, "-d", allows the user to
specify the set of park sites to add to the map. If no parameter is
specified, all NPS site locations are added to the map.

The following visualizations are created:
1) A Folium map with park locations mapped as icons. Each icon has a
   clickable popup that gives the park name and links to the nps.gov
   page for the park.
   - Output file = nps_parks_map_location_{designation}.html
2) A table of park sizes, in descending order.
   - Output files = nps_parks_sorted_by_visits_{designation}.xlsx,
     nps_parks_sorted_by_visits_{designation}.html.
3) Plots including:
   Plot #1 - Park size histogram.
   NOT COMPLETE - Plot #2 - Average designation park size bar plot.
   Plot #3 - Total park area per state pie chart.

Required Libraries
------------------
math, pandas, folium, matplotlib

Dependencies
------------
1) Run the script, nps_create_master_df.py, to create the file,
   nps_parks_master_df.xlsx.
'''

from nps_shared import *
import math
import pandas as pd
import numpy as np
import folium
import matplotlib.pyplot as plt

def create_size_map(df, designation):
    '''
    This function adds a circle marker for each park in the parameter
    dataframe to the map. The circle size corresponds to the area of
    the park. The radius of the circle was calculated by taking the
    area of the park in square meters, dividing it by pi and then taking
    the square root.

    These markers provide the park name and park size in square miles
    as a tooltip. A tooltip instead of a popup is used for this map
    because the popup was less sensitive for the circle markers.

    Parameters
    ----------
    df : Pandas DataFrame
        DataFrame of parks to add to the map.
    designation : str
        Designation of parks in the dataframe.

    Returns
    -------
    None
    '''
    # Create blank map.
    center_lower_48 = [39.833333, -98.583333]
    map = folium.Map(location=center_lower_48,
                     zoom_start=3,
                     control_scale=True,
                     tiles='Stamen Terrain')

    # Add park size circles to map.
    for _, row in (df[~df.lat.isnull()]
                   .sort_values(by='designation', ascending=False).iterrows()):
        # Create tooltip with park size.
        tooltip = (row.park_name.replace("'", r"\'")
                   + ', {:,.0f} acres'.format(row.gross_area_acres)
                   + ' ({:,.0f}'.format(row.gross_area_square_miles)
                   + ' square miles)')
        # Add marker to map.
        folium.Circle(
            radius=math.sqrt(row.gross_area_square_meters/math.pi),
            location=[row.lat, row.long],
            tooltip=tooltip,
            color='crimson',
            fill=True,
            fill_color='crimson'
        ).add_to(map)

    # Save map to file.
    map.save(set_filename('size_map', 'html', designation))

def plot_park_size_histogram(df, designation):
    '''
    Generate a park size histogram.

    Parameters
    ----------
    df : Pandas DataFrame
        DataFrame of park size data.
    designation : str
        Designation of parks in the dataframe.

    Returns
    -------
    None
    '''
    # List of park acreage in millions of acres.
    x_list = (df.gross_area_acres.values)/1e6

    # Mean and median text box.
    mean = df.gross_area_acres.mean()/1e6
    median = np.median(df.gross_area_acres)/1e6
    text_string = '$\\mu=%.2f$\n$\\mathrm{median}=%.2f$' % (mean, median)

    # matplotlib.patch.Patch properties.
    props = dict(facecolor='white', alpha=0.5)

    # Create park size histogram.
    fig, ax = plt.subplots()
    ax.hist(x_list, bins=list(range(math.ceil(max(x_list)) + 1)), alpha=0.8)
    ax.text(0.96, 0.95, text_string,
            transform=ax.transAxes,
            fontsize=10,
            verticalalignment='top', horizontalalignment='right',
            bbox=props)
    plt.xlabel("Millions of acres")
    plt.ylabel("Number of parks")
    plt.title(set_title("Park size histogram 2018", designation))
    plt.show()

    # Save plot to file.
    fig.savefig(set_filename('size_histogram', 'png', designation))

def plot_avg_size_vs_designation(df, designation):
    '''
    Calculate the average park size within each designation and plot
    as a bar chart.

    Parameters
    ----------
    df : Pandas DataFrame
        DataFrame of park size data.
    designation : str
        Designation of parks in the dataframe.

    Returns
    -------
    None
    '''
    if designation == "All Parks":
        df = (df[['designation', 'gross_area_acres']]
              .groupby(by='designation').mean())
        df = df.sort_values(by='designation')

        # Create horizontal bar plot of average park size by designation.
        fig = plt.figure(figsize=(8, 6))
        plt.barh(df.index, df.gross_area_acres/1e6, alpha=0.8)
        plt.title(set_title("Average park size by designation",
                            designation))
        plt.xlabel("Millions of acres")
        plt.yticks(fontsize=8)
        plt.tight_layout()
        plt.show()

        # Save plot to file.
        fig.savefig(set_filename('size_avg_size_vs_designation',
                                 'png', designation))
    else:
        print("** Warning ** ")
        print("Average park size vs. designation plot only makes sense for "
              "all parks. You entered designation = {}. If you would like to "
              "see the average park size vs. designation plot, please run the "
              "script again with no designation command line parameter. "
              "Ex: 'python3 nps_viz_size.py'".format(designation))
        print("****\n")

def chart_total_park_area_per_state(df, designation):
    '''
    This function plots park area in each state as a percent of total
    U.S. park area as a pie chart. The first 6 states are given their
    own pie wedge and the remaining states grouped as "Other" for
    readability.

    Parameters
    ----------
    df : Pandas DataFrame
        DataFrame of park size data.
    designation : str
        Designation of parks in the dataframe.

    Returns
    -------
    None
    '''
    # Total area of all parks in the dataframe.
    total_area = df.gross_area_acres.sum()

    # Group and sum area by state.
    df_state_areas = (df[['main_state', 'gross_area_acres']]
                      .groupby(['main_state'])
                      .sum()
                      .sort_values('gross_area_acres', ascending=False))

    # Split into top six and "Other".
    df_plot = df_state_areas[:6].copy()
    df_plot.loc['Other'] = [df_state_areas['gross_area_acres'][6:].sum()]

    # Pie chart.
    fig, ax = plt.subplots()
    ax.pie(df_plot.gross_area_acres, labels=df_plot.index,
           startangle=90, autopct='%1.1f%%', shadow=False)
    ax.axis('equal')
    plt.suptitle(set_title("Percent of total U.S. park area by state",
                           designation), size=16)
    plt.title('Total U.S. park area ({}) is {:,.0f} acres'
              .format(designation.lower(), total_area))
    plt.tight_layout(rect=[0, 0.05, 1, 0.95])
    plt.show()

    # Save plot to file.
    fig.savefig(set_filename('size_total_park_area_by_state',
                             'png', designation))

def plot_park_area_pct_of_state(df, designation):
    '''
    This function plots park area percent of total state area for each
    state as a horizontal bar plot. If a state does not have any parks
    in the designation parameter category, it will not be included in
    the plot.

    Parameters
    ----------
    df : Pandas DataFrame
        DataFrame of park size data.
    designation : str
        Designation of parks in the dataframe.

    Returns
    -------
    None
    '''
    # Get state areas from file.
    df_state = pd.read_csv('_reference_data/census_state_area.csv',
                           index_col='state_code')

    # Group and sum area by state.
    df_park_area = (df[['main_state', 'gross_area_acres']]
                    .groupby(['main_state'])
                    .sum()
                    .sort_values('gross_area_acres', ascending=False))

    # Join park area and state area dataframes and calculate percent.
    df_park_area = df_park_area.join(df_state, how='left')
    df_park_area['pct_area'] = (df_park_area.gross_area_acres /
                                df_park_area.area_acres * 100)
    df_park_area.sort_values(by=['pct_area'], ascending=False, inplace=True)

    # Plot park area percent of state area by state.
    fig = plt.figure(figsize=(8, 6))
    plt.barh(df_park_area.index, df_park_area.pct_area, alpha=0.8)
    plt.title(set_title("Park area as a percent of total state area",
                        designation), size=16)
    plt.xlabel("Percent of total state area")
    plt.yticks(fontsize=8)
    plt.tight_layout()
    plt.show()

    # Save plot to file.
    fig.savefig(set_filename('size_park_area_pct_of_state',
                             'png', designation))

def output_size_data_to_tables(df, designation):
    '''
    This function outputs the park size data as a table to both an
    Excel spreadsheet and an html file. The data is sorted by size,
    largest first.

    Parameters
    ----------
    df : Pandas DataFrame
        DataFrame of park size data to export.
    designation : str
        Designation of parks in the dataframe.

    Returns
    -------
    None
    '''
    df = df.round(0)
    df_export = (df[['park_name', 'gross_area_acres',
                     'gross_area_square_miles']]
                 .sort_values(by=['gross_area_acres'], ascending=False)
                 .reset_index(drop=True))
    df_export.index += 1
    df_export['gross_area_square_miles'].replace(
        to_replace=0, value="<1", regex=True, inplace=True)
    export_cols = {'park_name': 'Park Name',
                   'gross_area_acres': 'Size (acres)',
                   'gross_area_square_miles': 'Size (square miles)'}
    df_export = df_export.rename(columns=export_cols)

    filename = set_filename('size_parks_sorted_by_size',
                            designation=designation)
    df_export.to_excel(filename + 'xlsx', index=True)
    df_export.to_html(filename + 'html',
                      justify='left',
                      classes='table-park-list',
                      float_format=lambda x: '{:,.0f}'.format(x))

def main():
    df_park, designation = get_parks_df(warning=['location', 'size'])

    # Remove parks missing size data from the dataframe.
    df_park = df_park[~df_park.gross_area_acres.isnull()]

    # Print statistical info for dataframe.
    print(df_park[['gross_area_acres', 'gross_area_square_miles',
                   'gross_area_square_meters']].describe(), '\n')

    # Map #1 - Plot park locations with size circle and save map to html file.
    create_size_map(df_park, designation)

    # Plot #1 - Histogram - park size
    plot_park_size_histogram(df_park, designation)

    # NOT COMPLETE - Plot #2 - Average designation park size bar plot.
    #plot_avg_size_vs_designation(df_park, designation)

    # Plot #3 - Total park area per state pie chart.
    chart_total_park_area_per_state(df_park, designation)

    # Plot #4 - Park area as a percent of state area.
    plot_park_area_pct_of_state(df_park, designation)

    # Save park size data as an Excel spreadsheet and an html table.
    output_size_data_to_tables(df_park, designation)

if __name__ == '__main__':
    main()
| 97891da749a30918c00af86d0b7025a0bccd6016 | Python | hurtb777/pyBrainNetSim | /examples/Simulate_Sensor-Movers.py | UTF-8 | 1,773 | 2.5625 | 3 | [] | no_license | (text below) | true |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import pyBrainNetSim.generators.network as rnd
import pyBrainNetSim.models.world as world
import pyBrainNetSim.drawing.viewers as vis
import pyBrainNetSim.simulation.evolution as evo

mpl.rcParams['figure.figsize'] = (15, 5)

population_size = 10
time_steps = 50

my_environment = world.Environment(origin=(-10, -10), max_point=(10, 10), field_permeability=1.)
food = world.Attractor(environment=my_environment, position=(3, 3), strength=10.)  # add "food"
ipd = rnd.InternalNodeProperties(**{"number_neurons": 16,
                                    'excitatory_to_inhibitory': .7,
                                    'spontaneity': 0.05,
                                    'inactive_period': 1.})
spd = rnd.SensoryNodeProperties()
mpd = rnd.MotorNodeProperties()
wpd = rnd.EdgeProperties()
sm_prop_dist = rnd.SensorMoverProperties(ipd, spd, mpd, wpd)  # default set of distributions of the different variables in the mouse

smp = evo.SensorMoverPopulation(my_environment, sm_prop_dist, initial_population_size=population_size)
print("Created %d individuals" % population_size)

smp.sim_time_steps(max_iter=time_steps)

f1, ax1 = plt.subplots(1, 3)
axs = smp.draw_top_networkx(top=3)

f2, ax1 = plt.subplots(1, 3)
axs2 = smp.draw_top_trajectories(top=3)

top_individual = smp.individuals[smp.top_efficiencies(top=1, at_time=-1)[0]]  # get top individual at the current time (-1)
f2, axarr = plt.subplots(1, 3)
_ax = vis.pcolormesh_edges(top_individual, at_time=-1, ax=axarr[0])
_ax = vis.pcolormesh_edges(top_individual, at_time=0, ax=axarr[1])
_ax = vis.pcolormesh_edge_changes(top_individual, initial_time=0, final_time=-1, as_pct=True, ax=axarr[2])
plt.show()
| 2fa58c8c867b98382dd21d2886b39ec55cc9fc13 | Python | AxelPuig/facerecognition | /utils_cv/__init__.py | UTF-8 | 865 | 2.828125 | 3 | [] | no_license | (text below) | true |
import cv2
import numpy as np

def load_and_display_image(filename):
    img = cv2.imread(filename, cv2.IMREAD_COLOR)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def process_image(filename):
    img = cv2.imread(filename, cv2.IMREAD_COLOR)
    print(img.shape)
    longueur, largeur, _ = img.shape
    matricederotation = cv2.getRotationMatrix2D((longueur, largeur / 2), 90, 1)
    dst = cv2.warpAffine(img, matricederotation, (longueur, largeur))
    cv2.imshow('image', dst)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def translation_image(filename):
    img = cv2.imread(filename, cv2.IMREAD_COLOR)
    hauteur, longueur, _ = img.shape
    M = np.float32([[1, 0, 100], [0, 1, 50]])
    dst = cv2.warpAffine(img, M, (longueur + 100, hauteur + 50))
    cv2.imshow('img', dst)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 34173df587d389e2bdd3c41be785ff6568493bbf | Python | cosmos-sajal/ds_algo | /strings/anagram.py | UTF-8 | 497 | 3.890625 | 4 | ["MIT"] | permissive | (text below) | true |
# https://leetcode.com/problems/valid-anagram/

def get_initialised_freq_list():
    return [0] * 26

def populate_freq_list(str):
    freq_list = get_initialised_freq_list()
    for i in range(len(str)):
        freq_list[ord(str[i]) - ord('a')] += 1
    return freq_list

def is_anagram(str1, str2):
    freq_list_1, freq_list_2 = populate_freq_list(
        str1), populate_freq_list(str2)
    return freq_list_1 == freq_list_2

def main():
    print(is_anagram('abc', 'bcaa'))

main()
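# For comparison only (not part of the original solution): the same
# frequency-count idea via collections.Counter, giving identical results
# for the lowercase ASCII inputs assumed above.
from collections import Counter

def is_anagram_counter(str1, str2):
    return Counter(str1) == Counter(str2)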
| a8aedb9f2b5463d43fd9ed49c4f142cdae4d2c7e | Python | lovetyagi-17/Hacktoberfest_2021-1 | /rock_paper_scissor.py | UTF-8 | 2,660 | 4.125 | 4 | [] | no_license | (text below) | true |
"""
Uncle Fyodor, Matroskin the Cat and Sharic the Dog live their simple but happy lives in Prostokvashino.
Sometimes they receive parcels from Uncle Fyodor’s parents and sometimes from anonymous benefactors, in which case it is hard to determine to which one of them the package has been sent.
A photographic rifle is obviously for Sharic who loves hunting and fish is for Matroskin, but for whom was a new video game console meant? Every one of the three friends claimed that the present is for him and nearly quarreled.
Uncle Fyodor had an idea how to solve the problem justly: they should suppose that the console was sent to all three of them and play it in turns. Everybody got relieved but then yet another burning problem popped up — who will play first?
This time Matroskin came up with a brilliant solution, suggesting the most fair way to find it out: play rock-paper-scissors together.
The rules of the game are very simple. On the count of three every player shows a combination with his hand (or paw). The combination corresponds to one of three things: a rock, scissors or paper.
Some of the gestures win over some other ones according to well-known rules: the rock breaks the scissors, the scissors cut the paper, and the paper gets wrapped over the stone. Usually there are two players.
Yet there are three friends, that’s why they decided to choose the winner like that: If someone shows the gesture that wins over the other two players, then that player wins. Otherwise, another game round is required.
Write a program that will determine the winner by the gestures they have shown.
Input
-----
The first input line contains the name of the gesture that Uncle Fyodor showed, the second line shows which gesture Matroskin showed and the third line shows Sharic’s gesture.
Output
------
Print "Fyodor" (without quotes) if Uncle Fyodor wins. Print "Matroskin" if Matroskin wins and "Sharic" if Sharic wins. If it is impossible to find the winner, print "?".
Example 1
input
-----
rock
rock
rock
output
-----
?
Example 2
input
-----
paper
rock
rock
output
-----
Fyodor
Example 3
input
-----
scissors
rock
rock
output
------
?
Example 4
input
-----
scissors
paper
rock
output
------
?
"""
# Code:
F = input()
M = input()
S = input()
player = {F: 'Fyodor', M: 'Matroskin', S: 'Sharic'}
possible_throws = [['paper', 'rock', 'rock'], ['rock', 'scissors', 'scissors'], ['paper', 'paper', 'scissors']]
possible_wins = ['paper', 'rock', 'scissors']
packets = [F, M, S]
packets.sort()
if packets in possible_throws:
    winner = possible_wins[possible_throws.index(packets)]
    print(player[winner])
else:
    print("?")
| d80a1746932c739aaf97a8c8b1141e226dfec04d | Python | jngmk/Training | /Python/BAEKJOON/14503 로봇청소기/14503.py | UTF-8 | 824 | 2.546875 | 3 | [] | no_license | (text below) | true |
def cleaning():
    global cleaned_space, d
    a, b = X, Y
    arr[a][b] = 2
    while True:
        flag = False
        for di in range(3, -1, -1):
            vd = (d + di) % 4
            va, vb = a + da[vd], b + db[vd]
            if not arr[va][vb]:
                arr[va][vb] = 2
                a, b = va, vb
                cleaned_space += 1
                d = vd
                flag = True
                break
        if not flag:
            if arr[a + da[(d + 2) % 4]][b + db[(d + 2) % 4]] == 1:
                return
            a, b = a + da[(d + 2) % 4], b + db[(d + 2) % 4]

da = [-1, 0, 1, 0]
db = [0, 1, 0, -1]
N, M = map(int, input().split())
X, Y, d = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(N)]
visited = [[0] * M for _ in range(N)]
cleaned_space = 1
cleaning()
print(cleaned_space)
| ca1fa0a55101b3a6524d6fc6ea3da86c2a96ed2a | Python | Aasthaengg/IBMdataset | /Python_codes/p02757/s740152883.py | UTF-8 | 533 | 3.140625 | 3 | [] | no_license | (text below) | true |
#!/usr/bin/python3
import sys
from collections import Counter

input = lambda: sys.stdin.readline().strip()

n, p = [int(x) for x in input().split()]
s = input()
ans = 0
if p == 2 or p == 5:
    # For p = 2 or 5, divisibility depends only on the last digit: every
    # substring ending at an allowed digit at (1-based) position i counts,
    # and there are i such substrings.
    allowed_digits = '24680' if p == 2 else '50'
    for i, c in enumerate(s, start=1):
        if c in allowed_digits:
            ans += i
else:
    # Otherwise, count pairs of positions with equal suffix remainders mod p;
    # the substring between any such pair is divisible by p.
    count = Counter({0: 1})
    x, e = 0, 1
    for c in reversed(s):
        x = (x + (ord(c) - ord('0')) * e) % p
        e = e * 10 % p
        ans += count[x]
        count[x] += 1
print(ans)
| 34a50dc9f0a26c3d1b4941587caba49c79f42185 | Python | TheTimmoth/wireui | /wireui/library/typedefs/tables.py | UTF-8 | 10,264 | 2.640625 | 3 | ["MIT"] | permissive | (text below) | true |
# tables.py
# Table for wireguard
# Author: Tim Schlottmann
from typing import Union
from .exceptions import PeerDoesNotExistError
from .result import MESSAGE_LEVEL
from .result import Message
from .result import MessageContent
from .result import Result
class Table():
""" n x m table """
def __init__(self,
n: int,
m: int,
row_names: list = [],
column_names: list = []):
if len(row_names) != n and row_names != []:
raise ValueError("Dimension mismatch: len(row_names) != n")
if len(column_names) != m and column_names != []:
raise ValueError("Dimension mismatch: len(column_names) != m")
self.n = n
self.m = m
# Create n x m matrix
self.content = [None] * self.n
for i in range(self.n):
self.content[i] = [None] * self.m
self.row_names = row_names
self.column_names = column_names
# Get parameters from names
if self.row_names != []:
self.row_names_lengths = [0] * n
self.row_names_max_length = 0
for i in range(n):
self.row_names_lengths[i] = len(self.row_names[i])
if self.row_names_lengths[i] > self.row_names_max_length:
self.row_names_max_length = self.row_names_lengths[i]
if self.column_names != []:
self.column_names_lengths = [0] * m
self.column_names_max_length = 0
for i in range(m):
self.column_names_lengths[i] = len(self.column_names[i])
if self.column_names_lengths[i] > self.column_names_max_length:
self.column_names_max_length = self.column_names_lengths[i]
def __repr__(self):
return f"{type(self).__name__}({self.n}, {self.m}, {self.row_names}, {self.column_names})"
def __str__(self):
s = ""
# Print column headings (only if there are any)
if len(self.column_names) > 0:
for i in range(self.row_names_max_length):
s += " "
for i in range(len(self.column_names)):
s += f" {self.column_names[i]}"
s += "\n"
# Print rows (only if there are any)
for i in range(self.n):
if len(self.row_names) > 0:
for j in range(self.row_names_max_length - self.row_names_lengths[i]):
s += " "
s += self.row_names[i]
for j in range(self.m):
s += f" {self.content[i][j]}"
if len(self.column_names) > 0 and len(str(
self.content[i][j])) < self.column_names_lengths[j]:
for _ in range(self.column_names_lengths[j] -
len(str(self.content[i][j]))):
s += " "
s += "\n"
#Remove last new line
s = s[:-1]
return s
def getitem(self, i: int, j: int) -> any:
""" Get the value of the item in row i and column j """
return self.content[i][j]
def setitem(self, i: int, j: int, v: any):
""" Set the item in row i and column j to value v """
self.content[i][j] = v
def setrow(self, i: int, r: list):
if len(r) == self.m:
self.content[i] = r
else:
raise ValueError(
f"Dimension mismatch: len(r) ({len(r)}) != self.m ({self.m})")
class CONNECTION_TABLE_MESSAGE_TYPE():
@property
def DIMENSION_MISMATCH():
return 0
@property
def SELF_CONNECTION():
return 1
@property
def MAIN_PEER_MISSING():
return 2
@property
def MAIN_PEER_NOT_EXISTS():
return 3
@property
def MAIN_PEER_NOT_OUTGOING():
return 4
class ConnectionTableMessageContent(MessageContent):
message_type: int
peer: str
i: int
j: int
actual: Union[int, str]
should: Union[int, str]
ConnectionTableMessage = Message[ConnectionTableMessageContent]
class ConnectionTable(Table):
""" ConnectionTable for peers """
def __init__(self, peer_names: list):
super().__init__(len(peer_names),
len(peer_names) + 1, peer_names,
peer_names + ["main_peer"])
for i in range(self.n):
for j in range(self.n):
self.setitem(i, j, 0)
self.setitem(i, self.m - 1, "None")
def __repr__(self):
return f"{type(self).__name__}({self.row_names})"
def setitem(self, i: int, j: int, v: Union[int, str]):
""" Set the item in row i and column j to value v
Please execute check_integrity afterwards to make sure changed data is still valid """
super().setitem(i, j, v)
def update(self, s: str) -> Result:
""" Updates the table with a str representation of a ConnectionTable object """
r = Result()
# Split lines and remove first line
s = s.splitlines()
s.pop(0)
for i in range(self.n):
# Separate connection elements
s[i] = s[i][self.row_names_max_length + 1:]
s[i] = s[i].split()
if len(s[i]) != self.m:
r.append(
ConnectionTableMessage(
message_level=MESSAGE_LEVEL.ERROR,
message=ConnectionTableMessageContent(
message_type=CONNECTION_TABLE_MESSAGE_TYPE.DIMENSION_MISMATCH,
peer="",
i=i,
j=0,
actual=len(s[i]),
should=self.m)))
continue
# Update table
for j in range(self.m):
if j < self.m - 1:
self.setitem(i, j, int(s[i][j]))
else:
self.setitem(i, j, s[i][j])
self.__check_integrity(r)
return r
def get_outgoing_connected_peers(self, name: str) -> list:
""" Get a list of all peers that peer 'name' has an outgoing connection to """
# Get row index for peer
row = -1
for i in range(self.n):
if self.column_names[i] == name:
row = i
break
if row == -1:
raise PeerDoesNotExistError(name)
# List all peers with an outgoing connection to that peer
l = []
for i in range(self.n):
if self.getitem(row, i):
l.append(self.column_names[i])
return l
def get_main_peer(self, name: str) -> str:
""" Get the main peer for outgoing connections for a peer """
# Get row index for peer
row = -1
for i in range(self.n):
if self.column_names[i] == name:
row = i
break
if row == -1:
raise PeerDoesNotExistError(name)
return (self.getitem(row, self.m - 1))
def get_ingoing_connected_peers(self, name: str) -> list:
""" Get a list of all peers that peer 'name' has an ingoing connection from """
# Get column index for peer
column = -1
for i in range(self.m):
if name == self.column_names[i]:
column = i
break
if column == -1:
raise PeerDoesNotExistError(name)
# List all peers with an ingoing connection from that peer
l = []
for i in range(self.n):
if self.getitem(i, column):
l.append(self.row_names[i])
return l
def __check_integrity(self, r: Result):
""" Check if the table has invalid entries """
#Check if all diagonal elements are zero
for i in range(self.n):
if self.getitem(i, i) == 1:
self.setitem(i, i, 0)
r.append(
ConnectionTableMessage(
message_level=MESSAGE_LEVEL.ERROR,
message=ConnectionTableMessageContent(
message_type=CONNECTION_TABLE_MESSAGE_TYPE.SELF_CONNECTION,
peer=self.row_names[i],
i=i,
j=i,
actual=1,
should=0)))
# Check main_peers
for i in range(self.n):
# If there are outgoing peers...
if len(self.get_outgoing_connected_peers(
self.row_names[i])) > 0 or self.getitem(i, self.m - 1) != "None":
# ... check if there is a main_peer
if self.getitem(i, self.m - 1) == "None":
self.setitem(i, self.m - 1,
self.get_outgoing_connected_peers(self.row_names[i])[0])
r.append(
ConnectionTableMessage(
message_level=MESSAGE_LEVEL.ERROR,
message=ConnectionTableMessageContent(
message_type=CONNECTION_TABLE_MESSAGE_TYPE.MAIN_PEER_MISSING,
peer=self.row_names[i],
i=i,
j=self.m - 1,
actual="",
should=self.getitem(i, self.m - 1))))
# ... check if the main_peer exists
if self.getitem(i, self.m - 1) not in self.row_names:
# Correct to the first outgoing peer if there is one
if self.get_outgoing_connected_peers(self.row_names[i]):
self.setitem(
i, self.m - 1,
self.get_outgoing_connected_peers(self.row_names[i])[0])
r.append(
ConnectionTableMessage(
message_level=MESSAGE_LEVEL.ERROR,
message=ConnectionTableMessageContent(
message_type=CONNECTION_TABLE_MESSAGE_TYPE.
MAIN_PEER_NOT_EXISTS,
peer=self.row_names[i],
i=i,
j=self.m - 1,
actual="",
should=self.getitem(i, self.m - 1))))
# Correct to "None" otherwise
else:
self.setitem(i, self.m - 1, "None")
r.append(
ConnectionTableMessage(
message_level=MESSAGE_LEVEL.ERROR,
message=ConnectionTableMessageContent(
message_type=CONNECTION_TABLE_MESSAGE_TYPE.
MAIN_PEER_NOT_EXISTS,
peer=self.row_names[i],
i=i,
j=self.m - 1,
actual="",
should="None")))
# ... check if the main_peer is outgoing
elif self.getitem(i,
self.m - 1) not in self.get_outgoing_connected_peers(
self.row_names[i]):
j = 0
for name in self.column_names:
if self.getitem(i, self.m - 1) == name:
self.setitem(i, j, 1)
break
j += 1
r.append(
ConnectionTableMessage(
message_level=MESSAGE_LEVEL.ERROR,
message=ConnectionTableMessageContent(
message_type=CONNECTION_TABLE_MESSAGE_TYPE.
MAIN_PEER_NOT_OUTGOING,
peer=self.row_names[i],
i=i,
j=j,
actual=0,
should=1)))
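
# Usage sketch (hedged: the constructor is defined earlier in this file and its
# exact signature is not shown here; assuming it takes the list of peer names):
# table = ConnectionTable(["alice", "bob", "carol"])
# result = table.update(str(other_table))   # returns a Result listing any fixes
# table.get_outgoing_connected_peers("alice")
# table.get_main_peer("alice")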
| true
|
a3b9de1389f1f52afe0f5ceeb29878cb35d2d7c9
|
Python
|
stemaan/pyr1-code
|
/day13/proerty.py
|
UTF-8
| 766
| 2.84375
| 3
|
[] |
no_license
|
class Human:
def __init__(self, name):
self.__name = name
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value.lower()
class Jira:
def __init__(self, *args, **kwargs):
self.field123123123123 = 'something'
def get_report(self):
print('Approved by manager', self.approved_by_manager)
@property
def approved_by_manager(self):
return self.field123123123123
if __name__ == '__main__':
adam = Human('Adam')
print(adam.name)
adam.name = 'Jan'
print(adam.name)
form_username = 'form .username'
data_to_submit = {
form_username: 'jan',
'password': 'admin1',
'email': 'test@example.com'
}
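    # A small sketch of why Jira.approved_by_manager is read-only: a property
    # defined without a setter raises AttributeError on assignment.
    jira = Jira()
    jira.get_report()
    try:
        jira.approved_by_manager = 'nope'  # no setter was defined for this property
    except AttributeError as err:
        print('read-only property:', err)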
| true
|
fe110281794c06ce60e40bcb8051d98e0eed37a4
|
Python
|
BIAOXYZ/variousCodes
|
/_CodeTopics/LeetCode_contest/biweekly/biweekly2022/71-[大年初五]/71_2.py
|
UTF-8
| 615
| 2.953125
| 3
|
[] |
no_license
|
class Solution(object):
def pivotArray(self, nums, pivot):
"""
:type nums: List[int]
:type pivot: int
:rtype: List[int]
"""
small, equal, large = [], [], []
for num in nums:
if num < pivot:
small.append(num)
elif num > pivot:
large.append(num)
else:
equal.append(num)
return small + equal + large
"""
https://leetcode-cn.com/submissions/detail/265024445/
44 / 44 test cases passed
Status: Accepted
Runtime: 168 ms
Memory: 29.5 MB
"""
| true
|
9ff16482c5666b73f7da76d002e50d1659d0b8e7
|
Python
|
venkatajagadeesh123/python_snippets
|
/strings.py
|
UTF-8
| 1,973
| 3.734375
| 4
|
[] |
no_license
|
# name = "Srini"
# age = 23
print ("Hello world")
print("My name is " + name + "my age " + str(age))
print("My name is %s and my age %d" % (name,age))
print("My name is {name} and my age {age}".format(age=age,name=name))
# this syntax works only in Python 3.6+
print(f'My name is {name} my age next year {age+1}')
# writing a function to generate a story
# this syntax works in Python 3.x
def story(name,age,email='basic@gmail.com'):
return ("My name is {name} and my age {age} and my email is {email}" .format(age=age,name=name,email=email))
def make_upper_and_give_first_twoval(mystr):
upcasestr = mystr.upper()
return upcasestr[:2]
# name = "srini"
# age = 23
# email = "hello@gmail.com"
# story(name,age,email)
# print(story(age=23,name='srini',email='hello@gmail.com'))
# full_story= story(age=23,name='srini',email='hello@gmail.com')
# print(full_story)
print(story(age=23,name='srini'))
person = {'name': 'Jenn', 'age': 23}
# sentence = 'My name is ' + person['name'] + ' and I am ' + str(person['age']) + ' years old.'
# print(sentence)
# sentence = 'My name is {} and I am {} years old.'.format(person['name'], person['age'])
# print(sentence)
# sentence = 'My name is {0} and I am {1} years old.'.format(person['name'], person['age'])
# print(sentence)
# tag = 'h1'
# text = 'This is a headline'
# sentence = '<{0}>{1}</{0}>'.format(tag, text)
# print(sentence)
sentence = 'My name is {0} and I am {1} years old.'.format(person['name'], person['age'])
print(sentence)
# pi = 3.14159265
# sentence = 'Pi is equal to {}'.format(pi)
# print(sentence)
sentence = '1 MB is equal to {} bytes'.format(1000**2)
print(sentence)
import datetime
my_date = datetime.datetime(2016, 9, 24, 12, 30, 45)
# print(my_date)
# September 24, 2016
sentence = '{:%B %d, %Y}'.format(my_date)
print(sentence)
# September 24, 2016 fell on a Saturday and was the 268 day of the year.
sentence = '{0:%B %d, %Y} fell on a {0:%A} and was the {0:%j} day of the year'.format(my_date)
print(sentence)
| true
|
3e5d028e5653e3626bef32018a9b9d38d4f5c264
|
Python
|
Phaiax/sudoku
|
/cheatsheet.py
|
UTF-8
| 12,922
| 2.546875
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
import cv2
import numpy as np

# LOAD AND DISPLAY
# ==============================================
# imread -> numpy ndarray
img = cv2.imread(filename, cv2.IMREAD_COLOR|IMREAD_GRAYSCALE|IMREAD_UNCHANGED) # UNCHANGED includes alpha
# show with matplotlib
from matplotlib import pyplot as plt
plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
plt.show()
# with gtk
while(1):
cv2.imshow('image',img)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows()
# empty
img = np.zeros((512,512,3), np.uint8)
# TYPE NDARRAY
# ===============================================
img.shape # -> tuple
img.ndim
img.dtype # for debugging! uint8
# img.T img.base img.ctypes img.dumps img.itemset
# img.nonzero img.reshape img.sort img.tofile
# img.all img.byteswap img.cumprod img.fill img.itemsize
# img.partition img.resize img.squeeze img.tolist
# img.any img.choose img.cumsum img.flags img.max
# img.prod img.round img.std img.tostring
# img.argmax img.clip img.data img.flat img.mean
# img.ptp img.searchsorted img.strides img.trace
# img.argmin img.compress img.diagonal img.flatten img.min
# img.put img.setfield img.sum img.transpose
# img.argpartition img.conj img.dot img.getfield img.nbytes
# img.ravel img.setflags img.swapaxes img.var
# img.argsort img.conjugate img.dtype img.imag img.ndim
# img.real img.shape img.take img.view
# img.astype img.copy img.dump img.item img.newbyteorder
# img.repeat img.size img.tobytes
# data layout:
# [BGR]
px = img[100,100]
px_blue = img[100,100,0]
img[100,100] = [255,255,255]
# select region
ball = img[280:340, 330:390]
img[273:333, 100:160] = ball
# splitting channels
b,g,r = cv2.split(img)
img = cv2.merge((b,g,r))
# setting red to zero
img[:,:,2] = 0
# DRAW
# ===============================================
cv2.line(img,(0,0),(511,511),(255,0,0),5)
cv2.rectangle(img,(384,0),(510,128),(0,255,0),3)
cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> None
cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
# poly
pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
img = cv2.polylines(img,[pts],True,(0,255,255))
# text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
# INTERACTIVE
# ================================================
# Trackbars
cv2.namedWindow('image')
cv2.createTrackbar('R','image',0,255,nothing)
cv2.createTrackbar(switch, 'image',0,1,nothing) # switch = '0 : OFF \n1 : ON'
cv2.setTrackbarPos(switch, 'image', 1 if mode else 0)
s = cv2.getTrackbarPos(switch,'image')
# eventloop
while(1):
cv2.imshow('image',img)
k = cv2.waitKey(timeout) & 0xFF
if k == ord('m'):
pass # key m pressed
elif k == 27:
break
# getTrackbarPos()
# mouse
cv2.setMouseCallback('image',draw_circle)
def draw_circle(event,x,y,flags,param):
global ix,iy,drawing,mode,color
if event == cv2.EVENT_LBUTTONDOWN: # list of all events: [i for i in dir(cv2) if 'EVENT' in i]
pass
# INTRESTING FUNCTIONS
# ==================================================
# Make special borders around img (for kernel functions). [i for i in dir(cv2) if 'BORDER' in i]
cv2.copyMakeBorder(src, top, bottom, left, right, borderType[, dst[, value]]) -> dst
cv2.cvtColor(src, code[, dst[, dstCn]]) -> dst
# THRESHOLDING
# type=cv2.THRESH_BINARY | THRESH_BINARY_INV | THRESH_TRUNC | THRESH_TOZERO | THRESH_TOZERO_INV
cv2.threshold(src, thresh, maxval, type[, dst]) -> retval, dst
# automatically find best threshold
cv2.threshold(src,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) -> retval, dst
# adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C | cv2.ADAPTIVE_THRESH_GAUSSIAN_C
cv2.adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst
cv2.inRange(src, lowerb, upperb[, dst]) -> dst
# BITWISE
cv2.bitwise_not(src[, dst[, mask]]) -> dst
cv2.bitwise_and(src1, src2[, dst[, mask]]) -> dst
cv2.bitwise_or(src1, src2[, dst[, mask]]) -> dst
cv2.bitwise_xor(src1, src2[, dst[, mask]]) -> dst
# BLUR
# Averaging
blur(src, ksize=(,) [, dst[, anchor[, borderType]]]) -> dst
boxFilter(src, ddepth, ksize=(,) [, dst[, anchor[, normalize[, borderType]]]]) -> dst
# Gaussian
cv2.GaussianBlur(src, ksize=(,), sigmaX[, dst[, sigmaY[, borderType]]]) -> dst
# Median (does not introduce new color values)
cv2.medianBlur(src, ksize[, dst]) -> dst
# Bilateral (preserves edges)
bilateralFilter(src, d, sigmaColor, sigmaSpace[, dst[, borderType]]) -> dst
# KERNEL
kernel = np.ones((5,5),np.float32)/25
filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst
# MORPHOLOGY
# Kernel: Rectangular
kernel = np.ones((5,5),np.uint8)
# or if not rectangular structured:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT | MORPH_ELLIPSE | MORPH_CROSS,(5,5))
eroded = cv2.erode(img,kernel,iterations = 1)
dilation = cv2.dilate(img,kernel,iterations = 1)
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
# GRADIENTS / HIGH PASS FILTERS
# if ksize=-1 -> Scharr is used
cv2.Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
cv2.Scharr(src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]]) -> dst
cv2.Laplacian(src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst
# PYRAMIDS
# gaussian scale area by 1/4 or 4
cv2.pyrUp(src[, dst[, dstsize[, borderType]]]) -> dst # times 4 pixels
cv2.pyrDown(src[, dst[, dstsize[, borderType]]]) -> dst # times 1/4 pixels
# Laplacian pyramid for level x
cv2.subtract(levelx, levelx_then_downscaled_then_upscaled)
# CONTOURS
# finds white spots (best with b/w images, no grey)
# mode = cv2.RETR_EXTERNAL | cv2.RETR_TREE; method = cv2.CHAIN_APPROX_NONE | cv2.CHAIN_APPROX_SIMPLE
findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy
drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> None
cv2.moments(contours[i])
cv2.contourArea(contours[i]) -> area
equi_diameter = np.sqrt(4*area/np.pi)
cv2.arcLength(contours[i], closed)
cv2.approxPolyDP(contours[i],epsilon,True)
cv2.convexHull(contours[i] [, hull[, clockwise[, returnPoints]]) -> hull
cv2.isContourConvex(contours[i])
cv2.boundingRect(contours[i]) -> x,y,w,h
aspect_ratio = float(w)/h
extent = float(area)/(w*h)
solidity = float(area)/cv2.contourArea(hull)
cv2.minAreaRect(contours[i]) -> rect # rotated bounding rect
cv2.minEnclosingCircle(contours[i])
cv2.fitEllipse(contours[i])
cv2.fitLine(contours[i], cv2.DIST_L2,0,0.01,0.01) -> [vx,vy,x,y]
# Histogram
cv2.calcHist(images=[img], channels=[0], mask=None, histSize=[256], ranges=[0,256]
[, hist[, accumulate]]) -> hist
# draw
plt.hist(img.ravel(),256,[0,256]); plt.show()
# equalization
equ = cv2.equalizeHist(img [,equ])
# better localized equalization (Contrast Limited Adaptive Histogram Equalization)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(img)
# two dimensional hist
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
plt.imshow(hist,interpolation = 'nearest')
# Backprojection: Use histogram of searched object to get a probability mask from a picture
cv2.calcBackProject
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.html
# Fourier Transformation
# Speedup with optimal size, pad with zeros
nrows = cv2.getOptimalDFTSize(rows)
ncols = cv2.getOptimalDFTSize(cols)
nimg = cv2.copyMakeBorder(img, 0, nrows - rows, 0, ncols - cols, cv2.BORDER_CONSTANT, value = 0)
#
dft = cv2.dft(np.float32(nimg),flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft) # make 0,0 into center
# real plane imag plane
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
# or
cv2.cartToPolar(x, y[, magnitude[, angle[, angleInDegrees]]]) -> magnitude, angle
# and inverse
f_ishift = np.fft.ifftshift(fshift) # reverse np.fft.fftshift
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])
# Template matching
methods = cv2.TM_CCOEFF | cv2.TM_CCOEFF_NORMED | cv2.TM_CCORR |
cv2.TM_CCORR_NORMED | cv2.TM_SQDIFF | cv2.TM_SQDIFF_NORMED
res = cv2.matchTemplate(img,template,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# or for multiple
threshold = 0.8
loc = np.where( res >= threshold)
# Hough transform
# input is binary image (from canny)
lines = cv2.HoughLines(edges,1,np.pi/180,200)
# see for display
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# faster:
lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
for x1,y1,x2,y2 in lines[0]:
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
# Hough transformation for circles
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,
param1=50,param2=30,minRadius=0,maxRadius=0)
# FOREGROUND/BACKGROUND SEPARATION
# watershed and playing with erosion/dilation to separate fore- and background
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_watershed/py_watershed.html
# Grab cut
mask, bgdModel, fgdModel = cv2.grabCut(img,mask,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html
# Calculates the distance to the closest zero pixel for each pixel of the source image.
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2|DIST_L1|DIST_C, maskSize=5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
cv2.minMaxLoc(imgray,mask) -> min_val, max_val, min_loc, max_loc
cv2.mean(im,mask) -> mean_val
cv2.countNonZero(img)
# IMAGE MANIPULATION
# ==================================================
# MERGING
cv2.add(img1, img2) # is saturating
cv2.addWeighted(img1,a,img2,b,c) # a*img1 + b*img2 + c
# AFFINE TRANSFORM
# In affine transformation, all parallel lines in the original image will still be parallel in the output image.
warpAffine(src, M, dsize=(,) [, dst[, flags[, borderMode[, borderValue]]]]) -> dst
# TRANSLATE
M = np.float32([[1,0,tx],[0,1,ty]])
# ROTATE
M = getRotationMatrix2D(center=(,), angle, scale=(,))
# FROM POINTS
pts1 = np.float32([[50,50],[200,50],[50,200]])
pts2 = np.float32([[10,100],[200,50],[100,250]])
M = cv2.getAffineTransform(pts1,pts2)
# PERSPECTIVE TRANSFORM
#
warpPerspective(src, M, dsize=(,) [, dst[, flags[, borderMode[, borderValue]]]]) -> dst
# FROM POINTS
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1,pts2)
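# e.g. apply it (a sketch; output size chosen to match pts2)
dst = cv2.warpPerspective(img, M, (300, 300))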
# PATTERNS
# ==================================================
# MASKING AND ADDING
# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of logo in ROI
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
# Put logo in ROI and modify the main image
dst = cv2.add(img1_bg,img2_fg)
img1[0:rows, 0:cols ] = dst
# PERFORMANCE
# ==================================================
e1 = cv2.getTickCount()
# your code execution
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency() # time is in seconds
cv2.useOptimized() # is using optimized?
cv2.setUseOptimized(True|False)
# ipython
%timeit c=d()
| true
|
0c80e3f5d468d2b479db63b58422a8bfd757d2ac
|
Python
|
stefanDeveloper/bomberman
|
/agent_code/nikolaj_boyle/callbacks.py
|
UTF-8
| 2,105
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
import os
import pickle
import random
import torch as T
from .model import DQN
import numpy as np
from .StateToFeat import state_to_features
ACTIONS = ['UP', 'RIGHT', 'DOWN', 'LEFT', 'WAIT', 'BOMB']
def setup(self):
"""
Setup your code. This is called once when loading each agent.
Make sure that you prepare everything such that act(...) can be called.
When in training mode, the separate `setup_training` in train.py is called
after this method. This separation allows you to share your trained agent
with other students, without revealing your training code.
In this example, our model is a set of probabilities over actions
    that is independent of the game state.
:param self: This object is passed to all callbacks and you can set arbitrary values.
"""
self.random_prob = .9
if self.train and not os.path.isfile("my-saved-model.pt"):
self.logger.info("Setting up model from scratch.")
print("Setting up model")
self.model = DQN(6)
else:
self.logger.info("Loading model from saved state.")
with open("my-saved-model.pt", "rb") as file:
self.model = pickle.load(file)
def act(self, game_state: dict) -> str:
"""
Your agent should parse the input, think, and take a decision.
When not in training mode, the maximum execution time for this method is 0.5s.
:param self: The same object that is passed to all of your callbacks.
:param game_state: The dictionary that describes everything on the board.
:return: The action to take as a string.
"""
# todo Exploration vs exploitation
if self.train:
random_prob = (.9 - (.9 * (self.n_rounds / 700)))
if random.random() < random_prob:
self.logger.debug("Choosing action purely at random.")
# 80%: walk in any direction. 10% wait. 10% bomb.
return np.random.choice(ACTIONS, p=[.2, .2, .2, .2, .1, .1])
self.logger.debug("Querying model for action.")
return ACTIONS[T.argmax(self.model.forward(game_state))]
# Here was state_to_features
| true
|
f75071e7b11208cd3a45626e1254d68dc6179499
|
Python
|
noval102200/NovalIDE
|
/noval/python/interpreter/pythonpathmixin.py
|
UTF-8
| 5,744
| 2.5625
| 3
|
[
"MulanPSL-1.0"
] |
permissive
|
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog,messagebox
from noval import NewId,_
import noval.util.fileutils as fileutils
import noval.util.apputils as sysutils
import noval.python.parser.utils as parserutils
import locale
import noval.imageutils as imageutils
import noval.consts as consts
import noval.ttkwidgets.treeviewframe as treeviewframe
import noval.menu as tkmenu
ID_GOTO_PATH = NewId()
ID_REMOVE_PATH = NewId()
ID_NEW_ZIP = NewId()
ID_NEW_EGG = NewId()
ID_NEW_WHEEL = NewId()
class PythonpathMixin:
"""description of class"""
def InitUI(self,hide_tree_root=False):
self.has_root = not hide_tree_root
self.treeview = treeviewframe.TreeViewFrame(self)
self.treeview.tree["show"] = ("tree",)
self.treeview.pack(side=tk.LEFT,fill="both",expand=1)
self.LibraryIcon = imageutils.load_image("","python/library_obj.gif")
self.treeview.tree.bind("<3>", self.OnRightClick, True)
right_frame = ttk.Frame(self)
self.add_path_btn = ttk.Button(right_frame, text=_("Add Path.."),command=self.AddNewPath)
self.add_path_btn.pack(padx=consts.DEFAUT_HALF_CONTRL_PAD_X,pady=(consts.DEFAUT_HALF_CONTRL_PAD_Y))
self.remove_path_btn = ttk.Button(right_frame, text=_("Remove Path..."),command=self.RemovePath)
self.remove_path_btn.pack(padx=consts.DEFAUT_HALF_CONTRL_PAD_X,pady=(consts.DEFAUT_HALF_CONTRL_PAD_Y))
self.add_file_btn = ttk.Menubutton(right_frame,
text=_("Add File..."),state="pressed")
self.add_file_btn.pack(padx=consts.DEFAUT_HALF_CONTRL_PAD_X,pady=(consts.DEFAUT_HALF_CONTRL_PAD_Y))
right_frame.pack(side=tk.LEFT,fill="y")
self.button_menu = self.CreatePopupMenu()
self.add_file_btn.config(menu = self.button_menu)
self.menu = None
def CreatePopupMenu(self):
menu = tkmenu.PopupMenu()
menuItem = tkmenu.MenuItem(ID_NEW_ZIP, _("Add Zip File"), None, None,None)
menu.AppendMenuItem(menuItem,handler=lambda:self.AddNewFilePath(ID_NEW_ZIP))
menuItem = tkmenu.MenuItem(ID_NEW_EGG, _("Add Egg File"), None, None,None)
menu.AppendMenuItem(menuItem,handler=lambda:self.AddNewFilePath(ID_NEW_EGG))
menuItem = tkmenu.MenuItem(ID_NEW_WHEEL, _("Add Wheel File"), None, None,None)
menu.AppendMenuItem(menuItem,handler=lambda:self.AddNewFilePath(ID_NEW_WHEEL))
return menu
def AddNewFilePath(self,id):
if id == ID_NEW_ZIP:
filetypes = [(_("Zip File") ,"*.zip"),]
title = _("Choose a Zip File")
elif id == ID_NEW_EGG:
filetypes = [(_("Egg File") , "*.egg"),]
title = _("Choose a Egg File")
elif id == ID_NEW_WHEEL:
filetypes = [(_("Wheel File") ,"*.whl"),]
title = _("Choose a Wheel File")
path = filedialog.askopenfilename(title=title ,
filetypes = filetypes,
master=self)
if not path:
return
self.AddPath(fileutils.opj(path))
def AddNewPath(self):
path = filedialog.askdirectory(title=_("Choose a directory to Add"))
if not path:
return
self.AddPath(fileutils.opj(path))
def AddPath(self,path):
if self.CheckPathExist(path):
messagebox.showinfo(_("Add Path"),_("Path already exist"),parent= self)
return
self.treeview.tree.insert(self.GetRootItem(),"end",text=path,image=self.LibraryIcon)
def OnRightClick(self, event):
if self.treeview.tree.selection()[0] == self.GetRootItem():
return
if self.menu is None:
self.menu = tkmenu.PopupMenu()
self.menu.Append(ID_GOTO_PATH, _("&Goto Path"),handler=lambda:self.TreeCtrlEvent(ID_GOTO_PATH))
self.menu.Append(ID_REMOVE_PATH, _("&Remove Path"),handler=lambda:self.TreeCtrlEvent(ID_REMOVE_PATH))
self.menu.tk_popup(event.x_root, event.y_root)
def TreeCtrlEvent(self,id):
'''
        Right-click handler for the tree control
'''
if id == ID_GOTO_PATH:
item = self.treeview.tree.selection()[0]
fileutils.safe_open_file_directory(self.treeview.tree.item(item,"text"))
return True
elif id == ID_REMOVE_PATH:
self.RemovePath()
return True
else:
return True
def GetRootItem(self):
if self.has_root:
root_item = self.treeview.tree.get_children()[0]
else:
root_item = ''
return root_item
def CheckPathExist(self,path):
root_item = self.GetRootItem()
items = self.treeview.tree.get_children(root_item)
for item in items:
if parserutils.ComparePath(self.treeview.tree.item(item,"text"),path):
return True
return False
def GetPathList(self):
path_list = []
root_item = self.GetRootItem()
items = self.treeview.tree.get_children(root_item)
for item in items:
path = self.treeview.tree.item(item,"text")
path_list.append(path)
return path_list
def RemovePath(self):
selections = self.treeview.tree.selection()
if not selections:
return
for item in selections:
self.treeview.tree.delete(item)
def ConvertPath(self,path):
sys_encoding = locale.getdefaultlocale()[1]
try:
return path.encode(sys_encoding)
except:
try:
return path.decode(sys_encoding)
except:
return path
| true
|
b7a5d614eea1a16eb1a164ee3c07ecbda71f7224
|
Python
|
robee/velocity-boilerplate-public
|
/models.py
|
UTF-8
| 1,806
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
"""DOCUMENTATION TODO"""
from settings import settings
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, DateTime, Boolean
engine = create_engine(settings['database_cred'], echo=False)
from sqlalchemy.ext.declarative import declarative_base
from utils.hasher import *
import logging
Base = declarative_base()
class User(Base):
"""DOCUMENTATION TODO"""
__tablename__ = 'users'
user_id = Column(Integer, primary_key=True)
username = Column(String(30), nullable=False)
email = Column(String(75), nullable=True)
password = Column(String(128), nullable=False)
account_type = Column(String(128), nullable=True) #Twitter, Facebook, or Local
details = Column(String(1000), nullable=True)
def __repr__(self):
return "<User('%s')>" % (self.username)
users_table = User.__table__
metadata = Base.metadata
def get_user(db, username=None, email=None, user_id=None):
    if username is not None:
        return db.query(User).filter_by(username=username).first()
    if email is not None:
        return db.query(User).filter_by(email=email).first()
    if user_id is not None:
        return db.query(User).filter_by(user_id=user_id).first()
    raise Exception("You didn't give any non-None arguments")
def create_user(username,email, password, account_type='Local', details=''):
new_user = User()
new_user.username = username
new_user.email = email
new_user.password = pass_hash(password)
new_user.account_type=account_type
new_user.details = details
return new_user
def create_all():
"""DOCUMENTATION TODO"""
metadata.create_all(engine)
def drop_all():
"""DOCUMENTATION TODO"""
metadata.drop_all(engine)
def commit(db, objs):
for obj in objs:
logging.info(obj)
db.add(obj)
db.commit()
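
# Usage sketch (hedged: assumes a configured database plus a session factory,
# e.g. sqlalchemy's sessionmaker; `Session` below is hypothetical):
# from sqlalchemy.orm import sessionmaker
# Session = sessionmaker(bind=engine)
# create_all()
# db = Session()
# commit(db, [create_user('jan', 'jan@example.com', 'secret')])
# print(get_user(db, username='jan'))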
| true
|
d80dfd23c1bf39a7a1187323e3cf2bd024bf51d5
|
Python
|
nischalshrestha/automatic_wat_discovery
|
/Notebooks/py/prabhatkumarsahu/titanic-data-survival-prediction/titanic-data-survival-prediction.py
|
UTF-8
| 7,946
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Any results you write to the current directory are saved as output.
# In[ ]:
train_data = pd.read_csv("../input/train.csv")
test_data= pd.read_csv("../input/test.csv")
# In[ ]:
train_data.head()
# In[ ]:
test_data.head()
# In[ ]:
# there are missing values in both datasets.
# there is no "Survived" column in test_data because that is what we have to predict!
# my main aim is to find the "Survived" value for each Passenger
# In[ ]:
# Before going there, let's analyse and visualise our data to get a feel of it.
# I need only useful features to be able to predict efficiently.
# Let's start from the first column:
# PassengerId: It is clearly of no use; just a serial no. Let's DROP it then.
train_data.drop(['PassengerId'], axis=1, inplace=True)
# In[ ]:
# Let's move on to the next feature 'Name'
# Useless feature quite obviously.
# Let's drop it
train_data.drop(['Name'], axis=1, inplace=True)
train_data.head()
# In[ ]:
# "Survived" == 0 indicates "DID NOT Survive"; 1 == "Survived"
# Now, we've looked at features up to Pclass; next is "Sex"
# In[ ]:
# There are many children, so let's study them separately.
# Convert "Sex" into "Person" column which can take values: "Male", "Female", "Child"
# Let's create a function for that
def what_person(passenger):
age,sex = passenger
if age <= 16:
return 'Child'
else:
return sex
# In[ ]:
# Let's "apply" now
train_data["Person"] = train_data[['Age','Sex']].apply(what_person, axis=1)
# axis=1 applies the function row by row (each row is passed to what_person)
# Drop "Sex" now, since it is redundant
train_data.drop(['Sex'], axis=1, inplace=True)
train_data.head()
# In[ ]:
train_data.info()
# In[ ]:
print("Missing Age values:", train_data['Age'].isnull().sum())
# In[ ]:
# Let's fill the missing Age values now
# Generate random numbers between mean-std & mean+std
mean = train_data['Age'].mean()
std = train_data['Age'].std()
r = np.random.randint(mean-std, mean+std)
train_data["Age"].fillna(r, inplace=True)
train_data.info()
# In[ ]:
# Let's look at next two features:
# SibSp: number of siblings/spouses on board
# Parch: number of parents/children on board
# We could reduce these to a single feature: "WithFamily"
# This would make our feature vector more efficient: dimensionality reduction!
train_data['WithFamily'] =train_data['SibSp'] + train_data['Parch']
train_data.drop(['SibSp','Parch'], axis=1, inplace=True)
train_data.head(10)
# In[ ]:
# Let's clean that!
# If "WithFamily" == 0, He was alone. Hence, value should be 0.
train_data['WithFamily'].loc[train_data['WithFamily'] > 0] = 1
train_data.head(10)
# In[ ]:
# Next feature is Ticket, which is useless again.lets Remove it!
train_data.drop(['Ticket'], axis=1, inplace=True)
# In[ ]:
test_data.info()
# In[ ]:
# Fare:
# Missing values only in test_df
test_data["Fare"].fillna(test_data["Fare"].median(), inplace=True)
# In[ ]:
# Convert from float to int
train_data['Fare'] = train_data['Fare'].astype(int)
test_data['Fare'] = test_data['Fare'].astype(int)
# In[ ]:
# Let's see if they vary with Survival chances
fare_notSurvived = train_data["Fare"][train_data["Survived"] == 0]
fare_survived =train_data['Fare'][train_data["Survived"] == 1]
print("Died: ", fare_notSurvived.mean())
print("Survived: ", fare_survived.mean())
# In[ ]:
train_data.head()
# In[ ]:
# Now, we've looked at "Survived", "Pclass", "Age", "Fare"
# Created two new features/columns "Person" "WithFamily"; also dropped some columns
# Let's look at Cabin now:
# In[ ]:
# Cabin is in the format: C85 where the first letter ('C', in this case) is the deck
# Deck seems to give out important info as compared to the room no.
# Let's extract all decks from Cabin; let's drop null values first!
deck = train_data['Cabin'].dropna()
deck.head()
# In[ ]:
floor = []
for level in deck:
floor.append(level[0])
# To visualise it, let's convert it into a DataFrame
df = pd.DataFrame(floor, columns=['Level'])
# In[ ]:
train_data.info()
# In[ ]:
# the 'Cabin' column has a lot of missing values.
# On top of that, there is just one value for deck 'T' which doesn't make a lot of sense.
# Filling 75% of the values on our own would affect prediction
# Hence, it is better to drop this column
train_data.drop('Cabin', axis=1, inplace=True)
train_data.head()
# In[ ]:
train_data.info()
# In[ ]:
# Just two missing values! Let's fill them with "S" (the most frequent)
train_data['Embarked'].fillna("S", inplace=True)
# In[ ]:
# Passengers that embarked at "S" had a lower survival rate; let's confirm that:
embark = train_data[['Embarked', 'Survived']].groupby(['Embarked']).mean()
embark
# In[ ]:
# Let's make our test_data compatible with train_data; since we're going to train our classifier on train_data
# In[ ]:
test_data.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
# Now, let's create Person for test_df:
test_data["Person"] =test_data[['Age','Sex']].apply(what_person, axis=1)
test_data.drop(['Sex'], inplace=True, axis=1)
# Now, let's create WithFamily for test_df:
test_data['WithFamily'] = test_data['SibSp'] + test_data['Parch']
test_data.drop(['SibSp','Parch'], axis=1, inplace=True)
test_data['WithFamily'].loc[test_data['WithFamily'] > 0] = 1
# In[ ]:
test_data.info()
# In[ ]:
print("Missing: ", test_data['Age'].isnull().sum())
# In[ ]:
# Let's fill in the missing Age values
mean = test_data['Age'].mean()
std = test_data['Age'].std()
r = np.random.randint(mean-std, mean+std)
test_data['Age'].fillna(r, inplace=True)
# Change its dataype to int
train_data['Age'] =train_data['Age'].astype(int)
test_data['Age'] = test_data['Age'].astype(int)
# In[ ]:
test_data.info()
# In[ ]:
# There is one last issue remaining before I can feed this dataset to an ML algorithm
# Embarked & Person need to be converted to numeric variables
# I'll use dummy variables:
# It is a variable that takes 0/1 indicating absence/presence of a particular category
# You can read more about it - https://en.wikipedia.org/wiki/Dummy_variable_(statistics)
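# A tiny illustration of get_dummies on a made-up mini-series:
print(pd.get_dummies(pd.Series(['S', 'C', 'S', 'Q'])))  # one 0/1 column per category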
# EMBARKED-
titanic_embarked = pd.get_dummies(train_data['Embarked'])
titanic_embarked.head()
# In[ ]:
train_data =train_data.join(titanic_embarked)
train_data.head()
# In[ ]:
# Person
titanic_person = pd.get_dummies(train_data['Person'])
titanic_person.head()
# In[ ]:
train_data = train_data.join(titanic_person)
# Let's remove Person/Embarked now
train_data.drop(['Person','Embarked'], axis=1, inplace=True)
train_data.head()
# In[ ]:
# Let's repeat the same procedure for test_data
test_embarked = pd.get_dummies(test_data['Embarked'])
test_data = test_data.join(test_embarked)
test_person = pd.get_dummies(test_data['Person'])
test_data = test_data.join(test_person)
test_data.drop(['Person','Embarked'], axis=1, inplace=True)
test_data.head()
# In[ ]:
# Now it's time to set up our training and test datasets:
x_train = train_data.drop(['Survived'], axis=1)
y_train = train_data['Survived']
x_test = test_data.drop(['PassengerId'], axis=1)
x_train.head()
# In[ ]:
from sklearn import svm
# In[ ]:
model = svm.SVC(kernel='linear', C=1, gamma=1)
# In[ ]:
model.fit(x_train, y_train)
# In[ ]:
prediction = model.predict(x_test)
# In[ ]:
prediction
# In[ ]:
model.score(x_train, y_train)
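# In[ ]:


# The training score above can be optimistic; a quick sanity check with
# 5-fold cross-validation (a sketch using sklearn's cross_val_score):
from sklearn.model_selection import cross_val_score
print(cross_val_score(model, x_train, y_train, cv=5).mean())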
# In[ ]:
# Let's finally submit !!!!
sub_file = pd.DataFrame({'PassengerId':test_data['PassengerId'], 'Survived':prediction})
sub_file.head()
# In[ ]:
sub_file.to_csv('result.csv', index=False)
# In[ ]:
| true
|
f5d2ff674d4008b56fbd4592a19b047c4bb82f5a
|
Python
|
CheolYongLee/jump_to_python
|
/Chapter_4/vartest_error.py
|
UTF-8
| 281
| 3.140625
| 3
|
[] |
no_license
|
# vartest_error.py
def vartest(a):
a = a + 1
vartest(3)
print(a)
# A parameter declared inside a function exists only inside that function;
# it cannot be used outside of it.
# Since no value is bound to a outside the function, an error occurs.
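# One fix (a sketch): return the computed value instead of relying on the
# function-local name.
def vartest_fixed(a):
    return a + 1
print(vartest_fixed(3))  # 4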
| true
|
b779d810e04a05d7a4a95614f6085a8c99a1a209
|
Python
|
sky-dream/LeetCodeProblemsStudy
|
/[1187][Hard][Make_Array_Strictly_Increasing]/Make_Array_Strictly_Increasing.py
|
UTF-8
| 1,680
| 3.484375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# leetcode time cost : 668 ms
# leetcode memory cost : 13.8 MB
# Time Complexity: O(M*N)
# Space Complexity: O(M*N)
# solution 1, DP
import bisect
class Solution:
def makeArrayIncreasing(self, arr1: [int], arr2: [int]) -> int:
        # `solution` maps the max value chosen after the previous step to the
        # minimal operation count so far; assume a sentinel -1 before index 0 of arr1
N1 = len(arr1)
N2 = len(arr2)
arr2.sort()
solution = {-1:0}
MAX_CNT = 2001 # max array length is 2000
for num in arr1:
new_solution = {}
for prev_max_num,op_cnt in solution.items():
# get the possible value can be used in current index of array1
rc_index = bisect.bisect_right(arr2,prev_max_num)
if rc_index!= N2:
rc_num = arr2[rc_index]
# use the min value
new_solution[rc_num] = min(new_solution.get(rc_num,MAX_CNT),op_cnt+1)
if num > prev_max_num:
# check use this num,keep the cnt, or replace it with rc_num
new_solution[num] = min(new_solution.get(num,MAX_CNT),op_cnt)
            # update `solution` once every possible value in arr2 has been checked
solution = new_solution
if solution:
return min(solution.values())
else:
return -1
def main():
arr1,arr2 = [1,5,3,6,7],[1,3,2,4] #expect is 1
obj = Solution()
res = obj.makeArrayIncreasing(arr1,arr2)
print("return value is ",res);
if __name__ =='__main__':
main()
| true
|
52291885fb56eb334b6616c30fc352ab1d6f235a
|
Python
|
rssalessio/PrivacyStochasticSystems
|
/limited_information.py
|
UTF-8
| 16,748
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Alessio Russo [alessior@kth.se]. All rights reserved.
#
# This file is part of PrivacyStochasticSystems.
#
# PrivacyStochasticSystems is free software: you can redistribute it and/or modify
# it under the terms of the MIT License. You should have received a copy of
# the MIT License along with PrivacyStochasticSystems.
# If not, see <https://opensource.org/licenses/MIT>.
#
import numpy as np
import cvxpy as cp
import scipy as sp
import scipy.special  # ensure the sp.special submodule is loaded
import dccp
from utils import sanity_check_probabilities, sanity_check_rewards, \
compute_KL_divergence_models, compute_stationary_distribution, \
build_markov_transition_density
eps = 1e-15
def limited_information_privacy_policies(P0: np.ndarray, P1: np.ndarray,
pi0: np.ndarray,
pi1: np.ndarray) -> float:
""" Computes 1/I_L(pi_0, pi_1) given pi_0 and pi_1
Parameters
----------
P0, P1 : np.ndarray
Numpy matrices containing the transition probabilities for model M0 and M1
Each matrix should have dimensions |actions|x|states|x|states|
pi0, pi1 : np.ndarray
Numpy matrices of dimensions |states|x|actions| containing the
policies pi0 and pi1
Returns
-------
1/I_F : float
Privacy level
"""
    # compute_stationary_distribution returns (xi, mu); we need the state-action part
    xi0, _ = compute_stationary_distribution(P0, pi0)
    xi1, _ = compute_stationary_distribution(P1, pi1)
return limited_information_privacy(P0, P1, xi0, xi1)
def limited_information_privacy(P0: np.ndarray, P1: np.ndarray,
xi0: np.ndarray, xi1: np.ndarray) -> float:
""" Computes 1/I_L(pi_0, pi_1) given xi_0 and xi_1
Parameters
----------
P0, P1 : np.ndarray
Numpy matrices containing the transition probabilities for model M0 and M1
Each matrix should have dimensions |actions|x|states|x|states|
xi0, xi1 : np.ndarray
Numpy matrices of dimensions |states|x|actions| containing the stationary distributions
over states and actions of the two models (M0 and M1)
Returns
-------
1/I_L : float
Privacy level
"""
P0, P1 = sanity_check_probabilities(P0, P1)
xi0, xi1 = np.array(xi0), np.array(xi1)
na, ns = P0.shape[0], P0.shape[1]
privacy = 0
for s in range(ns):
z = sp.special.rel_entr(np.sum(xi1[s, :]), np.sum(xi0[s, :]))
if z == np.infty:
print(
'An infinity was computed in a KL-Divergence. Check the first term: {}'
                .format(np.sum(xi1[s, :])))
z = 0
privacy -= z
for y in range(ns):
z = sp.special.rel_entr(xi1[s, :] @ P1[:, s, y],
xi0[s, :] @ P0[:, s, y])
if z == np.infty:
print(
'An infinity was computed in a KL-Divergence. Check the first term: {}'
.format(xi1[s, :] @ P1[:, s, y]))
z = 0
privacy += z
return 1 / privacy if not np.isclose(privacy, 0.) else np.infty
def limited_information_privacy_lb(P0: np.ndarray,
P1: np.ndarray,
initial_points: int = 1,
max_iterations: int = 30,
solver=cp.ECOS,
debug=False):
""" Computes the policies that achieves the best level of privacy in the
limited information setting
Parameters
----------
P0, P1 : np.ndarray
Numpy matrices containing the transition probabilities for models M0 and M1
Each matrix should have dimensions |actions|x|states|x|states|
initial_points : int, optional
Number of initial random points to use to solve the concave problem.
Default value is 1.
max_iterations : int, optional
Maximum number of iterations. Should be larger than initial_points.
Default value is 30.
solver : cvxpy.Solver, optional
Solver used to solve the problem. Default solver is ECOS
debug : bool, optional
If true, prints the solver output.
Returns
-------
I_L : float
Inverse of the privacy level
xi1, xi0 : np.ndarray
Stationary distributions over states and actions achieving the best
level of privacy
"""
P0, P1 = sanity_check_probabilities(P0, P1)
initial_points = int(initial_points) if initial_points >= 1 else 1
max_iterations = initial_points if initial_points > max_iterations else int(
max_iterations)
na, ns = P0.shape[0], P0.shape[1]
best_res, best_xi1, best_xi0 = np.inf, None, None
# Compute KL divergences
I = compute_KL_divergence_models(P0, P1)
# Loop through initial points and return best result
i = 0
n = 0
while i == 0 or (i < initial_points and n < max_iterations):
n += 1
gamma = cp.Variable(1)
xi0 = cp.Variable((ns, na), nonneg=True)
xi1 = cp.Variable((ns, na), nonneg=True)
        kl_div_stationary_dis = 0
for s in range(ns):
            kl_div_stationary_dis += cp.entr(cp.sum(xi1[s, :]))
# stationarity constraints
stationarity_constraint = 0
for a in range(na):
stationarity_constraint += xi1[:, a].T @ (P1[a, :, :] - np.eye(ns))
constraints = [stationarity_constraint == 0, cp.sum(xi1) == 1]
# Privacy constraints
privacy_constraint = 0
for s in range(ns):
constraints += [cp.sum(xi0[s, :]) == 1]
for y in range(ns):
privacy_constraint += cp.kl_div(
xi1[s, :] @ P1[:, s, y], xi0[s, :] @ P0[:, s, y]) + (
xi1[s, :] @ P1[:, s, y]) - (xi0[s, :] @ P0[:, s, y])
constraints += [privacy_constraint <= gamma]
        objective = gamma + kl_div_stationary_dis
# Solve problem
problem = cp.Problem(cp.Minimize(objective), constraints)
if not dccp.is_dccp(problem):
raise Exception('Problem is not Concave with convex constraints!')
try:
result = problem.solve(
method='dccp', ccp_times=1, verbose=debug, solver=solver)
except Exception as err:
continue
# Check if results are better than previous ones
if result[0] is not None:
i += 1
if result[0] < best_res:
best_res, best_xi1, best_xi0 = result[0], xi1.value, xi0.value
# Make sure to normalize the results
best_xi0 += eps
best_xi1 += eps
best_xi0 /= np.sum(best_xi0) if not np.isclose(np.sum(best_xi0), 0) else 1.
best_xi1 /= np.sum(best_xi1) if not np.isclose(np.sum(best_xi1), 0) else 1.
return best_res, best_xi1, best_xi0
def limited_information_privacy_utility(rho: float,
lmbd: float,
P0: np.ndarray,
P1: np.ndarray,
R0: np.ndarray,
R1: np.ndarray,
initial_points: int = 1,
max_iterations: int = 30,
solver=cp.ECOS,
debug: bool = False,
pi0: np.ndarray = None):
""" Optimize the privacy-utility value function over the two policies
in the limited information setting
Parameters
----------
rho : float
Weight given to policy pi_1 (1-rho for policy pi_0)
lmbd : float
Weight given to the privacy term
P0, P1 : np.ndarray
Numpy matrices containing the transition probabilities for model M0 and M1
Each matrix should have dimensions |actions|x|states|x|states|
R0, R1 : np.ndarray
Numpy matrices containing the rewards for model M0 and M1
Each matrix should have dimensions |states|x|actions|
initial_points : int, optional
Number of initial random points to use to solve the concave problem.
Default value is 1.
max_iterations : int, optional
Maximum number of iterations. Should be larger than initial_points.
Default value is 30.
solver : cvxpy.Solver, optional
Solver used to solve the problem. Default solver is ECOS
debug : bool, optional
If true, prints the solver output.
pi0 : np.ndarray, optional
If a policy pi0 is provided, then we optimize over pi1
the problem max_{pi1} V(pi1) - lambda I_F(pi0,pi1).
In this case rho is set to 1 for simplicity.
Returns
-------
I_L : float
Inverse of the privacy level
xi1, xi0 : np.ndarray
Stationary distributions over states and actions achieving the best
level of utility-privacy
"""
# Sanity checks
P0, P1 = sanity_check_probabilities(P0, P1)
R0, R1 = sanity_check_rewards(R0, R1)
initial_points = int(initial_points) if initial_points >= 1 else 1
max_iterations = initial_points if initial_points > max_iterations else int(
max_iterations)
if rho < 0 or rho > 1:
raise ValueError('Rho should be in [0,1]')
if lmbd < 0:
raise ValueError('Lambda should be non-negative')
na = P0.shape[0]
ns = P1.shape[1]
if pi0 is not None:
_xi0, _ = compute_stationary_distribution(P0, pi0)
rho = 1
best_res, best_xi1, best_xi0 = np.inf, None, None
# Loop through initial points and return best result
i = 0
n = 0
while i == 0 or (i < initial_points and n < max_iterations):
n += 1
# Construct the problem to find minimum privacy
gamma = cp.Variable(1, nonneg=True)
xi0 = cp.Variable((ns, na), nonneg=True) if pi0 is None else _xi0
xi1 = cp.Variable((ns, na), nonneg=True)
kl_div_stationary_dis = 0
for s in range(ns):
kl_div_stationary_dis += cp.kl_div(
cp.sum(xi1[s, :]), cp.sum(xi0[s, :])) + cp.sum(
xi1[s, :]) - cp.sum(xi0[s, :])
objective = gamma - lmbd * kl_div_stationary_dis
# stationarity constraints
stationarity_constraint0 = 0
stationarity_constraint1 = 0
for a in range(na):
stationarity_constraint0 += xi0[:, a].T @ (
P0[a, :, :] - np.eye(ns))
stationarity_constraint1 += xi1[:, a].T @ (
P1[a, :, :] - np.eye(ns))
constraints = [stationarity_constraint1 == 0, cp.sum(xi1) == 1]
if pi0 is None:
constraints += [cp.sum(xi0) == 1, stationarity_constraint0 == 0]
# Privacy-utility constraints
privacy_utility_constraint = 0
for s in range(ns):
for y in range(ns):
privacy_utility_constraint += lmbd * (
cp.kl_div(xi1[s, :] @ P1[:, s, y], xi0[s, :] @ P0[:, s, y])
+ (xi1[s, :] @ P1[:, s, y]) - (xi0[s, :] @ P0[:, s, y]))
for a in range(na):
privacy_utility_constraint -= (
rho * xi1[s, a] * R1[s, a] +
(1 - rho) * xi0[s, a] * R0[s, a])
constraints += [privacy_utility_constraint <= gamma]
# Solve problem
problem = cp.Problem(cp.Minimize(objective), constraints)
if not dccp.is_dccp(problem):
raise Exception('Problem is not Concave with convex constraints!')
try:
result = problem.solve(
method='dccp', ccp_times=1, verbose=debug, solver=solver)
except Exception as err:
continue
# Check if results are better than previous ones
if result[0] is not None:
i += 1
if result[0] < best_res:
best_res, best_xi1, best_xi0 = result[0], xi1.value, \
xi0.value if pi0 is None else xi0
# Make sure to normalize the results
best_xi0 += eps
best_xi1 += eps
best_xi0 /= np.sum(best_xi0) if not np.isclose(np.sum(best_xi0), 0) else 1.
best_xi1 /= np.sum(best_xi1) if not np.isclose(np.sum(best_xi1), 0) else 1.
return best_res, best_xi1, best_xi0
def limited_information_privacy_approximate_upper_lb(P0: np.ndarray,
P1: np.ndarray):
""" Computes a pair of policies that upper bounds the privacy lower bound
Parameters
----------
P0, P1 : np.ndarray
Numpy matrices containing the transition probabilities for models M0 and M1
Each matrix should have dimensions |actions|x|states|x|states|
Returns
-------
L : float
Upper bound of I_L
pi0, pi1 : np.ndarray
The computed policies
"""
P0, P1 = sanity_check_probabilities(P0, P1)
na = P0.shape[0]
ns = P1.shape[1]
gamma = cp.Variable(1, nonneg=True)
pi0 = cp.Variable((ns, na), nonneg=True)
pi1 = cp.Variable((ns, na), nonneg=True)
constraint = []
constraint_pi0 = [cp.sum(pi0[s, :]) == 1 for s in range(ns)]
constraint_pi1 = [cp.sum(pi1[s, :]) == 1 for s in range(ns)]
for s in range(ns):
Ds = 0.
for y in range(ns):
P1_pi1 = P1[:, s, y] @ pi1[s, :]
P0_pi0 = P0[:, s, y] @ pi0[s, :]
Ds += cp.kl_div(P1_pi1, P0_pi0) + P1_pi1 - P0_pi0
constraint += [Ds <= gamma]
constraints = constraint + constraint_pi0 + constraint_pi1
problem = cp.Problem(cp.Minimize(gamma), constraints)
result = problem.solve()
return result, pi0.value, pi1.value
def limited_information_lower_bound_IL(P0: np.ndarray, P1: np.ndarray,
pi0: np.ndarray, pi1: np.ndarray):
""" Computes E_x[sup_y d(P1^{pi1}(y'|x), P0^{pi0}(y'|x))], which
lower bounds I_L
Parameters
----------
P0, P1 : np.ndarray
Numpy matrices containing the transition probabilities for models M0 and M1
Each matrix should have dimensions |actions|x|states|x|states|
pi0, pi1 : np.ndarray
Numpy matrix of dimensions |states|x|actions| containing the
policies probabilities
Returns
-------
L : float
Lower bound of I_L
"""
P0, P1 = sanity_check_probabilities(P0, P1)
na = P0.shape[0]
ns = P1.shape[1]
P1_p1 = build_markov_transition_density(P1, pi1)
P0_p0 = build_markov_transition_density(P0, pi0)
_, mu1 = compute_stationary_distribution(P1, pi1)
d = sp.special.kl_div(P1_p1, P0_p0) + sp.special.kl_div(
1 - P1_p1, 1 - P0_p0)
return np.dot(mu1, np.max(d, axis=0))
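
# Usage sketch (values are illustrative; shapes follow the docstrings):
# P0 = np.array([[[0.9, 0.1], [0.2, 0.8]]])   # |A|=1, |S|=2
# P1 = np.array([[[0.7, 0.3], [0.4, 0.6]]])
# xi0 = np.array([[0.5], [0.5]])              # |S|x|A| stationary distributions
# xi1 = np.array([[0.6], [0.4]])
# print(limited_information_privacy(P0, P1, xi0, xi1))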
| true
|
32bce72358b0def0665d5eb7df59dd1b0ccedf54
|
Python
|
mansi-958/python-twoc
|
/Task1/Common divisor.py
|
UTF-8
| 162
| 3.8125
| 4
|
[] |
no_license
|
# Print every common divisor of a and b.
a = int(input("Enter the number: "))
b = int(input("Enter the other number: "))
if a<b:
num=a
else:
num=b
for i in range(1,num+1):
if a%i==b%i==0:
print(i)
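# Equivalent view (a sketch): the common divisors of a and b are exactly the
# divisors of gcd(a, b).
import math
g = math.gcd(a, b)
print([i for i in range(1, g + 1) if g % i == 0])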
| true
|
9e85b78630ba8f507ada6cb400b403a1a5c95897
|
Python
|
prkapadnis/Python
|
/Programs/sixth.py
|
UTF-8
| 230
| 3.796875
| 4
|
[] |
no_license
|
"""
Finding the third largest element in the list
"""
def finding_largest_third(myList):
myList = list(set(myList))
myList.sort()
return myList[-3]
myList = [2,2,3,1]
print(finding_largest_third(myList))
| true
|
a0149f5b670f740da178fbe440aeeeb029526ae4
|
Python
|
erezrubinstein/aa
|
/tests/integration_tests/core_tests/service_entity_logic_tests/implementation/white_space_helper_test_collection.py
|
UTF-8
| 2,880
| 2.546875
| 3
|
[] |
no_license
|
from core.common.business_logic.service_entity_logic.white_space_grid_helper import select_grid_cell_by_lat_long
from core.common.utilities.helpers import ensure_id
from tests.integration_tests.framework.svc_test_collection import ServiceTestCollection
from tests.integration_tests.utilities.data_access_misc_queries import insert_test_white_space_grid, insert_test_white_space_grid_cell
__author__ = 'erezrubinstein'
class WhiteSpaceHelperTestCollection(ServiceTestCollection):
def initialize(self):
self.user_id = 'test@nexusri.com'
self.source = "gp_14_test_collection.py"
self.context = {"user_id": self.user_id, "source": self.source}
def setUp(self):
# delete when starting
self.mds_access.call_delete_reset_database()
# create a base grid
self.grid_name = "10 Mile Squares"
self.grid_threshold = "GridDistanceMiles10"
self.grid_id = insert_test_white_space_grid(self.grid_threshold, self.grid_name)
def tearDown(self):
pass
# -------------------------------------- Begin Testing!! --------------------------------------
def test_select_grid_cells_by_lat_long(self):
# create three 10 mile grid cells. cell 1 and 2 intersect. cell 3 is very different
grid_cell_1_id = ensure_id(insert_test_white_space_grid_cell(str(self.grid_id), [[[1, 1], [0, 1], [0, 0], [1, 0], [1, 1]]], self.grid_threshold, self.grid_name))
grid_cell_2_id = ensure_id(insert_test_white_space_grid_cell(str(self.grid_id), [[[2, 2], [1, 2], [1, 1], [2, 1], [2, 2]]], self.grid_threshold, self.grid_name))
grid_cell_3_id = ensure_id(insert_test_white_space_grid_cell(str(self.grid_id), [[[5, 5], [4, 5], [4, 4], [5, 4], [5, 5]]], self.grid_threshold, self.grid_name))
# find the match for the first threshold
grid_match = select_grid_cell_by_lat_long(.3, .3, self.grid_threshold)
        # make sure only the first grid cell matches
self.test_case.assertEqual(grid_match, { "_id": grid_cell_1_id, "data": { "grid_id": str(self.grid_id), "threshold": self.grid_threshold, "grid_name": self.grid_name }})
# create one more grid and one more cell that intersects the point, but in a separate grid
second_grid_id = insert_test_white_space_grid("GridDistanceMiles50", "50 Mile Squares")
grid_cell_4_id = ensure_id(insert_test_white_space_grid_cell(str(second_grid_id), [[[10, 10], [0, 10], [0, 0], [10, 0], [10, 10]]], "GridDistanceMiles50", "50 Mile Squares"))
        # find the match for the second threshold
grid_match = select_grid_cell_by_lat_long(.3, .3, "GridDistanceMiles50")
        # make sure the cell from the 50-mile grid matches
self.test_case.assertEqual(grid_match, { "_id": grid_cell_4_id, "data": { "grid_id": str(second_grid_id), "threshold": "GridDistanceMiles50", "grid_name": "50 Mile Squares" }})
| true
|
15521def17128cd2244c648925d0bef69780c683
|
Python
|
WOC-BUG/machine-learning
|
/代码实例/KNN/sklearn实现KNN交叉验证.py
|
UTF-8
| 911
| 3.28125
| 3
|
[] |
no_license
|
# KNN cross-validation implemented with sklearn
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV  # grid search over parameters
# load the iris dataset
iris=datasets.load_iris()
x=iris.data
y=iris.target
# set the K values we want to search; 'n_neighbors' is sklearn's KNN parameter name
parameters={'n_neighbors':[1,3,5,7,9,11,13,15]}
knn = KNeighborsClassifier()  # note: no parameters are specified here
# use GridSearchCV to search for the best K value
# internally, this module evaluates every candidate K
clf=GridSearchCV(knn,parameters,cv=5)
clf.fit(x,y)
# print the best parameter and its accuracy
print("best score is: %.2f"%clf.best_score_,"best k: ",clf.best_params_)
# Never use the test data inside the cross-validation process.
# The test data is only ever for the final evaluation step,
# to check whether the model meets the bar for going live,
# but it must never take part in training the model.
| true
|
9de03140a29d2813149639d3c1c554067e96ca70
|
Python
|
sug5806/TIL
|
/Python/algorithm/find_max_value/find_max_value_recur.py
|
UTF-8
| 297
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
import random as rd
def re(li, leng):
if leng == 1:
return li[leng-1]
max_val = re(li, leng-1)
if max_val >= li[leng-1]:
return max_val
else:
        return li[leng-1]
li = []
for _ in range(10):
li.append(rd.randint(0,100))
print(li)
print(re(li, len(li)))
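# Quick check against the built-in (relies on the off-by-one fix above):
assert re(li, len(li)) == max(li)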
| true
|
f4d2c8c4efedd15502bef6275dd8c870f88dd00b
|
Python
|
jdswinbank/taenaris
|
/pysrc/example2/exercise_iter_svc.py
|
UTF-8
| 1,316
| 2.546875
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pyvo
import warnings
import sys
def main():
# Keep the output of this example "sane".
if not sys.warnoptions:
warnings.simplefilter("ignore")
# Query the registry to obtain obscore services which offer data in
# the radio realm
services=pyvo.registry.search(
servicetype="tap",
datamodel="obscore",
waveband="radio")
# Iterate over the list of obtained services to figure out if they
    # contain data for a specific position. Note the s_region column we
# use for this query. It contains a REGIONAL geometry (in this case
# an array defining a POLYGON) which we can compare with our given
# position.
for svc in services:
query="""
SELECT TOP 15 * FROM ivoa.obscore
WHERE 1=CONTAINS (
POINT('', 240.0, 47.0),
s_region )
"""
# Make the service object
obscore_svc=pyvo.dal.TAPService(svc.access_url)
# Run the query in synchronous mode
result=obscore_svc.run_sync(query)
# Send the resulting table to topcat for further investigation.
# Note our first usage of SAMP.
result.broadcast_samp("topcat")
if __name__=="__main__":
main()
| true
|
38a319b63f8d30fa8ae57b94d1f3a64fd4c16d65
|
Python
|
BartoszPiotrowski/deep-equivalence
|
/utils/predict.py
|
UTF-8
| 3,348
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import tensorflow as tf
import sys
from dataset import Dataset
class NetworkPredict:
def __init__(self, threads=1, seed=42):
# Create an empty graph and a session
graph = tf.Graph()
graph.seed = seed
self.session = tf.Session(
graph=graph,
config=tf.ConfigProto(
inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads))
def load(self, path):
# Load the metagraph
with self.session.graph.as_default():
self.saver = tf.train.import_meta_graph(path + '.meta')
# Attach the end points
self.tokens_ids_1 = tf.get_collection(
'end_points/tokens_ids_1')[0]
self.formulae_lens_1 = tf.get_collection(
'end_points/formulae_lens_1')[0]
self.tokens_ids_2 = tf.get_collection(
'end_points/tokens_ids_2')[0]
self.formulae_lens_2 = tf.get_collection(
'end_points/formulae_lens_2')[0]
self.predictions = tf.get_collection(
'end_points/predictions')[0]
self.logits = tf.get_collection(
'end_points/logits')[0]
# Load the graph weights
self.saver.restore(self.session, path)
def predict(self, dataset, discrete=True):
tokens_ids_1, formulae_lens_1, \
tokens_ids_2, formulae_lens_2, \
= dataset.test()
if discrete:
return self.session.run(self.predictions,
{self.formulae_lens_1: formulae_lens_1,
self.tokens_ids_1: tokens_ids_1,
self.formulae_lens_2: formulae_lens_2,
self.tokens_ids_2: tokens_ids_2})
else:
return self.session.run(self.logits,
{self.formulae_lens_1: formulae_lens_1,
self.tokens_ids_1: tokens_ids_1,
self.formulae_lens_2: formulae_lens_2,
self.tokens_ids_2: tokens_ids_2})[:,1]
if __name__ == "__main__":
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
help="Path to a trained model file.")
parser.add_argument(
"--pairs",
type=str,
help="File with pairs of formulae for which we want to predict its \
equivalence.")
parser.add_argument(
"--vocab",
default='data/vocab',
type=str,
help="Path to a vocabulary file.")
parser.add_argument(
'--discrete',
action='store_true',
help="By default the model returns probabilities; setting this flag \
causes returning 0s and 1s.")
args = parser.parse_args()
all_files = os.listdir(args.model)
[meta] = [f for f in all_files if '.meta' in f]
prefix = meta.split('.')[0]
model_with_prefix = args.model + '/' + prefix
network = NetworkPredict()
network.load(model_with_prefix)
test = Dataset(args.pairs, args.vocab, test=True)
p = network.predict(test, args.discrete)
for i in p:
if args.discrete:
print(i)
else:
print('%1.7f' % i)
| true
|
efbfc83fe1b3b0c8985e73d4336ce14c0fb67725
|
Python
|
DavidToca/programming-challanges
|
/leetcode/1539. Kth Missing Positive Number/solve2.py
|
UTF-8
| 322
| 2.828125
| 3
|
[] |
no_license
|
from typing import List

class Solution:
def findKthPositive(self, arr: List[int], k: int) -> int:
response = 0
j = 0
        i = 1
        while k != 0:
            if j >= len(arr) or i != arr[j]:
                response = i
                k -= 1
            else:
                j += 1
            i += 1
return response
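# A hedged alternative sketch, not part of the original solution: binary
# search on the count of missing numbers before each index, which is
# arr[m] - (m + 1) for a strictly increasing positive array. O(log n).
from typing import List

class SolutionBinarySearch:
    def findKthPositive(self, arr: List[int], k: int) -> int:
        lo, hi = 0, len(arr)
        while lo < hi:
            mid = (lo + hi) // 2
            if arr[mid] - (mid + 1) < k:   # fewer than k numbers missing before arr[mid]
                lo = mid + 1
            else:
                hi = mid
        return lo + k                      # k-th missing number lies after lo array elements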
| true
|
171f90b57be50bd0b2c2b420daaa553185acf0f9
|
Python
|
TangYaoHan/openCV
|
/04 SVM身高体重分类.py
|
UTF-8
| 1,500
| 3.9375
| 4
|
[] |
no_license
|
"""
身高体重 预测 男女
SVM:
1. SVM_create()
2. svm.train()
3. svm.predict()
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
def main():
    # 1. Prepare the data
rand_girl = np.array([[155, 48], [159, 50], [164, 53], [168, 56], [172, 60]])
rand_boy = np.array([[152, 53], [156, 55], [160, 56], [172, 64], [176, 65]])
# 2. label
label = np.array([[0], [0], [0], [0], [0], [1], [1], [1], [1], [1]])
# 3 data
data = np.vstack((rand_girl, rand_boy))
data = np.array(data, dtype="float32")
    # SVM needs a label for every sample (supervised learning)
    # [155, 48] -> 0 girl, [152, 53] -> 1 boy
    # 4. Train
    svm = cv2.ml.SVM_create()  # ml: machine learning; SVM_create() builds a support vector machine
    # set the hyperparameters
    svm.setType(cv2.ml.SVM_C_SVC)  # SVM type
    svm.setKernel(cv2.ml.SVM_LINEAR)  # linear kernel
    svm.setC(0.01)  # penalty (regularization) parameter C
result = svm.train(data, cv2.ml.ROW_SAMPLE, label)
    print(result)  # bool: True if training succeeded, False otherwise
    # predict (sanity-check the model)
    pt_data = np.vstack(([167, 55], [162, 57]))  # row 1: girl (0), row 2: boy (1)
pt_data = np.array(pt_data, dtype="float32")
par1, par2 = svm.predict(pt_data)
print(par1, "\n", par2)
    # par1: the return flag (retval) from cv2's predict
    # par2: the predicted labels
if __name__ == "__main__":
main()
| true
|
6f86e466dbd624773ada4819cb376ea28b81688e
|
Python
|
SavonEvgeniy/Skill_Factory_19.2.3
|
/first_test.py
|
UTF-8
| 1,085
| 3.203125
| 3
|
[] |
no_license
|
from app.calculator import Calculator
class TestCalc:
def setup(self):
self.calc = Calculator
    def test_multiply_calculate_correctly(self):  # testing multiplication
assert self.calc.multiply(self, 2, 2) == 4
def test_multiply_calculate_failed(self):
assert self.calc.multiply(self, 2, 2) == 5
    def test_division_calculate_correctly(self):  # testing division
assert self.calc.division(self, 4, 2) == 2
def test_division_calculate_failed(self):
assert self.calc.division(self, 6, 2) == 2
    def test_subtraction_calculate_correctly(self):  # testing subtraction
assert self.calc.subtraction(self, 4, 2) == 2
def test_subtraction_calculate_failed(self):
assert self.calc.subtraction(self, 6, 2) == 2
    def test_adding_calculate_correctly(self):  # testing addition
assert self.calc.adding(self, 4, 2) == 6
def test_adding_calculate_failed(self):
assert self.calc.adding(self, 6, 2) == 6
| true
|
99a980bcf823f16c8f94ef85d0b532b8a08c466e
|
Python
|
MostafaNabieh/Computer-Vision-Object-Detection-with-OpenCV-and-Python
|
/face detection.py
|
UTF-8
| 598
| 2.6875
| 3
|
[] |
no_license
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
face_cascade=cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
image = cv2.imread("google.jpg")
fix_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(fix_image)
faces = face_cascade.detectMultiScale(fix_image, 1.3, 2)  # scaleFactor=1.3, minNeighbors=2 (quick check; detect_face below redoes this)
def detect_face(fix_image):
face_rects=face_cascade.detectMultiScale(fix_image)
for (x,y,w,h) in face_rects:
cv2.rectangle(fix_image, (x,y), (x+w,y+h), (255,0,0),10)
return fix_image
result = detect_face(fix_image)
plt.imshow(result)
plt.show()
| true
|
1ab49f6c1e7c1ad57bf66fdc3963287a4cd167d8
|
Python
|
takavarasha/cerf-projects-scraper
|
/utils.py
|
UTF-8
| 2,467
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
""" Utility functions """
import hashlib
import requests
import datetime
import sys
import sqlite3 as lite
def date_from_iso_date(s):
"""Construct a date from an iso date string.
Supports iso date of the form YYYY-MM-DD.
Ignores any chars after the date part.
"""
return datetime.date(year=int(s[0:4]), month=int(s[5:7]), day=int(s[8:10]))
def generate_hash(filename):
"""Generate hash of a file.
"""
h = hashlib.sha1()
with open(filename, 'rb') as f:
buf = f.read()
h.update(buf)
return h.hexdigest()
def download_file(url, local_filename):
"""Downloads a file.
"""
if not local_filename:
local_filename = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return local_filename
def progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
try:
format_str = "{0:." + str(decimals) + "f}"
percents = format_str.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
        sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
    except ZeroDivisionError:
        pass  # a zero total would divide by zero; just skip drawing
def db_create_connection(database):
"""Returns a connection to the application sqlite database
Returns:
:rtype : sqlite3.Connection
:return : A connection to the application's sqlite database
"""
db = lite.connect(database=database)
db.text_factory = str
db.isolation_level = None
return db
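# Hedged usage sketch for progress(), not part of the original module:
# if __name__ == '__main__':
#     import time
#     total = 50
#     for i in range(1, total + 1):
#         time.sleep(0.02)  # simulate work
#         progress(i, total, prefix='Scraping', suffix='done', bar_length=40)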
| true
|
d9ab6be2e567054833fbcd1904cbd899f73a0553
|
Python
|
krisbb/NetworkProgramming
|
/lab2/stmp/smtpClient.py
|
UTF-8
| 3,658
| 2.859375
| 3
|
[] |
no_license
|
import socket
import json
import base64
LENGTHOFMESSAGE = 512
class ClientSocket:
def __init__(self, host, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.address = host
self.port = port
def close(self):
self.sock.close()
def ehlo(self, host):
template = 'EHLO {}'.format(host)
print('Sending -> {}'.format(template))
self.send(template)
def auth(self):
template = 'AUTH LOGIN'
print('Sending -> {}'.format(template))
self.send(template)
def mailFrom(self, sender):
template = 'MAIL FROM:<{}>'.format(sender)
print('Sending -> {}'.format(template))
self.send(template)
def rcptTo(self, recipient):
template = 'RCPT TO:<{}>'.format(recipient)
print('Sending -> {}'.format(template))
self.send(template)
def data(self):
template = 'DATA'
print('Sending -> {}'.format(template))
self.send(template)
def sendMail(self,mail):
encodedMsg = str.encode(mail)
howMuchWasSent = self.sock.send(encodedMsg + b'\r\n' + str.encode('.') + b'\r\n')
# print(howMuchWasSent)
if howMuchWasSent == 0:
raise RuntimeError("socket connection broken")
def quit(self):
template = 'QUIT'
print('Sending -> {}'.format(template))
self.send(template)
def connect(self):
        self.sock.connect((self.address, self.port))  # use the stored address, not module globals
self.sock.setblocking(True)
def receive(self):
data = self.sock.recv(LENGTHOFMESSAGE)
return data.decode()
def multiReceive(self):
wholeString = ''
while True:
data = self.sock.recv(LENGTHOFMESSAGE)
wholeString += data.decode()
if '\r\n.\r\n' in data.decode():
wholeString = wholeString[:len(wholeString)-3]
break
return wholeString
def send(self, msg):
encodedMsg = str.encode(msg)
howMuchWasSent = self.sock.send(encodedMsg + b'\r\n')
#print(howMuchWasSent)
if howMuchWasSent == 0:
raise RuntimeError("socket connection broken")
FinalMail = 'From: {}\nTo: {}\nSubject: {}\n{}\n'
if __name__ == '__main__':
json_config = {}
with open('config.json') as file:
json_config = json.load(file)
login = json_config['credentials']['login']
passwd = json_config['credentials']['password']
login64 = base64.encodebytes(str.encode(login))
passwd64 = base64.encodebytes(str.encode(passwd))
host = json_config['address']['host']
port = json_config['address']['port']
clientSock = ClientSocket(host, port)
list_output = ''
new_list_output = ''
try:
clientSock.connect()
print(clientSock.receive())
clientSock.ehlo(host)
print(clientSock.receive())
clientSock.auth()
print(clientSock.receive())
        clientSock.send(login64.decode()[:-1])   # [:-1] drops the newline encodebytes appends
        print(clientSock.receive())
        clientSock.send(passwd64.decode()[:-1])  # same for the password
print(clientSock.receive())
clientSock.mailFrom(login)
print(clientSock.receive())
clientSock.rcptTo(login)
print(clientSock.receive())
clientSock.data()
print(clientSock.receive())
clientSock.sendMail(FinalMail.format(login, login, 'test', 'test'))
print(clientSock.receive())
clientSock.quit()
print(clientSock.receive())
except Exception as e:
print(e.args)
finally:
clientSock.close()
| true
|
52db3a55e587d61c0ec6608142a46c23d236313a
|
Python
|
sudo-slatin01/Python-sudo
|
/Python3.py
|
UTF-8
| 1,073
| 4.34375
| 4
|
[] |
no_license
|
import random
"""
Необходимо определить индексы элементов списка, значение которых не меньше заданного минимума и не больше заданного максимума.
Пусть исследуемый массив (список в Python) заполняется случайными числами в диапазоне от 0 до 99 (включительно) и состоит из 100 элементов.
Далее минимум и максимум для поиска значений задается пользователем.
"""
list1 = []
for i in range(100):
    list1.append(random.randint(0, 99))
print("Enter the minimum: ")
low = int(input())
print("Enter the maximum: ")
high = int(input())
result = []
index = 0
for x in list1:
    if low <= x <= high:  # "not less than the minimum and not greater than the maximum"
        result.append(index)
    index += 1
print(f'Source array: {list1}')
print(f'Indexes of the matching elements: {result}')
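# A hedged equivalent sketch with enumerate, for comparison:
# result = [i for i, x in enumerate(list1) if low <= x <= high]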
| true
|
6c4bc5f696ac00c6c37d018231f58505f6396e15
|
Python
|
hiracse091/BanglaWordCloud
|
/main.py
|
UTF-8
| 2,705
| 2.71875
| 3
|
[] |
no_license
|
import codecs
from os import path
from tokenizer import tokenize
from word_tokenize_bn import *
from stemmer import *
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from quadtree import *
from utils import *
lan_bn = [ '০১২৩৪৫৬৭৮৯', ',.;:!?-', 'অআইঈউঊএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহড়ঢ়য়ৎ়◌াি◌ী◌ু◌ূ◌ৃেৈোৌ◌্' ]
d = path.dirname(__file__)
def process_text(text):
wordsList = word_tokenize(text)
words = {}
for word in wordsList:
# remove stopwords
if word not in stopwords:
# print('adding word ' + word)
words[word] = wordsList[word]
# else:
# print('deleting ' + word)
#print('stem of '+word +' is ' +findStem(word))
# print(len(wordsList))
# print(len(words))
print(sorted(words.items(), key=lambda kv: kv[1], reverse=True))
finalListWord = dict()
for word in words:
stemWord = findStem(word)
if stemWord in finalListWord:
finalListWord[stemWord] = finalListWord[stemWord] + words[word]
else:
finalListWord[stemWord] = words[word]
    sortedList = sorted(finalListWord.items(), key=lambda kv: kv[1], reverse=True)
    # (word, count) pairs, most frequent first
    return sortedList  # this return was commented out, so process_text returned None
stopwords = open(d+ '/resources/stopwords-bn.txt', encoding='utf-8').read()
#print(stopwords)
text = open(d + '/resources/islamic3_bn.txt', encoding='utf-8').read()
tokens = tokenize(text, lan_bn)
rootFilePath = d+'/resources/RootFile.txt'
stemmedWords = loadStemmedWords(rootFilePath)
wordList = process_text(text)
draw_text(wordList)
# process words
# import numpy as np
# from PIL import Image
# from PIL import ImageFont
# from PIL import ImageDraw
# # Image size
# width = 600
# height = 300
# channels = 3
#
# # Create an empty image
# #img = np.zeros((height, width, channels), dtype=np.uint8)
# img = Image.new('RGB', (width, height), color = 'white')
# img.save('test.jpg')
# img = Image.open('test.jpg')
# draw = ImageDraw.Draw(img)
# font = ImageFont.truetype("C:\Windows\Fonts\Siyamrupali.ttf", 40, encoding="utf-8")
# text = u"ফসল"
# #.encode('UTF-8')
# w, h = font.getsize(text)
# draw.text(((width-w)/2,(height-h)/2), text , 'red',font=font)
# img.save('test.jpg')
| true
|
f4e89743fc8cb1dbf6e8155c04afdacd0fb54dfc
|
Python
|
cormie45/NHL_League_Simulator
|
/tests/goal_test.py
|
UTF-8
| 733
| 2.96875
| 3
|
[] |
no_license
|
import unittest
from models.goal import Goal
from models.match import Match
from models.player import Player
class TestGoal(unittest.TestCase):
def setUp(self):
self.match = Match('team_a', 1, 2, 3, 6, 'team_b', 2, 0, 1, 3, 'team_a')
self.player = Player('steven', 'cormack', 37, 'team_a', 'center', 12)
self.period = 2
self.goal = Goal(self.match, self.player, self.period)
def test_goal_has_player(self):
self.assertEqual('steven cormack', f"{self.goal.player.first_name} {self.goal.player.last_name}")
def test_goal_has_match(self):
self.assertEqual('team_a', self.goal.match.home_team)
def test_goal_has_period(self):
        self.assertEqual(2, self.goal.period)  # period is stored as an int, not a string
| true
|
f436a18a824d409ce788de99b5a9964bc62c848f
|
Python
|
tratatapewpew/Queue
|
/Tests/test_queue_constructor.py
|
UTF-8
| 414
| 2.84375
| 3
|
[] |
no_license
|
__author__ = 'Igor Barulin'
import unittest
from queue import Queue
class TestQueueConstructor(unittest.TestCase):
def testQueueConstructorZero(self):
with self.assertRaises(BaseException):
queue = Queue(0)
def testQueueConstructorStr(self):
with self.assertRaises(BaseException):
queue = Queue("1")
def testQueueConstructorFloat(self):
with self.assertRaises(BaseException):
queue = Queue(1.0)
| true
|
3940fb025457aad3b61ce00a9d9a49f846efc344
|
Python
|
CarlosValadez/AVANCE-PIA
|
/PrincipalPya.py
|
UTF-8
| 4,010
| 3.609375
| 4
|
[] |
no_license
|
# PrincipalPIA.py (a bare module name here would raise NameError, so keep it as a comment)
import csv
import datetime
# Used to work with regular expressions.
import re
# Needed to interact with the operating system.
import os
# Import the classes from clasePIA.
from clasePIA import Contacto
# Import a helper that extracts attributes from an object.
from operator import attrgetter
# Function that reports how many elements the collection holds.
def NumdeElementos():
    txt = "The collection has {} elements"
    print(txt.format(len(Contactos)))
def BscTelefono(telabuscar):
coincidencia=False
for contacto in Contactos:
if (contacto.TELEFONO==telabuscar):
coincidencia=True
break
return coincidencia
def BscContacto(telabuscar):
contador=-1
indice_retorno=-1
for contacto in Contactos:
contador+=1
if (contacto.TELEFONO==telabuscar):
indice_retorno=contador
break
return indice_retorno
Contactos = []
# Declare the list that will store the objects; it starts empty.
NumdeElementos()
# Objects will be appended to this list.
Contactos.append(Contacto("01CV","Carlos Valadez","carlosvz@unal.edu.mx",8126432187,datetime.date(year=2000,month=4,day=10),1700))
Contactos.append(Contacto("02FE","Franco Escalon","fcoescalon@unal.edu.mx",8113459378,datetime.date(year=2001,month=7,day=12),1900))
NumdeElementos()
# Define a helper with a lambda expression to keep the code short.
LimPantalla = lambda: os.system('cls')
# Validates text against a regular expression.
# _txt is the text to validate.
# _regex is the regular-expression pattern to validate against.
def RegEx(_txt,_regex):
    coincidencia=re.match(_regex, _txt)
    return bool(coincidencia)
def principal():
    while (True):
        LimPantalla()
        print("CONTACT LIST")
        print(" ")
        print("[1] Add a contact.")
        print("[2] Search for a contact.")
        print("[3] Delete a contact.")
        print("[4] Show contacts.")
        print("[5] Serialize data.")
        print("[0] Exit.")
        opcion_elegida = input("What do you want to do? > ")
        if RegEx(opcion_elegida,"^[123450]{1}$"):
            if opcion_elegida=="0":
                print("THANK YOU FOR USING THE PROGRAM")
                break
            if opcion_elegida=="1":
                print("Call the procedure for this action")
            if opcion_elegida=="2":
                print("You selected the Search Contact option")
                Telefono=int(input("Enter the phone number to search: "))
                indice_obtenido=BscContacto(Telefono)  # the function defined above is BscContacto, not BuscarContacto
                if indice_obtenido==-1:
                    print("The object was not found")
                else:
                    print(Contactos[indice_obtenido].TELEFONO)
                    print(Contactos[indice_obtenido].NOMBRE)
                    print(Contactos[indice_obtenido].CORREO)
            if opcion_elegida=="3":
                print("Call the procedure for this action")
            if opcion_elegida=="4":
                print("Showing contacts")
                # Sort order.
                Contactos.sort(key=attrgetter("TELEFONO"),reverse=False)
                # Sequential sweep.
                for contacto in Contactos:
                    print("------------------------------------------")
                    print(contacto.NICKNAME)
                    print(contacto.NOMBRE)
                    print(contacto.CORREO)
                    print(contacto.TELEFONO)
                    print(contacto.FECHANACIMIENTO)
                    print(contacto.GASTO)
            if opcion_elegida=="5":
                print("Call the procedure for this action")
            input("Press enter to continue...")
        else:
            print("That answer is not valid.")
            input("Press enter to continue...")
principal()
| true
|
addaba862c07702a6bf0993e2d3db1acb2f05d7e
|
Python
|
scikit-rf/scikit-rf
|
/skrf/media/freespace.py
|
UTF-8
| 9,445
| 2.90625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
freespace (:mod:`skrf.media.freespace`)
========================================
A plane-wave (TEM Mode) in Freespace.
Represents a plane-wave in a homogeneous freespace, defined by
the space's relative permittivity and relative permeability.
.. autosummary::
:toctree: generated/
Freespace
"""
from scipy.constants import epsilon_0, mu_0
import warnings
from .media import Media
from ..data import materials
from ..constants import NumberLike
from typing import Union, TYPE_CHECKING
from numpy import real, sqrt, ones
if TYPE_CHECKING:
from .. frequency import Frequency
class Freespace(Media):
r"""
A plane-wave (TEM Mode) in Freespace.
A Freespace media can be constructed in two ways:
    * from complex, relative permittivity and permeability OR
    * from real relative permittivity and permeability with loss tangents.
    There is also a method to initialize from an
existing distributed circuit, appropriately named
:func:`Freespace.from_distributed_circuit`
Parameters
----------
frequency : :class:`~skrf.frequency.Frequency` object
frequency band of this transmission line medium
z0_port : number, array-like, or None
`z0_port` is the port impedance for networks generated by the media.
If `z0_port` is not None, the networks generated by the media are
renormalized (or in other words embedded) from the characteristic
impedance z0 of the media to `z0_port`.
Else if `z0_port` is None, the networks port impedances will be the raw
characteristic impedance z0 of the media.
(Default is None)
z0_override : number, array-like, or None
`z0_override` override the characteristic impedance for the media.
If `z0_override` is not None, the networks generated by the media have
their characteristic impedance `z0` overrided by `z0_override`.
(Default is None)
z0 : number, array-like, or None
deprecated parameter, alias to `z0_override` if `z0_override` is None.
        Emits a deprecation warning.
ep_r : number, array-like
complex relative permittivity. negative imaginary is lossy.
mu_r : number, array-like
complex relative permeability. negative imaginary is lossy.
ep_loss_tan : None, number, array-like
        electric loss tangent (of the permittivity).
If not None, imag(ep_r) is ignored.
mu_loss_tan : None, number, array-like
magnetic loss tangent (of the permeability).
If not None, imag(mu_r) is ignored.
rho : number, array-like, string or None
resistivity (ohm-m) of the conductor walls. If array-like
must be same length as frequency. if str, it must be a key in
:data:`skrf.data.materials`.
Default is None (lossless).
\*args, \*\*kwargs : arguments and keyword arguments
Examples
--------
>>> from skrf.media.freespace import Freespace
>>> from skrf.frequency import Frequency
>>> f = Frequency(75,110,101,'ghz')
>>> Freespace(frequency=f, ep_r=11.9)
>>> Freespace(frequency=f, ep_r=11.9-1.1j)
>>> Freespace(frequency=f, ep_r=11.9, ep_loss_tan=.1)
>>> Freespace(frequency=f, ep_r=11.9-1.1j, mu_r = 1.1-.1j)
"""
def __init__(self, frequency: Union['Frequency', None] = None,
z0_port: Union[NumberLike, None] = None,
z0_override: Union[NumberLike, None] = None,
z0: Union[NumberLike, None] = None,
ep_r: NumberLike = 1+0j, mu_r: NumberLike = 1+0j,
ep_loss_tan: Union[NumberLike, None] = None,
mu_loss_tan: Union[NumberLike, None] = None,
rho: Union[NumberLike, str, None] = None,
*args, **kwargs):
Media.__init__(self, frequency = frequency,
z0_port = z0_port, z0_override = z0_override, z0 = z0)
self.ep_r = ep_r
self.mu_r = mu_r
self.rho = rho
self.ep_loss_tan = ep_loss_tan
self.mu_loss_tan = mu_loss_tan
def __str__(self) -> str:
f = self.frequency
output = 'Freespace Media. %i-%i %s. %i points'%\
(f.f_scaled[0], f.f_scaled[-1], f.unit, f.npoints)
return output
def __repr__(self) -> str:
return self.__str__()
@property
def ep(self) -> NumberLike:
r"""
Complex dielectric permittivity.
If :math:`\tan\delta_e` is not defined:
.. math::
\varepsilon = \varepsilon_0 \varepsilon_r
otherwise,
.. math::
\varepsilon = \varepsilon_0 \Re[\varepsilon_r] (1 - j\tan\delta_e)
where :math:`\tan\delta_e` is the electric loss tangent.
Returns
-------
ep : number or array-like
Complex dielectric permittivity in F/m.
"""
if self.ep_loss_tan is not None:
ep_r = real(self.ep_r)*(1 - 1j*self.ep_loss_tan)
else:
ep_r = self.ep_r
return ep_r*epsilon_0
@property
def mu(self) -> NumberLike:
r"""
        Complex magnetic permeability.
If :math:`\tan\delta_m` is not defined:
.. math::
\mu = \mu_0 \mu_r
otherwise,
.. math::
\mu = \mu_0 \Re[\mu_r] (1 - j\tan\delta_m)
where :math:`\tan\delta_m` is the magnetic loss tangent.
Returns
-------
mu : number
Complex permeability in H/m.
"""
if self.mu_loss_tan is not None:
mu_r = real(self.mu_r)*(1 -1j*self.mu_loss_tan)
else:
mu_r = self.mu_r
return mu_r*mu_0
@classmethod
def from_distributed_circuit(cls, dc, *args, **kwargs) -> Media:
r"""
Initialize a freespace from :class:`~skrf.media.distributedCircuit.DistributedCircuit`.
Parameters
----------
dc: :class:`~skrf.media.distributedCircuit.DistributedCircuit`
a DistributedCircuit object
\*args, \*\*kwargs :
            passed to :func:`Freespace.__init__`
Notes
-----
Here are the details::
w = dc.frequency.w
z= dc.Z/(w*mu_0)
y= dc.Y/(w*epsilon_0)
ep_r = -1j*y
mu_r = -1j*z
See Also
--------
skrf.media.distributedCircuit.DistributedCircuit
"""
w = dc.frequency.w
z= dc.Z/(w*mu_0)
y= dc.Y/(w*epsilon_0)
kw={}
kw['ep_r'] = -1j*y
kw['mu_r'] = -1j*z
kwargs.update(kw)
return cls(frequency=dc.frequency, *args, **kwargs)
@property
def rho(self) -> NumberLike:
"""
Conductivity in ohm*m.
Parameters
----------
val : float, array-like or str
the resistivity in ohm*m. If array-like must be same length
as self.frequency. if str, it must be a key in
:data:`~skrf.data.materials`.
Examples
--------
>>> wg.rho = 2.8e-8
>>> wg.rho = 2.8e-8 * ones(len(wg.frequency))
>>> wg.rho = 'al'
>>> wg.rho = 'aluminum'
"""
return self._rho
@rho.setter
def rho(self, val):
if isinstance(val, str):
self._rho = materials[val.lower()]['resistivity(ohm*m)']
else:
self._rho=val
@property
def ep_with_rho(self) -> NumberLike:
r"""
Complex permittivity with resistivity absorbed into its imaginary component.
.. math::
\varepsilon - j \frac{1}{\rho\omega}
See Also
--------
rho
ep
"""
if self.rho is not None:
return self.ep -1j/(self.rho*self.frequency.w)
else:
return self.ep
@property
def gamma(self) -> NumberLike:
r"""
Propagation Constant, :math:`\gamma`.
Defined as,
.. math::
\gamma = \sqrt{ Z^{'} Y^{'}}
Returns
-------
gamma : npy.ndarray
Propagation Constant,
Note
----
The components of propagation constant are interpreted as follows:
* positive real(gamma) = attenuation
* positive imag(gamma) = forward propagation
"""
ep = self.ep_with_rho
return 1j*self.frequency.w * sqrt(ep*self.mu)
@property
def z0_characteristic(self) -> NumberLike:
r"""
Characteristic Impedance, :math:`z_0`.
.. math::
Z_0 = \sqrt{ \frac{Z^{'}}{Y^{'}}}
Returns
-------
z0_characteristic : npy.ndarray
Characteristic Impedance in units of ohms
"""
ep = self.ep_with_rho
return sqrt(self.mu/ep)*ones(len(self))
def plot_ep(self):
"""
Plot the real and imaginary part of the complex permittivity.
"""
self.plot(self.ep_r.real, label=r'ep_r real')
self.plot(self.ep_r.imag, label=r'ep_r imag')
def plot_mu(self):
"""
Plot the real and imaginary part of the complex permeability.
"""
self.plot(self.mu_r.real, label=r'mu_r real')
self.plot(self.mu_r.imag, label=r'mu_r imag')
def plot_ep_mu(self):
"""
        Plot the real and imaginary parts of both the complex permittivity and permeability.
"""
self.plot_ep()
self.plot_mu()
| true
|
f78441ad843f1e3b196dd32381d21ca1bb9a5c69
|
Python
|
natp75/homework_5
|
/homework_5/homework_5_6.py
|
UTF-8
| 1,188
| 3.4375
| 3
|
[] |
no_license
|
# Create (by hand, not in code) a text file where each line describes a school
# subject and the lecture, practical and lab classes it has, with their counts.
# Not every subject has to have every class type. Build a dictionary with each
# subject name and its total number of classes, then print the dictionary.
result = {}
with open('test_5.txt') as file:
file_lines = file.readlines()
for line in file_lines:
data = line.split()
hours = 0
for elem in data[1:]:
if elem != '-':
num = '0'
for i in elem:
if i.isdigit():
num += i
else:
break
hours += int(num)
result.update({data[0].strip(':'): hours})
print(result)
| true
|
8dbeda7f6e0d192eccf6b8186c89a7aa9a3ec088
|
Python
|
harleenkbhatia/Python
|
/pandas_task.py
|
UTF-8
| 1,693
| 3.34375
| 3
|
[] |
no_license
|
import pandas as pd
#series dataframe
data=pd.read_csv('C:/Users/Jagdeep/Downloads/datasets_527325_1205308_Time.csv')
print(data.tail())
data['negative'] = data['negative'].apply(lambda x: 0 if x == '' else x)    # 'nagative' was a typo; fill blanks with 0
data['confirmed'] = data['confirmed'].apply(lambda x: 1 if x == '' else x)  # presumably meant to fill 'confirmed', not overwrite 'negative'
'''
series=pd.Series([1,24,32,2,13,1,33],index=[10,20,30,40,50,60,70],name="Values")
print(series)
series=series.apply (lambda num: num**2 )
print(series)
#in form of dictionaries
df=pd.DataFrame({"a":[1,2,3],'b':[4,5,6],'c':[7,8,9]},index=['first','second','third'])
print(df)
print(df['b'][2])
print(df.iloc[0,:])
print(df.loc['third',:])
print(df.iloc[1,:])
print(df.loc['second':'third','a':'b'])
#in form of matrices
df2=pd.DataFrame([[1,4,5],[2,5,6],[4,8,9]],index=['first','second','third'],columns=['a','b','c'])
print(df2)
print(type(df['b']))#series data
#collection of multiple series is called data frame
print(df.head(1))  # when you want rows from the top
print(df.tail(1))  # when you want to check from the end
print(df.shape)
print(df.drop(['a','c'],axis=1))#for deleting
print(df.drop("third"))
df['a'][0]=3
print(df)
print(df.drop_duplicates(['a'])) #drops the duplicate one
#used to store data in excel
df.to_csv("data_df.csv")
#df.to_excel #same as above
data=pd.read_csv("data_df.csv")
print(data)
data=pd.read_csv('data_df.csv',index_col='Unnamed: 0')
print(data)
#data=data.drop('Unnamed: 0',axis=1)
#print(data)
print(data.columns)
#data=pd.read_csv('data_df.csv',index_col='Unnamed: 0',nrows=0)
#print(data.info())#memory usage
data=pd.read_csv('data_df.csv',index_col='Unnamed: 0')
print(data.describe())  # summary statistics of the data
df.to_csv("data_df.csv",index=False)
'''
| true
|
73ad6a87ef791dd67704b2d161e002b6bfb0348c
|
Python
|
C-is-for-Cicero/learning-Python-for-the-memez
|
/Section5/CodingExcercise24.py
|
UTF-8
| 89
| 2.984375
| 3
|
[] |
no_license
|
def converter(fluid_ounces):
    milliliters = fluid_ounces * 29.57353
    return milliliters
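# Hedged usage sketch: 12 fl oz is about 354.88 ml.
# print(converter(12))  # -> 354.88236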
| true
|
b3d0530e4d79670d2c5ab8d12cee5ab9676aec65
|
Python
|
EshginGuluzade/shapesOnCanvas
|
/shapes.py
|
UTF-8
| 824
| 3.3125
| 3
|
[] |
no_license
|
class Rectangle:
def __init__(self, x, y, width, height, color):
self.x = x
self.y = y
self.width = width
self.height = height
self.color = color
def draw(self, canvas):
canvas.image_data[self.x:self.x + self.height, self.y:self.y + self.width] = [self.color[0], self.color[1],
self.color[2]]
class Square:
def __init__(self, x, y, side, color):
self.x = x
self.y = y
self.side = side
self.color = color
def draw(self, canvas):
canvas.image_data[self.x:self.x + self.side, self.y:self.y + self.side] = [self.color[0], self.color[1],
self.color[2]]
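# Hedged usage sketch (the canvas class below is assumed, not part of this
# module; draw() only needs an H x W x 3 numpy `image_data` array):
# import numpy as np
# class Canvas:
#     def __init__(self, h, w):
#         self.image_data = np.zeros((h, w, 3), dtype=np.uint8)
# c = Canvas(100, 100)
# Rectangle(10, 10, 40, 20, (255, 0, 0)).draw(c)
# Square(50, 50, 20, (0, 255, 0)).draw(c)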
| true
|
987784d0db790fa8efabd1b3dd1505dec52498d5
|
Python
|
alviandk/python-sunmorn
|
/week 11/game-suit.py
|
UTF-8
| 948
| 3.984375
| 4
|
[] |
no_license
|
from random import randint
#create a random suit for computer answer
def enemy():
rand=randint(1,3)
if rand==1:
return "rock"
elif rand==2:
return "scissor"
elif rand==3:
return "paper"
#function result of suite with 2 parameters "player" and "enemy" and give a return value
def suit(player,enemy):
if player==enemy:
return "draw"
if player=="scissor":
if enemy=="paper":
return "you win"
elif enemy=="rock":
return "you lose"
elif player=="rock":
if enemy=="scissor":
return "you win"
elif enemy=="paper":
return "you lose"
elif player=="paper":
if enemy=="rock":
return "you win"
elif enemy=="scissor":
return "you lose"
def main_program():
    player = input("what's your choice (paper/rock/scissor): ")  # raw_input/print-statement were Python 2; this is the Python 3 form
    print(suit(player, enemy()))
main_program()
| true
|
cc581ba773daf4995249e5956c8a4003cf20b51f
|
Python
|
farnaztavakool/social_media
|
/functions/sum.py
|
UTF-8
| 355
| 3.359375
| 3
|
[] |
no_license
|
# given a list find the pairs that add up to the sum
'''
q1: valid sum
q2: repeated values
'''
def find_sum(sumn,li):
sum_dict = {}
result = []
for i in li:
if sumn-i in sum_dict:
result.append([i, sumn-i])
continue
sum_dict[i] = sumn-i
return result
print (find_sum(10,[3,4,6,7,7]))
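# Hedged sketch addressing q2 (repeated values), not part of the original:
# track seen values in a set so each unordered pair is reported once.
def find_sum_unique(target, values):
    seen, pairs = set(), set()
    for v in values:
        if target - v in seen:
            pairs.add(tuple(sorted((v, target - v))))
        seen.add(v)
    return [list(p) for p in pairs]

print(find_sum_unique(10, [3, 4, 6, 7, 7]))  # -> [[3, 7], [4, 6]] in some order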
| true
|
6f1e2c428973a93bff60de09ca62cc28bebadfc4
|
Python
|
dmunkvold/cryptocurrency_twitter_analysis
|
/crypto_compare_api/crypto_compare_api.py
|
UTF-8
| 1,022
| 2.640625
| 3
|
[] |
no_license
|
# this import code is only necessary because I am struggling to set the
# path for python to look for modules in my 3.6 environment. this code
# can be commented out in the case that cryptocompare imports correctly
import sys
sys.path.append('/Users/David/anaconda/envs/environment_for_py3/lib/python3.6/site-packages')
# the following is necessary code
import cryptocompare
class CryptoCompareAPI:
    def get_coin_list(self):
        coin_list = cryptocompare.get_coin_list(format=False)  # avoid shadowing the built-in list
        return coin_list
def get_price(self, crypto):
price = cryptocompare.get_price(crypto, curr='USD')
return price
def get_historical_price(self, crypto, datetime):
price = cryptocompare.get_historical_price(crypto, 'USD', datetime)
return price
def get_average(self, crypto, exchange):
avg = cryptocompare.get_avg(crypto, curr='EUR', exchange=exchange)
return avg
def get_exchanges(self):
exchanges = cryptocompare.get_exchanges()
return exchanges
| true
|
d0108c94e475f5c5652f5cf9dcd8cf5da44bbdfb
|
Python
|
talrus/Dz
|
/Exeption_CW.py
|
UTF-8
| 3,194
| 4.34375
| 4
|
[] |
no_license
|
'''
Write a program that asks the user for an integer and reports whether the number
is even or odd, and whether the input is valid.
'''
'''
Write a program that asks the user for their age and reports whether the age is an
even or an odd number. Handle negative input by raising a custom exception.
The main code must call a function that processes the input.
'''
"""
3. Write a program that computes the quotient of two numbers the user enters
separated by a comma; handle division by zero, malformed input and any other
exceptions. Use the else and finally blocks.
4. Write a program that reads a number and prints the day of the week it maps to
(1 is Monday, 2 is Tuesday, and so on). Handle numbers of 8 and above, and
non-numeric input.
"""
def Weekday():
try:
        list_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
number = int(input("Enter number of week 1-7:\n"))
if number not in range(1,8):
raise ValueError("Only 1-7 number not more or less")
except TypeError as t:
print(t)
except ValueError as v:
print(v)
except:
print("Someting went wrong")
else:
print(list_of_week[number-1])
def DivExeption():
    try:
        a, b = input("Enter two numbers separated by commas\n").split(',')
        print(f"a : {a}, b: {b}")
        print(f"quotient: {int(a) / int(b)}")  # the division the task asks for
    except ZeroDivisionError as e:
        print(e)
    except ValueError as s:  # malformed input; input parsing never raises SyntaxError
        print(s)
    except Exception:
        print('Something went wrong.')
    else: print('No exception')
    finally : print('I always execute ^_^')
def MyExeption():
    age = None  # so the return below works even when parsing fails
    try:
        age = int(input("Please enter your age: \n"))
        if age <= 0 : raise Exception("Please enter positive integer")
        elif age % 2 ==0 : print(f'{age} is an even number.')
        else : print(f"{age} is an odd number")
    except Exception as e:  # also catches the custom exception above, not just ValueError
        print(e)
    return age
# try:
# number = int(input("Enter integer number\n"))
# if number % 2 ==0 : print(f'{number} is even number.')
# else : print(f"{number} is odd number")
# except ValueError as e:
# print(e)
#age = MyExeption()
#MyExeption()
#DivExeption()
Weekday()
| true
|
37eb017ead042dd6fe3800a88a95c0f725a1bd9c
|
Python
|
NicholasTing/Competitive_Programming
|
/CodeForces_635-640/CodeForces_638/b.py
|
UTF-8
| 504
| 3.09375
| 3
|
[] |
no_license
|
T = int(input())
while T != 0:
n, k = map(int,input().split())
numbers = list(map(int,input().split()))
# distinct numbers
dn = set(numbers)
dn_num = len(dn)
if dn_num > k:
print('-1')
T -= 1
continue
else:
fa = []
for i in dn:
fa.append(i)
while len(fa) < k:
fa.append(1)
fa = fa * n
print(len(fa))
print(' '.join([str(e) for e in fa]))
T -= 1
| true
|
30bd313009f6a29ba2a3727316ed3d75c57692ec
|
Python
|
minssoj/Learning_OpenCV-Python
|
/Code/30.TemplateMatch.py
|
UTF-8
| 1,333
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
# =================================================
# minso.jeong@daum.net
# 30. Template matching
# Reference : samsjang@naver.com
# =================================================
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
def templateMatching():
img1 = cv.imread('../Images/11.Ji.Jpg', cv.IMREAD_GRAYSCALE)
img2 = img1.copy()
template = cv.imread('../Images/11.Ji_ROI.jpg', cv.IMREAD_GRAYSCALE)
w, h = template.shape[::-1]
methods = ['cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR', 'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']
for i in methods:
img1 = img2.copy()
method = eval(i)
try:
res = cv.matchTemplate(img1, template, method)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
except:
print('error', i)
continue
        # methods where the minimum value marks the best match
        if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
            top_left = min_loc
        # methods where the maximum value marks the best match
        else:
            top_left = max_loc
bottom_right = (top_left[0]+w, top_left[1]+h)
cv.rectangle(img1, top_left, bottom_right, 255, 2)
plt.subplot(1,2,1), plt.imshow(res, cmap='gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
plt.subplot(1,2,2), plt.imshow(img1, cmap='gray')
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.suptitle(i)
plt.show()
templateMatching()
| true
|
36e9a07347ce75d44c900191d66a9fa37a539747
|
Python
|
Mark0042/Sorting-Visualizer
|
/main.py
|
UTF-8
| 1,153
| 3.1875
| 3
|
[] |
no_license
|
import pygame
import math
import random
import time
from pygame import mixer
pygame.init()
pygame.mixer.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((800, 600))
a=[5,3,7,9,8,4,14,7,12,20,24,21,6,19,8,23,22,12,11,10]
mx=0
for i in a:
if i>mx:
mx=i
n=len(a)
wid=800/n
ht=600/mx-1
pygame.display.set_caption("Bubble Sort")
running= True
def checkquit():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
return True
def draw(x):
    screen.fill((0, 0, 0))
    for k in range(n):
        if k>x:
            pygame.draw.rect (screen, (0,255,0),[k*wid,(600-(a[k]*ht)),wid-1,a[k]*ht])
        else:
            pygame.draw.rect (screen, (255,255,255),[k*wid,(600-(a[k]*ht)),wid-1,a[k]*ht])
    pygame.event.pump()  # keep the window responsive while the sort runs
    pygame.display.update()
while running:
for i in range(n):
for j in range(n-i-1):
if a[j]>a[j+1]:
a[j],a[j+1]=a[j+1],a[j]
# beepsound=mixer.Sound("beep-07.wav")
# beepsound.play()
draw(n-i-1)
time.sleep(0.01)
draw(0)
draw(-1)
running=False
time.sleep(0.1)
| true
|
20caa14ce23c5fef3772fef528948bde9a1e0e75
|
Python
|
Fallgregg/theory-of-algorithms
|
/Lab06/Lab06.py
|
UTF-8
| 4,563
| 3.484375
| 3
|
[] |
no_license
|
from pip._vendor.distlib.compat import raw_input
class Heap:
def __init__(self, arr, is_max):
"""конструктор класу Heap,
що ініціалізує масив як піраміду,
та встановлює фложок для перевірки
максимального розміру піраміди"""
self.heap = arr
self.is_max = is_max
size = -1
is_max = False
heap = []
def buildHeap(self):
"""функція що проходить по кожному з вузлів
та винокує для них процедуру max_heapify"""
self.size = len(self.heap) - 1
for i in range(len(self.heap) // 2, -1, -1):
self.max_heapify(self.heap, i)
def max_heapify(self, arr, counter):
"""функція опускає значення arr[i] вниз до тих пір,
доки піддерево з корнем, щ о відповідає i, не буде
незростаючаою пірамідою"""
l_ch = left_child(counter)
r_ch = right_child(counter)
h_peak = counter
if self.is_max:
if l_ch <= self.size and arr[l_ch] > arr[counter]:
h_peak = l_ch
if r_ch <= self.size and arr[r_ch] > arr[h_peak]:
h_peak = r_ch
else:
if l_ch <= self.size and arr[l_ch] < arr[counter]:
h_peak = l_ch
if r_ch <= self.size and arr[r_ch] < arr[h_peak]:
h_peak = r_ch
if h_peak is not counter:
arr[counter], arr[h_peak] = arr[h_peak], arr[counter]
self.max_heapify(arr, h_peak)
def insert_max(self, key):
""" функція, що всталяє вузол до піраміди"""
self.size += 1
counter = self.size
self.heap.append(key)
while counter > 0 and (
(self.heap[parent(counter)] < key and self.is_max) or
(self.heap[parent(counter)] > key and not self.is_max)
):
self.heap[counter] = self.heap[parent(counter)]
counter = parent(counter)
self.heap[counter] = key
def parent(counter):
"""функція для знаходження індекса батьківського вузла"""
return (counter - 1) // 2
def right_child(counter):
"""" функція для знаходження індекса правого дочірнього вузла"""
return 2 * counter + 2
def left_child(counter):
"""функція для знаходження індекса лівого дочірнього вузла"""
return 2 * counter + 1
def check_heaps(h_low, h_high):
"""функція для визначення, в яку піраміду (heapHigh, heapLow) додавати новий елемент"""
if h_high.size - h_low.size > 1:
h_low.insert_max(h_high.heap.pop(0))
h_high.buildHeap()
if h_low.size - h_high.size > 1:
h_high.insert_max(h_low.heap.pop(0))
h_low.buildHeap()
def median(h_low, h_high, length):
"""функція пошуку медіани відсортвоаного масиву, використовуючи heapHigh та heapLow"""
if(length + 1) % 2:
if h_low.size > h_high.size:
res = h_low.heap[0]
else:
res = h_high.heap[0]
else:
res = [h_low.heap[0], h_high.heap[0]]
return res
def sequence(arr):
"""функція для знаходження пірамід heapHigh та heapLow"""
h_low = Heap([], True)
h_high = Heap([], False)
res = []
h_low.insert_max(arr[0])
res.append(arr[0])
for counter in range(1, len(arr)):
temp = arr[counter]
if temp < h_low.heap[0]:
h_low.insert_max(temp)
else:
h_high.insert_max(temp)
check_heaps(h_low, h_high)
res.append(median(h_low, h_high, counter))
return res
"""достємо вхідні дані із введеного з консолі файлу"""
array = [int(s) for s in [line.strip() for line in open(raw_input('Enter file name:\n'), 'r')]]
array.pop(0)
result = sequence(array)
"""записуємо вихідні дані у файл"""
outputFile = open('output.txt', 'w')
for item in result:
if isinstance(item, list):
outputFile.write("%s %s\n" % (item[0], item[1]))
else:
outputFile.write("%s\n" % item)
| true
|
c453b5b286f7aeff8142c28d84e449499839d746
|
Python
|
hillaryellis37/NoteFinderProject
|
/NoteFinder/audio/freq_to_note_converter.py
|
UTF-8
| 2,125
| 2.90625
| 3
|
[] |
no_license
|
from math import log2, pow
import numpy as np
from music21 import chord as ch
A4 = 440
C0 = A4 * pow(2, -4.75)
name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
class Note:
def __init__(self, name=name, A4=440, C0=C0):
self.name = name
self.A4 = A4
self.C0 = C0
    # takes a frequency and converts it to a note:
def pitch(self, freq):
h = round(12 * log2(freq / self.C0))
octave = h // 12
n = h % 12
note = self.name[n] + str(octave)
return note
def note_to_freq(self, note):
all_freq = []
note_index = self.name.index(note[:-1])
for i in range(10):
note_num = note_index + 12 * i
freq = self.C0 * 2 ** (note_num / 12)
all_freq.append(round(freq))
return all_freq
    # takes a note and returns the frequency range of that note's octave:
def note_to_freq_range(self, note):
octave = int(note[-1])
note_index = self.name.index(note[:-1])
note_num = note_index + 12 * octave
low_note = note_num - 0.5
high_note = note_num + 0.5
freq_low = self.C0 * 2 ** (low_note / 12)
freq_high = self.C0 * 2 ** (high_note / 12)
return np.array([freq_low, freq_high])
    # takes a note and returns the frequency ranges of all the note's octaves:
def note_to_all_freq_harmonics(self, note):
note = note[:-1]
all_freq_harm = np.empty([10,2])
for i in range(10):
octave = note + str(i)
x, y = self.note_to_freq_range(octave)
all_freq_harm[i] = x, y
return all_freq_harm
def chord(self, notes):
chord = ch.Chord(notes)
return chord.pitchedCommonName
def run(self, input_freq):
note = self.pitch(input_freq)
freq_range_single = self.note_to_freq_range(note)
freq_range_all = self.note_to_all_freq_harmonics(note)
freq_harm = self.note_to_freq(note)
# print("the note is {} ".format(note))
return note, freq_range_single, freq_range_all, freq_harm
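# Hedged usage sketch: 440 Hz should map straight to A4.
# n = Note()
# print(n.pitch(440))                # -> 'A4'
# print(n.note_to_freq_range('A4'))  # ~[427.5, 452.9] Hz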
| true
|
16a1a88fe054606caeb4abd24cfeab0ea147d568
|
Python
|
kain-01/kyoupro
|
/kyoupro/AtCoder Beginner Contest 143/143a.py
|
UTF-8
| 84
| 3.140625
| 3
|
[] |
no_license
|
a,b = map(int,input().split())
if a > b*2:
print(a-b*2)
else:
print(0)
| true
|
e0c384a951c194fe13fa6ab68ba0f373fab77cc2
|
Python
|
vinitha33/training
|
/Hands-on/Python/Ques_8.py
|
UTF-8
| 426
| 3.75
| 4
|
[] |
no_license
|
# Display all the numbers that are greater than each number and lie to its right.
num1 = [10,4,2,5,3,6]
num2 = []
for i in range(len(num1)):
for j in range((i + 1),len(num1)):
if num1[i] < num1[j]:
# num2.append(num1[i])
num2.append(num1[j])
if len(num2) == 0:
print(num1[i],"= 0")
else:
print(num1[i],"=",num2)
num2.clear()
| true
|
bf65ec187f5bcebf01a77ef17f13f2337978815f
|
Python
|
Ahmad-Mahmoud/USafeB
|
/src/crypt.py
|
UTF-8
| 2,654
| 3.03125
| 3
|
[] |
no_license
|
import string
import random
import os
import re
from Crypto.PublicKey import RSA
from Crypto.Random import *
from Crypto.Cipher import AES
# This class is created to tie the object with its key throughout the encryption and decryption process
class Device:
    def __init__(self, i: int):
        self.key = get_random_bytes(16)
        self.name = "table" + str(i) + ".bsf"
        self.table = open(self.name, 'w')  # create (or clear) the table file
        self.table_path = self.name
        self.table.close()
    # This function is called in a loop, so file_id is supposed to be its iterator
    # It encrypts a file into a new file then deletes the original file
    # Each object has its own table file which maps the id to the original file name
    def encrypt(self, directory: str, file_id: int):
        table = open(self.table_path, 'a')  # append, so earlier entries are kept
        matches = re.findall(r".*/", directory)
        filename = matches[0]
        target_match = re.findall(r".*/(.*)", directory)
        filename += target_match[0]
        filename += ".enc"
        file_in = open(directory, 'rb')
        data = file_in.read()
        temp = open(filename, 'wb')
        table.write(filename + '\n' + directory + '\n')
        table.close()  # flush the table entry right away
        cipher = AES.new(self.key, AES.MODE_GCM)  # a GCM cipher object can only encrypt once, so build one per file
        cipher_text, tag = cipher.encrypt_and_digest(data)
        [temp.write(x) for x in (cipher.nonce, tag, cipher_text)]
        file_in.close()
        os.remove(directory)
        temp.close()
# This function will also be called in a while loop, however, it will simply loop over the table
# and decrypt everything on it, deleting the encrypted files as well
def decrypt(self):
files = open(self.table_path, "r")
data = files.read().splitlines()
suppressed_file = ""
filename = ""
decrypted = True
for line in data:
if decrypted:
suppressed_file = line
decrypted = False
else:
filename = line
decrypted = True
file_in = open(suppressed_file, 'rb')
file_out = open(filename, 'wb')
nonce, tag, cipher_text = [
file_in.read(x) for x in (16, 16, -1)]
cipher = AES.new(self.key, AES.MODE_GCM, nonce)
data = cipher.decrypt_and_verify(cipher_text, tag)
file_out.write(data)
os.remove(suppressed_file)
file_in.close()
file_out.close()
# For the completed hardware design, the following clean-up section will be required.
# def __del__(self):
# self.table.close()
# os.remove('table')
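# Hedged usage sketch (paths are hypothetical, not from the original module):
# dev = Device(0)
# dev.encrypt('/tmp/demo/secret.txt', 0)  # writes /tmp/demo/secret.txt.enc, removes the original
# dev.decrypt()                           # restores every file listed in table0.bsf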
| true
|
fe1a6d9998d1cfbcd040b65f885fc851baba1e08
|
Python
|
sanathks1998/sanathks
|
/noofmeeting.py
|
UTF-8
| 285
| 2.765625
| 3
|
[] |
no_license
|
r=int(input())
s=list(map(int,input().split()))
f=list(map(int,input().split()))
ct=[]
t=0
for i in range(r):
ctrl=0
t=0
for j in range(i,r):
if(s[j]>=t):
t=f[j]
ctrl=ctrl+1
ct.append(ctrl)
print(int(max(ct)),end="")
| true
|
c6129f5ffdbe45912545d955e39e560999b19e02
|
Python
|
GrandyLee/rayfire
|
/hik/demo.py
|
UTF-8
| 519
| 2.984375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import unittest
class TestMethod(unittest.TestCase):
    # setUp() runs before every test case; initialization work goes here
@classmethod
def setUp(cls):
print('setUp')
    # tearDown() runs after every test case
@classmethod
def tearDown(cls):
print('tearDown')
    def test001(self):  # unittest test methods must start with "test"
print('test001')
def test002(self):
print('test002')
if __name__ == '__main__':
unittest.main()
| true
|
b36a66ef8ba8cfc3c871953de7bac89d3f8dbc4a
|
Python
|
Candy-Capilla/sqlalchemy-challenge
|
/app.py
|
UTF-8
| 4,687
| 2.796875
| 3
|
[] |
no_license
|
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import datetime as dt
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
measurement = Base.classes.measurement
station= Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start>add start date in YYYY-MM-DD format<br/>"
f"/api/v1.0/<start>/<end> add start date and end date in YYYY-MM-DD format"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all passenger names"""
# Query all precipitation
results = session.query(measurement.date, measurement.prcp).all()
session.close()
# convert to dictionary
all_prcp = []
for date, prcp in results:
prcp_dict = {}
prcp_dict["date"] = date
prcp_dict["prcp"] = prcp
all_prcp.append(prcp_dict)
return jsonify(all_prcp)
@app.route("/api/v1.0/stations")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list"""
# Query all stations
results = session.query(station.name).all()
session.close()
    # Flatten the row data into a plain list of station names
    all_stations = list(np.ravel(results))
return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
last_year = dt.date(2017, 8, 23)- dt.timedelta(days =365)
#query of last year of temps for most active station
results = session.query(measurement.date, measurement.tobs).\
filter(measurement.date >= last_year).\
filter(measurement.station == 'USC00519281').all()
session.close()
#convert to dictionary
all_tobs = []
for date, tobs in results:
temp_dict = {}
temp_dict["date"] = date
temp_dict["tobs"] = tobs
all_tobs.append(temp_dict)
return jsonify(all_tobs)
@app.route("/api/v1.0/<start>")
def start(start):
# Create our session (link) from Python to the DB
session = Session(engine)
#query start to current tmin, tavg, and tmax
from_start = session.query(measurement.date,\
func.min(measurement.tobs),\
func.avg(measurement.tobs),func.max(measurement.tobs)).\
filter(measurement.date >= start).\
group_by(measurement.date).all()
session.close()
#create dict
start_temp = []
for date, t_min, t_avg, t_max in from_start:
start_temp_dict = {}
start_temp_dict["date"] = date
start_temp_dict["min"] = t_min
start_temp_dict["avg"] = t_avg
start_temp_dict["max"] = t_max
start_temp.append(start_temp_dict)
#jsonify
return jsonify(start_temp)
@app.route("/api/v1.0/<start>/<end>")
def start_end(start, end):
# Create our session (link) from Python to the DB
session = Session(engine)
#query start date to end date tmin, tavg, and tmax
between_dates = session.query(measurement.date,\
func.min(measurement.tobs),\
func.avg(measurement.tobs),\
func.max(measurement.tobs)).\
filter(measurement.date >= start).\
filter(measurement.date <= end).\
group_by(measurement.date).all()
session.close()
#create dict
start_end_temp = []
for date, t_min, t_avg, t_max in between_dates:
start_end_temp_dict = {}
start_end_temp_dict["date"] = date
start_end_temp_dict["min"] = t_min
start_end_temp_dict["avg"] = t_avg
start_end_temp_dict["max"] = t_max
start_end_temp.append(start_end_temp_dict)
#jsonify
return jsonify(start_end_temp)
if __name__ == '__main__':
app.run(debug=True)
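# Hedged usage sketch: with the server running locally (Flask's default port 5000), e.g.
#   curl http://127.0.0.1:5000/api/v1.0/precipitation
#   curl http://127.0.0.1:5000/api/v1.0/2016-08-23/2017-08-23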
| true
|
60005d91da181f229aa22d3e84cba6797e33e994
|
Python
|
TayExp/pythonDemo
|
/05DataStructure/数字在排序数组中出现的次数.py
|
UTF-8
| 1,203
| 3.375
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
class Solution:
def GetNumberOfK(self, data, k):
# write code here
first = self.GetFirst(data, k, 0, len(data) - 1)
if first == -1:
return 0
last = self.GetLast(data, k, 0, len(data) - 1)
return last - first + 1
def GetFirst(self, data, k, start, end):
if start > end:
return -1
middle = (start + end) // 2
if data[middle] == k:
while middle >= 0 and data[middle] == k:
middle -= 1
return middle + 1
elif data[middle] < k:
start = middle + 1
else:
end = middle - 1
return self.GetFirst(data, k, start, end)
def GetLast(self, data, k, start, end):
if start > end:
return -1
middle = (start + end) // 2
if data[middle] == k:
while middle <= end and data[middle] == k:
middle += 1
return middle - 1
elif data[middle] < k:
start = middle + 1
else:
end = middle - 1
return self.GetLast(data, k, start, end)
s = Solution()
print(s.GetNumberOfK([1,2,3,3,3,3,3,4,5,6,7],3))
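# Hedged cross-check sketch, not part of the original: in a sorted list the
# count of k is the width of its slice, found with two binary searches.
from bisect import bisect_left, bisect_right

def count_k(data, k):
    return bisect_right(data, k) - bisect_left(data, k)

print(count_k([1,2,3,3,3,3,3,4,5,6,7], 3))  # -> 5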
| true
|
fef4b27033495cda8ae040e9a3edf07f1284496b
|
Python
|
rolquitel/py-grafos
|
/layout.py
|
UTF-8
| 14,239
| 2.890625
| 3
|
[] |
no_license
|
import abc
import math
import random
import numpy
from abc import ABC
import node
import graph
from quadtree import QuadTree, Rectangle, Point
stop_layinout = False
def fr(k, x):
"""
    Repulsive force: fr(k, x) = k**2 / x (Fruchterman-Reingold)
:param k:
:param x:
:return:
"""
return (k ** 2) / x
def fa(k, x):
"""
    Attractive force: fa(k, x) = x**2 / k (Fruchterman-Reingold)
:param k:
:param x:
:return:
"""
# return k * math.log10(x / k)
return (x ** 2) / k
def mag(v2d):
"""
    Magnitude of a 2D vector
:param v2d:
:return:
"""
return math.sqrt((v2d[0] ** 2) + (v2d[1] ** 2))
#####################################################################################################################
class Layout(ABC):
"""
    Abstract base class for computing the layout of a graph
"""
def __init__(self, g):
__metaclass__ = abc.ABCMeta
self.graph = g
self.attr = {}
@abc.abstractmethod
def step(self):
return False
def run(self):
global stop_layinout
while not stop_layinout:
if self.step():
return
class Random(Layout):
def step(self):
for v in self.graph.nodes.values():
v.attr[node.ATTR_POS] = numpy.array(
[random.random(), random.random()])
return True
class Grid(Layout):
def step(self):
dim = numpy.array([1000, 1000])
lado = int(math.ceil(math.sqrt(len(self.graph.nodes))))
tam = dim / (lado + 2)
origen = dim / 2
n = 0
for v in self.graph.nodes.values():
x = tam[0] * int((n % lado) + 1) - origen[0]
y = tam[1] * int((n / lado) + 1) - origen[1]
v.attr[node.ATTR_POS] = numpy.array([x, y])
n = n + 1
return True
#####################################################################################################################
class FruchtermanReingold(Layout):
"""
    Computes a graph layout with the force-equilibrium algorithm of Fruchterman and Reingold (1991),
    with the improvement introduced by R. Fletcher (2000) for cooling the process
"""
def __init__(self, g, k=50, t=0.95, advance=20, conv_threshold=3.0):
super().__init__(g)
# math.sqrt((self.res[0] * self.res[1]) / len(self.grafo.nodes))
self.k = k
self.t = t
self.advance = advance
self.conv_threshold = min(conv_threshold, len(g.nodes) / 100)
self.converged = False
self.energy = math.inf
self.progress = 0
def step(self):
"""
        Runs one step of the layout algorithm
        :return: True if the algorithm has converged, False otherwise
"""
if self.converged:
return
        # for the cooling schedule
prev_energy = self.energy
self.energy = 0
with graph.WRITING_LOCK:
            # repulsive forces
for v in self.graph.nodes.values():
v.attr[node.ATTR_DISP] = numpy.array([0, 0])
for u in self.graph.nodes.values():
if v != u:
delta = v.attr[node.ATTR_POS] - u.attr[node.ATTR_POS]
m_delta = mag(delta)
if m_delta > 0:
v.attr[node.ATTR_DISP] = v.attr[node.ATTR_DISP] + \
(delta / m_delta) * fr(self.k, m_delta)
            # attractive forces
for e in self.graph.edges.values():
delta = e.n0.attr[node.ATTR_POS] - e.n1.attr[node.ATTR_POS]
e.n0.attr[node.ATTR_DISP] = e.n0.attr[node.ATTR_DISP] - \
(delta / mag(delta)) * fa(self.k, mag(delta))
e.n1.attr[node.ATTR_DISP] = e.n1.attr[node.ATTR_DISP] + \
(delta / mag(delta)) * fa(self.k, mag(delta))
            # move the nodes according to the resulting force
dif = numpy.array([0, 0])
for v in self.graph.nodes.values():
dif = dif + (v.attr[node.ATTR_DISP] /
mag(v.attr[node.ATTR_DISP])) * self.advance
v.attr[node.ATTR_POS] = v.attr[node.ATTR_POS] + (
v.attr[node.ATTR_DISP] / mag(v.attr[node.ATTR_DISP])) * self.advance
self.energy = self.energy + mag(v.attr[node.ATTR_DISP]) ** 2
self.update_step(prev_energy)
if mag(dif) < self.conv_threshold or self.advance < self.conv_threshold:
self.converged = True
return self.converged
def update_step(self, prev_energy):
"""
        Update the step size for moving the nodes, as described by R. Fletcher (2000)
        :param prev_energy: previous energy value
:return: None
"""
if self.energy < prev_energy:
self.progress = self.progress + 1
if self.progress >= 5:
self.progress = 0
self.advance = self.t * self.advance
else:
self.progress = 0
self.advance = self.t * self.advance
#####################################################################################################################
ATTR_CENTER_OF_MASS = 1
ATTR_MASS = 0
class BarnesHut(Layout):
"""
    Computes a graph layout with the Fruchterman-Reingold force model, approximating the
    repulsive forces with a Barnes-Hut quadtree and Fletcher's (2000) cooling scheme
"""
def __init__(self, g, k=50, t=0.95, advance=20, conv_threshold=3.0, points_by_region=4):
super().__init__(g)
self.qtree = None
self.points_by_region = points_by_region
self.theta = 1
self.k = k
self.t = t
self.advance = advance
self.reps_for_down = 5
self.conv_threshold = min(conv_threshold, len(g.nodes) / 100)
self.converged = False
self.energy = math.inf
self.initial_energy = 0
self.progress = 0
self.steps = 0
def build_quadtree(self):
self.qtree = QuadTree(Rectangle(self.graph.extent[0][0], self.graph.extent[0][1], self.graph.extent[1][0],
self.graph.extent[1][1]), self.points_by_region)
for v in self.graph.nodes.values():
p = Point(v.attr[node.ATTR_POS][0], v.attr[node.ATTR_POS][1], v)
self.qtree.insert(p)
def compute_mass(self, qtree):
qtree.attr[ATTR_CENTER_OF_MASS] = numpy.array([0, 0])
qtree.attr[ATTR_MASS] = 0
for p in qtree.points:
qtree.attr[ATTR_CENTER_OF_MASS] = qtree.attr[ATTR_CENTER_OF_MASS] + \
numpy.array([p.x, p.y])
qtree.attr[ATTR_MASS] = qtree.attr[ATTR_MASS] + 1
if qtree.is_divided:
self.compute_mass(qtree.I)
self.compute_mass(qtree.II)
self.compute_mass(qtree.III)
self.compute_mass(qtree.IV)
if qtree.I.attr[ATTR_MASS] > 0:
qtree.attr[ATTR_MASS] += qtree.I.attr[ATTR_MASS]
qtree.attr[ATTR_CENTER_OF_MASS] = qtree.attr[ATTR_CENTER_OF_MASS] + \
qtree.I.attr[ATTR_CENTER_OF_MASS] * \
qtree.I.attr[ATTR_MASS]
if qtree.II.attr[ATTR_MASS] > 0:
qtree.attr[ATTR_MASS] += qtree.II.attr[ATTR_MASS]
qtree.attr[ATTR_CENTER_OF_MASS] = qtree.attr[ATTR_CENTER_OF_MASS] + \
qtree.II.attr[ATTR_CENTER_OF_MASS] * \
qtree.II.attr[ATTR_MASS]
if qtree.III.attr[ATTR_MASS] > 0:
qtree.attr[ATTR_MASS] += qtree.III.attr[ATTR_MASS]
qtree.attr[ATTR_CENTER_OF_MASS] = qtree.attr[ATTR_CENTER_OF_MASS] + \
qtree.III.attr[ATTR_CENTER_OF_MASS] * \
qtree.III.attr[ATTR_MASS]
if qtree.IV.attr[ATTR_MASS] > 0:
qtree.attr[ATTR_MASS] += qtree.IV.attr[ATTR_MASS]
qtree.attr[ATTR_CENTER_OF_MASS] = qtree.attr[ATTR_CENTER_OF_MASS] + \
qtree.IV.attr[ATTR_CENTER_OF_MASS] * \
qtree.IV.attr[ATTR_MASS]
if qtree.attr[ATTR_MASS] > 0:
qtree.attr[ATTR_CENTER_OF_MASS] = qtree.attr[ATTR_CENTER_OF_MASS] / \
qtree.attr[ATTR_MASS]
def compute_repulsion_force(self, p, qtree):
force = numpy.array([0.0, 0.0])
vec = p.data.attr[node.ATTR_POS] - qtree.attr[ATTR_CENTER_OF_MASS]
r = numpy.linalg.norm(vec)
# d = math.sqrt(qtree.limite.w * qtree.limite.h)
d = min(qtree.limits.w, qtree.limits.h)
if not r > 0:
return numpy.array([0.0, 0.0])
if d / r < self.theta or not qtree.is_divided:
force = force + (vec / r) * fr(self.k, r) * qtree.attr[ATTR_MASS]
return force
else:
force = force + self.compute_repulsion_force(p, qtree.I)
force = force + self.compute_repulsion_force(p, qtree.II)
force = force + self.compute_repulsion_force(p, qtree.III)
force = force + self.compute_repulsion_force(p, qtree.IV)
return force
def step(self):
"""
Ejecuta un paso del algoritmo de disposición
:return: True si el algoritmo ha convergido, False de otra forma
"""
# if self.convergio:
# return
self.steps += 1
self.build_quadtree()
self.compute_mass(self.qtree)
        # for the cooling schedule
prev_energy = self.energy
self.energy = 0
with graph.WRITING_LOCK:
            # repulsion force
for v in self.graph.nodes.values():
p = Point(v.attr[node.ATTR_POS][0],
v.attr[node.ATTR_POS][1], v)
v.attr[node.ATTR_DISP] = self.compute_repulsion_force(
p, self.qtree)
            # attraction force
for e in self.graph.edges.values():
delta = e.n0.attr[node.ATTR_POS] - e.n1.attr[node.ATTR_POS]
m = mag(delta)
if m > 0:
e.n0.attr[node.ATTR_DISP] -= (delta / m) * fa(self.k, m)
e.n1.attr[node.ATTR_DISP] += (delta / m) * fa(self.k, m)
            # move the nodes according to the resulting force
            for v in self.graph.nodes.values():
                m = mag(v.attr[node.ATTR_DISP])
                if m > 0:  # guard against a zero displacement vector
                    v.attr[node.ATTR_POS] = v.attr[node.ATTR_POS] + \
                        (v.attr[node.ATTR_DISP] / m) * self.advance
                self.energy += m ** 2
if not self.converged:
self.update_step(prev_energy)
return self.converged
    def update_step(self, prev_energy):
        """
        Update the magnitude of the node position step, following the adaptive
        cooling scheme described by R. Fletcher (2000).
        :param prev_energy: previous energy value
        :return: None
        """
        # print(self.steps, math.sqrt(self.energy) / (len(self.graph.nodes) * 10), self.advance)
        if math.sqrt(self.energy) / (len(self.graph.nodes) * 10) < self.conv_threshold or self.advance < 1:
            print('Layout converged.')
            self.converged = True
        # self.advance = min(math.sqrt(self.energy) / (len(self.graph.nodes) * 10), 2 * self.advance)
        if self.energy < prev_energy:
            self.progress = self.progress + 1
            # after reps_for_down consecutive improvements, cool the step
            if self.progress >= self.reps_for_down:
                self.progress = 0
                self.advance = self.t * self.advance
        else:
            self.progress = 0
            self.advance = self.t * self.advance
#####################################################################################################################
class Spring(Layout):
"""
    Computes a graph layout with the spring-embedder algorithm presented by P. Eades (1984).
"""
def __init__(self, g):
super().__init__(g)
self.c1 = 1
self.c2 = 50
self.c3 = 1
self.c4 = 10
self.expand = False
# math.sqrt((self.res[0] * self.res[1]) / len(self.graph.nodes))
self.k = 50
def step(self):
"""
        Run one step of the layout algorithm.
        :return: True if the algorithm has converged, False otherwise
"""
with graph.WRITING_LOCK:
for n in self.graph.nodes.values():
n.attr[node.ATTR_DISP] = numpy.array([0, 0])
for e in self.graph.edges.values():
f = e.n0.attr[node.ATTR_POS] - e.n1.attr[node.ATTR_POS]
d = numpy.linalg.norm(f)
try:
f = (f / d) * math.log10(d / self.c2) * self.c4
except ValueError:
continue
e.n0.attr[node.ATTR_DISP] = e.n0.attr[node.ATTR_DISP] - f
e.n1.attr[node.ATTR_DISP] = e.n1.attr[node.ATTR_DISP] + f
disp = 0
for n in self.graph.nodes.values():
disp = max(disp, numpy.linalg.norm(n.attr[node.ATTR_DISP]))
n.attr[node.ATTR_POS] = n.attr[node.ATTR_POS] + \
n.attr[node.ATTR_DISP]
            # print(disp * len(self.graph.nodes), self.k)
if (disp * len(self.graph.nodes)) < self.k and self.expand:
self.expand = False
for a in self.graph.nodes.values():
a.attr[node.ATTR_DISP] = numpy.array([0, 0])
for b in self.graph.nodes.values():
if a != b:
                            f = a.attr[node.ATTR_POS] - b.attr[node.ATTR_POS]
                            d = numpy.linalg.norm(f)
                            if d > 0:  # guard against coincident nodes
                                f = (f / d) * fr(self.k, d)
                                a.attr[node.ATTR_DISP] = a.attr[node.ATTR_DISP] - \
                                    self.c4 * f
for n in self.graph.nodes.values():
n.attr[node.ATTR_POS] = n.attr[node.ATTR_POS] + \
n.attr[node.ATTR_DISP] * (0.1 / self.c4)
return False
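# A minimal usage sketch (illustrative, not part of the original module): it
# assumes the surrounding `graph` module provides a Graph object with `nodes`,
# `edges` and `extent`, as the Layout classes above require; the loader name
# below is hypothetical.
#
#     g = load_graph_somehow()            # hypothetical helper
#     layout = BarnesHut(g, k=50, t=0.95)
#     while not layout.step():            # step() returns True once converged
#         pass                            # redraw g here when animating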
| true
|
5982de8a3e63ccfb060f1d4cf93f932d279e6441
|
Python
|
NickSto/python-single
|
/youtube.py
|
UTF-8
| 19,653
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import argparse
import collections
import logging
import os
import re
import shutil
import sys
import time
import requests
from oyaml import oyaml as yaml
try:
import youtube_dl
except ImportError:
youtube_dl = None
assert sys.version_info.major >= 3, 'Python 3 required'
API_URL = 'https://www.googleapis.com/youtube/v3/'
DESCRIPTION = """Download videos from a Youtube playlist and save their metadata."""
def make_argparser():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('api_key')
parser.add_argument('playlist_id',
help='The playlist id.')
parser.add_argument('-d', '--download',
help='Download the videos to this directory too. This will also save metadata on each video '
'to a text file, one per video.')
parser.add_argument('-m', '--meta', action='store_true',
help='Just save metadata file on each video.')
parser.add_argument('-M', '--max-length', type=int, default=999999,
help='Don\'t download videos longer than this. Give a time, in minutes. The metadata file '
'will still be created, though.')
parser.add_argument('--max-results', type=int, default=50,
help='The maximum number of videos to fetch from the playlist at a time. It will always fetch '
'all videos in the playlist, but this changes how big the chunks are.')
parser.add_argument('-l', '--log', type=argparse.FileType('w'), default=sys.stderr,
help='Print log messages to this file instead of to stderr. Warning: Will overwrite the file.')
volume = parser.add_mutually_exclusive_group()
volume.add_argument('-q', '--quiet', dest='volume', action='store_const', const=logging.CRITICAL,
default=logging.WARNING)
volume.add_argument('-v', '--verbose', dest='volume', action='store_const', const=logging.INFO)
volume.add_argument('--debug', dest='volume', action='store_const', const=logging.DEBUG)
return parser
def main(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
logging.basicConfig(stream=args.log, level=args.volume, format='%(message)s')
if args.download:
if youtube_dl is None:
fail('Error: youtube_dl package required for --download.')
downloaded = read_downloaded_video_dir(args.download)
playlist = fetch_playlist(args.api_key, args.playlist_id, args.max_results)
for playlist_video in playlist['items']:
index = playlist_video['snippet']['position']+1
metadata = {'playlist_item':playlist_video}
video_id = playlist_video['snippet']['resourceId']['videoId']
video, reason = fetch_video(args.api_key, video_id)
metadata['video'] = video
metadata['video_id'] = video_id
if video is None:
metadata['missing_reason'] = reason
metadata['channel'] = None
else:
metadata['channel'] = fetch_channel(args.api_key, video['snippet']['channelId'])
print(format_metadata_human(index, metadata))
if args.download:
#TODO: Allow skipping if the video was added to the playlist very recently.
# The video added date is in playlist['items'][i]['snippet']['publishedAt'].
errors = []
filename = None
skip_download = False
if video_id in downloaded:
video_data = downloaded[video_id]
move_files(downloaded, video_id, index)
video_data['verified'] = True
if 'file' in video_data or video_data.get('downloaded'):
skip_download = True
filename = video_data.get('file')
if skip_download:
logging.warning('Video already downloaded. Skipping..')
elif args.meta:
pass
elif video is None:
logging.warning('Video not found. Skipping download..')
elif parse_duration(video['contentDetails']['duration']) > args.max_length*60:
logging.warning('Video too long to be downloaded. Skipping..')
else:
logging.warning('Downloading..')
filename, errors = download_video(video_id, args.download, prefix='{} - '.format(index))
save_metadata(args.download, index, metadata, filename, errors)
print()
if args.download:
trash_dir = os.path.join(args.download, 'trash')
for video_id, video_data in downloaded.items():
if not video_data['verified']:
if not os.path.isdir(trash_dir):
os.makedirs(trash_dir)
logging.warning('Video {} does not seem to be in the playlist anymore. Moving to {}..'
.format(video_id, trash_dir))
if 'file' in video_data:
path = os.path.join(video_data['dir'], video_data['file'])
shutil.move(path, os.path.join(trash_dir, video_data['file']))
if 'meta' in video_data:
path = os.path.join(video_data['dir'], video_data['meta'])
shutil.move(path, os.path.join(trash_dir, video_data['meta']))
def read_downloaded_video_dir(dirpath):
"""Find existing video and metadata files previously downloaded by this script."""
videos = {}
for filename in os.listdir(dirpath):
fields = filename.split('.')
if filename.endswith('.metadata.yaml') and len(fields) == 4:
# Read metadata file.
try:
index = int(fields[0])
except ValueError:
continue
with open(os.path.join(dirpath, filename), 'r') as meta_file:
metadata = yaml.safe_load(meta_file)
video_id = fields[1]
video_data = videos.get(video_id, {})
video_data['index'] = index
video_data['meta'] = filename
if metadata.get('downloaded'):
video_data['downloaded'] = True
videos[video_id] = video_data
else:
# Read video filename.
      video_id = parse_video_id(filename, strict=True)
      if video_id is None:
        continue
fields = filename.split(' - ')
index = int(fields[0])
video_data = videos.get(video_id, {})
video_data['index'] = index
video_data['file'] = filename
video_data['name'] = ' - '.join(fields[1:])
videos[video_id] = video_data
for video_id, video_data in videos.items():
video_data['dir'] = dirpath
    # verified: Whether this has been verified to still be in the playlist (default False).
video_data['verified'] = False
return videos
def read_existing_video_dir(dirpath):
"""Search for any video files that include their video id in the filename."""
videos = {}
for dirpath, dirnames, filenames in os.walk(dirpath):
for filename in filenames:
video_id = parse_video_id(filename, strict=False)
if video_id is not None:
videos[video_id] = {'dir':dirpath, 'file':filename}
return videos
def parse_video_id(filename, strict=True):
"""Try to retrieve a video id from a filename."""
if strict:
# The id must be within ' [id XXXXXXXXXXX]' at the end of the filename (right before the
# file extension).
fields = filename.split('.')
if len(fields) < 2 or not fields[-2].endswith(']') or fields[-2][-17:-12] != ' [id ':
return None
video_id = fields[-2][-12:-1]
if re.search(r'[^0-9A-Za-z_-]', video_id):
return None
else:
i = filename.find(' [id ')
if i != -1 and len(filename) > i+16 and filename[i+16] == ']':
# Find a ' [id XXXXXXXXXXX]' anywhere in the filename?
video_id = filename[i+5:i+16]
if re.search(r'[^0-9A-Za-z_-]', video_id):
return None
else:
return None
return video_id
def move_files(downloaded, video_id, index):
"""Check if the current video has already been downloaded, but with a different name, then move it
to the proper name.
Do the same with metadata files."""
if video_id not in downloaded:
return False
metadata = downloaded[video_id]
if index == metadata['index']:
return False
logging.warning('Video {} already saved. Renumbering from {} to {}..'
.format(video_id, metadata['index'], index))
# Move the video file.
if 'file' in metadata:
old_path = os.path.join(metadata['dir'], metadata['file'])
new_path = os.path.join(metadata['dir'], '{} - {}'.format(index, metadata['name']))
check_and_move(old_path, new_path)
# Move the metadata file.
if 'meta' in metadata:
old_path = os.path.join(metadata['dir'], metadata['meta'])
new_path = os.path.join(metadata['dir'], '{}.{}.metadata.yaml'.format(index, video_id))
check_and_move(old_path, new_path)
return True
def check_and_move(src, dst):
if os.path.exists(dst):
fail('Error: Cannot move file {!r}. Destination {!r} already exists.'.format(src, dst))
try:
shutil.move(src, dst)
except FileNotFoundError:
fail('Error: Cannot move file {!r} (file not found).'.format(src))
except PermissionError as error:
fail('Error: Cannot move file {!r}. {}: {}'.format(src, type(error).__name__, error.args[1]))
def format_metadata_human(index, metadata):
if metadata['video'] is None:
return '{}: [{missing_reason}]\nhttps://www.youtube.com/watch?v={video_id}'.format(index, **metadata)
else:
return """{:<3s} {title}
Channel: {channel_title} - https://www.youtube.com/channel/{channel_id}
Upload date: {upload_date}
https://www.youtube.com/watch?v={video_id}""".format(
str(index)+':',
title=metadata['video']['snippet']['title'],
channel_title=metadata['channel']['snippet']['title'],
channel_id=metadata['channel']['id'],
upload_date=metadata['video']['snippet']['publishedAt'][:10],
video_id=metadata['video_id']
)
def format_metadata_yaml(metadata, got_file, errors=()):
output = collections.OrderedDict()
output['url'] = 'https://www.youtube.com/watch?v='+metadata['video_id']
if metadata['video'] is None:
output[metadata['missing_reason']] = True
else:
output['title'] = metadata['video']['snippet']['title']
output['channel'] = metadata['channel']['snippet']['title']
output['channelUrl'] = 'https://www.youtube.com/channel/'+metadata['channel']['id']
output['uploaded'] = metadata['video']['snippet']['publishedAt'][:10]
output['addedToPlaylist'] = metadata['playlist_item']['snippet']['publishedAt'][:10]
output['length'] = parse_duration(metadata['video']['contentDetails']['duration'])
# Do some cleaning of the description string to let it appear as a clean literal in the yaml
# file. Human readability is more important than 100% fidelity here, since we're just trying to
# archive the description to give some sense of the context.
# Note: PyYAML will output strings with newlines as literal line breaks (readable), unless there
# is whitespace at the start or end of any line in the string.
desc_lines = metadata['video']['snippet']['description'].splitlines()
output['description'] = '\n'.join([line.strip() for line in desc_lines])
for error in set(errors):
if error != 'exists':
output[error] = True
if got_file:
output['downloaded'] = True
return yaml.dump(output, default_flow_style=False)
def save_metadata(dest_dir, index, metadata, filename, errors=()):
if filename is None:
got_file = False
else:
video_path = os.path.join(dest_dir, filename)
got_file = os.path.isfile(video_path) and os.path.getsize(video_path) > 0
meta_path = os.path.join(dest_dir, '{}.{}.metadata.yaml'.format(index, metadata['video_id']))
if os.path.exists(meta_path):
logging.warning('Warning: Metadata file {} already exists. Avoiding overwrite..'
.format(meta_path))
with open(meta_path, 'w') as meta_file:
meta_file.write(format_metadata_yaml(metadata, got_file, errors)+'\n')
def parse_duration(dur_str):
assert dur_str.startswith('PT'), dur_str
hours = 0
minutes = 0
seconds = 0
for time_spec in re.findall(r'\d+[HMS]', dur_str):
if time_spec.endswith('H'):
hours = int(time_spec[:-1])
elif time_spec.endswith('M'):
minutes = int(time_spec[:-1])
elif time_spec.endswith('S'):
seconds = int(time_spec[:-1])
return hours*60*60 + minutes*60 + seconds
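# Worked example (added for illustration): an ISO-8601 duration like 'PT1H2M3S'
# parses to 1*3600 + 2*60 + 3 == 3723 seconds; 'PT45S' parses to 45.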
##### Begin Youtube API section #####
def fetch_playlist(api_key, playlist_id, max_results=50):
playlist = None
params = {
'playlistId':playlist_id,
'maxResults':max_results,
'part':'snippet',
'key':api_key
}
nextPageToken = None
done = False
while not done:
params['pageToken'] = nextPageToken
data = call_api('playlistItems', params, api_key)
nextPageToken = data.get('nextPageToken')
if nextPageToken is None:
done = True
if playlist is None:
playlist = data
else:
playlist['items'].extend(data['items'])
return playlist
def fetch_channel(api_key, channel_id):
params = {
'id':channel_id,
'part':'snippet',
}
data = call_api('channels', params, api_key)
return data['items'][0]
def fetch_video(api_key, video_id):
params = {
'id':video_id,
'part':'snippet,contentDetails'
}
data = call_api('videos', params, api_key)
if data['items']:
return data['items'][0], None
elif data['pageInfo']['totalResults'] == 1:
return None, 'deleted'
else:
return None, 'private'
def call_api(api_name, params, api_key):
our_params = params.copy()
our_params['key'] = api_key
response = requests.get(API_URL+api_name, params=our_params)
if response.status_code != 200:
error = get_error(response)
if error:
fail('Error fetching playlist data. Server message: '+str(error))
else:
fail('Error fetching playlist data. Received a {} response.'.format(response.status_code))
return response.json()
def get_error(response):
data = response.json()
if 'error' in data:
return data['error'].get('message')
else:
return None
##### End Youtube API section #####
##### Begin youtube-dl section #####
def download_video(video_id, destination, quality='18', prefix=''):
filename_template = (prefix+'%(title)s [src %(uploader)s, %(uploader_id)s] '
'[posted %(upload_date)s] [id %(id)s].%(ext)s')
prev_dir = os.getcwd()
try:
os.chdir(destination)
ydl_opts = {
'format':quality,
'outtmpl':filename_template,
'logger':YoutubeDlLogger(),
#TODO: xattrs
}
try:
call_youtube_dl(video_id, ydl_opts)
except youtube_dl.utils.DownloadError as error:
if hasattr(error, 'exc_info'):
if error.exc_info[1].args[0] == 'requested format not available':
del ydl_opts['format']
call_youtube_dl(video_id, ydl_opts)
filename = get_video_filename(DownloadMetadata, video_id)
if filename is not None:
set_date_modified(filename, DownloadMetadata['errors'])
return filename, DownloadMetadata['errors']
finally:
os.chdir(prev_dir)
def call_youtube_dl(video_id, ydl_opts):
DownloadMetadata['titles'] = []
DownloadMetadata['merged'] = None
DownloadMetadata['errors'] = []
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download(['https://www.youtube.com/watch?v={}'.format(video_id)])
def get_video_filename(download_metadata, video_id):
if download_metadata['merged']:
logging.debug('Video created from merged video/audio.')
filename = download_metadata['merged']
elif len(download_metadata['titles']) == 1:
filename = download_metadata['titles'][0]
elif download_metadata['errors']:
for error in download_metadata['errors']:
if error == 'blocked':
logging.error('Error: Video {} blocked.'.format(video_id))
elif error == 'restricted':
logging.warning('Error: Video {} restricted and unavailable.'.format(video_id))
elif error == 'unavailable':
logging.warning('Error: Video {} unavailable.'.format(video_id))
elif error == 'exists':
logging.warning('Video already downloaded. Skipping..')
if not download_metadata['errors']:
logging.error('Error: Video {} not downloaded.'.format(video_id))
filename = None
elif len(download_metadata['titles']) == 0:
fail('Error: failed to determine filename of downloaded video {}'.format(video_id))
elif len(download_metadata['titles']) > 1:
fail('Error: found multiple potential filenames for downloaded video {}:\n{}'
.format(video_id, '\n'.join(download_metadata['titles'])))
return filename
def set_date_modified(path, errors):
now = time.time()
try:
os.utime(path, (now, now))
except FileNotFoundError:
if not errors:
fail('Error: Downloaded video {}, but downloaded file not found.'.format(path))
# Define global dict to workaround problem that some data is only available from log messages that
# can only be obtained by intercepting in a hook (no other way to return the data).
DownloadMetadata = {'titles':[], 'merged':None, 'errors':[]}
class YoutubeDlLogger(object):
def debug(self, message):
# Ignore standard messages.
if message.startswith('[youtube]'):
if (message.endswith(': Downloading webpage') or
message.endswith(': Downloading video info webpage') or
message.endswith(': Downloading MPD manifest')):
return
elif message.startswith('[dashsegments] Total fragments: '):
return
elif message.startswith('\r\x1b[K[download]'):
if ' ETA ' in message[-20:]:
return
elif message.startswith('Deleting original file '):
return
# Extract video title info from log messages.
if message.startswith('[download]'):
if message[10:24] == ' Destination: ':
DownloadMetadata['titles'].append(message[24:])
return
elif (message.endswith('has already been downloaded and merged') or
message.endswith('has already been downloaded')):
DownloadMetadata['errors'].append('exists')
elif message.startswith('[ffmpeg] Merging formats into '):
DownloadMetadata['merged'] = message[31:-1]
return
logging.info(message)
def info(self, message):
logging.info(message)
def warning(self, message):
logging.warning(message)
def error(self, message):
#TODO: Blocked videos seem to list that fact in
# video['contentDetails']['regionRestriction']['blocked'] (it's a list of countries it's
# blocked in). Could just check for 'US' in that list. Note: according to the documentation,
# an empty list means it's not blocked anywhere. There's also an 'allowed' list that may
# be there instead. If it is, it's viewable everywhere not on that list (even if it's empty).
# See https://developers.google.com/youtube/v3/docs/videos#contentDetails.regionRestriction
if message.startswith('\x1b[0;31mERROR:\x1b[0m'):
if (message[17:51] == ' This video contains content from ' and (
message.endswith('. It is not available.') or
message.endswith('. It is not available in your country.') or
message.endswith(', who has blocked it on copyright grounds.') or
message.endswith(', who has blocked it in your country on copyright grounds.'))):
DownloadMetadata['errors'].append('blocked')
return
elif message[17:] == ' The uploader has not made this video available.':
DownloadMetadata['errors'].append('restricted')
return
elif message[17:] == ' This video is not available.':
DownloadMetadata['errors'].append('unavailable')
return
logging.error(message)
def critical(self, message):
logging.critical(message)
##### End youtube-dl section #####
def fail(message):
logging.critical(message)
if __name__ == '__main__':
sys.exit(1)
else:
raise Exception('Unrecoverable error')
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except BrokenPipeError:
pass
| true
|
12820f6a00d5be3cc5a6ef3dc99a2eec89f87af4
|
Python
|
j5s/getdomain
|
/lib/log.py
|
UTF-8
| 649
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import logging  # import the logging module
logging.basicConfig(level=logging.INFO,
                    format='[-]%(asctime)s-[%(levelname)s]: %(message)s')  # configure the log output format and level
if __name__ == '__main__':
    # The basic config sets the level to INFO, so all of the messages below
    # except the DEBUG one will be printed to the console.
    logging.info('this is a logging info message')
    logging.debug('this is a logging debug message')
    logging.warning('this is a logging warning message')
    logging.error('this is a logging error message')
    logging.critical('this is a logging critical message')
| true
|
d6d1bf14c30e03422362c59e411627a1b782f036
|
Python
|
pbourachot/cours
|
/cours4/mymorpion.py
|
UTF-8
| 3,652
| 3.53125
| 4
|
[] |
no_license
|
import turtle as tu
# TODO:
# **** Clear Button
# **** Check whether a cross is already present
# **** Win detection ???
# Settings
height = 400
width = 400
speed = 0
epaisseur = 5
nbEssai = 0
tableau = [['','',''], # bottom row
           ['','',''], # middle row
           ['','','']] # top row
def printTableau():
for a in reversed(tableau):
print(a)
def addCase(x,y,signe):
global tableau
tableau[y][x] = signe
def caseEstVide(x,y):
case = tableau[y][x]
if (case == ''):
return True
else :
return False
def verifieResultat(x,y):
    signe = tableau[y][x]
    # check the row
    if (tableau[y][0] == tableau[y][1] == tableau[y][2]):
        print("Row complete")
        tu.textinput("WIN", "Row complete")
    # check the column
    if (tableau[0][x] == tableau[1][x] == tableau[2][x]):
        print("Column complete")
        tu.textinput("WIN", "Column complete")
    # check the diagonals
    if (tableau[0][0] == tableau[1][1] == tableau[2][2] == signe):
        print("Rising diagonal")
        tu.textinput("WIN", "Diagonal complete")
    if (tableau[0][2] == tableau[1][1] == tableau[2][0] == signe):
        print("Falling diagonal")
        tu.textinput("WIN", "Diagonal complete")
# Reset everything
def clear():
    print("Clear")
    global tableau
    tu.reset()
    # initialize
    initialize()
    # draw the grid
    dessineGrille()
    tableau = [['','',''], # bottom row
               ['','',''], # middle row
               ['','','']] # top row
def dessineO(x,y):
print("Dessine un rond dans la case x,y")
tu.color("blue")
tu.pu()
tu.goto(20 + 120*x + 5*x , 60 + 120*y + 5*y)
tu.pd()
tu.circle(40)
print(tu.position())
def dessineX(x,y):
print("Dessine une croix dans la case x,y")
tu.color("red")
tu.pu()
tu.goto(20 + 120*x + 5*x , 40 + 60 + 120*y + 5*y)
tu.pd()
tu.goto(20 + 120*x + 5*x + 80, -40 + 60 + 120*y + 5*y)
tu.pu()
tu.goto(20 + 120*x + 5*x + 80 , 40 + 60 + 120*y + 5*y)
tu.pd()
tu.goto(20 + 120*x + 5*x , -40 + 60 + 120*y + 5*y)
#tu.circle(40)
def click(x,y):
global nbEssai, tableau
nbEssai += 1
print("Click %s %s " %(x,y))
caseX = int(x / 120)
caseY = int(y / 120)
print("Click %s %s " %(caseX,caseY))
if (caseX == 0 and caseY == 3):
clear()
elif (caseEstVide(caseX, caseY)):
if (nbEssai % 2 == 1):
dessineX(caseX,caseY)
addCase(caseX, caseY, 'X')
else :
dessineO(caseX,caseY)
            addCase(caseX, caseY, 'O')  # no trailing space, so win checks compare equal signs
verifieResultat(caseX,caseY)
printTableau()
def initialize():
tu.setworldcoordinates(0,0,400,400)
tu.pensize(epaisseur)
tu.speed(speed)
print(tu.position())
tu.onscreenclick(click)
#tu.onscreenclick(tu.goto)
tu.pu()
tu.goto(10,380)
tu.pd()
tu.right(-90)
tu.forward(10)
tu.right(90)
tu.forward(50)
tu.right(90)
tu.forward(10)
tu.right(90)
tu.forward(50)
tu.right(-180)
#tu.right(-90)
    tu.write('Restart')
#tu.ht()
def dessineGrille():
tu.pu()
tu.goto(0,120)
tu.pd()
tu.fd(370)
tu.pu()
tu.goto(0,245)
tu.pd()
tu.fd(370)
tu.right(90)
tu.pu()
tu.goto(120,370)
tu.pd()
tu.fd(370)
tu.pu()
tu.goto(245,370)
tu.pd()
tu.fd(370)
def main():
# initialize
initialize()
# Dessine la grille
dessineGrille()
if __name__ == '__main__':
main()
tu.TK.mainloop()
| true
|
efa7ae2becbaf4d8a46bb5dbb342593c3c582d0c
|
Python
|
OseungKwon/Beakjoon-Algorithms
|
/브루트 포스/2798 블랙잭.py
|
UTF-8
| 280
| 2.796875
| 3
|
[] |
no_license
|
n, m = map(int, input().split())
arr = list(map(int, input().split()))
max_m = 0
for i in range(0, n-2):
for j in range(i+1, n-1):
for k in range(j+1, n):
if arr[i]+arr[j]+arr[k] <= m:
max_m = max(max_m, arr[i]+arr[j]+arr[k])
print(max_m)
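# A shorter equivalent using itertools (illustrative sketch; the brute force
# above is already fine for n <= 100):
#
#     from itertools import combinations
#     print(max(sum(c) for c in combinations(arr, 3) if sum(c) <= m))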
| true
|
bcac17eb3962e0daa95066e977da95d3dbb44c15
|
Python
|
heeya15/PythonCodingTest
|
/09_최단 경로/최단경로-실전문제/전보.py
|
UTF-8
| 5,343
| 3.59375
| 4
|
[] |
no_license
|
"""
(실전 문제) 전보 p, 262
- 어떤 나라에는 N개의 도시가 있다.
그리고 각 도시는 보내고자 하는 메시지가 있는 경우, 다른 도시로 전보를 보내서
다른 도시로 해당 메시지를 전송할 수 있다
- 하지만 ' X라는 도시 '에서 ' Y라는 도시 '로 [ 전보를 보내고자 한다면 ],
도시 [ X에서 Y로 향하는 " 통로 " ]가 ' 설치되어 있어야 한다. '
- 예를 들어 X에서 Y로 향하는 통로는 있지만, [ Y에서 X로 향하는 ] ' 통로가 없다면 '
[ [ Y는 ] --> [ X로 ] 메시지를 보낼 수 없다. ]
또한 [ 통로를 거쳐 메시지를 보낼 때 ]는 " 일정 시간이 소요 "된다.
- 어느 날 'C라는 도시'에서 [ 위급 상황이 발생 ]했다. 그래서 [ 최대한 많은 도시로 메시지를 보내고자 ] 한다.
메시지는 [ 도시 C에서 출발 ]하여 ' 각 도시 사이에 설치된 통로를 거쳐 ', [ 최대한 많이 퍼져나갈 것 ]이다
- [ 각 도시의 번호 ]와 [ 통로가 설치되어 있는 정보가 주어졌을 때 ],
[ 도시 C에서 ] 보낸 메시지를 받게 되는[ 도시의 개수는 ] 총 몇 개이며
[ 도시들이 모두 메시지를 받는 데 ] 까지 ' 걸리는 시간 '은 얼마인지 계산하는 프로그램을 작성하라
------------------------------------------------------------------------------
[ 입력 조건 ]
- 첫째 줄에 " 도시의 개수 [ N ]과" "통로의 개수 [ M ]", 메시지를 [ 보내고자 하는 도시 C가 ] 주어진다.
(1 <= N <= 30,000, 1<= M <=200,000, 1 <= C <= N)
- 둘째 줄부터 [ M+1번째 줄 ]에 걸쳐서 [ 통로에 대한 정보 X,Y,Z ]가 주어진다.
이는 < '특정도시 X' >에서 ' < 다른 특정도시 Y로 이어지는 통로 ' >가 있으며,
< '메시지가 전달되는 시간이 Z' >라는 의미
(1 <= X, Y <= N, 1 <= Z <= 1,000)
[ 출력 조건 ]
- 첫째 줄에 [ 도시 C에서 보낸 메시지를 받는 ] "도시의 총 개수"와 "총 걸리는 시간 "을
' 공백으로 구분 '하여 출력한다.
[ 입력 ] [ 출력 ]
3 2 1
1 2 4
1 3 2 2 4 -- > (메시지를 '받는' 도시의 총개수 = '2', 총 걸리는 시간 = '4')
------------------------------------------------------------------------------
( 문제 아이디어 )
- 핵심: 한 도시에서 다른 도시까지의 [ 최단 거리 문제 ]로 치환할 수 있다.
- N과 M의 범위가 충분히 크기 때문에 우선순위 큐를 활용한 다익스트라 알고리즘을 구현.
-
"""
# Book solution 9-5.py (p. 263)
import heapq
import sys
input = sys.stdin.readline
INF = int(1e9)  # use one billion to represent infinity
# read the number of nodes, the number of edges, and the start node
n, m, c = map(int, input().split())
# adjacency list holding, for each node, the nodes connected to it
graph = [[] for i in range(n + 1)]
# initialize the shortest-distance table to infinity
distance = [INF] * (n + 1)
# read all edge (channel) information
for _ in range(m):
    x, y, z = map(int, input().split())
    # going from node X to node Y costs Z
    graph[x].append((y, z))
def dijkstra(start):
    q = []
    # heapq.heappush takes the list used as the heap and the value to push.
    # Push the start node with a shortest distance of 0.
    heapq.heappush(q, (0, start))
    distance[start] = 0  # the distance to the start node is 0
    while q:  # while the queue is not empty
        # pop the node with the smallest tentative distance
        dist, now = heapq.heappop(q)
        # if the table already holds a smaller distance, this node was
        # processed before, so skip it
        if distance[now] < dist:
            continue
        # check the nodes adjacent to the current node
        for i in graph[now]:
            cost = dist + i[1]  # distance to `now` plus the edge cost
            # if going through the current node yields a shorter distance
            if cost < distance[i[0]]:
                distance[i[0]] = cost  # record it in the shortest-distance table
                heapq.heappush(q, (cost, i[0]))  # push the updated (distance, node) pair
# run Dijkstra's algorithm from the sending city (the start node)
dijkstra(c)
print()
print("Output")
# number of reachable nodes
count = 0
# shortest distance to the farthest reachable node
max_distance = 0
for d in distance:
    # reachable node
    if d != INF:
        count += 1
        max_distance = max(max_distance, d)
# the start node itself must be excluded, hence count - 1
print(count - 1, max_distance)
| true
|
051e20546935a5e3af7125c712e7d3bed00ca1ff
|
Python
|
cyber-chuvash/redir-balancer
|
/tests/test_cdn_url_builder.py
|
UTF-8
| 1,298
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
import pytest
from balancer.cdn_url_builder import CDNURLBuilder
@pytest.mark.parametrize(
['origin_url', 'exp_cdn_url'],
(
('http://s1.origin-cluster/video/1488/xcg2djHckad.m3u8',
'http://cdn.test/s1/video/1488/xcg2djHckad.m3u8'),
('http://s2.origin-cluster/video/5423/test34289laala.m3u8',
'http://cdn.test/s2/video/5423/test34289laala.m3u8'),
('http://s2.origin-cluster//video/5423/test34289laala.m3u8', # double slash //
'http://cdn.test/s2/video/5423/test34289laala.m3u8'), # single slash /
('http://s2.origin-cluster/video/5423/%7Etest34289.m3u8', # percent-encoded char (RFC 3986 s. 2.1)
'http://cdn.test/s2/video/5423/%7Etest34289.m3u8'),
)
)
def test_cdn_url_builder(origin_url: str, exp_cdn_url: str) -> None:
url_builder = CDNURLBuilder(cdn_host='cdn.test')
assert url_builder.make_cdn_url(origin_url) == exp_cdn_url
@pytest.mark.parametrize(
'bad_url',
(
'http:///video/5423/%7Etest34289.m3u8',
'http://test.com',
'http://test.com/',
)
)
def test_cdn_url_builder_bad_url(bad_url: str) -> None:
url_builder = CDNURLBuilder(cdn_host='cdn.test')
with pytest.raises(ValueError):
url_builder.make_cdn_url(bad_url)
| true
|
17c330412172503086a7fff5b8503daba28bf2df
|
Python
|
wangyendt/LeetCode
|
/Contests/201-300/week 279/2164. Sort Even and Odd Indices Independently/Sort Even and Odd Indices Independently.py
|
UTF-8
| 702
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: wangye(Wayne)
@license: Apache Licence
@file: Sort Even and Odd Indices Independently.py
@time: 2022/02/15
@contact: wang121ye@hotmail.com
@site:
@software: PyCharm
# code is far away from bugs.
"""
from typing import *
class Solution:
def sortEvenOdd(self, nums: List[int]) -> List[int]:
a1 = [n for i, n in enumerate(nums) if i & 1]
a2 = [n for i, n in enumerate(nums) if not (i & 1)]
a1.sort(reverse=True)
a2.sort()
ret = []
for aa1, aa2 in zip(a1, a2):
ret.append(aa2)
ret.append(aa1)
if len(a2) > len(a1):
ret.append(a2[-1])
return ret
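# A terser equivalent using slicing (illustrative sketch with the same
# semantics): sort even indices ascending and odd indices descending, then
# interleave, appending the leftover even-index element when len(nums) is odd.
#
#     def sortEvenOdd(nums):
#         evens = sorted(nums[::2])
#         odds = sorted(nums[1::2], reverse=True)
#         out = [v for pair in zip(evens, odds) for v in pair]
#         return out + evens[len(odds):]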
| true
|
3530379fda44847319ecd859cd566a5c8ec6c4a9
|
Python
|
ikechuku/practice-python
|
/practice/5.py
|
UTF-8
| 143
| 3.703125
| 4
|
[] |
no_license
|
print("what is your name")
fName = input("first name: \n")
lName = input("last name: \n")
print("Your full name is \n" + lName + " " + fName)
| true
|
962a05c83bf1c8871a0c041e583d7155446d88c9
|
Python
|
Hilary02/atcoder
|
/ABC/159/c.py
|
UTF-8
| 34
| 3.03125
| 3
|
[] |
no_license
|
n = int(input())
print((n/3)**3)
| true
|
7efefd5e622ee4d5a60bf0affef9e8c14959c876
|
Python
|
boosker/Cybersecurity-Final-Project
|
/test/analysis.py
|
UTF-8
| 11,305
| 3.078125
| 3
|
[] |
no_license
|
"""
CSCI 5742 Final Project
Vedant Singhania & Jacob Jolly
PROJECT NAME: Honeypot Analysis Tool
PROJECT DESCRIPTION: The H.A.T. takes data from the modified Adminer log file
                     and parses it into IP addresses, usernames, and passwords
                     for invalid logins. It then lets the user look at graphs
                     of each of the three, check whether there are any
                     connections, and determine whether an IP address belongs
                     to an attacker.
"""
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import numpy as np
import ipaddress
import requests
LARGE_FONT = ("Verdana", 12)
class HATAnalysis(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
tk.Tk.wm_title(self, "Honeypot Analysis Tool")
container = tk.Frame(self)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (Main, UsernameGraphPage, PasswordGraphPage, IPAddressesGraphPage):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(Main)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
class Main(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Start Page", font=LARGE_FONT)
label.pack(pady=10, padx=10)
# Create button for Analysis
btn = ttk.Button(self, text="IP Analysis",
command=self.ipanalysis)
btn.pack()
# Create button for Username Graph Page
btn2 = ttk.Button(self, text="Top Username Graph",
command=lambda: controller.show_frame(UsernameGraphPage))
btn2.pack()
# Create button for Password Graph Page
btn3 = ttk.Button(self, text="Top Password Graph",
command=lambda: controller.show_frame(PasswordGraphPage))
btn3.pack()
# Create button for IP Address Graph Page
btn4 = ttk.Button(self, text="Top IP Address Graph",
command=lambda: controller.show_frame(IPAddressesGraphPage))
btn4.pack()
# IP Analyzer Function
def ipanalysis(self):
text = "log.csv"
        IPAdds = []
        IPCluData = []
        IPTimes = []
        IPPages = []
        IPAgent = []
        IPUser = []
        IPPass = []
APIKey = "6cec2419ee3377ca5124db685feeed1c"
with open(text, "r") as f:
# Read in file line by line and add IP addresses to arrays
for line in f:
IPAdds.append(line.split(',')[1])
# Add ip address into array to be clustered together
IPCluData.append(int(ipaddress.ip_address(line.split(',')[1])))
f.close()
# Put Latitude, Longitude, Country, and Region into arrays
IPLats, IPLons, IPCouns, IPRegs = self.getGeoData(APIKey, IPAdds)
# When Analysis is done, pop up window displays 'Analysis Completed'
messagebox.showinfo(title="Python H.A.T.", message="Analysis Completed")
# Latitude/Longitude Function to find Geo location from IP Addresses using FreeGeoIP web service
def getGeoData(self, apikey, ip_list=[], lats=[], lons=[], countries=[], regions=[]):
# Go through IP list and request information about them from FreeGeoIP
for ip in ip_list:
r = requests.get("http://api.ipstack.com/" + ip + "?access_key=" + apikey)
json_response = r.json()
print("{ip}, {region_name}, {country_name}, {latitude}, {longitude}".format(**json_response))
# Parse the JSON data into needed parts
if json_response['latitude'] and json_response['longitude']:
lats.append(json_response['latitude'])
lons.append(json_response['longitude'])
if json_response['country_name'] and json_response['region_name']:
countries.append(json_response['country_name'])
regions.append(json_response['region_name'])
return lats, lons, countries, regions
# Page to Graph Usernames
class UsernameGraphPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Top Username Graph Page", font=LARGE_FONT)
label.pack(pady=10, padx=10)
btn = ttk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(Main))
btn.pack()
# Function to count how many of an element there are in a list
def countX(lst, x):
return lst.count(x)
# Function to sort the Seen elements according to what the counted elements
# would be in ascending order
def sort_list(list1, list2):
zipped_pairs = zip(list2, list1)
z = [x for _, x in sorted(zipped_pairs)]
return z
text = "log.csv"
Usernames = []
with open(text, "r") as f:
# Read in file line by line and add IP addresses to arrays
for line in f:
split = line.split(',')
if (split[2] == "INVALIDLOGIN"):
Usernames.append(split[3])
f.close()
SeenUsers = []
UserCount = []
# Make a list of Usernames seen, count how many there are, and add it to UserCount
for user in Usernames:
if user in SeenUsers:
continue
else:
SeenUsers.append(user)
UserCount.append(countX(Usernames, user))
# Sort Seen list according to the ascending order UserCount could be
SeenSort = sort_list(SeenUsers, UserCount)
UserCount.sort()
# Add a bar graph to the page to display
f = Figure(figsize=(5, 5), dpi=100)
a = f.add_subplot(111)
        a.bar(SeenSort, UserCount)  # use the sorted names so bars line up with the sorted counts
canvas = FigureCanvasTkAgg(f, self)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar = NavigationToolbar2Tk(canvas, self)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# Page to Graph Passwords
class PasswordGraphPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Top Password Graph Page", font=LARGE_FONT)
label.pack(pady=10, padx=10)
btn = ttk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(Main))
btn.pack()
# Function to count how many of an element there are in a list
def countX(lst, x):
return lst.count(x)
# Function to sort the Seen elements according to what the counted elements
# would be in ascending order
def sort_list(list1, list2):
zipped_pairs = zip(list2, list1)
z = [x for _, x in sorted(zipped_pairs)]
return z
text = "log.csv"
Passwords = []
with open(text, "r") as f:
# Read in file line by line and add Passwords to arrays
for line in f:
split = line.split(',')
if (split[2] == "INVALIDLOGIN"):
Passwords.append(split[4])
f.close()
SeenPasses = []
PassCount = []
# Make a list of passwords seen, count how many there are, and add it to PassCount
for password in Passwords:
if password in SeenPasses:
continue
else:
SeenPasses.append(password)
PassCount.append(countX(Passwords, password))
# Sort Seen list according to the ascending order PassCount could be
SeenSort = sort_list(SeenPasses, PassCount)
PassCount.sort()
# Add a bar graph to the page to display
f = Figure(figsize=(5, 5), dpi=100)
a = f.add_subplot(111)
a.bar(SeenSort, PassCount, align='center')
canvas = FigureCanvasTkAgg(f, self)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar = NavigationToolbar2Tk(canvas, self)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# Page to Graph IP Addresses
class IPAddressesGraphPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Top IP Address Graph Page", font=LARGE_FONT)
label.pack(pady=10, padx=10)
btn = ttk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(Main))
btn.pack()
# Function to count how many of an element there are in a list
def countX(lst, x):
return lst.count(x)
# Function to sort the Seen elements according to what the counted elements
# would be in ascending order
def sort_list(list1, list2):
zipped_pairs = zip(list2, list1)
z = [x for _, x in sorted(zipped_pairs)]
return z
text = "log.csv"
Addresses = []
with open(text, "r") as f:
# Read in file line by line and add IP addresses to arrays
for line in f:
split = line.split(',')
if (split[2] == "INVALIDLOGIN"):
Addresses.append(split[1])
f.close()
SeenAdds = []
AddsCount = []
# Make a list of addresses seen, count how many there are, and add it to AddsCount
for addr in Addresses:
if addr in SeenAdds:
continue
else:
SeenAdds.append(addr)
AddsCount.append(countX(Addresses, addr))
# Sort Seen list according to the ascending order PassCount could be
SeenSort = sort_list(SeenAdds, AddsCount)
AddsCount.sort()
# Add a bar graph to the page to display
f = Figure(figsize=(5, 5), dpi=100)
a = f.add_subplot(111)
a.bar(SeenSort, AddsCount)
canvas = FigureCanvasTkAgg(f, self)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar = NavigationToolbar2Tk(canvas, self)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# Initialize and start the Main page
tool = HATAnalysis()
tool.mainloop()
| true
|
ae5e2a15862c974b61734bb09c646ee498d518cc
|
Python
|
OSGeoLabBp/tutorials
|
/english/data_processing/lessons/code/gpx2kml.py
|
UTF-8
| 849
| 2.875
| 3
|
[
"CC0-1.0"
] |
permissive
|
import sys
from os import path
from osgeo import ogr
"""
convert gpx files into kml
usage: python gpx2kml input.gpx input1.gpx ...
python gpx2kml *.gpx
"""
inDriver = ogr.GetDriverByName('GPX') # get ogr driver for gpx files
if inDriver is None:
    print('GPX driver not found')
    sys.exit()
outDriver = ogr.GetDriverByName('KML') # get ogr driver for kml files
if outDriver is None:
    print('KML driver not found')
    sys.exit()
for inName in sys.argv[1:]: # go through input files
src = inDriver.Open(inName, 0) # open for read
outName = path.splitext(inName)[0] + '.kml'
if path.exists(outName): # ogr can't overwrite output
outDriver.DeleteDataSource(outName) # delete !!!! danger
dst = outDriver.CopyDataSource(src, outName) # copy to destination
| true
|
f65c3e6d4b6240c49574add16d75768943861445
|
Python
|
sarihuminer/project-python-bchirot
|
/create_buttons.py
|
UTF-8
| 1,850
| 2.765625
| 3
|
[] |
no_license
|
import sys
import party
import random
import db
from PySide2 import QtCore, QtWidgets, QtGui
class MyWidget(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.allParty_list=[]
self.allParty = db.cursor.execute('select * from party ')
for p in self.allParty:
self.allParty_list.append(party.Party(p[0],p[1],p[2]))
self.layout = QtWidgets.QVBoxLayout()
for p in self.allParty_list:
self.button = QtWidgets.QPushButton(p.char+' \n '+p.name)
self.button.setStyleSheet("background-color: Blue ;color: white; border-style: outset;height:100;width:100")
# self.text = QtWidgets.QLabel("Hello World")
#self.text.setAlignment(QtCore.Qt.AlignCenter)
# self.layout.addWidget(self.text)
self.layout.addWidget(self.button)
self.button.clicked.connect(self.magic)
self.setLayout(self.layout)
    # NOTE: unfinished stub -- self.mainWidget, self.populate_lights and
    # self.window are never defined, so calling this method would fail.
    def light_palette_ui(self):
self.vertical_layout_main = QtWidgets.QVBoxLayout(self.mainWidget)
self.scroll = QtWidgets.QScrollArea()
self.scroll.setWidgetResizable(True)
self.vertical_layout_main.addWidget(self.scroll)
self.scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scroll_widget = QtWidgets.QWidget()
self.scroll.setWidget(self.scroll_widget)
self.populate_lights()
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.show()
    def magic(self):
        # self.hello was never defined; update the clicked button
        # (the signal's sender) instead.
        self.sender().setText("Clicked!")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
widget = MyWidget()
widget.light_palette_ui()
widget.show()
sys.exit(app.exec_())
| true
|
1ecf12de6b88ce90e6a9d71041ab045bf857753d
|
Python
|
atena-data/Python-Bootcamp-Codes
|
/Day 46 - Web Scraping - Top 100 movies/main.py
|
UTF-8
| 707
| 3.421875
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
import requests
URL = "https://www.timeout.com/newyork/movies/best-movies-of-all-time"
response = requests.get(URL)
# loading the website's content
website = response.text
# using BeautifulSoup to scrape the website
soup = BeautifulSoup(website, "html.parser")
website_headings = soup.find_all(name="h3", class_="card-title")
# list of the top 100 movies from the website
top_100_movies = [movie.getText() for movie in website_headings]
top_100_movies.pop(len(top_100_movies)-1)
# adding the top 100 movies to a new txt file
movies = ''
for movie in top_100_movies:
movies += f"{movie.strip()}\n"
with open("top_100_movies.txt", "w") as file:
file.write(movies)
| true
|
1f1f82386a22339d7ac79cef661e46fa0949eb71
|
Python
|
Cunillet/Project-Week-5-Your-Own-Project
|
/market_scrapper/search_corrupted_Data.py
|
UTF-8
| 149
| 2.71875
| 3
|
[] |
no_license
|
import pandas as pd
if __name__ == '__main__':
df = pd.read_csv('data/csv/S&P_500.csv')
for index, row in df.iterrows():
print(row)
| true
|
82d167530d91cf90e1fb0cbf3a392d1977e0ae4d
|
Python
|
K1ngDedede/Mogolla-Gaming
|
/Mogolla Analytics/Jueguelo/firebase_connection.py
|
UTF-8
| 1,526
| 2.859375
| 3
|
[] |
no_license
|
import pyrebase
FIREBASE_KEY = ""
firebaseConfig = {
"apiKey": FIREBASE_KEY,
"authDomain": "proyecto-de-grado-7e7d3.firebaseapp.com",
"databaseURL": "https://proyecto-de-grado-7e7d3-default-rtdb.firebaseio.com",
"projectId": "proyecto-de-grado-7e7d3",
"storageBucket": "proyecto-de-grado-7e7d3.appspot.com",
"messagingSenderId": "588928611789",
"appId": "1:588928611789:web:2fbb4ffb4bf5b5af6f8084"
}
firebase = pyrebase.initialize_app(firebaseConfig)
db = firebase.database()
# Returns a list of dictionaries, one per game session.
# The keys of each dictionary are "fecha", holding the date the game was run,
# and "statsf1", holding a list of dictionaries for the phase-1 play sessions.
def armar_diccionario_sesiones():
sesiones_list = list()
sesiones = db.child("sesiones").get()
for sesion in sesiones.each():
sesion_dict = dict()
sesion_dict["fecha"] = sesion.val()["fecha"]
try:
if(sesion.val()["statsF1"] != None):
statsf1 = sesion.val()["statsF1"]
sesiones_f1 = list()
for llave_sesion_f1 in statsf1.keys():
info_sesion_f1 = statsf1[llave_sesion_f1]
sesiones_f1.append(info_sesion_f1)
sesion_dict["statsf1"] = sesiones_f1
except:
sesion_dict["statsf1"] = []
sesiones_list.append(sesion_dict)
return sesiones_list
print(armar_diccionario_sesiones())
| true
|
cd61f068baa6fb39c49987664cd35216af72fedf
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03186/s343869969.py
|
UTF-8
| 113
| 2.8125
| 3
|
[] |
no_license
|
a,b,c = map(int,input().split())
# equivalent to printing b + min(c, a + b + 1)
if c < a+b:
    print(b+c)
elif c > a+b:
    print(a+b+b+1)
else:
    print(c+b)
| true
|
3f2c5f870645de80639b617176b53b20beda4776
|
Python
|
ahedayat/Brent-Kung-Adder
|
/adders/brentkung/sum_logic.py
|
UTF-8
| 1,769
| 3.046875
| 3
|
[] |
no_license
|
import verilog as verilog
class SumLogic:
module_name = 'SumLogic'
def __init__(self, bitwidth):
self.bitwidth = bitwidth
def inputs(self):
Ps = ['P_{}'.format(ix) for ix in range(self.bitwidth+1)]
Gs = ['G_{}_0'.format(ix) for ix in range(self.bitwidth+1)]
return Ps, Gs
def outputs(self):
Ss = ['S_{}'.format(ix) for ix in range(1, self.bitwidth+1)]
c_out = 'C_out'
return c_out, Ss
def verilog(self, file_path, file_name):
m = verilog.Module(SumLogic.module_name)
Ps, Gs = self.inputs()
c_out, Ss = self.outputs()
for bit in range(1, self.bitwidth+1):
# Comment
m.comment('Bit {}'.format(bit))
# Instantiation
m.stmt_assign("S_{}".format(bit), "{g_im1_0} ^ {p_i}".format(
g_im1_0="G_{}_0".format(bit-1),
p_i="P_{}".format(bit)))
# Carry
m.comment('Carry Out')
m.stmt_assign(c_out, 'G_{}_0'.format(self.bitwidth))
for bit, (p, g) in enumerate(zip(Ps, Gs)):
m.input(p, 'input')
m.input(g, 'input')
for s in Ss:
m.output(s, 'output')
m.output(c_out, 'output')
m.start()
m.end()
m.write(file_path, file_name)
def instantiation(self, instance_name, inputs, outputs):
"""
inputs: dict{ port: ? , connector: ?}
outputs: dict{ port: ? , connector: ?}
"""
return verilog.Module.instantiate(module_name=SumLogic.module_name,
instance_name=instance_name,
inputs=inputs,
outputs=outputs)
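# Minimal usage sketch (illustrative; assumes the local `verilog` helper module
# imported above): emit the sum-logic stage for a 4-bit adder.
#
#     SumLogic(bitwidth=4).verilog(file_path='.', file_name='sum_logic.v')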
| true
|
22b95fd2d009ad2cef6cc8852fc0d522ab62ccc9
|
Python
|
yaglm/yaglm
|
/yaglm/metrics/clf.py
|
UTF-8
| 1,736
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
from sklearn.metrics import accuracy_score, roc_auc_score, \
balanced_accuracy_score, f1_score, precision_score, recall_score, \
log_loss
def get_binary_clf_scores(y_true, y_pred, y_score=None,
sample_weight=None, level=1):
"""
    Scores a binary classifier.
Parameters
----------
y_true: array-like, (n_samples, )
The ground truth labels.
y_pred: array-like, (n_samples, )
The predicted labels.
y_score: array-like, (n_samples, )
        The predicted scores (e.g. the probabilities)
sample_weight: array-like shape (n_samples,)
Sample weights.
level: int
How much data to return.
Output
------
out: dict
The scores.
"""
out = {}
out['accuracy'] = accuracy_score(y_pred=y_pred, y_true=y_true,
sample_weight=sample_weight)
if y_score is not None:
out['roc_auc'] = roc_auc_score(y_true=y_true, y_score=y_score,
sample_weight=sample_weight)
        if level >= 2:
            out['log_loss'] = log_loss(y_true=y_true, y_pred=y_score,
                                       sample_weight=sample_weight)
if level >= 2:
out['balanced_accuracy'] = \
balanced_accuracy_score(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
out['f1'] = f1_score(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
out['precision'] = precision_score(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
out['recall'] = recall_score(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
return out
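# Minimal usage sketch (illustrative values, not part of the original module):
#
#     import numpy as np
#     y_true = np.array([0, 1, 1, 0, 1])
#     y_pred = np.array([0, 1, 0, 0, 1])
#     y_score = np.array([0.2, 0.9, 0.4, 0.1, 0.8])
#     print(get_binary_clf_scores(y_true, y_pred, y_score, level=2))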
| true
|
b1a46dc1fee08da68fcb9a91c7589d431b405ab1
|
Python
|
hyc12345hyc/hyc
|
/xiaojiayu3.7.1/xiaojiayu_exercise_py/index.py
|
UTF-8
| 4,056
| 3.4375
| 3
|
[] |
no_license
|
# 000 A pleasant start
# 001 My first close encounter with Python
# 002 Designing a first game with Python
# 003 Interlude: variables and strings
# 004 Improving our little game: guess the number (4_0 improved guess-the-number)
# 005 Small talk: Python's data types
# 006 Python's common operators
# 007 The remarkable branches and loops 1
# 008 The remarkable branches and loops 2
# 009 The remarkable branches and loops 3
# 010 Lists: an array on steroids 1
# 011 Lists: an array on steroids 2
# 012 Lists: an array on steroids 3
# 013 Tuples: lists in shackles
# 014 Strings: all kinds of quirky built-in methods
# 015 Strings: formatting
# 016 Sequences! Sequences!
# 017 Functions: Python's Lego bricks (Euclid's algorithm for the greatest common divisor)
# 018 Functions: flexibility is power
# 019 Functions: my turf, my rules
# 020 Functions: nested functions and closures
# 021 Functions: lambda expressions
# 022 Functions: what on earth is recursion (Fibonacci sequence)
# 023 Recursion: those little rabbits
# 024 Recursion: the Tower of Hanoi
# 025 Dictionaries: when indexing falls short 1 (ways to create a dictionary)
# 026 Dictionaries: when indexing falls short 2
# 027 Sets: in my world, you are the one and only
# 028 Files: because I understand you, forever
# 029 Files: a task -- splitting a file (record.txt)
# 030 File system: introducing something fancy
# 031 Persistent storage: pickling a jar of tasty pickles (splitting record.txt with pickle)
# 032 Exception handling: you can't always be right 1
# 033 Exception handling: you can't always be right 2 (improved guess-the-number game)
# 034 The rich else clause and the concise with statement (largest divisor of a positive integer)
# 035 Getting started with GUIs: EasyGui
# 036 Classes and objects: let me introduce you to objects
# 037 Classes and objects: object-oriented programming
# 038 Classes and objects: inheritance
# 039 Classes and objects: odds and ends
# 040 Classes and objects: some related BIFs
# 041 Magic methods: construction and destruction
# 042 Magic methods: arithmetic operations 1
# 043 Magic methods: arithmetic operations 2
# 044 Magic methods: simple customization
# 045 Magic methods: attribute access
# 046 Magic methods: descriptors (how Property works)
# 047 Magic methods: custom sequences
# 048 Magic methods: iterators
# 049 Cameo: generators
# 050 Modules: a module is a program
# 051 Modules: __name__=='__main__', search paths and packages
# 052 Modules: think like a geek
# 053 On the self-cultivation of a crawler 1
# 054 On the self-cultivation of a crawler 2: practice
# 055 On the self-cultivation of a crawler 3: hiding
# 056 On the self-cultivation of a crawler 4: OOXX
# 057 On the self-cultivation of a crawler 5: regular expressions
# 058 On the self-cultivation of a crawler 6: regular expressions 2
# 059 On the self-cultivation of a crawler 7: regular expressions 3
# 060 On the self-cultivation of a crawler 8: regular expressions 4
# 061 On the self-cultivation of a crawler 9: exception handling
# 062 On the self-cultivation of a crawler 10: installing Scrapy
# 063 On the self-cultivation of a crawler 11: a first look at the Scrapy framework
# 064 The ultimate GUI choice: Tkinter 1
# 065 The ultimate GUI choice: Tkinter 2
# 066 The ultimate GUI choice: Tkinter 3
# 067 The ultimate GUI choice: Tkinter 4
# 068 The ultimate GUI choice: Tkinter 5
# 069 The ultimate GUI choice: Tkinter 6
# 070 The ultimate GUI choice: Tkinter 7
# 071 The ultimate GUI choice: Tkinter 8
# 072 The ultimate GUI choice: Tkinter 9
# 073 The ultimate GUI choice: Tkinter 10
# 074 The ultimate GUI choice: Tkinter 11
# 075 The ultimate GUI choice: Tkinter 12
# 076 The ultimate GUI choice: Tkinter 13
# 077 The ultimate GUI choice: Tkinter 14
# 078 Pygame: nice to meet you all
# 079 Pygame: clearing things up
# 080 Pygame: events
# 081 Pygame: improving the game's looks 1
# 082 Pygame: improving the game's looks 2
# 083 Pygame: improving the game's looks 3
# 084 Pygame: drawing basic shapes
# 085 Pygame: animated sprites
# 086 Pygame: collision detection
# 087 Pygame: playing sounds and effects
# 088 Pygame: friction
# 089 Pygame: winning the game
# 090 Pygame: plane war 1
# 091 Pygame: plane war 2
# 092 Pygame: plane war 3
# 093 Pygame: plane war 4
# 094 Pygame: plane war 5
# 095 Pygame: plane war 6
# 096 Pygame: plane war 7
|
8594fa63c4516f544814273981a08da3f9bac06d
|
Python
|
skoter87/testanketapython3
|
/anketa.py
|
UTF-8
| 753
| 4.21875
| 4
|
[] |
no_license
|
name = input('Enter your first name: ')
surname = input('Enter your last name: ')
age = int(input('Enter your age: '))
weight = int(input('Enter your weight: '))
if age < 30 and 50 < weight < 120:  # weight within the healthy range
    print('You are in excellent shape!')
elif age >= 40 and (weight <= 50 or weight >= 120):
    print('You should see a doctor!')
elif age <= 20 and 35 < weight < 70:
    print("The student's health is in good shape")
print('Patient first name: ' + name)
print('Patient last name: ' + surname)
print('Patient age: ' + str(age))
print('Patient weight: ' + str(weight))
| true
|
951628b64ab17c7c8ec297aeea5a5545ec049760
|
Python
|
tnightengale/string_matching
|
/pyqt_implementation/pyqt_examples/view_frames_example/view_frames_example.py
|
UTF-8
| 1,956
| 3.28125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 31 09:37:38 2019
@author: TeghanN
"""
'''
First of all, the classes that Qt Designer produces are not widgets, and if
you modify the generated code, recompiling the .ui will make you lose those
modifications. For those two reasons I recommend keeping both generated
files untouched.
'''
from PyQt5 import QtWidgets
from ui_firstwindow import ui_Firstwindow
from ui_secondwindow import ui_Secondwindow
'''
Your problem is that to show a window you have to access the window object,
but in your case if you want to do it in several files you may have problems
with circular imports, undefined variables, etc. The correct thing is that all
windows have the same scope.
Then we will create a main.py file where we will implement the classes that
implement the widgets using the previous design. We create a class where the
windows will be created and we will connect the clicked signals to the show()
method of the other window. In each class the clicked signal of the button is
connected to the hide() method of the window.
'''
class Firstwindow(QtWidgets.QMainWindow, ui_Firstwindow):
def __init__(self, parent=None):
super(Firstwindow, self).__init__(parent)
self.setupUi(self)
self.pushButton.clicked.connect(self.hide)
class Secondwindow(QtWidgets.QDialog, ui_Secondwindow):
def __init__(self, parent=None):
super(Secondwindow, self).__init__(parent)
self.setupUi(self)
self.pushButton_2.clicked.connect(self.hide)
class Manager:
def __init__(self):
self.first = Firstwindow()
self.second = Secondwindow()
self.first.pushButton.clicked.connect(self.second.show)
self.second.pushButton_2.clicked.connect(self.first.show)
self.first.show()
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
manager = Manager()
sys.exit(app.exec_())
| true
|
f621625aff0a95c43b934331a2dd7307196da494
|
Python
|
karstenes/Nondisjunction
|
/bot.py
|
UTF-8
| 4,751
| 2.53125
| 3
|
[] |
no_license
|
import discord
import isodate
import re
import logging
from googleapiclient.discovery import build
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='/logs/discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
vsearch = []
apikey = 'MzIxMzM4NDQ1ODYxNDg2NTky.DBclTA.NbxGmWj0CcYxI6e1F7n8XEv67rw'
client = discord.Client()
DEVELOPER_KEY = "AIzaSyARhlZ59COWmLaEejRpl2ArsvAompbuJFk"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def youtube_search(q, results=5):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
search_response = youtube.search().list(
q=q,
part="id,snippet",
order="relevance",
maxResults=results
).execute()
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
sr = youtube.videos().list(
part="contentDetails, snippet",
id=search_result["id"]["videoId"]
).execute()
video = sr["items"][0]
videos.append([video["snippet"]["title"], str(isodate.parse_duration(video["contentDetails"]["duration"])),
search_result["id"]["videoId"]])
return videos
print(youtube_search("test"))
def urltest(string):
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if regex.fullmatch(string):
return True
else:
return False
@client.event
async def on_ready():
print("Ready")
print("https://discordapp.com/oauth2/authorize?client_id=" + client.user.id + "&scope=bot&permissions=2146958591")
# noinspection PyGlobalUndefined,PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences
@client.event
async def on_message(message):
if message.content.startswith("//"):
        await client.delete_message(message)  # coroutine: must be awaited
command = message.content.split(" ")[0][2:]
args = message.content.split(" ")[1:]
print(command)
if command == "ping":
await client.send_message(message.channel, message.author.mention + ' pong')
elif command == "play":
if urltest(args[0]):
pass
else:
if vsearch:
if len("".join(args)) == 1:
try:
vno = int(args[0])
global ytplayer
                            ytplayer = await voiceclient.create_ytdl_player("https://www.youtube.com/watch?v=%s" % vsearch[vno][2])  # element 2 holds the video id
                        except ValueError:  # int() raises ValueError on non-numeric input
                            print("Single character, not a video selection")
else:
embed = discord.Embed(title="Youtube Search", description="Select using `//play n`", color=0xe52d27)
embed.set_author(name=message.author.name,
icon_url=message.author.avatar_url)
embed.set_thumbnail(url='https://www.youtube.com/yt/brand/media/image/YouTube-icon-full_color.png')
if args[0].startswith("r="):
results = int(args[0][2:])
embed.set_footer(text="Searched for \"" + " ".join(args[1:]) + "\"")
search = youtube_search(" ".join(args[1:]), results=results)
else:
embed.set_footer(text="Searched for \"" + " ".join(args) + "\"")
search = youtube_search(" ".join(args))
vsearch.append(message.author.id)
for video in search:
vsearch.append(video)
embed.add_field(name=str(search.index(video) + 1), value="%s (%s)" % (video[0], video[1]))
print(vsearch)
await client.send_message(message.channel, embed=embed)
if not client.is_voice_connected(message.server):
global voiceclient
voiceclient = await client.join_voice_channel(message.author.voice.voice_channel)
elif command == "stop":
if client.is_voice_connected(message.server):
                await voiceclient.disconnect()  # coroutine: must be awaited
else:
pass
client.run("MzIxMzM4NDQ1ODYxNDg2NTky.DBcgitsFA.tTub_MC96w4mfM2j8_WiZTCR9R0")
| true
|
178de38ce11261e3e7121a6be10316f15d5958f2
|
Python
|
HIT-GH/EPN-CEC-Python
|
/test16-FunctionIsPrime-20210629.py
|
UTF-8
| 1,013
| 3.921875
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 18:14:28 2021
@author: HendersonIturralde
"""
print("")
print("≡-≡-≡-≡ Generador de Números Primos ≡-≡-≡-≡")
#--- fun: GENERADOR DE NÚMEROS PRIMOS -------------------
def generador_primos(x):
y = 2
while y < x:
is_prime = True
for item in range (2,y):
resto = y % item
if resto==0:
is_prime = False
break
if is_prime==True:
print(" ", y)
y += 1
#--- End Fun: GENERADOR DE NÚMEROS PRIMOS ---------------
seguir = "S"
while seguir == "S":
max_val = int(input("Ingrese el valor mayor a 2 para comprobar: "))
print("Los primos entre 1 y", max_val, "son:")
generador_primos(max_val+1)
seguir = (input('Presione "S" si quiere generar otra cadena: ')).upper()
#--- EoF ------------------------------------------------
| true
|
64d15982c00b93c5bfd39a9e68d129222991e090
|
Python
|
chenyangbin/pywork
|
/day06函数/15迭代器.py
|
UTF-8
| 2,967
| 3.890625
| 4
|
[] |
no_license
|
# Project path: c:\Users\bin\OneDrive\share\pywork\day06函数\15迭代器.py
# Created: 2019.03.17
# Goal: using generators
# Author: binyang
# -*- coding:utf-8 -*-
# Generators traverse data lazily, which saves memory; internal state records
# the traversal position, and next() returns the next element in order.
# Properties:
# 1. Data is computed lazily, saving memory.
# 2. State is recorded, and next() advances to the next state.
# 3. Generators are iterable: every generator is an iterator, but not every
#    iterator is a generator.
# Ways to create one:
# 1. Change a list comprehension's brackets from [] to () to get a generator
#    expression (an expression, not a collection of elements).
# Example:
l = [i for i in range(1, 100) if i % 2 == 0]   # list comprehension
l2 = (i for i in range(1, 100) if i % 2 == 0)  # generator expression
print("list comprehension", l)
print("generator expression", l2)
# Access the generator's data (a generator is a special kind of iterator)
print("generator via next()", next(l2))
print("generator via next()", next(l2))
print("generator via __next__()", l2.__next__())
# Creation way 2: a function containing a yield statement; calling the
# function returns a generator.
# Property:
# 1. yield suspends execution; each next() (or __next__()) call resumes the
#    function until the next yield suspends it again.
def test():
    print("xxxx")
    yield 1
    # print(1)
    print("aa")
    yield 2
    print(2)
    yield 3
    print(3)
g = test()  # merely calling the function does not run the generator;
            # its body only runs via next() or __next__()
print("creating a generator, way 2", g)
print("creating a generator, way 2", next(g))
print("creating a generator, way 2", next(g))
# Ways to make a generator produce data:
# 1. next()
# 2. __next__()
# 3. for ... in
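# For illustration (added), way 3 consumes a fresh generator in a loop:
#
#     for v in (i * i for i in range(3)):
#         print(v)  # prints 0, 1, 4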
# Generator methods:
# 1. send(value): resumes the generator, and its argument becomes the return
#    value of the yield expression the generator was last suspended at.
# 2. This allows assigning values through yield expressions.
# 3. Note: on the very first call, use g.send(None).
def test2():
    print("xxxxxx")
    res1 = yield 1  # first yield, then the function is suspended
    print(res1)
    res2 = yield 2  # second yield
    print(res2)
g1 = test2()  # does not print "xxxxxx": calling alone does not start the body
print("generator operation", g1.__next__())
# print("generator operation", g1.__next__())  # would run up to the second yield
print("generator operation, send() assigns to the suspended yield", g1.send("oooo"))
def cose_scq():
yield 1
print("a")
return 10 # 运行到此处以后就会结束运行 抛出异常
yield 2
print("b")
yield 3
print("c")
a =cose_scq()
print(a.__next__())
print(a.__next__())
print("关闭生成器")
a.close() # 关闭生成器
#生成器已经关闭 print(a.__next__())
# 注意,生成器的迭代器使用一次迭代完毕以后,就不能再次使用了,除非再次调用生成器迭代,即生成器只能遍历一次
| true
|