blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
25ac4e8eb06407912972e2d251398ab5d151e95b | Python | Farrythf/LeetCode_DefeatProcess | /NO.62/FirstTry.py | UTF-8 | 576 | 3.046875 | 3 | [] | no_license | class Solution(object):
def uniquePaths(self, m, n):
    """
    :type m: int  (number of grid rows)
    :type n: int  (number of grid columns)
    :rtype: int   (number of distinct monotone paths)
    """
    # Every path through an m x n grid is a sequence of (m-1) "down"
    # moves and (n-1) "right" moves, so the answer is the binomial
    # coefficient C(m+n-2, m-1).  Computing it iteratively with exact
    # integer arithmetic replaces the previous exponential recursion
    # and its hand-coded special cases for m/n in {1, 2, 3}.
    result = 1
    for i in range(1, m):
        # Each step multiplies in the next factor of C(n-1+i, i);
        # the running product is always exactly divisible by i.
        result = result * (n - 1 + i) // i
    return result
| true |
58db6285b8a1b32f767c66082b1d2f9676757086 | Python | CrtomirJuren/pygame-projects | /beginning-game-development/Chapter 12/model3d.py | UTF-8 | 5,468 | 2.625 | 3 | [
"MIT"
] | permissive |
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
import os.path
class Material(object):
    """A named surface material with an optional texture.

    Populated while parsing a .mtl file; ``texture_id`` is filled in
    once the texture image has been uploaded to OpenGL.
    """

    def __init__(self):
        self.name = ""              # material name from the .mtl file
        self.texture_fname = None   # texture image file name, if any
        self.texture_id = None      # OpenGL texture object id, once bound
class FaceGroup(object):
    """A run of triangles that all share one material."""

    def __init__(self):
        self.tri_indices = []    # (vertex, texcoord, normal) index triples
        self.material_name = ""  # key into the model's materials dict
class Model3D(object):
    """A textured Wavefront OBJ model rendered through OpenGL.

    Geometry (vertices, texture coordinates, normals) is shared by all
    face groups; each face group references a material loaded from the
    model's .mtl library.  Rendering can be cached in a display list.
    """

    def __init__(self):
        self.vertices = []          # (x, y, z) tuples
        self.tex_coords = []        # (s, t) tuples
        self.normals = []           # (x, y, z) tuples
        self.materials = {}         # material name -> Material
        self.face_groups = []       # FaceGroup instances, in file order
        self.display_list_id = None

    def __del__(self):
        # Called when the model is cleaned up by Python
        self.free_resources()

    def free_resources(self):
        """Release the display list, all textures and the geometry."""
        # Delete the display list
        if self.display_list_id is not None:
            glDeleteLists(self.display_list_id, 1)
            self.display_list_id = None
        # Delete any textures we used
        for material in self.materials.values():
            if material.texture_id is not None:
                glDeleteTextures(material.texture_id)
        # Clear all the materials
        self.materials.clear()
        # Clear the geometry lists
        del self.vertices[:]
        del self.tex_coords[:]
        del self.normals[:]
        del self.face_groups[:]

    def read_obj(self, fname):
        """Parse the OBJ file *fname*, load its material library and
        upload all referenced textures to OpenGL."""
        current_face_group = None
        # Bug fix: use a context manager so the file is always closed.
        with open(fname) as file_in:
            for line in file_in:
                # Parse command and data from each line
                words = line.split()
                if not words:
                    # Bug fix: blank lines used to raise IndexError on
                    # words[0].  ('#' comment lines already fall through
                    # the elif chain harmlessly.)
                    continue
                command = words[0]
                data = words[1:]
                if command == 'mtllib':  # Material library
                    model_path = os.path.split(fname)[0]
                    mtllib_path = os.path.join(model_path, data[0])
                    self.read_mtllib(mtllib_path)
                elif command == 'v':  # Vertex
                    x, y, z = data
                    vertex = (float(x), float(y), float(z))
                    self.vertices.append(vertex)
                elif command == 'vt':  # Texture coordinate
                    s, t = data
                    tex_coord = (float(s), float(t))
                    self.tex_coords.append(tex_coord)
                elif command == 'vn':  # Normal
                    x, y, z = data
                    normal = (float(x), float(y), float(z))
                    self.normals.append(normal)
                elif command == 'usemtl':  # Use material
                    current_face_group = FaceGroup()
                    current_face_group.material_name = data[0]
                    self.face_groups.append(current_face_group)
                elif command == 'f':
                    assert len(data) == 3, "Sorry, only triangles are supported"
                    # Parse indices from v/t/n triples; OBJ indices are
                    # 1-based, hence the -1.
                    for word in data:
                        vi, ti, ni = word.split('/')
                        indices = (int(vi) - 1, int(ti) - 1, int(ni) - 1)
                        current_face_group.tri_indices.append(indices)
        # Upload every referenced texture.
        for material in self.materials.values():
            model_path = os.path.split(fname)[0]
            texture_path = os.path.join(model_path, material.texture_fname)
            texture_surface = pygame.image.load(texture_path)
            texture_data = pygame.image.tostring(texture_surface, 'RGB', True)
            material.texture_id = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, material.texture_id)
            glTexParameteri(GL_TEXTURE_2D,
                            GL_TEXTURE_MAG_FILTER,
                            GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D,
                            GL_TEXTURE_MIN_FILTER,
                            GL_LINEAR_MIPMAP_LINEAR)
            glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
            width, height = texture_surface.get_rect().size
            gluBuild2DMipmaps(GL_TEXTURE_2D,
                              3,
                              width,
                              height,
                              GL_RGB,
                              GL_UNSIGNED_BYTE,
                              texture_data)

    def read_mtllib(self, mtl_fname):
        """Parse the material library *mtl_fname* into self.materials."""
        material = None
        # Bug fix: use a context manager so the file is always closed.
        with open(mtl_fname) as file_mtllib:
            for line in file_mtllib:
                words = line.split()
                if not words:
                    # Bug fix: blank lines used to raise IndexError.
                    continue
                command = words[0]
                data = words[1:]
                if command == 'newmtl':
                    material = Material()
                    material.name = data[0]
                    self.materials[data[0]] = material
                elif command == 'map_Kd':
                    # Assumes a 'newmtl' line has already been seen.
                    material.texture_fname = data[0]

    def draw(self):
        """Render every face group with immediate-mode OpenGL calls."""
        vertices = self.vertices
        tex_coords = self.tex_coords
        normals = self.normals
        for face_group in self.face_groups:
            material = self.materials[face_group.material_name]
            glBindTexture(GL_TEXTURE_2D, material.texture_id)
            glBegin(GL_TRIANGLES)
            for vi, ti, ni in face_group.tri_indices:
                glTexCoord2fv(tex_coords[ti])
                glNormal3fv(normals[ni])
                glVertex3fv(vertices[vi])
            glEnd()

    def draw_quick(self):
        """Draw via a cached display list, compiling it on first use."""
        if self.display_list_id is None:
            self.display_list_id = glGenLists(1)
            glNewList(self.display_list_id, GL_COMPILE)
            self.draw()
            glEndList()
        glCallList(self.display_list_id)
| true |
5e562909d559c558f22f9770164823e342aa824c | Python | lucasmma/Trabalho-de-Orientacao-a-Objeto | /Exceptions.py | UTF-8 | 1,111 | 2.71875 | 3 | [] | no_license | class Error(Exception):
"""Base class for other exceptions"""
pass
class InvalidMenuNumberException(Error):
    "Raised when an invalid value is selected in the menu"
    pass
class PlacaInvalidaException(Error):
    "Raised when a license plate does not match the expected mask"
    pass
class DadosVeiculosIncompletosException(Error):
    "Raised when the vehicle data is incomplete"
    pass
class DadosAcessoIncompletosException(Error):
    "Raised when the access data is incomplete"
    pass
class DadosPessoaisIncompletosException(Error):
    "Raised when a natural person's personal data is incomplete"
    pass
class EstacionamentoFechadoException(Error):
    "Raised when the parking lot is closed"
    pass
class VeiculoDuplicadoException(Error):
    "Raised when a duplicate vehicle is registered"
    pass
class PessoaFisicaDuplicadaException(Error):
    # Docstring fixed: the original said "duplicate vehicle" (copy-paste).
    "Raised when a duplicate natural person is registered"
    pass
class VeiculoNaoEncontradoException(Error):
    "Raised when the vehicle is not found in the parking lot"
    pass
class PeriodoInvalidoException(Error):
    "Raised for an invalid time period (exit time >= entry time, per the original note)"
    pass
class PessoaFisicaInexistenteException(Error):
    "Raised when the natural person has not been registered yet"
    pass
f4252b356d2129f835a950b12040b49f5a8c1c8b | Python | sbabineau/data-structures | /tests/binarytree_tests.py | UTF-8 | 3,289 | 3.5 | 4 | [] | no_license | import unittest
from data_structures.binarytree import BinaryTree as Tree
class BinaryTreeTests(unittest.TestCase):
    """Insertion, membership, size, depth and balance checks."""

    def setUp(self):
        self.tree = Tree(7)

    def test_insert(self):
        self.tree.insert(9)
        self.assertTrue(self.tree.contains(9))

    def test_reinsert(self):
        # Re-inserting the existing root value must not grow the tree.
        self.tree.insert(7)
        self.assertEqual(self.tree.size(), 1)

    def test_contains_false(self):
        self.assertFalse(self.tree.contains(6))

    def test_size(self):
        self.assertEqual(self.tree.size(), 1)
        self.tree.insert(6)
        self.assertEqual(self.tree.size(), 2)

    def test_depth(self):
        for value in (8, 9, 10, 5):
            self.tree.insert(value)
        self.assertEqual(self.tree.depth(), 4)

    def test_balance_pos(self):
        for value in (10, 8, 9):
            self.tree.insert(value)
        self.assertEqual(self.tree.balance(), 3)

    def test_balance_neg(self):
        for value in (1, 2, 9):
            self.tree.insert(value)
        self.assertEqual(self.tree.balance(), -1)

    def test_balance_even(self):
        for value in (10, 4):
            self.tree.insert(value)
        self.assertEqual(self.tree.balance(), 0)
class EmptyTests(unittest.TestCase):
    """Size, depth and balance of a tree created with no root value."""

    def setUp(self):
        self.tree = Tree(None)

    def test_empty_size(self):
        self.assertEqual(self.tree.size(), 0)

    def test_depth_empty(self):
        self.assertEqual(self.tree.depth(), 0)

    def test_empty_balance(self):
        self.assertEqual(self.tree.balance(), 0)
class TraversalTests(unittest.TestCase):
    """The four traversal orders of a fixed seven-node tree."""

    def setUp(self):
        self.tree = Tree(10)
        for i in [5, 15, 4, 6, 14, 16]:
            self.tree.insert(i)

    def test_in_order(self):
        # list() drains the traversal iterator directly; the manual
        # append loops this replaces were needlessly verbose.
        self.assertEqual(list(self.tree.in_order()),
                         [4, 5, 6, 10, 14, 15, 16])

    def test_post_order(self):
        self.assertEqual(list(self.tree.post_order()),
                         [4, 6, 5, 14, 16, 15, 10])

    def test_pre_order(self):
        self.assertEqual(list(self.tree.pre_order()),
                         [10, 5, 4, 6, 15, 14, 16])

    def test_breadth_first(self):
        self.assertEqual(list(self.tree.breadth_first()),
                         [10, 5, 15, 4, 6, 14, 16])
class DeleteTests(unittest.TestCase):
    """Deletion of nodes with two, one and zero children."""

    def setUp(self):
        self.tree = Tree(10)
        for value in (5, 15, 4, 6, 14, 17):
            self.tree.insert(value)

    def test_delete_two_child(self):
        self.assertTrue(self.tree.contains(5))
        self.tree.delete(5)
        self.assertFalse(self.tree.contains(5))
        self.assertTrue(self.tree.contains(4))

    def test_delete_single_child(self):
        self.tree.insert(3)
        self.assertTrue(self.tree.contains(4))
        self.tree.delete(4)
        self.assertFalse(self.tree.contains(4))
        self.assertTrue(self.tree.contains(3))

    def test_delete_no_child(self):
        self.assertTrue(self.tree.contains(4))
        self.tree.delete(4)
        self.assertFalse(self.tree.contains(4))
if __name__ == '__main__':
    # Run all test cases when executed as a script.
    unittest.main()
| true |
48c1af009c69f22c0f6c46b930ac6afc7b149d58 | Python | ajayakumar123/cricket_task_project | /cricketProject/cricketApp/models.py | UTF-8 | 8,848 | 2.765625 | 3 | [] | no_license | from django.db import models
from datetime import datetime
from django.core.exceptions import ValidationError
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.urls import reverse
import pytz
utc=pytz.UTC
# Create your models here.
class Team(models.Model):
    """A cricket club with a running win/loss record and league points."""

    name = models.CharField(max_length=30)
    logo_uri = models.ImageField(max_length=255, upload_to='team_logo/')
    club_state = models.CharField(max_length=30)
    # Aggregate standings; kept in sync by Match.save() and by the
    # pre_delete receiver handle_deleted_match in this module.
    matches_played = models.IntegerField(default=0)
    matches_won = models.IntegerField(default=0)
    matches_lost = models.IntegerField(default=0)
    team_points = models.IntegerField(default=0)

    class Meta:
        # League table order: highest points first.
        ordering = ['-team_points']

    def __str__(self):
        return self.name
class Player(models.Model):
    """A squad member, with career batting statistics."""

    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    profile_picture = models.ImageField(max_length=255, upload_to='profiles/')
    jersey_number = models.IntegerField()
    country = models.CharField(max_length=30)
    team = models.ForeignKey(Team, related_name='players', related_query_name='players', on_delete=models.CASCADE)
    # Career batting statistics.
    no_of_matches = models.IntegerField('No of Matches Played')
    runs = models.IntegerField()
    highest_score = models.IntegerField()
    fifties = models.IntegerField()
    hundreds = models.IntegerField()
    strike_rate = models.FloatField()

    @property
    def full_name(self):
        "Returns the player's full name."
        return '%s %s' % (self.first_name, self.last_name)

    def __str__(self):
        "Returns the string representation of player object"
        return self.full_name
class Match(models.Model):
    """A fixture between two teams.

    Completed matches carry both scores and a winner (enforced by
    clean()); save() keeps the related Team standings in sync.
    """

    MATCH_CHOICES = (('team1', 'Team1'), ('team2', 'Team2'))
    match_date = models.DateTimeField()
    location = models.CharField(max_length=30)
    team1 = models.ForeignKey(Team, related_name='matches1', related_query_name='matches1', on_delete=models.CASCADE)
    team2 = models.ForeignKey(Team, related_name='matches2', related_query_name='matches2', on_delete=models.CASCADE)
    # Result fields stay NULL for upcoming matches; see clean().
    team1_score = models.IntegerField(blank=True, null=True)
    team2_score = models.IntegerField(blank=True, null=True)
    match_winner = models.CharField('Winner of the match', choices=MATCH_CHOICES, max_length=15, blank=True, null=True)

    class Meta:
        # Most recent matches first.
        ordering = ['-match_date']

    @property
    def match_name(self):
        "Returns the match's full name, e.g. 'team1-team2'."
        return '%s-%s' % (self.team1, self.team2)

    def get_absolute_url(self):
        return reverse('match_list')

    def __str__(self):
        "Returns the string representation of match object"
        return self.match_name

    @property
    def match_status(self):
        "Returns True when the match is completed, False when upcoming."
        today = datetime.now()
        # NOTE(review): debug print left in — consider removing/logging.
        print("match date", self.match_date, self.team1, self.team2)
        today = utc.localize(today)
        res = True
        if self.match_date > today:
            res = False
        return res

    def match_winner_team(self):
        ''' Returns the winning Team instance, or False if no winner set'''
        if self.match_winner:
            if self.match_winner == 'team1':
                return self.team1
            else:
                return self.team2
        else:
            return False

    def clean(self):
        '''Validates that the two teams differ and that the result fields
        are filled exactly when the match date is in the past.'''
        today = datetime.now()
        # NOTE(review): debug prints left in — consider removing/logging.
        print("match date", self.match_date, self.team1, self.team2)
        today = utc.localize(today)
        print("today is:", today)
        if self.team1 == self.team2:
            raise ValidationError('team1 and team2 must be different')
        if self.match_date > today:
            # Upcoming match: no result fields may be set yet.
            if self.team1_score or self.team2_score or self.match_winner:
                raise ValidationError(' " we can not add team1 score,team2 score and match winner for upcoming matches')
        else:
            # Completed match: all result fields are required.
            # NOTE(review): a legitimate score of 0 is falsy here, so a
            # completed match with a 0 score would be rejected — confirm.
            if not (self.team1_score and self.team2_score and self.match_winner):
                raise ValidationError(' " we must add team1 score,team2 score and match winner for completed matches')

    def save(self, *args, **kwargs):
        '''Updates the Team standings (played/won/lost/points) based on
        this match result, then delegates to the normal model save.'''
        # NOTE(review): debug prints left in — consider removing/logging.
        print("1111111111", self, *args, **kwargs)
        print("222222222", self.match_winner, self.team1, self.team2)
        print("222222222", self.id)
        if self.id is None:
            # Brand-new match: credit both teams with a played game and
            # the winner with a win plus 2 points.
            if self.match_winner and self.team1 and self.team2:
                team1_obj = Team.objects.get(id=self.team1.id)
                team2_obj = Team.objects.get(id=self.team2.id)
                team1_obj.matches_played = team1_obj.matches_played + 1
                team2_obj.matches_played = team2_obj.matches_played + 1
                if self.match_winner == 'team1':
                    team1_obj.matches_won = team1_obj.matches_won + 1
                    team1_obj.team_points = team1_obj.team_points + 2
                else:
                    team2_obj.matches_won = team2_obj.matches_won + 1
                    team2_obj.team_points = team2_obj.team_points + 2
                team1_obj.matches_lost = team1_obj.matches_played - team1_obj.matches_won
                team2_obj.matches_lost = team2_obj.matches_played - team2_obj.matches_won
                team1_obj.save()
                team2_obj.save()
        else:
            # Existing match: only act when the winner actually changed.
            orig_obj = Match.objects.get(id=self.id)
            if orig_obj.match_winner != self.match_winner:
                if orig_obj.match_winner is None:
                    # Result being recorded for the first time: same
                    # bookkeeping as for a new match.
                    if self.match_winner and self.team1 and self.team2:
                        team1_obj = Team.objects.get(id=self.team1.id)
                        team2_obj = Team.objects.get(id=self.team2.id)
                        team1_obj.matches_played = team1_obj.matches_played + 1
                        team2_obj.matches_played = team2_obj.matches_played + 1
                        if self.match_winner == 'team1':
                            team1_obj.matches_won = team1_obj.matches_won + 1
                            team1_obj.team_points = team1_obj.team_points + 2
                        else:
                            team2_obj.matches_won = team2_obj.matches_won + 1
                            team2_obj.team_points = team2_obj.team_points + 2
                        team1_obj.matches_lost = team1_obj.matches_played - team1_obj.matches_won
                        team2_obj.matches_lost = team2_obj.matches_played - team2_obj.matches_won
                        team1_obj.save()
                        team2_obj.save()
                else:
                    # Winner flipped: transfer the win and 2 points from
                    # the previously credited team to the other one.
                    if self.match_winner and self.team1 and self.team2:
                        team1_obj = Team.objects.get(id=self.team1.id)
                        team2_obj = Team.objects.get(id=self.team2.id)
                        if self.match_winner == 'team1':
                            team1_obj.matches_won = team1_obj.matches_won + 1
                            team1_obj.team_points = team1_obj.team_points + 2
                            team2_obj.matches_won = team2_obj.matches_won - 1
                            team2_obj.team_points = team2_obj.team_points - 2
                        else:
                            team2_obj.matches_won = team2_obj.matches_won + 1
                            team2_obj.team_points = team2_obj.team_points + 2
                            team1_obj.matches_won = team1_obj.matches_won - 1
                            team1_obj.team_points = team1_obj.team_points - 2
                        team1_obj.matches_lost = team1_obj.matches_played - team1_obj.matches_won
                        team2_obj.matches_lost = team2_obj.matches_played - team2_obj.matches_won
                        team1_obj.save()
                        team2_obj.save()
        super(Match, self).save(*args, **kwargs)
@receiver(pre_delete, sender=Match)
def handle_deleted_match(**kwargs):
    """Roll back the Team standings contributed by a match that is being
    deleted (mirror image of the bookkeeping in Match.save())."""
    match_obj = kwargs['instance']
    if match_obj.match_winner and match_obj.team1 and match_obj.team2:
        team1_obj = Team.objects.get(id=match_obj.team1.id)
        team2_obj = Team.objects.get(id=match_obj.team2.id)
        team1_obj.matches_played = team1_obj.matches_played - 1
        team2_obj.matches_played = team2_obj.matches_played - 1
        # NOTE(review): debug print left in — consider removing/logging.
        print("reciver function")
        if match_obj.match_winner == 'team1':
            team1_obj.matches_won = team1_obj.matches_won - 1
            team1_obj.team_points = team1_obj.team_points - 2
        else:
            team2_obj.matches_won = team2_obj.matches_won - 1
            team2_obj.team_points = team2_obj.team_points - 2
        team1_obj.matches_lost = team1_obj.matches_played - team1_obj.matches_won
        team2_obj.matches_lost = team2_obj.matches_played - team2_obj.matches_won
        team1_obj.save()
        team2_obj.save()
| true |
a05c2782cc851f974714a888084d9debdb6d5bf6 | Python | draculaw/leetcode | /VaildPalindrome.py | UTF-8 | 401 | 3.515625 | 4 | [] | no_license | class Solution:
# @param {string} s
# @return {boolean}
def isPalindrome(self, s):
    """Return True if *s* reads the same forwards and backwards when
    only ASCII letters and digits are considered, ignoring case."""
    allowed = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    filtered = [c for c in s.upper() if c in allowed]
    # A sequence is a palindrome iff it equals its own reverse.  This
    # replaces the manual half-length comparison loop and the Python
    # 2-only xrange builtin with a single equivalent comparison.
    return filtered == filtered[::-1]
| true |
dd3d0e9de409d8f008449747a60b48937163148e | Python | rahulraghuv/MahiteorProjects | /media/readCSV.py | UTF-8 | 286 | 2.78125 | 3 | [] | no_license | import csv
filename="Rahul_raghuvanshi_test_file.txt"
csvFile=open(filename)
csvReader=csv.reader(csvFile)
print csvReader
csvList=list(csvReader)
print csvList
listData=[]
var=i=0
for data in csvList:
for j in data:
var=var+int(j)
listData.append(var)
i+=1
print listData
| true |
62a309ac9c1eca33ced52a59153ffddc82637f7f | Python | shubhamsaraf26/python | /main.py | UTF-8 | 304 | 3.703125 | 4 | [] | no_license | str1='this is my first string '
print(str1)
str2 = "this is, my scecond string"
print(str2)
# Triple quotes allow a literal spanning several lines.
str3 = '''
this , sting ,has lots of line'''
print(str3)
print(str1[0:5])  # slice: characters 0 through 4
print(len(str3))  # number of characters
print(str3.lower())  # lower-cased copy
print(str1.upper())  # upper-cased copy
print(str1.count("this"))  # occurrences of the substring "this"
print(str1.find('fir'))  # index of the first match, or -1 if absent
print(str2.split())  # list of whitespace-separated words
63899a60c5546c97fbde1e86588e2d36fb018bf2 | Python | Dovedanhan/wxPython-In-Action | /spinecho_demo/Sizer/SizersAndNotebook.py | UTF-8 | 6,247 | 2.765625 | 3 | [] | no_license | # -*- coding: iso-8859-1 -*-
#--------------------------------------------------------------------
# Name: SizersAndNotebook.py
# Purpose: An application to learn sizers
# Author: Jean-Michel Fauth, Switzerland
# Copyright: (c) 2007-2008 Jean-Michel Fauth
# Licence: None
# os dev: winXP sp2
# py dev: Python 2.5.4
# wx dev: wxPython 2.8.9.1-ansi
# Revision: 28 December 2008
#--------------------------------------------------------------------
# Note: some panels from other modules in this package are causing refreshing
# issue when they are used with a notebook.
# Workaround. They are copied and modified here with an added
# wx.FULL_REPAINT_ON_RESIZE style.
#--------------------------------------------------------------------
import wx
from colourwindow import ColWin
#--------------------------------------------------------------------
# A modified version of WithBoxSizers.MyPanel13, where the style wx.FULL_REPAINT_ON_RESIZE
# has been added. Refreshing issue.
class MyPanel13A(wx.Panel):
    """Three stacked buttons beside a green colour window.

    Copy of WithBoxSizers.MyPanel13 with wx.FULL_REPAINT_ON_RESIZE added
    to work around refresh artifacts when hosted inside a notebook (see
    the module header note).
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, style=wx.FULL_REPAINT_ON_RESIZE)
        wgreen = ColWin(self, wx.NewId(), wx.NamedColour('green'))
        b1 = wx.Button(self, wx.NewId(), 'button1')
        b2 = wx.Button(self, wx.NewId(), 'button2')
        b3 = wx.Button(self, wx.NewId(), 'button3')
        # Buttons in a vertical column, evenly spread by stretch spacers.
        b = 0
        vsizer1 = wx.BoxSizer(wx.VERTICAL)
        vsizer1.Add(b1, 0, wx.ALL, b)
        vsizer1.AddStretchSpacer()
        vsizer1.Add(b2, 0, wx.ALL, b)
        vsizer1.AddStretchSpacer()
        vsizer1.Add(b3, 0, wx.ALL, b)
        # Button column on the left; colour window grows to fill the rest.
        b = 5
        self.hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
        self.hsizer2.Add(vsizer1, 0, wx.EXPAND | wx.ALL, b)
        self.hsizer2.Add(wgreen, 1, wx.EXPAND | wx.ALL, b)
        self.SetSizer(self.hsizer2)
#-------------------------------------------------------------------
# A serie of couples, StaticTexts-TextCtrls.
# Buttons ok and cancel.
# This is not elegant. A better way: FlexGridSizer.
# A modified version of WithBoxSizers.MyPanel16, where the style wx.FULL_REPAINT_ON_RESIZE
# has been added. Refreshing issue.
class MyPanel16A(wx.Panel):
    """A small form: four label/text rows, a separator line and OK /
    Cancel buttons.

    Copy of WithBoxSizers.MyPanel16 with wx.FULL_REPAINT_ON_RESIZE added
    to work around notebook refresh artifacts.  The row-by-row sizer
    construction is deliberately verbose; per the original author's
    note, a wx.FlexGridSizer would be the cleaner alternative.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, style=wx.FULL_REPAINT_ON_RESIZE)
        self.parent = parent
        self.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False))
        # Right-aligned labels, each paired with a text control below.
        lab1 = wx.StaticText(self, -1, 'hydrogen :', style=wx.ALIGN_RIGHT)
        lab2 = wx.StaticText(self, -1, 'tin :', style=wx.ALIGN_RIGHT)
        lab3 = wx.StaticText(self, -1, 'mendelevium :', style=wx.ALIGN_RIGHT)
        lab4 = wx.StaticText(self, -1, 'carbon :', style=wx.ALIGN_RIGHT)
        txt1 = wx.TextCtrl(self, -1, '')
        txt2 = wx.TextCtrl(self, -1, '')
        txt3 = wx.TextCtrl(self, -1, '')
        txt4 = wx.TextCtrl(self, -1, '')
        b1 = wx.Button(self, wx.NewId(), '&OK')
        b2 = wx.Button(self, wx.NewId(), '&Cancel')
        staline = wx.StaticLine(self, wx.NewId(), wx.DefaultPosition, (-1, 2), wx.LI_HORIZONTAL)
        # b: border in pixels; w: fixed width of the label column.
        b = 5
        w = 100
        hsizer1 = wx.BoxSizer(wx.HORIZONTAL)
        hsizer1.Add(lab1, 0, wx.RIGHT, b)
        hsizer1.Add(txt1, 1, wx.GROW, b)
        hsizer1.SetItemMinSize(lab1, (w, -1))
        hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
        hsizer2.Add(lab2, 0, wx.RIGHT, b)
        hsizer2.Add(txt2, 1, wx.GROW, b)
        hsizer2.SetItemMinSize(lab2, (w, -1))
        hsizer3 = wx.BoxSizer(wx.HORIZONTAL)
        hsizer3.Add(lab3, 0, wx.RIGHT, b)
        hsizer3.Add(txt3, 1, wx.GROW, b)
        hsizer3.SetItemMinSize(lab3, (w, -1))
        hsizer4 = wx.BoxSizer(wx.HORIZONTAL)
        hsizer4.Add(lab4, 0, wx.RIGHT, b)
        hsizer4.Add(txt4, 1, wx.GROW, b)
        hsizer4.SetItemMinSize(lab4, (w, -1))
        # Button row, right-aligned under a separator line.
        hsizer5 = wx.BoxSizer(wx.HORIZONTAL)
        hsizer5.Add(b1, 0)
        hsizer5.Add(b2, 0, wx.LEFT, 10)
        b = 5
        vsizer1 = wx.BoxSizer(wx.VERTICAL)
        vsizer1.Add(hsizer1, 0, wx.EXPAND | wx.ALL, b)
        vsizer1.Add(hsizer2, 0, wx.EXPAND | wx.ALL, b)
        vsizer1.Add(hsizer3, 0, wx.EXPAND | wx.ALL, b)
        vsizer1.Add(hsizer4, 0, wx.EXPAND | wx.ALL, b)
        vsizer1.Add(staline, 0, wx.GROW | wx.ALL, b)
        vsizer1.Add(hsizer5, 0, wx.ALIGN_RIGHT | wx.ALL, b)
        self.SetSizer(vsizer1)
#-------------------------------------------------------------------
#~ ???
#~ r = self.pa1.GetWindowStyleFlag()
#~ r = r | wx.FULL_REPAINT_ON_RESIZE
#~ print 'r:', r
#~ self.pa1.SetWindowStyleFlag(r)
#~ self.pa1.Refresh()
#~ self.Refresh()
class MyNotebook(wx.Notebook):
    """Two-page notebook hosting the repaint-patched demo panels."""

    def __init__(self, parent, id):
        wx.Notebook.__init__(self, parent, id,
                             style=wx.NB_TOP | wx.NB_MULTILINE)
        self.pa1 = MyPanel13A(self)
        self.AddPage(self.pa1, 'MyPanel112A')
        self.pa2 = MyPanel16A(self)
        self.AddPage(self.pa2, 'MyPanel115A')
#-------------------------------------------------------------------
class MyPanel1(wx.Panel):
    """A panel entirely filled by a MyNotebook."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.nb = MyNotebook(self, wx.ID_ANY)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.nb, 1, wx.EXPAND | wx.ALL, 0)
        self.SetSizer(sizer)
#-------------------------------------------------------------------
class MyPanel2(wx.Panel):
    """A MyNotebook with right-aligned OK / Cancel buttons underneath."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.nb = MyNotebook(self, wx.ID_ANY)
        ok_button = wx.Button(self, wx.NewId(), '&OK')
        cancel_button = wx.Button(self, wx.NewId(), '&Cancel')
        button_row = wx.BoxSizer(wx.HORIZONTAL)
        button_row.Add(ok_button, 0)
        button_row.Add(cancel_button, 0, wx.LEFT, 10)
        border = 8
        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add(self.nb, 1, wx.EXPAND | wx.TOP, border)
        outer.Add(button_row, 0, wx.ALIGN_RIGHT | wx.ALL, border)
        self.SetSizer(outer)
#-------------------------------------------------------------------
if __name__ == "__main__":
    # Demo entry point: show MyPanel1 inside the shared base frame.
    import baseframe
    app = wx.PySimpleApp()
    frame = baseframe.MyFrame(None, panel=MyPanel1)
    frame.Show()
    app.MainLoop()
| true |
bfd5c9209baaefdb131a453963c29e1e5fa370a5 | Python | zh1047592355/ApiAutoTest | /李老师python/day02/test_004.py | UTF-8 | 736 | 3.375 | 3 | [] | no_license | '''
fixture 测试前置和后置,比较常用的方式。
1. 命名比较灵活,不限于setup、teardown等命名方式
2. 使用比较灵活
3. 不需要import即可实现共享。
'''
import pytest
# 测试前置和后置
@pytest.fixture()
def login():
print("登录系统") # yield之前是前置
yield
print("退出系统") # yield之后是后置
# Test functions.
def test_query():
    # "Query function, login not required" — does not use the fixture.
    print("查询功能,不需要登录")
# Usage 1: receive the fixture as a function parameter (most common).
def test_add(login):
    # "Add function, login required."
    print("添加功能,需要登录")
# Usage 2: attach the fixture with the usefixtures decorator.
@pytest.mark.usefixtures("login")
def test_delete():
    # "Delete function, login required."
    print("删除功能,需要登录")
| true |
6e83dd286c4059c94272413e88dda9b8365bb6b9 | Python | gomilinux/python-PythonForKids | /Chapter8/Challenge2TurtlePitchfork.py | UTF-8 | 737 | 3.625 | 4 | [] | no_license | #Python For Kids Chapter 8 Challenge #2 Turtle Pitchfork
#Use turtle objects and move them around to create a sideways pitchfork
import turtle
# One pen per prong; the top/bottom pairs mirror each other.
topfork1 = turtle.Pen()
topfork2 = turtle.Pen()
bottomfork1 = turtle.Pen()
bottomfork2 = turtle.Pen()
# Draw the shafts to the right.
topfork1.forward(150)
bottomfork1.forward(150)
# Turn outward (up for top pens, down for bottom pens) and draw the
# vertical offsets that separate the prongs.
topfork1.left(90)
bottomfork1.right(90)
topfork1.forward(80)
bottomfork1.forward(80)
topfork2.forward(200)
bottomfork2.forward(200)
topfork2.left(90)
bottomfork2.right(90)
topfork2.forward(40)
bottomfork2.forward(40)
# Turn back to horizontal and draw the prongs themselves.
topfork1.right(90)
bottomfork1.left(90)
topfork2.right(90)
bottomfork2.left(90)
topfork1.forward(90)
bottomfork1.forward(90)
topfork2.forward(40)
bottomfork2.forward(40)
# Keep the window open until it is clicked.
turtle.exitonclick()
| true |
c9a5e60be691a6f6b48c0dee3e54c19a12b967f2 | Python | fank-cd/python_leetcode | /Problemset/binary-tree-level-order-traversal/binary-tree-level-order-traversal.py | UTF-8 | 839 | 3.546875 | 4 | [] | no_license |
# @Title: 二叉树的层序遍历 (Binary Tree Level Order Traversal)
# @Author: 2464512446@qq.com
# @Date: 2020-11-23 16:40:09
# @Runtime: 40 ms
# @Memory: 13.8 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrder(self, root: 'Node') -> List[List[int]]:
        """Return the node values of a binary tree grouped by depth
        (breadth-first, left to right within each level)."""
        from collections import deque

        levels = []
        if not root:
            return levels
        # deque gives O(1) popleft; the previous list.pop(0) cost O(n)
        # per dequeue.  (The variable was also misleadingly named
        # 'stack' — BFS uses a queue.)
        queue = deque([root])
        while queue:
            current_level = []
            for _ in range(len(queue)):
                node = queue.popleft()
                current_level.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            levels.append(current_level)
        return levels
| true |
f029ed3650cf009d91b5ea4eb6684a526ae6c3e3 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_203/700.py | UTF-8 | 1,390 | 3.125 | 3 | [] | no_license | #!/bin/env python
# google code jam 2017 round 1A problem 1
# Daniel Scharstein
def fill(a, x, rmin, rmax, cmin, cmax):
    """Write letter *x* into every '?' cell of the sub-grid
    a[rmin:rmax][cmin:cmax]; report a conflict if a cell already holds
    a different letter."""
    for row in range(rmin, rmax):
        for col in range(cmin, cmax):
            cell = a[row][col]
            if cell == '?':
                a[row][col] = x
            elif cell != x:
                print("error 1")
def letters(a, rmin, rmax, cmin, cmax):
    """Return the distinct non-'?' letters inside the sub-grid
    a[rmin:rmax][cmin:cmax] (order unspecified)."""
    seen = {a[row][col]
            for row in range(rmin, rmax)
            for col in range(cmin, cmax)}
    return list(seen - {'?'})
def solve(a, rmin, rmax, cmin, cmax):
    """Recursively partition the sub-grid a[rmin:rmax][cmin:cmax] so
    that each region contains exactly one letter, filling its '?' cells
    via fill().  Mutates *a* in place."""
    #print a
    x = letters(a, rmin, rmax, cmin, cmax)
    #print x
    n = len(x)
    if n == 1:
        # Only one letter remains: it owns the whole region.
        fill(a, x[0], rmin, rmax, cmin, cmax)
        return
    # Try a horizontal cut: find a row boundary that separates at least
    # one letter (but not all of them) from the rest, then recurse.
    for r in range(rmin, rmax):
        i = len(letters(a, rmin, r, cmin, cmax))
        if i >= 1 and i < n:
            solve(a, rmin, r, cmin, cmax)
            solve(a, r, rmax, cmin, cmax)
            return
    # Otherwise try a vertical cut on a column boundary.
    for c in range(cmin, cmax):
        i = len(letters(a, rmin, rmax, cmin, c))
        if i >= 1 and i < n:
            solve(a, rmin, rmax, cmin, c)
            solve(a, rmin, rmax, c, cmax)
            return
    # No valid split found (should not happen for well-formed input).
    print("error 2")
# Read the number of cases, then for each case the grid dimensions and
# the grid rows; solve in place and print the completed grid.
# (Python 2 I/O: raw_input and print statements.)
tests = int(raw_input())
for k in range(tests):
    r, c = map(int, raw_input().split())
    a = []
    for i in range(r):
        a.append(list(raw_input()))
    solve(a, 0, r, 0, c)
    print "Case #%d:" % (k+1)
    for row in a:
        print "".join(row)
| true |
ac862c4de6f983a850e0cc3444032cd7df412936 | Python | iamhimmat89/data_structure-and-algorithms-in-python | /bubble_sort.py | UTF-8 | 1,007 | 4.8125 | 5 | [] | no_license | print("\nWelcome to Bubble Sort...!!!\n")
# BubbleSort: sorts a list of numbers in ascending order.
class BubbleSort:
    """Bubble sort with the early-exit optimization: the pass loop stops
    as soon as a full sweep performs no swaps."""

    def __init__(self):
        # All three attributes are (re)populated by sort().
        self.swapped = False
        self.array = None
        self.length = None

    def sort(self, array):
        """Sort *array* in place (ascending) and remember it."""
        self.array = array
        self.length = len(array)
        for done in range(self.length):
            self.swapped = False
            # After each pass the largest remaining element has bubbled
            # to the end, so the inner sweep shrinks by one.
            for idx in range(self.length - done - 1):
                left, right = self.array[idx], self.array[idx + 1]
                if left > right:
                    self.swapped = True
                    self.array[idx], self.array[idx + 1] = right, left
            if not self.swapped:
                break

    def display(self):
        """Print the most recently sorted list."""
        print(str(self.array))
# Demo: sort a sample list and show it before and after.
bsort = BubbleSort()
arr = [10, 20, 30, 15, 25, 5, 17, 2]
print("Input Array:: ")
print(str(arr))
print(" ")
bsort.sort(arr)
print(" ")
print("Sorted Array:: ")
bsort.display()
e7fdda44f79ca2594d62c7b24f69bdcd08fec03e | Python | abhi9835/python | /class_objects.py | UTF-8 | 375 | 4.03125 | 4 | [] | no_license | class Employee:
company = 'Google'
def getsalary(self):
    """Print this instance's salary attribute."""
    message = f"salary is {self.salary}"
    print(message)
abhishek = Employee()
# Instance attributes shadow the class attribute 'company'.
abhishek.company = 'youtube'
abhishek.salary = 1000000
print(abhishek.salary)
print(abhishek.company)
# Equivalent to Employee.getsalary(abhishek): the instance is passed
# automatically as the method's self parameter.
abhishek.getsalary()
5136344b6c3545681dbf6dc2009a8ff576a9ddf8 | Python | maxtortime/algorithm | /algospotcoins/py_coin.py | UTF-8 | 773 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/local/bin/python3
import sys, math

# Kept from the original module-level constants.
MAX_COINS = 5000          # historical array bound (unused by the new DP)
MAX_COUNT = 1000000007    # results are reported modulo this value


def count_ways(money, coins):
    """Number of ways to form *money* from an unlimited supply of the
    given *coins* (order of coins does not matter), modulo MAX_COUNT.

    Fixes in this rewrite:
    - the original mixed Python 2 and 3 (python3 shebang with the
      Python 2-only raw_input/long builtins);
    - the DP table was fixed at 5000 entries and could be indexed past
      *money*; it is now sized money + 1;
    - the inner propagation skipped amounts and double-counted — this is
      the standard unbounded coin-change recurrence instead.
    """
    ways = [0] * (money + 1)
    ways[0] = 1  # one way to make 0: use no coins
    for coin in sorted(coins):
        if coin > money:
            break  # remaining (sorted) coins are all too large
        for amount in range(coin, money + 1):
            ways[amount] = (ways[amount] + ways[amount - coin]) % MAX_COUNT
    return ways[money]


def main():
    """Read all test cases from stdin, then print one result per line."""
    n_test_case = int(input())
    results = []
    for _ in range(n_test_case):
        money, n_coin = (int(x) for x in input().split())
        coins = [int(x) for x in input().split()]
        results.append(count_ways(money, coins))
    for value in results:
        print(value)


if __name__ == "__main__":
    main()
| true |
dbdf73a384146bf05170ecdb300f54c08276db23 | Python | APrioriInvestments/object_database | /object_database/web/cells/children.py | UTF-8 | 9,173 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Children:
"""A 'Collection-Like' object that holds Cell child references.
By 'Collection-like' we mean that this object maintains
some polymorphism with collections like Dictionaries and
Lists.
Children maintains an internal dictionary that maps child
names to either a Cell instance, a list of Cell instances,
or an n-dimensional (list of list, etc) of Cell instances.
For the purposes of recalculation and rendering, it also
maintains a flat list of all contained Cell instance children
regardless of what name they appear under in the current dict.
Convenience methods for adding and removing maintain the
integrity of both the flat list and the internal dict.
Overrides like `__setitem__` etc simply wrap the explicit
convenience methods in more list/dictionary like syntax.
Properties
----------
namedChildren: dict
A dictionary that maps unique names to
a Cell instance, a list of Cell instances,
or an n-dimensional (list of list, etc)
list of Cell instances
allChildren: list
A flat list of all child Cell instances,
regardless of their place in namedChildren
_reverseLookup: dict
A dictionary that maps Cell instances to
the key where the instance appears in
namedChildren. Used for reverse lookups.
"""
def __init__(self):
    """Start with no children at all."""
    self.namedChildren = {}   # name -> Cell | list | nested lists of Cells
    self.allChildren = []     # flat list of every contained Cell
    self._reverseLookup = {}  # Cell -> key under which it appears
def namedChildIdentities(self):
    """Return ``namedChildren`` with every Cell replaced by its
    ``identity``, preserving the nested dict/list structure."""
    def identities(node):
        if isinstance(node, dict):
            return {key: identities(value) for key, value in node.items()}
        if isinstance(node, list):
            return [identities(item) for item in node]
        return node.identity

    return identities(self.namedChildren)
def addChildNamed(self, name, childStructure):
    """Register *childStructure* under *name*, replacing any existing
    entry first.

    *childStructure* may be a single Cell, a list of Cells, or an
    n-dimensional nesting of lists of Cells; ``None`` only clears the
    existing entry.  The recursive ``_addChildStructure`` helper also
    records each encountered Cell in the flat ``allChildren`` list and
    the ``_reverseLookup`` table.

    Parameters
    ----------
    name: str
        The name under which the structure can be referenced later.
    childStructure: (Cell || list || list(list))
        The Cell structure to store, or None.
    """
    if name in self.namedChildren:
        self.removeChildNamed(name)
    if childStructure is not None:
        self.namedChildren[name] = self._addChildStructure(childStructure, name)
def addFromDict(self, childrenDict):
"""Adds all elements from an incoming dict.
Will overwrite any existing entries, which
is normal behavior for `addChildNamed`.
Parameters
----------
childrenDict: dict
A dictionary mapping names to children.
"""
for key, val in childrenDict.items():
self[key] = val
def removeChildNamed(self, name):
"""Removes the child or child structure of the given name.
If there is no child with the given name, it
will return False. Likewise, if removal fails
for some reason, it will also return False.
Makes a recursive call to `_removechildStructure`
in order to deal with the possibility of
multidimensional child structures.
Notes
-----
Using helper functions, this method will:
* Remove the given entry from the internal dict
* Remove the removed Cell instances from the
reverse lookup dictionary
* Remove the removed Cell instances from the
flat list of children (`allChildren`)
Parameters
----------
name: str
The key to lookup for which all children
should be removed.
"""
if name not in self.namedChildren:
return False
found = self.namedChildren[name]
success = self._removeChildStructure(found)
if not success:
return False
del self.namedChildren[name]
return True
def removeAll(self):
"""Removes all children and child structures."""
self.namedChildren = {}
self.allChildren = []
def dimensionsForChildNamed(self, name):
"""Returns the number of dimensions for a named entry.
Notes
-----
Some named children are lists of Cells or n-dimension
nested (ie list of list of list etc) of Cells. This
method will return the number of dimensions for a given
child entry in the Children collection.
Parameters
----------
name: str
The key name of the child.
Returns
-------
int: The number of dimensions
"""
found = self.namedChildren[name]
return self._getDimensions(found)
    def hasChild(self, child):
        """Returns true if child Cell is in this Children.

        Membership is tested against the flat ``allChildren`` list, so it
        covers Cells at any nesting depth.

        Parameters
        ----------
        child: Cell
            The child Cell instance to look for.

        Returns
        -------
        Boolean: True if the Cell is a child present in this
        Children.
        """
        return child in self.allChildren
    def hasChildNamed(self, name):
        """Returns True if this instance has a child with the name.

        Parameters
        ----------
        name: str
            The name of the child (key) to lookup.

        Returns
        -------
        Boolean: True if the name is in the internal dict
        (and therefore a child structure is present under it).
        """
        return name in self.namedChildren
def findNameFor(self, child):
"""Returns the name for a given child Cell, if present.
Parameters
----------
child: Cell
A Cell instance to lookup
Returns
-------
str | None: Returns the string of the name (key)
where the instance resides in the internal dict,
or None if it is not found.
"""
if child in self._reverseLookup:
return self._reverseLookup[child]
return None
    def items(self):
        """Wrapper for internal dict's `items()` method.

        Yields (name, structure) pairs for every named child entry.
        """
        return self.namedChildren.items()
def _getDimensions(self, item, dimensions=0):
"""Recursively counts the num of dimensions
for a multidimensional child structure.
"""
if isinstance(item, list):
return self._getDimensions(item[0], dimensions + 1)
return dimensions
def _removeChildStructure(self, structure):
"""Recursively iterates through a possible
multidimensional child structure, removing any found
Cell instances to the various internal collections.
"""
if isinstance(structure, list):
return [self._removeChildStructure(s) for s in structure]
else:
self.allChildren.remove(structure)
del self._reverseLookup[structure]
return True
def _addChildStructure(self, structure, name):
"""Recursively iterates through a possible
multidimensional child structure, adding any found
Cell instances to the various internal collections.
"""
if isinstance(structure, list):
return [self._addChildStructure(item, name) for item in structure]
else:
self.allChildren.append(structure)
self._reverseLookup[structure] = name
return structure
    def names(self):
        """Return a view of all registered child names (the dict keys)."""
        return self.namedChildren.keys()
    def __contains__(self, key):
        # `name in children` checks the *names*, not the Cell instances
        # (use hasChild for instance membership).
        return key in self.namedChildren
    def __getitem__(self, key):
        """Override that wraps access to namedChildren.

        Raises KeyError for unknown names, like a plain dict.
        """
        return self.namedChildren[key]
    def __setitem__(self, key, value):
        """Override that wraps `addChildNamed` (replaces existing entries)."""
        self.addChildNamed(key, value)
    def __delitem__(self, key):
        """Override that wraps `removeChildNamed`; unknown keys are a no-op."""
        if key in self.namedChildren:
            self.removeChildNamed(key)
| true |
74f66c29ab563faa7f5b95dbb3de5d7d397b10b9 | Python | cloud-security-research/sgx-ra-tls | /sgxlkl/https-server/https-server.py | UTF-8 | 1,378 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | # This is a demonstration of how to use RA-TLS without actually
# interfacing with the RA-TLS library directly. Instead, the RA-TLS
# key and certificate are generated at startup and exposed through the
# file system. The application accesses the key and certificate by
# reading from the file system.
import base64
import BaseHTTPServer, SimpleHTTPServer
import ssl
def rsa_key_der_to_pem(key_der):
    """Wrap a DER-encoded RSA private key into a PEM string.

    Base64-encodes *key_der* and re-wraps the output at 64 characters
    per line between the standard RSA PRIVATE KEY guard lines (RFC 7468
    formatting).  Note: this file is Python 2, so b64encode returns str.
    """
    body = base64.b64encode(key_der)
    # Slice into 64-character lines instead of appending one character at
    # a time -- the original loop was quadratic in the key size.
    lines = [body[i:i + 64] for i in range(0, len(body), 64)]
    return ('-----BEGIN RSA PRIVATE KEY-----\n'
            + '\n'.join(lines)
            + '\n-----END RSA PRIVATE KEY-----')
# The RA-TLS library currently only exposes the key and certificate
# in DER format. The Python API expects them in PEM format. Hence, we
# convert them here.
crt_pem = ssl.DER_cert_to_PEM_cert(open('/tmp/crt').read())
f = open('/tmp/crt.pem', 'w')
f.write(crt_pem)
f.close()
with open('/tmp/key.pem', 'w') as f:
    print >> f, rsa_key_der_to_pem(open('/tmp/key').read())
# Start the HTTPS web server.  (Python 2 syntax; SimpleHTTPRequestHandler
# serves the current working directory over the wrapped TLS socket.)
ip = '10.0.1.1'
port = 4443
print "Server listening on %s:%d\n" % (ip, port)
httpd = BaseHTTPServer.HTTPServer((ip, port), SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, keyfile='/tmp/key.pem', certfile='/tmp/crt.pem', server_side=True)
httpd.serve_forever()
| true |
fa4d748fb50fc917933493ec3cf8db15845f2f39 | Python | salonisv17/DriveAssingment | /Solution | UTF-8 | 2,179 | 3.78125 | 4 | [] | no_license | import datetime
def solution():
    """Interactively interpolate a value between two dates in one month.

    Reads two dd-mm-yyyy dates plus two integer values from stdin, then
    prints a dict mapping every day between the two dates to the linearly
    interpolated value.  On invalid ranges the whole prompt restarts by
    calling solution() recursively.
    """
    isValid = False
    while not isValid:
        date_1 = input("Type first Date dd-mm-yyyy: ")
        value_1 = input("Enter first value: ")
        date_2 = input("Type second Date dd-mm-yyyy: ")
        value_2 = input("Enter second value: ")
        try: # strptime throw an exception if input date doesn't match the pattern
            d1 = datetime.datetime.strptime(date_1, "%d-%m-%Y")
            d2 = datetime.datetime.strptime(date_2, "%d-%m-%Y")
            isValid = True
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            print("Try again in dd-mm-yyyy pattern\n")
    if (isValid == True):
        if (d1.year == d2.year and d1.month == d2.month):
            # checking for value which is greater then 0 and less than 1000000
            # checking for valid years
            if (int(value_1) < 1000000 and int(value_2) < 1000000 and int(value_1) > 0 and int(value_2) > 0):
                if (d1.year > 1970 and d1.year < 2100 and d2.year > 1970 and d2.year < 2100):
                    if (d2.day != d1.day): # checking for same days
                        # NOTE(review): k goes negative when date_2 < date_1;
                        # the range() below is then empty.
                        k = int(d2.day) - int(d1.day)
                    else:
                        k = 1
                    v = int(value_2) - int(value_1)
                    avrg = v / k  # per-day increment between the two values
                    val = int(value_1)
                    dec_final = {} # final empty dict
                    for day in range(int(d1.day), int(d2.day) + 1):
                        if (day != d1.day): # checking if days are same
                            val = val + avrg
                        vall1 = str(day) + "-" + str(d1.month) + "-" + str(d1.year)
                        dec_final[vall1] = int(val)
                    print(dec_final)
                else:
                    print("enter valid years 1970 < year < 2020")
                    solution()
            else:
                print("Enter valid values(0 > value > 1000000)")
                solution()
        else:
            print("year and month must be same")
            solution()
solution()
| true |
16c4f6672ea814546565def59d034e49198ff618 | Python | quintant/ZipBrute | /zipAndDestroy.py | UTF-8 | 2,704 | 2.953125 | 3 | [] | no_license | from time import sleep
from zipfile import ZipFile
from passGen import PassGen
import multiprocessing
from termutils import *
def crackBrut(lock:multiprocessing.Lock, num):
    """Worker: brute-force 'dummy.zip' with all (4+num)-character passwords.

    Tries every combination of letters/digits/punctuation; progress is
    drawn via the termutils helpers (moveTo/cprint) and any hit is
    appended to the 'foundpassw' file.
    """
    import random
    import string
    from itertools import product
    filename = 'dummy.zip'
    zip = ZipFile(filename)
    cnt = 0
    cont = True
    # Candidate alphabet: a-zA-Z, 0-9 and punctuation.
    BABA = string.ascii_letters + string.digits + string.punctuation
    # BOOEY = [ch for ch in BABA]
    while cont:
        for pwd in product(BABA, repeat=4+num):
            pw = "".join(pwd)
            try:
                # extractall raises on a wrong password, landing in except.
                zip.extractall( pwd=bytes(pw, encoding='utf-8'))
                cont = False
                # NOTE(review): duplicated assignment; also the for loop is
                # not broken out of, so remaining candidates are still tried.
                cont = False
                with lock:
                    moveTo(20+num, 2)
                    cprint(f'Found {pw}', 'green', end='')
                    print(flush=True, end='')
                with open('foundpassw', 'a+') as f:
                    f.write(pw + '\n')
            except Exception:
                cnt += 1
                # Update the status line every 10000 attempts.
                if not cnt % 10000:
                    # with lock:
                    moveTo(2+num, 2)
                    cprint(f'Tries {str(cnt):<20}:- {pw[:20]} ', 'cyan', end='')
                    print(flush=True, end='')
def crackRT(lock:multiprocessing.Lock, num, pwds):
    """Worker: dictionary-attack a private copy of 'dummy.zip' using pwds.

    Each worker copies the archive into tmp/ so ZipFile handles do not
    clash across processes; hits are appended to 'foundpassw'.
    """
    import shutil
    filename = 'dummy.zip'
    nfn = f'tmp/{num}(unknown)'
    shutil.copyfile(filename, nfn)
    zip = ZipFile(nfn)
    cnt = 0
    cont = True
    # NOTE(review): if no password in pwds succeeds, cont stays True and
    # the wordlist is retried forever.
    while cont:
        for pw in pwds:
            try:
                pas = bytes(pw, encoding='utf-8')
                zip.extractall(pwd=pas)
                cont = False  # stop after the current pass over the list
                with lock:
                    moveTo(25+num, 2)
                    cprint(f'Found {pw}', 'green', end='')
                    print(flush=True, end='')
                with open('foundpassw', 'a+') as f:
                    f.write(pw + '\n')
            except Exception:
                cnt += 1
                # Update the status line every 1000 attempts.
                if not cnt % 1000:
                    # with lock:
                    moveTo(2+num, 2)
                    cprint(f'Tries {str(cnt):<20}:- {pw[:20]} ', 'cyan', end='')
                    print(flush=True, end='')
if __name__=="__main__":
    # Split the candidate password list into 24 chunks and attack with
    # one worker process per chunk.
    passgen = PassGen()
    xxx = passgen.split(24)
    clear()  # clear the terminal (termutils)
    threads = []
    lock = multiprocessing.Lock()
    for num, pwds in enumerate(xxx):
        args = (lock, num, pwds, )
        # NOTE(review): despite the variable names these are *processes*.
        thread = multiprocessing.Process(target=crackRT, args=args, daemon=True)
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
| true |
904b968df5db83438382920f161b6dd01eecb199 | Python | bkuhlen73/udemy | /python/challenges/min_max_key_in_dictionary.py | UTF-8 | 330 | 3.765625 | 4 | [] | no_license | '''
min_max_key_in_dictionary({2:'a', 7:'b', 1:'c',10:'d',4:'e'}) # [1,10]
min_max_key_in_dictionary({1: "Elie", 4:"Matt", 2: "Tim"}) # [1,4]
'''
def min_max_key_in_dictionary(d):
    """Return a two-element list: [smallest key, largest key] of *d*."""
    # Iterating a dict yields its keys, so min/max can take it directly.
    return [min(d), max(d)]
# Demo call from the exercise prompt; prints [1, 10].
print(min_max_key_in_dictionary(
    {2: 'a', 7: 'b', 1: 'c', 10: 'd', 4: 'e'}))  # [1,10])
| true |
800337f1e035cc13be46285ad73b1477ff48842b | Python | rafinkang/test_python | /day11/test3.py | UTF-8 | 927 | 4.53125 | 5 | [] | no_license | class Player:
# 클래스 속성
cnt = 0
bag = []
def __init__(self, name):
print("--------초기화 함수",name,"- 생성자--------")
self.name = name
Player.cnt += 1
    def put(self, obj):
        # Append to the class-level bag, so the inventory is shared.
        Player.bag.append(obj)
def attack(self, other):
print(other.name + "를 공격합니다.")
def greeting(self, other):
print(other.name + " 부모님은 잘 계시니?")
    # class method - declared with a decorator above the function
    @classmethod
    def getBag(cls):
        # Receives the class itself, so the shared bag is printed.
        print("인벤토리: ", cls.bag)
p1 = Player("에코")
print(p1.cnt)
p1.put("권총")
print('----------------------------')
p2 = Player("야스오")
print(p2.cnt)
print('----------------------------')
p1.greeting(p2)
p1.attack(p2)
# 클래스 속성(클래스변수)는 인스턴스끼리 공유한다.
print('----------------------------')
p1.getBag()
p2.getBag() | true |
54f08ba85f5a7ab601d1e9aa5a2707c96b887b76 | Python | minevadesislava/HackBulgaria-Programming101 | /week3/3-Panda-Social-Network/panda.py | UTF-8 | 778 | 3.484375 | 3 | [] | no_license |
import re
class Panda:
    """A panda profile (name, email, gender) for the social network.

    Equality and hashing are value-based: two pandas are equal when
    their string representations match, and hash on the name so that
    equal pandas can live in sets/dict keys.
    """

    def __init__(self, name, email, gender):
        self.__name = name
        self.__email = email
        self.__gender = gender

    def name(self):
        """Return the panda's name."""
        return self.__name

    def email(self):
        """Return the panda's email address."""
        return self.__email

    def gender(self):
        """Return the panda's gender string."""
        return self.__gender

    def isMale(self):
        return self.__gender == 'male'

    def isFemale(self):
        return self.__gender == 'female'

    def __str__(self):
        return "Panda: {}, {}, {}".format(self.__name, self.__email, self.__gender)

    def __repr__(self):
        # BUG FIX: the accessors must be *called*; the old code formatted
        # the bound methods themselves.
        return "A repr Panda: {}, {}, {} ".format(self.name(), self.email(), self.gender())

    def __eq__(self, other):
        # BUG FIX: was misspelled __eg__, so == silently fell back to
        # identity comparison.
        return str(self) == str(other)

    def __hash__(self):
        # BUG FIX: was misspelled _hash_ and hashed the bound method
        # self.name instead of the name value.  Defined alongside __eq__
        # to keep instances hashable.
        return hash(self.__name)
| true |
32f287fdf24519739a7585d7a1c86e39585d928f | Python | koustavmandal95/Competative_Coding | /Practice Challenges/sum_pair_zer0.py | UTF-8 | 555 | 3.1875 | 3 | [] | no_license | def pairSum0(l):
#Implement Your Code Here
negative_array=[]
positive_array=[]
for i in range(len(l)):
if l[i]<0:
negative_array.append(l[i])
#l.remove(l[i])
else:
positive_array.append(l[i])
print(negative_array,positive_array)
for i in range(0,len(positive_array)):
if abs(negative_array[i]) in positive_array:
print(negative_array[i],positive_array[i])
# Read the element count (unused) and the space-separated list, then run.
n=int(input())
l=list(int(i) for i in input().strip().split(' '))
pairSum0(l)
'''
5
2 1 -2 2 3
''' | true |
7abc99dab157f34dbfef0affd9b763a995fc7614 | Python | troykark/Underworlds | /dice.py | UTF-8 | 764 | 3.40625 | 3 | [] | no_license | import random
import statistics
def statarray():
    """Roll 4d6 and drop the lowest die (tabletop ability-score roll)."""
    dice_rolls = [random.randint(1, 6) for _ in range(4)]
    # Dropping one occurrence of the minimum equals total minus minimum.
    return sum(dice_rolls) - min(dice_rolls)
def rollDice(rolls,dice):
    """Roll *rolls* dice with *dice* sides each and return the total."""
    return sum(random.randint(1, dice) for _ in range(rolls))
def attackRoll(advantage):
    """Roll a d20: advantage=1 keeps the higher of two dice,
    advantage=-1 the lower, anything else a single straight roll."""
    if advantage == 1:
        pair = [random.randint(1, 20) for _ in range(2)]
        return max(pair)
    if advantage == -1:
        pair = [random.randint(1, 20) for _ in range(2)]
        return min(pair)
    return random.randint(1, 20)
def testrolls():
    """Sanity-check statarray by printing the empirical mean of 10000 rolls."""
    samples = [statarray() for _ in range(10000)]
    print(statistics.mean(samples))
testrolls()
| true |
22f37678c95ebecfe4eeb92e6db29c4dc89b9b69 | Python | akselell/statikk | /surface.py | UTF-8 | 540 | 2.859375 | 3 | [] | no_license | import matplotlib
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# Plot the surface z = sin(x*y) sampled on a 100x100 grid over [-10, 10]^2.
fig = plt.figure()
ax = plt.axes(projection='3d')

xpoints = np.linspace(-10, 10, 100)
ypoints = np.linspace(-10, 10, 100)
# BUG FIX: the original loop never incremented its index, so every sample
# accumulated into zpoints[0][0], and plot_surface was then handed two 1-D
# arrays with a (1, 10000) z array.  Build matching 2-D grids instead and
# evaluate sin vectorized.
X, Y = np.meshgrid(xpoints, ypoints)
Z = np.sin(X * Y)
ax.plot_surface(X, Y, Z)
| true |
046d4af7f8b53ef7a10028f9f52fc397bbe3af2e | Python | eddiesherlock/twitter | /craw_id.py | UTF-8 | 2,561 | 3.109375 | 3 | [] | no_license | from urllib.request import urlopen
import csv
import re
from bs4 import BeautifulSoup
import requests
import pandas as pd
def crawl_id():
    """Scrape Twitter screen names for MLB players from Wikipedia.

    Reads player names from ID_reference_.csv, fetches each player's
    Wikipedia article and prints every twitter.com link found in the
    article body.  Returns the literal string 'screen_name'.
    """
    # define url for crawling
    url = 'https://en.wikipedia.org/wiki/Main_Page'  # NOTE(review): unused
    headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
    # The newline='' argument lets embedded newlines in fields parse correctly
    with open ('ID_reference_.csv','r',newline='') as csvfile:
        # Read the CSV contents
        # reader = csv.reader(csvfile)
        reader = csv.DictReader(csvfile)
        column = [row['mlb_name'] for row in reader]
        for name in column:
            # Build the article slug, e.g. "A.J. Smith" -> "A._J._Smith".
            a = name.replace('.', '._')
            input_keyword=a.replace(' ','_')
            keyword_link = "https://en.wikipedia.org/wiki/"+input_keyword
            # print(keyword_link)
            res = requests.get(keyword_link, headers=headers)
            soup = BeautifulSoup(res.text, 'html.parser')
            # content = soup.find(name='div', attrs={'id':'mw-content-text'}).find_all(name='a')
            # print(content)
            # html = urlopen("https://en.wikipedia.org/wiki/"+input_keyword)
            # soup = BeautifulSoup(keyword_link,'html.parser')
            # Match links that start with https://twitter.com/
            # ((?!:).)* excludes any ':' from the remainder of the URL
            regex = re.compile(r"^(https:\/\/twitter\.com\/)((?!:).)*$")
            for link in soup.find('div', {'id': 'mw-content-text'}).find_all('a', href=regex):
                if 'href' in link.attrs:
                    screen_name_str=link.attrs['href'].split('/')[-1]
                    print(name,screen_name_str)
            # with open('screen_name.csv', 'w') as f:
            #     writer = csv.writer(f)
            #     table=[column,screen_name_str]
            #     writer.writerow(['mlb_name',"screen_name"])
            #     writer.writerows(table)
            # df_tweet = pd.DataFrame(table,columns=['mlb_name',"screen_name"])
            # # Show all columns
            # pd.set_option('display.max_columns', None)
            # # Show all rows
            # pd.set_option('display.max_rows', None)
            # # Set display width to 2000 to prevent the output from wrapping
            # pd.set_option('display.width', 2000)
            # print(df_tweet.head())
            # # for row in screen_name_str:
            # #     writer.writerows(row)
    # NOTE(review): returns a literal string, not the scraped names.
    return 'screen_name'
crawl_id()
254a551605f9493256d5cae6daec1cbbbef2b7d3 | Python | liamhawkins/bio_tools | /volcano_plot.py | UTF-8 | 2,989 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
import os
from argparse import ArgumentParser
# Command-line interface.  NOTE(review): '--ouput' is a typo for
# '--output', but renaming it would break existing invocations.
parser = ArgumentParser(description='Create volcano plot from spreadsheet of \
p-values and fold changes')
parser.add_argument('-i', '--input', dest='input_file', required=True,
                    help='input spreadsheet file (.csv, .xls, .xlsx)',
                    metavar='FILE')
parser.add_argument('-o', '--ouput', dest='output_file',
                    help='output file (.png, .pdf)', metavar='FILE')
parser.add_argument('-p', '--pvalue', dest='pvalue',
                    help='p-value threshold for genes of interest',
                    metavar='PVALUE', nargs='?', const=0.05, type=float,
                    default=0.05)
parser.add_argument('-f', '--foldchange', dest='foldchange',
                    help='fold change threshold for genes of interest',
                    metavar='FOLDCHANGE', nargs='?', const=2, type=float,
                    default=2)
args = parser.parse_args()

# Default the output file to <input basename>.pdf when not provided.
INPUT_FILENAME, INPUT_EXTENSION = os.path.splitext(args.input_file)
if args.output_file is None:  # idiom fix: identity check for None
    OUTPUT_FILE = INPUT_FILENAME + '.pdf'
else:
    OUTPUT_FILE = args.output_file
P_VAL_THRESH = args.pvalue
FC_THRESH = args.foldchange

# Load the spreadsheet; only .csv/.xls/.xlsx are supported.
if INPUT_EXTENSION == '.csv':
    df = pd.read_csv(args.input_file)
elif INPUT_EXTENSION in ['.xls', '.xlsx']:
    df = pd.read_excel(io=args.input_file)
else:
    sys.exit('ERROR: {} is not .csv, .xls, \
or .xlsx file'.format(args.input_file))

# Derived columns: -log10(p) on the y axis, log2(fold change) on the x
# axis, and a per-gene color (blue = up, red = down, black = not of
# interest) based on the two thresholds.
df['neglog_p_value'] = np.negative(np.log10(df['p_value']))
df['log2_fc'] = np.log2(df['fold_change'])
df['goi'] = np.where((df['p_value'] < args.pvalue) &
                     (np.absolute(df['log2_fc']) >= math.log(args.foldchange,2)),
                     np.where(df['log2_fc'] > 0, '#2c7bb6','#d7191c'), 'black')

# Symmetric x limits with 10% headroom; thresholds in plot coordinates.
X_MAX = round(max(abs(df['log2_fc'].min()), abs(df['log2_fc'].max()))*1.1,1)
X_MIN = -X_MAX
Y_MIN = 0
Y_MAX = df['neglog_p_value'].max()
NEG_LOG_P_THRESH = -math.log(args.pvalue,10)
LOG_FC_THRESH_POS = math.log(args.foldchange,2)
LOG_FC_THRESH_NEG = -LOG_FC_THRESH_POS

# Scatter plot plus dashed guide lines marking the significance region.
plt.rcParams.update({'mathtext.default': 'regular'})
plt.rcParams.update({'figure.figsize': [12.0, 8.0]})
plt.scatter(df['log2_fc'], df['neglog_p_value'], c=df['goi'])
plt.xlabel('$log_2(Fold\ change)$', fontsize=20)
plt.ylabel('$-log_{10}(\mathit{p}-value)$', fontsize=20)
plt.axis([X_MIN,X_MAX,Y_MIN,Y_MAX])
plt.plot([X_MIN,LOG_FC_THRESH_NEG],[NEG_LOG_P_THRESH,NEG_LOG_P_THRESH],
         color='grey', linestyle='--')
plt.plot([LOG_FC_THRESH_POS,X_MAX],[NEG_LOG_P_THRESH,NEG_LOG_P_THRESH],
         color='grey', linestyle='--')
plt.plot([LOG_FC_THRESH_NEG,LOG_FC_THRESH_NEG],[NEG_LOG_P_THRESH,Y_MAX],
         color='grey', linestyle='--')
plt.plot([LOG_FC_THRESH_POS,LOG_FC_THRESH_POS],[NEG_LOG_P_THRESH,Y_MAX],
         color='grey', linestyle='--')
plt.savefig(OUTPUT_FILE, dpi=600)
| true |
de43ae3fd621ff3110ac055391a38852333e24d0 | Python | tim-fry/earthmarsbot | /bot.py | UTF-8 | 1,021 | 3.140625 | 3 | [] | no_license | import ephem
import json
import twitter
# Load the Twitter API credentials once at import time; the JSON keys are
# passed straight through as keyword arguments to twitter.Api.
with open('credentials.json') as f:
    credentials = json.loads(f.read())
def generate_distance_message():
    """Build the tweet text with the current Earth-Mars distance.

    Uses pyephem's Mars ephemeris; per the format strings below,
    m.earth_distance is expressed in AU.
    """
    m = ephem.Mars()
    m.compute()  # compute the ephemeris for the current time
    lightseconds = 499.005  # seconds for light to travel 1 AU
    milmiles = 92.955807;   # million miles per AU
    # NOTE(review): under Python 3 this division yields a float; %d below
    # truncates it, so the printed value is unchanged.
    minutes = int(m.earth_distance*lightseconds) / 60
    seconds = m.earth_distance*lightseconds % 60;
    distance = "#Mars is currently %.6f AU (%.1f million miles) from Earth." % (m.earth_distance, m.earth_distance*milmiles)
    time = "It would take %d minutes, %05.2f seconds for a message to travel that distance." % (minutes, seconds)
    return distance + " " + time
def send_tweet(message):
    """Post *message* to Twitter using the credentials loaded at import.

    API failures are caught and reported to stdout instead of being
    raised to the caller.
    """
    api = twitter.Api(**credentials)
    try:
        status = api.PostUpdate(message)
    # BUG FIX: TwitterError was referenced as a bare name but only
    # `import twitter` exists, so any API failure raised NameError
    # instead of being handled.
    except twitter.TwitterError as err:
        print("Oops, something went wrong! Twitter returned an error: %s" % (err.message))
    else:
        print("Yay! Tweeted: %s" % status.text)
def lambda_handler(_event_json, _context):
    """Lambda-style entry point; the event and context are unused."""
    # Tweet Message
    send_tweet(generate_distance_message())
| true |
e2258a1ffd3c242a4942f5962067234cb4438792 | Python | kimroniny/ACM | /LeetCode/contests/20210704-weilai/2/1.py | UTF-8 | 691 | 3.21875 | 3 | [] | no_license | import queue
import heapq
class P():
    """Heap entry ordered by its first field ``a``; ``b`` rides along."""
    def __init__(self,a,b):
        self.a = a
        self.b = b
    def __lt__(self, other):
        # Ordering considers only `a`, exactly like the original if/else.
        return self.a < other.a
    def p(self):
        print(self.a, self.b)
class Solution:
    def eliminateMaximum(self, dist, speed) -> int:
        # NOTE(review): appears to be an unfinished contest attempt --
        # `ans` is never incremented, `speed` is stored but never used,
        # and nothing is returned once the heap drains, so the method
        # yields None unless some entry starts with distance 0.
        h = []
        ans = 0
        # Min-heap of P(distance, speed); ordering comes from P.__lt__.
        for v, k in enumerate(dist):
            heapq.heappush(h, P(k, speed[v]))
        while len(h) > 0:
            x = heapq.heappop(h)
            if x.a == 0:
                return ans
if __name__ == "__main__":
    # Ad-hoc manual check with the contest's sample input.
    # print()
    Solution().eliminateMaximum(
        [3,2,4],
        [5,3,2]
    )
f7fab922203a02a56108a6b023d05a699f11317f | Python | espnet/espnet | /egs2/TEMPLATE/asr1/pyscripts/utils/convert_text_to_phn.py | UTF-8 | 2,527 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
# Copyright 2021 Tomoki Hayashi and Gunnar Thor
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Convert kaldi-style text into phonemized sentences."""
import argparse
import codecs
import contextlib
from joblib import Parallel, delayed, parallel
from tqdm import tqdm
from espnet2.text.cleaner import TextCleaner
from espnet2.text.phoneme_tokenizer import PhonemeTokenizer
def main():
    """Run phoneme conversion on a kaldi-style text file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--g2p", type=str, required=True, help="G2P type.")
    parser.add_argument("--cleaner", type=str, default=None, help="Cleaner type.")
    parser.add_argument("--nj", type=int, default=4, help="Number of parallel jobs.")
    parser.add_argument("in_text", type=str, help="Input kaldi-style text.")
    parser.add_argument("out_text", type=str, help="Output kaldi-style text.")
    args = parser.parse_args()

    phoneme_tokenizer = PhonemeTokenizer(args.g2p)
    cleaner = None
    if args.cleaner is not None:
        cleaner = TextCleaner(args.cleaner)
    # Kaldi-style text: one "<utt_id> <sentence...>" entry per line.
    with codecs.open(args.in_text, encoding="utf8") as f:
        lines = [line.strip() for line in f.readlines()]
    text = {line.split()[0]: " ".join(line.split()[1:]) for line in lines}
    if cleaner is not None:
        text = {k: cleaner(v) for k, v in text.items()}
    # Phonemize in parallel; tqdm_joblib patches joblib so the progress
    # bar advances as batches complete.
    with tqdm_joblib(tqdm(total=len(text.values()), desc="Phonemizing")):
        phns_list = Parallel(n_jobs=args.nj)(
            [
                delayed(phoneme_tokenizer.text2tokens)(sentence)
                for sentence in text.values()
            ]
        )
    # dicts preserve insertion order, so keys() lines up with phns_list.
    with codecs.open(args.out_text, "w", encoding="utf8") as g:
        for utt_id, phns in zip(text.keys(), phns_list):
            g.write(f"{utt_id} " + " ".join(phns) + "\n")
@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
    """Patch joblib to report into tqdm progress bar given as argument.

    While the context is active, joblib's BatchCompletionCallBack is
    replaced by a subclass that ticks *tqdm_object* by the batch size on
    every completed batch.  The original callback is restored and the bar
    closed on exit, even if the body raises.

    Reference:
        https://stackoverflow.com/questions/24983493
    """
    class TqdmBatchCompletionCallback(parallel.BatchCompletionCallBack):
        def __call__(self, *args, **kwargs):
            tqdm_object.update(n=self.batch_size)
            return super().__call__(*args, **kwargs)

    old_batch_callback = parallel.BatchCompletionCallBack
    parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
    try:
        yield tqdm_object
    finally:
        # Always restore the module-global callback and close the bar.
        parallel.BatchCompletionCallBack = old_batch_callback
        tqdm_object.close()
# Script entry point.
if __name__ == "__main__":
    main()
| true |
f0b1e893ce77f51dd47286c50fa5b165fb66c17c | Python | avbm/exercism | /python/matrix/matrix.py | UTF-8 | 394 | 3.6875 | 4 | [] | no_license | class Matrix(object):
def __init__(self, matrix_string):
temp_rows = matrix_string.split('\n')
self.rows = []
for row in temp_rows:
self.rows.append(row.split(' '))
def row(self, index):
return [ int(i) for i in self.rows[index-1] ]
def column(self, index):
return [ int(self.rows[i][index-1]) for i in range(len(self.rows)) ]
| true |
eacf328787935c07164926cc32d8365752c3ff63 | Python | wangyongk/scrapy_toturial | /Pythonproject/douban/sk.py | UTF-8 | 595 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 20:23:55 2018
@author: wangyongkang
"""
"""
file.read() 读取文件所有内容
file.readlines() 读取文件的全部内容 与file.read()不同之处在于readlines会把读取的
内容,赋给一个列表变量
file.readline() 读取一行内容
"""
import urllib.request
# NOTE(review): the page is fetched twice -- once for the (misleadingly
# named) response object below and once for the decoded body.
filename=urllib.request.urlopen("http://www.baidu.com")
response=urllib.request.urlopen("http://www.baidu.com").read().decode("utf-8", "ignore")
# Save the decoded HTML to disk (Windows path).
file=open("D:/1.html","w",encoding="utf-8")
file.write(response)
filename.info()  # fetches the HTTP headers; the return value is discarded
file.close()
| true |
8deb5fba8dc3f20d8516955bd3b67a2f59787a1c | Python | NickJaNinja/Internet-Explorer | /python_version/game.py | UTF-8 | 1,242 | 2.53125 | 3 | [] | no_license | from player import *
from universe import *
from shopGoods import *
from random import *
class Game:
    """Top-level game state: the player, difficulty, universe and the
    current location (system plus that system's shop)."""

    def __init__(self):
        self.player = None
        self.diff = None
        self.universe = Universe()
        # Both are populated by init() / setCurrentSystem().
        self.currSystem = None
        self.currShop = None

    def setPlayer(self, p):
        self.player = p

    def getPlayer(self):
        return self.player

    def setDiff(self, d):
        self.diff = d

    def getDiff(self):
        return self.diff

    def setUniverse(self, p):
        self.universe = p

    def getUniverse(self):
        return self.universe

    def getCurrentSystem(self):
        return self.currSystem

    def setCurrentSystem(self, s):
        """Move to system *s* and refresh its shop.

        With 50% probability a random RadicalPriceEvent is applied to
        the shop of the newly entered system.
        """
        r = random()
        self.currSystem = s
        self.currShop = s.getShop()
        self.currShop.refresh()
        if r < 0.5:
            c = choice(RadicalPriceEvent)
            self.currShop.setIE(c)

    def getCurrentShop(self):
        return self.currShop

    def setCurrentShop(self, s):
        # BUG FIX: this previously assigned to self.shop, so
        # getCurrentShop() kept returning the stale shop.
        self.currShop = s

    def init(self):
        """Place the player in a random starting system of the universe."""
        self.currSystem = self.universe.getRandomSystem()
        self.currShop = self.currSystem.getShop()

    def __str__(self):
        return self.player.name
| true |
56959d54cae6ca532906da756bac5274059bcbbe | Python | Rand01ph/reviewboard | /reviewboard/cmdline/tests/test_rbsite.py | UTF-8 | 6,245 | 2.671875 | 3 | [
"MIT"
] | permissive | """Unit tests for reviewboard.cmdline.rbsite."""
from __future__ import unicode_literals
import os
import shutil
import tempfile
from reviewboard.cmdline.rbsite import (Command,
MissingSiteError,
UpgradeCommand,
validate_site_paths)
from reviewboard.testing.testcase import TestCase
class CommandTests(TestCase):
"""Unit tests for reviewboard.cmdline.rbsite.Command."""
def setUp(self):
super(CommandTests, self).setUp()
self.command = Command()
def test_get_site_paths_with_string(self):
"""Testing Command.get_site_paths with site_path as string"""
class Options(object):
site_path = '/var/www/reviewboard'
self.assertEqual(self.command.get_site_paths(Options()),
['/var/www/reviewboard'])
def test_get_site_paths_with_list(self):
"""Testing Command.get_site_paths with site_path as ststring"""
class Options(object):
site_path = [
'/var/www/reviewboard1',
'/var/www/reviewboard2',
]
self.assertEqual(
self.command.get_site_paths(Options()),
[
'/var/www/reviewboard1',
'/var/www/reviewboard2',
])
def test_get_site_paths_without_site_path(self):
"""Testing Command.get_site_paths without site_path"""
class Options(object):
site_path = None
self.assertEqual(self.command.get_site_paths(Options()), [])
class UpgradeCommandTests(TestCase):
"""Unit tests for reviewboard.cmdline.rbsite.UpgradeCommand."""
def setUp(self):
super(UpgradeCommandTests, self).setUp()
self.command = UpgradeCommand()
def test_get_site_paths_with_all_sites(self):
"""Testing UpgradeCommand.get_site_paths with all_sites=True"""
tmpdir = tempfile.mkdtemp(prefix='rbsite-')
dir1 = os.path.join(tmpdir, 'site1')
dir2 = os.path.join(tmpdir, 'site2')
dir3 = os.path.join(tmpdir, 'site3')
# Create 2 of the 3 site directories. The third will be excluded, since
# it doesn't exist.
os.mkdir(dir1, 0o755)
os.mkdir(dir2, 0o755)
site_filename = os.path.join(tmpdir, 'sites')
with open(site_filename, 'w') as fp:
fp.write('%s\n' % dir1)
fp.write('%s\n' % dir2)
fp.write('%s\n' % dir3)
class Options(object):
all_sites = True
sitelist = site_filename
try:
self.assertEqual(self.command.get_site_paths(Options()),
{dir1, dir2})
finally:
shutil.rmtree(tmpdir)
def test_get_site_paths_with_all_sites_and_empty(self):
"""Testing UpgradeCommand.get_site_paths with all_sites=True and no
existing sites in sites file
"""
tmpdir = tempfile.mkdtemp(prefix='rbsite-')
# Note that we won't be creating these directories.
dir1 = os.path.join(tmpdir, 'site1')
dir2 = os.path.join(tmpdir, 'site2')
dir3 = os.path.join(tmpdir, 'site3')
site_filename = os.path.join(tmpdir, 'sites')
with open(site_filename, 'w') as fp:
fp.write('%s\n' % dir1)
fp.write('%s\n' % dir2)
fp.write('%s\n' % dir3)
class Options(object):
all_sites = True
sitelist = site_filename
expected_message = \
'No Review Board sites were listed in %s' % site_filename
try:
with self.assertRaisesMessage(MissingSiteError, expected_message):
self.command.get_site_paths(Options())
finally:
shutil.rmtree(tmpdir)
def test_get_site_paths_with_string(self):
"""Testing UpgradeCommand.get_site_paths with site_path as string"""
class Options(object):
all_sites = False
site_path = '/var/www/reviewboard'
self.assertEqual(self.command.get_site_paths(Options()),
['/var/www/reviewboard'])
def test_get_site_paths_with_list(self):
"""Testing UpgradeCommand.get_site_paths with site_path as ststring"""
class Options(object):
all_sites = False
site_path = [
'/var/www/reviewboard1',
'/var/www/reviewboard2',
]
self.assertEqual(
self.command.get_site_paths(Options()),
[
'/var/www/reviewboard1',
'/var/www/reviewboard2',
])
def test_get_site_paths_without_site_path(self):
"""Testing UpgradeCommand.get_site_paths without site_path"""
class Options(object):
all_sites = False
site_path = None
self.assertEqual(self.command.get_site_paths(Options()), [])
class ValidateSitePathsTests(TestCase):
"""Unit tests for reviewboard.cmdline.rbsite.validate_site_paths."""
def test_with_valid_sites(self):
"""Testing validate_site_paths with valid sites"""
# This should not raise.
validate_site_paths([os.path.dirname(__file__)])
def test_with_empty(self):
"""Testing validate_site_paths with empty list"""
expected_message = \
"You'll need to provide a site directory to run this command."
with self.assertRaisesMessage(MissingSiteError, expected_message):
validate_site_paths([])
with self.assertRaisesMessage(MissingSiteError, expected_message):
validate_site_paths(None)
def test_with_missing_site(self):
"""Testing validate_site_paths with missing site"""
expected_message = 'The site directory "/test" does not exist.'
with self.assertRaisesMessage(MissingSiteError, expected_message):
validate_site_paths(['/test'])
def test_with_missing_site_and_require_exists_false(self):
"""Testing validate_site_paths with missing site and
require_exists=False
"""
# This should not raise.
validate_site_paths(['/test'], require_exists=False)
| true |
2880d0b3e60be4d63c80e1b04b70e1c81dd8c7aa | Python | andyfangdz/Spectre | /src/libhistogram/modules.py | UTF-8 | 908 | 2.609375 | 3 | [
"BSD-2-Clause"
] | permissive | import cv2
import numpy as np
class Ghost(object):
def __init__(self):
self.mask = None
self.hist = None
def update(self, partition, mask):
self.mask = mask
hist = cv2.calcHist([partition], [0], mask, [16], [0, 180])
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
self.hist = hist.reshape(-1)
def getHist(self):
return self.hist
def showHist(self):
bin_count = self.hist.shape[0]
bin_w = 24
img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
return img
def getBackProj(self, frame):
return cv2.calcBackProject([frame], [0], self.hist, [0, 180], 1)
| true |
8929e151f815f5f181a753dc7ee871f49fe6e713 | Python | rochabr/alexa-whispers | /lambda_function.py | UTF-8 | 8,208 | 3.140625 | 3 | [] | no_license | """
This is a Python template for Alexa to get you building skills (conversations) quickly.
"""
from __future__ import print_function
import random
from dynamo_handler import write_whisper, Whisper, read_whisper
# import dynamo_handler
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_whispered_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'SSML',
'ssml': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet response in the top-level Alexa envelope."""
    return dict(version='1.0',
                sessionAttributes=session_attributes,
                response=speechlet_response)
def build_output(whispers):
    """Compose the SSML read-back for a list of whisper strings.

    Zero whispers gets a fixed message; one whisper gets singular
    phrasing; otherwise each whisper is numbered. All user-facing text
    is preserved verbatim from the original implementation.
    """
    if not whispers:
        return "<speak>You don't have any whispers today. Check again tomorrow!</speak>"
    if len(whispers) == 1:
        body = (
            '<speak>You have one whisper! How exciting! Here it is! '
            '<emphasis level="strong"><amazon:effect name="whispered">'
            f'{whispers[0]}.</amazon:effect></emphasis>'
            '<break time="1s"/>Well, I hope you\'ve enjoyed your whisper!'
        )
    else:
        pieces = [f"<speak>You have {len(whispers)} whispers. Let me whisper them to you..."]
        for number, whisper in enumerate(whispers, start=1):
            pieces.append(
                f'<break time="1s"/>Whisper number {number}! '
                '<emphasis level="strong"><amazon:effect name="whispered">'
                f'{whisper}.</amazon:effect></emphasis>'
            )
        pieces.append('<break time="1s"/>Well, I hope you\'ve enjoyed your whispers!')
        body = "".join(pieces)
    # The original logs the body (without the trailing prompt) before returning.
    print(body)
    return body + '<break time="2s"/> What do you want to do now?</speak>'
# --------------- Functions that control the skill's behavior ------------------
def get_readwhispers_response(session, intent):
    """Fetch the caller's stored whispers and whisper them back as SSML.

    The password comes from the intent's `password` slot; the session
    stays open so the user can issue another command.
    """
    user_id = session['user']['userId']
    password = intent['slots']['password']['value']
    whispers = read_whisper(user_id, password)
    speechlet = build_whispered_response(
        "My Whispers",
        build_output(whispers),
        "What do you want to do now?",
        False,
    )
    return build_response({}, speechlet)
def get_sendwhisper_response(session, intent):
    """Persist a whisper for the named recipient and confirm aloud.

    The confirmation text (including its original wording) is used both
    as the speech output and as the reprompt.
    """
    slots = intent['slots']
    recipient = slots['name']['value']
    speech = "All right, I've send your whisper to " + recipient + ". What do you want to do now?"
    record = Whisper(
        session['user']['userId'],
        slots['message']['value'],
        slots['password']['value'],
        slots['name']['value'],
    )
    write_whisper(record)
    return build_response({}, build_speechlet_response(
        "Send Whisper To", speech, speech, False))
def get_welcome_response():
    """Greeting spoken on launch (and for help); keeps the session open."""
    speech = ("Welcome to Whisper! I can read your whispers or send a whisper "
              "to someone. What do you want to do?")
    # Spoken again if the user stays silent or is not understood.
    reprompt = "I don't know if you heard me, welcome to your custom alexa application!"
    return build_response({}, build_speechlet_response(
        "Welcome", speech, reprompt, False))
def handle_session_end_request():
    """Say goodbye and close the session (no reprompt is needed)."""
    farewell = "Thank you for using the whisper app! Bye! "
    # should_end_session=True ends the session and exits the skill.
    return build_response({}, build_speechlet_response(
        "Session Ended", farewell, None, True))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
    """Hook invoked when a new session starts; currently a no-op.

    A typical use is restoring per-user state from an external store.
    """
    return None
def on_launch(launch_request, session):
    """Handle a bare skill launch by speaking the welcome message."""
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to the matching handler.

    Raises ValueError for intents this skill does not implement.
    """
    intent = intent_request['intent']
    intent_name = intent['name']
    print(intent_request)
    if intent_name == "ReadWhispers":
        return get_readwhispers_response(session, intent)
    if intent_name == "SendWhisperToName":
        return get_sendwhisper_response(session, intent)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """Log session teardown.

    Not invoked when the skill itself ends the session with
    should_end_session=True. Add cleanup logic here if needed.
    """
    print(f"on_session_ended requestId={session_ended_request['requestId']}"
          f", sessionId={session['sessionId']}")
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """AWS Lambda entry point: route the incoming Alexa request by type.

    The JSON body of the request arrives in `event`; `context` is unused.
    Application-ID validation is intentionally disabled (the ID is only
    printed), matching the original behaviour.
    """
    print("Incoming request...")
    print(event['session']['application']['applicationId'])
    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])
    request_type = event['request']['type']
    if request_type == "LaunchRequest":
        return on_launch(event['request'], event['session'])
    if request_type == "IntentRequest":
        return on_intent(event['request'], event['session'])
    if request_type == "SessionEndedRequest":
        return on_session_ended(event['request'], event['session'])
| true |
401cf925d0029691af57dc9bce0f82ceef05687e | Python | Rpereira23/BeijingAirPollutionPrediction | /core/model.py | UTF-8 | 4,457 | 2.640625 | 3 | [] | no_license | import os
import sys
import math
import keras
import logging
import numpy as np
import datetime as dt
import tensorflow as tf
from numpy import newaxis
from core.util.timer import Timer
from keras.models import Sequential, load_model
from keras.layers import Dense, SimpleRNN, LSTM
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from keras import losses
from sklearn.metrics import mean_squared_error
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class Model():
    """A class for building, training and running a recurrent Keras model."""
    def __init__(self):
        # Default architecture: one SimpleRNN layer feeding a single
        # regression output, trained with Adam on mean squared error.
        self.model = Sequential()
        self.model.add(SimpleRNN(units=10, input_shape=(1, 1)))
        self.model.add(Dense(1))
        adam = Adam(lr=0.001)
        self.model.compile(loss=losses.mean_squared_error, optimizer=adam)
        self.model.summary()
    def build_model(self, model_configs):
        """Walk the layer settings in `model_configs` (currently a stub).

        The layer-construction code is commented out upstream; this still
        iterates the config so a malformed config fails loudly here.
        """
        with Timer(logger, "Building Model"):
            for layer in model_configs['model']['layers']:
                # Extracted but unused until layer construction is enabled.
                units = layer.get('units')
                dropout_rate = layer.get('rate')
                activation = layer.get('activation')
                return_seq = layer.get('return_seq')
                input_timesteps = layer.get('input_timesteps')
                input_dim = layer.get('input_dim')
    def train(self, X_train, y_train, nb_epoch, batch_size):
        """Fit on in-memory arrays for `nb_epoch` epochs with `batch_size`.

        BUGFIX: a leftover debug override forced nb_epoch to 30 and the
        batch size to 16, silently ignoring both arguments (the Timer
        message already claimed the requested values were used).
        """
        with Timer(logger, "[Model] Training Started: %s epochs, %s batch size " % (nb_epoch, batch_size)):
            self.model.fit(X_train, y_train, epochs=nb_epoch, batch_size=batch_size)
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch, save_dir):
        """Fit from a generator, checkpointing the lowest-loss model to save_dir."""
        with Timer(logger,
                   '[Model] Training Started: %s epochs, %s batch size, %s batches per epoch' % (epochs, batch_size, steps_per_epoch)):
            save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
            callbacks = [
                ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True)
            ]
            self.model.fit_generator(
                data_gen,
                steps_per_epoch=steps_per_epoch,
                epochs=epochs,
                callbacks=callbacks,
                workers=1
            )
    def predict_point_by_point(self, data):
        """Predict one step ahead for each input window; returns a flat array."""
        print('[Model] Predicting Point-by-Point...')
        predicted = self.model.predict(data)
        return np.reshape(predicted, (predicted.size,))
    def predict_sequences_multiple(self, data, window_size, prediction_len):
        """Predict `prediction_len`-step sequences, re-feeding each prediction.

        After each block the starting window jumps forward prediction_len
        steps; returns a list of predicted sequences.
        """
        print('[Model] Predicting Sequences Multiple...')
        prediction_seqs = []
        for i in range(int(len(data)/prediction_len)):
            curr_frame = data[i*prediction_len]
            predicted = []
            for _ in range(prediction_len):
                predicted.append(self.model.predict(curr_frame[newaxis,:,:])[0,0])
                # Slide the window: drop the oldest step, append the prediction.
                curr_frame = curr_frame[1:]
                curr_frame = np.insert(curr_frame, [window_size-2], predicted[-1], axis=0)
            prediction_seqs.append(predicted)
        return prediction_seqs
    def predict_sequence_full(self, data, window_size):
        """Predict the whole horizon from the first window, re-feeding outputs."""
        print('[Model] Predicting Sequences Full...')
        curr_frame = data[0]
        predicted = []
        for _ in range(len(data)):
            predicted.append(self.model.predict(curr_frame[newaxis,:,:])[0,0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size-2], predicted[-1], axis=0)
        return predicted
| true |
12f0df494ebc35da9433f5a4d0731eff2dd127da | Python | hy299792458/LeetCode | /python/11-containerWithMostWater.py | UTF-8 | 380 | 2.90625 | 3 | [] | no_license | class Solution(object):
def maxArea(self, height):
l = 0
r = len(height) - 1
res = 0
while l < r:
h = min(height[l], height[r])
res = max(res, h * (r - l))
while l < len(height) and height[l] <= h:
l += 1
while r >= 0 and height[r] <= h:
r -= 1
return res
| true |
ff57f60e1ff5fc7d7eafa36fca18f789ca4106ab | Python | ramksharma1674/pyprojold | /tmpcall.py | UTF-8 | 477 | 3.5 | 4 | [] | no_license | from temp import to_celcius
import random
# Convert 70 (presumably degrees Fahrenheit -- to_celcius comes from the
# project-local `temp` module; TODO confirm) and show the result.
c= to_celcius(70)
print (c)
# Three random samples: a float in [0, 1), an integer die roll 1-6, and a
# multiple of 5 in [100, 500).
number, number2, number3 = random.random(), random.randint(1,6), random.randrange(100,500,5)
#print("number=", random.random())
#number2 = random.randint(1,6)
#number3 = random.randrange(100,500,5)
print (number)
print (number2)
print (number3)
# Mixed-type list demo: print, append, reverse in place, then iterate.
list1 = ["123", "456", 4 , 6.7, -1]
print(list1)
list1.append("Ram")
print(list1)
list1.reverse()
print(len(list1))
for item1 in list1:
print (item1) | true |
37c053b75e74200057240af8f3ff3a9f10e8063e | Python | mokumokustudy/procon20190113 | /src/kwatch/ABC085B.py | UTF-8 | 391 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
## https://atcoder.jp/contests/abs/tasks/abc085_b
import sys
def run(nums):
    """Return how many distinct values appear in `nums`."""
    distinct = set(nums)
    return len(distinct)
def main():
    """Read N, then N integers, from stdin and print the distinct count."""
    def gets(input=sys.stdin):
        # One stripped line of input (the judge supplies data on stdin).
        return input.readline().strip()
    N = int(gets())
    values = [int(gets()) for _ in range(N)]
    assert len(values) == N
    print(run(values))
main()
| true |
c5adf783a3187de26a10cfc48e68827f649627ed | Python | shirayukikitsune/python-code | /src/main/lp/treinamento_3/sum_of_consecutive_odd_numbers_1/main.py | UTF-8 | 276 | 3.296875 | 3 | [
"Unlicense"
] | permissive | def run():
    # Read the two bounds from stdin (order-insensitive: swapped if needed).
    x = int(input())
    y = int(input())
    if x > y:
        x, y = y, x
    # First odd number after x. NOTE(review): when x is itself odd, the sum
    # starts at x + 2 (x excluded), and range() excludes y as well -- confirm
    # these exclusive bounds match the intended kata specification.
    start = x + 2 if x % 2 == 1 else x + 1
    end = y
    total = 0
    # Step by 2 so every visited i stays odd.
    for i in range(start, end, 2):
        total = total + i
    print(total)
if __name__ == '__main__':
    run()
| true |
c8f78097f826798226a7b994ebf08e38fbffc0f6 | Python | JanainaNascimento/ExerciciosPython | /ex 015.py | UTF-8 | 1,068 | 4.1875 | 4 | [] | no_license | '''faça um programa que leia o comprimento do cateto oposto
e do cateto adjacente de um trian retangulo,
calc e mostre o comp da hipotenusa
print('*Calcula a Hipotenusa de um triângulo retangulo*')
catOpo = float(input('Digite o cateto oposto: '))
catAdj = float(input('Digite o cateto adjacente: '))
hi = (catAdj ** 2 + catOpo ** 2) ** (1/2)
print(f'O comprimento da Hipotenusa é {hi:.3f}')
from math import pow
catOpo = float(input('Digite o cateto oposto: '))
catAdj = float(input('Digite o cateto adjacente: '))
hi = pow(pow(catAdj, 2) + (pow(catOpo, 2)), 1/2)
print(f'O comprimento da Hipotenusa é {hi:.3f}')
#calcular hipotenusa com biblioteca math
import math
catOpo = float(input('Digite o cateto oposto: '))
catAdj = float(input('Digite o cateto adjacente: '))
hi = math.hypot(catAdj, catOpo)
print(f'O comprimento da Hipotenusa é {hi:.3f}')
from math import hypot
catOpo = float(input('Digite o cateto oposto: '))
catAdj = float(input('Digite o cateto adjacente: '))
hi = hypot(catAdj, catOpo)
print(f'O comprimento da Hipotenusa é {hi:.3f}')
''' | true |
e294211a1da242cb78ab6446dfa1e3a78103e72d | Python | Feelx234/nestmodel | /nestmodel/unified_functions.py | UTF-8 | 3,498 | 3.125 | 3 | [
"MIT"
] | permissive | """This file should contain functions that work independent of the underlying graph structure used (e.g. networkx or graph-tool)"""
import numpy as np
def is_networkx_str(G_str):
"""Checks whether a repr string is from networkx Graph"""
if (G_str.startswith("<networkx.classes.graph.Graph") or
G_str.startswith("<networkx.classes.digraph.DiGraph")):
return True
return False
def is_graphtool_str(G_str): # pragma: gt no cover
"""Checks whether a repr string is from graph-tool Graph"""
if G_str.startswith("<Graph object, "):
return True
return False
def is_fastgraph_str(G_str):
"""Checks whether a repr string is from a fastgraph Graph"""
if G_str.startswith("<nestmodel.fast_graph.FastGraph "):
return True
return False
def is_directed(G):
"""Returns whether a graph is directed or not, independent of the graph structure"""
G_str = repr(G)
if is_networkx_str(G_str):
return G.is_directed()
elif is_fastgraph_str(G_str):
return G.is_directed
elif is_graphtool_str(G_str): # pragma: gt no cover
return G.is_directed()
else:
raise NotImplementedError()
def num_nodes(G):
"""Returns the number of nodes for varies kinds of graphs"""
G_str = repr(G)
if is_networkx_str(G_str):
return G.number_of_nodes()
elif is_fastgraph_str(G_str):
return G.num_nodes
elif is_graphtool_str(G_str): # pragma: gt no cover
return G.num_vertices()
else:
raise NotImplementedError()
def get_sparse_adjacency(G):
"""Returns a sparse adjacency matrix as in networkx"""
G_str = repr(G)
if is_networkx_str(G_str):
import networkx as nx # pylint: disable=import-outside-toplevel
return nx.to_scipy_sparse_array(G, dtype=np.float64)
elif is_fastgraph_str(G_str):
return G.to_coo()
elif is_graphtool_str(G_str): # pragma: gt no cover
from graph_tool.spectral import adjacency # pylint: disable=import-outside-toplevel # type: ignore
return adjacency(G).T
else:
raise NotImplementedError()
def get_out_degree_array(G):
"""Returns an array containing the out-degrees of each node"""
G_str = repr(G)
if is_networkx_str(G_str):
if is_directed(G):
return _nx_dict_to_array(G.out_degree)
else:
return _nx_dict_to_array(G.degree)
elif is_fastgraph_str(G_str):
return G.out_degree
elif is_graphtool_str(G_str): # pragma: gt no cover
return G.get_out_degrees(np.arange(num_nodes(G)))
else:
raise NotImplementedError()
def _nx_dict_to_array(d):
"""Helper function converting dict to array"""
return np.array(d, dtype=np.uint32)[:,1]
def to_fast_graph(G):
from nestmodel.fast_graph import FastGraph
G_str = repr(G)
if is_networkx_str(G_str):
return FastGraph.from_nx(G)
elif is_fastgraph_str(G_str):
from copy import copy
return copy(G)
elif is_graphtool_str(G_str): # pragma: gt no cover
return FastGraph.from_gt(G)
else:
raise NotImplementedError()
def rewire_graph(G, depth=0, initial_colors=None, method=1, both = False, **kwargs):
"""Helper function employing NeSt rewiring on a copy of an arbitrary graph"""
G_fg = to_fast_graph(G)
G_fg.ensure_edges_prepared(initial_colors=initial_colors, both=both, max_depth=depth+1)
G_fg.rewire(depth=depth, method=method, **kwargs)
return G_fg | true |
432a9ed7a8111a6509f1a354edd3d6fa18ff84ed | Python | KunyiLiu/algorithm_problems | /kunyi/dp/greedy/queue-reconstruction-by-height.py | UTF-8 | 870 | 3.8125 | 4 | [] | no_license | class Solution:
"""
@param people: a random list of people
@return: the queue that be reconstructed
"""
def reconstructQueue(self, people):
# # 遍历排好序的people,从身高最高的人开始,根据每个人的k值,将其插入到结果数组中
# 因为我们遍历是从身高最高的人开始的,所以即使后面有人插入改变了前面插入人在结果集中的位置,但是相对关系没有变,即每个人的前面比他高的人这个事实没有变,也因为后面插入的人的身高都低于前面的人,所以无法影响之前的结果
# takes O(n^2)
result = []
sorted_people = sorted(people, key = lambda x: (-x[0], x[1]))
for p in sorted_people:
# insert takes O(n)
result.insert(p[1], p)
return result
| true |
fa166e4c0510e69caf4fe58f7381e55f319914de | Python | lyger/matsuri-monitor | /matsuri_monitor/chat/info.py | UTF-8 | 827 | 2.75 | 3 | [
"MIT"
] | permissive | from dataclasses import dataclass
VIDEO_URL_TEMPLATE = "https://www.youtube.com/watch?v={video_id}"
CHANNEL_URL_TEMPLATE = "https://www.youtube.com/channel/{channel_id}"
@dataclass
class ChannelInfo:
    """Metadata describing a YouTube channel."""
    id: str             # YouTube channel id
    name: str           # display name
    thumbnail_url: str  # avatar image URL
    org: str            # organization/agency label
    @property
    def url(self):
        """Browser URL of the channel, derived from its id."""
        return CHANNEL_URL_TEMPLATE.format_map({"channel_id": self.id})
@dataclass
class VideoInfo:
    """Metadata describing a YouTube live stream (live or archived)."""
    id: str
    title: str
    channel: ChannelInfo
    start_timestamp: float = None  # epoch seconds; None until known
    @property
    def url(self):
        """Browser URL of the video, derived from its id."""
        return VIDEO_URL_TEMPLATE.format_map({"video_id": self.id})
| true |
7b2c42e955ea13c0d11d41ebfd8717666048d61f | Python | alessandrobalata/pyrlmc | /objects/control.py | UTF-8 | 4,200 | 2.890625 | 3 | [
"MIT"
] | permissive | from objects.cont_value import ContValue
from objects.controlled_process import ControlledProcess
import numpy as np
import matplotlib.pyplot as plt
from problems.problem import Problem
class Control:
    """Feedback control used both backward (optimization) and forward
    (simulation).

    One control value per (time step, Monte Carlo path) is stored in
    ``values``; each step is optimized either by an extensive grid search
    over [u_min, u_max] or by gradient descent on the regressed
    continuation value.
    """
    def __init__(self, problem: Problem):
        # values[n, m]: control at time step n on path m (NaN until computed)
        self.values = np.zeros((problem.N + 1, problem.M)) * np.nan
        self.u_max = problem.u_max
        self.u_min = problem.u_min
        self.U = problem.U
        self.running_reward = problem.running_reward
        self.dt = problem.dt
        self.M = problem.M
        self.optimization_type = problem.optimization_type
        self.step_gradient = problem.step_gradient
        self.epsilon_gradient = problem.epsilon_gradient
        self.first_derivative = problem.first_derivative
        self.second_derivative = problem.second_derivative
        self.N = problem.N
    def compute(self, n: int, x: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> np.ndarray:
        """Compute the optimal control at step `n` for state vector `x`.

        Dispatches on `optimization_type`; gradient descent is only used
        away from the terminal steps (n < N - 3), with grid search as the
        fallback in every other case.
        """
        if self.optimization_type == 'extensive':
            return self._extensive_search(n, x, cont_value, coeff)
        if self.optimization_type == 'gradient' and n < self.N - 3:
            # Warm start from the zero control (same shape as x).
            u_tp1 = x * 0
            return self._gradient_descent(n, x, u_tp1, cont_value, coeff)
        return self._extensive_search(n, x, cont_value, coeff)
    def _gradient_descent(self, n: int, x: np.ndarray, u_tp1: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> np.ndarray:
        """Newton-style descent started from `u_tp1`.

        Iterates u <- u - step * f'(u)/f''(u) until the relative change of
        every component falls below `epsilon_gradient`.
        """
        u = u_tp1
        convergence = False
        while not convergence:
            tmp = u - self.__ratio_derivatives(n, x, u, cont_value, coeff) * self.step_gradient
            variation = np.abs(tmp - u) / (1 + np.abs(u))
            # BUGFIX: the original tested `variation.all() < epsilon`, which
            # compares a *boolean* against the tolerance and therefore
            # implements no meaningful stopping rule. Converge when every
            # component's relative update is below the tolerance.
            convergence = bool(np.all(variation < self.epsilon_gradient))
            u = tmp
        self.values[n, :] = u
        return self.values[n, :].reshape(1, self.M)
    def __ratio_derivatives(self, n: int, x: np.ndarray, u: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> np.ndarray:
        """Newton direction: f'(u)/f''(u) of the step-n objective
        (regressed continuation value plus running reward over dt)."""
        numerator = cont_value.derivative(n, x, u, coeff) + self.first_derivative(n, x, u) * self.dt
        denumerator = cont_value.second_derivative(n, x, u, coeff) + self.second_derivative(n, x, u) * self.dt
        return numerator / denumerator
    def _extensive_search(self, n: int, x: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> np.ndarray:
        """Grid search: evaluate U candidates in [u_min, u_max] and keep,
        per path, the one minimizing reward*dt + continuation value.

        :param n: time step
        :param x: state vector
        :param cont_value: continuation value object
        :param coeff: vector of regression coefficients
        :return: control vector of shape (1, M)
        """
        print('computing the control')
        test = np.linspace(self.u_min, self.u_max, self.U).reshape(1, self.U)
        idx = np.argmin(self.running_reward(x, test.T).T * self.dt +
                        cont_value.compute_batch(n, x.reshape(1, self.M), test, coeff),
                        axis=1)
        self.values[n, :] = test[0, idx]
        return self.values[n, :].reshape(1, self.M)
    def plot(self) -> None:
        """Plot the control process over time (one line per path)."""
        plt.figure()
        plt.plot(self.values)
        plt.xlabel('time step')
        plt.ylabel('control value')
        plt.title('Control Process over time')
    def scatter(self, controlled_process: ControlledProcess, time: int) -> None:
        """Scatter the control against the controlled process at `time`."""
        plt.figure()
        plt.plot(controlled_process.values[time, :], self.values[time, :], 'o')
        plt.xlabel('process value')
        plt.ylabel('control value')
        plt.title('Control vs. Controlled Process')
| true |
16b88814c78cd454b6b6ca6b52e5d4e455db68f0 | Python | mjdrushton/potential-pro-fit | /lib/atsim/pro_fit/_channel.py | UTF-8 | 8,227 | 2.65625 | 3 | [] | no_license | import logging
import uuid
import itertools
import sys
from gevent.queue import Queue
from gevent import Greenlet
import gevent.lock
import gevent
class ChannelCallback(object):
    """Forwarding callback for execnet channels.

    An execnet channel accepts only one callback for its lifetime. This
    object is registered once and forwards each message to a swappable
    inner callback, so the effective handler can be changed later.
    """
    def __init__(self, callback=None):
        self.callback = callback
    def __call__(self, msg):
        handler = self.callback
        if not handler:
            return None
        return handler(msg)
class ChannelException(Exception):
    """Channel-level failure; optionally carries the offending wire message."""
    def __init__(self, message, wiremsg=None):
        super().__init__(message)
        # The raw message received from the channel, if any, for diagnostics.
        self.wiremsg = wiremsg
class AbstractChannel(object):
    """Abstract base class for making execnet channels nicer to work with.

    At a minimum client code should override the make_start_message()
    method. The start_response() method can also be used to customise
    channel start behaviour.
    """
    def __init__(
        self,
        execnet_gw,
        channel_remote_exec,
        channel_id=None,
        connection_timeout=60,
    ):
        """Create and start an execnet channel wrapped by this object.

        Args:
            execnet_gw (execnet.Gateway): Gateway used to create channel.
            channel_remote_exec (module): Module that should be used to start execnet channel.
            channel_id (None, optional): Channel id - if not specified a uuid will be generated.
            connection_timeout (int, optional): Timeout in seconds after which connection will fail if 'READY' message not received.
        """
        self._logger = logging.getLogger(__name__).getChild("BaseChannel")
        if channel_id is None:
            self._channel_id = str(uuid.uuid4())
        else:
            self._channel_id = channel_id
        self._callback = None
        self._logger.info("Starting channel, id='%s'", self.channel_id)
        self._channel = self._startChannel(
            execnet_gw, channel_remote_exec, connection_timeout
        )
        self._logger.info("Channel started id='%s'", self.channel_id)
    def _startChannel(
        self, execnet_gw, channel_remote_exec, connection_timeout
    ):
        """Open the remote_exec channel, send the start message, await reply."""
        channel = execnet_gw.remote_exec(channel_remote_exec)
        # Sending from multiple greenlets raised reentrant-IO errors; attach
        # one send lock per gateway so message writes are serialized.
        if not hasattr(channel.gateway, "_sendlock"):
            channel.gateway._sendlock = gevent.lock.Semaphore()
        startmsg = self.make_start_message()
        self._logger.debug("Channel start message: %s", startmsg)
        self._send(channel, startmsg)
        msg = channel.receive(connection_timeout)
        self.start_response(msg)
        return channel
    def make_start_message(self):
        """Returns the message that should be sent to channel to initialise the remote exec.

        Returns:
            Start message.
        """
        # IMPROVED: raise NotImplementedError (the conventional signal for an
        # abstract method) rather than a bare Exception; it is still an
        # Exception subclass, so any existing handlers keep working.
        raise NotImplementedError(
            "This class needs to be implemented in child classes."
        )
    def start_response(self, msg):
        """Validate the first message received after sending the start message.

        Args:
            msg : Message received after starting channel.
        """
        mtype = msg.get("msg", None)
        if mtype is None or not mtype in ["READY", "ERROR"]:
            self._logger.warning(
                "Couldn't start channel, id='%s'",
                self.channel_id
            )
            raise ChannelException(
                "Couldn't create channel for channel_id: '%s', was expecting 'READY' got: %s"
                % (self.channel_id, msg),
                msg,
            )
        if mtype == "READY":
            self.ready(msg)
        elif mtype == "ERROR":
            self.error(msg)
    def ready(self, msg):
        """Hook invoked when the channel answers READY; default is a no-op."""
        pass
    def error(self, msg):
        """Hook invoked when the channel answers ERROR; logs and raises."""
        self._logger.warning(
            "Couldn't start channel, id='%s': %s",
            self.channel_id,
            msg.get("reason", ""),
        )
        raise ChannelException(
            "Couldn't create channel for channel_id: '%s', %s"
            % (self.channel_id, msg.get("reason", "")),
            msg,
        )
    @property
    def channel_id(self):
        """Identifier passed to (or generated for) this channel."""
        return self._channel_id
    def setcallback(self, callback):
        # execnet allows one callback per channel, so the first call installs
        # a forwarding ChannelCallback and later calls just swap the target.
        if self._callback is None:
            self._callback = ChannelCallback(callback)
            self._channel.setcallback(self._callback)
        else:
            self._callback.callback = callback
    def getcallback(self):
        if self._callback is None:
            return None
        return self._callback.callback
    callback = property(fget=getcallback, fset=setcallback)
    def __iter__(self):
        return self._channel
    def __next__(self):
        msg = next(self._channel)
        self._logger.debug("_next, %s: %s", self.channel_id, msg)
        return msg
    def _send(self, ch, msg):
        # Serialize sends per gateway (see _startChannel).
        with ch.gateway._sendlock:
            self._logger.debug("_send, %s: %s", self.channel_id, msg)
            ch.send(msg)
    def send(self, msg):
        return self._send(self._channel, msg)
    def __len__(self):
        # A single channel counts as one, mirroring MultiChannel.__len__.
        return 1
    def close(self, error=None):
        return self._channel.close(error)
    def waitclose(self, timeout=None):
        return self._channel.waitclose(timeout)
    def isclosed(self):
        return self._channel.isclosed()
class MultiChannel(object):
    # Class-level logger shared by all MultiChannel instances.
    _logger = logging.getLogger("atsim.pro_fit._channel.MultiChannel")
    def __init__(
        self, execnet_gw, channel_factory, num_channels=1, channel_id=None
    ):
        """Factory class and container for managing multiple Download/UploadChannel instances.
        This class implements a subset of the BaseChannel methods. Importantly, the send() method is not implemented.
        To send a message, the client must first obtain a channel instance by iterating over this MultiChannel instance
        (for instance, by calling next() ).
        Args:
            execnet_gw (execnet.Gateway): Gateway used to create execnet channels.
            channel_factory (ChannelFactory): Factory whose `.createChannel(execnet_gw, channel_id)` returns new channel instances.
            num_channels (int): Number of channels that should be created and managed.
            channel_id (None, optional): Base channel id, this will be appended by the number of each channel managed by multichannel.
                If `None` an ID will be automatically generated using `uuid.uuid4()`.
        """
        if channel_id is None:
            self._channel_id = str(uuid.uuid4())
        else:
            self._channel_id = channel_id
        self._logger.info(
            "Starting %d channels with base channel_id='%s'",
            num_channels,
            self._channel_id,
        )
        self._channels = self._start_channels(
            execnet_gw, channel_factory, num_channels
        )
        # Round-robin iterator: next(self) hands out channels in rotation.
        self._iter = itertools.cycle(self._channels)
        self._callback = None
    def _start_channels(self, execnet_gw, channel_factory, num_channels):
        """Create num_channels channels; each gets the base id plus its index."""
        channels = []
        for i in range(num_channels):
            chan_id = "_".join([str(self._channel_id), str(i)])
            ch = channel_factory.createChannel(execnet_gw, chan_id)
            channels.append(ch)
        return channels
    def __iter__(self):
        # Iteration never terminates: it cycles forever over the channels.
        return self._iter
    def __next__(self):
        return next(self._iter)
    def setcallback(self, callback):
        # Install the same message callback on every managed channel.
        self._callback = callback
        for ch in self._channels:
            ch.setcallback(callback)
    def getcallback(self):
        return self._callback
    callback = property(fget=getcallback, fset=setcallback)
    def __len__(self):
        return len(self._channels)
    def waitclose(self, timeout=None):
        # Block until every managed channel has closed (timeout per channel).
        for channel in self._channels:
            channel.waitclose(timeout)
    def broadcast(self, msg):
        """Send msg to all channels registered with MultiChannel"""
        self._logger.debug(
            "Broadcasting message to %d channels: %s", len(self), msg
        )
        for channel in self._channels:
            channel.send(msg)
| true |
1fe6e6d14f2464fb5b550635afe66acf0beec88e | Python | Geokenny23/Basic-python-batch5-c | /Tugas-2/Soal-1.py | UTF-8 | 2,009 | 3.328125 | 3 | [] | no_license | semuakontak = []
# NOTE(review): this module-level `kontak` appears unused -- tambahkontak
# builds its own local dict and tampilkankontak shadows the name in its
# loop; confirm nothing else reads it before removing.
kontak = []
def menu():
    """Print the main menu (text kept verbatim, in Indonesian)."""
    for line in ("----menu---",
                 "1. Daftar Kontak",
                 "2. Tambah Kontak",
                 "3. Keluar"):
        print(line)
def tampilkankontak():
    """Print every saved contact from `semuakontak` (format unchanged)."""
    print("Daftar Kontak: ")
    for entry in semuakontak:
        print(f"Nama : {entry['nama']}")
        print(f"No. Telepon : {entry['telpon']}")
def tambahkontak():
    """Prompt for a name and phone number and append them to semuakontak."""
    nama = str(input("\nMasukkan Data\nNama : "))
    telpon = str(input("No Telepon : "))
    semuakontak.append({"nama": nama, "telpon": telpon})
    print("Kontak berhasil ditambahkan")
print("Selamat datang!!!")
# Interactive menu loop: 1 = list contacts, 2 = add a contact, 3 = quit.
while True:
    menu()
    pilihan = int(input("Pilih menu: "))
    if pilihan == 1:
        tampilkankontak()
    elif pilihan == 2:
        tambahkontak()
    elif pilihan == 3:
        print("program selesai, sampai jumpa")
        break
    else:
        # Any other number: warn (in Indonesian) and show the menu again.
        print("menu tidak tersedia, silahkan menginput No. yang benar")
        print("-------------------------------------------------------")
# print("Selamat datang!")
# while True:
# print("---Menu---")
# print("1. Daftar Kontak")
# print("2. Tambah Kontak")
# print("3. Keluar")
# pilih = int(input("Pilih menu: "))
# if pilih == 1:
# #print("Amal")
# #print("0834267588")
# Nama = {
# "nama" : "Amal"
# "No. telepon" : "0834267588"
# }
# }
# daftar_kontak = [Nama].append(x)
# print(daftar_kontak)
# #print(Nama)
# #print(Nomor)
# #print(x)
# #print(y)
# elif pilih == 2:
# x = str(input("Nama: "))
# y = int(input("No. Telepon: "))
# print("Kontak berhasil ditambahkan")
# elif pilih == 3:
# print("program selesai, sampai jumpa")
# break
# else:
# print("menu tidak tersedia, silahkan menginput No. yang benar")
# print("------------------------------------------------------")
| true |
5b07739d2b9e87f19b39db1cd5413c04f4f70fdd | Python | ehoversten/login_registration | /server.py | UTF-8 | 5,097 | 2.78125 | 3 | [] | no_license | from flask import Flask, request, redirect, render_template, session, flash
from flask_bcrypt import Bcrypt
# import the function connectToMySQL from the file mysqlconnection.py
from mysqlconnection import connectToMySQL
import re
# create a regular expression object that we can use run operations on
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
app = Flask(__name__)
app.secret_key = "Shh....It's a secret!"
bcrypt = Bcrypt(app)
mysql = connectToMySQL('register_db')
# print("all the users", mysql.query_db("SELECT * FROM users;"))
@app.route('/')
def index():
    """Render the landing page, seeding empty session defaults."""
    if 'id' not in session:
        session['id'] = ''
    # BUGFIX: the guard tested 'first_name' but wrote session['name'], so
    # the checked key was never initialized and this branch ran on every
    # request; initialize the key that is actually being tested.
    if 'first_name' not in session:
        session['first_name'] = ''
    print(session)
    return render_template("index.html", id=session['id'])
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user by email/password and start their session.

    Flashes an error and returns to '/' on any failure; redirects to
    '/success' when the bcrypt password check passes.
    """
    login_email = request.form['login_email']
    login_passwd = request.form['login_password']
    if len(login_email) < 1 or len(login_passwd) < 1:
        flash("please enter your email and password to login")
        return redirect('/')
    # See if the provided email exists in the database.
    data = {
        'email': login_email
    }
    queryID = "SELECT * FROM users WHERE email = %(email)s;"
    query_result = mysql.query_db(queryID, data)
    # BUGFIX: the original indexed query_result[0] *before* checking that a
    # row was found, crashing with IndexError for any unknown email; it also
    # populated the session before the password had been verified. Only set
    # session state after a successful bcrypt check.
    if query_result and bcrypt.check_password_hash(query_result[0]['password'], login_passwd):
        session['id'] = query_result[0]['id']
        session['first'] = query_result[0]['first_name']
        return redirect('/success')
    flash("You could not be logged in")
    return redirect('/')
@app.route('/process', methods=['POST'])
def validate():
error_flag = 0
# result = request.form
# print(result)
if len(request.form['first_name']) < 2:
flash('First Name field cannot be blank')
error_flag = 0
elif not request.form['first_name'].isalpha():
flash('Name fields cannot contain numbers')
error_flag = 0
# return redirect('/')
if len(request.form['last_name']) < 2:
flash('Last Name field cannot be blank')
error_flag = 0
elif not request.form['last_name'].isalpha():
flash('Name fields cannot contain numbers')
error_flag = 0
# return redirect('/')
if len(request.form['email']) < 1:
flash('Email field cannot be blank')
error_flag = 0
# return redirect('/')
elif not EMAIL_REGEX.match(request.form['email']):
flash('Invalid email address')
error_flag = 0
# return redirect('/')
if request.form['password'] != request.form['confirm']:
flash('Passwords must match to register!')
error_flag = 0
if error_flag == 0:
# include some logic to validate user input before adding them to the database!
# create the hash
pw_hash = bcrypt.generate_password_hash(request.form['password'])
# print(pw_hash)
query = "INSERT INTO users(first_name, last_name, email, password) VALUES (%(first_name)s, %(last_name)s, %(email)s, %(password_hash)s);"
data = {
'first_name':request.form['first_name'],
'last_name':request.form['last_name'],
'email':request.form['email'],
'password_hash':pw_hash
}
mysql.query_db(query, data)
# print("Data dict: ", data)
queryID = "SELECT * FROM users WHERE email = %(email)s;"
query_result = mysql.query_db(queryID, data)
# print('Results from query: ', query_result)
session['id'] = query_result[0]['id']
# print(session)
session['first'] = request.form['first_name']
return redirect('/success')
else:
return redirect('/')
@app.route('/success')
def success():
# result = request.form
# print(result)
flash("Success! You have been registered.")
all_users = mysql.query_db("SELECT * FROM users")
# print('Users: ', all_users)
return render_template('success.html', users=all_users)
@app.route('/logout', methods=['POST'])
def logout():
print("You have been logged out")
session.clear()
return redirect('/')
# def debugHelp(message = ""):
# print("\n\n-----------------------", message, "--------------------")
# print('REQUEST.FORM:', request.form)
# print('SESSION:', session)
if __name__=="__main__":
app.run(debug=True)
| true |
cc56ddebd98c3bc1db5a440d02376dfc42533095 | Python | purnima-git/content-dynamodb-datamodeling | /2-2-4-Hierarchical-Data/query.py | UTF-8 | 1,542 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python3
import boto3
from boto3.dynamodb.conditions import Key
table = boto3.resource("dynamodb").Table("TargetStores")
client = boto3.client("dynamodb")
# Get single store location
try:
store_number = "1760"
response = table.get_item(Key={"StoreNumber": store_number})
print(f">>> Get item by store number found:")
print(response["Item"])
except Exception as e:
print("Error getting item:")
print(e)
# Query by state
try:
response = table.query(
IndexName="Location-index", KeyConditionExpression=Key("State").eq("FL"),
)
print(f'\n>>> Query by state found {response["Count"]} locations:')
print(response["Items"])
except Exception as e:
print("Error running query:")
print(e)
# Query by city
try:
response = table.query(
IndexName="Location-index",
KeyConditionExpression=Key("State").eq("FL")
& Key("StateCityPostalcode").begins_with("FL#ORLANDO"),
)
print(f'\n>>> Query by city found {response["Count"]} locations:')
print(response["Items"])
except Exception as e:
print("Error running query:")
print(e)
# Query by postal code
try:
response = table.query(
IndexName="Location-index",
KeyConditionExpression=Key("State").eq("MN")
& Key("StateCityPostalcode").begins_with("MN#MINNEAPOLIS#55403"),
)
print(f'\n>>> Query by postal code found {response["Count"]} locations:')
print(response["Items"])
except Exception as e:
print("Error running query:")
print(e)
| true |
8a16997a8e141a1c5f2ceb07057eed29c798bed4 | Python | shreyanshu007/Crime-Analysis-BTP | /crawler/InsertionIntodatabase.py | UTF-8 | 2,231 | 3.171875 | 3 | [] | no_license | import pymysql
import datetime
# function to check the presence of a url in tabel
def IsUrlExists(url):
'''
TO check if URL exists in DB
input: url - url of website
'''
# print("Article: ", url)
connection = pymysql.connect('localhost', 'root', 'root', 'CRIME_ANALYSIS')
if all(ord(char) < 128 for char in url):
sql = 'SELECT NewsArticleUrl from NewsArticles WHERE NewsArticleUrl Like %s'
db = connection.cursor()
db.execute(sql, ('%' + url + '%',))
result = db.fetchall()
db.close()
connection.close()
if result:
return True
else:
return False
else:
return True
# function to convert string date to datetime format
def return_date(date):
'''
To convert date into DB input format
'''
connection = pymysql.connect('localhost', 'root', 'root', 'CRIME_ANALYSIS')
tokens = date.split('T')
tokens2 = tokens[1].split("+")
date = tokens[0] + " " + tokens2[0]
sql = "SELECT DATE_FORMAT(%s, '%%y-%%m-%%d %%h:%%i:%%s') as DATETIME;"
db = connection.cursor()
db.execute(sql, (date,))
result = db.fetchall()
db.close()
connection.close()
for res in result:
print(res)
print(res[0])
return res[0]
# function to enter data into database.
def InsertIntoDatabase(date, url, title, text, location):
new_date = date
if date:
new_date = return_date(date)
else:
new_date = datetime.datetime.now()
connection = pymysql.connect('localhost', 'root', 'root', 'CRIME_ANALYSIS')
sql = "INSERT INTO NewsArticles(NewsArticleTitle, NewsArticleText, NewsArticleDate, NewsArticleUrl, Location) values(%s, %s, %s, %s, %s)"
try:
db = connection.cursor()
print("date: ", new_date)
if db.execute(sql, (title, text, new_date, url, location,)):
rowId = connection.insert_id()
print("Row id: ", rowId)
connection.commit()
db.close()
connection.close()
except Exception as e:
# print(new_date)
print(e)
connection.rollback()
connection.close()
| true |
82f0d07c3f1c6e376a6e241146fcc60fe762c750 | Python | weisonyoung/ex_dataclass | /ex_dataclass/xpack.py | UTF-8 | 2,198 | 2.75 | 3 | [
"MIT"
] | permissive | """
Ex Dataclass X-Pack
The extend tools for ex_dataclass
1. json loads
2. asdict
3. argument signature
"""
import copy
import json
import typing
from ex_dataclass.type_ import Field_
from . import m
# transfer function type
def asdict_xxxFieldName(value: typing.Any) -> m.F_VALUE:
pass
asdict_func_type = asdict_xxxFieldName
def asdict(obj, *, dict_factory=dict):
if not m.is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return __asdict_inner(obj, dict_factory)
def __asdict_inner(obj, dict_factory):
if m.is_dataclass_instance(obj):
result = []
for f in m.fields(obj):
asdict_fn: asdict_func_type = getattr(obj, f"{m.AsditFuncPrefix}_{f.name}", None)
if asdict_fn:
value = asdict_fn(getattr(obj, f.name, None))
else:
value = __asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, list):
return type(obj)(__asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((__asdict_inner(k, dict_factory),
__asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
class EXpack:
# identification
__ex_pack_field__ = m.EXPackField
# reduce memory usage
__slots__ = ['fields', 'ex_debug']
def __init__(self, *args, **kwargs):
self.fields: typing.Dict[m.F_NAME, Field_] = {}
self.ex_debug = False
def _set_properties(self, fields: typing.Dict[m.F_NAME, Field_] = None) -> 'EXpack':
self.fields = fields
return self
def _with_debug(self, debug: bool) -> 'EXpack':
self.ex_debug = debug
return self
def asdict(self) -> typing.Dict:
return asdict(self)
@classmethod
def json_loads(cls, data: str):
return cls(**json.loads(data))
def json_dumps(self) -> str:
return json.dumps(asdict(self))
def pprint(self):
import pprint
pprint.pprint(asdict(self))
| true |
841bc3d8a9045cf160e676362d93817cf1897e67 | Python | pferreira101/SDB-Zulip_Deployment | /users.py | UTF-8 | 485 | 2.921875 | 3 | [] | no_license | import sys
import csv
import random
num_users = int(sys.argv[1])
row_list = [["EMAIL","PASSWORD","PRIVATE_TO"]]
for i in range(num_users):
randNum = random.randint(1,num_users)
while randNum == i:
randNum = random.randint(1,num_users)
row = ["user"+str(i+1)+"@email.com","exemplo","user"+str(randNum)+"@email.com"]
row_list.append(row)
with open(str(num_users)+'users.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerows(row_list)
| true |
2f0498934a75c65854903615aa4bce7db34b8472 | Python | PatrikYu/MLiA | /MLiA/MLiA_classification/treePlotter.py | UTF-8 | 6,931 | 3.359375 | 3 | [] | no_license | # coding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8') #python的str默认是ascii编码,和unicode编码冲突,需要加上这几句
from matplotlib.font_manager import FontProperties
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14) # 使坐标轴能显示中文
from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei'] # 使plotNode能显示中文,注意'中文'前要加U
# 在python中使用Matplotlib注释绘制树形图
# matplotlib提供了一个非常有用的注释工具annotations,它可以在数据图形上添加文本注解
# 1.使用文本注解绘制树节点
import matplotlib.pyplot as plt
# 定义作图属性
# 用字典来定义决策树决策结果的属性,下面的字典定义也可写作 decisionNode={boxstyle:'sawtooth',fc:'0.8'}
decisionNode = dict(boxstyle="sawtooth", fc="0.8") # 决策节点
# boxstyle为文本框的类型,sawtooth是锯齿形,fc是边框线粗细
leafNode = dict(boxstyle="round4", fc="0.8") # 叶节点
arrow_args = dict(arrowstyle="<-") # 箭头形状
def plotNode(nodeTxt, centerPt, parentPt, nodeType): # 子函数:画线并标注
# nodeTxt为要显示的文本,centerPt为文本的中心点,即箭头所在的点,parentPt为指向文本的点,nodeType为文本类型
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
xytext=centerPt, textcoords='axes fraction',
va="center", ha="center", bbox=nodeType, arrowprops=arrow_args )
# 第一个参数是注释的内容,xy设置箭头尖的坐标,xytext设置注释内容显示的起始位置,arrowprops 用来设置箭头
# axes fraction:轴分数,annotate是关于一个数据点的文本
def createPlot():
fig = plt.figure(1,facecolor='white') # 定义一个画布,背景为白色
fig.clf() # 把画布清空
# createPlot.ax1为全局变量,绘制图像的句柄,subplot为定义了一个绘图,
createPlot.ax1 = plt.subplot(111,frameon=False)
# 111表示figure中的图有1行1列,即1个,最后的1代表第一个图,frameon表示是否绘制坐标轴矩形
plotNode(U'决策节点 ' ,(0.5,0.1),(0.1,0.5),decisionNode)
plotNode(U'叶节点',(0.8,0.1),(0.3,0.8),leafNode)
plt.show()
createPlot()
# 精细作图
# {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
# 我们必须知道有多少个叶节点,用于确定x轴的长度,还得知道树的层数,用于确定y轴的高度
def getNumLeafs(myTree): # 获取叶节点数目
numLeafs=0
firstStr=myTree.keys()[0] # 第一个键即第一个节点,'no surfacing'
secondDict=myTree[firstStr] # 这个键key的值value,即该节点的所有子树
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict': # 如果secondDict[key]是个字典,即该节点下面还有子树,说明这是个决策节点
numLeafs += getNumLeafs(secondDict[key]) # 递归,看看这个决策节点下有几个叶节点
else: numLeafs += 1 # 是叶节点,自加1
return numLeafs
def getTreeDepth(myTree): # 确定树的层数,即决策节点的个数+1
maxDepth=0
firstStr=myTree.keys()[0]
secondDict=myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':
thisDepth = 1+getTreeDepth(secondDict[key])
else: thisDepth=1
if thisDepth > maxDepth: maxDepth = thisDepth
return maxDepth
# 为了节省时间,函数 retrieveTree输出预先存储的树信息
def retrieveTree(i):
listOfTrees=[{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
{'no surfacing': {0: 'no', 1: {'flippers': {0:{ 'head':{0:'no', 1: 'yes'}},1:'no'}}}}]
return listOfTrees[i]
print getNumLeafs(retrieveTree(1))
def createPlot(inTree): # 这是主函数,首先阅读它
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])# 定义横纵坐标轴
#createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) # 绘制图像,无边框,无坐标轴
createPlot.ax1 = plt.subplot(111, frameon=False) # 无边框,有坐标轴
# 注意图形的大小是0-1 ,0-1,例如绘制3个叶子结点,最佳坐标应为1/3,2/3,3/3
plotTree.totalW = float(getNumLeafs(inTree)) #全局变量totalW(树的宽度)=叶子数
# 树的宽度用于计算放置决策(判断)节点的位置,原则是将它放在所有叶节点的中间
plotTree.totalD = float(getTreeDepth(inTree)) #全局变量 树的高度 = 深度
# 同时我们用两个全局变量plotTree.xoff和plotTree.yoff追踪已经绘制的节点位置,以及放置下一个节点的合适位置
plotTree.xOff = -0.5/plotTree.totalW; # 向左移半格
#但这样会使整个图形偏右因此初始的,将x值向左移一点。
plotTree.yOff = 1.0; # 最高点,(0.5,1.0)为第一个点的位置
plotTree(inTree, (0.5,1.0), '') # 调用plotTree子函数,并将初始树和起点坐标传入
plt.show()
def plotTree(myTree, parentPt, nodeTxt):
numLeafs = getNumLeafs(myTree) # 当前树的叶子数
depth = getTreeDepth(myTree) # 深度,函数中没用到
firstStr = myTree.keys()[0] # 第一个节点
# cntrPt文本中心点 parentPt 指向文本中心的点
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff) # 定位到中间位置,不太清楚
plotMidText(cntrPt, parentPt, nodeTxt) # 画分支上的键:在父子节点之间填充文本信息
plotNode(firstStr, cntrPt, parentPt, decisionNode) # 画决策节点
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD # 从上往下画
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':# 如果是字典则是一个决策(判断)结点
plotTree(secondDict[key],cntrPt,str(key)) # 继续递归
else: # 打印叶子结点
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode) # 画叶节点
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key)) # 在父子节点之间填充文本信息
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD # 重新确定下一个节点的纵坐标
def plotMidText(cntrPt,parentPt,txtString): # 在父子节点之间填充文本信息
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0] # 得到中间位置
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid,yMid,txtString,va="center",ha="center",rotation=30)
createPlot(retrieveTree(0))
myTree=retrieveTree(0) # 得先赋值再改
myTree['no surfacing'][3]='maybe'
createPlot(myTree)
| true |
482953452fbcea6bf072d03b289eaa9bc1236220 | Python | IndiaCFG3/team-52 | /modals/Student.py | UTF-8 | 299 | 2.859375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | class Student:
def __init__(self, student_id, name, teacher_id):
self.student_id = student_id
self.name = name
self.teacher_id = teacher_id
class StudentScore:
def __init__(self, student_id, score):
self.student_id = student_id
self.score = score
| true |
2dc42b1cab0130a6f584226854bc6909c3c36490 | Python | immanishbainsla/manit_workshop | /Day-2/Face_Recognition.py | UTF-8 | 2,271 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import matplotlib.pyplot as plt
import numpy as np
# In[2]:
# Mapping between names & labels
idx2name = {
}
files = os.listdir()
pics = []
Y = []
cnt = 0
for f in files:
if f.endswith(".npy"):
data = np.load(f)
labels = np.ones(data.shape[0],dtype='int32')*cnt
pics.append(data)
idx2name[cnt] = f[:-4]
cnt += 1
Y.append(labels)
# In[3]:
X = np.vstack(pics)
print(X.shape)
# In[4]:
Y = np.asarray(Y)
Y = Y.reshape((40,))
Y.shape
# In[5]:
X.shape,Y.shape
# In[6]:
idx2name
# In[7]:
def dist(a,b):
return np.sum((a-b)**2)**.5
def knn(X,Y,test_point,k=5):
# 1 Step - Find dist of test_point from all points
d = []
m = X.shape[0]
for i in range(m):
current_dis = dist(X[i],test_point)
d.append((current_dis,Y[i]))
# Sort
d.sort()
# Take the first k elements after sorting (slicing)
d = np.array(d[0:k])
d = d[:,1]
uniq,occ = np.unique(d,return_counts=True)
#print(uniq,occ)
idx = np.argmax(occ)
pred = uniq[idx]
return idx2name[int(pred)]
# In[8]:
#test_point = X[5]
# In[9]:
import cv2
import numpy as np
camera = cv2.VideoCapture(0)
facedetector = cv2.CascadeClassifier('../Day-1/face_template.xml')
while True:
b,img = camera.read()
if b==False:
continue
# Detect Faces
faces = facedetector.detectMultiScale(img,1.2,5)
# No face is detected
if(len(faces)==0):
continue
# Draw bounding box around each face
for f in faces:
x,y,w,h = f
green = (0,255,0)
cv2.rectangle(img,(x,y),(x+w,y+h),green,5)
# Get the Pred for Cropped Face
cropped_face = img[y:y+h,x:x+w]
cropped_face = cv2.resize(cropped_face,(100,100))
pred = knn(X,Y,cropped_face)
cv2.putText(img, pred, (x,y-20), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
# Show the New Image
cv2.imshow("Title",img)
#Add some delay 1 ms between 2 frames
key = cv2.waitKey(1)&0xFF
if key==ord('q'):
break
camera.release()
cv2.destroyAllWindows()
# In[ ]:
# In[ ]:
| true |
580bb65166c15098b660e359f35a6f35a4602909 | Python | Jill1627/Artificial-intelligence-projects | /spam_filter.py | UTF-8 | 4,045 | 3.328125 | 3 | [] | no_license | """
Implement a basic spam filter using Naive Bayes Classification
"""
import email
import math
import os
import heapq
from collections import defaultdict
from collections import Counter
############################################################
# Section 1: Spam Filter
############################################################
def load_tokens(email_path):
tokens = list()
with open(email_path) as email_file:
email_msg = email.message_from_file(email_file)
line_iter = email.iterators.body_line_iterator(email_msg)
for line in line_iter:
tokens.extend(line.split())
return tokens
def log_probs(email_paths, smoothing):
total_counter = Counter()
for path in email_paths:
tokens = load_tokens(path)
word_counter = Counter(tokens)
total_counter.update(word_counter)
total_distinct = len(total_counter)
total_count = sum(total_counter.values())
unk_prob = calc_unk_prob(total_count, total_distinct, smoothing)
prob_lookup = defaultdict(lambda: unk_prob)
for word in total_counter:
word_count = total_counter[word]
log_prob = calc_log_prob(word_count, total_count, total_distinct, smoothing)
prob_lookup[word] = log_prob
return prob_lookup
def calc_log_prob(word_count, all_count, vocab_distinct, smoothing):
return math.log((word_count + smoothing) / ((all_count) + smoothing * (vocab_distinct + 1)))
def calc_unk_prob(all_count, vocab_distinct, smoothing):
return math.log((smoothing) / ((all_count) + smoothing * (vocab_distinct + 1)))
class SpamFilter(object):
def __init__(self, spam_dir, ham_dir, smoothing):
spam_paths = [spam_dir + '/' + fname for fname in os.listdir(spam_dir)]
num_spams = len(spam_paths)
ham_paths = [ham_dir + '/' + fname for fname in os.listdir(ham_dir)]
num_hams = len(ham_paths)
self.spam_prob_dict = log_probs(spam_paths, smoothing)
self.ham_prob_dict = log_probs(ham_paths, smoothing)
self.prob_is_spam = 1.0 * num_spams / (num_spams + num_hams)
self.prob_not_spam = 1.0 * num_hams / (num_spams + num_hams)
def is_spam(self, email_path):
tokens = load_tokens(email_path)
word_counter = Counter(tokens)
sum_spam_log_probs = 0
sum_ham_log_probs = 0
for word in word_counter.keys():
count_w = word_counter[word]
sum_spam_log_probs += count_w * self.spam_prob_dict[word]
sum_ham_log_probs += count_w * self.ham_prob_dict[word]
c_spam = math.log(self.prob_is_spam) + sum_spam_log_probs
c_ham = math.log(self.prob_not_spam) + sum_ham_log_probs
return True if c_spam > c_ham else False
def most_indicative_spam(self, n):
mutual_words = set(self.spam_prob_dict.keys()).intersection(set(self.ham_prob_dict.keys()))
word_indication = list()
for word in mutual_words:
p_w = math.exp(self.spam_prob_dict[word]) * self.prob_is_spam \
+ math.exp(self.ham_prob_dict[word]) * self.prob_not_spam
indication = self.spam_prob_dict[word] - math.log(p_w)
word_indication.append((word, indication))
nlarge = heapq.nlargest(n, word_indication, key = lambda i : i[1])
return [tup[0] for tup in nlarge]
def most_indicative_ham(self, n):
mutual_words = set(self.spam_prob_dict.keys()).intersection(set(self.ham_prob_dict.keys()))
word_indication = list()
for word in mutual_words:
p_w = math.exp(self.spam_prob_dict[word]) * self.prob_is_spam \
+ math.exp(self.ham_prob_dict[word]) * self.prob_not_spam
indication = self.ham_prob_dict[word] - math.log(p_w)
word_indication.append((word, indication))
nlarge = heapq.nlargest(n, word_indication, key = lambda i : i[1])
return [tup[0] for tup in nlarge]
| true |
d63a928d12c97866216bd213acb689fddf997eee | Python | motormanalpha/Datalogger | /takedata_github.py | UTF-8 | 1,841 | 3.015625 | 3 | [] | no_license | #
# Matt Schultz
# 2-18-2019
# First try with python code to take regular data from 34970a datalogger.
# Set your options here in the code (to keep it as simiple as possible).
# Import the csv file into "Excel" or "Calc" to graph and manipulate data.
#
import visa
import time
delay = 1 # Number of seconds between scans
loops = 5 # Number of times to do the loop
myfile = open('mydata.csv','a') # mydata.csv will be the output file name
rm = visa.ResourceManager()
daq = rm.open_resource('ASRL/dev/ttyUSB0::INSTR') #Change this to USB0 or 1 or 2... as needed
myfile.write(daq.query("SYST:DATE?")) # puts date stamp in the output file from the logger
myfile.write(daq.query("SYST:TIME?")) # puts time stamp in the output file from the logger
print(daq.query("*IDN?")) # Show logger information in the terminal
print('Scan list includes...')
#Choose some or none of the following scan lists:
#daq.write("CONF:VOLT:DC 10,0.001,(@101:110)") # Proven works, try first.
daq.write("CONF:TEMP THER,10000,1,0.01,(@111:116)")# Proven works, include daq.write("UNIT:...)
daq.write("UNIT:TEMP F,(@111:116)")
#daq.write("CONF:RES AUTO,DEF,(@111:116)") # Proven works.
#daq.write("CONF:VOLT:DC 10,0.001,(@101:110)") # Change and make your own
#daq.write("CONF:VOLT:DC 10,0.001,(@101:110)") # Change and make your own
#
print(daq.query("ROUT:SCAN?")) # show scan list in the terminal
print(daq.query("UNIT:TEMP?")) # shows temp units in the terminal (F,C,or K) if needed.
print('Delay between scans is', delay, 'seconds')
for i in range(loops):
myfile.write(daq.query("READ?")) #Sends line of data to csv file
time.sleep(delay) #seconds to sleep between scans
print('Scanned',i+1,'of', loops) # Countdown on terminal, if you ^C out the previous data should still be in the file.
myfile.close()
print ('Finished with scans')
#END
| true |
a8d1e7d68386245af0de01a697145c1114131a74 | Python | mango0713/Python | /20191217 - 소수판별.py | UTF-8 | 238 | 3.5 | 4 | [] | no_license | x = int(input(" input number :"))
a = 2
while a <= x :
if x % a ==0 :
break
a = a + 1
if a == x :
print ("yes, it is prim number")
else:
print("no, it is not prim number")
| true |
0ef6480a48314bf433959f42625dd95c1ca4197f | Python | invoke-ai/InvokeAI | /invokeai/backend/model_management/model_manager.py | UTF-8 | 42,984 | 2.984375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """This module manages the InvokeAI `models.yaml` file, mapping
symbolic diffusers model names to the paths and repo_ids used by the
underlying `from_pretrained()` call.
SYNOPSIS:
mgr = ModelManager('/home/phi/invokeai/configs/models.yaml')
sd1_5 = mgr.get_model('stable-diffusion-v1-5',
model_type=ModelType.Main,
base_model=BaseModelType.StableDiffusion1,
submodel_type=SubModelType.Unet)
with sd1_5 as unet:
run_some_inference(unet)
FETCHING MODELS:
Models are described using four attributes:
1) model_name -- the symbolic name for the model
2) ModelType -- an enum describing the type of the model. Currently
defined types are:
ModelType.Main -- a full model capable of generating images
ModelType.Vae -- a VAE model
ModelType.Lora -- a LoRA or LyCORIS fine-tune
ModelType.TextualInversion -- a textual inversion embedding
ModelType.ControlNet -- a ControlNet model
3) BaseModelType -- an enum indicating the stable diffusion base model, one of:
BaseModelType.StableDiffusion1
BaseModelType.StableDiffusion2
4) SubModelType (optional) -- an enum that refers to one of the submodels contained
within the main model. Values are:
SubModelType.UNet
SubModelType.TextEncoder
SubModelType.Tokenizer
SubModelType.Scheduler
SubModelType.SafetyChecker
To fetch a model, use `manager.get_model()`. This takes the symbolic
name of the model, the ModelType, the BaseModelType and the
SubModelType. The latter is required for ModelType.Main.
get_model() will return a ModelInfo object that can then be used in
context to retrieve the model and move it into GPU VRAM (on GPU
systems).
A typical example is:
sd1_5 = mgr.get_model('stable-diffusion-v1-5',
model_type=ModelType.Main,
base_model=BaseModelType.StableDiffusion1,
submodel_type=SubModelType.UNet)
with sd1_5 as unet:
run_some_inference(unet)
The ModelInfo object provides a number of useful fields describing the
model, including:
name -- symbolic name of the model
base_model -- base model (BaseModelType)
type -- model type (ModelType)
location -- path to the model file
precision -- torch precision of the model
hash -- unique sha256 checksum for this model
SUBMODELS:
When fetching a main model, you must specify the submodel. Retrieval
of full pipelines is not supported.
vae_info = mgr.get_model('stable-diffusion-1.5',
model_type = ModelType.Main,
base_model = BaseModelType.StableDiffusion1,
submodel_type = SubModelType.Vae
)
with vae_info as vae:
do_something(vae)
This rule does not apply to controlnets, embeddings, loras and standalone
VAEs, which do not have submodels.
LISTING MODELS
The model_names() method will return a list of Tuples describing each
model it knows about:
>> mgr.model_names()
[
('stable-diffusion-1.5', <BaseModelType.StableDiffusion1: 'sd-1'>, <ModelType.Main: 'main'>),
('stable-diffusion-2.1', <BaseModelType.StableDiffusion2: 'sd-2'>, <ModelType.Main: 'main'>),
('inpaint', <BaseModelType.StableDiffusion1: 'sd-1'>, <ModelType.ControlNet: 'controlnet'>)
('Ink scenery', <BaseModelType.StableDiffusion1: 'sd-1'>, <ModelType.Lora: 'lora'>)
...
]
The tuple is in the correct order to pass to get_model():
for m in mgr.model_names():
info = get_model(*m)
In contrast, the list_models() method returns a list of dicts, each
providing information about a model defined in models.yaml. For example:
>>> models = mgr.list_models()
>>> json.dumps(models[0])
{"path": "/home/lstein/invokeai-main/models/sd-1/controlnet/canny",
"model_format": "diffusers",
"name": "canny",
"base_model": "sd-1",
"type": "controlnet"
}
You can filter by model type and base model as shown here:
controlnets = mgr.list_models(model_type=ModelType.ControlNet,
base_model=BaseModelType.StableDiffusion1)
for c in controlnets:
name = c['name']
format = c['model_format']
path = c['path']
type = c['type']
# etc
ADDING AND REMOVING MODELS
At startup time, the `models` directory will be scanned for
checkpoints, diffusers pipelines, controlnets, LoRAs and TI
embeddings. New entries will be added to the model manager and defunct
ones removed. Anything that is a main model (ModelType.Main) will be
added to models.yaml. For scanning to succeed, files need to be in
their proper places. For example, a controlnet folder built on the
stable diffusion 2 base, will need to be placed in
`models/sd-2/controlnet`.
Layout of the `models` directory:
models
├── sd-1
│ ├── controlnet
│ ├── lora
│ ├── main
│ └── embedding
├── sd-2
│ ├── controlnet
│ ├── lora
│ ├── main
│ └── embedding
└── core
├── face_reconstruction
│ ├── codeformer
│ └── gfpgan
├── sd-conversion
│ ├── clip-vit-large-patch14 - tokenizer, text_encoder subdirs
│ ├── stable-diffusion-2 - tokenizer, text_encoder subdirs
│ └── stable-diffusion-safety-checker
└── upscaling
└─── esrgan
class ConfigMeta(BaseModel):Loras, textual_inversion and controlnet models are not listed
explicitly in models.yaml, but are added to the in-memory data
structure at initialization time by scanning the models directory. The
in-memory data structure can be resynchronized by calling
`manager.scan_models_directory()`.
Files and folders placed inside the `autoimport` paths (paths
defined in `invokeai.yaml`) will also be scanned for new models at
initialization time and added to `models.yaml`. Files will not be
moved from this location but preserved in-place. These directories
are:
configuration default description
------------- ------- -----------
autoimport_dir autoimport/main main models
lora_dir autoimport/lora LoRA/LyCORIS models
embedding_dir autoimport/embedding TI embeddings
controlnet_dir autoimport/controlnet ControlNet models
In actuality, models located in any of these directories are scanned
to determine their type, so it isn't strictly necessary to organize
the different types in this way. This entry in `invokeai.yaml` will
recursively scan all subdirectories within `autoimport`, scan models
files it finds, and import them if recognized.
Paths:
autoimport_dir: autoimport
A model can be manually added using `add_model()` using the model's
name, base model, type and a dict of model attributes. See
`invokeai/backend/model_management/models` for the attributes required
by each model type.
A model can be deleted using `del_model()`, providing the same
identifying information as `get_model()`
The `heuristic_import()` method will take a set of strings
corresponding to local paths, remote URLs, and repo_ids, probe the
object to determine what type of model it is (if any), and import new
models into the manager. If passed a directory, it will recursively
scan it for models to import. The return value is a set of the models
successfully added.
MODELS.YAML
The general format of a models.yaml section is:
type-of-model/name-of-model:
path: /path/to/local/file/or/directory
description: a description
format: diffusers|checkpoint
variant: normal|inpaint|depth
The type of model is given in the stanza key, and is one of
{main, vae, lora, controlnet, textual}
The format indicates whether the model is organized as a diffusers
folder with model subdirectories, or is contained in a single
checkpoint or safetensors file.
The path points to a file or directory on disk. If a relative path,
the root is the InvokeAI ROOTDIR.
"""
from __future__ import annotations
import hashlib
import os
import textwrap
import types
from dataclasses import dataclass
from pathlib import Path
from shutil import rmtree, move
from typing import Optional, List, Literal, Tuple, Union, Dict, Set, Callable
import torch
import yaml
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from pydantic import BaseModel, Field
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util import CUDA_DEVICE, Chdir
from .model_cache import ModelCache, ModelLocker
from .model_search import ModelSearch
from .models import (
BaseModelType,
ModelType,
SubModelType,
ModelError,
SchedulerPredictionType,
MODEL_CLASSES,
ModelConfigBase,
ModelNotFoundException,
InvalidModelException,
DuplicateModelException,
ModelBase,
)
# We are only starting to number the config file with release 3.
# The config file version doesn't have to start at release version, but it will help
# reduce confusion.
CONFIG_FILE_VERSION = "3.0.0"
@dataclass
class ModelInfo:
context: ModelLocker
name: str
base_model: BaseModelType
type: ModelType
hash: str
location: Union[Path, str]
precision: torch.dtype
_cache: Optional[ModelCache] = None
def __enter__(self):
return self.context.__enter__()
def __exit__(self, *args, **kwargs):
self.context.__exit__(*args, **kwargs)
class AddModelResult(BaseModel):
name: str = Field(description="The name of the model after installation")
model_type: ModelType = Field(description="The type of model")
base_model: BaseModelType = Field(description="The base model")
config: ModelConfigBase = Field(description="The configuration of the model")
MAX_CACHE_SIZE = 6.0 # GB
class ConfigMeta(BaseModel):
version: str
class ModelManager(object):
    """
    High-level interface to model management.

    Keeps an in-memory registry (self.models) of model configs keyed by
    "base/type/name", backed by a models.yaml file on disk, plus a RAM/VRAM
    cache (self.cache) for loaded models.
    """
    # Class-level default; instances normally override this in __init__.
    logger: types.ModuleType = logger
    def __init__(
        self,
        config: Union[Path, DictConfig, str],
        device_type: torch.device = CUDA_DEVICE,
        precision: torch.dtype = torch.float16,
        max_cache_size=MAX_CACHE_SIZE,
        sequential_offload=False,
        logger: types.ModuleType = logger,
    ):
        """
        Initialize with the path to the models.yaml config file.
        Optional parameters are the torch device type, precision, max_models,
        and sequential_offload boolean. Note that the default device
        type and precision are set up for a CUDA system running at half precision.

        :param config: path to models.yaml, or an already-loaded OmegaConf DictConfig
        :raises ValueError: if config is neither a path/string nor a DictConfig
        """
        self.config_path = None
        if isinstance(config, (str, Path)):
            self.config_path = Path(config)
            if not self.config_path.exists():
                # First run: create an empty config file so loading succeeds.
                logger.warning(f"The file {self.config_path} was not found. Initializing a new file")
                self.initialize_model_config(self.config_path)
            config = OmegaConf.load(self.config_path)
        elif not isinstance(config, DictConfig):
            raise ValueError("config argument must be an OmegaConf object, a Path or a string")
        # The "__metadata__" stanza carries the config-file schema version.
        self.config_meta = ConfigMeta(**config.pop("__metadata__"))
        # TODO: metadata not found
        # TODO: version check
        self.app_config = InvokeAIAppConfig.get_config()
        self.logger = logger
        # Shared RAM/VRAM cache used by all get_model() calls.
        self.cache = ModelCache(
            max_cache_size=max_cache_size,
            max_vram_cache_size=self.app_config.vram_cache_size,
            lazy_offloading=self.app_config.lazy_offload,
            execution_device=device_type,
            precision=precision,
            sequential_offload=sequential_offload,
            logger=logger,
        )
        self._read_models(config)
    def _read_models(self, config: Optional[DictConfig] = None):
        """Populate self.models from *config* (or reload from self.config_path when omitted).

        Also resets the cache-key bookkeeping and scans the models directory
        for models not listed in the config file.
        """
        if not config:
            if self.config_path:
                config = OmegaConf.load(self.config_path)
            else:
                # In-memory config with no backing file: nothing to reload.
                return
        self.models = dict()
        for model_key, model_config in config.items():
            # Keys such as "__metadata__" are bookkeeping, not models.
            if model_key.startswith("_"):
                continue
            model_name, base_model, model_type = self.parse_key(model_key)
            model_class = self._get_implementation(base_model, model_type)
            # alias for config file
            model_config["model_format"] = model_config.pop("format")
            self.models[model_key] = model_class.create_config(**model_config)
        # check config version number and update on disk/RAM if necessary
        self.cache_keys = dict()
        # add controlnet, lora and textual_inversion models from disk
        self.scan_models_directory()
    def sync_to_config(self):
        """
        Call this when `models.yaml` has been changed externally.
        This will reinitialize internal data structures by re-reading the
        config file and rescanning the models directory.
        """
        # Reread models directory; note that this will reinitialize the cache,
        # causing otherwise unreferenced models to be removed from memory
        self._read_models()
def model_exists(self, model_name: str, base_model: BaseModelType, model_type: ModelType, *, rescan=False) -> bool:
"""
Given a model name, returns True if it is a valid identifier.
:param model_name: symbolic name of the model in models.yaml
:param model_type: ModelType enum indicating the type of model to return
:param base_model: BaseModelType enum indicating the base model used by this model
:param rescan: if True, scan_models_directory
"""
model_key = self.create_key(model_name, base_model, model_type)
exists = model_key in self.models
# if model not found try to find it (maybe file just pasted)
if rescan and not exists:
self.scan_models_directory(base_model=base_model, model_type=model_type)
exists = self.model_exists(model_name, base_model, model_type, rescan=False)
return exists
@classmethod
def create_key(
cls,
model_name: str,
base_model: BaseModelType,
model_type: ModelType,
) -> str:
# In 3.11, the behavior of (str,enum) when interpolated into a
# string has changed. The next two lines are defensive.
base_model = BaseModelType(base_model)
model_type = ModelType(model_type)
return f"{base_model.value}/{model_type.value}/{model_name}"
@classmethod
def parse_key(cls, model_key: str) -> Tuple[str, BaseModelType, ModelType]:
base_model_str, model_type_str, model_name = model_key.split("/", 2)
try:
model_type = ModelType(model_type_str)
except Exception:
raise Exception(f"Unknown model type: {model_type_str}")
try:
base_model = BaseModelType(base_model_str)
except Exception:
raise Exception(f"Unknown base model: {base_model_str}")
return (model_name, base_model, model_type)
def _get_model_cache_path(self, model_path):
return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest())
@classmethod
def initialize_model_config(cls, config_path: Path):
"""Create empty config file"""
with open(config_path, "w") as yaml_file:
yaml_file.write(yaml.dump({"__metadata__": {"version": "3.0.0"}}))
    def get_model(
        self,
        model_name: str,
        base_model: BaseModelType,
        model_type: ModelType,
        submodel_type: Optional[SubModelType] = None,
    ) -> ModelInfo:
        """Given a model named identified in models.yaml, return
        an ModelInfo object describing it.
        :param model_name: symbolic name of the model in models.yaml
        :param model_type: ModelType enum indicating the type of model to return
        :param base_model: BaseModelType enum indicating the base model used by this model
        :param submodel_type: an ModelType enum indicating the portion of
               the model to retrieve (e.g. ModelType.Vae)
        :raises ModelNotFoundException: if the model is unknown or its files are gone
        """
        model_key = self.create_key(model_name, base_model, model_type)
        # rescan=True lets a model that was just copied into the models folder
        # be found without an explicit sync_to_config() call.
        if not self.model_exists(model_name, base_model, model_type, rescan=True):
            raise ModelNotFoundException(f"Model not found - {model_key}")
        model_config = self._get_model_config(base_model, model_name, model_type)
        model_path, is_submodel_override = self._get_model_path(model_config, submodel_type)
        # When the config points directly at the submodel's own files, load it
        # as a top-level model of that type instead.
        if is_submodel_override:
            model_type = submodel_type
            submodel_type = None
        model_class = self._get_implementation(base_model, model_type)
        if not model_path.exists():
            if model_class.save_to_config:
                # The entry lives in models.yaml: keep it but flag the error.
                self.models[model_key].error = ModelError.NotFound
                raise Exception(f'Files for model "{model_key}" not found at {model_path}')
            else:
                # The entry was discovered by directory scan: just forget it.
                self.models.pop(model_key, None)
                raise ModelNotFoundException(f'Files for model "{model_key}" not found at {model_path}')
        # TODO: path
        # TODO: is it accurate to use path as id
        dst_convert_path = self._get_model_cache_path(model_path)
        # Checkpoint-format models are converted to diffusers on demand; the
        # converted copy is cached at dst_convert_path.
        model_path = model_class.convert_if_required(
            base_model=base_model,
            model_path=str(model_path),  # TODO: refactor str/Path types logic
            output_path=dst_convert_path,
            config=model_config,
        )
        model_context = self.cache.get_model(
            model_path=model_path,
            model_class=model_class,
            base_model=base_model,
            model_type=model_type,
            submodel=submodel_type,
        )
        # Track which cache entries belong to this model so del_model() and
        # add_model() can evict them later.
        if model_key not in self.cache_keys:
            self.cache_keys[model_key] = set()
        self.cache_keys[model_key].add(model_context.key)
        model_hash = "<NO_HASH>"  # TODO:
        return ModelInfo(
            context=model_context,
            name=model_name,
            base_model=base_model,
            type=submodel_type or model_type,
            hash=model_hash,
            location=model_path,  # TODO:
            precision=self.cache.precision,
            _cache=self.cache,
        )
    def _get_model_path(
        self, model_config: ModelConfigBase, submodel_type: Optional[SubModelType] = None
    ) -> (Path, bool):
        """Extract a model's filesystem path from its config.
        :return: The fully qualified Path of the module (or submodule), and a
            flag that is True when the config supplied its own path for the
            requested submodel (a "submodel override").
        """
        model_path = model_config.path
        is_submodel_override = False
        # Does the config explicitly override the submodel?
        # NOTE(review): hasattr/getattr with a SubModelType member relies on the
        # enum being str-valued — confirm if the enum base ever changes.
        if submodel_type is not None and hasattr(model_config, submodel_type):
            submodel_path = getattr(model_config, submodel_type)
            if submodel_path is not None and len(submodel_path) > 0:
                model_path = getattr(model_config, submodel_type)
                is_submodel_override = True
        model_path = self.resolve_model_path(model_path)
        return model_path, is_submodel_override
def _get_model_config(self, base_model: BaseModelType, model_name: str, model_type: ModelType) -> ModelConfigBase:
"""Get a model's config object."""
model_key = self.create_key(model_name, base_model, model_type)
try:
model_config = self.models[model_key]
except KeyError:
raise ModelNotFoundException(f"Model not found - {model_key}")
return model_config
def _get_implementation(self, base_model: BaseModelType, model_type: ModelType) -> type[ModelBase]:
"""Get the concrete implementation class for a specific model type."""
model_class = MODEL_CLASSES[base_model][model_type]
return model_class
def _instantiate(
self,
model_name: str,
base_model: BaseModelType,
model_type: ModelType,
submodel_type: Optional[SubModelType] = None,
) -> ModelBase:
"""Make a new instance of this model, without loading it."""
model_config = self._get_model_config(base_model, model_name, model_type)
model_path, is_submodel_override = self._get_model_path(model_config, submodel_type)
# FIXME: do non-overriden submodels get the right class?
constructor = self._get_implementation(base_model, model_type)
instance = constructor(model_path, base_model, model_type)
return instance
def model_info(
self,
model_name: str,
base_model: BaseModelType,
model_type: ModelType,
) -> Union[dict, None]:
"""
Given a model name returns the OmegaConf (dict-like) object describing it.
"""
model_key = self.create_key(model_name, base_model, model_type)
if model_key in self.models:
return self.models[model_key].dict(exclude_defaults=True)
else:
return None # TODO: None or empty dict on not found
def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]:
"""
Return a list of (str, BaseModelType, ModelType) corresponding to all models
known to the configuration.
"""
return [(self.parse_key(x)) for x in self.models.keys()]
def list_model(
self,
model_name: str,
base_model: BaseModelType,
model_type: ModelType,
) -> Union[dict, None]:
"""
Returns a dict describing one installed model, using
the combined format of the list_models() method.
"""
models = self.list_models(base_model, model_type, model_name)
if len(models) >= 1:
return models[0]
else:
return None
    def list_models(
        self,
        base_model: Optional[BaseModelType] = None,
        model_type: Optional[ModelType] = None,
        model_name: Optional[str] = None,
    ) -> list[dict]:
        """
        Return a list of models.

        Each entry is the model's config dict (defaults omitted) augmented
        with model_name/base_model/model_type; "path" is made absolute.
        The optional arguments act as filters; supplying all three narrows
        the listing to a single model.
        """
        # With a fully-qualified request, look up that one key; otherwise walk
        # all known models in case-insensitive sorted order.
        model_keys = (
            [self.create_key(model_name, base_model, model_type)]
            if model_name and base_model and model_type
            else sorted(self.models, key=str.casefold)
        )
        models = []
        for model_key in model_keys:
            model_config = self.models.get(model_key)
            if not model_config:
                self.logger.error(f"Unknown model {model_name}")
                raise ModelNotFoundException(f"Unknown model {model_name}")
            cur_model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
            if base_model is not None and cur_base_model != base_model:
                continue
            if model_type is not None and cur_model_type != model_type:
                continue
            model_dict = dict(
                **model_config.dict(exclude_defaults=True),
                # OpenAPIModelInfoBase
                model_name=cur_model_name,
                base_model=cur_base_model,
                model_type=cur_model_type,
            )
            # expose paths as absolute to help web UI
            if path := model_dict.get("path"):
                model_dict["path"] = str(self.resolve_model_path(path))
            models.append(model_dict)
        return models
def print_models(self) -> None:
"""
Print a table of models and their descriptions. This needs to be redone
"""
# TODO: redo
for model_dict in self.list_models():
for model_name, model_info in model_dict.items():
line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}'
print(line)
    # Tested - LS
    def del_model(
        self,
        model_name: str,
        base_model: BaseModelType,
        model_type: ModelType,
    ):
        """
        Delete the named model.

        Removes the registry entry, evicts cached copies, deletes the
        conversion cache, and — when the files live inside the managed models
        directory — deletes the files themselves, then commits the config.
        :raises ModelNotFoundException: if the model is unknown
        """
        model_key = self.create_key(model_name, base_model, model_type)
        model_cfg = self.models.pop(model_key, None)
        if model_cfg is None:
            raise ModelNotFoundException(f"Unknown model {model_key}")
        # NOTE: eviction is not guaranteed to release memory (the model may
        # still be referenced elsewhere).
        cache_ids = self.cache_keys.pop(model_key, [])
        for cache_id in cache_ids:
            self.cache.uncache_model(cache_id)
        # if model inside invoke models folder - delete files
        model_path = self.resolve_model_path(model_cfg.path)
        cache_path = self._get_model_cache_path(model_path)
        if cache_path.exists():
            rmtree(str(cache_path))
        if model_path.is_relative_to(self.app_config.models_path):
            if model_path.is_dir():
                rmtree(str(model_path))
            else:
                model_path.unlink()
        self.commit()
    # LS: tested
    def add_model(
        self,
        model_name: str,
        base_model: BaseModelType,
        model_type: ModelType,
        model_attributes: dict,
        clobber: bool = False,
    ) -> AddModelResult:
        """
        Update the named model with a dictionary of attributes. Will fail with an
        assertion error if the name already exists. Pass clobber=True to overwrite.
        On a successful update, the config will be changed in memory and the
        method will return True. Will fail with an assertion error if provided
        attributes are incorrect or the model name is missing.
        The returned dict has the same format as the dict returned by
        model_info().
        """
        # relativize paths as they go in - this makes it easier to move the models directory around
        if path := model_attributes.get("path"):
            model_attributes["path"] = str(self.relative_model_path(Path(path)))
        model_class = self._get_implementation(base_model, model_type)
        model_config = model_class.create_config(**model_attributes)
        model_key = self.create_key(model_name, base_model, model_type)
        if model_key in self.models and not clobber:
            raise Exception(f'Attempt to overwrite existing model definition "{model_key}"')
        old_model = self.models.pop(model_key, None)
        if old_model is not None:
            # TODO: if path changed and old_model.path inside models folder should we delete this too?
            # remove conversion cache as config changed
            old_model_path = self.resolve_model_path(old_model.path)
            old_model_cache = self._get_model_cache_path(old_model_path)
            if old_model_cache.exists():
                if old_model_cache.is_dir():
                    rmtree(str(old_model_cache))
                else:
                    old_model_cache.unlink()
            # remove in-memory cache
            # note: it not guaranteed to release memory(model can has other references)
            cache_ids = self.cache_keys.pop(model_key, [])
            for cache_id in cache_ids:
                self.cache.uncache_model(cache_id)
        self.models[model_key] = model_config
        # Persist the updated registry to models.yaml immediately.
        self.commit()
        return AddModelResult(
            name=model_name,
            model_type=model_type,
            base_model=base_model,
            config=model_config,
        )
    def rename_model(
        self,
        model_name: str,
        base_model: BaseModelType,
        model_type: ModelType,
        new_name: Optional[str] = None,
        new_base: Optional[BaseModelType] = None,
    ):
        """
        Rename or rebase a model.

        Moves managed model files to the new base/name location, clears
        conversion and in-memory caches, re-keys the registry entry, and
        commits the config.
        :raises ModelNotFoundException: if the model is unknown
        :raises ValueError: if the destination key already exists
        """
        if new_name is None and new_base is None:
            self.logger.error("rename_model() called with neither a new_name nor a new_base. {model_name} unchanged.")
            return
        model_key = self.create_key(model_name, base_model, model_type)
        model_cfg = self.models.get(model_key, None)
        if not model_cfg:
            raise ModelNotFoundException(f"Unknown model: {model_key}")
        old_path = self.resolve_model_path(model_cfg.path)
        new_name = new_name or model_name
        new_base = new_base or base_model
        new_key = self.create_key(new_name, new_base, model_type)
        if new_key in self.models:
            raise ValueError(f'Attempt to overwrite existing model definition "{new_key}"')
        # if this is a model file/directory that we manage ourselves, we need to move it
        if old_path.is_relative_to(self.app_config.models_path):
            new_path = self.resolve_model_path(
                Path(
                    BaseModelType(new_base).value,
                    ModelType(model_type).value,
                    new_name,
                )
            )
            move(old_path, new_path)
            model_cfg.path = str(new_path.relative_to(self.app_config.models_path))
        # clean up caches
        old_model_cache = self._get_model_cache_path(old_path)
        if old_model_cache.exists():
            if old_model_cache.is_dir():
                rmtree(str(old_model_cache))
            else:
                old_model_cache.unlink()
        cache_ids = self.cache_keys.pop(model_key, [])
        for cache_id in cache_ids:
            self.cache.uncache_model(cache_id)
        # Re-key the registry entry under the new name/base.
        self.models.pop(model_key, None)  # delete
        self.models[new_key] = model_cfg
        self.commit()
    def convert_model(
        self,
        model_name: str,
        base_model: BaseModelType,
        model_type: Literal[ModelType.Main, ModelType.Vae],
        dest_directory: Optional[Path] = None,
    ) -> AddModelResult:
        """
        Convert a checkpoint file into a diffusers folder, deleting the cached
        version and deleting the original checkpoint file if it is in the models
        directory.
        :param model_name: Name of the model to convert
        :param base_model: Base model type
        :param model_type: Type of model ['vae' or 'main']
        :param dest_directory: optional destination for the diffusers folder;
            defaults to the managed models directory
        This will raise a ValueError unless the model is a checkpoint.
        """
        info = self.model_info(model_name, base_model, model_type)
        if info is None:
            raise FileNotFoundError(f"model not found: {model_name}")
        if info["model_format"] != "checkpoint":
            raise ValueError(f"not a checkpoint format model: {model_name}")
        # We are taking advantage of a side effect of get_model() that converts check points
        # into cached diffusers directories stored at `location`. It doesn't matter
        # what submodeltype we request here, so we get the smallest.
        submodel = {"submodel_type": SubModelType.Scheduler} if model_type == ModelType.Main else {}
        model = self.get_model(
            model_name,
            base_model,
            model_type,
            **submodel,
        )
        checkpoint_path = self.resolve_model_path(info["path"])
        old_diffusers_path = self.resolve_model_path(model.location)
        new_diffusers_path = (
            dest_directory or self.app_config.models_path / base_model.value / model_type.value
        ) / model_name
        if new_diffusers_path.exists():
            raise ValueError(f"A diffusers model already exists at {new_diffusers_path}")
        try:
            # Promote the cached conversion to its permanent location and
            # re-register the model as diffusers format.
            move(old_diffusers_path, new_diffusers_path)
            info["model_format"] = "diffusers"
            info["path"] = (
                str(new_diffusers_path)
                if dest_directory
                else str(new_diffusers_path.relative_to(self.app_config.models_path))
            )
            info.pop("config")
            result = self.add_model(model_name, base_model, model_type, model_attributes=info, clobber=True)
        except Exception:
            # something went wrong, so don't leave dangling diffusers model in directory or it will cause a duplicate model error!
            rmtree(new_diffusers_path)
            raise
        # The original checkpoint is redundant now; remove it if we manage it.
        if checkpoint_path.exists() and checkpoint_path.is_relative_to(self.app_config.models_path):
            checkpoint_path.unlink()
        return result
    def resolve_model_path(self, path: Union[Path, str]) -> Path:
        """Resolve *path* against the configured models directory.

        Note: despite the old comment, the result is the joined
        models_path / path (absolute when models_path is), not a relative path.
        """
        return self.app_config.models_path / path
def relative_model_path(self, model_path: Path) -> Path:
if model_path.is_relative_to(self.app_config.models_path):
model_path = model_path.relative_to(self.app_config.models_path)
return model_path
def search_models(self, search_folder):
self.logger.info(f"Finding Models In: {search_folder}")
models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
models_folder_safetensors = Path(search_folder).glob("**/*.safetensors")
ckpt_files = [x for x in models_folder_ckpt if x.is_file()]
safetensor_files = [x for x in models_folder_safetensors if x.is_file()]
files = ckpt_files + safetensor_files
found_models = []
for file in files:
location = str(file.resolve()).replace("\\", "/")
if "model.safetensors" not in location and "diffusion_pytorch_model.safetensors" not in location:
found_models.append({"name": file.stem, "location": location})
return search_folder, found_models
    def commit(self, conf_file: Optional[Path] = None) -> None:
        """
        Write current configuration out to the indicated file.

        Only models whose class has save_to_config set are persisted;
        scan-discovered models are rebuilt from disk on load.  The file is
        written to a temp file first and then atomically swapped into place.
        """
        data_to_save = dict()
        data_to_save["__metadata__"] = self.config_meta.dict()
        for model_key, model_config in self.models.items():
            model_name, base_model, model_type = self.parse_key(model_key)
            model_class = self._get_implementation(base_model, model_type)
            if model_class.save_to_config:
                # TODO: or exclude_unset better fits here?
                data_to_save[model_key] = model_config.dict(exclude_defaults=True, exclude={"error"})
                # alias for config file
                data_to_save[model_key]["format"] = data_to_save[model_key].pop("model_format")
        yaml_str = OmegaConf.to_yaml(data_to_save)
        config_file_path = conf_file or self.config_path
        assert config_file_path is not None, "no config file path to write to"
        config_file_path = self.app_config.root_path / config_file_path
        tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp")
        try:
            with open(tmpfile, "w", encoding="utf-8") as outfile:
                outfile.write(self.preamble())
                outfile.write(yaml_str)
            # Atomic replace so a crash mid-write can't corrupt models.yaml.
            os.replace(tmpfile, config_file_path)
        except OSError as err:
            self.logger.warning(f"Could not modify the config file at {config_file_path}")
            self.logger.warning(err)
    def preamble(self) -> str:
        """
        Returns the preamble for the config file.

        This comment banner is written at the top of models.yaml by commit().
        """
        return textwrap.dedent(
            """
            # This file describes the alternative machine learning models
            # available to InvokeAI script.
            #
            # To add a new model, follow the examples below. Each
            # model requires a model config file, a weights file,
            # and the width and height of the images it
            # was trained on.
            """
        )
    def scan_models_directory(
        self,
        base_model: Optional[BaseModelType] = None,
        model_type: Optional[ModelType] = None,
    ):
        """Synchronize self.models with the on-disk models directory.

        Drops entries whose files have disappeared, probes and registers new
        files found under <base>/<type>/, runs the autoimport scan, and
        commits the config when anything changed.  The optional arguments
        restrict which base/type subdirectories are scanned for new models.
        """
        loaded_files = set()
        new_models_found = False
        self.logger.info(f"Scanning {self.app_config.models_path} for new models")
        with Chdir(self.app_config.models_path):
            # Pass 1: verify existing registry entries still have files on disk.
            for model_key, model_config in list(self.models.items()):
                model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
                # Patch for relative path bug in older models.yaml - paths should not
                # be starting with a hard-coded 'models'. This will also fix up
                # models.yaml when committed.
                if model_config.path.startswith("models"):
                    model_config.path = str(Path(*Path(model_config.path).parts[1:]))
                model_path = self.resolve_model_path(model_config.path).absolute()
                if not model_path.exists():
                    model_class = self._get_implementation(cur_base_model, cur_model_type)
                    if model_class.save_to_config:
                        model_config.error = ModelError.NotFound
                        self.models.pop(model_key, None)
                    else:
                        self.models.pop(model_key, None)
                else:
                    loaded_files.add(model_path)
            # Pass 2: probe files on disk that are not yet in the registry.
            for cur_base_model in BaseModelType:
                if base_model is not None and cur_base_model != base_model:
                    continue
                for cur_model_type in ModelType:
                    if model_type is not None and cur_model_type != model_type:
                        continue
                    model_class = self._get_implementation(cur_base_model, cur_model_type)
                    models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value))
                    if not models_dir.exists():
                        continue  # TODO: or create all folders?
                    for model_path in models_dir.iterdir():
                        if model_path not in loaded_files:  # TODO: check
                            model_name = model_path.name if model_path.is_dir() else model_path.stem
                            model_key = self.create_key(model_name, cur_base_model, cur_model_type)
                            try:
                                if model_key in self.models:
                                    raise DuplicateModelException(f"Model with key {model_key} added twice")
                                model_path = self.relative_model_path(model_path)
                                model_config: ModelConfigBase = model_class.probe_config(
                                    str(model_path), model_base=cur_base_model
                                )
                                self.models[model_key] = model_config
                                new_models_found = True
                            except DuplicateModelException as e:
                                self.logger.warning(e)
                            except InvalidModelException:
                                self.logger.warning(f"Not a valid model: {model_path}")
                            except NotImplementedError as e:
                                self.logger.warning(e)
        imported_models = self.scan_autoimport_directory()
        if (new_models_found or imported_models) and self.config_path:
            self.commit()
    def scan_autoimport_directory(self) -> Dict[str, AddModelResult]:
        """
        Scan the autoimport directory (if defined) and import new models, delete defunct models.

        Returns a mapping of model key -> AddModelResult for newly imported models.
        """
        # avoid circular import
        from invokeai.backend.install.model_install_backend import ModelInstall
        from invokeai.frontend.install.model_install import ask_user_for_prediction_type
        class ScanAndImport(ModelSearch):
            # ModelSearch subclass that feeds each discovered file to the
            # installer, skipping paths that are already registered.
            def __init__(self, directories, logger, ignore: Set[Path], installer: ModelInstall):
                super().__init__(directories, logger)
                self.installer = installer
                self.ignore = ignore
            def on_search_started(self):
                self.new_models_found = dict()
            def on_model_found(self, model: Path):
                if model not in self.ignore:
                    self.new_models_found.update(self.installer.heuristic_import(model))
            def on_search_completed(self):
                self.logger.info(
                    f"Scanned {self._items_scanned} files and directories, imported {len(self.new_models_found)} models"
                )
            def models_found(self):
                return self.new_models_found
        config = self.app_config
        # LS: hacky
        # Patch in the SD VAE from core so that it is available for use by the UI
        try:
            self.heuristic_import({str(self.resolve_model_path("core/convert/sd-vae-ft-mse"))})
        except Exception:
            pass
        installer = ModelInstall(
            config=self.app_config,
            model_manager=self,
            prediction_type_helper=ask_user_for_prediction_type,
        )
        # Paths already registered; the scanner must not re-import them.
        known_paths = {self.resolve_model_path(x["path"]) for x in self.list_models()}
        directories = {
            config.root_path / x
            for x in [
                config.autoimport_dir,
                config.lora_dir,
                config.embedding_dir,
                config.controlnet_dir,
            ]
            if x
        }
        scanner = ScanAndImport(directories, self.logger, ignore=known_paths, installer=installer)
        scanner.search()
        return scanner.models_found()
def heuristic_import(
self,
items_to_import: Set[str],
prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
) -> Dict[str, AddModelResult]:
"""Import a list of paths, repo_ids or URLs. Returns the set of
successfully imported items.
:param items_to_import: Set of strings corresponding to models to be imported.
:param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType.
The prediction type helper is necessary to distinguish between
models based on Stable Diffusion 2 Base (requiring
SchedulerPredictionType.Epsilson) and Stable Diffusion 768
(requiring SchedulerPredictionType.VPrediction). It is
generally impossible to do this programmatically, so the
prediction_type_helper usually asks the user to choose.
The result is a set of successfully installed models. Each element
of the set is a dict corresponding to the newly-created OmegaConf stanza for
that model.
May return the following exceptions:
- ModelNotFoundException - one or more of the items to import is not a valid path, repo_id or URL
- ValueError - a corresponding model already exists
"""
# avoid circular import here
from invokeai.backend.install.model_install_backend import ModelInstall
successfully_installed = dict()
installer = ModelInstall(
config=self.app_config, prediction_type_helper=prediction_type_helper, model_manager=self
)
for thing in items_to_import:
installed = installer.heuristic_import(thing)
successfully_installed.update(installed)
self.commit()
return successfully_installed
| true |
0b92d45969f3251f5087ffc3e39c828b88a823a4 | Python | xinkaichen97/HackerRank | /Data Science/humidity2.py | UTF-8 | 1,694 | 2.546875 | 3 | [] | no_license | import pandas as pd
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.arima_model import ARIMA
from matplotlib import pyplot as plt
# Sample input: hourly humidity readings for 2013-01-01 with some hours
# missing; `timestamps` lists the hours whose humidity must be predicted.
startDate = "2013-01-01"
endDate = "2013-01-01"
knownTimestamps = ['2013-01-01 00:00','2013-01-01 01:00','2013-01-01 02:00','2013-01-01 03:00','2013-01-01 04:00',
                   '2013-01-01 05:00','2013-01-01 06:00','2013-01-01 08:00','2013-01-01 10:00','2013-01-01 11:00',
                   '2013-01-01 12:00','2013-01-01 13:00','2013-01-01 16:00','2013-01-01 17:00','2013-01-01 18:00',
                   '2013-01-01 19:00','2013-01-01 20:00','2013-01-01 21:00','2013-01-01 23:00']
humidity = ['0.62','0.64','0.62','0.63','0.63','0.64','0.63','0.64','0.48','0.46','0.45','0.44','0.46','0.47','0.48','0.49','0.51','0.52','0.52']
timestamps = ['2013-01-01 07:00','2013-01-01 09:00','2013-01-01 14:00','2013-01-01 15:00','2013-01-01 22:00']
# correct pred: 0.64, 0.55, 0.44, 0.44, 0.52
def predictMissingHumidity(startDate, endDate, knownTimestamps, humidity, timestamps):
    """Forecast humidity readings for the missing *timestamps*.

    :param startDate/endDate: date range of the series (unused; kept for the
        required call signature)
    :param knownTimestamps: hourly timestamps with observed readings (unused;
        the readings are assumed to be in chronological order already)
    :param humidity: observed readings as strings or floats
    :param timestamps: missing timestamps to predict, in order
    :return: list of float predictions, one per entry in *timestamps*
    """
    # Work on floats; keep a rolling history so each forecast conditions on
    # the previous ones.
    history = [float(value) for value in humidity]
    pred = []
    for _ in timestamps:
        # Bug fix: the original refit the model on the unchanged training data
        # every iteration (and ignored `obs`), so all five forecasts were the
        # identical one-step value.  Appending each forecast to the history
        # rolls the prediction forward instead.
        model = ARIMA(history, order=(2, 2, 0))
        fitted = model.fit()
        next_value = float(fitted.forecast()[0])
        pred.append(next_value)
        history.append(next_value)
    return pred
# Demo run on the sample data above.
print(predictMissingHumidity(startDate, endDate, knownTimestamps, humidity, timestamps))
| true |
148b397f117456d9ffdd41490c038786d20cd12c | Python | guo-sc/Python-Learn | /Mooc-3.5-2018-12-09/TextProBarV1.py | UTF-8 | 280 | 3.21875 | 3 | [] | no_license | #TextProBarV1.py
import time
# Render a simple text progress bar that advances in 10% steps,
# pausing half a second between updates.
scale = 10
start_label = "执行开始"
end_label = "执行结束"
print("{:-^20}".format(start_label))
for step in range(scale + 1):
    done_bar = "*" * step
    todo_bar = "." * (scale - step)
    percent = (step / scale) * 100
    print("{:^3.0f}%[{}->{}]".format(percent, done_bar, todo_bar))
    time.sleep(0.5)
print("{:-^20}".format(end_label))
| true |
61deb7037dab106a0f831eae2f1746806fd8baf7 | Python | AsternA/Final-Project---Deep-Learning-w-Raspberry-Pi- | /mission_import.py | UTF-8 | 6,665 | 2.515625 | 3 | [] | no_license | #################################################
# #
# Written by: Almog Stern #
# Date: 15.4.20 #
# Missions to be given to the Pixhawk #
# (With help from Dronekit Examples) #
# #
#################################################
# Library Imports
from dronekit import connect, VehicleMode, LocationGlobalRelative, LocationGlobal, Command
from pymavlink import mavutil
import argparse
import time
import random
import math
# Connection Handle
# The default targets a ground-station UDP endpoint; the commented
# alternative is the Raspberry Pi's serial link to the Pixhawk.
parser = argparse.ArgumentParser()
#parser.add_argument('--connect', default='/dev/ttyS0')
parser.add_argument('--connect', default="udp:10.100.102.31:14550")
args = parser.parse_args()
def get_distance_metres(aLocation1, aLocation2):
    """
    Return the approximate ground distance in metres between two
    LocationGlobal objects.

    Uses a flat-earth approximation (degree deltas scaled by 1.113195e5
    metres per degree), so accuracy degrades over large separations and
    close to the earth's poles. It comes from the ArduPilot test code:
    https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
    """
    delta_lat = aLocation2.lat - aLocation1.lat
    delta_lon = aLocation2.lon - aLocation1.lon
    return math.hypot(delta_lat, delta_lon) * 1.113195e5
def distance_to_current_waypoint(vehicle):
    """
    Gets distance in metres to the current waypoint.
    It returns None for the first waypoint (Home location).
    """
    nextwaypoint = vehicle.commands.next
    if nextwaypoint==0:
        return None
    missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed
    # Mission items carry lat/lon/alt in their x/y/z fields.
    lat = missionitem.x
    lon = missionitem.y
    alt = missionitem.z
    targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)
    # Measure from the vehicle's current global position.
    distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)
    return distancetopoint
def readmission(vehicle, aFileName):
    """
    Load a mission from a file into a list. The mission definition is in the Waypoint file
    format (http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).
    This function is used by upload_mission().

    Returns a list of dronekit Command objects, one per waypoint line.
    Raises Exception when the file header is not 'QGC WPL 110'.
    """
    print("\nReading mission from file: %s" % aFileName)
    cmds = vehicle.commands  # NOTE(review): unused here; kept for compatibility
    missionlist=[]
    with open(aFileName) as f:
        for i, line in enumerate(f):
            if i==0:
                # First line must be the waypoint-file format header.
                if not line.startswith('QGC WPL 110'):
                    raise Exception('File is not supported WP version')
            else:
                # Tab-separated fields: index, current-wp flag, frame, command,
                # params 1-4, x (lat), y (lon), z (alt), autocontinue.
                linearray=line.split('\t')
                ln_index=int(linearray[0])
                ln_currentwp=int(linearray[1])
                ln_frame=int(linearray[2])
                ln_command=int(linearray[3])
                ln_param1=float(linearray[4])
                ln_param2=float(linearray[5])
                ln_param3=float(linearray[6])
                ln_param4=float(linearray[7])
                ln_param5=float(linearray[8])
                ln_param6=float(linearray[9])
                ln_param7=float(linearray[10])
                ln_autocontinue=int(linearray[11].strip())
                cmd = Command( 0, 0, 0, ln_frame, ln_command, ln_currentwp, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_param5, ln_param6, ln_param7)
                missionlist.append(cmd)
    return missionlist
def upload_mission(vehicle, aFileName):
    """
    Read the waypoint file *aFileName* and push its commands to *vehicle*,
    replacing any mission currently stored on the autopilot.
    """
    # Parse the waypoint file into a list of Command objects.
    missionlist = readmission(vehicle, aFileName)
    print("\nUpload mission from a file: %s" % aFileName)
    # Drop whatever mission the vehicle currently holds.
    print(' Clear mission')
    cmds = vehicle.commands
    cmds.clear()
    # Queue the new commands, then push them to the autopilot.
    for waypoint_cmd in missionlist:
        cmds.add(waypoint_cmd)
    print(' Upload mission')
    vehicle.commands.upload()
def arm_and_takeoff(vehicle, aTargetAltitude):
    """
    Arms vehicle and fly to aTargetAltitude.

    Blocks until the vehicle is armable, armed, and within 95% of the
    target altitude (metres, relative to home).
    """
    print("Basic pre-arm checks")
    # Don't let the user try to arm until autopilot is ready
    while not vehicle.is_armable:
        print("Waiting for vehicle to initialise...")
        time.sleep(1)
    print("Arming motors")
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    while not vehicle.armed:
        print("Waiting for arming...")
        time.sleep(1)
    print("[INFO] Taking off!")
    vehicle.simple_takeoff(aTargetAltitude)  # Take off to target altitude
    # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
    # after Vehicle.simple_takeoff will execute immediately).
    while True:
        print("[INFO] Altitude: ", vehicle.location.global_relative_frame.alt)
        if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.
            print("[INFO] Reached target altitude")
            break
        time.sleep(1)
def which_mission(mid):
    """Run the mission file matching *mid* (1-4); print an error and return 0 otherwise."""
    # Table-driven dispatch instead of an if/elif ladder.
    mission_files = {
        1: 'mission_1.txt',
        2: 'mission_2.txt',
        3: 'mission_3.txt',
        4: 'mission_4.txt',
    }
    mission_file = mission_files.get(mid)
    if mission_file is None:
        print('[ERROR] No mission matches number...')
        return 0
    do_mission(mission_file)
def do_mission(mission_id):
    """Connect to the vehicle, upload the mission file *mission_id*, fly it
    in AUTO mode, and close the connection once the vehicle disarms.

    NOTE(review): relies on module-level names not visible in this chunk:
    `args` (parsed CLI options with a .connect field) and
    `distance_to_current_waypoint` -- confirm both exist elsewhere in the file.
    """
    import_mission_filename = mission_id
    print('[INFO] Connecting to vehicle on: %s' % args.connect)
    vehicle = connect(args.connect, baud=57600, wait_ready=True)
    # From Copter 3.3 you will be able to take off using a mission item. Plane must take off using a mission item (currently).
    arm_and_takeoff(vehicle, 25)
    while not vehicle.is_armable:
        print("Waiting for vehicle to initialise...")
        time.sleep(1)
    #Upload mission from file
    # NOTE(review): the assigned value is never used below (and the variable
    # name "missionslist" is misspelled).
    missionslist = upload_mission(vehicle, import_mission_filename)
    print("Starting mission")
    # Reset mission set to first (0) waypoint
    vehicle.commands.next=0
    # Set mode to AUTO to start mission
    vehicle.mode = VehicleMode("AUTO")
    # Monitor mission.
    # Demonstrates getting and setting the command number
    # Uses distance_to_current_waypoint(), a convenience function for finding the
    # distance to the next waypoint.
    while True:
        nextwaypoint=vehicle.commands.next
        print('Distance to waypoint (%s): %s' % (nextwaypoint, distance_to_current_waypoint(vehicle)))
        # Loop ends when the autopilot disarms (mission finished or aborted).
        if vehicle.armed == False:
            break
        time.sleep(5)
    #Close vehicle object before exiting script
    print("Close vehicle object")
    vehicle.close()
def widthOfBinaryTree(root):
    """Return the maximum width of the binary tree rooted at *root*.

    Width of a level = distance between its leftmost and rightmost nodes,
    counting the null slots in between, using heap-style position numbers
    (left child 2*i, right child 2*i + 1).  Iterative level-order version.
    """
    if not root:
        return 0
    best = 0
    level = [(root, 0)]  # (node, position index) pairs, left to right
    while level:
        best = max(best, level[-1][1] - level[0][1] + 1)
        nxt = []
        for node, idx in level:
            if node.left:
                nxt.append((node.left, 2 * idx))
            if node.right:
                nxt.append((node.right, 2 * idx + 1))
        level = nxt
    return best
| true |
import sys

# Inputs: a positive divisor and an inclusive upper bound.
divisor = int(input())
bound = int(input())

# The largest positive multiple of `divisor` not exceeding `bound` is
# (bound // divisor) * divisor -- no need to scan the whole range (the
# original looped over range(bound + 1), which is O(bound)).
# NOTE(review): assumes divisor > 0, as in the original problem statement.
max_x = (bound // divisor) * divisor
if max_x <= 0:
    # No positive multiple exists in (0, bound]; keep the original sentinel.
    max_x = -sys.maxsize
print(max_x)
| true |
84f23ec69121ac19505848b4c237b0f2c70dbb27 | Python | Malhar-Patwari/Social-Network-Analysis-Project | /Project/cluster.py | UTF-8 | 1,952 | 3 | 3 | [] | no_license | """
cluster.py
"""
import networkx as nx
import matplotlib.pyplot as plt
import sys
import time
import csv
import pandas as pd
import pickle
def read_graph():
    """
    Returns:
      A networkx undirected graph.
    """
    # Edge list: tab-separated pairs of user ids written by the
    # data-collection step.
    return nx.read_edgelist('friends.txt', delimiter='\t')
def remove_nodes(graph, d):
    """Remove every node of *graph* whose degree is below *d*.

    Args:
      graph: a networkx graph; modified in place.
      d: minimum degree a node must have to be kept.

    Returns:
      The same graph object, after removal.
    """
    # Iterate over a snapshot of the nodes: removing a node while iterating
    # the live NodeView mutates the underlying dict and raises
    # "RuntimeError: dictionary changed size during iteration" on networkx >= 2.
    for node in list(graph.nodes()):
        if graph.degree(node) < d:
            graph.remove_node(node)
    #print(graph.degree())
    return graph
def draw_graph(graph,filename):
    """Draw *graph* with a spring layout, save it to *filename* as a
    500-dpi PNG, and show it interactively."""
    pos=nx.spring_layout(graph)
    nx.draw_networkx(graph,pos,with_labels=False,node_color='blue',node_size=50,alpha=0.50,edge_color='r')
    plt.axis('off')
    plt.savefig(filename,format="PNG",frameon=None,dpi=500)
    plt.show()
def calculate_betweenness(graph):
    # Raw (unnormalized) edge betweenness; used below to pick which edge to cut.
    return nx.edge_betweenness_centrality(graph, normalized=False)
def get_community(graph,k):
    """Girvan-Newman community detection: repeatedly remove the
    highest-betweenness edge until the graph has at least k connected
    components.  Modifies and returns *graph*."""
    components= nx.number_connected_components(graph)
    while k > components:
        #print(components)
        # Sort by descending betweenness; the inner sorted() orders the
        # items first so ties break deterministically on the edge tuple.
        betweenness = sorted(sorted(calculate_betweenness(graph).items()), key=lambda x: (-x[1],x[0]))
        #print(betweenness[0][0])
        graph.remove_edge(*betweenness[0][0])
        components= nx.number_connected_components(graph)
    return graph
def main():
    """Pipeline: read the friends graph, plot it, pickle the original node
    count, drop nodes with degree < 2, split into 4 communities with
    Girvan-Newman, plot and pickle the final graph."""
    # this script takes 10 minutes to run on my computer
    graph = read_graph()
    print('Original graph has %d nodes and %d edges' %
          (graph.order(), graph.number_of_edges()))
    print("generating original graphs")
    draw_graph(graph,"original_graph.png")
    with open("nodes_pik", "wb") as f:
        pickle.dump(graph.order(), f)
    graph = remove_nodes(graph,2)
    #print('graph has %d nodes and %d edges' %
    #    (graph.order(), graph.number_of_edges()))
    draw_graph(graph,"after_removing_edges.png")
    print("girwan newman in progress")
    graph = get_community(graph,4)
    print('Final graph has %d nodes and %d edges' %
          (graph.order(), graph.number_of_edges()))
    draw_graph(graph,"Final_Graph.png")
    with open("graph_pik", "wb") as f:
        pickle.dump(graph, f)


if __name__ == '__main__':
    main()
def valid(s):
    """Return True when *s* is a palindrome (reads the same reversed)."""
    return s == s[::-1]
def solution(s):
    """Return the length of the longest palindromic substring of *s*
    (0 for the empty string)."""
    answer = 0
    n = len(s)
    # For every candidate length, keep it if any substring of that length
    # is a palindrome; the largest such length wins.
    for length in range(1, n + 1):
        for start in range(n - length + 1):
            piece = s[start:start + length]
            if piece == piece[::-1]:
                answer = length
                break
    return answer
import pdb  # kept from the original import line; the module-level
            # pdb.set_trace() debugger stop was removed so the script
            # can run unattended instead of halting at import time.


class Addition:
    """Demo of a classmethod that accepts any number of positional args."""

    @classmethod
    def addition(cls, *a):
        """Print the tuple of all positional operands."""
        print(a)


Addition.addition(10, 10, 20)
| true |
# Python Crash Course, chapter 5 exercises: if statements, comparisons and
# conditional tests on lists.  (Section comments translated from Russian.)
cars = ['audi', 'bmw', 'subaru', 'toyota']
for car in cars:
    if car == 'bmw':
        print(car.upper())
    else:
        print(car.title())
# Conditional tests: checking for equality
car = 'bmw'
if car == 'bmw':
    print(True)
else:
    print(False)
# Equality checks that ignore case
# car = 'Audi'
# car == 'audi'
# car = 'Audi'
# car.lower() == 'audi'
# Checking for inequality
requested_topping = 'mushrooms'
if requested_topping != 'anchovies':
    print("Hold the anchovies!")
# Numerical comparisons
# age = 18
# age == 18
# True
answer = 16
if answer != 42:
    print("That is not the correct answer. Please try again!")
age = 19
if age <= 21:
    print(True)
elif age > 21:
    print(False)
# Checking multiple conditions with and / or
# Using and: both conditions must hold
age_0 = 22
age_1 = 18
if age_0 >= 21 and age_1 >= 21:
    print(True)
else:
    print(False)
age_1 = 22
if age_0 >= 21 and age_1 >= 21:
    print(True)
else:
    print(False)
# Using or: either condition is enough
age_0 = 22
age_1 = 18
if age_0 >= 21 or age_1 >= 21:
    print(True)
else:
    print(False)
age_0 = 18
if age_0 >= 21 or age_1 >= 21:
    print(True)
else:
    print(False)
# Checking whether a value is in a list with `in`
requested_topping = ['mushrooms', 'onions', 'pineapple']
if 'mushrooms' in requested_topping:
    print(True)
if 'pepperoni' in requested_topping:
    print(False)
# Checking that a value is NOT in a list with `not in`
banned_users = ['andrew', 'carolina', 'david']
user = 'marie'
if user not in banned_users:
    print(user.title() + " , you can post a responce if you wish.")
# Boolean expressions
game_active = True
can_edit = False
# Simple if statements
age = 19
if age >= 18:
    print("You are old enough to vote!")
    print("Have you registered to vote yet?")
# if-else statements
age = 17
if age >= 18:
    print("You are old enough to vote!")
    print("Have you registered to vote yet?")
else:
    print("Sorry, you are too young to vote.")
    print("Please register to vote as soon as you turn 18!")
# if-elif-else chains
age = 12
if age < 4:
    print("Your admission cost is $0.")
elif age < 18:
    print("Your admission cost is $5.")
else:
    print("Your admission cost is $10")
# A more compact version of the chain above
if age < 4:
    price = 0
elif age < 18:
    price = 5
else:
    price = 10
print("Your admission cost is $" + str(price) + ".")
# A series of elif blocks
age = 12
if age < 4:
    price = 0
elif age < 18:
    price = 5
elif age < 65:
    price = 10
else:
    price = 5
print("Your admission cost is $" + str(price) + ".")
# Omitting the else block
age = 12
if age < 4:
    price = 0
elif age < 18:
    price = 5
elif age < 65:
    price = 10
elif age >= 65:
    price = 5
print("Your admission cost is $" + str(price) + ".")
# Testing several conditions independently
requested_topping = ['mushrooms', 'extra cheese']
if 'mushrooms' in requested_topping:
    print("Adding mushrooms.")
if 'pepperoni' in requested_topping:
    print("Adding pepperoni.")
if 'extra cheese' in requested_topping:
    print("Adding extra cheese")
print("\nFinished making your pizza!")
# Checking for special values
# NOTE(review): ' green peppers' below carries a leading space (typo kept
# from the original).
requested_toppings = ['mushrooms', ' green peppers', 'extra cheese']
for requested_topping in requested_toppings:
    print("Adding " + requested_topping + ".")
print("\nFinished making your pizza!")
requested_toppings = ['mushrooms', 'green peppers', 'extra cheese']
for requested_topping in requested_toppings:
    if requested_topping == 'green peppers':
        print("Sorry, we are out of green peppers right now.")
    else:
        print("Adding " + requested_topping + ".")
print("\nFinished making your pizza!")
# Checking that a list is not empty
requested_toppings_2 = []
if requested_toppings_2:
    for requested_topping in requested_toppings_2:
        print("Adding " + requested_topping + ".")
    print("\nFinished making your pizza!")
else:
    print("Are you sure you want a plain pizza?")
# Working with multiple lists
available_toppings = ['mushrooms', 'olives', 'green peppers',
                      'pepperoni', 'pineapple', 'extra cheese']
requested_toppings_3 = ['mushrooms', 'french fries', 'extra cheese']
for requested_topping in requested_toppings_3:
    if requested_topping in available_toppings:
        print("Adding " + requested_topping + ".")
    else:
        print("Sorry, we don't have " + requested_topping + ".")
print("\nFinished making your pizza!")
c53e2092b6912d677a8285fc87084531bf8a5a07 | Python | lylwill/CS275Project | /qlearning.py | UTF-8 | 401 | 2.765625 | 3 | [] | no_license | from RL import RL
class qlearning(RL):
def __init__(self, actions, epsilon, alpha=0.2, gamma=1.0):
RL.__init__(self, actions, epsilon, alpha, gamma)
def learn(self, state1, action, reward, state2):
try:
q = [self.getQ(state2, a) for a in self.actions]
maxQ = max(q)
self.updateQ(state1, action, reward, reward+self.gamma*maxQ)
# self.printQ()
except:
print "Failed to learn"
| true |
7dabea15d4ed1e04eb7fa0acd1216a7db26e00ab | Python | hiter-joe/pyHMT2D | /tests/00_dummy/00_01_dumy_test.py | UTF-8 | 140 | 2.734375 | 3 | [
"MIT",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-generic-cla"
] | permissive |
def test_dummy():
    """A dummy test as placeholder and template

    Returns
    -------
    """
    # Trivial arithmetic check -- verifies only that the harness runs.
    assert 1 + 1 == 2
| true |
0bf8014114d689e1876bfb299dca18b3d664d4f3 | Python | gianfabi/raven | /Workshop/ISUtrainig/extModel.py | UTF-8 | 347 | 2.53125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive |
''' from wikipedia: dx/dt = sigma*(y-x) ; dy/dt = x*(rho-z)-y dz/dt = x*y-beta*z ; '''
import numpy as np
def run(self, Input):
    """RAVEN external-model entry point: derive the prod, sum and sin
    responses from the sampled ThExp and GrainRad attributes on *self*.
    *Input* is part of the interface but is not used here."""
    expansion = self.ThExp
    radius = self.GrainRad
    self.prod = 10 * expansion * radius
    self.sum = 5 * expansion - 0.6 * radius
    self.sin = np.sin(expansion / 5.e-7) * np.sin((radius - 0.5) * 10)
| true |
62c87a491450a84834e947df150415a0e46f5dd9 | Python | sz6636/machine-learning | /Numpy学习/ttt.py | UTF-8 | 465 | 3.3125 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: "Zing-p"
# Date: 2017/12/11
import math  # file-level import kept from the original (currently unused)

# Scratch demo: a commented-out entry inside a list literal is simply
# skipped, so `a` ends up holding only "123".  The commented-out
# math.ceil/math.floor print demos from the original were removed as
# dead code.

dic = {"a":"b", "c":"d"}  # unused scratch data
li = [1,2,3]              # unused scratch data

a = [
    "123",
    #"456",
]
print(a)
931a80d20b3c316d8e1a98e2e4611536e3daa5b6 | Python | dinhky0204/SoundHandlingPython | /Tkinter/App.py | UTF-8 | 1,740 | 3.15625 | 3 | [] | no_license | from Tkinter import *
import ttk
class App(Frame):
    """Small Tkinter demo window (Python 2, star-imported Tkinter): an
    entry bound to a StringVar, a labelled "Review" entry row and two
    buttons that echo the entries' contents to stdout."""
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack(side = "bottom")
        self.initUI()
    def initUI(self):
        """Build all widgets.

        NOTE(review): entrythingy and label1 are created without an
        explicit parent, so they attach to the default root window rather
        than to this frame -- confirm that is intentional.
        """
        self.entrythingy = Entry()
        self.entrythingy.pack()
        # Label(self.master, text="First").grid(row=0)
        # Label(self.master, text="Second").grid(row=1)
        label1 = ttk.Label(text="W", style="BW.TLabel")
        label1.pack(side = "left")
        frame3 = Frame(self)
        frame3.pack(fill=BOTH, expand=True)
        lbl3 = Label(frame3, text="Review", width=6)
        lbl3.pack(side=LEFT, anchor=N, padx=5, pady=5)
        self.entry1 = Entry(frame3)
        self.entry1.pack(fill=X, padx=5, expand=True)
        # Tk variable mirrored into entrythingy's displayed text.
        self.contents = StringVar()
        self.contents.set("frame number")
        # self.contents.grid(row=0, column = 2)
        self.entrythingy["textvariable"] = self.contents
        # and here we get a callback when the user hits return.
        # we will have the program print out the value of the
        # application variable when the user hits return
        self.entrythingy.bind('<Key-Return>',
                              self.print_contents)
        fred = Button(self, text="Test fred", fg="red", bg="blue", command = self.printcontent)
        fred.pack(side="right")
        quit_btn = Button(self, text="QUIT", fg="red", command = self.quit)
        quit_btn.pack(side = "right")
    def print_contents(self, event):
        """<Return> handler: dump the StringVar's current value."""
        print "hi. contents of entry is now ---->", \
            self.contents.get()
    def printcontent(self):
        """Button handler: dump the "Review" entry's current text."""
        print "This is content: "
        print self.entry1.get()


# Script entry: build the window and run the Tk event loop until quit.
root = Tk()
app = App(master=root)
app.mainloop()
root.destroy()
14d84a68aaeddc8c5006cecb5875307bb4601e22 | Python | oehoy/voice4you | /voice4u.py | UTF-8 | 3,107 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python #
# -*- coding: utf-8 -*- #
# #
# voice4u.py #
# #
# Copyright 2014 Oehoy <popov.md5@gmail.com> #
# #
#########################################################
import curses, os
# Curses bootstrap: take over the terminal and configure key handling.
screen = curses.initscr()
curses.noecho()   # do not echo typed keys to the screen
curses.cbreak()   # react to keys immediately, without waiting for Enter
curses.start_color()
screen.keypad(1)  # translate arrow keys into curses key codes
curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_WHITE)  # highlight colour pair
# Menu-selection state shared with the main loop below.
getin = None
sub1get = None
sub2get = None
def topmenu():
    """Render the top-level menu, let the user move the highlight with the
    digit keys or the arrow keys, and -- once Enter is pressed -- return
    ord(str(pos)), i.e. the character code of the selected digit so the
    caller can compare it against ord('1')..ord('5')."""
    screen.keypad(1)
    curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_WHITE)
    pos=1      # currently highlighted entry (1..5)
    x = None   # last key code read from the keyboard
    h = curses.color_pair(1) #h = attribute for the highlighted entry
    n = curses.A_NORMAL #n = attribute for non-highlighted entries
    while x !=ord('\n'):
        # Redraw the whole menu, highlighting the entry at `pos`.
        screen.clear()
        screen.border(0)
        screen.addstr(2,2, "\"Voice For You\"", curses.A_STANDOUT) # Title for this menu
        screen.addstr(4,2, "Please select it...", curses.A_BOLD) #Subtitle for this menu
        if pos==1:
            screen.addstr(6,4, "1. Wikipedia", h)
            #os.system("echo \"В+ики\" | festival --tts --language russian")
        else:
            screen.addstr(6,4, "1. Wikipedia", n)
        if pos==2:
            screen.addstr(7,4, "2. Online radio", h)
            #os.system("echo \"Перев+одчик\" | festival --tts --language russian")
        else:
            screen.addstr(7,4, "2. Online radio", n)
        if pos==3:
            screen.addstr(8,4, "3. E-mail", h)
            #os.system("echo \"Р+адио\" | festival --tts --language russian")
        else:
            screen.addstr(8,4, "3. E-mail", n)
        if pos==4:
            screen.addstr(9,4, "4. QIWI Shop", h)
            #os.system("echo \"П+очта\" | festival --tts --language russian")
        else:
            screen.addstr(9,4, "4. QIWI Shop", n)
            #os.system("echo \"Электр+онная почта\" | festival --tts --language russian")
        if pos==5:
            screen.addstr(10,4, "5. Exit", h)
            #os.system("echo \"В+ыход\" | festival --tts --language russian")
        else:
            screen.addstr(10,4, '5. Exit', n)
        screen.refresh()
        x = screen.getch()
        # Digits jump straight to an entry; 258/259 are the raw curses codes
        # for the down/up arrow keys and wrap around the menu ends.
        if x == ord('1'):
            pos = 1
        elif x == ord('2'):
            pos = 2
        elif x == ord('3'):
            pos = 3
        elif x == ord('4'):
            pos = 4
        elif x == ord('5'):
            pos = 5
        elif x == 258:
            if pos < 5:
                pos += 1
            else: pos = 1
        elif x == 259:
            if pos > 1:
                pos += -1
            else: pos = 5
        elif x != ord('\n'):
            # Any other key: flash the screen as feedback.
            curses.flash()
    return ord(str(pos))
# Main program
# Repeatedly show the menu and launch the chosen sub-application as an
# external Python script; option 5 (Exit) ends the loop.
# NOTE(review): after a sub-application returns, the loop re-enters
# topmenu() without re-initialising curses -- confirm the sub-scripts
# restore the terminal state.
while getin != ord('5'):
    getin = topmenu() # Get the menu item selected on the top menu
    if getin == ord('1'):
        curses.endwin()
        os.system('clear && python ./wikipedia/wiki.py')#run
    elif getin == ord('2'): # Topmenu option 2
        curses.endwin()
        os.system('clear && python ./radio/radio.py')
    elif getin == ord('3'): # Topmenu option 3
        curses.endwin()
        os.system('clear && python ./email/email.py')
    elif getin == ord('4'): # Topmenu option
        curses.endwin()
        os.system('clear && python ./qiwi/qiwi_shop.py')
        #run 4
    elif getin == ord('5'): # Topmenu option 4
        curses.endwin() #VITAL! This closes out the menu system and returns you to the bash prompt.
| true |
4e03ffc9700911ef95736e08f250007896b624bb | Python | niolabs/nio | /nio/modules/proxy.py | UTF-8 | 6,241 | 3.375 | 3 | [] | no_license | """
A base class and exceptions for proxies.
"""
from collections import defaultdict
from inspect import getmembers, isclass, isfunction, ismethod, isroutine
from nio.util.logging import get_nio_logger
class ProxyNotProxied(Exception):
    """ Exception raised when an operation takes place on an unproxied proxy.
    This can occur when trying to unproxy a proxy that hasn't been proxied yet.
    """
    # Marker exception only -- no additional state or behaviour.
    pass
class ProxyAlreadyProxied(Exception):
    """ Exception raised when an operation takes place on a proxied proxy.
    This can occur when trying to proxy a proxy that has already been proxied
    """
    # Marker exception only -- no additional state or behaviour.
    pass
class ModuleProxy(object):

    """ A base class for creating a ModuleProxy interface
    A ModuleProxy is similar to an interface - it allows for separating the
    interface for accessing a class and the implementation controlling how it
    works.
    To create a ModuleProxy interface, create a class that inherits from the
    ModuleProxy class. Define methods and class variables in your interface with
    the method signatures you want people to use when calling them. These
    functions can have as much or as little functionality as you want. Once the
    proxy is "proxied" with an implementation class, any methods defined on the
    implementation class will be proxied and overridden on to the interface.
    After the interface is proxied, people can create an object as if they are
    creating the interface - the caller does not need to know the type or
    location of the implementation class.
    To create an implementation for a ModuleProxy, create a class that does NOT
    inherit from ModuleProxy. Define functionality for whatever methods on the
    interface you want. You can define additional methods in your class that can
    be accessed by the other methods in your implementation. Be aware though that
    since these methods are not on the interface, it should not be assumed that
    the caller can call them or even knows they exist.
    Once the implementation class is complete, call the proxy method on the
    interface passing in the reference to the implementation class.
    Example - this will proxy the members of the ImplementationClass on to the
    InterfaceProxyClass:
    >>> InterfaceProxyClass.proxy(ImplementationClass)
    """

    # Whether or not this class has already been proxied
    proxied = False
    # The implementation class currently proxied on to this interface
    _impl_class = None
    # Per-interface-class map of member name -> original interface member
    # (None when the interface had no such member before proxying)
    _unproxied_methods = defaultdict(dict)

    def __init__(self, *args, **kwargs):
        """ Handling the instantiation of a ModuleProxy
        Instantiating a ModuleProxy probably means they want to instantiate
        the module implementation class instead. We still call the ModuleProxy
        constructor so that the interface can define an explicit signature
        for its constructor.
        Therefore, a proxy interface should define its __init__ method with
        the desired signature, and call super with the same arguments. The
        __init__ method of the proxy implementation will NOT be proxied to the
        interface.
        """
        if self.proxied and isclass(self._impl_class):
            # Delegate construction to the implementation class's __init__.
            self._impl_class.__init__(self, *args, **kwargs)
        else:
            # do not allow creation of not-proxied instances, since allowing
            # such creation yields to unexpected behaviour when after proxying
            # class a proxied method on the instance is invoked.
            raise ProxyNotProxied(
                "An instance of '{0}' class cannot be created, class has not "
                "been proxied".format(self.__class__.__name__))

    @classmethod
    def proxy(cls, class_to_proxy):
        """ Initialize a ModuleProxy by proxying any methods
        This will proxy the class by applying methods from the class passed
        in to the method to the proxy interface. This method should be called
        before any proxied methods are called.
        Args:
            class_to_proxy (class): A reference to the class to proxy on top
                of this interface
        Raises:
            ProxyAlreadyProxied: If the proxy has already been proxied
        """
        if cls.proxied:
            raise ProxyAlreadyProxied()
        # Iterate through the members of the proxy implementation class
        for (name, impl_member) in getmembers(class_to_proxy):
            # Make sure this is a method we want to proxy
            if not cls._is_proxyable(name, impl_member):
                continue
            interface_member = getattr(cls, name, None)
            get_nio_logger("ModuleProxy").debug(
                "Proxying member {0} from {1}".format(
                    name, class_to_proxy.__name__))
            # Save a reference to the original member to replace during unproxy
            cls._unproxied_methods[cls.__name__][name] = interface_member
            setattr(cls, name, impl_member)
        # Mark the class as proxied and save the implementation class
        cls.proxied = True
        cls._impl_class = class_to_proxy

    @classmethod
    def unproxy(cls):
        """ Return the ModuleProxy to its original class methods
        Raises:
            ProxyNotProxied: If the proxy has not yet been proxied
        """
        if not cls.proxied:
            raise ProxyNotProxied()
        for name, iface_member in cls._unproxied_methods[cls.__name__].items():
            if iface_member is None:
                # We didn't have this member on the original interface, delete
                delattr(cls, name)
            else:
                # We had this member originally, replace it with that one
                setattr(cls, name, iface_member)
        # Reset all of our cached proxy class information
        cls._unproxied_methods[cls.__name__] = {}
        cls._impl_class = None
        cls.proxied = False

    @classmethod
    def _is_proxyable(cls, name, member):
        """Returns True if a member is proxy-able.
        Here is what we want to proxy:
        * Any non-private instance functions
        * Any non-private class methods
        * Any non-private class variables
        """
        return not name.startswith('__') and \
            (isfunction(member) or ismethod(member) or not isroutine(member))
c1d8b0edb1e8713863d35705b27219e52254eb1c | Python | gukexi/LearningPython | /study_crawler/src/crawl_maoyan_pyquery.py | UTF-8 | 1,948 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | '''
Created on Jul 29, 2019
@author: ekexigu
'''
import requests
import json
from requests.exceptions import RequestException
import time
from pyquery import PyQuery
def get_one_page(url):
    """Fetch *url* with a desktop Chrome User-Agent; return the response
    body on HTTP 200, or None on any other status / request failure."""
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    try:
        response = requests.get(url, headers=headers)
    except RequestException:
        return None
    return response.text if response.status_code == 200 else None
def parse_one_page(text):
    """Yield one dict per movie entry (<dd> element) in the board page HTML,
    in document order."""
    doc = PyQuery(text)
    for entry in doc('dd').items():
        yield {
            'index': entry('.board-index').text(),
            'title': entry('div div div .name a').text(),
            # Drop the 3-character label prefix from the star line.
            'actor': entry('div div div .star').text().strip()[3:],
            # Drop the first 5 characters (label) and keep the next 10.
            'time': entry('div div div .releasetime').text().strip()[5:15],
            # Score is split across integer and fraction spans.
            'score': entry('div div div .score .integer').text()
                     + entry('div div div .score .fraction').text(),
        }
def write_to_file(content):
    # Append one JSON object per line; ensure_ascii=False keeps non-ASCII
    # titles readable.
    # NOTE(review): the output path is a hard-coded absolute Windows path --
    # consider making it configurable.
    with open('C:\\Users\\ekexigu\\Desktop\\temp\\Crawl_Maoyan_Temp\\result_pyquery.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False)+'\n')
def main(offset):
    """Scrape one board page (10 movies) at the given pagination offset
    and append each parsed record to the output file."""
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    text = get_one_page(url)
    for item in parse_one_page(text):
        write_to_file(item)


if __name__ == '__main__':
    # Pages are paginated in steps of 10; sleep to throttle requests.
    for i in range(10):
        main(offset=i * 10)
        time.sleep(1)
def solve(sentence, words, L):
    """Count how many candidate *words* match the pattern *sentence*.

    parse() turns the pattern into tokens[x] = allowed letters for
    position x; a word matches when every one of its L letters is allowed
    at its position.

    Bug fix: in the original, `works` was only assigned inside the
    positional loop, so an empty pattern (L == 0) referenced it before
    assignment (NameError); all() handles that case cleanly.
    """
    #print "solve(" + sentence + ", " + str(words) + ", " + str(L) + ")"
    tokens = parse(sentence)
    out = 0
    for word in words:
        if all(word[x] in tokens[x] for x in range(L)):
            out += 1
    return out
def parse(sentence):
    """Split a word pattern into per-position token strings: a bare letter
    stands alone, "(abc)" becomes the single token "abc"."""
    #print "parse(" + sentence + ")"
    tokens = []
    pos = 0
    while pos < len(sentence):
        ch = sentence[pos]
        pos += 1
        if ch == "(":
            group = ""
            # Collect letters up to the matching ")"; an unbalanced "("
            # raises IndexError, just as the pop-based original did.
            while sentence[pos] != ")":
                group += sentence[pos]
                pos += 1
            pos += 1  # skip the closing ")"
            tokens.append(group)
        else:
            tokens.append(ch)
    return tokens
# Script driver (Python 2): read the Code Jam input file, then solve and
# print each test case in the required "Case #n: count" format.
# NOTE(review): the input file handle is never closed.
f = open("A-large.in.txt", "r")
# Header line: L = word length, D = dictionary size, N = number of cases.
header = f.readline().split()
L = int(header[0].strip())
D = int(header[1].strip())
N = int(header[2].strip())
words = []
for x in range(D):
    words.append(f.readline().strip())
for x in range(N):
    print "Case #" + str(x+1) + ": " + str(solve(f.readline().strip(), words, L))
| true |
9b9449c64f8e8cb02505074f156d5bc550278c19 | Python | Prabhanda-Akiri/Data-Mining | /TKU.py | UTF-8 | 8,641 | 2.578125 | 3 | [] | no_license | import numpy as np
import bisect as bi
from random import randint
class transaction:
    """One parsed transaction line of the form "i1 i2 ...:TU:u1 u2 ..."."""

    def __init__(self):
        self.items = []          # item ids, in file order
        self.total_utility = 0   # transaction utility (TU)
        self.each_utility = []   # per-item utilities, parallel to items
        # (transaction_id deliberately left unset, as in the original)

    def extract_elements(self, each_transaction):
        """Parse one "items:TU:utilities" line into this object, extending
        any items/utilities already present."""
        fields = each_transaction.split(":")
        self.items.extend(int(tok) for tok in fields[0].split(" "))
        self.total_utility = int(fields[1])
        self.each_utility.extend(int(tok) for tok in fields[2].split(" "))
class Itemset():
    """A candidate itemset plus the statistics the TKU pruning strategies
    rely on (support, MIU, EU, ESTU, TWU, MAU)."""
    def __init__(self):
        self.itemset=None        # the items themselves (sequence of item ids)
        self.support_count=0     # number of transactions containing the itemset
        self.transactions=None   # indices of the supporting transactions
        self.MIU=0               # minimum-item-utility bound
        self.EU=0                # (estimated) exact utility
        self.ESTU=0              # estimated utility used for ranking
        self.TWU=0               # transaction-weighted utility
        self.MAU=0               # maximum-item-utility bound
class Header_Table_Entry():
    """One row of the UP-Tree header table: an item, its TWU, and the head
    of that item's node-link chain."""
    def __init__(self):
        self.item=0     # item id
        self.TWU=0      # transaction-weighted utility of the item
        self.link=None  # first UP-tree node holding this item
class UP_Tree_Node():
    """Node of the utility-pattern tree (UP-Tree)."""
    def __init__(self):
        self.item_id=0           # item stored at this node
        self.count=0             # support count along this path
        self.node_utility=0      # accumulated node utility
        self.child=[]            # child nodes
        self.hlink=None          # horizontal link to next node with same item
        self.transaction_ids=[]  # transactions routed through this node
class UP_TREE():
    """UP-Tree wrapper: holds only the (item-less) root node."""
    def __init__(self):
        self.root=UP_Tree_Node()
class TKU_algorithm:
    """Driver for Top-K high-Utility itemset mining (TKU) on the foodmart
    dataset: loads data, builds a header table and UP-Tree, and applies
    the paper's pruning strategies (2-5) to raise min_util_border.

    NOTE(review): several methods reference names that do not exist at
    runtime (bare globals instead of self attributes, misspelled
    attributes, missing imports); see the inline NOTE(review) comments --
    those code paths raise NameError/AttributeError when executed.
    """
    def __init__(self,K):
        self.K=K                               # number of top itemsets wanted
        self.no_items=None                     # total distinct items
        self.all_items=[]                      # item ids from foodmart_items.txt
        self.Transaction_objects=[]            # parsed transaction objects
        self.Pre_Evaluation_matrix=[]          # strategy-2 pairwise utility matrix
        self.min_util_border=0                 # rising minimum-utility threshold
        self.Header_Table=[]                   # Header_Table_Entry rows
        self.TWU_dict={}                       # item -> TWU
        self.Sorted_transactions=[]
        self.UP_Tree=UP_TREE()
        self.node_utilities=[]                 # sorted (ascending) node utilities
        self.transaction_utilities_eachItem=None  # [item][transaction] -> utility or None
        self.min_utility_items=[]              # per-item minimum utility
        self.max_utility_items=[]              # per-item maximum utility
        self.PKHUIs=[]
        self.Itemsets=[]                       # candidate Itemset objects
        self.Top_k_itemsets=[]
        self.Top_k_MIU=[]                      # sorted MIUs of accepted itemsets
    def load_dataset(self):
        """Read items and transactions from disk, fill the per-item
        utility table, and apply strategy 2 (pre-evaluation matrix) to
        raise min_util_border."""
        f=open("foodmart_items.txt","r")
        for each_item in f:
            self.all_items.append(int(each_item))
        self.no_items=len(self.all_items)
        #Strategy 2
        self.Pre_Evaluation_matrix=[[0 for i in range(self.no_items)] for j in range(self.no_items)]
        f=open("foodmart_utility.txt","r")
        total_transactions=sum([1 for item in f])
        print(total_transactions)
        self.transaction_utilities_eachItem=[[None for i in range(total_transactions)]for j in range(self.no_items)]
        c=0
        print('hi')
        with open('foodmart_utility.txt','r') as f:
            for item in f:
                #print('hi')
                temp=transaction()
                temp.extract_elements(item)
                self.Transaction_objects.append(temp)
                #print(temp.total_utility)
                for i in range(len(temp.items)):
                    index_i=self.all_items.index(temp.items[i])
                    self.transaction_utilities_eachItem[index_i][c]=temp.each_utility[i]
                    for j in range(len(temp.items)):
                        # NOTE(review): indexes by item id - 1, assuming ids
                        # are 1..no_items -- confirm against the items file.
                        self.Pre_Evaluation_matrix[temp.items[i]-1][temp.items[j]-1]+=(temp.each_utility[i]+temp.each_utility[j])
                c+=1
                #self.all_items=self.all_items+temp.items
        # K-th largest matrix entry becomes a lower bound on min utility.
        temp_pre_eval=np.array(self.Pre_Evaluation_matrix)
        temp_pre_eval=temp_pre_eval.flatten()
        temp_pre_eval.sort()
        min_util_border_temp=temp_pre_eval[-(self.K)]
        if self.min_util_border<min_util_border_temp:
            self.min_util_border=min_util_border_temp
        print(self.min_util_border)
        #self.all_items=list(set(self.all_items))
        # with open('foodmart_items.txt', 'w') as f:
        #     for item in self.all_items:
        #         f.write("%s\n" % item)
    def header_table_construction(self):
        """Compute each item's TWU and build the header table, sorted by
        descending TWU."""
        for each_item in self.all_items:
            table_entry=Header_Table_Entry()
            table_entry.item=each_item
            for each_transaction in self.Transaction_objects:
                if each_item in each_transaction.items:
                    table_entry.TWU= table_entry.TWU + each_transaction.total_utility
            self.TWU_dict[table_entry.item]=table_entry.TWU
            self.Header_Table.append(table_entry)
        # NOTE(review): bare name -> NameError; should be self.Header_Table.sort
        Header_Table.sort(key=lambda x: x.TWU, reverse=True)
    def Construct_UP_tree(self):
        """Reorder each transaction by descending TWU and insert it into
        the UP-Tree."""
        for each_transaction in self.Transaction_objects:
            sorted_dict={}
            Tr_dash=transaction()
            items_in_transaction=each_transaction.items
            for each_item in items_in_transaction:
                # NOTE(review): bare TWU_dict -> NameError; should be self.TWU_dict
                sorted_dict[each_item]=TWU_dict[each_item]
            y=dict(sorted(sorted_dict.items(), key=lambda x: x[1],reverse=True))
            Sorted_items=list(y.keys())
            Sorted_utility=[]
            #Sorted_transaction_items.append(Tr_dash)
            for each in Sorted_items:
                indx=each_transaction.items.index(each)
                Sorted_utility.append(each_transaction.each_utility[indx])
            Tr_dash.items=Sorted_items
            Tr_dash.each_utility=Sorted_utility
            Tr_dash.total_utility=each_transaction.total_utility
            # NOTE(review): transaction defines no transaction_id attribute
            # (it is commented out in __init__) -> AttributeError here.
            Tr_dash.transaction_id=each_transaction.transaction_id
            self.Insert_Reorganized_transaction(self.UP_Tree.root,Sorted_items[0],Tr_dash)
    def Insert_Reorganized_transaction(self,N,Ij,Z):
        """Insert item Ij of reorganized transaction Z below node N,
        accumulating node utilities (strategy 3 raises the border from
        node-utility counts)."""
        j=Z.items.index(Ij)
        # NOTE(review): Tr_dash is not defined in this scope -> NameError;
        # presumably Z.items was intended.
        if j<=len(Tr_dash.items):
            temp_l=[eachChN.item for eachChN in N.child]
            if Ij in temp_l:
                k=temp_l.index(Ij)
                N.child[k].count+=1
                N.child[k].transaction_ids.append(Z.transaction_id)
                ChN=N.child[k]
            else:
                ChN=UP_Tree_Node()
                N.child.append(ChN)
                ChN.item=Ij
                ChN.count=1
                ChN.node_utility=0
            RTU_Tr=self.Cal_RTU(Z)
            sigma_EU=0
            for i in range(j+1,len(Z.items)):
                #EU_item=Cal_EU(Z.items[i],Z)
                sigma_EU+=Z.each_utility[i]
            ChN.node_utility += RTU_Tr - sigma_EU
            bi.insort(self.node_utilities,ChN.node_utility)
            #strategy 3
            if self.get_count_UP_tree(self.UP_Tree.root,1)>self.K:
                # NOTE(review): k is only bound in the if-branch above, and
                # node_utilities is ascending, so [k] is not the k-th largest.
                if self.min_util_border<self.node_utilities[k]:
                    self.min_util_border=self.node_utilities[k]
            # NOTE(review): missing self. on the recursive call, and Z[j+1]
            # should presumably be Z.items[j+1].
            return Insert_Reorganized_transaction(ChN,Z[j+1],Z)
    def Cal_RTU(self,Tr):
        # Remaining/transaction utility of a (reorganized) transaction.
        return Tr.total_utility
    def get_count_UP_tree(self,node,count):
        """Count nodes in the subtree rooted at *node* (plus *count*)."""
        for child in node.child:
            # NOTE(review): there is no get_count method on this class;
            # should be self.get_count_UP_tree -> AttributeError here.
            count+=self.get_count(child,1)
        return count
    def get_min_utility_items(self):
        # NOTE(review): min_utility_items starts as an empty list, so index
        # assignment raises IndexError; it needs pre-sizing or append().
        for index in range(self.no_items):
            self.min_utility_items[index]=min([x for x in self.transaction_utilities_eachItem[index] if x!=None])
    def get_max_utility_items(self):
        # NOTE(review): same IndexError issue as get_min_utility_items.
        for index in range(self.no_items):
            self.max_utility_items[index]=max([x for x in self.transaction_utilities_eachItem[index] if x!=None])
    def generate_ESTU(self,iset):
        """Fill in MIU/EU/MAU/TWU for *iset* and draw a random ESTU in
        [EU, TWU] consistent with the MAU bound; returns the itemset."""
        #for iset in self.Itemsets:
        #index_j=self.Itemsets.index(iset)
        sum_miu=0
        sum_mau=0
        sum_eu=0
        for i in iset.itemset:
            index_i=self.all_items.index(i)
            #generate MIU
            sum_miu+=self.min_utility_items[index_i]
            #generate EU
            # NOTE(review): the row still contains None placeholders, so
            # sum() raises TypeError unless they are filtered out first.
            sum_eu+=sum(self.transaction_utilities_eachItem[index_i])
            #generate MAU
            sum_mau+=self.max_utility_items[index_i]
        #genrate TWU
        iset.TWU=sum([self.Transaction_objects[i].total_utility for i in iset.transactions])
        iset.MIU=sum_miu*iset.support_count
        iset.EU=sum_eu*iset.support_count
        iset.MAU=sum_mau*iset.support_count
        #generate ESTU
        while True:
            estu=randint(iset.EU,iset.TWU)
            # NOTE(review): attributes are EU/MAU (upper case) -> this line
            # raises AttributeError on iset.eu / iset.mau.
            if iset.eu<=min(estu,iset.mau):
                break
        iset.ESTU=estu
        return iset
    def check_pkhui(self,iset):
        """Accept *iset* as a potential top-k HUI when its ESTU and MAU
        clear the border; strategy 4 raises the border from accepted MIUs.
        Returns the string 'Not a PKHUI' on rejection."""
        # NOTE(review): the second comparison uses the bare name
        # min_util_border -> NameError; should be self.min_util_border.
        if(iset.ESTU>=self.min_util_border) and (iset.MAU>=min_util_border):
            self.Top_k_itemsets.append(iset)
            bi.insort(self.Top_k_MIU,iset.MIU)
            #strategy 4
            if iset.MIU>=self.min_util_border:
                if len(self.Top_k_MIU)>self.K:
                    # NOTE(review): k is undefined here; presumably self.K.
                    if self.Top_k_MIU[k-1]>self.min_util_border:
                        self.min_util_border=self.Top_k_MIU[k-1]
        else:
            return 'Not a PKHUI'
    def apply_TKU(self):
        """Score all candidate itemsets, then (strategy 5) scan them in
        descending ESTU order, stopping at the first rejection."""
        for iter_i in range(len(self.Itemsets)):
            self.Itemsets[iter_i]=self.generate_ESTU(self.Itemsets[iter_i])
        #Strategy 5---------
        self.Itemsets.sort(key=lambda x: x.ESTU, reverse=True)
        for iter_i in range(len(self.Itemsets)):
            return_val=self.check_pkhui(self.Itemsets[iter_i])
            if return_val=='Not a PKHUI':
                break
        #-------------------
    def generate_pkhuis(self):
        """Walk the UP-Tree root-to-leaf paths to generate candidates."""
        itmsets=[]
        # NOTE(review): UP_TREE is the class name; the attribute is
        # self.UP_Tree -> AttributeError here.
        root_of_up=self.UP_TREE.root
        paths=[]
        path_for_each_leaf=[]
        # NOTE(review): missing self. -> NameError (and find_path returns None).
        path_for_each_leaf = find_path(root_of_up,path_for_each_leaf,0)
        #path_for_each_leaf=prune(path_for_each_leaf)
        #new_itemset=itemset_generation(path_for_each_leaf)
        paths.append(path_for_each_leaf)
    def find_path(self,rootNode,path,pathLen):
        """Depth-first walk collecting the current root-to-node path; at
        each leaf, prune the path and generate itemsets from it."""
        all_leafs_path=[]
        if rootNode is None:
            return
        if (len(path)>pathLen):
            path[pathLen]=rootNode
        else:
            path.append(rootNode)
        pathLen=pathLen+1
        if len(rootNode.child)==0:
            print("path:",path)
            # NOTE(review): prune/generate_itemsets/find_path are methods --
            # these unqualified calls raise NameError (missing self.).
            pruned_path=prune(path)
            generate_itemsets(path)
        else:
            for each_child in rootNode.child:
                find_path(each_child,path,pathLen)
    def prune(self,path):
        """Trim the path at the last node whose count falls below the
        border."""
        length=len(path)
        for j in range(length-1,0,-1):
            temp=path[j]
            # NOTE(review): bare min_util_border -> NameError (should be
            # self.min_util_border); also to_be_rem may be unbound, and the
            # slice rebinds only the local name, so the caller's list is
            # never actually shortened.
            if temp.count<min_util_border:
                to_be_rem=j
                path=path[:to_be_rem]
    def generate_itemsets(self,path_list):
        """Enumerate every combination (size >= 2) of the path's nodes and
        tally support counts in self.Itemsets."""
        for L in range(2, len(path_list)+1):
            # NOTE(review): itertools is never imported in this file ->
            # NameError on first use.
            print(itertools.combinations(path_list, L))
            for subset in itertools.combinations(path_list, L):
                if len(subset)>1:
                    print(subset)
                    all_items=[e.itemset for e in self.Itemsets]
                    if subset in all_items:
                        index_i=all_items.index(subset)
                        # NOTE(review): increments the Itemset object itself
                        # (no __iadd__) -> TypeError; presumably
                        # self.Itemsets[index_i].support_count += 1 was meant.
                        self.Itemsets[index_i]+=1
                    else:
                        temp=Itemset()
                        temp.itemset=subset
                        temp.support_count=1
                        self.Itemsets.append(temp)
# Script entry: only the dataset-loading / strategy-2 step is invoked here.
tku=TKU_algorithm(100)
tku.load_dataset()
| true |
9a093e9c00361d829d73e01cb94ab4639004569f | Python | misc77/dsegenerator | /resources.py | UTF-8 | 2,290 | 2.703125 | 3 | [
"MIT"
] | permissive | from pkg_resources import resource_filename
import wx
class Resources:
    """Static helpers resolving packaged resource files (templates,
    graphics, config, log) to absolute filenames via pkg_resources."""

    # Package-relative directories.
    templatePath = "/input"
    outputPath = "/output"
    configPath = "/config"
    graphicsPath = "/gfx"
    logPath = "/log"

    # Well-known file names.
    configFile = "config.ini"
    headerImage = "header.png"
    dseTemplate = "dse_template_v.xml"
    checklistTemplate = "checkliste_template_v.xml"
    logFile = "DSEGenerator.log"

    @staticmethod
    def getHeaderImage():
        """Return the absolute filename of the header image."""
        return Resources.get_filename(Resources.graphicsPath + "/" + Resources.headerImage)

    @staticmethod
    def getDSETemplate(version="1.0"):
        """Return the filename of the DSE template for *version*."""
        return Resources.get_filename(
            Resources.templatePath + "/" + Resources.getVersion(Resources.dseTemplate, version))

    @staticmethod
    def getChecklisteTemplate(version="1.0"):
        """Return the filename of the checklist template for *version*."""
        return Resources.get_filename(
            Resources.templatePath + "/" + Resources.getVersion(Resources.checklistTemplate, version))

    @staticmethod
    def get_filename(path):
        """Resolve a package-relative *path*; show an error dialog (and
        return None) when the resource cannot be located."""
        try:
            return resource_filename(__name__, path)
        except FileNotFoundError as err:
            # Bug fix: the original called FileNotFoundError.strerror() on the
            # exception *class*, which raised TypeError before the dialog
            # could ever be shown.  Bind the exception and report it.
            wx.MessageBox("Error occured by determining resource! " + str(err),
                          caption="Error occured!")

    @staticmethod
    def getVersion(filename, version):
        """Insert *version* before the extension: "a.xml" -> "a<version>.xml".

        NOTE(review): like the original, this assumes exactly one dot in
        *filename*; anything after a second dot is silently dropped.
        """
        parts = filename.split(".")
        return parts[0] + version + "." + parts[1]

    @staticmethod
    def getOutputPath():
        """Return the absolute output directory."""
        return resource_filename(__name__, Resources.outputPath)

    @staticmethod
    def getConfigFile():
        """Return the absolute filename of the configuration file."""
        return resource_filename(__name__, Resources.configPath + "/" + Resources.configFile)

    @staticmethod
    def getLogFile():
        """Return the absolute filename of the log file."""
        return resource_filename(__name__, Resources.logPath + "/" + Resources.logFile)

    @staticmethod
    def validVersions(versionA="1.0", versionB="1.0"):
        """Return True when both version strings are equal."""
        return versionA == versionB
| true |
ac6fb2ad0dba521d5bd50ccd8840519774c3677c | Python | lawrennd/GPy | /benchmarks/regression/evaluation.py | UTF-8 | 505 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | # Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import abc
import numpy as np
class Evaluation(object):
    """Abstract base class for scalar performance metrics.

    NOTE(review): ``__metaclass__`` is Python-2-only syntax; under Python 3
    it is silently ignored, so ``@abstractmethod`` is not actually enforced
    there -- confirm the targeted interpreter before relying on it.
    """
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def evaluate(self, gt, pred):
        """Compute a scalar assessing the performance of prediction ``pred``
        against ground truth ``gt``."""
        return None
class RMSE(Evaluation):
    """Rooted Mean Square Error: sqrt(mean((gt - pred)**2))."""
    name = 'RMSE'
    def evaluate(self, gt, pred):
        """Return the RMSE between ground truth ``gt`` and prediction ``pred``.

        BUGFIX: ``np.float`` (an alias of the builtin ``float``) was removed
        in NumPy 1.24; the builtin is used directly, which is semantically
        identical on all NumPy versions.
        """
        return np.sqrt(np.square(gt-pred).astype(float).mean())
| true |
2fc76d576e26805d20e00b841451af7b79feb6a4 | Python | ricardofelixmont/python-course-udemy | /9-advanced-build-in-functions/generator_classes.py | UTF-8 | 1,976 | 4.71875 | 5 | [] | no_license | #!/usr/bin/env python3.7
# First thing to keep in mind: we do not need 'yield' in generator classes. We use 'yield' only in functions.
class FirstHundredGenerator:
    """Iterator that produces the integers 0..99 one at a time.

    Any object implementing ``__next__`` is an iterator; every generator is
    an iterator, but not every iterator is a generator. Note that classes
    like this one never use ``yield`` -- ``yield`` belongs in functions.
    """

    def __init__(self):
        self.number = 0

    def __next__(self):
        # The __next__ dunder is what makes ``next(obj)`` work on this object.
        if self.number >= 100:
            raise StopIteration()
        value = self.number
        self.number += 1
        return value
# Demo: consume the iterator via next() and via the dunder directly.
g = FirstHundredGenerator() # Iterator -> not every iterator can be considered a generator.
print(next(g))
g.__next__() # equivalent to next(g)
print(g.number)
# Example of an iterator that is not a generator:
# We are not returning generated values; we are returning values from a list.
class FirstFiveIterator:
    """Iterator over a fixed list [1..5].

    Its instances are iterators but *not* generators: they return stored
    values from a list rather than generated ones.
    """

    def __init__(self):
        self.numbers = [1, 2, 3, 4, 5]
        self.i = 0

    def __next__(self):
        # Defining the __next__ method is what makes this an iterator.
        try:
            item = self.numbers[self.i]
        except IndexError:
            raise StopIteration()
        self.i += 1
        return item
# Demo: the iterator yields its first stored value.
iterator = FirstFiveIterator()
print(next(iterator))
Uma observação aqui é que os classes acima não permitem iterar sobre seus objetos:
exemplo:
g = FirstFiveIterator()
for number in g:
print(c)
Isso não funciona. It will raise an error.
"""
| true |
3b08e9d443da72b108c26eb3253fa6a87ac9b1e8 | Python | iamsarahdu/Python | /binary.py | UTF-8 | 121 | 3.65625 | 4 | [] | no_license | n=int(input("Enter the number"))
for row_len in range(1, n + 1):
    # Row i is the alternating pattern 1 0 1 0 ... of length i
    # (same output as printing J % 2 for J = 1..i with no separator).
    print(("10" * row_len)[:row_len])
6d088106af7d934ee7e24405d301846ee7ef44d6 | Python | jaykooklee/practicepython | /if and.py | UTF-8 | 132 | 2.828125 | 3 | [] | no_license | games = 9
points = 25
# MVP selection requires at least 10 games AND at least 20 points.
if games >= 10 and points >= 20:
    print('MVP로 선정되었습니다.')
else:
print('다음기회에') | true |
5e3c5205116c47856e682c480e625587abac3fe5 | Python | rococoscout/Capstone | /PYTHON/vecResponse.py | UTF-8 | 2,383 | 2.890625 | 3 | [] | no_license | from rule import Rule
import numpy
import gensim.downloader as api
from gensim.models.word2vec import Word2Vec
embeds = api.load("glove-wiki-gigaword-50")
#Takes a list of Rules and input question
#Returns a string answer if there were matches
#Returns None if there were no matches
def getVecAnswer(rules, userinput, isReg):
    """Return the first answer of the rule whose known questions are most
    similar (cosine over summed GloVe vectors) to ``userinput``.

    Args:
        rules: list of Rule objects (each with .questions, .answers,
               .addQuestion).
        userinput: raw question string from the user.
        isReg: when truthy, accept the best match regardless of score;
               otherwise require cosine similarity > 0.95.

    Returns:
        The matched rule's first answer, or None when there is no usable
        match.
    """
    if len(rules) == 0:
        return None
    question = clean(userinput)
    uservec = numpy.zeros(50)
    for word in question:
        if word not in embeds:
            continue
        uservec = embeds[word] + uservec
    # BUGFIX: the original compared ``uservec.all() == errorvec.all()``, which
    # is True whenever *any* component is zero. The intent is to bail out only
    # when no word matched at all, i.e. the vector is still all zeros.
    if not uservec.any():
        return None
    allscores = list()
    for rule in rules:
        for q in rule.questions:
            qvec = make_vector(q)
            score = cosine(uservec, qvec)
            # cosine against an all-zero vector yields NaN; skip those.
            if not numpy.isnan(score):
                allscores.append((rule, score))
    # Robustness: every candidate may have been NaN.
    if not allscores:
        return None
    allscores.sort(reverse=True, key=lambda x: x[1])
    if allscores[0][1] > .95 or isReg:
        rule = allscores[0][0]
        rule.addQuestion(userinput)
        return rule.answers[0]
    else:
        return None
# Build a single 50-d sentence vector by summing word embeddings.
def make_vector(question):
    """Sum the GloVe embeddings of every known word in ``question``;
    unknown words are ignored."""
    vec = numpy.zeros(50)
    for token in clean(question):
        if token in embeds:
            vec = vec + embeds[token]
    return vec
# Strips punctuation from the text and turns it into a list of words.
def clean(text):
    """Lowercase ``text``, replace punctuation characters with spaces and
    return the resulting whitespace-separated tokens."""
    punctuation = "?!.,':;()~/><[]#+&*_-$\\|\""
    table = str.maketrans({ch: " " for ch in punctuation})
    return text.lower().translate(table).split()
def cosine(vA, vB):
    """Cosine similarity between vectors vA and vB (NaN if either is zero)."""
    denominator = numpy.sqrt(numpy.dot(vA, vA)) * numpy.sqrt(numpy.dot(vB, vB))
    return numpy.dot(vA, vB) / denominator
if __name__ == "__main__":
    # Interactive similarity probe: compare one fixed sentence against typed
    # test sentences until the user enters "q".
    temp = ""
    mainInput= input("Main input: ")
    tempInput= input("Test input: ")
    testvm = numpy.zeros(50)
    testvm = make_vector(mainInput)  # immediately overwrites the zeros above
    testvt = numpy.zeros(50)
    while(tempInput != "q"):
        testvt = make_vector(tempInput)
        print(cosine(testvm,testvt))
        tempInput= input("Test input: ")
| true |
126df9e9f7549d41bd34a3b4e7e810fc68461652 | Python | tdl/python-challenge | /l10_logic.py | UTF-8 | 681 | 3.34375 | 3 | [
"MIT"
def get_digit_count(c, s):
    """Return ``(c, str(k))`` where k is the length of the run of character
    ``c`` at the start of ``s``.

    Robustness: an empty ``s`` now yields a zero count instead of the
    original IndexError from unconditionally reading ``s[0]``.
    """
    cnt = 0
    while cnt < len(s) and s[cnt] == c:
        cnt += 1
    return (c, str(cnt))
def makenext(s):
    """One 'look-and-say' step: encode each run of s as <count><digit>."""
    pieces = []
    pos = 0
    while pos < len(s):
        digit, count = get_digit_count(s[pos], s[pos:])
        pieces.append(count + digit)  # new count followed by the digit
        pos += int(count)
    return "".join(pieces)
# Driver (Python 2 -- note the ``print`` statements below): iterate the
# look-and-say sequence 31 times starting from "1", printing each term, or
# only its length once it exceeds 70 characters.
a = "1"
alist = [a]
for i in range(31):
    if len(a) < 70:
        print "%d '%s' - len %d" % (i, a, len(a))
    else:
        print "%d '...' - len %d" % (i, len(a))
    a = makenext(a)
    alist.append(a)
| true |
8de5b821202e10f7e829931862069843f37dde6b | Python | brschlegel/Poker | /Hands.py | UTF-8 | 1,904 | 3.453125 | 3 | [] | no_license | from Cards import Card
##I'm just now realizing that there really isn't a reason that all of these classes have to be in different files
##Sorted greatest to least rank
class Hand:
    """A poker hand whose cards are kept sorted by rank, greatest to least.

    ``details`` holds [hand rank, most important card, 2nd most important, ...].
    """
    ##[hand rank, most important card, 2nd most important, ...]
    def __init__(self):
        self.cardList = []
        self.details = [0,0,0,0,0,0]
    def Add(self,card):
        """Insert ``card`` so that cardList stays sorted in descending rank."""
        #handling extremes
        if len(self.cardList) == 0:
            self.cardList.append(card)
        elif card.rank >= self.cardList[0].rank:
            self.cardList.insert(0, card)
        elif card.rank <= self.cardList[len(self.cardList) - 1].rank:
            self.cardList.append(card)
        else:
            # Strictly between the end ranks: binary-search for the slot.
            self.AddR(card, 0, len(self.cardList)- 1)
        return
    def AddR(self,card, start, end):
        """Recursive binary insertion between indices start and end; on entry
        the card's rank is strictly between the ranks at those indices."""
        targetIndex = (end- start)//2 + start
        ##If card rank is greater than half, check next card, if doesn't fit, recurse with first half
        if card.rank > self.cardList[targetIndex].rank:
            if card.rank <= self.cardList[targetIndex - 1].rank:
                self.cardList.insert(targetIndex, card)
                return
            else:
                # NOTE(review): no ``return`` after this recursion, so the
                # second branch below is re-evaluated against the mutated
                # list. It happens to be false after a successful insert
                # here, but the control flow is fragile -- confirm before
                # changing anything in this method.
                self.AddR(card, start, targetIndex)
        ##Opposite if lesser
        if card.rank <= self.cardList[targetIndex].rank:
            if card.rank >= self.cardList[targetIndex + 1].rank:
                self.cardList.insert(targetIndex + 1, card)
                return
            else:
                self.AddR(card, targetIndex, end)
    def Print(self):
        # Dump every card in order (relies on Card's string conversion).
        for i in range(0, len(self.cardList)):
            print(self.cardList[i])
    ##Depreciated
    def FindValue(self):
        """Legacy scoring: folds the ranks into a decimal fraction, two
        digits per card (marked deprecated by the original author)."""
        result = 0
        for i in range(0, len(self.cardList)):
            result += self.cardList[i].rank * 10.0 **(-((i + 1) *2))
        return result
| true |
224158db15c8a64463da202750302902321a00eb | Python | whonut/Project-Euler | /problem21.py | UTF-8 | 442 | 3.34375 | 3 | [] | no_license | from math import sqrt
def factor(n):
    """Return the proper divisors of ``n`` (including 1, excluding n).

    BUGFIX/port: uses ``range`` and integer floor division so the code also
    runs under Python 3 (``xrange`` does not exist there, and ``/`` would
    produce floats); both constructs behave identically under Python 2.
    """
    factors = [1]
    for x in range(2, int(sqrt(n)) + 1):
        if n % x == 0:
            factors.append(x)
            # Avoid adding a perfect-square root twice.
            if n // x != x:
                factors.append(n // x)
    return factors
def d(n):
    """Sum of the proper divisors of n."""
    divisors = factor(n)
    return sum(divisors)
# Sum all amicable numbers below 10000 (Project Euler 21): n and m = d(n)
# are amicable when d(m) == n and m != n.
amicables = []
for n in range(1, 10000):
    partner = d(n)  # hoisted: the original evaluated d(n) three times
    if partner != n and d(partner) == n and n not in amicables:
        amicables.append(n)
        amicables.append(partner)
# BUGFIX/port: ``print`` as a function so the script runs under Python 3
# (under Python 2 the parentheses are a harmless expression).
print(sum(amicables))
| true |
3bf713b0f9b9913610ef939f8998b4300dbd9eb0 | Python | VakinduPhilliam/Python_Runtime_Parameters | /Python_Sys_Namespace_Warnings.py | WINDOWS-1250 | 911 | 3.0625 | 3 | [] | no_license | # Python sys System-specific parameters and functions.
# This module provides access to some variables used or maintained by the interpreter and to functions that interact
# strongly with the interpreter.
# warnings Warning control.
# Warning messages are typically issued in situations where it is useful to alert the user of some condition in a program,
# where that condition (normally) doesn't warrant raising an exception and terminating the program.
# Developers of interactive shells that run user code in a namespace other than __main__ are advised to ensure that DeprecationWarning
# messages are made visible by default, using code like the following (where user_ns is the module used to execute code entered
# interactively):
# NOTE(review): ``user_ns`` is not defined in this snippet -- it must be the
# namespace module supplied by the embedding interactive shell, as described
# in the comment above; this file does not run standalone.
import warnings
warnings.filterwarnings("default", category=DeprecationWarning,
                        module=user_ns.get("__name__"))
| true |
801f71c96b505c6f0f522dac925099fd2718c719 | Python | Cribbee/ZoomInDev | /apps/data_mining/test2.py | UTF-8 | 4,419 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
__author__ = 'Cribbee'
__create_at__ = 2018 / 9 / 29
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl #显示中文
from sklearn.model_selection import train_test_split #这里是引用了交叉验证
from sklearn.linear_model import LinearRegression #线性回归
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn import metrics
def mul_nlr():
    """Fit a linear model and polynomial models of degree 2..5 to the vaccine
    dataset, plot all fits, then plot MSE versus polynomial degree m=1..20
    (scattering the first five degrees individually)."""
    df = pd.read_csv('/Users/cribbee/Downloads/course-6-vaccine.csv', header=0)
    m = 5
    X = df[['Year']]
    y = df['Values']
    ylabel = "values"
    xlabel = "year"
    sns.set_style('darkgrid')
    lr = LinearRegression()
    pr = LinearRegression()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)
    # Sort both splits so the line plots are drawn left-to-right.
    X_train = X_train.sort_values(by='Year', ascending=True)
    X_test = X_test.sort_values(by='Year', ascending=True)
    y_train = y_train.sort_values(ascending=True)
    y_test = y_test.sort_values(ascending=True)
    # Linear fit
    lr.fit(X_train, y_train)
    # X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]  # X_fit would be constructed prediction data; with large datasets use X_train (X is the training data)
    X_fit = X_train
    y_lin_fit = lr.predict(X_test)
    plt.rcParams['font.sans-serif'] = ['SiHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.figure()
    plt.subplots(1, 1, figsize=(10, 5))
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.scatter(X, y, label='training points')
    plt.plot(X_test, y_lin_fit, label='linear fit', linestyle='--')
    for m in range(2, m+1):
        high_order = PolynomialFeatures(degree=m, include_bias=False) # degree sets the highest power of the fitted polynomial
        # Actual prediction
        X_m = high_order.fit_transform(X_fit)
        pr.fit(X_m, y_train) # X_m is the training data; fitting it yields the polynomial coefficients
        y_m_fit = pr.predict(high_order.transform(X_test)) # predict on the test data with the high-order polynomial
        # (commented out) plot to inspect the trend:
        # X_m = high_order.fit_transform(X_fit)
        # pr.fit(X_m, y_train) # X_m is the training data; fitting it yields the polynomial coefficients
        # y_m_fit = pr.predict(high_order.fit_transform(X_fit)) # predict on the constructed X_fit data with the high-order polynomial
        # error_sum = mean_absolute_error(y_test, y_m_fit)
        # print(error_sum)
        plt.plot(X_test, y_m_fit, label='m='+str(m))
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.show()
    # Second figure: MSE on the test split as a function of the degree m.
    mse = []
    mse_show = []
    pic = []
    m = 1
    mth_power = 5
    m_max = 20
    plt.subplots(1, 1, figsize=(10, 5))
    while m <= m_max:
        model = make_pipeline(PolynomialFeatures(m, include_bias=False), LinearRegression())
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        mse.append(mean_squared_error(y_test, y_pred.flatten()))
        mse_show.append("m="+str(m)+", mse="+str(mse[m-1]))
        if m <= mth_power:
            pic.append(plt.scatter(m, mse[m-1], s=60))
        m = m + 1
    plt.plot([i for i in range(1, m_max + 1)], mse, 'r')
    plt.scatter([i for i in range(mth_power+1, m_max + 1)], mse[mth_power:], c='black', marker='x', s=55)
    plt.legend((pic), (mse_show[0:mth_power]))
    plt.xlabel('m')
    plt.ylabel('MSE')
    plt.show()
def mul_nlr2():
    """Fit a single degree-2 polynomial regression on raw numpy arrays and
    plot the result (array-based variant of mul_nlr)."""
    df = pd.read_csv('/Users/cribbee/Downloads/course-6-vaccine.csv', header=0)
    m = 2
    X = df[['Year']].values
    y = df['Values'].values
    ylabel = "values"
    xlabel = "year"
    lr = LinearRegression()  # NOTE(review): unused in this function
    pr = LinearRegression()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)
    sns.set_style('darkgrid')
    high_order = PolynomialFeatures(degree=m, include_bias=False)
    poly_train_x_2 = high_order.fit_transform(X_train.reshape(len(X_train), 1))
    X_fit = high_order.fit_transform(X_test.reshape(len(X_test), 1))
    pr.fit(poly_train_x_2, y_train.reshape(len(X_train), 1))
    # NOTE(review): X_fit is already polynomial-expanded, yet it is expanded
    # again here before predict -- this looks like a double transform; confirm
    # against the intended model inputs.
    y_m_fit = pr.predict(high_order.fit_transform(X_fit))
    plt.plot(X_fit, y_m_fit, label='m=' + str(m))
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    # Run the polynomial-fit demo when executed as a script.
    mul_nlr()
15993955b78e903f89b6e6b3a56df72425a5602a | Python | AustralianSynchrotron/lightflow-epics | /lightflow_epics/pv_trigger_task.py | UTF-8 | 6,430 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | import time
from epics import PV
from collections import deque
from functools import partial
from lightflow.queue import JobType
from lightflow.logger import get_logger
from lightflow.models import BaseTask, TaskParameters, Action
# Module-level logger, named after this module.
logger = get_logger(__name__)
class PvTriggerTask(BaseTask):
    """ Triggers the execution of a callback function upon a change in a monitored PV.
    This trigger task monitors a PV for changes. If a change occurs a provided callback
    function is executed.
    """
    def __init__(self, name, pv_name, callback,
                 event_trigger_time=None, stop_polling_rate=2,
                 skip_initial_callback=True, *, queue=JobType.Task,
                 callback_init=None, callback_finally=None,
                 force_run=False, propagate_skip=True):
        """ Initialize the PV trigger task.
        All task parameters except the name, callback, queue, force_run and propagate_skip
        can either be their native type or a callable returning the native type.
        Args:
            name (str): The name of the task.
            pv_name (str, callable): The name of the PV that should be monitored.
            callback (callable): A callable object that is called when the PV changes.
                                 The function definition is
                                   def callback(data, store, signal, context, event)
                                 where event is the information returned by PyEPICS for
                                 a monitor callback event.
            event_trigger_time (float, None): The waiting time between events in seconds.
                                              Set to None to turn off.
            stop_polling_rate (float): The number of events after which a signal is sent
                                       to the workflow to check whether the task
                                       should be stopped.
            skip_initial_callback (bool): Set to True to skip executing the callback
                                          upon initialization of the PV monitoring.
            queue (str): Name of the queue the task should be scheduled to. Defaults to
                         the general task queue.
            callback_init (callable): A callable that is called shortly before the task
                                      is run. The definition is:
                                        def (data, store, signal, context)
                                      where data the task data, store the workflow
                                      data store, signal the task signal and
                                      context the task context.
            callback_finally (callable): A callable that is always called at the end of
                                         a task, regardless whether it completed
                                         successfully, was stopped or was aborted.
                                         The definition is:
                                           def (status, data, store, signal, context)
                                         where status specifies whether the task was
                                           success: TaskStatus.Success
                                           stopped: TaskStatus.Stopped
                                           aborted: TaskStatus.Aborted
                                           raised exception: TaskStatus.Error
                                         data the task data, store the workflow
                                         data store, signal the task signal and
                                         context the task context.
            force_run (bool): Run the task even if it is flagged to be skipped.
            propagate_skip (bool): Propagate the skip flag to the next task.
        """
        super().__init__(name, queue=queue,
                         callback_init=callback_init, callback_finally=callback_finally,
                         force_run=force_run, propagate_skip=propagate_skip)
        # set the tasks's parameters
        self.params = TaskParameters(
            pv_name=pv_name,
            event_trigger_time=event_trigger_time,
            stop_polling_rate=stop_polling_rate,
            skip_initial_callback=skip_initial_callback
        )
        self._callback = callback
    def run(self, data, store, signal, context, **kwargs):
        """ The main run method of the PvTriggerTask task.
        Args:
            data (MultiTaskData): The data object that has been passed from the
                                  predecessor task.
            store (DataStoreDocument): The persistent data store object that allows the
                                       task to store data for access across the current
                                       workflow run.
            signal (TaskSignal): The signal object for tasks. It wraps the construction
                                 and sending of signals into easy to use methods.
            context (TaskContext): The context in which the tasks runs.
        """
        params = self.params.eval(data, store)
        skipped_initial = False if params.skip_initial_callback else True
        polling_event_number = 0
        # Hand-off buffer: _pv_callback appends monitor events here.
        # NOTE(review): appends happen from the EPICS monitor thread while this
        # loop pops -- deque append/pop are atomic, but confirm the intended
        # ordering: pop() consumes newest-first (LIFO), not FIFO.
        queue = deque()
        # set up the internal callback
        pv = PV(params.pv_name, callback=partial(self._pv_callback, queue=queue))
        while True:
            # NOTE(review): with event_trigger_time=None this loop busy-spins.
            if params.event_trigger_time is not None:
                time.sleep(params.event_trigger_time)
            # check every stop_polling_rate events the stop signal
            polling_event_number += 1
            if polling_event_number > params.stop_polling_rate:
                polling_event_number = 0
                if signal.is_stopped:
                    break
            # get all the events from the queue and call the callback function
            while len(queue) > 0:
                event = queue.pop()
                if skipped_initial:
                    if self._callback is not None:
                        self._callback(data, store, signal, context, **event)
                else:
                    # Swallow the very first monitor event (fired on connect).
                    skipped_initial = True
        # Detach the monitor before finishing so no events arrive afterwards.
        pv.clear_callbacks()
        return Action(data)
    @staticmethod
    def _pv_callback(queue, **kwargs):
        """ Internal callback method for the PV monitoring. """
        queue.append(kwargs)
| true |
7f925c936b36fadf3966699b506fcefc3e7e04f6 | Python | youyong123/pydumpanalyzer | /pydumpanalyzer/frame_test.py | UTF-8 | 986 | 2.90625 | 3 | [] | no_license | ''' contains tests for the Frame class '''
import pytest
from frame import Frame
from variable import Variable
# Representative Frame construction variants exercised by the tests below.
FRAMES_TO_TEST = [
    Frame('module', 1),
    Frame('module2', 10, 'thefunction'),
    Frame('module2', 10, sourceFile='source.cpp'),
    Frame('module2', 10, sourceFile='source.cpp', warningAboutCorrectness=True),
    Frame('module2', 10, sourceFile='source.cpp', variables=[Variable('Type', 'Name', "Value")]),
]
@pytest.mark.parametrize(
    'frame', FRAMES_TO_TEST
)
def test_frame_functions(frame):
    ''' ensures str() and repr() on Frame don't traceback (and are truthy) '''
    for rendering in (str(frame), repr(frame)):
        assert rendering
@pytest.mark.parametrize(
    'frame', FRAMES_TO_TEST
)
def test_frame_properties(frame):
    ''' ensures every supported attribute exists on the frame '''
    expected_attributes = ('module', 'index', 'function', 'sourceFile',
                           'line', 'variables', 'warningAboutCorrectness')
    for attribute in expected_attributes:
        assert hasattr(frame, attribute)
| true |
a1fb993e97a10bbe45d00499dedb32de043c9fae | Python | saiteja-talluri/CS-335-Assignments | /Lab 5/la5-160050098/task.py | UTF-8 | 5,273 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
from utils import *
def preprocess(X, Y):
    ''' TASK 0
    X = input feature matrix [N X D]
    Y = output values [N X 1]
    Convert data X, Y obtained from read_data() to a usable format by gradient descent function
    Return the processed X, Y that can be directly passed to grad_descent function
    NOTE: X has first column denote index of data point. Ignore that column
    and add constant 1 instead (for bias part of feature set)
    '''
    N,D = X.shape
    # First pass: count output columns (1 bias column, one column per
    # category for string features, one per numeric feature).
    no_cols = 1
    for i in range(1,D):
        if(isinstance(X[0][i], str)):
            no_cols += len(set(X[:,i]))
        # NOTE(review): this increment appears to run for string columns as
        # well, leaving trailing all-ones padding columns in X_new -- confirm
        # against the original file's indentation.
        no_cols += 1
    # X_new starts as all ones, so column 0 is the bias term.
    X_new = np.ones((N,no_cols), dtype= 'float')
    Y_new = Y.astype(float)
    # Second pass: fill one-hot encodings for string columns and
    # z-score-normalized values for numeric columns.
    col = 1
    for i in range(1,D):
        if(isinstance(X[0][i], str)):
            X_new[:,col:col+len(set(X[:,i]))] = one_hot_encode(X[:,i], set(X[:,i]))
            col += len(set(X[:,i]))
        else:
            mean = np.mean(X[:,i])
            sd = np.std(X[:,i])
            X_new[:,col] = (X[:,i]-mean)/sd
            col += 1
    return X_new,Y_new
def grad_ridge(W, X, Y, _lambda):
    ''' TASK 2
    W = weight vector [D X 1]
    X = input feature matrix [N X D]
    Y = output values [N X 1]
    _lambda = scalar regularization parameter
    Return the gradient of the ridge objective ||Y - X W||^2 + lambda*||W||^2
    '''
    residual = Y - np.dot(X, W)
    gradient = (-2) * np.dot(X.T, residual) + 2 * _lambda * W
    return gradient
def ridge_grad_descent(X, Y, _lambda, max_iter=30000, lr=0.00001, epsilon = 1e-2):
    ''' TASK 2
    X = input feature matrix [N X D]
    Y = output values [N X 1]
    _lambda = scalar regularization parameter
    max_iter = iteration cap in case of no convergence
    lr = learning rate
    epsilon = gradient norm below which the algorithm is considered converged
    Return the trained weight vector [D X 1] found by gradient descent on
    the ridge loss. Note: one final update step is applied even on the
    iteration where convergence is detected (matches the original behavior).
    '''
    W = np.random.normal(0, 0.01, (X.shape[1], 1))
    for _ in range(max_iter):
        gradient = grad_ridge(W, X, Y, _lambda)
        W = W - lr * gradient
        if np.linalg.norm(gradient, ord=2) < epsilon:
            break
    return W
def k_fold_cross_validation(X, Y, k, lambdas, algo):
    ''' TASK 3
    X = input feature matrix [N X D]
    Y = output values [N X 1]
    k = number of splits for k-fold cross validation
    lambdas = list of scalar parameter lambda
    algo = one of {coord_grad_descent, ridge_grad_descent}
    Return a list with, for each lambda, the SSE on the held-out fold
    averaged over the k contiguous splits of X, Y.
    '''
    n_samples = X.shape[0]
    fold_size = float(n_samples) / float(k)
    avg_errors = []
    for _lambda in lambdas:
        fold_errors = []
        for fold in range(k):
            lo, hi = int(fold * fold_size), int((fold + 1) * fold_size)
            # Training data is everything outside the [lo, hi) validation slice.
            X_train = np.vstack((X[:lo, :], X[hi:, :]))
            Y_train = np.vstack((Y[:lo, :], Y[hi:, :]))
            W_trained = algo(X_train, Y_train, _lambda)
            fold_errors.append(sse(X[lo:hi, :], Y[lo:hi, :], W_trained))
        avg_errors.append(np.mean(fold_errors))
        print("Lambda : " + str(_lambda) + ", Ans : " + str(avg_errors[-1]))
    return avg_errors
def coord_grad_descent(X, Y, _lambda, max_iter=1000):
    ''' TASK 4
    X = input feature matrix [N X D]
    Y = output values [N X 1]
    _lambda = scalar parameter lambda
    max_iter = maximum number of iterations of coordinate descent
    Return the trained weight vector [D X 1] after coordinate descent on the
    lasso objective, using soft-thresholding with threshold lambda/2 on each
    coordinate. Stops early when a full sweep changes no coordinate.
    '''
    N,D = X.shape
    W = np.random.normal(0,0.01,(D, 1))
    # Precompute the per-column squared norms x_j^T x_j (denominators).
    preprocess = np.zeros(D)
    for i in range(D):
        preprocess[i] = np.dot(X[:,i].T,X[:,i])
    for i in range(0, max_iter):
        not_changed = 0  # counts coordinates left unchanged this sweep
        for j in range(D):
            if preprocess[j] == 0:
                # Degenerate all-zero column: its weight contributes nothing.
                W[j] = 0
            else:
                # rho_j = x_j^T (residual with W[j] added back in).
                rho_j = (np.dot(X[:,j].T,Y - np.dot(X,W))) + preprocess[j]*W[j]
                # Soft-threshold rho_j at +/- lambda/2.
                if(rho_j < (-0.5)*_lambda):
                    beta_j = (rho_j + (0.5)*_lambda)/preprocess[j]
                    if(W[j] == beta_j):
                        not_changed += 1
                    else:
                        W[j] = beta_j
                elif (rho_j > (0.5)*_lambda):
                    beta_j = (rho_j - (0.5)*_lambda)/preprocess[j]
                    if(W[j] == beta_j):
                        not_changed += 1
                    else:
                        W[j] = beta_j
                else:
                    # |rho_j| <= lambda/2: the coordinate is zeroed out.
                    if(W[j] == 0):
                        not_changed += 1
                    else:
                        W[j] = 0
        if not_changed == D:
            break
    return W
if __name__ == "__main__":
    # Do your testing for Kfold Cross Validation in by experimenting with the code below
    X, Y = read_data("./dataset/train.csv")
    X, Y = preprocess(X, Y)
    trainX, trainY, testX, testY = separate_data(X, Y)
    # NOTE(review): ``[...]`` below is a literal Ellipsis placeholder; it must
    # be replaced with concrete lambda values before this script can run.
    lambdas = [...] # Assign a suitable list Task 5 need best SSE on test data so tune lambda accordingly
    scores = k_fold_cross_validation(trainX, trainY, 6, lambdas, coord_grad_descent)
    '''
    lambdas = [300000, 310000, 320000, 330000, 340000, 350000, 370000, 400000, 410000, 420000, 430000, 440000, 450000]
    scores = [168839043350.3544, 168724745503.99258, 168652817057.73956, 168609570271.94696,168591968665.28323, 168610986799.54117, 168743850943.06015, 168837183139.23697, 168805133677.0105, 168789684756.9399, 168791631804.54233, 168811506871.57938, 168854360254.16794]
    '''
    # plot_kfold(lambdas, scores)
ef1ced32be2e0b7e189de28ae11fd355212f19c5 | Python | dhockaday/Echonest-TasteProfile-DataLoader | /utils.py | UTF-8 | 670 | 3.234375 | 3 | [] | no_license | import os
import csv
def txt_to_csv(txtfile, csvfile=None):
    ''' Convert a tab-separated text file to a CSV file.

    Params:
        txtfile : path to the input text file
        csvfile : path for the new CSV file; defaults to the input path with
                  its extension replaced by ``.csv``
    Return :
        csvfile : path of the CSV file that was written

    BUGFIX: the default output path is now derived with ``os.path`` instead
    of manual '/'-and-'.' splitting (which broke on Windows separators and on
    dots in directory names), and the CSV file is opened with ``newline=''``
    as the csv module requires (prevents blank lines on Windows).
    '''
    if csvfile is None:
        csvfile = os.path.splitext(txtfile)[0] + '.csv'
    with open(txtfile, 'r') as f:
        # Lazily split each line on tabs; strip() drops the trailing newline.
        data = (line.strip().split('\t') for line in f)
        with open(csvfile, 'w', newline='') as out_f:
            csv.writer(out_f).writerows(data)
    return csvfile
| true |
82561228274d57eecd5d0c911c627deaa977cbfc | Python | chelseashin/AlgorithmStudy2021 | /soohyun/python/programmers/0505/수식최대화/1.py | UTF-8 | 2,750 | 2.9375 | 3 | [] | no_license | num_list, op_list = list(), list()
# Module-level parser state: the set of distinct operators in the expression.
ops = set()
def calc(num_1, num_2, op):
    """Apply binary operator ``op`` ('-' or '*'; anything else means '+')
    to num_1 and num_2."""
    operations = {
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
    }
    return operations.get(op, lambda a, b: a + b)(num_1, num_2)
def make_post_prefix(pri_list):
    """Convert the tokenized expression (module globals num_list/op_list)
    into postfix (RPN) order, using pri_list[i] as the priority of the i-th
    operator in ``ops`` -- a shunting-yard pass."""
    global ops, num_list, op_list
    priority = dict()
    result = []
    op_stack = []
    # Assign each distinct operator its priority for this permutation.
    for idx, op in enumerate(ops):
        priority[op] = pri_list[idx]
    for idx, value in enumerate(num_list):
        result.append(value)
        # print(op_stack)
        # There is one fewer operator than numbers; skip after the last one.
        if idx >= len(op_list):
            continue
        if len(op_stack) > 0:
            # Debug output left by the original author.
            print(op_stack[-1], priority[op_stack[-1]], op_list[idx], priority[op_list[idx]])
        if len(op_stack) <= 0 or priority[op_stack[-1]] < priority[op_list[idx]]:
            op_stack.append(op_list[idx])
        elif priority[op_stack[-1]] >= priority[op_list[idx]]:
            # Pop higher-or-equal priority operators before pushing this one.
            while len(op_stack) > 0 and priority[op_stack[-1]] >= priority[op_list[idx]]:
                result.append(op_stack.pop())
            op_stack.append(op_list[idx])
    # Flush remaining operators.
    while len(op_stack) > 0:
        result.append(op_stack.pop())
    return result
def make_number(pri_list):
    """Evaluate the expression under the operator priorities ``pri_list``:
    build the postfix form, evaluate it with a stack, and return the
    absolute value of the result."""
    global ops, num_list, op_list
    stack = []
    post_prefix = make_post_prefix(pri_list)
    # Debug output left by the original author.
    print(post_prefix)
    for value in post_prefix:
        #print(stack)
        if value in {'-', '+', '*'}:
            # Pop operands in reverse order (num_1 op num_2).
            num_2 = int(stack.pop())
            num_1 = int(stack.pop())
            stack.append(calc(num_1, num_2, value))
        else:
            stack.append(value)
    #print(abs(stack[0]))
    # The task asks for the maximum *absolute* value of the expression.
    return abs(stack[0])
    #return stack[0]
def dfs(pri_list, visited):
    """Enumerate every permutation of operator priorities (backtracking over
    indices of ``ops``) and return the maximum value produced by
    make_number."""
    global ops, num_list, op_list
    max_result = 0
    # A full assignment of priorities: evaluate the expression.
    if len(pri_list) >= len(ops):
        result = make_number(pri_list)
        return result
    else:
        for i in range(len(ops)):
            if not visited.get(i, False):
                visited[i] = True
                pri_list.append(i)
                result = dfs(pri_list, visited)
                if max_result < result:
                    max_result = result
                # Backtrack.
                pri_list.pop()
                visited.pop(i)
        return max_result
def solution(expression):
    """Split ``expression`` into numbers and operators, then brute-force all
    operator-priority orderings via dfs, returning the maximum |result|.

    BUGFIX/robustness: the module-level parser state (num_list, op_list, ops)
    was never reset, so every call after the first accumulated stale tokens
    from earlier expressions; it is reset on entry now.
    """
    global ops, num_list, op_list
    ops = set()
    num_list = []
    op_list = []
    number_tmp = ''
    # Tokenize: digits accumulate into number_tmp; an operator flushes it.
    for alpha in expression:
        if alpha in {'-', '*', '+'}:
            ops.add(alpha)
            num_list.append(int(number_tmp))
            op_list.append(alpha)
            number_tmp = ''
        else:
            number_tmp += alpha
    num_list.append(int(number_tmp))
    # Try every permutation of operator priorities and keep the maximum.
    return dfs([], dict())
c698fae7d316bc70f5c4f78da132801be9447a7f | Python | coolmich/py-leetcode | /solu/348. Design Tic-Tac-Toe.py | UTF-8 | 1,703 | 4.1875 | 4 | [] | no_license | class TicTacToe(object):
def __init__(self, n):
"""
Initialize your data structure here.
:type n: int
"""
self.grid = [[0 for i in range(n)] for j in range(n)]
    def move(self, row, col, player):
        """
        Player {player} makes a move at ({row}, {col}).
        @param row The row of the board.
        @param col The column of the board.
        @param player The player, can be either 1 or 2.
        @return The current winning condition, can be either:
                0: No one wins.
                1: Player 1 wins.
                2: Player 2 wins.
        :type row: int
        :type col: int
        :type player: int
        :rtype: int
        """
        def check_rc(r, c, grid):
            # c1: entire column c matches the placed mark.
            # c2: entire row r matches the placed mark.
            # c3: a diagonal through (r, c) matches.
            # c4: scratch flag meaning "a mismatch was found on the diagonal".
            c1 = c2 = True
            c3 = c4 = False
            for i in range(len(grid)):
                if grid[i][c] != grid[r][c]: c1 = False
                if grid[r][i] != grid[r][c]: c2 = False
            if r == c:
                # Main diagonal check.
                for i in range(len(grid)):
                    if grid[i][i] != grid[r][c]: c4 = True
                if not c4: c3 = True
                c4 = False  # reset so the anti-diagonal check starts clean
            if r+c == len(grid)-1:
                # Anti-diagonal check.
                for i in range(len(grid)):
                    if grid[i][len(grid)-1-i] != grid[r][c]: c4 = True
                if not c4: c3 = True
            return c1 or c2 or c3
        self.grid[row][col] = player
        return player if check_rc(row, col, self.grid) else 0
obj = TicTacToe(3)
# ["TicTacToe","move","move","move","move","move","move"]
arr = [[1,2,2],[0,2,1],[0,0,2],[2,0,1],[0,1,2],[1,1,1]]
for mv in arr:
print obj.move(*mv) | true |
3056fa403c55d5e8f95f12f2d17136f8f2018b16 | Python | wisscot/LaoJi | /Entrance/Leetcode/0127.py | UTF-8 | 1,518 | 3.640625 | 4 | [] | no_license | # 127. Word Ladder
Basic idea:
typical BFS, find the shorest path
class Solution:
def ladderLength(self, start, end, words):
# write your code here
words.add(end)
# build word patterns mapping
pattern_words = self.buildpattern(words)
res = 0
queue = collections.deque([start])
visited = set([start])
# BFS find shortest path
while queue:
res += 1
for _ in range(len(queue)):
head = queue.popleft()
if head == end:
return res
neighbors = self.getneighbors(head, pattern_words)
for nb in neighbors:
if nb in visited:
continue
queue.append(nb)
visited.add(nb)
return 0
def buildpattern(self, words):
p_words = collections.defaultdict(list)
for word in words:
for p in self.patterns(word):
p_words[p].append(word)
return p_words
def patterns(self, word):
patts = []
for i in range(len(word)):
patts.append(word[:i]+'_'+word[i+1:])
return patts
def getneighbors(self, word, p_words):
neighbors = []
for patt in self.patterns(word):
neighbors += p_words[patt]
return set(neighbors)
| true |
c0bc646ed31ac6640ca35c791abf871364e68dcc | Python | Fondamenti18/fondamenti-di-programmazione | /students/1750888/homework01/program02.py | UTF-8 | 4,787 | 3.78125 | 4 | [] | no_license | ''' In determinate occasioni ci capita di dover scrivere i numeri in lettere,
ad esempio quando dobbiamo compilare un assegno.
Puo' capitare che alcuni numeri facciano sorgere in noi qualche dubbio.
Le perplessita' nascono soprattutto nella scrittura dei numeri composti con 1 e 8.
Tutti i numeri come venti, trenta, quaranta, cinquanta, ecc... elidono la vocale
finale (la "i" per 20, la "a" per tutti gli altri) fondendola con la vocale iniziale
del numero successivo; scriveremo quindi ventuno, ventotto, trentotto,
cinquantuno ecc...
Il numero cento, nella formazione dei numeri composti con uno e otto, non si comporta
cosi'; il numero "cento" e tutte le centinaia (duecento, trecento, ecc...),
infatti, non elidono la vocale finale. Dunque non scriveremo centuno, trecentotto ma centouno,
trecentootto, ecc...
I numeri composti dalle centinaia e dalla decina "ottanta" invece tornano ad elidere
la vocale finale; scriveremo quindi centottanta, duecentottanta, ecc...,
non centoottanta, duecentoottanta, ...
Il numero "mille" non elide in nessun numero composto la vocale finale; scriveremo
quindi milleuno, milleotto, milleottanta, ecc...
Altri esempi sono elencati nel file grade02.txt
Scrivere una funzione conv(n) che prende in input un intero n, con 0<n<1000000000000,
e restituisce in output una stringa con il numero espresso in lettere
ATTENZIONE: NON USATE LETTERE ACCENTATE.
ATTENZIONE: Se il grader non termina entro 30 secondi il punteggio dell'esercizio e' zero.
'''
def conv(n):
    """Spell out the integer n in Italian words (no accented letters).

    Valid for 0 < n < 10**12.  Implements the elision rules from the
    assignment: tens drop their final vowel before 'uno'/'otto'
    (ventuno, trentotto); hundreds drop their final 'o' only before
    'ottanta' (centottanta) but keep it before 'uno'/'otto'
    (centouno, centootto); 'mille' never elides (milleuno, milleotto).

    Bug fixes vs. the original:
    - the cento+ottanta elision check sat in a branch where it could
      never fire, so 180 came out as 'centoottanta';
    - all-zero groups leaked their scale word, so 1000000 came out as
      'unmilionemila' instead of 'unmilione'.

    :param n: integer, 0 < n < 1000000000000
    :return: the number spelled out as a single lowercase string
    """
    units = {'0': '', '1': 'uno', '2': 'due', '3': 'tre', '4': 'quattro',
             '5': 'cinque', '6': 'sei', '7': 'sette', '8': 'otto', '9': 'nove'}
    teens = {'10': 'dieci', '11': 'undici', '12': 'dodici', '13': 'tredici',
             '14': 'quattordici', '15': 'quindici', '16': 'sedici',
             '17': 'diciassette', '18': 'diciotto', '19': 'diciannove'}
    tens = {'0': '', '2': 'venti', '3': 'trenta', '4': 'quaranta',
            '5': 'cinquanta', '6': 'sessanta', '7': 'settanta',
            '8': 'ottanta', '9': 'novanta'}
    hundreds = {'0': '', '1': 'cento', '2': 'duecento', '3': 'trecento',
                '4': 'quattrocento', '5': 'cinquecento', '6': 'seicento',
                '7': 'settecento', '8': 'ottocento', '9': 'novecento'}

    def _group(s):
        # Spell one zero-padded three-digit group '000'..'999' ('' for '000').
        h = hundreds[s[0]]
        if s[1] == '1':
            rest = teens[s[1:]]  # 10-19 are irregular single words
        else:
            t = tens[s[1]]
            # tens elide their final vowel before 'uno' and 'otto'
            if t and s[2] in '18':
                t = t[:-1]
            rest = t + units[s[2]]
        # hundreds elide their final 'o' only before 'ottanta'
        # (fix: the original performed this check where el[1] == '1' held)
        if h and s[1] == '8':
            h = h[:-1]
        return h + rest

    def _scale(word, one_form, many_suffix):
        # Attach a scale word (mila / milioni / miliardi) to a spelled
        # group; an all-zero group contributes nothing (fix: the
        # original emitted a bare 'mila'/'milioni' for empty groups).
        if not word:
            return ''
        if word == 'uno':
            return one_form
        return word + many_suffix

    # Split the 12 zero-padded digits into four groups of three:
    # billions, millions, thousands, units.
    digits = str(n).zfill(12)
    groups = [_group(digits[i:i + 3]) for i in range(0, 12, 3)]
    return (_scale(groups[0], 'unmiliardo', 'miliardi') +
            _scale(groups[1], 'unmilione', 'milioni') +
            _scale(groups[2], 'mille', 'mila') +
            groups[3])
def mille(s):
    """Attach the thousands scale to the spelled group *s*.

    'uno' -> 'mille'; any other non-empty group gets the 'mila' suffix.
    Bug fix: an empty group (all-zero thousands, e.g. in 1000000) now
    contributes nothing instead of a stray 'mila'.
    """
    if not s:
        return ""
    if s == "uno":
        return "mille"
    return s + "mila"
def milioni(s):
    """Attach the millions scale to the spelled group *s*.

    'uno' -> 'unmilione'; any other non-empty group gets 'milioni'.
    Bug fix: an empty group (all-zero millions, e.g. in 1000000000)
    now contributes nothing instead of a stray 'milioni'.
    """
    if not s:
        return ""
    if s == "uno":
        return "unmilione"
    return s + "milioni"
def miliardi(s):
    """Attach the billions scale to the spelled group *s*.

    'uno' -> 'unmiliardo'; any other non-empty group gets 'miliardi'.
    Bug fix: an empty group now contributes nothing instead of a
    stray 'miliardi', consistent with mille/milioni.
    """
    if not s:
        return ""
    if s == "uno":
        return "unmiliardo"
    return s + "miliardi"
| true |
10ec8d222ccd85cd78c95ff81a40036a00a9f499 | Python | andre-jeon/daily_leetcode | /Week 4/4-12-21/sortArrayByParity.py | UTF-8 | 1,294 | 3.859375 | 4 | [] | no_license | '''
Given an array A of non-negative integers, return an array consisting of all the even elements of A, followed by all the odd elements of A.
You may return any answer array that satisfies this condition.
Example 1:
Input: [3,1,2,4]
Output: [2,4,3,1]
The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
'''
class Solution(object):
    """LeetCode 905 -- Sort Array By Parity."""

    @staticmethod
    def sortArrayByParity(A):
        """Return the elements of A with all evens first, then all odds.

        Relative order inside each parity group is preserved.  Assumes
        non-negative integers, per the problem statement.

        :type A: List[int]
        :rtype: List[int]
        """
        # Fixes vs. the original: removed the large commented-out dead
        # code, the redundant second loop, and the unreachable second
        # return statement; @staticmethod makes the method callable on
        # instances too (the original had no `self`, so instance calls
        # raised TypeError), while Solution.sortArrayByParity(A) keeps
        # working unchanged.
        evens = [x for x in A if x % 2 == 0]
        odds = [x for x in A if x % 2 == 1]
        return evens + odds
if __name__ == "__main__":
    # Demo run; expected output: [2, 4, 3, 1].
    # Bug fix: the original called sortArrayByParity as a bare name,
    # which raises NameError -- the function is defined on the
    # Solution class, so qualify the call.
    A = [3, 1, 2, 4]
    print(Solution.sortArrayByParity(A))