blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a91eb375ccacec93df14d1c6593ca58ed31b76b1 | Python | Merical/Quantization_Pytorch | /scripts/post_training_quantization.py | UTF-8 | 3,825 | 2.59375 | 3 | [] | no_license | import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision
from torch.quantization import QuantStub, DeQuantStub
import time
class CNN(nn.Module):
    """Two-block CNN for 28x28 single-channel input (MNIST-style),
    wrapped with quantization stubs for post-training quantization.

    Attribute names (quant, dequant, conv1, conv2, out) must stay stable:
    state_dict checkpoints are keyed on them.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Stubs mark the float <-> quantized boundaries for torch.quantization.
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        # Block 1: (1, 28, 28) -> (128, 28, 28) -> pooled to (128, 14, 14).
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=128, kernel_size=5,
                      stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Block 2: (128, 14, 14) -> (512, 14, 14) -> pooled to (512, 7, 7).
        self.conv2 = nn.Sequential(
            nn.Conv2d(128, 512, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Fully connected classifier over the flattened feature map.
        self.out = nn.Linear(512 * 7 * 7, 10)

    def forward(self, x):
        """Return class logits of shape (batch, 10)."""
        x = self.quant(x)
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.reshape(x.size(0), -1)  # flatten to (batch, 512*7*7)
        logits = self.out(x)
        return self.dequant(logits)
def print_size_of_model(model):
    """Serialize `model`'s state_dict to a temporary file, print its size
    in megabytes, then delete the file."""
    tmp_path = "temp.p"
    torch.save(model.state_dict(), tmp_path)
    size_mb = os.path.getsize(tmp_path) / 1e6
    print('Size (MB):', size_mb)
    os.remove(tmp_path)
# Benchmark script: compares float CPU, float GPU, default-qconfig quantized,
# and per-channel ('fbgemm') quantized inference on the first MNIST test digits.
test_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=False
)
# NOTE(review): Variable/volatile and .test_data/.test_labels are deprecated
# torch/torchvision APIs — confirm the installed versions still expose them.
test_x = Variable(torch.unsqueeze(test_data.test_data,dim=1),volatile=True).type(torch.FloatTensor)[:2000]/255.
test_y = test_data.test_labels[:2000]
model = CNN()
model.load_state_dict(torch.load('./models/mnist_cnn.pth'))
model.eval()
print_size_of_model(model)
# --- Float inference on CPU, averaged over 100 runs of a 10-image batch. ---
print("Float CPU test:")
tic = time.time()
for _ in range(100):
    test_output = model(test_x[:10])
    pred_y = torch.max(test_output,1)[1].data.numpy().squeeze()
toc = time.time()
print(pred_y,'prediction number')
print(test_y[:10].numpy(),'real number')
print("Cost time {} seconds.".format((toc-tic)/100))
# model.fuse_model()
# --- Same benchmark on GPU (requires CUDA). ---
print("Float GPU Test")
model = model.cuda()
test_x_g, test_y_g = test_x.cuda(), test_y.cuda()
tic = time.time()
for _ in range(100):
    test_output = model(test_x_g[:10])
    pred_y = torch.max(test_output,1)[1].cpu().data.numpy().squeeze()
toc = time.time()
print(pred_y,'prediction number')
print(test_y[:10].numpy(),'real number')
print("Cost time {} seconds.".format((toc-tic)/100))
# --- Post-training static quantization with the default (per-tensor) qconfig. ---
print("Quant CPU test:")
model = model.cpu()
model.qconfig = torch.quantization.default_qconfig
print(model.qconfig)
torch.quantization.prepare(model, inplace=True)
# NOTE(review): no calibration batches are run between prepare() and convert(),
# so the observers never see data — verify quantization ranges are meaningful.
torch.quantization.convert(model, inplace=True)
print_size_of_model(model)
tic = time.time()
for _ in range(100):
    test_output = model(test_x[:10])
    pred_y = torch.max(test_output,1)[1].data.numpy().squeeze()
toc = time.time()
print(pred_y,'prediction number')
print(test_y[:10].numpy(),'real number')
print("Cost time {} seconds.".format((toc-tic)/100))
# --- Repeat with the 'fbgemm' qconfig (per-channel weight quantization). ---
print("Perchannel Quant CPU test:")
del model
model = CNN()
model.load_state_dict(torch.load('./models/mnist_cnn.pth'))
model.eval()
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
print(model.qconfig)
torch.quantization.prepare(model, inplace=True)
torch.quantization.convert(model, inplace=True)
print_size_of_model(model)
tic = time.time()
for _ in range(100):
    test_output = model(test_x[:10])
    pred_y = torch.max(test_output,1)[1].data.numpy().squeeze()
toc = time.time()
print(pred_y,'prediction number')
print(test_y[:10].numpy(),'real number')
print("Cost time {} seconds.".format((toc-tic)/100))
# Persist the quantized model as TorchScript.
torch.jit.save(torch.jit.script(model), './models/mnist_cnn_per_channel_quant.pth')
| true |
e2945c22b7f2f3b07c9b600275276b90ccb74bcf | Python | barrven/python-expenseTracker | /database.py | UTF-8 | 2,333 | 3.09375 | 3 | [] | no_license | ##########################################
# Barrington Venables #
# 101189284 #
# comp2152 assignment - Expense manager #
##########################################
import sqlite3
from contextlib import closing
from month import *
class Database:
    """Thin SQLite data-access layer for the expense tracker's `master` table.

    Expected schema: master(year, month, rent, groceries, utilities,
    transit, shopping, entertainment).
    """

    def __init__(self, filePath):
        """Open (or create) the SQLite database file at `filePath`."""
        self.conn = sqlite3.connect(filePath)
        # sqlite3.Row lets callers access columns by name (row['rent']).
        self.conn.row_factory = sqlite3.Row

    def getMonths(self, year):
        """Return a list of Month objects for `year`, ordered by month number."""
        with closing(self.conn.cursor()) as c:
            c.execute('''select * from master where year = ? order by month asc''',
                      (year,))
            rows = c.fetchall()
        return [
            Month(
                row['month'],
                row['rent'],
                row['groceries'],
                row['utilities'],
                row['transit'],
                row['shopping'],
                row['entertainment'],
            )
            for row in rows
        ]

    def addMonthToDb(self, year, number, rent, groceries, utilities, transit,
                     shopping, entertainment):
        """Insert one month of expenses. Return True on success, False on error."""
        args = (year, number, rent, groceries, utilities, transit, shopping,
                entertainment)
        try:
            with closing(self.conn.cursor()) as c:
                c.execute('''insert into master values(?, ?, ?, ?, ?, ?, ?, ?)''',
                          args)
            self.conn.commit()
            return True
        except sqlite3.Error as e:
            print('An error occurred: ', e)
            return False

    def addMonthToDb_flex(self, year_num, month_num, categories_list):
        """Insert a row with a variable number of expense categories.

        Builds an INSERT with one placeholder per value, so it works for any
        column count matching the table. Returns True on success, False on
        error. The caller's `categories_list` is not modified (the previous
        version aliased it and prepended values in place, and also left a
        debug print of the arguments).
        """
        values = [year_num, month_num] + list(categories_list)
        args = tuple(values)
        placeholders = ', '.join('?' * len(args))
        query = 'insert into master values({})'.format(placeholders)
        try:
            with closing(self.conn.cursor()) as c:
                c.execute(query, args)
            self.conn.commit()
            return True
        except sqlite3.Error as e:
            print('An error occurred: ', e)
            return False
0d217624c844ef00710bb5bd74a613cb54036e73 | Python | ayyappa1/application-validate-ip | /validate.py | UTF-8 | 725 | 3.03125 | 3 | [] | no_license | import unittest
from urllib.request import urlopen
import json
# unit testing - unittest.TestCase is used to create test cases by subclassing it
class ApplicateionTest(unittest.TestCase):
    """Checks that this host's public IP (as reported by ipinfo.io) looks
    like a valid dotted-quad IPv4 address. Requires network access."""

    # Returns True if host ip address matches in respose ip address.
    def test_ip_addr(self):
        # Network call: fetch this host's public IP metadata as JSON.
        response = json.load(urlopen('http://ipinfo.io/json'))
        print(response)
        try:
            print("Hostname_IP : ",response['ip'])
            # Regex accepts exactly four octets, each in the range 0-255.
            self.assertRegex(response['ip'],r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
        except:
            # NOTE(review): this bare except also swallows the assertion
            # failure, so the test can never fail — confirm that is intended.
            print("Please check your hostname type")
# Run the test suite when executed directly (unittest CLI entry point).
if __name__ == '__main__':
    unittest.main()
| true |
dbeea0b41cac7559bd9f9e95b2fa2c5b567410bc | Python | Nwebb03/Depth-First-Python | /Graph-Processing/Depth_First.py | UTF-8 | 1,117 | 3.140625 | 3 | [] | no_license | import Graph_Reading as GR
import copy
import pandas as pd
#Basic Load Graph
# Load a graph (adjacency mapping, presumably node -> neighbours — TODO
# confirm against Graph_Reading.Read) and ask the user for endpoints.
graph = GR.Read("Graph-Data\Graph1")
startingpoint = input("Starting Node? ")
endpoint = input("End node? ")
# Create a processing queue.
# Every item on the queue is a candidate path (possible path to the goal,
# stored as a list of node names).
queue = pd.DataFrame(columns= ["To_Extend"])
queue.loc[1, "To_Extend"]= [startingpoint]
print (queue)
# Loop conditional.
finished = False
print(graph)
while((queue.shape[0] != 0) & (finished == False)):
    # Candidate path taken from the queue for processing.
    # NOTE(review): always reads row label 1; after the drop below this
    # label no longer exists, so a second iteration would raise KeyError.
    to_extend = queue.loc[1, "To_Extend"]
    print(to_extend)
    queue = queue.drop(index=1)
    # Main loop that "searches": extend the candidate path by one neighbour.
    for node in graph[to_extend[0]]:
        """extended = np.copy(to_extend)
        extended = np.append(extended, node)
        x = np.ndarray((2,2,), [extended])
        queue = np.append(x, queue)"""
        print(node)
        extended = copy.deepcopy(to_extend)
        extended.append(node)
        print(extended)
        # NOTE(review): DataFrame.append returns a new frame (and is
        # deprecated); the result is discarded here, so the queue is never
        # actually extended — the search cannot progress.
        queue.append(extended)
    print(queue)
    # NOTE(review): set unconditionally, so the while loop always exits
    # after the first iteration — presumably meant to be set only when
    # `endpoint` is reached.
    finished = True
81509d452449d035b0b709e7095e66e4e1291fcf | Python | srea8/cookbook_python | /08/SuperUserFunction.py | UTF-8 | 4,605 | 3.84375 | 4 | [] | no_license | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Srea
# @Date: 2019-12-04 23:15:37
# @Last Modified by: srea
# @Last Modified time: 2019-12-08 17:54:24
#****************************#
#super的用法
#类中的命名方式
#****************************#
####super的用法
class MyBaseClass:
    """Trivial base class that just records a single value."""

    def __init__(self, value):
        self.value = value
class TimesFive(MyBaseClass):
    """Initialise through MyBaseClass, then multiply the stored value by 5."""

    def __init__(self, value):
        super(TimesFive, self).__init__(value)
        self.value = self.value * 5
class PlusTwo(MyBaseClass):
    """Initialise through MyBaseClass, then add 2 to the stored value."""

    def __init__(self, value):
        super(PlusTwo, self).__init__(value)
        self.value = self.value + 2
### With cooperative super() __init__ chains, the class later in the MRO applies its change first (here: +2 before *5)
class GoodWay(TimesFive, PlusTwo):
    """Diamond inheritance: one super() call runs the whole MRO chain,
    producing 5 * (value + 2)."""

    def __init__(self, value):
        super(GoodWay, self).__init__(value)
# Demo: PlusTwo's +2 runs before TimesFive's *5, so 5 -> 7 -> 35.
foo = GoodWay(5)
print('Should be 5*(5+2) and is ', foo.value)
print(GoodWay.mro())
###super example2
class FooParent(object):
    """Demo parent: sets an attribute and prints from __init__ and bar()."""

    def __init__(self):
        self.parent = 'I\'m the parent.'
        print('Parent')

    def bar(self, message):
        """Echo `message`, labelled as coming from the parent class."""
        print("%s from Parent" % message)
class FooChild(FooParent):
    """Demo child: delegates to FooParent via super() before its own work."""

    def __init__(self):
        # super(FooChild, self) resolves FooParent and runs its __init__ on
        # this instance, so self.parent is set before 'Child' is printed.
        super(FooChild, self).__init__()
        print('Child')

    def bar(self, message):
        """Run the parent's bar(), then print the child's own lines."""
        super(FooChild, self).bar(message)
        print('Child bar fuction')
        print(self.parent)
# Demo: construct the child (runs both __init__s) and call its bar().
if __name__ == '__main__':
    fooChild = FooChild()
    fooChild.bar('HelloWorld')
### Naming conventions in Python classes, used together with super()
class A(object):
    """Demonstrates public, _protected-by-convention and __private
    (name-mangled) attributes and methods."""

    def __init__(self):
        self.string = 'A string'      # public attribute
        self._string = 'A _string'    # non-public by convention
        self.__string = 'A __string'  # name-mangled to _A__string

    def fun(self):
        """Public method using the public attribute."""
        return self.string + ' fun-A'

    def _fun(self):
        """Non-public by convention; still callable from outside."""
        print(1)
        return self._string + ' _fun-A'

    def __fun(self):
        """Name-mangled to _A__fun; not reachable as a.__fun externally."""
        return self.__string + ' __fun-A'

    def for__fun(self):
        """Public gateway that calls the private method internally."""
        return self.__fun()
class B(A):
    """Subclass that overrides the public attribute and shows two ways of
    calling the parent's _fun (super() and an explicit unbound call)."""

    def __init__(self):
        super().__init__()
        self.string = 'B string'

    def _fun(self):
        # Run the parent version once for its side effect (return discarded).
        super()._fun()
        print('{}'.format('superB'))
        # Call the parent version again explicitly and return its result.
        return A._fun(self)
# Demo: exercise the public / protected / mangled members of A and B.
a=A()
print (a.string)
print (a._string)
# print a.__string  # not accessible (name-mangled)
print (a.fun())
print (a._fun())
# print (a.__fun())  # not accessible from outside
print (a._A__fun())  # accessible via the mangled name
print (a.for__fun())
b=B()
print (b.fun())
print (b.fun().__len__())  # dunder method defined by the language
print (b._fun())
#### self and super
class FatherClass(object):
    """Demo parent that announces its name on construction."""

    def __init__(self):
        super().__init__()
        self.name = 'fulei'
        print(self.name)

    def A(self):
        print('我是父类')  # "I am the parent class"
class SubClass(FatherClass):
    """Demo child: overrides A() and shows calling both the subclass and
    parent versions from B()."""

    def __init__(self, arg):
        super(SubClass, self).__init__()
        self.arg = arg
        print(self.arg)

    def A(self):
        print('我是子类')  # "I am the subclass"

    def B(self):
        self.A()       # subclass version
        super().A()    # parent version
# Demo: construct the child and call both A() implementations via B().
sub = SubClass('zilei')
sub.B()
#####super(type,type)
class Person(object):
    """Property demo: `name` is validated on set, guarded on delete, and
    prints a marker on every access."""

    def __init__(self, name):
        # Goes through the property setter, so validation applies here too.
        self.name = name

    @property
    def name(self):
        """Getter: return the backing _name attribute."""
        print('fulei_get')
        return self._name

    @name.setter
    def name(self, value):
        """Setter: only plain strings are accepted."""
        print('fulei_set')
        if not isinstance(value, str):
            raise TypeError('Expected a string')
        self._name = value

    @name.deleter
    def name(self):
        """Deleter: deletion is forbidden."""
        raise AttributeError("Can't delete attribute")
class SubPerson(Person):
    """Extends Person's `name` property while still delegating storage to it."""

    @property
    def name(self):
        print('Getting name')
        # super() inside the method binds normally, so this invokes
        # Person's property getter for this instance.
        return super().name

    @name.setter
    def name(self, value):
        print('Setting name to', value)
        # super(SubPerson, SubPerson) — second argument a CLASS, not an
        # instance — returns an unbound super whose lookup starts after
        # SubPerson in the MRO without triggering descriptor binding.
        # That exposes Person's raw property object, whose __set__ we then
        # invoke explicitly for `self`. Plain super().name = value would not
        # work here because the setter has no bound parent property.
        super(SubPerson, SubPerson).name.__set__(self, value)
        # i.e. reach the parent class's descriptor, then call its method;
        # the key is how super(SubPerson, SubPerson) resolves attributes.

    @name.deleter
    def name(self):
        print('Deleting name')
        # Same unbound-super trick to reach Person's property __delete__
        # (which raises AttributeError by design).
        super(SubPerson, SubPerson).name.__delete__(self)
# Demo: inspect how the property descriptors resolve through the MRO.
sp = SubPerson('liming')
print(sp._name)
print(sp.__class__.mro())
print(super(SubPerson,sp).name)
# print(super(SubPerson,self).name)
print(Person.name)
print(Person.__mro__)
print(super(SubPerson,SubPerson))  ## super(SubPerson, SubPerson) refers to Person (the class after SubPerson in the MRO)
print(super(Person,SubPerson))  ## refers to object (the class after Person in the MRO)
| true |
b383e293758e15699734d481b9309238b1777196 | Python | rjbarber/Python | /If Statement.py | UTF-8 | 75 | 2.953125 | 3 | [] | no_license | # Decision Making Statements
# Minimal if-statement demo.
a = 10
if a == 10:
    print("The value of a is 10")
| true |
fc2f059fbe126a2fd27b0a7a084a17608ad33c6e | Python | hemanturvyesoftcore/Data-Science-Projects | /PythonPrograms/queue.py | UTF-8 | 153 | 3.328125 | 3 | [] | no_license | from collections import deque
# deque supports O(1) appends on both ends, unlike a plain list.
queue = deque([1, 2, 3, 4, 5, 6, 7])
queue.appendleft(89)  # push on the left end
queue.append(77)      # push on the right end
print(queue)
print(type(queue))
print(type(deque))
| true |
522498c7ab0cb49b57e7ed6c2301a89089d94980 | Python | MinecraftDawn/LeetCode | /Easy/88. Merge Sorted Array.py | UTF-8 | 577 | 2.96875 | 3 | [] | no_license | class Solution:
def merge(self, nums1: list, m: int, nums2: list, n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
index = 0
k = 0
while nums2 and index < m + k:
if nums2[0] <= nums1[index]:
nums1.insert(index,nums2.pop(0))
k += 1
else:
index += 1
while nums2:
nums1[index] = nums2.pop(0)
index += 1
while k:
nums1.pop()
k -= 1 | true |
ccf216434378f7561833a01fdec336b6d2d86622 | Python | kaneki666/Hackerrank-Solve | /a game of two stacks.py | UTF-8 | 1,894 | 3.484375 | 3 | [] | no_license | class Stack:
lis = []
def __init__(self, l):
self.lis = l[::-1]
def push(self, data):
self.lis.append(data)
def peek(self):
return self.lis[-1]
def pop(self):
self.lis.pop()
def is_empty(self):
return len(self.lis) == 0
# Greedy solution to the "Game of Two Stacks" problem: maximise the number
# of pops from stacks A and B without the running sum exceeding x.
# number of test cases
tests = int(input())
for i in range(tests):
    na, nb, x = map(int, input().split(' '))
    a = list(map(int, input().split(' ')))
    b = list(map(int, input().split(' ')))
    temp = []
    stk_a = Stack(a)
    stk_b = Stack(b)
    score = 0
    count = 0
    # first taking elements from stack A , till score becomes just less than desired total
    for j in range(len(a)):
        if score + stk_a.peek() <= x:
            score += stk_a.peek()
            count += 1
            temp.append(stk_a.peek())
            # storing the popped elements in temporary stack such that we can again remove them from score
            # when we find better element in stack B
            stk_a.pop()
        # (once peek() exceeds the budget the same element stays on top, so
        # the remaining iterations are no-ops — the greedy prefix is fixed)
    # this is maximum number of moves using only stack A
    max_now = count
    # now iterating through stack B for element lets say k which on adding to total score should be less than desired
    # or else we will remove each element of stack A from score till it becomes just less than desired total.
    for k in range(len(b)):
        score += stk_b.peek()
        stk_b.pop()
        count += 1
        # Give back A's most recent picks until the budget x is respected.
        while score > x and count > 0 and len(temp) > 0:
            count = count - 1
            score = score - temp[-1]
            temp.pop()
        # if the score after adding element from stack B is greater than max_now then we have new set of moves which will also lead
        # to just less than desired so we should pick maximum of both
        if score <= x and count > max_now:
            max_now = count
    print(max_now)
8c0af1830442ee2bac740a0f6cf8ff387286be34 | Python | kosmitive/bootstrapped-dqn | /environments/GeneralOpenAIEnvironment.py | UTF-8 | 4,069 | 2.640625 | 3 | [
"MIT"
] | permissive | # MIT License
#
# Copyright (c) 2017 Markus Semmler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gym
import numpy as np
import tensorflow as tf
from gym.spaces import Box
from gym.spaces import Discrete
from environments.Environment import Environment
from util.Space import Space
from multiprocessing.dummy import Pool as ThreadPool
class GeneralOpenAIEnvironment(Environment):

    def __init__(self, env_name, N):
        """Constructs a new general environment. In detail one can create
        multiple gym environments, which are all capable of retaining their
        own state, and step them in parallel via a thread pool.

        Args:
            env_name - The name of the environment as registered in open ai gym.
            N - The number of parallel environment copies to initialize.
        """
        # save the observation space
        env = gym.make(env_name)
        eos = env.observation_space
        aos = env.action_space
        # Only continuous observation boxes with discrete actions are supported.
        assert isinstance(eos, Box)
        assert isinstance(aos, Discrete)
        # create the continuous space
        state_space = Space(list(zip(eos.low, eos.high)))
        action_space = Space([aos.n])
        super().__init__("openai_{}".format(env_name), state_space, action_space, N)
        # init the other environments
        self.envs = [env] + [gym.make(env_name) for _ in range(N - 1)]
        # set up a thread pool (chunk size balances dispatch overhead vs. load)
        threads = 16
        self.chunks = np.maximum(int(N / threads + 0.5), 10)
        self.pool = ThreadPool(threads)
        self.indices = list(range(N))

    # --- NP ---
    def render(self, w):
        """Simply render the environment of the passed index `w`."""
        self.envs[w].render()

    # --- TF ---
    def _next_observation_graph(self, actions):
        """Build the TF op stepping all environments; receives a vector of N actions."""
        # py_func bridges the Python stepping code into the TF graph.
        next_observations, rewards, dones =\
            tf.py_func(self._one_step, [tf.cast(actions, tf.int64)],
                       [tf.float64, tf.float64, tf.bool])
        self.next_observations = tf.cast(next_observations, tf.float32)
        return self.next_observations, tf.cast(rewards, tf.float32), tf.cast(dones, tf.int32)

    def _reset_graph(self):
        """Build the TF op that executes the reset function for each environment."""
        # map over all observations
        return tf.py_func(self._reset_envs, [], tf.float64)

    # --- PY Funcs ---
    def _reset(self, w):
        """Reset environment `w` and return its initial state."""
        return self.envs[w].reset()

    def _step(self, w, state, action):
        """Pass back reward, next state and if the episode
        is finished. (r, ns, d) — `state` is unused; gym keeps its own state."""
        return self.envs[w].step(action)

    def _reset_envs(self):
        """Reset all environments in parallel; returns stacked observations."""
        obs = self.pool.imap(self._reset, self.indices, self.chunks)
        # NOTE(review): close()/join() permanently shuts this pool down, yet
        # _one_step reuses self.pool afterwards — confirm a second call does
        # not raise; a fresh pool per call (or no close) may be intended.
        self.pool.close()
        self.pool.join()
        return np.stack(obs, axis=0)

    def _one_step(self, action):
        """Step every environment with its action; returns [obs, rewards, dones]."""
        # action is presumably an (N, 1) array — TODO confirm shape at caller.
        obs = self.pool.imap(lambda k: self._step(k, None, action[k][0]), self.indices, self.chunks)
        # NOTE(review): same pool close/reuse concern as in _reset_envs.
        self.pool.close()
        self.pool.join()
        # Transpose list of (obs, reward, done) triples into three stacked arrays.
        tobs = [np.stack([o[k] for o in obs], axis=0) for k in range(3)]
        return tobs
7e44f90f4e9ee575e0cb35ffe1ff11ac66fa3e93 | Python | towardsRevolution/Computer-Vision-Algorithms | /PA1/histEq.py | UTF-8 | 2,409 | 3.3125 | 3 | [] | no_license | import cv2
import numpy,math
import matplotlib.pyplot as plt
__author__ = "Aditya Pulekar"
def main():
    """Load an image, show its grayscale histogram/PDF/CDF, then display a
    histogram-equalized version. Interactive: blocks on cv2/matplotlib windows."""
    L = int(input("Enter the number of gray-scale levels to be considered for the image (256): "))
    #Reading a colored image
    img = cv2.imread('hazecity.png',1)
    cv2.imshow('Aditya Pulekar (Colored)',img)
    cv2.waitKey(0)
    #Procuring the dimensions of the input image
    details = img.shape
    #Converting the color image to a gray scale image
    #Faster (Using Numpy)
    # Weights follow BGR channel order (cv2 loads BGR); normalized to [0, 1].
    # NOTE(review): 0.589 differs from the usual 0.587 luma weight — confirm.
    rgbToGray = numpy.array([0.114,0.589,0.299])
    img_gray = numpy.sum(img * rgbToGray, axis=-1)/(L-1)
    cv2.imshow('Aditya Pulekar (Gray)',img_gray)
    cv2.waitKey(0)
    #Plotting the histogram
    channels = 0
    cumuSum = 0
    #Plotting the gray scale histogram
    hist = [0 for itr in range(L)]
    for i in range(details[0]):
        for j in range(details[1]):
            hist[(img_gray[i,j]*(L-1)).__int__()]+=1
    plt.figure(1)
    plt.subplot(311)
    plt.plot(hist, 'ro-')
    plt.title("Histogram,PDF and CDF of the gray-scale image")
    plt.xlabel("Pixel Intensities--->")
    plt.ylabel("Frequency of Pixels--->")
    sumOfPixelValues = details[0]*details[1]
    #Plotting the probability distribution function(PDF) and cumulative
    #distribution function(CDF)
    pdf_gray = [(index.__float__()/sumOfPixelValues) for index in hist]
    cdf_gray = []
    for i in range(len(pdf_gray)):
        cumuSum += pdf_gray[i]
        cdf_gray.append(cumuSum)
    plt.subplot(312)
    plt.plot(pdf_gray, 'go-')
    plt.xlabel("Pixel Intensities--->")
    plt.ylabel("Probability -->")
    plt.subplot(313)
    plt.plot(cdf_gray, 'bo-')
    plt.xlabel("Pixel Intensities -->")
    plt.ylabel("Cumu Probabilty -->")
    plt.show()
    #Histogram Equalization
    cdf_new = []
    # NOTE(review): cumuSum is not reset to 0 here — it still holds ~1.0 from
    # the PDF accumulation above, so every cdf_new entry is offset; confirm.
    for i in range(len(hist)):
        cumuSum += hist[i]
        cdf_new.append(cumuSum)
    print(len(cdf_new))
    hist_Eq = []
    # Standard equalization mapping: (cdf - cdf_min) / (N - cdf_min) * (L-1).
    for index in range(L):
        hist_Eq.append(math.floor(((cdf_new[index] - min(cdf_new))/((details[0]*details[1]) - min(cdf_new)))*(L-1)))
    plt.figure(2)
    #Equalized Image
    newImage = numpy.empty([details[0],details[1]])
    for rows in range(details[0]):
        for cols in range(details[1]):
            newImage[rows,cols] = hist_Eq[(img_gray[rows,cols]*(L-1)).__int__()]
    plt.title("Image after Histogram Equalization")
    plt.imshow(newImage,cmap=plt.cm.gray)
    plt.show()

main()
6630cbac994857f9dbb9c033cabac3a6c4b2d918 | Python | T1bzt/hanabi_ai | /tools/tests/hanabi_table_tests.py | UTF-8 | 8,187 | 3.1875 | 3 | [
"MIT"
] | permissive | import unittest
from tools.hanabi_table import HanabiTable
from tools.hanabi_hand import HanabiHand
from tools.hanabi_card import HanabiCard, HanabiColor
from tools.hanabi_deck import HanabiVariant
def diagnose(table):
    """Debug helper: print each player's hand as seen by the other player,
    followed by the table summary and the scored cards."""
    for player, viewer in ((0, 1), (1, 0)):
        print("Player {}".format(player))
        print(table.info_for_player(viewer)["hands"][player])
    print(str(table))
    print(table.scored_cards)
class HanabiTableTests(unittest.TestCase):
    """Unit tests for HanabiTable: a fresh 2-player basic-variant table with
    a fixed seed (1) per test, so deals and hand contents are deterministic."""

    def setUp(self):
        self.table = HanabiTable(2, 1, HanabiVariant.basic)

    def test_table_play_card(self):
        self.assertFalse(self.table.is_game_over())
        self.assertEqual(len(self.table.deck), 40)
        self.table.play_card(0, 0)
        self.assertFalse(self.table.is_game_over())
        self.assertEqual(len(self.table.deck), 39)

    def test_table_discard_card(self):
        self.assertFalse(self.table.is_game_over())
        self.assertEqual(len(self.table.deck), 40)
        self.assertEqual(len(self.table.discard), 0)
        self.table.discard_card(0, 0)
        self.assertFalse(self.table.is_game_over())
        self.assertEqual(len(self.table.deck), 39)
        self.assertEqual(len(self.table.discard), 1)

    def test_table_disclose_color(self):
        # "??" = nothing known; disclosing RED reveals color on matching cards.
        self.assertEqual(["??", "??", "??", "??", "??"], self.table.info_for_player(0)["hands"][0])
        self.table.disclose_color(0, 0, HanabiColor.RED)
        self.assertEqual(["??", "??", "??", "??", "R?"], self.table.info_for_player(0)["hands"][0])

    def test_table_disclose_rank(self):
        self.assertEqual(["??", "??", "??", "??", "??"], self.table.info_for_player(0)["hands"][0])
        self.table.disclose_rank(0, 0, 4)
        self.assertEqual(["??", "??", "?4", "??", "??"], self.table.info_for_player(0)["hands"][0])

    def test_table_is_game_over_too_many_mistakes(self):
        # Three misplays exhaust the mistake allowance and end the game.
        self.assertFalse(self.table.is_game_over())
        self.table.play_card(0,1)
        self.assertFalse(self.table.is_game_over())
        self.assertEqual(self.table.mistakes_left, 2)
        self.table.play_card(1,0)
        self.assertFalse(self.table.is_game_over())
        self.assertEqual(self.table.mistakes_left, 1)
        self.table.play_card(1,0)
        self.assertTrue(self.table.is_game_over())
        self.assertEqual(self.table.mistakes_left, 0)

    def test_table_is_game_over_no_more_cards(self):
        # Discarding every remaining card runs out the deck and ends the game.
        self.assertFalse(self.table.is_game_over())
        for i in range(0, 42):
            self.assertEqual(len(self.table.discard), i)
            self.assertFalse(self.table.is_game_over())
            self.table.discard_card(0,0)
        self.assertTrue(self.table.is_game_over())

    def test_table_is_game_over_game_won(self):
        # Scripted sequence (valid for seed 1) that plays all 25 cards.
        self.table.play_card(1,2)
        self.assertEquals(1, self.table.score())
        self.table.play_card(0,0)
        self.assertEquals(2, self.table.score())
        self.table.play_card(0,4)
        self.assertEquals(3, self.table.score())
        self.table.play_card(1,0)
        self.assertEquals(4, self.table.score())
        self.table.discard_card(1,1)
        self.table.discard_card(0,3)
        self.table.play_card(0,4)
        self.assertEquals(5, self.table.score())
        self.table.play_card(1,4)
        self.assertEquals(6, self.table.score())
        self.table.discard_card(1,2)
        self.table.play_card(1,4)
        self.assertEquals(7, self.table.score())
        self.table.play_card(0,4)
        self.assertEquals(8, self.table.score())
        self.table.play_card(1,0)
        self.assertEquals(9, self.table.score())
        self.table.play_card(0,0)
        self.assertEquals(10, self.table.score())
        self.table.discard_card(0,3)
        self.table.discard_card(0,3)
        self.table.discard_card(1,3)
        self.table.discard_card(1,3)
        self.table.play_card(1, 3)
        self.assertEquals(11, self.table.score())
        self.table.discard_card(1, 3)
        self.table.play_card(1,4)
        self.assertEquals(12, self.table.score())
        self.table.play_card(0,2)
        self.assertEquals(13, self.table.score())
        self.table.play_card(0,0)
        self.assertEquals(14, self.table.score())
        self.table.play_card(0,0)
        self.assertEquals(15, self.table.score())
        self.table.play_card(0,3)
        self.assertEquals(16, self.table.score())
        self.table.play_card(0,1)
        self.assertEquals(17, self.table.score())
        self.table.play_card(0,1)
        self.assertEquals(18, self.table.score())
        self.table.discard_card(0,0)
        self.table.discard_card(0,4)
        self.table.discard_card(0,1)
        self.table.play_card(0,3)
        self.assertEquals(19, self.table.score())
        self.table.play_card(1,2)
        self.assertEquals(20, self.table.score())
        #sweep green
        self.table.play_card(1,4)
        self.assertEquals(21, self.table.score())
        self.table.play_card(0,3)
        self.assertEquals(22, self.table.score())
        self.table.play_card(1,0)
        self.assertEquals(23, self.table.score())
        self.table.play_card(1,1)
        self.assertEquals(24, self.table.score())
        self.table.play_card(1,0)
        self.assertEquals(25, self.table.score())
        # A perfect game: no mistakes consumed.
        self.assertEquals(self.table.mistakes_left, 3)
        self.assertTrue(self.table.is_game_over())

    def test_table_str(self):
        self.assertEqual("Score: 0, Cards remaining: 40, Discarded: 0, Disclosures left: 8, Mistakes left: 3", str(self.table))
        self.table.play_card(1,0)
        self.assertEqual("Score: 0, Cards remaining: 39, Discarded: 1, Disclosures left: 8, Mistakes left: 2", str(self.table))
        self.table.disclose_color(0, 0, HanabiColor.RED)
        self.assertEqual("Score: 0, Cards remaining: 39, Discarded: 1, Disclosures left: 7, Mistakes left: 2", str(self.table))
        self.table.discard_card(0,0)
        # Discarding restores a disclosure token (8 again).
        self.assertEqual("Score: 0, Cards remaining: 38, Discarded: 2, Disclosures left: 8, Mistakes left: 2", str(self.table))

    def test_table_info_for_player(self):
        # A player sees the opponent's actual cards but only "??" for their own.
        info = self.table.info_for_player(0)
        self.assertEqual(info["score"], 0)
        self.assertEqual(info["deck_size"], 40)
        self.assertEqual(len(info["discarded"]), 0)
        self.assertEqual(info["disclosures"], 8)
        self.assertEqual(info["mistakes_left"], 3)
        self.assertEqual(info["num_players"], 2)
        self.assertEqual(info["hands"][0], ["??", "??", "??", "??", "??"])
        self.assertEqual(info["hands"][1], ["R2", "W4", "W1", "Y1", "G3"])
        self.assertEqual(info["known_info"][0], ["??", "??", "??", "??", "??"])
        self.assertEqual(info["known_info"][1], ["??", "??", "??", "??", "??"])
        self.assertEqual(info["scored_cards"]["R"], 0)
        self.assertEqual(info["scored_cards"]["B"], 0)
        self.assertEqual(info["scored_cards"]["G"], 0)
        self.assertEqual(info["scored_cards"]["Y"], 0)
        self.assertEqual(info["scored_cards"]["W"], 0)
        # The rainbow/wildcard pile must not exist in the basic variant.
        self.assertTrue("*" not in info["scored_cards"])

    def test_table_score(self):
        self.assertEqual(0, self.table.score())

    def test_table_play_5_get_disclosure(self):
        # Completing a color (playing its 5) refunds one disclosure token.
        self.play_to_white_5()
        self.table.disclose_rank(0, 0, 0)
        self.assertEquals(7, self.table.disclosures)
        self.table.play_card(0, 0)
        self.assertEqual(8, self.table.disclosures)

    def test_table_play_5_no_extra_disclosure(self):
        # At the 8-token cap, playing a 5 must not exceed the cap.
        self.play_to_white_5()
        self.assertEquals(8, self.table.disclosures)
        self.table.play_card(0, 0)
        self.assertEqual(8, self.table.disclosures)

    def play_to_white_5(self):
        # Helper (not a test): scripted moves leaving the white 5 playable.
        self.table.play_card(0, 0)
        self.table.play_card(1, 2)
        self.table.play_card(1, 4)
        self.table.play_card(1, 0)
        self.table.discard_card(0, 3)
        self.assertEquals(8, self.table.disclosures)
        self.table.discard_card(0, 3)
        self.table.discard_card(1, 1)
        self.table.play_card(0, 4)
        self.table.play_card(0, 3)
        self.table.play_card(0, 4)
        self.table.play_card(1, 4)
        self.table.play_card(1, 0)
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
0524f592a0c294588defb7d8d5b28b84f61bd495 | Python | sandwu/leetcode_problems | /专题训练/数组/中等/从前序和中续遍历构造二叉树.py | UTF-8 | 697 | 3.671875 | 4 | [] | no_license |
"""
You may assume that duplicates do not exist in the tree.
For example, given
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
Return the following binary tree:
3
/ \
9 20
/ \
15 7
"""
class TreeNode:
    """Binary-tree node. `left` and `right` are initialised to None so every
    node is complete even before children are attached (the original left
    them undefined until assigned externally)."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Rebuild a binary tree (values assumed unique) from its preorder and
    inorder traversals."""

    def buildTree(self, preorder, inorder):
        # Empty slice: no subtree here.
        if not preorder:
            return None
        # Single node: leaf.
        if len(preorder) == 1:
            return TreeNode(preorder[0])
        # The first preorder value is the root; its position in the inorder
        # sequence splits the remaining values into left and right subtrees.
        root = TreeNode(preorder[0])
        split = inorder.index(root.val)
        root.left = self.buildTree(preorder[1:split + 1], inorder[:split])
        root.right = self.buildTree(preorder[split + 1:], inorder[split + 1:])
        return root
| true |
ba83e34b285cfbe78128276b7ce88747d7fa3a53 | Python | Myyyr/imageExplore | /utils.py | UTF-8 | 393 | 2.6875 | 3 | [] | no_license | import os
import numpy as np
import nibabel as nib
import pandas as pd
def nibfile(file):
    """Load a NIfTI file from `file` and return its voxel data array."""
    return nib.load(file).get_fdata()
def save_dict(sumary_dict, path="sumary.csv"):
    """Write a summary dict to CSV at `path` via a pandas DataFrame."""
    pd.DataFrame(sumary_dict).to_csv(path)
def save_image(img, path):
    """Serialize the array `img` to `path` in numpy's .npy format."""
    with open(path, 'wb') as f:
        np.save(f, img)
def load_image(path):
    """Load and return an array previously saved in numpy's .npy format."""
    with open(path, 'rb') as f:
        return np.load(f)
08a9a35558476573ff517943158e5a3d967de74d | Python | raiscreative/100-days-of-python-code | /day_003/divisibility_checker.py | UTF-8 | 506 | 4.1875 | 4 | [] | no_license | print('Welcome to the divisibility checker!')
game_on = 1
while game_on:
first = int(input('Type a large number, more than 3 digits,please.\n'))
second = int(input('Now type a number between 2 and 25.'))
if first % second == 0:
print(f'{first} is perfectly divisible with {second}.')
else:
print(f'{first} is not perfectly divisible with {second}.')
choice = input('Do you want to play again? (y/n)')
if choice.lower().startswith('n'):
game_on = 0 | true |
b4f96278d3d2c91f9b78dcf0503c3ad1d7045736 | Python | beyond-algorithms/JaeEun | /src/koitp/보물찾기.py | UTF-8 | 1,419 | 2.9375 | 3 | [] | no_license | from src.Test import Test as T
from collections import defaultdict
from heapq import *
def main():
    """Read t test cases from stdin; for each, build a unit-weight edge list
    from the clues and print the shortest-path result from ruin "1" to the
    last ruin. Output format: '#<case> <result>'."""
    t = int(input())
    for _ in range(t):
        # NOTE(review): `timeout` is read but never used — confirm intended.
        numberOfRuins, numberOfClues, timeout = map(int, input().strip().split())
        path = []
        for __ in range(numberOfClues):
            _from, _to = input().strip().split()
            # Every clue is a directed edge of weight 1.
            path.append((_from, _to, 1))
        T.info(path)
        ret = find(path, "1", str(numberOfRuins))
        T.info(ret)
        # NOTE(review): find() returns a (cost, path) tuple or inf, while the
        # expected output below uses plain integers / -1 — verify formatting.
        print("#" + str(_ + 1) + " " + str(ret))
def find(edges, f, t):
    """Dijkstra shortest path over directed weighted `edges` (src, dst, cost).

    Returns (cost, path) where path is a nested ('node', (...)) tuple ending
    at `f`, or float('inf') when `t` is unreachable from `f`.
    """
    adjacency = defaultdict(list)
    for src, dst, weight in edges:
        adjacency[src].append((weight, dst))

    frontier = [(0, f, ())]   # heap of (cost so far, node, reversed trail)
    visited = set()
    best = {f: 0}             # cheapest known cost per node
    while frontier:
        cost, node, trail = heappop(frontier)
        if node in visited:
            continue
        visited.add(node)
        trail = (node, trail)
        if node == t:
            return (cost, trail)
        for weight, nxt in adjacency.get(node, ()):
            if nxt in visited:
                continue
            candidate = cost + weight
            if best.get(nxt) is None or candidate < best[nxt]:
                best[nxt] = candidate
                heappush(frontier, (candidate, nxt, trail))
    return float("inf")
# Fixture: stdin payload and expected stdout for the self-test harness.
user_input = '''
5
2 1 1
1 2
3 1 3
2 3
4 2 3
1 2
1 3
6 5 2
1 3
3 2
2 5
2 6
5 6
6 5 3
1 3
3 2
2 5
2 6
5 6
'''

expected = '''
#1 1
#2 -1
#3 -1
#4 -1
#5 3
'''

# Feed the fixture to main() and compare its output against `expected`.
T.runningTest(user_input.strip(), expected.lstrip(), main)
17ac398ebba99dd8fc173783690f46a4c72e6993 | Python | rosolczolgmakaron/1st-projekt | /python/konwersje.py | UTF-8 | 2,268 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def dec2other(liczba10, podstawa):
    """Convert a non-negative base-10 integer to its string representation
    in base `podstawa` (2-16). Digits above 9 become letters A-F.

    (Polish original: "Konwersja liczby dziesiętnej na system o podanej
    podstawie".)
    """
    # Fix: the original returned '' for 0, which is not a valid numeral.
    if liczba10 == 0:
        return "0"
    liczba = []
    while liczba10 != 0:
        reszta = liczba10 % podstawa
        if reszta > 9:  # map 10..15 to 'A'..'F' via ASCII arithmetic
            reszta = chr(reszta + 55)
        liczba.append(str(reszta))  # collect digits least-significant first
        liczba10 = int(liczba10 / podstawa)
    liczba.reverse()  # most-significant digit first
    return "".join(liczba)
def zamiana1():
    """Read a decimal number and a base (2-16) from stdin, then print the
    number converted to that base. (Prompts are in Polish.)"""
    liczba = int(input("Podaj liczbe: "))
    podstawa = int(input("Podaj podstawe: "))
    # Re-prompt until the base is within the supported range.
    while podstawa < 2 or podstawa > 16:
        podstawa = int(input("Podaj podstawe: "))
    print("Wynik konwersji: {}(10) = {}({})".format(
        liczba, dec2other(liczba, podstawa), podstawa))
def other2dec(liczba, podstawa):
    """Convert the numeral string `liczba`, written in base `podstawa`,
    to a base-10 integer. Letter digits (upper or lower case) map via
    ASCII: 'A'/'a' -> 10, ..., 'F'/'f' -> 15."""
    wynik = 0
    # Horner's scheme: fold digits left to right instead of summing
    # digit * base**position terms.
    for cyfra in liczba:
        if cyfra.isdigit():
            wartosc = int(cyfra)
        else:
            wartosc = ord(cyfra.upper()) - 55
        wynik = wynik * podstawa + wartosc
    return wynik
def zamiana2():
    """Read a number string and a base (2-16) from stdin, validate the
    digits, and print the decimal conversion produced by other2dec."""
    liczba = input("Podaj liczbe: ")
    podstawa = int(input("Podaj podstawe: "))
    # Keep asking until the base is within <2;16>.
    while podstawa < 2 or podstawa > 16:
        podstawa = int(input("Podaj podstawe: "))
    if podstawa > 9:
        # NOTE(review): this only rejects characters above 'F'; it does not
        # reject digits/letters that are >= the chosen base (e.g. 'F' in
        # base 11 passes validation).
        for i in liczba:
            if ord(i.upper()) > 70:
                print("Zły format danych wejsciowych")
                return 0
    else:
        for i in liczba:
            if int(i) >= podstawa:
                print("Liczba nie moze skladac sie z cyfr > podstawy")
                return 0
    print("Wynik konwersji: {}({}) = {}(10)".format(
        liczba, podstawa, other2dec(liczba, podstawa)))
def main(args):
    """Entry point: announce the tool and run the decimal-to-base path.
    NOTE(review): args is unused and zamiana2 is never invoked here."""
    print("Zamiana liczby dziesiętnej na liczbę o podanej podstawie"
          "<2;16> lub odwrotnie")
    zamiana1()
    return 0
if __name__ == '__main__':
    # Delegate to main() and propagate its return value as the exit status.
    import sys
    sys.exit(main(sys.argv))
| true |
735bd6bf29cf3bc0bf5cc5ac59909d1da8e6877e | Python | atonderski/Euler | /Euler44.py | UTF-8 | 716 | 2.875 | 3 | [] | no_license | __author__ = 'adam'
# Project Euler 44: find pentagonal numbers whose sum and difference are
# both pentagonal, minimising the difference D.
# NOTE: Python 2 syntax (xrange, print statements).
pentagonals = set([])
n = 1
diff=9999999999
bestPair=[0,0]
oldPenta=0
newPenta=0
# Precompute the first 10^8 pentagonal numbers for O(1) membership tests.
# NOTE(review): this materialises an enormous set (multi-GB); a smaller
# bound or the inverse pentagonal-number formula would be far cheaper.
for i in xrange(1,100000000):
    pentagonals.add(int(i * (3 * i - 1) * 0.5))
print 'done'
countingPenta = []
while True:
    oldPenta=newPenta
    newPenta = int(n * (3 * n - 1) * 0.5)
    # print 'newPenta: ' + str(newPenta)
    # Once consecutive pentagonals are further apart than the best known
    # difference, no later pair can improve on it.
    if newPenta-oldPenta > diff:
        break
    for penta in countingPenta:
        # print penta
        a = newPenta + penta
        b = newPenta - penta
        if a in pentagonals and b in pentagonals and b<diff:
            diff=b
            bestPair=a,b
            print bestPair
            print diff
    n+=1
    countingPenta.append(newPenta)
print bestPair
print diff
f9e1777f1694175cb29b9851e5b532ed787a479c | Python | 154650362/YaSQL | /yasql/apps/sqlquery/utils.py | UTF-8 | 413 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding:utf-8 -*-
# edit by fuzongfei
import sqlparse
# Strip the comment at the head of the statement before executing it
def remove_sql_comment(sql):
    """Strip a leading SQL comment from the statement before execution.

    NOTE(review): only the first statement that carries a leading comment
    is returned; otherwise the value of the *last* parsed statement is
    returned unchanged, and an empty input leaves `statement` unbound
    (NameError). Confirm callers always pass a single statement.
    """
    for stmt in sqlparse.split(sql.rstrip(';')):
        statement = sqlparse.parse(stmt)[0]
        comment = statement.token_first()
        if isinstance(comment, sqlparse.sql.Comment):
            return statement.value.replace(comment.value, '')
    return statement.value
| true |
1af2ac502a84837be552aba8be599d3b0907ce39 | Python | pranati05/Misc-4 | /LargestRectangleinHistogram.py | UTF-8 | 1,328 | 3.75 | 4 | [] | no_license | # Time Complexity : O(N)
# Space Complexity : O(N)
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : No
# Your code here along with comments explaining your approach
# Using monotonous increasing stack to store the indices when the heights are in increasing order.
# Initialize stack with -1
# If the height is smaller than the top of the stack then we resolve all the elements until we find a greater height or stack becomes empty
# If all the heights are in increasing order that is we dont any height greater than top element of the stack then we resolve the stack after we iterate over the heights array
class Solution:
    def largestRectangleArea(self, heights: List[int]) -> int:
        """Largest rectangle in a histogram via a monotonic stack.

        The stack holds indices of bars with non-decreasing heights plus a
        -1 sentinel; each bar is resolved when a lower (or equal) bar
        arrives, or during the final sweep, with its width reaching back to
        the previous stack entry. O(n) time, O(n) space.
        """
        if not heights:
            return 0
        best = -inf
        indices = [-1]  # sentinel marking "no left boundary"
        for right, bar in enumerate(heights):
            while indices[-1] != -1 and heights[indices[-1]] >= bar:
                tallest = heights[indices.pop()]
                best = max(best, tallest * (right - indices[-1] - 1))
            indices.append(right)
        total = len(heights)
        while indices[-1] != -1:
            tallest = heights[indices.pop()]
            best = max(best, tallest * (total - indices[-1] - 1))
        return best
652ea1ea6b3fab0b00b181dcee3bfac3bea6638a | Python | afreedfayaz18/Assignment_5 | /as4.py | UTF-8 | 218 | 3.28125 | 3 | [] | no_license | def add(a,b):
result=a+b
return result
def sub(a,b):
    """Return a minus b."""
    return a - b
def mul(a,b):
    """Return the product of a and b."""
    return a * b
def truediv(a,b):
    """Return a divided by b (true division)."""
    return a / b
def floordiv(a,b):
    """Return a divided by b, rounded toward negative infinity."""
    return a // b
1026580df88454122cdf6b0b25f884fbf3c1bc98 | Python | ybli/Landslide-Analysis | /analysis/resultWofEAnalysis_Example.py | UTF-8 | 7,103 | 2.890625 | 3 | [] | no_license |
# coding: utf-8
import os
import pandas
import matplotlib.pyplot as plt
import numpy
class ProcessSimulations():
    """Post-processes Monte-Carlo Weights-of-Evidence simulation tables:
    arranges per-raster simulation series, renders fan (uncertainty)
    charts and exports percentile tables."""

    def __init__(self, conf_interval):
        # Quartile breakpoints [0, 0.25, 0.5, 0.75, 1.0].
        # NOTE(review): conf_interval is ignored by the quartile branch of
        # getPercentiles -- confirm that is intended.
        self.local_percentiles = self.getPercentiles(conf_interval, quartiles=True)
        # Half the distance between consecutive percentile breakpoints.
        self.local_step_percentiles = (self.local_percentiles[1] - self.local_percentiles[0]) / 2
        self.local_median_value = 0.5

    def get_unique_rasters(self, pd_rasters, column):
        """Return the unique raster names found in `column` of the
        simulations dataframe."""
        unique_rasters = pandas.unique(pd_rasters[column])
        return unique_rasters

    def set_rasters_analysis(self, pd_rasters, raster_name, class_column, value_column, out_path):
        """Collect every simulation row for one raster into a CSV (one row
        per iteration, one column per class) and reload it as a dataframe.

        Returns (arranged_dataframe, unique_classes).
        NOTE(review): the iteration count comes from the module-level
        `simulations` variable, not a parameter -- confirm it is defined
        before this method is called.
        """
        # Filter the dataframe down to the requested raster.
        pd_raster_series = pd_rasters[pd_rasters.rasterName == "{}".format(raster_name)]
        unique_classes = pd_raster_series[class_column].unique()
        # Rebuild the per-raster CSV from scratch.
        fileOut = os.path.join(out_path, "{}_{}.csv".format(raster_name, value_column))
        if(os.path.exists(fileOut)):
            os.remove(fileOut)
        for i in range(1, simulations, 1):
            # One transposed row of (class, value) pairs per iteration.
            t_pRaster = pd_raster_series[pd_raster_series.iteration == i][[class_column, value_column]].transpose()[1:2]
            t_pRaster.to_csv(fileOut, header=False, mode="a")
        pd_rasters_arranged = pandas.read_csv(fileOut, names=unique_classes)
        return pd_rasters_arranged, unique_classes

    def generate_uncertainty_chart(self, pd_raster_series, raster_name, lst_unique_classes, value_column, save_fig_path, save_fig_format, xlabel=None, percentiles=None):
        """Render and save a fan chart visualising the spread between
        symmetric quantile pairs, plus the median line."""
        fig, ax = plt.subplots()
        # Median (second quartile) series across simulations.
        q_median_series = pd_raster_series.quantile(self.local_median_value, axis=0)
        # Start from the outermost quantile pair and step inwards.
        q_ub = 0.975
        q_lb = 0.025
        color_step = 0.025
        if(percentiles is None):
            q_ub = max(self.local_percentiles)
            q_lb = min(self.local_percentiles)
            color_step = 1.0 / len(self.local_percentiles)
        else:
            q_ub = max(percentiles)
            q_lb = min(percentiles)
            color_step = 1.0 / len(percentiles)
        red = 1.0
        green = 1.0
        blue = 0.0
        for _ in self.local_percentiles:
            q_ub = q_ub - self.local_step_percentiles
            q_lb = q_lb + self.local_step_percentiles
            if(q_lb <= q_ub):
                # Fade from yellow towards red as the bands narrow.
                red = 1.0
                green = green - color_step
                if(green > 1.0):
                    green = 1.0
                elif(green < 0.0):
                    green = 0.0
                band_color = (red, green, blue)
                q_ub_series = pd_raster_series.quantile(q_ub, axis=0)
                q_lb_series = pd_raster_series.quantile(q_lb, axis=0)
                if(q_lb == 0.5 and q_ub == 0.5):
                    label = q_lb
                else:
                    label = "{} - {}".format(q_lb, q_ub)
                ax.fill_between(lst_unique_classes, q_lb_series, q_ub_series, color=band_color, alpha=0.3, interpolate=True, label=label)
        # Median overlay on top of the fan.
        ax.plot(lst_unique_classes, q_median_series, color="yellow", linewidth=0.5, label="")
        ax.legend()
        # BUG FIX: this used the module-level global lst_unique_class
        # instead of the lst_unique_classes parameter.
        plt.xticks(lst_unique_classes, lst_unique_classes)
        plt.ylabel("{}".format(xlabel))
        plt.xlabel("{}".format(raster_name))
        plt.grid(True)
        figure_name = os.path.join(save_fig_path, '{}_{}.{}'.format(raster_name, value_column, save_fig_format))
        # BUG FIX: Figure.savefig takes the output path as the positional
        # fname argument; filename= is not a valid keyword.
        fig.savefig(figure_name, dpi=300)

    def getPercentiles(self, conf_interval, quartiles=False):
        """Return percentile breakpoints over [0, 1].

        quartiles=True  -> the fixed list [0, 0.25, 0.5, 0.75, 1.0]
        quartiles=False -> numpy.arange(0, 1.0, conf_interval)
        """
        lower = 0
        upper = 1.0
        if(quartiles is not False):
            return [lower, 0.25, 0.5, 0.75, upper]
        else:
            percentiles = numpy.arange(lower, upper, conf_interval)
            return percentiles

    def write_percentiles(self, pd_rasters, percentiles, out_file):
        """Export the requested percentiles of every column to a CSV."""
        percentiles_table = pd_rasters.quantile(percentiles, axis=0)
        percentiles_table.to_csv(out_file, header=True, mode="w")
if __name__ == "__main__":
prodCond = r""
data_path = r""
out_path = r""
# percentiles = getPercentiles(0.05)
simulations = 1000
pandas_simulation = pandas.read_csv(os.path.join(data_path, prodCond), sep="\t")
columns =[column for column in list(pandas_simulation)[2:-1]]
ps = ProcessSimulations(conf_interval=0.05)
unique_rasters = ps.get_unique_rasters(pandas_simulation, "rasterName")
percentiles = ps.getPercentiles(conf_interval=0.05, quartiles=True)
# print(unique_rasters)
for raster in unique_rasters:
print("Processing raster with name: ", raster)
# for column in columns:
for column in ["contrast"]:
rasters_arranged, lst_unique_class = ps.set_rasters_analysis(pd_rasters=pandas_simulation, raster_name=raster, class_column="class", value_column=column, out_path=out_path)
ps.generate_uncertainty_chart(rasters_arranged, raster_name=raster, lst_unique_classes=lst_unique_class, value_column=column, save_fig_path=out_path, save_fig_format="png", xlabel="Contrast", percentiles=None)
out_file_percentiles = os.path.join(out_path, "percentiles_{}.csv".format(raster))
ps.write_percentiles(rasters_arranged, percentiles, out_file=out_file_percentiles) | true |
966a774a38978f8abc05532aa9780e9d664c1f92 | Python | justinhsg/AoC2020 | /src/day20/solution.py | UTF-8 | 6,881 | 2.75 | 3 | [
"MIT"
] | permissive | import sys
import os
import re
from collections import deque
day_number = sys.path[0].split('\\')[-1]
if len(sys.argv)==1:
path_to_source = os.path.join("\\".join(sys.path[0].split("\\")[:-2]), f"input\\{day_number}")
else:
path_to_source = os.path.join("\\".join(sys.path[0].split("\\")[:-2]), f"sample\\{day_number}")
with open(path_to_source, "r") as infile:
tiles = infile.read().split("\n\n")
#Map tile_id to full tile
full_tiles = dict()
#Map border to ids
borders = dict()
#Map ids to borders
border_info = dict()
tile_size = 10
for tile in tiles:
lines = tile.split("\n")[:11]
m = re.match("^Tile ([0-9]+):$", lines[0])
tile_id = int(m.group(1))
full_tiles[tile_id] = lines[1:]
left_border = "".join(map(lambda x: x[0], lines[1:]))[::-1]
if(left_border not in borders):
borders[left_border] = []
borders[left_border[::-1]] = []
borders[left_border].append((tile_id, 0, False))
borders[left_border[::-1]].append((tile_id, 0, True))
top_border = lines[1]
if(top_border not in borders):
borders[top_border] = []
borders[top_border[::-1]] = []
borders[top_border].append((tile_id, 1, False))
borders[top_border[::-1]].append((tile_id, 1, True))
right_border = "".join(map(lambda x: x[-1], lines[1:]))
if(right_border not in borders):
borders[right_border] = []
borders[right_border[::-1]] = []
borders[right_border].append((tile_id, 2, False))
borders[right_border[::-1]].append((tile_id, 2, True))
bottom_border = lines[-1][::-1]
if(bottom_border not in borders):
borders[bottom_border] = []
borders[bottom_border[::-1]] = []
borders[bottom_border].append((tile_id, 3, False))
borders[bottom_border[::-1]].append((tile_id, 3, True))
border_info[tile_id] = (left_border, top_border, right_border, bottom_border)
#maps tiles to other tiles that share a border
TL_id = None
start_rot = None
part1 = 1
for tile_id in border_info:
n_adj = 0
matched_edges = 0
for (side, border) in enumerate(border_info[tile_id]):
for (other_tile_id, other_side, reversed) in borders[border]:
if(other_tile_id != tile_id):
n_adj +=1
matched_edges += 1<<side
if(n_adj == 2):
part1 *= tile_id
if(TL_id is None):
TL_id = tile_id
start_rot = {6:1, 12:0, 8:3, 3:2}[matched_edges]
def rot_tile(tile, rot):
    """Rotate a square character grid 90 degrees clockwise `rot` times."""
    for _ in range(rot):
        size = len(tile)
        flipped = tile[::-1]
        tile = ["".join(flipped[col][row] for col in range(size)) for row in range(size)]
    return tile
def transpose_tile(tile):
    """Transpose a square character grid (mirror across the main diagonal)."""
    size = len(tile)
    return ["".join(tile[col][row] for col in range(size)) for row in range(size)]
def apply_transform(tile, n_rot, transpose):
    """Optionally transpose the grid, then rotate it n_rot quarter-turns
    clockwise. Together the two operations cover all 8 orientations."""
    base = transpose_tile(tile) if transpose else tile
    return rot_tile(base, n_rot)
# Transposing a tile swaps left<->top and right<->bottom borders.
transpose_swaps = [1,0,3,2]
# NOTE: the parameter 'reversed' shadows the builtin of the same name.
def get_transform(side, reversed, req_side):
    """Return the (rotation, transpose) pair that moves `side` (possibly a
    reversed match) onto `req_side`."""
    if(reversed):
        t_side = transpose_swaps[side]
        return ((req_side-t_side)%4, True)
    else:
        return ((req_side-side)%4, False)
def get_border_from_transform(req_side, n_rot, transposed):
    """Inverse of get_transform: given a tile's applied (n_rot, transposed),
    return which original side (and reversal) now sits at `req_side`."""
    if(transposed):
        return (transpose_swaps[(req_side-n_rot)%4], True)
    else:
        return ((req_side-n_rot)%4 , False)
# Part 2 setup: stitch the tiles into the full image, starting from the
# corner TL_id oriented so its matched edges face right and down.
image_tile_width = round(len(full_tiles)**0.5)
image_transforms = [[None for _ in range(image_tile_width)] for _ in range(image_tile_width)]
image_transforms[0][0] = (TL_id, start_rot, False)
full_image = [[None for _ in range(image_tile_width*(tile_size-2))] for _ in range(image_tile_width*(tile_size-2))]
for row in range(image_tile_width):
    for col in range(image_tile_width):
        #print(row, col)
        if(row == col == 0):
            next_id, next_rot, next_trans = image_transforms[row][col]
            next_tile = apply_transform(full_tiles[next_id], next_rot, next_trans)
        elif(col == 0):
            # First column: match against the bottom border (side 3) of the
            # tile above; the neighbour must present it as its top (side 1).
            prev_id, prev_rot, prev_trans = image_transforms[row-1][col]
            prev_side, prev_reverse = get_border_from_transform(3, prev_rot, prev_trans)
            prev_border = border_info[prev_id][prev_side]
            if prev_reverse:
                prev_border = prev_border[::-1]
            next_border = prev_border[::-1]
            for (next_id, next_side, next_rev) in borders[next_border]:
                if(next_id != prev_id):
                    next_rot, next_trans = get_transform(next_side, next_rev, 1)
                    image_transforms[row][col] = (next_id, next_rot, next_trans)
                    next_tile = apply_transform(full_tiles[next_id], next_rot, next_trans)
        else:
            # Otherwise match against the right border (side 2) of the tile
            # to the left; the neighbour presents it as its left (side 0).
            prev_id, prev_rot, prev_trans = image_transforms[row][col-1]
            prev_side, prev_reverse = get_border_from_transform(2, prev_rot, prev_trans)
            prev_border = border_info[prev_id][prev_side]
            if prev_reverse:
                prev_border = prev_border[::-1]
            next_border = prev_border[::-1]
            for (next_id, next_side, next_rev) in borders[next_border]:
                if(next_id != prev_id):
                    next_rot, next_trans = get_transform(next_side, next_rev, 0)
                    image_transforms[row][col] = (next_id, next_rot, next_trans)
                    next_tile = apply_transform(full_tiles[next_id], next_rot, next_trans)
        # Copy the tile interior (borders stripped) into the assembled image.
        for img_r in range(row*(tile_size-2), (row+1)*(tile_size-2)):
            for img_c in range(col*(tile_size-2), (col+1)*(tile_size-2)):
                full_image[img_r][img_c] = next_tile[img_r%(tile_size-2)+1][img_c%(tile_size-2)+1]
# The AoC 2020 day-20 sea-monster mask (20x3 characters).
monster = ["                  # ","#    ##    ##    ###"," #  #  #  #  #  #   "]
# Offsets (row, col) of every '#' cell in the mask.
coords = []
for r_i, r in enumerate(monster):
    for c_i, c in enumerate(r):
        if(c == '#'):
            coords.append((r_i, c_i))
def find_monster(full_image):
    """Count sea-monster mask occurrences in the assembled image.

    Relies on the module-level image_tile_width, tile_size, monster and
    coords globals for the image dimensions and the mask offsets.
    """
    row_limit = image_tile_width*(tile_size-2)-len(monster)
    col_limit = image_tile_width*(tile_size-2)-len(monster[0])
    count = 0
    for base_row in range(row_limit):
        for base_col in range(col_limit):
            if all(full_image[base_row+dr][base_col+dc] == '#' for dr, dc in coords):
                count += 1
    return count
# Part 2: total '#' cells minus those covered by sea monsters, trying all
# eight orientations of the assembled image.
# NOTE(review): subtractions accumulate across every orientation; this
# assumes monsters only appear in one orientation (true for AoC inputs).
part2 = 0
for row in full_image:
    for c in row:
        part2 += 1 if c=='#' else 0
for rot in range(4):
    new_image = apply_transform(full_image, rot, False)
    n_monster = find_monster(new_image)
    part2 -= n_monster*len(coords)
    new_image = apply_transform(full_image, rot, True)
    n_monster = find_monster(new_image)
    part2 -= n_monster*len(coords)
print(part1)
print(part2)
23fbde04e1c4b272cbe76a14a21f470791eb69c0 | Python | metaperl/freegold-focus | /mymail.py | UTF-8 | 1,169 | 2.796875 | 3 | [
"MIT"
] | permissive | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send(text, html, email, name, cc):
    """Build and send a multipart/alternative (plain + HTML) message.

    text, html -- the two alternative bodies of the message
    email      -- primary recipient address
    name       -- recipient name interpolated into the subject line
    cc         -- additional recipient address
    The message is relayed through the local SMTP server; a hard-coded
    audit address is always added to the recipient list.
    """
    # FIX: removed the in-function imports duplicating the module-level
    # smtplib / MIMEMultipart / MIMEText imports.
    me = "ElDorado@FreeGold.Biz"
    you = email
    COMMASPACE = ', '
    addressees = [you, cc, 'terrence.brannon@gmail.com']

    msg = MIMEMultipart('alternative')
    msg['Subject'] = "Karatbars replicated website for {0}".format(name)
    msg['From'] = me
    msg['To'] = COMMASPACE.join(addressees)

    # Attach text/plain first and text/html last: per RFC 2046 the last
    # part of a multipart/alternative message is the preferred rendering.
    msg.attach(MIMEText(text, 'plain'))
    msg.attach(MIMEText(html, 'html'))

    # FIX: close the SMTP connection even when sendmail raises; the
    # original only called quit() on the success path.
    s = smtplib.SMTP('localhost')
    try:
        s.sendmail(me, addressees, msg.as_string())
    finally:
        s.quit()
| true |
49387d7591b244e88b8cd7601e285f596741b512 | Python | sunatthegilddotcom/Fullerene-Thesis | /triangulation.py | UTF-8 | 1,430 | 2.828125 | 3 | [] | no_license | def triangulation(pentagon_array, hexagon_array):
    # NOTE(review): 'pentagons' and 'hexagons' used below are not this
    # function's parameters (pentagon_array / hexagon_array); unless module
    # globals with those names exist, this raises NameError.
    import numpy as np
    # Pure-pentagon case (C20 fullerene) is delegated entirely.
    if hexagon_array.shape[1] == 0:
        T = tri_C20(pentagon_array)
        return T
    N_T = 5*pentagon_array.shape[1] + 6*hexagon_array.shape[1] #num of columns in triangulation
    T = np.zeros([3, N_T], dtype=int) #Initialize array for the triangulation
    T_p = np.zeros([3,5*12]) #Triangle array from pentagons
    T_h = np.zeros([3,6*hexagon_array.shape[1]]) #Triangle array from hexagons
    vertex_num = int(5/3*pentagons.shape[1] + 2*hexagons.shape[1]) #Current vertex num.
    vertex_num_max = vertex_num + 12 + hexagons.shape[1] #Upper vertex num.
    iterater = 0
    while True:
        # Each pass fans one pentagon and one hexagon around a fresh centre
        # vertex: row 2 holds the new vertex, rows 0/1 the polygon's edge
        # endpoints (the ring rolled by one).
        if iterater < 12:
            T_p[2,iterater*5:iterater*5+5] = vertex_num
            T_p[0,iterater*5:iterater*5+5] = pentagon_array[0][iterater]
            T_p[1,iterater*5:iterater*5+5] = np.roll(pentagon_array[0][iterater],1)
            T_h[2,iterater*6:iterater*6+6] = vertex_num + 12
            T_h[0,iterater*6:iterater*6+6] = hexagon_array[0][iterater]
            T_h[1,iterater*6:iterater*6+6] = np.roll(hexagon_array[0][iterater],1)
            vertex_num+=1;
            iterater+=1
        # NOTE(review): vertex_num only advances while iterater < 12, but
        # this break requires 12 + n_hexagons increments -- for any hexagon
        # count > 0 the loop never terminates. Verify against the caller.
        if vertex_num + 12 == vertex_num_max:
            break
    T[:,:5*12] = T_p; T[:,5*12:] = T_h;
    return T
6613cad61ac12020145a4a77bf6eaae79c102d7e | Python | hydrotop/PythonStudy | /test/076.py | UTF-8 | 118 | 2.984375 | 3 | [] | no_license | txt1='A tale that was not right'
txt2='이 또한 지나가리라.'
# Characters at indices 3-6 of txt1: "ale "
print(txt1[3:7])
# The first six characters: "A tale"
print(txt1[:6])
# The last four characters of the Korean string: "가리라."
print(txt2[-4:])
| true |
5888047fcf0d2371cf0d33656775fc180ecec3d6 | Python | Habeen-Jun/SaltLux_Project--TOEIC-Helper | /Flask/sr_test.py | UTF-8 | 3,836 | 2.75 | 3 | [] | no_license | import speech_recognition as sr
from pydub import AudioSegment
import os
from pydub.silence import split_on_silence
import pocketsphinx
from jiwer import wer
import time
from datetime import datetime
# 1db = -5dbfs
def db_2_dbfs(db):
    """Convert dB to dBFS using this project's rule of 1 dB == -5 dBFS."""
    return -5 * db
def GetCurrentDatetime():
    """Return the current local time as 'YYYY_MD_HMS'.

    Fields are not zero-padded, matching the original format exactly
    (e.g. 2024_13 could mean Jan 3 or ... the format is ambiguous by design
    of the original; callers use it only as a unique-ish directory name).
    """
    now = datetime.now()
    return '{0}_{1}{2}_{3}{4}{5}'.format(
        now.year, now.month, now.day, now.hour, now.minute, now.second)
def audio2question(path, save_path, min_silence_len, threshold, min_duration, seek_step=1, time_it=False,
                   sr=None):
    """
    Split one audio file into per-question chunks on silence gaps and save
    them as wav files under save_path.
    :param path: Path of Audio Folder
    :param save_path: Path to save
    :param min_silence_len: Minimum length of Silence
    :param threshold: threshold (in DB)
    :param min_duration: Minimum Duration of a sliced chunk
    (if a sliced chunk is shorter than predefined minimum duration, concat with the following chunk.)
    :param seek_step:
    :param time_it: True -> print processing time
    :param sr: sampling rate
    :return: None

    NOTE(review): despite the docstring, chunks shorter than min_duration
    are *skipped*, not concatenated with the next chunk. Also note the
    parameter name 'sr' shadows the module-level speech_recognition alias.
    """
    print('<Pydub>')
    start_load = time.time()
    base_dir = os.getcwd()
    # Optionally resample while decoding the mp3.
    if sr != None:
        song = AudioSegment.from_mp3(path).set_frame_rate(sr)
    else:
        song = AudioSegment.from_mp3(path)
    print('Audio Loading Time: ', time.time() - start_load)
    print('Audio Length: ', song.duration_seconds)
    print('Loaded Sampling Rate', song.frame_rate)
    start = time.time()
    chunks = split_on_silence(song,
                              min_silence_len=min_silence_len,
                              silence_thresh=threshold,
                              seek_step=seek_step)
    print('Audio Segmentation Time: ', time.time() - start)
    # print('Total Processing Time: ', time.time() - start_load)
    # print(len(chunks), ' chunks detected')
    # os.mkdir(save_path)
    try:
        os.makedirs(save_path)
    except(FileExistsError):
        pass
    os.chdir(save_path)
    i = 0
    # process each chunk
    for chunk in chunks:
        if len(chunk) <= min_duration:
            pass
        else:
            print("saving chunk{0}.wav".format(i))
            chunk.export("./chunk{0}.wav".format(i), bitrate='22.5k', format="wav")
            i += 1
    if time_it:
        print('time:',time.time() - start_load)
    # Restore the working directory changed above.
    os.chdir(base_dir)
def inference_audio(path, api_type='google'):
    """Transcribe a wav file with the selected speech-recognition backend
    ('google' or 'sphinx'), print and return the recognized text.

    NOTE(review): any other api_type value leaves `text` unbound and
    raises NameError at the print below.
    """
    recognizer = sr.Recognizer()
    recognizer.energy_threshold = 300
    ## load the wav file
    audio = sr.AudioFile(path)
    with audio as source:
        audio = recognizer.record(source)
    if api_type == 'google':
        text = recognizer.recognize_google(audio_data=audio, language="en-US")
        # text = recognizer.recognize_google(audio_data=audio, language="en-US", enable_automatic_punctuation=True)
    elif api_type == 'sphinx':
        text = recognizer.recognize_sphinx(audio_data=audio, language="en-US")
    print(text)
    return text
if __name__ == '__main__':
    # Split every TOEIC test mp3 into per-question chunks.
    path = '/root/data/TOEIC_Audio/Toeic_3/'
    tests = os.listdir(path)
    for test in tests:
        #Question Segmentation BEST hparams
        test_file_path = os.path.join(path, test)
        test = test.replace('.mp3','')
        print(test)
        audio2question(path=test_file_path, save_path='./Toeic_3/'+test+'/'+GetCurrentDatetime()+'/', min_silence_len=4500, min_duration=10000, threshold=-50, time_it=True, seek_step=150, sr=22050)
    # Sentence Segmentation BEST hparams
    # audio2question(path=path,save_path='./sent_chunks', min_silence_len=700, min_duration=1, threshold=-50, time_it=True, seek_step=5)
    # with open('./ground_truth/gr.txt', 'r') as f:
    #     ground_truth = f.read().strip()
    # print('Infer_text')
    # print()
    # print(infer_text)
    #
    # error = wer(ground_truth, infer_text)
    # print('WER')
    # print(error)
| true |
b2f7b7dd6e84e09b4607556cc1a301dd466e4d9f | Python | brandon-rhodes/homedir | /bin/,orphan-xmp | UTF-8 | 756 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
#
# After deleting a few photos with the Delete button in Geeqie, I then
# always want to remove the orphaned .xmp sidecar files.
import glob
import os
import sys
dirs = sys.argv[1:]
if not dirs:
    print('usage: give me some directories')
    sys.exit(2)
# Walk each directory and print every .xmp sidecar whose image file no
# longer exists (the glob finds only the sidecar itself).
for dirname in dirs:
    for dirpath, dirnames, filenames in os.walk(dirname):
        for filename in filenames:
            if not filename.endswith('.xmp'):
                continue
            if '_' in filename:
                continue # TODO: probably a second version of the same image
            path = os.path.join(dirpath, filename)
            # Strip the ".xmp" suffix and look for any sibling match.
            matches = glob.glob(path[:-4] + '*')
            if len(matches) > 1:
                continue
            print(path)
| true |
d8656cf72e4b674805569711ab7bf44f729b3f6e | Python | leox64/ADT | /Recursividad.py | UTF-8 | 217 | 3.53125 | 4 | [] | no_license | def main():
lista = [3,5,2,1]
print(suma_lista(lista))
def suma_lista (l):
if len(l) == 1:
return l[0]
else:
actual = l.pop()
return actual + suma_lista(l)
main()
| true |
e75dc43b9e4e002b1e98b0aa65c5071abe943914 | Python | prakharshreyash/community_sdk_python | /kentik_api_library/examples/error_handling_example.py | UTF-8 | 1,627 | 2.671875 | 3 | [
"Apache-2.0",
"MIT",
"Python-2.0",
"BSD-3-Clause"
] | permissive | """Examples of handling errors raised in kentik_api library. Only a subset of possible errors is presented."""
import os
import sys
import logging
from typing import Tuple
from kentik_api import KentikAPI, AuthError, NotFoundError, IncompleteObjectError, Device, RateLimitExceededError
from kentik_api.public.types import ID
# Configure root logging once at import time so library calls emit INFO logs.
logging.basicConfig(level=logging.INFO)
def get_auth_email_token() -> Tuple[str, str]:
    """Return (email, token) from the environment, or exit(1) with a hint
    when either KTAPI_AUTH_EMAIL or KTAPI_AUTH_TOKEN is missing."""
    try:
        return os.environ["KTAPI_AUTH_EMAIL"], os.environ["KTAPI_AUTH_TOKEN"]
    except KeyError:
        print("You have to specify KTAPI_AUTH_EMAIL and KTAPI_AUTH_TOKEN first")
        sys.exit(1)
def handle_errors() -> None:
    """Demonstrate handling of the main kentik_api exception types:
    AuthError, NotFoundError, IncompleteObjectError and
    RateLimitExceededError."""
    # Deliberately bad credentials to trigger AuthError on first use.
    bad_email = "not_an_email"
    bad_token = "dummy_token"
    client = KentikAPI(bad_email, bad_token)
    try:
        users = client.users.get_all()
    except AuthError:
        # Retry with real credentials from the environment.
        email, token = get_auth_email_token()
        client = KentikAPI(email, token)
        users = client.users.get_all()
    try:
        fake_id = ID(-1)
        user = client.users.get(fake_id)  # there is no user with -1 ID
    except NotFoundError:
        print("User with ID: {} not exist".format(fake_id))
    new_device = Device(plan_id=ID(10))  # device without required fields to create it
    try:
        client.devices.create(new_device)
    except IncompleteObjectError:
        print("Cannot create device")
    # Hammer the API to provoke the rate limiter.
    for _ in range(100):
        try:
            client.users.get(users[0].id)
        except RateLimitExceededError:
            print("Requests rate limit exceeded")
if __name__ == "__main__":
handle_errors()
| true |
b801cc5693b264fac5e3a53f8571f4cf9fc9e58a | Python | FBergeron/AdventOfCode2020 | /day_22/aoc_22.py | UTF-8 | 1,697 | 3.34375 | 3 | [] | no_license | from itertools import combinations
import re
import sys
input_data_filename = "player_cards.txt"
# input_data_filename = "player_cards_short.txt"
def is_game_over(cards_list=None):
    """Return True when any player's deck is empty.

    cards_list defaults to the module-level player_cards so the existing
    zero-argument call sites keep working; passing an explicit list of
    decks makes the function testable in isolation.
    """
    if cards_list is None:
        cards_list = player_cards
    return any(not cards for cards in cards_list)
# Parse the deck for each player: a "Player N:" header starts a new deck,
# every following numeric line is a card.
player_cards = []
cards = None
with open(input_data_filename, 'r') as input_file:
    for line in input_file:
        match = re.search("Player (\d+):", line)
        if match:
            if cards is not None and player_index is not None:
                player_cards.append(cards)
            player_index = match.group(1)
            cards = []
            continue
        match = re.search("(\d+)", line)
        if match:
            card = int(match.group(1))
            cards.append(card)
if cards is not None and player_index is not None:
    player_cards.append(cards)
print(f"player_cards={player_cards}")
# Play Combat: each round both players reveal their top card; the higher
# card's owner takes both (higher card first).
# NOTE: 'round' shadows the builtin of the same name.
round = 0
while not is_game_over():
    top_cards = []
    for index, cards in enumerate(player_cards):
        top_cards.append((index, cards.pop(0)))
    print(f"top_cards={top_cards}")
    sorted_top_cards = sorted(top_cards, key=lambda pair: pair[1], reverse=True)
    print(f"sorted_top_cards={sorted_top_cards}")
    winner = sorted_top_cards[0][0]
    print(f"winner={winner}")
    for index, card in sorted_top_cards:
        player_cards[winner].append(card)
    round += 1
    print(f"round={round}")
    print(f"player_cards={player_cards}")
# Score the winning deck: bottom card counts 1x, next 2x, and so on.
score = 0
for cards in player_cards:
    if len(cards) == 0:
        continue
    for index, card in enumerate(cards):
        print(f"index={index} card={card}")
        score += (len(cards) - index) * card
print(f"score={score}")
| true |
1c70682b41c5d9e884f1752e638ac5ea1f646ffb | Python | 1narayan1/Guessing-Number | /main.py | UTF-8 | 620 | 4.71875 | 5 | [] | no_license | # GUESSING THE NUMBER GAME
import random

# Number-guessing game: five attempts to find a random number in [0, 100].
print("Welcome to the GUESSSING THE NUMBER GAME!")

number = random.randint(0,100)
Guesses = 5
win = False

while Guesses > 0:
    guess = int(input("Guess: "))
    Guesses -= 1
    if guess > number:
        print("Your guess was too high,you have", Guesses,"remaning")
    elif guess < number:
        print("Your guess was too low,you have", Guesses,"remaining")
    else:
        print("Congrats,you guessed the correct number and won the game")
        win = True
        # BUG FIX: the original set `guess = 0` (a no-op) and kept
        # prompting until all guesses were used; stop once the player wins.
        break

if not win:
    print("Sorry,you didn't guess the number, The number was",number)
7f785b63e164dafeca0d78bd79d8abd814c9232c | Python | Srinidhi-SA/mAdvisorProdML | /bi/algorithms/time_series_forecasting.py | UTF-8 | 2,665 | 2.984375 | 3 | [] | no_license | from __future__ import print_function
from __future__ import division
from builtins import range
from builtins import object
from past.utils import old_div
class TimeSeriesAnalysis(object):
    """Additive triple exponential smoothing (Holt-Winters) forecaster.

    Based on
    https://grisha.org/blog/2016/02/17/triple-exponential-smoothing-forecasting-part-iii/
    IMPROVEMENT: the Python-2 compatibility shim `old_div` was replaced
    with native `/` and `//` (behavior unchanged on Python 3), and the
    builtin-shadowing local `sum` was renamed.
    """

    def __init__(self):
        print("TIME SERIES INITIALIZATION DONE")

    def initial_trend(self, series, slen):
        """Estimate the initial per-step trend.

        Averages the season-over-season differences across one full season
        when at least two seasons are available; otherwise falls back to
        the len(series) - slen available pairs (requires len > slen).
        """
        total = 0.0
        if len(series) >= slen * 2:
            for i in range(slen):
                total += float(series[i + slen] - series[i]) / slen
            return total / slen
        else:
            new_range = len(series) - slen
            for i in range(new_range):
                total += float(series[i + slen] - series[i])
            return total / new_range

    def initial_seasonal_components(self, series, slen):
        """Initial additive seasonal component for each of the slen
        positions within a season, averaged over all complete seasons."""
        seasonals = {}
        season_averages = []
        n_seasons = len(series) // slen
        # Mean of each complete season.
        for j in range(n_seasons):
            season_averages.append(sum(series[slen * j:slen * j + slen]) / float(slen))
        # Average deviation from the season mean, per in-season position.
        for i in range(slen):
            sum_of_vals_over_avg = 0.0
            for j in range(n_seasons):
                sum_of_vals_over_avg += series[slen * j + i] - season_averages[j]
            seasonals[i] = sum_of_vals_over_avg / n_seasons
        return seasonals

    def triple_exponential_smoothing(self, series, slen, alpha, beta, gamma, n_preds):
        """Smooth `series` and append `n_preds` forecast points.

        series  -- observed values (length must be a multiple-friendly
                   fit with slen; at least one full season)
        slen    -- season length
        alpha, beta, gamma -- level / trend / seasonal smoothing factors
        n_preds -- number of out-of-sample forecasts to append
        Returns a list of len(series) + n_preds values.
        """
        result = []
        seasonals = self.initial_seasonal_components(series, slen)
        for i in range(len(series) + n_preds):
            if i == 0:  # initial values
                smooth = series[0]
                trend = self.initial_trend(series, slen)
                result.append(series[0])
                continue
            if i >= len(series):  # forecasting beyond the data
                m = i - len(series) + 1
                result.append((smooth + m * trend) + seasonals[i % slen])
            else:
                val = series[i]
                last_smooth, smooth = smooth, alpha * (val - seasonals[i % slen]) + (1 - alpha) * (smooth + trend)
                trend = beta * (smooth - last_smooth) + (1 - beta) * trend
                seasonals[i % slen] = gamma * (val - smooth) + (1 - gamma) * seasonals[i % slen]
                result.append(smooth + trend + seasonals[i % slen])
        return result
| true |
82ffc9bf46a3d187c9199ab85261a6bc7662a678 | Python | LBJ-Wade/gphist_GW | /gphist/posterior.py | UTF-8 | 7,792 | 2.96875 | 3 | [] | no_license | """Expansion history posterior applied to distance functions.
"""
import math
from abc import ABCMeta,abstractmethod
import numpy as np
import numpy.linalg
import astropy.constants
class GaussianPdf(object):
    """Multi-dimensional Gaussian PDF with independent (diagonal) errors.

    FIX: the constructor docstring previously documented a removed
    `covariance` parameter; live debug prints were removed from get_nlp.

    Args:
        mean(ndarray): 1D array of length npar of mean values.
        err(ndarray): 1D array (length npar) of per-component standard
            deviations; the implied covariance is diag(err**2).
    """
    def __init__(self,mean,err):
        self.mean = mean
        # Inverse variance of each component (diagonal inverse covariance).
        self.icov = 1/err**2
        # Constant offset added to every -log(prob) value.
        # NOTE(review): this is not the full Gaussian normalization term
        # (no 2*pi or determinant factor); it appears to be a deliberate
        # simplification -- confirm against downstream likelihood use.
        self.norm = -0.5*mean.size

    def get_nlp(self,values):
        """Calculates -log(prob) for the PDF evaluated at specified values.

        The calculation is automatically broadcast over multiple value
        vectors.

        Args:
            values(ndarray): Array of values where the PDF should be
                evaluated, with the last axis matching the length of the
                mean vector and any leading axes broadcast over.

        Returns:
            ndarray: -log(prob) values with the trailing axis contracted.

        Raises:
            ValueError: Values cannot be broadcast together with the mean.
        """
        residuals = values - self.mean
        # Diagonal chi-squared: sum_j residual_j**2 / err_j**2 over the
        # last axis, broadcasting over any leading axes.
        chisq = np.einsum('...ij,j,...ij->...i',residuals,self.icov,residuals)
        return self.norm + 0.5*chisq
class GaussianPdf1D(GaussianPdf):
    """Specialization of GaussianPdf to the 1D case.

    Args:
        central_value(float): Central value of the 1D PDF.
        sigma(float): RMS spread of the 1D PDF.
    """
    def __init__(self,central_value,sigma):
        # BUG FIX: the base __init__ now takes per-component standard
        # deviations (err), not a covariance matrix. Passing [[sigma**2]]
        # made icov = 1/sigma**4 instead of 1/sigma**2.
        mean = np.array([central_value])
        err = np.array([sigma])
        GaussianPdf.__init__(self,mean,err)

    def get_nlp(self,values):
        """Calculates -log(prob) for the PDF evaluated at specified values.

        Args:
            values(ndarray): Array of values where the PDF should be
                evaluated.

        Returns:
            ndarray: -log(prob) values calculated at each input point.
        """
        return GaussianPdf.get_nlp(self,values)
class GaussianPdf2D(GaussianPdf):
    """Specialization of GaussianPdf to the 2D case.

    Args:
        x1(float): Central value of the first parameter.
        x2(float): Central value of the second parameter.
        sigma1(float): RMS spread of the first parameter.
        sigma2(float): RMS spread of the second parameter.
        rho12(float): Correlation coefficient between the two parameters.
            Must be between -1 and +1.
    """
    def __init__(self, x1, sigma1, x2, sigma2, rho12):
        # Off-diagonal covariance term derived from the correlation.
        off_diag = sigma1 * sigma2 * rho12
        GaussianPdf.__init__(
            self,
            np.array([x1, x2]),
            np.array([[sigma1**2, off_diag], [off_diag, sigma2**2]]))
class Posterior(object):
    """Posterior constraint on DH,DA at a fixed redshift.
    This is an abstract base class and subclasses must implement the constraint method.
    Args:
        name(str): Name to associate with this posterior.
        zpost(float): Redshift of posterior constraint.
    """
    # NOTE(review): assigning __metaclass__ only works under Python 2; under
    # Python 3 this class is NOT actually abstract (it would need
    # `metaclass=ABCMeta` in the class statement). Confirm the target
    # interpreter before changing it.
    __metaclass__ = ABCMeta
    def __init__(self,name,zpost):
        self.name = name
        self.zpost = zpost
    @abstractmethod
    def constraint(self,DHz,DAz,muz):
        """Evaluate the posterior constraint given values of DH(zpost) and DA(zpost).
        Args:
            DHz(ndarray): Array of DH(zpost) values.
            DAz(ndarray): Array of DA(zpost) values with the same shape as DHz.
            muz(ndarray): Array of mu(zpost) values with the same shape as DHz.
        Returns:
            nlp(ndarray): Array of -log(prob) values with the same shape as DHz and DAz.
        """
        pass
    def get_nlp(self,zprior,DH,DA,mu):
        """Calculate -log(prob) for the posterior applied to a set of expansion histories.
        The posterior is applied to c/H(z=0).
        Args:
            zprior(ndarray): Redshifts where prior is sampled, in increasing order.
            DH(ndarray): Array of shape (nsamples,nz) of DH(z) values to use.
            DA(ndarray): Array of shape (nsamples,nz) of DA(z) values to use.
            mu(ndarray): Array of shape (nsamples,nz) of mu(z) values to use.
        Returns:
            ndarray: Array of -log(prob) values calculated at each input value.
        Raises:
            AssertionError: zpost is not in zprior.
        """
        #iprior = np.argmax(zprior==self.zpost)
        # Indices in zprior matching this posterior's redshift(s); np.where
        # returns a tuple of arrays, hence the trailing [0].
        iprior = np.where(np.in1d(zprior,self.zpost))[0] #for whatever reason np.where returns a tuple of an array so thats why there is the [0] after
        # Slice out the sampled observables at the posterior redshift(s);
        # each slice has shape (nsamples, len(iprior)).
        DHz = DH[:,iprior]
        DAz = DA[:,iprior]
        muz = mu[:,iprior]# these should be of the form (nsample,nz)
        return self.constraint(DHz,DAz,muz)
class GWPosterior(Posterior):
    """Posterior constraint on the comoving distance DC(z).

    Args:
        name(str): Name to associate with this posterior.
        zpost(float): Redshift of posterior constraint.
        DCz(float): Central values of DC(z).
        err_DC(float): Covariance of DC(z).
    """
    def __init__(self, name, zpost, DCz, err_DC):
        # Gaussian likelihood around the measured comoving distance.
        self.pdf = GaussianPdf(DCz, err_DC)
        Posterior.__init__(self, name, zpost)

    def constraint(self, DHz, DAz, muz):
        """Calculate -log(prob) for this posterior for a set of expansion histories.

        Args:
            DHz(ndarray): Array of DH(zpost) values to use (ignored).
            DAz(ndarray): Array of DA(zpost) values to use.
            muz(ndarray): Array of mu(zpost) values to use (ignored).

        Returns:
            ndarray: Array of -log(prob) values calculated at each input value.
        """
        return self.pdf.get_nlp(DAz)
class SNPosterior(Posterior):
    """Posterior constraint on the supernova distance modulus mu(z).

    Args:
        name(str): Name to associate with this posterior.
        zpost(float): Redshift of posterior constraint.
        mu(float): Central value of mu*(z): actually mu(z)-(M_1=19.05).
        mu_error(float): RMS error on mu(z).
    """
    def __init__(self, name, zpost, mu, mu_error):
        # Gaussian likelihood around the measured distance modulus.
        self.pdf = GaussianPdf(mu, mu_error)
        Posterior.__init__(self, name, zpost)

    def constraint(self, DHz, DAz, muz):
        """Calculate -log(prob) for this posterior for a set of expansion histories.

        Args:
            DHz(ndarray): Array of DH(zpost) values to use (ignored).
            DAz(ndarray): Array of DA(zpost) values to use (also ignored).
            muz(ndarray): Array of mu(zpost) values, i.e. 5log(DL)+25.

        Returns:
            ndarray: Array of -log(prob) values calculated at each input value.
        """
        return self.pdf.get_nlp(muz)
| true |
57f356579954c46aee8c73dd56e8c4cbff3336a5 | Python | HybridNeos/Graph-Mining-Project | /quickGraph.py | UTF-8 | 8,211 | 3.078125 | 3 | [] | no_license | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from functools import reduce
from copy import deepcopy
from itertools import combinations
def getTrainTest(df, TrainSchool, TestSchool, asMatrix=True, includeFreshmen=True, yearMethod="none"):
    """Split the athlete dataframe into train/test sets keyed by school.

    Args:
        df: Dataframe with athlete metadata in columns 0-4 (including
            "School", "Year" and "Event") and per-event counts from column 5 on.
        TrainSchool: School whose rows become the training set.
        TestSchool: School whose rows become the test set.
        asMatrix: If True return numpy arrays, otherwise pandas objects.
        includeFreshmen: If False, drop athletes with Year == 1.
        yearMethod: "divide" scales each athlete's event counts by their year,
            "proportion" normalizes each row of counts to sum to 1, "feature"
            adds the year as an extra input column, "none" leaves counts as-is.

    Returns:
        Tuple (X_train, y_train, X_test, y_test).
    """
    if not includeFreshmen:
        df = df[df["Year"] != 1]
    if yearMethod == "divide":
        # Cast count columns to float so the row-wise scaling keeps precision.
        df = df.astype({column: float for column in df.columns[5:]})
        for i in range(df.shape[0]):
            df.iloc[i,5:] /= df.iloc[i]["Year"]
    elif yearMethod == "proportion":
        df = df.astype({column: float for column in df.columns[5:]})
        for i in range(df.shape[0]):
            df.iloc[i,5:] /= df.iloc[i, 5:].sum()
    # Partition rows by school: one school trains, another tests.
    Train = df[df["School"] == TrainSchool]
    Test = df[df["School"] == TestSchool]
    X_train = Train.iloc[:, 5:]
    y_train = Train["Event"]
    X_test = Test.iloc[:, 5:]
    y_test = Test["Event"]
    if yearMethod == "feature":
        # Expose the athlete's year to the model as an additional feature.
        X_train.insert(0, "Year", Train["Year"])
        X_test.insert(0, "Year", Test["Year"])
    if asMatrix:
        return X_train.to_numpy(), y_train.values, X_test.to_numpy(), y_test.values
    else:
        return X_train, y_train, X_test, y_test
# make the graph
def makeGraph(data):
    """Build a bipartite athlete/event graph from the per-athlete event counts."""
    G = nx.Graph()
    # One "athlete" node per dataframe row, carrying the athlete metadata.
    for athlete in data.index:
        meta = data.loc[athlete]
        G.add_node(
            athlete,
            type="athlete",
            Gender=meta[0],
            School=meta[1],
            Year=meta[3],
            Event=meta[4],
        )
    # One "event" node per count column, annotated with participation totals.
    for event in data.columns[5:]:
        counts = data[event]
        G.add_node(
            event,
            type="event",
            uniqueAthletes=sum([1 if x != 0 else 0 for x in counts]),
            TimesDone=sum(counts),
        )
    # Connect each athlete to every event they competed in at least once.
    for athlete in data.index:
        for event in data.columns[5:]:
            times = data.loc[athlete][event]
            if times != 0:
                G.add_edge(athlete, event, timesCompeted=times)
    return G
# Output the graph
def drawGraph(G, labels=0):
    """Draw the athlete/event graph with matplotlib.

    labels=0 hides every label; labels=2 shows athlete initials; event nodes
    are labelled with their own name in any non-zero mode.
    """
    def labelNode(node, labels):
        # Label mode 0 hides everything.
        if labels == 0:
            return ""
        if "," not in node:
            # Event nodes never contain a comma: show their name.
            return node
        # Athlete nodes are "Last, First": mode 2 shows their initials.
        if labels == 2:
            last, first = node.split(", ")
            return first[0] + last[0]
        return ""

    # Athletes in blue, events in red.
    colors = []
    for node in G.nodes(data=True):
        colors.append("Blue" if node[1]["type"] == "athlete" else "Red")
    labels = {node: labelNode(node, labels) for node in G.nodes()}
    nx.draw(G, labels=labels, node_color=colors)
    plt.show()
def setCheck(candidateEvents, uniqueMaximals):
    """Compare a candidate event set against the current maximal event sets.

    Returns a tuple (supersets, subsets) of athlete names drawn from
    uniqueMaximals: `supersets` holds at most one athlete whose event set
    contains candidateEvents (issuperset includes equality, so an exact match
    counts), and `subsets` holds athletes whose event sets are contained in
    candidateEvents. The scan stops as soon as a superset is found, since one
    superset is enough to reject the candidate.
    """
    supersets, subsets = set(), set()
    for athlete, events in uniqueMaximals.items():
        if events >= candidateEvents:
            # Candidate is already covered by an existing maximal set.
            supersets.add(athlete)
            break
        if events <= candidateEvents:
            subsets.add(athlete)
    return supersets, subsets
def getUniqueMaximals(subsets):
    """Reduce an athlete -> event-set mapping to its maximal (non-contained) sets.

    An athlete is kept only while no other kept athlete's event set contains
    theirs; previously-kept athletes whose sets become contained by a new
    candidate are dropped.
    """
    uniqueMaximals = {}
    for athlete, events in subsets.items():
        # people in uniqueMaximals who are subsets/supersets of events
        supersets, subsets = setCheck(events, uniqueMaximals)
        if not supersets:
            uniqueMaximals[athlete] = events
        # remove any subsets in uniqueMaximals from adding the new events
        # (when a superset exists, `subsets` is necessarily empty -- two kept
        # maximal sets can't be nested through the candidate -- so this loop
        # only fires when the candidate was actually added above)
        for noLongerMaximal in subsets:
            del uniqueMaximals[noLongerMaximal]
    return uniqueMaximals
def subGraph(subsets):
    """Build a bipartite graph restricted to the given athlete -> events map."""
    H = nx.Graph()
    # Athlete nodes on one side of the bipartite graph...
    for athlete in subsets:
        H.add_node(athlete, type="athlete")
    # ...and every event mentioned by at least one athlete on the other.
    allEvents = reduce(lambda acc, more: acc.union(more), subsets.values())
    for event in allEvents:
        H.add_node(event, type="event")
    # Connect each athlete to each of their events.
    for athlete, events in subsets.items():
        for event in events:
            H.add_edge(athlete, event)
    return H
# taken from martinbroadhurst.com/greedy-set-cover-in-python.html
def set_cover(universe, subsets):
    """Greedily find a family of subsets that covers the universal set.

    Returns None when the subsets cannot cover the universe (their union is a
    proper subset of it); otherwise returns the chosen subsets in the order
    they were picked. Adapted from
    martinbroadhurst.com/greedy-set-cover-in-python.html.
    """
    elements = {e for s in subsets for e in s}
    # Bail out if the available subsets fall short of the universe.
    if elements.issubset(universe) and elements != universe:
        return None
    covered, cover = set(), []
    # Repeatedly grab the subset contributing the most uncovered elements.
    while covered != elements:
        best = max(subsets, key=lambda s: len(s - covered))
        cover.append(best)
        covered |= best
    return cover
def removeMultiEvents(subsets, G):
    """Strip the component events implied by multi-event entries (Pent/Hep/Dec).

    Operates on a deep copy so the caller's mapping is untouched; the -=
    operations below mutate only the copied sets.
    """
    newSubsets = deepcopy(subsets)
    for athlete, events in subsets.items():
        if "Pent" in events:
            # Events bundled in the pentathlon entry.
            newSubsets[athlete] -= set(["HJ", "SP", "LJ", "60H", "800"])
        if "Hep" in events:
            if G.nodes(data=True)[athlete]["Gender"] == "F":
                # Women's heptathlon component events.
                newSubsets[athlete] -= set(["100H", "HJ", "SP", "200", "LJ", "JT", "800"])
            else:
                # Men's heptathlon component events.
                newSubsets[athlete] -= set(["1000", "60", "LJ", "SP", "HJ", "60H", "PV"])
        if "Dec" in events:
            # Decathlon component events.
            newSubsets[athlete] -= set(["100", "LJ", "SP", "HJ", "400", "110H", "DT", "PV", "JT", "1500"])
    return newSubsets
"""def optimizeFurther(subsets, G):
# find the degree 1 events, who does them, and those people's events
lonelyEvents = [node for node, degree in G.degree() if degree == 1]
H = G.copy()
# find the people
trivialPeople = set()
for event in lonelyEvents:
trivialPeople |= set(H[event])
# find the events
trivialEvents = set()
for person in trivialPeople:
trivialEvents |= set(H[person])
# remove them
for node in trivialPeople.union(trivialEvents):
H.remove_node(node)
return {node: set(H[node]) for node in H.nodes() if node in subsets.keys()}, trivialPeople, trivialEvents
### MAKES GREEDY WORSE
# Remove the 2-hop neighborhood of degree 1 vertices
if False:
reducedMaxAthletes, trivialPeople, trivialEvents = optimizeFurther(maxAthletes, H)
maxAthletes = reducedMaxAthletes
universe -= trivialEvents"""
if __name__ == "__main__":
df = pd.read_csv("./fullDetails.csv", index_col=0)
#X_train, y_train, X_test, y_test = getTrainTest(df, "RPI", "RIT", True)
G = makeGraph(df[df.School == "RPI"])
#drawGraph(G, 2)
events = [node for node, stuff in dict(G.nodes(data=True)).items() if stuff["type"] == "event"]
sorted([x for x in nx.eigenvector_centrality(G).items() if x[0] in events], key=lambda x: x[1], reverse=True)
"""
# get sets for set cover problem
# need minimal sets, handle multis, keep track of names
universe = set(df.columns[5:])
subsets = {
node[0]: set(G[node[0]])
for node in G.nodes(data=True)
if node[1]["type"] == "athlete"
}
subsets = removeMultiEvents(subsets, G)
# Initial pass
maxAthletes = getUniqueMaximals(subsets)
H = subGraph(maxAthletes)
#drawGraph(H, 2)
# Greedy algorithm
greedy_cover = set_cover(universe, list(maxAthletes.values()))
greedy_cover = {
athlete: events
for athlete, events in maxAthletes.items()
if events in greedy_cover
}
J = subGraph(greedy_cover)
#drawGraph(J, 2)
# all combinations
combinations_cover = {}
#for comb in combinations(maxAthletes.values(), 11):
# events = reduce(lambda x, y: x.union(y), comb)
# if events == universe:
# combinations_cover = comb
# break
#combinations_cover = {
# athlete: events
# for athlete, events in maxAthletes.items()
# if events in combinations_cover
#}
K = subGraph(combinations_cover)
drawGraph(K, 2)
"""
| true |
9640da1dc5b0dc34519ff3768546c193c2509bcc | Python | gabriele-tasca/tesi2021 | /image_scripts/triang_area/triang_area.py | UTF-8 | 345 | 2.578125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import fract
df = pd.read_csv("stats.csv", sep=";")
ratios = df["Area Ratio"]
# df[ df["Nome"] == "Adamello"]
np.min(ratios)
plt.hist(ratios, bins=20)
plt.xlabel("Area Ratio")
plt.savefig("area-ratio-hist.png", bbox_inches='tight')
# np.mean(ratios)
# np.std(ratios) | true |
949127c5a32445ebfa67a8bf418d0a5a3fefecd3 | Python | vilmarschmelzer/clean-arch | /src/python/products/entities/category.py | UTF-8 | 586 | 2.734375 | 3 | [] | no_license | from abc import ABC
from typing import List
from dataclasses import dataclass
@dataclass
class Category:
    """Product category entity."""
    # Human-readable category name.
    name: str
    # NOTE(review): defaults to None, so the annotation is effectively
    # Optional[int]; presumably assigned by the persistence layer -- confirm.
    id: int = None
class CategoriesIfRepo(ABC):
    """Repository interface for Category persistence.

    NOTE(review): the methods raise NotImplementedError manually instead of
    being decorated with @abstractmethod, so incomplete subclasses can still
    be instantiated; decorate them if strict enforcement is wanted.
    """
    def get(self, uid: int) -> Category:
        """Return the category with the given id."""
        raise NotImplementedError
    def get_all(self) -> List[Category]:
        """Return every stored category."""
        raise NotImplementedError
    def create(self, category: Category) -> Category:
        """Persist a new category; declared to return the stored entity."""
        raise NotImplementedError
    def update(self, category: Category) -> Category:
        """Update an existing category; declared to return the stored entity."""
        raise NotImplementedError
    def delete(self, category: Category) -> bool:
        """Delete a category; declared to return a success flag."""
        raise NotImplementedError
| true |
bf2cce77d39c988fbf2d3b9d685f3c67c41b2736 | Python | AphroditesChild/Codewars-Katas | /alphabet position.py | UTF-8 | 156 | 3.15625 | 3 | [] | no_license | def alphabet_position(text):
    # Lookup table: index of a letter in this string is its 0-based position.
    abc = 'abcdefghijklmnopqrstuvwxyz'
    # Map each alphabetic character (case-insensitive) to its 1-based position,
    # skipping anything that is not a letter, and join with single spaces.
    return " ".join([str(abc.index(i.lower())+1) for i in text if i.lower() in abc])
| true |
081adeff91053ae9236d281f8ab18423692969c1 | Python | tianhaoz95/py-recurring-investment | /src/core/maybe_invest.py | UTF-8 | 2,037 | 2.6875 | 3 | [
"MIT"
] | permissive | from core.user_context import UserContext
from src.core.stock_context import StockContext
import alpaca_trade_api as tradeapi
class StockFeasibilityChecker():
    """Callable filter deciding whether a stock should be considered.

    NOTE(review): currently a stub -- it rejects every stock, so maybe_invest
    never acts on anything until this is implemented.
    """
    def __init__(self, user_context: UserContext) -> None:
        self.user_context = user_context
    def __call__(self, stock_context: StockContext) -> bool:
        # Stub: no stock is considered feasible yet.
        return False
class StockComparator():
    """Three-way comparator ranking two stocks by investment preference.

    NOTE(review): maybe_invest passes an instance directly as sorted(key=...),
    but __call__ takes two arguments -- it needs functools.cmp_to_key to be
    usable as a comparator. Currently a stub returning 0 (no ordering).
    """
    def __init__(self, user_context: UserContext) -> None:
        self.user_context = user_context
    def __call__(self, lhs: StockContext, rhs: StockContext) -> int:
        # Stub: treat all stocks as equal.
        return 0
class StockActionExecutor():
    """Callable that executes the investment action for a ranked stock.

    NOTE(review): stub -- no order is actually submitted yet.
    """
    def __init__(self, user_context: UserContext) -> None:
        self.user_context = user_context
    def __call__(self, stock_context: StockContext) -> None:
        # Stub: intentionally does nothing.
        pass
def maybe_invest(api: tradeapi.REST) -> None:
    """Evaluate candidate stocks and (potentially) place recurring orders.

    Pipeline: build contexts for the target symbols, filter out infeasible
    ones, rank the rest by the comparator, then hand each context to the
    executor in ranked order.

    Args:
        api: Authenticated Alpaca trade API client.
    """
    # Stdlib import kept local so the module's import block is unchanged.
    from functools import cmp_to_key

    target_symbols = ['AAPL', 'VOO']
    user_context = UserContext()
    user_context.set_account(api.get_account())
    stock_context_list = [
        StockContext(target_symbol) for target_symbol in target_symbols
    ]
    # Populate stock context list for potential investment.
    for stock_context in stock_context_list:
        last_trade = api.get_last_trade(stock_context.symbol)
        stock_context.set_last_trade(last_trade)
    # Filter out a list of stocks that should be considered
    # for investment at the moment.
    stock_feasibility_checker = StockFeasibilityChecker(user_context)
    filtered_stock_context_list = filter(stock_feasibility_checker,
                                         stock_context_list)
    # Rank the potential list of stocks to invest based on
    # the potential growth.
    stock_comparator = StockComparator(user_context)
    # BUG FIX: StockComparator.__call__ is a two-argument comparator, not a
    # key function; passing it directly as key= raised TypeError. Wrap it
    # with cmp_to_key so sorted() can use it.
    ranked_stock_context_list = sorted(filtered_stock_context_list,
                                       key=cmp_to_key(stock_comparator))
    # Send order to invest if fund is sufficient
    stock_action_executor = StockActionExecutor(user_context)
    for ranked_stock_context in ranked_stock_context_list:
        stock_action_executor(ranked_stock_context)
| true |
d9b8ff3a41aea826ca7e813ac85784339d98f860 | Python | aherschend/OOP | /CarClass.py | UTF-8 | 728 | 3.15625 | 3 | [] | no_license | class Car:
def __init__(self,model,make,speed):
self.__year_model = model
self.__make = make
self.__speed = 0
    def set_year__model(self,model):
        # Setter for the model year. NOTE(review): the double underscore in
        # the method name looks like a typo for set_year_model; kept because
        # callers may depend on it.
        self.__year_model = model
    def set_make(self,make):
        # Setter for the make.
        self.__make = make
def set_speed(self,speed):
self.__speed = 0
    #get the returns
    def get_year_model(self):
        # Accessor for the model year.
        return self.__year_model
    def get_make(self):
        # Accessor for the make.
        return self.__make
    def get_speed(self):
        # Accessor for the current speed.
        # NOTE(review): shadowed by the duplicate get_speed defined at the
        # bottom of the class; this definition never takes effect.
        return self.__speed
    #get the methods for accelerate and slow down
    def accelerate(self):
        # Increase speed by a fixed 5 units.
        self.__speed += 5
    def brake(self):
        # Decrease speed by a fixed 5 units.
        # NOTE(review): nothing prevents the speed from going negative.
        self.__speed -=5
    def get_speed(self):
        # NOTE(review): duplicate of the get_speed defined above; this later
        # definition is the one that takes effect. One should be removed.
        return self.__speed
| true |
0e1e5c9e075726031a68cbe63c93573206019c2c | Python | j0hnsmith/local_files | /tests.py | UTF-8 | 1,918 | 2.578125 | 3 | [] | no_license | import json
import os
import unittest
from config import PROJECT_ROOT
from app import app, db, tasks
from app.models import File
class TestCase(unittest.TestCase):
    """Integration tests for the local-files Flask app and its file-scan task."""

    def setUp(self):
        # Per-test sqlite database with a freshly created schema.
        app.config['TESTING'] = True
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(PROJECT_ROOT, 'test.db')
        self.app = app.test_client()
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def test_home(self):
        """The home page renders."""
        resp = self.app.get('/')
        self.assertIn('Hello World', resp.data)

    def test_run_tasks_page(self):
        """The task-trigger endpoint acknowledges the request."""
        resp = self.app.get('/run-task-to-get-files')
        self.assertIn('task added', resp.data)

    def test_get_local_files(self):
        """Running the scan task populates the File table."""
        # check no files to start with
        count = File.query.count()
        self.assertEqual(count, 0)
        # now scan for files and check we get some
        tasks.get_local_files()
        count = File.query.count()
        self.assertTrue(count > 0)

    def test_api_all_files(self):
        """The file-list API errors when empty and lists files after a scan."""
        # check error response works
        resp = self.app.get('/api/v1/files/')
        data = json.loads(resp.data)
        # BUG FIX: dict.has_key() was removed in Python 3; a membership test
        # is equivalent and works on both Python 2 and 3.
        self.assertIn('error', data)
        # now scan for files and ensure response contains some
        tasks.get_local_files()
        resp = self.app.get('/api/v1/files/')
        data = json.loads(resp.data)
        self.assertTrue(len(data) > 0)

    def test_api_single_file(self):
        """The single-file API errors on unknown ids and returns file details."""
        tasks.get_local_files()
        # check error response works
        resp = self.app.get('/api/v1/files/9876543210')
        data = json.loads(resp.data)
        # BUG FIX: has_key() replaced with a py2/py3-compatible membership test.
        self.assertIn('error', data)
        # now check for good response
        resp = self.app.get('/api/v1/files/1')
        data = json.loads(resp.data)
        self.assertIn('name', data)
        self.assertIn('dir_path', data)
if __name__ == '__main__':
unittest.main()
| true |
36b3cb3bbfc48f3cb7bfefdde4ce4e4d5af4060f | Python | csduarte/FunPy | /csduarte/ex05/sd3.py | UTF-8 | 673 | 2.953125 | 3 | [] | no_license | # %d - Signed integer decimal
# %i - Signed integer decimal
# %o - unsigned octal
# %u - unsigned decimal
# %x - unsigned hexadecimal(lowercase)
# %X - unsigned hexadecimal(uppercase)
# %e - Floating point exponent format(lowercase)
# %E - Floating point exponent format(uppercase)
# %f - Floating point decimal format
# %F - Floating point decimal format
# %g - Same as "e" if precision less than -4, otherwise "f"
# %G - Same as "E" if precision less than -4, otherwise "F"
# %c - Single Character, accepts integer or character string
# %r - String, converted using repr()
# %s - String, converted using str()
# % - No argument is converted, results in a '%' character
| true |
5b68e88fc1768d2909d5445b458385ac205d8b24 | Python | CivMap/CivMap | /render/large_tiles.py | UTF-8 | 1,784 | 3.1875 | 3 | [] | no_license | import os
import sys
from PIL import Image
def stitch_four(size, x, z, out_path, in_path):
    """Stitch up to four small tiles into one downscaled 256x256 tile.

    Args:
        size: Width in pixels of one small (square) input tile.
        x, z: Tile coordinates of the north-west small tile; the tiles at
            (x, z+1), (x+1, z) and (x+1, z+1) complete the 2x2 square.
        out_path: Path of the stitched output PNG.
        in_path: Directory containing the small "<x>,<z>.png" tiles.

    Missing input tiles are simply left transparent in the output.
    """
    out = Image.new('RGBA', (2 * size, 2 * size))
    # (tile coordinate, paste offset) for each quadrant of the 2x2 canvas.
    quadrants = [
        ((x, z), (0, 0)),                # north-west
        ((x, z + 1), (0, size)),         # south-west
        ((x + 1, z), (size, 0)),         # north-east
        ((x + 1, z + 1), (size, size)),  # south-east
    ]
    for (tx, tz), box in quadrants:
        tile_path = in_path + '/%i,%i.png' % (tx, tz)
        if os.path.isfile(tile_path):
            # BUG FIX: close each source image promptly instead of leaking
            # the file handle (Image.open was never closed before).
            with Image.open(tile_path) as tile:
                out.paste(im=tile, box=box)
    out.thumbnail((256, 256))
    out.save(out_path, 'PNG')
def stitch_all(out_path, in_path):
    """Stitch every 2x2 group of small tiles in in_path into large tiles.

    Large tile (x, z) is built from small tiles (2x, 2z)..(2x+1, 2z+1).
    """
    os.makedirs(out_path, exist_ok=True)
    # Parse "x,z" integer tile coordinates from the small-tile filenames.
    tiles = [tuple(map(int, region[:-4].split(',')))
             for region in os.listdir(in_path)
             if region[-4:] == '.png']
    # Probe one tile for the (square) tile size.
    # BUG FIX: close the probe image instead of leaking its file handle.
    with Image.open(in_path + '/%i,%i.png' % tiles[0]) as probe:
        size = probe.size[0]
    # Consistent (x, z) naming throughout (the original mixed x,y and x,z).
    min_x = min(x for x, z in tiles) // 2
    min_z = min(z for x, z in tiles) // 2
    max_x = max(x for x, z in tiles) // 2
    max_z = max(z for x, z in tiles) // 2
    for x in range(min_x, max_x + 1):
        for z in range(min_z, max_z + 1):
            out_tile = out_path + '/%i,%i.png' % (x, z)
            stitch_four(size, 2 * x, 2 * z, out_tile, in_path)
def usage():
    # Print CLI usage and exit with a non-zero status.
    print('Args: <large tiles out path> <small tiles in path>')
    sys.exit(1)
def main(args):
    # Expect exactly two positional arguments after the program name; the
    # unpacking raises ValueError when fewer than two are present.
    try:
        out_path, in_path = args[1:3]
    except ValueError:
        usage()
    stitch_all(out_path, in_path)
if __name__ == '__main__':
    main(sys.argv)
main(sys.argv)
| true |
0324e26c4e3130ca1cf8c7da044f292bf442ec4b | Python | deriktruyts/Python | /Python3_-_Mundo1_-_Fundamentos/EXERCÍCIOSPython01/Ex_Aula_07/Ex009(TABUADA).py | UTF-8 | 1,228 | 4.21875 | 4 | [] | no_license | # Desafio 009 - Aula 07
# Create a program that reads an integer and shows its multiplication table.
# ------------------------------------------------------------------------
from time import sleep
# Read the number whose multiplication table will be printed
# (prompt text is intentionally kept in Portuguese).
num = int(input('Insira um número inteiro: '))
print('--------------------------')
# Precompute each row of the table (1x .. 10x).
t1 = num * 1
t2 = num * 2
t3 = num * 3
t4 = num * 4
t5 = num * 5
t6 = num * 6
t7 = num * 7
t8 = num * 8
t9 = num * 9
t10= num * 10
# Cosmetic "processing..." pause with ANSI-colored output.
print('\033[1:33mProcessando...\033[m')
print('--------------------------')
sleep(3)
# Print the whole table in one formatted, color-coded block.
print(' \033[1:36mTABUADA DO NÚMERO:\033[m \033[1:30m{}\033[m\n'
      ' \033[1:30m>\033[m {} x 1 = {}\n'
      ' \033[1:30m>\033[m {} x 2 = {}\n'
      ' \033[1:30m>\033[m {} x 3 = {}\n'
      ' \033[1:30m>\033[m {} x 4 = {}\n'
      ' \033[1:30m>\033[m {} x 5 = {}\n'
      ' \033[1:30m>\033[m {} x 6 = {}\n'
      ' \033[1:30m>\033[m {} x 7 = {}\n'
      ' \033[1:30m>\033[m {} x 8 = {}\n'
      ' \033[1:30m>\033[m {} x 9 = {}\n'
      ' \033[1:30m>\033[m {} x 10 = {}'.format(num, num, t1, num, t2, num, t3, num, t4, num, t5, num, t6, num, t7,
                                               num, t8, num, t9, num, t10))
print('--------------------------') | true |
561ffce61848881ced0dced72d8e4f574cf7af7d | Python | 709867472/Amazon-Review-Classifier-Using-Scikit-Learn | /code/my_method.py | UTF-8 | 2,034 | 3.40625 | 3 | [] | no_license | import csv
import sklearn
import nltk
from nltk.corpus import stopwords
import re
import time
start_time = time.time()
inputFile = open("reviews.csv")
reader = csv.reader(inputFile, delimiter='|')
# NOTE(review): next(reader) is called here AND again a few lines below, so
# the first TWO rows are skipped -- confirm the CSV really has two
# header-like rows, otherwise one data row is silently dropped.
next(reader)
# get all the stopWords and put them into set
stopWords = set(stopwords.words('english'))
# skip first line
next(reader)
labels, text = [], []
for row in reader:
    labels.append(row[0])
    # split the reviews using characters except alphabetic letters, numbers and single quote
    text.append(re.split("[^a-zA-Z0-9']+", row[1].lower()))
# for each word, we count how many times it appears in positive reviews and how many times it
# appears in negative reviews
goodCount, badCount = {}, {}
for i in range(len(text)):
    # Every 5th review is held out as the test set and excluded from counts.
    if (i + 1) % 5 == 0: continue
    for word in text[i]:
        if word in stopWords: continue
        if labels[i] == "positive":
            if word in goodCount: goodCount.update({word: goodCount[word] + 1})
            else: goodCount.update({word: 1})
        else:
            if word in badCount: badCount.update({word: badCount[word] + 1})
            else: badCount.update({word: 1})
# we assume that for each word, number of times it appears in positive word / total number of
# times it appears in reviews is "goodness". For each review, we sum up the goodness of all to
# get the goodness of the review, if it larger than 0.5, it is a positive review
total, count = 0, 0
for i in range(len(text)):
    if (i + 1) % 5 ==0:
        total += 1
        goodSum, badSum = 0, 0
        for word in text[i]:
            good = goodCount[word] if word in goodCount else 0
            bad = badCount[word] if word in badCount else 0
            if good == 0 and bad == 0: continue
            goodSum += float(good) / (good + bad)
            badSum += float(bad) / (good + bad)
        sentiment = "positive" if goodSum > badSum else "negative"
        if sentiment == labels[i]: count += 1
# Report held-out accuracy and total runtime.
print(str(float(count) / total) + '\n')
print("--- %s seconds ---" % (time.time() - start_time))
| true |
c5d504886d45b8a6989c5fe5691df4c7162181f0 | Python | nish1998/pubg_predictor | /predict.py | UTF-8 | 1,333 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 22:54:38 2018
@author: nishant
"""
# Importing the libraries
import numpy as np
import pandas as pd
import pickle
# Importing the dataset
dataset = pd.read_csv('train.csv')
testdataset = pd.read_csv('test.csv')
# Columns 4-24 hold the feature values, column 25 the target (winPlacePerc).
X = dataset.iloc[:, 4:25].values
y = dataset.iloc[:, 25:26].values
X_test = testdataset.iloc[:, 4:25].values
y_test = testdataset.iloc[:, 25:26].values
# Fitting Decision Tree Regression to the dataset
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)
# Predicting a new result
y_pred = regressor.predict(X_test)
# saving to csv (column 0 of the test set is assumed to be the Id column)
data=[]
df = pd.DataFrame(data)
df = pd.DataFrame(data,columns=['Id','winPlacePerc'],dtype=float)
df['Id']=testdataset.iloc[:, 0].values
df['winPlacePerc']=y_pred
# NOTE(review): sep='\t' writes a tab-separated file despite the .csv name.
df.to_csv("sample_submission.csv", sep='\t', encoding='utf-8', index=False)
##############################################################
####          saving and loading a model                  ####
##############################################################
# saving model -- NOTE(review): the file handles passed to pickle below are
# never explicitly closed; prefer `with open(...)`.
filename = 'final_model.sav'
pickle.dump(regressor, open(filename, 'wb'))
# load the model from disk
loaded_model = pickle.load(open(filename, 'rb'))
# predict again with the reloaded model (round-trip sanity check)
y_pred = loaded_model.predict(X_test)
| true |
9529147578698383b3e742c9a9e9988a8f8208fd | Python | LeeMoonCh/Arithmetic | /com/Bayes/test.py | UTF-8 | 177 | 2.765625 | 3 | [] | no_license | #conding:utf8
from math import log
#from numpy import *
#list = [[1,3,4,5,6,0,8],[1,2,3,4]]
#
#list = array(list)
#print sum(list[0])
# NOTE(review): Python 2 print statement; log(0.5, 2) is log base 2 of 0.5.
print log(0.5,2)
| true |
01bfca27b96caabe372d57093bea5be6ddca3ba7 | Python | zaproxy/community-scripts | /payloadprocessor/sqlmap - charencode.py | UTF-8 | 503 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | import string
import time
def process(payload):
    """Percent-encode a payload, preserving already-encoded %XX triplets.

    Every character is rewritten as %XX (uppercase hex of its ordinal) except
    sequences that already look like a valid percent-escape ("%" followed by
    two hex digits), which are copied through unchanged. Falsy payloads
    (None, "") are returned as-is.
    """
    retVal = payload
    if payload:
        retVal = ""
        i = 0
        while i < len(payload):
            if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
                # Keep an existing %XX escape as-is.
                retVal += payload[i:i + 3]
                i += 3
            else:
                # Encode this single character as %XX.
                retVal += '%%%.2X' % ord(payload[i])
                i += 1
    # BUG FIX: removed a leftover time.sleep(10) that delayed every single
    # payload by 10 seconds, making the processor unusably slow.
    return retVal
| true |
a40910489379ba2c508e78063fab9077f1744574 | Python | oliviervg1/inventory-manager | /backend/test/test_app.py | UTF-8 | 10,254 | 2.65625 | 3 | [] | no_license | import unittest
import json
from db import Session, tables, bind_session_engine
from models import Room, Item
from app import app
class AppTestCase(unittest.TestCase):
    """End-to-end tests for the inventory-manager REST API.

    Each test runs against a fresh in-memory sqlite database seeded with one
    room ("living-room") containing one item ("TV").
    """
    def setUp(self):
        # Fresh test client and in-memory schema per test.
        app.config["TESTING"] = True
        self.app = app.test_client()
        engine = bind_session_engine("sqlite:///:memory:")
        metadata = tables.metadata
        metadata.create_all(bind=engine)
        self.session = Session()
        # Fixture data
        # NOTE(review): weight is passed as the string "80" while assertions
        # below expect the integer 80 -- presumably the column coerces it;
        # confirm against the Item model.
        self.room = Room("living-room")
        self.item = Item("TV", self.room, "80", "plasma screen", True)
        self.session.add_all([self.room, self.item])
        self.session.commit()
    def tearDown(self):
        # Drop the scoped session so the next test starts clean.
        Session.remove()
    def test_routing_error(self):
        """Unknown URLs return a JSON 404 payload."""
        r = self.app.get("/this/api/does/not/exist/yet")
        self.assertEqual(r.status_code, 404)
        self.assertEqual(
            json.loads(r.data),
            {
                "error": ("The requested URL was not found on the server. "
                          " If you entered the URL manually please check your "
                          "spelling and try again.")
            }
        )
    def test_invalid_json_fails(self):
        """Malformed JSON bodies yield a 400 with the framework's message."""
        r = self.app.post(
            "/rooms",
            content_type="application/json",
            data="THIS IS NOT JSON"
        )
        self.assertEqual(r.status_code, 400)
        self.assertEqual(
            json.loads(r.data),
            {
                "error": "The browser (or proxy) sent a request that this "
                         "server could not understand."
            }
        )
    def test_add_room_already_exists(self):
        """POSTing a duplicate room name returns 409."""
        r = self.app.post(
            "/rooms",
            content_type="application/json",
            data=json.dumps({"name": "living-room"})
        )
        self.assertEqual(r.status_code, 409)
        self.assertEqual(json.loads(r.data), {"error": "Already exists"})
    def test_add_room_success(self):
        """POSTing a new room returns 201 with its ref and persists it."""
        r = self.app.post(
            "/rooms",
            content_type="application/json",
            data=json.dumps({"name": "kitchen"})
        )
        self.assertEqual(r.status_code, 201)
        self.assertEqual(json.loads(r.data), {"ref": "/rooms/kitchen"})
        # Check room has been added
        Session.remove()
        new_session = Session()
        self.assertEqual(len(new_session.query(Room).all()), 2)
    def test_remove_room_doesnt_exist(self):
        """DELETE on an unknown room returns 404."""
        r = self.app.delete(
            "/rooms",
            content_type="application/json",
            data=json.dumps({"name": "doesnt-exist"})
        )
        self.assertEqual(r.status_code, 404)
        self.assertEqual(json.loads(r.data), {"error": "Not found"})
    def test_remove_room_success(self):
        """Deleting a room cascades to its items."""
        r = self.app.delete(
            "/rooms",
            content_type="application/json",
            data=json.dumps({"name": "living-room"})
        )
        self.assertEqual(r.status_code, 201)
        self.assertEqual(json.loads(r.data), {})
        # Check room and related items have been deleted
        Session.remove()
        new_session = Session()
        self.assertEqual(len(new_session.query(Room).all()), 0)
        self.assertEqual(len(new_session.query(Item).all()), 0)
    def test_get_item(self):
        """GET of an existing item returns its full JSON representation."""
        r = self.app.get("/rooms/living-room/items/TV")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            json.loads(r.data),
            {
                "description": "plasma screen",
                "is_fragile": True,
                "name": "TV",
                "weight": 80
            }
        )
    def test_add_item_room_doesnt_exist(self):
        """PUT of an item into an unknown room returns 404."""
        r = self.app.put(
            "/rooms/non-existant/items/fridge",
            content_type="application/json",
            data=json.dumps({"name": "doesnt-exist"})
        )
        self.assertEqual(r.status_code, 404)
        self.assertEqual(json.loads(r.data), {"error": "Not found"})
    def test_add_item_success(self):
        """PUT of a new item creates it and returns its ref."""
        r = self.app.put(
            "/rooms/living-room/items/couch",
            content_type="application/json",
            data=json.dumps(
                {
                    "weight": 65,
                    "description": "Something to sit on",
                    "is_fragile": False
                }
            )
        )
        self.assertEqual(r.status_code, 201)
        self.assertEqual(
            json.loads(r.data), {"ref": "/rooms/living-room/items/couch"}
        )
        # Check item has been added
        Session.remove()
        new_session = Session()
        item = new_session.query(Item).filter_by(name="couch").one()
        self.assertEqual(item.description, "Something to sit on")
    def test_update_item_success(self):
        """PUT of an existing item updates it in place (no duplicate rows)."""
        r = self.app.put(
            "/rooms/living-room/items/TV",
            content_type="application/json",
            data=json.dumps(
                {
                    "weight": 120,
                    "description": "Cathode tv",
                    "is_fragile": False
                }
            )
        )
        self.assertEqual(r.status_code, 201)
        self.assertEqual(
            json.loads(r.data), {"ref": "/rooms/living-room/items/TV"}
        )
        # Check item has been updated
        Session.remove()
        new_session = Session()
        items = new_session.query(Item).all()
        self.assertEqual(len(items), 1)
        self.assertEqual(items[0].description, "Cathode tv")
    def test_remove_item_doesnt_exist(self):
        """DELETE of an unknown item returns 404."""
        r = self.app.delete("/rooms/living-room/items/non-existant")
        self.assertEqual(r.status_code, 404)
        self.assertEqual(json.loads(r.data), {"error": "Not found"})
    def test_remove_item_success(self):
        """Deleting an item leaves its room intact."""
        r = self.app.delete("/rooms/living-room/items/TV")
        self.assertEqual(r.status_code, 201)
        self.assertEqual(json.loads(r.data), {})
        # Check room is still there but item has been deleted
        Session.remove()
        new_session = Session()
        self.assertEqual(len(new_session.query(Room).all()), 1)
        self.assertEqual(len(new_session.query(Item).all()), 0)
    def test_get_items(self):
        """GET /rooms lists every room with its items."""
        r = self.app.get("/rooms")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            json.loads(r.data),
            {
                "living-room": [{
                    "name": "TV",
                    "description": "plasma screen",
                    "weight": 80,
                    "is_fragile": True
                }]
            }
        )
    def test_generate_manifest(self):
        """The manifest groups items: two heaviest / fragile / non-fragile per room."""
        # Add more fixture data
        table = Item("table", self.room, 40, "wood", False)
        couch = Item("couch", self.room, 200, "leather", False)
        self.session.add_all([table, couch])
        kitchen = Room("kitchen")
        fridge = Item("fridge", kitchen, 100, "Samsung fridge", True)
        cutlery = Item("cutlery", kitchen, 10, "Ikea", False)
        plates = Item("plates", kitchen, 15, "Ikea", True)
        self.session.add_all([kitchen, fridge, cutlery, plates])
        self.session.commit()
        r = self.app.get("/manifest")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            json.loads(r.data),
            {
                "two_heaviest_items": {
                    "living-room": [
                        {
                            "name": "couch",
                            "description": "leather",
                            "weight": 200,
                            "is_fragile": False
                        },
                        {
                            "name": "TV",
                            "description": "plasma screen",
                            "weight": 80,
                            "is_fragile": True
                        }
                    ],
                    "kitchen": [
                        {
                            "name": "fridge",
                            "description": "Samsung fridge",
                            "weight": 100,
                            "is_fragile": True
                        },
                        {
                            "name": "plates",
                            "description": "Ikea",
                            "weight": 15,
                            "is_fragile": True
                        }
                    ]
                },
                "fragile_items": {
                    "living-room": [
                        {
                            "name": "TV",
                            "description": "plasma screen",
                            "weight": 80,
                            "is_fragile": True
                        }
                    ],
                    "kitchen": [
                        {
                            "name": "fridge",
                            "description": "Samsung fridge",
                            "weight": 100,
                            "is_fragile": True
                        },
                        {
                            "name": "plates",
                            "description": "Ikea",
                            "weight": 15,
                            "is_fragile": True
                        }
                    ]
                },
                "non_fragile_items": {
                    "living-room": [
                        {
                            "name": "couch",
                            "description": "leather",
                            "weight": 200,
                            "is_fragile": False
                        },
                        {
                            "name": "table",
                            "description": "wood",
                            "weight": 40,
                            "is_fragile": False
                        }
                    ],
                    "kitchen": [
                        {
                            "name": "cutlery",
                            "description": "Ikea",
                            "weight": 10,
                            "is_fragile": False
                        }
                    ]
                }
            }
        )
| true |
27b6dbb745fe327619b6e2aa5aa70a5abc2b1598 | Python | yusupovbulat/eric-matthes-python-crash-course | /chapter_1_basics/part3/motorcycles.py | UTF-8 | 1,544 | 4.6875 | 5 | [] | no_license | # Create new array and print
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
# Add new element to array's last position with append() method
motorcycles.append('ducati')
print(motorcycles)
# Create empty array and add elements with append() method
motorcycles = []
print(motorcycles)
motorcycles.append('honda')
motorcycles.append('yamaha')
motorcycles.append('suzuki')
print(motorcycles)
# Add new element to array in selected position with insert() method
motorcycles.insert(0, 'ducati')
print(motorcycles)
# Remove selected element from array
del motorcycles[-1]
print(motorcycles)
# Extract an element from an array and subsequent use with pop() method
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
popped_motorcycle = motorcycles.pop()
print(motorcycles)
print(popped_motorcycle)
# Example of use 1
motorcycles = ['honda', 'yamaha', 'suzuki']
last_owned = motorcycles.pop()
print('The last motorcycle I owned was a ' + last_owned.title() + '.')
# Example of use 2
motorcycles = ['honda', 'yamaha', 'suzuki']
first_owned = motorcycles.pop(0)
print('The first motorcycle I owned was a ' + first_owned.title() + '.')
# Remove an element from an array by value
motorcycles = ['honda', 'yamaha', 'suzuki', 'ducati']
print(motorcycles)
motorcycles.remove('ducati')
print(motorcycles)
motorcycles = ['honda', 'yamaha', 'suzuki', 'ducati']
print(motorcycles)
too_expensive = 'ducati'
motorcycles.remove(too_expensive)
print(motorcycles)
print("\nA " + too_expensive.title() + " is too expensive for me.")
| true |
3c9ae1fbf88940232863a973baf65d7b820a8f0b | Python | partone/ChessCNN | /ChessCNN.py | UTF-8 | 6,756 | 3.09375 | 3 | [] | no_license | # Pytorch stuff
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.utils import make_grid
# The usual
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # For plotting graphs
import os
from PIL import Image # For importing images
'''
# Let's have a look at the data
# Get file names
path = "Data"
img_names = []
for folder, subfolders, filenames in os.walk(path):
for img in filenames:
img_names.append(folder + '/' + img)
# Get image dimensions
img_sizes = []
for item in img_names:
with Image.open(item) as img:
img_sizes.append(img.size)
df = pd.DataFrame(img_sizes)
print(f'Number of images: {len(img_names)}')
print(df[0].describe())
print(df[1].describe())
# Check out one of the images
knight = Image.open("Data/Knight/00000032.jpg") # Open image
print(knight.size)
print(knight.getpixel((0, 0)))
# Transform into a tensor
transform = transforms.Compose(
[transforms.ToTensor()]
)
im = transform(knight)
print(type(im))
print(im.shape)
plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))
plt.show()
'''
root = "Data"

# Transformations for the training set: light augmentation plus the fixed
# resize/crop/normalize pipeline.
train_transform = transforms.Compose([
    # Play with the data a bit
    #transforms.RandomRotation(15),
    transforms.RandomHorizontalFlip(),
    # Structure the data
    transforms.Resize((224)), # Resizing to roughly the mean of the dataset / 4
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    #transforms.Normalize(mean=[.485, .456, .406], std=[.229, .224, .225]) # Common normalisation values
    transforms.Normalize(mean=[0.6772, 0.6613, 0.6426], std=[0.2391, 0.2456, 0.2503]) # Data set specific values
])

# Evaluation transform: same pipeline without augmentation.
test_transform = transforms.Compose([
    transforms.Resize(224), # Resizing to roughly the mean of the dataset
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    #transforms.Normalize(mean=[.485, .456, .406], std=[.229, .224, .225]) # Common normalisation values
    transforms.Normalize(mean=[0.6772, 0.6613, 0.6426], std=[0.2391, 0.2456, 0.2503]) # Data set specific values
])

# Load training data
train_data = datasets.ImageFolder(os.path.join(root, 'train'), train_transform)
train_loader = DataLoader(train_data, batch_size=10, shuffle=True)

# Load test data.
# BUG FIX: the original wrapped `train_data` in the test DataLoader, so every
# "validation" number below was really computed on the training set.
test_data = datasets.ImageFolder(os.path.join(root, 'test'), test_transform)
test_loader = DataLoader(test_data, batch_size=10, shuffle=False)

class_names = train_data.classes

print("Classes: ", class_names)
print("Training dataset size: ", len(train_data))
print("Test dataset size: ", len(test_data))

# Have a peek at the first batch
for images, labels in train_loader:
    break

print("Label:\t", labels.numpy())
print("Class:\t", *np.array([class_names[i] for i in labels]))
print("Image data dimensions: ", images.shape) # 10(batch size)x3(channels)x224(w)x224(h)

im = make_grid(images, nrow=5)
plt.figure(figsize=(12, 4))
plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))
#plt.show()
# Make the CNN
'''
class ConvolutionalNetwork(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 4, 3, 1, 1) # 3 input channels, 4 output filters, 3x3 kernel, stride of 1
self.conv2 = nn.Conv2d(4, 8, 3, 1, 1)
self.conv3 = nn.Conv2d(8, 16, 3, 1, 1)
self.conv4 = nn.Conv2d(16, 32, 3, 1, 1)
self.fc1 = nn.Linear(12*12*32, 128) # Will received the flattened image
self.fc2 = nn.Linear(128, 16)
self.fc3 = nn.Linear(16, 6)
def forward(self, X):
X = F.relu(self.conv1(X))
X = F.max_pool2d(X, 2, 2)
X = F.relu(self.conv2(X))
X = F.max_pool2d(X, 2, 2)
X = F.relu(self.conv3(X))
X = F.max_pool2d(X, 2, 2)
X = F.relu(self.conv4(X))
X = F.max_pool2d(X, 2, 2)
X = X.view(-1, 12*12*32)
X = F.relu(self.fc1(X))
X = F.relu(self.fc2(X))
X = self.fc3(X)
return F.log_softmax(X, dim=1)
'''
class ConvolutionalNetwork(nn.Module):
    """Two conv/pool stages followed by three fully-connected layers.

    Expects (N, 3, 224, 224) input; produces log-probabilities over 6 classes.
    """
    def __init__(self):
        super().__init__()
        # Layer attribute names are part of the checkpoint format — keep them.
        self.conv1 = nn.Conv2d(3, 6, 3, 1)
        self.conv2 = nn.Conv2d(6, 16, 3, 1)
        self.fc1 = nn.Linear(54*54*16, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 6)

    def forward(self, X):
        # 224 -> conv(3) -> 222 -> pool -> 111 -> conv(3) -> 109 -> pool -> 54
        out = F.max_pool2d(F.relu(self.conv1(X)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 54*54*16)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return F.log_softmax(self.fc3(out), dim=1)
# Train the CNN, tracking per-epoch loss and accuracy on both splits.
CNNmodel = ConvolutionalNetwork()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(CNNmodel.parameters(), lr=0.0001)
print(CNNmodel)

print("Parameter numbers:")
for p in CNNmodel.parameters():
    print(p.numel())

import time
start_time = time.time()

epochs = 100

train_losses = []
test_losses = []
train_correct = []
test_correct = []

for i in range(epochs):
    print("Epoch: ", i)
    train_correct_epoch = 0
    test_correct_epoch = 0

    for b, (X_train, y_train) in enumerate(train_loader):
        b += 1

        y_pred = CNNmodel(X_train)
        loss = criterion(y_pred, y_train)

        predicted = torch.max(y_pred.data, 1)[1]
        batch_correct = (predicted.numpy() == y_train.numpy()).sum()
        train_correct_epoch += batch_correct

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if b % 10 == 0:
            print("Batch: ", b, "\tLoss: ", loss.item())

    # FIX: store the scalar, not the loss tensor — appending the tensor kept
    # one autograd graph alive per epoch and grew memory over training.
    train_losses.append(loss.item())
    train_correct.append(train_correct_epoch)

    # Test set check during training
    with torch.no_grad():
        for b, (X_test, y_test) in enumerate(test_loader):
            y_val = CNNmodel(X_test)
            predicted = torch.max(y_val.data, 1)[1]
            test_correct_epoch += (predicted.numpy() == y_test.numpy()).sum()
            # Loss of the last validation batch of this epoch
            loss = criterion(y_val, y_test)
        test_losses.append(loss.item())
        test_correct.append(test_correct_epoch)

total_time = time.time() - start_time
print("Finished in: ", total_time / 60, " minutes")

print(test_correct)
# NOTE(review): 550 is presumably the test-set size — confirm against the data.
print("Test correct: ", test_correct[-1].item() / 550 * 100, "%")

torch.save(CNNmodel.state_dict(), "chessModel.pt")

plt.clf()
plt.plot(train_losses, label="Training loss")
plt.plot(test_losses, label="Validation loss")
plt.title("Loss over batches")
plt.legend()
plt.show()

plt.clf()
# NOTE(review): 80 / 30 look like per-split batch counts — confirm.
plt.plot([t/80 for t in train_correct], label='training accuracy')
plt.plot([t/30 for t in test_correct], label='validation accuracy')
plt.title('Accuracy at the end of each epoch')
plt.legend();
| true |
import re

# Read the puzzle input and strip the trailing newline from each line.
# A context manager guarantees the file handle is closed (the original
# opened it and never closed it).
with open("input.txt", "r") as puzzle_input:
    lines = [line.strip() for line in puzzle_input]
# It's actually messier than the original.
# But I've never used named groups before.
# Also attempted to use one function for both methods,
# which makes things messier as well.
def password_checker(passwords, method):
    """Count valid passwords under one of two policies.

    Each entry looks like "1-3 a: abcde".  With method "part_1" the letter
    must occur between num1 and num2 times; with "part_2" it must appear at
    exactly one of the 1-based positions num1, num2.
    """
    pattern = re.compile(r'(?P<num1>\d+)-(?P<num2>\d+)(\s)(?P<letter>\w)(:\s)(?P<password>\w+)')
    valid = 0
    for entry in passwords:
        match = pattern.match(entry)
        pwd = match.group('password')
        ch = match.group('letter')
        lo = int(match.group('num1'))
        hi = int(match.group('num2'))
        if method == "part_1":
            if lo <= pwd.count(ch) <= hi:
                valid += 1
        elif method == "part_2":
            if len(pwd) < hi:
                # Quirk preserved from the original: a string, not a count.
                return "The index is too high!"
            if (pwd[lo - 1] == ch) != (pwd[hi - 1] == ch):
                valid += 1
        else:
            return "Select a valid method: part_1 or part_2"
    return valid
# Run both policies against the real puzzle input.
print(password_checker(lines, "part_1"))
print(password_checker(lines, "part_2"))
| true |
e359c448210e1499756d381154d4d2ee0383e002 | Python | savourylie/behavioral_cloning | /model.py | UTF-8 | 7,303 | 2.65625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import random
import os
import matplotlib.pyplot as plt
import seaborn
from IPython.display import display
import cv2
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.regularizers import l2, activity_l2
import json
from keras.models import model_from_json
from keras.optimizers import Adam
from math import floor
from sklearn.utils import shuffle
from pathos.multiprocessing import ProcessingPool as Pool
def load_images_from_folder(X_train_names):
    """Read every image path in X_train_names in parallel.

    Uses a pathos process pool (16 workers); returns a list of image arrays
    in the same order as the input paths.  (The dead serial loop that was
    commented out here has been removed.)
    """
    def load_single_image(filename):
        img = plt.imread(filename)
        # Guard kept from the original; unreadable files yield None entries.
        if img is not None:
            return img

    pool = Pool(16)
    images = pool.map(load_single_image, X_train_names)
    return images
def load_data_from_folder(folder_name):
    """Load center/left/right camera images and steering angles from a sim log.

    Left/right frames are given a steering correction of +0.2 / -0.2 so they
    can serve as extra training data.  Returns (X_train, y_train) stacked in
    the order center, left, right.
    """
    # NOTE(review): pd.DataFrame.from_csv was removed in pandas 1.0 — confirm
    # the pinned pandas version or migrate to pd.read_csv.
    driving_log = pd.DataFrame.from_csv('./' + folder_name + '/driving_log.csv', index_col=None)
    driving_log.columns = ['Center Image', 'Left Image', 'Right Image', 'Steering Angle', 'Throttle', 'Break', 'Speed']
    X_train_center = ["./" + folder_name + "/" + x for x in driving_log['Center Image'].values.tolist()]
    X_train_center = np.asarray(load_images_from_folder(X_train_center))
    # x[1:] drops the leading space present in the left/right path columns.
    X_train_left = ["./" + folder_name + "/" + x[1:] for x in driving_log['Left Image'].values.tolist()]
    X_train_left = np.asarray(load_images_from_folder(X_train_left))
    X_train_right = ["./" + folder_name + "/" + x[1:] for x in driving_log['Right Image'].values.tolist()]
    X_train_right = np.asarray(load_images_from_folder(X_train_right))
    X_train = np.concatenate((X_train_center, X_train_left, X_train_right), axis=0)
    y_train = driving_log['Steering Angle'].values.tolist()
    y_train_center = y_train
    y_train_left = [x + 0.2 for x in y_train]
    y_train_right = [x - 0.2 for x in y_train]
    y_train = np.asarray(y_train_center + y_train_left + y_train_right)
    return X_train, y_train
def crop_single_image(image):
    """Drop the top 70 rows (sky) and bottom 20 rows (car hood) of an image.

    The original expression ``image[70:][:][:-20][:]`` contained two no-op
    ``[:]`` slices; a single row slice is equivalent.
    """
    return image[70:-20]
def crop_sky_and_front_cover(image_data):
    """Crop rows 70..-20 from a batch of images shaped (N, H, W, C).

    Equivalent to the original np.apply_along_axis(crop_single_image, axis=1,
    arr=image_data), but as a single O(1) slice instead of one Python call per
    (N, W, C) fibre — the apply_along_axis version was extremely slow.
    """
    return image_data[:, 70:-20]
def normalize_single_image(image):
    """Centre the image on its mean and scale by its dynamic range."""
    centred = image - np.mean(image)
    value_range = np.max(image) - np.min(image)
    return centred / value_range
def rgb_2_hsv_single_image(image):
    """Convert one RGB image array to HSV colour space via OpenCV."""
    return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
def preprocess(image_data):
    """Crop, downscale by 0.5, convert RGB->HSV and range-normalize.

    Accepts either a batch (N, H, W, C) or a single image (H, W, C);
    raises TypeError for any other rank.
    """
    if len(image_data.shape) == 4:
        # Crop data
        cropped = crop_sky_and_front_cover(image_data)
        # Resize data (half resolution in both dimensions)
        image_data_small = []
        for image in cropped:
            image_data_small.append(cv2.resize(image, (0,0), fx=0.5, fy=0.5))
        # HSV data
        image_data_hsv = []
        for image in image_data_small:
            image_data_hsv.append(rgb_2_hsv_single_image(image))
        # Normalize data
        image_data_normalized = []
        for image in image_data_hsv:
            image_data_normalized.append(normalize_single_image(image))
        image_data_normalized = np.asarray(image_data_normalized)
    elif len(image_data.shape) == 3:
        # Same pipeline for a single image: crop, resize, HSV, normalize.
        # Crop data
        cropped = crop_single_image(image_data)
        # Resize data
        small = cv2.resize(cropped, (0,0), fx=0.5, fy=0.5)
        # HSV data
        image_data_hsv = rgb_2_hsv_single_image(small)
        # Normalize data
        image_data_normalized = normalize_single_image(image_data_hsv)
    else:
        raise TypeError("Wrong image shape!")
    return image_data_normalized
def batch_generator(features, labels, batch_size):
    """Infinite generator of preprocessed training batches.

    Each yielded batch holds `batch_size` samples drawn uniformly at random
    (with replacement) from `features`/`labels`, preprocessed on the fly.
    """
    feat_batch = np.zeros((batch_size, 35, 160, 3))
    label_batch = np.zeros((batch_size,),)
    while True:
        for slot in range(batch_size):
            # choose a random sample index
            pick = random.randint(0, len(features) - 1)
            feat_batch[slot] = preprocess(features[pick])
            label_batch[slot] = labels[pick]
        yield feat_batch, label_batch
def yadav_model(image_dim, conv_drop=0.2, fc_drop=0.5):
    """Build the Yadav-style steering CNN (identical layer sequence).

    Three conv "stages" (32, 64, 128 filters, each: two 3x3 convs, max-pool,
    dropout, ELU) after a 1x1 colour-space layer, then 512/64/16/1 dense head.
    """
    # Unpack kept from the original: validates that image_dim has 3 entries.
    row, col, channel = image_dim

    model = Sequential()
    model.add(Convolution2D(3, 1, 1, input_shape=image_dim))

    for depth in (32, 64, 128):
        model.add(Convolution2D(depth, 3, 3))
        model.add(Convolution2D(depth, 3, 3))
        model.add(MaxPooling2D((2, 2), border_mode='same'))
        model.add(Dropout(conv_drop))
        model.add(ELU())

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Dropout(fc_drop))
    model.add(ELU())
    model.add(Dense(64))
    model.add(Dropout(0.5))
    model.add(ELU())
    model.add(Dense(16))
    model.add(Dropout(0.5))
    model.add(LeakyReLU())
    model.add(Dense(1))

    return model
if __name__ == '__main__':
    # Load data from both recording folders.
    X_train_my, y_train_my = load_data_from_folder('my_data')
    X_train_udacity, y_train_udacity = load_data_from_folder('udacity_data')
    # Create balanced dataset: bin samples by steering angle, then cap each bin.
    max_label = y_train_my.max()
    min_label = y_train_my.min()
    # NOTE(review): step_size is computed but never used.
    step_size = (max_label - min_label) / 20.
    data_in_bins = [[] for x in range(20)]
    for i, value in enumerate(y_train_my):
        # NOTE(review): the "- 2" offset shifts all bin indices down by two
        # (negative indices wrap to the top bins) — confirm this is intended.
        index = floor((((value - min_label) * 20) / (max_label - min_label) - 2))
        data_in_bins[index].append(i)
    # Select at most 2000 indices per bin to limit class imbalance.
    balance_index = []
    for b in data_in_bins:
        i = 0
        b_len = len(b)
        while i < b_len and i < 2000:
            balance_index.append(b[i])
            i += 1
    X_train_balance, y_train_balance = X_train_my[balance_index], y_train_my[balance_index]
    # Shuffle the dataset (fixed seed for reproducibility).
    X_train_balance, y_train_balance = shuffle(X_train_balance, y_train_balance, random_state=13)
    # Define model (input is the preprocessed 35x160x3 frame).
    model = yadav_model((35, 160, 3), conv_drop=0.6, fc_drop=0.75)
    # Initialize generators: my data for training, Udacity data for validation.
    training_samples_gen = batch_generator(X_train_balance, y_train_balance, batch_size=100)
    validation_samples_gen = batch_generator(X_train_udacity, y_train_udacity, batch_size=100)
    adam = Adam(lr=0.001)
    model.compile(loss='mse', optimizer=adam)
    history = model.fit_generator(training_samples_gen, samples_per_epoch=20000, nb_epoch=3, verbose=1, validation_data=validation_samples_gen, nb_val_samples=24000)
    # Print model summary
    print(model.summary())
    # Save model architecture as JSON and weights as HDF5.
    with open('model.json', 'w') as outfile:
        json.dump(model.to_json(), outfile)
    model.save_weights('model.h5')
4f84adea7d930a3df21291ab592bae13c7faf43e | Python | liuyuanyuan1992/test_auto | /test_appium/test_xueqiu.py | UTF-8 | 2,498 | 2.640625 | 3 | [] | no_license | # This sample code uses the Appium python client
# pip install Appium-Python-Client
# Then you can paste this into a file and simply run with Python
# First test script recorded with Appium
from appium import webdriver
from time import sleep
from appium.webdriver.common.touch_action import TouchAction
class TestXueqiu:
    """UI tests for the Xueqiu Android app, driven through Appium."""

    def setup(self):
        """Start an Appium session against the MuMu emulator and wait for the home page."""
        caps = {}
        caps["platformName"] = "Android"
        caps["deviceName"] = "mumu"
        caps["appPackage"] = "com.xueqiu.android"
        caps["appActivity"] = ".view.WelcomeActivityAlias"
        caps["autoGrantPermissions"] = True
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        self.driver.implicitly_wait(20)
        # Wait for the profile icon to appear (page loaded)
        self.driver.find_element_by_id("user_profile_icon")

    # Test the contents of the personal profile page
    # def test_profile(self):
    #     pass
    #     self.driver.find_element_by_id("user_profile_icon").click()
    #     print(self.driver.find_element_by_id("user_profile_icon").get_attribute("class"))
    #     pass

    # def test_click(self):
    #
    #     self.driver.tap()

    # def test_get_attribute(self):
    #     pass
    #     print(self.driver.find_element_by_id("user_profile_icon").get_attribute("class"))
    # Fails: the "class" attribute cannot be found

    # def test_selected(self):
    #
    #     self.driver.find_element_by_xpath("//*[contains(text,'行情')]").click()
    #     self.driver.find_element_by_xpath("//*[@text='行情']").click()
    #     pass
    #
    # def test_swipe(self):
    #
    #     self.driver.swipe(100, 200, 500, 800, 1000)
    #
    # def test_long_press(self):
    #
    #     el = self.driver.find_element_by_xpath("(//*[@text='基金'])[1]")
    #     TouchAction(self.driver).long_press(el).perform()
    #
    # def test_uiautomator(self):
    #
    #     self.driver.find_element_by_android_uiautomator('new UiSelector().text("Animal")')

    def test_search(self):
        """Search for "alibaba" and assert BABA's current price is above 100."""
        self.driver.find_element_by_id("tv_search").click()
        self.driver.find_element_by_id("search_input_text").send_keys("alibaba")
        self.driver.find_element_by_id("name").click()
        # NOTE(review): `list` shadows the builtin; it holds the price text.
        list = self.driver.find_element_by_xpath("//*[contains(@resource-id,'stockCode') and @text='BABA']/../../.."
                                                 "//*[contains(@resource-id,'current_price')]").text
        print(list)
        assert float(list) > 100

    def teardown(self):
        """Pause briefly (to observe the result), then end the Appium session."""
        sleep(5)
        self.driver.quit()
| true |
01809e5d0c8406acc4f65fd7e0f81dc8b1909149 | Python | codeAligned/coding-practice | /General/Arrays/preserve_order_in_set.py | UTF-8 | 425 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python3
# Naturally this method will work only on lists without duplicates.
# A duplicate list cannot create unique indexes in the dict.
sample = [5, 4, 3, 2, 1]
sampleDict = dict((item, index) for index, item in enumerate(sample))
sampleSet = set(sampleDict)
reconstructedList = [None] * len(sampleSet)
for i in sampleSet:
reconstructedList[sampleDict[i]] = i
print(sampleSet)
print(reconstructedList)
| true |
def main():
    """Advent of Code 2019 day 8, part 1.

    Split the input digit string into 6x25 layers and print ones*twos for
    the layer containing the fewest zeros.
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open("input.txt", 'r') as f:
        line = f.readline()
    rows = 6
    columns = 25
    n = rows * columns
    layers = [line[i:i + n] for i in range(0, len(line), n)]
    print(layers)
    counts = list(map(findcounts, layers))
    counts.sort(key=lambda x: x.zeros)
    first = counts[0]
    # 1965 (known answer for this puzzle input)
    print(first.ones * first.twos)
def findcounts(layer):
    """Tally the digits '0', '1' and '2' in one image layer."""
    tallies = [layer.count(digit) for digit in ("0", "1", "2")]
    return Counts(*tallies)
class Counts:
    """Per-layer digit tallies for the day-8 image format."""

    def __init__(self, zeroes, ones, twos):
        # Note the attribute is `zeros` while the parameter is `zeroes`.
        self.zeros, self.ones, self.twos = zeroes, ones, twos
main()
| true |
# Practice script: dictionaries, iteration, sorting, input() and while-loops.
dir1 = {'color' : 'blue', 'avalue' : 1, "time" : 1}
print(dir1)
dir1['un'] = 'jim'
print(dir1)
del dir1['un']
print(dir1)
# Iterate over (key, value) pairs.
for key, value in dir1.items():
    print(key + ": " + str(value))
# Values only; iterating the dict itself yields keys by default.
for key in dir1.values():
    print(key)
for key in dir1.keys():
    print(key)
for key in dir1:
    print(key)
print("\n", sorted(dir1))
# set() removes duplicate values before printing.
for value in set(dir1.values()):
    print(value)
aliens = []
for alien_number in range(30):
    aliens.append(
        {'color' : 'green', 'point' : 5, 'number' : alien_number + 1}
    )
for alien in aliens[:30]:
    print(alien)
print(len(aliens))
print(alien_number)
msg = input("what's your name?\n\t")
print(msg)
active = True
# Loop until the user types 'quit'.
while active:
    if input("msg") == 'quit':
        active = False
16a68c05b5f6e7fcf99fff4423af299970b91bc9 | Python | zaihtml/lpthw | /ex15.py | UTF-8 | 1,160 | 4.09375 | 4 | [] | no_license | # this line calls an argument variable from the system
# NOTE: this is a Python 2 script (`print` statements, raw_input).
from sys import argv
# this line unpacks the argument variable
# when running in terminal, you have to type the name of this file,
# as well as the name of the file you want to open
script, filename = argv
# open() returns a file object bound to the 'txt' variable
# NOTE(review): neither file object is ever closed in this script.
txt = open(filename)
print "Here's your file %r:" % filename
# you give a file a command by using the full stop,
# the name of the command, and parameters
# when you say txt.read() it is like you are saying
# 'Hey txt! Do your read command with no parameters.'
print txt.read()
# this does the same thing but allows you to write in the file name via input
print "Type the filename again:"
file_again = raw_input("> ")
# then a command is set to open the file which you inputted
txt_again = open(file_again)
# then this command tells the computer to 'read' or 'print' the contents
print txt_again.read()
# you can open a file in one of two ways as shown in this file
# you could use the way as shown in lines 7 - 17
# or the way shown in lines 19 - 24
# I prefer the second way mainly because I don't fully understand the first way
3e2f88971bbcc34d2dfb6f766954472edcb321f0 | Python | alanrvazquez/MIOSE | /FOalgorithms.py | UTF-8 | 7,559 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions containing first-order algorithms to compute good starting solutions
to the MIO problem and to specify the values of the constants in the boosting
constraints.
"""
import numpy as np
class BetaStart(object):
    """Candidate solution: coefficient vector `b` and its objective value."""

    def __init__(self, b, val):
        self.b, self.objval = b, val
class AdvancedStart(object):
    """Warm start for the MIO problem: positive/negative coefficient parts,
    zero-support indicators, and the largest absolute coefficient."""

    def __init__(self, bp, bm, z, maxb):
        self.bp, self.bm = bp, bm
        self.z, self.maxb = z, maxb
# Functions -------------------------------------------------------------------
def gbeta(X, y, betap, N, p):
    """ Compute half the residual sum of squares, 0.5 * ||y - X*beta||^2.
    Input:
    X: N x p model matrix (numpy array, float).
    y: N x 1 vector of responses (numpy array, float).
    betap: p x 1 vector of estimated effects (numpy array, float).
    N: number of observations (int).
    p: number of parameters or effects (int).
    Output:
    0.5 * RSS (float).
    """
    r_vec = y.reshape(N, 1) - np.matmul(X, betap)
    ltwonorm_r = np.matmul(r_vec.T, r_vec)
    # FIX: np.asscalar was removed in NumPy 1.23; ndarray.item() is the
    # supported equivalent.
    return 0.5 * ltwonorm_r.item()
def gradbeta(X, y, betap, N, p):
    """ Gradient of the half-RSS objective with respect to beta.
    Input:
    X: N x p model matrix (numpy array, float).
    y: N x 1 vector of responses (numpy array, float).
    betap: p x 1 vector of estimated effects (numpy array, float).
    N: number of observations (int).
    p: number of parameters or effects (int).
    Output:
    p x 1 gradient, -X^T (y - X*beta) (numpy array, float).
    """
    residual = y.reshape(N, 1) - X @ betap
    return -(X.T @ residual)
def AlgorithmOne(X, y, k, N, p, betam, L, tol = 0.0001, mm = 1000):
    """ Algorithm 1 in Bertsimas, D., King, A., and Mazumder, R. (2016) Best
    subset selection via modern optimization lens. Annals of Statistics, 44,
    813-852: projected-gradient / hard-thresholding for best-subset selection.
    Input:
    X: N x p model matrix (numpy array, float).
    y: N x 1 vector of responses (numpy array, float).
    k: number of non-zero parameters (int).
    N: number of observations (int).
    p: number of parameters or effects (int).
    betam: p x 1 vector of starting parameter estimates (numpy array, float).
    L: step-size constant, at least the largest eigenvalue of X^T X (float).
    tol: convergence tolerance on the objective decrease (float).
    mm: maximum number of iterations (int).
    Output:
    Starting solution and its objective value (BetaStart class).
    """
    diff_objval = 10**10
    cc = 0

    while (diff_objval > tol and cc < mm):
        beta_iter = np.zeros((p, 1))
        # Gradient step, then keep only the k largest-magnitude coordinates.
        c_vec = betam - (1/L)*gradbeta(X, y, betam, N, p)
        largestkestimates = abs(c_vec).argsort(axis = 0)[-k:][::-1]
        beta_iter[largestkestimates] = c_vec[largestkestimates]
        diff_objval = gbeta(X,y,betam,N,p) - gbeta(X,y,beta_iter,N,p)
        betam = beta_iter
        cc = cc + 1

    # Polishing coefficients: least-squares refit on the active set.
    sel_active = np.where(betam != 0)[0]
    Xr = X[:, sel_active]
    XsTXr = np.matmul(Xr.T,Xr)
    XrTy = np.matmul(Xr.T,y.reshape(N,1))

    try :
        invXsTXr = np.linalg.inv(XsTXr)
        betam[ sel_active ] = np.matmul(invXsTXr,XrTy)
    # FIX: np.linalg.linalg.LinAlgError was a private alias removed in modern
    # NumPy; the public exception is np.linalg.LinAlgError.
    except np.linalg.LinAlgError:
        print("Alg. 1 did not polish the estimates")

    objval = gbeta(X, y, betam, N, p)

    return BetaStart(betam, objval)
def advanced_starts(X, y, k, maxiter, mymaxbeta=1000):
    """ Compute advanced starting solutions (Modified Algorithm 1)
    Input:
    X: N x p model marix, where N is the number of observations and p the number
       of parameters (numpy array, float).
    y: N x 1 vector of responses, where N is the number of observations
       (numpy array, float).
    k: number of non-zero parameters (int).
    maxiter: number of iterations for Algorithm 1 (int).
    mymaxbeta: if no solution is found, create a synthetic solution with
               mymaxbeta as the maximum absolute component of that solution
               (float).
    Output:
    Starting solution for MIO problem (AdvancedStart class).
    """
    # Compute number of steps in the direction of the gradient; see
    # Bertsimas et al. (2016) for more information
    (N,p) = np.shape(X)
    XTX = np.matmul(X.T, X)
    eigenval = np.linalg.eigvals(XTX)
    L = max(eigenval)
    if L.imag != 0:
        print('Maximum eigen value is complex, results may not be accurate')
        L = L.real
    beta_results = np.zeros((p, maxiter))
    objval_vec = np.zeros((1,maxiter))
    # Apply Algorithm 1 from maxiter random sparse starting points.
    for i in range(maxiter):
        # NOTE(review): min(0, i) is always 0 for i >= 0 — confirm the intended
        # location parameter for the random starts.
        mymin = min(0, i)
        beta_init = np.random.normal(loc = mymin, scale = 2, size = p).reshape(p, 1)
        seteffzero = np.random.choice(p, size = p - k, replace = False)
        beta_init[seteffzero] = 0
        iter_result = AlgorithmOne(X, y, k, N, p, beta_init, L, tol = 0.0001, mm = 1000)
        beta_results[:,i] = iter_result.b.T
        objval_vec[:,i] = iter_result.objval
    best_sol = np.argmin(objval_vec)
    startsol = beta_results[:, best_sol]
    # Arrange output for MIO problem: split into positive/negative parts.
    if np.sum( abs(startsol) ) == 0 :
        print("No initial solution found")
        bp = startsol
        bm = startsol
        # NOTE(review): in this branch maxb is a plain int, so the final
        # maxb.tolist() call below would raise AttributeError — verify.
        maxb = mymaxbeta
        z = (startsol == 0) + 0
    else :
        bp = abs(startsol*(startsol > 0 ))
        bm = abs(startsol*(startsol < 0 ))
        maxb = max(abs(startsol))
        z = (startsol == 0) + 0
    return AdvancedStart(bp.tolist(), bm.tolist(), z.tolist(), maxb.tolist())
def bounds_sim(max_abs_beta, X, y, N, k, tao):
    """Derive big-M style constants for the boosting constraints of the MIO
    problem.

    max_abs_beta is the smallest absolute beta value known infeasible; tao is a
    safety factor against misspecifying it.  Returns (B, E, BL, EL) where B
    bounds individual coefficients, BL bounds their l1-norm (k active terms),
    E bounds any single fitted value, and EL bounds the l1-norm of the fit.
    """
    B = tao * max_abs_beta
    BL = k * B

    abs_rows = np.abs(X)
    # xi_i = sum of the k largest |x_ij| within row i.
    xi_vec = [np.sort(row)[-k:].sum() for row in abs_rows]
    # Accumulated row-maximum bound: BL * sum_i max_j |x_ij|.
    sumXimaxb = abs_rows.max(axis=1).sum() * BL

    E = max(xi_vec) * B
    yty = np.inner(y, y)
    EL = min(np.sqrt(N * yty), sumXimaxb)
    return B, E, BL, EL
| true |
afb9d0e2d11c2e4ef2ef0fd119b9453215ee0712 | Python | google/mobly | /mobly/controllers/android_device_lib/snippet_event.py | UTF-8 | 2,126 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# Emit a deprecation warning once at import time, pointing users at the
# replacement module.
logging.warning('The module mobly.controllers.android_device_lib.snippet_event '
                'is deprecated and will be removed in a future version. Use '
                'module mobly.snippet.callback_event instead.')
def from_dict(event_dict):
    """Build a SnippetEvent from its wire-format dictionary.

    DEPRECATED: Use mobly.snippet.callback_event.from_dict instead.

    Args:
      event_dict: a dictionary representing an event.

    Returns:
      A SnippetEvent object.
    """
    return SnippetEvent(
        callback_id=event_dict['callbackId'],
        name=event_dict['name'],
        creation_time=event_dict['time'],
        data=event_dict['data'])
class SnippetEvent:
    """A callback event emitted by the mobly snippet library.

    DEPRECATED: Use mobly.snippet.callback_event.CallbackEvent instead.

    Attributes:
      callback_id: string, the callback ID associated with the event.
      name: string, the name of the event.
      creation_time: int, epoch time at which the event was created on the
        Rpc server side.
      data: dictionary, the payload carried by the event; may be None.
    """

    def __init__(self, callback_id, name, creation_time, data):
        self.callback_id = callback_id
        self.name = name
        self.creation_time = creation_time
        self.data = data

    def __repr__(self):
        return ('SnippetEvent(callback_id: %s, name: %s, creation_time: %s, '
                'data: %s)') % (self.callback_id, self.name, self.creation_time,
                                self.data)
| true |
b4032d9d5ea470555946b6d772717eb11df8e5f3 | Python | sytelus/regim | /regim/data_utils.py | UTF-8 | 5,671 | 2.703125 | 3 | [] | no_license | import torch
import numpy as np
import random
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import TensorDataset
from torch.utils.data.sampler import SubsetRandomSampler
class ReshapeTransform:
    """Callable transform that reshapes a tensor to a fixed target size."""

    def __init__(self, new_size):
        self.new_size = new_size

    def __call__(self, img):
        return img.reshape(self.new_size)
class DataUtils:
    """Static helpers: dataset normalization, class-balanced sampling,
    MNIST loading, train/test/validation splitting and DataLoader setup."""

    @staticmethod
    def ensure_tensor(a):
        """Return `a` as a torch.Tensor, converting numpy arrays / array-likes."""
        if type(a) is torch.Tensor:
            return a
        elif type(a) is np.ndarray:
            return torch.from_numpy(a)
        else: # handle all other types convertible by numpy
            return torch.from_numpy(np.array(a))

    @staticmethod
    def channel_norm(ds, channel_dim=None):
        """Compute (mean, std) over a dataset, per channel if channel_dim is given,
        otherwise globally over all elements."""
        # collect tensors in list
        l = [DataUtils.ensure_tensor(data) for data, *_ in ds]
        # join back all tensors so the first dimension is count of tensors
        l = torch.stack(l, dim=0) #size: [N, X, Y, ...] or [N, C, X, Y, ...]
        if channel_dim is None:
            # add redundant first dim
            l = l.unsqueeze(0)
        else:
            # swap channel dimension to first
            l = torch.transpose(l, 0, channel_dim).contiguous() #size: [C, N, X, Y, ...]
        # collapse all except first dimension
        l = l.view(l.size(0), -1) #size: [C, N*X*Y]
        mean = torch.mean(l, dim=1) #size: [C]
        std = torch.std(l, dim=1) #size: [C]
        return (mean, std)

    @staticmethod
    def sample_by_class(ds, k, as_np=False, shuffle=False, no_test=False):
        """Take the first k samples of every class as "train"; the rest become "test".

        Returns tensors (or numpy arrays when as_np=True).
        """
        class_counts = {}
        train_data = []
        train_label = []
        test_data = []
        test_label = []

        #TODO optimize this
        if shuffle:
            ds = [(d, l) for d, l in ds]
            random.shuffle(ds)

        for data, label in ds:
            if not isinstance(label, torch.Tensor):
                label = torch.Tensor([label])
            c = label.item()
            class_counts[c] = class_counts.get(c, 0) + 1
            if class_counts[c] <= k:
                train_data.append(torch.unsqueeze(data, 0))
                train_label.append(torch.unsqueeze(label, 0))
            elif not no_test:
                test_data.append(torch.unsqueeze(data, 0))
                test_label.append(torch.unsqueeze(label, 0))

        train_data = torch.cat(train_data)
        train_label = torch.cat(train_label)
        if not no_test:
            test_data = torch.cat(test_data)
            test_label = torch.cat(test_label)

        if as_np:
            train_data = train_data.numpy()
            train_label = train_label.numpy()
            if not no_test:
                # NOTE(review): with no_test=True and as_np=True, test_data is
                # still a plain list here — .numpy() on it would fail; this
                # branch guards against that.
                test_data = test_data.numpy()
                test_label = test_label.numpy()
                return train_data, train_label, test_data, test_label
            return train_data, train_label
        # NOTE(review): this TensorDataset return appears unreachable — the
        # non-as_np path is cut off by the return above; confirm intent.
        return train_data, train_label
        return (TensorDataset(train_data, train_label),
                TensorDataset(test_data, test_label))

    @staticmethod
    def mnist_datasets(linearize=False, train_test=True):
        """Load MNIST with the standard normalization; optionally flatten images
        and/or concatenate the train and test splits."""
        mnist_transforms=[
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))]
        if linearize:
            mnist_transforms.append(ReshapeTransform((-1,)))

        train_ds = datasets.MNIST('../data', train=True, download=True,
            transform=transforms.Compose(mnist_transforms));
        test_ds = datasets.MNIST('../data', train=False,
            transform=transforms.Compose(mnist_transforms));
        if train_test:
            return train_ds, test_ds
        else:
            return torch.utils.data.ConcatDataset((train_ds, test_ds))

    @staticmethod
    def sample_traintest_by_class(train_ds, test_ds, k=None):
        """Optionally subsample train_ds to k samples per class; test_ds passes through."""
        if k is not None:
            train_ds_part, test_ds_part = DataUtils.sample_by_class(train_ds, k)
            # NOTE(review): the sampled test split is discarded and replaced by
            # the supplied test_ds — confirm this is intended.
            test_ds_part = test_ds
        else:
            train_ds_part, test_ds_part = train_ds, test_ds
        return train_ds_part, test_ds_part

    @staticmethod
    def split_dataset(ds, train_frac=0.6, test_frac=None, validate_frac=0.0):
        """Randomly split ds into train/test(/validate) by the given fractions.

        Whichever of test_frac / validate_frac is None is inferred from the rest.
        """
        if test_frac is None and validate_frac is None:
            raise ValueError("both test_frac and validation_frac should not be None")

        total_len = len(ds)
        train_len = int(total_len * train_frac)

        test_frac = test_frac or (1 - train_frac - validate_frac)
        test_len = int(total_len * test_frac)

        validate_frac = validate_frac or (1 - train_frac - test_frac)
        validate_len = int(total_len * validate_frac)

        # Recompute train_len so the three lengths absorb rounding and sum to total.
        train_len = total_len-validate_len-test_len

        if validate_len > 0:
            train_ds, test_ds, validate_ds = torch.utils.data.random_split(ds,
                (train_len, test_len, validate_len))
            return (train_ds, test_ds, validate_ds)
        else:
            train_ds, test_ds = torch.utils.data.random_split(ds,
                (train_len, test_len))
            return (train_ds, test_ds)

    @staticmethod
    def get_dataloaders(config, train_ds, test_ds):
        """Build shuffled DataLoaders for both splits; pin memory when CUDA is on.

        Either dataset may be None, in which case its loader is None too.
        """
        kwargs_train = {'pin_memory': True} if config.train_config.use_cuda else {}
        kwargs_test = {'pin_memory': True} if config.test_config.use_cuda else {}

        train_loader = torch.utils.data.DataLoader(train_ds,
            batch_size=config.train_config.batch_size, shuffle=True, **kwargs_train) \
            if train_ds is not None else None
        test_loader = torch.utils.data.DataLoader(test_ds,
            batch_size=config.test_config.batch_size, shuffle=True, **kwargs_test) \
            if test_ds is not None else None
        return train_loader, test_loader
| true |
c9de20fc2d88a8ff44e396bb90311fff2ffa3c69 | Python | istvan-stv-nagy/master_project | /labeling/label_output.py | UTF-8 | 251 | 2.65625 | 3 | [] | no_license | class LabelOutput:
    def __init__(self, pano_image, label_image):
        # Pair a panorama (input) image with its label (ground-truth) image.
        self.pano_image = pano_image
        self.label_image = label_image
    def data(self):
        """Return the panorama (input) image."""
        return self.pano_image
    def label(self):
        """Return the label (ground-truth) image."""
        return self.label_image
| true |
77432cdf876d463437c0f32dfd894e228ee71eef | Python | SilverBlaze109/VAMPY2017 | /projects/MMMMM.py | UTF-8 | 790 | 3.40625 | 3 | [
"MIT"
] | permissive | def mode(nums):
"""
nums must be array like mode[1,2,3,1] == 1
mena runs in O(N) time where N = len(nums)
"""
tally = {}
M = nums[0]
for x in nums:
tally[x] = tally.get(x ,0) + 1
if tally[x] > tally[M]:
M = x
return M
#print("The mode is "+str(mode([1,2,3,1,2,3,1,]))
def mean(nums):
    """Return the arithmetic mean of `nums`, or None for an empty sequence.

    Fixes the original `return NONE`, which raised NameError (the constant
    is spelled `None`), and replaces the manual accumulation with sum().
    """
    if not nums:
        return None
    return sum(nums) / len(nums)
def maximum(nums):
    """Return the largest element of `nums` (must be non-empty)."""
    best = nums[0]
    for candidate in nums[1:]:
        if candidate > best:
            best = candidate
    return best
def minimum(nums):
    """Return the smallest element of `nums` (must be non-empty)."""
    best = nums[0]
    for candidate in nums[1:]:
        if candidate < best:
            best = candidate
    return best
def median(nums):
    """Return the median of `nums`: the middle value of the sorted data, or
    the average of the two middle values for an even count."""
    ordered = sorted(nums)
    half = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[half]
    return (ordered[half - 1] + ordered[half]) / 2
| true |
a8cb3569ce947823320d8cac7fd463ad2c1c6cee | Python | hozza94/PythonStudy | /완주하지못한선수.py | UTF-8 | 657 | 3.203125 | 3 | [] | no_license | ### linear time algorithm
def solution(participant, completion):
    """Return the one participant who did not finish the marathon.

    Counter subtraction keeps only positive counts (handles duplicate
    names) and preserves first-seen order, matching the original dict
    counting in O(n).
    """
    from collections import Counter
    remaining = Counter(participant) - Counter(completion)
    # Exactly one runner is left over by the problem's guarantee.
    return next(iter(remaining))
if __name__ == "__main__":
    # Demo run: "leo" never crosses the finish line.  The original called
    # solution() with two empty lists at import time, which always raised
    # IndexError.
    participant = ["leo", "kiki", "eden"]
    completion = ["eden", "kiki"]
    print(solution(participant, completion))
| true |
68f31900dae00a0c8124b26f382a4d5b72b2539c | Python | ArbelRivitz/Four-in-a-row-game | /four_in_a_row.py | UTF-8 | 2,974 | 3.484375 | 3 | [] | no_license | #############################################################
# FILE : four_in_a_row.py
# WRITER : arbelr, noamiel,207904632, 314734302,Arbel Rivitz, Noa Amiel
# EXERCISE : intro2cs ex12 2017-2018
# DESCRIPTION:
# In this excercise we made the game four in a row. This game is moduled to different parts. There is some pares at this
# this game.
# There is the Game class that has all the game rules and methods.
# there is the Gui class that includes all the graphic parts of the game.
# there is the runner class, that has all the functions for the game to run
# there is the communicator class, that communicates between the server and the client in this game.
# there is the ai class that has the way of how to play
# and there is the four in a row file that that runs the whole game.
#############################################################
import tkinter
import sys
from ai import AI
from runner import Runner
MAX_PORT = 65535
MIN_PORT = 1000
HUMANPLAYER = "human"
AIPLAYER = "ai"
ILLEGAL_ARGS = "Illegal program arguments."
def running_a_cilent(is_human, port, ip):
    """
    This function runs the client game.
    :param is_human: true if the player is human, false otherwise
    :param port: the port to connect to (client)
    :param ip: the ip address of the server's computer
    :return:
    """
    root = tkinter.Tk()
    root.title("Client")
    # The two original branches differed only in the AI argument; a human
    # player simply gets no AI engine.
    ai = None if is_human else AI()
    runner = Runner(root, port, ai, ip)
    runner.run()
def running_a_server(is_human, port):
    """
    This function runs the server game. It checks if the player is human or
    not and builds the Runner accordingly.
    :param is_human: true if the player is human, false otherwise
    :param port: the port to listen on (server)
    :return:
    """
    root = tkinter.Tk()
    root.title("Server")
    # Only the Runner construction differs between the branches; the
    # duplicated run() call is hoisted out.
    if is_human:
        runner = Runner(root, port)
    else:
        runner = Runner(root, port, AI())
    runner.run()
def main():
    """
    Parse the command-line arguments and launch the game.  2 args start a
    server, 3 args start a client; anything else is rejected.

    Fixes in this version:
    - `int(arguments[1] > MAX_PORT)` had a misplaced parenthesis (it compared
      a str with an int instead of converting first);
    - an unrecognised player type left `is_human` unbound and crashed with
      NameError instead of printing the error message;
    - the port range check now actually enforces MIN_PORT..MAX_PORT.
    """
    arguments = sys.argv[1:]  # the argument count tells us client vs server
    if len(arguments) not in (2, 3):
        print(ILLEGAL_ARGS)  # prints an error message
        return
    if arguments[0] == HUMANPLAYER:
        is_human = True
    elif arguments[0] == AIPLAYER:
        is_human = False
    else:
        print(ILLEGAL_ARGS)
        return
    port = int(arguments[1])
    if not MIN_PORT <= port <= MAX_PORT:
        print(ILLEGAL_ARGS)
        return
    if len(arguments) == 3:
        running_a_cilent(is_human, port, arguments[2])
    else:
        running_a_server(is_human, port)
if __name__ == "__main__":
main()
| true |
188fe717ccf335fd4e4c7819d9e4ea195ba8d946 | Python | youareeverysingleday/pyOperateChrome | /pyOperateChrome.py | UTF-8 | 3,975 | 2.640625 | 3 | [] | no_license | import pandas as pd
import numpy as np
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from urllib.request import urlopen
from selenium.webdriver.common.keys import Keys
import time
import os
import argparse
class ChromeOperate():
    """Drives a throwaway Chrome session: opens `accessURL` with the given
    user agent (incognito, optionally headless), then clears all cookies."""

    def __init__(self, user_agent="", accessURL="http://www.ua110.com/", headlessFlag=True):
        super().__init__()
        self.startURL = accessURL
        self.headlessFlag = headlessFlag
        self.user_agent = user_agent

    def ChromeOptions(self):
        """Launch Chrome, visit self.startURL, wipe cookies, and always quit.

        Fixes: `browser` is pre-initialised so the finally block no longer
        raises NameError when Chrome fails to start, and the duplicated
        webdriver.Chrome(...) construction is collapsed into one call.
        """
        browser = None
        try:
            chrome_options = Options()
            # Spoof the user agent.
            print("user-agent='" + self.user_agent + "'")
            chrome_options.add_argument("user-agent='" + self.user_agent + "'")
            # Private (incognito) mode.
            chrome_options.add_argument('--incognito')
            if self.headlessFlag:
                # Headless mode: no visible browser window.
                chrome_options.add_argument('--headless')
            browser = webdriver.Chrome(options=chrome_options,
                                       executable_path="D:\Python37\Scripts\chromedriver.exe")
            browser.get(self.startURL)
            # Give the page a moment to load.
            time.sleep(5)
            # Clear every cookie of the fresh session.
            browser.get_cookies()
            browser.delete_all_cookies()
        finally:
            print("quit")
            if browser is not None:
                browser.quit()
def ChromeAction(StartURL, useragent, headlessFlag):
    """Open `StartURL` in Chrome with the given user agent and headless flag.

    If `useragent` is empty, a random agent is drawn from ua.csv.  Fixes the
    original, which accepted but silently ignored `StartURL` and `useragent`.
    """
    # ANSI encoding is required for this file; header=None keeps raw rows.
    user_agents_csv = pd.read_csv('ua.csv', header=None, encoding='ANSI', dtype=str)
    if useragent:
        single_ua = useragent
    else:
        # Pick one random row (a 1-row DataFrame), then unwrap it via
        # .values[0][0] to a plain string.
        single_ua = user_agents_csv.sample(1, replace=True, random_state=None, axis=0).values[0][0]
    co = ChromeOperate(user_agent=single_ua, accessURL=StartURL, headlessFlag=headlessFlag)
    co.ChromeOptions()
    print("Complete.")
if __name__ == "__main__":
    # NOTE(review): the argparse-based CLI below was disabled in favour of this
    # hard-coded call; re-enable it to make URL/agent/headless configurable.
    ChromeAction('http://ua110.com','',False)
    # try:
    #     parser = argparse.ArgumentParser(description="The parameters of Chrome browser settings.")
    #     parser.add_argument('--StartURL', '-u', help='StartURL 属性,初始访问页面,默认值为http://ua110.com,非必要参数', default='http://ua110.com')
    #     parser.add_argument('--useragent', '-a', help='useragent 属性,设置useragent,有默认值。没有设置的情况下,从文件中随机选择。非必要参数',
    #                         default="Mozilla/5.0 (iPod; U; CPU iPhone OS 2_1 like Mac OS X; ja-jp) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5F137 Safari/525.20")
    #     parser.add_argument('--headlessFlag', '-l', help='headlessFlag 属性,设置是否使用隐藏运行模式,非必要参数。默认值为False', required=False)
    #     args = parser.parse_args()
    #     ChromeAction(args.StartURL, args.useragent, args.headlessFlag)
    # except Exception as e:
    #     print(e)
| true |
329f5b2c1a33e6a4a49863343d63fd080ee66175 | Python | balu-/deconzpy | /deconzpy/Light.py | UTF-8 | 9,106 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from .BaseElement import DeconzBaseElement
import logging
logger = logging.getLogger(__name__)
class Light(DeconzBaseElement):
""" Repraesentation eines Lichts """
class State:
""" ein Status eines Lichts """
brightness = None
colorTemperatur = 0
hue = -1
on = None
sat = -1
xy = [0, 0]
alert = None
def __init__(self, prevState=None):
pass
# if prevState != None:
# self.brightness = prevState.brightness
# self.colormode = prevState.colormode
# self.colorTemperatur = prevState.colorTemperatur
# self.hue = prevState.hue
# self.on = prevState.on
# self.sat = prevState.sat
# self.xy = prevState.xy
    def __init__(self, id, arr, urlRoot):
        """Initialise the light and seed priority 0 with a base-off state."""
        DeconzBaseElement.__init__(self, id, arr, urlRoot)
        self.stateStack = {0: Light.State()}
        # set defaults: only seed channels the device actually reports
        if self.getAttribute("state_bri") != None:
            self.stateStack[0].brightness = 0
        if self.getAttribute("state_on") != None:
            self.stateStack[0].on = False
        self.highestStateId = 0  # highest priority currently on the stack
    def __getOrAddState(self, prio):
        """Return the state stored for `prio`, creating it (and raising
        highestStateId if needed) when it does not exist yet."""
        state = None
        if prio not in self.stateStack:
            # adding a new, possibly higher-priority state
            if prio > self.highestStateId:
                self.highestStateId = prio
            state = Light.State()
            self.stateStack[prio] = state
        return self.stateStack[prio]
    def getName(self):
        """Return the light's configured name."""
        return self.getAttribute("name")
    def getManufacturer(self):
        """Return the manufacturer name reported by the device."""
        return self.getAttribute("manufacturername")
    def getType(self):
        """Return the device type string."""
        return self.getAttribute("type")
    def isOn(self):
        """Return the light's current power state as last reported."""
        return self.getAttribute("state_on")
    def isReachable(self):
        """Return whether the gateway can currently reach the device."""
        return self.getAttribute("state_reachable")
    def setBrightness(self, value, statePrio=10, transitiontime=10):
        """Record `value` as the brightness of the `statePrio` state and push
        it to the device if that priority is (at least) the highest.

        NOTE(review): `transitiontime` is currently unused here.
        """
        newState = self.__getOrAddState(statePrio)
        newState.brightness = value
        if statePrio >= self.highestStateId:
            logger.info("new high prio last: %s new: %s", str(self.highestStateId), str(statePrio))
            self.__setSate(newState)
    def getBrightness(self):
        """Return the brightness last reported by the device (0-255)."""
        return self.getAttribute("state_bri")
    def setColorTemperatur(self, value, statePrio=10, transitiontime=10):
        """Record a colour temperature (mired, 153-500) on the `statePrio`
        state, clamped to the device's ctmin/ctmax, and push it when that
        priority is the highest.  Out-of-range values are only logged.

        NOTE(review): `transitiontime` is currently unused here.
        """
        newState = self.__getOrAddState(statePrio)
        if (value is not None and value >= 153 and value <= 500):
            logger.info("ct max %s ct min %s", self.getAttribute("ctmax"), self.getAttribute("ctmin"))
            # clamp to the light-specific min/max when the device reports them
            if (self.getAttribute("ctmax") is not None and value > self.getAttribute("ctmax")):
                newState.colorTemperatur = self.getAttribute("ctmax")
            elif (self.getAttribute("ctmin") is not None and value < self.getAttribute("ctmin")):
                newState.colorTemperatur = self.getAttribute("ctmin")
            else:
                newState.colorTemperatur = int(value)
        else:
            logger.warn("Color Temperatur out of range")
        if statePrio >= self.highestStateId:
            logger.info("new high prio last: %s new: %s", str(self.highestStateId), str(statePrio))
            self.__setSate(newState)
    def getColorTemeratur(self):
        """Return the colour temperature last reported by the device (mired)."""
        return self.getAttribute("state_ct")
    def actionOn(self, statePrio=10, transitiontime=10, brightness=None, colorTemperatur=None):
        """Switch the `statePrio` state on, optionally setting brightness
        (clamped to 255) and colour temperature (clamped to ctmin/ctmax),
        then push it to the device when that priority is the highest.

        NOTE(review): `transitiontime` is currently unused here.
        """
        newState = self.__getOrAddState(statePrio)
        newState.on = True
        if brightness is not None:
            if brightness >= 0 and brightness <= 255:
                newState.brightness = int(brightness)
            else:
                newState.brightness = 255
        # Duplicates the clamping in setColorTemperatur; no simple shared
        # helper was found worth extracting at the time.
        if (colorTemperatur is not None and colorTemperatur >= 153 and colorTemperatur <= 500):
            # clamp to the light-specific min/max when the device reports them
            if (self.getAttribute("ctmax") is not None and colorTemperatur > self.getAttribute("ctmax")):
                newState.colorTemperatur = self.getAttribute("ctmax")
            elif (self.getAttribute("ctmin") is not None and colorTemperatur < self.getAttribute("ctmin")):
                newState.colorTemperatur = self.getAttribute("ctmin")
            else:
                newState.colorTemperatur = int(colorTemperatur)
        if statePrio >= self.highestStateId:
            logger.info("new high prio last: %s new: %s", str(self.highestStateId), str(statePrio))
            self.__setSate(newState)
    # switch the light off and flush the whole priority stack
    def actionOff(self):
        """Turn the light off via the base (priority 0) state and discard
        every other stacked state."""
        newState = self.__getOrAddState(0)
        newState.on = False
        self.__setSate(newState)
        # drop every non-base state from the stack
        for key in list(self.stateStack.keys()):
            if key != 0:
                del self.stateStack[key]
        # reset the highest priority back to the base state
        self.highestStateId = 0
    def setAlert(self, statePrio=10, alert="select"):
        """Record a deCONZ alert effect (e.g. "select") on the `statePrio`
        state and push it to the device immediately."""
        logger.info("LIGHT %s set alert", self.getName())
        newState = self.__getOrAddState(statePrio)
        newState.alert = alert
        self.__setSate(newState)
    def stopAlert(self, statePrio=10):
        """Cancel a running alert by setting the effect to "none"."""
        self.setAlert(statePrio=statePrio, alert="none")
    def __setSate(self, state):
        """Push `state` to the device with up to three PUT requests:
        alert first, then hue/sat/ct, then brightness/on.  Channels left at
        their sentinel defaults (None / -1 / out-of-range ct) are skipped,
        as are values matching what the device already reports."""
        jsonObj = {}
        if state.alert != None: #self.stateStack[self.highestStateId].alert:
            jsonObj["alert"] = state.alert
            logger.info(
                "LIGHT %s update state - %s/%s/state - %s", str(self.getId()), self.getUrlRoot(), self.getId(),
                str(jsonObj)
            )
            r = requests.put(self.getUrlRoot() + "/" + self.getId() + "/state", json=jsonObj, timeout=3)
            if not r:
                logger.warn("Some Error in update state: %s", r.text)
        # todo check if different from current setting
        jsonObj = {} # {"transitiontime": 10}
        if (
            state.colorTemperatur >= 153 and state.colorTemperatur <= 500
            and self.getColorTemeratur() != state.colorTemperatur
        ):
            jsonObj["ct"] = state.colorTemperatur
        if state.hue >= 0 and state.hue <= 65535:
            jsonObj["hue"] = state.hue
        if state.sat >= 0 and state.sat <= 255:
            jsonObj["sat"] = state.sat
        if jsonObj != {}: # {"transitiontime": 10}:
            # NOTE(review): IKEA/dresden devices apparently need the on flag
            # sent together with colour changes — confirm against hardware.
            if "IKEA" in self.getManufacturer() or "dresden" in self.getManufacturer():
                if state.on != self.isOn():
                    jsonObj["on"] = state.on
            logger.info(
                "LIGHT %s update state - %s/%s/state - %s", str(self.getId()), self.getUrlRoot(), self.getId(),
                str(jsonObj)
            )
            r = requests.put(self.getUrlRoot() + "/" + self.getId() + "/state", json=jsonObj, timeout=3)
            if not r:
                logger.warn("Some Error in update state: %s", r.text)
        jsonObj = {} # {"transitiontime": 10}
        if (
            state.brightness != None and state.brightness >= 0 and state.brightness <= 255
            and self.getBrightness() != state.brightness
        ):
            jsonObj["bri"] = state.brightness
        if state.on != None and state.on != self.isOn():
            jsonObj["on"] = state.on
        # brightness 0 implies off; the state object is updated to match
        if state.brightness != None and state.brightness == 0:
            jsonObj["on"] = False
            state.on = False
        if jsonObj != {}: # {"transitiontime": 10}:
            logger.info(
                "LIGHT %s update state - %s/%s/state - %s", str(self.getId()), self.getUrlRoot(), self.getId(),
                str(jsonObj)
            )
            r = requests.put(self.getUrlRoot() + "/" + self.getId() + "/state", json=jsonObj, timeout=3)
            if not r:
                logger.warn("LIGHT %s Some Error in update state: %s", self.getName(), r.text)
    def hasState(self, prio):  # check whether prio is still on the stack
        return prio in self.stateStack
    def revokeState(self, prio):
        """Remove the state with priority `prio` from the stack and, if the
        highest priority changed, apply the new top state to the device.
        Priority 0 (the base state) can never be revoked."""
        logger.info("%s revoke State: %s", self.getName(), str(prio))
        if prio == 0:
            # the base state is never revoked
            return
        if prio in self.stateStack:
            del self.stateStack[prio]
        # find the highest remaining priority
        hId = None
        for prio, state in self.stateStack.items():
            if hId is None or prio > hId:
                hId = prio
        # if the top of the stack changed, push the now-current state
        if self.highestStateId != hId:
            logger.info("%s revoke set new State: %s", self.getName(), str(hId))
            self.highestStateId = hId
            self.__setSate(self.stateStack[hId])
    def println(self):
        """Print a one-line, ANSI-coloured summary of the light; the colour
        is derived from the id so each light stays visually distinct."""
        color = int(self.getId()) % 7
        print(
            "\x1b[1;3" + str(color + 1) + ";40m" + "{:2d} : ".format(int(self.getId())) +
            " {:7.7s} - {:30s}".format(self.getManufacturer(), self.getName()),
            " - " + self.getType() + "\x1b[0m",
        )
| true |
6e57e07704eda693c048b5c58c7549bdb3c2f3ef | Python | mgbo/to_do | /test.py | UTF-8 | 786 | 3.34375 | 3 | [] | no_license |
import csv
import math
lat = float(input("Enter a latitude : "))
lon = float(input("Enter a longitude :"))
f_p = 6373
s_p = 6373
ans_1 = 0
ans_2 = 0
def length(lat, lon, n_lat, n_lon):
return math.sqrt((n_lat - lat)**2 + (n_lon - lon)**2)
with open('location_1.csv', 'r') as file:
reader = csv.reader(file)
for row in reader:
n_lat = float(row[0])
n_lon = float(row[1])
d = length(lat, lon, n_lat, n_lon)
print(d)
if f_p > d:
# s_p = f_p
# ans_2 = ans_1
f_p = d
ans_1 = row
print("ans 1 : ", ans_1)
print("ans 2 : ", ans_2)
# else:
# if s_p > d:
# s_p = d
print(f"first near : {ans_1}")
print(f"second near : {ans_2}")
# f = open("near_pos.txt", 'w')
# f.write(ans_1.split())
# f.close()
| true |
391f89f8560c5353ddbee6a22ec84d13f1a725de | Python | Workiva/aws-lambda-fsm-workflows | /tools/yaml_to_json.py | UTF-8 | 3,586 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2016-2020 Workiva Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# system imports
from __future__ import print_function
import argparse
import json
# library imports
# application imports
from aws_lambda_fsm.config import get_current_configuration
from aws_lambda_fsm.constants import CONFIG
from aws_lambda_fsm.serialization import json_dumps_additional_kwargs
# setup the command line args
parser = argparse.ArgumentParser(description='Turns an fsm.yaml file into an AWS Step Function json definition.')
parser.add_argument('--machine_name')
parser.add_argument('--lambda_arn')
args = parser.parse_args()
def output_machine_dict(machine_dict):
    """
    Outputs a https://states-language.net/spec.html compliant JSON text
    representing an FSM.
    :param machine_dict: a dict with machine info.
    :return: a dict.
    """
    states = {}
    step_function = {
        'Comment': 'Generated by: yaml_to_json.py --machine_name=%s' % args.machine_name,
        'States': states
    }
    for state_dict in machine_dict.get(CONFIG.STATES, []):
        state_name = state_dict[CONFIG.NAME]
        if state_dict.get(CONFIG.INITIAL):
            step_function['StartAt'] = state_name
        # Every FSM state becomes a Lambda-backed Task with a retry policy.
        task = {
            'Type': 'Task',
            'Resource': args.lambda_arn,
            'Retry': [
                {
                    "ErrorEquals": ["States.ALL"],
                    "IntervalSeconds": 1,
                    "MaxAttempts": int(machine_dict.get(CONFIG.MAX_RETRIES, CONFIG.DEFAULT_MAX_RETRIES)),
                    "BackoffRate": 2.0
                }
            ]
        }
        states[state_name] = task
        if state_dict.get(CONFIG.FINAL):
            task['End'] = True
            continue
        # Non-final states route through a Choice state that dispatches on
        # the current event emitted by the Lambda.
        choices_state_name = state_name + '-choices'
        task['Next'] = choices_state_name
        states[choices_state_name] = {
            'Type': 'Choice',
            'Choices': [
                {
                    'StringEquals': transition_dict[CONFIG.EVENT],
                    'Variable': '$.system_context.current_event',
                    'Next': transition_dict[CONFIG.TARGET]
                }
                for transition_dict in state_dict.get(CONFIG.TRANSITIONS, [])
            ]
        }
    return step_function
def search_for_machine(filename='fsm.yaml'):
    """
    Searches the .yaml hierarchy for the correct machine and prints its
    Step Function JSON when found.
    :param filename: a path to a fsm.yaml file
    :return:
    """
    for machine_dict in get_current_configuration(filename=filename)[CONFIG.MACHINES]:
        if CONFIG.IMPORT in machine_dict:
            # NOTE(review): a match found inside an imported file prints there,
            # but no result propagates back — the outer loop keeps scanning.
            search_for_machine(filename=machine_dict[CONFIG.IMPORT])
            continue
        if machine_dict[CONFIG.NAME] == args.machine_name:
            data = output_machine_dict(machine_dict)
            print(json.dumps(data, indent=2, **json_dumps_additional_kwargs()))
            return
# find the machine in the machine list
search_for_machine()
| true |
112f8a2e10130580e7f8b0a9b3b66f2c41f901a1 | Python | lujh410/matplotlib | /chapter02/demo05.py | UTF-8 | 402 | 3.71875 | 4 | [] | no_license | # 定制盒装图每一部分的颜色
import random
import matplotlib.pyplot as plt
# 0:平均值 1:标准差
values = [random.gauss(0,1) for i in range(100)]
print(values)
b = plt.boxplot(values)
colorList = ['r','b','g','y']
i = 0
for name, line_list in b.items():
color = colorList[i % len(colorList)]
i += 1
for line in line_list:
line.set_color(color)
plt.show() | true |
fb4fe8d5fe38f9c1e8a79b333b348e07c5c135e5 | Python | webclinic017/stox | /Examples/AlgoTradingImplementation/main.py | UTF-8 | 2,765 | 3.796875 | 4 | [
"MIT"
] | permissive | ## Import The Modules
import stox
import pandas as pd
stock_list = ['FB','AAPL','AMZN','NFLX','GOOG'] ## List Of Stocks You Would Want To Buy
number_of_stocks = len(stock_list)
print(number_of_stocks)
x = 0
starting_cash = 10000 ## Amount Of Money In Trading Account
current_cash = starting_cash
percent_to_spend = 5
money_to_spend = (5/100)*percent_to_spend
def buy(ticker, price, amt):
    """Buy `amt` units of `ticker` at `price` and return the cash left.

    The cost includes a 0.1% brokerage fee.  Fixes the original, where the
    assignment to `current_cash` made it a local variable and reading it
    first raised UnboundLocalError.
    """
    global current_cash  # module-level trading balance
    ## Here Add Use Your Brocker's API To Buy
    cost = price * amt
    brockerage = 0.1 / 100  # brokerage fee of 0.1%, as a fraction
    cost = cost + (cost * brockerage)  # total cost including the fee
    current_cash = current_cash - cost
    print("Bought!")
    return current_cash
def short(ticker, price, amt):
    """Short `amt` units of `ticker` at `price` and return the cash left.

    The cost includes a 0.1% brokerage fee.  Fixes the original, where the
    assignment to `current_cash` made it a local variable and reading it
    first raised UnboundLocalError.
    """
    global current_cash  # module-level trading balance
    ## Use Your Brocker's API To Short
    cost = price * amt
    brockerage = 0.1 / 100  # brokerage fee of 0.1%, as a fraction
    cost = cost + (cost * brockerage)  # total cost including the fee
    current_cash = current_cash - cost
    print("Shorted!")
    return current_cash
# Scan each watched ticker: buy on a starting bullish signal, short on a
# starting bearish one, otherwise just report the trend.
# NOTE(review): `df` starts empty, so assigning scalars like
# `df['Ticker'] = ticker` creates empty columns (no rows) — the DataFrame
# never actually holds the values; verify against the stox API's intent.
# NOTE(review): `amt = price` passes a pandas Series (df.Price) as the
# quantity — presumably a share count was intended; confirm.
while x < number_of_stocks:
    ticker = stock_list[x] ## Get The Current Ticker Symbol
    data = stox.stox.exec(ticker,'list') ## Get Analysis From Stox
    ## Import All Needed Data (Price, Prediction, Analysis, etc.)
    df = pd.DataFrame()
    df['Ticker'] = ticker
    df['Price'] = data[1]
    df['Prediction'] = data[2]
    df['Analysis'] = data[3]
    df['DateFor'] = data[4]
    # a move of at least 2% of the current price counts as significant
    good_pct = data[1]*0.02
    minus_pct = good_pct*-1
    ## Run Scan For Buy/Up/Down/Sell
    if data[2] - data[1] >= good_pct:
        if data[3] == "Bullish (Starting)":
            df['Signal'] = "Buy"
            if money_to_spend <= current_cash: ## Check If Enough Money Left
                price = df.Price
                amt = price
                current_cash = buy(ticker, price, amt) ## Call Buy Function
                print("Bought "+ticker)
            else:
                print("Not Enough Money Left!")
        elif data[3] == "Bullish (Already)":
            df['Signal'] = "Up"
            print(ticker+" is in a Uptrend")
    elif data[2] - data[1] <= minus_pct:
        if data[3] == "Bearish (Starting)":
            df['Signal'] = "Sell"
            if money_to_spend <= current_cash: ## Check If Enough Money Left
                price = df.Price
                amt = price
                current_cash = short(ticker, price, amt) ## Call Short Function
                print("Shorted "+ticker)
            else:
                print("Not Enough Money Left!")
        elif data[3] == "Bearish (Already)":
            df['Signal'] = "Down"
            print(ticker+" is in a Downtrend")
    else:
        df['Signal'] = "None"
        print("No Signal For "+ticker)
    x = x+1
print("Done") ## Print 'Done' After Complete
| true |
a7c5a0629e5711b109a892f148314ecb6fd47e2d | Python | Hashizu/atcoder_work | /abc164/D/main.py | UTF-8 | 765 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
def solve(S: int):
    """Print how many contiguous substrings of the decimal number S are
    multiples of 2019 (AtCoder ABC164 D).

    Uses the suffix-residue trick: since 10 and 2019 are coprime, two
    suffixes with equal residue mod 2019 bracket a substring divisible by
    2019, so the answer is the sum of C(count, 2) over all residues.
    """
    MOD = 2019
    digits = str(S)[::-1]  # process least-significant digit first
    residue_counts = [0] * MOD
    residue_counts[0] = 1  # the empty suffix
    residue = 0
    for position, digit in enumerate(digits):
        residue = (int(digit) * pow(10, position, MOD) + residue) % MOD
        residue_counts[residue] += 1
    print(sum(c * (c - 1) // 2 for c in residue_counts))
# Generated by 1.1.6 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read the integer S from stdin and run the solver."""
    def iterate_tokens():
        # Lazily yield whitespace-separated tokens from stdin.
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    S = int(next(tokens))  # type: int
    solve(S)
if __name__ == '__main__':
main()
| true |
75ac9629a604f0370a2ae4a01131931e5def325c | Python | Magicspell/Programming-Challanges | /imcompression.py | UTF-8 | 6,225 | 2.65625 | 3 | [] | no_license | import pygame
from PIL import Image, ImageFilter
import colorsys
import zlib
import thorpy
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
im = Image.open(input('Which image? '))
print(pytesseract.image_to_string(im))
data = pytesseract.image_to_data(im, output_type=pytesseract.Output.DICT)
boxes = len(data['level'])
for i in range(boxes ):
(x, y, w, h) = (data['left'][i], data['top'][i], data['width'][i], data['height'][i])
print(x, y, w, h)
pix = im.load()
width, height = im.size
screen = pygame.display.set_mode((width, height), pygame.RESIZABLE)
pygame.display.set_caption('Image Compression')
pygame.init()
mainclock = pygame.time.Clock()
hues = []
values = []
def get_2x2(i):
    """Return the 2x2 block of pixels whose top-left corner is at (i, i).

    Reads the module-level `pix` pixel-access object directly.  The original
    scanned every pixel of the image (O(width*height)) just to pick out four
    of them, and raised UnboundLocalError when i+1 fell outside the image.
    """
    pixel1 = pix[i, i]
    pixel2 = pix[i + 1, i]
    pixel3 = pix[i, i + 1]
    pixel4 = pix[i + 1, i + 1]
    return pixel1, pixel2, pixel3, pixel4
def avg_2x2(coords):
    """Average the hue of the 2x2 pixel block at `coords` and return the
    four pixels converted back to RGB with that shared hue."""
    block = get_2x2(coords)
    hsvs = [colorsys.rgb_to_hsv(p[0], p[1], p[2]) for p in block]
    shared_hue = sum(hsv[0] for hsv in hsvs) / 4
    recolored = tuple(colorsys.hsv_to_rgb(shared_hue, hsv[1], hsv[2]) for hsv in hsvs)
    print(*hsvs)
    return recolored
def grayscale():
    """Draw a zero-saturation (grayscale) copy of the image onto the
    pygame screen, pixel by pixel."""
    for col in range(width):
        for row in range(height):
            r, g, b = pix[col, row][0], pix[col, row][1], pix[col, row][2]
            value = colorsys.rgb_to_hsv(r, g, b)[2]
            screen.set_at((col, row), colorsys.hsv_to_rgb(0, 0, value))
# def save():
def compress(size, pixalated, round_value):
    """Render a "compressed" preview of the image onto the pygame screen.

    size: box-blur radius and hue sampling grid step.
    pixalated: grid step used when sampling the brightness (value) channel.
    round_value: rounding precision; negative values quantize to 10-unit steps.

    Side effects: draws every pixel on `screen` and writes the zlib-compressed
    value channel to bytes.txt.
    NOTE(review): `w % size` divides by zero when the caller drives size to 0.
    """
    hues = []
    values = []
    blurImage = im.filter(ImageFilter.BoxBlur(size))
    blurpix = blurImage.load()
    # First pass: sample the blurred image every `size` pixels.
    # NOTE(review): temp_w/temp_h are computed but never used in this loop.
    for w in range(width):
        for h in range(height):
            temp_w = w
            temp_h = h
            temp_w -= w % size
            temp_h -= h % size
            temp_w %= width
            temp_h %= height
            if w % size == 0 and h % size == 0:
                hues.append(blurpix[w,h])
    # NOTE(review): consider `with open(...)` so the file is closed on error.
    f = open('bytes.txt','wb')
    pix_bytes = bytearray(0)
    # Second pass: combine the blurred hue/saturation with the original
    # value, snapped to the `pixalated` grid, and draw the result.
    for w in range(width):
        for h in range(height):
            temp_w = w
            temp_h = h
            temp_w -= w % pixalated
            temp_h -= h % pixalated
            temp_w %= width
            temp_h %= height
            hsv1 = colorsys.rgb_to_hsv(pix[temp_w,temp_h][0], pix[temp_w,temp_h][1], pix[temp_w,temp_h][2])
            blurhsv = colorsys.rgb_to_hsv(blurpix[w,h][0], blurpix[w,h][1], blurpix[w,h][2])
            rgb1 = colorsys.hsv_to_rgb(round(blurhsv[0], round_value), round(blurhsv[1], round_value), round(hsv1[2], round_value))
            if round_value >= 0:
                r = round(rgb1[0], round_value)
                g = round(rgb1[1], round_value)
                b = round(rgb1[2], round_value)
            else:
                # negative precision: quantize channels to coarse 10-unit steps
                r = (round(rgb1[0]/(-10*round_value))*10)%255
                g = (round(rgb1[1]/(-10*round_value))*10)%255
                b = (round(rgb1[2]/(-10*round_value))*10)%255
            rgb1 = (r,g,b)
            # progress output: only true for the first column (w == 0)
            if not w % width:
                print(r)
            if w % pixalated == 0 and h % pixalated == 0:
                values.append(round(hsv1[2], round_value))
                # hues.append((round(blurhsv[0], round_value), round(blurhsv[1], round_value)))
                # pix_byte = hsv1[2].to_bytes(2, byteorder='little')
                pix_bytes.append(hsv1[2])
                # pix_bytes = zlib.compress(pix_bytes)
                # print(pix_bytes)
                # print(int.from_bytes(pix_bytes, 'little'))
            screen.set_at((w, h), rgb1)
    print(len(hues))
    print(len(values))
    f.write(zlib.compress(pix_bytes))
    # f.write(pix_bytes)
    f.close()
# i = 0
# for pixel in get_2x2(0):
# print(pixel)
# rect = pygame.Rect((i*30,0), (30,40))
# pygame.draw.rect(screen, pixel, rect)
# i += 1
# i = 0
# for pixel in avg_2x2(0):
# rect = pygame.Rect((i*30,40), (30,40))
# pygame.draw.rect(screen, pixel, rect)
# i += 1
size = 1
pixalated = 1
round_value = 5
compress(size, pixalated, round_value)
print(size)
pygame.display.flip()
prev_blur = None
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 4:
size += 10
compress(size, pixalated, round_value)
print(size)
pygame.display.flip()
if event.button == 5:
size -= 10
if size <0:
size = 0
compress(size, pixalated, round_value)
print(size)
pygame.display.flip()
if event.type == pygame.KEYUP:
if event.key == pygame.K_UP:
pixalated += 1
compress(size, pixalated, round_value)
pygame.display.flip()
if event.key == pygame.K_DOWN:
if pixalated > 1:
pixalated -= 1
compress(size, pixalated, round_value)
pygame.display.flip()
if event.key == pygame.K_RIGHT:
compress(size, pixalated, round_value)
print(f'Round Value: {round_value}')
round_value -= 1
if event.key == pygame.K_LEFT:
compress(size, pixalated, round_value)
round_value += 1
mainclock.tick(60)
| true |
59871e0ea8714ead2f69a0fdfe3c68320e414cf7 | Python | yugimaster/service.1905.content.provider | /resources/lib/util.py | UTF-8 | 1,561 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# coding=utf8
import urllib2
import re
import StringIO
import gzip
from common import *
def GetHttpData(url, data=None, cookie=None):
    """Fetch `url` (optionally POSTing `data`), retrying once on failure.

    Transparently decompresses gzip responses and re-encodes the body to
    UTF-8 when the page declares a different charset.  Returns the body,
    or '{"status": "failed"}' when every attempt fails.
    (Python 2 code: uses urllib2/StringIO/unicode.)
    """
    log("Fetch URL :%s, with data: %s" % (url, data))
    httpdata = '{"status": "failed"}'
    for i in range(0, 2):
        try:
            req = urllib2.Request(url)
            req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) {0}{1}'.
                           format('AppleWebKit/537.36 (KHTML, like Gecko) ',
                                  'Chrome/28.0.1500.71 Safari/537.36'))
            req.add_header('Accept-encoding', 'gzip')
            if cookie is not None:
                req.add_header('Cookie', cookie)
            if data:
                response = urllib2.urlopen(req, data, timeout=3)
            else:
                response = urllib2.urlopen(req, timeout=3)
            httpdata = response.read()
            if response.headers.get('content-encoding', None) == 'gzip':
                httpdata = gzip.GzipFile(fileobj=StringIO.StringIO(httpdata)).read()
            response.close()
            match = re.compile('encoding=(.+?)"').findall(httpdata)
            if not match:
                match = re.compile('meta charset="(.+?)"').findall(httpdata)
            if match:
                charset = match[0].lower()
                # Fix: the original tested `charset != 'utf-8'` twice; the
                # second test was clearly meant to accept the 'utf8' spelling.
                if (charset != 'utf-8') and (charset != 'utf8'):
                    httpdata = unicode(httpdata, charset).encode('utf-8')
            break
        except Exception:
            print_exc()
            httpdata = '{"status": "failed"}'
    return httpdata
| true |
a3133ec126d34bded090f0f54e602242e999e12c | Python | dm-Charles/textEditor | /app_gui.py | UTF-8 | 653 | 2.546875 | 3 | [] | no_license | import tkinter as tk
window = tk.Tk()
window.title("Editor PRO 2")
buttonFrame = tk.Frame(window)
buttonFrame.pack(fill=tk.Y, side=tk.LEFT)
textFrame = tk.Frame(window)
textFrame.pack(side=tk.RIGHT)
saveButton = tk.Button(buttonFrame,text="save", bg="green", width=10)
saveButton.grid(row=1, column=0, pady=5, sticky="ew")
openButton = tk.Button(buttonFrame, text="open", bg="green", width=10)
openButton.grid(row=0, column=0, pady=5, sticky="ew")
deleteButton = tk.Button(buttonFrame, text="delete", bg="red", width=10)
deleteButton.grid(row=2, column=0, sticky="ew")
textArea = tk.Text(textFrame)
textArea.grid(sticky="news")
window.mainloop()
| true |
6a512215a68ac1e5a7489f6c431219b6e335eb85 | Python | disissaikat/cfc_2020 | /ngo_app_code/password_hashing.py | UTF-8 | 508 | 3.1875 | 3 | [] | no_license | from cryptography.fernet import Fernet
key = b'mlgHc4CrmeiVLmR82I1dRTL7zrR-Dff-k8mS_x7x1uY='
cipher_suite = Fernet(key)
def encrypt_pwd(pwd):
    """Encrypt a plain-text password and return the Fernet token as text."""
    token = cipher_suite.encrypt(pwd.encode('utf-8'))
    return token.decode('utf-8')
def decrypt_pwd(pwd):
    """Decrypt a Fernet token (text) back into the plain-text password."""
    plain = cipher_suite.decrypt(pwd.encode('utf-8'))
    return plain.decode('utf-8')
7b34c7da4cee2bc289d0ebff62045ceabb9c5593 | Python | erceth/pyflags | /bullet.py | UTF-8 | 570 | 2.875 | 3 | [] | no_license | from gameObject import GameObject
import gameConsts
class Bullet(GameObject):
    """A projectile fired by a tank; it marks itself for removal once it
    leaves the playing field."""

    def __init__(self, color, position, direction, angle):
        self.color = color
        super().__init__(
            f'img/{color}_tank.png',
            position,
            (gameConsts.BULLET_SIZE, gameConsts.BULLET_SIZE),
            direction,
            gameConsts.BULLET_SPEED,
            angle,
        )

    def update(self):
        x, y = self.position
        # TODO: replace 800 with map height and width constants
        if not (0 <= x <= 800 and 0 <= y <= 800):
            self.markedForTermination = True
        super().update()
61867fbc1a33355561827c553ea504ae079a0b39 | Python | alexlu07/AI_from_scratch | /NeuralNetwork/network.py | UTF-8 | 2,900 | 2.890625 | 3 | [] | no_license | import numpy as np
class Network:
    """Fully-connected feed-forward net trained with mini-batch SGD.

    Hidden layers use sigmoid activations; the output layer uses softmax,
    paired with a cross-entropy loss (so the output delta is simply
    prediction - target). Weights and biases live in parallel lists, one
    entry per layer transition.
    """

    def __init__(self, *nodes, m=10, lr=2):
        # nodes: layer widths, e.g. Network(784, 30, 10);
        # m: mini-batch size; lr: learning rate.
        self.a = []          # per-layer activations of the last forward pass
        self.mini = m
        self.layers = len(nodes)
        self.sizes = nodes
        self.weights = [np.random.randn(prev, cur)
                        for prev, cur in zip(nodes, nodes[1:])]
        self.biases = [np.zeros([1, cur]) for cur in nodes[1:]]
        self.lr = lr

    def save_weights_biases(self):
        """Persist parameters to weights.npz / biases.npz in the CWD."""
        np.savez("weights", *self.weights)
        np.savez("biases", *self.biases)

    def load_weights_biases(self):
        """Restore parameters saved by save_weights_biases()."""
        saved_w = np.load("weights.npz")
        saved_b = np.load("biases.npz")
        self.weights = [saved_w[name] for name in saved_w.files]
        self.biases = [saved_b[name] for name in saved_b.files]

    def sigmoid(self, s):
        """Logistic activation."""
        return 1.0 / (1.0 + np.exp(-s))

    def sigmoid_der(self, s):
        """Derivative of the logistic function, given its *output* s."""
        return s * (1 - s)

    def cross_softmax_der(self, output, expected):
        """Combined softmax + cross-entropy gradient w.r.t. the logits."""
        return output - expected

    def softmax(self, s):
        """Row-wise softmax, shifted by the row max for numerical stability."""
        shifted = np.exp(s - np.max(s, axis=1, keepdims=True))
        return shifted / np.sum(shifted, axis=1, keepdims=True)

    def relu(self, s):
        """Rectified linear activation (kept around for experimentation)."""
        return (s > 0) * s

    def relu_der(self, s):
        """Derivative of ReLU: 1 where s > 0, else 0."""
        return (s > 0) * 1

    def feed_forward(self, ip):
        """Run a forward pass, recording every layer activation in self.a."""
        activation = ip
        self.a = [activation]
        # All transitions but the last use sigmoid.
        for w, b in zip(self.weights[:-1], self.biases[:-1]):
            activation = self.sigmoid(np.dot(activation, w) + b)
            self.a.append(activation)
        logits = np.dot(activation, self.weights[-1]) + self.biases[-1]
        self.a.append(self.softmax(logits))

    def backprop(self, actual):
        """One SGD step using the activations stored by feed_forward."""
        # Output delta; dividing by the batch size here averages every
        # downstream gradient in one go.
        delta = self.cross_softmax_der(self.a[-1], actual)
        delta /= self.mini
        # Gradient flowing into the previous layer -- must be taken
        # before self.weights[-1] is updated.
        carry = np.dot(delta, self.weights[-1].T)
        self.weights[-1] -= self.lr * np.dot(self.a[-2].T, delta)
        self.biases[-1] -= self.lr * np.sum(delta, axis=0, keepdims=True)
        for layer in range(self.layers - 2, 0, -1):
            delta = carry * self.sigmoid_der(self.a[layer])
            carry = np.dot(delta, self.weights[layer - 1].T)
            self.weights[layer - 1] -= self.lr * np.dot(self.a[layer - 1].T, delta)
            self.biases[layer - 1] -= self.lr * np.sum(delta, axis=0)

    def train(self, ip, res):
        """Train over ip/res in consecutive mini-batches.

        Tail samples beyond the last full batch are skipped, as before.
        """
        for batch in range(len(ip) // self.mini):
            lo, hi = self.mini * batch, self.mini * (batch + 1)
            self.feed_forward(ip[lo:hi])
            self.backprop(res[lo:hi])

    def test(self, ip, res):
        """Return classification accuracy on ip against one-hot labels res.

        Also prints the predicted class indices, as the original did.
        """
        self.feed_forward(ip)
        predicted = self.a[-1].argmax(axis=1)
        truth = np.argmax(res, axis=1)
        print(predicted)
        return np.mean(predicted == truth)
15ed6caa2430b11908cdacb9f798941cc33f2674 | Python | dternyak/FlaskAppEngine | /models.py | UTF-8 | 2,511 | 2.59375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive |
import time
from google.appengine.api import users
from google.appengine.ext import ndb
DEFAULT_GUESTBOOK_NAME = 'default_guestbook'
# We set a parent key on the 'Greetings' to ensure that they are all
# in the same entity group. Queries across the single entity group
# will be consistent. However, the write rate should be limited to
# ~1/second.
def guestbook_key(guestbook_name):
    """Constructs a Datastore key for a Guestbook entity.

    NOTE(review): despite the original docstring, the argument is coerced
    to a string and then ignored -- every caller receives the key for
    DEFAULT_GUESTBOOK_NAME. Changing this would alter where existing
    entities live, so confirm whether per-guestbook keys were intended
    before "fixing" it.
    """
    guestbook_name = str(guestbook_name)
    return ndb.Key('Guestbook', DEFAULT_GUESTBOOK_NAME)
class Author(ndb.Model):
    #Sub model for representing an author.
    # identity: opaque user id from the App Engine Users API.
    # email: author's email address.
    # Both fields are indexed=False, so they cannot be used in queries.
    identity = ndb.StringProperty(indexed=False)
    email = ndb.StringProperty(indexed=False)
class Greeting(ndb.Model):
    #A main model for representing an individual Guestbook entry.
    # author is embedded (StructuredProperty of Author), content is
    # unindexed free text, and date is stamped automatically on first put().
    author = ndb.StructuredProperty(Author)
    content = ndb.StringProperty(indexed=False)
    date = ndb.DateTimeProperty(auto_now_add=True)
def post_ajax(first_value, second_value):
    """Replace the current user's greeting whose content equals
    first_value with second_value.

    NOTE(review): the else branch is attached to the per-entity if, so the
    function returns "failure" as soon as it meets the FIRST greeting that
    does not match -- later matching greetings are never reached. On a
    successful edit nothing is returned (None). Confirm this is intended.
    """
    # We set the same parent key on the 'Greeting' to ensure each
    # Greeting is in the same entity group. Queries across the
    # single entity group will be consistent. However, the write
    # rate to a single entity group should be limited to
    # ~1/second
    user = users.get_current_user()
    all_content = Greeting.query()
    for contents in all_content:
        if contents.content == first_value and user.user_id() == contents.author.identity:
            contents.content = second_value
            contents.put()
            print contents
            # Sleep so the eventually-consistent query reflects the write
            # on the next page load.
            time.sleep(1)
        else:
            return "failure"
def post(form):
    """Store the submitted form text as a new Greeting.

    Posts containing "!" are rejected with an error string; on success the
    function returns None.
    NOTE(review): query_params is assigned but never used, and
    guestbook_name is a tuple here -- guestbook_key() ignores its argument
    anyway, so every greeting lands under the default guestbook.
    """
    # We set the same parent key on the 'Greeting' to ensure each
    # Greeting is in the same entity group. Queries across the
    # single entity group will be consistent. However, the write
    # rate to a single entity group should be limited to
    # ~1/second
    guestbook_name = ('guestbook_name', DEFAULT_GUESTBOOK_NAME)
    greeting = Greeting(parent=guestbook_key(guestbook_name))
    if users.get_current_user():
        greeting.author = Author(
            identity=users.get_current_user().user_id(),
            email=users.get_current_user().email())
    form = str(form)
    if not "!" in form:
        greeting.content = form
        greeting.put()
        query_params = {'guestbook_name': guestbook_name}
        # Sleep so the eventually-consistent query reflects the write
        # on the next page load.
        time.sleep(1)
    else:
        return "! is not allowed to be posted"
| true |
d51de4d55174956120a3a12d196ac8e7b573219d | Python | Bstijn/visualisationPythonS7 | /vis1_3.py | UTF-8 | 2,031 | 2.9375 | 3 | [] | no_license | from vtkmodules.all import(
vtkActor, vtkPolyDataMapper, vtkActor, vtkCylinderSource, vtkProp
)
from util.window_renderer import WindowRenderer
from vis1 import Cone
class Cylinder:
    """Owns one VTK cylinder pipeline (source -> mapper -> actor) and
    registers the actor with the renderer handed to the constructor."""

    def __init__(self, renderer):
        self.__scene = renderer
        self.__source = vtkCylinderSource()
        self.__mapper = vtkPolyDataMapper()
        self.__actor = vtkActor()

    def setup_cylinder_with_diffuse_and_specular(self, radius, height, resolution, center, color, diffuse, specular, specularpower):
        """Like setup_cylinder, but first configures the actor's diffuse
        and specular lighting coefficients."""
        lighting = self.__actor.GetProperty()
        lighting.SetDiffuse(diffuse)
        lighting.SetSpecular(specular)
        lighting.SetSpecularPower(specularpower)
        self.__actor.SetProperty(lighting)
        # The remaining geometry/color/registration steps are shared.
        self.setup_cylinder(radius, height, resolution, center, color)

    def setup_cylinder(self, radius, height, resolution, center, color):
        """Size and place the cylinder, wire source -> mapper -> actor,
        apply the RGB color (components in 0-1) and add the actor to the
        renderer."""
        src = self.__source
        src.SetRadius(radius)
        src.SetHeight(height)
        src.SetResolution(resolution)
        src.SetCenter(center)
        self.__mapper.SetInputConnection(src.GetOutputPort())
        actor = self.__actor
        actor.SetMapper(self.__mapper)
        actor.GetProperty().SetColor(color)
        self.__scene.AddActor(actor)
if __name__ == '__main__':
    # Demo: render two cylinders, the second one with custom lighting.
    window_renderer = WindowRenderer()
    Cylinder(window_renderer.renderer).setup_cylinder(
        1,3,10, #radius, height, resolution
        (0,0,0), #center
        (1,1,0.4) #color 0-1
    )
    Cylinder(window_renderer.renderer).setup_cylinder_with_diffuse_and_specular(
        1, 3, 10, # radius, height, resolution
        (4, 0, 0), # center
        (1, 1, 0.4), # color 0-1
        0.7, # diffuse coefficient
        0.4, # specular coefficient
        20 # specular power
    )
    # Open the interactive render window (blocks until it is closed).
    window_renderer.setup_render_window()
    window_renderer.start_render_window()
a02ee4e1d5faf282f457251ed688f01316465e1d | Python | DainDwarf/AdventOfCode | /2019/Day20/day20.py | UTF-8 | 11,542 | 2.640625 | 3 | [] | no_license | import pytest
import networkx as nx
from networkx.algorithms.shortest_paths.generic import shortest_path
# That's handy, the Advent of Code gives unittests.
# Careful with the test input, as there wil be no strip() in the code : space has meaning here
@pytest.mark.parametrize("inp, exp", [
(""" A
A
#######.#########
#######.........#
#######.#######.#
#######.#######.#
#######.#######.#
##### B ###.#
BC...## C ###.#
##.## ###.#
##...DE F ###.#
##### G ###.#
#########.#####.#
DE..#######...###.#
#.#########.###.#
FG..#########.....#
###########.#####
Z
Z """, 23),
(""" A
A
#################.#############
#.#...#...................#.#.#
#.#.#.###.###.###.#########.#.#
#.#.#.......#...#.....#.#.#...#
#.#########.###.#####.#.#.###.#
#.............#.#.....#.......#
###.###########.###.#####.#.#.#
#.....# A C #.#.#.#
####### S P #####.#
#.#...# #......VT
#.#.#.# #.#####
#...#.# YN....#.#
#.###.# #####.#
DI....#.# #.....#
#####.# #.###.#
ZZ......# QG....#..AS
###.### #######
JO..#.#.# #.....#
#.#.#.# ###.#.#
#...#..DI BU....#..LF
#####.# #.#####
YN......# VT..#....QG
#.###.# #.###.#
#.#...# #.....#
###.### J L J #.#.###
#.....# O F P #.#...#
#.###.#####.#.#####.#####.###.#
#...#.#.#...#.....#.....#.#...#
#.#####.###.###.#.#.#########.#
#...#.#.....#...#.#.#.#.....#.#
#.###.#####.###.###.#.#.#######
#.#.........#...#.............#
#########.###.###.#############
B J C
U P P """, 58),
])
def test_one(inp, exp):
    """part_one must solve each example maze (fixtures in the parametrize
    decorator above, whose whitespace is significant) in *exp* steps."""
    res = part_one(inp)
    assert res == exp
@pytest.mark.parametrize("inp, exp", [
(""" A
A
#######.#########
#######.........#
#######.#######.#
#######.#######.#
#######.#######.#
##### B ###.#
BC...## C ###.#
##.## ###.#
##...DE F ###.#
##### G ###.#
#########.#####.#
DE..#######...###.#
#.#########.###.#
FG..#########.....#
###########.#####
Z
Z """, 26),
(""" Z L X W C
Z P Q B K
###########.#.#.#.#######.###############
#...#.......#.#.......#.#.......#.#.#...#
###.#.#.#.#.#.#.#.###.#.#.#######.#.#.###
#.#...#.#.#...#.#.#...#...#...#.#.......#
#.###.#######.###.###.#.###.###.#.#######
#...#.......#.#...#...#.............#...#
#.#########.#######.#.#######.#######.###
#...#.# F R I Z #.#.#.#
#.###.# D E C H #.#.#.#
#.#...# #...#.#
#.###.# #.###.#
#.#....OA WB..#.#..ZH
#.###.# #.#.#.#
CJ......# #.....#
####### #######
#.#....CK #......IC
#.###.# #.###.#
#.....# #...#.#
###.### #.#.#.#
XF....#.# RF..#.#.#
#####.# #######
#......CJ NM..#...#
###.#.# #.###.#
RE....#.# #......RF
###.### X X L #.#.#.#
#.....# F Q P #.#.#.#
###.###########.###.#######.#########.###
#.....#...#.....#.......#...#.....#.#...#
#####.#.###.#######.#######.###.###.#.#.#
#.......#.......#.#.#.#.#...#...#...#.#.#
#####.###.#####.#.#.#.#.###.###.#.###.###
#.......#.....#.#...#...............#...#
#############.#.#.###.###################
A O F N
A A D M """, 396),
])
def test_two(inp, exp):
    """part_two must solve each recursive-maze example (fixtures in the
    parametrize decorator above, whose whitespace is significant)."""
    res = part_two(inp)
    assert res == exp
def left(pos):
    """Grid cell one step to the left of *pos* (x decreases)."""
    x, y = pos
    return (x - 1, y)
def right(pos):
    """Grid cell one step to the right of *pos* (x increases)."""
    x, y = pos
    return (x + 1, y)
def up(pos):
    """Grid cell one step above *pos* (y decreases)."""
    x, y = pos
    return (x, y - 1)
def down(pos):
    """Grid cell one step below *pos* (y increases)."""
    x, y = pos
    return (x, y + 1)
class DictMap(dict):
    """Maps (x, y) -> character for every cell of the raw puzzle text.

    minx/miny are always 0; maxx/maxy are derived from the loop counters
    left over after the scan (last processed column index minus one and
    last row index minus one) -- a quirk that callers such as near_edge
    rely on, so it is preserved as-is.
    """
    def __init__(self, inp):
        for row, line in enumerate(inp.split('\n')):
            for col, char in enumerate(line):
                self[col, row] = char
        self.minx = 0
        self.miny = 0
        self.maxx = col - 1
        self.maxy = row - 1
def near_edge(pos, full_map, max_distance=3):
    """True when *pos* lies within *max_distance* cells of any border of
    *full_map* (an object exposing minx/maxx/miny/maxy bounds)."""
    x, y = pos
    margins = (
        x - full_map.minx,
        full_map.maxx - x,
        y - full_map.miny,
        full_map.maxy - y,
    )
    return any(margin <= max_distance for margin in margins)
def parse_portals(first_pass):
    """Returns a dictionary of position: tile with only passable tile.
    Regular tiles are '.', while portals are noted as "NAME-inner" and "NAME-outer",
    placed on the adjacent '.' tile on the input map.
    Placing portals on the '.' instead of near it simplifies distance computation.
    Portal are suffixed to simplify the work for part2.

    Each portal label on the map is two letters; only the letter adjacent
    to a '.' produces an entry -- the other letter of the pair is skipped.
    """
    parse = dict()
    for pos, cell in first_pass.items():
        if not cell.isalpha():
            if cell == '.' and pos not in parse: # avoid overwriting a portal
                parse[pos] = cell
        else:
            # Look up the four neighbours (missing cells count as walls ' ').
            neighbors = {neighpos: first_pass.get(neighpos, ' ') for neighpos in [left(pos), right(pos), up(pos), down(pos)]}
            # Letters close to the map border belong to outer portals.
            suffix = "-outer" if near_edge(pos, first_pass) else "-inner"
            # Build the two-letter name in reading order (left-to-right /
            # top-to-bottom) and attach it to the adjacent '.' tile.
            if neighbors[left(pos)] == '.':
                parse[left(pos)] = cell + neighbors[right(pos)] + suffix
            elif neighbors[right(pos)] == '.':
                parse[right(pos)] = neighbors[left(pos)] + cell + suffix
            elif neighbors[up(pos)] == '.':
                parse[up(pos)] = cell + neighbors[down(pos)] + suffix
            elif neighbors[down(pos)] == '.':
                parse[down(pos)] = neighbors[up(pos)] + cell + suffix
            # else: don't add to parse portals: This is the other end of the portal word.
    return parse
def _to_donut(parsed):
    """Transforms the parsed input into a graph, connecting portals. Also returns start and end positions.

    Nodes are map positions; '.' tiles are linked to their grid
    neighbours, and each portal position is linked directly to its
    sibling portal (same two-letter name, other position), so a portal
    jump costs one edge like a normal step.
    """
    G = nx.Graph()
    start = end = None
    for pos, cell in parsed.items():
        if cell == '.':
            for neigh in [left(pos), right(pos), up(pos), down(pos)]:
                if neigh in parsed:
                    G.add_edge(pos, neigh)
        else: # portal
            # AA and ZZ are not really portals.
            if cell == 'AA-outer':
                start = pos
            elif cell == 'ZZ-outer':
                end = pos
            else:
                # Sibling = same two-letter label at a different position.
                connected = [p for p, c in parsed.items() if c[:2] == cell[:2] and p != pos]
                if len(connected) < 1:
                    raise RuntimeError(f"({pos}): No sibling for {cell}")
                if len(connected) > 1:
                    raise RuntimeError(f"({pos}): Too many siblings for {cell}: {len(connected)}")
                for connect_pos in connected:
                    G.add_edge(pos, connect_pos)
    return G, start, end
def _precompute_single_level_paths(parsed):
    """Transform the parsed input into another graph, corresponding to a single recursion level.
    Nodes are the portal (with their inner-outer suffix)
    Edges are weighted by the length of the shortest path + 1 step for going through the portal.
    We keep the +1 on the length, as it simplifies the computation of the whole length of path in part 2.
    Of course, most (and probably all) inputs will generate non-connected graphs."""
    # First, construct the graph of the maze itself.
    Dots = nx.Graph()
    portal_positions = dict()
    for pos, cell in parsed.items():
        if cell == '.':
            for neigh in [left(pos), right(pos), up(pos), down(pos)]:
                if neigh in parsed:
                    Dots.add_edge(pos, neigh)
        else:
            portal_positions[pos] = cell
    # Now, construct the output graph
    # (All-pairs loop is O(P^2) shortest-path calls; fine for AoC-sized
    # inputs. Each unordered pair is visited twice, which is harmless in
    # an undirected nx.Graph.)
    Portals = nx.Graph()
    for p1, p1_name in portal_positions.items():
        for p2, p2_name in portal_positions.items():
            if p1 != p2:
                try:
                    # len(shortest_path) counts nodes, i.e. steps + 1 --
                    # the extra 1 doubles as the cost of using the portal.
                    path_length = len(shortest_path(Dots, p1, p2))
                    Portals.add_edge(p1_name, p2_name, weight=path_length)
                except nx.NetworkXNoPath:
                    pass
    return Portals
def reverse_portal_name(name):
    """Swap the side suffix of a portal label: 'XX-outer' <-> 'XX-inner'."""
    swapped = '-inner' if '-outer' in name else '-outer'
    return name[:2] + swapped
def recursive_maze_dijkstra(portals):
    """Take a graph of Node: portals, Edge: shortest_path_length,
    and go from AA-outer on lvl 0 to ZZ-outer on lvl 0.
    We use Dijkstra to find the shortest path: Nodes are (portals, recursion lvl)
    We use the portal name on the "output". i.e., we start at (AA-outer, 0),
    and if we choose to look at portal BC-inner, next node is (BC-outer, 1)."""
    explored = dict() # (portal, lvl): cumulated path length
    neighbors = {('AA-outer', 0): 0} # We start on AA-outer
    while neighbors:
        # Classic Dijkstra: expand the frontier node with the smallest
        # tentative distance.
        smallest_node, distance = min(neighbors.items(), key= lambda i:i[1])
        portal, lvl = smallest_node
        if portal == 'AA-inner': # Actually not possible, discard the node.
            neighbors.pop(smallest_node)
            continue
        if portal == 'ZZ-inner':
            # Reaching ZZ at lvl -1 means we "stepped through" ZZ-outer at
            # lvl 0 -- that is the goal.
            if lvl == -1:
                return distance # Shortest path!
            else: # Actually not possible, discard the node.
                neighbors.pop(smallest_node)
                continue
        if lvl == -1: #Actually not possible either.
            neighbors.pop(smallest_node)
            continue
        for new_portal in portals.neighbors(portal):
            # Entering an inner portal goes one level deeper, an outer one
            # goes one level up; we land on that portal's other side.
            new_lvl = lvl+1 if 'inner' in new_portal else lvl-1
            out_portal = reverse_portal_name(new_portal)
            new_node = (out_portal, new_lvl)
            new_distance = distance + portals.edges[(portal, new_portal)]['weight']
            if new_node not in explored:
                if new_node in neighbors:
                    neighbors[new_node] = min(neighbors[new_node], new_distance)
                else:
                    neighbors[new_node] = new_distance
        explored[smallest_node] = distance
        neighbors.pop(smallest_node)
def part_one(inp):
    """Shortest step count from AA to ZZ, treating portals as plain
    one-step links (no recursion levels)."""
    grid = DictMap(inp)
    graph, entry, goal = _to_donut(parse_portals(grid))
    return len(shortest_path(graph, entry, goal)) - 1
def part_two(inp):
    """Shortest step count from AA to ZZ with recursive (leveled) portals."""
    grid = DictMap(inp)
    portal_graph = _precompute_single_level_paths(parse_portals(grid))
    return recursive_maze_dijkstra(portal_graph) - 1
if __name__ == '__main__':
    # CLI entry point: read the puzzle input file and print both answers.
    from argparse import ArgumentParser, FileType
    args = ArgumentParser()
    args.add_argument("input", help='Your input file', type=FileType('r'))
    options = args.parse_args()
    inp = options.input.read()
    print("Answer for part one is : {res}".format(res=part_one(inp)))
    print("Answer for part two is : {res}".format(res=part_two(inp)))
| true |
76f165f41d492683342370e950bf8d2907f58aa0 | Python | tjtimer/aio_arango | /tests/test_graph.py | UTF-8 | 1,675 | 2.625 | 3 | [
"MIT"
] | permissive | """
test_graph.py
author: Tim "tjtimer" Jedro
created: 17.04.19
"""
from pprint import pprint
from aio_arango.db import DocumentType
from aio_arango.graph import ArangoGraph
async def test_graph_create(test_db):
    """Creating a graph over a vertex and an edge collection should expose
    it as an ArangoGraph attribute on the db handle (test_db fixture)."""
    await test_db.create_collection('test_node')
    await test_db.create_collection('test_edge', doc_type=DocumentType.EDGE)
    await test_db.create_graph(
        'test_graph',
        [{'collection': 'test_edge', 'from': ['test_node'], 'to': ['test_node']}]
    )
    assert isinstance(test_db.test_graph, ArangoGraph)
async def test_graph_create_entries(graph_db):
    """Populate the knows_n_rides graph (graph_db fixture) with person and
    bike vertices plus knows/rides edges, then check the document ids the
    driver reports for the last created vertex of each collection."""
    await graph_db.knows_n_rides.vertex_create('person', {'_key': 'jane', 'name': 'Jane', 'age': 23})
    await graph_db.knows_n_rides.vertex_create('person', {'_key': 'kalle', 'name': 'Kalle', 'age': 42})
    p_resp = await graph_db.knows_n_rides.vertex_create('person', {'_key': 'sven', 'name': 'Sven', 'age': 1})
    await graph_db.knows_n_rides.vertex_create('bike', {'_key': 'diamant', 'brand': 'Diamant', 'weight': '12 kg'})
    b_resp = await graph_db.knows_n_rides.vertex_create('bike', {'_key': 'megacycle', 'brand': 'MegaCycle', 'weight': '2 kg'})
    await graph_db.knows_n_rides.edge_create('knows', {'_from': 'person/jane', '_to': 'person/kalle'})
    await graph_db.knows_n_rides.edge_create('knows', {'_from': 'person/jane', '_to': 'person/sven'})
    await graph_db.knows_n_rides.edge_create('rides', {'_from': 'person/jane', '_to': 'bike/megacycle'})
    e_resp = await graph_db.knows_n_rides.edge_create('rides', {'_from': 'person/kalle', '_to': 'bike/diamant'})
    pprint(e_resp)
    assert p_resp['_id'] == 'person/sven'
    assert b_resp['_id'] == 'bike/megacycle'
| true |
7f3bc8ee2428ec6ee8e7d2ae4858e58c0c4183ee | Python | jeon-chanhee/DataScience | /Python/basic/Python_day1/Python01_elif_전찬희.py | UTF-8 | 337 | 4.09375 | 4 | [] | no_license | #90점 이상이면 : A 학점
# Grade bands (translated from the Korean comments):
#   >= 90 -> A, >= 80 -> B, >= 70 -> C, >= 60 -> D, otherwise F.
grade = 91
# Walk the cutoffs from highest to lowest; for/else prints F only when no
# band matched.
for cutoff, label in ((90, "A학점"), (80, "B학점"), (70, "C학점"), (60, "D학점")):
    if grade >= cutoff:
        print(label)
        break
else:
    print("F학점")
| true |
054daed9bc01f7cdc988510404477b41d734c22e | Python | akshaylike/gstracker | /fill_games_into_db.py | UTF-8 | 690 | 2.578125 | 3 | [] | no_license | import sqlite3
import json
# Mirror the scraped GMG game list (gmggames.json) into the local SQLite
# GAMES table, one row per successfully parsed record.
json_file = open('gmggames.json')
list_of_games = json.load(json_file)
json_file.close()
conn = sqlite3.connect('development.sqlite3')
row_id = 1
for each in list_of_games:
    try:
        onSale = int(each['onSale'][0])
        #steamworks = int(each['steamworks'][0])
        gameTitle = each['gameTitle'][0]
        gameLink = each['gameLink']
        rrp = float(each['rrp'][0][1:])  # strip leading currency symbol
        drp = 0
        if onSale == 1:
            drp = float(each['drp'][0][1:])
    except IndexError:
        # Record is missing a field -- skip it. (Previously the loop fell
        # through to conn.execute(query) even here, replaying the previous
        # iteration's query or an empty string.)
        continue
    # Parameterized insert: titles containing quotes no longer break the
    # statement (the old "%s"-built SQL did), and every parsed record is
    # actually inserted.
    conn.execute(
        "INSERT INTO GAMES VALUES(?, ?, ?, ?, ?, ?)",
        (row_id, gameTitle, rrp, drp, onSale, gameLink),
    )
    row_id += 1
conn.commit()
conn.close()
| true |
a018faadb75c98616394542514b4c3398197560c | Python | thelumen/Python-com | /plot_presure1.py | UTF-8 | 2,841 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
import numpy as np
# Raw capture: one byte per sample. NOTE(review): the de-interleaving below
# implies 8 interleaved channels -- confirm against the .prs file format.
data = np.fromfile('data/out.prs', np.uint8)
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital Butterworth low-pass filter.

    cutoff: corner frequency in Hz; fs: sampling rate in Hz; order:
    filter order. Returns the (b, a) transfer-function coefficients.
    """
    normalized_cutoff = cutoff / (0.5 * fs)  # relative to the Nyquist rate
    return butter(order, normalized_cutoff, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """Low-pass *data* with a Butterworth filter designed by
    butter_lowpass (forward-only lfilter, so output is phase-shifted)."""
    b, a = butter_lowpass(cutoff, fs, order=order)
    return lfilter(b, a, data)
# De-interleave the 8 channels: sample i of channel c is byte 8*i + c.
data1 = data[0::8]
data2 = data[1::8]
data3 = data[2::8]
data4 = data[3::8]
data5 = data[4::8]
data6 = data[5::8]
data7 = data[6::8]
data8 = data[7::8]
# # intial parameters
# n_iter = data1.size
# sz = (n_iter,) # size of array
# x = -0.37727 # truth value (typo in example at top of p. 13 calls this z)
# # z = np.random.normal(x,0.1,size=sz) # observations (normal about x, sigma=0.1)
# Q = 1e-5 # process variance
# # allocate space for arrays
# xhat=np.zeros(sz) # a posteri estimate of x
# P=np.zeros(sz) # a posteri error estimate
# xhatminus=np.zeros(sz) # a priori estimate of x
# Pminus=np.zeros(sz) # a priori error estimate
# K=np.zeros(sz) # gain or blending factor
# R = 0.1**2 # estimate of measurement variance, change to see effect
# # intial guesses
# xhat[0] = 0.0
# P[0] = 1.0
# for k in range(1,n_iter):
# # time update
# xhatminus[k] = xhat[k-1]
# Pminus[k] = P[k-1]+Q
# # measurement update
# K[k] = Pminus[k]/( Pminus[k]+R )
# xhat[k] = xhatminus[k]+K[k]*(data1[k]-xhatminus[k])
# P[k] = (1-K[k])*Pminus[k]
# order = 6
# fs = 30.0 # sample rate, Hz
# cutoff = 3.667
# b, a = butter_lowpass(cutoff, fs, order)
# data11 = butter_lowpass_filter(data1, cutoff, fs, order)
# data22 = butter_lowpass_filter(data2, cutoff, fs, order)
# data33 = butter_lowpass_filter(data3, cutoff, fs, order)
# data44 = butter_lowpass_filter(data4, cutoff, fs, order)
# data55 = butter_lowpass_filter(data5, cutoff, fs, order)
# data66 = butter_lowpass_filter(data6, cutoff, fs, order)
# data77 = butter_lowpass_filter(data7, cutoff, fs, order)
# data88 = butter_lowpass_filter(data8, cutoff, fs, order)
# 4x2 grid of subplots, one per channel.
p1 = plt.subplot(421)
p2 = plt.subplot(422)
p3 = plt.subplot(423)
p4 = plt.subplot(424)
p5 = plt.subplot(425)
p6 = plt.subplot(426)
p7 = plt.subplot(427)
p8 = plt.subplot(428)
# x1 = np.arange(data1.size)
# plt.plot(x1, data1, 'r')
# x2 = np.arange(data2.size)
# plt.plot(x2, data2, 'g')
# # p3.plot(data33[100:])
# x4 = np.arange(data4.size)
# plt.plot(x4, data4, 'b')
# x5 = np.arange(data5.size)
# plt.plot(x5, data5, 'y')
# p6.plot(data66[100:])
# p7.plot(data77[100:])
# p8.plot(data88[100:])
# Plot each raw (unfiltered) channel; the filtered variants above are
# commented out.
p1.plot(data1)
p2.plot(data2)
p3.plot(data3)
p4.plot(data4)
p5.plot(data5)
p6.plot(data6)
p7.plot(data7)
p8.plot(data8)
# NOTE(review): np.save appends ".npy", so this writes "out.pcm.npy" in
# NumPy format -- despite the name it is not raw PCM.
np.save('out.pcm', data1)
plt.show()
c771dfb7e2fffe77c35704188988cbd34da3bbf7 | Python | citlaligm/Advance-Lane-Detection | /test_images/filters.py | UTF-8 | 3,240 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 12 17:39:05 2016
@author: uidr9588
"""
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Threshold-experiment script: combines Sobel x/y gradients, gradient
# magnitude, gradient direction and the HLS S channel into binary lane
# masks for a single test image.
filename = 'test6.jpg'
img = mpimg.imread(filename)

# Convert to HLS color space and separate the S channel
# Note: img is assumed to be the undistorted image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]

# Grayscale image for the gradient operators.
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

# Sobel x: derivative in x accentuates near-vertical lines.
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
abs_sobelx = np.absolute(sobelx)
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))

# Sobel y: derivative in y.
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
abs_sobely = np.absolute(sobely)
scaled_sobely = np.uint8(255 * abs_sobely / np.max(abs_sobely))

# Threshold x gradient
thresh_min = 50
thresh_max = 100
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1

# Threshold y gradient
thresh_min_y = 50
thresh_max_y = 100
sxbinaryy = np.zeros_like(scaled_sobely)
sxbinaryy[(scaled_sobely >= thresh_min_y) & (scaled_sobely <= thresh_max_y)] = 1

# Threshold gradient magnitude (named mag_binary so the direction mask
# below no longer clobbers it, as the original binary_output reuse did).
mag_thresh_min = 0
mag_thresh_max = 255
magnitude = np.sqrt(sobelx**2 + sobely**2)
scale_factor = np.max(magnitude) / 255  # rescale to 8-bit range
magnitude = (magnitude / scale_factor).astype(np.uint8)
mag_binary = np.zeros_like(magnitude)
mag_binary[(magnitude > mag_thresh_min) & (magnitude <= mag_thresh_max)] = 1

# Threshold gradient direction.
# FIX: the original wrapped this in `if sobelx != 0:` which raises
# ValueError (truth value of a multi-element array is ambiguous).
# arctan2 handles sobelx == 0 elementwise and equals
# |arctan(sobely/sobelx)| everywhere else.
dir_thresh_min = 0
dir_thresh_max = 255
abs_direction = np.arctan2(abs_sobely, abs_sobelx)
dir_binary = np.zeros_like(abs_direction)
dir_binary[(abs_direction > dir_thresh_min) & (abs_direction <= dir_thresh_max)] = 1

# Threshold color channel (HLS saturation).
s_thresh_min = 170
s_thresh_max = 255
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1

# Combine: (x AND y gradient) OR (magnitude AND direction). The original
# compared binary_output with itself because the magnitude mask had been
# overwritten.
combined = np.zeros_like(dir_binary)
combined[((sxbinary == 1) & (sxbinaryy == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1

# Stack each channel to view their individual contributions in green and blue respectively
# This returns a stack of the two binary images, whose components you can see as different colors
color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary))

# Combine the two binary thresholds
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1

# Plotting thresholded images
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.set_title('Stacked thresholds')
ax1.imshow(color_binary)
ax2.set_title('Combined S channel and gradient thresholds')
ax2.imshow(combined, cmap='gray')
f8c83821888e048577fb933b6065ec26c3700fee | Python | sudo-hemant/CP_CipherSchools | /recursion_and_backtracking/all_possible_word_from_phone_digits.py | UTF-8 | 806 | 3.734375 | 4 | [] | no_license |
# https://www.geeksforgeeks.org/find-possible-words-phone-digits/
def find_possible_combinations(number):
    """Return every letter word spellable from a phone-keypad digit string.

    Uses itertools.product over the per-digit letter groups, which yields
    the words in the same depth-first order as the recursive helper it
    replaces. Digits 0 and 1 map to no letters, so any occurrence yields
    no words; an empty input yields [''].
    """
    from itertools import product
    hash_number = ["", "", 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
    groups = [hash_number[int(digit)] for digit in number]
    return [''.join(combo) for combo in product(*groups)]
def util(index, temp, n, number, result, hash_number):
    """Depth-first backtracking helper: extend the shared buffer *temp*
    with each letter of digit *number[index]*, recursing until *n*
    characters are placed, and append each finished word to *result*."""
    if index >= n:
        result.append(''.join(temp))
        return
    for letter in hash_number[int(number[index])]:
        temp.append(letter)
        util(index + 1, temp, n, number, result, hash_number)
        temp.pop()  # undo the choice before trying the next letter
# Console driver: first line holds the number of test cases, then one
# digit string per case; print the word list for each.
tc = int(input())
for _ in range(tc):
    number = input()
    print(find_possible_combinations(number))
6e5427c79c5687c1754395586e3ac691a0e652b4 | Python | phani-1995/Week3-python_libraries | /Matplotlib/Line_marker.py | UTF-8 | 268 | 3.234375 | 3 | [] | no_license | import matplotlib.pyplot as plt
# Demo: dash-dot red line with large blue circle markers.
x = [1,4,5,6,7]
y = [2,6,3,6,3]
plt.plot(x, y, color='red', linestyle='dashdot', linewidth = 3,
         marker='o', markerfacecolor='blue', markersize=12)
# Fix both axes to the 1-8 range and label them.
plt.ylim(1,8)
plt.xlim(1,8)
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.show()
da27e81bea2e960b59f03586f589762902b5cb3b | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_156/574.py | UTF-8 | 444 | 2.875 | 3 | [] | no_license | import math
# Google Code Jam solution (Python 2): for each case, read a count line
# (unused) and a list of integers, then print the minimised total.
num = raw_input()
num = int(num)
for x in range(num):
    output = "Case #"+str(x+1)+': '
    i = raw_input()  # line holding the list length -- read but unused
    lis= raw_input().split()
    for i in range(len(lis)):
        lis[i] = int(lis[i])
    m = max(lis)
    o = 999999
    # Try every threshold sq and keep the cheapest total.
    for sq in range(1,m+1):
        s = 0
        for x in lis:  # NOTE(review): shadows the outer case index x (outer x unused afterwards, so harmless)
            s += int(x/sq)
            if (x %sq == 0):
                s-=1
        s += sq
        o = min(o,s)
    print output+str(o)
| true |
b3a2aa75915b2b8c928eb8162698d68207a75df2 | Python | sajanganesh/salesforce | /pthon1.py | UTF-8 | 147 | 3.90625 | 4 | [] | no_license | num=int(input("enter the number :"))
# Print an num x num grid of boxes, each cell containing a 0.
i=0
while i<num:
    print(" ---"*num)
    print(f"| {0} "*num+"|")  # NOTE(review): f"{0}" is just the literal 0 for every cell
    i=i+1
    if i==num:
        print(" ---"*num)  # bottom border after the last row
eb3c645adef1e9ed0023a6bc3785d3c68a5e3f5a | Python | pypr/compyle | /examples/axpb_jit.py | UTF-8 | 578 | 2.671875 | 3 | [
"BSD-3-Clause"
] | permissive | """Shows the use of annotate without any type information.
The type information is extracted from the arguments passed
and the function is annotated and compiled at runtime.
"""
from compyle.api import annotate, Elementwise, wrap, get_config, declare
import numpy as np
from numpy import sin
@annotate
def axpb(i, x, y, a, b):
    # Elementwise kernel body: y[i] = a*sin(x[i]) + b.
    # i is the element index supplied by Elementwise; x, y are arrays and
    # a, b scalars. compyle transpiles this function from its AST, so the
    # body must stay within the simple subset compyle can parse (no
    # docstring, plain assignments only).
    xi = x[i]
    y[i] = a * sin(xi) + b
# Driver: evaluate y = a*sin(x) + b elementwise over 10k points.
x = np.linspace(0, 1, 10000)
y = np.zeros_like(x)
a = 2.0
b = 3.0
# NOTE(review): requires a working OpenCL runtime; OpenMP is enabled for
# any generated host code.
backend = 'opencl'
get_config().use_openmp = True
# wrap() moves the arrays into backend-managed storage.
x, y = wrap(x, y, backend=backend)
e = Elementwise(axpb, backend=backend)
e(x, y, a, b)
| true |
6d0f82859b8265d08b7f78e013d905e8c415654f | Python | dgarridouma/streamlit-example | /streamlit1.py | UTF-8 | 1,795 | 3.46875 | 3 | [] | no_license | import streamlit as st
import pandas
import plotly.graph_objects as go
# Page header and description (UI text is in Spanish).
st.title("Ejemplo utilización streamlit")
st.markdown("Este ejemplo muestra cómo utilizar streamlit para mostrar datos de DataFrames Pandas en una aplicación web")
# Sidebar: chart-type selector.
st.sidebar.title("Seleccionar gráficas")
st.sidebar.markdown("Selecciona el tipo de gráfica que quieras mostrar:")
# Gapminder dataset fetched from GitHub on every rerun of the script.
df = pandas.read_csv("https://raw.githubusercontent.com/kirenz/datasets/master/gapminder.csv")
chart_visual = st.sidebar.selectbox('Seleccionar gráfica',
                                    ('Line Chart', 'Bar Chart', 'Pie Chart'))
# Optional raw-data table.
if st.sidebar.checkbox("Mostrar datos", True):
    st.dataframe(df)
fig = go.Figure()
# Line chart: population of Spain over time.
if chart_visual == 'Line Chart':
    df2 = df[df["country"] == 'Spain']
    fig.add_trace(go.Scatter(x = df2["year"], y = df2["pop"],
                             mode = "lines"))
    fig.update_layout(title='Evolución Población España',
                      xaxis_title='Año',
                      yaxis_title='Población (millones)')
    st.plotly_chart(fig, use_container_width=True)
# Bar chart: life expectancy of the first ten rows for the chosen year.
if chart_visual == 'Bar Chart':
    year = st.slider('year',2007,1952,2007)
    df2 = df[df["year"] == year]
    fig.add_trace(go.Bar(x = df2.iloc[0:10].country, y = df2.iloc[0:10].lifeExp, text = df2.iloc[0:10].lifeExp,
                        textposition='auto'))
    fig.update_layout(title='Expectativa de vida en años')
    st.plotly_chart(fig, use_container_width=True)
# Pie chart: 2007 population share by continent.
if chart_visual == 'Pie Chart':
    df2 = df[df["year"] == 2007].groupby("continent").sum()
    fig.add_trace(go.Pie(labels=df2["pop"].keys(), values=df2["pop"]))
    fig.update_layout(title='Porcentajes de población por continente')
    st.plotly_chart(fig, use_container_width=True)
| true |
3c7cb827dd94764bdc05754c3219b1e6ef630ad3 | Python | dle519/CS1026-Assignment-1 | /dle46_Assign1.py | UTF-8 | 1,498 | 4.21875 | 4 | [] | no_license | ##
# This program simulates flipping a coin into a grid for a carnival game
from random import random
# Variable list
distance = float(input("Please enter the distance between the lines: ")) # Distance between the lines (in mm)
reward = int(input("Please enter the reward if the customer wins: ")) # How much the customer will win
radius = 14.0 # radius of the circle
customerPocket = ownerPocket = 0
# Constants
TOONIE = 2
ATTEMPTS = 1000
# Randomizing the location of the coin
# NOTE(review): each coordinate is sampled from [-radius, distance), so
# centres can fall outside a single grid cell -- confirm this matches the
# intended geometric model.
for coinToss in range(ATTEMPTS):
    r = random()
    xCentre = -radius + (distance + radius) * r # will randomize the centre of the circle from -14 to 90
    r = random()
    yCentre = -radius + (distance + radius) * r
    if (radius < xCentre < distance - radius) and (radius < yCentre < distance - radius): # If the coin does not touch the square grid
        customerPocket += reward
    else: # If the coin touches the square grid
        ownerPocket += TOONIE
# Owner making a profit or losing money
if customerPocket == 0:
    print("For 1000 toonie tosses, the owner will win all $2000!")
elif (ownerPocket / customerPocket) > 1:
    print("For 1000 toonie tosses, the owner may gain of ${}".format(ownerPocket)) # How much the owner will gain if he has a profit
elif 0 < (ownerPocket / customerPocket) < 1:
    print("For 1000 toonie tosses, the owner may lose ${}".format(abs(customerPocket))) # How much the owner will lose to the customer if there is no profit
# NOTE(review): when ownerPocket == customerPocket the ratio is exactly 1
# and nothing is printed; also the amounts shown are gross totals, not the
# owner's net (ownerPocket - customerPocket) -- verify the intended output.
| true |
66052a42e1091d441136a51fbd06039ce7991f01 | Python | Kent-EGUCHI/practice | /matplotlib_practice.py | UTF-8 | 115 | 3.046875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# Plot y = x^2 over [0, 1] with 100 sample points.
x = np.linspace(0, 1, 100)
y = x ** 2
plt.plot(x, y)
plt.show()
7b81ee5a5c48d601f0eb4733b24ca7db6e853524 | Python | hdjsjyl/machine_learning_interview | /torchMnist.py | UTF-8 | 1,282 | 2.84375 | 3 | [] | no_license | # step1 load dataset
# step2 make dataset iterable
# step3 create model class
# step4 instantiate model class
# step5 instantiate loss function
# step6 instantiate optimizer
# step7 train model
# step8 test model
import torch as tc
import torchvision as tv
import torchvision.transforms as trans
import torch.utils.data as Data
import matplotlib.pyplot as plt
import time
import sys
# FashionMNIST from ./data; download=False assumes the dataset files are
# already present locally (torchvision raises otherwise).
mnist_train = tv.datasets.FashionMNIST(root='./data', train=True, download=False, transform=trans.ToTensor())
mnist_test = tv.datasets.FashionMNIST(root='./data', train=False, download=False, transform=trans.ToTensor())
def get_fashionMnist_labels(labels):
    """Map numeric FashionMNIST class ids (0-9) to their text names."""
    text_labels = {
        0: 't-shirt', 1: 'trouser', 2: 'pullover', 3: 'dress', 4: 'coat',
        5: 'sandal', 6: 'shirt', 7: 'sneaker', 8: 'bag', 9: 'ankle boot',
    }
    return [text_labels[label] for label in labels]
# Batched loaders; training data is reshuffled each epoch.
trainIter = Data.DataLoader(mnist_train, batch_size=256, shuffle=True, num_workers=4)
testIter = Data.DataLoader(mnist_test, batch_size=256, shuffle=False, num_workers=4)
# Time one full pass over the training loader (pure loading/transform
# benchmark -- no model involved).
start = time.time()
for x, y in trainIter:
    continue
print(time.time() - start)
| true |
13e12ce117edcfb2d64c48e727546049b1b1b0cb | Python | LuisBernabe/RoboticaSalonNav | /src/mesa_marker/src/mesa.py | UTF-8 | 2,334 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
#9 cuadritos de ancho
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
"""
Clase que crea un publicador Marker simulando una mesa
author: Berna
"""
class Mesa(object):
    """
    Publishes a visualization_msgs/Marker that represents a table ("mesa")
    in RViz.

    Constructor parameters (all read from the launch file by the caller):
        index: identifier of this table; also embedded in the topic name
        x_val: x coordinate of the marker in the /odom frame
        y_val: y coordinate of the marker in the /odom frame
        z_val: z coordinate of the marker in the /odom frame
    """
    def __init__(self,index,x_val,y_val,z_val):
        # One publisher per table instance: topic /marker_mesa_<index>.
        self.marker_publicador=rospy.Publisher("/marker_mesa_"+str(index),Marker,queue_size=1)
        # Republish at 1 Hz so RViz subscribers that start late still see the marker.
        self.rate=rospy.Rate(1)
        self.init_marker(index,x_val,y_val,z_val)
    def init_marker(self,index,x_val,y_val,z_val):
        """Build the CUBE marker message once; start() republishes it forever."""
        self.marker_obj=Marker()
        self.marker_obj.header.frame_id="/odom"
        self.marker_obj.header.stamp=rospy.get_rostime()
        self.marker_obj.ns="mesa"
        self.marker_obj.id=index
        self.marker_obj.type=Marker.CUBE
        self.marker_obj.action=Marker.ADD
        my_point=Point()
        my_point.x=x_val
        my_point.y=y_val
        my_point.z=z_val
        self.marker_obj.pose.position=my_point
        # NOTE(review): all-zero quaternion (w=0) is not normalized; the
        # canonical identity orientation is w=1 -- kept as in the original.
        self.marker_obj.pose.orientation.x=0
        self.marker_obj.pose.orientation.y=0
        self.marker_obj.pose.orientation.z=0
        self.marker_obj.pose.orientation.w=0
        # Table footprint 9 x 2 x 2 (file header note: "9 cuadritos de ancho"
        # = 9 grid cells wide).
        self.marker_obj.scale.x=9.0
        self.marker_obj.scale.y=2.0
        self.marker_obj.scale.z=2.0
        # Opaque grey.
        self.marker_obj.color.r=0.5
        self.marker_obj.color.g=0.5
        self.marker_obj.color.b=0.5
        self.marker_obj.color.a=1.0
        # lifetime of 0 means the marker never auto-expires.
        self.marker_obj.lifetime=rospy.Duration(0)
    def start(self):
        """Publish the marker at 1 Hz until the node is shut down."""
        while not rospy.is_shutdown():
            self.marker_publicador.publish(self.marker_obj)
            self.rate.sleep()
if __name__ == '__main__':
    rospy.init_node("mesa_marker_node", anonymous=True)
    # Marker id and position come from private node parameters (~param),
    # which the launch file is expected to set.
    idx = rospy.get_param('~num_mesa')
    x = rospy.get_param('~x')
    y = rospy.get_param('~y')
    z = rospy.get_param('~z')
    markerBasic_obj = Mesa(idx, x, y, z)
    try:
        markerBasic_obj.start()
    # BUG FIX: the correct class name is ROSInterruptException; the original
    # `rospy.RosInterruptException` does not exist and would raise
    # AttributeError instead of handling a clean shutdown.
    except rospy.ROSInterruptException:
        pass
| true |
f6f442a3b599e542a87db29e731395ec09d96ca6 | Python | sukraBhandari/pythonCode | /BST.py | UTF-8 | 2,147 | 3.59375 | 4 | [] | no_license | #binary search tree
class Node(object):
    """A single binary-tree node: a payload plus links to two subtrees."""

    def __init__(self, data):
        self.data = data    # value stored at this node
        self.left = None    # left child (values smaller than data)
        self.right = None   # right child (values greater than or equal)
class Tree(object):
    """Unbalanced binary search tree of mutually comparable values.

    Values smaller than a node are inserted into its left subtree; equal
    or greater values go to the right.  The print* traversal methods write
    node values to stdout; findData/treeDepth/printLeaf return results.
    (Single-argument print() calls work under both Python 2 and 3.)
    """

    def __init__(self):
        # BUG FIX: the original did `self.root = Node(data)` where `data`
        # was an undefined global (NameError on construction).  Start empty;
        # addChild() already creates the root on the first insertion.
        self.root = None

    def addChild(self, data):
        """Insert `data` into the tree (duplicates go to the right)."""
        if self.root is None:
            self.root = Node(data)
        else:
            current = self.root
            lastNode = False
            while not lastNode:
                parent = current
                if data < current.data:
                    current = current.left
                    if current is None:
                        parent.left = Node(data)
                        lastNode = True
                else:
                    current = current.right
                    if current is None:
                        parent.right = Node(data)
                        lastNode = True

    def printInorder(self, node):
        """Print the subtree rooted at `node` in ascending (sorted) order."""
        if node is not None:
            self.printInorder(node.left)
            print(node.data)
            self.printInorder(node.right)

    def printPreorder(self, node):
        """Print the subtree rooted at `node` root-first (pre-order)."""
        if node is not None:
            print(node.data)
            self.printPreorder(node.left)
            self.printPreorder(node.right)

    def printPostorder(self, node):
        """Print the subtree rooted at `node` children-first (post-order)."""
        if node is not None:
            self.printPostorder(node.left)
            # BUG FIX: the original called the misspelled `self.printPostoder`,
            # which would raise AttributeError at runtime.
            self.printPostorder(node.right)
            print(node.data)

    def printBreathFirst(self):
        """Print all values level by level (BFS; name kept for callers)."""
        if self.root is None:
            return  # empty tree: the original would crash on None.data here
        queue = [self.root]
        while queue:
            current = queue.pop(0)
            print(current.data)
            if current.left is not None:
                queue.append(current.left)
            if current.right is not None:
                queue.append(current.right)

    def treeDepth(self, node, depth=0):
        """Return the node count of the longest root-to-leaf path under
        `node` (inclusive); `depth` is the recursion offset."""
        if node is None:
            return depth
        # BUG FIX: the original was missing the closing parenthesis on this
        # expression, which made the whole file a syntax error.
        return max(self.treeDepth(node.left, depth + 1),
                   self.treeDepth(node.right, depth + 1))

    def printLeaf(self, node):
        """Print every leaf value under `node`; return the leaf count."""
        if node is None:
            return 0
        if node.left is None and node.right is None:
            print(node.data)
            return 1
        return self.printLeaf(node.left) + self.printLeaf(node.right)

    def findData(self, node, find):
        """Return True if `find` exists in the subtree rooted at `node`."""
        if node is None:
            return False
        if node.data == find:
            return True
        if find < node.data:
            return self.findData(node.left, find)
        return self.findData(node.right, find)
| true |
d1a103133c46db7edccbc2b07822561b959faa15 | Python | KAA-sign/stepik | /python_1/multi_table.py | UTF-8 | 219 | 3.578125 | 4 | [] | no_license | a = 7
b = 10
c = 5
d = 6

cols = range(c, d + 1)
# Header row: one "\t <col>" cell per multiplier (matches print's default
# single-space separator between '\t' and the number in the original).
print(''.join('\t {}'.format(col) for col in cols))
# Body: each row starts with the multiplicand, then the tab-separated products.
for row in range(a, b + 1):
    cells = ''.join('{} \t'.format(row * col) for col in cols)
    print('{} \t{}'.format(row, cells))
print() | true |
3d528192e69abe50aab8714065c3947653c3e546 | Python | Echocage/Data-Management | /GraphUser.py | UTF-8 | 984 | 2.84375 | 3 | [] | no_license | import sqlite3
from pylab import *
# Open the pre-built SQLite database of recorded Facebook friend activity.
con = sqlite3.connect('C:/data/FacebookFriendsData.db')
c = con.cursor()

# One counter per hour of the day (0..23).
times = [0] * 24

# Trailing comma makes this a 1-tuple, as required by the parameterized query.
user = input("Enter user's name: "),

# Load every timestamp into memory once; `timestampid` values elsewhere appear
# to be 1-based indexes into this list (hence the -1 offsets below) -- TODO
# confirm against the schema that built the database.
c.execute("SELECT timestamp FROM TimestampIds")
timestamps = c.fetchall()

# Look up the user's id (stored 1-based; datatable references it 0-based).
c.execute('SELECT id FROM Userids WHERE user = ?', user)
userId = (c.fetchone()[0] - 1,)

# All timestamp ids at which this user was recorded online.
c.execute('SELECT timestampid FROM datatable WHERE userId = ?', userId)
timestampIds = c.fetchall()
con.close()  # FIX: release the database connection (the original leaked it)

# Cross-reference the ids against the in-memory timestamps, then bucket each
# one by its hour of day.  (Renamed from `list`, which shadowed the builtin;
# `.hour` is already an int, so the original's extra int() was dropped.)
stamps = [timestamps[x[0] - 1][0] for x in timestampIds]
for stamp in stamps:
    times[datetime.datetime.fromtimestamp(stamp).hour] += 1

plt.bar(range(len(times)), times)  # FIX: len(times) instead of times.__len__()
plt.xlabel("Hours since midnight")
plt.ylabel("Number of times online recorded")
plt.xlim((0, 24))
plt.title(user[0] + "'s Facebook useage")
plt.show()
| true |
07439891b648f77ad585fb17f702395f074ac8a3 | Python | daniel-reich/ubiquitous-fiesta | /Lx9mL2uBWwtJFv94a_23.py | UTF-8 | 457 | 3.203125 | 3 | [] | no_license |
def checker_board(n, el1, el2):
    """Return an n x n checkerboard as a list of row lists.

    Cell (r, c) holds `el1` when r + c is even and `el2` when it is odd,
    so the top-left cell is `el1`.  Returns the string 'invalid' when the
    two elements are equal (no checkerboard pattern is possible), and an
    empty list when n == 0.
    """
    if el1 == el2:
        return 'invalid'
    # BUG FIX: the original appended the *same* two row-list objects over
    # and over, so every even row aliased one list and every odd row the
    # other -- mutating one row silently changed its siblings.  Build a
    # fresh list per row instead.
    return [[el1 if (row + col) % 2 == 0 else el2 for col in range(n)]
            for row in range(n)]
| true |