blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7b30e4ac647403c15967b502d1a8b5cc4fb217e7 | Python | MaclaineSabino/ADS_IFPI-Exercicios | /Exercicios_Python/Ex04/Ex04Q14.py | UTF-8 | 172 | 3.296875 | 3 | [] | no_license | from random import randint
# draw 100 random integers in [1, 100000] and report the largest one
lista = [randint(1, 100000) for _ in range(100)]
maior = max(lista)
print(maior)
| true |
76a88d5b380969c84050251c9c13ccd4c4537bd3 | Python | maihan040/Python_Random_Scripts | /binaryExpression.py | UTF-8 | 1,158 | 4.53125 | 5 | [] | no_license | # binaryExpression.py
#
# purpose: to evaluate an arithmetic expression as given by a binary tree
#
# Example:
# *
#
# / \
#
# + +
#
# / \ / \
#
# 3 2 4 5
#
#
# equals: [(3 + 2) * (4 + 5)]
#class definition
class bstNode:
    """A binary-tree node holding one datum and optional left/right children."""

    def __init__(self, d):
        # d is either an operand (a number) or an operator string: "+", "-", "*", "/"
        self.left = None
        self.d = d
        self.right = None
#function definition
def evalExp(node):
    """Recursively evaluate the arithmetic expression tree rooted at ``node``.

    Leaf nodes hold numeric operands; internal nodes hold one of the
    operator strings "+", "-", "*" or "/". An empty (sub)tree evaluates
    to 0. Division uses ``/`` and therefore returns a float.

    Raises:
        ValueError: if an internal node holds an unsupported operator
            (the original silently returned None in that case).
    """
    # base case: empty (sub)tree contributes 0
    if node is None:
        return 0
    # leaf node: return the operand itself
    if node.left is None and node.right is None:
        return node.d
    # compute the left side of the tree
    left = evalExp(node.left)
    # compute the right side of the tree
    right = evalExp(node.right)
    # determine the operator
    if node.d == "+":
        return left + right
    if node.d == "-":
        return left - right
    if node.d == "*":
        return left * right
    if node.d == "/":
        return left / right
    # fail loudly on an unknown operator instead of returning None
    raise ValueError("unsupported operator: {!r}".format(node.d))
# main: build the tree for [(3 + 2) * (4 + 5)] and evaluate it
left_sum = bstNode('+')
left_sum.left = bstNode(3)
left_sum.right = bstNode(2)
right_sum = bstNode('+')
right_sum.left = bstNode(4)
right_sum.right = bstNode(5)
node = bstNode('*')
node.left = left_sum
node.right = right_sum
print("The expression = " + str(evalExp(node)))
| true |
96b020b5a1b53481bd6caad9ce2569a497918241 | Python | pranavjoy/pythonForPenTesting | /Day3/classwork/server.py | UTF-8 | 1,160 | 2.9375 | 3 | [] | no_license | import os
import socket
def download(conn, command):
    """Receive one file from the remote client over ``conn``.

    ``command`` has the form "download*<filename>"; it is echoed to the
    client, which then streams the file back in 1024-byte chunks ending
    with the literal marker b'DONE'. The file is saved under
    /root/Desktop/<filename>.

    NOTE(review): if the 'DONE' marker straddles a recv() boundary the
    endswith() check misses it and the loop never terminates cleanly —
    confirm the client always sends the marker inside the final chunk.
    """
    conn.send(command.encode())
    # command is "<grab>*<path>": everything after '*' is the remote file name
    grab, path = command.split("*")
    f = open('/root/Desktop/' + path, 'wb')
    while True:
        bits = conn.recv(1024)
        if bits.endswith('DONE'.encode()):
            f.write(bits[:-4]) # Write those last received bits without the word 'DONE'
            f.close()
            print('[+] Transfer completed ')
            break
        if 'File not found'.encode() in bits:
            print('[-] Unable to find out the file')
            break
        f.write(bits)
def connecting():
    """Listen on TCP 0.0.0.0:9001, accept one client, and run a shell loop.

    Commands typed at the "Shell> " prompt are forwarded to the client:
    'terminate' closes the session, 'download*<path>' triggers a file
    transfer via download(), anything else is executed remotely and the
    (up to 1024 bytes of) output is printed.

    NOTE(review): neither the listening socket nor the connection is
    ever closed explicitly — resources are released only on process exit.
    """
    s = socket.socket()
    s.bind(("0.0.0.0", 9001))
    s.listen(1)
    print('[+] Listening for income TCP connection on port 9001')
    conn, addr = s.accept()
    print('[+]We got a connection from', addr)
    while True:
        command = input("Shell> ")
        if 'terminate' in command:
            conn.send('terminate'.encode())
            break
        elif 'download' in command:
            download(conn, command)
        else:
            conn.send(command.encode())
            print(conn.recv(1024).decode())
def main():
    """Entry point: start the listener/command loop."""
    connecting()


# run immediately on import/execution (no __main__ guard in the original)
main()
| true |
abf9915deeb1868161b46f77bb9ec4496a31652d | Python | FranckNdame/leetcode | /problems/448. Find All Numbers Disappeared in an Array/solution.py | UTF-8 | 345 | 3.140625 | 3 | [] | no_license | class Solution:
def findDisappearedNumbers(self, nums: List[int]) -> List[int]:
    """Return every value in 1..len(nums) that is missing from nums.

    Marks presence in place by negating nums[v - 1] for each value v
    seen, then collects the 1-based positions whose entry stayed
    positive. O(n) time, O(1) extra space; the input list is mutated,
    exactly as in the original implementation.
    """
    for value in nums:
        slot = abs(value) - 1
        if nums[slot] > 0:
            nums[slot] = -nums[slot]
    return [pos + 1 for pos, value in enumerate(nums) if value > 0]
| true |
1fe54ba660f90a092047dc270dd9b6b62151583a | Python | keioni/ink_mock1 | /ink/sys/config.py | UTF-8 | 3,500 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''INK system configuration module.
This module is used to customizing INK system settings.
When you want to access any settings, you must use
the instance -- already created when imported timing --
of this class 'CONF' on this module.
For example:
from ink.sys.config import CONF
CONF.load(path_to_setting_file)
some_instance.do_something(CONF.toplevel.secondlevel)
'''
import os
import json
from attrdict import AttrDict
class Configure:
    '''INK system configuration manager.

    How to use this class, see module docstring: the shared ``CONF``
    instance is loaded once at start-up and sections are read through
    attribute access (e.g. ``CONF.toplevel``).
    '''
    def __init__(self, conf_dict: dict = None):
        '''Initialize from ``conf_dict`` or from the default settings file.

        Arguments:
            * conf_dict {dict} -- optional pre-parsed configuration; when
              given, no file lookup is performed.
        '''
        self.__conf = {}
        self.__files = []
        if conf_dict:
            self.__conf = conf_dict
        else:
            # Default location is <project root>/var/settings.json, three
            # directory levels above this file. (The original built the
            # path as `__file__ + '../../..'` -- no separator after the
            # file name -- which normalized to the wrong directory.)
            pkg_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
            conf_file = os.path.abspath(os.path.join(pkg_root, 'var', 'settings.json'))
            if os.path.exists(conf_file):
                with open(conf_file, 'r') as f:
                    self.__conf = json.load(f)
                self.__files.append(conf_file)

    def __getattr__(self, name):
        '''Return the configuration section ``name`` wrapped in an AttrDict.

        Raises AttributeError (not ValueError) so hasattr() keeps working.
        '''
        if not self.__conf:
            msg = 'Setting file does not loaded.'
            raise AttributeError(msg)
        values = self.__conf['configurations'].get(name)
        if values:
            return AttrDict(values)
        msg = 'No configuration values of name: {}'.format(name)
        raise AttributeError(msg)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.__conf)

    # def __str__(self):
    #     return json.dumps(self.__conf, indent=4)

    def load(self, conf_file: str, force_load: bool = False):
        '''load json format setting file.

        Arguments:
            * conf_file {str} -- file name of the setting file.
            * force_load {bool} -- In default, if setting file was
                already loaded, raise exception. If you need load
                twice or more and override loaded settings, change
                True. (default: {False})

        Return value:
            Return {True} when settings is loaded successfully.
            This method raise ValueError exception instead of
            returning {False}. So use try-except.
        '''
        if self.__conf:
            if not force_load:
                msg = 'Always loaded.'
                raise ValueError(msg)
        with open(conf_file, 'r') as f:
            self.__conf = json.load(f)
        if not self.__conf:
            msg = 'Cannot load system settings from the file.'
            raise ValueError(msg)
        if self.__conf.get('version') != '1.0':
            msg = 'Version number does not exist or not match this system.'
            raise ValueError(msg)
        if not self.__conf.get('configurations'):
            msg = "Setting file's format is invalid."
            raise ValueError(msg)
        # record the source file, consistent with __init__'s bookkeeping
        self.__files.append(conf_file)
        return True

    def is_loaded(self) -> bool:
        '''Return True when a configuration has been loaded.'''
        return bool(self.__conf)

    def clear(self):
        '''Discard the loaded configuration.'''
        self.__conf.clear()
# module-level singleton: import CONF from this module and call
# CONF.load(<path>) once at application start-up
CONF = Configure()

# (earlier, pre-singleton module-level implementation kept for reference)
# conf = AttrDict()
# conf_file = os.environ.get('INK_CONF_FILE')
# if not conf_file:
#     conf_file = './var/settings.json'
# with open(conf_file, 'r') as f:
#     raw_conf = json.load(f)
# if not raw_conf:
#     msg = 'Cannot load system settings from the file.'
#     raise ValueError(msg)
# if raw_conf.get('version') != '1.0':
#     msg = 'Version number does not exist or not match this system.'
#     raise ValueError(msg)
# conf = AttrDict(raw_conf.get('configurations'))
| true |
1275c06e57ac22e5d426996de7895268fa188a97 | Python | rafaelwitter/UFSC | /POO/Aula_5.0.py | UTF-8 | 1,901 | 4.375 | 4 | [] | no_license | ####################
# Estudando funções#
####################
####################
#Entendendo funções#
####################
def soma(x, y):
    """Return the sum of x and y."""
    return x + y


def multi(z, w):
    """Return the product of the two integers z and w."""
    return z * w


print(soma(4, 5))
print(multi(5, 5))
def fatorial(a):
    """Return a! computed iteratively.

    Matches the original's behavior of returning 1 for any a < 1.
    """
    fat = 1
    for k in range(2, a + 1):
        fat *= k
    return fat


print(fatorial(5))
###################################################################################################
# Function that receives two integers and computes their binomial coefficient using the fatorial  #
# function defined above                                                                          #
###################################################################################################
def coef_binomial(m, n):
    """Return the binomial coefficient C(m, n) for integers m >= n >= 0.

    Bug fix: the original returned ``fatorial(m) // fatorial(m - n) * fatorial(n)``,
    where operator precedence multiplies by n! instead of dividing by it
    (e.g. C(5, 3) came out as 360 instead of 10). The correct formula is
    ``m! // ((m - n)! * n!)``.
    """
    return fatorial(m) // (fatorial(m - n) * fatorial(n))


print(coef_binomial(5, 3))
#############################################################################################
# Function that reads an integer k > 0 and prints the first k rows of Pascal's triangle     #
#############################################################################################
def triangulo_pascal(k):
    """Print the first k rows of Pascal's triangle using coef_binomial."""
    for m in range(k):
        # row m has entries C(m, 0) .. C(m, m)
        for n in range(m + 1):
            print(coef_binomial(m, n), end=" ")
        print()
# interactive entry point: ask for the number of rows and print the triangle
k=int(input("Entre com o numero de linhas do triangulo de pascal: "))
triangulo_pascal(k)
2c84132be5ee9dcd181e76ea57ec202d8669b12f | Python | Ron-Chang/MyNotebook | /Coding/Python/Ron/Trials_and_Materials/(*)num_fun.py | UTF-8 | 900 | 3.734375 | 4 | [] | no_license | """
Test.describe('Basic Tests')
Test.assert_equals(seven(times(five())), 35)
Test.assert_equals(four(plus(nine())), 13)
Test.assert_equals(eight(minus(three())), 5)
Test.assert_equals(six(divided_by(two())), 3)
seven(times(five())); // must return 35
four(plus(nine())); // must return 13
eight(minus(three())); // must return 5
six(dividedBy(two())); // must return 3
Ruby:
seven(times(five)) # must return 35
four(plus(nine)) # must return 13
eight(minus(three)) # must return 5
six(divided_by(two)) # must return 3
"""
import operator
def zero():
return 0
def one():
def two():
def three():
def four():
def five():
def six():
def seven():
return 7
def eight():
def nine():
def plus():
def minus(x):
return operator.sub(, x)
def times():
def divided_by():
print(seven( times( five() ) ))
print("\n".join(method for method in operator.__dir__() if "__" not in method))
| true |
e03ba7e0b90db0ff62dfb96574bc25d199d885eb | Python | cpappas18/Health-Records-System | /tests/test_health_records_system.py | UTF-8 | 5,698 | 2.65625 | 3 | [] | no_license | import mock
import builtins
from unittest import TestCase
from src.health_records_system import *
class TestHealthRecordsSystem(TestCase):
    """Unit tests for the HealthRecordsSystem singleton container.

    Each test gets a fresh system in setUp(); tearDown() resets the
    singleton so state cannot leak between tests. Overwrite prompts are
    simulated by patching builtins.input to answer 'Y' or 'N'.
    """

    def setUp(self):
        self.system = HealthRecordsSystem()

    def tearDown(self):
        HealthRecordsSystem._reset()

    def test_get_instance(self):
        # get_instance() must hand back the instance created in setUp()
        instance = HealthRecordsSystem.get_instance()
        self.assertEqual(instance, self.system)

    def test_get_patient_for_valid_id(self):
        patient = Patient(1, "Jane", 20, 123)
        self.system.add_patient(patient)
        self.assertEqual(self.system.get_patient(1), patient)

    def test_get_patient_for_invalid_id(self):
        # unknown ids yield None rather than raising
        self.assertEqual(self.system.get_patient(1), None)

    def test_add_patient_already_exists_overwrite(self):
        patient1 = Patient(1, "Jane", 20, 123)
        self.system.add_patient(patient1)
        patient2 = Patient(1, "John", 20, 123)
        # answering 'Y' to the overwrite prompt replaces the record
        with mock.patch.object(builtins, 'input', lambda _: 'Y'):
            self.system.add_patient(patient2)
        self.assertEqual(self.system._patients, {1: patient2})

    def test_add_patient_already_exists_no_overwrite(self):
        patient1 = Patient(1, "Jane", 20, 123)
        self.system.add_patient(patient1)
        patient2 = Patient(1, "John", 20, 123)
        # answering 'N' keeps the original record
        with mock.patch.object(builtins, 'input', lambda _: 'N'):
            self.system.add_patient(patient2)
        self.assertEqual(self.system._patients, {1: patient1})

    def test_add_patient_new(self):
        patient = Patient(1, "Jane", 20, 123)
        self.system.add_patient(patient)
        self.assertEqual(self.system._patients, {1: patient})

    def test_remove_patient_for_valid_id(self):
        patient = Patient(1, "Jane", 20, 123)
        self.system.add_patient(patient)
        self.system.remove_patient(1)
        self.assertEqual(self.system._patients, {})

    def test_remove_patient_for_invalid_id(self):
        # removing a missing patient is a no-op that returns None
        result = self.system.remove_patient(1)
        self.assertEqual(result, None)
class TestPatient(TestCase):
    """Unit tests for the Patient record: medications and test results.

    Medications are keyed by name; test results are keyed by the
    (test name, date) pair. Overwrite prompts are simulated by patching
    builtins.input to answer 'Y' or 'N'.
    """

    def setUp(self):
        self.patient = Patient(1, "Jane", 20, 123)

    def test_get_medication_for_valid_name(self):
        med = Medication("Advil", "1 tablet", "once a day")
        self.patient.add_medication(med)
        self.assertEqual(self.patient.get_medication("Advil"), med)

    def test_get_medication_for_invalid_name(self):
        # unknown medication names yield None rather than raising
        self.assertEqual(self.patient.get_medication("Advil"), None)

    def test_add_medication_already_exists_overwrite(self):
        med1 = Medication("Advil", "1 tablet", "once a day")
        self.patient.add_medication(med1)
        med2 = Medication("Advil", "1 tablet", "twice a day")
        # answering 'Y' to the overwrite prompt replaces the entry
        with mock.patch.object(builtins, 'input', lambda _: 'Y'):
            self.patient.add_medication(med2)
        self.assertEqual(self.patient.medication, {"Advil": med2})

    def test_add_medication_already_exists_no_overwrite(self):
        med1 = Medication("Advil", "1 tablet", "once a day")
        self.patient.add_medication(med1)
        med2 = Medication("Advil", "1 tablet", "twice a day")
        # answering 'N' keeps the original entry
        with mock.patch.object(builtins, 'input', lambda _: 'N'):
            self.patient.add_medication(med2)
        self.assertEqual(self.patient.medication, {"Advil": med1})

    def test_add_medication_new(self):
        med = Medication("Advil", "1 tablet", "once a day")
        self.patient.add_medication(med)
        self.assertEqual(self.patient.medication, {"Advil": med})

    def test_remove_medication_for_valid_name(self):
        med = Medication("Advil", "1 tablet", "once a day")
        self.patient.add_medication(med)
        self.patient.remove_medication("Advil")
        self.assertEqual(self.patient.medication, {})

    def test_remove_medication_for_invalid_name(self):
        # removing a missing medication is a no-op that returns None
        result = self.patient.remove_medication("Advil")
        self.assertEqual(result, None)

    def test_clear_medication(self):
        med = Medication("Advil", "1 tablet", "once a day")
        self.patient.add_medication(med)
        self.patient.clear_medication()
        self.assertEqual(self.patient.medication, {})

    def test_get_test_results_for_valid_name_date(self):
        self.patient.add_test_results("COVID", "June 26, 2021", "Negative")
        self.assertEqual(self.patient.get_test_results("COVID", "June 26, 2021"), "Negative")

    def test_get_test_results_for_invalid_name_date(self):
        # unknown (name, date) pairs yield None rather than raising
        self.assertEqual(self.patient.get_test_results("COVID", "June 26, 2021"), None)

    def test_add_test_results_already_exists_overwrite(self):
        self.patient.add_test_results("COVID", "June 26, 2021", "Negative")
        # answering 'Y' to the overwrite prompt replaces the result
        with mock.patch.object(builtins, 'input', lambda _: 'Y'):
            self.patient.add_test_results("COVID", "June 26, 2021", "Positive")
        self.assertEqual(self.patient.test_results, {("COVID", "June 26, 2021"): "Positive"})

    def test_add_test_results_already_exists_no_overwrite(self):
        self.patient.add_test_results("COVID", "June 26, 2021", "Negative")
        # answering 'N' keeps the original result
        with mock.patch.object(builtins, 'input', lambda _: 'N'):
            self.patient.add_test_results("COVID", "June 26, 2021", "Positive")
        self.assertEqual(self.patient.test_results, {("COVID", "June 26, 2021"): "Negative"})

    def test_add_test_results_new(self):
        self.patient.add_test_results("COVID", "June 26, 2021", "Negative")
        self.assertEqual(self.patient.test_results, {("COVID", "June 26, 2021"): "Negative"})

    def test_clear_test_results(self):
        self.patient.add_test_results("COVID", "June 26, 2021", "Negative")
        self.patient.clear_test_results()
        self.assertEqual(self.patient.test_results, {})
| true |
c419ce5e2e91ee13d398af2e5c64e348ed213ced | Python | sanoyo/analysys | /substract.py | UTF-8 | 1,623 | 3.21875 | 3 | [] | no_license | # https://algorithm.joho.info/programming/python/opencv-background-subtraction-py/
# -*- coding: utf-8 -*-
import cv2
import numpy as np
def main():
    """Per-frame Otsu thresholding of a cropped region of sample.MOV.

    Each frame is converted to grayscale, cropped, binarized with Otsu's
    method, displayed, and a pixel count is printed. Press 'q' to stop.
    The commented-out code is an earlier background-subtraction approach
    kept for reference.
    """
    i = 0  # frame counter (only used by the disabled background refresh below)
    th = 50  # threshold for the difference image (unused while absdiff is disabled)
    cap = cv2.VideoCapture("sample.MOV")
    # use the first frame as the background image
    # bg = cv2.imread('test.png')
    # bg = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY)
    while(cap.isOpened()):
        ret,frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # crop to the same height/width as the reference region
        gray = gray[300:800,400:1100]
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
        # ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        num_b = np.count_nonzero(thresh)
        num_w = (thresh.size) - num_b
        # NOTE(review): num_b counts the non-zero (white) pixels, so num_w is
        # actually the black-pixel count — confirm which count is intended.
        print(num_w)
        # absolute difference: current frame - background
        # mask = cv2.absdiff(gray, bg)
        # binarize the difference image to obtain a mask
        # mask[mask < th] = 0
        # mask[mask >= th] = 255
        cv2.imshow("Th", thresh)
        # i += 1  # increment the frame counter
        # periodically refresh the background image
        # if(i > 3000):
        #     ret, bg = cap.read()
        #     bg = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY)
        #     i = 0  # reset the counter
        # quit early when the q key is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| true |
2c39a33db65c6a3580c6f79c6d463a87b919371f | Python | SirGuiL/Python | /Mundo 2/Python_Exercicios/ex056.py | UTF-8 | 868 | 3.828125 | 4 | [] | no_license | nomes = []
idades = []
sexo = []
soma = 0
maisvelho = 0
menos20 = 0
for c in range(0, 4):
nomes += [input('Digite o nome da {}ª pessoa: '.format(c + 1))]
idades += [int(input('Digite a idade da {}ª pessoa: '.format(c + 1)))]
sexo += [input('Digite o sexo da {}ª pessoa: '.format(c + 1))]
print('')
for c in range(0, 4):
soma += idades[c]
for c in range(0, 4):
if sexo[c].lower() == 'masculino':
if c > 0:
if idades[c] > idades[c - 1]:
maisvelho = c
elif c == 0:
maisvelho = c
if sexo[c].lower() == 'feminino':
if idades[c] < 20:
menos20 += 1
print('A média de idade do grupo é: {}'.format(soma / len(idades)))
print('O nome do homem mais velho é: {}'.format(nomes[maisvelho]))
print('Quantidade de mulheres com menos de 20 anos: {}'.format(menos20)) | true |
95c5a3e5fb6db7afa32a0a7d09b75708491b29cf | Python | JoelBender/bacpypes | /tests/test_constructed_data/test_array_of.py | UTF-8 | 9,505 | 3 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Array
----------
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.primitivedata import TagList, Integer, Time
from bacpypes.constructeddata import ArrayOf
from bacpypes.basetypes import TimeStamp
from .helpers import SimpleSequence
# module-level debugging switches used by the @bacpypes_debugging decorator
_debug = 0
_log = ModuleLogger(globals())

# variable-length BACnet array of integers (unit under test)
IntegerArray = ArrayOf(Integer)
@bacpypes_debugging
class TestIntegerArray(unittest.TestCase):
    """Tests for the variable-length ArrayOf(Integer) type.

    BACnet array semantics apply throughout: ary[0] holds the element
    count and elements are accessed with 1-based indexes, so the raw
    payload is ary.value[1:].
    """

    def test_empty_array(self):
        if _debug: TestIntegerArray._debug("test_empty_array")

        # create an empty array
        ary = IntegerArray()
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # array semantics
        assert len(ary) == 0
        assert ary[0] == 0

        # encode it in a tag list
        tag_list = TagList()
        ary.encode(tag_list)
        if _debug: TestIntegerArray._debug(" - tag_list: %r", tag_list)

        # create another sequence and decode the tag list
        ary = IntegerArray()
        ary.decode(tag_list)
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

    def test_append(self):
        if _debug: TestIntegerArray._debug("test_append")

        # create an empty array
        ary = IntegerArray()
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # append an integer; the count at index 0 tracks the new length
        ary.append(2)
        assert len(ary) == 1
        assert ary[0] == 1
        assert ary[1] == 2

    def test_delete_item(self):
        if _debug: TestIntegerArray._debug("test_delete_item")

        # create an array
        ary = IntegerArray([1, 2, 3])
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # delete something (1-based index)
        del ary[2]
        assert len(ary) == 2
        assert ary[0] == 2
        assert ary.value[1:] == [1, 3]

    def test_index_item(self):
        if _debug: TestIntegerArray._debug("test_index_item")

        # create an array
        ary = IntegerArray([1, 2, 3])
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # find something (returns the 1-based position)
        assert ary.index(3) == 3

        # not find something
        with self.assertRaises(ValueError):
            ary.index(4)

    def test_remove_item(self):
        if _debug: TestIntegerArray._debug("test_remove_item")

        # create an array
        ary = IntegerArray([1, 2, 3])
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # remove something
        ary.remove(2)
        assert ary.value[1:] == [1, 3]

        # not remove something
        with self.assertRaises(ValueError):
            ary.remove(4)

    def test_resize(self):
        if _debug: TestIntegerArray._debug("test_resize")

        # create an array
        ary = IntegerArray([1, 2, 3])
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # writing ary[0] resizes the array: make it shorter
        ary[0] = 2
        assert ary.value[1:] == [1, 2]

        # make it longer (new slots are zero-filled)
        ary[0] = 4
        assert ary.value[1:] == [1, 2, 0, 0]

    def test_get_item(self):
        if _debug: TestIntegerArray._debug("test_get_item")

        # create an array
        ary = IntegerArray([1, 2, 3])
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # BACnet semantics: 1-based element access
        assert ary[1] == 1

    def test_set_item(self):
        if _debug: TestIntegerArray._debug("test_set_item")

        # create an array
        ary = IntegerArray([1, 2, 3])
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # BACnet semantics, no type checking
        ary[1] = 10
        assert ary[1] == 10

    def test_codec(self):
        if _debug: TestIntegerArray._debug("test_codec")

        # test array contents
        ary_value = [1, 2, 3]

        # create an array
        ary = IntegerArray(ary_value)
        if _debug: TestIntegerArray._debug(" - ary: %r", ary)

        # encode it in a tag list
        tag_list = TagList()
        ary.encode(tag_list)
        if _debug: TestIntegerArray._debug(" - tag_list: %r", tag_list)

        # create another sequence and decode the tag list
        ary = IntegerArray()
        ary.decode(tag_list)
        if _debug: TestIntegerArray._debug(" - ary %r", ary)

        # round trip preserves the payload
        assert ary.value[1:] == ary_value
# fixed length array of integers: the length is locked to exactly 5 elements
IntegerArray5 = ArrayOf(Integer, fixed_length=5)
@bacpypes_debugging
class TestIntegerArray5(unittest.TestCase):
    """Tests for a fixed-length (5 element) ArrayOf(Integer).

    Any operation that would change the length — append, delete, remove,
    or resizing through ary[0] — must raise TypeError. Element access
    still follows BACnet semantics (count at index 0, 1-based elements).
    """

    def test_empty_array(self):
        if _debug: TestIntegerArray5._debug("test_empty_array")

        # create an empty array: it is pre-filled to the fixed length
        ary = IntegerArray5()
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # array semantics
        assert len(ary) == 5
        assert ary[0] == 5

        # value correct (zero-filled)
        assert ary.value[1:] == [0, 0, 0, 0, 0]

    def test_append(self):
        if _debug: TestIntegerArray5._debug("test_append")

        # create an empty array
        ary = IntegerArray5()
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # appending would change the length: not allowed
        with self.assertRaises(TypeError):
            ary.append(2)

    def test_delete_item(self):
        if _debug: TestIntegerArray5._debug("test_delete_item")

        # create an array
        ary = IntegerArray5([1, 2, 3, 4, 5])
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # deleting would change the length: not allowed
        with self.assertRaises(TypeError):
            del ary[2]

    def test_index_item(self):
        if _debug: TestIntegerArray5._debug("test_index_item")

        # create an array
        ary = IntegerArray5([1, 2, 3, 4, 5])
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # find something (returns the 1-based position)
        assert ary.index(3) == 3

        # not find something
        with self.assertRaises(ValueError):
            ary.index(100)

    def test_remove_item(self):
        if _debug: TestIntegerArray5._debug("test_remove_item")

        # create an array
        ary = IntegerArray5([1, 2, 3, 4, 5])
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # removing would change the length: not allowed
        with self.assertRaises(TypeError):
            ary.remove(4)

    def test_resize(self):
        if _debug: TestIntegerArray5._debug("test_resize")

        # create an array
        ary = IntegerArray5([1, 2, 3, 4, 5])
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # writing the current length is a no-op
        ary[0] = 5

        # changing it to anything else fails
        with self.assertRaises(TypeError):
            ary[0] = 4

    def test_get_item(self):
        if _debug: TestIntegerArray5._debug("test_get_item")

        # create an array
        ary = IntegerArray5([1, 2, 3, 4, 5])
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # BACnet semantics: 1-based element access
        assert ary[1] == 1

    def test_set_item(self):
        if _debug: TestIntegerArray5._debug("test_set_item")

        # create an array
        ary = IntegerArray5([1, 2, 3, 4, 5])
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # BACnet semantics, no type checking
        ary[1] = 10
        assert ary[1] == 10

    def test_codec(self):
        if _debug: TestIntegerArray5._debug("test_codec")

        # test array contents
        ary_value = [1, 2, 3, 4, 5]

        # create an array
        ary = IntegerArray5(ary_value)
        if _debug: TestIntegerArray5._debug(" - ary: %r", ary)

        # encode it in a tag list
        tag_list = TagList()
        ary.encode(tag_list)
        if _debug: TestIntegerArray5._debug(" - tag_list: %r", tag_list)

        # create another sequence and decode the tag list
        # NOTE(review): decodes into the variable-length IntegerArray, not
        # IntegerArray5 — confirm this cross-type round trip is intentional
        ary = IntegerArray()
        ary.decode(tag_list)
        if _debug: TestIntegerArray5._debug(" - ary %r", ary)

        # round trip preserves the payload
        assert ary.value[1:] == ary_value
# array whose elements are a constructed Sequence type (from the test helpers)
SimpleSequenceArray = ArrayOf(SimpleSequence)
@bacpypes_debugging
class TestSimpleSequenceArray(unittest.TestCase):
    """Round-trip test for an ArrayOf a constructed Sequence type."""

    def test_codec(self):
        if _debug: TestSimpleSequenceArray._debug("test_codec")

        # test array contents
        ary_value = [
            SimpleSequence(hydrogen=True),
            SimpleSequence(hydrogen=False),
            SimpleSequence(hydrogen=True),
            ]

        # create an array
        ary = SimpleSequenceArray(ary_value)
        if _debug: TestSimpleSequenceArray._debug(" - ary: %r", ary)

        # encode it in a tag list
        tag_list = TagList()
        ary.encode(tag_list)
        if _debug: TestSimpleSequenceArray._debug(" - tag_list: %r", tag_list)

        # create another sequence and decode the tag list
        ary = SimpleSequenceArray()
        ary.decode(tag_list)
        if _debug: TestSimpleSequenceArray._debug(" - ary %r", ary)

        # round trip preserves the payload (1-based elements)
        assert ary.value[1:] == ary_value
# fixed length array of TimeStamps; the prototype supplies the default
# element value used to pre-fill the 16 slots
ArrayOfTimeStamp = ArrayOf(TimeStamp, fixed_length=16,
    prototype=TimeStamp(time=Time().value),
    )
@bacpypes_debugging
class TestArrayOfTimeStamp(unittest.TestCase):
def test_empty_array(self):
if _debug: TestArrayOfTimeStamp._debug("test_empty_array")
# create an empty array
ary = ArrayOfTimeStamp()
if _debug: TestArrayOfTimeStamp._debug(" - ary: %r", ary)
# array sematics
assert len(ary) == 16
assert ary[0] == 16
| true |
08527c0199ce36b46e9d54b4cd123eb4c7fadb48 | Python | IsThatYou/Competitive-Programming | /ACM/2018Fall/Homer_Simpson.py | UTF-8 | 642 | 2.734375 | 3 | [] | no_license | #https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&category=655&problem=1406
from sys import stdin
# UVa 10465 (Homer Simpson): each input line gives two burger eating times
# m, n and a total time t; eat as many burgers as possible. If t cannot be
# filled exactly, report the burger count and the leftover time.
for line in stdin:
    m,n,t = [int(x) for x in line.split()]
    # ensure m is the smaller (faster) of the two times
    if m >n:
        temp = n
        n =m
        m = temp
    if t%m == 0:
        # t divides evenly by the faster burger: that count is maximal
        print(int(t/m))
    else:
        residual = t%m
        ans = t//m
        ns = n
        sol = ns % m
        counter = 2
        maxn = t//n
        solved = False
        # search for a count of n-burgers whose total time mod m equals the
        # residual, i.e. a mix of m- and n-burgers that consumes t exactly
        while sol!=residual:
            sol = (ns * counter) % m
            if counter >= maxn:
                # no exact mix exists: print the count and the leftover time
                print( ans,residual)
                solved = True
                break
            counter += 1
        if not solved:
            # exact mix found: swap `counter` n-burgers in for the m-burgers
            # they displace (NOTE(review): confirm this maximizes the count)
            counter -= 1
            ans = ans - ((ns*counter)//m) + counter
            print(int(ans))
| true |
57aad26199deccdd4beeeb9a946cfb29188021f0 | Python | Camiko0/Arbol_PosOrden-Aritmetica | /inicio.py | UTF-8 | 1,266 | 3.171875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from pila import *
from arbol_expresiones import *
class Inicio:
    """ INSTANCIAS """
    def __init__(self):
        # expression-tree evaluator and its auxiliary stack
        self.arbol = Arbol()
        self.pila = Pila()

    """ AGREGAR ELEMENTOS A LA COLA """
    def abrir_archivo(self):
        """Read expresiones.txt line by line, evaluate each space-separated
        arithmetic expression via the tree/stack, and return a report
        string with one "La respuesta para [...] es: ..." line each."""
        # open the .txt file of arithmetic expressions
        expresiones = open("expresiones.txt")
        linea = [" "]
        impresion = ''
        while linea != '':
            # read the file one line at a time
            linea = expresiones.readline().split(' ')
            if (linea == ['']):
                expresiones.close()
                break
            # send each expression from the file to be converted into a tree
            # (the last token of the split line is dropped: trailing newline)
            self.arbol.convertir(linea[:-1], self.pila)
            # accumulate the formatted result line for the output file
            impresion += "La respuesta para ["+' '.join(map(str, linea[:-1])).strip('[]')+"] es: "+str(self.arbol.evaluar(self.pila.desapilar()))+'\n'
        return impresion

    """ AGREGAR EL RESULTADO AL ARCHIVO """
    def escribir_archivo(self,resultado):
        """Write the accumulated results string to resultados.txt."""
        busquedas = open("resultados.txt", "w")
        busquedas.write(resultado)
        busquedas.close()
# script entry point: evaluate the expressions file and persist the results
inicio = Inicio()
salida = inicio.abrir_archivo()
inicio.escribir_archivo(salida)
| true |
826c6ea72df53638f549a415ca5fb51361fbe4bc | Python | pcw1993/stu_Machine_learning | /机器学习/14.聚类-means.py | UTF-8 | 1,640 | 3 | 3 | [] | no_license | # -*- coding:utf-8 -*-
# author: pcw
# datetime: 2018/12/7 10:47 AM
# software: PyCharm
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
# with open('./data/1.txt', 'r') as f:
# cont = f.read()
# print(cont)
def kmeans():
    """Cluster Instacart shoppers by the product aisles they buy from.

    Merges the order/product csv files, builds a user x aisle crosstab,
    reduces it with PCA (keeping 90% of the variance), runs KMeans(4) on
    the first 500 users, plots the clusters and prints a silhouette score.
    """
    # merge the four tables on their shared key columns
    prior = pd.read_csv('./data/order_products__prior.csv')
    products = pd.read_csv('./data/products.csv')
    orders = pd.read_csv('./data/orders.csv')
    aisles = pd.read_csv('./data/aisles.csv')
    _mg = pd.merge(prior, products, on=['product_id', 'product_id'])
    _mg = pd.merge(_mg, orders, on=['order_id', 'order_id'])
    mt = pd.merge(_mg, aisles, on=['aisle_id', 'aisle_id'])
    print(mt.head(10))
    # cross tabulation (a special grouping tool): users as rows, aisles as columns
    cross = pd.crosstab(mt['user_id'], mt['aisle'])
    print(cross.head(10))
    # dimensionality reduction via principal component analysis
    pca = PCA(n_components=0.9)
    data = pca.fit_transform(cross)
    print(data)
    print(data.shape)  # reduced dimensionality
    # clustering
    # limit the number of samples
    x = data[:500]
    km = KMeans(n_clusters=4)
    km.fit(x)
    predict = km.predict(x)
    print(predict)
    # visualize the cluster assignments
    plt.figure(figsize=(10,10))
    # one colour per cluster label
    colored = ['orange', 'green', 'blue', 'purple']
    colr = [colored[i] for i in predict]
    # plot principal components 1 and 20 against each other
    plt.scatter(x[:,1], x[:,20], color=colr)
    plt.xlabel('1')
    plt.ylabel('20')
    plt.show()
    # judge clustering quality with the silhouette coefficient
    score = silhouette_score(x, predict)
    print(score)


if __name__ == '__main__':
    kmeans()
| true |
dd4b2cdc3aadebcc48306170fbff22d5010069f3 | Python | facelessuser/Rummage | /rummage/lib/gui/dialogs/file_ext_dialog.py | UTF-8 | 2,800 | 2.578125 | 3 | [
"MIT"
] | permissive | """
File Ext Dialog.
Licensed under MIT
Copyright (c) 2013 - 2018 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import wx
from ..localization import _
from .. import gui
class FileExtDialog(gui.FileExtDialog):
    """File extension dialog.

    Lets the user edit a comma separated list of file extensions; the
    normalized result (every item dot-prefixed) is available on
    ``self.extensions`` after the Save button is clicked.
    """

    def __init__(self, parent, extensions):
        """Initialize dialog.

        :param parent: parent window for the dialog
        :param extensions: initial comma separated extension string
        """
        super().__init__(parent)

        self.extensions = extensions

        self.localize()
        self.refresh_localization()

        self.m_ext_textbox.SetValue(self.extensions)

        self.m_ext_panel.Layout()
        self.m_ext_panel.Fit()
        self.Fit()
        # enforce a sensible minimum width for the dialog
        if self.GetSize()[0] < 500:
            self.SetSize(wx.Size(500, self.GetSize()[1]))
            self.SetMinSize(wx.Size(500, self.GetSize()[1]))
        self.SetMinSize(self.GetSize())
        self.Centre()

    def localize(self):
        """Translate strings."""
        self.TITLE = _("File Extension")
        self.EXTENSIONS = _("Extensions")
        self.OKAY = _("Save")
        self.CANCEL = _("Cancel")

    def refresh_localization(self):
        """Apply the localized strings to the dialog widgets."""
        self.SetTitle(self.TITLE)
        self.m_ext_label.SetLabel(self.EXTENSIONS)
        self.m_okay_button.SetLabel(self.OKAY)
        self.m_cancel_button.SetLabel(self.CANCEL)
        self.Fit()

    def on_okay_click(self, event):
        """Normalize the edited extension list, store it, and close."""
        value = self.m_ext_textbox.GetValue()
        new_items = []
        for item in value.split(','):
            item = item.strip()
            if item:
                # ensure every extension is dot-prefixed
                if not item.startswith('.'):
                    item = '.' + item
                new_items.append(item)
        self.extensions = ', '.join(new_items)
        self.Close()

    def on_cancel_click(self, event):
        """Close the dialog without saving any changes."""
        self.Close()
| true |
945d578c6f688ca2cf48de07731e0c84f1686938 | Python | thankew/BTVN_Python | /pythonProject2/BTVN_Buoi17/eg.py | UTF-8 | 241 | 2.8125 | 3 | [] | no_license | en_dict = {
"Laptop": "Máy tính xách tay",
"Vietnamese": "Người Việt, tiếng Việt",
"Snake": "Con rắn",
"Happy": "Hạnh phúc",
"Sad": "Buồn bã"}
while True:
print(next(en_dict))
show_dict(en_dict) | true |
2f39b9d719ea09ba74b24719b46f9376603362a3 | Python | pobrien11/PyAniLib | /pyani/core/mngr/ui/core.py | UTF-8 | 36,645 | 2.59375 | 3 | [] | no_license | import os
import logging
import pyani.core.ui
import pyani.core.mngr.tools
import pyani.core.appvars
import collections
# set the environment variable to use a specific wrapper
# it can be set to pyqt, pyqt5, pyside or pyside2 (not implemented yet)
# you do not need to use QtPy to set this variable
os.environ['QT_API'] = 'pyqt'
# import from QtPy instead of doing it directly
# note that QtPy always uses PyQt5 API
from qtpy import QtWidgets, QtCore
from PyQt4.QtCore import pyqtSignal
logger = logging.getLogger()
class AniTaskList:
    """
    The purpose of this class is to provide a list of tasks that can be run in order, sequentially, and let this
    class manage running those. It uses signal/slots of pyqt to do this. A function runs, fires a signal when it
    completes or errors, which causes this class to then respond and either report the error or run the next
    function in the list. Provides a post task list option to run task(s) after the main tasks complete
    Doesn't handle errors directly, connects to methods that get called when error occurs
    Uses a generalized format for task list. See task_list_to_run under init() for format.
    USAGE:
    1. Create an instance, for ex:
        self.task_list = [
            # make tools cache
            {
                'func': self.tools_mngr.sync_local_cache_with_server,
                'params': [],
                'finish signal': self.tools_mngr.finished_cache_build_signal,
                'error signal': self.tools_mngr.error_thread_signal,
                'thread task': False,
                'desc': "Created local tools cache."
            }
        ]
    2. Start the process using start_task_list()
    """
    def __init__(
            self,
            task_list_to_run,
            error_callback=None,
            ui_callback=None,
            post_tasks_to_run=None
    ):
        """
        :param task_list_to_run: a list of dicts that hold task information. The format is:
        {
            'func': this is the function to call - do not put parenthesis, ie do
                    _create_asset_list_for_update_report, not _create_asset_list_for_update_report()
            'params': any parameters, pass as a list
            'finish signal': the pyqt signal to connect to for when a method finishes
            'error signal': the pyqt signal to connect to for when a method errors
            'thread task': True means put task in thread, False does not. Only thread non threaded methods. If
                           the method in 'func' creates threads, set this to False otherwise errors will occur.
            'desc': string description describing what this method does. shown in activity log.
        }
        :param post_tasks_to_run: optional task(s) to call when main task(s) finish. a list of dicts in format:
        {
            'func': this is the function(s) to call, pass as a list
            'params': any parameters, pass as a list
        }
        Note optionally you can later call the set_post_tasks method - useful if the post tasks depend on this windows
        creation
        :param error_callback: optional error callback/function for when errors occur
        :param ui_callback: optional ui callback to update a ui
        """
        # setup threading
        self._thread_pool = QtCore.QThreadPool()
        logger.info("Multi-threading with maximum %d threads" % self._thread_pool.maxThreadCount())
        # this tells the next_step_in_task_list() method to not get any more tasks from the task list defined by
        # the class variable task_list
        self._stop_tasks = False
        # function to call if error occurs
        self._error_callback = error_callback
        # function to call to update a ui
        self._ui_callback = ui_callback
        # method vars for setup and updating - hold the currently running task's pieces
        self._task_list = task_list_to_run
        self._method_to_run = None
        self._method_params = None
        self._method_finish_signal = None
        self._method_error_signal = None
        # tasks to run after the main task list runs
        self._post_tasks = post_tasks_to_run
    def set_error_method(self, func):
        """Set the error callback function when errors occur"""
        self._error_callback = func
    def set_post_tasks(self, post_tasks):
        """
        Call this to set task(s) to run after the main task list finishes
        :param post_tasks: a list of dicts in format:
        {
            'func': this is the function(s) to call, pass as a list
            'params': any parameters, pass as a list
        }
        """
        self._post_tasks = post_tasks
    def add_task(self, task):
        # appends a task dict (same format as task_list_to_run entries) to the queue
        self._task_list.append(task)
    def stop_tasks(self):
        """Stops tasks from running"""
        self._stop_tasks = True
    def start_tasks(self):
        """Starts the task list by getting first task"""
        self._get_next_task_to_run()
    def is_task_remaining(self):
        """Returns true if tasks remain, False if no more tasks"""
        if self._task_list:
            return True
        else:
            return False
    def next_step_in_task_list(self):
        """
        Increments to the next step in the update or setup process task list, provided via the class variable
        task_list. If no more tasks are left, shows the activity report and hides step and progress ui labels
        """
        # check for more steps that need to be run
        if self._task_list and not self._stop_tasks:
            # add to activity log as success
            self._get_next_task_to_run()
        # no more steps
        else:
            # run the post task(s)
            # NOTE(review): this branch is also reached when stop_tasks() was called, so post
            # tasks run even after an abort — confirm that is intended.
            if self._post_tasks:
                for task in self._post_tasks:
                    func = task['func']
                    params = task['params']
                    func(*params)
    def _get_next_task_to_run(self):
        """
        Gets a task from the task list and runs it in a thread
        """
        if self._task_list:
            # update the ui with the first step / task
            if self._ui_callback:
                self._ui_callback()
            # tasks run in FIFO order
            task_list_package = self._task_list.pop(0)
            self._method_to_run = task_list_package['func']
            self._method_params = task_list_package['params']
            self._method_finish_signal = task_list_package['finish signal']
            self._method_error_signal = task_list_package['error signal']
            # description of the current task (shown in activity logs by callers)
            self._task_desc = task_list_package['desc']
            # some tasks are already multi-threaded, so only thread tasks that have the 'thread task' key in task list
            # set to True
            if task_list_package['thread task']:
                # thread task
                worker = pyani.core.ui.Worker(
                    self._method_to_run,
                    False,
                    *self._method_params
                )
                self._thread_pool.start(worker)
                # slot that is called when a thread finishes, passes the active_type so calling classes can
                # know what was updated and the save cache method so that when cache gets updated it can be
                # saved
                worker.signals.finished.connect(self.next_step_in_task_list)
                if self._error_callback:
                    worker.signals.error.connect(self._error_callback)
            # already threaded, don't thread
            else:
                # NOTE(review): connect() is invoked here for every non-threaded task; if the same
                # signal object appears in multiple tasks the slot gets connected multiple times and
                # will fire once per connection — verify intended.
                self._method_finish_signal.connect(self.next_step_in_task_list)
                if self._error_callback:
                    self._method_error_signal.connect(self._error_callback)
                self._method_to_run(*self._method_params)
class AniTaskListWindow(pyani.core.ui.AniQMainWindow):
    """
    The purpose of this class is to provide a simple gui interface for running a list of tasks. Displays
    progress, an app description, and the steps being run. Shows an activity log after running. Uses the AniTaskList
    to handle running the tasks
    Inherits from AniQMainWindow
    USAGE:
    1. Create an instance, for ex:
        self.task_list = [
            # make tools cache
            {
                'func': self.tools_mngr.sync_local_cache_with_server,
                'params': [],
                'finish signal': self.tools_mngr.finished_cache_build_signal,
                'error signal': self.tools_mngr.error_thread_signal,
                'thread task': False,
                'desc': "Created local tools cache."
            }
        ]
        # create a ui (non-interactive) to run setup
        AniTaskListWindow(
            error_logging,
            progress_list,
            "Setup",
            "Setup",
            self.task_list
        )
    2. Start the process using start_task_list()
    """
    def __init__(
            self,
            error_logging,
            progress_list,
            win_title,
            metadata,
            task_list_to_run,
            app_description=None,
            post_tasks_to_run=None,
            asset_mngr=None,
            tools_mngr=None):
        """
        :param error_logging : error log (pyani.core.error_logging.ErrorLogging object) from trying
               to create logging in main program
        :param progress_list: a list of strings describing the steps being run
        :param win_title: title of the window
        :param metadata: metadata like app name, where it's located. See AniQMainWindow for metadata values
        :param task_list_to_run: a list of dicts that hold task information. The format is:
        {
            'func': this is the function to call - do not put parenthesis, ie do _create_asset_list_for_update_report, not _create_asset_list_for_update_report()
            'params': any parameters, pass as a list
            'finish signal': the pyqt signal to connect to for when a method finishes
            'error signal': the pyqt signal to connect to for when a method errors
            'thread task': True means put task in thread, False does not. Only thread non threaded methods. If
                           the method in 'func' creates threads, set this to False otherwise errors will occur.
            'desc': string description describing what this method does. shown in activity log.
        }
        :param app_description: optional text (can be html formmatted) to display for what this app does
        :param post_tasks_to_run: optional task(s) to call when main task(s) finish. a list of dicts in format:
        {
            'func': this is the function(s) to call, pass as a list
            'params': any parameters, pass as a list
        }
        Note optionally you can later call the set_post_tasks method - useful if the post tasks depend on this windows
        creation
        :param asset_mngr: a pyani.core.mngr.asset object
        :param tool_mngr: a pyani.core.mngr.tool object
        """
        # the base window class needs its own tools manager instance
        tools_mngr_for_ani_win = pyani.core.mngr.tools.AniToolsMngr()
        # pass win title, icon path, app manager, width and height
        super(AniTaskListWindow, self).__init__(
            win_title,
            "images\\setup.ico",
            metadata,
            tools_mngr_for_ani_win,
            450,
            700,
            error_logging,
            show_help=False,
            disable_version=False
        )
        if tools_mngr:
            self.tools_mngr = tools_mngr
        else:
            self.tools_mngr = None
        if asset_mngr:
            self.asset_mngr = asset_mngr
        else:
            self.asset_mngr = None
        # check if logging was setup correctly in main()
        if error_logging.error_log_list:
            errors = ', '.join(error_logging.error_log_list)
            self.msg_win.show_warning_msg(
                "Error Log Warning",
                "Error logging could not be setup because {0}. You can continue, however "
                "errors will not be logged.".format(errors)
            )
        # save the setup class for error logging to use later
        self.error_logging = error_logging
        # the description if provided to show in the window for what this app does
        self.app_description = app_description
        # the list of steps/tasks descriptions
        self.progress_list = progress_list
        # current step/task
        self.step_num = 0
        # total number of steps / tasks
        if self.progress_list:
            self.step_total = len(self.progress_list)
        else:
            self.step_total = 0
        # logs what runs successfully and errors
        self.activity_log = []
        # shown at end as a description of what ran
        self.task_desc = None
        # indicates an error occurred
        self.error_occurred = False
        # delegate actual task sequencing to AniTaskList; this window only renders progress
        self.task_mngr = AniTaskList(
            task_list_to_run,
            error_callback=self.process_error,
            post_tasks_to_run=post_tasks_to_run,
            ui_callback=self.update_ui
        )
        # gui vars
        self.progress_label = QtWidgets.QLabel("")
        self.step_label = QtWidgets.QLabel("")
        self.close_btn = QtWidgets.QPushButton("Close Window", self)
        self.activity_report = QtWidgets.QTextEdit("")
        self.activity_report.setFixedWidth(400)
        self.activity_report.setFixedHeight(350)
        # hide at start, shown when all tasks done
        self.activity_report.hide()
        self.create_layout()
        self.set_slots()
    def create_layout(self):
        # builds: close button row, app description, centered step/progress labels,
        # and the (initially hidden) activity report, top to bottom
        h_layout_btn = QtWidgets.QHBoxLayout()
        h_layout_btn.addStretch(1)
        h_layout_btn.addWidget(self.close_btn)
        h_layout_btn.addItem(QtWidgets.QSpacerItem(10, 1))
        self.main_layout.addLayout(h_layout_btn)
        self.main_layout.addItem(QtWidgets.QSpacerItem(1, 25))
        desc_label = QtWidgets.QLabel(self.app_description)
        desc_label.setMaximumWidth(self.frameGeometry().width())
        desc_label.setWordWrap(True)
        self.main_layout.addWidget(desc_label)
        self.main_layout.addItem(QtWidgets.QSpacerItem(1, 30))
        h_layout_progress = QtWidgets.QHBoxLayout()
        h_layout_progress.addStretch(1)
        sub_layout_progress = QtWidgets.QVBoxLayout()
        sub_layout_progress.addWidget(self.step_label)
        sub_layout_progress.addItem(QtWidgets.QSpacerItem(1, 10))
        sub_layout_progress.addWidget(self.progress_label)
        sub_layout_progress.setAlignment(self.step_label, QtCore.Qt.AlignHCenter)
        sub_layout_progress.setAlignment(self.progress_label, QtCore.Qt.AlignHCenter)
        sub_layout_progress.addItem(QtWidgets.QSpacerItem(1, 20))
        h_layout_progress.addLayout(sub_layout_progress)
        h_layout_progress.addStretch(1)
        self.main_layout.addLayout(h_layout_progress)
        self.main_layout.addItem(QtWidgets.QSpacerItem(1, 20))
        h_layout_report = QtWidgets.QHBoxLayout()
        h_layout_report.addStretch(1)
        h_layout_report.addWidget(self.activity_report)
        h_layout_report.addStretch(1)
        self.main_layout.addLayout(h_layout_report)
        self.main_layout.addStretch(1)
        self.add_layout_to_win()
    def set_slots(self):
        # wire the close button to the guarded close handler
        self.close_btn.clicked.connect(self.close_window)
    def close_window(self):
        """
        Prevent any more tasks from running and close window. Asks user before closing.
        """
        # only prompt if tasks are still queued; otherwise close silently
        if self.task_mngr.is_task_remaining():
            if self.error_occurred:
                response = self.msg_win.show_question_msg(
                    "Warning",
                    "Tasks are still running, however it seems a task has errors or stalled. "
                    "Close the window?"
                )
            else:
                response = self.msg_win.show_question_msg(
                    "Warning",
                    "Tasks are still running. Are you sure you want to close the window?"
                )
            if response:
                self.task_mngr.stop_tasks()
                self.close()
        else:
            self.close()
    def set_post_tasks(self, post_tasks):
        """
        Call this to set task(s) to run after the main task list finishes
        :param post_tasks: a list of dicts in format:
        {
            'func': this is the function(s) to call, pass as a list
            'params': any parameters, pass as a list
        }
        """
        self.task_mngr.set_post_tasks(post_tasks)
    def start_task_list(self):
        """
        Starts running the first task/method in the task list provided via the class variable task_list
        """
        # run the first task
        self.task_mngr.start_tasks()
    def update_ui(self):
        """
        Updates the ui elements
        """
        # NOTE(review): self.font_size / self.font_family are assumed to be provided by
        # AniQMainWindow — they are not set in this class; confirm.
        if self.progress_list:
            self.step_num += 1
            self.step_label.setText(
                "<p align='center'>"
                "<font style='font-size:10pt; font-family:{0}; color: #ffffff;'>S T E P</font><br>"
                "<font style='font-size:20pt; font-family:{0}; color: #ffffff;'>{1} / {2}</font>"
                "</p>".format(
                    self.font_family,
                    self.step_num,
                    self.step_total
                )
            )
            # pop(0): consume the next step description in order
            self.progress_label.setText(
                "<span style='font-size:{0}pt; font-family:{1}; color: #ffffff;'>{2}</span>".format(
                    self.font_size,
                    self.font_family,
                    self.progress_list.pop(0)
                )
            )
    def process_error(self, error):
        """
        Collects any errors and adds to activity log
        :param error: the error that occurred
        """
        # add to activity log with red text and formatting
        error_msg = (
            "<span style='font-size:{2}pt; font-family:{0}; color: {1};'><strong>ERROR</strong><br><br></span>"
            "<span style='font-size:{2}pt; font-family:{0}; color: #ffffff;'>The following step errored: {3}.<br><br>"
            " The error is:</br> {4}</span>"
            .format(
                self.font_family,
                pyani.core.ui.RED.name(),
                self.font_size,
                self.progress_label.text(),
                error
            )
        )
        # an error aborts the remaining tasks and immediately shows the log
        self.task_mngr.stop_tasks()
        self.error_occurred = True
        self.activity_log.append(error_msg)
        logger.error(error_msg)
        self.display_activity_log()
    def add_activity_log_item(self, item):
        """
        Adds a new item to the log
        :param item: a string item, can contain html formatting
        """
        self.activity_log.append(item)
    def display_activity_log(self):
        """
        Show the activity (i.e. install or update steps) that ran or failed
        """
        # only show the success banner when no error aborted the run
        if not self.error_occurred:
            success_msg = "<span style='font-size:10pt; font-family:{0}; color: {1};'><strong>" \
                          "Setup completed successfully.</strong><br><br></span>".format(
                              self.font_family,
                              pyani.core.ui.GREEN
                          )
        else:
            success_msg = ""
        self.activity_report.setText(
            "<span style='font-size:18pt; font-family:{0}; color: #ffffff;'>ACTIVITY LOG <br><br></span>{1}"
            "<font style='font-size:10pt; font-family:{0}; color: #ffffff;'>"
            "<ul><li>{2}</ul>"
            "</font>".format(
                self.font_family,
                success_msg,
                '<li>'.join(self.activity_log)
            )
        )
        self.activity_report.show()
class AniReportCore(QtWidgets.QDialog):
    """
    Base dialog shared by every report window. Builds a read-only text area with a
    right-aligned close button above it, and exposes show_content() for subclasses
    to render an html report. Also provides shared font/divider styling attributes.
    """
    # general signal for successful tasks
    finished_signal = pyqtSignal()
    # error message for other classes to receive when doing any local file operations
    error_thread_signal = pyqtSignal(object)
    def __init__(self, parent_win, title, width=800, height=900):
        """
        :param parent_win: window opening this window
        :param title: the window title text
        :param width: minimum window width in pixels
        :param height: minimum window height in pixels
        """
        super(AniReportCore, self).__init__(parent=parent_win)
        self.app_vars = pyani.core.appvars.AppVars()
        # font styling used by subclasses when building html
        self.font_family = pyani.core.ui.FONT_FAMILY
        self.font_size_heading_1 = "20"
        self.font_size_heading_2 = "16"
        self.font_size_heading_3 = "11"
        self.font_size_body = "10"
        # image for horizontal divider line used in report headings
        self.h_line_img = "C:\\PyAniTools\\core\\images\\h_line_cyan.png"
        self.setWindowTitle(title)
        self.win_width = width
        self.setMinimumWidth(self.win_width)
        self.setMinimumHeight(height)
        self.btn_close = QtWidgets.QPushButton("Close")
        self.btn_close.clicked.connect(self.close)
        # close button sits right-aligned above the report body
        root_layout = QtWidgets.QVBoxLayout()
        button_row = QtWidgets.QHBoxLayout()
        button_row.addStretch(1)
        button_row.addWidget(self.btn_close)
        root_layout.addLayout(button_row)
        # read-only text area that renders the html report
        self.content = QtWidgets.QTextEdit()
        self.content.setReadOnly(True)
        root_layout.addWidget(self.content)
        self.setLayout(root_layout)
    def show_content(self, html_content):
        """
        Sets the content to display in the pyqt Text edit widget and fires a finished signal
        :param html_content: a string of html
        """
        self.content.setHtml(html_content)
        # do show before finished signal, otherwise might move on before executing display of window
        self.show()
        self.finished_signal.emit()
class AniAssetTableReport(AniReportCore):
    """
    A table report that is customizable. Can control headings/number of columns, cellspacing, column width
    headings are a string list and define the number of columns
    column widths are a string list of percents for the size of each column
    row data is a list of tuples, each list item represents a row of data, so the tuple size must match the len of the
    headings list
    """
    def __init__(self, parent_win, cellspacing=5):
        """
        :param parent_win: window opening this window
        :param cellspacing: amount of cellspacing between table cells
        """
        super(AniAssetTableReport, self).__init__(parent_win, "Review Assets Download Report", width=1500)
        # html table cellspacing attribute value
        self.cellspacing = cellspacing
        # list of column heading strings - also defines the number of columns
        self.headings = None
        # list of column width percentages (strings), parallel to self.headings
        self.col_widths = None
        # list of tuples, one tuple per table row; each tuple's length must match len(self.headings)
        self.data = None
    def generate_table_report(self):
        """
        Creates an html table for reporting data and displays it in the dialog. Expects
        self.headings, self.col_widths and self.data to be set by the caller; falls back to
        a one-column error table if headings were never provided.
        """
        # create header row
        html_content = "<table cellspacing='{0}' border='0'>".format(self.cellspacing)
        html_content += "<tr style='font-size:{0}pt; font-family:{1}; color:{2};'>".format(
            self.font_size_heading_2,
            self.font_family,
            pyani.core.ui.CYAN
        )
        if not self.headings:
            # fall back to a one-column error table so the report still renders
            self.headings = ["Could not build headings"]
            self.col_widths = ["100"]
            # bug fix: rows must be tuples/lists, not bare strings - a bare string row
            # would be iterated character by character by the row loop below
            self.data = [("Heading build error, could not construct data portion of table.",)]
        # zip keeps headings and widths in lockstep (and tolerates a short width list)
        for heading, col_width in zip(self.headings, self.col_widths):
            html_content += "<td width='{0}%'>".format(col_width)
            html_content += heading
            html_content += "</td>"
        html_content += "</tr>"
        # add a blank spacer row between the header and the data rows
        html_content += "<tr>"
        for _ in self.headings:
            # bug fix: previously emitted malformed closing tags "</td> </td>"
            html_content += "<td> </td>"
        html_content += "</tr>"
        if self.data:
            for row in self.data:
                html_content += "<tr style='font-size:{0}pt; font-family:{1}; color: #ffffff;'>".format(
                    self.font_size_body,
                    self.font_family
                )
                for cell in row:
                    html_content += "<td>"
                    html_content += cell
                    html_content += "</td>"
                html_content += "</tr>"
        html_content += "</table>"
        self.show_content(html_content)
class AniAssetUpdateReport(AniReportCore):
    """
    Creates a window with a report of assets that have been added, modified or removed. Displays as html. General
    format is:
    Modified Assets
        category (such as rig, audio, etc...)
            asset name : version (if available)
    New Assets
        ....
    Deleted Assets
        ....
    """
    def __init__(self, parent_win):
        """
        :param parent_win: window opening this window
        """
        super(AniAssetUpdateReport, self).__init__(parent_win, "Asset Update Report")
        # note: self.app_vars is already created by AniReportCore.__init__, so it is not
        # re-created here
        # dictionary for displaying the assets by category in the following order:
        # rigs, audio, gpu cache, maya tools then pyanitools
        self.assets_grouped_by_cat = collections.OrderedDict()
        self.assets_grouped_by_cat["rig"] = {
            'display name': 'Rigs',
            'assets': []
        }
        self.assets_grouped_by_cat["audio"] = {
            'display name': 'Audio',
            'assets': []
        }
        self.assets_grouped_by_cat["model/cache"] = {
            'display name': 'GPU Cache',
            'assets': []
        }
        self.assets_grouped_by_cat["scripts"] = {
            'display name': 'Maya Scripts',
            'assets': []
        }
        self.assets_grouped_by_cat["plugins"] = {
            'display name': 'Maya Plugins',
            'assets': []
        }
        self.assets_grouped_by_cat["apps"] = {
            'display name': 'PyAniTools Apps',
            'assets': []
        }
        self.assets_grouped_by_cat["core"] = {
            'display name': 'PyAniTools Core Files',
            'assets': []
        }
        self.assets_grouped_by_cat["lib"] = {
            'display name': 'PyAniTools Library Files',
            'assets': []
        }
        self.assets_grouped_by_cat["shortcuts"] = {
            'display name': 'PyAniTools App Shortcuts',
            'assets': []
        }
    def generate_asset_update_report(self, asset_mngr=None, tools_mngr=None):
        """
        Gets the assets that have changed, been added, or removed for all assets (tools, show, shot) and shows the
        report. Sorts the assets by type, putting show and shot assets first, then tool assets
        :param asset_mngr: an asset manager object - pyani.core.mngr.assets
        :param tools_mngr: a tool manager object - pyani.core.mngr.tools
        """
        # see pyani.core.mngr.core.find_new_and_updated_assets() for format of dicts
        if asset_mngr:
            assets_added, assets_modified, assets_deleted = asset_mngr.find_changed_assets()
        else:
            assets_added = dict()
            assets_modified = dict()
            assets_deleted = dict()
        if tools_mngr:
            tools_added, tools_modified, tools_deleted = tools_mngr.find_changed_assets()
        else:
            tools_added = dict()
            tools_modified = dict()
            tools_deleted = dict()
        # combine assets - tool assets merged into the show/shot asset dicts
        assets_added.update(tools_added)
        assets_modified.update(tools_modified)
        assets_deleted.update(tools_deleted)
        self.display_asset_update_report(assets_added, assets_modified, assets_deleted)
    def _make_section_header(self, title):
        """
        Build the html heading (title + divider line image) for a report section.
        :param title: section title string, e.g. 'NEW ASSETS'
        :return: the html string
        """
        # uses self.h_line_img consistently (previously two sections hard-coded the path)
        return "<p><div style='font-size:{0}pt; font-family:{1}; color:{2};'><b>{3}</b>" \
               "<br>" \
               "<img src='{4}'></img>" \
               "</div>" \
               "</p>".format(self.font_size_heading_1, self.font_family, pyani.core.ui.CYAN, title, self.h_line_img)
    def _make_empty_section_msg(self, msg):
        """
        Build the html shown when a report section has no assets.
        :param msg: message string, e.g. 'No assets were added.'
        :return: the html string
        """
        return "<p>" \
               "<div style='font-size:{0}pt; font-family:{1}; color:#ffffff; margin-left:30px;'>" \
               "{2}" \
               "</div>" \
               "</p>".format(self.font_size_heading_3, self.font_family, msg)
    def display_asset_update_report(self, assets_added, assets_modified, assets_deleted):
        """
        Shows a report on screen with assets that were added, removed or modified during an update. emits a signal
        when finished.
        :param assets_added: dictionary of assets added, see see pyani.core.mngr.core.find_new_and_updated_assets()
        for format of dicts
        :param assets_modified: dictionary of assets that have had files updated/modified. in same format as assets
        added.
        :param assets_deleted: dictionary of assets that have been removed. in same format as assets added.
        """
        html_report = self._make_section_header("NEW ASSETS")
        self._reset_assets_list()
        if assets_added:
            self._order_by_asset_category(assets_added)
            html_report += self._create_asset_list_for_update_report()
        else:
            # bug fix: this message and the one in the UPDATED section were swapped
            html_report += self._make_empty_section_msg("No assets were added.")
        html_report += self._make_section_header("UPDATED ASSETS")
        self._reset_assets_list()
        if assets_modified:
            self._order_by_asset_category(assets_modified)
            html_report += self._create_asset_list_for_update_report()
        else:
            html_report += self._make_empty_section_msg("No assets have been updated.")
        html_report += self._make_section_header("REMOVED ASSETS")
        self._reset_assets_list()
        if assets_deleted:
            self._order_by_asset_category(assets_deleted)
            html_report += self._create_asset_list_for_update_report()
        else:
            html_report += self._make_empty_section_msg("No assets were removed.")
        self.show_content(html_report)
    def _reset_assets_list(self):
        """
        Clears the ordered assets list
        """
        # clear assets, keeping the category ordering intact
        for asset_category in self.assets_grouped_by_cat:
            if self.assets_grouped_by_cat[asset_category]['assets']:
                self.assets_grouped_by_cat[asset_category]['assets'] = list()
    def _order_by_asset_category(self, assets_list):
        """
        orders the dictionary by category in this format, and then sorts assets by name:
        {
            asset_category: {
                'display name' : name
                'assets': [
                    (
                        asset_name, {asset info such as version, file names, etc}
                    ),
                    ...
                ]
            },
            ...
        }
        :param assets_list: a dictionary in the format found here: pyani.core.mngr.core.find_new_and_updated_assets()
        """
        # convert unordered to ordered
        for asset_type in assets_list:
            for asset_category in assets_list[asset_type]:
                # convert to an ordered list from an unordered dict. converts the dict to a
                # list of tuples sorted by name
                dict_to_sorted_list_tuples = [
                    (key, assets_list[asset_type][asset_category][key])
                    for key in sorted(assets_list[asset_type][asset_category].keys())
                ]
                self.assets_grouped_by_cat[asset_category]['assets'] = dict_to_sorted_list_tuples
    def _create_asset_list_for_update_report(self):
        """
        Creates the html to display the list of assets by category and then name.
        :return: a string containing the html
        """
        html_report = ""
        for asset_category in self.assets_grouped_by_cat:
            if self.assets_grouped_by_cat[asset_category]['assets']:
                # list the asset category first
                html_report += "<p>" \
                               "<div style='font-size:{0}pt; font-family:{1}; color:{3}; margin-left:30px;'>" \
                               "{2}" \
                               "</div>" \
                               "</p>".format(
                                   self.font_size_heading_2,
                                   self.font_family,
                                   self.assets_grouped_by_cat[asset_category]['display name'],
                                   pyani.core.ui.CYAN
                               )
                html_report += "<div style='font-size:{0}pt; font-family:{1}; color: #ffffff;'>" \
                               "<ul>".format(
                                   self.font_size_body,
                                   self.font_family
                               )
                for asset_name, asset_info in self.assets_grouped_by_cat[asset_category]['assets']:
                    # show the version next to the asset name when one exists
                    if asset_info['version']:
                        html_report += "<li>{0} : <span style='color:{2};'><i>Version {1}</i></span></li>".format(
                            asset_name, asset_info['version'], pyani.core.ui.CYAN
                        )
                    else:
                        html_report += "<li>{0}</li>".format(asset_name)
                    # list the per-asset file changes grouped as added / updated / removed
                    if asset_info['files added']:
                        html_report += "<ul>" \
                                       "<li><span style='color:{0};'>ADDED:<span></li>".format(pyani.core.ui.GREEN)
                        html_report += "<ul>"
                        for file_name in asset_info['files added']:
                            html_report += "<li>{0}</li>".format(file_name)
                        html_report += "</ul>" \
                                       "</ul>"
                    if asset_info['files modified']:
                        html_report += "<ul>" \
                                       "<li><span style='color:{0};'>UPDATED:<span></li>".format(pyani.core.ui.GOLD)
                        html_report += "<ul>"
                        for file_name in asset_info['files modified']:
                            html_report += "<li>{0}</li>".format(file_name)
                        html_report += "</ul>" \
                                       "</ul>"
                    if asset_info['files removed']:
                        html_report += "<ul>" \
                                       "<li><span style='color:{0};'>REMOVED:<span></li>".format(
                                           pyani.core.ui.RED.name()
                                       )
                        html_report += "<ul>"
                        for file_name in asset_info['files removed']:
                            html_report += "<li>{0}</li>".format(file_name)
                        html_report += "</ul>" \
                                       "</ul>"
                html_report += "</ul>" \
                               "</div>"
                html_report += "<p>&nbsp;</p>"
        return html_report
| true |
f3840f4e5d685c1708500f223c0ef3d32d5bc7c2 | Python | caseycas/CodeNLPReplication | /lexer/utilities.py | UTF-8 | 37,637 | 2.515625 | 3 | [
"MIT"
] | permissive | '''
Created on Oct 12, 2015
@author: Naji Dmeiri
@author: Bogdan Vasilescu
@author: Casey Casalnuovo
'''
from pygments.token import *
from collections import OrderedDict
import Android
import Api
import csv
#import jsbeautifier
#from sets import Set
import re
#Check for TREE_TEXTS before NATURAL_LANGUAGE_EXTS
# file-extension globs treated as parse-tree token dumps; per the note above these
# are checked before NATURAL_LANGUAGE_EXTS because the patterns overlap
# (e.g. *.txt.tokens must match here before *.tokens / *.txt below)
TREE_TEXTS = {
    '*.txt.tokens',
    '*.java_ast.tokens',
    '*.mrg'
}
# file-extension globs treated as plain natural-language text
NATURAL_LANGUAGE_EXTS = {
    '*.txt',
    '*.text',
    '*.tokens' #Assume English for now.
}
# programming languages this module knows how to map from a Pygments lexer
# (see languageForLexer below)
SUPPORTED_LANGUAGE_STRINGS = {
    'Ruby',
    'Python',
    'JavaScript',
    'PHP',
    'Java',
    'Scala',
    'C',
    'C++',
    'Objective-C',
    'Swift',
    'Haskell',
    'Common Lisp',
    'Prolog',
    'FSharp',
    'Clojure'
}
def languageForLexer(lexer):
    """
    :param lexer: A `Lexer` object as defined in `pygments.lexer`
    :returns: A string indicating the language supported by the lexer
    :raises KeyError: if the lexer's name is not one of the supported languages
    Currently supported return values: 'Ruby',
                                       'Python',
                                       'JavaScript',
                                       'PHP',
                                       'Java',
                                       'Scala',
                                       'C',
                                       'C++',
                                       'Objective-C',
                                       'Swift'
                                       'Haskell'
                                       'Common Lisp'
                                       'Prolog'
                                       'FSharp'
                                       'Clojure'
    """
    # map Pygments lexer names to the canonical language strings used by this module
    mapping = {
        'Ruby': 'Ruby',
        'Python': 'Python',
        'JavaScript': 'JavaScript',
        'Php': 'PHP',
        'Java': 'Java',
        'Scala': 'Scala',
        'C': 'C',
        'Cpp': 'C++',
        'Objective-C': 'Objective-C',
        'Swift': 'Swift',
        'Haskell': 'Haskell',
        'Common Lisp': 'Common Lisp',
        'Prolog': 'Prolog',
        'FSharp': 'FSharp',
        'Clojure': 'Clojure'
    }
    # bug fix: removed leftover debug print(lexer)/print(lexer.name) calls that polluted
    # stdout on every call; also look the name up once instead of twice
    language = mapping[lexer.name]
    assert language in SUPPORTED_LANGUAGE_STRINGS  # sanity check; can be disabled in release build
    return language
def tokensForTokenType(tokens, tokenType, ignoreSubtypes = False):
    """
    :param tokens: A list of `Token` objects as defined in `pygments.token`
    :param tokenType: A `TokenType` object as defined in `pygments.token`
    :param ignoreSubtypes: When True only exact type matches are kept; when False (default)
                           subtypes of `tokenType` are included as well.
    :returns: A list of token tuples whose type matches `tokenType`.
    :raises ValueError: if `tokenType` is not a standard Pygments token type.
    """
    if tokenType not in STANDARD_TYPES:
        raise ValueError("%s is not a standard Pygments token type." % tokenType)
    if ignoreSubtypes:
        return [tok for tok in tokens if tok[0] == tokenType]
    return [tok for tok in tokens if is_token_subtype(tok[0], tokenType)]
def isSubTypeIn(token, tokenTypes):
    """Return True if the token's type is a subtype of any type in tokenTypes."""
    return any(is_token_subtype(token[0], candidate) for candidate in tokenTypes)
def tokensForTokenTypes(tokens, tokenTypes, ignoreSubtypes = False):
    """
    :param tokens: A list of `Token` objects as defined in `pygments.token`
    :param tokenTypes: A list of `TokenType` objects as defined in `pygments.token`
    :param ignoreSubtypes: When True only exact type matches are kept; when False (default)
                           subtypes of the given types are included as well.
    :returns: A list of token tuples whose type matches any entry in `tokenTypes`.
    :raises ValueError: if any entry of `tokenTypes` is not a standard Pygments token type.
    """
    for t in tokenTypes:
        if t not in STANDARD_TYPES:
            # bug fix: the message previously interpolated the undefined name 'tokenType',
            # which raised NameError instead of the intended ValueError
            raise ValueError("%s is not a standard Pygments token type." % t)
    if not ignoreSubtypes:
        return [t for t in tokens if isSubTypeIn(t, tokenTypes)]
    else:
        return [t for t in tokens if t[0] in tokenTypes]
def tokensExceptTokenType(tokens, tokenType, ignoreSubtypes = False, retainedTypes = []):
    """
    :param tokens: A list of `Token` objects as defined in `pygments.token`
    :param tokenType: A `TokenType` object as defined in `pygments.token` to exclude
    :param ignoreSubtypes: When True only exact matches of `tokenType` are excluded;
                           when False (default) its subtypes are excluded too.
    :param retainedTypes: Specific subtypes of the excluded type we wish to keep.
                          (Applies when ignoreSubtypes = False.) The shared mutable
                          default is safe because the list is only read, never mutated.
    :returns: A list of token tuples with the excluded type filtered out.
    :raises ValueError: if `tokenType` is not a standard Pygments token type.
    """
    if tokenType not in STANDARD_TYPES:
        raise ValueError("%s is not a standard Pygments token type." % tokenType)
    if ignoreSubtypes:
        return [tok for tok in tokens if tok[0] != tokenType]
    return [tok for tok in tokens if not is_token_subtype(tok[0], tokenType) or tok[0] in retainedTypes]
def getOnlyNewLines(token):
    """
    :param token: A token tuple (type, text) to reduce
    :returns: A (Token.Text, text) tuple whose text is either empty or contains only
              newline characters - one fewer than the token's line count, except for a
              single line ending in a newline, which keeps that one newline.
    """
    text = token[1]
    lines = len(text.splitlines())
    if lines == 1 and '\n' in text:
        # single line with a trailing newline: keep exactly one newline
        replacement = u'\n'
    else:
        # keep one newline per line break; empty text yields an empty string
        replacement = (lines - 1) * u'\n'
    return (Token.Text, replacement)
def reduceToNewLine(tokens, tokenType, ignoreSubtypes = False, retainedTypes = []):
    """
    :param tokens: A list of `Token` objects as defined in `pygments.token`
    :param tokenType: A `TokenType` object as defined in `pygments.token` to reduce
    :param ignoreSubtypes: When True only exact matches of `tokenType` are reduced;
                           when False (default) its subtypes are reduced too.
    :param retainedTypes: Specific subtypes of the reduced type we wish to keep untouched.
                          (Applies when ignoreSubtypes = False.)
    :returns: The token list with the targeted tokens replaced by Token.Text tokens
              containing only their newline characters (see getOnlyNewLines)
    :raises ValueError: if `tokenType` is not a standard Pygments token type.
    """
    # cleanup: removed a large commented-out duplicate of the list comprehension below
    if tokenType not in STANDARD_TYPES:
        raise ValueError("%s is not a standard Pygments token type." % tokenType)
    if not ignoreSubtypes:
        return [t if ((not is_token_subtype(t[0], tokenType)) or (t[0] in retainedTypes)) else getOnlyNewLines(t) for t in tokens]
    else:
        return [t if not t[0] == tokenType else getOnlyNewLines(t) for t in tokens]
#Return a list of tokens of the keywords/reserved words for a
#language. The Pygments token representation may vary depending
#on the language, so you'll need to implement a new one for each
#language you want to support.
#Currently supports: Java, Haskell
def getKeywords(tokens, language):
    """
    Return the keyword/reserved-word tokens for the given language.
    :param tokens: A list of `Token` objects as defined in `pygments.token`
    :param language: language name string; case-insensitive. Supported: Java, Haskell
    :returns: the keyword tokens (Token.Keyword minus Token.Keyword.Type; for Haskell,
              Token.Operator.Word tokens are included as well)
    :raises ValueError: if the language is not supported. (Previously this printed a
        message and called quit(), which kills the host process; raising matches the
        error handling of the other token helpers in this module.)
    """
    language = language.lower()
    if language == "java":
        tokens = tokensForTokenType(tokens, Token.Keyword)
        tokens = tokensExceptTokenType(tokens, Token.Keyword.Type)
    elif language == "haskell":
        # Apparently 'error' is not a haskell keyword
        tokens = tokensForTokenType(tokens, Token.Keyword) + tokensForTokenType(tokens, Token.Operator.Word)
        tokens = tokensExceptTokenType(tokens, Token.Keyword.Type)
    else:
        raise ValueError("Language %s is not supported for keyword extraction." % language)
    return tokens
def isOpenType(token, language):
    """Return True when the token belongs to the open vocabulary (names or
    declared types, excluding builtins) for a supported language."""
    supported = ("java", "haskell", "fsharp", "ruby", "clojure", "c")
    if language.lower() not in supported:
        print("That language type is not supported for name extraction.")
        quit()
    return isSubTypeIn(token, [Token.Name, Token.Keyword.Type]) and token[0] != Token.Name.Builtin
def getNameAndLiteralTypes(tokens, language):
    """Return name, declared-type, string-literal and number tokens for a
    supported language, with builtin names excluded.

    Every supported language used an identical extraction, so the per-language
    branches have been collapsed into a single code path guarded by a
    supported-language check (the original six branches were byte-identical).
    """
    if(language.lower() in ["java", "haskell", "fsharp", "ruby", "clojure", "c"]):
        tokens = tokensForTokenTypes(tokens, [Token.Name, Token.Keyword.Type, Token.Literal.String, Token.Number])
        tokens = tokensExceptTokenType(tokens, Token.Name.Builtin)
    else:
        print("That language type is not supported for name extraction.")
        quit()
    return tokens
#Return a list of tokens of the types (variable, class, functions) for a
#language. The Pygments token representation may vary depending on the
#language, so you'll need to implement a new one for each language you want
#to support.
def getNameTypes(tokens, language):
    """Return name and declared-type tokens for a supported language, with
    builtin names excluded.

    Every supported language used an identical extraction, so the per-language
    branches have been collapsed into one code path (the original six branches
    were byte-identical).
    """
    if(language.lower() in ["java", "haskell", "fsharp", "ruby", "clojure", "c"]):
        tokens = tokensForTokenTypes(tokens, [Token.Name, Token.Keyword.Type])
        tokens = tokensExceptTokenType(tokens, Token.Name.Builtin)
    else:
        print("That language type is not supported for name extraction.")
        quit()
    return tokens
def collapseNames(tokens):
    '''
    Replace the text of every token in the 'name' category with its Pygments
    subtype (dots replaced by underscores). Leading and trailing whitespace
    from the original token text is preserved around the substituted name so
    that newline counts stay identical.
    '''
    newTokens = []
    for t in tokens:
        if(is_token_subtype(t[0], Token.Name)):
            # Lengths of the leading/trailing whitespace runs in the token text.
            nStartOff = len(t[1]) - len(t[1].lstrip())
            nEndOff = len(t[1]) - len(t[1].rstrip())
            newName = str(t[0]).replace(".", "_")
            if(nStartOff > 0):
                newName = t[1][:nStartOff] + newName
            if(nEndOff > 0):
                newName = newName + t[1][-nEndOff:]
            # The rewritten token must keep the same number of newlines so that
            # line-based accounting elsewhere stays correct.
            assert(newName.count("\n") == t[1].count("\n"))
            newTokens.append((t[0], newName))
        else:
            newTokens.append(t)
    return newTokens
def getClosedTypes(tokens, language):
    """Return only closed-vocabulary tokens for a supported language: names
    (except builtins), declared types, string literals and numbers are all
    filtered out.

    The stray debug ``print(len(tokens))`` before the return has been removed;
    no sibling extraction function prints, so it was clearly a leftover.
    """
    if(language.lower() in ["java", "haskell", "fsharp", "ruby", "clojure", "c"]):
        tokens = tokensExceptTokenType(tokens, Token.Name, False, [Token.Name.Builtin])
        tokens = tokensExceptTokenType(tokens, Token.Keyword.Type)
        tokens = tokensExceptTokenType(tokens, Token.Literal.String)
        tokens = tokensExceptTokenType(tokens, Token.Number)
    else:
        print("That language type is not supported for name extraction.")
        quit()
    return tokens
#Complement of getNameTypes.
#TODO: More language logic necessary if getNameTypes winds up with different language behavior.
def getNonNameTypes(tokens):
    """Drop every Token.Name token except builtins; complement of getNameTypes."""
    return tokensExceptTokenType(tokens, Token.Name, False, [Token.Name.Builtin])
#Remove all ' and " from the corpus.
def stripQuotes(tokens):
    """Replace every single- and double-quote character in each token's text
    with the marker "<quote>"."""
    sanitized = []
    for tokenType, text in tokens:
        sanitized.append((tokenType, text.replace("\"", "<quote>").replace("\'", "<quote>")))
    return sanitized
#Given a list of tokens and a function of the form string token -> string token,
#modify all tokens of String.Literal type according to the function.
def modifyStrings(tokens, modifyFunc):
    """Apply modifyFunc to every string-literal token; pass others through."""
    rewritten = []
    for t in tokens:
        rewritten.append(modifyFunc(t) if is_token_subtype(t[0], Token.Literal.String) else t)
    return rewritten
#Modify all tokens of the Number type according to the function.
def modifyNumbers(tokens, modifyFunc):
    """Apply modifyFunc to every numeric-literal token; pass others through."""
    rewritten = []
    for t in tokens:
        rewritten.append(modifyFunc(t) if is_token_subtype(t[0], Token.Number) else t)
    return rewritten
def modifyNames(tokens, modifyFunc):
    """Apply modifyFunc to every name token (and Keyword.Type); pass others through."""
    rewritten = []
    for t in tokens:
        isNameLike = is_token_subtype(t[0], Token.Name) or t[0] == Token.Keyword.Type
        rewritten.append(modifyFunc(t) if isNameLike else t)
    return rewritten
def underscoreString(strToken):
    """Collapse every whitespace run in the token text to a single '-'.

    NOTE(review): despite the function name, the replacement character is
    '-', not '_'; preserved as-is since downstream output may rely on it.
    """
    tokenType, text = strToken
    return (tokenType, re.sub(r"\s+", '-', text))
#Replace all strings with a <str> token.
def singleStringToken(strToken):
    """Abstract any string literal to the marker "<str>", keeping its token type."""
    tokenType = strToken[0]
    return (tokenType, "<str>")
#Keep strings that are empty or single character ascii, replace all others with <str>
def threeTypeToken(strToken):
    """Reduce a string literal to one of three forms: empty, a single ascii
    character (double quotes normalized to single quotes), or "<str>"."""
    if(len(strToken[1]) == 2 or len(strToken[1]) == 3): #Includes the '' and "" so if 2 or 3, str is empty or 1 char.
        #Replace "" with '' on single chars.
        try:
            # NOTE(review): str.decode exists only on Python 2 byte strings;
            # under Python 3 this raises AttributeError, which is NOT caught
            # below -- confirm the intended interpreter version.
            strToken[1].decode('ascii')
            new = strToken[1].replace("\"", "\'")
        except UnicodeDecodeError:
            new = "<str>"
        except UnicodeEncodeError:
            new = "<str>"
    else:
        new = "<str>"
    return (strToken[0], new)
#Handles the case where there are backslashes in the string, collapsing runs
#of adjacent string-literal tokens down to a single token.
def collapseStrings(tokens):
    """Merge consecutive Token.Literal.String tokens into one.

    Only the first token of each run of adjacent string-literal tokens is
    kept; non-string tokens pass through unchanged. (Dead commented-out code
    from the original body has been removed.)
    """
    if(len(tokens) == 0):
        return tokens
    newTokens = [tokens[0]]
    for t in tokens[1:]:
        if(not is_token_subtype(t[0], Token.Literal.String)):
            newTokens.append(t)
        elif(not is_token_subtype(newTokens[-1][0], Token.Literal.String)):
            # Start of a new string run; later string tokens in the run are skipped.
            newTokens.append(t)
    return newTokens
#Reduce the strings to " ", single characters, and <str> for everything else.
#def collapseStringsThreeTypes(tokens):
# newTokens = []
# newTokens.append(tokens[0])
# for t in tokens[1:]:
#Ensure that all string tokens have spaces just inside the opening and
#closing double quote.
def spaceString(strToken):
    """Insert one space after the opening '"' and one before the closing '"'."""
    text = strToken[1]
    assert(text[0] == "\"")
    assert(text[-1] == "\"")
    padded = text[0] + " " + text[1:-1] + " " + text[-1]
    return (strToken[0], padded)
#Collapse the numbers, but retain numbers where the value is an integer close to 0.
def keepSmallNumToken(numToken):
    """Keep literal 0-3 (integer or float) tokens verbatim; abstract all others.

    Because signs are lexed as separate operator tokens, -1..-3 end up retained
    as well (the digits alone are what this function sees).
    """
    try:
        if(numToken[0] == Token.Literal.Number.Integer and int(numToken[1]) in [0,1,2,3]): #The way this is treated means -1,-2,-3 will be retained to
            return numToken
        elif(numToken[0] == Token.Literal.Number.Float and float(numToken[1]) in [0,1,2,3]): #Retain floating point ones too.
            return numToken
        else:
            return singleNumberToken(numToken)
    except (ValueError, TypeError):
        # Unparseable literal text (e.g. suffixes like '3L'): abstract it.
        # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
        return singleNumberToken(numToken)
#Currently can handle Hex, Float, Integer, Oct, Bin types; anything else becomes <num>.
def singleNumberToken(numToken):
    """Map a number token to the abstract placeholder for its numeric kind."""
    placeholders = {
        Token.Literal.Number.Integer: "<int>",
        Token.Literal.Number.Float: "<float>",
        Token.Literal.Number.Oct: "<oct>",
        Token.Literal.Number.Bin: "<bin>",
        Token.Literal.Number.Hex: "<hex>",
    }
    return (numToken[0], placeholders.get(numToken[0], "<num>"))
def singleNameToken(nameToken):
    """Map a name/type token to its abstract placeholder (e.g. "<class>").

    Decorator, builtin and exception names are kept verbatim; genuinely
    unrecognized name types are reported and returned unchanged.
    """
    if(nameToken[0] == Token.Name):
        return(nameToken[0], "<name>")
    elif(nameToken[0] == Token.Name.Class):
        return(nameToken[0], "<class>")
    elif(nameToken[0] == Token.Name.Namespace):
        return(nameToken[0], "<namespace>")
    elif(nameToken[0] == Token.Name.Function):
        return(nameToken[0], "<function>")
    elif(nameToken[0] == Token.Name.Attribute):
        return(nameToken[0], "<attribute>")
    elif(nameToken[0] == Token.Name.Label):
        return(nameToken[0], "<label>")
    elif(nameToken[0] == Token.Keyword.Type):
        return(nameToken[0], "<type>")
    elif(nameToken[0] == Token.Name.Variable):
        return(nameToken[0], "<variable>")
    elif(nameToken[0] == Token.Name.Decorator or nameToken[0] == Token.Name.Builtin or nameToken[0] == Token.Name.Exception):
        # BUG FIX: the original tested the always-truthy constant
        # Token.Name.Exception[0], which made this branch match every
        # remaining token type and left the else-branch unreachable.
        return nameToken
    else: #Not a name?
        print("Name Conversion - Unrecognized Type:")
        print(nameToken)
        return nameToken
#Not really needed, the problem is coming from minimized JS code...
#def mergeDollarSign(tokens):
# newTokens = []
# dollarFound = False
# for t in tokens:
# if(t[0] == Token.Name.Other):
# dollarFound = True
# elif(dollarFound):
# assert(is_token_subtype(t, Token.Name))
# newTokens.append(t[0], "$" + t[1])
# dollarFound = False
# else:
# newTokens.append(t)
# return newTokens
#string string -> string
def convertNamespaceToken(text, tokenType):
    """Explode a space-separated namespace string into "<piece|type>" markers.

    Example: "org . apache . xalan" with type Token.Name.Namespace becomes
    "<org|Token.Name.Namespace> <.|Token.Punctuation> <apache|Token.Name.Namespace>
     <.|Token.Punctuation> <xalan|Token.Name.Namespace>".
    Note: in Clojure this shape can also appear in Token.Name.Variable tokens.
    """
    labeled = []
    for piece in text.split(" "):
        if piece == ".":
            labeled.append("<.|Token.Punctuation>")
        else:
            labeled.append("<" + piece + "|" + tokenType + ">")
    return " ".join(labeled)
def convertNamespaceTokens(tokens, language):
    """Split dotted namespace tokens into their name pieces, joined by
    '.' punctuation tokens; all other tokens pass through unchanged."""
    newTokens = []
    for t in tokens:
        #Clojure may need this for functions too?
        if(t[0] == Token.Name.Namespace or t[0] == Android.Namespace):
            pieces = t[1].split(".")
            for idx, piece in enumerate(pieces):
                newTokens.append((t[0], piece))
                # Re-insert the separating dot between consecutive pieces.
                if idx < len(pieces) - 1:
                    newTokens.append((Token.Punctuation, "."))
        else:
            newTokens.append(t)
    return newTokens
#In general, some of the values returned by the pygments lexer don't
#compare well with other languages or fit into the categories we
#want. This general function to fix all the issues observed in the data.
def fixTypes(tokens, language):
    """Remap language-specific Pygments token types so categories are
    comparable across languages. Supported: Java (no-op), Haskell, Ruby,
    Clojure, C; any other language is returned unchanged with a message."""
    newTokens = []
    i = 0
    if(language == "Java"):
        #In java we:
        #1)Remap the boolean and null? keywords to be literals. (done in mergeEntropy as these and only these are Token.Keyword.Constant
        #2)What about Decorators? I think this is okay b/c these are mapped to single unique type like the boolean literals are.
        return tokens
    elif(language == "Haskell"):
        #1) Anonymous functions have name "\" and the next sequence of word characters are the ARGUMENTS to it
        #Prem recommends treating this as an operator.
        #2) Remap true and false with Token.Keyword.Type to Token.Keyword.Constant (then it will be remapped with Java's later).
        #3) Fix keywords (often from the haskell language extensions) forall, foreign, family, mdo, proc, and rec
        #This will change if they are Token.Name and Not Token.Name.Function. rec is skipped b/c it was observed to often be just a
        #a normal name
        #family must come after data or type keyword
        #foreign + proc seem to be the only ones in the base set.
        #4) Relabel the Keyword.Types that are purely non word characters.
        while i < len(tokens):
            #print(i)
            if(tokens[i][0] == Token.Keyword.Type and tokens[i][1].strip() in ("True", "False")):
                newTokens.append((Token.Keyword.Constant, tokens[i][1]))
            elif(tokens[i][0] == Token.Name.Function and tokens[i][1].strip() == "\\"):
                newTokens.append((Token.Operator, tokens[i][1]))
            elif(tokens[i][0] == Token.Keyword.Type and re.match("^[\W]+$", tokens[i][1].strip()) != None): #[], :, :+ :~: observed
                newTokens.append((Token.Name.Builtin, tokens[i][1]))
            elif(tokens[i][0] == Token.Name and (tokens[i][1].strip() in ("proc", "forall", "mdo"))):
                newTokens.append((Token.Keyword, tokens[i][1]))
            elif(tokens[i][0] == Token.Name.Function and tokens[i][1].strip() == "foreign"):
                newTokens.append((Token.Keyword, tokens[i][1]))
            elif(tokens[i][1].strip() == "family" and i >= 2):
                #print(tokens[i-5:i+5])
                # 'family' is a keyword only directly after 'data' or 'type'
                # (i-2 skips the intervening whitespace token).
                if(tokens[i-2][1].strip() == "data" or tokens[i-2][1].strip() == "type"):
                    newTokens.append((Token.Keyword, tokens[i][1]))
                else:
                    newTokens.append(tokens[i])
            elif(tokens[i][1].strip() == "null" and tokens[i][0] == Token.Name):
                newTokens.append((Token.Keyword.Constant, tokens[i][1]))
            else:
                newTokens.append(tokens[i])
            i += 1
        return newTokens
    elif(language == "Ruby"):
        #1) Remap true and false with Token.Keyword.Pseudo to Token.Keyword.Constant
        #2) Remap the following to Token.Keyword:
        #__ENCODING__ is Token.Name
        #__END__ is Token.Name.Constant
        #__FILE__ is Token.Name.Builtin.Psuedo
        #__LINE__ is Token.Name.Builtin.Psuedo
        #Remap Token.Name.Builtin to Token.Name
        #Move true, false, nil to Token.Keyword.Constant (Keyword/Literal)
        while i < len(tokens):
            if(tokens[i][1].strip() == "__ENCODING__" or tokens[i][1].strip() == "__END__" or tokens[i][1].strip() == "__FILE__" or tokens[i][1].strip() == "__LINE__"):
                newTokens.append((Token.Keyword, tokens[i][1]))
            elif(tokens[i][0] == Token.Name.Builtin):
                newTokens.append((Token.Name, tokens[i][1]))
            elif(tokens[i][0] == Token.Keyword.Pseudo and tokens[i][1].strip() in ("nil", "true", "false")):
                newTokens.append((Token.Keyword.Constant, tokens[i][1]))
            else:
                newTokens.append(tokens[i])
            i += 1
        return newTokens
    elif(language == "Clojure"):
        #1)Split / and . in Variables
        #2)Fix booleans and nil to Token.Keyword.Constant
        #3)Split Token.Name.Builtin into operators and Names.
        #4)=> Is a Midje (test suite) operator, --> and ->> are clojure macros. Is calling them operators is more fair?
        #Also % or %1, %2, etc are placeholders for anonymous functions. Keep them as Names too?
        #5)Add in the rest of the special forms. Avoid Token.Name.Variable designations as they are not the special forms.
        while i < len(tokens):
            if(tokens[i][0] == Token.Name.Variable and tokens[i][1].strip() in ("nil", "true", "false")):
                newTokens.append((Token.Keyword.Constant, tokens[i][1]))
            elif(tokens[i][0] == Token.Name.Builtin):
                if(tokens[i][1].strip() in ("*", "+", "-", "->", "..", "/", "<", "<=", "=","==", ">", ">=")):
                    newTokens.append((Token.Operator, tokens[i][1]))
                else:
                    newTokens.append((Token.Name, tokens[i][1]))
            elif(tokens[i][0] == Token.Name.Function and tokens[i][1].strip() in ("recur", "set!", "moniter-enter", "moniter-exit", "throw", "try", "catch", "finally")):
                newTokens.append((Token.Keyword, tokens[i][1]))
            elif(is_token_subtype(tokens[i][0], Token.Name) and "/" in tokens[i][1]):
                #print("SPLIT ME!")
                # namespace/name form: split on '/' into punctuation-joined pieces
                pieces = tokens[i][1].split("/")
                newTokens.append((tokens[i][0], pieces[0]))
                for p in pieces[1:]:
                    newTokens.append((Token.Punctuation, "/"))
                    newTokens.append((tokens[i][0], p))
            elif(is_token_subtype(tokens[i][0], Token.Name) and "." in tokens[i][1][1:-1]): #contains a dot inside, not at edges
                pieces = tokens[i][1].split(".")
                newTokens.append((tokens[i][0], pieces[0]))
                for p in pieces[1:]:
                    newTokens.append((Token.Punctuation, "."))
                    newTokens.append((tokens[i][0], p))
            elif(is_token_subtype(tokens[i][0], Token.Name) and tokens[i][1].strip() in ("=>", "->>", "-->")):
                newTokens.append((Token.Operator, tokens[i][1]))
            else:
                newTokens.append(tokens[i])
            i += 1
        return newTokens
    elif(language == "C"):
        while i < len(tokens):
            #Remap Name.Builtin to Literals
            if(tokens[i][0] == Token.Name.Builtin):
                newTokens.append((Token.Literal, tokens[i][1]))
            else:
                newTokens.append(tokens[i])
            i += 1
        return newTokens
    else: #No remapping for other languages yet
        print("No type remap for this language implemented")
        return tokens
def insertToApiDict(packages, api_package, api_class, api_method):
    """Register api_method under packages[api_package][api_class], creating
    the intermediate dict/list as needed, and return the (mutated) dict."""
    classes = packages.setdefault(api_package, {})
    classes.setdefault(api_class, []).append(api_method)
    return packages
#Read in the csv file with the android api list.
#csv file should be in decreasing order of package string length.
def parseAndroidApis():
    """Load the Android API csv (package;class;method rows) into an
    OrderedDict of {package: {class: [methods]}}."""
    packages = OrderedDict()
    with open(Android.ANDROID_API_FILE, 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=';', quotechar='\"'):
            (api_package, api_class, api_method) = row
            packages = insertToApiDict(packages, api_package, api_class, api_method)
    return packages
#If we've merged a token and its type like <"Token"|Type>, return "Token".
def removeLabel(tokenString):
    """Strip the "<text|Token.X>" wrapper and return just the text part;
    unlabeled strings are returned unchanged."""
    if "|Token." not in tokenString:
        return tokenString #Ignore unlabeled tokens
    inner = tokenString[1:-1]
    return inner[:inner.rfind("|Token")]
#list of tokens -> list of tokens
#Given a list of tokens (label, string), from pygments, read in a file of android
#api information and change the token labels of all android api references to
#Android.*
def labelAndroidTypes(tokens):
    """Relabel tokens that reference the Android API as Android.* types.

    Packages seen in namespace tokens (assumed to appear first, i.e. imports)
    gate which class/method names are considered Android references later in
    the stream.
    """
    #Read in File (assumed to be a csv file of "package, file, method")
    androidDict = parseAndroidApis() #Dict of Dict (key1 = package, key2 = file)
    #Check references only from the packages loaded in the imports (assumed to be first)
    validPackages = [] #List of imported packages
    newTokens = []
    for t in tokens:
        # 'found' records whether this token was already appended (relabelled
        # or passed through); if no branch matched, it is appended unchanged.
        found = False
        if(t[0] == Token.Name.Namespace):
            for package in androidDict.keys():
                if(package in t[1]):
                    validPackages.append(package)
                    newTokens.append((Android.Namespace, t[1]))
                    found = True
                    break
        elif(t[0] == Token.Name): #If in valid packages and in android Dict, relabel to Android.*
            #Looking for classes here. This covers things referenced in code and extended classes
            #e.g. "class <A|Token.Name.Class> extends <B|Token.Name>"
            for package in validPackages:
                if(t[1] in androidDict[package]):
                    newTokens.append((Android.Name, t[1]))
                    found = True
                    break
        elif(t[0] == Token.Name.Function or t[0] == Token.Name.Attribute):
            # Method names: search every class of every imported package.
            for package in validPackages:
                for api_class in androidDict[package]:
                    if(t[1] in androidDict[package][api_class]):
                        newTokens.append((Android.Function, t[1]))
                        found = True
                        break
                if(found):
                    break
        else:
            newTokens.append(t)
            found = True
        if(not found):
            newTokens.append(t)
    return newTokens
#list of tokens -> set of definitions
#Given a list of tokens created by pygments, for each token
#marked with the function label, identify if it is a function
#definition. Return the list of tokens with the new label
#breaking them up into definitions and calls, along with a
#set of new function definitions
def getFunctionDefinitions(tokens, language):
    """Collect the set of function-definition tokens for a file.

    NOTE(review): the set stores full (type, text) token tuples, while
    relabelFunctions() tests bare token *text* against it -- confirm which
    representation callers expect, the two look inconsistent.
    NOTE(review): ``Set`` appears to be the Python 2 ``sets.Set``; confirm
    the interpreter version / import.
    """
    #TODO:
    #What patterns signify a function definition?
    #--------------------------------------- Java ---------------------------------------
    #YES: Token.Keyword.Type -> Token.Name.Function
    #NO: Token.Keyword.Operator -> Token.Name.Function.
    #NO: Token.Operator -> Token.Name.Function.
    #YES: Token.Keyword.Declaration, Token.Name -> Token.Name.Function (function with more complex type)
    #Constructor: Token.Keyword.Declaration -> Token.Name.Function
    #What other possbilities are there?
    #Others: (YES) Token.Operator, Token.Name -> Token.Name.Function
    #(YES) Token.Name.Decorator, Token.Name -> Token.Name.Function
    # (YES) (Token.Operator, u'.') (Token.Name.Attribute, u'Unsafe') (Token.Name.Function, u'getUnsafe') (YES?)
    # --------------------------------------- Haskell ---------------------------------------
    # ???
    definitions = Set()
    if(language.lower() == "java"):
        for i in range(0, len(tokens)):
            if(tokens[i][0] == Token.Name.Function):
                if(tokens[i-1][0] == Token.Name or tokens[i-1][0] == Token.Keyword.Type or tokens[i-1][0] == Token.Name.Attribute or tokens[i-1][0] == Token.Keyword.Declaration):
                    definitions.add(tokens[i])
                elif(tokens[i-1][0] != Token.Keyword.Operator and tokens[i-1][0] != Token.Operator):
                    # Debug output for unrecognized preceding-token patterns.
                    # NOTE(review): tokens[i+1]/tokens[i+2] can raise IndexError
                    # near the end of the stream -- confirm inputs always end
                    # with trailing non-function tokens.
                    print("Not Found")
                    print(str(tokens[i-2]) + " " + str(tokens[i-1]) + " " + str(tokens[i]) + " " + str(tokens[i+1]) + " " + str(tokens[i+2]))
    elif(language.lower() == "haskell"):
        for i in range(0, len(tokens)):
            if(tokens[i][0] == Token.Name.Function):
                # Exploratory debug prints; Haskell detection is unfinished.
                print(str(tokens[i][0]) + " " + str(tokens[i+1][0]) + " " + str(tokens[i+2][0]) + " " + str(tokens[i+3][0]) + " " + str(tokens[i+4][0]))
                print(str(tokens[i][1]) + " " + str(tokens[i+1][1]) + " " + str(tokens[i+2][1])+ " " + str(tokens[i+3][1]) + " " + str(tokens[i+4][1]))
                # Function followed by '::' token must be definition, should there be anymore, or just group the names rather than calls?
                # No, problem is that in haskell, these can be variables too. So must also have a -> further on? (Lexer mistakenly labels these as function types too...
    #print(definitions)
    return definitions
#list of tokens, set of definitions, string -> list of tokens
#Given a file's list of tokens where the function labels
#have been divided as per getFunctionDefinitions, the language of the corpus, and the
#set of all functions defined in this project, relabel all
#function calls as being from either inside or outside the
#project. (e.g. what function calls are from external libraries).
#TODO: Handle more than Java
#TODO: I see function calls being labelled as NAME, not function. This is a problem.
#Convert string sequences of "Token.Name, ("
def relabelFunctions(tokens, funcDefinitions, language):
    """Relabel function tokens as Api.Definition / Api.Internal / Api.External.

    NOTE(review): membership tests use token *text* (tokens[i][1]) while
    getFunctionDefinitions() stores full token tuples -- confirm the expected
    element type of funcDefinitions, they look inconsistent.
    """
    newTokens = []
    # j tracks how far ahead a dotted-constructor rewrite consumed tokens.
    j = 0
    if(language.lower() == "java"):
        for i in range(0, len(tokens)):
            if(i <= j): #Skip ahead if we did a rewrite of a constructor with "."'s in it.
                continue
            if(tokens[i][0] == Token.Name.Function):
                # Preceded by a type/name/declaration => this is a definition.
                if(tokens[i-1][0] == Token.Name or tokens[i-1][0] == Token.Keyword.Type or tokens[i-1][0] == Token.Name.Attribute or tokens[i-1][0] == Token.Keyword.Declaration):
                    newTokens.append((Api.Definition , tokens[i][1]))
                elif(tokens[i-1][0] == Token.Keyword.Operator or tokens[i-1][0] == Token.Operator):
                    # Preceded by an operator (e.g. '.') => a call; classify by
                    # whether the name was defined in this project.
                    if(tokens[i][1] in funcDefinitions):
                        newTokens.append((Api.Internal, tokens[i][1]))
                    else:
                        newTokens.append((Api.External, tokens[i][1]))
                else:
                    print("Not recognized.")
                    print(tokens[i-1])
                    quit()
            elif(tokens[i][0] == Token.Name and tokens[i-1][1] == "new"): #Constructor Case
                #print("Constructor Case")
                j = i
                while(tokens[j + 2][0] == Token.Name.Attribute): #Deal with constructor calls like com.google.common.primitives.ByteTest
                    j += 2
                if(j != i):
                    # Dotted constructor: classify by the final name segment,
                    # then relabel every segment with that classification.
                    newType = ""
                    if(tokens[j][1] in funcDefinitions):
                        newType = Api.Internal
                    else:
                        newType = Api.External
                    for k in range(i, j+1):
                        if(tokens[k][0] == Token.Name.Attribute or tokens[k][0] == Token.Name):
                            newTokens.append((newType, tokens[k][1]))
                        elif(tokens[k][0] == Token.Operator):
                            newTokens.append(tokens[k])
                        else:
                            print("Not valid type (relabelFunctions): " + str(tokens[k]))
                            quit()
                else:
                    #print(tokens[i])
                    if(tokens[i][1] in funcDefinitions):
                        newTokens.append((Api.Internal, tokens[i][1]))
                    else:
                        newTokens.append((Api.External, tokens[i][1]))
            elif(is_token_subtype(tokens[i][0], Token.Name) and tokens[i+1][1] == "("): #Multiple name types can could be functions
                #print(tokens[i-2][1] + " " + tokens[i-1][1] + " " + tokens[i][1] + " " + tokens[i+1][1] + " " + tokens[i+2][1])
                #print(" ".join([str(tokens[i-2][0]), str(tokens[i-1][0]), str(tokens[i][0]), str(tokens[i+1][0]), str(tokens[i+2][0])]))
                #newTokens.append(tokens[i])
                if(tokens[i][1] in funcDefinitions):
                    newTokens.append((Api.Internal, tokens[i][1]))
                else:
                    newTokens.append((Api.External, tokens[i][1]))
            #elif(tokens[i][0] == Token.Name):
            #    print(tokens[i-2][1] + " " + tokens[i-1][1] + " " + tokens[i][1] + " " + tokens[i+1][1] + " " + tokens[i+2][1])
            #    print(" ".join([str(tokens[i-2][0]), str(tokens[i-1][0]), str(tokens[i][0]), str(tokens[i+1][0]), str(tokens[i+2][0])]))
            #    newTokens.append(tokens[i])
            else:
                newTokens.append(tokens[i])
    elif(language.lower() == "haskell"):
        print("Not supported yet.")
        quit()
        #for i in range(0, len(tokens)):
        #    if(tokens[i][0] == Token.Name.Function):
        #        print(str(tokens[i-2]) + " " + str(tokens[i-1]) + " " + str(tokens[i]) + " " + str(tokens[i+1]) + " " + str(tokens[i+2]))
    else:
        print("Not supported yet.")
        quit()
    return newTokens
| true |
baded4193b221c504106fedd12c7efcbd75883da | Python | qxjl1010/classification_task | /SVM.py | UTF-8 | 1,482 | 3.15625 | 3 | [] | no_license | from sklearn import svm
import pandas as pd
import numpy as np
import random
# using SVM
# for details, visit:
# https://scikit-learn.org/stable/modules/svm.html#regression
# since the dataset has more than 280k class_0 and only 492 class_1
# we need to extract the same number of class_0 as class_1
def get_0(raw_array, limit=492):
    """Return a class-balanced numpy array of rows.

    Keeps every row whose last element (the class label) equals 1, and at
    most `limit` rows labelled 0, preserving input order.

    :param raw_array: iterable of rows; each row's last element is the label
    :param limit: maximum number of class-0 rows to keep; defaults to 492,
                  the number of class-1 rows in this dataset (see check_class.py)
    :returns: numpy array of the selected rows
    """
    class_0 = 0
    balance_array = []
    for line in raw_array:
        if line[-1] == 0 and class_0 < limit:
            balance_array.append(line)
            class_0 += 1
        elif line[-1] == 1:
            balance_array.append(line)
    return np.asarray(balance_array)
# read csv file of pre-processed fraud data (last column is the class label)
data = pd.read_csv('fraud_prep.csv')
array = data.values
# get clean dataset
array = get_0(array)
# get target and features
target = array[:,-1]
features = array[:,:-1]
# shuffle dataset: build a random permutation of indices, then reorder both
# features and target identically by sorting on those indices
li=list(range(len(target)))
random.shuffle(li)
shuffled_features = [x for _,x in sorted(zip(li,features))]
shuffled_target = [x for _,x in sorted(zip(li,target))]
# get SVM model and train
# you can choose classifier in document:SVC, NuSVC and LinearSV
clf = svm.SVC(gamma=0.001)
# here again, I didn't cut trainning and testing data, I use all data to train
# NOTE(review): evaluating on training data only; consider a train/test split.
clf.fit(shuffled_features, shuffled_target)
# you can predict here
# need a function compare predict value and real result and calculate the accuracy
clf.predict([array[20][:-1]])
30aea9a04f83716ae7ffee23c29a05924fd865cc | Python | kunwarmahen/CarND-Advanced-Lane-Lines | /camera.py | UTF-8 | 1,389 | 2.6875 | 3 | [] | no_license | import glob
import numpy as np
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class Camera:
    """Camera calibration helper: computes and applies lens-distortion
    correction from chessboard calibration images."""
    def __init__(self):
        # camera matrix and distortion coefficients, filled in by
        # calibirateCamera(); None until calibration has been run
        self.mtx = None;
        self.dist = None;
    def calibirateCamera(self):
        """Compute self.mtx/self.dist from the camera_cal/ chessboard images
        (9x6 inner corners)."""
        # Read in all the calibration images
        images = glob.glob('camera_cal/calibration*.jpg')
        objpoints = []
        imgpoints = []
        nx = 9
        ny = 6
        # object points: the (x, y, 0) grid of the ideal chessboard corners
        objp = np.zeros((ny*nx, 3), np.float32)
        objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
        for fname in images:
            img = mpimg.imread(fname)
            gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (nx,ny),None)
            if ret==True:
                imgpoints.append(corners)
                objpoints.append(objp)
                cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
        # NOTE(review): 'img' here is the last image from the loop; this
        # raises NameError if no calibration images were found -- confirm
        # camera_cal/ is always populated.
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    def undistort(self,img):
        """Return img with lens distortion removed (requires prior calibration)."""
        return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
    def display(self, show=False):
        """When show is True, plot a sample image next to its undistorted version."""
        if show == True:
            img = cv2.imread('camera_cal/calibration2.jpg')
            undst = self.undistort(img)
            f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,20))
            ax1.imshow(img)
            ax1.set_title('Original Image', fontsize=30)
            ax2.imshow(undst)
            ax2.set_title('Undistorted Image', fontsize=30)
2c8f153a35a293403a65e174ec2874e599a177e6 | Python | yuma3496/Capstone_Project_3 | /task_manager.py | UTF-8 | 16,390 | 3.078125 | 3 | [] | no_license | from datetime import date
from datetime import datetime as dt
# Helper functions
def read_txt_file(filename):
    """Return every line of the file (trailing newlines preserved) as a list."""
    with open(filename, 'r') as fh:
        return fh.readlines()
def write_txt_file_with_line_number(line_no, values):
    """Overwrite line `line_no` (0-based) of tasks.txt with `values`.

    `values` must carry its own trailing newline when replacing a non-final
    line; the remaining lines are written back unchanged.
    """
    lines = read_txt_file('tasks.txt')
    lines[line_no] = values
    with open('tasks.txt', 'w') as fh:
        fh.writelines(lines)
def set_user_dict(user_file):
    """Build a {username: password} dict from "user, password" lines."""
    users_dict = {}
    for line in user_file:
        fields = line.split(', ')
        # non-final lines keep the '\n' from readlines(); strip it off
        users_dict[fields[0]] = fields[1].replace("\n", "")
    return users_dict
def set_task_list(task_file):
    """Parse task lines into lists of fields, appending each line's 0-based
    number so edits can later be written back in place."""
    tasks_list = []
    for count, task in enumerate(task_file):
        fields = task.split(', ')
        # strip the trailing newline off the final field, then record the line number
        entry = fields[:-1] + [fields[-1].replace("\n", ""), count]
        tasks_list.append(entry)
    return tasks_list
# it will check which user is logging in
def authenticate(users_dict):
    """Prompt until a known username and its matching password are entered.

    :param users_dict: {username: password} mapping from user.txt
    :returns: the authenticated username
    """
    while True:
        username = input("Enter your username: ")
        if username in users_dict.keys():
            # username exists: keep prompting for the password until it matches
            while True:
                password = input("Enter your password: ")
                if password == users_dict[username]:
                    print("************Welcome to Task Management Application*************")
                    current_user = username
                    break
                else:
                    print("***Your Password is not right!***")
            # a correct password always precedes this point, so current_user is set
            break
        else:
            print("***Your username is not right!***")
    return current_user
# Append given text as a new line at the end of file
def append_new_line(file_name, text_to_append):
    """Append text_to_append so it starts on its own line at the end of the file.

    A leading newline is written first unless the file is empty; no trailing
    newline is added after the text.
    """
    with open(file_name, "a+") as fh:
        # peek at the start of the file to see whether it already has content
        fh.seek(0)
        if fh.read(100):
            fh.write("\n")
        # 'a+' mode always appends writes regardless of the read cursor
        fh.write(text_to_append)
# this helper method is using for editing lines in txt files
def editing_text(mode, values, tasks_list, task, lookup, option):
    """Rewrite one task line of tasks.txt according to the edit mode.

    :param mode: edit kind; only the first character matters:
                 'm' = mark completed flag, 'u' = reassign user,
                 'd' = change due date, 'b' = both user and due date
    :param values: replacement value(s); values[0] (and values[1] for 'b')
    :param tasks_list: full parsed task list (each entry ends with its line no)
    :param task: the task entry being edited
    :param lookup: mapping from menu option to tasks.txt line number
    :param option: the menu option the user chose (key into lookup)
    """
    # below if to check if the line to change is last or not -- the final
    # line of tasks.txt has no trailing '\n', so it is rebuilt without one
    if task[-1] == tasks_list[-1][-1]:
        if mode[0] == 'm': # this m is for if the editing is to mark if the the task is completed or not
            text_to_write = "" + task[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + task[
                4] + ", " + values[0] + ""
        elif mode[0] == 'u': # u is for if the task assigned to user has to change
            text_to_write = "" + values[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + task[
                4] + ", " + task[5] + ""
        elif mode[0] == 'd': # d is for if the due date of task has to change
            text_to_write = "" + task[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + values[
                0] + ", " + task[5] + ""
        elif mode[0] == 'b': # b is for if both the assigned username and due date of task has to change
            text_to_write = "" + values[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + values[
                1] + ", " + task[5] + ""
        write_txt_file_with_line_number(lookup[option], text_to_write)
    else: # non-final line: same rebuild but with a trailing '\n'
        # print("temp=", temp, "task_list=", tasks_list[dict_lookup[option_first]])
        if mode[0] == 'm':
            text_to_write = "" + task[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + task[
                4] + ", " + values[0] + "\n"
        elif mode[0] == 'u':
            text_to_write = "" + values[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + task[
                4] + ", " + task[5] + "\n"
        elif mode[0] == 'd':
            text_to_write = "" + task[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + values[
                0] + ", " + task[5] + "\n"
        elif mode[0] == 'b':
            text_to_write = "" + values[0] + ", " + task[1] + ", " + task[2] + ", " + task[3] + ", " + values[
                1] + ", " + task[5] + "\n"
        if task[-1] == tasks_list[0][-1]:
            # first line of the file is always at index 0
            write_txt_file_with_line_number(0, text_to_write)
        else:
            # any middle line: look up its number from the chosen option
            write_txt_file_with_line_number(lookup[option], text_to_write)
    return
# this method is to update the lists whenever the menu is opened
def update_data():
    """Reload users and tasks from disk; returns (users_dict, tasks_list)."""
    users = set_user_dict(read_txt_file('user.txt'))
    tasks = set_task_list(read_txt_file('tasks.txt'))
    return users, tasks
# Menu-action functions used by the main loop below.
def reg_user(user_dict):
    """Interactively register a new user; returns a status message string.

    user_dict -- existing users, keyed by username (used for uniqueness).
    """
    new_name = input("Enter new username: ")
    # Keep prompting until a username that is not already taken is given.
    while new_name in user_dict:
        print("User name already exits! Try again")
        new_name = input("Enter new username: ")
    new_pass = input("Enter new password: ")
    confirmation = input("Enter the password again to confirm: ")
    if new_pass != confirmation:
        return "***User failed to register because passwords were not confirmed***"
    # Persist the new credentials as "username, password".
    append_new_line('user.txt', ', '.join([new_name, new_pass]))
    return "***User registered!!***"
def add_task():
    """Interactively collect a task and append it to tasks.txt.

    The stored row is: user, title, description, due date, assigned date
    (today), completed flag.  Returns a status message string.
    """
    assignee = input("Enter the username to whom you want to assign task: ")
    title = input("Enter the title of task: ")
    description = input("Enter the description of task: ")
    due = input("Enter the due date(format is dd-mm-yyyy): ")
    assigned_on = date.today().strftime("%d-%m-%Y")
    done = input("Enter 'yes' if task is completed otherwise 'no': ")
    record = [assignee, title, description, due, assigned_on, done]
    append_new_line('tasks.txt', ', '.join(record))
    return "***Task added successfully!***"
def view_all(tasks_list):
    """Print a one-line summary for every task in *tasks_list*."""
    for row in tasks_list:
        assignee, title, desc, due, assigned, done = row[0], row[1], row[2], row[3], row[4], row[5]
        print("Assigned to: ", assignee, ", Title: ", title, ", Description: ", desc,
              ", Due date: ", due, ", Assigned Date: ", assigned, ", Completed: ", done)
def view_mine(tasks_list, current_user):
    """List the current user's tasks and optionally mark or edit one.

    tasks_list   -- all task rows; each row's last element is assumed to be
                    its index/line number in tasks.txt — TODO confirm
                    against set_task_list.
    current_user -- username whose tasks are shown.
    """
    count = 1
    # dict_lookup maps the task number shown on screen -> that row's
    # index/line number (task[-1]) so the edit helpers can address it.
    dict_lookup = {}
    for task in tasks_list:
        if task[0] == current_user:
            dict_lookup[count] = task[-1]
            print("Task number: ", count, ", Assigned to: ", task[0], ", Title: ", task[1], ", Description: ", task[2],
                  ", Due date: ", task[3], ", Assigned Date: ", task[4], ", Completed: ", task[5])
            count += 1
    option_first = int(input("Please select the task number or enter -1 to return to menu: "))
    if option_first == -1:
        return
    # NOTE(review): 'count' ends one past the last shown task number, so
    # option_first == count passes this bound check and raises KeyError in
    # dict_lookup below — confirm whether '>' was intended.
    elif count >= option_first > 0:
        option_second = input("Enter 'm' for marking the task or 'e' for editing the task: ")
        if option_second == 'm':
            # Mark the selected task completed / not completed.
            mark = input("Please enter 'yes' if task is completed or 'no': ")
            temp = tasks_list[dict_lookup[option_first]]
            editing_text(mode='m', values=[mark], tasks_list=tasks_list, task=temp, lookup=dict_lookup,
                         option=option_first)
        elif option_second == 'e':
            # Edit the assignee, the due date, or both.
            option_third = input("Whether you want to change the username 'u' or due date 'd' or both 'b': ")
            if option_third == 'u':
                username = input("Please enter the username to shift task: ")
                temp = tasks_list[dict_lookup[option_first]]
                editing_text(mode='u', values=[username], tasks_list=tasks_list, task=temp, lookup=dict_lookup,
                             option=option_first)
            elif option_third == 'd':
                due_date = input("Please enter the due date(dd-mm-yyyy): ")
                temp = tasks_list[dict_lookup[option_first]]
                editing_text(mode='d', values=[due_date], tasks_list=tasks_list, task=temp, lookup=dict_lookup,
                             option=option_first)
            elif option_third == 'b':
                username = input("Please enter the username to shift task: ")
                due_date = input("Please enter the due date(dd-mm-yyyy): ")
                temp = tasks_list[dict_lookup[option_first]]
                editing_text(mode='b', values=[username, due_date], tasks_list=tasks_list, task=temp,
                             lookup=dict_lookup, option=option_first)
def show_stats():
    """Print the generated task and user overview reports to the console.

    Reads task_overview.txt and user_overview.txt (written by
    generate_reports) and echoes them line by line; a missing file is
    reported instead of raising.

    Bug fix: the original had ``finally: f.close()`` after each handler —
    redundant because ``with`` already closes the file, and a NameError when
    the file was missing (the name was never bound).
    """
    try:
        print("***Task overview***")
        with open('task_overview.txt', 'r') as task_overview:
            for line in task_overview.readlines():
                print(line.replace("\n", ""))
    except FileNotFoundError:
        print("task_overview.txt is not accessible/available")
    try:
        print("***User overview***")
        with open('user_overview.txt', 'r') as user_overview:
            for line in user_overview.readlines():
                print(line.replace("\n", ""))
    except FileNotFoundError:
        print("user_overview.txt is not accessible/available")
    return
def generate_reports(users_list, tasks_list):
    """Write task_overview.txt and user_overview.txt summary reports.

    users_list -- dict keyed by username (only the keys are used here)
    tasks_list -- task rows: [user, title, desc, due, assigned, done, ...]

    Fixes over the original: the redundant ``.close()`` calls inside the
    ``with`` blocks were dropped, the overall percentages no longer divide
    by zero when no tasks exist, and the per-user overdue percentage gets
    its own variable instead of clobbering the overdue counter.
    """
    today = dt.strptime(date.today().strftime("%d-%m-%Y"), "%d-%m-%Y")
    total_tasks = len(tasks_list)
    tasks_generated = str(total_tasks)
    completed_tasks = 0
    uncompleted_tasks = 0
    uncompleted_and_overdue = 0
    for task in tasks_list:
        if task[5] == 'yes':
            completed_tasks += 1
        else:
            uncompleted_tasks += 1
        # Overdue = still open and due date strictly in the past.
        if task[5] == 'no' and dt.strptime(task[3], "%d-%m-%Y") < today:
            uncompleted_and_overdue += 1
    if total_tasks > 0:
        percentage_uncompleted = round((uncompleted_tasks / total_tasks) * 100, 2)
        percentage_overdue = round((uncompleted_and_overdue / total_tasks) * 100, 2)
    else:
        percentage_uncompleted = 0
        percentage_overdue = 0
    with open('task_overview.txt', 'w+') as task_overview:
        tasks_text_lines = [f"The total number of tasks that have been generated: {tasks_generated} \n",
                            f"The total number of completed task: {completed_tasks} \n",
                            f"The total number of uncompleted tasks: {uncompleted_tasks} \n",
                            f"The total number of uncompleted tasks and overdue: {uncompleted_and_overdue} \n",
                            f"The percentage of tasks that are incomplete: {percentage_uncompleted} \n",
                            f"The percentage of tasks that are overdue: {percentage_overdue}"]
        task_overview.writelines(tasks_text_lines)
    with open('user_overview.txt', 'w+') as user_overview:
        registered_users = len(users_list)
        users_text_lines = [f"The total number of registered users: {registered_users} \n",
                            f"The total number of tasks that have been generated: {tasks_generated} \n"]
        for key in users_list.keys():
            tasks_assigned = 0
            tasks_assigned_completed = 0
            tasks_assigned_uncompleted = 0
            tasks_assigned_uncompleted_overdue = 0
            for task in tasks_list:
                if task[0] == key:
                    tasks_assigned += 1
                if task[0] == key and task[5] == 'yes':
                    tasks_assigned_completed += 1
                if task[0] == key and task[5] == 'no':
                    tasks_assigned_uncompleted += 1
                if task[0] == key and task[5] == 'no' and dt.strptime(task[3], "%d-%m-%Y") < today:
                    tasks_assigned_uncompleted_overdue += 1
            if tasks_assigned > 0:
                percentage_tasks_assigned = round((tasks_assigned / len(tasks_list) * 100), 2)
                percentage_tasks_assigned_completed = round((tasks_assigned_completed / tasks_assigned * 100), 2)
                percentage_tasks_assigned_uncompleted = round((tasks_assigned_uncompleted / tasks_assigned * 100), 2)
                percentage_tasks_assigned_overdue = round(
                    (tasks_assigned_uncompleted_overdue / tasks_assigned * 100), 2)
            else:
                percentage_tasks_assigned = 0
                percentage_tasks_assigned_completed = 0
                percentage_tasks_assigned_uncompleted = 0
                percentage_tasks_assigned_overdue = 0
            string_user = f"The user: {key} \n"
            string_assigned = f"The percentage of total number of tasks assigned: {percentage_tasks_assigned} \n"
            string_completed = f"The percentage of total number of tasks completed: " \
                               f"{percentage_tasks_assigned_completed} \n"
            string_uncompleted = f"The percentage of total number of tasks uncompleted: " \
                                 f"{percentage_tasks_assigned_uncompleted} \n"
            string_uncompleted_overdue = f"The percentage of total number of tasks uncompleted and overdue: " \
                                         f"{percentage_tasks_assigned_overdue} \n"
            string_line = "----" * 5 + "\n"
            users_text_lines.append(string_user)
            users_text_lines.append(string_assigned)
            users_text_lines.append(string_completed)
            users_text_lines.append(string_uncompleted)
            users_text_lines.append(string_uncompleted_overdue)
            users_text_lines.append(string_line)
        user_overview.writelines(users_text_lines)
    return
if __name__ == "__main__":
    # Load persisted data, authenticate, then run the menu loop forever
    # until the user chooses 'e'.
    users, tasks = update_data()
    current_logged_in = authenticate(users)
    admin_choice = ""
    while True:
        # Re-read the files each pass so edits made this turn are visible.
        users, tasks = update_data()
        print("""
        You are logged in as '""" + current_logged_in + """'
        Please select one of the following options:
        r - register user
        a - add task
        va - view all tasks
        vm - view my tasks""")
        # Admin-only menu entries.
        if current_logged_in == 'admin':
            print("ds - show statistics")
            print("gn - generate reports")
        admin_choice = input("""
        e - exit
        Enter your choice: """)
        # Dispatch on the chosen command.
        if admin_choice == 'r':
            if current_logged_in == 'admin':
                print(reg_user(users))
            else:
                print("***You cannot register user because you are not admin!!!***")
        elif admin_choice == 'a':
            print(add_task())
        elif admin_choice == 'va':
            if len(tasks) > 0:
                print("Here are all the tasks: ")
                view_all(tasks)
            else:
                print("***There is no task available!***")
        elif admin_choice == 'vm':
            print("Here are all your tasks: ")
            view_mine(tasks, current_logged_in)
        elif admin_choice == 'ds':
            if current_logged_in == 'admin':
                show_stats()
            else:
                print("***You cannot perform this command because you are not admin!!!***")
        elif admin_choice == 'gn':
            if current_logged_in == 'admin':
                generate_reports(users, tasks)
            else:
                print("***You cannot perform this command because you are not admin!!!***")
        elif admin_choice == 'e':
            break
        else:
            print("***You have not entered a right command. Please choose from the above commands***")
    print("***Ending program GoodBye!!!***")
| true |
5252aaf5a8f241014f454261679ea471482c5356 | Python | DKU-STUDY/Algorithm | /codility_training/lessons.lesson08.Leader.Dominator/sangmandu.py | UTF-8 | 417 | 3.3125 | 3 | [] | no_license | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
    """Return an index of the dominator of A, or -1 if there is none.

    The dominator is the value occupying more than half of the positions;
    at most one such value can exist.  Cleanups over the original: the dead
    ``pass`` statement was removed and the hand-rolled frequency dict was
    replaced with collections.Counter.
    """
    from collections import Counter
    for value, occurrences in Counter(A).items():
        if occurrences > len(A) // 2:
            return A.index(value)
    return -1
# Bug fix: the original called solution(3,4,3,2,3,-1,3,3 == 0) — eight
# positional arguments (a TypeError) instead of one list.  Expected: 0.
print(
    solution([3, 4, 3, 2, 3, -1, 3, 3])
)
def replaceBlank(strList):
    """Return *strList* with every space replaced by '%20'."""
    return strList.replace(' ', '%20')


if __name__ == '__main__':
    print(replaceBlank('a b c d'))
2bbb529a7a917d27f367835cdda2e503deecc38b | Python | ms0695861/password | /pwd.py | UTF-8 | 305 | 3.59375 | 4 | [] | no_license | #Password retry
# Simple password gate: the user gets at most three attempts.
password = '123456a?'
i = 3 # Maximum number of attempts allowed.
while i > 0:
    # Consume one attempt before prompting.
    i = i - 1
    pwd = input('Please enter your password: ')
    if pwd == password:
        print('login sucess!')
        break
    elif i == 0:
        # Last attempt used up and still wrong.
        print('login failed')
    else:
        # Wrong password; report how many attempts remain.
        print('WRONG!! You have ', i, 'times chances')
| true |
abb8bac380c5eb5889272f15133e949c50f2f7ba | Python | jercas/offer66-leetcode-newcode | /toTheMoon/leetcode_014_LongestCommonPrefix.py | UTF-8 | 3,267 | 4.09375 | 4 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed May 15 15:30:41 2019
@author: jercas
"""
"""
leetcode-14: 最长公共前缀 EASY
'字符串'
编写一个函数来查找字符串数组中的最长公共前缀。
如果不存在公共前缀,返回空字符串 ""。
"""
"""
Thinking:
0.Python特性-字符串排序解法:Python中字符串按照ascII码排序,如sorted(['abb','aba','abac']) -> ['aba','abac','abb'] -> min:aba, max:abb;
在此基础上,只需要比较最大和最小的字符串的公共前缀即可。
1.Python特性-zip+set处理:首先zip(*)做解压处理,将strs中各个str拆分为按位置的单个字符组成的tuple,再利用set去重的效果使得如果该字符为
公共前缀的部分,则该tuple长度必为1,当大于1时说明前缀到此不一致
"""
class Solution(object):
    """Three equivalent strategies for LeetCode 14, longest common prefix."""

    def longestCommonPrefix1(self, strs):
        """Compare only the lexicographically smallest and largest strings.

        Any prefix shared by those two extremes is shared by every string
        in between, so a single character walk suffices.
        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ""
        lo, hi = min(strs), max(strs)
        for i in range(len(lo)):
            if lo[i] != hi[i]:
                return hi[:i]
        # The smallest string is itself a prefix of the largest.
        return lo

    def longestCommonPrefix2(self, strs):
        """Zip the strings column by column; a column holding more than one
        distinct character ends the common prefix.
        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ""
        prefix = ""
        for column in zip(*strs):
            if len(set(column)) > 1:
                break
            prefix += column[0]
        return prefix

    def longestCommonPrefix3(self, strs):
        """Scan positions up to the shortest string's length, stopping at
        the first column whose characters disagree.
        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ''
        shortest = min(len(s) for s in strs)
        prefix = ''
        for i in range(shortest):
            column = {s[i] for s in strs}
            if len(column) == 1:
                prefix += next(iter(column))
            else:
                return strs[0][:i]
        return prefix
if __name__ == "__main__":
Q = [["flower","flow","flight"], ["dog","racecar","car"], ["aca","cba"]]
A = ['fl', '', '']
solution = Solution()
for i in range(3):
if solution.longestCommonPrefix1(Q[i]) == A[i] and solution.longestCommonPrefix2(Q[i]) == A[i] \
and solution.longestCommonPrefix3(Q[i]) == A[i]:
print("The longest common prefix of {0} is '{1}'".format(Q[i], A[i]))
print("AC") | true |
8ac4321fce76a79dde959688c641dff1b52aeff3 | Python | bhargavpanth/Spark-Experiments | /movie_similarity.py | UTF-8 | 3,867 | 2.875 | 3 | [] | no_license | import sys
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType
from pyspark.sql import functions as func
spark = SparkSession.builder.appName('movie_similarities').master('local[*]').getOrCreate()
def movie_name_schema():
    """Schema for u.item rows: movie id and movie title."""
    fields = [
        StructField('movieID', IntegerType(), True),
        StructField('movieTitle', StringType(), True),
    ]
    return StructType(fields)
def movie_schema():
    """Schema for u.data rows: user, movie, rating and timestamp."""
    fields = [
        StructField('userID', IntegerType(), True),
        StructField('movieID', IntegerType(), True),
        StructField('rating', IntegerType(), True),
        StructField('timestamp', LongType(), True),
    ]
    return StructType(fields)
def movie_similarity(movie_pairs):
    """Cosine similarity per movie pair: sum(xy) / (sqrt(sum xx) * sqrt(sum yy)).

    movie_pairs -- dataframe with columns movie1, movie2, rating1, rating2.
    Returns a dataframe with columns movie1, movie2, score, num_pairs
    (num_pairs = number of users who rated both movies).
    """
    # Compute xx, xy and yy columns
    pair_wise_scores = movie_pairs \
        .withColumn('xx', func.col('rating1') * func.col('rating1')) \
        .withColumn('yy', func.col('rating2') * func.col('rating2')) \
        .withColumn('xy', func.col('rating1') * func.col('rating2'))
    calculate_similarity = pair_wise_scores \
        .groupBy('movie1', 'movie2') \
        .agg( \
            func.sum(func.col('xy')).alias('numerator'), \
            (func.sqrt(func.sum(func.col('xx'))) * func.sqrt(func.sum(func.col('yy')))).alias('denominator'), \
            func.count(func.col('xy')).alias('num_pairs')
        )
    # Score is 0 when the denominator is 0 (avoids division by zero).
    result = calculate_similarity \
        .withColumn('score', \
            func.when(func.col('denominator') != 0, func.col('numerator') / func.col('denominator')) \
            .otherwise(0) \
        ).select('movie1', 'movie2', 'score', 'num_pairs')
    return result
def get_movie_name(movie_pairs, movie_id):
    """Return the title stored for *movie_id* in the names dataframe."""
    matches = movie_pairs.filter(func.col('movieID') == movie_id)
    return matches.select('movieTitle').collect()[0][0]
def main():
    """Compute movie-pair similarities and, when a movie id is given on the
    command line, print its ten best-scoring similar movies."""
    name_schema = movie_name_schema()
    # Dataset of movieID and movieTitle
    movie_names = spark.read.option('sep', '|').option('charset', 'ISO-8859-1') \
        .schema(name_schema).csv('./ml-100k/u.item')
    # Movie data
    schema = movie_schema()
    movies = spark.read.option('sep', '\t').schema(schema) \
        .csv('./ml-100k/u.data')
    # Ratings
    ratings = movies.select('userId', 'movieId', 'rating')
    # Self-join on user so every row pairs two different movies rated by the
    # same user (movie1 < movie2 avoids duplicate and self pairs).
    movie_pairs = ratings.alias('ratings1') \
        .join(ratings.alias('ratings2'), (func.col('ratings1.userId') == func.col('ratings2.userId')) \
              & (func.col('ratings1.movieId') < func.col('ratings2.movieId'))) \
        .select(func.col('ratings1.movieId').alias('movie1'), \
                func.col('ratings2.movieId').alias('movie2'), \
                func.col('ratings1.rating').alias('rating1'), \
                func.col('ratings2.rating').alias('rating2'))
    # Compute the cosine similarity between the movies
    pairs = movie_similarity(movie_pairs).cache()
    if (len(sys.argv) > 1):
        score_threshold = 0.97
        co_occurrence_threshold = 50.0
        movieID = int(sys.argv[1])
        # Bug fix: the similarity dataframe names this column 'num_pairs';
        # the original filtered and printed 'numPairs', which does not exist.
        filtered_results = pairs.filter( \
            ((func.col('movie1') == movieID) | (func.col('movie2') == movieID)) & \
            (func.col('score') > score_threshold) & (func.col('num_pairs') > co_occurrence_threshold) \
        )
        # Sort by quality
        results = filtered_results.sort(func.col('score').desc()).take(10)
        for result in results:
            similar_movie_id = result.movie1
            if (similar_movie_id == movieID):
                similar_movie_id = result.movie2
            print(get_movie_name(movie_names, similar_movie_id) + '\tscore: ' \
                  + str(result.score) + '\tstrength: ' + str(result.num_pairs))
if __name__ == '__main__':
main()
| true |
62f6a2bcfee69fe6b88ced202b0feeae37e9d7e5 | Python | rahlin1004/sc-projects | /Assignment3/breakout.py | UTF-8 | 1,144 | 3.109375 | 3 | [
"MIT"
] | permissive | """
Name: Sarah
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
A simple Breakout clone: move the paddle to bounce the ball into the brick
wall, clearing every brick to win before running out of lives.
"""
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
FRAME_RATE = 1000 / 120 # 120 frames per second.
NUM_LIVES = 3
def main():
    """
    Run the Breakout game loop: bounce the ball until the player clears
    every brick (score2 reaches 'win') or runs out of lives.
    """
    global NUM_LIVES
    graphics = BreakoutGraphics()
    score = 0  # points accumulated so far
    score2 = 0  # bricks-cleared progress counter, compared against 'win'
    delay = 0  # extra pause added to each frame (slows the ball)
    win = 1000  # target value of score2; updated by bounce_ball each frame
    # Animation loop: one iteration per frame.
    while NUM_LIVES > 0:  # keep playing while lives remain
        if graphics.get_game_state():  # True while a round is in progress
            dx = graphics.get_dx()  # horizontal ball velocity
            dy = graphics.get_dy()  # vertical ball velocity
            NUM_LIVES, score, delay, score2, win = graphics.bounce_ball(dx, dy, NUM_LIVES, score, delay, score2)  # advance the ball one step
        pause(FRAME_RATE + delay + 20)  # frame pacing (larger delay = slower ball)
        if score2 == win:  # all bricks cleared
            break
    graphics.remove_all(score)  # show the win/lose end screen
if __name__ == '__main__':
main()
| true |
eebbdfff16f0dff6423dea44169e46dda8d8097b | Python | Beheroth/Smallworld | /gamestate.py | UTF-8 | 3,032 | 3.359375 | 3 | [] | no_license | from random import randint
#from race import Race, Power
from map import Map
from abc import ABC, abstractmethod
from civilisation import Civilisation, Race, Power
class Strategy(ABC):
    """Interface for civilisation-picking strategies."""
    @abstractmethod
    def pickciv(self, gamestate) -> int:
        """Return the index (into gamestate.civilisations) to pick."""
        pass
class User(Strategy):
    """Strategy that picks the offer with the smallest (reward - index).

    Picking index i costs i points (see Player.pickciv), and each offer
    carries a coin reward, so the comparison key is reward minus position.
    """

    def __init__(self, player):
        self.player = player

    def pickciv(self, gamestate):
        """Return the index of the offer minimising reward - index.

        Bug fix: the original guard ``if not min_value`` treated a
        legitimate benefit of 0 as "unset", resetting the running minimum;
        the None check below compares every candidate correctly.
        """
        min_index = 0
        min_value = None
        for i, entry in enumerate(gamestate.civilisations):
            benef = entry["reward"] - i
            if min_value is None or benef < min_value:
                min_value = benef
                min_index = i
        return min_index
class Player(object):
    """A participant holding civilisations and a victory-point score."""

    def __init__(self, gamestate, strategy: Strategy):
        self.civilisations = []  # civilisations owned by this player
        self.score = 5  # starting victory points
        self.gamestate = gamestate
        self.strategy = strategy  # decides which civilisation to pick (AI or user)

    def addciv(self, civilisation: Civilisation):
        """Take ownership of *civilisation*."""
        civilisation.setplayer(self)
        self.civilisations.append(civilisation)

    def pickciv(self, index):
        """Pay *index* points and claim the offer at that position.

        NOTE(review): GameState.withdrawciv returns a dict
        {"civilisation": ..., "reward": ...}, while addciv expects a
        Civilisation object — confirm which is intended.
        """
        self.score -= index
        civ = self.gamestate.withdrawciv(index=index)
        self.addciv(civ)

    def play(self):
        """Pick a replacement when the latest civilisation declined, then
        let every owned civilisation act.

        NOTE(review): self.civilisations[-1] raises IndexError on the very
        first turn, when the list is still empty — confirm the intended
        starting state.
        """
        if self.civilisations[-1].declined:
            index = self.strategy.pickciv(self.gamestate)
            self.pickciv(index)
        for civ in self.civilisations:
            civ.play()
class GameState(object):
    """Owns the map, the race/power pools, the visible offers and the players."""

    def __init__(self, numberofplayers=2):
        self.map = Map(path='maps/{}players.json'.format(numberofplayers))
        self.racepool = self.loadracepool()
        self.powerpool = self.loadpowerpool()
        self.civilisations = []  # the six visible race+power offers
        self.shuffleraces()
        self.round = 0
        # NOTE(review): these entries are User (a Strategy), yet run() calls
        # player.play()/getscore()/name, which only Player provides — confirm
        # whether Player instances were intended here.
        self.players = [User(self) for i in range(numberofplayers)]
        self.playerturn = 0

    def loadracepool(self):
        # TODO(review): Human is not imported or defined in this module.
        return [Human()]

    def loadpowerpool(self):
        # TODO(review): returns None, but shuffleraces() pops from it.
        pass

    def shuffleraces(self):
        """Top the visible offer list back up to six entries."""
        while(len(self.civilisations)<6):
            # Bug fix: randint is inclusive on both ends, so the upper bound
            # must be len(pool) - 1 or pop() can raise IndexError; the power
            # index also mistakenly used the *race* pool's length.
            race = self.racepool.pop(randint(0, len(self.racepool) - 1))
            power = self.powerpool.pop(randint(0, len(self.powerpool) - 1))
            self.civilisations.append({"civilisation": Civilisation(race, power, self.map), "reward":0})

    def withdrawciv(self, index):
        """Remove and return the offer at *index*; skipped offers gain a coin."""
        for i in range(index):
            self.civilisations[i]["reward"] += 1
        civ = self.civilisations.pop(index)
        self.shuffleraces()
        return civ

    def run(self):
        """Main loop: every player acts once per round, for ten rounds."""
        while(self.round < 10):
            while(self.playerturn < len(self.players)):
                self.players[self.playerturn].play()
                self.playerturn += 1
            self.playerturn = self.playerturn%len(self.players)
            self.round += 1
        print("The game has reached the end.")
        scores = []
        for player in self.players:
            print("{}: {}".format(player.name, player.getscore()))
            scores.append(player.getscore())
        print("{} wins the game with {}".format(self.players[scores.index(max(scores))].name, max(scores)))
| true |
73a8dcc4279dec7805adc7d13d55cf7a54d47cf9 | Python | reevesba/computational-intelligence | /projects/project3/src/max_func/individual.py | UTF-8 | 2,994 | 3.46875 | 3 | [] | no_license | ''' Function Maximization Individual
Author: Bradley Reeves, Sam Shissler
Date: 05/11/2021
'''
from numpy import random
from max_func.mf_fitness import MaxFuncFitness
from typing import List, TypeVar
# Custom types
Individual = TypeVar("Individual")
class Individual:
    """One candidate solution in the function-maximisation GA population."""

    def __init__(self: Individual, length: int, values: List, parent_a: Individual, parent_b: Individual) -> None:
        """Build an individual; random values are drawn when none are given,
        and the fitness/result pair is evaluated immediately."""
        self.length = length
        self.values = values if values else self.__random_values()
        self.parent_a = parent_a
        self.parent_b = parent_b
        self.fitness_function = MaxFuncFitness()
        self.fitness, self.result = self.fitness_function.fitness(self.values)

    def __random_values(self: Individual) -> List:
        """Draw a fresh random value pair from the allowed ranges."""
        return [random.randint(3, 11), random.randint(4, 9)]

    def get_values(self: Individual) -> List:
        """The individual's value pair."""
        return self.values

    def get_parent_a(self: Individual) -> Individual:
        """The first parent."""
        return self.parent_a

    def get_parent_b(self: Individual) -> Individual:
        """The second parent."""
        return self.parent_b

    def get_fitness(self: Individual) -> float:
        """The evaluated fitness score."""
        return self.fitness

    def get_result(self: Individual) -> float:
        """The raw function result backing the fitness."""
        return self.result
2b8473f61517cc557a6479cc06f111cef4658c8b | Python | CastleWhite/LeetCodeProblems | /1574.py | UTF-8 | 442 | 3.09375 | 3 | [] | no_license | class Solution:
def findLengthOfShortestSubarray(self, arr: List[int]) -> int:
b = []
n = len(arr)
for i in range(1, n):
if arr[i] < arr[i-1]:
b.append(i)
if not b: return 0
res = b[-1]
j = n-1
for i in range(b[0]-1, -1, -1):
while arr[j] >= arr[i] and j >= b[-1]:
j -= 1
res = min(res, j - i)
return res
| true |
ea636949ffe170b7616498aa901ea87c483ffc4d | Python | VINCENT101132/vincent1 | /20210710/homework/1.py | UTF-8 | 376 | 3.734375 | 4 | [] | no_license | """
Topic:輸入分子及分母,確認是否等於 350/450:
Show:Please input numerator"
Input1:70
show:Please input Denominator:
Input2:90
Output:True
Input1:6
Input2:9
Output:False
"""
numerator=int(input('please input numerator'))
denominator=int(input("please input denominator"))
if(numerator/denominator)==(350/450):
print('True')
else:
print("False") | true |
1982a83703059c0173dc6dfe6e53439242f9e4a5 | Python | stanyu2013/Team-Zero---Data-Science-Futures-Hackathon | /gdeltDates.py | UTF-8 | 998 | 2.84375 | 3 | [] | no_license |
import csv
import gdelt
import json
import re
# Version 2 queries
gd2 = gdelt.gdelt(version=2)
datepat=re.compile("201[5-7]-[0-9-]+$")
with open('extracted_dates.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=",")
for row in reader:
name=row[0]
date=row[1]
if datepat.match(date):
print("Video: " + name)
print("Date: " + date)
print("Entities/Actors for that date:")
results = gd2.Search([date],table='events',coverage=True)
res = results['Actor1Name'].value_counts()
list1 = res.index.tolist()
print list1[:30]
print("")
df = results.dropna(subset=['Actor1Name'])
df2 = df.dropna(subset=['Actor2Name'])
res = results['Actor2Name'].value_counts()
list2 = res.index.tolist()
print list2[:30]
print("--------------------------------------------------------------------")
| true |
2671d0a659ec945ee58532ae67c8640ec5901bf9 | Python | juanpabloalfonzo/PHY224 | /Radius of the Earth/GravRadius.py | UTF-8 | 2,572 | 3.515625 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def radius(floor, slope, intercept):
    """Linear trend used by curve_fit: predicted reading at a given floor."""
    prediction = slope * floor + intercept
    return prediction
def chi(y_i, y_x, sigmai):
    """Reduced chi-squared of the fit.

    y_i    -- measured values
    y_x    -- model (best-fit) values
    sigmai -- measurement errors

    The hard-coded 1/12 is 1/(N - m) for this experiment's 14 data points
    and 2 fit parameters, as in the original.
    """
    normalised_residuals = (y_i - y_x) / sigmai
    return (1/12) * (np.sum(normalised_residuals ** 2))
# --- Data import ------------------------------------------------------------
# Columns: floor number, day-1 reading, day-2 reading, reading error (Div).
Data=np.loadtxt("FloorData.txt")
Floors=Data[:,0]
GravRaw1=Data[:,1]
GravRaw2=Data[:,2]
RawError=Data[:,3]
# --- Unit conversions -------------------------------------------------------
mGalGrav1=0.10155*GravRaw1 # gravimeter dial divisions -> mGal
Grav1=mGalGrav1/100000     # mGal -> m/s^2
mGalGrav2=0.10155*GravRaw2
Grav2=mGalGrav2/100000
mGalError=0.10155*RawError
Error=mGalError/100000
# --- Curve fitting ----------------------------------------------------------
p_optR1, p_covR1=curve_fit(radius, Floors , Grav1 , p0=[0,0] ,sigma=Error , absolute_sigma=True) # linear best fit, day 1
p_optR2, p_covR2=curve_fit(radius, Floors , Grav2 , p0=[0,0] ,sigma=Error , absolute_sigma=True) # linear best fit, day 2
# --- Plotting ---------------------------------------------------------------
plt.title("Day 1 Measurements")
plt.errorbar(Floors,Grav1, yerr=Error, fmt='m.', label='Experimental Measurements (Day 1)')
plt.plot(Floors,radius(Floors,p_optR1[0],p_optR1[1]),label='Line of Best Fit')
plt.xlabel("Floor")
plt.ylabel("Measurement Of Gravity")
plt.legend(loc='upper right')
plt.savefig('Day 1')
plt.figure()
plt.title("Day 2 Measurements")
plt.errorbar(Floors,Grav2, yerr=Error, fmt='m.', label='Experimental Measurements (Day 2)')
plt.plot(Floors,radius(Floors,p_optR2[0],p_optR2[1]),label='Line of Best Fit')
plt.xlabel("Floor")
plt.ylabel("Measurement Of Gravity")
plt.legend(loc='upper right')
plt.savefig('Day 2')
# --- Radius of the Earth ----------------------------------------------------
R=6371*1000  # accepted value in metres (reference only, unused below)
R1=-2*(3.95)*(9.81/p_optR1[0])
R1U=np.abs(((np.sqrt(p_covR1[0,0])/p_optR1[0])*100)*R1) # uncertainty; presumably relative error x100 x R — confirm the intended formula
print('The Radius of the Earth from the first days measurement is:',R1, '±', R1U)
R2=-2*(3.95)*(9.81/p_optR2[0])
R2U=np.abs(((np.sqrt(p_covR2[0,0])/p_optR2[0])*100)*R2) # uncertainty, day 2
# Bug fix: the day-2 result was printed with the day-1 "first days" message.
print('The Radius of the Earth from the second days measurement is:',R2, '±', R2U)
# --- Goodness of fit --------------------------------------------------------
print("The value for how good the Day 1 data has been fitted is: ", chi(Grav1,radius(Floors, p_optR1[0], p_optR1[1]),Error)) # chi squared, day 1
print("The value for how good the Day 2 data has been fitted is: ", chi(Grav2,radius(Floors, p_optR2[0], p_optR2[1]),Error)) # chi squared, day 2
| true |
b0b7a5823a725a14dea03f02f6061d19003203d5 | Python | 478855960/Plants_V.S._Zombies | /entity/bullet.py | UTF-8 | 608 | 3.34375 | 3 | [] | no_license | import pygame
class Bullet(object):
    """A projectile fired by a plant; moves left-to-right across the screen."""

    def __init__(self, screen, image, peaX, peaY, type):
        """screen: pygame surface to draw on; image: path to the sprite;
        peaX/peaY: spawn position; type: bullet kind (see comment below)."""
        self.screen = screen
        self.image = pygame.image.load(image)
        # Current position (top-left corner of the sprite).
        self.x = peaX
        self.y = peaY
        self.width = self.image.get_rect()[2]
        self.height = self.image.get_rect()[3]
        # Bullet kind: 0 = pea (non-piercing), 1 = cactus spike (piercing).
        self.type = type

    def outOfBounds(self):
        # NOTE(review): 1400 presumably matches the window width — confirm.
        return self.x > 1400

    def step(self):
        # Advance 3 pixels per tick, towards the right edge.
        self.x += 3

    def blitme(self):
        self.screen.blit(self.image, (self.x, self.y))
| true |
15257f0d612c2790921c31992dd72e54a80baaef | Python | FarbrorGao/point_cloud_compression_test | /draco_test/compare.py | UTF-8 | 1,291 | 3.5 | 4 | [] | no_license | import csv
def compare(input, output):
    """Sort both lists in place and report how closely they match.

    Prints the list sizes, the number of exactly-equal pairs and the number
    of pairs whose difference lies in (-0.001, 0.001).  Terminates the
    process when the lists have different lengths.
    """
    input.sort()
    output.sort()
    print("# of input:", len(input))
    print("# of output:", len(output))
    if(len(input) != len(output)):
        print('The lengths of input and output are not equal')
        # Bug fix: the original exited with status 0, signalling success on
        # this error path.
        exit(1)
    diff = []
    count = 0
    for i in range(0, len(input)):
        diff.append(output[i] - input[i])
        # Count near-matches within an absolute tolerance of 0.001.
        if(diff[-1] < 0.001 and diff[-1] > -0.001):
            count += 1
    print("equal:", diff.count(0.0))
    print("(-0.001, 0.001):", count)
def load(filename):
    """Parse a space-delimited xyz file into (x, y, z, intensity) lists.

    Coordinates are rounded to three decimal places via string formatting;
    the intensity column is kept at full precision.
    """
    xs, ys, zs, intensities = [], [], [], []
    with open(filename, 'r') as handle:
        for row in csv.reader(handle, delimiter=' '):
            xs.append(float(format(float(row[0]), '.3f')))
            ys.append(float(format(float(row[1]), '.3f')))
            zs.append(float(format(float(row[2]), '.3f')))
            intensities.append(float(row[3]))
    return xs, ys, zs, intensities
if __name__ == '__main__':
    # Compare the point cloud before and after compression, axis by axis.
    in_x, in_y, in_z, in_intensity = load('in.xyz')
    out_x, out_y, out_z, out_intensity = load('out.xyz')
    print('compare x:')
    compare(in_x, out_x)
    print('compare y:')
    compare(in_y, out_y)
    print('compare z:')
    compare(in_z, out_z)
    print('compare intensity:')
    compare(in_intensity, out_intensity)
4d99d1cea7bdd19b628d98f358898b3f0ace32dd | Python | Jaafoub/Backtest-Framework | /asset_variables.py | UTF-8 | 759 | 3.1875 | 3 | [] | no_license | import pandas as pd
import numpy as np
from yahoo_data import *
def compute_daily_return( df ):
    """Simple daily return: today / yesterday - 1 (first row is NaN)."""
    return df / df.shift(1) - 1
def compute_daily_return_yahoo( ticker, start_date, end_date ):
    """Daily returns of a ticker's closing price over [start_date, end_date].

    Fetches OHLC data via yahoo_stock_data_ticker and derives returns from
    the 'Close' column.
    """
    data = yahoo_stock_data_ticker( ticker, start_date, end_date )
    close = data['Close']
    return( compute_daily_return( close ) )
def compute_annualized_volatility_yahoo( ticker, start_date, end_date, window_in_years = 1 ):
    """Rolling annualised volatility for a ticker over [start_date, end_date].

    window_in_years -- size of the rolling window, in trading years.
    """
    ret = compute_daily_return_yahoo( ticker, start_date, end_date )
    return( compute_annualized_vol( ret, window_in_years ) )
def compute_annualized_vol(ret, window_in_years):
    """Rolling annualised volatility, assuming 252 trading days per year."""
    window = window_in_years * 252
    rolling_std = ret.rolling(window).std()
    return np.sqrt(252) * rolling_std
| true |
17721abd6ab291a2938cdbaf9c682bc5bbe596d2 | Python | shubhamgupta16/Python_Programs | /51_greatest.py | UTF-8 | 373 | 3.953125 | 4 | [] | no_license | # find greatest number between three number
def greatest(a, b, c):
    """Return the largest of the three numbers.

    Bug fix: the original used strict '>' comparisons only, so ties for the
    maximum (e.g. a == b > c) fell through every branch and wrongly
    returned c.
    """
    if a >= b and a >= c:
        return a
    elif b >= a and b >= c:
        return b
    else:
        return c
# Read three integers from the user and print the largest one.
num1 = int(input("enter first number: "))
num2 = int(input("enter second number: "))
num3 = int(input("enter third number: "))
print(f"greatest number is {greatest(num1, num2, num3)}")
3679568b5cb8b482e4fa5ec290735d921e7bb05c | Python | SorianoJuan/ProgConcurrente-UNC | /src/test_t_invariantes.py | UTF-8 | 1,147 | 2.953125 | 3 | [] | no_license | import re
def checkTInvariant(f, inv):
    """Check that the fired transitions in log *f* repeat the T-invariant.

    f   -- iterable of log lines, each containing 'Transicion disparada: X'
    inv -- dict mapping a transition name to its position in the invariant

    Returns True when every complete group of len(inv) recorded firings
    matches the invariant order exactly (a trailing partial group is
    ignored, and transitions not in *inv* are skipped).
    """
    pattern = re.compile('(?<=Transicion disparada: ).+')
    observed = []
    for line in f:
        fired = pattern.search(line).group(0)
        if fired in inv:
            observed.append(inv[fired])
    size = len(inv)
    expected = list(range(size))
    ok = True
    complete = len(observed) - (len(observed) % size)
    for start in range(0, complete, size):
        ok &= observed[start:start + size] == expected
    return ok
# T-invariants under test: transition name -> expected firing position.
# Train cycle through stations A-D with barrier moves.
t_inv_tren = {
    'BAJABAR_A':0,
    'LEVANTABAR_A':1,
    'LLEGATREN_B':2,
    'SALETREN_B':3,
    'LLEGATREN_C':4,
    'SALETREN_C':5,
    'BAJARBAR_C':6,
    'LEVANTARBAR_C':7,
    'LLEGATREN_D':8,
    'SALETREN_D':9,
    'LLEGATREN_A':10,
    'SALETREN_A':11
}
# Car A cycle: arrive, cross, leave.
t_inv_autoa = {
    'LLEGA_AUTO_A':0,
    'AUTO_CRUZA_A':1,
    'AUTO_SE_VA_A':2
}
# Car B cycle: arrive, cross, leave.
t_inv_autob = {
    'LLEGA_AUTO_B':0,
    'AUTO_CRUZA_B':1,
    'AUTO_SE_VA_B':2
}
# Run the three invariant checks against the recorded log.
# Bug fix: the file iterator was exhausted after the first check, so the
# later checks saw no lines and passed vacuously — rewind between checks.
# (The unused 'inv = t_inv_tren' alias was also removed.)
f = open("log.txt", "r")
print("T-Invariante de Tren:")
print(checkTInvariant(f, t_inv_tren))
f.seek(0)
print("T-Invariante de Auto-A")
print(checkTInvariant(f, t_inv_autoa))
f.seek(0)
print("T-Invariante de Auto-B")
print(checkTInvariant(f, t_inv_autob))
f.close()
| true |
894dee8cf8f77dd810b4a4528a424ed3f291cfc9 | Python | Winnerabalogu/py_demo | /derrick_toutorial/file.py | UTF-8 | 533 | 3.296875 | 3 | [] | no_license | # import sys
#find the index of a value
# print(name.find("weda"))
# print(name.replace("weda", "weather"))
#create / open a file
# text_file = open("test.txt", "wb")
# text_file.write(bytes("ill get ther soon\n" 'UTF-8'))
# text_in_file = text_file.read()
# print(text_in_file)
# Reference roster built via implicit string-literal concatenation.
# NOTE(review): 'name' is never used by the lookup below -- confirm it
# is still needed before deleting it.
name = ('david coldshot\n''ayo ogunbiyi\n''celestine sniper\n')
names = input(':')
full_name = None  # stays None when no line of the file matches the query
with open('names.txt', 'r') as students:
    for line in students.readlines():
        if names in line:
            full_name = line
if full_name is None:
    # Fix: the original printed full_name unconditionally and raised
    # NameError whenever the query matched no line.
    print('no match found')
else:
    print(full_name.strip('\n'))
| true |
f98a3dd1a67185c6174dcfa4ad072a3eb2338e1c | Python | devm1023/GeekTalentDB | /src/nuts_to_geojson.py | UTF-8 | 2,270 | 2.734375 | 3 | [] | no_license | '''
Converts NUTS data to GeoJSON for insights
'''
import json
import csv
import shapely.geometry as geo
import conf
from nuts import NutsRegions
# Two-letter Eurostat country codes covered by the NUTS shape data.
# Note: Eurostat uses 'EL' for Greece and 'UK' for Great Britain; the
# loops below translate these to ISO codes ('gr', 'gb') for file names.
countries = [
    'AT', 'BE', 'BG', 'CH', 'CY', 'CZ', 'DE', 'DK', 'EE', 'EL', 'ES', 'FI', 'FR', 'HR',
    'HU', 'IE', 'IS', 'IT', 'LI', 'LT', 'LU', 'LV', 'ME', 'MK', 'MT', 'NL', 'NO', 'PL',
    'PT', 'RO', 'SE', 'SI', 'SK', 'TR', 'UK'
]
# Map NUTS region id -> human-readable name, merged from the level 1-3
# lookup tables; rows that are not (id, name) pairs are skipped.
nutsnames = {}
for lvl in (1, 2, 3):
    with open('nutsregions/nuts{}.csv'.format(lvl), 'r', newline='') as handle:
        for record in csv.reader(handle):
            if len(record) == 2:
                nutsnames[record[0]] = record[1]
def _iso_country_code(eurostat_code):
    """Return the lower-case ISO 3166-1 alpha-2 code for a Eurostat
    country code (Eurostat uses 'UK' for Great Britain, 'EL' for Greece)."""
    code = eurostat_code.lower()
    return {'uk': 'gb', 'el': 'gr'}.get(code, code)


# One GeoJSON FeatureCollection per country and NUTS level (1-3).
# Fix: the UK/EL normalization was duplicated here and in the bounds
# loop below; it is now shared via _iso_country_code.  The loop index
# no longer shadows the built-in 'id'.
for country in countries:
    print(country)
    nuts = NutsRegions(conf.NUTS_DATA, countries=[country])
    iso_code = _iso_country_code(country)
    for level in range(1, 4):
        features = []
        for feature_id, (nutsid, shape) in enumerate(nuts.level(level)):
            geometry = geo.mapping(shape)
            if nutsid not in nutsnames:
                print('Description missing for NUTS ID: '+nutsid)
            features.append({'type' : 'Feature',
                             'id' : feature_id,
                             'properties' : {
                                 'nutsId' : nutsid,
                                 'name' : nutsnames.get(nutsid, '')
                             },
                             'geometry' : geometry})
        geojson = {'type' : 'FeatureCollection',
                   'features' : features}
        with open('nutsregions/{}_nuts{}.json'.format(iso_code, level), 'w') as jsonfile:
            json.dump(geojson, jsonfile)

# Bounding box per country, as [[miny, minx], [maxy, maxx]] taken from
# the level-0 country shapes (the unused geo.mapping() call was removed).
nuts = NutsRegions(conf.NUTS_DATA, countries=countries)
bounds = {}
for nutsid, shape in nuts.level(0):
    bounds[_iso_country_code(nutsid)] = [[shape.bounds[1], shape.bounds[0]],
                                         [shape.bounds[3], shape.bounds[2]]]
with open('nutsregions/country_bounds.json', 'w') as jsonfile:
    json.dump(bounds, jsonfile, indent=4)
| true |
925f259058956b0f7a86eff86d2737a0c312377d | Python | RollingBear/pyQT | /darw/drawingText.py | UTF-8 | 929 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# 2019/3/12 0012 上午 10:35
__author__ = 'RollingBear'
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QFont
from PyQt5.QtCore import Qt
class Example(QWidget):
    """Small demo window that paints two lines of centred text."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Set the text to render, the window geometry/title, and show it."""
        self.text = 'Лев Николаевич Толстой\nАнна Каренина'
        self.setGeometry(300, 300, 280, 170)
        self.setWindowTitle('Drawing text')
        self.show()

    def paintEvent(self, event):
        """Qt paint hook: draw the sample text on every repaint.

        Fix: the parameter was originally named ``QPaintEvent``, shadowing
        the Qt class of the same name; Qt invokes this handler
        positionally, so renaming it to the conventional ``event`` is safe.
        """
        qp = QPainter()
        qp.begin(self)
        self.drawText(event, qp)
        qp.end()

    def drawText(self, event, qp):
        """Render ``self.text`` centred in the widget with a fixed pen/font."""
        qp.setPen(QColor(168, 34, 3))
        qp.setFont(QFont('Decorative', 10))
        qp.drawText(event.rect(), Qt.AlignCenter, self.text)
if __name__ == '__main__':
    # Create the Qt application, show the demo window, and run the
    # event loop until the window is closed.
    application = QApplication(sys.argv)
    window = Example()
    sys.exit(application.exec_())
| true |
f5b5fcb38a402fdbcff3b18d6dc7d979884f5cb6 | Python | tousifeshan/WebScrapping | /search_process_names_in_shouldiremoveitdotcom.py | UTF-8 | 3,854 | 2.6875 | 3 | [] | no_license | __author__ = 'tousif'
import requests
import json
from time import sleep
import urllib
from lxml import html
import csv
import unicodedata
# Input: one executable per row, column 'executable_value' (Python 2 script).
inputfile=open('process_list.csv', 'rt')
# Output Files
# - outputfile:      every process with its result count
# - oneresultfile:   processes with exactly one search hit (title + url)
# - noresultfile:    processes with zero hits
# - mtoneresultfile: processes with multiple hits (one row per hit)
outputfile=open('complete_process_list_with_number_of_results.csv','wt')
oneresultfile= open('process_list_with_one_result.csv','wt')
noresultfile= open('process_list_output_with_noresult.csv','wt')
mtoneresultfile= open('process_list_output_with_morethanone_result.csv','wt')
# Query Google (site:shouldiremoveit.com) once per process name and bucket
# the results into the four output CSVs; the finally-block closes every
# file handle even if a request fails mid-run.
# NOTE(review): the ajax.googleapis.com Web Search API used below is long
# deprecated -- confirm this endpoint still responds before relying on it.
try:
    reader=csv.DictReader(inputfile)
    # Fieldnames for different output files
    fieldnames=['process_name','no_of_results']
    fieldnames_results=['process_name','no_of_results','Title' ,'url']
    fieldnames_more_results=['index','process_name','estimated_results_count','Title' ,'url']
    writer=csv.DictWriter(outputfile, fieldnames=fieldnames)
    writer_for_one= csv.DictWriter(oneresultfile, fieldnames=fieldnames_results)
    writer_for_no= csv.DictWriter(noresultfile, fieldnames=fieldnames)
    writer_for_mtone= csv.DictWriter(mtoneresultfile, fieldnames=fieldnames_more_results)
    writer.writeheader()
    writer_for_mtone.writeheader()
    writer_for_no.writeheader()
    writer_for_one.writeheader()
    # counters
    # total_0 / total_1 / total_mt1 count processes with zero / one /
    # multiple hits; total_found is the hit count of the current row.
    total_1=0
    total_mt1=0
    total_0=0
    total_found=0
    for i,row in enumerate(reader):
        sleep(120) # Waiting time between each call. Otherwise Google will Block you
        # Search Query
        query = '"'+row["executable_value"]+'" site:shouldiremoveit.com'
        #NB. add 'start=3' to the query string to move to later results
        r = requests.get('http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=' + query)
        # JSON object
        theJson = r.content
        theObject = json.loads(theJson)
        # results with no results
        if len(theObject['responseData']['results'])==0:
            total_found=0
            total_0=total_0+1
            writer_for_no.writerow({'process_name': row['executable_value'], 'no_of_results':total_found})
        else:
            # Print it all out
            total_found= len(theObject['responseData']['results'])
            # processes with one results
            if total_found==1:
                total_1=total_1+1
                for index,result in enumerate(theObject['responseData']['results']):
                    # print str(index+1) + ") " + result['titleNoFormatting']
                    # print result['url']
                    writer_for_one.writerow({'process_name': row['executable_value'], 'no_of_results':total_found,
                                             'Title': result['titleNoFormatting'], 'url': result['url']})
            else:
                #processes with multiple results
                total_mt1=total_mt1+1
                total_found=theObject['responseData']['cursor']['estimatedResultCount']
                for index,result in enumerate(theObject['responseData']['results']):
                    title=result['titleNoFormatting']
                    writer_for_mtone.writerow({'index': i, 'process_name': row['executable_value'], 'estimated_results_count':theObject['responseData']['cursor']['estimatedResultCount'],
                                               'Title': title.encode('utf8'), 'url': result['url']})
        print str(i)+ ": file:"+ row['executable_value']+\
            ", Results: "+str(total_found)+", total 0:"+ str(total_0)+ ", total_1: "+ str(total_1)+ ", MT: "+ str(total_mt1)
        writer.writerow({'process_name': row['executable_value'], 'no_of_results':total_found})
    print "Total Unknown: "+str(total_0)+" , Total Known: "+ str(total_1) + "Total confusing: "+ str(total_mt1)
finally:
    inputfile.close()
    outputfile.close()
    oneresultfile.close()
    noresultfile.close()
    mtoneresultfile.close()
| true |
d6f6fc462eb274b4c2ef2dd23f19185c4b1a853f | Python | chdoig/Smashfast | /smashfast.py | UTF-8 | 3,810 | 3.546875 | 4 | [] | no_license | from sys import exit
from random import randint
class Scene(object):
    """Abstract base for all game scenes; subclasses must override enter()."""
    def enter(self):
        # Reaching this means a subclass forgot to implement enter().
        print "This scene is not yet configured. Subclass it and implement enter()."
        exit(1)
class Engine(object):
    """Drives the game loop: asks the map for scenes and runs them."""

    def __init__(self, scene_map):
        self.scene_map = scene_map

    def play(self):
        """Run scenes until the 'finished' scene is reached, then enter it."""
        scene = self.scene_map.opening_scene()
        final_scene = self.scene_map.next_scene('finished')
        while scene != final_scene:
            scene = self.scene_map.next_scene(scene.enter())
        # be sure to print out the last scene
        scene.enter()
class Death(Scene):
quips = [
"You died. You kinda suck at this.",
"Your mom would be proud...if she were smarter.",
"Such a loser.",
"I have a small puppy that's better at this."
]
def enter(self):
print Death.quips[randint(0, len(self.quips)-1)]
exit(1)
class Opening_Scene(Scene):
    """Intro scene; unconditionally hands control to scene 1."""
    def enter(self):
        print "opening scene text here"
        return 'scene1'
class Scene_1(Scene):
def enter(self):
print "description of scene 1"
action = raw_input("> ")
if action == "option_a":
print "outcome of option_a"
return 'scene2'
elif action == "option_b":
print "outcome of option_b"
return 'death'
elif action == "option_c":
print "outcome of option c"
return 'death'
else:
print "DOES NOT COMPUTE!"
return 'scene1'
class Scene_2(Scene):
def enter(self):
print "description of scene 2"
action = raw_input("> ")
if action == "option_a":
print "outcome of option_a"
return 'scene3'
elif action == "option_b":
print "outcome of option_b"
return 'death'
elif action == "option_c":
print "outcome of option c"
return 'death'
else:
print "DOES NOT COMPUTE!"
return 'scene2'
class Scene_3(Scene):
def enter(self):
print "description of scene 3"
action = raw_input("> ")
if action == "option_a":
print "outcome of option_a"
return 'scene4'
elif action == "option_b":
print "outcome of option_b"
return 'death'
elif action == "option_c":
print "outcome of option c"
return 'death'
else:
print "DOES NOT COMPUTE!"
return 'scene3'
class Scene_4(Scene):
def enter(self):
print "Scene 4 description"
action = raw_input("> ")
if action == "option_a":
print "Option A description"
return 'finalscene'
elif action == "option_b":
print "Option B description"
return 'death'
elif action == "option_c":
print "Option C description"
return 'death'
else:
print "DOES NOT COMPUTE!"
return 'scene4'
class Final_Scene(Scene):
    """Winning scene: congratulate the player and exit cleanly."""
    def enter(self):
        print "You won! Good job."
        exit()
class Map(object):
    """Static lookup from scene names to scene instances."""

    scenes = {
        'openingscene': Opening_Scene(),
        'scene1': Scene_1(),
        'scene2': Scene_2(),
        'scene3': Scene_3(),
        'scene4': Scene_4(),
        'finalscene': Final_Scene(),
        'death': Death(),
    }

    def __init__(self, start_scene):
        self.start_scene = start_scene

    def next_scene(self, scene_name):
        # dict.get returns None for unknown names (e.g. 'finished').
        return Map.scenes.get(scene_name)

    def opening_scene(self):
        return self.next_scene(self.start_scene)
# Build the map starting at the opening scene and run the game loop.
a_map = Map('openingscene')
a_game = Engine(a_map)
a_game.play()
| true |
6024720ad09639256d375f4ac02072ffadbca39d | Python | yipenglai/Chinese-Word-Representation | /eval.py | UTF-8 | 2,528 | 3.171875 | 3 | [] | no_license | """Evaluate learned word representation on word similarity task"""
import sys
import os
import logging
import argparse
import numpy as np
import pandas as pd
from fasttext import load_model
from scipy.stats import spearmanr
from tqdm import tqdm
from convert_subchar import convert_graphical as graphical
from convert_subchar import convert_wubi as wubi
def main():
    """Evaluate word embeddings on a word-similarity dataset.

    Reads (word1, word2, human_score) triples from --input, converts the
    words to the requested subword representation, scores each pair with
    the cosine similarity of its fastText vectors, writes the
    predictions to --output (TSV) and logs the Spearman correlation with
    the human judgements.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    parser = argparse.ArgumentParser(description='Evaluate word representations')
    parser.add_argument('--input', type=str, default='wordsim-296.txt', help='Evaluation data path')
    parser.add_argument('--model_path', type=str, default='model.bin', help='Trained model path')
    parser.add_argument('--subword', type=str, default='character', help='Convert evaluation to subcharacters if needed {character, graphical, wubi}')
    parser.add_argument('--output', type=str, default='result.txt', help='Path to save the words pairs and their predicted cosine similarity scores')
    args = parser.parse_args()
    if args.subword not in ('character', 'graphical', 'wubi'):
        # Fix: the original only detected a bad --subword inside the
        # loop, 'break'-ed out, and then still wrote the output file and
        # computed a Spearman correlation over empty/partial lists.
        logging.error('Please enter the correct subword component {character, graphical, wubi}')
        return
    logging.info('Start evaluation for {} data..'.format(args.subword))
    model = load_model(args.model_path)
    human_score = []
    result_list = []  # (word_1, word_2, predicted_similarity) rows
    # 'with' guarantees the evaluation file is closed even on error
    # (the original leaked the handle if an exception was raised).
    with open(args.input, 'r') as eval_data:
        for line in tqdm(eval_data):
            word1, word2, human = line.split()
            if args.subword == 'wubi':
                w1, w2 = wubi(word1), wubi(word2)
            elif args.subword == 'graphical':
                w1, w2 = graphical(word1), graphical(word2)
            else:  # 'character' -- validated above
                w1, w2 = word1, word2
            # Cosine similarity between the two embedding vectors.
            emb1, emb2 = model[w1], model[w2]
            pred = np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))
            human_score.append(human)
            result_list.append((word1, word2, pred))
    # Save predicted scores for later error analysis.
    result = pd.DataFrame(result_list, columns=['word_1', 'word_2', 'pred_score'])
    result.to_csv(args.output, sep='\t')
    # Spearman correlation between predicted and human similarity scores.
    pred_score = [row[-1] for row in result_list]
    corr = spearmanr(human_score, pred_score)
    logging.info('Finish evaluation on dataset {}. Score = {}'.format(args.input, corr))
    logging.info('Done')
logging.info('Done')
if __name__ == '__main__':
    main()  # script entry point; no-op when imported as a module
| true |
6eb75a005124fbda42e0ffab2fba61aca8aac1d4 | Python | washingtoncandeia/PyCrashCourse | /09_Classes/fvm9.13.py | UTF-8 | 770 | 4.15625 | 4 | [] | no_license | ##-------------------------------
# Cap.9 - Classes
# Python Crash Course
# Autor: Washington Candeia
# Faça você mesmo, p.251
# 9.13 - Reescrevendo o programa com OrderedDict
##-------------------------------
from collections import OrderedDict
# Ordered glossary of programming terms; OrderedDict preserves insertion
# order even on Python < 3.7, where plain dicts were unordered.
glossario = OrderedDict()
glossario['instanciar'] = 'atribuir comportamento e características de uma classe a um objeto'
glossario['oop'] = 'programação orientada a objetos'
glossario['dicionário'] = 'estrutura de dados chave-valor em python'
glossario['módulo'] = 'arquivo contendo funções e classes'
glossario['classe'] = 'oop; estrutura que guarda comportamentos e características de um objeto real'
for k, v in glossario.items():
    # Fix: typo in the printed heading -- 'sifnificado' -> 'significado'.
    print('Palavra e significado: \n'
          + k.title() + ': ' + v.title() + '\n')
| true |
e9126d4fdef8e3ccdd79d74c283407b1dcf0fa09 | Python | ahmed789-dev/capstone-project | /backend/test_app.py | UTF-8 | 5,804 | 2.609375 | 3 | [] | no_license | import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from app import create_app
from models import setup_db, Movies, Actors
class CapstonProjectTestCase(unittest.TestCase):
    """Integration tests for the casting-agency API: CRUD on /actors and
    /movies, covering success paths plus 404 and 422 error responses.

    NOTE(review): the database credentials are hard-coded in the
    connection string below -- consider reading them from the
    environment.
    """
    def setUp(self):
        # Define test variables and initialize app.
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = "casting"
        self.database_path = "postgres://{}/{}".format('postgres:1234@localhost:5432', self.database_name)
        setup_db(self.app, self.database_path)
        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()
        # NOTE(review): these two fixtures are never referenced by the
        # tests below (each test builds its own payload/row).
        self.new_actor = Actors(
            name="new actor",
            age="15",
            gender="male"
        )
        self.new_movie = Movies(
            title="new movie",
            release_date="1-1-2020"
        )
    def tearDown(self):
        # Executed after reach test
        pass
    # --- GET collection endpoints -------------------------------------
    def test_getting_actors(self):
        res = self.client().get('/actors')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
    def test_getting_movies(self):
        res = self.client().get('/movies')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
    # --- POST (create) endpoints --------------------------------------
    def test_add_new_actor(self):
        newActor = {
            "name":"new actor",
            "age":"15",
            "gender":"male"
        }
        res = self.client().post('/actors', json=newActor)
        data = json.loads(res.data)
        self.assertEqual(data['success'], True)
        self.assertEqual(res.status_code, 200)
    def test_add_new_movie(self):
        newMovie = {
            "title": "new movie",
            "release_date": "1-1-2020"
        }
        res = self.client().post('/movies', json=newMovie)
        data = json.loads(res.data)
        self.assertEqual(data['success'], True)
        self.assertEqual(res.status_code, 200)
    # Missing required fields must yield 422 (unprocessable entity).
    def test_add_new_actor_422(self):
        newActor = {
            "name":"new actor",
            "gender":"male"
        }
        res = self.client().post('/actors', json=newActor)
        data = json.loads(res.data)
        self.assertEqual(data['success'], False)
        self.assertEqual(res.status_code, 422)
    def test_add_new_movie_422(self):
        newMovie = {
            "title": "new movie"
        }
        res = self.client().post('/movies', json=newMovie)
        data = json.loads(res.data)
        self.assertEqual(data['success'], False)
        self.assertEqual(res.status_code, 422)
    # --- DELETE endpoints (insert a row first, then remove it) --------
    def test_delete_actor(self):
        newActor = Actors(name="new actor", age="15", gender="male")
        newActor.insert()
        actor_id = newActor.id
        res = self.client().delete(f'/actors/{actor_id}')
        data = json.loads(res.data)
        self.assertEqual(data['success'], True)
        self.assertEqual(data['actor'], actor_id)
        self.assertEqual(res.status_code, 200)
    def test_delete_movie(self):
        newMovie = Movies(title="new movie", release_date="1-1-2020")
        newMovie.insert()
        movie_id = newMovie.id
        res = self.client().delete(f'/movies/{movie_id}')
        data = json.loads(res.data)
        self.assertEqual(data['success'], True)
        self.assertEqual(data['movie'], movie_id)
        self.assertEqual(res.status_code, 200)
    # Non-numeric ids do not match the route converter -> 404.
    def test_delete_actor_404(self):
        res = self.client().delete('/actors/id')
        data = json.loads(res.data)
        self.assertEqual(data['success'], False)
        self.assertEqual(res.status_code, 404)
    def test_delete_movie_404(self):
        res = self.client().delete('/movies/id')
        data = json.loads(res.data)
        self.assertEqual(data['success'], False)
        self.assertEqual(res.status_code, 404)
    # --- PATCH (update) endpoints -------------------------------------
    def test_update_actor(self):
        newActor = Actors(name="new actor", age="15", gender="male")
        newActor.insert()
        actor_id = newActor.id
        actor_patch = {
            "name": "updated name"
        }
        res = self.client().patch(f'/actors/{actor_id}', json=actor_patch)
        data = json.loads(res.data)
        self.assertEqual(data['success'], True)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['actor']['name'], actor_patch['name'])
    def test_update_movie(self):
        newMovie = Movies(title="new title", release_date="1-1-2020")
        newMovie.insert()
        movie_id = newMovie.id
        movie_patch = {
            "title": "updated title"
        }
        res = self.client().patch(f'/movies/{movie_id}', json=movie_patch)
        data = json.loads(res.data)
        self.assertEqual(data['success'], True)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['movie']['title'], movie_patch['title'])
    def test_update_actor_404(self):
        actor_patch = {
            "name": "updated name"
        }
        res = self.client().patch('/actors/id', json=actor_patch)
        data = json.loads(res.data)
        self.assertEqual(data['success'], False)
        self.assertEqual(res.status_code, 404)
    def test_update_movie_404(self):
        movie_patch = {
            "title": "updated title"
        }
        res = self.client().patch('/movies/id', json=movie_patch)
        data = json.loads(res.data)
        self.assertEqual(data['success'], False)
        self.assertEqual(res.status_code, 404)
# Make the tests conveniently executable
# (allows `python test_app.py` without a separate test runner)
if __name__ == "__main__":
    unittest.main()
| true |
7b393b1f5808424f965e1fd65e943a9ecd1707a6 | Python | diana-md/Data-Analytics-Bootcamp-Projects | /10.WebScraping/scrape_mars.py | UTF-8 | 2,840 | 2.5625 | 3 | [] | no_license | import pandas as pd
import requests
from bs4 import BeautifulSoup as bs
from splinter import Browser
import flask
def scrape():
    """Scrape Mars-related data from several sites with a Selenium browser.

    Returns a dict with keys:
      Mars_News      -- {"Title": [...], "Paragraph": [...]}
      Featured_Image -- URL string of the JPL featured image
      Weather        -- latest Mars weather tweet text
      Mars_Facts     -- HTML table string
      Hemisphere     -- list of {"title", "img_url"} dicts
    """
    scrape_dict = {}
    # Open Browser
    executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
    browser = Browser(
        'chrome', **executable_path, headless=False)
    # Fix: guarantee the chromedriver session is released even when a
    # scrape step raises (the original leaked it on any exception).
    try:
        # NASA Mars News
        nasa_url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
        browser.visit(nasa_url)
        soup_nasa = bs(browser.html)
        articles_dict = {"Title": [], "Paragraph": []}
        articles = soup_nasa.find_all("li", class_="slide")
        for article in articles:
            articles_dict["Title"].append(
                article.find('div', class_="content_title").text)
            articles_dict["Paragraph"].append(article.find(
                'div', class_="article_teaser_body").text)
        scrape_dict["Mars_News"] = articles_dict
        # JPL Mars Space Images - Featured Image
        jpl_img_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
        browser.visit(jpl_img_url)
        soup_jpl = bs(browser.html)
        start_url = 'https://www.jpl.nasa.gov'
        featured_image_url = start_url + \
            soup_jpl.find("a", class_="button")["data-fancybox-href"]
        scrape_dict["Featured_Image"] = featured_image_url
        # Mars Weather (drop the trailing <a> link from the tweet text)
        weather_url = "https://twitter.com/marswxreport?lang=en"
        browser.visit(weather_url)
        soup_weather = bs(browser.html)
        tweet = soup_weather.find('div', class_='content').find('p')
        unwanted = tweet.find('a')
        unwanted.extract()
        scrape_dict["Weather"] = tweet.text
        # Mars Facts
        facts_url = "https://space-facts.com/mars/"
        facts_df = pd.read_html(facts_url)[0]
        facts_df = facts_df.rename(columns={0: "description", 1: "value"})
        facts_df = facts_df.set_index("description")
        scrape_dict["Mars_Facts"] = facts_df.to_html()
        # Mars Hemispheres
        astrogeology_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
        browser.visit(astrogeology_url)
        soup_astro = bs(browser.html)
        imgs = soup_astro.find_all("div", class_="item")
        hemisphere_hrefs = []
        for div in imgs:
            hemisphere_hrefs.append(div.find('a')['href'])
        hemisphere_image_urls = []
        start_url = "https://astrogeology.usgs.gov"
        for href in hemisphere_hrefs:
            url = start_url + href
            browser.visit(url)
            soup_hemisphere = bs(browser.html)
            title = soup_hemisphere.find('h2').text
            # Fix: str.strip(" Enhanced") strips *characters* from the
            # set {' ','E','n','h','a','c','e','d'} off both ends and
            # mangled titles (e.g. 'Cerberus Hemisphere Enhanced' ->
            # 'Cerberus Hemispher'); remove the literal suffix instead.
            if title.endswith(" Enhanced"):
                title = title[:-len(" Enhanced")]
            hemisphere = {"title": title,
                          "img_url": soup_hemisphere.find('div', class_="downloads").find("a")["href"]}
            hemisphere_image_urls.append(hemisphere)
        scrape_dict["Hemisphere"] = hemisphere_image_urls
    finally:
        browser.quit()
    return scrape_dict
| true |
117d4b44f04c7adf3e5ce3f22324adf7a967a378 | Python | fastso/learning-python | /atcoder/contest/solved/abc153_e.py | UTF-8 | 342 | 2.703125 | 3 | [] | no_license | h, n = map(int, input().split())
# AtCoder ABC153 E: minimum total cost to deal at least h damage.
# dp[hp] = cheapest cost to deal hp damage; the table is padded out to
# h + max(damage) so an "overkill" final spell stays representable.
spells = [list(map(int, input().split())) for _ in range(n)]
damages = [spell[0] for spell in spells]
INF = float('inf')
dp = [INF] * (h + max(damages) + 1)
for hp in range(1, len(dp)):
    for dmg, cost in spells:
        if hp - dmg > 0:
            dp[hp] = min(dp[hp], dp[hp - dmg] + cost)
        else:
            dp[hp] = min(dp[hp], cost)
print(dp[h])
| true |
92f8b44e102a6adc1889c05892804f657ca4fe25 | Python | alon-albalak/TLiDB | /dataset_preprocessing/DailyDialog/generate_instance_ids.py | UTF-8 | 3,549 | 2.515625 | 3 | [
"MIT"
] | permissive | import json
# Maps each annotation task name (as it appears in a dialogue's
# dialogue_metadata) to the structural category that decides where its
# instance ids are attached (per turn, per dialogue, per QA span, ...).
TASK_TYPE_MAP={
    "emotion_recognition": "utt_level_classification",
    "dialogue_act_classification": "utt_level_classification",
    "topic_classification": "dial_level_classification",
    "causal_emotion_span_extraction": "span_extraction",
    "causal_emotion_entailment": "causal_emotion_entailment",
    "dialogue_nli": "dialogue_nli",
    "dialogue_reasoning_span_extraction": "dialogue_reasoning_span_extraction",
    "dialogue_reasoning_multiple_choice_span_selection": "multiple_choice",
    "dialogue_reasoning_commonsense_relation_prediction": "relation_extraction",
    "adversarial_response_selection": "adversarial_response_selection"
}
def generate_instance_ids(dataset):
    """Attach a unique, deterministic instance_id to every task sample.

    Mutates *dataset* in place.  For each dialogue and each annotated
    task, the id is derived from the dialogue_id plus a task-specific
    suffix (turn number, sample index, context/QA indices, ...), so
    re-running this function is reproducible.
    """
    for datum in dataset['data']:
        for task in datum['dialogue_metadata']:
            # Bookkeeping key, not an annotation task.
            if task == "original_data_partition":
                continue
            task_type = TASK_TYPE_MAP[task]
            if task_type == "utt_level_classification":
                # One label per turn -> id encodes the turn number.
                for turn in datum['dialogue']:
                    if task in turn:
                        instance_id = f"{datum['dialogue_id']}_t{turn['turn_id']}"
                        turn[task] = {"label": turn[task], "instance_id": instance_id}
            elif task_type == "dial_level_classification":
                # One label per dialogue -> the dialogue_id itself suffices.
                instance_id = datum['dialogue_id']
                datum[task] = {"label": datum[task], "instance_id": instance_id}
            elif task_type == "span_extraction":
                # Rename the pre-existing 'id' field to 'instance_id'.
                for qas in datum[task]:
                    for qa in qas['qas']:
                        qa['instance_id'] = qa['id']
                        del qa['id']
            elif task_type == "causal_emotion_entailment":
                for i, sample in enumerate(datum[task]):
                    instance_id = f"{datum['dialogue_id']}_cee{i}"
                    sample['instance_id'] = instance_id
            elif task_type == "dialogue_nli":
                for i, sample in enumerate(datum[task]):
                    instance_id = f"{datum['dialogue_id']}_dnli{i}"
                    sample['instance_id'] = instance_id
            elif task_type == "dialogue_reasoning_span_extraction":
                # id encodes both the context index and the QA index.
                for i, qas in enumerate(datum[task]):
                    for j, qa in enumerate(qas['qas']):
                        instance_id = f"{datum['dialogue_id']}_context{i}_qa{j}"
                        qa['instance_id'] = instance_id
            elif task_type == "multiple_choice":
                for i, q in enumerate(datum[task]['mcqs']):
                    instance_id = f"{datum['dialogue_id']}_mcq{i}"
                    q['instance_id'] = instance_id
            elif task_type == "relation_extraction":
                for i, sample in enumerate(datum[task]):
                    instance_id = f"{datum['dialogue_id']}_re{i}"
                    sample['instance_id'] = instance_id
            elif task_type == "adversarial_response_selection":
                # id encodes the context index and the triple index.
                for i, sample in enumerate(datum[task]):
                    for j, triple in enumerate(sample['samples']):
                        instance_id = f"{datum['dialogue_id']}_context{i}_sample{j}"
                        triple['instance_id'] = instance_id
            else:
                raise ValueError(f"Unknown task type: {task_type}")
TLiDB_path = "TLiDB_DailyDialog/TLiDB_DailyDialog.json"
# Load, annotate and rewrite the DailyDialog data in place.
# Fix: the original read via json.load(open(TLiDB_path, "r")) and never
# closed the input handle; both handles are now managed by 'with'.
with open(TLiDB_path, "r") as f:
    dailydialog_data = json.load(f)
generate_instance_ids(dailydialog_data)
with open(TLiDB_path, "w") as f:
    json.dump(dailydialog_data, f, indent=2)
ce23d0ce1f278a6e08bbf5216d09739b04ba3069 | Python | CaimeiWang/python100 | /001.py | UTF-8 | 271 | 3.671875 | 4 | [] | no_license | #encoding:'utf-8'
#有四个数字:1、2、3、4,能组成多少个互不相同且无重复数字的三位数?各是多少?
for i in range(1,5):
for j in range(1,5):
for k in range(1,5):
if(i!=j and i!=k):
print(i,j,k)
| true |
8421df571a6b2c972bd1854e3710755c9c112b77 | Python | berquist/sgr_analysis | /sgr_analysis/analysis.py | UTF-8 | 33,133 | 2.546875 | 3 | [
"BSD-3-Clause"
] | permissive | """analysis.py: Where most of the analysis for the 'droplet' snapshots
is.
"""
import pickle
import csv
from copy import deepcopy
from functools import partial
import numpy as np
import scipy.stats as sps
from sgr_analysis.analysis_utils import filter_snapshots, get_single_snapshot_results, mangle_dict_keys, pprint_linregress, read_snapshot_file, slice
from sgr_analysis.model_hamiltonian_frequencies import distance
def condon():
    """Test whether the Condon approximation is appropriate.

    Scatters the nu_3 intensity against the C-O bond-length sum for
    every QM/MM partitioning, writes linear-regression statistics to
    'condon_analysis_linear_regression.csv', and optionally saves the
    figures (including the no-charge-transfer overlay).

    Relies on module-level state set up by the driver (geometries_d,
    frequencies_CO2_d, intensities_CO2_d, snapnums_frequencies_d,
    labels, markers, colors, possible_keys, args, plt, mpl, ...).
    """
    fig, ax = plt.subplots()
    frequencies_all = []
    intensities_all = []
    # Fix: 'with' guarantees the CSV is closed even if a regression or
    # plotting call raises (the original leaked the handle on error).
    with open('condon_analysis_linear_regression.csv', 'w') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow([
            '# QM',
            '# MM',
            '# points',
            'slope',
            'intercept',
            'rsq',
        ])
        # Sum of the two C-O bond lengths for every snapshot geometry;
        # used as the abscissa of the scatter plots (the unused O1-O2
        # distance computation was removed).
        list_l12 = []
        geometries = geometries_d[0][0]
        C, O1, O2 = 0, 1, 2
        for geometry in geometries:
            d_C_O1 = distance(geometry[C], geometry[O1])
            d_C_O2 = distance(geometry[C], geometry[O2])
            list_l12.append(d_C_O1 + d_C_O2)
        list_l12 = np.array(list_l12)
        for n_qm in sorted(frequencies_CO2_d):
            print("Forming Condon approximation plot for {}".format(labels[n_qm]))
            frequencies_single_qm_all_mm = []
            intensities_single_qm_all_mm = []
            geometries_single_qm_all_mm = []
            for n_mm in possible_keys:
                f = frequencies_CO2_d[n_qm][n_mm]
                i = intensities_CO2_d[n_qm][n_mm]
                s = snapnums_frequencies_d[n_qm][n_mm]
                # Select only the geometry entries whose snapshots have
                # frequency results.
                indices = [(snapnum - 1) for snapnum in s]
                g = list_l12[indices]
                assert len(f) == len(i) == len(g)
                frequencies_single_qm_all_mm.extend(f)
                intensities_single_qm_all_mm.extend(i)
                geometries_single_qm_all_mm.extend(g)
                frequencies_all.extend(f)
                intensities_all.extend(i)
                print('{} QM/{} MM'.format(n_qm, n_mm))
                try:
                    slope, intercept, rsq = pprint_linregress(f, i)
                    csvwriter.writerow([n_qm, n_mm, len(f), slope, intercept, rsq])
                except Exception:
                    # Regression can fail on empty/degenerate samples.
                    # Fix: the original bare 'except:' also swallowed
                    # KeyboardInterrupt and SystemExit.
                    pass
            assert len(frequencies_single_qm_all_mm) == len(intensities_single_qm_all_mm)
            ax.scatter(geometries_single_qm_all_mm,
                       intensities_single_qm_all_mm,
                       marker=markers[n_qm],
                       label=labels[n_qm],
                       color=colors[n_qm])
            print('{} QM/all MM'.format(n_qm))
            slope, intercept, rsq = pprint_linregress(frequencies_single_qm_all_mm,
                                                      intensities_single_qm_all_mm)
            csvwriter.writerow([n_qm, 'all', len(frequencies_single_qm_all_mm), slope, intercept, rsq])
        assert len(frequencies_all) == len(intensities_all)
        print('all QM/all MM')
        slope, intercept, rsq = pprint_linregress(frequencies_all, intensities_all)
        csvwriter.writerow(['all', 'all', len(frequencies_all), slope, intercept, rsq])
    ax.set_ylim((0.0, 1000.0))
    y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
    ax.yaxis.set_major_formatter(y_formatter)
    ax.tick_params(direction='out')
    # NOTE(review): the x data above is the C-O bond-length sum, but the
    # axis label still says frequency -- kept as-is, confirm intent.
    ax.set_xlabel(r"$\nu_{3}$ frequency (cm$^{-1}$)")
    ax.set_ylabel(r"$\nu_{3}$ intensity (km/mol)")
    ax.legend(loc='lower right',
              fancybox=True,
              framealpha=0.50,
              numpoints=1,
              scatterpoints=1)
    if args.do_condon_plots:
        fig.savefig('condon_approximation.pdf', bbox_inches='tight')
    # now add the no CT data
    if args.include_noCT:
        for n_qm in sorted(frequencies_noCT_CO2_d):
            frequencies_single_qm_all_mm = []
            intensities_single_qm_all_mm = []
            for n_mm in possible_keys:
                f = frequencies_noCT_CO2_d[n_qm][n_mm]
                i = intensities_noCT_CO2_d[n_qm][n_mm]
                assert len(f) == len(i)
                frequencies_single_qm_all_mm.extend(f)
                intensities_single_qm_all_mm.extend(i)
                frequencies_all.extend(f)
                intensities_all.extend(i)
                print('{} QM/{} MM'.format(n_qm, n_mm))
            assert len(frequencies_single_qm_all_mm) == len(intensities_single_qm_all_mm)
            ax.scatter(frequencies_single_qm_all_mm,
                       intensities_single_qm_all_mm,
                       marker=markers_noCT[n_qm],
                       label=labels_noCT[n_qm],
                       color=colors_noCT[n_qm])
            print('{} QM/all MM'.format(n_qm))
        ax.legend(loc='lower right',
                  fancybox=True,
                  framealpha=0.50,
                  numpoints=1,
                  scatterpoints=1)
        if args.do_condon_plots:
            fig.savefig('condon_approximation_noCT.pdf', bbox_inches='tight')
    plt.close(fig)
    return
def do_result_convergence_plots(results_d,
                                name='frequency',
                                n_qm_start=0,
                                n_qm_end=2,
                                func_to_apply=lambda x: x,
                                ylabel=r"$\nu_{3}$ frequency (cm$^{-1}$)",
                                labels=None,
                                colors=None,
                                errorbars=False):
    """Plot how mean results converge with the number of MM (and QM) IL pairs.

    Produces three figures:
      1. '{name}_convergence.pdf'      -- mean vs #MM pairs, symlog x-axis
      2. '{name}_convergence_{k}.pdf'  -- same data, linear x-axis, truncated
      3. '{name}_convergence_n_qm.pdf' -- mean vs #QM pairs, one line per #MM

    func_to_apply is mapped over every raw result before averaging.
    Uses module-level 'possible_keys', 'markers', 'plt' and 'mpl'.
    """
    # 'slice' here is the predicate imported from analysis_utils (it
    # shadows the builtin); it keeps n_qm values in [n_qm_start, n_qm_end].
    slice_partial = partial(slice, start=n_qm_start, end=n_qm_end + 1)
    print('Doing {} convergence plots'.format(name))
    fig, ax = plt.subplots()
    for n_qm in filter(slice_partial, sorted(results_d)):
        if labels:
            print("Doing plots for {}".format(labels[n_qm]))
        else:
            print("Doing plots of some kind.")
        ticks = []
        results_single_qm_all_mm = []
        results_single_qm_all_mm_mean = []
        results_single_qm_all_mm_stdev = []
        for n_mm in possible_keys:
            results_single_qm_single_mm = [func_to_apply(x) for x in results_d[n_qm][n_mm]]
            # Only plot n_mm points that actually have data.
            if len(results_single_qm_single_mm) > 0:
                results_single_qm_all_mm.extend(results_single_qm_single_mm)
                ticks.append(n_mm)
                results_single_qm_all_mm_mean.append(np.mean(results_single_qm_single_mm))
                results_single_qm_all_mm_stdev.append(np.std(results_single_qm_single_mm))
        # What's a cleaner way to do this...
        if markers:
            marker = markers[n_qm]
        else:
            marker = None
        if labels:
            label = labels[n_qm]
        else:
            label = None
        if colors:
            color = colors[n_qm]
        else:
            color = None
        # Make sure the data is offset properly.
        # (shift each curve by n_qm so total pair counts line up)
        ticks = np.array(ticks)
        ticks += n_qm
        # Undo the re-categorization of the last point as always being 256.
        ticks[-1] -= n_qm
        if errorbars:
            ax.errorbar(ticks,
                        results_single_qm_all_mm_mean,
                        yerr=results_single_qm_all_mm_stdev,
                        marker=marker,
                        label=label,
                        color=color)
        else:
            ax.plot(ticks,
                    results_single_qm_all_mm_mean,
                    marker=marker,
                    label=label,
                    color=color)
    # NOTE(review): 'basex' is the pre-matplotlib-3.3 spelling (now
    # 'base'); confirm the pinned matplotlib version before changing.
    ax.set_xscale('symlog', basex=2)
    ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
    ax.yaxis.set_major_formatter(y_formatter)
    ax.tick_params(direction='out')
    ax.set_xlabel("# IL pairs in solvent box")
    ax.set_ylabel(ylabel)
    ax.legend(loc='best', fancybox=True, framealpha=0.50)
    fig.savefig('{}_convergence.pdf'.format(name), bbox_inches='tight')
    # Second view of the same axes: linear scale, truncated to the first
    # len(possible_keys)-4 tick values.
    ax.set_xscale('linear')
    rlim = -5
    ax.set_xticks(possible_keys[:rlim + 1])
    ax.set_xlim((possible_keys[0], possible_keys[rlim]))
    fig.savefig('{}_convergence_{}.pdf'.format(name, possible_keys[rlim]),
                bbox_inches='tight')
    plt.close(fig)
    # Third figure: convergence with respect to the number of QM pairs,
    # one curve per n_mm value.
    fig, ax = plt.subplots()
    ticks = list(filter(slice_partial, sorted(results_d)))
    for n_mm in possible_keys:
        results = [np.mean([func_to_apply(x)
                            for x in results_d[n_qm][n_mm]])
                   for n_qm in ticks]
        ax.plot(ticks,
                results,
                marker='o',
                label=n_mm)
    ax.set_xticks(ticks)
    ax.set_xticklabels(ticks)
    ax.yaxis.set_major_formatter(y_formatter)
    ax.tick_params(direction='out')
    ax.set_xlabel("# QM IL pairs")
    ax.set_ylabel('mean {}'.format(ylabel))
    ax.legend(loc='best', fancybox=True, framealpha=0.50)
    fig.savefig('{}_convergence_n_qm.pdf'.format(name), bbox_inches='tight')
    plt.close(fig)
    return
def do_result_convergence_plots_gaps(results_d,
                                     name='frequency',
                                     func_to_apply=lambda x: x,
                                     ylabel=r'$\nu_{3}$ frequency (cm$^{-1}$)',
                                     symbol='\omega'):
    """Plot the gap between mean results of consecutive QM region sizes
    (1-0, 2-1, 3-2 QM pairs) against the number of MM IL pairs.

    Saves '<name>_convergence_gaps.pdf' (symlog x axis) and a zoomed
    linear-axis variant.

    Args:
        results_d: dict mapping n_qm -> n_mm -> list of raw results.
        name: basename for the output PDF files.
        func_to_apply: transformation applied to each result list
            before averaging (identity by default).
        ylabel: quantity label used on the y axis.
        symbol: LaTeX symbol used in the legend entries.
    """
    fig, ax = plt.subplots()
    # (n_qm_low, n_qm_high, marker, color) for each consecutive pair of
    # QM region sizes; the three hand-unrolled try blocks of the old
    # version collapse into one loop.
    pair_styles = (
        (0, 1, 's', 'red'),
        (1, 2, 'p', 'green'),
        (2, 3, '*', 'blue'),
    )
    for (n_lo, n_hi, marker, color) in pair_styles:
        gaps = []
        n_mm_ticks = []
        for n_mm in possible_keys:
            # Not every QM/MM combination was calculated; skip missing
            # ones explicitly.  The old bare `except:` also swallowed
            # KeyboardInterrupt and genuine bugs.
            try:
                gap = (np.mean(func_to_apply(results_d[n_hi][n_mm]))
                       - np.mean(func_to_apply(results_d[n_lo][n_mm])))
            except (KeyError, IndexError):
                continue
            gaps.append(gap)
            n_mm_ticks.append(n_mm)
        ax.plot(n_mm_ticks, gaps, marker=marker, color=color,
                label='$\Delta {symbol}_{{{n_hi}-{n_lo}\,\mathrm{{QM}}}}$'.format(
                    symbol=symbol, n_hi=n_hi, n_lo=n_lo))
    ax.set_xscale('symlog', basex=2)
    ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    # Flip the y axis so larger-magnitude (more negative) gaps sit at
    # the top of the plot.
    ax.set_ylim(ax.get_ylim()[::-1])
    y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
    ax.yaxis.set_major_formatter(y_formatter)
    ax.tick_params(direction='out')
    ax.set_xlabel('# IL pairs treated as point charges')
    ax.set_ylabel(r'difference in {}'.format(ylabel))
    ax.legend(loc='best', fancybox=True, framealpha=0.50)
    filename = '{}_convergence_gaps.pdf'.format(name)
    print('Saving {}'.format(filename))
    fig.savefig(filename, bbox_inches='tight')
    # Re-save with a linear x axis, zoomed to the smaller MM counts.
    ax.set_xscale('linear')
    rlim = -5
    ax.set_xticks(possible_keys[:rlim + 1])
    ax.set_xlim((possible_keys[0], possible_keys[rlim]))
    filename = '{}_convergence_{}_gaps.pdf'.format(name, possible_keys[rlim])
    print('Saving {}'.format(filename))
    fig.savefig(filename, bbox_inches='tight')
    plt.close(fig)
    return
def do_result_convergence_analysis(results_d,
                                   name='frequency',
                                   n_qm_start=0,
                                   n_qm_end=2,
                                   func_to_apply=lambda x: x):
    """Write summary statistics for every (# QM, # MM) combination to
    '<name>_convergence.csv'.

    For each QM count in [n_qm_start, n_qm_end] one row is written per
    MM count (mean/median/mode/min/max/range/stdev of the snapshot
    results), followed by one 'all' row aggregating every MM count.

    Args:
        results_d: dict mapping n_qm -> n_mm -> list of raw results.
        name: basename for the output CSV file.
        n_qm_start, n_qm_end: inclusive range of QM counts to analyze.
        func_to_apply: transformation applied to each raw result before
            taking statistics (identity by default).
    """
    slice_partial = partial(slice, start=n_qm_start, end=n_qm_end + 1)
    print('Doing {} convergence analysis'.format(name))
    # Context manager guarantees the CSV file is closed (and flushed)
    # even if the analysis raises partway through; the old explicit
    # open()/close() pair leaked the handle on error.
    with open('{}_convergence.csv'.format(name), 'w') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow([
            '# QM',
            '# MM',
            '# points',
            'mean',
            'median',
            'mode',
            'min',
            'max',
            'range',
            'stdev',
        ])
        for n_qm in filter(slice_partial, sorted(results_d)):
            print("Doing analysis for {}".format(labels[n_qm]))
            # All results for this QM count, pooled over every MM count.
            results_single_qm_all_mm = []
            for n_mm in possible_keys:
                results_single_qm_single_mm = [func_to_apply(x) for x in results_d[n_qm][n_mm]]
                if len(results_single_qm_single_mm) > 0:
                    results_single_qm_all_mm.extend(results_single_qm_single_mm)
                    val_min = min(results_single_qm_single_mm)
                    val_max = max(results_single_qm_single_mm)
                    # One row per (n_qm, n_mm) combination.
                    csvwriter.writerow([
                        n_qm,
                        n_mm,
                        len(results_single_qm_single_mm),
                        np.mean(results_single_qm_single_mm),
                        np.median(results_single_qm_single_mm),
                        sps.mode(results_single_qm_single_mm)[0][0],
                        val_min,
                        val_max,
                        val_max - val_min,
                        np.std(results_single_qm_single_mm),
                    ])
            # Summary row over all MM counts for this QM count.
            val_min = min(results_single_qm_all_mm)
            val_max = max(results_single_qm_all_mm)
            csvwriter.writerow([
                n_qm,
                'all',
                len(results_single_qm_all_mm),
                np.mean(results_single_qm_all_mm),
                np.median(results_single_qm_all_mm),
                sps.mode(results_single_qm_all_mm)[0][0],
                val_min,
                val_max,
                val_max - val_min,
                np.std(results_single_qm_all_mm),
            ])
    return
def plot_single_snapshot_results(snapnum,
                                 snapnums_results_d,
                                 results_d,
                                 name='frequency',
                                 func_to_apply=lambda x: x,
                                 ylabel=r"$\nu_{3}$ frequency (cm$^{-1}$)",
                                 inp_fig=None,
                                 inp_ax=None,
                                 do_manip_fig=True,
                                 do_manip_ax=True):
    """Plot, for a single snapshot, the result versus the total number
    of IL pairs included (QM + MM), one line per QM region size.

    Args:
        snapnum: snapshot number to plot.
        snapnums_results_d, results_d: snapshot-number and result
            dicts, both keyed n_qm -> n_mm.
        name: basename for the saved PDF files.
        func_to_apply: transformation applied to each raw result.
        ylabel: y-axis label.
        inp_fig, inp_ax: optional existing figure/axes to draw into.
        do_manip_fig: if True, save the figure(s) to disk.
        do_manip_ax: if True, apply axis formatting (scales, labels,
            legend) and produce the zoomed linear-axis view.
    """
    results_snap_d = get_single_snapshot_results(snapnum, snapnums_results_d, results_d)
    fig, ax = plt.subplots()
    if inp_fig:
        fig = inp_fig
    if inp_ax:
        ax = inp_ax
    # Index of the largest x tick used for the zoomed linear plot.
    # Defined up front: the original set it only under do_manip_ax,
    # so the do_manip_fig-only path died with a NameError below.
    rlim = -5
    for n_qm in sorted(results_snap_d):
        ticks = []
        results = []
        for n_mm in possible_keys:
            if len(results_snap_d[n_qm][n_mm]) > 0:
                # The solvent box holds 256 IL pairs in total, so cap
                # the combined tick value there.
                if n_mm + n_qm >= 256:
                    ticks.append(256)
                else:
                    ticks.append(n_mm + n_qm)
                results.append(func_to_apply(results_snap_d[n_qm][n_mm][0]))
        ax.plot(ticks,
                results,
                marker=markers[n_qm],
                label=labels[n_qm],
                color=colors[n_qm])
    if do_manip_ax:
        ax.set_xscale('symlog', basex=2)
        ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
        y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
        ax.yaxis.set_major_formatter(y_formatter)
        ax.tick_params(direction='out')
        ax.set_xlabel("total # of IL pairs included")
        ax.set_ylabel(ylabel)
        ax.legend(loc='best', fancybox=True, framealpha=0.50)
    if do_manip_fig:
        filename = '{}_convergence_snap{}.pdf'.format(name, snapnum)
        print('Saving {}'.format(filename))
        fig.savefig(filename, bbox_inches='tight')
    if do_manip_ax:
        ax.set_xscale('linear')
        ax.set_xticks(possible_keys[:rlim + 1])
        ax.set_xlim((possible_keys[0], possible_keys[rlim]))
    if do_manip_fig:
        filename = '{}_convergence_snap{}_{}.pdf'.format(name, snapnum, possible_keys[rlim])
        print('Saving {}'.format(filename))
        fig.savefig(filename, bbox_inches='tight')
    plt.close(fig)
    return
def plot_single_snapshot_results_qm_gaps(snapnum,
                                         snapnums_results_d,
                                         results_d,
                                         name='frequency',
                                         func_to_apply=lambda x: x,
                                         ylabel=r'$\nu_{3}$ frequency (cm$^{-1}$)',
                                         symbol='\omega'):
    """Plot, for a single snapshot, the gap between results of
    consecutive QM region sizes (1-0, 2-1, 3-2 QM pairs) against the
    number of MM IL pairs.

    Saves '<name>_convergence_snap<snapnum>_gaps.pdf' and a zoomed
    linear-axis variant.

    Args:
        snapnum: snapshot number to plot.
        snapnums_results_d, results_d: snapshot-number and result
            dicts, both keyed n_qm -> n_mm.
        name: basename for the output PDF files.
        func_to_apply: transformation applied to each raw result.
        ylabel: quantity label used on the y axis.
        symbol: LaTeX symbol used in the legend entries.
    """
    results_snap_d = get_single_snapshot_results(snapnum,
                                                 snapnums_results_d,
                                                 results_d)
    fig, ax = plt.subplots()
    # (n_qm_low, n_qm_high, marker, color) for each consecutive pair of
    # QM region sizes; replaces three copy-pasted try blocks.
    pair_styles = (
        (0, 1, 's', 'red'),
        (1, 2, 'p', 'green'),
        (2, 3, '*', 'blue'),
    )
    for (n_lo, n_hi, marker, color) in pair_styles:
        gaps = []
        n_mm_ticks = []
        for n_mm in possible_keys:
            # Missing QM/MM combinations (absent key or empty result
            # list) are skipped explicitly.  The old bare `except:`
            # also swallowed KeyboardInterrupt and genuine bugs.
            try:
                gap = (func_to_apply(results_snap_d[n_hi][n_mm][0])
                       - func_to_apply(results_snap_d[n_lo][n_mm][0]))
            except (KeyError, IndexError):
                continue
            gaps.append(gap)
            n_mm_ticks.append(n_mm)
        ax.plot(n_mm_ticks, gaps, marker=marker, color=color,
                label='$\Delta {symbol}_{{{n_hi}-{n_lo}\,\mathrm{{QM}}}}$'.format(
                    symbol=symbol, n_hi=n_hi, n_lo=n_lo))
    ax.set_xscale('symlog', basex=2)
    ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    # Flip the y axis so larger-magnitude gaps sit at the top.
    ax.set_ylim(ax.get_ylim()[::-1])
    y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
    ax.yaxis.set_major_formatter(y_formatter)
    ax.tick_params(direction='out')
    ax.set_xlabel('# IL pairs treated as point charges')
    ax.set_ylabel(r'difference in {}'.format(ylabel))
    ax.legend(loc='best', fancybox=True, framealpha=0.50)
    filename = '{}_convergence_snap{}_gaps.pdf'.format(name, snapnum)
    print('Saving {}'.format(filename))
    fig.savefig(filename, bbox_inches='tight')
    # Re-save with a linear x axis, zoomed to the smaller MM counts.
    ax.set_xscale('linear')
    rlim = -5
    ax.set_xticks(possible_keys[:rlim + 1])
    ax.set_xlim((possible_keys[0], possible_keys[rlim]))
    filename = '{}_convergence_snap{}_{}_gaps.pdf'.format(name, snapnum, possible_keys[rlim])
    print('Saving {}'.format(filename))
    fig.savefig(filename, bbox_inches='tight')
    plt.close(fig)
    return
def getargs():
    """Parse and return this script's command-line arguments.

    All options are boolean flags that default to False.
    """
    import argparse
    parser = argparse.ArgumentParser()
    # Every option is a simple on/off switch, so register them in bulk.
    for flag in ("--mpl-usetex",
                 "--do-condon-plots",
                 "--do-snapshot-plots",
                 "--include-noCT"):
        parser.add_argument(flag, action="store_true")
    return parser.parse_args()
if __name__ == '__main__':
    args = getargs()
    import matplotlib as mpl
    if args.mpl_usetex:
        # matplotlib.rc() requires the rc group as its first positional
        # argument; the old call `mpl.rc(usetex=True)` raised
        # "TypeError: rc() missing 1 required positional argument".
        mpl.rc('text', usetex=True)
    # Select the non-interactive Agg backend before pyplot is imported.
    mpl.use("Agg")
    import matplotlib.pyplot as plt
# Read in the pickle files that contain all the raw data.
with open('frequencies.pypickle', 'rb') as picklefile:
frequencies_CO2_d = pickle.load(picklefile)
with open('intensities.pypickle', 'rb') as picklefile:
intensities_CO2_d = pickle.load(picklefile)
with open('frequencies_noCT.pypickle', 'rb') as picklefile:
frequencies_noCT_CO2_d = pickle.load(picklefile)
with open('intensities_noCT.pypickle', 'rb') as picklefile:
intensities_noCT_CO2_d = pickle.load(picklefile)
with open('dipoles.pypickle', 'rb') as picklefile:
dipoles_d = pickle.load(picklefile)
with open('geometries.pypickle', 'rb') as picklefile:
geometries_d = pickle.load(picklefile)
with open('snapnums_frequencies.pypickle', 'rb') as picklefile:
snapnums_frequencies_d = pickle.load(picklefile)
with open('snapnums_frequencies_noCT.pypickle', 'rb') as picklefile:
snapnums_frequencies_noCT_d = pickle.load(picklefile)
with open('snapnums_dipoles.pypickle', 'rb') as picklefile:
snapnums_dipoles_d = pickle.load(picklefile)
with open('snapnums_geometries.pypickle', 'rb') as picklefile:
snapnums_geometries_d = pickle.load(picklefile)
# Until I come up with a better idea, here's where I mangle some
# of the keys (253, 254, 255, 256) into 256.
# Make a copy beforehand, just in case...
frequencies_CO2_d_unmangled = deepcopy(frequencies_CO2_d)
intensities_CO2_d_unmangled = deepcopy(intensities_CO2_d)
frequencies_noCT_CO2_d_unmangled = deepcopy(frequencies_noCT_CO2_d)
intensities_noCT_CO2_d_unmangled = deepcopy(intensities_noCT_CO2_d)
dipoles_d_unmangled = deepcopy(dipoles_d)
geometries_d_unmangled = deepcopy(geometries_d)
snapnums_frequencies_d_unmangled = deepcopy(snapnums_frequencies_d)
snapnums_frequencies_noCT_d_unmangled = deepcopy(snapnums_frequencies_noCT_d)
snapnums_dipoles_d_unmangled = deepcopy(snapnums_dipoles_d)
snapnums_geometries_d_unmangled = deepcopy(snapnums_geometries_d)
# Do the mangling.
frequencies_CO2_d = mangle_dict_keys(frequencies_CO2_d)
intensities_CO2_d = mangle_dict_keys(intensities_CO2_d)
frequencies_noCT_CO2_d = mangle_dict_keys(frequencies_noCT_CO2_d)
intensities_noCT_CO2_d = mangle_dict_keys(intensities_noCT_CO2_d)
dipoles_d = mangle_dict_keys(dipoles_d)
geometries_d = mangle_dict_keys(geometries_d)
snapnums_frequencies_d = mangle_dict_keys(snapnums_frequencies_d)
snapnums_frequencies_noCT_d = mangle_dict_keys(snapnums_frequencies_noCT_d)
snapnums_dipoles_d = mangle_dict_keys(snapnums_dipoles_d)
possible_keys = list(range(0, 18, 2)) + [32, 64, 128, 256]
markers = [
'o',
's',
'D',
'*',
]
markers_noCT = markers
labels = [
'0 QM pairs',
'1 QM pair',
'2 QM pairs',
'3 QM pairs',
]
labels_noCT = [
'',
'1 QM pair (no CT)',
'2 QM pair (no CT)',
'3 QM pair (no CT)',
]
colors = [
'black',
'red',
'green',
'blue',
]
colors_noCT = [
'',
'orange',
'lime',
'cyan',
]
###################################
# Do some simple statistical analysis on the data sets and dump
# them to CSV files.
do_result_convergence_analysis(frequencies_CO2_d,
name='frequency',
n_qm_start=0,
n_qm_end=3)
# do_result_convergence_analysis(intensities_CO2_d,
# name='intensity',
# n_qm_start=0,
# n_qm_end=2)
# do_result_convergence_analysis(dipoles_d,
# name='dipole',
# n_qm_start=1,
# n_qm_end=2,
# func_to_apply=npl.norm)
# if args.include_noCT:
# do_result_convergence_analysis(frequencies_noCT_CO2_d,
# name='frequency_noCT',
# n_qm_start=1,
# n_qm_end=2)
# do_result_convergence_analysis(intensities_noCT_CO2_d,
# name='intensity_noCT',
# n_qm_start=1,
# n_qm_end=2)
###################################
# plots!
# do_result_convergence_plots(frequencies_CO2_d,
# name='frequency',
# n_qm_start=0,
# n_qm_end=3,
# ylabel=r"$\nu_{3}$ frequency (cm$^{-1}$)",
# labels=labels,
# colors=colors)
# do_result_convergence_plots_gaps(frequencies_CO2_d,
# name='frequency',
# func_to_apply=lambda x: x,
# ylabel=r'$\nu_{3}$ frequency (cm$^{-1}$)',
# symbol='\omega')
# do_result_convergence_plots(intensities_CO2_d,
# name='intensity',
# n_qm_start=0,
# n_qm_end=3,
# ylabel=r"$\nu_{3}$ intensity (cm$^{-1}$)",
# labels=labels,
# colors=colors)
# do_result_convergence_plots(dipoles_d,
# name='dipole',
# n_qm_start=1,
# n_qm_end=3,
# ylabel='total dipole moment (Debye)',
# func_to_apply=npl.norm,
# labels=labels,
# colors=colors)
# do_result_convergence_plots(dipoles_d,
# name='dipole_0qm',
# n_qm_start=0,
# n_qm_end=0,
# ylabel='total dipole moment (Debye)',
# func_to_apply=npl.norm,
# labels=labels,
# colors=colors)
# if args.include_noCT:
# do_result_convergence_plots(frequencies_noCT_CO2_d,
# name='frequency_noCT',
# n_qm_start=1, n_qm_end=2,
# ylabel=r"$\nu_{3}$ frequency (cm$^{-1}$)",
# labels=labels_noCT,
# colors=colors_noCT)
# do_result_convergence_plots(intensities_noCT_CO2_d,
# name='intensity_noCT',
# n_qm_start=1,
# n_qm_end=2,
# ylabel=r"$\nu_{3}$ intensity (cm$^{-1}$)",
# labels=labels_noCT,
# colors=colors_noCT)
condon()
# Read in the most "restrictive" set of snapshot numbers; this
# will let us compare sets of equal size.
snapnums = read_snapshot_file("/home/eric/Chemistry/calc.sgr/paper_02_CD_SC/inputs_freq/representative_snapshots_3qm")
filter_snapshots(snapnums, snapnums_frequencies_d, frequencies_CO2_d)
do_result_convergence_plots(frequencies_CO2_d,
name='frequency_same_set',
n_qm_start=0,
n_qm_end=3,
ylabel=r"$\nu_{3}$ frequency (cm$^{-1}$)",
labels=labels,
colors=colors)
do_result_convergence_plots(frequencies_CO2_d,
name='frequency_same_set_2QM',
n_qm_start=0,
n_qm_end=2,
ylabel=r"$\nu_{3}$ frequency (cm$^{-1}$)",
labels=labels,
colors=colors)
do_result_convergence_plots_gaps(frequencies_CO2_d,
name='frequency_same_set',
func_to_apply=lambda x: x,
ylabel=r'$\nu_{3}$ frequency (cm$^{-1}$)',
symbol='\omega')
if args.do_snapshot_plots:
print('snapshot numbers:', snapnums)
for snapnum in snapnums:
plot_single_snapshot_results(snapnum,
snapnums_frequencies_d,
frequencies_CO2_d,
name='frequency',
func_to_apply=lambda x: x,
ylabel=r'$\nu_{3}$ frequency (cm$^{-1}$)')
plot_single_snapshot_results_qm_gaps(snapnum,
snapnums_frequencies_d,
frequencies_CO2_d,
name='frequency',
func_to_apply=lambda x: x,
ylabel=r'$\nu_{3}$ frequency (cm$^{-1}$)',
symbol='\omega')
# plot_single_snapshot_results(snapnum,
# snapnums_frequencies_d,
# intensities_CO2_d,
# name='intensity',
# func_to_apply=lambda x: x,
# ylabel=r'$\nu_{3}$ intensity (km/mol)')
# plot_single_snapshot_results_qm_gaps(snapnum,
# snapnums_frequencies_d,
# intensities_CO2_d,
# name='intensity',
# func_to_apply=lambda x: x,
# ylabel=r'$\nu_{3}$ intensity (km/mol)',
# symbol='I')
# plot_single_snapshot_results(snapnum,
# snapnums_dipoles_d,
# dipoles_d,
# name='dipole',
# func_to_apply=npl.norm,
# ylabel='total dipole moment (Debye)')
# plot_single_snapshot_results_qm_gaps(snapnum,
# snapnums_dipoles_d,
# dipoles_d,
# name='dipole',
# func_to_apply=npl.norm,
# ylabel='total dipole moment (Debye)',
# symbol='\mu')
###################################
| true |
47800d8e5051709fc38e048e747450d7f29557c9 | Python | svrswetha/Python | /hello.py | UTF-8 | 238 | 2.6875 | 3 | [] | no_license | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
    """Handle requests to the site root with a static greeting."""
    return "Hello World!"
if __name__ == "__main__":
    # print() as a function is valid in both Python 2 and 3 for a
    # single argument; the bare print statement used before is a
    # SyntaxError under Python 3.
    print("i am running as an independent program")
    app.run()
else:
    print("i am running as an imported module")
| true |
a06320efdf9a561ae5449ceb3e1f9d39556d1c8f | Python | jagatheeswari21/Python-programming | /Beginner/max among 10 num.py | UTF-8 | 87 | 2.6875 | 3 | [] | no_license | input=raw_input().split()
# NOTE(review): `input` (assigned on the previous line) shadows the
# builtin; under Python 3 the raw_input() call above must also become
# input().
if len(input) == 10:
    # list() keeps Python 2 semantics (map returned a list there) and
    # makes the result reusable under Python 3's lazy map.
    input = list(map(int, input))
    # print() as a function works in both Python 2 and 3; the old bare
    # print statement is a SyntaxError under Python 3.
    print(max(input))
| true |
57756c6ce2aad037eabce9a9d52bc976506b9183 | Python | alexandraback/datacollection | /solutions_5708921029263360_0/Python/Spelvin/c.py | UTF-8 | 1,565 | 2.90625 | 3 | [] | no_license | def outfitlistmaker(j,p,s):
    # Enumerate every (jacket, pants, shirt) combination as a 1-based
    # [x, y, z] index triple, in lexicographic order.
    output = []
    for x in range(1,j+1):
        for y in range(1,p+1):
            for z in range(1,s+1):
                output.append([x,y,z])
    return output
def countmatrix(c,d):
    """Return a c-by-d matrix (list of lists) of zero counters."""
    # A fresh inner list is built per row, so rows are independent.
    return [[0] * d for _ in range(c)]
def greedyoutfitplanner(j,p,s,k):
    """Greedily select outfits so that no garment *pair* is seen more
    than k times.

    Scans every (jacket, pants, shirt) combination in lexicographic
    order and wears it whenever all three of its garment pairs are
    still under the limit.  Returns the worn outfits as 1-based
    [jacket, pants, shirt] triples.
    """
    outfitsworn = []
    # Times each garment pair has been worn together, 0-indexed.
    pair_count_jp = [[0] * p for _ in range(j)]
    pair_count_js = [[0] * s for _ in range(j)]
    pair_count_ps = [[0] * s for _ in range(p)]
    for jacket in range(j):
        for pants in range(p):
            for shirt in range(s):
                if (pair_count_jp[jacket][pants] < k
                        and pair_count_js[jacket][shirt] < k
                        and pair_count_ps[pants][shirt] < k):
                    pair_count_jp[jacket][pants] += 1
                    pair_count_js[jacket][shirt] += 1
                    pair_count_ps[pants][shirt] += 1
                    outfitsworn.append([jacket + 1, pants + 1, shirt + 1])
    return outfitsworn
import sys
# Code Jam style driver: python c.py <input_file> <output_file>.
with open(sys.argv[1], "r") as fileIN:
    inputLines = fileIN.readlines()
with open(sys.argv[2], "w") as fileOUT:
    # The first line of the input holds the number of test cases.
    numberOfCases = int(inputLines.pop(0))
    for num in range(numberOfCases):
        # Each case is one line: J P S K.
        quartet = inputLines.pop(0).rstrip().split(' ')
        j = int(quartet[0])
        p = int(quartet[1])
        s = int(quartet[2])
        k = int(quartet[3])
        outfitsworn = greedyoutfitplanner(j,p,s,k)
        # One header line per case, then one line per worn outfit.
        fileOUT.write('Case #' + str(num+1) + ': ' + str(len(outfitsworn)) + '\n')
        for outfit in outfitsworn:
            fileOUT.write(' '.join([str(x) for x in outfit]) + '\n')
| true |
bdb730ab8238953a55a8143c03edc3ed197405b4 | Python | yevfurman/Rosalind | /LCSM.py | UTF-8 | 818 | 2.90625 | 3 | [] | no_license | def long_substr(data):
    # Longest common substring found so far (empty until one appears).
    substr = ''
    # Need at least two strings and a non-empty reference string for
    # any candidate substrings to exist.  Candidates are taken from
    # data[0]; O(n^2) candidates, each checked against every string.
    if len(data) > 1 and len(data[0]) > 0:
        for i in range(len(data[0])):
            for j in range(len(data[0])-i+1):
                # Only test candidates strictly longer than the
                # current best.
                if j > len(substr) and is_substr(data[0][i:i+j], data):
                    substr = data[0][i:i+j]
    return substr
def is_substr(find, data):
    """Return True if *find* occurs in every string of *data*.

    The degenerate query (empty *data* AND empty *find*) returns
    False; an empty *data* alone is vacuously True.
    """
    if not data and not find:
        return False
    return all(find in candidate for candidate in data)
# Read the FASTA-formatted Rosalind dataset, stripping the trailing
# newline before splitting into lines.
f = open("Files/rosalind_lcsm.txt")
a = (f.read()).rstrip()
f.close()
s = a.split("\n")
# Record labels (the text after each '>' header).
ind = []
# NOTE(review): 'cnt' is never used afterwards.
cnt = []
i = 0
strings=[]
# Walk the lines: each '>' header starts a new record; the following
# lines (until the next '>') are concatenated into that record's DNA.
while i < len(s):
    ind.append(s[i][1:])
    i += 1
    DNA = ""
    while (i < len(s)) and (s[i][0] != ">"):
        DNA += s[i]
        i += 1
    strings.append(DNA)
print (long_substr(strings)) | true |
582d8b3013b80dbd43c44be17be78b8c9c247d62 | Python | steve98654/ProjectEuler | /392.py | UTF-8 | 329 | 2.703125 | 3 | [] | no_license | import cvxpy as cp
import numpy as np
# Problem data.
n = 10
# Construct the problem.
x = cp.Variable(n)
# NOTE(review): cp.sum_entries is the pre-1.0 cvxpy name (cp.sum in
# modern releases); left as-is to match the cvxpy version in use.
obj = cp.Minimize(cp.sum_entries([(x[i] - x[i-1])*cp.sqrt(1-x[i]**2) for i in range(1,n)]))
# Keep the boundary conditions: the second assignment used to
# *overwrite* this list, silently dropping x[0] == -1 and x[-1] == 1.
consts = [x[0] == -1, x[-1] == 1]
# cvxpy does not support strict inequalities, so express monotonicity
# with >= (old cvxpy versions treated > as >= anyway).
consts += [x[i] >= x[i-1] for i in range(1, n)]
# The original passed the undefined names `objective` and
# `constraints`, which raised a NameError at runtime.
prob = cp.Problem(obj, consts)
| true |
c0e6fa0cffcf483fd750b2927e729ca6a6abb199 | Python | Anusha2605/terraform-aws-tech-test | /instance_status.py | UTF-8 | 1,217 | 2.78125 | 3 | [] | no_license | import boto3
import datetime
import time
from datetime import datetime as dt
from pprint import pprint
def lambda_handler(event, context):
    """AWS Lambda entry point: record the current state of every EC2
    instance into the 'ec2_instance_status' DynamoDB table, with a
    one-day TTL on each item.

    Returns True when every item was written, False if any exception
    occurred (the exception is printed, not re-raised).

    NOTE(review): describe_instance_status returns a single page of
    results; a large fleet would need pagination -- confirm fleet size.
    """
    # Connect to EC2 and DynamoDB client
    client = boto3.client("ec2")
    dynamodb = boto3.resource('dynamodb')
    # Include stopped instances too, not just running ones.
    status = client.describe_instance_status(IncludeAllInstances = True)
    #pprint(status)
    # TTL: expire each item one day from now (epoch seconds).
    days = dt.today() + datetime.timedelta(days=1)
    expiryDateTime = int(time.mktime(days.timetuple()))
    #Connect to right table in Dynamodb
    table = dynamodb.Table('ec2_instance_status')
    #Report data to Dynamodb table
    try:
        for i in status["InstanceStatuses"]:
            pprint(i)
            # Current timestamp in milliseconds, used as the item key.
            currenttime = round(time.time() * 1000)
            table.put_item(
                Item={
                    "currentdatetime": str(currenttime),
                    "InstanceId": i['InstanceId'],
                    "InstanceState": i["InstanceState"]["Name"],
                    "expirydatetime": str(expiryDateTime)
                }
            )
        return True
    except Exception as e:
        print('Exception: ', e)
        return False
| true |
246be67dbbc743ebf770ee337705d65f1409507b | Python | chris4540/DT2119 | /lab3/lab1_proto.py | UTF-8 | 8,746 | 3.421875 | 3 | [] | no_license | """
DT2119, Lab 1 Feature Extraction
See also:
https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
"""
import numpy as np
import scipy
import scipy.signal
from scipy import fftpack
from lab1_tools import trfbank
from lab1_tools import lifter
# Function given by the exercise ----------------------------------
def mspec(samples, winlen=400, winshift=200, preempcoeff=0.97, nfft=512, samplingrate=20000):
    """Compute log Mel filterbank features for a speech signal.

    Args:
        samples: array of speech samples with shape (N,)
        winlen: length of the analysis window in samples
        winshift: shift between consecutive windows in samples
        preempcoeff: pre-emphasis coefficient
        nfft: FFT length (power of 2, >= winlen)
        samplingrate: sampling rate of the original signal

    Returns:
        N x nfilters array of log Mel filterbank outputs (see trfbank
        for nfilters).
    """
    # Front-end pipeline: framing -> pre-emphasis -> Hamming windowing
    # -> power spectrum -> log Mel filterbank.
    framed = enframe(samples, winlen, winshift)
    emphasized = preemp(framed, preempcoeff)
    tapered = windowing(emphasized)
    power = powerSpectrum(tapered, nfft)
    return logMelSpectrum(power, samplingrate)
def mfcc(samples, winlen=400, winshift=200, preempcoeff=0.97, nfft=512, nceps=13, samplingrate=20000, liftercoeff=22):
    """Compute liftered Mel Frequency Cepstrum Coefficients.

    Args:
        samples: array of speech samples with shape (N,)
        winlen: length of the analysis window in samples
        winshift: shift between consecutive windows in samples
        preempcoeff: pre-emphasis coefficient
        nfft: FFT length (power of 2, >= winlen)
        nceps: number of cepstral coefficients to keep
        samplingrate: sampling rate of the original signal
        liftercoeff: liftering coefficient equalising the MFCC scales

    Returns:
        N x nceps array of liftered MFCC coefficients.
    """
    filterbank_features = mspec(samples, winlen, winshift, preempcoeff, nfft, samplingrate)
    cepstral = cepstrum(filterbank_features, nceps)
    return lifter(cepstral, liftercoeff)
# Functions to be implemented ----------------------------------
def enframe(samples, winlen, winshift):
    """Slice the input samples into (possibly overlapping) frames.

    Args:
        samples: 1-D array of speech samples.
        winlen: frame length in samples.
        winshift: hop between consecutive frames in samples.

    Returns:
        numpy array [n_frames x winlen] of frames, where n_frames is
        the number of full frames that fit in the signal.
    """
    # The first frame consumes winlen samples; each further frame only
    # needs another winshift samples.
    n_frames = 1 + (samples.shape[0] - winlen) // winshift
    frames = np.empty((n_frames, winlen))
    for frame_idx in range(n_frames):
        start = frame_idx * winshift
        frames[frame_idx, :] = samples[start:start + winlen]
    return frames
def preemp(input_, p=0.97):
    """Apply a first-order pre-emphasis filter to every frame.

    Implements y[n] = x[n] - p * x[n-1] along each row, boosting the
    high-frequency content of the speech frames.

    Args:
        input_: array of speech frames [N x M] (N frames, M samples
            per frame).
        p: pre-emphasis factor.

    Returns:
        array [N x M] of pre-emphasised frames.
    """
    # FIR filter with numerator [1, -p] and unit denominator, applied
    # frame-wise (axis=1).
    return scipy.signal.lfilter([1.0, -p], [1.0], input_, axis=1)
def windowing(input_):
    """Apply a periodic Hamming window to every frame.

    Tapering the frame edges reduces spectral leakage in the FFT taken
    afterwards; sym=False selects the periodic window appropriate for
    spectral analysis.

    Args:
        input_: array of speech frames [N x M] (N frames, M samples
            per frame).

    Returns:
        array [N x M] of windowed frames.
    """
    frame_size = input_.shape[1]  # the same window is applied to each frame
    # scipy.signal.hamming() was deprecated and removed (SciPy >= 1.13);
    # the window functions live in scipy.signal.windows.
    return input_ * scipy.signal.windows.hamming(frame_size, sym=False)
def powerSpectrum(input_, nfft):
    """Compute the power spectrum (squared FFT modulus) of each frame.

    Args:
        input_: array of speech frames [N x M] (N frames, M samples
            per frame).
        nfft: FFT length (frames are zero-padded or truncated to it).

    Returns:
        array [N x nfft] of power spectra.
    """
    spectrum = fftpack.fft(input_, nfft)
    return np.abs(spectrum) ** 2
def logMelSpectrum(input_, samplingrate):
    """Compute the log Mel filterbank outputs from power spectra.

    Args:
        input_: array of power spectra [N x nfft] (N frames).
        samplingrate: sampling rate of the original signal, used to
            place the Mel filters.

    Returns:
        array [N x nmelfilters] of log filterbank outputs, where
        nmelfilters is determined by trfbank (see lab1_tools.py).
    """
    nfft = input_.shape[1]
    filterbank = trfbank(samplingrate, nfft)
    # Project the spectra onto the Mel filters, then compress the
    # dynamic range with the natural log.
    return np.log(np.dot(input_, filterbank.T))
def cepstrum(input_, nceps):
    """Compute cepstral coefficients from log Mel spectra via the DCT.

    Args:
        input_: array of log Mel filterbank outputs [N x nmelfilters].
        nceps: number of cepstral coefficients to keep.

    Returns:
        array [N x nceps] of cepstral coefficients.
    """
    # Type-II DCT along each frame (this matches the lecture notes);
    # keep only the first nceps low-"quefrency" coefficients.
    full_cepstrum = fftpack.dct(input_, type=2, axis=1)
    return full_cepstrum[:, :nceps]
def dtw(x, y, dist=None, debug=False):
    """Dynamic Time Warping distance between two feature sequences.

    Args:
        x, y: arrays of size NxD and MxD respectively, where D is the
            feature dimensionality and N, M the sequence lengths.
        dist: frame distance function, used as dist(x[i], y[j]);
            defaults to the Euclidean (L2) distance.
        debug: if True, also return the local and accumulated distance
            matrices plus the best path through them.

    Returns:
        d: global distance normalized by len(x) + len(y); with
        debug=True, the tuple (d, LD, AD, path) instead.

    Raises:
        ValueError: if x and y differ in feature dimensionality.
    """
    if x.shape[1] != y.shape[1]:
        raise ValueError("x and y should have the same 2nd dimension!")
    if dist is None:
        # Euclidean distance by default; parameters renamed so the
        # lambda no longer shadows the outer x and y.
        dist = lambda a, b: np.linalg.norm(a - b, ord=2)
    N = x.shape[0]
    M = y.shape[0]
    # Local (frame-to-frame) distance matrix.
    loc_dist = np.empty((N, M))
    for n in range(N):
        for m in range(M):
            loc_dist[n, m] = dist(x[n], y[m])
    # Accumulated distances via the standard DTW recursion: each cell
    # extends the cheapest of its three predecessors (up, diagonal,
    # left).  The first row/column only have one predecessor each.
    acc_dist = np.zeros((N, M))
    acc_dist[0, 0] = loc_dist[0, 0]
    for n in range(1, N):
        acc_dist[n, 0] = loc_dist[n, 0] + acc_dist[n-1, 0]
    for m in range(1, M):
        acc_dist[0, m] = loc_dist[0, m] + acc_dist[0, m-1]
    for n in range(1, N):
        for m in range(1, M):
            acc_dist[n, m] = (loc_dist[n, m]
                              + min(acc_dist[n-1, m], acc_dist[n-1, m-1], acc_dist[n, m-1]))
    # Normalize by the combined length so sequences of different
    # lengths are comparable.
    d = acc_dist[N-1, M-1] / (N + M)
    if debug:
        path = __path_backtrace(acc_dist)
        return d, loc_dist, acc_dist, path
    return d
def __path_backtrace(acc_dist):
    """Backtrace the optimal DTW path through an accumulated-distance
    matrix, from the bottom-right cell to (0, 0).

    Returns the path as a list of (i, j) index pairs in forward order.
    """
    ret = list()
    i, j = np.array(acc_dist.shape) - 1
    ret.append((i, j))
    while (i > 0) or (j > 0):
        # On the matrix borders only one move is legal.  The old code
        # still indexed acc_dist[i-1, ...] / acc_dist[..., j-1] there,
        # which numpy's negative indexing wrapped to the opposite edge,
        # so the backtrace could take invalid steps (and even drive an
        # index below zero).
        if i == 0:
            j -= 1
        elif j == 0:
            i -= 1
        else:
            case_ = np.argmin(
                (acc_dist[i-1, j-1], acc_dist[i-1, j], acc_dist[i, j-1]))
            if case_ == 0:
                i -= 1
                j -= 1
            elif case_ == 1:
                i -= 1
            else:
                j -= 1
        ret.append((i, j))
    return list(reversed(ret))
| true |
f5e00a7cda5d3a8c9ae3aa9ed14deb50e848596a | Python | NAVEEN-LUCIFER/Letsupgrade-python | /ass-1.1.py | UTF-8 | 1,436 | 3.453125 | 3 | [] | no_license | print("------------------------------------LIST------------------------------------------------------")
a=["cricket","bike","food"]
print("MAIN LIST",a)
a.append("LOVE")
print("APPEND",a)
a.extend(["GOOD","BAD"])
print("EXTEND",a)
a.insert(2,"Friendship")
print("INSERT",a)
a.pop(1)
print("POP",a)
a.reverse()
print("REVERSE",a)
print("------------------------------------DICT------------------------------------------------------")
Dict={'1':'Cricket', '2':'Food', '3':'Bike'}
First_value= Dict.setdefault('1')
print("Dictionary:", Dict)
print("First_value:",First_value)
Fourth_value= Dict.setdefault('4','MONEY')
print("Fourth_value:",Fourth_value)
print("------------------------------------SETS------------------------------------------------------")
print('2011 ICC-WORLDCUP-FINAL SCORECARD FOR TEAM-INDIA')
def Player(name,score,balls_faced):
    """Print one scorecard line: '<name> has scored- <score> outof- <balls_faced>'."""
    summary = (name, 'has scored-', score, 'outof-', balls_faced)
    print(*summary)
Player(name='V SEWAGH',score='0',balls_faced='2')
Player(name='SACHIN TENDULKAR',score='18',balls_faced='14')
Player(name='G GAMBHIR',score='97',balls_faced='122')
Player(name='VIRAT KOHLI',score='35',balls_faced='49')
Player(name='M.S.DHONI*',score='91',balls_faced='79')
Player(name='YUARAJ_SINGH*',score='21',balls_faced='24')
print("------------------------------------TUPLE------------------------------------------------------")
string = "Let's Upgrade-PYTHON"
tuple = tuple(string)
print(tuple)
| true |
be7f821102c7d2a9674ca403b4694477a356fe62 | Python | museRhee/basicPython | /ageCheck.py | UTF-8 | 188 | 4.34375 | 4 | [] | no_license | '''
input age and print if age is 20 and over.
'''
#input age
age = int(input("How old are U? "))
#print result
if (age>=20):
print("U are an adult")
else:
print("U are a baby")
| true |
82027dcda722dd797a7983f2735d78ea88daf087 | Python | mrcgndr/weathercrawler | /utils/visualize.py | UTF-8 | 1,125 | 2.859375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from .weatherfilestack import WeatherFileStack
def plotTemperature(wstack: WeatherFileStack, unit: str, feelslike: bool):
    """Plot the temperature time series of a weather-file stack.

    Args:
        wstack: stack of weather files to plot.
        unit: temperature unit, "celsius" or "fahrenheit".
        feelslike: if True, also plot the feels-like temperature.

    Returns:
        (fig, ax): the created matplotlib figure and axes.

    Raises:
        ValueError: if unit is neither "celsius" nor "fahrenheit".
    """
    # Validate with an explicit exception: `assert` is stripped when
    # Python runs with -O, so it must not guard caller input.
    if unit not in ("celsius", "fahrenheit"):
        raise ValueError("Unknown degree unit. Choose 'celsius' or 'fahrenheit'")
    time = [f.current.obs_datetime_loc for f in wstack.files]
    if unit == "celsius":
        T = [f.current.weather.temp.celsius for f in wstack.files]
        if feelslike:
            Tf = [f.current.weather.feelslike.celsius for f in wstack.files]
    elif unit == "fahrenheit":
        T = [f.current.weather.temp.fahrenheit for f in wstack.files]
        if feelslike:
            Tf = [f.current.weather.feelslike.fahrenheit for f in wstack.files]
    fig, ax = plt.subplots()
    fig.autofmt_xdate()
    xfmt = mdates.DateFormatter('%y-%m-%d %H:%M')
    ax.xaxis.set_major_formatter(xfmt)
    ax.plot(time, T, label="real")
    if feelslike:
        ax.plot(time, Tf, label="feels like")
    ax.set(title=wstack.location, ylabel=f"Temperature [{'C' if unit == 'celsius' else 'F'}]")
    ax.legend()
    return fig, ax
| true |
e4ac538229157df6ecedb37089b68eb108fcfc71 | Python | jsevamo/RayTracerTest | /Main.py | UTF-8 | 10,431 | 3.015625 | 3 | [
"MIT"
] | permissive | # /*******************************************************
# * 2020 Juan Sebastián Vargas Molano j.sevamo@gmail.com
# *******************************************************/
# https://github.com/Keeeweee/Raytracing-In-One-Weekend-in-Python Add randomInUnitSphere method
# TODO: CHECK HOW Hit_Records ARE BEING HANDLED WHEN RENDERING THE WORLD.
from PIL import Image
import cv2
from RayTracerTest.Vec3 import Vec3 as vec3
from RayTracerTest.Ray import Ray as ray
from playsound import playsound
import math
from RayTracerTest.Sphere import *
from RayTracerTest.HittableList import *
from RayTracerTest.Hittable import *
from RayTracerTest.Sphere import *
from RayTracerTest.Camera import *
import sys
# /*******************************************************
# from PIL import Image
# import cv2
# from RayTracerTest.Vec3 import Vec3 as vec3
# from RayTracerTest.Ray import Ray as ray
# from playsound import playsound
# *******************************************************/
# Upper bound (t_max) for ray-object intersection tests; infinity means
# "no far clipping plane".
MAXRANGE: float = math.inf
# # Not used anymore. Used with GetColorOfPixels. Since we use a world now, this is in Sphere class
# def Hit_Sphere(center: vec3, radius: float, r: ray):
# """
#
# :rtype: float
# """
#
# # To add a sphere, we can use: (X - Cx)² + (Y - Cy)² + (Z - Cz)² = R²
# # In vector form we have dot((P-C),(P-C)) = R²
# # And since our ray is P(t) = A + t*B
# # Then we have dot((A + t*B - C), (A + t*B - C) = R²
# # Doing some algebra we then have:
# # dot(B,B) t² + dot(B, A - C) * 2t + dot(A-C, A-C) - R² = 0
# # which is an equation in the form of:
# # aX² + bX + c = 0
# # Now the discriminant is b² - 4*a*c
# # if that is greater than zero, we have a valid solution, meaning
# # the ray hit the sphere.
# # So then we return the complete solution for t, but the smallest value.
#
# oc: vec3 = r.GetOrigin - center
# a: float = vec3.DotProduct(r.GetDirection, r.GetDirection)
# b: float = 2.0 * vec3.DotProduct(oc, r.GetDirection)
# c: float = vec3.DotProduct(oc, oc) - radius * radius
# discriminant: float = b * b - 4 * a * c
#
# if discriminant < 0:
# return -1.0
# else:
# return (-b - math.sqrt(discriminant)) / (a * 2.0)
# # Not used anymore. Replaced by GetColorOfPixelsWithWorld
# # Returns a Vector3D with the color of the pixel based on where the ray is.
# def GetColorOfPixels(r: ray):
# """
#
# :rtype: Vec3
#
# """
# # if Hit_Sphere(vec3(0, 0, -1), 0.5, r):
# # return vec3(1, 0, 0)
#
# # To get the color of the pixel, we see first the value of t. It can be -1 or any number
# # greater than 0 if it hit a sphere.
# t: float = Hit_Sphere(vec3(0, 0, -1), 0.5, r)
#
# # If the ray hit the sphere, we get the exact point of where it got it by using PointAtParamenter(), and
# # subtract the sphere's position from the hit position in order to get the normal vector at hit point.
# # We then make this normal vector an unit vector.
# # And finally we make a standard graphics trick to have the normal be from -1 -> 1 to 0 -> 1
# if t > 0.0:
# N_notUnit: vec3 = r.PointAtParameter(t) - vec3(0, 0, -1)
# N_notUnit.MakeUnitVector()
# N: vec3 = N_notUnit
# return vec3(N.x + 1, N.y + 1, N.z + 1) * 0.5
#
# # We get the direction of the ray, make it a unit vector.
# Direction: vec3 = r.GetDirection
# Direction.MakeUnitVector()
# unitDirection: vec3 = Direction
# # We make a standard graphics trick in which we take the unit direction,
# # add one and multiply by 0.5. This is to have 0 < t < 1 instead of -1 < t < 1
# # t starts with high values and decreases as the ray goes down the image with it's "y" value.
# t = 0.5 * (unitDirection.y + 1)
# # Color white to use
# color1: vec3 = vec3(1.0, 1.0, 1.0)
# # Color blueish to use
# color2: vec3 = vec3(0.5, 0.7, 1.0)
#
# # We make a linear interpolation between the two colors based on the value of t using (1-t)A + tB
# return color1 * (1.0 - t) + color2 * t
def RandomInUnitSphere() -> vec3:
    """Rejection-sample a random point strictly inside the unit sphere."""
    while True:
        # Map three uniform samples from [0, 1) into the cube [-1, 1)^3.
        candidate: vec3 = vec3(RandomFloat(), RandomFloat(), RandomFloat()) * 2.0 - vec3(1, 1, 1)
        # Accept only points inside the sphere; otherwise draw again.
        if candidate.SquaredLength < 1.0:
            return candidate
def GetColorOfPixelsWithWorld(r: ray, world: Hittable, depth: int = 0, max_depth: int = 50):
    """Return the color seen along ray `r` in `world` (diffuse bounce shading).

    `depth` tracks the current recursion level; rays that bounce more than
    `max_depth` times contribute black instead of recursing without bound,
    so rendering no longer relies on sys.setrecursionlimit() being raised.
    Both new parameters default to the previous behavior for existing callers.
    """
    rec = [Hit_Record()]
    # t_min = 0.001 avoids "shadow acne" from re-hitting the same surface.
    if world.Hit(r, 0.001, MAXRANGE, rec):
        if depth >= max_depth:
            # Too many bounces: assume all remaining light was absorbed.
            return vec3(0, 0, 0)
        # Diffuse bounce: aim at a random point around the surface normal...
        target: vec3 = rec[0].p + rec[0].normal + RandomInUnitSphere()
        # ...and recurse, attenuating the bounced color by 0.4 (absorption).
        return GetColorOfPixelsWithWorld(ray(rec[0].p, target - rec[0].p), world, depth + 1, max_depth) * 0.4
    else:
        Direction: vec3 = r.GetDirection
        Direction.MakeUnitVector()  # NOTE(review): mutates the ray's direction in place — confirm callers tolerate this
        unitDirection: vec3 = Direction
        # Map unit y from [-1, 1] to t in [0, 1] for the sky gradient.
        t = 0.5 * (unitDirection.y + 1)
        color1: vec3 = vec3(1.0, 1.0, 1.0)  # white
        color2: vec3 = vec3(0.5, 0.7, 1.0)  # light blue
        # Linear interpolation (1 - t) * white + t * blue.
        return color1 * (1.0 - t) + color2 * t
# Main function for the raytracer
def Main():
    """Render the hard-coded two-sphere scene to renderedImage.ppm, then show it."""
    # Deep diffuse bounces can recurse far; raise the limit so they don't crash.
    sys.setrecursionlimit(5000)
    # Output resolution (width x height).
    nx: int = 600
    ny: int = 300
    # Samples per pixel for antialiasing: more samples = smoother but slower.
    samples: int = 50
    # Scene: a small sphere resting on a huge one (the huge one reads as a floor).
    world = HittableList()
    world.append(Sphere(vec3(0, -100.5, -1), 100))
    world.append(Sphere(vec3(0, 0, -1), 0.5))
    cam = Camera()
    # `with` guarantees the image file is closed even if rendering fails
    # (the old code opened it "w+" and only closed it on success).
    with open("renderedImage.ppm", "w") as outputImage:
        # PPM header, see https://en.wikipedia.org/wiki/Netpbm#File_formats
        outputImage.write("P3\n" + str(nx) + " " + str(ny) + "\n255\n")
        # Pixels are emitted left-to-right, then top-to-bottom.
        for j in range(ny, 0, -1):
            for i in range(0, nx, 1):
                # Average `samples` jittered rays per pixel for antialiasing:
                # u/v are plane coordinates in [0, 1], nudged by RandomFloat().
                col: vec3 = vec3(0, 0, 0)
                for s in range(0, samples, 1):
                    u: float = (i + RandomFloat()) / nx
                    v: float = (j + RandomFloat()) / ny
                    r: ray = cam.GetRay(u, v)
                    col = col + GetColorOfPixelsWithWorld(r, world)
                col = col / samples
                # Gamma correction with gamma = 2 (sqrt of each channel).
                col = vec3(math.sqrt(col.r), math.sqrt(col.g), math.sqrt(col.b))
                ir: int = int(255.99 * col.r)
                ig: int = int(255.99 * col.g)
                ib: int = int(255.99 * col.b)
                print(str(ir) + " " + str(ig) + " " + str(ib) + "\n")
                outputImage.write(str(ir) + " " + str(ig) + " " + str(ib) + "\n")
    print("Image Rendered Correctly! Success!")
    print("The Rendering engine works!")
    print("You suck a little bit less today!")
    print("Rejoice!")
    ShowImage()
    playsound('victory.mp3')
# Uses OpenCV to change the format of the rendered image from PPM to JPG, and then uses Pillow (PIL) to show it.
def ShowImage():
    """Convert the rendered PPM to JPG (via OpenCV) and open it (via Pillow)."""
    rendered = cv2.imread('renderedImage.ppm')
    cv2.imwrite('renderedImage.jpg', rendered)
    Image.open('renderedImage.jpg').show()
# Script entry point: render the scene immediately when the module runs.
Main()
| true |
c60d4c6dbb93e68fc1849bb2d8728b046b5698c4 | Python | EmersonDantas/SI-UFPB-IP-P1 | /Exercícios-Lista4-Comando condicional-IP-Python/Lista4-Lvr-Pag84-E4.10.py | UTF-8 | 632 | 3.375 | 3 | [] | no_license | # EMERSON DANTAS S.I IP-P1
# Read the energy consumption and the installation type, then price the bill.
consumo = float(input('Digite o consumo de energia em kWh:'))
tipo = str.lower(input('Digite o tipo de instalação conforme a tabela abaixo:\nR para Residências;\nI para indústrias\nC para comércios.\n'))
# The kWh price depends on the installation type and a consumption threshold.
if tipo == 'r':
    # Residential: 0.65 above 500 kWh, 0.40 otherwise.
    if consumo > 500:
        preco = 0.65
    else:
        preco = 0.40
elif tipo == 'i':
    # Industrial: 0.60 above 5000 kWh, 0.55 otherwise.
    if consumo > 5000:
        preco = 0.60
    else:
        preco = 0.55
elif tipo == 'c':
    # Commercial: 0.60 above 1000 kWh, 0.55 otherwise.
    if consumo > 1000:
        preco = 0.60
    else:
        preco = 0.55
else:
    # Unknown type: report and abort before the final print would use `preco`.
    print('Tipo de instalação inválido!')
    exit()
print('Você deverá pagar R${r:.2f}'.format(r=(consumo * preco))) | true |
111a1c75c7040c19b7ff62949b2b332a52f700e4 | Python | sunxianfeng/LeetCode-and-python | /problem-solving-with-algorithms-and-data-structure-using-python 中文版/递归/汉诺塔问题.py | UTF-8 | 761 | 3.75 | 4 | [] | no_license | # -*- coding:utf-8 -*-
'''
Tower of Hanoi problem.

Abstract outline for moving a tower from the source pole to the target pole
via the intermediate pole:
1. Move the sub-tower of (height - 1) disks to the intermediate pole, using the target pole.
2. Move the remaining (largest) disk to the target pole.
3. Move the sub-tower of (height - 1) disks from the intermediate pole to the target pole, using the source pole.
'''
def moveTower(height,fromPole, toPole, withPole):
    """Move a tower of `height` disks from fromPole to toPole, via withPole."""
    if height < 1:
        # Empty tower: nothing to move.
        return
    # Park the (height - 1) sub-tower on the spare pole,
    # move the largest disk, then bring the sub-tower back on top of it.
    moveTower(height - 1, fromPole, withPole, toPole)
    moveDisk(fromPole, toPole)
    moveTower(height - 1, withPole, toPole, fromPole)
def moveDisk(fromPole,toPole):
    """Pop the top disk off fromPole and push it onto toPole, logging the move."""
    # The three towers are represented as three stacks (Python lists).
    print("moving disk from", fromPole, "to", toPole)
    disk = fromPole.pop(-1)
    toPole.append(disk)
# Three towers as stacks; element 0 is the tower's label.
# NOTE: ('A') is just the string 'A' — parentheses alone do not create a tuple.
A = [('A'),3,2,1]
B = [('B')]
C = [('C')]
moveTower(3,A,B,C) | true |
118fe91e57c78db316a3cfb5e1ba622c8382404b | Python | 18786683795/IntelligentSystem | /bpnn_x1x2.py | UTF-8 | 7,891 | 3 | 3 | [] | no_license | #__author__ = 'cuihe'
# coding:utf-8
import math
import random
import BPNN
random.seed(0)
# calculate a random number where: a <= rand < b
def rand(a, b):
    """Return a uniform random float in the half-open interval [a, b)."""
    span = b - a
    return span * random.random() + a
# Make a matrix I*J filled by fill, default=0.0
def makeMatrix(I, J, fill=0.0):
    """Build an I-by-J matrix (list of independent row lists), filled with `fill`."""
    return [[fill] * J for _ in range(I)]
# sigmoid function, tanh is a little nicer than the standard 1/(1+e^-x)
def S_fy(x):
    """Activation function: tanh (used instead of the standard logistic 1/(1+e^-x))."""
    return math.tanh(x)
# derivative of our sigmoid function, in terms of the output (i.e. y)
def dsigmoid(y):
    """Derivative of tanh expressed via its output y: d/dx tanh(x) = 1 - tanh(x)**2."""
    return 1.0 - y**2
class NN:
    """A 3-layer (input/hidden/output) feedforward net trained by backpropagation."""
    def __init__(self, ni, nh, no):
        """Create a network with ni inputs (+1 bias), nh hidden and no output nodes."""
        self.ni = ni + 1  # +1 for the bias node
        self.nh = nh
        self.no = no
        # Activations (outputs) of every node, initialised to 1.0.
        self.ai = [1.0] * self.ni
        self.ah = [1.0] * self.nh
        self.ao = [1.0] * self.no
        # Connection weights: input->hidden and hidden->output.
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-2.0, 2.0)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-1.0, 1.0)
        # Last weight change per connection, used as the momentum term.
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)
    def update(self, inputs):
        """Run one forward pass (despite the name, no weights are changed here)."""
        if len(inputs) != self.ni-1:
            raise ValueError('wrong number of inputs')
        # Input activations (the trailing bias node stays at 1.0).
        for i in range(self.ni-1):
            self.ai[i] = inputs[i]
        # Hidden activations: weighted sum over all inputs, squashed by S_fy.
        for j in range(self.nh):
            total = 0.0  # renamed from `sum`, which shadowed the builtin
            for i in range(self.ni):
                total = total + self.ai[i] * self.wi[i][j]
            self.ah[j] = S_fy(total)
        # Output activations.
        for k in range(self.no):
            total = 0.0
            for j in range(self.nh):
                total = total + self.ah[j] * self.wo[j][k]
            self.ao[k] = S_fy(total)
        return self.ao[:]
    def backPropagate(self, targets, N, M):
        """One backpropagation step (N = learning rate, M = momentum factor).

        Returns the squared error of the most recent forward pass.
        """
        if len(targets) != self.no:
            raise ValueError('wrong number of target values')
        # Error terms for the output layer.
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k] - self.ao[k]
            output_deltas[k] = dsigmoid(self.ao[k]) * error
        # Error terms for the hidden layer.
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k] * self.wo[j][k]
            hidden_deltas[j] = dsigmoid(self.ah[j]) * error
        # Update hidden->output weights (gradient step + momentum).
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k] * self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N * change + M * self.co[j][k]
                self.co[j][k] = change
        # Update input->hidden weights.
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j] * self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N * change + M * self.ci[i][j]
                self.ci[i][j] = change
        # Total squared error over all outputs.
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5 * (targets[k] - self.ao[k]) ** 2
        return error
    def test(self, patterns):
        """Run the net on `patterns` and dump inputs + predictions to x1x2_Output.txt."""
        # `with` guarantees the output file is closed even on error.
        with open('x1x2_Output.txt', 'w') as FOutput:
            for p in patterns:
                temp = self.update(p[0])  # p[0] is the input vector
                print(p[0], "->", temp)
                for item in p[0]:
                    FOutput.write(str(item) + ' ')
                for item in temp:
                    FOutput.write(str(item) + ' ')
                FOutput.write('\n')
    def weights(self):
        """Print the current weight matrices."""
        print('Input weights: ')
        for i in range(self.ni):
            print(self.wi[i])
        # The original bare `print` was a Python-2 leftover (a no-op in Python 3);
        # print() restores the intended blank line.
        print()
        print('Output weights: ')
        for j in range(self.nh):
            print(self.wo[j])
        print()
    def train(self, patterns, iterations=100000, N=0.001, M=0.001):
        """Train on `patterns` = [(inputs, targets), ...].

        N: learning rate, M: momentum factor.
        Logs the error every 80 iterations and writes it to x1x2-XLError.txt.
        """
        XLErrorList = []
        olderror = 0
        for i in range(iterations):
            error = 0.0  # accumulated error for this pass over the data
            for p in patterns:
                inputs = p[0]   # first element of each row: the input vector
                targets = p[1]  # second element: the expected output
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 80 == 0:  # log the error every 80 iterations
                print('error=%-.9f' % error)
                XLErrorList.append(error)
            if i % 640 == 0:
                print(' [%3.2f %%] delta=%3.9f\n' % ((i * 1.0 / iterations) * 100, abs(error - olderror)))
                olderror = error
        print('\n')
        with open('x1x2-XLError.txt', 'w') as XLErrorOutput:
            for item in XLErrorList:
                XLErrorOutput.write(str(item) + ' ')
def demo():
    """Load the x1x2 dataset, train a BPNN on its first rows, test on the rest."""
    TestFileList = ['sinx_InputData.txt', '1sinx1_InputData.txt', 'x1x2_InputData.txt']
    TestFileXL = [9, 9, 121]  # number of training rows for each dataset
    FileNum = 2
    TestFile = TestFileList[FileNum]  # the data is expected in this file
    # Each line holds whitespace-separated floats; the last one is the target.
    TestList2 = []
    with open(TestFile, 'r') as f:  # `with` closes the file (the old code leaked the handle)
        for line in f:
            TestList = [float(x) for x in line.split()]
            TestList2.append([TestList])
    # Reshape every row into [[inputs...], [target]].
    traindata = []
    for row in TestList2:
        values = row[0]
        traindata.append([values[:-1], [values[-1]]])
    datalen = len(traindata[0][0])  # size of the input layer
    example_list = traindata[:TestFileXL[FileNum]]
    bpnn = BPNN.Bpnn(datalen, [datalen + 16, datalen + 16, 1])
    bpnn.train(example_list, 0.165, 0.1, 0.1)
    # Evaluate on the held-out rows and dump "inputs -> prediction" to a file.
    with open('x1x2_Output.txt', 'w') as FOutput:
        for line in traindata[TestFileXL[FileNum]:]:
            bpnn.compute(line[0])
            print(line[0][0], line[0][1], ' -> ', bpnn.output()[0])
            for item in line[0]:
                FOutput.write(str(item) + ' ')
            for item in bpnn.output():
                FOutput.write(str(item) + ' \n')
# Run the x1x2 experiment only when executed as a script (not on import).
if __name__ == '__main__':
    demo()
# clear;
# file_t = fopen('D:\!zju\!IntelligentSystem\HW#4\x1x2_Output.txt','r');
# [x fx] = fscanf(file_t,'%f %f');
# for i=1:3:fx
# x1((i+2)/3)=x(i,1);
# x2((i+2)/3)=x(i+1,1);
# yy((i+2)/3)=x(i+2,1);
# end
# fclose(file_t);
# x1=reshape(x1,21,21);
# x2=reshape(x2,21,21);
# yy=reshape(yy,21,21);
#
# for i=1:21
# for j=1:21
# if x1(i,j)==0 temp1=1;
# else temp1=sin(x1(i,j))/x1(i,j);end
# if x2(i,j)==0 temp2=1;
# else temp2=sin(x2(i,j))/x2(i,j);end
# YY(i,j)=temp1*temp2;
# end
# end
# %mesh(x1,x2,YY); %理论图
# mesh(x1,x2,yy); %训练图 | true |
17a681ffc262d5bc5bfa32b5dd29d3cfc803d0cd | Python | natha1601/FaceRecognitionwithFacialLandmarkPython | /fix uji.py | UTF-8 | 1,929 | 2.828125 | 3 | [] | no_license | import pandas as pd
import numpy as np
wine = pd.read_csv('trainingdataxx.csv', names = ["1", "2", "3", "4", "5", "6","7","8", "9",
"10","11", "12", "13","14","15","16","17","18",
"name"])
x_train = wine.drop('name', axis=1)
y_train = wine['name']
#from sklearn.model_selection import train_test_split
#x_train, x_test, y_train, y_test = train_test_split(x,y)
#from sklearn.preprocessing import StandardScaler
#scaler = StandardScaler()
#scaler.fit(x_train)
#StandardScaler(copy=True, with_mean=True, with_std=True)
#x_train = scaler.transform(x_train)
#x_test = scaler.transform(x_test)
from sklearn.neural_network import MLPClassifier
#from sklearn.metrics import classification_report,confusion_matrix
mlp = MLPClassifier(hidden_layer_sizes=(13,13,13),max_iter=500,
learning_rate_init=0.001, momentum=0.9, random_state=0)
model = mlp.fit(x_train, y_train)
'''MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,
beta_2=0.999, early_stopping=False, epsilon=1e-08,
hidden_layer_sizes=(13, 13, 13), learning_rate='constant',
learning_rate_init=0.001, max_iter=500, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=None,
shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1,
verbose=False, warm_start=False)'''
x_test = pd.read_csv('testingdata.csv', names = ["1", "2", "3", "4", "5", "6","7","8", "9",
"10","11", "12", "13","14","15","16","17","18"])
predictions = model.predict_log_proba(x_test)
prediction_prob = model.predict_proba(x_test)
predict = model.predict(x_test)
print(predict)
print((prediction_prob)+1)
#prediction = mlp.predict(x_test)
#print(classification_report(y_test,prediction))
| true |
a2cf0100e97e854b1d8aa86b89b0747920a46628 | Python | JannisK89/AdventOfCode2020 | /Day6/part1.py | UTF-8 | 477 | 3.0625 | 3 | [] | no_license |
# https://adventofcode.com/2020/day/6
def countDifferentAnswers(inputFile):
    """Sum, over blank-line-separated groups, the number of distinct answer
    characters given by anyone in the group (Advent of Code 2020, day 6 part 1)."""
    with open(inputFile, 'r') as handle:
        lines = handle.readlines()
    answered = set()
    total = 0
    for raw in lines:
        entry = raw.strip()
        if entry:
            # Same group: collect this person's answers.
            answered.update(entry)
        else:
            # Blank line ends the group: count its distinct answers.
            total += len(answered)
            answered = set()
    # The last group is not followed by a blank line.
    return total + len(answered)
# Solve part 1 for the puzzle input shipped alongside this script.
print(countDifferentAnswers('input.txt'))
| true |
414c856b007709f6b3ebcf0990cc508547b13275 | Python | PaulaSena/Python | /script-python/b.py | UTF-8 | 413 | 3.609375 | 4 | [] | no_license | nome=input('Qual é seu nome? ')
# Read the remaining personal data (the name was read just above) and echo it
# back for confirmation.
idade=input('Qual é a sua idade? ')
peso=input('Qual é a seu peso? ')
print("Seu nome é "+nome," sua idade é de "+idade, " seu peso é de "+peso)
verific=input("Correto? ")
if verific=='sim':
    print('Bem Vinda: '+nome)
elif verific=='não':
    print('Informe seus dados novamente :')
else:
    # The original line here (`verific=='null'`) was a comparison used as a
    # statement — a no-op — so it has been removed.
    print('Informe seus dados novamente :')
| true |
08830c0a725ed97f15a58d1f55f54f678144eb74 | Python | RevathiRathi/Revat | /power.py | UTF-8 | 45 | 2.546875 | 3 | [] | no_license | n,k=map(int,input().split())
# n and k were read from stdin on the line above; print n raised to the k-th power.
s=n**k
print(s)
| true |
b1961c5d69099673ac8616ccbf786d047ebec10e | Python | amnamoh/MiniNN_Modified- | /Modified_MiniNN.py | UTF-8 | 9,817 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[19]:
import numpy
import numpy as np
import numpy.random
numpy.set_printoptions(precision=3, floatmode="fixed")
class MiniNN:
    """
    A minimal multi-layer perceptron with logistic activations, trained by
    (mini-)batch gradient descent.

    Naming convention: Any self variable starting with a capitalized letter and
    ending with s is a list of 1-D or 2-D numpy arrays, each element of which is
    about one layer, such as weights from one layer to its next layer.

    self.Ws:     list of 2-D numpy arrays, transfer matrices of all layers,
                 ordered in feedforward sequence
    self.phi:    activation function
    self.psi:    derivative of the activation in terms of its OUTPUT
                 (valid ONLY for the logistic activation)
    self.Xs:     list of 1-D numpy arrays, (augmented) output of each layer
    self.Deltas: list of 1-D numpy arrays, delta at each layer
    self.Grads:  list of 2-D numpy arrays, last computed per-layer gradients
    """
    def logistic(self, x):
        """Logistic sigmoid activation."""
        return 1/(1 + numpy.exp(-x))
    def logistic_psi(self, x):
        """Derivative of the logistic function in terms of its output x:
        if x = logistic(t), then dx/dt = x * (1 - x)."""
        return x * (1-x)
    def __init__(self, input, output, NeuronsInLayers):
        """Initialize the network.

        input:  2-D array of AUGMENTED sample rows (leading 1 for the bias);
                only the row width is used here, to size the first matrix.
        output: 2-D array of target rows; only the row width is used, to size
                the last matrix.
        NeuronsInLayers: number of non-bias neurons in each hidden layer.
        """
        Ws = []
        fan_in = len(input[0])  # input rows are already augmented
        for width in NeuronsInLayers:
            Ws.append(np.random.randn(fan_in, width))
            fan_in = width + 1  # +1: each layer's output gains a bias entry
        # Transfer matrix into the output layer.
        Ws.append(np.random.randn(fan_in, len(output[0])))
        self.Ws = Ws
        self.phi = self.logistic  # same activation function for all neurons
        self.psi = self.logistic_psi
    def feedforward(self, x, W, phi):
        """Forward one layer: from augmented previous-layer output x, through
        transfer matrix W and activation phi.

        Returns the next layer's output augmented with a leading bias 1
        (the 1 is simply redundant when the forwarded layer is the output).
        """
        return numpy.concatenate(([1], phi(numpy.matmul(W.transpose(), x))))
    def predict(self, X_0):
        """Feed sample X_0 (1-D, AUGMENTED) through the net, logging every
        layer's output in self.Xs for backpropagation later; the final
        un-augmented prediction is stored in self.oracle."""
        Xs = [X_0]
        X = X_0
        for W in self.Ws:
            X = self.feedforward(X, W, self.phi)
            Xs.append(X)
        self.Xs = Xs
        self.oracle = X[1:]  # strip the artificial bias entry
    def backpropagate(self, delta_next, W_now, psi, x_now):
        """One backward step: map the next layer's delta onto the current layer.

        delta_next: delta at the next layer, INCLUDING the bias entry
        W_now:      transfer matrix from the current layer to the next one
        psi:        derivative of the activation, in terms of the activation value
        x_now:      (augmented) output of the current layer
        """
        delta_next = delta_next[1:]  # the bias term has no incoming weights
        # Propagate the error backwards through the weights...
        delta_now = numpy.matmul(W_now, delta_next)
        # ...then through the activation (Hadamard product; this closed form
        # is only valid for the logistic activation).
        delta_now *= psi(x_now)  # fixed: use the psi argument instead of silently ignoring it
        return delta_now
    def get_deltas(self, target):
        """Compute and store the delta of every layer for the last predicted sample.

        target: 1-D numpy array, the sample's target.
        """
        # With logistic activation and cross-entropy loss, the output delta is
        # simply prediction minus target.
        delta = self.oracle - target
        # Prepend a dummy bias delta so output and hidden deltas share a layout.
        delta = numpy.concatenate(([0], delta))
        self.Deltas = [delta]
        for l in range(len(self.Ws)-1, -1, -1):  # walk the layers backwards
            W, X = self.Ws[l], self.Xs[l]
            delta = self.backpropagate(delta, W, self.psi, X)
            self.Deltas.insert(0, delta)  # prepend, because BACK-propagate
    def print_progress(self):
        """Print Xs, Deltas and gradients after a sample has been processed."""
        print ("\n prediction: ", self.oracle)
        for l in range(len(self.Ws)+1):
            print ("layer", l)
            print (" X:", self.Xs[l], "^T")
            print (" delta:", self.Deltas[l], "^T")
            if l < len(self.Ws): # last layer has no transfer matrix
                print (' W:', numpy.array2string(self.Ws[l], prefix=' W: '))
                try:
                    # self.Grads does not exist before the first backward pass.
                    print(' gradient:', numpy.array2string(self.Grads[l], prefix=' gradient: '))
                except (AttributeError, IndexError):  # narrowed from a bare except
                    pass
    def compute_grad(self):
        """Compute and return the per-layer gradients for the sample last run
        through predict()/get_deltas().

        The bias entry of each delta is skipped because the bias neuron has no
        incoming connection. NOTE: unlike the old update_weights(), this no
        longer touches self.Ws — weights are changed only in update(), once per
        batch. (Previously the gradient was applied here AND again in update(),
        so every sample's gradient was applied twice.)
        """
        self.Grads = []
        for l in range(len(self.Ws)):  # l is the layer index
            x = self.Xs[l]
            delta = self.Deltas[l+1]
            gradient = numpy.outer(x, delta[1:])
            self.Grads.append(gradient)
        return self.Grads
    def update(self, grad):
        """Apply one gradient-descent step: subtract the (summed) batch
        gradients from every transfer matrix. Called once per batch."""
        for l in range(len(self.Ws)):
            self.Ws[l] -= 1 * grad[l]  # descent, learning rate fixed at 1
    def train(self, X, Y, max_iter=100, verbose=False, batchSize=1):
        """Train with mini-batch gradient descent.

        X: 2-D numpy array of AUGMENTED input rows
        Y: 2-D numpy array of target rows
        max_iter: number of epochs over the whole dataset
        verbose: print per-sample internals via print_progress()
        batchSize: samples per weight update (1 = plain SGD)
        """
        for epoch in range(max_iter):
            print ("epoch", epoch, end=":")
            for j in range(0, len(X), batchSize):  # slice the data into batches
                x = X[j:j+batchSize]
                y = Y[j:j+batchSize]
                GradientL = None  # running sum of this batch's gradients
                for i in range(len(x)):
                    self.predict(x[i])      # forward pass
                    print (self.oracle)
                    self.get_deltas(y[i])   # backward pass
                    if verbose:
                        self.print_progress()
                    sample_grads = self.compute_grad()
                    if GradientL is None:
                        GradientL = sample_grads
                    else:
                        GradientL = [np.add(a, b) for a, b in zip(GradientL, sample_grads)]
                # One descent step with the summed gradients of the batch.
                self.update(GradientL)
# Demo: train a two-hidden-layer net on four hand-made, pre-augmented samples.
if __name__ == "__main__":
    # The training sample
    x_0 = numpy.array(([[1, 1,3], [1,0,0], [1,4,5], [1,0,0]])) # input matrix, augmented
    y_0 = numpy.array(([[0],[1],[0],[1]]))# output, target.
    # this number must be between 0 and 1 because we used logistic activation and cross entropy loss.
    # To use functions individually
    #MNN = MiniNN(x_0,y_0,10,7)
    #Ws = MNN.Ws
    #MNN.predict(x_0)
    #MNN.get_deltas(y_0)
    #MNN.print_progress()
    #MNN.update_weights()
    #MNN.print_progress()
    # Or a recursive training process
    MNN = MiniNN(x_0,y_0,[2,2]) # re-init
    MNN.train(x_0, y_0, max_iter=20,verbose=True,batchSize =2)
# In[ ]:
| true |
ad3c4fcc9e0b6de04d0a848e12e99112e70cbb14 | Python | karandeepSJ/Robust-Oblivious-Transfer | /NetworkNode.py | UTF-8 | 922 | 2.71875 | 3 | [] | no_license | import random
import secrets

from TransmissionBlock import TransmissionBlock
from Reconstructor import Reconstructor
class NetworkNode:
def __init__(self, p, g):
self.p, self.g = p, g
def generate_private_key(self):
self.priv_key = random.randint(0, self.p-1)
def generate_public_key(self):
self.pub_key = pow(self.g, self.priv_key, self.p)
def get_public_key(self):
return self.pub_key, self.p, self.g
def set_partner_public_key(self, pub_key):
self.partner_public_key = pub_key
def compute_routing_array(self, Z, n):
blocks = [TransmissionBlock(Z, self.priv_key, self.p, self.g, self.pub_key) for i in range(n)]
return [b.show() for b in blocks]
def recover_array(self, X, k):
recon = Reconstructor(*self.partner_public_key, X)
print("Verified blocks: " + str(recon.verify()))
rec = recon.recover(k-1)
return rec | true |
1760e40f30b68d03c3df0dd26f00f18c5653f407 | Python | claudio1624/Grafico | /Grafico_4en1.py | UTF-8 | 686 | 3.46875 | 3 | [] | no_license | #! /usr/bin/python
# -*- coding: iso-8859-15 -*-
from pylab import *
import matplotlib.pyplot as plt
# import matplotlib import *
import numpy as np
# period of the plotted sine wave
periodo = 2
# 1-D sample grid for the x axis
x = np.linspace(0, 10, 1000)
# the function to plot: sin with the chosen period
y = np.sin(2*np.pi*x/periodo)
# create the figure
plt.figure()
# first subplot (top-left): red dotted
plt.subplot(2,2,1)
plt.plot(x, y, 'r', linestyle=":")
# second subplot (top-right): green dashed
plt.subplot(2,2,2)
plt.plot(x, y, 'g', linestyle="--")
# third subplot (bottom-left): blue dotted
plt.subplot(2,2,3)
plt.plot(x, y, 'B', linestyle=":")
# fourth subplot (bottom-right): black dashed
plt.subplot(2,2,4)
plt.plot(x, y, 'k', linestyle="--")
# show the window
plt.show()
| true |
fc904877d10b45dc4b1b69a94f4eb1ae7bc3257f | Python | eliasssantana/API_activities | /app.py | UTF-8 | 4,636 | 2.890625 | 3 | [] | no_license | from flask import Flask, json,request
from flask_restful import Resource, Api
from flask_httpauth import HTTPBasicAuth
from werkzeug.wrappers import response
from models import People, Activities, Users
auth = HTTPBasicAuth() # crio um objeto do método verificador
app = Flask(__name__) # crio uma instância da classe Flask
api = Api(app) # aqui crio uma API da intância flask
# dicionário de usuário e senha
# USERS = {
# "Elias": "2077",
# "John": "9873"
# }
# função verficadora; onde irei validar o usuário e senha
@auth.verify_password  # registers this callback as flask-httpauth's password verifier
def verification(username, password):
    """Return True only when the username/password pair exists in the Users table."""
    # FIX: the old `not (username, password)` compared against a non-empty
    # tuple, which is ALWAYS truthy, so the guard never fired.
    if not (username and password):
        return False
    # FIX: `.first()` is required — a Query object is always truthy, even
    # when it matches no rows, so every login used to be accepted.
    return Users.query.filter_by(username=username, password=password).first() is not None
class Person(Resource):
    """REST resource for a single person, addressed by name (/person/<name>)."""
    @auth.login_required # requires HTTP basic auth (see the verify_password callback)
    def get(self,name):
        """Return the person's data, or an error payload when the name is unknown."""
        person = People.query.filter_by(name=name).first()
        try:
            # `person` is None when no row matched; attribute access then raises
            # AttributeError, which the handler below turns into an error payload.
            response = {
                "name": person.name,
                "age": person.age,
                "id": person.id
            }
        except AttributeError:
            response = {
                'status':"ERRO",
                'message':"Person not found."
            }
        return response
    def put(self,name):
        """Update name and/or age of an existing person from the JSON body."""
        try:
            person = People.query.filter_by(name=name).first()
            dados = request.json
            print("to aqui")
            if "name" in dados:
                person.name = dados["name"]
            if "age" in dados:
                person.age = dados["age"]
            response = {
                "name" : person.name,
                "age": person.age
            }
            person.save()
        except AttributeError:
            response = {
                "status":"erro",
                "message": "person not found"
            }
        return response
    def delete(self, name):
        """Delete the person; reports success or a not-found error payload."""
        try:
            person = People.query.filter_by(name=name).first()
            message = f"{person} deleted successfully"
            person.delete()
            response = {'status':'success','message': message}
        except AttributeError:
            message = f"{name} not found"
            response = {
                "status":"erro",
                "message": message
            }
        return response
class PeopleList(Resource):
    """REST resource for the person collection (/person)."""
    @auth.login_required
    def get(self):
        """List every person as [{id, name, age}, ...]."""
        pessoas = People.query.all()
        response = [{"id":i.id,"name":i.name,"age":i.age} for i in pessoas]
        return response
    def post(self):
        """Create a person from the JSON body (expects name, age and id keys)."""
        data = request.json # equivalent to json.loads(request.data)
        person = People(name=data['name'],age=data['age'],id = data['id'])
        person.save()
        response = {
            "name": person.name,
            "age": person.age,
            "id": person.id
        }
        return response
class ActivitiesList(Resource):
    """REST resource for the activity collection (/activities/). No auth required."""
    def get(self):
        """List every activity together with its owner's name and status."""
        activities = Activities.query.all()
        response = [{"id": i.id,"name": i.name,"person":i.person.name,"status": i.status} for i in activities]
        return response
    def post(self):
        """Create an activity for an existing person (looked up by name)."""
        data = request.json
        person = People.query.filter_by(name=data['person']).first()
        activity = Activities(name=data['name'], person = person, status = data['status'])
        activity.save()
        # NOTE(review): this payload uses the key 'pessoa' while GET uses
        # 'person' — confirm whether clients depend on the inconsistency.
        response = {
            'pessoa': activity.person.name,
            'name': activity.name,
            'id': activity.id,
            'status': activity.status
        }
        return response
class Person_Activities(Resource):
    """REST resource returning the first activity of a person (looked up by name)."""

    def get(self,name):
        try:
            person = People.query.filter_by(name=name).first()
            # .first() returns only one activity even if the person has several.
            activity = Activities.query.filter_by(person=person).first()
            response = {
                "person": activity.person.name,
                "activities": activity.name
            }
        except AttributeError:
            # `person` or `activity` was None — no matching record.
            response = {
                "status":"erro",
                "message": "record not found."
            }
        return response
class Activities_status(Resource):
    """REST resource returning an activity's name by its numeric id."""

    def get(self,id):
        # NOTE(review): unlike the other resources there is no AttributeError
        # guard here — an unknown id raises instead of returning an error body.
        activity = Activities.query.filter_by(id=id).first()
        response = {
            "activities": activity.name
        }
        return response
# Route registration: map each resource class onto its URL pattern.
api.add_resource(Person,"/person/<string:name>")
api.add_resource(PeopleList,"/person")
api.add_resource(ActivitiesList,"/activities/")
api.add_resource(Person_Activities,"/activities/<string:name>")
api.add_resource(Activities_status,"/activity/<int:id>")
if __name__=="__main__":
    app.run(debug=True)  # debug server; not for production use
5ff4bedab0794413fa1302183bda364c8ec41ad7 | Python | JamesGardner1/tictactoe | /main.py | UTF-8 | 3,425 | 3.859375 | 4 | [] | no_license | # This is a basic Tic Tac Toe game where the player plays against the computer
import random
def main():
    """Show the empty board and start the game.

    player_turn() drives the whole match via mutual recursion with
    computer_turn(); the trailing check/announce calls only matter once
    that recursion unwinds.
    """
    display_ui()
    player_turn()
    check_victory()
    player_victory()
# Game-state flags shared (as module globals) by the functions below.
gameStillOn = True  # cleared once someone wins or the board fills up
playerWins = False  # set by check_victory() when X completes a line
computerWins = False  # set by check_victory() when O completes a line
def newGame():
    """Reset the board and start another round once the previous game ended."""
    global gameStillOn
    global ui
    if not gameStillOn:
        print("New Game!")
        # Rebind the module-level board. Without `global ui` the original
        # assignment only created a local list, so the finished board was
        # reused for the new game.
        ui = ["-", "-","-","-","-","-","-","-","-"]
        display_ui()
        player_turn()
        gameStillOn = True
# The 3x3 board stored as a flat list of nine cells; "-" marks an empty cell.
ui = ["-", "-","-","-","-","-","-","-","-"]
# Displays Tic Tac Toe board
def display_ui():
    """Print the current 3x3 board, one row of three cells per line."""
    for row_start in (0, 3, 6):
        print(" | " + " | ".join(ui[row_start:row_start + 3]) + " | ")
# Player Turn
def player_turn():
    """Prompt the human for a cell (1-9), place an X, and pass the turn.

    This function and computer_turn() call each other recursively, so the
    whole match runs inside this call chain.
    """
    print("Your Turn!")
    choice = input("Choose a spot from 1 - 9: ")
    # Players move
    # Input validation code based off of https://www.youtube.com/watch?v=BHh654_7Cmw
    validMove = False
    while not validMove:
        # Re-prompt until the input is one of the digits 1-9. After the
        # int() conversion below, `choice` is an int and therefore never in
        # this list of strings, so an occupied-cell retry also re-prompts here.
        while choice not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
            choice = input("Choose a spot from 1 - 9: ")
        choice = int(choice) - 1  # convert the 1-based prompt to a 0-based index
        if ui[choice] == "-":
            validMove = True
        else:
            print("Ooops, that spot is filled. Pick another")
    ui[choice] = "X"
    display_ui()
    check_victory()
    player_victory()
    computer_victory()
    tie()
    computer_turn()
# Computer Turn
def computer_turn():
    """Place an O on a randomly chosen *empty* cell, then pass the turn back."""
    print("Computers Turn!")
    # Only consider free cells: the original randint(0, 8) could overwrite a
    # square that was already taken.
    open_cells = [i for i, cell in enumerate(ui) if cell == "-"]
    if not open_cells:
        return  # board is full; nothing left to play
    choice = random.choice(open_cells)
    ui[choice] = "O"
    display_ui()
    check_victory()
    player_victory()
    computer_victory()
    tie()
    player_turn()
# Check victory
def check_victory():
    """Set playerWins / computerWins if X or O completes a line.

    Returns True as soon as one side has won (player checked first, matching
    the original behavior of not examining O's lines once X has won);
    returns None when nobody has won yet.
    """
    global playerWins
    global computerWins
    # All eight winning index triples: three rows, three columns, two diagonals.
    lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6))

    def _has_line(mark):
        # True when `mark` fills any one of the eight lines.
        return any(ui[a] == ui[b] == ui[c] == mark for a, b, c in lines)

    if _has_line("X"):
        playerWins = True
        return playerWins
    if _has_line("O"):
        computerWins = True
        return computerWins
# Player Wins
def player_victory():
    """Announce the human's win and stop the game loop flag."""
    global gameStillOn
    if not playerWins:
        return
    print("Player wins!")
    gameStillOn = False
# Computer Wins
def computer_victory():
    """Announce the computer's win and stop the game loop flag."""
    global gameStillOn
    if not computerWins:
        return
    gameStillOn = False
    print("Computer wins!")
# Tie
def tie():
    """Declare a draw when the board is full and neither side has won."""
    global gameStillOn
    # Compare against the computerWins *flag*. The original tested the
    # computer_victory function object, which is always truthy, so the tie
    # branch could never run.
    if "-" not in ui and not playerWins and not computerWins:
        gameStillOn = False
        print("Tie!")
# Kick off the first game when the module is executed.
main()
a56ba7e8cf84ca3455c222fd7a4a4457f9c83a8a | Python | muratortak/bizmeme-ng | /linkfarmer.py | UTF-8 | 1,440 | 2.5625 | 3 | [] | no_license | import time
from random import shuffle, sample
from re import search, findall
from data import Post
from utils.chandata import ChanBoards
from utils.operations import getThreadIdsFromCatalog, getThread, getCommentsFromThreadAsList, removeHTMLFromComment
import db
# Board slugs to scrape; shuffled below so runs start from a random board.
boards = ['pol',
        'vg',
        'v',
        'b',
        'biz',
        'int',
        'a',
        'tv',
        'vt',
        'trash',
        'mu',
        'fit',
        'r9k',
        'g',
        'x',
        'his',
        'adv',
        'lit',
        'bant',
        'ck',
        'qa',
        'aco',
        'mlp',
        'vrpg',
        'soc',
        'vr',
        's4s'
        ]
shuffle(boards)
def scrapeBoard(board: str) -> None:
    """Scrape every thread of one board and store all comments via db.addPost.

    Prints per-thread progress and commits after each thread.
    """
    threadsIdList = getThreadIdsFromCatalog(board)
    # NOTE(review): exit() here aborts the entire run; a `return` would let
    # the remaining boards still be scraped — confirm which is intended.
    if not threadsIdList: exit()
    print(f"Beginning {board}, total threads {len(threadsIdList)}")
    for threadIndex, threadId in enumerate(threadsIdList):
        delta = 0
        timePast = time.time()  # start of per-thread timing
        thread = getThread(board, threadId)
        if thread:
            for comment in getCommentsFromThreadAsList(thread):
                db.addPost(board,Post(comment))
        delta = time.time() - timePast  # seconds spent on this thread
        print(board, threadIndex, "/", len(threadsIdList), delta)
        db.con.commit()  # persist after every thread so a crash loses little
# Scrape every board in the shuffled order, then close the database.
for board in boards:
    scrapeBoard(board)
db.con.close()
| true |
b146c413568e930eb905a7d918aae392094f2714 | Python | BITMystery/leetcode-journey | /46. Permutations.py | UTF-8 | 593 | 2.96875 | 3 | [] | no_license | class Solution(object):
def backtrack(self, nums, start, path, res):
if len(path) == len(nums):
res.append(path) # leaf
return
for i in xrange(start, len(nums)):
nums[i], nums[start] = nums[start], nums[i]
self.backtrack(nums, start + 1, path + [nums[start]], res)
nums[i], nums[start] = nums[start], nums[i]
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
self.backtrack(nums, 0, [], res)
return res | true |
e1ea7d9eb623406536e5dab73e5d56ea7e26248c | Python | KimSeonBin/algo_practice | /acmicpc/16196.py | UTF-8 | 1,677 | 2.71875 | 3 | [] | no_license | def sol():
st = input()
n = [st[0:6], st[6:14], st[14:17], st[17:18]]
locate = []
check = False
for i in range(0, int(input())):
if n[0] == input():
check = True
if check is False:
return 'I'
if n[2] == '000':
return 'I'
ndate = [n[1][0:4], n[1][4:6], n[1][6::]]
if ndate[0] < '1900' or ndate[0] > '2011':
return 'I'
if ndate[1] < '01' or ndate[1] > '12':
return 'I'
year = int(ndate[0])
month = int(ndate[1])
day = int(ndate[2])
if month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or \
month == 10 or month == 12:
if day < 1 or day > 31 or ndate[2] < '01' or ndate[2] > '31':
return 'I'
elif month == 2:
yoon = False
if year % 4 == 0:
yoon = True
if year % 100 == 0:
yoon = False
if year % 400 == 0:
yoon = True
if yoon is False:
if day < 1 or day > 28 or ndate[2] < '01' or ndate[2] > '28':
return 'I'
else:
if day < 1 or day > 29 or ndate[2] < '01' or ndate[2] > '29':
return 'I'
else:
if day < 1 or day > 30 or ndate[2] < '01' or ndate[2] > '30':
return 'I'
temp = 2
checksum = 0
for i in range(1, len(st)):
checksum += int(st[-1-i]) * temp
temp *= 2
if st[-1] == 'X':
checksum += 10
else:
checksum += int(st[-1])
if checksum % 11 != 1:
return 'I'
if int(n[2]) % 2 == 1:
return 'M'
else:
return 'F'
if __name__ == '__main__':
    # Print the validation verdict ('I', 'M' or 'F') for the stdin input.
    print(sol())
| true |
2997264ddff2839271079968a19d547de8570b3d | Python | jplhanna/TBD | /tree/management/commands/add_movie_data.py | UTF-8 | 2,640 | 3.125 | 3 | [] | no_license | from django.core.management.base import BaseCommand, CommandError
from tree.models import Movie
from parser import MovieParser
#So rather than calling the parser we call manage with a specific function line which will call this. The function should include the location of the file being added
#Parser could be called to parse the file and return the proper list. The parser could be a factory method
class Command(BaseCommand):
    """manage.py command that populates the Movie table from a CSV file."""
    help="Updates Movie table/database with data given in csv's"
    def add_arguments(self,parser):
        """Register the two positional arguments of the command."""
        #The argument of the add_movie_data command, aka the csv file location, there should be at least 1
        parser.add_argument('csv_location',nargs=1,help='Must input one valid csv file location')
        parser.add_argument('available',nargs=1,help='Must input one availability location')
    '''
    CreateMovies: A method which adds movies into the database, or updates the information of currently existing movies
    input: csv: A string containing the file location with the movies to be input
    modifies: The movie table contained in db.splite3, and based on the Movie model in the Tree application. Adds movies into said database
    '''
    def CreateMovies(self, csv, available):
        """Insert/update movies parsed from `csv`, flagging `available` platform."""
        _movie_Data=MovieParser(csv)
        for _movie_tmp in _movie_Data:
            # Reuse an existing row when exactly one movie with this IMDB id
            # exists; otherwise create a fresh row.
            # NOTE(review): duplicates (len > 1) also fall into the "create
            # new" branch — confirm that is intended.
            _movie = Movie.objects.filter(imdb=_movie_tmp[1]).all()
            if(len(_movie) == 1):
                _new_Movie_tmp = _movie[0]
            else:
                _new_Movie_tmp=Movie(title=_movie_tmp[0],imdb=_movie_tmp[1],poster=_movie_tmp[2],popularity=_movie_tmp[4])
            # Mark which streaming platform this CSV describes.
            if available == "amazon":
                _new_Movie_tmp.amazon = True
            elif available == "amazonPrime":
                _new_Movie_tmp.amazonPrime = True
            elif available == "googlePlay":
                _new_Movie_tmp.googlePlay = True
            elif available == "hulu":
                _new_Movie_tmp.hulu = True
            elif available == "itunes":
                _new_Movie_tmp.itunes = True
            elif available == "netflix":
                _new_Movie_tmp.netflix = True
            _new_Movie_tmp.save()
    '''
    handle: The command connected to the manage.py command prompt. Is meant to update/populate the movie database using a csv
    input: *args: A list of arguments which have been input.
           **options: A map of operations, which contain options for the command which is being run. Which should just include the csv file location
    '''
    def handle(self,*args,**options):
        """Entry point called by manage.py; delegates to CreateMovies."""
        self.CreateMovies(options['csv_location'][0], options['available'][0])
b6bdf65c38ea60e5d0d5250869af20d4ee1c532d | Python | BeTripTeam/BeTrip_Places_Evaluation | /evluation/PhotoEvaluation.py | UTF-8 | 1,183 | 3.078125 | 3 | [] | no_license | from Images_Beauty.ImageAnalytics import ImageAnalytics
from numpy import array
class PhotoEvaluator:
    """Scores a list of place photos using the ImageAnalytics beauty model."""

    def __init__(self):
        self.images_analyzer = ImageAnalytics()
    def evaluate_photos(self, photos):
        """
        Gives a mark to photo list according to
        - number of photos
        - beauty of photos
        :param photos:
        :return: mark in range [0, 1]

        NOTE(review): with more than 10 photos (w = 1) and a mean mark above
        0.85, the +0.2 bonus can push the result above 1 — confirm whether
        the [0, 1] claim above should be enforced by clamping.
        """
        self.update_photos_scores(photos)
        if not photos:
            return 0
        # Penalize places with few photos.
        if len(photos) <= 10:
            w = 0.5
        else:
            w = 1
        m = array([photo.mark for photo in photos]).mean() * w
        if m > 0.85: m = m + 0.2  # bonus for outstanding photo sets
        return m
    def update_photos_scores(self, photos):
        """Lazily score photos; a negative mark means "not scored yet"."""
        for photo in photos:
            if photo.mark < 0:
                photo.mark = self._beauty_score(photo)
    def _beauty_score(self, photo):
        # Delegates to the (presumably remote) beauty-scoring model by URL.
        return self.images_analyzer.beauty_score_url(photo.photo_link)
# from models.Photo import Photo
# pe = PhotoEvaluator()
# print(pe.evaluate_photos([Photo('https://travelpassionate.com/wp-content/uploads/2017/08/Kirkjufellsfoss-waterfall-Iceland-Europe.-Beauty-world.-min.jpg', 0.0)])) | true |
cc9b31488e88decee4ce3ed12088e9a23b81688a | Python | JrdnVan/csesoc-personal-projects-competition | /scripts/remove_event.py | UTF-8 | 1,539 | 2.59375 | 3 | [] | no_license | import boto3
from boto3.dynamodb.conditions import Key, Attr
from decouple import Config, RepositoryEnv
DOTENV_PATH = ".env"
env = Config(RepositoryEnv(DOTENV_PATH))
# Call user/event table from AWS
session = boto3.Session(
aws_access_key_id=env.get('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=env.get('AWS_SECRET_ACCESS_KEY'),
)
s3 = session.client('s3')
dynamodb = session.resource('dynamodb', region_name='ap-southeast-2')
meet_ball_user = dynamodb.Table('meet_ball_user')
meet_ball_join = dynamodb.Table('meet_ball_join_table')
# Requires Host and Event_id
def delete_event(host_id, event_id):
    """Delete an event owned by host_id plus every guest/event join row.

    :param host_id: partition key of the hosting user (non-empty str)
    :param event_id: sort key identifying the event (non-empty str)
    :return: False on invalid input or AWS error; None on success (kept
             for backward compatibility — both are falsy to callers).
    """
    # Validate that both IDs are non-empty strings. The original condition
    # `type(host_id) == str and type(event_id)` never actually checked
    # event_id's type (a type object is always truthy), and non-string
    # input skipped validation entirely.
    if not isinstance(host_id, str) or not isinstance(event_id, str) \
            or host_id == "" or event_id == "":
        print("One of the UID is empty")
        return False
    try:
        # Remove the event row from the host's table.
        meet_ball_user.delete_item(
            Key = {
                "UID_User": host_id,
                "UID_Event/User" : event_id,
            }
        )
        # Remove every guest relationship that references this event.
        get_resp_event = meet_ball_join.scan(
            FilterExpression=Attr("event").eq(event_id)
        )
        for item in get_resp_event["Items"]:
            meet_ball_join.delete_item(
                Key = {
                    "guest": item["guest"],
                    "event" : item["event"],
                }
            )
        print("Event is deleted!")
    except Exception:
        # Broad catch kept deliberately: boto3 raises many error types and
        # existing callers only check for a falsy result.
        print("Could not delete_event")
        return False
| true |
02f15e0217bc56df630c38d4d7edbe3feae39e80 | Python | MinaxiG/Codewars | /Strip_comments.py | UTF-8 | 654 | 3.5 | 4 | [] | no_license | # Question Link: https://www.codewars.com/kata/51c8e37cee245da6b40000bd
def solution(string,markers):
'''Split the string based on newlines'''
diff = string.split('\n')
res = [] #Final result variable
'''If each line contains any markers, append only the initial part of the line else append the whole line'''
for i in diff:
j = sorted([i.find(j) for j in markers if i.find(j)>-1])
if len(j)>0:
res.append((i[0:j[0]]).strip())
else:
res.append(i.strip())
return "\n".join(res) # Join all the separate with '\n' to bring back to original format | true |
1472faa191e2a6edbb4365e2848230ab89e33404 | Python | jorcuad/weatherStation | /scpdaemon.py | UTF-8 | 7,026 | 2.546875 | 3 | [] | no_license | # !/usr/bin/env python
''' YapDi Example - Demonstrate basic YapDi functionality.
Author - Kasun Herath <kasunh01@gmail.com>
USAGE - python basic.py start|stop|restart
python basic.py start would execute count() in daemon mode
if there is no instance already running.
count() prints a counting number to syslog. To view output of
count() execute a follow tail to syslog file. Most probably
tail -f /var/log/syslog under linux and tail -f /var/log/messages
under BSD.
python basic.py stop would kill any running instance.
python basic.py restart would kill any running instance; and
start an instance. '''
import sys
sys.path.append( "./lib" )
import syslog
import time
import datetime
from transfer import SCPClient
import paramiko
import yapdi
from os import system, listdir
from os.path import isfile, join
import utils
# Sub-commands accepted on the command line.
COMMAND_START = 'start'
COMMAND_STOP = 'stop'
COMMAND_RESTART = 'restart'
COMMAND_RESTORE = 'restore'
# Connection settings, filled in by get_arguments() from argv/stdin.
address= ""
user = ""
password = ""
port = 22
directory = ""
# Local layout: samples to send, backups of sent samples, and the log file.
samples_path = "./data/"
backup_samples_path = "./data/backup/"
log_path = "./logs/scplog.txt"
def createSSHClient(server, port, user, password):
    """ Create a client for a SSH connection.

    :param server: address of the target machine.
    :type server: string
    :param port: port where connect with the target machine.
    :type port: int
    :param user: user of the target machine.
    :type user: string
    :param password: password for the target machine.
    :type password: string
    :return: a connected paramiko.SSHClient instance.
    """
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Automatically trust unknown host keys (convenient but weakens
    # man-in-the-middle protection).
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(server, port, user, password)
    return client
def get_arguments():
    """ Get the arguments from the command line and then ask user for password.
    The parameters that this deamon uses are:
        * Address: address to the target machine.
        * User: user of the target machine.
        * Password: password for the target machine (read from stdin, not argv,
          so it does not appear in the process list).
        * Port: port for the SCP connection.
        * Directory: directory in the target machine where we want to store the samples.
    """
    global address
    global user
    global password
    global port
    global directory
    try:
        address = sys.argv[2]
        user = sys.argv [3]
        port = int(sys.argv[4])
        directory = sys.argv[5]
        password = raw_input()
    except:
        # Any missing/malformed argument falls through to the usage banner.
        usage()
    #print ("frec %s addr %s user %s port %s dir %s pass %s" % (frequency, address, user, port, directory, password))
def usage():
    """ Print in the shell the right usage of this daemon.
    """
    commands = "|".join([COMMAND_START, COMMAND_STOP, COMMAND_RESTART, COMMAND_RESTORE])
    print("USAGE: python %s %s <address> <user> <port> <directory>" % (sys.argv[0], commands))
# Invalid executions: require a recognized sub-command before daemonizing.
if len(sys.argv) < 2 or sys.argv[1] not in [COMMAND_START, COMMAND_STOP, COMMAND_RESTART, COMMAND_RESTORE]:
    usage()
    exit()
#every time a send a scp file i made a backup, when i want to see what files i need to send i compare the samples and the
#backup directories files.
def getFilesToSend():
    """ Get the files which weren't sent to the target machine, sorted from
    the newest one to the oldest one.

    :return: names of the data files not yet recorded in ./config/sendedFiles.
    :rtype: list of str
    """
    try:
        # `with` closes the handle even on error; the previous bare
        # `except` is narrowed to OS-level read failures (missing file etc.).
        with open("./config/sendedFiles", 'r') as sent_index:
            sendedFiles = [line.strip('\n') for line in sent_index.readlines()]
    except (IOError, OSError):
        utils.log(log_path, 'No files in sendedFiles configuration.')
        sendedFiles = []
    datafiles = [f for f in listdir(samples_path) if isfile(join(samples_path, f))]
    files_to_send = list(set(datafiles) - set(sendedFiles))
    files_to_send.sort(reverse=True)
    return files_to_send
def createBackup(file_name):
    """ Create a copy of the selected data file in the backup directory.

    :param file_name: name of the file to backup.
    :type file_name: string
    """
    # NOTE(review): the command is built by string concatenation and run
    # through a shell — a file name containing shell metacharacters would be
    # interpreted. Consider subprocess.run([...], shell=False).
    command = 'cp -a ' + samples_path + file_name + ' ' + backup_samples_path
    system(command)
    utils.log(log_path, 'Created backup of ' + file_name)
def turnWifiOn():
    """ Turn on the wifi and give the interface a moment to come up.
    """
    system('sudo ifup wlan0')
    utils.log(log_path, 'Turn on wifi for send data.')
    # The module imports `time`, not `sleep`; the original bare sleep(5)
    # raised a NameError the first time this ran.
    time.sleep(5)
def turnWifiOff():
    """ Turn off the wifi.
    """
    system('sudo ifdown wlan0')
    utils.log(log_path,'Turn off wifi after send data.')
def mark_as_send(file_name):
    """ Record the file in ./config/sendedFiles once it has reached the
    configured size (i.e. it is full and will not grow anymore).

    :param file_name: name of the file to mark as sent.
    :type file_name: string
    """
    file_size = int(utils.getConfiguration('file_size'))
    # Count the sample lines in the file. The original assigned the
    # enumerate() iterator itself and compared it with an int, which is a
    # TypeError on Python 3 and a meaningless comparison on Python 2.
    with open(samples_path + file_name) as datafile:
        samples_in_file = sum(1 for _ in datafile)
    if samples_in_file >= file_size:
        with open("./config/sendedFiles", 'a+') as sent_index:
            sent_index.write(file_name + "\n")
def count():
    """ Infinite loop executed by this daemon. In every iteration the daemon gets the files which are not sended to the target
        machine and send it to the target machine marking it as sended in the configuration file. The daemon will turn on the wifi if needed.
    """
    while 1:
        # Re-read configuration every cycle so changes apply without restart.
        frequency = int(utils.getConfiguration('frequency_scp'))
        powermode = int(utils.getConfiguration('powermode'))
        # Power modes 2/3 keep wifi off between transfers.
        if(powermode == 2 or powermode == 3):
            turnWifiOn()
        try:
            ssh = createSSHClient(address, port, user, password)
            scp = SCPClient(ssh.get_transport())
        except:
            # Connection failure is fatal: kill the daemon instance and exit.
            utils.log(log_path, "Error trying connect with the destiny device.")
            yapdi.Daemon().kill()
            exit()
        try:
            datafiles = getFilesToSend()
            for datafile in datafiles:
                scp.put(samples_path+datafile, directory)
                createBackup(datafile)
                mark_as_send(datafile)
                utils.log(log_path, "File " + datafile + " sended to " + address)
            if(powermode == 2 or powermode == 3):
                turnWifiOff()
            # frequency is expressed in hours.
            time.sleep(frequency * 3600)
        except:
            # Transfer errors are non-fatal: log and retry next cycle.
            utils.log(log_path, "Error sending the files.")
            time.sleep(frequency * 3600)
# Command dispatch: start/restore/stop/restart the daemon instance.
if sys.argv[1] == COMMAND_START:
    get_arguments()
    daemon = yapdi.Daemon(pidfile='/var/run/scp.pid')
    utils.log(log_path, "Starting daemon.")
    # Check whether an instance is already running
    if daemon.status():
        print("An instance is already running.")
        exit()
    retcode = daemon.daemonize()
    # Execute if daemonization was successful else exit
    if retcode == yapdi.OPERATION_SUCCESSFUL:
        count()
    else:
        print('Daemonization failed')
elif sys.argv[1] == COMMAND_RESTORE:
    # Truncate the sent-files index so everything gets re-sent.
    with open("./config/sendedFiles", 'w+') as file:
        print "Data about sended files cleaned."
        utils.log(log_path, "Data about sended files cleaned.")
        file.close()
elif sys.argv[1] == COMMAND_STOP:
    daemon = yapdi.Daemon(pidfile='/var/run/scp.pid')
    utils.log(log_path, "Daemon Stoped.")
    # Check whether no instance is running
    if not daemon.status():
        print("No instance running.")
        exit()
    retcode = daemon.kill()
    if retcode == yapdi.OPERATION_FAILED:
        print('Trying to stop running instance failed')
elif sys.argv[1] == COMMAND_RESTART:
    get_arguments()
    daemon = yapdi.Daemon(pidfile='/var/run/scp.pid')
    retcode = daemon.restart()
    # Execute if daemonization was successful else exit
    if retcode == yapdi.OPERATION_SUCCESSFUL:
        count()
    else:
        print('Daemonization failed')
4ce798dd960ab82b75a85320331fbf2e20b5f03b | Python | AlexeyBazanov/algorithms | /sprint_3/bracket_generator.py | UTF-8 | 438 | 3.546875 | 4 | [] | no_license | import sys
def generate_brackets(n, counter_open, counter_close, sequence):
if counter_open + counter_close == n * 2:
print(sequence)
if counter_open < n:
generate_brackets(n, counter_open + 1, counter_close, sequence + "(")
if counter_open > counter_close:
generate_brackets(n, counter_open, counter_close + 1, sequence + ")")
n = int(sys.stdin.readline().strip())
generate_brackets(n, 0, 0, "")
| true |
d3f01d431dbf7c57cb50375aa270b962a8c6b17f | Python | heynemann/tornado-geopy | /tests/geocoders/test_google_v3.py | UTF-8 | 3,800 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tornado-geopy geocoding library.
# https://github.com/heynemann/tornado-geopy
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2013 Bernardo Heynemann heynemann@gmail.com
import sys
from tornado.testing import AsyncTestCase, gen_test
from preggy import expect
from tornado_geopy.geocoders import GoogleV3, BoundingBox
class GoogleV3GeoCoderTestCase(AsyncTestCase):
    """Callback-style tests for the async GoogleV3 geocoder (hits the network)."""

    def test_can_geocode_address(self):
        g = GoogleV3(io_loop=self.io_loop)
        # Classic tornado callback pattern: self.stop captures the result,
        # self.wait() spins the IOLoop until the callback fires.
        g.geocode(u"10900 Euclid Ave in Cleveland", callback=self.stop)
        results = self.wait()
        expect(results).to_length(1)
        place, (lat, lng) = results[0]
        expect(place).to_equal(u"10900 Euclid Avenue, Cleveland, OH 44106, USA")
        expect(lat).to_equal(41.5072596)
        expect(lng).to_equal(-81.6070113)
class GoogleV3GeoCoderTestCaseUsingGenTest(AsyncTestCase):
    """Coroutine-style (gen_test) tests for the async GoogleV3 geocoder.

    These tests issue real requests to the Google Geocoding API, so they
    depend on network access and on Google's current responses.
    """

    @gen_test
    def test_can_geocode_address(self):
        """Plain geocode call awaited with `yield` instead of callbacks."""
        g = GoogleV3(io_loop=self.io_loop)
        results = yield g.geocode(u"10900 Euclid Ave in Cleveland")
        expect(results).to_length(1)
        place, (lat, lng) = results[0]
        expect(place).to_equal(u"10900 Euclid Avenue, Cleveland, OH 44106, USA")
        expect(lat).to_equal(41.5072596)
        expect(lng).to_equal(-81.6070113)

    @gen_test
    def test_can_geocode_address_with_region(self):
        """The `region` bias resolves ambiguous names (Toledo, Spain vs. Ohio)."""
        g = GoogleV3(io_loop=self.io_loop)
        results = yield g.geocode(u"Toledo", region="ES")
        expect(results).to_length(1)
        place, (lat, lng) = results[0]
        expect(place).to_equal(u"Toledo, Spain")
        expect(lat).to_equal(39.8628316)
        expect(lng).to_equal(-4.027323099999999)

    @gen_test
    def test_can_geocode_address_with_bounds(self):
        """A BoundingBox biases results toward the given viewport."""
        g = GoogleV3(io_loop=self.io_loop)
        west = -22.917274
        south = -43.186623
        east = -22.906078
        north = -43.162494
        box = BoundingBox((west, south), (east, north))
        results = yield g.geocode(u"Avenida Rio Branco", bounds=box)
        expect(results).to_length(1)
        place, (lat, lng) = results[0]
        expect(place).to_equal(u"Avenida Rio Branco, Rio de Janeiro, Brazil")
        expect(lat).to_equal(-22.9049854)
        expect(lng).to_equal(-43.1777056)

    @gen_test
    def test_can_geocode_address_in_portuguese(self):
        """The `language` option localizes the formatted address."""
        g = GoogleV3(io_loop=self.io_loop)
        west = -22.917274
        south = -43.186623
        east = -22.906078
        north = -43.162494
        box = BoundingBox((west, south), (east, north))
        results = yield g.geocode(u"Avenida Rio Branco", bounds=box, language="pt-BR")
        expect(results).to_length(1)
        place, (lat, lng) = results[0]
        expect(place).to_equal(u"Avenida Rio Branco, Rio de Janeiro, República Federativa do Brasil")
        expect(lat).to_equal(-22.9049854)
        expect(lng).to_equal(-43.1777056)

    def test_geocoding_with_invalid_bounding_fails(self):
        """A non-BoundingBox `bounds` argument must raise ValueError."""
        g = GoogleV3(io_loop=self.io_loop)
        try:
            g.geocode(u"Avenida Rio Branco", bounds="whatever")
        except ValueError:
            err = sys.exc_info()[1]
            expect(err).to_have_an_error_message_of("Please use tornado_geopy.geocoders.BoundingBox to specify a bounding box.")
        else:
            assert False, "Should not have gotten this far"
#@gen_test
#def test_geocoding_with_key(self):
#g = GoogleV3(io_loop=self.io_loop, key="AIzaSyB0B_dU8N3AdI00Kc1vuBu7o2Rnn2k3_Hs")
#results = yield g.geocode(u"10900 Euclid Ave in Cleveland")
#expect(results).to_length(1)
#place, (lat, lng) = results[0]
#expect(place).to_equal(u"10900 Euclid Avenue, Cleveland, OH 44106, USA")
#expect(lat).to_equal(41.5072596)
#expect(lng).to_equal(-81.6070113)
| true |
02a214368dec899cecf036f8326675525f8cf0e6 | Python | Walleve/conftracker | /models.py | UTF-8 | 11,817 | 2.625 | 3 | [] | no_license | import sqlite3
import hashlib
import datetime
from sys import platform
# Absolute DB path on the deployed (Linux) server, relative path elsewhere.
if 'linux' in platform:
    db = '/var/www/conftracker/db_files/conftracker.db'
else:
    db = 'db_files/conftracker.db'
class Schema:
    """Creates the three conftracker tables (Confs, Records, Users) if absent.

    Instantiating this class is enough to initialize the database file.
    """

    def __init__(self):
        self.conn = sqlite3.connect(db)
        self.create_conf_table()
        self.create_record_table()
        self.create_user_table()
    def __del__(self):
        # body of destructor
        # NOTE(review): relying on __del__ for commit/close is fragile (it may
        # not run at interpreter shutdown); a context manager would be safer.
        self.conn.commit()
        self.conn.close()
    def create_conf_table(self):
        """Conference master data: one row per conference series."""
        query = """
        CREATE TABLE IF NOT EXISTS "Confs" (
          id INTEGER PRIMARY KEY AUTOINCREMENT,
          abbr char(64) NOT NULL,
          title varchar(128) NOT NULL,
          category varchar(64) DEFAULT NULL,
          publisher char(64) DEFAULT NULL,
          hindex int(4) DEFAULT -1,
          ccfrank char(1) DEFAULT NULL,
          irank char(2) DEFAULT NULL,
          create_time timestamp DEFAULT CURRENT_TIMESTAMP
        );
        """
        # UserId INTEGER FOREIGNKEY REFERENCES User(_id)
        self.conn.execute(query)
    def create_record_table(self):
        """Per-year conference editions with dates, location and stats.

        `subscribers` stores user ids as a comma-delimited string of the
        form ",1,2,3," (see Record.subscribe).
        """
        query = """
        CREATE TABLE IF NOT EXISTS "Records" (
          id INTEGER PRIMARY KEY AUTOINCREMENT,
          uid INTEGER FOREIGNKEY REFERENCES User(id),
          abbr char(64) NOT NULL,
          title varchar(128) NOT NULL,
          category varchar(64) DEFAULT NULL,
          publisher char(64) DEFAULT NULL,
          hindex int(4) DEFAULT -1,
          ccfrank char(1) DEFAULT NULL,
          irank char(2) DEFAULT NULL,
          year int(4) NOT NULL,
          startdate date DEFAULT NULL,
          enddate date DEFAULT NULL,
          absdate date DEFAULT NULL,
          subdate date DEFAULT NULL,
          notifdate date DEFAULT NULL,
          crdate date DEFAULT NULL,
          city varchar(35) DEFAULT NULL,
          country varchar(35) DEFAULT NULL,
          link varchar(256) DEFAULT NULL,
          subno int(11) DEFAULT NULL,
          acceptno int(11) DEFAULT NULL,
          acceptrate int(11) DEFAULT NULL,
          confirmed boolean DEFAULT 0,
          subscribers char(128) DEFAULT NULL,
          update_time timestamp DEFAULT CURRENT_TIMESTAMP,
          create_time timestamp DEFAULT CURRENT_TIMESTAMP
        );
        """
        self.conn.execute(query)
    def create_user_table(self):
        """Registered users identified by name + email."""
        query = """
        CREATE TABLE IF NOT EXISTS "Users" (
          id INTEGER PRIMARY KEY AUTOINCREMENT,
          first_name char(64) NOT NULL,
          last_name char(64) NOT NULL,
          email char(128) NOT NULL,
          homepage varchar(256) DEFAULT NULL,
          affiliation char(128) DEFAULT NULL,
          create_time timestamp DEFAULT CURRENT_TIMESTAMP
        );
        """
        self.conn.execute(query)
class Conf:
    """Data-access helper for the Confs table."""
    TABLENAME = "Confs"

    def __init__(self):
        self.conn = sqlite3.connect(db)

    def __del__(self):
        # NOTE(review): commit/close in __del__ is fragile but kept for
        # consistency with the other model classes in this module.
        self.conn.commit()
        self.conn.close()

    def get_items(self, query_arg = ''):
        """Return a cursor over rows matching the given WHERE clause.

        An empty `query_arg` now matches every row; previously it produced
        the invalid SQL "... WHERE " and raised an OperationalError.
        """
        query = "SELECT * FROM " + self.TABLENAME + " WHERE " + (query_arg or "1")
        ret = self.conn.execute(query)
        return ret

    def create(self, conf):
        """Insert a conference described by the dict `conf`.

        Uses parameterized placeholders: the original joined the raw values
        into the SQL text unquoted, which produced broken statements for
        string values and was open to SQL injection.
        """
        query = ('insert into ' + self.TABLENAME +
                 ' (abbr, title, category, publisher, hindex, ccfrank, irank)'
                 ' values (?, ?, ?, ?, ?, ?, ?)')
        data = (conf['abbr'], conf['title'], conf['category'], conf['publisher'],
                conf['hindex'], conf['ccfrank'], conf['irank'])
        result = self.conn.execute(query, data)
        return result
class Record:
    """Data-access helper for the Records table (per-year conference editions).

    NOTE(review): all queries below interpolate caller-supplied values into
    the SQL text; parameterized queries would prevent SQL injection and
    quoting bugs.
    """
    TABLENAME = "Records"
    def __init__(self):
        self.conn = sqlite3.connect(db)
    def __del__(self):
        # body of destructor
        self.conn.commit()
        self.conn.close()
    # NOTE(review): dead code — this definition is immediately shadowed by
    # the two-argument get_items below, and its body would raise a NameError
    # (it calls the bare name instead of self.get_items) if it could run.
    def get_items(self):
        return get_items(query_args = 'True', sort='subdate')
    def get_items(self, query_args, sort):
        """Return all records matching `query_args`, sorted by `sort` DESC,
        as a list of plain dicts."""
        conn = sqlite3.connect(db)
        conn.row_factory = sqlite3.Row
        # conn.text_factory = str
        query = "SELECT * FROM " + self.TABLENAME + " WHERE " + query_args + " ORDER BY " + sort + " DESC"
        cursor = conn.execute(query)
        # print(query)
        results = [dict(row) for row in cursor.fetchall()]
        conn.commit()
        conn.close()
        return results
    def get_item(self, conf_id):
        """Return the record with the given id as a dict, or None."""
        conn = sqlite3.connect(db)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        query = "SELECT * FROM " + self.TABLENAME + " WHERE id=" + conf_id
        results = conn.execute(query).fetchone()
        if results:
            results = dict(results)
        conn.commit()
        conn.close()
        return results
    def create(self, conf):
        """Insert a record built from the dict `conf` (values are naively
        single-quoted into the statement)."""
        conn = sqlite3.connect(db)
        data = [conf['uid'], conf['abbr'], conf['title'], conf['year'], conf['category'], conf['publisher'], conf['ccfrank'], conf['link'], conf['city'], conf['country'], conf['startdate'], conf['enddate'], conf['absdate'], conf['subdate'], conf['notifdate'], conf['crdate']]
        data = ', '.join("'{0}'".format(w) for w in data)
        query = 'insert into ' + self.TABLENAME + ' (uid, abbr, title, year, category, publisher, ccfrank, link, city, country, startdate, enddate, absdate, subdate, notifdate, crdate) ' \
            + 'values (' + data +')'
        # print(query)
        result = conn.execute(query)
        conn.commit()
        conn.close()
        return result
    def update(self, conf):
        """Update record `conf['id']` — this one correctly uses ? placeholders."""
        conn = sqlite3.connect(db)
        query = 'UPDATE ' + self.TABLENAME + ' SET abbr=?, title=?, year=?, category=?, publisher=?, ccfrank=?, ' \
                'link=?, city=?, country=?, startdate=?, enddate=?, absdate=?, subdate=?, notifdate=?, crdate=? ' \
            + ' WHERE id=?'
        # print(query)
        result = conn.execute(query, (conf['abbr'], conf['title'], conf['year'], conf['category'],
                                      conf['publisher'], conf['ccfrank'], conf['link'], conf['city'], conf['country'],
                                      conf['startdate'], conf['enddate'], conf['absdate'], conf['subdate'],
                                      conf['notifdate'], conf['crdate'], conf['id']))
        conn.commit()
        conn.close()
        return result
    def subscribe(self, conf, user):
        """Toggle `user` in the record's comma-delimited subscriber list.

        The list format is ",1,2,3," (leading/trailing commas make substring
        membership tests unambiguous). Returns 1 when subscribed, 0 when
        unsubscribed, -1 when the record does not exist.
        """
        status = -1
        user = str(user)
        conn = sqlite3.connect(db)
        conn.text_factory = str
        query = "SELECT subscribers FROM " + self.TABLENAME + " WHERE id=" + str(conf)
        subscribe = conn.execute(query).fetchone()
        if not subscribe:
            print('No record with conf id ', conf)
            return status
        subscribe = subscribe[0]
        if not subscribe:
            # First subscriber: start a fresh delimited list.
            subscribe = ',' + user + ','
            status = 1
        elif ',' + user + ',' not in subscribe:
            # Not present yet: append (the list already ends with a comma).
            subscribe += user + ','
            status = 1
        else:
            # Already subscribed: remove, collapsing the surrounding commas.
            subscribe = subscribe.replace(","+user+",", ",")
            status = 0
        query = "UPDATE " + self.TABLENAME + " SET subscribers='" + subscribe + "' WHERE id=" + str(conf)
        # print(query)
        result = conn.execute(query)
        conn.commit()
        conn.close()
        return status
class ToDoModel:
    """Stub model for a Todo table in a separate todo.db database.

    All CRUD methods are currently commented out below this class body.
    """
    TABLENAME = "Todo"
    def __init__(self):
        self.conn = sqlite3.connect('todo.db')
        self.conn.row_factory = sqlite3.Row
    def __del__(self):
        # body of destructor
        self.conn.commit()
        self.conn.close()
# def get_by_id(self, _id):
# where_clause = f"AND id={_id}"
# return self.list_items(where_clause)
# def create(self, params):
# print (params)
# query = f'insert into {self.TABLENAME} ' \
# f'(Title, Description, DueDate, UserId) ' \
# f'values ("{params.get("Title")}","{params.get("Description")}",' \
# f'"{params.get("DueDate")}","{params.get("UserId")}")'
# result = self.conn.execute(query)
# return self.get_by_id(result.lastrowid)
# def delete(self, item_id):
# query = f"UPDATE {self.TABLENAME} " \
# f"SET _is_deleted = {1} " \
# f"WHERE id = {item_id}"
# print (query)
# self.conn.execute(query)
# return self.list_items()
# def update(self, item_id, update_dict):
# """
# column: value
# Title: new title
# """
# set_query = ", ".join([f'{column} = {value}'
# for column, value in update_dict.items()])
# query = f"UPDATE {self.TABLENAME} " \
# f"SET {set_query} " \
# f"WHERE id = {item_id}"
# self.conn.execute(query)
# return self.get_by_id(item_id)
# def list_items(self, where_clause=""):
# query = f"SELECT id, Title, Description, DueDate, _is_done " \
# f"from {self.TABLENAME} WHERE _is_deleted != {1} " + where_clause
# print (query)
# result_set = self.conn.execute(query).fetchall()
# result = [{column: row[i]
# for i, column in enumerate(result_set[0].keys())}
# for row in result_set]
# return result
class User:
    """Data-access helper for the Users table.

    All queries use ? placeholders: the original interpolated the
    user-supplied email and name strings directly into the SQL text,
    which broke on quotes and allowed SQL injection.
    """
    TABLENAME = "Users"

    def __init__(self):
        pass

    def create(self, user):
        """Insert a user from a dict with first_name, last_name and email."""
        conn = sqlite3.connect(db)
        query = ('insert into ' + self.TABLENAME +
                 ' (first_name, last_name, email) values (?, ?, ?)')
        result = conn.execute(query, (user['first_name'], user['last_name'], user['email']))
        conn.commit()
        conn.close()
        return result

    def login(self, user):
        """Look up a user by email, then verify the name fields.

        :return: (status, row) where status is 0 on success (row is a dict),
                 1 when the email exists but the name mismatches,
                 2 when the email is unknown (row is None in both error cases).
        """
        ret = 0
        conn = sqlite3.connect(db)
        conn.text_factory = str
        query = "SELECT * FROM " + self.TABLENAME + " WHERE email=?"
        results = conn.execute(query, (user['email'],)).fetchone()
        if results:
            # Email exists — now require first and last name to match too.
            conn.row_factory = sqlite3.Row
            query = ("SELECT * FROM " + self.TABLENAME +
                     " WHERE email=? AND first_name=? AND last_name=?")
            results = conn.execute(
                query, (user['email'], user['first_name'], user['last_name'])).fetchone()
            if not results:
                ret = 1  # name incorrect
            else:
                results = dict(results)
        else:
            ret = 2  # email not exist, new user
        conn.commit()
        conn.close()
        return ret, results

    def get_by_id(self, user_id):
        """Return the user with the given id as a dict (raises if absent)."""
        conn = sqlite3.connect(db)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        query = "SELECT * FROM " + self.TABLENAME + " WHERE id=?"
        row = conn.execute(query, (user_id,)).fetchone()
        results = dict(row)
        conn.commit()
        conn.close()
        return results
if __name__ == "__main__":
pass | true |
a6b46d604b87cb3bc4a6d6024f2d2522eb22dce8 | Python | geekquad/Feature-Scaling | /featurescalling.py | UTF-8 | 472 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: geekquad
"""
import numpy as np
def featurescale(input_list, new_min, new_max):
    """Linearly rescale `input_list` into the range [new_min, new_max].

    Each value x maps to (x - old_min) * new_range / old_range + new_min.
    When every input value is identical (old_range == 0) the original code
    divided by zero; here every value maps to new_min instead.

    Returns a new list; the input is not modified.
    """
    old_min = np.min(input_list)
    old_max = np.max(input_list)
    old_range = old_max - old_min
    new_range = new_max - new_min
    if old_range == 0:
        # Degenerate input: all elements equal — pin them to the lower bound.
        return [new_min for _ in input_list]
    return [((i - old_min) * new_range) / old_range + new_min
            for i in input_list]
8fb1a43e29714d1a8eb4f50d72802c22a6661c08 | Python | liuchengyuan123/ZJU-Homework-on-cifar10 | /test.py | UTF-8 | 2,909 | 2.734375 | 3 | [] | no_license | from os import read
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import argparse
import numpy as np
import pickle
from tqdm import tqdm
import matplotlib.pyplot as plt
# from Model.Resnet import ResNet50
from Model.ResNetWithDropOut import ResNet50WithDropout
def read_data(path):
    """Load one pickled CIFAR-10 batch file from `path`.

    Returns (images, labels): images as an (N, 3, 32, 32) float array and
    labels as a flat float array of length N.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(path, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
    images = data[b'data'].reshape(-1, 3, 32, 32).astype('float')
    labels = np.array(data[b'labels']).reshape(-1).astype('float')
    return images, labels
class CifarDataset(Dataset):
    """In-memory torch Dataset over pre-loaded CIFAR images and labels."""
    def __init__(self, d, l) -> None:
        """Store images as float tensors and labels as long tensors."""
        self.x_data = torch.FloatTensor(d)
        self.y_data = torch.LongTensor(l)
        # Cache the sample count for __len__.
        self.len = len(self.x_data)
    def __getitem__(self, index):
        sample = self.x_data[index]
        label = self.y_data[index]
        return sample, label
    def __len__(self):
        return self.len
def build_dataset():
    """Load the CIFAR-10 `test_batch` file from ./data/ as a CifarDataset."""
    data_path = './data/'
    d, l = [], []
    data_name = data_path + 'test_batch'
    data_d, data_l = read_data(data_name)
    d.append(data_d)
    l.append(data_l)
    # Only a single batch today; the list + concatenate shape keeps room
    # for loading several batch files.
    d = np.concatenate(d, axis=0)
    l = np.concatenate(l, axis=0)
    test_dataset = CifarDataset(d, l)
    return test_dataset
class Model(nn.Module):
    """ResNet-50 (with dropout) followed by a Softmax over class scores."""
    def __init__(self):
        super(Model, self).__init__()
        self.resnet50 = ResNet50WithDropout()
        # NOTE(review): nn.Softmax() without dim= relies on the deprecated
        # implicit-dimension behaviour — confirm dim=1 is intended.
        self.softmax = nn.Softmax()
    def forward(self, x):
        # Class probabilities for a batch of images.
        return self.softmax(self.resnet50(x))
def build_model(device, args):
    """Instantiate the model, move it to `device` (if set) and load weights.

    args.checkpoint must point at a state-dict file saved from this model.
    """
    model = Model()
    if device:
        model = model.to(device)
    # NOTE(review): torch.load without map_location assumes the checkpoint's
    # original device is available — confirm for CPU-only runs.
    model.load_state_dict(torch.load(args.checkpoint))
    return model
if __name__ == '__main__':
    # Evaluate a trained checkpoint on the CIFAR-10 test batch and report
    # total sample count, mean loss and accuracy.
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str,
                        default='checkpoint/Wed-Sep-22-07:57:06-2021/best.pt', help='checkpoint path')
    parser.add_argument('--batch_size', type=int, default=64)
    args = parser.parse_args()
    device = None
    if torch.cuda.is_available():
        device = torch.device('cuda')
        print('using', device)
    test_dataset = build_dataset()
    model = build_model(device, args)
    tot = 0
    correct = 0
    loss = 0
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size)
    t = tqdm(test_dataloader, total=len(test_dataset) // args.batch_size)
    # NOTE(review): the model already applies Softmax, while CrossEntropyLoss
    # expects raw logits — the reported loss is therefore distorted (accuracy
    # is unaffected). Confirm against the training script.
    criterion = nn.CrossEntropyLoss()
    if device:
        criterion = criterion.to(device)
    model.eval()
    # NOTE(review): evaluation runs without torch.no_grad(); gradients are
    # tracked needlessly.
    for batch_x, batch_y in t:
        if device:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        prediction = model(batch_x)
        cur_loss = criterion(prediction, batch_y)
        pred = prediction.argmax(dim=1)  # predicted class per sample
        correct += torch.eq(pred, batch_y).float().sum().item()
        tot += batch_x.size()[0]
        loss += cur_loss.item() * batch_x.size()[0]
    print(f'total: {tot}, loss: {loss / tot}, accuracy: {correct / tot}')
| true |
e118a8df2b1616409ea6e1ac345578e6ddeed621 | Python | 219Winter2019adjz/Project1 | /sandbox/problem6.py | UTF-8 | 3,760 | 2.796875 | 3 | [] | no_license | ########################################################################################################################
# Problem 6: 20NewsGroups -> counts -> TF-IDF -> NMF(50) -> Gaussian NB,
# reporting train and test accuracy.
# Fetching 20NewsGroups dataset
from sklearn.datasets import fetch_20newsgroups
# Refer to the official document of scikit-learn for detailed usages:
# http://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_20newsgroups.html
categories = ['comp.graphics', 'comp.sys.mac.hardware']
twenty_train = fetch_20newsgroups(subset='train', # choose which subset of the dataset to use; can be 'train', 'test', 'all'
                                  categories=categories, # choose the categories to load; if is `None`, load all categories
                                  shuffle=True,
                                  random_state=42, # set the seed of random number generator when shuffling to make the outcome repeatable across different runs
                                  # remove=['headers'],
                                  )
twenty_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42)
########################################################################################################################
# Convert train and test data to counts
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(min_df=3, stop_words='english')
# do for training
X_train_counts = count_vect.fit_transform(twenty_train.data)
# do for testing (transform only: reuse the vocabulary fitted on train)
X_test_counts = count_vect.transform(twenty_test.data)
########################################################################################################################
# Get TFIDF of training and test sets
from sklearn.feature_extraction.text import TfidfTransformer
tfidf_transformer = TfidfTransformer()
# do for training
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
# do for testing
X_test_tfidf = tfidf_transformer.transform(X_test_counts)
########################################################################################################################
# Perform NMF (reduce the TF-IDF matrices to 50 latent components)
from sklearn.decomposition import NMF
model = NMF(n_components=50, init='random', random_state=42)
# do for training
W_nmf_train_reduced = model.fit_transform(X_train_tfidf)
H_nmf_train_reduced = model.components_
print(W_nmf_train_reduced.shape)
print(twenty_train.target.shape)
# do for testing
W_nmf_test_reduced = model.transform(X_test_tfidf)
H_nmf_test_reduced = model.components_
########################################################################################################################
# Train a Naive Bayes Gaussian classifier on the TFIDF training set from problem 2
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB().fit(W_nmf_train_reduced, twenty_train.target)
########################################################################################################################
# Evaluate accuracy on the training set first, then on the test set
predicted = clf.predict(W_nmf_train_reduced)
correct = 0
for i, category in enumerate(predicted):
    if category == twenty_train.target[i]:
        correct += 1
    # if i < 5:
    #     print('{} =? {}'.format(twenty_test.target_names[category], twenty_test.target_names[twenty_test.target[i]]))
    # else:
    #     break
print('Accuracy of NB Gaussian (train): {}'.format(correct / W_nmf_train_reduced.shape[0]))
predicted = clf.predict(W_nmf_test_reduced)
correct = 0
for i, category in enumerate(predicted):
    if category == twenty_test.target[i]:
        correct += 1
    # if i < 5:
    #     print('{} =? {}'.format(twenty_test.target_names[category], twenty_test.target_names[twenty_test.target[i]]))
    # else:
    #     break
print('Accuracy of NB Gaussian (test): {}'.format(correct / W_nmf_test_reduced.shape[0]))
| true |
9a119cf9c0d367ff55599af1aa5a7a9c6b7ff8c2 | Python | sankalpsagar/Placement-Practise | /python/username.py | UTF-8 | 419 | 3.09375 | 3 | [] | no_license | Userdict = {}
# Simulate username assignment: a requested name is granted as-is when free,
# otherwise it is suffixed with a counter ("a", "a" -> "a", "a1").
username_stream = ["a", "a", "a1", "a1", "b", "b", "b", "a21", "a21", "a12"]
assigned = []
for users in username_stream:
    # print(users)
    if users in Userdict:
        # Name already taken: derive a new one from the request count.
        Userdict[users]+=1
        # print(Userdict[users])
        string = users + str(Userdict[users]-1)
        # print(string)
        assigned.append(string)
        # NOTE(review): this overwrites any existing count for `string`, so a
        # derived name can collide with a later identical request — confirm
        # whether that is acceptable for this exercise.
        Userdict[string] = 1
    else:
        Userdict[users]=1
        assigned.append(users)
print(Userdict)
print(assigned)
3d9ebdb309e2abb255f600f9949e5c315353ce82 | Python | den01-python-programming-exercises/exercise-4-16-payment-card-MrSullivanStCadocs | /src/payment_card.py | UTF-8 | 865 | 3.734375 | 4 | [] | no_license | class PaymentCard:
    def __init__(self, opening_balance):
        # Current balance on the card, in pounds.
        self.opening_balance = opening_balance
    def __str__(self):
        # Human-readable balance summary, e.g. "The card has a balance of 10 pounds".
        return str("The card has a balance of " + str(self.opening_balance) + " pounds")
def eat_affordably(self):
if(self.opening_balance - 2.6 >= 0):
self.opening_balance = float(self.opening_balance - 2.6)
else:
return self.opening_balance
def eat_heartily(self):
if(self.opening_balance - 4.6 >= 0):
self.opening_balance = float(self.opening_balance - 4.6)
else:
return self.opening_balance
def add_money(self,amount):
self.amount = amount
if(self.opening_balance +self.amount <0):
return self.opening_balance
elif(self.opening_balance + self.amount <= 150):
self.opening_balance = self.opening_balance + self.amount
else:
self.opening_balance = 150
| true |
eb5ebd81b8e5e0e855b820be7d99c743a5598c27 | Python | linhhv1996/Python | /MergeSort.py | UTF-8 | 478 | 3.5625 | 4 | [] | no_license | def Merge(L,R):
    Result = []
    i,j = 0,0
    # Walk both (assumed sorted) inputs, always taking the smaller head.
    while i < len(L) and j < len(R):
        if (L[i] < R[j]):
            Result.append(L[i])
            i += 1
        else:
            Result.append(R[j])
            j += 1
    # One side is exhausted; append whatever remains of the other.
    Result += L[i:]
    Result += R[j:]
    return Result
def MergeSort(A):
    """Return a sorted copy of list A using top-down merge sort.

    Stable, O(n log n); A itself is never mutated (slices copy).
    """
    if (len(A) <= 1):
        # Base case: empty or single-element input is already sorted.
        return A
    Mid = len(A) // 2  # integer midpoint; avoids float division + int()
    L = MergeSort(A[:Mid])
    R = MergeSort(A[Mid:])
    return Merge(L,R)
# Quick demo: sort a small sample list.
A = [16, 9, 3]
print (MergeSort(A))
| true |
72ef2bcc5540820fdf06256152d02d70857e8d99 | Python | Manal-Almodala/EE511project2 | /waiting1.py | UTF-8 | 566 | 3.125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import random
from scipy.stats import chisquare
import math
# Draw 1000 samples from an Exponential(rate=5) distribution via inverse
# transform sampling: X = -(1/5) * ln(1 - U) with U ~ Uniform(0, 1).
X=[]
for i in range(1, 1001):
    X.append((-1/5)*np.log(1-random.random()))
print(X)
# Histogram over [0, 3) in bins of width 0.1; `data` holds observed counts.
data, m, n = plt.hist(X, bins=np.arange(0,3.1,0.1), histtype='bar', edgecolor='r')
plt.xlabel('Xi')
plt.ylabel('frequency')
def expcdf(x):
    # CDF of the Exponential(rate=5) distribution.
    return (1-math.exp(-5*x))
em = []
for i in range(0, 30):
    # Expected count of the 1000 samples falling in bin i.
    em.append(1000*(expcdf((i+1)/10)-expcdf(i/10)))
chisq, p = chisquare(data, em)
print("chisquare is:", chisq)
print("P value is:", p)
plt.show()
ea2469c792b54c02ed9cf32f63cb64a6f606f3dd | Python | skkoobb/DataMining | /DataMining.py | UTF-8 | 1,723 | 2.78125 | 3 | [] | no_license | __author__ = 'Daniel'
import sys, getopt
from glob import glob
from os import path
import numpy as np
import imagetools
import pywt
from sklearn import svm
def LoadDataFromFolder(folder = '.', ftype = '*.dat'):
    """Load every `ftype` file in `folder` into a sample matrix.

    The third column of each file becomes one row of X (after
    imagetools.arraytoimage); the label y is derived from the file name
    (BKGND=0, ECH=1, NBI=2, STRAY=3).

    Returns (X, y): X of shape (n_files, d) float, y of shape (n_files, 1)
    int32.
    """
    filelist = glob(path.join(folder,ftype))
    # Probe the FIRST file to learn the per-sample dimensionality; the
    # original indexed filelist[1], which crashes on single-file folders.
    firstpattern = np.loadtxt(filelist[0],dtype=np.float32)
    firstpattern = firstpattern[:,2]
    n = len(filelist)
    d = firstpattern.size
    X = np.zeros([n,d])
    y = np.zeros([n,1], dtype = np.int32)
    for i in range(len(filelist)):
        data = np.loadtxt(filelist[i],dtype=np.float32)
        data = imagetools.arraytoimage(data[:,2])
        X[i,:] = data
        if "BKGND" in filelist[i]: y[i] = 0
        if "ECH" in filelist[i]: y[i] = 1
        if "NBI" in filelist[i]: y[i] = 2
        if "STRAY" in filelist[i]: y[i] = 3
    return X,y
def ReduceDimension(X = np.zeros([2,2])):
    """Reduce each 385x576 image (one row of X) to the flattened level-4
    vertical-detail wavelet coefficients (cV4, Daubechies-1).

    Returns an (n_samples, nr*nc) float32 matrix.
    """
    r, c = X.shape
    # Decompose the first image once just to learn the coefficient shape.
    image = X[0,:].reshape([385,576])
    coeffs = pywt.wavedec2(image,'db1', level=4)
    cA4, (cH4, cV4, cD4), (cH3, cV3, cD3),(cH2, cV2, cD2),(cH1, cV1, cD1) = coeffs
    nr,nc = cA4.shape
    rX = np.zeros([r,nc*nr], dtype=np.float32)
    for i in range(r):
        image = X[i,:].reshape([385,576])
        coeffs = pywt.wavedec2(image,'db1', level=4)
        cA4, (cH4, cV4, cD4), (cH3, cV3, cD3),(cH2, cV2, cD2),(cH1, cV1, cD1) = coeffs
        rX[i,:] = cV4.flatten()
    return rX
def TrainSVM(X,y):
    """Fit a default (RBF-kernel) SVM classifier on (X, y) and return it."""
    clf = svm.SVC()
    clf.fit(X, y.ravel())
    print(clf)
    print(y.ravel())
    return clf
def main():
    """Load the image data, wavelet-reduce it, then train and print the SVM."""
    # NOTE(review): hard-coded absolute Windows path — parameterize if reused.
    X,y = LoadDataFromFolder('C:\\Users\\Daniel\\Google Drive\\Master ISC\\Tercer Curso\\1C Mineria de Datos\\Datos\\SenalesTJII\\Imagenes')
    rX = ReduceDimension(X)
    C=TrainSVM(rX,y)
    print(C)
if __name__ == "__main__":
main()
| true |
9e1a94a545b821ad5814f0e67bceb91ce0736bca | Python | steezkelly/TkInter | /eventcap.py | UTF-8 | 734 | 3.421875 | 3 | [] | no_license | from tkinter import *
import random
root = Tk()
def key(event):
    # Echo any key press received while the frame has keyboard focus.
    print ("pressed", repr(event.char))
def callback(event):
    # A click gives the frame keyboard focus so the <Key> bindings fire.
    frame.focus_set()
    print ("clicked at", event.x, event.y)
def a_pressed(event):
    # Bound to the "a" key.
    print("You are love")
def r_pressed(event):
    # Bound to the "r" key: print one of ten canned messages at random.
    rnum = random.randint(0, 9)
    rm = ["I love you", "You are great", "Wow you are stunning",
    "God Bless you", "Let's make love", "Let's make lots of money",
    "Keep motivated you are great", "Keep learning", "Keep steady",
    "You will be Blessed soon"]
    print(rm[rnum])
# Build the frame and wire the event bindings, then enter the event loop.
frame = Frame(root, width=300, height=300)
frame.bind("<Key>", key)
frame.bind("<Button-1>", callback)
frame.bind("a", a_pressed)
frame.bind("r", r_pressed)
frame.pack()
root.mainloop()
| true |
b99ec6be8e95c096d0b683206c4561ee3b53ded5 | Python | haoyingl/PythonLearning | /euler2.py | UTF-8 | 659 | 2.9375 | 3 | [] | no_license | #-*- coding:utf-8 -*-
#########################################################################
# File Name: test.py
# Author: Liang Haoying
# mail: Haoying.Liang@nokia-sbell.com
# Created Time: Tue 09 Jan 2018 02:02:20 PM CST
#########################################################################
#!usr/bin/env python
def F(n, cache=None):
    """Memoized Fibonacci-style sequence with F(0) = F(1) = 1.

    `cache` is created fresh per top-level call and threaded through the
    recursion so each value is computed once.
    """
    if cache is None:
        cache = {1: 1}
    if n < 2:
        return 1
    if n in cache:
        return cache[n]
    cache[n] = F(n - 1, cache) + F(n - 2, cache)
    return cache[n]
# Project Euler #2: sum the even-valued terms of the sequence whose values
# do not exceed four million.
f = []
for n in range(2000000):
    if F(n)>4000000:
        break
    elif F(n)%2==0:
        f.append(F(n))
print(sum(f))
| true |
289ecec306dcf2ba6f2781b1c89d035e10edca5c | Python | 4ND4/visage_augmentor | /run.py | UTF-8 | 966 | 2.640625 | 3 | [] | no_license | # obtain MAX DS size
# get MIN DS size
# create augmented images for MIN DS
import os
import Augmentor
# Source images for the minority class and the target folder for the
# augmented copies.
image_path = os.path.expanduser('~/Documents/images/dataset/visage_v1.1b/12/')
output_directory = os.path.expanduser('~/Documents/images/dataset/augmented/')
probability = 1
# Build an Augmentor pipeline: every operation below is applied to every
# image (probability 1).
p = Augmentor.Pipeline(
    source_directory=image_path,
    output_directory=output_directory
)
p.flip_left_right(probability=probability)
p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5)
p.zoom_random(probability=probability, percentage_area=0.95)
p.random_distortion(probability=probability, grid_width=2, grid_height=2, magnitude=8)
p.random_color(probability=1, min_factor=0.8, max_factor=1.2)
p.random_contrast(probability=1, min_factor=0.8, max_factor=1.2)
p.random_brightness(probability=1, min_factor=0.8, max_factor=1.2)
p.random_erasing(probability=probability, rectangle_area=0.2)
# Run the pipeline twice to double the number of generated samples.
for i in range(0, 2):
    print(i)
    p.process()
print('processed')
61e4bef7be95bdb34a0bd9c5cee14adb6f5d12b5 | Python | ClemenceK/deep4deep | /deep4deep/text_processing.py | UTF-8 | 3,063 | 2.984375 | 3 | [] | no_license | import numpy
import string
import regex
import re
import unidecode
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer, PorterStemmer
#from deep4deep.utils import simple_time_tracker
def remove_numbers(text):
    """Return `text` (a string) with every digit character removed."""
    kept = [ch for ch in text if not ch.isdigit()]
    return ''.join(kept)
def remove_punctuation(text):
    """Replace each punctuation mark with a space.

    The typographic apostrophe ’ is handled too (string.punctuation lacks
    it). Replacing with a space rather than deleting keeps words apart:
    "l’apparition" becomes "l apparition" instead of "lapparition".
    """
    table = str.maketrans({mark: ' ' for mark in string.punctuation + "’"})
    return text.translate(table)
def remove_special_chars(text):
    """Collapse runs of 'Symbol, other' characters (emoji, pictographs,
    bullets…) into a single space; needs the third-party `regex` module
    for Unicode property classes."""
    return regex.sub(r'\p{So}+', ' ', text)
def remove_accents(text):
    """Transliterate accented/Unicode characters to plain ASCII
    (e.g. 'é' -> 'e') via unidecode."""
    return unidecode.unidecode(text)
def remove_stopwords(text):
    """
    removes English stopwords (plus report-specific symbols) from text
    text: string
    returns text without stopwords as a list of words
    """
    my_stopwords = set(stopwords.words('english'))
    my_stopwords.add('•')
    my_stopwords.add('’')
    # special characters found in Hello Tomorrow reports
    tokens = word_tokenize(text) # roughly equivalent to a split
    # drop 1- and 2-letter tokens, keeping "ai"/"ia" (AI in English/French);
    # some snippets are French, so short words like le/la would slip through
    tokens = [word for word in tokens if (len(word)>2 or word == "ai" or word == "ia")]
    return [word for word in tokens if word not in my_stopwords]
def lemmatize(tokenized_text):
    """
    tokenized_text: list of words
    returns lemmatized text as a list of words (WordNet lemmatizer,
    noun-based by default)
    """
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word) for word in tokenized_text]
def stem(tokenized_text):
    """
    tokenized_text: list of words
    returns stemmed text as a list of words (Porter stemmer)
    """
    stemmer = PorterStemmer()
    return [stemmer.stem(word) for word in tokenized_text]
def text_preprocessing(text):
    """Lower-case, clean and tokenize `text`.

    Pipeline: digit removal, punctuation removal, special-character removal,
    accent stripping, then stopword removal (which also tokenizes).

    Returns a list of tokens; non-string input (NaN, None…) yields "" as in
    the original implementation.
    """
    try:
        text = text.lower()
    # Catch only the expected failure mode (non-string input has no
    # .lower()); the original bare `except:` also swallowed
    # KeyboardInterrupt and genuine bugs.
    except AttributeError:
        print(f"text was {text}, replacing by empty string")
        return ""
    text = remove_numbers(text)
    text = remove_punctuation(text)
    text = remove_special_chars(text)
    text = remove_accents(text)
    tokenized_text = remove_stopwords(text)
    # can add either stem or lemmatize here if needed
    return tokenized_text
# used in data preparation (as it needs the name from the Dealroom data)
def remove_own_name(text, name):
    """Drop every occurrence of the company's own `name` from `text`
    so the name itself does not leak into the features."""
    cleaned = text.replace(name, "")
    return cleaned
#########################################################################
# unused
def dealroom_phrase_removal(text):
    """Remove Dealroom's boilerplate sentence from a company description.

    Uses a plain string replacement: the sentence ends with '.', which the
    original re.sub treated as a regex wildcard (unescaped metacharacter).
    """
    dealroom_phrase = "Here you'll find information about their funding, investors and team."
    return text.replace(dealroom_phrase, "")
| true |
eeec02d07a013c1f32ecbcc0a5662dbec566c753 | Python | Takuma-Ikeda/other-LeetCode | /src/medium/test_max_increase_to_keep_city_skyline.py | UTF-8 | 676 | 3.15625 | 3 | [] | no_license | import unittest
from answer.max_increase_to_keep_city_skyline import Solution
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.maxIncreaseKeepingSkyline (LeetCode 807)."""
    def setUp(self):
        # Input grids paired index-for-index with the expected answers below.
        self.grid = [
            [[3, 0, 8, 4], [2, 4, 5, 7], [9, 2, 6, 3], [0, 3, 1, 0]],
            [[0, 0, 0], [0 ,0 ,0], [0, 0, 0]],
        ]
        self.answers = [
            35,
            0,
        ]
    def test_solution(self):
        # Run every fixture through a fresh Solution instance.
        for i in range(len(self.answers)):
            print('----- TEST NO.%i START -----' % i)
            s = Solution()
            result = s.maxIncreaseKeepingSkyline(self.grid[i])
            self.assertEqual(self.answers[i], result)
if __name__ == "__main__":
unittest.main()
| true |
7bba95e82394cdccf2734e622509bc6c8a3370ed | Python | zlz2013/zlz | /spider_project/spider/day03/05_biji_spider.py | UTF-8 | 1,648 | 2.734375 | 3 | [] | no_license | import requests
from lxml import etree
import time,random
from model_tool.useragents import ua_list
class BijiSpider(object):
    """Download every .zip/.rar lecture-notes archive linked from a
    basic-auth-protected course index page into ./biji/."""
    def __init__(self):
        # Common settings: index URL and the HTTP basic-auth credentials.
        self.url='http://code.tarena.com.cn/AIDCode/aid1904/15-spider/'
        self.auth=('tarenacode','code_2013')
    def get_html(self):
        # Fetch the index page using a randomly chosen User-Agent.
        headers={'user-agent':random.choice(ua_list)}
        html=requests.get(url=self.url,headers=headers,auth=self.auth).text
        return html
    def parse_html(self):
        # Parse the page and extract every link target with XPath.
        html=self.get_html()
        xpath_dbs='//a/@href'
        parse_html=etree.HTML(html)
        r_list=parse_html.xpath(xpath_dbs)
        print(r_list)
        for i in r_list:
            if i.endswith('.zip') or i.endswith('.rar'):
                file_url=self.url+i
                self.write_html(file_url,i)
    def write_html(self,file_url,i):
        # Download one archive and save it under ./biji/ with its own name.
        headers = {'user-agent': random.choice(ua_list)}
        html_content=requests.get(url=file_url,headers=headers,auth=self.auth).content
        with open('./biji/'+i,'wb') as f:
            f.write(html_content)
        print('下载成功')
    def main(self):
        # Entry point controlling the overall flow.
        self.parse_html()
if __name__ == '__main__':
    # Timestamp when the program starts
    start = time.time()
    spider = BijiSpider()
    spider.main()
    # Timestamp when the program finishes
    end = time.time()
    print('执行时间:%.2f' % (end-start))
cdf2416b8cb2e4e093109ab8a28a195fb92aa987 | Python | TomiyamaSatoshi/FaceAuthApp | /face_learn.py | UTF-8 | 4,327 | 2.6875 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import sys
import cv2
import os
import configparser
import numpy as np
from PIL import Image
# Read the command-line arguments (argv[1] = id of the user to train for)
args = sys.argv
id = args[1]
# Load the configuration file
inifile = configparser.ConfigParser()
inifile.read('./config.ini', 'UTF-8')
# Counter for the number of training images written so far
sample_cnt = 0
# Directory that receives the cropped training images
learnPath = inifile.get('file-dir', 'learnPath').format(args[1])
# Directory where the trained .yml model file is stored
ymlPath = inifile.get('file-dir', 'ymlPath').format(args[1])
# Directory holding the original source images
imgPath = inifile.get('file-dir', 'imgPath').format(args[1])
#######################################################################
# Face detector: Haar cascade "haarcascade_frontalface_alt2.xml"      #
#######################################################################
face_detector = cv2.CascadeClassifier(inifile.get('file-dir', 'cascadeFace'))
#######################################################
# Build the face-recognition .yml file from the       #
# collected training images                           #
#######################################################
def image_learning_make_Labels():
    # Accumulators for face crops and their user ids
    face_list=[]
    ids_list=[]
    # Local Binary Patterns Histogram (LBPH) recognizer instance
    recognizer = cv2.face_LBPHFaceRecognizer.create()
    # Collect every training-image path
    imagePaths = [os.path.join(learnPath,f) for f in os.listdir(learnPath)]
    # Loop over the training images
    for imagePath in imagePaths:
        # Convert to grayscale
        PIL_img = Image.open(imagePath).convert('L')
        img_numpy = np.array(PIL_img,'uint8')
        # Extract the user id from the file name (User.<id>.<n>.jpg)
        id = int(os.path.split(imagePath)[-1].split(".")[1])
        # Run face detection
        faces = face_detector.detectMultiScale(img_numpy)
        # Store each detected face region together with its label
        for (x,y,w,h) in faces:
            face_list.append(img_numpy[y:y+h,x:x+w])
            ids_list.append(id)
    print ("\n Training Start ...")
    ##############################
    # Train the recognizer       #
    ##############################
    recognizer.train(face_list, np.array(ids_list))
    #####################################
    # Persist the result as a .yml file #
    #####################################
    recognizer.save(ymlPath + "/trainer.yml")
    # Report how many distinct users were trained
    print("\n User {0} trained. Program end".format(len(np.unique(ids_list))))
#####################################
# Create a directory if it is missing
#####################################
def dir_check(dirPath):
    if not os.path.exists(dirPath):
        os.mkdir(dirPath)
#####################################
# Serial number (user id) of the person to enrol
#####################################
User_id = args[1]
print("\n Learn Image Get Start ............")
####################################
# Collect and save the training image data
####################################
# Create the working directories if they do not exist yet
dir_check(learnPath)
dir_check(ymlPath)
# Gather every source image for this user
imagePaths = [os.path.join(imgPath, f) for f in os.listdir(imgPath)]
# Process each source image
for imagePath in imagePaths:
    # Load the image
    img = cv2.imread(imagePath)
    # Convert it to grayscale
    image_pil = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Store it as a NumPy array
    gray = np.array(image_pil, 'uint8')
    # Detect faces with the Haar-like feature classifier
    faces = face_detector.detectMultiScale(gray)
    # Produce one training image per detected face
    for (x,y,w,h) in faces:
        # Draw the bounding box on the original image
        cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
        sample_cnt += 1
        # Save the face crop with the user id embedded in the file name.
        # NOTE(review): "/User.".format(args[1]) has no placeholder, so the
        # .format() call is a no-op — presumably just "/User." was intended.
        cv2.imwrite(learnPath + "/User.".format(args[1]) + str(User_id) + '.' + str(sample_cnt) + ".jpg", image_pil[y:y+h,x:x+w])
print("\n Learn Image Get End ")
########################
# Build the trained model file
########################
image_learning_make_Labels()
| true |