| blob_id (stringlengths 40–40) | language (stringclasses, 1 value) | repo_name (stringlengths 5–133) | path (stringlengths 2–333) | src_encoding (stringclasses, 30 values) | length_bytes (int64, 18–5.47M) | score (float64, 2.52–5.81) | int_score (int64, 3–5) | detected_licenses (listlengths 0–67) | license_type (stringclasses, 2 values) | text (stringlengths 12–5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
e808bdc8050d6a725a27495b9e3927f85d830847
|
Python
|
kobi485/package1
|
/Game_cards/test_Deckofcards.py
|
UTF-8
| 1,235
| 3.21875
| 3
|
[] |
no_license
|
from unittest import TestCase
from Game_cards.Deckofcards import Deckofcards
from Game_cards.Card import Card
class TestDeckofcards(TestCase):
def setUp(self):
self.d1 = Deckofcards()
self.d2 = Deckofcards()
self.d3 = Deckofcards()
self.d4 = ''
def test_deck_has_52_cards(self):
# Arrange
d7 = Deckofcards()
# Act
# Assert
self.assertEqual(len(d7.deck), 52)
def test_shuffle(self):
self.assertTrue(self.d1.shuffle() == True)
self.d1.dealOne()
self.assertTrue(self.d1.shuffle() == False)
self.d3.shuffle()
self.assertFalse(self.d3 != self.d2 == True)
def test_deal_one(self):
card = self.d1.deck[0]
card1 = self.d1.dealOne()
if card == card1:
self.assertEqual(card, card1)
self.assertTrue(len(self.d1.deck) == 51)
for i in range(51):
self.d1.dealOne()
self.assertTrue(len(self.d1.deck) == 0)
def test_new_game(self):
self.d1.newGame()
self.assertTrue((self.d1 != self.d2) == True)
self.d1.dealOne()
self.assertTrue((self.d1 != self.d2) == True)
def test_show(self):
pass
| true
|
bca68eaf3decbd314bc0229a7a508ee36baaccd4
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_96/819.py
|
UTF-8
| 854
| 2.96875
| 3
|
[] |
no_license
|
#from __future__ import division
import sys
rl = lambda: sys.stdin.readline().strip()
def getA(n):
if n==0:
return [0, 0]
if n%3==0:
if n==3:
return [1, 1]
else:
return [n/3, n/3+1]
if n%3==1:
if n==1:
return [1, 1]
else:
return [(n-1)/3+1, (n-4)/3+2]
if n%3==2:
if n==2:
return [1, 2]
else:
return [(n-2)/3+1, (n-2)/3+2]
for c in range(int(rl())):
v = map(int, rl().split())
N = v[0]
S = v[1]
P = v[2]
T = v[3:]
ans = 0
for t in T:
A = getA(t)
#print t, A, P
if A[0]>=P:
ans += 1
elif A[1]>=P and S>0:
S -= 1
ans += 1
print 'Case #%d: %d' % (c+1, ans)
| true
|
93e47093d16db66413b013651ebefc501dc023a1
|
Python
|
ahmedvuqarsoy/Network-Programming
|
/Lab5 - ZeroMQ/analyzer.py
|
UTF-8
| 1,390
| 3.234375
| 3
|
[] |
no_license
|
import zmq
import json
import datetime
# 0MQ Settings
context = zmq.Context()
# CSV Receiver from Data Separator
csvReceiver = context.socket(zmq.PULL)
csvReceiver.connect('tcp://127.0.0.1:4444')
# Array and Age Sender to Reducer
arraySender = context.socket(zmq.PUSH)
arraySender.bind('tcp://127.0.0.1:4445')
# Get age in form of months
def getage(now, dateOfBirth):
years = now.get("year") - dateOfBirth.get("year")
months = now.get("month") - dateOfBirth.get("month")
if (now.get("day") < dateOfBirth.get("day")):
months -= 1
while months < 0:
months += 12
years -= 1
months += (12* years)
return months
# Get current year, month and day
now = {}
now['year'] = datetime.datetime.now().year
now['month'] = datetime.datetime.now().month
now['day'] = datetime.datetime.now().day
# Read CSV row and find how many months the person lives
while True:
message = csvReceiver.recv()
# b'array' -> a string representation of array -> list object
arr = eval(message.decode('utf-8'))
# print(arr[3])
dateOfBirth = {}
date = arr[3]
dateOfBirth['day'] = int(date.split('.', 3)[0])
dateOfBirth['month'] = int(date.split('.', 3)[1])
dateOfBirth['year'] = int(date.split('.', 3)[2])
# Append that how many months a person lives
arr.append(getage(now, dateOfBirth))
# print(arr)
# Send them to Reducer
arrJson = json.dumps(arr)
arraySender.send_string(arrJson)
| true
|
83a90fe350573f99808eb03475d0e5332525305e
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02795/s926124861.py
|
UTF-8
| 392
| 2.515625
| 3
|
[] |
no_license
|
import sys
import heapq
import math
import fractions
import bisect
import itertools
from collections import Counter
from collections import deque
from operator import itemgetter
def input(): return sys.stdin.readline().strip()
def mp(): return map(int,input().split())
def lmp(): return list(map(int,input().split()))
h=int(input())
w=int(input())
n=int(input())
a=max(h,w)
print((n-1)//a+1)
| true
|
70d889b9b4ada4935fdb4f46250e60d54983ff79
|
Python
|
bryanliem/m26413126
|
/try1.txt
|
UTF-8
| 116
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/python
import time; # This is required to include time module.
ticks = time.time()
print "ticks",ticks
| true
|
23da4f0073393e6c522f4e08c17796e04e9fda2b
|
Python
|
hickeroar/python-test-skeleton
|
/test/addition.py
|
UTF-8
| 571
| 3.375
| 3
|
[] |
no_license
|
import unittest
from skeleton.addition import Addition
class TestAddition(unittest.TestCase):
def setUp(self) -> None:
self.addition = Addition()
def test_that_adding_two_numbers_yields_correct_answer(self):
result = self.addition.add(3, 4.5)
self.assertEquals(result, 7.5)
def test_that_adding_non_numbers_raises_exception(self):
with self.assertRaises(TypeError) as context:
self.addition.add(3, 'i can count to potato') # NOQA
self.assertEquals('unsupported operand', str(context.exception)[:19])
| true
|
fa335025b298ce8cfc0aca34a711e1a06de4e842
|
Python
|
pahuja-gor/Python-Lectures
|
/CS 1064/test_requests.py
|
UTF-8
| 198
| 2.515625
| 3
|
[] |
no_license
|
import pprint
import requests
response = requests.get("https://data.cityofnewyork.us/api/views/25th-nujf/rows.json?accessType=DOWNLOAD")
print(response.status_code)
pprint.pprint(response.json())
| true
|
ccd3ed1c5c92bac682ca27701e81a7ead3c00cfc
|
Python
|
moves-rwth/dft-bdmp
|
/2021-NFM/KB3TOSCRAM/KB3TOOPSA_MEF.py
|
UTF-8
| 6,904
| 2.890625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Oct 01 2017
@author: Marc Bouissou
"""
# Transformation of a FT generated by KB3 into format OPSA-MEF
# ATTENTION : the fault tree must be generated with a naming rules group (that may be empty)
# so that EXPORT_NAME structures are present in the file.
# The only basic events that are handled are GLM models
import sys
# import library for XML files
from lxml import etree
if __name__ == "__main__":
# The script must be launched by a command python KB3TOOPSA.py ...
# so that arguments can be fetched !!
# For tests, launch without arguments, which will use the hard coded names for files
# NB: sys.argv[0] is the absolute name of the program itself
if len(sys.argv)==1:
input_file_path = "KB3_FT.xml"
output_file_path = "OPSA_FT.opsa" # FT in OPSA-MEF
else: # the program was launched with two arguments
input_file_path = sys.argv[1]
work_directory = sys.argv[2] # there must be no '\' at the end!
output_file_path = work_directory + '\\OPSA_FT.opsa'
# Loading the file containing the FT
print(input_file_path)
xmldoc = etree.parse(input_file_path)
# Fault tree name
FTName = xmldoc.xpath("/TREE_ROOT/NAME")[0].text
print ("Translation of a FT generated by KB3 in OPSA-MEF format")
print ("Fault tree name: " + FTName)
with open(output_file_path, "w") as output_file:
output_file.write (r'<?xml version="1.0" encoding="UTF-8"?><open-psa>')
# Processing basic events
for i in xmldoc.xpath("/TREE_ROOT/OBJECT/FAILURE"):
# Search for the first EXPORT_NAME (thus corresponding to the
# first failure in the object)
LeafNAMEobj = i.find('EXPORT_NAME')
if LeafNAMEobj == None : output_file.write ("Error: missing export name in the tree")
else:
# defining the basic event. Parameters cannot be set directly by numerical values
LeafNAME = LeafNAMEobj.text
output_file.write (r' <define-basic-event name="' + LeafNAME + r'"><GLM>')
glm=i.find('FIAB_MODELS').find('GLM')
g= glm.find('GAMMA').text
l= glm.find('LAMBDA').text
m= glm.find('MU').text
output_file.write( r'<parameter name="G_' + LeafNAME + '"/>')
output_file.write( r'<parameter name="L_' + LeafNAME + '"/>')
output_file.write( r'<parameter name="M_' + LeafNAME + '"/>')
output_file.write(r'<mission-time/></GLM></define-basic-event>')
# defining the basic event parameters. TODO: define a function to avoid repetition of instructions below
output_file.write (r'<define-parameter name="G_' + LeafNAME + '" unit="float">')
output_file.write (r' <lognormal-deviate><float value="' + g + '"/>')
output_file.write (r' <float value="1.0"/><float value="0.9"/>')
output_file.write (r'</lognormal-deviate></define-parameter>')
output_file.write (r'<define-parameter name="L_' + LeafNAME + '" unit="float">')
output_file.write (r' <lognormal-deviate><float value="' + l + '"/>')
output_file.write (r' <float value="1.0"/><float value="0.9"/>')
output_file.write (r'</lognormal-deviate></define-parameter>')
output_file.write (r'<define-parameter name="M_' + LeafNAME + '" unit="float">')
output_file.write (r' <lognormal-deviate><float value="' + m + '"/>')
output_file.write (r' <float value="1.0"/><float value="0.9"/>')
output_file.write (r'</lognormal-deviate></define-parameter>')
# Initialize the list of negated basic events, for which NOT gates will be created
neg_basic_events=[]
# Processing gates
for i in xmldoc.xpath("/TREE_ROOT/GATE"):
GateTYPE = i.find('TYPE').text
GateNAMEobj = i.find('EXPORT_NAME')
if GateNAMEobj == None : output_file.write ("\nError: missing export name in the tree")
else:
# Start of gate declaration : opening the tag define-gate and declaring the gate type
GateNAME=GateNAMEobj.text
output_file.write (r' <define-gate name="' + GateNAME + r'">')
if GateTYPE =="ET": output_file.write ("<and>")
elif GateTYPE =="OU": output_file.write ("<or>")
elif GateTYPE =="K_SUR_N":
K = i.find('K').text
output_file.write ('<atleast min="' + K +'">')
else: print("Error: gate type unknown: " + GateTYPE)
# Gate daughters: depending on the version of KB3, the fact that they are NOT negated
# is explicit or implicit (absence of <NEGATED>FAUX</NEGATED>)
daughters = i.find('DAUGHTERS')
for j in daughters:
negatedobj = j.find('NEGATED')
if (negatedobj != None ):
if negatedobj.text == "FAUX": prefix = ""
else: prefix = "NOT_"
else : prefix = ""
if j.tag =='GATE_REF':
# Gates IDs are just integers in KB3, so EXPORT_NAME must be reconstructed
output_file.write (r'<gate name= "'+ prefix + FTName+ "_" + j.find("NAME").text + r'"/>')
if j.tag =='BASIC_EVENT_REF':
BE_name = j.find("OBJECT_NAME").text + "_" + j.find("FAILURE_NAME").text
if prefix =="NOT_": # if a basic event is negated, it must be written as a gate
tag= r'<gate name= "'
if BE_name not in neg_basic_events: neg_basic_events.append(BE_name)
else: tag= r'<basic-event name= "'
output_file.write (tag + prefix + BE_name + r'"/>')
# End of gate declaration : closing the tags
if GateTYPE =="ET": output_file.write ("</and>")
elif GateTYPE =="OU": output_file.write ("</or>")
elif GateTYPE =="K_SUR_N": output_file.write ("</atleast>")
output_file.write (r' </define-gate>')
# Creating NOT gates pointing at negated basic events
for i in neg_basic_events:
output_file.write(r' <define-gate name="NOT_' + i + r'">')
output_file.write (r'<not><basic-event name= "' + i + r'"/></not></define-gate>')
# Final tag...
output_file.write ("</open-psa>")
| true
|
d259bef14589fad96203f98e9c4784c379d83bb9
|
Python
|
Pratyush1014/Algos
|
/algorithms/DP/9.UnboundedKnapsack.py
|
UTF-8
| 574
| 2.578125
| 3
|
[] |
no_license
|
def UKnapsack (N , s) :
global dp , wt, val
for i in range (N + 1) :
for j in range (s + 1) :
if (i == 0) :
dp[i][j] = 0
elif (j == 0) :
dp[i][j] = 0
else :
if (wt[i-1] > j) :
dp[i][j] = dp[i-1][j]
else :
dp[i][j] = max(dp[i-1][j],val[i-1]+dp[i][j-wt[i-1]])
return dp[-1][-1]
N = int (input("Enter the number of inputs : "))
wt = list(map(int , input().split()))
val = list(map(int , input().split()))
max_cap = int(input("Enter max cap : "))
dp = [[0 for i in range(max_cap + 1)]for j in range(N + 1)]
print(UKnapsack(N,max_cap))
| true
|
d06e15fa6d0d99cbe753c29a44b6d82258304fa8
|
Python
|
irinatalia/Udacity-DataEng-P1
|
/sql_queries.py
|
UTF-8
| 4,959
| 2.609375
| 3
|
[] |
no_license
|
# DROP TABLES
# The following SQL queries drop all the tables in sparkifydb.
songplay_table_drop = "DROP TABLE IF EXISTS songs"
user_table_drop = "DROP TABLE IF EXISTS artists"
song_table_drop = "DROP TABLE IF EXISTS users"
artist_table_drop = "DROP TABLE IF EXISTS time"
time_table_drop = "DROP TABLE IF EXISTS songplays"
# CREATE TABLES
# The following SQL queries create tables in sparkifydb.
songplay_table_create = ("""CREATE TABLE IF NOT EXISTS songplays (songplay_id SERIAL PRIMARY KEY NOT NULL,
start_time timestamp NOT NULL,
user_id varchar NOT NULL,
level varchar NULL,
song_id varchar NOT NULL,
artist_id varchar NOT NULL,
session_id int NOT NULL,
location varchar NULL,
user_agent varchar NULL);
""")
user_table_create = ("""CREATE TABLE IF NOT EXISTS users (user_id int PRIMARY KEY UNIQUE NOT NULL,
first_name varchar NULL,
last_name varchar NULL,
gender char NULL,
level varchar NULL)
""")
song_table_create = ("""CREATE TABLE IF NOT EXISTS songs (song_id varchar PRIMARY KEY UNIQUE NOT NULL,
title varchar NULL,
artist_id varchar NULL,
year int NULL,
duration decimal NULL);
""")
artist_table_create = ("""CREATE TABLE IF NOT EXISTS artists (artist_id varchar PRIMARY KEY UNIQUE NOT NULL,
name varchar NULL,
location varchar NULL,
latitude float NULL,
longitude float NULL)
""")
time_table_create = ("""CREATE TABLE IF NOT EXISTS time (start_time timestamp NOT NULL,
hour int NULL,
day int NULL,
week int NULL,
month int NULL,
year int NULL,
weekday int NULL)
""")
# INSERT RECORDS
# The following SQL queries insert data into sparkifydb tables.
songplay_table_insert = ("""INSERT INTO songplays (
start_time,
user_id,
level,
song_id,
artist_id,
session_id,
location,
user_agent)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
""")
user_table_insert = ("""INSERT INTO users ( user_id,
first_name,
last_name,
gender,
level)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (user_id) DO UPDATE
SET level = EXCLUDED.level || 'free';
""")
song_table_insert = ("""INSERT INTO songs (song_id, title, artist_id, year, duration)
VALUES (%s,%s,%s,%s,%s)
ON CONFLICT (song_id) DO NOTHING;
""")
artist_table_insert = ("""INSERT INTO artists (artist_id, name, location, latitude, longitude)
VALUES (%s,%s,%s,%s,%s)
ON CONFLICT (artist_id) DO NOTHING;
""")
time_table_insert = ("""INSERT INTO time ( start_time,
hour,
day,
week,
month,
year,
weekday)
VALUES (%s, %s, %s, %s, %s, %s, %s);
""")
# FIND SONGS
# The following SQL query selects song_id and artist_id from artists and songs tables in sparkifydb
song_select = ("""SELECT s.song_id, a.artist_id
FROM songs AS s
LEFT JOIN artists AS a ON a.artist_id = s.artist_id
WHERE s.title = (%s) AND a.name = (%s);
""")
# QUERY LISTS
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
| true
|
e94d907369b641a4727b5995865fdaaee7348a73
|
Python
|
asya229/bar_project
|
/bar_info/test.py
|
UTF-8
| 356
| 2.734375
| 3
|
[] |
no_license
|
import json
with open('bar_info1.json', 'r', encoding='cp1251') as f:
bar = json.load(f)
for k in bar:
print(
k['Name'],
k['Longitude_WGS84'],
k['Latitude_WGS84'],
k['Address'],
k['District'],
k['AdmArea'],
k['PublicPhone'][0]['PublicPhone']
)
| true
|
7ddd3b77cddd5fbfb90b396f5c8989163ae3d7c2
|
Python
|
jeremybaby/leetcode
|
/Python/169_majority_element.py
|
UTF-8
| 1,065
| 3.71875
| 4
|
[] |
no_license
|
class Solution1:
""" defaultdict计数 """
def majorityElement(self, nums):
from collections import defaultdict
lookup = defaultdict(int)
half_len = len(nums) // 2
for num in nums:
lookup[num] += 1
if lookup[num] > half_len:
return num
class Solution2:
"""众数的出现次数 > Ln / 2」, 排序完后中间的数就是众数"""
def majorityElement(self, nums):
nums.sort()
return nums[len(nums) // 2]
class Solution3:
"""
我们维护一个计数器,
- 如果遇到一个我们目前的候选众数,就将计数器加一,
- 否则减一
只要计数器等于0,我们就将nums中之前访问的数字全部忘记,
并把下一个数字当做候选的众数
"""
def majorityElement(self, nums):
count = 0
candidate = None
for num in nums:
if count == 0:
candidate = num
count += (1 if num == candidate else -1)
return candidate
| true
|
21083aed1d576849ef03e27880dacea25240239a
|
Python
|
cha63506/nvnotifier
|
/serializer.py
|
UTF-8
| 2,727
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
# from: https://github.com/lilydjwg/winterpy
import os
import abc
import pickle
def safe_overwrite(fname, data, *, method='write', mode='w', encoding=None):
# FIXME: directory has no read perm
# FIXME: symlinks and hard links
tmpname = fname + '.tmp'
# if not using "with", write can fail without exception
with open(tmpname, mode, encoding=encoding) as f:
getattr(f, method)(data)
# if the above write failed (because disk is full etc), the old data should be kept
os.rename(tmpname, fname)
class Serializer(metaclass=abc.ABCMeta):
def __init__(self, fname, readonly=False, default=None):
'''
Read the file fname. readonly means the data will not be written back on destruction.
If the data is already locked, a SerializerError exception is raised.
default gives the data to use when the file does not exist or is empty.
Note:
To write the data back correctly, this object must still exist when the write-back happens, or use a with statement.
Storing this object inside its own data attribute does not work; the reason is unknown.
'''
self.fname = os.path.abspath(fname)
if readonly:
self.lock = None
else:
dir, file = os.path.split(self.fname)
self.lock = os.path.join(dir, '.%s.lock' % file)
for i in (1,):
# handle the file lock
if os.path.exists(self.lock):
try:
pid = int(open(self.lock).read())
except ValueError:
break
try:
os.kill(pid, 0)
except OSError:
break
else:
self.lock = None
raise SerializerError('数据已加锁')
with open(self.lock, 'w') as f:
f.write(str(os.getpid()))
try:
self.load()
except EOFError:
self.data = default
except IOError as e:
if e.errno == 2 and not readonly: # file does not exist
self.data = default
else:
raise
def __del__(self):
'''If needed, remove the lock and save the file.'''
if self.lock:
self.save()
os.unlink(self.lock)
def __enter__(self):
return self.data
def __exit__(self, exc_type, exc_value, traceback):
pass
@abc.abstractmethod
def load(self):
pass
@abc.abstractmethod
def save(self):
pass
class PickledData(Serializer):
def save(self):
data = pickle.dumps(self.data)
safe_overwrite(self.fname, data, mode='wb')
def load(self):
self.data = pickle.load(open(self.fname, 'rb'))
class SerializerError(Exception): pass
if __name__ == '__main__':
# For testing purpose
import tempfile
f = tempfile.mkstemp()[1]
testData = {'sky': 1000, 'kernel': -1000}
try:
with PickledData(f, default=testData) as p:
print(p)
p['space'] = 10000
print(p)
finally:
os.unlink(f)
| true
|
484375068f328aa85e3d061b994dd50672a119e7
|
Python
|
shivaji50/PYTHON
|
/LB4_5.py
|
UTF-8
| 642
| 4.21875
| 4
|
[] |
no_license
|
# a program which accept number from user and return difference between
# summation of all its factors and non factors.
# Input : 12
# Output : -34 (16 - 50)
# Function name : Factor()
# Author : Shivaji Das
# Date : 21 august 2021
def Factor(no):
sum1,sum2=0,0
if no <= 0:
return
for i in range(1,no):
if no%i!=0:
sum2=sum2+i
else:
sum1=sum1+i
return sum1-sum2
def main():
x=int(input("Enter the number :"))
ret=Factor(x)
print("The Difference is :",ret)
if __name__=="__main__":
main()
| true
|
9115e8b0036eb8b823edf654ce37eb4d0df9f455
|
Python
|
skanda99/Hackerrank-Leetcode
|
/TwoStrings.py
|
UTF-8
| 230
| 3.5625
| 4
|
[] |
no_license
|
# problem: "https://www.hackerrank.com/challenges/two-strings/problem"
n = int(input())
for i in range(n):
s1=set(input())
s2=set(input())
if s1.intersection(s2):
print('YES')
else:
print('NO')
| true
|
5ca4d0f0f4eb934024b32a3e653e51c1abb3bf62
|
Python
|
lm05985/Client-Server-Hangman-Python
|
/Hangman_Client_v3.py
|
UTF-8
| 2,569
| 3.578125
| 4
|
[] |
no_license
|
#HANGMAN CLIENT v3
# WORKS!!! USE THIS ONE
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name, this works on local machine
# host = "SERVER IP ADDRESS HERE" #find this out from server
#host = socket.gethostbyname(socket.gethostname()) # could use this too
port = 12345 # Reserve a port for your service.
s.connect((host, port)) #connect to socket
# print("Host:",host)
print("\nThank you for connecting to Hangman Server")
print("You need to guess what word I am thinking of....")
print("Number of letters in word: ",end=" ")
num_letters = s.recv(1024).decode() #Receives number of letters
print(num_letters)
max_guesses = s.recv(1024).decode() # receive variables
s.close() # had to close socket and open new
s = socket.socket() # so that would have two different variables
s.connect((host, port))
word_chosen = s.recv(1024).decode() #receive variable
###set up variables for game
#word_chosen = ""
word_visualization = ""
#max_guesses = recieved from server
current_guesses_counter = 0
letters_guessed = []
current_guess = ""
letters_guessed = len(word_chosen) * "_"
current_guesses = 0
correct_num_guesses = 0 #keeps track of correct number of guesses
while current_guesses_counter-correct_num_guesses < int(max_guesses):
print("Guesses left: ",int(max_guesses)-current_guesses_counter+correct_num_guesses)
current_guess = input("Enter a letter: ")
print()
for i in range(0, len(word_chosen)):
if word_chosen[i] == current_guess:
letters_guessed = letters_guessed[:i] + current_guess + letters_guessed[i+1:]
print("You got a letter!")
print(letters_guessed)
print()
correct_num_guesses= correct_num_guesses+1
if word_chosen == letters_guessed:
print("You won this time!")
result = "client win"
s.send(result.encode())
s.send(str(current_guesses_counter+1).encode())
print("Socket Connection Closed")
s.close()
exit()
current_guesses_counter+=1
print("I got you this time, the word was:", word_chosen)
print('You guessed',current_guesses_counter,'times')
result = "client lose"
s.send(result.encode())
print("Socket Connection Closed")
s.close()
exit()
| true
|
b88a9e8d168a67d40871d50d979a0d8836d7d56f
|
Python
|
kaushil268/Code-Jam-2020-
|
/3.py
|
UTF-8
| 1,232
| 3.1875
| 3
|
[] |
no_license
|
def fun1(abc, xyz):
if abc[0] > xyz[0] and abc[0] < xyz[1]:
return True
if abc[1] > xyz[0] and abc[1] < xyz[1]:
return True
return False
def funR(abc, xyz):
return fun1(abc, xyz) or fun1(xyz, abc) or abc[0] == xyz[0] or abc[1] == xyz[1]
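# Greedy assignment: activities are sorted by start time; each goes to parent J if J is free
# (end time tracked by pqr), otherwise to parent C (tracked by srt), otherwise the schedule is IMPOSSIBLE.
# The answer string is rebuilt in the original input order via the stored index.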
t = int(input())
for var1 in range(t):
n = int(input())
array = []
for i in range(n):
sa = input().split(" ")
inp = (int(sa[0]), int(sa[1]), i)
array.append(inp)
org = array
array.sort(key=lambda x: x[0])
print("Case #" + str(var1 + 1) + ": ", end='')
arrayj = []
arrayc = []
pqr = srt = 0
pos = True
for i in range(len(array)):
if array[i][0] >= pqr:
arrayj.append(array[i][2])
pqr = array[i][1]
else:
if array[i][0] >= srt:
arrayc.append(array[i][2])
srt = array[i][1]
else:
pos = False
break
if not pos:
print("IMPOSSIBLE")
else:
que = [0] * len(array)
for i in arrayj:
que[i] = "J"
for i in arrayc:
que[i] = "C"
print(''.join(que))
| true
|
61c619dc42af3f55845095eea334d14ab305f2cd
|
Python
|
prise-3d/rawls-tools
|
/utils/extract_specific_png.py
|
UTF-8
| 1,636
| 2.734375
| 3
|
[] |
no_license
|
import os
import argparse
import glob
def main():
parser = argparse.ArgumentParser(description="Extract specific samples indices")
parser.add_argument('--folder', type=str, help='folder with all rawls files', required=True)
parser.add_argument('--index', type=str, help='current rawls image index', required=True)
parser.add_argument('--nsamples', type=str, help='expected nsamples for image', required=True)
parser.add_argument('--output', type=str, help='folder with all png files', required=True)
args = parser.parse_args()
p_folder = args.folder
p_output = args.output
p_index = args.index
p_samples = args.nsamples
expected_index = str(p_index)
while len(expected_index) < 6:
expected_index = "0" + expected_index
output_index = ""
while len(output_index) < 6:
output_index = "0" + output_index
images_path = glob.glob(f"{p_folder}/**/**/*{expected_index}.png")
for img in sorted(images_path):
# replace expected Samples value
img_data = img.split('/')[-1].split('-')
img_data[-2] = "S" + p_samples
img_data[-1] = output_index + ".png"
output_path = '-'.join(img_data)
output_path = os.path.join(p_output, img.split('/')[-2], output_path)
output_folder, _ = os.path.split(output_path)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if not os.path.exists(output_path):
os.system(f'cp {img} {output_path}')
else:
print(f'{output_path} already exists')
if __name__ == "__main__":
main()
| true
|
7fa4e23ff0f288cfd35c7cc397ededcd9dd71e09
|
Python
|
ganeshpodishetti/PycharmProjects
|
/Hangman/Problems/Beta distribution/task.py
|
UTF-8
| 116
| 2.796875
| 3
|
[] |
no_license
|
import random
random.seed(3)
alpha = 0.9
beta = 0.1
# call the function here
print(random.betavariate(alpha, beta))
| true
|
685f82217af5990b4330dfa2cac81c704c61ea20
|
Python
|
DiegoT-dev/Estudos
|
/Back-End/Python/CursoPyhton/Mundo 03/Exercícios/ex096.py
|
UTF-8
| 321
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
def cab(msg):
print(f'{msg:^30}\n{"-"*30}')
def área(lst):
a = 1
for n in lst:
a *= n
print(f'A área de um terreno {lst[0]}x{lst[1]} é de {a:.1f}m²')
def cham(txt):
x.append(float(input(f'{txt} (m): ')))
x = list()
cab('Controle de Terreno')
cham('Largura')
cham('Comprimento')
área(x)
| true
|
c0f01dba07598d2fdd4a2ebbf77b38fd65d1282b
|
Python
|
DropName/infa_2019_primak
|
/test.2019/1one.py
|
UTF-8
| 387
| 3.453125
| 3
|
[] |
no_license
|
from math import sqrt
def prime_nembers(n):
"""
returns list a of prime numbers up to n
"""
a = []
for i in range(2, n + 1):
for j in a:
if j > int((sqrt(i)) + 1):
a.append(i)
break
if (i % j == 0):
break
else:
a.append(i)
return a
print(prime_nembers(1000))
| true
|
5e690dc821860e05f6578bbdd1f09903acf4be44
|
Python
|
ivenkatababji/pds
|
/src/membership.py
|
UTF-8
| 425
| 3.40625
| 3
|
[] |
no_license
|
from bloom_filter import BloomFilter
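# test() adds 1, 2 and 6 to the given data structure and checks membership of 1 (expected present)
# and 3 (expected absent), first with a plain set and then with a Bloom filter,
# which may return false positives (error_rate=0.1) but uses a fixed amount of memory.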
def test(ds):
ds.add(1)
ds.add(2)
ds.add(6)
if 1 in ds :# True
print 'test 1 : +ve'
else:
print 'test 1 : -ve'
if 3 in ds :# False
print 'test 3 : +ve'
else:
print 'test 3 : -ve'
print 'Using Set'
myset = set([])
test(myset)
print 'Using Bloom filter'
mybloom = BloomFilter(max_elements=10000, error_rate=0.1)
test(mybloom)
| true
|
8196d947816f1c2e7d9d70dd6cd37fd658fb18d2
|
Python
|
jameszhan/leetcode
|
/algorithms/033-search-in-rotated-sorted-array.py
|
UTF-8
| 1,621
| 4.65625
| 5
|
[] |
no_license
|
"""
搜索旋转排序数组
假设按照升序排序的数组在预先未知的某个点上进行了旋转。
( 例如,数组 [0,1,2,4,5,6,7] 可能变为 [4,5,6,7,0,1,2] )。
搜索一个给定的目标值,如果数组中存在这个目标值,则返回它的索引,否则返回 -1 。
你可以假设数组中不存在重复的元素。
你的算法时间复杂度必须是 O(log n) 级别。
示例 1:
输入: nums = [4,5,6,7,0,1,2], target = 0
输出: 4
示例 2:
输入: nums = [4,5,6,7,0,1,2], target = 3
输出: -1
"""
from typing import List
# By the property of a rotated array with distinct elements, nums[i] <= nums[j] implies the interval [i, j] is monotonically increasing.
def search(nums: List[int], target: int) -> int:
nums_len = len(nums)
if nums_len <= 0:
return -1
elif nums_len == 1:
return 0 if target == nums[0] else -1
else:
i, j = 0, nums_len - 1
while i <= j:
mid = (i + j) // 2
if nums[mid] == target:
return mid
if nums[i] <= nums[mid]: # [i, mid] is monotonically increasing
if nums[i] <= target <= nums[mid]:
j = mid -1
else:
i = mid + 1
else: # [mid, j] is monotonically increasing
if nums[mid] <= target <= nums[j]:
i = mid + 1
else:
j = mid - 1
return -1
if __name__ == '__main__':
print(search([4, 5, 6, 7, 0, 1, 2], 0))
print(search([4, 5, 6, 7, 0, 1, 2], 3))
| true
|
8d74390592e3014b7391ca9b0921a3ac6907407a
|
Python
|
ntnunk/aws_credential_manager
|
/cred_loader/loader.py
|
UTF-8
| 2,518
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
import os
import wx
import helpers
from ui import MainWindow
class MainUi(MainWindow):
region_list = []
account_list = []
def __init__(self, parent):
super().__init__(parent)
account_list = helpers.get_local_accounts()
self.combo_accounts.Items = account_list
self.combo_region.Items = helpers.get_aws_regions()
def on_account_change(self: MainWindow, event: wx.Event):
# If the account/profile exists, load the currently-configured region if it has one.
region = helpers.get_profile_region(self.combo_accounts.Value)
if region != '':
self.combo_region.Value = region
if (
self.combo_accounts.Value != '' and
self.combo_region.Value != '' and
self.text_credentials.Value != ''
):
self.button_save.Enable(True)
self.button_save_and_close.Enable(True)
def on_region_change(self: MainWindow, event: wx.Event):
if self.combo_accounts.Value != '' and self.combo_region.Value != '' and self.text_credentials != '':
self.button_save.Enable(True)
self.button_save_and_close.Enable(True)
def on_cancel_click(self: MainWindow, event: wx.Event):
self.Close()
self.Destroy()
def on_save_click(self: MainWindow, event: wx.Event):
result = helpers.parse_input(self.combo_accounts.Value, self.combo_region.Value, self.text_credentials.Value)
if result:
wx.MessageBox("AWS Credentials file updated successfully.", "Success", wx.OK_DEFAULT | wx.ICON_INFORMATION)
result = helpers.update_aws_regions(self.combo_region.Value, self.combo_accounts.Value)
if result['success'] == False:
if result['error'] == 'RequestExpired':
wx.MessageBox('SSO Credentials appear to have expired, unable to update Regions.', 'Credentials Expired', wx.OK_DEFAULT | wx.ICON_ERROR)
else:
wx.MessageBox('Unknown error. Failed to update AWS regions.', 'Error', wx.OK_DEFAULT | wx.ICON_ERROR)
print(result['error'])
self.combo_accounts.Value = ''
self.text_credentials.Value = ''
self.combo_region.Value = ''
def on_save_and_close_click(self: MainWindow, event: wx.Event):
self.on_save_click(event)
self.Close()
self.Destroy()
def run():
app = wx.App()
main = MainUi(None)
main.Show()
app.MainLoop()
| true
|
fa980296fa925a8637a950b6cecab1d3c5d05990
|
Python
|
MiyabiTane/myLeetCode_
|
/30-Day_Challenge/28_First_Unique_Number.py
|
UTF-8
| 929
| 3.4375
| 3
|
[] |
no_license
|
class FirstUnique:
def __init__(self, nums):
self.queue = []
self.seen = {}
for num in nums:
if num in self.seen:
self.seen[num] += 1
else:
self.seen[num] = 1
self.queue.append(num)
def showFirstUnique(self):
if len(self.queue) == 0:
print(-1)
return -1
while self.queue:
num = self.queue[0]
if self.seen[num] > 1:
self.queue.pop(0)
else:
print(num)
return num
print(-1)
return -1
def add(self, value):
if value in self.seen:
self.seen[value] += 1
else:
self.seen[value] = 1
self.queue.append(value)
qu = FirstUnique([7,7,7,7,7])
qu.showFirstUnique()
qu.add(7)
qu.add(3)
qu.add(3)
qu.add(7)
qu.add(17)
qu.showFirstUnique()
| true
|
57398548af3e4af88f6872715c0c5707e506a589
|
Python
|
tzhou2018/LeetCode
|
/linkedList/19removeNthFromEnd.py
|
UTF-8
| 1,291
| 3.453125
| 3
|
[] |
no_license
|
'''
@Time : 2020/2/13 21:31
@FileName: 19removeNthFromEnd.py
@Author : Solarzhou
@Email : t-zhou@foxmail.com
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
# For convenience, we introduce a dummy head node.
# First move pointer p forward n nodes, then move p and q forward together;
# when p.next is None, q points just before the nth node from the end, so delete it and return head.
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
pHead = ListNode(-1)
pHead.next = head
p = pHead
q = pHead
print("p->next->next:", p.next.val)
# p.next = head
for i in range(n):
if p.next:
p = p.next
else:
return None
while p.next:
p = p.next
q = q.next
q.next = q.next.next
return pHead.next
if __name__ == '__main__':
pHead = ListNode(-1)
p = pHead
for i in range(10):
node = ListNode(i)
p.next = node
p = node
head = Solution().removeNthFromEnd(ListNode(1), 1)
while head:
print(head.val)
head = head.next
| true
|
0308b89049063f50170a660e6c782d58358fb089
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03207/s775367688.py
|
UTF-8
| 174
| 3.03125
| 3
|
[] |
no_license
|
def main():
N = int(input())
prices = [int(input()) for _ in range(N)]
r = sum(prices) - max(prices) / 2
print(int(r))
if __name__ == '__main__':
main()
| true
|
0d87ee7fafa06458480ccf1f18cc16e11d5cc6ed
|
Python
|
wbsth/f1retstat
|
/misc.py
|
UTF-8
| 4,381
| 3.1875
| 3
|
[] |
no_license
|
import json
import pandas as pd
def import_race_statuses(file_adr):
"""imports possible race finish statuses"""
statuses_csv = pd.read_csv(file_adr, sep=";")
print('Race statuses imported')
return statuses_csv
def build_season_list(file_name):
"""from season list, returns list of season years"""
with open(file_name, 'r', encoding='utf-8') as f:
season_years = []
seasons_data = json.load(f)
for i in seasons_data['MRData']['SeasonTable']['Seasons']:
season_years.append(int(i['season']))
return season_years
def build_dataframe():
"""builds dataframe skeleton"""
column_names = ['year', 'race', 'country', 'track', 'date', 'started', 'retired_overall', 'retired_mech',
'retired_accident',
'retired_misc']
df = pd.DataFrame(columns=column_names)
df = df.astype({'year': int, 'race': int, 'country': object, 'track': object, 'date': object, 'started': int,
'retired_overall': int,
'retired_mech': int, 'retired_accident': int, 'retired_misc': int})
return df
def fill_dataframe(df, season_list, statuses):
"""fills the dataframe with data loaded from json files"""
for i in season_list:
# iterating through seasons
url = f'season/{i}'
with open(f"{url}/{i}.json", encoding='utf-8') as f:
data = json.load(f)
for j in range(1, len(data['MRData']['RaceTable']['Races'])):
# iterating through races in particular season
with open(f"{url}/{j}.json", encoding='utf-8') as g:
race_result_data = json.load(g)['MRData']['RaceTable']
race_df = pd.DataFrame(columns=df.columns)
try:
# assigning text values to race dataframe
started = 0 # number of drivers who started race
finished = 0 # number of drivers who finished the race
ret_mech = 0 # number of drivers who retired by mechanical failure
ret_acc = 0 # number of drivers who retired due to accident
ret_dnf = 0 # number of drivers who retired due to other reasons
dns = 0 # number of drivers who did not start the race
for k in race_result_data['Races'][0]['Results']:
status = k['status']
if status in statuses['finish'].values:
started += 1
finished += 1
elif status in statuses['mech'].values:
started += 1
ret_mech += 1
elif status in statuses['acc'].values:
started += 1
ret_acc += 1
elif status in statuses['dnf'].values:
started += 1
ret_dnf += 1
elif status in statuses['dns'].values:
dns += 1
ret_ov = ret_mech + ret_acc + ret_dnf
race_df.loc[0, 'year'] = race_result_data['season']
race_df.loc[0, 'race'] = race_result_data['Races'][0]['round']
race_df.loc[0, 'country'] = race_result_data['Races'][0]['raceName']
race_df.loc[0, 'track'] = race_result_data['Races'][0]['Circuit']['circuitName']
race_df.loc[0, 'date'] = race_result_data['Races'][0]['date']
race_df.loc[0, 'started'] = started
race_df.loc[0, 'retired_overall'] = ret_ov
race_df.loc[0, 'retired_mech'] = ret_mech
race_df.loc[0, 'retired_accident'] = ret_acc
race_df.loc[0, 'retired_misc'] = ret_dnf
df = pd.concat([df, race_df], ignore_index=True)
except IndexError:
pass
return df
def print_df(dataframe):
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print(dataframe)
| true
|
412ea44988c0bccada695335e418a9ec6f09c40c
|
Python
|
arturbs/Programacao_1
|
/uni6/Calculo_De_Seguro/calculo_de_seguro.py
|
UTF-8
| 1,153
| 2.765625
| 3
|
[] |
no_license
|
#coding:utf-8
#Artur Brito Souza - 118210056
#Laboratorio de Progamacao 1, 2018.2
#A Primeira Letra em Caixa Alta
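# calcula_seguro scores the driver's profile (each answer adds 10 to 20 points) and maps the total
# to a risk band: up to 80 points is low risk (premium 10% of the vehicle value),
# up to 100 is medium risk (20%), anything higher is high risk (30%).
# Returns [points, risk label, premium].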
def calcula_seguro(valor_veiculo, lista):
idade = lista[0]
relacionamento = lista[1]
moradia_risco = lista[2]
portao = lista[3]
casa = lista[4]
casa_propria = lista[5]
uso = lista[6]
pontos = 0
if idade < 22:
pontos += 20
elif idade < 31:
pontos += 15
elif idade < 41:
pontos += 12
elif idade < 61:
pontos += 10
else:
pontos += 20
if relacionamento == True:
pontos += 10
else:
pontos += 20
if moradia_risco == True:
pontos += 20
else:
pontos += 10
if portao == True:
pontos += 20
else:
pontos += 10
if casa == True:
pontos += 20
else:
pontos += 10
if casa_propria == True:
pontos += 10
else:
pontos += 20
if uso == "Trabalho":
pontos += 10
else:
pontos += 20
if pontos <= 80:
risco = "Risco Baixo"
pago_ao_seguro = (valor_veiculo / 100.0) * 10
elif pontos <= 100:
risco = "Risco Medio"
pago_ao_seguro = (valor_veiculo / 100.0) * 20
else:
risco = "Risco Alto"
pago_ao_seguro = (valor_veiculo / 100.0) * 30
return [pontos, risco, pago_ao_seguro]
| true
|
2ea2d489b7676c7c4d6893a1690225affa475a6f
|
Python
|
fabocode/gps-project
|
/python_test.py
|
UTF-8
| 808
| 2.796875
| 3
|
[] |
no_license
|
import threading
import time
import sys
import gps_data
import RPi.GPIO as IO
x = 0
flag = False
gps = gps_data.GPS
# GPS Class
init_gps = gps()
# Setup the GPS Device
gps.setup_skytraq_gps(init_gps)
def loop_thread():
try:
while flag == False:
gps.update_gps_time(init_gps)
#print("gps seconds inside thread: {}".format(gps.seconds))
except KeyboardInterrupt:
IO.cleanup()
sys.exit()
try:
my_thread = threading.Thread(target=loop_thread) # instance the thread
my_thread.start() # call to start the thread
while True:
x = 0
#gps.update_gps_time(init_gps)
print("gps_seconds outside thread: {}". format(gps.seconds))
time.sleep(1)
except KeyboardInterrupt:
flag = True
IO.cleanup()
sys.exit()
| true
|
f9e3ffa8f311983a61da4589d40669798a2f4047
|
Python
|
rrajesh0205/python_in_HP
|
/show.py
|
UTF-8
| 226
| 3.671875
| 4
|
[] |
no_license
|
class Student:
def __init__(self, name, rollno):
self.name = name
self.rollno = rollno
def show(self):
print(self.name, self.rollno)
s1 = Student('Navin', 2)
s2 = Student('Jenny', 3)
s1.show()
| true
|
8550004dcb620fe77d49ad91cb2f852cf427b119
|
Python
|
hugolribeiro/Python3_curso_em_video
|
/World3/exercise085.py
|
UTF-8
| 579
| 4.8125
| 5
|
[] |
no_license
|
# Exercise 085: List with even and odd numbers
# Make a program that the user input seven numeric values and register them into a unique list.
# That list will keep separated the odd and even numbers.
# At the end, show the odd and even values in crescent order.
# numbers = [[even], [odd]]
numbers = [[], []]
for amount in range(0, 7):
number = int(input('Input here a number: '))
if number % 2 == 0:
numbers[0].append(number)
else:
numbers[1].append(number)
print(f'Even numbers: {sorted(numbers[0])}')
print(f'Odd numbers: {sorted(numbers[1])}')
| true
|
d9c150ba7eba4a7253c39b822bf2d6bacb1b0425
|
Python
|
Inkiu/Algorithm
|
/src/main/python/socks_laundering.py
|
UTF-8
| 1,211
| 3.125
| 3
|
[] |
no_license
|
from collections import defaultdict
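# Treat K as the remaining washing capacity in socks: first pair up the clean socks,
# then complete each leftover clean sock with one washed dirty sock of the same color (one unit of K),
# then wash remaining dirty socks of the same color two at a time to form extra pairs.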
def solution(K, C, D):
ans = 0
clean_d = defaultdict(lambda : 0)
for c in C:
clean_d[c] += 1
for k in clean_d.keys():
fair = clean_d[k]
ans += fair // 2
clean_d[k] = fair % 2
dirty_d = defaultdict(lambda : 0)
for d in D:
dirty_d[d] += 1
for k, v in clean_d.items():
for i in range(v):
if K == 0:
break
dirty = dirty_d[k]
if dirty:
K -= 1
ans += 1
dirty_d[k] -= 1
for k, v in dirty_d.items():
while v > 1:
if K < 2:
break
ans += 1
v -= 2
K -= 2
return ans
import random
# solution(1, [1, 2, 3], [4, 5, 6, 1])
while True:
random_k = random.randint(1, 10)
random_c = [random.randint(1, 10) for _ in range(random.randint(1, 10))]
random_d = [random.randint(1, 10) for _ in range(random.randint(1, 10))]
s1 = solution(random_k, random_c, random_d)
s2 = re_solution(random_k, random_c, random_d)
if s1 != s2:
print(s1, s2, random_k, sorted(random_c), sorted(random_d))
break
| true
|
8c5a31c13d17372d17374196c4b34d038a761586
|
Python
|
acroooo/aprendizajePython
|
/Fundamentos/tuplas.py
|
UTF-8
| 391
| 3.953125
| 4
|
[] |
no_license
|
# Tuples: they keep their order but cannot be modified
frutas = ("naranja", "platano", "kiwi", "sandia")
print(frutas)
# length of the tuple
print(len(frutas))
# accessing an element
print(frutas[0])
# convert to a list to add or change elements
frutasLista = list(frutas)
frutasLista[0] = "El modificado"
frutas = tuple(frutasLista)
print(frutas)
for fruta in frutas:
print(fruta, end=" ")
| true
|
cb9cad553dac3115d6cb032ad841b7e452ddfbf3
|
Python
|
t-eckert/ctci_solutions
|
/c16_moderate/q1_Number_Swapper.py
|
UTF-8
| 501
| 4.3125
| 4
|
[] |
no_license
|
"""
16.1 Number Swapper: Write a function to swap a number in place.
"""
test_numberPairs = [
(2, 3),
(1, 1),
(362943.273415, 15115234283.9958300288593),
(-3.14159, 3.14159),
]
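# swap_in_place swaps two numbers using only arithmetic, no temporary variable:
# a = a - b stores the difference, b = a + b then holds the original a,
# and a = b - a leaves the original b in a.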
def swap_in_place(a, b):
a = a - b
b = a + b
a = b - a
return a, b
def main():
for test_numberPair in test_numberPairs:
a, b = test_numberPair
print("%s, %s" % (a, b))
a, b = swap_in_place(a, b)
print("swapped in place -> %s, %s" % (a, b))
main()
| true
|
bd47af050b39a24f5291b417b57b19535175bfda
|
Python
|
Gunnika/the-cuisine-menu
|
/app.py
|
UTF-8
| 2,514
| 2.78125
| 3
|
[] |
no_license
|
from flask import Flask, jsonify, request
import json
from flask_sqlalchemy import SQLAlchemy
app= Flask(__name__)
app.config['SECRET_KEY'] = 'thisissecret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///new_database1.mysql'
db= SQLAlchemy(app)
class Cuisine(db.Model):
id = db.Column(db.Integer, primary_key= True)
name = db.Column(db.String(20))
origin = db.Column(db.String(50))
ingredients = db.Column(db.String(600))
@app.route("/addDish", methods=['POST'])
def addDish():
data= request.get_json()
c_id = data['id']
c_name = data['name']
c_origin = data['origin']
c_ingredients = data['ingredients']
new_cuisine = Cuisine(id= c_id, name= c_name, origin= c_origin, ingredients= json.dumps(c_ingredients))
db.session.add(new_cuisine)
db.session.commit()
return jsonify({"message":"Dish added"})
@app.route("/All", methods=['GET'])
def All():
output=[]
all_cuisines = Cuisine.query.all()
for cuisine in all_cuisines:
a_cuisine={}
a_cuisine['id']= cuisine.id
a_cuisine['name']= cuisine.name
a_cuisine['origin']= cuisine.origin
a_cuisine['ingredients']= json.loads(cuisine.ingredients)
output.append(a_cuisine)
return jsonify({"message":output})
@app.route("/origin/<place>", methods=['GET'])
def origin(place):
output=[]
places= Cuisine.query.filter_by(origin=place).all()
for place1 in places:
part = {}
part['id']= place1.id
part['name']= place1.name
part['origin']=place1.origin
part['ingredients']= json.loads(place1.ingredients)
output.append(part)
return jsonify({"message":output})
@app.route("/rename/<id>", methods=['POST'])
def rename(id):
data=request.get_json()
place1= Cuisine.query.filter_by(id=id).first()
place1.name = data['name']
db.session.add(place1)
db.session.commit()
return jsonify({"message":"Cuisine edited"})
@app.route("/addIngredient/<id>", methods=['POST'])
def addIngredient(id):
data = request.get_json()
place1= Cuisine.query.filter_by(id=id).first()
Existing = json.loads(place1.ingredients)
Addition= data['ingredients']
Existing.extend(Addition)
place1.ingredients=json.dumps(Existing)
db.session.add(place1)
db.session.commit()
return jsonify({"message":"Ingredients added"})
if __name__=='__main__':
app.run(host='0.0.0.0', debug= True)
| true
|
30842463d3a646e1adbb8a056299661d8536cb6f
|
Python
|
tshauck/phmdoctest
|
/src/phmdoctest/main.py
|
UTF-8
| 13,070
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
from collections import Counter, namedtuple
from enum import Enum
import inspect
from typing import List, Optional
import click
import commonmark.node # type: ignore
import monotable
from phmdoctest import tool
from phmdoctest import print_capture
class Role(Enum):
"""Role that markdown fenced code block plays in testing."""
UNKNOWN = '--'
CODE = 'code'
OUTPUT = 'output'
SESSION = 'session'
SKIP_CODE = 'skip-code'
SKIP_OUTPUT = 'skip-output'
SKIP_SESSION = 'skip-session'
class FencedBlock:
"""Selected fields from commonmark node plus new field role."""
def __init__(self, node: commonmark.node.Node) -> None:
"""Extract fields from commonmark fenced code block node."""
self.type = node.info
self.line = node.sourcepos[0][0] + 1
self.role = Role.UNKNOWN
self.contents = node.literal
self.output = None # type: Optional['FencedBlock']
self.skip_reasons = list() # type: List[str]
def __str__(self) -> str:
return 'FencedBlock(role={}, line={})'.format(
self.role.value, self.line)
def set(self, role: Role) -> None:
"""Set the role for the fenced code block in subsequent testing."""
self.role = role
def set_link_to_output(self, fenced_block: 'FencedBlock') -> None:
"""Save a reference to the code block's output block."""
assert self.role == Role.CODE, 'only allowed to be code'
assert fenced_block.role == Role.OUTPUT, 'only allowed to be output'
self.output = fenced_block
def skip(self, reason: str) -> None:
"""Skip an already designated code block. Re-skip is OK."""
if self.role == Role.CODE:
self.set(Role.SKIP_CODE)
if self.output:
self.output.set(Role.SKIP_OUTPUT)
elif self.role == Role.SESSION:
self.set(Role.SKIP_SESSION)
else:
is_skipped = any(
[self.role == Role.SKIP_CODE,
self.role == Role.SKIP_SESSION])
assert is_skipped, 'cannot skip this Role {}'.format(self.role)
self.skip_reasons.append(reason)
Args = namedtuple(
'Args',
[
'markdown_file',
'outfile',
'skips',
'is_report',
'fail_nocode'
]
)
"""Command line arguments with some renames."""
@click.command()
@click.argument(
'markdown_file',
nargs=1,
type=click.Path(
exists=True,
dir_okay=False,
allow_dash=True, # type: ignore
)
)
@click.option(
'--outfile',
nargs=1,
help=(
'Write generated test case file to path TEXT. "-"'
' writes to stdout.'
)
)
@click.option(
'-s', '--skip',
multiple=True,
help=(
'Any Python code or interactive session block that contains'
' the substring TEXT is not tested.'
' More than one --skip TEXT is ok.'
' Double quote if TEXT contains spaces.'
' For example --skip="python 3.7" will skip every Python block that'
' contains the substring "python 3.7".'
' If TEXT is one of the 3 capitalized strings FIRST SECOND LAST'
' the first, second, or last Python block in the'
' Markdown file is skipped.'
' The fenced code block info string is not searched.'
)
)
@click.option(
'--report',
is_flag=True,
help='Show how the Markdown fenced code blocks are used.'
)
@click.option(
'--fail-nocode',
is_flag=True,
help=(
'This option sets behavior when the Markdown file has no Python'
' fenced code blocks or interactive session blocks'
' or if all such blocks are skipped.'
' When this option is present the generated pytest file'
' has a test function called test_nothing_fails() that'
' will raise an assertion.'
' If this option is not present the generated pytest file'
' has test_nothing_passes() which will never fail.'
)
)
@click.version_option()
# Note- docstring for entry point shows up in click's usage text.
def entry_point(markdown_file, outfile, skip, report, fail_nocode):
args = Args(
markdown_file=markdown_file,
outfile=outfile,
skips=skip,
is_report=report,
fail_nocode=fail_nocode,
)
# Find markdown blocks and pair up code and output blocks.
with click.open_file(args.markdown_file, encoding='utf-8') as fp:
blocks = convert_nodes(tool.fenced_block_nodes(fp))
identify_code_and_output_blocks(blocks)
apply_skips(args, blocks)
if args.is_report:
print_report(args, blocks)
# build test cases and write to the --outfile path
if args.outfile:
test_case_string = build_test_cases(args, blocks)
with click.open_file(args.outfile, 'w', encoding='utf-8') as ofp:
ofp.write(test_case_string)
def convert_nodes(nodes: List[commonmark.node.Node]) -> List[FencedBlock]:
"""Create FencedBlock objects from commonmark fenced code block nodes."""
blocks = []
for node in nodes:
blocks.append(FencedBlock(node))
return blocks
PYTHON_FLAVORS = ['python', 'py3', 'python3']
"""Python fenced code blocks info string will start with one of these."""
def identify_code_and_output_blocks(blocks: List[FencedBlock]) -> None:
"""
Designate which blocks are Python or session and guess which are output.
The block.type is a copy of the Markdown fenced code block info_string.
This string may start with the language intended for syntax coloring.
A block is an output block if it has an empty markdown info field
and follows a designated python code block.
A block is a session block if the info_string starts with 'py'
and the first line of the block starts with the session prompt '>>> '.
"""
for block in blocks:
for flavor in PYTHON_FLAVORS:
if block.type.startswith(flavor):
block.set(Role.CODE)
if block.contents.startswith('>>> ') and block.type.startswith('py'):
block.set(Role.SESSION)
# When we find an output block we update the preceding
# code block with a link to it.
previous_block = None
for block in blocks:
if previous_block is not None:
if not block.type and previous_block.role == Role.CODE:
block.set(Role.OUTPUT)
previous_block.set_link_to_output(block)
previous_block = block
# If we didn't find an output block for a code block
# it can still be run, but there will be no comparison
# to expected output. If assertions are needed, they can
# be added to the code block.
def apply_skips(args: Args, blocks: List[FencedBlock]) -> None:
"""Designate Python code/session blocks that are exempt from testing."""
skip_candidates = [] # type: List[FencedBlock]
for b in blocks:
if b.role in [Role.CODE, Role.SESSION]:
skip_candidates.append(b)
# Skip blocks identified by patterns 'FIRST', 'SECOND', 'LAST'
if skip_candidates:
apply_special_skips(skip_candidates, args.skips)
# Skip blocks identified by pattern matches.
# Try to find each skip pattern in each block.
# If there is a match, skip the block. Blocks can
# be skipped more than once.
for block in skip_candidates:
for pattern in args.skips:
if block.contents.find(pattern) > -1:
block.skip(pattern)
def apply_special_skips(blocks: List[FencedBlock], skips: List[str]) -> None:
"""Skip blocks identified by patterns 'FIRST', 'SECOND', 'LAST'"""
for pattern in skips:
index = None
if pattern == 'FIRST':
index = 0
elif pattern == 'LAST':
index = -1
elif pattern == 'SECOND' and len(blocks) > 1:
index = 1
if index is not None:
blocks[index].skip(pattern)
def print_report(args: Args, blocks: List[FencedBlock]) -> None:
"""Print Markdown fenced block report and skips report."""
report = []
filename = click.format_filename(args.markdown_file)
title1 = filename + ' fenced blocks'
text1 = fenced_block_report(blocks, title=title1)
report.append(text1)
roles = [b.role.name for b in blocks]
counts = Counter(roles)
number_of_test_cases = counts['CODE'] + counts['SESSION']
report.append('{} test cases'.format(number_of_test_cases))
if counts['SKIP_CODE'] > 0:
report.append('{} skipped code blocks'.format(
counts['SKIP_CODE']
))
if counts['SKIP_SESSION'] > 0:
report.append('{} skipped interactive session blocks'.format(
counts['SKIP_SESSION']
))
num_missing_output = counts['CODE'] - counts['OUTPUT']
report.append(
'{} code blocks missing an output block'.format(
num_missing_output
)
)
if args.skips:
report.append('')
title2 = 'skip pattern matches (blank means no match)'
text2 = skips_report(args.skips, blocks, title=title2)
report.append(text2)
print('\n'.join(report))
def fenced_block_report(blocks: List[FencedBlock], title: str = '') -> str:
"""Generate text report about the input file fenced code blocks."""
table = monotable.MonoTable()
table.max_cell_height = 7
table.more_marker = '...'
cell_grid = []
for block in blocks:
if block.role in [Role.SKIP_CODE, Role.SKIP_SESSION]:
quoted_skips = [r.join(['"', '"']) for r in block.skip_reasons]
skips = '\n'.join(quoted_skips)
else:
skips = ''
cell_grid.append([block.type, block.line, block.role.value, skips])
headings = [
'block\ntype', 'line\nnumber', 'test\nrole',
'skip pattern/reason\nquoted and one per line']
formats = ['', '', '', '(width=30)']
text = table.table(headings, formats, cell_grid, title) # type: str
return text
def skips_report(
skips: List[str], blocks: List[FencedBlock], title: str = '') -> str:
"""Generate text report about the disposition of --skip options."""
# Blocks with role OUTPUT and SKIP_OUTPUT will always have an
# empty skip_reasons list even if the linking code block is skipped.
table = monotable.MonoTable()
table.max_cell_height = 5
table.more_marker = '...'
cell_grid = []
for skip in skips:
code_lines = []
for block in blocks:
if skip in block.skip_reasons:
code_lines.append(str(block.line))
cell_grid.append([skip, ', '.join(code_lines)])
headings = ['skip pattern', 'matching code block line number(s)']
formats = ['', '(width=36;wrap)']
text = table.table(headings, formats, cell_grid, title) # type: str
return text
def test_nothing_fails() -> None:
"""Fail if no Python code blocks or sessions were processed."""
assert False, 'nothing to test'
def test_nothing_passes() -> None:
"""Succeed if no Python code blocks or sessions were processed."""
# nothing to test
pass
_ASSERTION_MESSAGE = 'zero length {} block at line {}'
def build_test_cases(args: Args, blocks: List[FencedBlock]) -> str:
"""Generate test code from the Python fenced code blocks."""
# repr escapes back slashes from win filesystem paths
# so it can be part of the generated test module docstring.
quoted_markdown_path = repr(click.format_filename(args.markdown_file))
markdown_path = quoted_markdown_path[1:-1]
docstring_text = 'pytest file built from {}'.format(markdown_path)
builder = print_capture.PytestFile(docstring_text)
number_of_test_cases = 0
for block in blocks:
if block.role == Role.CODE:
code_identifier = 'code_' + str(block.line)
output_identifier = ''
code = block.contents
assert code, _ASSERTION_MESSAGE.format('code', block.line)
output_block = block.output
if output_block:
output_identifier = '_output_' + str(output_block.line)
expected_output = output_block.contents
assert expected_output, _ASSERTION_MESSAGE.format(
'expected output', block.line)
else:
expected_output = ''
identifier = code_identifier + output_identifier
builder.add_test_case(identifier, code, expected_output)
number_of_test_cases += 1
elif block.role == Role.SESSION:
session = block.contents
assert session, _ASSERTION_MESSAGE.format('session', block.line)
builder.add_interactive_session(str(block.line), session)
number_of_test_cases += 1
if number_of_test_cases == 0:
if args.fail_nocode:
test_function = inspect.getsource(test_nothing_fails)
else:
test_function = inspect.getsource(test_nothing_passes)
builder.add_source(test_function)
return str(builder)
| true
|
90a58f8b4ef9682f94d8bbd52e02ee8e9d5f45a0
|
Python
|
nickderobertis/py-ex-latex
|
/tests/figure/test_inline_graphic.py
|
UTF-8
| 1,282
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
import pyexlatex as pl
from tests.base import EXAMPLE_IMAGE_PATH, GENERATED_FILES_DIR, INPUT_FILES_DIR
from tests.utils.pdf import compare_pdfs
EXPECT_GRAPHIC = '\\vcenteredinclude{width=0.1\\textwidth}{Sources/nd-logo.png}'
EXPECT_DEFINITION = r"""
\newcommand{\vcenteredinclude}[2]{\begingroup
\setbox0=\hbox{\includegraphics[#1]{#2}}%
\parbox{\wd0}{\box0}\endgroup}
""".strip()
def test_inline_graphic():
ig = pl.InlineGraphic(str(EXAMPLE_IMAGE_PATH), width=0.1)
assert str(ig) == EXPECT_GRAPHIC
def test_inline_graphic_in_document():
ig = pl.InlineGraphic(str(EXAMPLE_IMAGE_PATH), width=0.1)
ig2 = pl.InlineGraphic(str(EXAMPLE_IMAGE_PATH), width=0.1)
contents = ['Some inline text before', ig, 'and after and then wrapping onto the next line so that '
'I can make sure that it is working properly in the case '
'that it is used in a real document', ig2]
doc = pl.Document(contents)
assert EXPECT_DEFINITION in str(doc)
assert EXPECT_GRAPHIC in str(doc)
doc.to_pdf(GENERATED_FILES_DIR, outname='inline graphic document')
compare_pdfs(INPUT_FILES_DIR / 'inline graphic document.pdf', GENERATED_FILES_DIR / 'inline graphic document.pdf')
| true
|
f79c9b329775b661e5924fb4d9688a22fb600dba
|
Python
|
Shantnu25/ga-learner-dst-repo
|
/Banking-Inferences/code.py
|
UTF-8
| 4,782
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#Importing header files
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.stats.weightstats import ztest
from statsmodels.stats.weightstats import ztest
from scipy.stats import chi2_contingency
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# Critical Value
critical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*
df = 6) # Df = number of variable categories(in purpose) - 1
#Reading file
data=pd.read_csv(path)
#Code starts here
#1 Finding the Confidence Interval
#Sampling the dataframe
data_sample=data.sample(n=sample_size,random_state=0)
#Finding the mean of the sample
sample_mean=data_sample['installment'].mean()
#Finding the standard deviation of the sample
population_std = data['installment'].std()
#Finding the margin of error
margin_error=(z_critical*population_std)/math.sqrt(sample_size)
#Finding the confidence interval
confidence_interval= (sample_mean-margin_error,sample_mean+margin_error)
print('Confidence interval:', confidence_interval)
#Finding the true mean
true_mean=data['installment'].mean()
print('True mean:', true_mean)
print('--------------------------------------')
#2 Checking if CLT holds for installment column
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Creating different subplots
fig,axes=plt.subplots(3,1, figsize=(10,20))
#Running loop to iterate through rows
for i in range(len(sample_size)):
#Initialising a list
m=[]
#Loop to implement the no. of samples
for j in range(1000):
#Finding mean of a random sample
mean=data['installment'].sample(sample_size[i]).mean()
#Appending the mean to the list
m.append(mean)
#Converting the list to series
mean_series=pd.Series(m)
#Plotting the histogram for the series
axes[i].hist(mean_series, normed=True)
#Displaying the plot
plt.show()
#3 Small Business Interests
# The bank manager believes that people with purpose as 'small_business'
# have been given int.rate more due to the risk assosciated.
# Hypothesis testing(one-sided)
#Null Hypothesis H0: μ= 12 % i.e There is no difference in interest rate being given to people with purpose as 'small_business'
#Alternate Hypothesis H1: μ>12 % i.e.Interest rate being given to people with purpose as 'small_business' is higher than the average interest rate
# Removing the last character from the values in column
data['int.rate'] = data['int.rate'].map(lambda x: str(x)[:-1])
#Dividing the column values by 100
data['int.rate']=data['int.rate'].astype(float)/100
#Applying ztest for the hypothesis
z_statistic_1, p_value_1 = ztest(x1=data[data['purpose']=='small_business']['int.rate'], value=data['int.rate'].mean(), alternative='larger')
print(('Z-statistic 1 is :{}'.format(z_statistic_1)))
print(('P-value 1 is :{}'.format(p_value_1)))
#4 Installment vs Loan Defaulting
# The bank thinks that monthly installments (installment column)
# customers have to pay might have some sort of effect on loan defaulters.
#Null Hypothesis: There is no difference in installments being paid by loan defaulters and loan non defaulters
#Alternate Hypothesis: There is difference in installments being paid by loan defaulters and loan non defaulters
#Applying ztest for the hypothesis
z_statistic_2, p_value_2 = ztest(x1=data[data['paid.back.loan']=='No']['installment'], x2=data[data['paid.back.loan']=='Yes']['installment'])
print(('Z-statistic 2 is :{}'.format(z_statistic_2)))
print(('P-value 2 is :{}'.format(p_value_2)))
#5 Purpose vs Loan Defaulting (both categorical columns)
#Another thing bank suspects is that there is a strong association between purpose of the loan(purpose column) of a person and whether that person has paid back loan (paid.back.loan column)
#Null Hypothesis : Distribution of purpose across all customers is same.
#Alternative Hypothesis : Distribution of purpose for loan defaulters and non defaulters is different.
# Subsetting the dataframe
yes=data[data['paid.back.loan']=='Yes']['purpose'].value_counts()
no=data[data['paid.back.loan']=='No']['purpose'].value_counts()
#Concating yes and no into a single dataframe
observed=pd.concat([yes.transpose(), no.transpose()], axis=1, keys=['Yes', 'No'])
print(observed)
chi2, p, dof, ex = chi2_contingency(observed)
if chi2 > critical_value:
    print('Reject the null Hypothesis')
else:
print('Null Hypothesis can not be rejected')
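# Added sketch: an equivalent decision can be read directly off the p-value returned
# by chi2_contingency above (this assumes the contingency table's degrees of freedom
# match the df=6 used for critical_value, i.e. seven purpose categories).
if p < 0.05:
    print('Reject the null Hypothesis (p-value check)')
else:
    print('Null Hypothesis can not be rejected (p-value check)')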
| true
|
3d9391e6b3fd52756954236c544e4d1d46c77fa4
|
Python
|
robodave94/Honors
|
/ResultsAnalytics/time_Interpret Ball Analytics.py
|
UTF-8
| 9,102
| 2.515625
| 3
|
[] |
no_license
|
import csv
import numpy as np
import cv2
'''
Goal_Area
Time
Classification
Tag
Frame'''
'''
Single Channel Segmentation
Single Channel Segmentation_DoG
Single Channel Segmentation_Lap
Grid Single Channel Scanline
Grid Single Channel Scanline_DoG
Grid Single Channel Scanline_Lap
Vertical Single Channel Scanline
Vertical Single Channel Scanline_DoG
Vertical Single Channel Scanline_Lap
Horizontal Single Channel Scanline
Horizontal Single Channel Scanline_DoG
Horizontal Single Channel Scanline_Lap
RGB Channel Segmentation
RGB Channel Segmentation_DoG
RGB Channel Segmentation_Lap
Grid RGB Channel Scanline
Grid RGB Channel Scanline_DoG
Grid RGB Channel Scanline_Lap
Vertical RGB Channel Scanline
Vertical RGB Channel Scanline_DoG
Vertical RGB Channel Scanline_Lap
Horizontal RGB Channel Scanline
Horizontal RGB Channel Scanline_DoG
Horizontal RGB Channel Scanline_Lap
Vertical Field Gap with Dark Interior
Vertical Field Gap with Dark Interior_DoG
Vertical Field Gap with Dark Interior_Lap
Vertical Field Gaps
Vertical Field Gaps_DoG
Vertical Field Gaps_Lap
Frame'''
class Preprocessingstruct:
def __init__(self, var1, var2,var3,var4,var5,var6,
var7,var8,var9,var10,
var11, var12, var13, var14,var15,
var16,var17,var18,var19,var20,var21,
var22,var23,var24,var25,var26,var27,var28,var29,var30,var31):
self.SingleChannelSegmentation=var1
self.SingleChannelSegmentation_DoG=var2
self.SingleChannelSegmentation_Lap=var3
self.GridSingleChannelScanline=var4
self.GridSingleChannelScanline_DoG=var5
self.GridSingleChannelScanline_Lap=var6
self.VerticalSingleChannelScanline=var7
self.VerticalSingleChannelScanline_DoG=var8
self.VerticalSingleChannelScanline_Lap=var9
self.HorizontalSingleChannelScanline=var10
self.HorizontalSingleChannelScanline_DoG=var11
self.HorizontalSingleChannelScanline_Lap=var12
self.RGBChannelSegmentation=var13
self.RGBChannelSegmentation_DoG=var14
self.RGBChannelSegmentation_Lap=var15
self.GridRGBChannelScanline=var16
self.GridRGBChannelScanline_DoG=var17
self.GridRGBChannelScanline_Lap=var18
self.VerticalRGBChannelScanline=var19
self.VerticalRGBChannelScanline_DoG=var20
self.VerticalRGBChannelScanline_Lap=var21
self.HorizontalRGBChannelScanline=var22
self.HorizontalRGBChannelScanline_DoG=var23
self.HorizontalRGBChannelScanline_Lap=var24
self.VerticalFieldGapwithDarkInterior=var25
self.VerticalFieldGapwithDarkInterior_DoG=var26
self.VerticalFieldGapwithDarkInterior_Lap=var27
self.VerticalFieldGaps=var28
self.VerticalFieldGaps_DoG=var29
self.VerticalFieldGaps_Lap=var30
self.preFrame=var31
return
class VerificationAnalysisStruct:
def __init__(self, var1, var2,var3,var4,var5):
self.Goal_Area=var1
self.Time=var2
self.Classification=var3
self.Tag=var4
self.Frame=var5
return
def gtData():
with open('Resultsball/bPreprocessingSegmentation.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
#print ', '.join(row)
try:
e = Preprocessingstruct(float(row[0]),row[1],int(row[2]),float(row[3]),
int(row[4]),int(row[5]),float(row[6]),int(row[7]),
int(row[8]),float(row[9]),int(row[10]),int(row[11]),float(row[12]),
int(row[13]),int(row[14]),float(row[15]),int(row[16]),int(row[17]),
float(row[18]),int(row[19]),int(row[20]),float(row[21]),int(row[22]),
int(row[23]),float(row[24]),int(row[25]),int(row[26]),float(row[27]),int(row[28]),
int(row[29]),str(row[30]))
PreprcfArr.append(e)
except:
print 'err'
with open('Resultsball/bVerificationExamination.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
#print ', '.join(row)
try:
if str(row[0]).__contains__('['):
ara=str(row[0]).split(' ')
c = VerificationAnalysisStruct([int(ara[0][1:]),int(ara[1]),int(ara[2]),int(ara[3][:-1])],
float(row[1]),str(row[2]),str(row[3]),str(row[4]))
verifArr.append(c)
except:
print 'err'
def dispAv():
#region preprocessingTiming
SingleChannelSegmentation=[]
GridSingleChannelScanline=[]
VerticalSingleChannelScanline=[]
HorizontalSingleChannelScanline=[]
RGBChannelSegmentation=[]
GridRGBChannelScanline=[]
VerticalRGBChannelScanline=[]
HorizontalRGBChannelScanline=[]
VerticalFieldGapwithDarkInterior=[]
VerticalFieldGaps=[]
for c in PreprcfArr:
SingleChannelSegmentation.append(c.SingleChannelSegmentation)
GridSingleChannelScanline.append(c.GridSingleChannelScanline)
VerticalSingleChannelScanline.append(c.VerticalSingleChannelScanline)
HorizontalSingleChannelScanline.append(c.HorizontalSingleChannelScanline)
RGBChannelSegmentation.append(c.RGBChannelSegmentation)
GridRGBChannelScanline.append(c.GridRGBChannelScanline)
VerticalRGBChannelScanline.append(c.VerticalRGBChannelScanline)
HorizontalRGBChannelScanline.append(c.HorizontalRGBChannelScanline)
VerticalFieldGapwithDarkInterior.append(c.VerticalFieldGapwithDarkInterior)
VerticalFieldGaps.append(c.VerticalFieldGaps)
print 'SingleChannelSegmentation',np.average(SingleChannelSegmentation)
print 'GridSingleChannelScanline',np.average(GridSingleChannelScanline)
print 'VerticalSingleChannelScanline',np.average(VerticalSingleChannelScanline)
print 'HorizontalSingleChannelScanline',np.average(HorizontalSingleChannelScanline)
print 'RGBChannelSegmentation',np.average(RGBChannelSegmentation)
print 'GridRGBChannelScanline',np.average(GridRGBChannelScanline)
print 'VerticalRGBChannelScanline',np.average(VerticalRGBChannelScanline)
print 'HorizontalRGBChannelScanline',np.average(HorizontalRGBChannelScanline)
print 'VerticalFieldGapwithDarkInterior', np.average(VerticalFieldGapwithDarkInterior)
print 'VerticalFieldGaps', np.average(VerticalFieldGaps)
HoGTmng=[]
CNNTmng=[]
truecontrast=[]
falsebhuman=[]
falsecontrast = []
truebhuman = []
for c in verifArr:
if str(c.Tag).__contains__('bhuman'):
if str(c.Classification)=='True':
truebhuman.append(c.Time)
else:
falsebhuman.append(c.Time)
elif str(c.Tag).__contains__('Constrast'):
if str(c.Classification)=='True':
truecontrast.append(c.Time)
else:
falsecontrast.append(c.Time)
elif str(c.Tag).__contains__('HoG'):
HoGTmng.append(c.Time)
else:
CNNTmng.append(c.Time)
print 'Truebhuman',np.average(truebhuman)
print 'Truecontrast',np.average(truecontrast)
print 'Falsebhuman', np.average(falsebhuman)
print 'Falsecontrast', np.average(falsecontrast)
print 'HoGTime',np.average(HoGTmng)
print 'CNN_Time',np.average(CNNTmng)
return
def pltFreq():
return
PreprcfArr=[]
verifArr=[]
gtData()
#dispAv()
#pltFreq()
strlst=[]
truecnt=0
falsecnt=0
cnt = 0
count = 0
import ball_Classification
for x in verifArr:
if not strlst.__contains__(x.Frame):
strlst.append(x.Frame)
recView=cv2.imread(x.Frame)
test=[]
for c in verifArr:
if x.Frame==c.Frame:
if str(c.Tag).__contains__('DoG_bhuman'):
#print c.Goal_Area, c.Classification, c.Tag
valud=ball_Classification.bhumanInteriorExamination(recView[c.Goal_Area[1]:c.Goal_Area[1]+c.Goal_Area[3],
c.Goal_Area[0]:c.Goal_Area[0] + c.Goal_Area[2]])
print valud
if valud[1] == True:
cv2.rectangle(recView, (c.Goal_Area[0], c.Goal_Area[1]),
(c.Goal_Area[0] + c.Goal_Area[2], c.Goal_Area[1] + c.Goal_Area[3]), (255,255,255), 1)
truecnt+=1
else:
cv2.rectangle(recView, (c.Goal_Area[0], c.Goal_Area[1]),
(c.Goal_Area[0] + c.Goal_Area[2], c.Goal_Area[1] + c.Goal_Area[3]), (100,100,255), 1)
falsecnt+=1
cnt+=1
print cnt,truecnt,falsecnt
#cv2.imshow('',recView)
#cv2.waitKey(2020202020)
count += 1
if count > 150:
break
print cnt,truecnt,falsecnt
print cnt
| true
|
3232d85a11dec97e8719448ea1bf4f79f524560b
|
Python
|
hyyoka/text_style_transfer_Tobigs
|
/Style_Transformer/evaluator/evaluator.py
|
UTF-8
| 3,695
| 2.734375
| 3
|
[] |
no_license
|
from nltk.tokenize import word_tokenize
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from pytorch_pretrained_bert import BertTokenizer,BertForMaskedLM
import fasttext
import pkg_resources
import math
from numpy import mean
import torch
from torch.nn import Softmax
class Evaluator(object):
def __init__(self):
resource_package = __name__
native_fasttext = 'native_fasttext.bin'
native_fasttext_file = pkg_resources.resource_stream(resource_package, native_fasttext)
self.classifier_native = fasttext.load_model(native_fasttext_file.name)
self.native_ppl_model = BertForMaskedLM.from_pretrained('bert-base-uncased')
self.smoothing = SmoothingFunction().method4
    # used by acc_b
def native_style_check(self, text_transfered, style_origin):
text_transfered = ' '.join(word_tokenize(text_transfered.lower().strip()))
if text_transfered == '':
return False
label = self.classifier_native.predict([text_transfered])
style_transfered = label[0][0] == '__label__positive'
return (style_transfered != style_origin)
    # functions for measuring accuracy (currently not used)
def native_acc_b(self, texts, styles_origin):
assert len(texts) == len(styles_origin), 'Size of inputs does not match!'
count = 0
for text, style in zip(texts, styles_origin):
if self.native_style_check(text, style):
count += 1
return count / len(texts)
def native_acc_0(self, texts):
styles_origin = [0] * len(texts)
return self.native_acc_b(texts, styles_origin)
def native_acc_1(self, texts):
styles_origin = [1] * len(texts)
return self.native_acc_b(texts, styles_origin)
    # functions for measuring BLEU (currently not used)
def nltk_bleu(self, texts_origin, text_transfered):
texts_origin = [word_tokenize(text_origin.lower().strip()) for text_origin in texts_origin]
text_transfered = word_tokenize(text_transfered.lower().strip())
return sentence_bleu(texts_origin, text_transfered, smoothing_function = self.smoothing) * 100
def self_bleu_b(self, texts_origin, texts_transfered):
assert len(texts_origin) == len(texts_transfered), 'Size of inputs does not match!'
sum = 0
n = len(texts_origin)
for x, y in zip(texts_origin, texts_transfered):
try :
bleu = self.nltk_bleu([x], y)
except ZeroDivisionError:
bleu = 0
sum += bleu
return sum / n
    # function for checking perplexity (PPL)
    def native_ppl(self, texts_transfered):  # the generated (transferred) sentences are the input
softmax = Softmax(dim = 0)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenize_input = [tokenizer.tokenize(line) for line in texts_transfered]
tensor_input = [torch.tensor(tokenizer.convert_tokens_to_ids(line)).unsqueeze_(1).to(torch.int64) for line in tokenize_input]
ppl_result = []
for sentence in tensor_input:
sentence_prediction = self.native_ppl_model(sentence)
sentence_confidence = softmax(sentence_prediction).squeeze_(dim = 1)
sentence_ppl_list = [confidence[token_idx].item() for confidence, token_idx in zip(sentence_confidence, sentence)]
length = len(sentence_ppl_list)
if length == 0 : length = 1
sentence_ppl = prod_list(sentence_ppl_list)**(-1/length)
ppl_result.append(sentence_ppl)
return mean(ppl_result)
def prod_list(ppl_list):
result = 1
for elem in ppl_list:
result *= elem
return result
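# Added sketch (not part of the original evaluator): multiplying many small
# probabilities in prod_list can underflow to zero for long sentences. Since
# prod(p_i) ** (-1/n) == exp(-(1/n) * sum(log p_i)), the same perplexity can be
# computed in log space; assumes every probability is strictly positive.
def log_space_ppl(prob_list):
    n = max(len(prob_list), 1)
    return math.exp(-sum(math.log(p) for p in prob_list) / n)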
| true
|
42017c5235c4470a4c2b597453009c24e0f7ab89
|
Python
|
mlixytz/learning
|
/algorithm/limiting/counter.py
|
UTF-8
| 783
| 3.375
| 3
|
[] |
no_license
|
'''
Counter-based rate limiting

Implemented with a sliding window; without a sliding window the classic
boundary problem appears at the edges of fixed windows.
Example:
Suppose 100 requests per second are allowed. Use a 1-second sliding window
split into 10 slots, each slot covering 100 ms; the window slides every 100 ms
and each slot stores a counter. On every slide the counts inside the window are
compared against the limit of 100 and further requests are throttled.
(More slots give a smoother limit.)
'''
import threading
import time
counter = 0
# each element of the list stores a value of the form {time, count}
windows = []
def accept():
    if grant():
        print("Request accepted!")
    else:
        print("Rate limited!")
def grant():
    now = int(time.time() * 1000)  # current time in milliseconds
    # drop slots that have slid out of the 1-second window
    while windows and now - windows[0]['time'] >= 1000:
        windows.pop(0)
    # throttle when the window already holds 100 or more requests
    if sum(window['count'] for window in windows) >= 100:
        return False
    # record this request in the current 100 ms slot
    slot = now - now % 100
    if windows and windows[-1]['time'] == slot:
        windows[-1]['count'] += 1
    else:
        windows.append({'time': slot, 'count': 1})
    return True
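# Minimal usage sketch (added; assumes the 100-requests-per-second limit above):
# fire a quick burst and watch the limiter start rejecting once the window is full.
if __name__ == '__main__':
    for _ in range(120):
        accept()
        time.sleep(0.005)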
| true
|
7e98ae05caf76461ca4d1e55d55c6af3778376e8
|
Python
|
niteshagrahari/pythoncodecamp
|
/OOPs/scratch.py
|
UTF-8
| 537
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
class A:
def f(self):
print("F in A")
def addAll(*args):
sum = 0
for arg in args:
sum += arg
print(sum)
def f(*args):
print(type(args))
for arg in args:
print(arg)
def ff(**kargs):
print(type(kargs))
for key,value in kargs.items():
print(key,value)
f(1,2,3,4)
ff(name="pappu",age="10")
def f1():
print("F1")
def f2():
print("F2")
def sub(a,b):
print(a-b)
def f(fn):
fn()
#print("F")
def fs(fn):
fn(1,2)
#print("F")
a=A()
fs(sub)
f(a.f)
addAll()
| true
|
6f88aaad34ad6b136aee4da0833d975c62c87402
|
Python
|
liyuanyuan11/Python
|
/def/def1.py
|
UTF-8
| 67
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
def firstFunction(name):
str1="Hello "+name+"!"
print(str1)
| true
|
06da674ca8e793ca758fe6782a3ce3ef679336f7
|
Python
|
IamPatric/pe_lab_4
|
/task_2.1.py
|
UTF-8
| 1,023
| 3.203125
| 3
|
[] |
no_license
|
from task_1 import get_files_count
from task_2 import FileProcessing
from task_2 import list_sorting
def main():
fp = FileProcessing
print(f'Task 1: count files\n{get_files_count("C:/pe_lab_4") = }')
print(f'Task 2: making list from file\n{fp.make_list_from_file("students.csv") = }')
data = fp.make_list_from_file("students.csv")
print(f'Task 2.1: list sorting by last name\n{list_sorting(data[1:], 1) = }')
print(f'Task 2.2: age > 22\n{[x for x in data[1:] if int(x[2]) >= 22] = }')
# 2.3
print(f'Task 2.3: writing in csv\n{fp.write_csv(data, "newfiles.csv") = }')
data.append(['11', 'Якубов Фарид Ильясович', '26', '351066'])
fp.write_csv(data, "students_changed.csv")
print(f'Task 2.4: writing in csv changed data\n{data = }')
print(f'{fp.make_list_from_file("students_changed.csv") = }')
print(f'Task 2.5: writing in pickle')
fp.write_pickle(data, 'test.pickle')
print(f'Task 2.6: read pickle\n{fp.read_pickle("test.pickle")}')
if __name__ == '__main__':
main()
| true
|
b2b1a03d1552e07a4298116f3e9e6ac56c45c11a
|
Python
|
joaabjb/curso_em_video_python_3
|
/desafio031_custo_da_viagem.py
|
UTF-8
| 195
| 3.421875
| 3
|
[] |
no_license
|
d = int(input('Enter the trip distance in km: '))
if d <= 200:
    print(f'The ticket price will be R$ {0.50 * d :.2f}')
else:
    print(f'The ticket price will be R$ {0.45 * d :.2f}')
| true
|
93c22e0e5ccd3339e22f384b32496188de58ed89
|
Python
|
ChetanKaushik702/DSA
|
/python/trimmedMean.py
|
UTF-8
| 439
| 3.03125
| 3
|
[] |
no_license
|
from statistics import mean
from scipy import stats
def trimmedMean(data):
data.sort()
n = int(0.1*len(data))
data = data[n:]
data = data[:len(data)-n]
mean = 0
for i in data:
mean = mean + i
print(mean/len(data))
data = [1, 2, 1, 3, 2, 1, 2, 5, 5, 10, 22, 20, 24, 129, 500, 23, 356, 2345]
data.sort()
print(stats.trim_mean(data, 0.1))
trimmedMean(data)
# for i in range(len(data)):
# print(data[i])
| true
|
e1a8a397d5979aff369eb5976393aab78558180c
|
Python
|
jbeks/compsci
|
/code/tests/black_hole.py
|
UTF-8
| 2,707
| 2.796875
| 3
|
[] |
no_license
|
import argparse
import numpy as np
from warnings import warn
import code_dir
from nbody import *
def set_parser_bh(parser):
"""
Adds black hole arguments (speed and distance) to parser.
"""
parser.add_argument(
"dist", type=float,
help="distance of black hole from solar system"
)
parser.add_argument(
"speed", type=float,
help="speed of black hole (km/s)"
)
def simulate_bh(dist, speed, args, G, sys):
"""
Runs a solar system simulation
with a black hole at the given distance and with the given speed.
"""
# longest distance of black hole from solar system
start_dist = 1.5e11
# axis on which the black hole is placed
e1 = np.array([1,0,0], dtype=float)
e2 = np.array([0,0,1], dtype=float)
e1 /= np.linalg.norm(e1)
e2 /= np.linalg.norm(e2)
    # minimum height for a simulation (takes 1.2 * the orbital period of Neptune in time)
min_height = 1.2 * 5201280000. * speed / 2
# check whether given distance is smaller than maximum distance
if start_dist < dist:
warn("Given distance is larger than assumed largest distance")
height = min_height
else:
# calculate height for simulation
# (where dist from solar system is start_dist)
height = np.sqrt(start_dist ** 2 - dist ** 2)
# if height is less than min_height, set height to min_height
if height < min_height:
height = min_height
# calculate position and velocity of black hole
vec_dist = dist * e1
vec_height = height * e2
bh_p = vec_dist + vec_height
bh_v = -e2 * speed
# create black hole
sun_m = 1.989e+30
bh = Body(
6.5 * sun_m, # stellar black hole
bh_p,
bh_v,
("Black_Hole", "None")
)
# create system with black hole
system = System(G, sys+[bh], args.itype.lower())
# if no time is given, run for the time it takes
# for the black hole to move 2 * height
if args.t_end == 0:
t_end = 2 * np.linalg.norm(vec_height) / speed
else:
t_end = args.t_end
# return output of simulation
return simulate(system, t_end, args.dt, args.t_dia, args.t_out)
if __name__ == "__main__":
# create parser
parser = argparse.ArgumentParser()
set_parser(parser)
set_parser_bh(parser)
args = parser.parse_args()
# get system from standard input
G, sys = get_system_data()
# run black hole simulation
sim_data = simulate_bh(args.dist, args.speed, args, G, sys)
# plot data if asked to
if args.plot_2d or args.plot_3d:
simple_plot([p.T for p in sim_data], args.plot_3d, args.n_points)
| true
|
55ba9c403277818540087edbc294c2e332cfbf1e
|
Python
|
mtreviso/university
|
/Projeto de Linguagens de Programacao/Trabalho 1/python/lacos.py
|
UTF-8
| 406
| 3.109375
| 3
|
[] |
no_license
|
import os, sys
def multMatrix(matrix1, matrix2, n):
mat = [[0 for y in range(n)] for x in range(n)]
for i in range(n):
for j in range(n):
for k in range(n):
mat[i][j] += matrix1[i][k]*matrix2[k][j]
return mat
n = int(sys.argv[1])
mat1 = [[x+y for y in range(1, n+1)] for x in range(1, n+1)]
mat2 = [[x*y for y in range(1, n+1)] for x in range(1, n+1)]
print(str(multMatrix(mat1, mat2, n)))
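# Added sketch for comparison (assumes numpy is installed; the exercise itself is
# about explicit loops): the same product computed in vectorized form.
import numpy as np
print(np.array(mat1) @ np.array(mat2))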
| true
|
8c6590427061a4365c26351e29f1c8fc04bb8c74
|
Python
|
jssvldk/Practicum1
|
/Rabota1(№13).py
|
UTF-8
| 1,350
| 3.15625
| 3
|
[] |
no_license
|
"""
Project name: Rabota1
Version: 1.0
File name: Rabota1(№13).py
Author: 2020 © В.А.Шаровский, Chelyabinsk
License: CC BY-NC 4.0 (https://creativecommons.org/licenses/by-nc/4.0/deed.ru)
Created: 15.12.2020
Last modified: 15.12.2020
Description: Solution of problem No. 13 of Practicum No. 1
Description: Can a square beam of width A be sawn from a log whose cross-section has diameter D?
# Python version: 3.9
"""
import math
D = int(input("Enter the diameter of the cross-section: "))
A = int(input("Enter the width of the square beam: "))
diagonal = math.sqrt(2) * A  # the beam fits if its diagonal does not exceed the diameter
print("The beam's diagonal is", diagonal)
if diagonal <= D:
    print("A square beam of width", A, "can be sawn out")
else:
    print("A square beam of width", A, "cannot be sawn out")
| true
|
ed40e8f38a0176101a9e510daaf05fb01e005406
|
Python
|
jihoonyou/problem-solving
|
/Baekjoon/학교 탐방하기.py
|
UTF-8
| 890
| 3.09375
| 3
|
[] |
no_license
|
'''
School Tour (학교 탐방하기)
https://www.acmicpc.net/problem/13418
'''
import sys
input = sys.stdin.readline
N,M = map(int, input().split())
parents = [i for i in range(N+1)]
graph = []
def find(x):
if x == parents[x]:
return x
parents[x] = find(parents[x])
return parents[x]
def union(a,b):
a = parents[a]
b = parents[b]
if a < b:
parents[b] = a
else:
parents[a] = b
for _ in range(M+1):
A,B,C = map(int, input().split())
graph.append((C,A,B))
graph.sort()
worst = 0
for i in range(M+1):
C,A,B = graph[i]
if find(A) != find(B):
union(A,B)
if C == 0:
worst += 1
graph.sort(reverse=True)
parents = [i for i in range(N+1)]
best = 0
for i in range(M+1):
C,A,B = graph[i]
if find(A) != find(B):
union(A,B)
if C == 0:
best += 1
print(worst*worst - best*best)
| true
|
b6050d26ca021665861861745ad74a1592b816d9
|
Python
|
mwaiton/python-macrobenchmarks
|
/benchmarks/pytorch_alexnet_inference.py
|
UTF-8
| 1,393
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
import json
import time
import torch
import urllib
import sys
if __name__ == "__main__":
start = time.time()
model = torch.hub.load('pytorch/vision:v0.6.0', 'alexnet', pretrained=True)
# assert time.time() - start < 3, "looks like we just did the first-time download, run this benchmark again to get a clean run"
model.eval()
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
from PIL import Image
from torchvision import transforms
input_image = Image.open(filename)
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
n = 1000
if len(sys.argv) > 1:
n = int(sys.argv[1])
with torch.no_grad():
times = []
for i in range(n):
times.append(time.time())
if i % 10 == 0:
print(i)
output = model(input_batch)
times.append(time.time())
print((len(times) - 1) / (times[-1] - times[0]) , "/s")
if len(sys.argv) > 2:
json.dump(times, open(sys.argv[2], 'w'))
| true
|
860c30526bc5b1940fc225c1e1f27ff11aa6ea84
|
Python
|
ethyl2/Intro-Python-I
|
/src/misc/hackerrank/strings/piglatin.py
|
UTF-8
| 2,304
| 4.71875
| 5
|
[] |
no_license
|
"""
https://www.codewars.com/kata/520b9d2ad5c005041100000f/python
Given a string, move the first letter of each word to the end of it, and then add 'ay' to the end of the word.
Leave punctuation marks untouched.
Examples:
pig_it('Pig latin is cool') # igPay atinlay siay oolcay
pig_it('Hello world !') # elloHay orldway !
"""
import string
import re
# First version uses my custom punctuation set
# This is the only one that checks for punctuation that is right next to a word, like 'end.' and deals with it.
# It didn't seem needed to get the tests to pass, so I didn't implement that case in the other versions.
def pig_it(text):
punctuation = {'.', ',', '!', '?'}
piggy_words = []
for word in text.split(' '):
if word in punctuation:
piggy_words.append(word)
elif word[-1] in punctuation:
piggy_words.append(word[1:-1] + word[0] + 'ay' + word[-1])
else:
piggy_words.append(word[1:] + word[0] + 'ay')
piggy_string = ' '.join(piggy_words)
return piggy_string
# Second version uses re to check for punctuation
def pig_it2(text):
piggy_words = []
for word in text.split(' '):
if re.match('\W', word):
piggy_words.append(word)
else:
piggy_words.append(word[1:] + word[0] + 'ay')
return ' '.join(piggy_words)
# Third version uses string.punctuation for punctuation check
def pig_it3(text):
piggy_words = []
for word in text.split(' '):
if word in string.punctuation:
piggy_words.append(word)
else:
piggy_words.append(word[1:] + word[0] + 'ay')
return ' '.join(piggy_words)
# My fourth version is like the 3rd version, but puts it into a list comprehension
def pig_it4(text):
return ' '.join([word if word in string.punctuation else word[1:] + word[0] + 'ay' for word in text.split(' ')])
# And here's fifth version that uses .isalpha() to check for punctuation:
def pig_it5(text):
return ' '.join([word[1:] + word[0] + 'ay' if word.isalpha() else word for word in text.split(' ')])
print(pig_it('Thomas is so crazy and loves to eat sushi.'))
print(pig_it2('Hello world !'))
print(pig_it3('Pig latin is cool !'))
print(pig_it4('Pig latin is cool !'))
print(pig_it5('Pig latin is cool !'))
| true
|
80d8ed6b1d415770d61797894c44ae18fddc6ae5
|
Python
|
V4p1d/FPSP_Covid19
|
/python/clds/agents/lockdown_policy.py
|
UTF-8
| 1,768
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import numpy.matlib  # np.matlib.repmat used in _to_batch below requires this explicit import
from ..core import Agent
class BatchLockdown(Agent):
""" Lockdown policy.
Agent returns
[0, ... suppression start]: beta_high
[suppression_start, ..., suppression_end): beta_low
[suppression_end, ..., END]: beta_high
"""
def __init__(self, beta_high=1, beta_low=0, batch_size=1, suppression_start=0, suppression_end=None):
self.batch_size = batch_size
self.beta_high = self._to_batch(beta_high)
self.beta_low = self._to_batch(beta_low)
self.suppression_start = suppression_start
self.suppression_end = suppression_end
# variable should have shape (batch, ) + shape
def _to_batch(self, x, shape=()):
# return placeholder key or callable
if isinstance(x, str) or callable(x):
return x
x_arr = np.array(x)
target_shape = (self.batch_size, ) + shape
if x_arr.shape == target_shape:
return x_arr
elif (x_arr.shape == shape):
return np.matlib.repmat(x_arr.reshape(shape), self.batch_size,1).reshape(target_shape)
        elif len(x_arr.shape) > 0 and x_arr.shape[0] == target_shape[0]:
return x_arr.reshape(target_shape)
else:
print("Warning: unable to convert to target shape", x, target_shape)
return x
def reset(self):
self.steps = 1
return self.beta_high
def step(self, x):
y = self.beta_high
if (self.steps >= self.suppression_start):
y = self.beta_low
if (self.suppression_end is not None) and (self.steps >= self.suppression_end):
y = self.beta_high
self.steps += 1
return y, 0, False, None
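# Minimal usage sketch (added; parameter values are illustrative and this assumes the
# surrounding clds package so that the Agent import above resolves): suppress
# transmission between steps 5 and 10 for a batch of two scenarios.
if __name__ == '__main__':
    policy = BatchLockdown(beta_high=1.0, beta_low=0.2, batch_size=2,
                           suppression_start=5, suppression_end=10)
    beta = policy.reset()
    for t in range(12):
        beta, _, _, _ = policy.step(None)
        print(t, beta)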
| true
|
8dd4ac35a9f88898f96a39908a18479c6b1cb0fe
|
Python
|
the-py/the
|
/test/test_the_exe.py
|
UTF-8
| 1,297
| 2.765625
| 3
|
[] |
no_license
|
import unittest
from the import *
class TestTheExe(unittest.TestCase):
def setUp(self):
self.eq = self.assertEqual
self.neq = self.assertNotEqual
self.r = self.assertRaises
self.true = self.assertTrue
# ---- coders keyworld ----
# true
def test_true(self):
self.true(the(True).true)
with self.r(AssertionError):
the(False).true
# false
def test_false(self):
self.true(the(False).false)
with self.r(AssertionError):
the(True).false
# NOT
def test_should_not(self):
self.true(the(True).should_not.be.false)
with self.r(AssertionError):
the(True).should_not.be.true
# none
def test_none_is_none(self):
self.true(the(None).none)
with self.r(AssertionError):
the(1).none
# exist
def test_exist(self):
self.true(the(1).exist)
with self.r(AssertionError):
the(None).exist
# ok
def test_ok(self):
self.true(the(1).ok)
with self.r(AssertionError):
the([]).ok
# emtpy
def test_empty(self):
self.true(the([]).empty)
with self.r(AssertionError):
the(1).empty
if __name__ == '__main__':
unittest.main()
| true
|
0e33bc5b900b1a64532df9820c9fcb390222eb3e
|
Python
|
MeghnaPrabhu/Multimedia-Text-and-Image-Retrieval
|
/phase3/LSH.py
|
UTF-8
| 6,594
| 2.6875
| 3
|
[] |
no_license
|
import math
from collections import defaultdict
from functools import reduce
from pprint import pprint
import numpy as np
import pandas as pd
from phase1.csvProcessor import CsvProcessor
SEED = 12
np.random.seed(SEED)
IMAGE_ID_COL = 'imageId'
class LSH:
def __init__(self, hash_obj, num_layers, num_hash, vec, b, w):
self.hash_obj = hash_obj
self.num_layers = num_layers
self.num_hash = num_hash
self.vec = vec
self.b = b
self.w = w
def create_hash_table(self, img_vecs, verbose=False):
""" Vectorized hash function to bucket all img vecs
Returns
-------
hash_table : List of List of defaultdicts
"""
hash_table = self.init_hash_table()
for vec in img_vecs:
img_id, img_vec = vec[0], vec[1:]
for idx, hash_vec in enumerate(hash_table):
buckets = self.hash_obj.hash(img_vec, self.vec[idx], self.b[idx], self.w)
for i in range(len(buckets)):
hash_vec[i][buckets[i]].add(img_id)
# TODO save hashtable somewhere
if verbose:
pprint(hash_table)
return hash_table
def init_hash_table(self):
hash_table = []
for i in range(self.num_layers):
hash_layer = []
for j in range(self.num_hash):
hash_vec = defaultdict(set)
hash_layer.append(hash_vec)
hash_table.append(hash_layer)
return hash_table
    def find_ann(self, query_point, hash_table, k=5, num_conjunctions=None):
        candidate_imgs = set()
        # start with all hash functions; reduced on the recursive retry below
        if num_conjunctions is None:
            num_conjunctions = self.num_hash
for layer_idx, layer in enumerate(self.vec):
hash_vec = hash_table[layer_idx]
buckets = self.hash_obj.hash(query_point, layer, self.b[layer_idx], self.w)
cand = hash_vec[0][buckets[0]].copy()
# self.test(hash_vec[1])
for ix, idx in enumerate(buckets[1:num_conjunctions]):
# needs ix+1 since we already took care of index 0
cand = cand.intersection(hash_vec[ix + 1][idx])
candidate_imgs = candidate_imgs.union(cand)
if len(candidate_imgs) > 4 * k:
print(f'Early stopping at layer {layer_idx} found {len(candidate_imgs) }')
break
if len(candidate_imgs) < k:
if num_conjunctions > 1:
num_conjunctions -= 1
                return self.find_ann(query_point, hash_table, k=k, num_conjunctions=num_conjunctions)
else:
print('fubar')
return candidate_imgs
def post_process_filter(self, query_point, candidates, k):
distances = [{IMAGE_ID_COL: int(row[IMAGE_ID_COL]),
'dist': self.hash_obj.dist(query_point, row.drop(IMAGE_ID_COL))}
for idx, row in candidates.iterrows()]
# distances []
# for row in candidates.iterrows():
# dist = self.hash_obj.dist(query_point, )
return sorted(distances, key=lambda x: x['dist'])[:k]
class l2DistHash:
def hash(self, point, vec, b, w):
"""
Parameters
----------
point :
vec:
Returns
-------
numpy array of which buckets point falls in given layer
"""
val = np.dot(vec, point) + b
val = val * 100
res = np.floor_divide(val, w)
return res
def dist(self, point1, point2):
v = (point1 - point2)**2
return math.sqrt(sum(v))
class lshOrchestrator:
def __init__(self, base_path, databas_ops):
suffix_image_dir = "/descvis/img"
self.csvProcessor = CsvProcessor(base_path + suffix_image_dir, databas_ops)
def run_lsh(self, input_vec, num_layers, num_hash):
w = 5
dim = len(input_vec[0])
vec = np.random.rand(num_layers, num_hash, dim - 1)
b = np.random.randint(low=0, high=w, size=(num_layers, num_hash))
l2_dist_obj = l2DistHash()
lsh = LSH(hash_obj=l2_dist_obj, num_layers=num_layers, num_hash=num_hash, vec=vec, b=b, w=w)
hashTable = lsh.create_hash_table(input_vec, verbose=False)
return hashTable
def get_combined_visual_model(self, models):
model_dfs = []
for model_name in models:
df = self.csvProcessor.create_concatenated_and_normalised_data_frame_for_model(model_name, normalise=True)
df = df.rename(columns={df.columns[0]: IMAGE_ID_COL})
model_dfs.append(df)
img_dfs = reduce(lambda left, right: pd.merge(left, right, on=[IMAGE_ID_COL, 'location']), model_dfs)
return img_dfs
def img_ann(self, query, k, num_layers=100, num_hash=30, layer_file_name=None):
models = ['CN3x3', 'CM3x3', 'HOG', 'CSD', 'GLRLM']
print(f'Using models : {models}')
img_df = self.get_combined_visual_model(models)
img_id_loc_df = img_df[[IMAGE_ID_COL, 'location']]
img_df.drop('location', axis=1, inplace=True)
assert img_df.shape[1] > 256
n, dim = img_df.shape
# w = int(math.sqrt(n)) if n > 100 else k**3
w = 400
# Create vector with rand num in num_layers X num_hash X dim-1(1 dim for img_id)
vec = np.random.rand(num_layers, num_hash, dim - 1)
#vec = np.arange(num_layers*num_hash*(dim-1)).reshape(num_layers, num_hash, dim-1)
b = np.random.randint(low=0, high=w, size=(num_layers, num_hash))
# b = np.arange(num_layers*num_hash).reshape(num_layers, num_hash)
l2_dist_obj = l2DistHash()
lsh = LSH(hash_obj=l2_dist_obj, num_layers=num_layers, num_hash=num_hash, vec=vec, b=b, w=w)
hash_table = lsh.create_hash_table(img_df.values)
query_vec = img_df.loc[img_df[IMAGE_ID_COL] == int(query)].drop(IMAGE_ID_COL, axis=1)
t = query_vec.shape[1]
query_vec = query_vec.values.reshape(t, )
candidate_ids = lsh.find_ann(query_point=query_vec, hash_table=hash_table, k=k)
candidate_vecs = img_df.loc[img_df[IMAGE_ID_COL].isin(candidate_ids)]
if not candidate_ids:
return None
dist_res = lsh.post_process_filter(query_point=query_vec, candidates=candidate_vecs, k=k)
for i in dist_res:
img_id = i[IMAGE_ID_COL]
i['loc'] = img_id_loc_df.loc[img_id_loc_df[IMAGE_ID_COL] == img_id, 'location'].item()
return dist_res
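# Added standalone sketch (synthetic data, illustrative parameter values; assumes the
# module's imports resolve): exercises the LSH and l2DistHash classes above without
# the CSV pipeline used by lshOrchestrator.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n_imgs, dim, num_layers, num_hash, w = 200, 16, 4, 3, 400
    feats = rng.rand(n_imgs, dim)
    img_vecs = np.hstack([np.arange(n_imgs).reshape(-1, 1), feats])
    vec = rng.rand(num_layers, num_hash, dim)
    b = rng.randint(low=0, high=w, size=(num_layers, num_hash))
    lsh = LSH(hash_obj=l2DistHash(), num_layers=num_layers, num_hash=num_hash,
              vec=vec, b=b, w=w)
    table = lsh.create_hash_table(img_vecs)
    candidates = lsh.find_ann(query_point=feats[0], hash_table=table, k=5)
    print('candidate ids near image 0:', sorted(candidates)[:10])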
| true
|
fc3109c947a366c848706d7c3b879ad9b69cc06e
|
Python
|
InesTeudjio/FirstPythonProgram
|
/ex14.py
|
UTF-8
| 302
| 4.375
| 4
|
[] |
no_license
|
# 14. Write a Python program that accepts a comma separated sequence of words as input and prints the unique words in sorted form
items = input("Input comma separated sequence of words: ")
words = [word.strip() for word in items.split(',')]  # break the string into a list of words
for word in sorted(set(words)):  # unique words, in sorted order
    print(word)
| true
|
51586c90b0796158f211dc7b79c67a78f5275c36
|
Python
|
fructoast/Simulation-Eng
|
/simu1113/trapezoid-simpson.py
|
UTF-8
| 1,763
| 4
| 4
|
[] |
no_license
|
#encode:utf-8
import math
def trapezoid_integral(a,b,power):
    n = 10000  # n = number of sample points (any value works)
h = (b-a)/n
add = 0
for term in range(n+1):
if term==0 or term==n:
if power==3:
add += level3_func(h*term)
elif power==4:
add += level4_func(h*term)
else:
print("undefined.")
else:
if power==3:
add += 2*level3_func(h*term)
elif power==4:
add += 2*level4_func(h*term)
else:
print("undefined.")
result = add * h / 2
print("trapezoid:",result)
def simpson_integral(a,b,power):
    n = 10000  # n = number of sample points (any value works)
n *= 2
h = (b-a)/n
add = 0
for term in range(n+1):
if term==0 or term==n:
if power==3:
add += level3_func(h*term)
elif power==4:
add += level4_func(h*term)
else:
print("undefined.")
else:
if power==3:
if term%2 == 1:
add += 4*level3_func(h*term)
else:
add += 2*level3_func(h*term)
            elif power==4:
                if term%2 == 1:
                    add += 4*level4_func(h*term)  # odd-index points carry weight 4 in Simpson's rule
                else:
                    add += 2*level4_func(h*term)
else:
print("undefined.")
result = add * h / 3
print("simpson:",result)
def level3_func(x):
return float(4*x**3-10*x**2+4*x+5)
def level4_func(x):
return float(x**4+2*x)
#~0-2(4x^3-10x^2+4x+5)dx
trapezoid_integral(0,2,3)
simpson_integral(0,2,3)
#~0-3(x^4+2x)dx
trapezoid_integral(0,3,4)
simpson_integral(0,3,4)
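# Added verification sketch: the analytic values for comparison with the outputs above.
print("exact value, level3 on [0, 2]:", 16 - 80/3 + 8 + 10)  # = 22/3 ≈ 7.3333
print("exact value, level4 on [0, 3]:", 243/5 + 9)           # = 57.6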
| true
|
79bcd432841a271894629d59268f0ca3afda5517
|
Python
|
carlos2020Lp/progra-utfsm
|
/diapos/programas/replace.py
|
UTF-8
| 192
| 3.28125
| 3
|
[] |
no_license
|
>>> palabra = 'cara'
>>> palabra.replace('r', 's')
'casa'
>>> palabra.replace('ca', 'pa')
'para'
>>> palabra.replace('a', 'e', 1)
'cera'
>>> palabra.replace('c', '').replace('a', 'o')
'oro'
| true
|
38621d67a56d9b4e1fb693372710f37633f67aa0
|
Python
|
1325052669/leetcode
|
/src/JiuZhangSuanFa/BinarySearch/457. Classical Binary Search.py
|
UTF-8
| 571
| 3.453125
| 3
|
[] |
no_license
|
class Solution:
"""
@param nums: An integer array sorted in ascending order
@param target: An integer
@return: An integer
"""
def findPosition(self, nums, target):
# write your code here
if not nums: return -1
l, r = 0, len(nums) - 1
while l + 1 < r:
mid = l + (r - l) // 2
if nums[mid] <= target:
l = mid
else:
r = mid
if nums[l] == target:
return l
if nums[r] == target:
return r
return -1
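# Added usage sketch (illustrative input; for duplicates any index holding the target is fine):
if __name__ == '__main__':
    nums = [1, 2, 2, 4, 5, 5]
    idx = Solution().findPosition(nums, 2)
    assert idx != -1 and nums[idx] == 2
    assert Solution().findPosition(nums, 3) == -1
    print('ok')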
| true
|
df4d859cdc5ce36d607e7e7faf7e72b040d6f98b
|
Python
|
MunoDevelop/codingTest
|
/1197/1197.py
|
UTF-8
| 1,033
| 3.390625
| 3
|
[] |
no_license
|
import sys
import heapq
class DisjointSet:
def __init__(self, n):
self.data = [-1]*n
self.size = n
def find(self, index):
value = self.data[index]
if value < 0:
return index
return self.find(value)
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
if self.data[x] < self.data[y]:
self.data[y] = x
elif self.data[x] > self.data[y]:
self.data[x] = y
else:
self.data[x] -= 1
self.data[y] = x
self.size -= 1
N, M = [int(x) for x in sys.stdin.readline().rstrip().split()]
disjoint = DisjointSet(N)
que = []
for i in range(M):
a, b ,c = [int(x) for x in sys.stdin.readline().rstrip().split()]
heapq.heappush(que,(c, (a, b)))
heapq.heappush(que,(c, (b, a)))
s = 0
while que:
c,(a, b) = heapq.heappop(que)
if disjoint.find(a-1) != disjoint.find(b-1):
disjoint.union(a-1, b-1)
s+=c
print(s)
| true
|
96fc12803b116032be78a8775e0303ce1b7abba1
|
Python
|
sebastianhutteri/RamanFungiANN
|
/Code.py
|
UTF-8
| 8,249
| 2.65625
| 3
|
[] |
no_license
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
import os
import scipy as sp
from IPython.display import display
from ipywidgets import FloatProgress
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from sklearn.model_selection import train_test_split
import seaborn as sns
import datetime
class Classifier:
def __init__(self, samplefolder, wavenum, smoothness, asymmetry, nruns, layers, nodes, act, lr, opt, loss, epochs):
self.samplefolder = samplefolder
self.wavenum = wavenum
self.smoothness = smoothness
self.asymmetry = asymmetry
self.nruns = nruns
self.layers = layers
self.nodes = nodes
self.act = act
self.lr = lr
self.opt = opt
self.loss = loss
self.epochs = epochs
#The measurements of the fungal species were split over several files.
self.NSessions = [3, 3, 3, 3, 3, 3, 7, 7, 3, 3, 3, 7, 3, 3, 3, 3] #How many files each species has. Same order in list as the files are read by Python.
self.NSpecies = len(self.NSessions) #Number of different species.
NSpectra = [66, 66, 66, 66, 66, 66, 18, 18, 66, 66, 66, 18, 66, 66, 66, 66] #Number of spectra per individual species file.
Identity = np.identity(self.NSpecies) #Generates a one hot matrix of unit vectors used as validation data.
Y = []
for i in range(self.NSpecies):
for j in range(self.NSessions[i]):
Z = []
for k in range(NSpectra[i]):
Z.append(list(Identity[i]))
Y.append(Z)
self.Y = Y
self.SpeciesNames = ['AGP', 'AUP', 'CAC', 'CON20',
'CON25', 'C', 'EnzC2_24h3', 'EnzC2_48h2',
'GYM', 'G', 'LEPSP', 'NaOH3M',
'OP', 'PHALA', 'PSS', 'TEN'] #Labels for heatmap.
def ALSS(self, y, niter=10,): #ALSS normalization.
L = len(y)
D = sp.sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sp.sparse.spdiags(w, 0, L, L)
Z = W + self.smoothness * D.dot(D.transpose())
z = sp.sparse.linalg.spsolve(Z, w * y)
w = self.asymmetry * (y > z) + (1 - self.asymmetry) * (y < z)
return z
def FitData(self, fileindex): #Quadratic interpolation.
ALSSData = []
Data = pd.read_csv('{}/{}'.format(self.samplefolder, fileindex), sep=';', header=None)
for i in range(1, len(Data.columns)):
f = interp1d(Data[0], Data[i], kind='quadratic')
y1 = f(self.wavenum)
y2 = self.ALSS(y1)
y = y1 - y2
ALSSData.append(y)
return ALSSData
def ProcessData(self): #Interpolates and normalizes data using functions above, then stores it in a matrix.
fp = FloatProgress(min=0,max=len(os.listdir(self.samplefolder)))
display(fp)
DataMatrix = []
print('Storing data in DataMatrix...')
for i in sorted(os.listdir(self.samplefolder)):
DataMatrix.append(self.FitData(i))
fp.value += 1
self.DataMatrix = DataMatrix
print('Data stored.')
def XTrain(self, index): #Functions for slicing the data matrix into training and validation data.
return np.concatenate(self.DataMatrix[:index] + self.DataMatrix[index + 1:])
def YTrain(self, index):
return np.concatenate(self.Y[:index] + self.Y[index + 1:])
def XTest(self, index):
return np.array(self.DataMatrix[index])
def YTest(self, index):
return np.array(self.Y[index])
def ANN(self, index): #Constructs the ANN.
model = Sequential()
model.add(Dense(self.nodes, activation=self.act, input_dim=len(self.wavenum))) #Input layer.
for i in range(self.layers): #Adding selected number of hidden layers.
model.add(Dense(self.nodes, activation=self.act))
model.add(Dense(self.NSpecies, activation='softmax'))
model.compile(loss=self.loss, optimizer=self.opt, metrics=['accuracy'])
model.fit(self.XTrain(index), self.YTrain(index), epochs=self.epochs, batch_size=128) #Training the network.
#Classification.
predict = model.predict(self.XTest(index)) #Generates a prediction distrubution among classrs for every spectra.
predictclass = np.argmax(predict, axis=1) #Returns the indices of the classes with the highest prediction.
predictcount = np.zeros(self.NSpecies) #Array of zeros.
for i in predictclass:
predictcount[i] += 1 #Every max prediction adds 1 to its class.
return predictcount/sum(predictcount) #Normalizes the classifier distribution.
def SingleRun(self, run): #Single run of training and evaluating the network as well as generating a heatmap of the results.
Pred = []
for i in range(len(os.listdir(self.samplefolder))):
Pred.append(self.ANN(i))
Pred = np.array(Pred)
Now = datetime.datetime.now()
Stamp = ' {}{}{} {}.{}.{}'.format(Now.year, Now.month, Now.day, Now.hour, Now.minute, Now.second) #Time stamp.
np.savetxt('{}/{}'.format(self.FolderStamp, 'Pred' + Stamp), Pred) #Saving data.
Accuracy = []
for i in range(self.NSpecies):
Accuracy.append(sum(Pred[sum(self.NSessions[:i]):sum(self.NSessions[:i]) + self.NSessions[i]])/self.NSessions[i])
Accuracy = np.array(Accuracy)
AccuracyData = pd.DataFrame(Accuracy, columns=self.SpeciesNames, index=self.SpeciesNames) #Average accuracy for species.
np.savetxt('{}/{}'.format(self.FolderStamp, 'Acc {}'.format(run)), Accuracy)
plt.figure(figsize=(self.NSpecies,self.NSpecies))
sns.heatmap(AccuracyData, annot=True)
plt.savefig('{}/{}'.format(self.FolderStamp, 'Acc {}'.format(run) + '.png'))
Perf = []
for i in range(self.NSpecies):
Perf.append(Accuracy[i][i])
return sum(Perf)/self.NSpecies
def Run(self): #Running the single run multiple times.
Now = datetime.datetime.now()
FolderStamp = 'Run {}{}{} {}.{}.{}'.format(Now.year, Now.month, Now.day, Now.hour, Now.minute, Now.second) #Time stamp.
self.FolderStamp = FolderStamp
os.makedirs(self.FolderStamp)
parameters = 'WaveNumMin={}, WaveNumMax={}, WaveNumValues={}, ALSSSmoothness={}, ALSSAsymmetry={}, HiddenLayers={}, Nodes={}, Activation={}, LearningRate={}, Optimization={}, Loss={}, Epochs={}'.format(min(self.wavenum), max(self.wavenum), len(self.wavenum), self.smoothness, self.asymmetry, self.layers, self.nodes, self.act, self.lr, self.opt, self.loss, self.epochs)
p = open(self.FolderStamp + '/Parameters.txt', 'w+')
p.write(parameters)
p.close()
fp = FloatProgress(min=0,max=self.nruns)
display(fp)
print('Running network...')
PerfList = []
for i in range(self.nruns):
PerfList.append(self.SingleRun(i + 1))
fp.value += 1
np.savetxt(self.FolderStamp + '/Performance', PerfList)
return PerfList
#Parameters to bet set before running the code.
SampleFolder = 'Samples' #Folder name string of .csv-files.
WaveNum = np.linspace(1000, 1500, 500) #Numpy array of spectral data to be extracted.
Smoothness = 1e6 #Smoothness parameter of the ALSS.
Asymmetry = 0.001 #Asymmetry parameter of the ALSS.
NRuns = 5 #Number of training/evaluation iterations.
Layers = 4 #Number of hidden layers for the ANN.
Nodes = 32 #Number of nodes for the ANN.
Activation = 'relu' #Keras activation function string for input layer and hidden layers.
LearningRate = 1e-4 #Keras learning rate.
Optimization = Adam(lr=LearningRate) #Keras optimization algorithm string.
Loss = 'categorical_crossentropy' #Keras loss function string.
Epochs = 100 #Number of epochs.
#How to run code.
Pipeline = Classifier(SampleFolder, WaveNum, Smoothness, Asymmetry, NRuns, Layers, Nodes, Activation, LearningRate, Optimization, Loss, Epochs)
Pipeline.ProcessData()
Pipeline.Run()
| true
|
ade4c4056f78f0cb67f34fab26d996dffda73886
|
Python
|
slopey112/moomoo
|
/main.py
|
UTF-8
| 4,121
| 2.515625
| 3
|
[] |
no_license
|
from wrapper import Game
from model import Model
from color import get_heal
from time import sleep
from math import atan, pi
import datetime
import threading
directory = "/home/howardp/Documents/Code/moomoo"
g = Game("fatty", directory)
m = {
"tree": Model("tree_res_s", directory),
"food": Model("food", directory)
}
def explore(r):
stop_time = 1
axis = 0
pts = m[r].scan(str(g.screenshot()))
flag = False
while not pts:
segment = round(stop_time / 2)
for i in range(segment):
g.move(axis)
sleep(2)
g.stop()
pts = m[r].scan(str(g.screenshot()))
if pts:
flag = True
break
if flag:
break
stop_time *= 2
axis += -6 if axis == 6 else 2
pts = m[r].scan(str(g.screenshot()))
def auto(r):
# r = resource r = command[1]
screenshot_id = g.screenshot()
pts = m[r].scan(str(screenshot_id))
if len(pts) == 0:
return
# we don't want the point to be the upper left corner but to be in relative center
pt = (pts[0][0] + (m[r].w / 2), pts[0][1] + (m[r].h / 2))
origin = (g.width / 2, g.height / 2)
axis = get_axis(pt, origin)
r_initial = g.get_tree() if r == "tree" else (g.get_food() if r == "food" else g.get_stone())
resource = r_initial
g.move(axis)
g.set_axis(axis)
time = int(datetime.datetime.now().strftime("%s"))
while resource == r_initial and int(datetime.datetime.now().strftime("%s")) - time < 2:
resource = g.get_tree() if r == "tree" else (g.get_food() if r == "food" else g.get_stone())
g.stop()
def get_axis(pt, origin):
# We need to adjust pt such that the origin is not the top left corner but the center of the page
adj_pt = (pt[0] - origin[0], origin[1] - pt[1])
# Now we need to find what axial quadrant the point is located in (1..8)
# 360 / 8 = 45 deg per quadrant, shifted (45 / 2) deg down so the sector pads the radius
# First the quadrant:
if adj_pt[0] > 0:
if adj_pt[1] > 0:
quad = 1
else:
quad = 4
else:
if adj_pt[1] > 0:
quad = 2
else:
quad = 3
# atan will give us more than one possibility, first adjust to first quadrant
adj2_pt = (abs(adj_pt[0]), abs(adj_pt[1]))
theta = atan(adj2_pt[1] / adj2_pt[0]) * (180 / pi)
# Now adjust back to original quadrant
adj_theta = theta
if quad == 3:
adj_theta = 180 + theta
elif quad == 2 or quad == 4:
adj_theta = (quad * 90) - theta
# Now match to axial quadrant
if (adj_theta < 360 and adj_theta >= (360 - 22.5)) or (adj_theta > 0 and adj_theta < 22.5):
return 0
a = 22.5
b = a + 45
for i in range(7):
if adj_theta <= b and adj_theta > a:
return i + 1
a += 45
b += 45
def naive_algo():
def f():
while True:
if g.get_food() >= 10 and get_heal("{}/screenshots/{}.png".format(directory, str(g.screenshot()))):
print("Healing...")
g.heal()
def upgrade():
age_2 = False
age_3 = False
while True:
age = g.get_age()
if age == 2 and not age_2:
age_2 = True
g.upgrade("8")
elif age == 3 and not age_3:
age_3 = True
g.upgrade("17")
elif age == 4:
break
sleep(1)
t = threading.Thread(target=f)
t2 = threading.Thread(target=upgrade)
t.start()
t2.start()
food_init = g.get_food()
tree_init = g.get_tree()
while True:
food = g.get_food()
tree = g.get_tree()
if food < 500 and (food > food_init or tree > tree_init):
food_init = food
tree_init = tree
sleep(1)
continue
if food < 500 and food <= food_init:
food_init = g.get_food()
print("Exploring food")
explore("food")
print("Food found")
auto("food")
elif tree == tree_init:
tree_init = g.get_tree()
print("Exploring tree")
explore("tree")
print("Tree found")
auto("tree")
while True:
command = input().split()
if command[0] == "screenshot":
i = g.screenshot()
print(m.scan(str(i)))
elif command[0] == "move":
g.move(int(command[1]))
elif command[0] == "stop":
g.stop()
elif command[0] == "axis":
g.set_axis(int(command[1]))
elif command[0] == "close":
g.close()
break
elif command[0] =="heal":
g.heal()
elif command[0] == "auto":
auto(command[1])
elif command[0] == "explore":
explore(command[1])
elif command[0] == "algo":
naive_algo()
| true
|
d3dd659e8453627ca759df02d5222bd528ab98cb
|
Python
|
arules15/EECS4415Project2019
|
/app/steamproject.py
|
UTF-8
| 8,480
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/python
import numpy as np
import csv
from datetime import datetime
from pprint import pprint
import re
#import matplotlib.pyplot as plt
# plt.rcdefaults()
#import matplotlib.pyplot as plt
def wordOccurences(col, list, amount):
occur = dict()
    # i is the break counter, row is the value of the list which comes in a list of lists
for i, row in enumerate(list):
splitter = []
        # games usually have more than one genre separated by commas
splitter = row[col].split(',')
for word in splitter:
# sometimes a game has a '' value
if word not in '':
occur[word] = occur.get(word, 0) + 1
if i >= amount:
break
# sorts by the highest amount of occurences
occur = sorted(occur.items(), key=lambda x: x[1], reverse=True)
return occur
# Used for sorting total list of games by popularity of reviews
def sortSecond(val):
return val[1]
def sortDate(list):
# print(list)
list.sort(key=lambda x: datetime.strptime(x[10], '%b %d, %Y'))
return list
def getDeveloperGames(name, list, checkDate):
devList = []
for row in list:
if name == row[4]:
if checkDate:
if row[10] == 'NaN':
continue
elif re.sub(r"(\w+ \d{4})", '', row[10]) == '':
row[10] = re.sub(r"(\s)", ' 1, ', row[10])
devList.append(row)
else:
devList.append(row)
else:
devList.append(row)
return devList
def gameDateDevCorr(name, list):
simpleList = []
devList = getDeveloperGames(name, list, True)
sortedDevList = sortDate(devList)
# sortedDevList = devList.sort(key=lambda date: datetime.strptime(date, "%b %d, %y"))
for i in sortedDevList:
# simpleList.append([[i[0], [[i[1], i[2]], i[3]]], [[i[4], i[5]], i[10]]])
simpleList.append([i[0], i[1], i[2], i[10]])
# return sortedDevList
return simpleList
# def getGames(list):
# gameList = []
# for row in list:
# gameList.append(row[0])
# return gameList
# Choose which rows to filter from the whole list
# 0 = game name, 1 = Review Count, 2 = Percentage of Review Count, 3 = Recent Review List, 4 = Developers
# 5 = Publisher, 6 = Game Tags, 7 = Game Details, 8 = Genre, 9 = Price, 10 = Release Date
# Example getRows(list, 0, 1, 9) will give you the game name, review count and price
def getRows(list, *row):
newList = []
rowVal = []
for i in list:
for x in row:
rowVal.append(i[int(x)])
newList.append(rowVal)
rowVal = []
return newList
def main(var):
gamesReviews = []
with open('steam_games.csv') as csv_file:
# with open ('steam_games.csv',encoding="utf-8") as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
for i, row in enumerate(csv_reader):
# if i > 10:
# break
# row names: url, types, name, desc_snippet, recent_reviews, all_reviews,
# release_date, developer, publisher, popular_tags, game_details,
# languages, achievements, genre, game_description, mature_content,
# minimum_requirements, recommended_requirements, original_price,
# discount_price(wrong?)
# Reviews come in the form of
# 'Mostly Positive,(7,030),- 71% of the 7,030 user reviews for...'
# To get the total we have use two splitters as the total might
# contain a comma
if row['types'] != 'app':
continue
if 'Need more user reviews to generate a score' in row['all_reviews']:
continue
if row['all_reviews']:
splitterReview1 = row['all_reviews'].split(',(')
splitterReview2 = splitterReview1[1].split(')')
reviewCount = int(splitterReview2[0].replace(',', ''))
allReviews, extra1 = row['all_reviews'].split('- ')
percentageReview = extra1.split(' ')
percentage = percentageReview[0]
all_review_list = [reviewCount, percentage]
if row['recent_reviews']:
splitterReview1_recent = row['recent_reviews'].split(',(')
splitterReview2_recent = splitterReview1_recent[1].split(')')
reviewCount_recent = int(
splitterReview2_recent[0].replace(',', ''))
re_reviews, extra1_recent = row['recent_reviews'].split('- ')
percentageReview_recent = extra1_recent.split(' ')
percentage_recent = percentageReview_recent[0]
recent_review_list = [reviewCount_recent, percentage_recent]
if row['original_price']:
row['original_price'].lower()
if "$" in row['original_price']:
Price = row['original_price'].replace('$', '')
amount = float(Price)
row['original_price'] = amount
else:
row['original_price'] = row['original_price'].replace(
row['original_price'], 'Free')
if 'Downloadable Content' in row['game_details']:
continue
if var == 1:
if str(row['original_price']) in 'Free':
continue
if var == 2:
if str(row['original_price']) not in 'Free':
continue
gamesReviews.append([row['name'], reviewCount, percentage, recent_review_list,
row['developer'], row['publisher'], row['popular_tags'],
row['game_details'], row['genre'], row['original_price'],
row['release_date']])
gamesReviews.sort(key=sortSecond, reverse=True)
if var == 1: # paid games
return gamesReviews
elif var == 2: # free games
return gamesReviews
else:
return gamesReviews
# gamesReviews = sorted(gamesReviews.items(), key=lambda x: x[1], reverse=True)
# popList = []
popList = main(0)
popListPaid = main(1)
popListFree = main(2)
# print("List of Valve Games ordered from Release Date")
# pprint (gameDateDevCorr('Valve', popList))
# print("List of Bluehole, Inc. Games ordered from Release Date")
# pprint (gameDateDevCorr('Bluehole, Inc.', popList))
genreTotal = wordOccurences(8, popList, 100)
# print("TOP 100 games genre paid and free")
# print(genreTotal)
# print('\n')
# y_pos = np.arange(len(genreTotal))
objects = list(genreTotal)
objects, performance = zip(*objects)
# plt.bar(range(len(genreTotal)), list(genreTotal.value()), align='center', alpha=0.8)
# plt.xticks(range(len(genreTotal)), list(genreTotal.key()))
y_pos = np.arange(len(objects))
#plt.bar(y_pos, performance, align='center', alpha=0.5)
#plt.xticks(y_pos, objects)
#plt.ylabel('positive review in percentage')
#plt.title('Developer games rating')
# plt.show()
# genreTotalPaid = wordOccurences(8, popListPaid, 100)
# print("TOP 100 games genre paid")
# print(genreTotalPaid)
# print('\n')
# genreTotalFree = wordOccurences(8, popListFree, 100)
# print("TOP 100 games genre free")
# print(genreTotalFree)
# print('\n')
# detailsTotal = wordOccurences(7, popList, 100)
# print("TOP 100 games details paid and free")
# print(detailsTotal)
# print('\n')
# detailsTotalPaid = wordOccurences(7, popListPaid, 100)
# print("TOP 100 games details paid")
# print(detailsTotalPaid)
# print('\n')
# detailsTotalFree = wordOccurences(7, popListFree, 100)
# print("TOP 100 games details free")
# print(detailsTotalFree)
# print('\n')
# tagsTotal = wordOccurences(6, popList, 100)
# print("TOP 100 games tags paid and free")
# print(tagsTotal)
# print('\n')
# tagsTotalPaid = wordOccurences(6, popListPaid, 100)
# print("TOP 100 games tags paid")
# print(tagsTotalPaid)
# print('\n')
# tagsTotalFree = wordOccurences(6, popListFree, 100)
# print("TOP 100 games tags free")
# print(tagsTotalFree)
# print('\n')
# print('\n')
# print(getRows(popList, 0, 1))
# print("Top games Paid/Free")
# for i,x in enumerate(getGames(popList)):
# print(x)
# if i >= 9:
# print('\n')
# break
# print('\n')
# print("Testing rows")
# for i,x in enumerate(getRows(popList, 0, 1, 9)):
# print(x)
# if i >= 9:
# print('\n')
# break
| true
|
33c8c1e147b274f6e485ded0ee919f25db5a5165
|
Python
|
frvnkly/algorithm-practice
|
/leetcode/may-2020-challenge/day4/number_complement.py
|
UTF-8
| 1,867
| 4.125
| 4
|
[] |
no_license
|
# Given a positive integer, output its complement number. The complement strategy is to flip the bits of its binary representation.
# Example 1:
# Input: 5
# Output: 2
# Explanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.
# Example 2:
# Input: 1
# Output: 0
# Explanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.
# Note:
# The given integer is guaranteed to fit within the range of a 32-bit signed integer.
# You could assume no leading zero bit in the integer’s binary representation.
# This question is the same as 1009: https://leetcode.com/problems/complement-of-base-10-integer/
class Solution:
def to_binary(self, n: int) -> str:
p = 32
out = list()
while p >= 0:
x = 2**p
if x <= n:
out.append('1')
n -= x
elif len(out) > 0:
out.append('0')
p -= 1
return ''.join(out)
def find_binary_complement(self, binary: str) -> str:
binary_complement = list()
for c in binary:
if c == '1':
binary_complement.append('0')
else:
binary_complement.append('1')
return ''.join(binary_complement)
def to_base_ten(self, binary: str) -> int:
out = 0
p = 0
for i in range(len(binary) - 1, -1, -1):
if binary[i] == '1':
out += 2**p
p += 1
return out
def findComplement(self, num: int) -> int:
binary_num = self.to_binary(num)
binary_complement = self.find_binary_complement(binary_num)
complement = self.to_base_ten(binary_complement)
return complement
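# Illustrative usage sketch, added for clarity; it is not part of the original
# solution file and simply exercises the class defined above.
demo = Solution()
print(demo.findComplement(5))  # binary 101 -> complement 010 -> 2
print(demo.findComplement(1))  # binary 1 -> complement 0 -> 0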
| true
|
171bbee08930484cee0bbe9a1789f5731498cf09
|
Python
|
RyuZacki/PythonStudent
|
/PyGame/PyGameTest.py
|
UTF-8
| 291
| 2.609375
| 3
|
[] |
no_license
|
import pygame
pygame.init()
screen = pygame.display.set_mode((468, 60)) # Set up the display graphics mode
pygame.display.set_caption('Monkey Fever') # Window title
pygame.mouse.set_visible(0) # Hide the mouse cursor
| true
|
cadc4bd618cf4cfb6e75aa142d17c9da3fbcd6e9
|
Python
|
masakiaota/kyoupuro
|
/practice/green_diff/dwango2015_prelims_2/dwango2015_prelims_2.py
|
UTF-8
| 1,084
| 3.453125
| 3
|
[] |
no_license
|
# https://atcoder.jp/contests/dwango2015-prelims/tasks/dwango2015_prelims_2
# Replace every "25" with a single character, then run-length encode
# For each run of length n, (n+1)C2 is probably the answer
def run_length_encoding(s):
'''
Run-length encoding
s ... iterable object e.g. list, str
return
----------
s_composed, s_sum, s_idx
the compressed sequence, the length of each run, and the index where each run starts
'''
s_composed = []
s_sum = []
s_idx = [0]
pre = s[0]
cnt = 1
for i, ss in enumerate(s[1:], start=1):
if pre == ss:
cnt += 1
else:
s_sum.append(cnt)
s_composed.append(pre)
s_idx.append(i)
cnt = 1
pre = ss
s_sum.append(cnt)
s_composed.append(pre)
# assert len(s_sum) == len(s_composed)
return s_composed, s_sum, s_idx
S = input()
S = S.replace('25', 'x')
S_comp, S_num, S_idx = run_length_encoding(S)
ans = 0
for s, n in zip(S_comp, S_num):
if s == 'x':
ans += (n + 1) * (n) // 2
print(ans)
# print(S_comp, S_num)
| true
|
be0075bf9420f40c7d6b2499beff53e0a82e2d10
|
Python
|
nakmuayFarang/tf_objectDetection_Script
|
/1-preprocessing/1-createTestTrain.py
|
UTF-8
| 1,303
| 2.953125
| 3
|
[] |
no_license
|
"""Create training and test sample
80% for training, 20% for testing.
This script creates two text files containing the file names.
"""
import os
import random
import sys
import json
pathScript = str(os.path.dirname(os.path.abspath(__file__))) + '/'
os.chdir(pathScript)
param = '../' + 'param.json'
if not os.path.isfile(param):
with open(param,'w') as jsn:
jsn.write('{"pathData" : ""\n,"pathX" :"" \n,"pathAnnotation" : ""\n}')
assert False, "Fill param.json"
with open(param) as jsn:
jsn = json.load(jsn)
pathData = jsn["pathData"]
pathAnnotation = jsn['pathAnnotation'] + '{}'
assert os.path.exists(pathData),' "pathData": "{}" is not a valid path'.format(pathData)
assert os.path.exists(jsn['pathAnnotation']), ' "pathAnnotation": "{}" is not a valid path'.format(jsn['pathAnnotation'])
files = os.listdir(pathData)
files = list( map(lambda s: s.split('.')[0],files ) )#no file extension
random.shuffle(files)
ntrain = int( round(80 * len(files)/100,0))
train = files[0:ntrain]
test = files[ntrain:]
with open( pathAnnotation.format("train.txt"),'w') as t:
for x in train:
t.write(x + '\n')
print("train.txt created")
with open(pathAnnotation.format("test.txt"),'w') as t:
for x in test:
t.write(x + '\n')
print("test.txt created")
| true
|
423172b1d3339f63b8bb18be6dbf71d75f904653
|
Python
|
deepanshusachdeva5/Histogram-Equalization
|
/normal_equalizer.py
|
UTF-8
| 994
| 2.6875
| 3
|
[] |
no_license
|
import cv2
import argparse
import numpy as np
import matplotlib.pyplot as plt
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help='image to be processed')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
equalized_image = cv2.equalizeHist(gray)
hist_original = cv2.calcHist([gray], [0], None, [256], [0, 256])
plt.figure()
plt.title('Original image Histogram')
plt.plot(hist_original)
hist_equalized = cv2.calcHist([equalized_image], [0], None, [256], [0, 256])
plt.figure()
plt.title('Equalized image Histogram')
plt.plot(hist_equalized)
cv2.imshow("original gray", gray)
cv2.imshow("Equalized image", equalized_image)
equalized_image = cv2.resize(equalized_image, (300, 300))
gray = cv2.resize(gray, (300, 300))
cv2.imwrite("Equalized image.jpg", equalized_image)
cv2.imwrite("gray_original.jpg", gray)
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
bc0bc3e49cd69c88ad89f927d4a1811a814f4cbb
|
Python
|
priyatam0509/Automation-Testing
|
/app/features/network/core/fuel_disc_config.py
|
UTF-8
| 2,002
| 2.6875
| 3
|
[] |
no_license
|
from app import mws, system, Navi
import logging
class FuelDiscountConfiguration:
"""
Core fuel discount config feature class. Supports Concord network. To be extended and overridden for other networks
where needed.
"""
def __init__(self):
self.log = logging.getLogger()
self.navigate_to()
@staticmethod
def navigate_to():
return Navi.navigate_to("Fuel Discount Configuration")
def change(self, config):
"""
Changes the configuration of the Fuel Discount Configuration.
Args:
config: The dictionary of values being added.
Returns:
True: If the values were successfully set.
False: If the values could not be changed.
Examples:
\code
fd_info = {
"Gulf": "NONE",
"JCB": "NONE"
}
fd = fuel_disc_config.FuelDiscountConfiguration()
if not fd.change(fd_info):
mws.recover()
tc_fail("Could not set the configuration")
True
\endcode
"""
#Select the card we're configuring
for card in config:
if not mws.set_value("Cards", card):
self.log.error(f"Failed to find card, {card}, in the list")
system.takescreenshot()
return False
if not mws.set_value("Discount Group", config[card]):
self.log.error(f"Could not set Discount Group with {config[card]}")
system.takescreenshot()
return False
try:
mws.click_toolbar("Save", main = True)
return True
except:
self.log.error("Failed to navigate back to Splash Screen")
error_msg = mws.get_top_bar_text()
if error_msg is not None:
self.log.error(f"Error message: {error_msg}")
system.takescreenshot()
return False
| true
|
37d46c03ae3368ac1b7f96b6b023e914155ae010
|
Python
|
trwn/psych_punks
|
/metadata.py
|
UTF-8
| 492
| 2.640625
| 3
|
[] |
no_license
|
import csv
import os
import json
dirname = os.path.dirname(__file__)
csv_file = open(dirname + '/data/punks.csv','r')
csv_reader = csv.DictReader(csv_file,fieldnames = ('Name:','Background:', 'Type:', 'Mouth:', 'Accessory:', 'Eyes:', 'Hair:', 'Beard:', 'Psych DNA:'))
lcount = 0
for row in csv_reader:
out = json.dumps(row, indent=4)
jsonoutput = open(dirname + '/fin/PsychPunk'+str(lcount)+'.json','w')
jsonoutput.write(out)
jsonoutput.close()  # close each output file before opening the next one
lcount+=1
csv_file.close()
| true
|
462e29ff5c330c8f58700a125680ab57264d2142
|
Python
|
herohunfer/leet
|
/1042.py
|
UTF-8
| 813
| 2.84375
| 3
|
[] |
no_license
|
from typing import List
class Solution:
def gardenNoAdj(self, N: int, paths: List[List[int]]) -> List[int]:
m = {}
for p in paths:
i = min(p[0], p[1])
j = max(p[0], p[1])
if j-1 in m:
m[j-1].append(i-1)
else:
m[j-1] = [i-1]
return self.dfs(N, [], m)
def dfs(self, N, result, m):
if len(result) == N:
return result
available = set([1,2,3,4])
current = len(result)
if current in m:
for i in m[current]:
available.discard(result[i])
for i in available:
result.append(i)
self.dfs(N, result, m)
if len(result) == N:
return result
result.pop()
return result
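# Illustrative usage sketch, added for clarity (not part of the original file):
# four gardens with paths 1-2, 2-3 and 3-4; any valid colouring is accepted.
solver = Solution()
print(solver.gardenNoAdj(4, [[1, 2], [2, 3], [3, 4]]))  # e.g. [1, 2, 1, 2]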
| true
|
a32dea39ad7f8a473b418775b98e9de5dd77bb1f
|
Python
|
masopust/TicTacToe
|
/Tester/testSquare.py
|
UTF-8
| 2,504
| 2.90625
| 3
|
[] |
no_license
|
import unittest
try:
from TicTacToe import square
from TicTacToe import field as playingField
from TicTacToe import endValidator
from TicTacToe import clickManager
except ModuleNotFoundError:
import square
import field as playingField
import endValidator
import clickManager
class SquareTester(unittest.TestCase):
def test_incorrect_creation(self):
input_x = "wrong input"
input_y = 1
input_padding = 30
self.assertRaises(ValueError, square.Square, input_x, input_y, input_padding)
self.assertRaises(ValueError, square.Square, input_y, input_x, input_padding)
self.assertRaises(ValueError, square.Square, input_x, input_y, input_x)
def test_correct_creation(self):
input_x = 1
input_y = 1
input_padding = 30
square.Square(input_x, input_y, input_padding)
self.assertTrue("Class creation successful!")
def test_correct_inside_click(self):
input_x = 1
input_y = 1
sq = square.Square(0, 0, 2)
self.assertEqual(True, sq.click_inside(input_x, input_y))
self.assertEqual(True, sq.click_inside(input_y, input_x))
def test_correct_outside_click(self):
input_x = 100
input_y = 1
sq = square.Square(0, 0, 2)
self.assertEqual(False, sq.click_inside(input_x, input_y))
self.assertEqual(False, sq.click_inside(input_y, input_x))
def test_incorrect_click(self):
input_x = "wrong_input"
input_y = 1
sq = square.Square(0, 0, 2)
self.assertRaises(TypeError, sq.click_inside, input_x, input_y)
self.assertRaises(TypeError, sq.click_inside, input_y, input_x)
def test_correct_click_creation(self):
input_field = playingField.Field()
input_validator = endValidator.EndValidator(input_field)
clickManager.ClickManager(input_field, input_validator)
self.assertTrue("Class creation successful!")
def test_incorrect_switch(self):
input_x = "wrong_input"
input_y = 1
input_field = playingField.Field()
input_validator = endValidator.EndValidator(input_field)
cm = clickManager.ClickManager(input_field, input_validator)
self.assertRaises(TypeError, cm.switch_turns, input_x, input_y)
self.assertRaises(TypeError, cm.switch_turns, input_y, input_x)
if __name__ == "__main__":
unittest.main()
| true
|
3412b6a972ab225295fb2ea1c72c381a535504d1
|
Python
|
WonkySpecs/link-prediction
|
/AUC_measures.py
|
UTF-8
| 12,601
| 2.671875
| 3
|
[] |
no_license
|
import random
import math
import networkx as nx
import numpy as np
#For indices whose scores can be determined with matrix calculations, it is viable to
#find the scores of all edges.
def mat_AUC_score(score_mat, test_edges, non_edges, nodelist):
total = 0
for i in range(len(non_edges)):
missing_edge = test_edges[i]
non_edge = non_edges[i]
non_edge_score = score_mat[nodelist.index(non_edge[0]), nodelist.index(non_edge[1])]
missing_edge_score = score_mat[nodelist.index(missing_edge[0]), nodelist.index(missing_edge[1])]
if missing_edge_score > non_edge_score:
total += 1
elif missing_edge_score == non_edge_score:
total += 0.5
return total / float(len(non_edges))
#These indices require more processing than just looking up matrix elements
def extra_mat_AUC_score(cn_mat, train_graph, test_edges, nodelist, non_edges, index):
total = 0
for non_edge, missing_edge in zip(non_edges, test_edges):
u_non = nodelist.index(non_edge[0])
v_non = nodelist.index(non_edge[1])
u_miss = nodelist.index(missing_edge[0])
v_miss = nodelist.index(missing_edge[1])
with np.errstate(all = "raise"):
if index == "jaccard":
non_edge_denom = len(set(train_graph[non_edge[0]]) | set(train_graph[non_edge[1]]))
missing_edge_denom = len(set(train_graph[missing_edge[0]]) | set(train_graph[missing_edge[1]]))
elif index == "lhn1":
non_edge_denom = len(train_graph[non_edge[0]]) * len((train_graph[non_edge[1]]))
missing_edge_denom = len(train_graph[missing_edge[0]]) * len((train_graph[missing_edge[1]]))
elif index == "salton":
non_edge_denom = math.sqrt(len(train_graph[non_edge[0]]) * len((train_graph[non_edge[1]])))
missing_edge_denom = math.sqrt(len(train_graph[missing_edge[0]]) * len((train_graph[missing_edge[1]])))
elif index == "sorensen":
non_edge_denom = 0.5 * (len(train_graph[non_edge[0]]) + len((train_graph[non_edge[1]])))
missing_edge_denom = 0.5 * (len(train_graph[missing_edge[0]]) + len((train_graph[missing_edge[1]])))
elif index == "hpi":
non_edge_denom = min(len(train_graph[non_edge[0]]), len((train_graph[non_edge[1]])))
missing_edge_denom = min(len(train_graph[missing_edge[0]]), len((train_graph[missing_edge[1]])))
elif index == "hdi":
non_edge_denom = max(len(train_graph[non_edge[0]]), len((train_graph[non_edge[1]])))
missing_edge_denom = max(len(train_graph[missing_edge[0]]), len((train_graph[missing_edge[1]])))
else:
raise ValueError("{} is not a valid index for extra_mat_AUC_score()".format(index))
if non_edge_denom > 0:
non_edge_score = cn_mat[u_non, v_non] / non_edge_denom
else:
non_edge_score = 0
if missing_edge_denom > 0:
missing_edge_score = cn_mat[u_miss, v_miss] / missing_edge_denom
else:
missing_edge_score = 0
if missing_edge_score > non_edge_score:
total += 1
elif missing_edge_score == non_edge_score:
total += 0.5
return total / float(len(non_edges))
def pa_AUC_score(train_graph, test_edges, non_edges):
total = 0
for non_edge, missing_edge in zip(non_edges, test_edges):
non_edge_score = len(train_graph[non_edge[0]]) * len(train_graph[non_edge[1]])
missing_edge_score = len(train_graph[missing_edge[0]]) * len(train_graph[missing_edge[1]])
if missing_edge_score > non_edge_score:
total += 1
elif missing_edge_score == non_edge_score:
total += 0.5
return total / float(len(non_edges))
def aa_ra_AUC_score(train_graph, test_edges, non_edges, index, parameter = None):
total = 0
for non_edge, missing_edge in zip(non_edges, test_edges):
if index == "aa":
try:
non_edge_score = sum([1 / math.log(len(train_graph[n])) for n in nx.common_neighbors(train_graph, non_edge[0], non_edge[1])])
except ZeroDivisionError:
non_edge_score = 0
try:
missing_edge_score = sum([1 / math.log(len(train_graph[n])) for n in nx.common_neighbors(train_graph, missing_edge[0], missing_edge[1])])
except ZeroDivisionError:
missing_edge_score = 0
elif index == "ra":
try:
non_edge_score = sum([1 / len(train_graph[n]) for n in nx.common_neighbors(train_graph, non_edge[0], non_edge[1])])
except ZeroDivisionError:
non_edge_score = 0
try:
missing_edge_score = sum([1 / len(train_graph[n]) for n in nx.common_neighbors(train_graph, missing_edge[0], missing_edge[1])])
except ZeroDivisionError:
missing_edge_score = 0
#Resource Allocation extended
#Similarity score between 2 nodes is RA + a small contribution from nodes on length 3 paths between the endpoints
elif index == "ra_e":
non_edge_cn = nx.common_neighbors(train_graph, non_edge[0], non_edge[1])
path_3_nodes = set()
#Get all nodes that are a neighbour of exactly 1 end point
non_edge_other_neighbours_0 = set(train_graph[non_edge[0]]) - set(non_edge_cn)
non_edge_other_neighbours_1 = set(train_graph[non_edge[1]]) - set(non_edge_cn)
#Find all nodes on length 3 paths between the endpoints
for neighbour in non_edge_other_neighbours_0:
#If these nodes have neighbours that are neighbours of the other endpoint, they are on a path of length 3
if set(train_graph[neighbour]) & (non_edge_other_neighbours_1 | set(non_edge_cn)):
path_3_nodes.add(neighbour)
for neighbour in non_edge_other_neighbours_1:
if set(train_graph[neighbour]) & (non_edge_other_neighbours_0 | set(non_edge_cn)):
path_3_nodes.add(neighbour)
non_edge_score = 0
try:
non_edge_score = sum([1 / len(train_graph[n]) for n in non_edge_cn])
except ZeroDivisionError:
pass
try:
non_edge_score += parameter * sum([1 / len(train_graph[n]) for n in path_3_nodes])
except ZeroDivisionError:
pass
#Repeat for missing edge
missing_edge_cn = nx.common_neighbors(train_graph, missing_edge[0], missing_edge[1])
path_3_nodes = set()
#Get all nodes that are a neighbour of exactly 1 end point
missing_edge_other_neighbours_0 = set(train_graph[missing_edge[0]]) - set(missing_edge_cn)
missing_edge_other_neighbours_1 = set(train_graph[missing_edge[1]]) - set(missing_edge_cn)
for neighbour in missing_edge_other_neighbours_0:
#If these nodes have neighbours that are neighbours of the other endpoint, they are on a path of length 3
if set(train_graph[neighbour]) & (missing_edge_other_neighbours_1 | set(missing_edge_cn)):
path_3_nodes.add(neighbour)
for neighbour in missing_edge_other_neighbours_1:
if set(train_graph[neighbour]) & (missing_edge_other_neighbours_0 | set(missing_edge_cn)):
path_3_nodes.add(neighbour)
missing_edge_score = 0
try:
missing_edge_score = sum([1 / len(train_graph[n]) for n in missing_edge_cn])
except ZeroDivisionError:
pass
try:
missing_edge_score += parameter * sum([1 / len(train_graph[n]) for n in path_3_nodes])
except ZeroDivisionError:
pass
#Very similar to ra_e but takes into account the number of paths each node is on
elif index == "ra_e2":
non_edge_cn = nx.common_neighbors(train_graph, non_edge[0], non_edge[1])
#Get all nodes that are a neighbour of exactly 1 end point
non_edge_other_neighbours_0 = set(train_graph[non_edge[0]]) - set(non_edge_cn)
non_edge_other_neighbours_1 = set(train_graph[non_edge[1]]) - set(non_edge_cn)
non_edge_score = 0
try:
non_edge_score = sum([1 / len(train_graph[n]) for n in non_edge_cn])
except ZeroDivisionError:
pass
for neighbour in non_edge_other_neighbours_0:
#If these nodes have neighbours that are neighbours of the other endpoint, they are on a path of length 3
try:
non_edge_score += (parameter * len(set(train_graph[neighbour]) & (non_edge_other_neighbours_1 | set(non_edge_cn)))) / len(train_graph[neighbour])
except ZeroDivisionError:
pass
for neighbour in non_edge_other_neighbours_1:
try:
non_edge_score += (parameter * len(set(train_graph[neighbour]) & (non_edge_other_neighbours_0 | set(non_edge_cn)))) / len(train_graph[neighbour])
except ZeroDivisionError:
pass
#Repeat for missing edge
missing_edge_cn = nx.common_neighbors(train_graph, missing_edge[0], missing_edge[1])
#Get all nodes that are a neighbour of exactly 1 end point
missing_edge_other_neighbours_0 = set(train_graph[missing_edge[0]]) - set(missing_edge_cn)
missing_edge_other_neighbours_1 = set(train_graph[missing_edge[1]]) - set(missing_edge_cn)
missing_edge_score = 0
try:
missing_edge_score = sum([1 / len(train_graph[n]) for n in missing_edge_cn])
except ZeroDivisionError:
pass
for neighbour in missing_edge_other_neighbours_0:
#If these nodes have neighbours that are neighbours of the other endpoint, they are on a path of length 3
try:
missing_edge_score += (parameter * len(set(train_graph[neighbour]) & (missing_edge_other_neighbours_1 | set(missing_edge_cn)))) / len(train_graph[neighbour])
except ZeroDivisionError:
pass
for neighbour in missing_edge_other_neighbours_1:
try:
missing_edge_score += (parameter * len(set(train_graph[neighbour]) & (missing_edge_other_neighbours_0 | set(missing_edge_cn)))) / len(train_graph[neighbour])
except ZeroDivisionError:
pass
if missing_edge_score > non_edge_score:
total += 1
elif missing_edge_score == non_edge_score:
total += 0.5
return total / float(len(non_edges))
def experimental_AUC_score(train_graph, test_edges, nodelist, lp_mat, non_edges, index):
total = 0
for non_edge, missing_edge in zip(non_edges, test_edges):
u_non = nodelist.index(non_edge[0])
v_non = nodelist.index(non_edge[1])
u_miss = nodelist.index(missing_edge[0])
v_miss = nodelist.index(missing_edge[1])
with np.errstate(all = "raise"):
if index == "lhn1_e":
non_edge_denom = len(train_graph[non_edge[0]]) * len((train_graph[non_edge[1]]))
missing_edge_denom = len(train_graph[missing_edge[0]]) * len((train_graph[missing_edge[1]]))
elif index == "salton_e":
non_edge_denom = math.sqrt(len(train_graph[non_edge[0]]) * len((train_graph[non_edge[1]])))
missing_edge_denom = math.sqrt(len(train_graph[missing_edge[0]]) * len((train_graph[missing_edge[1]])))
elif index == "hpi_e":
non_edge_denom = min(len(train_graph[non_edge[0]]), len((train_graph[non_edge[1]])))
missing_edge_denom = min(len(train_graph[missing_edge[0]]), len((train_graph[missing_edge[1]])))
elif index == "hdi_e":
non_edge_denom = max(len(train_graph[non_edge[0]]), len((train_graph[non_edge[1]])))
missing_edge_denom = max(len(train_graph[missing_edge[0]]), len((train_graph[missing_edge[1]])))
else:
raise ValueError("{} is not a valid index for experimental_AUC_score()".format(index))
if non_edge_denom > 0:
non_edge_score = lp_mat[u_non, v_non] / non_edge_denom
else:
non_edge_score = 0
if missing_edge_denom > 0:
missing_edge_score = lp_mat[u_miss, v_miss] / missing_edge_denom
else:
missing_edge_score = 0
if missing_edge_score > non_edge_score:
total += 1
elif missing_edge_score == non_edge_score:
total += 0.5
return total / float(len(non_edges))
def rw_AUC_score(train_graph, test_edges, non_edges, index):
total = 0
a_mat = nx.adjacency_matrix(train_graph)
row_sums = a_mat.sum(axis = 1)
#If a node has become an isolate during k-fold, row sum will be 0 which causes an division error
with np.errstate(invalid = "ignore"):
transition_matrix = a_mat / row_sums
#Division errors put nan into matrix, replace nans with 0 (no chance of transition)
transition_matrix = np.nan_to_num(transition_matrix)
transition_matrix = np.transpose(transition_matrix)
score_mat = np.eye((transition_matrix.shape[0]))
max_diff = 1
count = 0
print(train_graph[train_graph.nodes()[750]])
print(train_graph['92'])
print(train_graph['639'])
while max_diff > 0.01:
old_mat = score_mat
score_mat = np.dot(transition_matrix, score_mat)
diff_mat = abs(old_mat - score_mat)
max_diff = np.amax(diff_mat)
i, j = np.unravel_index(diff_mat.argmax(), diff_mat.shape)
print(score_mat[i, j])
count += 1
nodelist = list(train_graph.nodes())
for non_edge, missing_edge in zip(non_edges, test_edges):
u_non = nodelist.index(non_edge[0])
v_non = nodelist.index(non_edge[1])
u_miss = nodelist.index(missing_edge[0])
v_miss = nodelist.index(missing_edge[1])
s_non = score_mat[u_non, v_non] + score_mat[v_non, u_non]
s_miss = score_mat[u_miss, v_miss] + score_mat[v_miss, u_miss]
if s_miss > s_non:
total += 1
elif s_miss == s_non:
total += 0.5
if index == "rw":
pass
elif index == "rwr":
pass
return total / float(len(non_edges))
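# Illustrative usage sketch, added for clarity; it is not part of the original module.
# The toy split below only stands in for whatever train/test split the caller provides.
example_graph = nx.karate_club_graph()
example_nodes = list(example_graph.nodes())
example_test_edges = list(example_graph.edges())[:5]
example_train = example_graph.copy()
example_train.remove_edges_from(example_test_edges)
example_non_edges = list(nx.non_edges(example_train))[:5]
adjacency = nx.adjacency_matrix(example_train, nodelist=example_nodes)
common_neighbour_mat = adjacency.dot(adjacency).todense()  # common-neighbours scores
print(mat_AUC_score(common_neighbour_mat, example_test_edges, example_non_edges, example_nodes))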
| true
|
d2f8751575f106b26dbf5b3a90dfd649874efff7
|
Python
|
carlos-novak/kant
|
/kant/events/serializers.py
|
UTF-8
| 552
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
from json import JSONEncoder, JSONDecoder
from .models import EventModel
class EventModelEncoder(JSONEncoder):
"""
A class serializer for EventModel to be converted to json
>>> found_added = FoundAdded(amount=25.5)
>>> isinstance(found_added, EventModel)
True
>>> json.dumps(found_added, cls=EventModelEncoder)
'{"$version": 0, "amount": 25.5, "$type": "FoundAdded"}'
"""
def default(self, obj):
if isinstance(obj, EventModel):
return obj.decode()
return JSONEncoder.default(self, obj)
| true
|
47caa94b5b665cc31fc4e5490e4b3b8729686efa
|
Python
|
vishalb007/Assignment15
|
/Assignment15.py
|
UTF-8
| 565
| 2.984375
| 3
|
[] |
no_license
|
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import datasets
def wine_classifier():
wine_data=datasets.load_wine()
xtrain,xtest,ytrain,ytest=train_test_split(wine_data.data,wine_data.target,test_size=0.3)
model=KNeighborsClassifier(n_neighbors=3)
model.fit(xtrain,ytrain)
predict=model.predict(xtest)
print("Accuracy is : ",accuracy_score(ytest,predict))
def main():
wine_classifier()
if __name__=="__main__":
main()
| true
|
fb72ddbf6d13377e193229eeb601991545f35baa
|
Python
|
guenthermi/dwtc-geo-parser
|
/coverageScores.py
|
UTF-8
| 1,768
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import ujson as json
import sys
import copy
# make sure that there is no cycle in the graph!!
MAX_ITERATION = 1000 # maximal number of nodes (to prevent infinite loops)
class CoverageTree:
def __init__(self, config):
f = open(config, 'r')
data = json.loads(''.join(f.readlines()))
self.origin = self._load_tree(data["0"], data)
self.node_lookup = self._create_lookup()
def _load_tree(self, node, data):
count = 0
result = dict()
for key in node:
if key == "successors":
succs = []
for id in node["successors"]:
count += 1
if count < MAX_ITERATION:
succs.append(self._load_tree(data[id], data))
else:
print('ERROR: Maximal number of nodes reached. Either '
+ 'your graph has cycles or there are simply too '
+ 'many nodes', file=sys.stderr)
result["successors"] = succs
else:
result[key] = node[key]
return result
def _create_lookup(self):
result = dict()
paths = [[copy.deepcopy(self.origin)]]
found = True
while found:
found = False
new_paths = []
for path in paths:
if path[-1]['successors']:
for succ in path[-1]['successors']:
new_paths.append(path + [succ])
found = True
else:
new_paths.append(path)
paths = new_paths
for path in paths:
for entry in path:
if 'successors' in entry:
del entry['successors']
result[path[-1]['name']] = path
return result
def get_origin(self):
return self.origin
def get_lookup(self):
return self.node_lookup
def main(argc, argv):
if argc > 1:
tree = CoverageTree(argv[1])
lookup = tree.get_lookup()
for key in lookup:
print(key, lookup[key])
else:
print('config file missing')
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
| true
|
b3f8ea56bd975bde4f3a8e1abf035c65f4d0143f
|
Python
|
monpeco/python-tribble
|
/so-documentation/mutable-object-02.py
|
UTF-8
| 261
| 4.28125
| 4
|
[] |
no_license
|
x = y = [7, 8, 9] # x and y refer to the same list i.e. refer to same memory location
x[0] = 13 # now we are replacing first element of x with 13 (memory location for x unchanged)
print(x)
print(y) # this time y changed!
# Out: [13, 8, 9]
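# Illustrative follow-up sketch, added for clarity (not part of the original snippet):
# making an explicit copy breaks the aliasing shown above.
a = [7, 8, 9]
b = list(a) # b is a new, independent list object
a[0] = 13
print(a) # [13, 8, 9]
print(b) # [7, 8, 9]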
| true
|
112d7520299ef8680b35fef41bb58993df5809b0
|
Python
|
qwertyuiop6/Python-tools
|
/simple_crawl/douban.py
|
UTF-8
| 394
| 2.640625
| 3
|
[] |
no_license
|
import requests
url='https://movie.douban.com/j/new_search_subjects?sort=T&range=0,10&tags=&start=0'
def geturl(mvtype='科幻'):
mvurl=[]
web_data = requests.get(url+'&genres='+mvtype).json()
data=web_data.get('data')
print(data)
for item in data:
mvurl.append(item.get('url'))
print(mvurl)
return mvurl
if __name__ == '__main__':
geturl()
| true
|
70a52247b5caba772d4ea48a7d945d5b40884de2
|
Python
|
michal93cz/calculator-python
|
/main.py
|
UTF-8
| 269
| 2.859375
| 3
|
[] |
no_license
|
from add import Add
from subtract import Subtract
from multiple import Multiple
from divide import Divide
operation1 = Add()
operation2 = Subtract(operation1)
operation3 = Divide(operation2)
operation4 = Multiple(operation3)
print(operation4.handle_request("2 + 3"))
| true
|
13abe431c0ae00b8366f6519a624a539510bd256
|
Python
|
perovai/deepkoopman
|
/aiphysim/models/unet.py
|
UTF-8
| 10,037
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class ResBlock3D(nn.Module):
"""3D convolutional Residue Block. Maintains same resolution."""
def __init__(self, in_channels, neck_channels, out_channels, final_relu=True):
"""Initialization.
Args:
in_channels: int, number of input channels.
neck_channels: int, number of channels in bottleneck layer.
out_channels: int, number of output channels.
final_relu: bool, add relu to the last layer.
"""
super(ResBlock3D, self).__init__()
self.in_channels = in_channels
self.neck_channels = neck_channels
self.out_channels = out_channels
self.conv1 = nn.Conv3d(in_channels, neck_channels, kernel_size=1, stride=1)
self.conv2 = nn.Conv3d(
neck_channels, neck_channels, kernel_size=3, stride=1, padding=1
)
self.conv3 = nn.Conv3d(neck_channels, out_channels, kernel_size=1, stride=1)
self.bn1 = nn.BatchNorm3d(num_features=neck_channels)
self.bn2 = nn.BatchNorm3d(num_features=neck_channels)
self.bn3 = nn.BatchNorm3d(num_features=out_channels)
self.shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1)
self.final_relu = final_relu
def forward(self, x): # pylint:
identity = x
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x += self.shortcut(identity)
if self.final_relu:
x = F.relu(x)
return x
class UNet3d(nn.Module): # pylint: disable=too-many-instance-attributes
"""UNet that consumes even dimension grid and outputs odd dimension grid."""
def __init__(
self,
in_features=4,
out_features=32,
igres=(4, 32, 32),
ogres=None,
nf=16,
mf=512,
):
"""initialize 3D UNet.
Args:
in_features: int, number of input features.
out_features: int, number of output features.
igres: tuple, input grid resolution in each dimension. each dimension must be integer
powers of 2.
ogres: tuple, output grid resolution in each dimension. each dimension must be integer
powers of 2. #NOTE for now must be same as igres or must be 2^k multipliers of igres.
nf: int, number of base feature layers.
mf: int, a cap for max number of feature layers throughout the network.
"""
super(UNet3d, self).__init__()
self.igres = igres
self.nf = nf
self.mf = mf
self.in_features = in_features
self.out_features = out_features
# for now ogres must be igres, else not implemented
if ogres is None:
self.ogres = self.igres
else:
self.ogres = ogres
# assert integer multipliers of igres
mul = np.array(self.ogres) / np.array(self.igres)
fac = np.log2(mul)
if not np.allclose(fac % 1, np.zeros_like(fac)):
raise ValueError(
"ogres must be 2^k times greater than igres where k >= 0. "
"Instead igres: {}, ogres: {}".format(igres, ogres)
)
if not np.all(fac >= 0):
raise ValueError(
"ogres must be greater or equal to igres. "
"Instead igres: {}, ogres: {}".format(igres, ogres)
)
self.exp_fac = fac.astype(np.int32)
if not np.allclose(self.exp_fac, np.zeros_like(self.exp_fac)):
self.expand = True
else:
self.expand = False
# assert dimensions acceptable
if isinstance(self.igres, int):
self.igres = tuple([self.igres] * 3)
if isinstance(self.ogres, int):
self.ogres = tuple([self.ogres] * 3)
self._check_grid_res()
self.li = math.log(np.max(np.array(self.igres)), 2) # input layers
self.lo = math.log(np.max(np.array(self.ogres)), 2) # output layers
assert self.li % 1 == 0
assert self.lo % 1 == 0
self.li = int(self.li) # number of input levels
self.lo = int(self.lo) # number of output levels
self._create_layers()
def _check_grid_res(self):
# check type
if not (hasattr(self.igres, "__len__") and hasattr(self.ogres, "__len__")):
raise TypeError("igres and ogres must be tuples for grid dimensions")
# check size
if not (len(self.igres) == 3 and len(self.ogres) == 3):
raise ValueError(
"igres and ogres must have len = 3, however detected to be"
"{} and {}".format(len(self.igres), len(self.ogres))
)
# check powers of 2
for d in list(self.igres) + list(self.ogres):
if not (math.log(d, 2) % 1 == 0 and np.issubdtype(type(d), np.integer)):
raise ValueError(
"dimensions in igres and ogres must be integer powers of 2."
"instead they are {} and {}.".format(self.igres, self.ogres)
)
def _create_layers(self):
# num. features in downward path
nfeat_down_out = [self.nf * (2 ** (i + 1)) for i in range(self.li)]
# cap the maximum number of feature layers
nfeat_down_out = [n if n <= self.mf else self.mf for n in nfeat_down_out]
nfeat_down_in = [self.nf] + nfeat_down_out[:-1]
# num. features in upward path
# self.nfeat_up = nfeat_down_out[::-1][:self.lo]
nfeat_up_in = [int(n * 2) for n in nfeat_down_in[::-1][:-1]]
nfeat_up_out = nfeat_down_in[::-1][1:]
self.conv_in = ResBlock3D(self.in_features, self.nf, self.nf)
self.conv_out = ResBlock3D(
nfeat_down_in[0] * 2,
nfeat_down_in[0] * 2,
self.out_features,
final_relu=False,
)
self.conv_mid = ResBlock3D(
nfeat_down_out[-1], nfeat_down_out[-2], nfeat_down_out[-2]
)
self.down_modules = [
ResBlock3D(n_in, int(n / 2), n)
for n_in, n in zip(nfeat_down_in, nfeat_down_out)
]
self.up_modules = [
ResBlock3D(n_in, n, n) for n_in, n in zip(nfeat_up_in, nfeat_up_out)
]
self.down_pools = []
self.up_interps = []
prev_layer_dims = np.array(self.igres)
for _ in range(len(nfeat_down_out)):
pool_kernel_size, next_layer_dims = self._get_pool_kernel_size(
prev_layer_dims
)
pool_layer = nn.MaxPool3d(pool_kernel_size)
# use the reverse op in the upward branch
upsamp_layer = nn.Upsample(scale_factor=tuple(pool_kernel_size))
self.down_pools.append(pool_layer)
self.up_interps = [upsamp_layer] + self.up_interps # add to front
prev_layer_dims = next_layer_dims
# create expansion modules
if self.expand:
n_exp = np.max(self.exp_fac)
# self.exp_modules = [ResBlock3D(2*self.nf, self.nf, self.nf)]
# self.exp_modules = self.exp_modules + [ResBlock3D(self.nf, self.nf, self.nf) for _ in range(n_exp-1)]
self.exp_modules = [
ResBlock3D(2 * self.nf, 2 * self.nf, 2 * self.nf) for _ in range(n_exp)
]
self.exp_interps = []
for _ in range(n_exp):
exp_kernel_size, self.exp_fac = self._get_exp_kernel_size(self.exp_fac)
self.exp_interps.append(
nn.Upsample(scale_factor=tuple(exp_kernel_size))
)
self.exp_interps = nn.ModuleList(self.exp_interps)
self.exp_modules = nn.ModuleList(self.exp_modules)
self.down_modules = nn.ModuleList(self.down_modules)
self.up_modules = nn.ModuleList(self.up_modules)
self.down_pools = nn.ModuleList(self.down_pools)
self.up_interps = nn.ModuleList(self.up_interps)
@staticmethod
def _get_pool_kernel_size(prev_layer_dims):
if np.all(prev_layer_dims == np.min(prev_layer_dims)):
next_layer_dims = (prev_layer_dims / 2).astype(np.int)
pool_kernel_size = [2, 2, 2]
else:
min_dim = np.min(prev_layer_dims)
pool_kernel_size = [1 if d == min_dim else 2 for d in prev_layer_dims]
next_layer_dims = [
int(d / k) for d, k in zip(prev_layer_dims, pool_kernel_size)
]
next_layer_dims = np.array(next_layer_dims)
return pool_kernel_size, next_layer_dims
@staticmethod
def _get_exp_kernel_size(prev_exp_fac):
"""Get expansion kernel size."""
next_exp_fac = np.clip(prev_exp_fac - 1, 0, None)
exp_kernel_size = prev_exp_fac - next_exp_fac + 1
return exp_kernel_size, next_exp_fac
def forward(self, x):
"""Forward method.
Args:
x: `[batch, in_features, igres[0], igres[1], igres[2]]` tensor, input voxel grid.
Returns:
`[batch, out_features, ogres[0], ogres[1], ogres[2]]` tensor, output voxel grid.
"""
x = self.conv_in(x)
x_dns = [x]
for mod, pool_op in zip(self.down_modules, self.down_pools):
x = pool_op(mod(x_dns[-1]))
x_dns.append(x)
x = x_dns.pop(-1)
upsamp_op = self.up_interps[0]
x = self.conv_mid(upsamp_op(x))
for mod, upsamp_op in zip(self.up_modules, self.up_interps[1:]):
x = torch.cat([x, x_dns.pop(-1)], dim=1)
x = mod(x)
x = upsamp_op(x)
x = torch.cat([x, x_dns.pop(-1)], dim=1)
if self.expand:
for mod, upsamp_op in zip(self.exp_modules, self.exp_interps):
x = mod(x)
x = upsamp_op(x)
x = self.conv_out(x)
return x
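# Illustrative usage sketch, added for clarity; it is not part of the original module.
# The batch size below is an arbitrary choice and the grid resolution matches the defaults.
example_model = UNet3d(in_features=4, out_features=32, igres=(4, 32, 32))
example_input = torch.randn(2, 4, 4, 32, 32)  # [batch, in_features, *igres]
example_output = example_model(example_input)
print(example_output.shape)  # torch.Size([2, 32, 4, 32, 32])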
| true
|
422c41ad4a627a69ba0d915504ffac1ba09c560e
|
Python
|
cltl-students/bosman_jona_el_for_cnd
|
/src/error_analysis.py
|
UTF-8
| 1,518
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
import pandas as pd
import spacy
from spacy.kb import KnowledgeBase
def entities_info(path):
entity_info = dict()
with open(path, 'r', encoding='utf8') as infile:
for line in infile:
row = line.split('\t')
entity_info[row[0]] = dict()
entity_info[row[0]]['name'] = row[1]
entity_info[row[0]]['description'] = row[2]
return entity_info
def error_analysis():
nlp = spacy.load('../resources/nen_nlp')
kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=96)
kb.load_bulk('../resources/kb_probs')
predictions = pd.read_csv("../data/model_data/predictions.tsv", sep='\t')
entity_info = entities_info("../data/model_data/entities.tsv")
i = 0
for prediction, label, org, sent in zip(predictions['el_system'], predictions['label'], predictions['org'], predictions['sentence']):
label = str(label)
if prediction != label and prediction != 'NIL':
i += 1
print()
print(i, org)
print([c.entity_ for c in kb.get_candidates(org)])
print("Prediction:", entity_info[prediction]['name'], prediction)
print(entity_info[prediction]['description'])
print("Label:", entity_info[label]['name'], label)
print(entity_info[label]['description'])
print()
print("Sentence: ", sent)
print()
print(i, "errors.")
def main():
error_analysis()
if __name__ == "__main__":
main()
| true
|
f1791b23d466dc70853a5ea4c6c22c75f52ac3f7
|
Python
|
dpaddon/IRGAN
|
/ltr-gan/ltr-gan-pointwise/gen_model_nn.py
|
UTF-8
| 2,899
| 2.578125
| 3
|
[] |
no_license
|
import tensorflow as tf
import cPickle
class GEN:
def __init__(self, feature_size, hidden_size, weight_decay, learning_rate, temperature=1.0, param=None):
self.feature_size = feature_size
self.hidden_size = hidden_size
self.weight_decay = weight_decay
self.learning_rate = learning_rate
self.temperature = temperature
self.g_params = []
self.reward = tf.placeholder(tf.float32, shape=[None], name='reward')
self.pred_data = tf.placeholder(tf.float32, shape=[None, self.feature_size], name="pred_data")
self.sample_index = tf.placeholder(tf.int32, shape=[None], name='sample_index')
self.important_sampling = tf.placeholder(tf.float32, shape=[None], name='important_sampling')
with tf.variable_scope('generator'):
if param == None:
self.W_1 = tf.get_variable('weight_1', [self.feature_size, self.hidden_size],
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
self.W_2 = tf.get_variable('weight_2', [self.hidden_size, 1],
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
self.b_1 = tf.get_variable('b_1', [self.hidden_size], initializer=tf.constant_initializer(0.0))
self.b_2 = tf.get_variable('b_2', [1], initializer=tf.constant_initializer(0.0))
else:
self.W_1 = tf.Variable(param[0])
self.W_2 = tf.Variable(param[1])
self.b_1 = tf.Variable(param[2])
self.b_2 = tf.Variable(param[3])
self.g_params.append(self.W_1)
self.g_params.append(self.W_2)
self.g_params.append(self.b_1)
self.g_params.append(self.b_2)
# Given batch query-url pairs, calculate the matching score
# For all urls of one query
self.pred_score = tf.reshape(tf.nn.xw_plus_b(
tf.nn.tanh(tf.nn.xw_plus_b(self.pred_data, self.W_1, self.b_1)), self.W_2, self.b_2), [-1]) / self.temperature
self.gan_prob = tf.gather(
tf.reshape(tf.nn.softmax(tf.reshape(self.pred_score, [1, -1])), [-1]), self.sample_index)
self.gan_loss = -tf.reduce_mean(tf.log(self.gan_prob) * self.reward * self.important_sampling) \
+ self.weight_decay * (tf.nn.l2_loss(self.W_1) + tf.nn.l2_loss(self.W_2)
+ tf.nn.l2_loss(self.b_1) + tf.nn.l2_loss(self.b_2))
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.g_updates = self.optimizer.minimize(self.gan_loss, var_list=self.g_params)
def save_model(self, sess, filename):
param = sess.run(self.g_params)
cPickle.dump(param, open(filename, 'w'))
| true
|
377bd812c429ef0e01b4b5564474463df4394f5b
|
Python
|
satyaaditya/MachineLearning
|
/DecisionTree/FakeBankNoteDetection.py
|
UTF-8
| 1,641
| 3.21875
| 3
|
[] |
no_license
|
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn import tree
import pandas as pd
def load_csv():
data = pd.read_csv('datasets/banknote_authentication.csv')
print('--------------- data preview\n', data.head())
return data
def check_for_null_values_in_data(data):
print('---------------check for null values')
print(data[data.isnull().any(axis=1)].count())
def use_kfold(independent_data, dependent_data):
""" kfold split k-1 parts for training and 1 for test, so
you need to give complete data without split, its different from train_test_split
"""
decision_tree = tree.DecisionTreeClassifier(criterion='gini', max_depth=None)
kf = KFold(n_splits=10)
scores = cross_val_score(decision_tree, independent_data, dependent_data, cv=kf)
print('kfold -')
print('accuracy - ', scores.mean())
def decision_tree(data):
independent_data = data.drop(columns=[data.columns[-1]])
dependent_data = data.iloc[:, -1]
# print(dependent_data.head())
X_train, X_test, y_train, y_test = train_test_split(independent_data, dependent_data, test_size=0.3)
decision_tree = tree.DecisionTreeClassifier(criterion='gini', max_depth=None)
decision_tree.fit(X_train, y_train)
y_predicted = decision_tree.predict(X_test)
print(decision_tree.score(X_train, y_train), '\t', accuracy_score(y_test, y_predicted))
use_kfold(independent_data, dependent_data) # try using kfold
if __name__ == '__main__':
data = load_csv()
check_for_null_values_in_data(data)
decision_tree(data)
| true
|
120e10e4748f2b0ae356631048c42842399a1df5
|
Python
|
MiaoDX/DataLabel
|
/track_for_detection_bbox/helper_f.py
|
UTF-8
| 374
| 2.859375
| 3
|
[] |
no_license
|
import os
def generate_all_abs_filenames(data_dir):
files = [os.path.abspath(data_dir+'/'+f) for f in os.listdir(data_dir) if os.path.isfile(data_dir+'/'+f)]
files = sorted(files)
return files
def split_the_abs_filename(abs_filename):
f_basename = os.path.basename(abs_filename)
f_no_suffix = f_basename.split('.')[0]
return f_basename, f_no_suffix
| true
|
787f2ed060b806bddb39caef852fb277d073b6fd
|
Python
|
Upupupdown/Logistic-
|
/dis_5_6.py
|
UTF-8
| 12,618
| 3.265625
| 3
|
[] |
no_license
|
import operator
from SVM import*
import numpy as np
def load_data(filename):
"""
Data loading function
:param filename: name of the data file
:return:
data_mat - processed feature data
label_mat - processed labels
"""
num_feat = len(open(filename).readline().split(';')) - 1
data_mat = []
label_mat = []
fr = open(filename)
for line in fr.readlines():
line_arr = []
cur_line = line.strip().split(';')
# Keep only the samples whose score is 5 or 6
if cur_line[-1] == '5' or cur_line[-1] == '6':
# Loop over the features and append them to line_arr
for i in range(num_feat):
line_arr.append(float(cur_line[i]))
data_mat.append(line_arr)
if cur_line[-1] == '5':
label_mat.append(-1)
else:
label_mat.append(1)
return data_mat, label_mat
# Classify the data set with a kNN model
def classify(test, data_set, label, k):
"""
kNN algorithm
:param test: data to be classified
:param data_set: already-labelled data set
:param label: class labels
:param k: kNN parameter, the number of nearest samples to use
:return: classify_result - kNN classification result
"""
# Compute the Euclidean distance between the two data sets
test_copy = np.tile(test, (data_set.shape[0], 1)) - data_set
# Square the element-wise differences
sq_test_copy = test_copy ** 2
# sum() adds all elements, sum(0) sums over columns, sum(1) sums over rows
row_sum = sq_test_copy.sum(axis=1)
# Take the square root to obtain the distances between points
distance = row_sum ** 0.5
# Indices that sort the distances in ascending order
sorted_index = distance.argsort()
# Dictionary that counts how often each class appears
class_count = {}
# Iterate over the k nearest samples and count class occurrences
for v in range(k):
# Class label of each of the k nearest samples
near_data_label = label[sorted_index[v]]
# dict.get(key, default=None) returns the value for key, or the default if the key is missing
# Count the occurrences of this class
class_count[near_data_label] = class_count.get(near_data_label, 0) + 1
# Sort by count in descending order
classify_result = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
# print(classify_result)
# Return the most frequent class, i.e. the predicted class of the test point
return classify_result[0][0]
def test_for_kNN(filename, horatio=0.1, k=4):
"""
Classify the score-5 and score-6 samples with kNN
:param filename: data file name
:param horatio: fraction of data held out for testing
:param k: kNN parameter
:return: None
"""
data, label = load_data(filename)
data = np.array(data)
m = np.shape(data)[0]
test_num = int(m * horatio)
error_count = 0.0
for i in range(test_num):
classify_result = classify(data[i, :], data[test_num:m, :], label[test_num:m], k)
if classify_result != label[i]:
error_count += 1.0
print("kNN模型的预测准确率: %.1f%%" % (100 * (1 - error_count / test_num)))
# Classify the data set with a support vector machine
def test_rbf(filename, k1=20, horatio=0.1):
"""
Test function
:param horatio: fraction of data held out for testing
:param filename: data file name
:param k1: reach parameter of the Gaussian (RBF) kernel
:return: None
"""
data_arr, label_arr = load_data(filename)
m = len(data_arr)
test_num = int(m * horatio)
test_arr = data_arr[0:test_num]
test_label = label_arr[0:test_num]
train_arr = data_arr[test_num:]
train_label = label_arr[test_num:]
b, alphas = smo_P(train_arr, train_label, 200, 0.0001, 100, ('rbf', k1))
train_mat = np.mat(train_arr)
train_label_mat = np.mat(train_label).transpose()
# Get the support vectors
sv_ind = np.nonzero(alphas.A > 0)[0]
svs = train_mat[sv_ind]
label_sv = train_label_mat[sv_ind]
print(f'Number of support vectors: {np.shape(svs)[0]}')
m, n = np.shape(train_mat)
error_count = 0
for i in range(m):
# Compute the kernel value for each point
kernel_eval = kernel_trans(svs, train_mat[i, :], ('rbf', k1))
# Evaluate the hyperplane from the support vectors and return the prediction
predict = kernel_eval.T * np.multiply(label_sv, alphas[sv_ind]) + b
# np.sign returns +1/-1 for each element; count the misclassified samples
if np.sign(predict) != np.sign(train_label[i]):
error_count += 1
print(f'Training set accuracy: {(1 - float(error_count) / m) * 100}')
# Evaluate on the test set
error_count = 0
test_mat = np.mat(test_arr)
m, n = np.shape(test_mat)
for i in range(m):
kernel_eval = kernel_trans(svs, test_mat[i, :], ('rbf', k1))
predict = kernel_eval.T * np.multiply(label_sv, alphas[sv_ind]) + b
if np.sign(predict) != np.sign(test_label[i]):
error_count += 1
print(f'Test set accuracy: {(1 - float(error_count) / m) * 100}')
# Classify the data set with an AdaBoost model
def stump_classify(data_matrix, col, thresh_val, thresh_flag):
"""
Decision-stump classification function
:param data_matrix: data matrix
:param col: column index, i.e. which feature to use
:param thresh_val: threshold
:param thresh_flag: comparison flag
:return:
ret_array - classification result
"""
# Initialise the predicted class array
ret_array = np.ones((np.shape(data_matrix)[0], 1))
if thresh_flag == 'lt':
# When the feature in column col is below ('lt') the threshold thresh_val, assign the negative class -1.0 (prediction based on the threshold)
ret_array[data_matrix[:, col] <= thresh_val] = -1.0
else:
# When the feature in column col is above ('gt') the threshold thresh_val, assign the negative class -1.0 (prediction based on the threshold)
ret_array[data_matrix[:, col] > thresh_val] = -1.0
return ret_array
def build_stump(data_arr, class_labels, D):
"""
Find the best decision stump for the data set
:param data_arr: data matrix
:param class_labels: data labels
:param D: sample weights
:return:
best_stump - information about the best decision stump
min_error - minimum weighted error
best_result - best classification result
"""
data_matrix = np.mat(data_arr)
label_mat = np.mat(class_labels).T
m, n = np.shape(data_matrix)
num_steps = 10.0
best_stump = {}
best_result = np.mat(np.zeros((m, 1)))
# Initialise the minimum error rate to infinity
min_error = float('inf')
# Iterate over the features (columns)
for i in range(n):
# Find the extremes of this feature (min and max of the column) and set the threshold step size
range_min = data_matrix[:, i].min()
range_max = data_matrix[:, i].max()
step_size = (range_max - range_min) / num_steps
# Try different thresholds and compute the classification induced by each one;
# thresholds run from min - step to max, increasing by one step at a time,
# and both orientations ('lt' and 'gt') are tried to find the split with the lowest error
for j in range(-1, int(num_steps) + 1):
for situation in ['lt', 'gt']:
thresh_val = (range_min + float(j) * step_size)
predicted_val = stump_classify(data_matrix, i, thresh_val, situation)
err_arr = np.mat(np.ones((m, 1)))
# Set correctly classified samples to 0
err_arr[predicted_val == label_mat] = 0
# Compute the weighted error rate
weighted_error = D.T * err_arr
# print('\n split:dim %d, thresh %.2f, thresh situation: %s \
# the weighted error is %.3f' % (i, thresh_val, situation, weighted_error))
# Record the information of the stump with the lowest error so far
if weighted_error < min_error:
min_error = weighted_error
best_result = predicted_val.copy()
best_stump['dim'] = i
best_stump['thresh'] = thresh_val
best_stump['situation'] = situation
return best_stump, min_error, best_result
def ada_boost_train_DS(data_arr, class_labels, num_iter=40):
"""
AdaBoost training based on decision stumps
:param data_arr: data set
:param class_labels: data labels
:param num_iter: number of iterations
:return:
weak_class_arr - the weak classifiers obtained after training
"""
# Holds the weak classifiers produced during boosting
weak_class_arr = []
m = np.shape(data_arr)[0]
# Initialise the sample weights
D = np.mat(np.ones((m, 1)) / m)
agg_class_result = np.mat(np.zeros((m, 1)))
for i in range(num_iter):
# Build a decision stump
# weak-classifier error -> classifier weight alpha -> updated sample weights -> next weak-classifier error
# weak-classifier error -> classifier weight alpha -> accumulated estimate agg_class_result -> stop training when its error reaches 0
best_stump, error, class_result = build_stump(data_arr, class_labels, D)
# print(D.T)
# Compute alpha, the weight assigned to this classifier, based on its error rate
# max(error, 1e-16) avoids division by zero when the weak classifier has zero error
alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
# Record alpha and this weak classifier's result
best_stump['alpha'] = alpha
weak_class_arr.append(best_stump)
# print(f'class_result: {class_result}')
# Exponent used to update the sample weights: -alpha if classified correctly, +alpha otherwise (sign obtained by multiplying label and result)
e_exponent = np.multiply(-1 * alpha * np.mat(class_labels).T, class_result)
D = np.multiply(D, np.exp(e_exponent))
D = D / D.sum()
# Accumulate the class estimate for every data point
agg_class_result += alpha * class_result
# print(f'agg_class_result: {agg_class_result.T}')
# Compute the accumulated error rate
agg_errors = np.multiply(np.sign(agg_class_result) != np.mat(class_labels).T, np.ones((m, 1)))
error_rate = agg_errors.sum() / m
# print(f'total error: {error_rate}')
if error_rate == 0.0:
break
return weak_class_arr
def ada_classify(test_data, classifier_arr):
"""
Test-time classification function
:param test_data: test data set
:param classifier_arr: weak classifiers obtained from AdaBoost training
:return:
sign(agg_class_result) - classification result
"""
test_matrix = np.mat(test_data)
m = np.shape(test_matrix)[0]
# Accumulated class estimate
agg_class_result = np.mat(np.zeros((m, 1)))
# Iterate over every weak classifier
for i in range(len(classifier_arr)):
# Classify with this weak classifier
class_result = stump_classify(test_matrix, classifier_arr[i]['dim'],
classifier_arr[i]['thresh'], classifier_arr[i]['situation'])
# Accumulate the estimate weighted by the classifier weight
agg_class_result += classifier_arr[i]['alpha'] * class_result
# print(agg_class_result)
# Use the sign function to obtain the final class, which in effect classifies by probability:
# if the accumulated estimate is more likely positive (here, greater than 0) the sample is judged positive,
# otherwise it is judged negative; in essence the classification threshold is 0.5
return np.sign(agg_class_result)
def test_for_Ada(filename, horatio=0.1, num_item=30):
# Train and evaluate an AdaBoost model on the data set
data_arr, label_arr = load_data(filename)
m = len(data_arr)
# Split the data into training and test sets
test_num = int(m * horatio)
test_arr = data_arr[0:test_num]
test_label = label_arr[0:test_num]
train_arr = data_arr[test_num:]
train_label = label_arr[test_num:]
# Train on the training set with decision stumps
classifier_arr = ada_boost_train_DS(train_arr, train_label, num_item)
# Predict on the test set and count the errors
prediction = ada_classify(test_arr, classifier_arr)
m = np.shape(test_arr)[0]
error_arr = np.mat(np.ones((m, 1)))
print("AdaBoost模型预测准确率: %.1f%%" % (100 * (1 - error_arr[prediction != np.mat(test_label).T].sum() / m)))
test_for_Ada('red_wine', horatio=0.1, num_item=35)
test_for_kNN('red_wine', k=4)
test_rbf('red_wine')
| true
|
ea33d48ddfef592d48f0be8b9fc14a4feb8c78bd
|
Python
|
rafaelperazzo/programacao-web
|
/moodledata/vpl_data/59/usersdata/171/61571/submittedfiles/testes.py
|
UTF-8
| 652
| 3.453125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import math
# START HERE BELOW
def sub(a,b):
# digit-wise subtraction with borrowing; returns the list of result digits
c=[]
borrow=0
for i in range(len(a)-1,-1,-1):
d=a[i]-b[i]-borrow
if d<0:
d=d+10
borrow=1
else:
borrow=0
c.insert(0,d)
return(c)
n=int(input('enter the number:'))
a=[]
for i in range(0,n,1):
valor1=float(input('enter a number for a:'))
a.append(valor1)
m=int(input('enter the number:'))
b=[]
for i in range(0,m,1):
valor2=float(input('enter a number for b:'))
b.append(valor2)
print(sub(a,b))
| true
|
31b9bb38bef964786576907a53e2bfe66d765dbc
|
Python
|
bymayanksingh/open-source-api-wrapper
|
/src/GithubWrapper.py
|
UTF-8
| 4,815
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime, timezone
from github import Github
from GithubToken import github_token
# Github API Wrapper
class GithubWrapper:
def __init__(self):
"""
handles user authentication &
creates user object
"""
self.user_obj = Github(github_token)
def get_org_obj(self, organization):
"""
creates organization object
Return type: <class 'github.Organization.Organization'>
"""
self.org_obj = self.user_obj.get_organization(organization)
return self.org_obj
def get_org_members(self, organization):
"""
get all public & private members of the org.
outputs member html url, member username, member name
Return type: dict
"""
org_obj = self.get_org_obj(organization)
members_dict = {}
members = org_obj.get_members()
i = 1
for member in members:
member_dict = {}
member_dict["username"] = member._identity
member_dict["name"] = self.user_obj.get_user(member._identity).name
member_dict["url"] = member.html_url
members_dict[i] = member_dict
i += 1
return members_dict
def get_org_repos(self, organization):
"""
get repositories of the org.
outputs repo html url, repo full name
Return type: dict
"""
org_obj = self.get_org_obj(organization)
repos = org_obj.get_repos()
repos_dict = {}
i = 1
for repo in repos:
repo_dict = {}
repo_dict["full_name"] = repo.full_name
repo_dict["url"] = repo.url
repos_dict[i] = repo_dict
i += 1
return repos_dict
def get_repo_commits(self, repository):
"""
get repo commits.
outputs commit author, commit url, commit sha
Return type: dict
"""
repo = self.user_obj.get_repo(repository)
commits = repo.get_commits()
commits_dict = {}
i = 1
for commit in commits:
commit_dict = {}
commit_dict["author"] = commit.author.login
commit_dict["url"] = commit.html_url
commit_dict["sha"] = commit.sha
commits_dict[i] = commit_dict
i += 1
return commits_dict
def get_repo_issues(self, repository):
"""
get repository issues only.
outputs issue title, issue url, id
Return type: dict
"""
repo = self.user_obj.get_repo(repository)
issues = repo.get_issues()
issues_dict = {}
i = 1
for issue in issues:
issue_dict = {}
issue_dict["id"] = issue.id
issue_dict["title"] = issue.title
issue_dict["url"] = issue.url
issue_dict["labels"] = []
for label in issue.labels:
issue_dict["labels"].append(label.name)
issues_dict[i] = issue_dict
i += 1
return issues_dict
def get_org_issues(self, organization):
"""
get all orgs issues, repo wise
outputs reponame: issue title, issue url
Return type: dict
"""
org_obj = self.user_obj.get_organization(organization)
repos = org_obj.get_repos()
org_issues_dict = {}
for repo in repos:
issues_dict = self.get_repo_issues(repo.full_name)
org_issues_dict[repo.full_name] = issues_dict
return org_issues_dict
def get_issue_comments_dict(self, repository):
"""
get issue comments
outputs index: issue title, issue url, comments
Return type: dict
"""
repo = self.user_obj.get_repo(repository)
issues = repo.get_issues()
issues_dict = {}
i = 1
for issue in issues:
issue_dict = {}
issue_dict["url"] = issue.url
issue_dict["title"] = issue.title
issue_dict["comments"] = [comment.body for comment in issue.get_comments()]
issues_dict[i] = issue_dict
i += 1
return issues_dict
def get_repo_pulls(self, repository):
"""
get all repo pull requests
outputs index: pull name, pull url
Return type: dict
"""
repo = self.user_obj.get_repo(repository)
pulls = repo.get_pulls()
pulls_dict = {}
i = 1
for pull in pulls:
pull_dict = {}
pull_dict["url"] = pull.url
pull_dict["title"] = pull.title
pull_dict["merged"] = pull.is_merged()
pulls_dict[i] = pull_dict
i += 1
return pulls_dict
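# Illustrative usage sketch, added for clarity (not part of the original file).
# "octocat/Hello-World" is only a placeholder repository; a valid github_token
# must be configured in GithubToken.py for the constructor above to work.
wrapper = GithubWrapper()
print(wrapper.get_repo_pulls("octocat/Hello-World"))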
| true
|
08f777b21c701de8b7f8f533145b20eed3fbed13
|
Python
|
RouganStriker/BDOBot
|
/plugins/base.py
|
UTF-8
| 3,073
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
import boto3
TABLE_NAME = 'bdo-bot'
class BasePlugin(object):
# Plugin type is used to uniquely identify the plugin's item in dynamodb
PLUGIN_TYPE = None
# Mapping of attribute names to a type
ATTRIBUTE_MAPPING = {}
def __init__(self, discord_client=None):
self.db = boto3.client('dynamodb')
self.discord = discord_client
@property
def partition_key(self):
return {
'plugin-type': {
'S': self.PLUGIN_TYPE
}
}
def get_item(self):
"""Returns this plugin's data stored in dynamodb."""
if self.PLUGIN_TYPE is None:
raise NotImplementedError("PLUGIN_TYPE is not defined, cannot access DB.")
return self.db.get_item(
TableName=TABLE_NAME,
Key=self.partition_key
)
def _python_type_to_dynamo_type(self, attribute_class):
if issubclass(attribute_class, str):
return 'S'
elif attribute_class in [int, float]:
return 'N'
elif issubclass(attribute_class, list):
return 'L'
elif issubclass(attribute_class, bool):
return 'BOOL'
else:
raise TypeError("Unexpected attribute class {0}".format(attribute_class))
def create_item(self, **kwargs):
"""Create an item in dynamodb with attributes in initial kwargs."""
item = self.partition_key
for attribute, value in kwargs.items():
if attribute not in self.ATTRIBUTE_MAPPING:
continue
attribute_type = self._python_type_to_dynamo_type(self.ATTRIBUTE_MAPPING[attribute])
if attribute_type == "N":
# Cast number to string
value = str(value)
item[attribute] = {
attribute_type: value
}
return self.db.put_item(
TableName=TABLE_NAME,
Item=item
)
def update_item(self, **kwargs):
"""Create an item in dynamodb with attributes in kwargs."""
placeholders = {}
update_attributes = []
for attribute, value in kwargs.items():
if attribute not in self.ATTRIBUTE_MAPPING:
continue
attribute_type = self._python_type_to_dynamo_type(self.ATTRIBUTE_MAPPING[attribute])
if attribute_type == "N":
# Cast number to string
value = str(value)
attr_placeholder = ":value{}".format(len(placeholders))
placeholders[attr_placeholder] = {attribute_type: value}
update_attributes.append("{} = {}".format(attribute, attr_placeholder))
if not update_attributes:
return None
return self.db.update_item(
TableName=TABLE_NAME,
Key=self.partition_key,
UpdateExpression="SET {}".format(", ".join(update_attributes)),
ExpressionAttributeValues=placeholders
)
def run(self):
"""Entry point for the plugin."""
raise NotImplementedError()
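# Illustrative subclass sketch, added for clarity (not part of the original file).
# The plugin type and attribute names below are invented for the example, and
# running it still requires AWS credentials plus the 'bdo-bot' DynamoDB table.
class ExamplePlugin(BasePlugin):
    PLUGIN_TYPE = 'example-plugin'
    ATTRIBUTE_MAPPING = {'last_run': str, 'run_count': int}

    def run(self):
        # read the stored item, then record that the plugin ran once more
        item = self.get_item()
        self.update_item(run_count=1, last_run='2020-01-01T00:00:00Z')
        return item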
| true
|
b1c7563682031fbb99c8529666b47c051df31602
|
Python
|
Matacristos/api-flask
|
/src/predict.py
|
UTF-8
| 1,660
| 2.796875
| 3
|
[] |
no_license
|
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from src.utils import univariate_data, get_test_data
def predict(
csv_path: str,
model,
past_history: int = 72,
future_target: int = 0,
num_hours_past: int = 120
):
data = pd.read_csv(csv_path, names=['Date', 'Close'])
data = data.sort_values('Date')
price = data[['Close']]
# Normalize data
min_max_scaler = MinMaxScaler()
norm_data = min_max_scaler.fit_transform(price.values)
_, y_test = univariate_data(norm_data,
int(len(norm_data) - num_hours_past),
None,
past_history,
future_target)
x_test = get_test_data(norm_data,
int(len(norm_data) - num_hours_past),
past_history)
original = pd.DataFrame(min_max_scaler.inverse_transform(y_test))
predictions = pd.DataFrame(min_max_scaler.inverse_transform(model.predict(x_test)))
plt.clf()
ax = sns.lineplot(x=original.index, y=original[0], label="Real Data", color='royalblue')
ax = sns.lineplot(x=predictions.index, y=predictions[0], label="Prediction", color='tomato')
ax.set_title('Bitcoin price', size = 14, fontweight='bold')
ax.set_xlabel("Hours", size = 14)
ax.set_ylabel("Cost (USD)", size = 14)
ax.set_xticklabels('', size=10)
#ax.get_figure().savefig('../images/prediction.png')
plt.savefig(os.getcwd() + '/images/prediction.png')
| true
|
1ed83669cb8f4d60d5a7a89834918f0def4c2176
|
Python
|
ShogoAkiyama/rltorch2
|
/sentiment/iqn/utils.py
|
UTF-8
| 6,206
| 2.609375
| 3
|
[] |
no_license
|
import string
import re
import os
import io
import sys
import csv
import six
import itertools
from collections import Counter
from collections import defaultdict, OrderedDict
import torch
from torchtext.vocab import Vectors, Vocab
from dataAugment.dataAugment import *
# Preprocessing
def preprocessing_text(text):
    # Replace every symbol except , . : < > with a space
    for p in string.punctuation:
        if (p == ".") or (p == ",") or (p == ":") or (p == "<") or (p == ">"):
continue
else:
text = text.replace(p, " ")
    # Put a space before and after periods and commas
text = text.replace(".", " . ")
text = text.replace(",", " , ")
text = re.sub(r'[0-9 0-9]', '0', text)
return text
# Tokenization (the data is English, so the string is simply split)
def tokenizer_punctuation(text):
return text.strip().split(':')
# Define a function that combines preprocessing and tokenization
def tokenizer_with_preprocessing(text):
text = preprocessing_text(text)
ret = tokenizer_punctuation(text)
return ret
def unicode_csv_reader(unicode_csv_data, **kwargs):
# Fix field larger than field limit error
maxInt = sys.maxsize
while True:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
try:
csv.field_size_limit(maxInt)
break
except OverflowError:
maxInt = int(maxInt / 10)
csv.field_size_limit(maxInt)
if six.PY2:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data), **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [cell.decode('utf-8') for cell in row]
else:
for line in csv.reader(unicode_csv_data, **kwargs):
yield line
class MyDataset(torch.utils.data.Dataset):
def __init__(self, path, max_len=64, vocab=None, vectors=None,
min_freq=10, specials=[], phase='val'):
self.keys = ['Date', 'Code', 'State', 'Next_State', 'Reward']
self.string_keys = ['State', 'Next_State']
self.tensor_keys = ['Reward'] + ['Next_State'] #self.string_keys
self.data_list = {k: [] for k in self.keys}
with io.open(os.path.expanduser(path), encoding="utf8") as f:
reader = unicode_csv_reader(f, delimiter='\t')
for line in reader:
for k, x in zip(self.keys, line):
if k in self.string_keys:
self.data_list[k].append(x.split(':'))
elif k in self.tensor_keys:
self.data_list[k].append(float(x))
else:
self.data_list[k].append(x)
self.unk_token = '<unk>'
self.pad_token = '<pad>'
self.init_token = '<cls>'
self.eos_token = '<eos>'
self.max_len = max_len
self.fix_len = self.max_len + (self.init_token, self.eos_token).count(None) - 2
self.specials = specials
self.words = list(itertools.chain.from_iterable(self.data_list['State']))
self.counter = Counter(self.words)
specials = list(OrderedDict.fromkeys(
tok for tok in [self.unk_token, self.pad_token, self.init_token,
self.eos_token] + self.specials
if tok is not None))
if (phase=='val') and (vocab is not None):
self.vocab = vocab
elif (phase=='train') and (vectors is not None):
self.vocab = Vocab(self.counter,
specials=specials,
vectors=vectors,
min_freq=min_freq)
self.padded_list = self.pad(self.data_list)
self.tensor_list = self.numericalize(self.padded_list)
stopwords = []
for w in ['<cls>', '<eos>', '<pad>', '<span>']:
stopwords.append(self.vocab.stoi[w])
self.transform = DataTransform(self.vocab, stopwords)
self.phase = phase
def pad(self, data):
padded = {k: [] for k in self.keys}
for key, val in data.items():
if key in self.string_keys:
arr = []
for x in val:
arr.append(
([self.init_token])
+ list(x[:self.fix_len])
+ ([self.eos_token])
+ [self.pad_token] * max(0, self.fix_len - len(x)))
padded[key] = arr
else:
padded[key] = val
return padded
def numericalize(self, padded):
tensor = {k: [] for k in self.keys}
for key, val in padded.items():
if key in self.string_keys:
arr = []
for ex in val:
arr.append([self.vocab.stoi[x] for x in ex])
if key == 'State':
tensor[key] = arr
else:
tensor[key] = torch.LongTensor(arr).to('cpu')
elif key in self.tensor_keys:
tensor[key] = torch.FloatTensor(val).to('cpu')
else:
tensor[key] = val
return tensor
def __len__(self):
return len(self.tensor_list['State'])
def __getitem__(self, i):
arr = {k: [] for k in self.keys}
for key in self.keys:
data = self.tensor_list[key][i]
if key == 'State':
data = torch.LongTensor(self.transform(data, self.phase))
arr[key] = data
return arr
class DataTransform:
def __init__(self, vectors, stopwords):
self.data_transform = {
'train': Compose([
RandomSwap(vectors, aug_p=0.1, stopwords=stopwords),
RandomSubstitute(vectors, aug_p=0.1, stopwords=stopwords),
]),
'val': Compose([
])
}
def __call__(self, text, phase):
return self.data_transform[phase](text)
| true
|
4bfd31139ecef2530cadd3053ae8e332e9a9f808
|
Python
|
rakeshsukla53/interview-preparation
|
/Rakesh/subsequence-problems/longest_substring_without_repeating_characters.py
|
UTF-8
| 821
| 3.28125
| 3
|
[] |
no_license
|
__author__ = 'rakesh'
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
if s is not None:
finalResult = 0
for i in range(len(s)):
frequency = {}
count = 0
for j in range(i, len(s)):
if not frequency.has_key(s[j]):
frequency[s[j]] = ''
count += 1
else:
if count > finalResult:
finalResult = count
count = 0
if count > finalResult:
finalResult = count
return finalResult
value = Solution()
print value.lengthOfLongestSubstring('pwwkew')
| true
|
b2304ae38ce3508eff90a6f5ce1fbc1a118a7fc1
|
Python
|
rahmankashfia/Hacker-Rank
|
/python/bracket_match.py
|
UTF-8
| 200
| 3.03125
| 3
|
[] |
no_license
|
s = ")))((("
t = []
matched = True
for x in s:
if x == "(":
t.append(x)
if x == ")":
if len(t) == 0:
matched = False
else:
t.pop()
if len(t) > 0:
matched = False
print(matched)
| true
|
d65f95ff2ed645559c6c1bba3054c0518fd530f9
|
Python
|
maoxx241/code
|
/Top_K_Frequent_Elements/Top_K_Frequent_Elements.py
|
UTF-8
| 317
| 3.0625
| 3
|
[] |
no_license
|
import collections
from typing import List
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
dic=collections.Counter(nums)
ans=[]
lst=sorted(dic.items(),key=lambda x:x[1],reverse=True)
for i in lst:
ans.append(i[0])
if len(ans)==k:
return ans
| true
|
9bd01b34dff6aefad381a3fb1d2fb737e20a5402
|
Python
|
nsidnev/edgeql-queries
|
/edgeql_queries/contrib/aiosql/queries.py
|
UTF-8
| 1,515
| 2.5625
| 3
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
"""Definition for aiosql compatible queries."""
from typing import List, Union
from edgeql_queries import queries as eq_queries
from edgeql_queries.contrib.aiosql.adapters import EdgeQLAsyncAdapter, EdgeQLSyncAdapter
from edgeql_queries.models import Query
from edgeql_queries.typing import QueriesTree
class EdgeQLQueries:
"""Queries that are compatible with aiosql."""
def __init__(self, adapter: Union[EdgeQLSyncAdapter, EdgeQLAsyncAdapter]) -> None:
"""Init queries.
Arguments:
adapter: adapter for aiosql with `is_aio_driver` field.
"""
self._use_async = adapter.is_aio_driver
def load_from_list(self, queries: List[Query]) -> eq_queries.Queries:
"""Load list of queries.
Arguments:
queries: list of queries that should be used for creating
executors for them.
Returns:
Built collection of queries with binded executors.
"""
return eq_queries.load_from_list(eq_queries.Queries(self._use_async), queries)
def load_from_tree(self, queries_tree: QueriesTree) -> eq_queries.Queries:
"""Load queries tree.
Arguments:
queries_tree: tree of queries that should be used for creating
executors for them.
Returns:
Built collection of queries with binded executors.
"""
return eq_queries.load_from_tree(
eq_queries.Queries(self._use_async),
queries_tree,
)
| true
|
b39af8ba6fd05a50c0368cb303c6c796c6939096
|
Python
|
roflmaostc/Euler-Problems
|
/028.py
|
UTF-8
| 1,494
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/env python
"""
Starting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:
21 22 23 24 25
20 7 8 9 10
19 6 1 2 11
18 5 4 3 12
17 16 15 14 13
It can be verified that the sum of the numbers on the diagonals is 101.
What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?
"""
import tabulate
def createSpiral(size):
"""Creates spiral with numbers. Requires odd size"""
grid=[[0 for i in range(size)] for i in range(size) ]
if size%2==0:
return []
else:
i,j=size//2,size//2
for value in range(1,size**2+1):
grid[i][j]=value
i,j=giveNewField(i,j,size)
return grid
def giveNewField(i, j, size):
"""Returns the next field for the spiral"""
if j>=i and i+j<size:
return i,j+1
elif i<j:
return i+1,j
elif j<=i and i+j>=size:
return i,j-1
else:
return i-1,j
def sumDiagonals(grid):
diag1=sum(grid[i][i] for i in range(len(grid)))
diag2=sum(grid[len(grid)-i-1][i] for i in range(len(grid)))
return diag1+diag2-1
def sumDiagonalsSmart(size):
"""size>=3"""
import numpy as np
prev=np.array([3,5,7,9])
diag=np.array([0,0,0,0])
diag+=prev
for i in range(1, size//2):
prev=prev+[2,4,6,8]+8*i
diag+=prev
return sum(diag)+1
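# Quick sanity check (illustrative only): both approaches reproduce the
# 5 by 5 example from the docstring above, whose diagonal sum is 101.
assert sumDiagonals(createSpiral(5)) == 101
assert sumDiagonalsSmart(5) == 101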
print(sumDiagonalsSmart(1001))
# print(sumDiagonals(createSpiral(1001)))
| true
|
17e308c9ffec08b88e972ac2295a4da44add4000
|
Python
|
shanahanjrs/LR35902
|
/opcodes.py
|
UTF-8
| 2,456
| 2.828125
| 3
|
[] |
no_license
|
"""
opcodes
https://www.pastraiser.com/cpu/gameboy/gameboy_opcodes.html
Instr mnemonic -> | INS reg |
Length Bytes -> | 2 8 | <- duration (cycles)
Flags -> | Z N H C |
> Inline version of this: | INS reg 2b 8c Z N H C |
Flag register (F) bits (3,2,1,0 always zero):
7 6 5 4 3 2 1 0
Z N H C 0 0 0 0
Z zero
N subtraction
H half carry
C carry
d8 means immediate 8 bit data
d16 means immediate 16 bit data
a8 means 8 bit unsigned data, which are added to $FF00 in certain instructions (replacement for missing IN and OUT instructions)
a16 means 16 bit address
r8 means 8 bit signed data, which are added to program counter
LD A,(C) has alternative mnemonic LD A,($FF00+C)
LD C,(A) has alternative mnemonic LD ($FF00+C),A
LDH A,(a8) has alternative mnemonic LD A,($FF00+a8)
LDH (a8),A has alternative mnemonic LD ($FF00+a8),A
LD A,(HL+) has alternative mnemonic LD A,(HLI) or LDI A,(HL)
LD (HL+),A has alternative mnemonic LD (HLI),A or LDI (HL),A
LD A,(HL-) has alternative mnemonic LD A,(HLD) or LDD A,(HL)
LD (HL-),A has alternative mnemonic LD (HLD),A or LDD (HL),A
LD HL,SP+r8 has alternative mnemonic LDHL SP,r8
"""
def NOP():
""" 0x00 1b 4c - - - -"""
pass
def STOP():
""" 0x10 2b 4c - - - - """
pass
def LD_BC_D16(cpu, d):
""" 0x01 3b 12c - - - - Load 16bit data into BC"""
cpu.set_bc(d)
def LD_BC_A(cpu):
""" 0x02 1b 8c - - - - Load A into BC"""
cpu.set_bc(cpu.a)
def INC_BC(cpu):
""" 0x03 1b 8c - - - - """
cpu.bc += 1
def INC_B(cpu):
""" 0x04 1b 4c Z 0 H - """
cpu.b += 1
cpu.fz = 0x1 if cpu.b == 0 else 0x0
cpu.fn = 0x0
cpu.fh = 0x1 if cpu.b > 256 else 0x0
def DEC_B(cpu):
""" 0x05 1b 4c Z 1 H - """
cpu.b -= 1
cpu.fz = 0x1 if cpu.b == 0 else 0x0
cpu.fn = 0x1
cpu.fh = 0x1 if cpu.b > 256 else 0x0
def LD_B_D8(cpu, d):
""" 0x06 2b 8c - - - - """
cpu.b = d
def RLCA(cpu):
""" 0x07 1b 4c 0 0 0 C """
# Rotate C and put the 7th bit in reg A
pass
def LD_A16_SP(cpu):
""" 0x08 3b 20c - - - - """
cpu.a = cpu.sp
def ADD_HL_BC(cpu):
""" 0x09 1b 8c - 0 H C """
cpu.set_hl(cpu.bc)
cpu.fn = 0x0
cpu.fh = 0x1 if cpu.hl > 256 else 0x0
#cpu.fc = ?
def LD_A_BC(cpu):
""" 0x0A 1b 8c - - - - """
cpu.a = cpu.bc
def DEC_BC(cpu):
""" 0x0b 1b 8c - - - -"""
cpu.set_bc(cpu.get_bc()-1)
def INC_C(cpu):
""" 0x0c 1b 4c Z 0 H - """
cpu.c = cpu.c+1
# set flags
| true
|
1cb152007c50a4544b223761dd7abe8b2032d927
|
Python
|
2021Anson2016/tensorflow_note
|
/tf_ex17_Train Model_v2.py
|
UTF-8
| 7,474
| 2.53125
| 3
|
[] |
no_license
|
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import math
import random
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def LoadDataFileFolder_gray(path, total):
files = [f for f in os.listdir(path)]
img_list = []
label_list = []
count = 0
for filename in files:
if count > total and total > 0:
break
f = filename.split(".")
if len(f) == 2 and f[1].strip() == "jpg":
if filename[0] == 'a':
label_list.append([1, 0, 0])
elif filename[0] == 'b':
label_list.append([0, 1, 0])
else:
label_list.append([0, 0, 1])
img = np.asarray(Image.open(path + "/" + filename))
gray = rgb2gray(img).tolist()
img_list.append(img)
count += 1
img_list = np.asarray(img_list)
label_list = np.asarray(label_list)
return img_list, label_list
def LoadDataFileFolder_RGB(path, total):
files = [f for f in os.listdir(path)]
img_list = []
label_list = []
count = 0
for filename in files:
if count > total and total > 0:
break
f = filename.split(".")
if len(f) == 2 and f[1].strip() == "jpg":
if filename[0] == 'a':
label_list.append([1, 0, 0])
elif filename[0] == 'b':
label_list.append([0, 1, 0])
else:
label_list.append([0, 0, 1])
img = np.asarray(Image.open(path + "/" + filename))
gray = rgb2gray(img).tolist()
img_list.append(gray)
count += 1
img_list = np.asarray(img_list)
label_list = np.asarray(label_list)
return img_list, label_list
path_train = "training"
path_test = "my_testing"
imgs_train, labels_train = LoadDataFileFolder_gray(path_train, -1)
imgs_test, labels_test = LoadDataFileFolder_RGB(path_test, -1)
batch_size = 128
def next_batch(imgs, labels, size):
id_samp = np.ndarray(shape=(size), dtype=np.int32)
img_samp = np.ndarray(shape=(size, imgs.shape[1], imgs.shape[2]))
label_samp = np.ndarray(shape=(size, labels.shape[1]))
for i in range(size):
r = random.randint(0, imgs.shape[0] - 1)
img_samp[i] = imgs[r]
label_samp[i] = labels[r]
id_samp[i] = r
return [img_samp, label_samp]
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):  # biases are usually initialized to a small positive value
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W, strides):  # x: input values (image data), W: weights
# Must have strides[0] = strides[3] = 1 , strides = [1, stride, stride, 1]
# strides = [1, x_movement, y_movement, 1]
return tf.nn.conv2d(x, W, strides=strides, padding='SAME')
def max_pool_2X2(x):
# Must have strides[0] = strides[3] = 1
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def compute_accuracy(v_xs, v_ys):
global prediction
y_pre = sess.run(prediction, feed_dict={xs: v_xs})
correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
return result
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [batch_size, 64, 64], name='x_input')  # 64x64; xs holds all image examples
ys = tf.placeholder(tf.float32, [batch_size, 3], name='y_input') # 3:label size
# keep_prob = tf.placeholder(tf.float32)
xs_re = tf.reshape(xs, [batch_size, 64, 64, 1])
## conv1 + max pooling layer ##
W_conv1 = weight_variable([5, 5, 1, 32])  # patch 5x5, in size 1 is the input depth, out size 32 is the output depth
b_conv1 = bias_variable([32])  # matches the output depth of 32
# conv2d(x_image, W_conv1) + b_conv1, same pattern as before
h_conv1 = tf.nn.relu(conv2d(xs_re, W_conv1, [1, 1, 1, 1]) + b_conv1) # output size 64x64x32
h_pool1 = max_pool_2X2(h_conv1) # output size 32x32x32
## conv2 + max pooling layer ##
W_conv2 = weight_variable([5, 5, 32, 64])  # patch 5x5, in size 32 is the input depth, out size 64 is the output depth
b_conv2 = bias_variable([64])  # matches the output depth of 64
# conv2d(x_image, W_conv1) + b_conv1, same pattern as before
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, [1, 1, 1, 1]) + b_conv2) # output size 32x32x64
h_pool2 = max_pool_2X2(h_conv2) # output size 16x16x64
## fc1 layer ##
W_fc1 = weight_variable([16 * 16 * 64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 7,7,64] ->> [n_samples,7*7*64 ]
h_pool2_flat = tf.reshape(h_pool2, [batch_size, 16 * 16 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# avoid overfitting
h_fc1_drop = tf.nn.dropout(h_fc1, 1)
## fc2 layer ##
W_fc2 = weight_variable([1024, 3])
b_fc2 = bias_variable([3])
## ----------------------------------------------------------------------------------------------
# prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
prediction = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_prob = tf.nn.sigmoid(prediction)
# # avoid overfitting
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# the error between prediction and real data
# cross_entropy = tf.reduce_mean(tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1])) # loss
cross_entropy = tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=prediction, targets=ys), 1))
tf.summary.scalar('loss', cross_entropy)
# solver = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# For a large system AdamOptimizer works better than GradientDescentOptimizer
solver = tf.train.AdamOptimizer().minimize(cross_entropy)
# Initialize all the ops
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# summary writer goes in here
train_writer = tf.summary.FileWriter("save/train_graph/", sess.graph)
test_writer = tf.summary.FileWriter("save/test_graph/", sess.graph)
if not os.path.exists('save/'):
os.makedirs('save/')
isTrain = True
# if isTrain == True:
# saver = tf.train.Saver()
# saver.restore(sess, "save/model.ckpt")
AccNum = 0
for it in range(5000):
if isTrain: # training
img_batch, label_batch = next_batch(imgs_train, labels_train, batch_size)
_, loss_ = sess.run([solver, cross_entropy], feed_dict={xs: img_batch, ys: label_batch})
if it % 50 == 0: # testing
img_batch, label_batch = next_batch(imgs_test, labels_test, batch_size)
y__, loss_ = sess.run([y_prob, cross_entropy], feed_dict={xs: img_batch, ys: label_batch})
print("Testing step: " + str(it) + " " + str(loss_))
print(y__[0])
print(label_batch[0])
AccNum = 0
for num in range(0, 128, 1):
if (y__[num] == label_batch[num]).all():
# print("same")
AccNum = AccNum + 1
print(AccNum)
print("準確%", (AccNum / 128) * 100)
saveName = "model.ckpt"
saver = tf.train.Saver()
save_path = saver.save(sess, "save/" + saveName)
print("训练完成!")
print("保存模型成功!")
print("Model saved in file: %s" % save_path)
| true
|
400e1115d17abac56066840c72596a1e618d4ece
|
Python
|
mayaraarrudaeng/preprocessamentodadosCAWM
|
/calcula_dist_estacoes.py
|
UTF-8
| 1,946
| 2.71875
| 3
|
[] |
no_license
|
import pandas as pd
from datetime import date
import utm
import calendar
import os
from scipy.spatial.distance import squareform, pdist
nome_arquivo = "estacoes.csv"
# directory containing the input file
diretorio_estacoes = 'dados'
# directory where the generated matrix is saved
diretorio_resultados = 'resultados'
diretorio_distancias = 'resultados/dist_estacoes'
dados = pd.read_csv(diretorio_estacoes+'/'+nome_arquivo, delimiter=',', decimal='.')
dados = dados[ [ 'Codigo' , 'Latitude' , 'Longitude'] ]
print('dados.size', dados.shape[0])
for i in range(dados.shape[0]):
resultado = utm.from_latlon(dados.iloc[i].Latitude, dados.iloc[i].Longitude)
dados.iloc[i] = [dados.iloc[i].Codigo,resultado[0],resultado[1]]
#print(dados.iloc[i].Codigo, dados.iloc[i].Latitude, dados.iloc[i].Longitude)
dados = dados.rename(index=str, columns={"Codigo" : "Código", "Latitude": "x", "Longitude": "y"})
lista_estacoes = dados['Código'].unique()
# convert all station codes from float to int
lista_estacoes = list(map(int, lista_estacoes))
dist_matrix = pd.DataFrame(squareform(pdist(dados.iloc[:, 1:]) ), columns=lista_estacoes, index=lista_estacoes )
dist_matrix.to_csv(diretorio_resultados+'/dist_matriz.csv',decimal='.')
matriz_estacoes_proximas = pd.DataFrame()
for estacao in lista_estacoes:
estacao = int(estacao)
    # select the column of the target station
dados_selecionados = dist_matrix[estacao]
dados_selecionados = pd.DataFrame(dados_selecionados)
    # sort ascending by the target station column
dados_ordenados = dados_selecionados.sort_values(by=estacao, ascending=True)
    # keep only stations more than 1 metre away
dados_ordenados = dados_ordenados[dados_ordenados[estacao] > 1]
estacoes_proximas = dados_ordenados.iloc[ : , :]
estacoes_proximas.to_csv(diretorio_distancias +'/'+str(estacao)+'.csv', decimal='.')
estacoes_proximas.to_csv(diretorio_distancias+'/completo.csv', decimal='.', mode='a')
| true
|
8cc8b5d973cdd586e1198f28d3f5486c645b99c7
|
Python
|
betty29/code-1
|
/recipes/Python/577588_Clear_screen_beep_various/recipe-577588.py
|
UTF-8
| 3,329
| 2.90625
| 3
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
# Clear-screen and error beep module for various platforms.
# ---------------------------------------------------------
#
# File saved as "clsbeep.py" and placed into the Python - Lib drawer or
# where-ever the modules are located.
#
# Setting up a basic error beep and clear screen for Python 1.4 and greater.
# (Original idea copyright, (C)2002, B.Walker, G0LCU.)
#
# Issued as Public Domain and you can do with it as you please.
#
# Tested on Python 1.4.x for a stock AMIGA 1200 and Python 2.0.x for WinUAE.
# Tested on Python 2.4.2 for Windows ME and Python 2.6.2 for XP-SP2.
# (Now changed to include Windows Vista, [and Windows 7?], to Python 2.7.x)
# Tested on Python 2.5.2 for PCLinuxOS 2009, Knoppix 5.1.1 and Python 2.6.6
# on Debian 6.0.0...
# All platforms in CLI/Command-Prompt/Terminal mode.
#
# It is SO easy to convert to Python 3.x that I have not bothered. I`ll leave
# you guys to work that one out... :)
#
# ----------------------
# Usage in other files:-
# >>> import clsbeep[RETURN/ENTER]
# ----------------------
# Called as:-
# clsbeep.beep()
# clsbeep.cls()
# clsbeep.both()
# ----------------------
# The ~if~ statement selects the correct format for the platform in use.
# ----------------------
# Import necessary modules for this to work.
import os
import sys
# Generate a beep when called.
def beep():
# A stock AMIGA 1200 using Python 1.4 or greater.
# This assumes that the sound is enabled in the PREFS: drawer.
# AND/OR the screen flash is enabled also.
if sys.platform=='amiga':
print '\a\v'
# MS Windows (TM), from Windows ME upwards. Used in Command
# Prompt mode for best effect.
# The *.WAV file can be anything of your choice.
# CHORD.WAV was the default.
# SNDREC32.EXE no longer exists in WIndows Vista, and higher?
if sys.platform=='win32':
# os.system('SNDREC32.EXE "C:\WINDOWS\MEDIA\CHORD.WAV" /EMBEDDING /PLAY /CLOSE')
print chr(7),
# A generic error beep for all Linux platforms.
# There is a simple way to change the frequency, and the amplitude.
# This also works in a Linux terminal running a Python interpreter!
if sys.platform=='linux2':
audio=file('/dev/audio', 'wb')
count=0
while count<250:
beep=chr(63)+chr(63)+chr(63)+chr(63)
audio.write(beep)
beep=chr(0)+chr(0)+chr(0)+chr(0)
audio.write(beep)
count=count+1
audio.close()
# Add here for other OSs.
# Add here any peculiarities.
# if sys.platform=='some-platform':
# Do some sound error beep.
# Do a clear screen, with the limitations as shown.
def cls():
# A stock AMIGA 1200 using Python 1.4 or greater.
if sys.platform=='amiga':
print '\f',
# MS Windows (TM), from Windows ME upwards.
# This is for the Command Prompt version ONLY both windowed AND/OR
# screen modes.
if sys.platform=='win32':
print os.system("CLS"),chr(13)," ",chr(13),
# A generic version for all Linux platforms.
# For general console Python usage.
if sys.platform=='linux2':
print os.system("clear"),chr(13)," ",chr(13),
# Add here for other OSs.
# Peculiarities here.
# if sys.platform=='some-platform':
# Do some clear screen action...
# Do both if required.
def both():
beep()
cls()
# Module end...
# Enjoy finding simple solutions to often very difficult problems.
| true
|