blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
357de6310b875ed6cc7c7e139a610c970d7e663a | Python | JJayeee/CodingPractice | /BaekJoon/단계별로 풀어보기/Backtracking/15651_N과 M(3).py | UTF-8 | 218 | 2.78125 | 3 | [] | no_license | def sol(result, depth):
    # Depth-first enumeration of every length-`lenth` sequence over 1..num
    # (repetition allowed), printed space-separated -- BOJ 15651 "N and M (3)".
    if depth == lenth:
        print(result.strip())
    else:
        for x in range(1, num+1):
            sol(result+str(x)+' ', depth + 1)
# num = N (value range), lenth = M (sequence length), read from stdin.
num, lenth = map(int, input().split())
sol('', 0) | true |
d5c1351e6c48c30195f4e3d42318c3109a03e99b | Python | DeltaEcho192/Glenwood_School_Code | /array_excersize.py | UTF-8 | 259 | 2.78125 | 3 | [] | no_license | nameArr = ['ant','tiger','lawrance','chris','godfree','anastacia','jazz']
# Print every name longer than six characters.
# (The original kept unused counters `i`/`x`, looked the names up by index
# via range(len(...)), and pointlessly re-assigned the loop index with
# `i = i + 1`; iterating the list directly removes that dead code while
# printing exactly the same output.)
for name in nameArr:
    if len(name) > 6:
        print(name)
| true |
1e413e84a055647b781fbafbb644b0ee544b9b5b | Python | tabegon/monster-fighter | /gui/test.py | UTF-8 | 1,101 | 3.140625 | 3 | [] | no_license | import pygame
from pygame.locals import *
pygame.init()
win = pygame.display.set_mode((400,500))
run = True
# Number of cells along each side of the square grid.
nb_cases_cote = 10
taille_case = min(win.get_size()) / nb_cases_cote # min returns the smaller window dimension, so square cells fit inside the window
font = pygame.font.SysFont("",20)
# Main loop: redraw the full grid every frame until the window is closed.
while run:
    win.fill((0,0,0))
    events = pygame.event.get()
    for event in events:
        if event.type == QUIT:
            run = False
    for x in range(nb_cases_cote):
        for y in range(nb_cases_cote): # walk both grid dimensions
            pygame.draw.rect(win, [255]*3, [x*taille_case, y*taille_case, taille_case, taille_case], 1) # draw the cell outline (line width 1)
            lettre = font.render("ciao", True, [255]*3) # render the text surface
            lettre_rect = lettre.get_rect() # fetch its bounding rect
            lettre_rect.center = [x*taille_case + 1/2*taille_case, y*taille_case + 1/2*taille_case] # center the rect in the middle of the cell
            win.blit( lettre , lettre_rect ) # blit the text onto the window
    pygame.display.flip() | true |
7d05d3104452b15a28ff4219ce1559533885f517 | Python | klich1984/holbertonschool-higher_level_programming | /0x0A-python-inheritance/3-is_kind_of_class.py | UTF-8 | 496 | 4 | 4 | [] | no_license | #!/usr/bin/python3
""" function that check if the object
is an instance of, or if the object is an instance
of a class that inherited from, the specified class
"""
def is_kind_of_class(obj, a_class):
    """Check whether obj is an instance of a_class or of a subclass of it.

    Args:
        obj (object): instance to test.
        a_class (type): class to test against.

    Return: True if type(obj) is a_class or inherits from it, otherwise False.
    """
    # issubclass() already returns a bool, so the original
    # "if ... is True: return True / else: return False" was redundant.
    return issubclass(type(obj), a_class)
| true |
28eb0371e876c9bcd199909178d324df550964d3 | Python | dougsoa/Cursoemvideo | /teste 34.py | UTF-8 | 370 | 4.21875 | 4 | [] | no_license | salario = float(input('Qual é o salário do funcinário? R$'))
# Salaries above R$1,250.00 receive a 10% raise;
# salaries at or below R$1,250.00 receive a 15% raise.
if salario > 1250:
    novo = salario + (salario * 10 / 100)
else:
    novo = salario + (salario * 15 / 100)
print('Quem ganhava R${:.2f} passa a ganhar R${:.2f}'.format(salario,novo))
| true |
568e4ef09db4b35a391accf76225f09ba31202b4 | Python | D3f0/txscada | /src/pysmve/nguru/apps/mara/tests/test_dsl.py | UTF-8 | 4,662 | 2.578125 | 3 | [] | no_license | '''
This file tests the DSL for changing attribute
+----------------------+ +------------------------+ +--------------------+
| Formula |+->| SVGElement | | SVGScreen |
|----------------------|| |------------------------| |--------------------|
| last_error || | tag | | name |
| formula || | fill | | file |
| target (FK)+----------+ | stroke | | |
+----------------------+ | text | | |
| screen (FK)+------------>| |
+------------------------+ +--------------------+
'''
from django_fasttest import TestCase
from factories import (ProfileFactory, COMasterFactory, IEDFactory, DIFactory, AIFactory,
SVGScreenFactory, SVGElementFactory, FormulaFactory)
from apps.hmi.models import Formula
from bunch import bunchify
class BasicFormulaDSLTestCase(TestCase):
    """Shared fixture for the DSL tests: one profile wired to a COMaster/IED
    carrying a single DI (E4DI00), a single AI (E4AI00) and one SVG screen
    element (E4EG00). Subclasses read these attributes directly."""
    def setUp(self):
        # Creation order matters: each factory call references the previous
        # object through a foreign key.
        self.profile = ProfileFactory()
        self.co_master = COMasterFactory(profile=self.profile)
        self.ied = IEDFactory(co_master=self.co_master)
        self.di0 = DIFactory(ied=self.ied, port=0, bit=0, tag='E4DI00', value=0)
        self.ai0 = AIFactory(ied=self.ied, tag='E4AI00', value=0)
        self.screen = SVGScreenFactory(profile=self.profile, name='E4', prefix='E4')
        self.element = SVGElementFactory(screen=self.screen, tag='E4EG00',
                                         text='',
                                         fill=0,
                                         stroke=0)
class TestContext(BasicFormulaDSLTestCase):
    """Checks building, filtering and patching of formula evaluation contexts."""

    def setUp(self):
        super(TestContext, self).setUp()
        self.formula_fill = FormulaFactory(
            target=self.element,
            attribute='fill',
            formula='SI(di.E4DI00.value,1,2)',
        )

    def test_formula_full_context(self):
        full_ctx = Formula.full_context()
        self.assertEqual(len(full_ctx.di), 1, "There should be only E4DI00 in DI context")
        self.assertEqual(len(full_ctx.ai), 1, "There should be only E4AI00 in AI context")
        self.assertEqual(len(full_ctx.eg), 1, "There should be only E4EG00 in EG context")

    def test_formula_get_related(self):
        related = self.formula_fill.get_related()
        # Only the DI tag referenced by the formula text should show up.
        self.assertIn('di', related)
        self.assertIn('E4DI00', related['di'])
        self.assertNotIn('ai', related)
        self.assertNotIn('eg', related)

    def test_formula_context(self):
        ctx = self.formula_fill.context()
        self.assertEqual(len(ctx.di), 1)
        self.assertIn('E4DI00', ctx['di'])
        self.assertFalse(ctx.ai)
        self.assertFalse(ctx.eg)

    def test_overrides_existing_value(self):
        ctx = bunchify({'di': {'E4DI00': {'value': 0}}})
        Formula._patch_context(ctx, {'di.E4DI00.value': 1})
        self.assertEqual(ctx.di.E4DI00.value, 1, "Context should be updted")

    def test_overrides_non_existing_tag(self):
        ctx = bunchify({'di': {'E4DI00': {'value': 0}}})
        Formula._patch_context(ctx, {'di.E4DI01.value': 1})
        # The untouched tag keeps its value and the new tag is created.
        self.assertEqual(ctx.di.E4DI00.value, 0)
        self.assertEqual(ctx.di.E4DI01.value, 1)
class TestDSLCaheIsDroppedAfterFormulaIsUpdated(BasicFormulaDSLTestCase):
    """Saving a Formula with a new expression must invalidate the cached
    prepared formula. ("Cahe" in the class name is a historical typo, kept
    because renaming a public identifier could break test discovery refs.)"""

    def setUp(self):
        super(TestDSLCaheIsDroppedAfterFormulaIsUpdated, self).setUp()
        self.formula = FormulaFactory(target=self.element, attribute='fill', formula='1')

    def test_cache_is_dropped(self):
        cached_before = self.formula.prepeared_formula
        self.formula.formula = '2'
        self.formula.save()
        self.assertNotEqual(cached_before, self.formula.prepeared_formula,
                            "Cache was not cleared")
class TestDSLOperations(BasicFormulaDSLTestCase):
    """Evaluation of the DSL operations themselves (currently the SI/if op)."""

    def setUp(self):
        super(TestDSLOperations, self).setUp()
        self.formula_IF = FormulaFactory(
            target=self.element,
            attribute='fill',
            formula='SI(di.E4DI00.value,1,2)',
        )

    def test_operation_si(self):
        evaluated_ok = self.formula_IF.evaluate()
        self.assertTrue(evaluated_ok, "Formula %s did not validate" % self.formula_IF.formula)
| true |
0f8ccf0f2bb2284f0a0407550eac864338e0979d | Python | jgarte/friends-omg | /src/app/plotting.py | UTF-8 | 1,043 | 2.890625 | 3 | [] | no_license | """Utilities to produce plots using eplots."""
from math import sqrt
def normal_approximation_interval(k: int, n: int, z: float = 1.96) -> float:
"""Compute the normal approximation for the interval around a binomial P."""
if min(k, n) <= 0:
return None
p = k / n
return z * sqrt((p * (1 - p)) / n)
def make_bar_chart(data: dict) -> dict:
"""Prep a plotly bar chart payload."""
names = [i.title() for i in data.keys()]
ps = [v["p"] for k, v in data.items()]
ci = [normal_approximation_interval(v["k"], v["n"]) for k, v in data.items()]
return dict(
data=[
dict(
x=names,
y=ps,
error_y=dict(type="data", array=ci, visible=True),
type="bar",
)
],
layout={
"showlegend": False,
"margin": dict(t=0, b=70, r=60, l=60),
"yaxis": {"tickformat": ",.3%", "range": [0, None]},
},
config={"displayModeBar": False, "responsive": True},
)
| true |
8814a7966d1944232936ca96510337b8ed6a1126 | Python | nurdanay/sentimentanalysis | /sentiment_analysis/get_ftse_value_test.py | UTF-8 | 196 | 2.796875 | 3 | [] | no_license | import pytest
from get_ftse_value import get_ftse_value
def test_get_ftse_value():
ftse_value = get_ftse_value()
# check we got the ftse value as a float
assert isinstance(ftse_value, float)
| true |
d489b1e43f6c337e1abcd6ec383a554e42867f68 | Python | Roshgar/SmallVersionningPythonScript | /srcs/utils.py | UTF-8 | 6,955 | 3.046875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
import os
import time
import parse
import argparse
import shutil
import re
# Default location of the working copy being versioned.
defaultFolderPath = "./projectFolder/"
# Root folder where archived versions are stored.
# NOTE(review): neither constant is referenced by the code visible in this
# module -- presumably consumed by callers; confirm before relying on them.
archivePath = "./Archives/"
# Class used to take care of the versionning information
class versionHeader:
    """Bookkeeping counters stored in the first three lines ("the header")
    of a project's versFile."""

    versions = 1               # all commits excluding overwrites
    defaultVersions = 1        # commits that used the default VERSION_X name
    commits = 1                # total number of commits
    pulls = 0                  # times a user asked for the files of this project
    currentVers = "VERSION_1"  # current version name

    def initFromFile(self, tab):
        """Populate this header from *tab*, the first three lines of a
        versFile (as returned by readlines())."""
        versionLine, changeLine, currentLine = tab[0], tab[1], tab[2]
        # Fields are colon-separated: label:value:label:value
        versionFields = versionLine.split(':')
        self.versions = int(versionFields[1])
        self.defaultVersions = int(versionFields[3])
        changeFields = changeLine.split(':')
        self.commits = int(changeFields[1])
        self.pulls = int(changeFields[3])
        # Drop the trailing newline kept by readlines().
        self.currentVers = currentLine[:-1]

    def serialize(self):
        """Render the header back into its three-line on-disk form."""
        return ("Versions:" + str(self.versions) +
                ":DefaultVersions:" + str(self.defaultVersions) +
                "\nCommits:" + str(self.commits) +
                ":pulls:" + str(self.pulls) +
                "\n" + self.currentVers + "\n")
#Class used to represent one commit.
class commitObj:
    """One commit record: an info line "user:version:date" plus a message."""

    def __init__(self, tab):
        """Build from *tab*: first entry is the info line, the rest form the
        message.

        Bug fix: the original pop()ed the first element, mutating the
        caller's list as a side effect; indexing/slicing leaves it intact.
        """
        self.info = tab[0]
        # str.join is the idiomatic (linear-time) way to concatenate.
        self.message = "".join(tab[1:])

    def getVers(self):
        """Return the version-name field of the info line."""
        return self.info.split(':')[1]

    def __str__(self):
        return ("commitObj with info : " + self.info + " and message : " + self.message)

    def serialize(self):
        """Serialize back to the <commit>...</commit> on-disk format."""
        return ("<commit>\n\t" + self.info + "\n\t" + self.message + "\n</commit>\n")
'''
Parses the header information from the file .versFile,
located in the project specified by the user
'''
def getCommitObjs(tab):
    """Group the raw lines of a versFile body into a list of commitObj
    records; the <commit>/</commit> marker lines delimit each record."""
    objects = []
    pending = []
    for line in tab:
        if line == "</commit>\n":
            # A record just closed: materialise it and start collecting anew.
            objects.append(commitObj(pending))
            pending = []
        elif line != "<commit>\n":
            pending.append(line.strip())
    return objects
# Does what it says using shutil functionnalities.
# os.path.basename just gets the last folder. (i.e `os.path.basename("hello/foo/bar")` would return "bar")
def copyAll(dest, srcs):
    """Copy each entry of *srcs* into *dest*: files with metadata preserved,
    directories as whole trees named after their last path component."""
    for source in srcs:
        if not os.path.isfile(source):
            # os.path.basename keeps only the final component, e.g. "bar"
            # from "hello/foo/bar".
            shutil.copytree(source, dest + "/" + os.path.basename(source))
        else:
            shutil.copy2(source, dest)
#Retrieves the header information, updates it, and commits it.
def getHeaderAndCommit(path):
    """Parse a versFile at *path*.

    Returns (versionHeader, list of commitObj) on success, or (None, None)
    when *path* is missing or not a regular file (an error is printed).
    """
    if not os.path.isfile(path):
        print("From getHeaderAndCommit : [" + path + "] does not exists or is not a file.")
        return (None, None)
    # Bug fix: "with" closes the handle even if parsing raises; the original
    # leaked the open file on any exception before close().
    with open(path) as myFile:
        lines = myFile.readlines()
    header = versionHeader()
    # The first three lines form the header; the rest are commit records.
    headerLines = [lines.pop(0) for _ in range(3)]
    header.initFromFile(headerLines)
    commitObjs = getCommitObjs(lines)
    return (header, commitObjs)
'''
Self explanatory. But it checks if the user is asking for an overwrite or specified
a version. This allows to get the actual name of the version being pushed.
'''
def getVersionName(args, header):
    """Return the version name to commit under: the user-specified one if
    given, otherwise the next default VERSION_X slot."""
    return args.version if args.version else "VERSION_" + str(header.defaultVersions + 1)
def createCommitObj(projectName, args, versionName):
    """Build a commitObj recording args.user's commit of *versionName*,
    stamped with the current local time."""
    when = time.asctime(time.localtime(time.time()))
    lines = [args.user + ':' + versionName + ':' + when, str(args.message)]
    return commitObj(lines)
'''
Creates the string for a given commit. in form :
<commit>
USER:VERSIONNAME:DATE
COMMIT_MESSAGE
</commit>
'''
def countVersions(path):
    """Count the version sub-directories directly under *path*.

    Returns (count, list_of_directory_names); plain files are ignored.
    """
    versionNames = [entry for entry in os.listdir(path)
                    if os.path.isdir(os.path.join(path, entry))]
    return len(versionNames), versionNames
def commitLog(projectName, args, versionName):
    """Return a serialized <commit> entry for *versionName* in the form:
    <commit>\\n\\tUSER:VERSION:DATE\\n\\tMESSAGE\\n</commit>\\n"""
    stamp = time.asctime(time.localtime(time.time()))
    infoLine = args.user + ":" + versionName + ":" + stamp
    return "<commit>\n\t" + infoLine + "\n\t" + str(args.message) + "\n</commit>\n"
'''
Sets up the .versFile on first commit of a project.
Creates a header and then writes everything to the file.
'''
def setUpFiles(projectName, args, currPath):
    """Write the initial versFile for a brand-new project and return the
    name of the version that was committed."""
    # Object representing the "Header" of the versFile.
    header = versionHeader()
    if args.version:
        # The user picked a name, so no default VERSION_X slot is consumed.
        header.currentVers = args.version
        header.defaultVersions = 0
    version = header.currentVers
    # Bug fix: "with" guarantees the file is closed even if a write raises
    # (the original leaked the handle on exceptions before close()).
    # NOTE(review): this writes "versFile" (no leading dot) while comments
    # elsewhere in this module mention ".versFile" -- kept as-is; confirm.
    with open(currPath + projectName + "/" + "versFile", 'w') as versFile:
        # Header first (serialized string), then the first commit record.
        versFile.write(header.serialize())
        versFile.write(commitLog(projectName, args, version))
    return (version)
def setLock(path, user, lock):
    """(Re)write path/.lockFile as "locked:<user>" or "unlocked:<user>".

    lockCheck() parses this file by splitting on ':'.
    """
    with open(path + '/.lockFile', 'w+') as lockFile:
        if lock:
            lockFile.write("locked:" + user)
        else:
            # Bug fix: the original appended a stray debug write ("NOOOOO")
            # after the user name, corrupting the record lockCheck() parses.
            lockFile.write("unlocked:" + user)
'''
Simple function, opens .lockFile from the requested project, verifies if a user previously checked out the file(s) and returns :
- True, userName IF it is 'locked'.
- False, "" IF EITHER the project isn't 'locked' or if it's the user who 'locked' that is requesting the files.
'''
def lockCheck(path, user):
    """Report whether the project at *path* is checked out by someone else.

    Returns (True, owner) when another user holds the lock; (False, owner)
    when *user* already holds it; (False, "") when unlocked; and
    (False, user) after recreating a deleted lock file.
    """
    lockPath = path + '/.lockFile'
    if not os.path.exists(lockPath):
        if os.path.exists(path):
            # Restore a missing lock file in the unlocked state.
            with open(lockPath, 'w+') as tmpFile:
                tmpFile.write("unlocked:" + user)
            print("The lockFile was previously deleted. Restored.")
        else:
            print("!! An error occured. Currently in lockCheck (utils.py Line 185) with no path to specified project. Something went terribly wrong previously. If this message is showing up there is a real problem. !!")
        return (False, user)
    # Bug fix: the original discarded the result of line.strip(); keep the
    # stripped value so a trailing newline cannot leak into the owner name.
    with open(lockPath, 'r+') as lockFile:
        line = lockFile.readline().strip()
    info = line.split(':')
    locked, lockUser = info[0], info[1]
    if locked == "locked":
        if user == lockUser:
            # The requester already holds the lock: not blocked.
            return False, lockUser
        return True, lockUser
    return False, ""
| true |
753dc2aea601d8498804bfcc688222115572af55 | Python | dresen/praat | /scripts/Tier.py | UTF-8 | 6,224 | 2.921875 | 3 | [
"MIT"
] | permissive | from Interval import Interval
import sys
class Tier(object):
    """
    A class for a Praat Tier. The class supports extraction of new tiers from
    existing Tiers and adding new Tiers from extracted Praat object as well as
    transforms on the Interval objects that are stored in a Tier object. Also
    implements a wrapper for Mace, to compute inter-annotator agreement etc."""
    def __init__(self, xmin, xmax, size, nid):
        """Create an empty tier over xmin..xmax expecting *size* intervals."""
        super(Tier, self).__init__()
        self.xmin = xmin            # tier start time
        self.xmax = xmax            # tier end time
        self.size = size            # declared (maximum) number of intervals
        self.intervals = []         # Interval objects stored in this tier
        self.id = nid               # tier name / identifier
        self.typefreq = {}          # annotation text -> occurrence count
        self.updateSize()
        self.competence = False     # Mace competence, False until computed

    def __str__(self):
        """Print a human-readable summary to stdout and return ''.

        NOTE(review): printing from __str__ is unconventional but kept,
        since callers may rely on the side effect.
        """
        print("ID: ", self.id)
        print("Grid start: ", self.xmin)
        print("Grid end: ", self.xmax)
        print("Number of intervals: ", self.size)
        print("Current number of Intervals: ", self.currentsize)
        print("Type count: ", len(self.typefreq))
        if self.competence:
            print("Mace competence: ", self.competence)
        return ''

    def __getitem__(self, key):
        """Index straight into the stored intervals."""
        return self.intervals[key]

    def addInterval(self, interval):
        """Adds an Interval, adds an id and updates the Tier statistics."""
        self.intervals.append(interval)
        interval.id = len(self.intervals) - 1
        self.updateSize()
        if type(interval.text) == str:
            self.typefreq[interval.text] = self.typefreq.get(interval.text, 0) + 1

    def resize(self, newIntervals):
        """Replace the stored intervals with *newIntervals* and refresh stats.

        NOTE(review): addInterval() asserts against the OLD self.size while
        re-adding, so growing a tier past its previous size aborts -- confirm
        whether that is intended.
        """
        self.intervals = []
        for i in newIntervals:
            self.addInterval(i)
        self.size = len(self.intervals)

    def updateSize(self):
        """Refresh currentsize and sanity-check it against the declared size."""
        self.currentsize = len(self.intervals)
        try:
            assert self.currentsize <= self.size
        except AssertionError:
            print(self.currentsize)
            print(self.size)
            # Bug fix: sys.exit() takes a single argument and the attribute
            # is sys.argv; the original sys.exit('[', sys.arg[0] + ...)
            # raised TypeError/AttributeError instead of exiting cleanly.
            sys.exit('[' + sys.argv[0] + ']: Size problem')

    def printGrid(self, filehandle):
        """Print function called by a TextGrid object to output a complete
        TextGrid. Calls a similar function on all stored Interval objects."""
        header = ['class = "IntervalTier"',
                  'name = ' + self.id,
                  'xmin = ' + str(self.xmin),
                  'xmax = ' + str(self.xmax),
                  'intervals: size = ' + str(self.size)
                  ]
        # Praat's format indents tier attributes by 8 spaces.
        filehandle.write('\n'.join([" " * 8 + x for x in header]) + '\n')
        for n, i in enumerate(self.intervals):
            # Praat interval indices are 1-based.
            filehandle.write(' ' * 8 + 'intervals [' + str(n + 1) + ']:\n')
            i.printGrid(filehandle)

    def timedInterval(self, start, end=False):
        """Return (first_index, last_index + 1) for the interval(s) nearest
        the given time(s). Boundaries rarely match exactly, so the interval
        with the minimum xmin distance is chosen; with *end* given, the
        returned pair spans a whole time frame."""
        assert type(start) == float
        # min over enumerate() yields an (index, interval) pair.
        interval1 = min(
            enumerate(self.intervals), key=lambda x: abs(x[1].xmin - start))
        if end:
            assert type(end) == float
            interval2 = self.timedInterval(end)
        else:
            interval2 = interval1
        return (interval1[0], interval2[0] + 1)

    def timedAnnotation(self, time):
        """Return the annotation text of the interval nearest to *time*."""
        assert type(time) == float
        # Bug fix: min() over enumerate() yields an (index, interval) pair;
        # the original returned tgtInterval.text on the tuple itself, which
        # raised AttributeError. Index into the pair instead.
        tgtInterval = min(
            enumerate(self.intervals), key=lambda x: abs(x[1].xmin - time))
        return tgtInterval[1].text

    def thresholdInterval(self, threshold, interval):
        """Compare an Interval with numeric annotation against a threshold."""
        assert type(threshold) == float
        try:
            return float(interval.text.strip('"')) > threshold
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; only conversion/attribute failures abort.
        except (TypeError, ValueError, AttributeError):
            sys.exit("thresholdInterval(): Unable to compare " +
                     interval.text + " and " + str(threshold))

    def splitText(self, newTiername, glueleft, glueright):
        """Split each interval's quoted annotation into per-phone intervals,
        gluing diacritic-like characters left or right, and distributing the
        interval's duration over half-phones (long phones with ':'/'ː' get
        an extra half-phone)."""
        newIntervals = []
        for i in self.intervals:
            if i.text.strip() == '""' or i.viewable == False:
                # if viewable is False, i.text likely contains a list or array
                newIntervals.append(i.copy())
                continue
            # Drop the surrounding quotes and work character by character.
            letters = [x for x in i.text[1:-1]]
            newtext = []
            try:
                first = letters.pop(0)
            except IndexError:
                # Something in the formatting of the annotation is wrong.
                newIntervals.append(i.copy(replace='""'))
                continue
            if first in glueright:
                first = first + letters.pop(0)
            mente = ''
            for l in letters:
                if mente != '':
                    # prefix current letter
                    first = mente + l
                    mente = ''
                elif l in glueleft:
                    # Append to first
                    first += l
                elif l in glueright:
                    # If $l is a prefix
                    mente = l
                    newtext.append(first)
                    first = l
                else:
                    # Base case
                    newtext.append(first)
                    first = l
            newtext.append(first)
            intervalDuration = i.xmax - i.xmin
            if len(newtext) <= 1:
                newIntervals.append(i.copy(replace='"' + newtext[0] + '"'))
                continue
            else:
                halfPhoneCount = len(newtext) * 2 + letters.count(
                    ':') + letters.count('ː')
                halfPhoneDuration = intervalDuration / halfPhoneCount
                xmin = i.xmin
                for phone in newtext:
                    xmax = xmin + halfPhoneDuration * 2
                    if ':' in phone or 'ː' in phone:
                        # Long phones span one extra half-phone.
                        xmax += halfPhoneDuration
                    newIntervals.append(Interval(xmin, xmax, '"' + phone + '"'))
                    xmin = xmax
        return newIntervals
| true |
2b834aef2c79fc881e5be8c603576c34666cfac3 | Python | NoahRottman/Shrimpy-Balancer | /portfolio-balancer.py | UTF-8 | 2,587 | 3.21875 | 3 | [] | no_license | from scipy.optimize import minimize
import numpy as np
np.seterr(divide='ignore', invalid='ignore') # Log of 0 may be encountered
class PortfolioManager(object):
    """Computes a portfolio allocation over a set of coins that maximises
    the confidence-weighted expected log-reward of predicted return rates."""

    def __init__(self, coins):
        """
        Initialize instance of the PortfolioManager.

        Parameters
        ----------
        coins: list of strings
            The ticker symbols for coins to be managed in the portfolio.
        """
        self.coins = coins
        self.n_coins = len(coins)
        self.portfolio_allocation = None
        self.p = None   # confidence per predicted return rate, shape (1, n_coins)
        self.r = None   # predicted return rates, shape (1, n_coins)

    def optimize_portfolio(self):
        """
        Compute the optimal allocation for the predicted return rates and
        confidences previously stored via set_predicted_return_rates().

        Returns
        -------
        success: bool
            Whether a maximizing allocation was found.
        allocation: np.array of shape (n_coins,)
            The optimal distribution of funds.
        """
        # Fund allocation must sum to 1.
        cons = ({'type': 'eq',
                 'fun': lambda x: np.sum(x) - 1
                 })
        # Bug fix: the original read the unbound module-level names ``r``
        # and ``func``; the data and objective live on the instance.
        n_coins = self.r.shape[1]
        # Initialize allocation to be uniform amongst n_coins.
        x_0 = [1 / n_coins] * n_coins
        # Minimize the negated objective to obtain a maximization.
        res = minimize(fun=self.func, x0=x_0, constraints=cons)
        # Return success flag and the optimal allocation array.
        return res['success'], res['x']

    def func(self, x):
        """
        Compute the (negative) expected reward given portfolio allocation *x*.

        Parameters
        ----------
        x: np.array of shape (n_coins,)
            The allocation of portfolio value.

        Returns
        -------
        y: float
            The negated confidence-weighted expected log-reward.
        """
        return -np.sum(self.p * np.log(100 * x * (1 + self.r)))

    def set_predicted_return_rates(self, r, p):
        """
        Store the predicted return rates ``r`` and confidence scores ``p``.

        Parameters
        ----------
        r: np.array of shape (1, n_coins)
            The predicted return rates for the next time period.
        p: np.array of shape (1, n_coins)
            The confidence level for each return rate prediction.
        """
        self.r = r
        self.p = p
| true |
65b173752d66db7c167aad9e78333a8328fbf751 | Python | Tomos-Evans/garrison | /test/apis/ingredients.py | UTF-8 | 2,860 | 2.546875 | 3 | [
"MIT"
] | permissive | from test.apis import ApiTestCase
from app.models.drinks import Ingredient
class TestGet(ApiTestCase):
    """API tests for /api/ingredients/: listing, fetching one, creating,
    required-field validation and name-conflict handling."""
    def setUp(self):
        super().setUp()
        # Seed three known ingredients (two alcoholic with an ABV, one not).
        Ingredient.from_params('vodka', True, 40)
        Ingredient.from_params('gin', True, 35)
        Ingredient.from_params('orange juice', False)
    def test_get_all(self):
        # GET the whole collection and compare against the exact payload,
        # including per-ingredient location URLs built from the stored refs.
        response = self.client.get('/api/ingredients/')
        target = {
            'ingredients': [
                {
                    'location': '/api/ingredients/'+Ingredient.query.all()[0].ref,
                    'ref': Ingredient.query.all()[0].ref,
                    'name': 'vodka',
                    'alcoholic': True,
                    'abs': 40,
                },
                {
                    'location': '/api/ingredients/'+Ingredient.query.all()[1].ref,
                    'ref': Ingredient.query.all()[1].ref,
                    'name': 'gin',
                    'alcoholic': True,
                    'abs': 35,
                },
                {
                    'location': '/api/ingredients/'+Ingredient.query.all()[2].ref,
                    'ref': Ingredient.query.all()[2].ref,
                    'name': 'orange juice',
                    'alcoholic': False,
                    'abs': None,
                },
            ]
        }
        self.assertEqual(response.json, target)
    def test_get_one_that_exists(self):
        # Fetching a single known ref returns that ingredient's full record.
        ref = Ingredient.query.all()[0].ref
        response = self.client.get('/api/ingredients/'+ref)
        target = {
            'location': '/api/ingredients/'+ref,
            'ref': ref,
            'name': 'vodka',
            'alcoholic': True,
            'abs': 40,
        }
        self.assertEqual(response.json, target)
    def test_one_doesnt_exist(self):
        # Unknown refs yield 404.
        response = self.client.get('/api/ingredients/not-a-ref')
        self.assertEqual(response.status_code, 404)
    def test_ing_post(self):
        # Creating valid ingredients (with or without 'abs' for
        # non-alcoholic ones) returns 201 Created.
        a = {
            'name': 'new ing',
            'alcoholic': True,
            'abs': 12
        }
        b = {
            'name': 'new ing2',
            'alcoholic': False
        }
        response = self.client.post('/api/ingredients/', json=a)
        self.assertEqual(response.status_code, 201)
        response = self.client.post('/api/ingredients/', json=b)
        self.assertEqual(response.status_code, 201)
    def test_abs_requirement(self):
        # An alcoholic ingredient without 'abs' is rejected with 400.
        a = {
            'name': 'new ing',
            'alcoholic': True
        }
        response = self.client.post('/api/ingredients/', json=a)
        self.assertEqual(response.status_code, 400)
    def test_ing_name_confilct(self):
        # Re-using an existing name is rejected with 409 Conflict.
        a = {
            'name': 'vodka',
            'alcoholic': True,
            'abs': 1
        }
        response = self.client.post('/api/ingredients/', json=a)
        self.assertEqual(response.status_code, 409)
| true |
0795c93b5e7cb2275a35199e790416fbabd93115 | Python | CheckMateSergei/PandasTutorial | /PandasIntro.py | UTF-8 | 359 | 2.890625 | 3 | [] | no_license | #! /usr/bin/python3
import pandas as pd
import matplotlib.pyplot as plot

df = pd.read_csv("avocado.csv")
# .copy() so the later in-place set_index does not operate on a view of df
# (avoids pandas' SettingWithCopyWarning).
albany_df = df[ df['region'] == 'Albany' ].copy()
albany_df.set_index('Date', inplace=True, drop=True)
print(albany_df.head())
# Bug fix: imshow() expects a 2-D image; a single price column is a 1-D
# series, so draw it as a line plot instead.
albany_df['AveragePrice'].plot()
plot.show()
| true |
b4431a058c01c9858e35a576fdc5e7d1de77540e | Python | Irwinlinker/Powerball | /powerball.py | UTF-8 | 5,691 | 4.3125 | 4 | [] | no_license | #Robert Marsh
#July 7, 2020
#Program simulates a lottery drawing
##The game allows the player to enter 3 regular numbers and a powerball number
##and the input is validated
##Regular numbers must be in the range 1 through 9
##Regular numbers must be unique, there can be no duplicates
##The powerball number must be in the range 1 through 3
##The program then generates a random drawing
##The player's ticket is compared against the random drawing and the winnings
##are calculated and printed
##The player can choose to play multiple times
##This program must use a list
##It must have a list of three numbers for the player, and a list of three
##numbers for the computer. It must also have an integer variable for the player's
##powerball number, and an integer variable for the computer's powerball number
import random
print("Welcome to the Powerball Lottery drawing!\n")
print("A player will either select 3 numbers and a Powerball number on their own or select a quick pick.")
play = None
while play != "n":
drawing = [] #empty drawing list
ticket = [] #empty ticket list used for manual entry or quickpick
dpowerball = None #drawing powerball
tpowerball = None #ticket powerball
matches = 0 #used for match check later
pbmatch = 0 #used for powerball match later
for i in range(3): #genreating random numbers for drawing
randnum = random.randint(1, 9)
while randnum in drawing:
randnum = random.randint(1, 9)
drawing.append(randnum)
dpowerball = random.randint(1, 3)
## print(drawing)
print("""
Please select an option below.
1) Select your own numbers
2) Quick pick selects randomly generated numbers
""")
#get user input for manual selection or quickpick
option = int(input("Please enter a number from the above list: "))
while option < 1 or option > 2: #validates entry
print("Invalid option!")
option = int(input("Please enter a number from the above list: "))
if option == 1: #user selects numbers
print("\nPlease pick 3 unique numbers from 1 - 9 then a powerball number between 1 - 3.")
for i in range(3): #player input loop and validation
num = int(input("\nEnter number " + str(i + 1) + ": "))
while num < 1 or num > 10 or num in ticket:
if num < 1 or num > 10:
num = int(input("\nEnter a number between 1 - 9 for #" + str(i + 1) + ": "))
elif num in ticket:
num = int(input("\nEnter a unique number for #" + str(i + 1) + ": "))
ticket.append(num)
tpowerball = int(input("\nEnter your powerball number: "))
while tpowerball < 1 or tpowerball > 3: #player pb input and validation
tpowerball = int(input("\nEnter your powerball number between 1 - 3: "))
for d in drawing: #drawing and ticket comparison
for t in ticket:
if d == t:
matches += 1
else:
pass
if dpowerball == tpowerball: #powerball comparison
pbmatch += 1
else:
pass
elif option == 2: #quickpick
for i in range(3): #loop for quickpick ticket and validation
randnum = random.randint(1, 9)
while randnum in ticket:
randnum = random.randint(1, 9)
ticket.append(randnum)
tpowerball = random.randint(1, 3)
for d in drawing: #drawing and ticket comparison
for t in ticket:
if d == t:
matches += 1
else:
pass
if dpowerball == tpowerball: #powerball comparison
pbmatch += 1
else:
pass
print("\nYour ticket numbers:", end=" ") #ticket print out
for i in range(3):
print(ticket[i], end=" ")
print("and PB:", tpowerball)
print("\nThe drawing numbers:", end= " ") #drawing print out
for i in range(3):
print(drawing[i], end=" ")
print("and PB:", dpowerball)
#checks for matches
if matches == 1 and pbmatch == 0:
print("\nYou matched 1 number, but not the powerball.")
print("You won $1!")
if matches == 1 and pbmatch == 1:
print("\nYou matched 1 number and the powerball!")
print("You won $2!")
if matches == 2 and pbmatch == 0:
print("\nYou matched 2 numbers, but not the powerball.")
print("You won $10!")
if matches == 2 and pbmatch == 1:
print("\nYou matched 2 number and the powerball!")
print("You won $20!")
if matches == 3 and pbmatch == 0:
print("\nYou matched 3 numbers, but not the powerball.")
print("You won $100!")
if matches == 3 and pbmatch == 1:
print("\nYou matched 3 numbers and the powerball!")
print("You won $1000!")
if matches == 0 and pbmatch == 1:
print("\nSorry, you lost.")
if matches == 0 and pbmatch == 0:
print("\nSorry, you lost.")
#asks if user wants to play again and validation
play = input("\nPlay again? (y/n): ")#.lower()
## while play not in ("y", "n"):
## play = input("\nPlay again? (y/n): ").lower()
#play = play.upper()
while play != "Y" and play != "N":
print("Invalid input!")
play = input("\nPlay again? (y/n): ")
play = play.upper()
input("\nPress Enter to exit")
| true |
cbac286867c970d8f8fecce47a042d1996930140 | Python | shananiki/spieldochmit | /1.0/CharacterSelectState.py | UTF-8 | 405 | 2.546875 | 3 | [] | no_license | from GameState import *
from SelectionRectangle import *
from Inventory import Inventory
from Interface import Interface
import pygame
class CharacterSelectState(GameState):
    """Game state that draws every registered interface element each frame."""

    def __init__(self):
        # Interface widgets to render; populated elsewhere.
        self.interface_list = []

    def render(self, screen):
        """Blit each interface element at its own (x, y) position."""
        for widget in self.interface_list:
            screen.blit(widget.get_image(),
                        (widget.get_x_pos(), widget.get_y_pos()))
| true |
898e50ec48c52d05b0049d299274074cd7416e0d | Python | anmoldp7/SimpleWebCrawler | /CrackedCrawler.py | UTF-8 | 437 | 3.09375 | 3 | [] | no_license | import requests
import bs4
import re
import urllib.parse
def parse_title(title):
    """Decode a percent-encoded title and turn '+' separators into spaces."""
    decoded = urllib.parse.unquote(title, encoding="utf-8", errors="replace")
    return ' '.join(decoded.split('+'))
url = "http://www.cracked.com/"
soup = bs4.BeautifulSoup(requests.get(url).text, 'html.parser')
x = soup.find_all('h3')
print('#' * 80)
for s in x:
if s.find('a') is not None:
p = s.find('a')
print(parse_title(p["title"]))
print('#' * 80) | true |
e8eb6946c74f21d1a8f203974f900090824bc8d1 | Python | AMYMEME/algorithm-study | /common/2021.08.10/maplejh_1516.py | UTF-8 | 1,024 | 3.125 | 3 | [] | no_license | # https://www.acmicpc.net/problem/1516
import sys
from collections import defaultdict, deque
N = int(sys.stdin.readline())
buildings = defaultdict(int)  # time needed to construct each building
order = defaultdict(list)  # prerequisite building -> buildings depending on it
indegree = [0] * (N + 1)  # number of unfinished prerequisites per building
q = deque()  # buildings whose prerequisites are all satisfied
dp = [0] * (N + 1)  # slowest finish time among a building's prerequisites
# Read each building's own build time followed by its prerequisite list
# (the trailing sentinel on each input line is dropped via temp[1:-1]).
for i in range(1, N + 1):
    temp = list(map(int, sys.stdin.readline().split()))
    buildings[i] = temp[0]
    for t in temp[1:-1]:
        order[t].append(i)
    indegree[i] = len(temp[1:-1])
    if not indegree[i]:
        # No prerequisites: ready immediately, total time is its own time.
        q.append(i)
        dp[i] = buildings[i]
# Kahn's algorithm: peel off finished buildings, propagating the slowest
# prerequisite completion time to each dependent.
while q:
    pre_node = q.popleft()
    for post_node in order[pre_node]:
        indegree[post_node] -= 1
        dp[post_node] = max(dp[post_node], buildings[pre_node])
        if not indegree[post_node]:
            # All prerequisites met: total = own time + slowest prerequisite.
            buildings[post_node] += dp[post_node]
            q.append(post_node)
# One total construction time per building, in building order.
for k in range (1, N + 1):
    print(buildings[k])
| true |
8c7a4ba03ab1928ef199c1b6d34d9933323ce144 | Python | lyl617/SDN-TORME | /large-topo/Data/CDF.py | UTF-8 | 854 | 2.546875 | 3 | [] | no_license | import json
from collections import defaultdict
def read(path):
    """Load and return the JSON document stored at *path*.

    The file is opened via a context manager so the handle is closed
    promptly instead of leaking until garbage collection (the original
    json.load(open(path)) never closed the file).
    """
    with open(path) as fp:
        return json.load(fp)
# Build a cumulative distribution of link load (weighted by link-number rate)
# for traffic level "800" and append "load cumulative_count" pairs to cdf.txt.
# NOTE(review): this file is Python 2 (see the bare `print` at the bottom).
dict_of_load=defaultdict(int)
staticDyn_link_load=read("staticDyn_link_load.json")
staticDyn_linknumber=read("staticDyn_linknumber.json")
numberrate=0
for link in staticDyn_link_load["800"]:
    linkload=staticDyn_link_load["800"][link]
    linknumberrate=staticDyn_linknumber["800"][link]
    numberrate+=linknumberrate
    dict_of_load[linkload]+=linknumberrate
x=[]
y=[]
# Sort the distinct load values, then collect their counts in the same order.
for (k,v) in dict_of_load.items():
    x.append(k)
x.sort()
for i in x:
    y.append(dict_of_load[i])
sum=0  # NOTE(review): shadows the builtin sum()
final_y=[]
# Running total of counts -> the cumulative distribution.
for i in range(len(y)):
    sum+=y[i]
    final_y.append(sum)
fd=open("cdf.txt",'a')
for i in range(len(x)):
    fd.write(str(x[i])+' '+str(final_y[i])+'\n')
fd.close()
print final_y
#print staticDyn_linknumber["20000"]
28362fe8287260732de181595c00facf998b7b69 | Python | Smookii/ParticlesEnvironnement | /particle.py | UTF-8 | 2,280 | 3.125 | 3 | [] | no_license | import random
class Particle():
    """A 2D particle launched with a randomized velocity that falls under
    gravity and bounces inside a rectangular area until it comes to rest."""

    def __init__(self, startpos, initspeed, col):
        x0, y0 = startpos[0], startpos[1]
        self.startpos = [x0, y0]
        self.pos = [x0, y0]
        # Scatter the launch velocity around twice the requested speed.
        vx = initspeed[0]*2 + random.uniform(-20, 20)
        vy = initspeed[1]*2 + random.uniform(-18, 8)
        self.start_speed = [vx, vy]
        self.actual_speed = list(self.start_speed)
        self.gravity = [0, 180]
        self.col = col
        self.time = [0, 0]
        self.bounce_ratio = [1, 1.5]
        self.out = False

    def update_speed(self):
        """Recompute current velocity: launch velocity plus gravity over time."""
        for axis in (0, 1):
            self.actual_speed[axis] = self.start_speed[axis] + self.gravity[axis] * self.time[axis]

    def rebounce(self, max):
        """Bounce off the walls of [0, max] on each axis.

        (Parameter name kept for existing keyword callers.)  On impact the
        launch point/clock are reset and the speed is inverted, halved and
        randomly scaled.
        """
        lo, hi = self.bounce_ratio
        for axis in (0, 1):
            if self.pos[axis] < 0:
                self.startpos[axis] = 0
                self.start_speed[axis] = -self.actual_speed[axis] / 2 * random.uniform(lo, hi)
                self.time[axis] = 0
            if self.pos[axis] > max[axis]:
                self.startpos[axis] = max[axis]
                self.start_speed[axis] = -self.actual_speed[axis] / 2 * random.uniform(lo, hi)
                self.time[axis] = 0

    def color_by_y(self, ymax):
        """Blend col[0]/col[1] from vertical position, clamped to 0..255."""
        shade = self.pos[1] / ymax * 255
        shade = min(max(shade, 0), 255)
        self.col[0] = shade
        self.col[1] = 255 - shade

    def movement(self):
        """Projectile-motion position update: p = p0 + v*t + g*t^2/2."""
        for axis in (0, 1):
            t = self.time[axis]
            self.pos[axis] = (self.startpos[axis]
                              + self.start_speed[axis] * t
                              + self.gravity[axis] * t * t / 2)

    def update_time(self, delta):
        """Advance each axis' elapsed time by delta."""
        self.time[0] += delta
        self.time[1] += delta

    def check_speed(self, ymax):
        """Mark the particle dead once nearly still while resting near the floor."""
        for axis in (0, 1):
            if -3 < self.actual_speed[axis] < 3 and self.pos[1] > ymax - 5:
                self.out = True

    def update(self, delta, xmax, ymax):
        """Run one simulation step of length delta inside the (xmax, ymax) box."""
        self.update_time(delta)
        self.update_speed()
        self.rebounce(max=[xmax, ymax])
        self.color_by_y(ymax)
        self.movement()
        self.check_speed(ymax)
| true |
4adfd9a5e3b922b5131c1ce182596ad844f5d035 | Python | siphera/tkinter-intro | /tkregister.py | UTF-8 | 1,191 | 3.015625 | 3 | [] | no_license | from tkinter import *
import tkinter
# Build the registration form: one label + entry per field, with every entry
# collected in `fields` so submit_command() can read the values back by key.
window = Tk()
window.geometry("300x250")
window.title("Register")
# window.configure(background="grey")
fields = {}
# Name
name_label = Label(window, text="Name")
name_field = Entry(window)
fields['name'] = name_field
name_label.grid(row=0, column=0)
name_field.grid(row=0, column=1)
# Surname
surname_label = Label(window, text="Surname")
surname_field = Entry(window)
# Bug fix: the surname/email/contact mappings all pointed at name_field
# (copy-paste error), so the summary printed the name for every field.
fields['surname'] = surname_field
surname_label.grid(row=1, column=0)
surname_field.grid(row=1, column=1)
# Email
email_label = Label(window, text="Email")
email_field = Entry(window)
fields['email'] = email_field
email_label.grid(row=2, column=0)
email_field.grid(row=2, column=1)
# Contact number
contact_label = Label(window, text="Contact number")
contact_field = Entry(window)
fields['contact'] = contact_field
contact_label.grid(row=3, column=0)
contact_field.grid(row=3, column=1)
# Submit button functionality
def submit_command():
    """Collect every entry's current text and print a formatted summary."""
    output = "User data:\n"
    output += "".join(f"{key}: {fields[key].get()}\n" for key in fields)
    print(output)
# Wire the submit button to its handler and enter the Tk event loop.
submit = Button(window, text="Submit", command=submit_command)
submit.grid(row=4, column=0)
window.mainloop()
cdd26917b7acf8fd1dfa3280531fac61982aa656 | Python | neelamy/Algorithm | /Array/Find2NonRepeatingNo.py | UTF-8 | 1,136 | 3.921875 | 4 | [] | no_license | # Source : http://www.geeksforgeeks.org/?p=2457
# Find the two non-repeating elements in an array of repeating elements
# Algo/DS : Array , bit manipulation
# Complexity :O(n) , space - O(1)
# Note : x ^ x = 0 so xor will remove all even nos and only odd nos are left
# if all nos are repeated except one : xor all elements of A. This will return only one odd element of array
# if only 1 no is repeated : (xor all element of A) xor ( 1^2^3....n). This will make all elements even except one repeated element which will now be odd
class get2NonRepeatingNos:
    # NOTE(review): Python 2 code (bare `print`, builtin reduce).
    def NonRepeatingNo(self, A):
        """Print the two elements of A that occur an odd number of times,
        using XOR partitioning: O(n) time, O(1) extra space."""
        # Get the xor of all elements
        xor = reduce(lambda x, y : x^y , A)
        # Get the rightmost set bit in set_bit_no
        set_bit_no = xor & ~(xor - 1)
        # Now divide elements in two sets by comparing rightmost set
        # bit of xor with bit at same position in each element
        x = reduce(lambda x, y : x^y ,[i for i in A if set_bit_no & i])
        y = reduce(lambda x, y : x^y ,[i for i in A if not set_bit_no & i])
        print x,y
def main():
    # Demo input: every value repeats except 7 and 9.
    get2NonRepeatingNos().NonRepeatingNo([2, 3, 7, 9, 11, 2, 3, 11])
if __name__ == '__main__':
    main()
7c07ac3249ae13cb21e3d994e21d6f01e352b504 | Python | Vincent105/python | /04_The_Path_of_Python/12_class/1316__eq__.py | UTF-8 | 259 | 3.40625 | 3 | [] | no_license | class City():
    def __init__(self, name):
        """Store the city's name; equality compares it case-insensitively."""
        self.name = name
    def __eq__(self, city2):
        """Two cities are equal when their names match, ignoring case.

        NOTE(review): defining __eq__ without __hash__ makes instances
        unhashable; also assumes city2 is a City (AttributeError otherwise).
        """
        return self.name.upper() == city2.name.upper()
# Demo: case-insensitive comparison prints True, then False.
one = City('Taipei')
two = City('taipei')
three = City('myhome')
print(one == two)
print(one == three)
| true |
8a403cae9f65654ab5a3396553c307b853bd7160 | Python | zhongxiangboy/TKMRC-1 | /ir/put.py | UTF-8 | 3,121 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #! /user/bin/evn python
# -*- coding:utf8 -*-
"""
@Author : Lau James
@Contact : LauJames2017@whu.edu.cn
@Project : TKMRC
@File : put.py
@Time : 18-10-17 下午4:54
@Software : PyCharm
@Copyright: "Copyright (c) 2018 Lau James. All Rights Reserved"
"""
from ir.config import Config
from elasticsearch import helpers
import pandas as pd
import json
import jieba
import time
excel_path = './doc/tk-QP.xlsx'  # source spreadsheet of question/paragraph pairs
json_path = './doc/excel2json.temp.json'  # intermediate line-delimited JSON dump
def excel2json():
    """Convert the question/paragraph Excel sheet into line-delimited JSON.

    Reads `excel_path` (columns 问题 = question/title, 段落 = paragraph),
    drops incomplete rows, and writes one {"paragraph", "title"} JSON
    object per line to `json_path`.
    """
    dataframe = pd.read_excel(excel_path,
                              sheet_name='Sheet1',
                              header=0,
                              dtype={'问题': str, '段落': str})
    # Bug fix: dropna() returns a new frame; the original discarded the
    # result, so rows with missing cells slipped through unfiltered.
    dataframe = dataframe.dropna()
    titles = []
    paragraphs = []
    for key, data in dataframe.iterrows():
        titles.append(data['问题'])
        paragraphs.append(data['段落'])
    data_list = []
    for title, paragraph in zip(titles, paragraphs):
        paragraph = paragraph.replace('\n', '')  # paragraphs must be single-line
        paras_data = {"paragraph": paragraph, "title": title}
        data_list.append(paras_data)
    with open(json_path, 'w', encoding='utf-8') as temp_json:
        for json_line in data_list:
            data_json = json.dumps(json_line, ensure_ascii=False)
            temp_json.write(str(data_json))
            temp_json.write('\n')
def get_json_obj(json_path):
    """
    Read the line-delimited JSON file and build an id -> record dict.

    Each line holds {"title", "paragraph"}; both fields are tokenised with
    jieba (space-joined) and keyed by a monotonically increasing id seeded
    from the current UNIX time.

    :param json_path: path to the file written by excel2json()
    :return: dict mapping int id -> {'title': str, 'paragraph': str}
    """
    paras = {}
    t_id = int(time.time())  # according time to set id
    with open(json_path, 'r') as fin:
        # Iterate the file directly instead of the old readline() loop.
        for raw_line in fin:
            # Bug fix: json.loads' `encoding` keyword was a no-op and was
            # removed entirely in Python 3.9, so it is dropped here.
            record = json.loads(raw_line.strip())
            paragraph = ' '.join(token for token in jieba.cut(record['paragraph'].strip()))
            title = ' '.join(token for token in jieba.cut(record['title'].strip()))
            t_id += 1
            paras[t_id] = {'title': title, 'paragraph': paragraph}
    return paras
def put2es(paras, bulk_size, config):
    """
    Bulk-index the paragraph records into Elasticsearch.

    :param paras: dict mapping doc id -> {'title', 'paragraph'}
    :param bulk_size: number of actions to buffer per bulk request
    :param config: Config carrying the es client, index name and doc type
    :return: None
    """
    count = 1
    pending = []
    for para_id, para in paras.items():
        pending.append({
            "_index": config.index_name,
            "_type": config.doc_type,
            "_id": para_id,
            "_source": para
        })
        count += 1
        # Flush a full batch as soon as it is assembled.
        if len(pending) % bulk_size == 0:
            helpers.bulk(config.es, pending)
            print("bulk index:" + str(count))
            pending = []
    # Flush whatever is left over from the final partial batch.
    if len(pending) > 0:
        helpers.bulk(config.es, pending)
        print("bulk index:" + str(count))
if __name__ == '__main__':
    # Index the pre-built line-delimited JSON dump into Elasticsearch.
    config = Config()
    # excel2json()
    title_paras = get_json_obj(json_path)
    # for idx, title_para in title_paras.items():
    #     print(idx)
    #     print(title_para)
    put2es(title_paras, bulk_size=10000, config=config)
| true |
d00aae89be3a4f4be0c8c0ffdb7551c0a8853f59 | Python | canvassanalytics/streamhist | /streamhist/utils.py | UTF-8 | 2,804 | 2.921875 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/usr/bin/env python
"""Some useful utility functions and classes."""
import ctypes as _ctypes
import sys as _sys
from sys import platform as _platform
from math import log, sqrt
import types
# Py2/py3 compatibility shims: Iterable moved to collections.abc in 3.3,
# and itertools.izip/accumulate differ between the two major versions.
if _sys.version_info >= (3, 3):
    from collections.abc import Iterable
else:
    from collections import Iterable
iterator_types = (types.GeneratorType, Iterable)
if _sys.version_info.major >= 3:
    _izip = zip  # builtin zip is already lazy on py3
else:
    from itertools import izip as _izip
try:
    from itertools import accumulate
except ImportError:
    # itertools.accumulate only in Py3.x
    def accumulate(iterable):
        """Pure-Python fallback yielding running totals of `iterable`."""
        it = iter(iterable)
        total = next(it)
        yield total
        for element in it:
            total += element
            yield total
_E = 2.718281828459045  # Euler's number, hard-coded to avoid math.e lookup
__all__ = ["next_after", "argmin", "bin_diff", "accumulate"]
# Load this platform's C math library so we can call its nextafter(3);
# MSVC's runtime exposes it under the name _nextafter.
if _platform == "linux" or _platform == "linux2":
    _libm = _ctypes.cdll.LoadLibrary('libm.so.6')
    _funcname = 'nextafter'
elif _platform == "darwin":
    _libm = _ctypes.cdll.LoadLibrary('libSystem.dylib')
    _funcname = 'nextafter'
elif _platform == "win32":
    _libm = _ctypes.cdll.LoadLibrary('msvcrt.dll')
    _funcname = '_nextafter'
else:
    # these are the ones I have access to...
    # fill in library and function name for your system math dll
    print("Platform", repr(_platform), "is not supported")
    _sys.exit(0)
_nextafter = getattr(_libm, _funcname)
_nextafter.restype = _ctypes.c_double  # double nextafter(double, double)
_nextafter.argtypes = [_ctypes.c_double, _ctypes.c_double]
def next_after(x, y):
    """Returns the next floating-point number after x in the direction of y."""
    # This implementation comes from here:
    # http://stackoverflow.com/a/6163157/1256988
    # NOTE(review): math.nextafter provides this natively on Python >= 3.9.
    return _nextafter(x, y)
def _diff(a, b, weighted):
    """Gap between two bins' values, optionally damped by ln(e + min count)."""
    gap = b.value - a.value
    if weighted:
        gap *= log(_E + min(a.count, b.count))
    return gap
def bin_diff(array, weighted=False):
    """Differences between each pair of consecutive bins in `array`."""
    return [_diff(lo, hi, weighted) for lo, hi in _izip(array, array[1:])]
def argmin(array):
    """Index of the first minimum element.

    min() over the index range with an item-getter key stays in C just like
    the original index(min(...)) trick, and makes a single pass.
    """
    return min(range(len(array)), key=array.__getitem__)
def linspace(start, stop, num):
    """Dependency-free stand-in for numpy.linspace.

    NOTE: unlike numpy this yields num + 1 points with spacing
    (stop - start) / num, and returns the bare `stop` when num == 1 --
    behaviour preserved for existing callers.
    """
    if num == 1:
        return stop
    step = (stop - start) / float(num)
    return [start + step * i for i in range(num + 1)]
def roots(a, b, c):
    """Solve a*x^2 + b*x + c = 0 over the reals.

    Returns the pair (x1, x2) -- equal when the discriminant is zero --
    and raises ValueError when the roots are complex.
    """
    disc = b ** 2.0 - (4.0 * a * c)
    if disc < 0:
        raise ValueError("This equation has no real solution!")
    root = sqrt(disc)
    x1 = (-b + root) / (2.0 * a)
    x2 = (-b - root) / (2.0 * a)
    return (x1, x2)
| true |
80bb56fb1eb7b546218e7151220ddcd28c34d1fb | Python | Sohieb/reversi-game | /main.py | UTF-8 | 2,496 | 3.28125 | 3 | [] | no_license | import pygame
import random
import view
import board
import common
from view import *
from board import *
from common import *
class game_manager:
    """
    Top-level Reversi controller: owns the view (pygame UI) and the board
    model, and runs the turn-based game loop in play().
    """
    def __init__(self):
        ## create a view and a model object
        self.window = view.game_interface()
        self.board = board.game_model()
        ## Show the intro screen and get the initial game parameters
        self.game_mode = self.window.intro_screen()
        self.current_playing = 0 # 0 -> player_1, 1 -> player_2
    # The main game function handling the game loop
    def play(self):
        pygame.time.wait(300)
        clock = pygame.time.Clock()
        while True:
            # End-of-game handling: announce the winner and either restart
            # (back to the intro screen) or leave the loop entirely.
            if self.board.is_game_ended():
                count_white, count_black = self.board.get_cell_count()
                if count_black > count_white:
                    self.winner = "BLACK"
                elif count_white > count_black:
                    self.winner = "WHITE"
                else:
                    self.winner = "TIE"
                self.next_action = self.window.result_screen(self.winner)
                pygame.time.wait(300)
                if self.next_action == "Continue":
                    self.board.reset_game_board()
                    self.game_mode = self.window.intro_screen()
                else:
                    break
            if self.game_mode[self.current_playing] == "Human":
                # Poll clicks until the human picks a legal cell
                # (moves are encoded as row * 8 + column).
                valid_moves = self.board.get_valid_moves(self.current_playing)
                clicked_cell = self.window.game_screen(self.board.grid, True)
                while clicked_cell[0] * 8 + clicked_cell[1] not in valid_moves:
                    clicked_cell = self.window.game_screen(self.board.grid, True)
                self.board.make_move(clicked_cell, self.current_playing)
            else: ## current player is the computer actor
                pygame.time.wait(300)
                if self.game_mode[2] == "Easy": # Random move
                    ##########print "Easy"
                    valid_moves = self.board.get_valid_moves(self.current_playing)
                    rand_move = random.randint(0,len(valid_moves) - 1)
                    clicked_cell = (valid_moves[rand_move] // common.SIDE_LEN, \
                                    valid_moves[rand_move] % common.SIDE_LEN)
                    self.board.make_move(clicked_cell, self.current_playing)
                #else: #TODO # Intelligent move
                    ##########print "Hard"
            self.current_playing = 1 - self.current_playing ## switch to the next player
            self.window.game_screen(self.board.grid, False)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            pygame.display.update()
            clock.tick(60)
# Entry point: build the game manager and hand control to the main loop.
def main():
    game = game_manager() ## create a game instance
    game.play() ## run the game
if __name__ == '__main__':
    main()
228db9bb329234917faec0a659698473b9a0b201 | Python | chelseashin/My-Algorithm | /daily_study/samsung2021/boj/17070_파이프옮기기1.py | UTF-8 | 1,159 | 3.046875 | 3 | [] | no_license | # Memoization 풀이
# DP 연습 많이 하자..
import sys
input = sys.stdin.readline  # fast input (deliberately shadows the builtin)
# Successor moves by current pipe orientation; entries are (dr, dc).
# Orientation index: 0 = horizontal, 1 = vertical, 2 = diagonal.
direction = [[(0, 1), (1, 1)],
             [(1, 0), (1, 1)],
             [(0, 1), (1, 1), (1, 0)]]
def available(dr, dc):
    """Map a movement delta to its pipe-orientation index.

    (0, 1) -> 0 horizontal, (1, 0) -> 1 vertical, (1, 1) -> 2 diagonal;
    any other delta yields None, matching the original implicit return.
    """
    orientation = {(0, 1): 0, (1, 0): 1, (1, 1): 2}
    return orientation.get((dr, dc))
# Memoization
def dfs(r, c, d):
    """Count pipe paths from head cell (r, c) with orientation d to the
    bottom-right cell, memoised in the global `visited` table (-1 = unseen)."""
    if r == N-1 and c == N-1:
        return 1
    if visited[r][c][d] == -1:
        visited[r][c][d] = 0 # mark visited, then accumulate child counts
        for dr, dc in direction[d]:
            nr = r + dr
            nc = c + dc
            nd = available(dr, dc)
            if not (0 <= nr < N and 0 <= nc < N):
                continue
            if A[nr][nc]: # blocked by a wall
                continue
            # A diagonal move additionally needs both swept cells to be empty.
            if nd == 2 and (A[nr][nc-1] or A[nr-1][nc]):
                continue
            visited[r][c][d] += dfs(nr, nc, nd)
    return visited[r][c][d]
# main: read the N x N grid (1 = wall), then count ways to move the pipe
# (initially horizontal at (0, 1)) to the bottom-right cell.
N = int(input())
A = [list(map(int, input().split())) for _ in range(N)]
visited = [[[-1] * 3 for _ in range(N)] for _ in range(N)]
print(dfs(0, 1, 0))
# NOTE(review): debug dump of the memo table; extra output like this will
# fail an online judge -- presumably leftover from debugging.
for v in visited:
    print(v)
d68afb2abb1f3c5c42a57fd3acc1780883d87bf2 | Python | EasyPost/easypost-python | /tests/test_end_shipper.py | UTF-8 | 1,711 | 2.65625 | 3 | [
"MIT"
] | permissive | import pytest
from easypost.models import EndShipper
@pytest.mark.vcr()
def test_endshipper_create(ca_address_1, test_client):
    """Creating an EndShipper yields a typed record with an es_-prefixed id."""
    created = test_client.end_shipper.create(**ca_address_1)

    assert isinstance(created, EndShipper)
    assert created.id.startswith("es_")
    assert created.street1 == "388 TOWNSEND ST APT 20"
@pytest.mark.vcr()
def test_endshipper_retrieve(ca_address_1, test_client):
    """A created EndShipper can be fetched back by its id."""
    created = test_client.end_shipper.create(**ca_address_1)
    fetched = test_client.end_shipper.retrieve(created.id)

    assert isinstance(fetched, EndShipper)
    assert fetched.street1 == created.street1
@pytest.mark.vcr()
def test_endshipper_all(page_size, test_client):
    """Listing EndShippers honours page_size and yields typed records."""
    listed = test_client.end_shipper.all(page_size=page_size)

    records = listed["end_shippers"]
    assert len(records) <= page_size
    for record in records:
        assert isinstance(record, EndShipper)
@pytest.mark.vcr()
def test_endshipper_update(ca_address_1, test_client):
    """Updating an EndShipper persists the new field values."""
    created = test_client.end_shipper.create(**ca_address_1)

    updated = test_client.end_shipper.update(
        created.id,
        name="Captain Sparrow",
        company="EasyPost",
        street1="388 Townsend St",
        street2="Apt 20",
        city="San Francisco",
        state="CA",
        zip="94107",
        country="US",
        phone="9999999999",
        email="test@example.com",
    )

    assert isinstance(updated, EndShipper)
    assert updated.id.startswith("es")
    # The API auto-capitalizes names, so compare against the upper-cased form.
    assert updated.name == "CAPTAIN SPARROW"
| true |
82803ac4537ba23111934a0c1f4c402aa55680fa | Python | ayuratuputri/I-Gst-Ayu-Ratu-Putri-Maharani_I0320049_Andhika_Tugas6 | /I0320049_exercise 6.7.py | UTF-8 | 172 | 4.15625 | 4 | [] | no_license | #membuat for untuk rentang nilai tertentu
# Print the square of every number in a fixed range.
for i in range (2, 9): #loop i from 2 up to (but excluding) 9
    print("kuadrat dari", i, "adalah", i**2)
| true |
3055e493953ef4a26361e74985d9ecc1de851cbb | Python | mjacobsen32/CS331 | /CS331-Assignment1/a_2.py | UTF-8 | 8,798 | 2.96875 | 3 | [] | no_license | import sys
class State:
def __init__(s,parent,lc,lw,lb,rc,rw,rb,d):
s.depth = d
s.parent = parent
s.lc = lc
s.lw = lw
s.lb = lb
s.rc = rc
s.rw = rw
s.rb = rb
def state_allowed(s, lc, lw, lb, rc, rw, rb):
if (lc < lw and lc > 0) or (rc < rw and rc > 0):
return False
if lc < 0 or lw < 0 or rc < 0 or rw < 0:
return False
else:
return True
def one_chick(s):
if s.lb == 1:
if s.state_allowed(s.lc-1,s.lw,0,s.rc+1,s.rw,1):
new = State(s,s.lc-1,s.lw,0,s.rc+1,s.rw,1,s.depth+1)
return(new)
elif s.rb == 1:
if s.state_allowed(s.lc+1,s.lw,1,s.rc-1,s.rw,0):
new = State(s,s.lc+1,s.lw,1,s.rc-1,s.rw,0,s.depth+1)
return(new)
return(0)
def two_chick(s):
if s.lb == 1:
if s.state_allowed(s.lc-2,s.lw,0,s.rc+2,s.rw,1):
new = State(s,s.lc-2,s.lw,0,s.rc+2,s.rw,1,s.depth+1)
return(new)
elif s.rb == 1:
if s.state_allowed(s.lc+2,s.lw,1,s.rc-2,s.rw,0):
new = State(s,s.lc+2,s.lw,1,s.rc-2,s.rw,0,s.depth+1)
return(new)
return(0)
def one_wolf(s):
if s.lb == 1:
if s.state_allowed(s.lc,s.lw-1,0,s.rc,s.rw+1,1):
new = State(s,s.lc,s.lw-1,0,s.rc,s.rw+1,1,s.depth+1)
return(new)
elif s.rb == 1:
if s.state_allowed(s.lc,s.lw+1,1,s.rc,s.rw-1,0):
new = State(s,s.lc,s.lw+1,1,s.rc,s.rw-1,0,s.depth+1)
return(new)
return(0)
def two_wolf(s):
if s.lb == 1:
if s.state_allowed(s.lc,s.lw-2,0,s.rc,s.rw+2,1):
new = State(s,s.lc,s.lw-2,0,s.rc,s.rw+2,1,s.depth+1)
return(new)
elif s.rb == 1:
if s.state_allowed(s.lc,s.lw+2,1,s.rc,s.rw-2,0):
new = State(s,s.lc,s.lw+2,1,s.rc,s.rw-2,0,s.depth+1)
return(new)
return(0)
def one_chick_one_wolf(s):
if s.lb == 1:
if s.state_allowed(s.lc-1,s.lw-1,0,s.rc+1,s.rw+1,1):
new = State(s,s.lc-1,s.lw-1,0,s.rc+1,s.rw+1,1,s.depth+1)
return(new)
elif s.rb == 1:
if s.state_allowed(s.lc+1,s.lw+1,1,s.rc-1,s.rw-1,0):
new = State(s,s.lc+1,s.lw+1,1,s.rc-1,s.rw-1,0,s.depth+1)
return(new)
return(0)
def print_state(s):
print(str(s.lc) + ',' + str(s.lw) + ',' + str(s.lb) + ',' + str(s.rc)+ ',' + str(s.rw)+ ',' + str(s.rb))
def get_string(s):
return(str(s.lc) + ',' + str(s.lw) + ',' + str(s.lb) + ',' + str(s.rc)+ ',' + str(s.rw)+ ',' + str(s.rb))
class Problem:
    """Search driver for the river-crossing puzzle.

    Loads the initial and goal states from CSV files and runs dfs / bfs /
    iddfs / astar over State nodes via a frontier list whose insertion
    policy encodes the chosen strategy.
    """
    def __init__(river_obj, initial_state_file, goal_state_file, mode, output_file,max_d):
        river_obj.moves = 0
        river_obj.max_depth = 100
        river_obj.curr_max_depth = max_d  # iddfs depth bound for this run
        river_obj.explored = []
        river_obj.mode = mode
        river_obj.output_file = output_file
        # Parse the comma-separated start state: lc,lw,lb,rc,rw,rb.
        initial_arr = []
        with open(initial_state_file) as initial:
            for line in initial:
                initial_arr += line.strip().split(',')
        river_obj.current = State(0,int(initial_arr[0]),int(initial_arr[1]),int(initial_arr[2]),int(initial_arr[3]),int(initial_arr[4]),int(initial_arr[5]),0)
        river_obj.frontier = [river_obj.current]
        # Parse the goal state the same way.
        final_arr = []
        with open(goal_state_file) as final:
            for line in final:
                final_arr += line.strip().split(',')
        river_obj.final = State(-1,int(final_arr[0]),int(final_arr[1]),int(final_arr[2]),int(final_arr[3]),int(final_arr[4]),int(final_arr[5]),-1)
    def print_impossible(riv):
        """Report failure (iddfs keeps quiet so deeper retries can follow)."""
        if riv.mode == 'dfs' or riv.mode == 'bfs' or riv.mode == 'astar':
            print("no solution found")
    def empty_frontier(r):
        """True when there is nothing left to expand."""
        if len(r.frontier) == 0: return True
        else: return False
    def next_node(r):
        """Pop the head of the frontier as the new current node."""
        r.current = r.frontier.pop(0)
    def complete(r):
        """True when the current node matches the goal state field-by-field."""
        if ((r.current.lc == r.final.lc) and
            (r.current.lw == r.final.lw) and
            (r.current.lb == r.final.lb) and
            (r.current.rc == r.final.rc) and
            (r.current.rw == r.final.rw) and
            (r.current.rb == r.final.rb)):
            return(True)
        else:
            return(False)
    def print_screen(r):
        """Write the solution path and stats to the output file, then echo it.

        NOTE(review): this method references the module-level `riv` for the
        output path instead of `r` -- it only works because the driver below
        names its instance `riv`.  Temporarily rebinds sys.stdout.
        """
        l = []
        parent = r.current.parent
        while parent != 0:
            l.append(parent.get_string())
            parent = parent.parent
        l.reverse()
        sys.stdout = open(riv.output_file, 'w')
        for i in l:
            print(i)
        print(r.final.get_string())
        print("Nodes expanded: " + str(r.moves))
        print("Path Length: " + str(len(l)))
        sys.stdout.close()
        with open(riv.output_file, 'r') as f:
            contents = f.read()
        sys.stdout = sys.__stdout__
        print(contents)
    def add_explored(r):
        """Record the current node as expanded."""
        r.explored.append(r.current)
    def states_equal(r,s1,s2):
        """Field-by-field equality of two State nodes (ignores depth/parent)."""
        if (s1.lc == s2.lc and
            s1.lw == s2.lw and
            s1.lb == s2.lb and
            s1.rc == s2.rc and
            s1.rw == s2.rw and
            s1.rb == s2.rb):
            return True
        else:
            return False
    def not_in_frontier(r,s):
        """True when `s` is not already queued for expansion."""
        for i in r.frontier:
            if r.states_equal(s,i):
                return(False)
        return(True)
    def not_in_explored(r,s):
        """True when `s` has not been expanded before."""
        for i in r.explored:
            if r.states_equal(s,i):
                return(False)
        return(True)
    def add_to_frontier(r,s):
        """Insert `s` per strategy: stack for dfs/iddfs, queue for bfs,
        heuristic-ordered position for astar."""
        if r.mode == "dfs":
            r.frontier.insert(0,s)
        elif r.mode == "bfs":
            r.frontier.append(s)
        elif r.mode == 'iddfs':
            r.frontier.insert(0,s)
        elif r.mode == 'astar':
            count = 0
            placed = False
            for i in r.frontier:
                if r.h(s) < r.h(i):
                    r.frontier.insert(count,s)
                    placed = True
                    break
                count+=1
            if placed == False:
                r.frontier.append(s)
    def not_passed_max(r,s):
        """Depth check: only iddfs enforces curr_max_depth."""
        if r.mode == 'dfs' or r.mode == 'bfs' or r.mode == 'astar':
            return(True)
        elif r.mode == 'iddfs':
            if s.depth < r.curr_max_depth:
                return(True)
            else:
                return(False)
    def expand(r):
        """Generate all five legal successors of the current node and queue
        each one that is new and within the depth bound."""
        child = r.current.one_chick()
        if child != 0:
            if r.not_in_explored(child) and r.not_in_frontier(child) and r.not_passed_max(child):
                r.add_to_frontier(child)
        child = r.current.two_chick()
        if child != 0:
            if r.not_in_explored(child) and r.not_in_frontier(child) and r.not_passed_max(child):
                r.add_to_frontier(child)
        child = r.current.one_wolf()
        if child != 0:
            if r.not_in_explored(child) and r.not_in_frontier(child) and r.not_passed_max(child):
                r.add_to_frontier(child)
        child = r.current.one_chick_one_wolf()
        if child != 0:
            if r.not_in_explored(child) and r.not_in_frontier(child) and r.not_passed_max(child):
                r.add_to_frontier(child)
        child = r.current.two_wolf()
        if child != 0:
            if r.not_in_explored(child) and r.not_in_frontier(child) and r.not_passed_max(child):
                r.add_to_frontier(child)
    def print_frontier(r):
        """Debug helper: dump every frontier state."""
        for i in r.frontier:
            i.print_state()
    def print_explored(r):
        """Debug helper: dump every explored state."""
        for i in r.explored:
            i.print_state()
    def h(r,s):
        """A* heuristic: animals still missing from the goal's boat bank."""
        if r.final.lb == 1:
            return(abs(r.final.lc - s.lc) + abs(r.final.lw - s.lw))
        elif r.final.rb == 1:
            return(abs(r.final.rc - s.rc) + abs(r.final.rw - s.rw))
# Drive the search: a single pass for dfs/bfs/astar, or successively deeper
# passes for iddfs, until the goal is reached or the frontier is exhausted.
riv = Problem(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],1)
go = True
m = 1000  # iddfs: maximum number of deepening iterations
counter = 2
c = True
found = False
while c == True:
    go = True
    while go == True:
        if riv.empty_frontier() == True:
            riv.print_impossible()
            go = False
            break
        riv.next_node()
        if riv.complete() == True:
            riv.print_screen()
            go = False
            c = False
            found = True
            break
        riv.add_explored()
        riv.expand()
        riv.moves+=1
    if riv.mode == 'dfs' or riv.mode == 'bfs' or riv.mode == 'astar':
        c = False
    elif riv.mode == 'iddfs':
        if counter > m:
            c = False
        else:
            # Restart the search with a deeper depth bound.
            riv = Problem(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],counter)
            counter+=1
    # Bug fix: `fale` was an undefined name (typo for False) and raised
    # NameError whenever this condition was evaluated.
    if counter == m and found == False:
        print("no solution found")
4c19e277d97707ededb3c37e884cad57eaa88cb9 | Python | Lehyu/pyml | /optimizer/sgd.py | UTF-8 | 1,685 | 2.671875 | 3 | [] | no_license | import sys
from base import BaseOptimizer
from .loss import LossWithSumOfSquare, LossWithLogits, LossWithSoftmax
from utils import nutils
Test = False
class SGD(BaseOptimizer):
    """Mini-batch stochastic gradient descent optimizer.

    NOTE(review): the `decay` constructor argument is currently unused, and
    `_check_convergence` is not defined in this class -- presumably both are
    provided or intended on BaseOptimizer; confirm before relying on them.
    """
    def __init__(self, learning_rate=1e-1, eps=1e-5, max_iter=100000, batch_size=10, loss="SumOfSquares", decay='step'):
        self.learning_rate = learning_rate
        self.eps = eps
        self.max_iter = max_iter
        self.batch_size = batch_size
        # NOTE(review): stores the raw `loss` argument (a string by default)
        # but solve() calls self._loss.feval(...), which needs a loss object
        # such as LossWithSumOfSquare -- looks like a missing name->object
        # mapping; verify against callers.
        self._loss = loss
    def solve(self, X, y, params):
        """
        Run mini-batch SGD until the loss drops below eps or max_iter epochs.

        :param X: (n_samples, n_features)
        :param y: (n_samples, n_classes)
        :param params: dict with 'coef' (n_features, n_classes) and 'bias',
                       updated in place each step
        :return: the (mutated) params dict
        """
        n_samples, n_features = X.shape
        loss = sys.maxsize
        epoch = 0
        learning_rate = self.learning_rate
        while loss > self.eps and epoch < self.max_iter:
            total_loss = 0.0
            for batch in nutils.batch(n_samples, self.batch_size):
                loss, coef_grad, bias_grad = self._loss.feval(X[batch], y[batch], params)
                total_loss += loss * len(batch)
                # Plain gradient step on both parameter groups.
                params['coef'] -= learning_rate * coef_grad
                params['bias'] -= learning_rate * bias_grad
            total_loss /= n_samples
            # todo
            if self._check_convergence():
                break
            epoch += 1
            learning_rate = self._tune_learning_rate(learning_rate, epoch)
            if epoch % 100 == 0 and Test:
                print("epoch %d, loss %.5f" % (epoch, total_loss))
        return params
    def _tune_learning_rate(self, learning_rate, epoch):
        # Constant schedule for now; hook for implementing `decay` later.
        return learning_rate
| true |
595645519bda048cf5cd980e31798be114bc8abb | Python | dunitian/BaseCode | /python/5.concurrent/Thread/2.lock_queue/2.Lock/Ext/4.sortlock1.py | UTF-8 | 2,354 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | from time import sleep
from multiprocessing.dummy import Pool as ThreadPool, Lock
class Account(object):
    """A named bank account protected by its own lock."""

    def __init__(self, name, money=5000):
        self.name = name
        self.lock = Lock()
        self.money = money  # opening balance
class Bank(object):
    """Deadlock-free transfers via consistent lock ordering.

    Both accounts are locked in descending id() order, so two concurrent
    opposite-direction transfers can never deadlock; an extra tie lock
    covers the (unlikely) equal-id case.
    """
    tie_lock = Lock()
    @classmethod
    def __get_hash(cls, obj):
        return id(obj) # hash_func(obj)
    @classmethod
    def transfer(cls, p_from, p_to, money):
        """Move `money` from account p_from to account p_to."""
        from_hash = cls.__get_hash(p_from)
        to_hash = cls.__get_hash(p_to)
        print(f"from:{p_from.name}to{p_to.name}=>{money}")
        # Rule: always lock the account with the larger hash first.
        if from_hash > to_hash:
            print("from_hash > to_hash")
            with p_from.lock:
                p_from.money -= money
                sleep(1) # simulate network latency
                with p_to.lock:
                    p_to.money += money
        elif from_hash < to_hash:
            print("from_hash < to_hash")
            with p_to.lock:
                p_to.money += money
                sleep(1) # simulate network latency
                with p_from.lock:
                    p_from.money -= money
        # Hash collision handling (extremely unlikely):
        else:
            # NOTE(review): this branch prints "from_hash < to_hash" too --
            # a misleading copy-pasted message for the equal-hash case.
            print("from_hash < to_hash")
            # On a tie, everyone races for the shared tie lock; whoever
            # grabs it transfers first.
            with cls.tie_lock:
                with p_from.lock:
                    p_from.money -= money
                    sleep(1) # simulate network latency
                    with p_to.lock:
                        p_to.money += money
def main():
    """Demo: fire concurrent transfers between three accounts and show the
    balances before and after (they should net out consistently)."""
    xiaoming = Account("小明")
    xiaozhang = Account("小张")
    xiaopan = Account("小潘")
    print(f"[互刷前]小明:{xiaoming.money},小张:{xiaozhang.money},小潘{xiaopan.money}")
    p = ThreadPool()
    for i in range(3):
        p.apply_async(Bank.transfer, args=(xiaoming, xiaozhang, 1000))
        if i == 1: # xiaopan suddenly repays 1000 to xiaoming
            p.apply_async(Bank.transfer, args=(xiaopan, xiaoming, 1000))
        p.apply_async(Bank.transfer, args=(xiaozhang, xiaoming, 1000))
    p.close()
    p.join()
    print(f"[互刷后]小明:{xiaoming.money},小张:{xiaozhang.money},小潘{xiaopan.money}")
if __name__ == '__main__':
    main()
| true |
c31a316bc737a797615e02d1234720382b6018b7 | Python | iyouyue/Python-Exercises | /codebase/格式化输出.py | UTF-8 | 374 | 3.453125 | 3 | [] | no_license | name = input("请输入你的名字:")
age = int(input("请输入你的年龄:"))
job = input("请输入你的工作:")
hobbie = input("请输入你的爱好:")
msg = """
---------------- info of %s --------------------
Name : %s
Age : %d
job : %s
Hobbie: %s
------------------ end -------------------------
""" %(name,name,age,job,hobbie)
print(msg) | true |
623b9c5d182bb7e9240ea4708457fbe2998a6a48 | Python | HamidZiyaee/Image_classifier | /train.py | UTF-8 | 5,233 | 2.53125 | 3 | [] | no_license | import argparse
# Command-line interface for the training script.
parser=argparse.ArgumentParser()
parser.add_argument('-d','--data_dir', help='Directory to data', default='flowers')
parser.add_argument('-s','--save_dir', help='Directory to save checkpoints', default="")
parser.add_argument('-a','--arch', help='Choose pretrained model architecture either vgg19_bn or densenet121', default='vgg19_bn')
parser.add_argument('-l','--learn_rate', help='Set learning rate', type=float)
parser.add_argument('-u','--hidden_units', help='Set hidden units number', type=int)
parser.add_argument('-e','--epochs', help='Set epochs', type=int)
parser.add_argument('-g','--gpu', help='Use GPU for training GPU / CPU', default='gpu')
args=parser.parse_args()
print(args)
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import numpy as np
# Data pipeline: augmented transforms for training, deterministic
# resize/center-crop for validation and test; ImageNet normalization stats.
data_dir = args.data_dir
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
valid_transforms = transforms.Compose([transforms.Resize(255),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
train_datasets = datasets.ImageFolder(train_dir, transform = train_transforms)
valid_datasets = datasets.ImageFolder(valid_dir, transform = valid_transforms)
test_datasets = datasets.ImageFolder(test_dir, transform = test_transforms)
trainloaders = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle = True)
validloaders = torch.utils.data.DataLoader(valid_datasets, batch_size=64, shuffle = True)
testloaders = torch.utils.data.DataLoader(test_datasets, batch_size=64, shuffle = True)
#device =torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Select the compute device. Honour the --gpu flag, but fall back to CPU
# (with a warning) when CUDA is not actually available, so training does
# not crash later at model.to(device).
if args.gpu == 'gpu':
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        print("CUDA requested but not available; falling back to CPU")
        device = 'cpu'
else:
    device = 'cpu'
# Pick the pretrained backbone (classifier input width differs per arch),
# freeze its feature extractor, and attach a fresh 102-way classifier head.
if args.arch=='densenet121':
    model = models.densenet121(pretrained=True)
    input_units = 1024
elif args.arch == 'vgg19_bn' :
    model = models.vgg19_bn(pretrained=True)
    input_units = 25088
else:
    print('Your input was different than vgg19_bn / densenet121, so by default the vgg19_bn model was used')
    model = models.vgg19_bn(pretrained=True)
    input_units = 25088
for param in model.parameters():
    param.requires_grad = False  # train only the new classifier head
model.classifier=nn.Sequential(nn.Linear(input_units, args.hidden_units),
                               nn.ReLU(),
                               nn.Dropout(0.2),
                               nn.Linear(args.hidden_units, 102),
                               nn.LogSoftmax(dim=1))
# LogSoftmax output pairs with the negative log-likelihood loss.
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=args.learn_rate)
model.to(device)
# Training loop: every `print_every` steps, evaluate on the validation set
# and report train loss / validation loss / validation accuracy.
epochs = args.epochs
step =0
running_loss = 0
print_every = 5
for epoch in range(epochs):
    for images, labels in trainloaders:
        step +=1
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        logps = model.forward(images)
        loss = criterion (logps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if step % print_every == 0:
            test_loss = 0  # NOTE(review): unused; presumably leftover
            accuracy = 0
            model.eval()  # disable dropout for evaluation
            with torch.no_grad():
                valid_loss = 0
                # NOTE(review): reuses the loop names images/labels,
                # shadowing the current training batch.
                for images, labels in validloaders:
                    images, labels = images.to(device), labels.to(device)
                    logps = model.forward(images)
                    batch_loss = criterion(logps, labels)
                    valid_loss += batch_loss.item()
                    # Top-1 prediction vs. ground truth -> batch accuracy.
                    ps = torch.exp(logps)
                    top_p, top_class =ps.topk(1,dim=1)
                    equals=top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            print(f"Epoch {epoch+1}/{epochs}.. "
                  f"Train loss: {running_loss/print_every:.3f}.. "
                  f"Validation loss: {valid_loss/len(validloaders):.3f}.. "
                  f"Validation accuracy: {accuracy/len(validloaders):.3f}")
            running_loss = 0
            model.train()  # back to training mode
# Persist everything needed to resume training: the model object, its
# weights, the epoch count and the optimizer state.
checkpoint= {'model': model,
             'state_dict': model.state_dict(),
             'epochs': epochs,
             # Bug fix: the call parentheses were missing, which stored the
             # bound method object instead of the actual optimizer state dict.
             'optimizer': optimizer.state_dict()}
torch.save(checkpoint, args.save_dir + 'checkpoints.pth')
| true |
ecc87afed9ec94063b81d346c2f190ece167af9a | Python | SaiPrahladh/scad_tot | /verification/clustering.py | UTF-8 | 10,142 | 2.546875 | 3 | [] | no_license | import os, pickle, argparse
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.cluster import KMeans
from kmodes.kmodes import KModes
from kmodes.kprototypes import KPrototypes
from sklearn_extra.cluster import KMedoids
from scipy.spatial import distance
from matplotlib import pyplot as plt
from utils import create_logger, ms_since_1970, tohex, TOTUtils
algos = {'KMeans': KMeans, 'KModes': KModes, 'KPrototypes': KPrototypes, 'KMedoids': KMedoids}
metrics = {'euclidean': distance.euclidean, 'manhattan': distance.cityblock}
init_centroid_choices = ('rand', 'first', 'none')
default_outdir = './logs/clustering'
logger = create_logger('clustering', logdir=default_outdir)
logperf = lambda s: f'({ms_since_1970() - s}ms)'
class LGKClustering:
    '''
    Label Guided K* Clustering

    Repeatedly runs the configured clustering algorithm (KMeans, KModes,
    KPrototypes or KMedoids) and re-splits every mixed-label cluster until
    each resulting region contains inputs of a single label.
    '''
    def __init__(self, algo='KMeans', metric='euclidean'):
        '''
        algo   -- key into the module-level ``algos`` dict
        metric -- distance metric name; only KMedoids supports a metric
                  other than 'euclidean'
        '''
        assert algo in algos, f'algo must be one of ({algos.keys()})'
        # Bug fix: the original compared strings with ``is``/``is not``,
        # which only works by accident of CPython string interning.
        assert metric == 'euclidean' if algo != 'KMedoids' else metric in metrics, 'unsupported metric/algo combination'
        self.__algorithm, self.__fn, self.__metric, self.__distfn = algo, algos[algo], metric, metrics[metric]
    def fit(self, X, y, init_centroid='rand', categorical=None):
        '''
        Partition (X, y) into single-label LGKRegions.

        X             -- 2d array of unique input rows
        y             -- 1d array of integer labels, same length as X
        init_centroid -- 'rand', 'first' or 'none' (see init_centroid_choices)
        categorical   -- categorical column indices; KPrototypes only
        Returns self.
        '''
        # Avoid a mutable default argument; None means "no categorical columns".
        categorical = [] if categorical is None else categorical
        X_count = X.shape[0]
        assert X_count == y.shape[0], 'X & y must have same number of items'
        assert X_count == len(np.unique(X, axis=0)), 'X must have no duplicates'
        assert init_centroid in init_centroid_choices, 'init_centroid mode must be valid'
        assert not categorical if self.__algorithm != 'KPrototypes' else True, 'categorical only used by KPrototypes'
        self.__categories = np.unique(y, axis=0)
        get_centroids = (lambda X,y: LGKUtils.get_initial_centroids(X,y, rand=(init_centroid == 'rand'))) if init_centroid != 'none' else (lambda X,y: None)
        logger.info(f'finding regions...')
        start = ms_since_1970()
        remaining, regions = [(X, y)], []
        while len(remaining) > 0:
            X, y = remaining.pop(0)
            n = np.unique(y).shape[0]
            model_params = dict(n_clusters=n)
            # Bug fix: the original tested ``if init_centroid:`` which is true
            # for every non-empty mode string, so mode 'none' still injected
            # an invalid ``init=None`` parameter into the model.
            if init_centroid != 'none':
                model_params['init'] = get_centroids(X, y)
            fit_params = dict()
            model_data = X
            if self.__algorithm == 'KPrototypes':
                fit_params = dict(fit_params, categorical=categorical)
            if self.__algorithm == 'KMedoids':
                # KMedoids picks its own medoids; drop 'init' if present
                # (it may be absent now that mode 'none' no longer sets it).
                model_params.pop('init', None)
                model_params = dict(model_params, metric=self.__metric)
            model = self.__fn(**model_params).fit(model_data, **fit_params)
            # sklearn KMeans exposes cluster_centers_; the kmodes family
            # exposes cluster_centroids_.
            centroids = model.cluster_centers_ if self.__algorithm == 'KMeans' else model.cluster_centroids_
            yhat = model.predict(model_data)
            for c in np.unique(yhat):
                xis = np.where(yhat == c)[0]
                Xc, yc = X[xis], y[xis]
                if len(np.unique(yc, axis=0)) == 1:
                    # pure cluster -> becomes a final region
                    regions.append(LGKRegion(centroids[c], Xc, yc))
                else:
                    # mixed cluster -> split again on a later pass
                    remaining.append((Xc, yc))
        # assert sum total of region sizes equals num rows in X
        assert(X_count == sum([r.X.shape[0] for r in regions]))
        logger.info(f'found {len(regions)} regions {logperf(start)}')
        self.__regions = regions
        return self
    def predict(self, x, y=None):
        '''Return the region whose centroid is nearest to x, optionally
        restricted to regions with category y.'''
        regions = self.get_regions(category=y)
        distances = {i:self.__distfn(x, r.centroid) for i,r in enumerate(regions)}
        region = regions[min(distances, key=distances.get)]
        return region
    def get_regions(self, category=None, sort=False, sortrev=True):
        '''Return the fitted regions, optionally filtered by category and/or
        sorted by (size, density).'''
        regions = self.__regions
        if category is not None:
            assert(category in self.__categories)
            regions = [r for r in regions if r.category == category]
        if sort:
            regions = sorted(regions, key=lambda r:(r.X.shape[0], r.density), reverse=sortrev)
        return regions
    def get_categories(self):
        '''Return the distinct labels seen during fit().'''
        return self.__categories
class LGKRegion:
    '''
    LG Region

    A label-guided region: a centroid plus the member inputs, all of which
    carry the same single label.
    '''
    def __init__(self, centroid, X, y, metric='euclidean'):
        assert X.shape[0] == y.shape[0], 'X and y must have same number of items'
        assert len(np.unique(y, axis=0)) == 1, 'all labels in y must be the same'
        assert metric in metrics, 'unsupported metric'
        self.__metric = metric
        self.__distfn = metrics[metric]
        self.centroid = centroid
        self.X = X
        self.y = y
        self.category = y[0]
        self.n = X.shape[0]
        # Distance of every member from the centroid; the region's radius is
        # the farthest member, and density is members per unit radius.
        distfn = self.__distfn
        radii = []
        for member in X:
            radii.append(distfn(member, centroid))
        self.radii = radii
        self.radius = max(radii)
        if self.radius > 0:
            self.density = self.n / self.radius
        else:
            self.density = 0
class LGKUtils:
    '''Static helpers for LGKClustering models, regions and datasets.'''
    @staticmethod
    def find_region(lgkc, x, category, metric='euclidean'):
        '''Return the first region of the given category whose radius covers x.

        Raises StopIteration when no region contains x.
        '''
        assert metric in metrics, 'unsupported metric'
        # Bug fix: the original called next() on a *list*, which always
        # raises TypeError; next() needs an iterator, so use a generator.
        return next(r for r in lgkc.get_regions(category=category) if metrics[metric](x, r.centroid) < r.radius)
    @staticmethod
    def to_categorical(y, n_cats):
        '''One-hot encode integer labels y into n_cats columns.'''
        return np.array([[int(yi==i) for i in range(n_cats)] for yi in y])
    @staticmethod
    def get_initial_centroids(X, y, rand=True):
        '''Pick one seed centroid per class: a random member of the class
        (rand=True) or its first member (rand=False).'''
        # Bug fix: with rand=False the original indexed X[0] for *every*
        # class; it must take the first index belonging to each class.
        return np.array([X[np.random.choice(cy) if rand else cy[0]] for cy in [[i for i,yi in enumerate(y) if yi == c] for c in np.unique(y, axis=0)]])
    @staticmethod
    def find_input_index(x, X):
        '''Return the row index of x in X, or None if absent.'''
        idxs = np.where((X == x).all(axis=1))[0]
        return (idxs[0] if len(idxs) else None)
    @staticmethod
    def get_input_class(x, X, y):
        '''Return the label of x if it is a row of X, else None.'''
        assert(X.shape[0] == y.shape[0])
        idx = LGKUtils.find_input_index(x, X)
        return (y[idx] if idx is not None else None)
    @staticmethod
    def save(lgkm, outdir=default_outdir):
        '''Pickle the model to <outdir>/lgkm.pkl.'''
        savepath = os.path.join(outdir, 'lgkm.pkl')
        # Context manager closes the file deterministically (the original
        # leaked the handle from an inline open()).
        with open(savepath, 'wb') as f:
            pickle.dump(lgkm, f)
        logger.info(f'saved to {savepath}')
    @staticmethod
    def load(path):
        '''Unpickle and return a model previously written by save().'''
        with open(path, 'rb') as f:
            lgkm = pickle.load(f)
        return lgkm
    @staticmethod
    def print_regions(lgkm, sort=True):
        '''Log a one-line (label, size, density) summary per region.'''
        regions = lgkm.get_regions(sort=sort)
        logger.info(f'{len(regions)} regions:\n' + '\n'.join([f'y={r.category}, n={len(r.X)}, d={round(r.density, 2)}' for r in regions]))
    @staticmethod
    def print_summary(lgkm):
        # TODO: not implemented yet
        pass
    @staticmethod
    def pair_plot_regions(lgkm, save=False, show=True, inc_x=True, outdir=default_outdir, palette='rainbow_r'):
        '''Seaborn pair-plot of the regions: inputs as circles, centroids as
        diamonds (centroid hues offset by n_cats).'''
        logger.info('plotting regions...')
        regions = lgkm.get_regions()
        n_cats = len(lgkm.get_categories())
        X = ([x for r in regions for x in r.X] if inc_x else []) + [r.centroid for r in regions]
        y = ([y for r in regions for y in r.y] if inc_x else []) + [n_cats+r.category for r in regions]
        df = pd.DataFrame(X, columns=TOTUtils.get_feature_names())
        df['y'] = y
        # Bug fix: the palette parameter was ignored (the call hard-coded
        # 'rainbow_r') and then shadowed by the hue dict; it is now honoured.
        colors = [tohex(r,g,b) for r,g,b in sns.color_palette(palette, n_cats)]
        hue_map = {i:colors[i if i < n_cats else i-n_cats] for i in range(n_cats*(2 if inc_x else 1))}
        markers = ['o' if i<n_cats else 'D' for i in range(n_cats*(2 if inc_x else 1))]
        g = sns.pairplot(df, hue='y', corner=True, palette=hue_map, markers=markers, plot_kws=dict(alpha=0.5, s=10))
        g = g.add_legend({i:l for i,l in enumerate(TOTUtils.get_category_names())})
        if save:
            savepath = os.path.join(outdir, 'lgkm.png')
            g.savefig(savepath, dpi=300)
            logger.info(f'regions plot saved to {savepath}')
        if show:
            plt.show()
    @staticmethod
    def tsne_plot_regions(lgkm, save=False):
        # TODO: not implemented yet
        pass
    @staticmethod
    def reduce_classes(y, metaclasses=((0,1), (2,), (3,4))):
        '''Map fine-grained labels onto metaclass indices (e.g. labels 0 and 1
        both become 0). Returns a new array; y is not modified.'''
        yprime = y.copy()
        for mc,classes in enumerate(metaclasses):
            for c in classes:
                yprime[y==c] = mc
        return yprime
    @staticmethod
    def load_dataset(csvfile, n_outputs):
        '''Read a CSV whose last n_outputs columns are one-hot outputs; return
        (X, y) with duplicate inputs dropped and y as integer labels.'''
        logger.info(f'reading dataset from {csvfile}...')
        # read input and outputs into separate dataframes
        df = pd.read_csv(csvfile, index_col=0).drop_duplicates()
        output_cols = df.columns.tolist()[-n_outputs:]
        output_df, input_df = df[output_cols], df.drop(output_cols, axis=1)
        # drop any duplicate inputs from both dataframes
        dupes = [i for i,d in enumerate(input_df.duplicated()) if d]
        input_df = input_df.drop(input_df.index[dupes], axis=0)
        output_df = output_df.drop(output_df.index[dupes], axis=0)
        # convert to numpy arrays
        X = input_df.values
        y = np.array([output_cols.index(c) for c in (output_df[output_cols] == 1).idxmax(1)])
        return X, y
if __name__ == '__main__':
    # CLI entry point: cluster a CSV dataset into label-pure regions.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', required=True)
    # Bug fix: noutputs must be an int -- load_dataset slices columns with
    # it ([-n_outputs:]), which raises TypeError on a string.
    parser.add_argument('-n', '--noutputs', type=int, required=True)
    parser.add_argument('-i', '--initcentroid', default='rand', nargs='?', choices=init_centroid_choices)
    parser.add_argument('-o', '--outdir', default=default_outdir)
    # parser.add_argument('-p', '--plot', action='store_true')
    parser.add_argument('-sr', '--saveregions', action='store_true')
    parser.add_argument('-sl', '--savelogs', action='store_true')
    parser.add_argument('-v', '--verbosity', type=int, default=0)
    args = parser.parse_args()
    # configure logger (drop the module-level handlers, then rebuild with
    # the CLI-selected log directory / file logging)
    for handler in logger.handlers[:]: logger.removeHandler(handler)
    logger = create_logger('clustering', to_file=args.savelogs, logdir=args.outdir)
    # read dataset, and start clustering
    X, y = LGKUtils.load_dataset(args.file, args.noutputs)
    lgkm = LGKClustering().fit(X, y, init_centroid=args.initcentroid)
    # print regions
    if args.verbosity > 0: LGKUtils.print_regions(lgkm)
    # generate plot png, and save regions
    # if args.plot: LGKUtils.plot_regions(lgkm, save=True, show=False, outdir=args.outdir)
    if args.saveregions: LGKUtils.save(lgkm, outdir=args.outdir)
| true |
04365d99e4b92c52b6887458826d87ea03dd9a52 | Python | sushi-aa/idTech-pythonMaterial | /tetris_pieces.py | UTF-8 | 3,955 | 2.75 | 3 | [] | no_license | #ALL CREDIT FOR CODE TO iD TECH
import random
# Piece shapes
types = ["I", "J", "L", "O", "S", "T", "Z"]
# dict of pieces and their rotations. Key is tile type.
pieces = {
"I": [
[[0, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 0, 0]],
[[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]
]
],
"J": [
[[2, 0, 0],
[2, 2, 2],
[0, 0, 0]],
[[0, 2, 2],
[0, 2, 0],
[0, 2, 0]],
[[0, 0, 0],
[2, 2, 2],
[0, 0, 2]],
[[0, 2, 0],
[0, 2, 0],
[2, 2, 0]]
],
"L": [
[[0, 0, 3],
[3, 3, 3],
[0, 0, 0]],
[[0, 3, 0],
[0, 3, 0],
[0, 3, 3]],
[[0, 0, 0],
[3, 3, 3],
[3, 0, 0]],
[[3, 3, 0],
[0, 3, 0],
[0, 3, 0]]
],
"O": [
[[0, 4, 4, 0],
[0, 4, 4, 0],
[0, 0, 0, 0]]
],
"S": [
[[0, 5, 5],
[5, 5, 0],
[0, 0, 0]],
[[0, 5, 0],
[0, 5, 5],
[0, 0, 5]],
[[0, 0, 0],
[0, 5, 5],
[5, 5, 0]],
[[5, 0, 0],
[5, 5, 0],
[0, 5, 0]]
],
"T": [
[[0, 6, 0],
[6, 6, 6],
[0, 0, 0]],
[[0, 6, 0],
[0, 6, 6],
[0, 6, 0]],
[[0, 0, 0],
[6, 6, 6],
[0, 6, 0]],
[[0, 6, 0],
[6, 6, 0],
[0, 6, 0]]
],
"Z": [
[[7, 7, 0],
[0, 7, 7],
[0, 0, 0]],
[[0, 0, 7],
[0, 7, 7],
[0, 7, 0]],
[[0, 0, 0],
[7, 7, 0],
[0, 7, 7]],
[[0, 7, 0],
[7, 7, 0],
[7, 0, 0]]
]
}
class Tetrimino:
    '''A falling tetris piece: shape type, rotation index and grid position.'''
    def __init__(self):
        self.type = "I"
        self.rotation = 0
        self.x, self.y = (3,18)
        # Set grid_ref manually - if left as None, blocks fall and ignore the grid.
        self.grid_ref = None
    def reset(self):
        '''Respawn as a random piece type at the spawn position.'''
        self.type = random.choice(types)
        self.rotation = 0
        self.x, self.y = (3,18)
    # NEW: Add a return false to move
    def move(self, dx, dy):
        '''Try to translate by (dx, dy); True on success, False if blocked.'''
        destination_x = self.x + dx
        destination_y = self.y + dy
        if not self.collision_check(destination_x, destination_y):
            self.x = destination_x
            self.y = destination_y
            #move succeeded
            return True
        #move failed
        return False
    def rotate(self, dr):
        '''Rotate by dr steps; silently revert if the new orientation collides.'''
        new_rotation = (self.rotation + dr) % len(pieces[self.type])
        prev_rotation = self.rotation
        self.rotation = new_rotation
        if not self.collision_check(self.x, self.y):
            # rotate succeeded
            return
        self.rotation = prev_rotation
        # rotate failed
    def collision_check(self,xPos,yPos):
        '''Return True if the piece placed at (xPos, yPos) would leave the
        board or overlap an occupied grid cell.'''
        # Bug fix: the original dereferenced grid_ref for the bounds check
        # *before* its None-check, so a None grid crashed with TypeError
        # instead of being ignored as documented in __init__.
        if self.grid_ref is None:
            return False
        top_x, top_y = xPos, yPos
        tetrimino = pieces[self.type][self.rotation]
        tetrimino_height = len(tetrimino)
        tetrimino_width = len(tetrimino[0])
        for y in range(tetrimino_height):
            for x in range(tetrimino_width):
                # No need to check blank spaces of the tetrimino for collision.
                if tetrimino[y][x] != 0:
                    # out of bounds (walls or floor)
                    if top_x + x < 0 or top_x + x >= len(self.grid_ref[0]) or top_y + y < 0 or top_y + y >= len(self.grid_ref):
                        return True
                    # Check vs grid
                    if self.grid_ref[top_y + y][top_x + x] != 0:
                        return True
        # If you make it out of this loop without returning True, you're in the clear.
        return False
b0fc2e8e0b6b64fb78c78493ab5a1043b0189af7 | Python | liujxing/KalmanFilter | /tests/test_optimize_diagonal.py | UTF-8 | 3,684 | 3.03125 | 3 | [] | no_license | import numpy as np
from KalmanFilter.kalman_filter import KalmanFilter, KalmanMatrix
from tests.matrix_generation import generate_random_kalman_matrix
if __name__ == "__main__":
# generate matrix for the process
state_dim = 4
observation_dim = 1
noise_level = 0.0001
state_transition_matrix = np.diag([0.1, 0.2, 0.4, 0.8])
transition_noise_matrix = np.diag([0.8, 0.6, 0.5, 0.4]) * noise_level
observation_output_matrix = np.array([1, 1, 1, 1]).reshape(observation_dim, state_dim)
observation_noise_matrix = np.array([noise_level])
initial_mean_matrix = np.array([0.25, 0.35, 0.3, 0.28])
initial_covariance_matrix = np.diag([0.3, 0.3, 0.3, 0.3]) * noise_level
kalman_matrix = KalmanMatrix(state_dim, observation_dim, state_transition_matrix, transition_noise_matrix,
observation_output_matrix, observation_noise_matrix, initial_mean_matrix,
initial_covariance_matrix)
# generate the sequence using kalman matrix
num_sample = 10000
initial_state, state_sequence, observation_sequence = kalman_matrix.generate_sampled_sequence(num_sample)
# generate kalman filter from kalman matrix
kalman_filter = KalmanFilter(KalmanMatrix(state_dim, observation_dim,
#state_transition_matrix=state_transition_matrix,
transition_noise_matrix=transition_noise_matrix,
observation_output_matrix=observation_output_matrix,
observation_noise_matrix=observation_noise_matrix,
initial_mean_matrix=initial_mean_matrix,
initial_covariance_matrix=initial_covariance_matrix,
))
num_iteration = 50
diagonal = True
kalman_filter.optimize_single_sequence(observation_sequence, diagonal, num_iteration)
# generate the filtered state
#posterior_means, prior_means, posterior_covs, prior_covs = kalman_filter.forward_single_sequence(observation_sequence)
# generate the smoothed state
#smooth_means, smooth_covs, smooth_lagged_covs, smooth_mean_initial, smooth_cov_initial = kalman_filter.backward_single_sequence(posterior_means, prior_means, posterior_covs, prior_covs)
# compare the original matrix and the matrix from optimization
print("True state transition matrix:\n", kalman_matrix.state_transition_matrix)
print("Estimated state transition matrix:\n", kalman_filter.kalman_matrix.state_transition_matrix)
print("True transition noise matrix:\n", kalman_matrix.transition_noise_matrix)
print("Estimated transition noise matrix:\n", kalman_filter.kalman_matrix.transition_noise_matrix)
print("True observation output matrix:\n", kalman_matrix.observation_output_matrix)
print("Estimated observation output matrix:\n", kalman_filter.kalman_matrix.observation_output_matrix)
print("True observation noise matrix:\n", kalman_matrix.observation_noise_matrix)
print("Estimated observation noise matrix:\n", kalman_filter.kalman_matrix.observation_noise_matrix)
print("True initial mean matrix:\n", kalman_matrix.initial_mean_matrix)
print("Estimated initial mean matrix:\n", kalman_filter.kalman_matrix.initial_mean_matrix)
print("True initial state:\n", state_sequence[0])
print("True initial covariance matrix:\n", kalman_matrix.initial_covariance_matrix)
print("Estimated initial covariance matrix:\n", kalman_filter.kalman_matrix.initial_covariance_matrix)
| true |
e8a9df1d27090f6dac3918c9a40172e00bf99e3d | Python | anyuhanfei/study_PyQt5 | /033~077-QWidget/043~047-QWidget-鼠标操作/046-QWidget-鼠标操作-鼠标跟踪.py | UTF-8 | 1,062 | 3.53125 | 4 | [] | no_license | '''
045-QWidget-鼠标操作-鼠标跟踪
'''
import sys
from PyQt5.QtWidgets import QApplication, QWidget
class Window(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle('045_QWidget_鼠标操作_鼠标跟踪')
self.resize(700, 700)
self.move(200, 200)
# 获取鼠标是否被跟踪
print(self.hasMouseTracking())
# 设置鼠标跟踪 True 开启跟踪 False 关闭跟踪
self.setMouseTracking(True)
def mouseMoveEvent(self, mme):
'''鼠标跟踪的执行函数
当鼠标跟踪被触发时,将执行这个函数;
默认情况下,鼠标未被跟踪,只有按住鼠标左键并移动时,这个函数会被触发;
当鼠标设置了鼠标跟踪后,只要鼠标移动这个函数就会被触发;
参数一(QMouseEvent):鼠标对象
'''
print('鼠标移动了')
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| true |
0e3a32d1fafd4e1f7ab863617a49560bb7e2341d | Python | Harish4948/Guvi | /CKPRO05.py | UTF-8 | 235 | 3.28125 | 3 | [] | no_license | n=int(raw_input())
# Count index triples i < j < k with arr[i] < arr[j] < arr[k].
# (Python 2 script: n was read via raw_input above.)
# Improved from O(n^3) to O(n^2): for each middle element j, multiply the
# number of smaller values to its left by the number of larger values to
# its right. list() also keeps indexing working under Python 3's lazy map.
arr = list(map(int, raw_input().split()))
total = 0
for j in range(n):
    smaller_left = 0
    for i in range(j):
        if arr[i] < arr[j]:
            smaller_left += 1
    greater_right = 0
    for k in range(j + 1, n):
        if arr[j] < arr[k]:
            greater_right += 1
    total += smaller_left * greater_right
print(total)
| true |
739dbce9a6579c67c53e87297bf6f4890788b568 | Python | anikur93/Hackerrank_Artificial_Intelligence | /10 days of Stats/lsrl.py | UTF-8 | 557 | 3.390625 | 3 | [] | no_license | x1,y1 = list(map(int, input().split()))
# Least-squares linear regression over five (x, y) samples read from stdin,
# then predict y at x = 80 (Hackerrank statistics exercise).
# The first sample (x1, y1) was read on the line above; read the other four
# in a loop instead of the original copy-pasted x2..x5/y2..y5 variables.
samples = [(x1, y1)]
for _ in range(4):
    xi, yi = map(int, input().split())
    samples.append((xi, yi))
n = len(samples)
sumx = sum(x for x, _ in samples)
sumy = sum(y for _, y in samples)
sumxy = sum(x * y for x, y in samples)
sumx2 = sum(x * x for x, _ in samples)
# intercept (a) and slope (b) of the least-squares line y = a + b*x
denom = (n * sumx2) - sumx ** 2
a = ((sumy * sumx2) - (sumx * sumxy)) / denom
b = ((n * sumxy) - (sumx * sumy)) / denom
y = a + b * 80
print(round(y, 3))
46aef3623ad6dce2540d2a37218f749c92f2562f | Python | chpark-ML/Age_Prediction_for_AD_Diagnosis | /prac/plot_logit_on_2D.py | UTF-8 | 747 | 2.796875 | 3 | [] | no_license | import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt

# Scatter two logit volumes against each other with a y=x reference line,
# as a visual agreement check between the two logit maps.
tmp_dir_file_0 = './logit_0.nii.gz'
tmp_dir_file_1 = './logit_1.nii.gz'
logit_0_img = nib.load(tmp_dir_file_0).get_fdata()
logit_1_img = nib.load(tmp_dir_file_1).get_fdata()
x = logit_0_img
y = logit_1_img
print("x : {}".format(x.reshape(-1).sum()))
print("y : {}".format(y.reshape(-1).sum()))
## TODO: scatter logit
fig, ax = plt.subplots()
ax.scatter(x, y, s=1)
# Bug fix: the original rebound the builtins ``min`` and ``max`` to ints,
# breaking any later use of them; use distinct names for the axis limits
# (padded by 20% on each side).
axis_min = int(min([x.reshape(-1).min(), y.reshape(-1).min()]) * 1.2)
axis_max = int(max([x.reshape(-1).max(), y.reshape(-1).max()]) * 1.2)
ax.plot(range(axis_min, axis_max), range(axis_min, axis_max))
ax.set_xlim([axis_min, axis_max])
ax.set_ylim([axis_min, axis_max])
ax.grid(True)
plt.axis('square')
plt.savefig('test_{}'.format(1))
print('!!')
| true |
2122147447ea19fb8214646d98a6a0c00c4fb595 | Python | mhezarei/robotics-course-2021 | /HW1/part_one.py | UTF-8 | 3,451 | 3.28125 | 3 | [] | no_license | import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
CONVERSION = math.pi / 180
NUM_ITERATIONS = 10000
def forward(constants: list, location: list) -> dict:
    """Advance the differential-drive robot by one sampling step.

    constants -- [phi_x, phi_y, r, d, f]: left/right wheel angular
                 velocities (deg/s), wheel radius (cm), wheel separation
                 (cm) and sampling frequency (Hz).
    location  -- [x, y, theta]: current pose (cm, cm, deg).
    Returns a dict with the updated pose ('new_loc') and the velocity
    vectors in the inertial ('sai_i') and robot ('sai_r') frames.
    """
    deg2rad = math.pi / 180  # same value as the module-level CONVERSION
    phi_x, phi_y, wheel_r, axle_d, freq = constants
    x, y, theta = location
    dt = 1 / freq
    theta_rad = deg2rad * theta
    # Robot-frame velocity: forward speed from the wheel average, spin
    # rate from the wheel difference; no lateral slip (y component = 0).
    forward_speed = wheel_r * (phi_y * deg2rad + phi_x * deg2rad) / 2
    spin_rate = wheel_r * (phi_y - phi_x) / axle_d
    sai_r = [forward_speed, 0, spin_rate]
    # Rotate by -theta to express the robot-frame velocity in the
    # inertial frame.
    rotation = [
        [math.cos(-theta_rad), math.sin(-theta_rad), 0],
        [-math.sin(-theta_rad), math.cos(-theta_rad), 0],
        [0, 0, 1],
    ]
    sai_i = np.matmul(rotation, sai_r)
    return {
        "new_loc": [x + dt * sai_i[0], y + dt * sai_i[1], theta + dt * sai_i[2]],
        "sai_i": sai_i.tolist(),
        "sai_r": sai_r,
    }
def prompt():
    """Return the simulation parameters.

    Interactive input is currently disabled; the hard-coded values are
    [phi_x, phi_y, r, d, f, x0, y0, theta0] =
    [10 deg/s, 5 deg/s, 2 cm, 20 cm, 10 Hz, 0 cm, 0 cm, 30 deg].
    To restore the prompt, print the parameter descriptions (wheel angular
    velocities in deg/s, wheel radius and separation in cm, sampling
    frequency in Hz, initial pose) and read eight space-separated numbers:
        args = list(map(float, input().split()))
    """
    hard_coded = [10, 5, 2, 20, 10, 0, 0, 30]
    return hard_coded
def plot(history: list, freq: int):
    """Plot the robot path from the per-step records in *history*.

    history -- list of dicts returned by forward() ('new_loc', 'sai_i', 'sai_r')
    freq    -- sampling frequency in Hz, used to build the time axis
    The additional velocity/time plots are kept below, commented out.
    """
    timestamps = [i / freq for i in range(NUM_ITERATIONS + 1)]
    # plotting robot path
    x = [record["new_loc"][0] for record in history]
    y = [record["new_loc"][1] for record in history]
    fig, ax = plt.subplots()
    ax.plot(x, y)
    ax.set(xlabel='X (cm)', ylabel='Y (cm)', title='Robot Path')
    ax.grid()
    plt.show()
    # plotting x-hat and y-hat in sai I vs time
    # x = [record["sai_i"][0] for record in history]
    # fig, ax = plt.subplots()
    # ax.plot(timestamps, x)
    # ax.set(xlabel='time (s)', ylabel='X-hat (cm/s)',
    #        title='Motion Components of Inertial Frame')
    # ax.grid()
    # plt.show()
    # y = [record["sai_i"][1] for record in history]
    # fig, ax = plt.subplots()
    # ax.plot(timestamps, y)
    # ax.set(xlabel='time (s)', ylabel='Y-hat (cm/s)',
    #        title='Motion Components of Inertial Frame')
    # ax.grid()
    # plt.show()
    # plotting Xr and theta in sai R vs time
    # v = [record["sai_r"][0] for record in history]
    # fig, ax = plt.subplots()
    # ax.plot(timestamps, v)
    # ax.set(xlabel='time (s)', ylabel='Linear Velocity (cm/s)',
    #        title='Linear Velocity vs. Time')
    # ax.grid()
    # plt.show()
    # w = [record["sai_r"][2] for record in history]
    # fig, ax = plt.subplots()
    # ax.plot(timestamps, w)
    # ax.set(xlabel='time (s)', ylabel='Angular Velocity (deg/s)',
    #        title='Angular Velocity vs. Time')
    # ax.grid()
    # plt.show()
def main():
    """Run the simulation: read parameters, iterate forward(), then plot."""
    args = prompt()
    # first five values are the model constants, last three the initial pose
    constants = args[:5]
    base_loc = args[5:]
    history = [
        # {"new_loc": base_loc, "sai_i": [0, 0, 0], "sai_r": [0, 0, 0]},
        forward(constants, base_loc),
    ]
    # each step feeds the previous step's pose back into forward()
    for i in range(NUM_ITERATIONS):
        history.append(forward(constants, history[-1]["new_loc"]))
    # args[4] is the sampling frequency f
    plot(history, args[4])
if __name__ == '__main__':
    main()
| true |
f23335774f692a642760f4b5953f3ee8c1a0adc7 | Python | WebarchivCZ/grainery | /frontend/views/figures.py | UTF-8 | 4,880 | 3.21875 | 3 | [
"MIT"
] | permissive | from math import pi
from bokeh.embed import components
from bokeh.plotting import figure, ColumnDataSource
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
class HarvestFigures():
    """ generate graphs with bokeh library"""
    def __init__(self, data):
        # data: stats provider exposing harvestCounts(), yearSize(),
        # growth() and typesPie input (project-specific object)
        self.data = data
    def harvestPerYear(self):
        """Create a wide line plot of the number of harvests per year."""
        harvest_counts = self.data.harvestCounts('year')
        x, y = harvest_counts.index, harvest_counts.values
        # create a new plot with a title, size and axis labels
        p = figure(title="Number of harvests per year",
                   plot_width=700,
                   plot_height=450,
                   x_axis_label='Year',
                   y_axis_label='Harvests count',
                   tooltips=[('size', '@y')]
                   )
        # define ticks scale sorted list of years,
        # range between max a min value
        p.xaxis.ticker = sorted(x)
        p.yaxis.ticker = list(range(y.min()+1, y.max()+1, 1))
        # add a line renderers
        p.line(x, y, line_width=2, line_color='#0000ff')
        p.circle(x, y, fill_color="white", size=8)
        # components() returns the (script, div) pair for HTML embedding
        return components(p)
    def sizePerYear(self):
        """Create a bar plot of archive size per year."""
        # yearSize can be called with 'TB' or 'GB'
        yearsize, y_axis_label = self.data.yearSize('GB')
        x, y = yearsize.index, yearsize.values
        # create a new plot with a title, size and axis labels
        p = figure(title="Size of archive per year",
                   plot_width=500,
                   plot_height=450,
                   x_axis_label='Year',
                   y_axis_label=y_axis_label,
                   tooltips=[('year', '@x'), ('size', '@top')]
                   )
        # define ticks scale sorted list of years,
        # range between max a min value
        p.xaxis.ticker = sorted(list(x))
        p.vbar(x=x, top=y, width=0.3)
        p.xgrid.grid_line_color = None
        p.y_range.start = 0
        return components(p)
    def sizeGrowth(self):
        """Create a line plot of cumulative archive size growth."""
        yearsize, y_axis_label = self.data.yearSize('GB')
        x = yearsize.index
        y = self.data.growth(yearsize)
        # create a new plot with a title, size and axis labels
        p = figure(title="Archive Growth",
                   plot_width=500,
                   plot_height=450,
                   x_axis_label='Year',
                   y_axis_label=y_axis_label,
                   tooltips=[('size', '@y')]
                   )
        # define ticks scale sorted list of years
        # range between max a min value
        p.xaxis.ticker = sorted(list(x))
        # add a line renderer with legend and line thickness
        p.line(x, y, line_width=1, line_color='#0000ff')
        return components(p)
    def typesPie(self, typ):
        """Create a pie chart of harvest types from a value-counts Series."""
        data = typ.reset_index(name='value').rename(columns={'index': 'type'})
        # wedge angles are proportional shares of the full circle
        data['angle'] = data['value']/data['value'].sum() * 2*pi
        data['color'] = Category20c[len(typ)]
        p = figure(plot_height=350, plot_width=400,
                   title="Harvest types", toolbar_location=None,
                   tools="hover", tooltips="@type: @value",
                   x_range=(-0.5, 1.0))
        p.wedge(x=0, y=1, radius=0.4,
                start_angle=cumsum('angle', include_zero=True),
                end_angle=cumsum('angle'),
                line_color="white", fill_color='color',
                legend='type', source=data)
        p.axis.axis_label = None
        p.axis.visible = False
        p.grid.grid_line_color = None
        return components(p)
class ContainerFigures():
def __init__(self, data):
self.data = data
def containerCount(self):
""" vytvoření bodového grafu pro počet kontejnerů na sklizeň"""
source = ColumnDataSource(self.data.df)
p = figure(title="Number of containers per harvest",
x_range=self.data.df['_id'],
plot_width=700,
plot_height=450,
x_axis_label='Harvests (ordered by date)',
y_axis_label='Containers count',
tooltips=[("Harvest name", "@_id"),
('Conteiner count', '@count')]
)
p.vbar(x='_id', top='count', source=source, width=0.5)
p.xaxis.major_label_orientation = 1
p.y_range.start = 0
p.yaxis.ticker = list(range(self.data.df['count'].min(),
self.data.df['count'].max()+1,
1))
p.toolbar.autohide = True
return components(p)
| true |
d13e36d36ac170a9c10315758b6395ecf8edaed4 | Python | Johnny00520/CSCI3203-Artificial-Intelligence | /PS4/perception2.py | UTF-8 | 1,570 | 3.546875 | 4 | [] | no_license | #!/bash/python
#The perceptron equation is S = sum(wi x xi) from i = 0 to i = n
#The function of the separated line is f(s) = 1 if S >= 0, 0 otherwise. I call it
#a step function
# Trains a single perceptron on the OR truth table and prints progress.
from random import choice
from numpy import array, dot, random
# Heaviside step activation: 0 for negative input, 1 otherwise.
unitStep = lambda x: 0 if x < 0 else 1
training_data = [
    # array([A, B, C, bias]), expected output)]
    # NOTE: bias is always 1
    # NOTE(review): the arrays have 3 elements, so the layout looks like
    # [A, B, bias] rather than the comment's [A, B, C, bias] -- confirm.
    (array([0,0,0]), 0),
    (array([0,1,1]), 1),
    (array([1,0,1]), 1),
    (array([1,1,1]), 1),
]
# uniform gives you a floating-point value from -1 to 1
# Initially, choose 3 random values for weight
# Bug fix: w must be an ndarray. With a plain list, ``w += ETA*error*x``
# *extends* the list with the array's elements (list.__iadd__), so w grew
# by 3 every step and dot(w, x) failed on the next iteration.
w = array([random.uniform(-1, 1) for i in range(3)])
print("Random weights are: ", w)
#The errors list is only used to store the error values so that they can be plotted later on
errors = []
# ETA controls the learning rate
ETA = 0.2
n = 8001
# Fix: xrange was Python-2-only; range behaves identically here.
for i in range(n):
    x, expected = choice(training_data)
    result = dot(w, x)
    #we can compare to the expected value. If the expected value is bigger, we need to increase the weights, if it's smaller, we need to decrease them
    error = expected - unitStep(result)
    errors.append(error)
    w += ETA * error * x
    #print("w: ", w)
    if i % 250 == 0:
        print(i)
        print("weight is: ", w)
        print("unitStep: ", unitStep(result))
#print("error array: ", errors)
for x, _ in training_data:
    #print("x: ", x)
    #print("_: ", _)
    #print("w: ", w)
    result = dot(x, w)
    print("{}: {} -> {}".format(x[:3], result, unitStep(result)))
| true |
840b15ff128ac0ddfc77d5e6df72b9468fb669d3 | Python | zhoutong1996/SocketTest | /test.py | UTF-8 | 3,988 | 2.75 | 3 | [] | no_license | import socket
import argparse
from binascii import hexlify
class SodcketFunc:
    """Grab-bag of basic socket demos (Python 2 syntax throughout)."""
    def __init__(self):
        pass
    def get_machine_info(self):
        """Return the local host name and its resolved IP address."""
        host_name = socket.gethostname()
        ip_addr = socket.gethostbyname(host_name)
        return {'host_name': host_name, 'ip_addr': ip_addr}
    def get_remote_host(self):
        """Resolve www.python.org; return a printable result or error string."""
        remote_host = 'www.python.org'
        try:
            return ('IP address of %s: %s'%(remote_host, socket.gethostbyname(remote_host)))
        except socket.error, err_msg:
            return ('%s:%s'%(remote_host,err_msg))
    def convert_ipv4_address(self,ip_addr):
        """Pack a dotted-quad IPv4 string to bytes and back; return a summary."""
        packed_ip = socket.inet_aton(ip_addr)
        print type(packed_ip)
        unpacked_ip = socket.inet_ntoa(packed_ip)
        return ('IP address:%s => Packed ip:%s => Unpacked ip:%s'%(ip_addr, hexlify(packed_ip), unpacked_ip))
    def find_server_name(self,port=[80,25]):
        """Print the well-known service names for the given TCP ports plus UDP 53."""
        # NOTE(review): mutable default argument [80, 25] -- not mutated
        # here, but a tuple default would be safer.
        protocol_name = 'tcp'
        for port in port:
            print port, socket.getservbyport(port,protocol_name)
        print '53', socket.getservbyport(53, 'udp')
    def convert_integer(self,data=1234):
        """convert data from Network byte order to Host byte order ,or on the opposite"""
        # 32-bit
        print 'Original: %s => Long host byte order: %s,Network byte order: %s'\
        %(data, socket.ntohl(data), socket.htonl(data))
        # 16-bit
        print 'Original: %s => Short host byte order: %s,Network byte order: %s'\
        %(data, socket.ntohs(data), socket.htons(data))
    def socket_timeout_test(self):
        """Show the default socket timeout and the effect of settimeout()."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # socket instance
        print 'Default socket timeout:%s'%s.gettimeout()
        s.settimeout(100)
        print 'Socket timeout after setting:%s' % s.gettimeout()
    def reuse_socket_addr(self):
        """Demonstrate SO_REUSEADDR, then accept connections on port 8282."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # get old state of the SO_REUSEADDRA option
        old_state = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        print "Old sock state: %s"%old_state
        # Enable the SO_REUSEADDR option
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        new_state = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        print "New sock state: %s"%new_state
        local_port = 8282
        srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind(('', local_port))
        srv.listen(1)
        print("Listening on port: %s"%local_port)
        # Accept connections until Ctrl-C; socket errors are printed and
        # the loop continues.
        while(True):
            try:
                connection, addr = srv.accept()
                print connection
                print "Connected by %s:%s"%(addr[0], addr[1])
            except KeyboardInterrupt:
                break
            except socket.error, msg:
                print msg
    def echo_server(self,port):
        """Run a blocking TCP echo server on localhost:port (one client at a time)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_address = ('localhost', port)
        print "Starting up echo server on %s port %s"%server_address
        sock.bind(server_address)
        sock.listen(5)
        while True:
            print "Waiting to receive message from client"
            client, address = sock.accept()
            data = client.recv(2048)
            if data:
                print"data:%s"%data
                client.send(data)
                print"sent %s bytes back to %s"%(data, address)
            client.close()
if __name__ == '__main__':
    # =============================================for echo_server()
    # Parse --port from the command line and start the echo server.
    parser = argparse.ArgumentParser(description='Socket Server Example')
    parser.add_argument('--port',action = "store", dest='port', type=int, required=True)
    given_args = parser.parse_args()
    port = given_args.port
    # ==============================================================
    info = SodcketFunc()
    info.echo_server(port)
| true |
1100606d65a931053d7c6ab34ec6ebeb19d7fd67 | Python | Imaginerum/training-python | /0010_pasły_się_owce.py | UTF-8 | 2,781 | 4.25 | 4 | [] | no_license | ''' Napisz program, który wczyta liczby całkowite B, W, Z a następnie poprawnie napisze tekst:
Na łące [pasła / pasły / pasło] się B [owca / owce / owiec]. Wieczorem [przyszedł / przyszły / przyszło] W
[wilk / wilki / wilków] i [zjadł / zjadły] Z [owcę / owce / owiec]. Rano na łące [nie było / była / były /
było już tylko B-Z] [owca / owce / owiec]
kontrola danych: B >=1, W >=1, Z>=0, B>=Z
'''
def owce() :
    """Read sheep (B), wolves (W) and eaten sheep (Z) counts and print a
    grammatically correct Polish sentence. Polish numerals take different
    word forms depending on the count (1 / 2-4 / 5+ with teen exceptions),
    which the nested helper ``war`` selects.
    Input validation: B >= 1, W >= 1, Z >= 0, B >= Z.
    """
    B = int(input("Podaj liczbę owiec: "))
    W = int(input("Podaj liczbę wilków: "))
    Z = int(input("Podaj liczbę zjedzonych owiec: "))
    # two-form word variants (singular / plural verb "ate"):
    zja = [' zjadł ', ' zjadły ']
    # three-form word variants (forms for 1 / 2-4 / 5+):
    pasł = [' pasła ', ' pasły ', ' pasło ']
    owc = [' owca', ' owce', ' owiec']
    przyszł = [' przyszedł ', ' przyszły ',' przyszło ']
    wil = [' wilk ',' wilki ',' wilków ']
    # four-form word variants (1 / 2-4 / 5+ / zero remaining):
    był = [' była juz tylko jedna', ' były już tylko {0}'.format(B-Z), ' było już tylko {0}'.format(B-Z), ' nie było już']
    def war(slowo, ile) : # pick the grammatical form of *slowo* for count *ile*
        if (len(str(ile)) == 1):
            # single-digit counts: 3-form words
            if ile == 1:
                return (slowo[0])
            elif ((str(ile)[0] == str(2)) or (str(ile)[0] == str(3)) or (str(ile)[0] == str(4))):
                return (slowo[1])
            # 4-form words: special "none left" form for zero
            elif (str(ile) == str(0) and len(slowo)>3):
                return slowo[3]
            # 2-form words
            elif (str(ile) == str(1) and len(slowo) < 3):
                return slowo[0]
            elif (str(ile) != str(1) and len(slowo) < 3):
                return slowo[1]
            # 3- and 4-form words: default (5+) form
            else:
                return slowo[2]
        else:
            # multi-digit counts: 12-14 are teen exceptions (5+ form)
            if ((str(ile)[-2:] == str(12)) or (str(ile)[-2:] == str(13)) or (str(ile)[-2:] == str(14))):
                return (slowo[2])
            elif ((str(ile)[-1] == str(2)) or (str(ile)[-1] == str(3)) or (str(ile)[-1] == str(4))):
                return (slowo[1])
            # 2-form words
            elif (str(ile) != str(1) and len(slowo) < 3):
                return slowo[1]
            else: # 3- and 4-form words: default (5+) form
                return (slowo[2])
    # input validation
    # NOTE(review): bitwise & on booleans works here, but ``and`` is the
    # idiomatic operator.
    if (B >=1) & (W >=1) & (Z>=0) & (B>=Z) :
        print('Na łące' + war(pasł, B) + 'się {0}'.format(B) + war(owc, B) +
              '. Wieczorem' + war(przyszł, W) + str(W) + war(wil, W) + 'i' + war(zja, W) + str(Z) +
              war(owc, Z) + '. Rano na łace' + war(był, B-Z) + war(owc, B-Z))
    else :
        print("Błędne dane.\nOwiec i wilków powinno być więcej, niż 1 i owiec powinno być więcej, niż wilków")
owce() | true |
7fc9427586b51f495ccced8a969f12065290faf2 | Python | zeeviiosub/advanced-system-design | /cli.py | UTF-8 | 948 | 2.84375 | 3 | [] | no_license | class CommandLineInterface:
    def __init__(self):
        """Create an empty command registry."""
        # Maps command name -> (list of accepted argument names, callable).
        self.functions = {}
    def command(self, f):
        """Decorator: register *f* as a CLI command under its own name."""
        import inspect
        self.functions[f.__name__] = (inspect.getfullargspec(f).args, f)
        return f
    def main(self):
        """Parse sys.argv as `<command> [<key>=<value>]*` and dispatch.

        Prints a usage message and exits with status 1 on an unknown
        command, a parameter without '=', or a key the command does not
        accept; otherwise exits with the command's return value.
        """
        import sys
        errmsg = 'USAGE: python example.py <command> [<key>=<value>]*'
        if len(sys.argv) == 1 or sys.argv[1] not in self.functions.keys():
            print(errmsg)
            sys.exit(1)
        kwargs = {}
        for parameter in sys.argv[2:]:
            partitioned_parameter = parameter.partition('=')
            # An empty separator means the '=' was absent from the argument.
            if not partitioned_parameter[1]:
                print(errmsg)
                sys.exit(1)
            if partitioned_parameter[0] not in self.functions[sys.argv[1]][0]:
                print(errmsg)
                sys.exit(1)
            kwargs[partitioned_parameter[0]] = partitioned_parameter[2]
        sys.exit(self.functions[sys.argv[1]][1](**kwargs))
| true |
0c091486a33b9ff10b1a5e34ee443b787cc03e33 | Python | supertask/icpc | /recruit/2013/C.py | UTF-8 | 848 | 3.140625 | 3 | [] | no_license | T = input()
# Python 2 script; T (number of test cases) is read just above this loop.
# For each test case: players take characters of `cards` in round-robin
# order.  A plain digit is added to the current player's score, unless the
# previous card was X (multiply), D (integer divide) or S (subtract), in
# which case the next digit is applied with that operator instead.
# Finally the maximum score is printed.
for t in range(T):
    player_num = input()
    cards = raw_input()
    cards_len = len(cards)
    modd = 0
    scores = [0 for w in range(player_num)]
    i = 0
    while True:
        if i > cards_len-1:
            break
        if modd >= player_num:
            modd = 0
        if cards[i] == "X":
            # Multiply the current player's score by the next digit card.
            i+=1
            while True:
                if i > cards_len-1:
                    break
                else:
                    if cards[i].isdigit():
                        scores[modd] *= int(cards[i])
                        break
                i+=1
        elif cards[i] == "D":
            # Integer-divide the current player's score by the next digit card.
            i+=1
            while True:
                if i > cards_len-1:
                    break
                else:
                    if cards[i].isdigit():
                        scores[modd] /= int(cards[i])
                        break
                i+=1
        elif cards[i] == "S":
            # Subtract the next digit card from the current player's score.
            i+=1
            while True:
                if i > cards_len-1:
                    break
                else:
                    if cards[i].isdigit():
                        scores[modd] -= int(cards[i])
                        break
                i+=1
        else:
            scores[modd] += int(cards[i])
        modd+=1
        i+=1
    print max(scores)
| true |
a5cb001863bcb0a24f8af9ded63e71313b45b7c7 | Python | kenkoooo/twitter-utils | /twitterkenkoooo/config.py | UTF-8 | 1,352 | 2.625 | 3 | [] | no_license | import json
from typing import List
import logzero
import twitter
class Config:
    """Runtime configuration loaded from a JSON file.

    Exposes Twitter API credentials, paths to cached follower/friend id
    files, and routes log output to the configured log file via logzero.
    """

    def __init__(self, config_file: str):
        with open(config_file, "r") as fp:
            cfg = json.load(fp)
        self.consumer_key = cfg["consumer_key"]
        self.consumer_secret = cfg["consumer_secret"]
        self.access_token_key = cfg["access_token_key"]
        self.access_token_secret = cfg["access_token_secret"]
        self.followers_json = cfg["followers_json"]
        self.friends_json = cfg["friends_json"]
        # Mirror all logzero log output into the configured file.
        logzero.logfile(cfg["log"])

    def get_api(self, sleep_on_rate_limit=True):
        """Build an authenticated twitter.Api client from the stored credentials."""
        return twitter.Api(
            consumer_key=self.consumer_key,
            consumer_secret=self.consumer_secret,
            access_token_key=self.access_token_key,
            access_token_secret=self.access_token_secret,
            sleep_on_rate_limit=sleep_on_rate_limit,
        )

    def get_followers(self) -> List[int]:
        """Return follower ids from the cache file, in reversed file order."""
        return self._load_ids(self.followers_json)

    def get_friends(self) -> List[int]:
        """Return friend ids from the cache file, in reversed file order."""
        return self._load_ids(self.friends_json)

    @staticmethod
    def _load_ids(path) -> List[int]:
        # Shared loader: read a JSON list of ids and reverse it in place.
        with open(path, "r") as fp:
            ids = json.load(fp)
        ids.reverse()
        return ids
| true |
da81e6b407acd7f20084e2068525949d969d18b8 | Python | RichardPoulson/object-oriented-project | /tests/TestingMoveStrategyFactory.py | UTF-8 | 533 | 3.25 | 3 | [] | no_license | import sys
sys.path.append('../')
from MoveStrategyFactory import *
factory = MoveStrategyFactory('matrix')
for playerNumber in [1, 2]:
for moveType in ['moveLeft', 'moveRight', 'jumpLeft', 'jumpRight']:
print("Player {} {}: {}".format(playerNumber, moveType, factory.getMoveStrategy(playerNumber, moveType).locationChange()))
'''
key:
player1:
{'moveLeft':(1,1), 'moveRight':(1,-1), 'jumpLeft':(2,2), 'jumpRight':(2,-2)}
player2L
{'moveLeft':(-1,-1), 'moveRight':(-1,1), 'jumpLeft':(-2,-2), 'jumpRight':(-2,2)}
'''
| true |
484920d47af6ba32f9871833732e83041dbeb4c6 | Python | dev100kg/aoj | /Lesson - ITP1/ITP1_6_C/main.py | UTF-8 | 312 | 3.171875 | 3 | [] | no_license | n = int(input())
buildings = [[[0 for x in range(10)] for y in range(3)] for z in range(4)]
for x in range(n):
b, f, r, v = map(int, input().split())
buildings[b - 1][f - 1][r - 1] += v
for x in range(4):
for floor in buildings[x]:
print("", *floor)
if x != 3:
print("#" * 20)
| true |
9fe130d9beaf875c51d9763f10715cfceebf59d3 | Python | ThorstenVogt/Python | /python/835converter/835convert.py | UTF-8 | 10,624 | 2.984375 | 3 | [] | no_license |
### The purpose of this script is converting 835 messages in .x12 file format
### to csv files.
### The general idea is to iterate through all files,
### then through all transaction sets,
### then through all claims,
### then through all service line items
### Data is collected into variables at each level,
### and is appended to a data array at the lowest.
### When all the iterations are finished, data is written to a
### csv file.
import os
import csv
### FUNCTIONS
def printfile(a):
    # this is a function for de-bugging: dump the whole file *a* to stdout
    f=open(a,"r")
    content=f.read()
    f.close()
    print content
def returnsegm(text,segmentcode):
    ## Return the segment of *text* starting at *segmentcode* and running
    ## up to (but not including) the next '~' segment terminator.
    seg_start = text.find(segmentcode)
    seg_end = seg_start + text[seg_start:].find("~")
    return text[seg_start:seg_end]
def returnelem(text,n):
    ## Return the n-th '*'-separated element of *text* (1-based, counting
    ## elements after the segment code); '' for empty input, and None
    ## (implicitly) when n exceeds the number of elements.
    star_count = text.count('*')
    if text == '' or text == None:
        return ''
    elif n < star_count:
        start = text.replace('*', 'X', n - 1).find('*') + 1
        end = text.replace('*', 'X', n).find("*")
        return text[start:end]
    elif n == star_count:
        return text[text.rfind("*") + 1:]
def returnelement(text,segcode,elemno):
    ## Convenience wrapper: element number *elemno* of the segment named
    ## *segcode* inside *text*.
    segment = returnsegm(text, segcode)
    return returnelem(segment, elemno)
def returnsubelement(text,number):
    ## Return the *number*-th ':'-separated sub-element of *text* (1-based);
    ## '' when the index is out of range.
    sub_count = text.count(':') + 1
    if sub_count == 1 and number > 1:
        return ''
    elif number < sub_count:
        if number == 1:
            start = 0
            end = text.find(':')
        elif number > 1:
            start = text.replace(':', 'X', number - 2).find(':') + 1
            end = text.replace(':', 'X', number - 1).find(':')
        return text[start:end]
    elif number == sub_count:
        return text[text.rfind(':') + 1:]
    else:
        return ''
def convdate(a):
    ## Convert a yyyymmdd date string to ISO yyyy-mm-dd; '' for missing input.
    ## Fix: use `is None` instead of `== None` for the identity check.
    if a is None or a == '':
        return ""
    return "-".join((a[0:4], a[4:6], a[6:8]))
### VARIABLES
path = "D:/" # where the .x12 files are stored.
n=0 # Counter
data=[]
## This defines the columns for the csv file
datastructure = [
"Transact No","Filename","Prod Date" ## 0-2 TRN*: 2 FILENAME, DTM*405
,"Pat Ctrl No","Payer Clm No","Line Item No" ## 3-5 CLP*: 1,7 REF*6R*: 3
,"Payer Nm", "Payer Id", "Payer City", "Payer State","Crossover Payer" ## 6-10 N1*PR*: 2,4 N4*: 1,2 NM1*TT: 3
,"Product Type",'' ## 11-12 REF*CE*: 2
,"Payee Nm", "Payee id", "Payee City", "Payee State" ## 13-16 N1*PE*: 2,4 N4*: 1,2
,"Pat Last Nm","Pat First Nm","Pat Mid Init","Pat Id" ## 17-20 NM1*QC*: 3,4,5,9
,"Prov Last Nm","Prov First Nm","Prov Mid Init", "Prov Id" ## 21-24 NM1*82*: 3,4,5,9
,"Srv Date","Srv Code","Srv Cd Mod1","Srv Cd Mod2" ## 25-28 DTM*472: 2 SVC* 1 seg 2
,"Amt chrgd", "Clm Date" ## 29-30 SVC* 2 DTM*050* 2
,"Amt paid", "Pmt Date" ## 31-32 SCV* 3 BPR*: 16
,"CA Grp 1","CA Rsn 1","CA Amt 1" ## 33-35 CAS* 1,2,3
,"CA Grp 2","CA Rsn 2","CA Amt 2" ## 36-38
,"CA Grp 3","CA Rsn 3","CA Amt 3" ## 39-41
,"CA Grp 4","CA Rsn 4","CA Amt 4" ## 42-44
,"CA Grp 5","CA Rsn 5","CA Amt 5" ## 45-47
,"CA Grp 6","CA Rsn 6","CA Amt 6" ## 48-50
,"Clm Chrg","Clm Pmt", "Clm Pat"] ## 51-53 CLP 3,4,5
## add datarow to data
data.append(datastructure)
## this is the intermediate storage where data is collected for each line item
datarow=['']*54
## Okay, let's go then
## iterate over all files in target directory:
listing = os.listdir(path)
for infile in listing:
    ## make sure only .x12 files are processed
    if infile[-4:]!=".x12":
        print infile+" is not a valid file type, it seems!"
    elif infile[-4:]==".x12":
        ## Save Filename
        datarow[1]=infile
        f=open(path+infile,"r")
        content=f.read()
        ## NOTE(review): `f.close` is missing parentheses, so the file is
        ## never actually closed here — the handle leaks until GC.
        f.close
        ## Find number of transaction sets in file
        cnttransaction=content.count("ST*835*")
        ## Iterate over all transaction sets in file: ST to SE
        for i in range(1,cnttransaction+1):
            transactioninfile=i
            transactpos=content.find("ST*835*"+str(i).zfill(4))
            if i < cnttransaction:
                nexttransactpos=content.find("ST*835*"+str(i+1).zfill(4))
                transact=content[transactpos:nexttransactpos]
            elif i == cnttransaction:
                transact=content[transactpos:]
            ## Extract Transaction No - same as check or EFT no
            datarow[0]=returnelement(transact,"TRN",2)
            ## Extract Production Date
            proddatepos=content.find("DTM*405*")
            datarow[2]=convdate(content[proddatepos+8:proddatepos+16])
            ## Extract Payer Information
            datarow[6]=returnelement(transact,"N1*PR*",2)
            datarow[7]=returnelement(transact,"N1*PR*",4)
            datarow[8]=returnelement(transact,"N4*",1)
            datarow[9]=returnelement(transact,"N4*",2)
            ## Extract Payee Information
            datarow[13]=returnelement(transact,"N1*PE*",2)
            datarow[14]=returnelement(transact,"N1*PE*",4)
            ## Extract Payment Date
            datarow[32]=convdate(returnelement(transact,"BPR*",16))
            ## Find number of claims in transaction set
            cntclaim=transact.count("~CLP")
            ## Iterate over all claims in transaction set
            for j in range(1,cntclaim+1):
                claimintransaction=j
                if j<cntclaim:
                    claimstart=transact.replace('~CLP', 'XXXX', j-1).find('~CLP')+1
                    claimend=transact.replace('~CLP','XXXX',j).find("~CLP")
                    claim=transact[claimstart:claimend]
                elif j==cntclaim:
                    claimstart=transact.rfind("~CLP")+1
                    claim=transact[claimstart:]
                ## Extract Patient and Payer Claim Number
                datarow[3]=returnelement(claim,"CLP",1)
                datarow[4]=returnelement(claim,"CLP",7)
                ## Extract Patient Info
                datarow[17]=returnelement(claim,"NM1*QC*",3)
                datarow[18]=returnelement(claim,"NM1*QC*",4)
                datarow[19]=returnelement(claim,"NM1*QC*",5)
                datarow[20]=returnelement(claim,"NM1*QC*",9)
                ## Extract Provider Info
                datarow[21]=returnelement(claim,"NM1*82*",3)
                datarow[22]=returnelement(claim,"NM1*82*",4)
                datarow[23]=returnelement(claim,"NM1*82*",5)
                datarow[24]=returnelement(claim,"NM1*82*",9)
                ## Extract Crossover Carrier
                datarow[25]=returnelement(claim,"NM1*TT*",3)
                ## Extract Product Type
                datarow[11]=returnelement(claim,"REF*CE*",2)
                ## Extract Claim Payment Info
                datarow[51]=returnelement(claim,"CLP",3)
                datarow[52]=returnelement(claim,"CLP",4)
                datarow[53]=returnelement(claim,"CLP",5)
                ## Extract Claim Date
                datarow[30]=convdate(returnelement(claim,"DTM*050*",2))
                ## Find number of service line items in claim
                cntitem=claim.count("~SVC")
                ## Iterate over all items in claim
                for k in range(1,cntitem+1):
                    iteminclaim=k
                    n=n+1
                    if k<cntitem:
                        itemstart=claim.replace("~SVC", "XXXX", k-1).find("~SVC")+1
                        itemend=claim.replace("~SVC", "XXXX", k).find("~SVC")
                        item=claim[itemstart:itemend]
                    elif k==cntitem:
                        itemstart=claim.rfind("~SVC")+1
                        item=claim[itemstart:]
                    ## Extract Service Line Item Number
                    datarow[5]=str(returnelement(item,"REF*6R*",2))
                    ## Extract Service Date
                    datarow[25]=convdate(returnelement(item,"DTM*472*",2))
                    ## Extract Service Code & Code Modifiers
                    datarow[26]=returnsubelement((returnelement(item,"SVC*",1)[3:]),1)
                    datarow[27]=returnsubelement((returnelement(item,"SVC*",1)[3:]),2)
                    datarow[28]=returnsubelement((returnelement(item,"SVC*",1)[3:]),3)
                    ## Extract Amount Charged
                    datarow[29]=returnelement(item,"SVC*",2)
                    ## Extract Amount Paid
                    datarow[31]=returnelement(item,"SVC*",3)
                    ## Find number of claim adjustments for each item (max 6)
                    cntadj=item.count("~CAS")
                    ## Iterate over all claim adjustment CAS for each item
                    for l in range(0,6):
                        if l<cntadj-1:
                            adjstart=item.replace("~CAS", "XXXX", l).find("~CAS")+1
                            adjend=item.replace("~CAS", "XXXX", l+1).find("~CAS")
                            adj=item[adjstart:adjend]
                        elif l==cntadj-1:
                            adjstart=item.replace("~CAS", "XXXX", l).find("~CAS")+1
                            adjhelp=item[adjstart:]
                            adj=adjhelp[:(adjhelp.find("~"))] ## <-- careful, following element varies, so only the ~-sign as terminator!
                        elif l>=cntadj:
                            adj=''
                        ## Extract data for claim adjustment
                        ## Extract CA Group Code
                        datarow[33+l*3]=returnelem(adj,1)
                        ## Extract CA Reason Code
                        datarow[34+l*3]=returnelem(adj,2)
                        ## Extract CA Amount
                        datarow[35+l*3]=returnelem(adj,3)
                    ## add string to data (copy, so later mutation does not alias)
                    update=datarow[:]
                    data.append(update)
## write to csv file
b = open('result.csv', 'wb')
a = csv.writer(b)
a.writerows(data)
b.close()
print "done."
| true |
a792c2dfcbdcfb7f4df9babd3eca7064ef49c5fe | Python | MDomanski-dev/MDomanski_projects | /Python_Crash_Course_Eric_Matthes/many_users.py | UTF-8 | 475 | 3.59375 | 4 | [] | no_license | users = {
'aeinstein': {
'first': 'albert',
'last': 'einstein',
'location': 'princeton',
},
'mcurie': {
'first': 'maria',
'last': 'skłodowska-curie',
'location': 'paryż',
},
}
for username, user_info in users.items():
print("\nNazwa użytkownika: " + username)
full_name = user_info['first'] + " " + user_info['last']
location = user_info['location']
print("\tImię i nazwisko: " + full_name.title())
print("\tMiejscowość: " + location.title())
| true |
1e07dd5507e8f3ece339ddb924f266e047cdc17e | Python | IgorPereira1997/Python-SQL-Basics | /banco_de_dados/boxplot.py | UTF-8 | 538 | 3.484375 | 3 | [] | no_license | '''
Boxplot
Boxplot (diagrama de caixa) é uma técnica de visualização de dados em que representa a variação de dados por meio de quartis.
O retângulo central concentra 50% dos dados plotados. A linha ao centro indica a mediana. Os círculos representam os outlines (valores que destoam muito dos outros valores apresentados).
Fonte: próprio autor.
'''
import matplotlib.pyplot as plt
import random
vetor = []
for i in range(10000):
vetor.append(random.randint(0, 100))
plt.boxplot(vetor)
plt.title("Boxplot")
plt.show()
| true |
126a5b9b0f6cba7410c8ec0bdb8d0288164ac008 | Python | abelchun39/Dota2-Heroes-Recommendation | /app.py | UTF-8 | 1,420 | 2.6875 | 3 | [] | no_license | from flask import Flask, render_template,request
from RandomForest.random_forest import RandomForest
from engine import Engine
import json
app = Flask(__name__)
engine = Engine(RandomForest())
#URL_PREFIX = 'http://127.0.0.1:5000'
with open('heroes.json', 'r') as fp:
heroesData = json.load(fp)
def get_api_string(recommendations, prob):
hero_objects = []
for hero_id in recommendations:
for heroData in heroesData:
if heroData["id"] == hero_id:
hero_objects.append(heroData);
return json.dumps({'x':hero_objects,'prob_x': prob})
@app.route("/")
def index():
return render_template('index.html', heroes=heroesData)
@app.route("/api/recommend/")
def api():
if 'x' not in request.args or 'y' not in request.args:
return 'Invalid request'
my_team = request.args['x'].split(',')
if len(my_team) == 1 and my_team[0] == '':
my_team = []
else:
my_team = map(int, my_team)
their_team = request.args['y'].split(',')
if len(their_team) == 1 and their_team[0] == '':
their_team = []
else:
their_team = map(int, their_team)
print my_team
print their_team
prob_recommendation_pairs = engine.recommend(my_team, their_team)
recommendations = [hero for prob, hero in prob_recommendation_pairs]
prob = engine.predict(my_team, their_team)
print prob
return get_api_string(recommendations, prob)
if __name__ == "__main__":
app.debug = True
app.run()
| true |
8e3912b5afc73d71c6a4cb5b1c49971ff6d430fd | Python | grandq33769/llh | /Python/regression/housing/training_2.py | UTF-8 | 1,882 | 3.0625 | 3 | [] | no_license | '''
Created on 2017年4月12日
@author: LokHim
'''
from llh.Python.regression.housing.data_input import TARGET_LIST
WEIGHT = -2.3272
WEIGHT_2 = 0.0434
BIAS = 42.8169
LEARNING_RATE = 0.0000000195
STEP = 200000
def predict(input_attr):
    '''Model prediction: quadratic in the input attribute (uses module globals).'''
    return WEIGHT * input_attr + WEIGHT_2 * input_attr ** 2 + BIAS
def loss(input_attr, target):
    '''Squared error between the target and the model prediction.'''
    return (target - predict(input_attr)) ** 2
def differential_of_weight(input_attr, target):
    '''Gradient of the squared error with respect to WEIGHT.'''
    return -2 * (target - predict(input_attr)) * input_attr
def differential_of_weight2(input_attr, target):
    '''Gradient of the squared error with respect to WEIGHT_2.'''
    return -2 * (target - predict(input_attr)) * input_attr ** 2
def differential_of_bias(input_attr, target):
    '''Gradient of the squared error with respect to BIAS.'''
    return -2 * (target - predict(input_attr))
def optimize(sum_of_db, sum_of_dw, sum_of_dw2):
    '''Apply one gradient-descent step to the global model parameters.'''
    global WEIGHT, WEIGHT_2, BIAS
    WEIGHT -= LEARNING_RATE * sum_of_dw
    WEIGHT_2 -= LEARNING_RATE * sum_of_dw2
    BIAS -= LEARNING_RATE * sum_of_db
# Training loop: accumulate gradients and total squared-error loss over the
# whole data set, then take one gradient-descent step per iteration.
for i in range(0, STEP):
    dw = dw2 = db = 0
    # BUG FIX: the accumulator used to be named `loss`, shadowing the loss()
    # function so `loss(member[0], member[1])` raised TypeError immediately.
    total_loss = 0
    for member in TARGET_LIST:
        # member appears to be an (input_attribute, target) pair — TODO confirm
        # against data_input.TARGET_LIST.
        dw += differential_of_weight(member[0], member[1])
        dw2 += differential_of_weight2(member[0], member[1])
        db += differential_of_bias(member[0], member[1])
        total_loss += loss(member[0], member[1])
    optimize(db, dw, dw2)
    print("Step :", '{:5d}'.format(i + 1), " loss: ", '{:-.4f}'.format(total_loss), "weight:",
          '{:-.4f}'.format(WEIGHT), "weight2:", '{:-.4f}'.format(WEIGHT_2),
          "bias: ", '{:-.4f}'.format(BIAS))
| true |
087d1c8ab097e4e11d4f7c0d8250682af4c7e952 | Python | CarlosChato/Python-Linked-List | /Singles Linked List/SNode.py | UTF-8 | 291 | 3.0625 | 3 | [] | no_license | #We have to create a node, it will be like a part of a list
class SNode():
    """Single node of a singly linked list.

    Stores one payload element and a reference to the next node,
    which is None while the node is not linked to anything.
    """
    def __init__(self, e):
        self.elem = e      # payload stored in this node
        self.next = None   # successor node; assigned when linked into a list
| true |
43d607e629797da186d16043d44251590f02882a | Python | DN0000/SecureCRT | /Prefix_no.py | UTF-8 | 480 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # $language = "python"
# $interface = "1.0"
# NoToggle.py
#
# Description:
#
# Port of NoToggle.vbs
# Be "tab safe" by getting a reference to the tab for which this script
# has been launched:
objTab = crt.GetScriptTab()
strLines = objTab.Screen.Selection
if not strLines.strip():
crt.Dialog.MessageBox("No Text Selected!")
for line in strLines.splitlines():
if line.startswith("no "):
objTab.Screen.Send (line[3:]+'\r')
else:
objTab.Screen.Send ("no "+line+ '\r')
| true |
d5c0c65386c4f7b4977cc9c15dd00ab7983640bd | Python | andres-rad/Programming-Challenges | /RPC1318/horsemeet2.py | UTF-8 | 1,347 | 2.734375 | 3 | [] | no_license | import numpy as np
def m(i, j, k, l):
    """Flatten a 4-D state (i, j, k, l), each component in 0..7, to one index."""
    return ((l * 8 + k) * 8 + j) * 8 + i
def r(i, j):
    """Return the knight moves from (i, j) that stay on the 8x8 board, in a fixed order."""
    deltas = [(1, 2), (1, -2), (2, 1), (2, -1),
              (-1, -2), (-1, 2), (-2, -1), (-2, 1)]
    moves = []
    for di, dj in deltas:
        ni, nj = i + di, j + dj
        if 0 <= ni < 8 and 0 <= nj < 8:
            moves.append((ni, nj))
    return moves
# Build a linear system over all 64*64 joint positions of two knights and
# solve for the stationary quantities x[m(i,j,k,l)].
a = np.zeros((64*64, 64*64))
b = np.zeros(64*64)
eq = 0
# (Commented-out alternative boundary equations kept for reference.)
# for i in range(8):
#     for j in range(8):
#         a[eq][m(i, j, i ,j)] = 1
#         b[eq] = 0
#         eq += 1
for i in range(8):
    for j in range(8):
        for k in range(8):
            for l in range(8):
                # One equation per joint state: x(state) plus the average
                # over the moving knight's successors equals 1.
                b[eq] = 1
                a[eq][m(i, j, k, l)] = 1
                R = r(i, j)
                for (u, v) in R:
                    a[eq][m(k, l, u, v)] = 1.0 / len(R)
                eq += 1
print(b)
print(eq)
print(r(0, 0))
x = np.linalg.solve(a, b)
# Read the two knights' positions (1-based) and convert to 0-based indices.
i, j, k, l = map(int, input().split())
i -= 1
j -= 1
k -= 1
l -= 1
# pbl: value if the first knight stays; pne: expected value if it moves.
pbl = x[m(i, j, k, l)]
R = r(i, j)
pne = 0
for u, v in R:
    pne += x[m(k, l, u, v)] / len(R)
print(pbl)
print(pne)
# Sanity check: on even-parity squares, staying should never beat moving.
for i in range(8):
    for j in range(8):
        for k in range(8):
            for l in range(8):
                pbl = x[m(i, j, k, l)]
                R = r(i, j)
                pne = 0
                for u, v in R:
                    pne += x[m(k, l, u, v)] / len(R)
                # Fixed typo in the diagnostic message ('EROORR' -> 'ERROR').
                if ((i + j + k + l) % 2 == 0 and pbl > pne):
                    print('ERROR')
589a76002d7e9b87080aeffdadfc62ac749fa16b | Python | rlatmd0829/algorithm | /알고리즘풀이시즌2/21.08.19/스타트와링크다른풀이.py | UTF-8 | 875 | 2.765625 | 3 | [] | no_license | N = int(input())
graph = [list(map(int, input().split())) for _ in range(N)]
check = [False]*N
curMin = 1000000
def recursive(index, howMany, curSum):
global curMin
if index == N:
if howMany != N/2:
return
else:
sum = 0
for x in range(N):
if check[x] == False:
for xx in range(x):
if check[xx] == False:
sum += graph[x][xx] + graph[xx][x]
curMin = min(curMin, abs(sum-curSum))
return
check[index] = True
addSum = 0
for i in range(index):
if check[i] == True:
addSum += graph[index][i]
addSum += graph[i][index]
recursive(index+1, howMany+1, curSum + addSum)
check[index] = False
recursive(index+1, howMany, curSum)
recursive(0, 0, 0)
print(curMin) | true |
d92aa24166c688615fd3cd3683b95e91abd48930 | Python | daniel-chuang/beaverworks | /Error/polling.py | UTF-8 | 2,392 | 3.453125 | 3 | [] | no_license | import time
import sys
import threading
#global var polled for change
keep_running_polled = True
class SigFinish(Exception):
pass
def throw_signal_function(frame, event, arg):
raise SigFinish()
def do_nothing_trace_function(frame, event, arg):
# Note: each function called will actually call this function
# so, take care, your program will run slower because of that.
return None
def interrupt_thread(thread):
for thread_id, frame in sys._current_frames().items():
if thread_id == thread.ident: # Python 2.6 or later
set_trace_for_frame_and_parents(frame, throw_signal_function)
def set_trace_for_frame_and_parents(frame, trace_func):
# Note: this only really works if there's a tracing function set in this
# thread (i.e.: sys.settrace or threading.settrace must have set the
# function before)
while frame:
if frame.f_trace is None:
frame.f_trace = trace_func
frame = frame.f_back
del frame
class MyThread_polled(threading.Thread):
def run(self):
# Note: this is important: we have to set the tracing function
# when the thread is started (we could set threading.settrace
# before starting this thread to do this externally)
sys.settrace(do_nothing_trace_function)
global keep_running_polled
try:
while keep_running_polled: #each loop it examines the state of this var
sys.stderr.write("*whistling* Doing nothing but waiting for global var to change...\n");
time.sleep(3)
except SigFinish:
sys.stderr.write('Caught interrupt signal! Finishing thread cleanly.\n')
print("Global Var Changed")
thread = MyThread_polled()
thread.start()
time.sleep(.5) # Wait a bit just to see it looping.
while True:
ans = raw_input('Change Global Variable To Break? ');
if not ans:
continue
if ans not in ['y', 'Y', 'n', 'N', 'i']:
print 'please enter y or n.'
continue
if ans == 'y' or ans == 'Y':
keep_running_polled = False
start = time.time()
break
if ans == 'n' or ans == 'N':
continue
thread.join() # Joining here: if we didn't end the thread before,
# we'd be here forever.
end = time.time()
sys.stderr.write('Finished '+str(end-start)+" second delay before it ended") | true |
4fd3f1708d00c0ff460fca4326a1381fd20608c7 | Python | hkedariya/my-captain | /fibonacci series (1).py | UTF-8 | 281 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
def fibonacci(n):
    """Return the n-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Iterative implementation: O(n) time and O(1) space, replacing the
    original two-branch recursion whose cost grew exponentially with n.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Print the first n Fibonacci numbers.
n=int(input("Enter the no terms "))
for i in range(n):
    print(fibonacci(i))
# Keep the console window open until the user presses Enter.
input()
# In[ ]:
| true |
b513526d6f8a8f35f39da43c3bdfb943c315dbca | Python | daveymason/pythonProjects | /Slideshow/Slideshow.py | UTF-8 | 923 | 3.21875 | 3 | [
"MIT"
] | permissive | from itertools import cycle
import tkinter as tk
class App(tk.Tk):
def __init__(self, image_files, x, y, delay):
tk.Tk.__init__(self)
self.geometry('+{}+{}'.format(x, y))
self.delay = delay
self.pictures = cycle((tk.PhotoImage(file=image), image)
for image in image_files)
self.picture_display = tk.Label(self)
self.picture_display.pack()
def show_slides(self):
#cycles through the images
img_object, img_name = next(self.pictures)
self.picture_display.config(image=img_object)
self.title(img_name)
self.after(self.delay, self.show_slides)
def run(self):
self.mainloop()
delay = 3500
image_files = [
'1.gif',
'2.gif',
'3.gif',
'4.gif',
'5.gif',
'6.gif',
'7.gif'
]
x = 100
y = 50
app = App(image_files, x, y, delay)
app.show_slides()
app.run()
input('Press ENTER to exit')
| true |
b3a4d2faafd9672f5b2def16b0b826189199896a | Python | Alek96/SZR | /SZR/apps/GitLabApi/base.py | UTF-8 | 606 | 2.78125 | 3 | [
"MIT"
] | permissive | class RESTObject(object):
def __init__(self, rest_object):
self.__dict__.update({
'_rest_object': rest_object
})
def __getattr__(self, name):
return getattr(self._rest_object, name)
def __setattr__(self, name, value):
setattr(self._rest_object, name, value)
def __dir__(self):
ret = super().__dir__()
ret.extend([str(k) for k in self._rest_object.attributes.keys()])
ret.sort()
return ret
class RESTManager:
    """Thin wrapper around a REST manager instance.

    Subclasses set `_obj_cls` to the wrapper class used for the objects
    the manager produces.
    """
    _obj_cls = None  # wrapper class for objects produced by this manager
    def __init__(self, rest_manager):
        self._rest_manager = rest_manager
| true |
ce6427d4ee8d5c1da771ab2b21b8f5afae50d76e | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_118/1496.py | UTF-8 | 3,902 | 3.296875 | 3 | [] | no_license | from math import log
# Python 2 helpers for the "Fair and Square" problem: count palindromes
# whose square is also a palindrome.
from math import log
from math import log10
from math import atan, pi
def intToList(n):
    """Return the decimal digits of n, most significant first."""
    maxDigitSeat = int(log10(n))
    list = []
    for i in xrange(0,maxDigitSeat+1):
        list.append(int(n/pow(10,(maxDigitSeat-i)))%10)
    return list
def listToInt(list):
    """Inverse of intToList: rebuild the integer from its digit list."""
    l = len(list)
    num = 0
    for i in xrange(l):
        num += list[i]*pow(10,(l-1-i))
    return num
def isPalindrome(n):
    """True if n reads the same forwards and backwards in base 10."""
    base = 10
    length = int(log10(n))
    for i in xrange(int((length+1)/2)):
        if ((n/pow(base,i))%base) != ((n/pow(base,length-i))%base):
            return False
    return True
import copy
def digitsToNum(digits,lenNum):
    """Expand half-palindrome `digits` into the full palindrome of length lenNum+1.

    Even lenNum means an odd-length palindrome (middle digit not mirrored).
    """
    if lenNum % 2 == 0:
        return sum([digits[i]*10**i for i in xrange(len(digits))]) + sum([digits[i]*10**(lenNum-i) for i in xrange(len(digits)-1)])
    else:
        return sum([digits[i]*10**i for i in xrange(len(digits))]) + sum([digits[i]*10**(lenNum-i) for i in xrange(len(digits))])
def shouldContinue(palDigits,n):
    """Prune test: can the partial palindrome still square to a palindrome?

    Squares the partial digit convolution and checks the outer digits of
    the result are themselves palindromic.
    """
    if len(palDigits) <= 11:
        if len(palDigits) > 3:
            c = [0]*(n+1)
            for i in xrange(len(c)):
                c[i] = sum(palDigits[min(k,n-k)]*palDigits[min(i-k,n-i+k)] for k in xrange(i+1))
            num = digitsToNum(c, n*2)
            if int(log10(num)) == n*2:
                p = 10**(n*2-len(palDigits)+4)
                if isPalindrome((num/p)*p + (num % 10**(len(palDigits)-3))):
                    return True
            elif int(log10(num)) == (n*2)+1:
                p = 10**(n*2-len(palDigits)+4)
                if isPalindrome((num/p)*p + (num % 10**(len(palDigits)-2))):
                    return True
            else:
                assert False
    elif len(palDigits) >= 12:
        c = [0]*(n+1)
        for i in xrange(len(c)):
            c[i] = sum(palDigits[min(k,n-k)]*palDigits[min(i-k,n-i+k)] for k in xrange(i+1))
        num = digitsToNum(c, n*2)
        if int(log10(num)) == n*2:
            p = 10**(n*2-len(palDigits)+5)
            if isPalindrome((num/p)*p + (num % 10**(len(palDigits)-4))):
                return True
            #print (num/p)*p + (num % 10**(len(palDigits)-4))
        elif int(log10(num)) == (n*2)+1:
            p = 10**(n*2-len(palDigits)+5)
            if isPalindrome((num/p)*p + (num % 10**(len(palDigits)-3))):
                return True
        else:
            assert False
    else:
        return True
#shouldContinue([1,1,2,3,4,5,6,7,8,9,1,2], 23)
#print digitsToNum([1], 0)
count = 0  # fair-and-square numbers found for the current test case
def genFairSquare(palDigits,n,lb,ub):
    """Recursively extend the half-palindrome and count fair squares in [lb, ub].

    palDigits is the first half of a palindrome, n the current palindrome
    length index; lb/ub bound the square roots of the target interval.
    """
    global count
    num = digitsToNum(palDigits,n)
    # print num
    if num < lb:
        # print "a"
        # Too small yet: grow the palindrome (mirror on even n, append a
        # new digit on odd n).
        if n % 2 == 0:
            genFairSquare(palDigits,n+1,lb,ub)
        else:
            for i in xrange(10):
                newPD = copy.copy(palDigits)
                newPD.append(i)
                if shouldContinue(newPD, n+1):
                    genFairSquare(newPD, n+1, lb, ub)
    #else if number is over or equal lowerbound:
    else:
        #if number is above upper bound:
        if num > ub:
            # print "c"
            return
        else:
            # print "b"
            #check fair square stuff
            if (isPalindrome(num) and isPalindrome(num*num)):
                count += 1
            if n % 2 == 0:
                genFairSquare(palDigits,n+1,lb,ub)
            else:
                for i in xrange(10):
                    newPD = copy.copy(palDigits)
                    newPD.append(i)
                    if shouldContinue(newPD, n+1):
                        genFairSquare(newPD, n+1, lb, ub)
import sys
from math import ceil,floor,sqrt
inp = sys.stdin
# Code Jam driver: T test cases, each a range [A, B]; count fair squares.
T = int(inp.readline())
for testCase in xrange(1,T+1):
    count = 0
    A,B = map(lambda x: int(x), inp.readline().split(" "))
    # Seed with every possible leading digit 1..9 of the square root.
    for i in xrange(1,10):
        genFairSquare([i],0,int(ceil(sqrt(A))),int(floor(sqrt(B))))
    print "Case #{}: {}".format(testCase, count)
| true |
ec6dc7f0a28d47952d3e683c09dcaa24a81c71e2 | Python | Anurag14/accessfaceid | /register/register.py | UTF-8 | 3,435 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | import sys
sys.path.insert(0,'.')
import cv2
import numpy as np
from modules import face_track_server, face_describer_server, face_db, camera_server,face_align_server
from configs import configs
'''
The register app utilize all servers in model
I have a camera product and I need to use it to find all visitors in my store who came here before.
If unmatched I need to register this new face
Process function does majority of heavy lifting and call if you want to, that is where you can further customize.
'''
class Register(camera_server.CameraServer):
def __init__(self, name, *args, **kwargs):
super(Register, self).__init__(*args, **kwargs)
self.face_tracker = face_track_server.FaceTrackServer()
self.face_describer = face_describer_server.FDServer(
model_fp=configs.face_describer_model_fp,
input_tensor_names=configs.face_describer_input_tensor_names,
output_tensor_names=configs.face_describer_output_tensor_names,
device=configs.face_describer_device)
self.face_db = face_db.Model()
self.name=name
def enter_face(self,_face_description):
self.face_db.add_face(face_description=_face_description, name=self.name)
return
def processs(self, frame):
# Step1. Find and track face (frame ---> [Face_Tracker] ---> Faces Loactions)
self.face_tracker.process(frame)
_faces = self.face_tracker.get_faces()
# Uncomment below to visualize face
_faces_loc = self.face_tracker.get_faces_loc()
self._viz_faces(_faces_loc, frame)
# Step2. For each face, get the cropped face area, feeding it to face describer (insightface) to get 512-D Feature Embedding
_face_descriptions = []
_num_faces = len(_faces)
if _num_faces == 0:
print("No faces found cant register")
return
for _face in _faces:
_face_resize = cv2.resize(_face, configs.face_describer_tensor_shape)
_data_feed = [np.expand_dims(_face_resize, axis=0), configs.face_describer_drop_out_rate]
_face_description = self.face_describer.inference(_data_feed)[0][0]
_face_descriptions.append(_face_description)
# Step3. For each face, check whether there are similar faces and if not save it to db.
# Below naive and verbose implementation is to tutor you how this work
_similar_face_name = self.face_db.who_is_this_face(_face_description, cores='single')
print(f'name: {_similar_face_name}')
if _similar_face_name == "unknown" or len(self.face_db.faces_names) == 0:
self.enter_face(_face_description)
print('[Live Streaming] -----------------------------------------------------------')
def _viz_faces(self, faces_loc, frame):
for _face_loc in faces_loc:
x1 = int(_face_loc[0] * self.face_tracker.cam_w)
y1 = int(_face_loc[1] * self.face_tracker.cam_h)
x2 = int(_face_loc[2] * self.face_tracker.cam_w)
y2 = int(_face_loc[3] * self.face_tracker.cam_h)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('Register', frame)
cv2.waitKey(1)
if __name__ == '__main__':
name=input("Who do I need to register, Enter Details")
register = Register(name,camera_address=0)
register.run()
| true |
f771d07f5d14ed876b725d3f9bed85a593c4af2e | Python | NikitaMishin/Django.Practice | /Mysite/first/models.py~ | UTF-8 | 1,358 | 2.703125 | 3 | [] | no_license | from django.db import models
#CATEGORIES = ((1, "A"),(2,"B"),(3,"C") )
class Category(models.Model):
name = models.CharField(max_length =30, unique = True)
description = models.TextField()
def __str__(self):
return self.name
class Good(models.Model):
name = models.CharField(max_length = 30,unique = True,verbose_name ="Name")
in_stock = models.BooleanField(default=True, db_index = True, verbose_name= "In Stock")
description = models.TextField(blank=True)
category = models.ForeignKey(Category)
price = models.IntegerField(verbose_name="Price")
def __str__(self):
if not self.in_stock:
return self.name +" Not in Stock"
return self.name
class Meta:
ordering = ["-price","name"]
unique_together = ("category","name","price")
verbose_name = "Item"
verbose_name_plural="items"
class New(models.Model):
title = models.CharField(max_length=100,db_index=True,verbose_name="Title")
description=models.TextField(verbose_name="Description")
content = models.TextField()
pub_date=models.DateTimeField(db_index = True,auto_now_add = True,verbose_name="pub_date")
class Meta:
verbose_name = "Article"
verbose_name_plural="Articles"
def __str__(self):
return self.title+" "+ str(self.pub_date)
| true |
f58b2e42d6d76b24f4139fdaa3f4c485de0885d7 | Python | Aasthaengg/IBMdataset | /Python_codes/p02580/s324308590.py | UTF-8 | 659 | 2.71875 | 3 | [] | no_license | H, W, M = map(int, input().split())
h = [0 for _ in range(H+1)]
w = [0 for _ in range(W+1)]
hmax = 0
wmax = 0
Q = []
for _ in range(M):
a, b = map(int, input().split())
Q.append((a, b))
h[a] += 1
hmax = max(hmax, h[a])
w[b] += 1
wmax = max(wmax, w[b])
h_ok = False
w_ok = False
hm = [False for _ in range(H+1)]
wm = [False for _ in range(W+1)]
h_cnt = 0
w_cnt = 0
hw_cnt = 0
for a, b in Q:
if not hm[a] and h[a] == hmax:
hm[a] = True
h_cnt += 1
if not wm[b] and w[b] == wmax:
wm[b] = True
w_cnt += 1
if h[a] == hmax and w[b] == wmax:
hw_cnt += 1
if h_cnt*w_cnt - hw_cnt > 0:
print(hmax+wmax)
else:
print(hmax+wmax-1)
| true |
034206cadc25623d52e1b48512eed6129f39e296 | Python | omnea/distributed-service-framework | /scripts/clean_rabbit.py | UTF-8 | 1,980 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import pika
import pyrabbit
class StructureManager(object):
    """Deletes every non-system exchange and every queue on a RabbitMQ broker.

    Uses pika (AMQP, port 5672) for the delete operations and pyrabbit
    (management HTTP API, port 15672) to enumerate exchanges and queues.
    """
    # NOTE(review): broker address and credentials are hard-coded; move to
    # configuration before reusing this outside a throwaway test setup.
    _host = '88.99.15.151'
    _port = 5672
    _username = 'test'
    _password = 'test'
    _connection = None
    _channel = None
    _api_client = None
    def __init__(self):
        super().__init__()
        self._configure()
    def __del__(self):
        # NOTE(review): if _configure() raised, _connection is still the class
        # default None and this finalizer itself raises AttributeError.
        self._connection.close()
    def _configure(self):
        """Open the AMQP connection/channel and the management API client."""
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=self._host,
                port=self._port,
                # NOTE(review): heartbeat_interval is the pre-1.0 pika name;
                # newer pika expects `heartbeat` -- confirm the pinned version.
                heartbeat_interval=20,
                credentials=pika.PlainCredentials(
                    username=self._username,
                    password=self._password
                )
            )
        )
        channel = connection.channel()
        self._connection = connection
        self._channel = channel
        self._api_client = pyrabbit.Client(
            "{host}:15672".format(host=self._host),
            self._username, self._password
        )
    def clean_all(self):
        """Delete all queues, then all non-system exchanges."""
        self._clean_queues()
        self._clean_exchanges()
    def _clean_exchanges(self):
        """Delete every exchange except the default ('') and amq.* ones."""
        exchanges_list = self._api_client.get_exchanges()
        for exchange in exchanges_list:
            exchange_name = exchange['name']
            is_system_exchange = (
                exchange_name == '' or exchange_name.startswith('amq.')
            )
            if is_system_exchange:
                continue
            self._channel.exchange_delete(exchange_name)
            print("Exchange `{name}` deleted".format(name=exchange_name))
    def _clean_queues(self):
        """Delete every queue reported by the management API."""
        queues_list = self._api_client.get_queues()
        for queue in queues_list:
            queue_name = queue['name']
            self._channel.queue_delete(queue_name)
            print("Queue `{name}` deleted".format(name=queue_name))
if __name__ == '__main__':
manager = StructureManager()
manager.clean_all()
| true |
a059dcc437e4f1dc244f608938cdd42e7b31e69f | Python | SirJakesalot/MinecraftMobIdentifier | /models/findCentroids.py | UTF-8 | 5,601 | 2.984375 | 3 | [
"MIT"
] | permissive | #This program returns the centroids of each MOB in a cropped image
image1 = [[.5,.3,.2,0,0], [.2,.1,.4,.3,0], [0,0,.9,.1,0], [.1,.9,0,0,0], [.1,.4,.4,0,.1], [.1,.1,.8,0,0], [0,1,0,0,0], [0,.6,.4,0,0]]
image2 = [[0,0,0,0,0], [0,0,0,0,0], [.2,.1,0,0,.7], [.2,0,0,.5,.3], [.9,0,0,0,.1], [0,0,0,0,1], [.4,.1,.1,.4,0], [.8,0,0,0,.2], [0,0,0,.1,.9]]
image3 = [[0,1,0,0,0], [0,.9,.1,0,0], [.4,0,0,.1,.5], [.6,0,.1,0,.3], [.1,.3,.4,.2,0], [.5,.4,0,.1,0], [.5,0,0,.2,.3], [.4,.2,0,.1,.3], [0,0,0,0,0]]
image4 = [[0,0,.4,.6,0], [0,.1,0,.9,0], [0,0,0,.9,.1], [.1,0,0,.9,0], [0,0,0,1,0], [.2,0,0,.8,0], [0,0,.1,.7,.2], [0,0,0,1,0]]
import numpy as np
class locate_identify:
    """Estimate a centroid (x, y) for each Minecraft mob class in an image
    that has been split into a 3x3 grid of square segments.

    `image` is a list of per-segment confidence vectors (one float per class
    in `label_key`); segments are numbered 0..8 row-major from the top-left.
    NOTE(review): the demo images above have 8 or 9 rows -- confirm callers
    always pass exactly 9 segments, since `segment_location` only covers 0-8.
    """
    def __init__(self, image, segment_size):
        self.image = image
        self.segment_size = segment_size
        self.label_key = ['chicken', 'cow', 'mushroom_cow', 'pig', 'sheep']
        # Center coordinates of each segment (origin at bottom-left).
        self.segment_location = {'0': (segment_size *.5, segment_size*2.5),
                                 '1': (segment_size*1.5, segment_size*2.5),
                                 '2': (segment_size*2.5, segment_size*2.5),
                                 '3': (segment_size *.5, segment_size*1.5),
                                 '4': (segment_size*1.5, segment_size*1.5),
                                 '5': (segment_size*2.5, segment_size*1.5),
                                 '6': (segment_size *.5, segment_size *.5),
                                 '7': (segment_size*1.5, segment_size *.5),
                                 '8': (segment_size*2.5, segment_size *.5),}
        # For each segment: its grid neighbours and the direction toward them.
        self.adjacent_segments = {'0': [(1,'right'), (3,'down'), (4,'downright')],
                                  '1': [(0,'left'), (2,'right'), (3,'downleft'), (4,'down'), (5,'downright')],
                                  '2': [(1,'left'), (4,'downleft'), (5,'down')],
                                  '3': [(0,'up'), (1,'upright'), (4,'right'), (6,'down'), (7,'downright')],
                                  '4': [(0,'upleft'), (1,'up'), (2,'upright'), (3,'left'), (5,'right'), (6,'downleft'), (7,'down'), (8,'downright')],
                                  '5': [(1,'upleft'), (2,'up'), (4,'left'), (7,'downleft'), (8,'down')],
                                  '6': [(3,'up'), (4,'upright'), (7,'right')],
                                  '7': [(3,'upleft'), (4,'up'), (5,'upright'), (6,'left'), (8,'right')],
                                  '8': [(4,'upleft'), (5,'up'), (7,'left')]}
        # Displacement of one segment width/height per direction name.
        self.move_value = {'up': (0,segment_size),
                           'down': (0,segment_size*-1),
                           'left': (segment_size*-1,0),
                           'right': (segment_size,0),
                           'upleft': (segment_size*-1, segment_size),
                           'upright': (segment_size, segment_size),
                           'downleft': (segment_size*-1, segment_size*-1),
                           'downright': (segment_size, segment_size*-1)}
        # mob label -> (x, y) centroid, filled in by find_centroids().
        self.centroids = {}
    def initialize_centroids(self):
        '''Create a centroid for every unique MOB, with highest confidence in a segment'''
        # Also accumulates, per class, the confidence summed over all segments.
        total_confidences = [0, 0, 0, 0, 0]
        for segment in self.image:
            self.centroids[self.label_key[np.argmax((segment))]] = (0, 0)
            for index, value in enumerate(total_confidences):
                total_confidences[index] = total_confidences[index] + segment[index]
        return total_confidences
    def remove_bad_centroids(self, total_confidences):
        '''Remove all centroids in which total MOB confidence is less than 1'''
        remove_list = []
        for key, value in self.centroids.items():
            if total_confidences[self.label_key.index(key)] < 1:
                remove_list.append(key)
        # Deleted outside the loop to avoid mutating the dict while iterating.
        for key in remove_list:
            del self.centroids[key]
    def find_starting_centroids(self):
        '''Initialize centroid coordinates to center of segment with highest confidence for each unique MOB'''
        # Returns mob label -> index of its best segment.
        initial_segment = {}
        for key, value in self.centroids.items():
            max_confidence = 0
            location = 0
            index = self.label_key.index(key)
            for count, segment in enumerate(self.image):
                if segment[index] > max_confidence:
                    max_confidence = segment[index]
                    location = count
            self.centroids[key] = self.segment_location[str(location)]
            initial_segment[key] = location
        return initial_segment
    def find_best_centroids(self, initial_segment):
        '''Adjust the coordinates of each centroid according to adjacent segments'''
        # Each neighbour pulls the centroid toward itself by half a segment
        # scaled by that neighbour's confidence for the mob.
        for key, value in self.centroids.items():
            for segment, direction in self.adjacent_segments[str(initial_segment[key])]:
                direction_tuple = self.move_value[direction]
                segment_confidence = self.image[segment][self.label_key.index(key)] * .5
                self.centroids[key] = (self.centroids[key][0] + direction_tuple[0] * segment_confidence,
                                       self.centroids[key][1] + direction_tuple[1] * segment_confidence)
    def find_centroids(self):
        '''Find centroids of each MOB in the view'''
        self.remove_bad_centroids(self.initialize_centroids())
        self.find_best_centroids(self.find_starting_centroids())
        return self.centroids
if __name__ == '__main__':
identifier = locate_identify(image1,24)
centroids = identifier.find_centroids()
print(centroids)
| true |
5297b0161430d1d4869f6afd35dfc5e40d2f8d11 | Python | startrekdude/byref | /tests/test3.py | UTF-8 | 350 | 3.890625 | 4 | [
"ISC"
] | permissive | from byref import byref
@byref("x")
def add(x, /, *xs):
for y in xs:
x += y
def main():
nums = []
while True:
s = input("Enter a number? ")
if not s: break
if not s.isdigit(): continue
nums.append(int(s))
x, *xs = nums
add(x, *xs)
print(f"The sum of these numbers is {x}.")
print("Goodbye.")
if __name__ == "__main__":
main() | true |
6c42b98a4577aa8290da6a48ed568ad01856b70b | Python | ArtTheFirst/CyTech | /practice/python/100-days-of-code/tip_calculator.py | UTF-8 | 651 | 4.5 | 4 | [] | no_license | #If the bill was $150.00, split between 5 people, with 12% tip.
#Each person should pay (150.00 / 5) * 1.12 = 33.6
#Format the result to 2 decimal places = 33.60
#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪
#Write your code below this line 👇
# Tip calculator: split a restaurant bill (plus tip) evenly between diners.
bill = 150           # total bill in dollars
persons = 5          # number of people splitting the bill
tip_percent = 12     # tip rate in percent

tip = (tip_percent / 100) * bill
print(f"Your bill is ${bill}")
print(f"There are {persons} of you")
split_bill = (bill + tip) / persons
# Fix: use `persons` rather than a hard-coded 5 so changing the head-count
# keeps every figure consistent.
print(f"You each tipped ${tip / persons}")
final_bill = round(split_bill, 2)
# Fix: report the actual total (bill + tip) instead of final_bill * 5, which
# re-multiplies a rounded per-person figure and can drift by a few cents.
print(f"Your total bill is ${bill + tip}")
print(f"You are to pay ${final_bill} each.")
| true |
43bd039079fae2e4bf05158922dbe45509e7124d | Python | nja2will/InstructionsTypographiques | /src/test.py | UTF-8 | 3,215 | 3.671875 | 4 | [] | no_license | from projet import *
def _lire_automate(prompt, affichage):
    """Prompt until a valid regular expression is entered, then build,
    display and return the corresponding automaton."""
    while True:
        expr = input(prompt)
        try:
            aut = expression_vers_automate(expression_rationnelle_vers_liste(expr))
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit inside the input loop.
        except Exception:
            print("expression invalide")
        else:
            break
    print(affichage)
    aut.display()
    input("Appuyez sur entrée pour continuer le test.\n")
    return aut

def test():
    """Interactive test driver: reads three automata (as regular expressions)
    and demonstrates completion, union, intersection, mirror, determinisation
    and complement on them, looping until the user answers "non"."""
    print("Pour effectuer le test, veuillez décrire 3 automates ayants le même alphabet.\n")
    while True:
        # The three read/display loops were identical; factored into a helper.
        aut1 = _lire_automate("Veuillez entrer l'expression régulière définissant le première automate utilisé pour le test :\n",
                              "Affichage du premier automate.\n")
        aut2 = _lire_automate("Veuillez entrer l'expression régulière définissant le second automate utilisé pour le test :\n",
                              "Affichage du second automate.\n")
        aut3 = _lire_automate("Veuillez entrer l'expression régulière définissant le troisième automate utilisé pour le test :\n",
                              "Affichage du troisième automate.\n")
        print("Automate complet équivalent au première automate.\n")
        completer(aut1).display()
        input("Appuyez sur entrée pour continuer le test.\n")
        if aut1.get_alphabet() == aut2.get_alphabet() and aut2.get_alphabet() == aut3.get_alphabet():
            print("Union minimisé du premier et du second automate.\n")
            autU = minimiser(union(aut1 , aut2))
            autU.display()
            input("Appuyez sur entrée pour continuer le test.\n")
            print("Intersection minimisé du troisième automate et de l'automate précédent.\n")
            autI = minimiser(intersection(autU,aut3))
            autI.display()
            input("Appuyez sur entrée pour continuer le test.\n")
        else:
            # Bug fix: the original called `printf(...)`, which is a NameError
            # in Python; `print` is clearly intended.
            print("Les 3 alphabets ne sont pas identiques.\n")
        print("Automate miroir du deuxième automate.\n")
        miroir(aut2).display()
        input("Appuyez sur entrée pour continuer le test.\n")
        print("Automate déterministe équivalent au troisième automate.\n")
        determiniser(aut3).display()
        input("Appuyez sur entrée pour continuer le test.\n")
        print("Automate complément du première automate.\n")
        complement(aut1).display()
        input("Appuyez sur entrée pour continuer le test.\n")
        s = ""
        while s != "oui" and s != "non":
            s = input("Voulez-vous recommencer le test ? ( Répondre par \"oui\" ou \"non\" )\n")
        if s == "non":
            break
9916ae4c8904de14f9e5b49568a605d6a317bf1e | Python | mfatihaktas/anonymity-mixes | /intersection_wsenders/intersection_model.py | UTF-8 | 1,823 | 2.890625 | 3 | [] | no_license | from log_utils import *
from math_utils import *
"""
Probability of observing the delivery of NO message from a sender
during target's attack window.
Delta: Length of the attack window
ar: Message generation rate at the sender
d: Unit time epoch length
T: Delivery time of a message, a r.v.
"""
def Pr_ObservingNoDeliveryFromASender(ar, T, d=0.01):
  """Probability that no message from a sender is delivered inside the
  target's attack window of length Delta = T.M.

  ar: message generation rate; T: delivery-time distribution object exposing
  .M (max delivery time), .cdf and .tail; d: discretisation step.
  The window spans [0, Delta); only messages generated in [-Delta, Delta)
  can possibly be delivered inside it, so the product runs over those epochs.
  """
  Delta = T.M
  Pr = 1
  # Time-0 is the beginning of the attack window
  Pr_MsgGenerated = ar*d
  # Messages generated before the window miss it iff delivered before time 0.
  for t in np.arange(-Delta, 0, d):
    Pr_MsgDeliveredBeforeWindow = T.cdf(abs(t))
    Pr_NoDeliveryTriggered = \
      Pr_MsgGenerated*Pr_MsgDeliveredBeforeWindow \
      + (1 - Pr_MsgGenerated)
    Pr *= Pr_NoDeliveryTriggered
  # Messages generated inside the window miss it iff delivered after its end.
  for t in np.arange(0, Delta, d):
    Pr_MsgDeliveredAfterWindow = T.tail(Delta - t)
    Pr_NoDeliveryTriggered = \
      Pr_MsgGenerated*Pr_MsgDeliveredAfterWindow \
      + (1 - Pr_MsgGenerated)
    Pr *= Pr_NoDeliveryTriggered
  return Pr
"""
E[N] for when the subsequent attack windows are independent.
That is the case when adversary waits for a message to be delivered before picking
up a new message generation at the target as the beginning of a new attack window.
N ~ max of n i.i.d. Geometric r.v.'s with success prob of Pr_SenderTakenAsCandidate
Pr_SenderTakenAsCandidate = Pr{A sender is taken as a candidate in a given attack window}
"""
def EN_IndependentWindows(n, ar, T):
  """E[N] for independent attack windows: N is the max of n i.i.d. Geometric
  r.v.'s with success probability Pr_SenderTakenAsCandidate, approximated via
  the exponential tail, giving H(n)/lambda (H is the harmonic-number helper
  from math_utils)."""
  Pr_SenderTakenAsCandidate = 1 - Pr_ObservingNoDeliveryFromASender(ar, T)
  log(INFO, "", Pr_SenderTakenAsCandidate=Pr_SenderTakenAsCandidate)
  lambda_ = -math.log(Pr_SenderTakenAsCandidate)
  return H(n)/lambda_
def ED_IndependentWindows(n, ar, T):
  """Expected attack duration with independent windows: E[N] windows, each
  costing one inter-generation time (1/ar) plus a delivery wait (T.M), plus
  a final T.M."""
  EN_IndWin = EN_IndependentWindows(n, ar, T)
  return EN_IndWin*(1/ar + T.M) + T.M
def ED_LB(n, ar, T):
  """Lower bound on the expected attack duration: windows cost only the
  inter-generation time 1/ar (no per-window delivery wait)."""
  EN_IndWin = EN_IndependentWindows(n, ar, T)
  return EN_IndWin*1/ar + T.M
| true |
4543520c9bf156e04de3627e92838824eb16da43 | Python | BojanKr/ssn | /scripts/sort_clusterONE_results.py | UTF-8 | 1,245 | 2.734375 | 3 | [] | no_license | import pandas as pd
import os
def get_root_dir():
    """Find the first *.xgmml file under ./network/ and build the run's root
    directory path from its name.

    Returns None implicitly when no .xgmml file is present.
    NOTE(review): `file` and `dir` shadow builtins.
    """
    for file in os.listdir(os.path.join(os.getcwd(), 'network/')):
        if not file.endswith('xgmml'):
            continue
        else:
            print(file)
            # NOTE(review): the path is joined against cwd (not network/) and
            # keeps the full file name plus '/' -- presumably a results folder
            # is named after the network file; confirm against the layout.
            dir = os.path.join(os.getcwd(), file+'/')
            print(dir)
            # Returns on the first match; any further .xgmml files are ignored.
            return dir
def main():
    """Flatten ClusterONE results: one (accession, cluster) row per member,
    written to results/sorted_clusterONE_results.csv under the run root."""
    root_dir = get_root_dir()
    print('Cleaning and reformatting ClusterONE results.\n')
    # Read clusterONE results
    cone_df = pd.read_csv(os.path.join(os.getcwd(), root_dir, 'results', 'clusterONE_results.csv'))
    # Keep only cluster numbers and accession numbers of members of clusters
    cone_df = cone_df[['Cluster', 'Members']]
    # Write sorted clusterONE results to a file
    out_file = os.path.join(os.getcwd(), root_dir, 'results', 'sorted_clusterONE_results.csv')
    with open(out_file, 'w') as f:
        f.write('shared name' + ',' + 'clusterONE' + '\n')
    # Sort results
    # NOTE(review): the file is re-opened in append mode for every accession;
    # opening it once around the loop would avoid O(rows) open() calls.
    for index, row in cone_df.iterrows():
        # 'Members' is a whitespace-separated list of accession numbers.
        for acc in row['Members'].split():
            with open(out_file, 'a') as f:
                f.write(acc + ',' + str(row['Cluster']) + '\n')
    print(f'Cleaned and sorted clusterONE results written to {out_file}')
main()
| true |
cea5c5c8ad99a1a22dc23878605f16b357032445 | Python | lleonova/Automation2 | /Amazon_WholeFoods_deals_practice.py | UTF-8 | 1,412 | 2.671875 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
PRODUCTS_UNDER_YELLOW_LINE = (By.CSS_SELECTOR, "#wfm-pmd_deals_section div.wfm-desktop-section-size:nth-of-type(6) li")
PRODUCT_NAME = (By.CSS_SELECTOR, "span.wfm-sales-item-card__product-name")
# init driver
driver = webdriver.Chrome(executable_path='./chromedriver')
driver.wait = WebDriverWait(driver, 10)
driver.maximize_window()
# open the url
driver.get('https://www.amazon.com/wholefoodsdeals')
try: # I use TRY and FINNALY just in case if one of the tests fail, the driver will close the window. So when I debug the program, I don't have to close many windows manually :)
# find all products under yellow line
driver.wait.until(EC.presence_of_all_elements_located(PRODUCTS_UNDER_YELLOW_LINE))
products = driver.find_elements(*PRODUCTS_UNDER_YELLOW_LINE)
for item in products:
# verify every product on the page has a text ‘Regular’
assert "Regular" in item.text, f"Expected 'Regular' in product price, but get {item.text}"
# verify every product on the page has a product name
assert item.find_element(*PRODUCT_NAME), f"Expected every product on the page has a product name, but didn't find it"
finally:
# close the window
driver.quit() | true |
abb216a2ba8299c0a39673829d685c7989e761a9 | Python | maximan3000/IntelligenceSystems | /RecSys/project/sugrate.py | UTF-8 | 7,542 | 3.359375 | 3 | [] | no_license | import numpy
class UserSuggestingRate:
"""
Класс для рекомендационной системы. Строится на основании данных конкретного пользователя
"""
def __init__(self, myName: str, usersRates: dict, usersDaysOfWeek: dict, usersPlaces: dict, kNN: int = 7) -> None:
super().__init__()
self.__myName = myName
self.kNN = kNN # Коллаборативая фильтрация: user-based, метод kNN
self.usersRates = usersRates
self.usersDaysOfWeek = usersDaysOfWeek
self.usersPlaces = usersPlaces
self.__myRates = self.__getUserRates()
self.__symmetries = self.__getSymmetries()
self.__myAverageRate = self.getAverageRate(self.__myRates)
def suggestRates(self) -> dict:
"""
Найти прогноз оценки пользователя для всех фильмов, которые он не оценил
"""
suggest = dict()
for filmNumber in range(len(self.__myRates)):
filmRate = self.__myRates[filmNumber]
if filmRate == -1:
suggest["Movie " + str(filmNumber)] = self.suggestRate(filmNumber)
return suggest
def suggestRate(self, filmNumber: int) -> float:
"""
Найти прогноз оценки пользователя для фильма номер filmNumber (нумерация с 0)
Рассчитывается по формуле в задании
"""
topSum = 0.0
downSum = 0.0
nClosestUsers = self.__getNclosestUsers(self.kNN, filmNumber)
for userName, userRates in self.usersRates.items():
if userName in nClosestUsers:
avgRate = UserSuggestingRate.getAverageRate(userRates)
rate = userRates[filmNumber]
a = self.__symmetries[userName] * (rate - avgRate)
topSum += a
b = numpy.abs(self.__symmetries[userName])
downSum += b
suggest = self.__myAverageRate + (topSum / downSum)
return numpy.round(suggest, 3)
def recommend(self):
"""
Алгоритм рекомендации:
Идем по всем пользователям и ищем у них фильм, который:
1) не смотрел наш пользователь
2) был просмотрен в sat или sun (на выходных)
3) был просмотрен дома
Если такой фильм найден, то метрика сходства умножается на оценку пользователя об этом фильме ( sim * rate )
полученное число - ценность рекомендации данного фильма - добавляется в dict (мапу). Если в мапе уже была рекомендация
данного фильма, но с меньшей ценностью, то она заменяется
В конце полученную мапу приводим к списку и сортируем по убыванию ценности.
Далее рекомендуем фильм, который имеет наибольшую ценность
"""
filmRecommendations = dict()
for userName, userRates in self.usersRates.items():
if userName != self.__myName:
for filmNumber in range(len(userRates)):
if self.verifyFilmConditions(userName, filmNumber):
recommendationCost = self.__symmetries[userName] * userRates[filmNumber]
if filmNumber not in filmRecommendations.keys() or filmRecommendations[filmNumber] < recommendationCost:
filmRecommendations[filmNumber] = recommendationCost
sortedRecommendations = list(filmRecommendations.items())
sortedRecommendations.sort(key=lambda entry: entry[1], reverse=True)
return sortedRecommendations[0][0]
def verifyFilmConditions(self, userName, filmNumber):
"""
Проверка удовлетворения фильмом условиям в алгоритме выше
"""
didMeWatch = (self.__myRates[filmNumber] == -1)
didUserWatchAtWeekend = (self.usersDaysOfWeek[userName][filmNumber].strip() == "Sat") or \
(self.usersDaysOfWeek[userName][filmNumber].strip() == "Sun")
didUserWatchAtHome = (self.usersPlaces[userName][filmNumber].strip() == "h")
if didMeWatch and didUserWatchAtHome and didUserWatchAtWeekend:
return True
else:
return False
@staticmethod
def getAverageRate(filmRates: list) -> float:
"""
Найти среднюю оценку
"""
avg = 0.0
count = 0
for filmRate in filmRates:
if -1 != filmRate:
avg += filmRate
count += 1
return avg / count
@staticmethod
def findSymmetry(filmRates1: list, filmRates2: list) -> float:
"""
Найти сходство пользователя с тем, у кого оценки = filmRates
sim(U,V) = sum(i=1..m; U[i]*V[i]) / ( sqrt sum(i=1..m; U[i]^2]) * sqrt sum(i=1..m; V[i]^2) )
где V - текущий пользователь
"""
sumUV = 0
sumU2 = 0
sumV2 = 0
for i in range(len(filmRates1)):
rate1 = filmRates1[i]
rate2 = filmRates2[i]
if rate1 != -1 and rate2 != -1:
sumUV += rate1 * rate2
sumU2 += rate1 * rate1
sumV2 += rate2 * rate2
symmetry = sumUV / (numpy.sqrt(sumV2) * numpy.sqrt(sumU2))
return symmetry
def __getUserRates(self) -> list:
"""
Получить список оценок фильмов для нашего пользователя
"""
selfUserRates = list()
for userName, userRates in self.usersRates.items():
if userName == self.__myName:
selfUserRates = userRates
break
return selfUserRates
def __getSymmetries(self) -> dict:
"""
Заполнить self.__symmetries - данные сходства пользователя со всеми остальными пользователями
"""
symmetries = dict()
for userName, userRates in self.usersRates.items():
if userName != self.__myName:
symmetries[userName] = self.findSymmetry(self.__myRates, userRates)
return symmetries
def __getNclosestUsers(self, n: int, filmNumber: int) -> list:
"""
Получить не более n пользователей, которые наиболее схожи с нашим пользователем, в убывающем порядке сходства
"""
symmetriesEntries = list(self.__symmetries.items())
symmetriesEntries.sort(key=lambda entry: entry[1], reverse=True)
symmetriesUsers = list()
i = 0
while i < n:
userName = symmetriesEntries[i][0]
rates = self.usersRates[userName]
if (rates[filmNumber] != -1):
symmetriesUsers.append(userName)
i += 1
return symmetriesUsers
| true |
5df6be989240425089f4a90bf216b86d13420df9 | Python | zhongpei0820/LeetCode-Solution | /Python/1-99/018_4Sum.py | UTF-8 | 2,479 | 3.53125 | 4 | [] | no_license | #Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
#
#Note: The solution set must not contain duplicate quadruplets.
#
#
#
#For example, given array S = [1, 0, -1, 0, -2, 2], and target = 0.
#
#A solution set is:
#[
# [-1, 0, 0, 1],
# [-2, -1, 1, 2],
# [-2, 0, 0, 2]
#]
#
class Solution(object):
    """LeetCode 18 -- 4Sum."""

    def fourSum(self, nums, target):
        """
        Return all unique quadruplets [a, b, c, d] drawn from `nums` with
        a + b + c + d == target.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]

        Sorts the input once, fixes the first two elements, and closes the
        remaining pair with two pointers: O(n^3) time, O(1) extra space
        beyond the output. Duplicates are avoided by skipping repeated
        values at every level, replacing the original's fragile
        special-case/early-break bookkeeping (the former threeSum/twoSum
        accumulator helpers are folded in).
        """
        nums = sorted(nums)
        n = len(nums)
        result = []
        if n < 4:
            return result
        for i in range(n - 3):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # same first element already handled
            # Prune: the smallest reachable sum already exceeds target.
            if nums[i] + nums[i + 1] + nums[i + 2] + nums[i + 3] > target:
                break
            # Prune: even the largest reachable sum falls short.
            if nums[i] + nums[n - 3] + nums[n - 2] + nums[n - 1] < target:
                continue
            for j in range(i + 1, n - 2):
                if j > i + 1 and nums[j] == nums[j - 1]:
                    continue  # same second element already handled
                if nums[i] + nums[j] + nums[j + 1] + nums[j + 2] > target:
                    break
                if nums[i] + nums[j] + nums[n - 2] + nums[n - 1] < target:
                    continue
                lo, hi = j + 1, n - 1
                want = target - nums[i] - nums[j]
                while lo < hi:
                    s = nums[lo] + nums[hi]
                    if s == want:
                        result.append([nums[i], nums[j], nums[lo], nums[hi]])
                        # Step both pointers past any duplicates of the pair.
                        while lo < hi and nums[lo] == nums[lo + 1]:
                            lo += 1
                        while lo < hi and nums[hi] == nums[hi - 1]:
                            hi -= 1
                        lo += 1
                        hi -= 1
                    elif s < want:
                        lo += 1
                    else:
                        hi -= 1
        return result
| true |
9929bf67b377598bf4c6b2bfdf0c44370c79c2a1 | Python | jay3ss/congenial-rotary-phone | /singlylinkedlist.py | UTF-8 | 2,732 | 4.40625 | 4 | [
"MIT"
] | permissive | class ListNode:
"""
A node in a singly-linked list.
"""
def __init__(self, data=None, next=None):
self.data = data
self.next = next
def __repr__(self):
return repr(self.data)
class SinglyLinkedList:
def __init__(self):
"""
Create a new singly-linked list.
Takes O(1) time.
"""
self.head = None
def __repr__(self):
"""
Return a string representation of the list.
Takes O(n) time.
"""
nodes = []
curr = self.head
while curr:
nodes.append(repr(curr))
curr = curr.next
return '[' + ', '.join(nodes) + ']'
def prepend(self, data):
"""
Insert a new element at the beginning of the list.
Takes O(1) time.
"""
self.head = ListNode(data=data, next=self.head)
def append(self, data):
"""
Insert a new element at the end of the list.
Takes O(n) time.
"""
if not self.head:
self.head = ListNode(data=data)
return
curr = self.head
while curr.next:
curr = curr.next
curr.next = ListNode(data=data)
def find(self, key):
"""
Search for the first element with `data` matching
`key`. Return the element or `None` if not found.
Takes O(n) time.
"""
curr = self.head
while curr and curr.data != key:
curr = curr.next
return curr # Will be None if not found
def remove(self, key):
"""
Remove the first occurrence of `key` in the list.
Takes O(n) time.
"""
# Find the element and keep a
# reference to the element preceding it
curr = self.head
prev = None
while curr and curr.data != key:
prev = curr
curr = curr.next
# Unlink it from the list
if prev is None:
self.head = curr.next
elif curr:
prev.next = curr.next
curr.next = None
def reverse(self):
"""
Reverse the list in-place.
Takes O(n) time.
"""
curr = self.head
prev_node = None
next_node = None
while curr:
next_node = curr.next
curr.next = prev_node
prev_node = curr
curr = next_node
self.head = prev_node
if __name__ == '__main__':
    # Smoke test: exercise each list operation once when run as a script.
    # The find() results are intentionally discarded; remove('not found')
    # demonstrates the no-op path.
    lst = SinglyLinkedList()
    lst.prepend(23)
    lst.prepend('a')
    lst.prepend(42)
    lst.prepend('X')
    lst.append('the')
    lst.append('end')
    lst.find('X')
    lst.find('y')
    lst.reverse()
    lst.remove(42)
    lst.remove('not found')
| true |
ebb8ad1925823222764ae07680f8280b74c7cc72 | Python | bp40/attendanceLog | /attendance.py | UTF-8 | 2,978 | 2.859375 | 3 | [] | no_license | import mysql.connector
import os
import sys
import RPi.GPIO as GPIO
from mfrc522 import SimpleMFRC522
from RPLCD.i2c import CharLCD
import time
import datetime
#setup
GPIO.setwarnings(False)
now = datetime.datetime.now()
current_time = now.strftime("%H:%M:%S")
run = True;
print("Current Time =", current_time)
lcd = CharLCD('PCF8574', 0x27)
reader = SimpleMFRC522()
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
password = "password",
database = "attendance"
)
cursor = mydb.cursor(buffered=True)
#end setup
def createTable():
#create student list table
cursor.execute("""CREATE TABLE IF NOT EXISTS`students` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`rfidID` varchar(255) NOT NULL DEFAULT '',
`name` varchar(255) NOT NULL DEFAULT '',
PRIMARY KEY (`id`)
)""")
#create attendance table
cursor.execute("""CREATE TABLE IF NOT EXISTS`attendancelog` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`studentID` int(10) unsigned NOT NULL,
`checkinTime` datetime NOT NULL,
PRIMARY KEY (`id`),
KEY `attendanceLog_FK` (`studentID`),
CONSTRAINT `attendanceLog_FK` FOREIGN KEY (`studentID`) REFERENCES `students` (`id`)
)""")
def checkStudentExist(rfid, ):
    """Return True when `rfid` is NOT present in the students table.

    NOTE(review): the polarity is inverted relative to the name -- False is
    returned for an existing student. The caller treats True as "existing"
    and calls logAttendance, so an unknown card is logged and a known card
    triggers addStudent; verify which direction is intended.
    NOTE(review): the %s placeholder is quoted inside the SQL string and
    `rfid` is passed bare instead of as a 1-tuple -- as written the driver's
    parameter substitution looks broken; confirm against mysql.connector's
    paramstyle.
    """
    sql = ("SELECT name FROM students WHERE rfidID = '%s'")
    cursor.execute(sql, rfid)
    if cursor.rowcount != 0:
        return False
    else:
        return True
def addStudent(rfid, name):
sql = ("INSERT INTO students (rfidID, name) VALUES (%s, %s)")
values = (rfid, name)
cursor.execute(sql, values)
mydb.commit()
print(cursor.rowcount, 'rows affected')
def logAttendance(rfid):
queryID=("SELECT id FROM students WHERE rfidID LIKE %s")
cursor.execute(queryID, (rfid,))
result = cursor.fetchall()
now = datetime.datetime.now()
current_time = now.strftime("%H:%M:%S")
#print(result[0][0]);
queryLog = ("INSERT INTO attendancelog (studentID, checkinTime) VALUES (%s, %s)")
values = (result[0][0], now)
cursor.execute(queryLog, values)
mydb.commit()
print(cursor.rowcount, 'rows affected')
while (run == True):
lcd.write_string("Please Scan:")
now = datetime.datetime.now()
current_time = now.strftime("%H:%M:%S")
try:
id, text = reader.read()
if checkStudentExist(id):
print("log existing")
logAttendance(id)
else:
print("log new")
addStudent(id, text)
print('Welcome')
print(id)
print('Text : ')
print(text)
lcd.clear()
lcd.write_string('Hello '+text + '\r\n')
lcd.write_string('Time : ' + current_time)
time.sleep(3)
except Exception as e:
lcd.write_string('\n\rREAD ERROR')
print(e)
time.sleep(3)
finally:
lcd.clear()
GPIO.cleanup() | true |
81f7afb1489c3b3a077fc2b7ee6b09cf45434cfd | Python | NadavFeldman/Lending-Club-Issued-Loans-Analysis- | /src/models/7-Train-Test-Preparation.py | UTF-8 | 6,909 | 3.25 | 3 | [
"BSD-3-Clause"
] | permissive |
# coding: utf-8
# # Train - Dev - Test Preparation
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
##############################################################################
########## DATABASE FUNCTIONS #############
##############################################################################
#### Read function to import data from the SQL to a pandas dataframe.
def readSQL(query):
    """Run `query` against the SQLite database at the module-level DB_FILE
    and return the result as a pandas DataFrame."""
    import pandas as pd
    import sqlite3 as sql3
    db = sql3.connect(DB_FILE)
    df = pd.read_sql_query(query, db)
    # NOTE(review): if read_sql_query raises, the connection is never closed;
    # a with-block / try-finally would be safer.
    db.close()
    return(df)
#### Write a pandas dataframe into an SQL table. Use overwrite=True if you want to delete
#### first a pre-existent table with the same name. Use append=True if you want to append
#### the data in the dataframe to a pre-existent table.
def writeSQL(df,tablename,overwrite=False, append=False):
    """Write DataFrame `df` into table `tablename` of the SQLite database at
    DB_FILE. overwrite=True drops/replaces an existing table; append=True
    appends to it; otherwise writing fails if the table exists (pandas
    to_sql `if_exists` semantics). `overwrite` wins when both are True."""
    import pandas as pd
    import sqlite3 as sql
    db = sql.connect(DB_FILE)
    if (overwrite):
        action = "replace"
    elif (append):
        action = "append"
    else:
        action = "fail"
    df.to_sql(tablename, db, if_exists=action)
    db.close()
db.close()
# In[4]:
import os
cwd = os.getcwd()
DB_FILE = "%s\Data\loans.db" % cwd
# In[5]:
loans = readSQL('''SELECT * FROM full_dataset''')
# In[6]:
loans.head()
# In[7]:
loans = loans.drop(["index"],axis=1)
# In[52]:
df = loans.copy()
# In[53]:
## check the dtypes on the dataframe
df.dtypes
# We have to define some of the variables to be sure they are used as required by the models
# In[54]:
##Get Series of columns
cat_vars = list(df.select_dtypes(include=['object']).columns)
cat_vars
##Turn
for column in cat_vars:
df[column] = df[column].astype('category')
# In[55]:
### check missings
df.isnull().sum()
# ## Preparing the partition to test, train and dev
# In[56]:
from random import seed, shuffle,randint
##Get Thomas Library
import pyMechkar as mechkar
import time
# In[57]:
def splittDF(df):
    """Randomly split `df` into train/dev/test (80/10/10) via a new 'dataset'
    column, re-drawing the random seed until pyMechkar's Table1 reports no
    variable differing between the three sets at p < 0.05 (a balanced split).
    Mutates and returns `df`.
    """
    p_value = 1
    vn = df.columns.tolist()
    while p_value > 0:
        randomSeed = randint(100,2000)
        print (randomSeed)
        seed(randomSeed)
        # Shuffle row positions, then carve the first 10% / next 10% off.
        xind = [i for i in range(len(df))]
        shuffle(xind,)
        ### We will split as follows: 10% test, 10% dev and 80% train
        ### test split
        split_1 = int(0.1 * len(xind))
        ### train-dev split
        split_2 = int(0.2 * len(xind))
        df['dataset'] = "train"
        # NOTE(review): column index 79 is hard-coded as the position of the
        # just-added 'dataset' column -- this breaks if the frame's width
        # changes; df.columns.get_loc('dataset') would be robust.
        df.iloc[xind[0:split_1],79] = "test"
        df.iloc[xind[split_1:split_2],79] = "dev"
        df['dataset'] = df['dataset'].astype('category')
        init = time.time()
        tab1 = mechkar.pyMechkar().Table1(x=vn,y="dataset",data=df,categorize=True,maxcat=7)
        print("---- %s seconds -----" % (time.time() - init))
        # Count of variables still significantly imbalanced across the splits.
        t = tab1.loc[(tab1['p_value']<0.05),]
        p_value = len(t.index)
    return(df)
# In[58]:
df = splittDF(df)
# In[59]:
##SEED=1207 Gave us the balanced split
df.head()
# In[61]:
### We will split as follows: 10% test, 10% dev and 80% train
X_test = df.loc[(df["dataset"] =="test"),] #power.iloc[xind[0:split_1],]
y_test = df.loc[(df["dataset"] =="test"),"default"]#power.iloc[xind[0:split_1],29]
X_dev = df.loc[(df["dataset"] =="dev"),]#power.iloc[xind[split_1:split_2],]
y_dev = df.loc[(df["dataset"] =="dev"),"default"]#power.iloc[xind[split_1:split_2],29]
X_train = df.loc[(df["dataset"] =="train"),]#power.iloc[xind[split_2:],]
y_train = df.loc[(df["dataset"] =="train"),"default"]#power.iloc[xind[split_2:],29]
# In[62]:
print(X_train.shape)
print(y_train.shape)
print(X_dev.shape)
print(y_dev.shape)
print(X_test.shape)
print(y_test.shape)
# In[63]:
X_train.head()
# In[64]:
print(y_train.describe())
print(y_train.isnull().sum())
# In[65]:
## drop default from X_xxx
X_train = X_train.drop('default',axis=1)
X_dev = X_dev.drop('default',axis=1)
X_test = X_test.drop('default',axis=1)
# In[75]:
sns.distplot(y_train.astype(int),label="train")
sns.distplot(y_dev.astype(int),label="dev")
sns.distplot(y_test.astype(int),label="test")
plt.legend()
plt.show()
# In[76]:
X_train = X_train.drop('dataset',axis=1)
X_dev = X_dev.drop('dataset',axis=1)
X_test = X_test.drop('dataset',axis=1)
# In[140]:
writeSQL(X_train,tablename="X_train")
writeSQL(X_dev,tablename="X_dev")
# Persist the splits to SQL tables.
# NOTE(review): writeSQL, X_test/X_train/y_* are defined earlier in this file
# (outside this excerpt) -- presumably pandas DataFrames/Series; verify there.
writeSQL(X_test,tablename="X_test")
writeSQL(y_train,tablename="Y_train")
writeSQL(y_dev,tablename="Y_dev")
writeSQL(y_test,tablename="Y_test")
# <h3>Check Inbalance
# In[78]:
# Attach the target back onto the features so class balance can be inspected.
train = X_train.assign(default=y_train)
# In[79]:
train.head()
# In[82]:
# Raw class frequencies of the (binary) target before any resampling.
target_count = train.default.value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
print('Proportion:', round(target_count[0] / target_count[1], 2), ': 1')
target_count.plot(kind='bar', title='Count (target)');
plt.show()
# In[84]:
import imblearn
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import TomekLinks
# In[93]:
##Random Under Sampling
# NOTE(review): fit_sample / return_indices are deprecated in recent
# imbalanced-learn versions (use fit_resample) -- this code targets an old API.
rus = RandomUnderSampler(return_indices=True)
X_rus, y_rus, id_rus = rus.fit_sample(X_train, y_train.astype(int))
# In[129]:
# Column 78 appears to be the target column after np.column_stack --
# presumably X has 78 feature columns (0..77); TODO confirm.
data_rus=pd.DataFrame(np.column_stack((X_rus,y_rus)))
target_count = data_rus[78].value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
# In[94]:
##Random Over Sampling
ros = RandomOverSampler()
X_ros, y_ros = ros.fit_sample(X_train, y_train.astype(int))
# In[130]:
data_ros=pd.DataFrame(np.column_stack((X_ros,y_ros)))
target_count = data_ros[78].value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
# In[95]:
## Under-sampling: Tomek links
tl = TomekLinks(return_indices=True, ratio='majority')
X_tl, y_tl, id_tl = tl.fit_sample(X_train, y_train.astype(int))
# In[131]:
data_tl=pd.DataFrame(np.column_stack((X_tl,y_tl)))
target_count = data_tl[78].value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
# In[96]:
smote = SMOTE(ratio='minority')
X_sm, y_sm = smote.fit_sample(X_train, y_train.astype(int))
# In[132]:
data_smote=pd.DataFrame(np.column_stack((X_sm,y_sm)))
target_count = data_smote[78].value_counts()
print('Class 0:', target_count[0])
print('Class 1:', target_count[1])
# In[133]:
# Baseline model: L1-penalised logistic regression, fit on each resampled set.
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver="liblinear", penalty='l1',max_iter=1000)
# In[134]:
model.fit(X_train, y_train)
# In[135]:
model.score(X_train,y_train)
# In[136]:
# NOTE(review): each fit below overwrites the previous one, and score() is
# computed on the same data the model was fit on (training accuracy only).
model.fit(X_rus, y_rus)
model.score(X_rus, y_rus)
# In[137]:
model.fit(X_ros, y_ros)
model.score(X_ros, y_ros)
# In[138]:
model.fit(X_tl, y_tl)
model.score(X_tl, y_tl)
# In[139]:
model.fit(X_sm, y_sm)
model.score(X_sm, y_sm)
| true |
fe98ebaeb7d190cdae0e795a23a005f4d3ed70c6 | Python | sailfish009/protein-function-prediction | /protfun/data_management/data_manager.py | UTF-8 | 24,864 | 2.59375 | 3 | [] | no_license | import shutil
import abc
import numpy as np
import os
import protfun.data_management.preprocess as prep
from protfun.data_management.label_factory import LabelFactory
from protfun.data_management.validation import EnzymeValidator
from protfun.utils import save_pickle, load_pickle, construct_hierarchical_tree
from protfun.utils.log import get_logger
log = get_logger("data_manager")
class DataManager(object):
    """
    DataManager is a parent class for EnzymeDataManager which stores all data directories and
    implements a *naive* split strategy described below.
    """
    # NOTE(review): __metaclass__ is the Python-2 way to declare a metaclass;
    # under Python 3 this line is a no-op and the abstract methods are not enforced.
    __metaclass__ = abc.ABCMeta
    def __init__(self, data_dir,
                 force_download=False, force_process=False, force_split=False,
                 percentage_test=10, percentage_val=20):
        """
        :param data_dir: the path to the root data directory
        :param force_download: forces the downloading of the enzymes
        :param force_process: forces the pre-processing steps
        :param force_split: forces the splitting of the data into training ,validation and test sets
        :param percentage_test: the portion in % of the test data
        :param percentage_val: the portion in % of the validation data
        """
        # each "force" flag implies all later pipeline stages must rerun too
        self.force_download = force_download
        self.force_process = force_process or force_download
        self.force_split = force_split or force_process or force_download
        self.p_test = percentage_test
        self.p_val = percentage_val
        # canonical layout of the data directory tree
        self.dirs = {'data': data_dir,
                     'data_raw': os.path.join(data_dir, "raw"),
                     'data_processed': os.path.join(data_dir, "processed"),
                     'data_train': os.path.join(data_dir, "train"),
                     'data_test': os.path.join(data_dir, "test"),
                     'misc': os.path.join(data_dir, "misc")}
        # ensure all directories exist
        for _, d in self.dirs.items():
            if not os.path.exists(d) and not os.path.islink(d):
                os.makedirs(d)
    def get_data_dir(self):
        """Return the root data directory path."""
        return self.dirs['data']
    @abc.abstractmethod
    def get_test_set(self):
        raise NotImplementedError
    @abc.abstractmethod
    def get_training_set(self):
        raise NotImplementedError
    @abc.abstractmethod
    def get_validation_set(self):
        raise NotImplementedError
    @staticmethod
    def split_data_on_sublevel(data_dict, percentage, hierarchical_depth):
        """
        Performs a "strict" split by taking the nodes in the EC tree in the level **below**
        hierarchical_depth, and placing the proteins under each of those nodes **disjointly**
        in either the first or second part of the split.
        E.g. if hierarchical_depth=3, then each EC tree node on level 4 will be put in
        either the first part, or in the second part of the split.
        :param data_dict: a dictionary with keys categories and value per key - list of pdb codes
        :param percentage: the portion of the data in % that should be put into the first split
        :param hierarchical_depth: depth for the classification, the nodes on the level **below**
        this depth are each put into one of the two parts of the split
        :return: a tuple of the two splits as data dictionaries
        """
        import itertools
        first_data_dict = dict()
        second_data_dict = dict()
        # truncate every leaf class to the requested depth to obtain the parent nodes
        target_classes = set(['.'.join(cls.split('.')[:hierarchical_depth]) for cls in data_dict])
        for target_cls in target_classes:
            children = [(cls, enzymes) for cls, enzymes in data_dict.items() if
                        cls.startswith(target_cls + '.')]
            # NOTE(review): zip(*children)[1] only works on Python 2 (zip returns
            # an iterator on Python 3) and raises if `children` is empty.
            target_cls_prots = set(itertools.chain.from_iterable(zip(*children)[1]))
            # the *second* split receives (100 - percentage)% of the proteins
            required_count = ((100 - percentage) * len(target_cls_prots)) // 100
            # greedily fill the second split with the largest child nodes first
            sorted_children = sorted(children, key=lambda x: len(x[1]), reverse=True)
            collected_so_far = set()
            for cls, enzymes in sorted_children:
                if len(collected_so_far) < required_count:
                    collected_so_far |= set(enzymes)
                    second_data_dict[cls] = enzymes
                else:
                    first_data_dict[cls] = enzymes
        return first_data_dict, second_data_dict
    @staticmethod
    def split_data_on_level(data_dict, percentage, level=3):
        """
        performs a *naive* split, i.e. splitting proteins codes within a leaf-node from the
        hierarchical category tree, or a *semi-naive* split when spliting on a higher level node.
        In the latter case, the proteins within all sublevels of a higher-level node are merged
        into a pool of protein codes and then split according to the percentage value.
        :param data_dict: a dictionary with keys categories and value per key - list of pdb codes
        :param percentage: the portion of the data in % that should be put into the first split
        :param level: the hierarchical tree depth level on which the split is made.
        :return: a tuple of the two splits as data dictionaries
        """
        if not 0 <= percentage <= 100:
            log.error("Bad percentage number. Must be in [0, 100]")
            raise ValueError
        first_data_dict = {key: [] for key in data_dict.keys()}
        second_data_dict = {key: [] for key in data_dict.keys()}
        if level < 4:
            # pool the leaf classes up to `level`, remembering each protein's full class
            merged_on_level = construct_hierarchical_tree(data_dict, prediction_depth=level)
            prots2classes_dict = dict()
            for cls, prot_codes in data_dict.items():
                for p in prot_codes:
                    prots2classes_dict[p] = cls
        else:
            merged_on_level = data_dict
        # take percentage of data points from each hierarchical leaf class
        for cls, samples in merged_on_level.items():
            num_samples = len(samples)
            first_part_size = int((num_samples * percentage) // 100)
            second_part_size = num_samples - first_part_size
            if first_part_size == 0 or second_part_size == 0:
                log.warning(
                    "Data size of leaf class: {0} percentage: {1}".format(num_samples, percentage))
                log.warning(
                    "Class {} will not be represented in one part of the split.".format(cls))
            # random, non-repeating draw for the first part; the rest goes to the second
            first_samples = np.random.choice(samples, replace=False,
                                             size=int((num_samples * percentage) // 100.0))
            second_samples = np.setdiff1d(samples, first_samples, assume_unique=True)
            if level < 4:
                # re-attach each sampled protein to its original (full-depth) class
                for p in first_samples:
                    full_cls = prots2classes_dict[p]
                    first_data_dict[full_cls].append(p)
                for p in second_samples:
                    full_cls = prots2classes_dict[p]
                    second_data_dict[full_cls].append(p)
            else:
                first_data_dict[cls] = list(first_samples)
                second_data_dict[cls] = list(second_samples)
        return first_data_dict, second_data_dict
    @staticmethod
    def merge_data(data=None):
        """
        merges two or more data dictionaries into a single one.
        :param data: a list of data dictionaries with key - EC-class and value - list of protein
        codes.
        :return: a single data dictionary (a union over the input dictionaries)
        """
        if isinstance(data, list):
            # NOTE(review): sum([d.keys() ...], []) assumes d.keys() returns a
            # list, which is Python-2 behaviour (use list(d.keys()) on Python 3).
            all_keys = set(sum([d.keys() for d in data], []))
            merged_data_dict = {k: [] for k in all_keys}
            for d in data:
                for k in all_keys:
                    if k in d.keys():
                        merged_data_dict[k] += d[k]
            # remove eventual duplicates from the lists of elements for each key
            for k in all_keys:
                merged_data_dict[k] = list(set(merged_data_dict[k]))
            return merged_data_dict
        else:
            log.error("Provide a list of data dictionaries to be merged")
            raise ValueError
class EnzymeDataManager(DataManager):
    """
    EnzymeDataManager inherits from DataManager the *naive* and *semi-naive* splitting method and
    implements the essential management processes, required for the EnzymeCategory and
    ProteinDataBank data maintenance. Roughly the management pipeline can be described as:
    [download] -> [pre-process] -> [split test/train] -> provide
    where [.] designates a step that can be omitted if already done.
    """
    def __init__(self, data_dir,
                 force_download, force_memmaps,
                 force_grids, force_split,
                 grid_size,
                 enzyme_classes=None,
                 hierarchical_depth=4,
                 percentage_test=30,
                 percentage_val=30,
                 split_strategy='strict',
                 add_sidechain_channels=True):
        """
        :param data_dir: the path to the root data directory
        :param force_download: forces the downloading of the protein pdb files should be done
        :param force_memmaps: forces the memmapping of protein data, i.e. vdw-radii, atom coords. and charges
        :param force_grids: forces the 3D maps of electron density and potential generation
        :param force_split: forces the splitting into train/val/test sets
        :param grid_size: number of points on the side of the computed el. density grids
        :param enzyme_classes: a subset of EC given by a list of only those classes that should be considered
        :param hierarchical_depth: the maximal depth of prediction
        :param percentage_test: the portion of the data in % for the test set
        :param percentage_val: the portion of the data in % for the validation set
        :param split_strategy: split strategy to use, 'naive' or 'strict'
        :param add_sidechain_channels: boolean, whether to use 24-channel grid density maps (with
        all sidechains as channels) or 1 channel grid density map
        """
        super(EnzymeDataManager, self).__init__(data_dir=data_dir,
                                                force_download=force_download,
                                                force_process=force_memmaps or force_grids,
                                                force_split=force_split,
                                                percentage_test=percentage_test,
                                                percentage_val=percentage_val)
        # downloading invalidates both processing stages; memmaps invalidate grids
        self.force_grids = force_grids or force_memmaps or force_download
        self.force_memmaps = force_memmaps or force_download
        self.grid_size = grid_size
        self.enzyme_classes = enzyme_classes
        self.max_hierarchical_depth = hierarchical_depth
        self.split_strategy = split_strategy
        self.add_sidechain_channels = add_sidechain_channels
        self.validator = EnzymeValidator(enz_classes=enzyme_classes,
                                         dirs=self.dirs)
        # runs the whole [download] -> [process] -> [split] pipeline eagerly
        self._setup_enzyme_data()
    def _setup_enzyme_data(self):
        """
        performs the abovementioned steps in the data management cycle.
        :return:
        """
        if self.enzyme_classes is None or not self.validator.check_naming(
                self.enzyme_classes):
            log.error("Unknown enzyme classes")
            raise ValueError
        # Download the data if required
        if self.force_download:
            ef = prep.EnzymeFetcher(categories=self.enzyme_classes,
                                    enzyme_dir=self.dirs['data_raw'])
            self.all_proteins = ef.fetch_enzymes()
            prep.download_pdbs(base_dir=self.dirs['data_raw'], protein_codes=self.all_proteins)
            # persist both a pickle and human-readable per-class lists
            save_pickle(file_path=os.path.join(self.dirs["data_raw"], "all_prot_codes.pickle"),
                        data=self.all_proteins)
            self._save_enzyme_list(target_dir=self.dirs["data_raw"],
                                   proteins_dict=self.all_proteins)
        else:
            log.info("Skipping downloading step")
            self.all_proteins = load_pickle(
                file_path=os.path.join(self.dirs["data_raw"],
                                       "all_prot_codes.pickle"))
        # drop codes that were fetched from the EC DB but failed to download from the PDB
        failed_downloads, n_successful, n_failed = self.validator.check_downloaded_codes()
        self._remove_failed_downloads(failed=failed_downloads)
        log.info("Total number of downloaded proteins found is {0}. Failed to download {1}".
                 format(n_successful, n_failed))
        # Process the data if required
        if self.force_memmaps or self.force_grids:
            edp = prep.EnzymeDataProcessor(protein_codes=self.all_proteins,
                                           from_dir=self.dirs['data_raw'],
                                           target_dir=self.dirs['data_processed'],
                                           grid_size=self.grid_size,
                                           force_process_grids=self.force_grids,
                                           force_process_memmaps=self.force_memmaps,
                                           add_sidechain_channels=self.add_sidechain_channels,
                                           use_esp=False)
            self.valid_proteins = edp.process()
            self.validator.check_class_representation(self.valid_proteins, clean_dict=True)
            save_pickle(
                file_path=os.path.join(self.dirs["data_processed"], "valid_prot_codes.pickle"),
                data=self.valid_proteins)
            self._save_enzyme_list(target_dir=self.dirs["data_processed"],
                                   proteins_dict=self.valid_proteins)
        else:
            log.info("Skipping preprocessing step")
            self.valid_proteins = load_pickle(
                file_path=os.path.join(self.dirs["data_processed"], "valid_prot_codes.pickle"))
            self.validator.check_class_representation(self.valid_proteins, clean_dict=True)
        # Split test / val data set if required
        if self.force_split:
            # NOTE(review): raw_input is Python-2 only (input() on Python 3)
            resp = raw_input(
                "Do you really want to split a test set into a separate directory?" +
                " This will change the existing test set / train set split! y/[n]\n")
            if resp.startswith('y'):
                if self.split_strategy == 'naive':
                    # first returned dict receives p_test / p_val percent of the data
                    test_dataset, trainval_data = self.split_data_on_level(
                        self.valid_proteins,
                        percentage=self.p_test, level=3)
                    val_dataset, train_dataset = self.split_data_on_level(
                        trainval_data,
                        percentage=self.p_val, level=3)
                elif self.split_strategy == 'strict':
                    test_dataset, trainval_data = self.split_data_on_sublevel(
                        self.valid_proteins,
                        percentage=self.p_test, hierarchical_depth=3)
                    val_dataset, train_dataset = self.split_data_on_sublevel(
                        trainval_data,
                        percentage=self.p_val, hierarchical_depth=3)
                else:
                    log.error("Split strategy can be 'naive' or 'strict'")
                    raise ValueError
                self.validator.check_splitting(self.valid_proteins, trainval_data, test_dataset)
                self.validator.check_splitting(trainval_data, train_dataset, val_dataset)
                # recreate the train and test dirs
                shutil.rmtree(self.dirs['data_train'])
                os.makedirs(self.dirs['data_train'])
                shutil.rmtree(self.dirs['data_test'])
                os.makedirs(self.dirs['data_test'])
                # save val and train sets under dirs["data_train"], copy over all corresponding
                # data samples
                self._copy_processed(target_dir=self.dirs["data_train"],
                                     proteins_dict=trainval_data)
                self._save_enzyme_list(target_dir=self.dirs["data_train"],
                                       proteins_dict=trainval_data)
                save_pickle(file_path=[os.path.join(self.dirs["data_train"],
                                                    "train_prot_codes.pickle"),
                                       os.path.join(self.dirs["data_train"],
                                                    "val_prot_codes.pickle")],
                            data=[train_dataset, val_dataset])
                # save test set under dirs["data_test"], copy over all
                # corresponding data samples
                self._copy_processed(target_dir=self.dirs["data_test"],
                                     proteins_dict=test_dataset)
                self._save_enzyme_list(target_dir=self.dirs["data_test"],
                                       proteins_dict=test_dataset)
                save_pickle(file_path=os.path.join(self.dirs["data_test"],
                                                   "test_prot_codes.pickle"),
                            data=test_dataset)
            else:
                # only reinitialize the train and validation sets
                # the existing train and val pickles need to be merged and split
                # again
                train_dataset, val_dataset = load_pickle(
                    file_path=[os.path.join(self.dirs["data_train"],
                                            "train_prot_codes.pickle"),
                               os.path.join(self.dirs["data_train"],
                                            "val_prot_codes.pickle")])
                trainval_data = self.merge_data(
                    data=[train_dataset, val_dataset])
                # split them again
                val_dataset, train_dataset = self.split_data_on_level(
                    trainval_data,
                    percentage=self.p_val, level=3)
                self.validator.check_splitting(trainval_data, train_dataset, val_dataset)
                save_pickle(
                    file_path=[os.path.join(self.dirs["data_train"], "train_prot_codes.pickle"),
                               os.path.join(self.dirs["data_train"], "val_prot_codes.pickle")],
                    data=[train_dataset, val_dataset])
        else:
            log.info("Skipping splitting step")
            train_dataset, val_dataset, test_dataset = \
                load_pickle(file_path=[os.path.join(self.dirs["data_train"], "train_prot_codes.pickle"),
                                       os.path.join(self.dirs["data_train"], "val_prot_codes.pickle"),
                                       os.path.join(self.dirs["data_test"], "test_prot_codes.pickle")])
        # only select the enzymes classes we're interested in
        self.train_dataset = self._select_enzymes(train_dataset)
        self.val_dataset = self._select_enzymes(val_dataset)
        self.test_dataset = self._select_enzymes(test_dataset)
        # generate labels based on the data-sets
        lf = LabelFactory(self.train_dataset, self.val_dataset, self.test_dataset,
                          hierarchical_depth=self.max_hierarchical_depth)
        self.train_labels, self.val_labels, self.test_labels = lf.generate_hierarchical_labels()
    def _select_enzymes(self, dataset):
        """
        Extracts a subset of a data dictionary according to the enzyme classes of interest.
        E.g. if the data dictionary contains the whole database and in the new experiment only a
        subset is needed, in order not to download, process and split the data again, a subset is
        extracted from the existing data.
        :param dataset: a data dictionary with keys the enzyme classes and values
        - lists of protein codes for each class
        :return: the subset of this data dictionary
        """
        filtered_set = dict()
        for cls, enzymes in dataset.items():
            # keep cls only if it lies under one of the configured EC prefixes
            if any([cls.startswith(enzyme_cls + '.') for enzyme_cls in
                    self.enzyme_classes]):
                filtered_set[cls] = enzymes
        return filtered_set
    def _remove_failed_downloads(self, failed=None):
        """
        Deprecated. It was meant to clean-up the list of all fetched proteins after the download is
        completed. This was necessary since the list of all proteins was generated from the EC
        database and the proteins were downloaded from the PDB database, hence some proteins might
        be taken into account by fail to download.
        :param failed: the list of all failed-to-download protein codes
        """
        # here the protein codes are stored in a dict according to their classes
        for cls in failed.keys():
            self.all_proteins[cls] = list(
                set(self.all_proteins[cls]) - set(failed[cls]))
    def get_training_set(self):
        """Return (train data dict, train labels)."""
        return self.train_dataset, self.train_labels
    def get_validation_set(self):
        """Return (validation data dict, validation labels)."""
        return self.val_dataset, self.val_labels
    def get_test_set(self):
        """Return (test data dict, test labels)."""
        return self.test_dataset, self.test_labels
    def _copy_processed(self, target_dir, proteins_dict):
        """
        After the data is split, the test proteins are moved to a separate directory so that they do
        not interfere with the training and validation proteins. This method copies the proteins
        from one directory to another.
        :param target_dir: the target directory to which proteins are copied
        :param proteins_dict: the source directory from which proteins are copied
        :return:
        """
        src_dir = self.dirs["data_processed"]
        for prot_codes in proteins_dict.values():
            for prot_code in prot_codes:
                # NOTE(review): shells out to `cp -R`, so this is Unix-only and
                # ignores failures; shutil.copytree would be portable.
                os.system("cp -R %s %s" % (
                    os.path.join(src_dir, prot_code.upper()),
                    os.path.join(target_dir, ".")))
                log.info("Copied {0} to {1}".format(prot_code, target_dir))
    @staticmethod
    def _save_enzyme_list(target_dir, proteins_dict):
        """
        Logger of the list of proteins, so that directories are not walked later when a list of all
        proteins in test or training set is needed.
        :param target_dir: the directory in which the lists should be stored
        :param proteins_dict: the data dictionary of protein classes and list of corresponding codes.
        """
        for cls, prot_codes in proteins_dict.items():
            # one "<EC class>.proteins" file per class, one PDB code per line
            with open(os.path.join(target_dir, cls + '.proteins'),
                      mode='w') as f:
                for prot_code in prot_codes:
                    f.write(prot_code + '\n')
class GOProteinsDataManager(DataManager):
    """Placeholder data manager for Gene-Ontology protein data.

    Only wires the constructor through to the :class:`DataManager` base; all
    data-set accessors are still unimplemented.
    """
    def __init__(self, data_dir, force_download=False, force_process=False,
                 force_split=False,
                 percentage_test=10,
                 percentage_val=20):
        # Delegate all bookkeeping (directory layout, force flags and split
        # percentages) to the base class.
        super(GOProteinsDataManager, self).__init__(
            data_dir=data_dir,
            force_download=force_download,
            force_process=force_process,
            force_split=force_split,
            percentage_test=percentage_test,
            percentage_val=percentage_val)
    def get_training_set(self):
        """Not implemented yet."""
        raise NotImplementedError
    def get_validation_set(self):
        """Not implemented yet."""
        raise NotImplementedError
    def get_test_set(self):
        """Not implemented yet."""
        raise NotImplementedError
if __name__ == "__main__":
data_dir = os.path.join(os.path.dirname(__file__), '../../data_test')
# data_dir = "/usr/data/cvpr_shared/proteins/enzymes_w073/multichannel_density"
# enzyme_classes = list()
# for i in range(1, 100):
# enzyme_classes.append('1.%d' % i)
# for i in range(1, 11):
# enzyme_classes.append('2.%d' % i)
# for i in range(1, 14):
# enzyme_classes.append('3.%d' % i)
# for i in range(1, 8):
# enzyme_classes.append('4.%d' % i)
# enzyme_classes.append('4.99')
# for i in range(1, 6):
# enzyme_classes.append('5.%d' % i)
# enzyme_classes.append('5.99')
# for i in range(1, 7):
# enzyme_classes.append('6.%d' % i)
dm = EnzymeDataManager(data_dir=data_dir,
force_download=False,
force_memmaps=False,
force_grids=False,
force_split=True,
grid_size=64,
split_strategy='strict',
percentage_test=30,
percentage_val=30,
hierarchical_depth=3,
enzyme_classes=['3.13.1'])
| true |
3c0fdd3e01f8dbe2780cd0a2d23d60ab47234306 | Python | NelaSvozilikova/freelance | /Bash/Navegar_Sitio_JEO/PASO_NUEVOO/File_Lib.py | UTF-8 | 459 | 3.140625 | 3 | [] | no_license |
def saveFile(FILENAME, LISTA):
    """Write every value of LISTA to FILENAME, one stripped value per line.

    :param FILENAME: path of the file to (over)write
    :param LISTA: iterable of strings; each is whitespace-stripped before writing
    """
    # `with` guarantees the file is closed even if a write raises
    # (the original left the handle open on error).
    with open(FILENAME, "w") as file:
        for valor in LISTA:
            file.write(valor.strip() + '\n')
    return
def loadFile(FILENAME, LISTA):
    """Append every line of FILENAME (newlines included) to LISTA.

    If the file does not exist, create it instead from the current contents of
    LISTA, writing one stripped value per line (LISTA itself is not modified).

    :param FILENAME: path of the file to read (or create)
    :param LISTA: list that receives the lines read from the file
    """
    try:
        # `with` guarantees closing on any error mid-read (the original
        # leaked the handle if iteration raised).
        with open(FILENAME, "r") as file:
            for line in file:
                LISTA.append(line)
    except FileNotFoundError:
        with open(FILENAME, "w") as file:
            for valor in LISTA:
                file.write(valor.strip() + '\n')
    return
| true |
e0215c3c332f8021dc4b7cff691fce7fd17b4f3a | Python | lilin199309261023/ceshi2 | /day3/test9.py | UTF-8 | 1,798 | 2.796875 | 3 | [] | no_license | from time import sleep
from appium import webdriver
# server 启动参数
from appium.webdriver.common.touch_action import TouchAction
desired_caps = {}
# 设备信息
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = '192.168.56.101:5555'
# 输入中文
desired_caps['unicodeKeyboard'] = True
desired_caps['resetKeyboard'] = True
# app的信息
desired_caps['appPackage'] = 'com.android.settings'
desired_caps['appActivity'] = '.Settings'
# 声明我们的driver对象
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
driver.implicitly_wait(20)
"""
目标:move_to移动方法
需求:1.进入设置
2.向上滑动屏幕到可见"安全"选线
3.进入到安全
4.点击屏幕锁定方式
5.点击图案
6.绘制图案
踩得坑:
1.元素等待必须有
2.坐标点不是元素,必须添加sleep
"""
# 定位电池
dianchi=driver.find_element_by_xpath("//*[@text='电池']")
# 定位wlan
wlan=driver.find_element_by_xpath("//*[@text='WLAN']")
# 点击安全
driver.find_element_by_xpath("//*[@text='安全']").click()
# 点击屏幕锁定方式
driver.find_element_by_xpath("//*[@text='屏幕锁屏方式']").click()
# 点击安全
driver.find_element_by_xpath("//*[@text='图案']").click()
#后面坐标点减前面的坐标点
# A:x=234,y=857 x=722,y=857 x=1206,y=857
#B:x=857,y=1329
#C:x=234,y=1818 x=722,y=1818 x=1206,y=1818
TouchAction(driver).press(x=234,y=857).wait(100).move_to(x=722-857,y=0).wait(100).\
move_to(x=1206-722,y=0).wait(100).move_to(x=722-1206,y=1329-857).wait(100).\
move_to(x=234-722,y=1818-1329).wait(100).move_to(x=857-234,y=0).wait(100).move_to(x=1206-857,y=0).\
wait(100).release().perform()
sleep(2)
driver.quit()
| true |
0cc232d4254b1a1a3a2c3d9c27fc21ee6f8efc39 | Python | yue-cherry-ying/Textual-Analysis | /digital_approaches/week8_cont.py | UTF-8 | 2,187 | 2.625 | 3 | [] | no_license | # Yue "Cherry" Ying
# Python Exercise for Tuesday March 2nd
import os
import re
import sys
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import numpy
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag, Text, tokenize
import joblib as joblib
from joblib import dump, load
import csv
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
# Load the previously fitted vectorizer and LDA topic model from disk
# (created elsewhere; paths are relative to the working directory).
countvectorizer = joblib.load('CountVectorizerModel.joblib')
lda = joblib.load('LDAModel.joblib')
def preprocess_text(text, stopwords, lemmatizer):
    """Tokenize *text* on word characters, lemmatize every token, drop the
    stopwords, and return the remaining tokens joined by single spaces."""
    kept = []
    for raw_token in re.findall(r"\w+", text):
        lemma = lemmatizer.lemmatize(raw_token)
        if lemma not in stopwords:
            kept.append(lemma)
    return " ".join(kept)
if __name__ == "__main__":
folders = ["talk.politics.mideast", "rec.autos", "comp.sys.mac.hardware", "alt.atheism",
"comp.os.ms-windows.misc", "rec.sport.hockey", "sci.crypt", "sci.med", "talk.politics.misc",
"rec.motorcycles", "comp.windows.x", "comp.graphics", "comp.sys.ibm.pc.hardware", "sci.electronics",
"talk.politics.guns", "sci.space", "soc.religion.christian", "misc.forsale", "talk.religion.misc"]
texts = []
categories = []
stopwords = set(stopwords.words("english"))
additional_stopwords = {"quot", "one", "two", "would", "new", "may", "he", "it", "told"} # add additional stopwords
stopwords.update(additional_stopwords)
lemmatizer = WordNetLemmatizer()
for category in folders:
for file in os.scandir("../20news/" + category):
with open(file.path, encoding="latin-1") as input_file: # texts have latin-1 encoding
text = input_file.read()
text = preprocess_text(text, stopwords, lemmatizer)
texts.append(text)
categories.append(category)
vectorizer = countvectorizer
vectorized_texts = vectorizer.fit_transform(texts)
doc_topic_distrib = lda.fit_transform(vectorized_texts)
with open("news_topic_dist.csv", 'w') as news:
print(doc_topic_distrib, file=news)
| true |
e90f6d4e663f5e9b9bb58e981f3f4a077f28d5f9 | Python | ngardiner7/google-trends | /gsheets.py | UTF-8 | 827 | 2.75 | 3 | [] | no_license | import pygsheets
# should replace this with sheet_id cause errors
def write_df_to_sheet(sheet_name, df):
    """Write a pandas DataFrame into the worksheet named *sheet_name* of a
    fixed Google spreadsheet (Python 2 / pygsheets)."""
    # ensure that our dataframe isn't larger than the google sheets max (2,000,000).
    # NOTE(review): the threshold below is 18,000,000, which disagrees with the
    # 2,000,000 stated above -- confirm which limit is intended.
    if df.shape[0] * df.shape[1] > 18000000:
        print "Number of records is too large for Google Sheets to handle. Please reduce the number of keywords or timeframe"
        return
    # overwrite max cell write limit. defaults to 50,000. the +1 accounts for the headers
    pygsheets.client.GOOGLE_SHEET_CELL_UPDATES_LIMIT = df.shape[0] * (df.shape[1] + 1)
    # auth things. will need to go through oauth if it's your first time using
    gc = pygsheets.authorize()
    # open sheet and add dataframe to sheet
    # NOTE(review): the spreadsheet key is empty -- this will fail until a real
    # key (or sheet_id, per the comment above) is supplied.
    sh = gc.open_by_key('')
    f = sh.worksheet('title', sheet_name)
    f.clear()
    f.set_dataframe(df,(1, 1))
| true |
fdaa64a38d765b6ef241b86680345591f22fd1f5 | Python | sujayshah/BEEP_BOOP | /MP4/pong.py | UTF-8 | 8,442 | 3.046875 | 3 | [] | no_license | import random
from state import GameState as gameState
import math
#global variables
# q_table = [list([0, 0, 0])] * 10369
# N= [list([0, 0, 0])] * 10369
q_table = {}  # maps (discretized state tuple, action index) -> Q value
N = {}  # maps (discretized state tuple, action index) -> visit count
epsilon = 0.05  # exploration rate constant (declared here; exploration below uses a counter scheme)
# This function updates the ball position and checks the bounce/termination conditions.
def play_game(state, action):
    """Advance the pong simulation by one time step, mutating `state` in place.

    :param state: game state object with ball_x/ball_y, velocity_x/velocity_y,
                  paddle_y, actions and the `special` terminal flag
    :param action: index into state.actions selecting the paddle movement
    :return: tuple (ball_x, ball_y, velocity_x, velocity_y, paddle_y, reward),
             or None when called on a missing/terminal state. reward is +1 for
             a paddle bounce, -1 for a miss (terminal), 0 otherwise.
    """
    # check to make sure not terminal state first
    if state == None or state.special == 1:
        #print "TERMINAL STATE. GAME OVER"
        return None
    reward = 0
    paddle_height = 0.2
    # move the paddle, clamping it so it stays fully on the [0, 1] screen
    state.paddle_y = state.paddle_y + state.actions[action]
    if state.paddle_y < 0:
        state.paddle_y = 0
    if (state.paddle_y > 0.8):
        state.paddle_y = 0.8
    # advance the ball one velocity step, remembering the old position so the
    # travel line can be reconstructed below
    orig_x = state.ball_x
    orig_y = state.ball_y
    state.ball_x = state.ball_x + state.velocity_x
    state.ball_y = state.ball_y + state.velocity_y
    new_x = state.ball_x
    new_y = state.ball_y
    # bounce off the top of the screen
    if state.ball_y < 0:
        state.ball_y = -1 * state.ball_y
        state.velocity_y = -1 * state.velocity_y
    # bounce off the bottom of the screen
    if state.ball_y > 1:
        state.ball_y = 2 - state.ball_y
        state.velocity_y = -1 * state.velocity_y
    # bounce off the left edge of the screen
    if state.ball_x < 0:
        state.ball_x = -1 * state.ball_x
        state.velocity_x = -1 * state.velocity_x
        if abs(state.velocity_x) <= 0.03:
            print("VELOCITY_X ERROR")
    # line along which the ball moved this step (kept for parity with the
    # original code; the paddle tests below use the end position)
    slope = (new_y - orig_y)/(new_x - orig_x)
    b = new_y - (slope * new_x)
    y_intersection= slope + b # y = mx + b, plug in x = 1
    paddle_bottom = state.paddle_y + paddle_height
    # paddle hit: reflect the ball back and randomize the velocities, rejecting
    # degenerate draws with |velocity_x| <= 0.03
    if state.ball_x > 1 and (state.ball_y >= state.paddle_y and state.ball_y <= paddle_bottom):
        state.ball_x = 2 - state.ball_x
        reward = 1
        while True:
            U = random.uniform(-0.015, 0.015)
            V = random.uniform(-0.03, 0.03)
            state.velocity_x = (-1 * state.velocity_x) + U
            state.velocity_y = state.velocity_y + V
            if abs(state.velocity_x) > 0.03:
                break
    # BUG FIX: a miss happens when the ball crosses x=1 *outside* the paddle
    # span. The previous condition tested *inside* the span, which can never
    # hold after the bounce branch above, so the -1 reward / terminal flag was
    # never produced.
    if state.ball_x > 1 and not (state.paddle_y <= state.ball_y <= paddle_bottom):
        state.special = 1
        reward = -1
        print("w a o w")
    return (state.ball_x, state.ball_y, state.velocity_x, state.velocity_y, state.paddle_y, reward)
#This function takes a game state and discretizes it
def discretize(state):
    """Map a continuous game state onto the discrete
    (ball_x, ball_y, velocity_x, velocity_y, paddle_y) grid.

    Returns None if the state is already terminal. As a side effect, marks the
    state terminal (state.special = 1) once the ball has passed x = 1.
    """
    # already terminal: nothing to discretize
    if state.special == 1:
        return None
    # ball past the paddle edge -> enter the special absorbing state; the
    # current (out-of-range) coordinates are still discretized and returned
    if state.ball_x > 1:
        state.special = 1
    paddle_height = 0.2
    # 12x12 grid over the board for the ball position
    grid_x = math.floor(12 * state.ball_x)
    grid_y = math.floor(12 * state.ball_y)
    # x velocity keeps only its sign
    vx = 1 if state.velocity_x >= 0 else -1
    # y velocity: near-zero band maps to 0, otherwise its sign
    if abs(state.velocity_y) < 0.015:
        vy = 0
    elif state.velocity_y < 0:
        vy = -1
    else:
        vy = 1
    # paddle position mapped onto 12 buckets of its reachable range; the very
    # bottom position belongs to the last bucket
    if state.paddle_y == (1 - paddle_height):
        pad = 11
    else:
        pad = math.floor(12 * state.paddle_y / (1 - paddle_height))
    return (grid_x, grid_y, vx, vy, pad)
# def maptoidx(state):
# ten_thousand = state.ball_x * (10000)
# thousand = state.ball_y * (1000)
# hundred = state.velocity_x * (100)
# ten = state.velocity_y * 10
# final = ten_thousand + thousand + hundred + ten + state.paddle_y
# final = hash(final) % 10369
# final = int(final)
# return final
# This function returns the action you should take given a state
# (0, 1 or 2, indexing state.actions), or None for a terminal/absent state.
def exploration_policy(state, israndom):
    if state is None or state.special == 1:
        print "ERROR"
        return None
    #with probability e choose randomly
    if israndom is True:
        a = random.randint(0, 2)
        return a
    else:
        #with probability (1-e) follow the greedy policy
        q_max = -10000000000000
        for i in range(3):
            # if any action is still unseen, fall back to a random action
            # NOTE(review): discretize(state) is recomputed on every lookup;
            # it also mutates state.special when ball_x > 1, so later calls in
            # this loop may key on None -- confirm this is intended.
            if (discretize(state), i) not in N or (discretize(state), i) not in q_table:
                a = random.randint(0,2)
                return a
            if q_table[(discretize(state), i)] > q_max:
                q_max = q_table[(discretize(state), i)]
                a = i
        #print "chose actual"
        return a
def qmax(state):
    """Return max over the three actions of Q(state, action), treating unseen
    (state, action) pairs as 0. Returns None for terminal/absent states.

    BUG FIX: the previous version returned from *inside* the for loop, so only
    action 0 was ever examined and q_max was never updated.
    """
    if state is None or state.special == 1:
        print("ERROR")
        return None
    # hoist the discretization: it is loop-invariant, and repeated calls could
    # flip state.special mid-loop once the ball has passed the paddle
    d_state = discretize(state)
    q_max = -10000000000000
    for i in range(3):
        q_max = max(q_max, q_table.get((d_state, i), 0))
    return q_max
# Tabular Q-learning over the discretized pong state space.
# C controls the learning-rate decay; gamma is the discount factor.
def qlearning_agent(C, gamma):
    # IN CASE OF BUGS IMPOSE ADDITIONAL VELOCITY BOUNDS
    global q_table
    global N
    reward = 0
    num_bounces = 0
    n = 0 # number iterations
    random_count = 0
    #initialize board; board is current game state
    paddle_height = 0.2
    while n < 100000:
        #print num_bounces
        num_bounces = 0
        #observe current state and convert from continuous to discrete space
        #start from scratch
        board = gameState(0.5, 0.5, 0.03, 0.01, 0.5 - paddle_height/2)
        current_state = board
        #Terminal state check
        if current_state == None:
            print "ERROR"
            continue
        if current_state.special == 1:
            # NOTE(review): the `continue` below is unreachable after `return`,
            # and returning here aborts all remaining training episodes.
            return None
            continue
        else:
            # play one full episode until the terminal state
            while True:
                choose_random = False
                if current_state == None or current_state.special == 1:
                    random_count = 0
                    break
                # explore roughly every 10th step (when the counter hits 3)
                random_count +=1
                if random_count > 10:
                    random_count = 1
                if random_count/10.0 == 0.3:
                    choose_random = True
                #choose an action based on exploration policy
                a = exploration_policy(current_state, choose_random)
                #given action and current state get successor state
                temp_tuple= play_game(current_state, a) #final successor state
                if temp_tuple == None:
                    random_count = 0
                    break
                else:
                    successor_state = gameState(temp_tuple[0], temp_tuple[1], temp_tuple[2], temp_tuple[3], temp_tuple[4])
                    # print "CURRENT: ", (current_state.ball_x, current_state.ball_y, current_state.velocity_x, current_state.velocity_y)
                    # print "SUCCESSOR: ", (successor_state.ball_x, successor_state.ball_y, successor_state.velocity_x, successor_state.velocity_y)
                    reward = temp_tuple[5]
                    if (discretize(current_state), a) not in N:
                        N[(discretize(current_state), a)] = 1
                        alpha = 1.0
                    else:
                        N[(discretize(current_state), a)] += 1
                        # NOTE(review): under Python 2, C/(C + N[...]) with integer C
                        # is *integer* division, making alpha 0 after the first visit
                        # (no further learning); float(C) would give the intended decay.
                        alpha = 1.0 * (C/(C + N[(discretize(current_state), a)])) #decay if we have seen before
                    #update q-table with current state and successor state
                    if N[(discretize(current_state), a)] == 1:
                        q_table[(discretize(current_state), a)] = alpha * (reward + gamma * qmax(successor_state))
                    else:
                        q_table[(discretize(current_state), a)] = q_table[(discretize(current_state), a)] + (alpha * (reward + (gamma * qmax(successor_state)) - q_table[(discretize(current_state), a)]))
                    if reward > 0:
                        num_bounces +=1
                    #update next game state to successor state
                    current_state = gameState(successor_state.ball_x, successor_state.ball_y, successor_state.velocity_x, successor_state.velocity_y, successor_state.paddle_y)
        n+=1
def main():
    """Train the Q-learning agent, then evaluate the learned greedy policy for
    1000 episodes without updating the tables."""
    global q_table
    global N
    #clear out the qtable and the N table
    q_table = {}
    N = {}
    qlearning_agent(100, 0.7)
    # BUG FIX: paddle_height, num_bounces and random_count were previously used
    # without ever being initialized here (NameError / UnboundLocalError).
    paddle_height = 0.2
    num_bounces = 0
    n = 0  # basically run qlearning again but dont update qtables
    while n < 1000:
        board = gameState(0.5, 0.5, 0.03, 0.01, 0.5 - paddle_height / 2)
        current_state = board
        random_count = 0
        while True:
            choose_random = False
            if current_state == None or current_state.special == 1:
                random_count = 0
                break
            random_count += 1
            if random_count > 10:
                random_count = 1
            if random_count / 10.0 == 0.3:
                choose_random = True
            # always act greedily during evaluation (choose_random is computed
            # for parity with training but intentionally not passed)
            a = exploration_policy(current_state, False)
            temp_tuple = play_game(current_state, a)  # successor state tuple
            if temp_tuple == None:
                random_count = 0
                break
            else:
                # play_game mutates current_state in place; the tuple carries
                # the successor values plus the reward
                successor_state = gameState(temp_tuple[0], temp_tuple[1], temp_tuple[2], temp_tuple[3], temp_tuple[4])
                reward = temp_tuple[5]
                if reward > 0:
                    num_bounces += 1
                current_state = successor_state
        n += 1
if __name__ == '__main__':
    # Entry point: train the agent, then run the evaluation pass.
    main()
| true |
0538b71f5181ff836ece28d065ad68f815bf1ae4 | Python | kshithijiyer/widgetastic.patternfly4 | /testing/test_nav.py | UTF-8 | 1,518 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | import pytest
from widgetastic_patternfly4 import Navigation
# Test fixtures: (XPath locator of the <nav> element,
#                 expected nav_item_tree() result,
#                 expected currently_selected path).
NAVS = [
    (
        ".//div[@id='ws-react-c-nav-default']/nav",
        ["Link 1", "Link 2", "Link 3", "Link 4"],
        ["Link 1"],
    ),
    (
        ".//div[@id='ws-react-c-nav-expandable']/nav",
        {
            "Link 1": ["Subnav Link 1", "Subnav Link 2", "Subnav Link 3"],
            "Link 2": ["Custom onClick", "Subnav Link 1", "Subnav Link 2", "Subnav Link 3"],
        },
        ["Link 1", "Subnav Link 1"],
    ),
    (
        ".//div[@id='ws-react-c-nav-mixed']/nav",
        {
            "Link 1 (not expandable)": None,
            "Link 2 - expandable": ["Link 1", "Link 2", "Link 3"],
            "Link 3 - expandable": ["Link 1", "Link 2", "Link 3"],
        },
        ["Link 1 (not expandable)"],
    ),
]
@pytest.mark.parametrize("sample", NAVS, ids=lambda sample: sample[0])
def test_navigation(browser, sample):
    """Each sample nav widget reports the expected selection and item tree."""
    locator, expected_tree, expected_selection = sample
    widget = Navigation(browser, locator=locator)
    assert widget.currently_selected == expected_selection
    assert widget.nav_item_tree() == expected_tree
@pytest.mark.skip
def test_navigation_select(browser):
    """Selecting leaf and expandable items updates currently_selected accordingly."""
    nav = Navigation(
        browser,
        locator=".//h2[normalize-space(.)='Navigation mixed']/following-sibling::div[1]//nav",
    )
    nav.select("Link 3 - expandable", "Link 2")
    assert nav.currently_selected == ["Link 3 - expandable", "Link 2"]
    nav.select("Link 1 (not expandable)")
    assert nav.currently_selected == ["Link 1 (not expandable)"]
| true |
68a47f2e8b93d3f417598bc97c9318cc7c2257c2 | Python | prash94/HeartDiseasePrediction | /hdprediction/preprocessors.py | UTF-8 | 6,097 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[4]:
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# from lrmodel.processing.errors import InvalidModelInputError
# In[ ]:
# catagorical variables are imputed by labling them as 'Missing'
class ImputeCategoricalVariables(BaseEstimator, TransformerMixin):
    """Impute missing values in categorical columns with the literal label 'Missing'."""

    def __init__(self, variable_list=None) -> None:
        # Normalise a single column name into a one-element list.
        if not isinstance(variable_list, list):
            self.variable_list = [variable_list]
        else:
            self.variable_list = variable_list

    # fit and transform methods are needed for using sklearn pipeline
    def fit(self, X: pd.DataFrame, y: pd.Series = None) -> 'ImputeCategoricalVariables':
        # Stateless imputer: nothing to learn; fit only satisfies the sklearn API.
        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:  # bug fix: def line was missing its colon (SyntaxError)
        """Return a copy of X with NaNs in the configured columns replaced by 'Missing'."""
        X = X.copy()
        for feature in self.variable_list:
            X[feature] = X[feature].fillna('Missing')
        return X
# Numerical variable are imputed using mode
class ImputeNumericalVariables(BaseEstimator, TransformerMixin):
    """Impute missing values in numerical columns with the column mode learned at fit time."""

    def __init__(self, variable_list=None):
        # Normalise a single column name into a one-element list.
        if not isinstance(variable_list, list):
            self.variable_list = [variable_list]
        else:
            self.variable_list = variable_list

    def fit(self, X, y=None):  # bug fix: def line was missing its colon (SyntaxError)
        """Record the mode of every configured column in imputer_metrc_dict."""
        self.imputer_metrc_dict = {}
        for feature in self.variable_list:
            self.imputer_metrc_dict[feature] = X[feature].mode()[0]
        return X if False else self  # keep sklearn contract: fit returns self

    def transform(self, X):
        """Fill missing entries with the per-column mode learned in fit()."""
        X = X.copy()
        for feature in self.variable_list:
            # bug fix: previously the whole dict was passed to fillna, which pandas
            # interprets as an index -> value mapping, not as the column's mode.
            X[feature].fillna(self.imputer_metrc_dict[feature], inplace=True)
        return X
# this class is for encoding catagorical variables if they are rarely occuring
# rarity is determined is using the tolerance
class RareLableCategoricalEncoder(BaseEstimator, TransformerMixin):
    """Group infrequent category labels into a single 'Rare' label.

    A label is retained only when its relative frequency in the training data
    is at least ``tol``; all other labels become 'Rare' at transform time.
    """

    def __init__(self, tol=0.5, variable_list=None):
        self.tol = tol
        # Normalise a single column name into a one-element list.
        if not isinstance(variable_list, list):
            self.variable_list = [variable_list]
        else:
            self.variable_list = variable_list

    def fit(self, X, y=None):
        """Learn, per column, which labels are frequent enough to keep."""
        self.freq_lable_dict = {}
        for feature in self.variable_list:
            # bug fixes: value.counts() is not a pandas API (value_counts() is),
            # and np.float was removed from numpy; the builtin float suffices.
            t = pd.Series(X[feature].value_counts() / float(len(X)))
            self.freq_lable_dict[feature] = list(t[t >= self.tol].index)
        return self

    def transform(self, X):
        """Replace labels not retained during fit() with the literal 'Rare'."""
        X = X.copy()
        for feature in self.variable_list:
            X[feature] = np.where(X[feature].isin(self.freq_lable_dict[feature]), X[feature], 'Rare')
        return X
class NegativeValueRemover(BaseEstimator, TransformerMixin):
    """Drop every row whose value in any configured column is negative."""

    def __init__(self, variable_list=None):
        # Normalise a single column name into a one-element list.
        if not isinstance(variable_list, list):
            self.variable_list = [variable_list]
        else:
            self.variable_list = variable_list

    def fit(self, X: pd.DataFrame, y=None) -> 'NegativeValueRemover':
        # Stateless: fit exists only to satisfy the sklearn transformer API.
        return self

    def transform(self, x: pd.DataFrame) -> pd.DataFrame:
        """Return a copy of x with negative-valued rows removed."""
        x = x.copy()
        for col in self.variable_list:
            # bug fix: drop(..., inplace=True) returns None, so the old code
            # rebound x to None and the method never returned a DataFrame.
            x = x.drop(x[x[col] < 0].index)
        return x
# this class is for encoding catagorical varialbles with a sequential number based on the mean target value per lable within the variable.
# Higher the mean more important the lable is.
# TODO: Need to check if this works for classification models
class CategoricalEncoder(BaseEstimator, TransformerMixin):
    """Ordinal-encode categorical columns by mean target value.

    Each label is replaced with its rank when labels are ordered by the mean
    of the target, so labels associated with a higher mean target get larger
    integers.
    """

    def __init__(self, variable_list=None):
        # Normalise a single column name into a one-element list.
        if not isinstance(variable_list, list):
            self.variable_list = [variable_list]
        else:
            self.variable_list = variable_list

    def fit(self, X, y):
        """Learn, per column, the label -> rank mapping from the training target."""
        temp = pd.concat([X, y], axis=1)
        temp.columns = list(X.columns) + ['target']
        # persist transforming dictionary
        self.encoder_dict = {}
        for var in self.variable_list:
            ordered = temp.groupby([var])['target'].mean().sort_values(ascending=True).index
            self.encoder_dict[var] = {label: rank for rank, label in enumerate(ordered, 0)}
        return self

    def transform(self, X):
        """Map each column through its learned encoding; fail loudly on unseen labels."""
        # Work on a copy so the caller's frame is not mutated.
        X = X.copy()
        for feature in self.variable_list:
            X[feature] = X[feature].map(self.encoder_dict[feature])
        # Labels unseen at fit time map to NaN; raise instead of silently
        # passing corrupted data downstream.
        if X[self.variable_list].isnull().any().any():
            null_counts = X[self.variable_list].isnull().any()
            vars_with_nulls = {key: value for (key, value) in null_counts.items() if value is True}
            # bug fix: InvalidModelInputError's import is commented out at the top
            # of this file, so raising it was a NameError; also the two f-string
            # fragments were fused into "whentransforming" (missing space).
            raise ValueError(
                f'Categorical encoder has introduced null values when '
                f'transforming categorical variables: {vars_with_nulls.keys()}'
            )
        return X
# NOTE(review): this class is broken as written — the last line runs at class
# creation time and references x_train/x_test/MinMaxScaler/continuous_variables/
# discrete_variables, none of which are defined or imported in this module, so
# importing this file raises NameError.  scale_numerical_features also lacks
# `self` despite being defined inside a class.  Left byte-identical pending a fix.
class feature_scaling(BaseEstimator, TransformerMixin):
    def scale_numerical_features(xtrain, xtest):
        # Split continuous/discrete columns from the rest — TODO confirm the
        # *_variables lists are meant to come from module-level config.
        xtrain_cont = xtrain[continuous_variables + discrete_variables]
        xtest_cont = xtest[continuous_variables + discrete_variables]
        xtrain_non_cont = xtrain.drop(continuous_variables + discrete_variables, axis=1)
        xtest_non_cont = xtest.drop(continuous_variables + discrete_variables, axis=1)
        # Fit the scaler on train only, then apply to test (no leakage).
        scaler = MinMaxScaler()
        xtrain_scaled = scaler.fit_transform(xtrain_cont)
        xtest_scaled = scaler.transform(xtest_cont)
        xtrain_scaled = pd.DataFrame(xtrain_scaled, columns=xtrain_cont.columns, index=xtrain_cont.index)
        xtest_scaled = pd.DataFrame(xtest_scaled, columns=xtest_cont.columns, index=xtest_cont.index)
        # Recombine scaled numeric columns with the untouched columns.
        x_train_ds = pd.concat([xtrain_non_cont, xtrain_scaled], axis=1)
        x_test_ds = pd.concat([xtest_non_cont, xtest_scaled], axis=1)
        print('x_train:', x_train_ds.shape)
        print('x_test:', x_test_ds.shape)
        return x_train_ds, x_test_ds
    # NOTE(review): executes during class-body evaluation; x_train/x_test undefined here.
    x_train, x_test = scale_numerical_features(x_train, x_test)
| true |
dc22c331d8d21f5569cde1626184dd5af18b524e | Python | BiYuqi/daily-practice | /Python/Python-Base-Practice/functional-programming/higher-order-functions/map-reduce.py | UTF-8 | 2,376 | 4.3125 | 4 | [] | no_license | # coding=UTF-8
"""
Python内建了map()和reduce()函数。
map()函数接收两个参数,一个是函数,一个是Iterable
map将传入的函数依次作用到序列的每个元素,并把结果作为新的Iterator返回。
"""
"""
举例说明,比如我们有一个函数f(x)=x2,
要把这个函数作用在一个list [1, 2, 3, 4, 5, 6, 7, 8, 9]上,就可以用map()实现如下:
"""
L = [1, 2, 3, 4, 5, 6, 7, 8, 9]
def f(x):
return x * x
res = map(f, L)
# map()传入的第一个参数是f,即函数对象本身。
# 由于结果r是一个Iterator,Iterator是惰性序列,
# 因此通过list()函数让它把整个序列都计算出来并返回一个list。
print(list(res))
# to str
r = map(str, L)
print(list(r))
"""
reduce的用法。reduce把一个函数作用在一个序列[x1, x2, x3, ...]上,
这个函数必须接收两个参数,reduce把结果继续和序列的下一个元素做累积计算
ex:
reduce(f, [x1, x2, x3, x4]) = f(f(f(x1, x2), x3), x4)
"""
# 比方说对一个序列求和,就可以用reduce实现:
from functools import reduce
M = [1, 2, 3, 4, 5, 6, 7, 8, 9]
def add(x, y):
return x + y
tt = reduce(add, M)
print(tt)
# 当然求和运算可以直接用Python内建函数sum(),没必要动用reduce。
print(sum(M))
def add_2(x, y):
# print(isinstance(str(x), str))
return str(x) + str(y)
def add_3(x, y):
return x * 10 + y
tt2 = reduce(add_2, M)
tt3 = reduce(add_3, M)
print(tt2, tt3)
print(isinstance(tt2, str))
print(isinstance(tt3, str))
print('---------------------------------')
def char2num(s):
return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]
print(list(map(char2num, '13579')))
#
def normal(n):
m = n[0:1].upper()
s = n[1:].lower()
return m + s
TEST = ['biyuqi', 'caiyiingjie', 'hujintao']
result = list(map(normal, TEST))
print(result)
# 整理 2中方法
def str2stand(source):
def inner_math(r):
# m = r[0:1].upper()
# n = r[1:].lower()
# return m + n
return str.title(r)
return list(map(inner_math, source))
print(str2stand(['biyuqi', 'caiyiingjie', 'hujintao']))
# 原来有title 方法
print(str.title('biyuqi'))
# 利用reduce求和
def prod(s):
def add(x, y):
return x + y
return reduce(add, s)
print(prod([1, 2, 3, 4, 5]))
| true |
66b84e7f3256fb854a7d9754bc31915e448595a6 | Python | kurry3/meme_generator | /src/QuoteEngine/PDFIngestor.py | UTF-8 | 2,110 | 3.21875 | 3 | [] | no_license | """PDF Ingestor.
This script requires that the 'typing, 'os', and 'subprocess'
libraries be installed within the Python
environment this script is being run in.
"""
import os
import subprocess
import random
from typing import List
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
class PDFIngestor(IngestorInterface):
    """Ingestor for files with the .pdf extension; realizes IngestorInterface.

    Converts the PDF to a temporary text file with the external ``pdftotext``
    tool, then parses one "body - author" quote per line.

    Methods:
        can_ingest(path): inherited; True if the file extension is supported.
        parse(path=str): Returns a list of QuoteModels.
    """

    @classmethod
    def parse(cls, path: str) -> List[QuoteModel]:
        """Return a list of QuoteModels parsed from the PDF at *path*.

        Arguments:
            path{[str]}: the path where the file to be read is located

        Raises:
            Exception: if the file at the path is not of the correct format
                or if the .pdf file at the path cannot be read
        """
        if not cls.can_ingest(path):
            raise Exception('cannot ingest exception')
        # Resolve both the input path and a random temp file relative to the
        # package root, with forward slashes for pdftotext on any platform.
        root = os.path.dirname(os.path.dirname(__file__))
        path = os.path.join(root, path.replace('./', '')).replace('\\', '/')
        tmp = os.path.join(root,
                           f'_data/DogQuotes/{random.randint(0, 250)}.txt').replace('\\', '/')
        try:
            subprocess.call(['pdftotext', path, tmp])
            quotes = []
            with open(tmp, "r") as f:
                for line in f:
                    # Normalise curly apostrophes, then split "body - author".
                    one_line = line.strip('\n').replace(u"\u2019", "'").split(' - ')
                    quotes.append(QuoteModel(one_line[0], one_line[1]))
        except Exception as e:
            # bug fix: chain the original exception instead of discarding it.
            raise Exception(".pdf parsing issue occurred.") from e
        finally:
            # bug fix: previously the temp file leaked whenever parsing failed.
            if os.path.exists(tmp):
                os.remove(tmp)
        return quotes
| true |
a7699380d4dad7a77b01909c32569a6bb4c7e157 | Python | takeller/Code-Challenges | /equal_sides_of_an_array/test_equal_sides.py | UTF-8 | 696 | 2.765625 | 3 | [] | no_license | import equal_sides
def test_how_many_words():
    """find_even_index returns the pivot where left/right sums match, else -1."""
    cases = [
        ([1, 2, 3, 4, 3, 2, 1], 3),
        ([1, 100, 50, -51, 1, 1], 1),
        ([1, 2, 3, 4, 5, 6], -1),
        ([20, 10, 30, 10, 10, 15, 35], 3),
        ([20, 10, -80, 10, 10, 15, 35], 0),
        ([10, -80, 10, 10, 15, 35, 20], 6),
        (list(range(1, 100)), -1),
        ([0, 0, 0, 0, 0], 0),
        ([-1, -2, -3, -4, -3, -2, -1], 3),
        (list(range(-100, -1)), -1),
    ]
    for arr, expected in cases:
        assert equal_sides.find_even_index(arr) == expected
7ac0e7b957629308158b5199ba898d8122f47b90 | Python | sejaldua/advent-of-code-2020 | /day05/binary-boarding.py | UTF-8 | 1,024 | 3.390625 | 3 | [] | no_license |
def get_boarding_passes():
    """Read input.txt and return one boarding-pass string per line."""
    with open("input.txt", 'r') as fh:
        contents = fh.read()
    # Drop the trailing empty entry produced by the file's final newline.
    return contents.split('\n')[:-1]
def binary_search(s, lo, hi):
    """Narrow [lo, hi] one character at a time: F/L keep the lower half, B/R the upper."""
    for char in s:
        half = (hi - lo) // 2 + 1
        if char in ("F", "L"):
            hi -= half
        else:
            lo += half
    # After a full specifier lo == hi; mirror the original tie-break on the last char.
    return lo if char in ("F", "L") else hi
def bitwise_binary_search(l):
    """Decode each pass as a binary number: F/L are 0-bits, B/R are 1-bits."""
    table = str.maketrans("FBLR", "0101")
    return [int(code.translate(table), base=2) for code in l]
def get_seat_ids(l):
    """Seat ID = row * 8 + column, with row from chars 0-6 and column from chars 7-9."""
    ids = []
    for s in l:
        row = binary_search(s[:7], 0, 127)
        col = binary_search(s[7:], 0, 7)
        ids.append(row * 8 + col)
    return ids
def puzzle1():
    """Return the list of all seat IDs decoded from input.txt (part 1: take the max)."""
    l = get_boarding_passes()
    seat_ids = get_seat_ids(l)
    # Alternative decoder producing the same IDs:
    # seat_ids = bitwise_binary_search(l)
    return seat_ids
def puzzle2():
    """Return the single missing seat ID whose two neighbours both exist (part 2).

    Uses a set for O(1) membership instead of the previous O(max * n)
    list scans when building and probing the "missing" list.
    """
    seat_ids = set(puzzle1())
    for candidate in range(min(seat_ids), max(seat_ids)):
        if (candidate not in seat_ids
                and candidate - 1 in seat_ids
                and candidate + 1 in seat_ids):
            return candidate
if __name__ == "__main__":
print(max(puzzle1()))
print(puzzle2()) | true |
af37135f6a6498ea9788e0c8cff5c2ccc479dde3 | Python | simonwrafter/FMN050 | /ex4/task2.py | UTF-8 | 395 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 09:59:34 2015
@author: simon
"""
from scipy import *
from pylab import *
from task1 import *
y = [1, 3, -2, 0, 1, 0, 1]  # data values to interpolate
x = [0, 1, 2, 3, 4, 5, 6]   # knot positions (uniform grid)
# Cubic-spline coefficients computed by task1's cubspline.
coeff = cubspline(x,y)
yval = []
# Dense evaluation grid over [0, x[-1]] for plotting the spline.
xplot = array(linspace(0, x[-1], 200))
for i in range(len(xplot)):
    yval.append(cubsplineval(coeff, x, xplot[i]))
figure(1)
clf()
grid(1)
plot(xplot, yval)
bf8f5de115dda18feabb9b149035746d4c7457d3 | Python | 1203zy/tmcrtrl | /Flask/script/rabbitopt.py | UTF-8 | 1,159 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# auth : pangguoping
import sys
sys.path.append(r'../')
import pika
import lib.database
# ########################## Consumer ##########################
credentials = pika.PlainCredentials('tmp', 'tmp')
# Connect to the RabbitMQ server.
connection = pika.BlockingConnection(pika.ConnectionParameters('106.12.125.225',5672,'/',credentials))
channel = connection.channel()
# Declare the message queue that messages travel through; created if absent.
#channel.queue_declare(queue='wzg')
# Callback handling each delivery: decode the body and store it in the database.
def callback(ch, method, properties, body):
    lib.database.insertData(body.decode('utf-8'))
# Tell RabbitMQ to deliver messages from the queue to callback.
channel.basic_consume(callback,
                      queue='tmpdata',
                      no_ack=True)
# no_ack=True means the callback does not send delivery acknowledgements.
print(' [*] Waiting for messages. To exit press CTRL+C')
# Start consuming: blocks, invoking callback whenever the queue has messages; Ctrl+C exits.
channel.start_consuming()
| true |
1bfec1f1ad8877b311745b683449672d211e9273 | Python | sheilapaiva/LabProg1 | /Unidade8/complemento_excesso_original/complemento_excesso_original.py | UTF-8 | 1,254 | 3.40625 | 3 | [] | no_license | #coding: utf-8
#UFCG - Ciência da Computação
#Programação I e laboratório de Programação I
#Aluna: Sheila Maria Mendes Paiva
#Unidade: 8 Questão: Complemento Excesso Original
#coding: utf-8
#UFCG - Ciência da Computação
#Programação I e laboratório de Programação I
#Aluna: Sheila Maria Mendes Paiva
#Unidade: 8 Questão: Complemento Excesso Original
def bits_8(string, separador):
    """Keep the characters after the last occurrence of *separador*, zero-padded to width 8."""
    tail = ""
    for ch in string:
        if ch == separador:
            tail = ""       # a separator resets the accumulator
        else:
            tail += ch
    padding = "0" * (8 - len(tail))
    return padding + tail
def excesso_127(numero):
    """Excess-127 encoding: add the bias, then render as 8 binary digits."""
    com_excesso = int(numero) + 127
    return bits_8(bin(com_excesso), "b")
def complemento1(numero):
    """8-bit one's complement: non-negative values verbatim, negatives bit-flipped."""
    valor = int(numero)
    bits = bits_8(str(bin(valor)), "b")
    if valor >= 0:
        return bits
    # Negative: invert every bit of the magnitude's 8-bit representation.
    invertido = ""
    for bit in bits:
        invertido += "1" if bit == "0" else "0"
    return invertido
# Read commands until the "***" sentinel; dispatch to one's complement (C1)
# or excess-127 (E127) encoding of the given number.  (Python 2 syntax.)
while True:
    tipo = raw_input().split()
    if tipo[0] == "***":
        break
    elif tipo[0] == "C1":
        print complemento1(tipo[1])
    elif tipo[0] == "E127":
        print excesso_127(tipo[1])
| true |
e705506b402547ce221e913b735e1f3c61c34c29 | Python | Shwebs/Python-practice | /BasicConcepts/3.strings/type-casting3.py | UTF-8 | 147 | 3.34375 | 3 | [] | no_license | spam = "7"
spam = spam + "0" #70
eggs = int(spam) + 3 # 70 +3
print(float(eggs)) # 73.0
x=7
print (str(x)) #O/P:- 7 ||| It's not SEVEN | true |
61cf094d69892d09156ac6d28119d6fe8e1a3f10 | Python | LuisAlvarez98/MachineLearning-course | /01-linear-regression/LinearRegressor.py | UTF-8 | 4,009 | 3.625 | 4 | [] | no_license | """
Modified by:
- Jesús Omar Cuenca Espino A01378844
- Luis Felipe Alvarez Sanchez A01194173
- Juan José González Andrews A01194101
- Rodrigo Montemayor Faudoa A00821976
Date: 03/09/2021
"""
import numpy as np
from progressbar import progressbar, streams
# Setup Progressbar wrapper function
streams.wrap_stderr()
class LinearRegressor():
    """Batch gradient-descent linear regressor.

    Data layout: X is (n x m) — n features by m samples; y is (1 x m);
    self.theta is (n x 1).
    """

    def __init__(self, alpha=0.1, epochs=1):
        self.alpha = alpha      # learning rate
        self.epochs = epochs    # number of full-batch gradient steps
        self.costs = []         # cost history, one entry per epoch
        self.theta = None       # (n x 1) parameters, initialised in fit()

    def _cost_function(self, y_pred, y, m):
        """Mean squared error J = (1 / 2m) * sum((y_pred - y)^2).

        y_pred: (1 x m) predicted values.
        y: (1 x m) target values.
        m: number of samples.
        Returns a scalar cost.
        """
        residual = y_pred - y
        cost = np.dot(residual, residual.T)
        return cost.flatten()[0] / (2 * m)

    def _hypothesis(self, X):
        """Linear hypothesis h(X) = theta^T X.

        X: (n x m) samples; returns a (1 x m) array of estimates.
        """
        return np.dot(self.theta.T, X)

    def _cost_function_derivative(self, y_pred, y, X, m):
        """Gradient step, already scaled by alpha/m: (alpha/m) * X (y_pred - y)^T.

        Returns an (n x 1) array — one component per theta parameter.
        """
        residual = y_pred - y               # (1 x m)
        grad = np.dot(residual, X.T)        # (1 x m) @ (m x n) -> (1 x n)
        return (self.alpha / m) * grad.T    # scalar * (n x 1)

    def fit(self, X, y):
        """Fit theta to (X, y) by batch gradient descent for self.epochs steps.

        X: (n x m) — n features, m samples.  y: (1 x m).
        """
        n, m = X.shape[0], X.shape[1]
        # One theta per feature, randomly initialised in [-10, 10).
        self.theta = np.random.uniform(-10, 10, (n, 1))
        cost = None  # bug fix: the print below referenced an undefined name when epochs == 0
        for _ in progressbar(range(self.epochs)):
            y_pred = self.predict(X)
            cost = self._cost_function(y_pred, y, m)
            gradient = self._cost_function_derivative(y_pred, y, X, m)
            # Update rule; the gradient already includes the alpha/m factor.
            self.theta = self.theta - gradient
            self.costs.append(cost)
        print("Final theta is {} (cost: {})".format(self.theta.T, cost))

    def predict(self, X):
        """Predict a (1 x m') array of values for the (n x m') samples in X."""
        return self._hypothesis(X)
| true |
b409a263e349ddad66cbd44c6a267045f4f327b0 | Python | Olionheart/SIFAS-Tier-and-Chill | /parking_calculator.py | UTF-8 | 5,117 | 2.734375 | 3 | [] | no_license | import numpy as np
import pandas as pd
"""
Copyright <2020> <Olionheart>
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is
hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
def get_ep_mult(ep_bonus):
    """Return all distinct EP multipliers, descending.

    Every on/off combination of the percentage card bonuses in *ep_bonus*
    yields the multiplier 1 + 0.01 * (sum of chosen bonuses); 1 (no cards)
    is always included.
    """
    n_cards = len(ep_bonus)
    multipliers = {1}
    for mask in range(2 ** n_cards):
        # MSB-first bit string of the mask, padded to one bit per card.
        bit_string = format(mask, "0{}b".format(n_cards)) if n_cards else "0"
        chosen_pct = sum(pct for bit, pct in zip(bit_string, ep_bonus) if bit == "1")
        multipliers.add(1 + 0.01 * chosen_pct)
    return sorted(multipliers, reverse=True)
def get_possible_ep(adv_plus, ep_mult):
    """Outer product of the base per-song EP values with the multipliers, truncated to int.

    adv_plus adds the four advanced-plus song payouts to the base table.
    Returns a (songs x multipliers) integer array.
    """
    if adv_plus:
        base_ep = np.array([875, 600, 405, 275, 860, 845, 830, 815, 581, 562,
                            543, 525, 390, 375, 360, 345, 262, 250, 237, 225])
    else:
        base_ep = np.array([600, 405, 275, 581, 562, 543, 525, 390, 375, 360,
                            345, 262, 250, 237, 225])
    return np.outer(base_ep, ep_mult).astype(int)
def ukp_solver(target, combination, get_dp_memory = False):
    """Express *target* as a sum of values drawn (with repetition) from *combination*.

    dp_memory[i] records the first value from *combination* that leads to a
    formable remainder i - value, or -1 when i cannot be formed (0 is
    trivially formed).  Returns a {value: count} dict describing one valid
    decomposition, or False when the target is unreachable.  With
    get_dp_memory=True the raw DP table is returned instead (debug aid).
    """
    dp_memory = [0]
    for total in range(1, target + 1):
        chosen = -1
        for value in combination:
            if total - value >= 0 and dp_memory[total - value] != -1:
                chosen = value
                break
        dp_memory.append(chosen)
    if get_dp_memory:  # for debugging and experimentation
        return dp_memory
    if dp_memory[-1] == -1:
        return False
    # Backtrack from target to 0, counting each value used along the way.
    counts = dict()
    remaining = target
    while remaining > 0:
        value = dp_memory[remaining]
        remaining -= value
        counts[value] = counts.get(value, 0) + 1
    assert remaining == 0
    return counts
print("This SIFAS parking calculator is made by 『TC』Olion♡ (twt: @O1ionheart)")
current_ep = 0
target_ep = 0
adv_plus = False
ep_bonus = []
get_input = True
debug = False
if get_input:
# get current_ep
try:
current_ep = int(input("Please indicate your current event point (EP): "))
except ValueError:
success = False
while not success:
try:
current_ep = int(input("Your input was not an integer, please try again: "))
success = True
except ValueError:
pass
# get adv_plus
adv_plus = input("Are adv+ songs available (T if True): ").lower() == 't'
# get ep_bonus
ep_bonus = []
new_ep_bonus = -1
print("Please input your event point bonus from cards. Do not de-duplicate! \n(i.e. if you have 2 cards that give 3% bonus each, you should enter 3 twice) \nEnter 0 when you are done, or if the event is an item exchange event.")
print("***NOTE: I haven't fixed rounding errors for marathon events yet!! Becareful!!***")
while new_ep_bonus != 0:
try:
new_ep_bonus = int(input("Card #" + str(len(ep_bonus) + 1) + " : "))
except ValueError:
success = False
while not success:
try:
new_ep_bonus = int(input("Your input was not an integer, please try again: "))
success = True
except ValueError:
pass
ep_bonus.append(new_ep_bonus)
ep_bonus.pop()
# get target_ep
try:
target_ep = int(input("Please indicate your target event point (EP): "))
except ValueError:
success = False
while not success:
try:
target_ep = int(input("Your input was not an integer, please try again: "))
success = True
except ValueError:
pass
if debug:
print(current_ep)
print(adv_plus)
print(ep_bonus)
print(get_ep_mult(ep_bonus))
df = pd.DataFrame(get_possible_ep(adv_plus, np.array(get_ep_mult(ep_bonus))))
print(get_possible_ep(adv_plus, np.array(get_ep_mult(ep_bonus))).flatten().tolist())
result = ukp_solver(target_ep - current_ep, get_possible_ep(adv_plus, np.array(get_ep_mult(ep_bonus))).flatten())
if result:
print("Park found!: ", result)
else:
print("Park not found! Try again ~")
print("Tips: you are guaranteed to find a park as long as you are at least 1329 points under your park goal!")
# exploring minimum gap from park goal to guarantee a park
# result = ukp_solver(target_ep - current_ep, get_possible_ep(adv_plus, np.array(get_ep_mult(ep_bonus))).flatten(), get_dp_memory = True)
# print(max(loc for loc, val in enumerate(result) if val == -1))
# conclusion: you are guaranteed to find a park as long as you are at least 1329 points under your park goal! | true |
17446457f8867769dc49bc23e4483716b5387be5 | Python | D3coy/Python | /algos_structures/binary_tree_ya.py | UTF-8 | 2,742 | 3.484375 | 3 | [] | no_license | # while flooding !next free! node in existing tree-structure
def newnode(memstruct):
    """Allocate a node: pop the head of the free list and return its index."""
    allocated = memstruct[1]
    # Advance the free-list head to the next free cell before handing this one out.
    memstruct[1] = memstruct[0][allocated][1]
    return allocated
# while releasing node, it shifts to first index and becomes the first free node
def delnode(memstruct, index):
    """Release a node: push *index* onto the front of the free list."""
    memstruct[0][index][1] = memstruct[1]
    memstruct[1] = index
# mem manager, left_child of the tree refers to next free cell( /child)
def initmemory(maxn):
    """Build the node pool: each cell is [key, left_child/next_free, right_child].

    Initially every cell's left-child slot links to the next cell, forming the
    free list; returns [memory, index_of_first_free].
    """
    memory = [[0, i + 1, 0] for i in range(maxn)]
    return [memory, 0]
# search for node in binary tree (key -> value in node;
# root -> pointer to node with key)
def find(memstruct, root, x):
    """Search the BST for key x starting at node index *root*; return its index or -1."""
    node = memstruct[0][root]
    key = node[0]
    if x == key:
        return root
    # Descend left for smaller keys, right for larger ones.
    child = node[1] if x < key else node[2]
    if child == -1:
        return -1  # key absent
    return find(memstruct, child, x)
# f_add + f_createandfillnode implements addition value to the tree
def createandfillnode(memstruct, key):
    """Allocate a fresh leaf node holding *key* (children -1) and return its index."""
    index = newnode(memstruct)
    node = memstruct[0][index]   # list reference: mutations land in the pool
    node[0] = key
    node[1] = -1
    node[2] = -1
    return index
def add(memstruct, root, x):
    """Insert key x into the BST rooted at index *root* (duplicates are ignored)."""
    node = memstruct[0][root]
    key = node[0]
    if x == key:
        return  # duplicate key: nothing to do (original fell through likewise)
    side = 1 if x < key else 2  # slot 1 = left child, slot 2 = right child
    child = node[side]
    if child == -1:
        # Empty slot found: attach a new leaf here.
        node[side] = createandfillnode(memstruct, x)
    else:
        return add(memstruct, child, x)
def delete(memstruct, root, y):
    """Delete key y from the BST, but only when its node is a leaf.

    The node layout carries no parent pointers, so inner-node deletion is not
    supported here; non-leaf matches are silently left in place.
    """
    index = find(memstruct, root, y)
    if index == -1:
        # bug fix: find() returns -1 for absent keys; indexing memory with -1
        # inspected the last pool cell and could free an unrelated node.
        return
    if memstruct[0][index][1] == -1 and memstruct[0][index][2] == -1:
        delnode(memstruct, index)
    return
# Demo: build a pool of 20 nodes and a sample tree rooted at key 8,
# then delete the leaf holding key 1.
memstruct = initmemory(20)
root = createandfillnode(memstruct, 8) # filling root node of the tree
add(memstruct, root, 10)
add(memstruct, root, 9)
add(memstruct, root, 14)
add(memstruct, root, 13)
add(memstruct, root, 3)
add(memstruct, root, 1)
add(memstruct, root, 6)
add(memstruct, root, 4)
add(memstruct, root, 7)
delete(memstruct, root, 1)
65887fd941b6b46332fed42aed93fa35f0f30eeb | Python | Humaira-Shah/ECE499 | /asst2/AX12funcADJUSTED.py | UTF-8 | 2,519 | 3.359375 | 3 | [] | no_license | # FUNCTION getChecksum(buff)
# Takes packet as a list for its parameter.
# Packet must include at least 6 elements in order to have checksum calculated
# last element of packet must be the checksum set to zero, function will return
# packet with correct checksum value.
def getChecksum(buff):
n = len(buff)
if(n >= 6):
# check that first two elements are 255 as required in order to
# signify the start of an incoming packet
if((buff[0] != 255) or (buff[1] != 255)):
print "WARNING: FIRST TWO PARAMETERS OF PACKET MUST BE 255\n"
# calculate checksum
checksum = 0
# consider all elements of buff except checksum element buff[n-1]
# and except buff[0] and buff[1] for calculation of checksum
for i in range(n-3):
# add up all considered elements besides buff[0] and buff[1]
checksum = checksum + buff[i+2]
checksum = ~checksum
# get least significant byte after notting
checksum = checksum & 0xFF
buff[n-1] = checksum
else:
# does not contain at least minimum parameters
# Should have at least the following parameters
# buff[0] = 0xFF
# buff[1] = 0xFF
# buff[2] = ID
# buff[3] = LENGTH
# buff[4] = INSTRUCTION
# buff[n-1] = CHECKSUM
print "ERROR: YOU FOOL! A PACKET MUST BE AT LEAST OF LENGTH 6\n"
#print buff
return buff
def setVelocity(ID, vel):
# make sure ID is valid
if((ID > 254) or (ID < 0)):
print "WARNING: ID is out of acceptable range of values\n"
ID = ID & 0xFF
if(ID > 254):
ID = 254
# check to see if vel is within range of possible values
# must have magnitude less than or equal to 0x3FF which is
# 1023 in decimal
if((vel > 1023) or (vel < -1023)):
print "WARNING: User has entered vel outside acceptable range [-1023,1023]\n Behavior will not be as expected...\n"
# check to see if user specified positive (CW)
# or negative (CCW) velocity
velSign = 0x04 # by default, sign bit is raised, meaning CW
if(vel < 0):
# vel negative, therefore set sign bit for CCW
velSign = 0x00
# make vel positive to obtain magnitude
vel = (~vel) + 0x01
# get 2 least significant bytes of vel
vel = vel & 0xFFFF
# break vel into high byte and lowbyte
velH = vel & 0xFF00
velH = velH >> 8
velL = vel & 0x00FF
# limit high byte to 0x03 because maximum
# velocity is 0x03FF
velH = velH & 0x03
# put sign info in velH
velH |= velSign
# make command packet for goal position and moving speed
packet = [0xFF, 0xFF, ID, 0x04, 0x20, velL, velH, 0]
packet = getChecksum(packet)
#print packet
return packet
| true |