blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
cca2269d2cb58175a0ef352f45aba6442159f2eb
|
Python
|
Tubbz-alt/docng
|
/documentation/tools/handbook-toc-creator.py
|
UTF-8
| 5,563
| 2.734375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
BSD 2-Clause License
Copyright (c) 2020, The FreeBSD Project
Copyright (c) 2020, Sergio Carlavilla
This script will generate the Table of Contents of the Handbook
"""
#!/usr/bin/env python3
import sys, getopt
import re
language = 'en' # English by default
def setAppendixTitle(language):
    """Return the localized word for "Appendix", or None for an unknown code."""
    translations = {
        'en': 'Appendix', 'de': 'Anhang', 'gr': 'Παράρτημα', 'es': 'Apéndice',
        'fr': 'Annexe', 'hu': 'függelék', 'it': 'Appendice', 'jp': '付録',
        'mn': 'Хавсралт', 'nl': 'Bijlage', 'br': 'Apêndice', 'ru': 'Приложение',
        'cn': '附录', 'tw': '附錄',
    }
    return translations.get(language)
def setPartTitle(language):
    """Return the localized word for "Part", or None for an unknown code."""
    translations = {
        'en': 'Part', 'de': 'Teil', 'gr': 'Μέρος', 'es': 'Parte',
        'fr': 'Partie', 'hu': 'rész', 'it': 'Parte', 'jp': 'パート〓',
        'mn': 'хэсэг', 'nl': 'Deel', 'br': 'Parte', 'ru': 'Часть',
        'cn': '部分', 'tw': '部',
    }
    return translations.get(language)
def setChapterTitle(language):
    """Return the localized word for "Chapter", or None for an unknown code."""
    translations = {
        'en': 'Chapter', 'de': 'Kapitel', 'gr': 'Κεφάλαιο', 'es': 'Capítulo',
        'fr': 'Chapitre', 'hu': 'Fejezet', 'it': 'Capitolo', 'jp': '章',
        'mn': 'Бүлэг', 'nl': 'Hoofdstuk', 'br': 'Capítulo', 'ru': 'Глава',
        'cn': '章', 'tw': '章',
    }
    return translations.get(language)
def setTOCTitle(language):
    """Return the localized "Table of Contents" heading, or None if unknown."""
    translations = {
        'en': 'Table of Contents', 'de': 'Inhaltsverzeichnis',
        'gr': 'Πίνακας Περιεχομένων', 'es': 'Tabla de contenidos',
        'fr': 'Table des matières', 'hu': 'Tartalom', 'it': 'Indice',
        'jp': '目次', 'mn': 'Гарчиг', 'nl': 'Inhoudsopgave', 'br': 'Índice',
        'ru': 'Содержание', 'cn': '目录', 'tw': '內容目錄',
    }
    return translations.get(language)
def getPartNumber(number):
    """Convert a positive part number to its Roman-numeral string.

    Generalizes the original 1-5 lookup table to any positive integer so the
    Handbook can grow beyond five parts.  Returns None for non-ints or
    values < 1, matching the old table's miss behavior for bad input.
    """
    if not isinstance(number, int) or number < 1:
        return None
    # Standard subtractive Roman-numeral decomposition, largest value first.
    symbols = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
               (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
               (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
    out = []
    for value, glyph in symbols:
        while number >= value:
            out.append(glyph)
            number -= value
    return ''.join(out)
def checkIsPart(chapter):
    """True when the chapter path refers to a part-divider file."""
    return "part" in chapter
def checkIsPreface(chapterContent):
    """True when one of the chapter's lines is exactly "[preface]"."""
    return "[preface]" in chapterContent
def checkIsAppendix(chapterContent):
    """True when one of the chapter's lines is exactly "[appendix]"."""
    return "[appendix]" in chapterContent
def main(argv):
    """Generate toc.adoc for one language of the FreeBSD Handbook.

    Reads ``chapters-order.adoc`` for the chapter order, scans each chapter
    file for titles/anchors and writes the assembled TOC.
    Usage: handbook-toc-creator.py -l <language>
    """
    # BUG FIX: ``language`` is assigned inside the option loop, which makes
    # it local to main(); without a ``-l`` flag the original raised
    # UnboundLocalError (the module-level default never applied).
    language = 'en'
    try:
        opts, args = getopt.getopt(argv, "hl:", ["language="])
    except getopt.GetoptError:
        print('handbook-toc-creator.py -l <language>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('handbook-toc-creator.py -l <language>')
            sys.exit()
        elif opt in ("-l", "--language"):
            language = arg

    # chapters-order.adoc lists one chapter path per line, in book order.
    with open('./content/{}/books/handbook/chapters-order.adoc'.format(language), 'r', encoding='utf-8') as chaptersFile:
        chapters = [line.strip() for line in chaptersFile]

    toc = "// Code generated by the FreeBSD Documentation toolchain. DO NOT EDIT.\n"
    toc += "// Please don't change this file manually but run `make` to update it.\n"
    toc += "// For more information, please read the FreeBSD Documentation Project Primer\n\n"
    toc += "[.toc]\n"
    toc += "--\n"
    toc += '[.toc-title]\n'
    toc += setTOCTitle(language) + '\n\n'

    chapterCounter = 1
    subChapterCounter = 1
    partCounter = 1
    for chapter in chapters:
        # The redundant explicit close() inside the ``with`` was removed;
        # the context manager already closes the file.
        with open('./content/{0}/books/handbook/{1}'.format(language, chapter), 'r', encoding='utf-8') as chapterFile:
            chapterContent = chapterFile.read().splitlines()
        chapter = chapter.replace("/_index.adoc", "")
        if checkIsPart(chapter):
            for lineNumber, chapterLine in enumerate(chapterContent, 1):
                if re.match(r"^={1} [^\n]+", chapterLine):
                    toc += "* link:{0}[{1} {2}. {3}]\n".format(chapter, setPartTitle(language), getPartNumber(partCounter), chapterLine.replace("=", "").strip())
                    partCounter += 1
        elif checkIsPreface(chapterContent):
            for lineNumber, chapterLine in enumerate(chapterContent, 1):
                if re.match(r"^={1} [^\n]+", chapterLine):
                    toc += "* link:{0}[{1}]\n".format(chapter, chapterLine.replace("=", "").strip())
        elif checkIsAppendix(chapterContent):
            for lineNumber, chapterLine in enumerate(chapterContent, 1):
                if re.match(r"^={1} [^\n]+", chapterLine):
                    toc += "** link:{0}[{1} {2}]\n".format(chapter, setAppendixTitle(language), chapterLine.replace("=", "").strip())
                elif re.match(r"^={2} [^\n]+", chapterLine):
                    # The [[anchor]] sits two lines above the section title;
                    # enumerate is 1-based, hence lineNumber - 3.
                    toc += "*** link:{0}/#{1}[{2}]\n".format(chapter, chapterContent[lineNumber - 3].replace("[[", "").replace("]]", ""), chapterLine.replace("==", "").lstrip())
        else:  # Normal chapter
            for lineNumber, chapterLine in enumerate(chapterContent, 1):
                if re.match(r"^={1} [^\n]+", chapterLine):
                    toc += "** link:{0}[{1} {2}. {3}]\n".format(chapter, setChapterTitle(language), chapterCounter, chapterLine.replace("=", "").strip())
                elif re.match(r"^={2} [^\n]+", chapterLine):
                    toc += "*** link:{0}/#{1}[{2}.{3}. {4}]\n".format(chapter, chapterContent[lineNumber - 3].replace("[[", "").replace("]]", ""), chapterCounter, subChapterCounter, chapterLine.replace("==", "").lstrip())
                    # BUG FIX: the counter was reset per chapter but never
                    # incremented, so every section was numbered "N.1.".
                    subChapterCounter += 1
            chapterCounter += 1
            subChapterCounter = 1
    toc += "--\n"
    with open('./content/{}/books/handbook/toc.adoc'.format(language), 'w', encoding='utf-8') as tocFile:
        tocFile.write(toc)


if __name__ == "__main__":
    main(sys.argv[1:])
| true
|
9b675a758147b78cf38a29213b4448fe135243dc
|
Python
|
mingxoxo/Algorithm
|
/baekjoon/15657.py
|
UTF-8
| 486
| 2.953125
| 3
|
[] |
no_license
|
# N과 M (8)
# 백트래킹
# 22.11.01
# https://www.acmicpc.net/problem/15657
import sys
input = sys.stdin.readline
def backtracking(N, M, index, num, result):
if M == 0:
print(*result, sep=' ')
return
for i in range(index, N):
result.append(num[i])
backtracking(N, M - 1, i, num, result)
result.pop()
# Read N (pool size) and M (selection length), then the N numbers.
N, M = map(int, input().split())
# Sort so selections are printed in increasing lexicographic order.
num = sorted(list(map(int, input().split())))
visited = [0] * N  # NOTE(review): unused — backtracking() never reads it.
backtracking(N, M, 0, num, [])
| true
|
420d55d91ef2abaa3263d656eb50f6614e151bc8
|
Python
|
ADragonArmy/MadLib
|
/mad_lib_two.py
|
UTF-8
| 1,783
| 2.984375
| 3
|
[] |
no_license
|
# Pokemon-themed mad lib: collects words from the user and prints the story
# with colorized blanks, then optionally chains back into mad_lib_one.
import colorama
from colorama import init,Fore,Style
init()  # enable ANSI color codes (needed on Windows terminals)
import _main_
from _main_ import get_inputa  # shared prompt helper from the project
import runpy

# Short color aliases used in the story template below.
bl = Fore.LIGHTBLACK_EX
b = Fore.BLUE
r = Fore.MAGENTA
y = Fore.YELLOW
g = Fore.GREEN
c = Fore.CYAN
t = Fore.LIGHTWHITE_EX
rs = Style.RESET_ALL

print(f"{Fore.RED}Please Separate All Inputs With A Comma!{rs}")
print(" ")
# get_inputa(prompt, count, color) — presumably returns a list of `count`
# words entered by the user; confirm against _main_.
noun =(get_inputa("Nouns",2,c))
verb =(get_inputa("Verbs",2,g))
adjective =(get_inputa("Adjectives",3,y))
ing = (get_inputa("Verb ending in ing",1,g))
place =(get_inputa("Place",1,r))
nouns = (get_inputa("Plural Noun",1,c))
celebrity = (get_inputa("Celebrity",1,c))
job = (get_inputa("Job",1,r))
print("**********************************")
print(" ")
# The story: each blank interpolates a collected word wrapped in its color
# code and reset (rs).
print(f"""So, you want to {g}{verb[0]}{rs} Pokemon, huh? Well, it's not as
{y}{adjective[0]}{rs} as it seems. First off, you'll need to find them. You can
catch Pokemon in the wild of in (the) {r}{place[0]}{rs}. Once you have
a Charizard or a Blastoise, you'll need to gain its trust. That's not always
easy. You can try feeding it or {g}{ing[0]}{rs} it, but the best
way to gain a Pokemon's trust is to {g}{verb[1]}{rs} with it. Once you
and your Pokemon are best {c}{nouns[0]}{rs}, start training. You can
do this with a friend or even with {c}{celebrity[0]}{rs} at a/an {y}{adjective[1]}{rs}
Gym nearby. Think of it like training a/an {c}{noun[0]}{rs}! Only when
you've trained enough should you challenge another {r}{job[0]}{rs}
for real. After all that work, it would be {y}{adjective[2]}{rs} for someone to
come along and take your {c}{noun[1]}{rs} away!""")
print(" ")
print("**********************************")
# Chain to the other mad lib, or quit.
Exit = input("Would you like to continue Y/N: ")
if Exit.lower() == "y":
    runpy.run_module(mod_name='mad_lib_one')
else:
    exit()
| true
|
67b395c16b5f929889bf0d8117498b404cfa6797
|
Python
|
382982408/myPythonStudy
|
/pythonBase/init.py
|
UTF-8
| 506
| 4.1875
| 4
|
[] |
no_license
|
'''
Demonstrates how __init__ passes values into a class.
(Translated from the original Chinese comment.)
'''
class Human:
    # Class-level defaults; __init__ overrides name and age per instance.
    name = "ren"
    gender = 'male'
    age = 25
    __money = 5000  # name-mangled to _Human__money ("private" attribute)

    def __init__(self, a, b):
        """Store the given name (a) and age (b) on the instance."""
        print('#' * 50)
        self.name = a
        self.age = b
        # Human.age = 40
        print('#' * 50)

    def say(self):
        """Print name and money, then call the private __lie helper."""
        print('my name is %s and I have %d' % (self.name, self.__money))
        self.__lie()

    def __lie(self):
        print("I have 5000")


# Instance attributes shadow the class attribute: Human.age stays 25.
zhangsan = Human("zhangsan", 30)
print(zhangsan.name, zhangsan.age, Human.age)
| true
|
c486a12d400d72bdeb9af4089312daa539d45d6c
|
Python
|
mbakker7/timml
|
/timml/constant.py
|
UTF-8
| 6,631
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import inspect # Used for storing the input
from .element import Element
from .equation import PotentialEquation
__all__ = ['Constant', 'ConstantStar']
class ConstantBase(Element, PotentialEquation):
    """Base class for a reference-point element with a prescribed head.

    Registers itself with the model; ``initialize`` resolves the aquifer
    at (xr, yr) and converts the head ``hr`` into a potential.
    """

    def __init__(self, model, xr=0, yr=0, hr=0.0, layer=0,
                 name='ConstantBase', label=None, aq=None):
        Element.__init__(self, model, nparam=1, nunknowns=1, layers=layer,
                         name=name, label=label)
        # Defined here and not in Element as other elements can have
        # multiple parameters per layer.
        self.nparam = 1
        self.nunknowns = 0
        self.xr = xr  # x-coordinate of the reference point
        self.yr = yr  # y-coordinate of the reference point
        self.hr = hr  # prescribed head at the reference point
        self.aq = aq  # aquifer; resolved from (xr, yr) in initialize() if None
        self.model.add_element(self)

    def __repr__(self):
        return self.name + ' at ' + str(
            (self.xr, self.yr)) + ' with head ' + str(self.hr)

    def initialize(self):
        """Resolve the aquifer and precompute the potential for head hr."""
        if self.aq is None:
            self.aq = self.model.aq.find_aquifer_data(self.xr, self.yr)
        self.aq.add_element(self)
        self.ncp = 1  # a single control point at (xr, yr)
        self.xc = np.array([self.xr])
        self.yc = np.array([self.yr])
        # Potential corresponding to the specified head in the element's layers.
        self.pc = self.hr * self.aq.T[self.layers]
        self.parameters = np.atleast_2d(self.pc)

    def potinf(self, x, y, aq=None):
        """Unit potential influence: 1 in this element's aquifer, else 0."""
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((1, aq.naq))
        if aq == self.aq:
            rv[0, 0] = 1
        return rv

    def disvecinf(self, x, y, aq=None):
        """Discharge-vector influence: zero (an additive constant adds no flow)."""
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((2, 1, aq.naq))
        return rv
class Constant(ConstantBase, PotentialEquation):
    """
    Specify the head at one point in the model in one layer.
    The head may only be specified in an area of the model where
    the aquifer system is confined.

    Parameters
    ----------
    model : Model object
        model to which the element is added
    xr : float
        x-coordinate of the point where the head is specified
    yr : float
        y-coordinate of the point where the head is specified
    hr : float
        specified head
    layer : int
        layer where the head is specified
    label : string or None (default: None)
        label of the element
    """

    def __init__(self, model, xr=0, yr=0, hr=0.0, layer=0, label=None):
        # Record the call for model storage/reproduction.
        self.storeinput(inspect.currentframe())
        ConstantBase.__init__(self, model, xr=xr, yr=yr, hr=hr, layer=layer,
                              name='Constant', label=label)
        self.nunknowns = 1  # the constant itself is solved for

    def initialize(self):
        ConstantBase.initialize(self)
        # A reference head is only valid where the aquifer top is confined
        # (aq.ilap true); elsewhere the head is fixed by the semi-confining
        # boundary condition instead.
        assert self.aq.ilap, 'Constant element added to area that is ' \
                             'semi-confined'
        self.resfac = np.zeros(1)  # required for HeadEquation
        self.strengthinf = np.zeros(1)  # required for HeadEquation

    def setparams(self, sol):
        # Store the solved parameter value.
        self.parameters[:, 0] = sol
class ConstantInside(Element):
    """Unknown constant for the inside of an inhomogeneity.

    Sets the constant at points xc, yc such that it balances the summed
    potential of all other elements at those control points (see
    ``equation``).
    """

    def __init__(self, model, xc=0, yc=0, label=None):
        Element.__init__(self, model, nparam=1, nunknowns=1,
                         layers=list(range(model.aq.naq)),
                         name='ConstantInside', label=label)
        self.xc = np.atleast_1d(xc)  # control-point x-coordinates
        self.yc = np.atleast_1d(yc)  # control-point y-coordinates
        self.parameters = np.zeros((1, 1))
        self.model.add_element(self)

    def __repr__(self):
        return self.name

    def initialize(self):
        # The aquifer of the first control point hosts the element.
        self.aq = self.model.aq.find_aquifer_data(self.xc[0], self.yc[0])
        self.aq.add_element(self)
        self.ncp = len(self.xc)

    def potinf(self, x, y, aq=None):
        '''Can be called with only one x,y value'''
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((1, aq.naq))
        if aq == self.aq:
            rv[0, 0] = 1
        return rv

    def disvecinf(self, x, y, aq=None):
        '''Can be called with only one x,y value'''
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((2, 1, aq.naq))
        return rv

    def equation(self):
        """Build this element's row of the system matrix.

        Unknown elements (other than self) contribute their summed
        potential influence at every control point; known elements are
        moved to the right-hand side.
        """
        mat = np.zeros((1, self.model.neq))
        rhs = np.zeros(1)  # Needs to be initialized to zero
        for icp in range(self.ncp):
            ieq = 0
            for e in self.model.elementlist:
                if e.nunknowns > 0:
                    if e != self:
                        mat[0:, ieq:ieq + e.nunknowns] += \
                            e.potinflayers(self.xc[icp], self.yc[icp],
                                           self.layers).sum(0)
                    # else:
                    #     mat[0, ieq:ieq+e.nunknowns] += -1
                    ieq += e.nunknowns
                else:
                    rhs[0] -= \
                        e.potentiallayers(self.xc[icp], self.yc[icp],
                                          self.layers).sum(0)
        return mat, rhs

    def setparams(self, sol):
        # Store the solved parameter value.
        self.parameters[:, 0] = sol
#class ConstantStar(Element, PotentialEquation):
# I don't think we need the equation
class ConstantStar(Element):
    """Fixed head on top of a semi-confined aquifer system.

    Has no unknowns: it stores hstar and the corresponding potential
    (potstar = hstar * T) and contributes no flow.
    """

    def __init__(self, model, hstar=0.0, label=None, aq=None):
        Element.__init__(self, model, nparam=1, nunknowns=0, layers=0,
                         name='ConstantStar', label=label)
        assert hstar is not None, 'a value for hstar needs to be specified'
        self.hstar = hstar  # fixed head on top of the system
        self.aq = aq
        self.model.add_element(self)

    def __repr__(self):
        return self.name + ' with head ' + str(self.hstar)

    def initialize(self):
        self.aq.add_element(self)
        # Register on the aquifer so other code can find the fixed head.
        self.aq.constantstar = self
        self.parameters = np.zeros((1, 1))
        # Potential equivalent of the fixed head in every layer.
        self.potstar = self.hstar * self.aq.T

    def potinf(self, x, y, aq=None):
        """No parameter-dependent influence: always zero."""
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((1, aq.naq))
        return rv

    def potentiallayers(self, x, y, layers, aq=None):
        '''Returns array of size len(layers) only used in building equations
        Defined here as it is the particular solution inside a semi-confined aquifer
        and cannot be added by using eigen vectors'''
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        pot = np.zeros(len(layers))
        if aq == self.aq:
            pot[:] = self.potstar[layers]
        return pot

    def disvecinf(self, x, y, aq=None):
        """Discharge-vector influence: zero (a constant adds no flow)."""
        if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
        rv = np.zeros((2, 1, aq.naq))
        return rv
| true
|
5b8a8bcd7791de6a0b19584ac0f2b28098cead9a
|
Python
|
IzaakWN/TriggerChecks
|
/python/utils.py
|
UTF-8
| 840
| 2.96875
| 3
|
[] |
no_license
|
# Author: Izaak Neutelings (July 2023)
import os, shutil
def ensureDirectory(*dirnames, **kwargs):
    """Make directory if it does not exist.

    If more than one path part is given, they are joined into one.
    Keyword arguments: ``empty`` wipes an existing directory's contents,
    ``verb`` >= 1 prints what was created.  Returns the joined path.
    """
    dirname = os.path.join(*dirnames)
    wipe = kwargs.get('empty', False)
    verbosity = kwargs.get('verb', 0)
    if not dirname:
        return dirname  # empty path: nothing to create
    if not os.path.exists(dirname):
        os.makedirs(dirname)
        if verbosity >= 1:
            print(">>> Made directory %r" % (dirname))
        if not os.path.exists(dirname):
            print(">>> Failed to make directory %r" % (dirname))
        return dirname
    if wipe:
        # Directory already exists: remove its contents, keep the directory.
        for entry in os.listdir(dirname):
            entrypath = os.path.join(dirname, entry)
            if os.path.isfile(entrypath) or os.path.islink(entrypath):
                os.unlink(entrypath)
            elif os.path.isdir(entrypath):
                shutil.rmtree(entrypath)
    return dirname
| true
|
ade69b4d3dfb36f5f6a1c2e014272708a8de25f5
|
Python
|
hungvo304/Instance-Search
|
/3rd_party/vggish/matching_script.py
|
UTF-8
| 1,727
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import os
from scipy import ndimage
import sys
def distance(vector_a, vector_b):
    """Euclidean (L2) distance between two feature vectors.

    BUG FIX: the original returned ``norm(norm(a) - norm(b))`` — the
    absolute difference of the two vectors' magnitudes — which is zero for
    any two distinct vectors of equal length.  The intended metric is the
    norm of the element-wise difference.
    """
    return np.linalg.norm(np.asarray(vector_a) - np.asarray(vector_b))


def cosine(vector_a, vector_b):
    """Cosine similarity with +0.001 added to each norm to avoid division
    by zero on all-zero feature vectors."""
    l2_vector_a = np.linalg.norm(vector_a) + 0.001
    l2_vector_b = np.linalg.norm(vector_b) + 0.001
    return np.dot((vector_a / l2_vector_a), (vector_b.T / l2_vector_b))


def find_min(feature_a, feature_b):
    """Slide the shorter feature matrix over the longer one.

    Rows are per-frame feature vectors.  Returns (max_cosine, list_sum):
    the best windowed mean cosine similarity over all offsets, and the
    mean similarity at every offset.
    """
    row_a = feature_a.shape[0]
    row_b = feature_b.shape[0]
    longer, shorter = feature_a, feature_b
    if row_a < row_b:
        longer, shorter = feature_b, feature_a
    index = 0
    max_cosine = 0
    list_sum = []
    # Try every alignment of the shorter sequence inside the longer one.
    while len(shorter) + index <= len(longer):
        sum_dis = 0
        for i in range(len(shorter)):
            sum_dis += cosine(shorter[i], longer[i + index])
        sum_dis = sum_dis / len(shorter)
        if sum_dis > max_cosine:
            max_cosine = sum_dis
        list_sum.append(sum_dis)
        index += 1
    return max_cosine, list_sum
if __name__ == '__main__':
    # Rank every saved feature file (in ./feature) by its best windowed
    # cosine similarity against the query feature file given as argv[1].
    files = os.listdir("feature")
    query = sys.argv[1]
    features = []
    for f in files:
        feature = np.load(os.path.join("feature", f))
        features.append([f, feature])
    ranked_list = []
    for i in range(0, len(features)):
        # NOTE(review): the query is reloaded from disk on every iteration;
        # it could be loaded once before the loop.
        value, list_sum = find_min(np.load(query), features[i][1])
        ranked_list.append((features[i][0], value, list_sum))
    # Best match first.
    ranked_list = sorted(ranked_list, key=lambda x: x[1], reverse=True)
    for shot in ranked_list:
        print("Compare between %s and %s" % (query, shot[0]))
        print(shot)
| true
|
c05dcd2ca1148d857acce5addf8cd827d683195f
|
Python
|
sashapff/mcmc-scaffolding
|
/preprocess/get_lengths.py
|
UTF-8
| 1,585
| 2.625
| 3
|
[] |
no_license
|
import h5py

if __name__ == "__main__":
    # Chromosome identifiers: MT, X and autosomes 1-22.
    chr_indexes = ['MT', 'X']
    for i in range(1, 23):
        chr_indexes.append(str(i))
    contigs2chr = {}   # contig name -> chromosome index it belongs to
    output_files = {}  # chromosome index -> open per-chromosome output file
    path_layouts = "/GWSPH/groups/cbi/Users/pavdeyev/HiCProject/layouts"
    path_output = "/lustre/groups/cbi/Users/aeliseev/aivanova/data/contig_length"
    matrix_filename = "/GWSPH/groups/cbi/Users/pavdeyev/HiCProject/chm13.draft_v0.9.matrix.cool"
    print('Reading layouts...')
    for ind in chr_indexes:
        # chr9 only has a partial layout file.
        if ind != "9":
            filename = path_layouts + "/chr" + ind + ".layout.txt"
        else:
            filename = path_layouts + "/chr9.partial_layout.txt"
        with open(filename, "r") as f:
            lines = f.read().splitlines()
            for line in lines:
                # Each layout line: "<chr_name> <contig1>,<contig2>,..."
                chr_name, contigs = line.split(" ")
                contigs = contigs.split(",")
                for contig in contigs:
                    # contig[:-1] drops the trailing character — presumably
                    # a +/- orientation flag; confirm layout file format.
                    contigs2chr[contig[:-1]] = ind
        output_files[ind] = open(path_output + "/contig.length." + ind + ".txt", "w")
    print(len(contigs2chr))
    print('Reading matrix...')
    # The .cool file is HDF5; /chroms/name and /chroms/length are parallel
    # arrays of contig names and lengths.
    with h5py.File(matrix_filename, "r") as f:
        for i in range(len(f['chroms']['length'][:])):
            contig_name = (f['chroms']['name'][:][i]).decode("utf-8")
            string_to_write = contig_name + '\t' + str(f['chroms']['length'][:][i]) + '\n'
            if contig_name in contigs2chr:
                output_files[contigs2chr[contig_name]].write(string_to_write)
            else:
                # Contig missing from all layouts: report it on stdout.
                print(contig_name)
    for ind in chr_indexes:
        output_files[ind].close()
| true
|
0a4837a4db9651c9fd094c7650193fd76cc78bba
|
Python
|
tjvanderende/NSkaartautomaat
|
/Applicatie/api/nsAPI.py
|
UTF-8
| 1,543
| 2.984375
| 3
|
[] |
no_license
|
import requests
import xmltodict
import threading
"""
De data wordt steeds overgezet naar een lokaal bestand (.xml).
Zodat er eigenlijk een lokale database wordt gegenereerd en de API ook zonder internet werkt.
"""
class NsRequest (threading.Thread):
    """Threaded fetcher for one NS (Dutch Railways) API endpoint.

    Downloads the XML response and writes it under assets/database/, so
    the application builds a local copy and also works offline.
    """

    def __init__(self, url, filename):
        """Thread initializer.

        :param url: URI of the NS API endpoint to load
        :param filename: file (under assets/database/) to store the data in
        """
        super(NsRequest, self).__init__()  # initialize the Thread machinery
        filePath = "assets/database/"
        # SECURITY NOTE(review): API credentials are hard-coded in source;
        # they should be moved to configuration or environment variables.
        self.auth_details = ('tjibbe.vanderende@student.hu.nl', 'yp9PbnRHnhWx-gBERapPKMy1o792T6U20D9Xw2W47xr8fvek-TvS9g')
        self.url = url
        self.filename = filePath+filename

    def _request(self):
        """Perform the request against the NS API server and store the XML
        response in a file for offline use."""
        try:
            stations = requests.get(self.url, auth=self.auth_details)
            with open(self.filename, 'w', encoding='utf-8') as stationsXML:
                stationsXML.write(stations.text)
        except requests.HTTPError:
            print('An HTTP error has occurred')
        except FileNotFoundError:
            print('File could not be found')
        except KeyError:
            print('A Key Error has occurred')
        except requests.ConnectionError:
            print('A Connection Error has occurred')
        except requests.Timeout:
            print('Either the connection has timed out, or the server did not send any data in the allotted time.')

    def run(self):
        """Thread entry point; delegates to _request()."""
        return self._request()
| true
|
0286a75a29ab2114d2b5c2863ccaea79a4cf5f18
|
Python
|
Athenagoras/quantopian_momentum
|
/RVI.py
|
UTF-8
| 9,401
| 3.03125
| 3
|
[] |
no_license
|
"""
Author: Ian Doherty
Date: April 13, 2017
This algorithm trades using RVI.
"""
import numpy as np
import pandas as pd
import math
import talib
# Instrument Class
# Instrument Class
class Stock:
    """Bundles one tradable instrument with its indicator state (RVI,
    RSI, Bollinger Bands) and per-day trading flags."""

    # Creates default Stock
    def __init__(self, sid):
        self.sid = sid                 # platform security identifier
        self.weight = 0                # target portfolio weight
        self.signal_line = list()      # NOTE(review): unused — RVI keeps its own
        self.should_trade = False      # re-armed daily in before_trading_start
        self.sentiment = False         # True after a bullish RVI crossover
        self.rvi = RVI(self.sid, '1m', 4, 19)      # Relative Vigor Index state
        self.rsi = RSI(self.sid, '1m', 20)         # Relative Strength Index state
        self.bbands = BBands(self.sid, '1m', 20)   # Bollinger Bands state
        pass

    # Stops trading instrument
    def stop_trading(self):
        self.should_trade = False
        pass

    # Print out Stock info (most lines intentionally left commented out)
    def print_stock(self):
        #print ('SID: ' + str(self.sid))
        #print ('Price History: ' + str(self.minute_history))
        #print ('Numerators: ' + str(self.rvi.numerators))
        #print ('Denominators: ' + str(self.rvi.denominators))
        #print ('Weight: ' + str(self.weight))
        #print ('RVIS: ' + str(self.rvi.rvis))
        #print ('Signal Line: ' + str(self.rvi.signal_line))
        #print ('Tradable: ' + str(self.should_trade))
        #print ('RSI price history: ' + str(self.rsi.price_history))
        print ('RSI: ' + str(self.rsi.rsi))
        print ('BBANDS' + str(self.bbands.bbands))
        pass
# RSI Class
# RSI Class
class RSI:
    """Relative Strength Index via talib; only the latest value is kept."""

    # Create a new RSI
    def __init__(self, sid, unit, sample):
        self.sid = sid               # platform security identifier
        self.price_history = list()  # latest close prices, filled on demand
        self.unit = unit             # candle size, e.g. '1m'
        self.sample = sample         # lookback window for price history
        self.rsi = list()            # holds at most one (latest) RSI value
        pass

    # Get Price history based on initialization variables
    def get_price_history(self, data):
        # sample + 2 bars of close prices.
        self.price_history = data.history(self.sid, 'close', self.sample + 2, self.unit)
        pass

    # Calculate RSI
    def get_rsi(self, data):
        """Refresh prices and append the newest RSI, trimming to one entry.

        NOTE(review): talib.RSI is called without a timeperiod argument, so
        talib's default period is used rather than ``sample`` — confirm
        this is intended.
        """
        self.get_price_history(data)
        self.rsi.append(talib.RSI(self.price_history)[-1])
        if(len(self.rsi) == 2):
            self.rsi.pop(0)  # drop the previous value, keep only the newest
            pass
        pass
# RVI Class
# RVI Class
class RVI:
    """Relative Vigor Index state for one security.

    Numerators are smoothed close-open differences and denominators
    smoothed high-low differences; the RVI is the ratio of their
    ``select_period``-sample moving averages, and the signal line is a
    (a + 2b + 2c + d) / 6 weighted average of the last four RVI values.
    """

    def __init__(self, sid, unit, sample, select_period):
        # SID
        self.sid = sid
        # Price history candle type
        self.unit = unit
        # Number of bars fetched per update (4: the smoothing window)
        self.sample = sample
        # Price History.  NOTE(review): typo — get_price_history assigns
        # ``price_history``, so this attribute is never read afterwards.
        self.prce_history = list()
        # Period Counter
        self.period = 0
        # RVI Select Period (SMA length for numerators/denominators)
        self.select_period = select_period
        # RVI numerators
        self.numerators = list()
        # RVI denominators
        self.denominators = list()
        # RVI History
        self.rvis = list()
        # Signal line (at most one, the latest, value)
        self.signal_line = list()
        pass

    # Get price history (OHLC bars used by get_ohlc_difference)
    def get_price_history(self, data):
        self.price_history = data.history(self.sid, ['open', 'high', 'low', 'close'], self.sample, self.unit)
        pass

    # Returns OHLC difference - Numerator and Denominator for RVI Calculation
    def get_ohlc_difference(self, minute_history, ohlc_type1, ohlc_type2):
        """Symmetrically weighted average (a + 2b + 2c + d) / 6 of the
        ohlc_type1 - ohlc_type2 difference over the last four bars
        (a = newest).  Assumes ``minute_history`` has exactly 4 rows so
        each slice below selects a single element — TODO confirm."""
        # Assign corresponding locals a, b, c and d (newest to oldest).
        for i in range(4):
            if (i == 0):
                a = (minute_history[3:][ohlc_type1] - minute_history[3:][ohlc_type2])
            if (i == 1):
                b = (minute_history[2:-1][ohlc_type1] - minute_history[2:-1][ohlc_type2])
            if (i == 2):
                c = (minute_history[1:-2][ohlc_type1] - minute_history[1:-2][ohlc_type2])
            if (i == 3):
                d = (minute_history[:-3][ohlc_type1] - minute_history[:-3][ohlc_type2])
        # Formula = ( a + (2 * b) + (2 * c) + d ) / 6
        differences = ((float(a)+(2*float(b))+(2*float(c))+float(d))/6)
        differences = check_data(differences)
        return differences

    def get_factors(self, stock, ohlc_type1, ohlc_type2):
        # Thin wrapper operating on the freshly fetched price history.
        return self.get_ohlc_difference(stock.rvi.price_history, ohlc_type1, ohlc_type2)
        pass

    # Performs Numerator and Denominator calculations for RVI
    def update_rvi_variables(self, stock):
        """Advance the period counter; once ``select_period`` samples are
        collected, return SMA(numerators) / SMA(denominators) and drop the
        oldest sample.  Returns 0.0 while still warming up."""
        stock.rvi.period += 1
        # Check if there are enough samples for the select_period SMAs.
        if (stock.rvi.period == stock.rvi.select_period):
            # Moving average for select_period
            num_sma = np.average(stock.rvi.numerators)
            den_sma = np.average(stock.rvi.denominators)
            # Remove oldest
            stock.rvi.numerators.pop(0)
            stock.rvi.denominators.pop(0)
            stock.rvi.period -= 1
            # Return RVI
            return(num_sma/den_sma)
        else:
            return float()
        pass

    # Get Signal Line for RVI
    def get_rvi_signal_line(self):
        """Weighted average (a + 2b + 2c + d) / 6 of the last four RVI
        values (a = newest); only the latest signal value is kept."""
        if (len(self.signal_line) == 1):
            self.signal_line.pop(0)
        a = self.rvis[3:][0]
        b = self.rvis[2:3][0]
        c = self.rvis[1:2][0]
        d = self.rvis[:1][0]
        self.signal_line.append(check_data(float(a)+(2*float(b))+(2*float(c))+float(d))/6)
        pass
    pass
# Bollinger Band Class
# Bollinger Band Class
class BBands:
    """Bollinger Bands (upper/middle/lower) via talib over ``sample`` closes."""

    def __init__(self, sid, unit, sample):
        # SID
        self.sid = sid
        # Price history unit (candle size, e.g. '1m')
        self.unit = unit
        # Price history sample size (also the band period)
        self.sample = sample
        # Bollinger Bands: [upper, middle, lower] after get_bbands()
        self.bbands = list()
        pass

    def get_price_history(self, data):
        # sample + 2 bars of close prices.
        self.price_history = data.history(self.sid, 'close', self.sample + 2, self.unit)
        pass

    def get_bbands(self, data):
        """Fetch prices and store the latest band triple."""
        self.get_price_history(data)
        upper, middle, lower = talib.BBANDS(
            self.price_history,
            timeperiod = self.sample,
            nbdevup = 2,   # 2 standard deviations above
            nbdevdn = 2,   # 2 standard deviations below
            matype = 0)    # simple moving average
        self.bbands = [upper[-1], middle[-1], lower[-1]]
        pass
    pass
def initialize(context):
    """
    Called once at the start of the algorithm.
    """
    # Stop opening positions near the close.  NOTE(review): the comment
    # said "last 30 minutes" but this schedules 15 minutes before close.
    schedule_function(stop_trading, date_rules.every_day(), time_rules.market_close(minutes=15))
    # Record variables
    schedule_function(my_record_vars, date_rules.every_day(), time_rules.market_close())
    # Create TVIX
    tvix = Stock(sid(40515))
    # Create XIV.  NOTE(review): the variable is named ``vix`` but holds
    # sid 40516, which my_record_vars records under the name XIV.
    vix = Stock(sid(40516))
    # Number of minute bars to accumulate before updating indicators.
    context.enough_data = 4
    # Security list
    context.securities = [tvix, vix]
    set_benchmark(context.securities[0].sid)
    # NOTE(review): duplicate call — likely unintentional.
    set_benchmark(context.securities[0].sid)
    # Minute timer for when to execute updates
    context.count = 0
    pass
def before_trading_start(context, data):
    """Called every day before market open: re-arm trading for all stocks."""
    for security in context.securities:
        security.should_trade = True
def my_assign_weights(context, stock):
    """
    Assign weights to securities that we want to order.
    """
    # Latest indicator values.  rvis[3:][0] is the newest retained RVI;
    # it requires len(rvis) > 3, which handle_data's guard guarantees.
    rvi = stock.rvi.rvis[3:][0]
    signal_line = stock.rvi.signal_line[0]
    rsi = stock.rsi.rsi          # NOTE(review): fetched but not used yet
    bbands = stock.bbands.bbands # NOTE(review): fetched but not used yet
    # Signals
    sentiment = stock.sentiment
    rvi_signal = (signal_line > rvi)  # signal line above the RVI
    # Add RSI
    # Add BBands
    #stock.print_stock()
    # Equal-weight allocation across all tracked securities.
    default_weight = (float(1)/float(len(context.securities)))
    # Two-step entry: the first crossover only arms ``sentiment``; a
    # second consecutive crossover allocates.  (``&`` is the bitwise
    # operator, but it behaves like ``and`` on Python bools.)
    if (rvi_signal & sentiment):
        stock.weight = default_weight
        #print ('bullish')
    elif (rvi_signal):
        stock.sentiment = True
    else:
        stock.weight = 0
        stock.sentiment = False
        #print ('bearish')
    pass
def my_rebalance(context, stock, data):
    """
    Execute orders according to our schedule_function() timing.
    """
    # Order only when the security is tradable, has no open orders, is
    # enabled for the day, and free cash remains.  (``&`` is bitwise but
    # each operand is parenthesized to a bool, so it acts like ``and``.)
    if ((data.can_trade(stock.sid)) &
            (len(get_open_orders(stock.sid)) == 0) &
            (stock.should_trade) &
            (context.portfolio.cash > 0)):
        order_target_percent(stock.sid, stock.weight)
    pass
def my_record_vars(context, data):
    """
    Plot variables at the end of each day.
    """
    # securities[0] is TVIX; securities[1] holds sid 40516, recorded here
    # under the name XIV.
    record(TVIX = data.current(context.securities[0].sid, 'price'),
           XIV = data.current(context.securities[1].sid, 'price'))
    pass
def handle_data(context, data):
    """
    Called every minute.
    """
    # Minute timer: only refresh indicators every ``enough_data`` (4) minutes.
    context.count += 1
    if(context.count == context.enough_data):
        context.count = 0 # reset timer
        for stock in context.securities:
            stock.rvi.get_price_history(data)
            stock.rsi.get_rsi(data)
            stock.bbands.get_bbands(data)
            # RVI numerator: smoothed close-open; denominator: smoothed high-low.
            stock.rvi.numerators.append(stock.rvi.get_factors(stock, 'close', 'open'))
            stock.rvi.denominators.append(stock.rvi.get_factors(stock, 'high', 'low'))
            stock.rvi.rvis.append(check_data(stock.rvi.update_rvi_variables(stock)))
            if ((len(stock.rvi.rvis) > 3)): # If there is enough data for Signal Line Calculation
                stock.rvi.get_rvi_signal_line()
                my_assign_weights(context, stock)
                my_rebalance(context, stock, data)
                stock.rvi.rvis.pop(0)  # keep a rolling window of 4 RVI values
            else:
                pass
            #stock.print_stock()
    pass
def check_data(data):
    """Sanitize indicator values before they enter RVI/signal math.

    Lists: return a copy with falsy entries (None, 0, '') and NaN floats
    removed.  Scalars: returned unchanged, except that None and NaN map
    to 0.0 so downstream arithmetic never sees a missing value.

    Implements the original "TODO: Need to check for Nan, None": NaN is
    truthy in Python, so the old plain truthiness test let it through.
    """
    def _usable(item):
        # NaN compares truthy, so it must be filtered explicitly.
        if isinstance(item, float) and math.isnan(item):
            return False
        return bool(item)

    if isinstance(data, list):
        return [item for item in data if _usable(item)]
    elif data is not None:
        if isinstance(data, float) and math.isnan(data):
            return float()
        return data
    else:
        return float()
def stop_trading(context, data):
    """End-of-day hook: flag every tracked security as non-tradable."""
    for security in context.securities:
        security.stop_trading()
| true
|
1ad601c9f25f39033ef7bbb2efb18cc50d218597
|
Python
|
anoopch/PythonExperiments
|
/Lab_Ex_20_biggest_of_three_nos_set.py
|
UTF-8
| 212
| 3.03125
| 3
|
[] |
no_license
|
# Count how many of three entered numbers are distinct.
a = float(input('Enter first number : '))
b = float(input('Enter second number : '))
c = float(input('Enter third number : '))
# BUG FIX (resolves the original "TODO - Pending error"): set(a, b, c)
# raised TypeError because set() accepts a single iterable; a set literal
# builds the set of the three values directly.
d = {a, b, c}
count = len(d)
print('Unique numbers - ', count)
| true
|
c41c0369aa864e32cd2d42bc481c152d96caf39e
|
Python
|
Chandramani/learningApacheMahout
|
/src/python/chapter3/src/CategoricalFeatureToPercentages.py
|
UTF-8
| 549
| 3.125
| 3
|
[] |
no_license
|
__author__ = 'ctiwary'

# NOTE(review): Python 2 source — the bare ``print`` statement below is a
# syntax error under Python 3.
import pandas as pd

class CategoricalFeatureToPercentages:
    # The class body runs at import time: it loads the merged adult dataset
    # and prints a column-normalized crosstab (income class vs. education,
    # as fractions of each education column).
    def __init__(self):
        pass
    df = pd.read_csv("../../../../data/chapter3/adult.data.merged.csv")
    # df['IncomeGreaterThan50K'] = df['IncomeGreaterThan50K'].astype('category')
    # df['education'] = df['education'].astype('category')
    # # print df[['IncomeGreaterThan50K','education']].describe()
    print pd.crosstab(df['IncomeGreaterThan50K'],df['education']).apply(lambda r: r/r.sum(), axis=0)
    # Replace the categories with percentages
| true
|
0390df1029c9d786949eb6d4aacada591e553989
|
Python
|
rikithreddy/img-to-ascii
|
/log.py
|
UTF-8
| 799
| 2.671875
| 3
|
[] |
no_license
|
import logging
from constants import (
DEFAULT_LOG_LEVEL,
DEFAULT_LOG_FILE_PATH,
DEFAULT_LOG_FORMAT
)
def setup_logger(
        path_to_logfile=DEFAULT_LOG_FILE_PATH,
        level=DEFAULT_LOG_LEVEL,
        format=DEFAULT_LOG_FORMAT
):
    '''
    Apply the basic root-logger configuration for this application.

    Parameters
    ----------
    path_to_logfile - destination log file (truncated on every run).
    level - logging level for the root logger.
    format - record format string.
    '''
    # filemode='w' starts each run with a fresh log file.
    logging.basicConfig(
        filename=path_to_logfile,
        filemode='w',
        level=level,
        format=format,
    )
| true
|
5a1647234cd919237145280b7c5e41142430b591
|
Python
|
eqasim-org/jakarta
|
/data/spatial/utils.py
|
UTF-8
| 2,362
| 2.578125
| 3
|
[] |
no_license
|
import shapely.geometry as geo
import numpy as np
from tqdm import tqdm
import geopandas as gpd
import pandas as pd
from sklearn.neighbors import KDTree
import multiprocessing as mp
def to_gpd(df, x = "x", y = "y", crs = {"init" : "EPSG:5330"}):
    """Turn a DataFrame with coordinate columns into a GeoDataFrame.

    Builds a Point per row from columns *x* and *y*, tags the frame with
    *crs* and reprojects to EPSG:5330 when a different CRS was given.
    NOTE(review): the mutable dict default is safe only because it is
    never mutated here; a None sentinel would be more defensive.
    """
    df["geometry"] = [
        geo.Point(*coord) for coord in tqdm(
            zip(df[x], df[y]), total = len(df),
            desc = "Converting coordinates"
        )]
    df = gpd.GeoDataFrame(df)
    df.crs = crs
    # Normalize everything to the project CRS.
    if not crs == {"init" : "EPSG:5330"}:
        df = df.to_crs({"init" : "EPSG:5330"})
    return df
def impute(df_points, df_zones, point_id_field, zone_id_field, fix_by_distance = True, chunk_size = 10000):
    """Attach each point's containing zone id via a chunked spatial join.

    Points not contained in any zone are optionally assigned the zone with
    the nearest centroid (fix_by_distance).  Returns the original frame
    with *zone_id_field* merged in on *point_id_field*.
    """
    assert(type(df_points) == gpd.GeoDataFrame)
    assert(type(df_zones) == gpd.GeoDataFrame)
    assert(point_id_field in df_points.columns)
    assert(zone_id_field in df_zones.columns)
    assert(not zone_id_field in df_points.columns)
    # Keep the full frame for the final merge; join on trimmed copies.
    df_original = df_points
    df_points = df_points[[point_id_field, "geometry"]]
    df_zones = df_zones[[zone_id_field, "geometry"]]
    print("Imputing %d zones into %d points by spatial join..." % (len(df_zones), len(df_points)))
    result = []
    # Chunking keeps sjoin's memory footprint bounded on large point sets.
    chunk_count = max(1, int(len(df_points) / chunk_size))
    for chunk in tqdm(np.array_split(df_points, chunk_count), total = chunk_count):
        result.append(gpd.sjoin(df_zones, chunk, op = "contains", how = "right"))
    df_points = pd.concat(result).reset_index()
    if "left_index" in df_points: del df_points["left_index"]
    if "right_index" in df_points: del df_points["right_index"]
    # Points that fell outside every zone have a NaN zone id.
    invalid_mask = np.isnan(df_points[zone_id_field])
    if fix_by_distance and np.any(invalid_mask):
        print("  Fixing %d points by centroid distance join..." % np.count_nonzero(invalid_mask))
        # Nearest-centroid lookup via a KD-tree over zone centroids.
        coordinates = np.vstack([df_zones["geometry"].centroid.x, df_zones["geometry"].centroid.y]).T
        kd_tree = KDTree(coordinates)
        df_missing = df_points[invalid_mask]
        coordinates = np.vstack([df_missing["geometry"].centroid.x, df_missing["geometry"].centroid.y]).T
        indices = kd_tree.query(coordinates, return_distance = False).flatten()
        df_points.loc[invalid_mask, zone_id_field] = df_zones.iloc[indices][zone_id_field].values
    return pd.merge(df_original, df_points[[point_id_field, zone_id_field]], on = point_id_field, how = "left")
| true
|
aeff93e74b9917894f8f6c4d1a9e81ca8ca667f2
|
Python
|
AleksTk/table-logger
|
/examples.py
|
UTF-8
| 1,052
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import time
import random
import math
from datetime import datetime
from table_logger import TableLogger
def print_simple():
    """Log a few rows with auto-detected column formatting."""
    tpl = TableLogger(columns='a,b,c,d')
    tpl(1, 'Row1', datetime.now(), math.pi)
    tpl(2, 'Row2', datetime.now(), 1 / 3)
    tpl(3, 'Row3', datetime.now(), random.random())
def print_time_delta():
    """Demonstrate the built-in rownum/time_delta/timestamp columns.

    NOTE(review): sleeps up to 3 s per row, so this runs for several
    seconds by design.
    """
    tpl = TableLogger(columns='data', rownum=True, time_delta=True, timestamp=True)
    for e in 'abcde':
        time.sleep(random.randint(0, 3))
        tpl(e)
def print_file_info():
    """Log name, creation time, modification time and size (bytes) for
    every entry in the current directory."""
    logger = TableLogger(columns='file,created,modified,size')
    for entry in os.listdir('.'):
        created = datetime.fromtimestamp(os.path.getctime(entry))
        modified = datetime.fromtimestamp(os.path.getmtime(entry))
        logger(entry, created, modified, os.stat(entry).st_size)
if __name__ == "__main__":
print_simple()
print_file_info()
print_time_delta()
| true
|
9d26dfe17199458f7d5e7d009da0458215f6ded2
|
Python
|
alter4321/Goodline_test_app_dev
|
/pas_gen.py
|
UTF-8
| 4,023
| 3.65625
| 4
|
[] |
no_license
|
import random
"""
Объявляется функция генерации паролей. На
первом цикле добавляется по одному из необходимых
символов, на втором случайно добавляются символы,
пока не наберется 12. В конце получившийся пароль
перемешивается.
"""
def generate_password(length=12):
    """Generate a random password of *length* characters (default 12).

    The result is guaranteed to contain at least one digit, one lowercase
    letter, one uppercase letter and one special character; the remaining
    slots are filled by first picking a character class at random and then
    a character from it (same distribution as the original implementation).
    The whole password is shuffled so the guaranteed characters do not
    cluster at the front.

    Precondition: length >= 4 (one slot per required character class).

    Fixes: the original first while-loop iteration (var == 0) matched no
    branch and did nothing; the length 12 was hard-coded and is now a
    backward-compatible parameter.
    """
    digits = '1234567890'
    lower = 'abcdefghijklmnopqrstuvwxyz'
    upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    special = '!@#$%^&*()-+'
    # one guaranteed character from every required class
    chars = [
        random.choice(digits),
        random.choice(lower),
        random.choice(upper),
        random.choice(special),
    ]
    # fill the remaining slots: random class, then random character from it
    pools = (digits, lower, upper, special)
    chars.extend(random.choice(random.choice(pools)) for _ in range(length - 4))
    # shuffle so the guaranteed characters end up in random positions
    random.shuffle(chars)
    return ''.join(chars)
# Сама программа для анализа введенного пароля
# на соответсие требованиям.
while True:
esc = 'Чтобы завершить введите "ok"\n'
s = str(input('Придумайте пароль!'
'(Чтобы завершить введите "ok")\n'))
if s == 'ok':
break
allow_digits = '1234567890'
allow_lower = 'abcdefghijklmnopqrstuvwxyz'
allow_upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
allow_specials = '!@#$%^&*()-+'
attention = 'Ненадёжный пароль:'
count_upper = 0
count_lower = 0
count_digit = 0
count_special = 0
bad_symbol = 0
for a in s:
if a.isupper():
count_upper += 1
if a.islower():
count_lower += 1
if a.isdigit():
count_digit += 1
for special in allow_specials:
if a == special:
count_special += 1
if a not in (allow_digits + allow_lower +
allow_upper + allow_specials):
bad_symbol += 1
if count_digit < 1:
attention += '\n-Необходимо добавить хотя бы 1 цифру!'
if count_lower < 1:
attention += '\n-Необходимо добавить хотя бы 1 букву'
if count_upper < 1:
attention += '\n-Необходимо добавить хотя бы 1 заглавную букву!'
if count_special < 1:
attention += '\n-Необходимо добавить хотя бы' \
' 1 спецсимвол " ! @ # $ % ^ & * ( ) - + "'
if len(s) < 12:
attention += '\n-Необходимо, чтобы пароль был не короче 12 ' \
'символов! Ваш текущий пароль = '\
+ str(len(s)) + ' символов!'
if bad_symbol > 0:
attention += '\n-Недопустимые символы! Используйте цифры 0-9,' \
' латинские буквы \nи специальные символы ' \
'" ! @ # $ % ^ & * ( ) - + "!'
if count_digit >= 1 and count_lower >= 1 and count_upper >= 1 \
and count_special >= 1 and len(s) >= 12 and bad_symbol == 0:
print('Этот пароль надёжный!')
break
else:
print(attention + '\n\nРекомендуемый пароль: ' + generate_password())
| true
|
d389ac339eb3ef40e8d3cb1e6cce6e8103572fa4
|
Python
|
sritchie/catenary
|
/catenary/symbolic.py
|
UTF-8
| 5,016
| 2.890625
| 3
|
[] |
no_license
|
"""Sympy version!"""
import json
import pickle
from functools import partial
import jax as j
import jax.numpy as np
import sympy as s
from jax.config import config
from pkg_resources import resource_filename
from sympy.utilities.lambdify import (MODULES, NUMPY, NUMPY_DEFAULT,
NUMPY_TRANSLATIONS)
import catenary.single as cs
# Attempt to get this working more quickly. I think it does!
MODULES['jax'] = (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, (
"import jax.numpy; from jax.numpy import *; from jax.numpy.linalg import *",
))
FILEPATH = "single_matrix_correlators.json"
PICKLEPATH = "smc.pickle"
config.update('jax_enable_x64', True)
def symbolic_path():
"""Stable path where we keep our correlators."""
return resource_filename('catenary.resources', FILEPATH)
def pickle_path():
"""Stable path where we keep our correlators."""
return resource_filename('catenary.resources', PICKLEPATH)
# Exact Solution
def generate_t2_exact():
"""Generates a symbolic expression for the exact solution to t2, given a g.
"""
g = s.symbols('g')
a_squared = 1 / (6 * g) * (s.sqrt(1 + (12 * g)) - 1)
numerator = a_squared * (4 - a_squared)
expr = numerator / 3
return expr
# This is an evaluated version of the expression.
t2_exact_expr = generate_t2_exact()
def t2_exact_sympy(g):
"""Given some exact value for g, evaluate and simplifies a symbolic version of
g.
Call evalf() on the result to get a float. For example, this stays symbolic:
t2_exact_sympy(s.Rational(12, 10))
All three of these give floats:
t2_exact_sympy(s.Rational(12, 10)).evalf()
t2_exact_sympy(1.2)
t2_exact_sympy(1.2).evalf()
"""
return s.simplify(t2_exact_expr.subs(s.symbols('g'), g))
# This version actually evaluates numerically.
t2_exact_jax = s.lambdify([s.symbols('g')], t2_exact_expr, 'jax')
# Correlators
def t_k_plus_three(k):
"""Generates the (k + 3)th term of the loop equation recursion for the single matrix
case.
"""
alpha, g = s.symbols('alpha g')
l = s.symbols('l', cls=s.Idx)
t = s.IndexedBase('t')
term = 1 / g * (s.Sum(alpha**4 * t[l] * t[k - l - 1],
(l, 0, k - 1)) - (alpha**2 * t[k + 1]))
return t[k + 3], term
def correlator_expressions(n):
"""This gets us the whole list of expressions!
This will read in strings of expressions:
s.sympify(expr_string, locals={"t": s.IndexedBase('t')})
"""
t = s.IndexedBase('t')
subs = [(t[0], 1), (t[1], t[1]), (t[2], t[2])]
for i in range(n):
k, v = t_k_plus_three(i)
subs.append((k, s.simplify(v.doit().subs(subs))))
print(i, subs[-1])
return dict(subs)
def persist_correlators(n, filename=symbolic_path()):
"""Saves correlators to JSON"""
m = correlator_expressions(n)
skv = {str(k): str(v) for k, v in m.items()}
with open(filename, 'w') as f:
json.dump(skv, f, indent=2)
def persist_to_pickle(m):
with open(pickle_path(), 'wb') as f:
pickle.dump(m, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_from_pickle():
"""This is MUCH faster. Fine if we do this a single time."""
with open(pickle_path(), 'rb') as f:
return pickle.load(f)
def load_correlators(filename=symbolic_path()):
"""Returns a dict of k => t_k generated by loading the stored data.
TODO update this to make it cache and generate.
"""
m = None
t = s.IndexedBase('t')
def parse(expr):
return s.sympify(expr, locals={"t": t})
with open(filename, 'r') as f:
pairs = json.load(f).items()
m = {parse(k).indices[0]: parse(v) for k, v in pairs}
return m
def generate_functions(m, n):
alpha, g = s.symbols('alpha g')
t = s.IndexedBase('t')
expr = [v for i, v in sorted(m.items()) if i < n]
f = s.lambdify([alpha, g, t[1], t[2]], expr, 'jax')
def ret(alpha, g, t1, t2):
"""Here's where we take care of the s1 s2 conversion."""
s1 = np.multiply(t1, alpha)
s2 = np.multiply(t2, np.square(alpha))
return np.array(f(alpha, g, s1, s2))
return j.jit(ret, static_argnums=(0, 1, 2))
def mpmath_fn(m, n):
alpha, g = s.symbols('alpha g')
t = s.IndexedBase('t')
expr = [v for i, v in sorted(m.items()) if i < n]
f = s.lambdify([alpha, g, t[1], t[2]], expr, 'mpmath')
def ret(alpha, g, t1, t2):
"""Here's where we take care of the s1 s2 conversion."""
s1 = np.multiply(t1, alpha)
s2 = np.multiply(t2, np.square(alpha))
return np.array(f(alpha, g, s1, s2))
return ret
MAX_N = 20
single_matrix_correlators = generate_functions(load_from_pickle(),
2 * MAX_N - 1)
@partial(j.jit, static_argnums=(0, 1, 2, 3))
def inner_product_matrix(alpha, g, t1, t2):
"""Returns the... inner product matrix of correlators for the single matrix
model.
We do NOT need to do the transformation of t2 by alpha for this, vs the
recursive solution. Since that happens already inside the equations.
"""
xs = single_matrix_correlators(alpha, g, t1, t2)
return cs.sliding_window_m(xs, MAX_N)
| true
|
388f770f6c2a04335edd19866751263cdba0b7ef
|
Python
|
AgnieszkaWojno/KonwersjaTypy-Python
|
/venv/NaBinarny.py
|
UTF-8
| 284
| 3.671875
| 4
|
[] |
no_license
|
def convert_to_bin (digit):
    """Return the binary digits of non-negative integer *digit*, most
    significant bit first, as a list of 0/1 ints.

    Fixes: removed leftover debug code that printed an unrelated list
    ([8, 5]) on every call, and made convert_to_bin(0) return [0] instead
    of an empty list.
    """
    if digit == 0:
        return [0]
    bits = []
    while digit > 0:
        bits = [digit % 2] + bits  # prepend remainder: builds MSB-first
        digit //= 2                # keep the integer quotient
    return bits
print("liczba binarna 125=",convert_to_bin(125))
| true
|
2615ed97a15e34a83459a58eb9e699ceccccb9b2
|
Python
|
TheTurtle3/Python-Mega-Course
|
/Section 6 - User Input/str_format.py
|
UTF-8
| 65
| 2.65625
| 3
|
[] |
no_license
|
def foo(string):
    """Return the greeting "Hi <string>" built with %-formatting."""
    return "Hi %s" % string
| true
|
ed2a11b5bd5a26c3c400dd29db982c0d73e89038
|
Python
|
Aohanseven/Fql_Project
|
/Scrapys/yaozh/yaozh/pipelines.py
|
UTF-8
| 2,338
| 2.640625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
import redis
import pandas as pd
from scrapy.conf import settings
from scrapy.exceptions import DropItem
redis_db = redis.Redis(host='127.0.0.1', port=6379, db=4)
redis_data_dict = "hospital_id"
#通过redis进行去重
class DuplicatesPipeline(object):
    """Drop items whose ``id`` is already known, using a Redis hash as the
    seen-set seeded from the MySQL ``hospital_data`` table.

    NOTE(review): the class attributes below (including the MySQL
    connection) are evaluated once, at import time of this module.
    """
    host = settings['MYSQL_HOST']
    user = settings['MYSQL_USER']
    psd = settings['MYSQL_PASSWORD']
    db = settings['MYSQL_DB']
    c = settings['CHARSET']
    port = settings['MYSQL_PORT']
    # shared connection, opened when the class is defined
    conn = pymysql.connect(host=host, user=user, password=psd, db=db, charset=c, port=port)
    def __init__(self):
        # NOTE(review): flushdb() empties the Redis db first, so the
        # hlen() == 0 check below always succeeds and the hash is rebuilt
        # from MySQL on every start — confirm that is intended.
        redis_db.flushdb()
        if redis_db.hlen(redis_data_dict) == 0:
            # seed the seen-set with every id already stored in MySQL
            sql = "SELECT id FROM hospital_data"
            df = pd.read_sql(sql, self.conn)
            # 'id' shadows the builtin — rename if this class is reworked
            for id in df['id'].get_values():
                redis_db.hset(redis_data_dict, id, 0)
    def process_item(self, item, spider):
        # Reject any item whose id is already present in the Redis hash.
        if redis_db.hexists(redis_data_dict, item['id']):
            raise DropItem("Duplicate book found:%s" % item)
        return item
#存入mysql数据库
class DBPipeline(object):
    """Persist each scraped item as a row of the MySQL ``hospital_data``
    table (parameterized INSERT, so values are escaped by the driver)."""
    def process_item(self, item, spider):
        # NOTE(review): a new MySQL connection is opened per item —
        # consider opening it once in open_spider() for efficiency.
        host = settings['MYSQL_HOST']
        user = settings['MYSQL_USER']
        psd = settings['MYSQL_PASSWORD']
        db = settings['MYSQL_DB']
        c = settings['CHARSET']
        port = settings['MYSQL_PORT']
        con=pymysql.connect(host=host,user=user,passwd=psd,db=db,charset=c,port=port)
        cue=con.cursor()
        print("mysql connect succes")
        try:
            # placeholders (%s) keep this safe against SQL injection
            cue.execute("INSERT INTO hospital_data(id, hospital_name, grade, hospital_type, create_year, Bed_number, outpatient_number, department_number, personnel_number, address) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(item['id'],item['hospital_name'],item['grade'],item['hospital_type'],item['create_year'],item['Bed_number'],item['outpatient_number'],item['department_number'],item['personnel_number'],item['address']))
            print("Insert success")
        except Exception as e:
            # undo the failed statement and keep the crawl going
            print("Insert error:",e)
            con.rollback()
        else:
            # commit only when the INSERT succeeded
            con.commit()
        con.close()
        return item
| true
|
a8254c8fa4faec089504d9e4783f7f105ac07a92
|
Python
|
Karom45/NASAmet
|
/NASAmet/apps/meteorites/models.py
|
UTF-8
| 1,558
| 2.515625
| 3
|
[] |
no_license
|
from django.db import models
import datetime
# Create your models here.
class Classes(models.Model):
    """Meteorite classification — lookup table referenced by
    Meteorite.recclass_id."""
    class Meta:
        db_table = 'Classes'
        verbose_name = 'Класс метеорита'
        verbose_name_plural = 'Классы метеоритов'
    # NOTE(review): unique=True and default=0 on an AutoField primary key
    # are redundant — AutoFields are already unique and auto-assigned.
    classes_id = models.AutoField('id', primary_key=True, db_column='id' ,unique= True , default = 0)
    # human-readable class name (shown by __str__ / in the admin)
    class_name = models.CharField('Название', max_length=100, db_column='class_name')
    # free-form additional information about the class
    information = models.TextField('Дополнительная информация', db_column='information')
    def __str__(self):
        # display representation used throughout the admin
        return self.class_name
class Meteorite(models.Model):
    """A single recorded meteorite (NASA dataset row): name, class,
    mass, fall status, date and landing coordinates."""
    class Meta:
        db_table = 'Meteorites'
        verbose_name = 'Метеорит'
        verbose_name_plural = 'Метеориты'
    meteorite_id = models.AutoField('id' ,primary_key=True, db_column='id')
    name = models.CharField('Название метеорита' ,max_length=100, db_column='name')
    # recclass_id = models.IntegerField('Класс', db_column='recclass_id')
    # FK to the Classes lookup table (replaces the plain integer above)
    recclass_id = models.ForeignKey('Classes' , db_column='recclass_id', on_delete = models.CASCADE)
    # mass in grams — the column name literally contains "(g)"
    mass = models.FloatField('Масса', db_column='mass (g)')
    # fall status string, e.g. whether the meteorite fell or was found
    fall = models.CharField('Статус' , max_length=10, db_column='fall')
    year = models.DateTimeField('Дата', db_column='year')
    # landing latitude / longitude
    reclat = models.FloatField('Широта',db_column='reclat')
    reclong = models.FloatField('Долгота', db_column='reclong')
    def __str__(self):
        return self.name
| true
|
b5521f90f54f42deba342cdfd16f6da2bb9e2b3a
|
Python
|
kyoonkwon/TTT
|
/parse.py
|
UTF-8
| 2,991
| 2.671875
| 3
|
[] |
no_license
|
import openpyxl
from urllib.request import urlopen
from bs4 import BeautifulSoup
c_wb = openpyxl.load_workbook('./course.xlsx')
c_sheet = c_wb["Courses Offered"]
'''
과목번호 과목명 AU 강:실:학 담당교수 강의시간 강의실 시험시간 성적 널널 강의 종합
'''
courses = []
for row in c_sheet.rows:
row_value = []
for cell in row:
row_value.append(cell.value)
courses.append(row_value)
c_wb.close()
print("otl searching....")
tot = len(courses)
with open("db_rating_by_prof.csv", 'w', encoding='utf-8') as f:
for i, course in enumerate(courses):
if(i == 0): continue
code = course[0]
course_name = course[1]
prof_name = course[4]
if("개별연구" in course_name or "졸업연구" in course_name or "논문연구" in course_name or "URP" in course_name):
sung, nul, kang, jong = "?", "?", "?", "?"
else:
html = urlopen("https://otl.kaist.ac.kr/review/result/?q={}&type=ALL&department=ALL&grade=ALL&semester=NOW".format(code))
bsObject = BeautifulSoup(html, "html.parser")
scores = bsObject.find_all("div", {"class":"col-xs-12 col-sm-6 score-elem"})
if(len(scores) < 4):
sung, nul, kang, jong = "?", "?", "?", "?"
else:
course_id = int(str(bsObject.find("input", {"name":"course_id"})).split("value=")[1][1:-3])
course_html = urlopen("https://otl.kaist.ac.kr/review/result/course/{}/-1/".format(course_id))
course_bsObject = BeautifulSoup(course_html, "html.parser")
prof_list = course_bsObject.find_all("span", {"class": "professors"})
for prof in prof_list:
if(prof_name in str(prof)):
review_uri = str(prof.find("a")).split('"')[1]
break
review_html = urlopen("https://otl.kaist.ac.kr{}".format(review_uri))
review_bsObject = BeautifulSoup(review_html, "html.parser")
scores_by_prof = review_bsObject.find_all("div", {"class":"col-xs-12 col-sm-6 score-elem"})
sung = scores_by_prof[0].text.split()[-1]
nul = scores_by_prof[1].text.split()[-1]
kang = scores_by_prof[2].text.split()[-1]
jong = scores_by_prof[3].text.split()[-1]
course[8] = sung
course[9] = nul
course[10] = kang
course[11] = jong
print("{}/{} {} {} {} {} {} {}".format(str(i), tot , course[1] , course[4] , course[8] , course[9] , course[10] , course[11]))
for j in range(12):
text = course[j]
if(text == None): text = " "
text = text.replace(",", "/")
text = text.replace("_x000D_\n", "/")
text = text.replace(u'\xa0', ' ')
f.write(text)
if(j != 11): f.write(",")
f.write("\n")
| true
|
44a4cdb8bd957dff8af976eaf0ae20fa6ec12f50
|
Python
|
falondarville/practicePython
|
/guessinggame.py
|
UTF-8
| 863
| 4.6875
| 5
|
[] |
no_license
|
# Generate a random number between 1 and 9 (including 1 and 9). Ask the user to guess the number, then tell them whether they guessed too low, too high, or exactly right.
import random

# BUG FIX: randint(1, 10) could pick 10, contradicting the 1-9 prompt;
# randint's upper bound is inclusive, so the correct call is randint(1, 9).
random_number = random.randint(1, 9)
user_guesses = 0
print("Please choose a number between 1 and 9. Enter 'exit' to stop playing")
while True:
    user_input = input()
    # 'exit' is honoured at every prompt — the original crashed with a
    # ValueError when 'exit' was typed after a wrong guess.
    if user_input == "exit":
        break
    try:
        guess = int(user_input)
    except ValueError:
        # non-numeric, non-'exit' input no longer crashes the game
        print("Please enter a number between 1 and 9, or 'exit'.")
        continue
    user_guesses += 1
    if guess < random_number:
        print("You guessed too low.")
    elif guess > random_number:
        print("You guessed too high.")
    else:
        print("That's the same number the computer chose.")
        print(f"You guessed a total of {user_guesses} times.")
        break
| true
|
e0a26975717a4560f4f184dfd94261cb0577016c
|
Python
|
ColeRichardson/CSC148
|
/mini-exercises/mini exercise 2/chain.py
|
UTF-8
| 4,474
| 4.09375
| 4
|
[] |
no_license
|
# Exercise 2, Task 2- A Chain of People
#
# CSC148 Summer 2019, University of Toronto
# Instructor: Sadia Sharmin
# ---------------------------------------------
"""
This module contains the following classes to represent a chain of people --
Person: a person in the chain.
PeopleChain: ordered chain consisting of people.
ShortChainError: indicates chain is too short to perform action.
"""
from __future__ import annotations
class ShortChainError(Exception):
    """Raised when a PeopleChain is too short to perform the requested action."""
    pass
class Person:
    """A single person (linked-list node) in a chain.

    Attributes:
        name: this person's name.
        next: the next Person in the chain, or None if this person is last.
    """
    def __init__(self, name, next=None):
        self.name = name
        # 'next' intentionally mirrors the linked-list convention; it
        # shadows the builtin only within this constructor.
        self.next = next
class PeopleChain:
    """A chain of people.

    === Attributes ===
    leader: Person | None
        The first person in the chain, or None if the chain is empty.
    """
    # NOTE(review): Optional/List are never imported; these annotations only
    # work because `from __future__ import annotations` makes them lazy.
    leader: Optional[Person]

    def __init__(self, names: List[str]) -> None:
        """Create people linked together in the order provided in <names>.

        The leader of the chain is the first person in <names>.
        """
        if not names:
            # No leader, representing an empty chain!
            self.leader = None
            return
        self.leader = Person(names[0])
        current = self.leader
        for name in names[1:]:
            # current always refers to the LAST person linked so far
            current.next = Person(name)
            current = current.next

    # get_leader/get_second/get_third are thin wrappers around get_nth;
    # the original duplicated the traversal logic in all four methods.
    def get_leader(self) -> str:
        """Return the name of the leader of the chain.

        Raise ShortChainError if chain has no leader.

        >>> chain = PeopleChain(['Iron Man', 'Heather', 'Kevan'])
        >>> chain.get_leader()
        'Iron Man'
        >>> chain = PeopleChain([])
        >>> chain.get_leader()
        Traceback (most recent call last):
        ...
        ShortChainError
        """
        return self.get_nth(0)

    def get_second(self) -> str:
        """Return the name of the second person in the chain.

        That is, return the name of the person the leader is holding onto.

        Raise ShortChainError if chain has no second person.

        >>> chain = PeopleChain(['Iron Man', 'Heather', 'Kevan'])
        >>> chain.get_second()
        'Heather'
        >>> chain = PeopleChain(['Iron Man'])
        >>> chain.get_second()
        Traceback (most recent call last):
        ...
        ShortChainError
        """
        return self.get_nth(1)

    def get_third(self) -> str:
        """Return the name of the third person in the chain.

        Raise ShortChainError if chain has no third person.

        >>> chain = PeopleChain(['Iron Man', 'Heather', 'Kevan'])
        >>> chain.get_third()
        'Kevan'
        >>> chain = PeopleChain(['Iron Man'])
        >>> chain.get_third()
        Traceback (most recent call last):
        ...
        ShortChainError
        """
        return self.get_nth(2)

    def get_nth(self, n: int) -> str:
        """Return the name of the n-th person in the chain.

        Precondition: n >= 0

        Raise ShortChainError if chain doesn't have n people.

        >>> chain = PeopleChain(['Iron Man', 'Heather', 'Kevan'])
        >>> chain.get_nth(0)
        'Iron Man'
        >>> chain = PeopleChain(['Iron Man', 'Heather', 'Kevan'])
        >>> chain.get_nth(2)
        'Kevan'
        >>> chain = PeopleChain(['Iron Man', 'Heather', 'Kevan'])
        >>> chain.get_nth(5)
        Traceback (most recent call last):
        ...
        ShortChainError
        """
        current = self.leader
        # walk n+1 links; falling off the end means the chain is too short
        for _ in range(n + 1):
            if current is None:
                raise ShortChainError
            person, current = current, current.next
        return person.name
# NOTE(review): this runs the doctests on *import* of the module, not only
# when executed as a script — consider an `if __name__ == "__main__":` guard.
import doctest
doctest.testmod()
| true
|
8324f0938243bb7c2d61e5254e1c5ba95bec5f19
|
Python
|
shengxu0518/OpenAI-Projects
|
/cartpole/lunarlander.py
|
UTF-8
| 10,060
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 14:37:41 2019
@author: supperxxxs
"""
import numpy as np
import copy
import os
import pickle
import sys
from reinforce_lib import func_approx_library as funclib
class QLearner():
# load in simulator, initialize global variables
def __init__(self,simulator,savename,**kwargs):
# make simulator global
self.simulator = simulator
# Q learn params
self.explore_val = 1
self.explore_decay = 0.99
self.num_episodes = 500
self.gamma = 1
if "gamma" in kwargs:
self.gamma = args['gamma']
if 'explore_val' in kwargs:
self.explore_val = kwargs['explore_val']
if 'explore_decay' in kwargs:
self.explore_decay = kwargs['explore_decay']
if 'num_episodes' in kwargs:
self.num_episodes = kwargs['num_episodes']
# other training variables
self.num_actions = self.simulator.action_space.n
state = self.simulator.reset()
self.state_dim = np.size(state)
self.training_reward = []
# setup memory params
self.memory_length = 10 # length of memory replay (in episodes)
self.episode_update = 1 # when to update (in episodes)
self.memory = []
if 'memory_length' in kwargs:
self.memory_length = kwargs['memory_length']
if 'episode_update' in kwargs:
self.episode_update = kwargs['episode_update']
### initialize logs ###
# create text file for training log
self.logname = 'training_logs/' + savename + '.txt'
self.reward_logname = 'reward_logs/' + savename + '.txt'
self.weight_name = 'saved_model_weights/' + savename + '.pkl'
self.model_name = 'models/' + savename + '.json'
self.init_log(self.logname)
self.init_log(self.reward_logname)
self.init_log(self.weight_name)
self.init_log(self.model_name)
##### logging functions #####
def init_log(self,logname):
# delete log if old version exists
if os.path.exists(logname):
os.remove(logname)
    def update_log(self,logname,update):
        """Append *update* to the log at *logname*.

        Strings are appended to a text file; any other value is appended to
        a pickled list (read-modify-write of the whole file each call).
        """
        if type(update) == str:
            # text log: simple append
            logfile = open(logname, "a")
            logfile.write(update)
            logfile.close()
        else:
            # binary log: load existing list (if any), append, rewrite
            weights = []
            if os.path.exists(logname):
                with open(logname,'rb') as rfp:
                    weights = pickle.load(rfp)
            weights.append(update)
            with open(logname,'wb') as wfp:
                pickle.dump(weights, wfp)
##### functions for creating / updating Q #####
def initialize_Q(self,**kwargs):
# default parameters for network
layer_sizes = [10,10,10,10] # two hidden layers, 10 units each, by default
activation = 'relu'
if 'layer_sizes' in kwargs:
layer_sizes = kwargs['layer_sizes']
if 'activation' in kwargs:
activation = kwargs['activation']
# default parameters for optimizer - reset by hand
loss = 'mse'
self.lr = 10**(-2)
if 'alpha' in kwargs:
self.lr = kwargs['alpha']
# input / output sizes of network
input_dim = self.state_dim
output_dim = self.num_actions
# setup network
layer_sizes.insert(0,input_dim)
layer_sizes.append(output_dim)
# setup architecture, choose cost, and setup architecture
self.model = funclib.super_setup.Setup()
self.model.choose_cost(name = 'least_squares')
self.model.choose_features(layer_sizes = layer_sizes,activation = activation)
# initialize Q
self.Q = self.model.predict
# update Q function
def update_Q(self):
# generate q_values based on most recent Q
q_vals = []
states = []
for i in range(len(self.memory)):
# get episode_data
episode_data = self.memory[i]
# loop over episode data and create input/output pairs
for j in range(len(episode_data)):
# get next sample of episode
sample = episode_data[j]
# strip sample for parts
state = sample[0]
next_state = sample[1]
action = sample[2]
reward = sample[3]
done = sample[4]
### for cartpole only - check if done, and alter reward to improve learning ###
done,reward = self.check_done(done,reward)
# compute and store q value
q = reward
if done == False:
qs = self.Q(next_state.T)
q += self.gamma*np.max(qs)
# clamp all other models to their current values for this input/output pair
q_update = self.Q(state.T).flatten()
q_update[action] = q
q_vals.append(q_update)
states.append(state.T)
# convert lists to numpy arrays for regressor
s_in = np.array(states).T
q_vals = np.array(q_vals).T
s_in = s_in[0,:,:]
# take descent step
self.model.fit(s_in,q_vals,algo = 'RMSprop',max_its = 1,alpha = self.lr,verbose = False)
# update Q based on regressor updates
self.Q = self.model.predict
##### functions for adjusting replay memory #####
# update memory - add sample to list, remove oldest samples
def update_memory(self,episode_data):
# add most recent trial data to memory
self.memory.append(episode_data)
# clip memory if it gets too long
num_episodes = len(self.memory)
if num_episodes >= self.memory_length:
num_delete = num_episodes - self.memory_length
self.memory[:num_delete] = []
##### Q Learning functionality #####
# state normalizer
def state_normalizer(self,states):
states = np.array(states)[np.newaxis,:]
return states
# choose next action
    def choose_action(self,state):
        """Epsilon-greedy action selection: with probability explore_val
        return a uniformly random action, otherwise the argmax of Q(state).
        """
        # pick action at random (exploration default)
        p = np.random.rand(1)
        action = np.random.randint(self.num_actions)
        # exploit: override with the greedy action when p > explore_val.
        # NOTE(review): Q is evaluated even on exploration steps — could be
        # moved inside the if for speed.
        qs = self.Q(state.T)
        if p > self.explore_val:
            action = np.argmax(qs)
        return action
# special function to check done
def check_done(self,done,reward):
if done == True:
reward = -100
return done,reward
# main training function
def train(self,**kwargs):
### start main Q-learning loop ###
for n in range(self.num_episodes):
# pick this episode's starting position - randomly initialize from f_system
state = self.simulator.reset()
state = self.state_normalizer(state)
total_episode_reward = 0
done = False
# get out exploit parameter for this episode
if self.explore_val > 0.01:
self.explore_val *= self.explore_decay
# run episode
step = 0
episode_data = []
while done == False and step < 500:
self.simulator.render()
# choose next action
action = self.choose_action(state)
# transition to next state, get associated reward
next_state,reward,done,info = self.simulator.step(action)
next_state = self.state_normalizer(next_state)
# store data for transition after episode ends
episode_data.append([state,next_state,action,reward,done])
# update total reward from this episode
total_episode_reward+=reward
state = copy.deepcopy(next_state)
step+=1
# update memory with this episode's data
self.update_memory(episode_data)
# update Q function
if np.mod(n,self.episode_update) == 0:
self.update_Q()
# print out update
update = 'training episode ' + str(n+1) + ' of ' + str(self.num_episodes) + ' complete, ' + ' explore val = ' + str(np.round(self.explore_val,3)) + ', episode reward = ' + str(np.round(total_episode_reward,2))
self.update_log(self.logname,update + '\n')
print (update)
update = str(total_episode_reward) + '\n'
self.update_log(self.reward_logname,update)
### store this episode's computation time and training reward history
self.training_reward.append(total_episode_reward)
# save latest weights from this episode
update = self.model.weight_history[-1]
self.update_log(self.weight_name,update)
### save weights ###
update = 'q-learning algorithm complete'
self.update_log(self.logname,update + '\n')
print (update)
# imports
import gym
# savename
savename = 'Lunar_Lander_experiment_1'
# initialize simulator
simulator = gym.make('LunarLander-v2')
# initialize Q Learn process
num_episodes = 500
explore_decay = 0.995
explore_val = 1
# initialize memory
replay_length = 100
memory_length = 1000
# load into instance of learner
demo = QLearner(simulator,savename,num_episodes=num_episodes,explore_decay=explore_decay,explore_val=explore_val,memory_length=memory_length,replay_length=replay_length)
# initialize Q function
layer_sizes = [64,64,64,64]
alpha = 10**(-3)
activation = 'relu'
demo.initialize_Q(layer_sizes=layer_sizes,alpha=alpha,activation=activation)
demo.train()
| true
|
756b1ff17d5f9801c108b4c1c8c51ee3565ffa36
|
Python
|
matali1/adventofcode
|
/day02/checksum.py
|
UTF-8
| 1,288
| 3.421875
| 3
|
[
"Unlicense"
] |
permissive
|
import click
@click.command()
@click.option('--file', help='File to get the checksum')
def provide_checksum(file):
    """Read a tab-separated integer matrix from --file and print both
    Advent-of-Code day-2 checksums.

    NOTE(review): this is Python 2 code (statement-form print); the --file
    option also shadows the builtin `file` in Python 2.
    """
    if file:
        matrix = []
        with open(file) as myfile:
            for line in myfile:
                # each line is a row of tab-separated integers
                row = [int(val) for val in line.split("\t")]
                matrix.append(row)
        # print matrix
        value = checksum(matrix)
        print "********* Checksum is %d" % value
        value = checksum_evenly_distribute(matrix)
        print "********* Checksum evenly distribute is %d" % value
def checksum(matrix):
    """Return the sum over all rows of (row max - row min)."""
    return sum(max(row) - min(row) for row in matrix)
def find_evenly_distribute(row):
    """Return the first (divisor, multiple) pair in sorted(row) where the
    larger value is an exact multiple of the smaller; (0, 1) if none."""
    ordered = sorted(row)
    for i, small in enumerate(ordered):
        for big in ordered[i + 1:]:
            # zero remainder means the pair divides evenly
            if big % small == 0:
                return (small, big)
    return (0, 1)
def checksum_evenly_distribute(matrix):
    """Sum, over all rows, the quotient of the unique evenly-dividing pair.

    NOTE(review): Python 2 code (statement-form print); the two prints are
    debug output. If a row has no evenly-dividing pair the (0, 1) fallback
    makes val2 / val1 raise ZeroDivisionError — confirm inputs always have
    such a pair.
    """
    checksum = 0
    for row in matrix:
        print row
        (val1, val2) = find_evenly_distribute(row)
        print val1, val2
        # val1 divides val2 exactly, so this quotient is an integer
        checksum += (val2 / val1)
    return checksum
if __name__ == '__main__':
value = provide_checksum()
| true
|
5c9ff36d6710c334e72abc5b9b58abc8a94758bd
|
Python
|
jjspetz/digitalcrafts
|
/dict-exe/error_test.py
|
UTF-8
| 343
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
def catch_error():
    """Demo of try/except around int(input()).

    NOTE(review): this function looks unfinished and has several defects:
    - the `while 1` loop has no break or return, so it never terminates;
    - `except x == 3:` is not a valid handler — an except clause expects an
      exception class, and this expression is only evaluated if something
      other than ValueError escapes the try body;
    - `myError` is never defined, so that branch would raise NameError;
    - `x += 13` in the else branch has no observable effect.
    Behaviour left unchanged pending clarification of the intent.
    """
    while 1:
        try:
            x = int(input("Enter an integer: "))
        except ValueError:
            print("Enter an integer!")
        except x == 3:
            raise myError("This is not an integer!")
        else:
            x += 13
if __name__ == "__main__":
catch_error()
| true
|
2baeee1bd66fdba6de8af1758e90b31f58013b16
|
Python
|
w1nteRR/algoLabs
|
/Lab1/Pool.py
|
UTF-8
| 3,034
| 3.578125
| 4
|
[] |
no_license
|
import time
class Pool:
    """A swimming pool record loaded from the CSV file.

    Attributes:
        address: street address of the pool.
        volume: water volume — NOTE(review): assigned straight from the
            CSV split, so it is a *string* here; comparisons elsewhere are
            lexicographic — confirm intent.
        max_number: maximum number of visitors (also a raw CSV string).
    """
    def __init__(self, address, volume, max_number):
        self.address = address
        self.volume = volume
        self.max_number = max_number
def print_object(pool: Pool):
    """Print a one-line human-readable summary of *pool*."""
    summary = "Address: {}, water volume: {}, max number: {}".format(
        pool.address, pool.volume, pool.max_number)
    print(summary)
def swap(list, i, min_index):
    """Exchange the elements at positions i and min_index in place and
    return the (same) list for convenience."""
    list[i], list[min_index] = list[min_index], list[i]
    return list
def selection(path):
    """Load pools from the CSV at *path*, selection-sort them in descending
    order of volume, and print a report (counts, timing, sorted pools).

    Fixes: the timer was started *after* the sort finished, so the reported
    sorting time was always ~0; it now brackets the sort. Also avoids
    shadowing the builtin `list` and closes the file via a context manager.
    """
    pools = []
    with open(path, "r") as source:
        for line in source:
            fields = line.split(",")
            pools.append(Pool(fields[0], fields[1], fields[2]))
    n = len(pools)
    comparing_times = 0
    change_times = 0
    sort_start = time.process_time()  # BUG FIX: was started after the sort
    for i in range(n - 1):
        min_index = i
        for j in range(i + 1, n):
            # NOTE(review): volume comes straight from the CSV as a string,
            # so this comparison is lexicographic — confirm intent.
            if pools[j].volume > pools[min_index].volume:
                min_index = j
            comparing_times = comparing_times + 1
        pools = swap(pools, i, min_index)
        change_times = change_times + 1
    sort_stop = time.process_time()
    print("Selection Sort")
    print("Change number: " + str(change_times))
    print("Compare number: " + str(comparing_times))
    print("Sorting time: " + str((sort_stop - sort_start)))
    print("Result")
    for pool in pools:
        print_object(pool)
def merge_sort(pools_list):
    """In-place recursive merge sort of *pools_list* in DESCENDING order of
    each pool's max_number.

    NOTE(review): comparing_times / change_times are local to each call and
    never returned or printed, so the counters are effectively dead code.
    max_number is read from the CSV as a string, so the ordering is
    lexicographic — confirm intent.
    """
    comparing_times = 0
    change_times = 0
    if len(pools_list) > 1:
        # split, sort each half recursively, then merge back into place
        mid = len(pools_list) // 2
        lefthalf = pools_list[:mid]
        righthalf = pools_list[mid:]
        merge_sort(lefthalf)
        merge_sort(righthalf)
        i = 0
        j = 0
        k = 0
        # merge: take the larger head element first (descending order)
        while i < len(lefthalf) and j < len(righthalf):
            comparing_times = comparing_times + 1
            if lefthalf[i].max_number > righthalf[j].max_number:
                change_times = change_times + 1
                pools_list[k] = lefthalf[i]
                i = i + 1
            else:
                change_times = change_times + 1
                pools_list[k] = righthalf[j]
                j = j + 1
            k = k + 1
        # drain whichever half still has elements
        while i < len(lefthalf):
            change_times = change_times + 1
            pools_list[k] = lefthalf[i]
            i = i + 1
            k = k + 1
        while j < len(righthalf):
            change_times = change_times + 1
            pools_list[k] = righthalf[j]
            j = j + 1
            k = k + 1
selection("file.cvs")
file = open("file.cvs", "r")
pools_list = []
for line in file:
string = line
str_list = string.split(",")
pool = Pool(str_list[0], str_list[1], str_list[2])
pools_list.append(pool)
merge_time_start = time.process_time()
merge_sort(pools_list)
merge_time_stop = time.process_time()
print("Merge Sort")
print("Sorting time: " + str((merge_time_stop - merge_time_start)))
print("Result")
for i in range(0, len(pools_list)):
print_object(pools_list[i])
| true
|
2956aa2adf0422794b93d301beafe0ee9dac93f3
|
Python
|
wwllong/py-design-pattern
|
/others/Callable.py
|
UTF-8
| 338
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
def funTest(name):
    """Print a demo message including *name* (used to exercise callable())."""
    message = "This is test function, name"
    print(message, name)
if __name__ == "__main__":
print(callable(filter))
print(callable(max))
print(callable(object))
print(callable(funTest))
var = "Test"
print(callable(var))
funTest("Python")
"""
True
True
True
True
False
This is test function, name Python
"""
| true
|
e70722c2fb60ecc50b9175b21c47db2246e60898
|
Python
|
OYuZhe/some-easy-code
|
/sieve_of_Eratosthenes.py
|
UTF-8
| 468
| 3.5
| 4
|
[] |
no_license
|
MaxLen = 10000000
#init Array
primeArray = [1]*10250000
primeArray[0]=0
primeArray[1]=0
def findPrime(limit=MaxLen):
    """Sieve of Eratosthenes: zero out every composite index of the global
    primeArray up to *limit* (inclusive), leaving 1 at prime indices.

    Fixes over the original:
    - the outer bound 3200 (~sqrt(10**7)) was hard-coded; it is now derived
      from *limit*, which defaults to MaxLen for backward compatibility;
    - marking starts at i*i instead of 2*i (smaller multiples were already
      marked by smaller primes);
    - no writes past *limit*, so the extra padding of primeArray beyond
      MaxLen is no longer relied upon.
    """
    for i in range(2, int(limit ** 0.5) + 1):
        if primeArray[i] == 1:
            for multiple in range(i * i, limit + 1, i):
                primeArray[multiple] = 0
def printPrime():
#print prime
for i in range(0,MaxLen):
if primeArray[i] == 1:
print(i)
if __name__ == '__main__':
findPrime()
printPrime()
| true
|
5deb4a0e5b2c9d62cae4406feb4aba1752c07a40
|
Python
|
cgarrido2412/PythonPublic
|
/Challenges/Advent Of Code/adventofcode2020/day15.py
|
UTF-8
| 1,268
| 3.609375
| 4
|
[] |
no_license
|
#! /usr/bin/env python3
import os
def memory_game(starting_sequence, n):
    """Play the elves' memory game (AoC 2020 day 15): return the n-th number.

    Each turn the next number spoken is 0 if the previous number was new,
    otherwise the gap between its two most recent occurrences.  Two dicts
    track, per number, the last turn it was spoken and the turn before that.
    """
    last_turn = {}   # number -> most recent turn on which it was spoken
    prior_turn = {}  # number -> the turn it was spoken before that
    turn = 0
    for spoken in starting_sequence:
        turn += 1
        if spoken in last_turn:
            prior_turn[spoken] = last_turn[spoken]
        last_turn[spoken] = turn
    while turn < n:
        turn += 1
        # New number -> 0; repeated number -> age between its last two turns.
        if spoken in prior_turn:
            spoken = last_turn[spoken] - prior_turn[spoken]
        else:
            spoken = 0
        if spoken in last_turn:
            prior_turn[spoken] = last_turn[spoken]
        last_turn[spoken] = turn
    return spoken
if __name__ == "__main__":
    #Starting numbers are: 18,8,0,5,4,1,20
    # Raw string: the Windows path contains backslashes that would otherwise
    # be parsed as (invalid) escape sequences and warn on newer Pythons.
    path = (os.getenv('HOMEDRIVE') + os.getenv('HOMEPATH')
            + r'\Desktop\Prisma API\Documents\puzzle_input.txt')
    # 'with' closes the handle; the original open(...).read() leaked it.
    with open(path) as f:
        starting_numbers = f.read()
    start = [int(x) for x in starting_numbers.split(',')]
    part_one = memory_game(start, 2020)
    part_two = memory_game(start, 30000000)
    print(part_one)
    print(part_two)
| true
|
9f7d4c40cb58888265d447c7c3374536f39171b2
|
Python
|
OaklandPeters/task_logger
|
/task_logger/enum/enum.py
|
UTF-8
| 18,504
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
"""
@todo: Refactor data, name, aliases
--> name: getter, setter, deleter
--> aliases: getter, setter, deleter
--> data: getter only - returning [name] + aliases
@todo: Requires changing the way validation determines whether to
extract name from 'data'. I will have to carefully think about this case:
state.data = ['attempting', 'attempt', 'attempted']
#? Should this change the name? IE be the same as:
state = EnumGroup(['attempting', 'attempt', 'attempted'])
--YES-- hence,
self.data = ['a','b','c']
==
self.name, self.aliases = self._validate(data, name=name)
@todo: EnumGroupInterface: self._validate_name, self._validate_aliases
Difference between _validate(data, name=None) and _validate_data(data)
is _validate_data *assumes* it will pop off the front to make a name
_validate(data, name=None)
_validate_data(data)
_validate_aliases(aliases)
_validate_name(name)
@todo: Allow EnumGroup __init__ to accept *args, **kwargs:
EnumGroup('attempting', 'attempt', 'attempted')
@todo: Consider making (optional) hooks from EnumGroup back to its parent.
So each group would know its .parent, and .index
@todo: Make EnumGroup count as child of its parent somehow (to isinstance?)
------ Future: --------
@todo: RegexEnum() + RegexEnumGroup()
@todo: support 'simplify'ing functions (like str.lower()), via a callback mechanism.
--> SmartEnum()/TransformEnum()
Should maintain a list of callbacks that it applies before comparison.
Will have to apply the callbacks on the 'name' and 'aliases' after updating callback list.
@todo: Implement this via: self._keytransform(key) - used only inside
__getitem__, __setitem__, __delitem__ (and maybe whatever sets the initial data)
Hard issue:
I would like States['attempting'] to be valid syntax.
But... this gets complicated because what if the name or alias
for a group is an integer?
(actually, this is a similar issue to atlas's)
SOLUTION: (inelegant, but workable)
(1) if isinstance(key, int):
index = key
(2) else:
index =
#check alias
(3) throw error if a name is ever an integer
"""
import collections
from abc import ABCMeta, abstractproperty, abstractmethod
#----
from local_packages import rich_collections
from local_packages import rich_core
#==============================================================================
# Interfaces
#==============================================================================
class EnumInterface(collections.MutableSequence):
    """Abstract interface for an Enum: a mutable sequence of EnumGroups.

    NOTE(review): Python 2 era -- ``__metaclass__`` has no effect on Python 3,
    and the ``collections.MutableSequence`` alias (vs ``collections.abc``)
    was removed in Python 3.10.
    """
    __metaclass__ = ABCMeta
    # Abstract read-only properties implementors must provide.
    groups = abstractproperty(lambda self: NotImplemented)
    names = abstractproperty(lambda self: NotImplemented)
    aliases = abstractproperty(lambda self: NotImplemented)
    # Abstract methods, declared as lambdas so the signature is only suggestive.
    _find_group = abstractmethod(lambda self, alias: NotImplemented) #returns EnumGroup()
    group = abstractmethod(lambda self, alias: NotImplemented) #returns EnumGroup(). Same as _find_group
    _find_all = abstractmethod(lambda self, alias: NotImplemented) #yields EnumGroup()
    index = abstractmethod(lambda self, alias: NotImplemented) #returns integer
    _make_group = abstractmethod(lambda self, data, name=None: NotImplemented) #constructor for groups
    keys = abstractmethod(lambda: NotImplemented)
    values = abstractmethod(lambda: NotImplemented)
class EnumGroupInterface(collections.Sequence):
    """
    Complication:
        self.data: returns self.name + self.alias
        self.alias: is stored in self._data (because of inheritance from BasicMutableSequence)
    @todo: Refactor data, name, aliases
        --> name: getter, setter, deleter
        --> aliases: getter, setter, deleter
        --> data: getter only - returning [name] + aliases
    """
    __metaclass__ = ABCMeta
    # NOTE(review): this abstract placeholder is immediately shadowed by the
    # concrete 'name' property defined below in the same class body.
    name = abstractproperty(lambda: NotImplemented)
    __eq__ = abstractmethod(lambda: NotImplemented)
    #--- Name property and validation
    @property
    def name(self):
        """The group's primary identifier (stored in self._name)."""
        return self._name
    @name.setter
    def name(self, value):
        self._name = self._validate_name(value)
    @name.deleter
    def name(self):
        del self._name
    def _validate_name(self, value):
        """Reject integer names: they would be ambiguous with sequence indexes."""
        if isinstance(value, int):
            raise TypeError(str.format(
                "{0} 'name' cannot be an integer, because it conflicts with Sequence indexes.",
                self.__class__.__name__
            ))
        return value
    #--- Aliases property and validation
    aliases = abstractproperty(lambda self: NotImplemented)
    #--- Data property and validation
    data = abstractproperty(lambda self: NotImplemented)
#==============================================================================
# Concrete Classes
#==============================================================================
class Enum(EnumInterface, rich_collections.BasicMutableSequence):
    """
    A sequence of EnumGroups.
    Potentially complicated 'corner-case' error:
        If name of a group is an integer
        Since that integer could also be the index of a different group
    @todo: Consider moving the group validiation from EnumGroup._validate()
        into here (Enum._validate_group())
    """
    def __init__(self, data):
        """data: a Mapping of name -> aliases, or a sequence of group sequences."""
        self.data = self._validate(data)
    def _validate(self, data):
        """Normalize 'data' into a list of EnumGroup objects."""
        rich_core.AssertKlass(data,
            (rich_core.NonStringSequence, collections.Mapping), name='data'
        )
        if isinstance(data, collections.Mapping):
            return [
                self._validate_group(group, name=name)
                for name, group in data.items()
            ]
        elif isinstance(data, rich_core.NonStringSequence):
            return [
                self._validate_group(group)
                for group in data
            ]
        else:
            raise TypeError("Switch error on type of 'data' ({0}).".format(type(data)))
    def _validate_group(self, group, name=None):
        """Ensure sequence is a valid group."""
        return self._make_group(group, name=name)
    def _make_group(self, data, name=None):
        """Constructor for a single group."""
        return EnumGroup(data, name=name)
    @property
    def groups(self):
        """Alias to self.data property."""
        return self.data
    @groups.setter
    def groups(self, value):
        """Alias to self.data property setter."""
        self.data = value
    @groups.deleter
    def groups(self):
        """Alias to self.data property deleter."""
        del self.data
    @property
    def names(self):
        """Primary name of every group, in order."""
        return [group.name for group in self]
    @property
    def aliases(self):
        """Alias list of every group, in order."""
        return [group.aliases for group in self]
    #--------------------------------------------------------------------------
    # Item access: resolve a name/alias (or plain int index) to a list
    # position first, then delegate to the underlying list.
    #--------------------------------------------------------------------------
    def _resolve_index(self, key):
        """Integers pass through; anything else is looked up as a name/alias."""
        if isinstance(key, int):
            return key
        else:
            return self.index(key)
    def __getitem__(self, key):
        return self.data[self._resolve_index(key)]
    def __setitem__(self, key, value):
        # BUG FIX: the original evaluated the subscript but never assigned,
        # silently discarding 'value'.
        self.data[self._resolve_index(key)] = value
    def __delitem__(self, key):
        # BUG FIX: the original evaluated the subscript but never deleted.
        del self.data[self._resolve_index(key)]
    def _find_group(self, alias):
        """Find first group matching alias.
        Primarily, this matches on name, but allows for the
        possibility of groups which accept alternatives.
        Raises StopIteration when no group matches."""
        # next() builtin instead of the Py2-only .next() method.
        return next(self._find_all(alias))
    def _find_all(self, alias):
        """Find all groups matching alias. Iterator."""
        for group in self:
            if alias in group:
                yield group
    def group(self, alias):
        """Find first group matching alias (~name)."""
        return self._find_group(alias)
    def keys(self):
        """dict-like view: the group names."""
        return self.names
    def values(self):
        """dict-like view: every name/alias from every group, flattened."""
        return [elm for group in self.groups for elm in group]
    def items(self):
        """dict-like view: one (name, aliases) pair per group."""
        return [(group.name, group.aliases) for group in self.groups]
    def index(self, alias):
        """Index of the first group containing 'alias'; ValueError if none."""
        for i, group in enumerate(self):
            if alias in group:
                return i
        raise ValueError("alias not contained in any group.")
    def __eq__(self, other):
        return (self.groups == other)
class EnumGroup(rich_collections.BasicMutableSequence, EnumGroupInterface):
    """
    Organization of data, names, aliases is confusing:
    @todo: Refactor data, name, aliases
        --> name: getter, setter, deleter
        --> aliases: getter, setter, deleter
        --> data: getter only - returning [name] + aliases
    @todo: Requires changing the way validation determines whether to
        extract name from 'data'. I will have to carefully think about this case:
            state.data = ['attempting', 'attempt', 'attempted']
            #? Should this change the name? IE be the same as:
            state = EnumGroup(['attempting', 'attempt', 'attempted'])
            --YES-- hence,
            self.data = ['a','b','c']
            ==
            self.name, self.aliases = self._validate(data, name=name)
    """
    def __init__(self, data, name=None):
        """Initialize groups contained in
        If name == None, then first entry of data is used as name.
        """
        #self.name, self.aliases = self._validate(data, name=name)
        self.name, self.data = self._validate(data, name=name)
    def _validate(self, data, name=None):
        """Return (name, aliases): name is given explicitly or popped from
        the front of data; any duplicate of the name is removed from data."""
        data = _ensure_list(data)
        rich_core.AssertKlass(data, collections.MutableSequence, name='data')
        # If name missing, using the first entry in data
        if name == None:
            assert(len(data) > 0), "If 'name' not provided, data must be non-empty."
            name = data.pop(0)
        else:
            name = name
        # If name exists in data, remove it.
        try:
            data.pop(data.index(name))
        except ValueError:
            pass
        return name, data
    def _validate_data(self, data, name=None):
        """
        Difference between _validate(data, name=None) and _validate_data(data)
        is _validate_data *assumes* it will pop off the front to make a name

        NOTE(review): stub -- 'aliases' below is an undefined name and
        _validate_aliases is not defined on this class, so calling this
        raises NameError. Kept as-is per the module-level @todo notes.
        """
        #[] Process name - if not provided, extract from data
        #[] Assign remaining to aliases
        return self._validate_name(name), self._validate_aliases(aliases)
    #Special case for delitem: if length == 1, ie consists only of name
    @property
    def data(self):
        # Full group contents: primary name followed by the aliases.
        return [self._name] + self._data
    @data.setter
    def data(self, value):
        # NOTE(review): asymmetric with the getter -- assigns aliases only
        # (self._data); the name lives in self._name via the 'name' property.
        self._data = value
    @data.deleter
    def data(self):
        del self._data
    @property
    def aliases(self):
        """Secondary names: everything except the primary name."""
        return self._data
class BasicEnumGroup(EnumGroupInterface, rich_collections.BasicSequence):
    """Very simple. Only consists of name."""
    #def __init__(self, data, name=None):
    def __init__(self, *data, **kwargs):
        """name is an unused keyword in BasicEnumGroup."""
        # *data packs positional args into a tuple; the data setter below
        # enforces that exactly one value was supplied.
        self.data = data
    def __eq__(self, other):
        return (self.data == other)
    #@todo: self._validate_name, self._validate_aliases
    #-- Getters
    @property
    def data(self):
        # (name,) + aliases -- aliases is always () for this class.
        return tuple([self.name]) + self.aliases
    @data.setter
    def data(self, value):
        self.name, self.aliases = self._validate_data(value)
    @data.deleter
    def data(self):
        del self.name
        del self.aliases
    def _validate_data(self, data):
        """Split validated input into (name, aliases); requires length 1."""
        rich_core.AssertKlass(data, rich_core.NonStringSequence, name='data')
        assert(len(data) == 1), "'data' must be length 1."
        return data[0], tuple() #name, aliases
    @property
    def aliases(self):
        return self._aliases
    @aliases.setter
    def aliases(self, value):
        self._aliases = self._validate_aliases(value)
    @aliases.deleter
    def aliases(self):
        del self._aliases
    def _validate_aliases(self, aliases):
        """Aliases must be a non-string sequence; returned unchanged."""
        rich_core.AssertKlass(aliases, rich_core.NonStringSequence, name='aliases')
        return aliases
#------------------------------------------------------------------------------
# Local Utility Functions
#------------------------------------------------------------------------------
def _ensure_list(value):
"""Return value as a list; if not a NonStringSequence, wraps in a list."""
if isinstance(value, list):
return value
elif isinstance(value, rich_core.NonStringSequence):
return list(value)
else:
return [value]
import unittest
import operator
class BasicEnumGroupTests(unittest.TestCase):
    """Tests for BasicEnumGroup (a name with no aliases).

    NOTE(review): self.assert_ is the Python 2-era alias of assertTrue
    (removed in Python 3.12).
    """
    constructor = BasicEnumGroup
    def setUp(self):
        # Three equal single-name groups plus one differing group.
        self.g1a = self.constructor('attempting')
        self.g1b = self.constructor('attempting')
        self.g1c = self.constructor('attempting')
        self.g2 = self.constructor('attempt')
    def test_group_equality(self):
        self.assertEqual(self.g1a, self.g1b)
        self.assertEqual(self.g1a, self.g1c)
        self.assertEqual(self.g1b, self.g1c)
        self.assertNotEqual(self.g1a, self.g2)
    def test_contains(self):
        # Membership is exact-string, not prefix, matching.
        self.assert_(
            'attempt' not in self.g1a
        )
        self.assert_(
            'attempt' in self.g2
        )
        self.assert_(
            'non' not in self.g1a
        )
    def test_name(self):
        self.assertEqual(self.g1a.name, 'attempting')
        self.assertEqual(self.g1b.name, 'attempting')
        self.assertEqual(self.g2.name, 'attempt')
    def test_string_comparison(self):
        pass  # placeholder
    def test_errors(self):
        # String subscripts and out-of-range indexes are rejected; the
        # immutable sequence base makes deletion a TypeError.
        self.assertRaises(TypeError, lambda: self.g1a['attempting'])
        self.assertRaises(IndexError, lambda: self.g1b[1])
        self.assertRaises(TypeError, lambda: operator.delitem(self.g1c, 0) )
class EnumGroupTests(unittest.TestCase):
    """Tests for EnumGroup: first element becomes the name, rest aliases."""
    constructor = EnumGroup
    def setUp(self):
        # Equivalent single-name groups built from a tuple, a list and a
        # bare string; one differing group.
        self.g1a = self.constructor(('attempting',))
        self.g1b = self.constructor(['attempting'])
        self.g1c = self.constructor('attempting',)
        self.g2 = self.constructor('attempt',)
    def test_group_equality(self):
        self.assert_(self.g1a == self.g1b)
        self.assert_(self.g1a == self.g1c)
        self.assert_(self.g1b == self.g1c)
        self.assert_(self.g1a != self.g2)
    def test_contains(self):
        # Membership is exact-string, not prefix, matching.
        self.assert_(
            'attempt' not in self.g1a
        )
        self.assert_(
            'attempt' in self.g2
        )
        self.assert_(
            'non' not in self.g1a
        )
    def test_name(self):
        self.assertEqual(self.g1a.name, 'attempting')
        self.assertEqual(self.g1b.name, 'attempting')
        self.assertEqual(self.g2.name, 'attempt')
    def test_string_comparison(self):
        pass  # placeholder
    def test_errors(self):
        # String subscripts are rejected; out-of-range index raises IndexError.
        self.assertRaises(TypeError, lambda: self.g1a['attempting'])
        self.assertRaises(IndexError, lambda: self.g1b[1])
class EnumTests(unittest.TestCase):
    """End-to-end tests for Enum built from three string groups."""
    def setUp(self):
        self.States = Enum((
            ('attempting', 'attempt','attempted'),
            ('errored', 'error'),
            ('completed', 'complete')
        ))
        # Plain-list mirror of the groups, for equality comparisons.
        self.expected = [
            ['attempting', 'attempt','attempted'],
            ['errored', 'error'],
            ['completed', 'complete']
        ]
        self.groups = [
            EnumGroup(['attempting', 'attempt','attempted']),
            EnumGroup(['errored', 'error']),
            EnumGroup(['completed', 'complete'])
        ]
    def test_getitem_repr(self):
        # Lookup by name returns the whole group.
        state = self.States['attempting']
        self.assert_(isinstance(state, EnumGroup),
            "Is not instance of EnumGroup"
        )
        self.assertEqual(
            repr(state),
            "['attempting', 'attempt', 'attempted']",
            "States does not match repr of expectation."
        )
    def test_iter(self):
        # Iterating the Enum and its .names yields groups in insert order.
        names = ['attempting','errored','completed']
        for state, name, expected in zip(self.States,self.States.names, names):
            self.assert_(
                state.name == name == expected,
                "Inequality among: {0}, {1}, {2}".format(
                    state.name, name, expected
                )
            )
    def test_group_comparison(self):
        #For Enum
        self.assertEqual(
            self.States,
            self.groups,
            "States does not match expected groups"
        )
        #Group comparison
        self.assertEqual(
            self.States['attempting'],
            EnumGroup(['attempting', 'attempt','attempted']),
            "group comparison"
        )
    def test_string_comparison(self):
        #For Enum collection
        self.assertEqual(
            self.States,
            self.expected,
            "Enum() does not match expectation"
        )
        #For single groups
        compared = all(
            (exp == state)
            for exp, state in zip(self.expected, self.States)
        )
        self.assert_(compared, "Iterator failed.")
        self.assertEqual(
            list(elm for elm in self.States),
            self.expected,
            "Groups from iter do not match expectations."
        )
    # Disabled sketches kept from the original author:
    #def test_group_containment(self):
    #    States['attempting'] in States
    #def test_string_containment(self):
    #    'attempting' in States
    # def test_corner_case(self):
    #     Problems = Enum([
    #         (2,),
    #         (1,),
    #         ('c',)
    #     ])
    #     Problems['2']
    #     print(Problems[1])
    #     import pdb
    #     pdb.set_trace()
    #     print(Problems[1])
# Run the unittest suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| true
|
faf9b6c43594848415bcf914e117c63b073caec9
|
Python
|
CollinErickson/LeetCode
|
/Python/9_palindrome.py
|
UTF-8
| 411
| 3.421875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 23:45:29 2017
@author: cbe117
"""
class Solution(object):
    def isPalindrome(self, x):
        """
        :type x: int
        :rtype: bool

        An integer is a palindrome when its decimal digits read the same
        forwards and backwards.
        """
        # BUG FIX: the original compared abs(x) to its digit reversal, which
        # drops the sign and would misclassify e.g. -121 as a palindrome.
        # A negative number can never be a palindrome ('-' has no mirror).
        if x < 0:
            return False
        s = list(str(x))
        s.reverse()
        return x == int(''.join(s))
# Ad-hoc smoke checks (Python 2 'print' statement syntax).
sol = Solution()
print sol.isPalindrome(1234)
print sol.isPalindrome(1234321)
print sol.isPalindrome(-2147483648)
| true
|
bd5b45792d360bf1e28ec9ea95f6f684dd3a36c0
|
Python
|
abeagomez/nonograms_solver
|
/solution.py
|
UTF-8
| 5,973
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
from game import Game
from bitset import Bitset
import numpy as np
import random as rd
from generator import generate_game
class LocalSearchSolution:
    """Local-search state for solving a nonogram.

    self.lists holds one gap vector per board row: entry k is the number of
    empty cells before run k of the row's clue (game.lists[0][row]), with a
    final entry for trailing empties.  Neighbors are produced by
    re-randomizing one row's gaps; eval() scores the rendered board against
    the column clues.
    """
    def __init__(self, game: Game, initial: str = 'random', next: str = 'random'):
        self.game = game
        self.lists = []
        self.generate_initial(initial)
        self.board_updated = False          # cache-validity flag for _board
        self._board = None
        self.max_random_generations = 10    # retry budget for random neighbors
        self.old_state = None               # (row, previous gaps) for go_back()
        self.last_changed_row = 0
        self.next_type = next
    @property
    def board(self):
        # NOTE(review): relies on truthiness of a Bitset instance for the
        # cache check -- confirm Bitset defines __bool__ as intended.
        if self._board and self.board_updated:
            return self._board
        return self.generate_board()
    def leftmost(self):
        """Initial layout with every run pushed as far left as possible."""
        solution = []
        # Go through the horizontals
        for line in self.game.lists[0]:
            s = sum(line)
            rem = self.game.columns - s
            l = np.ones(len(line) + 1, int)     # one mandatory gap between runs
            rem -= (len(line) - 1)
            l[0] = 0
            l[-1] = 0
            l[-1] += rem                        # all slack after the last run
            solution.append(l)
        self.lists = solution
    def rightmost(self):
        """Initial layout with every run pushed as far right as possible."""
        solution = []
        # Go through the horizontals
        for line in self.game.lists[0]:
            s = sum(line)
            rem = self.game.columns - s
            l = np.ones(len(line) + 1, int)
            rem -= (len(line) - 1)
            l[0] = 0
            l[-1] = 0
            l[0] += rem                         # all slack before the first run
            solution.append(l)
        self.lists = solution
    def generate_board(self):
        """Render self.lists into a Bitset board and cache it."""
        game = self.game
        solution = self.lists
        board = Bitset(game.rows, game.columns)
        for i, line in enumerate(solution):
            game_line = game.lists[0][i]
            sol_line = solution[i]
            idx = 0
            for k in range(len(sol_line) - 1):
                idx += sol_line[k]              # skip the gap before run k
                for j in range(game_line[k]):
                    board[i, idx] = True        # fill run k cell by cell
                    idx += 1
        self._board = board
        self.board_updated = True
        return board
    def generate_row(self, line: np.ndarray):
        """Random gap vector for one row: mandatory single separators plus
        randomly distributed slack cells."""
        s = sum(line)
        rem = self.game.columns - s
        l = np.ones(len(line) + 1, int)
        rem -= (len(line) - 1)
        l[0] = 0
        l[-1] = 0
        while rem > 0:
            idx = rd.randint(0, len(l) - 1)
            l[idx] += 1
            rem -= 1
        return l
    def generate_initial(self, kind: str):
        """Build the starting solution: 'leftmost', 'rightmost' or random."""
        if kind == 'leftmost':
            self.leftmost()
        elif kind == 'rightmost':
            self.rightmost()
        else:
            game = self.game
            solution = []
            # Go through the horizontals
            for line in game.lists[0]:
                solution.append(self.generate_row(line))
            self.lists = solution
    def generate_random_next(self):
        """Replace one random row's gaps with a different random layout.
        Returns False when no change was found within the retry budget."""
        self.board_updated = False
        i = rd.randint(0, len(self.lists) - 1)
        solution = self.lists
        game = self.game
        ct = 0
        # Skip rows whose clue entries are all <= 1.
        # NOTE(review): such rows can still admit multiple layouts (e.g. [1, 1]
        # on a wide board) -- confirm this skip condition is intentional.
        while np.all(np.array(game.lists[0][i]) <= 1):
            i = rd.randint(0, len(solution) - 1)
            if ct == self.max_random_generations: return False
            ct += 1
        while True:
            l = self.generate_row(game.lists[0][i])
            b = l == solution[i]
            if not np.all(b):
                self.old_state = i, solution[i]   # remember for go_back()
                solution[i] = l
                return True
            if ct == self.max_random_generations: return False
            i = rd.randint(0, len(solution) - 1)
            ct += 1
    def generate_iterative_next(self):
        """Like generate_random_next(), but cycles rows round-robin starting
        from the last changed row.  Returns False after a full fruitless lap.

        NOTE(review): the row index wraps with '% self.game.columns'; rows are
        indexed into self.lists, so this looks like it should wrap on the row
        count -- correct only for square boards. Confirm.
        """
        self.board_updated = False
        game = self.game
        solution = self.lists
        i = self.last_changed_row
        initial_i = i
        while np.all(np.array(game.lists[0][i]) <= 1):
            self.last_changed_row = (self.last_changed_row + 1) % self.game.columns
            i = self.last_changed_row
            if i == initial_i: return False
        while True:
            l = self.generate_row(game.lists[0][i])
            b = l == solution[i]
            if not np.all(b):
                self.old_state = i, solution[i]
                solution[i] = l
                self.last_changed_row = (self.last_changed_row + 1) % self.game.columns
                return True
            self.last_changed_row = (self.last_changed_row + 1) % self.game.columns
            i = self.last_changed_row
            if i == initial_i: return False
    def next(self):
        """Produce the next candidate using the configured strategy."""
        if self.next_type == 'iterative':
            return self.generate_iterative_next()
        else:
            return self.generate_random_next()
    def go_back(self):
        """Undo the most recent next() step, if any (one level of undo)."""
        if self.old_state:
            self.lists[self.old_state[0]] = self.old_state[1]
            self.old_state = None
            self.board_updated = False
    def eval(self):
        """Error score vs the column clues (0 means all columns satisfied):
        counts run-length-list size mismatches plus differing run lengths."""
        _, v = generate_game(self.board)
        k = self.game.lists[1]
        error = 0
        for i in range(len(v)):
            line = v[i]
            correct = k[i]
            error += abs(len(line) - len(correct))
            for j in range(min(len(line), len(correct))):
                if line[j] != correct[j]: error += 1
        return error
if __name__ == '__main__':
    from generator import generate_board
    # Demo on a random 5x5 puzzle: show the canonical initial layouts, then
    # take a few random neighbor steps (with one undo) and print the score.
    board = generate_board(5, 5)
    game = Game(board)
    sol = LocalSearchSolution(game)
    lsol = LocalSearchSolution(game, initial='leftmost')
    rsol = LocalSearchSolution(game, initial='rightmost')
    print('leftmost', lsol.board, sep='\n')
    print('rightmost', rsol.board, sep='\n')
    board2 = sol.board
    game.print()
    print(board, board2, sep='\n\n')
    print(game.check_horizontal(board2))
    print(game.check_vertical(board2), '\n')
    sol.next()
    print(sol.board, '\n')
    sol.next()
    print(sol.board, '\n')
    sol.next()
    print(sol.board, '\n')
    sol.go_back()   # revert the last neighbor move
    print(sol.board, '\n')
    sol.next()
    print(sol.board, '\n')
    sol.next()
    print(sol.board, '\n')
    print(sol.eval())
| true
|
b45b7d4cf718eec5976fafc045b8fea6ff04ee42
|
Python
|
joshuastay/Login-Data-Storage
|
/Password.py
|
UTF-8
| 10,838
| 2.859375
| 3
|
[] |
no_license
|
import tkinter as tk
import tkinter.ttk as ttk
import pickle
import os
# global variables
frame = tk.Tk()                      # root window, created at import time
user = "Username"                    # placeholder shown before a selection
passw = "Password"
# Pickle files holding the persisted store.
# NOTE(review): these read handles stay open for the program's lifetime and
# are never explicitly closed.
open_labs = open("label.data", "rb")
open_users = open("user.data", "rb")
open_passwords = open("password.data", "rb")
top_button_pos = [125, 280]          # shared x/y base for main-window buttons
top_back_color = "#80bfff"
add_back_color = "Teal"
edit_back_color = "Light Blue"
head_font_1 = "Arial, 35"            # heading font
head_font_2 = "Arial, 20"            # field-caption font
user_label_text = "Username: "
pass_label_text = "Password: "
add_edit_status = "normal"           # entry state in add/edit popups
add_edit_size = "400x400"            # geometry of add/edit popups
add_edit_field_pos = [200, 112]      # shared x/y base for add/edit entries
#Login Class, main operator in program
class Login:
    """Parallel-list store of login records.

    labels, users and passes stay index-aligned: slot i of each list
    belongs to the same record.  Label-based lookups resolve the index
    first, then read the matching slot.
    """

    def __init__(self):
        self.users = []
        self.passes = []
        self.labels = []

    def get_user(self, ind):
        """Username of the record whose label equals *ind*."""
        return self.users[self.labels.index(ind)]

    def get_pass(self, ind):
        """Password of the record whose label equals *ind*."""
        return self.passes[self.labels.index(ind)]

    def get_lab(self, ind):
        """Label of the record whose label equals *ind* (identity lookup)."""
        return self.labels[self.labels.index(ind)]

    def get_ind(self, index):
        """Position of the record whose label equals *index*."""
        return self.labels.index(index)

    def add_login(self, label, user, passw):
        """Append one record, keeping the three lists aligned."""
        self.users.append(user)
        self.passes.append(passw)
        self.labels.append(label)

    def pull_lab(self):
        """A shallow copy of the label list."""
        return list(self.labels)

    def update_login(self, place, upd_lab, upd_use, upd_pas):
        """Overwrite the record stored at position *place*."""
        self.labels[place] = upd_lab
        self.users[place] = upd_use
        self.passes[place] = upd_pas

    def get_lab_list(self):
        """The live label list (not a copy)."""
        return self.labels

    def get_user_list(self):
        """The live username list (not a copy)."""
        return self.users

    def get_pass_list(self):
        """The live password list (not a copy)."""
        return self.passes

    def upload_lab(self, labs):
        """Replace the label list wholesale (used when loading from disk)."""
        self.labels = labs

    def upload_use(self, user):
        """Replace the username list wholesale."""
        self.users = user

    def upload_pas(self, pas):
        """Replace the password list wholesale."""
        self.passes = pas

    def delete_login(self, ind):
        """Remove the record at position *ind* from all three lists."""
        self.labels.pop(ind)
        self.users.pop(ind)
        self.passes.pop(ind)

    def get_entry_ind(self, ind):
        """Label stored at position *ind*."""
        return self.labels[ind]
#Label class, easy setup of multiple labels
class Prgrm_labels(tk.Label):
    """Convenience label: builds a tk.Label, styles it, and places it
    at the given (posx, posy) coordinates inside *parent*."""

    def __init__(self, parent, posx, posy, content, font, color="Light Gray", font_color="Black"):
        self.text_content = content
        super(Prgrm_labels, self).__init__(parent)
        self.config(text=self.text_content, font=font,
                    background=color, fg=font_color)
        self.place(x=posx, y=posy)
#Combobox for list of login labels
class Pass_list(ttk.Combobox):
    '''
    pulls values from Login class label list
    '''
    def __init__(self, parent):
        # NOTE(review): the StringVar master is the module-level 'frame',
        # not 'parent' -- confirm this is intended.
        self.current_table = tk.StringVar(frame, "Select desired info")
        ttk.Combobox.__init__(self, parent)
        self.config(textvariable=self.current_table, state="readonly",
                    postcommand = self.check_vals)
        self.place(x=150, y=110, anchor = "w")
    def get_current(self):
        """Currently selected label string."""
        return self.get()
    def check_vals(self):
        # postcommand hook: refresh the dropdown from the global Login store
        # just before the list is shown.
        self.config(values = new_log.pull_lab())
    def start_over(self):
        """Reset the selection to the first stored label."""
        self.set(new_log.get_entry_ind(0))
#Field class, easy setup of multiple fields
class Field_entry(tk.Entry):
    '''
    Takes positional arguments, default displayed text and read/write only
    '''
    def __init__(self, parent, posx, posy, status, default):
        tk.Entry.__init__(self, parent)
        # NOTE(review): the StringVar master is the global 'frame', not
        # 'parent' -- confirm this is intended for the add/edit popups.
        self.def_entry = tk.StringVar(frame, default)
        self.config(textvariable = self.def_entry, state = status, width = 30)
        self.place(x = posx, y = posy)
    def re_fill(self, new):
        """Point the entry at a different StringVar (used on reselection)."""
        self.config(textvariable = new)
    def clear_entry(self):
        # NOTE(review): textvariable="" detaches the variable rather than
        # clearing the displayed text -- verify the intended effect.
        self.config(textvariable = "")
#Button class for creation of multiple buttons
class New_button(tk.Button):
    """Fixed-width (25 character) button labeled *label*, placed at
    (posx, posy) inside *parent*."""

    def __init__(self, parent, posx, posy, label):
        super(New_button, self).__init__(parent)
        self.config(width=25, text=label)
        self.place(y=posy, x=posx)
#Return login function on update of combobox
def ret_log(event):
    '''
    Takes the global user and passw variables and assigns them the values of
    the corresponding list entry, then refreshes the display fields.
    '''
    global user, passw, user_field, pass_field, cmb_login
    # Look up the record matching the new combobox selection and push its
    # username/password into the read-only fields via fresh StringVars.
    user = tk.StringVar(frame, new_log.get_user(cmb_login.get_current()))
    user_field.re_fill(user)
    passw = tk.StringVar(frame, new_log.get_pass(cmb_login.get_current()))
    pass_field.re_fill(passw)
#Add login function, lets you add an entry to Login class
def add_login(event):
    '''
    Opens new window, with several fields, labels and a button to add to login class
    '''
    add_win = tk.Toplevel(frame)
    add_win.title("Add Login Info")
    add_win.geometry(add_edit_size)
    add_win.config(background = add_back_color)
    # Static widgets: action button, heading, and the three field captions.
    add_new_button = New_button(add_win, 110, 275, "Add")
    add_new_title = Prgrm_labels(add_win, 50, 15, "Add Login Info", head_font_1, add_back_color)
    add_new_label = Prgrm_labels(add_win, 25, 100, "Login Label: ", head_font_2, add_back_color)
    add_user_label = Prgrm_labels(add_win, 40, 150, user_label_text, head_font_2, add_back_color)
    add_pass_label = Prgrm_labels(add_win, 45, 200, pass_label_text, head_font_2, add_back_color)
    # Editable entries for the new record: label / username / password.
    new_l_field = Field_entry(add_win, add_edit_field_pos[0], add_edit_field_pos[1], add_edit_status, "")
    new_u_field = Field_entry(add_win, add_edit_field_pos[0], add_edit_field_pos[1] + 50, add_edit_status, "")
    new_p_field = Field_entry(add_win, add_edit_field_pos[0], add_edit_field_pos[1] + 100, add_edit_status, "")
    #button handler for button press event, calls login class to add field entries
    def button_handler(event):
        new_log.add_login(new_l_field.get(), new_u_field.get(), new_p_field.get())
        add_win.destroy()
    add_new_button.bind("<Button-1>", button_handler)
#Edit login function, opens window to edit current selection
def edit_login(event):
    '''
    opens a new window, with several text fields to update current combobox selection
    '''
    edit_win = tk.Toplevel(frame)
    edit_win.title("Edit Login Info")
    edit_win.geometry(add_edit_size)
    edit_win.config(background = edit_back_color)
    # Static widgets: action button, heading, and the three field captions.
    edit_new_button = New_button(edit_win, 110, 275, "Save Changes")
    edit_new_title = Prgrm_labels(edit_win, 50, 15, "Edit Login Info", head_font_1, edit_back_color)
    edit_new_label = Prgrm_labels(edit_win, 25, 100, "Login Label: ", head_font_2, edit_back_color)
    edit_user_label = Prgrm_labels(edit_win, 40, 150, user_label_text, head_font_2, edit_back_color)
    edit_pass_label = Prgrm_labels(edit_win, 45, 200, pass_label_text, head_font_2, edit_back_color)
    # Entries pre-filled with the record currently selected in the combobox.
    edit_l_field = Field_entry(edit_win, add_edit_field_pos[0], add_edit_field_pos[1],
                               add_edit_status, new_log.get_lab(cmb_login.get_current()))
    edit_u_field = Field_entry(edit_win, add_edit_field_pos[0], add_edit_field_pos[1] + 50,
                               add_edit_status, new_log.get_user(cmb_login.get_current()))
    edit_p_field = Field_entry(edit_win, add_edit_field_pos[0], add_edit_field_pos[1] + 100,
                               add_edit_status, new_log.get_pass(cmb_login.get_current()))
    #edit button even handler, calls login class to update the selected entry
    def edit_button_handler(event):
        new_log.update_login(new_log.get_ind(cmb_login.get_current()), edit_l_field.get(),
                             edit_u_field.get(), edit_p_field.get())
        edit_win.destroy()
    edit_new_button.bind("<Button-1>", edit_button_handler)
#Function to load pickled files
def load_info():
    """Populate the global Login store from the three pickle files."""
    #initiates lists
    load_labels = []
    load_users = []
    load_passwords = []
    #populates lists with opened pickle files
    # Size check first: pickle.load on an empty file would raise EOFError.
    if os.path.getsize("label.data") > 0:
        load_labels = list(pickle.load(open_labs))
    if os.path.getsize("user.data") > 0:
        load_users = list(pickle.load(open_users))
    if os.path.getsize("password.data") > 0:
        load_passwords = list(pickle.load(open_passwords))
    #calls Login class to upload lists
    new_log.upload_lab(load_labels)
    new_log.upload_use(load_users)
    new_log.upload_pas(load_passwords)
#Function to save data into pickled files
def save_info():
    """Dump the Login store's three lists to the (globally opened) pickle files."""
    #pulls lists from Login class
    add_lab_list = new_log.get_lab_list()
    add_use_list = new_log.get_user_list()
    add_pas_list = new_log.get_pass_list()
    #dumps lists into pickled files
    pickle.dump(add_lab_list, pick_labs)
    pickle.dump(add_use_list, pick_users)
    pickle.dump(add_pas_list, pick_passwords)
#Delete info function, called from delete button
def delete_info(event):
    '''
    Deletes current selection from Login class
    updates combobox and text fields after deletion
    '''
    global user, passw, user_field, pass_field, cmb_login
    new_log.delete_login(new_log.get_ind(cmb_login.get_current()))
    # NOTE(review): start_over() assumes at least one record remains;
    # deleting the last entry raises IndexError here.
    cmb_login.start_over()
    user = tk.StringVar(frame, new_log.get_user(cmb_login.get_current()))
    user_field.re_fill(user)
    passw = tk.StringVar(frame, new_log.get_pass(cmb_login.get_current()))
    pass_field.re_fill(passw)
# --- Application wiring (runs at import) ---
new_log = Login()
load_info()
cmb_login = Pass_list(frame)
# Main-window static widgets.
title = Prgrm_labels(frame, 25, 15, "Password Storage", head_font_1, top_back_color)
user_label = Prgrm_labels(frame, 25, 150, user_label_text, head_font_2, top_back_color)
pass_label = Prgrm_labels(frame, 30, 200, pass_label_text, head_font_2, top_back_color)
add_button = New_button(frame, top_button_pos[0], top_button_pos[1] + 40, "Add Login")
edit_button = New_button(frame, top_button_pos[0], top_button_pos[1], "Edit Current Selection")
delete_button = New_button(frame, top_button_pos[0], top_button_pos[1] + 80, "Delete Current Selection")
# Read-only display fields bound to the global user/passw placeholders.
user_field = Field_entry(frame, 200, 161, "readonly", user)
pass_field = Field_entry(frame, 200, 211, "readonly", passw)
# Event bindings: combobox selection plus the three action buttons.
cmb_login.bind("<<ComboboxSelected>>", ret_log)
add_button.bind("<Button-1>", add_login)
edit_button.bind("<Button-1>", edit_login)
delete_button.bind("<Button-1>", delete_info)
frame.title("Password Reference")
frame.geometry("450x450")
frame.config(background = top_back_color)
frame.mainloop()  # blocks until the window is closed
# After the GUI exits, persist the (possibly modified) store.
pick_labs = open("label.data", "wb")
pick_users = open("user.data", "wb")
pick_passwords = open("password.data", "wb")
save_info()
pick_labs.close()
pick_users.close()
pick_passwords.close()
| true
|
8fcddaeb5e47c0c05a89b146e91277cc7f427ca8
|
Python
|
coronafighter/coronaSEIR
|
/deaths_per_capita.py
|
UTF-8
| 3,338
| 2.9375
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import population
import world_data
countries, provinces = world_data.get_countries_provinces()
countryPopulation = population.get_all_population_data()
countries.extend(['Hubei'])  # treat the Hubei province as a pseudo-country
# todo: single loop, cleanup
# Pass 1: absolute death counts per country (population >= 1M, deaths >= 10).
# XCDR rows are (date, cases, deaths, recovered).
countryDeaths = []
for country in countries:
    try:
        if countryPopulation[country] < 1000000:
            continue
        province = 'all'
        country2 = country
        if country == 'Hubei':
            country2 = 'China'
            province = 'Hubei'
        XCDR_data = np.array(world_data.get_country_xcdr(country2, province=province,
                                                         returnDates=True))
        cases = int(XCDR_data[-1, 1])  # last row, second column
        deaths = int(XCDR_data[-1, 2])  # last row, third column
        deathDelta = int(XCDR_data[-1, 2] - XCDR_data[-8, 2])  # deaths over the last 7 rows
        if deaths < 10:
            continue
        recovered = int(XCDR_data[-1, 3])  # last row, fourth column
        date = XCDR_data[-1, 0]
        countryDeaths.append((country, cases, deaths, recovered, date, deathDelta))
    except Exception as e:
        print("fail: ", country, sys.exc_info()[0], e)

# Pass 2: normalize to deaths per million inhabitants.
countryDeathsPC = []
countryDeathsDeltaPC = []
for ccdrd in countryDeaths:
    country, cases, deaths, recovered, date, deathDelta = ccdrd
    try:
        pop = population.get_population(country)
        countryDeathsPC.append((country, deaths * 1.0e6 / pop, deaths, pop, date))
        countryDeathsDeltaPC.append((country, deathDelta * 1.0e6 / pop, deathDelta, pop, date))
        #countryDeathrate.append((country, 100.0 * deaths / cases, deaths, pop))
    except KeyError:
        print("fail: ", country)
print()

# Sort both tables descending by the per-capita figure.
countryDeathsPC = sorted(countryDeathsPC, key = lambda x: x[1]) # sort by second subitem
countryDeathsPC.reverse() # in place
countryDeathsDeltaPC = sorted(countryDeathsDeltaPC, key = lambda x: x[1]) # sort by second subitem
countryDeathsDeltaPC.reverse() # in place

# Build the (dates, deaths-per-1M) time series for the top 20 countries.
dCountryDeathsPCXY = {}
for country, trash, trash, trash, trash in countryDeathsPC[0:20]:
    province = 'all'
    country2 = country
    if country == 'Hubei':
        country2 = 'China'
        province = 'Hubei'
    XCDR_data = np.array(world_data.get_country_xcdr(country2, province=province, returnDates=True))
    pop = population.get_population(country)
    #Y = 100.0 * XCDR_data[:,2] / XCDR_data[:,1]
    Y = XCDR_data[:,2] / pop * 1.0e6
    dCountryDeathsPCXY[country] = (XCDR_data[:,0], Y)

fig = plt.figure(dpi=75, figsize=(20,16))
ax = fig.add_subplot(111)
#ax.set_yscale("log", nonposy='clip')
for country in dCountryDeathsPCXY:
    ax.plot(dCountryDeathsPCXY[country][0], dCountryDeathsPCXY[country][1],
            alpha=0.5, lw=2, label=country)
legend = ax.legend(title='deaths per 1M capita (beta)')

# Console tables for the same top-20 sets.
print()
print('beta, there might be bugs')
print('current deaths per capita')
for country, deathsPC, deaths, pop, date in countryDeathsPC[0:20]:
    print("%-15s" % country, ': %10.1f %5d %10d %s' % (deathsPC, deaths, pop, date.strftime("%Y-%m-%d")))
print()
print('new deaths per capita per week')
for country, deathsDeltaPC, deathsDelta, pop, date in countryDeathsDeltaPC[0:20]:
    print("%-15s" % country, ': %10.1f %5d %10d %s' % (deathsDeltaPC, deathsDelta, pop, date.strftime("%Y-%m-%d")))
plt.show()
| true
|
38fc5f84127a8546ec308ca7e97d2d0e11a29522
|
Python
|
AmanoTeam/amanobot
|
/examples/chat/countera.py
|
UTF-8
| 960
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
import sys
import asyncio
import amanobot
from amanobot.aio.loop import MessageLoop
from amanobot.aio.delegate import per_chat_id, create_open, pave_event_space
"""
$ python3 countera.py <token>
Counts number of messages a user has sent. Starts over if silent for 10 seconds.
Illustrates the basic usage of `DelegateBot` and `ChatHandler`.
"""
class MessageCounter(amanobot.aio.helper.ChatHandler):
    """Per-chat handler that replies to every incoming message with a
    running count of messages seen in that chat."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._count = 0  # messages received in this chat so far

    async def on_chat_message(self, msg):
        # Bump the counter and echo the new total back to the chat.
        self._count += 1
        await self.sender.sendMessage(self._count)
TOKEN = sys.argv[1]  # get token from command-line
# One MessageCounter is created per chat id; a handler is torn down after
# 10 seconds of inactivity (the counter then starts over).
bot = amanobot.aio.DelegatorBot(TOKEN, [
    pave_event_space()(
        per_chat_id(), create_open, MessageCounter, timeout=10),
])
loop = asyncio.get_event_loop()
# Run the message loop forever on the asyncio event loop.
loop.create_task(MessageLoop(bot).run_forever())
print('Listening ...')
loop.run_forever()
| true
|
8db7fccd94c8cfa0b8142f6cc74dc4902f5320d5
|
Python
|
Porkenstein/EurogameAI
|
/rl_mcs/explorer.py
|
WINDOWS-1252
| 9,374
| 2.828125
| 3
|
[] |
no_license
|
# Wrapper for PyBrain meant for Puerto Rico simulation
# http://pybrain.org/docs/tutorial/reinforcement-learning.html
#
# Based on examples from:
# http://simontechblog.blogspot.com/2010/08/pybrain-reinforcement-learning-tutorial_15.html
# and the given simons_blackjack_example.py
#
# actionvaluenetwork
#
from pybrain.rl import *
from pybrain.rl.environments import *
from pybrain.rl.environments.task import *
from numpy import *
from scipy import *
import sys
NUM_STATES = 22  # number of boolean flags in the abstracted game state
GAME_STATES_LENGTH = ( 2 ** NUM_STATES ) # the number of possible states (2^NUM_STATES) if too large, further the level of abstraction in the game state
ENV_INDIMS = [6, 24, 7, 6, 5, 5, 30, 2, 2, 2] # see ai_controller.py
DBGOUT = sys.stdout  # debug output stream
#class PuertoRicoExplorer():
# """Contains all environments,tasks,and experiments necessary to build policy
# while simeltaneously playing a game with said policy. Represents a single player.
# """
#
# def __init__( self, ai, agent ):
# """ai is the 10-dimensional set of sets of ai weights loaded from the pickle file.
# agent is the PyBrain RL agent used.
# """
# self.ai = ai
#
#
# # create the game object
# game = Game(3, 0, ai)
#
# def gameTurn( self ):
# """do one turn of the game.
# """
# self.game.game_turn()
class PlayerState():
    """Tracks one player's scoring data between reinforcement-learning steps.

    Keeps a "previous" and a "current" snapshot of the board quantities that
    matter for the reward, so getReward() can score the change between them.
    """

    def __init__(self, playernum):
        self.playernum = playernum
        # Previous snapshot (as of the last reward calculation).
        self.last_goods = 0   # combined value of all goods owned
        self.last_vp = 0      # victory points
        self.last_gold = 2    # doubloons
        self.last_pvp = 0     # potential victory points (VP-granting buildings)
        self.last_disc = 0    # goods discarded in the captain phase
        # Current snapshot (from the most recent observation).
        self.cur_goods = 0
        self.cur_vp = 0
        self.cur_gold = 2
        self.cur_pvp = 0
        self.cur_disc = 0
        # End-of-game flags; only updated once the game has ended.
        self.firstplace = 0
        self.lastplace = 0

    def updatePlayerstate(self, boardstate):
        """Shift the current snapshot into the previous one, then load the
        new values from *boardstate*.

        *boardstate* is the 7-element sequence
        [goods, vp, gold, potential vp, discards, firstplace, lastplace].
        """
        self.last_goods = self.cur_goods
        self.last_vp = self.cur_vp
        self.last_gold = self.cur_gold
        self.last_pvp = self.cur_pvp
        self.last_disc = self.cur_disc
        (self.cur_goods, self.cur_vp, self.cur_gold, self.cur_pvp,
         self.cur_disc, self.firstplace, self.lastplace) = boardstate

    def getReward(self):
        """Return the reward for the change between the two snapshots.

        +1 per new good / doubloon / VP / potential VP, +5 for ending the
        game in first place; -1 per newly discarded good, -5 for ending in
        last place. The total is scaled down by a factor of 10.
        """
        scale = 10
        gained_goods = max(0, self.cur_goods - self.last_goods)
        gained_vp = max(0, self.cur_vp - self.last_vp)
        gained_gold = max(0, self.cur_gold - self.last_gold)
        gained_pvp = max(0, self.cur_pvp - self.last_pvp)
        new_discards = max(0, self.cur_disc - self.last_disc)
        positive = gained_goods + gained_gold + gained_vp + gained_pvp + 5 * self.firstplace
        negative = new_discards + 5 * self.lastplace
        return (positive - negative) / scale
class PuertoRicoTask(Task):
    """A single agent<->board interaction channel.

    Forwards actions to the environment and converts board-state changes
    into rewards that shape the agent's gameplay policy.
    """

    def __init__(self, environment):
        """Create the task and couple it to the Puerto Rico board."""
        self.env = environment
        # Set via setPlayerState(); left None so that using the task before
        # a player is attached fails loudly.
        self.playerstate = None
        # Q-learning consumes the reward of the *previous* interaction, so
        # the most recent reward is buffered for one call.
        self.last_reward = 0

    def setPlayerState(self, playerstate):
        """Attach the object holding the current player's scoring data."""
        self.playerstate = playerstate

    def performAction(self, action):
        """A filtered mapping towards performAction on the board."""
        self.env.performAction(action)

    def getObservation(self):
        """Refresh the player's scoring snapshot, then return the sensors."""
        game = self.env.game
        me = self.playerstate.playernum
        snapshot = [
            sum(game.goods[me]),             # number of goods held
            game.victory_points[me],         # victory points
            game.gold[me],                   # doubloons
            game.get_end_game_vp_bonus(me),  # potential VP (level-4 buildings)
            game.discards[me],               # goods discarded so far
            int(game.winner == me),          # finished in first place?
            int(game.loser == me),           # finished in last place?
        ]
        self.playerstate.updatePlayerstate(snapshot)
        return self.env.getSensors()

    def getReward(self):
        """Return the reward corresponding to the last action performed
        (one-step delayed, per the Q-learning update rule)."""
        previous = self.last_reward
        self.last_reward = self.playerstate.getReward()
        return previous

    @property
    def indim(self):
        return self.env.indim

    @property
    def outdim(self):
        return self.env.outdim
# 2 implement your own derived class of Environment
class PuertoRicoEnvironment(Environment):
    """An implementation of the Puerto Rico board as a PyBrain Environment.

    This is a thin adapter: the real state lives in the associated Game
    object (game.py). performAction() merely records the agent's decision
    so the Player object can apply it to the board.
    """

    # Number of action values the environment accepts; varies per decision,
    # so it must be set with setIndim() before use.
    indim = None
    # Dimensionality of the observation space: one value encoding the game
    # state flags as bits (2**31 possible encodings; same for all decisions).
    outdim = 2147483648
    # The associated Game simulation (set via setGame()).
    game = None
    # Index of the player this environment represents.
    playernum = None
    # Which choice this environment is currently making.
    choice_idx = None
    # The last decision recorded by performAction().
    lastdecision = None

    def setIndim( self, indim ):
        """
        :key indim: the number of action values the environment accepts.
        :type indim: an unsigned int
        """
        self.indim = indim
        return

    def setGame(self, game):
        """ couple an initialized game to this environment. The game is the
        underlying environment which unifies all Pybrain RL Environments in
        the simulation.
        :key game: a game simulation of puerto rico to associate with the environment.
        :type game: a Game object from game.py
        """
        self.game = game
        return

    def setPlayer(self, idx):
        """Record which player index this environment represents."""
        self.playernum = idx

    def setChoiceIdx(self, idx):
        """Record which choice this environment makes."""
        self.choice_idx = idx

    def getSensors(self):
        """ return the board state packed into a single number.

        Each entry of get_game_state() is treated as one bit (bit i is set
        iff state flag i is truthy).

        :rtype: a one-element list holding the encoded state as a double
        """
        game_state = self.game.get_game_state(self.playernum)
        game_state_value = 0
        for i, flag in enumerate(game_state):  # use the state values as bits
            # Bug fix: the original added 2**i unconditionally, ignoring the
            # flag value, so every observation encoded to the same number
            # (all bits set). Only set the bit when the flag is on.
            if flag:
                game_state_value += (2 ** i)
        return [game_state_value,] # returned as a double array

    def performAction(self, action):
        """ perform a move on the board that changes the board state.
        :key action: the move performed up the board. The board will modify itself as a side-affect.
        """
        self.lastdecision = int(action[0]) # so the Player object knows how to affect the board state
        return

    def reset(self):
        """ Reinitialize the per-decision fields.

        Bug fix: the original assigned plain local variables instead of
        instance attributes, so reset() silently had no effect.
        """
        # which choices am I making
        self.choice_idx = None
        # the last decision I made
        self.lastdecision = None
        return
| true
|
d40b0516376ee279ab83c6d03a1904a76a4b39ae
|
Python
|
sechours/GamestonkTerminal
|
/gamestonk_terminal/stocks/behavioural_analysis/ba_controller.py
|
UTF-8
| 10,568
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
"""Behavioural Analysis Controller Module"""
__docformat__ = "numpy"
import argparse
import os
from typing import List
from datetime import datetime
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import get_flair
from gamestonk_terminal.menu import session
from gamestonk_terminal.common.behavioural_analysis import (
google_view,
reddit_view,
stocktwits_view,
finbrain_view,
finnhub_view,
sentimentinvestor_view,
)
class BehaviouralAnalysisController:
    """Behavioural Analysis Controller class

    Dispatches menu commands to the sentiment/social-data view modules.
    Every entry in CHOICES (besides "?" and "cls", which are handled
    inline) maps onto a call_<name> method.
    """

    # Command choices. Bug fix: "popular" was listed twice; each command
    # appears exactly once now.
    CHOICES = [
        "?",
        "cls",
        "help",
        "q",
        "quit",
        "watchlist",
        "spac",
        "spac_c",
        "wsb",
        "popular",
        "bullbear",
        "messages",
        "trending",
        "stalker",
        "infer",
        "sentiment",
        "mentions",
        "regions",
        "queries",
        "rise",
        "finbrain",
        "stats",
        "metrics",
        "social",
        "historical",
        "emerging",
        "popularsi",
        "getdd",
    ]

    def __init__(self, ticker: str, start: datetime):
        """Constructor

        Parameters
        ----------
        ticker : str
            Stock ticker the analyses operate on.
        start : datetime
            Start date used by the time-based views (e.g. Google mentions).
        """
        self.ticker = ticker
        self.start = start
        # Parser accepts a single positional command restricted to CHOICES.
        self.ba_parser = argparse.ArgumentParser(add_help=False, prog="ba")
        self.ba_parser.add_argument(
            "cmd",
            choices=self.CHOICES,
        )

    @staticmethod
    def print_help():
        """Print help"""
        print(
            "https://github.com/GamestonkTerminal/GamestonkTerminal/tree/main/gamestonk_terminal"
            "/stocks/behavioural_analysis"
        )
        print("\nBehavioural Analysis:")
        print("   cls           clear screen")
        print("   ?/help        show this menu again")
        print("   q             quit this menu, and shows back to main menu")
        print("   quit          quit to abandon program")
        print("")
        print("Finbrain:")
        print("   finbrain      sentiment from 15+ major news headlines")
        print("   stats         sentiment stats including comparison with sector")
        print("Reddit:")
        print(
            "   wsb           show what WSB gang is up to in subreddit wallstreetbets"
        )
        print("   watchlist     show other users watchlist")
        print("   popular       show popular tickers")
        print(
            "   spac_c        show other users spacs announcements from subreddit SPACs community"
        )
        print("   spac          show other users spacs announcements from other subs")
        print("   getdd         gets due diligence from another user's post")
        print("Stocktwits:")
        print(
            "   bullbear      estimate quick sentiment from last 30 messages on board"
        )
        print("   messages      output up to the 30 last messages on the board")
        print("   trending      trending stocks")
        print("   stalker       stalk stocktwits user's last messages")
        print("Twitter:")
        print("   infer         infer about stock's sentiment from latest tweets")
        print("   sentiment     in-depth sentiment prediction from tweets over time")
        print("Google:")
        print("   mentions      interest over time based on stock's mentions")
        print("   regions       regions that show highest interest in stock")
        print("   queries       top related queries with this stock")
        print("   rise          top rising related queries with stock")
        print("SentimentInvestor:")
        print("   popularsi     show most popular stocks on social media right now")
        print(
            "   emerging      show stocks that are being talked about more than usual"
        )
        print("   metrics       core social sentiment metrics for this stock")
        print("   social        social media figures for stock popularity")
        print("   historical    plot the past week of data for a selected metric")
        print("")

    def switch(self, an_input: str):
        """Process and dispatch input

        Returns
        -------
        True, False or None
            False - quit the menu
            True - quit the program
            None - continue in the menu
        """

        # Empty command
        if not an_input:
            print("")
            return None

        (known_args, other_args) = self.ba_parser.parse_known_args(an_input.split())

        # Help menu again
        if known_args.cmd == "?":
            self.print_help()
            return None

        # Clear screen
        if known_args.cmd == "cls":
            os.system("cls||clear")
            return None

        # Bug fix: the fallback lambda must accept the other_args argument
        # it is called with; the original zero-argument lambda would have
        # raised TypeError had it ever been reached.
        return getattr(
            self, "call_" + known_args.cmd, lambda _: "Command not recognized!"
        )(other_args)

    def call_help(self, _):
        """Process Help command"""
        self.print_help()

    def call_q(self, _):
        """Process Q command - quit the menu"""
        return False

    def call_quit(self, _):
        """Process Quit command - quit the program"""
        return True

    def call_watchlist(self, other_args: List[str]):
        """Process watchlist command"""
        reddit_view.watchlist(other_args)

    def call_spac(self, other_args: List[str]):
        """Process spac command"""
        reddit_view.spac(other_args)

    def call_spac_c(self, other_args: List[str]):
        """Process spac_c command"""
        reddit_view.spac_community(other_args)

    def call_wsb(self, other_args: List[str]):
        """Process wsb command"""
        reddit_view.wsb_community(other_args)

    def call_popular(self, other_args: List[str]):
        """Process popular command"""
        reddit_view.popular_tickers(other_args)

    def call_getdd(self, other_args: List[str]):
        """Process getdd command"""
        reddit_view.get_due_diligence(other_args, self.ticker)

    def call_bullbear(self, other_args: List[str]):
        """Process bullbear command"""
        stocktwits_view.bullbear(other_args, self.ticker)

    def call_messages(self, other_args: List[str]):
        """Process messages command"""
        stocktwits_view.messages(other_args, self.ticker)

    def call_trending(self, other_args: List[str]):
        """Process trending command"""
        stocktwits_view.trending(other_args)

    def call_stalker(self, other_args: List[str]):
        """Process stalker command"""
        stocktwits_view.stalker(other_args)

    def call_mentions(self, other_args: List[str]):
        """Process mentions command"""
        google_view.mentions(other_args, self.ticker, self.start)

    def call_regions(self, other_args: List[str]):
        """Process regions command"""
        google_view.regions(other_args, self.ticker)

    def call_queries(self, other_args: List[str]):
        """Process queries command"""
        google_view.queries(other_args, self.ticker)

    def call_rise(self, other_args: List[str]):
        """Process rise command"""
        google_view.rise(other_args, self.ticker)

    def call_infer(self, other_args: List[str]):
        """Process infer command"""
        if not gtff.ENABLE_PREDICT:
            print("Predict is not enabled in feature_flags.py")
            print("Twitter inference menu is disabled", "\n")
            return

        try:
            # pylint: disable=import-outside-toplevel
            from gamestonk_terminal.common.behavioural_analysis import twitter_view
        except ModuleNotFoundError as e:
            print("Optional packages need to be installed")
            print(e, "\n")
            return
        except Exception as e:
            print(e, "\n")
            return

        twitter_view.inference(other_args, self.ticker)

    def call_sentiment(self, other_args: List[str]):
        """Process sentiment command"""
        if not gtff.ENABLE_PREDICT:
            print("Predict is not enabled in feature_flags.py")
            print("Twitter inference menu is disabled", "\n")
            return

        try:
            # pylint: disable=import-outside-toplevel
            from gamestonk_terminal.common.behavioural_analysis import twitter_view
        except ModuleNotFoundError as e:
            print("Optional packages need to be installed")
            print(e, "\n")
            return
        except Exception as e:
            print(e, "\n")
            return

        twitter_view.sentiment(other_args, self.ticker)

    def call_finbrain(self, other_args: List[str]):
        """Process finbrain command"""
        finbrain_view.sentiment_analysis(other_args, self.ticker)

    def call_stats(self, other_args: List[str]):
        """Process stats command"""
        finnhub_view.sentiment_stats(other_args, self.ticker)

    def call_metrics(self, other_args: List[str]):
        """Process metrics command"""
        sentimentinvestor_view.metrics(self.ticker, other_args)

    def call_social(self, other_args: List[str]):
        """Process social command"""
        sentimentinvestor_view.socials(self.ticker, other_args)

    def call_historical(self, other_args: List[str]):
        """Process historical command"""
        sentimentinvestor_view.historical(self.ticker, other_args)

    def call_popularsi(self, other_args: List[str]):
        """Process popularsi command"""
        sentimentinvestor_view.sort_sentiment("AHI", other_args, "popular")

    def call_emerging(self, other_args: List[str]):
        """Process emerging command"""
        sentimentinvestor_view.sort_sentiment("RHI", other_args, "emerging")
def menu(ticker: str, start: datetime):
    """Behavioural Analysis Menu

    Loops reading user commands, dispatching each through the controller
    until it signals a return value (False = back to parent, True = exit).
    """
    controller = BehaviouralAnalysisController(ticker, start)
    controller.call_help(None)

    while True:
        # Build the prompt: with prompt_toolkit enabled we get tab
        # completion of the menu commands, otherwise plain input().
        if session and gtff.USE_PROMPT_TOOLKIT:
            command_completer = NestedCompleter.from_nested_dict(
                {choice: None for choice in controller.CHOICES}
            )
            user_input = session.prompt(
                f"{get_flair()} (stocks)>(ba)> ",
                completer=command_completer,
            )
        else:
            user_input = input(f"{get_flair()} (stocks)>(ba)> ")

        try:
            outcome = controller.switch(user_input)
        except SystemExit:
            # argparse rejected the command (not in CHOICES).
            print("The command selected doesn't exist\n")
            continue

        if outcome is not None:
            return outcome
| true
|
7420e34457816aada823101943e308daeb8dc9d5
|
Python
|
xeroxzen/fluffy-octo-computing-scripts
|
/reverse.py
|
UTF-8
| 63
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#! python3
# Print a right triangle of asterisks, shrinking from 12 stars down to 1.
for width in range(12, 0, -1):
    print('*' * width)
| true
|
fdb0654a4f1998fbcbbe7019b3150ac3db6d2d4b
|
Python
|
shiva16/BciPy
|
/bcipy/acquisition/datastream/tcpclient.py
|
UTF-8
| 2,294
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
"""Test client for the TCP server."""
import logging
import socket
from bcipy.acquisition.protocols import dsi
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
class Signal:
    """Shared mutable flags controlling the client loop.

    These are class-level defaults; assigning through an instance
    (signal.running = False, signal.count += 1) creates instance
    attributes that shadow them.
    """
    running = True  # set False to stop the receive loop
    count = 0       # number of EEG_DATA packets received so far
def tcp_client(host, port, signal):
    """Connect to the test server and consume packets until stopped.

    Prints EEG sensor data, logs the other event types, and counts EEG
    packets in signal.count. Runs while signal.running is True and packets
    keep arriving.

    Args:
        host: server hostname or IP address.
        port: server TCP port.
        signal: a Signal instance used as a stop flag and packet counter.
    """
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    # Most protocols (DSI and BrainVision) do not require the client to send
    # any messages
    response = receive_packet(client)
    # NOTE(review): len(response) assumes the parsed dsi packet supports
    # len() -- confirm against the dsi.packet definition.
    while signal.running and len(response) > 0:
        if response.type == 'EVENT' and response.event_code == 'SENSOR_MAP':
            logging.debug(response.message)
        elif response.type == 'EVENT' and response.event_code == 'DATA_RATE':
            logging.debug(response.message)
        elif response.type == 'EEG_DATA':
            signal.count += 1
            data = [i for i in response.sensor_data]
            print(data)
        else:
            logging.debug(response)
        response = receive_packet(client)
    client.close()
    logging.debug("Total records: %d" % (signal.count))
def receive_packet(socket, header_len=12):
    """Reads the header to get the payload length, then reads the payload.

    Args:
        socket: a connected socket to read from. (NOTE(review): the
            parameter name shadows the stdlib ``socket`` module; safe here
            because only the object's recv() is used, but worth renaming.)
        header_len: size of the fixed packet header in bytes.

    Returns:
        The dsi packet parsed from header + payload.
    """
    header_buf = receive(socket, header_len)
    header = dsi.header.parse(header_buf)
    payload_buf = receive(socket, header.payload_length)
    return dsi.packet.parse(header_buf + payload_buf)
def receive(socket, msglen, chunksize=2048):
    """Receive exactly *msglen* bytes from *socket*, which may arrive chunked.

    Args:
        socket: a connected socket-like object exposing recv(n). (The
            parameter name shadows the stdlib ``socket`` module; kept for
            backward compatibility with keyword callers.)
        msglen: total number of bytes to read.
        chunksize: maximum number of bytes requested per recv() call.

    Returns:
        bytes: the full message.

    Raises:
        RuntimeError: if the peer closes the connection before *msglen*
            bytes arrive.
    """
    chunks = []
    bytes_received = 0
    while bytes_received < msglen:
        recv_len = min(msglen - bytes_received, chunksize)
        chunk = socket.recv(recv_len)
        # Bug fix: recv() returns *bytes* in Python 3, so the original
        # comparison against the str '' could never detect a closed
        # connection; an empty bytes object is the real EOF marker.
        if not chunk:
            raise RuntimeError("socket connection broken")
        chunks.append(chunk)
        bytes_received = bytes_received + len(chunk)
    # Bug fix: joining bytes chunks with a str separator raised TypeError;
    # join with a bytes separator instead.
    return b''.join(chunks)
if __name__ == '__main__':
    """Run with: python -m daq.datastream.tcpclient"""
    # Connect to a local test server and stream packets until Ctrl-C.
    host = '127.0.0.1'
    port = 8844
    signal = Signal()
    try:
        tcp_client(host, port, signal)
    except KeyboardInterrupt:
        # Report the packet count and flag the loop to stop.
        print("Keyboard Interrupt")
        print("Total records: %d" % (signal.count))
        signal.running = False
| true
|
c46047ae3e5dd82e9282607e3b433d60621d935c
|
Python
|
kaydee0502/Data-Structure-and-Algorithms-using-Python
|
/DSA/Stack/stockspan.py
|
UTF-8
| 579
| 3.28125
| 3
|
[] |
no_license
|
class Solution:
    """Stock span problem: for each day, count the consecutive days ending
    at that day whose price is less than or equal to that day's price."""

    def calculateSpan(self, a, n):
        """Compute the spans of prices a[0:n] using a monotonic stack.

        The stack holds indices forming a strictly decreasing price
        sequence; each day's span is the distance to the nearest strictly
        greater price on the left (or the whole prefix when none exists).
        Runs in O(n).
        """
        spans = []
        greater_idx = []  # indices of a strictly decreasing run of prices
        for day in range(n):
            # Discard indices whose price can no longer bound any span.
            while greater_idx and a[greater_idx[-1]] <= a[day]:
                greater_idx.pop()
            spans.append(day - greater_idx[-1] if greater_idx else day + 1)
            greater_idx.append(day)
        return spans
# Demo: the expected spans for this price series are [1, 1, 1, 2, 1, 4, 6].
sol = Solution()
print(sol.calculateSpan([100,80,60,70,60,75,85],7))
| true
|
51056f963acf46627ce4b392823fad2df3bb9907
|
Python
|
LvivD/GalaxyZoo
|
/src/.ipynb_checkpoints/dataloader-checkpoint.py
|
UTF-8
| 8,165
| 2.53125
| 3
|
[] |
no_license
|
import os
import pandas as pd
import torch
from torch.utils.data import Dataset
from skimage import io
class GalaxyZooDatasetTrain(Dataset):
    """Training dataset for Galaxy Zoo images.

    Each annotated galaxy yields four samples (the four 90-degree
    rotations), so the dataset length is 4x the number of annotation rows.
    Images are center-cropped to 96x96 and returned as float tensors with
    a leading batch dimension, paired with the row's label vector.
    """

    def __init__(self, csv_file, root_dir, first_elem=0, last_elem=1):
        # Keep only the [first_elem, last_elem) fraction of the CSV rows.
        # The slice preserves the original row labels, so lookups in
        # __getitem__ add index_shift back in.
        self.annotations = pd.read_csv(csv_file)
        self.index_shift = int(len(self.annotations) * first_elem)
        upper = int(len(self.annotations) * last_elem)
        self.annotations = self.annotations[self.index_shift:upper]
        self.root_dir = root_dir

    def __len__(self):
        # Four rotated variants per annotated galaxy.
        return len(self.annotations) * 4

    def __getitem__(self, index):
        total = len(self.annotations) * 4
        if index >= total:
            print('dataset index ' + str(index + self.index_shift) + ' out of range ' + str(total))
            raise IndexError('dataset index ' + str(index) + ' out of range')
        row, quarter_turns = divmod(index, 4)
        img_path = os.path.join(self.root_dir, str(self.annotations["GalaxyID"][row + self.index_shift]) + ".jpg")
        image = io.imread(img_path)
        # Center-crop to 96x96.
        crop_h, crop_w = 96, 96
        top = (image.shape[0] - crop_h) // 2
        left = (image.shape[1] - crop_w) // 2
        image = image[top:top + crop_h, left:left + crop_w]
        sample = torch.as_tensor(image, dtype=torch.float32)
        sample = torch.rot90(sample, quarter_turns)
        # HWC -> CHW, plus a leading batch dimension.
        sample = sample.permute(2, 0, 1).unsqueeze(0)
        target = torch.tensor(self.annotations.iloc[row, 1:], dtype=torch.float32)
        return (sample, target)
class GalaxyZooDatasetTest(Dataset):
    """Test-time dataset: every image file found in root_dir, center-cropped
    to 96x96, returned as a (file stem, float tensor) pair."""

    def __init__(self, root_dir):
        self.root_dir = root_dir
        # Only regular files; subdirectories are ignored.
        self.files_in_dir = [
            name for name in os.listdir(root_dir)
            if os.path.isfile(os.path.join(root_dir, name))
        ]

    def __len__(self):
        return len(self.files_in_dir)

    def __getitem__(self, index):
        if index >= len(self.files_in_dir):
            raise IndexError('dataset index ' + str(index) + ' out of range')
        img_path = os.path.join(self.root_dir, self.files_in_dir[index])
        image = io.imread(img_path)
        # Center-crop to 96x96.
        crop_h, crop_w = 96, 96
        top = (image.shape[0] - crop_h) // 2
        left = (image.shape[1] - crop_w) // 2
        image = image[top:top + crop_h, left:left + crop_w]
        sample = torch.as_tensor(image, dtype=torch.float32)
        # HWC -> CHW, plus a leading batch dimension.
        sample = sample.permute(2, 0, 1).unsqueeze(0)
        # Strip the 4-character extension (".jpg") to get the galaxy id.
        return self.files_in_dir[index][:-4], sample
# ________________________________________________________________________________________________________________________________
class GalaxyZooDatasetTrainNNV2(Dataset):
    """Training dataset for the NNV2 model: identical to
    GalaxyZooDatasetTrain except that images are center-cropped to 132x132
    instead of 96x96."""

    def __init__(self, csv_file, root_dir, first_elem=0, last_elem=1):
        # Keep only the [first_elem, last_elem) fraction of the CSV rows;
        # the slice preserves the original row labels (see __getitem__).
        self.annotations = pd.read_csv(csv_file)
        self.index_shift = int(len(self.annotations) * first_elem)
        upper = int(len(self.annotations) * last_elem)
        self.annotations = self.annotations[self.index_shift:upper]
        self.root_dir = root_dir

    def __len__(self):
        # Four rotated variants per annotated galaxy.
        return len(self.annotations) * 4

    def __getitem__(self, index):
        total = len(self.annotations) * 4
        if index >= total:
            print('dataset index ' + str(index + self.index_shift) + ' out of range ' + str(total))
            raise IndexError('dataset index ' + str(index) + ' out of range')
        row, quarter_turns = divmod(index, 4)
        img_path = os.path.join(self.root_dir, str(self.annotations["GalaxyID"][row + self.index_shift]) + ".jpg")
        image = io.imread(img_path)
        # Center-crop to 132x132 (the only difference from the V1 dataset).
        crop_h, crop_w = 132, 132
        top = (image.shape[0] - crop_h) // 2
        left = (image.shape[1] - crop_w) // 2
        image = image[top:top + crop_h, left:left + crop_w]
        sample = torch.as_tensor(image, dtype=torch.float32)
        sample = torch.rot90(sample, quarter_turns)
        # HWC -> CHW, plus a leading batch dimension.
        sample = sample.permute(2, 0, 1).unsqueeze(0)
        target = torch.tensor(self.annotations.iloc[row, 1:], dtype=torch.float32)
        return (sample, target)
class GalaxyZooDatasetTestNNV2(Dataset):
    """Test-time dataset for the NNV2 model: identical to
    GalaxyZooDatasetTest except for the 132x132 center crop."""

    def __init__(self, root_dir):
        self.root_dir = root_dir
        # Only regular files; subdirectories are ignored.
        self.files_in_dir = [
            name for name in os.listdir(root_dir)
            if os.path.isfile(os.path.join(root_dir, name))
        ]

    def __len__(self):
        return len(self.files_in_dir)

    def __getitem__(self, index):
        if index >= len(self.files_in_dir):
            raise IndexError('dataset index ' + str(index) + ' out of range')
        img_path = os.path.join(self.root_dir, self.files_in_dir[index])
        image = io.imread(img_path)
        # Center-crop to 132x132.
        crop_h, crop_w = 132, 132
        top = (image.shape[0] - crop_h) // 2
        left = (image.shape[1] - crop_w) // 2
        image = image[top:top + crop_h, left:left + crop_w]
        sample = torch.as_tensor(image, dtype=torch.float32)
        # HWC -> CHW, plus a leading batch dimension.
        sample = sample.permute(2, 0, 1).unsqueeze(0)
        # Strip the 4-character extension (".jpg") to get the galaxy id.
        return self.files_in_dir[index][:-4], sample
# ________________________________________________________________________________________________________________________________
from albumentations import (
RandomRotate90, Flip, Compose, Rotate, Crop
)
import numpy as np
def aug(prob=1):
    """Build the albumentations pipeline used by the V2 datasets.

    The random 90-degree rotation, flip and free rotation probabilities are
    each scaled by *prob*; the final Crop (pixels 149..245 on both axes,
    i.e. a 96x96 window) is always applied.

    Args:
        prob: scale factor for the probabilities of the random transforms.

    Returns:
        An albumentations Compose callable: use as aug(p)(image=img)["image"].
    """
    return Compose([
        RandomRotate90(p=1*prob),
        Flip(p=0.75*prob),
        Rotate(p=0.75*prob),
        Crop(x_min=149, x_max=245, y_min=149, y_max=245)
    ], p=1)
class GalaxyZooDatasetTrainV2(Dataset):
    """Training dataset using the albumentations-based augmentation (V2).

    Unlike the rotation-expanded V1 dataset, each annotation row yields a
    single sample; variety comes from the random transforms in aug().
    """

    def __init__(self, csv_file, root_dir, first_elem=0, last_elem=1, transform_prob=1):
        # Keep only the [first_elem, last_elem) fraction of the CSV rows;
        # the slice preserves the original row labels (see __getitem__).
        self.annotations = pd.read_csv(csv_file)
        self.index_shift = int(len(self.annotations)*first_elem)
        self.annotations = self.annotations[self.index_shift:int(len(self.annotations)*last_elem)]
        self.root_dir = root_dir
        # Probability scale applied to the random transforms in aug().
        self.transform_prob = transform_prob

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        if index >= len(self.annotations):
            print('dataset index ' + str(index + self.index_shift) + ' out of range ' + str(len(self.annotations)))
            raise IndexError('dataset index ' + str(index) + ' out of range')

        img_path = os.path.join(self.root_dir, str(self.annotations["GalaxyID"][index + self.index_shift]) + ".jpg")
        image = io.imread(img_path)

        # Bug fix: transform_prob was stored by the constructor but never
        # used -- the pipeline was always built with prob=1. Honor the
        # configured probability (default 1 keeps the old behaviour).
        augmentation = aug(prob=self.transform_prob)
        augmented = augmentation(**{"image":image})
        res = augmented["image"]

        x_image = torch.as_tensor(res, dtype=torch.float32)
        # HWC -> CHW, plus a leading batch dimension.
        x_image = x_image.permute(2, 0, 1)
        x_image = x_image.unsqueeze(0)
        y_lable = torch.tensor(self.annotations.iloc[index, 1:], dtype=torch.float32)
        return (x_image, y_lable)
class GalaxyZooDatasetTestV2(Dataset):
    """Test-time dataset for the V2 pipeline.

    Returns (file stem, tensor) pairs for every image file in root_dir,
    processed through aug() with zero random-transform probability so that
    only the deterministic center crop is applied.
    """

    def __init__(self, root_dir):
        self.root_dir = root_dir
        self.files_in_dir = [f for f in os.listdir(root_dir) if os.path.isfile(os.path.join(root_dir, f))]
        # Evaluation data must not be randomly augmented.
        self.transform_prob = 0

    def __len__(self):
        return len(self.files_in_dir)

    def __getitem__(self, index):
        if index >= len(self.files_in_dir):
            raise IndexError('dataset index ' + str(index) + ' out of range')
        img_path = os.path.join(self.root_dir, self.files_in_dir[index])
        image = io.imread(img_path)

        # Bug fix: transform_prob was set to 0 in the constructor but the
        # pipeline was built with prob=1, so *test* images were randomly
        # rotated/flipped. Honor transform_prob so only the deterministic
        # center crop in aug() is applied.
        augmentation = aug(prob=self.transform_prob)
        augmented = augmentation(**{"image":image})
        res = augmented["image"]

        x_image = torch.as_tensor(res, dtype=torch.float32)
        # HWC -> CHW, plus a leading batch dimension.
        x_image = x_image.permute(2, 0, 1)
        x_image = x_image.unsqueeze(0)
        # Strip the 4-character extension (".jpg") to get the galaxy id.
        return self.files_in_dir[index][:-4], x_image
| true
|
d5d535f6d85f413f423551543db64228a978d6b6
|
Python
|
bpontes93/exCurso120Hrs
|
/exercícios/4 - Funções, args, kwargs/ex 2.py
|
UTF-8
| 536
| 4.46875
| 4
|
[] |
no_license
|
# Resolução referente 4 - Funções, args, kwargs
"""
2 - Crie uma função1 que recebe uma função2 como parâmetro e retorne o valor da
função2 executada. Faça a função1 executar duas funções que recebam um número
diferente de argumentos.
"""
def mestre(funcao, *args, **kwargs):
    """Call *funcao* with the forwarded positional and keyword arguments
    and return whatever it produces (a minimal higher-order dispatcher)."""
    resultado = funcao(*args, **kwargs)
    return resultado
def fala_oi(nome):
    """Return the greeting "Oi <nome>"."""
    return 'Oi {}'.format(nome)
def saudacao(nome, saudacao):
    """Return "<saudacao> <nome>".

    Note: the second parameter keeps its original name for backward
    compatibility even though it shadows the function itself.
    """
    return '{} {}'.format(saudacao, nome)
# Demo: dispatch through mestre() with different argument counts --
# one positional argument, then one positional plus one keyword argument.
ex = mestre(fala_oi, 'Bruno')
ex1 = mestre(saudacao, 'Bruno', saudacao='Olá')
print(ex, ex1)
| true
|
f24bab25c5e30aa1a6b0b19df48925f039b8af4d
|
Python
|
luoqp123456/api_autotest
|
/commen/readexcel.py
|
UTF-8
| 1,597
| 3.421875
| 3
|
[] |
no_license
|
# -*- coding:utf8 -*-
import xlrd
from xlutils.copy import copy
def excel_to_list(xls_name, sheet_name):
    """Read the test-case rows from one sheet of an Excel workbook.

    The first row is treated as the header; every subsequent row is zipped
    with it into a dict.

    Args:
        xls_name: path of the workbook to open.
        sheet_name: name of the sheet to read.

    Returns:
        list[dict]: one dict per data row, keyed by the header names.
    """
    workbook = xlrd.open_workbook(xls_name)
    sheet = workbook.sheet_by_name(sheet_name)
    header = sheet.row_values(0)
    # Skip the header row; pair each data row with the column names.
    return [dict(zip(header, sheet.row_values(row)))
            for row in range(1, sheet.nrows)]
def get_test_data(data_list, casename):
    """Return the first case dict whose 'casename' matches, or None.

    Args:
        data_list: the full sheet data as a list of dicts (see excel_to_list).
        casename: the test-case name to look up.
    """
    return next((case for case in data_list if case['casename'] == casename), None)
def write_excel(sheetindex, row, col, value):
    """Write *value* into cell (row, col) of one sheet of a fixed workbook.

    Uses the xlrd + xlutils.copy round-trip so existing formatting is kept.
    NOTE(review): the workbook path is hard-coded below -- consider making
    it a parameter.
    """
    # excel_path = r"F:\ceshi.xls"  # alternative file path
    excel_path = r"C:\Users\Administrator\Desktop\添加学员.xlsx"
    # excel_path=unicode('D:\\测试.xls','utf-8')  # handle a non-ASCII path (Python 2)
    rbook = xlrd.open_workbook(excel_path)  # open the workbook read-only
    wbook = copy(rbook)  # copy it into a writable workbook, preserving format
    # NOTE(review): get_sheet() appears to take a sheet index here, but the
    # __main__ demo passes a sheet *name* -- verify which xlutils expects.
    w_sheet = wbook.get_sheet(sheetindex)  # select the target sheet
    w_sheet.write(row, col, value)
    wbook.save(excel_path)  # save back to the same file
if __name__ == '__main__':
    # Demo call. NOTE(review): write_excel's first parameter looks like a
    # sheet *index*, but a sheet *name* string is passed here -- confirm.
    write_excel('Sheet1', 4, 0 ,'ceshi')
| true
|
9712341874f48b86b4e9105c58bb8b066d8b0ed3
|
Python
|
usnistgov/optbayesexpt
|
/optbayesexpt/obe_socket.py
|
UTF-8
| 5,688
| 3.265625
| 3
|
[
"NIST-PD"
] |
permissive
|
from json import dumps, loads
from socket import socket, AF_INET, SOCK_STREAM
class Socket:
    """Handles TCP communications
    The :code:`Socket` can be configured either as a 'server' or a 'client'.
    Server sockets wait for connections, receive messages and send replies.
    Client sockets initiate connections and receive replies.
    The message protocol uses messages formatted as JSON strings, each
    prependend by the string length as a zero-padded, 10-digit decimal
    number. The general form is
    dddddddddd<JSON-formatted string>
    Command messages from the client use a JSON :obj:`object`:
    dddddddddd{"command": <command_str>[, <label_str>: <value_str>[, ...]].
    Example messages
    * :code:`0000000038{"command": "goodset", "pickiness": 4}`
    * :code:`0000000019{"command": "done"}`
    * :code:`0000000004"OK"`
    Args:
        role (str): either 'client' to configure the Socket to initiate
            communications or 'server' to listen and respond.
        ip_address (str): Identifies the computer host to communicate with.
            The default of '127.0.0.1' is the localhost, enabling
            communications between processes on the host computer.
        port (int): the TCP port used for communications. The default value
            61981 was chosen chosen randomly in the unassigned port range
            49152 to 65535.
    Attributes:
        server: for the 'server' role, a :code:`socket.socket` object
            configured to listen and accept connections
        connection: for the 'client' role, a :code:`socket.socket` object
            configured to initiate connections and send messages
    """
    def __init__(self, role, ip_address='127.0.0.1', port=61981):
        self.role = role
        self.ip_address = ip_address
        self.port = port
        # The active connection socket; created lazily for clients, and by
        # accept() for servers inside receive().
        self.connection = None
        if self.role == 'client':
            pass
            # Client will connect as needed.
        elif self.role == 'server':
            # Servers bind immediately and listen with a backlog of 1.
            self.server = socket(AF_INET, SOCK_STREAM)
            self.server.bind((self.ip_address, self.port))
            self.server.listen(1)
        else:
            raise Exception(
                'Invalid role {}. Valid choices are \
                client or server.'.format(role))
    def send(self, contents):
        """
        Formats and sends a message
        This method formats the :code:`contents` argument into the message
        format, opens a connection and sends the :code:`contents` as a message.
        Args:
            contents: Any JSON format-able object. Briefly, python's
                :obj:`str`, :obj:`int`, :obj:`float`, :obj:`list`,
                :obj:`tuple`, and :obj:`dict` objects.
        Important:
            json.dumps() is not able to format numpy arrays. To send numpy
            arrays, the :code:`numpy.tolist()` method is a convenient way to
            list-ify a numpy array. For example::
                mySocket.send(myarray.tolist())
        """
        if self.role == 'client':
            # Clients open a fresh connection per message; servers reuse the
            # connection established by the last accept() in receive().
            self.connection = socket(AF_INET, SOCK_STREAM)
            self.connection.connect((self.ip_address, self.port))
        json = dumps(contents).encode()
        # 10-digit zero-padded byte count prefix, per the framing protocol.
        jdatalen = '{:0>10d}'.format(len(json)).encode()
        message = jdatalen + json
        # print(message)
        self.connection.sendall(message)
    def receive(self):
        """Wait for and process messages on the TCP port
        Blocks until a connection is made, then reads the number of bytes
        specified in the first 10 characters. Reads the connection until
        the full message is received, then decodes the messages string.
        Returns:
            The message string decoded and repackaged as a python object
        """
        gulp = 1024
        # Note: this loop always exits via the return below after exactly
        # one complete message has been read.
        while True:
            if self.role == 'server':
                # accept() method blocks until a connection is made
                self.connection, address = self.server.accept()
            # First read exactly 10 bytes: the decimal message length.
            bitcount = b''
            bytes_recd = 0
            while bytes_recd < 10:
                chunk = self.connection.recv(10 - bytes_recd)
                if chunk == b'':
                    raise RuntimeError("socket connection broken")
                bitcount += chunk
                bytes_recd = bytes_recd + len(chunk)
            message_len = int(bitcount)
            # Then read the JSON payload in chunks of at most `gulp` bytes.
            raw_message = b''
            bytes_recd = 0
            while bytes_recd < message_len:
                chunk = self.connection.recv(
                    min(message_len - bytes_recd, gulp))
                if chunk == b'':
                    raise RuntimeError("socket connection broken")
                raw_message += chunk
                bytes_recd = bytes_recd + len(chunk)
            return loads(raw_message.decode())
    def close(self):
        """Close the communication connection.
        Only clients need to close connections once they're done communicating.
        No need to call this for servers.
        """
        self.connection.close()
        self.connection = None
    def tcpcmd(self, command):
        """Sends a command and receives a response.
        Run from a client socket, this method sends a command message and
        receives a response. The connection is then closed.
        Args:
            command: a JSON-ifiable python object to be interpreted by the
                recipient.
        Returns:
            a pyhton object decoded from the reply message
        """
        if self.role == 'client':
            self.send(command)
            reply = self.receive()
            self.connection.close()
            return reply
| true
|
3387ad7cdeb1434cb9bed7b878390ecab5f5bbd9
|
Python
|
hysl/IntroToCompProg
|
/assign5/assign5_problem2b.py
|
UTF-8
| 371
| 3.984375
| 4
|
[] |
no_license
|
# Helen Li
# March 25, 2015
# Assignment #5: Problem #2b: Find all Prime Numbers between 1 and 1000
# This code finds all prime numbers between 1 and 1000
# A candidate is prime when no integer in [2, candidate) divides it evenly;
# this replaces the for/else search with an all() over the same divisors.
for candidate in range(2, 1001):
    if all(candidate % divisor != 0 for divisor in range(2, candidate)):
        print(candidate, "is a prime number.")
| true
|
ff86a7857244805a39a641c59bdd5f93a63addd6
|
Python
|
stavgrossfeld/baby_name_predictor
|
/serving_model.py
|
UTF-8
| 1,966
| 2.828125
| 3
|
[] |
no_license
|
# export FLASK_APP=serving_model.py
# run flask
from flask import Flask, request, render_template
import pandas as pd
import nltk
from sklearn.tree import DecisionTreeClassifier
#from sklearn.cross_validation import train_test_split
import pickle
f = open('feature_names.pickle', 'rb')
FEATURE_NAMES = pickle.load(f)
f.close()
f = open('name_classifier.pickle', 'rb')
model = pickle.load(f)
f.close()
app = Flask(__name__)
# predict on any name code
def predict_name(name, clf):
    """Predict the gender label for one name using character-bigram features."""
    # Character bigrams of the name, e.g. "anna" -> ["an", "nn", "na"].
    bigrams = ["".join(gram) for gram in nltk.ngrams(name, 2)]
    # Feature columns are every trained feature except the label/name columns.
    feature_cols = [col for col in FEATURE_NAMES
                    if col not in ["is_girl", "names"]]
    # One-row indicator frame: 1 where the bigram occurs in the name.
    row = {feat: 1 if feat in bigrams else 0 for feat in feature_cols}
    features = pd.DataFrame(pd.Series(row)).transpose()
    return clf.predict(features)[0]
app = Flask(__name__)
@app.route('/', methods = ["GET","POST"])
def serve_template():
    """Render the input form on GET; on POST, predict gender for each name."""
    if request.method == 'GET':
        return render_template('form.html')
    else:
        # POST body is form-encoded, e.g. b"names=Anna%2CBob+Carol".
        raw = str(request.get_data())
        cleaned = raw.replace("'", "").replace("%2C", " ")
        names = cleaned.split("=")[1].rstrip().split("+")
        rows = [(candidate, predict_name(candidate.lower(), model))
                for candidate in names]
        table = pd.DataFrame(rows)
        table.columns = ["name", "is_girl"]
        return table.to_html()
@app.route("/gender/<message>")
def gender(message):
clf = model
baby_name = str(message)
baby_names = baby_name.replace("+"," ").replace("'","").replace("%2C"," ").split(" ")
predicted_list = [(name, predict_name(name.lower(), clf)) for name in baby_names]
#print predicted_list
pred_df = pd.DataFrame([pred for pred in predicted_list])
pred_df.columns = ["name","is_girl"]
return pred_df.to_html()
if __name__ == "__main__":
app.run()
| true
|
c58390a68fd7531a0087462850be08d45b419633
|
Python
|
lottege/smartgrid
|
/main.py
|
UTF-8
| 1,610
| 2.9375
| 3
|
[] |
no_license
|
from algorithms import random_and_hillclimber_batteries, cable_list, cable_list_sneller, verre_huizen_eerst, \
buiten_naar_binnen, brute_force_batterijen, batterij_allerlei_hillclimber
# import visualisatie as vis
print("how would you like to place the batteries? "
"\nto use: "
"\n - cable_list press 0"
"\n - cable_list_sneller press 1 "
"\n - verre_huizen_eerst press 2"
"\n - buiten_naar_binnen press 3"
"\n - random_and_hillclimber_batteries press 4"
"\n - brute_force_batterijen press 5"
"\n - batterij_allerlei_hillclimber press 6")
algorithm = input()
print("thanks")
if algorithm == "0":
segments, vis_houses, vis_batteries, vis_cables = cable_list.cable_list()
elif algorithm == "1":
segments, vis_houses, vis_batteries, vis_cables = cable_list_sneller.cable_list_sneller()
elif algorithm == "2":
segments, vis_houses, vis_batteries, vis_cables = verre_huizen_eerst.verre_huizen_eerst()
elif algorithm == "3":
segments, vis_houses, vis_batteries, vis_cables = buiten_naar_binnen.buiten_naar_binnen()
elif algorithm == "4":
segments, vis_houses, vis_batteries, vis_cables = random_and_hillclimber_batteries.random_and_hillclimber_batteries()
elif algorithm == "5":
segments, vis_houses, vis_batteries, vis_cables = brute_force_batterijen.brute_force_batterijen()
elif algorithm == "6":
segments, vis_houses, vis_batteries, vis_cables = batterij_allerlei_hillclimber.batterij_allerlei_hillclimber()
print("amount of needed cable segments: ", segments)
# vis.visualisation(vis_houses, vis_batteries, vis_cables)
| true
|
e9cd83a2ac24770e8f5262a6683f551c3be3888c
|
Python
|
dancb10/ppscu.com
|
/Stuff/memorview.py
|
UTF-8
| 159
| 2.5625
| 3
|
[] |
no_license
|
from array import array

# BUG FIX: the original called array.array('h', ...) after
# `from array import array`, which raises AttributeError -- the imported
# name *is* the constructor.
numbers = array('h', [-2, -1, 0, 1, 2])   # signed 16-bit integers
memv = memoryview(numbers)
# Reinterpret the same buffer as unsigned bytes (zero-copy view).
memv_oct = memv.cast('B')
memv_oct.tolist()
# Writing through the byte view mutates the underlying array in place.
memv_oct[5] = 4
| true
|
cffd0917dec97cf2085926bc69775e07078915ff
|
Python
|
azetter/retailer_enterprise
|
/Ham - Hmart/_Useful Jupyter and Python Scripts/mysql_test.py
|
UTF-8
| 982
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
import MySQLdb

# Dump every productUPC from the `product` table to a comma-terminated
# text file, one UPC per line.
# NOTE(review): credentials are hard-coded; move them to env vars / config.
try:
    db = MySQLdb.connect(host="127.0.0.1",  # your host, usually localhost
                         user="root",       # your username
                         password="Hammania",  # your password
                         db="projectcoffee")   # name of the data base
    try:
        # A Cursor object lets you execute all the queries you need.
        cur = db.cursor()
        cur.execute("SELECT productUPC FROM product")
        # File to print ProductUPCs to
        path = '/home/hameed/Documents/Github/retailer_enterprise/upcs.txt'
        # BUG FIX: the original sliced the *repr* of the row tuple
        # (str(row)[1:-2]), which kept quote characters for string UPCs and
        # leaked the file handle on error.  Take the first column directly
        # and let `with` close the file.
        with open(path, 'w') as upc_file:
            for row in cur:
                upc = str(row[0])
                print(('Query result: {}').format(upc))
                upc_file.write(upc + ',\n')
    finally:
        db.close()
except Exception as e:
    print("Failed to connect to database...", "Error:", e)
| true
|
4ffb581772ee58aaeb9dcf7f294e95e7c94c8458
|
Python
|
PeterSzakacs/convnet_euso
|
/src/cmdint/cmd_interface_visualizer.py
|
UTF-8
| 2,986
| 2.578125
| 3
|
[] |
no_license
|
import os
import argparse
import cmdint.common.argparse_types as atypes
import cmdint.common.dataset_args as dargs
class CmdInterface():
    """Builds and parses the command-line interface for the item visualizer.

    Wires together the project-specific dataset/item-type argument helpers
    (cmdint.common.dataset_args) with output and metadata-converter options.
    """
    def __init__(self):
        parser = argparse.ArgumentParser(description="Visualize dataset items")
        # input dataset settings
        group = parser.add_argument_group(title="Input dataset")
        in_aliases = {'dataset name': 'name', 'dataset directory': 'srcdir'}
        dset_args = dargs.DatasetArgs(input_aliases=in_aliases)
        dset_args.add_dataset_arg_double(group, dargs.arg_type.INPUT,
                                         required=True,
                                         dir_default=os.path.curdir)
        item_args = dargs.ItemTypeArgs()
        item_args.add_item_type_args(group, dargs.arg_type.INPUT)
        # Half-open item range [start_item, stop_item) to visualize.
        group.add_argument('--start_item', default=0, type=int,
                           help=('index of first item to visualize.'))
        group.add_argument('--stop_item', default=None, type=int,
                           help=('index of the item after the last item to '
                                 'visualize.'))
        # output settings
        group = parser.add_argument_group(title="Output settings")
        group.add_argument('--outdir', default=os.path.curdir,
                           help=('directory to store output images. If a '
                                 'non-default directory is used, it must '
                                 'exist prior to calling this script. '
                                 'Default: current directory. Images '
                                 'are stored under outdir/img/<item_type>'))
        group.add_argument('-f', '--force_overwrite', action='store_true',
                           help=('overwrite any existing items under outdir '
                                 'having the same name as generated items'))
        # metadat to text converter
        group = parser.add_argument_group(title="Metadata to text converter")
        # Mutually exclusive store_const flags: at most one converter name
        # ('simu' / 'synth' / 'flight') ends up set, the others stay None.
        m_conv = group.add_mutually_exclusive_group(required=False)
        m_conv.add_argument('--simu', action='store_const', const='simu',
                            help=('Simu metadata converter'))
        m_conv.add_argument('--synth', action='store_const', const='synth',
                            help=('Synth metadata converter'))
        m_conv.add_argument('--flight', action='store_const', const='flight',
                            help=('Flight metadata converter'))
        self.parser = parser
        self.dset_args = dset_args
        self.item_args = item_args
    def get_cmd_args(self, argsToParse):
        """Parse argsToParse, validate outdir, and derive composite fields.

        Raises:
            ValueError: if the requested output directory does not exist.
        """
        args = self.parser.parse_args(argsToParse)
        if not os.path.isdir(args.outdir):
            raise ValueError("Invalid output directory {}".format(args.outdir))
        atype = dargs.arg_type.INPUT
        args.item_types = self.item_args.get_item_types(args, atype)
        # Exactly one (or none) of these is non-None thanks to the
        # mutually exclusive group above.
        args.meta_to_text_conv = args.simu or args.flight or args.synth
        return args
| true
|
330c94a9a3f16502e5432e9a136c784e60baf01f
|
Python
|
yixinj/cs4417asn2
|
/part3/mapper.py
|
UTF-8
| 732
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env python
import sys

# Hadoop-streaming mapper: for each input line of the form
# movie_id::movie_name::genre1|genre2|... emit one "GENRE<TAB>movie name"
# record per genre.
for line in sys.stdin:
    # Processes each line of the file (split on the first two '::' only,
    # so genre lists containing ':' are preserved).
    movie_id, movie_name, movie_genres = line.split('::', 2)
    # Strip blank spaces
    movie_id = movie_id.strip()
    movie_name = movie_name.strip()
    movie_genres = movie_genres.strip()
    # Processes the genres
    for genre in movie_genres.split('|'):
        # If genre is blank, replace it with 'None' -- a design choice so
        # 'None' can be one of many genres for a movie.
        if genre == '':
            genre = 'None'
        # Convert genre to upper
        genre = genre.upper()
        # FIX: parenthesized print works under both Python 2 and 3
        # (the original bare print statement was Python-2-only).
        print('%s\t%s' % (genre, movie_name))
| true
|
dc9bdc6484875c10528903a1263872ab4481df79
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02555/s978207014.py
|
UTF-8
| 278
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# Count the ways to write `total` as an ordered sum of integers >= 3,
# answered modulo 10**9 + 7.
total = int(input())
MODULUS = 10 ** 9 + 7
if total == 1 or total == 2:
    # No summand >= 3 fits.
    print(0)
else:
    ways = [0] * (total + 1)
    for target in range(total + 1):
        # Last summand is (target - split) for each split index >= 3 ...
        for split in range(target - 3, 2, -1):
            ways[target] += ways[split]
        # ... plus the single-summand decomposition of `target` itself.
        ways[target] += 1
    print(ways[total] % MODULUS)
| true
|
d7a8c4005eccdb55e28dee43413d07b51062e6ea
|
Python
|
rasul-sharifzade/LearnPython
|
/list/demo_list_del.py
|
UTF-8
| 62
| 2.625
| 3
|
[] |
no_license
|
thislist = ["apple", "banana"]
del thislist[0]
print(thislist)
| true
|
2aeb91608537d3623ec7070b2f3b5f801deea707
|
Python
|
songquanhe-gitstudy/python_spider_demo
|
/pythonSpider_demo/DianPing.py
|
UTF-8
| 1,448
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/python3
#-*- coding:utf-8 -*-
# Scrape 50 listing pages from dianping.com and append the raw HTML to a file.
import requests,sys
import re
from bs4 import BeautifulSoup
import urllib.parse
import urllib.request

# Reference: http://blog.csdn.net/u010154424/article/details/52273868
print("正在从豆瓣电影中抓取数据...")

# BUG FIX: the original wrapped the UA string in a *set* literal, which
# urllib cannot use as request headers -- headers must be a dict keyed by
# header name.
USER_AGENT = ('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')

for page in range(50):
    url = 'http://www.dianping.com/shanghai/ch10/p'+str(page)
    print('--------------------------------正在爬取第'+str(page+1)+'页数据----------------------------------')
    headers = {'User-Agent': USER_AGENT}
    req = urllib.request.Request(url=url, headers=headers)
    html = urllib.request.urlopen(req).read()
    # BUG FIX: removed html.raise_for_status() -- urlopen().read() returns
    # bytes, which has no such method (that is a requests.Response method);
    # HTTP errors already surface as urllib.error.HTTPError.
    soup = BeautifulSoup(html, 'html.parser')  # parse the HTML
    # Append this page's markup; `with` guarantees the handle is closed
    # (the original leaked one file handle per page).
    with open("song3.txt", 'a', encoding='utf-8') as f:
        f.write(str(soup))

print('爬取完毕!')
| true
|
21f7922b8f31e524d83df8a17847a16fe7375712
|
Python
|
YorkSu/hat
|
/utils/timer.py
|
UTF-8
| 884
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import time
class Timer(object):
    """Measure the wall-clock duration of a callable and log start/stop/cost.

    `Log` is any callable invoked as Log(value, _T=label).
    """

    def __init__(self, Log, *args, **kwargs):
        self.Log = Log
        super().__init__(*args, **kwargs)

    @property
    def time(self):
        """Current local time as a 'YYYY-MM-DD-HH-MM-SS' string."""
        return time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())

    def mktime(self, timex):
        """Convert a 'YYYY-MM-DD-HH-MM-SS' string back to epoch seconds."""
        return time.mktime(time.strptime(timex, '%Y-%m-%d-%H-%M-%S'))

    def timer(self, text, func, *args, **kwargs):
        """Run func(*args, **kwargs), logging timing; return (times_dict, result)."""
        started = self.time
        self.Log(started, _T=f'{text} Start:')
        outcome = func(*args, **kwargs)
        stopped = self.time
        self.Log(stopped, _T=f'{text} Stop:')
        # Cost in whole seconds (strftime granularity is one second).
        elapsed = self.mktime(stopped) - self.mktime(started)
        self.Log(elapsed, _T=f'{text} cost time (second):')
        report = {
            f'{text}_start_time'.upper(): started,
            f'{text}_stop_time'.upper(): stopped,
            f'{text}_cost_time'.upper(): elapsed,
        }
        return report, outcome
| true
|
4d7b8fd2ab01ec7fab9912ac627a55bfa7e79c1e
|
Python
|
MMTObservatory/pyINDI
|
/example_clients/blob.py
|
UTF-8
| 1,788
| 2.6875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/python3.8
from pathlib import Path
from tornado.web import StaticFileHandler
from pyindi.webclient import INDIWebApp, INDIHandler
# Configuration
WEBPORT = 5905 # The port for the web app
INDIPORT = 7624 # The indiserver port
INDIHOST = "localhost" # Where the indiserver is running
DEVICES = ["*"] # All devices is called by an asterisk
CURRENT_DIR = Path.cwd() # The current directory
TEMPLATE = "blob.html"
# Build handlers with path for rendering, each path should have a handler
class GUI(INDIHandler):
    """Tornado request handler that renders the INDI client page."""
    def get(self):
        # Pass additional variables to appear in the html template
        self.indi_render(CURRENT_DIR / TEMPLATE, devices=DEVICES,
                         example_variable="Hello World", title="Test GUI")
class bhandler:
    """Persist incoming BLOBs as sequentially numbered files in a directory."""

    def __init__(self, imgdir):
        self.imgcnt = 1      # next sequence number to assign
        self.imgdir = imgdir  # pathlib.Path of the output directory

    def handle_blob(self, blob):
        """Write blob['data'] to imgdir as imageNNN.<format> and bump the counter."""
        filename = "image{:>03d}.{}".format(self.imgcnt, blob['format'])
        with (self.imgdir / filename).open('wb') as out:
            out.write(blob["data"])
        print(f"Saved {blob['format']} image to {str(self.imgdir/filename)}.")
        self.imgcnt += 1
# If storing images, create image directory
imgs = Path("/tmp/imgs")
imgs.mkdir(exist_ok=True)
blob_handler = bhandler(imgs)
# Wire the web app to the indiserver; every incoming BLOB is routed to
# blob_handler.handle_blob.
web_app = INDIWebApp(
    webport=WEBPORT,
    indihost=INDIHOST,
    indiport=INDIPORT,
    handle_blob=blob_handler.handle_blob
)
print(f"Go to http://<server_name>:{WEBPORT}")
print("If the server is on localhost go to:")
print(f"http://localhost:{WEBPORT}/")
# Attach handlers and build the application
# For images, use tornado.web.StaticFileHandler and link the path
web_app.build_app(
    [
        (r"/", GUI),
        (r"/imgs/(.*)", StaticFileHandler, {"path": imgs})
    ],
)
| true
|
5f6700bce21f8e2af19b111c620c7b816e6078b8
|
Python
|
tpopenfoose/OptionSuite
|
/optionPrimitives/optionPrimitive.py
|
UTF-8
| 2,058
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
from abc import ABCMeta, abstractmethod
class OptionPrimitive(object):
    """This class is a generic type for any primitive that can be made using
    a PUT or CALL option and/or stock, e.g., iron condor or strangle.

    NOTE(review): `__metaclass__ = ABCMeta` is the Python 2 idiom; on
    Python 3 it is silently ignored, so @abstractmethod is not enforced
    there. If Python 2 support can be dropped, switch to
    `class OptionPrimitive(metaclass=ABCMeta)` -- confirm target version.
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def getUnderlyingTicker(self):
        """Get the name (string) of the underlying being used for the option primitive.
        """
        pass
    @abstractmethod
    def getBuyingPower(self):
        """Used to calculate the buying power needed for the
        option primitive.
        """
        pass
    @abstractmethod
    def getDelta(self):
        """Used to get the delta for the option primitive.
        """
        pass
    @abstractmethod
    def getVega(self):
        """Used to get the vega for the option primitive.
        """
        pass
    @abstractmethod
    def getTheta(self):
        """Used to get the theta for the option primitive.
        """
        pass
    @abstractmethod
    def getGamma(self):
        """Used to get the gamma for the option primitive.
        """
        pass
    @abstractmethod
    def calcProfitLoss(self):
        """Calculate the profit and loss for the option primitive based on option values when the trade
        was placed and new option values.
        :return: Profit / loss (positive decimal for profit, negative decimal for loss).
        """
        pass
    @abstractmethod
    def calcProfitLossPercentage(self):
        """Calculate the profit and loss for the option primitive based on option values when the trade
        was placed and new option values.
        :return: Profit / loss as a percentage of the initial option prices. Returns negative percentage for a loss.
        """
        pass
    @abstractmethod
    def updateValues(self, tickData):
        """Based on the latest pricing data, update the option values.
        :param tickData: option chain with pricing information.
        :return True if we were able to update values, false otherwise.
        """
        pass
| true
|
d231971612d6b0843802dcf04b2ad9ca29cec8ce
|
Python
|
limgeonho/Algorithm
|
/inflearn_practice/section6/수들의 조합.py
|
UTF-8
| 455
| 3.078125
| 3
|
[] |
no_license
|
# "수들의 조합" (combinations of numbers):
# count the k-element combinations of the n input numbers whose sum is a
# multiple of m.
def DFS(L, total, chosen_sum=None, *, s=None, sum=None):
    pass  # placeholder removed below


def DFS(L, s, total):
    """Depth-first choice: L = items picked so far, s = next index to try,
    total = running sum of the picked items."""
    global cnt
    if L == k:
        if total % m == 0:
            cnt += 1
    else:
        for i in range(s, n):
            DFS(L + 1, i + 1, total + a[i])


n, k = map(int, input().split())
# BUG FIX: the original read every input line into a throwaway variable `p`
# and left `a` as k zeros (and indexed past the end whenever n > k).
# Collect the numbers from the n input lines instead.
a = []
for _ in range(n):
    a.extend(map(int, input().split()))
m = int(input())
cnt = 0
DFS(0, 0, 0)
print(cnt)
| true
|
cc5ec31461ddd8a6c0fdeacd93bad9e8d6599c7d
|
Python
|
mostafa-elhaiany/blobVolleyBallAi
|
/ball.py
|
UTF-8
| 1,986
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
import pygame
import os
import random
ballImage= pygame.transform.scale( pygame.image.load( os.path.join( "imgs","ball.png" ) ), (50,50) )
RADIUS=10
class Ball:
    """Volleyball sprite: simple projectile physics plus blob collisions."""
    # Horizontal/vertical speed imparted by a bounce, in pixels per tick.
    velocity=15
    def __init__(self,x,y):
        self.x=x
        self.y=y
        self.r=RADIUS
        self.height=0
        self.top=0
        self.bottom=0
        self.image=ballImage
        # Per-tick displacement components.
        self.deltaX=0
        self.deltaY=0
        # Ticks since the last bounce; drives the gravity curve.
        self.tickCount=0
        self.onTheLeft=True
    def move(self):
        # Apply the current velocity to the position.
        self.x+=self.deltaX
        self.y+=self.deltaY
    def gravity(self,windowHeight):
        """Advance one tick of falling motion, clamped to the floor."""
        self.deltaY+=2
        self.tickCount+=1
        # Quadratic-in-time fall, capped at 16 px per tick.
        displacement = self.deltaY*self.tickCount + 1.5*(self.tickCount**2)
        if(displacement>=16):
            displacement=16
        elif(displacement<0):
            displacement-=2
        self.y+=displacement
        if(self.y>windowHeight-self.height):
            # Rest just above the floor and stop horizontal motion.
            self.y=windowHeight-self.height-10
            self.deltaX=0
    def bounce(self,intersectionX,blobVel,blobJumpVel):
        """Rebound off a blob; intersectionX is the contact x-offset in the mask.
        NOTE(review): the 20/18 thresholds appear tied to the blob sprite
        width (left/right halves) -- confirm against the blob dimensions."""
        self.y+=self.velocity+blobJumpVel
        self.deltaY=-self.velocity
        if(intersectionX>=20):
            self.deltaX+=self.velocity+blobVel
        elif(intersectionX<=18):
            self.deltaX-=self.velocity+blobVel
        else:
            # Near-center hit: small random horizontal kick.
            self.deltaX=random.randint(-2,2)
        self.tickCount=0
    def draw(self, window):
        window.blit(self.image,(self.x,self.y))
    def getMask(self):
        # Pixel-perfect collision mask from the sprite's alpha channel.
        return pygame.mask.from_surface(self.image)
    def collide(self,blob):
        """Check pixel overlap with a blob; bounce and return True on contact."""
        blobMask=blob.getMask()
        ballMask= self.getMask()
        # Offset of the blob mask relative to the ball mask (the -10 nudges
        # the comparison upward slightly).
        offset = ( round(self.x)-round(blob.x) , round(self.y) - round(blob.y)-10 )
        point=ballMask.overlap(blobMask,offset)
        if(point):
            self.bounce(point[0],blob.velocity,blob.jumpVel)
            return True
        return False
| true
|
356478a4639f0b28988439f5afb957adf1c425c0
|
Python
|
callanmurphy/Darkness-Game
|
/classes.py
|
UTF-8
| 3,496
| 3.296875
| 3
|
[] |
no_license
|
# Callan Murphy
# 21/11/19
# Classes File
import pygame
import random
WIDTH = 1276
HEIGHT = 800
class Thing:
    """Parent class for any object on screen"""
    def __init__(self, img, sizex, sizey):
        # Load and scale the sprite, then place it at a random on-screen spot
        # kept at least one sprite-size away from each edge.
        self.img = pygame.transform.scale(
            pygame.image.load(img).convert_alpha(), (sizex, sizey))
        self.rect = self.img.get_rect()
        self.rect.x = random.randint(sizex, WIDTH-sizex)
        self.rect.y = random.randint(sizey, HEIGHT-sizey)
    def collided(self, obj):
        # True when this sprite's rect overlaps obj's rect.
        return self.rect.colliderect(obj)
    def change_img(self, img):
        # NOTE(review): rescales to a fixed 45x100 rather than the original
        # size -- confirm this is intended only for the player sprite.
        self.img = pygame.transform.scale(
            pygame.image.load(img).convert_alpha(), (45, 100))
    def collide_fix(self, x):
        # Push this object back outside of x, choosing the side by overlap.
        # TODO - needs all to be "if" but needs to be fixed
        if self.rect.bottom - 5 < x.rect.top < self.rect.bottom:
            self.rect.y = x.rect.top - self.rect.height
        elif self.rect.top < x.rect.bottom < self.rect.top + 5:
            self.rect.y = x.rect.bottom
        elif x.rect.right > self.rect.x + self.rect.width > x.rect.x:
            self.rect.x = x.rect.x - self.rect.width
        elif x.rect.x < self.rect.x < x.rect.right:
            self.rect.x = x.rect.right
    def collide_top(self, x):
        # Snap on top of x when overlapping its upper edge by < 5 px.
        if self.rect.bottom - 5 < x.rect.top < self.rect.bottom:
            self.rect.y = x.rect.top - self.rect.height
    def collide_bottom(self, x):
        # Snap below x when overlapping its lower edge by < 5 px.
        if self.rect.top < x.rect.bottom < self.rect.top + 5:
            self.rect.y = x.rect.bottom
    def collide_right(self, x):
        # Snap to x's left side when overlapping from the left.
        if x.rect.right > self.rect.x + self.rect.width > x.rect.x:
            self.rect.x = x.rect.x - self.rect.width
    def collide_left(self, x):
        # Snap to x's right side when overlapping from the right.
        if x.rect.x < self.rect.x < x.rect.right:
            self.rect.x = x.rect.right
    def new_pos(self):
        # Teleport to a uniformly random on-screen position.
        self.rect.x = random.randint(0, WIDTH)
        self.rect.y = random.randint(0, HEIGHT)
        return self
    def get_position(self):
        return self.rect.x, self.rect.y,
    def check_boundary(self):
        # Clamp the sprite fully inside the window.
        if self.rect.x > WIDTH - self.rect.width:
            self.rect.x = WIDTH - self.rect.width
        if self.rect.x < 0:
            self.rect.x = 0
        if self.rect.y > HEIGHT - self.rect.height:
            self.rect.y = HEIGHT - self.rect.height
        if self.rect.y < 0:
            self.rect.y = 0
class Mob(Thing):
    """Child class for any onscreen mobs"""
    def __init__(self, name, health, sizex, sizey, img, x=None, y=None):
        Thing.__init__(self, img, sizex, sizey)
        self.name = name
        self.health = health
        # if no x or y provided, random values will be set by parent
        if x is not None and y is not None:
            self.rect.x = x
            self.rect.y = y
    def move_to_obj(self, obj):
        # Step one pixel per axis toward obj (simple chase behavior).
        if self.rect.x < obj.rect.x:
            self.rect.x += 1
        if self.rect.x > obj.rect.x:
            self.rect.x -= 1
        if self.rect.y < obj.rect.y:
            self.rect.y += 1
        if self.rect.y > obj.rect.y:
            self.rect.y -= 1
class Barrier(Thing):
    """Static obstacle; inherits all behavior from Thing unchanged."""
    def __init__(self, img, sizex, sizey):
        Thing.__init__(self, img, sizex, sizey)
class Coin(Thing):
    """Collectible coin; spawns below y=85 (keeps it clear of the HUD area)."""
    def __init__(self, img, sizex, sizey):
        Thing.__init__(self, img, sizex, sizey)
        self.new_pos()
    def new_pos(self):
        # Overrides Thing.new_pos: y starts at 85 instead of 0.
        self.rect.x = random.randint(0, WIDTH)
        self.rect.y = random.randint(85, HEIGHT)
        return self
| true
|
8dadd0c6b6dba647654d64ea9612c81759052d15
|
Python
|
erlendw/INF3331-erlenwe
|
/Assignment4/c_in_python_cython/mandelbrot_3.py
|
UTF-8
| 325
| 2.765625
| 3
|
[] |
no_license
|
import m
import time
from PIL import Image


def createMandelbrot(startx=-2.0, endx=2.0, starty=-2.0, endy=2.0):
    """Render a 400x400 Mandelbrot image over the given complex-plane window
    using the C extension `m`, save it as mandelbrotinC.png, and print the
    elapsed wall-clock time in seconds.
    """
    starttime = time.time()
    data = m.me(400, 400, startx, endx, starty, endy)
    img = Image.fromarray(data, 'RGB')
    img.save('mandelbrotinC.png')
    endtime = time.time()
    # FIX: parenthesized print works under both Python 2 and 3
    # (the original bare print statement was Python-2-only).
    print(endtime - starttime)
| true
|
04110f3d95f4ffff0cd4d7196db4c969f198d089
|
Python
|
lemony3650/basic_python
|
/basic/lesson_9.py
|
UTF-8
| 820
| 2.65625
| 3
|
[] |
no_license
|
# Fetch JSON data over HTTP, two ways: urllib and requests.
# from __future__ import (absolute_import, division, print_function, unicode_literals)
from urllib.request import urlopen # 1
import json
import requests
json_url = 'https://raw.githubusercontent.com/muxuezi/btc/master/btc_close_2017.json'
response = urlopen(json_url) # 2: open the URL with urllib
# Read the raw response bytes
req = response.read()
# Write the data to a file
with open('text_file/btc_close_2017_urllib.json', 'wb') as f: # 3
    f.write(req)
# Parse the JSON payload (decode bytes to text first)
file_urllib = json.loads(req.decode('utf8')) # 4
print(file_urllib)
json_url = 'https://raw.githubusercontent.com/muxuezi/btc/master/btc_close_2017.json'
req = requests.get(json_url) # 1: same download via requests
# Write the data to a file
with open('btc_close_2017_request.json', 'w') as f:
    f.write(req.text) # 2
file_requests = req.json() # 3: requests parses the JSON directly
print(file_urllib == file_requests)
| true
|
cbe135e0e0017bf7031b2c2c29030747d8f32bb5
|
Python
|
TypMitSchnurrbart/SchedulerSim
|
/main.py
|
UTF-8
| 6,500
| 2.703125
| 3
|
[] |
no_license
|
"""
Main for a small scheduler sim with gui; this its mainly for testing at the moment
Author: Alexander Müller
Version: 0.7.1
Date: 14.02.2021
"""
# Load system libraries---------------------------------------------------
import sys
import time
from datetime import datetime
# Load PyQt5 Library Classes----------------------------------------------
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
# Load Constants----------------------------------------------------------
from src.const import PROCESS_LIST, FCFS, SJF, HELPER, TEXT_DELAY, SRTF
from src.const import RR, RR_QUANTUM, PBS, EDF, HELP_URL, HRRN
# Load different Classes--------------------------------------------------
from src.process import Process
from src.scheduler import Scheduler
from src.helper import Helper
from src.rr_popup import RRPopup
from scheduler_sim_gui import Ui_main_window
#-------------------------------------------------------------------------
# Main Window Class
# importing from QtDesginer Created UI translated to python
#-------------------------------------------------------------------------
class Window(QMainWindow, Ui_main_window):
    """
    Class of our GUI
    parent QMainWindow
    parent Ui_main_window
    """
    # Class Constructor with parent Constructor as super
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        # Click Event for Add Button (and the other toolbar buttons)
        self.pushButton_add.clicked.connect(self.add_process_to_queue)
        self.pushButton_start.clicked.connect(self.determine_scheduler)
        self.pushButton_cancel.clicked.connect(self.cancel_all)
        self.pushButton_help.clicked.connect(self.open_help)
    def add_process_to_queue(self):
        """
        Function to create a Process Object from the inserted data in the Window
        and append it to the global PROCESS_LIST.
        """
        # Get Data From Window
        arrival_time = self.spin_arrival_time.value()
        burst_time = self.spin_burst_time.value()
        niceness = self.spin_niceness.value()
        deadline = self.spin_deadline.value()
        # Check if the given deadline is even possible
        if arrival_time + burst_time > deadline:
            self.display_text("Couldnt add Process because its not doable within its deadline!")
            return
        # Create Process based on Data
        PROCESS_LIST.append(Process(arrival_time, burst_time, niceness, deadline))
        # Prep output for text box (read back from the stored process)
        pid = PROCESS_LIST[-1].get_pid()
        arrival_time = PROCESS_LIST[-1].get_arrival_time()
        burst_time = PROCESS_LIST[-1].get_burst_time()
        niceness = PROCESS_LIST[-1].get_niceness()
        deadline = PROCESS_LIST[-1].get_deadline()
        # Give user feedback on successfully creating a Process
        self.terminal_output.append(f"{HELPER[0].get_current_time()}Process added.\tPID: {pid}\tArrival: {arrival_time}\tBurst: {burst_time}\tPriority: {niceness}\tDeadline: {deadline}")
        self.terminal_output.ensureCursorVisible()
        # Reset the spinboxes values
        self.reset_spin_boxes()
        return
    def determine_scheduler(self):
        """
        Decide which scheduler has been choosen via the radio buttons.
        """
        # First check if there is even a process
        if len(PROCESS_LIST) == 0:
            self.display_text("Please add at least a Process first!")
            return
        if self.radio_fcfs.isChecked():
            self.start_scheduling(FCFS)
        elif self.radio_sjf.isChecked():
            self.start_scheduling(SJF)
        elif self.radio_srtf.isChecked():
            self.start_scheduling(SRTF)
        elif self.radio_rrobin.isChecked():
            # Create a dialog asking for the Round-Robin time quantum;
            # RR_QUANTUM[0] stays 0 when the user cancels.
            dialog = RRPopup()
            dialog.exec()
            if RR_QUANTUM[0] != 0:
                self.start_scheduling(RR)
        elif self.radio_pbs.isChecked():
            self.start_scheduling(PBS)
        elif self.radio_edf.isChecked():
            self.start_scheduling(EDF)
        elif self.radio_hrrn.isChecked():
            self.start_scheduling(HRRN)
        else:
            self.display_text("Choose a Scheduler Algorithm!")
    def start_scheduling(self, chosen_scheduler):
        """
        Start the scheduling thread so the window doesnt freeze
        param - {int} - chosen_scheduler- short variable for the scheduler, look in consts
        """
        # Run the scheduler on a worker thread via Qt's thread pool.
        self.thread_handler = QThreadPool()
        scheduler = Scheduler(self, chosen_scheduler)
        self.thread_handler.start(scheduler)
    def cancel_all(self):
        """
        Function to clear all add Processes
        """
        # Delete all added Processes from the back to have it nice and smooth
        max_range = len(PROCESS_LIST)
        for i in range(0, max_range):
            index = (max_range - 1) - i
            del PROCESS_LIST[index]
        # Reset all SpinBoxes
        self.reset_spin_boxes()
        self.display_text("Cleared all processes in Queue!")
    def reset_spin_boxes(self):
        """
        Reset all Spinboxes to their default values
        """
        # Reset the spinboxes values
        self.spin_arrival_time.setValue(0)
        self.spin_burst_time.setValue(1)
        self.spin_niceness.setValue(0)
        self.spin_deadline.setValue(1)
    def display_text(self, output):
        """
        Display the Text with the current time
        param - {string} - output - Text to display
        return - {int} - default Zero
        """
        # Give user feedback (TEXT_DELAY throttles the terminal output)
        time.sleep(TEXT_DELAY)
        self.terminal_output.append(f"{HELPER[0].get_current_time()}{output}")
        self.terminal_output.ensureCursorVisible()
    def display_end_line(self):
        """
        Possibility to end the line without the timestamp
        """
        # Append an empty HTML line break as a separator
        time.sleep(TEXT_DELAY)
        self.terminal_output.append("<br>")
        self.terminal_output.ensureCursorVisible()
    def open_help(self):
        """
        Method to open the help site in the system browser
        """
        # Load URL from consts and open it
        url = QUrl(HELP_URL)
        QDesktopServices.openUrl(url)
if __name__ == "__main__":
# Create our Helper object
HELPER.append(Helper())
app = QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec())
| true
|
1c2b75ce551e6ce7cffe1a2c8e8162b2b634580d
|
Python
|
asadalarma/python_crash_course
|
/print-triangle.py
|
UTF-8
| 141
| 3.21875
| 3
|
[] |
no_license
|
# Print Statement
# Draw a small ASCII-art right triangle line by line.
print("\n---------------A simple triangle---------------\n")
print(" /|")
print(" / |")
print(" / |")
print("/___|\n")
| true
|
8643c0d591aa18ea3bb94cdc7887df830f83418c
|
Python
|
myounus96/Network-Security-Labs
|
/S-Des/main.py
|
UTF-8
| 3,308
| 2.59375
| 3
|
[] |
no_license
|
class Main:
    """Simplified DES (S-DES): an 8-bit block cipher with a 10-bit key.

    main() reads a plaintext bit-string and a key from stdin, encrypts the
    plaintext (two Feistel rounds), decrypts the ciphertext back with the
    subkeys in reverse order, and prints the results.
    """
    # Initial permutation of the 8-bit block (1-based bit positions).
    _IP = [2, 6, 3, 1, 4, 8, 5, 7]
    # Expansion/permutation: expands the 4-bit right half to 8 bits.
    _EP = [4, 1, 2, 3, 2, 3, 4, 1]
    # BUG FIX: the last entry of P10 was 7 (a duplicate) instead of 6, so
    # key bit 6 was never used.  Corrected to match the S-DES specification.
    _P_10 = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    _P_8 = [6, 3, 7, 4, 8, 5, 10, 9]
    _P_4 = [2, 4, 3, 1]
    # Inverse initial permutation.
    _IP_1 = [4, 1, 3, 5, 7, 2, 8, 6]
    # S-boxes addressed by (row bits, column bits), both as 2-char strings.
    _S0 = {"00": {"00": "01", "01": "00", "10": "11", "11": "10"},
           "01": {"00": "11", "01": "10", "10": "01", "11": "00"},
           "10": {"00": "00", "01": "10", "10": "01", "11": "11"},
           "11": {"00": "11", "01": "01", "10": "11", "11": "10"}}
    _S1 = {"00": {"00": "00", "01": "01", "10": "10", "11": "11"},
           "01": {"00": "10", "01": "00", "10": "01", "11": "11"},
           "10": {"00": "11", "01": "00", "10": "01", "11": "00"},
           "11": {"00": "10", "01": "01", "10": "00", "11": "11"}}
    def main(self):
        """Interactive driver: encrypt then decrypt one 8-bit block."""
        self.PT = str(input("P.T:"))
        self.key = str(input("Key:"))
        self.key1, self.key2 = self.keyMethod(self.key)
        # Apply the initial permutation and split into halves.
        afterIP = ""
        for i in range(len(Main._IP)):
            afterIP += self.PT[Main._IP[i] - 1]
        self.L, self.R = afterIP[:4], afterIP[4:]
        # encryption: round 1 with key1, round 2 with key2
        ansrLastXOR = self.Round(self.R, self.key1)
        afterRound1 = self.R + ansrLastXOR
        self.L, self.R = afterRound1[:4], afterRound1[4:]
        round2EText = self.Round(self.R, self.key2)
        temp = round2EText + self.R
        self.CT = ""
        for i in range(len(Main._IP_1)):
            self.CT += temp[Main._IP_1[i] - 1]
        # decryption: same structure with the subkeys in reverse order
        afterIPD = ""
        for i in range(len(Main._IP)):
            afterIPD += self.CT[Main._IP[i] - 1]
        self.L, self.R = afterIPD[:4], afterIPD[4:]
        ansrLastXORD = self.Round(self.R, self.key2)
        afterRound1D = self.R + ansrLastXORD
        self.L, self.R = afterRound1D[:4], afterRound1D[4:]
        round2DText = self.Round(self.R, self.key1)
        temp = round2DText + self.R
        self.PTD = ""
        for i in range(len(Main._IP_1)):
            self.PTD += temp[Main._IP_1[i] - 1]
        print("Key 1:"+self.key1, "Key 2:"+self.key2, "CT :"+self.CT, self.PTD)
    def keyMethod(self, key):
        """Derive the two 8-bit subkeys (K1, K2) from the 10-bit key string."""
        key1 = key2 = temp = ""
        # P10 permutation of the raw key.
        for i in range(len(Main._P_10)):
            temp += key[Main._P_10[i] - 1]
        key = temp
        # LS-1 on each 5-bit half, then P8 -> K1.
        L, R = key[:5], key[5:]
        L, R = L[1:] + L[:1], R[1:] + R[:1]
        temp = L + R
        for i in range(len(Main._P_8)):
            key1 += temp[Main._P_8[i] - 1]
        # Further LS-2 on each half (total shift 3), then P8 -> K2.
        L, R = L[2:] + L[:2], R[2:] + R[:2]
        temp = L + R
        for i in range(len(Main._P_8)):
            key2 += temp[Main._P_8[i] - 1]
        return key1, key2
    def Round(self, text, key):
        """One f-function evaluation: expand, XOR with key, S-boxes, P4,
        then XOR with the current left half stored in self.L."""
        temp = ""
        for i in range(len(Main._EP)):
            temp += text[Main._EP[i] - 1]
        text = self.convertXOR(temp, key)
        # Split into the two 4-bit S-box inputs; row = outer bits,
        # column = inner bits.
        s0, s1 = text[:4], text[4:]
        s0R, s0C = s0[0] + s0[3], s0[1] + s0[2]
        s1R, s1C = s1[0] + s1[3], s1[1] + s1[2]
        s0Text = Main._S0[s0R][s0C]
        s1Text = Main._S1[s1R][s1C]
        joinS = s0Text + s1Text
        temp = ""
        for i in range(len(Main._P_4)):
            temp += joinS[Main._P_4[i] - 1]
        return self.convertXOR(temp, self.L)
    def convertXOR(self, t1, t2):
        """Bitwise XOR of two equal-length bit-strings."""
        ansr = ""
        for i in range(len(t1)):
            ansr += "0" if t1[i] == t2[i] else "1"
        return ansr
obj = Main()
obj.main()
| true
|
4e4e93a3e8a7d3ce0c51ee5780d4fec4ef0a009c
|
Python
|
fdongyu/PCSE_final
|
/transport_visualization.py
|
UTF-8
| 2,835
| 2.828125
| 3
|
[] |
no_license
|
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import utm
import numpy as np
import pdb
def convert_to_latlon(x, y):
x = x.flatten()
y = y.flatten()
nn = len(x)
lat = np.zeros_like(x)
lon = np.zeros_like(y)
for i in range(len(x)):
lat[i], lon[i] = utm.to_latlon(x[i], y[i], 15, 'U')
return lat, lon
west = -95.218; east = -94.412
south = 28.979; north = 29.789
filename='Output/Euler/xy_Euler.nc'
nc = Dataset(filename, 'r')
print nc
time = nc.variables['time'][:]
x = nc.variables['X'][:]
y = nc.variables['Y'][:]
filename2='Output/RK4/xy_RK4.nc'
nc2 = Dataset(filename2, 'r')
print nc2
time2 = nc2.variables['time'][:]
x2 = nc2.variables['X'][:]
y2 = nc2.variables['Y'][:]
lat, lon = convert_to_latlon(x, y)
lat2, lon2 = convert_to_latlon(x2, y2)
lat_tem, lon_tem = convert_to_latlon(x[:,0], y[:,0])
fig = plt.figure(figsize=(10,9.5))
ax = fig.add_subplot(111)
basemap = Basemap(projection='merc',llcrnrlat=south,urcrnrlat=north,\
llcrnrlon=west,urcrnrlon=east, resolution='h')
basemap.drawcoastlines()
basemap.fillcontinents()
basemap.drawcountries()
basemap.drawstates()
llons, llats=basemap(*(lon,lat))
llons2, llats2=basemap(*(lon2,lat2))
llons_tem, llats_tem=basemap(*(lon_tem,lat_tem))
basemap.plot(llons,llats, '.', color='r', markersize=4) # Euler
basemap.plot(llons2,llats2, '.', color='b', markersize=4) # RK4
basemap.plot(llons_tem,llats_tem, '.', color='y', markersize=8)
## add coordination
lats = np.linspace(south, north,4)
lons = np.linspace(west, east,4)
lonsnew, latsnew = basemap(lons, lats)
sw = utm.from_latlon(south, west)[:2]
ne = utm.from_latlon(north, east)[:2]
xlims = np.asarray([sw[0], ne[0]])
ylims = np.asarray([sw[1], ne[1]])
originx = 327500
originy = 3244000
xs = (np.linspace(xlims[0], xlims[1], 4) - originx)/1000.
ys = (np.linspace(ylims[0], ylims[1], 4) - originy)/1000.
xlabel = []
ylabel = []
for i in range(len(xs)):
xlabel.append(str(round(xs[i],1)))
ylabel.append(str(round(ys[i],1)))
ax.set_xticks((lonsnew))
ax.set_yticks((latsnew))
ax.set_xticklabels(xlabel, fontsize=22)
ax.set_yticklabels(ylabel, fontsize=22)
ax.set_aspect('equal')
ax.set_xlabel('Easting (km)', fontsize=22)
ax.set_ylabel('Northing (km)', fontsize=22)
basemap.plot(llons[0],llats[0], '.', color='r', markersize=8, label='Euler') # Euler
basemap.plot(llons2[0],llats2[0], '.', color='b', markersize=8, label='RK4') # RK4
ax.legend(fontsize=20,numpoints=1)
ax.grid()
plt.tight_layout()
#plt.plot(x,y, 'ob',markersize=2.5)
#plt.plot(x2,y2, 'or',markersize=2.0)
#plt.plot(x[:,0],y[:,0], 'or', markersize=5)
#plt.show()
#plt.savefig('figures/Eular.png')
#plt.savefig('figures/RK4.png')
plt.savefig('comparison_basemap_new.png')
plt.close()
#pdb.set_trace()
| true
|
660c099c3b1f9e21b42050263f257366250fa114
|
Python
|
sunshineDrizzle/CommonTools
|
/commontool/algorithm/tool.py
|
UTF-8
| 12,832
| 3.375
| 3
|
[] |
no_license
|
import numpy as np
from scipy.spatial.distance import cdist, pdist
# --------metrics--------
def _overlap(c1, c2, index='dice'):
"""
Calculate overlap between two collections
Parameters
----------
c1, c2 : collection (list | tuple | set | 1-D array etc.)
index : string ('dice' | 'percent')
This parameter is used to specify index which is used to measure overlap.
Return
------
overlap : float
The overlap between c1 and c2
"""
set1 = set(c1)
set2 = set(c2)
intersection_num = float(len(set1 & set2))
try:
if index == 'dice':
total_num = len(set1 | set2) + intersection_num
overlap = 2.0 * intersection_num / total_num
elif index == 'percent':
overlap = 1.0 * intersection_num / len(set1)
else:
raise Exception("Only support 'dice' and 'percent' as overlap indices at present.")
except ZeroDivisionError as e:
print(e)
overlap = np.nan
return overlap
def calc_overlap(data1, data2, label1=None, label2=None, index='dice'):
"""
Calculate overlap between two sets.
The sets are acquired from data1 and data2 respectively.
Parameters
----------
data1, data2 : collection or numpy array
label1 is corresponding with data1
label2 is corresponding with data2
label1, label2 : None or labels
If label1 or label2 is None, the corresponding data is supposed to be
a collection of members such as vertices and voxels.
If label1 or label2 is a label, the corresponding data is always a numpy array with same shape and meaning.
And we will acquire set1 elements whose labels are equal to label1 from data1
and set2 elements whose labels are equal to label2 from data2.
index : string ('dice' | 'percent')
This parameter is used to specify index which is used to measure overlap.
Return
------
overlap : float
The overlap of data1 and data2
"""
if label1 is not None:
positions1 = np.where(data1 == label1)
data1 = list(zip(*positions1))
if label2 is not None:
positions2 = np.where(data2 == label2)
data2 = list(zip(*positions2))
# calculate overlap
overlap = _overlap(data1, data2, index)
return overlap
def loocv_overlap(X, prob, metric='dice'):
"""
Calculate overlaps for leave-one-out cross validation.
Each sample has its own region of interest (ROI). For each iteration,
overlap between the ROI in the left sample and the ROI in remaining samples
will be calculated. The ROI in remaining samples is defined as below:
Calculate probability map for the remaining samples, regard locations whose
probability is suprathreshold as the ROI.
Parameters:
----------
X[ndarray]: shape=(n_sample, n_location)
Its data type must be bool. Each row is a sample.
Each sample's region of interest consists of the locations with True values.
prob[float]: the threshold probability
metric[str]: string ('dice' | 'percent')
Specify a metric which is used to measure overlap.
Return:
------
overlaps[ndarray]: shape=(n_sample,)
"""
assert X.ndim == 2, 'The input X must be a 2D array!'
assert X.dtype == np.bool, "The input X's data type must be bool!"
n_samp, _ = X.shape
remain_idx_arr = np.ones((n_samp,), dtype=np.bool)
overlaps = np.zeros((n_samp,))
for left_idx in range(n_samp):
# get roi of the left sample
roi_left = np.where(X[left_idx])[0]
# get roi of the remaining samples
remain_idx_arr[left_idx] = False
prob_map = np.mean(X[remain_idx_arr], 0)
roi_remain = np.where(prob_map > prob)[0]
remain_idx_arr[left_idx] = True
# calculate overlap
overlaps[left_idx] = calc_overlap(roi_left, roi_remain, index=metric)
return overlaps
def elbow_score(X, labels, metric='euclidean', type=('inner', 'standard')):
"""
calculate elbow score for a partition specified by labels
https://en.wikipedia.org/wiki/Elbow_method_(clustering)
:param X: array, shape = (n_samples, n_features)
a feature array
:param labels: array, shape = (n_samples,)
Predicted labels for each sample.
:param metric: string
Specify how to calculate distance between samples in a feature array.
Options: 'euclidean', 'correlation'
:param type: tuple of two strings
Options:
('inner', 'standard') - Implement Wk in (Tibshirani et al., 2001b)
('inner', 'centroid') - For each cluster, calculate the metric between samples within it
with the cluster's centroid. Finally, average all samples.
('inner', 'pairwise') - For each cluster, calculate the metric pairwise among samples within it.
Finally, average all samples.
('inter', 'centroid') - Calculate the metric between cluster centroids with their centroid.
Finally, average all clusters.
('inter', 'pairwise') - Calculate the metric pairwise among cluster centroids.
Finally, average all clusters.
:return: score:
elbow score of the partition
"""
if type == ('inner', 'standard'):
score = 0
for label in set(labels):
sub_samples = np.atleast_2d(X[labels == label])
dists = cdist(sub_samples, sub_samples, metric=metric)
tmp_score = np.sum(dists) / (2.0 * sub_samples.shape[0])
score += tmp_score
elif type == ('inner', 'centroid'):
# https://stackoverflow.com/questions/19197715/scikit-learn-k-means-elbow-criterion
# formula-1 in (Goutte, Toft et al. 1999 - NeuroImage)
sub_scores = []
for label in set(labels):
sub_samples = np.atleast_2d(X[labels == label])
sub_samples_centroid = np.atleast_2d(np.mean(sub_samples, 0))
tmp_scores = cdist(sub_samples_centroid, sub_samples, metric=metric)[0]
sub_scores.extend(tmp_scores)
score = np.mean(sub_scores)
elif type == ('inner', 'pairwise'):
sub_scores = []
for label in set(labels):
sub_samples = np.atleast_2d(X[labels == label])
sub_scores.extend(pdist(sub_samples, metric=metric))
score = np.mean(sub_scores)
elif type == ('inter', 'centroid'):
# adapted from formula-2 in (Goutte, Toft et al. 1999 - NeuroImage)
sub_centroids = []
for label in set(labels):
sub_samples = np.atleast_2d(X[labels == label])
sub_centroids.append(np.mean(sub_samples, 0))
centroid = np.atleast_2d(np.mean(sub_centroids, 0))
tmp_scores = cdist(centroid, np.array(sub_centroids), metric=metric)[0]
score = np.mean(tmp_scores)
elif type == ('inter', 'pairwise'):
sub_centroids = []
for label in set(labels):
sub_samples = np.atleast_2d(X[labels == label])
sub_centroids.append(np.mean(sub_samples, 0))
sub_centroids = np.array(sub_centroids)
if sub_centroids.shape[0] == 1:
sub_centroids = np.r_[sub_centroids, sub_centroids]
score = np.mean(pdist(sub_centroids, metric=metric))
else:
raise TypeError('Type-{} is not supported at present.'.format(type))
return score
def gap_statistic(X, cluster_nums, ref_num=10, cluster_method=None):
"""
do clustering with gap statistic assessment according to (Tibshirani et al., 2001b)
https://blog.csdn.net/baidu_17640849/article/details/70769555
https://datasciencelab.wordpress.com/tag/gap-statistic/
https://github.com/milesgranger/gap_statistic
:param X: array, shape = (n_samples, n_features)
a feature array
:param cluster_nums: a iterator of integers
Each integer is the number of clusters to try on the data.
:param ref_num: integer
The number of random reference data sets used as inertia reference to actual data.
:param cluster_method: callable
The cluster method to do clustering on the feature array. And the method returns
labels_list (cluster results of each cluster_num in cluster_nums).
If is None, a default K-means method will be used.
:return: labels_list: list
cluster results of each cluster_num in cluster_nums
:return: Wks: array, shape = (len(cluster_nums),)
within-cluster dispersion of each cluster_num clustering on the feature array X
:return: Wks_refs_log_mean: array, shape = (len(cluster_nums),)
mean within-cluster dispersion of each cluster_num clustering on ref_num reference data sets
:return: gaps: array, shape = (len(cluster_nums),)
Wks_refs_log_mean - np.log(Wks)
:return: s: array, shape = (len(cluster_nums),)
I think elements in s can be regarded as standard errors of gaps.
:return: k_selected: integer
cluster k_selected clusters on X may be the best choice
"""
if cluster_method is None:
def k_means(data, cluster_nums):
"""
http://scikit-learn.org/stable/modules/clustering.html#k-means
"""
from sklearn.cluster import KMeans
labels_list = []
for cluster_num in cluster_nums:
kmeans = KMeans(cluster_num, random_state=0, n_init=10).fit(data)
labels_list.append(kmeans.labels_ + 1)
print('KMeans finished: {}'.format(cluster_num))
return labels_list
cluster_method = k_means
print('Start: calculate W\u2096s')
Wks = []
labels_list = cluster_method(X, cluster_nums)
for labels in labels_list:
Wks.append(elbow_score(X, labels))
Wks = np.array(Wks)
Wks_log = np.log(Wks)
print('Finish: calculate W\u2096s')
print("Start: calculate references' W\u2096s")
Wks_refs_log = []
minimums = np.atleast_2d(np.min(X, axis=0))
maximums = np.atleast_2d(np.max(X, axis=0))
bounding_box = np.r_[minimums, maximums]
for i in range(ref_num):
X_ref = uniform_box_sampling(X.shape[0], bounding_box)
labels_list_ref = cluster_method(X_ref, cluster_nums)
Wks_ref_log = []
for labels in labels_list_ref:
Wks_ref_log.append(np.log(elbow_score(X_ref, labels)))
Wks_refs_log.append(Wks_ref_log)
print('Finish reference: {}/{}'.format(i+1, ref_num))
print("Finish: calculate references' W\u2096s")
print('Start: calculate gaps')
Wks_refs_log = np.array(Wks_refs_log)
Wks_refs_log_mean = np.mean(Wks_refs_log, axis=0)
Wks_refs_log_std = np.std(Wks_refs_log, axis=0)
gaps = Wks_refs_log_mean - Wks_log
print('Finish: calculate gaps')
print('Start: select optimal k')
s = Wks_refs_log_std * np.sqrt(1 + 1.0 / ref_num)
idx_selected = np.where(gaps[:-1] >= gaps[1:] - s[1:])[0][0]
k_selected = cluster_nums[idx_selected]
print('Finish: select optimal k')
return labels_list, Wks, Wks_refs_log_mean, gaps, s, k_selected
# --------sampling--------
def uniform_box_sampling(n_sample, bounding_box=((0,), (1,))):
"""
create n_sample samples with uniform distribution in the box
https://blog.csdn.net/baidu_17640849/article/details/70769555
https://datasciencelab.wordpress.com/tag/gap-statistic/
:param n_sample: integer
the number of samples
:param bounding_box: array-like, shape = (2, n_dim)
Shape[1] is the number of dimensions.
Bounding_box[0] are n_dim minimums of their own dimensions.
Bounding_box[1] are n_dim maximums of their own dimensions.
:return: samples: array, shape = (n_sample, n_dim)
"""
bounding_box = np.array(bounding_box)
dists = np.diag(bounding_box[1] - bounding_box[0])
samples = np.random.random_sample((n_sample, bounding_box.shape[1]))
samples = np.matmul(samples, dists) + bounding_box[0]
return samples
# ---common---
def intersect(arr, mask, label=None, substitution=np.nan):
"""
reserve values in the mask and replace values out of the mask with substitution
:param arr: numpy array
:param mask: numpy array
:param label:
specify the mask value in the mask array
:param substitution:
:return:
"""
assert arr.shape == mask.shape
if label is None:
mask_idx_mat = mask != 0
else:
mask_idx_mat = mask == label
if substitution == 'min':
substitution = np.min(arr[mask_idx_mat])
elif substitution == 'max':
substitution = np.max(arr[mask_idx_mat])
new_arr = arr.copy()
new_arr[np.logical_not(mask_idx_mat)] = substitution
return new_arr
| true
|
5347863cac79111a29b85eeb6ccc28791f1a4c6d
|
Python
|
elinaaleksejevski/frog
|
/frog.py
|
UTF-8
| 2,008
| 3.140625
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
fail=open(r"C:\Users\User\Documents\visualstudio\text_num.txt","r")
mas1=[]
mas2=[]
for line in fail:
n=line.find(",")
mas1.append(line[0:n].strip())
mas2.append(int(line[n+1:len(line)].strip()))
fail.close()
plt.grid(True)
color_rectangle = np.random.rand(7, 3)
plt.bar(mas1,mas2,color=color_rectangle)
plt.title("Данные о ИТ безопасности")
plt.tick_params(labelrotation=20)
plt.subplots_adjust(left=0.125, bottom=0.3, right=0.9, top=0.88, wspace=0.2, hspace=0.2)
plt.show()
x1=np.arange(-7,7,0.1)
x2=np.arange(-7,7,0.1)
x3=np.arange(-6.8,-2,0.1)
x4=np.arange(2,6.8,0.1)
x5=np.arange(-5.8,-2.8,0.1)
x6=np.arange(2.8,5.8,0.1)
x7=np.arange(-4,4,0.1)
x8=np.arange(-5.2,5.2,0.1)
x9=np.arange(-7,-2.8,0.1)
x10=np.arange(2.8,7,0.1)
x11=np.arange(-7,0,0.1)
x12=np.arange(0,7,0.1)
x13=np.arange(-7,-4.5,0.1)
x14=np.arange(4.5,7,0.1)
x15=np.arange(-3,3,0.1)
y1=-(3/49)*(x1**2)+8
y2=(4/49)*(x2**2)+1
y3=-0.75*((x3+4)**2)+11
y4=-0.75*((x4-4)**2)+11
y5=-((x5+4)**2)+9
y6=-((x6-4)**2)+9
y7=(4/9)*((x7)**2)-5
y8=(4/9)*((x8)**2)-9
y9=-(1/16)*((x9+3)**2)-6
y10=-(1/16)*((x10-3)**2)-6
y11=(1/9)*((x11+4)**2)-11
y12=(1/9)*((x12-4)**2)-11
y13=-((x13+5)**2)
y14=-((x14-5)**2)
y15=(2/9)*((x15)**2)+2
plt.subplots()
plt.title('Лягушка')
plt.plot(x1,y1,'--c',linewidth=2,label='Frog')
plt.plot(x2,y2,'--c',linewidth=2)
plt.plot(x3,y3,'--c',linewidth=2)
plt.plot(x4,y4,'--c',linewidth=2)
plt.plot(x5,y5,'--c',linewidth=2)
plt.plot(x6,y6,'--c',linewidth=2)
plt.plot(x7,y7,'--c',linewidth=2)
plt.plot(x8,y8,'--c',linewidth=2)
plt.plot(x9,y9,'--c',linewidth=2)
plt.plot(x10,y10,'--c',linewidth=2)
plt.plot(x11,y11,'--c',linewidth=2)
plt.plot(x12,y12,'--c',linewidth=2)
plt.plot(x13,y13,'--c',linewidth=2)
plt.plot(x14,y14,'--c',linewidth=2)
plt.plot(x15,y15,'--c',linewidth=2)
plt.grid()
plt.legend()
plt.savefig("my_image.png")
plt.show()
| true
|
6038ace0521ac7b30766816fe5c17fbf8c845ffe
|
Python
|
UmasouTTT/prefetch_in_wuhao_configuration
|
/eval-scripts/config.py
|
UTF-8
| 602
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import argparse, os
class Config:
def __init__(self):
parser = argparse.ArgumentParser(description=
"This script is used to evaluate results from Champsim.")
parser.add_argument('-d', '--desc', required = True, help="Descriptor JSON File Path.")
parser.add_argument('-o', '--output', help="Results output directory.")
args = parser.parse_args()
self.desc_path = args.desc
# if (args.output):
# self.output_path = args.output
# else:
# self.output_path = os.path.dirname(self.desc_path)
| true
|
863f546c6a6980eaea574846d4da1dad9f8fa98c
|
Python
|
xinw3/deep-learning
|
/hw3/code/pre_mapper.py
|
UTF-8
| 623
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys
import re
def mapper():
'''
Input: train
'''
word_count = dict()
tags = ['UNK', 'START', 'END']
for line in sys.stdin:
words = line.split()
words = [word.lower() for word in words]
for word in words:
word_count[word] = word_count.get(word,0) + 1
sorted_word_count = sorted(word_count, key=word_count.get, reverse=True) # list
word_set = set(sorted_word_count[:7997])
for i in range(len(tags)):
word_set.add(tags[i])
for word in word_set:
print word
if __name__ == "__main__":
mapper()
| true
|
80bd3e9d3cd8e0581c40a5b110ecbcef4a88646c
|
Python
|
youseop/Problem_solutions
|
/BAEKJOON/3197_백조의호수(미해결).py
|
UTF-8
| 2,144
| 2.796875
| 3
|
[] |
no_license
|
import sys
sys.stdin = open("text.txt","rt")
read=sys.stdin.readline
from collections import deque
def find(a):
if union[a] == a:
return a
union[a] = find(union[a])
return union[a]
def merge(a,b):
root_a,root_b = find(a),find(b)
if root_a == root_b:
return
if level[root_a] >= level[root_b]:
if level[root_a] == level[root_b]:
level[root_a] += 1
union[root_b] = root_a
else:
union[root_a] = root_b
return
def union_water(a,b):
for x,y in [(1,0),(0,1),(-1,0),(0,-1)]:
ax,by = a+x,b+y
if 0<=ax<n and 0<=by<m and MAP[ax][by]=='.':
merge(ax*m+by,a*m+b)
def melt_ice(a,b):
point = deque([(a,b)])
visit[a][b] = 0
while point:
a,b = point.popleft()
save_water_point = []
for x,y in [(1,0),(0,1),(-1,0),(0,-1)]:
ax,by = a+x,b+y
if 0<=ax<n and 0<=by<m and visit[ax][by]:
if MAP[ax][by]=='.':
save_water_point.append((ax,by))
else:
point.append((ax,by))
visit[ax][by] = 0
if save_water_point:
for water in save_water_point:
MAP[a][b] = '.'
union_water(*water)
else:
ice.append((a,b))
n,m = map(int,read().split())
MAP = list(list(read().strip()) for _ in range(n))
union = list(i for i in range(n*m+1))
level = list(1 for _ in range(n*m+1))
#find(a*m + b)
swan = []
ice = deque()
for i in range(n):
for j in range(m):
if MAP[i][j] == 'L':
swan.append((i,j))
MAP[i][j] = '.'
union_water(i,j)
elif MAP[i][j] == '.':
union_water(i,j)
else:
ice.append((i,j))
time = 0
while ice:
#for mm in MAP:
# print(mm)
if find(swan[0][0]*m+swan[0][1]) == find(swan[1][0]*m+swan[1][1]):
break
visit = list(list(1 for _ in range(m)) for _ in range(n))
for _ in range(len(ice)):
i,j = ice.popleft()
if MAP[i][j] == 'X':
melt_ice(i,j)
time += 1
print(time)
| true
|
54a37f170245c882ef1c570ec6793764778a8311
|
Python
|
dh434/tensorflowLearning
|
/nn_rnn_mnist.py
|
UTF-8
| 2,345
| 2.734375
| 3
|
[] |
no_license
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)
lr = 0.001
training_iters = 1000000
batch_size = 128
n_inputs = 28
n_steps = 28
n_hidden_units = 128
n_classes = 10
xs = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
ys = tf.placeholder(tf.float32, [None, n_classes])
weights = {
"in":tf.Variable(tf.random_normal([n_inputs,n_hidden_units])),
"out":tf.Variable(tf.random_normal([n_hidden_units,n_classes]))
}
biases = {
"in":tf.Variable(tf.constant(0.1,shape=[n_hidden_units,])),
"out":tf.Variable(tf.constant(0.1,shape=[n_classes,])),
}
def RNN(X,weights, biases):
X = tf.reshape(X,[-1,n_inputs])
X_in = tf.matmul(X, weights["in"]) + biases["in"]
X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units,forget_bias=1.0,state_is_tuple=True)
init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
outputs, final_states = tf.nn.dynamic_rnn(lstm_cell, X_in, initial_state = init_state, time_major=False)
results = tf.matmul(final_states[1], weights["out"]) + biases["out"]
return results
prediction = RNN(xs, weights,biases)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=prediction))
train_step = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy)
correct_pred = tf.equal(tf.argmax(prediction, axis=1), tf.argmax(ys, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
training_accus = []
with tf.Session() as sess:
sess.run(init)
step = 0
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
batch_xs = batch_xs.reshape([batch_size, n_steps,n_inputs])
sess.run(train_step, feed_dict={xs:batch_xs,ys:batch_ys})
if step % 20 == 0:
# batch_test_xs, batch_test_ys = mnist.test.images,mnist.test.labels
# batch_test_xs = batch_test_xs.reshape([ -1, n_steps, n_inputs])
temp = sess.run(accuracy, feed_dict={xs:batch_xs, ys:batch_ys})
print(temp)
training_accus.append(temp)
step += 1
plt.plot(training_accus)
plt.show()
| true
|
3fb922902d6f1c978d7a30e7ab63bcb2feba7f14
|
Python
|
zhangbc/ip_pools
|
/update_citycarrier.py
|
UTF-8
| 6,183
| 2.75
| 3
|
[] |
no_license
|
# /usr/local/python2.7.11/bin/python
# coding: utf-8
"""
将ip_info中归中属地和运营商信息
提取并写入对应的表,并写入日志表
author: zhangbc
create_time: 2017-05-19
"""
import sys
import time
from utils.ip_processor import IpProcessor
reload(sys)
sys.setdefaultencoding('utf8')
class UpdateCityCarrier(IpProcessor):
"""
将ip_info中归中属地和运营商信息提取并写入对应的表
"""
def __init__(self):
IpProcessor.__init__(self)
def update_city(self, ia=0, ib=0):
"""
更新ip_city信息
:param ia: IP的A段地址
:param ib: IP的B段地址
:return:
"""
condition = 'ip like \'{0}.{1}.%\''.format(ia, ib)
insert_city_sql = (r'''INSERT INTO ip_city(city) '''
'''SELECT DISTINCT addr FROM ip_info '''
'''WHERE addr NOT IN (SELECT city FROM ip_city) '''
'''AND {0};'''.format(condition))
row_count = self.exec_no_query(insert_city_sql)
if row_count:
print u'表ip_city已插入{0}条记录!'.format(row_count)
# 更新成功,写入log
query_city_sql = (r'''SELECT DISTINCT addr FROM ip_info '''
'''WHERE {0};'''.format(condition))
city_count = len(self.exec_query(query_city_sql))
condition_log = 'WHERE ip_range = \'{0}.{1}.x.x\''.format(ia, ib)
query_sql = r'SELECT * FROM ip_log_info {0};'.format(condition_log)
rows = self.exec_query(query_sql)
if len(rows):
update_log_sql = (r'''UPDATE ip_log_info SET city_count={0}, '''
'''city_finished=\'Y\' {1}''').format(city_count, condition_log)
update_log_rows = self.exec_no_query(update_log_sql)
if update_log_rows:
print u'表ip_log_info已更新{0}条记录!'.format(update_log_rows)
else:
insert_log_sql = ('INSERT INTO ip_log_info('
'ip_range, city_count, city_finished, '
'carrier_count, carrier_finished) VALUES '
'(\'{0}.{1}.x.x\',{2},\'Y\',0,\'N\')').format(ia, ib, city_count)
insert_log_rows = self.exec_no_query(insert_log_sql)
if insert_log_rows:
print u'表ip_log_info已插入{0}条记录!'.format(insert_log_rows)
def update_carrier(self, ia, ib):
"""
更新ip_carrier信息
:param ia: IP的A段地址
:param ib: IP的B段地址
:return:
"""
condition = 'ip like \'{0}.{1}.%\''.format(ia, ib)
insert_carrier_sql = (r'''INSERT INTO ip_carrier(carrieroperator) '''
'''SELECT DISTINCT carrieroperator FROM ip_info '''
'''WHERE carrieroperator NOT IN (SELECT carrieroperator FROM ip_carrier) '''
'''AND {0};'''.format(condition))
row_count = self.exec_no_query(insert_carrier_sql)
if row_count:
print u'表ip_carrier已插入{0}条记录!'.format(row_count)
# 更新成功,写入log
query_carrier_sql = (r'''SELECT DISTINCT carrieroperator FROM ip_info '''
'''WHERE {0};'''.format(condition))
rows = self.exec_query(query_carrier_sql)
carrier_count = len(rows)-1 if ('',) in rows else len(rows)
condition_log = 'WHERE ip_range = \'{0}.{1}.x.x\''.format(ia, ib)
query_sql = r'SELECT * FROM ip_log_info {0};'.format(condition_log)
rows = self.exec_query(query_sql)
if len(rows):
update_log_sql = (r'''UPDATE ip_log_info SET carrier_count={0}, '''
'''carrier_finished=\'Y\' {1}''').format(carrier_count, condition_log)
update_log_rows = self.exec_no_query(update_log_sql)
if update_log_rows:
print u'表ip_log_info已更新{0}条记录!'.format(update_log_rows)
else:
insert_log_sql = (r'''INSERT INTO ip_log_info('''
'''ip_range, city_count, city_finished, '''
'''carrier_count, carrier_finished) VALUES '''
'''(\'{0}.{1}.x.x\',0,\'N\',{2},\'Y\')''').format(ia, ib, carrier_count)
insert_log_rows = self.exec_no_query(insert_log_sql)
if insert_log_rows:
print u'表ip_log_info已插入{0}条记录!'.format(insert_log_rows)
def get_updated_list(self):
"""
在日志中获取已完成的ip段信息
:return:
"""
query_updated_sql = ('SELECT ip_range FROM ip_log_info '
'WHERE city_finished=\'Y\' AND carrier_finished=\'Y\';')
rows = self.exec_query(query_updated_sql)
updated_list = list()
if len(rows):
for index, row in enumerate(rows):
updated_list.append(row[0].replace('.x.x', ''))
return updated_list
def work(self):
"""
批量更新
:return:
"""
# 所有完成的IP段list
all_list = [ip[0] for ip in self.get_count_by_group() if ip[1] == 65536]
# 已更新的IP段list
updated_list = self.get_updated_list()
# 待更新的IP段list
to_list = list(set(all_list).difference(set(updated_list)))
for index, item in enumerate(to_list):
ia, ib = item.split('.')
self.update_city(ia, ib)
self.update_carrier(ia, ib)
print u'{0}.{1}.x.x已更新完成! --{2}'\
.format(ia, ib, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
def main():
"""
函数实现
:return:
"""
UpdateCityCarrier().work()
if __name__ == '__main__':
start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print 'Begin:{0}'.format(start_time)
main()
end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print 'Begin:{0}\nEnd:{1}'.format(start_time, end_time)
| true
|
4603e89c5a787492da0a41ddaded0be6529ca41a
|
Python
|
yokub-sobirjonov/python-tutorial
|
/Hello World.py
|
UTF-8
| 494
| 3.21875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 9 19:29:50 2021
@author: User
"""
#print('Salom\tdunyo')
#ism = input("Ismingiz nima ? ")
#print("Assalomu aleykum , " + ism)
#ism = input("Ismingiz nima ?")
#print("Assalomu aleykum , " + ism.title())
kocha=input("kochangiz : ")
mahalla=input("mahallangiz : ")
tuman=input("tunmaningiz : ")
viloyat=input("viloyatingiz : ")
manzil = f"{kocha} kochasi {mahalla} mahallasi, {tuman} tumani, {viloyat} viloyati"
print(manzil)
| true
|
7d7345e069991fa81e85641c8a50449267e471b8
|
Python
|
ishritam/python-Programming--The-hard-way-Exercises
|
/ex12.py
|
UTF-8
| 86
| 3.875
| 4
|
[] |
no_license
|
name=input("Name?")
age=input("age?")
print(f"So, Mr.{name}, You are {age} years old")
| true
|
9868a0d77d5aaae3a70d685bb637249098aaf12e
|
Python
|
jermenkoo/spoj.pl_solutions
|
/CANDY3.py
|
UTF-8
| 208
| 2.78125
| 3
|
[] |
no_license
|
for i in range(int(input())):
mlst = []
space = input
for j in range(reps):
mlst.append(int(input()))
if sum(mlst) % len(mlst) == 0:
print("YES")
else:
print("NO")
| true
|
dea7af2b186902f3d2be9be3ea406644eb8c890f
|
Python
|
mglodziak/python
|
/02/02_python.py
|
UTF-8
| 1,523
| 3.46875
| 3
|
[] |
no_license
|
import sys
import getopt
import turtle
def print_help():
print('HELP - this script draws regulars polygons.')
print('-h help')
print('-n <number> -> number of sides, default is 4')
print('-r <number> -> length of each side, default is 50')
print('-m <number> -> move center, default is 0')
print('-t <number> -> turn at start, default is 0')
def draw_figure(length, number, move, turn):
angle=((int(number)-2)*180)/int(number)
draw_angle=180-int(angle)
print(draw_angle)
x.right(int(turn))
for i in range(1,int(number)+1):
x.forward(int(length))
x.right(int(draw_angle))
def move_start_point(move):
x.up()
x.forward(int(move))
x.down()
try:
opts, args = getopt.getopt(sys.argv[1:], 't:m:r:n:h', ['turn=','move=', 'length=', 'number', 'help'])
except getopt.GetoptError:
print_help()
sys.exit(1)
var_n=4
var_length=50
var_move=0
var_turn=0
x=turtle.Turtle()
for opt, arg in opts:
if opt in ('-h', '--help'):
print_help()
sys.exit(2)
elif opt in ('-n', '--number'):
var_n=arg
elif opt in ('-r', '--length'):
var_length=arg
elif opt in ('-t', '--turn'):
var_turn=arg
elif opt in ('-m', '--move'):
var_move=arg
else:
print_help()
sys.exit(2)
move_start_point(var_move)
draw_figure(var_length, var_n, var_move, var_turn)
#x.circle(50)
input("Press Enter to continue...")
| true
|
0e5d0a6f57c1a8c837753ec946aafb9030b43395
|
Python
|
qtothec/pyomo
|
/pyomo/common/dependencies.py
|
UTF-8
| 4,413
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import importlib
import logging
class DeferredImportError(ImportError):
pass
class ModuleUnavailable(object):
"""Dummy object that raises a DeferredImportError upon attribute access
This object is returned by attempt_import() in liu of the module in
the case that the module import fails. Any attempts to access
attributes on this object will raise a DeferredImportError
exception.
Parameters
----------
message: str
The string message to return in the raised exception
"""
def __init__(self, message):
self._error_message_ = message
def __getattr__(self, attr):
raise DeferredImportError(self._error_message_)
def generate_import_warning(self, logger='pyomo.common'):
logging.getLogger(logger).warning(
self._error_message_)
try:
from packaging import version as _version
_parser = _version.parse
except ImportError:
# pkg_resources is an order of magnitude slower to import than
# packaging. Only use it if the preferred (but optional) packaging
# library is not present
from pkg_resources import parse_version as _parser
def _check_version(module, min_version):
return _parser(min_version) <= _parser(module.__version__)
def attempt_import(name, error_message=None, only_catch_importerror=True,
                   minimum_version=None):
    """Attempt to import the specified module.

    This will attempt to import the specified module, returning a
    (module, available) tuple.  If the import was successful, `module`
    will be the imported module and `available` will be True.  If the
    import results in an exception, then `module` will be an instance of
    :py:class:`ModuleUnavailable` and `available` will be False

    The following is equivalent to ``import numpy as np``:

    .. doctest::

       >>> from pyomo.common.dependencies import attempt_import
       >>> np, numpy_available = attempt_import('numpy')

    Parameters
    ----------
    name: `str`
        The name of the module to import

    error_message: `str`, optional
        The message for the exception raised by ModuleUnavailable

    only_catch_importerror: `bool`, optional
        If True, exceptions other than ImportError raised during module
        import will be reraised.  If False, any exception will result in
        returning a ModuleUnavailable object.

    minimum_version: `str`, optional
        Minimum acceptable module version; an older module is treated
        the same as a failed import.

    Returns
    -------
    : module
        the imported module or an instance of :py:class:`ModuleUnavailable`
    : bool
        Boolean indicating if the module import succeeded
    """
    try:
        module = importlib.import_module(name)
        if minimum_version is None or _check_version(module, minimum_version):
            return module, True
        elif error_message:
            error_message += " (version %s does not satisfy the minimum " \
                             "version %s)" % (
                                 module.__version__, minimum_version)
        else:
            # BUG FIX: the original interpolated
            # ``module.__version__.minimum_version`` (an AttributeError on
            # str) and left the 3-placeholder format string one argument
            # short; pass the version and the minimum as separate args.
            error_message = "The %s module version %s does not satisfy " \
                            "the minimum version %s" % (
                                name, module.__version__, minimum_version)
    except ImportError:
        pass
    except:
        if only_catch_importerror:
            raise
    if not error_message:
        error_message = "The %s module (an optional Pyomo dependency) " \
                        "failed to import" % (name,)
    return ModuleUnavailable(error_message), False
#
# Common optional dependencies used throughout Pyomo
#
yaml, yaml_available = attempt_import('yaml')
if yaml_available and hasattr(yaml, 'SafeLoader'):
yaml_load_args = {'Loader': yaml.SafeLoader}
else:
yaml_load_args = {}
numpy, numpy_available = attempt_import('numpy')
scipy, scipy_available = attempt_import('scipy')
| true
|
29d8180054e6284ab55e5a93a582c782f76f39f9
|
Python
|
Guangyun-Xu/Learn-Test
|
/PyCode/Open3d/transform_visualization.py
|
UTF-8
| 1,766
| 2.859375
| 3
|
[] |
no_license
|
# Demo: composing affine transforms with transforms3d and visualizing the
# resulting coordinate frames with Open3D.  Running this script opens an
# interactive Open3D window (blocking until closed).
import transforms3d as t3d # pip install transform3d
import open3d as o3d # pip install open3d-python==0.5
import numpy as np
import math
# compose transform matrix
T = [20, 30, 40]
R = [[0, -1, 0], [1, 0, 0], [0, 0, 1]] # rotation matrix
Z = [1.0, 1.0, 1.0] # zooms
A = t3d.affines.compose(T, R, Z)
print(A)
# rotation matrix to euler
rx, ry, rz = t3d.euler.mat2euler(R, axes='sxyz')
print(rx, ry, rz)
# euler to rotation matrix (round-trip check of mat2euler)
R1 = t3d.euler.euler2mat(rx, ry, rz, axes='sxyz')
print(R1.astype(float))
# visualization, x, y, z axis will be rendered as red, green, and blue
# NOTE(review): create_mesh_coordinate_frame at top level is the old
# open3d-python 0.5 API; newer Open3D moved it to o3d.geometry -- confirm
# the pinned version before running.
base_coordinate = o3d.create_mesh_coordinate_frame(size=1000)
coordinate1 = o3d.create_mesh_coordinate_frame(size=500)
coordinate2 = o3d.create_mesh_coordinate_frame(size=300)
# r_xyz = np.array([180.272, 9.67795, 270.592]) # camera in base pose
# r_xyz = r_xyz/180*math.pi
R_1 = np.array([[0.010182, -0.999944, 0.003005],
                [-0.985716, -0.009532, 0.168148],
                [-0.168110, -0.004674, -0.985757]])
T_1 = np.array([393.100000, -280.894000, 1338.030000])
H_1 = t3d.affines.compose(T_1, R_1, Z) # camera in base pose
rx_1, ry_1, rz_1 = t3d.euler.mat2euler(R_1, axes='sxyz')
r_xyz_1 = np.array([rx_1, ry_1, rz_1])/math.pi*180
print("camera in base matrix:{}".format(H_1))
print("rx_1, ry_1, rz_1:{}".format(r_xyz_1))
coordinate1.transform(H_1)
# Flip the z axis of frame 1 (rotate 180 degrees about its own x/y plane)
# to get a second frame at the same position.
R_2in1 = np.array([[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, -1]])
R_2 = R_1.dot(R_2in1)
H_2 = t3d.affines.compose(T_1, R_2, Z)
coordinate2.transform(H_2)
o3d.visualization.draw_geometries([coordinate1, base_coordinate, coordinate2])
rx_2, ry_2, rz_2 = t3d.euler.mat2euler(R_2, axes='sxyz')
r_xyz_2 = np.array([rx_2, ry_2, rz_2])/math.pi*180
print("rx_2, ry_2, rz_2:{}".format(r_xyz_2))
| true
|
a111ae4c8bb0300c38ac7ed26029e3d1c3e026d9
|
Python
|
stephendwillson/ProjectEuler
|
/python_solutions/problem_1.py
|
UTF-8
| 524
| 3.796875
| 4
|
[] |
no_license
|
def main():
    """Return the sum of all natural numbers below 1000 divisible by 3 or 5."""
    return sum(i for i in range(1, 1000) if i % 3 == 0 or i % 5 == 0)
def description():
    """Print the Project Euler problem 1 statement (no trailing newline)."""
    text = """
    https://projecteuler.net/problem=1
    If we list all the natural numbers below 10 that are multiples of 3 or 5,
    we get 3, 5, 6 and 9. The sum of these multiples is 23.
    Find the sum of all the multiples of 3 or 5 below 1000.
    """
    print(text, end="")
# Metadata consumed by the surrounding problem-runner harness.
PE_NAME = "MULTIPLES OF 3 OR 5"
PE_SOLUTION = 233168
if __name__ == "__main__":
    print(main())
| true
|
7dc12822b7d115d7a6dba71ebeb23ee066d21b4b
|
Python
|
eugeneALU/CECNL_RealTimeBCI
|
/filterbank.py
|
UTF-8
| 2,444
| 2.765625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 10:16:21 2019
@author: ALU
"""
import warnings
import scipy.signal
import numpy as np
def filterbank(eeg, fs, idx_fb):
    """Apply the idx_fb-th Chebyshev type-I band-pass filter of the
    SSVEP filter bank to EEG data (zero-phase filtering).

    Parameters
    ----------
    eeg : ndarray
        Data shaped (channels, samples) or (channels, samples, trials).
    fs : float
        Sampling rate in Hz.
    idx_fb : int or None
        Sub-band index, 0 <= idx_fb <= 9; None falls back to 0 with a
        warning.

    Returns
    -------
    ndarray
        Filtered data with the same shape as ``eeg``.

    Raises
    ------
    ValueError
        If idx_fb is outside [0, 9].
    """
    # `is None` (identity) replaces the original `== None`: it is the
    # idiomatic test and cannot be fooled by objects overriding __eq__.
    if idx_fb is None:
        warnings.warn('stats:filterbank:MissingInput '\
                      +'Missing filter index. Default value (idx_fb = 0) will be used.')
        idx_fb = 0
    elif (idx_fb < 0 or 9 < idx_fb):
        raise ValueError('stats:filterbank:InvalidInput '\
                          +'The number of sub-bands must be 0 <= idx_fb <= 9.')
    if (len(eeg.shape)==2):
        num_chans = eeg.shape[0]
        num_trials = 1
    else:
        num_chans, _, num_trials = eeg.shape
    # Nyquist Frequency = Fs/2N
    Nq = fs/2
    # Per-sub-band lower pass/stop edges in Hz (upper edges fixed at 90/100).
    passband = [6, 14, 22, 30, 38, 46, 54, 62, 70, 78]
    stopband = [4, 10, 16, 24, 32, 40, 48, 56, 64, 72]
    Wp = [passband[idx_fb]/Nq, 90/Nq]
    Ws = [stopband[idx_fb]/Nq, 100/Nq]
    [N, Wn] = scipy.signal.cheb1ord(Wp, Ws, 3, 40) # band pass filter StopBand=[Ws(1)~Ws(2)] PassBand=[Wp(1)~Wp(2)]
    [B, A] = scipy.signal.cheby1(N, 0.5, Wn, 'bandpass') # Wn passband edge frequency
    y = np.zeros(eeg.shape)
    if (num_trials == 1):
        for ch_i in range(num_chans):
            # Zero-phase filtering (forward + backward pass); the padding
            # length is chosen to match MATLAB's filtfilt behaviour.
            y[ch_i, :] = scipy.signal.filtfilt(B, A, eeg[ch_i, :], padtype = 'odd', padlen=3*(max(len(B),len(A))-1))
    else:
        for trial_i in range(num_trials):
            for ch_i in range(num_chans):
                y[ch_i, :, trial_i] = scipy.signal.filtfilt(B, A, eeg[ch_i, :, trial_i], padtype = 'odd', padlen=3*(max(len(B),len(A))-1))
    return y
if __name__ == '__main__':
    # Regression check against MATLAB reference output: filter one target
    # of the sample recording with the first and last sub-band and print
    # the summed difference (expected to be ~0).
    from scipy.io import loadmat
    D = loadmat("sample.mat")
    eeg = D['eeg']
    # Keep 125 samples starting at offset 33 (stimulus window).
    eeg = eeg[:, :, (33):(33+125), :]
    eeg = eeg[:,:,:,0] #first bank
    eeg = eeg[0, :, :] #first target
    y1 = filterbank(eeg, 250, 0)
    y2 = filterbank(eeg, 250, 9)
    y1_from_matlab = loadmat("y1_from_matlab.mat")['y1']
    y2_from_matlab = loadmat("y2_from_matlab.mat")['y2']
    dif1 = y1 - y1_from_matlab
    dif2 = y2 - y2_from_matlab
    print("Difference between matlab and python = ", np.sum(dif1))
    print("Difference between matlab and python = ", np.sum(dif2))
| true
|
1150cb44e9d944266f0798860cb9cf4b39441857
|
Python
|
nilimapradipm/ReportComparison
|
/venv/Lib/site-packages/printline.py
|
UTF-8
| 441
| 3.578125
| 4
|
[] |
no_license
|
'''Recursively print a list that may contain arbitrarily nested lists.'''
#recursive function
def print_dg(the_list, indent=False, level=0, fh=None):
    """Print every scalar item of *the_list*, one per line.

    Parameters
    ----------
    the_list : list
        The list to print; nested lists are descended into recursively.
    indent : bool
        When True, prefix items with one tab per nesting level.
    level : int
        Current nesting depth (internal; callers normally leave it 0).
    fh : file-like, optional
        Output stream; defaults to sys.stdout (the original hard-coded
        ``fh=sys.stdout`` default raised NameError because ``sys`` was
        never imported -- resolved lazily here instead).
    """
    import sys
    if fh is None:
        # sys.stdout is looked up at call time, so later rebinding of
        # sys.stdout (e.g. redirect_stdout) is honoured.
        fh = sys.stdout
    for ea_it in the_list:
        if isinstance(ea_it, list):
            print_dg(ea_it, indent, level + 1, fh)
        else:
            if indent:
                for ta_st in range(level):
                    print("\t", end='', file=fh)
            print(ea_it, file=fh)
#end
| true
|
48d5719a5cfa54b9a101b0eff021ca4cf650c4d3
|
Python
|
hang0522/AlgorithmQIUZHAO
|
/Week_05/homework/231_isPowerOfTwo.py
|
UTF-8
| 690
| 3.421875
| 3
|
[] |
no_license
|
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True when ``n`` is a positive integer power of two.

        A power of two has exactly one set bit in binary, so clearing the
        lowest set bit with ``n & (n - 1)`` must leave zero.  O(1) time,
        O(1) space; zero and negative numbers are never powers of two.
        """
        return n > 0 and n & (n - 1) == 0
| true
|
a11504790a384f4317ed01099f7d7cb1e61e2ca6
|
Python
|
labbealexandre/dna-algorithms
|
/src/sparseToBND2.py
|
UTF-8
| 2,635
| 2.734375
| 3
|
[] |
no_license
|
import numpy as np
import networkx as nx
from src import evaluate as ev
from src import utils as ut
from src import melhorn as ml
def replaceInitialEdges(G, M, subGraphs, avgDegree):
    """Reroute every edge of G through a 'helper' node.

    For each original edge (u, v), the globally least-loaded node h is
    chosen and the edge is replaced by (u, h) and (h, v) in the per-node
    subgraphs, with weights derived from the distance matrix M.

    Returns [edges, rerouters]: the original edge list and, in the same
    order, the helper chosen for each edge.
    """
    # By definition of the algorithm, a node can help at most
    # avgDegree / 2
    n = len(M)
    limit = int(avgDegree/2)+1 # this is not usefull in this first naive implementation
    # balance[h] counts how many edges node h already reroutes.
    balance = np.zeros(n)
    rerouters = []
    edges = list(G.edges)
    for edge in edges:
        # Pick the helper node: the one that has helped the least so far
        # (it may coincide with one of the edge's endpoints, in which
        # case only one replacement edge is added below).
        helper = np.argmin(balance)
        rerouters.append(helper)
        if helper != edge[1]:
            # Edge from helper into the target, weighted by detour length.
            weight = M[helper, edge[1]] + M[edge[0], edge[1]]
            subGraphs[edge[1]].add_edge(helper, edge[1], weight=weight)
        if helper != edge[0]:
            # Edge from the source to the helper.
            weight = M[edge[0], helper] + M[edge[0], edge[1]]
            subGraphs[edge[0]].add_edge(edge[0], helper, weight=weight)
        balance[helper] += 1
    return [edges, rerouters]
def addBinaryTrees(subGraphs, resMarices):
    """For each node, organise its rerouted edges into Mehlhorn trees.

    Node i's outgoing and incoming weighted neighbours are extracted from
    subGraphs[i] and written as binary trees into resMarices[i] by
    ml.melhornTree.  Returns the list of graphs built from those matrices.
    """
    finalSubGraphs = []
    for i in range(len(subGraphs)):
        subGraph = subGraphs[i]
        M = nx.to_numpy_matrix(subGraph)
        _M = np.copy(M)
        # Outgoing children of i: entries of row i with positive weight.
        outChildren = ut.arrayToDictArray(_M[i,:])
        rows = np.where(outChildren[:,1] > 0)
        outChildren = outChildren[rows]
        # Incoming children of i: entries of column i with positive weight.
        inChildren = ut.arrayToDictArray(_M[:,i])
        rows = np.where(inChildren[:,1] > 0)
        inChildren = inChildren[rows]
        # Build one tree per direction, accumulating into resMarices[i].
        ml.melhornTree(outChildren, i, resMarices[i], direction=ut.Direction.OUTGOING)
        ml.melhornTree(inChildren, i, resMarices[i], direction=ut.Direction.INCOMING)
        finalSubGraph = nx.from_numpy_matrix(resMarices[i])
        finalSubGraphs.append(finalSubGraph)
    return finalSubGraphs
def sparseToBND2(G):
    """
    New algorithm for sparse graphs

    Pipeline: (1) reroute each original edge through a helper node,
    (2) replace each node's rerouted star with Mehlhorn trees, and
    (3) union all per-node graphs into the final bounded-degree graph.

    Returns [res, edges, rerouters, finalSubGraphs] where `res` is the
    composed result graph, `edges`/`rerouters` come from
    replaceInitialEdges, and `finalSubGraphs` are the per-node trees.
    """
    M = nx.to_numpy_matrix(G)
    n = len(M)
    avgDegree = ev.getAverageDegree(G)
    # N will be the result unweighted graph
    N = np.zeros((n, n))
    # For each node we create a auxilliary graph which will be completed
    # with the new edges which replace the initial ones
    subGraphs = [nx.empty_graph(n, create_using=nx.DiGraph) for i in range(n)]
    res = replaceInitialEdges(G, M, subGraphs, avgDegree)
    edges, rerouters = res[0], res[1]
    # Now for each subgraph, we create at most two melhorn trees
    resMatrices = [np.zeros((n,n)) for i in range(n)]
    finalSubGraphs = addBinaryTrees(subGraphs, resMatrices)
    # We now compute the union of these subgraphs
    resGraphs = []
    for matrix in resMatrices:
        resGraphs.append(nx.from_numpy_matrix(matrix))
    res = nx.compose_all(resGraphs)
    return [res, edges, rerouters, finalSubGraphs]
| true
|
7ad86c5de738b7ec21bce484cb7fe45516892740
|
Python
|
shapiromatron/hawc
|
/hawc/apps/common/templatetags/url_replace.py
|
UTF-8
| 745
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def url_replace(context, *args, **kwargs):
    """Rebuild the current request's query string with updated parameters.

    Positional arguments are mappings of parameters to merge; keyword
    arguments are individual parameters.  A value of ``None`` removes the
    key from the query string entirely.

    Example usage:

    <a href="?{% url_replace page=paginator.next_page_number %}">

    Source: http://stackoverflow.com/questions/2047622/
    """
    params = context["request"].GET.copy()

    def apply_change(mapping, key, value):
        # Assign first so existing keys are replaced; None means delete.
        mapping[key] = value
        if value is None:
            mapping.pop(key)

    for mapping_arg in args:
        for key, value in mapping_arg.items():
            apply_change(params, key, value)
    for key, value in kwargs.items():
        apply_change(params, key, value)
    return params.urlencode()
| true
|
63e8ddfeacbc2d6e2d678a42b0fcc2a736f9d3c5
|
Python
|
CAECOMP/provas
|
/S08 - Padrões de projeto/interpreter/principal.py
|
UTF-8
| 744
| 3.796875
| 4
|
[] |
no_license
|
from operador import Operador
from numero import Numero
from soma import Somar
from subtracao import Subtrair
from multiplicacao import Multiplicar
from divisao import Dividir
if __name__ == '__main__':
    # Interpreter-pattern demo: expression trees are built from terminal
    # nodes (Numero) and non-terminal operator nodes, then evaluated via
    # interpretar(); intermediate trees are reused as sub-expressions.
    somar: Operador = Somar(Numero(1), Numero(4))
    # 1 + 4 = 5
    print(f"resultado da soma: {somar.interpretar()}")
    # 5 - 2 = 3
    subtrair: Operador = Subtrair(somar, Numero(2))
    print(f"resultado da subtração: {subtrair.interpretar()}")
    # 5 * 3 = 15
    multiplicar: Operador = Multiplicar(subtrair, somar)
    print(f"resultado da multiplicação: {multiplicar.interpretar()}")
    # 30 / 15 = 2
    dividir: Operador = Dividir(Numero(30), multiplicar)
    print(f"resultado da divisão: {dividir.interpretar()}")
| true
|
da5bc97537218a4eddfb9d6e4a63065171c25744
|
Python
|
yongyuandelijian/mystudy
|
/lx20171110/com/lpc/sjk/czsjk20180531.py
|
UTF-8
| 3,400
| 3.359375
| 3
|
[] |
no_license
|
import pymysql
import datetime
# create tables
def createTable(tablename):
# get connect
connect=pymysql.connect(host="localhost",user="root",passwd="123456",db="test",port=3306)
# get cursor
cursor=connect.cursor()
# execute sql
cursor.execute("DROP TABLE IF EXISTS %s"%tablename) # 开始使用了,追加的方式,提示错误,更换成%形式传入参数,正常
# 使用预处理语句创建表
tablesql="""
create table %s
(
id INT(11),
col1 VARCHAR(50),
col2 VARCHAR(300),
col3 VARCHAR(500),
bak1 VARCHAR(20)
)
"""%tablename # 要执行的sql字符串
# execute sql
try:
cursor.execute(tablesql)
print("create table success")
except Exception as e:
print("create table failed",e)
finally:
connect.close()
# insert
def insertDate(data):
connect=pymysql.connect(host="localhost",user="root",passwd="123456",port=3306,db="test")
cursor=connect.cursor()
insertSQL="INSERT INTO pylx_20180531 (id,col1,col2,col3) VALUES(%d,'%s','%s','%s')"%(data[0],data[1],data[2],data[3])
try:
cursor.execute(insertSQL)
connect.commit()
print("insert success")
except Exception as e:
connect.rollback()
print("insert failed:",e)
finally:
connect.close()
# query
def queryData():
connect=pymysql.connect(host="localhost",user="root",passwd="123456",port=3306,db="test")
cursor=connect.cursor()
querySQL="SELECT * FROM pylx_20180531"
try:
cursor.execute(querySQL)
result = cursor.fetchall() # 返回一个全部行的结果集
except Exception as e:
print("发现了一个错误",e)
finally:
connect.close()
return result
# update
def updateData(id):
connect=pymysql.connect(host="localhost",port=3306,user="root",passwd="123456",db="test")
cursor=connect.cursor()
updateSQl="UPDATE pylx_20180531 SET bak1='bakcol' WHERE id=%d"%id
try:
cursor.execute(updateSQl)
connect.commit()
print("update success")
except Exception as e:
connect.rollback()
print("update failed",e)
finally:
connect.close()
# delete
def deleteData(id):
connect=pymysql.connect(host="localhost",port=3306,user="root",passwd="123456",db="test")
cursor=connect.cursor()
deleteSQL = "DELETE FROM pylx_20180531 WHERE id=%d"%id
try:
cursor.execute(deleteSQL)
connect.commit()
print("delete success,影响的行数是",cursor.rowcount)
except Exception as e:
connect.rollback()
print("delete failed",e)
finally:
connect.close()
def main():
    """Exercise one of the CRUD helpers; the other calls are kept as
    commented-out usage examples."""
    # createTable('pylx_20180531')  # create the table
    # insert data
    # data=(1,'xiaoding',"23",datetime.datetime.now().strftime("%Y-%m-%d"))
    # if type(data).__name__!="tuple" or len(data)<4:
    # print("对不起,传入的数据有误,请重新传入")
    # else:
    # insertDate(data)
    # # query data
    # result=queryData()
    # for row in result:
    # for i in row:
    # print("当前取出的元素是》》》",i)
    # update
    # updateData(1)
    # delete
    deleteData(1)
if __name__ == '__main__':
    main()
| true
|
8b413c3ef0f9bef812e5f73abec7af4e7778609c
|
Python
|
westgate458/LeetCode
|
/P0096.py
|
UTF-8
| 1,484
| 3.3125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 21:39:22 2019
@author: Tianqi Guo
"""
class Solution(object):
    def numTrees(self, n):
        """
        :type n: int
        :rtype: int

        Count structurally unique binary search trees over n distinct
        keys (the n-th Catalan number) by dynamic programming: choosing
        key m as the root of a k-key tree leaves m-1 keys for the left
        subtree and k-m for the right, so
        counts[k] = sum(counts[m-1] * counts[k-m] for m in 1..k).
        """
        # counts[k] = number of unique BSTs holding k keys; the empty
        # tree counts as one arrangement.
        counts = [1] + [0] * n
        for total in range(1, n + 1):
            acc = 0
            for root in range(1, total + 1):
                acc += counts[root - 1] * counts[total - root]
            counts[total] = acc
        return counts[n]
# Quick manual check of the solution for n = 9.
n = 9
test = Solution()
# BUG FIX: `print test.numTrees(n)` is the Python 2 print statement and a
# SyntaxError under Python 3; call print() as a function instead.
print(test.numTrees(n))
| true
|
40161165093dda1150422fc6e1e878fe51a7a19c
|
Python
|
JointEntropy/author_identification2
|
/extra_notebooks/logreg.py
|
UTF-8
| 1,775
| 2.9375
| 3
|
[] |
no_license
|
"""
Ссылки по теме:
- [Работа с текстом в scikit](http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html)
"""
# appendix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
corpus = data.reset_index(drop=True)
X, y = corpus['text'],corpus['author']
# encode classes
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
# do train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
# sanity check for stratified split
plt.subplots(1,2,figsize=(15,5))
plt.subplot(1,2,1)
sns.countplot(y_train);
plt.subplot(1,2,2)
sns.countplot(y_test);
%%time
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
vectorizer = TfidfVectorizer(max_features=10**5)
vectorizer.fit(X_train)
X_train, X_test = vectorizer.transform(X_train), vectorizer.transform(X_test)
%%time
from sklearn.linear_model import LogisticRegression, RidgeClassifier, Lasso
lr = LogisticRegression()
#lr.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
lr_params = {
'C': np.logspace(-1,1, 3),#np.logspace(-4,3, 8),
'penalty': ['l1','l2']
}
grid = GridSearchCV(lr, lr_params, verbose=2, n_jobs=-1)#, random_state=8)
grid.fit(X_train, y_train)
grid.best_score_, grid.best_params_
from sklearn.metrics import accuracy_score
grid.score(X_test,y_test)
import pickle as pkl
with open('data/logtest90.pkl','wb') as f:
pkl.dump(grid, f)
with open('data/labelencoder.pkl','wb') as f:
pkl.dump(le, f)
with open('data/tfidf.pkl','wb') as f:
pkl.dump(vectorizer, f)
with open('data/logtest90.pkl', 'rb') as f:
classifier = pkl.load(f)
classifier.score(X_test, y_test)
le.inverse_transform(2)
| true
|
ee9055a3ee8153697794ae42924bdaea9f2fd30b
|
Python
|
PFTL/LMU_Group_2
|
/Model/IV_Measurement.py
|
UTF-8
| 1,667
| 2.625
| 3
|
[] |
no_license
|
import numpy as np
import yaml
from Model.analog_daq import AnalogDaq
import pint
ur = pint.UnitRegistry()
class Experiment:
    """IV-measurement experiment: sweep an output voltage on an analog
    DAQ and record the resulting current at each step.

    Typical lifecycle: load_config() -> load_daq() -> do_scan() ->
    save_data()/save_metadata() -> finish().
    """

    def __init__(self):
        # True while do_scan() is executing; UIs can poll this flag.
        self.scan_running = False

    def load_config(self, filename):
        """Load the YAML experiment parameters into self.params."""
        with open(filename, 'r') as f:
            # BUG FIX: yaml.load(f) without an explicit Loader is unsafe
            # on untrusted files and raises TypeError under PyYAML >= 6;
            # safe_load is correct for a plain-data config file.
            self.params = yaml.safe_load(f)

    def load_daq(self):
        """Instantiate and initialize the DAQ described in self.params."""
        port = self.params['DAQ']['port']
        resistance = self.params['DAQ']['resistance']
        self.daq = AnalogDaq(port, resistance)
        self.daq.initialize()

    def do_scan(self):
        """Sweep the output voltage and record one current per step.

        Range, step and channels come from self.params['Scan'].  Setting
        self.stop_scan to True (e.g. from another thread) aborts the
        sweep after the current point.  Results land in self.voltages /
        self.currents (volts / amperes).
        """
        self.scan_running = True
        start = ur(self.params['Scan']['start'])
        stop = ur(self.params['Scan']['stop'])
        step = ur(self.params['Scan']['step'])
        # Extend the range by one step so the stop value is included.
        self.voltages = np.arange(start.m_as('V'), stop.m_as('V') + step.m_as('V'), step.m_as('V'))
        self.currents = np.zeros(len(self.voltages))
        print(self.voltages)
        channel_out = self.params['Scan']['channel_out']
        channel_in = self.params['Scan']['channel_in']
        self.stop_scan = False
        for i, volt in enumerate(self.voltages):
            self.daq.set_voltage(channel_out, volt * ur('V'))
            self.currents[i] = self.daq.read_current(channel_in).m_as('A')
            if self.stop_scan:
                break
        self.scan_running = False

    def save_data(self, filename):
        """Save the recorded voltages and currents as two text rows."""
        np.savetxt(filename, [self.voltages, self.currents])

    def save_metadata(self, filename):
        """Dump the experiment parameters back out as YAML."""
        with open(filename, 'w') as f:
            yaml.dump(self.params, f)

    def finish(self):
        """Cleanup hook; nothing to release in this implementation."""
        pass
if __name__ == "__main__":
exp = Experiment()
exp.load_config('config.txt')
| true
|
60baacc2910e2389ae3e99638fa455b40172615b
|
Python
|
jmacdotorg/volity
|
/server-python/zymb/jabber/keepalive.py
|
UTF-8
| 7,480
| 2.796875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
import service
import interface
# Namespace for the <iq> packets used for keepalive queries. This can be
# anything, because keepalive queries are sent only to oneself.
NS_ZYMB_KEEPALIVE = 'zymb:keepalive'
# If the keepalive period is 60 seconds, we wake up every 15 seconds
# to check for activity. 60/4 is 15.
DIVISION = 4
class KeepAliveService(service.Service):
    """KeepAliveService: A high-level Jabber facility for tickling the
    Jabber socket every so often.
    (Service label: 'keepaliveservice'.)

    On some machines, because of the peculiarities of their network
    configurations or firewall, a TCP connection that stands for a long
    time without activity will choke and die. That's a sad thing.

    Most machines don't have this problem. Furthermore, some Jabber
    servers -- notably jabber.org -- push a whitespace character down
    the Jabber socket once per minute. That prevents the problem even if
    your machine has it.

    However, if your machine does, and your Jabber server doesn't,
    then your Jabber client has to do something about it. This service
    does something. Once per 90 seconds, it sends a simple Jabber
    query to itself. The contents of the query are ignored, and there
    is no reply; it's just socket activity.

    The service is clever: if there is actual Jabber traffic coming in,
    it doesn't bother sending keepalive queries. More precisely, it
    guarantees that no more than 90 seconds will pass without *some*
    socket activity.

    KeepAliveService(interval=90, panic=False) -- constructor.

    The default *interval* of 90 seconds was chosen because it works
    (on the machine I use which requires keepalive activity). Also,
    it's longer than 60 seconds. So if you use this service on a
    connection to jabber.org, this service will always see the once-
    per-minute whitespace from jabber.org, and will never need to wake
    up.

    If *panic* is True, the service will actually kill the Jabber agent
    if interval*2 seconds go by without an incoming Jabber message.
    (This would imply that the keepalive query was sent, but never
    delivered.)

    In addition to creating and attaching the service, you must start it
    up. Any time after the Jabber agent reaches the 'authresource' state
    (i.e., when it's connected and ready to do Jabber work) you must
    call the service's start() method. For example, you might use code
    like this:

        serv = KeepAliveService()
        jstream.addservice(serv)
        jstream.addhandler('authresource', serv.start)

    Public methods:

    start() -- begin watching the connection and doing work.
    stop() -- cease watching the connection and doing work.
    getinterval() -- get the interval associated with the service.
    setinterval() -- change the interval associated with the service.

    Internal methods:

    attach() -- attach this Service to a JabberStream.
    activity() -- 'handle' event handler.
    check() -- timer handler.
    handleping() -- keepalive stanza handler.
    """
    label = 'keepaliveservice'
    logprefix = 'zymb.jabber.keepalive'

    def __init__(self, interval=90, panic=False):
        if (interval < 10):
            raise ValueError('KeepAliveService interval must be 10 or higher')
        service.Service.__init__(self)
        self.interval = interval
        self.panic = panic
        # counter: heartbeats seen since the last socket activity.
        self.counter = 0
        # heartbeat: seconds between check() wakeups (interval/DIVISION).
        self.heartbeat = None
        # action: the pending timer handle; None while the service is stopped.
        self.action = None

    def attach(self, agent):
        """attach() -- internal method to attach this Service to a
        JabberStream. Do not call this directly. Instead, call
        jstream.addservice(service).

        This calls the inherited class method, and then sets up the
        stanza dispatcher which catches incoming keepalive queries,
        and the handler that watches for socket activity.
        """
        service.Service.attach(self, agent)
        self.agent.adddispatcher(self.handleping, name='iq', type='set')
        self.agent.conn.addhandler('handle', self.activity)

    def start(self):
        """start() -> None

        Begin watching the connection and doing work. If the service is
        already started, this does nothing.
        """
        if (not self.action):
            self.counter = 0
            self.heartbeat = self.interval / DIVISION
            self.action = self.agent.addtimer(self.check, delay=self.heartbeat)
            self.log.debug('starting up, interval %d, heartbeat %d',
                self.interval, self.heartbeat)

    def stop(self):
        """stop() -> None

        Cease watching the connection and doing work. If the service is
        already stopped, this does nothing.
        """
        self.counter = 0
        if (self.action):
            self.action.remove()
            self.action = None
        self.log.debug('shutting down')

    def getinterval(self):
        """getinterval() -> int

        Get the interval associated with the service.
        """
        return self.interval

    def setinterval(self, interval=90):
        """setinterval(interval=90) -> None

        Change the interval associated with the service.
        """
        if (interval < 10):
            raise ValueError('KeepAliveService interval must be 10 or higher')
        self.interval = interval
        # Restart only if currently running, so the new heartbeat takes
        # effect immediately.
        if (self.action):
            self.stop()
            self.start()

    def activity(self, data):
        """activity(data) -- internal 'handle' event handler. Do not call.

        This handler is attached to the socket agent which underlies the
        Jabber stream. When it sees any activity (even a partial Jabber
        message, or whitespace between Jabber stanzas), it resets the
        keepalive timer.
        """
        self.counter = 0

    def check(self):
        """check() -- internal timer handler. Do not call.

        This method is called every interval/4 seconds. (It is not put
        into a periodic timer; it invokes a new timer for itself every
        time it runs.) If it is called 4 times in a row with no network
        activity, it fires off an <iq> query, from the Jabber agent to
        itself. If it gets up to 8 times with no activity, and the panic
        option is True, it shuts down the agent.
        """
        if (not self.action):
            return
        # Re-arm the one-shot timer for the next heartbeat.
        self.action = self.agent.addtimer(self.check, delay=self.heartbeat)
        self.counter += 1
        if (self.counter >= DIVISION):
            msg = interface.Node('iq',
                attrs={'type':'set', 'to':self.agent.jid})
            # setchild mutates msg in place; the return value is unused.
            nod = msg.setchild('query', namespace=NS_ZYMB_KEEPALIVE)
            self.agent.send(msg)
        if (self.panic and self.counter >= DIVISION*2):
            self.log.error('no activity in %d seconds -- shutting down',
                self.heartbeat*DIVISION*2)
            self.agent.stop()

    def handleping(self, msg):
        """handleping() -- keepalive stanza handler. Do not call.

        This dispatcher accepts the <iq> queries sent by the service.
        It does nothing with them, since the only reason they exist is
        to tickle the network socket.
        """
        nod = msg.getchild('query')
        if (not nod or nod.getnamespace() != NS_ZYMB_KEEPALIVE):
            # Not addressed to us
            return
        raise interface.StanzaHandled
| true
|
c7707956ff013b128a9e2d0d1bea2200213c4e03
|
Python
|
ronaldoussoren/modulegraph2
|
/testsuite/test_virtual_environments.py
|
UTF-8
| 3,830
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
import contextlib
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
# - Create virtual environment (venv, virtualenv)
# - Install minimal stuff
# - Create graph with subprocess in the
# virtual environment
# - Verify graph structure, primarily
# check that stdlib nodes refer to stuff
# in the global installation.
# - Expectation is that a lot of code can
# be shared between tests.
if sys.platform == "win32":
BIN_DIR = "Scripts"
else:
BIN_DIR = "bin"
@contextlib.contextmanager
def temporary_directory():
    # Yield the real (symlink-resolved) path of a fresh temporary
    # directory and remove the whole tree on exit, even when the managed
    # block raises.
    path = tempfile.mkdtemp()
    try:
        yield os.path.realpath(path)
    finally:
        shutil.rmtree(path)
def create_virtualenv(environment_module, workdir, name):
    """Create a virtual environment at workdir/name and install the
    packages the test scriptlet needs.

    *environment_module* is "venv" (stdlib) or "virtualenv" (PyPI).
    Returns the path of the new environment.
    """
    if environment_module == "venv" and hasattr(sys, "real_prefix"):
        # For some reason venv doesn't install pip when run
        # from a virtualenv environment. Explicitly launch
        # global version
        subprocess.check_call(
            [
                os.path.join(sys.real_prefix, BIN_DIR, "python3"),
                "-m",
                environment_module,
                name,
            ],
            cwd=workdir,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    else:
        subprocess.check_call(
            [sys.executable, "-m", environment_module, name],
            cwd=workdir,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    venv_dir = os.path.join(workdir, name)
    # objectgraph is a runtime dependency of modulegraph2's scriptlet.
    subprocess.check_call(
        [
            os.path.join(venv_dir, BIN_DIR, "python"),
            "-mpip",
            "install",
            "-qqq",
            "objectgraph",
        ],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if sys.version_info[:2] < (3, 7):
        # dataclasses is stdlib from 3.7 on; older interpreters need the
        # PyPI backport.
        subprocess.check_call(
            [
                os.path.join(venv_dir, BIN_DIR, "python"),
                "-mpip",
                "install",
                "-qqq",
                "dataclasses",
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    return venv_dir
def run_scriptlet(venv_dir):
    """Run a modulegraph2 report inside the environment's interpreter and
    yield the path column of every row in the report table."""
    output = subprocess.check_output(
        [
            os.path.join(venv_dir, BIN_DIR, "python"),
            "-c",
            "import modulegraph2; mg = modulegraph2.ModuleGraph(); mg.add_module('pip'); mg.add_module('distutils'); mg.add_module('distutils.command.bdist'); mg.report()", # noqa: B950
        ],
        stderr=subprocess.DEVNULL,
    )
    lines = output.decode("utf-8").splitlines()
    # Data rows start after the two header lines and the dashed separator.
    assert lines[2].startswith("-----")
    for ln in lines[3:]:
        # The path is the third (last) whitespace-separated column.
        yield ln.split(None, 2)[-1]
class TestVirtualEnv(unittest.TestCase):
    """End-to-end check that modulegraph2 resolves module paths correctly
    inside a virtual environment: stdlib modules must resolve outside the
    environment, site-packages modules inside it."""

    # virtualenv from PyPI
    environment_module = "virtualenv"

    def test_graph_in_virtual_env(self):
        with temporary_directory() as tmpdir:
            venv_dir = create_virtualenv(self.environment_module, tmpdir, "environ")
            for module_path in run_scriptlet(venv_dir):
                with self.subTest(module_path):
                    # Stdlib should be outside the virtualenv, other modules should
                    # be inside
                    if "site-packages" in module_path:
                        self.assertTrue(
                            module_path.startswith(tmpdir),
                            f"{module_path!r} not in virtual environment {tmpdir!r}",
                        )
                    else:
                        self.assertFalse(
                            module_path.startswith(tmpdir),
                            f"{module_path!r} in virtual environment {tmpdir!r}",
                        )
class TestVenv(TestVirtualEnv):
    # venv from the stdlib -- reuses TestVirtualEnv's test logic and only
    # swaps the module used to create the environment.
    environment_module = "venv"
| true
|
0e684995994a3c14c7904dda48ec9df400ea480e
|
Python
|
jim0409/PythonLearning-DataStructureLearning
|
/Algorithm_Learning/chapter07_tree_algorithm/practice/funTree.py
|
UTF-8
| 1,138
| 4.15625
| 4
|
[] |
no_license
|
def Btree_create(tree_deep, data):
    """Insert data[1:] into a binary search tree stored in a flat array.

    The tree is stored heap-style in a list of length 2**tree_deep: the
    root lives at index 1, the left child of index i at 2*i and the
    right child at 2*i + 1.  Unused slots stay 0; data[0] is skipped by
    convention.

    Parameters
    ----------
    tree_deep : int
        Depth of the tree; the backing array gets 2**tree_deep slots.
    data : list[int]
        Values to insert (data[0] is ignored).

    Returns
    -------
    list[int]
        The flat-array representation of the BST.
    """
    btree = [0] * pow(2, tree_deep)
    for i in range(1, len(data)):
        level = 1  # each insertion starts its walk at the root slot
        # BUG FIX: the original compared with `is not 0`, which relies on
        # CPython's small-integer caching and emits a SyntaxWarning on
        # Python 3.8+; value equality (`!= 0`) is the correct test.
        while btree[level] != 0:
            if data[i] > btree[level]:
                level = level * 2 + 1  # larger values descend right
            else:
                level = level * 2      # smaller/equal values descend left
        btree[level] = data[i]
    return btree
def show_tree(tree, tree_deep):
    """Print the heap-layout tree level by level, one line per level."""
    for depth in range(tree_deep):
        # Level `depth` occupies slots [2**depth, 2**(depth+1)).
        row = "".join('[{}]'.format(tree[idx])
                      for idx in range(2 ** depth, 2 ** (depth + 1)))
        print(row)
# Build a depth-4 BST from the sample data and print it level by level.
data = [0, 9, 3, 4, 5]
tree = Btree_create(4, data)
show_tree(tree, 4)
| true
|
237a170ab52673946ed8b8411f7656b1f21d0256
|
Python
|
RIMEL-UCA/RIMEL-UCA.github.io
|
/chapters/2023/Qualité logicielle dans les notebooks Jupyter/assets/python-scripts/BERT-Squad.py
|
UTF-8
| 5,238
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# # Running BERT-Squad model
# **This tutorial shows how to run the BERT-Squad model on Onnxruntime.**
#
# To see how the BERT-Squad model was converted from tensorflow to onnx look at [BERTtutorial.ipynb](https://github.com/onnx/tensorflow-onnx/blob/master/tutorials/BertTutorial.ipynb)
# # Step 1 - Write the input file that includes the context paragraph and the questions for the model to answer.
# In[6]:
%%writefile inputs.json
{
"version": "1.4",
"data": [
{
"paragraphs": [
{
"context": "In its early years, the new convention center failed to meet attendance and revenue expectations.[12] By 2002, many Silicon Valley businesses were choosing the much larger Moscone Center in San Francisco over the San Jose Convention Center due to the latter's limited space. A ballot measure to finance an expansion via a hotel tax failed to reach the required two-thirds majority to pass. In June 2005, Team San Jose built the South Hall, a $6.77 million, blue and white tent, adding 80,000 square feet (7,400 m2) of exhibit space",
"qas": [
{
"question": "where is the businesses choosing to go?",
"id": "1"
},
{
"question": "how may votes did the ballot measure need?",
"id": "2"
},
{
"question": "By what year many Silicon Valley businesses were choosing the Moscone Center?",
"id": "3"
}
]
}
],
"title": "Conference Center"
}
]
}
# # Step 2 - Download the uncased file
# In[ ]:
!wget -q https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip
!unzip uncased_L-12_H-768_A-12.zip
# # Step 3 - Preprocessing
# Extract parameters from the given input and convert it into features.
# In[7]:
import numpy as np
import onnxruntime as ort
import tokenization
import os
from run_onnx_squad import *
import json

# Load and echo the question/context file produced by the %%writefile cell
# above, so the run log shows exactly what is being asked.
input_file = 'inputs.json'
with open(input_file) as json_file:
    test_data = json.load(json_file)
    print(json.dumps(test_data, indent=2))

# preprocess input
predict_file = 'inputs.json'
# Use read_squad_examples method from run_onnx_squad to read the input file
eval_examples = read_squad_examples(input_file=predict_file)

# Tokenization / decoding hyperparameters expected by the BERT-Squad model.
max_seq_length = 256        # sequence length the ONNX graph was exported with
doc_stride = 128            # overlap when a long context is split into spans
max_query_length = 64
batch_size = 1              # the inference loop below assumes batch_size == 1
n_best_size = 20            # candidate answers kept per question
max_answer_length = 30

# WordPiece tokenizer built from the uncased BERT vocabulary downloaded above.
vocab_file = os.path.join('uncased_L-12_H-768_A-12', 'vocab.txt')
tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=True)

my_list = []  # NOTE(review): never used afterwards — candidate for removal

# Use convert_examples_to_features method from run_onnx_squad to get parameters from the input
input_ids, input_mask, segment_ids, extra_data = convert_examples_to_features(eval_examples, tokenizer,
                                                                              max_seq_length, doc_stride, max_query_length)

# # Step 4 - Run the ONNX model under onnxruntime
# Create an onnx inference session and run the model

# In[8]:

# run inference
# Start from ORT 1.10, ORT requires explicitly setting the providers parameter if you want to use execution providers
# other than the default CPU provider (as opposed to the previous behavior of providers getting set/registered by default
# based on the build flags) when instantiating InferenceSession.
# For example, if NVIDIA GPU is available and ORT Python package is built with CUDA, then call API as following:
# ort.InferenceSession(path/to/model, providers=['CUDAExecutionProvider'])
# No providers are passed here, so this session runs on the default CPU provider.
session = ort.InferenceSession('bert.onnx')
for input_meta in session.get_inputs():
    print(input_meta)  # log each graph input's name/shape/type for debugging

n = len(input_ids)
bs = batch_size
all_results = []
# NOTE(review): `timer` is presumably re-exported by run_onnx_squad's star
# import, and the elapsed time is never read (no matching end timestamp).
start = timer()
for idx in range(0, n):
    item = eval_examples[idx]
    # this is using batch_size=1
    # feed the input data as int64
    data = {"unique_ids_raw_output___9:0": np.array([item.qas_id], dtype=np.int64),
            "input_ids:0": input_ids[idx:idx+bs],
            "input_mask:0": input_mask[idx:idx+bs],
            "segment_ids:0": segment_ids[idx:idx+bs]}
    result = session.run(["unique_ids:0","unstack:0", "unstack:1"], data)
    # result[1]/result[2] hold the start/end logits for the batch.
    in_batch = result[1].shape[0]
    start_logits = [float(x) for x in result[1][0].flat]
    end_logits = [float(x) for x in result[2][0].flat]
    for i in range(0, in_batch):
        # unique_id doubles as a running index into all_results.
        unique_id = len(all_results)
        all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits))

# # Step 5 - Postprocessing
# Write the predictions (answers to the input questions) in a file

# In[9]:

# postprocessing
output_dir = 'predictions'
os.makedirs(output_dir, exist_ok=True)
output_prediction_file = os.path.join(output_dir, "predictions.json")
output_nbest_file = os.path.join(output_dir, "nbest_predictions.json")
# Decode the raw logits into text answers and write both the best answer
# and the n-best list to disk.
write_predictions(eval_examples, extra_data, all_results,
                  n_best_size, max_answer_length,
                  True, output_prediction_file, output_nbest_file)

# Print the results

# In[10]:

# print results
import json
with open(output_prediction_file) as json_file:
    test_data = json.load(json_file)
    print(json.dumps(test_data, indent=2))
| true
|
0ac6e3b85af9beeeb7e32d80d30d79c5ecf4db83
|
Python
|
irhadSaric/Instrukcije
|
/pok4.2.py
|
UTF-8
| 268
| 3.59375
| 4
|
[] |
no_license
|
def funkcija():
    """Read numbers from stdin until a blank line; return True iff 2 was entered."""
    found = False
    # The first prompt deliberately differs from the loop prompt.
    entry = input("Unesi br: ")  # input() returns a string
    while entry:
        # int() is always applied, so non-numeric input raises ValueError
        # exactly as in the original implementation.
        if int(entry) == 2:
            found = True
        entry = input("Unesi broj: ")
    return found
# Report whether the value 2 appeared among the numbers the user entered.
if funkcija():
    print("Nalazi se")      # "it is present"
else:
    print("Ne nalazi se")   # "it is not present"
| true
|
1f43f194bb31c62aee8ef208a1ab1f8e40a3a549
|
Python
|
johansten/cryptopals
|
/set 2/16.py
|
UTF-8
| 1,252
| 3.078125
| 3
|
[] |
no_license
|
import cryptopals
class Api(object):
    """CBC encryption oracle: wraps caller-supplied data between fixed
    comment fields, as used by the bit-flipping attack below."""

    def __init__(self):
        # A fresh random key and IV per oracle instance.
        self.key = cryptopals.get_random_key(16)
        self.iv = cryptopals.get_random_key(16)

    def encrypt_string(self, s):
        """Embed s between the fixed prefix/suffix, PKCS#7-pad, CBC-encrypt."""
        plaintext = ("comment1=cooking%20MCs;userdata=" +
                     s + ";comment2=%20like%20a%20pound%20of%20bacon")
        padded = cryptopals.pkcs7_pad(plaintext, 16)
        return cryptopals.encrypt_cbc(padded, self.key, self.iv)

    def is_admin(self, s):
        """Decrypt s and report whether the admin token appears in it."""
        decrypted = cryptopals.decrypt_cbc(s, self.key, self.iv)
        return ';admin=true;' in decrypted
#
# Attack plan (CBC bit-flipping):
#
# Isolate ";admin-true" in its own block, w/ a whole block of padding in front of it.
#
# Change '-' to '=' by flipping a bit in the cipher output.
# This will scramble the previous block, but that's padding anyway.
# Just need to check that none of the scrambled data messes up the parsing later on
#
userdata = "----------------;admin-true"
# Rebuild the full plaintext locally just to visualize the 16-byte block
# layout: the 32-byte prefix fills blocks 0-1, the 16 dashes fill block 2,
# and ";admin-true" starts at block 3.
raw = ("comment1=cooking%20MCs;userdata=" +
       userdata + ";comment2=%20like%20a%20pound%20of%20bacon")
print list(cryptopals.chunks(raw, 16))
#
api = Api()
cipher_text = api.encrypt_string(userdata)
# the position of the byte we need to modify:
# offset 6 inside ciphertext block 2 (the dash filler block), i.e. the
# block *before* the one holding ";admin-true".
pos = 2 * 16 + 6
# In CBC, flipping a ciphertext bit flips the same bit of the *next*
# block's plaintext. ord('-') is 0x2d and ord('=') is 0x3d, so XOR with
# 16 (0x10) turns the '-' of "admin-true" into '=' while only scrambling
# the sacrificial dash block.
modification = chr(ord(cipher_text[pos]) ^ 16)
cipher_text = cipher_text[:pos] + modification + cipher_text[pos+1:]
print api.is_admin(cipher_text)
| true
|
6806049983fb9fe8b93121034c9c9a66d4b3b093
|
Python
|
Schulich-Ignite/spark
|
/spark/util/Errors.py
|
UTF-8
| 3,606
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
class ArgumentError(Exception):
    """Base class for all argument-validation errors in this module.

    Stores the formatted message on ``self.message`` and passes it to
    ``Exception`` so ``str(err)`` returns it.
    """

    def __init__(self, message=""):
        self.message = message
        super().__init__(self.message)


class ArgumentTypeError(ArgumentError):
    """Raised when a single argument's type is outside the allowed set.

    Message shape: ``<func> expected <arg> to be of type <types>, got
    <value> of type <actual>``.
    """

    def __init__(self, func_name, argument_name, allowed_types, actual_type, arg):
        # Mention the argument by name only when a non-empty name was given.
        # (isinstance replaces the original `type(x) == str` check so str
        # subclasses are handled too.)
        if isinstance(argument_name, str) and len(argument_name) > 0:
            argname = "{} to be of type".format(argument_name)
        else:
            argname = "type"
        type_str = ""
        if isinstance(allowed_types, list):
            allowed_type_names = [t.__name__ for t in allowed_types]
            if len(allowed_type_names) == 1:
                type_str += allowed_type_names[0]
            elif len(allowed_type_names) == 2:
                type_str += "{} or {}".format(*allowed_type_names)
            else:
                # Three or more options: "a, b, or c".
                type_str = ", ".join([str(t) for t in allowed_type_names[:-1]])
                type_str += ", or {}".format(allowed_type_names[-1])
        elif isinstance(allowed_types, type):
            # A single class; isinstance (unlike `type(x) == type`) also
            # accepts classes with a custom metaclass.
            type_str = allowed_types.__name__
        else:
            # Anything else (e.g. a descriptive string) is used verbatim.
            type_str = str(allowed_types)
        self.message = "{} expected {} {}, got {} of type {}".format(
            func_name, argname, type_str, arg, actual_type.__name__)
        super().__init__(self.message)


class ArgumentTypeListError(ArgumentError):
    """Raised when a whole argument list matches none of the valid
    signatures; the message lists every accepted signature and what was
    actually received.
    """

    def __init__(self, func_name, valid_fmts, actual_fmt, actual_vals=None):
        arg_plural = "argument"
        if len(actual_fmt) > 1:
            arg_plural += "s"
        s = "Invalid types for {} with {} {}, expected".format(
            func_name, len(actual_fmt), arg_plural)
        if len(valid_fmts) >= 1:
            if len(valid_fmts) > 1:
                s += " one of"
            s += " \n"
            # One "\tfunc(type, type, ...)\n" line per accepted signature.
            s += "".join(["\t{}({})\n".format(func_name, ", ".join(
                [t.__name__ for t in fmt])) for fmt in valid_fmts])
        else:
            s += "{}()\n".format(func_name)
        s += "received {}(".format(func_name)
        # Pair each received value with its type when the caller supplied
        # values matching the type list one-to-one; otherwise show types only.
        if actual_vals is not None and len(actual_vals) == len(actual_fmt):
            s += ", ".join(["{}: {}".format(arg, t.__name__)
                            for arg, t in zip(actual_vals, actual_fmt)])
        else:
            s += ", ".join([t.__name__ for t in actual_fmt])
        s += ")"
        self.message = s
        super().__init__(self.message)


class ArgumentNumError(ArgumentError):
    """Raised when the number of arguments is outside the allowed counts."""

    def __init__(self, func_name, allowed_nums, actual_num):
        num_str = ""
        if isinstance(allowed_nums, list):
            if len(allowed_nums) == 1:
                num_str += str(allowed_nums[0])
            elif len(allowed_nums) == 2:
                num_str += "{} or {}".format(*allowed_nums)
            else:
                # Three or more options: "1, 2, or 3".
                num_str = ", ".join([str(n) for n in allowed_nums[:-1]])
                num_str += ", or {}".format(allowed_nums[-1])
        else:
            num_str = str(allowed_nums)
        self.message = "{} expected {} arguments, got {}".format(
            func_name,
            num_str,
            actual_num
        )
        super().__init__(self.message)


class ArgumentConditionError(ArgumentError):
    """Raised when an argument's value fails a condition (e.g. a range
    check), quoting the condition the value was expected to satisfy."""

    def __init__(self, func_name, arg_name, expected_condition, actual_value):
        if isinstance(arg_name, str) and len(arg_name) > 0:
            argname = "{}".format(arg_name)
        else:
            argname = "argument"
        self.message = "{} expected {} to match \"{}\", got {}".format(
            func_name,
            argname,
            expected_condition,
            actual_value
        )
        super().__init__(self.message)
| true
|
fc8842e810a57ce3a175b3fadef2d9e000a0eba3
|
Python
|
nh273/caro-ai
|
/lib/test_mcts.py
|
UTF-8
| 1,431
| 3.4375
| 3
|
[] |
no_license
|
import pytest
from unittest.mock import MagicMock, patch
from lib.mcts import MCTS
@pytest.fixture
def tree():
    """MCTS over a mocked game with two possible actions: 0 and 1.

    Encoded history: starting from state 1, action 1 was taken once and
    led to state 2; from state 2, action 0 led to state 3 (which has not
    been visited yet).
    """
    mcts = MCTS(MagicMock())
    mcts.visit_count = {1: [0, 1], 2: [1, 0], 3: [0, 0]}
    mcts.value = {1: [0.0, 0.5], 2: [0.6, 0.0], 3: [0.0, 0.0]}
    mcts.value_avg = {1: [0.0, 0.5], 2: [0.6, 0.0], 3: [0.0, 0.0]}
    # Prior probabilities of actions at each state sum to 1. Unvisited
    # state 3 carries only the priors queried from the neural network;
    # its counts and values are all zero.
    mcts.probs = {1: [0.1, 0.9], 2: [0.8, 0.2], 3: [0.7, 0.3]}
    return mcts
class TestBackup:
    def test_back_up(self, tree):
        """_backup updates counts, value totals, and value averages along
        the visited path, flipping the value's sign at each ply."""
        leaf_value = 0.2
        visited_states = [1, 2, 3]
        # Replay the same path again (1 --1--> 2 --0--> 3), then action 0.
        taken_actions = [1, 0, 0]
        tree._backup(leaf_value, visited_states, taken_actions)
        assert tree.visit_count == {1: [0, 2], 2: [2, 0], 3: [1, 0]}
        # The value's sign alternates between plies on the way up.
        assert tree.value == {1: [0.0, 0.3], 2: [0.8, 0.0], 3: [-0.2, 0.0]}
        # Averages are the accumulated value divided by the visit count.
        assert tree.value_avg == {
            1: [0.0, 0.15], 2: [0.4, 0.0], 3: [-0.2, 0.0]}
| true
|