| max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
|---|---|---|---|---|---|---|
| pennylane/transforms/hamiltonian_expand.py | rmoyard/pennylane | 0 | 12776651 |
<reponame>rmoyard/pennylane
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the hamiltonian expand tape transform
"""
# pylint: disable=protected-access
import pennylane as qml
def hamiltonian_expand(tape, group=True):
r"""
Splits a tape measuring a Hamiltonian expectation into multiple tapes of Pauli expectations,
and provides a function to recombine the results.
Args:
tape (.QuantumTape): the tape used when calculating the expectation value
of the Hamiltonian
group (bool): whether to compute groups of commuting Pauli observables, leading to fewer tapes
Returns:
tuple[list[.QuantumTape], function]: Returns a tuple containing a list of
quantum tapes to be evaluated, and a function to be applied to these
tape executions to compute the expectation value.
**Example**
Given a Hamiltonian,
.. code-block:: python3
H = qml.PauliY(2) @ qml.PauliZ(1) + 0.5 * qml.PauliZ(2) + qml.PauliZ(1)
and a tape of the form,
.. code-block:: python3
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
We can use the ``hamiltonian_expand`` transform to generate new tapes and a classical
post-processing function for computing the expectation value of the Hamiltonian.
>>> tapes, fn = qml.transforms.hamiltonian_expand(tape)
We can evaluate these tapes on a device:
>>> dev = qml.device("default.qubit", wires=3)
>>> res = dev.batch_execute(tapes)
Applying the processing function results in the expectation value of the Hamiltonian:
>>> fn(res)
-0.5
.. Warning::
Note that defining Hamiltonians inside of QNodes using arithmetic can lead to errors.
See :class:`~pennylane.Hamiltonian` for more information.
The ``group`` keyword argument toggles between the creation of one tape per Pauli observable, or
one tape per group of commuting Pauli observables computed by the :func:`.measurement_grouping`
transform:
.. code-block:: python3
H = qml.Hamiltonian([1., 2., 3.], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
# split H into observable groups [qml.PauliZ(0)] and [qml.PauliX(1), qml.PauliX(0)]
tapes, fn = qml.transforms.hamiltonian_expand(tape)
print(len(tapes)) # 2
# split H into observables [qml.PauliZ(0)], [qml.PauliX(1)] and [qml.PauliX(0)]
tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)
print(len(tapes)) # 3
"""
hamiltonian = tape.measurements[0].obs
if not isinstance(hamiltonian, qml.Hamiltonian) or len(tape.measurements) > 1:
raise ValueError(
"Passed tape must end in `qml.expval(H)`, where H is of type `qml.Hamiltonian`"
)
if group:
hamiltonian.simplify()
return qml.transforms.measurement_grouping(tape, hamiltonian.ops, hamiltonian.coeffs)
# create tapes that measure the Pauli-words in the Hamiltonian
tapes = []
for ob in hamiltonian.ops:
new_tape = tape.copy()
new_tape._measurements = [
qml.measure.MeasurementProcess(return_type=qml.operation.Expectation, obs=ob)
]
tapes.append(new_tape)
# create processing function that performs linear recombination
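# For example (illustrative numbers, not from the docstring above): with coefficients
# [1.0, 2.0] and raw tape results [0.5, -0.25], the recombined expectation value is
# 1.0 * 0.5 + 2.0 * (-0.25) = 0.0.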
def processing_fn(res):
dot_products = [
qml.math.dot(qml.math.squeeze(res[i]), hamiltonian.coeffs[i]) for i in range(len(res))
]
return qml.math.sum(qml.math.stack(dot_products), axis=0)
return tapes, processing_fn
| 2.46875 | 2 |
| Tests/Environments/Connect4/test_createMirroredStateAndPolicy.py | ikaroszhang96/Convex-AlphaZero | 0 | 12776652 |
from Main.Environments.Connect4 import Constants, Utils
from Tests.Environments.Connect4 import testCasesRawEvaluate
from unittest import TestCase
import numpy as np
class TestCreateMirroredStateAndPolicy(TestCase):
def testMirrorState(self):
AMOUNT_OF_TESTS_PER_CASE = 10
for case in testCasesRawEvaluate.TEST_CASES:
board = np.array(case[0])
for p in [-1, 1]:
convState = Utils.state2ConvState(board, p)
convStates = [convState for i in range(AMOUNT_OF_TESTS_PER_CASE)]
randomPolices = [np.random.random(7) for i in range(AMOUNT_OF_TESTS_PER_CASE)]
mirrorStates, mirrorPolices = Utils.createMirroredStateAndPolicy(convStates, randomPolices)
reMirrorStates, reMirrorPolices = Utils.createMirroredStateAndPolicy(mirrorStates, mirrorPolices)
for i in range(len(randomPolices)):
assert np.array_equal(randomPolices[i], reMirrorPolices[i])
for m in reMirrorStates:
assert np.array_equal(convState, m)
| 2.390625 | 2 |
| Server/Sock_Conn.py | vinaysb/DroidStreamDeck | 1 | 12776653 |
from PyQt5.QtCore import QThread, pyqtSignal
import settings
import socket
import Hotkey_Press
class Sock_Conn(QThread):
closeDiag = pyqtSignal()
def __init__(self):
QThread.__init__(self)
def __del__(self):
self.wait()
def run(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((settings.ip, settings.port))
s.listen(5)
while settings.conn_stat:
c, addr = s.accept()
code_encoded = c.recv(1024)
code_decoded = code_encoded.decode('utf-8')
if (code_decoded == "Conn"):
settings.socket_flag = 1
self.closeDiag.emit()
else:
Hotkey_Press.Hotkey(code_decoded)
c.close()
s.close()
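# --- Hedged usage sketch (illustrative, not part of the original module) ---
# A minimal manual test client, assuming settings.ip / settings.port point at a
# running Sock_Conn server: sending b"Conn" should set settings.socket_flag and
# emit closeDiag; any other payload is passed to Hotkey_Press.Hotkey().
if __name__ == "__main__":
    with socket.create_connection((settings.ip, settings.port)) as client:
        client.sendall(b"Conn")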
| 2.5625 | 3 |
| catalog/urls.py | edwildson/djangosecommerce | 1 | 12776654 |
<reponame>edwildson/djangosecommerce
from django.conf.urls import url, include
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.product_list, name='product_list'),
url(r'^(?P<slug>[\w_-]+)$', views.category, name='category'),
url(r'^produto/(?P<slug>[\w_-]+)$', views.product, name='product'),
]
| 1.789063 | 2 |
| graph4nlp/pytorch/test/seq_decoder/graph2seq/src/g2s_v2/core/utils/constants.py | stjordanis/graph4nlp | 18 | 12776655 |
"""
Module to handle universal/general constants used across files.
"""
################################################################################
# Constants #
################################################################################
# GENERAL CONSTANTS:
VERY_SMALL_NUMBER = 1e-31
INF = 1e20
_PAD_TOKEN = '#pad#'
# _PAD_TOKEN = '<P>'
_UNK_TOKEN = '<unk>'
_SOS_TOKEN = '<s>'
_EOS_TOKEN = '</s>'
# LOG FILES ##
_CONFIG_FILE = "config.json"
_SAVED_WEIGHTS_FILE = "params.saved"
_PREDICTION_FILE = "test_pred.txt"
_REFERENCE_FILE = "test_ref.txt"
| 1.875 | 2 |
| tests/test_catch_server.py | kiwicom/pytest-catch-server | 5 | 12776656 |
def test_catch_server__get(testdir):
testdir.makepyfile(
"""
import urllib.request
def test_get(catch_server):
url = "http://{cs.host}:{cs.port}/get_it".format(cs=catch_server)
request = urllib.request.Request(url, method="GET")
with urllib.request.urlopen(request) as response:
assert response.status == 200
assert response.read() == b"OK"
assert catch_server.requests == [
{"method": "GET", "path": "/get_it", "data": b""},
]
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(["*::test_get PASSED*"])
assert result.ret == 0
def test_catch_server__post(testdir):
testdir.makepyfile(
"""
import urllib.request
def test_post(catch_server):
url = "http://{cs.host}:{cs.port}/post_it".format(cs=catch_server)
request = urllib.request.Request(url, method="POST", data=b"something")
with urllib.request.urlopen(request) as response:
assert response.status == 200
assert response.read() == b"OK"
assert catch_server.requests == [
{"method": "POST", "path": "/post_it", "data": b"something"},
]
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(["*::test_post PASSED*"])
assert result.ret == 0
def test_catch_server__put(testdir):
testdir.makepyfile(
"""
import urllib.request
def test_put(catch_server):
url = "http://{cs.host}:{cs.port}/put_it".format(cs=catch_server)
request = urllib.request.Request(url, method="PUT", data=b"other data")
with urllib.request.urlopen(request) as response:
assert response.status == 200
assert response.read() == b"OK"
assert catch_server.requests == [
{"method": "PUT", "path": "/put_it", "data": b"other data"},
]
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(["*::test_put PASSED*"])
assert result.ret == 0
def test_catch_server__patch(testdir):
testdir.makepyfile(
"""
import urllib.request
def test_patch(catch_server):
url = "http://{cs.host}:{cs.port}/patch_it".format(cs=catch_server)
request = urllib.request.Request(url, method="PATCH", data=b'{"x": 42}')
with urllib.request.urlopen(request) as response:
assert response.status == 200
assert response.read() == b"OK"
assert catch_server.requests == [
{"method": "PATCH", "path": "/patch_it", "data": b'{"x": 42}'},
]
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(["*::test_patch PASSED*"])
assert result.ret == 0
def test_catch_server__delete(testdir):
testdir.makepyfile(
"""
import urllib.request
def test_delete(catch_server):
url = "http://{cs.host}:{cs.port}/delete_it".format(cs=catch_server)
request = urllib.request.Request(url, method="DELETE")
with urllib.request.urlopen(request) as response:
assert response.status == 200
assert response.read() == b"OK"
assert catch_server.requests == [
{"method": "DELETE", "path": "/delete_it", "data": b""},
]
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(["*::test_delete PASSED*"])
assert result.ret == 0
def test_catch_server__multiple_requests(testdir):
testdir.makepyfile(
"""
import urllib.request
def test_multiple_requests(catch_server):
for n, method in enumerate(["PUT", "POST", "PATCH"]):
url = "http://{cs.host}:{cs.port}/req_{n}".format(cs=catch_server, n=n)
data = "{} {}".format(n, method).encode("utf-8")
request = urllib.request.Request(url, method=method, data=data)
with urllib.request.urlopen(request) as response:
assert response.status == 200
assert response.read() == b"OK"
assert catch_server.requests == [
{"method": "PUT", "path": "/req_0", "data": b"0 PUT"},
{"method": "POST", "path": "/req_1", "data": b"1 POST"},
{"method": "PATCH", "path": "/req_2", "data": b"2 PATCH"},
]
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(["*::test_multiple_requests PASSED*"])
assert result.ret == 0
| 2.71875 | 3 |
| tests/unit_tests/sql_parse_tests.py | mis-esta/superset | 1 | 12776657 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
import sqlparse
from superset.sql_parse import ParsedQuery
def test_cte_with_comments_is_select():
"""
Some CTES with comments are not correctly identified as SELECTS.
"""
sql = ParsedQuery(
"""WITH blah AS
(SELECT * FROM core_dev.manager_team),
blah2 AS
(SELECT * FROM core_dev.manager_workspace)
SELECT * FROM blah
INNER JOIN blah2 ON blah2.team_id = blah.team_id"""
)
assert sql.is_select()
sql = ParsedQuery(
"""WITH blah AS
/*blahblahbalh*/
(SELECT * FROM core_dev.manager_team),
--blahblahbalh
blah2 AS
(SELECT * FROM core_dev.manager_workspace)
SELECT * FROM blah
INNER JOIN blah2 ON blah2.team_id = blah.team_id"""
)
assert sql.is_select()
def test_cte_is_select():
"""
Some CTEs are not correctly identified as SELECTS.
"""
# `AS(` gets parsed as a function
sql = ParsedQuery(
"""WITH foo AS(
SELECT
FLOOR(__time TO WEEK) AS "week",
name,
COUNT(DISTINCT user_id) AS "unique_users"
FROM "druid"."my_table"
GROUP BY 1,2
)
SELECT
f.week,
f.name,
f.unique_users
FROM foo f"""
)
assert sql.is_select()
def test_unknown_select():
"""
Test that `is_select` works when sqlparse fails to identify the type.
"""
sql = "WITH foo AS(SELECT 1) SELECT 1"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert ParsedQuery(sql).is_select()
sql = "WITH foo AS(SELECT 1) INSERT INTO my_table (a) VALUES (1)"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert not ParsedQuery(sql).is_select()
sql = "WITH foo AS(SELECT 1) DELETE FROM my_table"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert not ParsedQuery(sql).is_select()
| 1.960938 | 2 |
| step0/changes/updateByLine.py | funderburkjim/boesp-prep | 0 | 12776658 |
"""updateByLine.py Begun Apr 10, 2014
This program is intended to be rather general.
The 'changein' file consists of a sequence of line pairs:
nn old old-text
nn new new-text
nn is the line number (starting at 1) in the input vcp file.
'old' and 'new' are fixed.
old-text should be identical to the text of line nn in input vcp file.
new-text is the replacement for line nn, written to the output vcp file.
'changein' file should be utf-8 encoded.
Nov 16, 2014 comment line
May 30, 2017. Allow for 'ins' (insert) and 'del' (delete) in addition to 'new'
1234 old xyz
1234 ins uvw
1234 old xyz
1234 del
NOTE: This introduces complications regarding line numbers.
The interpretation is that
(a) the line number (1234) represents the line number in the INPUT file
(b) For 'ins', the inserted line ('uvw') is inserted AFTER this line
(c) For 'del', the text part is ignored (it should typically be blank,
and there should be a space character after 'del': '1234 del ')
Nov 27, 2018. Changed print X to print(X), for python3 compatibility.
"""
#
from __future__ import print_function
import re,sys
import codecs
class Change(object):
def __init__(self,n,oldline,newline):
self.n = n
m = re.search(r'^([0-9]+) old (.*)$',oldline)
m1 = re.search(r'^([0-9]+) (new|ins|del) (.*)$',newline)
if (not m) or (not m1):
print('Change error(1) @ line %s:' % n)
out= 'oldline=%s' % oldline
print(out.encode('utf-8'))
out= 'newline=%s' % newline
print(out.encode('utf-8'))
exit(1)
self.chgcode = m1.group(2)
nold = m.group(1)
m = re.search(r'^([0-9]+) old (.*)$',oldline)
oldtext = m.group(2)
nnew = m1.group(1)
newtext = m1.group(3)
if nold != nnew:
print('Change error(2) @ line %s:' % n)
print('nold(%s) != nnew(%s)' % (nold,nnew))
out= 'oldline=%s' % oldline
print(out.encode('utf-8'))
out= 'newline=%s' % newline
print(out.encode('utf-8'))
exit(1)
if (not m) or (not m1):
print('Change error(2) @ line %s:' % n)
out= 'oldline=%s' % oldline
print(out.encode('utf-8'))
out= 'newline=%s' % newline
print(out.encode('utf-8'))
exit(1)
self.lnumstr = nold # same as nnew
self.oldtext = oldtext
self.newtext = newtext
def init_changein(changein ):
changes = [] # ret
f = codecs.open(changein,encoding='utf-8',mode='r')
n = 0
sep='XXXX'
for line in f:
line = line.rstrip('\r\n')
if line.startswith(';'): # skip comment line
continue
n = n + 1
if (n % 2) == 1:
oldline = line
else:
newline = line
chgrec = Change(n-1,oldline,newline)
changes.append(chgrec)
f.close()
if (n % 2) != 0:
print("ERROR init_changein: Expected EVEN number of lines in",changein)
exit(1)
return changes
def update(filein,changein,fileout):
# determine change structure from changein file
changes = init_changein(changein)
# initialize input records
with codecs.open(filein,encoding='utf-8',mode='r') as f:
# recs is a list of lines, to accommodate 'ins' and 'del'
recs = [[line.rstrip('\n\r')] for line in f]
print(len(recs),"lines read from",filein)
# process change records
# counter for each type ('new','ins','del') of change record
counter = {}
for change in changes:
lnum = int(change.lnumstr)
irec = lnum - 1 # since lnum assumed to start at 1
try:
oldrec = recs[irec]
except:
print("lnum error: ",change.lnumstr)
exit(1)
# oldrec is a list of lines, typically with just 1 line.
# We assume there is always at least 1 element in this list, and
# that its text matches the 'oldtext' of the change
if len(oldrec)==0:
print("update ERROR #1. record has been deleted for linenum=",lnum)
exit(1)
oldtext = oldrec[0]
if oldtext != change.oldtext:
print("CHANGE ERROR #2: Old mismatch line %s of %s" %(change.n,changein))
print("Change record lnum =",lnum)
out = "Change old text:\n%s" % change.oldtext
print(out.encode('utf-8'))
out = "Change old input:\n%s" % oldtext
print(out.encode('utf-8'))
out = "line from %s:" % filein
print(out.encode('utf-8'))
exit(1)
code = change.chgcode
# update counter
if code not in counter:
counter[code] = 0
counter[code] = counter[code] + 1
if code == 'new':
# a simple change. Make this to the last in list of oldrecs
oldrec.pop() # remove last record
oldrec.append(change.newtext) # insert new text at end
recs[irec] = oldrec
elif code == 'ins':
# insert new text onto end of oldrec
oldrec.append(change.newtext)
recs[irec] = oldrec
elif code == 'del':
# remove text from end
oldrec.pop() # remove last record
recs[irec] = oldrec
# write all records to fileout
fout = codecs.open(fileout,'w','utf-8')
nout = 0
for rec in recs:
# rec is a list of strings, possibly empty
for text in rec:
fout.write("%s\n" % text)
nout = nout + 1
fout.close()
# write summary of changes performed
print(nout,"records written to",fileout)
print("%s change transactions from %s" % (len(changes),changein))
# summary of types of changes transacted
codes = counter.keys()
outarr = ["%s of type %s"%(counter[key],key) for key in codes]
out = ', '.join(outarr)
print(out)
if __name__=="__main__":
filein = sys.argv[1]
changein = sys.argv[2]
fileout = sys.argv[3]
update(filein,changein,fileout)
| 2.984375 | 3 |
| ox_mon/common/__init__.py | emin63/ox_mon | 0 | 12776659 |
"""Package with toosl common to various areas of ox_mon
"""
| 1.03125 | 1 |
| Python/CCC - Roll the Dice.py | RobinNash/Solutions-to-Competition-Problems | 0 | 12776660 |
<reponame>RobinNash/Solutions-to-Competition-Problems<filename>Python/CCC - Roll the Dice.py
# Roll the Dice #
# November 17, 2018
# By <NAME>
n = int(input())
m = int(input())
if n > 10:
n = 9
if m > 10:
m = 9
ways = 0
for n in range (1,n+1):
for m in range(1,m+1):
if n + m == 10:
ways+=1
if ways == 1:
print("There is 1 way to get the sum 10.")
else:
print("There are",ways,"ways to get the sum 10.")
#1542488930.0
| 3.640625 | 4 |
| source/code/elm327/358-turn-signal.py | wosk/nissan-leaf-obd-manual | 4 | 12776661 |
<reponame>wosk/nissan-leaf-obd-manual
#!/usr/bin/env python
"""358 turn signal
Query the turn signal status of a Nissan Leaf using an ELM327 compatible diagnostic
tool.
Tested on the following vehicles:
* AZE0
"""
import serial
elm = serial.Serial("/dev/ttyUSB0", 38400, timeout=5)
elm.write(b"ATZ\r") # reset all
print(elm.read_until(b"\r\r>").decode())
elm.write(b"ATI\r") # print version ID
print(elm.read_until(b"\r\r>").decode())
elm.write(b"ATL1\r") # line feeds on
print(elm.read_until(b"\r\n>").decode())
elm.write(b"ATH1\r") # headers on
print(elm.read_until(b"\r\n>").decode())
elm.write(b"ATS1\r") # print spaces on
print(elm.read_until(b"\r\n>").decode())
elm.write(b"ATAL\r") # allow long messages
print(elm.read_until(b"\r\n>").decode())
elm.write(b"ATSP6\r") # set protocol ISO 15765-4 CAN (11/500)
print(elm.read_until(b"\r\n>").decode())
elm.write(b"ATCRA 358\r") # set CAN receive address
print(elm.read_until(b"\r\n>").decode())
elm.write(b"ATMA\r") # monitor all messages
try:
while True:
print(elm.read_until(b"\r\n").decode(), flush=True)
except KeyboardInterrupt:
print("Keyboard interrupt")
elm.close()
| 2.671875 | 3 |
| snake/constants.py | ajutras/plexsnake | 0 | 12776662 |
<filename>snake/constants.py
from enum import Enum
from typing import Type, Union
from plexapi.library import MovieSection, MusicSection, PhotoSection, ShowSection
SECTION_TYPE = Union[Type[MovieSection], Type[MusicSection], Type[PhotoSection], Type[ShowSection]]
VIDEO_EXTENSIONS = ["mkv", "mp4", "avi", "mpeg", "flv", "webm", "ogv", "gifv", "mov", "wmv", "mpv", "m4v"]
WINDOWS_ILLEGAL_CHARACTERS = ["<", ">", ":", '"', "/", "\\", "|", "?", "*"]
class Operator(str, Enum):
in_ = "in"
gt = ">"
lt = "<"
gte = ">="
lte = "<="
eq = "="
| 2.453125 | 2 |
| satdetect/viz/VizUtil.py | michaelchughes/satdetect | 3 | 12776663 |
'''
VizUtil.py
Utilities for displaying satellite images,
with (optional) bound-box annotations
'''
import numpy as np
from matplotlib import pylab
import os
import skimage.color

# NOTE: save_fig_as_png() below references DEFAULTSAVEPATH, which is never defined
# in this module; the fallback here is an assumption (current working directory)
# added so that branch does not raise a NameError.
DEFAULTSAVEPATH = os.path.abspath('.')
def imshow(Im, block=False, figID=1):
figH = pylab.figure(num=figID)
figH.clf()
pylab.imshow(Im)
pylab.draw()
pylab.show(block=block)
def showExamples(PMat, Nsubplots=9, block=False, figID=1, W=1, H=1):
nRow = int(np.floor(np.sqrt(Nsubplots)))
nCol = int(np.ceil(Nsubplots/ float(nRow)))
figH, axH = pylab.subplots(nRow, nCol, num=figID, figsize=(W*nCol, H*nRow))
Kplot = np.minimum(PMat.shape[0], Nsubplots)
for kk in range(Kplot):
pylab.subplot(nRow, nCol, kk+1)
if PMat[kk].ndim == 3:
pylab.imshow(PMat[kk], interpolation='nearest')
else:
pylab.imshow(PMat[kk], interpolation='nearest', cmap='gray')
pylab.axis('image')
pylab.xticks([])
pylab.yticks([])
# Disable visibility for unused subplots
for kk in range(Kplot, nRow*nCol):
pylab.subplot(nRow, nCol, kk+1)
pylab.axis('off')
pylab.draw()
pylab.show(block=block)
def save_fig_as_png(savepath, figID=1):
figH = pylab.figure(num=figID)
pylab.draw()
if not os.path.exists(savepath) and not savepath.count(os.path.sep):
savepath = os.path.join(DEFAULTSAVEPATH, savepath)
pylab.xticks([])
pylab.yticks([])
pylab.savefig(savepath, bbox_inches = 'tight', pad_inches = 0)
def makeImageWithBBoxAnnotations(Im, BBox, BBox2=None,
boxcolor=[0,1,0], # green
boxcolor2=[1,1,0], # yellow
**kwargs):
''' Create color image with bounding boxes highlighted in color
'''
if Im.ndim < 3:
AIm = skimage.color.gray2rgb(Im)
else:
AIm = Im.copy() # annotation shouldn't happen to original array
_add_bbox_to_im_inplace(AIm, BBox, boxcolor)
if BBox2 is not None:
_add_bbox_to_im_inplace(AIm, BBox2, boxcolor2)
return AIm
def _add_bbox_to_im_inplace(Im, BBox, boxcolor, doThickLines=1):
BBox = np.asarray(BBox, dtype=np.int32)
boxcolor = np.asarray(boxcolor, dtype=np.float64)
if boxcolor.max() > 1:
boxcolor = boxcolor / 255
for r in xrange(BBox.shape[0]):
Im[BBox[r,0]:BBox[r,1], BBox[r,2]] = boxcolor[np.newaxis,:]
Im[BBox[r,0]:BBox[r,1], BBox[r,3]-1] = boxcolor[np.newaxis,:]
Im[BBox[r,0], BBox[r,2]:BBox[r,3]] = boxcolor[np.newaxis,:]
Im[BBox[r,1]-1, BBox[r,2]:BBox[r,3]] = boxcolor[np.newaxis,:]
## Draw thick lines by repeating this cmd
## but slightly shifting BBox coords +1 or -1 pixel
if doThickLines:
for inc in [-1, +1]:
ABox = BBox + inc
np.maximum(ABox, 0, out=ABox)
np.minimum(ABox[:,1], Im.shape[0], out=ABox[:,1])
np.minimum(ABox[:,3], Im.shape[1], out=ABox[:,3])
_add_bbox_to_im_inplace(Im, ABox, boxcolor, doThickLines=0)
"""
def showMostConfidentFalseNegatives(Ytrue, Phat, Nsubplots=9):
if Phat.ndim > 1:
Phat = Phat[:,-1] # use final column, which is probability of 1
Yhat = np.asarray(Phat > 0.5, dtype=Ytrue.dtype)
falseNegIDs = np.flatnonzero( np.logical_and(Yhat == 0, Yhat != Ytrue))
print 'FALSE NEG: %d/%d' % (len(falseNegIDs), np.sum(Ytrue==1))
if len(falseNegIDs) == 0:
return None
# Sort false positives from smallest probability to largest
sortIDs = np.argsort( Phat[falseNegIDs] )
falseNegIDs = falseNegIDs[sortIDs[:Nsubplots]]
#print ' ', falseNegIDs, Phat[falseNegIDs]
PosIms, _ = loadTestImages(testGroupIDs, falseNegIDs, None)
return plotImages(PosIms, Nsubplots=Nsubplots)
def showMostConfidentFalsePositives(Ytrue, Phat, Nsubplots=9):
if Phat.ndim > 1:
Phat = Phat[:,-1] # use final column, which is probability of 1
Yhat = np.asarray(Phat > 0.5, dtype=Ytrue.dtype)
falsePosIDs = np.flatnonzero( np.logical_and(Yhat == 1, Yhat != Ytrue))
print 'FALSE POS: %d/%d' % (len(falsePosIDs), np.sum(Ytrue==0))
if len(falsePosIDs) == 0:
return None
# Sort false positives from largest probability to smallest
sortIDs = np.argsort( -1*Phat[falsePosIDs] )
falsePosIDs = falsePosIDs[sortIDs[:Nsubplots]]
#print ' ', falsePosIDs, Phat[falsePosIDs]
_, NegIms = loadTestImages(testGroupIDs, None, falsePosIDs)
return plotImages(NegIms, Nsubplots=Nsubplots)
"""
| 2.5 | 2 |
| ex075b.py | wtomalves/exerciciopython | 1 | 12776664 |
<filename>ex075b.py
núm = (int(input('Digite um número: ')), \
int(input('Digite outro número: ')), \
int(input('Digite mais um número: ')),\
int(input('Digite o último número: ')))
print(f'Você digitou os valores {núm}')
print(f'O valor 9 apareceu {núm.count(9)} vezes!')
if 3 in núm:
print(f'O valor 3 apareceu na {núm.index(3)+1}º posição')
else:
print('O valor 3 não foi digitado em nenhuma posição!')
print(f'os números pares sao:', end= ' ')
for n in núm:
if n % 2 == 0:
print(f'{n}', end= ' ')
# Code from professor Guaná!
'''Notes: in this code we reduce the number of lines thanks to the use of these functions:
núm.count(9) counts how many times the number was typed.
núm.index(3) checks at which position the number appears.'''
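# Small illustration of the two tuple methods mentioned above:
#   (1, 9, 3, 9).count(9) -> 2   (9 was typed twice)
#   (1, 9, 3, 9).index(3) -> 2   (3 first appears at index 2, i.e. the 3rd position)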
| 4.28125 | 4 |
| tests/test_d12f.py | doismellburning/django12factor | 70 | 12776665 |
from __future__ import absolute_import
import django12factor
import unittest
import django
from .env import env
d12f = django12factor.factorise
def debugenv(**kwargs):
return env(DEBUG="true", **kwargs)
class TestD12F(unittest.TestCase):
def test_object_no_secret_key_prod(self):
with env(DEBUG="false"):
self.assertRaises(SystemExit, d12f)
def test_debug(self):
with debugenv():
self.assertTrue(d12f()['DEBUG'])
def test_debug_defaults_to_off(self):
"""
Ensure that by default, DEBUG is false (for safety reasons)
"""
with env(SECRET_KEY="x"):
self.assertFalse(d12f()['DEBUG'])
def test_template_debug(self):
# for this test, we pretend to be Django < 1.8
oldversion = django.VERSION
django.VERSION = (1, 7, 0, "test_template_debug", 1)
with debugenv():
# Unless explicitly set, TEMPLATE_DEBUG = DEBUG
self.assertTrue(d12f()['TEMPLATE_DEBUG'])
with debugenv(TEMPLATE_DEBUG="false"):
s = d12f()
self.assertFalse(s['TEMPLATE_DEBUG'])
self.assertTrue(s['DEBUG'])
django.VERSION = oldversion
def test_db(self):
with debugenv():
self.assertIn("sqlite", d12f()['DATABASES']['default']['ENGINE'])
with debugenv(DATABASE_URL="sqlite://:memory:"):
self.assertIn("sqlite", d12f()['DATABASES']['default']['ENGINE'])
postgenv = debugenv(
DATABASE_URL="postgres://username:password@host:1234/dbname",
)
with postgenv:
db = d12f()['DATABASES']['default']
self.assertIn("postgres", db['ENGINE'])
self.assertEquals("dbname", db['NAME'])
def test_custom_key(self):
with debugenv(CUSTOM_KEY="banana"):
settings = d12f(['CUSTOM_KEY'])
self.assertIn("banana", settings['CUSTOM_KEY'])
def test_missing_custom_keys(self):
present = 1
with debugenv(PRESENT=present):
settings = d12f(['PRESENT', 'MISSING'])
self.assertEquals(present, settings['PRESENT'])
self.assertIsNone(settings['MISSING'])
def test_multiple_db_support(self):
"""
Explicit test that multiple DATABASE_URLs are supported.
https://github.com/doismellburning/django12factor/issues/36 turned out
to be due to using an incorrect version of the library, BUT it made me
realise that there was no explicit test for multiple named databases.
So this is one.
"""
e = {
"CLIENT_DATABASE_URL": "mysql://root@127.0.0.1:3306/apps",
"DATABASE_URL": "mysql://root@127.0.0.1:3306/garage",
"BRD_DATABASE_URL": "mysql://root@127.0.0.1:3306/brd",
}
with debugenv(**e):
dbs = d12f()['DATABASES']
self.assertEquals(len(dbs), 3)
def test_named_db_support(self):
DBNAME = "test"
DB_URL_NAME = "%s_DATABASE_URL" % DBNAME.upper()
e = {DB_URL_NAME: "postgres://username:password@host:1234/dbname"}
with debugenv(**e):
dbs = d12f()['DATABASES']
self.assertIn(
'sqlite',
dbs['default']['ENGINE'],
"Failed to load default DATABASE"
)
self.assertIn(
DBNAME,
dbs,
"Failed to parse a database called '%s' from the environment "
"variable %s" % (DBNAME, DB_URL_NAME)
)
self.assertIn('postgres', dbs[DBNAME]['ENGINE'])
def test_multiple_default_databases(self):
"""
Ensure if DEFAULT_DATABASE_URL and DATABASE_URL are set, latter wins.
"""
IGNORED_DB_NAME = "should_be_ignored"
DATABASE_URL = "postgres://username:password@host:1234/dbname"
IGNORED = "postgres://username:password@host:1234/%s" % IGNORED_DB_NAME
with debugenv(DATABASE_URL=DATABASE_URL, DEFAULT_DATABASE_URL=IGNORED):
default_db = d12f()['DATABASES']['default']
self.assertNotEquals(
default_db['NAME'],
IGNORED_DB_NAME,
"Parsed the contents of DEFAULT_DATABASE_URL instead of "
"ignoring it in favour of DATABASE_URL"
)
def test_non_capitalised_database_ignored(self):
"""
Ensure "malformed" X_DATABASE_URLs aren't parsed.
"""
e = {
'invalid_DATABASE_URL': "",
'AlsoInValid_DATABASE_URL': "",
'ALMOST_CORRECt_DATABASE_URL': "",
}
with debugenv(**e):
dbs = d12f()['DATABASES']
self.assertEquals(
len(dbs),
1,
"Loaded %d databases instead of just 1 (default) - got %s "
"from environment %s" % (len(dbs), dbs.keys(), e)
)
| 2.34375 | 2 |
| build_tools/fix_info_plist.py | im-hjk/coffeegrindsize | 44 | 12776666 |
<reponame>im-hjk/coffeegrindsize<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-#
###############################################################################
#
# fix_info_plist.py: Support script for coffeegrindsize Mac executable build
#
###############################################################################
#
# This file contains a Python script to assist in the process of
# building a Mac executable for the coffeegrindsize application.
#
# Its input is the default Info.plist file that pyinstaller generates.
# It modifies that file as follows:
#
# - Changes the value of CFBundleShortVersionString to the version
# number in the version.txt file
# - Adds NSHumanReadableCopyright with the copyright string
# - Adds NSHighResolutionCapable, set to True
# - Adds NSRequiresAquaSystemAppearance, set to True (NO Dark Mode)
#
# usage: fix_info_plist.py [-h] info_plist_file
#
# positional arguments:
# info_plist_file (full or relative path)
#
# optional arguments:
# -h, --help show this help message and exit
#
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import plistlib
import os
def get_version():
try:
version_file = os.path.join(os.environ['GITHUB'],
"coffeegrindsize",
"build_tools",
"version.txt")
except KeyError:
print("**************************************************************")
print("* ERROR: you must have the environment variable $GITHUB set *")
print("* e.g.: export GITHUB=\"$HOME/GitHub\" *")
print("**************************************************************")
raise
try:
with open(version_file, "r") as f:
lines = f.read().splitlines()
if len(lines) != 1:
print("ERROR: {} has {} lines".format(version_file,
len(lines)))
return "vFIXME"
version = lines[0]
if len(version) == 0 or version[0] != 'v':
print("ERROR: {} has invalid version: {}"
.format(version_file, version))
return "vFIXME"
print("Application version: {}".format(version))
return version
except IOError:
print("ERROR: {} doesn't exist".format(version_file))
return "vFIXME"
# Parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("info_plist", metavar='info_plist_file',
type=str, nargs=1,
help=("(full or relative path)"))
args = parser.parse_args()
# Get the version number
app_path = os.path.join(".", "dist", "coffeegrindsize")
version_from_file = get_version() # vX.X.X
version = version_from_file[1:] # X.X.X
# Read Info.plist into a plist object
try:
# Python 3
with open(args.info_plist[0], 'rb') as fp:
plist = plistlib.load(fp)
except AttributeError:
# Python 2
plist = plistlib.readPlist(args.info_plist[0])
# Change version number
plist['CFBundleShortVersionString'] = version
# Add copyright string
plist['NSHumanReadableCopyright'] = u"Copyright © 2021 <NAME>"
# Enable retina display resolution
plist['NSHighResolutionCapable'] = True
# Write the modified plist back to the Info.plist file
if hasattr(plistlib, 'dump'):
# Python 3
plist['NSRequiresAquaSystemAppearance'] = True # DISABLE dark mode
with open(args.info_plist[0], 'wb') as fp:
plistlib.dump(plist, fp)
else:
# Python 2
plistlib.writePlist(plist, args.info_plist[0])
| 2.140625 | 2 |
| module/caffe/module.py | dividiti/ck-caffe | 212 | 12776667 |
#
# Collective Knowledge (caffe CK front-end)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: cTuning foundation, <EMAIL>, http://cTuning.org
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowd-benchmark caffe
def crowdbench(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
i['action']='crowdsource'
i['module_uoa']=cfg['module_deps']['experiment.bench.caffe']
return ck.access(i)
##############################################################################
# TBD: classification demo using webcam + benchmarking/tuning via CK
def demo(i):
"""
Input: {
(camera_id) - camera ID
(delay) - delay
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Deps
import time
import cv2
import os
# Prepare tmp entry if doesn't exist
duoa=cfg['demo']['data_uoa']
image_name=cfg['demo']['image_name']
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['tmp'],
'data_uoa':duoa})
if r['return']>0:
if r['return']!=16: return r
r=ck.access({'action':'add',
'module_uoa':cfg['module_deps']['tmp'],
'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
pf=os.path.join(p, image_name)
# Initialize web cam
ci=int(i.get('camera_id',0))
dl=int(i.get('delay',1))
wcam = cv2.VideoCapture(ci)
# Permanent loop
while True:
ck.out('Obtaining picture from webcam ...')
s, img = wcam.read()
if s: # frame captured without any errors
# cv2.namedWindow("cam-test")
# cv2.imshow("cam-test",img)
# destroyWindow("cam-test")
cv2.imwrite(pf,img)
time.sleep(dl)
return {'return':0}
##############################################################################
# autotune Caffe workloads
def autotune(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
i['module_uoa']=cfg['module_deps']['program']
i['data_uoa']='caffe'
i['explore']='yes'
i['extra_tags']='dnn'
i['skip_collaborative']='yes'
i['skip_pruning']='yes'
i['iterations']=-1
i['new']='yes'
i['cmd_keys']=['time_cpu','time_gpu']
return ck.access(i)
| 2 | 2 |
| multiscale/toolkits/cw_ssim.py | uw-loci/multiscale_imaging | 1 | 12776668 |
<filename>multiscale/toolkits/cw_ssim.py
# -*- coding: utf-8 -*-
"""
Complex-wavelet structural similarity metric
Created on Tue Mar 20 10:50:24 2018
@author: mpinkert
"""
import multiscale.bulk_img_processing as blk
import os
from PIL import Image
from ssim.ssimlib import SSIM
import csv
def compare_ssim(one_path, two_path):
"""Calculate the complex wavelet structural similarity metric
Inputs:
one_path -- Path to image one
two_path -- Path to image two
Output:
ssim -- The Complex wavelet structural similarity metric
"""
one = Image.open(one_path)
two = Image.open(two_path)
print('Calculating CW-SSIM between {0} and {1}'.format(
os.path.basename(one_path),
os.path.basename(two_path)))
ssim = SSIM(one).cw_ssim_value(two)
print('CW-SSIM = {0}'.format(str(ssim)))
return ssim
def bulk_compare_ssim(dir_list,
output_dir, output_name='CW-SSIM Values.csv'):
"""Calculate CW-SSIM between images in several file directories
Inputs:
dir_list -- The list of dirs to compare between
output_dir -- Directory to save the cw-ssim values
output_name -- Filename for the CW-SSIM value file
"""
path_lists = blk.find_bulk_shared_images(dir_list)
num_images = len(path_lists[0])
num_dirs = len(dir_list)
output_path = os.path.join(output_dir, output_name)
for image_index in range(num_images):
core_name = blk.get_core_file_name(path_lists[0][image_index])
for index_one in range(num_dirs - 1):
for index_two in range(index_one + 1, num_dirs):
ssim = compare_ssim(path_lists[index_one][image_index],
path_lists[index_two][image_index])
modality_one = blk.file_name_parts(
path_lists[index_one][image_index])[1]
modality_two = blk.file_name_parts(
path_lists[index_two][image_index])[1]
column = modality_one + '-' + modality_two
blk.write_pandas_value(output_path, core_name, ssim, column,
'Sample')
def calculate_ssim_across_two_lists(list_one: list, list_two: list, writer: csv.writer):
num_images = len(list_one)
for image_index in range(num_images):
ssim_value = compare_ssim(list_one[image_index], list_two[image_index])
sample, modality_one, tile = blk.file_name_parts(list_one[image_index])
modality_two = blk.file_name_parts(list_two[image_index])[1]
mouse, slide = sample.split('-')
modality_pair = modality_one + '-' + modality_two
writer.writerow([mouse, slide, tile, modality_pair, ssim_value])
def calculate_ssim_across_multiple_directories(list_input_dirs, dir_output, name_output, file_parts_to_compare=[0]):
"""Calculate CW-SSIM between images in several file directories
Inputs:
dir_list -- The list of dirs to compare between
output_dir -- Directory to save the cw-ssim values
output_name -- Filename for the CW-SSIM value file
"""
path_lists = blk.find_bulk_shared_images(list_input_dirs, file_parts_to_compare=file_parts_to_compare,
subdirs=True)
num_dirs = len(list_input_dirs)
output_path = os.path.join(dir_output, name_output)
with open(output_path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['Mouse', 'Slide', 'Tile', 'Modality pair', 'CW-SSIM'])
for index_one in range(num_dirs - 1):
for index_two in range(index_one + 1, num_dirs):
calculate_ssim_across_two_lists(path_lists[index_one], path_lists[index_two], writer)
| 2.390625 | 2 |
| convert.py | CTCSU/anaylyse_file_structure | 0 | 12776669 |
import re
from node import Node
last_result = {'line':'','level':0}
def convertStringListToNode(str_list, rex_list, current_level=0, node=Node()):
while len(str_list) > 0:
line = str_list[0]
line_level = getLineLevel(line, rex_list)
if (line_level > current_level):
childNode = Node()
node.addChild(childNode)
convertStringListToNode(str_list,rex_list,current_level+1,childNode)
if (line_level == current_level):
if line_level == len(rex_list):
str_list.remove(line)
if node.val == '':
node.val = line
else:
node.val = node.val + "\n" + line
elif line_level == 0:
str_list.remove(line)
if node.val == '':
node.val = line
else:
node.setNext(Node())
node = node.next
node.val = line
elif node.val != '':
return
elif node.val == '':
str_list.remove(line)
node.val = line
continue
if line_level < current_level:
return
def getLineLevel(str, rex_list):
if str == last_result['line']:
return last_result['level']
last_result['line'] = str
for x in rex_list:
if x.match(str):
last_result['level'] = rex_list.index(x)
return last_result['level']
last_result['level'] = len(rex_list)
return last_result['level']
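# Hedged illustration of how `rex_list` drives the levels (the patterns below are
# hypothetical, not taken from this project):
#   rex_list = [re.compile(r'^Chapter '), re.compile(r'^Section ')]
#   getLineLevel('Chapter 1', rex_list)       -> 0
#   getLineLevel('Section 1.2', rex_list)     -> 1
#   getLineLevel('plain body text', rex_list) -> 2   (== len(rex_list), i.e. leaf content)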
| 3.125 | 3 |
| curso/aula_29.py | ealgarve/GUI-Python | 0 | 12776670 |
Carros = ['HRV', 'Polo', 'Jetta', 'Palio', 'Fusca']
itCarros = iter(Carros)
while itCarros:
try:
print(next(itCarros))
except StopIteration:
print('Fim da Lista.')
break
| 3.8125 | 4 |
| laLiga.py | mlavador/Practica1TD | 0 | 12776671 |
<reponame>mlavador/Practica1TD
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox import options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from time import sleep
from time import time as get_timestamp
from bs4 import BeautifulSoup
import pandas as pd
import requests
import sys, getopt
class element_has_css_class(object):
"""
Waits until the element has the indicated CSS class
"""
def __init__(self, locator, css_class):
self.locator = locator
self.css_class = css_class
def __call__(self, driver):
element = driver.find_element(*self.locator)
if self.css_class in element.get_attribute("class"):
return element
else:
return False
def main(args):
user, password = arguments_parser(args)
if (user == "" or password == ""):
print("Using without logging")
min_time=30
# Obtain the session cookies
session = requests.Session()
t = get_timestamp()
session.post("https://fanslaliga.laliga.com/api/v2/loginMail", data=dict(
email=user,
password=password
), headers={"AppId": "6457fa17-1224-416a-b21a-ee6ce76e9bc0",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:94.0) Gecko/20100101 Firefox/94.0"})
r_delay = get_timestamp() - t
sleep(min_time+r_delay*2)
cookies = session.cookies.get_dict()
# The geckodriver driver must be installed beforehand (for Firefox)
profile = webdriver.FirefoxProfile()
profile.set_preference("general.useragent.override", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:94.0) Gecko/20100101 Firefox/94.0")
driver = webdriver.Firefox(profile)
# Access LaLiga before setting the cookie, to avoid cookie-averse issues
t = get_timestamp()
driver.get("https://www.laliga.com/")
r_delay = get_timestamp() - t
for key in cookies:
driver.add_cookie({"name" : key, "value" : cookies[key]})
# Reload the page with the cookie already added
driver.get("https://www.laliga.com/")
WebDriverWait(driver, 20)\
.until(EC.element_to_be_clickable((By.CSS_SELECTOR,
"#onetrust-accept-btn-handler")))\
.click()
categorias = driver.find_elements(By.CSS_SELECTOR,".styled__CompetitionMenuItem-sc-7qz1ev-3>a")
columns = ["liga","jornada","tipo_partido","posición", "id_equipo","equipo","puntos","pj","pg","pe","pp","gf","gc","dg"]
df = pd.DataFrame(columns = columns)
sleep(min_time+r_delay*2)
for el in categorias:
try:
t = get_timestamp()
wait_spinner_ends(driver)
WebDriverWait(driver, 20).until(EC.element_to_be_clickable(el)).click()
r_delay = get_timestamp() - t
sleep(min_time+r_delay*2)
submenu = el.find_elements(By.XPATH,"../div/div/span/a")
for sub_el in submenu:
if sub_el.get_attribute("innerHTML") == "Clasificación":
t = get_timestamp()
wait_spinner_ends(driver)
WebDriverWait(driver, 20).until(EC.element_to_be_clickable(sub_el)).click()
r_delay = get_timestamp() - t
sleep(min_time+r_delay*2)
break
jornadas_menu = driver.find_element(By.CSS_SELECTOR,".styled__DropdownContainer-sc-1engvts-6 ul")
jornadas = jornadas_menu.find_elements(By.XPATH,"./li")
for jornada in jornadas:
t = get_timestamp()
wait_spinner_ends(driver)
WebDriverWait(driver, 20)\
.until(EC.element_to_be_clickable((By.CSS_SELECTOR,
".styled__DropdownContainer-sc-1engvts-6")))\
.click()
r_delay = get_timestamp() - t
sleep(min_time+r_delay*2)
t = get_timestamp()
wait_spinner_ends(driver)
WebDriverWait(driver, 20).until(EC.element_to_be_clickable(jornada)).click()
r_delay = get_timestamp() - t
sleep(min_time+r_delay*2)
wait_table_load(driver)
page_content = driver.page_source
soup = BeautifulSoup(page_content, 'html.parser')
league = get_league_name(soup)
game_type_list = get_name_games(soup)
df_table = get_classification_table(soup,league,jornada.get_attribute("innerHTML"),game_type_list)
df = df.append(df_table, ignore_index=True)
except Exception as e:
print(e)
pass
df.to_csv("clasificacion_por_jornadas_LaLiga_21-22.csv",header=True,index=False)
driver.close()
def arguments_parser(argv):
user = ''
password = ''
try:
opts, args = getopt.getopt(argv,"hu:p:",["user=","password="])
except getopt.GetoptError:
print("Usage -u User -p Password")
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print("Usage -u User -p Password")
sys.exit()
elif opt in ("-u", "--user"):
user = arg
elif opt in ("-p", "--password"):
password = arg
return user, password
def wait_spinner_ends(driver):
WebDriverWait(driver, 20).until(element_has_css_class((By.CSS_SELECTOR, '.styled__SpinnerContainer-uwgd15-0'), "hide"))
check_spots(driver)
def wait_table_load(driver):
WebDriverWait(driver, 20).until(element_has_css_class((By.CSS_SELECTOR, '.styled__StandingTableBody-e89col-5'), "cDiDQb"))
def check_spots(driver):
driver.execute_script("""
var element = document.querySelector('#rctfl-widgets-container');
if (element)
element.parentNode.removeChild(element);
var element = document.getElementsByTagName("body")
element[0].classList.remove("rctfl-blur-page")
var element = document.querySelector('#rctfl-block-page');
if (element)
element.parentNode.removeChild(element);
""")
def get_league_name(soup):
"""
Returns a string.
Extracts the league name from the given soup.
"""
league = soup.find('h1', attrs={'class': 'styled__TextHeaderStyled-sc-1edycnf-0 idvFtg'})
return league.text
def get_name_games(soup):
"""
Returns a list.
Collects the names of the different tables according to match type.
"""
game_type_list = []
game_type_row=soup.find('ul', attrs={'class': 'styled__ListTabs-bcjnby-2 jRIEjJ'})
for game_type in game_type_row.find_all('li'):
game_type_list.append(game_type.text)
return game_type_list
def get_drop_down(soup):
"""
Returns a list.
Collects the matchdays from the drop-down.
This function is implemented but not used, pending the Selenium implementation.
"""
dropDown = soup.find('div', attrs={'class': 'styled__DropdownContainer-sc-1engvts-6 iOlTMZ'})
journeys=[]
for item in dropDown.findAll('li'):
journeys.append(item.text)
return journeys
def get_classification_table(soup,league,journey,game_type_list):
"""
Returns a DataFrame.
Collects the standings table for the different leagues.
"""
tables = soup.findAll('div', attrs={'class': 'styled__StandingTableBody-e89col-5 cDiDQb'})
columns = ["liga","jornada","tipo_partido","posición", "id_equipo","equipo","puntos","pj","pg","pe","pp","gf","gc","dg"]
rows = []
game_type_number = 0
for table in tables:
for row in table.find_all('div', attrs={'class':'styled__ContainerAccordion-e89col-11 HquGF'}):
data_row=[league,journey,game_type_list[game_type_number]]
for cell in row.find_all('p'):
data_row.append(cell.text)
rows.append(data_row)
game_type_number=game_type_number + 1
return pd.DataFrame.from_records(rows,columns=columns)
if __name__=="__main__":
main(sys.argv[1:])
| 2.859375 | 3 |
| narwhal/plotting/colors.py | njwilson23/narwhal | 10 | 12776672 |
<reponame>njwilson23/narwhal
def default_colors(n):
n = max(n, 8)
clist = ["#1b9e77", "#d95f02", "#7570b3", "#e7298a", "#66a61e", "#e6ab02",
"#a6761d", "#666666"]
return clist[:n]
| 2.40625 | 2 |
| libraries/instagram/api.py | cca/libraries_wagtail | 9 | 12776673 |
<filename>libraries/instagram/api.py<gh_stars>1-10
import logging
import re
import requests
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from .models import InstagramOAuthToken
# these functions will be used inside management scripts exclusively
logger = logging.getLogger('mgmt_cmd.script')
validate_url = URLValidator()
# @me -> <a href=link>@me</a> etc.
def linkify_text(text):
html = text
username_regex = r"(^|\s)(@[a-zA-Z0-9._]+)"
hashtag_regex = r"(^|\s)(#[a-zA-Z0-9_]+)"
url_regex = r"(^|\s)(https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*))"
ig_url = 'https://www.instagram.com/'
def replace_username(match):
leading_space = match.group(1)
username = match.group(2).replace('@', '')
return leading_space + '<a href="' + ig_url + username + '/">@' + username + '</a>'
def replace_hashtag(match):
leading_space = match.group(1)
hashtag = match.group(2).replace('#', '')
return leading_space + '<a href="' + ig_url + 'explore/tags/' + hashtag + '/">#' + hashtag + '</a>'
def replace_url(match):
leading_space = match.group(1)
url = match.group(2)
try:
validate_url(url)
except ValidationError:
logger.warning('Regex found invalid URL "{}" in Instagram post.'.format(url))
# return unprocessed string
return match.string
return leading_space + '<a href="' + url + '">' + url + '</a>'
html = re.sub(username_regex, replace_username, html)
html = re.sub(hashtag_regex, replace_hashtag, html)
html = re.sub(url_regex, replace_url, html)
return html
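# Illustrative (hedged) example of the rewriting above, with made-up handles and URLs:
#   linkify_text("hi @someuser #books https://example.com") returns
#   'hi <a href="https://www.instagram.com/someuser/">@someuser</a> '
#   '<a href="https://www.instagram.com/explore/tags/books/">#books</a> '
#   '<a href="https://example.com">https://example.com</a>'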
def get_instagram():
# just grab the latest OAuth token we have
token = InstagramOAuthToken.objects.last().token
url = 'https://graph.instagram.com/me/media?fields=id,caption,media_url,permalink,thumbnail_url,username&access_token=' + token
response = requests.get(url)
insta = response.json()
if 'data' in insta:
gram = insta['data'][0]
text = gram['caption']
output = {
# link hashtags & usernames as they'd appear on IG itself
'html': linkify_text(text),
'id': gram['id'],
'image': gram['media_url'],
'text': text,
'username': gram['username'],
}
elif 'error' in insta:
output = {
'error_type': insta['error']['type'],
'error_message': insta['error']['message']
}
else:
output = {
'error_type': 'GenericError',
'error_message': 'No "error" object containing an error type or message was present in the Instagram API response. This likely means a network connection problem or that Instagram changed the structure of their error messages.'
}
return output
def get_token_from_code(code):
""" Turn a code from the app's redirect URI into a long-lived OAuth access token.
Parameters
----------
code : str
the "code" parameter in the app's redirect URI, DO NOT include the final two
"#_" characters
Returns
-------
boolean
True if token was successfully obtained, False if an error occurred.
"""
if len(code) == 0:
logger.info('No response code provided.')
return False
data = {
"client_id": settings.INSTAGRAM_APP_ID,
"client_secret": settings.INSTAGRAM_APP_SECRET,
"code": code,
"grant_type": "authorization_code",
"redirect_uri": settings.INSTAGRAM_REDIRECT_URI
}
logger.info('obtaining short-lived Instagram access token')
response = requests.post('https://api.instagram.com/oauth/access_token', data=data)
shortlived_token = response.json().get("access_token")
if not shortlived_token:
logger.error('Failed to acquire shortlived access token. Response JSON: {}'.format(response.json()))
return False
# https://developers.facebook.com/docs/instagram-basic-display-api/reference/access_token
# exchange this worthless shortlived token for a long-lived one
# Facebook is GREAT at API design, by the way, really love their work
logger.info('obtaining long-lived Instagram access token')
ll_response = requests.get('https://graph.instagram.com/access_token?grant_type=ig_exchange_token&client_secret={}&access_token={}'.format(settings.INSTAGRAM_APP_SECRET, shortlived_token))
token = ll_response.json().get("access_token")
if token:
InstagramOAuthToken.objects.create(token=token)
return True
logger.error('Failed to acquire long-lived OAuth token. Response JSON: {}'.format(response.json()))
return False
def refresh_token(token):
""" refresh Instagram long-lived access token
where the word "refresh" means "replace", it is not the same token """
response = requests.get('https://graph.instagram.com/refresh_access_token?grant_type=ig_refresh_token&access_token={}'.format(token))
new_token = response.json().get("access_token")
if new_token:
InstagramOAuthToken.objects.create(token=new_token)
logger.info('Successfully refreshed long-lived Instagram access token.')
return new_token
logger.critical('Unable to refresh long-lived Instagram access token. Response JSON: {}'.format(response.json()))
return None
| 2.609375 | 3 |
| examples/utility_scripts/make_h5sig.py | bendichter/api-python | 32 | 12776674 |
<filename>examples/utility_scripts/make_h5sig.py<gh_stars>10-100
# This script runs utility 'nwb.h5diffsig' to generate a text summary of
# hdf5 (nwb) file contents which can be used to compare one hdf5 to another.
import sys
import glob
import os, fnmatch
from subprocess import check_output
from sys import version_info
def error_exit(msg):
if msg:
print(msg)
print ( "Format is")
print ("%s <dir_or_file> <output_dir> [<extension>]" % sys.argv[0])
print ("where:")
print (" <dir_or_file> - either a directory containing hdf5/nwb files or")
print (" a single hdf5/nwb file.")
print (" <output_dir> - output directory for storing generated h5diffsig output file(s)")
print (" <extensions> - comma separated list of extension to use to find nwb or hdf5 files.")
print (" Default is 'nwb,h5'. Other common values may include 'hdf5'.")
sys.exit(1)
# the command that is run
command = ['python', '-m', 'nwb.h5diffsig']
# py3: convert bytes to str (unicode) if Python 3
def make_str3(val):
if isinstance(val, bytes) and version_info[0] > 2:
return val.decode('utf-8')
else:
return val
def process_file(dirpath, filename, output_dir, output_path, extension):
# output path is path to create inside output_dir
global command
# print("process_file output_path=%s, dirpath=%s, filename=%s, output_dir=%s, extension=%s\n" %
# (output_path, dirpath, filename, output_dir, extension))
output_file_name = "%stxt" % filename[0:-len(extension)]
# print ("output_file_name=%s" % output_file_name)
input_file = os.path.join(dirpath, filename)
# N option to filter NWB 'variable' datasets, e.g. /file_create_date
# a option, to sort output (groups / datasets) alphabetically
cmd = command + [input_file] + ["-Na"]
command_str = " ".join(cmd)
if output_path != "":
output_dir = os.path.join(output_dir, output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
outpath = os.path.join(output_dir, output_file_name)
print ("doing %s > %s" % (command_str, outpath))
output = make_str3(check_output(cmd))
with open(outpath, "w") as f:
f.write(output)
def get_extension(file, extensions):
# returns extension in list extensions file ends with or None
extension = [e for e in extensions if file.endswith(".%s" % e)]
assert len(extension) <= 1
extension = extension[0] if len(extension) == 1 else None
return extension
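# Illustrative behaviour (hypothetical file names):
#   get_extension("scan.nwb", ["nwb", "h5"])  -> "nwb"
#   get_extension("notes.txt", ["nwb", "h5"]) -> None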
def process_files(input_dir_or_file, output_dir, extensions):
# convert comma separated string to list for use with get_extension
extensions = extensions.split(",")
if os.path.isfile(input_dir_or_file):
dirpath, filename = os.path.split(input_dir_or_file)
extension = get_extension(filename, extensions)
if extension is None:
print("Single file specified, but does not end with extension '%s': %s" % (
extension, filename))
sys.exit(1)
output_path = ""
process_file(dirpath, filename, output_dir, output_path, extension)
else:
# input_dir_or_file is a directory, processes files within it
for dirpath, dirnames, filenames in os.walk(input_dir_or_file):
#for filename in [f for f in filenames if f.endswith(textensions)]:
for filename in filenames:
extension = get_extension(filename, extensions)
if extension is not None:
assert dirpath.startswith(input_dir_or_file)
output_path = dirpath[len(input_dir_or_file):].lstrip("/")
process_file(dirpath, filename, output_dir, output_path, extension)
def clear_directory(path):
if os.path.isdir(path):
cwd = os.getcwd()
os.chdir(path)
for f in os.listdir("."):
if f.endswith(".txt"):
os.remove(f)
os.chdir(cwd)
print ("cleared %s" % path)
else:
os.mkdir(path)
print ("created %s" % path)
def clear_output_directory(output_option):
global txt_output_dir
if output_option in ("f", "b"):
clear_directory(txt_output_dir)
# filelist = [ f for f in os.listdir(".") if f.endswith(".bak") ]
# for f in filelist:
# os.remove(f)
if __name__ == '__main__':
if len(sys.argv) not in (3,4):
error_exit("Invalid number of command line arguments: %s" % len(sys.argv))
input_dir_or_file = sys.argv[1]
output_dir = sys.argv[2]
extensions = "nwb,h5" if len(sys.argv) == 3 else sys.argv[3]
if not os.path.exists(input_dir_or_file):
error_exit("Input <dir_or_file> does not exist: %s" % input_dir_or_file)
if not os.path.isdir(output_dir):
error_exit("Output <dir_or_file> does not exist: %s" % output_dir)
# clear_output_directory(output_dir)
process_files(input_dir_or_file, output_dir, extensions)
| 3.140625 | 3 |
| habitrac/habits/migrations/0002_auto_20210224_2212.py | IgnisDa/habitrac | 0 | 12776675 |
<gh_stars>0
# Generated by Django 3.1 on 2021-02-24 16:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('habits', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dailyhabit',
name='name_slug',
field=models.SlugField(default='ok', editable=False, help_text='The unique slug that will be used to identify the habit', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='hourlyhabit',
name='name_slug',
field=models.SlugField(default='pl', editable=False, help_text='The unique slug that will be used to identify the habit', max_length=100),
preserve_default=False,
),
]
| 1.851563
| 2
|
tests/units/test_rotated_files.py
|
IOTs-Projects/fiware-skuld
| 1
|
12776676
|
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
from mock import patch
from unittest import TestCase
from fiwareskuld.utils.rotated_files import rotate_files as rf
import os
class TestRotatedFiles(TestCase):
def test_rotated_files_complete_files(self):
# Given
name = 'kk'
max_level = 100
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['kk', 'kk.001', 'kk.002', 'kk.003', 'kk.004', 'kk.005']
expected_value = ['kk.001', 'kk.002', 'kk.003', 'kk.004', 'kk.005', 'kk.006']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
# Check the number of calls to the os.rename method.
                self.assertEqual(mockrename.call_count, len(mockglob.glob.return_value),
                                 "The rename operator was not called for all the values in the directory")
                # Check that we made all the os.rename calls with the proper file names.
                for k, v in d.items():
mockrename.assert_any_call(k, v)
def test_rotated_files_with_only_one_file_with_number(self):
# Given
name = 'fake'
max_level = 100
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['fake.001']
expected_value = ['fake.002']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
                self.assertEqual(mockrename.call_count, len(mockglob.glob.return_value),
                                 "The rename operator was not called for all the values in the directory")
                # Check that we made all the os.rename calls with the proper file names.
                for k, v in d.items():
mockrename.assert_any_call(k, v)
def test_rotated_files_with_only_one_file_without_number(self):
# Given
name = 'fake'
max_level = 100
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['fake']
expected_value = ['fake.001']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
                self.assertEqual(mockrename.call_count, len(mockglob.glob.return_value),
                                 "The rename operator was not called for all the values in the directory")
                # Check that we made all the os.rename calls with the proper file names.
                for k, v in d.items():
mockrename.assert_any_call(k, v)
def test_rotated_files_with_max_level(self):
# Given
name = 'kk'
max_level = 4
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['kk', 'kk.001', 'kk.002', 'kk.003']
expected_value = ['kk.001', 'kk.002', 'kk.003', 'foo']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
                self.assertEqual(mockrename.call_count, len(mockglob.glob.return_value),
                                 "The rename operator was not called for all the values in the directory")
                # Check that we made all the os.rename calls with the proper file names.
                for k, v in d.items():
mockrename.assert_any_call(k, v)
| 2.328125
| 2
|
fibonacci.py
|
haverfordcs/105lab2-marisleysisdelacruz16
| 0
|
12776677
|
def nth_fibonacci_using_recursion(n):
    if n < 1:
        raise ValueError("n should be a positive integer (the sequence is 1-indexed)")
    elif n == 1:
        return 0
    elif n == 2:
        return 1
    else:
        return nth_fibonacci_using_recursion(n - 2) + nth_fibonacci_using_recursion(n - 1)
# Implement the following function using iteration i.e. loop
# You can use any loop constructs
def nth_fibonacci_using_iteration(n):
    # You can completely remove the following code if needed
    if n < 1:
        raise ValueError("n should be a positive integer (the sequence is 1-indexed)")
    elif n == 1:
        return 0
    elif n == 2:
        return 1
    else:
        first_number = 0
        second_number = 1
        total = second_number
        for _ in range(n - 2):
            total = first_number + second_number
            first_number = second_number
            second_number = total
        return total
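# Quick sanity check (illustrative; both helpers treat the sequence as 1-indexed):
#   nth_fibonacci_using_recursion(7) -> 8
#   nth_fibonacci_using_iteration(7) -> 8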
| 4.34375
| 4
|
Machine_Learning/svm.py
|
AndrewQuijano/ML_Module
| 0
|
12776678
|
<reponame>AndrewQuijano/ML_Module
from sklearn import svm
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from joblib import dump
import time
import numpy as np
from .misc import plot_grid_search
def get_svm(train_x, train_y, n_fold=10, slow=False):
start_time = time.time()
best_svm = tune_svm(train_x, train_y, n_fold, slow)
print("--- Best Parameter SVM parameters time to complete: %s seconds ---" % (time.time() - start_time))
print("Best SVM Parameters: " + str(best_svm.best_params_))
print("[SVM] Training Mean Test Score: " + str(best_svm.score(train_x, train_y)))
with open("results.txt", "a+") as my_file:
my_file.write("[SVM_Radial] Best Parameters: " + str(best_svm.best_params_) + '\n')
my_file.write("[SVM Radial] Training Mean Test Score: " + str(best_svm.score(train_x, train_y)) + '\n')
return best_svm
def tune_svm(train_x, train_y, n_folds=10, slow=False):
c = np.arange(0.1, 1, 0.1)
gammas = np.arange(0.1, 1, 0.1)
random_grid = {
'C': c,
'gamma': gammas,
'kernel': ["rbf", "linear", "poly", "sigmoid"]
}
model = svm.SVC(probability=True)
if slow:
svm_search = GridSearchCV(model, param_grid=random_grid, cv=n_folds,
n_jobs=-1, error_score='raise', verbose=2)
else:
svm_search = RandomizedSearchCV(model, param_distributions=random_grid,
cv=n_folds, n_jobs=-1, error_score='raise', verbose=2)
svm_search.fit(train_x, train_y)
if slow:
plot_grid_search(svm_search, 'C', 'SVM_RBF')
plot_grid_search(svm_search, 'gamma', 'SVM_RBF')
dump(svm_search, "./Classifiers/" + type(model).__name__ + ".joblib")
return svm_search
| 2.84375
| 3
|
03Friclass.py
|
Ayon134/code_for_Kids
| 0
|
12776679
|
<filename>03Friclass.py<gh_stars>0
'''
import turtle
wn = turtle.Screen()
color=["orange","blue","red"]
t = turtle.Turtle()
t.goto(100,100)
t.forward(100)
t.fd(100)
t.shapesize(1,5,10)
'''
import turtle
t=turtle.Turtle()
turtle.bgcolor("#CD853F")
#t.shapesize(1,5,5)
t.fillcolor("red")
#t.shape("triangle")
t.pen(pencolor="purple", fillcolor="red", pensize=50, speed=9)
c=t.clone()
c.goto(100,100)
t.circle(10)
c.circle(10)
| 3.375
| 3
|
mangle-infra-agent/Faults/helper/FaultHelper.py
|
vmaligireddy/mangle
| 0
|
12776680
|
<reponame>vmaligireddy/mangle
'''
Created on Jan 5, 2021
@author: jayasankarr
'''
import logging
import os
import subprocess
log = logging.getLogger("python_agent")
def add_standard_sub_directories_to_path():
if os.path.isdir("/sbin"):
os.environ["PATH"] += os.pathsep + "/sbin"
if os.path.isdir("/usr/sbin"):
os.environ["PATH"] += os.pathsep + "/usr/sbin"
if os.path.isdir("/usr/local/sbin"):
os.environ["PATH"] += os.pathsep + "/usr/local/sbin"
log.info("Path variable:{}".format(os.environ["PATH"]))
def is_sudo_available():
res = subprocess.call('sudo -nv >/dev/null 2>&1', shell = True)
sudo_command = ''
if res == 0:
sudo_command = 'sudo '
log.info("sudo available")
return sudo_command
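# Illustrative usage sketch (the command below is a hypothetical example, not part of this module):
#   prefix = is_sudo_available()
#   subprocess.call(prefix + "systemctl restart crond", shell=True)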
| 2.078125
| 2
|
zeex/core/views/actions/analyze.py
|
zbarge/dbtrix
| 10
|
12776681
|
<filename>zeex/core/views/actions/analyze.py
"""
MIT License
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
from zeex.core.compat import QtGui
from zeex.core.models.dataframe import DataFrameDescriptionModel, DataFrameModel
from zeex.core.ui.actions.analyze_ui import Ui_FileAnalyzerDialog
class FileAnalyzerDialog(QtGui.QDialog, Ui_FileAnalyzerDialog):
def __init__(self, source_model: DataFrameModel, parent=None):
self.df_model = source_model
self.analyze_model = DataFrameDescriptionModel(source_model=source_model)
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
self.configure()
@property
def df(self):
return self.df_model.dataFrame()
def configure(self):
self.tableView.setModel(self.analyze_model)
self.btnRefresh.clicked.connect(self.sync)
# TODO: Make these buttons work and show them.
self.btnExport.setVisible(False)
self.btnPivot.setVisible(False)
self.df_model.dataChanged.connect(self.sync)
self.sync()
def sync(self):
self.setWindowTitle("Analyze {}".format(os.path.basename(self.df_model.filePath)))
| 1.632813
| 2
|
app/views/reports.py
|
jubbp/maker-hub
| 4
|
12776682
|
<filename>app/views/reports.py
import fastapi
from fastapi_chameleon import template
from starlette.requests import Request
from app.viewmodels.reports.overview_viewmodel import OverviewViewModel
router = fastapi.APIRouter()
@router.get("/reports")
@template()
async def overview(request: Request):
vm = OverviewViewModel(request)
await vm.load()
return vm.to_dict()
| 2.203125
| 2
|
pdf/trade_price.py
|
byegates/ark
| 3
|
12776683
|
<filename>pdf/trade_price.py
import tabula
import pandas as pd
import numpy as np
ip_dir = 'docs/'
op_dir = 'csv/'
files = {
"ARK_Trades_asof_20200521.pdf",
"ARK_Trades_asof_20201111.pdf",
}
floats = ['price', 'low', 'high', 'close']
keys = ['date', 'action', 'symbol']
cols0 = keys + floats
cols1 = cols0 + ['last']
cols = keys + ['price']
invalid = {
'#N/A N/A',
'#########',
}
def fix_symbol(l):
start_pos_price = 2 + (len(l) - 8) + 1
return l[:2] + [' '.join(l[2:start_pos_price])] + l[start_pos_price:]
def to_float(s):
return np.nan if s in invalid else s.replace('"', '').replace('$', '').replace(',', '')
def editdf0(df):
df = df[8:]
l = [[to_float(s) for s in df.iloc[i,0].split()] for i in range(len(df.index))]
l = [ _ if len(_) == 8 else fix_symbol(_) for _ in l]
df = pd.DataFrame(l, columns=cols1)
return df[cols0]
def editdf(df):
df = df[1:]
df.columns = cols1
return df[cols0]
def editdfn(l):
df = pd.DataFrame()
for _df in l:
_df = editdf(_df)
df = df.append(_df, ignore_index=True)
_ = df[floats].applymap(lambda s: to_float(s)).astype(float)
return pd.concat([df[keys], _], axis=1)
def edit_n_merge(l):
df = pd.DataFrame()
df0 = editdf0(l[0]) # first table is read differently than the rest
dfn = editdfn(l[1:]) # rest of the table are read in same format
df = df.append(df0, ignore_index=True).append(dfn, ignore_index=True)
df['date'] = pd.to_datetime(df.date)
return df.set_index(keys)
def read_pdf(f):
l = tabula.read_pdf(f, pages='all')
return edit_n_merge(l)
def main():
for f in files:
read_pdf(f"{ip_dir}{f}").to_csv(f"{op_dir}{f.split('.')[0]}.csv")
if __name__ == "__main__":
main()
| 2.578125
| 3
|
pycgnat/translator/direct.py
|
williamabreu/routeros-cgnat
| 3
|
12776684
|
<filename>pycgnat/translator/direct.py
from collections import OrderedDict
from ipaddress import IPv4Address, IPv4Network
from pycgnat.utils.vlsm import split_subnet
def cgnat_direct(
private_net: IPv4Network, public_net: IPv4Network, private_ip: IPv4Address
) -> OrderedDict:
"""Calculate the public IP and port range from private IP given.
Args:
private_net: Private address pool from CGNAT shared space address.
        public_net: Public address pool target from the netmap.
private_ip: Unique private IP from CGNAT to be converted to the public
one.
Returns:
Dict containing the public_ip and port_range for the query.
Raises:
ValueError: When the private IP is out of the private net given.
ValueError: When the networks given do not satisfy the 1:32 ratio.
"""
if private_ip not in private_net:
raise ValueError("Private IP is out of the network given")
if public_net.prefixlen - private_net.prefixlen != 5:
raise ValueError("Only works to netmaps for 1:32 CGNAT ratio")
private_ips = split_subnet(private_net, public_net.netmask)
index = None # to discover the port range
for i, pool in enumerate(private_ips):
if private_ip in pool:
index = i
break
port_base = 1536 + 2000 * index
port_range = (port_base, port_base + 1999)
pool = private_ips[index]
delta = int(private_ip) - int(pool.network_address)
public_ip = IPv4Address(int(public_net.network_address) + delta)
return OrderedDict(public_ip=public_ip, port_range=port_range)
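# Worked example (illustrative only; assumes split_subnet yields the /24 pools in address order):
#   cgnat_direct(IPv4Network("100.64.0.0/19"), IPv4Network("203.0.113.0/24"),
#                IPv4Address("100.64.0.10"))
#   -> OrderedDict([('public_ip', IPv4Address('203.0.113.10')), ('port_range', (1536, 3535))])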
| 2.703125
| 3
|
projects/authorization.py
|
catami/catami
| 1
|
12776685
|
<reponame>catami/catami
import logging
from django.contrib.auth.models import Group, User
from django.db import transaction
from django.dispatch import receiver
import guardian
from guardian.models import UserObjectPermission
from guardian.shortcuts import assign_perm, remove_perm, get_users_with_perms, get_perms
from userena.signals import signup_complete
logger = logging.getLogger(__name__)
# default permissions for project objects
def apply_project_permissions(user, project):
#assign all permissions view, add, change, delete
logger.debug("Applying owner permissions to project: " + project.name + " " + project.id.__str__())
assign_perm('view_project', user, project)
assign_perm('add_project', user, project)
assign_perm('change_project', user, project)
assign_perm('delete_project', user, project)
#assign view permissions to the Anonymous user
#logger.debug("Making project public: " + project.name)
#public_group, created = Group.objects.get_or_create(name='Public')
#assign_perm('view_project', public_group, project)
def project_is_public(project):
"""
    True if the project is public, False if not.
"""
public_group, created = Group.objects.get_or_create(name='Public')
return 'view_project' in get_perms(public_group, project)
def set_project_is_public(is_public, project):
if is_public == "true" or is_public == True:
make_project_public(project)
else:
make_project_private(project)
@transaction.commit_on_success
def set_detailed_project_permissions(current_user, detailed_permissions, project):
"""
Resets the permissions for the project, based on the given structure
in get_detailed_project_permissions.
{
username: "",
display_name: "",
permissions: ""
}
"""
logger.debug("Setting detailed permissions for project: " + project.name)
## clean out the permissions
for item in get_detailed_project_permissions(current_user, project):
user = User.objects.get(username=item['username'])
remove_perm("view_project", user, project)
remove_perm("add_project", user, project)
remove_perm("change_project", user, project)
remove_perm("delete_project", user, project)
## re apply permissions
for item in detailed_permissions:
user = User.objects.get(username=item['username'])
perms = item['permissions']
# set the permissions
for perm in perms:
assign_perm(perm, user, project)
def get_detailed_project_permissions(current_user, project):
"""
Builds up a list of users and permissions for the project.
{
username: "",
display_name: "",
permissions: ""
}
"""
permission_table = []
# ignore permissions for those in the public group
users = get_users_with_perms(project, with_group_users=False)
for user in users:
display_name = user.first_name + " " + user.last_name
# ignore the current user
if user != current_user:
permission_table.append({
"username": user.username,
"display_name": display_name,
"permissions": get_perms(user, project)
})
return permission_table
def make_project_public(project):
"""
Makes a given project public.
"""
logger.debug("Making project public: " + project.name)
public_group, created = Group.objects.get_or_create(name='Public')
assign_perm('view_project', public_group, project)
def make_project_private(project):
"""
Makes a given project private.
"""
logger.debug("Making project private: " + project.name)
public_group, created = Group.objects.get_or_create(name='Public')
remove_perm('view_project', public_group, project)
def give_permission_to_project(user, project, permission):
"""
Given a user and the defined permission, apply that to the project.
"""
logger.debug("Giving permission to a project: " + project.name + " - " + user.name + " - " + permission)
assign_perm(permission, user, project)
def apply_annotation_set_permissions(user, annotation_set):
#assign all permissions view, add, change, delete
logger.debug("Applying owner permissions to annotation set: " + annotation_set.name)
assign_perm('view_annotationset', user, annotation_set)
assign_perm('add_annotationset', user, annotation_set)
assign_perm('change_annotationset', user, annotation_set)
assign_perm('delete_annotationset', user, annotation_set)
#assign view permissions to the Anonymous user
#logger.debug("Making annotation set public: " + annotation_set.name)
#public_group, created = Group.objects.get_or_create(name='Public')
#assign_perm('view_annotationset', public_group, annotation_set)
def annotation_set_is_public(annotation_set):
"""
    True if the annotation set is public, False if not.
"""
public_group, created = Group.objects.get_or_create(name='Public')
return 'view_annotationset' in get_perms(public_group, annotation_set)
def set_annotation_set_is_public(is_public, annotation_set):
if is_public == "true" or is_public == True:
make_annotation_set_public(annotation_set)
else:
make_annotation_set_private(annotation_set)
def make_annotation_set_public(annotation_set):
"""
Makes a given annotation_set public.
"""
logger.debug("Making annotation_set public: " + annotation_set.name)
public_group, created = Group.objects.get_or_create(name='Public')
assign_perm('view_annotationset', public_group, annotation_set)
def make_annotation_set_private(annotation_set):
"""
    Makes a given annotation set private.
"""
logger.debug("Making annotation_set private: " + annotation_set.name)
public_group, created = Group.objects.get_or_create(name='Public')
remove_perm('view_annotationset', public_group, annotation_set)
@transaction.commit_on_success
def set_detailed_annotation_set_permissions(current_user, detailed_permissions, annotation_set):
"""
Resets the permissions for the annotation_set, based on the given structure.
{
username: "",
display_name: "",
permissions: ""
}
"""
logger.debug("Setting detailed permissions for project: " + annotation_set.name)
## clean out the permissions
for item in get_detailed_project_permissions(current_user, annotation_set):
user = User.objects.get(username=item['username'])
remove_perm("view_annotationset", user, annotation_set)
remove_perm("add_annotationset", user, annotation_set)
remove_perm("change_annotationset", user, annotation_set)
remove_perm("delete_annotationset", user, annotation_set)
## apply the permissions
for item in detailed_permissions:
user = User.objects.get(username=item['username'])
perms = item['permissions']
# set the permissions
for perm in perms:
# replace project with annotation set, in case the permissions come in wrong
perm = perm.replace("project", "annotationset")
assign_perm(perm, user, annotation_set)
def get_detailed_annotation_set_permissions(current_user, annotation_set):
"""
Builds up a list of users and permissions for the annotation set.
"""
permission_table = []
# ignore permissions for those in the public group
users = get_users_with_perms(annotation_set, with_group_users=False)
for user in users:
display_name = user.first_name + " " + user.last_name
# ignore the current user
if user != current_user:
permission_table.append({
"username": user.username,
"display_name": display_name,
"permissions": get_perms(user, annotation_set)
})
return permission_table
def give_permission_to_annotation_set(user, annotation_set, permission):
"""
Given a user and the defined permission, apply that to the annotation_set.
"""
logger.debug("Giving permission to a project: " + annotation_set.name + " - " + user.name + " - " + permission)
assign_perm(permission, user, annotation_set)
| 2.109375
| 2
|
Dijkstra's_Shortest_Path/Python/paveldedik/dijkstra.py
|
Mynogs/Algorithm-Implementations
| 1,184
|
12776686
|
<reponame>Mynogs/Algorithm-Implementations
def initialize(G, s):
"""Initialize graph G and vertex s."""
V, E = G
d = {v: float('inf') for v in V}
p = {v: None for v in V}
d[s] = 0
return d, p
def dijkstra(G, w, s):
"""Dijkstra's algorithm for shortest-path search."""
d, p = initialize(G, s)
V, E = G
S = set(V)
while S:
u = min(S, key=lambda x: d[x])
S = S - {u}
for (t, v) in E:
if t == u and d[v] > d[u] + w[u, v]:
d[v] = d[u] + w[u, v]
p[v] = u
return d, p # return distances and a tree representing shortest paths
if __name__ == '__main__':
V = ['A', 'B', 'C', 'D'] # vertexes
E = [('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B')] # edges
w = {('A', 'B'): 1, ('B', 'C'): 3, ('B', 'D'): 1,
('C', 'D'): 8, ('D', 'B'): 2} # weights
    print(dijkstra((V, E), w, 'A'))
| 3.875
| 4
|
integration-test/tests/test_integration_test.py
|
chatchai-hub/tmkms-light
| 11
|
12776687
|
from integration_test import __version__
import os
import subprocess
import urllib.request
import json
import time
from pathlib import Path
def test_basic():
tm = os.getenv('TENDERMINT')
tmhome = os.getenv('TMHOME')
tmkms = os.getenv('TMKMS')
kmsconfig = os.getenv('TMKMSCONFIG')
tmkms_proc = subprocess.Popen([tmkms, "start", "-c", kmsconfig], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tm_proc = subprocess.Popen([tm, "node", "--home", tmhome], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
contents = None
start_time = time.perf_counter()
timeout = 30
rpc_base = "http://127.0.0.1:26657"
status_url = rpc_base + "/status"
block_url = rpc_base + "/block"
while True:
try:
contents = urllib.request.urlopen(status_url).read()
break
except Exception as e:
time.sleep(1)
if time.perf_counter() - start_time >= timeout:
print(e)
tm_output = tm_proc.stdout.readlines()
os.system("pkill -9 " + tmkms)
tmkms_output = tmkms_proc.stdout.readlines()
tmkms_err = tmkms_proc.stderr.readlines()
raise TimeoutError('Waited too long for the RPC port.\n tm: {}\ntmkms output:{}\ntmkms error: {}'.format(tm_output, tmkms_output, tmkms_err)) from e
time.sleep(5)
contents = urllib.request.urlopen(status_url).read()
status = json.loads(contents)
block_height = int(status["result"]["sync_info"]["latest_block_height"])
assert block_height >= 1
contents = urllib.request.urlopen(block_url).read()
block = json.loads(contents)
validator_address = block['result']['block']['last_commit']['signatures'][0]['validator_address']
genesis_path = tmhome + "/config/genesis.json"
genesis = json.loads(Path(genesis_path).read_text())
assert validator_address == genesis["validators"][0]["address"].upper()
| 2.21875
| 2
|
tests/test_excel.py
|
hacklabr/django-rest-pandas
| 1,097
|
12776688
|
<gh_stars>1000+
from rest_framework.test import APITestCase
from tests.testapp.models import TimeSeries
from wq.io import load_file
class ExcelTestCase(APITestCase):
def setUp(self):
data = (
('2014-01-01', 0.5),
('2014-01-02', 0.4),
('2014-01-03', 0.6),
('2014-01-04', 0.2),
('2014-01-05', 0.1),
)
for date, value in data:
TimeSeries.objects.create(date=date, value=value)
def test_xls(self):
response = self.client.get("/timeseries.xls")
self.assertEqual(
'attachment; filename="Time Series.xls"',
response['content-disposition'],
)
xlfile = open('tests/output.xls', 'wb')
xlfile.write(response.content)
xlfile.close()
data = load_file("tests/output.xls")
self.assertEqual(len(data), 5)
self.assertEqual(data[0].date.year, 2014)
self.assertEqual(data[0].value, 0.5)
def test_xlsx(self):
response = self.client.get("/timeseries.xlsx")
self.assertEqual(
'attachment; filename="Time Series.xlsx"',
response['content-disposition'],
)
xlfile = open('tests/output.xlsx', 'wb')
xlfile.write(response.content)
xlfile.close()
data = load_file("tests/output.xlsx")
self.assertEqual(len(data), 5)
self.assertEqual(data[0].date.year, 2014)
self.assertEqual(data[0].value, 0.5)
| 2.375
| 2
|
main.py
|
diegossl/Compiler
| 0
|
12776689
|
<filename>main.py
from src.compiler import Compiler
compiler = Compiler()
compiler.run()
| 1.40625
| 1
|
setup.py
|
orgito/1forge-client
| 0
|
12776690
|
# pylint: disable=C0111
from setuptools import setup
with open("README.md", "r") as fh:
README = fh.read()
setup(
name='oneforge',
version='0.1.0',
description='1Forge REST API wrapper',
long_description=README,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/orgito/1forge-client',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
],
keywords='1forge forex',
packages=['oneforge'],
setup_requires=['setuptools>=38.6.0'],
install_requires=['requests'],
python_requires='>=3.6',
project_urls={
'Bug Reports': 'https://github.com/orgito/1forge-client/issues',
'Source': 'https://github.com/orgito/1forge-client',
},
)
| 1.351563
| 1
|
src/config.py
|
stupiding/insightface
| 0
|
12776691
|
<reponame>stupiding/insightface<filename>src/config.py
import numpy as np
import os
from easydict import EasyDict as edict
config = edict()
config.bn_mom = 0.9
config.workspace = 256
config.emb_size = 512
config.ckpt_embedding = True
config.net_se = 0
config.net_act = 'prelu'
config.net_unit = 3
config.net_input = 1
config.net_output = 'E'
config.net_multiplier = 1.0
config.val_targets = ['lfw', 'cfp_fp', 'agedb_30', '9374']
config.ce_loss = True
config.fc7_lr_mult = 10.0
config.fc7_wd_mult = 1.0
config.fc7_no_bias = True
config.max_steps = 70000
config.data_rand_mirror = True
config.data_cutoff = True
config.data_crop = True
config.data_mask = True
config.data_color = 0
config.data_images_filter = 0
config.downsample_back = 0.0
config.motion_blur = 0.0
config.use_global_stats = True
config.use_bgr = False
crop = edict()
crop.crop_h = 112
crop.crop_w = 112
crop.hrange = 2
crop.wrange = 2
cutoff = edict()
cutoff.ratio = 0.3
cutoff.size = 32
cutoff.mode = 'fixed' # 'uniform'
cutoff.filler = 127.5
mask = edict()
mask.ratio = 0.3
mask.size = 0.5
mask.value = 0
# network settings
network = edict()
network.r100 = edict()
network.r100.net_name = 'fresnet'
network.r100.num_layers = 100
network.r100.emb_size = 512
network.r100.shake_drop = False
network.r100.version_se = False
network.r100.version_input = 1
network.r100.version_output = 'E'
network.r100.version_unit = 3
network.r100.version_act = 'prelu'
network.r100.width_mult = 1
network.r100.version_bn = 'bn'
network.r50 = edict()
network.r50.net_name = 'fresnet'
network.r50.num_layers = 50
network.r50v1 = edict()
network.r50v1.net_name = 'fresnet'
network.r50v1.num_layers = 50
network.r50v1.net_unit = 1
network.d169 = edict()
network.d169.net_name = 'fdensenet'
network.d169.num_layers = 169
network.d169.per_batch_size = 64
network.d169.densenet_dropout = 0.0
network.d201 = edict()
network.d201.net_name = 'fdensenet'
network.d201.num_layers = 201
network.d201.per_batch_size = 64
network.d201.densenet_dropout = 0.0
network.y1 = edict()
network.y1.net_name = 'fmobilefacenet'
network.y1.emb_size = 128
network.y1.net_output = 'GDC'
network.z1 = edict()
network.z1.net_name = 'fsmall'
network.z1.emb_size = 512
network.z1.net_output = 'E'
network.z1.num_layers = 11
network.m1 = edict()
network.m1.net_name = 'fmobilenet'
network.m1.emb_size = 256
network.m1.net_output = 'GDC'
network.m1.net_multiplier = 1.0
network.m05 = edict()
network.m05.net_name = 'fmobilenet'
network.m05.emb_size = 256
network.m05.net_output = 'GDC'
network.m05.net_multiplier = 0.5
network.mnas = edict()
network.mnas.net_name = 'fmnasnet'
network.mnas.emb_size = 256
network.mnas.net_output = 'GDC'
network.mnas.net_multiplier = 1.0
network.mnas05 = edict()
network.mnas05.net_name = 'fmnasnet'
network.mnas05.emb_size = 256
network.mnas05.net_output = 'GDC'
network.mnas05.net_multiplier = 0.5
network.mnas025 = edict()
network.mnas025.net_name = 'fmnasnet'
network.mnas025.emb_size = 256
network.mnas025.net_output = 'GDC'
network.mnas025.net_multiplier = 0.25
# dataset settings
dataset = edict()
dataset.emore = edict()
dataset.emore.dataset = 'emore'
dataset.emore.dataset_path = '../datasets/faces_emore'
dataset.emore.num_classes = 85742
dataset.emore.image_shape = (112,112,3)
dataset.emore.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
dataset.left_eye = edict()
dataset.left_eye.dataset = 'left_eye'
dataset.left_eye.dataset_path = '../datasets/left_eye'
dataset.left_eye.num_classes = 85742
dataset.left_eye.image_shape = (112,112,3)
dataset.left_eye.val_targets = []
dataset.glint_cn = edict()
dataset.glint_cn.dataset = 'glint_cn'
dataset.glint_cn.dataset_path = '../datasets/glint_cn'
dataset.glint_cn.num_classes = 93979
dataset.glint_cn.image_shape = (112,112,3)
dataset.glint_cn.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
dataset.gy100 = edict()
dataset.gy100.dataset = 'gy100'
dataset.gy100.dataset_path = '../datasets/gy100_final'
dataset.gy100.num_classes = 26679
dataset.gy100.image_shape = (112,112,3)
dataset.gy100.val_targets = ['9374']
dataset.bjz_20W = edict()
dataset.bjz_20W.dataset = 'bjz_20W'
dataset.bjz_20W.dataset_path = '../datasets/bjz+grid_128x128'
dataset.bjz_20W.num_classes = 1460480
dataset.bjz_20W.image_shape = (112,112,3)
dataset.bjz_20W.val_targets = ['9374']
dataset.bjz30W_20W = edict()
dataset.bjz30W_20W.dataset = 'bjz30w_grid2'
dataset.bjz30W_20W.dataset_path = '../datasets/bjz30w+grid2_128x128'
dataset.bjz30W_20W.num_classes = 560480
dataset.bjz30W_20W.image_shape = (112,112,3)
dataset.bjz30W_20W.val_targets = ['9374']
dataset.bjz30W_20W_GY = edict()
dataset.bjz30W_20W_GY.dataset = 'bjz30w_grid2_gy'
dataset.bjz30W_20W_GY.dataset_path = '../datasets/bjz30w+grid2+gy50' #_id1st'
dataset.bjz30W_20W_GY.num_classes = 654872
dataset.bjz30W_20W_GY.image_shape = (112,112,3)
dataset.bjz30W_20W_GY.val_targets = ['9374']
loss = edict()
loss.softmax = edict()
loss.softmax.loss_name = 'softmax'
loss.nsoftmax = edict()
loss.nsoftmax.loss_name = 'margin_softmax'
loss.nsoftmax.loss_s = 64.0
loss.nsoftmax.loss_m1 = 1.0
loss.nsoftmax.loss_m2 = 0.0
loss.nsoftmax.loss_m3 = 0.0
loss.arcface = edict()
loss.arcface.loss_name = 'margin_softmax'
loss.arcface.loss_s = 60.0
loss.arcface.loss_m1 = 1.0
loss.arcface.loss_m2 = 0.65
loss.arcface.loss_m3 = 0.0
loss.cosface = edict()
loss.cosface.loss_name = 'margin_softmax'
loss.cosface.loss_s = 64.0
loss.cosface.loss_m1 = 1.0
loss.cosface.loss_m2 = 0.0
loss.cosface.loss_m3 = 0.35
loss.combined = edict()
loss.combined.loss_name = 'margin_softmax'
loss.combined.loss_s = 64.0
loss.combined.loss_m1 = 1.0
loss.combined.loss_m2 = 0.3
loss.combined.loss_m3 = 0.2
loss.triplet = edict()
loss.triplet.loss_name = 'triplet'
loss.triplet.images_per_identity = 5
loss.triplet.triplet_alpha = 0.3
loss.triplet.triplet_bag_size = 7200
loss.triplet.triplet_max_ap = 0.0
loss.triplet.per_batch_size = 60
loss.triplet.lr = 0.05
loss.atriplet = edict()
loss.atriplet.loss_name = 'atriplet'
loss.atriplet.images_per_identity = 5
loss.atriplet.triplet_alpha = 0.35
loss.atriplet.triplet_bag_size = 7200
loss.atriplet.triplet_max_ap = 0.0
loss.atriplet.per_batch_size = 60
loss.atriplet.lr = 0.05
# default settings
default = edict()
# default network
default.network = 'r100'
default.pretrained = '' #'../models/r100-arcface-bjz_20W/model,0'
# default dataset
#default.dataset = 'bjz_20W'
default.dataset = 'bjz30W_20W_GY'
#default.dataset = 'gy100'
#default.dataset = 'emore'
default.loss = 'arcface'
default.frequent = 2000
default.verbose = 10000
default.kvstore = 'device'
default.end_epoch = 100
default.lr = 0.0001
default.wd = 0.0005
default.mom = 0.9
default.per_batch_size = 32
default.ckpt = 2
default.lr_steps = '50000,60000,70000'
default.models_root = '../models'
default.cutoff = cutoff
default.crop = crop
default.mask = mask
def generate_config(_network, _dataset, _loss):
for k, v in loss[_loss].items():
config[k] = v
if k in default:
default[k] = v
for k, v in network[_network].items():
config[k] = v
if k in default:
default[k] = v
for k, v in dataset[_dataset].items():
config[k] = v
if k in default:
default[k] = v
config.loss = _loss
config.network = _network
config.dataset = _dataset
config.num_workers = 1
if 'DMLC_NUM_WORKER' in os.environ:
config.num_workers = int(os.environ['DMLC_NUM_WORKER'])
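# Example of how the presets are combined (a sketch using the defaults defined above):
#   generate_config(default.network, default.dataset, default.loss)
#   print(config.network, config.dataset, config.loss, config.num_classes)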
| 1.875
| 2
|
dsio/dashboard/kibana.py
|
ufoioio/datastream.io
| 897
|
12776692
|
<filename>dsio/dashboard/kibana.py<gh_stars>100-1000
import elasticsearch
from kibana_dashboard_api import Visualization, Dashboard
from kibana_dashboard_api import VisualizationsManager, DashboardsManager
from ..exceptions import KibanaConfigNotFoundError
def generate_dashboard(es_conn, sensor_names, index_name, timefield='time',
update=True):
""" Generate a Kibana dashboard given a list of sensor names """
es_conn.index(index='.kibana', doc_type="index-pattern",
id=index_name,
body={
"title": index_name,
"timeFieldName": "time"
})
dashboards = DashboardsManager(es_conn)
dashboard = Dashboard()
dashboard.id = "%s-dashboard" % index_name
dashboard.title = "%s dashboard" % index_name
dashboard.panels = []
dashboard.options = {"darkTheme": True}
dashboard.time_from = "now-15m"
dashboard.refresh_interval_value = 5000
dashboard.search_source = {
"filter": [{
"query": {
"query_string": {
"analyze_wildcard": True,
"query": "*"
}
}
}]
}
visualizations = VisualizationsManager(es_conn)
vis_list = visualizations.get_all() # list all visualizations
panels = []
i = 0
for sensor in sensor_names:
viz_id = "%s-%s" % (index_name, sensor)
# Check if visualization exists
viz = next((v for v in vis_list if v.id == viz_id), None)
if not viz: # If not, create it
viz = Visualization()
viz.id = viz_id
viz.title = "%s-%s" % (index_name, sensor)
viz.search_source = {
"index": index_name,
"query":{
"query_string":{
"analyze_wildcard": True,
"query":"*"
}
},
"filter":[]
}
viz.vis_state = {
"title": "%s-%s" % (index_name, sensor),
"type": "line",
"params": {
"addLegend": True,
"addTimeMarker": True,
"addTooltip": True,
"defaultYExtents": True,
"drawLinesBetweenPoints": True,
"interpolate": "linear",
"radiusRatio": 9,
"scale": "linear",
"setYExtents": False,
"shareYAxis": True,
"showCircles": True,
"smoothLines": True,
"times":[],
"yAxis":{}
},
"aggs": [
{
"id": "1",
"type": "avg",
"schema":"metric",
"params": {
"field": sensor,
"customLabel": sensor.replace('_', ' ')
}
}, {
"id": "2",
"type": "max",
"schema":"radius",
"params": {
"field":"SCORE_%s" % sensor
}
}, {
"id": "3",
"type": "date_histogram",
"schema": "segment",
"params":{
"field": timefield,
"interval": "custom",
"customInterval": "5s",
"min_doc_count": 1,
"extended_bounds": {}
}
}
],
"listeners": {}
}
try:
res = visualizations.add(viz)
assert res['_id'] == viz_id
except elasticsearch.exceptions.ConflictError:
if update:
res = visualizations.update(viz)
panel = {
"id": viz_id,
"panelIndex": i,
"row": i,
"col": i,
"size_x": 7,
"size_y": 4,
"type": "visualization"
}
panels.append(panel)
ret = dashboard.add_visualization(viz)
i += 1
# Create the index if it does not exist
if not es_conn.indices.exists(index_name):
index_properties = {"time" : {"type": "date"}}
body = {"mappings": {index_name: {"properties": index_properties}}}
es_conn.indices.create(index=index_name, body=body)
try:
ret = dashboards.add(dashboard)
except elasticsearch.exceptions.ConflictError:
# Dashboard already exists, let's update it if we have to
if update:
ret = dashboards.update(dashboard)
# Create the index pattern
es_conn.index(index='.kibana', doc_type="index-pattern", id=index_name,
body={"title": index_name, "timeFieldName": "time"})
# Search for kibana config
kibana_config = es_conn.search(index='.kibana',
sort={'_uid': {'order': 'desc'}},
doc_type='config')
try:
kibana_id = kibana_config['hits']['hits'][0]['_id']
    except (KeyError, IndexError):
raise KibanaConfigNotFoundError()
es_conn.update(index='.kibana', doc_type='config', id=kibana_id,
body={"doc": {"defaultIndex" : index_name}})
return ret
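# Hypothetical usage sketch (the host, sensor names and index name below are assumptions, not part of this module):
#   es = elasticsearch.Elasticsearch(["http://localhost:9200"])
#   generate_dashboard(es, sensor_names=["temperature", "humidity"], index_name="dsio-demo")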
| 2.5625
| 3
|
backend/billparser/importers/statuses.py
|
Congress-Dev/congress-dev
| 9
|
12776693
|
<reponame>Congress-Dev/congress-dev
import os
from billparser.status_parser import parse_archive
url_format = "https://www.govinfo.gov/bulkdata/BILLSTATUS/{congress}/{prefix}/BILLSTATUS-{congress}-{prefix}.zip"
congresses = [116]
def download_path(url: str):
os.makedirs("statuses", exist_ok=True)
output_name = url.split("/")[-1]
if os.path.exists(f"statuses/{output_name}"):
return output_name
os.system(f"wget {url} --output-document statuses/{output_name}")
return output_name
if __name__ == "__main__":
for congress in congresses:
for prefix in ["s", "hr"]:
url = url_format.format(**{"congress": congress, "prefix": prefix})
output_name = download_path(url)
parse_archive(f"statuses/{output_name}")
| 3.078125
| 3
|
uranium_quantum/circuit_exporter/cirq-exporter.py
|
radumarg/uranium_quantum
| 0
|
12776694
|
<reponame>radumarg/uranium_quantum
import importlib
BaseExporter = importlib.import_module("uranium_quantum.circuit_exporter.base-exporter")
class Exporter(BaseExporter.BaseExporter):
def _define_import_code_section(self):
return f"\
import cirq\n\
import numpy as np\n\
\n\
q = [cirq.NamedQubit('q' + str(i)) for i in range({self._qubits})]\n\
\n"
def _define_u3_gates_code_section(self):
return "\
# define the u3 gate\n\
def u3(theta_radians, phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[np.cos(theta_radians/2), -np.exp(1j * lambda_radians) * np.sin(theta_radians/2)], [np.exp(1j * phi_radians) * np.sin(theta_radians/2), np.exp(1j * lambda_radians+1j * phi_radians) * np.cos(theta_radians/2)]]))\n\
\n"
def _define_u2_gates_code_section(self):
return "\
# define the u2 gate\n\
def u2(phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[1/np.sqrt(2), -np.exp(1j * lambda_radians) * 1/np.sqrt(2)], [np.exp(1j * phi_radians) * 1/np.sqrt(2), np.exp(1j * lambda_radians + 1j * phi_radians) * 1/np.sqrt(2)]]))\n\
\n"
def _define_u1_gates_code_section(self):
return "\
def u1(lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0], [0, np.exp(1j * lambda_radians)]]))\n\
\n"
def _define_crtl_u1(self):
return "\
# define ctrl-u1 gate\n\
def cu1(lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, np.exp(1j * lambda_radians)]]))\n\
\n"
def _define_crtl_u2(self):
return "\
# define ctrl-u2 gate\n\
def cu2(phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1/np.sqrt(2), -np.exp(1j * lambda_radians) * 1/np.sqrt(2)], [0, 0, np.exp(1j * phi_radians) * 1/np.sqrt(2), np.exp(1j * lambda_radians + 1j * phi_radians) * 1/np.sqrt(2)]]))\n\
\n"
def _define_crtl_u3(self):
return "\
# define ctrl-u3 gate\n\
def cu3(theta_radians, phi_radians, lambda_radians):\n\
return cirq.MatrixGate(np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta_radians/2), -np.exp(1j * lambda_radians) * np.sin(theta_radians/2)], [0, 0, np.exp(1j * phi_radians) * np.sin(theta_radians/2), np.exp(1j * lambda_radians+1j * phi_radians) * np.cos(theta_radians/2)]]))\n\
\n"
def start_code(self):
return (
self._define_import_code_section()
+ "\n"
+ self._define_u3_gates_code_section()
+ "\n"
+ self._define_u2_gates_code_section()
+ "\n"
+ self._define_u1_gates_code_section()
+ "\n"
+ self._define_crtl_u1()
+ "\n"
+ self._define_crtl_u2()
+ "\n"
+ self._define_crtl_u3()
+ "\n"
+ "circuit = cirq.Circuit(\n\n"
)
def end_code(self):
return f"\
)\n\
\n\
simulator = cirq.Simulator()\n\
simulator.run(circuit, repetitions=1000)\n"
@staticmethod
def _gate_u3(
target, theta_radians, phi_radians, lambda_radians, add_comments=True
):
out = " # u3 gate\n" if add_comments else ""
out += (
f" u3({theta_radians}, {phi_radians}, {lambda_radians})(q[{target}]),\n"
)
return out
@staticmethod
def _gate_u2(target, phi_radians, lambda_radians, add_comments=True):
out = " # u2 gate\n" if add_comments else ""
out += f" u2({phi_radians}, {lambda_radians})(q[{target}]),\n"
return out
@staticmethod
def _gate_u1(target, lambda_radians, add_comments=True):
out = " # u1 gate\n" if add_comments else ""
out += f" u1({lambda_radians})(q[{target}]),\n"
return out
@staticmethod
def _gate_identity(target, add_comments=True):
out = " # identity gate\n" if add_comments else ""
out += f" cirq.I(q[{target}]),\n"
return out
@staticmethod
def _gate_hadamard(target, add_comments=True):
out = " # hadamard gate\n" if add_comments else ""
out += f" cirq.H(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_x(target, add_comments=True):
out = " # pauli-x gate\n" if add_comments else ""
out += f" cirq.X(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_y(target, add_comments=True):
out = " # pauli-y gate\n" if add_comments else ""
out += f" cirq.Y(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_z(target, add_comments=True):
out = " # pauli-z gate\n" if add_comments else ""
out += f" cirq.Z(q[{target}]),\n"
return out
@staticmethod
def _gate_pauli_x_root(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-x-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_y_root(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-y-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_z_root(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-z-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_x_root_dagger(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-x-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_y_root_dagger(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-y-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_pauli_z_root_dagger(target, root, add_comments=True):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# pauli-z-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_sqrt_not(target, add_comments=True):
out = " # sqrt-not gate\n" if add_comments else ""
out += f" (cirq.X**(1/2))(q[{target}]),\n"
return out
@staticmethod
def _gate_t(target, add_comments=True):
out = " # t gate\n" if add_comments else ""
out += f" cirq.T(q[{target}]),\n"
return out
@staticmethod
def _gate_t_dagger(target, add_comments=True):
out = " # t-dagger gate\n" if add_comments else ""
out += f" u1(-np.pi / 4)(q[{target}]),\n"
return out
@staticmethod
def _gate_rx_theta(target, theta, add_comments=True):
out = " # rx-theta gate\n" if add_comments else ""
out += f" cirq.rx(0)(q[{target}]),\n"
return out
@staticmethod
def _gate_ry_theta(target, theta, add_comments=True):
out = " # ry-theta gate\n" if add_comments else ""
out += f" cirq.ry(0)(q[{target}]),\n"
return out
@staticmethod
def _gate_rz_theta(target, theta, add_comments=True):
out = " # rz-theta gate\n" if add_comments else ""
out += f" cirq.rz(0)(q[{target}]),\n"
return out
@staticmethod
def _gate_s(target, add_comments=True):
out = " # s gate\n" if add_comments else ""
out += f" cirq.S(q[{target}]),\n"
return out
@staticmethod
def _gate_s_dagger(target, add_comments=True):
out = " # s-dagger gate\n" if add_comments else ""
out += f" u1(-np.pi / 2)(q[{target}]),\n"
return out
@staticmethod
def _gate_swap(target, target2, add_comments=True): ##
out = " # swap gate\n" if add_comments else ""
out += f" cirq.SWAP(q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_iswap(target, target2, add_comments=True):
out = " # iswap gate\n" if add_comments else ""
out += f" cirq.ISWAP(q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_swap_phi(target, target2, phi, add_comments=True):
raise BaseExporter.ExportException("The swap-phi gate is not implemented.")
@staticmethod
def _gate_sqrt_swap(target, target2, add_comments=True):
out = " # sqrt-swap gate\n" if add_comments else ""
out += f" (cirq.SWAP**(1/2))(q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_xx(target, target2, theta, add_comments=True):
out = "# xx gate\n" if add_comments else ""
return out
@staticmethod
def _gate_yy(target, target2, theta, add_comments=True):
out = "# yy gate\n" if add_comments else ""
return out
@staticmethod
def _gate_zz(target, target2, theta, add_comments=True):
out = "# zz gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_hadamard(control, target, controlstate, add_comments=True):
out = " # ctrl-hadamard gate\n" if add_comments else ""
out += f" cirq.H.controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_u3(
control,
target,
controlstate,
theta_radians,
phi_radians,
lambda_radians,
add_comments=True,
):
out = " # ctrl-u3 gate\n" if add_comments else ""
out += f" cu3({theta_radians}, {phi_radians}, {lambda_radians})(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_u2(
control, target, controlstate, phi_radians, lambda_radians, add_comments=True
):
out = " # ctrl-u2 gate\n" if add_comments else ""
out += f" cu2({phi_radians}, {lambda_radians})(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_u1(
control, target, controlstate, lambda_radians, add_comments=True
):
out = " # ctrl-u1 gate\n" if add_comments else ""
out += f" cu1({lambda_radians})(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_t(control, target, controlstate, add_comments=True):
out = " # ctrl-t gate\n" if add_comments else ""
out += f" cu1(np.pi / 4)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_t_dagger(control, target, controlstate, add_comments=True):
out = " # ctrl-t-dagger gate\n" if add_comments else ""
out += f" cu1(-np.pi / 4)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_x(control, target, controlstate, add_comments=True):
out = " # ctrl-pauli-x gate\n" if add_comments else ""
out += f" cirq.CNOT(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_y(control, target, controlstate, add_comments=True):
out = " # ctrl-pauli-y gate\n" if add_comments else ""
out += f" cirq.Y.controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_z(control, target, controlstate, add_comments=True):
out = " # ctrl-pauli-z gate\n" if add_comments else ""
out += f" cirq.CZ(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_pauli_x_root(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-x-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_y_root(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-y-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_z_root(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-z-root gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_x_root_dagger(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-x-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_y_root_dagger(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-y-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_pauli_z_root_dagger(
control, target, controlstate, root, add_comments=True
):
# TODO
root = f"(2**{root[4:]})" if '^' in root else root[2:]
out = "# ctrl-pauli-z-root-dagger gate\n" if add_comments else ""
return out
@staticmethod
def _gate_ctrl_sqrt_not(control, target, controlstate, add_comments=True):
out = " # ctrl-sqrt-not gate\n" if add_comments else ""
out += f" (cirq.X**(1/2)).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_rx_theta(
control, target, controlstate, theta_radians, add_comments=True
):
out = " # ctrl-rx-theta gate\n" if add_comments else ""
out += f" cirq.rx({theta_radians}).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_ry_theta(
control, target, controlstate, theta_radians, add_comments=True
):
out = " # ctrl-ry-theta gate\n" if add_comments else ""
out += f" cirq.ry({theta_radians}).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_rz_theta(
control, target, controlstate, theta_radians, add_comments=True
):
out = " # ctrl-rz-theta gate\n" if add_comments else ""
out += f" cirq.rz({theta_radians}).controlled().on(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_s(control, target, controlstate, add_comments=True):
out = " # ctrl-s gate\n" if add_comments else ""
out += f" cu1(np.pi / 2)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_ctrl_s_dagger(control, target, controlstate, add_comments=True):
out = " # ctrl-s-dagger gate\n" if add_comments else ""
out += f" cu1(-np.pi / 2)(q[{control}], q[{target}]),\n"
return out
@staticmethod
def _gate_toffoli(
control, control2, target, controlstate, controlstate2, add_comments=True
):
out = " # toffoli gate\n" if add_comments else ""
out += f" cirq.CSWAP(q[{control}], q[{control2}], q[{target}]),\n"
return out
@staticmethod
def _gate_fredkin(control, target, target2, controlstate, add_comments=True):
out = " # fredkin gate\n" if add_comments else ""
out += f" cirq.CCX(q[{control}], q[{target}], q[{target2}]),\n"
return out
@staticmethod
def _gate_measure_x(target, classic_bit, add_comments=True):
raise BaseExporter.ExportException("The measure-x gate is not implemented.")
@staticmethod
def _gate_measure_y(target, classic_bit, add_comments=True):
raise BaseExporter.ExportException("The measure-y gate is not implemented.")
@staticmethod
def _gate_measure_z(target, classic_bit, add_comments=True):
out = " # measure-z gate\n" if add_comments else ""
out += f" cirq.measure(q[{target}], key='c{classic_bit}'),\n"
return out
| 2.453125
| 2
|
fortnite_api/cosmetics.py
|
Fortnite-API/py-wrapper
| 20
|
12776695
|
import math
from datetime import datetime
from fortnite_api.enums import BrCosmeticType, BrCosmeticRarity
class NewBrCosmetics:
def __init__(self, data):
self.build = data.get('build')
self.previous_build = data.get('previousBuild')
self.hash = data.get('hash')
try:
self.date = datetime.strptime(data.get('date'), '%Y-%m-%dT%H:%M:%S%z')
except (ValueError, TypeError):
self.date = None
try:
self.last_addition = datetime.strptime(data.get('lastAddition'), '%Y-%m-%dT%H:%M:%S%z')
except (ValueError, TypeError):
self.last_addition = None
self.items = [BrCosmetic(i) for i in data.get('items')] if data.get('items') else None
self.raw_data = data
class BrCosmetic:
"""Represents a Battle Royale Cosmetic.
Attributes
-----------
id: :class:`str`
The id of the cosmetic.
type: :class:`BrCosmeticType`
The type of the cosmetic.
backend_type: :class:`str`
The internal type of the cosmetic.
rarity: :class:`BrCosmeticRarity`
The rarity of the cosmetic.
backend_rarity: :class:`str`
The internal rarity of the cosmetic.
name: :class:`str`
The name of the cosmetic in the chosen language.
description: :class:`str`
The description of the cosmetic in the chosen language.
set: Optional[:class:`str`]
The set of the cosmetic in the chosen language.
set_text: Optional[:class:`str`]
The text of the set of the cosmetic in the chosen language.
series: Optional[:class:`str`]
The series of the cosmetic in the chosen language.
backend_series: Optional[:class:`str`]
The internal series of the cosmetic.
small_icon: :class:`BrCosmeticImage`
The icon image in 128x128 resolution of the cosmetic.
icon: Optional[:class:`BrCosmeticImage`]
The icon image in 512x512 resolution of the cosmetic.
featured: Optional[:class:`BrCosmeticImage`]
The featured image in 1024x1024 resolution of the cosmetic.
background: Optional[:class:`BrCosmeticImage`]
The background image in 2048x1024 resolution of a loading screen.
cover_art: Optional[:class:`BrCosmeticImage`]
The cover art image in 512x512 resolution of a music pack.
decal: Optional[:class:`BrCosmeticImage`]
The decal in 512x512 resolution of a spray.
variants: Optional[List[:class:`BrCosmeticVariant`]]
A :class:`list` of :class:`BrCosmeticVariant` of the cosmetic.
gameplay_tags: Optional[List[:class:`str`]]
A :class:`list` of gameplay tags of the cosmetics.
display_asset_path: Optional[:class:`str`]
The path of the display asset.
path: :class:`str`
The path of the asset.
added: :class:`datetime.datetime`
The timestamp when the item was added to the Fortnite-API.com database.
raw_data: :class:`dict`
The raw data from request. Can be used for saving and recreating the class.
"""
def __init__(self, data):
self.id = data.get('id')
self.name = data.get('name')
self.description = data.get('description')
cosmetic_type = data.get('type', {}) if data.get('type') else {}
try:
self.type = BrCosmeticType(cosmetic_type.get('value'))
except ValueError:
self.type = BrCosmeticType.UNKNOWN
self.display_type = cosmetic_type.get('displayValue')
self.backend_type = cosmetic_type.get('backendValue')
rarity = data.get('rarity', {}) if data.get('rarity') else {}
try:
self.rarity = BrCosmeticRarity(rarity.get('value'))
except ValueError:
self.rarity = BrCosmeticRarity.UNKNOWN
self.rarity_text = rarity.get('displayValue')
self.backend_rarity = rarity.get('backendValue')
series = data.get('series', {}) if data.get('series') else {}
self.series = series.get('value')
self.series_image = series.get('image')
self.backend_series = series.get('backendValue')
cosmetic_set = data.get('set', {}) if data.get('set') else {}
self.set = cosmetic_set.get('value')
self.set_text = cosmetic_set.get('text')
self.backend_set = cosmetic_set.get('backendValue')
introduction = data.get('introduction', {}) if data.get('introduction') else {}
self.introduction_chapter = introduction.get('chapter')
self.introduction_season = introduction.get('season')
self.introduction_text = introduction.get('text')
self.backend_introduction = introduction.get('backendValue')
images = data.get('images', {}) if data.get('images') else {}
self.small_icon = BrCosmeticImage(images.get('smallIcon')) if images.get('smallIcon') else None
self.icon = BrCosmeticImage(images.get('icon')) if images.get('icon') else None
self.featured = BrCosmeticImage(images.get('featured')) if images.get('featured') else None
other_images = images.get('other', {}) if images.get('other') else {}
self.background = BrCosmeticImage(other_images.get('background')) if other_images.get('background') else None
self.cover_art = BrCosmeticImage(other_images.get('coverart')) if other_images.get('coverart') else None
self.decal = BrCosmeticImage(other_images.get('decal')) if other_images.get('decal') else None
self.variants = [BrCosmeticVariant(variant) for variant in data.get('variants')] \
if data.get('variants') is not None else None
self.gameplay_tags = [gameplay_tag for gameplay_tag in data.get('gameplayTags')] \
if data.get('gameplayTags') is not None else None
self.meta_tags = [meta_tag for meta_tag in data.get('metaTags')] \
if data.get('metaTags') is not None else None
self.showcase_video = 'https://youtube.com/watch?v=' + data.get('showcaseVideo') \
if data.get('showcaseVideo') else None
self.display_asset_path = data.get('displayAssetPath')
self.definition_path = data.get('definitionPath')
self.path = data.get('path')
try:
self.added = datetime.strptime(data.get('added'), '%Y-%m-%dT%H:%M:%S%z')
except (ValueError, TypeError):
self.added = None
self.shop_history = []
for date in data.get('shopHistory', []) if data.get('shopHistory') else []:
try:
self.shop_history.append(datetime.strptime(date, '%Y-%m-%dT%H:%M:%S%z').replace(tzinfo=None))
except (ValueError, TypeError):
pass
self.appearances = len(self.shop_history)
self.first_appearance = self.shop_history[0] if self.appearances > 0 else None
self.last_appearance = self.shop_history[self.appearances - 1] if self.appearances > 0 else None
self.unseen_for = (datetime.utcnow() - self.last_appearance).days if self.last_appearance else None
self.raw_data = data
class BrCosmeticImage:
"""Represents a Battle Royale cosmetic image.
Attributes
-----------
url: :class:`str`
The hash of the image.
"""
def __init__(self, url):
self.url = url
    def url_as(self, size):
        # size must be a positive power of two, e.g. 128, 256 or 512
        if not isinstance(size, int) or size <= 0 or size & (size - 1) != 0:
            raise TypeError('Size must be a power of 2.')
        url_without_type = self.url.replace('.png', '')
        return url_without_type + '_' + str(size) + '.png'
def __str__(self):
return self.url
class BrCosmeticVariant:
"""Represents a Battle Royale cosmetic image.
Attributes
-----------
channel: :class:`str`
The channel of the variant.
type: Optional[:class:`str`]
The type of the variant in the chosen language.
options: List[:class:`BrCosmeticVariantOption`]
A :class:`list` of :class:`BrCosmeticVariantOption` of the variant.
raw_data: :class:`dict`
        The raw data from the request. Can be used for saving and re-creating the class.
"""
def __init__(self, data):
self.channel = data.get('channel')
self.type = data.get('type')
self.options = [BrCosmeticVariantOption(option) for option in data.get('options')] \
if data.get('options') is not None else None
self.raw_data = data
class BrCosmeticVariantOption:
"""Represents a Battle Royale cosmetic image.
Attributes
-----------
tag: :class:`str`
The tag of the option.
name: :class:`str`
The name of the option in the chosen language.
image: :class:`BrCosmeticImage`
        An image of the option.
    raw_data: :class:`dict`
        The raw data from the request. Can be used for saving and re-creating the class.
"""
def __init__(self, data):
self.tag = data.get('tag')
self.name = data.get('name')
self.image = BrCosmeticImage(data.get('image'))
self.raw_data = data
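# Illustrative usage sketch, not part of the original module: the URL below is hypothetical,
# it only shows how BrCosmeticImage.url_as() swaps a power-of-two size into the filename.
if __name__ == '__main__':
    icon = BrCosmeticImage('https://example.com/images/cid_001/icon.png')
    print(icon.url_as(256))  # https://example.com/images/cid_001/icon_256.png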
| 2.859375
| 3
|
test/imgkit_test.py
|
guilhermef/imgkit
| 0
|
12776696
|
<filename>test/imgkit_test.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
import io
import sys
import codecs
import unittest
import tempfile
import aiounittest
# Prepend ../ to PYTHONPATH so that we can import IMGKIT from there.
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.realpath(os.path.join(TEST_ROOT, '..')))
import async_imgkit
class TestIMGKitInitialization(aiounittest.AsyncTestCase):
"""Test init"""
async def test_html_source(self):
r = await async_imgkit.AsyncIMGKit.create('<h1>Oh hai</h1>', 'string')
self.assertTrue(r.source.isString())
async def test_url_source(self):
r = await async_imgkit.AsyncIMGKit.create('http://ya.ru', 'url')
self.assertTrue(r.source.isUrl())
async def test_file_source(self):
r = await async_imgkit.AsyncIMGKit.create('fixtures/example.html', 'file')
self.assertTrue(r.source.isFile())
async def test_file_object_source(self):
with open('fixtures/example.html') as fl:
r = await async_imgkit.AsyncIMGKit.create(fl, 'file')
self.assertTrue(r.source.isFileObj())
async def test_file_source_with_path(self):
r = await async_imgkit.AsyncIMGKit.create('test', 'string')
with io.open('fixtures/example.css') as f:
self.assertTrue(r.source.isFile(path=f))
with codecs.open('fixtures/example.css', encoding='UTF-8') as f:
self.assertTrue(r.source.isFile(path=f))
async def test_options_parsing(self):
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options={'format': 'jpg'})
test_command = r.command('test')
idx = test_command.index('--format') # Raise exception in case of not found
self.assertTrue(test_command[idx + 1] == 'jpg')
async def test_options_parsing_with_dashes(self):
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options={'--format': 'jpg'})
test_command = r.command('test')
idx = test_command.index('--format') # Raise exception in case of not found
self.assertTrue(test_command[idx + 1] == 'jpg')
async def test_options_parsing_with_tuple(self):
options = {
'--custom-header': [
('Accept-Encoding', 'gzip')
]
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=options)
command = r.command()
idx1 = command.index('--custom-header') # Raise exception in case of not found
self.assertTrue(command[idx1 + 1] == 'Accept-Encoding')
self.assertTrue(command[idx1 + 2] == 'gzip')
async def test_options_parsing_with_tuple_no_dashes(self):
options = {
'custom-header': [
('Accept-Encoding', 'gzip')
]
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=options)
command = r.command()
idx1 = command.index('--custom-header') # Raise exception in case of not found
self.assertTrue(command[idx1 + 1] == 'Accept-Encoding')
self.assertTrue(command[idx1 + 2] == 'gzip')
async def test_repeatable_options(self):
roptions = {
'--format': 'jpg',
'cookies': [
('test_cookie1', 'cookie_value1'),
('test_cookie2', 'cookie_value2'),
]
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=roptions)
test_command = r.command('test')
idx1 = test_command.index('--format') # Raise exception in case of not found
self.assertTrue(test_command[idx1 + 1] == 'jpg')
self.assertTrue(test_command.count('--cookies') == 2)
idx2 = test_command.index('--cookies')
self.assertTrue(test_command[idx2 + 1] == 'test_cookie1')
self.assertTrue(test_command[idx2 + 2] == 'cookie_value1')
idx3 = test_command.index('--cookies', idx2 + 2)
self.assertTrue(test_command[idx3 + 1] == 'test_cookie2')
self.assertTrue(test_command[idx3 + 2] == 'cookie_value2')
async def test_custom_config(self):
conf = await async_imgkit.config()
self.assertEqual('imgkit-', conf.meta_tag_prefix)
conf = await async_imgkit.config(meta_tag_prefix='prefix-')
self.assertEqual('prefix-', conf.meta_tag_prefix)
with self.assertRaises(IOError):
await async_imgkit.config(wkhtmltoimage='wrongpath')
class TestIMGKitCommandGeneration(aiounittest.AsyncTestCase):
"""Test command() method"""
async def test_command_construction(self):
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options={'format': 'jpg', 'toc-l1-font-size': 12})
command = r.command()
self.assertEqual(command[0], r.wkhtmltoimage)
self.assertEqual(command[command.index('--format') + 1], 'jpg')
self.assertEqual(command[command.index('--toc-l1-font-size') + 1], '12')
async def test_lists_of_input_args(self):
urls = ['http://ya.ru', 'http://google.com']
paths = ['fixtures/example.html', 'fixtures/example.html']
r = await async_imgkit.AsyncIMGKit.create(urls, 'url')
r2 = await async_imgkit.AsyncIMGKit.create(paths, 'file')
cmd = r.command()
cmd2 = r2.command()
self.assertEqual(cmd[-3:], ['http://ya.ru', 'http://google.com', '-'])
self.assertEqual(cmd2[-3:], ['fixtures/example.html', 'fixtures/example.html', '-'])
async def test_read_source_from_stdin(self):
r = await async_imgkit.AsyncIMGKit.create('html', 'string')
self.assertEqual(r.command()[-2:], ['-', '-'])
async def test_url_in_command(self):
r = await async_imgkit.AsyncIMGKit.create('http://ya.ru', 'url')
self.assertEqual(r.command()[-2:], ['http://ya.ru', '-'])
async def test_file_path_in_command(self):
path = 'fixtures/example.html'
r = await async_imgkit.AsyncIMGKit.create(path, 'file')
self.assertEqual(r.command()[-2:], [path, '-'])
async def test_output_path(self):
out = '/test/test2/out.jpg'
r = await async_imgkit.AsyncIMGKit.create('html', 'string')
self.assertEqual(r.command(out)[-1:], ['/test/test2/out.jpg'])
async def test_imgkit_meta_tags(self):
body = """
<html>
<head>
<meta name="imgkit-format" content="jpg"/>
<meta name="imgkit-orientation" content="Landscape"/>
</head>
"""
r = await async_imgkit.AsyncIMGKit.create(body, 'string')
command = r.command()
self.assertEqual(command[command.index('--format') + 1], 'jpg')
self.assertEqual(command[command.index('--orientation') + 1], 'Landscape')
async def test_imgkit_meta_tags_in_bad_markup(self):
body = """
<html>
<head>
<meta name="imgkit-format" content="jpg"/>
<meta name="imgkit-orientation" content="Landscape"/>
</head>
<br>
</html>
"""
r = await async_imgkit.AsyncIMGKit.create(body, 'string')
command = r.command()
self.assertEqual(command[command.index('--format') + 1], 'jpg')
self.assertEqual(command[command.index('--orientation') + 1], 'Landscape')
async def test_skip_nonimgkit_tags(self):
body = """
<html>
<head>
<meta name="test-page-size" content="Legal"/>
<meta name="imgkit-orientation" content="Landscape"/>
</head>
<br>
</html>
"""
r = await async_imgkit.AsyncIMGKit.create(body, 'string')
command = r.command()
self.assertEqual(command[command.index('--orientation') + 1], 'Landscape')
async def test_toc_handling_without_options(self):
        r = await async_imgkit.AsyncIMGKit.create('html', 'string', toc={'xsl-style-sheet': 'test.xsl'})
self.assertEqual(r.command()[1], 'toc')
self.assertEqual(r.command()[2], '--xsl-style-sheet')
async def test_toc_with_options(self):
options = {
'format': 'jpg',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8"
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=options, toc={'xsl-style-sheet': 'test.xsl'})
command = r.command()
self.assertEqual(command[1 + len(options) * 2], 'toc')
self.assertEqual(command[1 + len(options) * 2 + 1], '--xsl-style-sheet')
async def test_cover_without_options(self):
r = await async_imgkit.AsyncIMGKit.create('html', 'string', cover='test.html')
command = r.command()
self.assertEqual(command[1], 'cover')
self.assertEqual(command[2], 'test.html')
async def test_cover_with_options(self):
options = {
'format': 'jpg',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8"
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=options, cover='test.html')
command = r.command()
self.assertEqual(command[1 + len(options) * 2], 'cover')
self.assertEqual(command[1 + len(options) * 2 + 1], 'test.html')
async def test_cover_and_toc(self):
options = {
'format': 'jpg',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8"
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=options, toc={'xsl-style-sheet': 'test.xsl'}, cover='test.html')
command = r.command()
self.assertEqual(command[-7:], ['toc', '--xsl-style-sheet', 'test.xsl', 'cover', 'test.html', '-', '-'])
async def test_cover_and_toc_cover_first(self):
options = {
'format': 'jpg',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8"
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=options, toc={'xsl-style-sheet': 'test.xsl'}, cover='test.html',
cover_first=True)
command = r.command()
self.assertEqual(command[-7:], ['cover', 'test.html', 'toc', '--xsl-style-sheet', 'test.xsl', '-', '-'])
async def test_outline_options(self):
options = {
'outline': None,
'outline-depth': 1
}
r = await async_imgkit.AsyncIMGKit.create('ya.ru', 'url', options=options)
cmd = r.command()
# self.assertEqual(cmd[1:], ['--outline', '--outline-depth', '1', 'ya.ru', '-'])
self.assertIn('--outline', cmd)
self.assertEqual(cmd[cmd.index('--outline-depth') + 1], '1')
async def test_filter_empty_and_none_values_in_opts(self):
options = {
'outline': '',
'footer-line': None,
'quiet': False
}
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options=options)
cmd = r.command()
self.assertEqual(len(cmd), 6)
class TestIMGKitGeneration(aiounittest.AsyncTestCase):
"""Test to_img() method"""
def setUp(self):
self.file_path = tempfile.NamedTemporaryFile(suffix=".jpg").name
async def test_img_generation(self):
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options={'format': 'jpg'})
pic = await r.to_img(self.file_path)
self.assertTrue(pic)
@unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
async def test_img_generation_xvfb(self):
r = await async_imgkit.AsyncIMGKit.create('html', 'string', options={'format': 'jpg', 'xvfb': ''})
pic = await r.to_img(self.file_path)
self.assertTrue(pic)
async def test_raise_error_with_invalid_url(self):
r = await async_imgkit.AsyncIMGKit.create('wrongurl', 'url')
with self.assertRaises(IOError):
await r.to_img(self.file_path)
async def test_raise_error_with_invalid_file_path(self):
paths = ['frongpath.html', 'wrongpath2.html']
with self.assertRaises(IOError):
await async_imgkit.AsyncIMGKit.create('wrongpath.html', 'file')
with self.assertRaises(IOError):
await async_imgkit.AsyncIMGKit.create(paths, 'file')
async def test_stylesheet_adding_to_the_head(self):
# TODO rewrite this part of pdfkit.py
r = await async_imgkit.AsyncIMGKit.create('<html><head></head><body>Hai!</body></html>', 'string',
css='fixtures/example.css')
with open('fixtures/example.css') as f:
css = f.read()
r._prepend_css('fixtures/example.css')
self.assertIn('<style>%s</style>' % css, r.source.to_s())
async def test_stylesheet_adding_without_head_tag(self):
r = await async_imgkit.AsyncIMGKit.create('<html><body>Hai!</body></html>', 'string',
options={'quiet': None}, css='fixtures/example.css')
with open('fixtures/example.css') as f:
css = f.read()
r._prepend_css('fixtures/example.css')
self.assertIn('<style>%s</style><html>' % css, r.source.to_s())
async def test_multiple_stylesheets_adding_to_the_head(self):
# TODO rewrite this part of pdfkit.py
css_files = ['fixtures/example.css', 'fixtures/example2.css']
r = await async_imgkit.AsyncIMGKit.create('<html><head></head><body>Hai!</body></html>', 'string',
css=css_files)
css = []
for css_file in css_files:
with open(css_file) as f:
css.append(f.read())
r._prepend_css(css_files)
self.assertIn('<style>%s</style>' % "\n".join(css), r.source.to_s())
async def test_multiple_stylesheet_adding_without_head_tag(self):
css_files = ['fixtures/example.css', 'fixtures/example2.css']
r = await async_imgkit.AsyncIMGKit.create('<html><body>Hai!</body></html>', 'string',
options={'quiet': None}, css=css_files)
css = []
for css_file in css_files:
with open(css_file) as f:
css.append(f.read())
r._prepend_css(css_files)
self.assertIn('<style>%s</style><html>' % "\n".join(css), r.source.to_s())
async def test_stylesheet_throw_error_when_url(self):
r = await async_imgkit.AsyncIMGKit.create('http://ya.ru', 'url', css='fixtures/example.css')
with self.assertRaises(r.SourceError):
await r.to_img()
async def test_stylesheet_adding_to_file_with_option(self):
css = 'fixtures/example.css'
r = await async_imgkit.AsyncIMGKit.create('fixtures/example.html', 'file', css=css)
self.assertEqual(r.css, css)
r._prepend_css(css)
self.assertIn('font-size', r.source.to_s())
async def test_wkhtmltoimage_error_handling(self):
r = await async_imgkit.AsyncIMGKit.create('clearlywrongurl.asdf', 'url')
with self.assertRaises(IOError):
await r.to_img()
async def test_pdf_generation_from_file_like(self):
with open('fixtures/example.html', 'r') as f:
r = await async_imgkit.AsyncIMGKit.create(f, 'file')
output = await r.to_img()
self.assertEqual(output[:4], b'\xff\xd8\xff\xe0') # TODO img
async def test_raise_error_with_wrong_css_path(self):
css = 'fixtures/wrongpath.css'
r = await async_imgkit.AsyncIMGKit.create('fixtures/example.html', 'file', css=css)
with self.assertRaises(IOError):
await r.to_img()
async def test_raise_error_if_bad_wkhtmltoimage_option(self):
r = await async_imgkit.AsyncIMGKit.create('<html><body>Hai!</body></html>', 'string',
options={'bad-option': None})
with self.assertRaises(IOError) as cm:
await r.to_img()
raised_exception = cm.exception
self.assertRegex(str(raised_exception),
'^wkhtmltoimage exited with non-zero code 1. error:\nUnknown long argument '
'--bad-option\r?\n')
class TestIMGKitAPI(aiounittest.AsyncTestCase):
"""Test API"""
def setUp(self):
self.file_path = tempfile.NamedTemporaryFile(suffix=".jpg").name
async def test_from_string(self):
pic = await async_imgkit.from_string('hello imgkit!', self.file_path)
self.assertTrue(pic)
async def test_from_url(self):
pic = await async_imgkit.from_url('https://github.com', self.file_path)
self.assertTrue(pic)
async def test_from_file(self):
pic = await async_imgkit.from_file('fixtures/example.html', self.file_path)
self.assertTrue(pic)
if __name__ == "__main__":
unittest.main()
| 2.484375
| 2
|
tests/trac/test-trac-0132.py
|
eLBati/pyxb
| 123
|
12776697
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import sys
import pyxb
import unittest
class TestTrac0132 (unittest.TestCase):
message = 'bad character \u2620'
def testDecode (self):
e = pyxb.PyXBException(self.message)
if sys.version_info[:2] > (2, 4):
self.assertEqual(self.message, e.args[0])
if __name__ == '__main__':
unittest.main()
| 2.546875
| 3
|
lib/pics.py
|
MuffinAmor/nellie
| 1
|
12776698
|
<gh_stars>1-10
import json
import os
import sys
def create():
    if not os.path.isdir('pics'):
try:
os.mkdir('pics')
except:
pass
def add_pic(token, name, author_id: str, time: str, datas):
try:
create()
if not os.path.isfile("pics/{}.json".format(token)):
data = {
'room': name,
'author': author_id,
'time': time,
'data': datas
}
with open("pics/{}.json".format(token), "w+") as fp:
json.dump(data, fp, indent=4)
except:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def remove_pic(token):
if os.path.isfile("pics/{}.json".format(token)):
os.remove("Nellie/pics/{}.json".format(token))
def request_pic_room(token: str):
if os.path.isfile("pics/{}.json".format(token)):
with open("pics/{}.json".format(token), encoding='utf-8') as fp:
data = json.load(fp)
return data['room']
else:
return False
def request_pic_msg(token: str):
if os.path.isfile("pics/{}.json".format(token)):
with open("pics/{}.json".format(token), encoding='utf-8') as fp:
data = json.load(fp)
return data['data']
else:
return False
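# Minimal round-trip sketch (token and values are made up): store a picture record,
# read its room and payload back, then delete it again.
if __name__ == '__main__':
    add_pic('abc123', 'lobby', '42', '2020-01-01 12:00', 'payload-data')
    print(request_pic_room('abc123'))  # lobby
    print(request_pic_msg('abc123'))   # payload-data
    remove_pic('abc123')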
| 2.671875
| 3
|
server/plugins/gatekeeper/gatekeeper.py
|
nathandarnell/sal
| 215
|
12776699
|
<gh_stars>100-1000
from django.db.models import Q
import sal.plugin
TITLES = {
'ok': 'Machines with Gatekeeper enabled',
'alert': 'Machines without Gatekeeper enabled',
'unknown': 'Machines with unknown Gatekeeper status'}
PLUGIN_Q = Q(pluginscriptsubmission__plugin='Gatekeeper')
SCRIPT_Q = Q(pluginscriptsubmission__pluginscriptrow__pluginscript_name='Gatekeeper')
class Gatekeeper(sal.plugin.Widget):
supported_os_families = [sal.plugin.OSFamilies.darwin]
def get_context(self, queryset, **kwargs):
queryset = queryset.filter(os_family='Darwin')
context = self.super_get_context(queryset, **kwargs)
context['ok'] = self._filter(queryset, 'ok').count()
context['alert'] = self._filter(queryset, 'alert').count()
context['unknown'] = queryset.count() - context['ok'] - context['alert']
return context
def filter(self, machines, data):
if data not in TITLES:
return None, None
return self._filter(machines, data), TITLES[data]
def _filter(self, machines, data):
machines = machines.filter(os_family='Darwin')
if data == 'ok':
machines = (
machines
.filter(PLUGIN_Q,
SCRIPT_Q,
pluginscriptsubmission__pluginscriptrow__pluginscript_data='Enabled'))
elif data == 'alert':
machines = (
machines
.filter(PLUGIN_Q,
SCRIPT_Q,
pluginscriptsubmission__pluginscriptrow__pluginscript_data='Disabled'))
elif data == 'unknown':
machines = (
machines
.exclude(pk__in=self._filter(machines, 'ok').values('pk'))
.exclude(pk__in=self._filter(machines, 'alert').values('pk')))
return machines
| 2.015625
| 2
|
comicolorization/extensions/__init__.py
|
DwangoMediaVillage/Comicolorization
| 122
|
12776700
|
<gh_stars>100-1000
from .save_images import SaveGeneratedImageExtension, SaveRawImageExtension
| 1.09375
| 1
|
omnikinverter/models.py
|
klaasnicolaas/python-omnikinverter
| 5
|
12776701
|
<gh_stars>1-10
"""Models for Omnik Inverter."""
from __future__ import annotations
import json
import re
from dataclasses import dataclass
from typing import Any
from .exceptions import OmnikInverterWrongSourceError, OmnikInverterWrongValuesError
@dataclass
class Inverter:
"""Object representing an Inverter response from Omnik Inverter."""
serial_number: str | None
model: str | None
firmware: str | None
firmware_slave: str | None
solar_rated_power: int | None
solar_current_power: int | None
solar_energy_today: float | None
solar_energy_total: float | None
@staticmethod
def from_json(data: dict[str, Any]) -> Inverter:
"""Return Inverter object from the Omnik Inverter response.
Args:
data: The JSON data from the Omnik Inverter.
Returns:
An Inverter object.
Raises:
OmnikInverterWrongValuesError: Inverter pass on
incorrect data (day and total are equal).
"""
data = json.loads(data)
def get_value(search):
if data[search] != "":
return data[search]
return None
def validation(data_list):
"""Check if the values are not equal to each other.
Args:
data_list: List of values to check.
Returns:
Boolean value.
"""
res = all(ele == data_list[0] for ele in data_list)
return res
if validation([data["i_eday"], data["i_eall"]]):
raise OmnikInverterWrongValuesError(
"Inverter pass on incorrect data (day and total are equal)"
)
return Inverter(
serial_number=get_value("i_sn"),
model=get_value("i_modle"),
firmware=get_value("i_ver_m"),
firmware_slave=get_value("i_ver_s"),
solar_rated_power=get_value("i_pow"),
solar_current_power=int(get_value("i_pow_n")),
solar_energy_today=float(get_value("i_eday")),
solar_energy_total=float(get_value("i_eall")),
)
@staticmethod
def from_html(data: dict[str, Any]) -> Inverter:
"""Return Inverter object from the Omnik Inverter response.
Args:
data: The HTML (webscraping) data from the Omnik Inverter.
Returns:
An Inverter object.
"""
def get_value(search_key):
match = re.search(f'(?<={search_key}=").*?(?=";)', data.replace(" ", ""))
try:
value = match.group(0)
if value != "":
if search_key in ["webdata_now_p", "webdata_rate_p"]:
return int(value)
if search_key in ["webdata_today_e", "webdata_total_e"]:
return float(value)
return value
return None
except AttributeError as exception:
raise OmnikInverterWrongSourceError(
"Your inverter has no data source from a html file."
) from exception
return Inverter(
serial_number=get_value("webdata_sn"),
model=get_value("webdata_pv_type"),
firmware=get_value("webdata_msvn"),
firmware_slave=get_value("webdata_ssvn"),
solar_rated_power=get_value("webdata_rate_p"),
solar_current_power=get_value("webdata_now_p"),
solar_energy_today=get_value("webdata_today_e"),
solar_energy_total=get_value("webdata_total_e"),
)
@staticmethod
def from_js(data: dict[str, Any]) -> Inverter:
"""Return Inverter object from the Omnik Inverter response.
Args:
data: The JS (webscraping) data from the Omnik Inverter.
Returns:
An Inverter object.
"""
def get_value(position):
if data.find("webData") != -1:
matches = re.search(r'(?<=webData=").*?(?=";)', data)
else:
matches = re.search(r'(?<=myDeviceArray\[0\]=").*?(?=";)', data)
try:
data_list = matches.group(0).split(",")
if data_list[position] != "":
if position in [4, 5, 6, 7]:
if position in [4, 5]:
return int(data_list[position])
if position == 6:
energy_value = float(data_list[position]) / 100
if position == 7:
energy_value = float(data_list[position]) / 10
return energy_value
return data_list[position].replace(" ", "")
return None
except AttributeError as exception:
raise OmnikInverterWrongSourceError(
"Your inverter has no data source from a javascript file."
) from exception
return Inverter(
serial_number=get_value(0),
model=get_value(3),
firmware=get_value(1),
firmware_slave=get_value(2),
solar_rated_power=get_value(4),
solar_current_power=get_value(5),
solar_energy_today=get_value(6),
solar_energy_total=get_value(7),
)
@dataclass
class Device:
"""Object representing an Device response from Omnik Inverter."""
signal_quality: int | None
firmware: str | None
ip_address: str | None
@staticmethod
def from_json(data: dict[str, Any]) -> Device:
"""Return Device object from the Omnik Inverter response.
Args:
data: The JSON data from the Omnik Inverter.
Returns:
An Device object.
"""
data = json.loads(data)
return Device(
signal_quality=None,
firmware=data["g_ver"].replace("VER:", ""),
ip_address=data["ip"],
)
@staticmethod
def from_html(data: dict[str, Any]) -> Device:
"""Return Device object from the Omnik Inverter response.
Args:
data: The HTML (webscraping) data from the Omnik Inverter.
Returns:
An Device object.
"""
for correction in [" ", "%"]:
data = data.replace(correction, "")
def get_value(search_key):
match = re.search(f'(?<={search_key}=").*?(?=";)', data)
value = match.group(0)
if value != "":
return value
return None
return Device(
signal_quality=get_value("cover_sta_rssi"),
firmware=get_value("cover_ver"),
ip_address=get_value("cover_sta_ip"),
)
@staticmethod
def from_js(data: dict[str, Any]) -> Device:
"""Return Device object from the Omnik Inverter response.
Args:
data: The JS (webscraping) data from the Omnik Inverter.
Returns:
An Device object.
"""
for correction in [" ", "%"]:
data = data.replace(correction, "")
def get_value(search_key):
match = re.search(f'(?<={search_key}=").*?(?=";)', data)
value = match.group(0)
if value != "":
if search_key == "m2mRssi":
return int(value)
return value
return None
return Device(
signal_quality=get_value("m2mRssi"),
firmware=get_value("version"),
ip_address=get_value("wanIp"),
)
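# Hedged illustration, not from the library itself: a made-up JS payload in the
# myDeviceArray format the regexes above expect; field positions follow the indices
# used in Inverter.from_js.
if __name__ == "__main__":
    sample_js = 'myDeviceArray[0]="NLDN012345,V5.07,V4.25,omnik2000tl,2000,1225,250,47850,,,";'
    inverter = Inverter.from_js(sample_js)
    print(inverter.serial_number)        # NLDN012345
    print(inverter.solar_current_power)  # 1225
    print(inverter.solar_energy_today)   # 2.5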
| 2.8125
| 3
|
bumblebee_status/util/algorithm.py
|
rosalogia/bumblebee-status
| 1,089
|
12776702
|
import copy
def merge(target, *args):
"""Merges arbitrary data - copied from http://blog.impressiver.com/post/31434674390/deep-merge-multiple-python-dicts
:param target: the data structure to fill
:param args: a list of data structures to merge into target
:return: target, with all data in args merged into it
:rtype: whatever type was originally passed in
"""
if len(args) > 1:
for item in args:
merge(target, item)
return target
item = args[0]
if not isinstance(item, dict):
return item
for key, value in item.items():
if key in target and isinstance(target[key], dict):
merge(target[key], value)
else:
            if key not in target:
target[key] = copy.deepcopy(value)
return target
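# Quick illustrative check of the deep-merge behaviour described in the docstring above
# (values are made up): keys already present in target win, nested dicts merge recursively.
if __name__ == "__main__":
    target = {"theme": {"fg": "white"}, "interval": 5}
    merge(target, {"theme": {"bg": "black"}, "interval": 10})
    print(target)  # {'theme': {'fg': 'white', 'bg': 'black'}, 'interval': 5}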
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 3.359375
| 3
|
blitz_api/migrations/0014_model_translation.py
|
Jerome-Celle/Blitz-API
| 3
|
12776703
|
<gh_stars>1-10
# Generated by Django 2.0.2 on 2018-10-26 01:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blitz_api', '0013_historicalacademicfield_historicalacademiclevel_historicalactiontoken_historicaldomain_historicalorg'),
]
operations = [
migrations.AddField(
model_name='academicfield',
name='name_en',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='academicfield',
name='name_fr',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='academiclevel',
name='name_en',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='academiclevel',
name='name_fr',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='historicalacademicfield',
name='name_en',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='historicalacademicfield',
name='name_fr',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='historicalacademiclevel',
name='name_en',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='historicalacademiclevel',
name='name_fr',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='historicalorganization',
name='name_en',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='historicalorganization',
name='name_fr',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='organization',
name='name_en',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='organization',
name='name_fr',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
]
| 1.65625
| 2
|
src/build/lib/binance_f/model/openinterest.py
|
Han1018/Cryptocurrency-Automated-Trading
| 13
|
12776704
|
<reponame>Han1018/Cryptocurrency-Automated-Trading
class OpenInterest:
def __init__(self):
self.symbol = ""
self.openInterest = 0.0
@staticmethod
def json_parse(json_data):
result = OpenInterest()
result.symbol = json_data.get_string("symbol")
result.openInterest = json_data.get_float("openInterest")
return result
| 3.171875
| 3
|
player.py
|
tterava/PokerTrainingFramework
| 5
|
12776705
|
'''
Created on Jan 26, 2017
@author: tommi
'''
from enum import Enum
from handeval import pcg_brand
class Action(Enum):
CHECKFOLD = 1
CHECKCALL = 2
BETRAISE = 3
class Street(Enum):
PREFLOP = 0
FLOP = 3
TURN = 4
RIVER = 5
SHOWDOWN = 6
class PlayerState:
STACK_SIZE = 10000
MONEY_BEHIND = 990000
def __init__(self):
self.stack = self.STACK_SIZE
self.moneyLeft = self.MONEY_BEHIND
self.reset()
def bet(self, amount):
diff = amount - self.betSize
if diff >= self.stack:
self.betSize += self.stack
self.stack = 0
self.isAllIn = True
else:
self.betSize += diff
self.stack -= diff
def add_money(self, amount):
if amount > 0:
self.stack += amount
def reset(self):
self.betSize = 0
self.isAllIn = False
self.hasActed = False
self.hasFolded = False
self.cards = []
self.boardCards = []
return self.reload_stack()
def reload_stack(self):
if self.stack == 0:
if self.moneyLeft <= 0:
return False
else:
self.stack += min(self.STACK_SIZE, self.moneyLeft)
self.moneyLeft -= self.stack
return True
return True
class Agent: # Base class for all AI and human players. Plays random moves. Agent never modifies PlayerStates.
def __init__(self):
self.state = PlayerState()
def set_enemy_state(self, state):
self.enemyState = state
def get_action(self): # AI implementation
return Action(pcg_brand(3) + 1), pcg_brand(10000)
def update(self, street, pot):
pass
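# Small illustration of the betting bookkeeping above (amounts are arbitrary): a raise
# only moves the difference between the new bet size and the current one out of the stack.
if __name__ == '__main__':
    state = PlayerState()
    state.bet(400)
    state.bet(1200)
    print(state.betSize, state.stack)  # 1200 8800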
| 3.171875
| 3
|
drv/rpg/west_end.py
|
pelegm/drv
| 1
|
12776706
|
"""
.. west_end.py
"""
## Framework
import drv.game.base
## Sugar
ndk = drv.game.base.ndk
def test(skill, target):
""" Return a random variable which rolls a *skill* d6 dice, sums it, and
checks whether it is at least *target*. """
dice = ndk(skill, 6)
tst = (dice + skill) >= target
tst.name("d6 test: skill {} against target {}".format(skill, target))
tst.mask = {1: 'Success', 0: 'Failure'}
return tst
| 2.890625
| 3
|
src/usecases/update/update_engagement.py
|
lokaimoma/BLOGG
| 13
|
12776707
|
from typing import Callable
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.domain_logic.engagement_domain import EngagementDomain
from src.model import get_database_session
from src.model.engagement import Engagement
async def update_engagement(engagement_domain: EngagementDomain,
func: Callable[[], AsyncSession] = get_database_session) -> bool:
async with func() as session:
query = select(Engagement).filter(Engagement.blog_id ==
engagement_domain.blog_id,
Engagement.user_id == engagement_domain.user_id)
result = await session.execute(query)
# Crash In Case Of None
engagement: Engagement = result.scalar_one_or_none()
if not engagement:
return False
engagement.isLiked = engagement_domain.isLiked
await session.commit()
return True
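# Hedged usage sketch: the EngagementDomain constructor fields below are assumed from the
# attribute names read inside update_engagement (blog_id, user_id, isLiked), not confirmed
# against the domain class itself.
async def _example_toggle_like() -> bool:
    domain = EngagementDomain(blog_id=1, user_id=7, isLiked=True)
    return await update_engagement(domain)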
| 2.359375
| 2
|
api/kubeops_api/migrations/0021_merge_20190923_0906.py
|
240325184/KubeOperator
| 3
|
12776708
|
<reponame>240325184/KubeOperator
# Generated by Django 2.1.2 on 2019-09-23 09:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kubeops_api', '0019_deployexecution_params'),
('kubeops_api', '0020_auto_20190920_0946'),
]
operations = [
]
| 0.859375
| 1
|
takeoff/tests/generators/web/web_project_generator_test.py
|
themarceloribeiro/takeoff-py
| 0
|
12776709
|
from unittest import TestCase
from unittest.mock import MagicMock
from unittest.mock import patch
from takeoff import *
import os
class WebProjectGeneratorTest(TestCase):
def setUp(self):
os.system('rm -rf test_dist/blog')
self.g = WebProjectGenerator('blog', [])
self.real_system_call = self.g.system_call
self.g.system_call = MagicMock()
self.g.base_dist_folder = MagicMock(return_value='test_dist')
def setup_project(self):
self.g.system_call = self.real_system_call
self.g.create_structure_folders()
self.g.create_django_project()
self.g.prepare_settings()
def line_block(self, starting_line, finishing_line, lines):
block = []
started = False
for line in lines:
if line == starting_line:
started = True
if started:
block.append(line)
if line == finishing_line:
started = False
return block
def test_project_folder(self):
self.assertEqual(self.g.project_folder(), 'test_dist/blog/web/blog')
def test_create_structure_folders(self):
self.g.create_structure_folders()
self.g.system_call.assert_called_with('mkdir -p test_dist/blog/web/')
def test_migrate_call(self):
self.g.migrate()
self.g.system_call.assert_called_with('cd test_dist/blog/web/blog && python3 manage.py migrate')
def test_install_libraries_call(self):
self.g.install_required_libraries()
self.g.system_call.assert_called_with('pip3 install django-bootstrap4')
def test_start_django_project(self):
self.g.start_django_project()
self.g.system_call.assert_called_with(f"cd test_dist/blog/web && {self.g.django_admin} startproject blog")
def test_start_main_app(self):
self.g.start_main_app()
self.g.system_call.assert_called_with('cd test_dist/blog/web/blog && python3 manage.py startapp main')
def test_create_admin(self):
self.g.create_admin()
self.g.system_call.assert_called_with('cd test_dist/blog/web/blog && python3 manage.py createsuperuser')
def test_prepare_settings(self):
self.setup_project()
file = open('test_dist/blog/web/blog/blog/settings.py', 'r')
lines = list(file)
file.close()
self.assertIn(" 'main',\n", lines)
self.assertIn(" 'bootstrap4',\n", lines)
def test_generate_main_urls(self):
self.setup_project()
self.g.generate_main_urls()
file = open('test_dist/blog/web/blog/main/urls.py', 'r')
lines = list(file)
file.close()
expected_lines = [
'from django.urls import path\n',
'from . import views\n',
"app_name = 'main'\n",
'\n',
'urlpatterns = [\n', ']'
]
self.assertEqual(expected_lines, lines)
def test_prepare_urls(self):
self.setup_project()
self.g.prepare_urls()
file = open('test_dist/blog/web/blog/blog/urls.py', 'r')
lines = self.line_block(
'urlpatterns = [\n',
']\n',
list(file)
)
file.close()
expected_lines = [
'urlpatterns = [\n',
" path('', include('main.urls')),\n",
" path('admin/', admin.site.urls),\n",
']\n'
]
self.assertEqual(expected_lines, lines)
| 2.453125
| 2
|
laa_court_data_api_app/models/hearing_events/hearing_events_result.py
|
ministryofjustice/laa-court-data-api
| 1
|
12776710
|
<reponame>ministryofjustice/laa-court-data-api<gh_stars>1-10
from typing import Optional
from uuid import UUID
from pydantic import BaseModel
from laa_court_data_api_app.models.hearing_events.hearing_event.result.hearing_event import HearingEvent
class HearingEventsResult(BaseModel):
hearing_id: Optional[UUID] = None
has_active_hearing: Optional[bool] = None
events: Optional[list[HearingEvent]] = None
| 2.21875
| 2
|
tofawiki/domain/wikidata_translator.py
|
Nintendofan885/tofawiki
| 0
|
12776711
|
<filename>tofawiki/domain/wikidata_translator.py
import re
import sys
import pywikibot
from pywikibot import ItemPage
from SPARQLWrapper import JSON, SPARQLWrapper
class WikidataTranslator:
def __init__(self, repo, cache=None):
self.repo = repo
self.cache = cache
self.endpoint_url = "https://query.wikidata.org/sparql"
self.user_agent = "Tofawiki Python/%s.%s" % (sys.version_info[0], sys.version_info[1])
def data2fa(self, number, strict=False, loose=False):
if not number:
return ''
if isinstance(number, ItemPage):
number = number.getID(True)
cache_key = 'translate:fawiki:wikidatawiki:linktrans:'
if strict:
cache_key += 'strict:'
else:
cache_key += 'no_strict:'
cache_key += str(number)
if self.cache and self.cache.get_value(cache_key):
return self.cache.get_value(cache_key)
item_id = 'Q%d' % int(number)
params = {
'action': 'wbgetentities',
'ids': item_id,
'props': 'sitelinks|labels',
'languages': 'fa|en'
}
query_res = pywikibot.data.api.Request(site=self.repo, **params).submit()['entities'][item_id]
if query_res.get('sitelinks', {}).get('fawiki'):
name = query_res['sitelinks']['fawiki']['title']
if self.cache:
self.cache.write_new_cache(cache_key, name)
return name
if strict:
return ''
if query_res.get('labels', {}).get('fa'):
name = query_res['labels']['fa']['value']
if self.cache:
self.cache.write_new_cache(cache_key, name)
return name
if not loose:
return ''
if query_res.get('labels', {}).get('en'):
name = query_res['labels']['en']['value']
return name
return ''
def getRefferedItems(self, item, property_):
res = []
query = """SELECT ?item ?itemLabel
WHERE
{
?item wdt:""" + property_ + """ wd:"""+item.title()+""".
SERVICE wikibase:label { bd:serviceParam wikibase:language "fa". }
}"""
sparql = SPARQLWrapper(self.endpoint_url, agent=self.user_agent)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
result = []
for case in [i['itemLabel']['value'] for i in sparql.query().convert()["results"]["bindings"]]:
if re.search(r'^Q\d+$', case):
continue
result.append(case)
return result
| 2.671875
| 3
|
pytorch_layers/config.py
|
shuohan/pytorch-layers
| 0
|
12776712
|
# -*- coding: utf-8 -*-
"""Configurations and Enums"""
from enum import Enum
from singleton_config import Config as _Config
class ActivMode(str, Enum):
"""Enum of the activation names."""
RELU = 'relu'
LEAKY_RELU = 'leaky_relu'
class NormMode(str, Enum):
"""Enum of the normalization names."""
BATCH = 'batch'
INSTANCE = 'instance'
GROUP = 'group'
NONE = 'none'
class Dim(int, Enum):
"""Enum of the operation dimensionality."""
ONE = 1
TWO = 2
THREE = 3
class InterpMode(str, Enum):
"""Enum of the interpolate mode."""
LINEAR = 'linear'
NEAREST = 'nearest'
CUBIC = 'cubic'
AREA = 'area'
class PaddingMode(str, Enum):
"""Enum of the padding mode."""
ZEROS = 'zeros'
CIRCULAR = 'circular'
REFLECT = 'reflect'
REPLICATE = 'replicate'
class Config(_Config):
"""Global configurations for layer creation.
Attributes:
dim (int): Dimensionality of the operations.
activ_mode (ActivMode): Activation mode.
activ_kwargs (dict): Activation parameters.
norm_mode (NormMode): Normalization mode.
norm_kwargs (dict): Normalization parameters.
interp_mode (InterpMode): Interpolation mode.
interp_kwargs (dict): Interpolation parameters.
padding_mode (PaddingMode): Padding mode.
avg_pool_kwargs (dict): The average pooling kwargs.
"""
def __init__(self):
super().__init__()
self.add_config('dim', Dim.THREE, True)
self.add_config('activ_mode', ActivMode.RELU, True)
self.add_config('activ_kwargs', dict(), False)
self.add_config('norm_mode', NormMode.INSTANCE, True)
self.add_config('norm_kwargs', dict(affine=True), False)
self.add_config('interp_mode', InterpMode.NEAREST, True)
self.add_config('interp_kwargs', dict(), False)
self.add_config('dropout', 0.2, False)
self.add_config('padding_mode', PaddingMode.ZEROS, True)
self.add_config('avg_pool_kwargs', dict(), False)
sep_conv_kwargs = dict(norm_between=False, activ_between=False)
self.add_config('sep_conv_kwargs', sep_conv_kwargs, False)
@property
def dim(self):
return self._dim
@dim.setter
def dim(self, d):
if isinstance(d, Dim):
self._dim = d
elif isinstance(d, int):
self._dim = Dim(d)
else:
assert False
@property
def activ_mode(self):
return self._activ_mode
@activ_mode.setter
def activ_mode(self, m):
if isinstance(m, ActivMode):
self._activ_mode = m
elif isinstance(m, str):
self._activ_mode = ActivMode(m.lower())
else:
assert False
@property
def norm_mode(self):
return self._norm_mode
@norm_mode.setter
def norm_mode(self, m):
if isinstance(m, NormMode):
self._norm_mode = m
elif isinstance(m, str):
self._norm_mode = NormMode(m.lower())
else:
assert False
@property
def interp_mode(self):
return self._interp_mode
@interp_mode.setter
def interp_mode(self, m):
if isinstance(m, InterpMode):
self._interp_mode = m
elif isinstance(m, str):
self._interp_mode = InterpMode(m.lower())
else:
assert False
@property
def padding_mode(self):
return self._padding_mode
@padding_mode.setter
def padding_mode(self, m):
if isinstance(m, PaddingMode):
self._padding_mode = m
elif isinstance(m, str):
self._padding_mode = PaddingMode(m.lower())
else:
assert False
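# Hedged usage sketch (assumes the singleton_config base class allows direct instantiation):
# plain strings or ints assigned to the properties above are normalised into the enums.
if __name__ == '__main__':
    config = Config()
    config.dim = 2              # becomes Dim.TWO
    config.norm_mode = 'batch'  # becomes NormMode.BATCH
    print(config.dim, config.norm_mode)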
| 2.546875
| 3
|
datastructures/doubly_linked_list/doubly_linked_list.py
|
abhishekmulay/ds-algo-study
| 0
|
12776713
|
<gh_stars>0
class Node(object):
def __init__(self, data, next, prev):
self.data = data
self.next = next
self.previous = prev
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_next(self):
return self.next
def set_next(self, next):
self.next = next
def get_prev(self):
return self.previous
def set_previous(self, prev):
self.previous = prev
class DoublyLinkedList(object):
def __init__(self):
self.head = None
self.tail = None
self.count = 0
def _create_node(self, data):
return Node(data, None, None)
    def add(self, data):
        # there are no items in the list yet
        if self.count == 0:
            self.add_at_beginning(data)
            return
        # there is already at least one item in the list
        self.add_at_end(data)
def add_at_beginning(self, data):
node = self._create_node(data)
self.head = node
self.tail = node
node.previous = None
node.next = None
self.count += 1
    def add_at_end(self, data):
        print("self.head = " + str(self.head.get_data()) + " | self.tail = " + str(
            self.tail.get_data()) + " data = " + str(data))
        # append after the current tail and keep the list circular
        node = self._create_node(data)
        self.tail.next = node
        node.previous = self.tail
        node.next = self.head
        self.head.previous = node
        self.tail = node
        self.count += 1
# print "after add : " + self.display()
    def display(self):
        output = ""
        pointer = self.head
        # there is only one element
        if self.count == 1:
            output = str(self.head.get_data())
        elif self.count > 1:
            while pointer != self.tail:
                output += str(pointer.get_data()) + ", "
                pointer = pointer.get_next()
            output += str(self.tail.get_data())
        return output
def __str__(self):
return self.display()
if __name__ == '__main__':
lst = DoublyLinkedList()
lst.add("a")
lst.add("b")
lst.add("c")
lst.add("d")
    print(lst)
| 3.921875
| 4
|
metacells/pipeline/clean.py
|
orenbenkiki/metacells
| 0
|
12776714
|
<gh_stars>0
"""
Clean
-----
Raw single-cell RNA sequencing data is notoriously noisy and "dirty". The pipeline steps here
perform an initial analysis of the data and extract just the "clean" data for actually computing the
metacells. The steps provided here are expected to be generically useful, but as always specific
data sets may require custom cleaning steps on a case-by-case basis.
"""
from re import Pattern
from typing import Collection
from typing import List
from typing import Optional
from typing import Union
from anndata import AnnData # type: ignore
import metacells.parameters as pr
import metacells.tools as tl
import metacells.utilities as ut
__all__ = [
"analyze_clean_genes",
"pick_clean_genes",
"analyze_clean_cells",
"pick_clean_cells",
"extract_clean_data",
]
@ut.logged()
@ut.timed_call()
@ut.expand_doc()
def analyze_clean_genes(
adata: AnnData,
what: Union[str, ut.Matrix] = "__x__",
*,
properly_sampled_min_gene_total: int = pr.properly_sampled_min_gene_total,
noisy_lonely_max_sampled_cells: int = pr.noisy_lonely_max_sampled_cells,
noisy_lonely_downsample_min_samples: int = pr.noisy_lonely_downsample_min_samples,
noisy_lonely_downsample_min_cell_quantile: float = pr.noisy_lonely_downsample_min_cell_quantile,
noisy_lonely_downsample_max_cell_quantile: float = pr.noisy_lonely_downsample_max_cell_quantile,
noisy_lonely_min_gene_total: int = pr.noisy_lonely_min_gene_total,
noisy_lonely_min_gene_normalized_variance: float = pr.noisy_lonely_min_gene_normalized_variance,
noisy_lonely_max_gene_similarity: float = pr.noisy_lonely_max_gene_similarity,
excluded_gene_names: Optional[Collection[str]] = None,
excluded_gene_patterns: Optional[Collection[Union[str, Pattern]]] = None,
random_seed: int = pr.random_seed,
) -> None:
"""
Analyze genes in preparation for picking the "clean" subset of the ``adata``.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes.
**Returns**
Sets the following in the data:
Variable (gene) annotations:
``properly_sampled_gene``
A mask of the "properly sampled" genes.
``noisy_lonely_gene``
A mask of the "noisy lonely" genes.
``excluded_gene``
A mask of the genes which were excluded by name.
**Computation Parameters**
1. Invoke :py:func:`metacells.tools.properly_sampled.find_properly_sampled_genes` using
``properly_sampled_min_gene_total`` (default: {properly_sampled_min_gene_total}).
2. Invoke :py:func:`metacells.tools.noisy_lonely.find_noisy_lonely_genes` using
``noisy_lonely_max_sampled_cells`` (default: {noisy_lonely_max_sampled_cells}),
``noisy_lonely_downsample_min_samples`` (default: {noisy_lonely_downsample_min_samples}),
``noisy_lonely_downsample_min_cell_quantile`` (default:
{noisy_lonely_downsample_min_cell_quantile}), ``noisy_lonely_downsample_max_cell_quantile``
(default: {noisy_lonely_downsample_max_cell_quantile}), ``noisy_lonely_min_gene_total``
(default: {noisy_lonely_min_gene_total}), ``noisy_lonely_min_gene_normalized_variance``
(default: {noisy_lonely_min_gene_normalized_variance}), and
``noisy_lonely_max_gene_similarity`` (default: {noisy_lonely_max_gene_similarity}).
3. Invoke :py:func:`metacells.tools.named.find_named_genes` to exclude genes based on their
name, using the ``excluded_gene_names`` (default: {excluded_gene_names}) and
``excluded_gene_patterns`` (default: {excluded_gene_patterns}). This is stored in a
per-variable (gene) ``excluded_genes`` boolean mask.
"""
tl.find_properly_sampled_genes(adata, what, min_gene_total=properly_sampled_min_gene_total)
excluded_genes_mask: Optional[str]
if excluded_gene_names is not None or excluded_gene_patterns is not None:
excluded_genes_mask = "excluded_gene"
tl.find_named_genes(adata, to="excluded_gene", names=excluded_gene_names, patterns=excluded_gene_patterns)
else:
excluded_genes_mask = None
tl.find_noisy_lonely_genes(
adata,
what,
excluded_genes_mask=excluded_genes_mask,
max_sampled_cells=noisy_lonely_max_sampled_cells,
downsample_min_samples=noisy_lonely_downsample_min_samples,
downsample_min_cell_quantile=noisy_lonely_downsample_min_cell_quantile,
downsample_max_cell_quantile=noisy_lonely_downsample_max_cell_quantile,
min_gene_total=noisy_lonely_min_gene_total,
min_gene_normalized_variance=noisy_lonely_min_gene_normalized_variance,
max_gene_similarity=noisy_lonely_max_gene_similarity,
random_seed=random_seed,
)
CLEAN_GENES_MASKS = ["properly_sampled_gene?", "~noisy_lonely_gene?", "~excluded_gene?"]
@ut.timed_call()
@ut.expand_doc(masks=", ".join(CLEAN_GENES_MASKS))
def pick_clean_genes( # pylint: disable=dangerous-default-value
adata: AnnData, *, masks: List[str] = CLEAN_GENES_MASKS, to: str = "clean_gene"
) -> None:
"""
Create a mask of the "clean" genes that will be used to actually compute the metacells.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes.
**Returns**
Sets the following in the data:
Variable (gene) annotations:
``to`` (default: {to})
A mask of the "clean" genes to use for actually computing the metacells.
**Computation Parameters**
1. This simply AND-s the specified ``masks`` (default: {masks}) using
:py:func:`metacells.tools.mask.combine_masks`.
"""
tl.combine_masks(adata, masks, to=to)
@ut.logged()
@ut.timed_call()
def analyze_clean_cells(
adata: AnnData,
what: Union[str, ut.Matrix] = "__x__",
*,
properly_sampled_min_cell_total: Optional[int],
properly_sampled_max_cell_total: Optional[int],
properly_sampled_max_excluded_genes_fraction: Optional[float],
) -> None:
"""
Analyze cells in preparation for extracting the "clean" subset of the ``adata``.
Raw single-cell RNA sequencing data is notoriously noisy and "dirty". This pipeline step
performs initial analysis of the cells to allow us to extract just the "clean" data for
processing. The steps provided here are expected to be generically useful, but as always
specific data sets may require custom cleaning steps on a case-by-case basis.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes.
**Returns**
Sets the following in the full data:
Observation (cell) annotations:
``properly_sampled_cell``
A mask of the "properly sampled" cells.
**Computation Parameters**
1. If ``properly_sampled_max_excluded_genes_fraction`` is not ``None``, then consider all the
genes not covered by the ``clean_gene`` per-variable mask as "excluded" for computing the
excluded genes fraction for each cell.
2. Invoke :py:func:`metacells.tools.properly_sampled.find_properly_sampled_cells` using
``properly_sampled_min_cell_total`` (no default), ``properly_sampled_max_cell_total`` (no
default) and ``properly_sampled_max_excluded_genes_fraction`` (no default).
"""
excluded_adata: Optional[AnnData] = None
if properly_sampled_max_excluded_genes_fraction is not None:
excluded_genes = tl.filter_data(adata, name="dirty_genes", top_level=False, var_masks=["~clean_gene"])
if excluded_genes is not None:
excluded_adata = excluded_genes[0]
if excluded_genes is None:
max_excluded_genes_fraction = None
else:
max_excluded_genes_fraction = properly_sampled_max_excluded_genes_fraction
tl.find_properly_sampled_cells(
adata,
what,
min_cell_total=properly_sampled_min_cell_total,
max_cell_total=properly_sampled_max_cell_total,
excluded_adata=excluded_adata,
max_excluded_genes_fraction=max_excluded_genes_fraction,
)
CLEAN_CELLS_MASKS = ["properly_sampled_cell"]
@ut.timed_call()
@ut.expand_doc(masks=", ".join(CLEAN_CELLS_MASKS))
def pick_clean_cells( # pylint: disable=dangerous-default-value
adata: AnnData, *, masks: List[str] = CLEAN_CELLS_MASKS, to: str = "clean_cell"
) -> None:
"""
Create a mask of the "clean" cells that will be used to actually compute the metacells.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes.
**Returns**
Sets the following in the data:
Observation (cell) annotations:
``to`` (default: {to})
A mask of the "clean" cells to use for actually computing the metacells.
**Computation Parameters**
1. This simply AND-s the specified ``masks`` (default: {masks}) using
:py:func:`metacells.tools.mask.combine_masks`.
"""
tl.combine_masks(adata, masks, to=to)
@ut.logged()
@ut.timed_call()
@ut.expand_doc()
def extract_clean_data(
adata: AnnData,
obs_mask: str = "clean_cell",
var_mask: str = "clean_gene",
*,
name: Optional[str] = ".clean",
top_level: bool = True,
) -> Optional[AnnData]:
"""
Extract a "clean" subset of the ``adata`` to compute metacells for.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes.
**Returns**
Annotated sliced data containing the "clean" subset of the original data. By default, the
``name`` of this data is {name}. If this starts with a ``.``, this will be appended to the
current name of the data (if any).
The returned data will have ``full_cell_index`` and ``full_gene_index`` per-observation (cell)
and per-variable (gene) annotations to allow mapping the results back to the original data.
**Computation Parameters**
1. This simply :py:func:`metacells.tools.filter.filter_data` to slice just the
``obs_mask`` (default: {obs_mask}) and ``var_mask`` (default: {var_mask}) data using the
``name`` (default: {name}), and tracking the original ``full_cell_index`` and
``full_gene_index``.
"""
results = tl.filter_data(
adata,
name=name,
top_level=top_level,
track_obs="full_cell_index",
track_var="full_gene_index",
obs_masks=[obs_mask],
var_masks=[var_mask],
)
if results is None:
return None
return results[0]
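# Hedged end-to-end sketch of the cleaning steps above; the threshold values are purely
# illustrative (not recommendations) and ``adata`` is any annotated cells-by-genes matrix.
def _clean_pipeline_sketch(adata: AnnData) -> Optional[AnnData]:
    analyze_clean_genes(adata, excluded_gene_names=["XIST"])
    pick_clean_genes(adata)
    analyze_clean_cells(
        adata,
        properly_sampled_min_cell_total=800,
        properly_sampled_max_cell_total=8000,
        properly_sampled_max_excluded_genes_fraction=0.1,
    )
    pick_clean_cells(adata)
    return extract_clean_data(adata)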
| 2.40625
| 2
|
tests/utils/require.py
|
AmyYH/phantoscope
| 0
|
12776715
|
import time
from functools import wraps
from operators.operator import register_operators, delete_operators, operator_detail
from pipeline.pipeline import create_pipeline, delete_pipeline
from application.application import new_application, delete_application
def pre_operator(name="pytest_op_1", type="encoder",
addr="psoperator/vgg16-encoder:latest", author="phantoscope",
version="0.1", description="test operator"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
register_operators(name=name, type=type, addr=addr, author=author,
version=version, description=description)
func(*args, **kwargs)
delete_operators(name=name)
return wrapper
return decorator
def pre_instance(operator_name="pytest_op_1", name="ins1"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
operator = operator_detail(operator_name)
operator.new_instance(name)
func(*args, **kwargs)
operator.delete_instance(name)
return wrapper
return decorator
def pre_pipeline(name="pytest_pipe_1", processors="",
encoder={"name": "pytest_op_1", "instance": "ins1"},
description="test pipeline"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
create_pipeline(name=name, processors=processors, encoder=encoder)
func(*args, **kwargs)
delete_pipeline(name)
return wrapper
return decorator
def pre_application(name="pytest_app_1",
fields={"full": {"type": "pipeline", "value": "pytest_pipe_1"}},
s3_buckets="test-bucket"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
time.sleep(5) # wait for opertaor instance start
new_application(app_name=name, fields=fields, s3_bucket=s3_buckets)
func(*args, **kwargs)
delete_application(name, True)
return wrapper
return decorator
def sleep_time(seconds):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
time.sleep(seconds)
func(*args, **kwargs)
return wrapper
return decorator
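# Hedged usage sketch: the fixtures above are meant to be stacked outermost-first on a test
# so the operator exists before its instance, pipeline and application are created (default
# fixture names shown; the underscore prefix keeps pytest from collecting this example).
@pre_operator()
@pre_instance()
@pre_pipeline()
@pre_application()
def _example_application_round_trip():
    pass  # runs with operator "pytest_op_1", instance "ins1", pipeline "pytest_pipe_1" and app "pytest_app_1" in place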
| 2.265625
| 2
|
polling_stations/apps/data_importers/management/commands/import_tewkesbury.py
|
smsmith97/UK-Polling-Stations
| 29
|
12776716
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "TEW"
addresses_name = (
"2021-04-07T14:05:20.464410/Democracy_Club__06May2021_Tewkesbury Borough.tsv"
)
stations_name = (
"2021-04-07T14:05:20.464410/Democracy_Club__06May2021_Tewkesbury Borough.tsv"
)
elections = ["2021-05-06"]
csv_delimiter = "\t"
def address_record_to_dict(self, record):
if record.addressline6.strip() in [
"WR12 7NA",
"GL3 2HG",
"GL53 9QR",
"GL20 6JL",
"GL52 9HN",
"GL2 9FG",
"GL3 4SX",
]:
return None
return super().address_record_to_dict(record)
| 2.703125
| 3
|
aoc/day17/__init__.py
|
scorphus/advent-of-code-2020
| 9
|
12776717
|
<filename>aoc/day17/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Advent of Code 2020
# https://github.com/scorphus/advent-of-code-2020
# Licensed under the BSD-3-Clause license:
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2020, <NAME> <<EMAIL>>
import itertools
def part1(lines):
space = set()
for x, line in enumerate(lines):
for y, cube in enumerate(line):
if cube == "#":
space.add((x, y, 0))
for _ in range(6):
space = cycle(space)
return len(space)
def part2(lines):
space = set()
for x, line in enumerate(lines):
for y, cube in enumerate(line):
if cube == "#":
space.add((x, y, 0, 0))
for _ in range(6):
space = cycle(space, 4)
return len(space)
def cycle(space, dim=3):
active, next_space = {}, set()
for cube in space:
for deltas in itertools.product((-1, 0, 1), repeat=dim):
if any(deltas):
neighbor = tuple(c + d for c, d in zip(cube, deltas))
active[neighbor] = active.get(neighbor, 0) + 1
for cube, neighbors in active.items():
if neighbors == 3 or neighbors == 2 and cube in space:
next_space.add(cube)
return next_space
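# Sanity check with the small example grid commonly quoted for this puzzle (assumed input):
# the 3-D run should report 112 active cubes after six cycles and the 4-D run 848.
if __name__ == "__main__":
    example = [".#.", "..#", "###"]
    print(part1(example))  # 112
    print(part2(example))  # 848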
| 3.40625
| 3
|
libs/yowsup/yowsup/yowsup/demos/contacts/__init__.py
|
akshitpradhan/TomHack
| 22
|
12776718
|
from .stack import YowsupSyncStack
| 1.023438
| 1
|
python_scripts/layer_utils.py
|
rwilliams01/isogeometric_application
| 0
|
12776719
|
<reponame>rwilliams01/isogeometric_application
import math
import sys
from KratosMultiphysics import *
from KratosMultiphysics.StructuralApplication import *
from KratosMultiphysics.DiscontinuitiesApplication import *
from KratosMultiphysics.IsogeometricApplication import *
#
# Collapse each layer in Layers; every layer maintains its local numbering and connectivities
#
def CollapseLocal(Layers):
tol = 1.0e-6
# collapse each layer specified in layer list
for str_layer in Layers.layer_list:
node_map = {}
non_repetitive_nodes = []
# iterate all the nodes to check for node repetition
        for i_node in Layers.layer_nodes_sets[str_layer]:
            n = Layers.layer_nodes_sets[str_layer][i_node]
            for i_other_node in Layers.layer_nodes_sets[str_layer]:
                n1 = Layers.layer_nodes_sets[str_layer][i_other_node]
d = math.sqrt(math.pow(n[0] - n1[0], 2) + math.pow(n[1] - n1[1], 2) + math.pow(n[2] - n1[2], 2))
if d < tol:
node_map[i_node] = i_other_node
if i_other_node == i_node:
non_repetitive_nodes.append(i_node)
break
# reform the layer nodal set
new_nodes_set = {}
node_map_nonrepetitive = {}
cnt = 1
for i_node in non_repetitive_nodes:
            new_nodes_set[cnt] = Layers.layer_nodes_sets[str_layer][i_node]
            node_map_nonrepetitive[i_node] = cnt
            cnt = cnt + 1
        Layers.layer_nodes_sets[str_layer] = new_nodes_set
        # reform the entity connectivities
        for i_entity in Layers.layer_entities_sets[str_layer]:
            new_entity = []
            for i_node in Layers.layer_entities_sets[str_layer][i_entity]:
                new_entity.append(node_map_nonrepetitive[node_map[i_node]])
            Layers.layer_entities_sets[str_layer][i_entity] = new_entity
# turn on local collapse flag
Layers.is_collapse_local = True
#
# Collapse all layers in Layers; all the layers nodal set will be reformed after collapsing. For this to work correctly, the model needs to be collapsed locally first to keep a list of non-repeated nodes
#
def Collapse(Layers):
    if not Layers.is_collapse_local:
print("Error: the layers are not locally collapsed yet")
sys.exit(0)
tol = 1.0e-6
binning_util = SpatialBinningUtility(0, 0, 0, 0.5, 0.5, 0.5, tol)
# extract all the nodes and put into the spatial binning
node_map = {}
    for str_layer in Layers.layer_list:
        node_map[str_layer] = {}
        for i_node in Layers.layer_nodes_sets[str_layer]:
            n = Layers.layer_nodes_sets[str_layer][i_node]
new_id = binning_util.AddNode(n[0], n[1], n[2])
node_map[str_layer][i_node] = new_id
| 2.328125
| 2
|
2020/13/solution1.py
|
frenzymadness/aoc
| 2
|
12776720
|
with open("input.txt") as input_file:
time = int(input_file.readline().strip())
busses = input_file.readline().strip().split(",")
def departures(bus):
multiplier = 0
while True:
multiplier += 1
yield multiplier * bus
def next_after(bus, time):
for departure in departures(bus):
if departure >= time:
return departure
future_departures = []
for bus in busses:
if bus == "x":
continue
bus = int(bus)
future_departures.append((next_after(bus, time), bus))
departure, bus = min(future_departures)
print((departure - time) * bus)
| 3.609375
| 4
|
test_wsgi.py
|
MLGB3/buildout.mlgb
| 0
|
12776721
|
<gh_stars>0
import sys
def application(environ, start_response):
status = '200 OK'
output = ''
output += 'sys.version = %s\n' % repr(sys.version)
output += 'sys.prefix = %s\n' % repr(sys.prefix)
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
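# Quick local check (an illustrative sketch; this module is normally mounted by a WSGI
# server such as mod_wsgi, and the str body above matches Python 2 / classic behaviour,
# under Python 3 the response body would need to be bytes):
# from wsgiref.simple_server import make_server
# make_server('', 8000, application).serve_forever()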
| 2.25
| 2
|
diff_ana.py
|
shun60s/glottal-source-spectrum
| 2
|
12776722
|
#coding:utf-8
# return candidate position set of one pitch duration near center of the frame
# by differential change point and threshold from bottom line.
# return 0 if there is no.
#
# Returns the candidate index pair [sp, ep] of one pitch period near the center of the frame.
# Returns zero when there is no candidate.
#
# Candidates are selected from the change points of the derivative and a threshold above the bottom line.
import numpy as np
import matplotlib.pyplot as plt
# Check version
# Python 3.6.4, 64bit on Win32 (Windows 10)
# numpy (1.14.0)
def diff_ana(y, sr, show=False):
    # (1) select by change of slope
    f_prime=np.gradient(y) # numerical gradient (slope)
    indices_diff0 = np.where( np.diff(np.sign(f_prime)) > 0.0 )[0] # take the difference of the signs (-1,0,1) and detect the points where it turns positive
    # (2) select values close to the bottom line
    thres0= (np.amax(y) - np.amin(y)) * 0.25 + np.amin(y) # use values within 25% of the amplitude range above the minimum as candidates
indices_thres0 = np.where( y < thres0 )[0]
    # (3) take the intersection (logical AND) of the two conditions above
indices=np.sort(np.array(list( set(indices_diff0) & set(indices_thres0))))
infections = y[indices]
    if len(indices) >= 2: # search only when there are two or more candidates
        index0= np.argmin(np.abs(indices - len(y)/2)) # find the candidate index closest to the center
        if len(indices) == 2: # when there are only two candidates
sp= indices[0]
ep= indices[1]
        elif index0 < len(y)/2 and indices[-1] > len(y)/2 : # if that candidate is before the center
sp= indices[index0]
ep= indices[index0+1]
else:
sp= indices[index0-1]
ep= indices[index0]
    else: # no candidates
sp=0
ep=0
indices1=np.array([sp,ep])
infections1 = y[indices1]
#print ( indices, indices1)
#print ('select index, [Hz]', indices1, (sr / (indices1[1]-indices1[0])) )
if show:
fig = plt.figure()
ax1 = fig.add_subplot(311)
        plt.title('diff: two red circles show selected portion')
plt.xlabel('mSec')
plt.ylabel('level')
ax1.plot(np.arange(len(y)) * 1000.0 / sr, y, 'bo-', ms=2)
ax1.plot(indices * 1000.0 / sr, infections, 'yo', ms=5)
ax1.plot(indices1 * 1000.0 / sr, infections1, 'ro', ms=5)
ax2 = fig.add_subplot(312)
ax2.plot(np.arange(len(f_prime)) * 1000.0 / sr, f_prime, 'ro', ms=5)
ax3 = fig.add_subplot(313)
f_prime2=np.gradient(f_prime)
indices2 = np.where(np.diff(np.sign(f_prime2)))[0]
infections2 = y[indices2]
ax3.plot(np.arange(len(y)) * 1000.0 / sr, y, 'bo-', ms=2)
ax3.plot(indices2 * 1000.0 / sr, infections2, 'ro', ms=5)
plt.show()
return int(sp), int(ep)
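# Minimal self-test (a sketch with a synthetic signal, not part of the original analysis):
if __name__ == '__main__':
    sr0 = 16000
    t0 = np.arange(0, 0.02, 1.0 / sr0)
    y0 = np.sin(2.0 * np.pi * 200.0 * t0)  # a 200 Hz tone, so one pitch is about 80 samples
    sp0, ep0 = diff_ana(y0, sr0, show=False)
    if ep0 > sp0:
        print('estimated pitch [Hz]:', sr0 / (ep0 - sp0))  # expect roughly 200 Hz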
| 2.578125
| 3
|
plugins/example/models.py
|
collingreen/yaib_ludumdare
| 1
|
12776723
|
<reponame>collingreen/yaib_ludumdare
from sqlalchemy import Table, Column, String, Integer
from modules.persistence import Base, getModelBase
"""
Specify custom database tables for your plugins by creating classes here that
subclass Base and the custom ModelBase and include sqlalchemy fields. See the
sqlalchemy docs about declarative schema for more info.
This creates a table in the database called example_thing with two columns, a 50
character string called name and an integer called count.
Importing Thing in the plugin allows manipulating the database using
standard sqlalchemy. See the sqlalchemy persistence module and the sqlalchemy
docs for more info.
"""
ModelBase = getModelBase('example')
class Thing(Base, ModelBase):
name = Column(String(50))
count = Column(Integer)
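# Usage sketch (hypothetical plugin code; `session` stands for whatever sqlalchemy session
# the persistence module hands out and is an assumption, not part of this file):
#
# from plugins.example.models import Thing
#
# thing = Thing(name='widget', count=1)
# session.add(thing)
# session.commit()
# widgets = session.query(Thing).filter(Thing.name == 'widget').count()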
| 2.53125
| 3
|
class9/ex4/mytest/world.py
|
patrebert/pynet_cert
| 0
|
12776724
|
def func3():
print "world.py func3"
class MyClass:
def __init__(self,arg1, arg2, arg3):
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
def hello(self):
print "hello"
print " %s %s %s" %(self.arg1,self.arg2,self.arg3)
def not_hello(self):
print "not_hello"
print " %s %s %s" %(self.arg1,self.arg2,self.arg3)
def testclass():
hell = MyClass('1', '2', '3')
hell.hello()
hell.not_hello()
def main():
func3()
hell = MyClass('1','2','3')
hell.hello()
hell.not_hello()
if __name__ == '__main__':
main()
| 3.796875
| 4
|
leetcode/lessons/linked_list/092_reverse_between/__init__.py
|
wangkuntian/leetcode
| 0
|
12776725
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__project__ = 'leetcode'
__file__ = '__init__.py'
__author__ = 'king'
__time__ = '2019/11/18 16:54'
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
佛祖保佑 永无BUG
"""
from leetcode.lessons.linked_list import ListNode
from leetcode.utils.timeutils import time_interval
'''
Difficulty: Medium
Reverse a linked list from position m to n. Do it in one pass.
Note:
1 ≤ m ≤ n ≤ length of the list.
Example:
Input: 1->2->3->4->5->NULL, m = 2, n = 4
Output: 1->4->3->2->5->NULL
'''
class Solution(object):
@time_interval
def reverseBetween(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
result = node = ListNode(None)
result.next = head
n -= m
while m > 1:
node = node.next
m -= 1
tail = None
reversed_head = None
next_reverse = node.next
while n >= 0:
tail = next_reverse.next
next_reverse.next = reversed_head
reversed_head = next_reverse
next_reverse = tail
n -= 1
node.next.next = tail
node.next = reversed_head
return result.next
@time_interval
def reverseBetween2(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
if not head:
return None
cur, pre = head, None
while m > 1:
pre = cur
cur = cur.next
m -= 1
n -= 1
tail, con = cur, pre
while n:
temp = cur.next
cur.next = pre
pre = cur
cur = temp
n -= 1
if con:
con.next = pre
else:
head = pre
tail.next = cur
return head
temp = ListNode(None)
def reverse_n(self, head, n):
if n == 1:
self.temp = head.next
return head
last = self.reverse_n(head.next, n - 1)
head.next.next = head
head.next = self.temp
return last
@time_interval
def reverseBetween3(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
if m == 1:
return self.reverse_n(head, n)
head.next = self.reverseBetween3(head.next, m - 1, n - 1)
return head
l1 = ListNode.generate([1, 2, 3, 4, 5])
print(Solution().reverseBetween(l1, 2, 4))
l2 = ListNode.generate([1, 2, 3, 4, 5])
print(Solution().reverseBetween2(l2, 2, 4))
l2 = ListNode.generate([1, 2, 3, 4, 5])
print(Solution().reverseBetween3(l2, 2, 4))
| 2.359375
| 2
|
tests/CallejeroTestCase.py
|
santiagocastellano/normalizador-amba-Python3
| 4
|
12776726
|
# coding: UTF-8
import unittest
from usig_normalizador_amba.Callejero import Callejero
from usig_normalizador_amba.Partido import Partido
from usig_normalizador_amba.Calle import Calle
from tests.test_commons import cargarCallejeroEstatico
class CallejeroTestCase(unittest.TestCase):
p = Partido('jose_c_paz', '<NAME>', 'Partido de <NAME>', 2430431)
c = Callejero(p)
cargarCallejeroEstatico(c)
p = Partido('general_san_martin', 'General San Martin', 'Partido de General San Martin', 1719022)
c_san_martin = Callejero(p)
cargarCallejeroEstatico(c_san_martin)
def _checkCalle(self, calle, codigo, nombre, codigo_partido, localidad):
self.assertTrue(isinstance(calle, Calle))
self.assertEqual(calle.codigo, codigo)
self.assertEqual(calle.nombre, nombre)
self.assertEqual(calle.partido.codigo, codigo_partido)
self.assertEqual(calle.localidad, localidad)
def testCallejero_callejero_inexistent(self):
p = Partido('jose_paz', '<NAME>', 'Partido de José C. Paz', 2430431)
self.assertRaises(ValueError, Callejero, p)
def testCallejero_buscarCalle_calle_inexistente(self):
res = self.c.buscarCalle('kokusai dori')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 0, 'No debería haber matching.')
def testCallejero_buscarCalle_unica_calle_existente(self):
res = self.c.buscarCalle('<NAME> Compostela')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 53658, 'S<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_permutado(self):
res = self.c.buscarCalle('Compostela Santiago de')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 53658, 'Santiago de Compostela', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_incompleto(self):
res = self.c.buscarCalle('Compos Santi')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 53658, 'Santiago de Compostela', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_con_acento_y_case(self):
res = self.c.buscarCalle('PoToSÍ')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 341221, 'Potosí', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_con_enie(self):
res = self.c.buscarCalle('<NAME>')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 77440, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_multiples_calles_existentes(self):
res = self.c.buscarCalle('San')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 16, 'Debería haber 16 matchings.')
resCalles = ['San Lorenzo', 'San Nicolás', 'San Blas', 'San Salvador', 'San Luis', 'San Marino', 'San Agustín',
'Santiago del Estero', 'Santiago de Compostela', 'Santiago L. Copello', 'Santa Marta', 'Santo Domingo',
'Santa Ana', 'Santiago de Liniers', 'Santa María', 'S<NAME>']
for calle in res:
self.assertTrue(isinstance(calle, Calle))
self.assertTrue(calle.nombre in resCalles)
def testCallejero_buscarCalle_calles_con_y_01(self):
res = self.c.buscarCalle('Gelly y Obes')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matchings.')
self._checkCalle(res[0], 77481, 'Gelly y Obes', 'jose_c_paz', '<NAME>')
res = self.c.buscarCalle('g y o')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matchings.')
self._checkCalle(res[0], 77481, 'Gelly y Obes', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calles_con_y_02(self):
res = self.c.buscarCalle('<NAME>')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matchings.')
self._checkCalle(res[0], 11702, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calles_con_e_01(self):
res = self.c.buscarCalle('<NAME>')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 78817, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCodigo_codigo_valido(self):
res = self.c.buscarCodigo(314724)
self.assertTrue(isinstance(res, list))
self.assertTrue(res[0][0] == 314724)
self.assertTrue(res[0][1] == '<NAME> (M) / <NAME> (JCP)')
def testCallejero_buscarCodigo_codigo_invalido(self):
res = self.c.buscarCodigo(666)
self.assertTrue(res == [])
def testCallejero_buscarCalle_sinonimos_01(self):
res1 = self.c.buscarCalle('11')
self.assertTrue(isinstance(res1, list))
self.assertEqual(len(res1), 1, 'Debería haber 1 matching.')
res2 = self.c.buscarCalle('once')
self.assertTrue(isinstance(res2, list))
self.assertEqual(len(res2), 1, 'Debería haber 1 matching.')
self.assertEqual(res1[0].codigo, res2[0].codigo)
def testCallejero_buscarCalle_sinonimos_02(self):
res1 = self.c.buscarCalle('3') # 3 de Febrero, Tres Sargentos y Las Tres Marías
self.assertTrue(isinstance(res1, list))
self.assertEqual(len(res1), 3, 'Debería haber 1 matching.')
self.assertTrue(res1[0].codigo in [78879, 53341, 237007])
self.assertTrue(res1[1].codigo in [78879, 53341, 237007])
self.assertTrue(res1[2].codigo in [78879, 53341, 237007])
def testCallejero_buscarCalle_muchos_espacios(self):
res = self.c.buscarCalle(' puerto principe ')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 183044, 'P<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calle_con_parentesis(self):
res = self.c.buscarCalle('Coliqueo (JCP)')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 186501, 'Intendente Arricau (SM) / Cacique Coliqueo (JCP)', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_caracteres_raros(self):
res = self.c.buscarCalle('puerto principe |°¬!#$%&/()=?\¿¡*¸+~{[^}]\'`-_.:,;<>·@')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 183044, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calle_con_acente_escrito_sin_acento(self):
res = self.c.buscarCalle('potosi')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 341221, 'Potosí', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calle_con_numeros(self):
res = self.c_san_martin.buscarCalle('26 de Julio de 1890')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 70996, '103 - 26 de Julio de 1890', 'general_san_martin', 'General San Martín')
| 2.578125
| 3
|
SoundServer_test.py
|
yoyoberenguer/SoundServer
| 0
|
12776727
|
try:
import pygame
except ImportError:
raise ImportError("\n<pygame> library is missing on your system."
"\nTry: \n C:\\pip install pygame on a window command prompt.")
from SoundServer import *
if __name__ == "__main__":
pygame.mixer.init()
sound1 = pygame.mixer.Sound('Alarm9.ogg')
SCREENRECT = pygame.Rect(0, 0, 800, 1024)
pygame.display.set_mode((SCREENRECT.w, SCREENRECT.h))
SND = SoundControl(SCREENRECT, 8)
# SND.play(sound1, -1, volume_=1.0, panning_=False)
SND.play(sound1, -1, volume_=1.0, panning_=True, x_=400, fade_in_ms=0, fade_out_ms=0)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=100)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=200)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=400)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=800)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=100)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=450)
x = 0
v = 1.0
FRAME = 0
while 1:
# SND.show_sounds_playing()
pygame.event.pump()
pygame.display.flip()
# SND.update_sounds_panning(x, v)
SND.update_sound_panning(x, 1.0, name_="", id_=id(sound1))
if x < SCREENRECT.w:
SND.update_sound_panning(x, 0.2, None, id(sound1))
x += 0.1
else:
SND.update_volume(1)
SND.update()
if 4000 < FRAME < 9000:
SND.pause_sounds()
else:
SND.unpause_sound(id_=id(sound1))
SND.show_free_channels()
SND.show_sounds_playing()
print(SND.return_time_left(id(sound1)))
print(FRAME)
if FRAME == 1000:
SND.stop_all()
print(SND.get_identical_sounds(sound1))
print(SND.get_identical_id(id(sound1)))
x += 1
x %= SCREENRECT.w
FRAME += 1
| 2.8125
| 3
|
neurolib/models/multimodel/builder/aln.py
|
FabianKamp/neurolib
| 0
|
12776728
|
<reponame>FabianKamp/neurolib
import logging
import os
from copy import deepcopy
import numba
import numpy as np
import symengine as se
from h5py import File
from jitcdde import input as system_input
from ....utils.stimulus import OrnsteinUhlenbeckProcess
from ..builder.base.constants import EXC, INH, LAMBDA_SPEED
from ..builder.base.network import Network, SingleCouplingExcitatoryInhibitoryNode
from ..builder.base.neural_mass import NeuralMass
DEFAULT_QUANTITIES_CASCADE_FILENAME = "quantities_cascade.h5"
ALN_EXC_DEFAULT_PARAMS = {
# number of inputs per neuron from EXC/INH
"Ke": 800.0,
"Ki": 200.0,
# postsynaptic potential amplitude for global connectome
"c_gl": 0.4,
# number of incoming EXC connections (to EXC population) from each area
"Ke_gl": 250.0,
# synaptic time constant EXC/INH
"tau_se": 2.0, # ms
"tau_si": 5.0, # ms
# internal Fokker-Planck noise due to random coupling
"sigmae_ext": 1.5, # mV/sqrt(ms)
# maximum synaptic current between EXC/INH nodes in mV/ms
"Jee_max": 2.43,
"Jei_max": -3.3,
# single neuron parameters
"C": 200.0,
"gL": 10.0,
# external drives
"ext_exc_current": 0.0,
"ext_exc_rate": 0.0,
# adaptation current model parameters
# subthreshold adaptation conductance
"a": 15.0, # nS
# spike-triggered adaptation increment
"b": 40.0, # pA
"EA": -80.0,
"tauA": 200.0,
"lambda": LAMBDA_SPEED,
}
ALN_INH_DEFAULT_PARAMS = {
# number of inputs per neuron from EXC/INH
"Ke": 800.0,
"Ki": 200.0,
# postsynaptic potential amplitude for global connectome
"c_gl": 0.4,
# number of incoming EXC connections (to EXC population) from each area
"Ke_gl": 250.0,
# synaptic time constant EXC/INH
"tau_se": 2.0, # ms
"tau_si": 5.0, # ms
# internal Fokker-Planck noise due to random coupling
"sigmai_ext": 1.5, # mV/sqrt(ms)
# maximum synaptic current between EXC/INH nodes in mV/ms
"Jie_max": 2.60,
"Jii_max": -1.64,
# single neuron parameters
"C": 200.0,
"gL": 10.0,
# external drives
"ext_inh_current": 0.0,
"ext_inh_rate": 0.0,
"lambda": LAMBDA_SPEED,
}
# matrices as [to, from], masses as (EXC, INH)
# EXC is index 0, INH is index 1
ALN_NODE_DEFAULT_CONNECTIVITY = np.array([[0.3, 0.5], [0.3, 0.5]])
# same but delays, in ms
ALN_NODE_DEFAULT_DELAYS = np.array([[4.0, 2.0], [4.0, 2.0]])
@numba.njit()
def _get_interpolation_values(xi, yi, sigma_range, mu_range, d_sigma, d_mu):
"""
Return values needed for interpolation: bilinear (2D) interpolation
within ranges, linear (1D) if "one edge" is crossed, corner value if
"two edges" are crossed. Defined as jitted function due to compatibility
with numba backend.
:param xi: interpolation value on x-axis, i.e. I_sigma
:type xi: float
:param yi: interpolation value on y-axis, i.e. I_mu
:type yi: float
:param sigma_range: range of x-axis, i.e. sigma values
:type sigma_range: np.ndarray
:param mu_range: range of y-axis, i.e. mu values
:type mu_range: np.ndarray
    :param d_sigma: grid coarseness in the x-axis, i.e. sigma values
    :type d_sigma: float
    :param d_mu: grid coarseness in the y-axis, i.e. mu values
:type d_mu: float
:return: index of the lower interpolation value on x-axis, index of the
lower interpolation value on y-axis, distance of xi to the lower
value, distance of yi to the lower value
:rtype: (int, int, float, float)
"""
# within all boundaries
if xi >= sigma_range[0] and xi < sigma_range[-1] and yi >= mu_range[0] and yi < mu_range[-1]:
xid = (xi - sigma_range[0]) / d_sigma
xid1 = np.floor(xid)
dxid = xid - xid1
yid = (yi - mu_range[0]) / d_mu
yid1 = np.floor(yid)
dyid = yid - yid1
return int(xid1), int(yid1), dxid, dyid
# outside one boundary
if yi < mu_range[0]:
yid1 = 0
dyid = 0.0
if xi >= sigma_range[0] and xi < sigma_range[-1]:
xid = (xi - sigma_range[0]) / d_sigma
xid1 = np.floor(xid)
dxid = xid - xid1
elif xi < sigma_range[0]:
xid1 = 0
dxid = 0.0
else: # xi >= x(end)
xid1 = -1
dxid = 0.0
return int(xid1), int(yid1), dxid, dyid
if yi >= mu_range[-1]:
yid1 = -1
dyid = 0.0
if xi >= sigma_range[0] and xi < sigma_range[-1]:
xid = (xi - sigma_range[0]) / d_sigma
xid1 = np.floor(xid)
dxid = xid - xid1
elif xi < sigma_range[0]:
xid1 = 0
dxid = 0.0
else: # xi >= x(end)
xid1 = -1
dxid = 0.0
return int(xid1), int(yid1), dxid, dyid
if xi < sigma_range[0]:
xid1 = 0
dxid = 0.0
yid = (yi - mu_range[0]) / d_mu
yid1 = np.floor(yid)
dyid = yid - yid1
return int(xid1), int(yid1), dxid, dyid
if xi >= sigma_range[-1]:
xid1 = -1
dxid = 0.0
yid = (yi - mu_range[0]) / d_mu
yid1 = np.floor(yid)
dyid = yid - yid1
return int(xid1), int(yid1), dxid, dyid
@numba.njit()
def _table_lookup(
current_mu,
current_sigma,
sigma_range,
mu_range,
d_sigma,
d_mu,
transfer_function_table,
):
"""
Translate mean and std. deviation of the current to selected quantity using
linear-nonlinear lookup table for ALN. Defined as jitted function due to
compatibility with numba backend.
"""
x_idx, y_idx, dx_idx, dy_idx = _get_interpolation_values(
current_sigma, current_mu, sigma_range, mu_range, d_sigma, d_mu
)
return (
transfer_function_table[y_idx, x_idx] * (1 - dx_idx) * (1 - dy_idx)
+ transfer_function_table[y_idx, x_idx + 1] * dx_idx * (1 - dy_idx)
+ transfer_function_table[y_idx + 1, x_idx] * (1 - dx_idx) * dy_idx
+ transfer_function_table[y_idx + 1, x_idx + 1] * dx_idx * dy_idx
)
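# Worked example of the bilinear weighting above (illustrative numbers only):
# with dx_idx = 0.25 and dy_idx = 0.5 the four corner weights are
#   (1 - 0.25) * (1 - 0.5) = 0.375,  0.25 * (1 - 0.5) = 0.125,
#   (1 - 0.25) * 0.5       = 0.375,  0.25 * 0.5       = 0.125,
# which sum to 1, so the lookup returns a convex combination of the four table
# entries surrounding (current_sigma, current_mu).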
class ALNMass(NeuralMass):
"""
Adaptive linear-nonlinear neural mass model of exponential integrate-and-fire (AdEx)
neurons.
References:
<NAME>., <NAME>. (2020). Biophysically grounded mean-field models of
neural populations under electrical stimulation. PLoS Comput Biol, 16(4),
e1007822.
<NAME>., <NAME>., <NAME>., & <NAME>. (2017).
Low-dimensional spike rate models derived from networks of adaptive
integrate-and-fire neurons: comparison and implementation. PLoS Comput Biol,
13(6), e1005545.
"""
name = "ALN neural mass model"
label = "ALNMass"
# define python callback function name for table lookup (linear-nonlinear
# approximation of Fokker-Planck equation)
python_callbacks = ["firing_rate_lookup", "voltage_lookup", "tau_lookup"]
num_noise_variables = 1
def __init__(self, params, lin_nonlin_transfer_function_filename=None, seed=None):
"""
:param lin_nonlin_transfer_function_filename: filename for precomputed
transfer functions of the ALN model, if None, will look for it in this
directory
:type lin_nonlin_transfer_function_filename: str|None
:param seed: seed for random number generator
:type seed: int|None
"""
params = self._rescale_strengths(params)
super().__init__(params=params, seed=seed)
# use the same file as neurolib's native
lin_nonlin_transfer_function_filename = lin_nonlin_transfer_function_filename or os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"..",
"aln",
"aln-precalc",
DEFAULT_QUANTITIES_CASCADE_FILENAME,
)
self._load_lin_nonlin_transfer_function(lin_nonlin_transfer_function_filename)
def _load_lin_nonlin_transfer_function(self, filename):
"""
Load precomputed transfer functions from h5 file.
"""
# load transfer functions from file
logging.info(f"Loading precomputed transfer functions from {filename}")
loaded_h5 = File(filename, "r")
self.mu_range = np.array(loaded_h5["mu_vals"])
self.d_mu = self.mu_range[1] - self.mu_range[0]
self.sigma_range = np.array(loaded_h5["sigma_vals"])
self.d_sigma = self.sigma_range[1] - self.sigma_range[0]
self.firing_rate_transfer_function = np.array(loaded_h5["r_ss"])
self.voltage_transfer_function = np.array(loaded_h5["V_mean_ss"])
self.tau_transfer_function = np.array(loaded_h5["tau_mu_exp"])
logging.info("All transfer functions loaded.")
# close the file
loaded_h5.close()
self.lin_nonlin_fname = filename
def describe(self):
return {
**super().describe(),
"lin_nonlin_transfer_function_filename": self.lin_nonlin_fname,
}
def _callbacks(self):
"""
Construct list of python callbacks for ALN model.
"""
callbacks_list = [
(self.callback_functions["firing_rate_lookup"], self.firing_rate_lookup, 2),
(self.callback_functions["voltage_lookup"], self.voltage_lookup, 2),
(self.callback_functions["tau_lookup"], self.tau_lookup, 2),
]
self._validate_callbacks(callbacks_list)
return callbacks_list
def _numba_callbacks(self):
"""
Define numba callbacks - has to be different than jitcdde callbacks
because of the internals.
"""
def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
"""
Function generator for numba callbacks. This works similarly as
`functools.partial` (i.e. sets some of the arguments of the inner
function), but afterwards can be jitted with `numba.njit()`, while
partial functions cannot.
"""
def inner(current_mu, current_sigma):
return _table_lookup(
current_mu,
current_sigma,
sigma_range,
mu_range,
d_sigma,
d_mu,
transfer_function,
)
return inner
return [
(
"firing_rate_lookup",
numba.njit(
_table_numba_gen(
self.sigma_range,
self.mu_range,
self.d_sigma,
self.d_mu,
self.firing_rate_transfer_function,
)
),
),
(
"voltage_lookup",
numba.njit(
_table_numba_gen(
self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function
)
),
),
(
"tau_lookup",
numba.njit(
_table_numba_gen(
self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function
)
),
),
]
def firing_rate_lookup(self, y, current_mu, current_sigma):
"""
Translate mean and std. deviation of the current to firing rate using
linear-nonlinear lookup table for ALN.
"""
return _table_lookup(
current_mu,
current_sigma,
self.sigma_range,
self.mu_range,
self.d_sigma,
self.d_mu,
self.firing_rate_transfer_function,
)
def voltage_lookup(self, y, current_mu, current_sigma):
"""
Translate mean and std. deviation of the current to voltage using
precomputed transfer functions of the aln model.
"""
return _table_lookup(
current_mu,
current_sigma,
self.sigma_range,
self.mu_range,
self.d_sigma,
self.d_mu,
self.voltage_transfer_function,
)
def tau_lookup(self, y, current_mu, current_sigma):
"""
Translate mean and std. deviation of the current to tau - membrane time
constant using precomputed transfer functions of the aln model.
"""
return _table_lookup(
current_mu,
current_sigma,
self.sigma_range,
self.mu_range,
self.d_sigma,
self.d_mu,
self.tau_transfer_function,
)
class ExcitatoryALNMass(ALNMass):
"""
Excitatory ALN neural mass. Contains firing rate adaptation current.
"""
name = "ALN excitatory neural mass"
label = f"ALNMass{EXC}"
num_state_variables = 7
coupling_variables = {6: f"r_mean_{EXC}"}
mass_type = EXC
state_variable_names = [
"I_mu",
"I_A",
"I_syn_mu_exc",
"I_syn_mu_inh",
"I_syn_sigma_exc",
"I_syn_sigma_inh",
"r_mean",
]
required_couplings = [
"node_exc_exc",
"node_exc_exc_sq",
"node_exc_inh",
"node_exc_inh_sq",
"network_exc_exc",
"network_exc_exc_sq",
]
required_params = [
"Ke",
"Ki",
"c_gl",
"Ke_gl",
"tau_se",
"tau_si",
"sigmae_ext",
"Jee_max",
"Jei_max",
"taum",
"C",
"ext_exc_current",
"ext_exc_rate",
"a",
"b",
"EA",
"tauA",
"lambda",
]
_noise_input = [OrnsteinUhlenbeckProcess(mu=0.4, sigma=0.0, tau=5.0)]
@staticmethod
def _rescale_strengths(params):
"""
Rescale connection strengths.
"""
params = deepcopy(params)
assert isinstance(params, dict)
params["c_gl"] = params["c_gl"] * params["tau_se"] / params["Jee_max"]
params["taum"] = params["C"] / params["gL"]
return params
def __init__(self, params=None, lin_nonlin_transfer_function_filename=None, seed=None):
super().__init__(
params=params or ALN_EXC_DEFAULT_PARAMS,
lin_nonlin_transfer_function_filename=lin_nonlin_transfer_function_filename,
seed=seed,
)
def update_params(self, params_dict):
"""
Update parameters as in the base class but also rescale.
"""
# if we are changing C_m or g_L, update tau_m as well
if any(k in params_dict for k in ("C", "gL")):
C_m = params_dict["C"] if "C" in params_dict else self.params["C"]
g_L = params_dict["gL"] if "gL" in params_dict else self.params["gL"]
params_dict["taum"] = C_m / g_L
# if we are changing any of the J_exc_max, tau_syn_exc or c_global, rescale c_global
if any(k in params_dict for k in ("c_gl", "Jee_max", "tau_se")):
# get original c_global
c_global = (
params_dict["c_gl"]
if "c_gl" in params_dict
else self.params["c_gl"] * (self.params["Jee_max"] / self.params["tau_se"])
)
tau_syn_exc = params_dict["tau_se"] if "tau_se" in params_dict else self.params["tau_se"]
J_exc_max = params_dict["Jee_max"] if "Jee_max" in params_dict else self.params["Jee_max"]
params_dict["c_gl"] = c_global * tau_syn_exc / J_exc_max
# update all parameters finally
super().update_params(params_dict)
def _initialize_state_vector(self):
"""
Initialize state vector.
"""
np.random.seed(self.seed)
self.initial_state = (
np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 200.0, 0.5, 0.5, 0.001, 0.001, 0.01])
).tolist()
def _compute_couplings(self, coupling_variables):
"""
Helper that computes coupling from other nodes and network.
"""
exc_coupling = (
self.params["Ke"] * coupling_variables["node_exc_exc"]
+ self.params["c_gl"] * self.params["Ke_gl"] * coupling_variables["network_exc_exc"]
+ self.params["c_gl"] * self.params["Ke_gl"] * self.params["ext_exc_rate"]
)
inh_coupling = self.params["Ki"] * coupling_variables["node_exc_inh"]
exc_coupling_squared = (
self.params["Ke"] * coupling_variables["node_exc_exc_sq"]
+ self.params["c_gl"] ** 2 * self.params["Ke_gl"] * coupling_variables["network_exc_exc_sq"]
+ self.params["c_gl"] ** 2 * self.params["Ke_gl"] * self.params["ext_exc_rate"]
)
inh_coupling_squared = self.params["Ki"] * coupling_variables["node_exc_inh_sq"]
return (
exc_coupling,
inh_coupling,
exc_coupling_squared,
inh_coupling_squared,
)
def _derivatives(self, coupling_variables):
(
I_mu,
I_adaptation,
I_syn_mu_exc,
I_syn_mu_inh,
I_syn_sigma_exc,
I_syn_sigma_inh,
firing_rate,
) = self._unwrap_state_vector()
exc_inp, inh_inp, exc_inp_sq, inh_inp_sq = self._compute_couplings(coupling_variables)
I_sigma = se.sqrt(
2
* self.params["Jee_max"] ** 2
* I_syn_sigma_exc
* self.params["tau_se"]
* self.params["taum"]
/ ((1.0 + exc_inp) * self.params["taum"] + self.params["tau_se"])
+ 2
* self.params["Jei_max"] ** 2
* I_syn_sigma_inh
* self.params["tau_si"]
* self.params["taum"]
/ ((1.0 + inh_inp) * self.params["taum"] + self.params["tau_si"])
+ self.params["sigmae_ext"] ** 2
)
# get values from linear-nonlinear lookup table
firing_rate_now = self.callback_functions["firing_rate_lookup"](I_mu - I_adaptation / self.params["C"], I_sigma)
voltage = self.callback_functions["voltage_lookup"](I_mu - I_adaptation / self.params["C"], I_sigma)
tau = self.callback_functions["tau_lookup"](I_mu - I_adaptation / self.params["C"], I_sigma)
d_I_mu = (
self.params["Jee_max"] * I_syn_mu_exc
+ self.params["Jei_max"] * I_syn_mu_inh
+ system_input(self.noise_input_idx[0])
+ self.params["ext_exc_current"]
- I_mu
) / tau
d_I_adaptation = (
self.params["a"] * (voltage - self.params["EA"])
- I_adaptation
+ self.params["tauA"] * self.params["b"] * firing_rate_now
) / self.params["tauA"]
d_I_syn_mu_exc = ((1.0 - I_syn_mu_exc) * exc_inp - I_syn_mu_exc) / self.params["tau_se"]
d_I_syn_mu_inh = ((1.0 - I_syn_mu_inh) * inh_inp - I_syn_mu_inh) / self.params["tau_si"]
d_I_syn_sigma_exc = (
(1.0 - I_syn_mu_exc) ** 2 * exc_inp_sq
+ (exc_inp_sq - 2.0 * self.params["tau_se"] * (exc_inp + 1.0)) * I_syn_sigma_exc
) / (self.params["tau_se"] ** 2)
d_I_syn_sigma_inh = (
(1.0 - I_syn_mu_inh) ** 2 * inh_inp_sq
+ (inh_inp_sq - 2.0 * self.params["tau_si"] * (inh_inp + 1.0)) * I_syn_sigma_inh
) / (self.params["tau_si"] ** 2)
# firing rate as dummy dynamical variable with infinitely fast
# fixed-point dynamics
d_firing_rate = -self.params["lambda"] * (firing_rate - firing_rate_now)
return [
d_I_mu,
d_I_adaptation,
d_I_syn_mu_exc,
d_I_syn_mu_inh,
d_I_syn_sigma_exc,
d_I_syn_sigma_inh,
d_firing_rate,
]
class InhibitoryALNMass(ALNMass):
"""
    Inhibitory ALN neural mass. In contrast to the excitatory mass, the inhibitory
    mass does not contain a firing rate adaptation current.
"""
name = "ALN inhibitory neural mass"
label = f"ALNMass{INH}"
num_state_variables = 6
coupling_variables = {5: f"r_mean_{INH}"}
mass_type = INH
state_variable_names = [
"I_mu",
"I_syn_mu_exc",
"I_syn_mu_inh",
"I_syn_sigma_exc",
"I_syn_sigma_inh",
"r_mean",
]
required_couplings = [
"node_inh_exc",
"node_inh_exc_sq",
"node_inh_inh",
"node_inh_inh_sq",
]
required_params = [
"Ke",
"Ki",
"c_gl",
"Ke_gl",
"tau_se",
"tau_si",
"sigmai_ext",
"Jie_max",
"Jii_max",
"taum",
"C",
"ext_inh_current",
"ext_inh_rate",
"lambda",
]
_noise_input = [OrnsteinUhlenbeckProcess(mu=0.3, sigma=0.0, tau=5.0)]
@staticmethod
def _rescale_strengths(params):
"""
Rescale connection strengths.
"""
params = deepcopy(params)
assert isinstance(params, dict)
params["c_gl"] = params["c_gl"] * params["tau_se"] / params["Jie_max"]
params["taum"] = params["C"] / params["gL"]
return params
def __init__(self, params=None, lin_nonlin_transfer_function_filename=None, seed=None):
super().__init__(
params=params or ALN_INH_DEFAULT_PARAMS,
lin_nonlin_transfer_function_filename=lin_nonlin_transfer_function_filename,
seed=seed,
)
def update_params(self, params_dict):
"""
Update parameters as in the base class but also rescale.
"""
# if we are changing C_m or g_L, update tau_m as well
if any(k in params_dict for k in ("C", "gL")):
C_m = params_dict["C"] if "C" in params_dict else self.params["C"]
g_L = params_dict["gL"] if "gL" in params_dict else self.params["gL"]
params_dict["taum"] = C_m / g_L
# if we are changing any of the J_exc_max, tau_syn_exc or c_global, rescale c_global
if any(k in params_dict for k in ("c_gl", "Jie_max", "tau_se")):
# get original c_global
c_global = (
params_dict["c_gl"]
if "c_gl" in params_dict
else self.params["c_gl"] * (self.params["Jie_max"] / self.params["tau_se"])
)
tau_syn_exc = params_dict["tau_se"] if "tau_se" in params_dict else self.params["tau_se"]
J_exc_max = params_dict["Jie_max"] if "Jie_max" in params_dict else self.params["Jie_max"]
params_dict["c_gl"] = c_global * tau_syn_exc / J_exc_max
# update all parameters finally
super().update_params(params_dict)
def _initialize_state_vector(self):
"""
Initialize state vector.
"""
np.random.seed(self.seed)
self.initial_state = (
np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 0.5, 0.5, 0.01, 0.01, 0.01])
).tolist()
def _compute_couplings(self, coupling_variables):
"""
Helper that computes coupling from other nodes and network.
"""
exc_coupling = (
self.params["Ke"] * coupling_variables["node_inh_exc"]
+ self.params["c_gl"] * self.params["Ke_gl"] * self.params["ext_inh_rate"]
)
inh_coupling = self.params["Ki"] * coupling_variables["node_inh_inh"]
exc_coupling_squared = (
self.params["Ke"] * coupling_variables["node_inh_exc_sq"]
+ self.params["c_gl"] ** 2 * self.params["Ke_gl"] * self.params["ext_inh_rate"]
)
inh_coupling_squared = self.params["Ki"] * coupling_variables["node_inh_inh_sq"]
return (
exc_coupling,
inh_coupling,
exc_coupling_squared,
inh_coupling_squared,
)
def _derivatives(self, coupling_variables):
(
I_mu,
I_syn_mu_exc,
I_syn_mu_inh,
I_syn_sigma_exc,
I_syn_sigma_inh,
firing_rate,
) = self._unwrap_state_vector()
exc_inp, inh_inp, exc_inp_sq, inh_inp_sq = self._compute_couplings(coupling_variables)
I_sigma = se.sqrt(
2
* self.params["Jie_max"] ** 2
* I_syn_sigma_exc
* self.params["tau_se"]
* self.params["taum"]
/ ((1.0 + exc_inp) * self.params["taum"] + self.params["tau_se"])
+ 2
* self.params["Jii_max"] ** 2
* I_syn_sigma_inh
* self.params["tau_si"]
* self.params["taum"]
/ ((1.0 + inh_inp) * self.params["taum"] + self.params["tau_si"])
+ self.params["sigmai_ext"] ** 2
)
# get values from linear-nonlinear lookup table
firing_rate_now = self.callback_functions["firing_rate_lookup"](I_mu, I_sigma)
tau = self.callback_functions["tau_lookup"](I_mu, I_sigma)
d_I_mu = (
self.params["Jie_max"] * I_syn_mu_exc
+ self.params["Jii_max"] * I_syn_mu_inh
+ system_input(self.noise_input_idx[0])
+ self.params["ext_inh_current"]
- I_mu
) / tau
d_I_syn_mu_exc = ((1.0 - I_syn_mu_exc) * exc_inp - I_syn_mu_exc) / self.params["tau_se"]
d_I_syn_mu_inh = ((1.0 - I_syn_mu_inh) * inh_inp - I_syn_mu_inh) / self.params["tau_si"]
d_I_syn_sigma_exc = (
(1.0 - I_syn_mu_exc) ** 2 * exc_inp_sq
+ (exc_inp_sq - 2.0 * self.params["tau_se"] * (exc_inp + 1.0)) * I_syn_sigma_exc
) / (self.params["tau_se"] ** 2)
d_I_syn_sigma_inh = (
(1.0 - I_syn_mu_inh) ** 2 * inh_inp_sq
+ (inh_inp_sq - 2.0 * self.params["tau_si"] * (inh_inp + 1.0)) * I_syn_sigma_inh
) / (self.params["tau_si"] ** 2)
# firing rate as dummy dynamical variable with infinitely fast
# fixed-point dynamics
d_firing_rate = -self.params["lambda"] * (firing_rate - firing_rate_now)
return [
d_I_mu,
d_I_syn_mu_exc,
d_I_syn_mu_inh,
d_I_syn_sigma_exc,
d_I_syn_sigma_inh,
d_firing_rate,
]
class ALNNode(SingleCouplingExcitatoryInhibitoryNode):
"""
Default ALN network node with 1 excitatory (featuring adaptive current) and
1 inhibitory population.
"""
name = "ALN neural mass node"
label = "ALNNode"
sync_variables = [
"node_exc_exc",
"node_inh_exc",
"node_exc_inh",
"node_inh_inh",
# squared variants
"node_exc_exc_sq",
"node_inh_exc_sq",
"node_exc_inh_sq",
"node_inh_inh_sq",
]
default_network_coupling = {
"network_exc_exc": 0.0,
"network_exc_exc_sq": 0.0,
}
default_output = f"r_mean_{EXC}"
output_vars = [f"r_mean_{EXC}", f"r_mean_{INH}", f"I_A_{EXC}"]
def _rescale_connectivity(self):
"""
Rescale connection strengths for ALN. Should work also for ALN nodes
with arbitrary number of masses of any type.
"""
# create tau and J_max matrices for rescaling
tau_mat = np.zeros_like(self.connectivity)
J_mat = np.zeros_like(self.connectivity)
for col, mass_from in enumerate(self.masses):
# taus are constant per col and depends only on "from" mass
tau_mat[:, col] = mass_from.params[f"tau_s{mass_from.mass_type.lower()[0]}"]
# Js are specific: take J from "to" mass but of type "from" mass
for row, mass_to in enumerate(self.masses):
J_mat[row, col] = mass_to.params[f"J{mass_to.mass_type.lower()[0]}{mass_from.mass_type.lower()[0]}_max"]
# multiplication with tau makes the increase of synaptic activity
# subject to a single input spike invariant to tau and division by J
# ensures that mu = J*s will result in a PSP of exactly c for a single
# spike
self.connectivity = (self.connectivity * tau_mat) / np.abs(J_mat)
def __init__(
self,
exc_params=None,
inh_params=None,
exc_lin_nonlin_transfer_function_filename=None,
inh_lin_nonlin_transfer_function_filename=None,
connectivity=ALN_NODE_DEFAULT_CONNECTIVITY,
delays=ALN_NODE_DEFAULT_DELAYS,
exc_seed=None,
inh_seed=None,
):
"""
:param exc_params: parameters for the excitatory mass
:type exc_params: dict|None
:param inh_params: parameters for the inhibitory mass
:type inh_params: dict|None
:param exc_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer functions for excitatory ALN mass, if None, will
look for it in this directory
:type exc_lin_nonlin_transfer_function_filename: str|None
:param inh_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer functions for inhibitory ALN mass, if None, will
look for it in this directory
:type inh_lin_nonlin_transfer_function_filename: str|None
:param connectivity: local connectivity matrix
:type connectivity: np.ndarray
:param delays: local delay matrix
:type delays: np.ndarray
:param exc_seed: seed for random number generator for the excitatory
mass
:type exc_seed: int|None
:param inh_seed: seed for random number generator for the inhibitory
mass
:type inh_seed: int|None
"""
excitatory_mass = ExcitatoryALNMass(
params=exc_params,
lin_nonlin_transfer_function_filename=exc_lin_nonlin_transfer_function_filename,
seed=exc_seed,
)
excitatory_mass.index = 0
inhibitory_mass = InhibitoryALNMass(
params=inh_params,
lin_nonlin_transfer_function_filename=inh_lin_nonlin_transfer_function_filename,
seed=inh_seed,
)
inhibitory_mass.index = 1
super().__init__(
neural_masses=[excitatory_mass, inhibitory_mass],
local_connectivity=connectivity,
local_delays=delays,
)
self._rescale_connectivity()
def update_params(self, params_dict):
"""
Rescale connectivity after params update if connectivity was updated.
"""
rescale_flag = "local_connectivity" in params_dict
super().update_params(params_dict)
if rescale_flag:
self._rescale_connectivity()
def _sync(self):
"""
Apart from basic EXC<->INH connectivity, construct also squared
variants.
"""
connectivity_sq = self.connectivity ** 2 * self.inputs
sq_connectivity = [
(
# exc -> exc squared connectivity
self.sync_symbols[f"node_exc_exc_sq_{self.index}"],
sum([connectivity_sq[row, col] for row in self.excitatory_masses for col in self.excitatory_masses]),
),
(
# exc -> inh squared connectivity
self.sync_symbols[f"node_inh_exc_sq_{self.index}"],
sum([connectivity_sq[row, col] for row in self.inhibitory_masses for col in self.excitatory_masses]),
),
(
# inh -> exc squared connectivity
self.sync_symbols[f"node_exc_inh_sq_{self.index}"],
sum([connectivity_sq[row, col] for row in self.excitatory_masses for col in self.inhibitory_masses]),
),
(
# inh -> inh squared connectivity
self.sync_symbols[f"node_inh_inh_sq_{self.index}"],
sum([connectivity_sq[row, col] for row in self.inhibitory_masses for col in self.inhibitory_masses]),
),
]
return super()._sync() + sq_connectivity
class ALNNetwork(Network):
"""
Whole brain network of adaptive exponential integrate-and-fire mean-field
excitatory and inhibitory nodes.
"""
name = "ALN neural mass network"
label = "ALNNet"
sync_variables = ["network_exc_exc", "network_exc_exc_sq"]
output_vars = [f"r_mean_{EXC}", f"r_mean_{INH}", f"I_A_{EXC}"]
def __init__(
self,
connectivity_matrix,
delay_matrix,
exc_mass_params=None,
inh_mass_params=None,
exc_lin_nonlin_transfer_function_filename=None,
inh_lin_nonlin_transfer_function_filename=None,
local_connectivity=ALN_NODE_DEFAULT_CONNECTIVITY,
local_delays=ALN_NODE_DEFAULT_DELAYS,
exc_seed=None,
inh_seed=None,
):
"""
:param connectivity_matrix: connectivity matrix for coupling between
nodes, defined as [from, to]
:type connectivity_matrix: np.ndarray
:param delay_matrix: delay matrix between nodes, if None, delays are
all zeros, in ms, defined as [from, to]
:type delay_matrix: np.ndarray|None
:param exc_mass_params: parameters for each excitatory ALN neural
mass, if None, will use default
:type exc_mass_params: list[dict]|dict|None
:param inh_mass_params: parameters for each inhibitory ALN neural
mass, if None, will use default
:type inh_mass_params: list[dict]|dict|None
        :param exc_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer_function for excitatory ALN mass, if None, will
look for it in this directory
:type exc_lin_nonlin_transfer_function_filename: list[str]|str|None
:param inh_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer_function for inhibitory ALN mass, if None, will
look for it in this directory
:type inh_lin_nonlin_transfer_function_filename: list[str]|str|None
:param local_connectivity: local within-node connectivity matrix
:type local_connectivity: np.ndarray
:param local_delays: local within-node delay matrix
:type local_delays: list[np.ndarray]|np.ndarray
:param exc_seed: seed for random number generator for the excitatory
masses
:type exc_seed: int|None
        :param inh_seed: seed for random number generator for the inhibitory
masses
:type inh_seed: int|None
"""
num_nodes = connectivity_matrix.shape[0]
exc_mass_params = self._prepare_mass_params(exc_mass_params, num_nodes)
inh_mass_params = self._prepare_mass_params(inh_mass_params, num_nodes)
exc_lin_nonlin_transfer_function_filename = self._prepare_mass_params(
exc_lin_nonlin_transfer_function_filename, num_nodes, native_type=str
)
inh_lin_nonlin_transfer_function_filename = self._prepare_mass_params(
inh_lin_nonlin_transfer_function_filename, num_nodes, native_type=str
)
local_connectivity = self._prepare_mass_params(local_connectivity, num_nodes, native_type=np.ndarray)
local_delays = self._prepare_mass_params(local_delays, num_nodes, native_type=np.ndarray)
exc_seeds = self._prepare_mass_params(exc_seed, num_nodes, native_type=int)
inh_seeds = self._prepare_mass_params(inh_seed, num_nodes, native_type=int)
nodes = []
for (
i,
(exc_params, inh_params, exc_transfer_function, inh_transfer_function, local_conn, local_dels),
) in enumerate(
zip(
exc_mass_params,
inh_mass_params,
exc_lin_nonlin_transfer_function_filename,
inh_lin_nonlin_transfer_function_filename,
local_connectivity,
local_delays,
)
):
node = ALNNode(
exc_params=exc_params,
inh_params=inh_params,
exc_lin_nonlin_transfer_function_filename=exc_transfer_function,
inh_lin_nonlin_transfer_function_filename=inh_transfer_function,
connectivity=local_conn,
delays=local_dels,
exc_seed=exc_seeds[i],
inh_seed=inh_seeds[i],
)
node.index = i
node.idx_state_var = i * node.num_state_variables
# set correct indices of noise input
for mass in node:
mass.noise_input_idx = [2 * i + mass.index]
nodes.append(node)
super().__init__(nodes=nodes, connectivity_matrix=connectivity_matrix, delay_matrix=delay_matrix)
        # assert we have 2 sync variables
assert len(self.sync_variables) == 2
def _sync(self):
"""
Overload sync method since the ALN model requires
squared coupling weights and non-trivial coupling indices.
"""
# get coupling variable index from excitatory mass within each node
coupling_var_idx = set(sum([list(node[0].coupling_variables.keys()) for node in self], []))
assert len(coupling_var_idx) == 1
coupling_var_idx = next(iter(coupling_var_idx))
return (
# regular additive coupling
self._additive_coupling(within_node_idx=coupling_var_idx, symbol="network_exc_exc")
# additive coupling with squared weights
+ self._additive_coupling(
within_node_idx=coupling_var_idx,
symbol="network_exc_exc_sq",
# use squared connectivity
connectivity=self.connectivity * self.connectivity,
)
+ super()._sync()
)
| 1.859375
| 2
|
1080.py
|
Juniorr452/URI-Online-Judge
| 0
|
12776729
|
# -*- coding: utf-8 -*-
maior = -1
index_maior = -1
for i in range(1, 101):
n = int(input())
if n > maior:
maior = n
index_maior = i
print(maior)
print(index_maior)
| 3.625
| 4
|
synapse/tests/test_lookup_iso3166.py
|
larrycameron80/synapse
| 0
|
12776730
|
<filename>synapse/tests/test_lookup_iso3166.py
from synapse.tests.common import *
import synapse.lookup.iso3166 as s_l_country
class CountryLookTest(SynTest):
def test_lookup_countries(self):
self.eq(s_l_country.country2iso.get('united states of america'), 'us')
self.eq(s_l_country.country2iso.get('mexico'), 'mx')
self.eq(s_l_country.country2iso.get('vertexLandia'), None)
| 2.03125
| 2
|
Task_6_Website_Testing/seleniumscript/demoscript.py
|
shwetathikekar/Sparks_Foundation_Task_6
| 0
|
12776731
|
<gh_stars>0
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
import time
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.maximize_window()
driver.get("https://www.thesparksfoundationsingapore.org/")
sitetitle = driver.title
# Test Case 1: Title Testing
print("\nTest Case 1:Title Testing:")
if sitetitle:
print("Title is present:", sitetitle)
else:
print("Title not found")
if sitetitle=="The Sparks Foundation | Home":
print("Present title verified successfully:",sitetitle)
else:
print("Verification failed")
# Test Case 2: Logo Testing
print("\nTest Case 2: Logo Testing:")
try:
driver.find_element(By.XPATH,'//*[@id="home"]/div/div[1]/h1/a/img')
print("Logo is verified \n")
except NoSuchElementException:
print("Logo is not available")
# Test case 3: Navbar Testing
print("\nTest case 3: Navbar Testing:")
try:
driver.find_element(By.CLASS_NAME,"navbar")
print("Navigation Bar is available")
except NoSuchElementException:
print("Navigation Bar is not available")
# Test case 4: Back to Home Link testing
print("\nTest case 4: Back to Home Link testing:")
try:
driver.find_element(By.PARTIAL_LINK_TEXT,"The Sparks Foundation")
print("Successfully Visit back to Home Page")
except NoSuchElementException:
print("Can't open Home Page")
# Test case 5: About Us page Testing
print("\nTest case 5: About Us page Testing:")
try:
driver.find_element(By.LINK_TEXT,'About Us').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT,'Vision, Mission and Values').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Guiding Principles').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Advisors and Patrons').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, "Executive Team").click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, "Corporate Partners").click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, "Expert Mentors").click()
time.sleep(1)
print("Expert Mentors Page is Not Yet Ready!")
driver.find_element(By.LINK_TEXT, 'About Us').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, "News").click()
time.sleep(1)
print("All about us pages are available and verified successfully")
except NoSuchElementException:
print("Can't open the pages")
# Test case 6: Policies and Code page Testing
print("\nTest case 6: Policies and Code page Testing:")
try:
driver.find_element(By.LINK_TEXT,'Policies and Code').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT,'Policies').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Code of Ethics and Conduct').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Personal Data Policy').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Whistle Blowing Policy').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Service Quality Values').click()
time.sleep(1)
print("All Policies and Code pages are available and verified successfully")
except NoSuchElementException:
print("Can't open the pages")
# Test case 7: Programs pages Testing
print("\nTest case 7: Programs pages Testing:")
try:
driver.find_element(By.LINK_TEXT,'Programs').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT,'Student Scholarship Program').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Student Mentorship Program').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Student SOS Program').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Workshops').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Corporate Programs').click()
time.sleep(1)
print("All Programs pages are available and verified successfully")
except NoSuchElementException:
print("Can't open the pages")
# Test case 8: Links page Testing
print("\nTest case 8: Links page Testing:")
try:
driver.find_element(By.LINK_TEXT, 'LINKS').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Software & App').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Salient Features').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'AI in Education').click()
time.sleep(1)
print("All Links pages are available and verified successfully")
except NoSuchElementException:
print("Can't open the pages")
# Test case 9: Join Us page Testing
print("\nTest case 9: Join Us page Testing:")
try:
driver.find_element(By.LINK_TEXT, 'Join Us').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Why Join Us').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Expert Mentor').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Events Volunteer').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Management Volunteer').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Internship Positions').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Brand Ambassador').click()
time.sleep(1)
print("All Join Us pages are available and verified successfully")
except NoSuchElementException:
print("Can't open the pages")
# Test Case 10: Contact Us page Testing
print("\nTest Case 10: Contact Us page Testing")
try:
driver.find_element(By.LINK_TEXT, "Contact Us")
time.sleep(1)
print("Contact Us page is available")
except NoSuchElementException:
print("Can't open the page")
# Test case 11: Contact Text Testing
print("\nTest case 11: Contact Text Testing")
try:
driver.find_element(By.LINK_TEXT, "Contact Us").click()
time.sleep(1)
info1 = driver.find_element(By.XPATH, '/html/body/div[2]/div/div/div[3]/div[2]/p[1]')
info2 = driver.find_element(By.XPATH, '/html/body/div[2]/div/div/div[3]/div[2]/p[2]')
time.sleep(1)
print("Info1:", info1.text)
print("Info2:", info2.text)
if info1.text == "(for Non-Internship/GRIP Queries Only)" and info2.text == "+65-8402-8590, <EMAIL>":
        print('Info1 and Info2 are correct!')
else:
print("Contact information is Incorrect!")
print("Contact Page Verification Successful!\n")
except NoSuchElementException:
print("Contact Page Verification Unsuccessful!")
# Test case 12: Address Text Testing
print("\nTest case 12: Address Text Testing")
try:
driver.find_element(By.LINK_TEXT, "Contact Us").click()
time.sleep(1)
info = driver.find_element(By.XPATH, '/html/body/div[2]/div/div/div[2]/div[2]/p')
time.sleep(1)
print("Info:", info.text)
    if not info.text:  # the expected address text is not hard-coded here, so only check it is non-empty
        print("Info is Incorrect")
print("Contact Page Verification Successful!\n")
except NoSuchElementException:
print("Contact Page Verification Unsuccessful!")
# TestCase 13: Check the Form
print("\nTestCase 13:")
try:
driver.find_element(By.LINK_TEXT, 'Join Us').click()
time.sleep(1)
driver.find_element(By.LINK_TEXT, 'Why Join Us').click()
time.sleep(1)
driver.find_element(By.NAME, 'Name').send_keys('<NAME>')
time.sleep(1)
driver.find_element(By.NAME, 'Email').send_keys('<EMAIL>')
time.sleep(1)
select = Select(driver.find_element(By.CLASS_NAME, 'form-control'))
time.sleep(1)
select.select_by_visible_text('Intern')
time.sleep(1)
driver.find_element(By.CLASS_NAME, 'button-w3layouts').click()
time.sleep(1)
print("Form Verification Successful!\n")
except NoSuchElementException:
print("Form Verification Failed!\n")
time.sleep(1)
driver.quit()
| 3
| 3
|
mqttclient.py
|
pallebh/rflink2mqtt
| 0
|
12776732
|
import paho.mqtt.client as mqttw
class MqttClient :
def __init__( self , address = "localhost", port = 1883 , id_ = "" , subscribe = "" , message = None ) :
self.address = address
self.port = port
self.subscribe = subscribe
self.message = message
        self.client = mqttw.Client( client_id = id_ )
        # assign callbacks before connecting so on_connect can register the subscriptions
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.connect( self.address , self.port )
        self.client.loop_start()
def on_connect( self , client , userdata , flags , rc ) :
for subscribe in self.subscribe :
self.client.subscribe( subscribe )
def on_message( self , client , userdata , msg ) :
if self.message is None :
return
self.message( client , userdata , msg )
def publish( self , topic, payload=None, qos=0, retain=False ) :
self.client.publish( topic , payload , qos , retain )
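# Usage sketch (minimal and hypothetical; broker address and topic names are illustrative):
#
# def on_rflink(client, userdata, msg):
#     print(msg.topic, msg.payload)
#
# mqtt = MqttClient(address="localhost", port=1883, id_="rflink2mqtt",
#                   subscribe=["rflink/#"], message=on_rflink)
# mqtt.publish("rflink/status", payload="online", retain=True)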
| 2.828125
| 3
|
Lab_02/gcd_fsm.py
|
SadequrRahman/advance-SoC
| 0
|
12776733
|
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
#
# This file is part of Advance SoC Design Lab Solution.
#
# SoC Design Lab Solution can not be copied and/or distributed without the express
# permission of <NAME>
#
# File: gcd_fsm.py
# This is a pymtl gcd golden algo. implementation.
#
# Inputs:
#   a   -> first operand of the GCD calculation
#   b   -> second operand of the GCD calculation
#   en  -> enables the block; assert en after updating the a and b values
# Outputs:
#   out -> output of the block; contains the GCD of a and b
#   ack -> asserted high to indicate the current GCD calculation is done
#
from pymtl3 import *
class Gcd_fsm( Component ):
def construct(s, dType ):
s.a = InPort(dType)
s.b = InPort(dType)
s.en = InPort(b1)
s.out = OutPort(dType)
s.ack = OutPort(b1)
s.ra = Wire(dType)
s.rb = Wire(dType)
s.cState = Wire(b3)
s.nState = Wire(b3)
s.S0 = b3(0)
s.S1 = b3(1)
s.S2 = b3(2)
s.S3 = b3(3)
s.S4 = b3(4)
s.S5 = b3(5)
@s.update_ff
def state_memory():
if s.reset :
s.cState <<= s.S0
else:
s.cState <<= s.nState
@s.update
def next_state_logic():
if s.cState == s.S0:
if s.en == b1(1):
s.nState = s.S1
else:
s.nState = s.S0
elif s.cState == s.S1:
s.nState = s.S2
elif s.cState == s.S2:
if s.ra < s.rb :
s.nState = s.S3
elif s.rb != dType(0):
s.nState = s.S4
else:
s.nState = s.S5
elif s.cState == s.S3:
s.nState = s.S2
elif s.cState == s.S4:
s.nState = s.S2
elif s.cState == s.S5:
if s.en == b1(0):
s.nState = s.S0
else:
s.nState = s.S5
@s.update
def output_logic():
if s.cState == s.S0:
s.ack = b1(0)
elif s.cState == s.S1:
s.ra = s.a
s.rb = s.b
# elif s.cState == s.S2:
# pass
elif s.cState == s.S3:
s.ra = s.ra + s.rb
s.rb = s.ra - s.rb
s.ra = s.ra - s.rb
elif s.cState == s.S4:
s.ra = s.ra - s.rb
elif s.cState == s.S5:
s.out = s.ra
s.ack = b1(1)
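# State summary for Gcd_fsm (for reference): S0 waits for `en`; S1 latches a/b into
# ra/rb; S2 compares ra and rb; S3 swaps ra and rb using the add/subtract trick (no
# temporary register); S4 subtracts rb from ra; S5 drives `out`/`ack` until `en` is
# deasserted.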
| 2.203125
| 2
|
day5/day5.py
|
zLuke2000/aoc-2020
| 0
|
12776734
|
<reponame>zLuke2000/aoc-2020
import os
""" COMMON PART """
f = open(os.path.join(os.path.dirname(__file__), 'day5_input'), 'r')
inputNum = []
temp = f.read()
temp = temp.split("\n")
f.close()
""" PART ONE """
seatID = []
for i in temp:
seatX = [0,127]
seatY = [0,7]
seatXY = [0,0]
currentChar = 0
for index in i:
currentChar += 1
if(index == 'F'):
if(currentChar == 7):
seatXY[0] = seatX[0]
else:
seatX[1] -= int(((seatX[1]-seatX[0])/2)+0.5)
if(index == 'B'):
if(currentChar == 7):
seatXY[0] = seatX[1]
else:
seatX[0] += int(((seatX[1]-seatX[0])/2)+0.5)
if(index == 'R'):
if(currentChar == 10):
seatXY[1] = seatY[1]
else:
seatY[0] += int(((seatY[1]-seatY[0])/2)+0.5)
if(index == 'L'):
if(currentChar == 10):
seatXY[1] = seatY[0]
else:
seatY[1] -= int(((seatY[1]-seatY[0])/2)+0.5)
seatID.append(seatXY[0]*8+seatXY[1])
print("Highest SeatID:", max(seatID))
""" PART TWO """
seatID = sorted(seatID)
j = min(seatID)
for i in seatID:
if i != j:
        print("Missing SeatID:", j)
break
j += 1
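
# Alternative decoding, shown as a compact sketch: each boarding pass is really a
# 10-bit binary number (F/L -> 0, B/R -> 1), so the seat ID can be read off directly.
alt_ids = [int(code.translate(str.maketrans("FBLR", "0101")), 2) for code in temp if code]
print("Highest SeatID (binary decoding):", max(alt_ids))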
| 2.734375
| 3
|
backups/urls.py
|
TheEdu/python-mysql-backups-django-admin
| 0
|
12776735
|
<filename>backups/urls.py<gh_stars>0
from django.urls import path
from . import views
app_name = 'backups'
urlpatterns = [
path('', views.index, name='index'),
path('task/<int:task_id>/command', views.get_task_command, name='command'),
]
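# Example (a sketch, not part of the original file): with app_name = "backups",
# these routes can be reversed as
#   reverse("backups:index")
#   reverse("backups:command", kwargs={"task_id": task.id})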
| 1.734375
| 2
|
dags/testDevice.py
|
brendasanchezs/Capstonev2
| 0
|
12776736
|
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.postgres_operator import PostgresOperator
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import boto3
from io import StringIO
def S3_toRedShift(*args, **kwargs):
aws_hook = AwsHook("aws_credentials")
credentials = aws_hook.get_credentials()
redshift_hook = PostgresHook("redshift")
    day = kwargs['ds']
day = datetime.strptime(day, '%Y-%m-%d').date() - timedelta(days=2)
day = day.strftime("%m-%d-%Y")
sql = """
COPY {}
FROM '{}'
ACCESS_KEY_ID '{}'
SECRET_ACCESS_KEY '{}'
IGNOREHEADER 1
DELIMITER ';'
TIMEFORMAT 'auto'
"""
redshift_hook.run(sql.format("log_review",
"s3://data-raw-bucket/log_reviews.csv/",
credentials.access_key,
credentials.secret_key))
return
def insertUSdata(*args, **kwargs):
# aws_hook = AwsHook("aws_credentials")
# credentials = aws_hook.get_credentials()
redshift_hook = PostgresHook("redshift")
    day = kwargs['ds']
day = datetime.strptime(day, '%Y-%m-%d').date() - timedelta(days=2)
day = day.strftime("%Y-%m-%d")
sql = """
INSERT INTO device
SELECT
device
        FROM log_review """
    redshift_hook.run(sql)
return
default_args = {
'owner': 'ashwath',
'depends_on_past': False,
'start_date': datetime(2018, 11, 1),
'end_date': datetime(2018, 11, 30),
'email_on_failure': True,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(minutes=1),
'catchup': False
}
# hourly: cron is '0 * * * *': https://airflow.apache.org/docs/stable/scheduler.html
dag = DAG('log_reviews',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
max_active_runs=1,
# https://airflow.apache.org/docs/stable/scheduler.html
schedule_interval='0 0 * * *'
#schedule_interval=timedelta(days=1),
#schedule_interval='0 * * * *'
)
create_table_main = PostgresOperator(
task_id="create_log",
dag=dag,
postgres_conn_id="redshift",
sql=""" CREATE TABLE IF NOT EXISTS log_review (
id_log INTEGER,
device VARCHAR,
location VARCHAR,
os DATE,
ip VARCHAR,
phone_number VARCHAR,
browser VARCHAR);""")
create_table_device = PostgresOperator(
task_id="create_device",
dag=dag,
postgres_conn_id="redshift",
sql=""" CREATE TABLE IF NOT EXISTS device (
id_dim_devices INTEGER IDENTITY(1,1),
        device VARCHAR
    );""")
MovetoRedShift = PythonOperator(
task_id="S3_toRedShift",
provide_context=True,
python_callable=S3_toRedShift,
dag=dag
)
insert_Devices = PythonOperator(
task_id="insert_Devices",
provide_context=True,
python_callable=insertUSdata,
dag=dag
)
create_table_main >> create_table_device >> MovetoRedShift >> insert_Devices
| 2.296875
| 2
|
aligner/default.py
|
BryceGo/Natural_Language_Class
| 0
|
12776737
|
#!/usr/bin/env python
import optparse, sys, os, logging
from collections import defaultdict
optparser = optparse.OptionParser()
optparser.add_option("-d", "--datadir", dest="datadir", default="data", help="data directory (default=data)")
optparser.add_option("-p", "--prefix", dest="fileprefix", default="hansards", help="prefix of parallel data files (default=hansards)")
optparser.add_option("-e", "--english", dest="english", default="en", help="suffix of English (target language) filename (default=en)")
optparser.add_option("-f", "--french", dest="french", default="fr", help="suffix of French (source language) filename (default=fr)")
optparser.add_option("-l", "--logfile", dest="logfile", default=None, help="filename for logging output")
optparser.add_option("-t", "--threshold", dest="threshold", default=0.5, type="float", help="threshold for alignment (default=0.5)")
optparser.add_option("-n", "--num_sentences", dest="num_sents", default=sys.maxsize, type="int", help="Number of sentences to use for training and alignment")
(opts, _) = optparser.parse_args()
f_data = "%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.french)
e_data = "%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.english)
if opts.logfile:
logging.basicConfig(filename=opts.logfile, filemode='w', level=logging.INFO)
sys.stderr.write("Training using EM algorithm...")
bitext = [[sentence.strip().split() for sentence in pair] for pair in list(zip(open(f_data), open(e_data)))[:opts.num_sents]]
f_count = defaultdict(int)
e_count = defaultdict(int)
fe_count = defaultdict(int)
qa_count = defaultdict(int) #Counts for alignments q(j|i,l,m)
q_count = defaultdict(int) #Counts for alignments q(i,l,m)
#Where j is the alignment number of the english sentence,
#i is the alignment number of the french sentence
#l is the length of the english sentence
#m is the length of the french sentence
t_k = defaultdict(int)
q_k = defaultdict(int)
iterations = 10
k = 0
#Initialize
sys.stderr.write("\n")
sys.stderr.write("Initializing...")
for(a,(b,c)) in enumerate(bitext):
for (i,f_i) in enumerate(b):
for (j,e_j) in enumerate(c):
t_k[(f_i,e_j)] = 1.0
q_k[(j,i,len(c),len(b))] = 1.0
if a%1000 == 0: sys.stderr.write(".")
sys.stderr.write("\n")
sys.stderr.write("Done initializing\n")
sys.stderr.write("Training " + str(iterations) + " iterations.\n")
while(k < iterations):
k += 1
sys.stderr.write("Iteration " + str(k) + "...\n")
e_count = defaultdict(int)
fe_count = defaultdict(int)
for (n,(f,e)) in enumerate(bitext):
for (i,f_i) in enumerate(f):
Z = 0
for (j,e_j) in enumerate(e):
Z += t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))]
for (j,e_j) in enumerate(e):
c = (t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))])/Z
fe_count[(f_i,e_j)] += c
e_count[e_j] += c
qa_count[(j,i,len(e),len(f))] += c
q_count[(i,len(e),len(f))] += c
for (f,e) in fe_count.keys():
t_k[(f,e)] = fe_count[(f,e)]/e_count[e]
for (j,i,l,m) in qa_count.keys():
q_k[(j,i,l,m)] = qa_count[(j,i,l,m)]/q_count[(i,l,m)]
sys.stderr.write("Training Complete...\n")
sys.stderr.write("Aligning...\n")
for (k,(f,e)) in enumerate(bitext):
for (i,f_i) in enumerate(f):
#print("Number of french: " + str(i))
bestp = 0
bestj = 0
for (j,e_j) in enumerate(e):
#print(j)
if t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))] > bestp:
bestp = t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))]
bestj = j
#print("Chosen J: " + str(bestj))
sys.stdout.write("%i-%i " %(i,bestj))
sys.stdout.write("\n")
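
# Note: the t(f|e) lexical table and the q(j|i,l,m) distortion table trained above
# with EM make this an IBM Model 2 style word aligner; the Dice-coefficient baseline
# is kept (disabled) in the `if False:` block below for reference.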
if False: """
for (n, (f, e)) in enumerate(bitext):
for f_i in set(f):
f_count[f_i] += 1
for e_j in set(e):
fe_count[(f_i,e_j)] += 1
for e_j in set(e):
e_count[e_j] += 1
if n % 500 == 0:
sys.stderr.write(".")
dice = defaultdict(int)
for (k, (f_i, e_j)) in enumerate(fe_count.keys()):
dice[(f_i,e_j)] = 2.0 * fe_count[(f_i, e_j)] / (f_count[f_i] + e_count[e_j])
if k % 5000 == 0:
sys.stderr.write(".")
sys.stderr.write("\n")
for (f, e) in bitext:
for (i, f_i) in enumerate(f):
for (j, e_j) in enumerate(e):
if dice[(f_i,e_j)] >= opts.threshold:
sys.stdout.write("%i-%i " % (i,j))
sys.stdout.write("\n")
"""
| 2.421875
| 2
|
miraw.py
|
jadrian/mipy
| 0
|
12776738
|
<gh_stars>0
"""Functions for dealing with raw medical imaging datasets.
This module is particularly focused on working with diffusion-weighted images
and derived images, which are typically 4-D (3 for space, plus one dimension for
arbitrary sample vectors). Its default metadata format, "size_info", is a hacky
thing custom-built just for DWIs, and does not accommodate N-D volumes for N < 3
or N > 4. The readRaw() and saveRaw() functions, however, will work for any N.
"""
import numpy as np
import os.path
import mipy._miraw_helpers as _miraw_helpers
try:
from nibabel import nifti1
has_nifti = True
except ImportError:
import warnings
w = """You don't have nibabel installed, so reading and writing nifti files
won't be supported for you. nibabel exists for both Python 2 and 3, though,
so look into installing it."""
warnings.warn(w, RuntimeWarning)
has_nifti = False
def readRaw(f, shape, dtype=None, diskorder='F', memorder='C'):
"""Loads array data from a raw binary file on disk.
This is a wrapper around numpy.fromfile, and returns a numpy.ndarray that
owns its own memory. Its particular purpose is to work with differing
dimension orderings on disk and in memory. The default is to interpret
the file as "Fortran-ordered" (Matlab's default; column-major; first index
is fastest-changing) and to produce an ndarray that is "C-ordered" (numpy's
default; row-major; last index is fastest-changing).
This function does not support memory mapping (yet), so it's not
appropriate to use if your array is too big to fit comfortably in memory.
numpy.load() and the h5py package are alternatives, but they put
restrictions on the file format. numpy.memmap() may be the right option.
Arguments:
f: An open file object or a filename.
shape: A tuple of dimension extents. One dimension may be given
extent -1; if so, this dimension stretches to fit all the
voxel values.
dtype: A numpy data type (like numpy.float32). If None, dtype is
inferred from the filename.
diskorder: 'F' or 'C', default 'F'.
memorder: 'F' or 'C', default 'C'.
Throws a ValueError if shape does not match the number of voxels stored on
disk, or if the product of the non-negative values in shape does not divide
the number of voxels evenly.
Returns a numpy.ndarray with the given shape and order=memorder.
"""
# Read the data into a flat array.
if dtype is None:
dtype = _miraw_helpers.inferDtypeFromFilename(f)
raw = np.fromfile(f, dtype=dtype)
# Resolve the shape argument.
shape = np.array(shape)
num_voxels = np.prod(shape)
if num_voxels < 0:
num_voxels = -num_voxels
missing_dim = int(raw.shape[0] / num_voxels)
if num_voxels * missing_dim != raw.shape[0]:
err = (('File has %i voxels; you gave me shape = %s = %i voxels,\n' +
'which does not divide evenly.') %
(raw.shape[0], repr(shape.tolist()), num_voxels))
raise ValueError(err)
# Replace the missing dimension.
shape = np.where(shape < 0, missing_dim, shape)
# Reshape the flat array, interpreting according to the disk order.
try:
X = np.ndarray(shape=shape, dtype=dtype, buffer=raw.data, order=diskorder)
except TypeError:
num_voxels = np.prod(shape)
if num_voxels != raw.shape[0]:
err = ('File has %i voxels; you gave me shape = %s = %i voxels.' %
(raw.shape[0], repr(shape.tolist()), num_voxels))
raise ValueError(err)
else:
raise
# Now convert to the memory order and return.
return _miraw_helpers.ndcopyWithOrder(X, memorder)
def saveRaw(f, X, order='F', dtype_as_ext=False):
"""Saves array data to a raw binary file on disk.
This is a wrapper around numpy.ndarray.tofile. Its particular purpose is
to enable the creation of "Fortran-ordered" raw files, (aka column-major;
Matlab's default), in which the fastest-changing index in the source array,
with respect to the linear order in which data are stored on disk, is the
first index, rather than the last index ("C-ordered", numpy's default).
Arguments:
f: An open file object or a filename.
X: A numpy ndarray, with any shape and storage order.
order: 'F' or 'C' --- the order for storage on disk.
dtype_as_ext: If "True" and f is a string, appends the dtype as an
extensions for the filename. Raises TypeError if True and
f is not a string.
"""
if dtype_as_ext:
if isinstance(f, str):
f += '.' + str(X.dtype)
else:
raise TypeError("Can't append extension to an open file object.")
X.flatten(order=order).tofile(f)
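
# Minimal usage sketch for the two raw I/O helpers above (the filename, dtype and
# shape are only illustrative, not part of the original module):
#   vol = readRaw("dwi.float32", shape=(128, 128, 64, -1), dtype=np.float32)
#   saveRaw("dwi_copy.float32", vol, order="F")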
def readRawWithSizeInfo(f, sizefile=None, dtype=None, cropped=None, dimorder=None, diskorder='F', memorder='C'):
"""Loads a raw image file from disk, using a size_info metadata file.
Arguments:
f: A filename or open file object for the raw data file.
sizefile: A filename for the size_info metadata file.
If None, looks for a file called "size_info" in f's directory.
dtype: The numpy dtype of the raw data, or None.
cropped: A boolean: whether f is a cropped or full volume (as described in
the sizefile), or None (in which case this will be inferred).
dimorder: Four-character string that is a permutation of "XYZI",
indicating the dimension order of the image being read from
disk.
The purpose of this argument is to map from the dimension
extents stored in the size_info file, which are always stored
in XYZI order, to the actual shape of the ndarray we create.
Namely, if we create a 4-tuple "dimmap" by converting each
character X->0, Y->1, Z->2, I->3, then
vol.shape[i] = sz[dimmap[i]]
for i from 0 to 3, where vol is the returned volume and sz is
the volume size, in (X,Y,Z,I) order, read from size_info.
The default value, None, is equivalent to "XYZI".
This can be a confusing argument, so please note:
- dimorder is overridden if the size_info file specifies a
"dimension_order" value.
- dimorder only indicates a rearrangement of dimension
extents from the default order (as read in the size_info
file) to the order that dictates the shape attribute of the
returned array. Though it interacts in complicated ways
with the diskorder and memorder arguments, ultimately it is
not equivalent to calling transpose(dimmap) on the returned
array.
- dimorder does not change the order of the dimension extents
as stored in the returned dictionary "cfg".
diskorder: The array traversal order of the file.
memorder: The desired traversal order of the output array.
(See readRaw for more explanation of the last two arguments.)
This function attempts, usually successfully, to infer the values of
arguments left None.
Returns (vol, cfg), where vol is a numpy ndarray and cfg is the dict of
settings in sizefile. In addition, this function defines an additional
key, cfg['cropped'], with Boolean value.
"""
# Read the image into a 1-D array.
raw = readRaw(f, (-1,1), dtype=dtype, diskorder=diskorder, memorder=memorder)
# Read the size file.
imgname = _miraw_helpers.getFilename(f)
if sizefile is None:
try:
sizefile = os.path.join(os.path.dirname(imgname), 'size_info')
except:
raise TypeError("Can't infer sizefile from filename '%s'." % imgname)
cfg = readConfigFile(sizefile)
sz = cfg['full_image_size_(voxels)']
sz_c = cfg['cropped_image_size_(voxels)']
try:
n_imgs = cfg['num_dwis']
except KeyError:
n_imgs = 1
# Try to figure out whether the image is cropped.
cropped, threeD = _miraw_helpers.detectShapeAndCropping(raw.size,
np.prod(sz), np.prod(sz_c), n_imgs, cropped)
if cropped:
sz = sz_c
sz = sz + [n_imgs]
cfg['cropped'] = cropped
# Finally set the size and return.
try:
dimorder = cfg['dimension_order']
except KeyError:
if dimorder is None:
dimorder = _miraw_helpers.DIM_DEFAULT_ORDER
if not _miraw_helpers.isValidDimorder(dimorder):
raise ValueError('"%s" is not a valid dimorder argument.' % repr(dimorder))
if threeD:
sz = sz[0:3]
else:
sz = np.take(sz, _miraw_helpers.dimorderToDimmap(dimorder), axis=0)
return (_miraw_helpers.ndcopyWithOrder(raw.reshape(sz, order=diskorder),
memorder),
cfg)
def saveSizeInfo(f, img, vox_sz=(1,1,1), dimorder=None, size_cfg={}, infer_name=False):
"""Writes a size_info metadata file to disk for a given array.
A size_info file stores image (array) dimensions for raw images, as well as
voxel size and cropping information (indicating that the array is cropped
from a larger volume). Note that size_info is designed for 3-D or 4-D
arrays only, and stores the extents of the first three dimensions
separately from that of the fourth.
Arguments:
f: An open file handle or a filename for the destination file.
img: A numpy.ndarray.
vox_sz: Optional array-like object with 2 or 3 entries.
dimorder: Four-character string that is a permutation of "XYZI",
indicating the dimension order of the image "img".
The purpose of this argument is to map from the dimension
extents represented in img.shape to the extents stored in the
size_info file, which are always stored in a canonical "XYZI"
order. Namely, if we create a 4-tuple "dimmap" by converting
each character X->0, Y->1, Z->2, I->3, then
sz[dimmap[i]] = img.shape[i]
for i from 0 to 3, where sz is the volume size, in (X,Y,Z,I)
order, that will be stored in the size_info.
The default value, None, is equivalent to "XYZI".
size_cfg: Optional dictionary of other config key-value pairs. The data
stored in this dictionary override all values computed by or
passed into this function, even if they're inconsistent with
the size of the image. Be careful! This includes the
"dimension_order" value, which overrides the dimorder argument
above.
infer_name: Optional boolean. If True, and f is a filename, then the
file actually written will be in the same directory as f
(or in f if f is a path ending with a slash), and named
"size_info". If f is not a string, this option has no effect.
"""
# Deal with filenames and open a file for writing, if necessary.
if isinstance(f, str):
if infer_name:
f = os.path.join(os.path.dirname(f), 'size_info')
fid = open(f, 'w')
close_after = True
else:
fid = f
close_after = False
# Set up dimension mapping.
shape = list(img.shape) + [1]*4
try:
dimorder = size_cfg['dimension_order']
except KeyError:
if dimorder is None:
dimorder = _miraw_helpers.DIM_DEFAULT_ORDER
if not _miraw_helpers.isValidDimorder(dimorder):
raise ValueError('"%s" is not a valid dimorder argument.' % repr(dimorder))
shape = np.take(shape, _miraw_helpers.dimorderToReverseDimmap(dimorder), axis=0).tolist()
# Extract metadata from the arguments.
base_keys = ['voxel_size_(mm)', 'full_image_size_(voxels)',
'low_end_crop_(voxels)', 'cropped_image_size_(voxels)',
'num_dwis', 'dimension_order']
auto_cfg = {
base_keys[0] : vox_sz,
base_keys[1] : shape[:3],
base_keys[2] : [0, 0, 0],
base_keys[3] : shape[:3],
base_keys[4] : shape[3],
base_keys[5] : dimorder
}
# Overwrite metadata with what the user gave us.
for (k,v) in size_cfg.items():
auto_cfg[k] = v
# Now write the key-value pairs and we're done!
def spaceSepStr(a):
if isinstance(a, list) or isinstance(a, tuple):
return ' '.join([str(x) for x in a])
return str(a)
for k in base_keys:
fid.write(k + ': ' + spaceSepStr(auto_cfg[k]) + '\n')
del(auto_cfg[k])
for (k, v) in auto_cfg.items():
fid.write(k + ': ' + spaceSepStr(v) + '\n')
if close_after:
fid.close()
def applyDimOrder(img, dimorder):
"""Permutes the data dimensions of img as specified by dimorder.
Arguments:
img: numpy ndarray with four dimensions.
dimorder: Four-character string that is a permutation of "XYZI",
indicating the desired dimension order of the output.
Preconditions:
- The current dimension order of "img" is "XYZI".
Returns new_img:
new_img: numpy ndarray with four dimensions. The values in new_img will
be rearranged so that the dimension order is as specified by
dimorder.
Note that if you have any metadata about the image, like voxel sizes, the
order of values in your metadata will no longer match the order of the
dimensions in the output image. You'll need to rearrange those manually with
applyDimOrderToList().
"""
return img.transpose(_miraw_helpers.dimorderToDimmap(dimorder))
def applyDimOrderToList(L, dimorder):
"""Permutes the values in L as specified by dimorder.
Arguments:
L: A list of four values, corresponding (respectively) to the
X, Y, Z, and I dimensions of some dataset.
If you've only got values for X, Y, and Z, pad before calling.
dimorder: Four-character string that is a permutation of "XYZI",
indicating the desired dimension order of the output.
Returns a permuted version of L.
"""
return [L[i] for i in _miraw_helpers.dimorderToDimmap(dimorder)]
def undoDimOrder(img, dimorder):
"""Permutes the data dimensions of img, which currently has the given
dimension order, to match the default "XYZI" dimension order.
Arguments:
img: numpy ndarray with four dimensions.
dimorder: Four-character string that is a permutation of "XYZI",
indicating the current dimension order of img.
Returns new_img:
new_img: numpy ndarray with four dimensions. The values in new_img will
be rearranged so that the dimension order is XYZI.
"""
return img.transpose(_miraw_helpers.dimorderToReverseDimmap(dimorder))
def undoDimOrderOnList(L, dimorder):
"""Permutes the values in L to restore them to a default dimorder.
Arguments:
L: A list of four values, corresponding (in order) to the dimensions
of some dataset.
dimorder: Four-character string that is a permutation of "XYZI",
indicating the current dimension order of the dataset.
Returns a permuted version of L, with values corresponding (respectively) to
the X, Y, Z, and I.
"""
return [L[i] for i in _miraw_helpers.dimorderToReverseDimmap(dimorder)]
def parseBvecs(f):
"""Parses a b vector file's contents.
Arguments:
f: An open file handle, a filename string, or a string of the contents of
a b-vector file.
Prerequisites: this function supports two different plaintext formats
for describing a list of three-element vectors (optionally, with an
additional scalar associated with each vector):
A: three (or four) lines of N space- or comma-delimited values each
B: N lines of three or four space- or comma-delimited values each
Comments (with #, //, or %) are okay, as are blank lines.
Returns a 2-tuple, (vecs, b):
vecs: a numpy Nx3 array, each row being a 3-vector (of length 1 or 0)
b: an array of length N, each element of which is a float. If no
b-values are found in the file, then each element is None.
"""
string = ''
if hasattr(f, 'read'):
string = f.read()
elif isinstance(f, str):
if f.find('\n') < 0:
# f is a filename
fid = open(f, 'r')
string = fid.read()
fid.close()
else:
string = f
else:
raise TypeError('f argument must be either a string or a file object.')
lines = _miraw_helpers.cleanlines(string.splitlines())
vecs = np.array([[float(y) for y in x.replace(',',' ').split()] for x in lines])
if vecs.shape[0] <= 4:
# Format A: transpose to make it match format B.
vecs = vecs.T
    if vecs.shape[1] < 3 or vecs.shape[1] > 4:
        raise IndexError('Vectors must each have three or four components.')
    if vecs.shape[1] == 4:
        # Separate out the b-values.
        b = vecs[:, 3]
        vecs = vecs[:, 0:3]
else:
b = [None] * vecs.shape[0]
# Normalize the vectors: sum the squares along each row, then divide by
# nonzero ones.
norms = np.array(np.sqrt(np.sum(vecs**2, axis=1)))
norms = np.where(norms < 1e-6, 1, norms)
vecs = vecs / norms[:, np.newaxis]
return (vecs, b)
if has_nifti:
def readNifti(infile, dtype=None, memorder=None):
"""Reads a NIfTI file into a numpy array.
Arguments:
infile: Filename of a NIfTI file, or a list of strings. The list
indicates a sequence of files to be concatenated together,
in the order given, in the I dimension (the 4th dimension).
dtype: A numpy data type to cast to. If None, the data type remains
whatever the NIfTI header specifies.
memorder: 'F' or 'C' --- the order for storage in memory. If None, the
order remains whatever the NIfTI library read it as (most
likely 'F', the standard layout order for NIfTI).
Returns (vol, cfg, header):
vol: numpy ndarray.
cfg: dict storing information that you would find in a size_info file.
header: the NIfTI header of infile (or of the first-listed file, with the
fourth dimension size changed).
Note that if the NIfTI header encodes a dimension flip or exchange, this
function DOES NOT apply it to the image before returning. You'll want to
check that with niftiGetXform() and perhaps fix it with applyNiftiXform().
If you ultimately want a raw volume with a non-standard dimension order,
you should apply that AFTER you apply the NIfTI transform, since
applyNiftiXform() assumes that the dimension order is precisely as
represented in the original raw data from the NIfTI file.
Here's the recommended procedure: read, apply the transform, and then
remap to the desired dimension order:
(vol, cfg, header) = readNifti(fname)
(vol, xform, vox_sz) = applyNiftiXform(vol, niftiGetXform(header), cfg)
vol = applyDimOrder(vol, dimorder)
This procedure is what niftiToRaw() does.
"""
if isinstance(infile, str):
# Read this one file.
nii = nifti1.load(infile)
raw = nii.get_data()
header = nii.header
elif isinstance(infile, list):
# Read a list of files: first read in file 0...
nii = nifti1.load(infile[0])
raw = nii.get_data()
header = nii.header
raw.resize(raw.shape + (1,)*(4-raw.ndim))
# ... then concatenate on each other one.
for i in range(1, len(infile)):
nii = nifti1.load(infile[i])
newraw = nii.get_data()
newraw.resize(newraw.shape + (1,)*(4-newraw.ndim))
raw = np.concatenate((raw, newraw), axis=3)
header.set_data_shape(raw.shape)
else:
raise ValueError('"%s" is not a valid infile argument.' % repr(infile))
curr_dtype = raw.dtype
if np.isfortran(raw):
curr_memorder = "F"
else:
curr_memorder = "C"
if dtype is None:
dtype = curr_dtype
if memorder is None:
memorder = curr_memorder
# Create the size_info config dict.
cfg = {}
cfg['voxel_size_(mm)'] = header['pixdim'][1:4].tolist()
cfg['full_image_size_(voxels)'] = raw.shape[:3]
cfg['low_end_crop_(voxels)'] = [0,0,0]
cfg['cropped_image_size_(voxels)'] = cfg['full_image_size_(voxels)']
if raw.ndim > 3:
cfg['num_dwis'] = raw.shape[3]
else:
cfg['num_dwis'] = 1
cfg['dimension_order'] = _miraw_helpers.DIM_DEFAULT_ORDER
return (raw.astype(dtype, order=memorder), cfg, header)
def applyNiftiXform(img, xform, cfg=None):
"""Flips and exchanges dimensions in the given raw image according to a
NIfTI-style 4x4 transform matrix. The resulting image should conform to
the NIfTI standard interpretation: the fastest-changing index is X, going
left to right; the next-fastest is Y, going posterior to anterior, and the
slowest is Z, going inferior to superior.
Arguments:
img: numpy.ndarray with at least three dimensions.
xform: 4x4 numpy array. xform[:3][:3] must contain exactly three nonzero
entries, one on each row.
cfg: Optional dictionary of image metadata.
Returns (new_img, new_xform, vox_sz, cfg):
new_img: Transformed image.
new_xform: Modified transform. new_xform[:3,:3] is diagonal with positive
entries.
vox_sz: Length-3 numpy vector of positive voxel sizes.
If cfg is provided, this function overwrites the values for the following
keys:
'voxel_size_(mm)' -> Voxel size with new dimension order.
'full_image_size_(voxels)' -> Image size with new dimension order.
'low_end_crop_(voxels)' -> [0,0,0]
'cropped_image_size_(voxels)' -> (Same as full image size)
'num_dwis' -> Size of 4th dimension (shouldn't change).
'dimension_order' -> "XYZI"
In the case of a transform that has an oblique rotation or affine component,
this function raises a ValueError.
"""
# According to the NIfTI spec*, the xform applies by left-multiplication to
# the column vector [i,j,k,1]' to specify how spatial coordinates [x,y,z]'
# may be computed from the raw image indices:
# [ s_x[0], s_x[1], s_x[2], s_x[3] ] [ i ] [ x ]
# [ s_y[0], s_y[1], s_y[2], s_y[3] ] * [ j ] = [ y ]
# [ s_z[0], s_z[1], s_z[2], s_z[3] ] [ k ] [ z ]
# [ 1 ]
# For example, this matrix
# [ 0 -3 0 0 ]
# [ 0 0 5 0 ]
# [ -4 0 0 0 ]
# means that
# j encodes the x direction, with an x-flip and a voxel size of 3
# k encodes the y direction, with no y-flip and a voxel size of 5
# i encodes the z direction, with a z-flip and a voxel size of 4
# In other words, the dimension order is ZXY.
#
# * http://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/srow.html
if xform.shape != (4,4):
raise ValueError('xform must be 4x4.')
# --- First, exchange dimensions as specified. ---
# Turn all nonzero entries in the upper part to ones.
perm = np.array(np.abs(xform[:3,:3]) > 1e-6, dtype=int)
dim_map = np.nonzero(perm)[1]
# dim_map[d] says which column of perm is nonzero in row d. In context,
# dim_map[d] indicates which volume index encodes direction d. For example,
# dim_map = [1,2,0] means that j (index=1) encodes x (d=0), k encodes y, and
# i encodes z.
if dim_map.size != 3:
# There should be exactly three nonzero entries in the upper-left 3x3
# portion of the transform matrix.
# TODO Add support for affine transforms.
            raise ValueError('Bad transform --- too many nonzero entries.\n%s' % repr(xform))
# Tack on extra un-permuted entries for any non-spatial dimensions.
dim_map = dim_map.tolist() + list(range(3,img.ndim))
# N-D transpose so that direction d is encoded by index d, for all d.
new_img = img.transpose(dim_map)
# Permute the xform matrix to match.
perm = np.pad(perm, ((0,1),(0,1)), mode='constant')
perm[3,3] = 1
new_xform = xform.dot(perm.transpose())
# The result here should be:
# - new_img has had its dimensions rearranged appropriately.
# - new_xform[:3,:3] is a diagonal matrix of (possibly negative) voxel sizes.
off_diag = np.nonzero((np.ones((3,3))-np.eye(3))*new_xform[:3,:3])[0].size
if off_diag != 0:
raise ValueError('The transformation failed. This should never happen!')
# --- Now flip axes as specified. ---
vox_sz = np.diag(new_xform[:3,:3]).tolist()
for d in range(3):
if vox_sz[d] < 0.0:
# To reverse a specified dimension, we need to swap it with dimension
# zero, flip it, and then swap back. Further explanation:
# http://stackoverflow.com/questions/13240117/reverse-an-arbitrary-dimension-in-an-ndarray
new_img = new_img.swapaxes(d, 0)
new_img = new_img[::-1, ...]
vox_sz[d] *= -1.0
new_xform[d,d] *= -1.0
new_xform[d,3] = new_xform[d,3] - (new_img.shape[0] * vox_sz[d])
new_img = new_img.swapaxes(0, d)
if cfg is not None:
cfg['voxel_size_(mm)'] = vox_sz
cfg['full_image_size_(voxels)'] = new_img.shape[:3]
cfg['low_end_crop_(voxels)'] = [0,0,0]
cfg['cropped_image_size_(voxels)'] = new_img.shape[:3]
if new_img.ndim > 3:
cfg['num_dwis'] = new_img.shape[3]
else:
cfg['num_dwis'] = 1
cfg['dimension_order'] = _miraw_helpers.DIM_DEFAULT_ORDER
return (new_img, new_xform, vox_sz)
def niftiGetXform(hdr):
"""Extracts a single 4x4 transform matrix from a NIfTI header object.
"""
(qform, qcode) = hdr.get_qform(True)
(sform, scode) = hdr.get_sform(True)
if qcode + scode == 0:
# Neither gave us an answer.
return np.eye(4)
elif scode == 1:
# We prefer the sform, since it can represent an affine matrix, so we
# return it even if qform is also defined.
return sform
else:
return qform
def niftiToRaw(infile, outfile=None, sizefile=None, dimorder=None, diskorder='F', dtype=None, dtype_as_ext=False):
"""Converts a NIfTI file (or set of files) to a raw file.
Arguments:
infile: Filename of a NIfTI file, or a list of strings. The list
indicates a sequence of files to be concatenated together,
in the order given, along dimension 4 (where 1 is the
fastest-changing).
outfile: Filename of a raw file to generate. If None, the filename
will be copied from infile, but with an extension indicating
the dtype. See also dtype_as_ext.
sizefile: Filename of a size_info metadata file. If None, it will go
in the same directory as outfile. If empty string, no
size_info file will be generated.
dimorder: Four-character string that is a permutation of "XYZI",
indicating the desired dimension order of the output image.
The default value, None, is equivalent to "XYZI".
diskorder: 'F' or 'C' --- the order for storage on disk.
dtype: A numpy data type to cast to. If None, the data type
either remains whatever the NIfTI header specifies, or is
cast to the type specified by the extension on outfile.
dtype_as_ext: If True, and if outfile is not None, then this appends the
dtype to the end of outfile.
"""
if dimorder is None:
dimorder = _miraw_helpers.DIM_DEFAULT_ORDER
if not _miraw_helpers.isValidDimorder(dimorder):
raise ValueError('"%s" is not a valid dimorder argument.' % repr(dimorder))
# Figure out the desired dtype.
fname_dtype = None
try:
fname_dtype = _miraw_helpers.inferDtypeFromFilename(outfile)
except:
pass
if dtype is not None and fname_dtype is not None:
if fname_dtype != np.dtype(dtype):
raise ValueError("Arguments specify contradictory dtypes:\n outfile: {}\n dtype: {}".format(outfile, dtype))
elif dtype is None:
dtype = fname_dtype
# Now either dtype is None, because both the outfile and dtype arguments
# failed to set it, or it and the outfile agree. If it's None, then we'll
# just keep the dtype from the NIfTI file.
# Read the file and set the dtype once and for all.
(img, cfg, header) = readNifti(infile, dtype)
dtype = img.dtype
# Apply any dimension flips or permutations according to the header.
(img, xform, vox_sz) = applyNiftiXform(img, niftiGetXform(header), cfg)
# And finally put the data in the requested storage order.
img = applyDimOrder(img, dimorder)
# Generate new names for the output files as necessary.
if outfile is None:
if not isinstance(infile, str):
raise ValueError("No outfile specified, but infile %s is not a string!" % repr(infile))
(base, ext) = os.path.splitext(infile)
if ext == ".gz":
(base, ext) = os.path.splitext(base)
outfile = base + "." + str(dtype)
elif dtype_as_ext:
outfile += "." + str(dtype)
if sizefile is None:
sizefile = os.path.join(os.path.dirname(outfile), "size_info")
# Write the size_info file.
if len(sizefile) > 0:
saveSizeInfo(sizefile, img, size_cfg=cfg, infer_name=False)
# And finally write the raw file.
saveRaw(outfile, img, diskorder, dtype_as_ext)
def rawToNifti(infile, sizefile=None, outfile=None, dimorder=None, diskorder='F', dtype=None, split4=False):
"""Converts a raw file to a NIfTI file.
Arguments:
infile: Filename of a raw file.
sizefile: Filename of a size_info config file. If None, attempts to find
this file in the same directory as infile.
outfile: Filename (including .nii) of the NIfTI file to generate.
If None, it will be generated from infile.
dimorder: Four-character string that is a permutation of "XYZI",
indicating the dimension order of the image in "infile".
The purpose of this argument is to rearrange the order of the
dimensions in the infile to match the NIfTI canonical order of
(X, Y, Z, I), where I is the dimension along which multiple
acquisitions are concatenated.
The default value, None, is equivalent to "XYZI".
Note that this argument will be overridden if the size_info
file contains a "dimension_order" value.
diskorder: A string, 'F' or 'C', representing the order in which the data
values are stored in the raw file.
dtype: The numpy dtype for the infile. If None, it is inferred from
infile's extension.
split4: If True, output numbered 3-D images from 4-D input.
"""
(raw, cfg) = readRawWithSizeInfo(infile, sizefile=sizefile, dtype=dtype,
dimorder=dimorder, diskorder=diskorder,
memorder='C')
vox_sz = cfg['voxel_size_(mm)']
# Rearrange dimensions.
try:
dimorder = cfg['dimension_order']
except KeyError:
if dimorder is None:
dimorder = _miraw_helpers.DIM_DEFAULT_ORDER
if not _miraw_helpers.isValidDimorder(dimorder):
raise ValueError('"%s" is not a valid dimorder argument.'%repr(dimorder))
raw_transp = raw.transpose(_miraw_helpers.dimorderToReverseDimmap(dimorder))
if split4 and len(raw_transp.shape) == 4:
raw_stack = [raw_transp[:,:,:,i] for i in range(raw_transp.shape[3])]
else:
raw_stack = [raw_transp]
i = 0
for img in raw_stack:
nii = nifti1.Nifti1Pair(img, np.diag(vox_sz + [0.0]))
nii.get_header().set_xyzt_units('mm')
outfname = outfile
if outfname is None:
outfname = os.path.splitext(infile)[0] + '.nii'
if split4:
outfname = os.path.splitext(outfname)[0] + ('_%03i.nii' % i)
i += 1
nifti1.save(nii, outfname)
def parseConfig(s):
"""Parses a simple config file.
The expected format encodes a simple key-value store: keys are strings,
one per line, and values are arrays. Keys may not have colons in them;
everything before the first colon on each line is taken to be the key,
and everything after is considered a space-separated list of value-array
entries. Leading and trailing whitespace are stripped on each key and
value entry.
No special handling of comments is implemented, but non-conforming lines
(those with no colon) will be silently ignored.
Arguments:
s: A string containing the full contents of a config file.
Returns a dictionary mapping strings to lists. The lists, which may be
singletons, contain ints, floats, and/or strings.
"""
def stringToNumberMaybe(s):
if s.lower() in ['true', 'yes']:
return True
if s.lower() in ['false', 'no']:
return False
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
lines = s.splitlines()
d = {}
for line in lines:
kv = [x.strip() for x in line.split(':',1)]
try:
val_list = [stringToNumberMaybe(x) for x in kv[1].split()]
if len(val_list) != 1:
d[kv[0]] = val_list
else:
d[kv[0]] = val_list[0]
except IndexError:
pass
return d
def readConfigFile(filename):
"""Reads a config file and parse it with parseConfig()."""
fid = open(filename, 'r')
s = fid.read()
fid.close()
return parseConfig(s)
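
# Minimal conversion sketch for the NIfTI helpers above (requires nibabel; the
# filenames are only illustrative, not part of the original module):
#   niftiToRaw("dwi.nii.gz", outfile="dwi.raw", dtype=np.float32, dtype_as_ext=True)
#   rawToNifti("dwi.raw.float32", sizefile="size_info", outfile="dwi_roundtrip.nii")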
| 2.171875
| 2
|
CursoEmVideo-Python3-Mundo1/desafio014.py
|
martinsnathalia/Python
| 0
|
12776739
|
<gh_stars>0
# Write a program that reads a temperature in degrees Celsius and converts it to degrees Fahrenheit.
t = float(input('Enter the temperature in °C: '))
print('The temperature {}°C is equivalent to {}°F'.format(t, ((9 / 5) * t) + 32))
| 4.1875
| 4
|
starkplot/utils/subplots/corner.py
|
nstarman/starkplot
| 2
|
12776740
|
<reponame>nstarman/starkplot<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : functions for shaped subplots grids
# PROJECT : starkplot
#
# ----------------------------------------------------------------------------
### Docstring and Metadata
r"""functions for corner plots
adapted from code provided by <NAME>
TODO
make this work more closely with the corner package
make a function which returns the axis grid and necessary info to make the plot
make this work with bovy_plot scatterplot
"""
__author__ = "<NAME>"
##############################################################################
### IMPORTS
## General
import numpy as np
from scipy import stats
from itertools import product as iter_product
from matplotlib import pyplot
## Project-Specific
from ...decorators import mpl_decorator
##############################################################################
def _type_of_plot(orientation, n_var, i, j):
"""internal helper function for determining plot type in a corner plot
Parameters
----------
    orientation : str
        the orientation
        options: 'lower left', 'lower right', 'upper left', 'upper right'
    n_var : int
        the total number of variables (used by the orientations that are not yet supported)
    i, j : int
        the row, column index
Returns
-------
    (i, j, plot type) : (int, int, str)
'remove' : do not show this plot
'same' : the axes are the same
'compare' : compare the two different axes
"""
if orientation == "lower left":
if j > i:
return i, j, "remove"
elif j == i:
return i, j, "same"
else: # j < i
return i, j, "compare"
elif orientation == "lower right":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'remove'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
elif orientation == "upper left":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'compare'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'remove'
elif orientation == "upper right":
raise ValueError("not yet supported orientation")
# if j < i:
# return i, j, 'remove'
# elif j == i:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
else:
raise ValueError("not supported orientation")
# /def
# Staircase plotting function
def corner_plot(
data,
data_labels=None,
orientation="lower left",
draw_contours=False,
fig=None,
axs=None,
savefig=False,
**kw
):
"""corner_plot
Take in N variables in M samples and plot their correlations.
Parameters
----------
data : (mxn) array
The input data. The first axis should be the sample
number and the second axis should be the variable
data_labels : (length n array)
the variable labels
orientation : str
the orientation about which this is `centered'
options: 'lower left', 'lower right', 'upper left', 'upper right'
fig : matplotlib Figure, optional
The input figure to plot on.
If None then make one
axs : matplotlib axes ndarray, optional
The input axis to plot on.
If None then make one
**kw : passed to correlation plots
Returns
-------
fig : Figure
matplotlib figure
axs : Axes array
array of matplotlib axes
"""
# Figure out the number of variables
n_var = len(data[0, :])
if data_labels is None:
        data_labels = ["q " + str(i + 1) for i in range(n_var)]
# Check if the figure was provided
if fig is None:
fig = pyplot.figure(figsize=(int(n_var + 3), int(n_var + 3)))
# /if
if axs is None:
axs = fig.subplots(nrows=n_var, ncols=n_var)
# /if
# loop over the number of variables
# i = index along columns (down)
# j = index along rows (across)
for i, j in iter_product(range(n_var), range(n_var)):
i, j, plot_type = _type_of_plot(orientation, n_var, i, j)
# Maxima and minima
xmin = np.min(data[:, j])
xmax = np.max(data[:, j])
ymin = np.min(data[:, i])
ymax = np.max(data[:, i])
# If this is an upper-right plot it is a duplicate, remove it
if plot_type == "remove":
axs[i, j].set_axis_off()
continue
# If the two indices are equal just make a histogram of the data
elif plot_type == "same":
# Make and plot the kernel
kernel = stats.gaussian_kde(data[:, i])
kernel_grid = np.linspace(
np.min(data[:, i]), np.max(data[:, i]), 1000
)
kernel_evaluate = kernel.evaluate(kernel_grid)
axs[i, j].plot(kernel_grid, kernel_evaluate, color="Black")
# Decorate
axs[i, j].set_xlim(np.min(data[:, i]), np.max(data[:, i]))
axs[i, j].tick_params(labelleft=False, labelright=True)
axs[i, j].set_ylabel("KDE")
axs[i, j].yaxis.set_label_position("right")
# If the two indices are not equal make a scatter plot
elif plot_type == "compare":
if draw_contours:
# Make the 2D gaussian KDE
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([data[:, j], data[:, i]])
kernel = stats.gaussian_kde(values)
kernel_evaluate = np.reshape(kernel(positions).T, xx.shape)
# Make contours out of the KDE
cfset = axs[i, j].contourf(
xx, yy, kernel_evaluate, cmap="Blues"
)
cset = axs[i, j].contour(
xx, yy, kernel_evaluate, colors="Black"
)
# Decorate
axs[i, j].set_xlim(xmin, xmax)
axs[i, j].set_ylim(ymin, ymax)
else: # no contours
axs[i, j].scatter(data[:, j], data[:, i], **kw)
# /if
# /if
# Make X axis
if i == n_var - 1:
axs[i, j].set_xlabel(data_labels[j])
else:
axs[i, j].tick_params(labelbottom=False)
# /if
# Make Y axis
if (j == 0) and (i != 0):
axs[i, j].set_ylabel(data_labels[i])
else:
axs[i, j].tick_params(labelleft=False)
# /if
# /for
# TODO replace by proper starkplot functions
if isinstance(savefig, str):
fig.savefig(savefig)
return fig, axs
# /def
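
# Usage sketch (an illustration only; the sample array and labels are made up):
#   samples = np.random.randn(500, 3)
#   fig, axs = corner_plot(samples, data_labels=["x", "y", "z"], s=2)
#   fig, axs = corner_plot(samples, draw_contours=True, savefig="corner.png")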
# --------------------------------------------------------------------------
def staircase_plot(
data,
data_labels=None,
draw_contours=False,
fig=None,
axs=None,
savefig=False,
**kw
):
"""staircase_plot
Take in N variables in M samples and plot their correlations.
Parameters
----------
data : (mxn) array
The input data. The first axis should be the sample
number and the second axis should be the variable
data_labels : (length n array)
the variable labels
fig : matplotlib Figure, optional
The input figure to plot on.
If None then make one
axs : matplotlib axes ndarray, optional
The input axis to plot on.
If None then make one
Returns
-------
fig : Figure
matplotlib figure
axs : Axes array
array of matplotlib axes
"""
return corner_plot(
data,
data_labels=data_labels,
draw_contours=draw_contours,
fig=fig,
axs=axs,
savefig=savefig,
orientation="lower left",
**kw
)
##############################################################################
# End
| 2.328125
| 2
|
dbaas/drivers/factory.py
|
jaeko44/python_dbaas
| 0
|
12776741
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
import re
__all__ = ['DriverFactory']
class DriverFactory(object):
@classmethod
def is_driver_available(cls, name):
try:
cls.get_driver_class(name)
return True
except NotImplementedError:
return False
@classmethod
def get_driver_class(cls, driver_name):
driver_name = driver_name.lower()
# TODO: import Engines dynamically
if re.match(r'^mongo.*', driver_name):
from .mongodb import MongoDB
return MongoDB
elif re.match(r'^mysql.*', driver_name):
from .mysqldb import MySQL
return MySQL
elif re.match(r'^redis.*', driver_name):
from .redis import Redis
return Redis
elif re.match(r'^fake.*', driver_name):
from .fake import FakeDriver
return FakeDriver
raise NotImplementedError()
@classmethod
def factory(cls, databaseinfra):
if not (databaseinfra and databaseinfra.engine and databaseinfra.engine.engine_type):
raise TypeError(_("DatabaseInfra is not defined"))
driver_name = databaseinfra.engine.engine_type.name
driver_class = cls.get_driver_class(driver_name)
return driver_class(databaseinfra=databaseinfra)
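
# Example sketch (names are illustrative): the factory resolves drivers by the
# engine-type name prefix, e.g.
#   DriverFactory.is_driver_available("mysql_5_7")   # -> True
#   DriverFactory.get_driver_class("redis_4_0")      # -> drivers.redis.Redis
#   DriverFactory.get_driver_class("oracle")         # -> raises NotImplementedError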
| 2.234375
| 2
|
packages/pyright-internal/src/tests/samples/assignment7.py
|
lipovsek/pytea
| 0
|
12776742
|
# This sample tests a particularly difficult set of dependent
# assignments that involve tuple packing and unpacking.
# pyright: strict
v1 = ""
v3 = ""
v2, _ = v1, v3
v4 = v2
for _ in range(1):
v1 = v4
v2, v3 = v1, ""
| 2.40625
| 2
|
tests/test_mysql_connection_pool.py
|
maypimentel/mysql_connection_pool
| 0
|
12776743
|
<gh_stars>0
import pytest
from mysql_connection_pool import MysqlPool
from mysql.connector import MySQLConnection
from mysql.connector.errors import PoolError
class TestMysqlConnectionPool:
def setup_method(self, method):
self.pool = MysqlPool(pool_size=2, pool_max_size=2)
def test_cnx_type(self):
cnx = self.pool.get_connection()
assert isinstance(cnx, MySQLConnection)
def test_cnx_and_cursor(self):
cnx = self.pool.get_connection()
cursor = cnx.cursor()
cursor.execute('SELECT * FROM book LIMIT 1;')
cursor.fetchall()
assert cursor.rowcount == 1
def test_pool_empty(self):
cnx1 = self.pool.get_connection()
cnx2 = self.pool.get_connection()
with pytest.raises(PoolError, match='Pool exhausted'):
cnx3 = self.pool.get_connection()
| 2.421875
| 2
|
python_temel_project.py
|
kazimanilaydin/python_temel_project
| 0
|
12776744
|
"""
1- Write a function that flattens a list. Its elements may come from nested lists of
several levels (such as [[3],2]) as well as from non-scalar data. For example:
input: [[1,'a',['cat'],2],[[[3]],'dog'],4,5]
output: [1,'a','cat',2,3,'dog',4,5]
2- Write a function that reverses the elements of a given list. If the elements of the
list also contain lists, reverse their elements as well. For example:
input: [[1, 2], [3, 4], [5, 6, 7]]
output: [[7, 6, 5], [4, 3], [2, 1]]
"""
def flatten(lst):
    if not lst:
return lst
if isinstance(lst[0], list):
return flatten(*lst[:1]) + flatten(lst[1:])
return lst[:1] + flatten(lst[1:])
"""
TESTING
"""
print("TEST1\n", end="\n")
testArr1Initial = [[1,2,[3],4],[[[5]],6],7,8]
print("Test1 > Initial state of the array: ", testArr1Initial)
testArr1Result = flatten(testArr1Initial)
print("Test1 > Latest status of the array: ", testArr1Result)
print("\nTEST2\n", end="\n")
testArr2Initial = [[1,'a',['cat'],2],[[[3]],'dog'],4,5]
print("Test2 > Initial state of the array: ", testArr2Initial)
testArr2Result = flatten(testArr2Initial)
print("Test2 > Latest status of the array: ", testArr2Result)
def reverse(lst):
lst.reverse()
for item in lst:
if isinstance(item, list):
for sublist in item:
if isinstance(sublist, list):
sublist.reverse()
item.reverse()
return lst
"""
TESTING
"""
print("TEST1\n", end="\n")
testArr1Initial = [[1, 4, [4,2]], [3, 4], [5, 6, 7]]
print("Test1 > Initial state of the array: ", testArr1Initial)
testArr1Result = reverse(testArr1Initial)
print("Test1 > Latest status of the array: ", testArr1Result)
print("\nTEST2\n", end="\n")
testArr2Initial = [[1, 2], [3, 4], [5, 6, 7]]
print("Test2 > Initial state of the array: ", testArr2Initial)
testArr2Result = reverse(testArr2Initial)
print("Test2 > Latest status of the array: ", testArr2Result)
| 4.09375
| 4
|
Desafios/des001.py
|
vitormrts/ExerciciosPython
| 1
|
12776745
|
<reponame>vitormrts/ExerciciosPython
nome = input('\033[1;31mHello! What is your name? ')
n1 = int(input(f'\033[1;31mNice to meet you, {nome}!\n\033[34mCould you please type a number? '))
n2 = int(input('\033[34mAlright! Type another number: '))
print('\033[32mHmm... Let me think...\033[m')
s = n1+n2
print('.')
print('.')
print('.')
print('.')
print('.')
print('.')
print(f'\033[33mGot it!!! The sum of \033[1;35m{n1}\033[m and \033[1;36m{n2}\033[m is \033[1;31m{s}\033[m! Correct? ')
| 3.296875
| 3
|
models/CC_LCM.py
|
Fang-Lansheng/C-3-Framework
| 0
|
12776746
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from config import cfg
from misc.utils import *
if cfg.DATASET == 'SHHB':
from datasets.SHHB.setting import cfg_data
elif cfg.DATASET == 'SHHA':
from datasets.SHHA.setting import cfg_data
elif cfg.DATASET == 'UCSD':
from datasets.UCSD.setting import cfg_data
elif cfg.DATASET == 'Mall':
from datasets.Mall.setting import cfg_data
elif cfg.DATASET == 'FDST':
from datasets.FDST.setting import cfg_data
class CrowdCounter(nn.Module):
def __init__(self, gpus, model_name, pretrained=True):
super(CrowdCounter, self).__init__()
self.model_name = model_name
net = None
if model_name == 'AMRNet':
from .SCC_Model.AMRNet import AMRNet as net
self.CCN = net(pretrained)
if len(gpus) > 1: # for multi gpu
self.CCN = torch.nn.DataParallel(self.CCN, device_ids=gpus).cuda(gpus[0])
else: # for one gpu
self.CCN = self.CCN.cuda()
self.loss_sum_fn = nn.L1Loss().cuda()
self.SumLoss = True
@property
def loss(self):
return self.loss_total
def loss_sum(self):
return self.loss_sum
def forward(self, img, gt_map):
count_map = self.CCN(img)
gt_map = torch.unsqueeze(gt_map, 1)
self.loss_total, self.loss_sum = self.build_loss(count_map, gt_map)
return count_map
def build_loss(self, count_map, gt_map):
loss_total, loss_sum_all = 0., 0.
if self.SumLoss:
gt_map_ = gt_map / cfg_data.LOG_PARA
kernel3, kernel4, kernel5 = 2, 4, 8
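            # Convolving the ground-truth density map with an all-ones kernel at a
            # matching stride is sum-pooling: each output cell holds the head count of
            # one patch, i.e. the local counting map the L1 loss is computed against.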
# filter3 = torch.ones(1, 1, kernel3, kernel3, requires_grad=False).cuda()
# filter4 = torch.ones(1, 1, kernel4, kernel4, requires_grad=False).cuda()
filter5 = torch.ones(1, 1, kernel5, kernel5, requires_grad=False).cuda()
# gt_lcm_3 = F.conv2d(gt_map_, filter3, stride=kernel3)
# gt_lcm_4 = F.conv2d(gt_map_, filter4, stride=kernel4)
gt_lcm_5 = F.conv2d(gt_map_, filter5, stride=kernel5)
loss_sum_all = self.loss_sum_fn(count_map, gt_lcm_5)
loss_total += loss_sum_all
return loss_total, loss_sum_all
def test_forward(self, img):
count_map = self.CCN(img)
return count_map
| 2.078125
| 2
|
Code/Regression.py
|
Kaamraan19064/Analysis-And-Prediction-of-Delhi-Climate-using-ML
| 0
|
12776747
|
import numpy as np
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import statsmodels.api as sm
import matplotlib.dates as mdates
import warnings
import itertools
import dateutil
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV as gsc
from sklearn.linear_model import Ridge,Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
def main ():
# Using svm
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
S1,S2=AQI_SVM(data)
S3,S4=AQI_Feature_importance_SVM(data)
S5,S6=AQI_Domain_Knowledge_SVM(data)
S7,S8=AQI_without_Domain_Knowledge_SVM(data)
##Linear Regression
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
LR1,LR2=AQI(data)
LR3,LR4=AQI_Feature_importance(data)
    LR5,LR6=AQI_Domain_Knowledge(data)
LR7,LR8=AQI_without_Domain_Knowledge(data)
    ## Predicting for next day
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
normalize(data)
y=pd.read_csv('AQI_prediction_add.csv')
LR_F1,LR_F2=AQI_Future(data,y.AQI_predicted)
LR_F3,LR_F4=AQI_Feature_importance_Future(data,y.AQI_predicted)
LR_F5,LR_F6=AQI_Domain_Knowledge_Future(data,y.AQI_predicted)
LR_F7,LR_F8=AQI_without_Domain_Knowledge_Future(data,y.AQI_predicted)
##Predicting for Autumn Season
data=pd.read_csv('autumn_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_A1,LR_A2=AQI(data)
LR_A3,LR_A4=AQI_Feature_importance(data)
LR_A5,LR_A6=AQI_Domain_Knowledge(data)
LR_A7,LR_A8=AQI_without_Domain_Knowledge(data)
##Predicting for Summer Season
data=pd.read_csv('summer_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_S1,LR_S2=AQI(data)
LR_S3,LR_S4=AQI_Feature_importance(data)
LR_S5,LR_S6=AQI_Domain_Knowledge(data)
LR_S7,LR_S8=AQI_without_Domain_Knowledge(data)
##Predicting for Winter Season
data=pd.read_csv('winter_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_W1,LR_W2=AQI(data)
LR_W3,LR_W4=AQI_Feature_importance(data)
LR_W5,LR_W6=AQI_Domain_Knowledge(data)
LR_W7,LR_W8=AQI_without_Domain_Knowledge(data)
##Using Ridge
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
## Using all features
R1,R2=AQI_Ridge(data,h)
R3,R4=AQI_Feature_importance_Ridge(data,h)
R5,R6=AQI_Domain_Knowledge_Ridge(data,h)
R7,R8=AQI_without_Domain_Knowledge_Ridge(data,h)
##Future
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
y = pd.read_csv('AQI_prediction_add.csv')
R_F1,R_F2=AQI_Future_Ridge(data, y.AQI_predicted,h)
R_F3,R_F4=AQI_Feature_importance_Future_Ridge(data, y.AQI_predicted,h)
R_F5,R_F6=AQI_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
R_F7,R_F8=AQI_without_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
##using Lasso
data=pd.read_csv('Original_with_dummies.csv')
y=data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI']=y
h=BestParams(data)
L1,L2=AQI_Lasso(data,h)
L3,L4=AQI_Feature_importance_Lasso(data,h)
L5,L6=AQI_Domain_Knowledge_Lasso(data,h)
L7,L8=AQI_without_Domain_Knowledge_Lasso(data,h)
## Predicting for next day
data=pd.read_csv('Original_with_dummies.csv')
normalize(data)
h=BestParams(data)
y=pd.read_csv('AQI_prediction_add.csv')
L_F1,L_F2=AQI_Future_Lasso(data,y.AQI_predicted,h)
L_F3,L_F4=AQI_Feature_importance_Future_Lasso(data,y.AQI_predicted,h)
L_F5,L_F6=AQI_Domain_Knowledge_Future_Lasso(data,y.AQI_predicted,h)
L_F7,L_F8=AQI_without_Domain_Knowledge_Future_Lasso(data,y.AQI_predicted,h)
##Random forest
#All features
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
F1,F2=AQI_RF(data)
F3,F4=AQI_Feature_importance_RF(data)
F5,F6=AQI_Domain_Knowledge_RF(data)
F7,F8=AQI_without_Domain_Knowledge_RF(data)
## Predicting for next day
data = pd.read_csv('Original_with_dummies.csv')
normalize(data)
y = pd.read_csv('AQI_prediction_add.csv')
F_F1,F_F2=AQI_Future_RF(data, y.AQI_predicted)
F_F3,F_F4=AQI_Feature_importance_Future_RF(data, y.AQI_predicted)
F_F5,F_F6=AQI_Domain_Knowledge_Future_RF(data, y.AQI_predicted)
F_F7,F_F8=AQI_without_Domain_Knowledge_Future_RF(data, y.AQI_predicted)
##NN
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
layer = [4,4,4]  # three hidden layers of four units each, shared by all MLP models below
NN1,NN2=AQI_NN(data, layer)
NN3,NN4=AQI_Feature_importance_NN(data, layer)
NN5,NN6=AQI_Domain_Knowledge_NN(data, layer)
NN7,NN8=AQI_without_Domain_Knowledge_NN(data, layer)
## Predicting for next day
data=pd.read_csv('Original_with_dummies.csv')
y=pd.read_csv('AQI_prediction_add.csv')
normalize(data)
NN_F1,NN_F2=AQI_Future_NN(data,y.AQI_predicted, layer)
NN_F3,NN_F4=AQI_Feature_importance_Future_NN(data,y.AQI_predicted,layer)
NN_F5,NN_F6=AQI_Domain_Knowledge_Future_NN(data,y.AQI_predicted,layer)
NN_F7,NN_F8=AQI_without_Domain_Knowledge_Future_NN(data,y.AQI_predicted, layer)
##All features v/s all models
Bar_graph (LR1,LR2,L1,L2,R1,R2,S1,S2,F1,F2,NN1,NN2)
##Important features v/s all models
Bar_graph (LR3,LR4,L3,L4,R3,R4,S3,S4,F3,F4,NN3,NN4)
##Future prediction with important features v/s all models except SVM
Bar_graph_without_svm (LR_F3,LR_F4,L_F3,L_F4,R_F3,R_F4,F_F3,F_F4,NN_F3,NN_F4)
##Autumn, summer and winter seasons
Bar_graph_season (LR_A3,LR_A4,LR_S3,LR_S4,LR_W3,LR_W4)
##Best Model Analysis using Data
data = pd.read_csv('Original_with_dummies.csv')
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
train=90
test=18
tips=[]
LABELS=[]
d=[0,1,2,3,4,5,6,7,8,9]
for i in range (10):
train=train+30
test=test+6
LABELS.append(train)
tips.append(train_test_data_prepare(data, train, test, 15))
plt.plot(tips)
plt.xticks(d, LABELS)
plt.xlabel("No of Days")
plt.ylabel("RMSE")
plt.title("Models")
plt.legend()
plt.show()
#Predicting AQI using all features
def AQI(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
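#Predicting AQI after dropping the pollutant columns and the domain-knowledge features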
def AQI_without_Domain_Knowledge(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
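#Predicting the next day's AQI (the shifted target y is passed in separately) using all features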
def AQI_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_Future(data,y):
tree_clf = ExtraTreesRegressor()
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area','month_10','month_11',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
data=data.drop('month_10',axis=1)
data=data.drop('month_11',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
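#Plotting helpers: compare predicted and expected AQI for the first 250 samples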
def graph_training(y_pred,y_train):
all_samples = [i for i in range(0, 250)]
y_pred=y_pred[0:250]
y_train=y_train[0:250]
plt.plot(all_samples, y_pred,label='Predicted')
plt.plot(all_samples , y_train,label='Expected')
plt.xlabel("No of Samples")
plt.ylabel("AQI")
plt.title("Training")
plt.legend()
plt.show()
def graph_testing(y_pred,y_val):
all_samples = [i for i in range(0, 250)]
y_pred=y_pred[0:250]
y_val=y_val[0:250]
plt.plot(all_samples, y_pred,label='Predicted')
plt.plot(all_samples , y_val,label='Expected')
plt.xlabel("No of Samples")
plt.ylabel("AQI")
plt.title("Validation")
plt.legend()
plt.show()
## SVM models (support vector regression)
def AQI_SVM(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_SVM(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_SVM(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_SVM(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
# data=data.drop('month_10',axis=1)
# data=data.drop('month_11',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
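#Grid-search the Ridge regularisation strength (alpha) over a fixed list of candidates.
#Note: main() reuses the returned alpha for the Lasso models as well, although it is tuned on Ridge.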
def BestParams(data):
y = data.AQI
data = data.drop('AQI', axis=1)
Hyper_params = np.array(
[0.011, 0.1, 0.001, 0.01, 0.3, 0.2, 0.6, 0.8, 0.001, 0.0001, 3, 4, 1, 2.4])
Reg_model = Ridge()
GSCgrid = gsc(estimator=Reg_model, param_grid=dict(alpha=Hyper_params))
GSCgrid.fit(data, y)
# print('Hyper Parameter for Ridge:', GSCgrid.best_estimator_.alpha)
return GSCgrid.best_estimator_.alpha
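#Min-max scale every column in place to the [0, 1] range (a constant column would cause a division by zero)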
def normalize(data):
for c in data.columns:
max = data[c].max()
min = data[c].min()
data[c] = (data[c] - min) / (max - min)
return data
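##Ridge regression variants of the models above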
def AQI_Ridge(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_Ridge(data,h):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_Ridge(data,h):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Ridge(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_Ridge(data,y,h):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_Future_Ridge(data,y,h):
tree_clf = ExtraTreesRegressor()
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_Future_Ridge(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_Ridge(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
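##Lasso regression variants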
#Predicting AQI using all features
def AQI_Lasso(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_Lasso(data,h):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_Lasso(data,h):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Lasso(data,h):
y=data.AQI
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_Lasso(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_Future_Lasso(data,y,h):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_Future_Lasso(data,y,h):
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_Lasso(data,y,h):
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
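##Random forest variants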
def AQI_RF(data):
y=data.AQI
data=data.drop('AQI',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_RF(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_RF(data):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_RF(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_RF(data,y):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_Future_RF(data,y):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_Future_RF(data,y):
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_RF(data,y):
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
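##Neural network (MLPRegressor) variants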
#Predicting AQI using all features
def AQI_NN(data,layer):
y=data.AQI
data=data.drop('AQI',axis=1)
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_NN(data, layer):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_NN(data, layer):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_NN(data,layer):
y=data.AQI
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_NN(data,y, layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by feature importance (ExtraTreesRegressor)
def AQI_Feature_importance_Future_NN(data,y, layer):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using hand-picked domain-knowledge features
def AQI_Domain_Knowledge_Future_NN(data,y,layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_NN(data,y, layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
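#Grouped bar chart of train/test RMSE for the six models (linear regression, Lasso, Ridge, SVM, random forest, neural network)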
def Bar_graph (a1,a2,b1,b2,c1,c2,d1,d2,e1,e2,f1,f2):
barWidth = 0.2
bars2 = [a2,b2,c2,d2,e2,f2]
bars1 = [a1,b1,c1,d1,e1,f1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black', capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['LinearRegression', 'LR with Lasso','LR with Ridge','SVM','random forest', 'Neural Network'])
plt.ylabel('RMSE')
plt.xlabel('Models')
plt.legend()
plt.show()
def Bar_graph_without_svm(a1,a2,b1,b2,c1,c2,d1,d2,e1,e2):
barWidth = 0.2
bars2 = [a2,b2,c2,d2,e2]
bars1 = [a1,b1,c1,d1,e1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black',capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['LinearRegression', 'LR with Lasso','LR with Ridge','random forest', 'Neural Network'])
plt.ylabel('RMSE')
plt.xlabel('Models')
plt.legend()
plt.show()
def Bar_graph_season(a1,a2,b1,b2,c1,c2):
barWidth = 0.2
bars2 = [a2,b2,c2]
bars1 = [a1,b1,c1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black',capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['Autumn', 'Summer','Winter'])
plt.ylabel('RMSE')
plt.xlabel('Seasons')
plt.legend()
plt.show()
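#Blocked cross-validation over consecutive train/test windows: fits an MLP on each fold and returns the mean test RMSE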
def train_test_data_prepare(data, train, test, folds):
d_y = pd.read_csv('AQI_prediction_add.csv')
y = d_y.AQI_predicted
x_data = []
y_data = []
errors = []
for i in range(folds):
x_train = data.loc[i*(train+test):(i*(train+test)+train - 1), :]
x_test = data.loc[(i*(train+test)+train):(i+1)*(train+test)-1, :]
y_train = y.loc[i * (train + test):(i * (train + test) + train - 1)]
y_test = y.loc[(i * (train + test) + train):(i + 1) * (train + test) - 1]
regr = MLPRegressor(hidden_layer_sizes=(4, 4),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
# batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(x_train, y_train)
print("xxxx")
y_pred = regr.predict(x_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
y_pred = regr.predict(x_test)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
errors.append(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("Cross validation test error = ", sum(errors)/len(errors))
return sum(errors)/len(errors)
main()
| 2.671875
| 3
|
invenio_rdm_records/records/__init__.py
|
kprzerwa/invenio-rdm-records
| 0
|
12776748
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Data access layer."""
from .api import BibliographicDraft, BibliographicRecord
__all__ = (
"BibliographicDraft",
"BibliographicRecord",
)
| 1.140625
| 1
|
examples/xml_parsing/parser.py
|
abhiabhi94/learn-python
| 0
|
12776749
|
<filename>examples/xml_parsing/parser.py
import xml.etree.ElementTree as ET
tree = ET.parse('data.xml')
| 1.984375
| 2
|
compiler/dna/components/DNABattleCell.py
|
AnonymousDeveloper65535/libpandadna
| 36
|
12776750
|
<filename>compiler/dna/components/DNABattleCell.py
class DNABattleCell:
COMPONENT_CODE = 21
def __init__(self, width, height, pos):
self.width = width
self.height = height
self.pos = pos
def setWidth(self, width):
self.width = width
def setHeight(self, height):
self.height = height
def setWidthHeight(self, width, height):
self.setWidth(width)
self.setHeight(height)
def setPos(self, pos):
self.pos = pos
| 2.34375
| 2
|