| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
SBEX.py
|
RyanTreadwell/MetaViewer
| 1
|
12776851
|
<reponame>RyanTreadwell/MetaViewer
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 10:02:29 2018
@author: Ryan
"""
from tkinter import *
root=Tk()
frame=Frame(root,width=300,height=300)
frame.grid(row=0,column=0)
canvas=Canvas(frame,bg='#FFFFFF',width=300,height=300,scrollregion=(0,0,500,500))
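# Note: the scrollbars below are linked to the canvas both ways: each scrollbar's command drives
# canvas.xview/yview, and the canvas reports its position back through the scrollbars' set() methods.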
hbar=Scrollbar(frame,orient=HORIZONTAL)
hbar.pack(side=BOTTOM,fill=X)
hbar.config(command=canvas.xview)
vbar=Scrollbar(frame,orient=VERTICAL)
vbar.pack(side=RIGHT,fill=Y)
vbar.config(command=canvas.yview)
canvas.config(width=300,height=300)
canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
canvas.pack(side=LEFT,expand=True,fill=BOTH)
root.mainloop()
| 2.453125
| 2
|
search_insert_position/solution.py
|
mahimadubey/leetcode-python
| 528
|
12776852
|
# -*- coding: utf-8 -*-
"""
Given a sorted array and a target value, return the index if the target is
found. If not, return the index where it would be if it were inserted in
order.
You may assume no duplicates in the array.
Here are few examples.
[1,3,5,6], 5 → 2
[1,3,5,6], 2 → 1
[1,3,5,6], 7 → 4
[1,3,5,6], 0 → 0
"""
class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
n = len(nums)
if not nums:
return 0
else:
left = 0
right = n - 1
while left <= right:
mid = (left + right) // 2  # integer division so mid stays a valid list index on Python 3
if nums[mid] == target:
return mid
elif (mid < n - 1 and nums[mid] < target
and nums[mid + 1] > target):
return mid + 1
elif target < nums[mid]:
right = mid - 1
else:
left = mid + 1
if left > n - 1:
return n
elif right < 0:
return 0
a = [1, 3, 5, 6]
s = Solution()
print(s.searchInsert(a, 5))
print(s.searchInsert(a, 2))
print(s.searchInsert(a, 7))
print(s.searchInsert(a, 0))
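# A minimal alternative sketch (not part of the original solution): for a sorted list without
# duplicates, the standard library's bisect_left returns the same insertion index.
from bisect import bisect_left
print(bisect_left(a, 5), bisect_left(a, 2), bisect_left(a, 7), bisect_left(a, 0))  # 2 1 4 0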
| 4.15625
| 4
|
h2o-py/tests/testdir_algos/modelselection/pyunit_PUBDEV_8427_modelselection_coefs.py
|
LongerVision/h2o-3
| 1
|
12776853
|
<filename>h2o-py/tests/testdir_algos/modelselection/pyunit_PUBDEV_8427_modelselection_coefs.py
from __future__ import print_function
from __future__ import division
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.model_selection import H2OModelSelectionEstimator as modelSelection
# test the modelselection coef() and coef_norm() work properly.
def test_modelselection_gaussian_coefs():
d = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
my_y = "GLEASON"
my_x = ["AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"]
allsubsets_model = modelSelection(seed=12345, max_predictor_number=7, mode="allsubsets")
allsubsets_model.train(training_frame=d, x=my_x, y=my_y)
coefs_allsubsets = allsubsets_model.coef()
coefs_norm_allsubsets = allsubsets_model.coef_norm()
maxr_model = modelSelection(seed=12345, max_predictor_number=7, mode="maxr")
maxr_model.train(training_frame=d, x=my_x, y=my_y)
coefs_maxr = maxr_model.coef()
coefs_norm_maxr = maxr_model.coef_norm()
for ind in list(range(len(coefs_allsubsets))):
one_coef_allsubsets = coefs_allsubsets[ind]
one_coef_norm_allsubsets = coefs_norm_allsubsets[ind]
one_coef_maxr = coefs_maxr[ind]
one_coef_norm_maxr = coefs_norm_maxr[ind]
# coefficients obtained from accessing model_id, generate model and access the model coeffs
one_model = h2o.get_model(allsubsets_model._model_json["output"]["best_model_ids"][ind]['name'])
model_coef = one_model.coef()
model_coef_norm = one_model.coef_norm()
# get coefficients of individual predictor subset size
subset_size = ind+1
one_model_coef = allsubsets_model.coef(subset_size)
one_model_coef_norm = allsubsets_model.coef_norm(subset_size)
# check coefficient dicts are equal
pyunit_utils.assertCoefDictEqual(one_coef_allsubsets, model_coef, 1e-6)
pyunit_utils.assertCoefDictEqual(one_coef_norm_allsubsets, model_coef_norm, 1e-6)
pyunit_utils.assertCoefDictEqual(one_model_coef, model_coef, 1e-6)
pyunit_utils.assertCoefDictEqual(one_model_coef_norm, model_coef_norm, 1e-6)
pyunit_utils.assertCoefDictEqual(one_model_coef, one_coef_maxr, 1e-6)
pyunit_utils.assertCoefDictEqual(one_model_coef_norm, one_coef_norm_maxr, 1e-6)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_modelselection_gaussian_coefs)
else:
test_modelselection_gaussian_coefs()
| 2.5
| 2
|
app/constants.py
|
trongdth/python-flask
| 0
|
12776854
|
# -*- coding: utf-8 -*-
USER_ROLE = {
'USER': 0,
'MODERATOR': 1,
'ADMINISTRATOR': 2,
}
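# Example lookup (illustrative): USER_ROLE['ADMINISTRATOR'] == 2, so role checks can compare
# integers, e.g. user.role >= USER_ROLE['MODERATOR'] (user.role is a hypothetical field here).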
| 1.140625
| 1
|
goutdotcom/flareaid/tests/test_models.py
|
Spiewart/goutdotcom
| 0
|
12776855
|
<gh_stars>0
from decimal import *
import pytest
from .factories import FlareAidFactory
pytestmark = pytest.mark.django_db
class TestFlareAidMethods:
def test_get_absolute_url(self):
FlareAid = FlareAidFactory()
assert FlareAid.get_absolute_url() == f"/flareaid/{FlareAid.pk}/"
def test__str__(self):
FlareAid = FlareAidFactory()
assert FlareAid.__str__() == str(FlareAid.decision_aid())
def test_monoarticular_aid(self):
FlareAid = FlareAidFactory(monoarticular=True)
assert (
FlareAid.monoarticular_aid()
== "Any monoarticular flare can be effectively treated with a corticosteroid injection by a rheumatologist or other provider."
)
def test_decision_aid_perfect_health(self):
FlareAid = FlareAidFactory(perfect_health=True)
assert FlareAid.decision_aid() == "NSAID"
def test_decision_aid_perfect_health_isnone(self):
FlareAid = FlareAidFactory(perfect_health=None)
assert FlareAid.decision_aid() == "Need More Information"
def test_decision_aid_CKD_no_diabetes(self):
FlareAid = FlareAidFactory(perfect_health=False, ckd__value=True, diabetes__value=False)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_CKD_and_diabetes(self):
FlareAid = FlareAidFactory(perfect_health=False, ckd__value=True, diabetes__value=True)
assert FlareAid.decision_aid() == "doctor"
def test_decision_aid_bleed(self):
FlareAid = FlareAidFactory(
perfect_health=False,
bleed__value=True,
heartattack__value=False,
stroke__value=False,
anticoagulation__value=False,
ibd__value=False,
ckd__value=False,
colchicine_interactions__value=False,
)
assert FlareAid.decision_aid() == "colchicine"
def test_decision_aid_bleed_CKD_no_diabetes(self):
FlareAid = FlareAidFactory(
perfect_health=False,
bleed__value=True,
ckd__value=True,
diabetes__value=False,
colchicine_interactions__value=False,
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_bleed_colchicine_interactions(self):
FlareAid = FlareAidFactory(
perfect_health=False, bleed__value=True, ckd__value=False, colchicine_interactions__value=True
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_heartattack_CKD_no_diabetes(self):
FlareAid = FlareAidFactory(
perfect_health=False,
heartattack__value=True,
ckd__value=True,
diabetes__value=False,
colchicine_interactions__value=False,
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_heartattack_colchicine_interactions(self):
FlareAid = FlareAidFactory(
perfect_health=False, heartattack__value=True, ckd__value=False, colchicine_interactions__value=True
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_stroke_CKD_no_diabetes(self):
FlareAid = FlareAidFactory(
perfect_health=False,
stroke__value=True,
ckd__value=True,
diabetes__value=False,
colchicine_interactions__value=False,
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_stroke_colchicine_interactions(self):
FlareAid = FlareAidFactory(
perfect_health=False, stroke__value=True, ckd__value=False, colchicine_interactions__value=True
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_anticoagulation_CKD_no_diabetes(self):
FlareAid = FlareAidFactory(
perfect_health=False,
anticoagulation__value=True,
ckd__value=True,
diabetes__value=False,
colchicine_interactions__value=False,
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_anticoagulation_colchicine_interactions(self):
FlareAid = FlareAidFactory(
perfect_health=False, anticoagulation__value=True, ckd__value=False, colchicine_interactions__value=True
)
assert FlareAid.decision_aid() == "steroids"
# NEED TO CHANGE MODEL TO MAKE IBD WORK, IT IS COMMENTED OUT FOR SOME REASON NOT WORKING
"""
def test_decision_aid_ibd_CKD_no_diabetes(self):
FlareAid = FlareAidFactory(
perfect_health=False, ibd__value=True, ckd__value=True, colchicine_interactions__value=False, diabetes__value=False
)
assert FlareAid.decision_aid() == "steroids"
def test_decision_aid_ibd_colchicine_interactions(self):
FlareAid = FlareAidFactory(
perfect_health=False, ibd__value=True, ckd__value=False, colchicine_interactions__value=True
)
assert FlareAid.decision_aid() == "steroids"
"""
| 2.265625
| 2
|
bodies/__init__.py
|
cburggie/py3D
| 0
|
12776856
|
<gh_stars>0
from Plane import Plane
from CheckPlane import CheckPlane
from CheckCircle import CheckCircle
from ConcCircle import ConcCircle
from Sphere import Sphere
from hmSphere import hmSphere
from TruncSphere import TruncSphere
| 1.265625
| 1
|
Project1/entertainment_center.py
|
Lluna89/full-stack-web-developer-nanodegree
| 9
|
12776857
|
<filename>Project1/entertainment_center.py
import fresh_tomatoes
import media
# Movie local variables
storyline = '''A nameless disillusioned young urban male (<NAME>) fights
insomnia by attending disease support groups until he meets a
kindred spirit -and soap salesman (<NAME>). Together they form
Fight Club, where young men can exert their frustrations and angst
upon one another.'''
image = "https://upload.wikimedia.org/wikipedia/en/thumb/f/fc/Fight_Club_poster.jpg/220px-Fight_Club_poster.jpg"# noqa
trailer = "https://www.youtube.com/watch?v=SUXWAEX2jlg"
# Object instance
fight_club = media.Movie("Fight Club", storyline, image, trailer)
# Movie local variables
storyline = '''Director <NAME>'s labyrinthine crime drama centers
on five career criminals who meet after being rounded up for a
standard police line-up. Upon their release, the men band together to
pull off an intricate heist involving 3 million dollars worth of
emeralds. Their success brings them to the attention of the enigmatic
Keyser Soze, an unseen, nefarious, and mythic underworld crime figure
who coerces them into pulling off an important and highly dangerous
job. The scenes that follow make 'The Usual Suspects' one of the most
fascinating crime thrillers in cinema history.'''
image = "https://upload.wikimedia.org/wikipedia/en/thumb/9/9c/Usual_suspects_ver1.jpg/220px-Usual_suspects_ver1.jpg"# noqa
trailer = "https://www.youtube.com/watch?v=oiXdPolca5w"
# Object instance
usual_suspects = media.Movie("The Usual Suspects", storyline, image, trailer)
# Movie local variables
storyline = '''A nameless disillusioned young urban male (<NAME>)
fights insomnia by attending disease support groups until he meets a
kindred spirit -and soap salesman (<NAME>). Together they form Fight
Club, where young men can exert their frustrations and angst upon one
another.'''
image = "https://upload.wikimedia.org/wikipedia/en/thumb/2/2e/Inception_%282010%29_theatrical_poster.jpg/220px-Inception_%282010%29_theatrical_poster.jpg"# noqa
trailer = "https://www.youtube.com/watch?v=YoHD9XEInc0"
# Object instance
inception = media.Movie("Inception", storyline, image, trailer)
# Movie local variables
storyline = '''Caleb, a 24 year old coder at the world's largest internet
company, wins a competition to spend a week at a private mountain
retreat belonging to Nathan, the reclusive CEO of the company. But when
Caleb arrives at the remote location he finds that he will have to
participate in a strange and fascinating experiment in which he must
interact with Ava, the world's first true artificial intelligence,
housed in the body of a beautiful robot girl.'''
image = "https://upload.wikimedia.org/wikipedia/en/thumb/b/ba/Ex-machina-uk-poster.jpg/220px-Ex-machina-uk-poster.jpg"# noqa
trailer = "https://www.youtube.com/watch?v=gyKqHOgMi4g"
# Object instance
ex_machina = media.Movie("Ex Machina", storyline, image, trailer)
# Movie local variables
storyline = '''Maximus (Crowe) is a brave and loyal military general to
the Emperor <NAME> (Harris). His loyalty does not go
unnoticed as the Emperor makes preparations for Maximus to succeed
him after his death. But when the Emperor's son, Commodus (Phoenix)
finds out, he kills his father and orders the death of Maximus. While
he escapes, his wife and son are brutally murdered. He then ends up a
slave in North Africa and is sold to become a gladiator. Trained by
Proximo (Reed), a former gladiator himself, Maximus wins every battle
and soon finds himself sent to Rome to take part in the Gladitorial
Games. It is here that he plans to plot his vengeance and gain his
freedom. While all this is going on, his past lover, the Emperor's
daughter (Nielsen) works to restore democratic rule with the help
of a civil-minded senator (Jacobi).'''
image = "https://upload.wikimedia.org/wikipedia/en/thumb/8/8d/Gladiator_ver1.jpg/220px-Gladiator_ver1.jpg"# noqa
trailer = "https://www.youtube.com/watch?v=0BLZbrLogTo"
# Object instance
gladiator = media.Movie("Gladiator", storyline, image, trailer)
# Movie's array
movies = [fight_club, usual_suspects, inception, ex_machina, gladiator]
# Open web interface
fresh_tomatoes.open_movies_page(movies)
| 2.609375
| 3
|
gglex.py
|
adityadutta/BostonHacksFall18
| 2
|
12776858
|
<reponame>adityadutta/BostonHacksFall18<filename>gglex.py
from __future__ import print_function
import datetime
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
# If modifying these scopes, delete the file token.json.
SCOPES = 'https://www.googleapis.com/auth/calendar'
Id = '<EMAIL>'
def main():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))
# Refer to the Python quickstart on how to setup the environment:
# https://developers.google.com/calendar/quickstart/python
# Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any
# stored credentials.
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print('Getting the upcoming 10 events')
events_result = service.events().list(calendarId=Id, timeMin=now,
maxResults=10, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
print('No upcoming events found.')
elist=list(range(len(events)))
for i in range(len(events)):
start = events[i]['start'].get('dateTime', events[i]['start'].get('date'))
elist[i]= [start,events[i]['summary'],events[i]['description']]
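# each elist entry ends up as [start, summary, description], e.g. (hypothetical values):
# ['2018-11-03T10:00:00-04:00', 'HackBU workshop', 'Room 101']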
# print(start, events[i]['summary'],events[i]['description'])
return elist
if __name__ == '__main__':
main()
| 3.1875
| 3
|
retrieve_tps/config.py
|
PedroMLF/guided-nmt
| 1
|
12776859
|
import os
from dotenv import find_dotenv
from dotenv import load_dotenv
# Find and load dotenv
load_dotenv(find_dotenv())
class Config:
def __init__(self):
# Source and target languages
self.SRC = os.environ.get("SRC")
self.TGT = os.environ.get("TGT")
# Dirs
self.BASE_DIR = os.environ.get("BASE_DIR")
self.TP_DIR = os.environ.get("TP_DIR")
# Paths
self.FASTTEXT_MODEL_PATH = os.environ.get("FASTTEXT_MODEL_PATH")
self.STOPWORDS_PATH = os.environ.get("STOPWORDS_PATH")
self.EXTRA_ALIGN_PATH = os.environ.get("EXTRA_ALIGN_PATH")
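# Illustrative .env contents this Config expects (the values below are assumptions, not from the repo):
# SRC=en
# TGT=de
# BASE_DIR=/data/guided-nmt
# TP_DIR=/data/guided-nmt/tps
# FASTTEXT_MODEL_PATH=/models/cc.en.300.bin
# STOPWORDS_PATH=/data/stopwords.txt
# EXTRA_ALIGN_PATH=/data/extra.align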
| 2.40625
| 2
|
test/memcache/memcache.py
|
bianhaoyi/neproxy
| 1
|
12776860
|
# Copyright 2012 Mixpanel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
a minimal, pure python client for memcached, kestrel, etc.
Usage example::
import memcache
mc = memcache.Client("127.0.0.1", 11211, timeout=1, connect_timeout=5)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.delete("another_key")
'''
import errno
import re
import socket
class ClientException(Exception):
'''
Raised when the server does something we don't expect
| This does not include `socket errors <http://docs.python.org/library/socket.html#socket.error>`_
| Note that ``ValidationException`` subclasses this so, technically, this is raised on any error
'''
def __init__(self, msg, item=None):
if item is not None:
msg = '%s: %r' % (msg, item) # use repr() to better see special chars
super(ClientException, self).__init__(msg)
class ValidationException(ClientException):
'''
Raised when an invalid parameter is passed to a ``Client`` function
'''
def __init__(self, msg, item):
super(ValidationException, self).__init__(msg, item)
class Client(object):
def __init__(self, host, port, timeout=None, connect_timeout=None):
'''
If ``connect_timeout`` is None, ``timeout`` will be used instead
(for connect and everything else)
'''
self._addr = (host, port)
self._timeout = timeout
self._connect_timeout = connect_timeout
self._socket = None
def __del__(self):
self.close()
def _get_addr(self):
return self._addr
address = property(_get_addr)
''' A read-only (str, int) tuple representing the host operations are performed on '''
def _get_timeout(self):
return self._timeout
def _set_timeout(self, timeout):
# presumably this should fail rarely
# set locally before on socket
# b/c if socket fails, it will probably be closed/reopened
# and will want to use last intended value
self._timeout = timeout
if self._socket:
self._socket.settimeout(timeout)
timeout = property(_get_timeout, _set_timeout)
'''
A float representing the timeout in seconds for reads and sends on the underlying socket
(``connect_timeout`` cannot be changed once init)
Setting a timeout can raise a ``TypeError`` (non-float) or a ``ValueError`` (negative)
'''
def _connect(self):
# buffer needed since we always ask for 4096 bytes at a time
# thus, might read more than the current expected response
# cleared on every reconnect since old bytes are part of old session and can't be reused
self._buffer = ''
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect_timeout = self._connect_timeout if self._connect_timeout is not None else self._timeout
self._socket.settimeout(connect_timeout) # passing None means blocking
try:
self._socket.connect(self._addr)
self._socket.settimeout(self._timeout)
except (socket.error, socket.timeout):
self._socket = None # don't want to hang on to bad socket
raise
def _read(self, length=None):
'''
Return the next length bytes from server
Or, when length is None,
Read a response delimited by \r\n and return it (including \r\n)
(Use latter only when \r\n is unambiguous -- aka for control responses, not data)
'''
result = None
while result is None:
if length: # length = 0 is ambiguous, so don't use
if len(self._buffer) >= length:
result = self._buffer[:length]
self._buffer = self._buffer[length:]
else:
delim_index = self._buffer.find('\r\n')
if delim_index != -1:
result = self._buffer[:delim_index+2]
self._buffer = self._buffer[delim_index+2:]
if result is None:
try:
tmp = self._socket.recv(4096)
except (socket.error, socket.timeout) as e:
self.close()
raise e
if not tmp:
# we handle common close/retry cases in _send_command
# however, this can happen if server suddenly goes away
# (e.g. restarting memcache under sufficient load)
raise socket.error, 'unexpected socket close on recv'
else:
self._buffer += tmp
return result
def _send_command(self, command):
'''
Send command to server and return initial response line
Will reopen socket if it got closed (either locally or by server)
'''
if self._socket: # try to find out if the socket is still open
try:
self._socket.settimeout(0)
self._socket.recv(0)
# if recv didn't raise, then the socket was closed or there is junk
# in the read buffer, either way, close
self.close()
except socket.error as e:
if e.errno == errno.EAGAIN: # this is expected if the socket is still open
self._socket.settimeout(self._timeout)
else:
self.close()
if not self._socket:
self._connect()
self._socket.sendall(command)
return self._read()
# key supports ascii sans space and control chars
# \x21 is !, right after space, and \x7e is ~, right before DEL
# also 1 <= len <= 250 as per the spec
_valid_key_re = re.compile('^[\<KEY>')
def _validate_key(self, key):
if not isinstance(key, str): # avoid bugs subtle and otherwise
raise ValidationException('key must be str', key)
m = self._valid_key_re.match(key)
if m:
# in python re, $ matches either end of line or right before
# \n at end of line. We can't allow latter case, so
# making sure length matches is simplest way to detect
if len(m.group(0)) != len(key):
raise ValidationException('trailing newline', key)
else:
raise ValidationException('invalid key', key)
return key
def close(self):
'''
Closes the socket if it's open
| Sockets are automatically closed when the ``Client`` object is garbage collected
| Sockets are opened the first time a command is run (such as ``get`` or ``set``)
| Raises socket errors
'''
if self._socket:
self._socket.close()
self._socket = None
def delete(self, key):
'''
Deletes a key/value pair from the server
Raises ``ClientException`` and socket errors
'''
# req - delete <key> [noreply]\r\n
# resp - DELETED\r\n
# or
# NOT_FOUND\r\n
key = self._validate_key(key)
command = 'delete %s\r\n' % key
resp = self._send_command(command)
if resp != 'DELETED\r\n' and resp != 'NOT_FOUND\r\n':
raise ClientException('delete failed', resp)
def get(self, key):
'''
Gets a single value from the server; returns None if there is no value
Raises ``ValidationException``, ``ClientException``, and socket errors
'''
return self.multi_get([key])[0]
def multi_get(self, keys):
'''
Takes a list of keys and returns a list of values
Raises ``ValidationException``, ``ClientException``, and socket errors
'''
if len(keys) == 0:
return []
# req - get <key> [<key> ...]\r\n
# resp - VALUE <key> <flags> <bytes> [<cas unique>]\r\n
# <data block>\r\n (if exists)
# [...]
# END\r\n
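# A concrete (hypothetical) exchange for two keys:
#   req  - get foo bar\r\n
#   resp - VALUE foo 0 5\r\nhello\r\nVALUE bar 0 3\r\nhey\r\nEND\r\n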
keys = [self._validate_key(key) for key in keys]
if len(set(keys)) != len(keys):
raise ClientException('duplicate keys passed to multi_get')
command = 'get %s\r\n' % ' '.join(keys)
received = {}
resp = self._send_command(command)
error = None
while resp != 'END\r\n':
terms = resp.split()
if len(terms) == 4 and terms[0] == 'VALUE': # exists
key = terms[1]
flags = int(terms[2])
length = int(terms[3])
if flags != 0:
error = ClientException('received non zero flags')
val = self._read(length+2)[:-2]
if key in received:
error = ClientException('duplicate results from server')
received[key] = val
else:
raise ClientException('get failed', resp)
resp = self._read()
if error is not None:
# this can happen if a memcached instance contains items set by a previous client
# leads to subtle bugs, so fail fast
raise error
if len(received) > len(keys):
raise ClientException('received too many responses')
# memcache client is used by other servers besides memcached.
# In the case of kestrel, responses coming back do not necessarily
# match the requests going out. Thus we just ignore the key name
# if there is only one key and return what we received.
if len(keys) == 1 and len(received) == 1:
response = received.values()
else:
response = [received.get(key) for key in keys]
return response
def getex(self, key):
'''
Gets a single value from the server; returns None if there is no value
Raises ``ValidationException``, ``ClientException``, and socket errors
'''
return self.multi_getex([key])[0]
def multi_getex(self, keys):
'''
Takes a list of keys and returns a list of values
Raises ``ValidationException``, ``ClientException``, and socket errors
'''
if len(keys) == 0:
return []
# req - getex <key> [<key> ...]\r\n
# resp - VALUE <key> <flags> <bytes> <cas unique> <expire time>\r\n
# <data block>\r\n (if exists)
# [...]
# END\r\n
keys = [self._validate_key(key) for key in keys]
if len(set(keys)) != len(keys):
raise ClientException('duplicate keys passed to multi_get')
command = 'getex %s\r\n' % ' '.join(keys)
received = {}
resp = self._send_command(command)
error = None
while resp != 'END\r\n':
terms = resp.split()
if len(terms) == 6 and terms[0] == 'VALUE': # exists
key = terms[1]
flags = int(terms[2])
length = int(terms[3])
if flags != 0:
error = ClientException('received non zero flags')
val = self._read(length+2)[:-2]
if key in received:
error = ClientException('duplicate results from server')
received[key] = val
else:
raise ClientException('get failed', resp)
resp = self._read()
if error is not None:
# this can happen if a memcached instance contains items set by a previous client
# leads to subtle bugs, so fail fast
raise error
if len(received) > len(keys):
raise ClientException('received too many responses')
# memcache client is used by other servers besides memcached.
# In the case of kestrel, responses coming back do not necessarily
# match the requests going out. Thus we just ignore the key name
# if there is only one key and return what we received.
if len(keys) == 1 and len(received) == 1:
response = received.values()
else:
response = [received.get(key) for key in keys]
return response
def set(self, key, val, exptime=0):
'''
Sets a key to a value on the server with an optional exptime (0 means don't auto-expire)
Raises ``ValidationException``, ``ClientException``, and socket errors
'''
# req - set <key> <flags> <exptime> <bytes> [noreply]\r\n
# <data block>\r\n
# resp - STORED\r\n (or others)
key = self._validate_key(key)
# the problem with supporting types is it oftens leads to uneven and confused usage
# some code sites use the type support, others do manual casting to/from str
# worse yet, some sites don't even know what value they are putting in and mis-cast on get
# by uniformly requiring str, the end-use code is much more uniform and legible
if not isinstance(val, str):
raise ValidationException('value must be str', val)
# typically, if val is > 1024**2 bytes server returns:
# SERVER_ERROR object too large for cache\r\n
# however custom-compiled memcached can have different limit
# so, we'll let the server decide what's too much
if not isinstance(exptime, int):
raise ValidationException('exptime not int', exptime)
elif exptime < 0:
raise ValidationException('exptime negative', exptime)
command = 'set %s 0 %d %d\r\n%s\r\n' % (key, exptime, len(val), val)
resp = self._send_command(command)
if resp != 'STORED\r\n':
raise ClientException('set failed', resp)
def stats(self, additional_args=None):
'''
Runs a stats command on the server.
``additional_args`` are passed verbatim to the server.
See `the memcached wiki <http://code.google.com/p/memcached/wiki/NewCommands#Statistics>`_ for details
or `the spec <https://github.com/memcached/memcached/blob/master/doc/protocol.txt>`_ for even more details
Raises ``ClientException`` and socket errors
'''
# req - stats [additional args]\r\n
# resp - STAT <name> <value>\r\n (one per result)
# END\r\n
if additional_args is not None:
command = 'stats %s\r\n' % additional_args
else:
command = 'stats\r\n'
resp = self._send_command(command)
result = {}
while resp != 'END\r\n':
terms = resp.split()
if len(terms) == 2 and terms[0] == 'STAT':
result[terms[1]] = None
elif len(terms) == 3 and terms[0] == 'STAT':
result[terms[1]] = terms[2]
else:
raise ClientException('stats failed', resp)
resp = self._read()
return result
| 2.890625
| 3
|
02-basics/my_package/a_module.py
|
vicente-gonzalez-ruiz/python-tutorial
| 4
|
12776861
|
<filename>02-basics/my_package/a_module.py
a = 1
print("a_module: Hi from my_package/" + __name__ + ".py!")
if __name__ == "__main__":
print("a_module: I was invoked from a script.")
else:
print("a_module: I was invoked from a Python module (probably using 'import').")
print("a_module: My name is =", __name__)
| 3.109375
| 3
|
counterfeit.py
|
wenima/interview-questions
| 0
|
12776862
|
<filename>counterfeit.py
"""Tests for for https://www.codewars.com/kata/number-of-measurements-to-spot-the-counterfeit-coin/"""
from math import ceil, log
def how_many_measurements(n):
"""Return the number of measurements it would take to find the counterfeit coin within n coins."""
if n == 1: return 0
if n in [2, 3]: return 1
if n < 10: return 2
i = 0
while n > 9:
i += 1
n = ceil(n / 3)
return i + 2
def how_many_measurements_short(n):
"""Return the number of measurements it would take to find the counterfeit coin within n coins."""
return ceil(log(n, 3))
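# Quick illustrative check (not part of the original kata solution): print both implementations
# side by side; they should agree for typical inputs, though the log-based version can be off by
# one near exact powers of 3 due to float rounding.
if __name__ == "__main__":
    for coins in (1, 2, 3, 10, 28, 100):
        print(coins, how_many_measurements(coins), how_many_measurements_short(coins))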
| 3.8125
| 4
|
tests/test_parser/test_method_statement.py
|
vbondarevsky/ones_analyzer
| 12
|
12776863
|
<filename>tests/test_parser/test_method_statement.py
from analyzer.syntax_kind import SyntaxKind
from tests.utils import TestCaseParser
class TestParserMethodStatement(TestCaseParser):
def test_procedure_with_export(self):
code = \
"""Процедура МояПроцедура() Экспорт
КонецПроцедуры"""
self.parse_source(code)
self.assertNode(self.syntax_tree.methods, [SyntaxKind.ProcedureBlock])
self.assertNode(self.syntax_tree.methods[0].begin.declaration_keyword, SyntaxKind.ProcedureKeyword)
self.assertNode(self.syntax_tree.methods[0].begin.identifier, SyntaxKind.IdentifierToken)
self.assertNode(self.syntax_tree.methods[0].begin.export, SyntaxKind.ExportKeyword)
def test_procedure_without_export(self):
code = \
"""Процедура МояПроцедура()
КонецПроцедуры"""
self.parse_source(code)
self.assertNode(self.syntax_tree.methods, [SyntaxKind.ProcedureBlock])
self.assertNode(self.syntax_tree.methods[0].begin.declaration_keyword, SyntaxKind.ProcedureKeyword)
self.assertNode(self.syntax_tree.methods[0].begin.identifier, SyntaxKind.IdentifierToken)
self.assertNode(self.syntax_tree.methods[0].begin.export, SyntaxKind.Empty)
def test_function_with_export(self):
code = \
"""Функция МояПроцедура() Экспорт
КонецФункции"""
self.parse_source(code)
self.assertNode(self.syntax_tree.methods, [SyntaxKind.FunctionBlock])
self.assertNode(self.syntax_tree.methods[0].begin.declaration_keyword, SyntaxKind.FunctionKeyword)
self.assertNode(self.syntax_tree.methods[0].begin.identifier, SyntaxKind.IdentifierToken)
self.assertNode(self.syntax_tree.methods[0].begin.export, SyntaxKind.ExportKeyword)
def test_function_without_export(self):
code = \
"""Функция МояПроцедура()
КонецФункции"""
self.parse_source(code)
self.assertNode(self.syntax_tree.methods, [SyntaxKind.FunctionBlock])
self.assertNode(self.syntax_tree.methods[0].begin.declaration_keyword, SyntaxKind.FunctionKeyword)
self.assertNode(self.syntax_tree.methods[0].begin.identifier, SyntaxKind.IdentifierToken)
self.assertNode(self.syntax_tree.methods[0].begin.export, SyntaxKind.Empty)
| 2.8125
| 3
|
ReceiptAutoInfoExtract.py
|
colorofnight86/eisms-ocr
| 8
|
12776864
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from cnocr import CnOcr
# Size used when generating the receipt image later, based on the standard VAT invoice format of 240mm x 140mm
height_resize = 1400
width_resize = 2400
# Instantiate CnOcr objects for different purposes
ocr = CnOcr(name='') # mixed characters
ocr_numbers = CnOcr(name='numbers', cand_alphabet='0123456789.') # digits only
ocr_UpperSerial = CnOcr(name='UpperSerial',
cand_alphabet='0123456789ABCDEFGHIJKLMNPQRSTUVWXYZ') # serial numbers: uppercase letters (no O) and digits
# Key names for the purchaser, seller and invoice blocks
purchaser_dict = ['purchaserName', 'purchaserCode', 'purchaserAddrTel', 'purchaserBankCode']
seller_dict = ['sellerName', 'sellerCode', 'sellerAddrTel', 'sellerBankCode']
invoice_dict = ['invoiceCode', 'invoiceNumber', 'invoiceDate', 'checkCode']
# Regions cropped from the image - field names
crop_range_list_name = ['invoice', 'purchaser', 'seller',
'totalExpense', 'totalTax', 'totalTaxExpenseZh', 'totalTaxExpense',
'remark', 'title', 'machineCode']
# Regions cropped from the image - coordinates [x, y, width, height]
crop_range_list_data = [[1750, 20, 500, 250], [420, 280, 935, 220], [420, 1030, 935, 230],
[1500, 880, 390, 75], [2000, 880, 330, 75], [750, 960, 600, 65], [1870, 960, 300, 70],
[1455, 1045, 400, 180], [760, 50, 900, 110], [280, 200, 250, 75]]
# Regions cropped from the image - OCR type to use, 0: mixed characters, 1: digits only, 2: serial, 3: multi-line mixed
crop_range_list_type = [3, 3, 3,
1, 1, 0, 1,
0, 0, 1]
# Resize the original image
def resizeImg(image, height=height_resize):
h, w = image.shape[:2]
pro = height / h
size = (int(w * pro), int(height))
img = cv2.resize(image, size)
return img
# Edge detection
def getCanny(image):
# Gaussian blur
binary = cv2.GaussianBlur(image, (3, 3), 2, 2)
# Canny edge detection
binary = cv2.Canny(binary, 60, 240, apertureSize=3)
# Dilate so the edges close up as much as possible
kernel = np.ones((3, 3), np.uint8)
binary = cv2.dilate(binary, kernel, iterations=1)
# Save the binary image
cv2.imwrite('result/binary.jpg', binary)
return binary
# Find the contour with the largest area
def findMaxContour(image):
# Find contours
contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Compute areas and keep the largest
max_area = 0.0
max_contour = []
for contour in contours:
current_area = cv2.contourArea(contour)
if current_area > max_area:
max_area = current_area
max_contour = contour
return max_contour, max_area
# Fit a polygon to the convex hull and get its four corner points
def getBoxPoint(contour):
# Polygon approximation of the convex hull
hull = cv2.convexHull(contour)
epsilon = 0.02 * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(hull, epsilon, True)
approx = approx.reshape((len(approx), 2))
return approx
# Map the quadrilateral corner points back to the original image scale
def adapPoint(box, pro):
box_pro = box
if pro != 1.0:
box_pro = box / pro
box_pro = np.trunc(box_pro)
return box_pro
# Order the four corners as [top-left, top-right, bottom-right, bottom-left]
def orderPoints(pts):
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
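# Illustrative example (assumed input): for pts = np.array([[100, 0], [0, 0], [0, 50], [100, 50]], dtype="float32")
# the coordinate sums pick (0, 0) as top-left and (100, 50) as bottom-right, while the y - x differences
# pick (100, 0) as top-right and (0, 50) as bottom-left.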
# Distance between two points (side length)
def pointDistance(a, b):
return int(np.sqrt(np.sum(np.square(a - b))))
# Perspective transform
def warpImage(image, box):
w, h = pointDistance(box[0], box[1]), \
pointDistance(box[1], box[2])
dst_rect = np.array([[0, 0],
[w - 1, 0],
[w - 1, h - 1],
[0, h - 1]], dtype='float32')
M = cv2.getPerspectiveTransform(box, dst_rect)
warped = cv2.warpPerspective(image, M, (w, h))
return warped
# Draw a quadrilateral through the four given points
def drawRect(img, pt1, pt2, pt3, pt4, color, line_width):
cv2.line(img, pt1, pt2, color, line_width)
cv2.line(img, pt2, pt3, color, line_width)
cv2.line(img, pt3, pt4, color, line_width)
cv2.line(img, pt1, pt4, color, line_width)
# Combined image pre-processing
def imagePreProcessing(path):
image = cv2.imread(path)
# Convert to grayscale and denoise (disabled)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# image = cv2.GaussianBlur(image, (3,3), 0)
# Edge detection, find the contour, determine the corner points
ratio = height_resize / image.shape[0]
img = resizeImg(image)
binary_img = getCanny(img)
max_contour, max_area = findMaxContour(binary_img)
box = getBoxPoint(max_contour)
boxes = adapPoint(box, ratio)
boxes = orderPoints(boxes)
# Perspective transform
warped = warpImage(image, boxes)
# Resize the final image
size = (width_resize, height_resize)
warped = cv2.resize(warped, size, interpolation=cv2.INTER_CUBIC)
# Draw the detected outline on the original image
drawRect(image, tuple(boxes[0]), tuple(boxes[1]), tuple(boxes[2]), tuple(boxes[3]), (0, 0, 255), 2)
cv2.imwrite("result/outline.jpg", image)
return warped
# Crop a region from the image - test-stage version that also shows and saves the crop; use the production version below instead
def cropImage_test(img, crop_range, filename='Undefined'):
xpos, ypos, width, height = crop_range
crop = img[ypos:ypos + height, xpos:xpos + width]
if filename == 'Undefined': # if no filename is given, build one from the coordinates
filename = 'crop-' + str(xpos) + '-' + str(ypos) + '-' + str(width) + '-' + str(height) + '.jpg'
cv2.imshow(filename, crop) # show the cropped region---for testing
# cv2.imwrite(filename, crop) # imwrite garbles filenames containing Chinese characters; use imencode below instead---for testing
# save the cropped region---for testing
cv2.imencode('.jpg', crop)[1].tofile(filename)
return crop
# Crop a region from the image
def cropImage(img, crop_range):
xpos, ypos, width, height = crop_range
crop = img[ypos:ypos + height, xpos:xpos + width]
return crop
# Run OCR on a cropped region
def cropOCR(crop, ocrType):
text_crop = ''
if ocrType == 0:
text_crop_list = ocr.ocr_for_single_line(crop)
elif ocrType == 1:
text_crop_list = ocr_numbers.ocr_for_single_line(crop)
elif ocrType == 2:
text_crop_list = ocr_UpperSerial.ocr_for_single_line(crop)
elif ocrType == 3:
text_crop_list = ocr.ocr(crop)
for i in range(len(text_crop_list)):
ocr_text = ''.join(text_crop_list[i]).split(':')[-1].split(';')[-1]
# if any of - — _ ― appears, treat it as part of the table border and skip it
if '-' in ocr_text or '—' in ocr_text or '_' in ocr_text or '―' in ocr_text:
continue
text_crop = text_crop + ocr_text + ','
return text_crop
text_crop = ''.join(text_crop_list)
return text_crop
def imageOcr(path):
# Pre-process the image
# path = 'test.jpg'
warped = imagePreProcessing(path)
# Recognize each cropped block
receipt = {}
for i in range(len(crop_range_list_data)):
crop = cropImage(warped, crop_range_list_data[i])
crop_text = cropOCR(crop, crop_range_list_type[i])
# Invoices never contain lowercase o, lowercase l or uppercase O: replace o/O with 0 and l with 1, strip spaces, and keep only the text after the colon
crop_text = crop_text.replace('o', '0').replace(' ', '').replace('l', '1').replace('O', '0').split(':')[-1]
# Seller information
if crop_range_list_name[i] == 'seller':
crop_text = crop_text.split(',')
for i in range(4):
if i < len(crop_text):
receipt.update({seller_dict[i]: crop_text[i]})
else:
receipt.update({seller_dict[i]: ''})
elif crop_range_list_name[i] == 'invoice':
crop_text = crop_text.split(',')
for i in range(4):
if i < len(crop_text):
receipt.update({invoice_dict[i]: crop_text[i]})
else:
receipt.update({invoice_dict[i]: ''})
elif crop_range_list_name[i] == 'purchaser':
crop_text = crop_text.split(',')
for i in range(4):
if i < len(crop_text):
receipt.update({purchaser_dict[i]: crop_text[i]})
else:
receipt.update({purchaser_dict[i]: ''})
else:
if crop_range_list_name[i] == 'title':
crop_text = crop_text[0:2] + '增值税普通发票'
receipt.update({crop_range_list_name[i]: crop_text})
receipt['sellerCode'] = receipt['sellerCode'].replace('工', '1').replace('.', '')
receipt['purchaserCode'] = receipt['purchaserCode'].replace('工', '1').replace('.', '')
for key in receipt:
print(key + ':' + receipt[key])
receipt.update({"serviceDetails": []})
cv2.imwrite('result/block.jpg', warped)
# Show the recognized regions
for i in range(len(crop_range_list_data)):
warped = cv2.rectangle(warped, (crop_range_list_data[i][0], crop_range_list_data[i][1]),
(crop_range_list_data[i][0] + crop_range_list_data[i][2],
crop_range_list_data[i][1] + crop_range_list_data[i][3]),
(0, 0, 255), 2)
# Show and save the pre-processed image---for testing only; raises an error in production
# cv2.namedWindow("warpImage", 0)
# cv2.resizeWindow("warpImage", 1200, 700)
# cv2.imshow('warpImage', warped)
# Save the result image locally
cv2.imwrite('result/result.jpg', warped)
return receipt
if __name__ == '__main__':
print(imageOcr("test0.jpg"))
# cv2.waitKey(0)
| 2.4375
| 2
|
day10/syntax_scoring.py
|
pranasziaukas/advent-of-code-2021
| 0
|
12776865
|
from collections import deque
from dataclasses import dataclass
from enum import Enum, auto
class Type(Enum):
ERROR = auto()
INCOMPLETE = auto()
@dataclass
class SyntaxScore:
type: Type
value: int
OPENERS_CLOSERS = {
"(": ")",
"[": "]",
"{": "}",
"<": ">",
}
def get_score(entry: str) -> SyntaxScore:
"""Get score of a known bad navigation subsystem entry."""
queue = deque()
# error checker
error_scores = {
")": 3,
"]": 57,
"}": 1197,
">": 25137,
}
for symbol in entry:
if symbol in OPENERS_CLOSERS.keys():
queue.append(symbol)
else:
if OPENERS_CLOSERS[queue.pop()] != symbol:
return SyntaxScore(Type.ERROR, error_scores[symbol])
# no errors found, must be incomplete
incomplete_scores = {
")": 1,
"]": 2,
"}": 3,
">": 4,
}
value = 0
while queue:
symbol = queue.pop()
value *= 5
value += incomplete_scores[OPENERS_CLOSERS[symbol]]
return SyntaxScore(Type.INCOMPLETE, value)
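# Illustrative examples (assuming the AoC 2021 day 10 scoring rules encoded above):
# get_score("[)") -> SyntaxScore(Type.ERROR, 3), because ')' closes a chunk opened by '['
# get_score("(")  -> SyntaxScore(Type.INCOMPLETE, 1), because the completion string is ")"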
if __name__ == "__main__":
from aocd import models, transforms
puzzle = models.Puzzle(year=2021, day=10)
data = transforms.lines(puzzle.input_data)
error_value_total = 0
incomplete_values = []
for data_entry in data:
score = get_score(data_entry)
if score.type == Type.ERROR:
error_value_total += score.value
else:
incomplete_values.append(score.value)
# sum of all error scores
puzzle.answer_a = error_value_total
# median of all incomplete scores
puzzle.answer_b = sorted(incomplete_values)[len(incomplete_values) // 2]
| 3.03125
| 3
|
redis_metrics/__init__.py
|
bradmontgomery/django-redis-metrics
| 52
|
12776866
|
<reponame>bradmontgomery/django-redis-metrics
__version__ = "2.0.0"
try:
from .utils import gauge, metric, set_metric # NOQA
except ImportError: # pragma: no cover
pass # pragma: no cover
default_app_config = 'redis_metrics.apps.RedisMetricsConfig'
| 1.328125
| 1
|
notion_properties/tests.py
|
marcphilippebeaujean-abertay/recur-notion
| 2
|
12776867
|
from django.test import TestCase
from .dto import NotionPropertyDto
TEST_NOTION_API_RESP_PROPERTIES_DICT = {
"Comment": {"id": "!vXu", "type": "rich_text", "rich_text": []},
"Amount": {"id": "%225%3C%7B", "type": "number", "number": 690},
"Category": {
"id": "93%3D%3E",
"type": "multi_select",
"multi_select": [
{
"id": "6d112c07-5a69-44d7-8d02-42895b6be454",
"name": "Home",
"color": "yellow",
}
],
},
"Created Time": {
"id": "hoWJ",
"type": "created_time",
"created_time": "2021-12-25T07:15:00.000Z",
},
"Expense": {
"id": "title",
"type": "title",
"title": [
{
"type": "text",
"text": {"content": "Rent", "link": None},
"annotations": {
"bold": False,
"italic": False,
"strikethrough": False,
"underline": False,
"code": False,
"color": "default",
},
"plain_text": "Rent",
"href": None,
}
],
},
}
# Create your tests here.
class TestNotionPropertiesDtoConversions(TestCase):
def setUp(self):
self.id = "helloworld"
self.notion_type = "checkbox"
self.value = True
self.name = "test checkbox"
def test_does_convert_from_dto_dict(self):
dto_dict = {
"id": self.id,
"type": self.notion_type,
"value": self.value,
"name": self.name,
"html_form_type": "blalba",
"html_value": "asdfsa",
"options": "asdfasd",
}
property_dto = NotionPropertyDto.from_dto_dict(dto_dict=dto_dict)
self.assertEqual(property_dto.id, self.id)
self.assertEqual(property_dto.notion_type, self.notion_type)
self.assertEqual(property_dto.value, self.value)
self.assertEqual(property_dto.name, self.name)
def test_does_convert_number_from_api_resp_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Amount"], property_name_str="Amount"
)
self.assertEqual(property_dto.name, "Amount")
self.assertEqual(property_dto.value, 690)
self.assertEqual(property_dto.notion_type, "number")
self.assertEqual(property_dto.id, "%225%3C%7B")
def test_does_convert_multi_select_from_api_resp_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Category"],
property_name_str="Category",
)
self.assertEqual(property_dto.name, "Category")
self.assertEqual(property_dto.value, "6d112c07-5a69-44d7-8d02-42895b6be454")
self.assertEqual(property_dto.notion_type, "multi_select")
self.assertEqual(property_dto.id, "93%3D%3E")
def test_does_convert_rich_text_from_api_resp_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Comment"], property_name_str="Comment"
)
self.assertEqual(property_dto.name, "Comment")
self.assertEqual(property_dto.value, "")
self.assertEqual(property_dto.notion_type, "rich_text")
self.assertEqual(property_dto.id, "!vXu")
def test_does_convert_title_from_api_resp_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Expense"], property_name_str="Expense"
)
self.assertEqual(property_dto.name, "Expense")
self.assertEqual(property_dto.value, "Rent")
self.assertEqual(property_dto.notion_type, "title")
self.assertEqual(property_dto.id, "title")
def test_convert_multi_select_property_dto_to_api_create_page_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Category"],
property_name_str="Category",
)
self.assertEqual(
property_dto.get_notion_property_api_dict_for_create_page_request(),
[{"id": "6d112c07-5a69-44d7-8d02-42895b6be454"}],
)
def test_convert_number_property_dto_to_create_page_api_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Amount"], property_name_str="Amount"
)
self.assertEqual(
property_dto.get_notion_property_api_dict_for_create_page_request(), 690
)
def test_convert_rich_text_property_dto_to_create_page_api_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Comment"], property_name_str="Comment"
)
self.assertEqual(
property_dto.get_notion_property_api_dict_for_create_page_request(),
[{"text": {"content": ""}}],
)
def test_convert_title_property_dto_to_create_page_api_dict(self):
property_dto = NotionPropertyDto.from_notion_api_property_dict(
TEST_NOTION_API_RESP_PROPERTIES_DICT["Expense"], property_name_str="Expense"
)
self.assertEqual(
property_dto.get_notion_property_api_dict_for_create_page_request(),
[{"text": {"content": "Rent"}}],
)
| 2.1875
| 2
|
__init__.py
|
tommmlij/xbmc-gamepass
| 40
|
12776868
|
<gh_stars>10-100
# dummy file to init the directory
| 0.941406
| 1
|
c3/utils/logging.py
|
thetalorian/c3
| 0
|
12776869
|
<reponame>thetalorian/c3
# Copyright 2016 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Logging interface for C3 '''
import sys
import datetime
def error(message):
''' Prints error messages to STDERR '''
print >> sys.stderr, (
'%s ERROR: %s' % (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
message))
def warn(message):
''' Prints error messages to STDERR '''
print >> sys.stderr, (
'%s WARN: %s' % (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
message))
def info(message):
''' Prints informational messages to STDOUT '''
print >> sys.stdout, (
'%s INFO: %s' % (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
message))
def debug(message, verbose):
''' Prints verbose messaging to STDOUT '''
if verbose:
print >> sys.stdout, (
'%s DEBUG: %s' % (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
message))
| 1.898438
| 2
|
split_bill_calculator.py
|
lorenanda/split-the-bill
| 0
|
12776870
|
from tkinter import *
class BillCalculator:
def __init__(self):
window = Tk()
window.title("Bill Calculator")
# input fields
Label(window, text = "How much is the bill?").grid(row = 1,column = 1, sticky = W)
Label(window, text = "How many people?").grid(row = 2, column = 1, sticky = W)
Label(window, text = "How much % tip?").grid(row = 3, column = 1, sticky = W)
Label(window, text = "Bill per person:").grid(row = 4, column = 1, sticky = W)
# for taking inputs
self.billVar = StringVar()
Entry(window, textvariable = self.billVar, justify = RIGHT).grid(row = 1, column = 2)
self.peopleVar = StringVar()
Entry(window, textvariable = self.peopleVar, justify = RIGHT).grid(row = 2, column = 2)
self.tipVar = StringVar()
Entry(window, textvariable = self.tipVar, justify = RIGHT).grid(row = 3, column = 2)
self.splitBillVar = StringVar()
lblSplitBill = Label(window, textvariable = self.splitBillVar).grid(row = 4, column = 2, sticky = E)
# calculate button
button_calculate = Button(window, text = "Calculate", command = self.calculateBill).grid(row = 6, column = 2, sticky = E)
window.mainloop() # Create an event loop
# calculate total payment
def calculateBill(self):
splitBill = self.totalSum(float(self.billVar.get()), float(self.tipVar.get()), int(self.peopleVar.get()))
self.splitBillVar.set(splitBill)
def totalSum(self, bill, tip, people):
splitBill = round(((bill + ((tip * bill) / 100)) / people), 2)
return splitBill
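# Worked example (illustrative): a 100.00 bill with a 10% tip split between 4 people
# gives round((100 + (10 * 100) / 100) / 4, 2) == 27.5 per person.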
# BillCalculator creates its own Tk() root and runs mainloop, so no extra root window is needed here.
# run the program
BillCalculator()
| 3.96875
| 4
|
thespian/system/transport/test/test_resultcallback.py
|
dendron2000/Thespian
| 210
|
12776871
|
from thespian.system.transport import ResultCallback
from datetime import datetime, timedelta
from time import sleep
class TestUnitResultCallback(object):
def _good(self, result, value):
if not hasattr(self, 'goods'): self.goods = []
self.goods.append( (result, value) )
def _fail(self, result, value):
if not hasattr(self, 'fails'): self.fails = []
self.fails.append( (result, value) )
def testGoodCallback(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc.resultCallback(True, 5)
assert self.goods == [(True, 5)]
assert self.fails == []
def testFailCallback(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc.resultCallback(False, 9)
assert self.goods == []
assert self.fails == [(False, 9)]
def testGoodCallbackReCall(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc.resultCallback(True, 5)
assert self.goods == [(True, 5)]
assert self.fails == []
rc.resultCallback(True, 4)
assert self.goods == [(True, 5)]
assert self.fails == []
def testFailCallbackReCall(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc.resultCallback(False, 9)
assert self.goods == []
assert self.fails == [(False, 9)]
rc.resultCallback(False, 8)
assert self.goods == []
assert self.fails == [(False, 9)]
def testGoodCallbackReCallFail(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc.resultCallback(True, 5)
assert self.goods == [(True, 5)]
assert self.fails == []
rc.resultCallback(False, 4)
assert self.goods == [(True, 5)]
assert self.fails == []
def testFailCallbackReCallGood(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc.resultCallback(False, 9)
assert self.goods == []
assert self.fails == [(False, 9)]
rc.resultCallback(True, 8)
assert self.goods == []
assert self.fails == [(False, 9)]
def testManyGoodCallbacks(self):
self.goods = []
self.fails = []
rc = [ResultCallback(self._good, self._fail) for N in range(20)]
for num,each in enumerate(rc):
each.resultCallback(True, num)
assert self.goods == [(True, N) for N in range(20)]
assert self.fails == []
def testManyFailCallbacks(self):
self.goods = []
self.fails = []
rc = [ResultCallback(self._good, self._fail) for N in range(20)]
for num,each in enumerate(rc):
each.resultCallback(False, num)
assert self.goods == []
assert self.fails == [(False, N) for N in range(20)]
def testManyGoodAndFailCallbacks(self):
self.goods = []
self.fails = []
rc = [ResultCallback(self._good, self._fail) for N in range(20)]
for num,each in enumerate(rc):
each.resultCallback(0 == num % 3, num)
assert self.goods == [(True, N) for N in range(20) if N % 3 == 0]
assert self.fails == [(False, N) for N in range(20) if N % 3]
def testChainedGoodCallbacks(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc2 = ResultCallback(self._good, self._fail, rc)
rc3 = ResultCallback(self._good, self._fail, rc2)
rc3.resultCallback(True, 'good')
assert self.goods == [(True, 'good')] * 3
assert self.fails == []
def testChainedFailCallbacks(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc2 = ResultCallback(self._good, self._fail, rc)
rc3 = ResultCallback(self._good, self._fail, rc2)
rc3.resultCallback(False, 'oops')
assert self.goods == []
assert self.fails == [(False, 'oops')] * 3
def testChainedGoodCallbacksDoNotDuplicate(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc2 = ResultCallback(self._good, self._fail, rc)
rc3 = ResultCallback(self._good, self._fail, rc2)
rc2.resultCallback(True, 'ok')
assert self.goods == [(True, 'ok'), (True, 'ok')]
assert self.fails == []
rc3.resultCallback(True, 'good')
assert self.goods == [(True, 'ok'), (True, 'ok'), (True, 'good')]
assert self.fails == []
def testChainedFailCallbacksDoNotDuplicate(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc2 = ResultCallback(self._good, self._fail, rc)
rc3 = ResultCallback(self._good, self._fail, rc2)
rc2.resultCallback(False, 'bad')
assert self.goods == []
assert self.fails == [(False, 'bad'), (False, 'bad')]
rc3.resultCallback(False, 'oops')
assert self.goods == []
assert self.fails == [(False, 'bad'), (False, 'bad'), (False, 'oops')]
def testChainedGoodCallbacksDoNotDuplicateOnFail(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc2 = ResultCallback(self._good, self._fail, rc)
rc3 = ResultCallback(self._good, self._fail, rc2)
rc2.resultCallback(True, 'ok')
assert self.goods == [(True, 'ok'), (True, 'ok')]
assert self.fails == []
rc3.resultCallback(False, 'bad')
assert self.goods == [(True, 'ok'), (True, 'ok')]
assert self.fails == [(False, 'bad')]
def testChainedFailCallbacksDoNotDuplicateOnGood(self):
self.goods = []
self.fails = []
rc = ResultCallback(self._good, self._fail)
rc2 = ResultCallback(self._good, self._fail, rc)
rc3 = ResultCallback(self._good, self._fail, rc2)
rc2.resultCallback(False, 'bad')
assert self.goods == []
assert self.fails == [(False, 'bad'), (False, 'bad')]
rc3.resultCallback(True, 'yippee')
assert self.goods == [(True, 'yippee')]
assert self.fails == [(False, 'bad'), (False, 'bad')]
| 2.453125
| 2
|
ml/code/svm/classifiers/HpNGram.py
|
cyberdeception/deepdig
| 5
|
12776872
|
import wekaAPI
import arffWriter
from statlib import stats
from Trace import Trace
from Packet import Packet
import math
import numpy as np
from sklearn.decomposition import PCA
import config
from Utils import Utils
from EventTrace import EventTrace
class HpNGram:
@staticmethod
def traceToInstance( eventTrace ):
instance = {}
if eventTrace.getEventCount()==0:
instance = {}
instance['class'] = 'webpage'+str(eventTrace.getId())
return instance
#print 'webpage'+str(eventTrace.getId())
numMostFreqFeatures = 50
oneGramHistogram = dict(eventTrace.getNGramHistogram(N=1, sortReverseByValue=True)) # [:numMostFreqFeatures] )
twoGramHistogram = dict(eventTrace.getNGramHistogram(N=2, sortReverseByValue=True)) # [:numMostFreqFeatures] )
threeGramHistogram = dict(eventTrace.getNGramHistogram(N=3, sortReverseByValue=True)) # [:numMostFreqFeatures] )
#fourGramHistogram = dict(eventTrace.getNGramHistogram(N=4, sortReverseByValue=True)) # [:numMostFreqFeatures] )
#fiveGramHistogram = dict(eventTrace.getNGramHistogram(N=5, sortReverseByValue=True)) # [:numMostFreqFeatures] )
#sixGramHistogram = dict(eventTrace.getNGramHistogram(N=6, sortReverseByValue=True)) # [:numMostFreqFeatures] )
#sevenGramHistogram = dict(eventTrace.getNGramHistogram(N=7, sortReverseByValue=True)) # [:numMostFreqFeatures] )
instance.update(oneGramHistogram)
instance.update(twoGramHistogram)
instance.update(threeGramHistogram)
#instance.update(fourGramHistogram)
#instance.update(fiveGramHistogram)
#instance.update(sixGramHistogram)
#instance.update(sevenGramHistogram)
# label
instance['class'] = 'webpage'+str(eventTrace.getId())
return instance
'''
@staticmethod
def classify( runID, trainingSet, testingSet ):
[trainingFile,testingFile] = arffWriter.writeArffFiles( runID, trainingSet, testingSet )
return wekaAPI.execute( trainingFile,
testingFile,
"weka.Run weka.classifiers.functions.LibSVM",
['-K','2', # RBF kernel
'-G','0.0000019073486328125', # Gamma
##May20 '-Z', # normalization 18 May 2015
'-C','131072'] ) # Cost
@staticmethod
def classify( runID, trainingSet, testingSet ):
[trainingFile,testingFile] = arffWriter.writeArffFiles( runID, trainingSet, testingSet )
return wekaAPI.execute( trainingFile, testingFile, "weka.classifiers.bayes.NaiveBayes", ['-K'] )
@staticmethod
def classify( runID, trainingSet, testingSet ):
[trainingFile,testingFile] = arffWriter.writeArffFiles( runID, trainingSet, testingSet )
return wekaAPI.execute( trainingFile,
testingFile,
"weka.classifiers.trees.RandomForest",
['-I','10', #
'-K','0', #
'-S','1'] ) #
'''
@staticmethod
def classify( runID, trainingSet, testingSet ):
[trainingFile,testingFile] = arffWriter.writeArffFiles( runID, trainingSet, testingSet )
if config.NUM_MONITORED_SITES != -1: #no need to classify as this is for generating openworld datasets. See the line above (arffWriter)
[accuracy,debugInfo] = ['NA', []]
return [accuracy,debugInfo]
if config.n_components_PCA != 0:
[trainingFile,testingFile] = Utils.calcPCA2([trainingFile,testingFile])
if config.n_components_LDA != 0:
[trainingFile,testingFile] = Utils.calcLDA4([trainingFile,testingFile])
if config.n_components_QDA != 0:
[trainingFile,testingFile] = Utils.calcQDA([trainingFile,testingFile])
if config.lasso != 0:
#[trainingFile,testingFile] = Utils.calcLasso3([trainingFile,testingFile])
#[trainingFile,testingFile] = Utils.calcLogisticRegression([trainingFile,testingFile])
Utils.calcLogisticRegression([trainingFile,testingFile])
#Utils.plotDensity([trainingFile,testingFile])
#Utils.plot([trainingFile,testingFile])
if config.NUM_FEATURES_RF != 0:
[trainingFile,testingFile] = Utils.calcTreeBaseRF([trainingFile,testingFile], config.NUM_FEATURES_RF)
if config.OC_SVM == 0: # multi-class svm
if config.CROSS_VALIDATION == 0:
return wekaAPI.execute( trainingFile,
testingFile,
"weka.Run weka.classifiers.functions.LibSVM",
['-K','2', # RBF kernel
'-G','0.0000019073486328125', # Gamma
##May20 '-Z', # normalization 18 May 2015
'-C','131072', # Cost
'-B'] ) # confidence
else:
file = Utils.joinTrainingTestingFiles(trainingFile, testingFile) # join and shuffle
return wekaAPI.executeCrossValidation( file,
"weka.Run weka.classifiers.functions.LibSVM",
['-x',str(config.CROSS_VALIDATION), # number of folds
'-K','2', # RBF kernel
'-G','0.0000019073486328125', # Gamma
##May20 '-Z', # normalization 18 May 2015
'-C','131072', # Cost
'-B'] ) # confidence
else: # one-class svm
if config.CROSS_VALIDATION == 0:
print str(config.SVM_KERNEL)
print str(config.OC_SVM_Nu)
return wekaAPI.executeOneClassSVM( trainingFile,
testingFile,
"weka.Run weka.classifiers.functions.LibSVM",
['-K',str(config.SVM_KERNEL),
#'-K','0', # kernel
#'-G','0.0000019073486328125', # Gamma
##May20 '-Z', # normalization 18 May 2015
#'-C','131072', # Cost
#'-N','0.01', # nu
'-N',str(config.OC_SVM_Nu), # nu
'-S','2'])#, # one-class svm
#'-B'] ) # confidence
else:
file = Utils.joinTrainingTestingFiles(trainingFile, testingFile) # join and shuffle
return wekaAPI.executeCrossValidation( file,
"weka.Run weka.classifiers.functions.LibSVM",
['-x',str(config.CROSS_VALIDATION), # number of folds
'-K','2', # RBF kernel
'-G','0.0000019073486328125', # Gamma
##May20 '-Z', # normalization 18 May 2015
'-C','131072', # Cost
'-B'] ) # confidence
'''
#one class svm
if config.CROSS_VALIDATION == 0:
return wekaAPI.executeOneClassSVM( trainingFile,
testingFile,
"weka.Run weka.classifiers.functions.LibSVM",
['-K','2', # RBF kernel
'-G','0.0000019073486328125', # Gamma
##May20 '-Z', # normalization 18 May 2015
'-C','131072', # Cost
#'-N','0.2', # nu, def: 0.5
'-S','2'])#, # one-class svm
#'-B'] ) # confidence
else:
file = Utils.joinTrainingTestingFiles(trainingFile, testingFile) # join and shuffle
return wekaAPI.executeCrossValidation( file,
"weka.Run weka.classifiers.functions.LibSVM",
['-x',str(config.CROSS_VALIDATION), # number of folds
'-K','2', # RBF kernel
'-G','0.0000019073486328125', # Gamma
##May20 '-Z', # normalization 18 May 2015
'-C','131072', # Cost
'-B'] ) # confidence
'''
'''
@staticmethod
def classify(runID, trainingSet, testingSet):
print 'DT'
[trainingFile, testingFile] = arffWriter.writeArffFiles(runID, trainingSet, testingSet)
return wekaAPI.execute(trainingFile,
testingFile,
"weka.classifiers.trees.J48",
['-C', '0.25',
'-M', '2'])
@staticmethod
def classify( runID, trainingSet, testingSet ):
[trainingFile,testingFile] = arffWriter.writeArffFiles( runID, trainingSet, testingSet )
if config.n_components_PCA != 0:
[trainingFile,testingFile] = Utils.calcPCA2([trainingFile,testingFile])
if config.n_components_LDA != 0:
[trainingFile,testingFile] = Utils.calcLDA4([trainingFile,testingFile])
if config.n_components_QDA != 0:
[trainingFile,testingFile] = Utils.calcQDA([trainingFile,testingFile])
return wekaAPI.execute( trainingFile,
testingFile,
"weka.Run weka.classifiers.functions.LibSVM",
[#'-K','0', # Linear kernel
'-K','2', # RBF kernel
#'-G','0.0000019073486328125', # Gamma
'-G','0.000030518',
##May20 '-Z', # normalization 18 May 2015
#'-C','131072',
'-C','8'] ) # Cost
@staticmethod
def classify( runID, trainingSet, testingSet ):
[trainingFile,testingFile] = arffWriter.writeArffFiles( runID, trainingSet, testingSet )
if config.n_components_PCA != 0:
[trainingFile,testingFile] = Utils.calcPCA2([trainingFile,testingFile])
if config.n_components_LDA != 0:
[trainingFile,testingFile] = Utils.calcLDA6([trainingFile,testingFile])
if config.n_components_QDA != 0:
[trainingFile,testingFile] = Utils.calcQDA([trainingFile,testingFile])
return wekaAPI.execute( trainingFile, testingFile, "weka.classifiers.bayes.NaiveBayes", ['-K'] )
'''
| 2.265625
| 2
|
__main__.py
|
aflansburg/rchtmlreader
| 0
|
12776873
|
import sys
from cli_augments import arg_parser
from htmlreader import read_page
purgeFiles = False
newItem = False
weight = ''
upc = ''
video_link = None
# parse arguments
processedArgs = arg_parser(sys.argv)
if type(processedArgs) == str:
url = processedArgs
read_page(url, False)
elif type(processedArgs) == dict:
read_page(processedArgs, False)
elif type(processedArgs) == list:
for url_arg in processedArgs:
read_page(url_arg, True)
else:
print('\nNo valid URL was supplied. Program will now terminate.')
exit(0)
| 3.109375
| 3
|
webviz_core_components/wrapped_components/label.py
|
rubenthoms/webviz-core-components
| 6
|
12776874
|
from typing import Any
from dash import html
class Label(html.Label):
"""Returns a styled dcc.Label"""
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.className = "webviz-label"
| 2.625
| 3
|
verification/application/services/slack_operation.py
|
pratik-vii/snet-marketplace-service
| 0
|
12776875
|
<gh_stars>0
import json
import requests
from common.exceptions import BadRequestException
from common.logger import get_logger
from common.utils import validate_signature
from verification.application.services.verification_manager import individual_repository, VerificationManager
from verification.config import ALLOWED_SLACK_USER, SIGNING_SECRET, ALLOWED_SLACK_CHANNEL_ID, \
SLACK_APPROVAL_OAUTH_ACCESS_TOKEN, MAX_INDIVIDUAL_SLACK_LISTING, SLACK_APPROVAL_CHANNEL_URL
from verification.constants import IndividualVerificationStatus, OPEN_SLACK_VIEW_URL
from verification.exceptions import InvalidSlackUserException, InvalidSlackChannelException, \
InvalidSlackSignatureException
logger = get_logger(__name__)
class SlackOperation:
def __init__(self, username, channel_id):
self._username = username
self._channel_id = channel_id
@staticmethod
def generate_slack_signature_message(request_timestamp, event_body):
message = f"v0:{request_timestamp}:{event_body}"
return message
@staticmethod
def validate_slack_signature(signature, message):
return validate_signature(signature=signature, message=message, key=SIGNING_SECRET,
opt_params={"slack_signature_prefix": "v0="})
def validate_slack_user(self):
if self._username in ALLOWED_SLACK_USER:
return True
return False
def validate_slack_channel_id(self):
if self._channel_id in ALLOWED_SLACK_CHANNEL_ID:
return True
return False
def validate_slack_request(self, headers, payload_raw, ignore=False):
if not ignore:
if not self.validate_slack_channel_id():
raise InvalidSlackChannelException()
if not self.validate_slack_user():
raise InvalidSlackUserException()
slack_signature_message = self.generate_slack_signature_message(
request_timestamp=headers["X-Slack-Request-Timestamp"], event_body=payload_raw)
if not self.validate_slack_signature(
signature=headers["X-Slack-Signature"], message=slack_signature_message):
raise InvalidSlackSignatureException()
def get_pending_individual_verification(self):
individual_verification_list = VerificationManager().get_pending_individual_verification(limit=MAX_INDIVIDUAL_SLACK_LISTING)
slack_blocks = self.generate_slack_listing_blocks(individual_verification_list)
slack_payload = {"blocks": slack_blocks}
response = requests.post(url=SLACK_APPROVAL_CHANNEL_URL, data=json.dumps(slack_payload))
logger.info(f"{response.status_code} | {response.text}")
def process_interaction(self, payload):
data = {}
if payload["type"] == "block_actions":
for action in payload["actions"]:
if "button" == action.get("type"):
data = json.loads(action.get("value", {}))
if not data:
raise BadRequestException()
individual_username = data["username"]
self.create_and_send_view_individual_modal(individual_username, payload["trigger_id"])
elif payload["type"] == "view_submission":
individual_username = payload["view"]["blocks"][0]["fields"][1]["text"]
comment = payload["view"]["state"]["values"]["review_comment"]["comment"]["value"]
review_request_state = \
payload["view"]["state"]["values"]["approval_state"]["selection"]["selected_option"]["value"]
self.process_approval_comment(review_request_state, comment, individual_username)
def create_and_send_view_individual_modal(self, username, trigger_id):
verification = individual_repository.get_verification(username=username)
if verification is None:
raise Exception(f"No verification found with username: {username}")
comments = verification.comment_dict_list()
comment = "No comment"
comment_by = "-"
if len(comments) > 0:
comment = comments[0]["comment"]
comment_by = comments[0]["created_by"]
view = self.generate_view_individual_modal(username, comment, comment_by)
slack_payload = {
"trigger_id": trigger_id,
"view": view
}
headers = {"Authorization": SLACK_APPROVAL_OAUTH_ACCESS_TOKEN, "content-type": "application/json"}
response = requests.post(url=OPEN_SLACK_VIEW_URL, data=json.dumps(slack_payload), headers=headers)
logger.info(f"{response.status_code} | {response.text}")
def process_approval_comment(self, state, comment, individual_username):
verification = individual_repository.get_verification(username=individual_username)
if verification.status in [IndividualVerificationStatus.PENDING.value,
IndividualVerificationStatus.REJECTED.value,
IndividualVerificationStatus.CHANGE_REQUESTED.value]:
VerificationManager().callback(
json.dumps({"verificationStatus": state, "comment": comment, "reviewed_by": self._username}),
entity_id=individual_username)
else:
logger.info("Approval type is not valid")
def generate_view_individual_modal(self, individual_username, comment, comment_by):
view = {
"type": "modal",
"title": {
"type": "plain_text",
"text": "Individual for Approval",
"emoji": True
},
"submit": {
"type": "plain_text",
"text": "Submit",
"emoji": True
},
"close": {
"type": "plain_text",
"text": "Cancel",
"emoji": True
}
}
info_display_block = {
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": f"*Username:*"
},
{
"type": "plain_text",
"text": f"{individual_username}"
},
]
}
approver_comment_block = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*Comments*\n{comment}\n *Comment by*: {comment_by}"
}
}
divider_block = {
'type': 'divider'
}
select_approval_state_block = {
"type": "input",
"block_id": "approval_state",
"element": {
"type": "radio_buttons",
"action_id": "selection",
"initial_option": {
"text": {
"type": "plain_text",
"text": "Request Change"
},
"value": IndividualVerificationStatus.CHANGE_REQUESTED.value,
"description": {
"type": "plain_text",
"text": "Request changes."
}
},
"options": [
{
"text": {
"type": "plain_text",
"text": "Approve"
},
"value": IndividualVerificationStatus.APPROVED.value,
"description": {
"type": "plain_text",
"text": "Allow user to publish service."
}
},
{
"text": {
"type": "plain_text",
"text": "Reject"
},
"value": IndividualVerificationStatus.REJECTED.value,
"description": {
"type": "plain_text",
"text": "Reject user request."
}
},
{
"text": {
"type": "plain_text",
"text": "Request Change"
},
"value": IndividualVerificationStatus.CHANGE_REQUESTED.value,
"description": {
"type": "plain_text",
"text": "Request changes."
}
}
]
},
"label": {
"type": "plain_text",
"text": "*Approve / Reject / Request Change*"
}
}
comment_block = {
"type": "input",
"block_id": "review_comment",
"optional": False,
"element": {
"type": "plain_text_input",
"action_id": "comment",
"multiline": True
},
"label": {
"type": "plain_text",
"text": "Comment",
"emoji": True
},
"hint": {
"type": "plain_text",
"text": "* Comment is mandatory field."
}
}
view["blocks"] = [info_display_block, divider_block, approver_comment_block,
select_approval_state_block, comment_block]
return view
def generate_slack_listing_blocks(self, verifications):
title_block = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*Individual Verification Requests*"
}
}
listing_slack_blocks = [title_block]
for verification in verifications:
individual_username = verification["username"]
mrkdwn_block = {
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": f"*Username:* {individual_username}"
}
]
}
review_button_block = {
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"emoji": True,
"text": "Review"
},
"style": "primary",
"value": json.dumps({
"username": individual_username
})
}
]
}
divider_block = {
"type": "divider"
}
listing_slack_blocks.extend([mrkdwn_block, review_button_block, divider_block])
return listing_slack_blocks
| 2.171875
| 2
|
pykeyvi/src/converters/__init__.py
|
remusao/keyvi
| 147
|
12776876
|
from .pykeyvi_autowrap_conversion_providers import *
from autowrap.ConversionProvider import special_converters
def register_converters():
special_converters.append(MatchIteratorPairConverter())
| 1.390625
| 1
|
ice/icalendar/doctype/caldav_account/test_caldav_account.py
|
canlann/ice
| 1
|
12776877
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, IT-Geräte und IT-Lösungen wie Server, Rechner, Netzwerke und E-Mailserver sowie auch Backups, and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestCalDavAccount(unittest.TestCase):
pass
| 1.367188
| 1
|
usim/_basics/_resource_level.py
|
AndreiBarsan/usim
| 10
|
12776878
|
from abc import abstractmethod
from weakref import WeakValueDictionary
from typing import Iterable, Tuple, Type, Generic, TypeVar
T = TypeVar('T')
class ResourceLevels(Generic[T]):
"""
Common class for named resource levels
Representation for the levels of multiple named resources. Every set of resources,
such as :py:class:`usim.Resources` or :py:class:`usim.Capacities`, specializes a
:py:class:`~.ResourceLevels` subclass with one attribute for each named resource.
For example, ``Resources(a=3, b=4)`` uses a :py:class:`~.ResourceLevels` with
attributes ``a`` and ``b``.
.. code:: python3
from usim import Resources
resources = Resources(a=3, b=4)
print(resources.levels.a) # 3
print(resources.levels.b) # 4
print(resources.levels.c) # raises AttributeError
:py:class:`~.ResourceLevels` subtypes allow no additional attributes other than
their initial resources, but their values may be changed.
Instantiating a subtype requires resource levels to be specified by keyword;
    missing resources are set to zero.
Each resource always uses the same :py:class:`~.ResourceLevels` subtype.
Binary operators for comparisons and arithmetic can be applied for
instances of the same subtype.
.. describe:: levels_a + levels_b
levels_a - levels_b
Elementwise addition/subtraction of values.
.. describe:: levels_a > levels_b
levels_a >= levels_b
levels_a <= levels_b
levels_a < levels_b
Strict elementwise comparison of values.
:py:data:`True` if the comparison is satisfied by each element pair,
:py:data:`False` otherwise.
.. describe:: levels_a == levels_b
Total elementwise equality of values.
:py:data:`True` if each element pair is equal,
:py:data:`False` otherwise.
The inverse of ``levels_a != levels_b``.
.. describe:: levels_a != levels_b
        Partial elementwise inequality of values.
:py:data:`False` if each element pair is equal,
:py:data:`True` otherwise.
The inverse of ``levels_a == levels_b``.
In addition, iteration on a :py:class:`~.ResourceLevels` subtype yields
``field, value`` pairs. This is similar to :py:meth:`dict.items`.
.. describe:: for field, value in levels_a
Iterate over the current ``field, value`` pairs.
.. describe:: dict(levels_a)
Create :py:class:`dict` of ``field: value`` pairs.
"""
__slots__ = ()
    __fields__: Tuple[str, ...] = ()
#: cache of currently used specialisations to avoid
#: recreating/duplicating commonly used types
__specialisation_cache__ = WeakValueDictionary()
def __init__(self, **kwargs: T):
spec_name = f'{__specialise__.__module__}.{__specialise__.__qualname__}'
raise TypeError(
f'Base class {self.__class__.__name__} cannot be instantiated.\n'
'\n'
f'The {self.__class__.__name__} type is intended to be automatically\n'
'subclassed by resources. You should not encounter the base class during\n'
'well-behaved simulations.\n'
'\n'
f'Use {spec_name} to declare subtypes with valid resource level names.\n'
)
@abstractmethod
def __add__(self, other: 'ResourceLevels[T]') -> 'ResourceLevels[T]':
raise NotImplementedError
@abstractmethod
def __sub__(self, other: 'ResourceLevels[T]') -> 'ResourceLevels[T]':
raise NotImplementedError
@abstractmethod
def __gt__(self, other: 'ResourceLevels[T]') -> bool:
raise NotImplementedError
@abstractmethod
def __ge__(self, other: 'ResourceLevels[T]') -> bool:
raise NotImplementedError
@abstractmethod
def __le__(self, other: 'ResourceLevels[T]') -> bool:
raise NotImplementedError
@abstractmethod
def __lt__(self, other: 'ResourceLevels[T]') -> bool:
raise NotImplementedError
@abstractmethod
def __eq__(self, other: 'ResourceLevels[T]') -> bool:
raise NotImplementedError
@abstractmethod
def __ne__(self, other: 'ResourceLevels[T]') -> bool:
raise NotImplementedError
def __iter__(self):
for field in self.__fields__:
yield field, getattr(self, field)
def __repr__(self):
content = ', '.join(
f'{key}={item}' for key, item in self
)
return f'{self.__class__.__name__}({content})'
def __specialise__(zero: T, names: Iterable[str]) -> Type[ResourceLevels[T]]:
"""
Create a specialisation of :py:class:`~.ResourceLevels`
:param zero: zero value for all fields
:param names: names of fields
"""
fields = tuple(sorted(names))
try:
return ResourceLevels.__specialisation_cache__[fields]
except KeyError:
pass
class SpecialisedResourceLevels(ResourceLevels):
__slots__ = fields
__fields__ = fields
__init__ = __make_init__(zero, fields)
__add__ = __binary_op__('__add__', '+', fields)
__sub__ = __binary_op__('__sub__', '-', fields)
__gt__ = __comparison_op__('__gt__', '>', fields)
__ge__ = __comparison_op__('__ge__', '>=', fields)
__le__ = __comparison_op__('__le__', '<=', fields)
        __lt__ = __comparison_op__('__lt__', '<', fields)
__eq__ = __comparison_op__('__eq__', '==', fields)
def __ne__(self, other):
return not self == other
ResourceLevels.__specialisation_cache__[fields] = SpecialisedResourceLevels
return SpecialisedResourceLevels
def __make_init__(zero, names: Tuple[str, ...]):
"""Make an ``__init__`` with ``names`` as keywords and defaults of ``zero``"""
namespace = {}
args_list = f'={zero}, '.join(names)
exec(
'\n'.join(
[
f"""def __init__(self, *, {args_list}={zero}):"""
] + [
f""" self.{name} = {name}"""
for name in names
]
),
namespace
)
return namespace['__init__']
def __binary_op__(op_name: str, op_symbol: str, names: Tuple[str, ...]):
"""
Make an operator method ``op_name`` to apply ``op_symbol`` to all fields ``names``
.. code:: python3
        __add__ = __binary_op__("__add__", '+', ('foo', 'bar'))
def __add__(self, other):
return type(self)(
foo = self.foo + other.foo,
bar = self.bar + other.bar,
)
"""
namespace = {}
exec(
'\n'.join(
[
f"""def {op_name}(self, other):""",
""" assert type(self) is type(other),\\""",
""" 'resource levels specialisations cannot be mixed'""",
""" return type(self)(""",
] + [
f""" {name} = self.{name} {op_symbol} other.{name},"""
for name in names
] + [
""" )"""
]
),
namespace
)
return namespace[op_name]
def __comparison_op__(op_name: str, op_symbol: str, names: Tuple[str, ...]):
"""
Make a comparison method ``op_name`` to apply ``op_symbol`` to all fields ``names``
.. code:: python3
        __eq__ = __comparison_op__("__eq__", '==', ('foo', 'bar'))
        def __eq__(self, other):
            return (
                self.foo == other.foo
                and self.bar == other.bar
)
"""
namespace = {}
exec(
'\n'.join(
[
f"""def {op_name}(self, other):""",
""" assert type(self) is type(other),\\""",
""" 'resource levels specialisations cannot be mixed'""",
""" return (""",
f""" self.{names[0]} {op_symbol} other.{names[0]}"""
] + [
f""" and self.{name} {op_symbol} other.{name}"""
for name in names[1:]
] + [
""" )"""
]
),
namespace
)
return namespace[op_name]
| 2.96875
| 3
|
tracopenid/compat.py
|
dcnoye/TracOpenidPluggin
| 1
|
12776879
|
<reponame>dcnoye/TracOpenidPluggin<filename>tracopenid/compat.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 <NAME>
#
from __future__ import absolute_import
from distutils.version import LooseVersion
from trac.util.html import tag
import trac
from trac.env import Environment
from trac.util.translation import _
def _logout_link(href, **kwargs):
'''Return "Logout" link
This is a simple link, as used by trac < 1.0.2.
'''
return tag.a(_('Logout'), href=href('logout', **kwargs))
def _logout_form(href, **kwargs):
'''Return "Logout" "link"
This version returns a form — styled to look like a link — as used
by trac >= 1.0.2 (for CSRF protection.) Unfortunately, this does
not render nicely in older tracs, since ``trac.css`` does not
include the proper styling for ``form.trac-logout``.
'''
fields = [tag.button(_('Logout'), name='logout', type='submit')]
for name, value in kwargs.items():
fields.append(tag.input(type='hidden', name=name, value=value))
return tag.form(tag.div(*fields),
action=href('logout'), id='logout', class_='trac-logout')
# Recent versions of trac use a logout form for csrf protection. If
# trac.css supports it, we should too.
LOGOUT_REQUIRES_POST = LooseVersion(trac.__version__) >= '1.0.2'
logout_link = _logout_form if LOGOUT_REQUIRES_POST else _logout_link
def is_component_enabled(env, cls):
""" Determine whether a trac component is enabled.
"""
# We would like to use env.is_enabled(cls) to do this,
# however, trac 0.11 does not have ComponentManager.is_enabled().
# So instead, rely on ComponentManager.__getitem__(), which does
# have the same logic in it.
return env[cls] is not None
def _db_query_v1(env, query, params=None):
""" Excute a database query.
This is the trac >= 1.0 version.
"""
return env.db_query(query, params)
def _db_query_v0(env, query, params=None):
""" Excute a database query.
This is the trac < 1.0 version.
"""
# There is no get_read_db in trac < 0.12
get_read_db = getattr(env, 'get_read_db', env.get_db_cnx)
db = get_read_db()
cursor = db.cursor()
cursor.execute(query, params)
return cursor.fetchall()
HAS_DBAPI_1_0 = hasattr(Environment, 'db_query')
db_query = _db_query_v1 if HAS_DBAPI_1_0 else _db_query_v0
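
# Usage sketch (the query text and parameters here are only an example):
#   rows = db_query(env, "SELECT sid FROM session WHERE authenticated=%s", (1,))
# db_query() resolves to the variant matching the running trac version.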
| 2.4375
| 2
|
images/bbox_cv_phy/BBoxPhysicalPairing.py
|
hongtaoh/iphyer.github.io
| 0
|
12776880
|
import numpy as np
# create array data
predict = np.array([[1,2,2,1],
[4.5,2.5,10,0.5],
[6,6,8,4],
[6.26,6.26,8.26,4.26]],np.double)
truth = np.array([[1,4,3,3],
[1.2,2.2,2.2,1.2],
[5,2,8,1],
[6.1,6.1,8.1,4.1],
[8.1,8.1,11.1,9.1]], np.double)
# get useful variables
nums_pred = len(predict)
nums_gt = len(truth)
iou_matrix = np.zeros((nums_pred,nums_gt))
# boxA stores the bounding box's top-left and bottom-right vertex coordinates
# boxA=[x1,y1,x2,y2]
def iou(boxA, boxB):
    # compute the four edges (left/top/right/bottom) of the overlap region; note the use of max/min
left_max = max(boxA[0],boxB[0])
top_max = max(boxA[1],boxB[1])
right_min = min(boxA[2], boxB[2])
bottom_min = min(boxA[3], boxB[3])
    # compute the area of the overlap
    inter = max(0,(right_min-left_max)) * max(0, (bottom_min-top_max)) # width * height
Sa = (boxA[2]-boxA[0])*(boxA[3]-boxA[1])
Sb = (boxB[2]-boxB[0])*(boxB[3]-boxB[1])
    # compute the union area and then the IoU
union = Sa+Sb-inter
iou = inter/union
return iou
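# Worked example for iou() with boxes already in top-left + bottom-right form:
#   boxA = [0, 0, 2, 2], boxB = [1, 1, 3, 3]
#   inter = (2 - 1) * (2 - 1) = 1, Sa = Sb = 4, union = 4 + 4 - 1 = 7
#   iou = 1 / 7, roughly 0.143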
def transformBBox(boxA):
    # convert the BBox from bottom-left + top-right representation to top-left + bottom-right
return [boxA[0], boxA[3], boxA[2], boxA[1]]
# get iou matrix
for i in range(nums_pred):
for j in range(nums_gt):
#print(truth[j])
iou_matrix[i][j] = iou(transformBBox(predict[i]), transformBBox(truth[j]))
print(iou_matrix)
res = []
IOU_theta = 0.4
while np.any(iou_matrix > IOU_theta):
ind = np.argmax(iou_matrix)
ind_col = ind % nums_gt
ind_row = (ind - ind_col) // nums_gt
print("row = %d, col = %d"%(ind_row, ind_col))
# store results for more analysis
res.append([predict[ind_row], truth[ind_col]])
    # set the corresponding row and col to zero
    # exclude those already paired from future comparison
iou_matrix[ind_row][:] = 0
# set col to 0
for ii in range(nums_pred):
iou_matrix[ii][ind_col] = 0
print(iou_matrix)
print(res)
| 2.46875
| 2
|
scripts/synth/sample_kb.py
|
issca/inferbeddings
| 33
|
12776881
|
from kb import KB, TRAIN_LABEL, DEV_LABEL, TEST_LABEL
import random
import numpy as np
class SampleKB:
def __init__(self, num_relations, num_entities,
arities=[0.0, 1.0, 0.0],
fb_densities=[0.0, 0.0, 0.0],
arg_densities=[0., 0.1, 0.0],
fact_prob=0.2,
num_symm=2,
num_impl=[0, 2, 0],
num_impl_inv=2,
num_impl_conj=[0, 2, 0],
num_trans_single=2,
num_trans_diff=2,
seed=0,
position_dependent_args=False,
position_densities=[0., 0.5, 0.0]):
"""
:param num_relations:
:param num_entities: number of distinct entities to generate
:param arities: fraction of arities
:param arg_densities: fraction of entity combinations that are observed
:param fact_prob:
        :param num_symm: number of symmetric clauses R(X0, X1) :- R(X1, X0)
        :param num_impl: number of implications R2(X0, X1) :- R1(X0, X1)
        :param num_impl_inv: number of inverse implications R2(X0, X1) :- R1(X1, X0)
        :param num_impl_conj: number of implications with conjunctive body R3(X0, X1) :- R1(X0, X1), R2(X0, X1)
        :param num_trans_single: number of transitivity clauses R(X0, X2) :- R(X0, X1), R(X1, X2)
        :param num_trans_diff: number of transitivity clauses with distinct relations
:param seed:
:return:
"""
random.seed(seed)
self.kb = KB(seed=seed)
num_relations_per_arity = [int(x * num_relations) for x in arities]
entities = list(map(lambda x: "e" + str(x), range(1, num_entities+1)))
entities_arg1 = []
entities_arg2 = []
entities_arg3 = []
if position_dependent_args:
arg1_boundary = int(len(entities)*position_densities[0])
arg2_boundary = arg1_boundary + int(len(entities)*position_densities[1])
entities_arg1 = entities[0:arg1_boundary]
entities_arg2 = entities[arg1_boundary:arg2_boundary]
entities_arg3 = entities[arg2_boundary:]
else:
entities_arg1 = entities
entities_arg2 = entities
entities_arg3 = entities
pairs = [(x, y) for x in entities_arg1
for y in entities_arg2 if not x == y]
triples = [(x, y, z) for x in entities_arg1
for y in entities_arg2 for z in entities_arg3
if not x == y and not y == z and not z == x]
num_pair_samples = min(len(pairs), int(len(entities_arg1) *
len(entities_arg2) *
arg_densities[1]))
num_triple_samples = min(len(triples), int(len(entities_arg1) *
len(entities_arg2) *
len(entities_arg3) *
arg_densities[2]))
entities_per_arity = {
1: entities_arg1,
2: random.sample(pairs, num_pair_samples),
3: random.sample(triples, num_triple_samples)
}
relations_per_arity = {}
for arity in range(1, len(num_relations_per_arity) + 1):
for i in range(1, num_relations_per_arity[arity - 1] + 1):
fb_prefix = ""
if fb_densities[arity-1] > random.uniform(0, 1.0):
fb_prefix = "REL$"
if arity == 1:
rel = fb_prefix+"u"
elif arity == 2:
rel = fb_prefix+"b"
else:
rel = fb_prefix+"t"
rel += str(i)
if not arity in relations_per_arity:
relations_per_arity[arity] = list()
relations_per_arity[arity].append(rel)
for args in random.sample(entities_per_arity[arity],
int(len(entities_per_arity[arity]) * fact_prob)):
self.kb.add_train(rel, args)
inverse = []
# sample symmetric relations r(X,Y) => r(Y,X)
if 2 in relations_per_arity:
symm = random.sample([(x, x) for x in relations_per_arity[2]], num_symm)
inverse += symm
# sampling implication, reversed: r1(X,Y) => r2(Y,X)
if 2 in relations_per_arity:
inverse += random.sample([(x, y) for x in relations_per_arity[2]
for y in relations_per_arity[2]
if not x == y], num_impl_inv)
if len(inverse) > 0:
self.kb.add_formulae("inv", {2: inverse})
# sampling implications:
# r1(X) => r2(X)
# r1(X,Y) => r2(X,Y)
implications_per_arity = {}
for arity in range(1, len(num_relations_per_arity) + 1):
if arity in relations_per_arity:
implications_per_arity[arity] = \
random.sample([(x, y) for x in relations_per_arity[arity] for y in relations_per_arity[arity]
if not x == y], num_impl[arity - 1])
self.kb.add_formulae("impl", implications_per_arity)
# sampling implications with conjunction in body:
# r1(X,Y) ^ r2(X,Y) => r3(X,Y)
# r1(X) ^ r2(X) => r3(X)
implications_with_conjunction_per_arity = {}
for arity in range(1, len(num_relations_per_arity) + 1):
if arity in relations_per_arity and len(relations_per_arity[arity]) >= 3:
implications_with_conjunction_per_arity[arity] = \
random.sample([(x, y, z) for x in relations_per_arity[arity]
for y in relations_per_arity[arity]
for z in relations_per_arity[arity]
if not x == y and not y == z and not z == x],
num_impl_conj[arity - 1])
self.kb.add_formulae("impl_conj", implications_with_conjunction_per_arity)
# sampling transitivities:
transitivities = []
# (1) simple transitivities r(X,Y) ^ r(Y,Z) => r(X,Z)
# (2) general transitivities r1(X,Y) ^ r2(Y,Z) => r3(X,Z) (r1, r2, r3 differ)
if 2 in relations_per_arity:
if num_trans_single > 0:
transitivities += random.sample([(x, x, x)
for x in relations_per_arity[2]], num_trans_single)
if num_trans_diff > 0:
transitivities += random.sample([(x, y, z)
for x in relations_per_arity[2]
for y in relations_per_arity[2]
for z in relations_per_arity[2]
if not x == y and
not y == z and
not z == x], num_trans_diff)
if len(transitivities) > 0:
self.kb.add_formulae("trans", {2: transitivities})
# todo: sampling negation (also applies to all heads of formulae above):
# r1 => !r2
def get_kb(self):
return self.kb
if __name__=="__main__":
import sys
import argparse
import os
#fixed args
sampled_unobserved_per_true = 1 # number of false (unobserved) test facts added for each true test fact (inferred from clause)
simple_transitivities = False
seed = 846
np.random.seed(seed)
#input args
argparser = argparse.ArgumentParser('create artificial dataset (train+test) with rules (all arity 2)')
argparser.add_argument('--entities', '-E', required=True, type=int, help='number of entities')
argparser.add_argument('--predicates', '-P', required=True, type=int, help='number of predicates')
argparser.add_argument('--test-prob', type=float, default=0.5,
help='fraction of inferred facts (from formulae) to be added to test set')
argparser.add_argument('--arg-density', type=float, default=0.1,
help='fraction of all possible pairs of entities observed')
argparser.add_argument('--fact-prob', type=float, default=0.1,
help='for all observed pairs: fraction of those that occur with each relation')
argparser.add_argument('--symm', type=int, default=0,
help='number of clauses p(X0, X1) :- p(X1, X0)')
argparser.add_argument('--impl', type=int, default=0,
help='number of clauses p(X0, X1) :- q(X0, X1) (with p and q different)')
argparser.add_argument('--impl-inv', type=int, default=0,
help='number of clauses p(X0, X1) :- q(X1, X0)')
argparser.add_argument('--impl-conj', type=int, default=0,
help='number of clauses r(X0, X1) :- p(X0, X1), q(X0, X1)')
argparser.add_argument('--trans-single', type=int, default=0,
help='number of clauses r(X0, X2) :- r(X0, X1), r(X1, X2)')
argparser.add_argument('--trans-diff', type=int, default=0,
help='number of clauses r(X0, X2) :- p(X0, X1), q(X1, X2) (with p,q,r different)')
argparser.add_argument('--dir', type=str, default='../../data/synth/sampled',
help='target directory')
argparser.add_argument('--tag', type=str, default='synth',
help='experiment tag')
args = argparser.parse_args(sys.argv[1:])
cmd = ' '.join(arg for arg in sys.argv[1:])
Ne = args.entities
Nr = args.predicates
test_prob = args.test_prob
arg_density = args.arg_density
fact_prob = args.fact_prob
num_symm = args.symm
num_impl = args.impl
num_impl_inv = args.impl_inv
num_impl_conj = args.impl_conj
num_trans_single = args.trans_single
num_trans_diff = args.trans_diff
testKB = SampleKB(Nr, Ne,
arg_densities=[0, arg_density, 0],
fact_prob=fact_prob,
num_symm=num_symm,
num_impl_inv=num_impl_inv,
num_impl=[0, num_impl, 0],
num_impl_conj=[0, num_impl_conj, 0],
num_trans_single=num_trans_single,
num_trans_diff=num_trans_diff,
seed=seed
).get_kb()
N_original_facts = len(testKB.get_all_facts(of_types=TRAIN_LABEL))
# for fact in testKB.get_all_facts(of_types=TRAIN_LABEL):
# print(fact)
# for clause in testKB.get_formulae_strings():
# print(clause)
testKB.apply_formulae(test_prob=test_prob, sampled_unobserved_per_true=sampled_unobserved_per_true)
#create train / test file for inferbeddings
train_file = os.path.join(args.dir, args.tag + '_train.tsv')
valid_file = os.path.join(args.dir, args.tag + '_valid.tsv')
test_file = os.path.join(args.dir, args.tag + '_test.tsv')
clause_file = os.path.join(args.dir, args.tag + '_clauses.pl')
readme_file = os.path.join(args.dir, args.tag + '_config.txt')
msg = '#file: '+ args.tag + '_config.txt\n'
msg += '#%d original purely random train facts (without formulae)\n'%N_original_facts
train_facts = testKB.get_all_facts(of_types=(TRAIN_LABEL,))
msg +='#%d train facts (after creating rules and adding inferred facts to train set with prob %.3f)\n'%(len(train_facts), 1.-test_prob)
test_facts = testKB.get_all_facts(of_types=(TEST_LABEL,))
test_facts_T = [f for f in test_facts if f[1]]
test_facts_F = [f for f in test_facts if not f[1]]
msg += '#%d test facts (%d True, %d False)\n'%(len(test_facts), len(test_facts_T), len(test_facts_F))
print('\n' + msg)
for clause in testKB.get_formulae_for_ntp_strings():
print(clause)
with open(readme_file, 'w') as rf:
rf.write('\n#command:\npython3 %s\n'%' '.join(list(sys.argv)))
rf.write('\n#config:\n')
for k in ['tag', 'entities', 'predicates', 'test_prob', 'arg_density', 'fact_prob',
'symm', 'impl', 'impl_inv', 'impl_conj', 'trans_single', 'trans_diff',
'dir']:
rf.write('{}\t{}\n'.format(k, vars(args)[k]))
rf.write('seed\t{}\n'.format(seed))
rf.write('sampled_unobserved_per_true\t{}\n'.format(sampled_unobserved_per_true))
rf.write('simple_transitivities\t{}\n'.format(simple_transitivities))
rf.write('\n#stats:\n')
rf.write(msg)
with open(train_file, 'w') as trf:
for fact in sorted(testKB.get_all_facts(of_types=TRAIN_LABEL)):
pred, (subj, obj) = fact[0]
trf.write('{}\t{}\t{}\n'.format(subj, pred, obj))
with open(valid_file, 'w') as vaf:
#simple strategy for artificial setting: tune on train data
#but: for AUC evaluation, we need false train facts as well
# (sampled_unobserved_per_true randomly sampled unobserved ones per positive train fact
nb_pos_test = int(len(testKB.get_all_facts(of_types=TEST_LABEL))/(sampled_unobserved_per_true+1.))
train_facts_True = testKB.get_all_facts(of_types=TRAIN_LABEL)
np.random.shuffle(train_facts_True)
valid_facts_True = train_facts_True #[:nb_pos_test]
valid_facts_False = []
for (pred, (subj, obj)), truth, _ in valid_facts_True:
if truth: #should be the case
vaf.write('{}\t{}\t{}\t{}\n'.format(subj, pred, obj, {True: 1, False: 0}[truth]))
((pred_n, (subj_n, obj_n)), _, _) = testKB.sample_neg(pred, 0, 1, oracle=True)
vaf.write('{}\t{}\t{}\t{}\n'.format(subj_n, pred, obj_n, 0)) #negative fact for same relation
with open(test_file, 'w') as tef:
for fact in sorted(testKB.get_all_facts(of_types=TEST_LABEL)):
pred, (subj, obj) = fact[0]
truth = fact[1]
tef.write('{}\t{}\t{}\t{}\n'.format(subj, pred, obj, {True: 1, False: 0}[truth]))
with open(clause_file, 'w') as clf:
for clause in testKB.get_formulae_for_ntp_strings():
clf.write(clause+'\n')
| 2.3125
| 2
|
roman/romanMin.py
|
mapinis/intro-to-programming-public
| 0
|
12776882
|
<reponame>mapinis/intro-to-programming-public<gh_stars>0
userNum = int(input("Input a number: "))
out = ""
numeralArr = [(1000, "M"), (500, "D"), (100, "C"), (50, "L"), (10, "X"), (5, "V"), (1, "I"), (0, ""), (0, "")]
def convert(num, nums, iters, halfs):
global out
if num >= nums[0] - iters[0]:
out += iters[1] + nums[1]
num -= nums[0] - iters[0]
elif num < nums[0] - iters[0]:
if halfs[0]: out += halfs[2]; num -= halfs[1]
out += iters[1] * (num // iters[0] if iters[0] > 0 else 1)
num -= iters[0] * (num // iters[0] if iters[0] > 0 else 1)
elif num == nums[0]:
out += nums[1]
num -= nums[0]
return num
for x in range(0, len(numeralArr) - 2, 2):
number, numeral = numeralArr[x]
halfNumber, halfNumeral = numeralArr[x + 1]
iterNumber, iterNumeral = numeralArr[x + 2]
out += numeral * (userNum // number)
userNum -= number * (userNum // number)
userNum = convert(userNum, (number, numeral) if userNum >= halfNumber else (halfNumber, halfNumeral), (iterNumber, iterNumeral), [userNum >= halfNumber, halfNumber, halfNumeral])
print(out)
| 3.765625
| 4
|
build/lib/saes/optimizer/__init__.py
|
Johumel/SAES
| 9
|
12776883
|
from .srfit import *
from .fit_sin_spec_pll import *
from .fit_sin_spec import *
from .specr_model import *
from .sinspec_model import *
| 1.070313
| 1
|
bitchstorm.py
|
Ollyd1gger/crawler
| 0
|
12776884
|
<reponame>Ollyd1gger/crawler<gh_stars>0
# -*- coding: utf-8 -*-
import urlparse
import scrapy
from items import BitchstormItem
BASE_URL="http://www.harikadiziler.com/"
class BitchStormXPath(scrapy.Spider):
name = 'bitchstorm'
start_urls = [
'http://www.harikadiziler.com/yabanci-dizi-bolumleri/',
]
def parse(self, response):
global a
a =response.xpath('//div[@class="bolumler"]')
global abuzer
for abuzer in a.xpath('.//a/@href').extract():
print abuzer
global ziyver
ziyver = "%s%s" %(BASE_URL,abuzer)
yield scrapy.Request(urlparse.urljoin(BASE_URL,abuzer),callback=self.parse2)
def parse2(self,response):
print "yarrroli...%s" %response.meta
if 'page' in response.meta:
page = response.meta['page']
else:
page = 1
d= response.xpath('//div[@class="video-izle"]/iframe').extract()
x = page
for line in d:
zayteam = BitchstormItem()
zibunkavayye= line
print zibunkavayye
if zibunkavayye == "":
                break
zayteam['href'] = zibunkavayye
zayteam['text'] = response.xpath("//header[@class='video-baslik']/p/i/text()").extract()
with open('log.txt', 'a') as f:
f.write('{0}\n'.format(zibunkavayye))
x+=1
nexturlpage = "%s/%s" % (ziyver, x)
print nexturlpage
yield zayteam
request = scrapy.Request(nexturlpage,callback=self.parse2)
request.meta['page']=x
yield request
| 2.671875
| 3
|
primes_test.py
|
danhje/primes
| 0
|
12776885
|
'''
@author: <NAME>
'''
import time
import numpy as np
import matplotlib.pyplot as plt
from algorithms import primes1, primes2, primes3, primes4, primes5, primes6, primes7, primes8
ubounds = range(0, 10000, 100)
num = len(ubounds)
results = []
for algorithm in (primes1, primes2, primes3, primes4, primes5, primes6, primes7, primes8):
print(f'Testing algorithm {algorithm.__name__}')
results_for_current_algorithm = []
for ubound in ubounds:
starttime = time.time()
result = algorithm(ubound)
endtime = time.time()
duration = endtime - starttime
results_for_current_algorithm.append(duration)
results.append(results_for_current_algorithm)
plt.plot(np.transpose(np.array(results)), linewidth=2)
plt.xticks(range(len(ubounds))[0::10], ubounds[0::10])
plt.xlabel('Upper bound for primes')
plt.ylabel('Time in seconds to generate primes')
plt.legend(['algorithm 1', 'algorithm 2', 'algorithm 3', 'algorithm 4',
'algorithm 5', 'algorithm 6', 'algorithm 7', 'algorithm 8'], loc=2)
plt.show()
| 3.265625
| 3
|
Discord/models.py
|
EliasEriksson/Codescord
| 0
|
12776886
|
<filename>Discord/models.py
from typing import *
from tortoise.models import Model
from tortoise import fields
import tortoise
class Servers(Model):
id = fields.IntField(pk=True)
server_id = fields.IntField()
auto_run = fields.BooleanField(default=False)
@classmethod
async def get_server(cls, server_id: int) -> "Servers":
return await cls.get(
server_id=server_id
)
@classmethod
async def create_server(cls, server_id: int, auto_run: bool = None) -> Model:
try:
server = await super(Servers, cls).get(
server_id=server_id
)
except tortoise.exceptions.DoesNotExist:
server = await super(Servers, cls).create(
server_id=server_id,
auto_run=True if auto_run else False
)
return server
class Channels(Model):
id = fields.IntField(pk=True)
server = fields.ForeignKeyField(f"models.{Servers.__name__}")
channel_id = fields.IntField()
class UserMessages(Model):
id = fields.IntField(pk=True)
server = fields.ForeignKeyField(f"models.{Servers.__name__}")
channel = fields.ForeignKeyField(f"models.{Channels.__name__}")
message_id = fields.IntField()
class ResponseMessages(Model):
id = fields.IntField(pk=True)
server = fields.ForeignKeyField(f"models.{Servers.__name__}")
channel = fields.ForeignKeyField(f"models.{Channels.__name__}")
user_message = fields.ForeignKeyField(f"models.{UserMessages.__name__}")
message_id = fields.IntField()
@classmethod
async def create_message(
cls, server_id: int, channel_id: int,
user_message_id: int, message_id: int) -> "ResponseMessages":
try:
server = await Servers.get(
server_id=server_id)
except tortoise.exceptions.DoesNotExist:
server = await Servers.create(
server_id=server_id)
await server.save()
try:
channel = await Channels.get(
server=server,
channel_id=channel_id)
except tortoise.exceptions.DoesNotExist:
channel = await Channels.create(
server=server,
channel_id=channel_id)
try:
user_message = await UserMessages.get(
server=server,
channel=channel,
message_id=user_message_id)
except tortoise.exceptions.DoesNotExist:
user_message = await UserMessages.create(
server=server,
channel=channel,
message_id=user_message_id)
response_message = await cls.create(
server=server,
channel=channel,
user_message=user_message,
message_id=message_id)
return response_message
@classmethod
async def get_message(
cls, server_id: int, channel_id: int,
user_message_id: int) -> "ResponseMessages":
server = await Servers.get(
server_id=server_id)
if not server:
server = await Servers.create(
server_id=server_id)
await server.save()
channel = await Channels.get(
server=server,
channel_id=channel_id)
if not channel:
            channel = await Channels.create(
server=server,
channel_id=channel_id)
user_message = await UserMessages.get(
server=server,
channel=channel,
message_id=user_message_id)
if not user_message:
user_message = await UserMessages.create(
server=server,
channel=channel,
message_id=user_message_id)
response_message = await cls.get(server=server, channel=channel, user_message=user_message)
return response_message
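
# Usage sketch (IDs are placeholder values, called from an async context):
#   response = await ResponseMessages.create_message(
#       server_id=1, channel_id=2, user_message_id=3, message_id=4)
#   same = await ResponseMessages.get_message(
#       server_id=1, channel_id=2, user_message_id=3)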
| 2.59375
| 3
|
Server/app/views/v2/mixed/post/post.py
|
moreal/DMS-Backend
| 27
|
12776887
|
<reponame>moreal/DMS-Backend
from flask import Blueprint, Response, abort
from flask_restful import Api
from flasgger import swag_from
from app.docs.v2.mixed.post.post import *
from app.views.v2 import BaseResource
from app.views.v2.admin.post import CATEGORY_MODEL_MAPPING
api = Api(Blueprint(__name__, __name__))
api.prefix = '/post/<category>'
@api.resource('')
class PostList(BaseResource):
@swag_from(POST_LIST_GET)
def get(self, category):
"""
        Retrieve the list of posts
"""
if category.upper() not in CATEGORY_MODEL_MAPPING:
abort(400)
return [{
'id': str(post.id),
'writeTime': post.write_time.strftime('%Y-%m-%d'),
'author': post.author,
'title': post.title,
'pinned': post.pinned
} for post in CATEGORY_MODEL_MAPPING[category.upper()].objects]
@api.resource('/<post_id>')
class PostItem(BaseResource):
@swag_from(POST_ITEM_GET)
def get(self, category, post_id):
"""
        Retrieve the content of a post
"""
if len(post_id) != 24:
return Response('', 204)
post = CATEGORY_MODEL_MAPPING[category.upper()].objects(id=post_id).first()
return {
'writeTime': post.write_time.strftime('%Y-%m-%d'),
'author': post.author,
'title': post.title,
'content': post.content,
'pinned': post.pinned
} if post else Response('', 204)
| 2.171875
| 2
|
evidently/telemetry/__init__.py
|
alex-zenml/evidently
| 2,212
|
12776888
|
<reponame>alex-zenml/evidently<filename>evidently/telemetry/__init__.py
from .sender import TelemetrySender
| 0.984375
| 1
|
experiment/resample.py
|
adapttech-ltd/SocketAAE
| 0
|
12776889
|
<reponame>adapttech-ltd/SocketAAE
import point_cloud_utils as pcu
import glob
import numpy as np
import open3d as open3d
import matplotlib.pylab as plt
import re
import os
import yaml
import sys
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset_path', '-dp', type=str, required=True,
help='path to data folder')
parser.add_argument('--save_path', '-sp',type=str, default='None',
help='path to save resampled files')
parser.add_argument('--n_points','-n', type=int, default=2048,
help='number of points to resample to')
parser.add_argument('--termination', '-t', default='.off',
help='termination to search for in folder, .off or .obj')
args = parser.parse_args()
def files_in_subdirs(top_dir, search_pattern):
for path, _, files in os.walk(top_dir):
for name in files:
full_name = os.path.join(path, name)
if full_name.endswith(search_pattern):
yield full_name
filenames = [f for f in files_in_subdirs(args.dataset_path, args.termination)]
for i, fi in enumerate(filenames):
path = os.path.split(fi)[0]
foldername = path.replace(args.dataset_path+'/','')
name = os.path.split(fi)[-1].split('.')[0]
if args.save_path=='None':
args.save_path = os.path.split(args.dataset_path)[0]+'/'+os.path.split(args.dataset_path)[1]+'_resampled'
if not os.path.exists(args.save_path): os.makedirs(args.save_path)
if os.path.split(foldername)[-1] == args.dataset_path.split('/')[-1]: #Single folder structure
destination_filename = args.save_path+'/'+name
else:
if not os.path.exists(args.save_path+'/'+foldername): os.makedirs(args.save_path+'/'+foldername)
destination_filename = args.save_path+'/'+foldername+'/'+name
if args.termination=='.off':
v, f, n = pcu.read_off(fi)
elif args.termination=='.obj':
v, f, n = pcu.read_obj(fi)
else:
print('Invalid termination')
sys.exit(1)
if len(f)!=0:
samples = pcu.sample_mesh_lloyd(v, f, args.n_points) #normals inside v, poorly saved
np.save(destination_filename+'.npy', samples)
| 2.046875
| 2
|
tests/unit/test_cab.py
|
2js855/symstore
| 0
|
12776890
|
<filename>tests/unit/test_cab.py<gh_stars>0
import mock
import importlib
import unittest
orig_import = __import__
#
# handle differences between python 2.7 and 3
#
# 'builtins' used to be named '__builtin__' in python 2.7
try:
import builtins # noqa
IMPORT_MODULE = "builtins.__import__"
except ImportError:
IMPORT_MODULE = "__builtin__.__import__"
# reload() used to be in the root module
if hasattr(importlib, "reload"):
_reload = importlib.reload
else:
_reload = reload # noqa
def no_gi_import(name, *args):
"""
    Mocked import that emulates that 'gi.*' modules do not exist
"""
if name.startswith("gi"):
raise ImportError("test")
return orig_import(name, *args)
def no_gcab_namespace(name, *args):
"""
Mock gi.require_version() to raise an ValueError to
simulate that GCab bindings are not available.
We mock importing the whole 'gi', so that this test
can be run even when the 'gi' package is not available.
"""
if name.startswith("gi"):
m = mock.Mock()
m.require_version.side_effect = ValueError
return m
return orig_import(name, *args)
class TestNoGcab(unittest.TestCase):
def tearDown(self):
# make sure we reload 'cab' module after the test,
# so the true value of 'compression supported'
# flag is restored, otherwise it will be stuck
# in 'not supported' mode for subsequent tests
import symstore.cab
_reload(symstore.cab)
@mock.patch(IMPORT_MODULE, side_effect=no_gi_import)
def test_gi_import_error(self, _):
"""
test the case when we can't import gi
"""
import symstore.cab
_reload(symstore.cab)
self.assertFalse(symstore.cab.compression_supported)
@mock.patch(IMPORT_MODULE, side_effect=no_gcab_namespace)
def test_no_gcab_namespace(self, _):
"""
        test the case when gi is available, but Gcab is not
"""
import symstore.cab
_reload(symstore.cab)
self.assertFalse(symstore.cab.compression_supported)
| 2.609375
| 3
|
calibrate.py
|
leeasar/Advanced-Lane-Lines
| 0
|
12776891
|
<reponame>leeasar/Advanced-Lane-Lines
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import pickle
# Prepare object points
# Number of inside corners in a calibration chessboard
nx = 9
ny = 6
# Accessing calibration images
cal_files = glob.glob('camera_cal/calibration*.jpg')
# Empty arrays for object points and image points
objpoints = [] # 3D point in real world space
imgpoints = [] # 2D points in image space
# Variable for object points -- will be same for each image
obj_temp = np.zeros((nx*ny, 3), np.float32)
obj_temp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
# Looping through all calibration files
for fname in cal_files:
# Looking for image points
img = mpimg.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
# If image points found, saving them and corresponding object points
if ret == True:
imgpoints.append(corners)
objpoints.append(obj_temp)
# Calculating calibration parameters
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# Saving the needed calibration parameters with pickle
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
pickle.dump(dist_pickle, open('camera_cal/calibration_pickle.p', 'wb'))
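# The saved parameters can later be restored like this (same path as above):
#   with open('camera_cal/calibration_pickle.p', 'rb') as f:
#       cal = pickle.load(f)
#   mtx, dist = cal["mtx"], cal["dist"]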
# Test
img = mpimg.imread('camera_cal/calibration5.jpg')
mpimg.imsave('camera_cal/test_calibration.jpg', cv2.undistort(img, mtx, dist, None, mtx))
| 2.671875
| 3
|
lib/es_request.py
|
chaimpeck/espp
| 0
|
12776892
|
<reponame>chaimpeck/espp
"""es_request.py"""
# https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
import requests
ES_BASE_URL = 'http://localhost:9200/'
class EsRequest:
def __init__(self, indices, types=None, query_string=None):
url = ES_BASE_URL + indices
if types:
url += '/' + types
url += '/_search?' + query_string
self.url = url
def request(self, method, data=None):
if method == "GET":
r = requests.get(self.url)
else:
r = requests.post(self.url, data=data)
return r.json()
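
# Usage sketch (index pattern and query string are only examples):
#   es = EsRequest("logstash-*", query_string="q=status:500&size=10")
#   hits = es.request("GET")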
| 2.65625
| 3
|
web_server.py
|
sdkskdks/assignment4
| 0
|
12776893
|
from flask import Flask, render_template
from flask import request
from database import Tableone
from database import db
from database import app
from selenium import webdriver
from bs4 import BeautifulSoup
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/coin', methods = ['POST'])
def coin():
if request.method == 'POST':
name = request.form['name']
url = 'https://coinmarketcap.com/currencies/'+ str(name) + "/news/"
driver = webdriver.Firefox()
driver.get(url)
page = driver.page_source
soup = BeautifulSoup(page,'html.parser')
news = []
findAllNews = []
findAllNews = soup.findAll('div', class_='sc-16r8icm-0 jKrmxw container')
for news_item in findAllNews:
if news_item.find('p', class_='sc-1eb5slv-0 svowul-3 ddtKCV') is not None:
news.append(news_item.text)
        for p in news:
            new_row = Tableone(name, p)
            db.session.add(new_row)
        # commit once and respond after the loop so every scraped item is stored
        db.session.commit()
        return '\n'.join(news)
if __name__ == '__main__':
app.run(debug=True)
| 2.765625
| 3
|
zad13_6.py
|
kamilhabrych/python-semestr5-lista13
| 0
|
12776894
|
from graphics import *
import random
import math
max_width = 500
max_height = 500
n = int(input("Ile bokow: "))
win = GraphWin('<NAME> zadanie 6', max_width, max_height)
win.setBackground('brown')
center = (250, 250)
r = 125
for item in range(n):
start_point = Point(center[0] + r * math.cos(2 * math.pi * item / n), center[1] + r * math.sin(2 * math.pi * item / n))
if item != n - 1:
next_point = Point(center[0] + r * math.cos(2 * math.pi * (item + 1) / n), center[1] + r * math.sin(2 * math.pi * (item + 1) / n))
else:
next_point = Point(center[0] + r * math.cos(2 * math.pi * 0 / n), center[1] + r * math.sin(2 * math.pi * 0 / n))
l = Line(start_point,next_point)
print("{0} => {1}".format(start_point, next_point))
l.draw(win)
win.getMouse()
win.close()
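# Vertex k of the regular n-gon is placed at
#   (cx + r*cos(2*pi*k/n), cy + r*sin(2*pi*k/n))
# e.g. for n = 4, r = 125 around (250, 250) the vertices are
# (375, 250), (250, 375), (125, 250), (250, 125).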
| 3.59375
| 4
|
main.py
|
josconno/cellular
| 1
|
12776895
|
<gh_stars>1-10
import csv
import os
import os.path as path
from cellular import cellular
def do_run(csv_writer, cas_folder, i, n):
'do run i of n'
colors = ['black', 'blue', 'yellow', 'orange', 'red']
ca = cellular.TotalisticCellularAutomaton(400, colors=colors, radius=1, states=5)
base = str(ca)
ca_folder = path.join(cas_folder, base)
try:
os.mkdir(ca_folder)
except FileExistsError:
pass
j = 0
while any(rule != 0 for rule in ca.rules):
ca.reseed()
ca_class = ca.run(800)
ca.print_stats()
print()
image = ca.draw()
image.save(path.join(ca_folder, '{:02d}-{}.png'.format(j, ca)))
if ca_class is not None:
print("Detected class {}".format(ca_class))
else:
image.show()
while True:
answer = input("Class: ")
if answer in ('1', '2', '3', '4'):
break
else:
ca.resume(800)
image = ca.draw()
image.show()
ca_class = int(answer)
csv_writer.writerow([base, str(ca), j, ca_class, ca.lam, ca.lam_t, ca.entropy, ca.entropy_t, ca.entropy_p, ca.entropy_a])
ca.decimate()
j += 1
def main():
n_runs = 40
cas_folder = 'tmp/'
try:
os.mkdir(cas_folder)
except FileExistsError:
pass
csv_file = open(path.join(cas_folder, 'data.csv'), 'w', newline='')
csv_writer = csv.writer(csv_file)
for i in range(n_runs):
do_run(csv_writer, cas_folder, i, n_runs)
csv_file.close()
if __name__ == '__main__':
main()
| 2.5
| 2
|
mindarmour/adv_robustness/defenses/__init__.py
|
hboshnak/mindarmour
| 139
|
12776896
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module includes classical defense algorithms for defending against adversarial
examples and enhancing model security and trustworthiness.
"""
from .adversarial_defense import AdversarialDefense
from .adversarial_defense import AdversarialDefenseWithAttacks
from .adversarial_defense import EnsembleAdversarialDefense
from .natural_adversarial_defense import NaturalAdversarialDefense
from .projected_adversarial_defense import ProjectedAdversarialDefense
__all__ = ['AdversarialDefense',
'AdversarialDefenseWithAttacks',
'NaturalAdversarialDefense',
'ProjectedAdversarialDefense',
'EnsembleAdversarialDefense']
| 1.359375
| 1
|
src/settings.py
|
AzemaBaptiste/SoundLandscape
| 1
|
12776897
|
<filename>src/settings.py
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())
PROJECT_DIR = Path(__file__).resolve().parents[1]
IMAGE_STREET_PATH = os.path.join(PROJECT_DIR, "data", "raw", "streetview")
IMAGE_GPS_PATH = os.path.join(PROJECT_DIR, "data", "raw", "gps")
IMAGE_FACE_PATH = os.path.join(PROJECT_DIR, "data", "raw", "faces")
MOOD_MODEL_PATH = os.path.join(PROJECT_DIR, "models", "mood_model.h5")
LAND_MODEL_PATH = os.path.join(PROJECT_DIR, "models", "land_model.h5")
LAND_ARCHI_PATH = os.path.join(PROJECT_DIR, "models", "land_architecture.json")
FACE_MODEL_PATH = os.path.join(PROJECT_DIR, "models", "face_model.p")
ENCO_MODEL_PATH = os.path.join(PROJECT_DIR, "models", "face_encoding.p")
NAME_MODEL_PATH = os.path.join(PROJECT_DIR, "models", "face_names.p")
accuweather_key = os.environ.get("accuweathher_api_key")
google_key = os.environ.get("google_key")
spotify_id = os.environ.get("spotify_id")
spotify_pwd = os.environ.get("spotify_pwd")
RULES_PATH = os.path.join(PROJECT_DIR, "references", "settings.csv")
USER_PREFERENCES_PATH = os.path.join(PROJECT_DIR, "references", "user_preferences.json")
MOOD_PREFERENCES_PATH = os.path.join(PROJECT_DIR, "references", "mood_preferences.json")
lat_lon = os.path.join(PROJECT_DIR, "data", "raw", "settings.json")
| 1.890625
| 2
|
crawler/driver/rpc_gearman.py
|
dukov/simplecrawler
| 0
|
12776898
|
<filename>crawler/driver/rpc_gearman.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gearman
class RPCGearman(object):
def __init__(self, srvrs, client_id):
self.id = client_id
self.hosts = srvrs
self.reciever = gearman.GearmanWorker(self.hosts)
self.reciever.set_client_id(self.id)
self.sender = gearman.GearmanClient(self.hosts)
def register_task(self, task_name, func):
self.reciever.register_task(task_name, func)
def rpc_call(self, method, data, **kwargs):
return self.sender.submit_job(method, data, **kwargs)
def run(self):
self.reciever.work()
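
# Usage sketch (host list, client id, task name and handler are assumptions;
# a gearman worker callback receives (worker, job) and returns the result data):
#   rpc = RPCGearman(["127.0.0.1:4730"], "crawler-worker-1")
#   rpc.register_task("fetch_page", lambda worker, job: handle(job.data))
#   rpc.run()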
| 2.375
| 2
|
manualunload.py
|
peppelorum/Pianobaren
| 0
|
12776899
|
import rpyc
conn = rpyc.connect("localhost", 12345)
unload = rpyc.async_(conn.root.unload)
unload()
| 1.882813
| 2
|
weditor/web/handlers/page.py
|
crifan/weditor
| 1
|
12776900
|
# coding: utf-8
#
import base64
import io
import json
import os
import platform
import queue
import subprocess
import sys
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from subprocess import PIPE
from typing import Union
import six
import tornado.gen
import tornado.web
import tornado.websocket
from logzero import logger
from PIL import Image
from tornado.concurrent import run_on_executor
from tornado.escape import json_decode
from ..device import connect_device, get_device
from ..utils import tostr
from ..version import __version__
from ..jsonrpc_client import ConsoleKernel
pathjoin = os.path.join
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header("Access-Control-Allow-Credentials",
"true") # allow cookie
self.set_header('Access-Control-Allow-Methods',
'POST, GET, PUT, DELETE, OPTIONS')
def options(self, *args):
self.set_status(204) # no body
self.finish()
def check_origin(self, origin):
""" allow cors request """
return True
class VersionHandler(BaseHandler):
def get(self):
ret = {
'name': "weditor",
'version': __version__,
}
self.write(ret)
class MainHandler(BaseHandler):
def get(self):
self.render("index.html")
gqueue = queue.Queue()
class BuildWSHandler(tornado.websocket.WebSocketHandler):
executor = ThreadPoolExecutor(max_workers=4)
def open(self):
print("Websocket opened")
self.proc = None
def check_origin(self, origin):
return True
@run_on_executor
def _run(self, device_url, code):
"""
Thanks: https://gist.github.com/mosquito/e638dded87291d313717
"""
try:
print("DEBUG: run code\n%s" % code)
env = os.environ.copy()
env['UIAUTOMATOR_DEBUG'] = 'true'
if device_url and device_url != 'default':
env['ATX_CONNECT_URL'] = tostr(device_url)
start_time = time.time()
self.proc = subprocess.Popen([sys.executable, "-u"],
env=env,
stdout=PIPE,
stderr=subprocess.STDOUT,
stdin=PIPE)
self.proc.stdin.write(code)
self.proc.stdin.close()
for line in iter(self.proc.stdout.readline, b''):
print("recv subprocess:", repr(line))
if line is None:
break
gqueue.put((self, {"buffer": line.decode('utf-8')}))
print("Wait exit")
exit_code = self.proc.wait()
duration = time.time() - start_time
ret = {
"buffer": "",
"result": {
"exitCode": exit_code,
"duration": int(duration) * 1000
}
}
gqueue.put((self, ret))
time.sleep(3) # wait until write done
except Exception:
traceback.print_exc()
@tornado.gen.coroutine
def on_message(self, message):
jdata = json.loads(message)
if self.proc is None:
code = jdata['content']
device_url = jdata.get('deviceUrl')
yield self._run(device_url, code.encode('utf-8'))
self.close()
else:
self.proc.terminate()
            # on Windows, kill is an alias of terminate()
if platform.system() == 'Windows':
return
yield tornado.gen.sleep(0.5)
            if self.proc.poll() is not None:
return
yield tornado.gen.sleep(1.2)
            if self.proc.poll() is not None:
return
print("Force to kill")
self.proc.kill()
def on_close(self):
print("Websocket closed")
class DeviceConnectHandler(BaseHandler):
def post(self):
platform = self.get_argument("platform").lower()
device_url = self.get_argument("deviceUrl")
try:
id = connect_device(platform, device_url)
except RuntimeError as e:
self.set_status(410) # 410 Gone
self.write({
"success": False,
"description": str(e),
})
except Exception as e:
logger.warning("device connect error: %s", e)
self.set_status(410) # 410 Gone
self.write({
"success": False,
"description": traceback.format_exc(),
})
else:
ret = {
"deviceId": id,
'success': True,
}
if platform == "android":
ws_addr = get_device(id).device.address.replace("http://", "ws://") # yapf: disable
ret['screenWebSocketUrl'] = ws_addr + "/minicap"
self.write(ret)
class DeviceHierarchyHandler(BaseHandler):
def get(self, device_id):
d = get_device(device_id)
self.write(d.dump_hierarchy())
class DeviceHierarchyHandlerV2(BaseHandler):
def get(self, device_id):
d = get_device(device_id)
self.write(d.dump_hierarchy2())
class WidgetPreviewHandler(BaseHandler):
def get(self, id):
self.render("widget_preview.html", id=id)
class DeviceWidgetListHandler(BaseHandler):
__store_dir = os.path.expanduser("~/.weditor/widgets")
def generate_id(self):
os.makedirs(self.__store_dir, exist_ok=True)
names = [
name for name in os.listdir(self.__store_dir)
if os.path.isdir(os.path.join(self.__store_dir, name))
]
return "%05d" % (len(names) + 1)
def get(self, widget_id: str):
data_dir = os.path.join(self.__store_dir, widget_id)
with open(pathjoin(data_dir, "hierarchy.xml"), "r",
encoding="utf-8") as f:
hierarchy = f.read()
with open(os.path.join(data_dir, "meta.json"), "rb") as f:
meta_info = json.load(f)
meta_info['hierarchy'] = hierarchy
self.write(meta_info)
def json_parse(self, source):
with open(source, "r", encoding="utf-8") as f:
return json.load(f)
def put(self, widget_id: str):
""" update widget data """
data = json_decode(self.request.body)
target_dir = os.path.join(self.__store_dir, widget_id)
with open(pathjoin(target_dir, "hierarchy.xml"), "w",
encoding="utf-8") as f:
f.write(data['hierarchy'])
# update meta
meta_path = pathjoin(target_dir, "meta.json")
meta = self.json_parse(meta_path)
meta["xpath"] = data['xpath']
with open(meta_path, "w", encoding="utf-8") as f:
f.write(json.dumps(meta, indent=4, ensure_ascii=False))
self.write({
"success": True,
"description": f"widget {widget_id} updated",
})
def post(self):
data = json_decode(self.request.body)
widget_id = self.generate_id()
target_dir = os.path.join(self.__store_dir, widget_id)
os.makedirs(target_dir, exist_ok=True)
image_fd = io.BytesIO(base64.b64decode(data['screenshot']))
im = Image.open(image_fd)
im.save(pathjoin(target_dir, "screenshot.jpg"))
lx, ly, rx, ry = bounds = data['bounds']
im.crop(bounds).save(pathjoin(target_dir, "template.jpg"))
cx, cy = (lx + rx) // 2, (ly + ry) // 2
# TODO(ssx): missing offset
# pprint(data)
widget_data = {
"resource_id": data["resourceId"],
"text": data['text'],
"description": data["description"],
"target_size": [rx - lx, ry - ly],
"package": data["package"],
"activity": data["activity"],
"class_name": data['className'],
"rect": dict(x=lx, y=ly, width=rx-lx, height=ry-ly),
"window_size": data['windowSize'],
"xpath": data['xpath'],
"target_image": {
"size": [rx - lx, ry - ly],
"url": f"http://localhost:17310/widgets/{widget_id}/template.jpg",
},
"device_image": {
"size": im.size,
"url": f"http://localhost:17310/widgets/{widget_id}/screenshot.jpg",
},
# "hierarchy": data['hierarchy'],
} # yapf: disable
with open(pathjoin(target_dir, "meta.json"), "w",
encoding="utf-8") as f:
json.dump(widget_data, f, ensure_ascii=False, indent=4)
with open(pathjoin(target_dir, "hierarchy.xml"), "w",
encoding="utf-8") as f:
f.write(data['hierarchy'])
self.write({
"success": True,
"id": widget_id,
"note": data['text'] or data['description'], # 备注
"data": widget_data,
})
class DeviceScreenshotHandler(BaseHandler):
def get(self, serial):
logger.info("Serial: %s", serial)
try:
d = get_device(serial)
buffer = io.BytesIO()
d.screenshot().convert("RGB").save(buffer, format='JPEG')
b64data = base64.b64encode(buffer.getvalue())
response = {
"type": "jpeg",
"encoding": "base64",
"data": b64data.decode('utf-8'),
}
self.write(response)
except EnvironmentError as e:
traceback.print_exc()
self.set_status(430, "Environment Error")
self.write({"description": str(e)})
except RuntimeError as e:
self.set_status(410) # Gone
self.write({"description": traceback.print_exc()})
class DeviceCodeDebugHandler(BaseHandler):
executor = ThreadPoolExecutor(max_workers=4)
@run_on_executor
def _run(self, device_id, code):
logger.debug("RUN code: %s", code)
client = ConsoleKernel.get_singleton()
output = client.call_output("run_device_code", [device_id, code])
return output
async def post(self, device_id):
start = time.time()
d = get_device(device_id)
logger.debug("deviceId: %s", device_id)
code = self.get_argument('code')
output = await self._run(device_id, code)
self.write({
"success": True,
"duration": int((time.time() - start) * 1000),
"content": output,
})
async def delete(self, device_id):
client = ConsoleKernel.get_singleton()
client.send_interrupt()
self.write({
"success": True,
})
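# Hedged wiring sketch (added for illustration): the URL patterns below are assumptions
# and are not taken from weditor's real application module; they only show how these
# handlers could be mounted in a tornado Application.
def _make_example_app():
    return tornado.web.Application([
        (r"/", MainHandler),
        (r"/version", VersionHandler),
        (r"/api/v1/connect", DeviceConnectHandler),
        (r"/api/v1/devices/([^/]+)/screenshot", DeviceScreenshotHandler),
        (r"/api/v1/devices/([^/]+)/hierarchy", DeviceHierarchyHandler),
        (r"/api/v1/devices/([^/]+)/hierarchy2", DeviceHierarchyHandlerV2),
        (r"/api/v1/devices/([^/]+)/exec", DeviceCodeDebugHandler),
        (r"/ws/v1/build", BuildWSHandler),
    ], template_path="templates")  # template_path is an assumption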
| 2
| 2
|
recipes/Python/496825_Game_theory_payoff_matrix_solver/recipe-496825.py
|
tdiprima/code
| 2,023
|
12776901
|
''' Approximate the strategy oddments for 2 person zero-sum games of perfect information.
Applies the iterative solution method described by <NAME> in his classic
book, The Compleat Strategyst, ISBN 0-486-25101-2. See chapter 5, page 180 for details. '''
from operator import add, neg
def solve(payoff_matrix, iterations=100):
'Return the oddments (mixed strategy ratios) for a given payoff matrix'
transpose = zip(*payoff_matrix)
numrows = len(payoff_matrix)
numcols = len(transpose)
row_cum_payoff = [0] * numrows
col_cum_payoff = [0] * numcols
colpos = range(numcols)
rowpos = map(neg, xrange(numrows))
colcnt = [0] * numcols
rowcnt = [0] * numrows
active = 0
for i in xrange(iterations):
rowcnt[active] += 1
col_cum_payoff = map(add, payoff_matrix[active], col_cum_payoff)
active = min(zip(col_cum_payoff, colpos))[1]
colcnt[active] += 1
row_cum_payoff = map(add, transpose[active], row_cum_payoff)
active = -max(zip(row_cum_payoff, rowpos))[1]
value_of_game = (max(row_cum_payoff) + min(col_cum_payoff)) / 2.0 / iterations
return rowcnt, colcnt, value_of_game
###########################################
# Example solutions to two pay-off matrices
print solve([[2,3,1,4], [1,2,5,4], [2,3,4,1], [4,2,2,2]]) # Example on page 185
print solve([[4,0,2], [6,7,1]]) # Exercise 2 number 3
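# Hedged worked example (added for illustration): the returned counts can be normalised
# into mixed-strategy probabilities by dividing by the number of iterations, e.g.
#   rowcnt, colcnt, value = solve([[2,3,1,4], [1,2,5,4], [2,3,4,1], [4,2,2,2]])
#   row_probs = [float(c) / sum(rowcnt) for c in rowcnt]
#   col_probs = [float(c) / sum(colcnt) for c in colcnt]
# value is already divided by the iteration count, so it approximates the game value directly.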
| 3.09375
| 3
|
covid_checker.py
|
gryffindor-guy/PERSONAL-CARE-CHATBOT
| 1
|
12776902
|
<gh_stars>1-10
import pyfiglet #Install this module using command --> pip install pyfiglet
import webbrowser
def symptoms():
print("Are you experiencing any of the following Symptoms")
print("1 : Cough")
print("2 : Fever")
print("3 : Difficulty in breathing")
print("4 : Loss of senses of smell and taste")
print("5 : None of the above")
print("__________________________________")
try:
score = int(input("Enter the number of symptoms that you are experiencing. Enter 0 if your choice is 'None of the Above' : "))
except:
print("please enter a valid number of symptoms that you are experiencing")
score = 10
return score
finally:
if (score < 0 or score > 4):
print("please enter a valid number of symptoms that you are experiencing")
score = 10
return score
def health_problems():
score = 0
print("Have you ever had any of the following?")
print("1 : Diabetes")
print("2 : Hypertension")
print("3 : Lung disease")
print("4 : Heart Diesease")
print("5 : Kidney Disorder")
print("0 : None of the above")
print("__________________________________")
try:
score = int(input("Enter the number of Health Problems that you are experiencing. Enter 0 if your choice is 'None of the Above' : "))
except:
print("please enter a valid number of Health Problems that you are experiencing")
score = 10;
return score
finally:
if (score < 0 or score > 5):
print("please enter a valid number of Health Problems that you are experiencing")
score = 10
return score
def isTravelled():
score = 0
print("Have you traveled anywhere internationally in the last 28-45 days?")
print("1 : Yes")
print("0 : No")
try:
score = int(input("Enter your Response : "))
except:
print("please enter a valid number of symptoms that you are experiencing")
score = 10;
return score
finally:
if (score < 0 or score > 1):
print("please enter a valid number of symptoms that you are experiencing")
score = 10
return score
def interaction():
score = 0
print("Which of the following apply to you?")
print("1 : I have recently interacted or lived with someone who has tested covid-19 positive ")
print("2 : I am a Healthcare Worker and I examined a covid-19 confirmed case without protective gear")
print("0 : None of the above")
print("__________________________________")
try:
score = int(input("Enter the number of above conditions that you are applicable. Enter 0 if your choice is 'None of the Above' : "))
except:
print("please enter a valid number of symptoms that you are experiencing")
score = 10
return score
finally:
        if (score < 0 or score > 2):
            print("please enter a valid number of conditions that apply to you")
score = 10
return score
def covid_checkup_score():
total_score = 0
# Symptoms
score = symptoms()
if (score == 10): return "something went wrong"
elif (score > 2): total_score += (score + 2)
else: total_score += score
# Health Condition
score = health_problems()
if (score == 10): return "something went wrong"
elif (score > 2): total_score += (score + 2)
else: total_score += score
# Recently Travelled or not
score = isTravelled()
if (score == 10): return "something went wrong"
elif (score == 1): total_score += 3
else: total_score += score
# Interaction
score = interaction()
if (score == 10): return "something went wrong"
elif (score > 1): total_score += 5
else: total_score += score * 2
return total_score
def covid_checkup(value):
status = True
try:
if (value >= 15):
print("!!!!!!!!!")
print("If the information provided by you is accurate, It indicates that you are either unwell or at Risk.")
print("we recommand you to get tested for covid-19 near your local hospital!,Please put on a mask!")
print("CHOOSE YOUR INTEREST OF CONSULTATION")
print("1 : I want an Hospital appointment")
print("2 : I want an video appointment with a doctor")
print("3 : I will take care of it")
try:
response = int(input())
if response == 1:
print("Redirecting you........")
webbrowser.open("https://www.askapollo.com/online-doctors-consultation/", new=1)
elif response == 2:
print("Redirecting you........")
webbrowser.open("https://www.practo.com/", new=1)
except:
print("Invalid input, please choose carefully.")
status = False
elif (value < 15):
result = pyfiglet.figlet_format("HEALTH REPORT")
print(result)
print("Your infection risk is low :) :) :)")
print("May be its just climate change ,we recommend you to stay at home for a couple of days to avoid any chance of exposure to the Novel Coronavirus and get this sample test done again.")
print("Thank you! For your patience there isn't anything you should be worried about now, get this test done again when you dont feel well")
print("----------------------------------------------------------------------------------------------------")
status = False
except:
print("Invalid details are provided :(")
def check_me():
value = covid_checkup_score()
covid_checkup(value)
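# Hedged entry point (added for illustration): the original script only defines check_me();
# a guard like this is one way to run the questionnaire directly.
if __name__ == "__main__":
    check_me()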
| 3.578125
| 4
|
echopype/model/ek60.py
|
cyrf0006/echopype
| 0
|
12776903
|
"""
echopype data model inherited from base class EchoData for EK60 data.
"""
import datetime as dt
import numpy as np
import xarray as xr
from .echo_data import EchoData
class EchoDataEK60(EchoData):
"""Class for manipulating EK60 echo data that is already converted to netCDF."""
def __init__(self, file_path=""):
EchoData.__init__(self, file_path)
self.tvg_correction_factor = 2 # range bin offset factor for calculating time-varying gain in EK60
def calibrate(self, save=False):
"""Perform echo-integration to get volume backscattering strength (Sv) from EK60 power data.
TODO: need to write a separate method for calculating TS as have been done for AZFP data.
Parameters
-----------
save : bool, optional
whether to save calibrated Sv output
default to ``False``
"""
# Open data set for Environment and Beam groups
ds_env = xr.open_dataset(self.file_path, group="Environment")
ds_beam = xr.open_dataset(self.file_path, group="Beam")
# Derived params
sample_thickness = ds_env.sound_speed_indicative * ds_beam.sample_interval / 2 # sample thickness
wavelength = ds_env.sound_speed_indicative / ds_env.frequency # wavelength
# Calc gain
CSv = 10 * np.log10((ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) ** 2 *
wavelength ** 2 * ds_env.sound_speed_indicative * ds_beam.transmit_duration_nominal *
10 ** (ds_beam.equivalent_beam_angle / 10)) /
(32 * np.pi ** 2))
# Get TVG and absorption
range_meter = ds_beam.range_bin * sample_thickness - \
self.tvg_correction_factor * sample_thickness # DataArray [frequency x range_bin]
range_meter = range_meter.where(range_meter > 0, other=0) # set all negative elements to 0
TVG = np.real(20 * np.log10(range_meter.where(range_meter != 0, other=1)))
ABS = 2 * ds_env.absorption_indicative * range_meter
# Save TVG and ABS for noise estimation use
self.sample_thickness = sample_thickness
self.TVG = TVG
self.ABS = ABS
# Calibration and echo integration
Sv = ds_beam.backscatter_r + TVG + ABS - CSv - 2 * ds_beam.sa_correction
Sv.name = 'Sv'
# Save calibrated data into the calling instance and
# ... to a separate .nc file in the same directory as the data file
self.Sv = Sv
if save:
print('%s saving calibrated Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
Sv.to_netcdf(path=self.Sv_path, mode="w")
# Close opened resources
ds_env.close()
ds_beam.close()
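# Hedged usage sketch (added for illustration; the file name below is a placeholder).
def _example_calibration():
    """Calibrate a converted EK60 netCDF file and return the volume backscatter (Sv)."""
    data = EchoDataEK60(file_path="converted_ek60_data.nc")
    data.calibrate(save=False)
    # Sv is an xarray DataArray indexed by frequency, ping_time and range_bin
    return data.Sv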
| 2.75
| 3
|
samples/switch-commands/aci-show-fex.py
|
carterej1989/acitoolkit
| 0
|
12776904
|
#!/usr/bin/env python
"""
This application replicates the switch CLI command 'show fex'
It largely uses raw queries to the APIC API
"""
from acitoolkit import Credentials, Session
from tabulate import tabulate
class FexCollector(object):
def __init__(self, url, login, password):
# Login to APIC
self._apic = Session(url, login, password)
if not self._apic.login().ok:
self._logged_in = False
print '%% Could not login to APIC'
else:
self._logged_in = True
def _get_query(self, query_url, error_msg):
resp = self._apic.get(query_url)
if not resp.ok:
print error_msg
print resp.text
return []
return resp.json()['imdata']
def get_fex_attributes(self, node_id, fex_id=None):
if fex_id is None:
query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
'&target-subtree-class=satmDExtCh' % node_id)
else:
query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
'&target-subtree-class=satmDExtCh&query-target-filter=eq(satmDExtCh.id, "%s")' % (node_id,
fex_id))
error_message = 'Could not collect APIC data for switch %s.' % node_id
return self._get_query(query_url, error_message)
def get_fabric_port_attributes(self, node_id, fex_id):
query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
'&target-subtree-class=satmFabP&query-target-filter='
'eq(satmFabP.extChId,"%s")' % (node_id, fex_id))
error_message = 'Could not collect APIC data for switch %s.' % node_id
return self._get_query(query_url, error_message)
def get_transceiver_attributes(self, node_id, fab_port_id):
query_url = ('/api/mo/topology/pod-1/node-%s/sys/satm/fabp-[%s].json?'
'query-target=subtree&target-subtree-class=satmRemoteFcot'
',satmRemoteFcotX2' % (node_id, fab_port_id))
error_message = 'Could not collect APIC data for switch %s.' % node_id
return self._get_query(query_url, error_message)
def get_chassis_attributes(self, node_id, fex_id):
query_url = '/api/mo/topology/pod-1/node-%s/sys/extch-%s.json' % (node_id, fex_id)
error_message = 'Could not collect APIC data for switch %s.' % node_id
return self._get_query(query_url, error_message)
def get_chassis_card_attributes(self, node_id, fex_id):
query_url = ('/api/mo/topology/pod-1/node-%s/sys/extch-%s.json?'
'query-target=subtree&target-subtree-class=eqptExtChCard' % (node_id, fex_id))
error_message = 'Could not collect APIC data for switch %s.' % node_id
return self._get_query(query_url, error_message)
def get_chassis_running_attributes(self, node_id, fex_id):
query_url = '/api/mo/topology/pod-1/node-%s/sys/extch-%s/running.json' % (node_id, fex_id)
error_message = 'Could not collect APIC data for switch %s.' % node_id
return self._get_query(query_url, error_message)
def get_chassis_cpu_attributes(self, node_id, fex_id):
query_url = ('/api/mo/topology/pod-1/node-%s/sys/extch-%s.json?'
'query-target=subtree&target-subtree-class=eqptExtChCPU' % (node_id, fex_id))
error_message = 'Could not collect APIC data for switch %s.' % node_id
return self._get_query(query_url, error_message)
def get_fex_ids(self, node_id):
fex_attrs = self.get_fex_attributes(node_id)
fex_ids = []
print fex_attrs
for fex_attr in fex_attrs:
fex_ids.append(str(fex_attr['satmDExtCh']['attributes']['id']))
return fex_ids
def get_node_ids(self, node_id):
"""
Get the list of node ids from the command line arguments.
If none, get all of the node ids
        :param node_id: String containing a specific node id, or None for all leaf switches
:return: List of strings containing node ids
"""
if node_id is not None:
names = [node_id]
else:
names = []
query_url = ('/api/node/class/fabricNode.json?'
'query-target-filter=eq(fabricNode.role,"leaf")')
error_message = 'Could not get switch list from APIC.'
nodes = self._get_query(query_url, error_message)
for node in nodes:
names.append(str(node['fabricNode']['attributes']['id']))
return names
@staticmethod
def print_fex(fex_attr, chassis_attr, detail=False):
print 'FEX:%s Description: FEX0%s state: %s' % (fex_attr['id'],
fex_attr['id'],
fex_attr['operSt'])
print ' FEX version: %s [Switch version: %s]' % (fex_attr['ver'],
fex_attr['swVer'])
if detail:
print ' FEX Interim version:', fex_attr['intVer']
print ' Switch Interim version:', fex_attr['swIntVer']
print ' Extender Model: %s, Extender Serial: %s' % (fex_attr['model'],
fex_attr['ser'])
print ' Part No:', chassis_attr['partNum']
if detail:
print ' Card Id: %s,' % fex_attr['swCId']
print 'Mac Addr: %s,' % fex_attr['macAddr']
print 'Num Macs:', fex_attr['numMacs']
print ' Module Sw Gen:', fex_attr['swGen']
print ' [Switch Sw Gen: %s]' % fex_attr['swSwGen']
print ' pinning-mode: static Max-links: 1'
print ' Fabric port for control traffic:', fex_attr['controlFPort']
@staticmethod
def convert_to_ascii(data):
data = str(data).split(',')
resp = ''
for letter in data:
resp += str(unichr(int(letter)))
return resp
def print_fex_transceiver(self, node_id, fex_id):
if fex_id is None:
fex_ids = self.get_fex_ids(node_id)
else:
fex_ids = [fex_id]
for fex_id in fex_ids:
fab_port_num = 1
fab_ports = self.get_fabric_port_attributes(node_id, fex_id)
for fab_port in fab_ports:
fab_port_attr = fab_port['satmFabP']['attributes']
if fab_port_attr['id'].startswith('po'):
continue
print 'Fex Uplink:', fab_port_num
print ' Fabric Port :', fab_port_attr['id']
if 'fcot-present' in fab_port_attr['flags']:
transceiver_attr = self.get_transceiver_attributes(node_id, str(fab_port_attr['id']))
try:
transceiver_attr = transceiver_attr[0]['satmRemoteFcot']['attributes']
except KeyError:
raise NotImplementedError # probably satmRemoteFcotV2
print ' sfp is present'
print ' name is', self.convert_to_ascii(transceiver_attr['vendorName'])
print ' type is', transceiver_attr['typeName']
print ' part number is', self.convert_to_ascii(transceiver_attr['vendorPn'])
print ' revision is', self.convert_to_ascii(transceiver_attr['vendorRev'])
print ' serial number is', self.convert_to_ascii(transceiver_attr['vendorSn'])
print ' nominal bitrate is %s MBits/sec' % str(int(transceiver_attr['brIn100MHz']) * 100)
print ' Link length supported for 50/125mm fiber is 0 m(s)'
print ' Link length supported for 62.5/125mm fiber is 0 m(s)'
print ' Link length supported for copper is %s m' % transceiver_attr['distIn1mForCu']
print ' cisco id is', transceiver_attr['xcvrId']
print ' cisco extended id number is', transceiver_attr['xcvrExtId']
fab_port_num += 1
def print_fex_version(self, node_id, fex_id):
if fex_id is None:
fex_ids = self.get_fex_ids(node_id)
else:
fex_ids = [fex_id]
for fex_id in fex_ids:
chassis_attr = self.get_chassis_attributes(node_id, fex_id)
chassis_attr = chassis_attr[0]['eqptExtCh']['attributes']
chassis_running_attr = self.get_chassis_running_attributes(node_id, fex_id)
chassis_running_attr = chassis_running_attr[0]['firmwareExtChRunning']['attributes']
card_attr = self.get_chassis_card_attributes(node_id, fex_id)
card_attr = card_attr[0]['eqptExtChCard']['attributes']
fex_attr = self.get_fex_attributes(node_id, fex_id)
fex_attr = fex_attr[0]['satmDExtCh']['attributes']
cpu_attr = self.get_chassis_cpu_attributes(node_id, fex_id)
cpu_attr = cpu_attr[0]['eqptExtChCPU']['attributes']
print 'Software'
print ' Bootloader version: %s' % chassis_running_attr['loaderVer']
print ' System boot mode: primary'
print ' System image version: %s [build %s]' % (fex_attr['ver'], fex_attr['intVer'])
print '\nHardware'
print ' Module: %s' % card_attr['descr']
print ' CPU: %s' % cpu_attr['model']
print ' Serial number: %s' % card_attr['modSerial']
print ' Bootflash: locked'
# TODO: Finish - need to add timestamping
def show_fex(self, node=None, fex_id=None, detail=False, transceiver=False, version=False):
"""
Show fex
:param fex_id: String containing the specific FEX id. If none, all FEXs are used
:param detail: Boolean indicating whether a detailed report should be given.
:param transceiver: Boolean indicating whether a transceiver report should be given.
:param version: Boolean indicating whether a version report should be given.
:return: None
"""
for node_id in self.get_node_ids(node):
if fex_id is None:
if not (detail or transceiver or version):
# Show fex
data = []
for fex in self.get_fex_attributes(node_id):
fex_attr = fex['satmDExtCh']['attributes']
data.append((int(fex_attr['id']),
'FEX0' + str(fex_attr['id']),
fex_attr['operSt'],
fex_attr['model'],
fex_attr['ser']))
data.sort(key=lambda tup: tup[0])
if len(data):
print 'Switch:', node_id
print tabulate(data, headers=['Number', 'Description', 'State', 'Model', 'Serial'])
print '\n'
elif detail:
# Show fex detail
fex_ids = self.get_fex_ids(node_id)
for fex_id in fex_ids:
self.print_show_fex(node_id, fex_id, detailed=True)
elif transceiver:
self.print_fex_transceiver(node_id, None)
elif detail:
# Show fex <fex_id> detail
self.print_show_fex(node_id, fex_id, detailed=True)
elif transceiver:
# Show fex <fex_id> transceiver
self.print_fex_transceiver(node_id, fex_id)
elif version:
# Show fex <fex_id> version
self.print_fex_version(node_id, fex_id)
else:
# Show fex <fex_id>
self.print_show_fex(node_id, fex_id)
def print_show_fex(self, node_id, fex_id, detailed=False):
for fex in self.get_fex_attributes(node_id, fex_id):
fex_attr = fex['satmDExtCh']['attributes']
for chassis in self.get_chassis_attributes(node_id, fex_attr['id']):
chassis_attr = chassis['eqptExtCh']['attributes']
self.print_fex(fex_attr, chassis_attr)
query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
'&target-subtree-class=satmFabP&query-target-filter=eq(satmFabP.extChId,"%s")' % (
node_id,
fex_attr['id']))
resp = self._apic.get(query_url)
if not resp.ok:
print 'Could not collect APIC data for switch %s.' % node_id
print resp.text
return
if int(resp.json()['totalCount']) > 0:
print ' Fabric interface state:'
for interface in resp.json()['imdata']:
intf_attr = interface['satmFabP']['attributes']
print ' %15s - Interface %4s. State: %s' % (intf_attr['id'],
intf_attr['operSt'],
intf_attr['fsmSt'])
if detailed:
query_url = ('/api/mo/topology/pod-1/node-%s/sys/satm/fabp-[%s].json?query-target=subtree'
'&target-subtree-class=satmHostP' % (node_id, intf_attr['id']))
resp = self._apic.get(query_url)
if not resp.ok:
print 'Could not collect APIC data for switch %s.' % node_id
print resp.text
return
if int(resp.json()['totalCount']) > 0:
data = []
for port in resp.json()['imdata']:
port_attr = port['satmHostP']['attributes']
data.append((port_attr['id'], port_attr['operSt'], port_attr['fabricPort']))
data.sort(key=lambda tup: tup[0])
print tabulate(data, headers=['Fex Port', 'State', 'Fabric Port'])
def main():
"""
Main common routine for show fex description
:return: None
"""
# Set up the command line options
creds = Credentials(['apic', 'nosnapshotfiles'],
description=("This application replicates the switch "
"CLI command 'show interface'"))
creds.add_argument('-s', '--switch',
type=str,
default=None,
help='Specify a particular switch id, e.g. "101"')
creds.add_argument('-f', '--fex',
type=str,
default=None,
help='Specify a particular FEX id, e.g. "101"')
group = creds.add_mutually_exclusive_group()
group.add_argument('-d', '--detail',
action='store_true',
                       help='Provide a detailed report (equivalent to "show fex detail")')
group.add_argument('-t', '--transceiver',
action='store_true',
                       help='Provide a transceiver report (equivalent to "show fex transceiver")')
group.add_argument('-v', '--version',
action='store_true',
                       help='Provide a version report (equivalent to "show fex version")')
args = creds.get()
fex_collector = FexCollector(args.url, args.login, args.password)
    # Show fex output
fex_collector.show_fex(args.switch, args.fex, args.detail, args.transceiver, args.version)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
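# Hedged invocation examples (added for illustration; the URL, credentials and IDs are
# placeholders, and the exact credential flag names depend on the acitoolkit version in use):
#   python aci-show-fex.py -u https://apic -l admin -p password
#   python aci-show-fex.py -u https://apic -l admin -p password -s 101 -f 101 --detail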
| 2.34375
| 2
|
SloppyCell/lmopt.py
|
bcdaniels/SloppyCell
| 2
|
12776905
|
from __future__ import nested_scopes
# Levenberg Marquardt minimization routines
"""
fmin_lm : standard Levenberg Marquardt
fmin_lmNoJ : Levenberg Marquardt using a cost function instead of
a residual function and a gradient/J^tJ pair instead
of the derivative of the residual function. Useful
in problems where the number of residuals is very large.
fmin_lm_scale : scale invariant Levenberg Marquardt
"""
import scipy
from scipy import absolute, sqrt, asarray, zeros, mat, transpose, ones, dot, sum
import scipy.linalg
import copy
import SloppyCell.Utility
save = SloppyCell.Utility.save # module that provides pickled save
import SloppyCell.KeyedList_mod as KeyedList_mod
KeyedList = KeyedList_mod.KeyedList
abs = absolute
_epsilon = sqrt(scipy.finfo(scipy.float_).eps)
def approx_fprime(xk,f,epsilon,*args):
f0 = apply(f,(xk,)+args)
grad = scipy.zeros((len(xk),),scipy.float_)
ei = scipy.zeros((len(xk),),scipy.float_)
for k in range(len(xk)):
ei[k] = epsilon
grad[k] = (apply(f,(xk+ei,)+args) - f0)/epsilon
ei[k] = 0.0
return grad
def approx_fprime1(xk,f,epsilon,*args):
""" centred difference formula to approximate fprime """
#f0 = apply(f,(xk,)+args)
grad = scipy.zeros((len(xk),),scipy.float_)
ei = scipy.zeros((len(xk),),scipy.float_)
epsilon = (epsilon**2.0)**(1.0/3.0) # should be macheps^(1/3)
for k in range(len(xk)):
ei[k] = epsilon
grad[k] = (apply(f,(xk+ei,)+args) - apply(f,(xk-ei,)+args))/(2.0*epsilon)
ei[k] = 0.0
return grad
def approx_fprime2(xk,f,epsilon,*args):
""" centred difference formula to approximate the jacobian, given the residual
function """
#f0 = apply(f,(xk,)+args)
grad = scipy.zeros((len(xk),),scipy.float_)
ei = scipy.zeros((len(xk),),scipy.float_)
epsilon = (epsilon**2.0)**(1.0/3.0) # should be macheps^(1/3)
ei[0] = epsilon
resminus = asarray(apply(f,(xk-ei,)+args))
resplus = asarray(apply(f,(xk+ei,)+args))
m = len(resminus)
jac = scipy.zeros((m,len(xk)),scipy.float_)
jac[:,0] = (resplus-resminus)/(2.0*epsilon)
ei[0] = 0.0
for k in range(1,len(xk)):
ei[k] = epsilon
resplus = asarray(apply(f,(xk+ei,)+args))
resminus = asarray(apply(f,(xk-ei,)+args))
jac[:,k] = (resplus-resminus)/(2.0*epsilon)
#jac[k,:] = mat(transpose(mat(apply(f,(xk+ei,)+args) - apply(f,(xk-ei,)+args))))/(2.0*epsilon)
ei[k] = 0.0
return jac
def check_grad(func, grad, x0, *args):
approx_grad = approx_fprime(x0,func,_epsilon,*args)
print("Finite difference gradient ", approx_grad)
analytic_grad = grad(x0,*args)
print("Analytic gradient ", analytic_grad)
differencenorm = sqrt(sum(approx_grad-analytic_grad)**2)
print("Norm of difference is ", differencenorm)
return differencenorm
def approx_fhess_p(x0,p,fprime,epsilon,*args):
f2 = apply(fprime,(x0+epsilon*p,)+args)
f1 = apply(fprime,(x0,)+args)
return (f2 - f1)/epsilon
def safe_res(f,x,args):
"""
Applies f to x.
Returns f(x) and cost = sum(f(x)**2).
In the case that cost = NaN, returns cost = inf.
In the case of an exception, returns res = None, cost = inf.
"""
try:
res = asarray(apply(f,(x,)+args))
cost = sum(res**2)
except (SloppyCell.Utility.SloppyCellException,OverflowError):
res = None
cost = scipy.inf
if scipy.isnan(cost): cost = scipy.inf
return res, cost
def safe_fprime(fprime,x,args):
"""
Applies fprime to x.
Returns j and exit code. For nonzero exit codes, j is returned as None.
Exit code 0: No errors.
Exit code 3: Jacobian contains NaN or inf.
Exit code 4: Exception in Jacobian calculation.
"""
try:
j = asarray(apply(fprime,(x,)+args))
err = 0
except SloppyCell.Utility.SloppyCellException:
j = None
err = 4
if j is not None:
if ( scipy.isnan(j).any() or scipy.isinf(j).any() ):
j = None
err = 3
return j, err
def fmin_lm(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, lambdainit = None,
jinit = None, trustradius = 1.0):
"""Minimizer for a nonlinear least squares problem. Allowed to
have more residuals than parameters or vice versa.
f : residual function (function of parameters)
fprime : derivative of residual function with respect to parameters.
Should return a matrix (J) with dimensions number of residuals
by number of parameters.
x0 : initial parameter set
avegtol : convergence tolerance on the gradient vector
epsilon : size of steps to use for finite differencing of f (if fprime
not passed in)
maxiter : maximum number of iterations
full_output : 0 to get only the minimum set of parameters back
1 if you also want the best parameter set, the
lowest value of f, the number of function calls,
the number of gradient calls, the convergence flag,
the last Marquardt parameter used (lambda), and the
last evaluation of fprime (J matrix)
disp : 0 for no display, 1 to give cost at each iteration and convergence
conditions at the end
retall : 0 for nothing extra to be returned, 1 for all the parameter
sets during the optimization to be returned
lambdainit : initial value of the Marquardt parameter to use (useful if
                 continuing from an old optimization run)
jinit : initial evaluation of the residual sensitivity matrix (J).
trustradius : set this to the maximum move you want to allow in a single
parameter direction.
If you are using log parameters, then setting this
to 1.0, for example, corresponds to a multiplicative
change of exp(1) = 2.718
"""
app_fprime = 0
if fprime is None:
app_fprime = 1
xcopy = copy.copy(x0)
if isinstance(x0,KeyedList) :
x0 = asarray(x0.values())
else :
x0 = asarray(x0)
if lambdainit != None :
Lambda = lambdainit
else :
Lambda = 1.0e-2
Mult = 10.0
n = len(x0)
func_calls = 0
grad_calls = 0
res,currentcost = safe_res(f,x0,args)
func_calls+=1
m = res.shape[0]
if maxiter is None :
maxiter = 200*n
niters = 0
x = x0
gtol = n*avegtol
if retall:
allvecs = [x]
x1 = x0
x2 = x0
d = zeros(n,scipy.float_)
move = zeros(n,scipy.float_)
finish = 0
if jinit!=None :
j = jinit
else :
if app_fprime :
j = asarray(apply(approx_fprime2,(x,f,epsilon)+args))
func_calls = func_calls + 2*len(x)
else :
j,err = safe_fprime(fprime,x,args)
if err:
finish = err
grad_calls+=1
# NOTE: Below is actually *half* the gradient (because
# we define the cost as the sum of squares of residuals)
# However the equations defining the optimization move, dp,
# are 2.0*J^tJ dp = -2.0*J^t r, where r is the residual
# vector; therefore, the twos cancel both sides
if j is not None: grad = mat(res)*mat(j)
while (niters<maxiter) and (finish == 0):
# note: grad, res and j will be available from the end of the
# last iteration. They just need to be computed the zeroth
        # time as well (above)
lmh = mat(transpose(j))*mat(j)
# use more accurate way to get e-vals/dirns
#[u,s,v] = scipy.linalg.svd(lmh)
[u,ssqrt,vt] = scipy.linalg.svd(j)
# want n singular values even if m<n and we have
# more parameters than data points.
if (len(ssqrt) == n) :
s = ssqrt**2
elif (len(ssqrt)<n) :
s = zeros((n,),scipy.float_)
s[0:len(ssqrt)] = ssqrt**2
#print "s is (in original) ", s
#rhsvect = -mat(transpose(u))*mat(transpose(grad))
rhsvect = -mat(vt)*mat(transpose(grad))
rhsvect = asarray(rhsvect)[:,0]
move = abs(rhsvect)/(s+Lambda*scipy.ones(n)+1.0e-30*scipy.ones(n))
move = list(move)
maxindex = move.index(max(move))
move = asarray(move)
if max(move) > trustradius :
Lambda = Mult*(1.0/trustradius*abs(rhsvect[maxindex])-s[maxindex])
#print " Increasing lambda to ", Lambda
# now do the matrix inversion
for i in range(0,n) :
if (s[i]+Lambda) < 1.0e-30 :
d[i] = 0.0
else :
d[i] = 1.0/(s[i]+Lambda)
move[i] = d[i]*rhsvect[i]
move = asarray(move)
# move = asarray(mat(transpose(v))*mat(transpose(mat(move))))[:,0]
move = asarray(mat(transpose(vt))*mat(transpose(mat(move))))[:,0]
# print move
x1 = x + move
moveold = move[:]
for i in range(0,n) :
if (s[i]+Lambda/Mult) < 1.0e-30 :
d[i] = 0.0
else :
d[i] = 1.0/(s[i]+Lambda/Mult)
move[i] = d[i]*rhsvect[i]
move = asarray(mat(transpose(vt))*mat(transpose(mat(move))))[:,0]
x2 = x + asarray(move)
_,currentcost = safe_res(f,x,args)
func_calls+=1
res2,costlambdasmaller = safe_res(f,x2,args)
func_calls+=1
res1,costlambda = safe_res(f,x1,args)
func_calls+=1
if disp :
print('Iteration number', niters)
print('Current cost', currentcost)
print("Move 1 gives cost of" , costlambda)
print("Move 2 gives cost of ", costlambdasmaller)
#fp = open('LMoutfile','a')
#fp.write('Iteration number ' + niters.__str__() + '\n')
#fp.write('Current cost ' + currentcost.__str__() + '\n')
#fp.write('Move 1 gives cost of ' + costlambda.__str__() + '\n')
#fp.write('Move 2 gives cost of ' + costlambdasmaller.__str__() + '\n')
#fp.close()
oldcost = currentcost
oldres = res
oldjac = j
if costlambdasmaller <= currentcost :
xprev = x[:]
Lambda = Lambda/Mult
x = x2[:]
if retall:
allvecs.append(x)
currentcost = costlambdasmaller
if app_fprime :
j = asarray(apply(approx_fprime2,(x2,f,epsilon)+args))
func_calls = func_calls + 2*len(x2)
else :
j,err = safe_fprime(fprime,x2,args)
if err:
x = xprev[:]
finish = err
grad_calls+=1
if j is not None: grad = mat(res2)*mat(j)
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
elif costlambda <= currentcost :
xprev = x[:]
currentcost = costlambda
x = x1[:]
move = moveold[:]
if retall:
allvecs.append(x)
if app_fprime :
j = asarray(apply(approx_fprime2,(x1,f,epsilon)+args))
func_calls = func_calls + 2*len(x1)
else :
j,err = safe_fprime(fprime,x1,args)
if err:
x = xprev[:]
finish = err
grad_calls+=1
if j is not None: grad = mat(res1)*mat(j)
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
else :
Lambdamult = Lambda
costmult = costlambda
piOverFour = .78539816339744825
NTrials = 0
NTrials2 = 0
move = moveold[:]
while (costmult > currentcost) and (NTrials < 10) :
num = -scipy.dot(grad,move)[0]
den = scipy.linalg.norm(grad)*scipy.linalg.norm(move)
gamma = scipy.arccos(num/den)
NTrials = NTrials+1
                # was (gamma>piOverFour) below but that doesn't
# make much sense to me. I don't think you should
# cut back on a given step, I think the trust
# region strategy is more successful
if (gamma > 0) :
Lambdamult = Lambdamult*Mult
for i in range(0,n) :
if s[i]+Lambdamult < 1.0e-30 :
d[i] = 0.0
else :
d[i] = 1.0/(s[i]+Lambdamult)
move[i] = d[i]*rhsvect[i]
move = asarray(mat(transpose(vt))*mat(transpose(mat(move))))[:,0]
x1 = x + move
res1,costmult = safe_res(f,x1,args)
func_calls+=1
else :
NTrials2 = 0
while (costmult > currentcost) and (NTrials2 < 10) :
NTrials2 = NTrials2 + 1
if disp == 1:
print(" Decreasing stepsize ")
move = (.5)**NTrials2*moveold
x1 = x + asarray(move)
res1,costmult = safe_res(f,x1,args)
func_calls+=1
if (NTrials==10) or (NTrials2==10) :
if disp == 1:
print(" Failed to converge")
finish = 1
else :
xprev = x[:]
x = x1[:]
if retall:
allvecs.append(x)
Lambda = Lambdamult
if app_fprime :
j = asarray(apply(approx_fprime2,(x,f,epsilon)+args))
func_calls = func_calls + 2*len(x)
else :
j,err = safe_fprime(fprime,x,args)
if err:
x = xprev[:]
finish = err
grad_calls+=1
if j is not None: grad = mat(res1)*mat(j)
currentcost = costmult
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
niters = niters + 1
# see if we need to reduce the trust region
newmodelval = oldres+asarray(mat(oldjac)*mat(transpose(mat(move))))[:,0]
oldmodelval = oldres
#print oldcost-sum(newmodelval**2)
#print trustradius
if ((oldcost-sum(newmodelval**2))>1.0e-16) :
ratio = (oldcost-currentcost)/(oldcost-sum(newmodelval**2))
if ratio < .25 :
trustradius = trustradius/2.0
if ratio >.25 and ratio<=.75 :
trustradius = trustradius
if ratio > .75 and trustradius<10.0 :
trustradius = 2.0*trustradius
#save(x,'currentParamsLM')
if disp :
if (niters>=maxiter) and (finish != 2) :
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
print(" Maximum number of iterations exceeded with no convergence ")
if (finish == 2) :
print(" Optimization terminated successfully.")
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
if (finish == 3) :
print(" Optimization aborted: Jacobian contains nan or inf.")
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
if (finish == 4) :
print(" Optimization aborted: Exception in Jacobian calculation.")
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
if isinstance(xcopy,KeyedList) :
xcopy.update(x)
else :
xcopy = x
if full_output:
retlist = xcopy, currentcost, func_calls, grad_calls, finish, Lambda, j
if retall:
retlist += (allvecs,)
else :
retlist = xcopy
if retall :
retlist = (xcopy,allvecs)
return retlist
def fmin_lmNoJ(fcost, x0, fjtj, args=(), avegtol=1e-5, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, trustradius=1.0):
"""Minimizer for a nonlinear least squares problem. Allowed to
have more residuals than parameters or vice versa
fcost : the cost function (*not* the residual function)
fjtj : this function must return back an ordered pair, the first entry
is the gradient of the cost and the second entry is the Levenberg
Marquardt (LM) approximation to the cost function.
NOTE: If the cost function = 1/2 * sum(residuals**2) then
the LM approximation is the matrix matrix product J^t J
where J = derivative of residual function with respect to parameters.
However if cost = k*sum(residuals**2) for some constant k, then
the LM approximation is 2*k*J^t J, so beware of this factor!!!
x0 : initial parameter set
avegtol : convergence tolerance on the gradient vector
epsilon : size of steps to use for finite differencing of f (if fprime
not passed in)
maxiter : maximum number of iterations
full_output : 0 to get only the minimum set of parameters back
1 if you also want the best parameter set, the
lowest value of f, the number of function calls,
the number of gradient calls, the convergence flag,
the last Marquardt parameter used (lambda), and the
last evaluation of fprime (J matrix)
disp : 0 for no display, 1 to give cost at each iteration and convergence
conditions at the end
retall : 0 for nothing extra to be returned, 1 for all the parameter
sets during the optimization to be returned
trustradius : set this to the maximum move you want to allow in a single
parameter direction.
If you are using log parameters, then setting this
to 1.0, for example, corresponds to a multiplicative
change of exp(1) = 2.718
This version requires fjtj to pass back an ordered pair with
a gradient evaluation of the cost and JtJ, but not a function for J.
    This is important in problems when there are many residuals and J is too
cumbersome to compute and pass around, but JtJ is a lot "slimmer". """
xcopy = copy.copy(x0)
if isinstance(x0,KeyedList) :
x0 = asarray(x0.values())
else :
x0 = asarray(x0)
Lambda = 1.0e-02
Mult = 10.0
n = len(x0)
func_calls = 0
grad_calls = 0
if maxiter==None :
maxiter = 200*n
niters = 0
x = x0
gtol = n*avegtol
if retall:
allvecs = [x]
x1 = x0
x2 = x0
d = zeros(n,scipy.float_)
move = zeros(n,scipy.float_)
finish = 0
grad, lmh = apply(fjtj,(x,))
grad_calls+=1
while (niters<maxiter) and (finish == 0):
# estimate what Lambda should be
[u,s,v] = scipy.linalg.svd(lmh)
#print "s is (in NoJ) ", s
#s,u = scipy.linalg.eig(lmh)
#s = real(s)
#u = real(u)
oldlmh = lmh[:,:]
oldgrad = grad[:]
rhsvect = -scipy.dot(transpose(u),grad)
# rhsvect = asarray(rhsvect)[:,0]
move = abs(rhsvect)/(s+Lambda*ones(n)+1.0e-30*ones(n))
move = list(move)
maxindex = move.index(max(move))
move = asarray(move)
if max(move) > trustradius :
Lambda = Mult*(1.0/trustradius*abs(rhsvect[maxindex])-s[maxindex])
#print " Increasing lambda to ", Lambda
## lmhreg = lmh + Lambda*eye(n,n,typecode=scipy.float_)
## [u,s,v] = scipy.linalg.svd(lmhreg)
rhsvect = -scipy.dot(transpose(u),grad)
# rhsvect = asarray(rhsvect)[:,0]
for i in range(0,len(s)) :
if (s[i]+Lambda) < 1.0e-30 :
d[i] = 0.0
else :
d[i] = 1.0/(s[i]+Lambda)
move[i] = d[i]*rhsvect[i]
move = asarray(move)
move = dot(asarray(u),move)
x1 = x + move
moveold = move[:]
for i in range(0,len(s)) :
if (s[i]+Lambda/Mult) < 1.0e-30 :
d[i] = 0.0
else :
d[i] = 1.0/(s[i]+Lambda/Mult)
move[i] = d[i]*rhsvect[i]
move = asarray(move)
move = dot(asarray(u),move)
x2 = x + asarray(move)
currentcost = apply(fcost,(x,))
oldcost = currentcost
func_calls+=1
try:
costlambdasmaller = apply(fcost,(x2,))
except SloppyCell.Utility.SloppyCellException:
costlambdasmaller = scipy.inf
func_calls+=1
try:
costlambda = apply(fcost,(x1,))
except SloppyCell.Utility.SloppyCellException:
costlambda = scipy.inf
func_calls+=1
if disp :
print('Iteration number', niters)
print('Current cost', currentcost)
print("Move 1 gives cost of" , costlambda)
print("Move 2 gives cost of ", costlambdasmaller)
#fp = open('LMoutfile','a')
#fp.write('Iteration number ' + niters.__str__() + '\n')
#fp.write('Current cost ' + currentcost.__str__() + '\n')
#fp.write('Move 1 gives cost of ' + costlambda.__str__() + '\n')
#fp.write('Move 2 gives cost of ' + costlambdasmaller.__str__() + '\n')
#fp.close()
if costlambdasmaller <= currentcost :
Lambda = Lambda/Mult
x = x2[:]
if retall:
allvecs.append(x)
currentcost = costlambdasmaller
grad, lmh = apply(fjtj,(x2,))
grad_calls+=1
#if scipy.linalg.norm(asarray(grad)) < avegtol :
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
elif costlambda <= currentcost :
currentcost = costlambda
move = moveold[:]
x = x1[:]
if retall:
allvecs.append(x)
grad, lmh = apply(fjtj,(x1,))
grad_calls+=1
# if scipy.linalg.norm(asarray(grad)) < avegtol :
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
else :
Lambdamult = Lambda
costmult = costlambda
piOverFour = .78539816339744825
NTrials2 = 0
NTrials = 0
while (costmult > currentcost) and (NTrials < 10) :
# num = -dot(transpose(asarray(grad)),asarray(moveold) )
# den = scipy.linalg.norm(grad)*scipy.linalg.norm(moveold)
gamma = .1 # scipy.arccos(num/den)
NTrials = NTrials+1
if (gamma > 0) :
Lambdamult = Lambdamult*Mult
for i in range(0,len(s)) :
if s[i] + Lambdamult < 1.0e-30 :
d[i] = 0.0
else :
d[i] = 1.0/(s[i] + Lambdamult)
move[i] = d[i]*rhsvect[i]
move = asarray(move)
move = dot(asarray(u),move)
x1 = x + asarray(move)
func_calls+=1
costmult = apply(fcost,(x1,))
else :
NTrials2 = 0
while (costmult > currentcost) and (NTrials2 < 10) :
NTrials2 = NTrials2 + 1
if disp :
print(" Decreasing stepsize ")
move = (.5)**NTrials2*moveold
x1 = x + asarray(moveold)
func_calls+=1
costmult = apply(fcost,(x1,))
if (NTrials==10) or (NTrials2==10) :
if disp :
print(" Failed to converge")
finish = 1
else :
x = x1[:]
if retall:
allvecs.append(x)
Lambda = Lambdamult
grad, lmh = apply(fjtj,(x1,))
grad_calls+=1
currentcost = costmult
# if scipy.linalg.norm(grad) < avegtol :
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
niters = niters + 1
# see if we need to reduce the trust region, compare the actual change in
# cost to the linear and quadratic change in cost
model_change = scipy.dot(scipy.transpose(oldgrad),move) + \
.5*scipy.dot(scipy.transpose(move),scipy.dot(oldlmh,move) )
#print oldcost-sum(newmodelval**2)
#print trustradius
if model_change>1.0e-16 :
ratio = (oldcost-currentcost)/(model_change)
if ratio < .25 :
trustradius = trustradius/2.0
if ratio >.25 and ratio<=.75 :
trustradius = trustradius
if ratio > .75 and trustradius<10.0 :
trustradius = 2.0*trustradius
#save(x,'currentParamsLM')
if disp :
if (niters>=maxiter) and (finish != 2) :
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
print(" Maximum number of iterations exceeded with no convergence ")
if (finish == 2) :
print("Optimization terminated successfully.")
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
if isinstance(xcopy,KeyedList) :
xcopy.update(x)
else :
xcopy = x
if full_output:
retlist = xcopy, currentcost, func_calls, grad_calls, finish, Lambda, lmh
if retall:
retlist += (allvecs,)
else:
retlist = xcopy
if retall:
retlist = (xcopy, allvecs)
return retlist
def solve_lmsys(Lambda,s,g,rhsvect,currentcost,n) :
d = zeros(n,scipy.float_)
move = zeros(n,scipy.float_)
for i in range(0,n) :
if s[i] < 1.0e-20 :
d[i] = 0.0
else :
d[i] = 1.0/(s[i])
move[i] = d[i]*rhsvect[i]
return move
def fmin_lm_scale(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0,trustradius=1.0):
"""
Minimizer for a nonlinear least squares problem. Allowed to
have more residuals than parameters or vice versa.
f : residual function (function of parameters)
fprime : derivative of residual function with respect to parameters.
Should return a matrix (J) with dimensions number of residuals
by number of parameters.
x0 : initial parameter set
avegtol : convergence tolerance on the gradient vector
epsilon : size of steps to use for finite differencing of f (if fprime
not passed in)
maxiter : maximum number of iterations
full_output : 0 to get only the minimum set of parameters back
1 if you also want the best parameter set, the
lowest value of f, the number of function calls,
the number of gradient calls, the convergence flag,
the last Marquardt parameter used (lambda), and the
last evaluation of fprime (J matrix)
disp : 0 for no display, 1 to give cost at each iteration and convergence
conditions at the end
retall : 0 for nothing extra to be returned, 1 for all the parameter
sets during the optimization to be returned
trustradius : set this to the maximum length of move you want.
If you are using log parameters, then setting this
to 1.0, for example, corresponds to a multiplicative
change of exp(1) = 2.718 if the move is along a single
parameter direction
This version is scale invariant. This means that under a change of
scale of the parameters the direction the optimizer chooses to move
in does not change. To achieve this, we don't use a Marquardt
parameter to impose a trust region but rather take the infinite trust
region step and just cut it back to the length given in the variable
trustradius. """
app_fprime = 0
if fprime is None:
app_fprime = 1
xcopy = copy.copy(x0)
if isinstance(x0,KeyedList) :
x0 = asarray(x0.values())
else :
x0 = asarray(x0)
Lambda = 1.0e-02
Mult = 10.0
n = len(x0)
func_calls = 0
grad_calls = 0
res = asarray(apply(f,(x0,)))
m = res.shape[0]
if maxiter is None :
maxiter = 200*n
niters = 0
x = x0
gtol = n*avegtol
if retall:
allvecs = [x]
x1 = x0
x2 = x0
d = zeros(n,scipy.float_)
move = zeros(n,scipy.float_)
finish = 0
if app_fprime :
j = asarray(apply(approx_fprime2,(x,f,epsilon)+args))
func_calls = func_calls + 2*len(x)
else :
j = asarray(apply(fprime,(x,)))
grad_calls+=1
res = asarray(apply(f,(x,)))
func_calls+=1
grad = mat(res)*mat(j)
while (niters<maxiter) and (finish == 0):
# note: grad, res and j will be available from the end of the
# last iteration. They just need to be computed the zeroth
        # time as well (above)
lmh = mat(transpose(j))*mat(j)
# use more accurate way to get e-vals/dirns
#[u,s,v] = scipy.linalg.svd(lmh)
[u,ssqrt,vt] = scipy.linalg.svd(j)
# want n singular values even if m<n and we have
# more parameters than data points.
if (len(ssqrt) == n) :
s = ssqrt**2
elif (len(ssqrt)<n) :
s = zeros((n,),scipy.float_)
s[0:len(ssqrt)] = ssqrt**2
#rhsvect = -mat(transpose(u))*mat(transpose(grad))
rhsvect = -mat(vt)*mat(transpose(grad))
rhsvect = asarray(rhsvect)[:,0]
currentcost = sum(asarray(apply(f,(x,)))**2)
g = asarray(grad)[0,:]
Lambda = 0
move = solve_lmsys(Lambda,s,g,rhsvect,currentcost,n)
move = asarray(move)
move = asarray(mat(transpose(vt))*mat(transpose(mat(move))))[:,0]
unitmove = move/(scipy.linalg.norm(move))
move1 = unitmove*trustradius
# print move
x1 = x + move1
move2 = unitmove*trustradius*Mult
x2 = x + asarray(move2)
func_calls+=1
try:
res2 = asarray(apply(f,(x2,)))
costlambdasmaller = sum(res2**2)
except SloppyCell.Utility.SloppyCellException:
costlambdasmaller = scipy.inf
func_calls+=1
try:
res1 = asarray(apply(f,(x1,)))
costlambda = sum(res1**2)
except SloppyCell.Utility.SloppyCellException:
costlambda = scipy.inf
func_calls+=1
if disp :
print("Cost is ", currentcost)
print("Iteration is", niters)
oldcost = currentcost
oldres = res
oldjac = j
if costlambdasmaller <= currentcost :
trustradius = trustradius*Mult
x = x2
if retall:
allvecs.append(x)
currentcost = costlambdasmaller
if app_fprime :
j = asarray(apply(approx_fprime2,(x2,f,epsilon)+args))
func_calls = func_calls + 2*len(x2)
else :
j = asarray(apply(fprime,(x2,)))
grad_calls+=1
grad = mat(res2)*mat(j)
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
move = move2
elif costlambda <= currentcost :
currentcost = costlambda
x = x1
if retall:
allvecs.append(x)
if app_fprime :
j = asarray(apply(approx_fprime2,(x1,f,epsilon)+args))
func_calls = func_calls + 2*len(x1)
else :
j = asarray(apply(fprime,(x1,)))
grad_calls+=1
grad = mat(res1)*mat(j)
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
move = move1
else :
trustradmult = trustradius
costmult = costlambda
NTrials = 0
move = unitmove
while (costmult > currentcost) and (NTrials < 100) :
while (costmult > currentcost) and (NTrials < 100) :
NTrials = NTrials + 1
#print " Decreasing stepsize "
trustradmult = trustradmult/2.0
move = move*trustradmult
x1 = x + asarray(move)
res1 = asarray(apply(f,(x1,)))
func_calls+=1
costmult = sum(res1**2)
if (NTrials==100) :
if disp :
print(" Failed to converge")
finish = 1
else :
x = x1
if retall:
allvecs.append(x)
trustradius = trustradmult
if app_fprime :
j = asarray(apply(approx_fprime2,(x,f,epsilon)+args))
func_calls = func_calls + 2*len(x)
else :
j = asarray(apply(fprime,(x,)))
grad_calls+=1
grad = mat(res1)*mat(j)
currentcost = costmult
if sum(abs(2.0*grad), axis=None) < gtol :
finish = 2
niters = niters + 1
# see if we need to reduce the trust region
newmodelval = oldres+asarray(mat(oldjac)*mat(transpose(mat(move))))[:,0]
oldmodelval = oldres
#print oldcost-sum(newmodelval**2)
#print trustradius
if ((oldcost-sum(newmodelval**2))>1.0e-16) :
ratio = (oldcost-currentcost)/(oldcost-sum(newmodelval**2))
if ratio < .25 :
trustradius = trustradius/2.0
if ratio >.25 and ratio<=.75 :
trustradius = trustradius
if ratio > .75 and trustradius<10.0 :
trustradius = 2.0*trustradius
if disp :
if (niters>=maxiter) and (finish != 2) :
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
print(" Maximum number of iterations exceeded with no convergence ")
if (finish == 2) :
print("Optimization terminated successfully.")
print(" Current function value: %f" % currentcost)
print(" Iterations: %d" % niters)
print(" Function evaluations: %d" % func_calls)
print(" Gradient evaluations: %d" % grad_calls)
if isinstance(xcopy,KeyedList) :
xcopy.update(x)
else :
xcopy = x
if full_output:
retlist = xcopy, currentcost, func_calls, grad_calls, finish, Lambda, j
if retall:
retlist += (allvecs,)
else:
retlist = xcopy
if retall:
retlist = (xcopy, allvecs)
return retlist
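# Hedged usage sketch (added for illustration): a tiny linear least-squares fit showing the
# residual/Jacobian signature fmin_lm expects. The data points below are made up.
def _example_fit():
    xdata = asarray([0.0, 1.0, 2.0, 3.0])
    ydata = asarray([1.0, 2.1, 2.9, 4.2])

    def residuals(p):
        # model: y = p[0]*x + p[1]; one residual per data point
        return p[0]*xdata + p[1] - ydata

    def jacobian(p):
        # rows = residuals, columns = parameters
        return transpose(asarray([xdata, ones(len(xdata))]))

    return fmin_lm(residuals, asarray([1.0, 0.0]), fprime=jacobian,
                   disp=0, full_output=1)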
| 2.453125
| 2
|
survey/exporter/tex/__init__.py
|
TheWITProject/MentorApp
| 0
|
12776906
|
from .configuration import Configuration
from .configuration_builder import ConfigurationBuilder
from .question2tex import Question2Tex
from .survey2tex import Survey2Tex, XelatexNotInstalled
__all__ = ["Question2Tex", "Survey2Tex", "Configuration", "ConfigurationBuilder", "XelatexNotInstalled"]
| 1.109375
| 1
|
contents/apis/default.py
|
williamlagos/contents-api
| 0
|
12776907
|
<reponame>williamlagos/contents-api
#!/usr/bin/python
#
# This file is part of django-emporio project.
#
# Copyright (C) 2011-2020 <NAME> <<EMAIL>>
#
# Emporio is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Emporio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Emporio. If not, see <http://www.gnu.org/licenses/>.
#
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db.models.fields.related import ManyToOneRel, RelatedField
from restless.dj import DjangoResource
from restless.exceptions import NotFound, BadRequest
from restless.preparers import FieldsPreparer, SubPreparer
isnot_fk = lambda field: not isinstance(field, (RelatedField, ManyToOneRel))
class DefaultResource(DjangoResource):
pass
class DefaultServiceResource(DjangoResource):
service = None
def __init__(self, *args, **kwargs):
        super(DefaultServiceResource, self).__init__(*args, **kwargs)
self.fields = {}
for field in self.service.model._meta.get_fields():
if isnot_fk(field):
self.fields[field.name] = field.name
else:
nested_field_names = field.remote_field.model._meta.get_fields()
nested_fields = {f.name: f.name for f in nested_field_names if isnot_fk(f)}
self.fields[field.name] = SubPreparer(field.name, FieldsPreparer(nested_fields))
# Alternative implementation, using dict comprehensions
# model_fields = self.service.model._meta.get_fields()
# flat_fields = {f.name: f.name for f in model_fields if isnot_fk(f)}
# nested = lambda f: {f.name: f.name for f in f.remote_field.model._meta.get_fields() if isnot_fk(f)}
# nested_fields = {f.name: SubPreparer(f.name, FieldsPreparer(nested(f))) for f in model_fields if not isnot_fk(f)}
# self.fields = {**nested_fields, **flat_fields}
self.preparer = FieldsPreparer(self.fields)
def is_authenticated(self):
# Open everything wide!
# DANGEROUS, DO NOT DO IN PRODUCTION.
return True
# Alternatively, if the user is logged into the site...
# return self.request.user.is_authenticated()
# Alternatively, you could check an API key. (Need a model for this...)
# from myapp.models import ApiKey
# try:
# key = ApiKey.objects.get(key=self.request.GET.get('api_key'))
# return True
# except ApiKey.DoesNotExist:
# return False
# GET /
def list(self):
return self.service.model.objects.all()
# GET /<pk>/
def detail(self, pk):
try:
return self.service.model.objects.get(id=pk)
except ObjectDoesNotExist:
raise NotFound()
    # POST /
    def create(self):
        return self.service.model.objects.create(**self.data)
    # PUT /<pk>/
    def update(self, pk):
        try:
            model = self.service.model.objects.get(id=pk)
            for field, value in self.data.items():
                setattr(model, field, value)
            model.save()
        except ObjectDoesNotExist:
            model = self.service.model.objects.create(**self.data)
        return model
# DELETE /<pk>/
def delete(self, pk):
self.service.model.objects.filter(id=pk).delete()
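# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# DefaultServiceResource builds its FieldsPreparer from `self.service.model`, so a
# concrete resource is expected to supply a `service` object exposing a Django model.
# The names below (ArticleService, Article) are hypothetical placeholders.
#
# class ArticleService:
#     model = Article  # any Django model
#
# class ArticleResource(DefaultServiceResource):
#     service = ArticleService()
#
# urlpatterns would then include ArticleResource.urls(), as is usual with restless.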
| 1.835938
| 2
|
tsammalexdata/image_providers.py
|
Kevin2612/TSAMMELEX
| 0
|
12776908
|
<reponame>Kevin2612/TSAMMELEX
import os
from xml.etree import cElementTree as et
import re
from hashlib import md5
from mimetypes import guess_extension
from bs4 import BeautifulSoup
import requests
from purl import URL
import flickrapi
from dateutil.parser import parse
class DataProvider(object):
"""Given a URL of an accepted format, DataProviders can fetch metadata for an image.
"""
@staticmethod
def date(s):
try:
return str(parse(s)).split()[0]
except:
return
def id_from_url(self, url, host, comps):
"""
:return: An id to be passed into `info_for_id` or None, \
if `url` is not recognized.
"""
raise NotImplementedError()
def info_for_id(self, id_):
"""
:return: `dict` of metadata for an image.
"""
raise NotImplementedError
def postprocess(self, res):
new = {}
for k, v in res.items():
if k == 'date' and v:
v = self.date(v)
if k in ['latitude', 'longitude']:
v = float(v)
if v:
new[k] = v
return new
def info(self, url):
"""Interface method to be called when processing new images.
This method ties together the DataProvider workflow.
"""
url = URL(url)
return self.postprocess(
self.info_for_id(self.id_from_url(url, url.host(), url.path_segments())))
class Senckenberg(DataProvider):
__example__ = (
'http://www.westafricanplants.senckenberg.de/root/index.php?page_id=14&id=722#image=26800',
{
'creator': '<NAME>',
'date': '2008-05-03',
'place': 'Nigeria',
'source': 'http://www.westafricanplants.senckenberg.de/root/index.php?page_id=14&id=722#image%3D26800',
'source_url': 'http://www.westafricanplants.senckenberg.de/images/pictures/ficus_polita_img_04024_ralfbiechele_722_fc6e25.jpg',
'permission': 'http://creativecommons.org/licenses/by-nc/4.0/',
}
)
def id_from_url(self, url, host, comps):
"""This DataProvider recognizes URLs of the form
http://www.africanplants.senckenberg.de/root/index.php?page_id=14&id=722#image=26
Note that the URL fragment is necessary to determine the exact image referred to
on the page, listing all images for a species.
:param url: A URL.
:return: `url` if recognized, else `None`.
"""
if host.endswith('africanplants.senckenberg.de') \
and url.fragment() \
and len(comps) == 2 \
and comps[0] == 'root' \
and comps[1] in ['index.php']:
return url
def info_for_id(self, id_):
"""
We expect and exploit markup of the following form:
<img src="http://<host>/images/pictures/thumb_<img-filename>"
title="PhotoID: 26800;
Photographer: <NAME>;
Date: 2008-05-03 18:03:19;
Location: Nigeria" />
"""
photo_id = id_.fragment().split('=')[1]
for img in BeautifulSoup(requests.get(id_).text).find_all('img'):
if img.attrs.get('title', '').startswith('PhotoID: %s' % photo_id):
res = {
'source': '%s' % id_,
'source_url': img.attrs['src'].replace('/thumb_', '/'),
'permission': 'http://creativecommons.org/licenses/by-nc/4.0/',
}
for k, v in [
l.split(': ', 1) for l in img.attrs['title'].split('; \n') if l
]:
if k == 'Date':
res['date'] = v.split(' ')[0]
elif k == 'Photographer':
res['creator'] = v
elif k == 'Location':
res['place'] = v
return res
return {}
class Zimbabweflora(DataProvider):
__example__ = (
'http://www.zimbabweflora.co.zw/speciesdata/image-display.php?species_id=100760&image_id=2',
{
'creator': '<NAME>',
'date': '2012-01-08',
'gps': '-20.272510',
'permission': 'http://creativecommons.org/licenses/by-nc/4.0/',
'place': 'Zimbabwe, Great zimbabwe, Great enclosure',
'source': 'http://www.zimbabweflora.co.zw/speciesdata/image-display.php?species_id=100760&image_id=2',
'source_url': 'http://www.zimbabweflora.co.zw/speciesdata/images/10/100760-2.jpg',
}
)
def id_from_url(self, url, host, comps):
if host in ['www.zimbabweflora.co.zw', 'www.mozambiqueflora.com'] \
and len(comps) == 2 \
and comps[0] == 'speciesdata' \
and comps[1] in ['species-record.php', 'image-display.php']:
return url
def info_for_id(self, id_):
soup = BeautifulSoup(requests.get(id_).text)
img = soup.find('img')
if not img:
return {}
src = img.attrs['src']
if not src.startswith('http:'):
src = 'http://www.zimbabweflora.co.zw/speciesdata/' + src
res = {
'source': '%s' % id_,
'source_url': src,
'permission': 'http://creativecommons.org/licenses/by-nc/4.0/',
}
for table in soup.find_all('table'):
if table.attrs['summary'] in [
'Individual record details', 'Information about the photograph'
]:
for tr in table.find_all('tr'):
k, v = [td.get_text(' ', strip=True) for td in tr.find_all('td')]
if v:
# Location Country Latitude Date Photographer
if k == 'Location:':
res['place'] = v
if k == 'Country:':
loc = res.get('place', '')
res['place'] = '%s%s%s' % (v, ', ' if loc else '', loc)
if k == 'Latitude:':
res['gps'] = v
if k == 'Date:' and v != 'No date':
res['date'] = parse(v).date().isoformat()
if k == 'Photographer:':
res['creator'] = v
return res
class Flickr(DataProvider):
__example__ = (
'https://www.flickr.com/photos/damouns/78968973',
{
'comments': "title 'Bufo gutturalis'",
'creator': '<NAME>',
'date': '2005-12-27',
'permission': 'https://creativecommons.org/licenses/by/2.0/',
'source': 'https://www.flickr.com/photos/damouns/78968973/sizes/o/',
'source_url': 'https://farm1.staticflickr.com/39/78968973_f30ad8c62d_o.jpg',
}
)
def __init__(self):
self.api = flickrapi.FlickrAPI(
os.environ['FLICKR_KEY'], os.environ['FLICKR_SECRET'], format='parsed-json')
self.licenses = {l['id']: l['url'] for l in
self.api.photos.licenses.getInfo()['licenses']['license']}
def info_for_id(self, id_):
# creator, date, place, gps, permission, comments (title '...')
info = self.api.photos.getInfo(photo_id=id_)['photo']
res = dict(
creator=info['owner']['realname'] or info['owner']['username'],
date=info['dates']['taken'],
permission=self.licenses[info['license']],
comments="title '%s'" % info['title']['_content'])
if 'location' in info:
place = self.api.places.getInfo(place_id=info['location']['woeid'])['place']
res.update(
place=place['name'],
longitude=place['longitude'],
latitude=place['latitude'])
res.update(self.size(id_))
return res
def size(self, id_):
biggest = {'width': 0}
for size in self.api.photos.getSizes(photo_id=id_)['sizes']['size']:
if size['label'] == 'Original':
biggest = size
break
if int(size['width']) > biggest['width']:
biggest = size
return dict(source_url=biggest['source'], source=biggest['url'])
def id_from_url(self, url, host, comps):
if host.endswith('flickr.com') and len(comps) > 2 and comps[0] == 'photos':
return comps[2]
class Eol(DataProvider):
__example__ = (
'http://media.eol.org/data_objects/21916329',
{
'creator': 'Research Institute Senckenberg',
'mime_type': 'image/jpeg',
'permission': 'http://creativecommons.org/licenses/by-nc-sa/3.0/',
'place': 'Burkina Faso',
'source': 'http://media.eol.org/data_objects/21916329',
'source_url': 'http://192.168.3.11/content/2012/08/24/08/75619_orig.jpg',
}
)
def info_for_id(self, id_):
try:
info = requests.get(
'http://eol.org/api/data_objects/1.0/%s.json' % id_).json()['dataObjects'][0]
except:
return {}
agents = {a['role']: a['full_name'] for a in info['agents']}
if 'eolMediaURL' in info:
return {
'creator': agents.get('photographer', list(agents.values())[0]),
'date': info.get('created'),
'permission': info['license'],
'source': 'http://media.eol.org/data_objects/' + id_,
'source_url': info['eolMediaURL'],
'mime_type': info['mimeType'],
'place': info.get('location'),
'comments': info.get('description'),
}
def id_from_url(self, url, host, comps):
"""
http://media.eol.org/data_objects/23049910
"""
if host.endswith('eol.org') and len(comps) == 2 and comps[0] == 'data_objects':
return comps[1]
class Wikimedia(DataProvider):
    filename_pattern = re.compile(r"(?P<fname>[a-zA-Z\-_,'\(\)%0-9]+\.(jpg|png|JPG))$")
    license_pattern = re.compile(r'CC\-(?P<clauses>[A-Z\-]+)\-(?P<version>[0-9\.]+)')
license_map = {
'PD-user': 'http://en.wikipedia.org/wiki/Public_domain',
'PD 1923': 'http://en.wikipedia.org/wiki/Public_domain',
'CC-PD-Mark': 'http://en.wikipedia.org/wiki/Public_domain',
'PD other reasons': 'http://en.wikipedia.org/wiki/Public_domain',
#'PD-user': 'http://en.wikipedia.org/wiki/Public_domain',
}
def info_for_id(self, id_):
"""
http://tools.wmflabs.org/magnus-toolserver/commonsapi.php?image=Alcelaphus_caama.jpg
<?xml version="1.0" encoding="UTF-8"?>
<response version="0.92">
<file>
<name>Alcelaphus caama.jpg</name>
<title>File:Alcelaphus_caama.jpg</title>
<urls>
<file>http://upload.wikimedia.org/wikipedia/commons/1/1d/Alcelaphus_caama.jpg</file>
<description>http://commons.wikimedia.org/wiki/File:Alcelaphus_caama.jpg</description>
</urls>
<size>3485152</size>
<width>3085</width>
<height>2314</height>
<uploader>Lycaon</uploader>
<upload_date>2008-11-29T08:42:17Z</upload_date>
<sha1>718624712e4d7a76f5521904a795c81ae55363ee</sha1>
<location>
<lat>-19.216961</lat>
<lon>16.174706</lon>
</location>
<date><span style="white-space:nowrap"><time class="dtstart" datetime="2007-06-29">29 June 2007</time></span></date>
<author><span class="fn value"><a href="http://commons.wikimedia.org/wiki/User:Biopics" title="User:Biopics"><NAME></a></span></author>
<source><span class="int-own-work">Own work</span></source>
</file>
<licenses>
<license>
<name>CC-BY-SA-4.0</name>
</license>
</licenses>
</response>
"""
def text(e):
if e and e.text:
return BeautifulSoup(e.text).string
info = et.fromstring(requests.get(
'http://tools.wmflabs.org/magnus-toolserver/commonsapi.php',
params=dict(image=id_)).content)
try:
res = dict(
creator=text(info.find('file/author')),
source=info.find('file/urls/description').text,
source_url=info.find('file/urls/file').text,
permission=info.find('licenses/license/name').text)
except AttributeError:
return {}
if info.find('file/date'):
res['date'] = text(info.find('file/date'))
loc = info.find('file/location')
if loc:
res.update(longitude=loc.find('lon').text, latitude=loc.find('lat').text)
match = self.license_pattern.match(res['permission'])
if match:
res['permission'] = 'https://creativecommons.org/licenses/%s/%s/' \
% (match.group('clauses').lower(), match.group('version'))
else:
res['permission'] = self.license_map.get(res['permission'], res['permission'])
return res
def id_from_url(self, url, host, comps):
"""http://commons.wikimedia.org/wiki/File:Alcelaphus_caama.jpg
"""
if not host.endswith('wikimedia.org'):
return
if comps[0] == 'wiki':
if 'File:' in comps[1]:
return comps[1].split('File:')[1]
else:
return
for comp in comps:
m = self.filename_pattern.search(comp)
if m:
return m.group('fname')
PROVIDERS = [Wikimedia(), Flickr(), Eol(), Zimbabweflora(), Senckenberg()]
def get_image_info(img):
for field in ['source', 'source_url', 'id']:
for provider in PROVIDERS:
url = URL(img[field])
if provider.id_from_url(url, url.host(), url.path_segments()):
return provider.info(img[field])
def get_checksum(content=None, fname=None):
assert (content or fname) and not (content and fname)
if fname:
with open(fname, 'rb') as fp:
content = fp.read()
checksum = md5()
checksum.update(content)
return checksum.hexdigest()
def get_image(info, imgdir, verbose=True):
assert 'source_url' in info
#
# FIXME: replace with upload to Edmond, once the API becomes available!
#
res = requests.get(info['source_url'])
info['id'] = get_checksum(content=res.content)
info.setdefault('mime_type', res.headers['content-type'])
ext = guess_extension(info['mime_type'], strict=False)
ext = '.jpg' if ext == '.jpe' else ext
with open(os.path.join(imgdir, '%s%s' % (info['id'], ext)), mode='wb') as fp:
fp.write(res.content)
if verbose:
print(info)
return info
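# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# get_image_info walks the registered PROVIDERS until one recognises one of the
# record's URLs; get_image then downloads the file and stores its md5 as 'id'.
# The record below is illustrative, and the Flickr provider additionally needs
# FLICKR_KEY / FLICKR_SECRET in the environment at import time.
#
# img = {"id": "", "source": "https://www.flickr.com/photos/damouns/78968973", "source_url": ""}
# info = get_image_info(img)
# info = get_image(info, imgdir="/tmp")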
| 2.765625
| 3
|
cap6/ex12.py
|
felipesch92/livroPython
| 0
|
12776909
|
<filename>cap6/ex12.py<gh_stars>0
d = {}
palavra = '<NAME>'
for l in palavra:
if l in d:
d[l] = d[l] + 1
else:
d[l] = 1
print(d)
| 2.546875
| 3
|
eca.py
|
PetarPeychev/elementary-cellular-automata
| 0
|
12776910
|
<reponame>PetarPeychev/elementary-cellular-automata
class ECA:
def __init__(self, id):
self.id = bin(id)[2:].zfill(8)
self.dict = {}
for i in range(8):
self.dict[bin(7 - i)[2:].zfill(3)] = self.id[i]
self.array = [0 for x in range(199)]
self.array[99] = 1
def step(self):
arr = [0 for x in range(len(self.array))]
for i in range(len(self.array)):
s = ""
if i == 0:
s += "0"
else:
s += str(self.array[i - 1])
s += str(self.array[i])
if i == len(self.array) - 1:
s += "0"
else:
s += str(self.array[i + 1])
arr[i] = int(self.dict[s])
self.array = arr
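# Hedged usage sketch (added for illustration; not in the original file): print a few
# generations of Rule 110, marking live cells with '#'.  Guarded so that importing the
# module stays side-effect free.
if __name__ == "__main__":
    automaton = ECA(110)
    for _ in range(5):
        print("".join("#" if cell else "." for cell in automaton.array))
        automaton.step()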
| 3.4375
| 3
|
fewshot/models/measure_tests.py
|
yuchenlichuck/prototypical-random-walk
| 4
|
12776911
|
<reponame>yuchenlichuck/prototypical-random-walk
# Copyright (c) 2018 <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import unittest
from fewshot.models.measure import batch_apk, apk
def fake_batch_apk(logits, pos_mask, k):
ap = []
for ii in range(logits.shape[0]):
ap.append(apk(logits[ii], pos_mask[ii], k[ii]))
return np.array(ap)
class MeasureTests(unittest.TestCase):
def test_batch_apk(self):
rnd = np.random.RandomState(0)
for ii in range(100):
logits = rnd.uniform(0.0, 1.0, [10, 12])
pos_mask = (rnd.uniform(0.0, 1.0, [10, 12]) > 0.5).astype(np.float32)
k = rnd.uniform(5.0, 10.0, [10]).astype(np.int32)
ap1 = batch_apk(logits, pos_mask, k)
ap2 = fake_batch_apk(logits, pos_mask, k)
np.testing.assert_allclose(ap1, ap2)
if __name__ == "__main__":
unittest.main()
| 2.125
| 2
|
src/genie/libs/parser/iosxe/tests/ShowEthernetServiceInstance/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
| 204
|
12776912
|
<filename>src/genie/libs/parser/iosxe/tests/ShowEthernetServiceInstance/cli/equal/golden_output_1_expected.py
expected_output = {
"service_instance": {
501: {
"interfaces": {
"TenGigabitEthernet0/3/0": {"state": "Up", "type": "Static"},
"TenGigabitEthernet0/1/0": {"state": "Up", "type": "Static"},
}
},
502: {
"interfaces": {"TenGigabitEthernet0/3/0": {"state": "Up", "type": "Static"}}
},
}
}
| 1.398438
| 1
|
sw/chaac_rpi/flask/app/main.py
|
alvarop/chaac
| 21
|
12776913
|
import os
import time
import socket
import zipfile
from datetime import datetime, timedelta
from flask import Flask, request, g, render_template, jsonify, redirect, Response
from chaac.chaacdb import ChaacDB
app = Flask(__name__)
app.config.from_object(__name__) # load config from this file , flaskr.py
# Load default config and override config from an environment variable
app.config.update(dict(DATABASE=os.getenv("DATABASE")))
def get_pretty_hostname():
hostname = socket.gethostname()
if "chaac-" in hostname:
hostname = " ".join(hostname.split('-')[1:]).title()
else:
hostname = " ".join(hostname.split('-')).title()
return hostname
hostname = get_pretty_hostname()
default_uid = None
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, "sqlite_db"):
g.sqlite_db = ChaacDB(app.config["DATABASE"])
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, "sqlite_db"):
g.sqlite_db.close()
def get_latest_sample(uid):
""" Get latest weather data (and past day's rainfall) """
# Get last sample
db = get_db()
# Past day
now = datetime.fromtimestamp(int(time.time()))
end_time = start_time = time.mktime(now.timetuple())
# Start at midnight today
start_time = time.mktime(now.replace(hour=0, minute=0, second=0).timetuple())
end_time = time.mktime(now.timetuple())
rows = db.get_records("minute", start_date=start_time, end_date=end_time, order="desc", uid=uid)
if len(rows) == 0:
return None
sample = {}
# Convert the units
for key, val in rows[0]._asdict().items():
if key == "timestamp":
sample[key] = datetime.fromtimestamp(val).strftime("%Y-%m-%d %H:%M:%S")
sample["ts"] = val
elif val == None:
sample[key] = 0
else:
sample[key] = round(float(val), 2)
rain_total = 0
for row in rows:
rain_total += row.rain
sample["rain"] = round(rain_total, 2)
return sample
@app.route("/latest")
def latest_json():
db = get_db()
data = {"hostname": hostname, "devices":{}}
for device, name in db.devices.items():
sample = get_latest_sample(device)
if sample:
data["devices"][device] = get_latest_sample(device)
data["devices"][device]["name"] = name
return jsonify(data)
@app.route("/")
def summary():
return render_template("status.html", hostname=hostname)
rain_mod = {"day": (60 * 60), "week": (60 * 60 * 24), "month": (60 * 60 * 24)}
def get_start_bin(end_date, table):
""" Figure out what time it is now to start the bar chart
The numbers depend on whether it's a day/week/month plot
"""
# Start one day (% rain_mod) after today
end_date += rain_mod[table]
if table == "day":
return datetime.fromtimestamp(end_date).timetuple().tm_hour
elif table == "week":
return datetime.fromtimestamp(end_date).timetuple().tm_wday
elif table == "month":
return datetime.fromtimestamp(end_date).timetuple().tm_mday
else:
return None
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def get_rain_label(idx, table):
""" Get nice labels for the bar chart. Unfortunately, plotly
keeps re-sorting all the numbers so we have to add strings
around them to keep it in the correct order. """
if table == "day":
return "(" + str(idx) + ")"
elif table == "week":
return days[idx]
elif table == "month":
# TODO: Deal with days=0 etc
return "(" + str(idx) + ")"
else:
return None
def get_data_dict(uid, start_date, end_date, table="day"):
""" Get weather data for the specified weather period """
db = get_db()
if table == "day":
real_table = "minute"
else:
real_table = "hour"
rows = db.get_records(real_table, start_date=start_date, end_date=end_date, uid=uid)
if len(rows) == 0:
return None
plot = {}
col_names = rows[0]._asdict().keys()
for name in col_names:
plot[name] = []
# Rain doesn't have the same timestamp as the rest of the data
plot["rain_time"] = []
# Create lists with each data type and make timestamp pretty
for row in rows:
for name in col_names:
if name == "timestamp":
plot[name].append(
datetime.fromtimestamp(getattr(row, name)).strftime(
"%Y-%m-%d %H:%M:%S"
)
)
elif name == "uid" or name == "id" or name == "rain":
continue
else:
if getattr(row, name) is None:
plot[name].append(0)
else:
plot[name].append(round(getattr(row, name), 3))
# Bin data into the appropriate size for histograms
idx = get_start_bin(int(end_date - 1), table)
bins = range(int(start_date), int(end_date), rain_mod[table])
# Loop through each rain bin
for rain_bin in bins:
plot["rain_time"].append(get_rain_label(idx, table))
rain = 0
# Loop through each rain sample
for row in rows:
if row.rain == 0:
continue
# Check if the sample falls into our bin
if row.timestamp >= rain_bin and row.timestamp < (rain_bin + rain_mod[table]):
rain += row.rain
plot["rain"].append(rain)
# Wrap around depending on the number of bins (since we don't always start at 0)
idx = (idx + 1) % len(bins)
return plot
def join_data(prev_data, new_data):
    # Decide the merge order once, before any key (including "timestamp") is modified
    append_new = prev_data["timestamp"][-1] < new_data["timestamp"][0]
    for key in prev_data.keys():
        if append_new:
            prev_data[key] += new_data[key]
        else:
            prev_data[key] = new_data[key] + prev_data[key]
    return prev_data
def get_stats(uid, start_date, end_date):
""" Get weather data for the specified weather period """
db = get_db()
rows = db.get_stats(start_date=start_date, end_date=end_date, uid=uid)
if len(rows) == 0:
return
plot = {}
col_names = rows[0]._asdict().keys()
plot["stat_fields"] = []
for name in col_names:
split_name = name.rsplit("__")
if len(split_name) > 1:
if split_name[0] not in plot:
plot[split_name[0]] = {}
plot["stat_fields"].append(split_name[0])
plot[split_name[0]][split_name[1]] = []
else:
plot[name] = []
ignored_fields = ["uid", "id", "data_period", "temperature_in", "wind_dir"]
# Create lists with each data type and make timestamp pretty
for row in rows:
for name in col_names:
if name == "timestamp":
plot[name].append(
datetime.fromtimestamp(getattr(row, name)).strftime("%Y-%m-%d")
)
elif name in ignored_fields:
continue
else:
split_name = name.rsplit("__")
if len(split_name) > 1:
if getattr(row, name) is None:
plot[split_name[0]][split_name[1]].append(0)
else:
plot[split_name[0]][split_name[1]].append(
round(getattr(row, name), 3)
)
else:
if getattr(row, name) is None:
plot[name].append(0)
else:
plot[name].append(round(getattr(row, name), 3))
if sum(plot["rain"]) == 0:
del plot["rain"]
return plot
def join_stats(stats1, stats2):
for key in stats1:
if isinstance(stats1[key], list):
stats1[key].extend(stats2[key])
elif isinstance(stats1[key], dict):
stats1[key] = join_stats(stats1[key], stats2[key])
return stats1
@app.route("/json/stats/year")
def json_stats_year_str():
db = get_db()
# time.time() is utc time, but now is a "naive"
# datetime object in current timezone
now = datetime.fromtimestamp(int(time.time()))
# Start this year before the next full hour
start_time = time.mktime(
(now.replace(minute=0, second=0, hour=0, day=1, month=1)).timetuple()
)
end_time = time.mktime(now.timetuple())
stats = {"hostname": hostname}
stats["start_date"] = datetime.fromtimestamp(start_time).strftime("%Y-%m-%d")
stats["end_date"] = datetime.fromtimestamp(end_time).strftime("%Y-%m-%d")
stats["devices"] = {}
for device, name in db.devices.items():
uid_stats = get_stats(device, start_time, end_time)
if uid_stats is not None:
if name in stats["devices"]:
stats["devices"][name] = join_stats(uid_stats, stats["devices"][name])
else:
stats["devices"][name] = uid_stats
return jsonify(stats)
@app.route("/json/day")
def json_day_str():
# time.time() is utc time, but now is a "naive"
# datetime object in current timezone
now = datetime.fromtimestamp(int(time.time()))
# Start 24 hours before the next full hour
start_time = time.mktime(
(
now.replace(minute=0, second=0) + timedelta(hours=1) - timedelta(days=1)
).timetuple()
)
end_time = time.mktime(now.timetuple())
db = get_db()
data = {"hostname": hostname}
data["start_date"] = datetime.fromtimestamp(start_time).strftime(
"%Y-%m-%d %H:%M:%S"
)
data["end_date"] = datetime.fromtimestamp(end_time).strftime("%Y-%m-%d %H:%M:%S")
data["data"] = {}
for device, name in db.devices.items():
data_dict = get_data_dict(device, start_time, end_time, "day")
if data_dict is not None:
if name in data["data"]:
data["data"][name] = join_data(data["data"][name], data_dict)
else:
data["data"][name] = data_dict
return jsonify(data)
@app.route("/json/week")
def json_week_str():
# time.time() is utc time, but now is a "naive"
# datetime object in current timezone
now = datetime.fromtimestamp(int(time.time()))
# Round to the full day, start 7 days ago
start_time = time.mktime(
(
now.replace(hour=0, minute=0, second=0)
+ timedelta(days=1)
- timedelta(weeks=1)
).timetuple()
)
end_time = time.mktime(now.timetuple())
db = get_db()
data = {"hostname": hostname}
data["start_date"] = datetime.fromtimestamp(start_time).strftime(
"%Y-%m-%d %H:%M:%S"
)
data["end_date"] = datetime.fromtimestamp(end_time).strftime("%Y-%m-%d %H:%M:%S")
data["data"] = {}
for device, name in db.devices.items():
data_dict = get_data_dict(device, start_time, end_time, "week")
if data_dict is not None:
if name in data["data"]:
data["data"][name] = join_data(data["data"][name], data_dict)
else:
data["data"][name] = data_dict
return jsonify(data)
@app.route("/json/month")
def json_month_str():
# time.time() is utc time, but now is a "naive"
# datetime object in current timezone
now = datetime.fromtimestamp(int(time.time()))
# TODO - round to the month?
# Round to the full day, start 31 days ago
start_time = time.mktime(
(
now.replace(hour=0, minute=0, second=0)
+ timedelta(days=1)
- timedelta(days=31)
).timetuple()
)
end_time = time.mktime(now.timetuple())
db = get_db()
data = {"hostname": hostname}
data["start_date"] = datetime.fromtimestamp(start_time).strftime(
"%Y-%m-%d %H:%M:%S"
)
data["end_date"] = datetime.fromtimestamp(end_time).strftime("%Y-%m-%d %H:%M:%S")
data["data"] = {}
for device, name in db.devices.items():
data_dict = get_data_dict(device, start_time, end_time, "month")
if data_dict is not None:
if name in data["data"]:
data["data"][name] = join_data(data["data"][name], data_dict)
else:
data["data"][name] = data_dict
return jsonify(data)
@app.route("/plots")
def plots():
return render_template("plots.html", hostname=hostname)
@app.route("/stats")
def stats():
return render_template("stats.html", hostname=hostname)
# Don't add hostname to redirect
# See https://stackoverflow.com/questions/30006740/how-can-i-tell-flask-not-to-add-host-scheme-info-to-my-redirect
class NoHostnameResponse(Response):
autocorrect_location_header = False
@app.route("/zipdb")
def download_zip_db():
dbname = "chaac.db.zip"
with zipfile.ZipFile(
f"/tmp/files/{dbname}", "w", compression=zipfile.ZIP_DEFLATED
) as dbzip:
print("Zipping ", os.getenv("DATABASE"))
dbzip.write(os.getenv("DATABASE"))
return redirect(f"files/{dbname}", Response=NoHostnameResponse)
| 2.640625
| 3
|
munimap/model/__init__.py
|
MrSnyder/bielefeldGEOCLIENT
| 2
|
12776914
|
<filename>munimap/model/__init__.py
from .mb_group import *
from .mb_user import *
from .layer import *
from .project import *
from .draw_schema import *
from .settings import *
| 1.21875
| 1
|
clarity/CellTypeDetection/parallelProcess.py
|
wjguan/phenocell
| 0
|
12776915
|
<reponame>wjguan/phenocell<filename>clarity/CellTypeDetection/parallelProcess.py
import numpy as np
## All this does is find the adaptive threshold for each point
def kernel(marker_channel_img, searchRadius, percent, localArea, center):
# Kernel to run for parallel processing: for deciding adaptive threshold of large z stacks.
xr, yr, zr = searchRadius
if isinstance(localArea, int):
z = localArea
elif len(localArea) == 2:
x = localArea[0]; y = localArea[0]; z = localArea[1]
elif len(localArea) == 3:
x = localArea[0]; y = localArea[1]; z = localArea[2]
if isinstance(localArea, int):
newimg = marker_channel_img[:,:,max(0,center[2]-z):min(marker_channel_img.shape[2],center[2]+z)]
else:
newimg = marker_channel_img[max(0,center[0]-x):min(marker_channel_img.shape[0],center[0]+x),
max(0,center[1]-y):min(marker_channel_img.shape[1],center[1]+y),
max(0,center[2]-z):min(marker_channel_img.shape[2],center[2]+z)]
return np.percentile(newimg, percent)*(2*xr+1)*(2*yr+1)*(2*zr+1)
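# Hedged usage sketch (added for illustration; not part of the original file): compute
# the adaptive threshold around one detected cell centre in a small synthetic stack.
#
# import numpy as np
# img = np.random.randint(0, 255, size=(64, 64, 20))
# thresh = kernel(img, searchRadius=(3, 3, 1), percent=95, localArea=5, center=(32, 32, 10))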
| 2.484375
| 2
|
app/models.py
|
FrancisSakwa89/Pitch
| 0
|
12776916
|
<filename>app/models.py
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
pass_secure = db.Column(db.String(255))
pitches = db.relationship('Pitch',backref = 'pitches',lazy = "dynamic")
comments = db.relationship('Comment',backref='comments',lazy="dynamic")
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
class Pitch(UserMixin,db.Model):
__tablename__ = 'pitches'
id = db.Column(db.Integer, primary_key=True)
post = db.Column(db.String(255))
body = db.Column(db.String(1000))
category = db.Column(db.String(1000))
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
comments = db.relationship('Comment',backref = 'pitch',lazy = "dynamic")
def save_pitch(self):
db.session.add(self)
db.session.commit()
class Comment(UserMixin,db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
poster = db.Column(db.String(255))
comment = db.Column(db.String(1000))
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
pitch_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
def save_comment(self):
db.session.add(self)
db.session.commit()
class PhotoProfile(db.Model):
__tablename__= 'profile_photos'
id = db.Column(db.Integer,primary_key = True)
pic_path = db.Column(db.String())
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
| 2.71875
| 3
|
deeplodocus/core/project/deep_structure/modules/transforms/transform_example.py
|
Ahleroy/deeplodocus
| 0
|
12776917
|
import random
#
# RANDOM FUNCTION EXAMPLE
#
def random_example_function(data, param_min, param_max):
parameters = random.uniform(param_min, param_max)
transformed_data, _ = example_function(data, parameters)
transform = ["example_function", example_function, {"parameters": parameters}]
return transformed_data, transform
#
# FUNCTION EXAMPLE
#
def example_function(data, parameters):
return data, None
| 2.8125
| 3
|
liftoff/proc_info.py
|
tudor-berariu/liftoff
| 9
|
12776918
|
<filename>liftoff/proc_info.py
""" Here we implement liftoff-procs and liftoff-abort
"""
from argparse import Namespace
import os.path
import subprocess
from termcolor import colored as clr
from .common.options_parser import OptionParser
def parse_options() -> Namespace:
""" Parse command line arguments and liftoff configuration.
"""
opt_parser = OptionParser(
"liftoff-status", ["experiment", "all", "timestamp_fmt", "results_path", "do"],
)
return opt_parser.parse_args()
def get_running_liftoffs(experiment: str, results_path: str):
""" Get the running liftoff processes.
"""
cmd = (
"COLUMNS=0 pgrep liftoff"
" | xargs -r -n 1 grep "
f"--files-with-matches {results_path:s}/*/.__* -e"
)
result = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
if result.stderr:
raise Exception(result.stderr.decode("utf-8"))
running = {}
for session_path in result.stdout.decode("utf-8").split("\n"):
if not session_path:
continue
with open(session_path) as hndlr:
ppid = int(hndlr.readline().strip())
experiment_full_name = os.path.basename(os.path.dirname(session_path))
if experiment is not None and experiment not in experiment_full_name:
continue
proc_group = dict({})
session_id = os.path.basename(session_path)[3:]
escaped_sid = session_id.replace("-", r"\-")
cmd = (
f"for p in "
f"`pgrep -f '\\-\\-session\\-id {escaped_sid:s}'`"
f"; do COLUMNS=0 ps -p $p -o pid,ppid,cmd h; done"
)
result = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
if result.stderr:
raise Exception(result.stderr.decode("utf-8"))
pids = []
print(result.stdout.decode("utf-8").split("\n"))
for line1 in result.stdout.decode("utf-8").split("\n"):
if not line1:
continue
pid, fake_ppid, *other = line1.split()
pid, fake_ppid = int(pid), int(fake_ppid)
if fake_ppid != 1:
cfg, good = "", True
for part in other:
if part.endswith("cfg.yaml"):
cfg = (
os.path.basename(os.path.dirname(os.path.dirname(part)))
+ "/"
+ os.path.basename(os.path.dirname(part))
)
elif ".__crash" in part:
good = False
break
if good:
pids.append((pid, cfg))
proc_group["session"] = session_id
proc_group["ppid"] = ppid
proc_group["procs"] = pids
running.setdefault(experiment_full_name, []).append(proc_group)
return running
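# Illustrative shape of the value returned above (hypothetical session id and pids):
# {"my_experiment_2020Jan01-120000": [
#     {"session": "a1b2c3", "ppid": 4321, "procs": [(4410, "cfg_group/0000")]}]}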
def display_procs(running):
""" Display the running liftoff processes.
"""
for experiment_name, details in running.items():
print(clr(experiment_name, attrs=["bold"]))
for info in details:
nrunning = clr(f"{len(info['procs']):d}", color="blue", attrs=["bold"])
ppid = clr(f"{info['ppid']:5d}", color="red", attrs=["bold"])
print(f" {ppid:s}" f" :: {info['session']:s}" f" :: {nrunning:s} running")
for pid, name in info["procs"]:
print(f" - {pid:5d} :: {name:s}")
def procs() -> None:
""" Entry point for liftoff-procs.
"""
opts = parse_options()
display_procs(get_running_liftoffs(opts.experiment, opts.results_path))
| 2.515625
| 3
|
scimitar/core/modules/HeaderModule.py
|
aloheac/scimitar
| 0
|
12776919
|
<reponame>aloheac/scimitar<filename>scimitar/core/modules/HeaderModule.py
from scimitar.core.modules.BaseModules import PreExecutionModule
class HeaderModule( PreExecutionModule ):
def __init__( self, run ):
PreExecutionModule.__init__( self, "Header Module", 1, run )
def getScriptContribution( self ):
contribution = "# ***** PreExecution: Print Log Header *****\n"
contribution += "scimitar.runtime.modules.HeaderModule.ModuleRuntime( RUN_LISTING, DIR_ORDER, RUN_CONFIG ).execute()\n\n"
return contribution
| 1.78125
| 2
|
cmake/lib/config.py
|
uihsnv/lapack-dsyevr-test
| 1
|
12776920
|
# Copyright (c) 2015 by <NAME> and <NAME>
# See https://github.com/scisoft/autocmake/blob/master/LICENSE
import subprocess
import os
import sys
import shutil
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def check_cmake_exists(cmake_command):
"""
Check whether CMake is installed. If not, print
informative error message and quits.
"""
p = subprocess.Popen('%s --version' % cmake_command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
if not ('cmake version' in p.communicate()[0].decode('UTF-8')):
sys.stderr.write(' This code is built using CMake\n\n')
sys.stderr.write(' CMake is not found\n')
sys.stderr.write(' get CMake at http://www.cmake.org/\n')
sys.stderr.write(' on many clusters CMake is installed\n')
sys.stderr.write(' but you have to load it first:\n')
sys.stderr.write(' $ module load cmake\n')
sys.exit(1)
def setup_build_path(build_path):
"""
Create build directory. If this already exists, print informative
error message and quit.
"""
if os.path.isdir(build_path):
fname = os.path.join(build_path, 'CMakeCache.txt')
if os.path.exists(fname):
sys.stderr.write('aborting setup\n')
sys.stderr.write('build directory %s which contains CMakeCache.txt already exists\n' % build_path)
sys.stderr.write('remove the build directory and then rerun setup\n')
sys.exit(1)
else:
os.makedirs(build_path, 0o755)
def test_adapt_cmake_command_to_platform():
cmake_command = "FC=foo CC=bar CXX=RABOOF cmake -DTHIS -DTHAT='this and that cmake' .."
res = adapt_cmake_command_to_platform(cmake_command, 'linux')
assert res == cmake_command
res = adapt_cmake_command_to_platform(cmake_command, 'win32')
assert res == "set FC=foo && set CC=bar && set CXX=RABOOF && cmake -DTHIS -DTHAT='this and that cmake' .."
cmake_command = "cmake -DTHIS -DTHAT='this and that cmake' .."
res = adapt_cmake_command_to_platform(cmake_command, 'linux')
assert res == cmake_command
res = adapt_cmake_command_to_platform(cmake_command, 'win32')
assert res == cmake_command
def adapt_cmake_command_to_platform(cmake_command, platform):
"""
Adapt CMake command to MS Windows platform.
"""
if platform == 'win32':
pos = cmake_command.find('cmake')
s = ['set %s &&' % e for e in cmake_command[:pos].split()]
s.append(cmake_command[pos:])
return ' '.join(s)
else:
return cmake_command
def run_cmake(command, build_path, default_build_path):
"""
Execute CMake command.
"""
topdir = os.getcwd()
os.chdir(build_path)
p = subprocess.Popen(command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout_coded, stderr_coded = p.communicate()
stdout = stdout_coded.decode('UTF-8')
stderr = stderr_coded.decode('UTF-8')
if stderr:
sys.stderr.write(stderr)
sys.exit(1)
# print cmake output to screen
print(stdout)
# write cmake output to file
f = open('cmake_output', 'w')
f.write(stdout)
f.close()
# change directory and return
os.chdir(topdir)
if 'Configuring incomplete' in stdout:
# configuration was not successful
if (build_path == default_build_path):
# remove build_path iff not set by the user
# otherwise removal can be dangerous
shutil.rmtree(default_build_path)
else:
# configuration was successful
save_setup_command(sys.argv, build_path)
print_build_help(build_path, default_build_path)
def print_build_help(build_path, default_build_path):
"""
Print help text after configuration step is done.
"""
print(' configure step is done')
print(' now you need to compile the sources:')
if (build_path == default_build_path):
print(' $ cd build')
else:
print(' $ cd ' + build_path)
print(' $ make')
def save_setup_command(argv, build_path):
"""
Save setup command to a file.
"""
file_name = os.path.join(build_path, 'setup_command')
f = open(file_name, 'w')
f.write(' '.join(argv[:]) + '\n')
f.close()
def configure(root_directory, build_path, cmake_command, only_show):
"""
Main configure function.
"""
default_build_path = os.path.join(root_directory, 'build')
# check that CMake is available, if not stop
check_cmake_exists('cmake')
# deal with build path
if build_path is None:
build_path = default_build_path
if not only_show:
setup_build_path(build_path)
cmake_command = adapt_cmake_command_to_platform(cmake_command, sys.platform)
print('%s\n' % cmake_command)
if only_show:
sys.exit(0)
run_cmake(cmake_command, build_path, default_build_path)
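# Hedged usage sketch (added for illustration; not part of the original file): a
# project's setup script would typically drive this module along the lines of
#
# configure(root_directory=os.path.dirname(os.path.realpath(__file__)),
#           build_path=None,
#           cmake_command='FC=gfortran cmake ..',
#           only_show=False)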
| 2.484375
| 2
|
app.py
|
g-jindal2001/blogs
| 0
|
12776921
|
<filename>app.py
from flask import Flask, render_template, request, redirect, session, flash
from flask_bootstrap import Bootstrap
from flask_mysqldb import MySQL
from flask_ckeditor import CKEditor
import bcrypt
import yaml
app = Flask(__name__)
Bootstrap(app)
ckeditor = CKEditor(app)
db = yaml.safe_load(open('db.yaml'))
app.config['MYSQL_HOST'] = db['mysql_host']
app.config['MYSQL_USER'] = db['mysql_user']
app.config['MYSQL_PASSWORD'] = db['mysql_password']
app.config['MYSQL_DB'] = db['mysql_db']
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
mysql = MySQL(app)
app.config['SECRET_KEY'] = 'secret'
@app.route('/')
def index():
cur = mysql.connection.cursor()
resultValue = cur.execute("SELECT * FROM blog")
if resultValue > 0:
blogs = cur.fetchall()
cur.close()
return render_template('index.html', blogs=blogs)
cur.close()
return render_template('index.html', blogs=None)
@app.route('/about/')
def about():
return render_template('about.html')
@app.route('/blogs/<int:id>/')
def blogs(id):
cur = mysql.connection.cursor()
resultValue = cur.execute("SELECT * FROM blog WHERE blog_id = {}".format(id))
if resultValue > 0:
blog = cur.fetchone()
return render_template('blog.html', blog=blog)
return 'Blog not found'
@app.route('/register/', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
userDetails = request.form#Fetch all user details
if userDetails['password'] != userDetails['confirm_password']:#Check if passwords match
flash('Passwords do not match! Try again.', 'danger')
return render_template('register.html')
hashed = bcrypt.hashpw(userDetails['password'].encode('utf8'), bcrypt.gensalt())
cur = mysql.connection.cursor()#if passwords match then start a new sql cursor
cur.execute("INSERT INTO user(first_name, last_name, username, email, password) "\
"VALUES(%s,%s,%s,%s,%s)",(userDetails['first_name'], userDetails['last_name'], \
userDetails['username'], userDetails['email'], hashed))
mysql.connection.commit()#save the changes
cur.close()
flash('Registration successful! Please login.', 'success')
return redirect('/login')
return render_template('register.html')
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
userDetails = request.form#Fetch all user details ie login and password
username = userDetails['username']#Fetch the username
cur = mysql.connection.cursor()#Start a new sql cursor
resultValue = cur.execute("SELECT * FROM user WHERE username = %s", ([username]))#Fetch the username
if resultValue > 0:#If username exists in database
user = cur.fetchone()#Fetch that single row from database which contains all details of that username
if bcrypt.checkpw(userDetails['password'].encode('utf-8'), user['password'].encode('utf-8')):#Check if passwords match
session['login'] = True
session['firstName'] = user['first_name']
session['lastName'] = user['last_name']
flash('Welcome ' + session['firstName'] +'! You have been successfully logged in', 'success')
else:#If passwords do not match
cur.close()
flash('Password does not match', 'danger')
return render_template('login.html')
else:#If the user that is trying to login is not found in database
cur.close()
flash('User not found', 'danger')
return render_template('login.html')
cur.close()
return redirect('/')
return render_template('login.html')
# Write a new blog
@app.route('/write-blog/',methods=['GET', 'POST'])
def write_blog():
if request.method == 'POST':
blogpost = request.form
title = blogpost['title']
body = blogpost['body']
author = session['firstName'] + ' ' + session['lastName']
cur = mysql.connection.cursor()
cur.execute("INSERT INTO blog(title, body, author) VALUES(%s, %s, %s)", (title, body, author))
mysql.connection.commit()
cur.close()
flash("Successfully posted new blog", 'success')
return redirect('/')
return render_template('write_blog.html')
# View my blog
@app.route('/my-blogs/')
def view_blogs():
try:
author = session['firstName'] + ' ' + session['lastName']
cur = mysql.connection.cursor()
result_value = cur.execute("SELECT * FROM blog WHERE author = %s",[author])
if result_value > 0:
my_blogs = cur.fetchall()
return render_template('my_blogs.html',my_blogs=my_blogs)
else:
return render_template('my_blogs.html',my_blogs=None)
except Exception:
return render_template('my_blogs.html',my_blogs="No login!")
# Edit blog
@app.route('/edit-blog/<int:id>/', methods=['GET', 'POST'])
def edit_blog(id):
if request.method == 'POST':
cur = mysql.connection.cursor()
title = request.form['title']
body = request.form['body']
cur.execute("UPDATE blog SET title = %s, body = %s where blog_id = %s",(title, body, id))
mysql.connection.commit()
cur.close()
flash('Blog updated successfully', 'success')
return redirect('/blogs/{}'.format(id))
cur = mysql.connection.cursor()
result_value = cur.execute("SELECT * FROM blog WHERE blog_id = {}".format(id))
if result_value > 0:
blog = cur.fetchone()
blog_form = {}
blog_form['title'] = blog['title']
blog_form['body'] = blog['body']
return render_template('edit_blog.html', blog_form=blog_form)
@app.route('/delete-blog/<int:id>/')
def delete_blog(id):
cur = mysql.connection.cursor()
cur.execute("DELETE FROM blog WHERE blog_id = {}".format(id))
mysql.connection.commit()
flash("Your blog has been deleted", 'success')
return redirect('/my-blogs')
@app.route('/logout/')
def logout():
session.clear()
flash("You have been logged out", 'info')
return redirect('/')
if __name__ == '__main__':
app.run(debug=True, port=5001)
| 2.59375
| 3
|
fybot/tests/timing.py
|
juanlazarde/financial_scanner
| 2
|
12776922
|
<filename>fybot/tests/timing.py
from sys import exit
from time import time as t
import core.snp as sn
def main():
forced = True
symbols = sn.GetAssets(forced).symbols # 3.6s
# 300 symbols
# sn.GetFundamental(symbols, forced) # 44.6 s
# s = t()
# sn.GetPrice(symbols, forced) # 84.7 s
# print(t() - s)
# exit()
# pass
if __name__ == '__main__':
main()
| 2.046875
| 2
|
studies/ti/gaussian.py
|
SimonBoothroyd/bayesiantesting
| 1
|
12776923
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 14:42:37 2019
@author: owenmadin
"""
import numpy
from bayesiantesting.kernels.bayes import ThermodynamicIntegration
from bayesiantesting.models.continuous import GaussianModel
def main():
priors = {"uniform": ("uniform", numpy.array([-5.0, 5.0]))}
# Build the model / models.
model = GaussianModel("gaussian", priors, 0.0, 1.0)
# Draw the initial parameter values from the model priors.
initial_parameters = model.sample_priors()
# Run the simulation
simulation = ThermodynamicIntegration(
legendre_gauss_degree=16,
model=model,
warm_up_steps=100000,
steps=500000,
output_directory_path="gaussian",
)
_, integral, error = simulation.run(initial_parameters, number_of_processes=4)
print("Final Integral:", integral, " +/- ", error)
print("==============================")
if __name__ == "__main__":
main()
| 2.703125
| 3
|
pyforms_lite/gui/controls/ControlFile.py
|
NikhilNarayana/pyforms-lite
| 0
|
12776924
|
<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
from pyforms_lite.utils.settings_manager import conf
from pyforms_lite.gui.controls.ControlText import ControlText
import pyforms_lite.utils.tools as tools
from AnyQt import uic, _api
from AnyQt.QtWidgets import QFileDialog
class ControlFile(ControlText):
def __init__(self, *args, **kwargs):
super(ControlFile, self).__init__(*args, **kwargs)
self.use_save_dialog = kwargs.get('use_save_dialog', False)
def init_form(self):
control_path = tools.getFileInSameDirectory(__file__, "fileInput.ui")
self._form = uic.loadUi(control_path)
self._form.label.setText(self._label)
self._form.pushButton.clicked.connect(self.click)
self.form.lineEdit.editingFinished.connect(self.finishEditing)
self._form.pushButton.setIcon(conf.PYFORMS_ICON_FILE_OPEN)
def finishEditing(self):
"""Function called when the lineEdit widget is edited"""
self.changed_event()
def click(self):
if self.use_save_dialog:
value, _ = QFileDialog.getSaveFileName(self.parent, self._label, self.value)
else:
value = QFileDialog.getOpenFileName(self.parent, self._label, self.value)
if _api.USED_API == _api.QT_API_PYQT5:
value = value[0]
elif _api.USED_API == _api.QT_API_PYQT4:
value = str(value)
if value and len(value)>0: self.value = value
| 2.1875
| 2
|
Typer.py
|
kenanbit/KeyboardLayoutLearning
| 0
|
12776925
|
<reponame>kenanbit/KeyboardLayoutLearning
#!/usr/bin/env python3
import curses
from curses import wrapper, textpad
from time import sleep, strftime, mktime, gmtime
import subprocess
import _thread
import random
from sys import argv
from datetime import datetime
WORDS_FILE = '10000words.txt' #File to draw random words from
WORDS_SAMPLE_SIZE = 1000
LINE_CHARACTER_WIDTH = 70
TIME_LIMIT = 15*60 #Time out after 15 minutes
BACKSPACE_ENABLED = False
layouts = {
#LAYOUT TOP ROW HOME ROW BOTTOM ROW
#------ ----------- ----------- -----------
'qwerty': 'qwertyuiop'+'asdfghjkl;'+'zxcvbnm,./',
'0':'\',.pyfgcrl'+'aoeuidhtns'+';qjkxbmwvz', #Dvorak
'1': 'gljz,x.uvk'+'oidhtnears'+'pw;b/ycmfq', #10 most common letter in home row, otherwise random
'2': 'fjh,.;dvli'+'bostunprmz'+'exaqwy/gck', #Fewer than 5 most common letters in home row (4 to be exact), otherwise random
}
LAYOUT_TO_USE = argv[1]
PRETEST_LAYOUT = 'qwerty'
SYSTEM_LAYOUT = 'qwerty'
assert LAYOUT_TO_USE in layouts and SYSTEM_LAYOUT in layouts
SURVEY_QUESTIONS = [
'Do you normally look at the keyboard while you type? (Yes/No/Sometimes)',
'How many fingers do you normally use to type? (1-10)',
'How many different keyboard layouts (including different languages) have you learned to use? (0-10)',
'List them below, separated by commas:',
'If you would like the opportunity to receive a $20 Java card for achieving the highest score in this study, enter your email below. Leave blank to opt-out.'
]
PRETEST_INSTRUCTIONS = 'You will now take a short typing test to adjust to how the interface works.'
MAIN_INSTRUCTIONS = 'You will now begin to learn to type in the keyboard layout which is displayed below. The program will show one line of common english words, and you will type them as quickly as you can. All words are lowercase. Backspace is disabled. The test will end after 15 minutes have passed. You can see your current CPM (characters per minute) in the lefthand panel.'
GOODBYE = 'You have finished! If you filled out your email and have the highest score out of all experiment subjects, we will contact you to receive the gift card. Please tell the experiment coordinator that you have finished.\n\nThank you for your time and mental effort!'
INCORRECT_COLOR = (1,curses.COLOR_RED, curses.COLOR_BLACK)
CORRECT_COLOR = (2, curses.COLOR_GREEN, curses.COLOR_BLACK)
INCORRECT_KEY = (3,curses.COLOR_BLACK, curses.COLOR_RED)
CORRECT_KEY = (4, curses.COLOR_BLACK, curses.COLOR_GREEN)
MODEL_COLOR = (5,curses.COLOR_BLACK,curses.COLOR_WHITE)
GRAPH_COLOR = (6,curses.COLOR_YELLOW,curses.COLOR_BLACK)
def load_words():
words = []
count = 0
for line in open(WORDS_FILE, 'r'):
if count > WORDS_SAMPLE_SIZE:
break
words.append(line.strip())
count += 1
return words
def get_line_of_text(word_choices, character_length):
line = ""
while len(line) <= character_length:
to_add = random.choice(word_choices)
if len(line) + 1 + len(to_add) > character_length:
break
line += ' '+to_add
return line.strip()
def display_information(string, terminate_with_key=True, terminate_at_time=9, show_keyboard_layout = None):
main_pad.clear()
main_pad.addstr(0,0,string)
if show_keyboard_layout in layouts:
draw_keyboard(layouts[show_keyboard_layout], None)
if terminate_with_key:
main_pad.addstr(5,3, 'Press any key to continue...')
main_pad.refresh( 0,0, 0,0, 25,width+20)
stdscr.getch()
else:
if terminate_at_time <= 0:
return
else:
main_pad.addstr(5,3, 'We will continue in '+str(terminate_at_time)+' seconds...')
main_pad.refresh( 0,0, 0,0, 25,width+20)
sleep(1)
display_information(string, terminate_with_key=False, terminate_at_time=terminate_at_time-1, show_keyboard_layout = show_keyboard_layout)
def survey():
curses.echo()
out_file = id_str+'.results'
answers = []
with open(out_file, 'a+') as f:
f.write('id: '+id_str+'\r\n')
for question in SURVEY_QUESTIONS:
main_pad.clear()
main_pad.addstr(0,0, question)
main_pad.addstr(1,1,'')
main_pad.refresh( 0,0, 0,0, 25,width+20)
f.write(question)
            f.write(stdscr.getstr(1, 1, 80).decode("utf-8"))
f.write('\r\n')
def display_stats(layout, time, model_line, typed_line, mistake_count):
main_pad.clear()
cpm = len(model_line)/time*60
accuracy = max(0, 1-mistake_count/len(model_line))
score = accuracy * cpm
display_string = \
'CPM (chars / minute): '+str(int(cpm))+'\n'+\
'Mistakes: '+str(mistake_count)+'\n'+\
'Accuracy: '+str(int(accuracy*100))+'%\n'+\
'Score on this line: '+str(int(score))+'\n'
#Write results
out_file = id_str+'.results'
with open(out_file, 'a+') as f:
strings = ['id: '+id_str,
'layout: '+layout,
'time: '+strftime("%c"),
'elapsed: '+str(time),
'length: '+str(len(model_line)),
'cpm: '+str(cpm),
'mistakes: '+str(mistake_count),
'accuracy: '+str(accuracy),
'score: '+str(score),
'model_line: '+model_line,
'typed_line: '+typed_line]
for string in strings:
f.write(string)
f.write('\r\n')
f.write('\r\n')
display_information(display_string, terminate_with_key=False, show_keyboard_layout = layout)
#TODO make keys flash green and red when pressed.
def draw_keyboard(char_list, pressed_char, is_correct = True):
KEY_WIDTH = 6
KEY_HEIGHT = 4
start_y = 6
char_list = char_list.upper()
#FIRST ROW
x = 0; y = start_y
for k in range(0, 10):
textpad.rectangle(main_pad, y, x, y + KEY_HEIGHT, x + KEY_WIDTH)
char = char_list[0+k]
style = curses.A_BOLD
if pressed_char == char.lower():
if is_correct:
style = curses.color_pair(CORRECT_KEY[0])
else:
style = curses.color_pair(INCORRECT_KEY[0])
if not char.isalpha():
char=' '
main_pad.addstr(y + 1, x+1, ' '*(KEY_WIDTH - 1), style)
main_pad.addstr(y + 2, x+1, (' '*(KEY_WIDTH//2 - 1)) + char + (' '*(KEY_WIDTH//2 - 1)), style)
main_pad.addstr(y + 3, x+1, ' '*(KEY_WIDTH - 1), style)
x += KEY_WIDTH
y += KEY_HEIGHT
x = 2
#SECOND ROW
for k in range(0, 10):
textpad.rectangle(main_pad, y, x, y + KEY_HEIGHT, x + KEY_WIDTH)
char = char_list[10+k]
style = curses.A_BOLD
if pressed_char == char.lower():
if is_correct:
style = curses.color_pair(CORRECT_KEY[0])
else:
style = curses.color_pair(INCORRECT_KEY[0])
if not char.isalpha():
char=' '
main_pad.addstr(y + 1, x+1, ' '*(KEY_WIDTH - 1), style)
main_pad.addstr(y + 2, x+1, (' '*(KEY_WIDTH//2 - 1)) + char + (' '*(KEY_WIDTH//2 - 1)), style)
main_pad.addstr(y + 3, x+1, ' '*(KEY_WIDTH - 1), style)
if (k in [3,6]):
main_pad.addstr(y + KEY_HEIGHT//2+1, x+KEY_WIDTH//2, '_', style)
x += KEY_WIDTH
y += KEY_HEIGHT
x = 5
#THIRD ROW
for k in range(0, 10):
textpad.rectangle(main_pad, y, x, y + KEY_HEIGHT, x + KEY_WIDTH)
char = char_list[20+k]
style = curses.A_BOLD
if pressed_char == char.lower():
if is_correct:
style = curses.color_pair(CORRECT_KEY[0])
else:
style = curses.color_pair(INCORRECT_KEY[0])
if not char.isalpha():
char=' '
main_pad.addstr(y + 1, x+1, ' '*(KEY_WIDTH - 1), style)
main_pad.addstr(y + 2, x+1, (' '*(KEY_WIDTH//2 - 1)) + char + (' '*(KEY_WIDTH//2 - 1)), style)
main_pad.addstr(y + 3, x+1, ' '*(KEY_WIDTH - 1), style)
x += KEY_WIDTH
main_pad.refresh( start_y, 0, start_y, 0, start_y + KEY_HEIGHT*3 , 0 + 4 + 10*KEY_WIDTH)
def convert(char, from_layout, to_layout):
from_string = layouts[from_layout]
to_string = layouts[to_layout]
try:
key_position = from_string.index(char)
except ValueError:
return None
return to_string[key_position]
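# For example (illustrative, using the layouts defined above):
#   convert('s', 'qwerty', '0') returns 'o' -- the key occupying the QWERTY 's'
#   position in the Dvorak ('0') layout.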
def run_test(stdscr, choices, layout):
#Start of new line of the test
curses.noecho()
main_pad.clear()
draw_keyboard(layouts[layout], None)
model_line = get_line_of_text(choices, LINE_CHARACTER_WIDTH)
typed_line = ''
line_num = 1; x = 0
mistakes = 0
start = datetime.now()
main_pad.addstr(0,x,model_line)
main_pad.addstr(1,x,'') #Move cursor to next line
main_pad.refresh( 0,0, 0,0, 25,width+20)
while x < len(model_line):
#start of character fetch/process cycle
typed_char_code = stdscr.getch()
typed_char = chr(typed_char_code)
#Convert char to the new layout if not space or backspace
        if typed_char_code != curses.KEY_BACKSPACE and typed_char != ' ':
typed_char = convert( typed_char, SYSTEM_LAYOUT, layout )
model_char = model_line[x]
if typed_char: #Conversion may have returned None
typed_line += typed_char
if typed_char == model_char: #CORRECT LETTER
main_pad.addstr(1, x, typed_char, curses.color_pair(CORRECT_COLOR[0]) | curses.A_BOLD)
x+=1
is_correct = True
            elif typed_char_code == curses.KEY_BACKSPACE: #BACKSPACE
if BACKSPACE_ENABLED:
if x !=0:
x-=1
main_pad.addstr(line_num, x, ' ')
main_pad.addstr(line_num, x, '')
is_correct = False
else: #WRONG LETTER
#TODO this should be a separate option
if BACKSPACE_ENABLED:
main_pad.addstr(1, x, typed_char, curses.color_pair(INCORRECT_COLOR[0]) | curses.A_BOLD)
x += 1
mistakes += 1
is_correct = False
draw_keyboard(layouts[layout], typed_char, is_correct = is_correct)
main_pad.refresh( 0,0, 0,0, 25,width+20)
#Finished with a line, show the status page
now = datetime.now()
time = now - start
time = time.total_seconds()
display_stats(layout, time, model_line, typed_line, mistakes)
def run(stdscr):
choices = load_words()
survey()
#PRETEST
display_information(PRETEST_INSTRUCTIONS, show_keyboard_layout = PRETEST_LAYOUT)
run_test(stdscr, choices, PRETEST_LAYOUT)
#MAIN TEST
display_information(MAIN_INSTRUCTIONS, show_keyboard_layout = LAYOUT_TO_USE)
very_start = datetime.now()
while True:
run_test(stdscr, choices, LAYOUT_TO_USE)
#Check if we have exceeded the time limit
if (datetime.now() - very_start).total_seconds() >= TIME_LIMIT:
break
display_information(GOODBYE)
def init_curses():
global stdscr, main_pad, ref_pad, width
stdscr = curses.initscr()
curses.echo()
curses.cbreak()
curses.start_color()
curses.init_pair(*INCORRECT_COLOR)
curses.init_pair(*CORRECT_COLOR)
curses.init_pair(*INCORRECT_KEY)
curses.init_pair(*CORRECT_KEY)
curses.init_pair(*MODEL_COLOR)
curses.init_pair(*GRAPH_COLOR)
stdscr.attron(curses.A_BOLD)
stdscr.keypad(True)
width=curses.COLS
main_pad = curses.newpad(70, width)
def cleanup_curses():
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
def set_id():
global id_str
id_str = str(int(mktime(gmtime()))) + str(random.randint(100,999))
if __name__ == '__main__':
set_id()
init_curses()
wrapper(run) #main method
cleanup_curses() #Exiting
| 3.40625
| 3
|
landmark/utils/images/test.py
|
greedpejo/FER_SPRING
| 1
|
12776926
|
from PIL import Image
import numpy as np
img = Image.open('cifar.png')
pic = np.array(img)
noise = np.random.randint(-10,10,pic.shape[-1])
print(noise.shape)
pic = pic+noise
pic = pic.astype(np.uint8)
asd = Image.fromarray(pic)
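# Note: the noise vector has shape (channels,), so NumPy broadcasts the same per-channel
# offset onto every pixel. To inspect or persist the result (hypothetical output path,
# not part of the original script):
# asd.show()
# asd.save('cifar_noisy.png')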
| 3.109375
| 3
|
scripts/initial/ssh_connection.py
|
zhaozhilong1993/demon
| 0
|
12776927
|
#!/usr/bin/env python
# encoding: utf-8
import paramiko, base64
import optparse
import json
def generate_options():
p = optparse.OptionParser()
p.add_option("--address", "-a")
p.add_option("--username", "-u")
p.add_option("--password", <PASSWORD>")
options, argument = p.parse_args()
return options
if __name__ == '__main__':
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
options = generate_options()
address = options.address
username = options.username
    password = options.password
try:
client.connect(address, username=username, password=password, timeout=1)
stdin, stdout, stderr = client.exec_command('ls')
except Exception, e:
print "Fail"
else:
print "Success"
finally:
client.close()
| 2.28125
| 2
|
knn_classifier.py
|
artemnaumchuk/kNN-classifier-python
| 0
|
12776928
|
<filename>knn_classifier.py<gh_stars>0
class KNNClassifier(object):
def __init__(self, k=3, distance=None):
self.k = k
self.distance = distance
def fit(self, x, y):
pass
def predict(self, x):
pass
def __decision_function(self):
pass
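# Intended usage (sketch only; fit/predict/__decision_function are unimplemented stubs):
# clf = KNNClassifier(k=3, distance=lambda a, b: sum((ai - bi) ** 2 for ai, bi in zip(a, b)))
# clf.fit(X_train, y_train)
# predictions = clf.predict(X_test)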
| 2.3125
| 2
|
fwrite/config/__init__.py
|
fooying/fwrite
| 9
|
12776929
|
<gh_stars>1-10
#!/usr/bin/env python
#encoding=utf-8
#by Fooying 2013-11-17 01:49:57
'''
Helper methods for reading and writing the configuration file.
'''
import os
import sys
import ConfigParser
reload(sys)
sys.setdefaultencoding('utf-8')
from ..utils import *
CONFIG = ConfigParser.ConfigParser()
FILE_PATH = os.path.join(os.path.dirname(__file__), 'config.fwrite')
def write_config(item, key, value):
CONFIG.read(FILE_PATH)
with open(FILE_PATH, 'w') as f:
if item not in CONFIG.sections():
CONFIG.add_section(item)
value = str_encrypt(value)
CONFIG.set(item, key, value)
CONFIG.write(f)
def read_config(item, key):
try:
CONFIG.read(FILE_PATH)
value = CONFIG.get(item, key)
value = str_decrypt(value)
except:
value = ''
return value
if __name__ == '__main__':
write_config('base', 'icp', '闽ICP备12005824号-5')
print read_config('base', 'icp')
| 2.609375
| 3
|
joecceasy/AbstractBaseClass.py
|
joetainment/joecceasy
| 0
|
12776930
|
from . import Utils
Funcs = Utils.Funcs
classproperty = Utils.classproperty
class AbstractBaseClass:
"""
This class simply has some functionality we often
want on typical classes, such as ontoDict and ontoSelf
extend this as a habit when making new classes in apps etc
for init and passing through kwargs intelligently/flexibly, use
a pattern like...
def __init__(
self,
*args,
**kwargs,
):
        kwargs = self.kwargsViaDict( kwargs, {
            'title':"EasyLauncher",
            'minimumHeight':450,
            'example1':True,
            'example2':False,
            },
            ## now control which get also applied to self
            filterOutput={
                'example1','example2'
            }
        )
kwargs = self.kwargsViaDict( kwargs, {
'title':"EasyLauncher",
'minimumHeight':450,
'kwarg1-AddedViaKwargsViaDict':True,
'kwarg2-AddedViaKwargsViaDict':False,
})
        ## will modify both kwargs and self via its attribute dict if given one,
## otherwise won't modify kwargs but will
## apply the ones in the filter to the object
self.kwargsOntoSelf(
kwargs,
{ ## keys/values to be added to kwargs
'kwarg3-AddedViaAttrDict':True,
'kwarg4-AddedViaAttrDict':False,
},
## attributes from kwargs to apply to self
filterInput={ ## only apply these keys from kwargs to self
                ## actual value is ignored if filterOutput is a dict
'kwarg1-AddedViaKwargsViaDict':1,
'kwarg2-AddedViaKwargsViaDict':1,
},
)
self.kwargsOntoSelf(
None, #not given kwargs as dest dict, just attr dict
{
'kwarg5-AddedViaAttrDict':True,
'kwarg6-AddedViaAttrDict':False,
}
)
self.dictOntoSelf( {
'launcherColumns':7,
'pathOfShortcuts':DEFAULT_PATH,
})
superKwargs = self.dictCopyExclusive( kwargs, "minimumHeight" )
super().__init__( *args, **superKwargs
)
"""
@property
def kwargsD(self):
return self.kwargsViaDict
def kwargsViaDict(self, destDict, defaultsDict, *args, **kwargs):
## this swaps order of first two args in func call because it's way more intuitive
## when func is used in this manner
return Funcs.OntoDict( *args, defaultsDict, destDict, **kwargs )
def kwargsViaDictWithBackup(self, destDict, defaultsDict, *args, **kwargs):
return Funcs.OntoDictWithBackup( defaultsDict, destDict, *args, **kwargs )
def kwargsOntoObj(self, *args, **kwargs):
return Funcs.OntoDict( *args, **kwargs )
def kwargsOntoSelf(self, *args, **kwargs):
return Funcs.OntoObj( self, *args, **kwargs )
def dictOntoSelf(self, *args, **kwargs):
return Funcs.OntoObj(
## args below will be
## self, None meaning no default dict, first arg of *args as attrDict
self, None, *args, **kwargs
)
def dictCopyExclusive(self, *args, **kwargs ):
return Funcs.DictCopyExclusive( *args, **kwargs )
@classproperty
def Easy(cls):
import Easy
return Easy
@classproperty
def EasyFuncs(cls):
return Funcs
| 3.3125
| 3
|
checkerboard/checkerboard.py
|
lambdaloop/checkerboard
| 27
|
12776931
|
<reponame>lambdaloop/checkerboard
#!/usr/bin/env python3
import numpy as np
from scipy import signal
from scipy.spatial import cKDTree
from numpy import pi
from scipy.cluster.vq import kmeans
import cv2
try:
import gputools
GPUTOOLS = True
except:
GPUTOOLS = False
def create_correlation_patch(angle_1,angle_2,radius):
# width and height
width = radius*2+1
height = radius*2+1
# initialize template
template = []
for i in range(4):
x = np.zeros((height, width))
template.append(x)
# midpoint
mu = radius
mv = radius
# compute normals from angles
n1 = [-np.sin(angle_1), np.cos(angle_1)]
n2 = [-np.sin(angle_2), np.cos(angle_2)]
# for all points in template do
for u in range(width):
for v in range(height):
# vector
vec = [u-mu, v-mv]
dist = np.linalg.norm(vec)
# check on which side of the normals we are
s1 = np.dot(vec, n1)
s2 = np.dot(vec, n2)
if dist <= radius:
if s1 <= -0.1 and s2 <= -0.1:
template[0][v,u] = 1
elif s1 >= 0.1 and s2 >= 0.1:
template[1][v,u] = 1
elif s1 <= -0.1 and s2 >= 0.1:
template[2][v,u] = 1
elif s1 >= 0.1 and s2 <= -0.1:
template[3][v,u] = 1
# # normalize
for i in range(4):
template[i] /= np.sum(template[i])
return template
def detect_corners_template(gray, template, mode='same'):
img_corners = [None]*4
for i in range(4):
if GPUTOOLS and mode == 'same':
img_corners[i] = gputools.convolve(gray, template[i])
else:
img_corners[i] = signal.convolve(gray, template[i], mode=mode)
img_corners_mu = np.mean(img_corners, axis=0)
arr = np.array([img_corners[0]-img_corners_mu, img_corners[1]-img_corners_mu,
img_corners_mu-img_corners[2], img_corners_mu-img_corners[3]])
# case 1: a=white, b=black
img_corners_1 = np.min(arr, axis=0)
# case 2: b=white, a=black
img_corners_2 = np.min(-arr, axis=0)
# combine both
img_corners = np.max([img_corners_1, img_corners_2], axis=0)
return img_corners
TPROPS = [[0, pi/2], [pi/4, -pi/4],
# [0, pi/4], [0, -pi/4],
[pi/4, pi/2], [-pi/4, pi/2]]
# [-3*pi/8, 3*pi/8], [-pi/8, pi/8],
# [-pi/8, -3*pi/8], [pi/8, 3*pi/8]]
# TPROPS = [[0, pi/2], [0, -pi/4], [0, pi/4]]
RADIUS = [6, 8, 10]
def detect_corners(gray, radiuses=RADIUS):
out = np.zeros(gray.shape)
for angle_1, angle_2 in TPROPS:
for radius in radiuses:
temp = create_correlation_patch(angle_1, angle_2, radius)
corr = detect_corners_template(gray, temp)
out = np.max([corr, out], axis=0)
return out
def get_corner_candidates(corr, step=40, thres=0.01):
out = []
check = set()
for i in range(0, corr.shape[0], step//2):
for j in range(0, corr.shape[1], step//2):
region = corr[i:i+step, j:j+step]
ix = np.argmax(region)
r, c = np.unravel_index(ix, region.shape)
val = region[r, c]
if val > thres and (r+i, c+j) not in check:
out.append( (r+i, c+j, val) )
check.add( (r+i, c+j) )
return np.array(out)
def non_maximum_suppression(corners, dist=40):
tree = cKDTree(corners[:, :2])
good = np.ones(len(corners), dtype='bool')
for (a, b) in tree.query_pairs(dist):
if not good[a] or not good[b]:
continue
sa = corners[a, 2]
sb = corners[b, 2]
if sa >= sb:
good[b] = False
else:
good[a] = False
return corners[good]
def solve_patch_corner(dx, dy):
matsum = np.zeros((2,2))
pointsum = np.zeros(2)
for i in range(dx.shape[0]):
for j in range(dx.shape[1]):
vec = [dy[i,j], dx[i,j]]
pos = (i,j)
mat = np.outer(vec, vec)
pointsum += mat.dot(pos)
matsum += mat
try:
minv = np.linalg.inv(matsum)
except np.linalg.LinAlgError:
return None
newp = minv.dot(pointsum)
return newp
def get_angle_modes(corners, gray, winsize=11):
halfwin = (winsize-1)//2
out = []
for i, corner in enumerate(corners):
y, x = corner[:2]
y = int(round(y))
x = int(round(x))
dx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
dy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
gg = gray[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
rx = dx[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
ry = dy[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
angs = np.mod(np.angle(rx + ry*1j).flatten(), np.pi)
absr = np.abs(rx + ry*1j)
weights = absr.flatten()
sim = np.random.choice(angs, p=weights/np.sum(weights), size=len(angs)*5)
means, distortion = kmeans(sim, 2)
out.append(means)
return np.array(out)
def score_corners(corners, gray, winsize=11):
halfwin = (winsize-1)//2
scores = np.zeros(corners.shape[0])
for i, corner in enumerate(corners):
y, x, score = corner
y = int(round(y))
x = int(round(x))
dx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
dy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
gg = gray[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
rx = dx[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
ry = dy[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
angs = np.mod(np.angle(rx + ry*1j).flatten(), np.pi)
absr = np.abs(rx + ry*1j)
weights = absr.flatten()
sim = np.random.choice(angs, p=weights/np.sum(weights), size=len(angs)*5)
means, distortion = kmeans(sim, 2)
patch = create_correlation_patch(means[0], means[1], halfwin)
new_score = np.max(detect_corners_template(gg, patch, mode='valid'))
scores[i] = new_score
return scores
def refine_corners(corners, gray, winsize=11, check_only=False):
halfwin = (winsize-1)//2
out = []
dx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
dy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
for corner in corners:
y, x, score = corner
y = int(round(y))
x = int(round(x))
rx = dx[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
ry = dy[y-halfwin:y+halfwin+1, x-halfwin:x+halfwin+1]
newp = solve_patch_corner(rx, ry)
if newp is None:
continue # bad point
newp = newp - [halfwin, halfwin]
if np.any(np.abs(newp) > halfwin+1):
continue # bad point
coord = newp + [y, x]
if check_only:
out.append([y, x, score])
else:
out.append([coord[0], coord[1], score])
return np.array(out)
def normalize_image(img):
if len(img.shape) > 2:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img
blur_size = int(np.sqrt(gray.size) / 2)
grayb = cv2.GaussianBlur(gray, (3,3), 1)
gray_mean = cv2.blur(grayb, (blur_size, blur_size))
diff = (np.float32(grayb)-gray_mean) / 255.0
diff = np.clip(diff, -0.2, 0.2)+0.2
diff = (diff - np.min(diff)) / (np.max(diff) - np.min(diff))
return diff
def checkerboard_score(corners, size=(9,6)):
corners_reshaped = corners[:, :2].reshape(*size, 2)
maxm = 0
for rownum in range(size[0]):
for colnum in range(1,size[1]-1):
pts = corners_reshaped[rownum, [colnum-1, colnum, colnum+1]]
top = np.linalg.norm(pts[2] + pts[0] - 2*pts[1])
bot = np.linalg.norm(pts[2] - pts[0])
if np.abs(bot) < 1e-9:
return 1
maxm = max(top/bot, maxm)
for colnum in range(0,size[1]):
for rownum in range(1, size[0]-1):
pts = corners_reshaped[[rownum-1, rownum, rownum+1], colnum]
top = np.linalg.norm(pts[2] + pts[0] - 2*pts[1])
bot = np.linalg.norm(pts[2] - pts[0])
if np.abs(bot) < 1e-9:
return 1
maxm = max(top/bot, maxm)
return maxm
def make_mask_line(shape, start, end, thickness=2):
start = tuple([int(x) for x in start])
end = tuple([int(x) for x in end])
mask = np.zeros(shape)
cv2.line(mask, start, end, 1, thickness)
return mask
# TODO: this should be replaced by the growing checkerboard from the Geiger et al paper
def reorder_checkerboard(corners, gray, size=(9,6)):
corners_xy = corners[:, :2]
tree = cKDTree(corners_xy)
dist, ix_mid = tree.query(np.median(corners_xy, axis=0))
corner_mid = corners_xy[ix_mid]
dists, ixs = tree.query(corner_mid, k=7)
dx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
dy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
dmag = np.abs(dx + dy*1j)
ixs = [i for i in ixs[1:] if i < corners_xy.shape[0]]
mags = []
for ix in ixs[1:]:
mask = make_mask_line(
gray.shape, corner_mid[::-1], corners_xy[ix, ::-1], thickness=3
)
mask /= np.sum(mask)
mag = np.sum(mask * dmag)
mags.append(mag)
mags = np.array(mags) / np.max(mags)
corners_selected = corners_xy[ixs[1:]][mags > 0.7]
mags = mags[mags > 0.7]
dirs = corners_selected - corner_mid
dirs_norm = dirs / np.linalg.norm(dirs, axis=1)[:, None]
ax1 = dirs[np.argmax(mags)]
ax2 = dirs[np.argmin(np.abs(np.dot(dirs_norm, ax1)))]
ax1 *= np.sign(np.sum(ax1))
ax2 *= np.sign(np.sum(ax2))
starts = np.argsort(np.dot(corners_xy, ax1 + ax2))
ixs_best = None
d_best = np.inf
# score_best = np.inf
start_best = 0
for start in starts[:2]:
start_xy = corners_xy[start]
for (ax1_test, ax2_test) in [[ax1,ax2],[ax2,ax1]]:
## better estimate of axes
_, right_ix = tree.query(ax1_test + start_xy)
_, bot_ix = tree.query(ax2_test + start_xy)
ax1_new = 0.6*ax1_test + 0.4*(corners_xy[right_ix] - start_xy)
ax2_new = 0.6*ax2_test + 0.4*(corners_xy[bot_ix] - start_xy)
xs, ys = np.mgrid[:size[0], :size[1]]
offsets = xs[:, :, None] * ax1_new + ys[:, :, None] * ax2_new
points_query = (start_xy + offsets).reshape(-1, 2)
dists, ixs = tree.query(points_query)
# score = checkerboard_score(corners[ixs], size)
d = np.max(dists)
if d < d_best:
# score_best = score
d_best = d
ixs_best = ixs
start_best = start
return np.copy(corners[ixs_best]), d_best
## TODO: make this trimming better, it's pretty hacky right now
def trim_picture(gray):
laplace = cv2.Laplacian(gray, cv2.CV_64F)
laplace_blur = cv2.blur(np.abs(laplace), (100,100))
thres = np.percentile(laplace_blur, 92)
img_thres = np.uint8(laplace_blur > thres)
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(img_thres)
best = np.argmax(stats[1:, 4]) + 1
mask = labels == best
if stats[best, 4] < 4000:
return None, None
mgrid = np.mgrid[:mask.shape[0], :mask.shape[1]]
lowy, lowx = np.min(mgrid[:, mask], axis=1)
highy, highx = np.max(mgrid[:, mask], axis=1)
lowy = max(lowy-50, 0)
highy = min(highy+50, mask.shape[0])
lowx = max(lowx-50, 0)
highx = min(highx+50, mask.shape[1])
gray = gray[lowy:highy,lowx:highx]
crop_start = np.array([lowx, lowy])
return gray, crop_start
def detect_checkerboard(gray, size=(9,6), winsize=9, trim=False):
if trim:
gray, crop_start = trim_picture(gray)
if gray is None:
return None, 1.0
else:
crop_start = [0,0]
diff = normalize_image(gray)
radiuses = [winsize+3]
if winsize >= 8:
radiuses.append(winsize-3)
corr = detect_corners(diff, radiuses=radiuses)
corrb = cv2.GaussianBlur(corr, (7,7),3)
corners = get_corner_candidates(corrb, winsize+2, np.max(corrb)*0.2)
if len(corners) < size[0]*size[1]:
return None, 1.0
corners = non_maximum_suppression(corners, winsize-2)
corners_sp = refine_corners(corners, diff, winsize=winsize+2)
# corners_sp = refine_corners(corners_sp, diff, winsize=max(winsize//2-1,5),
# check_only=True)
# corners_sp = refine_corners(corners_sp, diff, winsize=5,
# check_only=True)
scores = corners_sp[:, 2]
num_corners = size[0]*size[1]
if len(corners_sp) < size[0]*size[1]:
return None, 1.0
best_ix = np.argsort(-scores)[:num_corners+3]
best_corners = corners_sp[np.sort(best_ix)]
best_corners, max_dist = reorder_checkerboard(best_corners, diff, size)
check_score = checkerboard_score(best_corners, size)
if len(np.unique(best_corners, axis=0)) < num_corners:
check_score = 1
if np.isnan(check_score) or check_score > 0.3:
# print('trying with extra points...')
best_ix = np.argsort(-scores)[:num_corners+10]
best_corners = corners_sp[np.sort(best_ix)]
best_corners, max_dist = reorder_checkerboard(best_corners, diff, size)
check_score = checkerboard_score(best_corners, size)
# corner_scores = best_corners[:, 2]
# print('corner_scores', np.mean(corner_scores))
# print('max dist', max_dist)
# print('checkerboard score', check_score)
corners_opencv = np.copy(best_corners[:, :2])
corners_opencv[:, 0] = best_corners[:, 1] + crop_start[1]
corners_opencv[:, 1] = best_corners[:, 0] + crop_start[0]
corners_opencv = corners_opencv[:, None]
if check_score > 0.3 \
or len(best_corners) < num_corners \
or max_dist > winsize*3:
return None, 1.0
else:
return corners_opencv, check_score
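# Example usage (sketch; the image path is hypothetical, cv2 is already imported above):
# gray = cv2.imread('board.png', cv2.IMREAD_GRAYSCALE)
# corners, score = detect_checkerboard(gray, size=(9, 6))
# if corners is not None:
#     print('detected %d corners, score %.3f' % (len(corners), score))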
| 2.5
| 2
|
24_pair_swap_ll.py
|
ojhaanshu87/LeetCode
| 0
|
12776932
|
'''
Given a linked list, swap every two adjacent nodes and return its head.
You must solve the problem without modifying the values in the list's nodes (i.e., only nodes themselves may be changed.)
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def swapPairs(self, head):
#edge case
if not head or not head.next:
return head
#get pre_node and dummy point to node 0
pre_node = dummy = ListNode(0)
#link dummy to head
dummy.next = head
while head and head.next:
#set next_node node after head each time
next_node = head.next
#swapping
head.next = next_node.next
next_node.next = head
pre_node.next = next_node
#after swapping
            #move head one node forward because head is at node 1
head = head.next
#move pre_node to one node before head which is next_node.next node
pre_node = next_node.next
return dummy.next
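# Example (sketch; assumes the ListNode definition above is uncommented):
# nodes = [ListNode(v) for v in (1, 2, 3, 4)]
# for a, b in zip(nodes, nodes[1:]):
#     a.next = b
# head = Solution().swapPairs(nodes[0])  # expected order: 2 -> 1 -> 4 -> 3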
| 3.90625
| 4
|
scattering/scattering1d/tests/test_utils.py
|
louity/scattering_transform
| 0
|
12776933
|
import torch
from torch.autograd import Variable
from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier
from scattering.scattering1d.utils import compute_border_indices
import numpy as np
import pytest
def test_pad1D(random_state=42):
"""
Tests the correctness and differentiability of pad1D
"""
torch.manual_seed(random_state)
N = 128
for pad_left in range(0, N, 16):
for pad_right in range(0, N, 16):
x = Variable(torch.randn(100, 4, N), requires_grad=True)
x_pad = pad1D(x, pad_left, pad_right, mode='reflect')
# Check the size
x2 = x.data.clone()
x_pad2 = x_pad.data.clone()
for t in range(1, pad_left + 1):
diff = x_pad2[..., pad_left - t] - x2[..., t]
assert torch.max(torch.abs(diff)) <= 1e-7
for t in range(x2.shape[-1]):
diff = x_pad2[..., pad_left + t] - x2[..., t]
assert torch.max(torch.abs(diff)) <= 1e-7
for t in range(1, pad_right + 1):
diff = x_pad2[..., x_pad.shape[-1] - 1 - pad_right + t]
diff -= x2[..., x.shape[-1] - 1 - t]
assert torch.max(torch.abs(diff)) <= 1e-7
# check the differentiability
loss = 0.5 * torch.sum(x_pad**2)
loss.backward()
# compute the theoretical gradient for x
x_grad_original = x.data.clone()
x_grad = x_grad_original.new(x_grad_original.shape).fill_(0.)
x_grad += x_grad_original
for t in range(1, pad_left + 1):
x_grad[..., t] += x_grad_original[..., t]
for t in range(1, pad_right + 1): # it is counted twice!
t0 = x.shape[-1] - 1 - t
x_grad[..., t0] += x_grad_original[..., t0]
# get the difference
diff = x.grad.data - x_grad
assert torch.max(torch.abs(diff)) <= 1e-7
    # Check that pad1D raises an error if we try to pad by the full signal length
with pytest.raises(ValueError):
pad1D(x, x.shape[-1], 0, mode='reflect')
with pytest.raises(ValueError):
pad1D(x, 0, x.shape[-1], mode='reflect')
def test_modulus(random_state=42):
"""
Tests the stability and differentiability of modulus
"""
torch.manual_seed(random_state)
# Test with a random vector
x = Variable(torch.randn(100, 4, 128, 2), requires_grad=True)
x_abs = modulus(x)
assert len(x_abs.shape) == len(x.shape) - 1
# check the value
x_abs2 = x_abs.data.clone()
x2 = x.data.clone()
diff = x_abs2 - torch.sqrt(x2[..., 0]**2 + x2[..., 1]**2)
assert torch.max(torch.abs(diff)) <= 1e-7
# check the gradient
loss = torch.sum(x_abs)
loss.backward()
x_grad = x2 / x_abs2.unsqueeze(-1)
diff = x.grad.data - x_grad
assert torch.max(torch.abs(diff)) <= 1e-7
# Test the differentiation with a vector made of zeros
x0 = Variable(torch.zeros(100, 4, 128, 2), requires_grad=True)
x_abs0 = modulus(x0)
loss0 = torch.sum(x_abs0)
loss0.backward()
assert torch.max(torch.abs(x0.grad.data)) <= 1e-7
def test_subsample_fourier(random_state=42):
"""
Tests whether the periodization in Fourier performs a good subsampling
in time
"""
rng = np.random.RandomState(random_state)
J = 10
x = rng.randn(100, 4, 2**J) + 1j * rng.randn(100, 4, 2**J)
x_fft = np.fft.fft(x, axis=-1)[..., np.newaxis]
x_fft.dtype = 'float64' # make it a vector
x_fft_th = torch.from_numpy(x_fft)
for j in range(J + 1):
x_fft_sub_th = subsample_fourier(x_fft_th, 2**j)
x_fft_sub = x_fft_sub_th.numpy()
x_fft_sub.dtype = 'complex128'
x_sub = np.fft.ifft(x_fft_sub[..., 0], axis=-1)
assert np.max(np.abs(x[:, :, ::2**j] - x_sub)) < 1e-7
def test_border_indices(random_state=42):
"""
Tests whether the border indices to unpad are well computed
"""
rng = np.random.RandomState(random_state)
J_signal = 10 # signal lives in 2**J_signal
J = 6 # maximal subsampling
T = 2**J_signal
i0 = rng.randint(0, T // 2 + 1, 1)[0]
i1 = rng.randint(i0 + 1, T, 1)[0]
x = np.ones(T)
x[i0:i1] = 0.
ind_start, ind_end = compute_border_indices(J, i0, i1)
for j in range(J + 1):
assert j in ind_start.keys()
assert j in ind_end.keys()
x_sub = x[::2**j]
# check that we did take the strict interior
assert np.max(x_sub[ind_start[j]:ind_end[j]]) == 0.
# check that we have not forgotten points
if ind_start[j] > 0:
assert np.min(x_sub[:ind_start[j]]) > 0.
if ind_end[j] < x_sub.shape[-1]:
assert np.min(x_sub[ind_end[j]:]) > 0.
| 2.25
| 2
|
hw2/ref/.py
|
kfirgirstein/DIP_HW_cs236860
| 0
|
12776934
|
<reponame>kfirgirstein/DIP_HW_cs236860
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import fftpack,signal
import scipy.signal
import skimage.metrics
class PSFManager:
def CreatePSF(self,name:str, **kwargs):
choice = name.lower()
if choice == 'sinc':
return self.__sinc__(**kwargs)
elif choice == 'gaussian':
return self.__gaussian__(**kwargs)
elif choice == 'box':
return self.__box__(**kwargs)
elif choice == 'gaussian_kernel':
return self.__gaussian_kernel__(**kwargs)
else:
raise Exception(f"invalid name {name}")
def ApplyPSF(self,image,psf):
if psf is None or image is None:
raise Exception(f"invalid parames")
psf_image = signal.convolve2d(image, psf, mode='same', boundary='wrap')
psnr = skimage.metrics.peak_signal_noise_ratio(image, psf_image)
return (psf_image,psnr)
def __sinc__(self,window_size, filter_range):
x = np.linspace(- filter_range, filter_range, window_size)
xx = np.outer(x, x)
s = np.sinc(xx)
s = s / s.sum()
return s
def __gaussian__(self,window_size, filter_range, mu, sigma):
z = np.linspace(- filter_range, filter_range, window_size)
x, y = np.meshgrid(z, z)
d = np.sqrt(x*x+y*y)
g = np.exp(-((d-mu)**2 / (2.0 * sigma**2)))
g = g / g.sum()
return g
def __box__(self,width,height,box_size):
h_ = int(height / 2)
w_ = int(width / 2)
d_ = int(box_size / 2)
PSF_box_ = np.zeros((height, width))
PSF_box_[h_ - d_:h_ + d_, w_ - d_:w_ + d_] = 1 / (box_size ** 2)
return PSF_box_
def __gaussian_kernel__(self,size, std=1):
edge = size // 2
ax = np.linspace(-edge, edge, num=size)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-(xx ** 2 + yy ** 2) / (2 * std **2))
return kernel / kernel.sum()
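# Example usage (sketch; the input image path is hypothetical):
# manager = PSFManager()
# psf = manager.CreatePSF('gaussian', window_size=15, filter_range=3, mu=0, sigma=1)
# image = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE).astype(np.float64)
# blurred, psnr = manager.ApplyPSF(image, psf)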
| 2.390625
| 2
|
plugins/postgresql_alt.py
|
swarm64/s64-sosreport-plugins
| 0
|
12776935
|
# -*- coding: utf8 -*-
import os
from shlex import split as shlex_split
from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
from subprocess import check_output, CalledProcessError
from typing import Dict, List, Optional, Tuple
import psycopg2
DEFAULT_DSN = 'postgresql://postgres@localhost/postgres'
class LoggingInfo:
def __init__(self, collect_logs, log_dir, data_dir):
self.collect_logs = collect_logs
self.log_dir = log_dir
self.data_dir = data_dir
class PostgreSQLAlt(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""PostgreSQL alternative collection plugin"""
plugin_name = "postgresql_alt"
requires_root = False
short_desc = 'PostgreSQL alternative collection plugin'
option_list = [
('dsn', 'The PostgreSQL DSN to collect information from.', '', DEFAULT_DSN),
('container_id', 'The docker container id where PostgreSQL runs in.', '', '')
]
@classmethod
def do_connect(cls, dsn: str) -> Tuple[object, Optional[Exception]]:
try:
conn = psycopg2.connect(dsn=dsn)
except psycopg2.Error as err:
return (None, err)
return (conn, None)
@classmethod
def _do_query(cls, conn: object, sql: str) -> Tuple[str, Optional[Exception]]:
try:
with conn.cursor() as cur:
cur.execute(sql)
return (cur.fetchall(), None)
except psycopg2.Error as err:
return (None, err)
@classmethod
def get_config(cls, conn: object) -> Tuple[List, Optional[Exception]]:
sql = 'SELECT name, setting FROM pg_settings ORDER BY name ASC'
return cls._do_query(conn, sql)
@classmethod
def config_to_string(cls, config: List[Tuple[str, str]]) -> str:
def normalize_string(s):
return s if s else "''"
return '\n'.join([f'{key} = {normalize_string(value)}' for key, value in config])
@classmethod
def get_logging_info(cls, conn: object) -> Tuple[LoggingInfo, Optional[Exception]]:
logging_info = LoggingInfo(False, '', '')
try:
with conn.cursor() as cur:
cur.execute('''
SELECT name, setting
FROM pg_settings
WHERE name IN (
'log_destination'
, 'logging_collector'
, 'log_directory'
, 'data_directory'
)''')
logging_config = cur.fetchall()
logging_config = {key:value for key, value in logging_config}
log_destinations = logging_config['log_destination'].split(',')
logging_collector = logging_config['logging_collector']
logging_info.log_dir = logging_config['log_directory']
logging_info.data_dir = logging_config['data_directory']
except psycopg2.Error as err:
return (logging_info, err)
except KeyError as err:
return (logging_info, err)
if any(item in ['stderr', 'csvlog'] for item in log_destinations):
if logging_collector == 'on':
logging_info.collect_logs = True
return (logging_info, None)
@classmethod
def docker_get_data_dir_host(cls, container_id: str, pg_data_dir: str) -> Tuple[str, Optional[Exception]]:
inspect_cmd = "docker inspect -f "
inspect_cmd += "'{{ range .Mounts }}{{ println .Destination .Source }}{{ end }}' "
inspect_cmd += container_id
try:
docker_mounts = check_output(shlex_split(inspect_cmd), universal_newlines=True)
docker_mounts = docker_mounts.split('\n')
data_dir = [mount.split(' ')[1] for mount in docker_mounts if pg_data_dir in mount][1]
except CalledProcessError as err:
return ('', err)
except IndexError as err:
return ('', err)
return (data_dir, None)
@classmethod
def get_s64_license(cls, conn: object) -> Tuple[Dict, str]:
sql = 'SELECT * FROM swarm64da.show_license()'
license_info, err = cls._do_query(conn, sql)
if err:
return (None, err)
if not license_info:
return ({}, err)
license_info = license_info[0]
return ({
'type': license_info[0],
'start': license_info[1],
'expiry': license_info[2],
'customer': license_info[3]
}, err)
def write_output(self, output):
self.add_string_as_file(output, 'postgresql.conf')
def setup(self):
dsn = self.get_option('dsn')
conn, error = PostgreSQLAlt.do_connect(dsn)
if error:
self.write_output(f'Could not connect to PostgreSQL to get config: {error}')
return
config, error = PostgreSQLAlt.get_config(conn)
if error:
self.write_output(f'Could not get PostgreSQL config: {error}')
return
config_str = PostgreSQLAlt.config_to_string(config)
self.write_output(config_str)
logging_info, error = PostgreSQLAlt.get_logging_info(conn)
if error:
self.write_output(f'Could not get log collection info: {error}')
return
container_id = self.get_option('container_id')
if logging_info.collect_logs and container_id:
data_dir_host = PostgreSQLAlt.docker_get_data_dir_host(container_id, logging_info.data_dir)
log_dir_host = os.path.join(data_dir_host, logging_info.log_dir, '*')
self.add_copy_spec(log_dir_host)
license_info, error = PostgreSQLAlt.get_s64_license(conn)
if error:
self.write_output(f'Could not get Swarm64 license: {error}')
self.write_output(f'Swarm64 license info: {str(license_info)}')
| 2.09375
| 2
|
api/website_analysis/views.py
|
gpiechnik2/senter
| 2
|
12776936
|
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import action
import json
from .serializers import SEOSerializer
from .utils import website_analysis
class SEOViewSet(viewsets.ViewSet):
"""
A viewset for creating seo analysis.
"""
@action(detail = False, methods = ['post'], permission_classes=[IsAuthenticated])
def create(self, request, *args, **kwargs):
serializer = SEOSerializer(data = request.data)
serializer.is_valid(raise_exception = True)
if self.request.user.is_anonymous:
return Response(status = status.HTTP_401_UNAUTHORIZED)
url = serializer.validated_data['url']
user_agent = self.request.user.user_agent
try:
analysis = website_analysis(url, user_agent)
except Exception:
analysis = []
return Response({
"analysis": analysis,
}, status = status.HTTP_200_OK)
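# Possible registration sketch (assumes a DRF DefaultRouter in the project's urls.py;
# route name and basename are illustrative, not part of this module):
# router.register(r'seo', SEOViewSet, basename='seo')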
| 2.34375
| 2
|
src/data_manager/data_file_manager.py
|
alliance-genome/agr_preprocess
| 0
|
12776937
|
import logging, yaml, os, sys, json, urllib3, requests
from cerberus import Validator
from files import JSONFile
from common import Singleton
from common import ContextInfo
from urllib.parse import urlparse
from .data_type_config import DataTypeConfig
logger = logging.getLogger(__name__)
class DataFileManager(metaclass=Singleton):
def __init__(self, config_file_loc):
self.context_info = ContextInfo()
# Load config yaml.
logger.debug('Loading config file: %s' % config_file_loc)
config_file = open(config_file_loc, 'r')
self.config_data = yaml.load(config_file, Loader=yaml.SafeLoader)
logger.debug("Config Data: %s" % self.config_data)
# Load validation yaml.
validation_yaml_file_loc = os.path.abspath('src/config/validation.yml')
logger.debug('Loading validation schema: %s' % validation_yaml_file_loc)
validation_schema_file = open(validation_yaml_file_loc, 'r')
self.validation_schema = yaml.load(validation_schema_file, Loader=yaml.SafeLoader)
# Assign values for thread counts.
self.FileTransactorThreads = self.config_data['FileTransactorThreads']
# Loading a JSON blurb from a file as a placeholder for submission system query.
other_file_meta_data = os.path.abspath('src/config/local_submission.json')
self.non_submission_system_data = JSONFile().get_data(other_file_meta_data)
urllib3.disable_warnings()
self.http = urllib3.PoolManager()
# use the recently created snapshot
api_url = self.context_info.env["FMS_API_URL"] + '/api/snapshot/release/' + self.context_info.env["ALLIANCE_RELEASE"]
logger.info(api_url)
submission_data = self.http.request('GET', api_url)
if submission_data.status != 200:
logger.error("Status: %s" % submission_data.status)
logger.error("No Data came from API: %s" % api_url)
sys.exit(-1)
self.snapshot_submission_system_data = json.loads(submission_data.data.decode('UTF-8'))
logger.debug(self.snapshot_submission_system_data)
for dataFile in self.non_submission_system_data['snapShot']['dataFiles']:
self.snapshot_submission_system_data['snapShot']['dataFiles'].append(dataFile)
logger.debug(self.snapshot_submission_system_data)
# List used for MOD and data type objects.
self.master_data_dictionary = {}
# Dictionary for transformed submission system data.
self.transformed_submission_system_data = {}
# process config file during initialization
self.process_config()
def get_FT_thread_settings(self):
return self.FileTransactorThreads
def get_config(self, data_type):
# Get the object for a data type. If the object doesn't exist, this returns None.
logger.debug("Getting config for: [%s] -> Config[%s]" % (data_type, self.master_data_dictionary))
return self.master_data_dictionary.get(data_type)
def dispatch_to_object(self):
# This function sends off our data types to become DataTypeConfig objects.
# The smaller SubTypeConfig objects are created in the DataTypeConfig functions, see data_type_config.py.
for config_entry in self.transformed_submission_system_data.keys():
# Skip string entries (e.g. schemaVersion, releaseVersion).
if isinstance(self.transformed_submission_system_data[config_entry], str):
continue
logger.debug('Processing DataType: %s' % config_entry)
# Create our data type object and add it to our master dictionary filed under the config_entry.
# e.g. Create BGI DataTypeConfig object and file it under BGI in the dictionary.
self.master_data_dictionary[config_entry] = DataTypeConfig(config_entry,
self.transformed_submission_system_data[config_entry])
def download_and_validate(self):
logger.debug('Beginning download and validation.')
for entry in self.master_data_dictionary.keys():
logger.debug('Downloading %s data.' % entry)
if isinstance(self.master_data_dictionary[entry], DataTypeConfig): # If we're dealing with an object.
self.master_data_dictionary[entry].get_data()
logger.debug('done with %s data.' % entry)
def process_config(self):
# This checks for the validity of the YAML file.
# See src/config/validation.yml for the layout of the schema.
# TODO Add requirement checking and more validation to the YAML schema.
validator = Validator(self.validation_schema)
validation_results = validator.validate(self.config_data)
if validation_results is True:
logger.debug('Config file validation successful.')
else:
logger.critical('Config file validation unsuccessful!')
for field, values in validator.errors.items():
for value in values: # May have more than one error per field.
message = field + ': ' + value
logger.critical(message)
logger.critical('Exiting')
sys.exit(-1)
# Query the submission system for the required data.
self.query_submission_system()
# Create our DataTypeConfig (which in turn create our SubTypeConfig) objects.
self.dispatch_to_object()
def _search_submission_data(self, dataType, dataSubType):
try:
returned_dict = next(item for item in self.snapshot_submission_system_data['snapShot']['dataFiles']
if item['dataType'].get('name') == dataType and item['dataSubType'].get('name') == dataSubType)
except StopIteration:
logger.debug('dataType: %s subType: %s not found in submission system data.' % (dataType, dataSubType))
logger.debug('Creating entry with \'None\' path and extracted path.')
returned_dict = {
'dataType': dataType,
'subType': dataSubType,
'path': None,
'tempExtractedFile': None
}
return returned_dict
def _query_api_datafile_latest(self, dataType, dataSubType):
api_url = self.context_info.env["FMS_API_URL"] + '/api/datafile/by/' + self.context_info.env["ALLIANCE_RELEASE"] + '/' + dataType + '/' + dataSubType + '?latest=true'
logger.debug(api_url)
submission_data = self.http.request('GET', api_url)
if submission_data.status != 200:
logger.error("Status: %s" % submission_data.status)
logger.error("No Data came from API: %s" % api_url)
sys.exit(-1)
endpoint_submission_system_data = json.loads(submission_data.data.decode('UTF-8'))
logger.debug(endpoint_submission_system_data)
s3Url = endpoint_submission_system_data[0].get('s3Url')
returned_dict = {
'dataType': dataType,
'subType': dataSubType,
's3Url': s3Url,
'tempExtractedFile': None
}
return returned_dict
def query_submission_system(self):
self.transformed_submission_system_data['releaseVersion'] = self.snapshot_submission_system_data['snapShot']['releaseVersion']['releaseVersion']
config_values_to_ignore = [
'releaseVersion', # Manually assigned above.
'schemaVersion', # There is no endpoint for latest schema version in api
'FileTransactorThreads',
'Neo4jTransactorThreads'
]
for datatype in self.config_data.keys(): # Iterate through our config file.
logger.debug("Datatype: %s" % datatype)
if datatype not in config_values_to_ignore: # Skip these entries.
self.transformed_submission_system_data[datatype] = [] # Create our empty list.
for sub_datatype in self.config_data[datatype]:
# to process by querying the api for the latest path
submission_system_dict = self._query_api_datafile_latest(datatype, sub_datatype)
# to process by using the release snapshot for that path
# submission_system_dict = self._search_submission_data(datatype, sub_datatype)
path = submission_system_dict.get('s3Url')
logger.debug("datatype %s sub_datatype %s path %s" % (datatype, sub_datatype, path))
tempExtractedFile = submission_system_dict.get('tempExtractedFile')
if tempExtractedFile is None or tempExtractedFile == '':
tempExtractedFile = urlparse(submission_system_dict.get('s3Url')).path[1:]
tempExtractedFile = os.path.basename(tempExtractedFile)
if tempExtractedFile is not None and len(tempExtractedFile) > 0 and tempExtractedFile.endswith('gz'):
tempExtractedFile = os.path.splitext(tempExtractedFile)[0]
self.transformed_submission_system_data[datatype].append([sub_datatype, path, tempExtractedFile])
else:
logger.debug("Ignoring datatype: %s" % datatype)
logger.debug("Loaded Types: %s" % self.transformed_submission_system_data)
| 2.109375
| 2
|
No_11_sunPro/No_11_sunPro/pipelines.py
|
a904919863/Spiders_Collection
| 3
|
12776938
|
<reponame>a904919863/Spiders_Collection<filename>No_11_sunPro/No_11_sunPro/pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class No11SunproPipeline:
fp = None
def open_spider(self, spider):
self.fp = open('sun.txt','w')
def process_item(self, item, spider):
if item.__class__.__name__ =='No11SunproItem':
self.fp.write(item['title']+'\n')
else:
self.fp.write(item['new_id']+item['content']+'\n')
return item
def close_spider(self, spider):
self.fp.close()
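# Reminder (standard Scrapy convention, not project-specific): enable this pipeline in settings.py, e.g.
# ITEM_PIPELINES = {'No_11_sunPro.pipelines.No11SunproPipeline': 300}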
| 2.5
| 2
|
evobench/discrete/isg/parser.py
|
piotr-rarus/evobench
| 6
|
12776939
|
from pathlib import Path
import numpy as np
from .config import Config
from .spin import Spin
def load(path: Path) -> Config:
with path.open() as file:
lines = file.readlines()
global_optimum, best_solution = lines[0].split(' ')
global_optimum = float(global_optimum.strip())
best_solution = best_solution.strip()
best_solution = [float(gene) for gene in best_solution]
best_solution = np.array(best_solution)
spin_configs = []
for line in lines[2:]:
a_index, b_index, factor = line.split(' ')
a_index = int(a_index)
b_index = int(b_index)
factor = int(factor)
spin_config = Spin(
a_index,
b_index,
factor
)
spin_configs.append(spin_config)
config = Config(
path.name,
global_optimum,
best_solution,
spin_configs
)
return config
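# Example (sketch; the instance path is hypothetical and the attribute names are assumed
# from the positional Config(...) call above):
# config = load(Path('instances/example_instance.txt'))
# print(config.name, config.global_optimum, len(config.spin_configs))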
| 2.71875
| 3
|
app/db/repositories/articles.py
|
yasudakn/fastapi-realworld-example-app
| 0
|
12776940
|
from typing import List, Optional, Sequence, Union
import os
from aiocache import cached, Cache
from aiocache.serializers import PickleSerializer
from asyncpg import Connection, Record
from pypika import Query
from app.db.errors import EntityDoesNotExist
from app.db.queries.queries import queries
from app.db.queries.tables import (
Parameter,
articles,
articles_to_tags,
favorites,
tags as tags_table,
users,
)
from app.db.repositories.base import BaseRepository
from app.db.repositories.profiles import ProfilesRepository
from app.db.repositories.tags import TagsRepository
from app.models.domain.articles import Article
from app.models.domain.users import User
from app.db.caches import key_builder
AUTHOR_USERNAME_ALIAS = "author_username"
SLUG_ALIAS = "slug"
CAMEL_OR_SNAKE_CASE_TO_WORDS = r"^[a-z\d_\-]+|[A-Z\d_\-][^A-Z\d_\-]*"
class ArticlesRepository(BaseRepository): # noqa: WPS214
def __init__(self, conn: Connection) -> None:
super().__init__(conn)
self._profiles_repo = ProfilesRepository(conn)
self._tags_repo = TagsRepository(conn)
async def create_article( # noqa: WPS211
self,
*,
slug: str,
title: str,
description: str,
body: str,
author: User,
tags: Optional[Sequence[str]] = None,
) -> Article:
async with self.connection.transaction():
article_row = await queries.create_new_article(
self.connection,
slug=slug,
title=title,
description=description,
body=body,
author_username=author.username,
)
if tags:
await self._tags_repo.create_tags_that_dont_exist(tags=tags)
await self._link_article_with_tags(slug=slug, tags=tags)
return await self._get_article_from_db_record(
article_row=article_row,
slug=slug,
author_username=article_row[AUTHOR_USERNAME_ALIAS],
requested_user=author,
)
async def update_article( # noqa: WPS211
self,
*,
article: Article,
slug: Optional[str] = None,
title: Optional[str] = None,
body: Optional[str] = None,
description: Optional[str] = None,
) -> Article:
updated_article = article.copy(deep=True)
updated_article.slug = slug or updated_article.slug
updated_article.title = title or article.title
updated_article.body = body or article.body
updated_article.description = description or article.description
async with self.connection.transaction():
updated_article.updated_at = await queries.update_article(
self.connection,
slug=article.slug,
author_username=article.author.username,
new_slug=updated_article.slug,
new_title=updated_article.title,
new_body=updated_article.body,
new_description=updated_article.description,
)
return updated_article
async def delete_article(self, *, article: Article) -> None:
async with self.connection.transaction():
await queries.delete_article(
self.connection,
slug=article.slug,
author_username=article.author.username,
)
async def filter_articles( # noqa: WPS211
self,
*,
tag: Optional[str] = None,
author: Optional[str] = None,
favorited: Optional[str] = None,
limit: int = 20,
offset: int = 0,
requested_user: Optional[User] = None,
) -> List[Article]:
query_params: List[Union[str, int]] = []
query_params_count = 0
# fmt: off
query = Query.from_(
articles,
).select(
articles.id,
articles.slug,
articles.title,
articles.description,
articles.body,
articles.created_at,
articles.updated_at,
Query.from_(
users,
).where(
users.id == articles.author_id,
).select(
users.username,
).as_(
AUTHOR_USERNAME_ALIAS,
),
)
# fmt: on
if tag:
query_params.append(tag)
query_params_count += 1
# fmt: off
query = query.join(
articles_to_tags,
).on(
(articles.id == articles_to_tags.article_id) & (
articles_to_tags.tag == Query.from_(
tags_table,
).where(
tags_table.tag == Parameter(query_params_count),
).select(
tags_table.tag,
)
),
)
# fmt: on
if author:
query_params.append(author)
query_params_count += 1
# fmt: off
query = query.join(
users,
).on(
(articles.author_id == users.id) & (
users.id == Query.from_(
users,
).where(
users.username == Parameter(query_params_count),
).select(
users.id,
)
),
)
# fmt: on
if favorited:
query_params.append(favorited)
query_params_count += 1
# fmt: off
query = query.join(
favorites,
).on(
(articles.id == favorites.article_id) & (
favorites.user_id == Query.from_(
users,
).where(
users.username == Parameter(query_params_count),
).select(
users.id,
)
),
)
# fmt: on
query = query.limit(Parameter(query_params_count + 1)).offset(
Parameter(query_params_count + 2),
)
query_params.extend([limit, offset])
articles_rows = await self.connection.fetch(query.get_sql(), *query_params)
return [
await self._get_article_from_db_record(
article_row=article_row,
slug=article_row[SLUG_ALIAS],
author_username=article_row[AUTHOR_USERNAME_ALIAS],
requested_user=requested_user,
)
for article_row in articles_rows
]
@cached(cache=Cache.REDIS,
serializer=PickleSerializer(),
endpoint=os.environ.get('REDIS_HOST'),
key_builder=key_builder)
async def get_articles_for_user_feed(
self,
*,
user: User,
limit: int = 20,
offset: int = 0,
) -> List[Article]:
articles_rows = await queries.get_articles_for_feed(
self.connection,
follower_username=user.username,
limit=limit,
offset=offset,
)
return [
await self._get_article_from_db_record(
article_row=article_row,
slug=article_row[SLUG_ALIAS],
author_username=article_row[AUTHOR_USERNAME_ALIAS],
requested_user=user,
)
for article_row in articles_rows
]
@cached(cache=Cache.REDIS,
serializer=PickleSerializer(),
endpoint=os.environ.get('REDIS_HOST'),
key_builder=key_builder)
async def get_article_by_slug(
self,
*,
slug: str,
requested_user: Optional[User] = None,
) -> Article:
article_row = await queries.get_article_by_slug(self.connection, slug=slug)
if article_row:
return await self._get_article_from_db_record(
article_row=article_row,
slug=article_row[SLUG_ALIAS],
author_username=article_row[AUTHOR_USERNAME_ALIAS],
requested_user=requested_user,
)
raise EntityDoesNotExist("article with slug {0} does not exist".format(slug))
@cached(cache=Cache.REDIS,
serializer=PickleSerializer(),
endpoint=os.environ.get('REDIS_HOST'),
key_builder=key_builder)
async def get_tags_for_article_by_slug(self, *, slug: str) -> List[str]:
tag_rows = await queries.get_tags_for_article_by_slug(
self.connection,
slug=slug,
)
return [row["tag"] for row in tag_rows]
@cached(cache=Cache.REDIS,
serializer=PickleSerializer(),
endpoint=os.environ.get('REDIS_HOST'),
key_builder=key_builder)
async def get_favorites_count_for_article_by_slug(self, *, slug: str) -> int:
return (
await queries.get_favorites_count_for_article(self.connection, slug=slug)
)["favorites_count"]
async def is_article_favorited_by_user(self, *, slug: str, user: User) -> bool:
return (
await queries.is_article_in_favorites(
self.connection,
username=user.username,
slug=slug,
)
)["favorited"]
async def add_article_into_favorites(self, *, article: Article, user: User) -> None:
await queries.add_article_to_favorites(
self.connection,
username=user.username,
slug=article.slug,
)
async def remove_article_from_favorites(
self,
*,
article: Article,
user: User,
) -> None:
await queries.remove_article_from_favorites(
self.connection,
username=user.username,
slug=article.slug,
)
@cached(cache=Cache.REDIS,
serializer=PickleSerializer(),
endpoint=os.environ.get('REDIS_HOST'),
key_builder=key_builder)
async def _get_article_from_db_record(
self,
*,
article_row: Record,
slug: str,
author_username: str,
requested_user: Optional[User],
) -> Article:
return Article(
id_=article_row["id"],
slug=slug,
title=article_row["title"],
description=article_row["description"],
body=article_row["body"],
author=await self._profiles_repo.get_profile_by_username(
username=author_username,
requested_user=requested_user,
),
tags=await self.get_tags_for_article_by_slug(slug=slug),
favorites_count=await self.get_favorites_count_for_article_by_slug(
slug=slug,
),
favorited=await self.is_article_favorited_by_user(
slug=slug,
user=requested_user,
)
if requested_user
else False,
created_at=article_row["created_at"],
updated_at=article_row["updated_at"],
)
async def _link_article_with_tags(self, *, slug: str, tags: Sequence[str]) -> None:
await queries.add_tags_to_article(
self.connection,
[{SLUG_ALIAS: slug, "tag": tag} for tag in tags],
)
| 2.078125
| 2
|
tests_compiled/to_revive/test_rank1_wrapper.py
|
Pressio/pressio4py
| 4
|
12776941
|
<filename>tests_compiled/to_revive/test_rank1_wrapper.py
import pytest, math
import numpy as np
import test_rank1_wrapper_module as m
def testCnstr0():
print("testCnstr0")
a = m.construct0(5)
assert(a.shape[0] == 5)
def testCnstr1():
print("testCnstr1")
a = np.zeros(5)
a_add = a.__array_interface__['data'][0]
print("a: ", hex(a_add))
assert(m.construct1(a, a_add) == True)
def testCnstr2():
print("testCnstr2")
a = np.zeros(5)
a_add = a.__array_interface__['data'][0]
print("a: ", hex(a_add))
assert(m.construct2(a, a_add) == True)
def testCopyConstr():
print("testCopyConstr")
assert(m.copyConstruct() == True)
def testMoveConstr():
print("testMoveConstr")
a = np.zeros(5)
a_add = a.__array_interface__['data'][0]
print("a: ", hex(a_add))
assert(m.moveCstr(a, a_add) == True)
def testExtent():
print("testExtent")
a = np.zeros(5)
size = m.extent(a)
assert size == 5
def testSubscripting():
print("testSubscripting")
assert(m.subscript() == True)
def testSliceContiguous():
'''
create numpy array
pass a contiguous slice to cpp
modify slice on cpp slice
    python original object should change
'''
print("testSliceContiguous")
a = np.zeros(5)
m.sliceContiguous(a[2:4])
assert( math.isclose(a[2], 1.1) )
assert( math.isclose(a[3], 2.2) )
def testSliceNonContiguous():
'''
create numpy array
pass a non contiguous slice to cpp
(since it is noncont, the cpp side has to make
a copy because a numpy::array is contiguous by definition
so it cannot "view" a contiguous array so it has to make a copy)
modify slice on cpp slice
python origianl object should NOT change
'''
print("testSliceNonContiguous")
a = np.zeros(8)
b = a[2:8:2]
assert(b.shape[0] == 3)
b_add = b.__array_interface__['data'][0]
m.sliceNonContiguous(b, b_add)
gold = np.zeros(8)
assert( np.allclose(a, gold) )
| 2.21875
| 2
|
pubs/server.py
|
WIPACrepo/publication-web-db
| 0
|
12776942
|
"""
Server for publication db
"""
import os
import logging
import binascii
from functools import wraps
from urllib.parse import urlparse
import base64
import csv
from io import StringIO
import itertools
from tornado.web import RequestHandler, HTTPError
from rest_tools.server import RestServer, from_environment, catch_error
import motor.motor_asyncio
import pymongo
from bson.objectid import ObjectId
from . import __version__ as version
from . import PUBLICATION_TYPES, PROJECTS, SITES
from .utils import create_indexes, date_format, add_pub, edit_pub, try_import_file
logger = logging.getLogger('server')
def basic_auth(method):
@wraps(method)
async def wrapper(self, *args, **kwargs):
if not self.current_user:
header = self.request.headers.get('Authorization')
if header is None or not header.lower().startswith('basic '):
self.set_header('WWW-Authenticate', 'Basic realm=IceCube')
self.set_status(401)
self.finish()
return
raise HTTPError(403, reason="authentication failed")
return await method(self, *args, **kwargs)
return wrapper
def get_domain(link):
"""Get domain name of a url"""
if (not link.startswith('http')) and not link.startswith('//'):
link = f'//{link}'
return urlparse(link).netloc
class BaseHandler(RequestHandler):
def initialize(self, db=None, basic_auth=None, debug=False, **kwargs):
super().initialize(**kwargs)
self.db = db
self.basic_auth = basic_auth if basic_auth else {}
self.debug = debug
def set_default_headers(self):
self._headers['Server'] = f'Pub DB {version}'
def get_template_namespace(self):
namespace = super().get_template_namespace()
namespace['domain'] = get_domain
namespace['date_format'] = date_format
namespace['experiment'] = 'IceCube'
namespace['title'] = ''
namespace['PUBLICATION_TYPES'] = PUBLICATION_TYPES
namespace['PROJECTS'] = PROJECTS
namespace['SITES'] = SITES
namespace['error'] = None
namespace['edit'] = False
return namespace
def get_current_user(self):
try:
type, data = self.request.headers['Authorization'].split(' ', 1)
if type.lower() != 'basic':
raise Exception('bad header type')
logger.debug(f'auth data: {data}')
auth_decoded = base64.b64decode(data).decode('ascii')
username, password = str(auth_decoded).split(':', 1)
if self.basic_auth.get(username, None) == password:
return username
except Exception:
if self.debug and 'Authorization' in self.request.headers:
logger.info('Authorization: %r', self.request.headers['Authorization'])
logger.info('failed auth', exc_info=True)
return None
def args_to_match_query(self):
match = {}
if projects := self.get_arguments('projects'):
match['projects'] = {"$all": projects}
if sites := self.get_arguments('sites'):
match['sites'] = {"$all": sites}
start = self.get_argument('start_date', '')
end = self.get_argument('end_date', '')
if start and end:
match['date'] = {"$gte": start, "$lte": end}
elif start:
match['date'] = {"$gte": start}
elif end:
match['date'] = {"$lte": end}
if types := self.get_arguments('type'):
match['type'] = {"$in": types}
if search := self.get_argument('search', ''):
match['$text'] = {"$search": search}
if authors := self.get_arguments('authors'):
match['authors'] = {"$all": authors}
return match, {
'projects': projects,
'sites': sites,
'start_date': start,
'end_date': end,
'type': types,
'search': search,
'authors': authors,
}
async def count_pubs(self):
match, _ = self.args_to_match_query()
return await self.db.publications.count_documents(match)
async def get_pubs(self, mongoid=False):
match, args = self.args_to_match_query()
kwargs = {}
if not mongoid:
kwargs['projection'] = {'_id': False}
if page := self.get_argument('page', None):
page = int(page)
if limit := self.get_argument('limit', None):
limit = int(limit)
pubs = []
i = -1
async for row in self.db.publications.find(match, **kwargs).sort('date', pymongo.DESCENDING):
i += 1
if mongoid:
row['_id'] = str(row['_id'])
if page is not None and limit and i < page*limit:
continue
if 'projects' in row:
row['projects'].sort()
if 'sites' in row:
row['sites'].sort()
pubs.append(row)
if page is not None and limit and len(pubs) >= limit:
break
args['publications'] = pubs
return args
async def get_authors(self):
aggregation = [
{"$unwind": "$authors"},
{"$group": {
"_id": 0,
"authornames": {"$addToSet": "$authors"}
}}
]
authors = []
async for row in self.db.publications.aggregate(aggregation):
authors = row["authornames"]
return authors
class Main(BaseHandler):
async def get(self):
hide_projects = self.get_argument('hide_projects', 'false').lower() == 'true'
pubs = await self.get_pubs()
self.render('main.html', **pubs, hide_projects=hide_projects)
class CSV(BaseHandler):
async def get(self):
pubs = await self.get_pubs()
f = StringIO()
writer = csv.DictWriter(f, fieldnames=list(pubs['publications'][0].keys()))
writer.writeheader()
for p in pubs['publications']:
data = {}
for k in p:
if isinstance(p[k], list):
data[k] = ','.join(p[k])
else:
data[k] = p[k]
writer.writerow(data)
self.write(f.getvalue())
self.set_header('Content-Type', 'text/csv; charset=utf-8')
class Manage(BaseHandler):
@catch_error
@basic_auth
async def get(self):
existing_authors = await self.get_authors()
pubs = await self.get_pubs(mongoid=True)
self.render('manage.html', message='', existing_authors=existing_authors, **pubs)
@catch_error
@basic_auth
async def post(self):
message = ''
try:
if action := self.get_argument('action', None):
if action == 'delete':
mongoid = ObjectId(self.get_argument('pub_id'))
await self.db.publications.delete_one({'_id': mongoid})
elif action == 'new':
doc = {
'title': self.get_argument('new_title').strip(),
'authors': [a.strip() for a in self.get_argument('new_authors').split('\n') if a.strip()],
'date': self.get_argument('new_date'),
'pub_type': self.get_argument('new_type'),
'citation': self.get_argument('new_citation').strip(),
'downloads': [d.strip() for d in self.get_argument('new_downloads').split('\n') if d.strip()],
'projects': self.get_arguments('new_projects'),
'sites': self.get_arguments('new_sites'),
}
await add_pub(db=self.db, **doc)
elif action == 'edit':
mongoid = ObjectId(self.get_argument('pub_id'))
doc = {
'title': self.get_argument('new_title').strip(),
'authors': [a.strip() for a in self.get_argument('new_authors').split('\n') if a.strip()],
'date': self.get_argument('new_date'),
'pub_type': self.get_argument('new_type'),
'citation': self.get_argument('new_citation').strip(),
'downloads': [d.strip() for d in self.get_argument('new_downloads').split('\n') if d.strip()],
'projects': self.get_arguments('new_projects'),
'sites': self.get_arguments('new_sites'),
}
await edit_pub(db=self.db, mongo_id=mongoid, **doc)
elif action == 'import':
if not self.request.files:
raise Exception('no files uploaded')
for files in itertools.chain(self.request.files.values()):
for f in files:
await try_import_file(self.db, f.body.decode('utf-8-sig'))
else:
raise Exception('bad action')
except Exception as e:
if self.debug:
logging.debug('manage error', exc_info=True)
message = f'Error: {e}'
existing_authors = await self.get_authors()
pubs = await self.get_pubs(mongoid=True)
self.render('manage.html', message=message, existing_authors=existing_authors, **pubs)
class APIBaseHandler(BaseHandler):
def write_error(self, status_code=500, **kwargs):
"""Write out custom error json."""
data = {
'code': status_code,
'error': self._reason,
}
self.write(data)
self.finish()
class APIPubs(APIBaseHandler):
@catch_error
async def get(self):
pubs = await self.get_pubs()
self.write(pubs)
class APIPubsCount(APIBaseHandler):
@catch_error
async def get(self):
pubs = await self.count_pubs()
self.write({"count": pubs})
class APIFilterDefaults(APIBaseHandler):
@catch_error
async def get(self):
self.write({
'projects': [],
'sites': [],
'start_date': '',
'end_date': '',
'type': [],
'search': '',
'authors': [],
'hide_projects': False,
})
class APITypes(APIBaseHandler):
@catch_error
async def get(self):
self.write(PUBLICATION_TYPES)
class APIProjects(APIBaseHandler):
@catch_error
async def get(self):
self.write(PROJECTS)
def create_server():
static_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
default_config = {
'HOST': 'localhost',
'PORT': 8080,
'DEBUG': False,
'DB_URL': 'mongodb://localhost/pub_db',
'COOKIE_SECRET': binascii.hexlify(b'secret').decode('utf-8'),
'BASIC_AUTH': '', # user:pass,user:pass
}
config = from_environment(default_config)
logging.info(f'DB: {config["DB_URL"]}')
db_url, db_name = config['DB_URL'].rsplit('/', 1)
logging.info(f'DB name: {db_name}')
db = motor.motor_asyncio.AsyncIOMotorClient(db_url)
create_indexes(db_url, db_name)
users = {v.split(':')[0]: v.split(':')[1] for v in config['BASIC_AUTH'].split(',') if v}
logging.info(f'BASIC_AUTH users: {users.keys()}')
main_args = {
'debug': config['DEBUG'],
'db': db[db_name],
'basic_auth': users,
}
server = RestServer(static_path=static_path, template_path=template_path,
cookie_secret=config['COOKIE_SECRET'], xsrf_cookies=True,
debug=config['DEBUG'])
server.add_route(r'/', Main, main_args)
server.add_route(r'/csv', CSV, main_args)
server.add_route(r'/manage', Manage, main_args)
server.add_route(r'/api/publications', APIPubs, main_args)
server.add_route(r'/api/publications/count', APIPubsCount, main_args)
server.add_route(r'/api/filter_defaults', APIFilterDefaults, main_args)
server.add_route(r'/api/types', APITypes, main_args)
server.add_route(r'/api/projects', APIProjects, main_args)
server.startup(address=config['HOST'], port=config['PORT'])
return server
| 2.234375
| 2
|
mloop_multishot.py
|
zakv/analysislib-mloop
| 0
|
12776943
|
import lyse
import runmanager.remote as rm
import numpy as np
import mloop_config
import sys
import logging
import os
from labscript_utils.setup_logging import LOG_PATH
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('lyse', '2.5.0', '4.0')
check_version('zprocess', '2.13.1', '4.0')
check_version('labscript_utils', '2.12.5', '4.0')
def configure_logging(config):
console_log_level = config['analysislib_console_log_level']
file_log_level = config['analysislib_file_log_level']
LOG_FILENAME = 'analysislib_mloop.log'
global logger
logger = logging.getLogger('analysislib_mloop')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(filename)s:%(funcName)s:%(lineno)d:%(levelname)s: %(message)s'
)
# Set up handlers if not already present from previous runs.
if not logger.handlers:
# Set up console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(console_log_level)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# Set up file handler
full_filename = os.path.join(LOG_PATH, LOG_FILENAME)
file_handler = logging.FileHandler(full_filename, mode='w')
file_handler.setLevel(file_log_level)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('Logger configured.')
def check_runmanager(config):
logger.debug('Checking runmanager...')
msgs = []
logger.debug('Getting globals.')
rm_globals = rm.get_globals()
if not all([x in rm_globals for x in config['mloop_params']]):
msgs.append('Not all optimisation parameters present in runmanager.')
logger.debug('Getting run shots state.')
if not rm.get_run_shots():
msgs.append('Run shot(s) not selected in runmanager.')
logger.debug('Checking for errors in globals.')
if rm.error_in_globals():
msgs.append('Error in runmanager globals.')
logger.debug('Checking number of shots.')
n_shots = rm.n_shots()
if n_shots > 1 and not config['ignore_bad']:
msgs.append(
f'runmanager is set to compile {n_shots:d} shots per request, but your '
+ 'mloop_config has ignore_bad = False. You are advised to (i) remove '
+ 'iterable globals so as to compile one shot per cost or (ii) set '
+ 'ignore_bad = True in your mloop_config and only return one cost with '
+ 'bad = False per sequence.'
)
if msgs:
logger.warning('\n'.join(msgs))
return False
else:
logger.debug('Runmanager ok.')
return True
def verify_globals(config):
logger.debug('Verifying globals...')
# Get the current runmanager globals
logger.debug('Getting values of globals from runmanager.')
rm_globals = rm.get_globals()
current_values = [rm_globals[name] for name in config['mloop_params']]
# Retrieve the parameter values requested by M-LOOP on this iteration
logger.debug('Getting requested globals values from lyse.routine_storage.')
requested_values = lyse.routine_storage.params
requested_dict = dict(zip(config['mloop_params'], requested_values))
# Get the parameter values for the shot we just computed the cost for
logger.debug('Getting lyse dataframe.')
df = lyse.data()
shot_values = [df[name].iloc[-1] for name in config['mloop_params']]
# Verify integrity by cross-checking against what was requested
if not np.array_equal(current_values, requested_values):
message = (
'Cost requested for values different to those in runmanager.\n'
'Please add an executed shot to lyse with: {requested_dict}'
).format(requested_dict=requested_dict)
logger.error(message)
return False
if not np.array_equal(shot_values, requested_values):
message = (
'Cost requested for different values to those used to compute cost.\n'
'Please add an executed shot to lyse with: {requested_dict}'
).format(requested_dict=requested_dict)
logger.error(message)
return False
logger.debug('Globals verified.')
return True
def cost_analysis(cost_key=(None,), maximize=True, x=None):
"""Return a cost dictionary to M-LOOP with at least:
{'bad': True} or {'cost': float}.
    - Look for the latest cost in the cost_key column of the lyse DataFrame
      and an uncertainty ('u_' prefix at the lowest level).
- Report bad shot to M-LOOP if cost is nan or inf.
- Negate value in DataFrame if maximize = True.
- Fallback to reporting a constant or fake cost (from x).
"""
logger.debug('Getting cost...')
cost_dict = {'bad': False}
# Retrieve current lyse DataFrame
logger.debug('Getting lyse dataframe.')
df = lyse.data()
# Use the most recent shot
ix = -1
# Retrieve cost from specified column
if len(df) and cost_key in df:
cost = (df[cost_key].astype(float).values)[ix]
if np.isnan(cost) or np.isinf(cost):
cost_dict['bad'] = True
logger.info('Got bad cost: {cost}'.format(cost=cost))
else:
cost_dict['cost'] = (1 - 2 * maximize) * cost
logger.info('Got cost: {cost}'.format(cost=cost_dict['cost']))
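            # The uncertainty lives in a column with the same multi-index, but with
            # 'u_' prefixed to the lowest level of the cost key.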
u_cost_key = cost_key[:-1] + ('u_' + cost_key[-1],)
if u_cost_key in df:
cost_dict['uncer'] = df[u_cost_key].iloc[ix]
logger.info('Got uncertainty: {uncer}'.format(uncer=cost_dict['uncer']))
# If it doesn't exist, generate a fake cost
elif x is not None:
from fake_result import fake_result
cost_dict['cost'] = (1 - 2 * maximize) * fake_result(x)
logger.info('Faked cost: {cost}'.format(cost=cost_dict['cost']))
# Or just use a constant cost (for debugging)
else:
cost_dict['cost'] = 1.2
logger.info('Faked constant cost: {cost}'.format(cost=cost_dict['cost']))
return cost_dict
if __name__ == '__main__':
config = mloop_config.get()
configure_logging(config)
if not hasattr(lyse.routine_storage, 'queue'):
logger.info('First execution of lyse routine...')
try:
from queue import Queue
except ImportError:
# PY2
from Queue import Queue
logger.debug('Creating queue.')
lyse.routine_storage.queue = Queue()
if (
hasattr(lyse.routine_storage, 'optimisation')
and lyse.routine_storage.optimisation.is_alive()
):
cost_dict = cost_analysis(
cost_key=config['cost_key'] if not config['mock'] else [],
maximize=config['maximize'],
x=lyse.routine_storage.params[0] if config['mock'] else None,
)
if not cost_dict['bad'] or not config['ignore_bad']:
if check_runmanager(config):
if verify_globals(config):
logger.debug('Putting cost in queue.')
lyse.routine_storage.queue.put(cost_dict)
else:
message = 'NOT putting cost in queue because verify_globals failed.'
logger.debug(message)
else:
message = 'NOT putting cost in queue because check_runmanager failed.'
logger.debug(message)
else:
message = (
'NOT putting cost in queue because cost was bad and ignore_bad is True.'
)
logger.debug(message)
elif check_runmanager(config):
logger.info('(Re)starting optimisation process...')
import threading
import mloop_interface
logger.debug('Starting interface thread...')
lyse.routine_storage.optimisation = threading.Thread(
target=mloop_interface.main
)
lyse.routine_storage.optimisation.daemon = True
lyse.routine_storage.optimisation.start()
logger.debug('Interface thread started.')
else:
print(
'\nNot (re)starting optimisation process.',
'Please address above warnings before trying again.',
)
| 1.992188
| 2
|
test/on_yubikey/cli_piv/test_misc.py
|
timo-quinn/yubikey-manager
| 0
|
12776944
|
<gh_stars>0
from ..util import ykman_cli
from .util import PivTestCase
class Misc(PivTestCase):
def test_info(self):
output = ykman_cli('piv', 'info')
self.assertIn('PIV version:', output)
def test_reset(self):
output = ykman_cli('piv', 'reset', '-f')
self.assertIn('Success!', output)
| 2.125
| 2
|
python/ShoppingList_to_excel.py
|
esix2/coffee-accounting
| 0
|
12776945
|
import pandas as pd
import os
from openpyxl.styles import Color, Fill, Border, Alignment, Font
from openpyxl.cell import get_column_letter
import numpy as np
def ShoppingList_to_excel():
pwd = os.getcwd()
os.chdir("../..") ## it changes to the parent folder, since shopping list is there
csvFile = 'ShoppingList'
df = pd.read_csv(csvFile)
if float(sum(1 for line in open(csvFile))) < 2:
df = pd.DataFrame(np.nan, index=[0], columns=df.columns)
xlsFile = csvFile+'.xlsx'
writer = pd.ExcelWriter(xlsFile,engine='openpyxl')
df.index = range(1,len(df)+1)
df.to_excel(writer,sheet_name='Sheet1')
wb = writer.book
ws = writer.sheets['Sheet1']
Columns = ws.get_highest_column()
Rows = ws.get_highest_row()
ws.row_dimensions[1].height = 50
for i in range(2,Rows+1):
ws.row_dimensions[i].height = 30
ws.column_dimensions[get_column_letter((1))].width = 5
ws.column_dimensions[get_column_letter((2))].width = 15
ws.column_dimensions[get_column_letter((3))].width = 30
ws.column_dimensions[get_column_letter((4))].width = 10
ws.column_dimensions[get_column_letter((5))].width = 10
ws.column_dimensions[get_column_letter((6))].width = 15
ws.column_dimensions[get_column_letter((7))].width = 10
ws.column_dimensions[get_column_letter((8))].width = 15
ws.column_dimensions[get_column_letter((9))].width = 15
ws.column_dimensions[get_column_letter((10))].width = 10
ws.column_dimensions[get_column_letter((11))].width = 15
Style_ShoppingList(ws,1,Rows,1,Columns,'nb')
Style_ShoppingList(ws,1,1,2,Columns,'g')
Style_ShoppingList(ws,2,Rows,1,1,'g')
ws['F1'].value = "Price Per item ("+u"\u20AC"+")"
ws['G1'].value = "Tip\n ("+u"\u20AC"+")"
ws['H1'].value = "Total Price\n ("+u"\u20AC"+")"
os.chdir(pwd)
return ws
def Style_ShoppingList(ws,Row_s,Row_e,Col_s,Col_e,c):
for i in range(Row_s,Row_e+1):
for j in range(Col_s,Col_e+1):
# print "i="+str(i)+", j="+str(j)
tmp_cell = ws.cell(row=i-1,column=j-1)
if c == 'nb': ## stands for no background
pass
else:
tmp_cell.style.fill.fill_type = Fill.FILL_SOLID
if c == 'g': ## stands for green
tmp_cell.style.fill.start_color.index = Color.GREEN
elif c == 'r': ## stands for red
tmp_cell.style.fill.start_color.index = Color.RED
                elif c == 'lb': ## stands for light blue
tmp_cell.style.fill.start_color.index = 'FF89C4FF'#'FF6CB5FF'#'FF5CADFF'
elif c == 'y': ## stands for yellow
tmp_cell.style.fill.start_color.index = Color.YELLOW
elif c == 'o': ## stands for orange
tmp_cell.style.fill.start_color.index = 'FFF4C99F'#'FFEDAB69'
elif c == 'lgr': ## stands for light gray
tmp_cell.style.fill.start_color.index = 'FFE6E6E6'
elif c == 'dgr': ## stands for dark gray
tmp_cell.style.fill.start_color.index = 'FFBABABA'
if i == 1 or j == 1:
tmp_cell.style.font.name = 'Times New Roman'
tmp_cell.style.font.bold = True
else:
tmp_cell.style.font.name = 'Arial'
tmp_cell.style.font.bold = False
tmp_cell.style.alignment.vertical = "center"
tmp_cell.style.alignment.horizontal = "center"
tmp_cell.style.alignment.wrap_text = True
tmp_cell.style.borders.top.border_style = Border.BORDER_THIN
tmp_cell.style.borders.bottom.border_style = Border.BORDER_THIN
tmp_cell.style.borders.right.border_style = Border.BORDER_THIN
tmp_cell.style.borders.left.border_style = Border.BORDER_THIN
| 3.21875
| 3
|
evaluate_numbering.py
|
koreyou/pdf-struct
| 10
|
12776946
|
# Copyright (c) 2021, Hitachi America Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
from typing import List
import click
from pdf_struct import loader
from pdf_struct.core import transition_labels
from pdf_struct.core.document import Document
from pdf_struct.core.structure_evaluation import evaluate_structure, \
evaluate_labels
from pdf_struct.core.transition_labels import ListAction
from pdf_struct.features.listing import SectionNumber, SectionNumberJa
section_number_cls_dict = {
'SectionNumber': SectionNumber,
'SectionNumberJa': SectionNumberJa
}
def predict_transitions_numbering(section_number_cls, document: Document) -> Document:
numbered_list = []
anchors: List[int] = []
labels = []
pointers = []
for i in range(document.n_blocks):
candidates = section_number_cls.extract_section_number(document.texts[i])
if len(candidates) == 0:
labels.append(ListAction.CONTINUOUS)
pointers.append(None)
continue
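        # Walk the stack of currently-open section numbers from deepest to shallowest
        # and look for a candidate that continues one of them.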
for j in range(len(numbered_list) - 1, -1, -1):
for section_number in candidates:
if section_number.is_next_of(numbered_list[j]):
if j == len(numbered_list) - 1:
labels.append(ListAction.SAME_LEVEL)
pointers.append(None)
else:
labels.append(ListAction.UP)
pointers.append(anchors[j])
numbered_list = numbered_list[:j]
numbered_list.append(section_number)
anchors = anchors[:j]
anchors.append(i)
break
else:
continue
break
else:
# No valid continuation found... check if it is a new level
for section_number in candidates:
if isinstance(section_number.number, str) or section_number.number <= 1:
numbered_list.append(section_number)
anchors.append(i)
labels.append(ListAction.DOWN)
pointers.append(None)
break
else:
# section number does not match anything, but it is still probably a new paragraph
labels.append(ListAction.SAME_LEVEL)
pointers.append(None)
# append final label --- which would always be ignored
labels.append(ListAction.UP)
pointers.append(-1)
labels = labels[1:]
pointers = pointers[1:]
assert len(labels) == len(pointers) == len(document.labels)
document = copy.deepcopy(document)
document.pointers = pointers
document.labels = labels
return document
@click.command()
@click.option('--metrics', type=click.Path(exists=False), default=None,
help='Dump metrics as a JSON file.')
@click.argument('file-type', type=click.Choice(('txt', 'pdf')))
@click.argument('section-number', type=click.Choice(tuple(section_number_cls_dict.keys())))
@click.argument('raw-dir', type=click.Path(exists=True))
@click.argument('anno-dir', type=click.Path(exists=True))
def main(metrics, file_type: str, section_number: str, raw_dir: str, anno_dir: str):
print(f'Loading annotations from {anno_dir}')
annos = transition_labels.load_annos(anno_dir)
print('Loading and extracting features from raw files')
if file_type == 'pdf':
documents = loader.pdf.load_from_directory(raw_dir, annos)
else:
documents = loader.text.load_from_directory(raw_dir, annos)
section_number_cls = section_number_cls_dict[section_number]
documents_pred = [predict_transitions_numbering(section_number_cls, document)
for document in documents]
if metrics is None:
print(json.dumps(evaluate_structure(documents, documents_pred), indent=2))
print(json.dumps(evaluate_labels(documents, documents_pred), indent=2))
else:
_metrics = {
'structure': evaluate_structure(documents, documents_pred),
'labels': evaluate_labels(documents, documents_pred)
}
with open(metrics, 'w') as fout:
json.dump(_metrics, fout, indent=2)
if __name__ == '__main__':
main()
| 2.265625
| 2
|
docs/00.Python/demo_pacages/p1/pp1/a3.py
|
mheanng/PythonNote
| 0
|
12776947
|
from ...p1 import mm
# Import a package from beyond the boundary of the main package
print('this is a2')
| 1.554688
| 2
|
beowulf/cli.py
|
beowulf-foundation/beowulf-python
| 9
|
12776948
|
import argparse
import json
import logging
import os
import pprint
import re
import sys
import click._compat
import pkg_resources
from prettytable import PrettyTable
import beowulf as bwf
from beowulfbase.account import PrivateKey
from beowulfbase.storage import configStorage
from .account import Account
from .amount import Amount
from .block import Block
from .blockchain import Blockchain
from .instance import shared_beowulfd_instance
from .supernode import Supernode
availableConfigurationKeys = [
"default_account",
"default_vote_weight",
"nodes",
]
def legacyentry():
"""
Piston like cli application.
This will be re-written as a @click app in the future.
"""
global args
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Command line tool to interact with the Beowulf network")
"""
Default settings for all tools
"""
parser.add_argument(
'--node',
type=str,
default=configStorage["node"],
help='URL for public Beowulf API (default: "https://bw.beowulfchain.com")'
)
parser.add_argument(
'--no-broadcast',
'-d',
action='store_true',
help='Do not broadcast anything')
parser.add_argument(
'--no-wallet',
'-p',
action='store_true',
help='Do not load the wallet')
parser.add_argument(
'--unsigned',
'-x',
action='store_true',
help='Do not try to sign the transaction')
parser.add_argument(
'--expires',
'-e',
default=60,
help='Expiration time in seconds (defaults to 60)')
parser.add_argument(
'--verbose', '-v', type=int, default=3, help='Verbosity')
parser.add_argument(
'--version',
action='version',
version='%(prog)s {version}'.format(
version=pkg_resources.require("beowulf")[0].version))
subparsers = parser.add_subparsers(help='sub-command help')
"""
Command "set"
"""
setconfig = subparsers.add_parser('set', help='Set configuration')
setconfig.add_argument(
'key',
type=str,
choices=availableConfigurationKeys,
help='Configuration key')
setconfig.add_argument('value', type=str, help='Configuration value')
setconfig.set_defaults(command="set")
"""
Command "config"
"""
configconfig = subparsers.add_parser(
'config', help='Show local configuration')
configconfig.set_defaults(command="config")
"""
Command "info"
"""
parser_info = subparsers.add_parser(
'info', help='Show basic BWF blockchain info')
parser_info.set_defaults(command="info")
parser_info.add_argument(
'objects',
nargs='*',
type=str,
help='General information about the blockchain, a block, an account'
' name, a post, a public key, ...')
"""
Command "changewalletpassphrase"
"""
changepasswordconfig = subparsers.add_parser(
'changewalletpassphrase', help='Change wallet password')
changepasswordconfig.set_defaults(command="changewalletpassphrase")
"""
Command "addkey"
"""
addkey = subparsers.add_parser(
'addkey', help='Add a new key to the wallet')
addkey.add_argument(
'--unsafe-import-key',
nargs='*',
type=str,
help='private key to import into the wallet (unsafe, unless you ' +
'delete your shell history)')
addkey.set_defaults(command="addkey")
parsewif = subparsers.add_parser(
'parsewif', help='Parse a WIF private key without importing')
parsewif.add_argument(
'--unsafe-import-key',
nargs='*',
type=str,
help='WIF key to parse (unsafe, delete your bash history)')
parsewif.set_defaults(command='parsewif')
"""
Command "delkey"
"""
delkey = subparsers.add_parser(
'delkey', help='Delete keys from the wallet')
delkey.add_argument(
'pub',
nargs='*',
type=str,
help='the public key to delete from the wallet')
delkey.set_defaults(command="delkey")
"""
Command "getkey"
"""
getkey = subparsers.add_parser(
'getkey', help='Dump the privatekey of a pubkey from the wallet')
getkey.add_argument(
'pub',
type=str,
help='the public key for which to show the private key')
getkey.set_defaults(command="getkey")
"""
Command "listkeys"
"""
listkeys = subparsers.add_parser(
'listkeys', help='List available keys in your wallet')
listkeys.set_defaults(command="listkeys")
"""
Command "listaccounts"
"""
listaccounts = subparsers.add_parser(
'listaccounts', help='List available accounts in your wallet')
listaccounts.set_defaults(command="listaccounts")
"""
Command "transfer"
"""
parser_transfer = subparsers.add_parser('transfer', help='Transfer BWF')
parser_transfer.set_defaults(command="transfer")
parser_transfer.add_argument('to', type=str, help='Recipient')
parser_transfer.add_argument(
'amount', type=float, help='Amount to transfer')
parser_transfer.add_argument(
'asset',
type=str,
choices=["BWF", "W"],
help='Asset to transfer (i.e. BWF or W)')
parser_transfer.add_argument(
'fee', type=float, help='Fee to transfer')
parser_transfer.add_argument(
'asset_fee',
type=str,
choices=["W"],
help='Asset fee to transfer (W)')
parser_transfer.add_argument(
'memo', type=str, nargs="?", default="", help='Optional memo')
parser_transfer.add_argument(
'--account',
type=str,
required=False,
default=configStorage["default_account"],
help='Transfer from this account')
"""
Command "convert"
"""
parser_convert = subparsers.add_parser(
'convert',
help='Convert BWFDollars to Beowulf (takes a week to settle)')
parser_convert.set_defaults(command="convert")
parser_convert.add_argument(
'amount', type=float, help='Amount of W to convert')
parser_convert.add_argument(
'--account',
type=str,
required=False,
default=configStorage["default_account"],
help='Convert from this account')
"""
Command "balance"
"""
parser_balance = subparsers.add_parser(
'balance', help='Show the balance of one more more accounts')
parser_balance.set_defaults(command="balance")
parser_balance.add_argument(
'account',
type=str,
nargs="*",
default=configStorage["default_account"],
help='balance of these account (multiple accounts allowed)')
"""
Command "newaccount"
"""
parser_newaccount = subparsers.add_parser(
'newaccount', help='Create a new account')
parser_newaccount.set_defaults(command="newaccount")
parser_newaccount.add_argument(
'accountname', type=str, help='New account name')
parser_newaccount.add_argument(
'--account',
type=str,
required=False,
default=configStorage["default_account"],
help='Account that pays the fee')
"""
Command "importaccount"
"""
parser_importaccount = subparsers.add_parser(
'importaccount', help='Import an account using a passphrase')
parser_importaccount.set_defaults(command="importaccount")
parser_importaccount.add_argument('account', type=str, help='Account name')
parser_importaccount.add_argument(
'--roles',
type=str,
nargs="*",
default=["owner"], # no owner
help='Import specified keys (owner, active, posting, memo)')
"""
Command "approvesupernode"
"""
parser_approvesupernode = subparsers.add_parser(
        'approvesupernode', help='Approve a supernode')
parser_approvesupernode.set_defaults(command="approvesupernode")
parser_approvesupernode.add_argument(
'supernode', type=str, help='Supernode to approve')
parser_approvesupernode.add_argument(
'--account',
type=str,
required=False,
default=configStorage["default_account"],
help='Your account')
"""
Command "disapprovesupernode"
"""
parser_disapprovesupernode = subparsers.add_parser(
        'disapprovesupernode', help='Disapprove a supernode')
parser_disapprovesupernode.set_defaults(command="disapprovesupernode")
parser_disapprovesupernode.add_argument(
'supernode', type=str, help='Supernode to disapprove')
parser_disapprovesupernode.add_argument(
'--account',
type=str,
required=False,
default=configStorage["default_account"],
help='Your account')
"""
Command "sign"
"""
parser_sign = subparsers.add_parser(
'sign',
help='Sign a provided transaction with available and required keys')
parser_sign.set_defaults(command="sign")
parser_sign.add_argument(
'--file',
type=str,
required=False,
help='Load transaction from file. If "-", read from ' +
'stdin (defaults to "-")')
"""
Command "broadcast"
"""
parser_broadcast = subparsers.add_parser(
'broadcast', help='broadcast a signed transaction')
parser_broadcast.set_defaults(command="broadcast")
parser_broadcast.add_argument(
'--file',
type=str,
required=False,
help='Load transaction from file. If "-", read from ' +
'stdin (defaults to "-")')
"""
Command "supernodeupdate"
"""
parser_supernodeprops = subparsers.add_parser(
'supernodeupdate', help='Change supernode properties')
parser_supernodeprops.set_defaults(command="supernodeupdate")
parser_supernodeprops.add_argument(
'--supernode',
type=str,
default=configStorage["default_account"],
help='Supernode name')
parser_supernodeprops.add_argument(
'--maximum_block_size',
type=float,
required=False,
help='Max block size')
parser_supernodeprops.add_argument(
'--account_creation_fee',
type=float,
required=False,
help='Account creation fee')
parser_supernodeprops.add_argument(
'--signing_key', type=str, required=False, help='Signing Key')
"""
Command "supernodecreate"
"""
parser_supernodecreate = subparsers.add_parser(
'supernodecreate', help='Create a supernode')
parser_supernodecreate.set_defaults(command="supernodecreate")
parser_supernodecreate.add_argument('supernode', type=str, help='Supernode name')
parser_supernodecreate.add_argument(
'signing_key', type=str, help='Signing Key')
parser_supernodecreate.add_argument(
'--maximum_block_size',
type=float,
default="65536",
help='Max block size')
parser_supernodecreate.add_argument(
'--account_creation_fee',
type=float,
default=30,
help='Account creation fee')
"""
Parse Arguments
"""
args = parser.parse_args()
# Logging
log = logging.getLogger(__name__)
verbosity = ["critical", "error", "warn", "info", "debug"][int(
min(args.verbose, 4))]
log.setLevel(getattr(logging, verbosity.upper()))
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(getattr(logging, verbosity.upper()))
ch.setFormatter(formatter)
log.addHandler(ch)
# GrapheneAPI logging
if args.verbose > 4:
verbosity = ["critical", "error", "warn", "info", "debug"][int(
min((args.verbose - 4), 4))]
gphlog = logging.getLogger("graphenebase")
gphlog.setLevel(getattr(logging, verbosity.upper()))
gphlog.addHandler(ch)
if args.verbose > 8:
verbosity = ["critical", "error", "warn", "info", "debug"][int(
min((args.verbose - 8), 4))]
gphlog = logging.getLogger("grapheneapi")
gphlog.setLevel(getattr(logging, verbosity.upper()))
gphlog.addHandler(ch)
if not hasattr(args, "command"):
parser.print_help()
sys.exit(2)
# initialize BWF instance
options = {
"node": args.node,
"unsigned": args.unsigned,
"expires": args.expires
}
if args.command == "sign":
options.update({"offline": True})
if args.no_wallet:
options.update({"wif": []})
beowulf = bwf.Beowulf(no_broadcast=args.no_broadcast, **options)
if args.command == "set":
# TODO: Evaluate this line with cli refactor.
if (args.key in ["default_account"] and args.value[0] == "@"):
args.value = args.value[1:]
configStorage[args.key] = args.value
elif args.command == "config":
t = PrettyTable(["Key", "Value"])
t.align = "l"
for key in configStorage:
# hide internal config data
if key in availableConfigurationKeys:
t.add_row([key, configStorage[key]])
print(t)
elif args.command == "info":
if not args.objects:
t = PrettyTable(["Key", "Value"])
t.align = "l"
blockchain = Blockchain(mode="head")
info = blockchain.info()
for key in info:
t.add_row([key, info[key]])
print(t.get_string(sortby="Key"))
for obj in args.objects:
# Block
if re.match("^[0-9]*$", obj):
block = Block(obj)
if block:
t = PrettyTable(["Key", "Value"])
t.align = "l"
for key in sorted(block):
value = block[key]
if key == "transactions":
value = json.dumps(value, indent=4)
t.add_row([key, value])
print(t)
else:
print("Block number %s unknown" % obj)
# Account name
elif re.match("^[a-zA-Z0-9\-\._]{2,16}$", obj):
from math import log10
account = Account(obj)
t = PrettyTable(["Key", "Value"])
t.align = "l"
for key in sorted(account):
value = account[key]
if key == "json_metadata":
value = json.dumps(json.loads(value or "{}"), indent=4)
if key in ["supernode_votes", "owner"]:
value = json.dumps(value, indent=4)
if key == "reputation" and int(value) > 0:
value = int(value)
rep = (max(log10(value) - 9, 0) * 9 + 25 if value > 0
else max(log10(-value) - 9, 0) * -9 + 25)
value = "{:.2f} ({:d})".format(rep, value)
t.add_row([key, value])
print(t)
# supernode available?
try:
supernode = Supernode(obj)
t = PrettyTable(["Key", "Value"])
t.align = "l"
for key in sorted(supernode):
value = supernode[key]
if key in ["props", "wd_exchange_rate"]:
value = json.dumps(value, indent=4)
t.add_row([key, value])
print(t)
except: # noqa FIXME
pass
# Public Key
elif re.match("^BEO.{48,55}$", obj):
account = beowulf.commit.wallet.getAccountFromPublicKey(obj)
if account:
t = PrettyTable(["Account"])
t.align = "l"
t.add_row([account])
print(t)
else:
print("Public Key not known" % obj)
else:
print("Couldn't identify object to read")
elif args.command == "changewalletpassphrase":
beowulf.commit.wallet.changeUserPassphrase()
elif args.command == "addkey":
if args.unsafe_import_key:
for key in args.unsafe_import_key:
try:
beowulf.commit.wallet.add_private_key(key)
except Exception as e:
print(str(e))
else:
import getpass
while True:
wifkey = getpass.getpass('Private Key (wif) [Enter to quit]:')
if not wifkey:
break
try:
beowulf.commit.wallet.add_private_key(wifkey)
except Exception as e:
print(str(e))
continue
installed_keys = beowulf.commit.wallet.getPublicKeys()
if len(installed_keys) == 1:
name = beowulf.commit.wallet.getAccountFromPublicKey(
installed_keys[0])
print("=" * 30)
print("Would you like to make %s a default user?" % name)
print()
print("You can set it with with:")
print(" beowulfpy set default_account <account>")
print("=" * 30)
elif args.command == "delkey":
if confirm("Are you sure you want to delete keys from your wallet?\n"
"This step is IRREVERSIBLE! If you don't have a backup, "
"You may lose access to your account!"):
for pub in args.pub:
beowulf.commit.wallet.removePrivateKeyFromPublicKey(pub)
elif args.command == "parsewif":
if args.unsafe_import_key:
for key in args.unsafe_import_key:
try:
print(PrivateKey(key).pubkey)
except Exception as e:
print(str(e))
else:
import getpass
while True:
            wifkey = getpass.getpass('Private Key (wif) [Enter to quit]:')
if not wifkey:
break
try:
print(PrivateKey(wifkey).pubkey)
except Exception as e:
print(str(e))
continue
elif args.command == "getkey":
print(beowulf.commit.wallet.getPrivateKeyForPublicKey(args.pub))
elif args.command == "listkeys":
t = PrettyTable(["Available Key"])
t.align = "l"
for key in beowulf.commit.wallet.getPublicKeys():
t.add_row([key])
print(t)
elif args.command == "listaccounts":
t = PrettyTable(["Name", "Type", "Available Key"])
t.align = "l"
for account in beowulf.commit.wallet.getAccounts():
t.add_row([
account["name"] or "n/a", account["type"] or "n/a",
account["pubkey"]
])
print(t)
elif args.command == "transfer":
print_json(
beowulf.commit.transfer(
args.to,
args.amount,
args.asset,
args.fee,
args.asset_fee,
memo=args.memo,
account=args.account))
elif args.command == "convert":
print_json(beowulf.commit.convert(
args.amount,
account=args.account,
))
elif args.command == "balance":
if args.account and isinstance(args.account, list):
for account in args.account:
a = Account(account)
print("\n%s" % a.name)
t = PrettyTable(["Account", "BWF", "W", "M"])
t.align = "r"
t.add_row([
'Available',
a.balances['available']['BWF'],
a.balances['available']['W'],
a.balances['available']['M'],
])
t.add_row([
'Rewards',
a.balances['rewards']['BWF'],
a.balances['rewards']['W'],
a.balances['rewards']['M'],
])
t.add_row([
'TOTAL',
a.balances['total']['BWF'],
a.balances['total']['W'],
a.balances['total']['M'],
])
print(t)
else:
print("Please specify an account: beowulfpy balance <account>")
elif args.command == "permissions":
account = Account(args.account)
print_permissions(account)
elif args.command == "newaccount":
import getpass
while True:
pw = getpass.getpass("New Account Passphrase: ")
if not pw:
print("You cannot chosen an empty password!")
continue
else:
pwck = getpass.getpass("Confirm New Account Passphrase: ")
if pw == pwck:
break
else:
print("Given Passphrases do not match!")
print_json(
beowulf.commit.create_account(
args.accountname,
creator=args.account,
password=pw,
))
elif args.command == "importaccount":
from beowulfbase.account import PasswordKey
import getpass
password = getpass.getpass("Account Passphrase: ")
account = Account(args.account)
imported = False
if "owner" in args.roles:
owner_key = PasswordKey(args.account, password, role="owner")
owner_pubkey = format(owner_key.get_public_key(), "BEO")
if owner_pubkey in [x[0] for x in account["owner"]["key_auths"]]:
print("Importing owner key!")
owner_privkey = owner_key.get_private_key()
beowulf.commit.wallet.add_private_key(owner_privkey)
imported = True
if not imported:
print("No matching key(s) found. Password correct?")
elif args.command == "sign":
if args.file and args.file != "-":
if not os.path.isfile(args.file):
raise Exception("File %s does not exist!" % args.file)
with open(args.file) as fp:
tx = fp.read()
else:
tx = sys.stdin.read()
tx = eval(tx)
print_json(beowulf.commit.sign(tx))
elif args.command == "broadcast":
if args.file and args.file != "-":
if not os.path.isfile(args.file):
raise Exception("File %s does not exist!" % args.file)
with open(args.file) as fp:
tx = fp.read()
else:
tx = sys.stdin.read()
tx = eval(tx)
beowulf.commit.broadcast(tx)
elif args.command == "approvesupernode":
print_json(
beowulf.commit.approve_supernode(args.supernode, account=args.account))
elif args.command == "disapprovesupernode":
print_json(
beowulf.commit.disapprove_supernode(
args.supernode, account=args.account))
elif args.command == "supernodeupdate":
supernode = Supernode(args.supernode)
props = supernode["props"]
if args.account_creation_fee:
props["account_creation_fee"] = str(
Amount("%f BWF" % args.account_creation_fee))
if args.maximum_block_size:
props["maximum_block_size"] = args.maximum_block_size
print_json(
beowulf.commit.supernode_update(
args.signing_key or supernode["signing_key"],
props,
account=args.supernode))
elif args.command == "supernodecreate":
props = {
"account_creation_fee":
str(Amount("%f BWF" % args.account_creation_fee)),
"maximum_block_size":
args.maximum_block_size
}
print_json(
beowulf.commit.supernode_update(
args.signing_key, props, account=args.supernode))
else:
print("No valid command given")
def confirm(question, default="yes"):
""" Confirmation dialog that requires *manual* input.
:param str question: Question to ask the user
:param str default: default answer
:return: Choice of the user
:rtype: bool
"""
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
# Python 2.7 `input` attempts to evaluate the input, while in 3+
# it returns a string. Python 2.7 `raw_input` returns a str as desired.
if sys.version >= '3.0':
choice = input().lower()
else:
choice = click._compat.raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def get_terminal(text="Password", confirm=False, allowedempty=False):
import getpass
while True:
pw = getpass.getpass(text)
if not pw and not allowedempty:
print("Cannot be empty!")
continue
else:
if not confirm:
break
pwck = getpass.getpass("Confirm " + text)
if pw == pwck:
break
else:
print("Not matching!")
return pw
def format_operation_details(op, memos=False):
if op[0] == "transfer":
str_ = "%s -> %s %s" % (
op[1]["from"],
op[1]["to"],
op[1]["amount"],
op[1]["fee"],
)
if memos:
memo = op[1]["memo"]
if len(memo) > 0 and memo[0] == "#":
beowulf = shared_beowulfd_instance()
# memo = beowulf.decode_memo(memo, op[1]["from"])
memo = beowulf.decode_memo(memo, op)
str_ += " (%s)" % memo
return str_
else:
return json.dumps(op[1], indent=4)
def print_permissions(account):
t = PrettyTable(["Permission", "Threshold", "Key/Account"], hrules=0)
t.align = "r"
for permission in ["owner"]:
auths = []
for type_ in ["account_auths", "key_auths"]:
for authority in account[permission][type_]:
auths.append("%s (%d)" % (authority[0], authority[1]))
t.add_row([
permission,
account[permission]["weight_threshold"],
"\n".join(auths),
])
print(t)
def print_json(tx):
if sys.stdout.isatty():
print(json.dumps(tx, indent=4))
else:
# You're being piped or redirected
print(tx)
# this is another console script entrypoint
# also this function sucks and should be taken out back and shot
def beowulftailentry():
parser = argparse.ArgumentParser(
description="UNIX tail(1)-like tool for the beowulf blockchain")
parser.add_argument(
'-f',
'--follow',
help='Constantly stream output to stdout',
action='store_true')
parser.add_argument(
'-n', '--lines', type=int, default=10, help='How many ops to show')
parser.add_argument(
'-j',
'--json',
help='Output as JSON instead of human-readable pretty-printed format',
action='store_true')
args = parser.parse_args(sys.argv[1:])
op_count = 0
if args.json:
if not args.follow:
sys.stdout.write('[')
for op in Blockchain().reliable_stream():
if args.json:
sys.stdout.write('%s' % json.dumps(op))
if args.follow:
sys.stdout.write("\n") # for human eyeballs
sys.stdout.flush() # flush after each op if live mode
else:
pprint.pprint(op)
op_count += 1
if not args.follow:
if op_count > args.lines:
if args.json:
sys.stdout.write(']')
return
else:
if args.json:
sys.stdout.write(',')
| 1.828125
| 2
|
morphounit/scores/score_RangeCheck.py
|
appukuttan-shailesh/morphounit
| 1
|
12776949
|
import sciunit
#==============================================================================
class RangeCheck(sciunit.Score):
"""
Checks if value is within specified range
Approach: Returns True if within range, False otherwise
"""
_allowed_types = (bool,)
_description = ('Checks if value of prediction is within the '
' specified range for the observation')
@classmethod
def compute(cls, observation, prediction):
"""
Computes True/False based on whether value is within the range.
"""
assert isinstance(prediction,dict)
assert isinstance(observation,dict)
p_val = prediction['value']
o_min = observation['min']
o_max = observation['max']
if p_val >= o_min and p_val <= o_max:
score = True
else:
score = False
return RangeCheck(score)
@property
def sort_key(self):
"""
Returns 1.0 for a Boolean score of True, and 0.0 for a score of False.
"""
return 1.0 if self.score else 0.0
def __str__(self):
return 'Pass' if self.score else 'Fail'
| 3.203125
| 3
|
multi_orbital/evaluation/gap_plots.py
|
nikwitt/FLEX_IR
| 1
|
12776950
|
<filename>multi_orbital/evaluation/gap_plots.py
# -*- encoding: latin-1 -*-
import sys
MKL_THREADS_VAR = str(sys.argv[1])
import os
os.environ["MKL_NUM_THREADS"] = MKL_THREADS_VAR
os.environ["NUMEXPR_NUM_THREADS"] = MKL_THREADS_VAR
os.environ["OMP_NUM_THREADS"] = "1"
from numpy import *
import scipy as sc
import pyfftw
from ir_load import ir_load
from parameters import parameters
from hamiltonian import hamiltonian
from gfunction import gfunction_calc
from gfunction import gfunction_load
from eliashberg2 import eliashberg
from kpath_extract import kpath_extract
import matplotlib
import matplotlib.pyplot as plt
import datetime
import time
from Hexagonal_BZ_quantitiy_plot import Hexagonal_BZ_plot
##### Please input in order:
# MKL_NUM_THREADS | T | T_load | JUratio | JU_ratio_load | round_it
n_fill = (7-3.43)/2
T = float(sys.argv[2])
T_load = float(sys.argv[3])
JU_ratio = float(sys.argv[4])
JU_ratio_load = float(sys.argv[5])
round_it = int(sys.argv[6])
print(T, T_load, JU_ratio, JU_ratio_load, round_it)
sym_list = ['f1','f2']
### Initiate parameters -------------------------------------------------
start = time.process_time()
p = parameters(round(T, 5), round(n_fill ,5), round(JU_ratio,5), round_it,\
T_load = round(T_load, 5), JU_ratio_load = round(JU_ratio_load, 5))
print("##################################################"\
, file=open(p.Logstr,'a'))
print(datetime.datetime.now().strftime('%d. %B %Y %I:%M%p')\
, file=open(p.Logstr,'a'))
print("Parameter set: n = " + str(p.n_fill) + ", T = " + str(p.T)\
+ ", U = " + str(p.u0) + ", J_H = " + str(p.JU_ratio) + "U\n"\
, file=open(p.Logstr,'a'))
print("Elapsed time - parameter init: " + str(time.process_time() - start)\
, file=open(p.Logstr,'a'))
### Load hamiltionian----------------------------------------------------
t_hset = time.process_time()
h = hamiltonian(p)
print("Elapsed time - hamiltonian set (tot | module): " \
+ str(time.process_time() - start) + " | " \
+ str(time.process_time() - t_hset), file=open(p.Logstr,'a'))
### Load irbasis --------------------------------------------------------
t_bload = time.process_time()
b = ir_load(p.Lambda, p.beta)
print("Elapsed time - basis load (tot | module): " \
+ str(time.process_time() - start) + " | " \
+ str(time.process_time() - t_bload), file=open(p.Logstr,'a'))
### Calculate full Greens function---------------------------------------
t_gcal = time.process_time()
g = gfunction_load(p,b)
print("Elapsed time - g_scf_calc load (tot | module): " \
+ str(time.process_time() - start) + " | " \
+ str(time.process_time() - t_gcal), file=open(p.Logstr,'a'))
# For kernel evaluation
print("Now setting up interaction", file=open(p.Logstr,'a'))
chi_spin = g.ckio@linalg.inv(g.E_int - h.S_mat@g.ckio)
chi_charge = g.ckio@linalg.inv(g.E_int + h.C_mat@g.ckio)
v = - 1./2.* h.S_mat@chi_spin@h.S_mat - 1./2.* h.C_mat@chi_charge@h.C_mat
v = v.reshape(len(b.bm),p.nk1,p.nk2,p.nk3,p.nwan**4)
fft_object = pyfftw.builders.fftn(v, axes=(1,2,3))
v = fft_object().reshape(len(b.bm),p.nk*p.nwan**4)
result, _, _, _ = sc.linalg.lstsq(b.bose_Uln, v, lapack_driver='gelsy')
v = dot(b.bose_Ulx_fermi, result)
v = v.reshape(len(b.ft),p.nk,p.nwan,p.nwan,p.nwan,p.nwan)
v_DC = (h.C_mat + h.S_mat)/2
### Calculate SC parameter ----------------------------------------------
for sym_it in sym_list:
print(("Now do things for symmetry: {}").format(sym_it), file=open(p.Logstr,'a'))
p.SC_type = sym_it
p.SC_savepath = p.sp_dir + sym_it + "w_" + p.sp_name_save
p.SC_loadpath = p.sp_dir_load + sym_it + "w_" + p.sp_name_load
print("Gap loading...", file=open(p.Logstr,'a'))
dum = gfunction_load.func_load(p, "_gap_", 2, sc_state='sc')
gap = dum.reshape(len(dum)//(p.nwan**2),p.nwan,p.nwan,order='F')
gap = gap.reshape(size(gap)//(p.nk*p.nwan**2),p.nk,p.nwan,p.nwan)
gap = transpose(gap, axes=(0,1,3,2))
# Plot elements at iw_1
print("Plotting elementwise over k...", file=open(p.Logstr,'a'))
for it1 in range(p.nwan):
for it2 in range(p.nwan):
Hexagonal_BZ_plot(p ,real(gap[b.f_iwn_zero_ind,:,it1,it2]),\
title=('$\\Delta(i\\omega_1,k)$, {}-wave, element {}{}').format(p.SC_type,it1,it2),\
save_name = ('Odata_gap_weight/JU_{:.2f}/Delta_{}w_T_{:.3f}_JU_{:.2f}_element_{}{}.png').format(p.JU_ratio,p.SC_type,p.T,p.JU_ratio,it1,it2))
# Plot iw_n dependence
print("Plotting elementwise over iw_n...", file=open(p.Logstr,'a'))
for it1 in range(p.nwan):
for it2 in range(p.nwan):
quant = gap[:,:,it1,it2].reshape(-1,p.nk1,p.nk2)
plt.figure()
plt.plot(b.iwn_smpl_fermi,real(quant[:,70,70]))
plt.plot(b.iwn_smpl_fermi,imag(quant[:,70,70]))
plt.legend(['Real','Imaginary'])
plt.title(('{}-wave: T = {:.3f} , J/U = {:.2f} , element {}{}').format(p.SC_type,p.T,p.JU_ratio,it1,it2))
plt.xlabel('$\\Delta(i\\omega_n,K)$')
plt.ylabel('n')
plt.savefig(('Odata_gap_weight/JU_{:.2f}/frequency_dependence_Kpoint_Delta_{}w_T_{:.3f}_JU_{:.2f}_element_{}{}.png').format(p.JU_ratio,p.SC_type,p.T,p.JU_ratio,it1,it2))
plt.xlim([-20,20])
plt.savefig(('Odata_gap_weight/JU_{:.2f}/frequency_dependence_Kpoint_Zoom_Delta_{}w_T_{:.3f}_JU_{:.2f}_element_{}{}.png').format(p.JU_ratio,p.SC_type,p.T,p.JU_ratio,it1,it2))
plt.close()
#Evaluate kernel
print("Evaluating kernel now...", file=open(p.Logstr,'a'))
print("Setting up f", file=open(p.Logstr,'a'))
f = g.gkio@gap@conj(g.gkio_invk)
f = f.reshape(len(b.fm),p.nk1,p.nk2,p.nk3,p.nwan**2)
fft_object = pyfftw.builders.fftn(f, axes=(1,2,3))
f = fft_object().reshape(len(b.fm),p.nk*p.nwan*p.nwan)
result, _, _, _ = sc.linalg.lstsq(b.fermi_Uln, f, lapack_driver='gelsy')
result[abs(result) < 10**(-13)] = 0
f = dot(b.fermi_Ulx, result).reshape(len(b.ft),p.nk,p.nwan,p.nwan)
print("Calculating convolution", file=open(p.Logstr,'a'))
y = - einsum('ijkmln,ijml->ijkn', v, f)
y = y.reshape(len(b.ft),p.nk1,p.nk2,p.nk3,p.nwan**2)
fft_object = pyfftw.builders.ifftn(y, axes=(1,2,3))
y = fft_object()/p.nk
y = y.reshape(len(b.ft),p.nk*p.nwan**2)
result, _, _, _ = sc.linalg.lstsq(b.fermi_Ulx, y, lapack_driver='gelsy')
y = dot(b.fermi_Uln, result)
y = y.reshape(len(b.fm),p.nk,p.nwan,p.nwan)
print("Printing eigenvalue to file", file=open(p.Logstr,'a'))
print(('{}-wave | T = {} | J/U = {}').format(p.SC_type,p.T,p.JU_ratio), file=open(('lam_output_JU_{:.3f}.dat').format(p.JU_ratio),'a'))
lam = y/gap
lam = lam.reshape(-1,p.nk1,p.nk2,p.nwan,p.nwan)
for kit in [0, 70]:
for it1 in range(p.nwan):
for it2 in range(p.nwan):
print(kit, it1,it2, lam[b.f_iwn_zero_ind,kit,kit,it1,it2], file=open(('lam_output_JU_{:.3f}.dat').format(p.JU_ratio),'a'))
print(('{}-wave | T = {} | J/U = {} | with additive term').format(p.SC_type,p.T,p.JU_ratio), file=open(('lam_output_JU_{:.3f}.dat').format(p.JU_ratio),'a'))
y = y -einsum('kmln,ml->kn',v_DC.reshape(p.nwan,p.nwan,p.nwan,p.nwan),f[0,0])*ones((len(b.fm),p.nk,p.nwan,p.nwan))/p.nk
lam = y/gap
lam = lam.reshape(-1,p.nk1,p.nk2,p.nwan,p.nwan)
for kit in [20, 70]:
for it1 in range(p.nwan):
for it2 in range(p.nwan):
print(kit, it1,it2, lam[b.f_iwn_zero_ind,kit,kit,it1,it2], file=open(('lam_output_JU_{:.3f}.dat').format(p.JU_ratio),'a'))
print('lam from sum: ',sum(conj(y)*gap), file=open(('lam_output_JU_{:.3f}.dat').format(p.JU_ratio),'a'))
print("##################################################"\
, file=open(p.Logstr,'a'))
print("\n",file=open(p.Logstr,'a'))
| 2.015625
| 2
|