max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
smorest_crud/test/test_crud.py | jetbridge/smorest-crud | 9 | 12771851 | from flask.testing import FlaskClient
from smorest_crud.test.app import USER_NAME
def test_list(client: FlaskClient, pets):
    """Listing humans returns all ten, each with populated pet data."""
    response = client.get("/human")
    assert response.status_code == 200
    humans = response.json
    assert len(humans) == 10
    first_pet = humans[0]["pets"][0]
    assert first_pet
    assert first_pet["genus"]
def test_get(client: FlaskClient, pets, db):
    """Fetching a single human succeeds once its name passes the access check."""
    human = pets[0].human
    human.name = USER_NAME  # for access check
    db.session.commit()
    res = client.get(f"/human/{human.id}")
    assert res.status_code == 200
    human = res.json
    assert "name" in human
    # "loaded" reports which relationships the view eagerly loaded.
    # ("/pet" had a pointless f-string prefix with no placeholders.)
    prefetched = client.get("/pet").json["loaded"]
    assert prefetched["first.human"], "failed to prefetch rel 'human'"
    assert prefetched[
        "first.human.cars"
    ], "failed to prefetch secondary relationship 'human' -> 'car'"
def test_post(client: FlaskClient, pets):
    """POSTing a new pet succeeds and the response includes server-side fields."""
    # "/pet" had a pointless f-string prefix with no placeholders.
    res = client.post("/pet", json={"species": "Felis"})
    assert res.status_code == 200
    pet = res.json
    assert "edible" in pet
def test_patch(client: FlaskClient, pets):
    """PATCHing a pet updates the given field and echoes the new value."""
    payload = {"species": "Canis"}
    response = client.patch(f"/pet/{pets[0].id}", json=payload)
    assert response.status_code == 200
    assert response.json["species"] == "Canis"
def test_delete(client: FlaskClient, pets, db):
    """DELETE removes the pet; a follow-up GET then returns 404."""
    pet_url = f"/pet/{pets[0].id}"
    assert client.delete(pet_url).status_code == 200
    assert client.get(pet_url).status_code == 404
def test_disallowed(client: FlaskClient, pets):
    """Endpoints with disabled operations reject requests appropriately."""
    # delete disabled on human
    assert client.delete("/human/2").status_code == 405
    # human has get enabled, so an unknown id is a plain 404
    assert client.get("/human/2000").status_code == 404
    # pointless doesn't allow anything
    for response in (
        client.post("/pointless"),
        client.patch("/pointless/2"),
        client.get("/pointless/1"),
    ):
        assert response.status_code == 405
| 2.421875 | 2 |
pymatflow/qe/scripts/qe-bands.py | DeqiTang/pymatflow | 6 | 12771852 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import argparse
from pymatflow.qe.static import StaticRun
"""
usage:
"""
control = {}
system = {}
electrons = {}
bands = {}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", type=str, default="tmp-qe-static",
help="Directory for the static running.")
parser.add_argument("-f", "--file", type=str,
help="The xyz file containing the structure to be simulated.")
parser.add_argument("--runopt", type=str, default="gen",
choices=["gen", "run", "genrun"],
help="Generate or run or both at the same time.")
parser.add_argument("--auto", type=int, default=3,
help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf")
# -------------------------------------------------------------------
# scf related parameters
# -------------------------------------------------------------------
parser.add_argument("--ecutwfc", type=int, default=100,
help="Kinetic energy cutoff for wave functions in unit of Rydberg, default value: 100 Ry")
parser.add_argument("--ecutrho", type=int, default=None,
help="Kinetic energy cutoff for charge density and potential in unit of Rydberg, default value: None")
parser.add_argument("--kpoints-option", type=str, default="crystal_b",
choices=["automatic", "gamma", "crystal_b"],
help="Kpoints generation scheme option for band calculation")
parser.add_argument("--kpoints-mp", type=str, nargs="+",
default=[1, 1, 1, 0, 0, 0],
help="Monkhorst-Pack kpoint grid, in format like --kpoints-mp 1 1 1 0 0 0")
parser.add_argument("--crystal-b", type=str, nargs="+", default=None,
help="manual input kpath in crystal_b, like --crystal-b '0.000000 0.000000 0.000000 GAMMA 5' '0.500000 0.000000 0.000000 X 5' '0.0000 0.000 0.50000 A |' '0.5 0.5 0.5 R '")
parser.add_argument("--crystal-b-file", type=str, default='kpath-from-seekpath.txt',
help="manual input kpath in crystal_b read from the file")
parser.add_argument("--conv-thr", type=float, default=1.0e-6,
help="Convergence threshold for SCF calculation.")
parser.add_argument("--occupations", type=str, default="smearing",
choices=["smearing", "tetrahedra", "tetrahedra_lin", "tetrahedra_opt", "fixed", "from_input"],
help="Occupation method for the calculation.")
parser.add_argument("--smearing", type=str, default="gaussian",
choices=["gaussian", "methfessel-paxton", "marzari-vanderbilt", "fermi-dirac"],
help="Smearing type for occupations by smearing, default is gaussian in this script")
parser.add_argument("--degauss", type=float, default=0.001,
help="Value of the gaussian spreading (Ry) for brillouin-zone integration in metals.(defualt: 0.001 Ry)")
parser.add_argument("--vdw-corr", type=str, default="none",
choices=["dft-d", "dft-d3", "ts", "xdm"],
help="Type of Van der Waals correction in the calculation")
parser.add_argument("--nbnd", type=int, default=None,
help="Number of electronic states (bands) to be calculated")
# -----------------------------------------
# bands.x related parameters
# -----------------------------------------
parser.add_argument("--lsym", type=str, default=".true.",
choices=[".true.", ".false."],
help="set lsym variable in bands.x input.")
# -----------------------------------------------------------------
# run params
# -----------------------------------------------------------------
parser.add_argument("--mpi", type=str, default="",
help="MPI command: like 'mpirun -np 4'")
parser.add_argument("--server", type=str, default="pbs",
choices=["pbs", "yh"],
help="type of remote server, can be pbs or yh")
parser.add_argument("--jobname", type=str, default="qe-band-structure",
help="jobname on the pbs server")
parser.add_argument("--nodes", type=int, default=1,
help="Nodes used in server")
parser.add_argument("--ppn", type=int, default=32,
help="ppn of the server")
# ==========================================================
# transfer parameters from the arg parser to opt_run setting
# ==========================================================
args = parser.parse_args()
system["occupations"] = args.occupations
system["smearing"] = args.smearing
system["degauss"] = args.degauss
bands["lsym"] = args.lsym
# --------------------------------------------------------------
# process crystal_b
def _parse_kpath_fields(fields):
    """Turn one whitespace-split kpath line into [kx, ky, kz, LABEL, nk-or-'|'].

    ``fields[4]`` is either an integer connection count or the literal '|'
    marking a break in the kpath after this point.
    """
    connection = fields[4] if fields[4] == "|" else int(fields[4])
    return [
        float(fields[0]),
        float(fields[1]),
        float(fields[2]),
        fields[3].upper(),
        connection,
    ]

# Fixed: `!= None` / `== None` comparisons replaced by `is not None` / else,
# and the two near-identical append branches collapsed into one helper.
if args.crystal_b is not None:
    # crystal_b from script argument args.crystal_b
    crystal_b = [_parse_kpath_fields(kpoint.split()) for kpoint in args.crystal_b]
else:
    # crystal_b read from file specified by args.crystal_b_file
    # file is in format like this:
    #     5
    #     0.0 0.0 0.0 #GAMMA 15
    #     x.x x.x x.x #XXX |
    #     x.x x.x x.x #XXX 10
    #     x.x x.x x.x #XXX 15
    #     x.x x.x x.x #XXX 20
    # if there is a '|' behind the label it means the path is
    # broken after that point!!!
    crystal_b = []
    with open(args.crystal_b_file, 'r') as fin:
        crystal_b_file = fin.readlines()
    nk = int(crystal_b_file[0])
    for i in range(nk):
        fields = crystal_b_file[i + 1].split()
        # labels in the file are prefixed with '#', e.g. '#GAMMA'
        label = fields[3].split("#")[1]
        crystal_b.append(_parse_kpath_fields(fields[:3] + [label] + fields[4:]))
# --------------------------------------------------------------------
task = StaticRun()
task.get_xyz(args.file)
task.set_kpoints(kpoints_option=args.kpoints_option, kpoints_mp=args.kpoints_mp, crystal_b=crystal_b)
task.set_params(control=control, system=system, electrons=electrons)
task.set_bands(bands_input=bands)
task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn)
task.bands(directory=args.directory, runopt=args.runopt, auto=args.auto)
| 2.421875 | 2 |
harvester/harvester_classifier.py | diogonal/SentimentAnalyser | 1 | 12771853 | <gh_stars>1-10
#########################################################################################################
#
# Author: <NAME>
# Date: Apr/2015
# Name: harvester_classifier.py
# Description: Performs Sentiment, geo location and gender analysis for streamed tweets as they come.
# Logs will be written in a file for each quadrant defined in the settings i.e. log_harvester_TEST_1.txt
#
# Execution: python harvester_classifier.py 1
# Output: log_harvester_TEST_1.txt
#
#########################################################################################################
import json #json docs
#Twitter Streaming API connection
from tweepy.streaming import StreamListener
from tweepy import Stream, OAuthHandler
import couchdb #couchdb connection
import settings #settings defining quadrants, API keys and tokens
import emailer #emailing services
import time #record date and time
import atexit #catch termination
import random #generate random process ID
import sys #sys
from signal import signal, SIGTERM #detect termination by the system
from sys import exit
import tweet_classifier.classifier as classifier #Sentiment Classifier
from genderizer.genderizer import Genderizer #Gender classifier
proc_id = int(random.random() * 1000)
quadrant = str(sys.argv[1]) #get the quadrant argument from command line
settings.defineQuadrant(quadrant) #Assign corresponding quadrant to this process
#Streaming Listener
class listener(StreamListener):
    """Tweepy StreamListener that enriches incoming tweets and saves them.

    For each tweet it: detects the author's gender from the first name,
    runs sentiment analysis, normalises the place bounding box to GeoJSON,
    and stores the result in the module-level CouchDB handle ``db``.
    Every 10,000 stored tweets an email notification is sent.
    """

    # Running totals for this harvester process' lifetime.
    tweet_count = 0        # tweets successfully saved to CouchDB
    processed_tweets = 0   # tweets that received sentiment analysis
    ignored_tweets = 0     # non-English tweets or ones already analysed

    def on_data(self, data):
        """Handle one raw JSON payload from the Twitter streaming API.

        Always returns True so tweepy keeps the stream open; any per-tweet
        failure is logged and swallowed so one bad tweet cannot stop the
        harvest.
        """
        #Load Json from Twitter API
        tweet = json.loads(data)
        try:
            tweet["_id"] = str(tweet['id']) #Get the ID
            lang = tweet['lang']
            name = tweet['user']['name']
            #Gender Analysis: classify on the author's first name only
            name_list = name.split()
            name = name_list[0]
            gender = Genderizer.detect(firstName = name)
            tweet['user']['gender'] = gender
            #Sentiment Analysis (run unconditionally, stored only for English)
            analysed_result = classifier.doSentimentAnalysis(str(tweet['text']))
            if str(lang) == 'en': #only analyse english texts
                if not hasAlreadySentiment(tweet):
                    tweet = updateSentimentDoc(tweet,analysed_result["sentiment"],analysed_result["polarity"],analysed_result["subjectivity"])
                    self.processed_tweets += 1
                else:
                    self.ignored_tweets += 1
            else: #otherwise ignore it!
                self.ignored_tweets += 1
            #Update place coordinates to work with GeoJson
            tweet = updatePlaceDoc(tweet)
            doc = db.save(tweet) #Save tweet into CouchDB
            # print("Obtained Tweet ID: " + str(tweet['id']))
            self.tweet_count += 1
            if (self.tweet_count%10000 == 0):
                #Notify when 10000 new tweets have been stored on database
                msg_update = '10K new tweets on database: ' + settings.database
                emailer.sendEmail(message=str(msg_update))
        except:
            # NOTE(review): bare except hides every failure (including typos
            # in this block); consider narrowing to Exception and logging
            # the actual error details.
            writeLog("Twitter API error")
            pass
        return True

    def on_error(self, status):
        """Log the streaming error status and terminate the process."""
        writeLog("Error during streaming"+str(status))
        sys.exit()
def writeLog(msg):
    """Append *msg* as a single line to this quadrant's harvester log file."""
    file_name = "log_harvester_" + settings.location + "_" + settings.region_quadrant + ".dat"
    # The context manager closes the file; the explicit close() was redundant.
    with open(file_name, "a") as myfile:
        myfile.write(msg + "\n")
#Verify if tweet has already sentiment analysis field
def hasAlreadySentiment(doc):
    """Return True if *doc* already carries a 'sentiment_analysis' field."""
    # A membership test replaces the try/except KeyError probe.
    return "sentiment_analysis" in doc
#Update Document with new fields (sentiment results)
def updateSentimentDoc(doc, sentiment, polarity, subjectivity):
    """Attach a sentiment_analysis sub-document to *doc* and return it.

    Polarity and subjectivity are rounded to two decimals, matching the
    precision the old '%.2f'-format-then-json.loads round-trip produced.
    Building the dict directly avoids serialising to a JSON string only
    to immediately parse it back.
    """
    doc["sentiment_analysis"] = {
        "sentiment": sentiment,
        "polarity": round(polarity, 2),
        "subjectivity": round(subjectivity, 2),
    }
    return doc
#Update the Place field of the tweet when it is populated in order to follow GeoJson format for polygons
def updatePlaceDoc(doc):
    """Make the tweet's place bounding box a valid GeoJSON polygon.

    GeoJSON polygon rings must be closed (first point repeated last), so
    the first corner is appended to the four Twitter corners.  Degenerate
    boxes whose first two corners coincide are dropped entirely.  Tweets
    without a place are returned unchanged.
    """
    if doc["place"] is not None:
        ring = doc["place"]["bounding_box"]["coordinates"][0]
        if ring[0] != ring[1]:
            # Close the ring: [p0, p1, p2, p3] -> [p0, p1, p2, p3, p0]
            doc["place"]["bounding_box"]["coordinates"][0] = ring[:4] + [ring[0]]
        else:
            # Degenerate bounding box (zero area): discard it.
            doc["place"]["bounding_box"] = None
    return doc
#Instance Listener object
listnerTweet = listener()
#Handler function to perform in case of program interruption
def exit_handler(*_signal_args):
    """Log and email a summary when the harvester terminates.

    Registered both with atexit (called with no arguments) and as a
    SIGTERM handler (called with signum and frame).  It therefore accepts
    and ignores optional positional arguments -- the original zero-argument
    signature raised TypeError whenever SIGTERM actually fired.
    """
    error_msg = (
        'Server has interrupted harvesting process. \n'
        + 'Process terminated at: ' + time.strftime('%c') + '\n'
        + 'Location: ' + settings.location + ', Quadrant: ' + settings.region_quadrant + '\n'
        + 'VM: ' + settings.vm_ip + '\n'
        + 'Process Id: ' + str(proc_id) + '\n'
        + 'Total tweets received: %d' % listnerTweet.tweet_count
    )
    writeLog(error_msg)
    emailer.sendEmail(message=str(error_msg))
    writeLog("--------------------------------------")
#Starting process
writeLog("--------------------------------------")
writeLog("Starting streaming process...")
atexit.register(exit_handler)
signal(SIGTERM, exit_handler)
#API authentication
auth = OAuthHandler(settings.consumer_key,settings.consumer_secret)
auth.set_access_token(settings.access_token,settings.access_secret)
twitterStream = Stream(auth, listnerTweet)
server = couchdb.Server(settings.server)
server.resource.credentials = (settings.admin_user, settings.admin_pass)
try:
#Create DB if does not exist
db = server.create(settings.database)
writeLog("Database: " + settings.database + " doesn't exist. Proceeding with creation...")
except:
#Just use existing DB
db = server[settings.database]
notice_msg = 'Server has initiated harvesting process \n'
notice_msg = notice_msg + 'Process initiated at: ' + time.strftime('%c') + '\n'
notice_msg = notice_msg + 'Location: ' + settings.location + ', Quadrant: ' + settings.region_quadrant + '\n'
notice_msg = notice_msg + 'Process Id: ' + str(proc_id) + '\n'
notice_msg = notice_msg + 'Server: ' + settings.server + '\n'
notice_msg = notice_msg + 'Database: ' + settings.database + '\n'
notice_msg = notice_msg + 'VM: ' + settings.vm_ip + '\n'
writeLog(notice_msg)
emailer.sendEmail(message=str(notice_msg))
#Streams not terminate unless the connection is closed, blocking the thread.
#Tweepy offers a convenient async parameter on filter so the stream will run on a new thread.
twitterStream.filter(locations = settings.locations) | 2.46875 | 2 |
day4/part1.py | rockybulwinkle/advent-of-code-2020 | 0 | 12771854 | <filename>day4/part1.py
import sys
import common
if len(sys.argv) != 2:
    print(f"Usage: {sys.argv[0]} file")
    # Without exiting, the script went on to crash on sys.argv[1];
    # bail out with a non-zero status instead.
    sys.exit(1)

passports = common.load_passports(sys.argv[1])

count_p1 = 0
count_p2 = 0
for idx, passport in enumerate(passports):
    if passport.valid_p1():
        count_p1 += 1
    # NOTE(review): assumed valid_p2 is evaluated for every passport, not
    # only part-1-valid ones -- original indentation lost; confirm upstream.
    valid = passport.valid_p2()
    if valid:
        count_p2 += 1
        print(idx, valid, passport)
print()
print(count_p1)
print(count_p2)
| 3.0625 | 3 |
python/GafferImageUI/ResampleUI.py | ddesmond/gaffer | 561 | 12771855 | <gh_stars>100-1000
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import itertools
import Gaffer
import GafferImage
Gaffer.Metadata.registerNode(
GafferImage.Resample,
"description",
"""
Utility node used internally within GafferImage, but
not intended to be used directly by end users.
""",
plugs = {
"matrix" : [
"description",
"""
The transform to be applied to the input image.
This must contain only translation and scaling.
""",
],
"filter" : [
"description",
"""
The filter used to perform the resampling. The name
of any OIIO filter may be specified. The default automatically
picks an appropriate high-quality filter based on whether
or not the image is being enlarged or reduced.
""",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"preset:Default", "",
] + list( itertools.chain(
*[ ( "preset:" + x.title(), x ) for x in GafferImage.FilterAlgo.filterNames() ]
) ),
"filterScale" : [
"description",
"""
A multiplier for the scale of the filter used. Scaling up gives a softer
result, scaling down gives a sharper result ( likely to alias or even create black
patches where no pixels can be found ). Less than 1 is not recommended unless
you have a special technical reason.
""",
],
"boundingMode" : [
"description",
"""
The method used when a filter references pixels outside the
input data window.
""",
"preset:Black", GafferImage.Sampler.BoundingMode.Black,
"preset:Clamp", GafferImage.Sampler.BoundingMode.Clamp,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"expandDataWindow" : [
"description",
"""
Expands the data window by the filter radius, to include the
external pixels affected by the filter.
""",
],
"debug" : [
"description",
"""
Enables debug output. The HorizontalPass setting outputs
an intermediate image filtered just in the horizontal
direction - this is an internal optimisation used when
filtering with a separable filter. The SinglePass setting
forces all filtering to be done in a single pass (as if
the filter was non-separable) and can be used for validating
the results of the two-pass (default) approach.
""",
"preset:Off", GafferImage.Resample.Debug.Off,
"preset:HorizontalPass", GafferImage.Resample.Debug.HorizontalPass,
"preset:SinglePass", GafferImage.Resample.Debug.SinglePass,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
}
)
| 0.878906 | 1 |
config/default.py | sartography/cr-connect-workflow | 2 | 12771856 | <reponame>sartography/cr-connect-workflow
import os
import re
from os import environ
basedir = os.path.abspath(os.path.dirname(__file__))
JSON_SORT_KEYS = False # CRITICAL. Do not sort the data when returning values to the front end.
# The API_TOKEN is used to ensure that the
# workflow synch can work without a lot of
# back and forth.
# you may want to change this to something simple for testing!!
# NB, if you change this in the local endpoint,
# it needs to be changed in the remote endpoint as well
API_TOKEN = environ.get('API_TOKEN', default = '<KEY>')
NAME = "CR Connect Workflow"
SERVER_NAME = environ.get('SERVER_NAME', default="localhost:5000")
DEFAULT_PORT = "5000"
FLASK_PORT = environ.get('PORT0') or environ.get('FLASK_PORT', default=DEFAULT_PORT)
FRONTEND = environ.get('FRONTEND', default="localhost:4200")
BPMN = environ.get('BPMN', default="localhost:5002")
CORS_DEFAULT = f'{FRONTEND}, {BPMN}'
CORS_ALLOW_ORIGINS = re.split(r',\s*', environ.get('CORS_ALLOW_ORIGINS', default=CORS_DEFAULT))
TESTING = environ.get('TESTING', default="false") == "true"
PRODUCTION = (environ.get('PRODUCTION', default="false") == "true")
TEST_UID = environ.get('TEST_UID', default="dhf8r")
ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah3us,cl3wf"))
# Sentry flag
ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true" # To be removed soon
SENTRY_ENVIRONMENT = environ.get('SENTRY_ENVIRONMENT', None)
# Add trailing slash to base path
APPLICATION_ROOT = re.sub(r'//', '/', '/%s/' % environ.get('APPLICATION_ROOT', default="/").strip('/'))
DB_HOST = environ.get('DB_HOST', default="localhost")
DB_PORT = environ.get('DB_PORT', default="5432")
DB_NAME = environ.get('DB_NAME', default="crc_dev")
DB_USER = environ.get('DB_USER', default="crc_user")
DB_PASSWORD = environ.get('DB_PASSWORD', default="<PASSWORD>")
SQLALCHEMY_ENGINE_OPTIONS = {
"pool_pre_ping": True,
"pool_recycle": 300,
}
SQLALCHEMY_POOL_SIZE = 10
SQLALCHEMY_MAX_OVERFLOW = 20
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = environ.get(
'SQLALCHEMY_DATABASE_URI',
default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
)
TOKEN_AUTH_TTL_HOURS = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24))
SECRET_KEY = environ.get('SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")
# %s/%i placeholders expected for uva_id and study_id in various calls.
PB_ENABLED = environ.get('PB_ENABLED', default="false") == "true"
PB_BASE_URL = environ.get('PB_BASE_URL', default="http://localhost:5001/v2.0/").strip('/') + '/' # Trailing slash required
PB_USER_STUDIES_URL = environ.get('PB_USER_STUDIES_URL', default=PB_BASE_URL + "user_studies?uva_id=%s")
PB_INVESTIGATORS_URL = environ.get('PB_INVESTIGATORS_URL', default=PB_BASE_URL + "investigators?studyid=%i")
PB_REQUIRED_DOCS_URL = environ.get('PB_REQUIRED_DOCS_URL', default=PB_BASE_URL + "required_docs?studyid=%i")
PB_STUDY_DETAILS_URL = environ.get('PB_STUDY_DETAILS_URL', default=PB_BASE_URL + "study?studyid=%i")
PB_SPONSORS_URL = environ.get('PB_SPONSORS_URL', default=PB_BASE_URL + "sponsors?studyid=%i")
PB_IRB_INFO_URL = environ.get('PB_IRB_INFO_URL', default=PB_BASE_URL + "current_irb_info/%i")
PB_CHECK_STUDY_URL = environ.get('PB_CHECK_STUDY_URL', default=PB_BASE_URL + "check_study/%i")
# Ldap Configuration
LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No trailing slash or http://
LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1))
LDAP_USER = environ.get('LDAP_USER', default='')
LDAP_PASS = environ.get('LDAP_PASS', default='')
# Github settings
GITHUB_TOKEN = environ.get('GITHUB_TOKEN', None)
GITHUB_REPO = environ.get('GITHUB_REPO', None)
TARGET_BRANCH = environ.get('TARGET_BRANCH', None)
# Email configuration
DEFAULT_SENDER = '<EMAIL>'
FALLBACK_EMAILS = ['<EMAIL>', '<EMAIL>']
MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True)
MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io')
MAIL_PORT = environ.get('MAIL_PORT', default=2525)
MAIL_USE_SSL = environ.get('MAIL_USE_SSL', default=False)
MAIL_USE_TLS = environ.get('MAIL_USE_TLS', default=False)
MAIL_USERNAME = environ.get('MAIL_USERNAME', default='')
MAIL_PASSWORD = environ.get('MAIL_PASSWORD', default='')
| 2.09375 | 2 |
tests/test_aio_trampoline.py | suned/pfun | 126 | 12771857 | import pytest
from hypothesis import assume, given
from pfun import compose, identity
from pfun.aio_trampoline import Done
from pfun.hypothesis_strategies import aio_trampolines, anything, unaries
from .monad_test import MonadTest
class TestTrampoline(MonadTest):
    """Property-based checks that pfun's async trampoline obeys the monad
    and functor laws, plus value-equality semantics of ``Done``."""

    @pytest.mark.asyncio
    @given(aio_trampolines(anything()))
    async def test_right_identity_law(self, trampoline):
        # Monad right identity: m.and_then(Done) == m
        assert (await
                trampoline.and_then(Done).run()) == (await trampoline.run())

    @pytest.mark.asyncio
    @given(anything(), unaries(aio_trampolines(anything())))
    async def test_left_identity_law(self, value, f):
        # Monad left identity: Done(v).and_then(f) == f(v)
        assert (await Done(value).and_then(f).run()) == (await f(value).run())

    @pytest.mark.asyncio
    @given(
        aio_trampolines(anything()),
        unaries(aio_trampolines(anything())),
        unaries(aio_trampolines(anything()))
    )
    async def test_associativity_law(self, trampoline, f, g):
        # Monad associativity: (m >>= f) >>= g == m >>= (x -> f(x) >>= g)
        assert (await trampoline.and_then(f).and_then(g).run(
        )) == (await trampoline.and_then(lambda x: f(x).and_then(g)).run())

    @given(anything())
    def test_equality(self, value):
        # Done compares by wrapped value, not identity.
        assert Done(value) == Done(value)

    @given(anything(), anything())
    def test_inequality(self, first, second):
        assume(first != second)
        assert Done(first) != Done(second)

    @pytest.mark.asyncio
    @given(anything())
    async def test_identity_law(self, value):
        # Functor identity: m.map(identity) == m
        assert (await
                Done(value).map(identity).run()) == (await Done(value).run())

    @pytest.mark.asyncio
    @given(unaries(anything()), unaries(anything()), anything())
    async def test_composition_law(self, f, g, value):
        # Functor composition: m.map(g).map(f) == m.map(f . g)
        h = compose(f, g)
        assert (await Done(value).map(g).map(f).run()
                ) == (await Done(value).map(h).run())
| 2.359375 | 2 |
Cogs/administration.py | CptSnuffy/Research-Bot | 0 | 12771858 | import os
from datetime import datetime
from distutils.util import strtobool
import discord
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
LOCKDOWN_ACTIVE = bool(os.getenv('LOCKDOWN_ACTIVE'))
class Administration(commands.Cog):
    """Moderation commands (ban/unban/kick/clear) plus a whois lookup.

    Every moderation action is appended to a local audit-log file.
    """

    def __init__(self, bot):
        # setup() instantiates this cog as Administration(bot); without an
        # __init__ accepting the bot, that call raised TypeError.
        self.bot = bot

    @commands.command()
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, member: discord.Member, *, reason=None):
        """Ban *member* from the guild, with an optional reason."""
        await member.ban(reason=reason)
        await ctx.send(f'{member} has been banned.')
        self.log_action('ban')

    @commands.command()
    @commands.has_permissions(ban_members=True)
    async def unban(self, ctx, member):
        """Unban a user given as 'name#discriminator'."""
        # Fixed typo: ctx.guid -> ctx.guild.
        banned_users = await ctx.guild.bans()
        member_name, member_discriminator = member.split('#')
        for ban_entry in banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (member_name, member_discriminator):
                await ctx.guild.unban(user)
                await ctx.send(f'{user} has been unbanned.')
                self.log_action('unban')

    @commands.command()
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, member: discord.Member, *, reason=None):
        """Kick *member* from the guild, with an optional reason."""
        await member.kick(reason=reason)
        await ctx.send(f'{member} has been kicked.')
        self.log_action('kick')

    @commands.command(aliases=['clean', 'cls', 'sweep', 'purge'])
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount=None):
        """Delete *amount* recent messages plus the invoking command itself."""
        amount = int(amount)
        await ctx.channel.purge(limit=amount + 1)
        self.log_action('clear')

    def log_action(self, action):
        """Append one timestamped audit line to the admin log file.

        Not a coroutine: the original code awaited its None return value,
        which raised TypeError at runtime; callers now invoke it directly.
        The 'with' block closes the file, so no explicit close() is needed.
        """
        # Raw string keeps the Windows path separators unambiguous.
        with open(r'.\Data\AdminLogs.txt', 'a') as file:
            file.write(f'{action} occured at {datetime.now()}\n')

    @commands.command()
    async def whois(self, ctx, member: discord.Member = None):
        """Show an embed with account details for *member* (default: author)."""
        if not member:
            member = ctx.message.author
        roles = [role for role in member.roles]
        embed = discord.Embed(colour=ctx.author.color, timestamp=ctx.message.created_at,
                              title=str(member))
        embed.set_thumbnail(url=member.avatar_url)
        embed.set_footer(text=f"Requested by {ctx.author}")
        embed.add_field(name="Display Name:", value=member.display_name)
        embed.add_field(name="ID:", value=member.id)
        embed.add_field(name="Created Account On:", value=member.created_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
        embed.add_field(name="Joined Server On:", value=member.joined_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
        embed.add_field(name="Roles:", value="".join([role.mention for role in roles[1:]]))
        embed.add_field(name="Highest Role:", value=member.top_role.mention)
        await ctx.send(embed=embed)
#DEBUG WIP LISTENER UNCOMMENT AND SETUP ENVIRONMENT TO LOCKDOWN SERVER
# @commands.Cog.listener()
# async def on_member_join(self, member):
# if LOCKDOWN_ACTIVE == True:
# print(LOCKDOWN_ACTIVE)
# await member.send('Server is still a WIP please try again later')
# await member.kick(reason='Server is still a WIP please try again later')
# print('member kicked')
# else:
# print(f'member not kicked lockdown = {LOCKDOWN_ACTIVE}')
def setup(bot):
    """discord.py extension entry point: register the Administration cog."""
    bot.add_cog(Administration(bot))
| 2.328125 | 2 |
tests/test_sklearn_normalizer_converter.py | twosense/sklearn-onnx | 323 | 12771859 | # SPDX-License-Identifier: Apache-2.0
"""
Tests scikit-normalizer converter.
"""
import unittest
import numpy
from sklearn.preprocessing import Normalizer
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import (
Int64TensorType, FloatTensorType, DoubleTensorType)
from test_utils import dump_data_and_model, TARGET_OPSET
class TestSklearnNormalizerConverter(unittest.TestCase):
    """ONNX conversion tests for sklearn.preprocessing.Normalizer.

    The original eight tests repeated the same convert/assert/dump
    boilerplate; it is factored into _convert and _dump helpers.
    """

    # Shared two-sample input used by every round-trip check.
    _X = [[1, -1, 3], [3, 1, 2]]

    def _convert(self, model, input_type, **extra):
        """Convert *model* to ONNX with a single 'input' of *input_type*."""
        model_onnx = convert_sklearn(
            model, "scikit-learn normalizer",
            [("input", input_type)],
            target_opset=TARGET_OPSET, **extra)
        self.assertTrue(model_onnx is not None)
        return model_onnx

    def _dump(self, model, model_onnx, dtype, basename):
        """Run the ONNX model against the shared input and compare outputs."""
        dump_data_and_model(
            numpy.array(self._X, dtype=dtype),
            model, model_onnx, basename=basename)

    def test_model_normalizer(self):
        model_onnx = self._convert(
            Normalizer(norm="l2"), Int64TensorType([None, 1]))
        self.assertTrue(len(model_onnx.graph.node) == 1)

    def test_model_normalizer_blackop(self):
        model = Normalizer(norm="l2")
        model_onnx = self._convert(
            model, FloatTensorType([None, 3]), black_op={"Normalizer"})
        # With Normalizer black-listed, the converter must decompose the op.
        self.assertNotIn('op_type: "Normalizer', str(model_onnx))
        self._dump(model, model_onnx, numpy.float32,
                   "SklearnNormalizerL1BlackOp-SkipDim1")

    def test_model_normalizer_float_l1(self):
        model = Normalizer(norm="l1")
        model_onnx = self._convert(model, FloatTensorType([None, 3]))
        self.assertTrue(len(model_onnx.graph.node) == 1)
        self._dump(model, model_onnx, numpy.float32,
                   "SklearnNormalizerL1-SkipDim1")

    def test_model_normalizer_float_l2(self):
        model = Normalizer(norm="l2")
        model_onnx = self._convert(model, FloatTensorType([None, 3]))
        self.assertTrue(len(model_onnx.graph.node) == 1)
        self._dump(model, model_onnx, numpy.float32,
                   "SklearnNormalizerL2-SkipDim1")

    def test_model_normalizer_double_l1(self):
        model = Normalizer(norm="l1")
        model_onnx = self._convert(model, DoubleTensorType([None, 3]))
        self._dump(model, model_onnx, numpy.float64,
                   "SklearnNormalizerL1Double-SkipDim1")

    def test_model_normalizer_double_l2(self):
        model = Normalizer(norm="l2")
        model_onnx = self._convert(model, DoubleTensorType([None, 3]))
        self._dump(model, model_onnx, numpy.float64,
                   "SklearnNormalizerL2Double-SkipDim1")

    def test_model_normalizer_float_noshape(self):
        model = Normalizer(norm="l2")
        model_onnx = self._convert(model, FloatTensorType([]))
        self.assertTrue(len(model_onnx.graph.node) == 1)
        self._dump(model, model_onnx, numpy.float32,
                   "SklearnNormalizerL2NoShape-SkipDim1")
if __name__ == "__main__":
unittest.main()
| 2.5 | 2 |
nqueens.py | aliyaconrad/nqueens | 0 | 12771860 | import random as r
def solve(board_size):
    """Solves the n-queens problem given a board size n

    Builds a near-solution board (knight-pattern placement with eight queens
    deliberately scrambled) and repairs it with the min-conflicts heuristic,
    restarting from a fresh board if no solution appears within maxSteps.

    Args:
        board_size (int): The size of the n*n board containing n queens.
    Returns:
        board (int[]): A list where each element corresponds to the row of a queen. It is 1-based.
    """
    # Maximum repair steps per restart of the min-conflicts loop.
    maxSteps = 20
    # Restart with a fresh scrambled board until a solution is found.
    while True:
        # The board plus a list of (row, column) pairs for the scrambled queens.
        board, conflictList = initializeBoard(board_size)
        # 'max_steps' bound from the classic min-conflicts algorithm.
        for i in range(maxSteps):
            # Return as soon as the current board is conflict-free.
            if solution(board, board_size):
                return board
            # Pick a random still-conflicting queen to repair.
            var = r.choice(conflictList)
            # minConflicts moves that queen to its least-conflicted row and
            # reports whether it still has any conflicts afterwards.
            board, conflicting = minConflicts(board, board_size, var)
            # If the repaired queen is now conflict-free...
            if not conflicting:
                # ...stop tracking it in the conflict list.
                conflictList.remove(var)
def initializeBoard(boardSize):
    """Build a near-solution board, then deliberately scramble eight queens.

    The construction reduces the problem to a knight-pattern placement that
    is conflict-free on rows, columns and diagonals; the congruence class of
    boardSize (mod 6) selects which of three placement formulas applies.

    Args:
        boardSize (int): The size of the n*n board.
    Returns:
        (board, conflictList): board[i] is the 1-based row of the queen in
        column i; conflictList holds (row, column) tuples for the scrambled
        queens that may now be in conflict.
    """
    half = int(boardSize / 2)
    rowValues = list(range(1, boardSize + 1))
    columnIndexes = list(range(boardSize))
    conflicts = []
    if boardSize % 6 == 2:
        # Case 1: boardSize congruent to 2 (mod 6).
        board = [0] * (boardSize)
        for i in range(1, half + 1):
            left = (2 * (i - 1) + half - 1) % boardSize
            right = boardSize - (left + 1)
            board[left] = i
            board[right] = boardSize + 1 - i
    elif (boardSize - 1) % 6 == 2:
        # Case 2: boardSize - 1 congruent to 2 (mod 6); last column is fixed.
        board = [0] * (boardSize)
        for i in range(1, half + 1):
            left = (2 * (i - 1) + half - 1) % (boardSize - 1)
            right = boardSize - (left + 2)
            board[left] = i
            board[right] = boardSize - i
        board[boardSize - 1] = boardSize
    else:
        # Default case: interleave the upper and lower halves of the rows.
        board = []
        for i in range(1, half + 1):
            board += [half + i, i]
        if boardSize % 2 == 1:
            board.append(boardSize)
    # Scramble eight queens (a nod to the classic eight queens problem) so
    # the min-conflicts repair loop has real work to do.
    for _ in range(8):
        chosenRow = r.choice(rowValues)
        chosenColumn = r.choice(columnIndexes)
        board[chosenColumn] = chosenRow
        conflicts.append((chosenRow, chosenColumn))
    return board, conflicts
def minConflicts(board, boardSize, var):
    """Move the queen in column var[1] to its least-conflicted row.

    Args:
        board (int[]): 1-based row of the queen in each column.
        boardSize (int): The size of the n*n chess board.
        var ((int, int)): (row, column) of the queen being repaired.
    Returns:
        (board, conflicting): the updated board and whether the moved queen
        still has at least one conflict.
    """
    column = var[1]
    # Tally how many queens occupy each row and each diagonal.
    rowCount = [0] * (boardSize + 1)
    risingCount = [0] * (2 * boardSize + 1)
    fallingCount = [0] * (2 * boardSize + 1)
    for col in range(boardSize):
        row = board[col]
        rowCount[row] += 1
        risingCount[row - col + boardSize] += 1
        fallingCount[row + col] += 1
    # Scan every candidate row for this column and keep the lowest total
    # occupancy of its row plus both diagonals.
    bestScore = boardSize
    bestRow = 0
    for candidate in range(1, boardSize + 1):
        score = (rowCount[candidate]
                 + risingCount[candidate - column + boardSize]
                 + fallingCount[candidate + column])
        if score < bestScore:
            bestScore = score
            bestRow = candidate
    # Relocate the queen; a zero score means it is now conflict-free.
    board[column] = bestRow
    return board, bestScore != 0
def solution(board, boardSize):
    """Return True when the board is a valid n-queens solution.

    Args:
        board (int[]): 1-based row of the queen in each column.
        boardSize (int): The size of the n*n chess board.
    """
    # A missing/empty board can never be a solution.
    if not board:
        return False
    # Every row must appear exactly once: duplicates collapse in a set, so
    # equal lengths mean each queen occupies a distinct row.
    if len(set(board)) != len(board):
        return False
    # Each queen's rising and falling diagonal indices must also be unique.
    rising = [board[col] + col for col in range(boardSize)]
    falling = [board[col] - col for col in range(boardSize)]
    return len(set(rising)) == len(rising) and len(set(falling)) == len(falling)
| 4.28125 | 4 |
plusseg/modeling/segmentor/generalized_segmentor.py | tonysy/SegmentationToolbox.PyTorch | 13 | 12771861 | <reponame>tonysy/SegmentationToolbox.PyTorch
"""
Implement the Generalized Segmentation Model
"""
import torch
from torch import nn
# from plusseg.structures
from ..backbone import build_backbone
from ..decoder import build_decoder
from ..postprocessor import build_post_processor
from ..decoder import SegmentationLossComputation
class GeneralizeSegmentor(nn.Module):
    """
    Main class for the Generalized Segmentation Model; currently supports
    semantic segmentation. It consists of three main parts:
    - backbone: extracts features from the input images
    - decoder: turns features into per-pixel predictions
    - post_processor: optional output refinement (currently disabled)
    """
    def __init__(self, cfg):
        super(GeneralizeSegmentor, self).__init__()
        self.backbone = build_backbone(cfg)
        self.decoder = build_decoder(cfg)
        # Post-processing stage is currently disabled.
        # self.postprocessor = build_post_processor(cfg)
        # Loss is computed here (not in the decoder) so training/inference
        # can share the same decoder forward pass.
        self.loss_calculator = SegmentationLossComputation(
            aux_factor = cfg.MODEL.DECODER.AUX_FACTOR,
            ignore_index = cfg.MODEL.DECODER.IGNORE_INDEX
        )
    def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[Tenosr]): ground-truth mask for this image(optional)
        Returns:
            results (list[Tensor]): the output from the model; in training
            mode, a (predictions, {'seg_loss': loss}) pair instead
        """
        if self.training and targets is None:
            raise ValueError("In the training time, targets should be passed")
        # images = to_image_list(images)
        features = self.backbone(images)
        # imsize = [images.size()[-2], images.size()[-1]]
        # The decoder upsamples its output back to the input spatial size
        # (images.size()[2:] — assumes a 4-D NCHW batch tensor).
        final_out = self.decoder(features, images.size()[2:])
        # if self.postprocessor:
        #     final_out = self.postprocessor(decoder_out)
        # else:
        #     final_out = decoder_out
        if self.training:
            seg_loss = self.loss_calculator(final_out, targets)
            return final_out, dict(seg_loss=seg_loss)
        return final_out
#!/usr/bin/env python3
def isBalanced(s: str) -> bool:
    """Return True when every bracket in s is properly matched and nested.

    Any character that is not an opening bracket is treated as a closer and
    must match the most recently opened bracket.
    """
    openers = {'(': ')', '{': '}', '[': ']'}
    expected = []
    for symbol in s:
        closer = openers.get(symbol)
        if closer is not None:
            # Opening bracket: remember which closer we now expect.
            expected.append(closer)
        elif not expected or symbol != expected.pop():
            # Closer with nothing open, or the wrong closer for the top.
            return False
    # Balanced only if nothing is left open.
    return not expected
if __name__ == "__main__":
print(isBalanced("((){[]})"))
print(isBalanced(""))
print(isBalanced("[[["))
| 3.875 | 4 |
import numpy as np
from flask import Flask, session,abort,request, jsonify, render_template,redirect,url_for,flash
import pickle
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from keras.models import load_model
import os
import stripe
import datetime
import keras
from keras import optimizers
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input, Dense
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dropout, Activation
# Flask application object; route handlers below attach to it.
app = Flask(__name__)
@app.route('/')
def home():
    """Render the index template for the root URL."""
    return render_template('index.html')
@app.route('/heartAttack',methods=['POST'])
def heartAttack():
    """Run the saved Keras model on submitted form values and show the result."""
    # NOTE(review): the model is reloaded from disk on every request; loading
    # once at startup would be much faster — TODO confirm deployment constraints.
    model = load_model('models/heart_disease_model.h5')
    # All form fields are read as ints, in form order.
    int_features = [[int(x) for x in request.form.values()]]
    # Wraps the nested list in another list/array — presumably to match the
    # model's expected batch input shape; verify against the trained model.
    final_features = [np.array(int_features)]
    prediction_proba = model.predict(final_features)
    # Threshold the probability output at 0.5 to obtain a boolean class.
    prediction = (prediction_proba > 0.5)
    # NOTE(review): the message mentions a stroke while the route is named
    # heartAttack — confirm which condition the model actually predicts.
    return render_template('index.html', prediction_text='THANK YOU FOR YOUR PURCHASE, \n FOR THE DATA YOU ENTERED \n IT IS PREDICTED {} \n THAT THE PATIENT WILL HAVE A STROKE WITHIN \n THE NEXT 10 YEARS.'.format(prediction))
if __name__ == "__main__":
app.run(debug=True, port=8080) #debug=True,host="0.0.0.0",port=50000
| 2.875 | 3 |
tests/test_block_observation.py | knifecake/leapdna-python | 0 | 12771864 | <reponame>knifecake/leapdna-python
from leapdna.errors import LeapdnaError
from leapdna.blocks.locus import Locus
from leapdna.blocks.allele import Allele
from .support import TestCase
from leapdna.blocks import Observation
class TestObservationBlock(TestCase):
    """Smoke tests for the Observation block."""

    def test_can_be_instantiated(self):
        """An Observation built from an Allele exists and has a usable repr."""
        locus = Locus('l1')
        allele = Allele('a1', locus)
        obs = Observation(allele, count=2, frequency=0.25)
        self.assertTrue(obs is not None)
        # The class name ('Observation') should appear in the string form;
        # 'bservation' sidesteps any case differences on the first letter.
        # (Fixed: stray dataset metadata had been fused onto this line.)
        self.assertTrue('bservation' in str(obs))
from numbers import Real
from typing import Union
from hypothesis import given
from symba.base import Expression
from tests.utils import equivalence
from . import strategies
@given(strategies.expressions, strategies.non_zero_reals_or_expressions)
def test_basic(expression: Expression,
               real_or_expression: Union[Real, Expression]) -> None:
    """Dividing by any non-zero operand always yields an Expression."""
    result = expression / real_or_expression
    assert isinstance(result, Expression)
@given(strategies.finite_non_zero_expressions)
def test_self_inverse(expression: Expression) -> None:
    """A finite non-zero expression divided by itself equals one."""
    assert expression / expression == 1
@given(strategies.finite_non_zero_expressions,
       strategies.finite_non_zero_expressions)
def test_commutative_case(first: Expression, second: Expression) -> None:
    """first / second == second / first exactly when |first| == |second|."""
    assert equivalence(first / second == second / first,
                       abs(first) == abs(second))
@given(strategies.definite_expressions, strategies.unary_reals_or_expressions)
def test_right_neutral_element(expression: Expression,
                               real_or_expression: Union[Real, Expression]
                               ) -> None:
    """A unary operand acts as a right identity for division."""
    assert expression / real_or_expression == expression
@given(strategies.finite_expressions, strategies.finite_expressions,
       strategies.definite_non_zero_reals_or_expressions)
def test_add_dividend(first: Expression,
                      second: Expression,
                      real_or_expression: Expression) -> None:
    """Division distributes over addition in the dividend."""
    result = (first + second) / real_or_expression
    assert result == ((first / real_or_expression)
                      + (second / real_or_expression))
@given(strategies.finite_expressions, strategies.finite_expressions,
       strategies.definite_non_zero_reals_or_expressions)
def test_sub_dividend(first: Expression,
                      second: Expression,
                      real_or_expression: Expression) -> None:
    """Division distributes over subtraction in the dividend."""
    result = (first - second) / real_or_expression
    assert result == ((first / real_or_expression)
                      - (second / real_or_expression))
@given(strategies.finite_expressions,
       strategies.definite_non_zero_reals_or_expressions,
       strategies.definite_non_zero_reals_or_expressions)
def test_mul_divisor(expression: Expression,
                     first_real_or_expression: Union[Real, Expression],
                     second_real_or_expression: Union[Real, Expression]
                     ) -> None:
    """Dividing by a product equals dividing by each factor in turn."""
    result = expression / (first_real_or_expression
                           * second_real_or_expression)
    assert result == ((expression / first_real_or_expression)
                      / second_real_or_expression)
| 2.859375 | 3 |
oidc_auth/tests/test_views.py | intelie/django-oidc-auth | 25 | 12771866 | <filename>oidc_auth/tests/test_views.py
from urllib.parse import urlparse, parse_qs
from django.conf import settings
from django.contrib.auth import get_user_model
from importlib import import_module
from django.test import Client
from nose import tools
import mock
from .utils import OIDCTestCase
from oidc_auth.models import OpenIDProvider, Nonce
from oidc_auth.settings import oidc_settings
# Resolve the project's active user model once at import time.
UserModel = get_user_model()
class TestAuthorizationPhase(OIDCTestCase):
    """First OIDC leg: the login view that redirects to the provider's
    authorization endpoint."""
    def setUp(self):
        super(TestAuthorizationPhase, self).setUp()
        self.client = Client()
    def tearDown(self):
        # Providers are created per test; remove them so tests stay isolated.
        OpenIDProvider.objects.all().delete()
    def test_get_login(self):
        """Without a default provider, GET /oidc/login/ renders the login form."""
        with oidc_settings.override(DEFAULT_PROVIDER={}):
            response = self.client.get('/oidc/login/')
        tools.assert_equal(response.status_code, 200)
        tools.assert_true(any(t.name == 'oidc/login.html' for t in response.templates))
    @mock.patch('requests.get')
    def test_post_login(self, get_mock):
        """POSTing an issuer redirects there with the OIDC query parameters."""
        # The discovery request to the issuer is mocked out.
        get_mock.return_value = self.response_mock
        with oidc_settings.override(DEFAULT_PROVIDER=self.configs):
            response = self.client.post('/oidc/login/', data={
                'issuer': 'http://example.it'
            })
        tools.assert_equal(response.status_code, 302)
        redirect_url = urlparse(response['Location'])
        tools.assert_equal('http://example.it', '%s://%s' % (redirect_url.scheme, redirect_url.hostname))
        params = parse_qs(redirect_url.query)
        tools.assert_equal(set(params.keys()),
                           {'response_type', 'scope', 'client_id', 'state'})
    def test_login_complete_without_oidc_session(self):
        """GET /oidc/complete without session state responds with 301."""
        response = self.client.get('/oidc/complete') # without oidc_state in session *on purpose*
        tools.assert_equal(response.status_code, 301)
    @mock.patch('requests.get')
    def test_login_default_provider(self, get_mock):
        """With a default provider configured, login redirects immediately."""
        configs = dict(self.configs,
                       authorization_endpoint='http://default.example.it/authorize')
        get_mock.return_value.status_code = 200
        get_mock.return_value.json.return_value = configs
        with oidc_settings.override(DEFAULT_PROVIDER=configs):
            response = self.client.get('/oidc/login/')
        tools.assert_equal(response.status_code, 302)
        redirect_url = urlparse(response['Location'])
        tools.assert_equal('default.example.it', redirect_url.hostname)
class TestTokenExchangePhase(OIDCTestCase):
    """Second OIDC leg: exchanging the authorization code for tokens."""
    def setUp(self):
        super(TestTokenExchangePhase, self).setUp()
        self.client = Client()
        # Create a real backend session and attach its key as the client cookie.
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
    def test_invalid_request(self):
        """Requests missing code and/or state must be rejected with 400."""
        session = self.client.session
        session['oidc_state'] = 'foobar'
        session.save()
        tools.assert_equal(400, self.client.post('/oidc/complete/').status_code)
        tools.assert_equal(400, self.client.post('/oidc/complete/', data={
            'code': '12345'}).status_code)
        tools.assert_equal(400, self.client.post('/oidc/complete/', data={
            'state': '12345'}).status_code)
    @mock.patch('requests.post')
    def test_post_token_endpoint(self, post_mock):
        """A valid code+state hits the provider token endpoint with verify=True."""
        # Mocked token-endpoint response.
        response = mock.MagicMock()
        response.status_code = 200
        response.json.return_value = {
            'access_token': '12345',
            'refresh_token': '12345',
            'expires_in': 3600,
            'token_type': 'Bearer',
            'id_token': '12345'
        }
        post_mock.return_value = response
        state = 'abcde'
        Nonce.objects.create(issuer_url='http://example.it', state=state, redirect_url='http://back.to.me')
        provider = OpenIDProvider.objects.create(issuer='http://example.it',
                                                 client_id='12345',
                                                 client_secret='abcde',
                                                 token_endpoint='http://example.it/token',
                                                 authorization_endpoint='http://a.b/auth',
                                                 userinfo_endpoint='http://a.b/userinfo',
                                                 jwks_uri='http://a.b/jwks')
        user = UserModel.objects.create(username='foobar')
        session = self.client.session
        session['oidc_state'] = state
        session.save()
        # The id-token verification and the userinfo request are both mocked.
        with mock.patch.object(OpenIDProvider, 'verify_id_token') as mock_verify_id_token:
            with mock.patch('requests.get') as get_mock:
                response = mock.MagicMock()
                response.status_code = 200
                response.json.return_value = {
                    'sub': 'foobar',
                    'given_name': 'teste',
                    'family_name': 'teste',
                    'preferred_username': 'test',
                    'email': 'test_email',
                }
                get_mock.return_value = response
                mock_verify_id_token.return_value = { 'sub': 'foobar' }
                response = self.client.get('/oidc/complete/', data={
                    'state': state,
                    'code': '12345'
                })
        post_mock.assert_called_with(provider.token_endpoint, params={
            'grant_type': 'authorization_code',
            'code': '12345',
            'redirect_uri': 'http://testserver/oidc/complete/'
        }, auth=provider.client_credentials, verify=True)
    @mock.patch('requests.post')
    def test_post_token_endpoint_with_invalid_ssl(self, post_mock):
        """With VERIFY_SSL disabled, the token request is sent with verify=False."""
        with oidc_settings.override(VERIFY_SSL=False):
            response = mock.MagicMock()
            response.status_code = 200
            response.json.return_value = {
                'access_token': '<PASSWORD>',
                'refresh_token': '<PASSWORD>',
                'expires_in': 3600,
                'token_type': 'Bearer',
                'id_token': (
                    '<KEY>'
                    'yI6ICJodHRwOi8vc2VydmVyLmV4YW1wbGUuY29tIiwKICJzdWIiOiAiMjQ4Mjg5'
                    '<KEY>'
                    '<KEY>'
                    'AKfQ.ggW8hZ1EuVLuxNuuIJKX_V8a_OMXzR0EHR9R6jgdqrOOF4daGU96Sr_P6q'
                    'Jp6IcmD3HP99Obi1PRs-cwh3LO-p146waJ8IhehcwL7F09JdijmBqkvPeB2T9CJ'
                    'NqeGpe-gccMg4vfKjkM8FcGvnzZUN4_KSP0aAp1tOJ1zZwgjxqGByKHiOtX7Tpd'
                    'QyHE5lcMiKPXfEIQILVq0pc_E2DzL7emopWoaoZTF_m0_N0YzFC6g6EJbOEoRoS'
                    'K5hoDalrcvRYLSrQAZZKflyuVCyixEoV9GfNQC3_osjzw2PAithfubEEBLuVVk4'
                    'XUVrWOLrLl0nx7RkKU8NXNHq-rvKMzqg'),
            }
            post_mock.return_value = response
            state = 'abcde'
            Nonce.objects.create(issuer_url='http://example.it', state=state, redirect_url='http://back.to.me')
            provider = OpenIDProvider.objects.create(issuer='http://example.it',
                                                     client_id='12345',
                                                     client_secret='abcde',
                                                     token_endpoint='http://example.it/token',
                                                     authorization_endpoint='http://a.b/',
                                                     userinfo_endpoint='http://a.b/',
                                                     jwks_uri='http://a.b/')
            session = self.client.session
            session['oidc_state'] = state
            session.save()
            user = UserModel.objects.create(username='foobar')
            with mock.patch.object(OpenIDProvider, 'verify_id_token') as mock_verify_id_token:
                with mock.patch('requests.get') as get_mock:
                    response = mock.MagicMock()
                    response.status_code = 200
                    response.json.return_value = {
                        'sub': 'foobar',
                        'given_name': 'teste',
                        'family_name': 'teste',
                        'preferred_username': 'test',
                        'email': 'test_email',
                    }
                    get_mock.return_value = response
                    mock_verify_id_token.return_value = { 'sub': 'foobar' }
                    response = self.client.get('/oidc/complete/', data={
                        'state': state,
                        'code': '12345'
                    })
            post_mock.assert_called_with(provider.token_endpoint, params={
                'grant_type': 'authorization_code',
                'code': '12345',
                'redirect_uri': 'http://testserver/oidc/complete/'
            }, auth=provider.client_credentials, verify=False)
| 2.21875 | 2 |
tests/features/steps/datasets_repo/test_datasets_delete.py | dataloop-ai/dtlpy | 10 | 12771867 | <gh_stars>1-10
import behave
@behave.when(u'I delete the dataset that was created by name')
def step_impl(context):
    """Delete the scenario's dataset, addressing it by name."""
    context.project.datasets.delete(dataset_name=context.dataset.name,
                                    sure=True,
                                    really=True)
@behave.when(u'I delete the dataset that was created by id')
def step_impl(context):
    """Delete the scenario's dataset, addressing it by id."""
    context.project.datasets.delete(dataset_id=context.dataset.id,
                                    sure=True,
                                    really=True)
@behave.when(u'I try to delete a dataset by the name of "{dataset_name}"')
def step_impl(context, dataset_name):
try:
context.project.datasets.delete(dataset_name=dataset_name,
sure=True,
really=True)
context.error = None
except Exception as e:
context.error = e
@behave.then(u'No dataset was deleted')
def step_impl(context):
    """Assert the project still has the dataset count recorded earlier."""
    assert len(context.project.datasets.list()) == context.dataset_count
| 2.453125 | 2 |
pyglet-hg/tests/window/WINDOW_FULLSCREEN_SIZE.py | sangh/LaserShow | 21 | 12771868 | #!/usr/bin/env python
'''Test that window can be set to and from various fullscreen sizes.
Expected behaviour:
One window will be opened. Press a number to switch to the corresponding
fullscreen size; hold control and press a number to switch back
to the corresponding window size:
0 - Default size
1 - 320x200
2 - 640x480
3 - 800x600
4 - 1024x768
5 - 1280x800 (widescreen)
6 - 1280x1024
In all cases the window bounds will be indicated by a green rectangle
which should be completely visible.
All events will be printed to the terminal.
Press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import window
from pyglet.window.event import WindowEventLogger
from pyglet.window import key
from pyglet.gl import *
import window_util
class WINDOW_SET_FULLSCREEN(unittest.TestCase):
    """Interactive test: number keys switch fullscreen sizes; Ctrl+number
    restores the corresponding windowed size (see module docstring)."""
    def on_key_press(self, symbol, modifiers):
        # Holding Ctrl means "restore from fullscreen" instead of "set".
        fullscreen = not modifiers & key.MOD_CTRL
        doing = fullscreen and 'Setting' or 'Restoring from'
        if symbol == key._0:
            # Key 0: use the display's default size (no explicit dimensions).
            print '%s default size' % doing
            self.w.set_fullscreen(fullscreen)
            return
        elif symbol == key._1:
            width, height = 320, 200
        elif symbol == key._2:
            width, height = 640, 480
        elif symbol == key._3:
            width, height = 800, 600
        elif symbol == key._4:
            width, height = 1024, 768
        elif symbol == key._5:
            width, height = 1280, 800 # 16:10
        elif symbol == key._6:
            width, height = 1280, 1024
        else:
            # Any other key is ignored.
            return
        print '%s width=%d, height=%d' % (doing, width, height)
        self.w.set_fullscreen(fullscreen, width=width, height=height)
    def on_expose(self):
        # Red clear plus a green border marking the client area bounds.
        glClearColor(1, 0, 0, 1)
        glClear(GL_COLOR_BUFFER_BIT)
        window_util.draw_client_border(self.w)
        self.w.flip()
    def test_set_fullscreen(self):
        self.w = w = window.Window(200, 200)
        # The test case itself handles key presses; the logger prints events.
        w.push_handlers(self)
        w.push_handlers(WindowEventLogger())
        self.on_expose()
        try:
            # Pump events until the user closes the window (ESC).
            while not w.has_exit:
                w.dispatch_events()
            w.close()
        except SystemExit:
            # Child process on linux calls sys.exit(0) when it's done.
            pass
pass
# Run the interactive test directly.
if __name__ == '__main__':
    unittest.main()
| 3.015625 | 3 |
from collections import Counter
import algebra
def majority_vote(labels):
    """Return the most common label, assuming nearest-to-farthest ordering.

    Ties are broken by dropping the farthest label and revoting, which must
    eventually produce a unique winner.
    """
    tally = Counter(labels)
    top_label, top_count = tally.most_common(1)[0]
    winners = sum(1 for count in tally.values() if count == top_count)
    if winners > 1:
        # Tie: retry without the farthest neighbor.
        return majority_vote(labels[:-1])
    return top_label
def knn_classify(k, labeled_points, new_point):
    """each labeled point should be a pair (point, label)

    Sorts the labeled points by distance to new_point, keeps the labels of
    the k nearest, and lets majority_vote resolve any ties.
    NOTE: uses Python 2 tuple-unpacking lambda syntax; not valid in Python 3.
    """
    by_distance = sorted(labeled_points,
                         key=lambda (point, _): algebra.distance(point,
                                                                 new_point))
    k_nearest_labels = [label for _, label in by_distance[:k]]
    return majority_vote(k_nearest_labels)
| 3.59375 | 4 |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Fork counts per block for 30 proof-of-work runs (columns T1..T30).
df = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/forks/forks_pow.csv')
# X axis: block index 0..99.
x = np.arange(0.0, 100, 1)
# NOTE(review): 'T21' appears twice in this selection — confirm whether one
# of them should be a different column from the CSV.
data = df[['T1', 'T2','T3','T4', 'T5','T6','T7', 'T8','T9','T10', 'T11','T12','T13', 'T14','T15','T16', 'T17','T18','T19', 'T20','T21','T21', 'T22','T23','T24', 'T25','T26','T27', 'T28','T29','T30']]
fig, ax = plt.subplots(figsize=(8,5))
# Mean fork count per block with a 95% confidence interval (1.96/sqrt(30)),
# both mapped to a log10 scale before plotting.
ax.errorbar(x, np.log10(data.mean(axis=1)), yerr=np.log10(data.std(axis=1)*1.96/np.sqrt(30)) , fmt='.')
plt.xlabel('# blocks', fontsize=16)
plt.ylabel('log (average # forks ' + '$f_b$)', fontsize=16)
plt.grid(linestyle=':',linewidth=1.5)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.tick_params(axis='both', which='major', labelsize=16)
ax.legend(loc=1,prop={'size': 16})
ax.set_xlim(xmin=0, xmax=100)
ax.set_ylim(ymin=-1, ymax=3)
plt.savefig('ev_forks_pow.png')
plt.show()
| 2.546875 | 3 |
from typing import List, Tuple
import time
import pandas as pd
from initial_solution import *
from local_search import *
from graph import *
def algorithm(nodes: List[dict], vehicles: int, clients: int, vehicle_capacity: int, tenure: int, file_writer, iter_max:int=1000, savings:bool=True) -> tuple:
    """
    Run tabu search for the vehicle routing problem, starting either from a
    random initial solution or from a Clarke-Wright savings solution.

    Args:
        nodes: client/depot records used to build the distance matrix.
        vehicles: number of available vehicles.
        clients: number of clients.
        vehicle_capacity: capacity limit per vehicle.
        tenure: tabu tenure passed to the neighborhood search.
        file_writer: open file-like object receiving the route reports.
        iter_max: stop after this many non-improving iterations.
        savings: when True, start from the Clarke-Wright savings heuristic.
    Returns:
        (elapsed_seconds, non_improving_iterations, best_distance).
    """
    inicio = time.time()
    # Build the client-to-client distance matrix.
    distances_between_clients = clients_distance(nodes, clients)
    # Pick the initial solution.
    if savings:
        best_sol = corrected_savings(distances_between_clients, nodes, vehicles, clients, vehicle_capacity, 0.1)
    else:
        best_sol = random_initial_sol(nodes, vehicles, vehicle_capacity)
    best_sol_dist, best_feasible_flag = total_distance(distances_between_clients, best_sol, nodes, vehicle_capacity)
    current_sol = best_sol.copy()
    current_dist = best_sol_dist
    current_feasible_flag = best_feasible_flag
    # Report the initial routes and distance.
    file_writer.write('-Solucao inicial-\n')
    show_routes(distances_between_clients, best_sol, nodes, vehicle_capacity, file_writer)
    file_writer.write(f"Distancia total: {best_sol_dist}\n\n")
    # Initialize the tabu list and the stopping conditions.
    tabu_list = []
    tempo = 0
    iter_ = 0
    sol_dists = []
    # NOTE(review): best_dists is never used below.
    best_dists = []
    # Stop after 300 seconds of wall time or iter_max non-improving moves.
    while tempo < 300 and iter_ < iter_max:
        aux_current_sol, aux_current_dist, tabu_list, aux_current_feasible_flag = best_neighbor(distances_between_clients, current_sol, current_dist, nodes, vehicles, vehicle_capacity, tabu_list, tenure, best_sol_dist, current_feasible_flag)
        if aux_current_sol != None:
            current_sol, current_dist, current_feasible_flag = aux_current_sol, aux_current_dist, aux_current_feasible_flag
        # Accept as new best when feasibility improves, or distance improves
        # within the same feasibility regime.
        if (current_feasible_flag and not best_feasible_flag) or (current_feasible_flag and current_dist < best_sol_dist) or (not best_feasible_flag and current_dist < best_sol_dist):
            best_sol = current_sol.copy()
            best_sol_dist = current_dist
            best_feasible_flag = current_feasible_flag
            iter_ = 0
        else:
            iter_ += 1
        sol_dists.append(aux_current_dist)
        fim = time.time()
        tempo = fim - inicio
    file_writer.write('-Solucao final-\n')
    show_routes(distances_between_clients, best_sol, nodes, vehicle_capacity, file_writer)
    # Persist the per-iteration distances for later analysis.
    df = pd.DataFrame(sol_dists)
    df.to_csv('log_dists.csv', index=True, index_label="iter")
    return tempo, iter_, best_sol_dist
nikkei2019_2_qual_b.py | hythof/atc | 0 | 12771872 | <reponame>hythof/atc
from collections import Counter
# Read n and the list d from stdin; d[i] is presumably the required depth of
# vertex i+1 in a tree rooted at vertex 1 (matches the d[0]==0 check below).
n,*d=map(int,open(0).read().split())
c=Counter(d)
# The root must have value 0 and be the only vertex with value 0.
if d[0]!=0 or c[0]!=1:
    print(0)
    exit()
ans=1
# Answer is computed modulo 998244353.
m=998244353
# Each vertex at level i can attach to any of the c[i-1] vertices one level up.
for i in range(1, max(d)+1):
    ans*=pow(c[i-1],c[i],m)
    ans%=m
print(ans)
| 2.46875 | 2 |
from universe.wrappers.experimental.action_space import SafeActionSpace, SoftmaxClickMouse
from universe.wrappers.experimental.observation import CropObservations
from universe.wrappers.experimental.random_env import RandomEnv
| 1.109375 | 1 |
# Generated by Django 3.1.5 on 2021-01-20 02:25
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: unifies the 0052 and 0054 branches; no schema changes."""

    dependencies = [
        ('website', '0052_auto_20210114_2228'),
        ('website', '0054_task_grader_data_file'),
    ]
    operations = [
    ]
| 1.40625 | 1 |
# -*- coding: utf-8 -*-
from .constituency import CRFConstituencyModel
from .dependency import (BiaffineDependencyModel, CRF2oDependencyModel,
CRFDependencyModel, CRFNPDependencyModel,
SimplestBiaffineDependencyModel)
__all__ = ['BiaffineDependencyModel',
'CRFDependencyModel',
'CRF2oDependencyModel',
'CRFNPDependencyModel',
'CRFConstituencyModel',
'SimplestBiaffineDependencyModel']
| 1.054688 | 1 |
For_Cluster/Temp/letshpc_folder/main_script_without_perf.py | yatin2410/HPC_N_QUEENS | 2 | 12771876 | <filename>For_Cluster/Temp/letshpc_folder/main_script_without_perf.py
#!/bin/python
import subprocess
import os
import sys
import maps
import time
import logging
def line(n):
    """Print a horizontal rule made of n dash characters."""
    print("-" * n)
# Log to a file so batch runs on the cluster leave an audit trail.
logging.basicConfig(filename = "LetsHPC_Team_CodeRunner.log", level = logging.INFO)
logger = logging.getLogger(__name__)
########################################################################################################
# Help text printed when the script is invoked with too few arguments.
USAGE = """
Usage:
run.py problem_name approach_name serial_executable parallel_executable runs log_directory output_directory input_directory base_directory
'problem_name' is the name of the problem assigned to you.
'approach_name' is the name of the appraoch assigned to you.
'serial_executable' must be the name of the compiled executable file for the serial code.
'parallel_executable' must be the name of the compiled executable file for the parallel code.
'runs' is the number of times to run the codes. Run at least thrice and ideally 10 times.
'log_directory' is the directory where you want to store the log files
'output_directory' is the directory where you want to store the output files
'input_directory' is the directory where you take the input from
"""
def run_perf(log_directory, n, p, serial_executable, parallel_executable, input_file, problem_name, approach_name):
    """Run the serial or parallel executable under four `perf stat` passes.

    When parallel_executable is None the serial binary is profiled (p forced
    to 0); otherwise the parallel binary is profiled with p processors. Each
    pass appends counter data to its own perf log and the program's stdout
    to the shared problem/approach log file.
    """
    perf1 = None
    perf2 = None
    perf3 = None
    perf4 = None
    # NOTE(review): the cache-dropping calls below are commented out, so this
    # command is currently unused.
    clear_cache = "sudo /sbin/sysctl vm.drop_caches=3"
    if parallel_executable is None:
        # Serial profiling: pass 1 core counters, pass 2 L1/dTLB, pass 3 LLC,
        # pass 4 branches and OS-level events.
        perf1 = "sudo perf stat -o %s --append -e cycles,instructions,cache-references,cache-misses,bus-cycles -a %s %s %s %s >> %s" % (log_directory + "perf_logs/serial_" + str(n) + "_0.log.1",serial_executable, str(n), "0", input_file, log_directory + problem_name + "_" + approach_name + ".logs")
        perf2 = "sudo perf stat -o %s --append -e L1-dcache-loads,L1-dcache-load-misses,L1-dcache-stores,dTLB-loads,dTLB-load-misses,dTLB-prefetch-misses -a %s %s %s %s >> %s" % (log_directory + "perf_logs/serial_" + str(n) + "_0.log.2",serial_executable, str(n), "0", input_file, log_directory + problem_name + "_" + approach_name + ".logs")
        perf3 = "sudo perf stat -o %s --append -e LLC-loads,LLC-load-misses,LLC-stores,LLC-prefetches -a %s %s %s %s >> %s" % (log_directory + "perf_logs/serial_" + str(n) + "_0.log.3",serial_executable, str(n), "0", input_file, log_directory + problem_name + "_" + approach_name + ".logs")
        perf4 = "sudo perf stat -o %s --append -e branches,branch-misses,context-switches,cpu-migrations,page-faults -a %s %s %s %s >> %s" % (log_directory + "perf_logs/serial_" + str(n) + "_0.log.4",serial_executable, str(n), "0", input_file, log_directory + problem_name + "_" + approach_name + ".logs")
    else:
        # Parallel profiling: same four passes, keyed by processor count p.
        perf1 = "sudo perf stat -o %s --append -e cycles,instructions,cache-references,cache-misses,bus-cycles -a %s %s %s %s >> %s" % (log_directory + "perf_logs/parallel_" + str(n) + "_" + str(p) + ".log.1", parallel_executable, str(n), str(p), input_file, log_directory + problem_name + "_" + approach_name + ".logs")
        perf2 = "sudo perf stat -o %s --append -e L1-dcache-loads,L1-dcache-load-misses,L1-dcache-stores,dTLB-loads,dTLB-load-misses,dTLB-prefetch-misses -a %s %s %s %s >> %s" % (log_directory + "perf_logs/parallel_" + str(n) + "_" + str(p) + ".log.2", parallel_executable, str(n), str(p), input_file, log_directory + problem_name + "_" + approach_name + ".logs")
        perf3 = "sudo perf stat -o %s --append -e LLC-loads,LLC-load-misses,LLC-stores,LLC-prefetches -a %s %s %s %s >> %s" % (log_directory + "perf_logs/parallel_" + str(n) + "_" + str(p) + ".log.3", parallel_executable, str(n), str(p), input_file, log_directory + problem_name + "_" + approach_name + ".logs")
        perf4 = "sudo perf stat -o %s --append -e branches,branch-misses,context-switches,cpu-migrations,page-faults -a %s %s %s %s >> %s" % (log_directory + "perf_logs/parallel_" + str(n) + "_" + str(p) + ".log.4", parallel_executable, str(n), str(p), input_file, log_directory + problem_name + "_" + approach_name + ".logs")
    logger.info("Perf Command 1: %s " %(perf1))
    logger.info("Perf Command 2: %s " %(perf2))
    logger.info("Perf Command 3: %s " %(perf3))
    logger.info("Perf Command 4: %s " %(perf4))
    #subprocess.call(clear_cache, shell = True)
    subprocess.call(perf1, shell = True)
    #subprocess.call(clear_cache, shell = True)
    subprocess.call(perf2, shell = True)
    #subprocess.call(clear_cache, shell = True)
    subprocess.call(perf3, shell = True)
    #subprocess.call(clear_cache, shell = True)
    subprocess.call(perf4, shell = True)
def foobar(l):
    """Run one full benchmark session for a single problem/approach pair.

    l is an argv-style list:
      l[1] problem name        l[2] approach name
      l[3] serial executable   l[4] parallel executable
      l[5] number of runs      l[6] log directory
      l[7] output directory    l[8] input directory
      l[9] working directory to chdir into before each run
    """
    if len(l) < 10:
        print USAGE  # USAGE is defined elsewhere in this module
        return
    problem_name = l[1]
    approach_name = l[2]
    serial_executable = l[3]
    parallel_executable = l[4]
    runs = int(l[5])
    logger.info("-"*80)
    logger.info("Problem Name : %s" % (problem_name))
    logger.info("Approach Name : %s" % (approach_name))
    logger.info("Serial Executable : %s" % (serial_executable))
    logger.info("Parallel Executable : %s" % (parallel_executable))
    logger.info("Number of runs : %s" % (str(runs)))
    # Validate the problem/approach pair against the tables in `maps`.
    if problem_name not in maps.problem_list:
        print problem_name, 'not in', maps.problem_list
        logger.error("%s not in problem list" % (problem_name))
        exit(0)
    if approach_name not in maps.approaches[problem_name]:
        print approach_name, 'not a valid approach for', problem_name
        print 'Choose from:'
        print maps.approaches[problem_name]
        logger.error("%s is not a valid approach" % (approach_name))
        exit(0)
    log_directory = l[6]
    output_directory = l[7]
    input_directory = l[8]
    line(80)
    logger.info("Log Directory : %s" %(log_directory))
    logger.info("Output Directory : %s" % (output_directory))
    logger.info("Input Directory : %s" % (input_directory))
    print 'Assuming that input has been created for:', problem_name
    # Snapshot the machine configuration next to the logs.
    subprocess.call('lscpu > '
                    + log_directory
                    + "lscpu.txt", shell=True)
    subprocess.call('cat /proc/cpuinfo > '
                    + log_directory
                    + "cpuinfo.txt", shell=True)
    logger.info("Creating Empty files for perf data")
    # Pre-create the perf log files so later `perf stat --append` invocations
    # always have a file to append to.
    for i in maps.problem_size[problem_name]:
        for p in maps.processor_range:
            subprocess.call("touch %s" % (log_directory + "perf_logs/parallel_" + \
                str(i) + "_" + str(p) + ".log.1"), shell = True)
            subprocess.call("touch %s" % (log_directory + "perf_logs/parallel_" + \
                str(i) + "_" + str(p) + ".log.2"), shell = True)
            subprocess.call("touch %s" % (log_directory + "perf_logs/parallel_" + \
                str(i) + "_" + str(p) + ".log.3"), shell = True)
            subprocess.call("touch %s" % (log_directory + "perf_logs/parallel_" + \
                str(i) + "_" + str(p) + ".log.4"), shell = True)
    for run in range(runs):
        os.chdir(l[9])
        print 'Run:', str(run+1)
        print('Running Serial')
        logger.info("Started running the serial code for run_id = %d" %(run))
        for n in maps.problem_size[problem_name]:
            print('Problem Size:', n)
            input_file = input_directory+problem_name+'_'+str(n)+'_input.txt'
            logger.info("Running the Command : " + serial_executable
                        + " " + str(n)
                        + " " + str(0) # p=0 for serial code.
                        + " " + input_file
                        + " >> " + log_directory
                        + problem_name + "_" + approach_name
                        + ".logs")
            subprocess.call(serial_executable
                            + " " + str(n)
                            + " " + str(0) # p=0 for serial code.
                            + " " + input_file
                            + " >> " + log_directory
                            + problem_name + "_" + approach_name
                            + ".logs",
                            shell=True)
            # NOTE(review): `perf_command` is assigned twice below (the second
            # assignment overwrites the first) and is never executed — the
            # corresponding run_perf() call is commented out. Dead code;
            # confirm whether perf collection was meant to stay disabled.
            perf_command = "sudo perf stat -o %s --append \
-e L1-dcache-loads,L1-dcache-load-misses,L1-dcache-stores,dTLB-loads,dTLB-load-misses,dTLB-prefetch-misses,LLC-loads,LLC-load-misses,LLC-stores,LLC-prefetches,branches,branch-misses,context-switches,cycles,instructions,cache-references,cache-misses,bus-cycles,cpu-migrations,page-faults -a %s %s %s %s" % (log_directory + "perf_logs/serial_" + str(n) + "_0.log",serial_executable, str(n), "0", input_file)
            perf_command = "sudo perf stat -o %s --append \
-e cycles,instructions,cache-references,cache-misses,bus-cycles,cpu-migrations,page-faults,L1-dcache-loads,L1-dcache-load-misses,L1-dcache-stores,LLC-loads,LLC-load-misses,LLC-stores,LLC-prefetches,branches,branch-misses,context-switches -a %s %s %s %s" % (log_directory + "perf_logs/serial_" + str(n) + "_0.log",serial_executable, str(n), "0", input_file)
            #logger.info("Running the Perf Commands Commands")
            #run_perf(log_directory, n, "0", serial_executable, None, input_file, problem_name, approach_name)
        line(80)
        print('Running Parallel')
        for p in maps.processor_range:
            print('Number of Processors:', p)
            logger.info("Running the parallel code with %d processors" % (p))
            for n in maps.problem_size[problem_name]:
                os.chdir(l[9])
                input_file = input_directory+problem_name+'_'+str(n)+'_input.txt'
                print('Problem Size:', n)
                logger.info("Running the Command : " + parallel_executable
                            + " " + str(n)
                            + " " + str(p)
                            + " " + input_file
                            + " >> " + log_directory
                            + problem_name + "_" + approach_name
                            + ".logs")
                subprocess.call(parallel_executable
                                + " " + str(n)
                                + " " + str(p)
                                + " " + input_file
                                + " >> " + log_directory
                                + problem_name + "_" + approach_name
                                + ".logs",
                                shell=True)
                # NOTE(review): dead assignment, never executed — run_perf()
                # below is commented out.
                perf_command = "sudo perf stat -o %s --append \-e L1-dcache-loads,L1-dcache-load-misses,L1-dcache-stores,dTLB-loads,dTLB-load-misses,dTLB-prefetch-misses,LLC-loads,LLC-load-misses,LLC-stores,LLC-prefetches,branches,branch-misses,context-switches,cycles,instructions,cache-references,cache-misses,bus-cycles,cpu-migrations,page-faults -a %s %s %s %s" % (log_directory + "perf_logs/parallel_" + str(n) + "_" + str(p) + ".log", parallel_executable, str(n), str(p), input_file)
                #logger.info("Running the perf commands")
                #run_perf(log_directory, n, p, None, parallel_executable, input_file, problem_name, approach_name)
            # Look into flushing memory
            line(80)
        #print('Comparing results')
        # subprocess.call('python3 compare.py '+problem_name, shell=True)
        # line(80)
        print(os.getcwd())
#######################################################################
# Driver: pick the next submission under all_codes/ that has not yet been
# benchmarked, compile its serial and parallel sources, and run it via foobar().
all_inputs = os.getcwd() + '/all_input/'
base = os.getcwd() + '/all_codes/'
starting_point = os.getcwd()
all_codes = os.listdir(base)
runs = int(sys.argv[1])
print('the number of runs is ' + str(runs))
count = 0
# Start from a clean progress file; ignore the error when it does not exist.
try:
    os.remove(base + "progress.txt")
except Exception as e:
    print "File already deleted"
print(all_codes)
code_to_run = None
codes_already_run = None
# codes_run_file remembers which submissions finished in earlier invocations;
# create it on first use.
try:
    uber = open(os.getcwd() + "/codes_run_file", "r")
    codes_already_run = uber.readlines()
    uber.close()
except Exception as e:
    command = "touch %s" % (starting_point + "/codes_run_file")
    subprocess.call(command, shell = True)
if codes_already_run is None:
    code_to_run = all_codes[0]
else:
    # First directory not yet recorded in codes_run_file.
    for each in all_codes:
        if each+"\n" not in codes_already_run:
            code_to_run = each
            break
print "The following code will be run now", code_to_run
if code_to_run is None:
    print "All the codes have already been executed. You can run the collect data script now"
    sys.exit(1)
for each_code in [code_to_run]:
    if each_code == "progress.txt" or "log" in each_code:
        continue
    # Remove stale artifacts from a previous (possibly aborted) run.
    subprocess.call("rm -rf "
                    + base + each_code + "/output"
                    , shell=True)
    subprocess.call("rm -rf "
                    + base + each_code + "/logs"
                    , shell=True)
    # Directory names look like <x>-<y>-<problem>-<approach>[-...] — TODO confirm.
    division = each_code.split("-")
    problem = division[2]
    approach = division[3]
    print "-"*80
    print problem, approach
    all_files = os.listdir(base+each_code+"/")
    serial = None
    parallel = None
    # Heuristic: anything that is not clean/logs/output and not "par*"/"ser"
    # is treated as the serial source; a name containing "parallel" as the
    # parallel source.
    for each_file in all_files:
        if 'clean' not in each_file.lower() and 'logs'!=each_file.lower() and 'output'!=each_file.lower():
            if 'par' not in each_file.lower() and each_file!="ser":
                serial = each_file
            elif 'parallel' in each_file.lower():
                parallel = each_file
    compiler = "gcc "
    # NOTE(review): if no parallel source was found, `parallel` is None and
    # this membership test raises TypeError — confirm inputs always match.
    if ".cpp" in parallel:
        compiler = "g++ "
    print serial, parallel
    #raw_input()
    if 'logs' not in all_files:
        os.mkdir(base + each_code + "/logs")
        os.mkdir(base + each_code + "/logs/perf_logs")
        os.mkdir(base + each_code + "/output")
    #raw_input()
    # Build both executables with OpenMP; warnings suppressed (-w).
    subprocess.call(compiler
                    + base + each_code + "/" + parallel
                    + " -fopenmp -lm -w -o "
                    + base + each_code + "/parr", shell=True)
    subprocess.call(compiler
                    + base + each_code + "/" + serial
                    + " -fopenmp -lm -w -o "
                    + base + each_code + "/ser", shell=True)
    print serial,parallel
    #raw_input()
    foobar(['run.py', problem, approach, base + each_code + "/ser", base + each_code + "/parr", int(runs), base + each_code + "/logs/", \
            base + each_code + "/output/", all_inputs, base + each_code + "/"])
    f = open(base + "progress.txt", "a")
    f.write(str(time.time()) + " " + str(count) + " " + str(each_code)+"\n")
    f.close()
    count +=1
print "Reached Here:", code_to_run, type(code_to_run)
# Record the completed submission so the next invocation skips it.
w2f = open(starting_point + "/codes_run_file", "a")
string_to_write = code_to_run + "\n"
w2f.write(string_to_write)
w2f.close()
print "Written To file"
| 2.578125 | 3 |
tests/integration/records/single_db/test_records_save_df.py | cwegrzyn/records-mover | 36 | 12771877 | import pytz
import logging
from .base_records_test import BaseRecordsIntegrationTest
from ..directory_validator import RecordsDirectoryValidator
from records_mover.records import (
RecordsSchema, DelimitedRecordsFormat, ProcessingInstructions
)
import tempfile
import pathlib
import datetime
logger = logging.getLogger(__name__)
class RecordsSaveDataframeIntegrationTest(BaseRecordsIntegrationTest):
    """Round-trips a pandas DataFrame through records-mover into a records
    directory on local disk and validates the directory contents."""

    def save_and_verify(self, records_format, processing_instructions=None):
        """Move a fixture DataFrame into a temporary records directory and
        validate the result.

        :param records_format: records format describing the target output.
        :param processing_instructions: optional ProcessingInstructions;
            a default instance is used when None.
        :return: the result of ``records.move()``.
            (BUG FIX: was annotated ``-> None`` although the method returns
            the move result.)
        """
        if not self.has_pandas():
            logger.warning("Skipping test as we don't have Pandas to save with.")
            return
        from pandas import DataFrame

        if processing_instructions is None:
            processing_instructions = ProcessingInstructions()
        us_eastern = pytz.timezone('US/Eastern')
        # One-row frame exercising delimiter/quote/newline escaping and every
        # temporal column type.
        df = DataFrame.from_dict([{
            'num': 123,
            'numstr': '123',
            'str': 'foo',
            'comma': ',',
            'doublequote': '"',
            'quotecommaquote': '","',
            'newlinestr': ("* SQL unload would generate multiple files (one for each slice/part)\n"
                           "* Filecat would produce a single data file"),
            'date': datetime.date(2000, 1, 1),
            'time': datetime.time(0, 0),
            'timestamp': datetime.datetime(2000, 1, 2, 12, 34, 56, 789012),
            'timestamptz': us_eastern.localize(datetime.datetime(2000, 1, 2, 12, 34, 56, 789012))
        }])
        records_schema = RecordsSchema.from_dataframe(df,
                                                      processing_instructions,
                                                      include_index=False)
        records_schema = records_schema.refine_from_dataframe(df, processing_instructions)

        with tempfile.TemporaryDirectory(prefix='test_records_save_df') as tempdir:
            output_url = pathlib.Path(tempdir).resolve().as_uri() + '/'
            source = self.records.sources.dataframe(df=df,
                                                    records_schema=records_schema,
                                                    processing_instructions=processing_instructions)
            target = self.records.targets.directory_from_url(output_url,
                                                             records_format=records_format)
            out = self.records.move(source, target, processing_instructions)
            self.verify_records_directory(records_format.format_type,
                                          records_format.variant,
                                          tempdir,
                                          records_format.hints)
        return out

    def verify_records_directory(self, format_type, variant, tempdir, hints=None) -> None:
        """Validate the records directory produced under *tempdir*.

        BUG FIX: ``hints={}`` was a mutable default argument; use None and
        build a fresh dict per call. Behavior is unchanged for all callers.
        """
        if hints is None:
            hints = {}
        validator = RecordsDirectoryValidator(tempdir,
                                              self.resource_name(format_type, variant, hints),
                                              self.engine.name)
        validator.validate()
| 2.4375 | 2 |
python/tvm/contrib/graph_runtime.py | dayanandasiet/tvmdbg | 0 | 12771878 | """Minimum graph runtime that executes graph containing TVM PackedFunc."""
from .._ffi.base import string_types
from .._ffi.function import get_global_func
from .rpc import base as rpc_base
from .. import ndarray as nd
from ..tools.debug.runtime import debugruntime
from ..tools.debug.util import common
def create(graph_json_str, libmod, ctx, debug=False):
    """Build a graph runtime executor for a deployed graph.

    Parameters
    ----------
    graph_json_str : str or graph class
        The graph to deploy, in the json format produced by nnvm. A non-string
        argument must expose ``_tvm_graph_json()``.
    libmod : tvm.Module
        The module holding the compiled functions referenced by the graph.
    ctx : TVMContext
        Target context; may be local or a remote RPC context.
    debug : bool
        Enables the debug runtime wrapper.

    Returns
    -------
    graph_module : GraphModule
        Runtime graph module that can be used to execute the graph.
    """
    if not isinstance(graph_json_str, string_types):
        try:
            graph_json_str = graph_json_str._tvm_graph_json()
        except AttributeError:
            raise ValueError("Type %s is not supported" % type(graph_json_str))
    device_type = ctx.device_type
    device_id = ctx.device_id
    if device_type >= rpc_base.RPC_SESS_MASK:
        # Remote execution: create the runtime on the other end of the RPC
        # session and talk to it through a module handle.
        assert libmod.type_key == "rpc"
        assert rpc_base._SessTableIndex(libmod) == ctx._rpc_sess._tbl_index
        remote_handle = rpc_base._ModuleHandle(libmod)
        make_runtime = ctx._rpc_sess.get_function("tvm.graph_runtime.remote_create")
        runtime_handle = make_runtime(graph_json_str, remote_handle,
                                      device_type % rpc_base.RPC_SESS_MASK,
                                      device_id)
    else:
        make_runtime = get_global_func("tvm.graph_runtime.create")
        runtime_handle = make_runtime(graph_json_str, libmod, device_type, device_id)
    return GraphModule(runtime_handle, ctx, graph_json_str, debug)
class GraphModule(object):
    """Wrapper runtime module.

    This is a thin wrapper of the underlying TVM module.
    You can also directly call set_input, run, and get_output
    of the underlying module functions.

    Parameters
    ----------
    module : Module
        The internal tvm module that holds the actual graph functions.
    ctx : TVMContext
        The context this module is under.
    graph_json_str : str
        The graph in json format, retained so the debugger can inspect it.
    debug : bool
        Whether the CLI debug runtime is attached.

    Attributes
    ----------
    module : Module
        The internal tvm module that holds the actual graph functions.
    ctx : TVMContext
        The context this module is under.
    """

    def __init__(self, module, ctx, graph_json_str, debug):
        self.module = module
        # Cache the packed functions exposed by the underlying runtime module
        # so each call avoids a repeated lookup.
        self._set_input = module["set_input"]
        self._run = module["run"]
        self._get_output = module["get_output"]
        self._get_input = module["get_input"]
        self._set_debug_buffer = module["set_debug_buffer"]
        self._debug_run = module["debug_run"]
        self._load_params = module["load_params"]
        self.ctx = ctx
        self.debug = debug
        if self.debug:
            # Attach the CLI debugger; it owns the per-node output buffers.
            self.dbgobj = debugruntime.create(self, graph_json_str, ctx)

    def set_input(self, key=None, value=None, **params):
        """Set inputs to the module via kwargs.

        Parameters
        ----------
        key : int or str
            The input key.
        value : the input value.
            Array-like value converted onto this module's context.
        params : dict of str to NDArray
            Additional named inputs.
        """
        if key:
            self._set_input(key, nd.array(value, ctx=self.ctx))
        for k, v in params.items():
            self._set_input(k, nd.array(v, ctx=self.ctx))
        if self.debug:
            # Mirror the inputs into the debugger's view.
            self.dbgobj.set_input(key, value, **params)
        return self

    def set_debug_buffer(self):
        """Set the debug output buffers for each tvm node.

        Hands one debugger-owned buffer per entry to the runtime so every
        node's output can be captured during a debug run.
        """
        for eid in range(self.dbgobj.get_debug_buffer_count()):
            self._set_debug_buffer(self.dbgobj.get_debug_buffer(eid))

    def debug_run(self):
        # Call CLI debug run; invoked when the user executes the run command
        # in the debugger session. The session decides whether this is a
        # debug run (capture per-node outputs) or a plain run.
        run_cli_session = self.dbgobj.get_run_command()
        run_start_resp = run_cli_session.get_run_start_resp()
        retvals = True
        if run_start_resp.action == common.CLIRunStartAction.DEBUG_RUN:
            self.set_debug_buffer()
            retvals = self._debug_run()
            self.dbgobj.dump_output()
            self.dbgobj.run_end(run_cli_session, retvals)
        elif run_start_resp.action == common.CLIRunStartAction.NON_DEBUG_RUN:
            retvals = self._run()
            self.dbgobj.run_end(run_cli_session, retvals)

    def run(self, **input_dict):
        """Run forward execution of the graph.

        Parameters
        ----------
        input_dict : dict of str to NDArray
            List of input values to be fed to the graph before running.
        """
        if input_dict:
            self.set_input(**input_dict)
        if not self.debug:
            self._run()
        else:
            self.debug_run()

    def get_input(self, index, out):
        """Copy the index-th input into `out`.

        Parameters
        ----------
        index : int
            The input index.
        out : NDArray
            The output array container.
        """
        self._get_input(index, out)
        return out

    def get_output(self, index, out):
        """Copy the index-th output into `out`.

        Parameters
        ----------
        index : int
            The output index.
        out : NDArray
            The output array container.
        """
        self._get_output(index, out)
        return out

    def load_params(self, params_bytes):
        """Load parameters from a serialized byte array of a parameter dict.

        Parameters
        ----------
        params_bytes : bytearray
            The serialized parameter dict.
        """
        self._load_params(bytearray(params_bytes))

    def __getitem__(self, key):
        """Get an internal module function by name.

        Parameters
        ----------
        key : str
            The key to the module.
        """
        return self.module[key]
| 2.125 | 2 |
r2k/cli/config/config_show.py | mcouthon/rss-to-kindle | 11 | 12771879 | import click
import orjson as json
import yaml
from r2k.cli import cli_utils, logger
from r2k.config import config as _config
@click.command("show")
@cli_utils.config_path_option()
@click.option(
    "-j",
    "--json",
    "is_json",
    is_flag=True,
    help="When passed the output will be in JSON format (e.g. for use with jq).\n"
    "Use with the --no-ansi flag for best results",
)
def config_show(is_json: bool) -> None:
    """Show all the available configuration."""
    result = _config.as_dict()
    # Never echo the stored password back to the terminal.
    if "password" in result:
        result["password"] = "<PASSWORD>"
    if is_json:
        # NOTE(review): orjson.dumps returns bytes, so this logs a bytes repr
        # (b'{...}'); decoding to str before logging may be intended — confirm.
        logger.info(json.dumps(result))
    else:
        logger.info(yaml.safe_dump(result))
| 2.546875 | 3 |
savingsapp/apps.py | primeuser/banking-software-django | 3 | 12771880 | <filename>savingsapp/apps.py
from django.apps import AppConfig
class SavingsappConfig(AppConfig):
    """Django application configuration for the savings app."""
    name = 'savingsapp'
| 1.320313 | 1 |
packages/depolitics.org/_old/src/application/forms.py | caveljan/depolitics | 0 | 12771881 | <reponame>caveljan/depolitics<gh_stars>0
from django import forms
from .models import Politician
class AddForm(forms.ModelForm):
    """ModelForm for creating a Politician record."""

    class Meta:
        model = Politician
        # Field order controls rendering order in the form.
        fields = ('first_name',
                  'last_name',
                  'name_variants',
                  'current_function',
                  'previous_functions')
class SearchForm(forms.ModelForm):
    """ModelForm used to search Politician records.

    NOTE(review): identical to AddForm; the two could share a common base
    form if they are meant to stay in sync — confirm before refactoring.
    """

    class Meta:
        model = Politician
        fields = ('first_name',
                  'last_name',
                  'name_variants',
                  'current_function',
                  'previous_functions')
# fields = ('identification_string', ) | 2.234375 | 2 |
applications/Ma-Net/utils/mask_damaging.py | Simon-liusheng/PaddleVideo | 0 | 12771882 | <filename>applications/Ma-Net/utils/mask_damaging.py
import numpy as np
from scipy.ndimage import interpolation
# scikit-image is an optional dependency: the geometric damaging helpers
# below require skimage.transform (morphology is imported but not used in
# this module's visible code — TODO confirm).
try:
    from skimage import morphology, transform
except ImportError as e:
    print(
        f"{e}, [scikit-image] package and it's dependencies is required for MA-Net."
    )
import paddle
import cv2
import random
####
def mask_damager(labels=None, p_black=0.2):
    """Randomly damage a segmentation mask for training-time augmentation.

    With probability *p_black* the mask is blanked entirely; otherwise it is
    morphologically opened or closed and then warped with a small random
    rotation and scale.

    :param labels: assumed to be a 4-D paddle tensor of shape
        (1, 1, H, W) — TODO confirm against callers.
    :param p_black: probability of returning an all-zero mask.
    :return: damaged mask as a 2-D numpy array.
    """
    scales = (0.8, 1.0, 1.2)
    kernel_size = random.randint(10, 15)
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    if random.random() < p_black:
        final_label = paddle.zeros_like(labels)
        final_label = final_label.squeeze().numpy()
    else:
        # Random rotation: either a small positive or negative angle, or none.
        prot = random.randint(5, 15)
        nrot = random.randint(-15, -5)
        rots = [prot, nrot, 0]
        rot = rots[random.randint(0, 2)]
        sc = scales[random.randint(0, 2)]
        _, _, h, w = labels.shape
        tmp = labels.squeeze()
        tmp = tmp.unsqueeze(-1)
        tmp = tmp.numpy().astype(np.uint8)
        # Randomly erode-then-dilate (open) or dilate-then-erode (close).
        morph_p = random.random()
        if morph_p < 0.5:
            tmp = cv2.morphologyEx(tmp, cv2.MORPH_OPEN, kernel)
        else:
            tmp = cv2.morphologyEx(tmp, cv2.MORPH_CLOSE, kernel)
        tmp = tmp.astype(np.uint8)
        center = (w / 2, h / 2)
        M = cv2.getRotationMatrix2D(center, rot, sc)
        # BUG FIX: cv2.warpAffine's 4th positional parameter is `dst`, not the
        # interpolation flags; passing cv2.INTER_NEAREST positionally raises a
        # TypeError at runtime. Pass it via the `flags` keyword instead.
        final_label = cv2.warpAffine(tmp, M, (w, h), flags=cv2.INTER_NEAREST)
    return final_label
#####
def damage_masks(labels, shift=True, scale=True, rotate=True):
    """Damage every mask in a batch of label tensors.

    Args:
        labels: paddle tensor of shape (batch_size, 1, h, w).
        shift, scale, rotate: which damaging transforms to apply.

    Returns:
        A paddle tensor of the same shape with each batch item damaged.
    """
    batch_size = labels.shape[0]
    # Work in channels-last numpy layout expected by damage_masks_np.
    labels_np = labels.transpose([0, 2, 3, 1]).numpy()
    damaged = [
        damage_masks_np(labels_np[idx], shift, scale, rotate)
        for idx in range(batch_size)
    ]
    result = paddle.to_tensor(np.array(damaged))
    # Back to channels-first layout.
    return result.transpose([0, 3, 1, 2])
def damage_masks_np(labels, shift=True, scale=True, rotate=True):
    """Perform the actual mask damaging in numpy.

    Args:
        labels: Int32 numpy array of shape (height, width, 1).
        shift: Boolean, whether to damage the masks by shifting.
        scale: Boolean, whether to damage the masks by scaling.
        rotate: Boolean, whether to damage the masks by rotation.

    Returns:
        The damaged version of labels.
    """
    object_ids = np.setdiff1d(np.unique(labels), [0])
    # Shuffle to get a random depth ordering when recombining the objects.
    np.random.shuffle(object_ids)
    damaged = np.zeros_like(labels)
    for object_id in object_ids:
        damaged_mask = _damage_single_object_mask(
            labels == object_id, shift, scale, rotate)
        damaged[damaged_mask] = object_id
    return damaged
def _damage_single_object_mask(mask, shift, scale, rotate):
    """Damage the mask of a single object.

    Args:
        mask: Boolean numpy array of shape (height, width, 1).
        shift: Boolean, whether to damage the mask by shifting.
        scale: Boolean, whether to damage the mask by scaling.
        rotate: Boolean, whether to damage the mask by rotation.

    Returns:
        The damaged version of mask.
    """
    # Apply the enabled transforms in a fixed order: shift, scale, rotate.
    pipeline = ((shift, _shift_mask), (scale, _scale_mask), (rotate, _rotate_mask))
    for enabled, damage_fn in pipeline:
        if enabled:
            mask = damage_fn(mask)
    return mask
def _shift_mask(mask, max_shift_factor=0.05):
    """Damage a single-object mask by randomly shifting it in numpy.

    Args:
        mask: Boolean numpy array of shape (height, width, 1).
        max_shift_factor: Float scalar, maximum shift as a fraction of the
            object's extent.

    Returns:
        The shifted version of mask.
    """
    rows, cols, _ = mask.nonzero()
    height = rows.max() - rows.min()
    width = cols.max() - cols.min()
    # Scale the random offset by the object's size, not the image size.
    extent = np.sqrt(height * width)
    offset = np.random.uniform(-extent * max_shift_factor,
                               extent * max_shift_factor, 2)
    shifted = interpolation.shift(np.squeeze(mask, axis=2), offset, order=0)
    return shifted.astype('bool')[..., np.newaxis]
def _scale_mask(mask, scale_amount=0.025):
    """Damage a single-object mask by randomly scaling it in numpy.

    Args:
        mask: Boolean numpy array of shape (height, width, 1).
        scale_amount: Float scalar, maximum relative scaling factor.

    Returns:
        The scaled version of mask.
    """
    rows, cols, _ = mask.nonzero()
    # NOTE: this is half the object's extent, not its centroid — kept as-is.
    center_y = 0.5 * (rows.max() - rows.min())
    center_x = 0.5 * (cols.max() - cols.min())
    factor = np.random.uniform(1.0 - scale_amount, 1.0 + scale_amount)
    # Translate to the origin, scale, then translate back.
    to_origin = transform.SimilarityTransform(translation=[-center_x, -center_y])
    back = transform.SimilarityTransform(translation=[center_x, center_y])
    scaling = transform.SimilarityTransform(scale=[factor, factor])
    warp_matrix = (to_origin + (scaling + back)).inverse
    return transform.warp(mask, warp_matrix) > 0.5
def _rotate_mask(mask, max_rot_degrees=3.0):
    """Damage a single-object mask by randomly rotating it in numpy.

    Args:
        mask: Boolean numpy array of shape (height, width, 1).
        max_rot_degrees: Float scalar, maximum rotation in degrees.

    Returns:
        The rotated version of mask.
    """
    center_y = 0.5 * mask.shape[0]
    center_x = 0.5 * mask.shape[1]
    angle = np.random.uniform(-max_rot_degrees, max_rot_degrees)
    # Rotate about the image center: translate, rotate, translate back.
    to_origin = transform.SimilarityTransform(translation=[-center_x, -center_y])
    back = transform.SimilarityTransform(translation=[center_x, center_y])
    rotation = transform.SimilarityTransform(rotation=np.deg2rad(angle))
    warp_matrix = (to_origin + (rotation + back)).inverse
    return transform.warp(mask, warp_matrix) > 0.5
| 2.03125 | 2 |
custom_components/ge_home/entities/ccm/ge_ccm_pot_not_present_binary_sensor.py | 87racer/ha_gehome | 0 | 12771883 | <reponame>87racer/ha_gehome
from ..common import GeErdBinarySensor
class GeCcmPotNotPresentBinarySensor(GeErdBinarySensor):
    """Binary sensor that is on when the coffee maker's pot is absent."""

    @property
    def is_on(self) -> bool:
        """Return True when the pot is NOT present (inverted ERD value)."""
        pot_present = self._boolify(self.appliance.get_erd_value(self.erd_code))
        return not pot_present
| 2.109375 | 2 |
ScienceCruiseDataManagement/data_storage_management/management/commands/importfromstaging.py | Swiss-Polar-Institute/science-cruise-data-management | 6 | 12771884 | <reponame>Swiss-Polar-Institute/science-cruise-data-management<gh_stars>1-10
from django.core.management.base import BaseCommand, CommandError
from data_storage_management import cifs_utils
from data_storage_management.models import Directory, NASResource
from django.conf import settings
import os
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# <NAME> (<EMAIL>) and <NAME> (<EMAIL>), 2016-2017.
class Command(BaseCommand):
    """Management command that imports or checks NAS-backed staging data."""
    help = 'Copies data from staging areas into the ace_data'

    def add_arguments(self, parser):
        # Single positional argument: 'import' or 'check'.
        parser.add_argument('action', type=str)

    def handle(self, *args, **options):
        if options['action'] == 'import':
            # Import every Directory that is backed by a NAS resource.
            db_nas_directories = Directory.objects.filter(nas_resource__isnull=False)
            print("Will import the NAS directories. To be processed: {}".format(len(db_nas_directories)))
            for nas_directory in db_nas_directories:
                importer = cifs_utils.Importer(settings.NAS_IP,
                                               nas_directory.nas_resource.shared_resource,
                                               "guest",
                                               None,
                                               nas_directory.source_directory,
                                               nas_directory.destination_directory)
                importer.run()
                importer.register_import(nas_directory)
        elif options['action'] == 'check':
            # Mount each NAS share and report directories missing from the DB.
            nas_shares = NASResource.objects.all()
            directories_in_nas = []  # NOTE(review): never used — dead variable?
            for nas_share in nas_shares:
                mounted = cifs_utils.Importer.mount(settings.NAS_IP, nas_share.shared_resource)
                files_and_dirs = os.listdir(mounted)
                for file_or_dir in files_and_dirs:
                    if os.path.isdir(os.path.join(mounted, file_or_dir)):
                        report_if_directory_no_exist(file_or_dir, nas_share.shared_resource)
                cifs_utils.Importer.umount(mounted)
def report_if_directory_no_exist(directory, shared_resource):
    """Print a warning when *directory* on *shared_resource* has no matching
    NAS-backed Directory row in the database."""
    known = Directory.objects.filter(nas_resource__isnull=False,
                                     source_directory=directory).exists()
    if not known:
        print("Directory {} from shared resource {} is not in the database".format(directory, shared_resource))
| 2.28125 | 2 |
test/ably/restrequest_test.py | jvinet/ably-python | 22 | 12771885 | <reponame>jvinet/ably-python<gh_stars>10-100
import httpx
import pytest
from ably import AblyRest
from ably.http.paginatedresult import HttpPaginatedResponse
from test.ably.restsetup import RestSetup
from test.ably.utils import BaseAsyncTestCase
from test.ably.utils import VaryByProtocolTestsMetaclass, dont_vary_protocol
# RSC19
class TestRestRequest(BaseAsyncTestCase, metaclass=VaryByProtocolTestsMetaclass):
    """Exercises the generic Ably REST request API (spec item RSC19).

    HPx comments below refer to HttpPaginatedResponse spec items. The
    metaclass runs each test under both the binary (msgpack) and JSON
    transports via per_protocol_setup.
    """

    async def setUp(self):
        self.ably = await RestSetup.get_ably_rest()
        self.test_vars = await RestSetup.get_test_vars()

        # Populate the channel (using the new api)
        self.channel = self.get_channel_name()
        self.path = '/channels/%s/messages' % self.channel
        for i in range(20):
            body = {'name': 'event%s' % i, 'data': 'lorem ipsum %s' % i}
            await self.ably.request('POST', self.path, body=body)

    async def tearDown(self):
        await self.ably.close()

    def per_protocol_setup(self, use_binary_protocol):
        # Called by VaryByProtocolTestsMetaclass before each test variant.
        self.ably.options.use_binary_protocol = use_binary_protocol
        self.use_binary_protocol = use_binary_protocol

    async def test_post(self):
        body = {'name': 'test-post', 'data': 'lorem ipsum'}
        result = await self.ably.request('POST', self.path, body=body)
        assert isinstance(result, HttpPaginatedResponse)  # RSC19d
        # HP3
        assert type(result.items) is list
        assert len(result.items) == 1
        assert result.items[0]['channel'] == self.channel
        assert 'messageId' in result.items[0]

    async def test_get(self):
        params = {'limit': 10, 'direction': 'forwards'}
        result = await self.ably.request('GET', self.path, params=params)
        assert isinstance(result, HttpPaginatedResponse)  # RSC19d

        # HP2 — pagination navigation returns the same response type.
        assert isinstance(await result.next(), HttpPaginatedResponse)
        assert isinstance(await result.first(), HttpPaginatedResponse)

        # HP3
        assert isinstance(result.items, list)
        item = result.items[0]
        assert isinstance(item, dict)
        assert 'timestamp' in item
        assert 'id' in item
        assert item['name'] == 'event0'
        assert item['data'] == 'lorem ipsum 0'

        assert result.status_code == 200     # HP4
        assert result.success is True        # HP5
        assert result.error_code is None     # HP6
        assert result.error_message is None  # HP7
        assert isinstance(result.headers, list)  # HP7

    @dont_vary_protocol
    async def test_not_found(self):
        result = await self.ably.request('GET', '/not-found')
        assert isinstance(result, HttpPaginatedResponse)  # RSC19d
        assert result.status_code == 404  # HP4
        assert result.success is False    # HP5

    @dont_vary_protocol
    async def test_error(self):
        # Invalid limit triggers a 400 with error details populated.
        params = {'limit': 'abc'}
        result = await self.ably.request('GET', self.path, params=params)
        assert isinstance(result, HttpPaginatedResponse)  # RSC19d
        assert result.status_code == 400  # HP4
        assert not result.success
        assert result.error_code
        assert result.error_message

    async def test_headers(self):
        # Custom headers must be forwarded on the outgoing request.
        key = 'X-Test'
        value = 'lorem ipsum'
        result = await self.ably.request('GET', '/time', headers={key: value})
        assert result.response.request.headers[key] == value

    # RSC19e
    @dont_vary_protocol
    async def test_timeout(self):
        # Timeout: an absurdly small read timeout must surface as ReadTimeout.
        timeout = 0.000001
        ably = AblyRest(token="foo", http_request_timeout=timeout)
        assert ably.http.http_request_timeout == timeout
        with pytest.raises(httpx.ReadTimeout):
            await ably.request('GET', '/time')
        await ably.close()

        # Bad host, use fallback — request still succeeds via fallback hosts.
        ably = AblyRest(key=self.test_vars["keys"][0]["key_str"],
                        rest_host='some.other.host',
                        port=self.test_vars["port"],
                        tls_port=self.test_vars["tls_port"],
                        tls=self.test_vars["tls"],
                        fallback_hosts_use_default=True)
        result = await ably.request('GET', '/time')
        assert isinstance(result, HttpPaginatedResponse)
        assert len(result.items) == 1
        assert isinstance(result.items[0], int)
        await ably.close()

        # Bad host, no fallback — the connection error propagates.
        ably = AblyRest(key=self.test_vars["keys"][0]["key_str"],
                        rest_host='some.other.host',
                        port=self.test_vars["port"],
                        tls_port=self.test_vars["tls_port"],
                        tls=self.test_vars["tls"])
        with pytest.raises(httpx.ConnectError):
            await ably.request('GET', '/time')
        await ably.close()
| 1.851563 | 2 |
schools2/pages.py | ferleyrincon/schools | 0 | 12771886 | <gh_stars>0
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class consent(Page):
    """Collects the participant's consent; shown only in round 1.

    NOTE(review): class name breaks the CapWords convention but is referenced
    in page_sequence, so it is left unchanged.
    """
    form_model = 'player'
    form_fields = ['consent','consent_account']

    def is_displayed(self):
        # Only ask for consent once, in the first round.
        return self.round_number == 1
class ResultsWaitPage(WaitPage):
    """Placeholder wait page; no custom behaviour."""
    pass
class Results(Page):
    """Placeholder results page; no custom behaviour."""
    pass
page_sequence = [consent, ResultsWaitPage, Results]
| 2.171875 | 2 |
tests/debug.py | theiviaxx/unhandled | 0 | 12771887 | <reponame>theiviaxx/unhandled
import unhandled
# Demo configuration for the `unhandled` exception-hook package.
# NOTE(review): `conf` is built but never used — unhandled.init() is called
# without arguments; presumably init(conf) was intended. Confirm.
conf = {
    'handlers': [
        unhandled.VerboseExceptionHandler,
        unhandled.SimpleExceptionHandler
    ]
}
unhandled.init()
def foo():
    """Print a local value, then raise ZeroDivisionError to exercise the
    installed exception handlers."""
    value = 1
    print(value)
    value / 0
# with unhandled.pause():
# Deliberately trigger the unhandled ZeroDivisionError so the installed
# handlers can demonstrate their output.
foo()
| 2.265625 | 2 |
tools/update.py | HadesD/LaraCC | 0 | 12771888 | import os
import subprocess
# Refresh every git submodule: check out master, then pull the latest commits.
# Return codes are intentionally ignored, as in the original script.
for git_args in (
    ['git', 'submodule', 'foreach', 'git', 'checkout', 'master'],
    ['git', 'submodule', 'foreach', 'git', 'pull', 'origin', 'master'],
):
    subprocess.call(git_args)
| 1.890625 | 2 |
api/main.py | reddimohan/FastAPI-RestAPI-PostgreSQL-PgAdmin-Authentication-docker | 0 | 12771889 | <gh_stars>0
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.core import tasks
from app.api_v1.api import api_router
from app.db.base_class import Base
from app.db.session import engine
from app.core.config import settings
def get_application():
    """Build and fully configure the FastAPI application instance."""
    application = FastAPI(
        title=settings.PROJECT_NAME,
        openapi_url=f"{settings.API_V1_STR}/openapi.json",
    )

    # Create any missing database tables before serving requests.
    Base.metadata.create_all(bind=engine)

    application.add_middleware(
        CORSMiddleware,
        allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Wire lifecycle hooks (e.g. resource setup/teardown).
    application.add_event_handler("startup", tasks.create_start_app_handler(application))
    application.add_event_handler("shutdown", tasks.create_stop_app_handler(application))
    return application
# Module-level ASGI application picked up by the server (e.g. uvicorn app.main:app).
app = get_application()
app.include_router(api_router, prefix=settings.API_V1_STR)
libra/validator_config.py | MoveOnLibra/libra-core | 5 | 12771890 | <filename>libra/validator_config.py
from __future__ import annotations
from libra.move_resource import MoveResource
from libra.crypto.ed25519 import Ed25519PublicKey
from libra.crypto.x25519 import X25519PublicKey
from canoser import Struct, BytesT
Multiaddr = BytesT()
class ValidatorConfig(Struct):
    """On-chain configuration published by a validator: consensus and network
    public keys plus the addresses peers use to reach it."""
    _fields = [
        ('consensus_public_key', Ed25519PublicKey),
        ('validator_network_signing_pubkey', Ed25519PublicKey),
        ('validator_network_identity_pubkey', X25519PublicKey),
        ('validator_network_address', Multiaddr),
        ('fullnodes_network_identity_pubkey', X25519PublicKey),
        ('fullnodes_network_address', Multiaddr)
    ]
class ValidatorConfigResource(Struct, MoveResource):
    # Move resource wrapper holding a single ValidatorConfig; MODULE_NAME and
    # STRUCT_NAME identify the corresponding on-chain Move module/struct.
    _fields = [
        ('validator_config', ValidatorConfig),
    ]

    MODULE_NAME: str = "ValidatorConfig"
    STRUCT_NAME: str = MODULE_NAME
| 1.945313 | 2 |
OSTL/P03-palindrome.py | ashnigga98/CollegeStuff | 0 | 12771891 | <reponame>ashnigga98/CollegeStuff
#
# if type(input) == string:
# find if palindrome
# elif type(input) == number:
# find factorial
# find if palindrome
#
def pal(st):
    """Return True if `st` reads the same forwards and backwards.

    Accepts any string (the menu passes raw user input, including digit
    strings for numbers).  The empty string and single characters are
    palindromes.
    """
    # The original scanned every mirrored character pair with no early exit;
    # comparing against the reversed slice is equivalent and idiomatic.
    return st == st[::-1]
# Interactive menu: loop until the user picks option 3 (exit).
while True:
    print("Menu\n1. for palindrome\n2. for factorial\n3. exit")
    ip = int(input("enter your choice: "))
    if ip == 1:
        st = input("enter number or string: ")
        # Fixed typos in the user-facing messages ("plaindrome"/"plindrome").
        if pal(st):
            print("the input is a palindrome")
        else:
            print("the input is not a palindrome")
    elif ip == 2:
        st = int(input("enter number: "))
        i = 1
        fact = 1
        while i <= st:  # st is already an int; the redundant int(st) is gone
            fact *= i
            i += 1
        # Bug fix: the original printed the menu choice `ip` (always 2)
        # instead of the number whose factorial was computed.
        print("fact of", st, "is", fact)
    elif ip == 3:
        break
    else:
        print("enter valid choice")
| 4.09375 | 4 |
python-packages/order_utils/src/zero_ex/order_utils/signature_utils.py | TheOceanTrade/0x-monorepo | 0 | 12771892 | <reponame>TheOceanTrade/0x-monorepo<gh_stars>0
"""Signature utilities."""
from typing import Dict, Tuple
import json
from pkg_resources import resource_string
from eth_utils import is_address, to_checksum_address
from web3 import Web3
import web3.exceptions
from web3.utils import datatypes
from zero_ex.dev_utils.type_assertions import assert_is_hex_string
# prefer `black` formatting. pylint: disable=C0330
# ABI of the 0x Exchange contract, loaded from the bundled JSON artifact.
EXCHANGE_ABI = json.loads(
    resource_string("zero_ex.contract_artifacts", "artifacts/Exchange.json")
)["compilerOutput"]["abi"]

# Maps an Ethereum network id (as a decimal string, e.g. "1" for mainnet) to
# the address of the deployed 0x Exchange contract on that network.
network_to_exchange_addr: Dict[str, str] = {
    "1": "0x4f833a24e1f95d70f028921e27040ca56e09ab0b",
    "3": "0x4530c0483a1633c7a1c97d2c53721caff2caaaaf",
    "42": "0x35dd2932454449b14cee11a94d3674a936d5d7b2",
    "50": "0x48bacb9266a570d521063ef5dd96e61686dbe788",
}
# prefer `black` formatting. pylint: disable=C0330
def is_valid_signature(
    provider: Web3.HTTPProvider, data: str, signature: str, signer_address: str
) -> Tuple[bool, str]:
    # docstring considered all one line by pylint: disable=line-too-long
    """Check the validity of the supplied signature.

    Check if the supplied ``signature`` corresponds to signing ``data`` with
    the private key corresponding to ``signer_address``.

    :param provider: A Web3 provider able to access the 0x Exchange contract.
    :param data: The hex encoded data signed by the supplied signature.
    :param signature: The hex encoded signature.
    :param signer_address: The hex encoded address that signed the data to
        produce the supplied signature.

    :rtype: Boolean indicating whether the given signature is valid.

    >>> is_valid_signature(
    ...     Web3.HTTPProvider("http://127.0.0.1:8545"),
    ...     '0x6927e990021d23b1eb7b8789f6a6feaf98fe104bb0cf8259421b79f9a34222b0',
    ...     '0x1B61a3ed31b43c8780e905a260a35faefcc527be7516aa11c0256729b5b351bc3340349190569279751135161d22529dc25add4f6069af05be04cacbda2ace225403',
    ...     '0x5409ed021d9299bf6814279a6a1411a7e866a631',
    ... )
    (True, '')
    """  # noqa: E501 (line too long)
    # TODO: make this provider check more flexible. pylint: disable=fixme
    # https://app.asana.com/0/684263176955174/901300863045491/f
    if not isinstance(provider, Web3.HTTPProvider):
        raise TypeError("provider is not a Web3.HTTPProvider")
    assert_is_hex_string(data, "data")
    assert_is_hex_string(signature, "signature")
    assert_is_hex_string(signer_address, "signer_address")
    if not is_address(signer_address):
        raise ValueError("signer_address is not a valid address")

    web3_instance = Web3(provider)
    # false positive from pylint: disable=no-member
    network_id = web3_instance.net.version
    # NOTE(review): a network id outside network_to_exchange_addr raises a
    # bare KeyError here — confirm whether a clearer error is wanted.
    contract_address = network_to_exchange_addr[network_id]
    # false positive from pylint: disable=no-member
    contract: datatypes.Contract = web3_instance.eth.contract(
        address=to_checksum_address(contract_address), abi=EXCHANGE_ABI
    )
    try:
        # On success the contract call returns the validity flag; an empty
        # string means "no revert reason".
        return (
            contract.call().isValidSignature(
                data, to_checksum_address(signer_address), signature
            ),
            "",
        )
    except web3.exceptions.BadFunctionCallOutput as exception:
        # Map known contract revert reasons to (False, reason).
        known_revert_reasons = [
            "LENGTH_GREATER_THAN_0_REQUIRED",
            "SIGNATURE_UNSUPPORTED",
            "LENGTH_0_REQUIRED",
            "LENGTH_65_REQUIRED",
        ]
        for known_revert_reason in known_revert_reasons:
            if known_revert_reason in str(exception):
                return (False, known_revert_reason)
        return (False, f"Unknown: {exception}")
| 1.945313 | 2 |
app/create_app.py | KarolJaksik/IO-Project | 3 | 12771893 | <gh_stars>1-10
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from passlib.apps import custom_app_context as pwd_context
from flask_jwt_extended import JWTManager
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_optional,
jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)
from flask_mail import Mail, Message
from functools import wraps
from flask_cors import CORS
from flask_expects_json import expects_json
import os
def create_app():
    """Build and configure the Flask application from environment variables.

    Configures the SQLAlchemy connection, JWT secrets and the SSL mail
    transport, and enables CORS for all routes.
    """
    app = Flask(__name__)
    # Enable cross-origin requests; the CORS object itself is not needed
    # afterwards, so it is not bound to a name (the original also computed an
    # unused `basedir`, which has been removed).
    CORS(app)

    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['JWT_SECRET_KEY'] = os.environ.get('JWT_SECRET')
    app.config['SECRET_KEY'] = os.environ.get('JWT_SECRET')
    app.config['MAIL_SERVER'] = os.environ.get('MAIL_SERVER')
    app.config['MAIL_PORT'] = 465
    app.config['MAIL_USE_SSL'] = True
    app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USER')
    app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')

    return app
| 2.015625 | 2 |
src/magicdb/database/Database.py | CircleOnCircles/MagicDB | 5 | 12771894 | import firebase_admin
from firebase_admin import credentials, firestore
class Database:
    """Lazy wrapper around a firebase_admin Firestore client."""

    def __init__(self):
        # Firestore client; created by connect() or lazily by the `conn`
        # property.
        self._conn = None

    def connect(self, creds=None, from_file=None):
        """Initialise firebase_admin and open a Firestore client.

        Provide either a ready credentials object via `creds`, or the path to
        a service-account JSON file via `from_file`.

        :raises Exception: if neither argument is given, or if the default
            Firebase app was already initialised, or if initialisation fails.
        """
        if not creds and not from_file:
            raise Exception("Credentials or service account json file path required to connect with firestore")
        if not creds:
            creds = credentials.Certificate(from_file)
        try:
            firebase_admin.initialize_app(creds)
        except Exception as e:
            if 'The default Firebase app already exists' in str(e):
                raise Exception(
                    'If you want to connect to Firestore from_file, make sure fireorm.connect(from_file=<YOUR FILE>) '
                    'comes directly after importing FireORM for the first time.')
            # Bug fix: previously any other initialisation failure fell out of
            # the except block and was silently swallowed; re-raise it so the
            # caller sees the real error.
            raise
        self._conn = firestore.client()

    @property
    def conn(self):
        """Firestore client, initialising the default app lazily if needed."""
        if self._conn is None:
            firebase_admin.initialize_app()
            self._conn = firestore.client()
        return self._conn
| 3.125 | 3 |
database/generic_repository.py | fcjack/InventoryManager | 0 | 12771895 | <gh_stars>0
import sqlite3
class GenericRepository:
    """Base repository that hands out SQLite connections."""

    def get_connection(self, database='inventory'):
        """Open and return a new sqlite3 connection.

        :param database: database path; defaults to the application's
            'inventory' file, but may be overridden (e.g. ':memory:' in
            tests).  The default preserves the original behaviour.
        """
        return sqlite3.connect(database)
| 2.203125 | 2 |
python/tests/test_graph_business_trip.py | daveeS987/data-structures-and-algorithms | 0 | 12771896 | import pytest
from graphs.graph import Graph, Vertex, Edge
from graph_business_trip.graph_business_trip import business_trip
def test_can_instantiate_Graph():
    # A fresh Graph is truthy and starts with an empty adjacency list.
    graph = Graph()
    assert graph
    assert graph._adjacency_list == {}
def test_can_instantiate_Vertex():
    # A Vertex constructed without arguments defaults its value to None.
    vertex = Vertex()
    assert vertex
    assert vertex.value == None
def test_can_instantiate_Edge():
    # An Edge stores the target vertex and its weight.
    vertex1 = Vertex("hello")
    edge = Edge(vertex1, weight=50)
    assert edge.vertex == vertex1
    assert edge.weight == 50
def test_input_metroville_pandora_returns_true(example):
    # A direct flight exists between metroville and pandora, costing $82.
    graph = example[0]
    metroville = example[3]
    pandora = example[1]
    assert metroville.value == "metroville"
    assert pandora.value == "pandora"
    actual = business_trip(graph, [metroville, pandora])
    expected = "True, $82"
    assert actual == expected
def test_happy_path_multiple_cities_returns_true(example):
    # A chain of direct flights (arendelle -> monstropolis -> naboo) is
    # accepted and the fares are summed ($42 + $73 = $115).
    graph = example[0]
    arendelle = example[2]
    monstropolis = example[4]
    naboo = example[6]
    assert arendelle.value == "arendelle"
    assert monstropolis.value == "monstropolis"
    assert naboo.value == "naboo"
    actual = business_trip(graph, [arendelle, monstropolis, naboo])
    expected = "True, $115"
    assert actual == expected
def test_unhappy_path_returns_false(example):
    # Fix: the test name was misspelled "fales"; pytest collects it either
    # way, but the typo hurt readability and test reports.
    # No direct flight exists from naboo to pandora, so the trip is rejected.
    graph = example[0]
    naboo = example[6]
    pandora = example[1]
    assert naboo.value == "naboo"
    assert pandora.value == "pandora"
    actual = business_trip(graph, [naboo, pandora])
    expected = "False, $0"
    assert actual == expected
def test_unhappy_path_with_multiple_cities_return_false(example):
    # The first hop (narnia -> arendelle) has no direct flight, so the whole
    # itinerary is rejected regardless of the later legs.
    graph = example[0]
    narnia = example[5]
    arendelle = example[2]
    naboo = example[6]
    assert narnia.value == "narnia"
    assert arendelle.value == "arendelle"
    assert naboo.value == "naboo"
    actual = business_trip(graph, [narnia, arendelle, naboo])
    expected = "False, $0"
    assert actual == expected
@pytest.fixture
def example():
    """Build the six-city flight graph shared by every test.

    Returns the graph followed by the Vertex objects in a fixed order:
    (graph, pandora, arendelle, metroville, monstropolis, narnia, naboo).
    Edges are added in both directions with the fare as the weight.
    """
    graph1 = Graph()

    pandora = Vertex("pandora")
    arendelle = Vertex("arendelle")
    metroville = Vertex("metroville")
    monstropolis = Vertex("monstropolis")
    narnia = Vertex("narnia")
    naboo = Vertex("naboo")

    graph1.add_node(pandora)
    graph1.add_node(arendelle)
    graph1.add_node(metroville)
    graph1.add_node(monstropolis)
    graph1.add_node(narnia)
    graph1.add_node(naboo)

    graph1.add_edge(pandora, arendelle, 150)
    graph1.add_edge(pandora, metroville, 82)
    graph1.add_edge(arendelle, pandora, 150)
    graph1.add_edge(arendelle, metroville, 99)
    graph1.add_edge(arendelle, monstropolis, 42)
    graph1.add_edge(metroville, pandora, 82)
    graph1.add_edge(metroville, narnia, 37)
    graph1.add_edge(metroville, naboo, 26)
    graph1.add_edge(metroville, monstropolis, 105)
    graph1.add_edge(metroville, arendelle, 99)
    graph1.add_edge(monstropolis, arendelle, 42)
    graph1.add_edge(monstropolis, metroville, 105)
    graph1.add_edge(monstropolis, naboo, 73)
    graph1.add_edge(narnia, metroville, 37)
    graph1.add_edge(narnia, naboo, 250)
    graph1.add_edge(naboo, narnia, 250)
    graph1.add_edge(naboo, metroville, 26)
    graph1.add_edge(naboo, monstropolis, 73)

    return graph1, pandora, arendelle, metroville, monstropolis, narnia, naboo
| 2.828125 | 3 |
elif_bayindir/phase_1/python_basic_1/day_8/q3.py | CodedLadiesInnovateTech/-python-challenge-solutions | 6 | 12771897 | <reponame>CodedLadiesInnovateTech/-python-challenge-solutions
# Question 3
# Absolute file path.
import os
# NOTE(review): hard-coded to the author's machine; chdir raises
# FileNotFoundError if the directory does not exist.
os.chdir("/home/elif/Desktop")
# abspath resolves relative to the (new) current working directory, so this
# prints /home/elif/Desktop/elma.py whether or not the file exists.
print(os.path.abspath("elma.py"))
| 2.78125 | 3 |
env/sample_server.py | Normandez/ddos_watchdog | 1 | 12771898 | import http.server
import socketserver
import threading
ADDR = "192.168.2.1"
PORT = 80
class UDPHandler(socketserver.DatagramRequestHandler):
    """Answers every incoming UDP datagram with a fixed greeting."""

    def handle(self):
        # For datagram servers, self.request is a (payload, socket) pair;
        # renamed the locals so the builtin `socket` is no longer shadowed.
        payload, sock = self.request
        print ('client send: ', payload)
        sock.sendto(b'Hello from server!', self.client_address)
# Serve HTTP (static files via SimpleHTTPRequestHandler) on a background
# daemon thread, then serve UDP on the same address/port in the foreground.
# NOTE(review): the context-manager name `http` shadows the imported http
# module inside this block.
with socketserver.TCPServer((ADDR, PORT), http.server.SimpleHTTPRequestHandler) as http:
    print("serving HTTP on", ADDR, "at port", PORT)
    server_thread = threading.Thread(target=http.serve_forever)
    server_thread.daemon = True  # don't block interpreter exit
    server_thread.start()
    # Blocks forever handling UDP datagrams with UDPHandler.
    with socketserver.UDPServer((ADDR, PORT), UDPHandler) as udp:
        print("serving UDP on", ADDR, "at port", PORT)
        udp.serve_forever()
| 3.265625 | 3 |
salinization/prod/salinization/config.py | elbertsoftware/SpringboardAIC | 3 | 12771899 | <gh_stars>1-10
import confuse
config = None
def get_config():
    """Return the process-wide confuse Configuration, creating it on first use.

    The instance is cached in the module-level `config` global so repeated
    calls share one Configuration.
    """
    global config
    # Fix: compare against None with `is`, not `==` (PEP 8; `==` can invoke
    # arbitrary __eq__ implementations).
    if config is None:
        config = confuse.Configuration('salinization', __name__)
    return config
array_problems/Tests.py | daniel-zeiler/potential-happiness | 0 | 12771900 | import unittest
import array_problems.Solutions as array_problems
import array_problems.Solutions_Two as array_problems_two
import array_problems.Solutions_Three as array_problems_three
import array_problems.Solutions_Four as array_problems_four
import array_problems.Solutions_Five as array_problems_five
class SolutionsTest(unittest.TestCase):
def test_merge(self):
intervals = [[1, 3], [2, 6], [8, 10], [15, 18]]
output = [[1, 6], [8, 10], [15, 18]]
self.assertListEqual(output, array_problems_five.merge(intervals))
intervals = [[1, 4], [4, 5]]
output = [[1, 5]]
self.assertListEqual(output, array_problems_five.merge(intervals))
def test_largest_parameter(self):
input = [
[1, 0, 1, 1, 1],
[1, 0, 1, 1, 1],
[0, 1, 0, 1, 1]]
output = 7
self.assertEqual(output, array_problems_four.largest_parameter(input))
input = [[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]]
output = 9
self.assertEqual(output, array_problems_four.largest_parameter(input))
def test_max_increase_keep_city_skyline(self):
grid = [
[3, 0, 8, 4],
[2, 4, 5, 7],
[9, 2, 6, 3],
[0, 3, 1, 0]
]
output = 35
self.assertEqual(output, array_problems_four.maxIncreaseKeepingSkyline(grid))
grid = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
output = 0
self.assertEqual(output, array_problems_four.maxIncreaseKeepingSkyline(grid))
def test_array_pair_sum(self):
nums = [6, 2, 6, 5, 1, 2]
output = 9
self.assertEqual(output, array_problems_five.arrayPairSum(nums))
nums = [1, 4, 3, 2]
output = 4
self.assertEqual(output, array_problems_five.arrayPairSum(nums))
def test_sort_array_by_parity(self):
input = [3, 1, 2, 4]
output = [2, 4, 3, 1]
self.assertEqual(output, array_problems_five.sortArrayByParity(input))
def test_replace_elements(self):
input = [17, 18, 5, 4, 6, 1]
output = [18, 6, 6, 6, 1, -1]
self.assertEqual(output, array_problems_five.replaceElements(input))
def test_count_squares(self):
input = [
[0, 1, 1, 1],
[1, 1, 1, 1],
[0, 1, 1, 1]
]
output = 15
self.assertEqual(output, array_problems_four.countSquares(input))
input = [
[1, 0, 1],
[1, 1, 0],
[1, 1, 0]
]
output = 7
self.assertEqual(output, array_problems_four.countSquares(input))
def test_count_battleships(self):
board = [["X", ".", ".", "X"], [".", ".", ".", "X"], [".", ".", ".", "X"]]
output = 2
self.assertEqual(output, array_problems_four.countBattleships(board))
def test_interval_intersection(self):
firstList = [[0, 2], [5, 10], [13, 23], [24, 25]]
secondList = [[1, 5], [8, 12], [15, 24], [25, 26]]
output = [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]
self.assertListEqual(output, array_problems_five.intervalIntersection(firstList, secondList))
def test_permute(self):
nums = [1, 2, 3]
output = [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
self.assertListEqual(output, array_problems_five.permute(nums))
def test_subsets(self):
nums = [1, 2, 3]
output = [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]
self.assertCountEqual(output, array_problems_five.subsets(nums))
nums = [0]
output = [[], [0]]
self.assertCountEqual(output, array_problems_five.subsets(nums))
def test_cal_points(self):
ops = ["5", "2", "C", "D", "+"]
output = 30
self.assertEqual(output, array_problems_five.calPoints(ops))
ops = ["5", "-2", "4", "C", "D", "9", "+", "+"]
output = 27
self.assertEqual(output, array_problems_five.calPoints(ops))
def test_island_perimeter(self):
grid = [
[0, 1, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 0],
[1, 1, 0, 0]
]
output = 16
self.assertEqual(output, array_problems_two.islandPerimeter(grid))
grid = [[1]]
output = 4
self.assertEqual(output, array_problems_two.islandPerimeter(grid))
grid = [[1, 0]]
output = 4
self.assertEqual(output, array_problems_two.islandPerimeter(grid))
def test_max_area_of_island(self):
grid = [
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]
]
output = 6
self.assertEqual(output, array_problems_two.maxAreaOfIsland(grid))
grid = [[0, 0, 0, 0, 0, 0, 0, 0]]
output = 0
self.assertEqual(output, array_problems_two.maxAreaOfIsland(grid))
def test_relative_sort(self):
arr1 = [2, 3, 1, 3, 2, 4, 6, 7, 9, 2, 19]
arr2 = [2, 1, 4, 3, 9, 6]
output = [2, 2, 2, 1, 4, 3, 3, 9, 6, 7, 19]
self.assertListEqual(output, array_problems_five.relativeSortArray(arr1, arr2))
def test_intersection(self):
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
output = [2]
self.assertCountEqual(output, array_problems_five.intersection(nums1, nums2))
nums1 = [4, 9, 5]
nums2 = [9, 4, 9, 8, 4]
output = [9, 4]
self.assertCountEqual(output, array_problems_five.intersection(nums1, nums2))
def test_minimal_fall_path(self):
matrix = [
[2, 1, 3],
[6, 5, 4],
[7, 8, 9]
]
output = 13
self.assertEqual(output, array_problems_five.minFallingPathSum(matrix))
matrix = [
[-19, 57],
[-40, -5]
]
output = -59
self.assertEqual(output, array_problems_five.minFallingPathSum(matrix))
def test_get_max_gold(self):
input = [
[1, 0, 7],
[2, 0, 6],
[3, 4, 5],
[0, 3, 0],
[9, 0, 20]
]
output = 28
self.assertEqual(output, array_problems_four.getMaximumGold(input))
input = [
[0, 6, 0],
[5, 8, 7],
[0, 9, 0]
]
output = 24
self.assertEqual(output, array_problems_four.getMaximumGold(input))
def test_validate_stack_sequence(self):
pushed = [1, 2, 3, 4, 5]
popped = [4, 5, 3, 2, 1]
output = True
self.assertEqual(output, array_problems_four.validateStackSequences(pushed, popped))
pushed = [1, 2, 3, 4, 5]
popped = [4, 3, 5, 1, 2]
output = False
self.assertEqual(output, array_problems_four.validateStackSequences(pushed, popped))
def test_update_board(self):
board = [
["E", "E", "E", "E", "E"],
["E", "E", "M", "E", "E"],
["E", "E", "E", "E", "E"],
["E", "E", "E", "E", "E"]
]
click = [3, 0]
output = [
["B", "1", "E", "1", "B"],
["B", "1", "M", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]
]
self.assertEqual(output, array_problems.minesweeper(board, click))
board = [
["B", "1", "E", "1", "B"],
["B", "1", "M", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]
]
click = [1, 2]
output = [
["B", "1", "E", "1", "B"],
["B", "1", "X", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]
]
self.assertEqual(output, array_problems.minesweeper(board, click))
def test_last_stones(self):
input = [2, 7, 4, 1, 8, 1]
output = 1
self.assertEqual(output, array_problems_five.lastStoneWeight(input))
def test_can_reach(self):
arr = [4, 2, 3, 0, 3, 1, 2]
start = 0
output = True
self.assertEqual(output, array_problems_five.canReach(arr, start))
arr = [3, 0, 2, 1, 2]
start = 2
output = False
self.assertEqual(output, array_problems_five.canReach(arr, start))
def test_longest_ones(self):
nums = [0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1]
k = 3
output = 10
self.assertEqual(output, array_problems_five.longestOnes(nums, k))
nums = [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0]
k = 2
output = 6
self.assertEqual(output, array_problems_five.longestOnes(nums, k))
def test_num_enclaves(self):
grid = [
[0, 0, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]
]
output = 3
self.assertEqual(output, array_problems.numEnclaves(grid))
def test_min_path_sum(self):
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
output = 7
self.assertEqual(output, array_problems_four.minPathSum(grid))
grid = [[1, 2, 3], [4, 5, 6]]
output = 12
self.assertEqual(output, array_problems_four.minPathSum(grid))
def test_is_monitonic(self):
nums = [1, 2, 2, 3]
output = True
self.assertEqual(output, array_problems.isMonotonic(nums))
nums = [6, 5, 4, 4]
output = True
self.assertEqual(output, array_problems.isMonotonic(nums))
nums = [1, 3, 2]
output = False
self.assertEqual(output, array_problems.isMonotonic(nums))
def test_flood_fill(self):
image = [
[1, 1, 1],
[1, 1, 0],
[1, 0, 1]
]
sr = 1
sc = 1
newColor = 2
output = [
[2, 2, 2],
[2, 2, 0],
[2, 0, 1]
]
self.assertListEqual(output, array_problems_four.floodFill(image, sr, sc, newColor))
image = [
[0, 0, 0],
[0, 0, 0]
]
sr = 0
sc = 0
newColor = 2
output = [
[2, 2, 2],
[2, 2, 2]
]
self.assertListEqual(output, array_problems_four.floodFill(image, sr, sc, newColor))
def test_circular_queue(self):
my_circular_queue = array_problems.MyCircularDeque(3)
self.assertEqual(True, my_circular_queue.insertLast(1))
self.assertEqual(True, my_circular_queue.insertLast(2))
self.assertEqual(True, my_circular_queue.insertFront(3))
self.assertEqual(False, my_circular_queue.insertFront(4))
self.assertEqual(2, my_circular_queue.getRear())
self.assertEqual(True, my_circular_queue.isFull())
self.assertEqual(True, my_circular_queue.deleteLast())
self.assertEqual(True, my_circular_queue.insertFront(4))
self.assertEqual(4, my_circular_queue.getFront())
def test_min_cost_climbing_stairs(self):
cost = [10, 15, 20]
output = 15
self.assertEqual(output, array_problems_five.minCostClimbingStairs(cost))
cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
output = 6
self.assertEqual(output, array_problems_five.minCostClimbingStairs(cost))
def test_max_satisfied(self):
customers = [1, 0, 1, 2, 1, 1, 7, 5]
grumpy = [0, 1, 0, 1, 0, 1, 0, 1]
minutes = 3
output = 16
self.assertEqual(output, array_problems_five.maxSatisfied(customers, grumpy, minutes))
def test_max_consecutive(self):
nums = [1, 1, 0, 1, 1, 1]
output = 3
self.assertEqual(output, array_problems_two.findMaxConsecutiveOnes(nums))
nums = [1, 0, 1, 1, 0, 1]
output = 2
self.assertEqual(output, array_problems_two.findMaxConsecutiveOnes(nums))
def test_intersects(self):
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
output = [2, 2]
self.assertEqual(output, array_problems_four.intersect(nums1, nums2))
nums1 = [4, 9, 5]
nums2 = [9, 4, 9, 8, 4]
output = [4, 9]
self.assertEqual(output, array_problems_four.intersect(nums1, nums2))
def test_max_area(self):
height = [1, 8, 6, 2, 5, 4, 8, 3, 7]
output = 49
self.assertEqual(output, array_problems_five.maxArea(height))
height = [1, 1]
output = 1
self.assertEqual(output, array_problems_five.maxArea(height))
def test_sort_array(self):
nums = [5, 2, 3, 1]
output = [1, 2, 3, 5]
self.assertListEqual(output, array_problems_two.sortArray(nums))
nums = [5, 1, 1, 2, 0, 0]
output = [0, 0, 1, 1, 2, 5]
self.assertListEqual(output, array_problems_two.sortArray(nums))
def test_majority_elements(self):
nums = [2, 2, 1, 1, 1, 2, 2]
output = 2
self.assertEqual(output, array_problems.majorityElement(nums))
nums = [3, 2, 3]
output = 3
self.assertEqual(output, array_problems.majorityElement(nums))
def test_game_of_life(self):
board = [
[0, 1, 0],
[0, 0, 1],
[1, 1, 1],
[0, 0, 0]
]
output = [[0, 0, 0], [1, 0, 1], [0, 1, 1], [0, 1, 0]]
array_problems.gameOfLife(board)
self.assertListEqual(output, board)
board = [
[1, 1],
[1, 0]
]
output = [[1, 1], [1, 1]]
array_problems.gameOfLife(board)
self.assertListEqual(output, board)
def test_move_zeroes(self):
nums = [0, 1, 0, 3, 12]
array_problems.moveZeroes(nums)
output = [1, 3, 12, 0, 0]
self.assertListEqual(output, nums)
nums = [1, 3, 5, 0, 2, 0]
array_problems.moveZeroes(nums)
output = [1, 3, 5, 2, 0, 0]
self.assertListEqual(output, nums)
def test_reorder_log_files(self):
logs = ["dig1 8 1 5 1", "let1 art can", "dig2 3 6", "let2 own kit dig", "let3 art zero"]
output = ["let1 art can", "let3 art zero", "let2 own kit dig", "dig1 8 1 5 1", "dig2 3 6"]
self.assertListEqual(output, array_problems_five.reorderLogFiles(logs))
logs = ["a1 9 2 3 1", "g1 act car", "zo4 4 7", "ab1 off key dog", "a8 act zoo"]
output = ["g1 act car", "a8 act zoo", "ab1 off key dog", "a1 9 2 3 1", "zo4 4 7"]
self.assertListEqual(output, array_problems_five.reorderLogFiles(logs))
def test_sort_colors(self):
nums = [2, 0, 2, 1, 1, 0]
output = [0, 0, 1, 1, 2, 2]
array_problems_two.sortColors(nums)
self.assertListEqual(output, nums)
def test_max_profits(self):
prices = [7, 1, 5, 3, 6, 4]
output = 5
self.assertEqual(output, array_problems_five.maxProfit(prices))
prices = [7, 6, 4, 3, 1]
output = 0
self.assertEqual(output, array_problems_five.maxProfit(prices))
def test_number_of_islands(self):
input = [
["1", "1", "1", "1", "0"],
["1", "1", "0", "1", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "0", "0", "0"]
]
output = 1
self.assertEqual(output, array_problems_two.numIslands(input))
input = [
["1", "1", "0", "0", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "1", "0", "0"],
["0", "0", "0", "1", "1"]
]
output = 3
self.assertEqual(output, array_problems_two.numIslands(input))
def test_rotting_oranges(self):
grid = [
[2, 1, 1],
[1, 1, 0],
[0, 1, 1]
]
output = 4
self.assertEqual(output, array_problems_four.orangesRotting(grid))
grid = [[2, 1, 1], [0, 1, 1], [1, 0, 1]]
output = -1
self.assertEqual(output, array_problems_four.orangesRotting(grid))
grid = [[0, 2]]
output = 0
self.assertEqual(output, array_problems_four.orangesRotting(grid))
def test_remove_elements(self):
nums = [0, 1, 2, 2, 3, 0, 4, 2]
val = 2
output = 5
self.assertEqual(output, array_problems_five.removeElement(nums, val))
nums = [3, 2, 2, 3]
val = 3
output = 2
self.assertEqual(output, array_problems_five.removeElement(nums, val))
def test_find_judge(self):
n = 2
trust = [[1, 2]]
output = 2
self.assertEqual(output, array_problems_five.findJudge(n, trust))
n = 3
trust = [[1, 3], [2, 3]]
output = 3
self.assertEqual(output, array_problems_five.findJudge(n, trust))
n = 3
trust = [[1, 3], [2, 3], [3, 1]]
output = -1
self.assertEqual(output, array_problems_five.findJudge(n, trust))
def test_max_sub_array(self):
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
output = 6
self.assertEqual(output, array_problems_five.maxSubArray(nums))
nums = [1]
output = 1
self.assertEqual(output, array_problems_five.maxSubArray(nums))
nums = [5, 4, -1, 7, 8]
output = 23
self.assertEqual(output, array_problems_five.maxSubArray(nums))
def test_length_of_list(self):
nums = [10, 9, 2, 5, 3, 7, 101, 18]
output = 4
self.assertEqual(output, array_problems_five.lengthOfLIS(nums))
nums = [0, 1, 0, 3, 2, 3]
output = 4
self.assertEqual(output, array_problems_five.lengthOfLIS(nums))
nums = [7, 7, 7, 7, 7, 7, 7]
output = 1
self.assertEqual(output, array_problems_five.lengthOfLIS(nums))
def test_expressive_words(self):
# s = "heeellooo"
# words = ["hello", "hi", "helo"]
# output = 1
# self.assertEqual(output, array_problems.expressiveWords(s, words))
# s = "zzzzzyyyyy"
# words = ["zzyy", "zy", "zyy"]
# output = 3
# self.assertEqual(output, array_problems.expressiveWords(s, words))
pass
def test_rob(self):
nums = [1, 2, 3, 1]
output = 4
self.assertEqual(output, array_problems_five.rob(nums))
nums = [2, 7, 9, 3, 1]
output = 12
self.assertEqual(output, array_problems_five.rob(nums))
def test_merge2(self):
intervals = [[1, 3], [2, 6], [8, 10], [15, 18]]
output = [[1, 6], [8, 10], [15, 18]]
self.assertListEqual(output, array_problems_five.merge_2(intervals))
intervals = [[1, 4], [4, 5]]
output = [[1, 5]]
self.assertListEqual(output, array_problems_five.merge_2(intervals))
def test_maximal_square(self):
matrix = [
["1", "0", "1", "0", "0"],
["1", "0", "1", "1", "1"],
["1", "1", "1", "1", "1"],
["1", "0", "0", "1", "0"]
]
output = 4
self.assertEqual(output, array_problems_four.maximalSquare(matrix))
matrix = [
["0", "1"],
["1", "0"]
]
output = 1
self.assertEqual(output, array_problems_four.maximalSquare(matrix))
matrix = [
["0"]
]
output = 0
self.assertEqual(output, array_problems_four.maximalSquare(matrix))
def test_plus_one(self):
digits = [4, 3, 2, 1]
output = [4, 3, 2, 2]
self.assertListEqual(output, array_problems_five.plusOne(digits))
digits = [0]
output = [1]
self.assertListEqual(output, array_problems_five.plusOne(digits))
digits = [9]
output = [1, 0]
self.assertListEqual(output, array_problems_five.plusOne(digits))
def test_shortest_path_binary_matrix(self):
grid = [
[0, 1],
[1, 0]
]
output = 2
self.assertEqual(output, array_problems_four.shortestPathBinaryMatrix(grid))
grid = [
[0, 0, 0],
[1, 1, 0],
[1, 1, 0]
]
output = 4
self.assertEqual(output, array_problems_four.shortestPathBinaryMatrix(grid))
grid = [[1, 0, 0], [1, 1, 0], [1, 1, 0]]
output = -1
self.assertEqual(output, array_problems_four.shortestPathBinaryMatrix(grid))
def test_exists(self):
board = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
word = "ABCCED"
output = True
self.assertEqual(output, array_problems.exist(board, word))
board = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
word = "SEE"
output = True
self.assertEqual(output, array_problems.exist(board, word))
board = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
word = "ABCB"
output = False
self.assertEqual(output, array_problems.exist(board, word))
def test_can_jump(self):
nums = [2, 3, 1, 1, 4]
output = True
self.assertEqual(output, array_problems_five.canJump(nums))
nums = [3, 2, 1, 0, 4]
output = False
self.assertEqual(output, array_problems_five.canJump(nums))
def test_first_and_last_k(self):
nums = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 5, 5, 5, 6]
k = 1
self.assertListEqual([0, 4], array_problems_four.first_and_last_of_k(nums, k))
nums = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 5, 5, 5, 6, 11]
k = 10
array_problems.first_and_last_of_k(nums, k)
self.assertListEqual([-1, -1], array_problems_four.first_and_last_of_k(nums, k))
nums = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 5, 5, 5, 6]
k = 3
self.assertListEqual([7, 7], array_problems_four.first_and_last_of_k(nums, k))
def test_toeplitze_matrix(self):
matrix = [[1, 2, 3, 4], [5, 1, 2, 3], [9, 5, 1, 2]]
output = True
self.assertEqual(output, array_problems_four.isToeplitzMatrix(matrix))
matrix = [[1, 2], [2, 2]]
output = False
self.assertEqual(output, array_problems_four.isToeplitzMatrix(matrix))
def test_is_alien_sorted(self):
words = ["hello", "leetcode"]
order = "hlabcdefgijkmnopqrstuvwxyz"
output = True
self.assertEqual(output, array_problems_five.isAlienSorted(words, order))
words = ["word", "world", "row"]
order = "worldabcefghijkmnpqstuvxyz"
output = False
self.assertEqual(output, array_problems_five.isAlienSorted(words, order))
words = ["apple", "app"]
order = "abcdefghijklmnopqrstuvwxyz"
output = False
self.assertEqual(output, array_problems_five.isAlienSorted(words, order))
def test_plus_one_large_number(self):
input = [
[9, 9, 9, 9],
[1, 1],
[9, 4, 5, 6],
[9, 0, 0, 0],
[9, 9, 9, 9]
]
output = [3, 8, 4, 6, 5]
self.assertListEqual(output, array_problems_five.plus_one_large_number(input))
def test_can_attend_meetings(self):
input = [[0, 30], [5, 10], [15, 20]]
output = False
self.assertEqual(output, array_problems_five.canAttendMeetings(input))
input = [[7, 10], [2, 4]]
output = True
self.assertEqual(output, array_problems_five.canAttendMeetings(input))
def test_num_meeting_rooms(self):
input = [[0, 30], [5, 10], [15, 20]]
output = 2
self.assertEqual(output, array_problems_five.numberMeetingRooms(input))
def test_meeting_room_conflicts(self):
calendar = [[1, 3], [4, 6], [6, 8], [9, 11], [6, 9], [1, 3], [4, 10]]
rooms = 3
queries = [[1, 9], [2, 6], [7, 9], [3, 5], [3, 9], [2, 4], [7, 10], [5, 9], [3, 10], [9, 10]]
output = [False, True, False, True, False, True, False, False, False, True]
self.assertListEqual(output, array_problems_three.meeting_room_conflicts(calendar, rooms, queries))
def test_pacific_atlantic(self):
heights = [[1, 2, 2, 3, 5], [3, 2, 3, 4, 4], [2, 4, 5, 3, 1], [6, 7, 1, 4, 5], [5, 1, 1, 2, 4]]
output = [[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]]
self.assertCountEqual(output, array_problems.pacificAtlantic(heights))
heights = [[2, 1], [1, 2]]
output = [[0, 0], [0, 1], [1, 0], [1, 1]]
self.assertCountEqual(output, array_problems.pacificAtlantic(heights))
def test_pick_random_weight(self):
pick_weight_random = array_problems_three.PickWeightedRandom([1, 3, 7, 1])
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
print(pick_weight_random.pickIndex())
def test_full_justify(self):
words = ["This", "is", "an", "example", "of", "text", "justification."]
maxWidth = 16
output = [
"This is an",
"example of text",
"justification. "
]
self.assertListEqual(output, array_problems_three.fullJustify(words, maxWidth))
words = ["What", "must", "be", "acknowledgment", "shall", "be"]
maxWidth = 16
output = [
"What must be",
"acknowledgment ",
"shall be "
]
self.assertListEqual(output, array_problems_three.fullJustify(words, maxWidth))
def test_exclusive_time(self):
n = 2
logs = ["0:start:0", "1:start:2", "1:end:5", "0:end:6"]
output = [3, 4]
self.assertListEqual(output, array_problems_three.exclusiveTime(n, logs))
def test_rotate_matrix(self):
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
output = [
[7, 4, 1],
[8, 5, 2],
[9, 6, 3]
]
array_problems_five.rotate(matrix)
self.assertListEqual(output, matrix)
def test_fall_and_crush(self):
    """Boxes ('#') fall and are crushed by obstacles ('*') per fallAndCrush2.

    FIX: renamed the local ``input`` to ``grid`` -- the original shadowed
    the ``input()`` builtin.
    """
    grid = [['#', '.', '#', '#', '*'],
            ['#', '.', '.', '#', '#'],
            ['.', '#', '.', '#', '.'],
            ['.', '.', '#', '.', '#'],
            ['#', '*', '.', '.', '.'],
            ['.', '.', '*', '#', '.']]
    expected = [['.', '.', '.', '.', '*'],
                ['.', '.', '.', '.', '.'],
                ['.', '.', '.', '.', '.'],
                ['.', '.', '.', '.', '.'],
                ['.', '.', '.', '#', '#'],
                ['#', '.', '#', '#', '#']]
    self.assertListEqual(expected, array_problems_three.fallAndCrush2(grid))
def test_number_of_markers_on_road(self):
    """Union of the intervals [-1,5], [3,6], [4,7] needs 9 markers."""
    coordinates = [[4, 7], [-1, 5], [3, 6]]
    self.assertEqual(
        9, array_problems_five.number_of_markers_on_road(coordinates))
def test_shortest_bridge(self):
    """shortestBridge: fewest 0s to flip to connect the two islands."""
    cases = [
        ([[0, 1, 0], [0, 0, 0], [0, 0, 1]], 2),
        ([[1, 1, 1, 1, 1],
          [1, 0, 0, 0, 1],
          [1, 0, 1, 0, 1],
          [1, 0, 0, 0, 1],
          [1, 1, 1, 1, 1]], 1),
        ([[1, 1, 0, 0, 0],
          [1, 0, 0, 0, 0],
          [1, 0, 0, 0, 0],
          [0, 0, 0, 1, 1],
          [0, 0, 0, 1, 1]], 3),
        ([[0, 0, 0, 0, 0, 0, 0],
          [1, 0, 0, 0, 0, 0, 0],
          [1, 0, 0, 0, 0, 0, 0],
          [1, 1, 0, 0, 0, 0, 0],
          [0, 1, 0, 1, 1, 1, 0],
          [0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0]], 1),
    ]
    for grid, expected in cases:
        self.assertEqual(expected, array_problems_three.shortestBridge(grid))
if __name__ == '__main__':
    # Run the whole test suite when this file is executed directly.
    unittest.main()
| 2.953125 | 3 |
gru_multi_cnn.py | dtchuink/RCovnvNet | 0 | 12771901 | <filename>gru_multi_cnn.py
'''
Created on Oct 2, 2018

@author: danielle
'''
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from datasets import PartDataset
from lr_scheduler import ReduceLROnPlateau
# NOTE(review): this TensorFlow import appears unused and pulls all of
# TensorFlow into a PyTorch script -- confirm before removing.
from tensorflow.contrib.batching.ops.gen_batch_ops import batch

# Hyper Parameters
input_size = 2500        # features per GRU time step
hidden_size = 1800       # GRU hidden state size
num_layers = 1
#num_classes = 10        # derived from the dataset below instead
batch_size = 32
test_batch_size=10
num_epochs = 5
learning_rate = 0.0001
hidden_size2 = 1048      # channels of the last conv layer in RNN

# wraps a string in ANSI escape codes so it prints in blue
blue = lambda x:'\033[94m' + x + '\033[0m'

# ShapeNet part-segmentation data, restricted to the 'Chair' class.
train_dataset = PartDataset(root = '/home/danielle/pyDevelopment/lstm-rnn/src/ln_lstm/shapenetcore_partanno_segmentation_benchmark_v0', classification = False, class_choice = ['Chair'])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
test_dataset = PartDataset(root = '/home/danielle/pyDevelopment/lstm-rnn/src/ln_lstm/shapenetcore_partanno_segmentation_benchmark_v0', classification = False, train = False, class_choice = ['Chair'])
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
print(len(train_dataset), len(test_dataset))
# number of segmentation classes, taken from the dataset
num_classes = train_dataset.num_seg_classes
print('classes', num_classes)
class RNN(nn.Module):
    """GRU encoder followed by a point-wise conv stack and a classifier head.

    forward() takes ``x`` and a GRU hidden state (or ``None``) and returns
    ``(log-probabilities, final hidden state)``.
    """

    def __init__(self, input_size, hidden_size, hidden_size2, output_size, n_layers=1):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_size2 = hidden_size2
        # NOTE(review): with n_layers == 1 the dropout argument has no effect
        # (PyTorch warns about this) -- confirm the intended layer count.
        self.gru = nn.GRU(input_size, hidden_size, n_layers, dropout=0.05, batch_first=True)
        # kernel-size-1 convolution stack (per-point feature transform)
        self.c1 = nn.Conv1d(3, 64, 1)
        self.c2 = nn.Conv1d(64, 128, 1)
        self.c3 = nn.Conv1d(128, 256, 1)
        self.c4 = nn.Conv1d(256, hidden_size2, 1)
        self.mp1 = nn.AdaptiveMaxPool1d(1)
        # CONSISTENCY FIX: use the output_size parameter instead of the
        # module-level num_classes global (the caller passes the same value).
        self.l1 = nn.Linear(hidden_size2, self.output_size)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(256)
        self.bn4 = nn.BatchNorm1d(hidden_size2)

    def forward(self, x, hidden):
        output, hidden = self.gru(x, hidden)
        # BUG FIX: the original called self.gru2, which is never defined in
        # __init__ and raised AttributeError on the first forward pass.
        x = output
        # NOTE(review): self.c1 expects 3 input channels, but the GRU output
        # carries hidden_size feature channels -- confirm the intended layout.
        x = F.relu(self.bn1(self.c1(x)))
        x = F.relu(self.bn2(self.c2(x)))
        x = F.relu(self.bn3(self.c3(x)))
        x = F.relu(self.bn4(self.c4(x)))
        x = self.mp1(x)
        x = x.view(-1, self.hidden_size2)
        x = self.l1(x)
        # dim made explicit: implicit-dim log_softmax is deprecated
        return F.log_softmax(x, dim=-1), hidden
class sRNN(nn.Module):
    """GRU encoder + conv stack producing per-point class log-probabilities.

    forward() returns ``(log_probs of shape (batch, points, classes),
    final hidden state)``.
    """

    def __init__(self, input_size, hidden_size, output_size, n_layers=1, k=2):
        super(sRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_size2 = hidden_size2
        self.k = k
        # NOTE(review): dropout is a no-op for num_layers == 1 (PyTorch warns).
        self.gru = nn.GRU(input_size, hidden_size, num_layers=n_layers, dropout=0.05, batch_first=True)
        # kernel-size-1 convolution stack (per-point feature transform)
        self.c1 = nn.Conv1d(3, 64, 1)
        self.c2 = nn.Conv1d(64, 128, 1)
        self.c3 = nn.Conv1d(128, 256, 1)
        self.c4 = nn.Conv1d(256, hidden_size, 1)
        self.mp1 = nn.AdaptiveMaxPool1d(1)
        # CONSISTENCY FIX: use output_size instead of the num_classes global
        # (the caller passes the same value).
        self.l1 = nn.Linear(hidden_size, self.output_size)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(256)
        self.bn4 = nn.BatchNorm1d(hidden_size)

    def forward(self, x, hidden):
        bsz = x.size(0)  # actual batch size (the last batch may be smaller)
        output, hidden = self.gru(x, hidden)
        # BUG FIX: the original called self.gru2, which is never defined in
        # __init__ and raised AttributeError on the first forward pass.
        x = output
        x = F.relu(self.bn1(self.c1(x)))
        x = F.relu(self.bn2(self.c2(x)))
        x = F.relu(self.bn3(self.c3(x)))
        x = F.relu(self.bn4(self.c4(x)))
        x = x.transpose(2, 1).contiguous()
        # dim made explicit: implicit-dim log_softmax is deprecated
        x = F.log_softmax(x.view(-1, self.output_size), dim=-1)
        # BUG FIX: self.num_points was never defined; infer the point count
        # with -1 and use the true batch size rather than the global constant.
        x = x.view(bsz, -1, self.output_size)
        return x, hidden
# ---- model, loss, optimizer -------------------------------------------------
rnn = sRNN(input_size, hidden_size, output_size = num_classes, n_layers=num_layers)

criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.Adam(rnn.parameters(), lr=0.01)
optimizer = torch.optim.SGD(rnn.parameters(), lr=0.0001, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, 'max')  # set up scheduler
accuracy = 0
iteration = 0

# ---- training loop ----------------------------------------------------------
for epoch in range(num_epochs):
    print("epoch=", epoch)
    for i, points in enumerate(train_loader, 0):
        data, target = points
        data, target = Variable(data), Variable(target[:, 0])
        data = data.transpose(2, 1)
        optimizer.zero_grad()
        # BUG FIX: the model was called with `points` (the raw (data, target)
        # tuple); it must be called with the prepared `data` tensor.
        pred, hidden = rnn(data, None)
        pred = pred.view(-1, num_classes)
        target = target.view(-1, 1)[:, 0] - 1  # labels are 1-based in the data
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        # BUG FIX: loss.data[0] was removed in PyTorch >= 0.4; use loss.item().
        print('[%d: %d/%d] train loss: %f accuracy: %f'
              % (epoch, i, batch_size, loss.item(),
                 correct / float(batch_size * 2500)))

        # Evaluate on one test batch every 10 training steps.
        if i % 10 == 0:
            j, data = next(enumerate(test_loader, 0))
            points, target = data
            points, target = Variable(points), Variable(target)
            points = points.transpose(2, 1)
            # BUG FIX: forward() requires the hidden-state argument; the
            # original `rnn(points)` raised TypeError.
            pred, _ = rnn(points, None)
            pred = pred.view(-1, num_classes)
            target = target.view(-1, 1)[:, 0] - 1
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            accuracy = accuracy + correct / float(batch_size * 2500)
            iteration = iteration + 1
            # BUG FIX: the format string had six placeholders for seven
            # arguments (TypeError); added the missing %f for avg_accuracy.
            print('[%d: %d/%d] %s loss: %f accuracy: %f avg_accuracy: %f'
                  % (epoch, i, batch_size, blue('test'), loss.item(),
                     correct / float(batch_size * 2500), accuracy / iteration))

# ---- final evaluation over the whole test set -------------------------------
correct = 0
total = 0
i = 0
for point_set, cls in test_loader:
    point_set = Variable(point_set)
    point_set = point_set.transpose(2, 1)
    outputs, hidden = rnn(point_set, None)
    _, predicted = torch.max(outputs.data, 1)
    total += cls.size(0)
    correct += (predicted == cls[:, 0]).sum()
    print("Test data %d correct %d, Total %d" % (i, correct, total))

print('Test Accuracy of the model on the test data: %d %%' % (100 * correct / total))
| 2.15625 | 2 |
tests/util/util.py | havardhuns/graphsense-REST | 0 | 12771902 | import yaml
def yamldump(data):
    """Pretty-print a model object (or a list of them) as YAML.

    Each object must provide a ``to_dict()`` method.
    """
    if isinstance(data, list):
        dumpable = [item.to_dict() for item in data]
    else:
        dumpable = data.to_dict()
    print(yaml.dump(dumpable))
| 3.0625 | 3 |
seniorproject/recommendation/simpletocompound/simpletocompound.py | teammanicotti/writingstyle | 0 | 12771903 | <gh_stars>0
"""Encapsulates the logic of determining sentence type and similarity."""
import logging
from typing import List
from spacy.tokens import Span
from seniorproject.model.simpletocompoundrecommendation import \
SimpleToCompoundRecommendation
from seniorproject.recommendation.simpletocompound.sentencecombination\
.conjunctions import Conjunctions
from seniorproject.model.document import Document
from seniorproject.model.recommendationtype import RecommendationType
from seniorproject.model.sentence import Sentence
from seniorproject.recommendation.recommendationengine import \
RecommendationEngine
from seniorproject.recommendation.simpletocompound.model.sentencetype import \
SentenceType
from seniorproject.recommendation.simpletocompound.semanticsimilarity import \
similarityclassifier
from seniorproject.recommendation.simpletocompound.sentencecombination.combine \
import Combine
__author__ = '<NAME>, <NAME>'

# spaCy dependency labels that mark a token as a subject
# (nominal, passive nominal, or expletive).
SUBJ_TAGS = ['nsubj', 'nsubjpass', 'expl']
class SimpleToCompound(RecommendationEngine):
    """Determines sentence type and similarity.

    Pairs adjacent simple sentences, scores their semantic similarity with a
    TensorFlow universal-sentence-encoder classifier, and emits
    simple-to-compound combination recommendations.
    """

    def __init__(
            self,
            spacy_instance,
            tf_session,
            tf_encodings,
            tf_input_placeholder,
            tf_sentence_piece_processor
    ):
        super(SimpleToCompound, self).__init__()
        self.nlp = spacy_instance
        self.similarity_classifier = \
            similarityclassifier.SimilarityClassifier(
                tf_session,
                tf_encodings,
                tf_input_placeholder,
                tf_sentence_piece_processor
            )
        self.logger = logging.getLogger(__name__)

    def analyze(self, doc: Document, **kwargs) -> \
            List[SimpleToCompoundRecommendation]:
        """Determines the sentence type and similarity of sentence pairs.

        Analyzes the provided text using spaCy. Then, the sentences are
        lexically analyzed to determine their type. After this, the sentences
        are paired and analyzed for their similarity and the results of the
        paired analysis are returned.

        :param doc: Document to be analyzed
        :param kwargs: must contain 'similarity_threshold' (float), the
            minimum similarity score for a recommendation to be emitted
        :return: list of recommendations created
        """
        results = []
        sentences = []
        for paragraph in doc.paragraphs:
            # paragraphs with fewer than two sentences cannot form a pair
            if len(list(paragraph.spacy_doc.sents)) < 2:
                continue
            for sent in paragraph.spacy_doc.sents:
                sent_type = self.sentence_type(sent)
                sentences.append(
                    Sentence(
                        sent.text,
                        sent.start_char,
                        sent.end_char,
                        doc.paragraphs.index(paragraph),
                        sent_type,
                        sent
                    )
                )
        # adjacent pairs across the whole document
        sentence_pairs = list(zip(sentences, sentences[1:]))
        for first, second in sentence_pairs:
            try:
                if first.sentence_type is SentenceType.SIMPLE and \
                        second.sentence_type is SentenceType.SIMPLE:
                    similarity_scores = self.similarity_classifier \
                        .determine_similarity(
                            [
                                first.span._.text_without_citations,
                                second.span._.text_without_citations
                            ]
                        )
                    # use the most conservative (lowest) pairwise score
                    similarity_score = similarity_scores.min().item()  # pylint: disable=no-member,line-too-long
                    if similarity_score >= kwargs.get('similarity_threshold'):
                        results.append(SimpleToCompoundRecommendation(
                            RecommendationType.SIMPLE_TO_COMPOUND,
                            f'{first.text} {second.text}',
                            first.start_position,
                            second.end_position,
                            first.paragraph_idx,
                            [],
                            f'{first.text} {second.text}',
                            [e.value for e in Conjunctions],
                            Combine.generate_combined(first.span, second.span),
                            similarity_score
                        ))
            except Exception as ex:  # pylint: disable=broad-except
                # deliberately best-effort: a failure on one pair must not
                # abort the analysis of the remaining pairs
                self.logger.error(ex)
        return results

    @staticmethod
    def sentence_type(sentence: Span) -> SentenceType:
        """Determines the type of a sentence.

        Determines the type of a sentence based on its lexical components.

        :param sentence: Span object containing a sentence
        :return: SentenceType enum object representing the sentence type
        """
        is_compound = False
        is_complex = False
        for token in sentence._.tokens_without_citations:
            # a subject whose verb head is not the root indicates an
            # additional clause
            if token.dep_ in SUBJ_TAGS and token.head.pos_ == 'VERB' and \
                    token.head.dep_ != 'ROOT':
                if token.head.dep_ == 'conj':
                    is_compound = True
                else:
                    is_complex = True
        if is_complex and is_compound:
            return SentenceType.COMPLEX_COMPOUND
        if is_complex:
            return SentenceType.COMPLEX
        if is_compound:
            return SentenceType.COMPOUND
        return SentenceType.SIMPLE
| 2.703125 | 3 |
petabvis/main.py | PEtab-dev/petab-interactive-viz | 6 | 12771904 | import argparse
import sys # We need sys so that we can pass argv to QApplication
import os
import warnings
import pandas as pd
import petab.C as ptc
import pyqtgraph as pg
from PySide6 import QtWidgets, QtCore, QtGui
from PySide6.QtWidgets import (
QVBoxLayout, QComboBox, QWidget, QLabel, QTreeView
)
from petab import core
import petab
from petab.visualize.helper_functions import check_ex_exp_columns
from . import (utils, vis_spec_plot, window_functionality)
from .bar_plot import BarPlot
from .options_window import (OptionMenu, CorrelationOptionMenu,
OverviewPlotWindow)
class MainWindow(QtWidgets.QMainWindow):
    """
    The main window.

    Attributes:
        exp_data: PEtab measurement table
        visualization_df: PEtab visualization table
        yaml_dict: Dictionary of the files in the yaml file
        condition_df: PEtab condition table
        observable_df: PEtab observable table
        plot1_widget: pg.GraphicsLayoutWidget containing the main plot
        plot2_widget: pg.GraphicsLayoutWidget containing the correlation plot
        warn_msg: QLabel displaying current warning messages
        popup_tables: List of Popup TableWidget displaying the clicked table
        tree_view: QTreeView of the yaml file
        visu_spec_plots: A list of VisuSpecPlots
        cbox: A dropdown menu for the plots
        current_list_index: List index of the currently displayed plot
        wid: QSplitter between main plot and correlation plot
    """

    def __init__(self, yaml_filename: str = None,
                 simulation_file: str = None, *args, **kwargs):
        # annotation fixed: simulation_file is a file path (it is passed to
        # core.get_simulation_df / os.path.basename), not a DataFrame
        super(MainWindow, self).__init__(*args, **kwargs)
        # set the background color to white
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        pg.setConfigOption("antialias", True)

        self.resize(1000, 600)
        self.setWindowTitle("petabvis")
        self.visualization_df = None
        self.simulation_df = None
        self.condition_df = None
        self.observable_df = None
        self.exp_data = None
        self.yaml_filename = yaml_filename
        self.yaml_dict = None
        self.color_map = utils.generate_color_map("viridis")
        self.vis_spec_plots = []
        self.wid = QtWidgets.QSplitter()
        self.plot1_widget = pg.GraphicsLayoutWidget(show=True)
        self.plot2_widget = pg.GraphicsLayoutWidget(show=False)
        self.overview_plot_window = None
        self.wid.addWidget(self.plot1_widget)
        # plot2_widget will be added to the QSplitter when
        # a simulation file is opened
        self.cbox = QComboBox()  # dropdown menu to select plots
        self.cbox.currentIndexChanged.connect(lambda x: self.index_changed(x))
        self.warn_msg = QLabel("")
        self.warnings = []
        self.warning_counter = {}
        # The new window that pops up to display a table
        self.popup_tables = []
        self.options_window = OptionMenu(window=self,
                                         vis_spec_plots=self.vis_spec_plots)
        self.correlation_options_window = \
            CorrelationOptionMenu(vis_spec_plots=self.vis_spec_plots)
        self.correlation_option_button = None
        self.overview_plot_button = None
        self.tree_view = QTreeView(self)
        self.tree_view.setHeaderHidden(True)
        self.tree_root_node = None
        self.simulation_tree_branch = None
        self.wid.addWidget(self.tree_view)
        self.current_list_index = 0
        # route all Python warnings into the in-app warning box
        warnings.showwarning = self.redirect_warning

        window_functionality.add_file_selector(self)
        window_functionality.add_option_menu(self)

        # the layout of the plot-list and message textbox
        lower_layout = QVBoxLayout()
        lower_layout.addWidget(self.cbox)
        lower_layout.addWidget(self.warn_msg)
        lower_widget = QWidget()
        lower_widget.setLayout(lower_layout)
        split_plots_and_warnings = QtWidgets.QSplitter()
        split_plots_and_warnings.setOrientation(QtCore.Qt.Vertical)
        split_plots_and_warnings.addWidget(self.wid)
        split_plots_and_warnings.addWidget(lower_widget)

        layout = QVBoxLayout()
        layout.addWidget(split_plots_and_warnings)
        widget = QWidget()
        widget.setLayout(layout)
        self.setCentralWidget(widget)

        if self.yaml_filename:
            self.read_data_from_yaml_file()
            if simulation_file:
                self.add_and_plot_simulation_file(simulation_file)
            else:
                self.add_plots()

    def read_data_from_yaml_file(self):
        """Load the first problem of the YAML file and populate the tree view.

        Warns when the YAML lists no visualization file (defaults are
        plotted in that case).
        """
        self.yaml_dict = petab.load_yaml(self.yaml_filename)["problems"][0]
        folder_path = os.path.dirname(self.yaml_filename) + "/"
        if ptc.VISUALIZATION_FILES not in self.yaml_dict:
            self.visualization_df = None
            self.add_warning(
                "The YAML file contains no "
                "visualization file (default plotted)")

        # table_tree_view sets the df attributes of the window
        # equal to the first file of each branch
        # (measurement, visualization, ...)
        window_functionality.table_tree_view(self, folder_path)

    def add_and_plot_simulation_file(self, filename):
        """
        Add the simulation file and plot them.
        Also, add the correlation plot to the window
        and enable correlation plot and overview plot options.

        Arguments:
            filename: Path of the simulation file.
        """
        sim_data = core.get_simulation_df(filename)
        # check columns, and add non-mandatory default columns
        sim_data, _, _ = check_ex_exp_columns(
            sim_data, None, None, None, None, None,
            self.condition_df, sim=True)
        # delete the replicateId column if it gets added to the simulation
        # table but is not in exp_data because it causes problems when
        # splitting the replicates
        if ptc.REPLICATE_ID not in self.exp_data.columns \
                and ptc.REPLICATE_ID in sim_data.columns:
            sim_data.drop(ptc.REPLICATE_ID, axis=1, inplace=True)

        if len(self.yaml_dict[ptc.MEASUREMENT_FILES]) > 1:
            self.add_warning(
                "Not Implemented Error: Loading a simulation file with "
                "multiple measurement files is currently not supported.")
        else:
            self.simulation_df = sim_data
            self.add_plots()

            # insert correlation plot at position 1
            self.wid.insertWidget(1, self.plot2_widget)
            filename = os.path.basename(filename)
            window_functionality.add_simulation_df_to_tree_view(self, filename)

            # add correlation options and overview plot to option menu
            self.correlation_option_button.setVisible(True)
            self.overview_plot_button.setVisible(True)
            self.add_overview_plot_window()

    def add_plots(self):
        """
        Adds the current visuSpecPlots to the main window,
        removes the old ones and updates the
        cbox (dropdown list)

        Returns:
            List of PlotItem
        """
        self.clear_qsplitter()
        self.vis_spec_plots.clear()
        self.options_window.reset_states()

        if self.visualization_df is not None:
            # to keep the order of plots consistent
            # with names from the plot selection
            plot_ids = list(self.visualization_df[ptc.PLOT_ID].unique())
            for plot_id in plot_ids:
                self.create_and_add_vis_plot(plot_id)

        else:  # default plot when no visu_df is provided
            self.create_and_add_vis_plot()

        plots = [vis_spec_plot.get_plot() for vis_spec_plot in
                 self.vis_spec_plots]

        # update the cbox
        self.cbox.clear()
        # calling this method sets the index of the cbox to 0
        # and thus displays the first plot
        utils.add_plotnames_to_cbox(self.exp_data, self.visualization_df,
                                    self.cbox)

        return plots

    def index_changed(self, i: int):
        """
        Changes the displayed plot to the one selected in the dropdown list

        Arguments:
            i: index of the selected plot
        """
        if 0 <= i < len(
                self.vis_spec_plots):  # i is -1 when the cbox is cleared
            self.clear_qsplitter()
            self.plot1_widget.addItem(self.vis_spec_plots[i].get_plot())
            self.plot2_widget.hide()
            if self.simulation_df is not None:
                self.plot2_widget.show()
                self.plot2_widget.addItem(
                    self.vis_spec_plots[i].correlation_plot)
            self.current_list_index = i

    def keyPressEvent(self, ev):
        """
        Changes the displayed plot by pressing arrow keys

        Arguments:
            ev: key event
        """
        # Exit when pressing ctrl + Q
        ctrl = False
        if ev.modifiers() & QtCore.Qt.ControlModifier:
            ctrl = True
        if ctrl and ev.key() == QtCore.Qt.Key_Q:
            sys.exit()

        # arrow keys cycle through the plot list
        if ev.key() == QtCore.Qt.Key_Up:
            self.index_changed(self.current_list_index - 1)
        if ev.key() == QtCore.Qt.Key_Down:
            self.index_changed(self.current_list_index + 1)
        if ev.key() == QtCore.Qt.Key_Left:
            self.index_changed(self.current_list_index - 1)
        if ev.key() == QtCore.Qt.Key_Right:
            self.index_changed(self.current_list_index + 1)

    def closeEvent(self, event):
        # closing the main window terminates the whole application
        sys.exit()

    def add_warning(self, message: str):
        """
        Adds the message to the warnings box

        Arguments:
            message: The message to display
        """
        # NOTE(review): create_and_add_vis_plot passes vis_plot.warnings here;
        # if that attribute is a list (not a str), the dict access below would
        # fail on an unhashable key -- confirm its type.
        if message not in self.warnings:
            self.warnings.append(message)
            self.warning_counter[message] = 1
        else:
            self.warning_counter[message] += 1
        self.warn_msg.setText(self.warnings_to_string())

    def warnings_to_string(self):
        """
        Convert the list of warnings to a string and
        indicate the number of occurences

        Returns:
            Self.warnings as a string
        """
        return "\n".join([warning if self.warning_counter[warning] <= 1
                          else warning + " (occured {} times)".format(
                              str(self.warning_counter[warning]))
                          for warning in self.warnings])

    def redirect_warning(self, message, category, filename=None, lineno=None,
                         file=None, line=None):
        """
        Redirect all warning messages and display them in the window.

        Arguments:
            message: The message of the warning
        """
        # signature matches warnings.showwarning; only the message is used
        print("Warning redirected: " + str(message))
        self.add_warning(str(message))

    def create_and_add_vis_plot(self, plot_id=""):
        """
        Create a vis_spec_plot object based on the given plot_id.
        If no plot_it is provided the default will be plotted.
        Add all the warnings of the vis_plot object to the warning text box.

        The actual plotting happens in the index_changed method

        Arguments:
            plot_id: The plotId of the plot
        """
        # split the measurement df by observable when using default plots
        if self.visualization_df is None:
            observable_ids = list(self.exp_data[ptc.OBSERVABLE_ID].unique())
            for observable_id in observable_ids:
                rows = self.exp_data[ptc.OBSERVABLE_ID] == observable_id
                data = self.exp_data[rows]
                simulation_df = self.simulation_df
                if simulation_df is not None:
                    rows = self.simulation_df[ptc.OBSERVABLE_ID] \
                        == observable_id
                    simulation_df = self.simulation_df[rows]
                vis_plot = vis_spec_plot.VisSpecPlot(
                    measurement_df=data, visualization_df=None,
                    condition_df=self.condition_df,
                    simulation_df=simulation_df, plot_id=observable_id,
                    color_map=self.color_map)
                self.vis_spec_plots.append(vis_plot)
                if vis_plot.warnings:
                    self.add_warning(vis_plot.warnings)
        else:
            # reduce the visualization df to the relevant rows (by plotId)
            rows = self.visualization_df[ptc.PLOT_ID] == plot_id
            vis_df = self.visualization_df[rows]
            if ptc.PLOT_TYPE_SIMULATION in vis_df.columns and \
                    vis_df.iloc[0][ptc.PLOT_TYPE_SIMULATION] == ptc.BAR_PLOT:
                bar_plot = BarPlot(measurement_df=self.exp_data,
                                   visualization_df=vis_df,
                                   condition_df=self.condition_df,
                                   simulation_df=self.simulation_df,
                                   plot_id=plot_id)
                # might want to change the name of
                # visu_spec_plots to clarify that
                # it can also include bar plots (maybe to plots?)
                self.vis_spec_plots.append(bar_plot)
            else:
                vis_plot = vis_spec_plot.VisSpecPlot(
                    measurement_df=self.exp_data,
                    visualization_df=vis_df,
                    condition_df=self.condition_df,
                    simulation_df=self.simulation_df, plot_id=plot_id,
                    color_map=self.color_map)
                self.vis_spec_plots.append(vis_plot)
                if vis_plot.warnings:
                    self.add_warning(vis_plot.warnings)

    def clear_qsplitter(self):
        """
        Clear the GraphicsLayoutWidgets for the
        measurement and correlation plot
        """
        self.plot1_widget.clear()
        self.plot2_widget.clear()

    def add_overview_plot_window(self):
        # create (or replace) the overview window from the current data
        self.overview_plot_window = OverviewPlotWindow(self.exp_data,
                                                       self.simulation_df)
def main():
    """Parse CLI arguments and launch the petabvis Qt application.

    Accepts an optional PEtab YAML problem file (-y) and an optional
    simulation file (-s); blocks in the Qt event loop until the window
    is closed.
    """
    options = argparse.ArgumentParser()
    options.add_argument("-y", "--YAML", type=str, required=False,
                         help="PEtab YAML file", default=None)
    options.add_argument("-s", "--simulation", type=str, required=False,
                         help="PEtab simulation file", default=None)
    args = options.parse_args()

    simulation_file = None
    if args.simulation is not None:
        simulation_file = args.simulation

    app = QtWidgets.QApplication(sys.argv)
    main_window = MainWindow(args.YAML, simulation_file)
    main_window.show()
    # modernization: PySide6 deprecates exec_() in favor of exec()
    sys.exit(app.exec())
if __name__ == '__main__':
    # script entry point
    main()
| 2.375 | 2 |
pi_mqtt_gpio/modules/rc522.py | BitStab/pi-mqtt-gpio | 0 | 12771905 | from pi_mqtt_gpio.modules import GenericSensor
import logging
# Python packages the host framework installs for this module.
REQUIREMENTS = ("spidev", "mfrc522")

# Cerberus-style config schema for this sensor module.
# NOTE(review): typing the "rc522" key as integer looks suspicious for a
# sensor config entry -- confirm against the other sensor modules.
CONFIG_SCHEMA = {
    "rc522": {"type": "integer", "required": True, "empty": False},
}

_LOG = logging.getLogger("mqtt_gpio")
class Sensor(GenericSensor):
    """
    Implementation of the RC522 NFC/RFID sensor.
    """

    def __init__(self, config):
        # BUG FIX: the package is called 'mfrc522' (see REQUIREMENTS); the
        # original imported the misspelled 'mrfc522' and crashed on load.
        from mfrc522 import SimpleMFRC522
        self.reader = SimpleMFRC522()

    def setup_sensor(self, config):
        """No extra hardware setup is needed for this sensor."""
        return True  # nothing to do here

    def get_value(self, config):
        """Block until a tag is read and return its id."""
        tag_id, text = self.reader.read()
        # BUG FIX: the original concatenated the tag id (an int per
        # SimpleMFRC522) to a str, raising TypeError; lazy %-formatting
        # also avoids building the message when the level is disabled.
        _LOG.warning("MFRC522: Reading from Tag: %s %s", tag_id, text)
        return tag_id

    def cleanup(self):
        """Nothing to release for this sensor."""
        # BUG FIX: the original body contained only a comment, which is a
        # syntax error; a docstring provides the required statement.
speakerz/accounts/views.py | avisalmon/speakerz | 0 | 12771906 | <filename>speakerz/accounts/views.py
from django.urls import reverse_lazy
from .forms import UserCreatForm
from django.views.generic import CreateView
class SignUp(CreateView):
    """Sign-up view: renders UserCreatForm and redirects to 'login' on success."""
    form_class = UserCreatForm
    success_url = reverse_lazy('login')
    template_name = 'accounts/signup.html'
| 1.695313 | 2 |
nn_interpretability/dataset/imagenet_data_loader.py | miquelmn/nn_interpretability | 41 | 12771907 | <reponame>miquelmn/nn_interpretability
import torch
from pathlib import Path
import torchvision.datasets as datasets
from torchvision.transforms import transforms
class ImageNetValDataLoader:
    """DataLoader wrapper for the ImageNet validation split.

    Expects the images under ``<repo root>/data/ImageNet/val`` in the
    torchvision ImageFolder layout (one sub-directory per class).
    """

    def __init__(self, batch_size: int = 64):
        # resolve data/ImageNet/val relative to the package root
        self.valdir = Path(__file__).parent.parent.parent.joinpath('data/ImageNet/val')
        print(self.valdir)
        # NOTE(review): self.transform is never used by this class (the
        # dataset is built with self.process) -- confirm before removing.
        self.transform = transforms.Compose(
            [
                transforms.ToTensor(),
            ]
        )
        # standard ImageNet channel statistics
        self.normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])
        # resize -> center-crop -> tensor -> normalize (common eval pipeline)
        self.process = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            self.normalize,
        ])
        self.valset = datasets.ImageFolder(str(self.valdir), self.process)
        self.valloader = torch.utils.data.DataLoader(self.valset, batch_size=batch_size, shuffle=True, num_workers=0)
| 2.71875 | 3 |
xl_tensorflow/models/vision/detection/architecture/fpn.py | Lannister-Xiaolin/xl_tensorflow | 0 | 12771908 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Feature Pyramid Networks.
Feature Pyramid Networks were proposed in:
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
, and <NAME>
Feature Pyramid Networks for Object Detection. CVPR 2017.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import logging
import tensorflow as tf
from tensorflow.python.keras import backend
from . import nn_ops
from ..ops import spatial_transform_ops
from ..utils.efficientdet_utils import get_feat_sizes, activation_fn
from xl_tensorflow.utils import hparams_config
@tf.keras.utils.register_keras_serializable(package='Text')
class WeightedAdd(tf.keras.layers.Layer):
    """Weighted feature fusion layer (BiFPN 'attn'/'fastattn').

    Learns one scalar weight per input tensor and returns the normalized
    weighted sum of the inputs.
    """

    def __init__(self, epsilon=1e-4, activation="relu", **kwargs):
        """
        Args:
            epsilon: small constant keeping the normalization stable.
            activation: 'relu' (fast normalized fusion) or 'softmax'.
            **kwargs: forwarded to tf.keras.layers.Layer.
        """
        super(WeightedAdd, self).__init__(**kwargs)
        self.epsilon = epsilon
        # keep the name so get_config() can serialize the choice
        self.activation_name = activation
        self.activation = tf.nn.softmax if activation == "softmax" else tf.nn.relu

    def build(self, input_shape):
        # one trainable scalar per input, initialized to equal weights
        num_in = len(input_shape)
        self.w = self.add_weight(name=self.name,
                                 shape=(num_in,),
                                 initializer=tf.keras.initializers.constant(1 / num_in),
                                 trainable=True,
                                 dtype=tf.float32)

    def call(self, inputs, **kwargs):
        w = self.activation(self.w)
        # BUG FIX: normalize by the sum of the *activated* weights; the
        # original summed the raw self.w, which diverges from the ReLU'd
        # weights as soon as any weight becomes negative during training.
        weights_sum = tf.reduce_sum(w)
        x = tf.reduce_sum([(w[i] * inputs[i]) / (weights_sum + self.epsilon)
                           for i in range(len(inputs))], axis=0)
        return x

    def compute_output_shape(self, input_shape):
        return input_shape[0]

    def get_config(self):
        config = super(WeightedAdd, self).get_config()
        # BUG FIX: 'activation' was not serialized, so a reloaded layer
        # silently fell back to relu regardless of the original choice.
        config.update({
            'epsilon': self.epsilon,
            'activation': self.activation_name,
        })
        return config
class BiFpn(object):
    """BiFeature pyramid networks.

    Notes (translated from the original Chinese comments):
      1. The training_bn parameter was removed.
      2. Built mainly from Keras layers, with some tf.nn ops.
    TODO: integrate BiFPN into the YOLO models.
    """

    def __init__(self,
                 min_level=3,
                 max_level=7,
                 ):
        """FPN initialization function.

        Args:
            min_level: `int` minimum level in FPN output feature maps.
            max_level: `int` maximum level in FPN output feature maps.
        """
        self._min_level = min_level
        self._max_level = max_level
def get_fpn_config(self, fpn_name, min_level, max_level, weight_method):
    """Return the BiFPN node/weight configuration for ``fpn_name``.

    Falls back to 'bifpn_fa' when no name is given; raises KeyError for
    an unknown name.
    """
    name = fpn_name or 'bifpn_fa'
    builders = {
        'bifpn_sum': self.bifpn_sum_config,
        'bifpn_fa': self.bifpn_fa_config,
        'bifpn_dyn': lambda: self.bifpn_dynamic_config(
            min_level, max_level, weight_method),
    }
    # build lazily: only the requested config is constructed
    return builders[name]()
def fuse_features(self, nodes, weight_method):
    """Fuse features from different resolutions and return a weighted sum.

    Args:
        nodes: a list of tensorflow features at different levels
        weight_method: feature fusion method. One of:
            - "attn" - Softmax weighted fusion
            - "fastattn" - Fast normalzied feature fusion
            - "sum" - a sum of inputs

    Returns:
        A tensor denoting the fused feature.

    Raises:
        ValueError: if ``weight_method`` is not one of the above.
    """
    # (removed an unused local that captured nodes[0].dtype)
    if weight_method == 'attn':
        new_node = WeightedAdd(activation="softmax")(nodes)
    elif weight_method == 'fastattn':
        new_node = WeightedAdd(activation="relu")(nodes)
    elif weight_method == 'sum':
        new_node = tf.add_n(nodes)
    else:
        raise ValueError(
            'unknown weight_method {}'.format(weight_method))
    return new_node
def build_bifpn_layer(self, feats, feat_sizes, params):
    """Builds a feature pyramid given previous feature pyramid and config.

    Args:
        feats: list of input feature tensors, ordered min_level..max_level.
        feat_sizes: per-level dict with 'height'/'width' entries.
        params: hparams-style config object.

    Returns:
        dict mapping feature level -> fused output tensor.
    """
    p = params  # use p to denote the network config.
    if p.fpn.fpn_config:
        # BUG FIX: the original read the flat attribute p.fpn_config here,
        # inconsistent with the nested p.fpn.fpn_config tested just above.
        fpn_config = p.fpn.fpn_config
    else:
        fpn_config = self.get_fpn_config(p.fpn.fpn_name,
                                         p.architecture.min_level,
                                         p.architecture.max_level,
                                         p.fpn.fpn_weight_method)

    num_output_connections = [0 for _ in feats]
    for i, fnode in enumerate(fpn_config.nodes):
        with tf.name_scope('fnode{}'.format(i)):
            logging.info('fnode %d : %s', i, fnode)
            new_node_height = feat_sizes[fnode['feat_level']]['height']
            new_node_width = feat_sizes[fnode['feat_level']]['width']
            nodes = []
            # resample every input of this node to the target resolution
            for idx, input_offset in enumerate(fnode['inputs_offsets']):
                input_node = feats[input_offset]
                num_output_connections[input_offset] += 1
                input_node = spatial_transform_ops.resample_feature_map(
                    input_node, '{}_{}_{}'.format(idx, input_offset, len(feats)),
                    new_node_height, new_node_width, p.fpn.fpn_feat_dims,
                    p.fpn.apply_bn_for_resampling, p.is_training_bn,
                    p.fpn.conv_after_downsample,
                    p.fpn.use_native_resize_op,
                    p.fpn.pooling_type,
                    use_tpu=p.use_tpu,
                    data_format=params.data_format)
                nodes.append(input_node)

            new_node = self.fuse_features(nodes, fpn_config.weight_method)

            with tf.name_scope('op_after_combine{}'.format(len(feats))):
                if not p.fpn.conv_bn_act_pattern:
                    new_node = activation_fn(new_node, p.act_type)

                if p.fpn.use_separable_conv:
                    conv_op = functools.partial(
                        tf.keras.layers.SeparableConv2D, depth_multiplier=1)
                else:
                    conv_op = tf.keras.layers.Conv2D

                new_node = conv_op(
                    filters=p.fpn.fpn_feat_dims,
                    kernel_size=(3, 3),
                    padding='same',
                    use_bias=not p.fpn.conv_bn_act_pattern,
                    data_format=params.data_format)(new_node)

                # activation split out of the conv so BN can sit in between
                act_type = None if not p.fpn.conv_bn_act_pattern else p.act_type
                new_node = tf.keras.layers.BatchNormalization(
                    axis=1 if params.data_format == "channels_first" else -1,
                    momentum=p.norm_activation.batch_norm_momentum,
                    epsilon=p.norm_activation.batch_norm_epsilon)(new_node)
                if act_type:
                    new_node = activation_fn(new_node, act_type)

            feats.append(new_node)
            num_output_connections.append(0)

    # for each level, the output is the most recently appended node
    output_feats = {}
    for l in range(p.architecture.min_level, p.architecture.max_level + 1):
        for i, fnode in enumerate(reversed(fpn_config.nodes)):
            if fnode['feat_level'] == l:
                output_feats[l] = feats[-1 - i]
                break
    return output_feats
def bifpn_sum_config(self):
    """BiFPN config with sum fusion.

    Fixed node graph for levels 3-7: the first four nodes form the
    top-down path, the last four the bottom-up path; 'inputs_offsets'
    index into the running list of feature nodes.
    """
    p = hparams_config.Config()
    p.nodes = [
        {'feat_level': 6, 'inputs_offsets': [3, 4]},
        {'feat_level': 5, 'inputs_offsets': [2, 5]},
        {'feat_level': 4, 'inputs_offsets': [1, 6]},
        {'feat_level': 3, 'inputs_offsets': [0, 7]},
        {'feat_level': 4, 'inputs_offsets': [1, 7, 8]},
        {'feat_level': 5, 'inputs_offsets': [2, 6, 9]},
        {'feat_level': 6, 'inputs_offsets': [3, 5, 10]},
        {'feat_level': 7, 'inputs_offsets': [4, 11]},
    ]
    p.weight_method = 'sum'
    return p
def bifpn_fa_config(self):
    """BiFPN config with fast weighted sum (same node graph as 'sum')."""
    config = self.bifpn_sum_config()
    config.weight_method = 'fastattn'
    return config
def bifpn_dynamic_config(self, min_level, max_level, weight_method):
    """Build a BiFPN node-graph config for an arbitrary level range.

    Mirrors the static sum-config topology: one top-down pass followed by
    one bottom-up pass, recording every emitted node id per level so later
    nodes can reference all earlier outputs of the same level.
    """
    cfg = hparams_config.Config()
    cfg.weight_method = weight_method or 'fastattn'

    num_levels = max_level - min_level + 1
    # node_ids[level] -> ids of every node (input or fused) at that level.
    node_ids = {min_level + i: [i] for i in range(num_levels)}
    next_id = itertools.count(num_levels)

    cfg.nodes = []
    # Top-down pass: fuse each level with the latest output one level up.
    for level in range(max_level - 1, min_level - 1, -1):
        cfg.nodes.append({
            'feat_level': level,
            'inputs_offsets': [node_ids[level][-1], node_ids[level + 1][-1]]
        })
        node_ids[level].append(next(next_id))
    # Bottom-up pass: fuse all prior outputs of a level with the latest
    # output one level down.
    for level in range(min_level + 1, max_level + 1):
        cfg.nodes.append({
            'feat_level': level,
            'inputs_offsets': node_ids[level] + [node_ids[level - 1][-1]]
        })
        node_ids[level].append(next(next_id))
    return cfg
def __call__(self, multilevel_features, params):
    """Returns the FPN features for a given multilevel features.

    Args:
      multilevel_features: a `dict` containing `int` keys for continuous feature
        levels, e.g., [2, 3, 4, 5]. The values are corresponding features with
        shape [batch_size, height_l, width_l, num_filters].
      params: hyperparameter object; the fields read here are params.fpn.*,
        params.is_training_bn and params.efficientdet_parser.output_size.

    Returns:
      a `dict` containing `int` keys for continuous feature levels
      [min_level, min_level + 1, ..., max_level]. The values are corresponding
      FPN features with shape [batch_size, height_l, width_l, fpn_feat_dims].
    """
    # step 1: Build additional input features that are not from backbone
    # (e.g. levels 6 and 7): each missing level is a 2x downsample of the
    # previous level's feature map.
    feats = []
    # with tf.name_scope('bifpn'):
    with backend.get_graph().as_default(), tf.name_scope('bifpn'):
        for level in range(self._min_level, self._max_level + 1):
            if level in multilevel_features.keys():
                feats.append(multilevel_features[level])
            else:
                # Channels-first is not supported here: data_format must be
                # "channels_last", so height/width live at axes 1 and 2.
                h_id, w_id = (1, 2)
                feats.append(
                    spatial_transform_ops.resample_feature_map(
                        feats[-1],
                        name='p%d' % level,
                        # Ceil-division halving of spatial dims.
                        target_height=(feats[-1].shape[h_id] - 1) // 2 + 1,
                        target_width=(feats[-1].shape[w_id] - 1) // 2 + 1,
                        target_num_channels=params.fpn.fpn_feat_dims,
                        apply_bn=params.fpn.apply_bn_for_resampling,
                        is_training=params.is_training_bn,
                        conv_after_downsample=params.fpn.conv_after_downsample,
                        use_native_resize_op=params.fpn.use_native_resize_op,
                        pooling_type=params.fpn.pooling_type,
                        use_tpu=False,
                        data_format="channels_last"
                    ))
        # step 2: run the stacked BiFPN cells; each repeat consumes the
        # previous repeat's per-level outputs.
        feat_sizes = get_feat_sizes(params.efficientdet_parser.output_size[0], self._max_level)
        with tf.name_scope("bifpn_cells"):
            for rep in range(params.fpn.fpn_cell_repeats):
                logging.info('building cell %d', rep)
                new_feats = self.build_bifpn_layer(feats, feat_sizes, params)
                feats = [
                    new_feats[level]
                    for level in range(
                        self._min_level, self._max_level + 1)
                ]
    return new_feats
| 1.90625 | 2 |
events/tests.py | Chiefautoparts/MillGeekV2 | 0 | 12771909 | # # -*- coding: utf-8 -*-
# from __future__ import unicode_literals
# from django.test import TestCase
# # Create your tests here.
# class SimpleTest(TestCase):
# self.failUnlessEqual(1 + 1, 2)
# __test__ = {"doctest": """
# Another way to test that 1 + 1 is equal to 2.
# >>> 1 + 1 == 2
# True
# """} | 2.609375 | 3 |
modules/models.py | ifr1m/pcdarts-tf2 | 26 | 12771910 | <filename>modules/models.py
import tensorflow as tf
from absl import logging
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import (Input, Dense, Flatten, Conv2D,
AveragePooling2D, GlobalAveragePooling2D,
ReLU)
from modules.operations import (OPS, FactorizedReduce, ReLUConvBN,
BatchNormalization, Identity, drop_path,
kernel_init, regularizer)
import modules.genotypes as genotypes
class Cell(tf.keras.layers.Layer):
    """A DARTS cell: two preprocessed inputs feed a small DAG whose edges are
    the candidate operations selected by the genotype."""

    def __init__(self, genotype, ch, reduction, reduction_prev, wd,
                 name='Cell', **kwargs):
        super(Cell, self).__init__(name=name, **kwargs)
        self.wd = wd
        # If the previous cell reduced spatial size, s0 must be downsampled
        # to match; otherwise a 1x1 ReLU-Conv-BN aligns its channels.
        self.preprocess0 = (FactorizedReduce(ch, wd=wd) if reduction_prev
                            else ReLUConvBN(ch, k=1, s=1, wd=wd))
        self.preprocess1 = ReLUConvBN(ch, k=1, s=1, wd=wd)

        genes = genotype.reduce if reduction else genotype.normal
        concat = genotype.reduce_concat if reduction else genotype.normal_concat
        op_names, indices = zip(*genes)
        self._compile(ch, op_names, indices, concat, reduction)

    def _compile(self, ch, op_names, indices, concat, reduction):
        """Instantiate the two candidate ops per intermediate node."""
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self._indices = indices
        self._ops = []
        for op_name, input_index in zip(op_names, indices):
            # Ops reading one of the two cell inputs use stride 2 in a
            # reduction cell; all other edges keep stride 1.
            stride = 2 if reduction and input_index < 2 else 1
            self._ops.append(OPS[op_name](ch, stride, self.wd, True))

    def call(self, s0, s1, drop_path_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for step in range(self._steps):
            op1 = self._ops[2 * step]
            op2 = self._ops[2 * step + 1]
            h1 = op1(states[self._indices[2 * step]])
            h2 = op2(states[self._indices[2 * step + 1]])
            if drop_path_prob is not None:
                # Identity edges are never dropped.
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_path_prob, name='drop_path_h1')
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_path_prob, name='drop_path_h2')
            states.append(h1 + h2)
        # Concatenate the selected intermediate states along channels.
        return tf.concat([states[i] for i in self._concat], axis=-1)
class AuxiliaryHeadCIFAR(tf.keras.layers.Layer):
    """Auxiliary classifier head attached to an intermediate cell (CIFAR)."""

    def __init__(self, num_classes, wd, name='AuxiliaryHeadCIFAR', **kwargs):
        super(AuxiliaryHeadCIFAR, self).__init__(name=name, **kwargs)

        def valid_conv(filters, kernel_size):
            # Shared settings for the two bias-free 'valid' convolutions.
            return Conv2D(filters=filters, kernel_size=kernel_size, strides=1,
                          padding='valid', kernel_initializer=kernel_init(),
                          kernel_regularizer=regularizer(wd), use_bias=False)

        self.features = Sequential([
            ReLU(),
            AveragePooling2D(5, strides=3, padding='valid'),
            valid_conv(128, 1),
            BatchNormalization(affine=True),
            ReLU(),
            valid_conv(768, 2),
            BatchNormalization(affine=True),
            ReLU()])
        self.classifier = Dense(num_classes, kernel_initializer=kernel_init(),
                                kernel_regularizer=regularizer(wd))

    def call(self, x):
        return self.classifier(Flatten()(self.features(x)))
def CifarModel(cfg, training=True, stem_multiplier=3, name='CifarModel'):
    """Build the PC-DARTS evaluation network for CIFAR as a tf.keras Model.

    Args:
        cfg: dict with keys 'input_size', 'init_channels', 'layers',
            'num_classes', 'weights_decay' and 'arch' (the name of a
            genotype defined in modules.genotypes).
        training: when True the model takes an extra `drop_prob` input and
            also returns the auxiliary-head logits.
        stem_multiplier: channel widening factor for the stem convolution.
        name: model name.

    Returns:
        tf.keras.Model; `(image, drop_prob) -> (logits, logits_aux)` when
        training, else `image -> logits`.
    """
    logging.info(f"building {name}...")  # fixed typo: was "buliding"

    input_size = cfg['input_size']
    ch_init = cfg['init_channels']
    layers = cfg['layers']
    num_cls = cfg['num_classes']
    wd = cfg['weights_decay']
    # getattr() replaces eval(): identical lookup for valid genotype names,
    # but config strings can no longer execute arbitrary code.
    genotype = getattr(genotypes, cfg['arch'])

    # define model
    inputs = Input([input_size, input_size, 3], name='input_image')
    if training:
        drop_path_prob = Input([], name='drop_prob')
    else:
        drop_path_prob = None

    # Stem: 3x3 conv + BN producing stem_multiplier * init_channels maps.
    ch_curr = stem_multiplier * ch_init
    s0 = s1 = Sequential([
        Conv2D(filters=ch_curr, kernel_size=3, strides=1, padding='same',
               kernel_initializer=kernel_init(),
               kernel_regularizer=regularizer(wd), use_bias=False),
        BatchNormalization(affine=True)], name='stem')(inputs)

    ch_curr = ch_init
    reduction_prev = False
    logits_aux = None
    for layer_index in range(layers):
        # Reduction cells (with doubled channels) at 1/3 and 2/3 depth.
        if layer_index in [layers // 3, 2 * layers // 3]:
            ch_curr *= 2
            reduction = True
        else:
            reduction = False

        cell = Cell(genotype, ch_curr, reduction, reduction_prev, wd,
                    name=f'Cell_{layer_index}')
        s0, s1 = s1, cell(s0, s1, drop_path_prob)

        reduction_prev = reduction
        if layer_index == 2 * layers // 3 and training:
            # Auxiliary classifier used only as a training regularizer.
            logits_aux = AuxiliaryHeadCIFAR(num_cls, wd=wd)(s1)

    fea = GlobalAveragePooling2D()(s1)
    logits = Dense(num_cls, kernel_initializer=kernel_init(),
                   kernel_regularizer=regularizer(wd))(Flatten()(fea))

    if training:
        return Model((inputs, drop_path_prob), (logits, logits_aux), name=name)
    else:
        return Model(inputs, logits, name=name)
| 2.4375 | 2 |
server/app/utils/setup.py | WagnerJM/quality | 0 | 12771911 | import os
import secrets
def prompt_value(name):
    """Print a '<NAME>= ?' prompt and read one value from stdin."""
    print("{}= ?".format(name))
    return input("> ")


def format_env_lines(env_vars):
    """Render ordered (name, value) pairs as newline-terminated .env lines."""
    return ["{}={}\n".format(name, value) for name, value in env_vars]


def create_env_file():
    """Interactively collect settings and append them to the project .env.

    Prompts for the Flask and database settings, generates random
    SECRET_KEY / JWT_SECRET values with the `secrets` module, and appends
    one NAME=value line per setting to ../../../.env.

    Fixes vs. original: APP_SETTINGS / FLASK_ENV / FLASK_APP were written
    but never defined (NameError), entries were written without newlines
    (everything ended up on one line), and f.close() was redundantly
    called inside the `with` block.
    """
    prompted = ("APP_SETTINGS", "FLASK_ENV", "FLASK_APP",
                "POSTGRES_USER", "POSTGRES_PW", "DATABASE", "REDIS_PW")
    env_vars = [(name, prompt_value(name)) for name in prompted]
    # Secrets are generated, never prompted.
    env_vars.append(("SECRET_KEY", secrets.token_hex(32)))
    env_vars.append(("JWT_SECRET", secrets.token_hex(32)))

    with open(os.path.join('../../..', '.env'), 'a') as f:
        f.writelines(format_env_lines(env_vars))
def main():
    # CLI entry point: run the interactive .env generator.
    create_env_file()


if __name__ == '__main__':
    main()
| 2.671875 | 3 |
paper code/solveCrossTime.py | PKandarp/TICC | 393 | 12771912 | from snap import *
from cvxpy import *
import math
import multiprocessing
import numpy
from scipy.sparse import lil_matrix
import sys
import time
import __builtin__
import code
# File format: One edge per line, written as "srcID dstID"
# Commented lines that start with '#' are ignored
# Returns a TGraphVX object with the designated edges and nodes
def LoadEdgeList(Filename):
    """Build a TGraphVX from an edge-list file.

    Each non-comment line is "srcID dstID"; lines starting with '#' are
    skipped. Nodes are created on first sight.

    Fix vs. original: the file was opened twice (a bare open() followed by
    a `with open(...)`), leaking the first handle; a single `with` block
    now opens and closes it.
    """
    gvx = TGraphVX()
    nids = set()
    with open(Filename) as infile:
        for line in infile:
            if line.startswith('#'):
                continue
            [src, dst] = line.split()
            for nid in (int(src), int(dst)):
                if nid not in nids:
                    gvx.AddNode(nid)
                    nids.add(nid)
            gvx.AddEdge(int(src), int(dst))
    return gvx
# TGraphVX inherits from the TUNGraph object defined by Snap.py
class TGraphVX(TUNGraph):
    """An undirected Snap.py graph whose nodes and edges carry CVXPY
    objectives and constraints, solvable serially or via distributed ADMM."""

    __default_objective = norm(0)  # zero objective used when none is given
    __default_constraints = []

    # Data Structures
    # ---------------
    # node_objectives  = {int NId : CVXPY Expression}
    # node_constraints = {int NId : [CVXPY Constraint]}
    # edge_objectives  = {(int NId1, int NId2) : CVXPY Expression}
    # edge_constraints = {(int NId1, int NId2) : [CVXPY Constraint]}
    # all_variables = set(CVXPY Variable)
    #
    # ADMM-Specific Structures
    # ------------------------
    # node_variables = {int NId :
    #       [(CVXPY Variable id, CVXPY Variable name, CVXPY Variable, offset)]}
    # node_values = {int NId : numpy array}
    # node_values points to the numpy array containing the value of the entire
    #     variable space corresponding to then node. Use the offset to get the
    #     value for a specific variable.

    # Constructor
    # If Graph is a Snap.py graph, initializes a SnapVX graph with the same
    # nodes and edges.
    def __init__(self, Graph=None):
        """Create an empty SnapVX graph; if `Graph` is given, copy only its
        topology (nodes and edges, no objectives)."""
        # Initialize data structures
        self.node_objectives = {}
        self.node_variables = {}
        self.node_constraints = {}
        self.edge_objectives = {}
        self.edge_constraints = {}
        self.node_values = {}
        self.all_variables = set()
        self.status = None   # set by Solve(); mirrors CVXPY's problem.status
        self.value = None    # set by Solve(); total objective value

        # Initialize superclass
        nodes = 0
        edges = 0
        if Graph != None:
            nodes = Graph.GetNodes()
            edges = Graph.GetEdges()
        TUNGraph.__init__(self, nodes, edges)

        # Support for constructor with Snap.py graph argument
        if Graph != None:
            for ni in Graph.Nodes():
                self.AddNode(ni.GetId())
            for ei in Graph.Edges():
                self.AddEdge(ei.GetSrcNId(), ei.GetDstNId())
# Iterate over every node, mirroring the Nodes() iterator of PUNGraph
# in Snap.py.
def Nodes(self):
    """Yield the node iterator at each successive position."""
    it = TUNGraph.BegNI(self)
    for _ in xrange(TUNGraph.GetNodes(self)):
        yield it
        it.Next()

# Iterate over every edge, mirroring the Edges() iterator of PUNGraph
# in Snap.py.
def Edges(self):
    """Yield the edge iterator at each successive position."""
    it = TUNGraph.BegEI(self)
    for _ in xrange(TUNGraph.GetEdges(self)):
        yield it
        it.Next()
# Adds objectives together to form one collective CVXPY Problem.
# Option of specifying Maximize() or the default Minimize().
# Graph status and value properties will also be set.
# Individual variable values can be retrieved using GetNodeValue().
# Option to use serial version or distributed ADMM.
# maxIters optional parameter: Maximum iterations for distributed ADMM.
def Solve(self, M=Minimize, UseADMM=True, NumProcessors=0, Rho=1.0,
          MaxIters=250, EpsAbs=0.01, EpsRel=0.01, Verbose=False,
          UseClustering = False, ClusterSize = 1000 ):
    """Solve the whole-graph problem.

    Dispatches to clustered ADMM, distributed ADMM, or a single serial
    CVXPY solve depending on the flags and on whether the graph has edges.
    """
    global m_func
    m_func = M

    # Use ADMM if the appropriate parameter is specified and if there
    # are edges in the graph.
    #if __builtin__.len(SuperNodes) > 0:
    if UseClustering and ClusterSize > 0:
        SuperNodes = self.__ClusterGraph(ClusterSize)
        self.__SolveClusterADMM(M,UseADMM,SuperNodes, NumProcessors, Rho, MaxIters,\
                                EpsAbs, EpsRel, Verbose)
        return
    if UseADMM and self.GetEdges() != 0:
        self.__SolveADMM(NumProcessors, Rho, MaxIters, EpsAbs, EpsRel,
                         Verbose)
        return
    if Verbose:
        print 'Serial ADMM'
    objective = 0
    constraints = []
    # Add all node objectives and constraints
    for ni in self.Nodes():
        nid = ni.GetId()
        objective += self.node_objectives[nid]
        constraints += self.node_constraints[nid]
    # Add all edge objectives and constraints
    for ei in self.Edges():
        etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
        objective += self.edge_objectives[etup]
        constraints += self.edge_constraints[etup]
    # Solve CVXPY Problem
    objective = m_func(objective)
    problem = Problem(objective, constraints)
    try:
        problem.solve()
    except SolverError:
        # Default solver failed outright: retry with SCS.
        problem.solve(solver=SCS)
    if problem.status in [INFEASIBLE_INACCURATE, UNBOUNDED_INACCURATE]:
        # Inaccurate verdict: re-solve with SCS for a cleaner answer.
        problem.solve(solver=SCS)
    # Set TGraphVX status and value to match CVXPY
    self.status = problem.status
    self.value = problem.value
    # Insert into hash to support ADMM structures and GetNodeValue():
    # stack each node's Variable values into one flat numpy vector.
    for ni in self.Nodes():
        nid = ni.GetId()
        variables = self.node_variables[nid]
        value = None
        for (varID, varName, var, offset) in variables:
            if var.size[0] == 1:
                # Scalar Variable: wrap in a 1-element array.
                val = numpy.array([var.value])
            else:
                val = numpy.array(var.value).reshape(-1,)
            if value is None:
                value = val
            else:
                value = numpy.concatenate((value, val))
        self.node_values[nid] = value
"""Function to solve cluster wise optimization problem"""
def __SolveClusterADMM(self,M,UseADMM,superNodes,numProcessors, rho_param,
maxIters, eps_abs, eps_rel,verbose):
#initialize an empty supergraph
supergraph = TGraphVX()
nidToSuperidMap = {}
edgeToClusterTupMap = {}
for snid in xrange(__builtin__.len(superNodes)):
for nid in superNodes[snid]:
nidToSuperidMap[nid] = snid
"""collect the entities for the supergraph. a supernode is a subgraph. a superedge
is a representation of a graph cut"""
superEdgeObjectives = {}
superEdgeConstraints = {}
superNodeObjectives = {}
superNodeConstraints = {}
superNodeVariables = {}
superNodeValues = {}
varToSuperVarMap = {}
"""traverse through the list of edges and add each edge's constraint and objective to
either the supernode to which it belongs or the superedge which connects the ends
of the supernodes to which it belongs"""
for ei in self.Edges():
etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
supersrcnid,superdstnid = nidToSuperidMap[etup[0]],nidToSuperidMap[etup[1]]
if supersrcnid != superdstnid: #the edge is a part of the cut
if supersrcnid > superdstnid:
supersrcnid,superdstnid = superdstnid,supersrcnid
if (supersrcnid,superdstnid) not in superEdgeConstraints:
superEdgeConstraints[(supersrcnid,superdstnid)] = self.edge_constraints[etup]
superEdgeObjectives[(supersrcnid,superdstnid)] = self.edge_objectives[etup]
else:
superEdgeConstraints[(supersrcnid,superdstnid)] += self.edge_constraints[etup]
superEdgeObjectives[(supersrcnid,superdstnid)] += self.edge_objectives[etup]
else: #the edge is a part of some supernode
if supersrcnid not in superNodeConstraints:
superNodeConstraints[supersrcnid] = self.edge_constraints[etup]
superNodeObjectives[supersrcnid] = self.edge_objectives[etup]
else:
superNodeConstraints[supersrcnid] += self.edge_constraints[etup]
superNodeObjectives[supersrcnid] += self.edge_objectives[etup]
for ni in self.Nodes():
nid = ni.GetId()
supernid = nidToSuperidMap[nid]
value = None
for (varID, varName, var, offset) in self.node_variables[nid]:
if var.size[0] == 1:
val = numpy.array([var.value])
else:
val = numpy.array(var.value).reshape(-1,)
if not value:
value = val
else:
value = numpy.concatenate((value, val))
if supernid not in superNodeConstraints:
superNodeObjectives[supernid] = self.node_objectives[nid]
superNodeConstraints[supernid] = self.node_constraints[nid]
else:
superNodeObjectives[supernid] += self.node_objectives[nid]
superNodeConstraints[supernid] += self.node_constraints[nid]
for ( varId, varName, var, offset) in self.node_variables[nid]:
superVarName = varName+str(varId)
varToSuperVarMap[(nid,varName)] = (supernid,superVarName)
if supernid not in superNodeVariables:
superNodeVariables[supernid] = [(varId, superVarName, var, offset)]
superNodeValues[supernid] = value
else:
superNodeOffset = sum([superNodeVariables[supernid][k][2].size[0]* \
superNodeVariables[supernid][k][2].size[1]\
for k in xrange(__builtin__.len(superNodeVariables[supernid])) ])
superNodeVariables[supernid] += [(varId, superVarName, var, superNodeOffset)]
superNodeValues[supernid] = numpy.concatenate((superNodeValues[supernid],value))
#add all supernodes to the supergraph
for supernid in superNodeConstraints:
supergraph.AddNode(supernid, superNodeObjectives[supernid], \
superNodeConstraints[supernid])
supergraph.node_variables[supernid] = superNodeVariables[supernid]
supergraph.node_values[supernid] = superNodeValues[supernid]
#add all superedges to the supergraph
for superei in superEdgeConstraints:
superSrcId,superDstId = superei
supergraph.AddEdge(superSrcId, superDstId, None,\
superEdgeObjectives[superei],\
superEdgeConstraints[superei])
#call solver for this supergraph
if UseADMM and supergraph.GetEdges() != 0:
supergraph.__SolveADMM(numProcessors, rho_param, maxIters, eps_abs, eps_rel, verbose)
else:
supergraph.Solve(M, False, numProcessors, rho_param, maxIters, eps_abs, eps_rel, verbose,
UseClustering=False)
self.status = supergraph.status
self.value = supergraph.value
for ni in self.Nodes():
nid = ni.GetId()
snid = nidToSuperidMap[nid]
self.node_values[nid] = []
for ( varId, varName, var, offset) in self.node_variables[nid]:
superVarName = varToSuperVarMap[(nid,varName)]
self.node_values[nid] = numpy.concatenate((self.node_values[nid],\
supergraph.GetNodeValue(snid, superVarName[1])))
# Implementation of distributed ADMM
# Uses a global value of rho_param for rho
# Will run for a maximum of maxIters iterations
def __SolveADMM(self, numProcessors, rho_param, maxIters, eps_abs, eps_rel,
                verbose):
    """Distributed ADMM over a multiprocessing worker pool.

    Builds shared x/z/u vectors, the sparse consensus matrix A, and
    per-node/per-edge work lists, then alternates ADMM_x / ADMM_z / ADMM_u
    updates until the primal and dual residuals fall below their
    thresholds or maxIters is reached.

    NOTE(review): relies on module-level index constants (X_NID, X_OBJ,
    X_VARS, X_CON, X_IND, X_LEN, X_DEG, X_NEIGHBORS, Z_ZIJIND, Z_UIJIND,
    Z_ZJIIND, Z_UJIIND), the worker functions ADMM_x/ADMM_z/ADMM_u, and the
    helpers getValue / rho_update_func defined elsewhere in this module —
    confirm against the full file.
    """
    global node_vals, edge_z_vals, edge_u_vals, rho
    global getValue, rho_update_func
    if numProcessors <= 0:
        num_processors = multiprocessing.cpu_count()
    else:
        num_processors = numProcessors
    rho = rho_param
    if verbose:
        print 'Distributed ADMM (%d processors)' % num_processors

    # Organize information for each node in helper node_info structure
    node_info = {}
    # Keeps track of the current offset necessary into the shared node
    # values Array
    length = 0
    for ni in self.Nodes():
        nid = ni.GetId()
        deg = ni.GetDeg()
        obj = self.node_objectives[nid]
        variables = self.node_variables[nid]
        con = self.node_constraints[nid]
        neighbors = [ni.GetNbrNId(j) for j in xrange(deg)]
        # Node's constraints include those imposed by edges
        for neighborId in neighbors:
            etup = self.__GetEdgeTup(nid, neighborId)
            econ = self.edge_constraints[etup]
            con += econ
        # Calculate sum of dimensions of all Variables for this node
        size = sum([var.size[0] for (varID, varName, var, offset) in variables])
        # Nearly complete information package for this node
        node_info[nid] = (nid, obj, variables, con, length, size, deg,\
            neighbors)
        length += size
    node_vals = multiprocessing.Array('d', [0.0] * length)
    x_length = length

    # Organize information for each node in final edge_list structure and
    # also helper edge_info structure
    edge_list = []
    edge_info = {}
    # Keeps track of the current offset necessary into the shared edge
    # values Arrays
    length = 0
    for ei in self.Edges():
        etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
        obj = self.edge_objectives[etup]
        con = self.edge_constraints[etup]
        con += self.node_constraints[etup[0]] +\
            self.node_constraints[etup[1]]
        # Get information for each endpoint node
        info_i = node_info[etup[0]]
        info_j = node_info[etup[1]]
        # z and u share offsets: each edge stores a z/u copy of both
        # endpoints' variable vectors.
        ind_zij = length
        ind_uij = length
        length += info_i[X_LEN]
        ind_zji = length
        ind_uji = length
        length += info_j[X_LEN]
        # Information package for this edge
        tup = (etup, obj, con,\
            info_i[X_VARS], info_i[X_LEN], info_i[X_IND], ind_zij, ind_uij,\
            info_j[X_VARS], info_j[X_LEN], info_j[X_IND], ind_zji, ind_uji)
        edge_list.append(tup)
        edge_info[etup] = tup
    edge_z_vals = multiprocessing.Array('d', [0.0] * length)
    edge_u_vals = multiprocessing.Array('d', [0.0] * length)
    z_length = length

    # Populate sparse matrix A.
    # A has dimensions (p, n), where p is the length of the stacked vector
    # of node variables, and n is the length of the stacked z vector of
    # edge variables.
    # Each row of A has one 1. There is a 1 at (i,j) if z_i = x_j.
    A = lil_matrix((z_length, x_length), dtype=numpy.int8)
    for ei in self.Edges():
        etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
        info_edge = edge_info[etup]
        info_i = node_info[etup[0]]
        info_j = node_info[etup[1]]
        for offset in xrange(info_i[X_LEN]):
            row = info_edge[Z_ZIJIND] + offset
            col = info_i[X_IND] + offset
            A[row, col] = 1
        for offset in xrange(info_j[X_LEN]):
            row = info_edge[Z_ZJIIND] + offset
            col = info_j[X_IND] + offset
            A[row, col] = 1
    A_tr = A.transpose()

    # Create final node_list structure by adding on information for
    # node neighbors
    node_list = []
    for nid, info in node_info.iteritems():
        entry = [nid, info[X_OBJ], info[X_VARS], info[X_CON], info[X_IND],\
            info[X_LEN], info[X_DEG]]
        # Append information about z- and u-value indices for each
        # node neighbor
        for i in xrange(info[X_DEG]):
            neighborId = info[X_NEIGHBORS][i]
            # The edge stores (zij, uij) for its smaller endpoint and
            # (zji, uji) for the larger one.
            indices = (Z_ZIJIND, Z_UIJIND) if nid < neighborId else\
                (Z_ZJIIND, Z_UJIIND)
            einfo = edge_info[self.__GetEdgeTup(nid, neighborId)]
            entry.append(einfo[indices[0]])
            entry.append(einfo[indices[1]])
        node_list.append(entry)

    pool = multiprocessing.Pool(num_processors)
    num_iterations = 0
    z_old = getValue(edge_z_vals, 0, z_length)
    # Proceed until convergence criteria are achieved or the maximum
    # number of iterations has passed
    while num_iterations <= maxIters:
        # Check convergence criteria (skipped on the very first pass,
        # before any update has run)
        if num_iterations != 0:
            x = getValue(node_vals, 0, x_length)
            z = getValue(edge_z_vals, 0, z_length)
            u = getValue(edge_u_vals, 0, z_length)
            # Determine if algorithm should stop. Retrieve primal and dual
            # residuals and thresholds
            stop, res_pri, e_pri, res_dual, e_dual =\
                self.__CheckConvergence(A, A_tr, x, z, z_old, u, rho,\
                    x_length, z_length,
                    eps_abs, eps_rel, verbose)
            if stop: break
            z_old = z
            # Update rho and scale u-values (the scaled dual variable must
            # be rescaled whenever rho changes)
            rho_new = rho_update_func(rho, res_pri, e_pri, res_dual, e_dual)
            scale = float(rho) / rho_new
            edge_u_vals[:] = [i * scale for i in edge_u_vals]
            rho = rho_new
        num_iterations += 1
        if verbose:
            # Debugging information prints current iteration #
            print 'Iteration %d' % num_iterations
        # One ADMM sweep: x-update per node, then z- and u-updates per edge.
        pool.map(ADMM_x, node_list)
        pool.map(ADMM_z, edge_list)
        pool.map(ADMM_u, edge_list)
    pool.close()
    pool.join()

    # Insert into hash to support GetNodeValue()
    for entry in node_list:
        nid = entry[X_NID]
        index = entry[X_IND]
        size = entry[X_LEN]
        self.node_values[nid] = getValue(node_vals, index, size)
    # Set TGraphVX status and value to match CVXPY
    if num_iterations <= maxIters:
        self.status = 'Optimal'
    else:
        self.status = 'Incomplete: max iterations reached'
    # self.value = self.GetTotalProblemValue()
# Iterate through all variables and update values.
# Sum all objective values over all nodes and edges.
def GetTotalProblemValue(self):
    """Push solved values back into the CVXPY Variables, then return the
    total of all node and edge objective values."""
    global getValue
    # First pass: write each node's solved slice into its Variables so
    # the CVXPY expressions below evaluate against the ADMM solution.
    for ni in self.Nodes():
        nid = ni.GetId()
        for (varID, varName, var, offset) in self.node_variables[nid]:
            var.value = self.GetNodeValue(nid, varName)
    # Second pass: accumulate every objective value.
    total = 0.0
    for ni in self.Nodes():
        total += self.node_objectives[ni.GetId()].value
    for ei in self.Edges():
        etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
        total += self.edge_objectives[etup].value
    return total
# Returns True if convergence criteria have been satisfied
# eps_abs = eps_rel = 0.01
# r = Ax - z
# s = rho * (A^T)(z - z_old)
# e_pri = sqrt(p) * e_abs + e_rel * max(||Ax||, ||z||)
# e_dual = sqrt(n) * e_abs + e_rel * ||rho * (A^T)u||
# Should stop if (||r|| <= e_pri) and (||s|| <= e_dual)
# Returns (boolean shouldStop, primal residual value, primal threshold,
#   dual residual value, dual threshold)
def __CheckConvergence(self, A, A_tr, x, z, z_old, u, rho, p, n,
                       e_abs, e_rel, verbose):
    """Standard ADMM stopping test: compare the primal residual r = Ax - z
    and dual residual s = rho * A^T (z - z_old) against their combined
    absolute/relative tolerances."""
    norm = numpy.linalg.norm
    Ax = A.dot(x)
    r = Ax - z
    s = rho * A_tr.dot(z - z_old)
    # Primal and dual thresholds. Add .0001 to prevent the case of 0.
    e_pri = math.sqrt(p) * e_abs + e_rel * max(norm(Ax), norm(z)) + .0001
    e_dual = math.sqrt(n) * e_abs + e_rel * norm(rho * A_tr.dot(u)) + .0001
    # Primal and dual residuals
    res_pri = norm(r)
    res_dual = norm(s)
    if verbose:
        # Debugging information to print convergence criteria values
        print ' r:', res_pri
        print ' e_pri:', e_pri
        print ' s:', res_dual
        print ' e_dual:', e_dual
    stop = (res_pri <= e_pri) and (res_dual <= e_dual)
    return (stop, res_pri, e_pri, res_dual, e_dual)
# API to get node Variable value after solving with ADMM.
def GetNodeValue(self, NId, Name):
    """Return the solved value slice for Variable `Name` at node `NId`,
    or None if no such Variable exists."""
    self.__VerifyNId(NId)
    for (varID, varName, var, offset) in self.node_variables[NId]:
        if varName != Name:
            continue
        stacked = self.node_values[NId]
        return stacked[offset:(offset + var.size[0])]
    return None
# Prints value of all node variables to console or file, if given
def PrintSolution(self, Filename=None):
    """Write solver status, total objective, and every node's variable
    values to stdout, or to `Filename` when given.

    Fix vs. original: a file handle opened for `Filename` was never
    closed; it is now closed in a finally block (stdout is left open).
    """
    numpy.set_printoptions(linewidth=numpy.inf)
    out = sys.stdout if (Filename == None) else open(Filename, 'w+')
    try:
        out.write('Status: %s\n' % self.status)
        out.write('Total Objective: %f\n' % self.value)
        for ni in self.Nodes():
            nid = ni.GetId()
            out.write('Node %d:\n' % nid)
            for (varID, varName, var, offset) in self.node_variables[nid]:
                val = numpy.transpose(self.GetNodeValue(nid, varName))
                out.write(' %s %s\n' % (varName, str(val)))
    finally:
        if out is not sys.stdout:
            out.close()
# Helper method to verify existence of an NId.
def __VerifyNId(self, NId):
    """Raise if node `NId` is absent from the underlying Snap.py graph."""
    if TUNGraph.IsNode(self, NId):
        return
    raise Exception('Node %d does not exist.' % NId)
# Helper: keep the global Variable registry consistent when a node's
# objective is first assigned or replaced. (The original header comment
# was truncated.)
def __UpdateAllVariables(self, NId, Objective):
    """Ensure no CVXPY Variable is shared between two node objectives."""
    if NId in self.node_objectives:
        # Retire the Variables belonging to the objective being replaced.
        previous = self.node_objectives[NId]
        self.all_variables = self.all_variables - set(previous.variables())
    # The new Objective's Variables must not already belong to any other
    # node's objective.
    incoming = set(Objective.variables())
    if __builtin__.len(self.all_variables & incoming) != 0:
        raise Exception('Objective at NId %d shares a variable.' % NId)
    self.all_variables = self.all_variables | incoming
# Helper method to get CVXPY Variables out of a CVXPY Objective
def __ExtractVariableList(self, Objective):
    """Return [(id, name, Variable, offset)] sorted by variable name, with
    each offset giving the Variable's start in the node's stacked vector."""
    named = sorted(((var.name(), var) for var in Objective.variables()),
                   key=lambda t: t[0])
    result = []
    offset = 0
    for (varName, var) in named:
        # Tuples of the form (id, name, object, offset).
        result.append((var.id, varName, var, offset))
        offset += var.size[0]
    return result
# Adds a Node to the TUNGraph and stores the corresponding CVX information.
def AddNode(self, NId, Objective=__default_objective,\
        Constraints=__default_constraints):
    """Register node `NId` with its objective and constraints; returns the
    Snap.py AddNode result."""
    self.__UpdateAllVariables(NId, Objective)
    self.node_objectives[NId] = Objective
    self.node_constraints[NId] = Constraints
    self.node_variables[NId] = self.__ExtractVariableList(Objective)
    return TUNGraph.AddNode(self, NId)
def SetNodeObjective(self, NId, Objective):
    """Replace node NId's objective; also refreshes the global variable
    registry and the cached (id, name, var, offset) list."""
    self.__VerifyNId(NId)
    self.__UpdateAllVariables(NId, Objective)
    self.node_objectives[NId] = Objective
    self.node_variables[NId] = self.__ExtractVariableList(Objective)

def GetNodeObjective(self, NId):
    """Return the CVXPY objective stored at node NId."""
    self.__VerifyNId(NId)
    return self.node_objectives[NId]

def SetNodeConstraints(self, NId, Constraints):
    """Replace the list of CVXPY constraints stored at node NId."""
    self.__VerifyNId(NId)
    self.node_constraints[NId] = Constraints

def GetNodeConstraints(self, NId):
    """Return the list of CVXPY constraints stored at node NId."""
    self.__VerifyNId(NId)
    return self.node_constraints[NId]
# Helper method to get a tuple representing an edge. The smaller NId
# goes first.
def __GetEdgeTup(self, NId1, NId2):
    """Canonical (smaller, larger) key for an undirected edge."""
    if NId1 < NId2:
        return (NId1, NId2)
    return (NId2, NId1)

# Helper method to verify existence of an edge.
def __VerifyEdgeTup(self, ETup):
    """Raise if the edge described by `ETup` is absent from the graph."""
    if TUNGraph.IsEdge(self, ETup[0], ETup[1]):
        return
    raise Exception('Edge {%d,%d} does not exist.' % ETup)
# Adds an Edge to the TUNGraph and stores the corresponding CVX information.
# ObjectiveFunc, if given, is called with the source and destination
# node variable dictionaries ({ string varName : CVXPY Variable }) and
# should return (objective, constraints); a non-tuple return is treated
# as a bare objective with the default constraints.
# If ObjectiveFunc is None, the explicit Objective/Constraints parameters
# (defaulting to the class defaults) are stored instead.
def AddEdge(self, SrcNId, DstNId, ObjectiveFunc=None,
            Objective=__default_objective, Constraints=__default_constraints):
    """Add edge (SrcNId, DstNId) and record its objective/constraints."""
    ETup = self.__GetEdgeTup(SrcNId, DstNId)
    if ObjectiveFunc == None:
        # No builder given: store the explicit (or default) values.
        self.edge_objectives[ETup] = Objective
        self.edge_constraints[ETup] = Constraints
        return TUNGraph.AddEdge(self, SrcNId, DstNId)
    ret = ObjectiveFunc(self.GetNodeVariables(SrcNId),
                        self.GetNodeVariables(DstNId))
    if type(ret) is tuple:
        # Tuple = (objective, constraints).
        self.edge_objectives[ETup] = ret[0]
        self.edge_constraints[ETup] = ret[1]
    else:
        # Singleton = objective only; fall back to default constraints.
        self.edge_objectives[ETup] = ret
        self.edge_constraints[ETup] = self.__default_constraints
    return TUNGraph.AddEdge(self, SrcNId, DstNId)
def SetEdgeObjective(self, SrcNId, DstNId, Objective):
    """Replace the CVXPY objective stored for edge (SrcNId, DstNId)."""
    ETup = self.__GetEdgeTup(SrcNId, DstNId)
    self.__VerifyEdgeTup(ETup)
    self.edge_objectives[ETup] = Objective

def GetEdgeObjective(self, SrcNId, DstNId):
    """Return the CVXPY objective stored for edge (SrcNId, DstNId)."""
    ETup = self.__GetEdgeTup(SrcNId, DstNId)
    self.__VerifyEdgeTup(ETup)
    return self.edge_objectives[ETup]

def SetEdgeConstraints(self, SrcNId, DstNId, Constraints):
    """Replace the constraint list stored for edge (SrcNId, DstNId)."""
    ETup = self.__GetEdgeTup(SrcNId, DstNId)
    self.__VerifyEdgeTup(ETup)
    self.edge_constraints[ETup] = Constraints

def GetEdgeConstraints(self, SrcNId, DstNId):
    """Return the constraint list stored for edge (SrcNId, DstNId)."""
    ETup = self.__GetEdgeTup(SrcNId, DstNId)
    self.__VerifyEdgeTup(ETup)
    return self.edge_constraints[ETup]
# Returns a dictionary of all variables corresponding to a node.
#   { string name : CVXPY Variable }
# This can be used in place of bulk loading functions to recover necessary
# Variables for an edge.
def GetNodeVariables(self, NId):
    """Map variable name -> CVXPY Variable for node `NId`."""
    self.__VerifyNId(NId)
    return {varName: var
            for (varID, varName, var, offset) in self.node_variables[NId]}
# Bulk loading for nodes
# ObjFunc is a function which accepts one argument, an array of strings
#   parsed from the given CSV filename
# ObjFunc should return a tuple of (objective, constraints), although
#   it will assume a singleton object will be an objective
# Optional parameter NodeIDs allows the user to pass in a list specifying,
#   in order, the node IDs that correspond to successive rows
# If NodeIDs is None, then the file must have a column denoting the
#   node ID for each row. The index of this column (0-indexed) is IdCol.
# If NodeIDs and IdCol are both None, then will iterate over all Nodes, in
#   order, as long as the file lasts
def AddNodeObjectives(self, Filename, ObjFunc, NodeIDs=None, IdCol=None):
    """Bulk-load node objectives/constraints from a CSV file.

    Fixes vs. original: (1) the "file too short" error referenced an
    undefined lowercase `filename` (NameError); (2) the three loading
    modes were not mutually exclusive, so the NodeIDs=IdCol=None mode
    fell through into the IdCol mode and re-processed the file; (3) the
    file handle is now closed even if ObjFunc raises.
    """
    def _apply_row(nid, data):
        # ObjFunc may return (objective, constraints) or a bare objective.
        ret = ObjFunc(data)
        if type(ret) is tuple:
            self.SetNodeObjective(nid, ret[0])
            self.SetNodeConstraints(nid, ret[1])
        else:
            self.SetNodeObjective(nid, ret)

    def _next_data_line(f):
        # Next non-comment line, or '' at EOF.
        while True:
            line = f.readline()
            if not line.startswith('#'):
                return line

    infile = open(Filename, 'r')
    try:
        if NodeIDs == None and IdCol == None:
            # Mode 1: rows pair with nodes in iteration order until the
            # file runs out.
            for ni in self.Nodes():
                line = _next_data_line(infile)
                if line == '':
                    break
                _apply_row(ni.GetId(), [x.strip() for x in line.split(',')])
        elif NodeIDs == None:
            # Mode 2: column IdCol of each row names the target node.
            for line in infile:
                if line.startswith('#'):
                    continue
                data = [x.strip() for x in line.split(',')]
                _apply_row(int(data[IdCol]), data)
        else:
            # Mode 3: explicit NodeIDs list, one row per id; the file must
            # supply a row for every id.
            for nid in NodeIDs:
                line = _next_data_line(infile)
                if line == '':
                    raise Exception('File %s is too short.' % Filename)
                _apply_row(nid, [x.strip() for x in line.split(',')])
    finally:
        infile.close()
# Bulk loading for edges
# If Filename is None:
# ObjFunc is a function which accepts three arguments, a dictionary of
# variables for the source and destination nodes, and an unused param
# { string varName : CVXPY Variable } x2, None
# ObjFunc should return a tuple of (objective, constraints), although
# it will assume a singleton object will be an objective
# If Filename exists:
# ObjFunc is the same, except the third param will be be an array of
# strings parsed from the given CSV filename
# Optional parameter EdgeIDs allows the user to pass in a list specifying,
# in order, the EdgeIDs that correspond to successive rows. An edgeID is
# a tuple of (srcID, dstID).
# If EdgeIDs is None, then the file may have columns denoting the srcID and
# dstID for each row. The indices of these columns are 0-indexed.
# If EdgeIDs and id columns are None, then will iterate through all edges
# in order, as long as the file lasts.
def AddEdgeObjectives(self, ObjFunc, Filename=None, EdgeIDs=None,\
SrcIdCol=None, DstIdCol=None):
if Filename == None:
for ei in self.Edges():
src_id = ei.GetSrcNId()
src_vars = self.GetNodeVariables(src_id)
dst_id = ei.GetDstNId()
dst_vars = self.GetNodeVariables(dst_id)
ret = ObjFunc(src_vars, dst_vars, None)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(src_id, dst_id, ret[0])
self.SetEdgeConstraints(src_id, dst_id, ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(src_id, dst_id, ret)
return
infile = open(Filename, 'r')
if EdgeIDs == None and (SrcIdCol == None or DstIdCol == None):
stop = False
for ei in self.Edges():
src_id = ei.GetSrcNId()
src_vars = self.GetNodeVariables(src_id)
dst_id = ei.GetDstNId()
dst_vars = self.GetNodeVariables(dst_id)
while True:
line = infile.readline()
if line == '': stop = True
if not line.startswith('#'): break
if stop: break
data = [x.strip() for x in line.split(',')]
ret = ObjFunc(src_vars, dst_vars, data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(src_id, dst_id, ret[0])
self.SetEdgeConstraints(src_id, dst_id, ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(src_id, dst_id, ret)
if EdgeIDs == None:
for line in infile:
if line.startswith('#'): continue
data = [x.strip() for x in line.split(',')]
src_id = int(data[SrcIdCol])
dst_id = int(data[DstIdCol])
src_vars = self.GetNodeVariables(src_id)
dst_vars = self.GetNodeVariables(dst_id)
ret = ObjFunc(src_vars, dst_vars, data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(src_id, dst_id, ret[0])
self.SetEdgeConstraints(src_id, dst_id, ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(src_id, dst_id, ret)
else:
for edgeID in EdgeIDs:
etup = self.__GetEdgeTup(edgeID[0], edgeID[1])
while True:
line = infile.readline()
if line == '':
raise Exception('File %s is too short.' % Filename)
if not line.startswith('#'): break
data = [x.strip() for x in line.split(',')]
src_vars = self.GetNodeVariables(etup[0])
dst_vars = self.GetNodeVariables(etup[1])
ret = ObjFunc(src_vars, dst_vars, data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(etup[0], etup[1], ret[0])
self.SetEdgeConstraints(etup[0], etup[1], ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(etup[0], etup[1], ret)
infile.close()
"""return clusters of nodes of the original graph.Each cluster corresponds to
a supernode in the supergraph"""
    def __ClusterGraph(self,clusterSize):
        """Partition the graph's nodes into clusters; each cluster becomes a
        supernode of the supergraph.

        Starting from each unvisited node (in random order), a level-order
        (BFS) traversal greedily packs nodes into the current supernode
        until the total scalar size of their CVXPY variables would exceed
        clusterSize. Returns a list of node-ID lists.
        """
        #obtain a random shuffle of the nodes
        nidArray = [ni.GetId() for ni in self.Nodes()]
        numpy.random.shuffle(nidArray)
        visitedNode = {}
        for nid in nidArray:
            visitedNode[nid] = False
        superNodes = []
        superNode,superNodeSize = [],0
        for nid in nidArray:
            if not visitedNode[nid]:
                # Two alternating BFS frontiers; isOdd selects which frontier
                # is being drained while the other is being filled.
                oddLevel, evenLevel, isOdd = [],[],True
                oddLevel.append(nid)
                visitedNode[nid] = True
                #do a level order traversal and add nodes to the superNode until the
                #size of the supernode variables gets larger than clusterSize
                while True:
                    if isOdd:
                        if __builtin__.len(oddLevel) > 0:
                            while __builtin__.len(oddLevel) > 0:
                                topId = oddLevel.pop(0)
                                node = TUNGraph.GetNI(self,topId)
                                # total number of scalars across this node's variables
                                varSize = sum([variable[2].size[0]* \
                                    variable[2].size[1]\
                                    for variable in self.node_variables[topId]])
                                if varSize + superNodeSize <= clusterSize:
                                    superNode.append(topId)
                                    superNodeSize = varSize + superNodeSize
                                else:
                                    # current supernode is full: flush it and
                                    # start a new one seeded with this node
                                    if __builtin__.len(superNode) > 0:
                                        superNodes.append(superNode)
                                    superNodeSize = varSize
                                    superNode = [topId]
                                neighbors = [node.GetNbrNId(j) \
                                    for j in xrange(node.GetDeg())]
                                for nbrId in neighbors:
                                    if not visitedNode[nbrId]:
                                        evenLevel.append(nbrId)
                                        visitedNode[nbrId] = True
                            isOdd = False
                            #sort the nodes according to their variable size
                            if __builtin__.len(evenLevel) > 0:
                                evenLevel.sort(key=lambda nid : sum([variable[2].size[0]* \
                                    variable[2].size[1] for variable \
                                    in self.node_variables[nid]]))
                        else:
                            break
                    else:
                        # mirror of the branch above with the frontiers swapped
                        if __builtin__.len(evenLevel) > 0:
                            while __builtin__.len(evenLevel) > 0:
                                topId = evenLevel.pop(0)
                                node = TUNGraph.GetNI(self,topId)
                                varSize = sum([variable[2].size[0]* \
                                    variable[2].size[1]\
                                    for variable in self.node_variables[topId]])
                                if varSize + superNodeSize <= clusterSize:
                                    superNode.append(topId)
                                    superNodeSize = varSize + superNodeSize
                                else:
                                    if __builtin__.len(superNode) > 0:
                                        superNodes.append(superNode)
                                    superNodeSize = varSize
                                    superNode = [topId]
                                neighbors = [node.GetNbrNId(j) \
                                    for j in xrange(node.GetDeg())]
                                for nbrId in neighbors:
                                    if not visitedNode[nbrId]:
                                        oddLevel.append(nbrId)
                                        visitedNode[nbrId] = True
                            isOdd = True
                            #sort the nodes according to their variable size
                            if __builtin__.len(oddLevel) > 0:
                                oddLevel.sort(key=lambda nid : sum([variable[2].size[0]* \
                                    variable[2].size[1] for variable \
                                    in self.node_variables[nid]]))
                        else:
                            break
        # flush the last (possibly partial) supernode
        if superNode not in superNodes:
            superNodes.append(superNode)
        return superNodes
## ADMM Global Variables and Functions ##
# These module-level values are read by the ADMM worker functions below
# (ADMM_x / ADMM_z / ADMM_u); presumably they are inherited by worker
# processes at fork time -- TODO confirm against the solver setup code.
# By default, the objective function is Minimize().
__default_m_func = Minimize
m_func = __default_m_func
# By default, rho is 1.0. Default rho update is identity function and does not
# depend on primal or dual residuals or thresholds.
__default_rho = 1.0
__default_rho_update_func = lambda rho, res_p, thr_p, res_d, thr_d: rho
rho = __default_rho
# Rho update function takes 5 parameters:
# - Old value of rho
# - Primal residual and threshold
# - Dual residual and threshold
rho_update_func = __default_rho_update_func
def SetRho(Rho=None):
    """Set the global ADMM penalty parameter rho.

    Passing None (or omitting the argument) restores the default value.
    """
    global rho
    # Compare against None explicitly: the old truthiness test silently
    # replaced an explicit Rho of 0 with the default.
    rho = Rho if Rho is not None else __default_rho
# Rho update function should take five parameters
# (old_rho, res_p, thr_p, res_d, thr_d) and return the new rho.
# This function will be called at the end of every iteration.
def SetRhoUpdateFunc(Func=None):
    """Set the global rho-update callback; None restores the default
    (identity) update."""
    global rho_update_func
    # Explicit None check mirrors SetRho and avoids relying on truthiness.
    rho_update_func = Func if Func is not None else __default_rho_update_func
# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbor.
#   Information for each neighbor is two entries, appended in order:
#   starting index of the corresponding z-value in edge_z_vals, then the
#   same for u in edge_u_vals.
(X_NID, X_OBJ, X_VARS, X_CON, X_IND, X_LEN, X_DEG, X_NEIGHBORS) = range(8)
# Tuple of indices to identify the information package for each edge.
# Z_EID: Edge ID / tuple
# Z_OBJ: CVXPY Objective
# Z_CON: CVXPY Constraints
# Z_[IJ]VARS: CVXPY Variables for Node [ij] (entry from node_variables)
# Z_[IJ]LEN: Total length (sum of dimensions) of all variables for Node [ij]
# Z_X[IJ]IND: Starting index into shared node_vals Array for Node [ij]
# Z_Z[IJ|JI]IND: Starting index into shared edge_z_vals Array for edge [ij|ji]
# Z_U[IJ|JI]IND: Starting index into shared edge_u_vals Array for edge [ij|ji]
(Z_EID, Z_OBJ, Z_CON, Z_IVARS, Z_ILEN, Z_XIIND, Z_ZIJIND, Z_UIJIND,\
    Z_JVARS, Z_JLEN, Z_XJIND, Z_ZJIIND, Z_UJIIND) = range(13)
# Contain all x, z, and u values for each node and/or edge in ADMM. Use the
# given starting index and length with getValue() to get individual node
# values. Populated elsewhere before the ADMM loop starts.
node_vals = None
edge_z_vals = None
edge_u_vals = None
# Extract a numpy array value from a shared Array.
# Give shared array, starting index, and total length.
def getValue(arr, index, length):
    """Return arr[index:index+length] copied into a fresh numpy array."""
    end = index + length
    return numpy.array(arr[index:end])
# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.
def writeValue(sharedarr, index, nparr, length):
    """Copy `length` entries of nparr into sharedarr starting at `index`."""
    # A length-1 value may arrive as a bare scalar; wrap it so the slice
    # assignment below always receives a sequence.
    payload = [nparr] if length == 1 else nparr
    sharedarr[index:(index + length)] = payload
# Write the values for all of the Variables involved in a given Objective to
# the given shared Array.
# variables should be an entry from the node_values structure.
def writeObjective(sharedarr, index, objective, variables):
    """For every CVXPY Variable appearing in `objective`, write its current
    value into sharedarr at the offset recorded for it in `variables`.
    Variables whose ID has no entry in `variables` are silently skipped."""
    for v in objective.variables():
        for (varID, varName, var, offset) in variables:
            if varID != v.id:
                continue
            writeValue(sharedarr, index + offset, v.value, var.size[0])
            break
# Proximal operators
def Prox_logdet(S, A, eta):
global rho
d, q = numpy.linalg.eigh(eta*A-S)
q = numpy.matrix(q)
X_var = ( 1/(2*float(eta)) )*q*( numpy.diag(d + numpy.sqrt(numpy.square(d) + (4*eta)*numpy.ones(d.shape))) )*q.T
x_var = X_var[numpy.triu_indices(S.shape[1])] # extract upper triangular part as update variable
return numpy.matrix(x_var).T
def upper2Full(a):
    """Rebuild the full symmetric m x m matrix from its m*(m+1)/2 packed
    upper-triangular entries (inverse of the triu packing)."""
    # Solve m*(m+1)/2 == len(a) for the matrix dimension m.
    m = int((numpy.sqrt(8*a.shape[0] + 1) - 1)/2)
    full = numpy.zeros([m, m])
    full[numpy.triu_indices(m)] = a
    # Mirror the upper triangle; subtract the diagonal once since the sum
    # counted it twice.
    return (full + full.T) - numpy.diag(full.diagonal())
def ij2symmetric(i, j, size):
    """Map matrix coordinates (i, j), with i <= j, to the linear index of
    that entry in the packed upper-triangular representation of a
    size x size symmetric matrix.

    Uses floor division so the result is an int usable directly as an
    array index (the old `/` returned a float on Python 3); both products
    are always even, so the value is unchanged.
    """
    return (size * (size + 1)) // 2 - (size - i) * ((size - i + 1)) // 2 + j - i
# x-update for ADMM for one node
def ADMM_x(entry):
    """ADMM x-update for one node entry (layout given by the X_* constants).

    Specialized solver: when the node objective has more than one argument
    it is treated as a log-det + trace node and minimized in closed form
    via Prox_logdet; otherwise the node is a dummy with nothing to update.
    Writes the solution into the shared node_vals array; returns None.
    """
    global rho
    variables = entry[X_VARS]
    #-----------------------Proximal operator ---------------------------
    x_update = [] # proximal update for the variable x
    # entry[1] is the node's CVXPY objective (X_OBJ); more than one arg
    # means the log-det + trace form.
    if(__builtin__.len(entry[1].args) > 1 ):
        # Empirical covariance matrix dug out of the CVXPY expression tree.
        # NOTE(review): this depends on the exact objective structure built
        # elsewhere -- confirm if the objective construction changes.
        cvxpyMat = entry[1].args[1].args[0].args[0]
        numpymat = cvxpyMat.value
        # packed upper-triangular length m*(m+1)/2
        mat_shape = ( int( numpymat.shape[1] * ( numpymat.shape[1]+1 )/2.0 ) ,)
        a = numpy.zeros(mat_shape)
        # Accumulate (z - u) over every neighbor of this node.
        for i in xrange(entry[X_DEG]):
            z_index = X_NEIGHBORS + (2 * i)
            u_index = z_index + 1
            zi = entry[z_index]
            ui = entry[u_index]
            for (varID, varName, var, offset) in variables:
                z = getValue(edge_z_vals, zi + offset, var.size[0])
                u = getValue(edge_u_vals, ui + offset, var.size[0])
                a += (z-u)
        # Average over the degree, then apply the closed-form prox.
        A = upper2Full(a)
        A = A/entry[X_DEG]
        eta = 1/float(rho)
        x_update = Prox_logdet(numpymat, A, eta)
        solution = numpy.array(x_update).T.reshape(-1)
        writeValue(node_vals, entry[X_IND] + variables[0][3], solution, variables[0][2].size[0])
    else:
        x_update = [] # no variable to update for dummy node
    return None
# z-update for ADMM for one edge
def ADMM_z(entry, index_penalty = 1):
    """ADMM z-update for one edge entry (layout given by the Z_* constants).

    Specialized group-lasso soft-thresholding across the block structure of
    the (TVGL-style) problem: entries belonging to the same block position
    are thresholded together. Writes z into the shared edge_z_vals array;
    returns None.
    """
    global rho
    rho = float(rho)
    #-----------------------Proximal operator ---------------------------
    # a_ij accumulates (x_i + u_ij) over all variables of node i.
    a_ij = [] #
    flag = 0
    variables_i = entry[Z_IVARS]
    for (varID, varName, var, offset) in variables_i:
        x_i = getValue(node_vals, entry[Z_XIIND] + offset, var.size[0])
        u_ij = getValue(edge_u_vals, entry[Z_UIJIND] + offset, var.size[0])
        if flag == 0:
            a_ij = (x_i + u_ij)
            flag = 1
        else:
            a_ij += (x_i + u_ij)
    # Penalty weights and block layout pulled out of the CVXPY objective.
    # NOTE(review): depends on the exact edge-objective structure built
    # elsewhere; numBlocks/sizeBlocks are assumed to be integers.
    lamb = entry[1].args[0].args[0].value
    numBlocks = entry[1].args[1].args[0].value
    sizeBlocks = entry[1].args[2].args[0].value
    probSize = numBlocks*sizeBlocks
    z_ij = numpy.zeros(probSize*(probSize+1)/2)
    for i in range(numBlocks):
        if (i == 0):
            #In the A^{(0)} block (the blocks on the diagonal)
            for j in range(sizeBlocks):
                for k in range(j, sizeBlocks):
                    # Gather the same (j, k) entry from every diagonal block.
                    elems = numBlocks
                    lamSum = 0
                    points = numpy.zeros((elems))
                    locList = []
                    for l in range(elems):
                        (loc1, loc2) = (l*sizeBlocks + j, l*sizeBlocks+k)
                        locList.append((loc1,loc2))
                        index = ij2symmetric(loc1, loc2, probSize)
                        points[l] = a_ij[index]
                        lamSum = lamSum + lamb[loc1,loc2]
                    #Calculate soft threshold
                    #If answer is positive
                    ansPos = max((rho*numpy.sum(points) - lamSum)/(rho*elems),0)
                    #If answer is negative
                    ansNeg = min((rho*numpy.sum(points) + lamSum)/(rho*elems),0)
                    # Apply the shared thresholded value to every location
                    # in the group (or zero it out inside the dead zone).
                    if (rho*numpy.sum(points) > lamSum):
                        for locs in locList:
                            index = ij2symmetric(locs[0], locs[1], probSize)
                            z_ij[index] = ansPos
                    elif(rho*numpy.sum(points) < -1*lamSum):
                        for locs in locList:
                            index = ij2symmetric(locs[0], locs[1], probSize)
                            z_ij[index] = ansNeg
                    else:
                        for locs in locList:
                            index = ij2symmetric(locs[0], locs[1], probSize)
                            z_ij[index] = 0
        else:
            #Off diagonal blocks
            for j in range(sizeBlocks):
                for k in range(sizeBlocks):
                    # Fewer repetitions of the i-th off-diagonal block exist.
                    elems = (2*numBlocks - 2*i)/2
                    lamSum = 0
                    points = numpy.zeros((elems))
                    locList = []
                    for l in range(elems):
                        (loc1, loc2) = ((l+i)*sizeBlocks + j, l*sizeBlocks+k)
                        locList.append((loc2,loc1))
                        index = ij2symmetric(loc2, loc1, probSize)
                        points[l] = a_ij[index]
                        lamSum = lamSum + lamb[loc2,loc1]
                    #Calculate soft threshold
                    #If answer is positive
                    ansPos = max((rho*numpy.sum(points) - lamSum)/(rho*elems),0)
                    #If answer is negative
                    ansNeg = min((rho*numpy.sum(points) + lamSum)/(rho*elems),0)
                    if (rho*numpy.sum(points) > lamSum):
                        for locs in locList:
                            index = ij2symmetric(locs[0], locs[1], probSize)
                            z_ij[index] = ansPos
                    elif(rho*numpy.sum(points) < -1*lamSum):
                        for locs in locList:
                            index = ij2symmetric(locs[0], locs[1], probSize)
                            z_ij[index] = ansNeg
                    else:
                        for locs in locList:
                            index = ij2symmetric(locs[0], locs[1], probSize)
                            z_ij[index] = 0
    writeValue(edge_z_vals, entry[Z_ZIJIND] + variables_i[0][3], z_ij, variables_i[0][2].size[0])
    return None
# u-update for ADMM for one edge
def ADMM_u(entry):
    """Scaled dual update u += x - z, applied to both directions
    (i -> j, then j -> i) of the edge described by `entry`."""
    global rho
    directions = (
        (entry[Z_UIJIND], entry[Z_XIIND], entry[Z_ZIJIND], entry[Z_ILEN]),
        (entry[Z_UJIIND], entry[Z_XJIND], entry[Z_ZJIIND], entry[Z_JLEN]),
    )
    for (u_ind, x_ind, z_ind, length) in directions:
        new_u = getValue(edge_u_vals, u_ind, length) \
                + getValue(node_vals, x_ind, length) \
                - getValue(edge_z_vals, z_ind, length)
        writeValue(edge_u_vals, u_ind, new_u, length)
    return entry
| 2.3125 | 2 |
hcc_ws/src/estimation_pos/src/drone_object.py | Jackie890621/Localization_by_Apriltag | 0 | 12771913 | <reponame>Jackie890621/Localization_by_Apriltag<gh_stars>0
#!/usr/bin/env python2
from numpy.core.defchararray import count
from numpy.lib.financial import nper
import rospy
import numpy as np
import message_filters
import cv2
from cv_bridge import CvBridge, CvBridgeError
from darknet_ros_msgs.msg import BoundingBoxes
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import PointStamped
from nav_msgs.msg import Odometry
import tf
from tf.transformations import quaternion_matrix, translation_matrix
from tf import transformations
import math
import os
import sys
# Publishers for the estimated object position and the camera position,
# both expressed in the "origin" frame.
pub = rospy.Publisher('/object_pose', PointStamped, queue_size=10)
pub1 = rospy.Publisher('/camera_pose', PointStamped, queue_size=10)
rospy.init_node('drone_Object', anonymous=True)
rospy.loginfo("Start D435_Object_Distance")
cv_bridge = CvBridge()
print('Try to get camera info...')
# Block until one CameraInfo message arrives; its projection matrix P
# supplies the pinhole intrinsics used by getXYZ().
msg = rospy.wait_for_message('/camera/color/camera_info', CameraInfo, timeout=None)
#     [fx'  0  cx' Tx]
# P = [ 0  fy' cy' Ty]
#     [ 0   0   1   0]
print('Get camera info')
fx = msg.P[0]
fy = msg.P[5]
cx = msg.P[2]
cy = msg.P[6]
# Most recent apriltag localization (camera pose in origin frame) + stamp.
transform_time = 0.0
transform = Odometry()
# Last computed position per object class (metres, origin frame).
Umbrella = np.zeros(3)
Bicycle = np.zeros(3)
TeddyBear = np.zeros(3)
Chair = np.zeros(3)
# Flat running history of all sightings per class; reshape(-1, 3) to use.
Umbrella_output = np.array([])
Bicycle_output = np.array([])
TeddyBear_output = np.array([])
Chair_output = np.array([])
def main():
    """Wire up the subscribers (synchronized depth + detections, plus the
    apriltag localization feed) and spin forever."""
    # Depth frames aligned to the color camera.
    depth_image_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
    # YOLO (darknet_ros) detections.
    bb_sub = message_filters.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes)
    # Pair a depth image with detections arriving within 0.5 s of it.
    ts = message_filters.ApproximateTimeSynchronizer([depth_image_sub, bb_sub], 10, 0.5)
    ts.registerCallback(callback)
    rospy.Subscriber("apriltag_localization", Odometry, transform_cb)
    rospy.spin()
def transform_cb(msg):
    """Cache the latest apriltag localization message and its timestamp
    so callback() can check whether the camera pose is fresh."""
    global transform_time
    global transform
    transform = msg
    transform_time = msg.header.stamp.to_sec()
def publish_object_location(object_position, depth_img, org, obj, class_type, bb_size):
    """Convert a camera-frame detection into the origin frame, publish it on
    /object_pose, and append it to the per-class history (saved to disk).

    object_position: homogeneous position in millimetres (camera frame,
        already rotated into the origin orientation).
    org: camera origin in the origin frame (metres).
    obj: caller-owned length-3 array updated in place with the new position.
    """
    global Umbrella_output
    global Bicycle_output
    global TeddyBear_output
    global Chair_output
    point_message = PointStamped()
    point_message.header = depth_img.header
    point_message.header.frame_id = "origin"
    # mm -> m, then offset by the camera position in the origin frame.
    point_message.point.x = object_position[0]/1000 + org[0]
    point_message.point.y = object_position[1]/1000 + org[1]
    point_message.point.z = object_position[2]/1000 + org[2]
    # BUG FIX: the message was built but never sent; `pub` (/object_pose)
    # was otherwise unused in this module.
    pub.publish(point_message)
    # update the caller's per-class position array in place
    obj[0] = point_message.point.x
    obj[1] = point_message.point.y
    obj[2] = point_message.point.z
    print('position:', obj[0], obj[1], obj[2])
    # append this sighting to the running per-class history
    if class_type == "Umbrella":
        Umbrella_output = np.append(Umbrella_output, obj)
    elif class_type == "Bicycle":
        Bicycle_output = np.append(Bicycle_output, obj)
    elif class_type == "TeddyBear":
        TeddyBear_output = np.append(TeddyBear_output, obj)
    elif class_type == "Chair":
        Chair_output = np.append(Chair_output, obj)
    # usage: reshape(-1,3) --> [[o,o,o],[o,o,o],...]
    # NOTE(review): path is relative to the process cwd -- fragile; verify.
    submission_path = os.path.realpath('..') + "/output/drone_pkg"
    np.savez(submission_path,
             a=Umbrella_output,
             b=Bicycle_output,
             c=TeddyBear_output,
             d=Chair_output)
def callback(depth_img, bb):
    """Synchronized handler: for each detection, sample its depth, project
    it to 3-D, transform into the origin frame, and record/publish it.

    depth_img: sensor_msgs/Image, depth aligned to the color camera.
    bb: darknet_ros BoundingBoxes from (approximately) the same instant.
    """
    global transform_time
    global transform
    local_time = depth_img.header.stamp.to_sec()
    print("Get local_time")
    print(local_time)
    print(transform_time)
    # Only proceed when the latest localization fix is fresh (< 1 s old);
    # otherwise the camera pose used below would be stale.
    if abs(local_time - transform_time) < 1 and transform_time != 0:
        print("Time error")
        print(local_time - transform_time)
        # Build the 4x4 homogeneous camera->origin transform from the
        # apriltag localization quaternion + translation.
        global_transform = quaternion_matrix(np.array(
            [transform.pose.pose.orientation.x,
             transform.pose.pose.orientation.y,
             transform.pose.pose.orientation.z,
             transform.pose.pose.orientation.w]))
        global_transform[0][3] = transform.pose.pose.position.x
        global_transform[1][3] = transform.pose.pose.position.y
        global_transform[2][3] = transform.pose.pose.position.z
        try:
            cv_depthimage = cv_bridge.imgmsg_to_cv2(depth_img, "32FC1")
            cv_depthimage2 = np.array(cv_depthimage, dtype=np.float32)
        except CvBridgeError as e:
            print(e)
        # Publish the camera position in the origin frame.
        v1 = np.array([0,0,0,1])
        org = np.matmul(global_transform, v1)
        point_message = PointStamped()
        point_message.header = depth_img.header
        point_message.header.frame_id = "origin"
        point_message.point.x = org[0]
        point_message.point.y = org[1]
        point_message.point.z = org[2]
        pub1.publish(point_message)
        for i in bb.bounding_boxes:
            x_mean = (i.xmax + i.xmin) / 2
            y_mean = (i.ymax + i.ymin) / 2
            bb_size = (i.xmax - i.xmin)*(i.ymax - i.ymin)
            thr = 10
            # Skip boxes touching the 640x480 image border (except
            # umbrellas): their center/depth is unreliable.
            if (i.xmax>(640-thr) or i.xmin<thr or i.ymax>(480-thr) or i.ymin<thr) and i.Class != "umbrella":
                continue
            # Per-class pixel offsets pick where on the object the depth is
            # sampled -- presumably tuned empirically; verify.
            if i.Class == "umbrella" and i.probability >= 0.6:
                rospy.loginfo("see Umbrella")
                # NOTE(review): depth is sampled 30 px above center (y-30)
                # but the projection shifts x by -30 instead -- one of the
                # two offsets is probably on the wrong axis; confirm intent.
                zc = cv_depthimage2[int(y_mean) - 30][int(x_mean)]
                v1 = np.array(getXYZ(x_mean - 30, y_mean, zc, fx, fy, cx, cy))
                object_position = np.matmul(global_transform, v1)
                publish_object_location(object_position,depth_img, org, Umbrella, "Umbrella", bb_size)
            elif i.Class == "bicycle" and i.probability >= 0.4 and bb_size > 90000:
                rospy.loginfo("see Bicycle")
                zc = cv_depthimage2[int(y_mean) + 65][int(x_mean)]
                v1 = np.array(getXYZ(x_mean, y_mean + 65, zc, fx, fy, cx, cy))
                object_position = np.matmul(global_transform, v1)
                publish_object_location(object_position,depth_img, org, Bicycle, "Bicycle", bb_size)
            elif i.Class == "teddy bear":
                rospy.loginfo("see TeddyBear")
                zc = cv_depthimage2[int(y_mean) + 10][int(x_mean)]
                v1 = np.array(getXYZ(x_mean, y_mean + 10, zc, fx, fy, cx, cy))
                object_position = np.matmul(global_transform, v1)
                publish_object_location(object_position,depth_img, org, TeddyBear, "TeddyBear", bb_size)
            elif i.Class == "chair" and i.probability >= 0.4:
                rospy.loginfo("see Chair")
                zc = cv_depthimage2[int(y_mean)][int(x_mean)]
                v1 = np.array(getXYZ(x_mean, y_mean, zc, fx, fy, cx, cy))
                object_position = np.matmul(global_transform, v1)
                publish_object_location(object_position,depth_img, org, Chair, "Chair", bb_size)
############################
#  Student Implementation  #
############################
def getXYZ(xp, yp, zc, fx, fy, cx, cy):
    """Back-project pixel (xp, yp) with depth zc into a homogeneous 3-D
    point in the camera frame using the pinhole model.

    cx, cy: principal point (pixels); fx, fy: focal lengths (pixels).
    Returns (x, y, z, 1.0).
    """
    fx_inv = 1.0/fx
    fy_inv = 1.0/fy
    return ((xp - cx) * zc * fx_inv,
            (yp - cy) * zc * fy_inv,
            zc,
            1.0)
# Entry point when run as a ROS node script.
if __name__ == '__main__':
    main()
| 2.140625 | 2 |
lm_pretrain/evaluate.py | dillondaudert/pssp_lstm | 3 | 12771914 | <reponame>dillondaudert/pssp_lstm
"""Evaluate the model"""
# basic example of training a network end-to-end
from time import process_time
from pathlib import Path
import tensorflow as tf, numpy as np
import pandas as pd
import numpy as np
from .model_helper import create_model
def evaluate(hparams, files, outfile=None):
    """Evaluate a trained model on each input file in `files`.

    For every file a fresh eval graph is built via create_model, the
    checkpoint in hparams.ckpt is restored, and one record per example is
    collected into a DataFrame. Basic shape checks are run on the result;
    if `outfile` is given the DataFrame is pickled there.
    """
    cols = ["file", "id", "len", "seq", "phyche", "pssm", "logits", "ss"]
    # Extra columns present only when the model exposes LM hidden states.
    hcols = ["h_0", "h_1", "h_2", "lm_logits"]
    recs = []
    for f in files:
        hparams.valid_file = f
        eval_tuple = create_model(hparams, tf.contrib.learn.ModeKeys.EVAL)
        with eval_tuple.graph.as_default():
            local_initializer = tf.local_variables_initializer()
        print("Evaluating model on %s" % (hparams.valid_file))
        # do evaluation: restore weights, reset the input iterator and
        # local (metric) variables, then drain the dataset.
        eval_tuple.model.saver.restore(eval_tuple.session, hparams.ckpt)
        eval_tuple.session.run([eval_tuple.iterator.initializer, local_initializer])
        while True:
            try:
                fetched = eval_tuple.model.named_eval(eval_tuple.session)
                # k: positions trimmed from each end of phyche, matching the
                # conv filter size when one was configured (else 1).
                if "filter_size" in vars(hparams):
                    k = hparams.filter_size
                else:
                    k = 1
                rec = (f,
                       fetched["inputs"].id[0],
                       fetched["inputs"].len[0],
                       fetched["inputs"].seq[0],
                       fetched["inputs"].phyche[0, k:-k, :],
                       fetched["inputs"].pssm[0],
                       fetched["logits"][0],
                       fetched["inputs"].ss[0]
                       )
                if "outputs" in fetched.keys():
                    rec = rec + (fetched["outputs"].h_0[0],
                                 fetched["outputs"].h_1[0],
                                 fetched["outputs"].h_2[0],
                                 fetched["outputs"].lm_logits[0],
                                 )
                for t in rec[3:]:
                    # assert that all input/output tensors share the same
                    # sequence length as the example itself
                    assert t.shape[0] == fetched["inputs"].len[0]
                recs.append(rec)
                # summary_writer.add_summary(summary, global_step)
            except tf.errors.OutOfRangeError:
                # dataset exhausted for this file
                break
    # NOTE(review): assumes at least one record was collected; recs[0]
    # raises IndexError on an empty dataset.
    df_cols = cols if len(cols) == len(recs[0]) else cols+hcols
    df = pd.DataFrame.from_records(data=recs, columns=df_cols)
    df = df.reindex(columns = cols+hcols)
    print(df.iloc[-1])
    # do some data verification on every row's tensor shapes
    for i in range(df.shape[0]):
        row = df.iloc[i]
        assert row.seq.shape[1] == 23 # num AA one-hots
        assert row.phyche.shape[1] == hparams.num_phyche_features
        assert row.pssm.shape[1] == hparams.num_pssm_features
        assert row.logits.shape[1] == hparams.num_labels
        assert row.ss.shape[1] == hparams.num_labels
        # LM columns are NaN (scalar) when the model had no LM outputs.
        if not np.isscalar(row.h_0):
            assert row.h_0.shape[1] == 2*hparams.lm_hparams.num_units
            assert row.h_1.shape[1] == 2*hparams.lm_hparams.num_units
            assert row.h_2.shape[1] == 2*hparams.lm_hparams.num_units
            assert row.lm_logits.shape[1] == hparams.lm_hparams.num_labels
    if outfile is not None:
        df.to_pickle(outfile)
| 2.578125 | 3 |
cls/prs.py | HesswareGmbH/clscenter-python | 0 | 12771915 | <reponame>HesswareGmbH/clscenter-python
import requests
import json
import urllib
import time
import base64
# Disable TLS certificate verification for all PRS requests.
# NOTE(review): acceptable for self-signed test instances, insecure for
# production use.
verify_SSL = False
# Some enums to make it easier to read
# we use python 2.7 so enum.Enum is not available
class CommandMode(object):
    # Enum-like constants describing the command-channel state.
    ProgMode = 0    # programming mode
    DataMode = 1    # data mode
    WaitForAck = 2  # waiting for an acknowledgement
class MeterReading(object):
    """A single reading from a metering point (currently a stub)."""
    def __init__(self):
        # Unix timestamp of the reading; -1 marks "not populated".
        self.timestamp = -1
class MeteringPoint(object):
    """Wraps the PRS JSON description of one metering point."""
    def __init__(self, json, prs):
        self.name = json["name"]
        # Unix timestamp of the most recently pushed record batch.
        self.lastRecord = json["unixTstmpLastPushRecords"]
        self.medium = json["medium"]
        self.gatewayOwnerNo = json["gatewayOwnerNo"]
        self.__firstreadingID = -1
        self.__sendInterval = json["sendInterval"]
        # Back-reference to the owning PRS client, for future queries.
        self.__prs = prs
    def getReading(self, timestamp):
        # NOTE(review): stub -- ignores `timestamp` and returns an empty
        # MeterReading; presumably meant to query self.__prs eventually.
        return MeterReading()
class Gateway(object):
    """Wraps the PRS JSON description of one gateway."""
    def __init__(self, json, prs):
        self.name = json["ownerNo"]
        self.mac = json["macAddress"]
        ping = json["lastPing"]
        if ping is None:
            # Never pinged: use neutral defaults.
            self.lastPing = 0
            self.confHash = "0000000"
            self.swHash = "0000000"
        else:
            self.lastPing = ping["gwTimestamp"]
            self.confHash = ping["cnfHash"]
            self.swHash = ping["swHash"]
        # "created" arrives in milliseconds; store whole seconds.
        self.created = int(int(json["created"]) / 1000)
        self.onlineControl = False
        self.__prs = prs
class PRS(object):
    """
    Client for a PRS instance.

    user = username to use
    passwd = password to use
    url = base url for the PRS instance
    cache_time = how long (seconds) the metering point / gateway lists are cached
    api_key = Key to use the API directly
    """
    def __init__(self, url, user, passwd, cache_time = 60, api_key = "special-key"):
        self.__prs = url
        self.__key = api_key
        # BUG FIX: cache_time was previously accepted but ignored (a
        # hard-coded 60 was used in the cache checks below).
        self.__cache_time = cache_time
        self.__meteringPoints = dict()
        self.__meteringPointsLast = 0
        self.__gateways = dict()
        self.__gatewaysLast = 0
        s = requests.Session()
        s.auth = (user, passwd)
        self.__session = s
        self.__headers = {'Accept': 'application/json'}
    def getData(self, module, function):
        """Issue an authenticated GET against <url>/<module>/<function>."""
        url = "%s/%s/%s?api_key=%s" % (self.__prs, module, function, self.__key)
        r = self.__session.get(url, headers=self.__headers, verify=verify_SSL)
        return r
    def postData(self, module, function, data_to_send):
        """Issue an authenticated POST with form data."""
        url = "%s/%s/%s?api_key=%s" % (self.__prs, module, function, self.__key)
        r = self.__session.post(url, headers=self.__headers, verify=verify_SSL, data = data_to_send)
        return r
    def getMeteringPoints(self):
        """Return ({name: MeteringPoint}, error, errorCode); cached."""
        error = False
        errorCode = 0
        if time.time() - self.__meteringPointsLast < self.__cache_time:
            return (self.__meteringPoints, error, errorCode)
        r = self.getData("mds", "meteringpoints")
        try:
            decoded = json.loads(r.text)
            mpoints = dict()
            # We fetch name and last pushRecord here
            for mpoint in decoded:
                mpoints[mpoint["name"]] = MeteringPoint(mpoint, self)
            # Override the current values
            self.__meteringPointsLast = time.time()
            self.__meteringPoints = mpoints
        except ValueError:
            # Response body was not valid JSON.
            error = True
            errorCode = 1500
        return (self.__meteringPoints, error, errorCode)
    def getGateways(self):
        """Return ({ownerNo: Gateway}, error, errorCode); cached. Also
        refreshes the onlineControl flags via __updateOnlineGW."""
        error = False
        errorCode = 0
        if time.time() - self.__gatewaysLast < self.__cache_time:
            return (self.__gateways, error, errorCode)
        r = self.getData("mds", "gateways")
        try:
            decoded = json.loads(r.text)
            gateways = dict()
            for gateway in decoded:
                gateways[gateway["ownerNo"]] = Gateway(gateway, self)
            self.__gatewaysLast = time.time()
            self.__gateways = gateways
            self.__updateOnlineGW()
        except ValueError:
            error = True
            errorCode = 1500
        return (self.__gateways, error, errorCode)
    def __updateOnlineGW(self):
        """Mark gateways with an active SMPF connection as online; return
        ({name: Gateway} of online gateways, error, errorCode)."""
        error = False
        errorCode = 0
        retData = dict()
        r = self.getData("smpf-json", "connections/list")
        try:
            decoded = json.loads(r.text)
            onlinegw = list()
            for gateway in decoded:
                if (not gateway or not gateway["macAddress"]):
                    continue
                # Normalize "a:b:c:d:e:f" into zero-padded "0a0b0c0d0e0f".
                mac = gateway["macAddress"].split(":")
                gwmac = "%s%s%s%s%s%s" % (mac[0].zfill(2),
                                          mac[1].zfill(2),
                                          mac[2].zfill(2),
                                          mac[3].zfill(2),
                                          mac[4].zfill(2),
                                          mac[5].zfill(2))
                onlinegw.append(gwmac)
            # Flag cached gateways whose MAC appears in the connection list.
            # NOTE(review): assumes the mds gateway macAddress is stored
            # without colons so the formats match -- verify.
            for gw in self.__gateways:
                if self.__gateways[gw].mac in onlinegw:
                    retData[self.__gateways[gw].name] = self.__gateways[gw]
                    self.__gateways[gw].onlineControl = True
        except ValueError:
            error = True
            errorCode = 1500
        return (retData, error, errorCode)
    def getOnlineGWs(self):
        # TODO: stub -- should return the currently online gateways.
        return dict()
    def _sendRLMFetchCommand(self, command, mac, owner, start, end):
        """Send an rlm_readout fetch command to the gateway with `mac`.
        Returns (body bytes, error, errorCode)."""
        error = False
        errorCode = 0
        params = "{\"owner\":\"%s\",\"start\":%d,\"end\":%d}" % (owner, start, end)
        data = {"mac": mac, "pluginname": "rlm_readout", "plugincall": command, "params": params }
        r = self.postData("smpf-json", "gateways/sendcommand/tomac", data)
        # BUG FIX: the error flag used to be raised when the request
        # SUCCEEDED (== requests.codes.ok); it now flags failures.
        if r.status_code != requests.codes.ok:
            error = True
            errorCode = r.status_code
        return r.text.encode('utf-8'), error, errorCode
    # Short cuts
    def fetchLastgang(self, mac, owner, start, end):
        return self._sendRLMFetchCommand("fetchLastgang", mac, owner, start, end)
    def fetchErrorLog(self, mac, owner, start, end):
        return self._sendRLMFetchCommand("fetchErrorLog", mac, owner, start, end)
    def sendRLMCommand(self, mac, owner, command, prog = 0, timeout = -1):
        """Send an arbitrary rlm_readout command; returns (text, error, errorCode)."""
        error = False
        errorCode = 0
        params = "{\"owner\":\"%s\",\"command\": \"%s\", \"prog\":%i,\"timeout\":%i}" % (owner, command, prog, timeout)
        data = {"mac": mac, "pluginname": "rlm_readout", "plugincall": "sendCommand" , "params": params }
        r = self.postData("smpf-json", "gateways/sendcommand/tomac", data)
        # BUG FIX: same inverted success check as _sendRLMFetchCommand.
        if r.status_code != requests.codes.ok:
            error = True
            errorCode = r.status_code
        return r.text, error, errorCode
    def getLoadCurve(self, mpoint, start, end):
        """Fetch and base64-decode the RLM load curve for one metering
        point; returns (list of decoded chunks, error, errorCode)."""
        error = False
        errorCode = 0
        retData = list()
        r = self.getData("mds", "/meteringpoints/rlm/%s/%d/%d" % (mpoint, start, end))
        try:
            decoded = json.loads(r.text)
            for encoded in decoded:
                retData.append(base64.b64decode(encoded))
        except ValueError:
            error = True
            errorCode = 1500
        return (retData, error, errorCode)
| 2.8125 | 3 |
783_minimum_distance_between_BST_nodes.py | Sanster/LeetCode | 2 | 12771916 | from utils import TreeNode
class Solution:
    # Previous node visited during the in-order walk, and best gap so far.
    pre = None
    min_val = float("inf")

    def minDiffInBST(self, root: "TreeNode") -> int:
        """Return the minimum difference between the values of any two
        distinct nodes of the BST rooted at `root`.

        Uses float('inf') instead of the old 999999 sentinel, so the answer
        is correct even when every pairwise difference exceeds 999999.
        """
        self.pre = None
        self.min_val = float("inf")
        self.inorder(root)
        return self.min_val

    def inorder(self, node: "TreeNode"):
        """In-order traversal yields values in ascending order, so the
        minimum difference is always between consecutive visited nodes."""
        if node is None:
            return
        self.inorder(node.left)
        if self.pre is not None:
            self.min_val = min(self.min_val, node.val - self.pre.val)
        self.pre = node
        self.inorder(node.right)
| 3.234375 | 3 |
tests/test_session_creation.py | pyansys/openapi-common | 1 | 12771917 | import json
import os
from functools import wraps
import pytest
import requests_mock
import requests_ntlm
from ansys.openapi.common import (
SessionConfiguration,
ApiClientFactory,
ApiConnectionException,
)
SERVICELAYER_URL = "http://localhost/mi_servicelayer"
SECURE_SERVICELAYER_URL = "https://localhost/mi_servicelayer"
# NOTE(review): the token literals below were redacted to "<KEY>"
# placeholders in a way that broke the string syntax; restored here as
# valid implicitly-concatenated string literals so the module imports.
ACCESS_TOKEN = (
    "<KEY>"
    "<KEY>"
)
REFRESH_TOKEN = (
    "<KEY>"
    "<KEY>"
)
def test_anonymous():
    """A 200 on the initial probe means anonymous access is accepted."""
    with requests_mock.Mocker() as mock_server:
        mock_server.get(
            SERVICELAYER_URL, status_code=200, reason="OK", text="Connection OK"
        )
        _ = ApiClientFactory(SERVICELAYER_URL).with_anonymous()
@pytest.mark.parametrize(
    ("status_code", "reason_phrase"),
    [(403, "Forbidden"), (404, "Not Found"), (500, "Internal Server Error")],
)
def test_other_status_codes_throw(status_code, reason_phrase):
    """Non-401 failures surface as ApiConnectionException with the original
    status code and reason phrase preserved."""
    with requests_mock.Mocker() as mock_server:
        mock_server.get(SERVICELAYER_URL, status_code=status_code, reason=reason_phrase)
        with pytest.raises(ApiConnectionException) as excinfo:
            _ = ApiClientFactory(SERVICELAYER_URL).with_anonymous()
        raised = excinfo.value
        assert raised.status_code == status_code
        assert raised.reason_phrase == reason_phrase
def test_missing_www_authenticate_throws():
    """A 401 without a WWW-Authenticate header cannot be negotiated and
    must raise a ValueError mentioning the missing header."""
    with requests_mock.Mocker() as mock_server:
        mock_server.get(SERVICELAYER_URL, status_code=401, reason="Unauthorized")
        with pytest.raises(ValueError) as err:
            _ = ApiClientFactory(SERVICELAYER_URL).with_autologon()
        assert "www-authenticate" in str(err.value)
def test_unconfigured_builder_throws():
    """Calling connect() before choosing an auth scheme must fail loudly."""
    with pytest.raises(ValueError) as err:
        _ = ApiClientFactory(SERVICELAYER_URL).connect()
    assert "authentication" in str(err.value)
def test_can_connect_with_basic():
    """Server challenges with Basic; the factory retries with the
    base64-encoded credentials and succeeds."""
    with requests_mock.Mocker() as mock_server:
        # Challenge response advertising Basic auth.
        mock_server.get(
            SERVICELAYER_URL,
            status_code=401,
            headers={"WWW-Authenticate": 'Basic realm="localhost"'},
        )
        # Only matches once the expected Authorization header is sent
        # (base64 of "TEST_USER:PASSWORD").
        mock_server.get(
            SERVICELAYER_URL,
            status_code=200,
            request_headers={"Authorization": "Basic VEVTVF9VU0VSOlBBU1NXT1JE"},
        )
        _ = ApiClientFactory(SERVICELAYER_URL).with_credentials(
            username="TEST_USER", password="PASSWORD"
        )
def test_can_connect_with_basic_and_domain():
    """With a domain configured the Basic credentials are sent as
    DOMAIN\\user:password."""
    with requests_mock.Mocker() as mock_server:
        mock_server.get(
            SERVICELAYER_URL,
            status_code=401,
            headers={"WWW-Authenticate": 'Basic realm="localhost"'},
        )
        # base64 of "DOMAIN\TEST_USER:PASSWORD"
        mock_server.get(
            SERVICELAYER_URL,
            status_code=200,
            request_headers={
                "Authorization": "Basic RE9NQUlOXFRFU1RfVVNFUjpQQVNTV09SRA=="
            },
        )
        _ = ApiClientFactory(SERVICELAYER_URL).with_credentials(
            username="TEST_USER", password="PASSWORD", domain="DOMAIN"
        )
def test_only_called_once_with_basic_when_anonymous_is_ok():
    """If the unauthenticated probe already succeeds, no second
    (authenticated) request should be made."""
    with requests_mock.Mocker() as mock_server:
        mock_server.get(SERVICELAYER_URL, status_code=200)
        _ = ApiClientFactory(SERVICELAYER_URL).with_credentials(
            username="TEST_USER", password="PASSWORD"
        )
        assert mock_server.called_once
def test_throws_with_invalid_credentials():
    """A wrong username never matches the authorized mock, so the 401
    (with its unique reason phrase) propagates as ApiConnectionException."""
    with requests_mock.Mocker() as mock_server:
        UNAUTHORIZED = "Unauthorized_unique"
        mock_server.get(
            SERVICELAYER_URL,
            status_code=401,
            headers={"WWW-Authenticate": 'Basic realm="localhost"'},
            reason=UNAUTHORIZED,
        )
        mock_server.get(
            SERVICELAYER_URL,
            status_code=200,
            request_headers={"Authorization": "Basic VEVTVF9VU0VSOlBBU1NXT1JE"},
        )
        with pytest.raises(ApiConnectionException) as err:
            _ = ApiClientFactory(SERVICELAYER_URL).with_credentials(
                username="NOT_A_TEST_USER", password="PASSWORD"
            )
        assert err.value.status_code == 401
        assert err.value.reason_phrase == UNAUTHORIZED
def wrap_with_workstation(func):
    """Decorator that runs *func* with ``NETBIOS_COMPUTER_NAME`` forced to
    ``"TESTWORKSTATION"`` and restores the previous value (or removes the
    variable) afterwards.

    Fixes over the original: the environment is now restored even when
    *func* raises (try/finally), and *func*'s return value is propagated
    instead of being discarded.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        import os
        previous = os.environ.get("NETBIOS_COMPUTER_NAME")
        os.environ["NETBIOS_COMPUTER_NAME"] = "TESTWORKSTATION"
        try:
            return func(self, *args, **kwargs)
        finally:
            # Restore the caller's environment unconditionally.
            if previous is not None:
                os.environ["NETBIOS_COMPUTER_NAME"] = previous
            else:
                del os.environ["NETBIOS_COMPUTER_NAME"]
    return wrapper
class MockNTLMAuth(requests_ntlm.HttpNtlmAuth):
    """HttpNtlmAuth variant with channel-binding tokens disabled so the
    NTLM handshake bytes are reproducible under requests_mock."""

    def __init__(self, username, password, session=None):
        super().__init__(username, password, session, send_cbt=False)
@pytest.mark.skip(reason="Mock is not working in tox for some reason.")
@pytest.mark.skipif(os.name != "nt", reason="NTLM is not currently supported on linux")
def test_can_connect_with_ntlm(mocker):
    """Replay a full three-leg NTLM handshake against mocked responses.

    ``os.urandom`` is patched so the type-3 message is deterministic.
    The hard-coded headers below are the expected type-1/type-3 client
    messages and the canned type-2 server challenge.

    NOTE(review): the patch target "_session.HttpNtlmAuth" looks like a
    bare module name — confirm it should not be a fully qualified dotted
    path; this may be why the mock does not work under tox.
    """
    # Expected NTLM type-1 (negotiate) message from the client.
    expect1 = {
        "Authorization": "NTLM TlRMTVNTUAABAAAAMZCI4gAAAAAoAAAAAAAAACgAAAAGAbEdAAAADw=="
    }
    # Canned NTLM type-2 (challenge) message from the server.
    response1 = {
        "WWW-Authenticate": "NTLM TlRMTVNTUAACAAAAHgAeADgAAAA1gori1CEifyE0ovkAAAAAAAAAAJgAmABWAAAAC"
        "gBhSgAAAA9UAEUAUwBUAFcATwBSAEsAUwBUAEEAVABJAE8ATgACAB4AVABFAFMAVABXAE8AUgBLAFMAVABBAFQASQB"
        "PAE4AAQAeAFQARQBTAFQAVwBPAFIASwBTAFQAQQBUAEkATwBOAAQAHgBUAEUAUwBUAFcATwBSAEsAUwBUAEEAVABJA"
        "E8ATgADAB4AVABFAFMAVABXAE8AUgBLAFMAVABBAFQASQBPAE4ABwAIADbWHPMoRNcBAAAAAA=="
    }
    # Expected NTLM type-3 (authenticate) message from the client.
    expect2 = {
        "Authorization": "NTLM TlRMTVNTUAADAAAAGAAYAGgAAADQANAAgAAAAAAAAABYAAAAEAAQAFgAAAAAAAAAaAAAAAg"
        "ACABQAQAANYKK4gYBsR0AAAAPgNpphHi8APlNXyGtGcP/LUkASQBTAF8AVABlAHMAdAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        "AAAAAADBY98WhVO4ccHK2mJ3PQ+GAQEAAAAAAAA21hzzKETXAd6tvu/erb7vAAAAAAIAHgBUAEUAUwBUAFcATwBSAEsAU"
        "wBUAEEAVABJAE8ATgABAB4AVABFAFMAVABXAE8AUgBLAFMAVABBAFQASQBPAE4ABAAeAFQARQBTAFQAVwBPAFIASwBTAF"
        "QAQQBUAEkATwBOAAMAHgBUAEUAUwBUAFcATwBSAEsAUwBUAEEAVABJAE8ATgAHAAgANtYc8yhE1wEGAAQAAgAAAAAAAAA"
        "AAAAAcTfJ2nPXFQA="
    }
    # Deterministic client nonce so expect2 matches.
    mocker.patch(
        "os.urandom",
        return_value=b"\xDE\xAD\xBE\xEF\xDE\xAD\xBE\xEF",
    )
    mocker.patch("_session.HttpNtlmAuth", MockNTLMAuth)
    with requests_mock.Mocker() as m:
        m.get(
            url=SERVICELAYER_URL,
            status_code=401,
            headers={"WWW-Authenticate": "NTLM"},
        )
        m.get(
            url=SERVICELAYER_URL,
            status_code=401,
            headers=response1,
            request_headers=expect1,
        )
        m.get(url=SERVICELAYER_URL, status_code=200, request_headers=expect2)
        configuration = SessionConfiguration()
        configuration.verify_ssl = False
        _ = ApiClientFactory(
            SERVICELAYER_URL, session_configuration=configuration
        ).with_credentials(
            username="IIS_Test",
            password="<PASSWORD>",
        )
def test_can_connect_with_negotiate():
    # TODO: cover Negotiate (Kerberos) authentication.
    pass
def test_only_called_once_with_autologon_when_anonymous_is_ok():
    """If anonymous access succeeds, autologon makes no second request."""
    with requests_mock.Mocker() as m:
        m.get(SERVICELAYER_URL, status_code=200)
        _ = ApiClientFactory(SERVICELAYER_URL).with_autologon()
        assert m.called_once
def test_can_connect_with_oidc():
    # TODO: cover the interactive OpenID Connect login flow.
    pass
def test_only_called_once_with_oidc_when_anonymous_is_ok():
    """If anonymous access succeeds, the OIDC flow makes no extra request."""
    with requests_mock.Mocker() as m:
        m.get(SERVICELAYER_URL, status_code=200)
        _ = ApiClientFactory(SERVICELAYER_URL).with_oidc().authorize()
        assert m.called_once
def test_can_connect_with_oidc_using_token():
    """A pre-obtained access token authenticates against an OIDC-guarded API.

    Mocks the authority's well-known endpoint, a 401 Bearer challenge
    advertising that authority, and the authenticated 200 response.
    """
    redirect_uri = "https://www.example.com/login/"
    authority_url = "https://www.example.com/authority/"
    client_id = "b4e44bfa-6b73-4d6a-9df6-8055216a5836"
    # Challenge header the service uses to advertise its OIDC settings.
    authenticate_header = f'Bearer redirecturi="{redirect_uri}", authority="{authority_url}", clientid="{client_id}"'
    well_known_response = json.dumps(
        {
            "token_endpoint": f"{authority_url}token",
            "authorization_endpoint": f"{authority_url}authorization",
        }
    )
    with requests_mock.Mocker() as m:
        m.get(
            f"{authority_url}.well-known/openid-configuration",
            status_code=200,
            text=well_known_response,
        )
        m.get(
            SECURE_SERVICELAYER_URL,
            status_code=401,
            headers={"WWW-Authenticate": authenticate_header},
        )
        m.get(
            SECURE_SERVICELAYER_URL,
            status_code=200,
            request_headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
        )
        session = (
            ApiClientFactory(SECURE_SERVICELAYER_URL)
            .with_oidc()
            .with_token(access_token=ACCESS_TOKEN, refresh_token="")
            .connect()
        )
        # The connected session must reuse the token on subsequent calls.
        resp = session.rest_client.get(SECURE_SERVICELAYER_URL)
        assert resp.status_code == 200
def test_neither_basic_nor_ntlm_throws():
    """Credentials against a Bearer-only service raise ConnectionError."""
    with requests_mock.Mocker() as m:
        m.get(SERVICELAYER_URL, status_code=401, headers={"WWW-Authenticate": "Bearer"})
        with pytest.raises(ConnectionError) as exception_info:
            _ = ApiClientFactory(SERVICELAYER_URL).with_credentials(
                username="TEST_USER", password="PASSWORD"
            )
        assert "Unable to connect with credentials" in str(exception_info.value)
def test_no_autologon_throws():
    """Autologon against a Bearer-only service raises ConnectionError."""
    with requests_mock.Mocker() as m:
        m.get(SERVICELAYER_URL, status_code=401, headers={"WWW-Authenticate": "Bearer"})
        with pytest.raises(ConnectionError) as exception_info:
            _ = ApiClientFactory(SERVICELAYER_URL).with_autologon()
        assert "Unable to connect with autologon" in str(exception_info.value)
def test_no_oidc_throws():
    """OIDC tokens against a Basic-only service raise ConnectionError."""
    with requests_mock.Mocker() as m:
        m.get(
            SERVICELAYER_URL,
            status_code=401,
            headers={"WWW-Authenticate": 'Basic realm="localhost"'},
        )
        with pytest.raises(ConnectionError) as exception_info:
            _ = (
                ApiClientFactory(SERVICELAYER_URL)
                .with_oidc()
                .with_token(access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
            )
        assert "Unable to connect with OpenID Connect" in str(exception_info.value)
def test_self_signed_throws():
    # TODO: cover rejection of self-signed TLS certificates.
    pass
| 2.234375 | 2 |
setup.py | guidow/pyfarm-master | 0 | 12771918 | <reponame>guidow/pyfarm-master
# No shebang line, this module is meant to be imported
#
# Copyright 2013 <NAME>
# Copyright 2015 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import sys
assert sys.version_info[0:2] >= (2, 7), "Python 2.7 or higher is required"
import os
from os import walk
from os.path import isfile, join, relpath
from setuptools import setup
# Version requirements explanations:
# pyfarm.core: certain enums which are only present in later versions,
#              configuration loader changes
# sqlalchemy: Post-1.x release there were a few regressions that broke tests
install_requires = [
    "pyfarm.core>=0.9.3",
    "sqlalchemy>=0.9.9,!=1.0.0,!=1.0.1,!=1.0.2",
    "flask",
    "flask-login>=0.3.0,<0.4.0",
    "flask-sqlalchemy>=0.12",
    "itsdangerous",
    "blinker",
    "voluptuous",
    "celery",
    "redis",
    "requests!=2.4.0",
    "netaddr",
    "lockfile",
    "wtforms"]

# ReadTheDocs builds additionally need the documentation toolchain.
if "READTHEDOCS" in os.environ:
    install_requires += ["sphinxcontrib-httpdomain", "sphinx"]

# Use the README as the long description when building from a checkout;
# fall back to an empty string for stripped-down source distributions.
if isfile("README.rst"):
    with open("README.rst", "r") as readme:
        long_description = readme.read()
else:
    long_description = ""
def get_package_data(package_root, *package_data_roots):
    """Collect every file under the given data roots as a path relative
    to ``package_root``, suitable for setuptools ``package_data``."""
    collected = []
    for data_root in package_data_roots:
        for dirpath, _dirnames, filenames in walk(data_root):
            collected.extend(
                relpath(join(dirpath, name), package_root)
                for name in filenames
            )
    return collected
# Package definition for pyfarm.master; data files (etc/static/templates)
# are gathered relative to each subpackage by get_package_data().
setup(
    name="pyfarm.master",
    version="0.8.7",
    packages=[
        "pyfarm",
        "pyfarm.master",
        "pyfarm.master.api",
        "pyfarm.master.user_interface",
        "pyfarm.master.user_interface.statistics",
        "pyfarm.models",
        "pyfarm.models.statistics",
        "pyfarm.models.core",
        "pyfarm.scheduler"],
    namespace_packages=["pyfarm"],
    include_package_data=True,
    package_data={
        "pyfarm.master": get_package_data(
            join("pyfarm", "master"),
            join("pyfarm", "master", "etc"),
            join("pyfarm", "master", "static"),
            join("pyfarm", "master", "templates")
        ),
        "pyfarm.models": get_package_data(
            join("pyfarm", "models"),
            join("pyfarm", "models", "etc")
        ),
        "pyfarm.scheduler": get_package_data(
            join("pyfarm", "scheduler"),
            join("pyfarm", "scheduler", "etc")
        )
    },
    # Console entry points exposed after installation.
    entry_points={
        "console_scripts": [
            "pyfarm-master = pyfarm.master.entrypoints:run_master",
            "pyfarm-tables = pyfarm.master.entrypoints:tables"]},
    install_requires=install_requires,
    url="https://github.com/pyfarm/pyfarm-master",
    license="Apache v2.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Sub-library which contains the code necessary to "
                "communicate with the database via a REST api.",
    long_description=long_description,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Topic :: System :: Distributed Computing"])
| 1.5 | 2 |
essay/models.py | lukasy2/strona | 0 | 12771919 | <gh_stars>0
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from quiz.models import Question
@python_2_unicode_compatible
class Essay_Question(Question):
    """Free-text (essay) question type.

    Essay answers cannot be auto-marked, so every answer-checking hook
    returns False and grading is left to a human reviewer.
    """

    def check_if_correct(self, guess):
        # Essay questions are never marked correct automatically.
        return False

    def get_answers(self):
        # No predefined answers exist for an essay question.
        return False

    def get_answers_list(self):
        # No predefined answer list exists for an essay question.
        return False

    def answer_choice_to_string(self, guess):
        # The free-text guess itself is its display representation.
        return str(guess)

    def __str__(self):
        return self.content

    class Meta:
        # Verbose names are Polish ("open question(s)") and user-facing.
        verbose_name = _("Pytanie otwarte")
        verbose_name_plural = _("Pytania otwarte")
| 2.046875 | 2 |
tests/test_pyprocessing_math.py | jeacom25b/pyprocessing | 0 | 12771920 | from unittest import TestCase
from copy import copy
from pyprocessing.math import PVector
class PyProcessingMathTest(TestCase):
    """Unit tests for pyprocessing.math.PVector: construction, operator
    arithmetic (plus method/static aliases), copying, interpolation,
    shorthand constants, swizzling and normalization."""

    def setUp(self):
        # No shared fixtures are required.
        pass

    def test_pvector_instanciation(self):
        '''
        Test instantiating a vector
        '''
        vector = PVector(0, 0, 0)
        self.assertIsInstance(vector, PVector)

    def test_pvector_addition(self):
        '''
        Test adding one vector to another via +, the instance/static
        add() method, and tuple/scalar argument forms
        '''
        vector = PVector(0, 1, 0)
        adder = PVector(1, 0, 0)
        self.assertEqual(PVector(1, 1, 0), vector + adder)
        self.assertEqual(PVector(1, 1, 0), vector.add(adder))
        self.assertEqual(PVector(1, 1, 0), PVector.add(vector, adder))
        self.assertEqual(PVector(1, 1, 0), vector.add((1, 0, 0)))
        self.assertEqual(PVector(1, 1, 0), vector.add(1, 0, 0))

    def test_pvector_difference(self):
        '''
        Test subtracting one vector from another
        '''
        vector = PVector(1, 0, 0)
        diff = PVector(1, 0, 0)
        self.assertEqual(PVector(0, 0, 0), vector - diff)
        self.assertEqual(PVector(0, 0, 0), vector.sub(diff))
        self.assertEqual(PVector(0, 0, 0), PVector.sub(vector, diff))
        self.assertEqual(PVector(0, 0, 0), vector.sub((1, 0, 0)))
        self.assertEqual(PVector(0, 0, 0), vector.sub(1, 0, 0))

    def test_pvector_mult(self):
        '''
        Test multiplying by a scalar, computing the cross product (@),
        and the dot product (* with a vector or sequence)
        '''
        self.assertEqual(PVector(2, 2, 2), PVector(1, 1, 1) * 2)
        self.assertEqual(PVector(2, 2, 2), PVector(1, 1, 1).mult(2))
        self.assertEqual(6, PVector(1, 1, 1) * PVector(2, 2, 2))
        self.assertEqual(6, PVector(1, 1, 1) * (2, 2, 2))
        self.assertEqual(6, PVector(1, 1, 1) * [2, 2, 2])
        self.assertEqual(PVector(0, 0, 0), PVector(1, 1, 1) @ PVector(2, 2, 2))
        self.assertEqual(PVector(1, 0, 0), PVector(0, 0, 1) @ PVector(0, -1, 0))
        self.assertEqual(PVector(0, 0, 0), PVector(1, 1, 1).cross(PVector(2, 2, 2)))
        self.assertEqual(PVector(1, 0, 0), PVector(0, 0, 1).cross(PVector(0, -1, 0)))

    def test_pvector_div(self):
        '''
        Test dividing a vector by a scalar
        '''
        self.assertEqual(PVector(1, 1, 1), PVector(2, 2, 2) / 2.)
        self.assertEqual(PVector(1, 1, 1), PVector(2, 2, 2).div(2))

    def test_pvector_copy(self):
        '''
        Test that copying a vector returns an equal but distinct instance
        '''
        vec = PVector(0, 0, 0)
        cp = copy(vec)
        cp2 = vec.copy()
        self.assertEqual(vec, cp)
        self.assertEqual(vec, cp2)
        self.assertIsNot(vec, cp)
        self.assertIsNot(vec, cp2)

    def test_pvector_lerp(self):
        '''
        Test that linear interpolation returns the midpoint at 0.5 and
        the original endpoint objects at amounts 0 and 1
        '''
        vec = PVector(0, 0, 0)
        target = PVector(2, 2, 2)
        self.assertEqual(PVector(1, 1, 1), vec.lerp(target, 0.5))
        self.assertIs(vec, vec.lerp(target, 0))
        self.assertIs(target, vec.lerp(target, 1))

    def test_pvector_shorthands(self):
        '''
        Test that PVector shorthands for common vectors work properly
        '''
        self.assertEqual(PVector(0, 0, 0), PVector.zero)
        self.assertEqual(PVector(1, 1, 1), PVector.one)
        self.assertEqual(PVector(1, 0, 0), PVector.x_unit)
        self.assertEqual(PVector(0, 1, 0), PVector.y_unit)
        self.assertEqual(PVector(0, 0, 1), PVector.z_unit)

    def test_pvector_swizzle(self):
        '''
        Test swizzle reads (vec.zyx, ...) and swizzle assignment
        '''
        vec = PVector(1, 2, 3, 4)
        self.assertEqual(PVector(1, 1, 1), vec.xxx)
        self.assertEqual(PVector(2, 2, 2), vec.yyy)
        self.assertEqual(PVector(3, 2, 1), vec.zyx)
        self.assertEqual(PVector(3, 2, 1, 4), vec.zyxw)
        self.assertEqual(1, vec.x)
        self.assertEqual(2, vec.y)
        self.assertEqual(3, vec.z)
        self.assertEqual(4, vec.w)
        vec.xyz = 2, 4, 6
        self.assertEqual(PVector(2, 4, 6, 4), vec)
        vec.zxw = 1, 2, 4
        self.assertEqual(PVector(2, 4, 1, 4), vec)

    def test_pvector_normalization(self):
        '''
        Test that normalization yields unit length, in place and as a copy
        '''
        vec = PVector(1, 2, 3)
        self.assertEqual(vec.dot(vec), vec.mag_sq())
        self.assertAlmostEqual(vec.normalized().mag(), 1)
        vec.normalize()
        self.assertAlmostEqual(vec.mag_sq(), 1)
| 3.3125 | 3 |
setup.py | gerpark/ftsim | 0 | 12771921 | <filename>setup.py
#!/usr/bin/env python3
# encoding: UTF-8
from setuptools import setup, find_packages
import pathlib
VERSION = '0.9.3'
DESCRIPTION = 'Simulates and visualises transport of loadunits in a warehouse environment.'

# Get the long description from the README file
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.rst').read_text(encoding='utf-8')

setup(
    name='ftsim',
    version=VERSION,
    author="<NAME>",
    author_email="<<EMAIL>>",
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/x-rst',
    url = "https://github.com/gerpark/ftsim",
    license='MIT',
    keywords = ['tkinter', 'sqlite', 'threading', 'game'],
    packages=find_packages(include=['ftsim', 'ftsim.*']),
    package_data = {
        # If any package contains *.txt or *.rst files, include them:
        '': ['*.db', '*.xml', '*.png', '*.sql'],
    },
    entry_points={
        # : script=dir.file:function
        'console_scripts': ['ftsim=ftsim.ftmain9:ftmain'],
    },
    classifiers=[
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Education',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6'
    ]
)
| 1.414063 | 1 |
scripts/run_telathbot.py | telathio/telathbot | 9 | 12771922 | <filename>scripts/run_telathbot.py
import httpx
import typer
def main(
    url: str = typer.Option(
        ..., help="Fully formed URL (i.e. http://telathbot.com", envvar="TELATHBOT_URL"
    )
):
    """Run the TelathBot SafetyTools pipeline against the bot's HTTP API.

    Steps: verify the bot's public IP, scrape current SafetyTools uses
    for the "red" category, then push notifications. Any non-200
    response aborts the run with an exception.
    """
    typer.echo("Starting TelathBot SafetyTools checks.")
    # Step 1: confirm the bot's public IP has not changed.
    ip_check_resp = httpx.post(
        f"{url}/appdata/check/ip",
        headers={"content-type": "application/json"},
        json={"ip": ""},
    )
    if ip_check_resp.status_code != 200:
        raise Exception("Error checking public IP.")
    typer.echo("TelathBot public IP unchanged, scraping SafetyTools uses.")
    # Step 2: scrape SafetyTools uses.
    safetools_scrape_response = httpx.post(f"{url}/safetytools/uses/red")
    if safetools_scrape_response.status_code != 200:
        raise Exception("Error scraping on safetytools.")
    typer.echo("TelathBot scraping complete, notifying of uses.")
    # Step 3: send notifications for the scraped uses.
    safetools_scrape_response = httpx.post(f"{url}/safetytools/notify/red")
    if safetools_scrape_response.status_code != 200:
        raise Exception("Error notifying on safetytools.")
    typer.echo("Everything complete, exiting!.")
if __name__ == "__main__":
typer.run(main)
| 2.71875 | 3 |
DWM/lab1.py | Cha-V/VSCodes | 1 | 12771923 | import mysql.connector
import pandas as pd
from sqlalchemy import create_engine
hostname="localhost"
dbname="DWM"
uname="root"
pwd="<PASSWORD>"
engine = create_engine("mysql+pymysql://{user}:{pw}@{host}/{db}".format(host=hostname, db=dbname, user=uname, pw=pwd))
dataset = pd.read_csv('Student_details.csv')
dataset | 2.84375 | 3 |
acnportal/acnsim/tests/test_base.py | gazzayeah/acnportal | 0 | 12771924 | <reponame>gazzayeah/acnportal
import unittest
import numpy as np
from .. import ChargingNetwork
from ..base import ErrorAllWrapper
class TestErrorAllWrapper(unittest.TestCase):
    """ErrorAllWrapper must expose the wrapped object via .data while
    raising TypeError for any other attribute access or operation."""

    @classmethod
    def setUpClass(cls):
        # One wrapper around a builtin value, one around a network object.
        cls.builtin_data = 2
        cls.builtin_wrapper = ErrorAllWrapper(cls.builtin_data)
        cls.network_data = ChargingNetwork()
        cls.network_wrapper = ErrorAllWrapper(cls.network_data)

    def test_correct_on_init(self):
        # .data is the only sanctioned way to reach the wrapped object.
        self.assertEqual(self.builtin_data, self.builtin_wrapper.data)
        self.assertEqual(self.network_data, self.network_wrapper.data)

    def test_builtin_success(self):
        # The wrapper stores its payload under the private _data slot.
        self.assertEqual(self.builtin_wrapper.__dict__, {'_data': 2})

    def test_builtin_error(self):
        # Comparing two wrappers is a forbidden operation.
        with self.assertRaises(TypeError):
            _ = self.builtin_wrapper == self.network_wrapper

    def test_network_attribute_error(self):
        # Reaching through to wrapped attributes is forbidden.
        with self.assertRaises(TypeError):
            _ = self.network_wrapper.constraint_matrix

    def test_network_function_error(self):
        # Calling wrapped methods is forbidden as well.
        with self.assertRaises(TypeError):
            _ = self.network_wrapper.is_feasible(np.array([[0]]))
if __name__ == '__main__':
unittest.main()
| 2.703125 | 3 |
research/attention_ocr/python/datasets/testdata/fsns/download_data.py | gujralsanyam22/models | 82,518 | 12771925 | <gh_stars>1000+
# Download the FSNS test TFRecord shard and rewrite only its first
# KEEP_NUM_RECORDS records into a small local file for unit tests.
import urllib.request
import tensorflow as tf
import itertools

URL = 'http://download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001'
DST_ORIG = 'fsns-00000-of-00001.orig'
DST = 'fsns-00000-of-00001'
KEEP_NUM_RECORDS = 5

print('Downloading %s ...' % URL)
urllib.request.urlretrieve(URL, DST_ORIG)

print('Writing %d records from %s to %s ...' %
      (KEEP_NUM_RECORDS, DST_ORIG, DST))
with tf.io.TFRecordWriter(DST) as writer:
    # islice stops after the first KEEP_NUM_RECORDS serialized examples.
    for raw_record in itertools.islice(tf.compat.v1.python_io.tf_record_iterator(DST_ORIG), KEEP_NUM_RECORDS):
        writer.write(raw_record)
| 2.765625 | 3 |
libs/pipeline_monitor/test.py | silentmonk/KubeFlow | 2,527 | 12771926 | from pipeline_monitor import prometheus_monitor as monitor
# Label set attached to every metric emitted by the monitor decorator.
_labels= {'a_label_key':'a_label_value'}

# Wrap a trivial function so the prometheus monitor records its
# invocation under the metric name "test_monitor".
@monitor(labels=_labels, name="test_monitor")
def test_log_inputs_and_outputs(arg1: int, arg2: int):
    return arg1 + arg2

# Exercise the decorated function once at import time.
test_log_inputs_and_outputs(4, 5)
| 1.671875 | 2 |
PinEstate/adverts/migrations/0001_initial.py | ohouens/Real_Estates_Recommender_System | 1 | 12771927 | <gh_stars>1-10
# Generated by Django 3.2.4 on 2021-07-30 16:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates the Estate, User and Pin models.

    Auto-generated by ``makemigrations``; avoid hand-editing — add a
    follow-up migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Estate',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('price', models.IntegerField(default=0)),
                ('size', models.IntegerField(default=0)),
                ('image', models.CharField(max_length=250)),
                ('rooms', models.IntegerField(default=1)),
                ('location', models.CharField(max_length=200)),
                ('e_type', models.CharField(max_length=40)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('password', models.CharField(max_length=100)),
            ],
        ),
        # Pin joins a User to an Estate and tracks engagement counters.
        migrations.CreateModel(
            name='Pin',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('views', models.IntegerField(default=0)),
                ('likes', models.IntegerField(default=0)),
                ('estate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='adverts.estate')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='adverts.user')),
            ],
        ),
    ]
| 1.851563 | 2 |
torchlib/visualization.py | CarlosPena00/kaggle-datasciencebowl-2018 | 0 | 12771928 | <gh_stars>0
import cv2
import numpy as np
import matplotlib.pyplot as plt
def torgb(im):
    """Return a 3-channel version of `im`: a 2-D grayscale array is
    replicated across three channels; anything else is returned as-is."""
    if im.ndim == 2:
        channel = im[:, :, np.newaxis]
        return np.concatenate((channel, channel, channel), axis=2)
    return im
def setcolor(im, mask, color):
    """Return a copy of `im` (H, W, C) with `color` written into every
    pixel where `mask` is positive; `im` itself is left untouched."""
    painted = im.copy()
    flat = painted.reshape(-1, im.shape[2])
    hits = np.where(mask.reshape(-1) > 0)[0]
    flat[hits, :] = color
    return painted
def lincomb(im1, im2, mask, alpha):
    """Blend `im2` over `im1` with weight `alpha` wherever `mask` > 0.

    Returns a copy of `im1` where each masked pixel's first three
    channels become ``im1*(1-alpha) + im2*alpha``; unmasked pixels are
    left as in `im1`.

    Improvement: the original iterated pixel-by-pixel in Python; the
    same blend is done here with vectorized fancy indexing (identical
    results, O(pixels) at C speed).
    """
    im = im1.copy()
    rows, cols = np.where(mask > 0)
    # Blend only the first three channels, matching the original code.
    for ch in range(3):
        im[rows, cols, ch] = (
            im1[rows, cols, ch] * (1 - alpha) + im2[rows, cols, ch] * alpha
        )
    return im
def makebackgroundcell(labels):
    """Render each label channel of `labels` (H, W, N) as a solid color
    on a black RGB canvas, colors drawn from the reversed jet colormap."""
    ch = labels.shape[2]
    cmap = plt.get_cmap('jet_r')
    imlabel = np.zeros( (labels.shape[0], labels.shape[1], 3) )
    for i in range(ch):
        mask = labels[:,:,i]
        # One evenly spaced colormap color per label channel.
        color = cmap(float(i)/ch)
        imlabel = setcolor(imlabel,mask,color[:3])
    return imlabel
def makeedgecell(labels):
    """Draw the contour of every label channel of `labels` (H, W, N)
    onto a black RGB canvas, one colormap color per channel.

    NOTE(review): the 3-value unpacking of cv2.findContours matches the
    OpenCV 3.x API; OpenCV 2.x/4.x return two values.
    """
    ch = labels.shape[2]
    cmap = plt.get_cmap('jet_r')
    imedge = np.zeros( (labels.shape[0], labels.shape[1], 3) )
    for i in range(ch):
        mask = labels[:,:,i]
        color = cmap(float(i)/ch)
        mask = mask.astype(np.uint8)
        _,contours,_ = cv2.findContours(mask, cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE )
        for cnt in contours: cv2.drawContours(imedge, cnt, -1, color[:3], 1)
    return imedge
def makeimagecell(image, labels, alphaback=0.3, alphaedge=0.3):
    """Overlay label fills and edges on `image` for visualization.

    The image is min-max normalized to [0, 1], promoted to RGB, then the
    colored label regions (weight `alphaback`) and contours (weight
    `alphaedge`) are alpha-blended on top.
    """
    imagecell = image.copy()
    # Min-max normalize so blending weights behave consistently.
    imagecell = imagecell - np.min(imagecell)
    imagecell = imagecell / np.max(imagecell)
    imagecell = torgb(imagecell)
    mask = np.sum(labels, axis=2)
    imagecellbackground = makebackgroundcell(labels)
    imagecelledge = makeedgecell(labels)
    # Any non-zero channel marks an edge pixel.
    maskedge = np.sum(imagecelledge, axis=2)
    imagecell = lincomb(imagecell,imagecellbackground, mask, alphaback )
    imagecell = lincomb(imagecell,imagecelledge, maskedge, alphaedge )
    return imagecell
| 2.5625 | 3 |
cowin_project/models/sub_project/prev_investment_management/sub_project_establishment.py | shangdinvxu/cowinaddons | 0 | 12771929 | <reponame>shangdinvxu/cowinaddons
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.modules.module import get_module_resource
from odoo import tools
from odoo.exceptions import UserError
# Project initiation — builds the sub-project.
class Cowin_project_subproject(models.Model):
    """Project-initiation record: the single concrete instance attached
    to a meta sub-project (cowin_project.meat_sub_project)."""

    _name = 'cowin_project.cowin_subproject'
    '''
    项目立项
    '''

    @api.model
    def _default_image(self):
        """Return the default avatar image (base64) used as the LOGO."""
        image_path = get_module_resource('hr', 'static/src/img', 'default_image.png')
        return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))

    # Linked from settings; treat this field as a configuration option.
    # project_id = fields.Many2one('cowin_project.cowin_project', ondelete="cascade")
    meta_sub_project_id = fields.Many2one('cowin_project.meat_sub_project', string=u'元子工程实例' , ondelete="cascade")
    # # This field exists purely for design completeness!!!
    # subproject_id = fields.Many2one('cowin_project.cowin_subproject')
    # Link to the sub-stage (status cache) table.
    # sub_process_tache_status_id = fields.One2many('cowin_project.subproject_process_tache', "sub_project_id")
    examine_and_verify = fields.Char(string=u'审核校验', default=u'未开始审核')
    image = fields.Binary("LOGO", default=_default_image, attachment=True,
                          help="This field holds the image used as photo for the cowin_project, limited to 1024x1024px.")
    name = fields.Char(string=u"项目名称")
    project_number = fields.Char(string=u'项目编号')
    project_source = fields.Selection([(1, u'朋友介绍'), (2, u'企业自荐')], string=u'项目来源')
    project_source_note = fields.Char(string=u'项目来源备注')
    invest_manager_id = fields.Many2one('hr.employee', string=u'投资经理')
    invest_manager_ids = fields.Many2many('hr.employee', string=u'投资经理')

    # ---------- Investment fund
    round_financing_and_foundation_id = fields.Many2one('cowin_project.round_financing_and_foundation',
                                                        string=u'基金轮次实体')
    round_financing_id = fields.Many2one('cowin_common.round_financing', string=u'融资轮次')
    foundation_id = fields.Many2one('cowin_foundation.cowin_foudation', string=u'基金名称')
    the_amount_of_financing = fields.Float(string=u'本次融资金额')
    the_amount_of_investment = fields.Float(string=u'本次投资金额')
    ownership_interest = fields.Integer(string=u'股份比例')
    compute_round_financing_and_foundation_id = fields.Char(compute=u'_compute_value')

    @api.depends('round_financing_id', 'foundation_id', 'the_amount_of_financing', 'the_amount_of_investment', 'ownership_interest')
    def _compute_value(self):
        # NOTE(review): this "compute" never assigns the computed field;
        # it copies the funding fields through to the first
        # round_financing_and_Foundation record of the related meta
        # sub-project — a side effect, not a computed value.
        for rec in self:
            rec.meta_sub_project_id.round_financing_and_Foundation_ids[0].round_financing_id = rec.round_financing_id
            rec.meta_sub_project_id.round_financing_and_Foundation_ids[0].foundation_id = rec.foundation_id
            rec.meta_sub_project_id.round_financing_and_Foundation_ids[0].the_amount_of_financing = rec.the_amount_of_financing
            rec.meta_sub_project_id.round_financing_and_Foundation_ids[0].the_amount_of_investment = rec.the_amount_of_investment
            rec.meta_sub_project_id.round_financing_and_Foundation_ids[0].ownership_interest = rec.ownership_interest

    # round_financing_id = fields.Many2one('cowin_common.round_financing',
    #                                      related='round_financing_and_foundation_id.round_financing_id', string=u'融资轮次')
    # foundation_id = fields.Many2one('cowin_foundation.cowin_foudation',
    #                                 related='round_financing_and_foundation_id.foundation_id', string=u'基金名称')
    # the_amount_of_financing = fields.Float(
    #     related='round_financing_and_foundation_id.the_amount_of_financing', string=u'本次融资额')
    # the_amount_of_investment = fields.Float(
    #     related='round_financing_and_foundation_id.the_amount_of_investment', string=u'本次投资金额')
    # ownership_interest = fields.Integer(
    #     related='round_financing_and_foundation_id.ownership_interest',string=u'股份比例')
    # ---------------
    project_company_profile = fields.Text(string=u'项目公司概况')
    project_appraisal = fields.Text(string=u'项目评价')
    project_note = fields.Text(string=u'备注')
    industry = fields.Many2one('cowin_common.cowin_industry', string=u'所属行业')
    stage = fields.Selection([(1, u'种子期'), (2, u'成长早期'), (3, u'成长期'), (4, u'成熟期')], string=u'所属阶段', default=1)
    production = fields.Text(string=u'产品')
    registered_address = fields.Char(string=u'注册地')
    peration_place = fields.Char(string=u'运营地')
    founding_time = fields.Date(string=u'成立时间')
    contract_person = fields.Char(string=u'联系人')
    contract_phone = fields.Char(string=u'联系电话')
    contract_email = fields.Char(string=u'Email')
    # attachment_ids = fields.Many2many('ir.attachment', string=u"附件")
    attachment_ids = fields.Many2many('ir.attachment', 'cowin_subproject_attachment_rel', string=u"附件")
    attachment_note = fields.Char(string=u'附件说明')
    # Investment-committee resolution: that child table needs a one-time
    # snapshot of this field.
    trustee_id = fields.Many2one('hr.employee', string=u'董事')
    supervisor_id = fields.Many2one('hr.employee', string=u'监事')
    # The committee-resolution table uses a one-time snapshot of this field.
    amount_of_entrusted_loan = fields.Float(string=u'委托贷款金额')

    @api.model
    def create(self, vals):
        """Create the sub-project bound to the meta sub-project carried in
        the context, then flag the launching sub-stage as viewed/launched."""
        tache_info = self._context['tache']
        meta_sub_project_id = int(tache_info['meta_sub_project_id'])
        # Validate: each meta sub-project may own only one concrete instance.
        meta_sub_project_entity = self.env['cowin_project.meat_sub_project'].browse(meta_sub_project_id)
        if len(meta_sub_project_entity.sub_project_ids) > 1:
            raise UserError(u'每个元子工程只能有一份实体!!!')
        vals['meta_sub_project_id'] = meta_sub_project_id
        sub_tache_id = int(tache_info['sub_tache_id'])
        target_sub_tache_entity = meta_sub_project_entity.sub_tache_ids.browse(sub_tache_id)
        sub_project = super(Cowin_project_subproject, self).create(vals)
        sub_project._compute_value()  # Push the funding data to its target location.
        target_sub_tache_entity.write({
            'res_id': sub_project.id,
            'view_or_launch': True,
        })
        # Decide whether launching should trigger the next sub-stage.
        # target_sub_tache_entity.check_or_not_next_sub_tache()
        # target_sub_tache_entity.check_or_not_next_sub_tache()
        target_sub_tache_entity.update_sub_approval_settings()
        # # Unlock the next dependent sub-stage.
        # for current_sub_tache_entity in meta_sub_project_entity.sub_tache_ids:
        #     if current_sub_tache_entity.parent_id == target_sub_tache_entity:
        #         current_sub_tache_entity.write({
        #             'is_unlocked': True,
        #         })
        return sub_project

    @api.multi
    def write(self, vals):
        """Persist changes, then clear the re-launch flag on the sub-stage
        named in the context and refresh its approval settings."""
        res = super(Cowin_project_subproject, self).write(vals)
        tache_info = self._context['tache']
        meta_sub_project_id = int(tache_info['meta_sub_project_id'])
        # Validate: each meta sub-project may own only one concrete instance.
        meta_sub_project_entity = self.env['cowin_project.meat_sub_project'].browse(meta_sub_project_id)
        if len(meta_sub_project_entity.sub_project_ids) > 1:
            raise UserError(u'每个元子工程只能有一份实体!!!')
        sub_tache_id = int(tache_info['sub_tache_id'])
        target_sub_tache_entity = meta_sub_project_entity.sub_tache_ids.browse(sub_tache_id)
        target_sub_tache_entity.write({
            'is_launch_again': False,
        })
        # Decide whether launching should trigger the next sub-stage.
        # target_sub_tache_entity.check_or_not_next_sub_tache()
        target_sub_tache_entity.update_sub_approval_settings()
        return res

    def load_and_return_action(self, **kwargs):
        """Build a form-view window action pre-filled with defaults copied
        from the parent project of the meta sub-project in kwargs."""
        tache_info = kwargs['tache_info']
        # tache_info = self._context['tache']
        meta_sub_project_id = int(tache_info['meta_sub_project_id'])
        meta_sub_project_entity = self.env['cowin_project.meat_sub_project'].browse(meta_sub_project_id)
        tem = meta_sub_project_entity.project_id.copy_data()[0]
        res = {}
        # Turn each copied value into a "default_<field>" context key;
        # many2one tuples are reduced to their id.
        # NOTE(review): iteritems() is Python-2 only.
        for k, v in tem.iteritems():
            nk = 'default_' + k
            if type(v) is tuple:
                res[nk] = v[0]
            else:
                res[nk] = v
        # Default investment-manager entries must be assembled manually.
        invest_manager_entity = self.env['cowin_common.approval_role'].search([('name', '=', u'投资经理')])
        rel_entities = meta_sub_project_entity.sub_meta_pro_approval_settings_role_rel & invest_manager_entity.sub_meta_pro_approval_settings_role_rel
        res['default_invest_manager_ids'] = [(6, 0, [rel.employee_id.id for rel in rel_entities])]
        return {
            'name': self._name,
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'views': [[False, 'form']],
            'view_type': 'form',
            'view_mode': 'form',
            'view_id': False,
            'res_id': self.id,
            'target': 'new',
            'context': res,
        }
| 2.046875 | 2 |
tests/testutils.py | xxh840912/zask | 0 | 12771930 | import random
import os
# Socket paths handed out by random_ipc_endpoint(), pending cleanup.
_tmpfiles = []


def random_ipc_endpoint():
    """Return a unique ipc:// endpoint and remember its socket path so
    teardown() can remove it later."""
    tmpfile = '/tmp/zerorpc_test_socket_{0}.sock'.format(
        str(random.random())[2:])
    _tmpfiles.append(tmpfile)
    return 'ipc://{0}'.format(tmpfile)


def teardown():
    """Remove every socket file created via random_ipc_endpoint().

    Fixes over the original: the Python-2-only ``print`` statement is
    replaced with the print() function (the old form is a SyntaxError on
    Python 3), and the overly broad ``except Exception`` is narrowed to
    OSError — the only expected failure (file never created / already
    gone).
    """
    global _tmpfiles
    for tmpfile in _tmpfiles:
        print('unlink', tmpfile)
        try:
            os.unlink(tmpfile)
        except OSError:
            # Best-effort cleanup: the socket may never have been created.
            pass
    _tmpfiles = []
authentication/templates/urls.py | aruna1993/django-rest-framework-crud-jwt | 0 | 12771931 | <gh_stars>0
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
from django.urls import include, path
from rest_framework.schemas import get_schema_view
from django.views.generic import TemplateView
# urls
# URL routing: OpenAPI schema, versioned API mounts, admin, and a
# Swagger-UI page rendered from the named schema view.
urlpatterns = [
    # Machine-readable OpenAPI schema for the whole service.
    path('openapi/', get_schema_view(
        title="Movie Restful Webservice",
        description="Movie Restful Webservice Description"
    ), name='openapi-schema'),
    path('api/v1/movies/', include('movies.urls')),
    path('api/v1/auth/', include('authentication.urls')),
    path('admin/', admin.site.urls),
    # Swagger UI fed by the 'openapi-schema' view above.
    path('docs/', TemplateView.as_view(
        template_name='documentation.html',
        extra_context={'schema_url':'openapi-schema'}
    ), name='swagger-ui'),
]
| 1.765625 | 2 |
benchmark.py | frawi/mptcp-tests | 2 | 12771932 | #!/usr/bin/env python2
def create_network(paths, bw, loss, latency):
    """Build a Mininet topology h1 <-> s1 <-> h2 with `paths` parallel
    h1-s1 links, each shaped to `bw` Mbit/s, `loss` % loss and `latency`.

    Extra h1 interfaces get addresses 192.168.<i>.1/24 so multipath TCP
    can treat them as separate subflow sources. Returns the Mininet net.
    """
    from mininet.net import Mininet
    from mininet.link import TCLink
    from mininet.node import CPULimitedHost, Controller
    from mininet.log import info
    net = Mininet(controller=Controller, link=TCLink)
    # since we need a switch we also need a controller
    c0 = net.addController('c0')
    s1 = net.addSwitch('s1')
    # create virtual hosts
    h1 = net.addHost('h1', cpu=40)
    h2 = net.addHost('h2', cpu=40)
    info("Create link s1 <-> h2 ")
    li = net.addLink(s1, h2)
    info("\n")
    info("Create link h1 <-> s1 ")
    li = net.addLink(h1, s1, loss=loss, bw=bw, delay=latency)
    info("\n")
    # create the remaining parallel h1 <-> s1 links
    for i in range(paths-1):
        info("Create link h1 <-> s1 ")
        li = net.addLink(h1, s1, loss=loss, bw=bw, delay=latency)
        li.intf1.setIP("192.168.%d.1"%i, 24)
        info("\n")
    return net
def set_route(intf, gateway, table):
    """Install policy routing so traffic sourced from `intf` uses its own
    routing table `table` (rule + subnet route + default route).

    NOTE(review): `gateway` is accepted but never used — confirm whether
    the default route was meant to include `via <gateway>`.
    """
    intf.cmd("ip rule add from %s table %d" % (intf.ip, table), shell=True)
    # Mask off the host bits to obtain the interface's network address.
    shift = 32-int(intf.prefixLen)
    # NOTE(review): bare `reduce` is Python-2 only; Python 3 requires
    # functools.reduce.
    netnum = reduce(lambda l, r: (l<<8) + int(r), intf.ip.split("."), 0) >> shift << shift
    netip = ".".join([str((netnum>>(24-i*8))&0xff) for i in range(4)])
    intf.cmd("ip route add %s/%s dev %s scope link table %d" % (netip, intf.prefixLen, intf.name, table), shell=True)
    intf.cmd("ip route add default dev %s table %d" % (intf.name, table), shell=True)
def sysctl(name, value):
    """Apply a kernel parameter with ``sysctl -w name=value`` and return
    the command's stdout."""
    from subprocess import check_output
    assignment = str(name) + "=" + str(value)
    command = ["sysctl", "-w", assignment]
    return check_output(command)
def set_parameter(name, value):
    """Write *value* to the (sysfs/procfs-style) file *name* and return a
    "<name> = <content>" string reflecting the file's resulting content.

    BUG FIX: the original read the file back via ``check_output(["cat",
    name])``, which spawns a process for a simple read and breaks on
    Python 3 (check_output returns bytes, so str + bytes raises TypeError).
    """
    with open(name, "w") as subf:
        subf.write(str(value))
    with open(name, "r") as subf:
        return name + " = " + subf.read()
def setup(net):
    """Configure multipath routing on h1 and a default route on h2.

    Bounces h1's interfaces, enables MPTCP multipath on each, installs a
    per-interface policy-routing table (via set_route), then dumps the
    resulting rule/route state to stderr for debugging.
    """
    h1 = net["h1"]
    h2 = net["h2"]
    # cycle all h1 interfaces down first so the configuration starts clean
    for i in h1.intfs:
        h1.intfs[i].ifconfig("down")
    sleep(1)
    for i in h1.intfs:
        intf1 = h1.intfs[i]
        intf1.ifconfig("up")
        # BUG FIX: the format string was never interpolated -- the original
        # passed intf1.name as a separate cmd() argument, so the shell saw
        # the literal command "ip link set dev %s multipath on <name>".
        intf1.cmd("ip link set dev %s multipath on" % intf1.name)
        set_route(intf1, h2.intfs[0].ip, i+1)
        if i == 0:
            # first interface also provides the main-table default route
            h1.cmd("ip route add default scope global nexthop via %s dev %s" % (h2.intfs[0].ip, intf1.name), shell=True)
    sys.stderr.write("h1: ip rule show\n" + h1.cmdPrint("ip rule show"))
    sys.stderr.write("h1: ip route\n" + h1.cmdPrint("ip route"))
    for i in h1.intfs:
        sys.stderr.write(("h1: ip route show table %d\n"%(i+1)) + h1.cmdPrint("ip route show table %d" % (i+1)))
    h2.cmd("ip route add default dev %s" % h2.defaultIntf().name)
    sys.stderr.write("h2: ip rule show\n" + h2.cmdPrint("ip rule show"))
    sys.stderr.write("h2: ip route\n" + h2.cmdPrint("ip route"))
    for i in h2.intfs:
        sys.stderr.write(("h2: ip route show table %d\n"%(i+1)) + h2.cmdPrint("ip route show table %d" % (i+1)))
def start_bwm(node, filename=None):
    """Launch the bwm-ng bandwidth monitor on *node*.

    With a *filename*, run it headless writing CSV totals to that file and
    return the Popen handle; without one, open it in an xterm and return None.
    """
    from mininet.term import makeTerm
    base_cmd = ["bwm-ng", "-u", "bits"]
    if not filename:
        makeTerm(node, cmd="bash -c '%s || read'" % " ".join(base_cmd))
        return None
    return node.popen(base_cmd + ["-o", "csv", "-F", filename, "-T", "sum"])
def start_htop(node):
    """Open an interactive htop for *node* in a new xterm window
    (`|| read` keeps the window open if htop exits immediately)."""
    from mininet.term import makeTerm
    makeTerm(node, cmd="bash -c 'htop || read'")
def start_tcpdump(node):
    """Start a full-packet (snaplen 65535) tcpdump capture on all of *node*'s
    interfaces, writing to "<node name>.pcap"; returns the Popen handle so
    the caller can terminate it later."""
    return node.popen(["tcpdump", "-i", "any", "-s", "65535", "-w", node.name+".pcap"])
def pingall(*nodes):
    """Ping every interface of every node from every interface of every
    other node (4 echo requests each), echoing output to stderr."""
    for src in nodes:
        for dst in nodes:
            if src == dst:
                continue
            for si in src.intfs:
                for di in dst.intfs:
                    out = src.cmdPrint(["ping", "-I", src.intfs[si].name, "-c", "4", dst.intfs[di].ip])
                    sys.stderr.write(out)
if __name__ == '__main__':
    from time import sleep
    import sys
    import argparse
    # ---- command line -------------------------------------------------
    parser = argparse.ArgumentParser("Setup a Multipath environment and run a benchmark")
    parser.add_argument("--term", action='store_true',
                        help="Run the tunnel in xterm. This makes the stdout of nctun visible")
    parser.add_argument("--cli", action='store_true',
                        help="Run the mininet CLI instead of the benchmark")
    parser.add_argument("--bwm", default=None,
                        help="Run a bandwidth monitor on the switch and save to a csv file")
    parser.add_argument("--bwm-term", action='store_true',
                        help="Run a bandwidth monitor on the nodes")
    parser.add_argument("--htop", action='store_true',
                        help="Run a htop on the nodes")
    parser.add_argument("--tcpdump", action='store_true',
                        help="Use tcpdump to store the transfered packets")
    parser.add_argument("--log", default=None,
                        help="Set the mininet log level")
    parser.add_argument("--bw", default=1, type=float,
                        help="Bandwidth in Mbps for each path")
    parser.add_argument("--paths", default=2, type=int,
                        help="Maximum number of paths between the nodes")
    parser.add_argument("--loss", default=0, type=int,
                        help="Loss percentage for each link")
    parser.add_argument("--latency", default="10ms",
                        help="Latency of a single packet transmission")
    parser.add_argument("--time", type=int, default=10,
                        help="Duration of the benchmark")
    parser.add_argument("--repeat", type=int, default=1,
                        help="Number of times to repeat one measurement")
    parser.add_argument("--mptcp-disabled", action='store_true',
                        help="Disable the kernel mptcp support")
    parser.add_argument("--mptcp-syn-retries", type=int, default=3,
                        help="""Specifies how often we retransmit a SYN with the
                        MP_CAPABLE-option. After this, the SYN will not contain the
                        MP_CAPABLE-option. This is to handle middleboxes that drop SYNs
                        with unknown TCP options.""")
    parser.add_argument("--mptcp-no-checksum", action='store_true', default=False,
                        help="Disable the MPTCP checksum")
    parser.add_argument("--mptcp-path-manager", default="fullmesh",
                        help="Select the MPTCP path manager")
    parser.add_argument("--mptcp-subflows", default=1,
                        help="Number of subflows to use")
    parser.add_argument("--congestion-control", default="olia",
                        help="Congestion control algorithm")
    args = parser.parse_args()
    # set the log level to get some feedback from mininet
    if args.log:
        from mininet.log import setLogLevel
        setLogLevel(args.log)
    # ---- kernel MPTCP configuration (each sysctl result echoed to stderr;
    # requires root and an MPTCP-enabled kernel) ------------------------
    if args.mptcp_disabled:
        sys.stderr.write(sysctl("net.mptcp.mptcp_enabled", 0))
    else:
        sys.stderr.write(sysctl("net.mptcp.mptcp_enabled", 1))
    sys.stderr.write(sysctl("net.mptcp.mptcp_syn_retries", args.mptcp_syn_retries))
    if args.mptcp_no_checksum:
        sys.stderr.write(sysctl("net.mptcp.mptcp_checksum", 0))
    else:
        sys.stderr.write(sysctl("net.mptcp.mptcp_checksum", 1))
    sys.stderr.write(sysctl("net.mptcp.mptcp_path_manager", args.mptcp_path_manager))
    sys.stderr.write(sysctl("net.ipv4.tcp_congestion_control", args.congestion_control))
    # subflow count is a module parameter of the selected path manager
    sys.stderr.write(set_parameter("/sys/module/mptcp_%s/parameters/num_subflows" % args.mptcp_path_manager, args.mptcp_subflows))
    # ---- bring the virtual network up ---------------------------------
    net = create_network(paths=args.paths, bw=args.bw, loss=args.loss, latency=args.latency)
    net.start()
    s1 = net['s1']
    h1 = net['h1']
    h2 = net['h2']
    sleep(1)
    setup(net)
    # warm up / verify connectivity on every interface pair
    pingall(h1, h2)
    # ---- optional monitoring helpers (Popen handles kept for cleanup) ----
    procs = []
    if args.bwm:
        procs.append(start_bwm(s1, args.bwm))
    if args.bwm_term:
        start_bwm(h1)
        start_bwm(h2)
    if args.htop:
        start_htop(h1)
        start_htop(h2)
    if args.tcpdump:
        procs.append(start_tcpdump(h1))
        procs.append(start_tcpdump(h2))
    sleep(2)
    # ---- run the benchmark (or drop into the interactive CLI) ---------
    if args.cli:
        from mininet.cli import CLI
        CLI(net)
    else:
        for _ in range(args.repeat):
            sleep(1)
            # iperf between the two hosts; result is (server, client) rates
            result = net.iperf(seconds=args.time, fmt="m")
            print "%s %s" % (result[0], result[1])
    # ---- cleanup ------------------------------------------------------
    for p in procs:
        if p:
            p.terminate()
            p.wait()
    net.stop()
| 2.5 | 2 |
survey/forms.py | Udantu/portfolio-v1 | 0 | 12771933 | from django import forms
class AnswerQuestion(forms.Form):
    """Survey form carrying a single integer answer to a question."""
    answer = forms.IntegerField()
| 1.867188 | 2 |
tools/wptrunner/wptrunner/browsers/safari_webdriver.py | Ms2ger/web-platform-tests | 1 | 12771934 | <gh_stars>1-10
from .base import inherit
from . import safari
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
# Derive the "safari_webdriver" browser product from the plain safari
# product, injecting the inherited definitions into this module's globals().
inherit(safari, globals(), "safari_webdriver")
# __wptrunner__ magically appears from inherit, F821 is undefined name
# Override the executors so testharness tests and reftests run over WebDriver.
__wptrunner__["executor"]["testharness"] = "WebDriverTestharnessExecutor" # noqa: F821
__wptrunner__["executor"]["reftest"] = "WebDriverRefTestExecutor" # noqa: F821
| 1.609375 | 2 |
opentsdb/exceptions.py | razvandimescu/opentsdb-py | 48 | 12771935 | <filename>opentsdb/exceptions.py
class TSDBClientException(Exception):
    """Base class for every error raised by the OpenTSDB client."""
class TSDBNotAlive(TSDBClientException):
    """Raised when the TSDB endpoint does not respond to a liveness check."""
class TagsError(TSDBClientException):
    """Raised for problems with metric tags (see raise sites for specifics)."""
class ValidationError(TSDBClientException):
    """Raised when client-side validation of a value fails."""
class UnknownTSDBConnectProtocol(TSDBClientException):
    """Raised when an unrecognized TSDB connection protocol is requested."""

    def __init__(self, protocol):
        # remember the offending protocol for the error message
        self.protocol = protocol

    def __str__(self):
        message = "Unknown TSDB connection protocol: %s" % self.protocol
        return message
| 2.25 | 2 |
docs/migrations/0003_documentation_language.py | lnxpy/docket | 3 | 12771936 | <reponame>lnxpy/docket
# Generated by Django 3.0.4 on 2020-04-04 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 'language' choice field (en/pe, default 'en') to Documentation."""
    dependencies = [
        ('docs', '0002_auto_20200404_1326'),
    ]
    operations = [
        migrations.AddField(
            model_name='documentation',
            name='language',
            field=models.CharField(choices=[('en', 'English'), ('pe', 'Persian')], default='en', max_length=5),
        ),
    ]
| 1.578125 | 2 |
Iniciante-Begginer/Python/#1001 - #1050/#1010-CalculoSimples.py | jocelinoFG017/Beecrowd-judge-solutions | 2 | 12771937 | <gh_stars>1-10
# Author: <NAME>
# Read two product lines of the form "<code> <quantity> <unit price>"
# and print the combined total, formatted to two decimals.
first = input().split()
second = input().split()
total = int(first[1]) * float(first[2]) + int(second[1]) * float(second[2])
print('VALOR A PAGAR: R$ %.2f' % total)
| 2.875 | 3 |
Preprocess/Mat2Edgelist.py | yashchandak/GNN | 1 | 12771938 | import networkx as nx
from scipy.io import loadmat
# BUG FIX: the original called loadmat(dataset) here, *before* `dataset`
# was assigned, which raised a NameError on every run; that premature call
# is removed.
dataset = 'blogcatalog.mat'
# Load the adjacency ('network') matrix and convert it to a NetworkX graph.
x = loadmat(dataset)
x = x['network']
G = nx.from_scipy_sparse_matrix(x)
del x
# Context manager guarantees the edgelist file is flushed and closed.
with open("BC_DW.edgelist", 'wb') as f:
    nx.write_edgelist(G, f)
| 2.78125 | 3 |
utility/profile.py | mingsumsze1/mcts | 0 | 12771939 | from pyinstrument import Profiler
from functools import wraps
def profile(func):
    """Decorator that profiles *func* with pyinstrument and prints the report.

    The wrapped function's return value is passed through unchanged.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        profiler = Profiler()
        profiler.start()
        try:
            results = func(*args, **kwargs)
        finally:
            # stop even when func raises so the profiler is not left running
            profiler.stop()
        # BUG FIX: output_text() *returns* the report string; the original
        # discarded it, so the profile was never shown anywhere.
        print(profiler.output_text())
        return results
    return wrapper
| 2.46875 | 2 |
wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_message_info_editor.py | odidev/virgil-crypto-c | 26 | 12771940 | <reponame>odidev/virgil-crypto-c
# Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: <NAME> Inc. <<EMAIL>>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from virgil_crypto_lib.common._c_bridge import vsc_data_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
class vscf_message_info_editor_t(Structure):
    """Opaque ctypes handle mirroring the C `vscf_message_info_editor_t`
    context; no fields are declared because it is only ever used behind
    POINTER() in the bindings below."""
    pass
class VscfMessageInfoEditor(object):
    """Add and/or remove recipients and their parameters within message info.

    Usage:
    1. Unpack binary message info that was obtained from RecipientCipher.
    2. Add and/or remove key recipients.
    3. Pack MessageInfo to the binary data.

    Each method below binds the corresponding C function from the loaded
    foundation library (sets argtypes/restype) and forwards the call."""

    def __init__(self):
        """Create underlying C context."""
        self._ll = LowLevelLibs()
        self._lib = self._ll.foundation

    def vscf_message_info_editor_new(self):
        """Allocate a new C context; returns a POINTER(vscf_message_info_editor_t)."""
        vscf_message_info_editor_new = self._lib.vscf_message_info_editor_new
        vscf_message_info_editor_new.argtypes = []
        vscf_message_info_editor_new.restype = POINTER(vscf_message_info_editor_t)
        return vscf_message_info_editor_new()

    def vscf_message_info_editor_delete(self, ctx):
        """Release the C context previously returned by *_new()."""
        vscf_message_info_editor_delete = self._lib.vscf_message_info_editor_delete
        vscf_message_info_editor_delete.argtypes = [POINTER(vscf_message_info_editor_t)]
        vscf_message_info_editor_delete.restype = None
        return vscf_message_info_editor_delete(ctx)

    def vscf_message_info_editor_use_random(self, ctx, random):
        """Set the random-implementation dependency on the context."""
        vscf_message_info_editor_use_random = self._lib.vscf_message_info_editor_use_random
        vscf_message_info_editor_use_random.argtypes = [POINTER(vscf_message_info_editor_t), POINTER(vscf_impl_t)]
        vscf_message_info_editor_use_random.restype = None
        return vscf_message_info_editor_use_random(ctx, random)

    def vscf_message_info_editor_setup_defaults(self, ctx):
        """Set dependencies to it's defaults."""
        vscf_message_info_editor_setup_defaults = self._lib.vscf_message_info_editor_setup_defaults
        vscf_message_info_editor_setup_defaults.argtypes = [POINTER(vscf_message_info_editor_t)]
        vscf_message_info_editor_setup_defaults.restype = c_int
        return vscf_message_info_editor_setup_defaults(ctx)

    def vscf_message_info_editor_unpack(self, ctx, message_info_data):
        """Unpack serialized message info.
        Note that recipients can only be removed but not added.
        Note, use "unlock" method to be able to add new recipients as well."""
        vscf_message_info_editor_unpack = self._lib.vscf_message_info_editor_unpack
        vscf_message_info_editor_unpack.argtypes = [POINTER(vscf_message_info_editor_t), vsc_data_t]
        vscf_message_info_editor_unpack.restype = c_int
        return vscf_message_info_editor_unpack(ctx, message_info_data)

    def vscf_message_info_editor_unlock(self, ctx, owner_recipient_id, owner_private_key):
        """Decrypt encryption key this allows adding new recipients."""
        vscf_message_info_editor_unlock = self._lib.vscf_message_info_editor_unlock
        vscf_message_info_editor_unlock.argtypes = [POINTER(vscf_message_info_editor_t), vsc_data_t, POINTER(vscf_impl_t)]
        vscf_message_info_editor_unlock.restype = c_int
        return vscf_message_info_editor_unlock(ctx, owner_recipient_id, owner_private_key)

    def vscf_message_info_editor_add_key_recipient(self, ctx, recipient_id, public_key):
        """Add recipient defined with id and public key."""
        vscf_message_info_editor_add_key_recipient = self._lib.vscf_message_info_editor_add_key_recipient
        vscf_message_info_editor_add_key_recipient.argtypes = [POINTER(vscf_message_info_editor_t), vsc_data_t, POINTER(vscf_impl_t)]
        vscf_message_info_editor_add_key_recipient.restype = c_int
        return vscf_message_info_editor_add_key_recipient(ctx, recipient_id, public_key)

    def vscf_message_info_editor_remove_key_recipient(self, ctx, recipient_id):
        """Remove recipient with a given id.
        Return false if recipient with given id was not found."""
        vscf_message_info_editor_remove_key_recipient = self._lib.vscf_message_info_editor_remove_key_recipient
        vscf_message_info_editor_remove_key_recipient.argtypes = [POINTER(vscf_message_info_editor_t), vsc_data_t]
        vscf_message_info_editor_remove_key_recipient.restype = c_bool
        return vscf_message_info_editor_remove_key_recipient(ctx, recipient_id)

    def vscf_message_info_editor_remove_all(self, ctx):
        """Remove all existent recipients."""
        vscf_message_info_editor_remove_all = self._lib.vscf_message_info_editor_remove_all
        vscf_message_info_editor_remove_all.argtypes = [POINTER(vscf_message_info_editor_t)]
        vscf_message_info_editor_remove_all.restype = None
        return vscf_message_info_editor_remove_all(ctx)

    def vscf_message_info_editor_packed_len(self, ctx):
        """Return length of serialized message info.
        Actual length can be obtained right after applying changes."""
        vscf_message_info_editor_packed_len = self._lib.vscf_message_info_editor_packed_len
        vscf_message_info_editor_packed_len.argtypes = [POINTER(vscf_message_info_editor_t)]
        vscf_message_info_editor_packed_len.restype = c_size_t
        return vscf_message_info_editor_packed_len(ctx)

    def vscf_message_info_editor_pack(self, ctx, message_info):
        """Return serialized message info.
        Precondition: this method can be called after "apply"."""
        vscf_message_info_editor_pack = self._lib.vscf_message_info_editor_pack
        vscf_message_info_editor_pack.argtypes = [POINTER(vscf_message_info_editor_t), POINTER(vsc_buffer_t)]
        vscf_message_info_editor_pack.restype = None
        return vscf_message_info_editor_pack(ctx, message_info)

    def vscf_message_info_editor_shallow_copy(self, ctx):
        """Increment the context's reference count and return the same pointer
        (shallow copy, per the C API naming)."""
        vscf_message_info_editor_shallow_copy = self._lib.vscf_message_info_editor_shallow_copy
        vscf_message_info_editor_shallow_copy.argtypes = [POINTER(vscf_message_info_editor_t)]
        vscf_message_info_editor_shallow_copy.restype = POINTER(vscf_message_info_editor_t)
        return vscf_message_info_editor_shallow_copy(ctx)
| 0.6875 | 1 |
Lib/site-packages/jupyterlab_server/process_app.py | edupyter/EDUPYTER38 | 0 | 12771941 | """A lab app that runs a sub process for a demo or a test."""
import sys
from jupyter_server.extension.application import ExtensionApp, ExtensionAppJinjaMixin
from tornado.ioloop import IOLoop
from .handlers import LabConfig, add_handlers
from .process import Process
class ProcessApp(ExtensionAppJinjaMixin, LabConfig, ExtensionApp):
    """A jupyterlab app that runs a separate process and exits on completion."""

    load_other_extensions = True

    # Process apps never launch a browser window.
    open_browser = False

    def get_command(self):
        """Return the (command, kwargs) pair handed to `Process`.

        Intended to be overridden; the default just reports the Python version.
        """
        return ["python", "--version"], {}

    def initialize_settings(self):
        """Schedule the child process once the event loop is running."""
        IOLoop.current().add_callback(self._run_command)

    def initialize_handlers(self):
        """Register the lab handlers for this app."""
        add_handlers(self.handlers, self)

    def _run_command(self):
        # Spawn the subprocess and hook its completion future into the loop.
        cmd, kwargs = self.get_command()
        kwargs.setdefault("logger", self.log)
        done = Process(cmd, **kwargs).wait_async()
        IOLoop.current().add_future(done, self._process_finished)

    def _process_finished(self, future):
        # Stop the loop and exit with the child's return code; SystemExit is
        # not an Exception, so a clean exit propagates past the handler.
        try:
            IOLoop.current().stop()
            sys.exit(future.result())
        except Exception as exc:
            self.log.error(str(exc))
            sys.exit(1)
| 2.453125 | 2 |
subway/examples/miscs/pi_run.py | Rails-on-HPC/subway | 4 | 12771942 | from random import random
import sys
def pie(times=100):
    """Monte-Carlo estimate of pi: sample *times* points in the unit square
    and scale the fraction landing inside the quarter circle by 4."""
    hits = 0
    for _ in range(times):
        px = random()
        py = random()
        if px * px + py * py < 1:
            hits += 1
    return hits / times * 4
if __name__ == "__main__":
    # CLI contract: argv[1] = input file whose first line is the sample
    # count; argv[2] = output file receiving "<pi estimate>\n<count...>".
    with open(sys.argv[1], "r") as f:
        times = f.readlines()[0]
    p = pie(int(times))
    with open(sys.argv[2], "w") as f:
        f.writelines([str(p) + "\n" + times])
| 2.96875 | 3 |
securityheaders/checkers/xpoweredby/__init__.py | th3cyb3rc0p/securityheaders | 151 | 12771943 | from .present import XPoweredByPresentChecker
__all__ = ['XPoweredByPresentChecker']
| 1.054688 | 1 |
scripts/makedocu.py | lsyefficient/qwtplot3d | 64 | 12771944 | <gh_stars>10-100
import os.path
import shutil
import tarfile
import zlib
import zipfile
from time import gmtime, strftime
refuseddirs = ['CVS','images']
refuseddirsextra = ['CVS','small']
source = 'V:\\\\cvs\\qwtplot3d\\doc\\'
sourceim = 'V:\\\\cvs\\qwtplot3d\\doc\\images\\'
zipname = 'V:\\\\cvs\\uploads\\qwtplot3d-doc.zip'
zipextraname = 'V:\\\\cvs\\uploads\\qwtplot3d-doc-extra.zip'
def compresstree(src, zip, refdir):
names = os.listdir(src)
for name in names:
srcname = os.path.join(src, name)
if os.path.isdir(srcname):
if name not in refdir:
compresstree(srcname, zip, refdir)
if name == 'images':
compresstree(srcname +'\\small', zip, refdir)
else:
zip.write(srcname,'qwtplot3d\\doc\\' + srcname[len(source):])
zip = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
compresstree(source, zip, refuseddirs)
zip.close()
zip = zipfile.ZipFile(zipextraname, 'w', zipfile.ZIP_DEFLATED)
compresstree(sourceim, zip, refuseddirsextra)
zip.close()
| 2.359375 | 2 |
autotabular/pipeline/components/classification/gaussian_nb.py | jianzhnie/AutoTabular | 48 | 12771945 | import numpy as np
from autotabular.pipeline.components.base import AutotabularClassificationAlgorithm
from autotabular.pipeline.constants import DENSE, PREDICTIONS, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
class GaussianNB(AutotabularClassificationAlgorithm):
def __init__(self, random_state=None, verbose=0):
self.random_state = random_state
self.verbose = int(verbose)
self.estimator = None
def fit(self, X, y):
import sklearn.naive_bayes
self.estimator = sklearn.naive_bayes.GaussianNB()
self.classes_ = np.unique(y.astype(int))
# Fallback for multilabel classification
if len(y.shape) > 1 and y.shape[1] > 1:
import sklearn.multiclass
self.estimator = sklearn.multiclass.OneVsRestClassifier(
self.estimator, n_jobs=1)
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
'shortname': 'GaussianNB',
'name': 'Gaussian Naive Bayes classifier',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': False,
'is_deterministic': True,
'input': (DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS, )
}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
return cs
| 2.484375 | 2 |
Python/Merge_sort.py | Harshalszz/-HACKTOBERFEST2K20 | 30 | 12771946 | def merge_sort(unsorted_list):
if len(unsorted_list) <= 1:
return unsorted_list
# Finding the middle point and partitioning the array into two halves
middle = len(unsorted_list) // 2
left = unsorted_list[:middle]
right = unsorted_list[middle:]
left = merge_sort(left)
right = merge_sort(right)
return list(merge(left, right))
#Merging the sorted halves
def merge(left, right):
    """Merge two sorted lists into one sorted list (stable: ties take from
    *right* first, matching the original comparison).

    PERF FIX: runs in O(len(left) + len(right)) using index pointers. The
    original repeatedly called list.remove(), which is O(n) per element
    (quadratic overall) and destructively emptied its inputs.
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # one side is exhausted; append the remainder of the other
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Driver: read whitespace-separated integers from stdin, echo the input,
# and print the merge-sorted result.
input_list = list(map(int,input("Enter unsorted input list: ").split()))
print("Unsorted Input: ", input_list)
print("Sorted Output: ", merge_sort(input_list))
matlab_ext/code-miners/projects/prepro/src_2/hided/tmp.py | zaqwes8811/coordinator-tasks | 0 | 12771947 | <gh_stars>0
''' Strip commented-out lines (";"-style comments) from a file, in place. '''
def purgeFromComments( fname ):
    # read the file into a list of lines (helper defined elsewhere)
    linesFromInc = file2ListLines( fname )
    # find the commented-out lines and write only the uncommented ones back
    try:
        f = io.open( fname, "wt")#, encoding='utf-8' ) # leave it like this for now
        try:
            for at in linesFromInc:
                if not ';' in at:
                    f.write( at )
                else:
                    at = at.replace(';', '; ') # pad each semicolon with a space
                    at_tmp = at.replace('\t', '') # strip tabs
                    at_tmp = at_tmp.split(' ')
                    if at_tmp[0] != ';': # first token is not ';' => not a comment line
                        f.write( at )
        finally:
            f.close()
    except IOError:
        print 'IOError'
qarc-gym/qarc-innovation.py | kasimte/QARC | 40 | 12771948 | import gym
import gym_qarc
import numpy as np
from inet import InnovationNetwork
import os
import tensorflow as tf
# Candidate video bit rates (Mbps); the action space is one choice per step.
VIDEO_BIT_RATE = [0.01, 0.3, 0.5, 0.8, 1.1, 1.4]
A_DIM = len(VIDEO_BIT_RATE)
S_INFO = 6
S_LEN = 10 # take how many frames in the past
LR_RATE = 1e-4
RAND_RANGE = 1000
env = gym.make('QARC-v0')
total_steps = 0
os.system('mkdir results')
_file = open('test.csv', 'w')
observation = env.reset()
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    _innovation_network = InnovationNetwork(
        sess, state_dim=[S_INFO, S_LEN], action_dim=A_DIM, learning_rate=LR_RATE)
    for i_episode in range(3000):
        total_max_reward = 0.0
        total_reward = 0.0
        for _step in range(10):
            # Brute-force every action (without advancing the env) to find
            # the oracle best action/reward for this state.
            _reward = -1000
            _action = -1
            for action in range(A_DIM):
                observation_, reward, done, info = env.step_without_change(
                    action)
                if reward > _reward:
                    _reward, _action = reward, action
            # Sample the network's action from its predicted distribution.
            _pred = _innovation_network.predict(observation)[0]
            action_cumsum = np.cumsum(_pred)
            _selected = (action_cumsum > np.random.randint(
                1, RAND_RANGE) / float(RAND_RANGE)).argmax()
            observation_, reward, done, info = env.step(_selected)
            print _action, _selected
            # Supervised update toward the oracle action.
            _innovation_network.train(observation, _action) # ground truth
            total_max_reward += _reward
            total_reward += reward
            observation = observation_
        # Log achieved vs. oracle reward per episode to test.csv.
        print i_episode, total_reward, total_max_reward
        _file.write(str(i_episode) + ',' + str(total_reward) +
                    ',' + str(total_max_reward) + '\n')
        _file.flush()
_file.close()
| 2.4375 | 2 |
StatisticsPreview/migrations/0003_auto_20190828_1128.py | tifat58/lsv-c4-django-webexperiment | 1 | 12771949 | <filename>StatisticsPreview/migrations/0003_auto_20190828_1128.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add 'has_child' (bool) and 'menu_url' (char) fields to ResultMenu."""
    dependencies = [
        ('StatisticsPreview', '0002_auto_20190828_1017'),
    ]
    operations = [
        migrations.AddField(
            model_name='resultmenu',
            name='has_child',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='resultmenu',
            name='menu_url',
            field=models.CharField(max_length=350, default=''),
        ),
    ]
| 1.484375 | 1 |
database.py | nathanielangafor/Dandy-2021-Backend | 0 | 12771950 | <gh_stars>0
import json
import sqlite3
import base64
import random
from google.cloud import storage
import string
from PIL import Image
from io import BytesIO
import os
from flask import Flask, request
app = Flask(__name__)
app.secret_key = b"Da ya biezlikiy patamy shto u menya nie litso"
from PIL import ImageFile
# Tolerate partially-downloaded/truncated uploads instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os
# NOTE(review): `os` is already imported above -- duplicate import; and the
# GCS credential path is hardcoded to one machine -- should come from config.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/nangafor/Dandy-2021-Backend/engaged-diode-329906-957b934a14a8.json'
def readOne(table, criteria, value):
    """Return the first row of *table* where column *criteria* equals *value*,
    JSON-encoded (a JSON array, or "null" when no row matches).

    WARNING: *table* and *criteria* are interpolated directly into the SQL
    (identifiers cannot be bound parameters) -- they must never come from
    untrusted input.
    """
    conn = sqlite3.connect("database.db")
    try:
        c = conn.cursor()
        c.execute('SELECT * FROM {} WHERE {}=(?)'.format(table, criteria), (value,))
        return json.dumps(c.fetchone())
    finally:
        # BUG FIX: the original leaked the connection on every call
        conn.close()
def orig_read(table):
    """Return every row of *table* as a JSON array of arrays.

    WARNING: *table* is interpolated directly into the SQL -- trusted
    identifiers only.
    """
    conn = sqlite3.connect("database.db")
    try:
        c = conn.cursor()
        c.execute('SELECT * FROM {}'.format(table))
        return json.dumps(c.fetchall())
    finally:
        # BUG FIX: the original leaked the connection on every call
        conn.close()
def get_blob_link(bucket_name, source_file_name):
    """Make the named GCS blob publicly readable and return its public URL."""
    client = storage.Client("Dandy Bois")
    blob = client.bucket(bucket_name).blob(source_file_name)
    blob.make_public()
    return blob.public_url
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Upload a local file to the given GCS bucket and log the transfer."""
    client = storage.Client("Dandy Bois")
    destination = client.bucket(bucket_name).blob(destination_blob_name)
    destination.upload_from_filename(source_file_name)
    message = "File {} uploaded to {}.".format(
        source_file_name, destination_blob_name
    )
    print(message)
def randomString(length):
    """Return a random string of *length* uppercase letters and digits.

    Used for throwaway filenames -- not a security token generator.
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def userTable():
    """Create the Users table if it does not exist (idempotent).

    Columns: email, username, password, points, achievements (JSON blob).
    """
    conn = sqlite3.connect("database.db")
    try:
        c = conn.cursor()
        # BUG FIX: the original declared points as "INETEGER" (typo); SQLite
        # accepted it anyway, so fixing the spelling changes nothing for
        # already-created databases.
        c.execute('CREATE TABLE IF NOT EXISTS Users (email TEXT, username TEXT, password TEXT, points INTEGER, achievements TEXT)')
        conn.commit()
    finally:
        # the original never closed the connection
        conn.close()
def locationTable():
    """Create the Locations table if it does not exist (idempotent).

    Columns: id, user, longitude, latitude, image (URL or raw value),
    comment, type, title, currentUser, points, locName.
    """
    conn = sqlite3.connect("database.db")
    try:
        c = conn.cursor()
        c.execute('CREATE TABLE IF NOT EXISTS Locations (id INTEGER, user TEXT, longitude REAL, latitude REAL, image TEXT, comment TEXT, type INTEGER, title TEXT, currentUser TEXT, points INTEGER, locName STRING)')
        conn.commit()
    finally:
        # the original never closed the connection
        conn.close()
@app.route('/update/', methods=['GET', 'POST'])
def update():
    """Generic row-update endpoint.

    POST body (JSON): {"form": {"table", "setPos", "where", "newValue",
    "whereValue"}}; sets column <setPos> = newValue on rows where
    <where> = whereValue. Returns the string 'True'.

    WARNING: table/column names are interpolated into the SQL -- safe only
    while callers are trusted (SQL-injection risk otherwise).
    """
    if request.method == 'POST':
        parsed = json.loads((request.data).decode('utf-8'))
        conn = sqlite3.connect("database.db")
        try:
            c = conn.cursor()
            c.execute('UPDATE {} SET {}=(?) WHERE {}=(?)'.format(parsed['form']['table'], parsed['form']['setPos'], parsed['form']['where']), (parsed['form']['newValue'], parsed['form']['whereValue'],))
            conn.commit()
        finally:
            # BUG FIX: the original leaked the connection on every call
            conn.close()
        return 'True'
@app.route('/insertUser/', methods=['GET', 'POST'])
def insertUser():
    """Create a new user account with zero points and default achievements.

    BUG FIX: the original referenced an undefined `parsed` (NameError on
    every call) and -- apparently via copy/paste from insertLocation --
    inserted into the Locations table while its computed `achievements`
    and `points` went unused. This version parses the request body and
    writes to the Users table.
    NOTE(review): assumes the JSON body carries 'email', 'username' and
    'password' keys -- confirm against the client code.
    """
    if request.method == 'POST':
        parsed = json.loads((request.data).decode('utf-8'))
        # achievement name -> [title, unlocked, description, point value]
        achievements = {
            "beHuman": ["Be Human", False, "You are human!", 100],
            "environmentalist": ["Environmentalist", False, "You completed your first cleanup activity!", 100],
            "warrior": ["Warrior", False, "You completed your first 5 cleanup activities!", 300],
            "cleanupWarlock": ["Cleanup Warlock", False, "You completed your first 10 cleanup activities!", 500],
        }
        points = 0
        conn = sqlite3.connect("database.db")
        try:
            c = conn.cursor()
            c.execute("INSERT INTO Users (email, username, password, points, achievements) VALUES (?, ?, ?, ?, ?)",
                      (parsed['email'], parsed['username'], parsed['password'], points, json.dumps(achievements)))
            conn.commit()
        finally:
            conn.close()
        return 'True'
@app.route('/insertLocation/', methods=['GET', 'POST'])
def insertLocation():
    """Create a row in the Locations table.

    If the posted 'image' field decodes as base64 image data it is saved
    locally, uploaded to the 'dbb_1' GCS bucket, and stored as a public
    URL; otherwise (the original's fallback path, kept intact) the raw
    'image' value is stored verbatim.
    """
    if request.method == 'POST':
        parsed = json.loads((request.data).decode('utf-8'))
        # BUG FIX: the original used len(orig_read('Locations')) + 1 as the
        # new id -- that is the length of the JSON *string*, not the number
        # of rows.
        new_id = len(json.loads(orig_read('Locations'))) + 1
        conn = sqlite3.connect("database.db")
        try:
            try:
                fileName = randomString(9)
                image = Image.open(BytesIO(base64.b64decode(parsed['image'])))
                image.save('{}.png'.format(fileName), 'PNG')
                upload_blob("dbb_1", '{}.png'.format(fileName), '{}.png'.format(fileName))
                image = get_blob_link("dbb_1", "{}.png".format(fileName))
                print(image)
                c = conn.cursor()
                c.execute("INSERT INTO Locations (id, user, longitude, latitude, image, comment, type, title, currentUser, points, locName) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (new_id, parsed['user'], float(parsed['longitude']), float(parsed['latitude']), image, parsed['comment'], parsed['type'], parsed['title'], '', int(parsed['points']), parsed['locName']))
                conn.commit()
                os.remove(fileName + '.png')
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Fallback stores the raw value.
                c = conn.cursor()
                c.execute("INSERT INTO Locations (id, user, longitude, latitude, image, comment, type, title, currentUser, points, locName) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (new_id, parsed['user'], float(parsed['longitude']), float(parsed['latitude']), parsed['image'], parsed['comment'], parsed['type'], parsed['title'], '', int(parsed['points']), parsed['locName']))
                conn.commit()
        finally:
            conn.close()
        return 'True'
@app.route('/delete/', methods=['GET', 'POST'])
def delete():
    """Delete rows where column <criteria> equals <identifier>.

    POST body (JSON): {"table", "criteria", "identifier"}. Returns 'True'.

    WARNING: table/column names are interpolated into the SQL -- trusted
    callers only (SQL-injection risk otherwise).
    """
    if request.method == 'POST':
        parsed = json.loads((request.data).decode('utf-8'))
        conn = sqlite3.connect("database.db")
        try:
            c = conn.cursor()
            c.execute('DELETE FROM {} WHERE {}=(?)'.format(parsed['table'], parsed['criteria']), (parsed['identifier'],))
            conn.commit()
        finally:
            # BUG FIX: the original leaked the connection on every call
            conn.close()
        return 'True'
@app.route('/read/', methods=['GET', 'POST'])
def read():
    """Dump an entire table as JSON.

    POST body (JSON): {"table": <name>} -- forwarded to orig_read().
    NOTE(review): the table name comes straight from the client and is
    interpolated into SQL inside orig_read -- injection risk.
    """
    if request.method == 'POST':
        parsed = json.loads((request.data).decode('utf-8'))
        # debug echo of the request payload
        print(parsed)
        return str(orig_read(parsed['table']))
@app.route('/readOne/', methods=['GET', 'POST'])
def readOne1():
    """Return a single row as JSON.

    POST body (JSON): {"user": {"table", "criteria", "value"}} -- forwarded
    to the readOne() helper (same SQL-injection caveat on identifiers).
    """
    if request.method == 'POST':
        parsed = json.loads((request.data).decode('utf-8'))
        data = str(readOne(parsed['user']['table'], parsed['user']['criteria'], parsed['user']['value']))
        return data
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Check posted form credentials against the Users table.

    Returns 'logged in', 'invalid password', or 'invalid username'
    (first username match wins, as in the original).
    NOTE(review): passwords are stored and compared in plaintext -- they
    should be hashed (e.g. werkzeug.security) before this ships.
    """
    if request.method == 'POST':
        conn = sqlite3.connect("database.db")
        try:
            c = conn.cursor()
            c.execute('SELECT * FROM Users')
            data = c.fetchall()
        finally:
            # BUG FIX: the original leaked the connection on every call
            conn.close()
        for user in data:
            if user[1] == request.form.get('username'):
                if user[2] == request.form.get('password'):
                    return 'logged in'
                else:
                    return 'invalid password'
        return 'invalid username'
@app.route('/test/', methods=['GET', 'POST'])
def test():
    """Health-check endpoint: GET returns the literal string 'TEST'
    (POST implicitly returns None, which Flask treats as an error)."""
    if request.method == 'GET':
        return 'TEST'
# Ensure both tables exist before the server starts accepting requests.
userTable()
locationTable()
if __name__ == "__main__":
    # NOTE(review): port is passed as the string "80"; werkzeug tolerates
    # it, but an int is the documented type -- confirm / change to 80.
    app.run(host= '0.0.0.0', port="80")
| 2.5625 | 3 |