max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
python/protein-translation/protein_translation.py | squibdev/Exercism.io | 0 | 12773051 | from textwrap import wrap
def proteins(strand):
    """Translate an RNA strand into the list of proteins it codes for.

    The strand is read three letters at a time (codons), left to right;
    translation stops at the first STOP codon, which is not included in
    the result.
    """
    codon_table = {
        'AUG': 'Methionine',
        'UUU': 'Phenylalanine',
        'UUC': 'Phenylalanine',
        'UUA': 'Leucine',
        'UUG': 'Leucine',
        'UCU': 'Serine',
        'UCC': 'Serine',
        'UCA': 'Serine',
        'UCG': 'Serine',
        'UAU': 'Tyrosine',
        'UAC': 'Tyrosine',
        'UGU': 'Cysteine',
        'UGC': 'Cysteine',
        'UGG': 'Tryptophan',
        'UAA': 'STOP',
        'UAG': 'STOP',
        'UGA': 'STOP'
    }
    translated = []
    # textwrap.wrap(s, 3) chops the strand into 3-character codons.
    for codon in wrap(strand, 3):
        protein = codon_table[codon]
        if protein == 'STOP':
            break
        translated.append(protein)
    return translated
| 3.359375 | 3 |
class test_result():
    """Log the performance on the test set.

    Callable metric object: invoking it scores ``autonet`` on the stored
    test split, or yields NaN when no test data was provided.
    """

    def __init__(self, autonet, X_test, Y_test):
        self.autonet = autonet
        self.X_test = X_test
        self.Y_test = Y_test

    def __call__(self, model, epochs):
        """Return the test-set score (``model``/``epochs`` are unused)."""
        if self.X_test is None or self.Y_test is None:
            return float("nan")
        return self.autonet.score(self.X_test, self.Y_test)
class gradient_norm():
    """Log the average 2-norm of the gradients of a network's parameters."""

    def __init__(self):
        # Bug fix: the original defined ``__init_`` (missing trailing
        # underscore), so it was never used as the initializer.
        pass

    def __call__(self, network, epoch):
        """Return the mean gradient 2-norm over parameters with a gradient.

        Returns 0.0 when no parameter has a gradient yet.
        """
        total_gradient = 0
        n_params = 0
        # filter() is iterated directly; the original wrapped it in list().
        for p in filter(lambda p: p.grad is not None, network.parameters()):
            total_gradient += p.grad.data.norm(2).item()
            n_params += 1
        # Prevent division through 0
        if total_gradient == 0:
            n_params = 1
        return total_gradient / n_params
| 2.8125 | 3 |
src/Alice/Brain.py | EnochXDDD/DevTools | 0 | 12773053 | <reponame>EnochXDDD/DevTools
import logging
import sys
import time
import traceback
from functools import partial
LOG = logging.getLogger(__name__)
class TimeEstimate:
    """Measure the wall-clock time of a code block.

    Usable either as a decorator (``@TimeEstimate()``) or as a context
    manager (``with TimeEstimate('name'):``).  The elapsed seconds are
    passed to ``handler``; by default a "time of <name>:" line is printed.
    """

    def __init__(self, block_name="", without_any_args=False, handler=None):
        self._block_name = block_name                  # label for logs/handler
        self._without_any_args = without_any_args      # if True, call func with no args
        self._handler = handler                        # callable receiving elapsed seconds
        self._time_list = [0.0, 0.0]                   # [start, end] perf_counter stamps

    def __call__(self, func):
        """Decorator entry point: wrap *func* so each call is timed."""
        self._func = func
        # Fall back to the wrapped function's own name when no label was given.
        if not bool(self._block_name) and self._func is not None:
            self._block_name = self._func.__name__
        if self._handler is None:
            self._handler = partial(print, "time of {}:".format(self._block_name))

        def wrapper(*args, **kwargs):
            LOG.debug("args: {}, kwargs: {}".format(args, kwargs))
            self._time_list[0] = time.perf_counter()
            if not self._without_any_args:
                result = self._func(*args, **kwargs)
            else:
                # Deliberately drop whatever arguments the caller passed.
                result = self._func()
            self._time_list[1] = time.perf_counter()
            time_diff = self._time_list[1] - self._time_list[0]
            LOG.debug("time of {}: {}".format(self._block_name, time_diff))
            self._handler(time_diff)
            return result
        return wrapper

    def __enter__(self):
        # NOTE(review): returns None, so ``with TimeEstimate() as t`` binds None.
        if self._handler is None:
            self._handler = partial(print, "time of {}:".format(self._block_name))
        self._time_list[0] = time.perf_counter()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._time_list[1] = time.perf_counter()
        time_diff = self._time_list[1] - self._time_list[0]
        LOG.debug("time of {}: {}".format(self._block_name, time_diff))
        self._handler(time_diff)
class ExceptionCatch:
    """Install a handler as the process-wide uncaught-exception hook.

    Instantiating this class replaces ``sys.excepthook``: every uncaught
    exception is rendered as a traceback string and handed to *handler*
    (printing to stderr by default).
    """

    def __init__(self, handler=partial(print, file=sys.stderr)):
        self._handler = handler
        sys.excepthook = self.__call__
        LOG.debug("exception hook to handler")

    def __call__(self, etype, value, tb):
        """Format the exception and forward the text to the handler."""
        formatted = traceback.format_exception(etype, value, tb)
        self._handler("".join(formatted))
class ThreadObject:
    """Placeholder for a thread wrapper; currently has no behaviour."""

    def __init__(self):
        pass
| 2.53125 | 3 |
py/test/fixture/touchscreen_calibration/fixture.py | arccode/factory | 3 | 12773054 | # Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import threading
import time
from cros.factory.test import event
from cros.factory.test.i18n import _
from cros.factory.test import session
from cros.factory.test.utils import serial_utils
# Define the driver name and the interface protocols to find the arduino ports.
# NATIVE_USB_PORT: used to monitor the internal state of test fixture.
# PROGRAMMING_PORT: used to upload the firmware from host to the arduino and
# issue calibration commands to control the test fixture.
NATIVE_USB_PORT = 0
PROGRAMMING_PORT = 1
ARDUINO_DRIVER = 'cdc_acm'
interface_protocol_dict = {NATIVE_USB_PORT: '00', PROGRAMMING_PORT: '01'}
ArduinoCommand = collections.namedtuple(
'ArduinoCommand', ['DOWN', 'UP', 'STATE', 'RESET'])
COMMAND = ArduinoCommand('d', 'u', 's', 'r')
ArduinoState = collections.namedtuple(
'ArduinoState', ['INIT', 'STOP_DOWN', 'STOP_UP', 'GOING_DOWN', 'GOING_UP',
'EMERGENCY_STOP'])
STATE = ArduinoState('i', 'D', 'U', 'd', 'u', 'e')
class FixtureException(Exception):
    """Raised for any touchscreen test-fixture connection/state error."""
class FixutreNativeUSB(serial_utils.SerialDevice):
    """A native usb port used to monitor the internal state of the fixture.

    NOTE(review): "Fixutre" is a typo for "Fixture"; the name is kept because
    other code instantiates the class by this name.
    """

    def __init__(self, driver=ARDUINO_DRIVER,
                 interface_protocol=interface_protocol_dict[NATIVE_USB_PORT],
                 timeout=86400):
        # timeout defaults to 24 hours so blocking reads effectively wait
        # for the fixture's next state report.
        super(FixutreNativeUSB, self).__init__()
        self.driver = driver
        self.interface_protocol = interface_protocol
        self.timeout = timeout
        self.port = self._GetPort()
        self._Connect(self.port)
        self.state_string = None        # latest raw '<...>' state report
        self.last_state_string = None   # previous report, kept for DiffState()
        # The ordering of the state names should match that in
        # touchscreen_calibration.ino
        # NOTE(review): despite the "_dict" suffix this is a list indexed by
        # field position.
        self.state_name_dict = [
            'state',
            'jumper',
            'button debug',
            'sensor extreme up',
            'sensor up',
            'sensor down',
            'sensor safety',
            'motor direction',
            'motor enabled',
            'motor locked',
            'motor duty cycle',
            'pwm frequency',
            'count',
        ]

    def _GetPort(self):
        """Return the tty path for the configured driver/protocol (or None)."""
        return serial_utils.FindTtyByDriver(self.driver, self.interface_protocol)

    def _Connect(self, port):
        """Open the serial connection; failures are logged, not raised."""
        try:
            self.Connect(port=port, timeout=self.timeout)
            msg = 'Connect to native USB port "%s" for monitoring internal state.'
            session.console.info(msg, port)
        except Exception:
            msg = 'FixtureNativeUSB: failed to connect to native usb port: %s'
            session.console.warn(msg, port)

    def _CheckReconnection(self):
        """Reconnect the native usb port if it has been refreshed."""
        curr_port = self._GetPort()
        if curr_port != self.port:
            self.Disconnect()
            self._Connect(curr_port)
            self.port = curr_port
            session.console.info('Reconnect to new port: %s', curr_port)

    def GetState(self):
        """Get the fixture state from the native usb port.

        The complete state_string looks like: <i1001000000.6000.0>
        Its format is defined in self.state_name_dict in __init__() above.
        The first character describes the main state.

        This call is blocked until a complete fixture state has been received.
        Call this method with a new thread if needed.
        """
        self._CheckReconnection()
        reply = []
        while True:
            ch = self.Receive()
            reply.append(ch)
            if ch == '>':  # '>' terminates one complete state report
                self.last_state_string = self.state_string
                self.state_string = ''.join(reply)
                return self.state_string

    def QueryFixtureState(self):
        """Query fixture internal state."""
        self._CheckReconnection()
        self.Send('s')

    def _ExtractStateList(self, state_string):
        """Parse '<flags.pwm.count>' into [flag chars..., pwm_freq, count]."""
        if state_string:
            state, pwm_freq, count = state_string.strip().strip('<>').split('.')
            state_list = list(state)
            state_list.extend([pwm_freq, count])
        else:
            state_list = []
        return state_list

    def DiffState(self):
        """Get the difference of between this state and the last state."""
        old_state_list = self._ExtractStateList(self.last_state_string)
        new_state_list = self._ExtractStateList(self.state_string)
        return [(self.state_name_dict[i], new_state_list[i])
                for i in range(len(new_state_list))
                if old_state_list == [] or new_state_list[i] != old_state_list[i]]

    def CompleteState(self):
        """Get the complete state snap shot."""
        state_list = self._ExtractStateList(self.state_string)
        return [(self.state_name_dict[i], state_list[i])
                for i in range(len(state_list))]
class BaseFixture(serial_utils.SerialDevice):
    """A base fixture class."""

    def __init__(self, state=None):
        super(BaseFixture, self).__init__()
        # Last known fixture state character (see ArduinoState), or None.
        self.state = state
        # Set by subclasses that also open the native USB monitoring port.
        self.native_usb = None
class FakeFixture(BaseFixture):
    """A fake fixture class used for development purpose only.

    Instead of driving motors it asks the operator (via the test UI) to move
    the lever by hand.
    """

    # Seconds to wait for DriveProbeUpDone() before finishing the test.
    TIMEOUT = 10

    def __init__(self, ui, state=None):
        super(FakeFixture, self).__init__(state)
        self.ui = ui
        # Released by DriveProbeUpDone() once final calibration is finished.
        self.final_calibration_lock = threading.Event()

    def QueryState(self):
        """Queries the state of the arduino board."""
        return self.state

    def IsStateUp(self):
        """Checks if the fixture is in the INIT or STOP_UP state."""
        return (self.state in [STATE.INIT, STATE.STOP_UP])

    def IsEmergencyStop(self):
        """Checks if the fixture is in the EMERGENCY_STOP state."""
        return self.state == STATE.EMERGENCY_STOP

    def DriveProbeDown(self):
        """Drives the probe to the 'down' position."""
        session.console.info('Drive Probe Down....')
        self.ui.Alert(_('Pull the lever down.'))

    def DriveProbeUp(self):
        """Drives the probe to the 'up' position."""
        session.console.info('Drive Probe Up....')
        self.ui.Alert(_('Pull the lever up.'))
        # Wait (bounded by TIMEOUT) for manual confirmation, then end the test.
        self.final_calibration_lock.wait(self.TIMEOUT)
        self.ui.PostEvent(event.Event(event.Event.Type.TEST_UI_EVENT,
                                      subtype='FinishTest'))

    def DriveProbeUpDone(self):
        """Notify that the DriveProbeUp has been done."""
        self.final_calibration_lock.set()
class FixtureSerialDevice(BaseFixture):
    """A serial device to control touchscreen fixture."""

    def __init__(self, driver=ARDUINO_DRIVER,
                 interface_protocol=interface_protocol_dict[PROGRAMMING_PORT],
                 timeout=20):
        super(FixtureSerialDevice, self).__init__()
        try:
            port = serial_utils.FindTtyByDriver(driver, interface_protocol)
            self.Connect(port=port, timeout=timeout)
            msg = 'Connect to programming port "%s" for issuing commands.'
            session.console.info(msg, port)
            session.console.info('Wait up to %d seconds for arduino initialization.',
                                 timeout)
        except Exception:
            raise FixtureException('Failed to connect the test fixture.')
        # The fixture must settle into a known resting state before use.
        self.AssertStateWithTimeout([STATE.INIT, STATE.STOP_UP,
                                     STATE.EMERGENCY_STOP], timeout)

        # The 2nd-generation test fixture has a native usb port.
        self.native_usb = FixutreNativeUSB()
        if not self.native_usb:
            # NOTE(review): dead branch -- the constructor above always returns
            # an instance; connection failures are only logged inside
            # FixutreNativeUSB._Connect().  Confirm intent before relying on it.
            raise FixtureException('Fail to connect the native usb port.')

    def QueryState(self):
        """Queries the state of the arduino board."""
        try:
            self.state = self.SendReceive(COMMAND.STATE)
        except Exception:
            raise FixtureException('QueryState failed.')
        return self.state

    def IsStateUp(self):
        """Checks if the fixture is in the INIT or STOP_UP state."""
        return (self.QueryState() in [STATE.INIT, STATE.STOP_UP])

    def IsEmergencyStop(self):
        """Checks if the fixture is in the EMERGENCY_STOP state."""
        return self.QueryState() == STATE.EMERGENCY_STOP

    def AssertStateWithTimeout(self, expected_states, timeout):
        """Assert the state with timeout (polling once per second).

        NOTE(review): with an initial ``timeout`` <= 0 the countdown never
        reaches 0 exactly and the loop would not terminate -- confirm callers
        always pass a positive timeout.
        """
        while True:
            result, state = self._AssertState(expected_states)
            if result is True:
                session.console.info('state: %s (expected)', state)
                return
            session.console.info('state: %s (transient, probe still moving)', state)
            time.sleep(1)
            timeout -= 1
            if timeout == 0:
                break

        msg = 'AssertState failed: actual state: "%s", expected_states: "%s".'
        raise FixtureException(msg % (state, str(expected_states)))

    def _AssertState(self, expected_states):
        """Confirms that the arduino is in the specified state.

        It returns True if the actual state is in the expected states;
        otherwise, it returns the actual state.
        """
        if not isinstance(expected_states, list):
            expected_states = [expected_states]
        actual_state = self.QueryState()
        return (actual_state in expected_states, actual_state)

    def AssertState(self, expected_states):
        """Raise FixtureException unless the fixture is in an expected state."""
        result, _ = self._AssertState(expected_states)
        if result is not True:
            msg = 'AssertState failed: actual state: "%s", expected_states: "%s".'
            raise FixtureException(msg % (result, str(expected_states)))

    def DriveProbeDown(self):
        """Drives the probe to the 'down' position."""
        try:
            response = self.SendReceive(COMMAND.DOWN)
            session.console.info('Send COMMAND.DOWN(%s). Receive state(%s).',
                                 COMMAND.DOWN, response)
        except Exception:
            raise FixtureException('DriveProbeDown failed.')

        self.AssertState(STATE.STOP_DOWN)

    def DriveProbeUp(self):
        """Drives the probe to the 'up' position."""
        try:
            response = self.SendReceive(COMMAND.UP)
            session.console.info('Send COMMAND.UP(%s). Receive state(%s).',
                                 COMMAND.UP, response)
        except Exception:
            raise FixtureException('DriveProbeUp failed.')

        self.AssertState(STATE.STOP_UP)
| 2.234375 | 2 |
src/cyborgbackup/ui/urls.py | ThorpeJosh/cyborgbackup | 0 | 12773055 | <reponame>ThorpeJosh/cyborgbackup
from django.conf.urls import url
from django.views.generic.base import TemplateView
from django.contrib.staticfiles import views
from django.urls import re_path
from cyborgbackup.main.utils.tasks import catalog_is_running, celery_worker_is_running
from cyborgbackup.main.models import Job, Policy, Repository, Schedule, Client
app_name = 'ui'
class IndexView(TemplateView):
    """Dashboard view: renders worker status and object counts."""

    template_name = 'ui/index.html'

    def get_context_data(self, **kwargs):
        """Extend the template context with service status and row counts."""
        context = super(IndexView, self).get_context_data(**kwargs)
        context['celery'] = celery_worker_is_running()
        context['catalog'] = catalog_is_running()
        # QuerySet.count() issues SELECT COUNT(*) in the database; the
        # original len(X.objects.filter()) fetched and materialised every row.
        context['jobs'] = Job.objects.count()
        context['policies'] = Policy.objects.count()
        context['clients'] = Client.objects.count()
        context['schedules'] = Schedule.objects.count()
        context['repositories'] = Repository.objects.count()
        return context
index = IndexView.as_view()

urlpatterns = [
    url(r'^$', index, name='index'),
    # Fallback: serve static assets for any other path.
    re_path(r'^(?P<path>.*)$', views.serve),
]
| 1.992188 | 2 |
pycrowipmodule/status_state.py | febalci/pycrowipmodule | 1 | 12773056 | '''Crow/AAP Alarm IP Module Feedback Class for alarm state'''
class StatusState:
    """Factories for the initial Crow/AAP alarm-panel state dictionaries.

    Each helper returns a freshly built, all-defaults state mapping keyed by
    1-based zone/area/output number.
    """

    # Zone State
    @staticmethod
    def get_initial_zone_state(maxZones):
        """Builds the proper zone state collection."""
        defaults = {'open': False, 'bypass': False, 'alarm': False, 'tamper': False}
        return {zone: {'status': dict(defaults), 'last_fault': 0}
                for zone in range(1, maxZones + 1)}

    # Area State
    @staticmethod
    def get_initial_area_state(maxAreas):
        """Builds the proper alarm state collection."""
        defaults = {'alarm': False, 'armed': False, 'stay_armed': False,
                    'disarmed': False, 'exit_delay': False, 'stay_exit_delay': False,
                    'alarm_zone': '', 'last_disarmed_by_user': '',
                    'last_armed_by_user': ''}
        return {area: {'status': dict(defaults)}
                for area in range(1, maxAreas + 1)}

    # Output State
    @staticmethod
    def get_initial_output_state(maxOutputs):
        """Builds the initial (all closed) output state collection."""
        return {output: {'status': {'open': False}}
                for output in range(1, maxOutputs + 1)}

    # System Status State
    @staticmethod
    def get_initial_system_state():
        """Builds the initial panel-wide status flags (healthy defaults)."""
        return {'status': {'mains': True, 'battery': True, 'tamper': False, 'line': True,
                           'dialler': True, 'ready': True, 'fuse': True, 'zonebattery': True,
                           'pendantbattery': True, 'codetamper': False}}
| 2.265625 | 2 |
create_venv.py | toopazo/toopazo_ulg | 0 | 12773057 | #!/usr/bin/env python3
import subprocess
import argparse
def increment_setup_version():
    """Bump the patch number of the ``version="0.0.N"`` line in ./setup.py.

    Reads setup.py from the current directory, finds the (last) line that
    contains ``version="0.0.``, increments N by one and rewrites the file
    in place.

    Raises:
        ValueError: if no matching version line is found.
    """
    filename = 'setup.py'
    # Read file
    with open(filename, 'r') as fd:
        line_arr = fd.readlines()

    # Search for the version line (last match wins, as before).
    version = None
    version_line = None
    pattern0 = "version=\"0.0."
    pattern1 = "\","
    for i, line in enumerate(line_arr):
        if pattern0 in line:
            print("[find_version] line {}: {}".format(i + 1, line))
            version = int(line.strip().replace(pattern0, "").replace(pattern1, ""))
            version_line = i
            print("[find_version] version {}".format(version))
            print("[find_version] version_line {}".format(version_line))

    if version_line is None:
        raise ValueError("no version=\"0.0.N\" line found in {}".format(filename))

    # Modify specific line.  Bug fix: the original did
    # orig_line.replace(str(version), str(version + 1)), which replaces EVERY
    # occurrence of the digit -- e.g. version="0.0.0" became version="1.1.1".
    # Anchoring the replacement on the full pattern touches only the patch part.
    orig_line = line_arr[version_line]
    new_line = orig_line.replace(pattern0 + str(version),
                                 pattern0 + str(version + 1))
    line_arr[version_line] = new_line

    # Write everything again
    with open(filename, 'w') as file:
        file.writelines(line_arr)
def test_subprocess():
    """Smoke-test subprocess.run by asking the interpreter for its version.

    Fixes the stray backtick the original had in ``'--`version'`` and uses
    ``sys.executable`` instead of a bare ``python`` so it also works on
    systems where only ``python3`` is installed.  stderr is merged into
    stdout, as before.
    """
    import sys
    cmd = [sys.executable, '--version']
    result = subprocess.run(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    print('result.stdout %s' % result.stdout)
# cmd = 'ls /usr/bin/python'
def check_python3_version(max_minor=13, min_minor=7):
    """Return the newest ``python3.X`` interpreter name available on PATH.

    Probes ``python3.<max_minor>`` down to ``python3.<min_minor>`` and
    returns the first name that can be executed.  The original hard-coded
    3.9..3.7 and therefore missed modern interpreters; the range is now
    parameterised (the defaults are a superset of the old range, so any
    interpreter the old code could find is still found).

    Returns:
        The interpreter name, e.g. ``'python3.11'``, or the sentinel string
        ``'No python 3.x found'``.
    """
    for minor in range(max_minor, min_minor - 1, -1):
        pver = 'python3.%s' % minor
        cmd = [pver, '--version']
        try:
            subprocess.run(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
            return pver
        except FileNotFoundError:
            # This minor version is not installed; try the next one down.
            continue
    return 'No python 3.x found'
def exec_cmd_and_report(cmd_str, decode):
    """Run *cmd_str* and echo the command plus its combined stdout/stderr.

    When *decode* is true the raw output bytes are additionally decoded and
    printed as text.
    """
    completed = subprocess.run(
        cmd_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    print('cmd %s' % cmd_str)
    print('result.stdout %s' % completed.stdout)
    if not decode:
        return
    print('result_str')
    print(completed.stdout.decode())
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Operations related to Python venv')
    parser.add_argument('--pver', action='store_true',
                        help='Get highest python3 version')
    parser.add_argument('--iacp', action='store_true',
                        help='Increment version, '
                             'add any changes, commit and push to repo')
    args = parser.parse_args()

    if args.pver:
        # Report the newest python3.x interpreter found on PATH.
        upver = check_python3_version()
        print(upver)

    if args.iacp:
        # "iacp" = Increment version, Add, Commit, Push.
        increment_setup_version()
        exec_cmd_and_report(['git', 'status'], decode=True)
        exec_cmd_and_report(['git', 'add', '.'], decode=True)
        exec_cmd_and_report(['git', 'commit', '-m',
                             '\"automated commit using iacp\"'], decode=True)
        exec_cmd_and_report(['git', 'push'], decode=True)
        exec_cmd_and_report(['git', 'status'], decode=True)
| 2.90625 | 3 |
Solutions/284.py | ruppysuppy/Daily-Coding-Problem-Solutions | 70 | 12773058 | <reponame>ruppysuppy/Daily-Coding-Problem-Solutions<filename>Solutions/284.py<gh_stars>10-100
"""
Problem:
Two nodes in a binary tree can be called cousins if they are on the same level of the
tree but have different parents. For example, in the following diagram 4 and 6 are
cousins.
1
/ \
2 3
/ \ \
4 5 6
Given a binary tree and a particular node, find all cousins of that node.
"""
from typing import List, Optional
from DataStructures.Tree import BinaryTree, Node
def get_depth_dfs_helper(
    node: Node, search_node_val: int, depth: int, parent_val: Optional[Node] = None
) -> "Tuple[Optional[int], Optional[Node]]":
    """Pre-order DFS for the node holding ``search_node_val``.

    Returns ``(depth, parent_node)`` of the first match, or ``(None, None)``
    when the value is absent.  Note: despite its name, ``parent_val``
    carries the parent *node* object, not its value.
    """
    if node.val == search_node_val:
        return depth, parent_val
    if node.left:
        left_depth, parent = get_depth_dfs_helper(
            node.left, search_node_val, depth + 1, node
        )
        # Recursive depths are >= 1, so truthiness is a safe found-check here.
        if left_depth:
            return left_depth, parent
    if node.right:
        right_depth, parent = get_depth_dfs_helper(
            node.right, search_node_val, depth + 1, node
        )
        if right_depth:
            return right_depth, parent
    return None, None
def get_node_by_depth(
    node: Node,
    curr_depth: int,
    depth: int,
    search_node_val: int,
    accumulator: List[int],
    ignore_parent_val: Optional[Node],
    parent_val: Optional[Node] = None,
) -> None:
    """Append to ``accumulator`` the values of all nodes at ``depth``.

    Skips the searched node and every child of ``ignore_parent_val`` (the
    searched node's parent), so only cousins are collected.  Despite their
    names, ``ignore_parent_val``/``parent_val`` hold Node objects; equality
    relies on Node comparison (identity unless Node defines __eq__).
    """
    # getting all nodes where the depth is equal to the input depth (except the node
    # with black-listed parent ["ignore_parent_val"])
    if parent_val == ignore_parent_val:
        return
    if node.val == search_node_val:
        return
    if curr_depth == depth:
        accumulator.append(node.val)
        return
    if node.left:
        get_node_by_depth(
            node.left,
            curr_depth + 1,
            depth,
            search_node_val,
            accumulator,
            ignore_parent_val,
            node,
        )
    if node.right:
        get_node_by_depth(
            node.right,
            curr_depth + 1,
            depth,
            search_node_val,
            accumulator,
            ignore_parent_val,
            node,
        )
def dfs_get_depth(tree: BinaryTree, search_node_val: int) -> "Tuple[Optional[int], Optional[Node]]":
    """Return (depth, parent node) of the node holding ``search_node_val``."""
    return get_depth_dfs_helper(tree.root, search_node_val, 0)
def get_cousins(tree: BinaryTree, node_val: int) -> List[int]:
    """Return the values of all cousins of the node holding ``node_val``.

    Cousins are nodes at the same depth with a different parent.

    Raises:
        ValueError: if ``node_val`` is not present in the tree.
    """
    depth, parent = dfs_get_depth(tree, node_val)
    if depth is None:
        raise ValueError("Node not present in Tree")
    cousins = []
    # Collect all same-depth nodes, skipping the searched node's own parent.
    get_node_by_depth(tree.root, 0, depth, node_val, cousins, parent)
    return cousins
if __name__ == "__main__":
    # Build the example tree from the problem statement.
    tree = BinaryTree()
    tree.root = Node(1)
    tree.root.left = Node(2)
    tree.root.right = Node(3)
    tree.root.left.left = Node(4)
    tree.root.left.right = Node(5)
    tree.root.right.right = Node(6)

    print(tree)
    print(get_cousins(tree, 4))  # 6 is the only cousin of 4

    tree.root.right.left = Node(7)

    print(tree)
    print(get_cousins(tree, 4))  # now both 6 and 7
"""
SPECS:
TIME COMPLEXITY: O(n)
SPACE COMPLEXITY: O(n)
"""
| 3.8125 | 4 |
src/bio2bel_ddr/__init__.py | bio2bel/ddr | 1 | 12773059 | # -*- coding: utf-8 -*-
"""Bio2BEL DDR."""
from .ddr import Manager
| 0.996094 | 1 |
loss_functions.py | xdr940/cc | 0 | 12773060 | # Author: <NAME>
# Copyright (c) 2019, <NAME>
# All rights reserved.
# based on github.com/ClementPinard/SfMLearner-Pytorch
import torch
from torch import nn
from torch.autograd import Variable
from inverse_warp import inverse_warp, flow_warp
from ssim import ssim
from process_functions import depth_occlusion_masks,occlusion_masks
from utils import robust_l1,logical_or,weighted_binary_cross_entropy
from utils import tensor2array
import matplotlib.pyplot as plt
#loss1 E_R recunstruction loss
def photometric_reconstruction_loss(tgt_img, ref_imgs, intrinsics, intrinsics_inv, depth, explainability_mask, pose, rotation_mode='euler', padding_mode='zeros', lambda_oob=0, qch=0.5, wssim=0.5):
    """Rigid photometric reconstruction loss (E_R).

    Each reference image is inverse-warped into the target view with the
    predicted depth and relative camera pose; the discrepancy against the
    target image is scored with a robust L1 term blended with SSIM (weight
    ``wssim``), gated by the explainability mask and depth-derived occlusion
    masks.  ``depth`` and ``explainability_mask`` may be single tensors or
    multi-scale lists.
    """
    def one_scale(depth, explainability_mask, occ_masks):
        # Loss at one pyramid scale: images and intrinsics are rescaled to
        # the depth map's resolution.
        assert(explainability_mask is None or depth.size()[2:] == explainability_mask.size()[2:])
        assert(pose.size(1) == len(ref_imgs))

        reconstruction_loss = 0
        b, _, h, w = depth.size()
        downscale = tgt_img.size(2)/h

        tgt_img_scaled = nn.functional.adaptive_avg_pool2d(tgt_img, (h, w))
        ref_imgs_scaled = [nn.functional.adaptive_avg_pool2d(ref_img, (h, w)) for ref_img in ref_imgs]
        intrinsics_scaled = torch.cat((intrinsics[:, 0:2]/downscale, intrinsics[:, 2:]), dim=1)
        intrinsics_scaled_inv = torch.cat((intrinsics_inv[:, :, 0:2]*downscale, intrinsics_inv[:, :, 2:]), dim=2)

        weight = 1.
        # One warp per reference view.
        for i, ref_img in enumerate(ref_imgs_scaled):
            current_pose = pose[:, i]

            ref_img_warped = inverse_warp(ref_img, depth[:,0], current_pose, intrinsics_scaled, intrinsics_scaled_inv, rotation_mode, padding_mode)
            # 1 where the warp landed in-bounds (all-zero pixels mark invalid samples); (B,1,H,W).
            valid_pixels = 1 - (ref_img_warped == 0).prod(1, keepdim=True).type_as(ref_img_warped)
            diff = (tgt_img_scaled - ref_img_warped) * valid_pixels
            # Translated: ssim(a, b) returns a same-shaped element-wise map;
            # values closer to 1 mean more similar, 0 is the minimum.
            ssim_loss = 1 - ssim(tgt_img_scaled, ref_img_warped) * valid_pixels
            # Average normalisation over valid pixels.  Translated original
            # note: "avg; switch to min as in monodepth??" -- unconfirmed.
            oob_normalization_const = valid_pixels.nelement()/valid_pixels.sum()

            # NaN guard (NaN != NaN would fail this equality).
            assert((oob_normalization_const == oob_normalization_const).item() == 1)

            if explainability_mask is not None:
                diff = diff * (1 - occ_masks[:,i:i+1])* explainability_mask[:,i:i+1].expand_as(diff)
                ssim_loss = ssim_loss * (1 - occ_masks[:,i:i+1])* explainability_mask[:,i:i+1].expand_as(ssim_loss)
            else:
                diff = diff *(1-occ_masks[:,i:i+1]).expand_as(diff)
                ssim_loss = ssim_loss*(1-occ_masks[:,i:i+1]).expand_as(ssim_loss)

            reconstruction_loss += (1- wssim)*weight*oob_normalization_const*(robust_l1(diff, q=qch) + wssim*ssim_loss.mean()) + lambda_oob*robust_l1(1 - valid_pixels, q=qch)
            assert((reconstruction_loss == reconstruction_loss).item() == 1)
            #weight /= 2.83
        return reconstruction_loss

    # Wrap single-scale inputs so the multi-scale loop below works uniformly.
    if type(explainability_mask) not in [tuple, list]:
        explainability_mask = [explainability_mask]
    if type(depth) not in [list, tuple]:
        depth = [depth]

    loss = 0
    for d, mask in zip(depth, explainability_mask):
        occ_masks = depth_occlusion_masks(d, pose, intrinsics, intrinsics_inv)
        loss += one_scale(d, mask, occ_masks)
    return loss
def photometric_reconstruction_loss_robust(tgt_img, ref_imgs, intrinsics, intrinsics_inv, depth, explainability_mask, pose, rotation_mode='euler', padding_mode='zeros', lambda_oob=0, qch=0.5, wssim=0.5):
    """Alias of :func:`photometric_reconstruction_loss`.

    The original body was a statement-for-statement copy of
    ``photometric_reconstruction_loss`` (only comments differed), so it now
    delegates to the single implementation to prevent the two from drifting
    apart.  Signature and behaviour are unchanged.
    """
    return photometric_reconstruction_loss(
        tgt_img, ref_imgs, intrinsics, intrinsics_inv, depth,
        explainability_mask, pose, rotation_mode=rotation_mode,
        padding_mode=padding_mode, lambda_oob=lambda_oob, qch=qch,
        wssim=wssim)
#loss2 E_M
def explainability_loss(mask):
    """Regulariser (E_M) pulling explainability masks towards all-ones.

    Binary cross-entropy of each mask against a constant target of 1: the
    loss grows as more pixels are marked "unexplainable".  A list/tuple of
    per-scale masks is summed.
    """
    masks = mask if type(mask) in [tuple, list] else [mask]
    total = 0
    for scaled in masks:
        target = torch.ones(1).expand_as(scaled).type_as(scaled)
        total += nn.functional.binary_cross_entropy(scaled, target)
    return total
#loss_3 E_S smooth loss
def smooth_loss(pred_disp):
    """Second-order smoothness prior (E_S) on (multi-scale) disparity maps.

    Penalises the mean absolute second spatial derivatives (dxx, dxy, dyx,
    dyy) of each disparity map; coarser scales contribute with geometrically
    decreasing weight (divided by ~2*sqrt(2) per scale).
    """
    def spatial_gradients(t):
        # Forward differences: returns (d/dx along W, d/dy along H).
        return (t[:, :, :, 1:] - t[:, :, :, :-1],
                t[:, :, 1:] - t[:, :, :-1])

    disps = pred_disp if type(pred_disp) in [tuple, list] else [pred_disp]
    total = 0
    weight = 1.
    for disp in disps:
        dx, dy = spatial_gradients(disp)
        dx2, dxdy = spatial_gradients(dx)
        dydx, dy2 = spatial_gradients(dy)
        total += weight * (dx2.abs().mean() + dxdy.abs().mean()
                           + dydx.abs().mean() + dy2.abs().mean())
        weight /= 2.3  # ~2*sqrt(2)
    return total
#loss4 E_f, flow_loss
def photometric_flow_loss(tgt_img, ref_imgs, flows, explainability_mask, lambda_oob=0, qch=0.5, wssim=0.5):
    """Flow photometric loss (E_F).

    Warps each reference image back to the target view with the predicted
    optical flow and penalises the robust-L1 + SSIM discrepancy, gated by
    the explainability mask and flow-consistency occlusion masks.

    Args (translated from the original Chinese docstring, batch size B):
        flows: [flow_fwd, flow_bwd]; each either a (B,2,H,W) tensor or a
            multi-scale list of such tensors (e.g. (B,2,128,512) .. (B,2,4,16)).
        explainability_mask: per-scale list of (B,2,H,W) masks (or a single
            tensor); channel i gates reference image i.
    """
    def one_scale(explainability_mask, occ_masks, flows):
        # Photometric loss at a single pyramid scale.
        assert(explainability_mask is None or flows[0].size()[2:] == explainability_mask.size()[2:])
        assert(len(flows) == len(ref_imgs))

        reconstruction_loss = 0
        b, _, h, w = flows[0].size()
        downscale = tgt_img.size(2)/h

        tgt_img_scaled = nn.functional.adaptive_avg_pool2d(tgt_img, (h, w))
        ref_imgs_scaled = [nn.functional.adaptive_avg_pool2d(ref_img, (h, w)) for ref_img in ref_imgs]

        weight = 1.

        for i, ref_img in enumerate(ref_imgs_scaled):
            current_flow = flows[i]
            # Warp the reference image into the target view with the flow
            # (the original comment cites formula 48, w_c).
            ref_img_warped = flow_warp(ref_img, current_flow)
            valid_pixels = 1 - (ref_img_warped == 0).prod(1, keepdim=True).type_as(ref_img_warped)
            diff = (tgt_img_scaled - ref_img_warped) * valid_pixels
            ssim_loss = 1 - ssim(tgt_img_scaled, ref_img_warped) * valid_pixels
            oob_normalization_const = valid_pixels.nelement()/valid_pixels.sum()

            if explainability_mask is not None:
                diff = diff * explainability_mask[:,i:i+1].expand_as(diff)
                ssim_loss = ssim_loss * explainability_mask[:,i:i+1].expand_as(ssim_loss)

            if occ_masks is not None:
                diff = diff *(1-occ_masks[:,i:i+1]).expand_as(diff)
                ssim_loss = ssim_loss*(1-occ_masks[:,i:i+1]).expand_as(ssim_loss)

            reconstruction_loss += (1- wssim)*weight*oob_normalization_const*(robust_l1(diff, q=qch) + wssim*ssim_loss.mean()) + lambda_oob*robust_l1(1 - valid_pixels, q=qch)
            #weight /= 2.83
            # NaN guard (NaN != NaN would fail this equality).
            assert((reconstruction_loss == reconstruction_loss).item() == 1)
        return reconstruction_loss

    if type(flows[0]) not in [tuple, list]:
        # Single-scale input: wrap everything into one-element lists.
        if explainability_mask is not None:
            explainability_mask = [explainability_mask]
        flows = [[uv] for uv in flows]

    loss = 0
    for i in range(len(flows[0])):  # iterate over scales
        flow_at_scale = [uv[i] for uv in flows]  # [fwd, bwd] at this scale, each (B,2,h,w)
        # Forward/backward consistency masks; index 0 is fwd, 1 is bwd.
        occ_mask_at_scale_bw, occ_mask_at_scale_fw = occlusion_masks(flow_at_scale[0], flow_at_scale[1])
        # Each mask has shape (B, h, w); stacked to (B, 2, h, w).
        occ_mask_at_scale = torch.stack((occ_mask_at_scale_bw, occ_mask_at_scale_fw), dim=1)
        # occ_mask_at_scale = None
        loss += one_scale(explainability_mask[i], occ_mask_at_scale, flow_at_scale)

    return loss
#loss_5 E_C 修改过one_scale loss func
def consensus_depth_flow_mask(explainability_mask, census_mask_bwd, census_mask_fwd, exp_masks_bwd_target, exp_masks_fwd_target, THRESH, wbce):
    """Consensus loss (E_C) tying explainability masks to census masks.

    A pixel is treated as rigid when its census value is below ``THRESH``
    OR its target explainability mask marks it so; the predicted 4-channel
    explainability masks are then scored against that fixed target with a
    weighted binary cross-entropy (weights ``[wbce, 1 - wbce]``).  Accepts
    single tensors or per-scale lists.
    """
    # Loop over each scale
    def one_scale(explainability_mask, census_mask_bwd, census_mask_fwd, exp_masks_bwd_target, exp_masks_fwd_target, THRESH, wbce):
        #for i in range(len(explainability_mask)):
        #exp_mask_one_scale = explainability_mask
        census_mask_fwd_one_scale = (census_mask_fwd < THRESH).type_as(explainability_mask).prod(dim=1, keepdim=True)
        census_mask_bwd_one_scale = (census_mask_bwd < THRESH).type_as(explainability_mask).prod(dim=1, keepdim=True)
        # census_mask_bwd_one_scale: tensor (B, 1, H, W)

        #Using the pixelwise consensus term
        exp_fwd_target_one_scale = exp_masks_fwd_target
        exp_bwd_target_one_scale = exp_masks_bwd_target
        census_mask_fwd_one_scale = logical_or(census_mask_fwd_one_scale, exp_fwd_target_one_scale)
        census_mask_bwd_one_scale = logical_or(census_mask_bwd_one_scale, exp_bwd_target_one_scale)

        # OR gate for constraining only rigid pixels
        # exp_mask_fwd_one_scale = (exp_mask_one_scale[:,2].unsqueeze(1) > 0.5).type_as(exp_mask_one_scale)
        # exp_mask_bwd_one_scale = (exp_mask_one_scale[:,1].unsqueeze(1) > 0.5).type_as(exp_mask_one_scale)
        # census_mask_fwd_one_scale = 1- (1-census_mask_fwd_one_scale)*(1-exp_mask_fwd_one_scale)
        # census_mask_bwd_one_scale = 1- (1-census_mask_bwd_one_scale)*(1-exp_mask_bwd_one_scale)

        # Targets are fixed: no gradient should flow into the census masks.
        census_mask_fwd_one_scale = Variable(census_mask_fwd_one_scale.data, requires_grad=False)
        census_mask_bwd_one_scale = Variable(census_mask_bwd_one_scale.data, requires_grad=False)

        # Channel layout mirrors the 4-channel explainability mask:
        # (bwd, bwd, fwd, fwd).
        rigidity_mask_combined = torch.cat((census_mask_bwd_one_scale, census_mask_bwd_one_scale,
                                            census_mask_fwd_one_scale, census_mask_fwd_one_scale), dim=1)
        return weighted_binary_cross_entropy(explainability_mask, rigidity_mask_combined.type_as(explainability_mask), [wbce, 1-wbce])

    assert (len(explainability_mask) == len(census_mask_bwd))
    assert (len(explainability_mask) == len(census_mask_fwd))
    loss = 0.
    if type(explainability_mask) not in [tuple, list]:
        return one_scale(explainability_mask, census_mask_bwd, census_mask_fwd, exp_masks_bwd_target, exp_masks_fwd_target, THRESH, wbce)
    else:
        for i in range(len(explainability_mask)):
            loss += one_scale(explainability_mask[i], census_mask_bwd[i], census_mask_fwd[i], exp_masks_bwd_target[i], exp_masks_fwd_target[i], THRESH, wbce)
        return loss
#note use
def gaussian_explainability_loss(mask):
    """Gaussian prior on explainability masks (marked unused in the source).

    Evaluates exp(-mean((m - 0.5)^2) / 0.15) per mask, peaking at 1 when
    the mask hovers around 0.5, and sums over scales -- i.e. it penalises
    undecided masks.
    """
    masks = mask if type(mask) in [tuple, list] else [mask]
    total = 0
    for scaled in masks:
        total += torch.exp(-torch.mean((scaled - 0.5).pow(2)) / 0.15)
    return total
def edge_aware_smoothness_loss(img, pred_disp):
    """Edge-aware first-order smoothness on (multi-scale) disparity maps.

    Disparity gradients are down-weighted where the (rescaled) image has
    strong gradients, so depth discontinuities may align with image edges.

    Args:
        img: reference image tensor (B, C, H, W).
        pred_disp: a disparity tensor or a list/tuple of per-scale tensors.
            Bug fix: a bare tensor is now wrapped in a list (matching
            smooth_loss); previously it was iterated over its batch
            dimension and crashed on the 4-way unpack.
    """
    # NOTE(review): gradient_x/gradient_y here difference along H and W
    # respectively -- the opposite naming to smooth_loss; kept for parity.
    def gradient_x(t):
        return t[:, :, :-1, :] - t[:, :, 1:, :]

    def gradient_y(t):
        return t[:, :, :, :-1] - t[:, :, :, 1:]

    def get_edge_smoothness(image, pred):
        pred_gradients_x = gradient_x(pred)
        pred_gradients_y = gradient_y(pred)

        image_gradients_x = gradient_x(image)
        image_gradients_y = gradient_y(image)

        # exp(-|image gradient|): strong image edges shrink the penalty there.
        weights_x = torch.exp(-torch.mean(torch.abs(image_gradients_x), 1, keepdim=True))
        weights_y = torch.exp(-torch.mean(torch.abs(image_gradients_y), 1, keepdim=True))

        smoothness_x = torch.abs(pred_gradients_x) * weights_x
        smoothness_y = torch.abs(pred_gradients_y) * weights_y
        return torch.mean(smoothness_x) + torch.mean(smoothness_y)

    if type(pred_disp) not in [tuple, list]:
        pred_disp = [pred_disp]

    loss = 0
    for scaled_disp in pred_disp:
        b, _, h, w = scaled_disp.size()
        scaled_img = nn.functional.adaptive_avg_pool2d(img, (h, w))
        loss += get_edge_smoothness(scaled_img, scaled_disp)
    return loss
class MaskedMSELoss(nn.Module):
    """Mean squared error computed only over pixels where target > 0.

    Non-positive target pixels are treated as missing ground truth and
    excluded from the average.  The last computed value is also cached on
    ``self.loss``.
    """

    def __init__(self):
        super(MaskedMSELoss, self).__init__()

    def forward(self, pred, target):
        assert pred.dim() == target.dim(), "inconsistent dimensions"
        mask = (target > 0).detach()
        residual = (target - pred)[mask]
        self.loss = residual.pow(2).mean()
        return self.loss
class MaskedL1Loss(nn.Module):
    """Mean absolute error over valid (target > 0) pixels, summed per scale.

    Accepts either single tensors or tuples/lists of per-scale tensors for
    both ``target`` and ``pred``.  Pixels with non-positive target values
    are treated as missing ground truth and excluded.

    Note the unconventional argument order ``forward(target, pred)``,
    preserved for backward compatibility with existing callers.
    """

    def __init__(self):
        super(MaskedL1Loss, self).__init__()

    def forward(self, target, pred):
        # Bug fix: previously only ``pred`` was wrapped in a list when a
        # bare tensor was passed, so zip() iterated over the rows of a bare
        # ``target`` tensor and produced shape-mismatched masks.  Wrap both
        # consistently.
        if type(pred) not in [tuple, list]:
            pred = [pred]
        if type(target) not in [tuple, list]:
            target = [target]
        loss = 0
        for scaled_pred, scaled_target in zip(pred, target):
            valid_mask = (scaled_target > 0).detach()
            diff = (scaled_target - scaled_pred)[valid_mask]
            loss += diff.abs().mean()
        return loss
class HistgramLoss(nn.Module):
    """L1 distance between normalized 256-bin histograms of two tensors.

    Values are assumed to lie in [0, 1].  Both inputs are detached, so no
    gradient flows through this loss.  ``self.loss`` caches the raw mean
    bin difference; the returned value is that mean scaled by 100.
    """

    def __init__(self):
        super(HistgramLoss, self).__init__()

    def forward(self, pred, target):
        assert pred.dim() == target.dim(), "inconsistent dimensions"
        n = pred.numel()
        pred_detached = pred.detach()
        target_detached = target.detach()
        # Normalized 256-bin histograms over [0, 1].
        hist_gt = torch.histc(target_detached.flatten(), bins=256, min=0, max=1).float() / n
        hist_pred = torch.histc(pred_detached.flatten(), bins=256, min=0, max=1).float() / n
        self.loss = (hist_gt - hist_pred).abs().mean()
        return self.loss * 100
class ComputeErrors(nn.Module):
    """Standard monocular-depth error metrics, averaged over a batch.

    ``forward(gt, pred, crop)`` expects ground truth and prediction scaled
    to [0, 1] (both are rescaled to [0, 255] internally).  Assumes shape
    (B, H, W) -- the crop indexing has no channel dimension; TODO confirm
    against callers.  Returns [abs_diff, abs_rel, sq_rel, a1, a2, a3] as
    batch averages.

    Fixes vs. original: no longer mutates the caller's tensors with
    in-place ``*= 255``; the dead, never-updated ``epe`` accumulator is
    removed (it was never returned either).
    """

    def __init__(self):
        super(ComputeErrors, self).__init__()

    def forward(self, gt, pred, crop):
        # Out-of-place rescaling so the caller's tensors stay untouched.
        gt = gt * 255
        pred = pred * 255
        abs_diff, abs_rel, sq_rel, a1, a2, a3 = 0, 0, 0, 0, 0, 0
        batch_size = gt.size(0)
        # Crop used by Garg ECCV16 to reproduce Eigen NIPS14 results:
        # start from an all-False mask the size of one sample and enable
        # only the central crop region.
        if crop:
            crop_mask = gt[0] != gt[0]
            y1, y2 = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
            x1, x2 = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
            crop_mask[y1:y2, x1:x2] = 1
        for current_gt, current_pred in zip(gt, pred):
            valid = (current_gt > 0) & (current_gt < 255)
            if crop:
                valid = valid & crop_mask
            valid_gt = current_gt[valid]
            valid_pred = current_pred[valid].clamp(1e-3, 255)
            # Median scaling: align the prediction scale to ground truth.
            valid_pred = valid_pred * torch.median(valid_gt) / torch.median(valid_pred)
            thresh = torch.max((valid_gt / valid_pred), (valid_pred / valid_gt))
            a1 += (thresh < 1.25).float().mean()
            a2 += (thresh < 1.25 ** 2).float().mean()
            a3 += (thresh < 1.25 ** 3).float().mean()
            diff = valid_gt - valid_pred
            abs_diff += torch.mean(torch.abs(diff))
            abs_rel += torch.mean(torch.abs(diff) / valid_gt)
            sq_rel += torch.mean((diff ** 2) / valid_gt)
        return [metric / batch_size for metric in [abs_diff, abs_rel, sq_rel, a1, a2, a3]]
def compute_errors2(gt, pred, crop=False):
    """Depth error metrics: [abs_diff, abs_rel%, sq_rel%, a1, a2, a3, epe].

    Ground-truth values outside (0, 80) are ignored; predictions are
    clamped to [1e-3, 80] and median-scaled to the ground truth.  Relative
    errors are reported as percentages; results are batch averages.
    """
    abs_diff = abs_rel = sq_rel = a1 = a2 = a3 = epe = 0
    batch_size = gt.size(0)
    # Crop used by Garg ECCV16 to reproduce Eigen NIPS14 results: build an
    # all-False mask the size of one sample, then enable the central crop.
    if crop:
        crop_mask = gt[0] != gt[0]
        y1, y2 = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
        x1, x2 = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
        crop_mask[y1:y2, x1:x2] = 1
    for sample_gt, sample_pred in zip(gt, pred):
        valid = (sample_gt > 0) & (sample_gt < 80)
        if crop:
            valid = valid & crop_mask
        valid_gt = sample_gt[valid]
        valid_pred = sample_pred[valid].clamp(1e-3, 80)
        # Median scaling: align the prediction scale to ground truth.
        valid_pred = valid_pred * torch.median(valid_gt) / torch.median(valid_pred)
        ratio = torch.max(valid_gt / valid_pred, valid_pred / valid_gt)
        a1 += (ratio < 1.25).float().mean()
        a2 += (ratio < 1.25 ** 2).float().mean()
        a3 += (ratio < 1.25 ** 3).float().mean()
        diff = valid_gt - valid_pred
        abs_diff += torch.mean(torch.abs(diff))
        abs_rel += torch.mean(torch.abs(diff) / valid_gt) * 100
        sq_rel += torch.mean((diff ** 2) / valid_gt) * 100
        epe += torch.mean(diff ** 2)
    return [metric / batch_size for metric in [abs_diff, abs_rel, sq_rel, a1, a2, a3, epe]]
def VGS_loss(gt,pred):
    """Placeholder -- intentionally unimplemented (always returns None).

    TODO: implement or remove; no caller behavior can be inferred from here.
    """
    pass
| 1.820313 | 2 |
src/bt_utils/handle_sqlite.py | zaanposni/umfrageBot | 6 | 12773061 | <filename>src/bt_utils/handle_sqlite.py<gh_stars>1-10
import os.path
import sqlite3
from .console import Console
from .config import cfg
from .get_content import content_dir
SHL = Console(prefix="handleSQLITE")
BASE_PATH = content_dir
DB_PATH = "bundestag.db"
class DatabaseHandler:
    """SQLite-backed store counting role-reaction events per Discord user.

    Schema: a single ``users`` table with a ``user_id`` primary key plus
    one integer counter column per role.  Every method is a no-op when the
    ``disable_database`` config flag is set.

    Fixes vs. original: role names (which come from Discord and may
    contain arbitrary characters, including double quotes) are now
    properly escaped when used as SQL identifiers; user ids are bound as
    query parameters instead of being concatenated into SQL; and
    ``create_structure`` no longer emits invalid SQL for an empty role
    list.
    """
    # Set True to log each reaction write via SHL.
    debug = False

    @staticmethod
    def _quote_ident(name):
        """Quote an SQL identifier, doubling embedded double quotes
        (SQLite's identifier-escaping rule)."""
        return '"' + str(name).replace('"', '""') + '"'

    def __init__(self):
        """Open the SQLite database unless databases are disabled."""
        if cfg.get("disable_database"):
            return
        self.con = sqlite3.connect(os.path.join(BASE_PATH, DB_PATH))

    def create_structure(self, roles):
        """Create the ``users`` table with one integer column per role."""
        if cfg.get("disable_database"):
            return
        cursor = self.con.cursor()
        role_entries = ", ".join(self._quote_ident(role) + " integer" for role in roles)
        if role_entries:
            role_entries = ", " + role_entries
        # create user table
        statement = "CREATE TABLE IF NOT EXISTS users(user_id integer PRIMARY KEY" + role_entries + ")"
        cursor.execute(statement)
        self.con.commit()

    def update_columns(self, roles):
        """Add a counter column for any role not yet present in ``users``."""
        if cfg.get("disable_database"):
            return
        cursor = self.con.cursor()
        cursor.execute("PRAGMA table_info('users')")
        existing = {col[1] for col in cursor.fetchall()}
        for role in roles:
            if role not in existing:
                # add new role to table, defaulting the counter to 0
                cursor.execute("ALTER TABLE users ADD " + self._quote_ident(role) + " integer DEFAULT 0")
                if self.debug: SHL.output("Adding new role to users table " + role)
        self.con.commit()

    def add_user(self, uid, roles):
        """Insert a new user row with all role counters initialised to 0."""
        if cfg.get("disable_database"):
            return
        cursor = self.con.cursor()
        row = (uid,) + (0,) * len(roles)
        placeholders = ", ".join("?" for _ in row)
        cursor.execute("INSERT INTO users VALUES (" + placeholders + ")", row)
        self.con.commit()

    def get_all_users(self):
        """Return every row of the ``users`` table."""
        if cfg.get("disable_database"):
            return
        cursor = self.con.cursor()
        cursor.execute('SELECT * FROM users')
        rows = cursor.fetchall()
        return rows

    def get_specific_user(self, uid):
        """Return the row for ``uid``, or an empty tuple if not present."""
        if cfg.get("disable_database"):
            return
        cursor = self.con.cursor()
        # Parameterized query instead of string concatenation.
        cursor.execute('SELECT * FROM users WHERE user_id = ?', (uid,))
        user = cursor.fetchall()
        if len(user): return user[0]  # TODO: raise custom error and catch it in reactions.py line 21
        return ()

    def add_reaction(self, uid, role_reaction):
        """Increment the ``role_reaction`` counter for ``uid``."""
        if cfg.get("disable_database"):
            return
        cursor = self.con.cursor()
        col = self._quote_ident(role_reaction)
        cursor.execute("UPDATE users SET " + col + " = " + col + " + 1 WHERE user_id = ?", (uid,))
        self.con.commit()
        if self.debug: SHL.output("added reaction to db")

    def remove_reaction(self, uid, role_reaction):
        """Decrement the ``role_reaction`` counter for ``uid``."""
        if cfg.get("disable_database"):
            return
        cursor = self.con.cursor()
        col = self._quote_ident(role_reaction)
        cursor.execute("UPDATE users SET " + col + " = " + col + " - 1 WHERE user_id = ?", (uid,))
        self.con.commit()
        if self.debug: SHL.output("removed reaction from db")

    def __del__(self):
        """Close the connection when the handler is garbage-collected."""
        if cfg.get("disable_database"):
            return
        self.con.close()
| 2.78125 | 3 |
tests/pgsmo_tests/test_object_server.py | DaeunYim/pgtoolsservice | 33 | 12773062 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import unittest
import unittest.mock as mock
import urllib.parse as parse
import inflection
from ossdbtoolsservice.driver.types.psycopg_driver import PostgreSQLConnection
from pgsmo.objects.database.database import Database
from pgsmo.objects.server.server import Server
from smo.common.node_object import NodeCollection, NodeLazyPropertyCollection
from tests.pgsmo_tests.utils import MockPGServerConnection
from tests.utils import MockPsycopgConnection
class TestServer(unittest.TestCase):
    """Unit tests for the pgsmo ``Server`` object: construction, lazily
    loaded recovery properties, maintenance-DB lookup, refresh, and URN
    (uniform resource name) handling."""

    # Row shape returned by the recovery-state query the server runs lazily.
    CHECK_RECOVERY_ROW = {
        'inrecovery': True,
        'isreplaypaused': True
    }

    def test_init(self):
        """Constructing a Server wires up connection, host/port/db and
        lazily loaded child collections."""
        # If: I construct a new server object
        host = 'host'
        port = '1234'
        dbname = 'dbname'
        mock_conn = MockPGServerConnection(None, name=dbname, host=host, port=port)
        server = Server(mock_conn)

        # Then:
        # ... The assigned properties should be assigned
        self.assertIsInstance(server._conn, MockPGServerConnection)
        self.assertIsInstance(server.connection, MockPGServerConnection)
        self.assertIs(server.connection, mock_conn)
        self.assertEqual(server._host, host)
        self.assertEqual(server.host, host)
        self.assertEqual(server._port, port)
        self.assertEqual(server.port, port)
        self.assertEqual(server._maintenance_db_name, dbname)
        self.assertEqual(server.maintenance_db_name, dbname)
        self.assertTupleEqual(server.version, server._conn.server_version)

        # ... Recovery options should be a lazily loaded thing
        self.assertIsInstance(server._recovery_props, NodeLazyPropertyCollection)

        for key, collection in server._child_objects.items():
            # ... The child object collection is a NodeCollection
            self.assertIsInstance(collection, NodeCollection)

            # ... There should be a property mapped to the node collection
            # (e.g. 'Database' -> server.databases)
            prop = getattr(server, inflection.pluralize(key.lower()))
            self.assertIs(prop, collection)

    def test_recovery_properties(self):
        """Recovery properties are populated from the recovery query."""
        # Setup:
        # NOTE: We're *not* mocking out the template rendering b/c this will verify that there's a template
        # ... Create a mock query execution that will return the properties
        mock_exec_dict = mock.MagicMock(return_value=([], [TestServer.CHECK_RECOVERY_ROW]))

        # ... Create an instance of the class and override the connection
        mock_connection = MockPsycopgConnection({'host': 'host', 'dbname': 'dbname'})
        with mock.patch('psycopg2.connect', new=mock.Mock(return_value=mock_connection)):
            pg_connection = PostgreSQLConnection({})
        pg_connection.execute_dict = mock_exec_dict
        obj = Server(pg_connection)

        # If: I retrieve all the values in the recovery properties
        # Then:
        # ... The properties based on the query row should be available
        self.assertEqual(obj.in_recovery, TestServer.CHECK_RECOVERY_ROW['inrecovery'])
        self.assertEqual(obj.wal_paused, TestServer.CHECK_RECOVERY_ROW['isreplaypaused'])

    def test_maintenance_db(self):
        """maintenance_db is resolved through the database collection
        using the connection's database name."""
        # Setup:
        # ... Create a server object that has a connection
        obj = Server(MockPGServerConnection(None, name='dbname'))

        # ... Mock out the database lazy loader's indexer
        mock_db = {}
        mock_db_collection = mock.Mock()
        mock_db_collection.__getitem__ = mock.MagicMock(return_value=mock_db)
        obj._child_objects[Database.__name__] = mock_db_collection

        # If: I retrieve the maintenance db for the server
        maintenance_db = obj.maintenance_db

        # Then:
        # ... It must have come from the mock handler
        self.assertIs(maintenance_db, mock_db)
        obj._child_objects[Database.__name__].__getitem__.assert_called_once_with('dbname')

    def test_refresh(self):
        """refresh() resets every child collection and the lazy props."""
        # Setup:
        # ... Create a server object that has a connection
        obj = Server(MockPGServerConnection())

        # ... Mock out the reset methods on the various collections
        obj.databases.reset = mock.MagicMock()
        obj.roles.reset = mock.MagicMock()
        obj.tablespaces.reset = mock.MagicMock()
        obj._recovery_props.reset = mock.MagicMock()

        # If: I refresh the server
        obj.refresh()

        # Then: The collections should have been reset
        obj.databases.reset.assert_called_once()
        obj.roles.reset.assert_called_once()
        obj.tablespaces.reset.assert_called_once()
        obj._recovery_props.reset.assert_called_once()

    def test_urn_base(self):
        """urn_base has the form //user@host:port and reflects the
        connection parameters."""
        # Setup:
        # ... Create a server object that has a connection
        server = Server(MockPGServerConnection())

        # If: I get the URN base for the server
        urn_base = server.urn_base

        # Then: The urn base should match the expected outcome
        urn_base_regex = re.compile(r'//(?P<user>.+)@(?P<host>.+):(?P<port>\d+)')
        urn_base_match = urn_base_regex.match(urn_base)
        self.assertIsNotNone(urn_base_match)
        self.assertEqual(urn_base_match.groupdict()['user'], server.connection.user_name)
        self.assertEqual(urn_base_match.groupdict()['host'], server.host)
        self.assertEqual(urn_base_match.groupdict()['port'], server.port)

    def test_get_obj_by_urn_empty(self):
        """None/empty/whitespace URNs raise ValueError."""
        # Setup: Create a server object
        server = Server(MockPGServerConnection())

        test_cases = [None, '', '\t \n\r']
        for test_case in test_cases:
            with self.assertRaises(ValueError):
                # If: I get an object by its URN without providing a URN
                # Then: I should get an exception
                server.get_object_by_urn(test_case)

    def test_get_obj_by_urn_wrong_server(self):
        """A URN whose authority does not match this server raises."""
        # Setup: Create a server object
        server = Server(MockPGServerConnection())

        with self.assertRaises(ValueError):
            # If: I get an object by its URN with a URN that is invalid for the server
            # Then: I should get an exception
            invalid_urn = '//this<EMAIL>.wrong.urn:456/Database.123/'
            server.get_object_by_urn(invalid_urn)

    def test_get_obj_by_urn_wrong_collection(self):
        """A URN pointing at an unknown child collection raises."""
        # Setup: Create a server object
        server = Server(MockPGServerConnection())

        with self.assertRaises(ValueError):
            # If: I get an object by its URN with a URN that points to an invalid path off the server
            # Then: I should get an exception
            invalid_urn = parse.urljoin(server.urn_base, 'Datatype.123/')
            server.get_object_by_urn(invalid_urn)

    def test_get_obj_by_urn_success(self):
        """A well-formed URN resolves to the registered child object."""
        # Setup: Create a server with a database under it
        server = Server(MockPGServerConnection())
        mock_db = Database(server, 'test_db')
        mock_db._oid = 123
        server._child_objects[Database.__name__] = {123: mock_db}

        # If: I get an object by its URN
        urn = parse.urljoin(server.urn_base, '/Database.123/')
        obj = server.get_object_by_urn(urn)

        # Then: The object I get back should be the same as the object I provided
        self.assertIs(obj, mock_db)
| 2.484375 | 2 |
nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py | gkucsko/NeMo | 0 | 12773063 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.data import ConcatDataset
from nemo.collections.common.metrics import MetricStringToTorchMetric
from nemo.collections.common.metrics.classification_accuracy import ExactStringPerCategoryMatchMetric
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher
from nemo.utils import AppState, logging
try:
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator, get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ['MegatronT5FinetuneModel']
class MegatronT5FinetuneModel(MegatronT5Model):
"""Finetune Model that Inherits from MegatronT5Model instead."""
    def __init__(self, cfg: DictConfig, trainer: Trainer):
        """Build the finetuning model plus one metric object per
        validation (and optionally test) dataloader."""
        super().__init__(cfg, trainer=trainer)
        # ModuleList registration so metric state follows the model's
        # device placement and checkpointing.
        self.val_metric, self.val_metric_name = self.setup_metric(self.cfg.data.validation_ds)
        self.val_metric = torch.nn.ModuleList(self.val_metric)
        if hasattr(self.cfg.data, "test_ds"):
            self.test_metric, self.test_metric_name = self.setup_metric(self.cfg.data.test_ds)
            self.test_metric = torch.nn.ModuleList(self.test_metric)

    def setup_metric(self, data_cfg):
        """Instantiate the evaluation metric(s) described by ``data_cfg``.

        Returns ``(metrics, metric_name)``: a list with one metric object
        per dataset and the metric's name for logging.  Defaults to exact
        string match when no metric is configured.
        """
        # XNLI is a special case: a single per-language exact-match metric.
        metric_name = "exact_string_match"
        if hasattr(self.cfg, "eval_languages"):
            metric = [ExactStringPerCategoryMatchMetric(self.cfg.eval_languages)]
        else:
            if not hasattr(data_cfg, "metric"):
                metric = MetricStringToTorchMetric["exact_string_match"]
            else:
                if not hasattr(data_cfg.metric, "name"):
                    raise ValueError("Metric name is not provided in the metric config.")
                if data_cfg.metric.name not in MetricStringToTorchMetric:
                    raise KeyError(
                        f"{data_cfg.metric.name} is not supported. List of supported metrics: {MetricStringToTorchMetric.keys()}"
                    )
                metric_name = data_cfg.metric.name
                metric = MetricStringToTorchMetric[metric_name]
            # GLUE will not have a "src_file_name" attribute and will always have only a single metric.
            if hasattr(data_cfg, "src_file_name"):
                if isinstance(data_cfg.src_file_name, ListConfig):
                    # We pass average and num_classes to the metric constructor via kwargs even if they don't exist for each metric.
                    metric = [
                        metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)
                        for _ in range(len(self.cfg.data.test_ds.src_file_name))
                    ]
                else:
                    metric = [metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)]
            else:
                metric = [metric()]  # GLUE does not need to specify average or num_classes.
        return metric, metric_name

    def setup(self, stage=None):
        """Build datasets and dataloaders for the requested PTL stage."""
        # This is just to keep the parent class happy since we override its setup() method.
        self.init_consumed_samples = 0
        self.init_global_step = 0
        if stage == 'predict':
            return

        # NOTE: PTL uses the same stage string "test" for both testing and validation.
        self.build_train_valid_test_datasets(stage=stage)
        if hasattr(self, '_validation_ds'):
            self.setup_validation_data()
        if hasattr(self, '_test_ds'):
            self.setup_test_data()
        if hasattr(self, '_train_ds'):
            self.setup_training_data()

    def _process_global_batch(self, global_batch):
        """Process a list of microbatches into a global batch."""
        # If there is no language information in the global batch (ex: English MNLI),
        # we can use the parent global batch processor as is.
        if 'lang' not in global_batch[0]:
            return self._process_global_batch_without_megatron_batch_sampler(global_batch)
        # For validation data (XNLI), we need to process the global batch and
        # then re-attach the language info separately.
        else:
            assert all(['lang' in micro_batch for micro_batch in global_batch])
            langs_list = []
            # Strip 'lang' before handing off to the tensor-only processor.
            processed_global_batch = self._process_global_batch_without_megatron_batch_sampler(
                [{k: v for k, v in micro_batch.items() if k != 'lang'} for micro_batch in global_batch]
            )
            for micro_batch in global_batch:
                langs_list.extend(micro_batch['lang'])
            processed_global_batch['lang'] = langs_list
            return processed_global_batch

    def on_validation_epoch_start(self):
        """Reconfigure apex microbatch bookkeeping for validation sizes."""
        app_state = AppState()
        _reconfigure_microbatch_calculator(
            rank=app_state.global_rank,
            rampup_batch_size=None,
            global_batch_size=self.cfg.data.validation_ds.global_batch_size,
            micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
            data_parallel_size=parallel_state.get_data_parallel_world_size(),
        )
        return super().on_validation_epoch_start()

    def on_validation_epoch_end(self):
        """Restore the microbatch calculator to training batch sizes."""
        app_state = AppState()
        if hasattr(self, "_train_ds"):
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=self.cfg.data.train_ds.global_batch_size,
                micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )
        # When running `trainer.validate()`, the training dataset is not available.
        else:
            logging.warning('No training data found, reconfiguring microbatches based on validation batch sizes.')
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=self.cfg.data.validation_ds.global_batch_size,
                micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )
        return super().on_validation_epoch_end()

    def training_step(self, batch, batch_idx):
        """Training step; handles a short final batch by reconfiguring apex."""
        micro_batch_size = batch[0]['text_enc'].size(0)
        # This should happen only on the last batch of the dataset.
        if micro_batch_size != self.cfg.data.train_ds.micro_batch_size:
            app_state = AppState()
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=micro_batch_size
                * parallel_state.get_data_parallel_world_size()
                * get_num_microbatches(),
                micro_batch_size=micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )

        # At this point batch is a list of dictionaries where each dict is a microbatch.
        # After the process_global_batch call, batch will be a single dictionary containing the global batch.
        # This is required since the parent class expects a single global batch dictionary.
        batch = self._process_global_batch(batch)
        return super().training_step(batch, batch_idx)
def cast_for_metric(self, pred, label, metric_name):
if metric_name == 'exact_string_match':
return pred, label
pred = pred.replace(' ', '')
label = label.replace(' ', '')
# Correlation metrics require casting to float.
if metric_name in ['pearson_corr_coef', 'spearman_corr_coef']:
# Text-to-text model predictions may not always be valid floating point numbers.
try:
pred = float(pred)
except ValueError:
pred = 0.0
try:
label = float(label)
except ValueError:
raise ValueError(f'Could not convert {label} to float.')
pred = torch.FloatTensor([pred]).to(self.device)
label = torch.FloatTensor([label]).to(self.device)
# Other metrics require casting to integers.
elif metric_name in ['accuracy', 'auc', 'auroc', 'average_precision', 'f1']:
# Text-to-text model predictions may not always be valid integers.
try:
pred = int(pred)
except ValueError:
pred = 0
try:
label = int(label)
except ValueError:
raise ValueError(f'Could not convert {label} to int.')
pred = torch.LongTensor([pred]).to(self.device)
label = torch.LongTensor([label]).to(self.device)
else:
raise ValueError(f'Metric {metric_name} not supported.')
return pred, label
    def inference_step(self, batch, batch_idx, mode, dataloader_idx=0):
        """Shared validation/test step: compute the loss, greedily decode
        predictions, and update the metric object for this dataloader."""
        # Batches carry a 7th field ('lang') only for language-tagged data (XNLI).
        batch_has_lang_information = len(batch[0]) == 7

        micro_batch_size = batch[0]['text_enc'].size(0)
        # This should happen only on the last batch of the dataset.
        if micro_batch_size != self.cfg.data.validation_ds.micro_batch_size:
            app_state = AppState()
            _reconfigure_microbatch_calculator(
                rank=app_state.global_rank,
                rampup_batch_size=None,
                global_batch_size=micro_batch_size
                * parallel_state.get_data_parallel_world_size()
                * get_num_microbatches(),
                micro_batch_size=micro_batch_size,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )

        # At this point processed_batch is a list of dictionaries where each dict is a microbatch.
        # After the process_global_batch call, processed_batch will be a single dictionary containing the global batch.
        # This is required since the parent class expects a single global batch dictionary.
        processed_batch = self._process_global_batch(batch)

        # Call parent validation step to get the loss.
        # NOTE: There could be extra keys in the processed_batch dictionary such as "langs" for XNLI;
        # these will be ignored in the parent class.
        loss = super().validation_step(processed_batch, batch_idx)

        predicted_token_ids, _ = self.decode(
            tokens_enc=processed_batch['text_enc'], enc_mask=processed_batch['enc_mask'], num_tokens_to_generate=30
        )

        # Special ids-to-text function to handle stripping <eos> and special tokens with sentencepiece tokenizers.
        preds_text = self.ids_to_text(predicted_token_ids)
        labels_text = self.ids_to_text(processed_batch['labels'])
        input_text = self.ids_to_text(processed_batch['text_enc'])

        if not batch_has_lang_information:
            categories = [None] * len(preds_text)
        else:
            categories = processed_batch['lang']

        metric = self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
        assert len(categories) == len(preds_text) == len(labels_text)
        for _, (pred, label, category) in enumerate(zip(preds_text, labels_text, categories)):
            # To compute metrics like pearson or spearman correlation, we need to
            # cast the predicted string and labels to floats.
            pred, label = self.cast_for_metric(
                pred, label, self.val_metric_name if mode == 'validation' else self.test_metric_name
            )
            if batch_has_lang_information:
                _ = metric(pred, label, category)
            else:
                _ = metric(pred, label)

        return {
            'loss': loss,
            'preds': preds_text,
            'labels': labels_text,
            'categories': categories,
            'inputs': input_text,
        }
def ids_to_text(self, batch_ids):
batch_ids = batch_ids.cpu().numpy().tolist()
texts = []
for ids in batch_ids:
if self.tokenizer.eos_id in ids:
idx = ids.index(self.tokenizer.eos_id)
ids = ids[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(self.tokenizer, 'special_token_to_id'):
ids = [id for id in ids if id not in self.tokenizer.special_token_to_id.values()]
text = self.tokenizer.ids_to_text(ids)
texts.append(text)
return texts
def _determine_log_key(self, data_config, dataloader_idx, metric_name, mode):
# Function that determines whether to log based on the user provided name of the dataset or the dataloader index.
base_key = f"{mode}_{metric_name}_" if metric_name is not None else f"{mode}_"
# If the user provided names for each validation/test dataset, use those.
if hasattr(data_config, "names") and data_config.names is not None:
# With only a single validation/test dataset, the name is not a list.
if not isinstance(data_config.names, ListConfig):
name = data_config.names
else:
name = data_config.names[dataloader_idx]
return base_key + name
else:
return base_key + f"dataloader{dataloader_idx}"
    def inference_epoch_end(self, outputs, mode, data_cfg):
        """Aggregate per-dataloader losses/metrics, log them, and optionally
        write deduplicated predictions to disk.

        ``outputs`` is either a list of step dicts (single dataloader) or a
        list of such lists (multiple dataloaders).  Returns the averaged
        ``(loss, metric)`` across dataloaders.
        """
        # Parent class will handle logging of the loss.
        if not outputs:
            return

        # Normalize the single-dataloader case to a list of lists.
        if isinstance(outputs[0], dict):
            outputs = [outputs]

        averaged_loss = []
        averaged_metric = []
        metric_name = self.val_metric_name if mode == 'validation' else self.test_metric_name
        # Log metrics for each provided validation/test dataset.
        for dataloader_idx, output in enumerate(outputs):
            loss = super().validation_epoch_end([x['loss'] for x in output])
            # Determine the key used to log the loss based on the user provided name of the dataset or the dataloader index.
            loss_log_key = self._determine_log_key(data_cfg, dataloader_idx, "loss", mode)
            # Determine the key used to log the eval metric based on the user provided name of the dataset or the dataloader index.
            metric_log_key = self._determine_log_key(data_cfg, dataloader_idx, metric_name, mode)
            self.log(loss_log_key, loss)
            metric_object = (
                self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
            )
            metric = metric_object.compute()
            # Handle logging of GLUE/XNLI separately here. XNLI has a separate metric per language.
            if isinstance(metric, dict):
                # GLUE case:
                if len(metric) == 1 and 'acc' in metric:
                    metric = metric['acc']
                    self.log(metric_log_key, metric)
                    logging.info(f"{mode} {metric_name}: {metric}")
                # XNLI case where the metric dictionary contains the language and the computed metric as values.
                else:
                    for k, v in metric.items():
                        if k != 'acc' and 'total' not in k:
                            self.log(metric_log_key + f'_{k}', v)
                            logging.info(f"{mode} {metric_name} lang {k} : {v}")
                    metric = metric['acc']
            else:
                self.log(metric_log_key, metric)
                logging.info(f"{mode} {metric_name}: {metric}")
            metric_object.reset()

            averaged_loss.append(loss)
            averaged_metric.append(metric)

            # Write predictions, labels, and inputs to a file for each validation/test dataset.
            if data_cfg.get("write_predictions_to_file", False):

                # Check if the user provided a prefix path to the file(s) they want to write.
                if not hasattr(data_cfg, "output_file_path_prefix") or data_cfg.output_file_path_prefix is None:
                    raise ValueError(
                        f"Cannot write predictions to file when output_file_path_prefix is not set or present in the yaml config file."
                    )

                # Gather the outputs object from all data parallel ranks since we are using the DistributedSampler which splits data across DDP ranks.
                gathered_outputs = [None for _ in range(self.world_size)]
                torch.distributed.all_gather_object(
                    gathered_outputs,
                    [
                        {
                            'preds': x['preds'],
                            'labels': x['labels'],
                            'categories': x['categories'],
                            'inputs': x['inputs'],
                        }
                        for x in output
                    ],
                )

                # Figure out what the suffix of the file should be.
                filename_log_key = self._determine_log_key(data_cfg, dataloader_idx, None, mode)

                # Keep a set of ground truths and inputs to write deduplicated predictions. Distributed Sampler may duplicate examples.
                gt_inp_set = set()
                deduplicated_outputs = {
                    'preds': [],
                    'labels': [],
                    'categories': [],
                    'inputs': [],
                }

                # PTL models have a self.global_rank attribute and we want to write to disk only on global rank 0.
                if self.global_rank == 0:
                    for rank in range(0, self.world_size):
                        for batch in gathered_outputs[rank]:
                            for pred, label, input, category in zip(
                                batch['preds'], batch['labels'], batch['inputs'], batch['categories']
                            ):
                                # Deduplicate on (input + label): the sampler may pad/duplicate examples.
                                if input + label not in gt_inp_set:
                                    gt_inp_set.add(input + label)
                                    deduplicated_outputs['preds'].append(pred)
                                    deduplicated_outputs['labels'].append(label)
                                    deduplicated_outputs['categories'].append(category)
                                    deduplicated_outputs['inputs'].append(input)
                    self.write_predictions_to_file(
                        deduplicated_outputs, f"{data_cfg.output_file_path_prefix}_{filename_log_key}"
                    )
                torch.distributed.barrier()

        # Logging of the averaged metrics:
        averaged_loss = sum(averaged_loss) / len(averaged_loss)
        averaged_metric = sum(averaged_metric) / len(averaged_metric)

        # Handle case where metrics can be nan or inf. This can break checkpoint save/load.
        if torch.isinf(averaged_metric) or torch.isnan(averaged_metric):
            app_state = AppState()
            monitor_mode = app_state.checkpoint_callback_params.mode
            assert monitor_mode in ['min', 'max']
            averaged_metric = 0.0 if monitor_mode == 'max' else 1e5

        if mode == 'validation':
            self.log("validation_loss", averaged_loss)
            self.log(f"validation_{self.val_metric_name}", averaged_metric)
        elif mode == 'test':
            self.log("test_loss", averaged_loss)
            self.log(f"test_{self.test_metric_name}", averaged_metric)

        return averaged_loss, averaged_metric
def write_predictions_to_file(self, outputs, output_file_path_prefix):
with open(output_file_path_prefix + "_inputs_preds_labels.json", "w") as f_json:
json_output = {
"inputs": outputs["inputs"],
"preds": outputs["preds"],
"labels": outputs["labels"],
}
json.dump(json_output, f_json)
    def validation_step(self, batch, batch_idx, dataloader_idx=0):
        """Run the shared inference logic in 'validation' mode."""
        return self.inference_step(batch, batch_idx, 'validation', dataloader_idx)

    def validation_epoch_end(self, outputs):
        """Aggregate validation outputs; logging happens inside."""
        _ = self.inference_epoch_end(outputs, 'validation', self.cfg.data.validation_ds)

    def test_step(self, batch, batch_idx, dataloader_idx=0):
        """Run the shared inference logic in 'test' mode."""
        return self.inference_step(batch, batch_idx, 'test', dataloader_idx)

    def test_epoch_end(self, outputs):
        """Aggregate test outputs; logging happens inside."""
        _ = self.inference_epoch_end(outputs, 'test', self.cfg.data.test_ds)
def build_data_loader(
self,
dataset,
micro_batch_size,
global_batch_size,
shuffle,
num_workers,
pin_memory,
drop_last,
check_validation_interval,
):
"""Buld dataloader given an input dataset."""
if dataset is None:
return None
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=shuffle
)
# This check makes sure the val_check_interval is less than the number of global batches.
# Normally, PTL would do this check and properly account for gradient accumulation.
# But now, it is implicit in the apex fwd/bwd functions and so we need to check for this somewhere.
# The consequence of not doing this is that training loop will never run validation.
# NOTE: Prog bar is also broken as a result of this.
global_batch_size_per_gpu = micro_batch_size * get_num_microbatches()
if (
self.trainer.val_check_interval > (sampler.num_samples // global_batch_size_per_gpu)
and check_validation_interval
):
raise ValueError(
f"trainer.val_check_interval {self.trainer.val_check_interval} is > number of global batches {sampler.num_samples // global_batch_size}"
)
# Data loader. Note that batch size is the per GPU batch size.
return torch.utils.data.DataLoader(
dataset,
collate_fn=dataset.collate_fn,
sampler=sampler,
batch_size=micro_batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=drop_last,
)
    def setup_training_data(self):
        # Wrap the previously built training dataset. The val_check_interval
        # sanity check only applies to the training loader.
        self._train_dl = self.build_data_loader(
            self._train_ds,
            micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
            global_batch_size=self.cfg.data.train_ds.global_batch_size,
            shuffle=self.cfg.data.train_ds.shuffle,
            num_workers=self.cfg.data.train_ds.num_workers,
            pin_memory=self.cfg.data.train_ds.pin_memory,
            drop_last=self.cfg.data.train_ds.drop_last,
            check_validation_interval=True,
        )
def setup_eval_data(self, datasets, data_cfg):
dataloaders = []
for dataset in datasets:
eval_dl = self.build_data_loader(
dataset,
micro_batch_size=data_cfg.micro_batch_size,
global_batch_size=data_cfg.global_batch_size,
shuffle=data_cfg.shuffle,
num_workers=data_cfg.num_workers,
pin_memory=data_cfg.pin_memory,
drop_last=data_cfg.drop_last,
check_validation_interval=False,
)
dataloaders.append(eval_dl)
return dataloaders
    def setup_validation_data(self):
        # One dataloader per validation dataset.
        self._validation_dl = self.setup_eval_data(self._validation_ds, self.cfg.data.validation_ds)
    def setup_test_data(self):
        # One dataloader per test dataset.
        self._test_dl = self.setup_eval_data(self._test_ds, self.cfg.data.test_ds)
    def _build_train_dataset(self, data_cfg):
        """Build the training dataset.

        Accepts either a single src/tgt file pair or parallel lists of
        pairs; multiple pairs are combined into a ConcatDataset with the
        configured sampling strategy.

        NOTE: when src/tgt are plain strings, ``data_cfg`` is mutated in
        place (the strings are wrapped in one-element lists).

        Raises:
            ValueError: on drop_last=False with implicit gradient
                accumulation, or on mismatched src/tgt configuration.
        """
        if (
            data_cfg.drop_last is False
            and data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size()
        ):
            raise ValueError(
                f"Cannot use drop_last=False in your training data with gradient accumulation found grad acc of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} with global_batch_size {data_cfg.global_batch_size}, micro_batch_size {data_cfg.micro_batch_size}, data parallel size {parallel_state.get_data_parallel_world_size()}"
            )
        datasets = []
        # Determine if we are using a single dataset or a list of datasets.
        is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
        is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
        if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
            raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
        if is_src_list_config:
            if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
                raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
        else:
            data_cfg.src_file_name = [data_cfg.src_file_name]
            data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
        for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
            dataset = SequenceToSequenceDataset(
                src_file_name=src,
                tgt_file_name=tgt,
                src_tokenizer=self.tokenizer,
                tgt_tokenizer=self.tokenizer,
                max_src_seq_length=data_cfg.max_src_seq_length,
                max_tgt_seq_length=data_cfg.max_tgt_seq_length,
            )
            datasets.append(dataset)
        if len(datasets) > 1:
            # Blend the datasets; default sampling weights are uniform.
            dataset = ConcatDataset(
                datasets=datasets,
                sampling_technique=data_cfg.get('concat_sampling_technique', 'temperature'),
                sampling_temperature=data_cfg.get('concat_sampling_temperature', 5),
                sampling_probabilities=data_cfg.get(
                    'concat_sampling_probabilities', [1 / len(datasets)] * len(datasets)
                ),
                global_rank=parallel_state.get_data_parallel_rank(),
                world_size=parallel_state.get_data_parallel_world_size(),
            )
            return dataset
        else:
            return datasets[0]
    def _build_eval_dataset(self, data_cfg):
        """Build the evaluation dataset.

        Always returns a list of datasets, one per src/tgt file pair.
        Unlike training, implicit gradient accumulation is rejected here.

        NOTE: when src/tgt are plain strings, ``data_cfg`` is mutated in
        place (the strings are wrapped in one-element lists).
        """
        if data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size():
            raise ValueError(
                f'You are trying to use "implicit gradient accumulation" of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} in your validation/test datasets. This is not supported. Please set global_batch_size equal to micro_batch_size * data_parallel_world_size.'
            )
        datasets = []
        # Determine if we are using a single dataset or a list of datasets.
        is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
        is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
        is_names_list_config = False
        if hasattr(data_cfg, "names"):
            if isinstance(data_cfg.names, ListConfig):
                is_names_list_config = True
        if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
            raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
        if is_src_list_config:
            if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
                raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
            # Optional display names must parallel the file lists.
            if is_names_list_config and len(data_cfg.names) != len(data_cfg.src_file_name):
                raise ValueError(
                    "If you are providing names for each src/tgt file, they must have the same number of elements."
                )
        else:
            data_cfg.src_file_name = [data_cfg.src_file_name]
            data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
        for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
            dataset = SequenceToSequenceDataset(
                src_file_name=src,
                tgt_file_name=tgt,
                src_tokenizer=self.tokenizer,
                tgt_tokenizer=self.tokenizer,
                max_src_seq_length=data_cfg.max_src_seq_length,
                max_tgt_seq_length=data_cfg.max_tgt_seq_length,
            )
            datasets.append(dataset)
        return datasets
def build_train_valid_test_datasets(self, stage):
logging.info('Building datasets ...')
if stage != 'test':
self._validation_ds = self._build_eval_dataset(self.cfg.data.validation_ds)
if stage != 'validation':
if hasattr(self.cfg.data, 'test_ds'):
self._test_ds = self._build_eval_dataset(self.cfg.data.test_ds)
if stage == 'validation' or stage == 'test':
return
self._train_ds = self._build_train_dataset(self.cfg.data.train_ds)
logging.info(f'Finished building datasets ...')
    def on_train_start(self) -> None:
        """PTL hook used to override DataFetcher with GlobalBatchDataFetcher."""
        self.trainer.fit_loop._data_fetcher = GlobalBatchDataFetcher()
    def on_validation_start(self) -> None:
        """PTL hook used to override DataFetcher with GlobalBatchDataFetcher."""
        # Patch both the fit-loop's validation sub-loop and the standalone
        # validate loop so either entry point uses global batches.
        self.trainer.fit_loop.epoch_loop.val_loop._data_fetcher = GlobalBatchDataFetcher()
        self.trainer.validate_loop._data_fetcher = GlobalBatchDataFetcher()
    def on_test_start(self) -> None:
        """PTL hook used to override DataFetcher with GlobalBatchDataFetcher."""
        self.trainer.test_loop._data_fetcher = GlobalBatchDataFetcher()
| 1.6875 | 2 |
bullet-gym-primitive/showKerasCEMCartPoleExample.py | benelot/bullet-gym | 55 | 12773064 | <gh_stars>10-100
#!/usr/bin/python
"""Run a pre-trained KerasCEM agent on CartPole with GUI rendering (no training)."""
import subprocess

# subprocess.run with an argument list avoids spawning a shell and does not
# silently discard the child's exit status the way os.system did.
subprocess.run([
    'python', 'runTrainer.py',
    '--agent=KerasCEMAgent',
    '--env=CartPolev0Env',
    '--train-for=0',
    '--test-for=10000000',
    '--delay=0.005',
    '--gui',
    '--show-test',
    '--load-file=checkpoints/KerasCEM-CartPolev0-chkpt-1.h5',
])
| 1.804688 | 2 |
database_api/mongodb_server.py | shenhao-stu/2021_computer_design | 3 | 12773065 | <filename>database_api/mongodb_server.py<gh_stars>1-10
# -*- coding:utf-8 -*-
# -------------------------------
# @Author : shenhao-stu
# @Email : <EMAIL>
# -------------------------------
# @File : mongodb_server.py
# @Time : 2021-03-14 16:24
# -------------------------------
from flask import Flask, Response, request
from flask_cors import CORS
import pymongo
import re
import json
app = Flask(__name__)
# Allow cross-origin requests (including credentialed ones) from browsers.
CORS(app, supports_credentials=True)
def connect_mongodb(db_name, collection_name, host='localhost', port=27017):
    """Return a handle to ``db_name.collection_name`` on a MongoDB server.

    :param db_name: name of the database to select
    :param collection_name: name of the collection (table) to select
    :param host: MongoDB host (default: localhost)
    :param port: MongoDB port (default: 27017)
    :return: a pymongo Collection object
    """
    # connect MongoDB (pymongo connects lazily on first operation)
    client = pymongo.MongoClient(host=host, port=port)
    # select db
    db = client[db_name]
    # select table/collection
    collection = db[collection_name]
    return collection
def search_data_from_mongodb(collection):
    """Demo helper: run example find_one()/find() queries and print the results.

    :param collection: a pymongo Collection to query
    """
    # find_one() returns the first matching document (or None).
    result = collection.find_one({'age': {'$gt': 20}})
    results = collection.find({'gender': 'male'})
    # BUG FIX: the unused ``count = results.count()`` was removed;
    # Cursor.count() is deprecated since pymongo 3.7 and removed in 4.0,
    # so it would raise AttributeError on modern drivers.
    for result in results:
        print(result)
@app.route('/ccpc', methods=['GET'])
def query_ccpc():
    """Search the 'poetry.ccpc' collection by author/content/title substrings.

    Query parameters 'author', 'content' and 'title' are optional; missing
    or empty values become '' which compiles to a match-anything regex.
    Returns a JSON response with the matches and their count.
    """
    # initialize variables: missing/empty query params default to ''
    author = request.args.get('author') or ''
    content = request.args.get('content') or ''
    title = request.args.get('title') or ''
    res = dict()
    print(f"author:{author},content:{content},title:{title}")
    # query condition: substring match via compiled regexes
    condition = {
        'author': re.compile(author),
        'content': re.compile(content),
        'title': re.compile(title),
    }
    # mongodb connect
    collection = connect_mongodb(db_name="poetry", collection_name="ccpc")
    results = collection.find(condition)
    for index, result in enumerate(results):
        # drop the non-JSON-serializable ObjectId
        del result['_id']
        res[f"{index}"] = result
    # BUG FIX: Cursor.count() was deprecated in pymongo 3.7 and removed in
    # 4.0; since every match is collected above, the count equals len(res).
    num_count = len(res)
    # return response
    response = Response(json.dumps({'msg': 'success', 'state': 'success', 'result': res, 'count': num_count},
                                   ensure_ascii=False).encode('utf-8'), mimetype="application/json")
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
if __name__ == "__main__":
app.config['JSON_AS_ASCII'] = False
app.run(host='localhost', debug=True)
| 2.984375 | 3 |
plugin_tests/dataverse_test.py | whole-tale/girder_wholetale | 3 | 12773066 | <reponame>whole-tale/girder_wholetale
import json
import os
import responses
import time
import vcr
from tests import base
from girder.models.folder import Folder
from girder.models.item import Item
DATA_PATH = os.path.join(
os.path.dirname(os.environ['GIRDER_TEST_DATA_PREFIX']),
'data_src', 'plugins', 'wholetale'
)
def setUpModule():
    """Enable the plugins under test and boot the Girder test server."""
    base.enabledPlugins.extend(['wholetale', 'wt_home_dir'])
    base.startServer()
def tearDownModule():
    # Shut down the test server started in setUpModule().
    base.stopServer()
class DataverseHarversterTestCase(base.TestCase):
    """Integration tests for the WholeTale Dataverse import provider.

    HTTP traffic is replayed from VCR cassettes (see DATA_PATH) or stubbed
    with the ``responses`` library, so the tests run without live network
    access. (NOTE(review): "Harverster" in the class name looks like a typo
    for "Harvester"; left unchanged to avoid breaking external references.)
    """
    def setUp(self):
        """Create one admin and one regular user for the tests."""
        users = ({
            'email': '<EMAIL>',
            'login': 'admin',
            'firstName': 'Root',
            'lastName': '<NAME>',
            'password': '<PASSWORD>'
        }, {
            'email': '<EMAIL>',
            'login': 'joeregular',
            'firstName': 'Joe',
            'lastName': 'Regular',
            'password': '<PASSWORD>'
        })
        self.admin, self.user = [self.model('user').createUser(**user)
                                 for user in users]
    @vcr.use_cassette(os.path.join(DATA_PATH, 'dataverse_lookup.txt'))
    @responses.activate
    def testLookup(self):
        """DOI/file-URL lookup and file listing against Harvard Dataverse."""
        responses.add_passthru("https://dataverse.harvard.edu/api/access")
        responses.add_passthru("https://dataverse.harvard.edu/api/datasets")
        responses.add_passthru("https://dataverse.harvard.edu/dataset.xhtml")
        responses.add_passthru("https://dataverse.harvard.edu/file.xhtml")
        responses.add_passthru("https://dvn-cloud.s3.amazonaws.com/")
        responses.add_passthru("https://dataverse.harvard.edu/api/search?q=filePersistentId")
        responses.add_passthru("https://dataverse.harvard.edu/citation")
        responses.add_passthru("https://doi.org")
        responses.add(
            responses.GET,
            "https://dataverse.harvard.edu/api/search?q=entityId:3040230",
            json={
                "status": "OK",
                "data": {
                    "q": "entityId:3040230",
                    "total_count": 1,
                    "start": 0,
                    "spelling_alternatives": {},
                    "items": [
                        {
                            "name": "2017-07-31.tab",
                            "type": "file",
                            "url": "https://dataverse.harvard.edu/api/access/datafile/3040230",
                            "file_id": "3040230",
                            "published_at": "2017-07-31T22:27:23Z",
                            "file_type": "Tab-Delimited",
                            "file_content_type": "text/tab-separated-values",
                            "size_in_bytes": 12025,
                            "md5": "e7dd2f725941b978d45fed3f33ff640c",
                            "checksum": {
                                "type": "MD5",
                                "value": "e7dd2f725941b978d45fed3f33ff640c",
                            },
                            "unf": "UNF:6:6wGE3C5ragT8A0qkpGaEaQ==",
                            "dataset_citation": (
                                "<NAME>, 2017, \"Open Source at Harvard\", "
                                "https://doi.org/10.7910/DVN/TJCLKP, Harvard Dataverse, "
                                " V2, UNF:6:6wGE3C5ragT8A0qkpGaEaQ== [fileUNF]"
                            ),
                        }
                    ],
                    "count_in_response": 1,
                },
            }
        )
        resp = self.request(
            path='/repository/lookup', method='GET', user=self.user,
            params={'dataId': json.dumps([
                'https://doi.org/10.7910/DVN/RLMYMR',
                'https://doi.org/10.7910/DVN/RLMYMR/WNKD3W',
                'https://dataverse.harvard.edu/api/access/datafile/3040230'
            ])}
        )
        self.assertStatus(resp, 200)
        self.assertEqual(resp.json, [
            {
                "dataId": "https://dataverse.harvard.edu/dataset.xhtml"
                "?persistentId=doi:10.7910/DVN/RLMYMR",
                "doi": "doi:10.7910/DVN/RLMYMR",
                "name": "Karnataka Diet Diversity and Food Security for "
                "Agricultural Biodiversity Assessment",
                "repository": "Dataverse",
                "size": 495885,
                "tale": False,
            },
            {
                "dataId": "https://dataverse.harvard.edu/file.xhtml"
                "?persistentId=doi:10.7910/DVN/RLMYMR/WNKD3W",
                "doi": "doi:10.7910/DVN/RLMYMR",
                "name": "Karnataka Diet Diversity and Food Security for "
                "Agricultural Biodiversity Assessment",
                "repository": "Dataverse",
                "size": 2321,
                "tale": False,
            },
            {
                "dataId": "https://dataverse.harvard.edu/api/access/datafile/3040230",
                "doi": "doi:10.7910/DVN/TJCLKP",
                "name": "Open Source at Harvard",
                "repository": "Dataverse",
                "size": 12025,
                "tale": False,
            }
        ])
        resp = self.request(
            path='/repository/listFiles', method='GET', user=self.user,
            params={'dataId': json.dumps([
                'https://doi.org/10.7910/DVN/RLMYMR',
                'https://doi.org/10.7910/DVN/RLMYMR/WNKD3W',
                'https://dataverse.harvard.edu/api/access/datafile/3040230'
            ])}
        )
        self.assertStatus(resp, 200)
        self.assertEqual(resp.json, [
            {
                "Karnataka Diet Diversity and Food Security for "
                "Agricultural Biodiversity Assessment": {
                    "fileList": [
                        {"Karnataka_DDFS_Data-1.tab": {"size": 2408}},
                        {"Karnataka_DDFS_Data-1.xlsx": {"size": 700840}},
                        {"Karnataka_DDFS_Questionnaire.pdf": {"size": 493564}}
                    ]
                }
            },
            {
                "Karnataka Diet Diversity and Food Security for "
                "Agricultural Biodiversity Assessment": {
                    "fileList": [
                        {"Karnataka_DDFS_Data-1.tab": {"size": 2408}},
                        {"Karnataka_DDFS_Data-1.xlsx": {"size": 700840}}
                    ]
                }
            },
            {
                "Open Source at Harvard": {
                    "fileList": [
                        {"2017-07-31.csv": {"size": 11684}},
                        {"2017-07-31.tab": {"size": 12100}}
                    ]
                }
            }
        ])
    def testConfigValidators(self):
        """Validation of the DATAVERSE_URL system setting."""
        from girder.plugins.wholetale.constants import PluginSettings, SettingDefault
        resp = self.request('/system/setting', user=self.admin, method='PUT',
                            params={'key': PluginSettings.DATAVERSE_URL,
                                    'value': 'random_string'})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'field': 'value',
            'type': 'validation',
            'message': 'Invalid Dataverse URL'
        })
        resp = self.request(
            '/system/setting', user=self.admin, method='PUT',
            params={'key': PluginSettings.DATAVERSE_URL,
                    'value': SettingDefault.defaults[PluginSettings.DATAVERSE_URL]})
        self.assertStatusOk(resp)
        resp = self.request(
            '/system/setting', user=self.admin, method='PUT',
            params={'key': PluginSettings.DATAVERSE_URL,
                    'value': ''})
        self.assertStatusOk(resp)
        resp = self.request(
            '/system/setting', user=self.admin, method='GET',
            params={'key': PluginSettings.DATAVERSE_URL})
        self.assertStatusOk(resp)
        self.assertEqual(
            resp.body[0].decode(),
            '"{}"'.format(SettingDefault.defaults[PluginSettings.DATAVERSE_URL]))
    @vcr.use_cassette(os.path.join(DATA_PATH, 'dataverse_single.txt'))
    def testSingleDataverseInstance(self):
        """Lookup/listFiles against a non-default (demo) Dataverse instance."""
        from girder.plugins.wholetale.constants import PluginSettings, SettingDefault
        resp = self.request('/system/setting', user=self.admin, method='PUT',
                            params={'key': PluginSettings.DATAVERSE_URL,
                                    'value': 'https://demo.dataverse.org/'})
        self.assertStatusOk(resp)
        resp = self.request(
            path='/repository/lookup', method='GET', user=self.user,
            params={'dataId': json.dumps([
                "https://demo.dataverse.org/api/access/datafile/1849559"
            ])}
        )
        self.assertStatus(resp, 200)
        self.assertEqual(resp.json, [
            {
                "dataId": "https://demo.dataverse.org/api/access/datafile/1849559",
                "doi": "doi:10.70122/FK2/H60OIK",
                "name": "test file access by version",
                "repository": "Dataverse",
                "size": 4750,
                "tale": False,
            }
        ])
        resp = self.request(
            path='/repository/listFiles', method='GET', user=self.user,
            params={'dataId': json.dumps([
                'https://demo.dataverse.org/api/access/datafile/1849559'
            ])}
        )
        self.assertStatus(resp, 200)
        self.assertEqual(resp.json, [
            {
                "test file access by version": {
                    "fileList": [
                        {"images.jpg": {"size": 4750}},
                    ]
                }
            }
        ])
        resp = self.request(
            '/system/setting', user=self.admin, method='PUT',
            params={'key': PluginSettings.DATAVERSE_URL,
                    'value': SettingDefault.defaults[PluginSettings.DATAVERSE_URL]})
        self.assertStatusOk(resp)
    def testExtraHosts(self):
        """Validation of DATAVERSE_EXTRA_HOSTS and its effect on the URL regex."""
        from girder.plugins.wholetale.constants import PluginSettings, SettingDefault
        resp = self.request('/system/setting', user=self.admin, method='PUT',
                            params={'key': PluginSettings.DATAVERSE_EXTRA_HOSTS,
                                    'value': 'dataverse.org'})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'field': 'value',
            'type': 'validation',
            'message': 'Dataverse extra hosts setting must be a list.'
        })
        resp = self.request('/system/setting', user=self.admin, method='PUT',
                            params={'key': PluginSettings.DATAVERSE_EXTRA_HOSTS,
                                    'value': json.dumps(['not a domain'])})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json, {
            'field': 'value',
            'type': 'validation',
            'message': 'Invalid domain in Dataverse extra hosts'
        })
        # defaults
        resp = self.request(
            '/system/setting', user=self.admin, method='PUT',
            params={'key': PluginSettings.DATAVERSE_EXTRA_HOSTS,
                    'value': ''})
        self.assertStatusOk(resp)
        resp = self.request(
            '/system/setting', user=self.admin, method='GET',
            params={'key': PluginSettings.DATAVERSE_EXTRA_HOSTS})
        self.assertStatusOk(resp)
        self.assertEqual(
            resp.body[0].decode(),
            str(SettingDefault.defaults[PluginSettings.DATAVERSE_EXTRA_HOSTS]))
        resp = self.request(
            '/system/setting', user=self.admin, method='PUT',
            params={'list': json.dumps([
                {
                    'key': PluginSettings.DATAVERSE_EXTRA_HOSTS,
                    'value': ['random.d.org', 'random2.d.org']
                },
                {
                    'key': PluginSettings.DATAVERSE_URL,
                    'value': 'https://demo.dataverse.org'
                }
            ])}
        )
        self.assertStatusOk(resp)
        from girder.plugins.wholetale.lib.dataverse.provider import DataverseImportProvider
        self.assertEqual(
            '^https?://(demo.dataverse.org|random.d.org|random2.d.org).*$',
            DataverseImportProvider().regex.pattern
        )
        # NOTE(review): the response of this restore-to-default request is
        # not asserted; presumably intentional cleanup.
        resp = self.request(
            '/system/setting', user=self.admin, method='PUT',
            params={'key': PluginSettings.DATAVERSE_URL,
                    'value': SettingDefault.defaults[PluginSettings.DATAVERSE_URL]})
    @vcr.use_cassette(os.path.join(DATA_PATH, 'dataverse_hierarchy.txt'))
    def testDatasetWithHierarchy(self):
        """Register a dataset with nested folders and round-trip it through a Tale manifest."""
        from girder.plugins.jobs.models.job import Job
        from girder.plugins.jobs.constants import JobStatus
        from server.models.image import Image
        from server.models.tale import Tale
        from server.lib.manifest import Manifest
        from server.lib.manifest_parser import ManifestParser
        doi = "doi:10.7910/DVN/Q5PV4U"
        dataMap = [
            {
                "dataId": (
                    "https://dataverse.harvard.edu/dataset.xhtml?"
                    "persistentId=" + doi
                ),
                "doi": doi,
                "name": (
                    "Replication Data for: Misgovernance and Human Rights: "
                    "The Case of Illegal Detention without Intent"
                ),
                "repository": "Dataverse",
                "size": 6326512,
                "tale": False,
            }
        ]
        resp = self.request(
            path="/dataset/register",
            method="POST",
            params={"dataMap": json.dumps(dataMap)},
            user=self.user,
        )
        self.assertStatusOk(resp)
        registration_job = resp.json
        # Poll the async registration job (up to ~10s) until it finishes.
        for _ in range(100):
            job = Job().load(registration_job["_id"], force=True)
            if job["status"] > JobStatus.RUNNING:
                break
            time.sleep(0.1)
        self.assertEqual(job["status"], JobStatus.SUCCESS)
        ds_root = Folder().findOne({"meta.identifier": doi})
        ds_subfolder = Folder().findOne(
            {"name": "Source Data", "parentId": ds_root["_id"]}
        )
        ds_item = Item().findOne(
            {"name": "03_Analysis_Code.R", "folderId": ds_root["_id"]}
        )
        dataSet = [
            {
                "_modelType": "folder",
                "itemId": str(ds_root["_id"]),
                "mountPath": ds_root["name"],
            },
            {
                "_modelType": "folder",
                "itemId": str(ds_subfolder["_id"]),
                "mountPath": ds_subfolder["name"],
            },
            {
                "_modelType": "item",
                "itemId": str(ds_item["_id"]),
                "mountPath": ds_item["name"],
            }
        ]
        image = Image().createImage(name="test my name", creator=self.user, public=True)
        tale = Tale().createTale(
            image, dataSet, creator=self.user, title="Blah", public=True
        )
        manifest = Manifest(tale, self.user, expand_folders=True).manifest
        restored_dataset = ManifestParser(manifest).get_dataset()
        self.assertEqual(restored_dataset, dataSet)
        Tale().remove(tale)
        Image().remove(image)
    def testProtoTale(self):
        """proto_tale_from_datamap with and without author lookup."""
        from server.lib.dataverse.provider import DataverseImportProvider
        provider = DataverseImportProvider()
        datamap = {
            "dataId": (
                "https://dataverse.harvard.edu/dataset.xhtml?"
                "persistentId=doi:10.7910/DVN/26721"
            ),
            "doi": "doi:10.7910/DVN/26721",
            "name": (
                "Replication data for: Priming Predispositions "
                "and Changing Policy Positions"
            ),
            "repository": "Dataverse",
            "size": 44382520,
            "tale": False,
        }
        tale = provider.proto_tale_from_datamap(datamap, False)
        self.assertEqual(set(tale.keys()), {"title", "relatedIdentifiers", "category"})
        tale = provider.proto_tale_from_datamap(datamap, True)
        self.assertEqual(tale["authors"][0]["lastName"], "Tesler")
        datamap = {
            "dataId": (
                "http://dataverse.icrisat.org/dataset.xhtml?"
                "persistentId=doi:10.21421/D2/TCCVS7"
            ),
            "doi": "doi:10.21421/D2/TCCVS7",
            "name": (
                "Phenotypic evaluation data of International Chickpea "
                "Varietal Trials (ICVTs) – Desi for Year 2016-17"
            ),
            "repository": "Dataverse",
            "size": 99504,
            "tale": False,
        }
        tale = provider.proto_tale_from_datamap(datamap, True)
        self.assertEqual(tale["authors"][0]["firstName"], "Pooran")
    def tearDown(self):
        """Remove the users created in setUp."""
        self.model('user').remove(self.user)
        self.model('user').remove(self.admin)
| 2.15625 | 2 |
cycle-gan-vgg/util/proper.py | nallab/style-transfer | 0 | 12773067 | """
準備用:データセットをTFRecord形式にする
"""
import tensorflow as tf
from absl import flags
from absl import app
from glob import glob
from tensorflow.keras.preprocessing.image import load_img, img_to_array
FLAGS = flags.FLAGS
# Command-line flags selecting the four image directories to convert.
# NOTE(review): all four descriptions are the identical "Path to the data
# folder"; presumably each should name its specific dataset split.
flags.DEFINE_string('old_image_path', "./datasets/original_data", 'Path to the data folder')
flags.DEFINE_string('new_image_path', "./datasets/remake_data", 'Path to the data folder')
flags.DEFINE_string('test_old_image_path', "./datasets/test_original", 'Path to the data folder')
flags.DEFINE_string('test_new_image_path', "./datasets/test_remake", 'Path to the data folder')
# old_label = [str(0) for i in range(len(old_paths))]
# new_label = [str(1) for i in range(len(new_paths))]
def save_tfrec(paths, name):
    """Serialize the raw bytes of each file in ``paths`` into ``<name>.tfrec``."""
    file_bytes = tf.data.Dataset.from_tensor_slices(paths).map(tf.io.read_file)
    writer = tf.data.experimental.TFRecordWriter(f"{name}.tfrec")
    writer.write(file_bytes)
def run_main(argv):
    """absl entry point: ignore positional argv and forward flag values to main()."""
    del argv
    main(
        old_path=FLAGS.old_image_path,
        new_path=FLAGS.new_image_path,
        test_old_path=FLAGS.test_old_image_path,
        test_new_path=FLAGS.test_new_image_path,
    )
def main(old_path, new_path, test_old_path, test_new_path):
    """Convert each 'image_*' directory into a correspondingly named TFRecord file."""
    splits = [
        (old_path, 'old'),
        (new_path, 'new'),
        (test_old_path, 'test_old'),
        (test_new_path, 'test_new'),
    ]
    for directory, record_name in splits:
        save_tfrec(glob(directory + "/image_*"), record_name)
if __name__ == '__main__':
    # Delegate to absl so flags are parsed before run_main executes.
    app.run(run_main)
| 2.546875 | 3 |
src/data_manager.py | bruggerl/gdp-height | 0 | 12773068 | <gh_stars>0
import pandas as pd
import constants as c
class DataManager:
    """
    This class encapsulates the logic for reading and processing the datasets.
    """

    def __init__(self, heights_csv, gdps_csv):
        """
        Initialize the DataManager with the given CSV inputs. The DataManager holds the input as
        pandas dataframes and saves the processed datasets as well as the merged output when the respective functions
        are called. When reading the "average height" dataset, only the columns 'Country', 'Sex', 'Year', 'Age group'
        and 'Mean height' are saved. When reading the "GDP per capita" dataset, only the columns 'Country Name',
        'Country Code' and '2019' are saved since we are only interested in data from 2019.

        :param heights_csv: path to the CSV file containing data about the average heights
        :param gdps_csv: path to the CSV file containing data about the GDP
        """
        self.heights_input = pd.read_csv(heights_csv, usecols=['Country', 'Sex', 'Year', 'Age group', 'Mean height'])
        self.gdps_input = pd.read_csv(gdps_csv, usecols=['Country Name', 'Country Code', '2019'])
        self.heights_processed = None
        self.gdps_processed = None
        self.output = None

    def process_datasets(self):
        """
        Process the two datasets. The "average heights" dataset is filtered so that only the data entries from 2019 and
        for the age group 19 are considered (those columns are dropped in the next step because they contain the same
        value for each row). The 'Mean height' column is renamed to a more meaningful name. Rows that contain a NaN
        value in the "GDP per capita" dataset are deleted. Additionally, the columns 'Country Name', '2019' and
        'Country Code' are renamed in order to be able to merge the two datasets and to provide meaningful column names.

        :return: nothing
        """
        # PREPARE DATAFRAMES
        # filter heights: year 2019, age 19
        self.heights_processed = self.heights_input.loc[(self.heights_input['Year'] == 2019) & (self.heights_input['Age group'] == 19)]
        self.heights_processed = self.heights_processed.drop(labels=['Year', 'Age group'], axis=1)  # drop unnecessary columns
        self.heights_processed = self.heights_processed.rename(columns={'Mean height': c.AVG_HEIGHT})
        self.gdps_processed = self.gdps_input.dropna(thresh=3)  # drop NaN GDPs
        self.gdps_processed = self.gdps_processed.rename(
            columns={'Country Name': c.COUNTRY, '2019': c.GDP, 'Country Code': c.COUNTRY_CODE})

    def merge_processed_datasets(self):
        """
        Merge the processed datasets. The processed datasets are merged on the 'Country' column which contains the
        name of the country. An exception is thrown if the datasets have not been processed yet (i.e., one of the
        datasets is None).

        :return: nothing
        """
        if self.heights_processed is not None and self.gdps_processed is not None:
            # MERGE DATAFRAMES
            merged = pd.merge(self.gdps_processed, self.heights_processed, on='Country')
            self.output = merged
        else:
            raise Exception('Datasets have not been processed yet!')

    def export_csv(self):
        """
        Export the resulting dataset to a CSV file. An exception is thrown if the result dataset has not been
        created yet.

        :return: nothing
        """
        if self.output is not None:
            self.output.to_csv('out/gdp_avgHeight_per_country.csv', index_label='ID', index=True)
        else:
            raise Exception('No data produced yet!')

    def _get_output_by_sex(self, sex):
        """
        Filter the result dataframe by the given 'Sex' value and drop the now-constant 'Sex' column.
        Shared helper for get_dataset_males() / get_dataset_females().

        :param sex: value of the 'Sex' column to keep ('Boys' or 'Girls')
        :return: the filtered dataframe
        """
        if self.output is None:
            raise Exception('No data produced yet!')
        return self.output.loc[(self.output['Sex'] == sex)].drop(labels=['Sex'], axis=1)

    def get_dataset_males(self):
        """
        Get the filtered result dataframe which only contains records for 19-year-old males. An exception is thrown
        if the result dataset has not been created yet.

        :return: dataframe containing only records for males (without the 'Sex' column)
        """
        return self._get_output_by_sex('Boys')

    def get_dataset_females(self):
        """
        Get the filtered result dataframe which only contains records for 19-year-old females. An exception is thrown
        if the result dataset has not been created yet.

        :return: dataframe containing only records for females (without the 'Sex' column)
        """
        return self._get_output_by_sex('Girls')
| 3.90625 | 4 |
google-native-ts-k8s-python-postgresql/app/app/config.py | riddopic/examples | 0 | 12773069 | from pydantic import BaseSettings, SecretStr
class Config(BaseSettings):
    """Application settings populated by pydantic's BaseSettings
    (values come from environment variables matching the field names).
    """
    # HTTP server bind address and port.
    app_port: int
    app_host: str
    # Database connection settings; SecretStr masks the password in
    # reprs/logs, and None means no password was provided.
    db_username: str
    db_password: SecretStr | None = None
    db_host: str
    db_port: int
    db_database_name: str
| 1.867188 | 2 |
def go(o, a, b):
    """Apply operator *o* to a and b: '+' adds; anything else multiplies."""
    return a + b if o == '+' else a * b
def main():
    """Read two integers separated by an operator line from stdin and evaluate them."""
    first = int(input())
    operator = input()
    second = int(input())
    return go(operator, first, second)


if __name__ == '__main__':
    print(main())
| 3.53125 | 4 |
edit-file/edit-file.py | bgoodr/rhythmbox-plugins-edit-file | 0 | 12773071 | # -*- Mode: python; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
#
# edit-file.py
#
# Adds an option to edit the file containing the selected track
# to the right click context menu.
# Based on code in
#
# Partly based on code in https://github.com/donaghhorgan/rhythmbox-plugins-open-containing-folder/blob/master/OpenContainingFolder.py
import subprocess
import urllib
import urllib.parse

from gi.repository import Gio, GObject, Gtk, Peas, RB
class EditFile(GObject.Object, Peas.Activatable):
    """Adds an option to edit the file containing the selected track to
    the right click context menu."""
    object = GObject.property(type=GObject.Object)
    _action = 'edit-file'
    _locations = ['browser-popup',
                  'playlist-popup',
                  'podcast-episode-popup',
                  'queue-popup']
    def __init__(self):
        super(EditFile, self).__init__()
        self._app = Gio.Application.get_default()
    def edit_file(self, *args):
        """Open the file of the first selected track in Audacity.

        Args:
            args: Additional arguments. These are ignored.
        """
        page = self.object.props.selected_page
        try:
            selected = page.get_entry_view().get_selected_entries()
            if selected:
                uri = urllib.parse.unquote(selected[0].get_playback_uri())
                print('edit-file plugin: uri==<{}>'.format(uri))
                # Strip the scheme to get a filesystem path.
                # NOTE(review): assumes a local file:// URI; verify for
                # podcast/stream entries.
                abspath = uri.replace("file://", "")
                print('edit-file plugin: abspath==<{}>'.format(abspath))
                subprocess.Popen(['audacity', abspath])
        except Exception as exc:
            # BUG FIX: was a bare ``except:`` that also swallowed
            # SystemExit/KeyboardInterrupt and hid the failure cause; a
            # redundant get_selected_entries() call before the try was
            # removed as dead code.
            print('edit-file plugin: Could not edit file: {}'.format(exc))
    def do_activate(self):
        """Activate the plugin: register the action and add the menu items."""
        print('edit-file plugin: Activating')
        action = Gio.SimpleAction(name=EditFile._action)
        action.connect('activate', self.edit_file)
        self._app.add_action(action)
        item = Gio.MenuItem()
        item.set_label('Edit file')
        item.set_detailed_action('app.%s' % EditFile._action)
        for location in EditFile._locations:
            self._app.add_plugin_menu_item(location,
                                           EditFile._action,
                                           item)
    def do_deactivate(self):
        """Deactivate the plugin: remove the menu items added in do_activate."""
        print('edit-file plugin: Deactivating')
        for location in EditFile._locations:
            self._app.remove_plugin_menu_item(location,
                                              EditFile._action)
| 2.515625 | 3 |
web-service/kagan.py | ResearcherOne/example-repo | 0 | 12773072 | print("yeni kagan ozellik çalışıyor")
| 1.289063 | 1 |
tests/helpers/triple_store.py | data4knowledge/RdfOgm | 0 | 12773073 | <reponame>data4knowledge/RdfOgm
import requests
from requests.auth import HTTPBasicAuth
class TripleStore:
    """Thin wrapper around a Fuseki-style SPARQL HTTP endpoint
    (query/update/data services under one dataset)."""

    def __init__(self):
        # Connection settings: protocol/host/port/dataset/credentials.
        self.__cfg__ = self.__store_config()

    def clear(self, graph='DEFAULT'):
        """Clear the default graph, or the named *graph* if given."""
        if graph != 'DEFAULT':
            graph = f'GRAPH <{graph}>'
        return self.__update(f'CLEAR {graph}')

    def upload(self, filename, graph=""):
        """Upload an RDF file, optionally into a named graph; returns the HTTP response."""
        return self.__upload(filename, graph)

    def query(self, sparql):
        """Run a SPARQL query and return the HTTP response.

        BUG FIX: previously the response was computed and discarded.
        """
        return self.__query(sparql)

    def update(self, sparql):
        """Run a SPARQL update and return the HTTP response.

        BUG FIX: previously called ``self._update`` which does not exist
        (the private method is name-mangled as ``__update``), so this
        method always raised AttributeError.
        """
        return self.__update(sparql)

    def __query(self, sparql):
        # POST the query as form data to the dataset's query service.
        headers = {'Accept': 'application/sparql-results+json', 'Content-type': 'application/x-www-form-urlencoded'}
        response = requests.post(
            self.__endpoint('query'),
            headers = headers,
            auth = self.__auth(),
            data = f'query={sparql}'
        )
        return response

    def __update(self, sparql):
        # POST the update as form data to the dataset's update service.
        headers = {'Accept': 'application/sparql-results+json', 'Content-type': 'application/x-www-form-urlencoded'}
        response = requests.post(
            self.__endpoint('update'),
            headers = headers,
            auth = self.__auth(),
            data = f'update={sparql}'
        )
        return response

    def __upload(self, filename, graph=""):
        # Stream the file as multipart form data to the data service.
        import os
        from requests_toolbelt import MultipartEncoder
        head, tail = os.path.split(filename)
        multipart_data = MultipartEncoder(fields={'file': (tail, open(filename, 'rb'), 'text/plain')})
        headers={'Content-Type': multipart_data.content_type}
        response=requests.put(self.__endpoint('data', graph), data=multipart_data, auth=self.__auth(),headers=headers)
        return response

    def __auth(self):
        # HTTP basic auth built from the stored credentials.
        return HTTPBasicAuth(self.__cfg__['username'], self.__cfg__['password'])

    def __endpoint(self, type, graph=""):
        # Compose <protocol>://<host>:<port>/<dataset>/<type>[?graph=...].
        protocol = self.__cfg__['protocol']
        host = self.__cfg__['host']
        port = self.__cfg__['port']
        ds = self.__cfg__['dataset']
        endpoint = f'{protocol}://{host}:{port}/{ds}/{type}'
        if graph != "":
            endpoint += f'?graph={graph}'
        return endpoint

    def __store_config(self):
        # Hard-coded local test-server settings.
        return {'protocol': 'http', 'host': 'localhost', 'port': '3030', 'dataset': 'test', 'username': '', 'password': ''}
| 2.046875 | 2 |
Placer.py | hal-lab-u-tokyo/GenMap | 0 | 12773074 | # This file is part of GenMap and released under the MIT License, see LICENSE.
# Author: <NAME>
import networkx as nx
import random
import math
from multiprocessing import Pool
import multiprocessing as multi
class Placer():
    """Generates initial operation-to-PE mappings for a CGRA-style array.

    Fixes vs. the previous revision:
      * Partial-randomness rounding used the x coordinate's fractional part
        for the y axis as well (copy-paste bug in ``__coord_rouding``).
      * Duplicate filtering in ``generate_init_mappings`` stored raw
        ``dict_values`` objects, which compare by identity, so duplicates
        were never actually removed; it now hashes the (op, position) pairs.
      * ``__coord_rouding`` renamed to ``__coord_rounding`` (private helper).
    """
    def __init__(self, method, iterations = 50, randomness = "Full"):
        """ Initializes this class

            Args:
                method (str) : initial mapping method
                                available methods are follows:
                                    1. graphviz (default)
                                    2. tsort
                                    3. random
                iterations (int): maximum iteration number for generating a node position.
                                    Default = 50
                randomness (str): randomness of rounding.
                                    if it is "Full", then the positions are rounded fully randomly.
                                    if it is "Partial", then partially randomly.

            Raises:
                ValueError: if *randomness* is neither "Full" nor "Partial".
        """
        self.__iterations = iterations
        self.__method = method
        if randomness not in ["Full", "Partial"]:
            raise ValueError("Invalid randomness type: " + randomness)
        self.__randomness = randomness

    def generate_init_mappings(self, dag, width, height, count, proc_num=multi.cpu_count()):
        """Returns multiple initial mappings.

            Args:
                dag (networkx DiGraph): data-flow-graph
                width (int): PE array width
                height (int): PE array height
                count (int): try count to generate mappings

            Optional:
                proc_num (int): the number of process
                                Default is equal to cpu count
                                (NOTE: evaluated once at class-definition time)

            Returns:
                list: a list of mappings (dicts of operation -> (x, y)),
                      deduplicated by their full placement
        """
        if self.__method == "graphviz":
            # Each worker gets a randomly narrowed width to diversify layouts.
            mt_args = [(dag, random.randint(1, width), height) for i in range(count)]
            p = Pool(proc_num)
            results = p.map(self.mt_wrapper, mt_args)
            p.close()
            init_mappings = []
            seen = set()  # frozensets of (op, position) pairs, for duplicate checking
            for mapping in results:
                # remove invalid results (workers that failed to place the DFG)
                if mapping is None:
                    continue
                signature = frozenset(mapping.items())
                if signature not in seen:
                    init_mappings.append(mapping)
                    seen.add(signature)
            return init_mappings
        elif self.__method == "tsort":
            # always topologically sorted placements
            return self.make_random_mappings(dag, width, height, count, 1)
        else:
            # fully random placements
            return self.make_random_mappings(dag, width, height, count, 0)

    def mt_wrapper(self, args):
        # Pool.map passes a single argument, so unpack the tuple here.
        return self.make_graphviz_mapping(*args)

    def make_graphviz_mapping(self, dag, width, height):
        """ Makes nodes position on the PE array.

            Args:
                dag (networkx DiGraph): data-flow-graph
                width (int): PE array width
                height (int): PE array height

            Returns:
                Dictionary: keys are operation label, values are mapped positions of them.
                            In case of failure, returns None
        """
        # validate input dag
        if nx.is_directed_acyclic_graph(dag) == False:
            raise ValueError("Input data-flow-graph is not DAG")
        # check dag size
        node_num = len(dag.nodes())
        if node_num > width * height:
            return None
        # enumerate possible rectangles
        rect_pattern = [(w, h) for w in range(1, width + 1) for h in range(1, height + 1) if w * h >= node_num]
        # graph layout by dot's algorithm
        pos = nx.nx_pydot.graphviz_layout(dag, prog="dot")
        # normalize coordinates to [0, 1]
        max_x = max([x for (x, y) in pos.values()])
        max_y = max([y for (x, y) in pos.values()])
        pos = {v: (x / max_x, y / max_y) for v, (x, y) in pos.items()}
        # make sink nodes upper side
        pos = {v: (x, 1 - y) for v, (x, y) in pos.items()}
        # randomly rotate by 90 deg.
        if random.randint(0, 1) == 0:
            pos = {v: (y, 1 - x) for v, (x, y) in pos.items()}
        # randomly flip x position
        if random.randint(0, 1) == 0:
            pos = {v: (1 - x, y) for v, (x, y) in pos.items()}
        # choose a rectangle pattern
        (map_width, map_height) = rect_pattern[random.randint(0, len(rect_pattern) - 1)]
        # calculate actual coordinates
        pos = {v: ((map_width - 1) * x, (map_height - 1) * y) for v, (x, y) in pos.items()}
        # try to round the coordinates; keep the attempt with the fewest collisions
        fewest_duplicates = len(pos)
        for i in range(self.__iterations):
            mapping = {v: self.__coord_rounding((x, y)) for v, (x, y) in pos.items()}
            # check duplication
            duplicated_node_num = len(list(mapping.values())) - len(set(mapping.values()))
            if duplicated_node_num == 0:
                # check dependency
                # if self.__if_keep_dependency(dag, mapping):
                #     break
                break
            elif duplicated_node_num < fewest_duplicates:
                best_mapping = mapping
                fewest_duplicates = duplicated_node_num
        else:
            # rounding failed after all iterations; repair the best attempt
            # (NOTE: assumes iterations >= 1, guaranteed by the default)
            # get duplicated nodes
            duplicated_nodes = {coord: [v for v in best_mapping.keys() if best_mapping[v] == coord] \
                                for coord in set(best_mapping.values()) \
                                if list(best_mapping.values()).count(coord) > 1}
            # fix one of nodes which are mapped to same coord
            for coord in duplicated_nodes:
                duplicated_nodes[coord].pop(\
                    random.randint(0, len(duplicated_nodes[coord]) - 1))
            # sort in order of largest collision count first
            duplicated_nodes = dict(sorted(duplicated_nodes.items(), key=lambda x: - len(x[1])))
            # get free coordinates
            free_coords = [(x, y) for x in range(map_width) for y in range(map_height)\
                            if not (x, y) in best_mapping.values()]
            # move each colliding node to the nearest free coordinate
            for coord, nodes in duplicated_nodes.items():
                for v in nodes:
                    dists = [math.sqrt((x - coord[0]) ** 2 + (y - coord[1]) ** 2) \
                                for (x, y) in free_coords]
                    nearest_pos = free_coords[dists.index(min(dists))]
                    free_coords.remove(nearest_pos)
                    best_mapping[v] = nearest_pos
            return best_mapping
        return mapping

    @staticmethod
    def make_random_mappings(dag, width, height, size, sort_prob = 0.5):
        """ Generate random mappings

            Args:
                dag (networkx DiGraph): data-flow-graph
                width (int): PE array width
                height (int): PE array height
                size (int): The number of mappings to be generated

            Option:
                sort_prob (float): topological sort probability.

            Returns:
                list: generated mappings
        """
        # validate input dag
        if nx.is_directed_acyclic_graph(dag) == False:
            raise ValueError("Input data-flow-graph is not DAG")
        # check dag size
        node_num = len(dag.nodes())
        if node_num > width * height:
            return None
        # enumerate possible rectangles
        rect_pattern = [(w, h) for w in range(1, width + 1) for h in range(1, height + 1) if w * h >= node_num]
        rtn_list = []
        for i in range(size):
            if random.random() < sort_prob:
                topological_sort_enable = True
            else:
                topological_sort_enable = False
            (map_width, map_height) = rect_pattern[random.randint(0, len(rect_pattern) - 1)]
            positions = random.sample([(x, y) for x in range(map_width) for y in range(map_height)], node_num)
            if topological_sort_enable:
                # sort positions by distance from a random bottom corner so a
                # topological order of the DAG maps roughly bottom-up
                if random.randint(0, 1) == 0:
                    origin = (0, 0)
                else:
                    origin = (map_width - 1, 0)
                positions = sorted(positions, key=lambda x: \
                                    (x[0] - origin[0])**2 + (x[1] - origin[1]) ** 2)
                rtn_list.append({k: v for k, v in zip(list(nx.topological_sort(dag)), positions)})
            else:
                rtn_list.append({k: v for k, v in zip(dag.nodes(), positions)})
        return rtn_list

    @staticmethod
    def __if_keep_dependency(dag, mapping):
        """Check dependency between operations.

            Args:
                dag (networkx digraph): data-flow-graph to be mapped
                mapping (dict): operation mapping
                    keys: operation labels
                    values: PE coordinates where the nodes are mapped

            Returns:
                bool: True iff no edge points to a strictly lower row.
        """
        valid = True
        for u, v in dag.edges():
            if mapping[u][1] > mapping[v][1]:
                valid = False
                break
        return valid

    def __coord_rounding(self, coord):
        """ Round a float value coordinate to an int value coordinate.

            Args:
                coord: a list-like coordinate

            Return:
                a tuple: rounded coordinate

            The previous revision computed the y fractional part from
            coord[0]; both axes now use their own fractional part.
        """
        if self.__randomness == "Full":
            # Either ceil or floor is used randomly
            x_ceil = random.randint(0, 1) == 0
            y_ceil = random.randint(0, 1) == 0
        elif self.__randomness == "Partial":
            # extract after the decimal points
            x_dec = coord[0] - int(coord[0])
            y_dec = coord[1] - int(coord[1])
            # decide ceil or floor depending on the decimal
            x_ceil = random.random() < x_dec
            y_ceil = random.random() < y_dec
        if x_ceil and y_ceil:
            return (math.ceil(coord[0]), math.ceil(coord[1]))
        elif x_ceil and not y_ceil:
            return (math.ceil(coord[0]), math.floor(coord[1]))
        elif not x_ceil and y_ceil:
            return (math.floor(coord[0]), math.ceil(coord[1]))
        else:
            return (math.floor(coord[0]), math.floor(coord[1]))
| 2.796875 | 3 |
openslides_voting/urls.py | jwinzer/openslides-voting | 4 | 12773075 | from django.conf.urls import url
from . import views
from .votecollector import urls
# URL routes for the voting plugin: one attendance view declared here, plus
# every route contributed by the votecollector sub-app.
urlpatterns = [
    url(r'^voting/attendance/shares/$',
        views.AttendanceView.as_view(),
        name='voting_attendance'),
] + urls.urlpatterns
| 1.578125 | 2 |
var/spack/repos/builtin/packages/r-mlinterfaces/package.py | whitfin/spack | 3 | 12773076 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMlinterfaces(RPackage):
    """This package provides uniform interfaces to machine learning
    code for data in R and Bioconductor containers."""

    homepage = "https://www.bioconductor.org/packages/MLInterfaces/"
    git      = "https://git.bioconductor.org/packages/MLInterfaces.git"

    # Bioconductor releases are pinned by git commit rather than tag.
    version('1.56.0', commit='<KEY>')

    # MLInterfaces 1.56.0 is constrained to the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.56.0')
    # R package dependencies, needed at both build and run time.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-gdata', type=('build', 'run'))
    depends_on('r-pls', type=('build', 'run'))
    depends_on('r-sfsmisc', type=('build', 'run'))
    depends_on('r-rda', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-fpc', type=('build', 'run'))
    depends_on('r-ggvis', type=('build', 'run'))
    depends_on('r-shiny', type=('build', 'run'))
    depends_on('r-gbm', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-hwriter', type=('build', 'run'))
    depends_on('r-threejs', type=('build', 'run'))
    depends_on('r-mlbench', type=('build', 'run'))
| 1.609375 | 2 |
openid/test/test_pape_draft5.py | pbx/python3-openid | 39 | 12773077 | <filename>openid/test/test_pape_draft5.py
from openid.extensions.draft import pape5 as pape
from openid.message import *
from openid.server import server
import warnings
warnings.filterwarnings(
'ignore', module=__name__, message='"none" used as a policy URI')
import unittest
class PapeRequestTestCase(unittest.TestCase):
    """Tests for pape.Request: construction, policy/auth-level bookkeeping,
    (de)serialization of extension args, and extraction from OpenID requests.
    """

    def setUp(self):
        self.req = pape.Request()

    def test_construct(self):
        self.assertEqual([], self.req.preferred_auth_policies)
        self.assertEqual(None, self.req.max_auth_age)
        self.assertEqual('pape', self.req.ns_alias)
        self.assertFalse(self.req.preferred_auth_level_types)

        bogus_levels = ['http://janrain.com/our_levels']
        req2 = pape.Request([pape.AUTH_MULTI_FACTOR], 1000, bogus_levels)
        self.assertEqual([pape.AUTH_MULTI_FACTOR],
                         req2.preferred_auth_policies)
        self.assertEqual(1000, req2.max_auth_age)
        self.assertEqual(bogus_levels, req2.preferred_auth_level_types)

    def test_addAuthLevel(self):
        self.req.addAuthLevel('http://example.com/', 'example')
        self.assertEqual(['http://example.com/'],
                         self.req.preferred_auth_level_types)
        self.assertEqual('http://example.com/',
                         self.req.auth_level_aliases['example'])

        self.req.addAuthLevel('http://example.com/1', 'example1')
        self.assertEqual(['http://example.com/', 'http://example.com/1'],
                         self.req.preferred_auth_level_types)

        # Re-adding a known URI under a new alias must not duplicate it.
        self.req.addAuthLevel('http://example.com/', 'exmpl')
        self.assertEqual(['http://example.com/', 'http://example.com/1'],
                         self.req.preferred_auth_level_types)

        self.req.addAuthLevel('http://example.com/', 'example')
        self.assertEqual(['http://example.com/', 'http://example.com/1'],
                         self.req.preferred_auth_level_types)

        # Reusing an alias for a different URI is an error.
        self.assertRaises(KeyError, self.req.addAuthLevel,
                          'http://example.com/2', 'example')

        # alias is None; we expect a new one to be generated.
        uri = 'http://another.example.com/'
        self.req.addAuthLevel(uri)
        self.assertTrue(uri in list(self.req.auth_level_aliases.values()))

        # We don't expect a new alias to be generated if one already
        # exists.
        before_aliases = list(self.req.auth_level_aliases.keys())
        self.req.addAuthLevel(uri)
        after_aliases = list(self.req.auth_level_aliases.keys())
        self.assertEqual(before_aliases, after_aliases)

    def test_add_policy_uri(self):
        # Adding the same policy twice must be idempotent; order is preserved.
        self.assertEqual([], self.req.preferred_auth_policies)
        self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual([pape.AUTH_MULTI_FACTOR],
                         self.req.preferred_auth_policies)
        self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual([pape.AUTH_MULTI_FACTOR],
                         self.req.preferred_auth_policies)
        self.req.addPolicyURI(pape.AUTH_PHISHING_RESISTANT)
        self.assertEqual(
            [pape.AUTH_MULTI_FACTOR,
             pape.AUTH_PHISHING_RESISTANT], self.req.preferred_auth_policies)
        self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual(
            [pape.AUTH_MULTI_FACTOR,
             pape.AUTH_PHISHING_RESISTANT], self.req.preferred_auth_policies)

    def test_getExtensionArgs(self):
        self.assertEqual({
            'preferred_auth_policies': ''
        }, self.req.getExtensionArgs())
        self.req.addPolicyURI('http://uri')
        self.assertEqual({
            'preferred_auth_policies': 'http://uri'
        }, self.req.getExtensionArgs())
        self.req.addPolicyURI('http://zig')
        self.assertEqual({
            'preferred_auth_policies': 'http://uri http://zig'
        }, self.req.getExtensionArgs())
        self.req.max_auth_age = 789
        self.assertEqual({
            'preferred_auth_policies': 'http://uri http://zig',
            'max_auth_age': '789'
        }, self.req.getExtensionArgs())

    def test_getExtensionArgsWithAuthLevels(self):
        uri = 'http://example.com/auth_level'
        alias = 'my_level'
        self.req.addAuthLevel(uri, alias)
        uri2 = 'http://example.com/auth_level_2'
        alias2 = 'my_level_2'
        self.req.addAuthLevel(uri2, alias2)

        expected_args = {
            ('auth_level.ns.%s' % alias): uri,
            ('auth_level.ns.%s' % alias2): uri2,
            'preferred_auth_level_types': ' '.join([alias, alias2]),
            'preferred_auth_policies': '',
        }

        self.assertEqual(expected_args, self.req.getExtensionArgs())

    def test_parseExtensionArgsWithAuthLevels(self):
        uri = 'http://example.com/auth_level'
        alias = 'my_level'
        uri2 = 'http://example.com/auth_level_2'
        alias2 = 'my_level_2'

        request_args = {
            ('auth_level.ns.%s' % alias): uri,
            ('auth_level.ns.%s' % alias2): uri2,
            'preferred_auth_level_types': ' '.join([alias, alias2]),
            'preferred_auth_policies': '',
        }

        # Check request object state
        self.req.parseExtensionArgs(
            request_args, is_openid1=False, strict=False)

        expected_auth_levels = [uri, uri2]

        self.assertEqual(expected_auth_levels,
                         self.req.preferred_auth_level_types)
        self.assertEqual(uri, self.req.auth_level_aliases[alias])
        self.assertEqual(uri2, self.req.auth_level_aliases[alias2])

    def test_parseExtensionArgsWithAuthLevels_openID1(self):
        # In OpenID 1, 'nist'/'jisa' are well-known shorthand aliases;
        # in OpenID 2 a namespace declaration is required instead.
        request_args = {
            'preferred_auth_level_types': 'nist jisa',
        }

        expected_auth_levels = [pape.LEVELS_NIST, pape.LEVELS_JISA]

        self.req.parseExtensionArgs(request_args, is_openid1=True)
        self.assertEqual(expected_auth_levels,
                         self.req.preferred_auth_level_types)

        self.req = pape.Request()
        self.req.parseExtensionArgs(request_args, is_openid1=False)
        self.assertEqual([], self.req.preferred_auth_level_types)

        self.req = pape.Request()
        self.assertRaises(
            ValueError,
            self.req.parseExtensionArgs,
            request_args,
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs_ignoreBadAuthLevels(self):
        request_args = {'preferred_auth_level_types': 'monkeys'}
        self.req.parseExtensionArgs(request_args, False)
        self.assertEqual([], self.req.preferred_auth_level_types)

    def test_parseExtensionArgs_strictBadAuthLevels(self):
        request_args = {'preferred_auth_level_types': 'monkeys'}
        self.assertRaises(
            ValueError,
            self.req.parseExtensionArgs,
            request_args,
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs(self):
        args = {
            'preferred_auth_policies': 'http://foo http://bar',
            'max_auth_age': '9'
        }
        self.req.parseExtensionArgs(args, False)
        self.assertEqual(9, self.req.max_auth_age)
        self.assertEqual(['http://foo', 'http://bar'],
                         self.req.preferred_auth_policies)
        self.assertEqual([], self.req.preferred_auth_level_types)

    def test_parseExtensionArgs_strict_bad_auth_age(self):
        args = {'max_auth_age': 'not an int'}
        self.assertRaises(
            ValueError,
            self.req.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs_empty(self):
        self.req.parseExtensionArgs({}, False)
        self.assertEqual(None, self.req.max_auth_age)
        self.assertEqual([], self.req.preferred_auth_policies)
        self.assertEqual([], self.req.preferred_auth_level_types)

    def test_fromOpenIDRequest(self):
        policy_uris = [pape.AUTH_MULTI_FACTOR, pape.AUTH_PHISHING_RESISTANT]
        openid_req_msg = Message.fromOpenIDArgs({
            'mode':
            'checkid_setup',
            'ns':
            OPENID2_NS,
            'ns.pape':
            pape.ns_uri,
            'pape.preferred_auth_policies':
            ' '.join(policy_uris),
            'pape.max_auth_age':
            '5476'
        })
        oid_req = server.OpenIDRequest()
        oid_req.message = openid_req_msg
        req = pape.Request.fromOpenIDRequest(oid_req)
        self.assertEqual(policy_uris, req.preferred_auth_policies)
        self.assertEqual(5476, req.max_auth_age)

    def test_fromOpenIDRequest_no_pape(self):
        # A request without the PAPE namespace yields no Request object.
        message = Message()
        openid_req = server.OpenIDRequest()
        openid_req.message = message
        pape_req = pape.Request.fromOpenIDRequest(openid_req)
        assert (pape_req is None)

    def test_preferred_types(self):
        self.req.addPolicyURI(pape.AUTH_PHISHING_RESISTANT)
        self.req.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        pt = self.req.preferredTypes(
            [pape.AUTH_MULTI_FACTOR, pape.AUTH_MULTI_FACTOR_PHYSICAL])
        self.assertEqual([pape.AUTH_MULTI_FACTOR], pt)
class DummySuccessResponse:
    """Minimal stand-in for an OpenID success response used by the PAPE tests.

    Holds a message and a canned dict of "signed" arguments; every namespace
    lookup returns that same dict, and the response always claims OpenID 2.
    """

    def __init__(self, message, signed_stuff):
        self.message = message
        self.signed_stuff = signed_stuff

    def isOpenID1(self):
        # The tests only exercise OpenID 2 behaviour.
        return False

    def getSignedNS(self, ns_uri):
        # Treat every namespace as signed with the canned arguments.
        return self.signed_stuff
class PapeResponseTestCase(unittest.TestCase):
    """Tests for pape.Response: construction, policy management,
    (de)serialization (strict and lenient), and extraction from a
    success response.
    """

    def setUp(self):
        self.resp = pape.Response()

    def test_construct(self):
        self.assertEqual([], self.resp.auth_policies)
        self.assertEqual(None, self.resp.auth_time)
        self.assertEqual('pape', self.resp.ns_alias)
        self.assertEqual(None, self.resp.nist_auth_level)

        req2 = pape.Response([pape.AUTH_MULTI_FACTOR], "2004-12-11T10:30:44Z",
                             {pape.LEVELS_NIST: 3})
        self.assertEqual([pape.AUTH_MULTI_FACTOR], req2.auth_policies)
        self.assertEqual("2004-12-11T10:30:44Z", req2.auth_time)
        self.assertEqual(3, req2.nist_auth_level)

    def test_add_policy_uri(self):
        # Adding is idempotent and order-preserving; AUTH_NONE may never be
        # combined with real policies.
        self.assertEqual([], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual([pape.AUTH_MULTI_FACTOR], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual([pape.AUTH_MULTI_FACTOR], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_PHISHING_RESISTANT)
        self.assertEqual(
            [pape.AUTH_MULTI_FACTOR,
             pape.AUTH_PHISHING_RESISTANT], self.resp.auth_policies)
        self.resp.addPolicyURI(pape.AUTH_MULTI_FACTOR)
        self.assertEqual(
            [pape.AUTH_MULTI_FACTOR,
             pape.AUTH_PHISHING_RESISTANT], self.resp.auth_policies)
        self.assertRaises(RuntimeError, self.resp.addPolicyURI, pape.AUTH_NONE)

    def test_getExtensionArgs(self):
        # An empty policy list serializes as the explicit AUTH_NONE URI.
        self.assertEqual({
            'auth_policies': pape.AUTH_NONE
        }, self.resp.getExtensionArgs())
        self.resp.addPolicyURI('http://uri')
        self.assertEqual({
            'auth_policies': 'http://uri'
        }, self.resp.getExtensionArgs())
        self.resp.addPolicyURI('http://zig')
        self.assertEqual({
            'auth_policies': 'http://uri http://zig'
        }, self.resp.getExtensionArgs())
        self.resp.auth_time = "1776-07-04T14:43:12Z"
        self.assertEqual({
            'auth_policies': 'http://uri http://zig',
            'auth_time': "1776-07-04T14:43:12Z"
        }, self.resp.getExtensionArgs())
        self.resp.setAuthLevel(pape.LEVELS_NIST, '3')
        self.assertEqual({
            'auth_policies': 'http://uri http://zig',
            'auth_time': "1776-07-04T14:43:12Z",
            'auth_level.nist': '3',
            'auth_level.ns.nist': pape.LEVELS_NIST
        }, self.resp.getExtensionArgs())

    def test_getExtensionArgs_error_auth_age(self):
        # auth_time must be a valid ISO-8601 timestamp.
        self.resp.auth_time = "long ago"
        self.assertRaises(ValueError, self.resp.getExtensionArgs)

    def test_parseExtensionArgs(self):
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': '1970-01-01T00:00:00Z'
        }
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual('1970-01-01T00:00:00Z', self.resp.auth_time)
        self.assertEqual(['http://foo', 'http://bar'], self.resp.auth_policies)

    def test_parseExtensionArgs_valid_none(self):
        args = {'auth_policies': pape.AUTH_NONE}
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual([], self.resp.auth_policies)

    def test_parseExtensionArgs_old_none(self):
        # The bare string 'none' is a legacy spelling, tolerated leniently.
        args = {'auth_policies': 'none'}
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual([], self.resp.auth_policies)

    def test_parseExtensionArgs_old_none_strict(self):
        args = {'auth_policies': 'none'}
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs_empty(self):
        self.resp.parseExtensionArgs({}, is_openid1=False)
        self.assertEqual(None, self.resp.auth_time)
        self.assertEqual([], self.resp.auth_policies)

    def test_parseExtensionArgs_empty_strict(self):
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs, {},
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs_ignore_superfluous_none(self):
        # AUTH_NONE alongside a real policy is contradictory; lenient parsing
        # drops it, strict parsing (next test) rejects it.
        policies = [pape.AUTH_NONE, pape.AUTH_MULTI_FACTOR_PHYSICAL]

        args = {
            'auth_policies': ' '.join(policies),
        }

        self.resp.parseExtensionArgs(args, is_openid1=False, strict=False)

        self.assertEqual([pape.AUTH_MULTI_FACTOR_PHYSICAL],
                         self.resp.auth_policies)

    def test_parseExtensionArgs_none_strict(self):
        policies = [pape.AUTH_NONE, pape.AUTH_MULTI_FACTOR_PHYSICAL]

        args = {
            'auth_policies': ' '.join(policies),
        }

        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs_strict_bogus1(self):
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': 'yesterday'
        }
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs_openid1_strict(self):
        # OpenID 1 allows the 'nist' shorthand without a namespace declaration.
        args = {
            'auth_level.nist': '0',
            'auth_policies': pape.AUTH_NONE,
        }
        self.resp.parseExtensionArgs(args, strict=True, is_openid1=True)
        self.assertEqual('0', self.resp.getAuthLevel(pape.LEVELS_NIST))
        self.assertEqual([], self.resp.auth_policies)

    def test_parseExtensionArgs_strict_no_namespace_decl_openid2(self):
        # Test the case where the namespace is not declared for an
        # auth level.
        args = {
            'auth_policies': pape.AUTH_NONE,
            'auth_level.nist': '0',
        }
        self.assertRaises(
            ValueError,
            self.resp.parseExtensionArgs,
            args,
            is_openid1=False,
            strict=True)

    def test_parseExtensionArgs_nostrict_no_namespace_decl_openid2(self):
        # Test the case where the namespace is not declared for an
        # auth level.
        args = {
            'auth_policies': pape.AUTH_NONE,
            'auth_level.nist': '0',
        }
        self.resp.parseExtensionArgs(args, is_openid1=False, strict=False)

        # There is no namespace declaration for this auth level.
        self.assertRaises(KeyError, self.resp.getAuthLevel, pape.LEVELS_NIST)

    def test_parseExtensionArgs_strict_good(self):
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': '1970-01-01T00:00:00Z',
            'auth_level.nist': '0',
            'auth_level.ns.nist': pape.LEVELS_NIST
        }
        self.resp.parseExtensionArgs(args, is_openid1=False, strict=True)
        self.assertEqual(['http://foo', 'http://bar'], self.resp.auth_policies)
        self.assertEqual('1970-01-01T00:00:00Z', self.resp.auth_time)
        self.assertEqual(0, self.resp.nist_auth_level)

    def test_parseExtensionArgs_nostrict_bogus(self):
        args = {
            'auth_policies': 'http://foo http://bar',
            'auth_time': 'when the cows come home',
            'nist_auth_level': 'some'
        }
        self.resp.parseExtensionArgs(args, is_openid1=False)
        self.assertEqual(['http://foo', 'http://bar'], self.resp.auth_policies)
        self.assertEqual(None, self.resp.auth_time)
        self.assertEqual(None, self.resp.nist_auth_level)

    def test_fromSuccessResponse(self):
        policy_uris = [pape.AUTH_MULTI_FACTOR, pape.AUTH_PHISHING_RESISTANT]
        openid_req_msg = Message.fromOpenIDArgs({
            'mode':
            'id_res',
            'ns':
            OPENID2_NS,
            'ns.pape':
            pape.ns_uri,
            'pape.auth_policies':
            ' '.join(policy_uris),
            'pape.auth_time':
            '1970-01-01T00:00:00Z'
        })
        signed_stuff = {
            'auth_policies': ' '.join(policy_uris),
            'auth_time': '1970-01-01T00:00:00Z'
        }
        oid_req = DummySuccessResponse(openid_req_msg, signed_stuff)
        req = pape.Response.fromSuccessResponse(oid_req)
        self.assertEqual(policy_uris, req.auth_policies)
        self.assertEqual('1970-01-01T00:00:00Z', req.auth_time)

    def test_fromSuccessResponseNoSignedArgs(self):
        # Unsigned PAPE arguments must be ignored entirely.
        policy_uris = [pape.AUTH_MULTI_FACTOR, pape.AUTH_PHISHING_RESISTANT]
        openid_req_msg = Message.fromOpenIDArgs({
            'mode':
            'id_res',
            'ns':
            OPENID2_NS,
            'ns.pape':
            pape.ns_uri,
            'pape.auth_policies':
            ' '.join(policy_uris),
            'pape.auth_time':
            '1970-01-01T00:00:00Z'
        })

        signed_stuff = {}

        class NoSigningDummyResponse(DummySuccessResponse):
            def getSignedNS(self, ns_uri):
                return None

        oid_req = NoSigningDummyResponse(openid_req_msg, signed_stuff)
        resp = pape.Response.fromSuccessResponse(oid_req)
        self.assertTrue(resp is None)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 2.15625 | 2 |
src/dynamic_sites/urls.py | oruehenbeck/django_dynamic_sites | 0 | 12773078 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import RedirectView, ContentView, ContentEditView, ContentDeleteView, ContentDownloadView, ContentFormView
# Route table: first-layer routes match directly on a slug; deeper layers
# capture the path prefix in `rest_url`. Within each layer, specific views
# (editor/delete/download/form) must precede the catch-all ContentView.
urlpatterns = [
    # top layer
    url(r'^$', RedirectView.as_view(), name='redirect_landing'),
    url(r'^(?P<slug>[-\w]*form[-\w]*)/$', ContentFormView.as_view(), name='form_view'),  # first-layer form views
    url(r'^(?P<slug>[-\w]+)/$', ContentView.as_view(), name='content_view'),  # keep the catch-all ContentView last
    # every other layer
    url(r'^(?P<rest_url>([-\w]+\/)*)(?P<slug>editor)/(?P<id>\d+)/$', ContentEditView.as_view(), name='editor_view'),
    url(r'^(?P<rest_url>([-\w]+\/)*)(?P<slug>delete)/(?P<id>\d+)/$', ContentDeleteView.as_view(), name='delete_view'),
    url(r'^(?P<rest_url>([-\w]+\/)*)(?P<slug>download)/(?P<id>\d+)/$', ContentDownloadView.as_view(), name='download_view'),
    url(r'^(?P<rest_url>([-\w]+\/)*)(?P<slug>[-\w]*form[-\w]*)/$', ContentFormView.as_view(), name='form_view'),
    url(r'^(?P<rest_url>([-\w]+\/)*)(?P<slug>[-\w]+)/$', ContentView.as_view(), name='content_view'),
]
learningPygame/Derek/00-MovingSmile/04-Drawing.py | Rosebotics/catapult2019 | 0 | 12773079 | <reponame>Rosebotics/catapult2019<gh_stars>0
# TODO: Copy all of your 03-Colors.py program and put it below this comment.
# TODO One way to do so is:
# TODO 1. Inside 03-Colors.py, do:
# TODO -- Control-A (to SELECT the entire contents of the file, then
# TODO -- Control-C (to COPY that entire selection)
# TODO 2. Inside this file:
# TODO -- Click below this comment, then
# TODO -- Control-V (to PASTE the copied code into this file.
# TODO: In this module we'll start drawing a simple smiley face
# Yellow circle for the head
# Two black circle eyes
# Red rectangle (rect) mouth
# Red circle nose.
import pygame
import math

pygame.init()
running = True
frame = pygame.display.set_mode((640, 480))
# Animated state: the feature colour and circle radius cycle each frame.
red_value = 255
circle_radius = 25
# Background colour channels, each swept 0..255 independently.
screen_red = 0
screen_green = 0
screen_blue = 0
while running:
    # Handle window events; closing the window ends the loop.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    frame.fill((screen_red, screen_green, screen_blue))
    # Advance each background channel, wrapping at 255.
    screen_red += 1
    if screen_red > 255:
        screen_red = 0
    screen_green += 1
    if screen_green > 255:
        screen_green = 0
    screen_blue += 1
    if screen_blue > 255:
        screen_blue = 0
    # Fade the feature colour and shrink the feature circles, then wrap.
    red_value -= 1
    if red_value < 0:
        red_value = 255
    circle_radius -= 1
    if circle_radius < 0:
        circle_radius = 25
    # Draw the face: head, eyes, nose, and an arc for the smiling mouth.
    face = pygame.draw.circle(frame, (255, 255, 255), (320, 240), 140)
    left_eye = pygame.draw.circle(frame, (red_value, 0, 0), (260, 200), circle_radius)
    right_eye = pygame.draw.circle(frame, (red_value, 0, 0), (380, 200), circle_radius)
    nose = pygame.draw.circle(frame, (255, 0, 255), (320, 240), circle_radius)
    mouth = pygame.draw.arc(frame, (red_value, 0, 0), (270, 260, 100, 100), math.pi, 2*math.pi, 10)
    pygame.display.update()
Ch12/bst_height.py | anandg95/CLRS-solutions | 3 | 12773080 | <gh_stars>1-10
from collections import deque
def height_recursive(bst, node):
    """Return the height of the subtree rooted at *node* (a leaf has height 0).

    An empty subtree (``node is None``) also reports 0, which matches
    ``height_iterative``'s result for an empty tree.  *bst* is unused but
    kept for interface parity with the iterative variant.

    Fix: the previous revision compared with ``== None`` (PEP 8 E711);
    identity comparison avoids surprises from custom ``__eq__``.
    """
    if node is None or (node.left is None and node.right is None):
        return 0
    return 1 + max(height_recursive(bst, node.left),
                   height_recursive(bst, node.right))
def height_iterative(bst):
    """Breadth-first height of *bst*; an empty tree reports 0, a single
    node also 0 (edges, not nodes, are counted)."""
    if bst.root is None:
        return 0
    level = [bst.root]
    height = -1
    # Process one full level per pass; each pass adds one to the height.
    while level:
        next_level = []
        for node in level:
            if node.left is not None:
                next_level.append(node.left)
            if node.right is not None:
                next_level.append(node.right)
        level = next_level
        height += 1
    return height
| 3.609375 | 4 |
ms2ldaviz/ms2ldaviz/settings_docker.py | sdrogers/ms2ldaviz | 6 | 12773081 | from .settings import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^(2&40trp_*ei%$_*p-k598#hu3-w(@9%&&dr�##dpag=c%+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
DATABASES['default']['HOST'] = 'db'
CELERY_BROKER_URL = "redis://redis:6379/0"
CHEMSPIDER_APIKEY='<KEY>' | 1.046875 | 1 |
Vault7/Lost-in-Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/lp/gui/__init__.py | dendisuhubdy/grokmachine | 46 | 12773082 |
# package dsz.lp.gui
import dsz
import dsz.lp.alias
import dsz.lp.cmdline
import dsz.path
import dsz.version
import os
import re
import sys
import xml.dom.minidom
| 1.039063 | 1 |
tests/test_scripts/test_gen_jsonld_context.py | krishna-saravan/linkml | 83 | 12773083 | <gh_stars>10-100
import unittest
import click
from linkml.generators import jsonldcontextgen
from tests.test_scripts.environment import env
from tests.utils.clicktestcase import ClickTestCase
from tests.utils.filters import ldcontext_metadata_filter
class GenContextTestCase(ClickTestCase):
testdir = "gencontext"
click_ep = jsonldcontextgen.cli
prog_name = "gen-jsonld-context"
env = env
def test_help(self):
self.do_test("--help", 'help')
def test_meta(self):
self.maxDiff = None
self.do_test([], 'meta.context.jsonld', filtr=ldcontext_metadata_filter)
self.do_test('--metauris', 'meta_contextn.jsonld', filtr=ldcontext_metadata_filter)
self.do_test('-f xsv', 'meta_error', expected_error=click.exceptions.BadParameter)
self.do_test('--niggles', 'meta2_error', expected_error=click.exceptions.NoSuchOption)
def test_prefix_options(self):
""" Test various prefix emission options"""
# prefixes only, no-merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--no-mergeimports', '--no-model'],
'simple_uri_test.no_merge.prefixes_only.context.jsonld', add_yaml=False)
# flat prefixes only, no-merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--no-mergeimports', '--no-model', '--flatprefixes'],
'simple_uri_test.no_merge.flatprefixes_only.context.jsonld', add_yaml=False)
# model only, no-merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--no-mergeimports', '--no-prefixes'],
'simple_uri_test.no_merge.model_only.context.jsonld', add_yaml=False)
# both, no-merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--no-mergeimports', '--model', '--prefixes'],
'simple_uri_test.no_merge.context.jsonld', add_yaml=False)
# prefixes only, merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--mergeimports', '--no-model'],
'simple_uri_test.merge.prefixes_only.context.jsonld', add_yaml=False)
# flat prefixes only, merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--mergeimports', '--no-model', '--flatprefixes'],
'simple_uri_test.merge.flatprefixes_only.context.jsonld', add_yaml=False)
# model only, merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--mergeimports', '--no-prefixes'],
'simple_uri_test.merge.model_only.context.jsonld', add_yaml=False)
# both, merge
self.do_test([self.env.input_path('simple_uri_test.yaml'), '--no-metadata', '--mergeimports', '--model', '--prefixes'],
'simple_uri_test.merge.context.jsonld', add_yaml=False)
def test_slot_class_uri(self):
# Note: two warnings are expected below:
# WARNING:ContextGenerator:No namespace defined for URI: http://example.org/slot/su
# WARNING:ContextGenerator:No namespace defined for URI: http://example.org/class/cu
self.do_test(env.input_path('uri_tests.yaml'), 'uri_tests.jsonld', filtr=ldcontext_metadata_filter,
add_yaml=False)
if __name__ == '__main__':
unittest.main()
| 2.03125 | 2 |
src/musescore/__init__.py | ryanrudes/musescore | 0 | 12773084 | <gh_stars>0
from .musescore import *
| 0.894531 | 1 |
pymap3d/azelradec.py | unjambonakap/pymap3d | 0 | 12773085 | <gh_stars>0
"""
Azimuth / elevation <==> Right ascension, declination
"""
from typing import Tuple
from datetime import datetime
from .vallado import azel2radec as vazel2radec, radec2azel as vradec2azel
from .timeconv import str2dt # astropy can't handle xarray times (yet)
# AstroPy is an optional dependency: when it is missing, `Time` is set to
# None and the conversion functions below fall back to the Vallado routines.
try:
    from astropy.time import Time
    from astropy import units as u
    from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS
except ImportError:
    Time = None
def azel2radec(az_deg: float, el_deg: float,
               lat_deg: float, lon_deg: float,
               time: datetime, usevallado: bool = False) -> Tuple[float, float]:
    """
    Convert a horizontal viewing direction (azimuth, elevation) at an
    observer location into sky coordinates (right ascension, declination).

    Parameters
    ----------
    az_deg : float or numpy.ndarray of float
        azimuth, degrees clockwise from North
    el_deg : float or numpy.ndarray of float
        elevation above the horizon, degrees (aberration neglected)
    lat_deg : float
        observer latitude [-90, 90] degrees
    lon_deg : float
        observer longitude [-180, 180] degrees
    time : datetime.datetime or str
        time of observation
    usevallado : bool, optional
        force the pure-Python Vallado algorithm instead of AstroPy

    Returns
    -------
    ra_deg : float or numpy.ndarray of float
        ecliptic right ascension, degrees
    dec_deg : float or numpy.ndarray of float
        ecliptic declination, degrees
    """
    # Fall back to the (less accurate) Vallado routine when requested or
    # when AstroPy is not installed.
    if usevallado or Time is None:
        return vazel2radec(az_deg, el_deg, lat_deg, lon_deg, time)

    observer = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
    pointing = AltAz(location=observer, obstime=Time(str2dt(time)),
                     az=az_deg * u.deg, alt=el_deg * u.deg)
    coord = SkyCoord(pointing.transform_to(ICRS()))

    return coord.ra.deg, coord.dec.deg
def radec2azel(ra_deg: float, dec_deg: float,
               lat_deg: float, lon_deg: float,
               time: datetime, usevallado: bool = False) -> Tuple[float, float]:
    """
    Convert sky coordinates (right ascension, declination) into a local
    viewing direction (azimuth, elevation) for an observer site.

    Parameters
    ----------
    ra_deg : float or numpy.ndarray of float
        ecliptic right ascension (degrees)
    dec_deg : float or numpy.ndarray of float
        ecliptic declination (degrees)
    lat_deg : float
        observer latitude [-90, 90]
    lon_deg : float
        observer longitude [-180, 180] (degrees)
    time : datetime.datetime or str
        time of observation
    usevallado : bool, optional
        default uses AstroPy; if true, use the (less accurate) Vallado algorithm

    Returns
    -------
    az_deg : float or numpy.ndarray of float
        azimuth [degrees clockwise from North]
    el_deg : float or numpy.ndarray of float
        elevation [degrees above horizon (neglecting aberration)]
    """
    # Pure-Python fallback when requested or when AstroPy is unavailable.
    if usevallado or Time is None:
        return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)

    site = EarthLocation(lat=lat_deg * u.deg,
                         lon=lon_deg * u.deg)
    target = SkyCoord(Angle(ra_deg, unit=u.deg),
                      Angle(dec_deg, unit=u.deg),
                      equinox='J2000.0')
    horizontal = target.transform_to(AltAz(location=site, obstime=Time(str2dt(time))))
    return horizontal.az.degree, horizontal.alt.degree
| 2.796875 | 3 |
sqlce.py | jobead/python-excel | 0 | 12773086 | <gh_stars>0
import os
import time
import datetime
import zipfile
import xml.dom.minidom
def get_files_from_folder(path = os.environ.get('HOME', '.')):
    """Return the sorted names of the entries in *path*.

    macOS's '.DS_Store' metadata file is excluded, matching how the other
    helpers in this module filter their file lists.

    The original body only computed an unused ``homedir`` variable and
    returned None; it now actually performs the lookup its name promises.
    The default path falls back to '.' when HOME is unset instead of
    raising KeyError at import time.
    """
    return sorted(name for name in os.listdir(path) if name != '.DS_Store')
def get_external_data_connections(path = os.environ['HOME'] + '/python/excelfiles', filename = 'external_data_connections'):
    # Python 2 script. Scans every .xlsx archive in *path*, pulls each
    # workbook's xl/connections.xml, and writes a report of every external
    # data connection (name plus its db command/query text) to
    # $HOME/python/<filename>. Side effects: changes the process working
    # directory (restored to $HOME/python at the end) and overwrites the
    # report file.
    print 'STARTING SEARCH FOR EXTERNAL DATA CONNECTIONS'
    homedir = os.environ['HOME'] + '/python'
    print 'DIRECTORY TO SEARCH'
    print '\t' + path
    os.chdir(path)
    filelist = os.listdir(path)
    # NOTE(review): this loop only ever inspects index 0 (i is reset to 0
    # on every iteration), so at most one leading '.DS_Store' entry is
    # removed — confirm that is the intent.
    for files in filelist:
        i=0
        if filelist[i] == '.DS_Store': # eliminates apple's directory view settings file from list
            filelist[i:i+1] = []
        i=i+1
    #print filelist
    print 'FILES TO SEARCH'
    for files in filelist:
        print '\t' + files
    outputfile = open(homedir + '/' + filename, 'w+')
    print outputfile
    # Report header with a run timestamp.
    outputfile.write('THESE ARE ALL MY EXCEL FILES\t\t\tRUNTIME: ' + str(datetime.datetime.now()) + '\n\n')
    print 'i am about to start writing files'
    i=0
    for files in filelist:
        outputfile.write('-- ' + filelist[i]+'\n\n')
        # .xlsx files are zip archives; the connection metadata lives in
        # xl/connections.xml inside the archive.
        archive = zipfile.ZipFile(path + '/' + filelist[i])
        conn = archive.read(name='xl/connections.xml')
        # basicxml = xml.dom.minidom.parseString(books)
        # xmlelements = basicxml.getElementsByTagName('sheet')
        # sheetrowsout = list(len(xmlelements)*' ')
        basicxml = xml.dom.minidom.parseString(conn)
        # conn = str(basicxml.toprettyxml())
        xmlelements = basicxml.getElementsByTagName('connection')
        # print 'i have parsed my xml'
        # Pre-size an output slot per <connection> element.
        connrowsout = list(len(xmlelements)*' ')
        # print 'i have setup my output list'
        # print xmlelements
        j=0
        for conns in xmlelements:
            # print 'i am about to parse my ' + str(j) + 'th xmlelement'
            # print xmlelements[j].attributes['id'].value + '\t\t' + xmlelements[j].attributes['name'].value
            # <dbPr> carries the database command/query for the connection.
            conncommand = conns.getElementsByTagName('dbPr')
            querytext = '\t\t' + str(conncommand[0].attributes['command'].value)
            # Excel escapes CR/LF and TAB inside attribute values; undo that
            # so the query prints readably.
            querytext = str.replace(querytext,'_x000d__x000a_','\n\t\t')
            querytext = str.replace(querytext,'_x0009_','\t')
            # NOTE(review): the next two replacements are no-ops as written;
            # they look like they were meant to unescape '&gt;'/'&lt;' —
            # confirm against the original source.
            querytext = str.replace(querytext,'>','>')
            querytext = str.replace(querytext,'<','<')
            # print conncommand
            connrowsout[j] = '--' + '\t' + xmlelements[j].attributes['name'].value + '\n' + querytext + '\n\n'
            # print 'i have parsed my ' + str(j) + 'th element'
            j=j+1
        # Emit connections in alphabetical order.
        connrowsout.sort()
        #print pretty_xml_as_string
        # conn = str.replace(conn,'command="','command="\n')
        k=0
        for outputs in connrowsout:
            outputfile.write(connrowsout[k]+'\n')
            k=k+1
        outputfile.write('\n\n\n')
        i=i+1
    outputfile.write('\n\n\n')
    print 'i have written all my files'
    #print os.getcwd()
    #archive.printdir()
    #print conn1[:100] + '... plus ' + str(len(conn1) - 100) + ' more characters'
    # Restore the working directory changed at the top of the function.
    os.chdir(homedir)
    print 'i have reset the working directory to ' + homedir
    #print os.getcwd()
def get_sheet_names(path = os.environ['HOME'] + '/python/excelfiles', filename = 'sheet_names'):
    # Python 2 script. For every .xlsx archive in *path*, reads
    # xl/workbook.xml and writes each sheet's relationship id, visibility
    # state and name to $HOME/python/<filename>. Side effects: changes the
    # process working directory (restored at the end) and overwrites the
    # report file.
    print 'STARTING SEARCH FOR SHEET NAMES'
    homedir = os.environ['HOME'] + '/python'
    print 'DIRECTORY TO SEARCH'
    print '\t' + path
    os.chdir(path)
    filelist = os.listdir(path)
    # NOTE(review): i is reset every iteration, so only a leading
    # '.DS_Store' entry (index 0) is ever removed — confirm intent.
    for files in filelist:
        i=0
        if filelist[i] == '.DS_Store': # eliminates apple's directory view settings file from list
            filelist[i:i+1] = []
        i=i+1
    #print filelist
    print 'FILES TO SEARCH'
    for files in filelist:
        print '\t' + files
    outputfile = open(homedir + '/' + filename, 'w+')
    print outputfile
    outputfile.write('THESE ARE ALL MY EXCEL FILES\n\n')
    print 'i am about to start writing files'
    i=0
    for files in filelist:
        print filelist[i]
        outputfile.write(filelist[i]+'\n')
        # .xlsx files are zip archives; sheet metadata lives in
        # xl/workbook.xml inside the archive.
        archive = zipfile.ZipFile(path + '/' + filelist[i])
        # print archive.infolist()
        # print archive.namelist()
        books = archive.read(name='xl/workbook.xml')
        basicxml = xml.dom.minidom.parseString(books)
        xmlelements = basicxml.getElementsByTagName('sheet')
        # Pre-size an output slot per <sheet> element.
        sheetrowsout = list(len(xmlelements)*' ')
        j=0
        for sheetrows in xmlelements:
            # print (try: visible = xmlelements[j].attributes['state'].value except: 'hidden') + '\t' + xmlelements[j].attributes['name'].value
            # Sheets carry a 'state' attribute only when hidden/veryHidden;
            # visible sheets have none, hence the try/except.
            try:
                visible = '\t\t' + xmlelements[j].attributes['state'].value
            except:
                visible = ''
                # print '\t\t\t' + 'hidden'
            # Sort key: first letter of the state + the zero-padded numeric
            # part of the relationship id (r:id is of the form 'rIdNN').
            sheetrowsout[j] = visible.strip()[:1] + ('000' + xmlelements[j].attributes['r:id'].value[3:])[-4:] + '\t' + visible + '\t' + xmlelements[j].attributes['name'].value + '\n'
            j=j+1
        sheetrowsout.sort()
        # books = str(basicxml.toprettyxml())
        k=0
        for outputs in sheetrowsout:
            outputfile.write(sheetrowsout[k])
            k=k+1
        outputfile.write('\n\n')
        i=i+1
    # Restore the working directory changed at the top of the function.
    os.chdir(homedir)
    print 'i have reset the working directory to ' + homedir
| 2.9375 | 3 |
dev_global/dev_global/basic.py | FrederichRiver/neutrino2 | 0 | 12773087 | <reponame>FrederichRiver/neutrino2
#!/usr/bin/python38
import atexit
import os
import signal
import sys
__version__ = '1.0.1'
PROG_NAME = 'Neutrino'
def deamon(pid_file: str, log_file: str, prog_name: str):
    """
    Detach the current process and continue running it as a UNIX daemon
    (classic double-fork technique).

    pid_file: full path of the pid file (a /tmp location is suggested); its
        existence acts as an "already running" lock, and it is removed at exit.
    log_file: full path of the log file; stdout and stderr are redirected to it.
    prog_name: program name used in the "already running" error message.

    Raises RuntimeError when the pid file already exists; the intermediate
    parent processes exit via SystemExit(0).
    """
    # Refuse to start twice: the pid file is the lock.
    if os.path.exists(pid_file):
        raise RuntimeError(f"{prog_name} is already running")
    # First fork: the parent exits so the child is re-parented to init.
    if os.fork() > 0:
        raise SystemExit(0)
    os.chdir('/')   # don't keep any mounted filesystem busy
    os.umask(0)     # drop the file-creation mask inherited from the caller
    os.setsid()     # new session: detach from the controlling terminal
    # Second fork: the session leader exits so the daemon can never
    # re-acquire a controlling terminal.
    if os.fork() > 0:
        raise SystemExit(0)
    # Flush I/O buffers before the standard streams are redirected.
    sys.stdout.flush()
    sys.stderr.flush()
    # NOTE(review): stdin is intentionally(?) left attached — the commented
    # code below would have redirected it too.
    # with open(log_file, 'rb', 0) as read_null:
    #     os.dup2(read_null.fileno(), sys.stdin.fileno())
    with open(log_file, 'a') as write_null:
        # Redirect fd 1 (stdout) to the log file.
        os.dup2(write_null.fileno(), 1)
    with open(log_file, 'a') as error_null:
        # Redirect fd 2 (stderr) to the log file.
        os.dup2(error_null.fileno(), 2)
    # Record the daemon's pid and arrange for the pid file to be removed
    # on normal interpreter shutdown.
    if pid_file:
        with open(pid_file, 'w+') as f:
            f.write(str(os.getpid()))
        atexit.register(os.remove, pid_file)
    def sigterm_handler(signo, frame):
        # Turn SIGTERM into a normal shutdown so the atexit cleanup runs.
        raise SystemExit(1)
    signal.signal(signal.SIGTERM, sigterm_handler)
| 2.578125 | 3 |
py/codeforces/872A.py | shhuan/algorithms | 0 | 12773088 | <reponame>shhuan/algorithms<filename>py/codeforces/872A.py<gh_stars>0
# -*- coding: utf-8 -*-
import math
import collections
import bisect
import heapq
import time
import random
import itertools
"""
created by shhuan at 2017/10/20 08:16
"""
# Codeforces 872A: read two digit lists and print the smallest integer that
# contains at least one digit from each list.
# N and M are the list lengths; they are only needed to consume the first line.
N, M = map(int, input().split())
A = [int(x) for x in input().split()]
B = [int(x) for x in input().split()]
a = min(A)  # smallest digit available in the first list
b = min(B)  # smallest digit available in the second list
c = set(A) & set(B)  # digits present in BOTH lists
if c:
    # A digit shared by both lists gives a one-digit answer.
    print(min(c))
elif a > b:
    # Otherwise combine the two minima, smaller digit first.
    print(b*10+a)
else:
    print(a*10+b)
| 3.203125 | 3 |
techStore(final)/img/image_data/admin.py | X3eRo0/E-Commerce-Website | 0 | 12773089 | <gh_stars>0
from django.contrib import admin
# Register your models here.
from .models import Product
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
    """Admin options for Product: show the object's string form and its
    slug column in the change-list."""
    list_display = ['__str__', 'slug']

    # NOTE(review): ModelAdmin does not read an inner Meta class; this has
    # no effect and is kept only for backward compatibility.
    class Meta:
        model = Product


# Register Product together with its admin options. Previously the model was
# registered bare and ProductAdmin was defined afterwards but never used, so
# list_display had no effect.
admin.site.register(Product, ProductAdmin)
| 1.8125 | 2 |
2021/09/solution_two.py | adtok/advent-of-code | 0 | 12773090 | <reponame>adtok/advent-of-code
"""
Advent of Code 2021: Day 09 Part 1
tldr: graph traversal to find basins
"""
from functools import reduce
from typing import List, Set, Tuple
Point = Tuple[int, int]
Map = List[List[int]]
def read_input(input_file: str) -> Map:
    """Parse the puzzle input file into a grid of single-digit integers,
    one inner list per line."""
    with open(input_file, "r") as handle:
        return [[int(char) for char in row.strip()] for row in handle]
def get_neighbors(row: int, col: int, height: int, width: int) -> List[Point]:
    """Return the in-bounds orthogonal neighbours of (row, col) on a
    height x width grid, in up/left/down/right order."""
    candidates = (
        (row - 1, col, row != 0),           # up
        (row, col - 1, col != 0),           # left
        (row + 1, col, row != height - 1),  # down
        (row, col + 1, col != width - 1),   # right
    )
    return [(r, c) for r, c, in_bounds in candidates if in_bounds]
def find_minima(heatmap: Map, height: int, width: int) -> List[Point]:
    """Return the coordinates of every local minimum of the heatmap, i.e.
    every cell strictly lower than all of its orthogonal neighbours."""
    return [
        (y, x)
        for y in range(height)
        for x in range(width)
        if all(
            heatmap[ny][nx] > heatmap[y][x]
            for ny, nx in get_neighbors(y, x, height, width)
        )
    ]
def get_basin(
    heatmap: Map, height: int, width: int, initial_point: Point
) -> List[Point]:
    """Flood-fill outward from *initial_point* and return every reachable
    cell whose height is not 9 (a "basin" in AoC 2021 day 9 terms).

    The original implementation tested membership with ``point not in basin``
    — an O(n) list scan per neighbour, i.e. accidental O(n^2) overall. A
    companion set now gives O(1) membership while the returned list still
    contains each basin cell exactly once (the same set of points; only the
    discovery order may differ, which callers ignore — they take ``len``).
    """
    frontier: List[Point] = [initial_point]
    seen: Set[Point] = {initial_point}  # O(1) membership check
    basin: List[Point] = [initial_point]
    while frontier:
        row, col = frontier.pop()
        for neighbor in get_neighbors(row, col, height, width):
            if neighbor in seen:
                continue
            seen.add(neighbor)
            n_row, n_col = neighbor
            # Height 9 cells are ridges: they never belong to any basin.
            if heatmap[n_row][n_col] != 9:
                frontier.append(neighbor)
                basin.append(neighbor)
    return basin
def solve(input_file: str) -> int:
    """Solve the puzzle for one input file: multiply together the sizes of
    the three largest basins and print/return the product."""
    heatmap = read_input(input_file)
    height = len(heatmap)
    assert height > 0
    width = len(heatmap[0])
    assert width > 0
    # Each local minimum seeds exactly one basin.
    basin_sizes = [
        len(get_basin(heatmap, height, width, point))
        for point in find_minima(heatmap, height, width)
    ]
    top_three = sorted(basin_sizes, reverse=True)[:3]
    result = reduce(lambda acc, size: acc * size, top_three, 1)
    print(f"The result for {input_file!r} is {result}.")
    return result
def main():
    """Validate the sample input against its known answer, then run the
    real puzzle input."""
    assert solve("input.test") == 1134
    solve("input.solution")
if __name__ == "__main__":
main()
| 3.90625 | 4 |
src/coin_price/admin.py | HaotingS/Python-Django-CRUD-project | 0 | 12773091 | from django.contrib import admin
# Register your models here.
from .models import Coin
admin.site.register(Coin)  # expose Coin in the Django admin with default ModelAdmin options
collect.py | cuppett/collect_aws_logs | 2 | 12773092 | <gh_stars>1-10
# Copyright (c) 2015 <NAME> <<EMAIL>>
# All rights reserved
from boto import logs
from boto.logs.exceptions import LimitExceededException
import argparse
import tempfile
import time
import os
import shutil
# Command-line interface: which log groups to pull, how far back, and where
# to write the retrieved files.
parser = argparse.ArgumentParser(description='An AWS log file extractor', epilog=' - For Steven')
parser.add_argument('--log-prefix', '-l', help='The prefix of the log groups to include in the retrieval.', default=None)
parser.add_argument('--minutes', '-m', help='Amount of minutes to include in output (default is 20).', type=int, default=20)
parser.add_argument('--folder', '-f', help='The parent folder to use for all retrieved output (default is '+tempfile.gettempdir()+').', default=tempfile.gettempdir())
args = parser.parse_args()
# Bail out early when the output parent folder does not exist.
if not os.path.exists(args.folder):
    print('Not a valid path: ' + args.folder)
    exit()
# Important variables which control log group collection.
cwlogs = logs.connect_to_region('us-east-1')
# Lower bound of the time window, in epoch milliseconds (CloudWatch uses ms).
startTime = int(time.time() * 1000) - (abs(args.minutes) * 60 * 1000)
# First page of log groups matching the prefix; pagination continues below.
group_response = cwlogs.describe_log_groups(args.log_prefix, None, None)
has_more_groups = True
def getLogEvents(logGroup, logStream, startTime, nextForwardToken=None):
    """
    Fetch one batch of CloudWatch log events for the given stream starting
    at startTime. When AWS throttles the call (LimitExceededException) the
    request is retried after a one-second pause until a batch is obtained.
    """
    batch = None
    while not batch:
        try:
            # Last positional argument True — presumably start_from_head;
            # confirm against the boto get_log_events signature.
            batch = cwlogs.get_log_events(logGroup, logStream, startTime,
                                          None, nextForwardToken, None, True)
        except LimitExceededException:
            batch = None
            time.sleep(1)
    return batch
# Walk every page of log groups; for each group, walk every page of streams;
# for each stream that has events inside the window, page through its events
# and write them to <folder>/<group>/<stream>.log.
while has_more_groups:
    has_more_groups = False
    # Loop over individual groups in the current page.
    for logGroup in group_response['logGroups']:
        # One output directory per log group; recreate it from scratch.
        directory = args.folder + os.path.sep + logGroup['logGroupName']
        if os.path.exists(directory):
            print('Removing existing contents in ' + directory)
            shutil.rmtree(directory)
        os.mkdir(directory)
        # First page of streams for this group.
        streams_response = cwlogs.describe_log_streams(logGroup['logGroupName'], None, None)
        has_more_streams = True
        while has_more_streams:
            has_more_streams = False
            # Work our way through the streams in the current page.
            for stream in streams_response['logStreams']:
                # Target file for this stream's events.
                file = directory + os.path.sep + stream['logStreamName'] + '.log'
                log_file = None
                log_events = None
                # Skip streams whose newest event predates the window.
                more_log_events = stream['lastEventTimestamp'] > startTime
                if more_log_events:
                    log_events = getLogEvents(logGroup['logGroupName'], stream['logStreamName'], startTime)
                    print('Creating log file ' + file)
                    log_file = open(file, 'w')
                while more_log_events:
                    for log_line in log_events['events']:
                        try:
                            # Once the stream's last-known message has been
                            # seen, stop after finishing this batch.
                            if stream['lastEventTimestamp'] <= log_line['timestamp']:
                                more_log_events = False
                            log_file.write(log_line['message'])
                            log_file.write("\n")
                        except:
                            # NOTE(review): bare except — any write/decode
                            # failure is reduced to a marker line.
                            log_file.write('Bad line read from logs.')
                    # Identical forward/backward tokens (ignoring the 2-char
                    # direction prefix) means there are no further batches.
                    if log_events['nextForwardToken'][2:] == log_events['nextBackwardToken'][2:]:
                        more_log_events = False
                    if more_log_events:
                        # NOTE(review): the backward token is passed as the
                        # forward-token argument — paired with the reversed
                        # read in getLogEvents; confirm direction is intended.
                        log_events = getLogEvents(logGroup['logGroupName'], stream['logStreamName'], startTime, log_events['nextBackwardToken'])
                if log_file:
                    log_file.close()
            # Next page of streams, if any.
            if 'nextToken' in streams_response:
                streams_response = cwlogs.describe_log_streams(logGroup['logGroupName'], streams_response['nextToken'], None)
                has_more_streams = True
    # Next page of groups, if any.
    if 'nextToken' in group_response:
        group_response = cwlogs.describe_log_groups(args.log_prefix, group_response['nextToken'], None)
        has_more_groups = True
| 2.234375 | 2 |
mabed/functions.py | FirasOdeh/MABED | 1 | 12773093 | # coding: utf-8
import timeit
import json
import time
from flask import jsonify
from mabed.es_corpus import Corpus
from mabed.mabed import MABED
from mabed.es_connector import Es_connector
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Interface Functions
class Functions:
def __init__(self):
self.sessions_index = 'mabed_sessions'
self.sessions_doc_type = 'session'
# print("Functions init")
# ==================================================================
# Event Detection
# ==================================================================
    def detect_events(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, cluster=2):
        """Run the full MABED pipeline (load corpus, discretize into time
        slices, detect events) over *index* and return the MABED instance.

        k: number of events to detect; maf/mrf: min absolute / max relative
        word frequency; tsl: time-slice length in minutes; p/theta/sigma:
        MABED algorithm parameters; cluster: passed to Corpus.discretize.
        Progress and timings are printed to stdout.
        """
        sw = 'stopwords/twitter_all.txt'
        sep = '\t'
        print('Parameters:')
        print(
            ' Index: %s\n k: %d\n Stop-words: %s\n Min. abs. word frequency: %d\n Max. rel. word frequency: %f' %
            (index, k, sw, maf, mrf))
        print(' p: %d\n theta: %f\n sigma: %f' % (p, theta, sigma))
        print('Loading corpus...')
        start_time = timeit.default_timer()
        my_corpus = Corpus(sw, maf, mrf, sep, index=index)
        elapsed = timeit.default_timer() - start_time
        print('Corpus loaded in %f seconds.' % elapsed)
        time_slice_length = tsl
        print('Partitioning tweets into %d-minute time-slices...' % time_slice_length)
        start_time = timeit.default_timer()
        my_corpus.discretize(time_slice_length, cluster)
        elapsed = timeit.default_timer() - start_time
        print('Partitioning done in %f seconds.' % elapsed)
        print('Running MABED...')
        start_time = timeit.default_timer()
        mabed = MABED(my_corpus)
        mabed.run(k=k, p=p, theta=theta, sigma=sigma)
        elapsed = timeit.default_timer() - start_time
        print('Event detection performed in %f seconds.' % elapsed)
        return mabed
def event_descriptions(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, cluster=2):
mabed = self.detect_events(index, k, maf, mrf, tsl, p, theta, sigma, cluster)
# format data
event_descriptions = []
impact_data = []
formatted_dates = []
for i in range(0, mabed.corpus.time_slice_count):
formatted_dates.append(int(time.mktime(mabed.corpus.to_date(i).timetuple())) * 1000)
for event in mabed.events:
mag = event[0]
main_term = event[2]
raw_anomaly = event[4]
formatted_anomaly = []
time_interval = event[1]
related_terms = []
for related_term in event[3]:
# related_terms.append(related_term[0] + ' (' + str("{0:.2f}".format(related_term[1])) + ')')
related_terms.append({'word':related_term[0], 'value':str("{0:.2f}".format(related_term[1])) })
event_descriptions.append((mag,
str(mabed.corpus.to_date(time_interval[0])),
str(mabed.corpus.to_date(time_interval[1])),
main_term,
json.dumps(related_terms)))
for i in range(0, mabed.corpus.time_slice_count):
value = 0
if time_interval[0] <= i <= time_interval[1]:
value = raw_anomaly[i]
if value < 0:
value = 0
formatted_anomaly.append([ formatted_dates[i],value])
impact_data.append({"key": main_term, "values": formatted_anomaly})
return {"event_descriptions": event_descriptions, "impact_data": impact_data}
    def detect_filtered_events(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, session=False, filter=False, cluster=2):
        """Same pipeline as detect_events, but the Corpus is restricted by
        *session*/*filter* (forwarded to Corpus). Returns False when the
        filtered corpus contains no tweets; otherwise returns the MABED
        instance. Progress and timings are printed to stdout.
        """
        sw = 'stopwords/twitter_all.txt'
        sep = '\t'
        print('Parameters:')
        print(
            ' Index: %s\n k: %d\n Stop-words: %s\n Min. abs. word frequency: %d\n Max. rel. word frequency: %f' %
            (index, k, sw, maf, mrf))
        print(' p: %d\n theta: %f\n sigma: %f' % (p, theta, sigma))
        print('Loading corpus...')
        start_time = timeit.default_timer()
        my_corpus = Corpus(sw, maf, mrf, sep, index=index, session=session, filter=filter)
        # Nothing matched the filter: signal the caller instead of running MABED.
        if not my_corpus.tweets:
            return False
        elapsed = timeit.default_timer() - start_time
        print('Corpus loaded in %f seconds.' % elapsed)
        time_slice_length = tsl
        print('Partitioning tweets into %d-minute time-slices...' % time_slice_length)
        start_time = timeit.default_timer()
        my_corpus.discretize(time_slice_length, cluster)
        elapsed = timeit.default_timer() - start_time
        print('Partitioning done in %f seconds.' % elapsed)
        print('Running MABED...')
        start_time = timeit.default_timer()
        mabed = MABED(my_corpus)
        mabed.run(k=k, p=p, theta=theta, sigma=sigma)
        elapsed = timeit.default_timer() - start_time
        print('Event detection performed in %f seconds.' % elapsed)
        return mabed
def filtered_event_descriptions(self, index="test3", k=10, maf=10, mrf=0.4, tsl=30, p=10, theta=0.6, sigma=0.6, session=False, filter=False, cluster=2):
mabed = self.detect_filtered_events(index, k, maf, mrf, tsl, p, theta, sigma, session, filter, cluster)
if not mabed:
return False
# format data
event_descriptions = []
impact_data = []
formatted_dates = []
for i in range(0, mabed.corpus.time_slice_count):
formatted_dates.append(int(time.mktime(mabed.corpus.to_date(i).timetuple())) * 1000)
for event in mabed.events:
mag = event[0]
main_term = event[2]
raw_anomaly = event[4]
formatted_anomaly = []
time_interval = event[1]
related_terms = []
for related_term in event[3]:
# related_terms.append(related_term[0] + ' (' + str("{0:.2f}".format(related_term[1])) + ')')
related_terms.append({'word':related_term[0], 'value':str("{0:.2f}".format(related_term[1])) })
event_descriptions.append((mag,
str(mabed.corpus.to_date(time_interval[0])),
str(mabed.corpus.to_date(time_interval[1])),
main_term,
json.dumps(related_terms)))
for i in range(0, mabed.corpus.time_slice_count):
value = 0
if time_interval[0] <= i <= time_interval[1]:
value = raw_anomaly[i]
if value < 0:
value = 0
formatted_anomaly.append([ formatted_dates[i],value])
impact_data.append({"key": main_term, "values": formatted_anomaly})
return {"event_descriptions": event_descriptions, "impact_data": impact_data}
# ==================================================================
# Tweets
# ==================================================================
def get_tweets(self, index="test3", word=""):
my_connector = Es_connector(index=index)
# res = my_connector.search({
# "query": {
# "simple_query_string": {
# "fields": [
# "text"
# ],
# "query": word
# }
# }
# })
# res = my_connector.bigSearch(
# {
# "_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
# "query": {
# "simple_query_string": {
# "fields": [
# "text"
# ],
# "query": word
# }
# }
# })
res = my_connector.init_paginatedSearch({
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
return res
def get_tweets_scroll(self, index, sid, scroll_size):
my_connector = Es_connector(index=index)
res = my_connector.loop_paginatedSearch(sid, scroll_size)
return res
def get_big_tweets(self, index="test3", word=""):
my_connector = Es_connector(index=index)
res = my_connector.bigSearch(
{
"_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
return res
def get_tweets_state(self, index="test3", session="",state="proposed"):
my_connector = Es_connector(index=index)
res = my_connector.init_paginatedSearch(
{
"query": {
"term": {
"session_"+session: state
}
}
})
return res
def get_tweets_query_state(self, index="test3", word="", state="proposed", session=""):
my_connector = Es_connector(index=index)
query = {
"query": {
"bool": {
"must": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
},
"filter": {
"bool": {
"should": [
{
"match": {
session: state
}
}
]
}
}
}
}
}
res = my_connector.init_paginatedSearch(query)
return res
def get_big_tweets_scroll(self, index="test3", word=""):
my_connector = Es_connector(index=index)
res = my_connector.init_paginatedSearch(
{
"_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
return res
def get_event_tweets(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# res = my_connector.search({"query": {"term" : { "text" : word }}})
# query = {
# "bool": {
# "must": {
# "match": {
# "text": {
# "query": main_term,
# "operator": "or"
# }
# }
# },
# "should": terms
# }
# }
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"should": terms
}
}
}
# print(query)
# res = my_connector.search(query)
res = my_connector.init_paginatedSearch(query)
return res
def get_event_filter_tweets(self, index="test3", main_term="", related_terms="", state = "proposed", session=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# query = {
# "sort": [
# "_score"
# ],
# "query": {
# "bool": {
# "should": terms
# }
# }
# }
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"must": [
{
"bool": {
"should": terms
}
}
],
"filter": {
"bool": {
"should": [
{
"match": {
session: state
}
}
]
}
}
}
}
}
res = my_connector.init_paginatedSearch(query)
return res
def get_event_tweets2(self, index="test3", main_term="", related_terms="", cid =0):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# terms.append({"match": {
# "imagesCluster": {
# "query": cid
# }
# }})
# query = {
# "query": {
# "bool": {
# "must": {
# "exists": {
# "field": "imagesCluster"
# }
# },
# # "must": { "match": { "imagesCluster" : cid }},
# "should": terms
# }
# }
# }
query = {
"sort": [
"_score"
],
"query": {
"bool": {
"should": terms,
"minimum_should_match": 1,
"must": [
{
"match": {
"imagesCluster": cid
}
}
]
}
}
}
# res = my_connector.bigSearch(query)
res = my_connector.init_paginatedSearch(query)
return res
def get_cluster_tweets(self, index="test3", cid=0):
my_connector = Es_connector(index=index)
query = {
# "_source": [
# "id_str",
# "imagesCluster",
# "session_Twitter2015",
# "extended_entities"
# ],
"query": {
"term" : { "imagesCluster": cid }
}
}
res = my_connector.search(query)
return res
def get_event_image(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# res = my_connector.search({"query": {"term" : { "text" : word }}})
# query = {
# "bool": {
# "must": {
# "match": {
# "text": {
# "query": main_term,
# "operator": "or"
# }
# }
# },
# "should": terms
# }
# }
query = {
"size": 1,
"_source": [
"id_str",
"imagesCluster",
"session_Twitter2015",
"extended_entities"
],
"query": {
"bool": {
"must":
{
"exists": {
"field": "extended_entities"
}
},
"should": terms
}
}
}
# print(query)
res = my_connector.search(query)
return res
def get_valid_tweets(self, index="test3"):
my_connector = Es_connector(index=index)
res = my_connector.search({
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
}
})
# res = my_connector.bigSearch(
# {
# "_source": ["text", "id_str", "extended_entities", "user", "created_at", "link"],
# "query": {
# "simple_query_string": {
# "fields": [
# "text"
# ],
# "query": word
# }
# }
# })
return res['hits']['hits']
# ==================================================================
# Clusters
# ==================================================================
def get_clusters(self, index="test3", word=""):
my_connector = Es_connector(index=index)
res = my_connector.search({
"size": 1,
"query": {
"simple_query_string": {
"fields": [
"text"
],
"query": word
}
},
"aggs": {
"group_by_cluster": {
"terms": {
"field": "imagesCluster",
"size": 9999
}
}
}
})
# print("Clusters")
# print(res['aggregations']['group_by_cluster']['buckets'])
clusters = res['aggregations']['group_by_cluster']['buckets']
with open(index+'.json') as f:
data = json.load(f)
for cluster in clusters:
# print(cluster['key'])
images = data['duplicates'][cluster['key']]
# print(images[0])
cluster['image']=images[0]
cluster['size'] = len(images)
# print(clusters)
return clusters
def get_event_clusters(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
# query = {
# "size": 0,
# "query": {
# "bool": {
# "should": terms
# }
# },
# "aggs": {
# "group_by_cluster": {
# "terms": {
# "field": "imagesCluster",
# "size": 200
# }
# }
# }
# }
query = {
"size": 0,
"query": {
"bool": {
"should": terms
}
},
"aggregations": {
"group_by_cluster": {
"terms": {
"field": "imagesCluster",
# "shard_size": 999999999,
"size": 999999
}
}
}
}
# print(query)
res = my_connector.search(query)
# print("Clusters")
# print(res['aggregations']['group_by_cluster']['buckets'])
clusters = res['aggregations']['group_by_cluster']['buckets']
with open(index + '.json') as f:
data = json.load(f)
for cluster in clusters:
# q1 = {
# "_source": [
# "text",
# "imagesCluster"
# ],
# "query": {
# "bool": {
# "should": terms,
# "filter": {
# "bool": {
# "should": [
# {
# "match": {
# "imagesCluster": cluster['key']
# }
# }
# ]
# }
# }
# }
# }
# }
q2 = {
"query": {
"term": {"imagesCluster": cluster['key']}
}
}
# cres1 = my_connector.search(q1)
cres = my_connector.count(q2)
# print(cluster['key'])
images = data['duplicates'][cluster['key']]
# print(images[0])
cluster['image'] = images[0]
# cluster['size'] = len(images)
# print(cres)
cluster['size'] = cres['count']
# cluster['size2'] = cres1['hits']['total']
# if cluster['key']==1452:
# print(cluster)
# print(clusters)
return clusters
# ==================================================================
# Sessions
# ==================================================================
# Get all sessions
def get_sessions(self):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
query = {
"query": {
"match_all": {}
}
}
res = my_connector.search(query)
return res
# Get session by session ID
def get_session(self, id):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
res = my_connector.get(id)
return res
# Get session by session name
def get_session_by_Name(self, name):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
query = {
"query": {
"constant_score" : {
"filter" : {
"term" : {
"s_name" : name
}
}
}
}
}
res = my_connector.search(query)
return res
# Add new session
def add_session(self, name, index):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
session = self.get_session_by_Name(name)
if session['hits']['total']==0:
res = my_connector.post({
"s_name": name,
"s_index": index,
"s_type": "tweet"
})
tweets_connector = Es_connector(index=index, doc_type="tweet")
tweets_connector.update_all('session_'+name, 'proposed')
return res
else:
return False
# Update specific field value in an Index
def update_all(self, index, doc_type, field, value):
my_connector = Es_connector(index=index, doc_type=doc_type)
res = my_connector.update_all(field, value)
return res
# Update session events results
def update_session_results(self, id, events, impact_data):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
res = my_connector.update(id, {
"doc" : {
"events" : events,
"impact_data": impact_data
}
})
return res
# Get session events results
def get_session_results(self, id):
my_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
res = my_connector.get(id)
return res
# Delete session by name
def delete_session(self, id):
session_connector = Es_connector(index=self.sessions_index, doc_type=self.sessions_doc_type)
session = session_connector.get(id)
if session:
print("delete Session")
# print(session)
# 1. Delete session data from the tweets
tweets_connector = Es_connector(index=session['_source']['s_index'], doc_type=session['_source']['s_type'])
session_name = 'session_'+session['_source']['s_name']
print(session_name)
tweets_connector.remove_field_all(session_name)
# 2. Delete the session
session_connector.delete(id)
return True
else:
return False
# ==================================================================
# Tweets session status
# ==================================================================
# Set tweets status
def set_all_status(self, index, session, status):
tweets_connector = Es_connector(index=index, doc_type="tweet")
res = tweets_connector.update_all(session, status)
return res
def set_status(self, index, session, data):
tweets_connector = Es_connector(index=index, doc_type="tweet")
# All tweets
session = 'session_'+session
event = json.loads(data['event'])
# print("------------------------")
# print(data)
# print("------------------------")
# print(event)
# print(event['main_term'])
terms = []
words = event['main_term'] + ' '
for t in event['related_terms']:
terms.append({"match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word'] + " "
terms.append({"match": {
"text": {
"query": event['main_term'],
"boost": 2
}
}})
# query = {
# "query": {
# "bool": {
# "should": terms
# }
# }
# }
query = {
"query": {
"bool": {
"must": [
{
"bool": {
"should": terms
}
}
],
"filter": {
"bool": {
"should": [
{
"match": {
session: "proposed"
}
}
]
}
}
}
}
}
# print(query)
res = tweets_connector.update_query(query, session, data['status'])
# Event related
return res
    def set_search_status(self, index, session, state, word):
        """Set *state* on tweets matching *word* that are still 'proposed' for this session."""
        tweets_connector = Es_connector(index=index, doc_type="tweet")
        session = 'session_'+session
        query = {
            "query": {
                "bool": {
                    "must": {
                        "simple_query_string": {
                            "fields": [
                                "text"
                            ],
                            "query": word
                        }
                    },
                    "filter": {
                        "bool": {
                            "should": [
                                {
                                    "match": {
                                        session: "proposed"
                                    }
                                }
                            ]
                        }
                    }
                }
            }
        }
        res = tweets_connector.update_query(query, session, state)
        return res
    def set_search_status_force(self, index, session, state, word):
        """Set *state* on ALL tweets matching *word*, regardless of their current session status."""
        tweets_connector = Es_connector(index=index, doc_type="tweet")
        session = 'session_'+session
        query = {
            "query": {
                "bool": {
                    "must": {
                        "simple_query_string": {
                            "fields": [
                                "text"
                            ],
                            "query": word
                        }
                    }
                }
            }
        }
        res = tweets_connector.update_query(query, session, state)
        return res
def set_cluster_state(self, index, session, cid, state):
tweets_connector = Es_connector(index=index, doc_type="tweet")
# All tweets
session = 'session_'+session
query = {
"query": {
"term" : { "imagesCluster": cid }
}
}
res = tweets_connector.update_query(query, session, state)
return res
def set_tweet_state(self, index, session, tid, val):
tweets_connector = Es_connector(index=index, doc_type="tweet")
session = 'session_'+session
query = {
"doc" : {
session : val
}
}
res = tweets_connector.update(tid, query)
return res
    def export_event(self, index, session):
        """Export all tweets confirmed for *session*, excluding other sessions' marker fields."""
        my_connector = Es_connector(index=index)
        res = my_connector.bigSearch(
            {
                "_source": {
                    "excludes": ["session_*"]
                },
                "query": {
                    "term": {
                        "session_"+session: "confirmed"
                    }
                }
            })
        return res
# ==================================================================
# Beta
# ==================================================================
def get_event_tweets_count(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({ "match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word']+ " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
query = {
"query": {
"bool": {
"should": terms
}
}
}
res = my_connector.count(query)
return res['count']
    def get_event_state_tweets_count(self, index="test3", session="", words="", state="confirmed"):
        """Count tweets matching *words* whose session field equals *state* (e.g. 'confirmed')."""
        my_connector = Es_connector(index=index)
        query = {
            "query": {
                "bool": {
                    "must": [
                        {
                            "match": {
                                "text": {
                                    "query": words
                                }
                            }
                        }
                    ],
                    "filter": {
                        "bool": {
                            "should": [
                                {
                                    "match": {
                                        "session_"+session: state
                                    }
                                }
                            ]
                        }
                    }
                }
            }
        }
        res = my_connector.count(query)
        return res['count']
    def get_words_tweets_count(self, index="test3", session="", words=""):
        """Count tweets whose text matches *words* (no session-state filter; *session* is unused)."""
        my_connector = Es_connector(index=index)
        query = {
            "query": {
                "bool": {
                    "must": [
                        {
                            "match": {
                                "text": {
                                    "query": words
                                }
                            }
                        }
                    ]
                }
            }
        }
        res = my_connector.count(query)
        return res['count']
def get_all_count(self, index="test3"):
my_connector = Es_connector(index=index)
query = {
"query": {
"match_all": {}
}
}
res = my_connector.count(query)
return res['count']
    def get_words_count(self, index="test3", words=""):
        """Count tweets matching *words* using simple_query_string syntax on the text field."""
        my_connector = Es_connector(index=index)
        query = {
            "query": {
                "simple_query_string": {
                    "fields": [
                        "text"
                    ],
                    "query": words
                }
            }
        }
        res = my_connector.count(query)
        return res['count']
    def get_start_date(self, index):
        """Return the @timestamp/timestamp_ms source of the OLDEST document in *index*."""
        my_connector = Es_connector(index=index)
        res = my_connector.search_size({
            "_source": [
                "@timestamp",
                "timestamp_ms"
            ],
            "query": {
                "match_all": {}
            },
            "sort": [
                {
                    "@timestamp": {
                        "order": "asc"
                    }
                }
            ]
        },1)
        return res['hits']['hits'][0]['_source']
    def get_end_date(self, index):
        """Return the @timestamp/timestamp_ms source of the NEWEST document in *index*."""
        my_connector = Es_connector(index=index)
        res = my_connector.search_size({
            "_source": [
                "@timestamp",
                "timestamp_ms"
            ],
            "query": {
                "match_all": {}
            },
            "sort": [
                {
                    "@timestamp": {
                        "order": "desc"
                    }
                }
            ]
        },1)
        return res['hits']['hits'][0]['_source']
def get_range_count(self, index, start, end):
my_connector = Es_connector(index=index)
query = {
"query": {
"range": {
"timestamp_ms": {
"gt": str(start),
"lt": str(end)
}
}
}
}
print(query)
res = my_connector.count(query)
return res['count']
    def process_range_tweets(self, index, start, end, words,count):
        """Process tweets in a time range via the connector, using a fixed stopword file.

        NOTE: the stopword path is hard-coded relative to the working directory.
        """
        sw = 'stopwords/twitter_all.txt'
        my_connector = Es_connector(index=index)
        res = my_connector.range_tweets(start, end, sw, words,count)
        return res
    def process_w2v_tweets(self, index, words,count):
        """Run the connector's word2vec processing over tweets, using a fixed stopword file."""
        sw = 'stopwords/twitter_all.txt'
        my_connector = Es_connector(index=index)
        res = my_connector.w2v_tweets(sw, words,count)
        return res
    def get_event_central_tweets(self, index="test3", main_term="", related_terms=""):
        """Return the single highest-scoring tweet for the event's weighted term query."""
        my_connector = Es_connector(index=index)
        terms = []
        # NOTE: 'words' is accumulated below but never used.
        words = main_term + ' '
        for t in related_terms:
            terms.append({ "match": {
                "text": {
                    "query": t['word'],
                    "boost": t['value']
                }
            }})
            words += t['word']+ " "
        terms.append({"match": {
            "text": {
                "query": main_term,
                "boost": 2
            }
        }})
        query = {
            "sort": [
                "_score"
            ],
            "query": {
                "bool": {
                    "should": terms
                }
            }
        }
        res = my_connector.search_size(query,1)
        return res
    def get_event_tweets_bigsearch(self, index="test3", main_term="", related_terms=""):
        """Return ALL tweets matching the event's weighted term query, sorted by score."""
        my_connector = Es_connector(index=index)
        terms = []
        # NOTE: 'words' is accumulated below but never used.
        words = main_term + ' '
        for t in related_terms:
            terms.append({ "match": {
                "text": {
                    "query": t['word'],
                    "boost": t['value']
                }
            }})
            words += t['word']+ " "
        terms.append({"match": {
            "text": {
                "query": main_term,
                "boost": 2
            }
        }})
        query = {
            "sort": [
                "_score"
            ],
            "query": {
                "bool": {
                    "should": terms
                }
            }
        }
        res = my_connector.bigTweetTextSearch(query)
        return res
def getMean(self, index="test3", main_term="", related_terms=""):
my_connector = Es_connector(index=index)
terms = []
words = main_term + ' '
for t in related_terms:
terms.append({"match": {
"text": {
"query": t['word'],
"boost": t['value']
}
}})
words += t['word'] + " "
terms.append({"match": {
"text": {
"query": main_term,
"boost": 2
}
}})
query = {
"sort": [
"_score"
],
"_source": [
"_score"
],
"query": {
"bool": {
"should": terms
}
}
}
query = {
"size": 0,
"query": {
"bool": {
"should": terms
}
},
"aggs": {
"sum_scores": {
"sum": {
"script": "_score"
}
}
}
}
res = my_connector.search(query)
total = res['hits']['total']
sum = res['aggregations']['sum_scores']['value']
mean = sum / total
# res = my_connector.bigSearchMean(query)
return mean
    def getSSE(self, index="test3", main_term="", related_terms="", mean=0):
        """Compute the sum of squared score errors around *mean* via the connector's bigSearchSSE."""
        my_connector = Es_connector(index=index)
        terms = []
        # NOTE: 'words' is accumulated below but never used.
        words = main_term + ' '
        for t in related_terms:
            terms.append({"match": {
                "text": {
                    "query": t['word'],
                    "boost": t['value']
                }
            }})
            words += t['word'] + " "
        terms.append({"match": {
            "text": {
                "query": main_term,
                "boost": 2
            }
        }})
        query = {
            "sort": [
                "_score"
            ],
            "query": {
                "bool": {
                    "should": terms
                }
            }
        }
        res = my_connector.bigSearchSSE(query, mean)
        return res
    def d2v(self, tweet, data):
        """Train a throwaway Doc2Vec model on *data* and print the documents most similar to *tweet*.

        The model is retrained from scratch on every call (100 epochs), so
        this is debug/exploration code, not a production path. Output goes to
        stdout only; nothing is returned.
        NOTE(review): ``model.iter`` is a deprecated gensim attribute —
        confirm against the pinned gensim version.
        """
        # data = ["I love machine learning. Its awesome.",
        # "I love coding in python",
        # "I love building chatbots python",
        # "they chat amagingly well",
        # "So we have saved the model and its ready for implementation. Lets play with it"]
        print("=============================================================")
        print("=============================================================")
        print(tweet)
        print("-------------")
        print("-------------")
        tagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(data)]
        max_epochs = 100
        vec_size = 20
        alpha = 0.025
        model = Doc2Vec(vector_size=vec_size,
                        alpha=alpha,
                        min_alpha=0.00025,
                        min_count=1,
                        dm=1)
        model.build_vocab(tagged_data)
        for epoch in range(max_epochs):
            # print('iteration {0}'.format(epoch))
            model.train(tagged_data,
                        total_examples=model.corpus_count,
                        epochs=model.iter)
            # decrease the learning rate
            model.alpha -= 0.0002
            # fix the learning rate, no decay
            model.min_alpha = model.alpha
        # test_data = word_tokenize("So we have saved the model and its ready for implementation. Lets play with it".lower())
        test_data = word_tokenize(tweet.lower())
        v1 = model.infer_vector(test_data)
        # print("V1_infer", v1)
        # to find most similar doc using tags
        similar_doc = model.docvecs.most_similar([v1])
        print("similar_docs:")
        print("-------------")
        # print(similar_doc)
        for doc in similar_doc:
            print(data[int(doc[0])])
            # print(doc[1])
        print("=============================================================")
        print("=============================================================")
        # to find vector of doc in training data using tags or in other words, printing the vector of document at index 1 in training data
        # print(model.docvecs['1'])
generate_datasets/TISE/tise_gendata.py | clott3/SIB-CL | 2 | 12773094 | <gh_stars>1-10
import numpy as np
from solveTISE import TISE
import time
import h5py
import sys
sys.path.append('./../PhC/')
from fourier_phc import FourierPhC
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from skimage.transform import resize
from scipy.sparse.linalg import eigs, eigsh
import scipy.sparse as sp
from scipy import interpolate
import argparse
import math
"""instructions:
- to generate target for UC sampled at target res use defaults
- to generate target for UC sampled at some other res, specify --orires 1200 (e.g.)
- to generate lowres, specify --lowres flag. for UC sampled at target res add specify --orires 32 (target res)"""
parser = argparse.ArgumentParser()
parser.add_argument('--ngrid',type=int, help='number of grid points; default = 32', default=32)
parser.add_argument('--lrgrid',type=int, help='number of LR grid points; default = 5', default=5)
parser.add_argument('--ndim',type=int, help='no. of dimensions, 2 or 3',default=2)
parser.add_argument('--nsam',type=int, help='number of samples',default=5000)
parser.add_argument('--seed',type=int, help='random seed',default=42)
parser.add_argument('--h5filename', required=True, type=str, help='REQUIRED: path to save generated h5 file, in format path/to/filename.h5')
parser.add_argument('--maxF',type=int, help='maximum number of fourier components',default=2)
parser.add_argument('--no_createh5', action='store_true', help='Set this flag to not create h5')
parser.add_argument('--maxfill',type=float, help='maximum filling ratio',default=0.99)
parser.add_argument('--minfill',type=float, help='maximum filling ratio',default=0.01)
parser.add_argument('--sigmafactor',type=float, help='sigma for filtered',default=20.)
parser.add_argument('--lowres',action='store_true', help='create lowres and save high res input')
parser.add_argument('--xmax',type=float,default=5.)
parser.add_argument('--maxeps',type=float, default=1.0)
parser.add_argument('--orires',type=int, help ='to create high resolution of original, e.g. 1200, default not used', default=0)
# parser.add_argument('--no_orires', action='store_true')
# parser.add_argument('--refgrid',type=int, default=32)
parser.add_argument('--epsneg', action='store_true')
# parser.add_argument('--epszero', action='store_true')
parser.add_argument('--centre_min', action='store_true')
parser.add_argument('--use_fill', action='store_true')
args = parser.parse_args()
# Grid size: target resolution, or the low-resolution grid when --lowres is set.
N = args.ngrid
if args.lowres:
    N = args.lrgrid
h5out = args.h5filename + ".h5"
xmax = args.xmax
maxF = args.maxF
orires = args.orires
ndim = args.ndim
maxeps=args.maxeps
np.random.seed(args.seed)
phc = FourierPhC(dim=ndim,maxF=maxF,maxeps=args.maxeps, mineps=0.,\
                minfill=args.minfill,maxfill=args.maxfill,\
                use_fill = args.use_fill)
if not args.no_createh5:
    with h5py.File(h5out,"a") as f:
        f.create_dataset("universal/N",dtype='int',data=N)
        f.create_dataset("universal/gridmax",dtype='f',data=xmax)
        f.create_dataset("universal/maxF",dtype='f',data=maxF)
        f.create_dataset("universal/maxeps",dtype='f',data=args.maxeps)
        f.create_dataset("universal/xmax",dtype='f',data=args.xmax)
        f.create_dataset("universal/usefill",dtype='f',data=args.use_fill)
## write universal (per h5 file) parameters
for i in range(args.nsam):
    uccoefs, ucgvecs, epsin, epsout, uclevel, filling = phc.get_random()
    # epslow = np.min([epsin,epsout])
    epshi = np.max([epsin,epsout])
    if args.epsneg: # if epsneg, we only sample 1 eps and take negative
        epslow = -epshi
    else:
        epslow = 0
    # Oversample the unit cell so the central [-xmax, xmax] window keeps the
    # requested resolution after the crop below (0.6 = kept fraction).
    if args.orires != 0:
        totalres = int(orires/0.6) # this is the original sample, res 2000
    else:
        totalres = int(args.ngrid/0.6)
    input = phc.getunitcell(uccoefs, ucgvecs, epslow, epshi, uclevel,ucres=totalres)
    # Crop 20% off each side to recover the central window.
    if args.ndim == 2:
        input = input[int(totalres/5):-int(totalres/5),int(totalres/5):-int(totalres/5)]
    elif args.ndim == 3:
        input = input[int(totalres/5):-int(totalres/5),int(totalres/5):-int(totalres/5),int(totalres/5):-int(totalres/5)]
    input = gaussian_filter(input,(totalres/args.sigmafactor)) # this is our high res input from [-xmax,xmax] inclusive of boundaries.
    if args.ndim == 2:
        if args.orires != 0:
            # NOTE(review): scipy.interpolate.interp2d is deprecated in recent
            # SciPy releases — confirm the pinned SciPy version.
            x = np.linspace(-xmax,xmax,input.shape[0])
            y = np.linspace(-xmax,xmax,input.shape[1])
            f = interpolate.interp2d(x,y, input)
            xnew = np.linspace(-xmax,xmax,N)
            ynew = np.linspace(-xmax,xmax,N)
            input = f(xnew,ynew)
        inputsolve = input[1:-1,1:-1] # Dirichlet BC: ignore boundary points
    elif args.ndim == 3:
        if args.orires != 0:
            x = np.linspace(-xmax,xmax,input.shape[0])
            y = np.linspace(-xmax,xmax,input.shape[1])
            z = np.linspace(-xmax,xmax,input.shape[2])
            f = interpolate.RegularGridInterpolator((x,y,z), input)
            xnew = np.linspace(-xmax,xmax,N)
            ynew = np.linspace(-xmax,xmax,N)
            znew = np.linspace(-xmax,xmax,N)
            newpoints = np.array(np.meshgrid(xnew,ynew,znew)).reshape(3,-1).T
            input = f(newpoints).reshape(N,N,N)
            input = np.swapaxes(input,0,1) # transposing swap axes.
            # Checked that agree at boundaries. i.e. for i = [0,1], input[i,:,:] == inputnew[i,:,:], input[:,i,:] == inputnew[:,i,:], etc
        inputsolve = input[1:-1,1:-1,1:-1] # ignore boundary points
    else:
        raise ValueError("only 2D or 3D")
    # Solve the time-independent Schroedinger equation and sort the two
    # lowest eigenpairs by eigenvalue.
    sch = TISE(inputsolve,dim=ndim,xmax=xmax)
    eigval, eigvec = sch.solve(num_eig=2)
    ind = np.argsort(eigval) #eigval and eigvec are already np arrays
    eigval = eigval[ind]
    eigvec = eigvec[:,ind]
    if args.lowres:
        # NOTE(review): 'f' only refers to the interpolator when the 2-D
        # --orires branch ran this iteration; otherwise it is the (closed)
        # h5py handle from the previous iteration — confirm --lowres is only
        # used together with --orires in 2-D.
        xnew = np.linspace(-xmax,xmax,args.ngrid)
        ynew = np.linspace(-xmax,xmax,args.ngrid)
        inputsave = f(xnew,ynew) # want lowres input to be the same as target
    else:
        inputsave = input
    with h5py.File(h5out,"a") as f:
        if args.lowres:
            f.create_dataset("unitcell/small_potential_fil/"+str(i),dtype='f',data=input)
            # f.create_dataset("unitcell/potential_orires/"+str(i),dtype='f',data=input)
        f.create_dataset("unitcell/potential_fil/"+str(i),dtype='f',data=inputsave)
        f.create_dataset("unitcell/sigmafac/"+str(i),dtype='f',data=args.sigmafactor)
        f.create_dataset("unitcell/epslow/"+str(i),dtype='f',data=epslow)
        f.create_dataset("unitcell/epshi/"+str(i),dtype='f',data=epshi)
        f.create_dataset("unitcell/uccoefs/"+str(i),dtype='complex64',data=uccoefs)
        f.create_dataset("unitcell/ucgvecs/"+str(i),dtype='f',data=ucgvecs)
        f.create_dataset("unitcell/uclevel/"+str(i),dtype='f',data=uclevel)
        # f.create_dataset("unitcell/orires/"+str(i),dtype='f',data=args.orires)
        f.create_dataset("unitcell/filling/"+str(i),dtype='f',data=filling)
        f.create_dataset("eigval_fil/"+str(i),dtype='f',data=eigval)
        f.create_dataset("eigvec_fil/"+str(i),dtype='f',data=eigvec)
    if i % 50 == 0:
        print(f"{i}samples done!")
| 1.882813 | 2 |
awwards/admin.py | Jay-68/awwards | 0 | 12773095 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from awwards.models import Profile, Project, Review
# Register your models here.
# Expose the app's models in the Django admin site.
for model in (Project, Profile, Review):
    admin.site.register(model)
projects/FastRetri/fastretri/datasets.py | UU-tracktech/fast-reid | 0 | 12773096 | <reponame>UU-tracktech/fast-reid
# encoding: utf-8
"""
@author: <NAME>
@contact: <EMAIL>
"""
import os
from processor.pipeline.reidentification.fastreid.fastreid.data.datasets import DATASET_REGISTRY
from processor.pipeline.reidentification.fastreid.fastreid.data.datasets.bases import ImageDataset
__all__ = ["Cars196", "CUB", "SOP", "InShop"]
@DATASET_REGISTRY.register()
class Cars196(ImageDataset):
    """Cars-196 retrieval dataset: images and labels listed in train/test txt files."""
    dataset_dir = 'Cars_196'
    dataset_name = "cars"
    def __init__(self, root='datasets', **kwargs):
        self.root = root
        self.dataset_dir = os.path.join(self.root, self.dataset_dir)
        train_file = os.path.join(self.dataset_dir, "train.txt")
        test_file = os.path.join(self.dataset_dir, "test.txt")
        self.check_before_run([self.dataset_dir, train_file, test_file])
        train = self.process_label_file(train_file, is_train=True)
        query = self.process_label_file(test_file, is_train=False)
        super(Cars196, self).__init__(train, query, [], **kwargs)
    def process_label_file(self, file, is_train):
        """Parse a ``img_name,label`` file into (image_path, pid, camid) triples."""
        records = []
        with open(file, 'r') as fh:
            for raw in fh.read().splitlines():
                img_name, label = raw.split(',')
                # Training labels are namespaced by dataset to avoid pid clashes.
                pid = self.dataset_name + '_' + str(label) if is_train else label
                records.append((os.path.join(self.dataset_dir, img_name), pid, '0'))
        return records
@DATASET_REGISTRY.register()
class CUB(Cars196):
    """CUB-200-2011 birds dataset; reuses the Cars196 txt-file loading logic."""
    dataset_dir = "CUB_200_2011"
    dataset_name = "cub"
@DATASET_REGISTRY.register()
class SOP(Cars196):
    """Stanford Online Products dataset; reuses the Cars196 txt-file loading logic."""
    dataset_dir = "Stanford_Online_Products"
    dataset_name = "sop"
@DATASET_REGISTRY.register()
class InShop(Cars196):
    """In-Shop Clothes Retrieval dataset with separate query and gallery splits."""
    dataset_dir = "InShop"
    dataset_name = "inshop"
    def __init__(self, root="datasets", **kwargs):
        self.root = root
        self.dataset_dir = os.path.join(self.root, self.dataset_dir)
        train_file = os.path.join(self.dataset_dir, "train.txt")
        query_file = os.path.join(self.dataset_dir, "test_query.txt")
        gallery_file = os.path.join(self.dataset_dir, "test_gallery.txt")
        required_files = [
            train_file,
            query_file,
            gallery_file,
        ]
        self.check_before_run(required_files)
        train = self.process_label_file(train_file, True)
        query = self.process_label_file(query_file, False)
        gallery = self.process_label_file(gallery_file, False)
        # Deliberately calls ImageDataset.__init__ (the super of Cars196),
        # bypassing Cars196.__init__, since InShop builds its own
        # train/query/gallery splits here.
        super(Cars196, self).__init__(train, query, gallery, **kwargs)
| 2.15625 | 2 |
lib/tests/dos_circular_fragment.py | bbhunter/graphql-cop | 89 | 12773097 | <reponame>bbhunter/graphql-cop<gh_stars>10-100
"""Circular Fragment tests."""
from lib.utils import graph_query, curlify
def circular_fragment(url, proxy, headers):
    """Check for circular fragment.

    Sends a query containing two mutually-referencing fragments. A compliant
    GraphQL server must reject such a query, so an error-free response marks
    the endpoint as vulnerable to this denial-of-service vector.

    :param url: GraphQL endpoint URL.
    :param proxy: proxies mapping forwarded to the request helper.
    :param headers: HTTP headers forwarded to the request helper.
    :return: result dict with 'result' set to True when vulnerable.
    """
    res = {
        'result':False,
        'title':'Circular Fragment',
        'description':'Circular Fragment allowed in Query',
        'impact':'Denial of Service',
        'severity':'HIGH',
        'curl_verify':''
    }
    q = '''
    query {
      __schema {
        ...A
      }
    }
    fragment A on __Schema {
      __typename
      ...B
    }
    fragment B on __Schema {
      ...A
    }
    '''
    gql_response = graph_query(url, proxies=proxy, headers=headers, payload=q)
    res['curl_verify'] = curlify(gql_response)
    try:
        # An error-free response means the server executed the circular
        # query instead of rejecting it.
        if 'errors' not in gql_response.json():
            res['result'] = True
    except (ValueError, AttributeError):
        # Non-JSON body or an unexpected response object: not vulnerable.
        pass
    return res
| 2.453125 | 2 |
src/lgt/group/generators.py | nftqcd/lgt | 0 | 12773098 | <filename>src/lgt/group/generators.py
"""
generators.py
Contains methods for generating elements of SU(2), SU(3) gauge groups.
Note:
- The `eps` (type: float) argument to the below functions controls the
'distance' from the identity matrix.
"""
from __future__ import absolute_import, print_function, division, annotations
import numpy as np
# from re import split
# from multiprocessing import pool
def generate_SU2(eps: float) -> np.ndarray:
    """Return one random SU(2) matrix a distance ``eps`` from the identity.

    The matrix is parameterised by four coefficients (a, b, c, d) with
    a^2 + b^2 + c^2 + d^2 = 1, where the imaginary part is scaled to norm
    ``eps`` and the real diagonal part carries the remaining weight.
    """
    raw = np.random.uniform(0, 0.5, (4))
    coeffs = np.empty((4))
    coeffs[1:] = eps * raw[1:] / np.linalg.norm(raw[1:])
    coeffs[0] = np.sign(raw[0]) * np.sqrt(1 - eps ** 2)
    a, b, c, d = coeffs
    return np.array([
        [a + 1j * d, c + 1j * b],
        [-c + 1j * b, a - 1j * d],
    ])
def generate_SU3(eps: float) -> np.ndarray:
    """Return one random SU(3) matrix built from three embedded SU(2) blocks.

    Each of r, s, t is the identity with a random SU(2) matrix embedded in a
    different 2x2 sub-block; their product is a random SU(3) element whose
    distance from the identity is controlled by ``eps``.

    Note: the original used ``np.complexfloating`` as the dtype; that is an
    abstract scalar type, deprecated and later rejected as a dtype argument
    by NumPy, so the concrete ``np.complex128`` is used instead.
    """
    r = np.identity(3, dtype=np.complex128)
    s = np.identity(3, dtype=np.complex128)
    t = np.identity(3, dtype=np.complex128)
    r[:2, :2] = generate_SU2(eps)
    s[0:3:2, 0:3:2] = generate_SU2(eps)
    t[1:, 1:] = generate_SU2(eps)
    return np.dot(np.dot(r, s), t)
def generate_SU3_array(n: int, eps: float) -> np.ndarray:
    """Return a (2n, 3, 3) array of SU(3) matrices interleaved with their
    conjugate transposes: [U0, U0^dagger, U1, U1^dagger, ...].

    ``eps`` controls each matrix's distance from the identity. Uses the
    concrete ``np.complex128`` dtype (``np.complexfloating`` is abstract and
    rejected as a dtype by modern NumPy).
    """
    arr = np.zeros((2 * n, 3, 3), dtype=np.complex128)
    for i in range(n):
        mtx = generate_SU3(eps)
        arr[2 * i] = mtx
        arr[2 * i + 1] = mtx.conj().T
    return arr
| 2.59375 | 3 |
src/utils/plotsolution.py | Vnicius/ed-tsp | 0 | 12773099 | <filename>src/utils/plotsolution.py
# -*- coding: utf-8 -*-
import math
import matplotlib.pyplot as plt
from copy import copy
import random
def get_plot_values(graph, has_points=False):
    """Compute marker and tour-line coordinates for plotting a TSP solution.

    :param graph: sequence of nodes; each node needs a ``key`` attribute and,
        when *has_points* is true, a ``point`` with ``x``/``y`` attributes.
    :param has_points: if true, use the nodes' own coordinates; otherwise lay
        the nodes out evenly on a circle of radius 100 centred at (150, 150).
    :return: ``(xs, ys, xs_line, ys_line)`` where xs/ys are marker positions
        and xs_line/ys_line trace the tour, closed back to the start.
    """
    xs = []
    ys = []
    N = len(graph)
    if has_points:
        for node in graph:
            xs.append(node.point.x)
            ys.append(node.point.y)
        # Copy instead of aliasing: the closing point appended below must not
        # leak into the marker lists (the original aliased them, giving the
        # markers a spurious duplicate of the first point).
        xs_line, ys_line = list(xs), list(ys)
    else:
        xs_line = []
        ys_line = []
        for i in range(N):
            xs.append(150.0 + 100 * math.cos(math.pi * 2 * i / N))
            ys.append(150.0 + 100 * math.sin(math.pi * 2 * i / N))
        for node in graph:
            xs_line.append(xs[node.key])
            ys_line.append(ys[node.key])
    # Close the tour back to its starting node.
    xs_line.append(xs_line[0])
    ys_line.append(ys_line[0])
    return xs, ys, xs_line, ys_line
def plot_solution(graph, title="", has_points=False, show_key=False):
    """Plot a TSP tour: red tour line, blue node markers, red start marker.

    :param graph: sequence of nodes (see get_plot_values).
    :param title: figure title.
    :param has_points: forwarded to get_plot_values (use node coordinates).
    :param show_key: if true, annotate each node with its index in green.
    """
    xs, ys, xs_line, ys_line = get_plot_values(graph, has_points=has_points)
    plt.title(title)
    plt.plot(xs_line, ys_line, 'r')
    plt.plot(xs, ys, 'bo')
    plt.plot([xs_line[0]], [ys_line[0]], 'ro')
    if show_key:
        for index, (x, y) in enumerate(zip(xs, ys)):
            plt.text(x, y, str(index), color="green", fontsize=12)
    plt.show()
def plot_animated(graph, title="", has_points=False, show_key=False):
    """Redraw the current figure with this tour and pause briefly.

    Intended to be called repeatedly inside an optimisation loop: clears the
    figure, draws the tour like plot_solution, then pauses 0.01s so
    matplotlib refreshes instead of blocking on show().
    """
    xs, ys, xs_line, ys_line = get_plot_values(graph, has_points=has_points)
    plt.clf()
    plt.title(title)
    plt.plot(xs_line, ys_line, 'r')
    plt.plot(xs, ys, 'bo')
    plt.plot([xs_line[0]], [ys_line[0]], 'ro')
    if show_key:
        for index, (x, y) in enumerate(zip(xs, ys)):
            plt.text(x, y, str(index), color="green", fontsize=12)
    plt.pause(0.01)
| 3.203125 | 3 |
migration/migrator/migrations/course/20190710182514_add_view_date_for_teams.py | elihschiff/Submitty | 411 | 12773100 | """Migration for a given Submitty course database."""
def up(config, database, semester, course):
    """Add the nullable last_viewed_time column to teams, if not already present."""
    if not database.table_has_column('teams', 'last_viewed_time'):
        database.execute('ALTER TABLE teams ADD COLUMN last_viewed_time timestamp with time zone')
def down(config, database, semester, course):
    """No-op rollback: the added column is intentionally left in place."""
    pass
| 2.625 | 3 |
util/build_benchmarks_page.py | wenq1/duktape | 4,268 | 12773101 | #!/usr/bin/env python2
import os
import sys
import re
import json
def main():
    """Build the duktape benchmarks.html page from per-commit test results.

    Pipeline: list the last N merge hashes with git, collect release-tag
    annotations, fetch per-commit results via the testrunner node client,
    compress the JSON with lz-string (run through the duk interpreter), and
    splice the compressed blob into the benchmarks.html template.
    NOTE(review): the shebang pins python2; the 'rb' reads compared against
    str and the dict.has_key() calls below are python2-only — confirm before
    running under python3.
    """
    # Adapt manually.
    duk = '/usr/local/bin/duk'
    lzstring = '/home/duktape/duktape/lz-string/libs/lz-string.js'
    duktape_repo = '/home/duktape/duktape'
    duktape_testrunner_repo = '/home/duktape/duktape-testrunner'
    duktape_testclient_config = '/home/duktape/duktape-testclient-config.yaml'
    benchmarks_template = '/home/duktape/duktape/website/benchmarks.html'
    merge_count = 1000
    # Get the hashes we're interested in, in increasing merge order.
    # os.system('cd %s && git pull --rebase' % duktape_repo)
    os.system('cd %s && git log -n %d --merges --oneline --decorate=no --pretty=format:%%H > /tmp/tmp-hashes.txt' % (duktape_repo, merge_count))
    hashes = []
    with open('/tmp/tmp-hashes.txt', 'rb') as f:
        for line in f:
            line = line.strip()
            if line != '':
                hashes.append(line)
    hashes.reverse()
    print('%d hashes found' % len(hashes))
    # Get any release tags matching the hashes for annotations.
    re_release_tag = re.compile('^v\d+\.\d+\.\d+$')
    annotations = []
    for x,h in enumerate(hashes):
        os.system('cd %s && git tag -l --points-at %s > /tmp/tmp-taglog.txt' % (duktape_repo, h))
        with open('/tmp/tmp-taglog.txt', 'rb') as f:
            for line in f:
                line = line.strip()
                m = re_release_tag.match(line)
                if m is None:
                    continue
                annotations.append({ 'x': x, 'tag': line })
    print(json.dumps(annotations, indent=4))
    # Get test data for hashed, and pack it into a JSON object embedded
    # into the page.
    req = { 'repo_full': 'svaarala/duktape', 'sha_list': hashes }
    with open('/tmp/tmp-request.json', 'wb') as f:
        f.write(json.dumps(req))
    os.system('cd %s && cd client-simple-node && nodejs client.js --request-uri /query-commit-simple --config %s --request-file /tmp/tmp-request.json --output-file /tmp/tmp-result.json' % (duktape_testrunner_repo, duktape_testclient_config))
    with open('/tmp/tmp-result.json', 'rb') as f:
        data = json.loads(f.read())
    for commit in data:
        for run in commit.get('runs', []):
            # Censor some fields which take a lot of space
            if run.has_key('output_uri'):
                del run['output_uri']
            if run.has_key('result') and run['result'].has_key('traceback'):
                del run['result']['traceback']
    doc = {
        'commit_simples': data,
        'annotations': annotations
    }
    with open('/tmp/tmp-graphdata.json', 'wb') as f:
        f.write(json.dumps(doc))
    # There's a lot of JSON data so use http://pieroxy.net/blog/pages/lz-string/index.html
    # to compress it. 'duk' executable can be used to compress data.
    with open('/tmp/tmp-script.js', 'wb') as f:
        f.write('''
var input = new TextDecoder().decode(readFile('/tmp/tmp-graphdata.json'));
var compressed = LZString.compressToBase64(input);
writeFile('/tmp/tmp-graphdata-compressed.txt', compressed);
''')
    os.system('%s %s /tmp/tmp-script.js' % (duk, lzstring))
    with open('/tmp/tmp-graphdata-compressed.txt', 'rb') as f:
        graphdata = f.read()
    # Embed the compressed data into the benchmarks.html template.
    with open(benchmarks_template, 'rb') as f:
        page = f.read()
    page = page.replace('<!-- @DATA@ -->', \
        'var rawGraphDataCompressed = "' + graphdata + '";')
    with open('/tmp/benchmarks.html', 'wb') as f:
        f.write(page)
    # Done!
    print('done')
main()
| 2.53125 | 3 |
canopy/session.py | CanopySimulations/canopy-python | 0 | 12773102 | <reponame>CanopySimulations/canopy-python
from typing import Optional, Callable, Awaitable, Union
import canopy
import aiohttp
import atexit
import asyncio
from aiohttp.client_exceptions import ClientResponseError, ClientConnectionError, ServerTimeoutError, ClientError
import logging
logger = logging.getLogger(__name__)
class Session(object):
    """Holds the API clients, authentication state and caches for one
    Canopy API session.

    May be used as a regular or async context manager; otherwise ``close()``
    is registered with ``atexit`` as a best-effort fallback.
    """
    _sync_client: canopy.openapi.ApiClient
    _async_client: canopy.openapi_asyncio.ApiClient
    _is_closed: bool = False

    def __init__(
            self,
            authentication_data: Optional[canopy.AuthenticationData] = None,
            client_id: Optional[str] = None,
            client_secret: Optional[str] = None,
            username: Optional[str] = None,
            tenant_name: Optional[str] = None,
            password: Optional[str] = None,
            proxy: Optional[canopy.ProxyConfiguration] = None,
            openapi_configuration: Optional[canopy.openapi.Configuration] = None):

        self._configuration = openapi_configuration if openapi_configuration is not None else canopy.openapi.Configuration()
        if self._configuration.host is None:
            self._configuration.host = 'https://api.canopysimulations.com'

        if proxy is not None:
            self._configuration.proxy = proxy.auth_url
            self._configuration.proxy_headers = proxy.headers

        self._sync_client = canopy.openapi.ApiClient(configuration=self._configuration)
        self._async_client = canopy.openapi_asyncio.ApiClient(configuration=self._configuration)

        # When authentication_data is supplied it overrides the individual
        # credential arguments.
        if authentication_data is not None:
            client_id = authentication_data.client_id
            client_secret = authentication_data.client_secret
            username = authentication_data.username
            tenant_name = authentication_data.tenant_name
            password = authentication_data.password

        self._authentication = canopy.Authentication(
            self._sync_client,
            client_id,
            client_secret,
            username,
            tenant_name,
            password)

        self._units = canopy.Units()
        self._user_settings = canopy.UserSettingsCache(self._sync_client, self._authentication)
        self._tenant_users = canopy.TenantUsersCache(self._sync_client, self._authentication)
        self._tenant_sim_version = canopy.TenantSimVersionCache(self._sync_client, self._authentication)
        self._study_types = canopy.StudyTypesCache(self._sync_client, self._authentication, self._tenant_sim_version)

        # NOTE(review): close() is a coroutine; at interpreter exit atexit
        # will only create the coroutine object, not await it — confirm
        # intended behaviour.
        atexit.register(self.close)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        canopy.run(self.close())

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.close()

    async def close(self):
        """Close both API clients exactly once and drop the atexit hook."""
        if self._is_closed:
            return
        self._is_closed = True
        await self.async_client.close()
        self.sync_client.close()
        if hasattr(atexit, 'unregister'):
            atexit.unregister(self.close)

    @property
    def default_api_concurrency(self) -> int:
        return 5

    @property
    def default_blob_storage_concurrency(self) -> int:
        return 5

    @property
    def configuration(self) -> canopy.openapi.Configuration:
        return self._configuration

    @property
    def sync_client(self) -> canopy.openapi.ApiClient:
        return self._sync_client

    @property
    def async_client(self) -> canopy.openapi_asyncio.ApiClient:
        return self._async_client

    @property
    def async_client_session(self) -> aiohttp.ClientSession:
        return self._async_client.rest_client.pool_manager

    @property
    def authentication(self) -> canopy.Authentication:
        return self._authentication

    @property
    def user_settings(self) -> canopy.UserSettingsCache:
        return self._user_settings

    @property
    def tenant_users(self) -> canopy.TenantUsersCache:
        return self._tenant_users

    @property
    def tenant_sim_version(self) -> canopy.TenantSimVersionCache:
        return self._tenant_sim_version

    @property
    def study_types(self) -> canopy.StudyTypesCache:
        return self._study_types

    @property
    def units(self) -> canopy.Units:
        return self._units

    @property
    def async_default_timeout(self):
        # NOTE(review): this was annotated as returning aiohttp.ClientSession,
        # but it returns the rest client's default timeout object — confirm
        # the exact type before re-annotating.
        return self._async_client.rest_client.default_timeout

    async def try_load_text(self, url: str, error_subject: str) -> str:
        """Fetch *url* as text, with retry handling via canopy.request_with_retry."""
        return await canopy.request_with_retry(lambda: self._load_text(url), error_subject, False)

    async def try_load_bytes(self, url: str, error_subject: str) -> bytes:
        """Fetch *url* as raw bytes, with retry handling via canopy.request_with_retry."""
        return await canopy.request_with_retry(lambda: self._load_bytes(url), error_subject, False)

    async def _load_text(self, url: str) -> str:
        async with self._get_async_session(url) as response:
            return await response.text()

    async def _load_bytes(self, url: str) -> bytes:
        async with self._get_async_session(url) as response:
            return await response.read()

    def _get_async_session(self, url: str):
        # GET with raise_for_status so HTTP error statuses surface as exceptions.
        return self.async_client_session.get(
            url,
            raise_for_status=True,
            timeout=self.async_default_timeout,
            proxy=self.configuration.proxy)
drfexample/locations/models.py | craigderington/django-drf-examples | 0 | 12773103 | <reponame>craigderington/django-drf-examples
from django.db import models
# Create your models here.
class Location(models.Model):
    """
    The Location data model for geo-coding results resolved from an IP address.
    """
    ip_addr = models.CharField(max_length=15, null=False, blank=False)
    time_zone = models.CharField(max_length=255, null=False, blank=False)
    latitude = models.FloatField(default=0.00)
    longitude = models.FloatField(default=0.00)
    region = models.CharField(max_length=255, null=False, blank=False)
    region_name = models.CharField(max_length=255, null=False, blank=False)
    city = models.CharField(max_length=255, null=False, blank=False)
    country_name = models.CharField(max_length=255, null=False, blank=False)
    country_code = models.CharField(max_length=255, null=False, blank=False)
    country_code3 = models.CharField(max_length=255, null=False, blank=False)
    postal_code = models.CharField(max_length=10, null=False, blank=False)
    dma_code = models.PositiveIntegerField(default=0)
    area_code = models.PositiveIntegerField(default=0)
    metro_code = models.PositiveIntegerField(default=0)

    def __str__(self):
        # __str__ must always return a string; the original returned None
        # when region/city were empty, which breaks the Django admin.
        if self.region and self.city:
            return '{} from {} - {}'.format(
                self.ip_addr,
                self.region,
                self.postal_code
            )
        return self.ip_addr

    def get_latlng(self):
        """Return 'Lat:.. Lng:..' or '' when either coordinate is unset/zero."""
        if self.latitude and self.longitude:
            return 'Lat:{} Lng:{}'.format(
                self.latitude,
                self.longitude
            )
        return ''
| 2.734375 | 3 |
raciocinio_algoritmico/Prova 01/05.py | PedroMoreira87/python | 0 | 12773104 | # Entrada de dados
from math import acos, degrees
# Read the three side lengths from the user.
a = float(input('a: '))
b = float(input('b: '))
c = float(input('c: '))

# A triangle exists only when every side is shorter than the sum of the others.
if not (a < b + c and b < a + c and c < a + b):
    print('Não formam um triângulo')
elif a == b == c:
    print('Triângulo Equilátero')
elif a != b and b != c and a != c:
    print('Triângulo Escaleno')
else:
    print('Triângulo Isósceles')
| 4.09375 | 4 |
google/cloud/security/common/data_access/project_dao.py | pombredanne/forseti-security | 1 | 12773105 | <reponame>pombredanne/forseti-security
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the data access object (DAO)."""
import json
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access.sql_queries import select_data
from google.cloud.security.common.gcp_type import project
from google.cloud.security.common.gcp_type import resource
from google.cloud.security.common.gcp_type import resource_util
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
class ProjectDao(dao.Dao):
    """Data access object (DAO) for project snapshots."""

    # pylint: disable=arguments-differ
    @staticmethod
    def map_row_to_object(row):
        """Instantiate a Project from database row.

        TODO: Make this go away when we start using an ORM.

        ProjectDao has a special case because the database schema doesn't
        match the GCP API fields.

        Args:
            row (dict): The database row to map.

        Returns:
            Project: A Project, created from the row.
        """
        return project.Project(
            project_id=row['project_id'],
            project_number=row['project_number'],
            display_name=row['project_name'],
            lifecycle_state=row['lifecycle_state'],
            parent=resource_util.create_resource(
                resource_id=row['parent_id'],
                resource_type=row['parent_type']))
    # pylint: enable=arguments-differ

    def get_project_numbers(self, resource_name, timestamp):
        """Select the project numbers from a projects snapshot table.

        Args:
            resource_name (str): The resource name.
            timestamp (str): The timestamp, formatted as YYYYMMDDTHHMMSSZ.

        Returns:
            list: A list of project numbers.

        Raises:
            MySQLError: An error with MySQL has occurred.
        """
        project_numbers_sql = select_data.PROJECT_NUMBERS.format(timestamp)
        rows = self.execute_sql_with_fetch(
            resource_name, project_numbers_sql, ())
        return [row['project_number'] for row in rows]

    def get_project(self, project_id, timestamp):
        """Get a project from a particular snapshot.

        Args:
            project_id (str): The id of the project.
            timestamp (str): The snapshot timestamp.

        Returns:
            Project: A Project, if found, otherwise None.
        """
        project_query = select_data.PROJECT_BY_ID.format(timestamp)
        rows = self.execute_sql_with_fetch(
            resource.ResourceType.PROJECT, project_query, (project_id,))
        if rows:
            return self.map_row_to_object(rows[0])
        return None

    def get_project_by_number(self, project_number, timestamp):
        """Get a project from a particular snapshot.

        Args:
            project_number (int): The number of the project.
            timestamp (str): The snapshot timestamp.

        Returns:
            Project: A Project, if found, otherwise None.
        """
        project_query = select_data.PROJECT_BY_NUMBER.format(timestamp)
        rows = self.execute_sql_with_fetch(
            resource.ResourceType.PROJECT, project_query, (project_number,))
        if rows:
            return self.map_row_to_object(rows[0])
        return None

    def get_projects(self, timestamp):
        """Get projects from a particular snapshot.

        Args:
            timestamp (str): The snapshot timestamp.

        Returns:
            list: A list of Projects.
        """
        projects_query = select_data.PROJECTS.format(timestamp)
        rows = self.execute_sql_with_fetch(
            resource.ResourceType.PROJECT, projects_query, ())
        return [self.map_row_to_object(row) for row in rows]

    def get_project_policies(self, resource_name, timestamp):
        """Get the project policies.

        This does not raise any errors on database or json parse errors
        because we want to return as many projects as possible.

        Args:
            resource_name (str): The resource type.
            timestamp (str): The timestamp of the snapshot.

        Returns:
            dict: A dict containing the projects (gcp_type.project.Project)
                and their iam policies (dict).
        """
        project_policies = {}
        query = select_data.PROJECT_IAM_POLICIES_RAW.format(
            timestamp, timestamp)
        rows = self.execute_sql_with_fetch(
            resource_name, query, ())
        for row in rows:
            try:
                proj = self.map_row_to_object(row)
                project_policies[proj] = json.loads(row['iam_policy'])
            except ValueError:
                # Best-effort: skip unparseable policies instead of failing.
                # Logger.warn() is a deprecated alias; use warning().
                LOGGER.warning(
                    'Error parsing json:\n %s', row['iam_policy'])
        return project_policies

    def get_project_raw_data(self, resource_name, timestamp, **kwargs):
        """Select the project raw data from a projects snapshot table.

        Args:
            resource_name (str): The resource name.
            timestamp (str): Snapshot timestamp, formatted as YYYYMMDDTHHMMSSZ.
            **kwargs (dict): Additional args. Supports 'project_id' and
                'project_number' as alternative lookup keys; with neither,
                all projects' raw data is returned.

        Returns:
            list: List of project raw data.
        """
        project_id = kwargs.get('project_id')
        project_number = kwargs.get('project_number')
        if project_id is not None:
            project_raw_sql = select_data.PROJECT_RAW.format(timestamp)
            rows = self.execute_sql_with_fetch(
                resource_name, project_raw_sql, (project_id,))
        elif project_number is not None:
            project_raw_sql = select_data.PROJECT_RAW_BY_NUMBER.format(
                timestamp)
            rows = self.execute_sql_with_fetch(
                resource_name, project_raw_sql, (project_number,))
        else:
            project_raw_sql = select_data.PROJECT_RAW_ALL.format(timestamp)
            rows = self.execute_sql_with_fetch(
                resource_name, project_raw_sql, ())
        return [row['raw_project'] for row in rows]
| 2.109375 | 2 |
tests/test_H0.py | CNERG/RadClass | 1 | 12773106 | <reponame>CNERG/RadClass
import numpy as np
import pytest
import os
from datetime import datetime, timedelta
from RadClass.RadClass import RadClass
from RadClass.H0 import H0
import tests.test_data as test_data
# initialize sample data
start_date = datetime(2019, 2, 2)
delta = timedelta(seconds=1)
# One timestamp per simulated second, converted to float seconds since
# epoch (datetime64[s] -> float64) to match the file layout test_data builds.
timestamps = np.arange(start_date,
                       start_date + (test_data.timesteps * delta),
                       delta).astype('datetime64[s]').astype('float64')
live = np.full((len(timestamps),), test_data.livetime)
sample_val = 1.0
# Flat unit spectra for every timestep: a uniform background baseline.
spectra = np.full((len(timestamps), test_data.energy_bins),
                  np.full((1, test_data.energy_bins), sample_val))
# setting up for rejected null hypothesis
# The second half of the data jumps to 100x counts, so exactly one H0
# rejection is expected at the transition point.
rejected_H0_time = test_data.timesteps//2
spectra[rejected_H0_time:] = 100.0
@pytest.fixture(scope="module", autouse=True)
def init_test_file():
    """Create the shared sample data file once per module; delete it after."""
    # create sample test file with above simulated data
    yield test_data.create_file(live, timestamps, spectra)
    os.remove(test_data.filename)
def test_init():
    """H0 stores its constructor arguments and starts with no triggers."""
    alpha = 0.1
    use_gross = False
    n_bins = 10

    h0 = H0(significance=alpha, gross=use_gross, energy_bins=n_bins)

    np.testing.assert_equal(h0.significance, alpha)
    np.testing.assert_equal(h0.gross, use_gross)
    # Trigger table is empty but already has timestamp + per-bin columns.
    np.testing.assert_equal(h0.triggers.shape, (0, n_bins + 1))
def test_gross():
    """Gross-count mode flags exactly one rejection at the count jump."""
    stride, integration = 10, 10

    h0 = H0()
    rc = RadClass(stride, integration, test_data.datapath,
                  test_data.filename, analysis=h0)
    rc.run_all()

    # The rejection is reported at the window preceding the jump.
    expected_time = timestamps[-(rejected_H0_time + integration)]
    np.testing.assert_equal(h0.triggers[0][0], expected_time)
    # there should only be one rejected hypothesis
    np.testing.assert_equal(h0.triggers.shape[0], 1)
def test_channel():
    """Per-channel mode flags a single rejection with one column per bin."""
    stride, integration = 10, 10

    h0 = H0(gross=False, energy_bins=test_data.energy_bins)
    rc = RadClass(stride, integration, test_data.datapath,
                  test_data.filename, analysis=h0)
    rc.run_all()

    expected_time = timestamps[-(rejected_H0_time + integration)]
    np.testing.assert_equal(h0.triggers[0][0], expected_time)
    # exactly one rejected hypothesis
    np.testing.assert_equal(h0.triggers.shape[0], 1)
    # columns = 1 timestamp column + one column per energy bin
    np.testing.assert_equal(h0.triggers.shape[1], test_data.energy_bins + 1)
def test_write_gross():
    """write() persists the single gross-mode trigger as one CSV row."""
    stride, integration = 10, 10
    out_file = 'h0test_gross.csv'

    h0 = H0()
    rc = RadClass(stride, integration, test_data.datapath,
                  test_data.filename, analysis=h0)
    rc.run_all()
    h0.write(out_file)

    loaded = np.loadtxt(out_file, delimiter=',')
    # A single CSV row collapses to a 1-D array of 4 values.
    np.testing.assert_equal(loaded.shape, (4,))
    os.remove(out_file)
def test_write_channel():
    """write() persists the per-channel trigger: timestamp + one value/bin."""
    stride, integration = 10, 10
    out_file = 'h0test_channel.csv'

    h0 = H0(gross=False, energy_bins=test_data.energy_bins)
    rc = RadClass(stride, integration, test_data.datapath,
                  test_data.filename, analysis=h0)
    rc.run_all()
    h0.write(out_file)

    loaded = np.loadtxt(out_file, delimiter=',')
    # Single row -> 1-D array: 1 timestamp + energy_bins channel values.
    np.testing.assert_equal(loaded.shape, (test_data.energy_bins + 1,))
    os.remove(out_file)
| 2.140625 | 2 |
main.py | grinya007/recomlive_rnn | 0 | 12773107 | <reponame>grinya007/recomlive_rnn<filename>main.py
#!/usr/bin/env python3
import sys, os
from src.server import Server
from src.recommender import Recommender
"""Creates recommender object, see src/recommender.py for details
"""
recommender = Recommender(
int(os.getenv('RECOMMENDER_DOCS_LIMIT', 2000)),
int(os.getenv('RECOMMENDER_PERSONS_LIMIT', 2000)),
int(os.getenv('RECOMMENDER_RECS_LIMIT', 5)),
os.getenv('RECOMMENDER_TORCH_DEVICE', 'cpu')
)
def dispatcher(server, data, response):
    """Parse one request datagram and reply via ``response`` when needed.

    Wire format is ASCII ``"<method>,<did>,<pid>"`` where method is one of
    RECR, RECM, RR or PH, ``did`` is an arbitrary document ID and ``pid``
    is an arbitrary person ID.

    Args:
        server (Server): UDP server object
        data (bytes): Bytes received by the server
        response (function): A callback function,
            sends bytes back to the client
    """
    try:
        method, did, pid = data.decode('ascii').split(',')
        if method == 'RECR':
            # Records a visit: person pid visited document did.
            recommender.record(did, pid)
        elif method == 'RECM':
            # Makes recommendations: person pid is at document did;
            # returns a list of dids (where to go next).
            recs = recommender.recommend(did, pid)
            response(pack_response('OK', recs))
        elif method == 'RR':
            # Record, then recommend, in one round trip.
            recommender.record(did, pid)
            recs = recommender.recommend(did, pid)
            response(pack_response('OK', recs))
        elif method == 'PH':
            # Returns person pid's visits history.
            dids = recommender.person_history(pid)
            response(pack_response('OK', dids))
        else:
            # Unknown method: treat as malformed input.
            raise Exception
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; `Exception` keeps the intended
    # "any bad input -> BADMSG" behavior without hiding shutdown signals.
    except Exception:
        response(pack_response('BADMSG'))
def pack_response(status, data=None):
    """Serialize a reply as ASCII bytes: ``status[,item1,item2,...]``.

    Args:
        status (str): Response status code, e.g. 'OK' or 'BADMSG'.
        data (list of str, optional): Payload items appended after status.

    Returns:
        bytes: ASCII-encoded, comma-joined message.
    """
    # A mutable default argument ([]) is a classic Python pitfall; use
    # None as the sentinel instead. Behavior is otherwise unchanged.
    if data is None:
        data = []
    return bytes(','.join([status] + data), 'ascii')
if __name__ == '__main__':
    """Creates UDP server daemon, see src/server.py for details
    """
    # Port is configurable via RECOMMENDER_PORT; the first CLI argument is
    # the daemon command handled by Server.command().
    server = Server(dispatcher, port = int(os.getenv('RECOMMENDER_PORT', 25000)))
    server.command(sys.argv[1])
| 2.828125 | 3 |
skactiveml/utils/_validation.py | LukasLuehrs/scikit-activeml | 0 | 12773108 | import copy
import warnings
from collections.abc import Iterable
from inspect import Parameter, signature
import numpy as np
from sklearn.utils.validation import (
check_array,
column_or_1d,
assert_all_finite,
check_consistent_length,
check_random_state as check_random_state_sklearn,
)
from ._label import MISSING_LABEL, check_missing_label, is_unlabeled
def check_scalar(
    x,
    name,
    target_type,
    min_inclusive=True,
    max_inclusive=True,
    min_val=None,
    max_val=None,
):
    """Validate type and value of a scalar parameter.

    Parameters
    ----------
    x : object
        The scalar parameter to validate.
    name : str
        The name of the parameter to be printed in error messages.
    target_type : type or tuple
        Acceptable data types for the parameter.
    min_inclusive : bool, optional (default=True)
        If true, the minimum valid value is inclusive, otherwise exclusive.
    max_inclusive : bool, optional (default=True)
        If true, the maximum valid value is inclusive, otherwise exclusive.
    min_val : float or int, optional (default=None)
        The minimum valid value the parameter can take. If None (default),
        the parameter has no lower bound.
    max_val : float or int, optional (default=None)
        The maximum valid value the parameter can take. If None (default),
        the parameter has no upper bound.

    Raises
    ------
    TypeError
        If the parameter's type does not match the desired type.
    ValueError
        If the parameter's value violates the given bounds.
    """
    if not isinstance(x, target_type):
        raise TypeError(
            "`{}` must be an instance of {}, not {}.".format(
                name, target_type, type(x)
            )
        )
    if min_val is not None:
        too_small = x < min_val if min_inclusive else x <= min_val
        if too_small:
            op = ">=" if min_inclusive else ">"
            raise ValueError(
                "`{}`= {}, must be {} {}.".format(name, x, op, min_val)
            )
    if max_val is not None:
        too_large = x > max_val if max_inclusive else x >= max_val
        if too_large:
            op = "<=" if max_inclusive else "<"
            raise ValueError(
                "`{}`= {}, must be {} {}.".format(name, x, op, max_val)
            )
def check_classifier_params(classes, missing_label, cost_matrix=None):
    """Check whether the parameters are compatible to each other (only if
    `classes` is not None).

    Parameters
    ----------
    classes : array-like, shape (n_classes)
        Array of class labels.
    missing_label : {number, str, None, np.nan}
        Symbol to represent a missing label.
    cost_matrix : array-like, shape (n_classes, n_classes), default=None
        Cost matrix. If None, cost matrix will be not checked.
    """
    check_missing_label(missing_label)
    if classes is None:
        # Without `classes`, a cost matrix's shape cannot be validated.
        if cost_matrix is not None:
            raise ValueError(
                "You cannot specify 'cost_matrix' without "
                "specifying 'classes'."
            )
        return
    check_classes(classes)
    dtype = np.array(classes).dtype
    check_missing_label(missing_label, target_type=dtype, name="classes")
    # The class list itself must not contain the missing-label symbol.
    if is_unlabeled(y=classes, missing_label=missing_label).sum() > 0:
        raise ValueError(
            f"`classes={classes}` contains "
            f"`missing_label={missing_label}.`"
        )
    if cost_matrix is not None:
        check_cost_matrix(cost_matrix=cost_matrix, n_classes=len(classes))
def check_classes(classes):
    """Check whether class labels are uniformly strings or numbers.

    Parameters
    ----------
    classes : array-like, shape (n_classes)
        Array of class labels.
    """
    if not isinstance(classes, Iterable):
        raise TypeError(
            "'classes' is not iterable. Got {}".format(type(classes))
        )
    try:
        # Sorting mixed str/number labels raises TypeError (caught below);
        # the length comparison detects duplicate labels.
        unique_sorted = np.array(sorted(set(classes)))
        if len(classes) != len(unique_sorted):
            raise ValueError("Duplicate entries in 'classes'.")
    except TypeError:
        types = sorted(t.__qualname__ for t in set(type(v) for v in classes))
        raise TypeError(
            "'classes' must be uniformly strings or numbers. Got {}".format(
                types
            )
        )
def check_class_prior(class_prior, n_classes):
    """Check if the class_prior is a valid prior.

    Parameters
    ----------
    class_prior : numeric | array_like, shape (n_classes)
        A class prior.
    n_classes : int
        The number of classes.

    Returns
    -------
    class_prior : np.ndarray, shape (n_classes)
        Numpy array as prior.
    """
    if class_prior is None:
        raise TypeError("'class_prior' must not be None.")
    check_scalar(n_classes, name="n_classes", target_type=int, min_val=1)
    if np.isscalar(class_prior):
        check_scalar(
            class_prior,
            name="class_prior",
            target_type=(int, float),
            min_val=0,
        )
        # Broadcast the scalar prior to every class.
        class_prior = np.array([class_prior] * n_classes)
    else:
        class_prior = check_array(class_prior, ensure_2d=False)
        is_negative = np.sum(class_prior < 0)
        if class_prior.shape != (n_classes,) or is_negative:
            # Fixed a missing space between "non-negative" and "float" in
            # the original implicitly-concatenated message.
            raise ValueError(
                "`class_prior` must be either a non-negative "
                "float or a list of `n_classes` non-negative "
                "floats."
            )
    return class_prior.reshape(-1)
def check_cost_matrix(
    cost_matrix,
    n_classes,
    only_non_negative=False,
    contains_non_zero=False,
    diagonal_is_zero=False,
):
    """Check whether cost matrix has shape `(n_classes, n_classes)`.

    Parameters
    ----------
    cost_matrix : array-like, shape (n_classes, n_classes)
        Cost matrix.
    n_classes : int
        Number of classes.
    only_non_negative : bool, optional (default=False)
        This parameter determines whether the matrix must contain only non
        negative cost entries.
    contains_non_zero : bool, optional (default=False)
        This parameter determines whether the matrix must contain at least
        one non-zero cost entry.
    diagonal_is_zero : bool, optional (default=False)
        This parameter determines whether the diagonal cost entries must be
        zero.

    Returns
    -------
    cost_matrix_new : np.ndarray, shape (n_classes, n_classes)
        Numpy array as cost matrix.
    """
    check_scalar(n_classes, target_type=int, name="n_classes", min_val=1)
    cost_matrix_new = check_array(
        np.array(cost_matrix, dtype=float), ensure_2d=True
    )
    if cost_matrix_new.shape != (n_classes, n_classes):
        raise ValueError(
            "'cost_matrix' must have shape ({}, {}). "
            "Got {}.".format(n_classes, n_classes, cost_matrix_new.shape)
        )
    if np.sum(cost_matrix_new < 0) > 0:
        if only_non_negative:
            raise ValueError(
                "'cost_matrix' must contain only non-negative cost entries."
            )
        else:
            warnings.warn("'cost_matrix' contains negative cost entries.")
    if n_classes != 1 and np.sum(cost_matrix_new != 0) == 0:
        if contains_non_zero:
            raise ValueError(
                "'cost_matrix' must contain at least one non-zero cost "
                "entry."
            )
        else:
            # Fixed duplicated word "contains contains" in the original
            # warning message.
            warnings.warn(
                "'cost_matrix' contains no non-zero cost entry."
            )
    if np.sum(np.diag(cost_matrix_new) != 0) > 0:
        if diagonal_is_zero:
            raise ValueError(
                "'cost_matrix' must contain only cost entries being zero on "
                "its diagonal."
            )
        else:
            warnings.warn(
                "'cost_matrix' contains non-zero cost entries on its diagonal."
            )
    return cost_matrix_new
def check_X_y(
    X=None,
    y=None,
    X_cand=None,
    sample_weight=None,
    sample_weight_cand=None,
    accept_sparse=False,
    *,
    accept_large_sparse=True,
    dtype="numeric",
    order=None,
    copy=False,
    force_all_finite=True,
    ensure_2d=True,
    allow_nd=False,
    multi_output=False,
    allow_nan=None,
    ensure_min_samples=1,
    ensure_min_features=1,
    y_numeric=False,
    estimator=None,
    missing_label=MISSING_LABEL,
):
    """Input validation for standard estimators.

    Checks X and y for consistent length, enforces X to be 2D and y 1D. By
    default, X is checked to be non-empty and containing only finite values.
    Standard input checks are also applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2D and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Labeled input data.
    y : nd-array, list or sparse matrix
        Labels for X.
    X_cand : nd-array, list or sparse matrix (default=None)
        Unlabeled input data
    sample_weight : array-like of shape (n_samples,) (default=None)
        Sample weights.
    sample_weight_cand : array-like of shape (n_candidates,) (default=None)
        Sample weights of the candidates.
    accept_sparse : string, boolean or list of string (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
    accept_large_sparse : bool (default=True)
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse will cause it to be accepted only
        if its indices are stored with a 32-bit dtype.

        .. versionadded:: 0.20
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
        does not influence whether y can have np.inf, np.nan, pd.NA values.
        The possibilities are:

        - True: Force all values of X to be finite.
        - False: accepts np.inf, np.nan, pd.NA in X.
        - 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
          be infinite.

        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.

        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`
    ensure_2d : boolean (default=True)
        Whether to raise a value error if X is not 2D.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    multi_output : boolean (default=False)
        Whether to allow 2D y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.
    allow_nan : boolean (default=None)
        Whether to allow np.nan in y.
    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.
    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.
    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.
    missing_label : {scalar, string, np.nan, None}, (default=np.nan)
        Value to represent a missing label.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    y_converted : object
        The converted and validated y.
    candidates : object
        The converted and validated candidates
        Only returned if candidates is not None.
    sample_weight : np.ndarray
        The converted and validated sample_weight.
    sample_weight_cand : np.ndarray
        The converted and validated sample_weight_cand.
        Only returned if candidates is not None.
    """
    # NaN targets are tolerated only when NaN is the missing-label symbol
    # (unless the caller chose explicitly via `allow_nan`).
    if allow_nan is None:
        allow_nan = True if missing_label is np.nan else False
    if X is not None:
        # Delegate all structural checks on X to sklearn's check_array.
        X = check_array(
            X,
            accept_sparse=accept_sparse,
            accept_large_sparse=accept_large_sparse,
            dtype=dtype,
            order=order,
            copy=copy,
            force_all_finite=force_all_finite,
            ensure_2d=ensure_2d,
            allow_nd=allow_nd,
            ensure_min_samples=ensure_min_samples,
            ensure_min_features=ensure_min_features,
            estimator=estimator,
        )
    if y is not None:
        if multi_output:
            # 2D / sparse targets allowed; finiteness is always enforced.
            y = check_array(
                y,
                accept_sparse="csr",
                force_all_finite=True,
                ensure_2d=False,
                dtype=None,
            )
        else:
            y = column_or_1d(y, warn=True)
            assert_all_finite(y, allow_nan=allow_nan)
        if y_numeric and y.dtype.kind == "O":
            y = y.astype(np.float64)
    if X is not None and y is not None:
        check_consistent_length(X, y)
        # Default to uniform weights, one per label (matches y's shape).
        if sample_weight is None:
            sample_weight = np.ones(y.shape)
        sample_weight = check_array(sample_weight, ensure_2d=False)
        check_consistent_length(y, sample_weight)
        # For 2D labels/weights, the second axes must agree as well.
        if (
            y.ndim > 1
            and y.shape[1] > 1
            or sample_weight.ndim > 1
            and sample_weight.shape[1] > 1
        ):
            check_consistent_length(y.T, sample_weight.T)
    if X_cand is not None:
        # Candidates get the same structural validation as X.
        X_cand = check_array(
            X_cand,
            accept_sparse=accept_sparse,
            accept_large_sparse=accept_large_sparse,
            dtype=dtype,
            order=order,
            copy=copy,
            force_all_finite=force_all_finite,
            ensure_2d=ensure_2d,
            allow_nd=allow_nd,
            ensure_min_samples=ensure_min_samples,
            ensure_min_features=ensure_min_features,
            estimator=estimator,
        )
        if X is not None and X_cand.shape[1] != X.shape[1]:
            raise ValueError(
                "The number of features of candidates does not match"
                "the number of features of X"
            )
        if sample_weight_cand is None:
            sample_weight_cand = np.ones(len(X_cand))
        sample_weight_cand = check_array(sample_weight_cand, ensure_2d=False)
        check_consistent_length(X_cand, sample_weight_cand)
    # Return signature depends on whether candidates were supplied.
    if X_cand is None:
        return X, y, sample_weight
    else:
        return X, y, X_cand, sample_weight, sample_weight_cand
def check_random_state(random_state, seed_multiplier=None):
    """Check validity of the given random state.

    Parameters
    ----------
    random_state : None | int | instance of RandomState
        If random_state is None, return the RandomState singleton used by
        np.random.
        If random_state is an int, return a new RandomState.
        If random_state is already a RandomState instance, return it.
        Otherwise raise ValueError.
    seed_multiplier : None | int, optional (default=None)
        If the random_state and seed_multiplier are not None, draw a new int
        from the random state, multiply it with the multiplier, and use the
        product as the seed of a new random state.

    Returns
    -------
    random_state : instance of RandomState
        The validated random state.
    """
    if random_state is None or seed_multiplier is None:
        return check_random_state_sklearn(random_state)
    check_scalar(
        seed_multiplier, name="seed_multiplier", target_type=int, min_val=1
    )
    # Deep-copy first so the caller's random state is never advanced.
    rs = check_random_state_sklearn(copy.deepcopy(random_state))
    derived_seed = (rs.randint(1, 2**31) * seed_multiplier) % (2**31)
    return np.random.RandomState(derived_seed)
def check_indices(indices, A, dim="adaptive", unique=True):
    """Check if indices fit to array.

    Parameters
    ----------
    indices : array-like of shape (n_indices, n_dim) or (n_indices,)
        The considered indices, where for every `i = 0, ..., n_indices - 1`
        `indices[i]` is interpreted as an index to the array `A`.
    A : array-like
        The array that is indexed.
    dim : int or tuple of ints
        The dimensions of the array that are indexed.
        If `dim` equals `'adaptive'`, `dim` is set to first indices
        corresponding to the shape of `indices`. E.g., if `indices` is of
        shape (n_indices,), `dim` is set `0`.
    unique: bool or `check_unique`
        If `unique` is `True` unique indices are returned. If `unique` is
        `'check_unique'` an exception is raised if the indices are not unique.

    Returns
    -------
    indices: tuple of np.ndarrays or np.ndarray
        The validated indices.
    """
    indices = check_array(indices, dtype=int, ensure_2d=False)
    A = check_array(A, allow_nd=True, force_all_finite=False, ensure_2d=False)
    # 'check_unique' raises on duplicates; True silently deduplicates.
    # Row-wise uniqueness is used for 2D (multi-dimensional) indices.
    if unique == "check_unique":
        if indices.ndim == 1:
            n_unique_indices = len(np.unique(indices))
        else:
            n_unique_indices = len(np.unique(indices, axis=0))
        if n_unique_indices < len(indices):
            raise ValueError(
                f"`indices` contains two different indices of the "
                f"same value."
            )
    elif unique:
        if indices.ndim == 1:
            indices = np.unique(indices)
        else:
            indices = np.unique(indices, axis=0)

    check_type(dim, "dim", int, tuple, target_vals=["adaptive"])
    # 'adaptive': 1D indices address axis 0; 2D indices address the first
    # indices.shape[1] axes of A.
    if dim == "adaptive":
        if indices.ndim == 1:
            dim = 0
        else:
            dim = tuple(range(indices.shape[1]))

    if isinstance(dim, tuple):
        # Multi-axis case: validate each addressed axis, then return a
        # tuple of per-axis index arrays (usable for fancy indexing).
        for n in dim:
            check_type(n, "entry of `dim`", int)
        if A.ndim <= max(dim):
            raise ValueError(
                f"`dim` contains entry of value {max(dim)}, but all"
                f"entries of dim must be smaller than {A.ndim}."
            )
        if len(dim) != indices.shape[1]:
            raise ValueError(
                f"shape of `indices` along dimension 1 is "
                f"{indices.shape[0]}, but must be {len(dim)}"
            )
        indices = tuple(indices.T)
        # Bounds check each per-axis index array against A's shape.
        for (i, n) in enumerate(indices):
            if np.any(indices[i] >= A.shape[dim[i]]):
                raise ValueError(
                    f"`indices[{i}]` contains index of value "
                    f"{np.max(indices[i])} but all indices must be"
                    f" less than {A.shape[dim[i]]}."
                )
        return indices
    else:
        # Single-axis case: bounds check against the one addressed axis.
        if A.ndim <= dim:
            raise ValueError(
                f"`dim` has value {dim}, but must be smaller than "
                f"{A.ndim}."
            )
        if np.any(indices >= A.shape[dim]):
            raise ValueError(
                f"`indices` contains index of value "
                f"{np.max(indices)} but all indices must be"
                f" less than {A.shape[dim]}."
            )
        return indices
def check_type(
obj, name, *target_types, target_vals=None, indicator_funcs=None
):
"""Check if obj is one of the given types. It is also possible to allow
specific values. Further it is possible to pass indicator functions
that can also accept obj. Thereby obj must either have a correct type
a correct value or be accepted by an indicator function.
Parameters
----------
obj: object
The object to be checked.
name: str
The variable name of the object.
target_types : iterable
The possible types.
target_vals : iterable, optional (default=None)
Possible further values that the object is allowed to equal.
indicator_funcs : iterable, optional (default=None)
Possible further custom indicator (boolean) functions that accept
the object by returning `True` if the object is passed as a parameter.
"""
target_vals = target_vals if target_vals is not None else []
indicator_funcs = indicator_funcs if indicator_funcs is not None else []
wrong_type = not isinstance(obj, target_types)
wrong_value = obj not in target_vals
wrong_index = all(not i_func(obj) for i_func in indicator_funcs)
if wrong_type and wrong_value and wrong_index:
error_str = f"`{name}` "
if len(target_types) == 0 and len(target_vals) == 0:
error_str += f" must"
if len(target_vals) == 0 and len(target_types) > 0:
error_str += f" has type `{type(obj)}`, but must"
elif len(target_vals) > 0 and len(target_types) == 0:
error_str += f" has value `{obj}`, but must"
else:
error_str += f" has type `{type(obj)}` and value `{obj}`, but must"
if len(target_types) == 1:
error_str += f" have type `{target_types[0]}`"
elif 1 <= len(target_types) <= 3:
error_str += " have type"
for i in range(len(target_types) - 1):
error_str += f" `{target_types[i]}`,"
error_str += f" or `{target_types[len(target_types) - 1]}`"
elif len(target_types) > 3:
error_str += (
f" have one of the following types: {set(target_types)}"
)
if len(target_vals) > 0:
if len(target_types) > 0 and len(indicator_funcs) == 0:
error_str += " or"
elif len(target_types) > 0 and len(indicator_funcs) > 0:
error_str += ","
error_str += (
f" equal one of the following values: {set(target_vals)}"
)
if len(indicator_funcs) > 0:
if len(target_types) > 0 or len(target_vals) > 0:
error_str += " or"
error_str += (
f" be accepted by one of the following custom boolean "
f"functions: {set(i_f.__name__ for i_f in indicator_funcs)}"
)
raise TypeError(error_str + ".")
def check_callable(func, name, n_free_parameters=None):
    """Check that `func` is callable and has the expected number of
    parameters without default values.

    Parameters
    ----------
    func : callable
        The function to be validated.
    name : str
        The name of the function.
    n_free_parameters : int, optional (default=None)
        The expected number of free (default-less) parameters. If
        `n_free_parameters` is `None`, it is set to `1`.
    """
    if n_free_parameters is None:
        n_free_parameters = 1
    if not callable(func):
        raise TypeError(
            f"`{name}` must be callable. " f"`{name}` is of type {type(func)}"
        )
    # Parameters whose default is Parameter.empty are the "free" ones.
    free_params = [
        p
        for p in signature(func).parameters.values()
        if p.default == Parameter.empty
    ]
    if len(free_params) != n_free_parameters:
        raise ValueError(
            f"The number of free parameters of the callable has to "
            f"equal {n_free_parameters}. "
            f"The number of free parameters is {len(free_params)}."
        )
def check_bound(
    bound=None, X=None, ndim=2, epsilon=0, bound_must_be_given=False
):
    """Validates bound and returns the bound of X if bound is None.
    `bound` or `X` must not be None.

    Parameters
    ----------
    bound : array-like, shape (2, ndim), optional (default=None)
        The given bound of shape
        [[x1_min, x2_min, ..., xndim_min], [x1_max, x2_max, ..., xndim_max]]
    X : matrix-like, shape (n_samples, ndim), optional (default=None)
        The sample matrix X is the feature matrix representing samples.
    ndim : int, optional (default=2)
        The number of dimensions.
    epsilon : float, optional (default=0)
        The minimal distance between the returned bound and the values of
        `X`, if `bound` is not specified.
    bound_must_be_given : bool, optional (default=False)
        Whether it is allowed for the bound to be `None` and to be inferred
        by `X`.

    Returns
    -------
    bound : array-like, shape (2, ndim), optional (default=None)
        The given bound or bound of X.
    """
    if X is not None:
        X = check_array(X)
        if X.shape[1] != ndim:
            raise ValueError(
                f"`X` along axis 1 must be of length {ndim}. "
                f"`X` along axis 1 is of length {X.shape[1]}."
            )
    if bound is not None:
        bound = check_array(bound)
        if bound.shape != (2, ndim):
            raise ValueError(
                f"Shape of `bound` must be (2, {ndim}). "
                f"Shape of `bound` is {bound.shape}."
            )
    elif bound_must_be_given:
        raise ValueError("`bound` must not be `None`.")

    if bound is not None:
        # Explicit bound wins; warn when the data falls outside it.
        if X is not None and np.any(
            np.logical_or(bound[0] > X, X > bound[1])
        ):
            warnings.warn("`X` contains values not within range of `bound`.")
        return bound
    if X is not None:
        # Infer the bound from the data, padded by epsilon on both sides.
        minima = np.nanmin(X, axis=0) - epsilon
        maxima = np.nanmax(X, axis=0) + epsilon
        return np.append(minima.reshape(1, -1), maxima.reshape(1, -1), axis=0)
    raise ValueError("`X` or `bound` must not be None.")
def check_budget_manager(
    budget,
    budget_manager,
    default_budget_manager_class,
    default_budget_manager_dict=None,
):
    """Validate if budget manager is a budgetmanager class and create a
    copy 'budget_manager_'.
    """
    # A user-supplied manager wins; only warn when its own budget conflicts
    # with the separately supplied `budget` value.
    if budget_manager is not None:
        if budget is not None and budget != budget_manager.budget:
            warnings.warn(
                "budgetmanager is already given such that the budget "
                "is not used. The given budget differs from the "
                "budget_managers budget."
            )
        # Deep-copy so the caller's manager instance is never mutated.
        return copy.deepcopy(budget_manager)
    # Otherwise build a fresh manager of the default class.
    manager_kwargs = (
        {} if default_budget_manager_dict is None else default_budget_manager_dict
    )
    return default_budget_manager_class(budget=budget, **manager_kwargs)
| 2.734375 | 3 |
state_machine/drivers/pycubedmini/lib/bq25883.py | PyCubed-Mini/software_example_beepsat | 30 | 12773109 | <reponame>PyCubed-Mini/software_example_beepsat<gh_stars>10-100
"""
`bq25883`
====================================================
CircuitPython driver for the BQ25883 2-cell USB boost-mode charger.
* Author(s): <NAME>
Implementation Notes
--------------------
"""
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_bits import ROBits, RWBits
from adafruit_register.i2c_bit import ROBit, RWBit
# Registers
# _BATV_LIM = const(0x00)#RW
_CHRGI_LIM = const(0x01)#RW
# _VIN_LIM = const(0x02)#RW
_IIN_LIM = const(0x03)#RW
# _TERM_CTRL = const(0x04)#RW
_CHRGR_CRTL1 = const(0x05)#RW
_CHRGR_CRTL2 = const(0x06)#RW
# _CHRGR_CRTL3 = const(0x07)#RW
# _CHRGR_CRTL4 = const(0x08)#RW
# _OTG_CTRL = const(0x09)#RW
# _ICO_LIM = const(0x0A)
# _CHRG_STAT1 = const(0x0B)
# _CHRG_STAT2 = const(0x0C)
# _NTC_STAT = const(0x0D)
# _FAULT_STAT = const(0x0E)
# _CHRGR_FLAG1 = const(0x0F)
# _CHRGR_FLAG2 = const(0x10)
# _FAULT_FLAG = const(0x11)
# _CHRGR_MSK1 = const(0x12)#partial RW
# _CHRGR_MSK2 = const(0x13)#partial RW
# _FAULT_MSK = const(0x14)#partial RW
# _ADC_CTRL = const(0x15)#partial RW
# _ADC_FN_CTRL = const(0x16)#partial RW
# _IBUS_ADC1 = const(0x17)
# _IBUS_ADC0 = const(0x18)
# _ICHG_ADC1 = const(0x19)
# _ICHG_ADC0 = const(0x1A)
# _VBUS_ADC1 = const(0x1B)
# _VBUS_ADC0 = const(0x1C)
# _VBAT_ADC1 = const(0x1D)
# _VBAT_ADC0 = const(0x1E)
# _VSYS_ADC1 = const(0x1F)
# _VSYS_ADC0 = const(0x20)
# _TS_ADC1 = const(0x21)
# _TS_ADC0 = const(0x22)
# _TDIE_ADC1 = const(0x23)
# _TDIE_ADC0 = const(0x24)
_PART_INFO = const(0x25)#partial RW
class BQ25883:
    """Register-level driver for the TI BQ25883 2-cell boost-mode charger.

    Register fields are exposed through adafruit_register bit/bits
    descriptors; the widths and bit offsets below mirror the device's
    register map and must not be altered.
    """
    # 4-bit part-number field of PART_INFO; reads 3 for a genuine BQ25883.
    _pn = ROBits(4,_PART_INFO,3,1,False)
    # _fault_status = ROBits(8,_FAULT_STAT,0,1,False)
    # _chrgr_status1 = ROBits(8,_CHRG_STAT1,0,1,False)
    # _chrgr_status2 = ROBits(8,_CHRG_STAT2,0,1,False)
    # _chrg_status = ROBits(3,_CHRG_STAT1,0,1,False)
    # _otg_ctrl = ROBits(8,_OTG_CTRL,0,1,False)
    # Whole CHARGER_CONTROL_2 register, read-only view for status printing.
    _chrg_ctrl2 = ROBits(8,_CHRGR_CRTL2,0,1,False)
    # _ntc_stat = ROBits(3,_NTC_STAT,0,1,False)
    # _ichrg_adc0 = ROBits(7,_ICHG_ADC0,0,1,False)
    # _ichrg_adc1 = ROBits(8,_ICHG_ADC1,0,1,False)
    # _ichrg_adc = ROBits(16,_ICHG_ADC1,0,2,False)
    # _vbatt_adc = ROBits(16,_VBAT_ADC1,0,2,False)
    # _vbatt_adc0 = ROBits(8,_VBAT_ADC0,0,1,False)
    # _vbatt_adc1 = ROBits(8,_VBAT_ADC1,0,1,False)
    # _vbatt_limit = RWBits(8,_BATV_LIM,0,1,False)
    # Watchdog timer setting (2 bits); 0 disables the I2C watchdog.
    _wdt = RWBits(2,_CHRGR_CRTL1,4,1,False)
    # Fast-charge safety timer setting (2 bits).
    _chrg_timer = RWBits(2,_CHRGR_CRTL1,1,1,False)
    # Charge current limit (6 bits).
    _ichrg = RWBits(6,_CHRGI_LIM,0,1,False)
    # Input current limit (5 bits); 0 corresponds to 500 mA (see __init__).
    _iinlim = RWBits(5,_IIN_LIM,0,1,False)
    # _adc_res = RWBits(2,_ADC_CTRL,4,1,False)
    # _pfm_dis = RWBit(_CHRGR_CRTL3,7,1,False)
    # Charge-enable bit.
    _en_chrg = RWBit(_CHRGR_CRTL2, 3, 1, False)
    # _reg_rst = RWBit(_PART_INFO, 7, 1, False)
    # STAT pin disable bit (1 = LED off).
    _stat_dis = RWBit(_CHRGR_CRTL1, 6, 1, False)
    # _en_ichrg_adc = RWBit(_ADC_FN_CTRL, 6, 1, False)
    # _en_ibus_adc = RWBit(_ADC_FN_CTRL, 7, 1, False)
    # _en_vbat_adc = RWBit(_ADC_FN_CTRL, 4, 1, False)
    # _en_adc = RWBit(_ADC_CTRL, 7, 1, False)
    # _adc_rate = RWBit(_ADC_CTRL, 6, 1, False)

    def __init__(self, i2c_bus, addr=0x6B):
        """Attach to the charger on `i2c_bus` and apply conservative defaults.

        probe=False avoids an address scan so construction succeeds even if
        the device is briefly unresponsive; the part-number check below
        reports (but does not raise on) a missing chip.
        """
        self.i2c_device = I2CDevice(i2c_bus, addr,probe=False)
        self.i2c_addr = addr
        if not self._pn == 3: print("Unable to find BQ25883")
        self._iinlim=0 # set input current limit to 500mA
        self._chrg_timer=0b00 # 5 hours
        # self.adc=False

    # @property
    # def status(self):
    #     print('Fault:',bin(self._fault_status))
    #     print('Charger Status 1:',bin(self._chrgr_status1))
    #     print('Charger Status 2:',bin(self._chrgr_status2))
    #     print('Charge Status:',bin(self._chrg_status))
    #     print('Charge Control2:',bin(self._chrg_ctrl2))
    #     print('NTC Status:',bin(self._ntc_stat))
    #     print('OTG:',hex(self._otg_ctrl))

    @property
    def charging(self):
        # NOTE(review): getter prints the raw register instead of returning
        # a bool -- callers cannot test the state programmatically.
        print('Charge Control2:',bin(self._chrg_ctrl2))

    @charging.setter
    def charging(self,value):
        # Enable/disable battery charging.
        assert type(value) == bool
        self._en_chrg=value

    @property
    def charging_current(self):
        # Prints the raw ICHRG register value (see setter for scaling).
        print('Charger Current Limit (ICHRG):',hex(self._ichrg))

    @charging_current.setter
    def charging_current(self,value):
        # default:0x1e=1500mA, 0x8=400mA
        self._ichrg=value

    @property
    def wdt(self):
        # Prints the raw 2-bit watchdog setting.
        print('Watchdog Timer:',bin(self._wdt))

    @wdt.setter
    def wdt(self,value):
        # Falsy value (None/False/0) disables the watchdog entirely.
        if not value:
            self._wdt=0
        else:
            self._wdt=value

    @property
    def led(self):
        # Prints the STAT-disable bit (inverted sense of "LED on").
        print('Status LED:',bin(self._stat_dis))

    @led.setter
    def led(self,value):
        # led=True enables the STAT LED, so the disable bit is inverted.
        assert type(value) == bool
        self._stat_dis=not value

    # def measure_current(self):
    #     return (self._ichrg_adc0,self._ichrg_adc1)

    # @property
    # def enable_adc(self):
    #     return self.adc
    # @enable_adc.setter
    # def enable_adc(self,value):
    #     self._en_adc=False
    #     if value:
    #         self._adc_rate=0b00
    #         self._en_ichrg_adc=True
    #         self._en_ibus_adc=True
    #         self._en_vbat_adc=True
    #         self._en_adc=True
    #         if not self._en_adc:
    #             print('error starting adc. ')
    #         self.adc=True
    #     else:
    #         self.adc=False
rl.py | llSourcell/Sensor_Networks | 40 | 12773110 | <reponame>llSourcell/Sensor_Networks
from gridworld import GridWorldMDP
from qlearn import QLearner
import numpy as np
import matplotlib.pyplot as plt
def plot_convergence(utility_grids, policy_grids):
    """Plot per-iteration change in utilities (left axis, blue) and in the
    greedy policy (right axis, red) on a shared x-axis."""
    fig, utility_axis = plt.subplots()
    policy_axis = utility_axis.twinx()

    # Sum of squared per-cell utility changes between consecutive iterations.
    utility_deltas = np.sum(np.square(np.diff(utility_grids)), axis=(0, 1))
    utility_axis.plot(utility_deltas, 'b.-')
    utility_axis.set_ylabel('Change in Utility', color='b')

    # Number of grid cells whose best action changed between iterations.
    policy_deltas = np.count_nonzero(np.diff(policy_grids), axis=(0, 1))
    policy_axis.plot(policy_deltas, 'r.-')
    policy_axis.set_ylabel('Change in Best Policy', color='r')
if __name__ == '__main__':
    # 3x4 grid-world: goal and trap in the rightmost column, one obstacle.
    shape = (3, 4)
    goal = (0, -1)
    trap = (1, -1)
    obstacle = (1, 1)
    start = (2, 0)

    default_reward = -0.1
    goal_reward = 1
    trap_reward = -1

    reward_grid = np.zeros(shape) + default_reward
    reward_grid[goal] = goal_reward
    reward_grid[trap] = trap_reward
    reward_grid[obstacle] = 0

    # FIX: `np.bool` was a deprecated alias for the builtin `bool` and was
    # removed in NumPy 1.24; use the plain builtin as the dtype instead.
    terminal_mask = np.zeros_like(reward_grid, dtype=bool)
    terminal_mask[goal] = True
    terminal_mask[trap] = True

    obstacle_mask = np.zeros_like(reward_grid, dtype=bool)
    obstacle_mask[1, 1] = True

    # Stochastic transitions: 80% intended move, 10% slip to either side.
    gw = GridWorldMDP(reward_grid=reward_grid,
                      obstacle_mask=obstacle_mask,
                      terminal_mask=terminal_mask,
                      action_probabilities=[
                          (-1, 0.1),
                          (0, 0.8),
                          (1, 0.1),
                      ],
                      no_action_probability=0.0)

    # Solve with both classic dynamic-programming methods and compare.
    mdp_solvers = {'Value Iteration': gw.run_value_iterations,
                   'Policy Iteration': gw.run_policy_iterations}

    for solver_name, solver_fn in mdp_solvers.items():
        print('Final result of {}:'.format(solver_name))
        policy_grids, utility_grids = solver_fn(iterations=25, discount=0.5)
        print(policy_grids[:, :, -1])
        print(utility_grids[:, :, -1])
        plt.figure()
        gw.plot_policy(utility_grids[:, :, -1])
        plot_convergence(utility_grids, policy_grids)
        plt.show()

    # Model-free baseline: tabular Q-learning with decaying exploration.
    ql = QLearner(num_states=(shape[0] * shape[1]),
                  num_actions=4,
                  learning_rate=0.8,
                  discount_rate=0.9,
                  random_action_prob=0.5,
                  random_action_decay_rate=0.99,
                  dyna_iterations=0)

    start_state = gw.grid_coordinates_to_indices(start)

    iterations = 1000
    flat_policies, flat_utilities = ql.learn(start_state,
                                             gw.generate_experience,
                                             iterations=iterations)

    # Reshape the flat per-state results back onto the grid for display.
    new_shape = (gw.shape[0], gw.shape[1], iterations)
    ql_utility_grids = flat_utilities.reshape(new_shape)
    ql_policy_grids = flat_policies.reshape(new_shape)
    print('Final result of QLearning:')
    print(ql_policy_grids[:, :, -1])
    print(ql_utility_grids[:, :, -1])

    plt.figure()
    gw.plot_policy(ql_utility_grids[:, :, -1], ql_policy_grids[:, :, -1])
    plot_convergence(ql_utility_grids, ql_policy_grids)
    plt.show()
| 2.4375 | 2 |
test.py | erikhoward/py3-disco | 0 | 12773111 | # Python application to test miniconda data science installation
import math
import os
import sys
# Importable module names to probe; note several differ from their PyPI
# package names (e.g. cv2 -> opencv-python, skimage -> scikit-image).
libs = ["numpy", "pandas", "matplotlib", "sklearn", "skimage", "cv2",
        "sqlalchemy", "bokeh", "nltk", "missingno", "geopandas", "wordcloud",
        "lightgbm", "scipy", "xgboost", "catboost", "keras"]
def main():
    """Run every environment check in sequence."""
    print("Please wait, testing Python environment....")
    for check in (test_is_python_35, test_libs, test_tensorflow, test_keras):
        check()
def test_libs():
    """Try importing each module listed in ``libs`` and report OK/FAIL."""
    for name in libs:
        try:
            __import__(name)
        except ImportError:
            print("Testing {:s} -> FAIL".format(name))
        else:
            print("Testing {:s} -> OK".format(name))
def test_keras():
    """Report whether the keras package can be imported."""
    try:
        import keras  # noqa: F401 -- import itself is the test
    except ImportError:
        print("Testing keras -> FAIL")
    else:
        print("Testing keras -> OK")
def test_tensorflow():
    """Report whether the tensorflow package can be imported."""
    try:
        import tensorflow  # noqa: F401 -- import itself is the test
    except ImportError:
        print("Testing tensorflow -> FAIL")
    else:
        print("Testing tensorflow -> OK")
def test_is_python_35():
    """Verify the interpreter is Python 3.5+.

    Exits the process with status 1 when not running on Python 3 at all;
    otherwise prints either a success line or a version warning.
    """
    major = sys.version_info.major
    minor = sys.version_info.minor

    # FIX: the original had an empty `if ...: pass / else:` branch plus dead
    # code (`return None` after sys.exit and a commented-out assert); use a
    # single guard clause instead.
    if major != 3:
        print("You are running Python {}, but we need Python {}.".format(major, 3))
        print("Stopping here.")
        sys.exit(1)

    if minor >= 5:
        print("Testing Python version-> py{}.{} OK".format(major, minor))
    else:
        print("Warning: You should be running Python 3.5 or newer, " +
              "you have Python {}.{}.".format(major, minor))
main()
| 2.90625 | 3 |
tests/test_vector.py | cs207FinalProjectGroup/cs207-FinalProject | 0 | 12773112 | <filename>tests/test_vector.py
import sys
import os
import numpy as np
import pytest
sys.path.append('..')
import autodiff as ad
def test_create_vector():
    """Vector construction: values, per-component derivatives, and Jacobians
    (including extra unknown variables and explicit seed derivatives)."""
    v = ad.create_vector('v', [1, 2])
    assert(v[0].getValue() == 1)
    assert(v[1].getValue() == 2)
    derivs = ad.get_deriv(v)
    # Default seed: d v_i / d v_i == 1, cross-derivatives are 0.
    assert(np.array_equal(np.array([deriv.get('v1', 0) for deriv in derivs]), np.array([1, 0])))
    assert(np.array_equal(np.array([deriv.get('v2', 0) for deriv in derivs]), np.array([0, 1])))
    jacobian = ad.get_jacobian(v, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
    # Unknown variable names yield an all-zero Jacobian column.
    jacobian = ad.get_jacobian(v, ['v1', 'v2', 'hello'])
    assert(np.array_equal(jacobian, np.array([[1, 0, 0], [0, 1, 0]])))

    # Explicit seed derivatives [3, 4] scale the Jacobian diagonal.
    v = ad.create_vector('v', [1, 2], [3, 4])
    assert(v[0].getValue() == 1)
    assert(v[1].getValue() == 2)
    derivs = ad.get_deriv(v)
    assert(np.array_equal(np.array([deriv.get('v1', 0) for deriv in derivs]), np.array([3, 0])))
    assert(np.array_equal(np.array([deriv.get('v2', 0) for deriv in derivs]), np.array([0, 4])))
    jacobian = ad.get_jacobian(v, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[3, 0], [0, 4]])))
    jacobian = ad.get_jacobian(v, ['v1', 'v2', 'hello'])
    assert(np.array_equal(jacobian, np.array([[3, 0, 0], [0, 4, 0]])))

    # Mismatched value/derivative lengths must be rejected.
    with pytest.raises(Exception):
        v = ad.create_vector('v', [1, 2], [3, 4, 5])

    # Vectors can also be built from individual Scalars via np.array.
    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v = np.array([x, y])
    assert(np.array_equal(ad.get_value(v), np.array([1, 2])))
    jacobian = ad.get_jacobian(v, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[0, 0], [0, 0]])))
    jacobian = ad.get_jacobian(v, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v = np.array([x, 2 * y])
    assert(np.array_equal(ad.get_value(v), np.array([1, 4])))
    jacobian = ad.get_jacobian(v, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 2]])))
    # Column order follows the requested variable order.
    jacobian = ad.get_jacobian(v, ['y', 'x'])
    assert(np.array_equal(jacobian, np.array([[0, 1], [2, 0]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v = np.array([x + y, 2 * y])
    assert(np.array_equal(ad.get_value(v), np.array([3, 4])))
    jacobian = ad.get_jacobian(v, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[1, 1], [0, 2]])))
    jacobian = ad.get_jacobian(v, ['y', 'x'])
    assert(np.array_equal(jacobian, np.array([[1, 1], [2, 0]])))
def test_add():
    """Elementwise addition: vector+vector, vector+scalar, and mixes of
    Scalar-built arrays; values and Jacobians."""
    v1 = ad.create_vector('v', [1, 2])
    v2 = ad.create_vector('v', [1, 5])
    v3 = v1 + v2
    assert(v3[0].getValue() == 2)
    assert(v3[1].getValue() == 7)
    # Both operands share variable names, so the derivatives add up to 2.
    jacobian = ad.get_jacobian(v3, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[2, 0], [0, 2]])))

    # Adding a constant shifts values but leaves derivatives unchanged.
    v1 = ad.create_vector('v', [1, 2])
    v2 = v1 + 10
    assert(v2[0].getValue() == 11)
    assert(v2[1].getValue() == 12)
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))

    v1 = ad.create_vector('v', [1, 2])
    v2 = ad.Scalar('v2', 4)
    v3 = ad.Scalar('v1', 7)
    v4 = v1 + np.array([v2, v3])
    assert(v4[0].getValue() == 5)
    assert(v4[1].getValue() == 9)
    jacobian = ad.get_jacobian(v4, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 1], [1, 1]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = ad.create_vector('v', [1, 5])
    v3 = v1 + v2
    assert(v3[0].getValue() == 2)
    assert(v3[1].getValue() == 7)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = np.array([x + y, x])
    v3 = v1 + v2
    assert(v3[0].getValue() == 4)
    assert(v3[1].getValue() == 3)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[2, 1], [1, 1]])))

    # Mixed Scalar/constant components.
    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = np.array([y, 10])
    v3 = v1 + v2
    assert(v3[0].getValue() == 3)
    assert(v3[1].getValue() == 12)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[1, 1], [0, 1]])))
def test_mul():
    """Elementwise multiplication: product rule across vectors, scalars,
    constants, and self-multiplication."""
    v1 = ad.create_vector('v', [1, 2])
    v2 = ad.create_vector('w', [3, 5])
    v3 = v1 * v2
    assert(v3[0].getValue() == 3)
    assert(v3[1].getValue() == 10)
    # d(v*w)/dv = w and d(v*w)/dw = v (product rule).
    jacobian = ad.get_jacobian(v3, ['v1', 'v2', 'w1', 'w2'])
    assert(np.array_equal(jacobian, np.array([[3, 0, 1, 0], [0, 5, 0, 2]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v = ad.Scalar('v', 3)
    v1 = np.array([x, y])
    v2 = np.array([v, 3 * v])
    v3 = v1 * v2
    assert(v3[0].getValue() == 3)
    assert(v3[1].getValue() == 18)
    jacobian = ad.get_jacobian(v3, ['x', 'y', 'v'])
    assert(np.array_equal(jacobian, np.array([[3, 0, 1], [0, 9, 6]])))

    # Squaring via self-multiplication: derivative 2*v.
    v1 = ad.create_vector('v', [2, 3])
    v3 = v1 * v1
    assert(v3[0].getValue() == 4)
    assert(v3[1].getValue() == 9)
    jacobian = ad.get_jacobian(v3, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[4, 0], [0, 6]])))

    # Multiplying by a constant scales both values and derivatives.
    v1 = ad.create_vector('v', [1, 2])
    v2 = v1 * 10
    assert(v2[0].getValue() == 10)
    assert(v2[1].getValue() == 20)
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[10, 0], [0, 10]])))

    x = ad.Scalar('x', 5)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = np.array([x * y, (x + y)])
    v3 = v1 * v2
    assert(v3[0].getValue() == 50)
    assert(v3[1].getValue() == 14)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[20, 25], [2, 9]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = np.array([y, 10])
    v3 = v1 * v2
    assert(v3[0].getValue() == 2)
    assert(v3[1].getValue() == 20)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[2, 1], [0, 10]])))
def test_neg():
    """Unary negation: values and derivatives flip sign; double negation
    is the identity."""
    v1 = ad.create_vector('v', [1, 2])
    v2 = -v1
    assert(v2[0].getValue() == -1)
    assert(v2[1].getValue() == -2)
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[-1, 0], [0, -1]])))

    v3 = -v2
    assert(v3[0].getValue() == 1)
    assert(v3[1].getValue() == 2)
    jacobian = ad.get_jacobian(v3, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))

    # -1 * -v must be equivalent to v.
    v1 = ad.create_vector('v', [1, 2])
    v2 = -1 * -v1
    assert(v2[0].getValue() == 1)
    assert(v2[1].getValue() == 2)
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))
def test_sub():
    """Elementwise subtraction: Scalar arrays, constants, and mixed
    operands; values and Jacobians."""
    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = np.array([y, x])
    v3 = v1 - v2
    assert(v3[0].getValue() == -1)
    assert(v3[1].getValue() == 1)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[1, -1], [-1, 1]])))

    # Subtracting a constant shifts values but not derivatives.
    v1 = ad.create_vector('v', [1, 2])
    v2 = v1 - 10
    assert(v2[0].getValue() == -9)
    assert(v2[1].getValue() == -8)
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, 1]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = ad.create_vector('v', [1, 5])
    v3 = v1 - v2
    assert(v3[0].getValue() == 0)
    assert(v3[1].getValue() == -3)
    # Subtrahend variables pick up derivative -1.
    jacobian = ad.get_jacobian(v3, ['x', 'y', 'v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 0, -1, 0], [0, 1, 0, -1]])))

    x = ad.Scalar('x', 1)
    y = ad.Scalar('y', 2)
    v1 = np.array([x, y])
    v2 = np.array([y, 10])
    v3 = v1 - v2
    assert(v3[0].getValue() == -1)
    assert(v3[1].getValue() == -8)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[1, -1], [0, 1]])))
def test_pow():
    """Exponentiation with constant, nested, and variable exponents."""
    v1 = ad.create_vector('v', [2, 5])
    v2 = v1 ** 2
    assert(v2[0].getValue() == 4)
    assert(v2[1].getValue() == 25)
    # d(v^2)/dv = 2v.
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[4, 0], [0, 10]])))

    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 5)
    v1 = np.array([x, y])
    v2 = v1 ** 2
    assert(v2[0].getValue() == 4)
    assert(v2[1].getValue() == 25)
    jacobian = ad.get_jacobian(v2, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[4, 0], [0, 10]])))

    # Nested powers: (v^2)^3 = v^6, derivative 6*v^5.
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    v1 = np.array([x, y])
    v2 = (v1 ** 2) ** 3
    assert(v2[0].getValue() == 64)
    assert(v2[1].getValue() == 729)
    jacobian = ad.get_jacobian(v2, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[6 * (2 ** 5), 0], [0, 6 * (3 ** 5)]])))

    # Variable exponent: d(x^y)/dx = y*x^(y-1), d(x^y)/dy = ln(x)*x^y.
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    v1 = np.array([x, y])
    v2 = np.array([y, 2])
    v3 = v1 ** v2
    assert(v3[0].getValue() == 8)
    assert(v3[1].getValue() == 9)
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[12, np.log(2) * 8], [0, 6]])))
def test_rpow():
    """Reverse power (constant base, variable exponent):
    d(a^v)/dv = ln(a) * a^v, including composed exponents."""
    v1 = ad.create_vector('v', [2, 5])
    v2 = 2 ** v1
    assert(v2[0].getValue() == 4)
    assert(v2[1].getValue() == 32)
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[np.log(2) * 4, 0], [0, np.log(2) * 32]])))

    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 5)
    v1 = np.array([x, y])
    v2 = 2 ** v1
    assert(v2[0].getValue() == 4)
    assert(v2[1].getValue() == 32)
    jacobian = ad.get_jacobian(v2, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[np.log(2) * 4, 0], [0, np.log(2) * 32]])))

    # Chain rule through the exponent: d(2^(2v))/dv = 2*ln(2)*2^(2v).
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    v1 = np.array([x, y])
    v2 = 2 ** (2 * v1)
    assert(v2[0].getValue() == 16)
    assert(v2[1].getValue() == 64)
    jacobian = ad.get_jacobian(v2, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[np.log(2) * 32, 0], [0, np.log(2) * 128]])))

    # Constant base computed from an expression: (2^2)^v = 4^v.
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    v1 = np.array([x, y])
    v2 = (2 ** 2) ** v1
    assert(v2[0].getValue() == 16)
    assert(v2[1].getValue() == 64)
    jacobian = ad.get_jacobian(v2, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[np.log(2) * (2 ** 4) * 2, 0], [0, np.log(2) * (2 ** 6) * 2]])))

    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    v1 = np.array([x + y, x])
    v2 = (2 ** 2) ** v1
    assert(v2[0].getValue() == 2 ** 10)
    assert(v2[1].getValue() == 16)
    jacobian = ad.get_jacobian(v2, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[np.log(2) * (2 ** 10) * 2, np.log(2) * (2 ** 10) * 2], [np.log(2) * (2 ** 4) * 2, 0]])))

    # 2^(2v) must agree with 4^v on values and derivatives.
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    v1 = np.array([x + y, x])
    v2 = 2 ** (2 * v1)
    assert(v2[0].getValue() == 2 ** 10)
    assert(v2[1].getValue() == 16)
    jacobian = ad.get_jacobian(v2, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[np.log(2) * (2 ** 10) * 2, np.log(2) * (2 ** 10) * 2], [np.log(2) * (2 ** 4) * 2, 0]])))
def test_exp():
    """Elementwise exp: value and derivative are both e^v (chain rule for
    composed arguments)."""
    v1 = ad.create_vector('v', [2, 5])
    v2 = ad.exp(v1)
    assert(np.isclose(v2[0].getValue(), np.exp(2)))
    assert(np.isclose(v2[1].getValue(), np.exp(5)))
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[np.exp(2), 0], [0, np.exp(5)]])))

    # d(exp(2v))/dv = 2*exp(2v).
    v1 = ad.create_vector('v', [2, 5])
    v2 = ad.exp(2 * v1)
    assert(np.isclose(v2[0].getValue(), np.exp(4)))
    assert(np.isclose(v2[1].getValue(), np.exp(10)))
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, 2 * np.array([[np.exp(4), 0], [0, np.exp(10)]])))

    # exp of compound expressions [x+y, x*y].
    x = ad.Scalar('x', 2)
    y = ad.Scalar('y', 3)
    v1 = ad.exp(np.array([x + y, x * y]))
    assert(np.isclose(v1[0].getValue(), np.exp(5)))
    assert(np.isclose(v1[1].getValue(), np.exp(6)))
    jacobian = ad.get_jacobian(v1, ['x', 'y'])
    assert(np.array_equal(jacobian, np.array([[np.exp(5), np.exp(5)], [3 * np.exp(6), 2 * np.exp(6)]])))
def test_sin():
    """Elementwise sin: derivative cos(v), plus division, squaring, and
    composed arguments exercising quotient/chain rules."""
    v1 = ad.create_vector('v', [0, 100])
    v2 = ad.sin(v1)
    assert(v2[0].getValue() == 0)
    assert(np.isclose(v2[1].getValue(), np.sin(100)))
    # d(sin v)/dv = cos(v); cos(0) == 1.
    jacobian = ad.get_jacobian(v2, ['v1', 'v2'])
    assert(np.array_equal(jacobian, np.array([[1, 0], [0, np.cos(100)]])))

    # sin(v)/sin(v) == 1 with zero derivative everywhere.
    v1 = ad.Scalar('x', 4)
    v2 = ad.Scalar('y', 10)
    v3 = ad.sin(np.array([v1, v2])) / ad.sin(np.array([v1, v2]))
    assert(np.isclose(v3[0].getValue(), 1))
    assert(np.isclose(v3[1].getValue(), 1))
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.isclose(jacobian, np.array([[0, 0], [0, 0]])).all())

    # d(sin^2 v)/dv = 2 sin(v) cos(v).
    v1 = ad.Scalar('x', 4)
    v2 = ad.Scalar('y', 10)
    v3 = ad.sin(np.array([v1, v2])) ** 2
    assert(np.isclose(v3[0].getValue(), np.sin(4) ** 2))
    assert(np.isclose(v3[1].getValue(), np.sin(10) ** 2))
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.isclose(jacobian, np.array([[2 * np.sin(4) * np.cos(4), 0], [0, 2 * np.sin(10) * np.cos(10)]])).all())

    # Full chain rule through sin^2 of [x*y, x+y].
    v1 = ad.Scalar('x', 4)
    v2 = ad.Scalar('y', 10)
    v3 = ad.sin(np.array([v1 * v2, v1 + v2])) ** 2
    assert(np.isclose(v3[0].getValue(), np.sin(40) ** 2))
    assert(np.isclose(v3[1].getValue(), np.sin(14) ** 2))
    jacobian = ad.get_jacobian(v3, ['x', 'y'])
    assert(np.isclose(jacobian, np.array([[2 * np.sin(40) * np.cos(40) * 10, 2 * np.sin(40) * np.cos(40) * 4],
                                          [2 * np.sin(14) * np.cos(14), 2 * np.sin(14) * np.cos(14)]])).all())
def test_cos():
    """Elementwise cos: values and Jacobian (-sin on the diagonal)."""
    vec = ad.create_vector('v', [0, 100])
    cos_vec = ad.cos(vec)
    assert(cos_vec[0].getValue() == 1)
    assert(np.isclose(cos_vec[1].getValue(), np.cos(100)))
    expected_jac = np.array([[0, 0], [0, -np.sin(100)]])
    assert(np.isclose(ad.get_jacobian(cos_vec, ['v1', 'v2']), expected_jac).all())
def test_tan():
    """Elementwise tan: values and Jacobian (sec^2 on the diagonal)."""
    vec = ad.create_vector('v', [0, 100])
    tan_vec = ad.tan(vec)
    assert(tan_vec[0].getValue() == 0)
    assert(np.isclose(tan_vec[1].getValue(), np.tan(100)))
    expected_jac = np.array([[1, 0], [0, 1 / (np.cos(100) ** 2)]])
    assert(np.isclose(ad.get_jacobian(tan_vec, ['v1', 'v2']), expected_jac).all())
| 2.515625 | 3 |
tests/test_portproxy.py | pseeth/portproxy | 1 | 12773113 | <reponame>pseeth/portproxy
def test_portproxy():
    """Placeholder test: the suite has no assertions yet."""
    pass
NNServer/test/test_app.py | yinkn/iam_plus | 0 | 12773114 | """
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/train -d '{"userName":"TEST1","dataset":[[-15, -57, 88, 50, 16, 83, 198, 16, -70],[202, -53, 140, 134, 0, 84, 165, -16, -15],[15, -67, 96, 108, 0, 79, 212, -16, -23],[16, -67, 126, 119, -15, 67, 258, -16, -16],[53, -65, 155, 59, 15, 65, 179, -16, 15],[56, -68, 104, 137, -16, 88, 382, 16, -70]], "dataset2":[[41, -47, 1065, -15, 85, 181, 892, 16, -15],[15, -15, 922, 164, 80, 488, 302, 67, 51],[448, 1770, 2118, 1348, 495, 117, 1286, 430, 633],[47, -34, 1405, 75, 49, 228, 1115, 89, 16],[550, 1080, 383, 321, 97, 350, 1420, 43, 1114],[89, 430, 1034, 97, 36, 112, 387, 57, 16]]}'
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/predict -d '{"userName":"DEMO1","dataset":[[200,27,102,80,-36,80,579,66,21,61,208,71,-110]]}'
"""
"""
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/register -d '{"userName":"TEST1","dataset":[[-15, -57, 88, 50, 16, 83, 198, 16, -70],[202, -53, 140, 134, 0, 84, 165, -16, -15],[15, -67, 96, 108, 0, 79, 212, -16, -23],[16, -67, 126, 119, -15, 67, 258, -16, -16],[53, -65, 155, 59, 15, 65, 179, -16, 15],[56, -68, 104, 137, -16, 88, 382, 16, -70]], "dataset2":[[41, -47, 1065, -15, 85, 181, 892, 16, -15],[15, -15, 922, 164, 80, 488, 302, 67, 51],[448, 1770, 2118, 1348, 495, 117, 1286, 430, 633],[47, -34, 1405, 75, 49, 228, 1115, 89, 16],[550, 1080, 383, 321, 97, 350, 1420, 43, 1114],[89, 430, 1034, 97, 36, 112, 387, 57, 16]]}'
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/login -d '{"userName":"DEMO1","dataset":[[200,27,102,80,-36,80,579,66,21,61,208,71,-110]]}'
curl -i -X POST -H "Content-Type:application/json" http://localhost:5000/train -d '{"userName":"test3", "password":""}'
"""
import os
import sys
import unittest
import json
import logging
sys.path.append("../src")
import app
class FlaskAppTest(unittest.TestCase):
    """Exercise the /register and /login JSON endpoints of the Flask app."""

    def setUp(self):
        # Fresh Flask test client per test case.
        self.client = app.app.test_client()

    def tearDown(self):
        pass

    def test_register_login(self):
        """POST register then login with the same payload; expect HTTP 200.

        NOTE(review): only the status code is checked, not the response
        body -- consider asserting on the returned JSON as well.
        """
        logging.debug("test_login:")
        response = self.client.post('/register',
                        data=json.dumps({"userName":"DEMO1", "password":"<PASSWORD>","dataset":[[200,27,102,80,-36,80,579,66,21,61,208,71,-110],[200,27,102,80,-36,80,579,66,21,61,208,71,-110]]})
                        , content_type='application/json'
                        , follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        logging.debug("test_login:")
        response = self.client.post('/login',
                        data=json.dumps({"userName":"DEMO1", "password":"<PASSWORD>","dataset":[[200,27,102,80,-36,80,579,66,21,61,208,71,-110],[200,27,102,80,-36,80,579,66,21,61,208,71,-110]]})
                        , content_type='application/json'
                        , follow_redirects=True)
        self.assertEqual(response.status_code, 200)
# Configure application logging, then hand control to the unittest runner.
if __name__ == '__main__':
    app.init_log()
    unittest.main()
oandapyV20-examples-master/src/streaming_trans.py | cdibble2011/OANDA | 127 | 12773115 | # -*- coding: utf-8 -*-
"""Simple demo of streaming transaction data."""
from oandapyV20 import API
from oandapyV20.exceptions import V20Error, StreamTerminated
from oandapyV20.endpoints.transactions import TransactionsStream
from exampleauth import exampleAuth
# Load demo credentials and connect to OANDA's practice environment.
accountID, access_token = exampleAuth()
api = API(access_token=access_token, environment="practice")

# Open a long-lived transaction stream for the account.
s = TransactionsStream(accountID=accountID)
MAXTRANS = 10
print("read from stream until {} transactions received".format(MAXTRANS))
try:
    n = 0
    # Each R is one transaction record pushed by the server.
    for R in api.request(s):
        print(R)
        n += 1
        if n > MAXTRANS:
            # Ask the stream to stop; this raises StreamTerminated below.
            s.terminate("max transactions received")
except StreamTerminated as e:
    print("{}".format(e))
except V20Error as e:
    print("Error: {}".format(e))
project euler solutions/Problem_059.py | helq/old_code | 0 | 12773116 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
def decrypt(text, passw):
    """XOR *text* with *passw* repeated cyclically.

    XOR is symmetric, so applying decrypt twice with the same key
    returns the original text. An empty key yields "".

    FIX: the original recursed once per key-length chunk and concatenated
    strings, which is quadratic and can hit the recursion limit on long
    inputs; it also used the Python-2-only `xrange`. This iterative form
    is equivalent and works on Python 2 and 3.
    """
    if not passw:
        return ""
    klen = len(passw)
    return "".join(chr(ord(text[n]) ^ ord(passw[n % klen]))
                   for n in range(len(text)))
# WARNING(review): `eval` on file contents executes arbitrary code if the
# cipher file is untrusted; a safer parse would split on "," and int() each.
text_encrypt = "".join(map(lambda x: chr(x), eval( "[" + open("059-cipher1.txt", "Ur").readline()[:-1] + "]" )))

# Candidate keys: every three-letter lowercase combination (a..z).
posible_pass = [chr(i) + chr(j) + chr(k) for i in range(97,123) for j in range(97,123) for k in range(97,123)]

# Heuristic: the correct key yields the decryption with the most spaces,
# since the plaintext is English prose.
max_num_space = 0
for i in range(len(posible_pass)):
    num_space = decrypt(text_encrypt, posible_pass[i]).count(" ")
    if max_num_space < num_space:
        max_num_space = num_space
        max_num_space_index = i

# max_num_space_index 4423
# Answer: sum of ASCII codes of the decrypted plaintext (Python 2 print).
print sum( ord(n) for n in decrypt(text_encrypt, posible_pass[max_num_space_index]) )
rpython/jit/codewriter/call.py | kantai/passe-pypy-taint-tracking | 2 | 12773117 | <gh_stars>1-10
#
# Contains the logic to decide, based on the policy, which graphs
# to transform to JitCodes or not.
#
from rpython.jit.codewriter import support
from rpython.jit.codewriter.jitcode import JitCode
from rpython.jit.codewriter.effectinfo import (VirtualizableAnalyzer,
QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze,
EffectInfo, CallInfoCollection)
from rpython.translator.simplify import get_funcobj, get_functype
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.translator.backendopt.canraise import RaiseAnalyzer
from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer
class CallControl(object):
    """Decides, per the JIT policy, which RPython graphs are turned into
    JitCodes, and builds call descriptors/effect info for residual calls."""
    virtualref_info = None # optionally set from outside
    has_libffi_call = False # default value
    def __init__(self, cpu=None, jitdrivers_sd=[]):
        """Set up per-translator analyzers and jitdriver bookkeeping.

        NOTE(review): the mutable default `jitdrivers_sd=[]` is shared
        across calls; in practice callers always pass their own list.
        """
        assert isinstance(jitdrivers_sd, list) # debugging
        self.cpu = cpu
        self.jitdrivers_sd = jitdrivers_sd
        self.jitcodes = {} # map {graph: jitcode}
        self.unfinished_graphs = [] # list of graphs with pending jitcodes
        self.callinfocollection = CallInfoCollection()
        if hasattr(cpu, 'rtyper'): # for tests
            self.rtyper = cpu.rtyper
            translator = self.rtyper.annotator.translator
            self.raise_analyzer = RaiseAnalyzer(translator)
            self.readwrite_analyzer = ReadWriteAnalyzer(translator)
            self.virtualizable_analyzer = VirtualizableAnalyzer(translator)
            self.quasiimmut_analyzer = QuasiImmutAnalyzer(translator)
            self.randomeffects_analyzer = RandomEffectsAnalyzer(translator)
        #
        # Give each jitdriver a stable index used elsewhere in the codewriter.
        for index, jd in enumerate(jitdrivers_sd):
            jd.index = index
    def find_all_graphs(self, policy):
        """Compute (and cache) the set of graphs the JIT should look inside.

        Starts from the portal graphs (plus the built-in inline-call helpers)
        and follows 'regular' calls transitively, keeping only graphs the
        policy's look_inside_graph accepts.
        """
        # Cached on first use; subsequent calls return the same set.
        try:
            return self.candidate_graphs
        except AttributeError:
            pass

        is_candidate = policy.look_inside_graph

        assert len(self.jitdrivers_sd) > 0
        todo = [jd.portal_graph for jd in self.jitdrivers_sd]
        if hasattr(self, 'rtyper'):
            for oopspec_name, ll_args, ll_res in support.inline_calls_to:
                c_func, _ = support.builtin_func_for_spec(self.rtyper,
                                                          oopspec_name,
                                                          ll_args, ll_res)
                todo.append(c_func.value._obj.graph)
        candidate_graphs = set(todo)

        # Debug helper only: prints the call chain leading to `top_graph`
        # (note the Python 2 print statements -- this is RPython tooling).
        def callers():
            graph = top_graph
            print graph
            while graph in coming_from:
                graph = coming_from[graph]
                print '<-', graph
        coming_from = {}

        # Worklist traversal over all call sites of candidate graphs.
        while todo:
            top_graph = todo.pop()
            for _, op in top_graph.iterblockops():
                if op.opname not in ("direct_call", "indirect_call", "oosend"):
                    continue
                kind = self.guess_call_kind(op, is_candidate)
                # use callers() to view the calling chain in pdb
                if kind != "regular":
                    continue
                for graph in self.graphs_from(op, is_candidate):
                    if graph in candidate_graphs:
                        continue
                    assert is_candidate(graph)
                    todo.append(graph)
                    candidate_graphs.add(graph)
                    coming_from[graph] = top_graph
        self.candidate_graphs = candidate_graphs
        return candidate_graphs
    def graphs_from(self, op, is_candidate=None):
        """Return the candidate target graph(s) of a call operation, or None.

        None means the call stays residual (no target graph should be
        inlined/traced into).
        """
        if is_candidate is None:
            is_candidate = self.is_candidate
        if op.opname == 'direct_call':
            funcobj = get_funcobj(op.args[0].value)
            graph = funcobj.graph
            if is_candidate(graph):
                return [graph]     # common case: look inside this graph
        else:
            assert op.opname in ('indirect_call', 'oosend')
            if op.opname == 'indirect_call':
                graphs = op.args[-1].value
            else:
                v_obj = op.args[1].concretetype
                graphs = v_obj._lookup_graphs(op.args[0].value)
            #
            if graphs is None:
                # special case: handle the indirect call that goes to
                # the 'instantiate' methods.  This check is a bit imprecise
                # but it's not too bad if we mistake a random indirect call
                # for the one to 'instantiate'.
                from rpython.rtyper.lltypesystem import rclass
                CALLTYPE = op.args[0].concretetype
                if (op.opname == 'indirect_call' and len(op.args) == 2 and
                    CALLTYPE == rclass.OBJECT_VTABLE.instantiate):
                    graphs = list(self._graphs_of_all_instantiate())
            #
            if graphs is not None:
                result = []
                for graph in graphs:
                    if is_candidate(graph):
                        result.append(graph)
                if result:
                    return result  # common case: look inside these graphs,
                                   # and ignore the others if there are any
        # residual call case: we don't need to look into any graph
        return None
    def _graphs_of_all_instantiate(self):
        """Yield the graph of every non-null 'instantiate' vtable slot."""
        for vtable in self.rtyper.lltype2vtable.values():
            if vtable.instantiate:
                yield vtable.instantiate._obj.graph
    def guess_call_kind(self, op, is_candidate=None):
        """Classify a call op as 'recursive', 'residual', 'builtin',
        or 'regular' (the last meaning the JIT follows into the target)."""
        if op.opname == 'direct_call':
            funcptr = op.args[0].value
            # A direct call back into a portal runner is a recursive JIT call.
            if self.jitdriver_sd_from_portal_runner_ptr(funcptr) is not None:
                return 'recursive'
            funcobj = get_funcobj(funcptr)
            if getattr(funcobj, 'graph', None) is None:
                return 'residual'
            targetgraph = funcobj.graph
            if hasattr(targetgraph, 'func'):
                # must never produce JitCode for a function with
                # _gctransformer_hint_close_stack_ set!
                if getattr(targetgraph.func,
                           '_gctransformer_hint_close_stack_', False):
                    return 'residual'
                if hasattr(targetgraph.func, 'oopspec'):
                    return 'builtin'
        elif op.opname == 'oosend':
            SELFTYPE, methname, opargs = support.decompose_oosend(op)
            if SELFTYPE.oopspec_name is not None:
                return 'builtin'
        # No candidate target graph -> the call stays residual.
        if self.graphs_from(op, is_candidate) is None:
            return 'residual'
        return 'regular'
    def is_candidate(self, graph):
        """Return whether `graph` was selected by find_all_graphs()."""
        # used only after find_all_graphs()
        return graph in self.candidate_graphs
    def grab_initial_jitcodes(self):
        """Create the main JitCode of each jitdriver's portal graph."""
        for jd in self.jitdrivers_sd:
            jd.mainjitcode = self.get_jitcode(jd.portal_graph)
            jd.mainjitcode.is_portal = True
def enum_pending_graphs(self):
while self.unfinished_graphs:
graph = self.unfinished_graphs.pop()
yield graph, self.jitcodes[graph]
def get_jitcode(self, graph, called_from=None):
# 'called_from' is only one of the callers, used for debugging.
try:
return self.jitcodes[graph]
except KeyError:
# must never produce JitCode for a function with
# _gctransformer_hint_close_stack_ set!
if hasattr(graph, 'func') and getattr(graph.func,
'_gctransformer_hint_close_stack_', False):
raise AssertionError(
'%s has _gctransformer_hint_close_stack_' % (graph,))
#
fnaddr, calldescr = self.get_jitcode_calldescr(graph)
jitcode = JitCode(graph.name, fnaddr, calldescr,
called_from=called_from)
self.jitcodes[graph] = jitcode
self.unfinished_graphs.append(graph)
return jitcode
    def get_jitcode_calldescr(self, graph):
        """Return the calldescr that describes calls to the 'graph'.

        This returns a calldescr that is appropriate to attach to the
        jitcode corresponding to 'graph'.  It has no extra effectinfo,
        because it is not needed there; it is only used by the blackhole
        interp to really do the call corresponding to 'inline_call' ops.
        """
        fnptr = self.rtyper.type_system.getcallable(graph)
        FUNC = get_functype(lltype.typeOf(fnptr))
        # only the lltype type system is supported here
        assert self.rtyper.type_system.name == "lltypesystem"
        fnaddr = llmemory.cast_ptr_to_adr(fnptr)
        # Void arguments do not appear in the low-level call descriptor
        NON_VOID_ARGS = [ARG for ARG in FUNC.ARGS if ARG is not lltype.Void]
        calldescr = self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS),
                                         FUNC.RESULT, EffectInfo.MOST_GENERAL)
        return (fnaddr, calldescr)
    def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE,
                     extraeffect=None):
        """Return the calldescr that describes all calls done by 'op'.

        This returns a calldescr that we can put in the corresponding
        call operation in the calling jitcode.  It gets an effectinfo
        describing the effect of the call: which field types it may
        change, whether it can force virtualizables, whether it can
        raise, etc.
        """
        NON_VOID_ARGS = [x.concretetype for x in op.args[1:]
                         if x.concretetype is not lltype.Void]
        RESULT = op.result.concretetype
        # check the number and type of arguments
        FUNC = get_functype(op.args[0].concretetype)
        ARGS = FUNC.ARGS
        assert NON_VOID_ARGS == [T for T in ARGS if T is not lltype.Void]
        assert RESULT == FUNC.RESULT
        # ok
        # get the 'elidable' and 'loopinvariant' flags from the function object
        elidable = False
        loopinvariant = False
        call_release_gil_target = llmemory.NULL
        if op.opname == "direct_call":
            funcobj = get_funcobj(op.args[0].value)
            assert getattr(funcobj, 'calling_conv', 'c') == 'c', (
                "%r: getcalldescr() with a non-default call ABI" % (op,))
            func = getattr(funcobj, '_callable', None)
            elidable = getattr(func, "_elidable_function_", False)
            loopinvariant = getattr(func, "_jit_loop_invariant_", False)
            if loopinvariant:
                assert not NON_VOID_ARGS, ("arguments not supported for "
                                           "loop-invariant function!")
            if getattr(func, "_call_aroundstate_target_", None):
                call_release_gil_target = func._call_aroundstate_target_
                call_release_gil_target = llmemory.cast_ptr_to_adr(
                    call_release_gil_target)
        # build the extraeffect
        random_effects = self.randomeffects_analyzer.analyze(op)
        if random_effects:
            extraeffect = EffectInfo.EF_RANDOM_EFFECTS
        # random_effects implies can_invalidate
        can_invalidate = random_effects or self.quasiimmut_analyzer.analyze(op)
        # only compute a default extraeffect if the caller did not force one
        if extraeffect is None:
            if self.virtualizable_analyzer.analyze(op):
                extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
            elif loopinvariant:
                extraeffect = EffectInfo.EF_LOOPINVARIANT
            elif elidable:
                if self._canraise(op):
                    extraeffect = EffectInfo.EF_ELIDABLE_CAN_RAISE
                else:
                    extraeffect = EffectInfo.EF_ELIDABLE_CANNOT_RAISE
            elif self._canraise(op):
                extraeffect = EffectInfo.EF_CAN_RAISE
            else:
                extraeffect = EffectInfo.EF_CANNOT_RAISE
        #
        effectinfo = effectinfo_from_writeanalyze(
            self.readwrite_analyzer.analyze(op), self.cpu, extraeffect,
            oopspecindex, can_invalidate, call_release_gil_target)
        #
        assert effectinfo is not None
        if elidable or loopinvariant:
            # elidable/loop-invariant calls must not force virtualizables
            assert extraeffect != EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
            # XXX this should also say assert not can_invalidate, but
            # it can't because our analyzer is not good enough for now
            # (and getexecutioncontext() can't really invalidate)
        #
        return self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS), RESULT,
                                    effectinfo)
def _canraise(self, op):
if op.opname == 'pseudo_call_cannot_raise':
return False
try:
return self.raise_analyzer.can_raise(op)
except lltype.DelayedPointer:
return True # if we need to look into the delayed ptr that is
# the portal, then it's certainly going to raise
def calldescr_canraise(self, calldescr):
effectinfo = calldescr.get_extra_info()
return effectinfo.check_can_raise()
def jitdriver_sd_from_portal_graph(self, graph):
for jd in self.jitdrivers_sd:
if jd.portal_graph is graph:
return jd
return None
def jitdriver_sd_from_portal_runner_ptr(self, funcptr):
for jd in self.jitdrivers_sd:
if funcptr is jd.portal_runner_ptr:
return jd
return None
def jitdriver_sd_from_jitdriver(self, jitdriver):
for jd in self.jitdrivers_sd:
if jd.jitdriver is jitdriver:
return jd
return None
def get_vinfo(self, VTYPEPTR):
seen = set()
for jd in self.jitdrivers_sd:
if jd.virtualizable_info is not None:
if jd.virtualizable_info.is_vtypeptr(VTYPEPTR):
seen.add(jd.virtualizable_info)
if seen:
assert len(seen) == 1
return seen.pop()
else:
return None
def could_be_green_field(self, GTYPE, fieldname):
GTYPE_fieldname = (GTYPE, fieldname)
for jd in self.jitdrivers_sd:
if jd.greenfield_info is not None:
if GTYPE_fieldname in jd.greenfield_info.green_fields:
return True
return False
| 1.921875 | 2 |
python/occurence.py | albandewilde/huffman_coding_tp | 0 | 12773118 | <gh_stars>0
from typing import Dict
from to_bin import read_file, write_to_file
import json
def get_occurence(occurence, letter):
if letter not in occurence:
occurence[letter] = 1
else:
occurence[letter] += 1
return occurence
def text_to_bin(content: str, file: str, file_output:str) -> None:
bin_str = read_file(file)
dico: Dict[str, str] = json.loads(bin_str)
file_content: str = ''
for letter in content:
file_content += dico[letter]
write_to_file(file_content, file_output)
| 3.5625 | 4 |
reframe/core/schedulers/pjm.py | LorienLV/reframe | 0 | 12773119 | <reponame>LorienLV/reframe
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import functools
import itertools
import re
import time
import reframe.core.runtime as rt
import reframe.core.schedulers as sched
import reframe.utility.osext as osext
from reframe.core.backends import register_scheduler
from reframe.core.exceptions import (JobError,
JobSchedulerError)
from reframe.utility import seconds_to_hms
# Map pjstat's short state codes to the descriptive names used by the
# pjm_state_* predicates below.
# BUGFIX: the key 'CCL' was listed twice; the second entry silently
# overwrote 'EXITED_BY_INTERRUPTION'.  Per the PJM documentation the
# "fixed state due to error" code is 'ERR'.
JOB_STATES = {
    'ACC': 'ACCEPTED',
    'RJT': 'REJECTED',
    'QUE': 'QUEUED',
    'RNA': 'ACQUIRING_RESOURCES',
    'RNP': 'EXECUTING_PROLOGE',
    'RUN': 'EXECUTING',
    'RNE': 'EXECUTING_EPILOGUE',
    'RNO': 'TERMINATING',
    'EXT': 'EXITED',
    'CCL': 'EXITED_BY_INTERRUPTION',
    'HLD': 'FIXED_STATE_DUE_TO_USER',
    'ERR': 'FIXED_STATE_DUE_TO_ERROR',
}
def pjm_state_completed(state):
    """Return True iff every comma-separated component of *state* is a
    terminal PJM state; False for an empty/None state."""
    terminal_states = {
        'REJECTED',
        'EXITED',
        'EXITED_BY_INTERRUPTION'
    }
    if not state:
        return False
    return all(part in terminal_states for part in state.split(','))
def pjm_state_pending(state):
    """Return True iff any comma-separated component of *state* is a
    pending PJM state.

    Returns False for an empty/None state (previously this fell off the
    end and returned None implicitly, unlike pjm_state_completed).
    """
    pending_states = {
        'ACCEPTED',
        'QUEUED',
        'ACQUIRING_RESOURCES',
        'FIXED_STATE_DUE_TO_USER',
        'FIXED_STATE_DUE_TO_ERROR',
    }
    if state:
        return any(s in pending_states for s in state.split(','))

    return False
# Run an external command and raise an error if it exits with non-zero status.
_run_strict = functools.partial(osext.run_command, check=True)
class _PJMJob(sched.Job):
    """PJM-specific job that tracks whether a cancel request was issued."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # set to True by the scheduler once pjdel has been issued for this job
        self._is_cancelling = False
    @property
    def is_cancelling(self):
        """Whether a cancellation request has been sent for this job."""
        return self._is_cancelling
@register_scheduler('pjm')
class PjmJobScheduler(sched.JobScheduler):
    """ReFrame scheduler backend for the PJM batch system
    (``pjsub``/``pjstat``/``pjdel``)."""

    def __init__(self):
        self._prefix = '#PJM'
        # submission timeout and node-option usage come from the site config
        self._submit_timeout = rt.runtime().get_option(
            f'schedulers/@{self.registered_name}/job_submit_timeout'
        )
        self._use_nodes_opt = rt.runtime().get_option(
            f'schedulers/@{self.registered_name}/use_nodes_option'
        )

    def make_job(self, *args, **kwargs):
        """Create a PJM-specific job instance."""
        return _PJMJob(*args, **kwargs)

    def _format_option(self, option):
        """Prefix a raw scheduler option with the ``#PJM`` directive marker."""
        return self._prefix + ' ' + option

    def emit_preamble(self, job):
        """Return the list of ``#PJM`` directives for the job script."""
        num_tasks_per_node = job.num_tasks_per_node or 1
        preamble = [
            self._format_option('-N "%s"' % job.name),
            self._format_option('--mpi "proc=%d,max-proc-per-node=%d"' %
                                (job.num_tasks, num_tasks_per_node))
        ]

        outfile_fmt = '-o "%s"' % job.stdout
        errfile_fmt = '-e "%s"' % job.stderr
        preamble += [self._format_option(outfile_fmt),
                     self._format_option(errfile_fmt)]
        if job.time_limit is not None:
            h, m, s = seconds_to_hms(job.time_limit)
            preamble.append(
                self._format_option('-L elapse=%d:%d:%d' % (h, m, s))
            )

        # setting -L node >= 1 is the same as --exclusive in SLURM.
        if self._use_nodes_opt:
            num_nodes = job.num_tasks // num_tasks_per_node
            preamble.append(self._format_option('-L node=%d' % num_nodes))

        for opt in job.options + job.cli_options:
            preamble.append(self._format_option(opt))

        # Filter out empty statements before returning
        return list(filter(None, preamble))

    def submit(self, job):
        """Submit the job script with ``pjsub`` and record the job id."""
        cmd = f'pjsub {job.script_filename}'
        completed = _run_strict(cmd, timeout=self._submit_timeout)
        jobid_match = re.search(
            r'\[INFO\] PJM [0-9]+ pjsub Job (?P<jobid>\d+) submitted.',
            completed.stdout
        )
        if not jobid_match:
            raise JobSchedulerError(
                'could not retrieve the job id of the submitted job'
            )

        job._jobid = jobid_match.group('jobid')
        job._submit_time = time.time()

    def allnodes(self):
        raise NotImplementedError('PJM backend does not support node listing')

    def filternodes(self, job, nodes):
        raise NotImplementedError('PJM backend does not support '
                                  'node filtering')

    def _cancel_if_pending_too_long(self, job):
        """Cancel the job if it stayed pending beyond its max_pending_time."""
        if not job.max_pending_time or not pjm_state_pending(job.state):
            return

        t_pending = time.time() - job.submit_time
        if t_pending >= job.max_pending_time:
            self.log('maximum pending time for job exceeded; cancelling it')
            self.cancel(job)
            job._exception = JobError('maximum pending time exceeded',
                                      job.jobid)

    def poll(self, *jobs):
        '''Update the status of the jobs.'''
        if jobs:
            # Filter out non-jobs
            jobs = [job for job in jobs if job is not None]

        if not jobs:
            return

        for job in jobs:
            completed = osext.run_command(f'pjstat -H -S {job.jobid}')
            stdout = completed.stdout
            state_match = re.search(
                r'^\s*STATE\s*:\s*(?P<state>[A-Z]+)', stdout, re.MULTILINE
            )
            # pjstat -H may not report the job yet; assume it is still queued
            state = state_match.group('state') if state_match else 'QUE'
            job._state = JOB_STATES[state]
            self._cancel_if_pending_too_long(job)
            # BUGFIX: pjm_state_completed() expects the long state names of
            # JOB_STATES, not the short pjstat codes, and the regexes below
            # must search the command's stdout, not the CompletedProcess
            # object itself.
            if pjm_state_completed(job._state):
                exitcode_match = re.search(
                    r'^\s*EXIT CODE\s*:\s*(?P<code>\d+)', stdout, re.MULTILINE
                )
                if exitcode_match:
                    job._exitcode = int(exitcode_match.group('code'))

                completion_time_match = re.search(
                    r'^\s*JOB END DATE\s*:\s*(?P<date>'
                    r'[0-9]{4}\/[0-9]{2}\/[0-9]{2}\s+'
                    r'[0-9]{2}:[0-9]{2}:[0-9]{2})',
                    stdout, re.MULTILINE
                )
                if completion_time_match:
                    job._completion_time = completion_time_match.group('date')

    def wait(self, job):
        """Block, polling with a 1/2/3-second backoff, until the job finishes."""
        # Quickly return in case we have finished already
        if self.finished(job):
            return

        intervals = itertools.cycle([1, 2, 3])
        while not self.finished(job):
            self.poll(job)
            time.sleep(next(intervals))

    def cancel(self, job):
        """Request job deletion through ``pjdel``."""
        _run_strict(f'pjdel {job.jobid}', timeout=self._submit_timeout)
        job._is_cancelling = True

    def finished(self, job):
        """Return True if the job completed; re-raise any stored exception."""
        if job.exception:
            raise job.exception

        return pjm_state_completed(job.state)
| 1.75 | 2 |
demo/demo.py | nielsonf/hello_world | 0 | 12773120 | <filename>demo/demo.py
def cube(a):
    """Cube the number a.

    Args:
        a (float): The number to be cubed.

    Returns:
        float: a raised to the third power.
    """
    return a**3
| 3.59375 | 4 |
literal/apps/authentication/dto.py | spanickroon/Text-From-Photo-Django-API | 0 | 12773121 | import typing
from pydantic import BaseModel
class AuthenticationDTO(BaseModel):
    """Loose authentication payload: every field is optional."""
    username: typing.Optional[str]
    email: typing.Optional[str]
    token: typing.Optional[str]
    password: typing.Optional[str]
class RegisterDTO(BaseModel):
    """Registration payload: a username together with its token."""
    username: str
    token: str
username: str
token: str
| 2.390625 | 2 |
python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py | L-Net-1992/Paddle | 11 | 12773122 | <gh_stars>10-100
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
# All tests below build static-graph programs with float64 weights
# (presumably for tighter numeric tolerances against the NumPy reference).
paddle.framework.set_default_dtype("float64")
paddle.enable_static()
import numpy as np
import unittest
from convert import convert_params_for_cell_static
from rnn_numpy import SimpleRNNCell, LSTMCell, GRUCell
class TestSimpleRNNCell(unittest.TestCase):
    """Compare paddle.nn.SimpleRNNCell (static graph) against the NumPy
    reference SimpleRNNCell, with and without an explicit initial state."""
    def __init__(self, bias=True, place="cpu"):
        super(TestSimpleRNNCell, self).__init__(methodName="runTest")
        self.bias = bias
        self.place = paddle.CPUPlace() if place == "cpu" \
            else paddle.CUDAPlace(0)
    def setUp(self):
        """Build the NumPy cell and an equivalent static-graph paddle cell,
        then copy the NumPy parameters into the paddle program's scope."""
        rnn1 = SimpleRNNCell(16, 32, bias=self.bias)
        mp = paddle.static.Program()
        sp = paddle.static.Program()
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                rnn2 = paddle.nn.SimpleRNNCell(16,
                                               32,
                                               bias_ih_attr=self.bias,
                                               bias_hh_attr=self.bias)
        place = self.place
        exe = paddle.static.Executor(place)
        scope = paddle.fluid.Scope()
        with paddle.static.scope_guard(scope):
            exe.run(sp)
            # align the paddle parameters with the NumPy reference cell
            convert_params_for_cell_static(rnn1, rnn2, place)
        self.mp = mp
        self.sp = sp
        self.rnn1 = rnn1
        self.rnn2 = rnn2
        self.executor = exe
        self.scope = scope
    def test_with_initial_state(self):
        """Feed an explicit previous hidden state and compare outputs."""
        mp = self.mp.clone()
        sp = self.sp
        rnn1 = self.rnn1
        rnn2 = self.rnn2
        exe = self.executor
        scope = self.scope
        x = np.random.randn(4, 16)
        prev_h = np.random.randn(4, 32)
        y1, h1 = rnn1(x, prev_h)
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                init_h = paddle.fluid.data(
                    "init_h", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data, init_h)
        feed_dict = {x_data.name: x, init_h.name: prev_h}
        with paddle.static.scope_guard(scope):
            y2, h2 = exe.run(mp, feed=feed_dict, fetch_list=[y, h])
        np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
    def test_with_zero_state(self):
        """Omit the initial state (implicitly zero) and compare outputs."""
        mp = self.mp.clone()
        sp = self.sp
        rnn1 = self.rnn1
        rnn2 = self.rnn2
        exe = self.executor
        scope = self.scope
        x = np.random.randn(4, 16)
        y1, h1 = rnn1(x)
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data)
        feed_dict = {x_data.name: x}
        with paddle.static.scope_guard(scope):
            y2, h2 = exe.run(mp,
                             feed=feed_dict,
                             fetch_list=[y, h],
                             use_prune=True)
        np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
    def runTest(self):
        # custom entry point: this TestCase is driven by load_tests() below
        self.test_with_initial_state()
        self.test_with_zero_state()
class TestGRUCell(unittest.TestCase):
    """Compare paddle.nn.GRUCell (static graph) against the NumPy
    reference GRUCell, with and without an explicit initial state."""
    def __init__(self, bias=True, place="cpu"):
        super(TestGRUCell, self).__init__(methodName="runTest")
        self.bias = bias
        self.place = paddle.CPUPlace() if place == "cpu" \
            else paddle.CUDAPlace(0)
    def setUp(self):
        """Build the NumPy cell and an equivalent static-graph paddle cell,
        then copy the NumPy parameters into the paddle program's scope."""
        rnn1 = GRUCell(16, 32, bias=self.bias)
        mp = paddle.static.Program()
        sp = paddle.static.Program()
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                rnn2 = paddle.nn.GRUCell(16,
                                         32,
                                         bias_ih_attr=self.bias,
                                         bias_hh_attr=self.bias)
        place = self.place
        exe = paddle.static.Executor(place)
        scope = paddle.fluid.Scope()
        with paddle.static.scope_guard(scope):
            exe.run(sp)
            # align the paddle parameters with the NumPy reference cell
            convert_params_for_cell_static(rnn1, rnn2, place)
        self.mp = mp
        self.sp = sp
        self.rnn1 = rnn1
        self.rnn2 = rnn2
        self.place = place
        self.executor = exe
        self.scope = scope
    def test_with_initial_state(self):
        """Feed an explicit previous hidden state and compare outputs."""
        mp = self.mp.clone()
        sp = self.sp
        rnn1 = self.rnn1
        rnn2 = self.rnn2
        exe = self.executor
        scope = self.scope
        x = np.random.randn(4, 16)
        prev_h = np.random.randn(4, 32)
        y1, h1 = rnn1(x, prev_h)
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                init_h = paddle.fluid.data(
                    "init_h", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data, init_h)
        feed_dict = {x_data.name: x, init_h.name: prev_h}
        with paddle.static.scope_guard(scope):
            y2, h2 = exe.run(mp, feed=feed_dict, fetch_list=[y, h])
        np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
    def test_with_zero_state(self):
        """Omit the initial state (implicitly zero) and compare outputs."""
        mp = self.mp.clone()
        sp = self.sp
        rnn1 = self.rnn1
        rnn2 = self.rnn2
        exe = self.executor
        scope = self.scope
        x = np.random.randn(4, 16)
        y1, h1 = rnn1(x)
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data)
        feed_dict = {x_data.name: x}
        with paddle.static.scope_guard(scope):
            y2, h2 = exe.run(mp,
                             feed=feed_dict,
                             fetch_list=[y, h],
                             use_prune=True)
        np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
    def runTest(self):
        # custom entry point: this TestCase is driven by load_tests() below
        self.test_with_initial_state()
        self.test_with_zero_state()
class TestLSTMCell(unittest.TestCase):
    """Compare paddle.nn.LSTMCell (static graph) against the NumPy
    reference LSTMCell, checking both hidden and cell states."""
    def __init__(self, bias=True, place="cpu"):
        super(TestLSTMCell, self).__init__(methodName="runTest")
        self.bias = bias
        self.place = paddle.CPUPlace() if place == "cpu" \
            else paddle.CUDAPlace(0)
    def setUp(self):
        """Build the NumPy cell and an equivalent static-graph paddle cell,
        then copy the NumPy parameters into the paddle program's scope."""
        rnn1 = LSTMCell(16, 32, bias=self.bias)
        mp = paddle.static.Program()
        sp = paddle.static.Program()
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                rnn2 = paddle.nn.LSTMCell(16,
                                          32,
                                          bias_ih_attr=self.bias,
                                          bias_hh_attr=self.bias)
        place = self.place
        exe = paddle.static.Executor(place)
        scope = paddle.fluid.Scope()
        with paddle.static.scope_guard(scope):
            exe.run(sp)
            # align the paddle parameters with the NumPy reference cell
            convert_params_for_cell_static(rnn1, rnn2, place)
        self.mp = mp
        self.sp = sp
        self.rnn1 = rnn1
        self.rnn2 = rnn2
        self.place = place
        self.executor = exe
        self.scope = scope
    def test_with_initial_state(self):
        """Feed explicit previous hidden and cell states and compare both."""
        mp = self.mp.clone()
        sp = self.sp
        rnn1 = self.rnn1
        rnn2 = self.rnn2
        exe = self.executor
        scope = self.scope
        x = np.random.randn(4, 16)
        prev_h = np.random.randn(4, 32)
        prev_c = np.random.randn(4, 32)
        y1, (h1, c1) = rnn1(x, (prev_h, prev_c))
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                init_h = paddle.fluid.data(
                    "init_h", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
                init_c = paddle.fluid.data(
                    "init_c", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, (h, c) = rnn2(x_data, (init_h, init_c))
        feed_dict = {x_data.name: x, init_h.name: prev_h, init_c.name: prev_c}
        with paddle.static.scope_guard(scope):
            y2, h2, c2 = exe.run(mp, feed=feed_dict, fetch_list=[y, h, c])
        np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(c1, c2, atol=1e-8, rtol=1e-5)
    def test_with_zero_state(self):
        """Omit the initial state (implicitly zero) and compare both states."""
        mp = self.mp.clone()
        sp = self.sp
        rnn1 = self.rnn1
        rnn2 = self.rnn2
        exe = self.executor
        scope = self.scope
        x = np.random.randn(4, 16)
        y1, (h1, c1) = rnn1(x)
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, (h, c) = rnn2(x_data)
        feed_dict = {x_data.name: x}
        with paddle.static.scope_guard(scope):
            y2, h2, c2 = exe.run(mp,
                                 feed=feed_dict,
                                 fetch_list=[y, h, c],
                                 use_prune=True)
        np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(c1, c2, atol=1e-8, rtol=1e-5)
    def runTest(self):
        # custom entry point: this TestCase is driven by load_tests() below
        self.test_with_initial_state()
        self.test_with_zero_state()
def load_tests(loader, tests, pattern):
    """Build the suite: each cell test class x bias on/off x available device."""
    devices = ["cpu"]
    if paddle.fluid.is_compiled_with_cuda():
        devices.append("gpu")
    suite = unittest.TestSuite()
    for bias in (True, False):
        for device in devices:
            suite.addTest(TestSimpleRNNCell(bias, device))
            suite.addTest(TestGRUCell(bias, device))
            suite.addTest(TestLSTMCell(bias, device))
    return suite
| 1.929688 | 2 |
test/test_schema.py | blawson/dataforj | 1 | 12773123 | import unittest
import yaml
from pyspark.sql import *
from dataforj import schema
from test.test_samples import flow_simple, simple_yaml_text, flow_complex
city_yaml_text = '''
- name: city
tests:
- not_null
- accepted_values: ['Amsterdam', 'Dublin', 'Frankfurt']
'''
flag_yaml_text = '''
- name: flag
tests:
- not_null
'''
combined_yaml_text = f'{city_yaml_text}\n{flag_yaml_text}'
# Shared local Spark session for all tests in this module.
spark = SparkSession \
    .builder \
    .appName("Unit Test") \
    .getOrCreate()
# Fixture that satisfies both column schemas.
ut_step_df = spark.createDataFrame(
    [
        ('Amsterdam', True),
        ('Dublin', False),
    ],
    ['city', 'flag']
)
# Fixture with a city outside the accepted-values list.
ut_bad_city_df = spark.createDataFrame(
    [
        ('Amsterdam', True),
        ('New York', False)
    ],
    ['city', 'flag']
)
# Fixture with a null flag, violating the not_null test.
ut_bad_flag_df = spark.createDataFrame(
    [
        ('Amsterdam', True),
        ('New York', None)
    ],
    ['city', 'flag']
)
class SchemaTest(unittest.TestCase):
    """Validate schema checks against the Spark fixtures defined above."""
    def test_schema_two_fields(self):
        '''Test to make sure two fields can be validated in the same YAML'''
        schema_yaml = yaml.safe_load(combined_yaml_text)
        schema.check_schema_yaml('ut_step_df', ut_step_df, schema_yaml)
    def test_positive_accepted_values(self):
        '''Accepted-values check passes on a clean DataFrame'''
        schema_yaml = yaml.safe_load(city_yaml_text)
        schema.check_schema_yaml('ut_step_df', ut_step_df, schema_yaml)
    def test_negative_accepted_values(self):
        '''Accepted-values check fails with a descriptive message'''
        schema_yaml = yaml.safe_load(city_yaml_text)
        with self.assertRaises(AssertionError) as excinfo:
            schema.check_schema_yaml('ut_bad_city_df', ut_bad_city_df, schema_yaml)
        self.assertEqual('Output of step [ut_bad_city_df] column [city] should only have values '
                         'in the accepted list [Amsterdam, Dublin, Frankfurt]. These values '
                         'were also found [\'New York\'].',
                         str(excinfo.exception))
    def test_positive_null(self):
        '''not_null check passes on a clean DataFrame'''
        schema_yaml = yaml.safe_load(flag_yaml_text)
        schema.check_schema_yaml('ut_step_df', ut_step_df, schema_yaml)
    def test_negative_not_null(self):
        '''not_null check fails with a descriptive message.

        BUGFIX: this method was previously also named
        test_negative_accepted_values, silently shadowing the real
        accepted-values negative test so it never ran.
        '''
        schema_yaml = yaml.safe_load(flag_yaml_text)
        with self.assertRaises(AssertionError) as excinfo:
            schema.check_schema_yaml('ut_bad_flag_df', ut_bad_flag_df, schema_yaml)
        self.assertEqual('Output of step ut_bad_flag_df column flag should not be null.',
                         str(excinfo.exception))
    def test_check_schema(self):
        '''Test to make sure we can read in the YAML file from the example project'''
        schema.check_schema('ut_step_df', ut_step_df, 'example/schemas/filter_schema.yaml')
if __name__ == "__main__":
    # NOTE(review): this runs only the single example-project integration
    # test, not the whole suite -- consider unittest.main() if that is not
    # intentional.
    SchemaTest().test_check_schema()
| 2.9375 | 3 |
dgcnn/main.py | linhaojia13/GCN_pointcloud | 15 | 12773124 | <filename>dgcnn/main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Contact: <EMAIL>
@File: main.py
@Time: 2018/10/13 10:39 PM
"""
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from data import ModelNet40
from model import DGCNN, FASTDGCNN
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
import random
def _init_():
    """Create the checkpoint directory tree for the current experiment and
    back up the source files into it.

    Relies on the module-level 'args' parsed in the __main__ block.
    """
    # exist_ok=True creates all intermediate directories in one call and
    # avoids the check-then-create race of os.path.exists() + os.makedirs()
    os.makedirs('checkpoints/' + args.exp_name + '/' + 'models', exist_ok=True)
    # NOTE(review): 'cp' makes these backups POSIX-only; shutil.copy would
    # be portable
    os.system('cp main.py checkpoints'+'/'+args.exp_name+'/'+'main.py.backup')
    os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
    os.system('cp util.py checkpoints' + '/' + args.exp_name + '/' + 'util.py.backup')
    os.system('cp data.py checkpoints' + '/' + args.exp_name + '/' + 'data.py.backup')
def setup_seed(seed):
    """Seed every RNG in use (python, numpy, torch CPU and all GPUs) and
    make cuDNN deterministic for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def train(args, io):
    """Train the selected model on ModelNet40, evaluating on the test split
    after every epoch and checkpointing the best test accuracy.

    args -- parsed command-line namespace (see the __main__ block)
    io   -- IOStream used for logging
    """
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                              batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device("cuda" if args.cuda else "cpu")
    #Try to load models
    if args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'fastdgcnn':
        model = FASTDGCNN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))
    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    if args.use_sgd:
        print("Use SGD")
        # note: SGD uses 100x the configured base learning rate
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            opt.load_state_dict(checkpoint['opt'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    #scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr, last_epoch=args.start_epoch-1)
    scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=20, gamma=0.8)#0.7 0.8
    #scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9825, last_epoch=args.start_epoch-1)
    criterion = cal_loss
    best_test_acc = 0
    for epoch in range(args.start_epoch, args.epochs):
        #scheduler.step()
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            # model expects channels-first point clouds: (B, 3, N)
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        # step the LR schedule once per epoch, after the optimizer updates
        scheduler.step()
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
                                                                                 train_loss*1.0/count,
                                                                                 metrics.accuracy_score(
                                                                                     train_true, train_pred),
                                                                                 metrics.balanced_accuracy_score(
                                                                                     train_true, train_pred))
        io.cprint(outstr)
        if epoch%10 == 0:
            # save running checkpoint per 10 epoch
            torch.save({'epoch': epoch + 1,
                        'arch': args.model,
                        'state_dict': model.state_dict(),
                        'opt' : opt.state_dict()},
                        'checkpoints/%s/models/checkpoint_latest.pth.tar' % args.exp_name)
        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
                                                                              test_loss*1.0/count,
                                                                              test_acc,
                                                                              avg_per_class_acc)
        io.cprint(outstr)
        # keep the checkpoint with the best test accuracy so far
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save({'epoch': epoch + 1,
                        'arch': args.model,
                        'state_dict': model.state_dict(),
                        'opt' : opt.state_dict()},
                        'checkpoints/%s/models/checkpoint_best.pth.tar' % args.exp_name)
def test(args, io):
    """Evaluate a trained checkpoint (given via --resume) on the ModelNet40
    test split and log the overall, balanced and per-class accuracies."""
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points),
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device("cuda" if args.cuda else "cpu")
    # BUGFIX: the old 'pointnet' branch referenced PointNet, which is never
    # imported in this file (NameError); use the same choices as train().
    if args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'fastdgcnn':
        model = FASTDGCNN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))
    model = nn.DataParallel(model)
    # restore the trained weights
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.eval()
    test_true = []
    test_pred = []
    # close the shape-names file instead of leaking the handle
    with open('data/modelnet40_ply_hdf5_2048/shape_names.txt') as f:
        SHAPE_NAMES = [line.rstrip() for line in f]
    NUM_CLASSES = 40
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    for data, label in test_loader:
        data, label = data.to(device), label.to(device).squeeze()
        # model expects channels-first point clouds: (B, 3, N)
        data = data.permute(0, 2, 1)
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
    io.cprint(outstr)
    # per-class accuracy breakdown
    for i in range(test_true.shape[0]):
        l = test_true[i]
        total_seen_class[l] += 1
        total_correct_class[l] += (test_pred[i] == l)
    # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class, dtype=float)
    for i, name in enumerate(SHAPE_NAMES):
        io.cprint('%10s:\t%0.3f' % (name, class_accuracies[i]))
if __name__ == "__main__":
    # Training settings
    parser = argparse.ArgumentParser(description='Point Cloud Recognition')
    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--model', type=str, default='dgcnn', metavar='N',
                        choices=['pointnet', 'dgcnn', 'semigcn','fastdgcnn'],
                        help='Model to use, [pointnet, dgcnn]')
    parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N',
                        choices=['modelnet40'])
    parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of batch)')
    parser.add_argument('--start_epoch', default=0, type=int,
                        help='manual epoch number (useful on restarts) (default: 0)')
    parser.add_argument('--epochs', type=int, default=250, metavar='N',
                        help='number of episode to train ')
    # NOTE(review): argparse with type=bool treats any non-empty string as
    # True ('--use_sgd False' enables SGD); consider action='store_true'.
    parser.add_argument('--use_sgd', type=bool, default=False,
                        help='Use SGD')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001, 0.1 if using sgd)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no_cuda', type=bool, default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--eval', type=bool, default=False,
                        help='evaluate the model')
    parser.add_argument('--num_points', type=int, default=1024,
                        help='num of points to use')
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='dropout rate')
    parser.add_argument('--emb_dims', type=int, default=1024, metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--k', type=int, default=20, metavar='N',
                        help='Num of nearest neighbors to use')
    parser.add_argument('--model_path', type=str, default='', metavar='N',
                        help='Pretrained model path')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to checkpoint (default: None)')
    parser.add_argument('--K', type=int, default=1,
                        help='filter scale (receptive field size), must be > 0; 1 for GCN, >1 for ChebNet')
    parser.add_argument('--adj_sq', action='store_true', default=False,
                        help='use A^2 instead of A as an adjacency matrix')
    parser.add_argument('--scale_identity', action='store_true', default=False,
                        help='use 2I instead of I for self connections')
    args = parser.parse_args()
    # create checkpoint dirs and back up sources before logging starts
    _init_()
    io = IOStream('checkpoints/' + args.exp_name + '/run.log')
    io.cprint(str(args))
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    setup_seed(args.seed)
    if args.cuda:
        io.cprint(
            'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
        torch.cuda.manual_seed(args.seed)
    else:
        io.cprint('Using CPU')
    if not args.eval:
        train(args, io)
    else:
        test(args, io)
| 1.960938 | 2 |
docs/assignment-3/src/histogram_1d.py | aligholamee/Patterns | 1 | 12773125 | <filename>docs/assignment-3/src/histogram_1d.py
# ========================================
# [] File Name : histogram_1d.py
#
# [] Creation Date : April 2018
#
# [] Author 2 : <NAME>
#
# ========================================
import matplotlib.pyplot as plt
import numpy as np
# Number of samples
NUM_SAMPLES = 100
# Bin size of the Histogram
BIN_SIZE = 0.5
# Known density parameters
MEAN = 5
STANDARD_DEVIATION = 3
RANGE_MIN = 1
RANGE_MAX = 20
# Generates random normal numbers in a range
def truncated_normal(mean, std, num_samples, min, max):
    """
    Draw *num_samples* values from N(mean, std) and map them into [min, max).

    NOTE: despite the name, out-of-range values are *wrapped* back into the
    interval with the modulo operator rather than re-sampled (true truncation).
    The ``min``/``max`` parameter names shadow the builtins but are kept for
    interface compatibility.
    """
    span = max - min
    raw = np.random.normal(loc=mean, scale=std, size=num_samples)
    return raw % span + min
# Implements saomple counting strategy
def sample_count_in_bins(samples, bin_size):
    """
    Count how many samples fall into each histogram bin.

    Returns a dict mapping the 1-based bin index (as a string) to the number
    of samples whose value lands in that bin.  Keys '1' and '2' are always
    present, even when their count is zero, mirroring the original seeding
    of the dictionary.
    """
    # Bins '1' and '2' are pre-seeded so they appear even when empty.
    counts = {'1': 0, '2': 0}
    for value in samples:
        # 1-based index of the bin this value belongs to.
        key = str(int(value / bin_size) + 1)
        counts[key] = counts.get(key, 0) + 1
    return counts
# Draws the density in matplotlib
def draw_density(range_min, range_max, which_bin, bin_size, num_samples_in_bin, hist_height_of_each_sample):
    """Shade one histogram bin as a rectangle of the estimated density.

    The bin's height is ``num_samples_in_bin * hist_height_of_each_sample``
    (i.e. k / (n * v)).  NOTE(review): ``range_max`` is accepted but never
    used, and the x-axis is hard-coded to [1, 21] below — presumably tied to
    RANGE_MIN/RANGE_MAX at module level; confirm before reusing elsewhere.
    """
    interval_low = range_min + which_bin * bin_size
    interval_high = interval_low + bin_size
    # GENERATE MANY POINTS!!!!  (dense grid so the rectangle edges look sharp)
    x = np.linspace(1, 21, 1000)
    plt.plot(x, list(map(lambda x: num_samples_in_bin*hist_height_of_each_sample if interval_low <= x <= interval_high else 0, x)), color='darkblue')
    plt.fill_between(x, list(map(lambda x: num_samples_in_bin*hist_height_of_each_sample if interval_low <= x <= interval_high else 0, x)), color='darkblue')
# Implemenets the density estimation method
def find_density(sample_count_dict, num_samples, bin_size):
    """Plot the estimated density from per-bin sample counts.

    Each sample contributes a constant height 1 / (n * v) to its bin, so a
    bin holding k samples has density k / (n * v).
    """
    # This is the height of density for each sample that can be calculated as heigh = 1 / (n * v)
    # v is bin size in this case
    # n is the number of all samples
    height_of_density_for_each_sample = 1 / (num_samples * bin_size)
    # Draw one shaded rectangle per bin of the histogram.
    for bin_number, sample_count in sample_count_dict.items():
        draw_density(RANGE_MIN, RANGE_MAX, int(bin_number), BIN_SIZE, sample_count, height_of_density_for_each_sample)
    # Display the plot
    plt.title('Density estimation of a normal distribution')
    plt.xlabel('Sample value')
    plt.ylabel('Estimated Density')
    plt.show()
# One dimensional array of data
# Draw a 1-D sample set from the (wrapped) normal distribution.
samples_1d = truncated_normal(MEAN, STANDARD_DEVIATION, NUM_SAMPLES, RANGE_MIN, RANGE_MAX)
# Count how many samples land in each histogram bin.
sample_counts_dict = sample_count_in_bins(samples_1d, BIN_SIZE)
# Estimate the density from the counts and plot it.
find_density(sample_counts_dict, NUM_SAMPLES, BIN_SIZE)
| 3.78125 | 4 |
src/joint_state_publisher_gui/src/joint_state_publisher_gui/__init__.py | MrDavidAlv/TATTOTRONIX | 0 | 12773126 | <gh_stars>0
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import math
import random
import rospy
from python_qt_binding.QtCore import pyqtSlot
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtCore import Signal
from python_qt_binding.QtGui import QFont
from python_qt_binding.QtWidgets import QApplication
from python_qt_binding.QtWidgets import QHBoxLayout
from python_qt_binding.QtWidgets import QLabel
from python_qt_binding.QtWidgets import QLineEdit
from python_qt_binding.QtWidgets import QPushButton
from python_qt_binding.QtWidgets import QSlider
from python_qt_binding.QtWidgets import QVBoxLayout
from python_qt_binding.QtWidgets import QGridLayout
from python_qt_binding.QtWidgets import QScrollArea
from python_qt_binding.QtWidgets import QSpinBox
from python_qt_binding.QtWidgets import QWidget
RANGE = 10000
class JointStatePublisherGui(QWidget):
    """Qt widget with one slider per free joint of a joint_state_publisher.

    Sliders are laid out in a resizable grid; buttons allow randomizing and
    centering all joints, and a spinbox changes the number of grid rows on
    the fly.
    """

    # Emitted when the external joint source updates, so slider refreshes
    # happen on the Qt thread.
    sliderUpdateTrigger = Signal()

    def __init__(self, title, jsp, num_rows=0):
        """Build the GUI for *jsp* (a JointStatePublisher-like object).

        ``num_rows == 0`` means one slider per row (vertical layout).
        """
        super(JointStatePublisherGui, self).__init__()
        self.setWindowTitle(title)
        self.jsp = jsp
        # Maps joint name -> {'slidervalue', 'display', 'slider', 'joint'}.
        self.joint_map = {}
        self.vlayout = QVBoxLayout(self)
        self.scrollable = QWidget()
        self.gridlayout = QGridLayout()
        self.scroll = QScrollArea()
        self.scroll.setWidgetResizable(True)
        self.jsp.set_source_update_cb(self.source_update_cb)
        font = QFont("Helvetica", 9, QFont.Bold)
        ### Generate sliders ###
        sliders = []
        for name in self.jsp.joint_list:
            if name not in self.jsp.free_joints:
                continue
            joint = self.jsp.free_joints[name]
            # Fixed joints (min == max) get no slider.
            if joint['min'] == joint['max']:
                continue
            joint_layout = QVBoxLayout()
            row_layout = QHBoxLayout()
            label = QLabel(name)
            label.setFont(font)
            row_layout.addWidget(label)
            display = QLineEdit("0.00")
            display.setAlignment(Qt.AlignRight)
            display.setFont(font)
            display.setReadOnly(True)
            row_layout.addWidget(display)
            joint_layout.addLayout(row_layout)
            slider = QSlider(Qt.Horizontal)
            slider.setFont(font)
            slider.setRange(0, RANGE)
            # NOTE(review): RANGE/2 is a float under Python 3; some Qt
            # bindings require an int here — confirm for the target Python.
            slider.setValue(RANGE/2)
            joint_layout.addWidget(slider)
            self.joint_map[name] = {'slidervalue': 0, 'display': display,
                                    'slider': slider, 'joint': joint}
            # Connect to the signal provided by QSignal
            slider.valueChanged.connect(lambda event,name=name: self.onValueChangedOne(name))
            sliders.append(joint_layout)
        # Determine number of rows to be used in grid
        self.num_rows = num_rows
        # if desired num of rows wasn't set, default behaviour is a vertical layout
        if self.num_rows == 0:
            self.num_rows = len(sliders)  # equals VBoxLayout
        # Generate positions in grid and place sliders there
        self.positions = self.generate_grid_positions(len(sliders), self.num_rows)
        for item, pos in zip(sliders, self.positions):
            self.gridlayout.addLayout(item, *pos)
        # Set zero positions read from parameters
        self.center()
        # Synchronize slider and displayed value
        self.sliderUpdate(None)
        # Set up a signal for updating the sliders based on external joint info
        self.sliderUpdateTrigger.connect(self.updateSliders)
        self.scrollable.setLayout(self.gridlayout)
        self.scroll.setWidget(self.scrollable)
        self.vlayout.addWidget(self.scroll)
        # Buttons for randomizing and centering sliders and
        # Spinbox for on-the-fly selecting number of rows
        self.randbutton = QPushButton('Randomize', self)
        self.randbutton.clicked.connect(self.randomize_event)
        self.vlayout.addWidget(self.randbutton)
        self.ctrbutton = QPushButton('Center', self)
        self.ctrbutton.clicked.connect(self.center_event)
        self.vlayout.addWidget(self.ctrbutton)
        self.maxrowsupdown = QSpinBox()
        self.maxrowsupdown.setMinimum(1)
        self.maxrowsupdown.setMaximum(len(sliders))
        self.maxrowsupdown.setValue(self.num_rows)
        self.maxrowsupdown.valueChanged.connect(self.reorggrid_event)
        self.vlayout.addWidget(self.maxrowsupdown)
        self.setLayout(self.vlayout)

    def source_update_cb(self):
        # Called from the joint source's thread; re-emit as a Qt signal so
        # slider updates happen on the GUI thread.
        self.sliderUpdateTrigger.emit()

    def onValueChangedOne(self, name):
        """Push a single slider's new value into the joint metadata."""
        # A slider value was changed, but we need to change the joint_info metadata.
        joint_info = self.joint_map[name]
        joint_info['slidervalue'] = joint_info['slider'].value()
        joint = joint_info['joint']
        joint['position'] = self.sliderToValue(joint_info['slidervalue'], joint)
        joint_info['display'].setText("%.2f" % joint['position'])

    @pyqtSlot()
    def updateSliders(self):
        """Qt-slot wrapper around :meth:`update_sliders`."""
        self.update_sliders()

    def update_sliders(self):
        """Refresh every slider from its joint's current position."""
        for name, joint_info in self.joint_map.items():
            joint = joint_info['joint']
            joint_info['slidervalue'] = self.valueToSlider(joint['position'],
                                                           joint)
            joint_info['slider'].setValue(joint_info['slidervalue'])

    def center_event(self, event):
        self.center()

    def center(self):
        """Move every slider to its joint's configured zero position."""
        rospy.loginfo("Centering")
        for name, joint_info in self.joint_map.items():
            joint = joint_info['joint']
            joint_info['slider'].setValue(self.valueToSlider(joint['zero'], joint))

    def reorggrid_event(self, event):
        # *event* is the new row count from the spinbox's valueChanged signal.
        self.reorganize_grid(event)

    def reorganize_grid(self, number_of_rows):
        """Re-lay-out the existing sliders into *number_of_rows* grid rows."""
        self.num_rows = number_of_rows

        # Remove items from layout (won't destroy them!)
        items = []
        for pos in self.positions:
            item = self.gridlayout.itemAtPosition(*pos)
            items.append(item)
            self.gridlayout.removeItem(item)

        # Generate new positions for sliders and place them in their new spots
        self.positions = self.generate_grid_positions(len(items), self.num_rows)
        for item, pos in zip(items, self.positions):
            self.gridlayout.addLayout(item, *pos)

    def generate_grid_positions(self, num_items, num_rows):
        """Return (row, col) pairs filling the grid column-by-column."""
        if num_rows == 0:
            return []
        positions = [(y, x) for x in range(int((math.ceil(float(num_items) / num_rows)))) for y in range(num_rows)]
        positions = positions[:num_items]
        return positions

    def randomize_event(self, event):
        self.randomize()

    def randomize(self):
        """Set every slider to a uniform random value within its joint limits."""
        rospy.loginfo("Randomizing")
        for name, joint_info in self.joint_map.items():
            joint = joint_info['joint']
            joint_info['slider'].setValue(
                self.valueToSlider(random.uniform(joint['min'], joint['max']), joint))

    def sliderUpdate(self, event):
        """Re-read all raw slider values, then resynchronize displays."""
        for name, joint_info in self.joint_map.items():
            joint_info['slidervalue'] = joint_info['slider'].value()
        self.update_sliders()

    def valueToSlider(self, value, joint):
        """Map a joint position in [min, max] to a slider tick in [0, RANGE]."""
        return (value - joint['min']) * float(RANGE) / (joint['max'] - joint['min'])

    def sliderToValue(self, slider, joint):
        """Map a slider tick in [0, RANGE] back to a joint position."""
        pctvalue = slider / float(RANGE)
        return joint['min'] + (joint['max']-joint['min']) * pctvalue
| 1.132813 | 1 |
backblaze/utils.py | WardPearce/aiob2 | 0 | 12773127 | from datetime import datetime, timedelta
from typing import Union
from .models.file import UploadUrlModel
from .cache import Cache
def format_route_name(name: str) -> str:
    """Drop every ``Route`` substring from *name* and lower-case the result.

    Parameters
    ----------
    name : str
        Route class name, e.g. ``"UploadRoute"``.

    Returns
    -------
    str
        e.g. ``"upload"``.
    """
    stripped = name.replace("Route", "")
    return stripped.lower()
class UploadUrlCache:
    """Caches b2 upload URLs (and multipart upload-part URLs) per bucket/file."""

    def __init__(self, bucket_id: str, file_id: str = None) -> None:
        """Used to handle cached upload URLs.

        Parameters
        ----------
        bucket_id : str
        file_id : str, optional
            by default None

        Notes
        -----
        If file_id passed Cache.upload_parts_urls is used
        instead of Cache.upload_urls.
        """

        if not file_id:
            self.upload_cache = Cache.upload_urls
            self.index = bucket_id
        else:
            # Part-upload URLs are keyed by bucket + file to stay unique.
            self.upload_cache = Cache.upload_parts_urls
            self.index = bucket_id + file_id

    def find(self) -> Union[None, UploadUrlModel]:
        """Looks for cached item, evicting it if expired.

        Returns
        -------
        UploadUrlModel
            The cached model, or None when missing/expired.
        """

        if self.index in self.upload_cache:
            if datetime.now() >= self.upload_cache[self.index]["expires"]:
                # Expired: drop the stale entry instead of returning it.
                self.upload_cache.pop(self.index, None)
            else:
                return self.upload_cache[self.index]["model"]

    def save(self, upload_model: UploadUrlModel) -> UploadUrlModel:
        """Saves upload model into cache.

        Parameters
        ----------
        upload_model : UploadUrlModel

        Returns
        -------
        UploadUrlModel
            The same model, for chaining.
        """

        # b2 upload URLs are valid for 24h; expire slightly early to be safe.
        self.upload_cache[self.index] = {
            "expires": datetime.now() + timedelta(hours=23, minutes=50),
            "model": upload_model
        }

        return upload_model

    def delete(self) -> None:
        """Deletes upload out of the cache.
        """

        self.upload_cache.pop(self.index, None)
def encode_name(name: str, encoding: str = "utf-8",
                replace: bool = True) -> str:
    """Normalise a name for b2: optionally replace spaces with dashes and
    round-trip through *encoding*.

    Parameters
    ----------
    name : str
    encoding : str, optional
        by default "utf-8"
    replace : bool, optional
        Replace spaces with ``-`` when True, by default True

    Returns
    -------
    str
    """
    cleaned = name.replace(" ", "-") if replace else name
    return cleaned.encode(encoding).decode(encoding)
| 2.5 | 2 |
programs/pgm14_14.py | danielsunzhongyuan/python_practice | 0 | 12773128 | <gh_stars>0
#
# This file contains the Python code from Program 14.14 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by <NAME>.
#
# Copyright (c) 2003 by <NAME>, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm14_14.txt
#
class SimpleRV(RandomVariable):
    """Random variable backed directly by the book's RandomNumberGenerator.

    NOTE: ``RandomVariable`` and ``RandomNumberGenerator`` are defined
    elsewhere in the book's framework; ``next`` here is presumably a
    class-level property of the generator — confirm against that module.
    """

    def getNext(self):
        # Delegate straight to the shared generator's next value.
        return RandomNumberGenerator.next
| 2.5 | 2 |
dpkt/sip.py | 0x4e38/dpkt | 1 | 12773129 | # $Id$
"""Session Initiation Protocol."""
import http
class Request(http.Request):
    """SIP request.

    Reuses dpkt's HTTP request parser with SIP-specific defaults; the
    name-mangled ``__methods``/``__proto`` presumably shadow the privates of
    ``http.Request`` to switch the accepted verbs and protocol string —
    confirm against dpkt's http module.
    """
    __hdr_defaults__ = {
        'method':'INVITE',
        'uri':'sip:<EMAIL>',
        'version':'2.0',
        'headers':{ 'To':'', 'From':'', 'Call-ID':'', 'CSeq':'', 'Contact':'' }
        }
    # Set of valid SIP request methods (RFC 3261 plus common extensions).
    __methods = dict.fromkeys((
        'ACK', 'BYE', 'CANCEL', 'INFO', 'INVITE', 'MESSAGE', 'NOTIFY',
        'OPTIONS', 'PRACK', 'PUBLISH', 'REFER', 'REGISTER', 'SUBSCRIBE',
        'UPDATE'
        ))
    __proto = 'SIP'
class Response(http.Response):
    """SIP response.

    Reuses dpkt's HTTP response parser with SIP defaults; ``__proto`` is
    name-mangled, presumably to override the parent's protocol string —
    confirm against dpkt's http module.
    """
    __hdr_defaults__ = {
        'version':'2.0',
        'status':'200',
        'reason':'OK',
        'headers':{ 'To':'', 'From':'', 'Call-ID':'', 'CSeq':'', 'Contact':'' }
        }
    __proto = 'SIP'
| 2.453125 | 2 |
pictures/urls.py | bintadam/My-Gallery | 0 | 12773130 | from django.urls import path
from . import views
from os import name
from django.conf.urls.static import static
from django.conf import settings
# URL routes for the pictures app.
urlpatterns=[
    # Landing page.
    path('',views.welcome,name ='welcome'),
    # Full gallery listing.
    path('gallery/',views.pictures,name='pictures'),
    # Keyword search over images.
    path('search/', views.search_results, name='search_results'),
    # Single-image detail view by primary key.
    path('picture/<int:image_id>/',views.picture,name='imageid'),
]

# Serve uploaded media directly only during development.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
test/OR-WE-514/main.py | eydam-prototyping/mp_modbus | 2 | 12773131 | import mp_modbus_master as mmm
import time
import struct
# Modbus RTU master on UART2 (ESP32-style pins) talking to an OR-WE-514 meter.
d1 = mmm.modbus_rtu_master(uart_no=2, parity=0, tx_pin=12, rx_pin=13, en_pin=32)

# Register map: name -> holding-register address, declared type, scaling gain
# and display unit (values read are divided by 'gain').
l = {
    "U" : {"register": 305, "desc": "Voltage", "type": "uint16", "gain": 100, "unit": "V"},
    "I" : {"register": 313, "desc": "Current", "type": "int32", "gain": 1000, "unit": "A"},
    "Power" : {"register": 320, "desc": "Power", "type": "int32", "gain": 1000, "unit": "kW"},
    "Power_R" : {"register": 328, "desc": "Reactive Power", "type": "int32", "gain": 1000, "unit": "VA"},
    "Power_A" : {"register": 336, "desc": "Apparent Power", "type": "int32", "gain": 1000, "unit": "VA"},
    "Energie_Total_aktiv" : {"register": 40960, "desc": "Total Energy", "type": "int32", "gain": 100, "unit": "kWh"},
}

# Poll every register three times, one second apart.
for i in range(3):
    time.sleep(1)
    for e in l:
        try:
            #time.sleep(0.1)
            #print(e)
            # Drain/echo any stale bytes left on the UART before the request.
            print(d1.uart.read(10))
            if l[e]["type"] == "uint16":
                f = d1.read_holding_registers_async(l[e]["register"], 1)
                #print(f)
                # NOTE(review): ">h" unpacks *signed* 16-bit although the map
                # declares uint16 — ">H" would match; confirm meter semantics.
                v = struct.unpack(">h", f.data)[0]/l[e]["gain"]
            if l[e]["type"] == "int32":
                f = d1.read_holding_registers(l[e]["register"], 2)
                #print(f)
                # NOTE(review): ">I" is *unsigned* 32-bit although the map
                # declares int32 — ">i" would match; confirm meter semantics.
                v = struct.unpack(">I", f.data)[0]/l[e]["gain"]
            print("{}: {} {}".format(e, v, l[e]["unit"]))
        except:
            # Bare except: deliberately best-effort on flaky serial reads —
            # report, then flush the UART buffer before the next register.
            time.sleep(0.1)
            print("ERROR")
            print(d1.uart.read(10))
            while d1.uart.any() > 0:
                print(d1.uart.read(10))
app/src/main/python/booksite/SoxsSite.py | xiaoping2100/androidPyBook | 0 | 12773132 | import copy
import re
import requests
import urllib.parse
from bs4 import BeautifulSoup
from typing import List
try:
import basesite
except (ModuleNotFoundError, ImportError) as e:
from . import basesite
class SoxsSite(basesite.BaseSite):
    """Scraper backend for the soxs.cc web-novel site."""

    def __init__(self):
        # Static metadata describing this site to the framework.
        self.site_info = basesite.SiteInfo(
            type='网络小说',
            statue='上线版本',
            url='https://www.soxs.cc',
            name='搜小说',
            brief_name='搜小说',
            version='1.1',
            max_threading_number=50,
        )
        super().__init__(self.site_info)
        self.base_url = 'https://www.soxs.cc'
        self.encoding = 'utf-8'
        self.search_url = 'https://www.soxs.cc/search.html'
        # Shared session; get_chapter_content deep-copies it per call.
        self.session = requests.session()

    @basesite.print_in_out
    def get_books(self, search_info: str) -> List[basesite.Book]:
        """Search the site for *search_info* and return matching books."""
        r = self.try_post_url(self.session, url=self.search_url, try_timeout=5,
                              params=f'searchtype=all&searchkey={urllib.parse.quote(search_info)}')
        if r is None:
            return []
        soup = BeautifulSoup(r.content, 'html.parser')
        book_tag_list = soup.select('div.novelslist2 > ul > li')
        # First <li> is the table header row, hence the -1 / [1:] below.
        book_num = len(book_tag_list) - 1
        if book_num == 0:
            return []
        search_book_results = []
        book_soup_list = book_tag_list[1:]
        for book_soup in book_soup_list:
            # Column order: [category, title, latest chapter, author, date].
            span_list = book_soup.findAll('span')
            book_url = self.base_url + span_list[1].find('a').attrs['href']
            book_name = span_list[1].find('a').text
            book_author = span_list[3].text
            book_brief = f"最新章节:{span_list[2].find('a').text} 更新时间:{span_list[4].text.strip()}"
            book = basesite.Book(site=self, url=book_url, name=book_name, author=book_author,
                                 brief=book_brief)
            search_book_results.append(book)
        return search_book_results

    @basesite.print_in_out
    def get_chapters(self, book: basesite.Book) -> List[basesite.Chapter]:
        """Fetch *book*'s index page and return its chapter list."""
        r = self.try_get_url(self.session, book.url)
        if r is None:
            return []
        soup = BeautifulSoup(r.content, 'html.parser')
        chapter_soup_list = soup.select('div.caption + div dd a')
        chapters = [basesite.Chapter(site=self,
                                     url=self.base_url + chapter.attrs['href'],
                                     title=chapter.text)
                    for chapter in chapter_soup_list]
        return chapters

    def get_chapter_content(self, chapter: basesite.Chapter) -> str:
        """Download one chapter's text, stripping the site's ad/footer lines.

        A copy of the session is used so concurrent chapter downloads do not
        share connection state (the site caps threads at 50, see site_info).
        """
        session = copy.deepcopy(self.session)
        r = self.try_get_url(session, chapter.url)
        session.close()
        if r is None:
            return f'{chapter.title}\r\n下载失败'
        soup = BeautifulSoup(r.content, 'html.parser')
        content = soup.select_one('div.content').text
        # Remove the site's embedded advertisement sentence.
        content2 = re.sub(r"您可以在百度.+查找最新章节!", "", content)
        # Cut everything from the trailing self-promotion line onwards.
        if m := re.search(r"\w+最新章节地址:https://www.soxs.cc", content2):
            content2 = content2[:m.start()].strip()
        # title = chapter.title if chapter.title.startswith("第") else f"第{chapter.title}"
        # content3 = f'\r\n{title}\r\n{content2.strip()}'
        return content2

    def save_chapter(self, chapter, filename):
        """Download *chapter* and write its text to *filename*."""
        content = self.get_chapter_content(chapter)
        with open(filename, 'w', encoding=self.encoding) as f:
            f.write(content)
| 2.90625 | 3 |
maskrcnn_benchmark/modeling/roi_heads/box_head/attention.py | wuyuebupt/doubleheadsrcnn | 60 | 12773133 | <filename>maskrcnn_benchmark/modeling/roi_heads/box_head/attention.py<gh_stars>10-100
import torch
from torch import nn
from torch.nn import functional as F
class ListModule(nn.Module):
    """List-like container registering an arbitrary number of sub-modules.

    Supports ``len()``, iteration and integer indexing (non-negative only).
    """

    def __init__(self, *args):
        super(ListModule, self).__init__()
        # Register each child under its position so parameters are tracked.
        for index, module in enumerate(args):
            self.add_module(str(index), module)

    def __getitem__(self, idx):
        if idx < 0 or idx >= len(self._modules):
            raise IndexError('index {} is out of range'.format(idx))
        modules = iter(self._modules.values())
        # Advance the iterator to position idx.
        for _ in range(idx):
            next(modules)
        return next(modules)

    def __iter__(self):
        return iter(self._modules.values())

    def __len__(self):
        return len(self._modules)
class FPNFFConv(nn.Module):
    """Residual bottleneck block: 1x1 -> 3x3 -> 1x1 convs (channels /4 inside),
    each followed by BatchNorm, with a ReLU after the residual addition.
    Output has the same shape as the input."""

    def __init__(self, in_channels):
        super(FPNFFConv, self).__init__()
        mid_channels = in_channels // 4
        self.relu = nn.ReLU(inplace=True)
        # Bottleneck: reduce -> transform -> restore channel count.
        layers = [
            nn.Conv2d(in_channels, mid_channels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, mid_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, in_channels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(in_channels),
        ]
        self.bottleneck = nn.Sequential(*layers)

    def forward(self, x):
        # Residual connection around the bottleneck, then final ReLU.
        transformed = self.bottleneck(x)
        return self.relu(transformed + x)
### group non local
### group non local
class _NonLocalBlockND_Group(nn.Module):
    """Grouped non-local (self-attention) block with optional residual FF conv.

    The embedding channels are split into ``num_group`` groups; attention is
    computed independently per group and the results concatenated.  The output
    projection ``W`` is zero-initialised so the block starts as (roughly) an
    identity + ReLU.  Only 2-D inputs are supported (asserted below).
    """

    def __init__(self, in_channels, num_group, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True, relu_layer=True, use_softmax=True, use_ffconv=True, use_attention=True):
        super(_NonLocalBlockND_Group, self).__init__()

        assert dimension in [1, 2, 3]
        # Despite the generic signature, only the 2-D case is implemented.
        assert dimension == 2
        assert num_group in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]

        self.dimension = dimension
        self.sub_sample = sub_sample

        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.num_group = num_group

        if self.inter_channels is None:
            # Default embedding size: half the input channels (at least 1).
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        ## inner channels are divided by num of groups
        conv_nd = nn.Conv2d
        max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
        bn = nn.BatchNorm2d

        self.relu_layer = relu_layer
        self.relu = nn.ReLU(inplace=True)
        self.use_softmax = use_softmax
        self.use_ffconv = use_ffconv
        self.use_attention = use_attention
        if self.use_softmax:
            self.softmax = nn.Softmax(dim=2)

        assert self.num_group <= self.inter_channels

        if self.use_attention:
            # Per-group embedding width.
            self.inter_channels_group = self.inter_channels // self.num_group
            # Debug leftover: prints the per-group width at construction.
            print (self.inter_channels_group)
            # g/theta/phi: 1x1 projections into the embedding space.
            self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
            self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                                 kernel_size=1, stride=1, padding=0)
            self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                               kernel_size=1, stride=1, padding=0)

            # Sub-sampling is not supported by this grouped variant.
            assert sub_sample==False
            if sub_sample:
                self.g = nn.Sequential(self.g, max_pool_layer)
                self.phi = nn.Sequential(self.phi, max_pool_layer)

            # Output projection back to in_channels.
            self.W = nn.Sequential(conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                                           kernel_size=1, stride=1, padding=0))
            ## BN first then RELU
            if bn_layer:
                self.W.add_module(
                    'bn', bn(self.in_channels)
                )

            ## init the weights
            # Zero-init W's conv so the attention branch contributes nothing
            # at the start of training (residual-friendly initialisation).
            nn.init.constant_(self.W[0].weight, 0)
            nn.init.constant_(self.W[0].bias, 0)

        if self.use_ffconv:
            self.ffconv = FPNFFConv(self.in_channels)

    def forward(self, x):
        '''
        :param x: (b, c, t, h, w)
        :return:
        '''
        if self.use_attention:
            batch_size = x.size(0)

            # Flatten spatial dims: (b, C', H*W) -> (b, H*W, C') where needed.
            g_x = self.g(x).view(batch_size, self.inter_channels, -1)
            g_x = g_x.permute(0, 2, 1)

            theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)

            if self.num_group == 1:
                # Standard (ungrouped) embedded-Gaussian attention.
                f = torch.matmul(theta_x, phi_x)
                if self.use_softmax == True:
                    f_div_C = self.softmax(f)
                else:
                    # Dot-product variant: normalise by number of positions.
                    N = f.size(-1)
                    f_div_C = f / N
                yy = torch.matmul(f_div_C, g_x)
                yy = yy.permute(0, 2, 1).contiguous()
                yy = yy.view(batch_size, self.inter_channels, *x.size()[2:])
                W_y = self.W(yy)
            else:
                # Split the embedding channels into groups and attend per group.
                g_xs = torch.split(g_x, self.inter_channels_group, dim=2)
                theta_xs = torch.split(theta_x, self.inter_channels_group, dim=2)
                phi_xs = torch.split(phi_x, self.inter_channels_group, dim=1)
                y_group = []
                for gx, tx, px in zip(g_xs, theta_xs, phi_xs):
                    f = torch.matmul(tx, px)
                    if self.use_softmax == True:
                        f_div_C = self.softmax(f)
                    else:
                        N = f.size(-1)
                        f_div_C = f / N

                    yy = torch.matmul(f_div_C, gx)
                    yy = yy.permute(0, 2, 1).contiguous()
                    y_group.append(yy)
                y_out = torch.cat(y_group, dim=1)
                y_out = y_out.view(batch_size, self.inter_channels, *x.size()[2:])
                W_y = self.W(y_out)

            # Residual connection around the attention branch.
            z = W_y + x
            ## relu after residual
            if self.relu_layer:
                z = self.relu(z)
        else:
            z = x

        ## add ffconv
        if self.use_ffconv:
            zz = self.ffconv(z)
        else:
            zz = z
        return zz
class NONLocalBlock2D_Group(_NonLocalBlockND_Group):
    """2-D convenience wrapper: fixes ``dimension=2`` on the grouped block."""

    def __init__(self, in_channels, num_group=1, inter_channels=None, sub_sample=True, bn_layer=True, relu_layer=True, use_softmax=True, use_ffconv=True, use_attention=True):
        super(NONLocalBlock2D_Group, self).__init__(
            in_channels,
            num_group=num_group,
            inter_channels=inter_channels,
            dimension=2,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
            relu_layer=relu_layer,
            use_softmax=use_softmax,
            use_ffconv=use_ffconv,
            use_attention=use_attention,
        )
## original non local
## original non local
class _NonLocalBlockND(nn.Module):
    """Classic non-local block (Wang et al., dot-product variant) for 1/2/3-D.

    Computes position-wise attention over the flattened spatial (and, for 3-D,
    temporal) dimensions; the output projection is zero-initialised so the
    block starts as an identity mapping.
    """

    def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
        super(_NonLocalBlockND, self).__init__()

        assert dimension in [1, 2, 3]

        self.dimension = dimension
        self.sub_sample = sub_sample

        self.in_channels = in_channels
        self.inter_channels = inter_channels

        if self.inter_channels is None:
            # Default embedding size: half the input channels (at least 1).
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        # Pick the conv/pool/norm flavours matching the dimensionality.
        if dimension == 3:
            conv_nd = nn.Conv3d
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.BatchNorm3d
        elif dimension == 2:
            conv_nd = nn.Conv2d
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.BatchNorm2d
        else:
            conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=(2))
            bn = nn.BatchNorm1d

        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)

        if bn_layer:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                        kernel_size=1, stride=1, padding=0),
                bn(self.in_channels)
            )
            # Zero-init the BN scale/shift so the block starts as identity.
            nn.init.constant_(self.W[1].weight, 0)
            nn.init.constant_(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                             kernel_size=1, stride=1, padding=0)
            # Zero-init the projection itself when there is no BN.
            nn.init.constant_(self.W.weight, 0)
            nn.init.constant_(self.W.bias, 0)

        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)

        if sub_sample:
            # Halve the spatial resolution of the key/value paths.
            self.g = nn.Sequential(self.g, max_pool_layer)
            self.phi = nn.Sequential(self.phi, max_pool_layer)

    def forward(self, x):
        '''
        :param x: (b, c, t, h, w)
        :return:
        '''

        batch_size = x.size(0)

        # Flatten spatial dims: (b, C', N) / (b, N, C') as required by matmul.
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        f = torch.matmul(theta_x, phi_x)
        # Dot-product normalisation by the number of positions (no softmax).
        N = f.size(-1)
        f_div_C = f / N

        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        # Residual connection.
        z = W_y + x

        return z
class NONLocalBlock1D(_NonLocalBlockND):
    """1-D convenience wrapper: fixes ``dimension=1`` on the non-local block."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NONLocalBlock1D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=1,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
class NONLocalBlock2D(_NonLocalBlockND):
    """2-D convenience wrapper: fixes ``dimension=2`` on the non-local block."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NONLocalBlock2D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=2,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
class NONLocalBlock3D(_NonLocalBlockND):
    """3-D convenience wrapper: fixes ``dimension=3`` on the non-local block."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NONLocalBlock3D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=3,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
if __name__ == '__main__':
    # Smoke test: run each wrapper over all sub_sample/bn_layer combinations
    # and print the output shapes (should equal the input shapes).
    import torch

    for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
        img = torch.zeros(2, 3, 20)
        net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
        out = net(img)
        print(out.size())

        img = torch.zeros(2, 3, 20, 20)
        net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
        out = net(img)
        print(out.size())

        img = torch.randn(2, 3, 8, 20, 20)
        net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
        out = net(img)
        print(out.size())
| 2.28125 | 2 |
tests/views/test_planing.py | DanielGrams/gsevp | 1 | 12773134 | def test_list(client, seeder, utils):
user_id, admin_unit_id = seeder.setup_base()
seeder.create_event(admin_unit_id)
url = utils.get_url("planing")
utils.get_ok(url)
url = utils.get_url("planing", keyword="name")
utils.get_ok(url)
| 1.859375 | 2 |
src/xrl/utils/pruner.py | k4ntz/XmodRL | 0 | 12773135 | <filename>src/xrl/utils/pruner.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.utils.prune as prune
# own pruning class for disabling nodes from prev layer
# based on threshold
# own pruning class for disabling nodes from prev layer
# based on threshold
class ThresholdPruning(prune.BasePruningMethod):
    """Prune every weight belonging to an input feature whose mean connection
    weight falls below a threshold.

    The weight tensor is interpreted as interleaved per-feature strides: the
    weights of feature ``i`` live at ``tensor[i::feature_size]``.  Note that
    the tensor passed to ``compute_mask`` is zeroed in place before the mask
    is derived from it.
    """
    PRUNING_TYPE = "unstructured"

    def __init__(self, pruning_feature, feature_size):
        # pruning_feature doubles as the magnitude threshold here.
        self.threshold = pruning_feature
        self.feature_size = feature_size

    def compute_mask(self, tensor, default_mask):
        for feature in range(self.feature_size):
            # Mean weight of every connection fed by this input feature.
            mean_weight = torch.mean(tensor[feature::self.feature_size])
            if abs(mean_weight) < self.threshold:
                print("Pruning weights for feature", feature, ":", mean_weight, "<", self.threshold)
                tensor[feature::self.feature_size] = 0
        # Keep exactly the weights that survived the zeroing above.
        return tensor != 0
# own pruning class for disabling nodes from prev layer
# based on ig values and threshold
# own pruning class for disabling nodes from prev layer
# based on ig values and threshold
class IGPruning(prune.BasePruningMethod):
    """Prune all weights of an input feature whose integrated-gradients
    attribution magnitude is below a fixed threshold (0.01).

    Feature ``i``'s weights live at ``tensor[i::feature_size]``; the tensor
    is zeroed in place before the mask is derived from it.
    """
    PRUNING_TYPE = "unstructured"

    def __init__(self, pruning_feature, feature_size):
        # pruning_feature carries one IG attribution per input feature.
        self.ig_values = pruning_feature
        self.threshold = 0.01
        self.feature_size = feature_size

    def compute_mask(self, tensor, default_mask):
        for feature in range(self.feature_size):
            attribution = self.ig_values[feature]
            if abs(attribution) < self.threshold:
                print("Pruning weights for feature", feature, ": IG-Value", attribution, "<", self.threshold)
                tensor[feature::self.feature_size] = 0
        # Keep exactly the weights that survived the zeroing above.
        return tensor != 0
def prune_nn(nn, pruning_method = "threshold-pr", pruning_feature = 0.01):
    """Apply feature-wise pruning to the weights of the network's ``h`` layer.

    NOTE(review): the parameter name ``nn`` shadows the module-level
    ``torch.nn`` import inside this function — the network object, not the
    package. ``feature_size`` is hard-coded to 21 below; presumably the fixed
    input-feature count of this agent — confirm before reusing.

    :param nn: network object exposing an ``h`` layer with a ``weight``.
    :param pruning_method: "threshold-pr" (mean-weight) or "ig-pr" (IG-based).
    :param pruning_feature: threshold value or list of IG attributions,
        forwarded to the selected pruning class.
    :returns: tuple of (pruned network, list of pruned input-feature indices).
    """
    # set params to prune
    parameters_to_prune = (
        (nn.h, 'weight'),
    )
    # select pruning method
    if pruning_method == "threshold-pr":
        pruning_method = ThresholdPruning
    elif pruning_method == "ig-pr":
        pruning_method = IGPruning
    # prune (extra kwargs are forwarded to the pruning class constructor)
    prune.global_unstructured(
        parameters_to_prune,
        pruning_method = pruning_method,
        pruning_feature = pruning_feature,
        feature_size = 21,
    )
    # Make the pruning permanent (drops the reparametrization hooks).
    prune.remove(nn.h, 'weight')
    # print resulting sparsity of the pruned layer
    print("Sparsity in h.weight: {:.2f}%".format(
        100. * float(torch.sum(nn.h.weight == 0.0))
        / float(nn.h.weight.nelement())
        )
    )
    # create list which inputs should be set to zero
    # NOTE(review): assumes parameter index 1 is h.weight and that row 0
    # being zero implies the whole feature column was pruned — verify against
    # the network's parameter ordering.
    pruned_input = []
    for i, param in enumerate(nn.parameters()):
        if i == 1:
            for i1 in range(param.shape[1]):
                if param[0][i1] == 0:
                    pruned_input.append(i1)
    # return pruned neural network
    return nn, pruned_input
Task/Balanced-brackets/Python/balanced-brackets-2.py | LaudateCorpus1/RosettaCodeData | 1 | 12773136 | >>> from itertools import accumulate
>>> from random import shuffle
>>> def gen(n):
... txt = list('[]' * n)
... shuffle(txt)
... return ''.join(txt)
...
>>> def balanced(txt):
... brackets = ({'[': 1, ']': -1}.get(ch, 0) for ch in txt)
... return all(x>=0 for x in accumulate(brackets))
...
>>> for txt in (gen(N) for N in range(10)):
... print ("%-22r is%s balanced" % (txt, '' if balanced(txt) else ' not'))
...
'' is balanced
'][' is not balanced
'[]][' is not balanced
']][[[]' is not balanced
'][[][][]' is not balanced
'[[[][][]]]' is balanced
'][[[][][]][]' is not balanced
'][]][][[]][[][' is not balanced
'][[]]][][[]][[[]' is not balanced
'][[][[]]]][[[]][][' is not balanced
| 3.796875 | 4 |
0405.convert_a_number_to_hexadecimal/solution.py | WZMJ/Algorithms | 5 | 12773137 | class Solution:
def to_hex(self, num: int) -> str:
if num == 0:
return "0"
prefix, ans = "0123456789abcdef", ""
num = 2 ** 32 + num if num < 0 else num
while num:
item = num & 15 # num % 16
ans = prefix[item] + ans
num >>= 4 # num //= 4
return ans
| 3.234375 | 3 |
flexipage/tests/test_flexipage.py | eRestin/MezzGIS | 0 | 12773138 | <reponame>eRestin/MezzGIS
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test.utils import override_settings
from django.template import Template
from django.forms import ModelForm
from django.db import models
from flexipage.utils import get_flexi_template, get_flexi_template_location,\
get_template_variables, get_flexi_tags, get_flexi_forms, get_settings_forms,\
get_flexi_form_tags
import flexipage
from flexipage.models import FlexiPage, FlexiContent
from flexipage.tests.test_flexi_app import models
from django.test import TestCase
from django.template.base import TemplateDoesNotExist
from django.conf import settings
from flexipage.page_processors import get_flexi_variables_context, get_flexi_forms_context
import os
# in case of emergency, break glass - makes interactive shell in execution
# import readline # optional, will allow Up/Down/History in the console
# import code
# vars = globals().copy()
# vars.update(locals())
# shell = code.InteractiveConsole(vars)
# shell.interact()
# Template path (relative to the app's templates directory) used by the tests below.
test_template_location = 'flexipage/tests/test.html'
# Filesystem location of the installed ``flexipage`` package; used to locate
# and rewrite the on-disk copy of the test template.
flexipage_module_directory = flexipage.__path__[0]
@override_settings(FLEXI_TEMPLATES=('test',test_template_location))
class TestFlexiPage(TestCase):
    """Integration tests for the FlexiPage model.

    NOTE(review): some tests rewrite the on-disk test template and restore it
    afterwards; a failing assertion in between leaves the template modified.
    Consider ``addCleanup``/``try-finally`` to guarantee restoration.
    """
    def setUp(self):
        # No shared fixtures; each test builds its own FlexiPage.
        pass
    def test_save_flexipage(self):
        "Tests that using the test template available new flexipages can be created"
        fp = FlexiPage()
        fp.title = 'some title here'
        fp.template_name = test_template_location
        fp.save()
    def test_save_flexipage_raise_error_on_no_template_name(self):
        "Tests that a FlexiPage without a template raises an error"
        fp = FlexiPage()
        fp.title = 'some title here'
        # save() must fail because template_name was never assigned.
        with self.assertRaises(AttributeError) as ex:
            fp.save()
    def test_check_for_flexicontent(self):
        "Tests the check_for_flexicontent method on the FlexiPage model"
        fp = FlexiPage()
        fp.title = 'some title here'
        fp.template_name = test_template_location
        fp.save()
        # Presumably test.html contains exactly one flexi_* variable without a
        # FlexiContent record yet -- TODO confirm against the template file.
        self.assertEqual(['flexi_second_variable'], fp.check_for_flexicontent())
    def test_update_flexicontent(self):
        "Test the update_flexicontent method"
        # check that it will create new FlexiContent items from the template
        fp = FlexiPage()
        fp.title = 'some title here'
        fp.template_name = test_template_location
        fp.save()
        flexi_contents = FlexiContent.objects.all()
        self.assertEqual(len(flexi_contents), 1)
        # add flexicontent to the template, check that this creates a new FlexiContent model
        test_html_location = os.path.join(flexipage_module_directory,
                                          'templates/flexipage/tests/test.html')
        with open(test_html_location, 'r') as original_test_html_file:
            original_html_string = original_test_html_file.read()
        with open(test_html_location, 'a') as test_html_file:
            test_html_file.write('\n {{ flexi_new_variable }} \n')
        fp.save()
        # Additional FlexiContent now fk'd to page model
        flexi_contents = FlexiContent.objects.all()
        self.assertEqual(len(flexi_contents), 2)
        # remove flexicontent item to return template to original format
        with open(test_html_location, 'w') as test_html_file_out:
            test_html_file_out.write(original_html_string)
    def test_flexipage_variables_context(self):
        """
        Tests that all flexicontent models fk'd to the page are
        included in the page context even if no longer present in the template
        """
        fp = FlexiPage()
        fp.title = 'some title here'
        fp.template_name = test_template_location
        fp.save()
        flexi_contents = FlexiContent.objects.all()
        self.assertEqual(len(flexi_contents), 1)
        # add flexicontent to the template, check that this creates a new FlexiContent model
        test_html_location = os.path.join(flexipage_module_directory,
                                          'templates/flexipage/tests/test.html')
        with open(test_html_location, 'r') as original_test_html_file:
            original_html_string = original_test_html_file.read()
        with open(test_html_location, 'a') as test_html_file:
            test_html_file.write('\n {{ flexi_new_variable }} \n')
        fp.save()
        # Additional FlexiContent now fk'd to page model
        flexi_contents = FlexiContent.objects.all()
        self.assertEqual(len(flexi_contents), 2)
        # remove flexicontent item to return template to original format
        with open(test_html_location, 'w') as test_html_file_out:
            test_html_file_out.write(original_html_string)
        # The quantity of FlexiContent models stays the same
        flexi_contents = FlexiContent.objects.all()
        self.assertEqual(len(flexi_contents), 2)
        # And the flexi_new_variable variable is still in the template context
        self.assertIn('flexi_new_variable', get_flexi_variables_context(fp))
class TestFlexiAdmin(TestCase):
    """Placeholder tests for the admin integration."""
    def test_template_changes_reflected_in_admin(self):
        # TODO: assert that template edits surface in the admin form fields.
        pass
@override_settings(FLEXI_TEMPLATES=('test','tests/test.html'))
class TestFlexiForms(TestCase):
    """Placeholder tests for flexi model forms."""
    def test_modelforms_render(self):
        # TODO: render a template containing a flexi form tag and assert output.
        pass
    def test_modelforms_save(self):
        # TODO: POST form data and assert the model instance is persisted.
        pass
@override_settings(FLEXI_TEMPLATES=('test','tests/test.html'))
class TestFlexiFormView(TestCase):
    """Placeholder tests for the form view context.

    NOTE(review): this override uses 'tests/test.html' while other classes use
    ``test_template_location`` ('flexipage/tests/test.html') -- confirm which
    path is intended.
    """
    def test_context(self):
        # TODO: assert forms appear in the page context.
        pass
| 2.46875 | 2 |
setup.py | davidfraser/mimetype-description | 0 | 12773139 | <reponame>davidfraser/mimetype-description
from setuptools import setup, find_packages

# Use the README as the PyPI long description. The encoding is pinned so the
# build does not depend on the system locale (previously this failed on
# machines whose default encoding is not UTF-8).
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="mimetype-description",
    description="Human readable MIME type descriptions",
    license="MIT",
    url="https://github.com/chesstrian/mimetype-description",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version="0.0.5",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7"
)
| 1.453125 | 1 |
python/test.py | event-driven-robotics/imu_tk2 | 0 | 12773140 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul
@author: <NAME> <<EMAIL>>
"""
# %% Preliminaries
import numpy as np
import os
import sys
import glob
# Paths to sibling repositories, taken from the environment so the script is
# portable across machines; hard-coded fallbacks below are developer-specific.
repos_path = os.getenv('GIT_REPOS_PATH')
bimvee_path = os.environ.get('BIMVEE_PATH')
wp5_slam_path = os.environ.get('WP5_SLAM_PATH')
imu_tk2_path = '/home/leandro/repos/tools/imu_tk'
imu_tk2_app = imu_tk2_path+'/bin/test_imu_calib'
data_path = '/home/leandro/results/IMUDataDump'
plot_path = data_path+'/plots'
params_path = data_path+'/params'
sys.path.insert(0, repos_path)
sys.path.insert(0, bimvee_path)
sys.path.insert(0, wp5_slam_path)
sys.path.insert(0, imu_tk2_path+'/python')
if(not os.path.isdir(plot_path)):
    print('creating folder for plots')
    os.mkdir(plot_path)
# Glob patterns (not regexes, despite the names) for the raw recordings.
raw_acc_files_regex = '/test/acc[0-9][0-9]'
raw_gyr_files_regex = '/test/gyr[0-9][0-9]'
# Local gravity magnitude [m/s^2] and nominal MPU-style sensor scale factors.
g_mag = 9.805622
nominal_gyr_scale = 250 * np.pi / (2.0 * 180.0 * 16384.0)
nominal_acc_scale = g_mag/16384.0
#this should meet the diferent calibration option in applyCalib.py
suffixes = ['base', 'optGyrBias', 'accMeans_base', 'accMeans_optGyrBias', 'gyroG']
# Integration window [s] shared by all benchmarks below.
start_time = 10
end_time = 40
#%% get raw data files
acc_files = np.sort(glob.glob(data_path+raw_acc_files_regex))
gyr_files = np.sort(glob.glob(data_path+raw_gyr_files_regex))
print(acc_files)
print(gyr_files)
assert acc_files.size == gyr_files.size, 'number of files for calibration should be the same'
#%% Read the parameters and plot them
from utils import readAllCalibParams#, plotAllCalibParams
# Calibration parameters keyed by suffix, then by sensor ('acc'|'gyr'),
# then by calibration trial index (filled in-place by readAllCalibParams).
skew = dict()
scale = dict()
bias = dict()
gmat = dict()
for suffix in suffixes:
    # get the name of all param files in the folder
    # get the name of all param files in the folder
    acc_params_regex = '/acc[0-9][0-9]'+'.'+suffix
    gyr_params_regex = '/gyr[0-9][0-9]'+'.'+suffix
    acc_params = np.sort(glob.glob(params_path+acc_params_regex))
    gyr_params = np.sort(glob.glob(params_path+gyr_params_regex))
    readAllCalibParams(acc_params, gyr_params, suffix, skew, scale, bias, gmat)
#%% Apply Calibrations on each data
from utils import readRawData, calibrate
#skew, scale, bias = [suffix]['acc'|'gyr']
calib_acc_data = dict()
calib_gyr_data = dict()
acc_data = dict()
gyr_data = dict()
for acc, gyr in zip(acc_files, gyr_files):
    #save the data
    acc_data[acc] = readRawData(acc)
    gyr_data[gyr] = readRawData(gyr)
    print('calibrating data from files: ' + acc.split('/')[-1] + ' and ' + gyr.split('/')[-1])
    # apply all the calibrations for each suffix
    calib_acc_data[acc] = dict()
    calib_gyr_data[gyr] = dict()
    for suffix in suffixes:
        temp_acc = []
        temp_gyr = []
        for calib_n in range(len(skew[suffix]['acc'])):
            print('applying ' + str(suffix) + "_" + str(calib_n) + ' calibration')
            sk = skew[suffix]['acc'][calib_n]
            sc = scale[suffix]['acc'][calib_n]
            bi = bias[suffix]['acc'][calib_n]
            #temp_acc.append(str(suffix) + "_" + str(calib_n))
            temp_acc.append( calibrate(acc_data[acc], sk, sc, bi) )
            sk = skew[suffix]['gyr'][calib_n]
            sc = scale[suffix]['gyr'][calib_n]
            bi = bias[suffix]['gyr'][calib_n]
            gi = gmat[suffix]['gyr'][calib_n]
            #temp_acc.append(str(suffix) + "_" + str(calib_n))
            # Gyro calibration also uses the g-sensitivity matrix and the
            # just-calibrated accelerometer stream.
            temp_gyr.append(calibrate(gyr_data[gyr], sk, sc, bi, gi, temp_acc[-1]) )
        temp_acc = np.array(temp_acc)
        temp_gyr = np.array(temp_gyr)
        calib_acc_data[acc][suffix] = temp_acc
        calib_gyr_data[gyr][suffix] = temp_gyr
#%% Compute the orientation from the angular velocities
from utils import integrateOrientations, cropTime
orientations = dict()
for gyr_file in calib_gyr_data:
    print('integrating orientations for: \n' + gyr_file)
    print('Uncalibrated')
    orientations[gyr_file] = dict()
    #limit integration time for having some consistency among benchmarks
    orientations[gyr_file]['uncalibrated'] = integrateOrientations(
        cropTime(gyr_data[gyr_file], start_time, end_time),
        nominal_gyr_scale)
    for suffix in suffixes:
        print(suffix)
        orientations[gyr_file][suffix] = []
        for j, calib_gyr in enumerate(calib_gyr_data[gyr_file][suffix]):
            print('Calibrated run '+str(j))
            orientations[gyr_file][suffix].append(
                integrateOrientations(cropTime(calib_gyr, start_time, end_time))
            )
        orientations[gyr_file][suffix] = np.array(orientations[gyr_file][suffix])
#%% Apply gravity compensation with Madgwick filter
from utils import gravity_compensate
# Rotation from the IMU frame to the frame expected by the Madgwick filter.
T_imu2mdg = np.array( [ [0, 0, 1], [0, -1, 0], [1, 0, 0] ] )
gcomp_acc_data = dict()
# apply gravity comp on uncalibrated data
for acc_file, gyr_file in zip(acc_data, gyr_data):
    # Column 0 is the timestamp, columns 1:4 are the xyz samples.
    gcomp_acc_data[acc_file] = gravity_compensate(
        acc_data[acc_file][:,0],
        acc_data[acc_file][:,1:4]*nominal_acc_scale,
        gyr_data[gyr_file][:,1:4]*nominal_gyr_scale,
        g_mag, T_imu2mdg, plot_path)
#%% Apply gravity compensation on calibrated data
T_imu2mdg = np.array( [ [0, 0, 1], [0, -1, 0], [1, 0, 0] ] )
gcomp_calib_acc_data = dict()
for acc_file, gyr_file in zip(calib_acc_data, calib_gyr_data):
    print(acc_file, gyr_file)
    gcomp_calib_acc_data[acc_file] = dict()
    for suffix in suffixes:
        print(suffix)
        gcomp_calib_acc_data[acc_file][suffix] = list()
        for calib_acc, calib_gyr in zip(
            calib_acc_data[acc_file][suffix],
            calib_gyr_data[gyr_file][suffix]
        ):
            # NOTE(review): ``j`` is stale from a previous cell's loop here,
            # so this prints a wrong (or undefined) run index -- this loop
            # does not define j.
            print('calib run '+str(j))
            gcomp_calib_acc_data[acc_file][suffix].append(
                gravity_compensate(
                    calib_acc[:,0],
                    calib_acc[:,1:4],
                    calib_gyr[:,1:4],
                    g_mag, T_imu2mdg
                )
            )
        gcomp_calib_acc_data[acc_file][suffix] = np.array(gcomp_calib_acc_data[acc_file][suffix])
#%% Integrate the acc to get the velocities
from utils import integrateVelocities, cropTime
velocities= dict()
for acc_file in gcomp_calib_acc_data:
    print(acc_file)
    velocities[acc_file] = dict()
    velocities[acc_file]['uncalibrated'] = integrateVelocities(
        cropTime(gcomp_acc_data[acc_file],start_time, end_time))
    for suffix in suffixes:
        print(suffix)
        velocities[acc_file][suffix] = []
        for j, (calib_acc) in enumerate(gcomp_calib_acc_data[acc_file][suffix]):
            print('calib run '+str(j))
            velocities[acc_file][suffix].append(
                integrateVelocities(cropTime(calib_acc, start_time, end_time)))
        velocities[acc_file][suffix] = np.array(
            velocities[acc_file][suffix])
#%% Caculate the errors of the calibrations
from numpy.linalg import norm
# NOTE(review): MakeOrientationContinuous is imported but never used below --
# the bare ``(orientation)`` expressions suggest a call was removed.
from utils import MakeOrientationContinuous
errors_gyr = dict()
#TODO: make a function to get the error every a time interval
# Gyro error: norm of the final integrated orientation (should be ~0 for a
# recording that starts and ends at the same attitude).
for gyr_file in calib_gyr_data:
    print(gyr_file)
    print('Uncalibrated')
    errors_gyr[gyr_file] = dict()
    ori = (orientations[gyr_file]['uncalibrated'])
    errors_gyr[gyr_file]['uncalibrated'] = norm(ori[-1,1:4])
    for suffix in suffixes:
        print(suffix)
        errors_gyr[gyr_file][suffix] = []
        for orientation in orientations[gyr_file][suffix]:
            ori = (orientation)
            errors_gyr[gyr_file][suffix].append(norm(ori[-1,1:4]))
        errors_gyr[gyr_file][suffix] = np.array(errors_gyr[gyr_file][suffix])
# Acc error: norm of the final integrated (gravity-compensated) velocity.
errors_acc = dict()
for acc_file in gcomp_calib_acc_data:
    print(acc_file)
    errors_acc[acc_file] = dict()
    errors_acc[acc_file]['uncalibrated'] = norm(velocities[acc_file]['uncalibrated'][-1,1:4])
    for suffix in suffixes:
        print(suffix)
        errors_acc[acc_file][suffix] = []
        for vel in velocities[acc_file][suffix]:
            errors_acc[acc_file][suffix].append(norm(vel[-1,1:4]))
        errors_acc[acc_file][suffix] = np.array(errors_acc[acc_file][suffix])
#%% Plot a scatter
import re
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D as mlines
from matplotlib.patches import Patch as mpatches
plt.close('all')
alpha = 1
mss = ['s','x', '^']
plt.figure(figsize=(20,20))
# x = accelerometer error (final velocity norm), y = gyro error (final
# orientation norm) -- see the error computation cell above.
for acc_file, gyr_file in zip(errors_acc, errors_gyr):
    print(acc_file, gyr_file)
    plt.plot(abs(errors_acc[acc_file]['uncalibrated']),
             abs(errors_gyr[gyr_file]['uncalibrated']),
             marker='d')
    #plot the calibrated ones
    for suffix, ms in zip(suffixes, mss):
        print(suffix)
        for ea, eg in zip(errors_acc[acc_file][suffix], errors_gyr[gyr_file][suffix]):
            lab = re.findall(r'\d+', acc_file.split('/')[-1])[0]
            plt.plot(abs(ea), abs(eg), alpha=alpha, marker=ms)
            plt.annotate(lab, (abs(ea), abs(eg)))
# Fix: the axis labels were swapped -- the x axis carries the accelerometer
# (linear-velocity) error and the y axis the gyro (orientation) error, which
# matches the labelling used in the boxplot cells below.
plt.xlabel('|Lin. Velocity error|')
plt.ylabel('|Orientation error|')
l0 = mlines([],[], marker='d', ls='', label='uncalibrated')
l1 = mlines([],[], marker='s', ls='', label= suffixes[0])
l2 = mlines([],[], marker='x', ls='', label= suffixes[1])
l3 = mlines([],[], marker='^', ls='', label= suffixes[2])
handles1 = [l0,l1,l2, l3]
legend1 = plt.legend(handles=handles1, prop={'size':12});
colours = plt.rcParams['axes.prop_cycle'].by_key()['color']
handles2 = []
for i, file in enumerate(acc_params):
    handles2.append(mpatches(color=colours[i], label='calib trial '+re.findall(r'\d+', file.split('/')[-1])[0]))
plt.legend(handles=handles2, loc=4)
plt.gca().add_artist(legend1)
plt.yscale('log')
plt.xscale('log')
plt.savefig(plot_path+'/errors.png')
#%% Per calibration trial average
from utils import setBoxColors
# Box plots grouped by calibration trial: ax[0] = acc (lin. vel.) error,
# ax[1] = gyro (orientation) error; one coloured box per calibration mode.
perCalib_acc_error = dict()
perCalib_gyr_error = dict()
trials = ['#'+re.findall(r'\d+', file.split('/')[-1])[0] for file in acc_params]
pos = (len(suffixes)+1)*(np.linspace(1,len(trials),len(trials)))-1
colours = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.close('all')
fig, ax = plt.subplots(1, 2, figsize=(20,10))#, sharex='all', sharey='all')
for i, suffix in enumerate(suffixes):
    print(i)
    perCalib_acc_error[suffix] = []
    perCalib_gyr_error[suffix] = []
    for acc_file, gyr_file in zip(errors_acc, errors_gyr):
        perCalib_acc_error[suffix].append(errors_acc[acc_file][suffix])
        perCalib_gyr_error[suffix].append(errors_gyr[gyr_file][suffix])
    perCalib_acc_error[suffix] = np.array(perCalib_acc_error[suffix])
    perCalib_gyr_error[suffix] = np.array(perCalib_gyr_error[suffix])
    bp = ax[0].boxplot(perCalib_acc_error[suffix], widths = 0.6, positions=pos+i)
    setBoxColors(bp, colours[i])
    bp = ax[1].boxplot(perCalib_gyr_error[suffix], widths = 0.6, positions=pos+i)
    setBoxColors(bp, colours[i])
# plot the uncalibrated data
uncalib_acc_error = []
uncalib_gyr_error = []
for acc_file, gyr_file in zip(errors_acc, errors_gyr):
    uncalib_acc_error.append(errors_acc[acc_file]['uncalibrated'])
    uncalib_gyr_error.append(errors_gyr[gyr_file]['uncalibrated'])
uncalib_acc_error = np.array(uncalib_acc_error)
uncalib_gyr_error = np.array(uncalib_gyr_error)
bp = ax[0].boxplot(uncalib_acc_error, widths = 0.6, positions=[-1])
setBoxColors(bp, 'k')
bp = ax[1].boxplot(uncalib_gyr_error, widths = 0.6, positions=[-1])
setBoxColors(bp, 'k')
plt.sca(ax[0])
plt.xticks( np.concatenate(([-1],pos+0.5)), ['uncalib']+trials)
ax[0].set_xlabel('calibration trial')
ax[0].set_ylabel('lin. vel. error')
plt.sca(ax[1])
plt.xticks(np.concatenate(([-1],pos+0.5)), ['uncalib']+trials)
ax[1].set_xlabel('calibration trial')
ax[1].set_ylabel('ori. error')
handles = []
for i, suffix in enumerate(suffixes):
    handles.append(mpatches(color=colours[i], label=suffix))
ax[1].legend(handles=handles)
fig.savefig(plot_path + '/compare_calibration_trials.pdf')
#%% Per calibration (suffix) average
# Same plot grouped by test recording instead of by calibration trial
# (note the transposed arrays passed to boxplot below).
from utils import setBoxColors
perTest_acc_error = dict()
perTest_gyr_error = dict()
tests = ['#'+re.findall(r'\d+', file.split('/')[-1])[0] for file in acc_data]
pos = (len(suffixes)+1)*(np.linspace(1,len(tests),len(tests)))-1
colours = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.close('all')
fig, ax = plt.subplots(1, 2, figsize=(20,10))#, sharex='all', sharey='all')
for i, suffix in enumerate(suffixes):
    print(i)
    perTest_acc_error[suffix] = []
    perTest_gyr_error[suffix] = []
    for acc_file, gyr_file in zip(errors_acc, errors_gyr):
        perTest_acc_error[suffix].append(errors_acc[acc_file][suffix])
        perTest_gyr_error[suffix].append(errors_gyr[gyr_file][suffix])
    perTest_acc_error[suffix] = np.array(perTest_acc_error[suffix])
    perTest_gyr_error[suffix] = np.array(perTest_gyr_error[suffix])
    bp = ax[0].boxplot(perTest_acc_error[suffix].T, widths = 0.6, positions=pos+i)
    setBoxColors(bp, colours[i])
    bp = ax[1].boxplot(perTest_gyr_error[suffix].T, widths = 0.6, positions=pos+i)
    setBoxColors(bp, colours[i])
# plot the uncalibrated data
uncalib_acc_error = []
uncalib_gyr_error = []
for acc_file, gyr_file in zip(errors_acc, errors_gyr):
    uncalib_acc_error.append(errors_acc[acc_file]['uncalibrated'])
    uncalib_gyr_error.append(errors_gyr[gyr_file]['uncalibrated'])
uncalib_acc_error = np.array(uncalib_acc_error)
uncalib_gyr_error = np.array(uncalib_gyr_error)
bp = ax[0].boxplot(uncalib_acc_error.T, widths = 0.6, positions=[-1])
setBoxColors(bp, 'k')
bp = ax[1].boxplot(uncalib_gyr_error.T, widths = 0.6, positions=[-1])
setBoxColors(bp, 'k')
plt.sca(ax[0])
plt.xticks( np.concatenate(([-1],pos+0.5)), ['uncalib']+tests)
ax[0].set_xlabel('test #')
ax[0].set_ylabel('lin. vel. error')
plt.sca(ax[1])
plt.xticks(np.concatenate(([-1],pos+0.5)), ['uncalib']+tests)
ax[1].set_xlabel('test #')
ax[1].set_ylabel('ori. error')
handles = []
for i, suffix in enumerate(suffixes):
    handles.append(mpatches(color=colours[i], label=suffix))
ax[1].legend(handles=handles)
fig.savefig(plot_path + '/compare_calibration_modes.pdf') | 1.703125 | 2 |
estrategias/Valdrighi.py | lucasmoschen/jogos_vorazes | 1 | 12773141 | # coding: utf8
from .jogadores import Jogador
import random
class MeuJogador(Jogador):
    """Reputation-based hunting strategy."""
    def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores):
        """Return one choice ('c' cooperate / 'd' defect) per opponent."""
        # First round: remember the starting food and cooperate with everyone.
        if rodada == 1:
            self.comida = comida_atual
            return ['c' for _ in reputacoes_dos_jogadores]
        # Small field (six players or fewer): always defect.
        if len(reputacoes_dos_jogadores) <= 6:
            return ['d' for _ in reputacoes_dos_jogadores]
        # Otherwise cooperate only with mid-reputation opponents.
        return ['c' if 0.2 < rep < 0.8 else 'd' for rep in reputacoes_dos_jogadores]
| 3.265625 | 3 |
example_usage_multinmf_conv_em.py | fakufaku/separake | 10 | 12773142 | <gh_stars>1-10
import numpy as np
import numpy.random as random
import matplotlib.pyplot as plt
import pyroomacoustics as pra
from scipy.io import wavfile
from multinmf_conv_em import multinmf_conv_em
def example_usage_multinmf_conv_em():
    #
    # example_usage_multinmf_conv_em()
    #
    # Example of usage of EM algorithm for multichannel NMF decomposition in
    # convolutive mixture
    #
    #
    # input
    # -----
    #
    # ...
    #
    # output
    # ------
    #
    # estimated source images are written in the results_dir
    #
    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # Copyright 2017 <NAME>, adapted to Python
    # Copyright 2010 <NAME>
    # (alexey.ozerov -at- irisa.fr)
    #
    # This software is distributed under the terms of the GNU Public License
    # version 3 (http://www.gnu.org/licenses/gpl.txt)
    #
    # If you use this code please cite this paper
    #
    # <NAME> and <NAME>,
    # "Multichannel nonnegative matrix factorization in convolutive mixtures for audio source separation,"
    # IEEE Trans. on Audio, Speech and Lang. Proc. special issue on Signal Models and Representations
    # of Musical and Environmental Sounds, vol. 18, no. 3, pp. 550-563, March 2010.
    # Available: http://www.irisa.fr/metiss/ozerov/Publications/OzerovFevotte_IEEE_TASLP10.pdf
    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # Model size: NMF components per source, number of sources, STFT window.
    NMF_CompPerSrcNum = 4
    nsrc = 3
    stft_win_len = 2048
    data_dir = 'data/Speech/'
    results_dir = 'data/Speech/'
    file_prefix = '3sources_3channels'
    # Input time-frequency representation
    print('Input time-frequency representation')
    fs, x = wavfile.read(data_dir + file_prefix + '_mix.wav')
    # Normalize 16-bit PCM samples to [-1, 1).
    x = x / (2**15)
    mix_nsamp = x.shape[0]
    nchan = x.shape[1]
    # TODO STFT
    window = pra.cosine(stft_win_len)
    # X is (nchan, nframe, nbin)
    X = np.array(
        [pra.stft(x[:,ch], stft_win_len, stft_win_len // 2, win=window, transform=np.fft.rfft) for ch in range(nchan)]
    )
    # move axes to match Ozerov's order (nbin, nfram, nchan)
    X = np.moveaxis(X, [0,1,2], [2,1,0])
    nbin = X.shape[0]
    nfram = X.shape[1]
    # Random initialization of multichannel NMF parameters
    print('Random initialization of multichannel NMF parameters')
    K = NMF_CompPerSrcNum * nsrc
    # source_NMF_ind[j]: indices of the NMF components assigned to source j.
    source_NMF_ind = []
    for j in range(nsrc):
        source_NMF_ind.append(np.arange(NMF_CompPerSrcNum) + j * NMF_CompPerSrcNum)
    mix_psd = 0.5 * (np.mean(np.abs(np.sum(X**2, axis=2)), axis=1))
    # Unit-modulus random phases for the initial mixing system.
    random_phases = random.randn(nchan, nsrc, nbin) + 1j * random.randn(nchan, nsrc, nbin)
    random_phases /= np.abs(random_phases)
    A_init = (0.5 *
            (1.9 * np.abs(random.randn(nchan, nsrc, nbin)) + 0.1 * np.ones((nchan, nsrc, nbin)))
            * random_phases
            )
    # W is intialized so that its enegy follows mixture PSD
    W_init = 0.5 * (
            ( np.abs(random.randn(nbin,K)) + np.ones((nbin,K)) )
            * ( mix_psd[:,np.newaxis] * np.ones((1,K)) )
            )
    # W_init = np.load("W_dictionary_em.npy")
    # print(W_init.shape)
    # K = W_init.shape[1]
    H_init = 0.5 * ( np.abs(random.randn(K,nfram)) + np.ones((K,nfram)) )
    Sigma_b_init = mix_psd / 100
    # run 300 iterations of multichannel NMF EM algorithm (with annealing)
    A_init = np.moveaxis(A_init, [2], [0])
    W_EM, H_EM, Ae_EM, Sigma_b_EM, Se_EM, log_like_arr = \
            multinmf_conv_em(X, W_init, H_init, A_init, Sigma_b_init, source_NMF_ind, iter_num=300)
    Ae_EM = np.moveaxis(Ae_EM, [0], [2])
    # Computation of the spatial source images
    print('Computation of the spatial source images\n')
    # NOTE(review): np.complex is deprecated in recent NumPy -- use complex.
    Ie_EM = np.zeros((nbin,nfram,nsrc,nchan), dtype=np.complex)
    for j in range(nsrc):
        for f in range(nbin):
            Ie_EM[f,:,j,:] = np.outer(Se_EM[f,:,j], Ae_EM[:,j,f])
    # Inverse STFT
    # NOTE(review): this outer initialization of ie_EM is dead -- the list is
    # re-created at the top of the per-source loop below.
    ie_EM = []
    for j in range(nsrc):
        # channel-wise istft with synthesis window
        ie_EM = []
        for ch in range(nchan):
            ie_EM.append(
                pra.istft(Ie_EM[:,:,j,ch].T, stft_win_len, stft_win_len // 2, win=window, transform=np.fft.irfft)
            )
        # write the separated source to a wav file
        out_filename = results_dir + '_sim_EM_' + str(j) + '.wav'
        wavfile.write(out_filename, fs, np.array(ie_EM).T)
    # Plot estimated W and H
    print('Plot estimated W and H')
    plt.figure()
    plot_ind = 1
    for k in range(NMF_CompPerSrcNum):
        for j in range(nsrc):
            plt.subplot(NMF_CompPerSrcNum, nsrc, plot_ind)
            plt.plot(np.log10(np.maximum(W_EM[:,source_NMF_ind[j][k]], 1e-40)))
            plt.title('Source_{}, log10(W_{})'.format(j, k))
            plot_ind += 1
    plt.tight_layout()
    plt.figure()
    plot_ind = 1
    for k in range(NMF_CompPerSrcNum):
        for j in range(nsrc):
            plt.subplot(NMF_CompPerSrcNum, nsrc, plot_ind)
            plt.plot(H_EM[source_NMF_ind[j][k],:])
            plt.title('Source_{}, H_{}'.format(j, k))
            plot_ind = plot_ind + 1
    plt.tight_layout()
    plt.show()
    plt.figure()
    plt.plot(log_like_arr)
    plt.show()
# Script entry point: run the full EM separation example.
if __name__ == '__main__':
    example_usage_multinmf_conv_em()
| 2.265625 | 2 |
datasets/multi_task_dataset_coco.py | zhangwenwen/multi-task.chainer | 0 | 12773143 | <filename>datasets/multi_task_dataset_coco.py
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
class Multi_task_COCO(GetterDataset):
    """Skeleton of a multi-task COCO dataset; all getters are unimplemented."""
    def __init__(self):
        super(Multi_task_COCO, self).__init__()
    def __len__(self):
        # TODO: return the number of examples.
        pass
    def _get_annotations(self):
        # TODO: return detection annotations for an example.
        pass
    def _get_image(self):
        # TODO: return the image for an example.
        pass
    def _get_mask(self):
        # TODO: return the segmentation mask for an example.
        pass
    def _parse_dataset_config(self):
        # NOTE(review): these values are read and then discarded -- the method
        # presumably should return or store them. self.exp_config is never set
        # in this class; confirm it is provided by a subclass or caller.
        detection_datasets = self.exp_config['detection_datasets']
        segmentation_datasets = self.exp_config['segmentation_datasets']
| 2.015625 | 2 |
office365/onedrive/driveitems/thumbnail.py | rikeshtailor/Office365-REST-Python-Client | 544 | 12773144 | <reponame>rikeshtailor/Office365-REST-Python-Client
from office365.runtime.client_value import ClientValue
class Thumbnail(ClientValue):
    """
    The thumbnail resource type represents a thumbnail for an image, video, document,
    or any item that has a bitmap representation.
    """
    # No extra fields: properties are populated dynamically by ClientValue.
    pass
| 2.1875 | 2 |
tacker/service.py | priya-pp/Priya | 3 | 12773145 | <gh_stars>1-10
# Copyright 2011 VMware, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging as std_logging
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from oslo_utils import excutils
from tacker.common import config
from tacker import wsgi
# Configuration options for the API service (registered on the global CONF).
service_opts = [
    cfg.IntOpt('periodic_interval',
               default=40,
               help=_('Seconds between running periodic tasks')),
    cfg.IntOpt('api_workers',
               default=0,
               help=_('Number of separate worker processes for service')),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=5,
               help=_('Range of seconds to randomly delay when starting the '
                      'periodic task scheduler to reduce stampeding. '
                      '(Disable by setting to 0)')),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class WsgiService(service.ServiceBase):
    """Base class for WSGI based services.

    For each api you define, you must also define these flags:
    :<api>_listen: The address on which to listen
    :<api>_listen_port: The port on which to listen
    """

    def __init__(self, app_name):
        # Paste app name to load; the server is created lazily in start().
        self.app_name = app_name
        self.wsgi_app = None

    def start(self):
        # Load the paste app and start serving it.
        self.wsgi_app = _run_wsgi(self.app_name)

    def wait(self):
        # Block until the underlying WSGI server terminates (if started).
        if self.wsgi_app:
            self.wsgi_app.wait()

    def stop(self):
        # No-op: shutdown is handled by the process manager.
        pass

    def reset(self):
        # No-op: nothing to reset on SIGHUP.
        pass
class TackerApiService(WsgiService):
    """Class for tacker-api service."""
    @classmethod
    def create(cls, app_name='tacker'):
        """Alternate constructor: set up logging, then build the service."""
        # Setup logging early
        config.setup_logging(cfg.CONF)
        # Dump the initial option values
        cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
        service = cls(app_name)
        return service
def serve_wsgi(cls):
    """Instantiate a WSGI service class; log and re-raise any startup error.

    NOTE(review): module-level function taking a service *class* (e.g.
    TackerApiService) -- not a method despite the ``cls`` name.
    """
    try:
        service = cls.create()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Unrecoverable error: please check log '
                            'for details.'))
    return service
def _run_wsgi(app_name):
    """Load the paste app and start the WSGI server; return the server.

    Returns None (after logging an error) when no app could be loaded.
    """
    app = config.load_paste_app(app_name)
    if not app:
        LOG.error(_('No known API applications configured.'))
        return
    server = wsgi.Server("Tacker")
    server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host,
                 workers=cfg.CONF.api_workers)
    # Dump all option values here after all options are parsed
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
    LOG.info(_("Tacker service started, listening on %(host)s:%(port)s"),
             {'host': cfg.CONF.bind_host,
              'port': cfg.CONF.bind_port})
    return server
| 1.890625 | 2 |
k8s/submit_kbs_jobs.py | kcyu2014/nas-landmarkreg | 8 | 12773146 | <gh_stars>1-10
import IPython
import yaml
import math
from kubernetes import client, config
from kubernetes.client.rest import ApiException
import os
# load the default configs.
from kubernetes.client import V1Job
from argparse import ArgumentParser
import time
# Target namespace and the sleep duration (14 days) for interactive pods.
NAMESPACE='cvlab-deadline'
INTERACTIVE_UPPER_LIMIT = 14 * 24 * 3600
# Load kubeconfig from the default location at import time.
config.load_kube_config()
parser = ArgumentParser('kubernetes submission')
parser.add_argument('--script', type=str, default=None, required=True,
                    help='Task Submission file, generated by other scripts.')
parser.add_argument('--job_template', type=str, default=None, help='to choose the default file.')
parser.add_argument('--gpu', type=int, default=1, help='GPU number')
parser.add_argument('--cpu', type=int, default=None, help='CPU number for each experiments')
parser.add_argument('--time', type=float, default=4, help='Running days for each job.')
parser.add_argument('--job_name', type=str, default=None, help='Job name')
parser.add_argument('--runtype', type=str, default='job', choices=['job', 'pod'])
parser.add_argument('--partition', type=str, default='gpu', help='Partition to differentiate the '
                                                                 'interactive job and others.')
parser.add_argument('--force-delete', default=False, action='store_true', help='force to delete the pod if exists.')
def obtain_default_job_object(path=None):
    """Load a Job YAML template (default one next to this file) into a V1Job."""
    path = path or os.path.dirname(__file__) + '/template_iclr_experiments.yaml'
    with open(path, 'r') as f:
        job_t = yaml.safe_load(f)
    # Re-wrap the parsed dict as a kubernetes V1Job object.
    job_t = V1Job(job_t['apiVersion'], kind=job_t['kind'], metadata=job_t['metadata'], spec=job_t['spec'])
    return job_t
def create_job(job_t):
    """Submit a batch Job to the cluster namespace and print its name."""
    k8s_apps_v1 = client.BatchV1Api()
    resp = k8s_apps_v1.create_namespaced_job(namespace=NAMESPACE, body=job_t)
    print("Job created. status='%s'" % resp.metadata.name)
def create_pod(pod_t, original_command=None):
    """Create an interactive pod, wait for it to run, then exec a shell into it.

    Parameters
    ----------
    pod_t : pod spec/template
        Body passed to create_namespaced_pod.
    original_command : list[str], optional
        The command the pod would have run; printed for the user's reference
        (the pod itself runs a long ``sleep`` instead).
    """
    resp = client.CoreV1Api().create_namespaced_pod(namespace=NAMESPACE, body=pod_t)
    pod_name = resp.metadata.name
    print("Interactive Pod created. name='%s'" % pod_name)
    if original_command:
        print("Please try to execute this command to check:")
        print("Original command: ", original_command)
    try:
        api_instance = client.CoreV1Api()
        waitSecond = 0
        # Poll every second, for up to one hour, until the pod reports Running.
        while waitSecond < 3600:
            time.sleep(1)
            waitSecond += 1
            api_ressponse = api_instance.read_namespaced_pod(name=pod_name, namespace=NAMESPACE)
            if api_ressponse.status.phase == 'Running':
                print("Pod is now running!")
                break
            if waitSecond % 10 == 0:
                print(f"Waiting {waitSecond} ...")
    except ApiException as e:
        # NOTE(review): the exec below still runs after a failed poll; it will
        # simply fail in kubectl if the pod never came up.
        print('Found exception in reading the logs')
    os.system(f'kubectl exec -it {pod_name} -- bash')
def process_name(args):
    """Derive a Kubernetes-safe object name from the CLI arguments.

    Lowercases the job name, replaces characters that are invalid in a
    DNS-1123 label, appends ``-interactive`` for the interactive partition,
    and truncates the result so it never exceeds Kubernetes' 63-character
    object-name limit. Returns ``'default'`` when no job name was given.
    """
    if not args.job_name:
        return 'default'
    name = args.job_name.replace('_', '-').lower()
    name = name.replace('.', '').replace('/', '-')
    # Fix: append the partition suffix *before* truncating -- previously the
    # suffix was added after, so interactive names could exceed 63 chars.
    if args.partition == 'interactive':
        name += '-interactive'
    if len(name) > 63:
        # Keep a short prefix and the (more informative) tail: 4 + 2 + 57 = 63.
        name = name[:4] + '--' + name[-57:]
    # Defensive: '=' can appear in generated names; '_' is already handled.
    name = name.replace('=', '-').replace('_', '-')
    return name
if __name__ == '__main__':
    args = parser.parse_args()
    k_type = args.runtype
    name = process_name(args)
    # Deletion path: remove the existing job/pod before (or instead of) submitting.
    if args.script == 'delete' or args.force_delete:
        # DELETE the pod accordingly.
        if args.partition == 'interactive':
            k_type = 'pod'
        os.system('kubectl delete {} {} --grace-period 0'.format(k_type, name))
        if args.script == 'delete':
            print('deleting script, stop here.')
            exit()
    elif args.script == 'nohup':
        print('do not submit a job here.')
        exit()
    print("Running script at ", args.script)
    job = obtain_default_job_object(args.job_template)
    # Hard deadline (in seconds) after which Kubernetes kills the job.
    job.spec['activeDeadlineSeconds'] = int(args.time * 24 * 3600)
    job.metadata['name'] = name
    new_containers = job.spec['template']['spec']['containers'][0]
    new_containers['command'] = ['bash', args.script]
    # Wrapping the cpu partition into account.
    if args.partition == 'cpu':
        args.gpu = 0
    elif args.partition == 'gpu':
        # The gpu partition maps requested GPUs onto half as many devices.
        args.gpu = math.ceil(float(args.gpu) / 2)
    elif args.partition in ['v100', 'q6', 'q8'] :
        # NOTE(review): the ``pass`` is dead code; the print still executes.
        pass
        print(f"Using {args.gpu} V100.")
    new_containers['resources']['limits']['nvidia.com/gpu'] = int(args.gpu)
    if args.cpu is None:
        # Fall back to the template's CPU limit, or 8 if none is set.
        try:
            num_cpu = int(new_containers['resources']['limits']['cpu'])
        except KeyError:
            num_cpu = 8
    else:
        num_cpu = args.cpu
    print(f"Using {num_cpu} CPUs ")
    new_containers['resources']['limits']['cpu'] = num_cpu
    print("New containers ", job.spec['template']['spec']['containers'][0])
    if args.partition == 'interactive':
        print("Creating interactive pod for debugging.")
        # Interactive mode: replace the command with a long sleep and exec in.
        interactive_pod = job.spec['template']
        if 'metadata' not in interactive_pod.keys():
            interactive_pod['metadata'] = job.metadata
        interactive_pod['metadata']['name'] = name
        for c_spec in interactive_pod['spec']['containers']:
            orig_command = c_spec['command']
            c_spec['command'] = ['sleep', str(INTERACTIVE_UPPER_LIMIT)]
        create_pod(interactive_pod, original_command=orig_command)
    else:
        if args.runtype == 'pod':
            # create pod
            interactive_pod = job.spec['template']
            if 'metadata' not in interactive_pod.keys():
                interactive_pod['metadata'] = job.metadata
            # NOTE(review): attribute access here vs dict access above
            # (interactive_pod['metadata']['name']) -- confirm which is valid
            # for this template object.
            interactive_pod.metadata.name = name
            create_pod(interactive_pod)
        elif args.runtype == 'job':
            create_job(job)
| 2.1875 | 2 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/plugins/apps.py | osoco/better-ways-of-thinking-about-software | 3 | 12773147 | <filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/plugins/apps.py
"""
Plugins Application Configuration
Signal handlers are connected here.
"""
from django.apps import AppConfig
from django.conf import settings
from edx_django_utils.plugins import connect_plugin_receivers
from openedx.core.djangoapps.plugins.constants import ProjectType
class PluginsConfig(AppConfig):
    """Django app configuration for the plugins framework."""

    name = 'openedx.core.djangoapps.plugins'
    plugin_app = {}

    def ready(self):
        """Wire plugin signal receivers for the running project (LMS or CMS)."""
        project_type = (
            ProjectType.LMS
            if settings.ROOT_URLCONF == 'lms.urls'
            else ProjectType.CMS
        )
        connect_plugin_receivers(project_type)
| 2.203125 | 2 |
tests/service/user/test_async_user.py | dicomgrid/sdk-python | 9 | 12773148 | <reponame>dicomgrid/sdk-python
import pytest
@pytest.mark.asyncio
class TestAsyncUser:
    """Test async user."""
    async def test_get(self, async_api):
        """Test user get."""
        user = await async_api.User.get().get()
        assert user
    async def test_get_with_only(self, async_api):
        """Test user get restricted to selected fields via only()."""
        user = await async_api.User.get().only('email').get()
        assert 'email' in user
        # + response dict
        assert len(user) == 1
        # only() also accepts a list of field names.
        user = await async_api.User.get().only(['email', 'name']).get()
        assert 'email' in user
        assert 'name' in user
        # + response dict
        assert len(user) == 2
    async def test_namespace_list(self, async_api):
        """Test namespace get."""
        namespaces = await async_api.User.namespace_list().get()
        assert namespaces.namespaces
| 2.65625 | 3 |
paperscraper/scholar/__init__.py | henrykrumb/paperscraper | 16 | 12773149 | from .scholar import * # noqa
| 0.996094 | 1 |
WebAPI/DB.py | cbluoss/TerraCtl | 0 | 12773150 | <filename>WebAPI/DB.py
from flask_sqlalchemy import SQLAlchemy
# NOTE(review): ``app`` is not defined or imported in this module as shown --
# importing this file would raise NameError. Confirm where the Flask app is
# meant to come from (e.g. ``from app import app``).
db = SQLAlchemy(app)
class State(db.Model):
    """A timestamped state snapshot persisted via SQLAlchemy."""
    # Auto-incrementing primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Snapshot timestamp; unique, so at most one state per instant.
    date = db.Column(db.DateTime, unique=True, nullable=False)
    # Serialized state payload. NOTE(review): unique=True on a Text column is
    # unusual -- confirm duplicate payloads really must be rejected.
    state = db.Column(db.Text, unique=True, nullable=False)
    def __repr__(self):
        return '<State at %r>' % self.date
| 2.765625 | 3 |