hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f50725f3c31d176be58c5bde9bb440a69602f34 | 11,325 | py | Python | IPython/terminal/tests/test_interactivshell.py | pyarnold/ipython | c4797f7f069d0a974ddfa1e4251c7550c809dba0 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-12-18T01:07:55.000Z | 2020-12-18T01:07:55.000Z | IPython/terminal/tests/test_interactivshell.py | pyarnold/ipython | c4797f7f069d0a974ddfa1e4251c7550c809dba0 | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/terminal/tests/test_interactivshell.py | pyarnold/ipython | c4797f7f069d0a974ddfa1e4251c7550c809dba0 | [
"BSD-3-Clause-Clear"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for the key interactiveshell module.
Authors
-------
* Julian Taylor
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import sys
import types
import unittest
from IPython.core.inputtransformer import InputTransformer
from IPython.testing.decorators import skipif
from IPython.utils import py3compat
from IPython.testing import tools as tt
# Decorator for interaction loop tests -----------------------------------
class mock_input_helper(object):
"""Machinery for tests of the main interact loop.
Used by the mock_input decorator.
"""
def mock_input(testfunc):
"""Decorator for tests of the main interact loop.
Write the test as a generator, yield-ing the input strings, which IPython
will see as if they were typed in at the prompt.
"""
return test_method
# Test classes -----------------------------------------------------------
| 37.131148 | 82 | 0.604768 | # -*- coding: utf-8 -*-
"""Tests for the key interactiveshell module.
Authors
-------
* Julian Taylor
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import sys
import types
import unittest
from IPython.core.inputtransformer import InputTransformer
from IPython.testing.decorators import skipif
from IPython.utils import py3compat
from IPython.testing import tools as tt
# Decorator for interaction loop tests -----------------------------------
class mock_input_helper(object):
"""Machinery for tests of the main interact loop.
Used by the mock_input decorator.
"""
def __init__(self, testgen):
self.testgen = testgen
self.exception = None
self.ip = get_ipython()
def __enter__(self):
self.orig_raw_input = self.ip.raw_input
self.ip.raw_input = self.fake_input
return self
def __exit__(self, etype, value, tb):
self.ip.raw_input = self.orig_raw_input
def fake_input(self, prompt):
try:
return next(self.testgen)
except StopIteration:
self.ip.exit_now = True
return u''
except:
self.exception = sys.exc_info()
self.ip.exit_now = True
return u''
def mock_input(testfunc):
"""Decorator for tests of the main interact loop.
Write the test as a generator, yield-ing the input strings, which IPython
will see as if they were typed in at the prompt.
"""
def test_method(self):
testgen = testfunc(self)
with mock_input_helper(testgen) as mih:
mih.ip.interact(display_banner=False)
if mih.exception is not None:
# Re-raise captured exception
etype, value, tb = mih.exception
import traceback
traceback.print_tb(tb, file=sys.stdout)
del tb # Avoid reference loop
raise value
return test_method
# Test classes -----------------------------------------------------------
class InteractiveShellTestCase(unittest.TestCase):
def rl_hist_entries(self, rl, n):
"""Get last n readline history entries as a list"""
return [rl.get_history_item(rl.get_current_history_length() - x)
for x in range(n - 1, -1, -1)]
def test_runs_without_rl(self):
"""Test that function does not throw without readline"""
ip = get_ipython()
ip.has_readline = False
ip.readline = None
ip._replace_rlhist_multiline(u'source', 0)
@skipif(not get_ipython().has_readline, 'no readline')
def test_runs_without_remove_history_item(self):
"""Test that function does not throw on windows without
remove_history_item"""
ip = get_ipython()
if hasattr(ip.readline, 'remove_history_item'):
del ip.readline.remove_history_item
ip._replace_rlhist_multiline(u'source', 0)
@skipif(not get_ipython().has_readline, 'no readline')
@skipif(not hasattr(get_ipython().readline, 'remove_history_item'),
'no remove_history_item')
def test_replace_multiline_hist_disabled(self):
"""Test that multiline replace does nothing if disabled"""
ip = get_ipython()
ip.multiline_history = False
ghist = [u'line1', u'line2']
for h in ghist:
ip.readline.add_history(h)
hlen_b4_cell = ip.readline.get_current_history_length()
hlen_b4_cell = ip._replace_rlhist_multiline(u'sourc€\nsource2',
hlen_b4_cell)
self.assertEqual(ip.readline.get_current_history_length(),
hlen_b4_cell)
hist = self.rl_hist_entries(ip.readline, 2)
self.assertEqual(hist, ghist)
@skipif(not get_ipython().has_readline, 'no readline')
@skipif(not hasattr(get_ipython().readline, 'remove_history_item'),
'no remove_history_item')
def test_replace_multiline_hist_adds(self):
"""Test that multiline replace function adds history"""
ip = get_ipython()
hlen_b4_cell = ip.readline.get_current_history_length()
hlen_b4_cell = ip._replace_rlhist_multiline(u'sourc€', hlen_b4_cell)
self.assertEqual(hlen_b4_cell,
ip.readline.get_current_history_length())
@skipif(not get_ipython().has_readline, 'no readline')
@skipif(not hasattr(get_ipython().readline, 'remove_history_item'),
'no remove_history_item')
def test_replace_multiline_hist_keeps_history(self):
"""Test that multiline replace does not delete history"""
ip = get_ipython()
ip.multiline_history = True
ghist = [u'line1', u'line2']
for h in ghist:
ip.readline.add_history(h)
# start cell
hlen_b4_cell = ip.readline.get_current_history_length()
# nothing added to rl history, should do nothing
hlen_b4_cell = ip._replace_rlhist_multiline(u'sourc€\nsource2',
hlen_b4_cell)
self.assertEqual(ip.readline.get_current_history_length(),
hlen_b4_cell)
hist = self.rl_hist_entries(ip.readline, 2)
self.assertEqual(hist, ghist)
@skipif(not get_ipython().has_readline, 'no readline')
@skipif(not hasattr(get_ipython().readline, 'remove_history_item'),
'no remove_history_item')
def test_replace_multiline_hist_replaces_twice(self):
"""Test that multiline entries are replaced twice"""
ip = get_ipython()
ip.multiline_history = True
ip.readline.add_history(u'line0')
# start cell
hlen_b4_cell = ip.readline.get_current_history_length()
ip.readline.add_history('l€ne1')
ip.readline.add_history('line2')
# replace cell with single line
hlen_b4_cell = ip._replace_rlhist_multiline(u'l€ne1\nline2',
hlen_b4_cell)
ip.readline.add_history('l€ne3')
ip.readline.add_history('line4')
# replace cell with single line
hlen_b4_cell = ip._replace_rlhist_multiline(u'l€ne3\nline4',
hlen_b4_cell)
self.assertEqual(ip.readline.get_current_history_length(),
hlen_b4_cell)
hist = self.rl_hist_entries(ip.readline, 3)
expected = [u'line0', u'l€ne1\nline2', u'l€ne3\nline4']
# perform encoding, in case of casting due to ASCII locale
enc = sys.stdin.encoding or "utf-8"
expected = [py3compat.unicode_to_str(e, enc) for e in expected]
self.assertEqual(hist, expected)
@skipif(not get_ipython().has_readline, 'no readline')
@skipif(not hasattr(get_ipython().readline, 'remove_history_item'),
'no remove_history_item')
def test_replace_multiline_hist_replaces_empty_line(self):
"""Test that multiline history skips empty line cells"""
ip = get_ipython()
ip.multiline_history = True
ip.readline.add_history(u'line0')
# start cell
hlen_b4_cell = ip.readline.get_current_history_length()
ip.readline.add_history('l€ne1')
ip.readline.add_history('line2')
hlen_b4_cell = ip._replace_rlhist_multiline(u'l€ne1\nline2',
hlen_b4_cell)
ip.readline.add_history('')
hlen_b4_cell = ip._replace_rlhist_multiline(u'', hlen_b4_cell)
ip.readline.add_history('l€ne3')
hlen_b4_cell = ip._replace_rlhist_multiline(u'l€ne3', hlen_b4_cell)
ip.readline.add_history(' ')
hlen_b4_cell = ip._replace_rlhist_multiline(' ', hlen_b4_cell)
ip.readline.add_history('\t')
ip.readline.add_history('\t ')
hlen_b4_cell = ip._replace_rlhist_multiline('\t', hlen_b4_cell)
ip.readline.add_history('line4')
hlen_b4_cell = ip._replace_rlhist_multiline(u'line4', hlen_b4_cell)
self.assertEqual(ip.readline.get_current_history_length(),
hlen_b4_cell)
hist = self.rl_hist_entries(ip.readline, 4)
# expect no empty cells in history
expected = [u'line0', u'l€ne1\nline2', u'l€ne3', u'line4']
# perform encoding, in case of casting due to ASCII locale
enc = sys.stdin.encoding or "utf-8"
expected = [py3compat.unicode_to_str(e, enc) for e in expected]
self.assertEqual(hist, expected)
@mock_input
def test_inputtransformer_syntaxerror(self):
ip = get_ipython()
transformer = SyntaxErrorTransformer()
ip.input_splitter.python_line_transforms.append(transformer)
ip.input_transformer_manager.python_line_transforms.append(transformer)
try:
#raise Exception
with tt.AssertPrints('4', suppress=False):
yield u'print(2*2)'
with tt.AssertPrints('SyntaxError: input contains', suppress=False):
yield u'print(2345) # syntaxerror'
with tt.AssertPrints('16', suppress=False):
yield u'print(4*4)'
finally:
ip.input_splitter.python_line_transforms.remove(transformer)
ip.input_transformer_manager.python_line_transforms.remove(
transformer)
class SyntaxErrorTransformer(InputTransformer):
def push(self, line):
pos = line.find('syntaxerror')
if pos >= 0:
e = SyntaxError('input contains "syntaxerror"')
e.text = line
e.offset = pos + 1
raise e
return line
def reset(self):
pass
class TerminalMagicsTestCase(unittest.TestCase):
def test_paste_magics_message(self):
"""Test that an IndentationError while using paste magics doesn't
trigger a message about paste magics and also the opposite."""
ip = get_ipython()
s = ('for a in range(5):\n'
'print(a)')
tm = ip.magics_manager.registry['TerminalMagics']
with tt.AssertPrints("If you want to paste code into IPython, try the "
"%paste and %cpaste magic functions."):
ip.run_cell(s)
with tt.AssertNotPrints("If you want to paste code into IPython, try the "
"%paste and %cpaste magic functions."):
tm.store_or_execute(s, name=None)
def test_paste_magics_blankline(self):
"""Test that code with a blank line doesn't get split (gh-3246)."""
ip = get_ipython()
s = ('def pasted_func(a):\n'
' b = a+1\n'
'\n'
' return b')
tm = ip.magics_manager.registry['TerminalMagics']
tm.store_or_execute(s, name=None)
self.assertEqual(ip.user_ns['pasted_func'](54), 55)
| 1,971 | 7,735 | 258 |
54c05dacc36e2a160c7bf7dd66ce48a4d7ce3753 | 5,704 | py | Python | research/lstm_object_detection/models/mobilenet_defs.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | 1 | 2019-09-11T09:41:11.000Z | 2019-09-11T09:41:11.000Z | research/lstm_object_detection/models/mobilenet_defs.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | research/lstm_object_detection/models/mobilenet_defs.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definitions for modified MobileNet models used in LSTD."""
import tensorflow as tf
from nets import mobilenet_v1
from nets.mobilenet import conv_blocks as mobilenet_convs
from nets.mobilenet import mobilenet
slim = tf.contrib.slim
def mobilenet_v1_lite_def(depth_multiplier, low_res=False):
"""Conv definitions for a lite MobileNet v1 model.
Args:
depth_multiplier: float depth multiplier for MobileNet.
low_res: An option of low-res conv input for interleave model.
Returns:
Array of convolutions.
Raises:
ValueError: On invalid channels with provided depth multiplier.
"""
conv = mobilenet_v1.Conv
sep_conv = mobilenet_v1.DepthSepConv
return [
conv(kernel=[3, 3], stride=2, depth=32),
sep_conv(kernel=[3, 3], stride=1, depth=64),
sep_conv(kernel=[3, 3], stride=2, depth=128),
sep_conv(kernel=[3, 3], stride=1, depth=128),
sep_conv(kernel=[3, 3], stride=2, depth=256),
sep_conv(kernel=[3, 3], stride=1, depth=256),
sep_conv(kernel=[3, 3], stride=2, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1 if low_res else 2, depth=1024),
sep_conv(
kernel=[3, 3],
stride=1,
depth=int(_find_target_depth(1024, depth_multiplier)))
]
def mobilenet_v2_lite_def(reduced=False, is_quantized=False, low_res=False):
"""Conv definitions for a lite MobileNet v2 model.
Args:
reduced: Determines the scaling factor for expanded conv. If True, a factor
of 6 is used. If False, a factor of 3 is used.
is_quantized: Whether the model is trained in quantized mode.
low_res: Whether the input to the model is of half resolution.
Returns:
Array of convolutions.
"""
expanded_conv = mobilenet_convs.expanded_conv
expand_input = mobilenet_convs.expand_input_by_factor
op = mobilenet.op
return dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {
'center': True,
'scale': True
},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm,
'activation_fn': tf.nn.relu6
},
(expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {
'padding': 'SAME'
}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=2,
num_outputs=24),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=1,
num_outputs=24),
op(expanded_conv, stride=2, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=2, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1 if low_res else 2, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv,
stride=1,
num_outputs=320,
project_activation_fn=(tf.nn.relu6
if is_quantized else tf.identity))
],
)
| 39.337931 | 81 | 0.610975 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definitions for modified MobileNet models used in LSTD."""
import tensorflow as tf
from nets import mobilenet_v1
from nets.mobilenet import conv_blocks as mobilenet_convs
from nets.mobilenet import mobilenet
slim = tf.contrib.slim
def mobilenet_v1_lite_def(depth_multiplier, low_res=False):
"""Conv definitions for a lite MobileNet v1 model.
Args:
depth_multiplier: float depth multiplier for MobileNet.
low_res: An option of low-res conv input for interleave model.
Returns:
Array of convolutions.
Raises:
ValueError: On invalid channels with provided depth multiplier.
"""
conv = mobilenet_v1.Conv
sep_conv = mobilenet_v1.DepthSepConv
def _find_target_depth(original, depth_multiplier):
# Find the target depth such that:
# int(target * depth_multiplier) == original
pseudo_target = int(original / depth_multiplier)
for target in range(pseudo_target - 1, pseudo_target + 2):
if int(target * depth_multiplier) == original:
return target
raise ValueError('Cannot have %d channels with depth multiplier %0.2f' %
(original, depth_multiplier))
return [
conv(kernel=[3, 3], stride=2, depth=32),
sep_conv(kernel=[3, 3], stride=1, depth=64),
sep_conv(kernel=[3, 3], stride=2, depth=128),
sep_conv(kernel=[3, 3], stride=1, depth=128),
sep_conv(kernel=[3, 3], stride=2, depth=256),
sep_conv(kernel=[3, 3], stride=1, depth=256),
sep_conv(kernel=[3, 3], stride=2, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1 if low_res else 2, depth=1024),
sep_conv(
kernel=[3, 3],
stride=1,
depth=int(_find_target_depth(1024, depth_multiplier)))
]
def mobilenet_v2_lite_def(reduced=False, is_quantized=False, low_res=False):
"""Conv definitions for a lite MobileNet v2 model.
Args:
reduced: Determines the scaling factor for expanded conv. If True, a factor
of 6 is used. If False, a factor of 3 is used.
is_quantized: Whether the model is trained in quantized mode.
low_res: Whether the input to the model is of half resolution.
Returns:
Array of convolutions.
"""
expanded_conv = mobilenet_convs.expanded_conv
expand_input = mobilenet_convs.expand_input_by_factor
op = mobilenet.op
return dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {
'center': True,
'scale': True
},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm,
'activation_fn': tf.nn.relu6
},
(expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {
'padding': 'SAME'
}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=2,
num_outputs=24),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=1,
num_outputs=24),
op(expanded_conv, stride=2, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=2, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1 if low_res else 2, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv,
stride=1,
num_outputs=320,
project_activation_fn=(tf.nn.relu6
if is_quantized else tf.identity))
],
)
| 445 | 0 | 27 |
3b0f676854ce6949e3f8f80ac4c42b4708ce1fb3 | 8,842 | py | Python | annotation/anotherDocActiveThing.py | jakelever/corona-ml | 8ceb22af50d7277ebf05f2fd21bbbf68c080ed76 | [
"MIT"
] | 7 | 2021-02-01T22:39:23.000Z | 2021-08-09T16:28:38.000Z | annotation/anotherDocActiveThing.py | jakelever/corona-ml | 8ceb22af50d7277ebf05f2fd21bbbf68c080ed76 | [
"MIT"
] | 1 | 2021-05-17T13:14:40.000Z | 2021-05-20T10:26:09.000Z | annotation/anotherDocActiveThing.py | jakelever/corona-ml | 8ceb22af50d7277ebf05f2fd21bbbf68c080ed76 | [
"MIT"
] | 1 | 2021-01-04T14:11:18.000Z | 2021-01-04T14:11:18.000Z | import sys
sys.path.append("../pipeline")
import mysql.connector
import pickle
import argparse
import json
import itertools
from collections import defaultdict,Counter
from collections.abc import Iterable
import numpy as np
import time
import os
from scipy import stats
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare data for active learning')
#parser.add_argument('--db',required=True,type=str,help='JSON with database settings')
parser.add_argument('--inDir',required=True,type=str,help='Output dir to put matrices')
parser.add_argument('--negThreshold',required=False,default=0.3,type=float,help='Threshold below which is a confident negative (default=0.25)')
parser.add_argument('--posThreshold',required=False,default=0.7,type=float,help='Threshold above which is a confident positive (default=0.75)')
#parser.add_argument('--outFile',required=True,type=str,help='Output file')
args = parser.parse_args()
X_annotated = np.load(os.path.join(args.inDir,'X_annotated.npy'))
y_annotated = np.load(os.path.join(args.inDir,'y_annotated.npy'))
X_undecided = np.load(os.path.join(args.inDir,'X_undecided.npy'))
undecided_scores = np.load(os.path.join(args.inDir,'undecided_scores.npy'))
with open(os.path.join(args.inDir,'undecided_docs.pickle'),'rb') as f:
undecided_docs = pickle.load(f)
if False:
with open(args.db) as f:
database = json.load(f)
mydb = mysql.connector.connect(
host=database['host'],
user=database['user'],
passwd=database['passwd'],
database=database['database']
)
mycursor = mydb.cursor()
#loadDocumentIDMapping(mycursor,undecided_docs)
#baselineConfNumber = getConfidenceNumbers(X_annotated,y_annotated[:,args.label_index],X_undecided,args.posThreshold,args.negThreshold)
#print("baselineConfNumber=",baselineConfNumber)
#outcomes = searchForBestDocumentToAnnotate(X_annotated,y_annotated,X_undecided,args.posThreshold)
current_y = np.copy(y_annotated)
current_train_X = np.copy(X_annotated)
current_unknown_X = np.copy(X_undecided)
num_iter = current_unknown_X.shape[0]
prev_done = []
start_time = time.time()
for i in range(num_iter):
multi_scores = getMultiScores(current_train_X, current_y, current_unknown_X)
np.savetxt('multi_scores_%04d.csv' % i, multi_scores, delimiter=',', fmt="%f")
min_scores = multi_scores.min(axis=1)
min_score_percentiles = stats.rankdata(min_scores,"average") / min_scores.shape[0]
#print(min_score_percentiles.shape)
#print(min_score_percentiles[409])
current_outcomes = searchForBestDocumentToAnnotate(current_train_X,current_y,current_unknown_X,args.posThreshold,show_time=False)
for j in prev_done:
current_outcomes[j,:] = -1
np.savetxt('current_outcomes_%04d.csv' % i, current_outcomes, delimiter=',', fmt="%d")
best_doc_change = current_outcomes.min(axis=1).max()
best_doc_index = current_outcomes.min(axis=1).argmax()
best_min_score_percentile = min_score_percentiles[best_doc_index]
print("# best_doc_index=%d, best_doc_change=%d, train_size=%d" % (best_doc_index,best_doc_change,current_train_X.shape[0]))
print("# best_min_score_percentile = %f" % best_min_score_percentile)
which_label_was_min = current_outcomes[best_doc_index,:].argmin()
label_score_percentiles = stats.rankdata(multi_scores[:,which_label_was_min],"average") / multi_scores.shape[0]
label_score_percentile_for_doc = label_score_percentiles[best_doc_index]
num_where_label_was_min = (current_outcomes.min(axis=1) == current_outcomes[:,which_label_was_min]).sum()
print("which_label_was_min = %d" % which_label_was_min)
print("num_where_label_was_min = %d/%d (%.1f%%)" % (num_where_label_was_min,current_outcomes.shape[0],100*num_where_label_was_min/current_outcomes.shape[0]))
print("label_score_percentile_for_doc = %f" % label_score_percentile_for_doc)
prev_done.append(best_doc_index)
current_train_X = np.vstack([current_train_X,current_unknown_X[best_doc_index,:]])
#current_unknown_X = np.delete(current_unknown_X,best_doc_index,0)
current_y = np.vstack([current_y,np.zeros((1,current_y.shape[1]))])
current_y[current_y.shape[0]-1,current_outcomes[best_doc_index,:].argmax()] = 1
outputTimeEstimates(i,num_iter,start_time)
#break
np.savetxt('undecided_scores.csv', undecided_scores, delimiter=',', fmt="%f")
| 36.841667 | 159 | 0.755598 | import sys
sys.path.append("../pipeline")
import mysql.connector
import pickle
import argparse
import json
import itertools
from collections import defaultdict,Counter
from collections.abc import Iterable
import numpy as np
import time
import os
from scipy import stats
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD
def nice_time(seconds):
days = int(seconds) // (24*60*60)
seconds -= days * (24*60*60)
hours = int(seconds) // (60*60)
seconds -= hours * (60*60)
minutes = int(seconds) // (60)
seconds -= minutes * (60)
bits = []
if days:
bits.append( "1 day" if days == 1 else "%d days" % days)
if hours:
bits.append( "1 hour" if hours == 1 else "%d hours" % hours)
if minutes:
bits.append( "1 minute" if minutes == 1 else "%d minutes" % minutes)
bits.append( "1 second" if seconds == 1 else "%.1f seconds" % seconds)
return ", ".join(bits)
def outputTimeEstimates(index,total_count,start_time):
now = time.time()
perc = 100*(index+1)/total_count
time_so_far = (now-start_time)
time_per_item = time_so_far / (index+1)
remaining_items = total_count - index
remaining_time = time_per_item * remaining_items
total_time = time_so_far + remaining_time
print("%.1f%% (%d/%d)" % (perc,index+1,total_count))
print("time_per_item = %.4fs (%s)" % (time_per_item,nice_time(time_per_item)))
print("remaining_items = %d" % remaining_items)
print("time_so_far = %.1fs (%s)" % (time_so_far,nice_time(time_so_far)))
print("remaining_time = %.1fs (%s)" % (remaining_time,nice_time(remaining_time)))
print("total_time = %.1fs (%s)" % (total_time,nice_time(total_time)))
print()
def getMultiScores(X_train,y_train,X_test):
assert y_train.shape[1] > 1
clf = OneVsRestClassifier(LogisticRegression(class_weight='balanced',random_state=0,C=21))
clf.fit(X_train, y_train)
scores = clf.predict_proba(X_test)
return scores
def getScores(X_train,y_train,X_test):
clf = LogisticRegression(class_weight='balanced',random_state=0,C=21)
clf.fit(X_train, y_train)
assert clf.classes_.tolist() == [0,1]
scores = clf.predict_proba(X_test)[:,1]
return scores
def searchForBestDocumentToAnnotate(X_annotated,y_annotated,X_undecided,posThreshold,show_time=True):
start = time.time()
X_annotated_plus_one = np.vstack([X_annotated,np.zeros((1,X_annotated.shape[1]))])
outcomes = np.zeros((X_undecided.shape[0],y_annotated.shape[1]),dtype=np.int32)
for docindex in range(X_undecided.shape[0]):
if show_time and (docindex%10) == 0:
outputTimeEstimates(docindex,X_undecided.shape[0],start)
X_annotated_plus_one[X_annotated_plus_one.shape[0]-1,:] = X_undecided[docindex,:]
neg_matrix = np.zeros((X_undecided.shape[0],y_annotated.shape[1]))
for label_index in range(y_annotated.shape[1]):
y_with_artifical_addition = np.concatenate([y_annotated[:,label_index],[0]])
neg_scores = getScores(X_annotated_plus_one, y_with_artifical_addition, X_undecided)
neg_matrix[:,label_index] = neg_scores
for label_index in range(y_annotated.shape[1]):
y_with_artifical_addition = np.concatenate([y_annotated[:,label_index],[1]])
pos_scores = getScores(X_annotated_plus_one, y_with_artifical_addition, X_undecided)
pos_matrix = neg_matrix.copy()
pos_matrix[:,label_index] = pos_scores
numHasHighConfPositive = int(sum(pos_matrix.max(axis=1) > posThreshold))
outcomes[docindex,label_index] = numHasHighConfPositive
#outcomes[docindex,0] = numHighConfNegative
#outcomes[docindex,1] = numHighConfPositive
#outcomes[iteration,0] = numHighConf
if show_time:
outputTimeEstimates(X_undecided.shape[0]-1,X_undecided.shape[0],start)
return outcomes
def loadDocumentIDMapping(mycursor,undecided_docs):
sql = "SELECT document_id,pubmed_id,cord_uid FROM documents"
print(sql)
mycursor.execute(sql)
myresult = mycursor.fetchall()
pubmed_to_document_id = {}
cord_to_document_id = {}
pubmed_to_document_id = {str(pubmed_id):document_id for document_id,pubmed_id,cord_ui in myresult if pubmed_id }
cord_to_document_id = {cord_ui:document_id for document_id,pubmed_id,cord_ui in myresult if cord_ui }
for d in undecided_docs:
cord_uid = d['cord_uid']
pubmed_id = d['pubmed_id']
if cord_uid in cord_to_document_id:
document_id = cord_to_document_id[cord_uid]
elif pubmed_id in pubmed_to_document_id:
document_id = pubmed_to_document_id[pubmed_id]
else:
continue
#raise RuntimeError("Couldn't find matching document for annotation with cord_uid=%s and pubmed_id=%s" % (cord_uid,pubmed_id))
d['document_id'] = document_id
if __name__ == '__main__':
    # Greedy active-learning simulation: repeatedly pick the undecided document
    # whose (simulated) annotation most improves the worst-case label outcome.
    parser = argparse.ArgumentParser(description='Prepare data for active learning')
    #parser.add_argument('--db',required=True,type=str,help='JSON with database settings')
    parser.add_argument('--inDir',required=True,type=str,help='Output dir to put matrices')
    # Help text previously advertised defaults of 0.25/0.75, which did not
    # match the actual defaults (0.3/0.7); corrected here.
    parser.add_argument('--negThreshold',required=False,default=0.3,type=float,help='Threshold below which is a confident negative (default=0.3)')
    parser.add_argument('--posThreshold',required=False,default=0.7,type=float,help='Threshold above which is a confident positive (default=0.7)')
    #parser.add_argument('--outFile',required=True,type=str,help='Output file')
    args = parser.parse_args()

    # Matrices produced by the earlier data-preparation step.
    X_annotated = np.load(os.path.join(args.inDir,'X_annotated.npy'))
    y_annotated = np.load(os.path.join(args.inDir,'y_annotated.npy'))
    X_undecided = np.load(os.path.join(args.inDir,'X_undecided.npy'))
    undecided_scores = np.load(os.path.join(args.inDir,'undecided_scores.npy'))
    with open(os.path.join(args.inDir,'undecided_docs.pickle'),'rb') as f:
        undecided_docs = pickle.load(f)

    if False:  # disabled legacy path: map documents through a MySQL database
        with open(args.db) as f:
            database = json.load(f)
        mydb = mysql.connector.connect(
            host=database['host'],
            user=database['user'],
            passwd=database['passwd'],
            database=database['database']
        )
        mycursor = mydb.cursor()
        #loadDocumentIDMapping(mycursor,undecided_docs)

    #baselineConfNumber = getConfidenceNumbers(X_annotated,y_annotated[:,args.label_index],X_undecided,args.posThreshold,args.negThreshold)
    #print("baselineConfNumber=",baselineConfNumber)
    #outcomes = searchForBestDocumentToAnnotate(X_annotated,y_annotated,X_undecided,args.posThreshold)

    current_y = np.copy(y_annotated)
    current_train_X = np.copy(X_annotated)
    current_unknown_X = np.copy(X_undecided)
    num_iter = current_unknown_X.shape[0]
    prev_done = []
    start_time = time.time()
    for i in range(num_iter):
        # Score every undecided document under the current model.
        multi_scores = getMultiScores(current_train_X, current_y, current_unknown_X)
        np.savetxt('multi_scores_%04d.csv' % i, multi_scores, delimiter=',', fmt="%f")
        min_scores = multi_scores.min(axis=1)
        min_score_percentiles = stats.rankdata(min_scores,"average") / min_scores.shape[0]
        #print(min_score_percentiles.shape)
        #print(min_score_percentiles[409])
        current_outcomes = searchForBestDocumentToAnnotate(current_train_X,current_y,current_unknown_X,args.posThreshold,show_time=False)
        for j in prev_done:
            current_outcomes[j,:] = -1  # never pick the same document twice
        np.savetxt('current_outcomes_%04d.csv' % i, current_outcomes, delimiter=',', fmt="%d")
        # Maximin choice: best worst-case outcome across labels.
        best_doc_change = current_outcomes.min(axis=1).max()
        best_doc_index = current_outcomes.min(axis=1).argmax()
        best_min_score_percentile = min_score_percentiles[best_doc_index]
        print("# best_doc_index=%d, best_doc_change=%d, train_size=%d" % (best_doc_index,best_doc_change,current_train_X.shape[0]))
        print("# best_min_score_percentile = %f" % best_min_score_percentile)
        which_label_was_min = current_outcomes[best_doc_index,:].argmin()
        label_score_percentiles = stats.rankdata(multi_scores[:,which_label_was_min],"average") / multi_scores.shape[0]
        label_score_percentile_for_doc = label_score_percentiles[best_doc_index]
        num_where_label_was_min = (current_outcomes.min(axis=1) == current_outcomes[:,which_label_was_min]).sum()
        print("which_label_was_min = %d" % which_label_was_min)
        print("num_where_label_was_min = %d/%d (%.1f%%)" % (num_where_label_was_min,current_outcomes.shape[0],100*num_where_label_was_min/current_outcomes.shape[0]))
        print("label_score_percentile_for_doc = %f" % label_score_percentile_for_doc)
        prev_done.append(best_doc_index)
        # Simulate annotating the chosen doc: append it to the training pool
        # with a positive label on its highest-outcome class.
        current_train_X = np.vstack([current_train_X,current_unknown_X[best_doc_index,:]])
        #current_unknown_X = np.delete(current_unknown_X,best_doc_index,0)
        current_y = np.vstack([current_y,np.zeros((1,current_y.shape[1]))])
        current_y[current_y.shape[0]-1,current_outcomes[best_doc_index,:].argmax()] = 1
        outputTimeEstimates(i,num_iter,start_time)
        #break
    np.savetxt('undecided_scores.csv', undecided_scores, delimiter=',', fmt="%f")
| 4,087 | 0 | 143 |
459fa3f4bf59ee2dc26b184c1108b9c9de325588 | 4,134 | py | Python | zebra_zpl/image.py | Children-With-Diabetes/py-zebra-zpl | 1ba99976a4322e7856d2c01aad7a9370d9f6c560 | [
"MIT"
] | null | null | null | zebra_zpl/image.py | Children-With-Diabetes/py-zebra-zpl | 1ba99976a4322e7856d2c01aad7a9370d9f6c560 | [
"MIT"
] | null | null | null | zebra_zpl/image.py | Children-With-Diabetes/py-zebra-zpl | 1ba99976a4322e7856d2c01aad7a9370d9f6c560 | [
"MIT"
] | null | null | null | import string
import PIL.Image
from .printable import Printable
class _ImageHandler:
"""Convert PIL images to ZPL
Based on Java example from:
http://www.jcgonzalez.com/java-image-to-zpl-example
"""
@staticmethod
@staticmethod
@property
@property
| 31.082707 | 83 | 0.495162 | import string
import PIL.Image
from .printable import Printable
class _ImageHandler:
    """Convert PIL images to ZPL

    Based on Java example from:
    http://www.jcgonzalez.com/java-image-to-zpl-example

    The image is rendered as a 1-bit bitmap (one hex digit per 4 pixels) and
    then run-length compressed with the ZPL alternative-compression codes.
    """

    def __init__(self, image: PIL.Image):
        # Work on an 8-bit grayscale copy; pixels are thresholded against
        # pure white (255) in _process_pixel_row.
        self._i = image.convert('L')

    @staticmethod
    def _image_compression_char(multiplier: int) -> str:
        """Return the ZPL run-length code for *multiplier* repeats.

        'G'..'Y' encode counts 1..19; 'g'..'z' encode multiples of 20
        (20..400). Returns '' for counts that cannot be encoded.
        """
        alpha = string.ascii_uppercase[6:-1]
        if (multiplier - 1) in range(len(alpha)):
            return alpha[multiplier - 1]
        if multiplier >= 20 and multiplier <= 400:
            multi = multiplier // 20
            return string.ascii_lowercase[6:][multi-1]
        return ''

    @staticmethod
    def binary_to_hex_str(data: str) -> str:
        """Right-pad *data* (a string of '0'/'1' bits) to 8 bits and return two hex digits."""
        if len(data) < 8:
            data += '0'*(8-len(data))
        return '{:02X}'.format(int(data, 2))

    def _process_pixel_row(self, y: int) -> str:
        """Render pixel row *y* as hex digits (bit 1 = non-white pixel), newline-terminated."""
        out = ''
        accumulate = ''
        for x in range(self._i.width):
            p = self._i.getpixel((x, y))
            color = '0' if p == 255 else '1'
            accumulate += color
            if len(accumulate) == 8:
                out += self.binary_to_hex_str(accumulate)
                accumulate = ''
        if accumulate:
            # Trailing partial byte of the row.
            out += self.binary_to_hex_str(accumulate)
        return f'{out}\n'

    @property
    def row_bytes(self):
        # Bytes needed for one row at 1 bit/pixel, rounded up.
        width_bytes = self._i.width // 8
        if self._i.width % 8 > 0:
            width_bytes += 1
        return width_bytes

    @property
    def total_bytes(self):
        # Size of the uncompressed bitmap in bytes.
        return self._i.height * self.row_bytes

    def _image_to_binary(self):
        # Whole bitmap as newline-separated rows of hex digits.
        return ''.join([self._process_pixel_row(y) for y in range(self._i.height)])

    def get_zpl_image_data(self):
        """Run-length compress the hex bitmap into ZPL ^GF data.

        ',' fills the rest of a line with zeros, '!' with ones, ':' repeats
        the previous line; other runs emit _image_compression_char codes
        followed by the repeated hex digit.
        """
        maxlinea = self.row_bytes * 2  # hex digits per bitmap row
        code = ''
        linea = ''
        previous_line = ''
        counter = 1
        o = self._image_to_binary()
        aux = o[0]
        first_char = False
        for c in o:
            if first_char:
                # First digit after a newline starts a fresh run.
                aux = c
                first_char = False
                continue
            if c == '\n':
                # Flush the pending run, then the completed line.
                if counter >= maxlinea and aux == '0':
                    linea += ','  # rest of line all zeros
                elif counter >= maxlinea and aux == 'F':
                    linea += '!'  # rest of line all ones
                elif counter > 20:
                    # NOTE(review): int((counter/20)*20) equals counter, not the
                    # nearest lower multiple of 20; looks suspicious but kept
                    # as-is to preserve behavior.
                    multi20 = int((counter/20)*20)
                    resto20 = counter % 20
                    linea += self._image_compression_char(multi20)
                    if resto20:
                        linea += self._image_compression_char(resto20)
                    linea += aux
                else:
                    linea += self._image_compression_char(counter) + aux
                counter = 1
                first_char = True
                if linea == previous_line:
                    code += ":"  # ':' = repeat previous line verbatim
                else:
                    code += linea
                previous_line = linea
                linea = ''
                continue
            if aux == c:
                counter += 1
            else:
                # Run ended: emit its length code plus the repeated digit.
                if counter >= 20:
                    multi20 = int((counter/20)*20)
                    resto20 = counter % 20
                    linea += self._image_compression_char(multi20)
                    if resto20:
                        linea += self._image_compression_char(resto20)
                    linea += aux
                else:
                    linea += self._image_compression_char(counter) + aux
                counter = 1
                aux = c
        return code
class Image(Printable):
    """Printable wrapper that renders a PIL image as a ZPL ^GF graphic field."""

    def __init__(self, image: PIL.Image, **kwargs):
        self._data = None  # lazily computed, cached ZPL image data
        self._i = _ImageHandler(image)
        super().__init__(data=self.img_data, **kwargs)

    @property
    def img_data(self):
        """Compressed ^GF payload for the wrapped image (computed once)."""
        if self._data is None:
            self._data = self._i.get_zpl_image_data()
        return self._data

    def to_zpl(self):
        """Return the ZPL fragment positioning and drawing this image."""
        data_len = len(self.img_data)
        i = self._i
        zpl = f'^FO{self.x},{self.y}'
        zpl += f'^GFA,{data_len},{i.total_bytes},{i.row_bytes}, {self.img_data}'
        return zpl
6c6c9cb1343a02d111ad40ddac0947dad0d82727 | 426 | py | Python | tests/test_advanced.py | eldavojohn/uspto-pymongo-interface | 8bc6e71ed4e450cd491dbcd7498e93ea848aa88e | [
"MIT"
] | 1 | 2018-10-08T00:45:10.000Z | 2018-10-08T00:45:10.000Z | tests/test_advanced.py | eldavojohn/uspto-pymongo-interface | 8bc6e71ed4e450cd491dbcd7498e93ea848aa88e | [
"MIT"
] | null | null | null | tests/test_advanced.py | eldavojohn/uspto-pymongo-interface | 8bc6e71ed4e450cd491dbcd7498e93ea848aa88e | [
"MIT"
] | null | null | null | from .context import uspto
import unittest
class AdvancedTestSuite(unittest.TestCase):
"""Advanced test cases."""
print "here we go"
if __name__ == '__main__':
unittest.main()
| 21.3 | 45 | 0.711268 | from .context import uspto
import unittest
class AdvancedTestSuite(unittest.TestCase):
"""Advanced test cases."""
print "here we go"
def test_first_patent(self):
uspto.print_first_patent()
def test_patent_crawl(self):
uspto.crawl_patents_with_aggregate()
def test_map_reduce_to_state(self):
uspto.map_reduce_applicant_by_state()
if __name__ == '__main__':
unittest.main()
| 154 | 0 | 81 |
fdf96a349d16ab90fa2755f1d68c9b95d03c626e | 591 | py | Python | src/bot.py | NNNMM12345/Discord_Sandbot1 | 76ed7a97efd1d6d0eb7efd9aff78985e63cfb6c6 | [
"MIT"
] | 4 | 2019-01-02T20:31:17.000Z | 2020-09-06T09:43:22.000Z | src/bot.py | NNNMM12345/Discord_Sandbot1 | 76ed7a97efd1d6d0eb7efd9aff78985e63cfb6c6 | [
"MIT"
] | 2 | 2018-03-23T00:45:17.000Z | 2018-03-27T15:44:13.000Z | src/bot.py | NNNMM12345/Discord_Sandbot1 | 76ed7a97efd1d6d0eb7efd9aff78985e63cfb6c6 | [
"MIT"
] | 2 | 2018-03-24T22:48:33.000Z | 2018-03-24T22:49:09.000Z | import discord
from discord.ext import commands
| 31.105263 | 80 | 0.588832 | import discord
from discord.ext import commands
class Bot(commands.Bot):
def __init__(self, extensions):
super().__init__(command_prefix='!', case_insensitive=True)
# Load extension
for extension in extensions:
try:
self.load_extension(extension)
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print ('Failed to load extension {}\n{}'.format(extension, exc))
async def on_ready(self):
print('Logged in as {} ({})'.format(self.user.name, self.user.id))
| 458 | 3 | 81 |
7c463d2d5d527f1ff779006b05efd417309a680e | 1,336 | py | Python | migrations.py | jerryrwu/alcazard | 1403da83cd1986e298db4266f1d1d9d63dc8ab89 | [
"Apache-2.0"
] | 2 | 2019-03-26T14:51:23.000Z | 2020-11-06T13:11:30.000Z | migrations.py | jerryrwu/alcazard | 1403da83cd1986e298db4266f1d1d9d63dc8ab89 | [
"Apache-2.0"
] | null | null | null | migrations.py | jerryrwu/alcazard | 1403da83cd1986e298db4266f1d1d9d63dc8ab89 | [
"Apache-2.0"
] | 1 | 2020-10-30T18:24:40.000Z | 2020-10-30T18:24:40.000Z | import logging
from playhouse import migrate
from alcazar_logging import BraceAdapter
logger = BraceAdapter(logging.getLogger(__name__))
| 33.4 | 96 | 0.717066 | import logging
from playhouse import migrate
from alcazar_logging import BraceAdapter
logger = BraceAdapter(logging.getLogger(__name__))
def _record_migration(db, name):
db.execute_sql('INSERT INTO migration (name) VALUES (?001)', (name,))
def _handle_table_creation(db, migrations):
with db.atomic():
for migration_name, _ in migrations:
_record_migration(db, migration_name)
def _handle_migrations(db, migrations, current_migrations):
migrator = migrate.SqliteMigrator(db)
for migration_name, migration_fn in migrations:
if migration_name in current_migrations:
continue
logger.info('Running migration {}', migration_name)
with db.atomic():
migration_fn(migrator)
_record_migration(db, migration_name)
def apply_migrations(db, models, migrations):
db.create_tables(models)
current_migrations = {t[0] for t in db.execute_sql('SELECT name FROM migration').fetchall()}
if len(current_migrations) == 0: # Initial table creation, just insert all
logger.info('Migrations table was just created, inserting all current migrations.')
_handle_table_creation(db, migrations)
else:
logger.debug('Migrations detected, updating state.')
_handle_migrations(db, migrations, current_migrations)
| 1,100 | 0 | 92 |
127add1ede6fc7e323acde12acceb115ae953001 | 463 | py | Python | tentacruel/heos_watcher/__init__.py | paulhoule/tentacruel | 600f39157598b762226a1c07d78966981da5f7f9 | [
"MIT"
] | null | null | null | tentacruel/heos_watcher/__init__.py | paulhoule/tentacruel | 600f39157598b762226a1c07d78966981da5f7f9 | [
"MIT"
] | 39 | 2019-01-12T00:00:48.000Z | 2019-05-08T02:06:36.000Z | tentacruel/heos_watcher/__init__.py | paulhoule/tentacruel | 600f39157598b762226a1c07d78966981da5f7f9 | [
"MIT"
] | null | null | null | from uuid import UUID
from aio_pika import Exchange
from tentacruel import HeosClientProtocol
HEOS_NS = UUID('003df636-ad90-11e9-aca1-9eb6d06a70c5')
attributes = {
"/player_volume_changed": {
"device_id": "pid",
"name": "heos.volume",
"subattributes": ["level", "mute"]
},
"/player_now_playing_progress": {
"device_id": "pid",
"name": "heos.progress",
"subattributes": ["cur_pos", "duration"]
}
}
| 24.368421 | 54 | 0.61987 | from uuid import UUID
from aio_pika import Exchange
from tentacruel import HeosClientProtocol
HEOS_NS = UUID('003df636-ad90-11e9-aca1-9eb6d06a70c5')
attributes = {
"/player_volume_changed": {
"device_id": "pid",
"name": "heos.volume",
"subattributes": ["level", "mute"]
},
"/player_now_playing_progress": {
"device_id": "pid",
"name": "heos.progress",
"subattributes": ["cur_pos", "duration"]
}
}
| 0 | 0 | 0 |
0af00527171f4fdb332cc8b7ff7cfbe536057074 | 1,257 | py | Python | demo/amrparsing/create_span_concept_dict.py | raosudha89/vowpal_wabbit | 03e973838e022149d802ec3f5e2817dcbc9019d5 | [
"BSD-3-Clause"
] | 2 | 2016-05-20T16:27:07.000Z | 2021-10-01T16:35:53.000Z | demo/amrparsing/create_span_concept_dict.py | Sandy4321/vowpal_wabbit-1 | 03e973838e022149d802ec3f5e2817dcbc9019d5 | [
"BSD-3-Clause"
] | null | null | null | demo/amrparsing/create_span_concept_dict.py | Sandy4321/vowpal_wabbit-1 | 03e973838e022149d802ec3f5e2817dcbc9019d5 | [
"BSD-3-Clause"
] | 1 | 2021-10-01T16:35:54.000Z | 2021-10-01T16:35:54.000Z | import sys
import cPickle as pickle
from collections import OrderedDict
argv = sys.argv[1:]
if len(argv) < 1:
print "usage: create_span_concept_dict.py <span_concept_dataset.p> <output_filename>"
sys.exit()
span_concept_dataset = pickle.load(open(argv[0], "rb"))
output_filename = argv[1]
output_file = open(output_filename, 'w')
span_concept_dict = {}
for id, span_concept_data in span_concept_dataset.iteritems():
for [span, pos, concept, name, ner, nx_root, concept_idx] in span_concept_data:
if span_concept_dict.has_key(span):
if span_concept_dict[span].has_key(concept_idx):
span_concept_dict[span][concept_idx] += 1
else:
span_concept_dict[span][concept_idx] = 1
else:
span_concept_dict[span] = {concept_idx:1}
#Sort the concepts for each span by their frequency
for span, concepts in span_concept_dict.iteritems():
span_concept_dict[span] = OrderedDict(sorted(concepts.items(), key=lambda concepts: concepts[1], reverse=True))
for span, concepts in span_concept_dict.iteritems():
line = span.replace(" ", "_") + " "
for (concept_idx, count) in concepts.iteritems():
line += str(concept_idx) + ":" + str(count) + " "
output_file.write(line+"\n")
pickle.dump(span_concept_dict, open(output_filename + ".p", "wb"))
| 34.916667 | 112 | 0.739857 | import sys
import cPickle as pickle
from collections import OrderedDict
argv = sys.argv[1:]
if len(argv) < 1:
print "usage: create_span_concept_dict.py <span_concept_dataset.p> <output_filename>"
sys.exit()
span_concept_dataset = pickle.load(open(argv[0], "rb"))
output_filename = argv[1]
output_file = open(output_filename, 'w')
span_concept_dict = {}
for id, span_concept_data in span_concept_dataset.iteritems():
for [span, pos, concept, name, ner, nx_root, concept_idx] in span_concept_data:
if span_concept_dict.has_key(span):
if span_concept_dict[span].has_key(concept_idx):
span_concept_dict[span][concept_idx] += 1
else:
span_concept_dict[span][concept_idx] = 1
else:
span_concept_dict[span] = {concept_idx:1}
#Sort the concepts for each span by their frequency
for span, concepts in span_concept_dict.iteritems():
span_concept_dict[span] = OrderedDict(sorted(concepts.items(), key=lambda concepts: concepts[1], reverse=True))
for span, concepts in span_concept_dict.iteritems():
line = span.replace(" ", "_") + " "
for (concept_idx, count) in concepts.iteritems():
line += str(concept_idx) + ":" + str(count) + " "
output_file.write(line+"\n")
pickle.dump(span_concept_dict, open(output_filename + ".p", "wb"))
| 0 | 0 | 0 |
8642c823acca4db3ebf91ee66a8a446df6a868a9 | 55,071 | py | Python | coverage_test_proxy.py | urbas/CoAPthon3 | f6a3c25cde5371fb003a18b94a8f8e8bee5c534a | [
"MIT"
] | 51 | 2018-01-26T16:47:38.000Z | 2022-01-18T08:44:20.000Z | coverage_test_proxy.py | urbas/CoAPthon3 | f6a3c25cde5371fb003a18b94a8f8e8bee5c534a | [
"MIT"
] | 28 | 2018-02-21T12:24:21.000Z | 2021-08-03T15:50:06.000Z | coverage_test_proxy.py | urbas/CoAPthon3 | f6a3c25cde5371fb003a18b94a8f8e8bee5c534a | [
"MIT"
] | 48 | 2018-01-29T08:55:53.000Z | 2021-10-17T00:38:19.000Z | # -*- coding: utf-8 -*-
from queue import Queue
import random
import socket
import threading
import unittest
from coapclient import HelperClient
from coapforwardproxy import CoAPForwardProxy
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
if __name__ == '__main__':
unittest.main()
| 38.727848 | 123 | 0.603348 | # -*- coding: utf-8 -*-
from queue import Queue
import random
import socket
import threading
import unittest
from coapclient import HelperClient
from coapforwardproxy import CoAPForwardProxy
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class Tests(unittest.TestCase):
def setUp(self):
self.server_address = ("127.0.0.1", 5683)
self.current_mid = random.randint(1, 1000)
self.server_mid = random.randint(1000, 2000)
self.server = CoAPServer("127.0.0.1", 5684)
self.server_thread = threading.Thread(target=self.server.listen, args=(1,))
self.server_thread.start()
self.proxy = CoAPForwardProxy("127.0.0.1", 5683)
self.proxy_thread = threading.Thread(target=self.proxy.listen, args=(1,))
self.proxy_thread.start()
self.queue = Queue()
def tearDown(self):
self.proxy.close()
self.proxy_thread.join(timeout=25)
self.proxy = None
self.server.close()
self.server_thread.join(timeout=25)
self.server = None
def _test_with_client(self, message_list): # pragma: no cover
client = HelperClient(self.server_address)
for message, expected in message_list:
if message is not None:
received_message = client.send_request(message)
if expected is not None:
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options:
self.assertEqual(len(received_message.options), len(expected.options))
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
client.stop()
def _test_with_client_observe(self, message_list): # pragma: no cover
client = HelperClient(self.server_address)
for message, expected in message_list:
if message is not None:
client.send_request(message, self.client_callback)
if expected is not None:
received_message = self.queue.get()
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options:
self.assertEqual(len(received_message.options), len(expected.options))
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
client.stop()
def client_callback(self, response):
print("Callback")
self.queue.put(response)
def _test_plugtest(self, message_list): # pragma: no cover
serializer = Serializer()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for message, expected in message_list:
if message is not None:
datagram = serializer.serialize(message)
sock.sendto(datagram, message.destination)
if expected is not None:
datagram, source = sock.recvfrom(4096)
received_message = serializer.deserialize(datagram, source)
print(received_message.pretty_print())
print(expected.pretty_print())
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, source)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options is not None:
self.assertEqual(received_message.options, expected.options)
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
sock.close()
def _test_datagram(self, message_list): # pragma: no cover
serializer = Serializer()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for message, expected in message_list:
if message is not None:
datagram, destination = message
sock.sendto(datagram, destination)
if expected is not None:
datagram, source = sock.recvfrom(4096)
received_message = serializer.deserialize(datagram, source)
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, source)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options is not None:
self.assertEqual(received_message.options, expected.options)
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
sock.close()
def test_get_forward(self):
print("TEST_GET_FORWARD")
req = Request()
req.code = defines.Codes.GET.number
req.proxy_uri = "coap://127.0.0.1:5684/basic"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Basic Resource"
exchange1 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1])
def test_separate(self):
print("TEST_SEPARATE")
req = Request()
req.code = defines.Codes.GET.number
req.proxy_uri = "coap://127.0.0.1:5684/separate"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["CON"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.max_age = 60
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.proxy_uri = "coap://127.0.0.1:5684/separate"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "POST"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.options = None
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.proxy_uri = "coap://127.0.0.1:5684/separate"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "PUT"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.options = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.DELETE.number
req.proxy_uri = "coap://127.0.0.1:5684/separate"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.DELETED.number
expected.token = None
exchange4 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4])
def test_post(self):
print("TEST_POST")
req = Request()
req.code = defines.Codes.POST.number
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "test"
req.add_if_none_match()
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res?id=1"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
expected.location_query = "id=1"
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.if_match = ["test", "not"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "test"
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.if_match = ["not"]
req.payload = "not"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.PRECONDITION_FAILED.number
expected.token = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.if_match = ["not"]
req.payload = "not"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.PRECONDITION_FAILED.number
expected.token = None
exchange4 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req._mid = self.current_mid
req.destination = self.server_address
req.add_if_none_match()
req.payload = "not"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.PRECONDITION_FAILED.number
expected.token = None
exchange5 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4, exchange5])
def test_post_block(self):
print("TEST_POST_BLOCK")
req = Request()
req.code = defines.Codes.POST.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \
"Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
"Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \
"Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \
"nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
"ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
"ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
"facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
"sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \
"Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \
" urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \
" Praesent tristique turpis dui, at ultri"
req.block1 = (1, 1, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
expected.token = None
expected.payload = None
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \
"Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
"Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \
"Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \
"nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
"ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
"ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
"facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
"sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \
"Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \
" urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \
" Praesent tristique turpis dui, at ultri"
req.block1 = (0, 1, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (0, 1, 1024)
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
"consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
"nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
"enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
req.block1 = (1, 1, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (1, 1, 64)
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
"consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
"nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
"enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
req.block1 = (3, 1, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
expected.token = None
expected.payload = None
exchange4 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
"consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
"nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
"enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
req.block1 = (2, 0, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "storage/new_res"
exchange5 = (req, expected)
self.current_mid += 1
self._test_plugtest([exchange1, exchange2, exchange3, exchange4, exchange5])
def test_get_block(self):
    """GET /big through the proxy with descending block2 sizes, then the maximum size.

    For sizes 512..16 the server still has more data (more-flag 1); at 1024
    the remaining payload fits, so the more-flag drops to 0.  Every response
    must advertise the full representation size (size2 == 2041).
    """
    print("TEST_GET_BLOCK")
    # (block number, requested block size, expected "more" flag)
    plan = [
        (0, 512, 1),
        (1, 256, 1),
        (2, 128, 1),
        (3, 64, 1),
        (4, 32, 1),
        (5, 16, 1),
        (6, 1024, 0),
        (7, 1024, 0),
    ]
    exchanges = []
    for number, size, more in plan:
        req = Request()
        req.code = defines.Codes.GET.number
        req.proxy_uri = "coap://127.0.0.1:5684/big"
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (number, 0, size)
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (number, more, size)
        expected.size2 = 2041
        exchanges.append((req, expected))
        self.current_mid += 1
    self._test_plugtest(exchanges)
def test_post_block_big(self):
    """POST /big through the proxy in block1 chunks of doubling size.

    Every non-final chunk must be answered with CONTINUE echoing the block1
    option; the final chunk (more-flag 0) completes the transfer with CHANGED
    and no block1 option in the expected response.
    """
    print("TEST_POST_BLOCK_BIG")
    # (payload chunk, block1 option carried by the request)
    chunks = [
        ("Lorem ipsum dolo", (0, 1, 16)),
        ("r sit amet, consectetur adipisci", (1, 1, 32)),
        ("ng elit. Sed ut ultrices ligula. Pellentesque purus augue, cursu", (2, 1, 64)),
        ("s ultricies est in, vehicula congue metus. Vestibulum vel justo lacinia, porttitor quam vitae, "
         "feugiat sapien. Quisque finibus, ", (3, 1, 128)),
        ("nisi vitae rhoncus malesuada, augue mauris dapibus tellus, sit amet venenatis libero"
         " libero sed lorem. In pharetra turpis sed eros porta mollis. Quisque dictum dolor nisl,"
         " imperdiet tincidunt augue malesuada vitae. Donec non felis urna. Suspendisse at hend", (4, 1, 256)),
        ("rerit ex, quis aliquet ante. Vivamus ultrices dolor at elit tincidunt, eget fringilla "
         "ligula vestibulum. In molestie sagittis nibh, ut efficitur tellus faucibus non. Maecenas "
         "posuere elementum faucibus. Morbi nisi diam, molestie non feugiat et, elementum eget magna."
         " Donec vel sem facilisis quam viverra ultrices nec eu lacus. Sed molestie nisi id ultrices "
         "interdum. Curabitur pharetra sed tellus in dignissim. Duis placerat aliquam metus, volutpat "
         "elementum augue aliquam a. Nunc sed dolor at orci maximus portt", (5, 1, 512)),
        ("itor ac sit amet eros. Mauris et nisi in tortor pharetra rhoncus sit amet hendrerit metus. "
         "Integer laoreet placerat cursus. Nam a nulla ex. Donec laoreet sagittis libero quis "
         "imperdiet. Vivamus facilisis turpis nec rhoncus venenatis. Duis pulvinar tellus vel quam "
         "maximus imperdiet. Mauris eget nibh orci. Duis ut cursus nibh. Nulla sed commodo elit. "
         "Suspendisse ac eros lacinia, mattis turpis at, porttitor justo. Vivamus molestie "
         "tincidunt libero. Etiam porttitor lacus odio, at lobortis tortor scelerisque nec. "
         "Nullam non ante vel nisi ultrices consectetur. Maecenas massa felis, tempor eget "
         "malesuada eget, pretium eu sapien. Vivamus dapibus ante erat, non faucibus orci sodales "
         "sit amet. Cras magna felis, sodales eget magna sed, eleifend rutrum ligula. Vivamus interdum "
         "enim enim, eu facilisis tortor dignissim quis. Ut metus nulla, mattis non lorem et, "
         "elementum ultrices orci. Quisque eleifend, arcu vitae ullamcorper pulvinar, ipsum ex "
         "sodales arcu, eget consectetur mauris metus ac tortor. Donec id sem felis. Maur", (6, 0, 1024)),
    ]
    exchanges = []
    for payload, block1 in chunks:
        is_last = block1[1] == 0
        req = Request()
        req.code = defines.Codes.POST.number
        req.proxy_uri = "coap://127.0.0.1:5684/big"
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = payload
        req.block1 = block1
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CHANGED.number if is_last else defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        if not is_last:
            expected.block1 = block1
        exchanges.append((req, expected))
        self.current_mid += 1
    self._test_plugtest(exchanges)
def test_options(self):
    """Verify the three ways of stripping an option from a request.

    Each exchange adds an ETag option to a POST and removes it again before
    sending (by option object, by option name, and via the property
    deleter), so the server must behave as if no ETag was ever present and
    answer CREATED every time.

    Fix: removed the unused local variable ``path`` present in the original.
    """
    print("TEST_OPTIONS")

    def _make_post():
        # Fresh POST carrying an ETag option that the exchange then removes.
        req = Request()
        req.code = defines.Codes.POST.number
        req.proxy_uri = "coap://127.0.0.1:5684/storage/new_res"
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        option = Option()
        option.number = defines.OptionRegistry.ETAG.number
        option.value = "test"
        req.add_option(option)
        return req, option

    def _expected_created():
        # Expected ACK/CREATED matched against the current MID.
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.payload = None
        expected.location_path = "storage/new_res"
        return expected

    # Removal by option object.
    req, option = _make_post()
    req.del_option(option)
    req.payload = "test"
    exchange1 = (req, _expected_created())
    self.current_mid += 1

    # Removal by option name.
    req, _ = _make_post()
    req.del_option_by_name("ETag")
    req.payload = "test"
    exchange2 = (req, _expected_created())
    self.current_mid += 1

    # Removal via the property deleter.
    req, _ = _make_post()
    del req.etag
    req.payload = "test"
    exchange3 = (req, _expected_created())
    self.current_mid += 1

    self._test_with_client([exchange1, exchange2, exchange3])
def test_content_type(self):
    """Exercise content negotiation on /storage/new_res and the /xml resource."""
    print("TEST_CONTENT_TYPE")

    def _request(method, uri, **attrs):
        # Build a CON request through the proxy; extra attributes are applied
        # in keyword order because several are properties with side effects.
        req = Request()
        req.code = method
        req.proxy_uri = uri
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        for name, value in attrs.items():
            setattr(req, name, value)
        return req

    def _response(code, **attrs):
        # Expected ACK matched against the request's MID.
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = code
        expected.token = None
        for name, value in attrs.items():
            setattr(expected, name, value)
        return expected

    new_res = "coap://127.0.0.1:5684/storage/new_res"
    xml_res = "coap://127.0.0.1:5684/xml"

    # Create the resource with an XML representation.
    req = _request(defines.Codes.POST.number, new_res,
                   payload="<value>test</value>",
                   content_type=defines.Content_types["application/xml"])
    expected = _response(defines.Codes.CREATED.number,
                         payload=None, location_path="storage/new_res")
    exchange1 = (req, expected)
    self.current_mid += 1

    # A plain GET returns the default rendering.
    req = _request(defines.Codes.GET.number, new_res)
    expected = _response(defines.Codes.CONTENT.number, payload="Basic Resource")
    exchange2 = (req, expected)
    self.current_mid += 1

    # Overwrite the payload.
    req = _request(defines.Codes.PUT.number, new_res, payload="test")
    expected = _response(defines.Codes.CHANGED.number, payload=None)
    exchange3 = (req, expected)
    self.current_mid += 1

    # GET again: the updated plain payload is served.
    req = _request(defines.Codes.GET.number, new_res)
    expected = _response(defines.Codes.CONTENT.number, payload="test")
    exchange4 = (req, expected)
    self.current_mid += 1

    # Accept: application/xml is honoured.
    req = _request(defines.Codes.GET.number, new_res,
                   accept=defines.Content_types["application/xml"])
    expected = _response(defines.Codes.CONTENT.number, payload="<value>test</value>")
    exchange5 = (req, expected)
    self.current_mid += 1

    # Accept: application/json is not offered by the resource.
    req = _request(defines.Codes.GET.number, new_res,
                   accept=defines.Content_types["application/json"])
    expected = _response(defines.Codes.NOT_ACCEPTABLE.number, payload=None)
    # expected.content_type = defines.Content_types["application/json"]
    exchange6 = (req, expected)
    self.current_mid += 1

    # The /xml resource answers with an explicit XML content type.
    req = _request(defines.Codes.GET.number, xml_res)
    expected = _response(defines.Codes.CONTENT.number,
                         payload="<value>0</value>",
                         content_type=defines.Content_types["application/xml"])
    print(expected.pretty_print())
    exchange7 = (req, expected)
    self.current_mid += 1

    self._test_with_client([exchange1, exchange2, exchange3, exchange4,
                            exchange5, exchange6, exchange7])
def test_ETAG(self):
    """ETag lifecycle on /etag: initial tag, bump on POST, VALID on matching GET."""
    print("TEST_ETAG")
    uri = "coap://127.0.0.1:5684/etag"

    def _request(method, **attrs):
        req = Request()
        req.code = method
        req.proxy_uri = uri
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        for name, value in attrs.items():
            setattr(req, name, value)
        return req

    def _response(code, **attrs):
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = code
        expected.token = None
        for name, value in attrs.items():
            setattr(expected, name, value)
        return expected

    # The fresh resource carries ETag "0".
    req = _request(defines.Codes.GET.number)
    expected = _response(defines.Codes.CONTENT.number,
                         payload="ETag resource", etag="0")
    exchange1 = (req, expected)
    self.current_mid += 1

    # A POST bumps the ETag to "1".
    req = _request(defines.Codes.POST.number, payload="test")
    expected = _response(defines.Codes.CHANGED.number, payload=None, etag="1")
    exchange2 = (req, expected)
    self.current_mid += 1

    # A conditional GET with the current ETag yields VALID and no payload.
    req = _request(defines.Codes.GET.number, etag="1")
    expected = _response(defines.Codes.VALID.number, payload=None, etag="1")
    exchange3 = (req, expected)
    self.current_mid += 1

    self._test_with_client([exchange1, exchange2, exchange3])
def test_child(self):
    """Full POST/GET/PUT/DELETE lifecycle of the dynamically created /child resource."""
    print("TEST_CHILD")
    uri = "coap://127.0.0.1:5684/child"

    def _request(method, **attrs):
        req = Request()
        req.code = method
        req.proxy_uri = uri
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        for name, value in attrs.items():
            setattr(req, name, value)
        return req

    def _response(code, **attrs):
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = code
        expected.token = None
        for name, value in attrs.items():
            setattr(expected, name, value)
        return expected

    # Create the child resource.
    req = _request(defines.Codes.POST.number, payload="test")
    expected = _response(defines.Codes.CREATED.number,
                         payload=None, location_path="child")
    exchange1 = (req, expected)
    self.current_mid += 1

    # Read it back.
    req = _request(defines.Codes.GET.number)
    expected = _response(defines.Codes.CONTENT.number, payload="test")
    exchange2 = (req, expected)
    self.current_mid += 1

    # Update it.
    req = _request(defines.Codes.PUT.number, payload="testPUT")
    expected = _response(defines.Codes.CHANGED.number, payload=None)
    exchange3 = (req, expected)
    self.current_mid += 1

    # Delete it.
    req = _request(defines.Codes.DELETE.number)
    expected = _response(defines.Codes.DELETED.number, payload=None)
    exchange4 = (req, expected)
    self.current_mid += 1

    self._test_with_client([exchange1, exchange2, exchange3, exchange4])
def test_not_found(self):
    """Methods on a missing resource: NOT_FOUND for GET/PUT/DELETE, METHOD_NOT_ALLOWED for POST."""
    print("TEST_not_found")
    uri = "coap://127.0.0.1:5684/not_found"

    def _request(method, **attrs):
        req = Request()
        req.code = method
        req.proxy_uri = uri
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        for name, value in attrs.items():
            setattr(req, name, value)
        return req

    def _response(code, token=None, **attrs):
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = code
        expected.token = token
        for name, value in attrs.items():
            setattr(expected, name, value)
        return expected

    # GET with an explicit token: the token must be echoed back.
    req = _request(defines.Codes.GET.number, token=100)
    expected = _response(defines.Codes.NOT_FOUND.number, token=100, payload=None)
    exchange1 = (req, expected)
    self.current_mid += 1

    # POST on the missing path is not allowed.
    req = _request(defines.Codes.POST.number, payload="testPOST")
    expected = _response(defines.Codes.METHOD_NOT_ALLOWED.number)
    exchange2 = (req, expected)
    self.current_mid += 1

    # PUT on the missing path.
    req = _request(defines.Codes.PUT.number, payload="testPUT")
    expected = _response(defines.Codes.NOT_FOUND.number, payload=None)
    exchange3 = (req, expected)
    self.current_mid += 1

    # DELETE on the missing path.
    req = _request(defines.Codes.DELETE.number)
    expected = _response(defines.Codes.NOT_FOUND.number, payload=None)
    exchange4 = (req, expected)
    self.current_mid += 1

    self._test_with_client([exchange1, exchange2, exchange3, exchange4])
def test_invalid(self):
    """Send malformed raw datagrams; each must be rejected with RST/BAD_REQUEST."""
    print("TEST_INVALID")
    # Each entry is a raw datagram that is broken in some way (the original
    # labels are kept: version, version, code, option, payload marker).
    malformed = [
        b'\x00\x01\x8c\xda',  # version
        b'\x40',  # version
        b'\x40\x05\x8c\xda',  # code
        b'\x40\x01\x8c\xda\x94',  # option
        b'\x40\x02\x8c\xda\x75\x62\x61\x73\x69\x63\xff',  # payload marker
    ]
    exchanges = []
    for datagram in malformed:
        expected = Response()
        expected.type = defines.Types["RST"]
        expected._mid = None
        expected.code = defines.Codes.BAD_REQUEST.number
        exchanges.append(((datagram, self.server_address), expected))
    self._test_datagram(exchanges)
def test_post_block_big_client(self):
    """POST a payload large enough that the client library must switch to blockwise transfer."""
    print("TEST_POST_BLOCK_BIG_CLIENT")
    large_payload = (
        "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. "
        "Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. "
        "Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. "
        "Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus "
        "nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac "
        "ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue "
        "ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, "
        "facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id "
        "sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. "
        "Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit"
        " urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa."
        " Praesent tristique turpis dui, at ultricies lorem fermentum at. Vivamus sit amet ornare neque, "
        "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo "
        "consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque "
        "nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci "
        "enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
        "Vivamus ut odio ac odio malesuada accumsan. Aenean vehicula diam at tempus ornare. Phasellus "
        "dictum mauris a mi consequat, vitae mattis nulla fringilla. Ut laoreet tellus in nisl efficitur,"
        " a luctus justo tempus. Fusce finibus libero eget velit finibus iaculis. Morbi rhoncus purus "
        "vel vestibulum ullamcorper. Sed ac metus in urna fermentum feugiat. Nulla nunc diam, sodales "
        "aliquam mi id, varius porta nisl. Praesent vel nibh ac turpis rutrum laoreet at non odio. "
        "Phasellus ut posuere mi. Suspendisse malesuada velit nec mauris convallis porta. Vivamus "
        "sed ultrices sapien, at cras amet."
    )
    req = Request()
    req.code = defines.Codes.POST.number
    req.proxy_uri = "coap://127.0.0.1:5684/big"
    req.type = defines.Types["CON"]
    req._mid = self.current_mid
    req.destination = self.server_address
    req.payload = large_payload
    expected = Response()
    expected.type = defines.Types["ACK"]
    expected._mid = None
    expected.code = defines.Codes.CHANGED.number
    expected.token = None
    expected.payload = None
    self.current_mid += 1
    self._test_with_client([(req, expected)])
def test_observe_client(self):
    """Register an observation on /basic and expect an initial CONTENT notification."""
    print("TEST_OBSERVE_CLIENT")

    def _build(cls, fields):
        # Attributes are applied in declaration order; several are properties
        # with side effects, so the ordering is deliberate.
        msg = cls()
        for name, value in fields.items():
            setattr(msg, name, value)
        return msg

    req = _build(Request, {
        "code": defines.Codes.GET.number,
        "proxy_uri": "coap://127.0.0.1:5684/basic",
        "type": defines.Types["CON"],
        "_mid": self.current_mid,
        "destination": self.server_address,
        "observe": 0,
    })
    expected = _build(Response, {
        "type": defines.Types["ACK"],
        "_mid": None,
        "code": defines.Codes.CONTENT.number,
        "token": None,
        "payload": None,
    })
    self.current_mid += 1
    self._test_with_client_observe([(req, expected)])
def test_duplicate(self):
    """The same CON GET sent twice must be deduplicated and acknowledged identically."""
    print("TEST_DUPLICATE")

    def _build(cls, fields):
        msg = cls()
        for name, value in fields.items():
            setattr(msg, name, value)
        return msg

    req = _build(Request, {
        "code": defines.Codes.GET.number,
        "proxy_uri": "coap://127.0.0.1:5684/basic",
        "type": defines.Types["CON"],
        "_mid": self.current_mid,
        "destination": self.server_address,
    })
    expected = _build(Response, {
        "type": defines.Types["ACK"],
        "_mid": self.current_mid,
        "code": defines.Codes.CONTENT.number,
        "token": None,
    })
    self.current_mid += 1
    # Same (request, expectation) pair twice: the second is the duplicate.
    self._test_plugtest([(req, expected), (req, expected)])
def test_duplicate_not_completed(self):
    """Retransmission while the /long resource is still processing.

    The first send gets no immediate reply, the retransmission gets an empty
    ACK (code None), and the actual CONTENT arrives later as a separate CON.
    """
    print("TEST_DUPLICATE_NOT_COMPLETED")

    def _build(cls, fields):
        msg = cls()
        for name, value in fields.items():
            setattr(msg, name, value)
        return msg

    req = _build(Request, {
        "code": defines.Codes.GET.number,
        "proxy_uri": "coap://127.0.0.1:5684/long",
        "type": defines.Types["CON"],
        "_mid": self.current_mid,
        "destination": self.server_address,
    })
    empty_ack = _build(Response, {
        "type": defines.Types["ACK"],
        "_mid": self.current_mid,
        "code": None,
        "token": None,
    })
    separate_response = _build(Response, {
        "type": defines.Types["CON"],
        "_mid": None,
        "code": defines.Codes.CONTENT.number,
        "token": None,
    })
    self.current_mid += 1
    self._test_plugtest([(req, None), (req, empty_ack), (None, separate_response)])
if __name__ == '__main__':
    # Run the plugtest suite when this module is executed directly.
    unittest.main()
| 53,875 | 10 | 644 |
460b7bbb44e83bb656c795546fd27222fb6d006d | 1,948 | py | Python | tests/run_trading_bot_test.py | Ricky294/cryptodata | da9cbcfdfa155830b3e877e9d5736dace17ced88 | [
"MIT"
] | null | null | null | tests/run_trading_bot_test.py | Ricky294/cryptodata | da9cbcfdfa155830b3e877e9d5736dace17ced88 | [
"MIT"
] | null | null | null | tests/run_trading_bot_test.py | Ricky294/cryptodata | da9cbcfdfa155830b3e877e9d5736dace17ced88 | [
"MIT"
] | null | null | null | from datetime import datetime
import pandas as pd
import websocket
from tests import tests_path
from crypto_data.shared.utils import exclude_values
from crypto_data.binance.extract import get_candles, get_latest_candle_timestamp
from crypto_data.binance.schema import (
OPEN_TIME,
OPEN_PRICE,
CLOSE_PRICE,
HIGH_PRICE,
LOW_PRICE,
VOLUME,
COLUMNS,
)
from crypto_data.shared.candle_db import CandleDB
# candle_stream(
# symbol="btcusdt",
# interval="1h",
# candles=candles,
# on_open=on_open,
# on_close=on_close,
# on_candle=on_candle,
# on_candle_close=on_candle_close,
# )
| 23.190476 | 87 | 0.676078 | from datetime import datetime
import pandas as pd
import websocket
from tests import tests_path
from crypto_data.shared.utils import exclude_values
from crypto_data.binance.extract import get_candles, get_latest_candle_timestamp
from crypto_data.binance.schema import (
OPEN_TIME,
OPEN_PRICE,
CLOSE_PRICE,
HIGH_PRICE,
LOW_PRICE,
VOLUME,
COLUMNS,
)
from crypto_data.shared.candle_db import CandleDB
def on_open(_: websocket.WebSocketApp):
    """Websocket callback: log that the candle stream connection has opened."""
    print("Stream open...")
def on_close(_: websocket.WebSocketApp):
    """Websocket callback: log that the candle stream connection has closed."""
    print("Stream close...")
def on_candle(_: websocket.WebSocketApp, candle: dict):
    """Websocket callback: print every in-progress candle update as received."""
    print(candle)
def on_candle_close(_: websocket.WebSocketApp, candles: pd.DataFrame):
    """Websocket callback: print the accumulated candle DataFrame when a candle closes."""
    print(candles)
def test_get_candles():
    """Download btcusdt 1h futures candles and validate their range and columns.

    Checks that the first/last open times match the exchange's first candle
    and the most recent fully closed hour, and that exactly the requested
    columns (and no others) are present.
    """
    columns_to_include = [
        OPEN_TIME,
        OPEN_PRICE,
        CLOSE_PRICE,
        HIGH_PRICE,
        LOW_PRICE,
        VOLUME,
    ]
    db = CandleDB(f"{tests_path}/data/binance_candles.db")
    candles = get_candles(
        symbol="btcusdt",
        interval="1h",
        market="futures",
        columns_to_include=columns_to_include,
        db=db,
    )
    first_candle_timestamp = get_latest_candle_timestamp(
        "btcusdt", "1h", "futures", db_candles=None
    )
    # The most recent fully closed hourly candle opened one hour before the
    # top of the current hour.  Fix: subtract 3600s from the current hour's
    # epoch instead of constructing datetime(..., now.hour - 1), which raised
    # ValueError between midnight and 01:00 (hour would be -1).
    now = datetime.now()
    last_candle_timestamp = (
        int(datetime(now.year, now.month, now.day, now.hour).timestamp()) - 3600
    )
    assert candles[OPEN_TIME].iat[0] == first_candle_timestamp
    assert candles[OPEN_TIME].iat[len(candles[OPEN_TIME]) - 1] == last_candle_timestamp
    assert set(columns_to_include).issubset(candles.columns)
    assert not set(exclude_values(COLUMNS, columns_to_include)).issubset(
        candles.columns
    )
# candle_stream(
# symbol="btcusdt",
# interval="1h",
# candles=candles,
# on_open=on_open,
# on_close=on_close,
# on_candle=on_candle,
# on_candle_close=on_candle_close,
# )
| 1,168 | 0 | 115 |
218af6b1259c77c1d048dfd3081572f8d15aeaa6 | 627 | py | Python | NiLBS/body/human_body.py | joemarch010/NILBS | c6568818ec8acdb0fe4bd8d197278f0abb361d0b | [
"MIT"
] | 2 | 2021-04-01T07:55:11.000Z | 2021-12-10T02:57:59.000Z | NiLBS/body/human_body.py | joemarch010/NILBS | c6568818ec8acdb0fe4bd8d197278f0abb361d0b | [
"MIT"
] | null | null | null | NiLBS/body/human_body.py | joemarch010/NILBS | c6568818ec8acdb0fe4bd8d197278f0abb361d0b | [
"MIT"
] | null | null | null |
import numpy as np
from NiLBS.skinning.util import redistribute_weights
| 28.5 | 96 | 0.6874 |
import numpy as np
from NiLBS.skinning.util import redistribute_weights
class HumanBody:
    """Body model wrapper exposing template mesh, joints, and skinning weights.

    NOTE(review): indentation was lost in this dump; the nesting below is a
    best-effort reconstruction — confirm against the original source.
    """

    def __init__(self, body_dict=None, body_dict_path=None, active_bones=None):
        """Load the body either from a dict or from a file path.

        :param body_dict: mapping with keys 'v_template', 'weights', 'f',
            'J' and 'kintree_table' (presumably SMPL-style — TODO confirm).
        :param body_dict_path: optional path loaded via np.load; when given
            it overrides body_dict.
        :param active_bones: optional bone subset; when given, skinning
            weights are redistributed onto only these bones.
        """
        if body_dict_path is not None:
            body_dict = np.load(body_dict_path)
        self.vertex_template = body_dict['v_template']  # template mesh vertices
        self.weights = body_dict['weights']  # per-vertex skinning weights
        self.faces = body_dict['f']  # mesh face indices
        self.joints = body_dict['J']  # joint positions
        self.bone_hierarchy = body_dict['kintree_table']  # bone parent table
        if active_bones is not None:
            self.weights = redistribute_weights(self.weights, self.bone_hierarchy, active_bones)
d022948a48bb25e619e60df0d1f33e7b1a71f499 | 2,689 | py | Python | server/video_process.py | brycecorbitt/FlashGuard | e018595fa7474065b6359c95b6ee78d2dfee24d5 | [
"MIT"
] | null | null | null | server/video_process.py | brycecorbitt/FlashGuard | e018595fa7474065b6359c95b6ee78d2dfee24d5 | [
"MIT"
] | null | null | null | server/video_process.py | brycecorbitt/FlashGuard | e018595fa7474065b6359c95b6ee78d2dfee24d5 | [
"MIT"
] | 2 | 2019-05-29T15:18:16.000Z | 2020-06-02T16:08:44.000Z | import cv2
import youtube_dl
import numpy as np
import os
import time
FLASH_MINIMUM = 3
tmp_dir = 'temp/'
ex = {'format': 'worstvideo[vcodec^=avc1][fps=30]/worst[vcodec^=avc1][fps=30]/worstvideo[vcodec=vp9][fps=30]/worst[vcodec=vp9][fps=30]', 'outtmpl': 'temp/temp.%(ext)s', 'recode_video': 'webm'}
ytdl = youtube_dl.YoutubeDL(ex)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
# https://www.youtube.com/watch?v=atkD-beZ9oI # baseline test
# https://www.youtube.com/watch?v=Yw_YDvLWKnY # surreal video
# https://www.youtube.com/watch?v=OCpzajWSp6I # mlg video
# https://www.youtube.com/watch?v=FVY5uZ18-x8 #pokemon video
if __name__ == '__main__':
main()
| 24.225225 | 192 | 0.605058 | import cv2
import youtube_dl
import numpy as np
import os
import time
# Minimum number of detected flashes before a clip can be flagged dangerous
# (consumed by is_dangerous()).
FLASH_MINIMUM = 3
# Working directory holding the single video currently under analysis.
tmp_dir = 'temp/'
# youtube-dl options: prefer the smallest available 30 fps stream and recode
# the download to webm, written to temp/temp.<ext>.
ex = {'format': 'worstvideo[vcodec^=avc1][fps=30]/worst[vcodec^=avc1][fps=30]/worstvideo[vcodec=vp9][fps=30]/worst[vcodec=vp9][fps=30]', 'outtmpl': 'temp/temp.%(ext)s', 'recode_video': 'webm'}
ytdl = youtube_dl.YoutubeDL(ex)
# Ensure the temp directory exists at import time.
if not os.path.isdir(tmp_dir):
    os.mkdir(tmp_dir)
def clear_tmp():
    """Remove every entry currently inside the temp download directory."""
    for entry in os.listdir(tmp_dir):
        os.remove(tmp_dir + entry)
def dl_tmp(url):
    """Download *url* into the temp dir; return 0 on success, 1 on failure."""
    clear_tmp()
    try:
        ytdl.download([url])
    except youtube_dl.utils.DownloadError:
        return 1
    return 0
def get_tmp():
    """Return the path of the single file in the temp dir, or None when the
    directory does not hold exactly one entry."""
    entries = os.listdir(tmp_dir)
    return tmp_dir + entries[0] if len(entries) == 1 else None
def process_video(path):
    """Scan the video at *path* for abrupt colour/brightness flashes.

    Keeps a short sliding window of frames, measures the colour change across
    the window, records the frame gap preceding each detected flash, and
    delegates the final verdict to is_dangerous().

    :param path: path to a local video file readable by OpenCV
    :return: result of is_dangerous() on the collected flash gaps
    """
    flash_distance = 0          # frames elapsed since the last detected flash
    cap = cv2.VideoCapture(path)
    frames = []                 # sliding window of recent frames
    flashes = []                # frame gap preceding each detected flash
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            # only start counting distance once the first flash has been seen
            if flashes:
                flash_distance += 1
            # 'not ret' is redundant here (guarded by 'if ret:' above);
            # the waitKey check lets an interactive run abort with 'q'
            if not ret or cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # int16 keeps the subtraction below from wrapping (uint8 underflow)
            frames.append(frame.astype(np.int16))
            if len(frames) > 3:
                frames.pop(0)
                # get the net color change in pixel values
                difference = np.abs(np.subtract(frames[2], frames[0]))
                avg_color_change = np.mean(np.sum(difference, axis=2))
                # average = np.mean(difference)
                if avg_color_change > 200:
                    flashes.append(flash_distance)
                    flash_distance = 0
                # NOTE(review): this second pop shrinks the window to 2, so a
                # new comparison only happens every other frame — presumably
                # deliberate subsampling; confirm with the author.
                frames.pop(0)
        else:
            # end of stream: releasing makes cap.isOpened() false, ending the loop
            cap.release()
    return is_dangerous(flashes)
def process_video_url(link):
    """Download the video at *link* and run flash analysis on it.

    Returns None (after printing a notice) when the download fails.
    """
    if dl_tmp(link) != 0:
        print("Video could not be downloaded properly")
        return
    return process_video(get_tmp())
def is_dangerous(flash_data, flash_minimum=None):
    """Decide whether a recorded flash pattern is likely dangerous.

    A pattern is flagged when at least *flash_minimum* flashes were seen and
    they occur, on average, more often than once per frame of gap between
    them (num_flashes / mean(gap) > 1).

    :param flash_data: list of frame gaps preceding each detected flash
    :param flash_minimum: minimum flash count to consider; defaults to the
        module-level FLASH_MINIMUM (generalized from the previously
        hard-coded constant so callers can tune sensitivity)
    :return: bool
    """
    num_flashes = len(flash_data)
    if num_flashes == 0:
        return False
    if flash_minimum is None:
        flash_minimum = FLASH_MINIMUM
    avg_flash_distance = np.mean(flash_data)
    # print(num_flashes)
    # print(avg_flash_distance)
    if (num_flashes >= flash_minimum) and (num_flashes / avg_flash_distance) > 1:
        return True
    return False
# https://www.youtube.com/watch?v=atkD-beZ9oI # baseline test
# https://www.youtube.com/watch?v=Yw_YDvLWKnY # surreal video
# https://www.youtube.com/watch?v=OCpzajWSp6I # mlg video
# https://www.youtube.com/watch?v=FVY5uZ18-x8 #pokemon video
def main():
    """Ad-hoc driver: analyze one known video and print verdict and timing."""
    t0 = time.time()
    verdict = process_video_url('https://www.youtube.com/watch?v=Yw_YDvLWKnY')
    took = time.time() - t0
    print(verdict)
    print(took)
if __name__ == '__main__':
main()
| 1,848 | 0 | 161 |
8a02077158eed84ba37ce6ea33e5f141cd7a9c06 | 15,524 | py | Python | slim/nets/inception_v4.py | svedi/tensorflow_models | 0d58acf27841c5591b7caad8fc7e3498c219f382 | [
"Apache-2.0"
] | null | null | null | slim/nets/inception_v4.py | svedi/tensorflow_models | 0d58acf27841c5591b7caad8fc7e3498c219f382 | [
"Apache-2.0"
] | null | null | null | slim/nets/inception_v4.py | svedi/tensorflow_models | 0d58acf27841c5591b7caad8fc7e3498c219f382 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception V4 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
def block_inception_a(inputs, scope=None, reuse=None):
  """Builds Inception-A block for Inception v4 network.

  Four parallel branches (1x1, 1x1->3x3, 1x1->3x3->3x3, avgpool->1x1)
  concatenated along the channel axis.

  Args:
    inputs: a 4-D NHWC tensor.
    scope: Optional variable_scope (defaults to 'BlockInceptionA').
    reuse: whether or not the block's variables should be reused.

  Returns:
    The depth-concatenation of the four branches.
  """
  # By default use stride=1 and SAME padding
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
        branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
        branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
      with tf.variable_scope('Branch_3'):
        branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
        branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
      # concatenate along the channel axis (axis 3, NHWC)
      return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def block_reduction_a(inputs, scope=None, reuse=None):
  """Builds Reduction-A block for Inception v4 network.

  Halves the spatial resolution (all three branches use stride 2) while
  increasing depth by concatenating the branches.

  Args:
    inputs: a 4-D NHWC tensor.
    scope: Optional variable_scope (defaults to 'BlockReductionA').
    reuse: whether or not the block's variables should be reused.

  Returns:
    The depth-concatenation of the three branches.
  """
  # By default use stride=1 and SAME padding
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
                               scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
        branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
                               padding='VALID', scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                   scope='MaxPool_1a_3x3')
      # concatenate along the channel axis (axis 3, NHWC)
      return tf.concat([branch_0, branch_1, branch_2], 3)
def block_inception_b(inputs, scope=None, reuse=None):
"""Builds Inception-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat([branch_0, branch_1, branch_2], 3)
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat([
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')], 3)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat([
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')], 3)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
  """Creates the Inception V4 network up to the given final endpoint.

  Args:
    inputs: a 4-D tensor of size [batch_size, height, width, 3].
    final_endpoint: specifies the endpoint to construct the network up to.
      It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
      'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
      'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
      'Mixed_7d']
    scope: Optional variable_scope.

  Returns:
    logits: the logits outputs of the model.
    end_points: the set of end_points from the inception model.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
  """
  end_points = {}

  def add_and_check_final(name, net):
    # Restored nested helper: it was missing from this copy, so every call
    # below raised NameError. Records the endpoint and reports whether
    # construction should stop here.
    end_points[name] = net
    return name == final_endpoint

  with tf.variable_scope(scope, 'InceptionV4', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # 299 x 299 x 3
      net = slim.conv2d(inputs, 32, [3, 3], stride=2,
                        padding='VALID', scope='Conv2d_1a_3x3')
      if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
      # 149 x 149 x 32
      net = slim.conv2d(net, 32, [3, 3], padding='VALID',
                        scope='Conv2d_2a_3x3')
      if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
      # 147 x 147 x 32
      net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
      if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
      # 147 x 147 x 64
      with tf.variable_scope('Mixed_3a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_0a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
                                 scope='Conv2d_0a_3x3')
        net = tf.concat([branch_0, branch_1], 3)
        if add_and_check_final('Mixed_3a', net): return net, end_points
      # 73 x 73 x 160
      with tf.variable_scope('Mixed_4a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
          branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
                                 scope='Conv2d_1a_3x3')
        net = tf.concat([branch_0, branch_1], 3)
        if add_and_check_final('Mixed_4a', net): return net, end_points
      # 71 x 71 x 192
      with tf.variable_scope('Mixed_5a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat([branch_0, branch_1], 3)
        if add_and_check_final('Mixed_5a', net): return net, end_points
      # 35 x 35 x 384
      # 4 x Inception-A blocks
      for idx in range(4):
        block_scope = 'Mixed_5' + chr(ord('b') + idx)
        net = block_inception_a(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
      # 35 x 35 x 384
      # Reduction-A block
      net = block_reduction_a(net, 'Mixed_6a')
      if add_and_check_final('Mixed_6a', net): return net, end_points
      # 17 x 17 x 1024
      # 7 x Inception-B blocks
      for idx in range(7):
        block_scope = 'Mixed_6' + chr(ord('b') + idx)
        net = block_inception_b(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
      # 17 x 17 x 1024
      # Reduction-B block
      net = block_reduction_b(net, 'Mixed_7a')
      if add_and_check_final('Mixed_7a', net): return net, end_points
      # 8 x 8 x 1536
      # 3 x Inception-C blocks
      for idx in range(3):
        block_scope = 'Mixed_7' + chr(ord('b') + idx)
        net = block_inception_c(net, block_scope)
        if add_and_check_final(block_scope, net): return net, end_points
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v4(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionV4',
create_aux_logits=True):
"""Creates the Inception V4 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxilliary logits.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v4_base(inputs, scope=scope)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# Auxiliary Head logits
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
# 17 x 17 x 1024
aux_logits = end_points['Mixed_6h']
aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
scope='Conv2d_1b_1x1')
aux_logits = slim.conv2d(aux_logits, 768,
aux_logits.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a')
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes,
activation_fn=None,
scope='Aux_logits')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
# 8 x 8 x 1536
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a')
# 1 x 1 x 1536
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
net = slim.flatten(net, scope='PreLogitsFlatten')
end_points['PreLogitsFlatten'] = net
# 1536
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_v4.default_image_size = 299
inception_v4_arg_scope = inception_utils.inception_arg_scope
| 47.91358 | 80 | 0.622327 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception V4 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
def block_inception_a(inputs, scope=None, reuse=None):
"""Builds Inception-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def block_reduction_a(inputs, scope=None, reuse=None):
"""Builds Reduction-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat([branch_0, branch_1, branch_2], 3)
def block_inception_b(inputs, scope=None, reuse=None):
"""Builds Inception-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat([branch_0, branch_1, branch_2], 3)
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat([
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')], 3)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat([
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')], 3)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
"""Creates the Inception V4 network up to the given final endpoint.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
final_endpoint: specifies the endpoint to construct the network up to.
It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
'Mixed_7d']
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
"""
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionV4', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 299 x 299 x 3
net = slim.conv2d(inputs, 32, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 149 x 149 x 32
net = slim.conv2d(net, 32, [3, 3], padding='VALID',
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 147 x 147 x 64
with tf.variable_scope('Mixed_3a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_0a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
scope='Conv2d_0a_3x3')
net = tf.concat([branch_0, branch_1], 3)
if add_and_check_final('Mixed_3a', net): return net, end_points
# 73 x 73 x 160
with tf.variable_scope('Mixed_4a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
net = tf.concat([branch_0, branch_1], 3)
if add_and_check_final('Mixed_4a', net): return net, end_points
# 71 x 71 x 192
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1], 3)
if add_and_check_final('Mixed_5a', net): return net, end_points
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):
block_scope = 'Mixed_5' + chr(ord('b') + idx)
net = block_inception_a(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net, 'Mixed_6a')
if add_and_check_final('Mixed_6a', net): return net, end_points
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):
block_scope = 'Mixed_6' + chr(ord('b') + idx)
net = block_inception_b(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net, 'Mixed_7a')
if add_and_check_final('Mixed_7a', net): return net, end_points
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
block_scope = 'Mixed_7' + chr(ord('b') + idx)
net = block_inception_c(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v4(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionV4',
create_aux_logits=True):
"""Creates the Inception V4 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxilliary logits.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v4_base(inputs, scope=scope)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# Auxiliary Head logits
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
# 17 x 17 x 1024
aux_logits = end_points['Mixed_6h']
aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
scope='Conv2d_1b_1x1')
aux_logits = slim.conv2d(aux_logits, 768,
aux_logits.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a')
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes,
activation_fn=None,
scope='Aux_logits')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
# 8 x 8 x 1536
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a')
# 1 x 1 x 1536
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
net = slim.flatten(net, scope='PreLogitsFlatten')
end_points['PreLogitsFlatten'] = net
# 1536
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_v4.default_image_size = 299
inception_v4_arg_scope = inception_utils.inception_arg_scope
| 75 | 0 | 25 |
c7d0a67ce9575c64567f250d546dfe7a3a82b6d0 | 6,625 | py | Python | pyopentsdb/query.py | mikecokina/pyopentsdb | b8d78e8f42aed4ebbd6ac3aff925071de41d6b52 | [
"MIT"
] | 2 | 2018-05-09T08:34:30.000Z | 2018-09-25T22:42:09.000Z | pyopentsdb/query.py | mikecokina/pyopentsdb | b8d78e8f42aed4ebbd6ac3aff925071de41d6b52 | [
"MIT"
] | 2 | 2018-12-24T10:51:30.000Z | 2019-01-21T13:55:11.000Z | pyopentsdb/query.py | mikecokina/pyopentsdb | b8d78e8f42aed4ebbd6ac3aff925071de41d6b52 | [
"MIT"
] | null | null | null | from queue import Queue
from queue import Empty
from threading import Thread
from pyopentsdb import errors
from pyopentsdb.utils import request_post
from pyopentsdb.conf import QueryPointer
class IterableQueue(object):
    """ Transform standard python Queue instance to iterable one"""
    def __init__(self, source_queue):
        """
        :param source_queue: queue.Queue, (mandatory)
        """
        self.source_queue = source_queue

    def __iter__(self):
        """Yield queued items until the queue is (momentarily) empty.

        Restored: without __iter__ the class fulfils nothing of its docstring
        and iterating an instance raises TypeError — multiquery consumes
        results via ``for val in IterableQueue(result_queue)``.
        """
        while True:
            try:
                yield self.source_queue.get_nowait()
            except Empty:
                return
def tsdb_query_metrics_validation(**kwargs):
    """
    Validate that query kwargs carry a well-formed 'metrics' list as required
    by the OpenTSDB HTTP API: every metric needs 'metric' and 'aggregator',
    and every filter (when present) needs 'type', 'tagk' and 'filter'.

    :param kwargs: dict
    :raise errors.MissingArgumentError: on any missing required field
    """
    metrics = kwargs.get('metrics')
    if not metrics:
        raise errors.MissingArgumentError("Missing argument 'metrics' in query")
    for metric in metrics:
        # aggregator is mandatory alongside the metric name itself
        if not metric.get('metric') or not metric.get('aggregator'):
            raise errors.MissingArgumentError("Missing argument 'metric' or 'aggregator' in metrics object")
        # filters are optional, but when supplied each must be complete
        # ('filter' may legitimately be falsy, hence the explicit None check)
        for metric_filter in metric.get('filters') or ():
            if not metric_filter.get('type') or not metric_filter.get('tagk') or \
                    metric_filter.get('filter') is None:
                raise errors.MissingArgumentError(
                    "Missing argument 'type', 'tagk' or 'filter' in filters object")
def query(host, r_session, **kwargs):
    """
    Execute a single OpenTSDB ``/api/query`` POST request.

    :param host: str; OpenTSDB host
    :param r_session: requests.Session
    :param kwargs: dict; must contain 'start' and 'metrics'; 'start'/'end'
        are expected to be datetime-like (``.timestamp()`` is called on
        them); recognised keys are popped and mapped onto OpenTSDB query
        fields, anything left over is forwarded to the HTTP layer
    :return: dict
    :raise errors.MissingArgumentError: when 'start' or a mandatory
        metrics field is missing
    """
    # todo: make sure kwargs of tsdb are not colliding kwargs of requests
    try:
        start = kwargs.pop('start')
    except KeyError:
        raise errors.MissingArgumentError("'start' is a required argument")
    try:
        tsdb_query_metrics_validation(**kwargs)
    except errors.MissingArgumentError as e:
        # re-raise with the original message so the failure surfaces here
        raise errors.MissingArgumentError(str(e))
    # general driven arguments
    end = kwargs.pop('end', None)
    ms_resolution = bool(kwargs.pop('ms', False))
    show_tsuids = bool(kwargs.pop('show_tsuids', False))
    no_annotations = bool(kwargs.pop('no_annotations', False))
    global_annotations = bool(kwargs.pop('global_annotations', False))
    show_summary = bool(kwargs.pop('show_summary', False))
    show_stats = bool(kwargs.pop('show_stats', False))
    show_query = bool(kwargs.pop('show_query', False))
    delete_match = bool(kwargs.pop('delete', False))
    timezone = kwargs.pop('timezone', 'UTC')
    use_calendar = bool(kwargs.pop('use_calendar', False))
    queries = kwargs.pop('metrics')
    params = {
        # NOTE(review): 'start' is sent as a string while 'end' below is an
        # int — presumably OpenTSDB accepts both; confirm before unifying.
        'start': '{}'.format(int(start.timestamp())),
        'msResolution': ms_resolution,
        'showTSUIDs': show_tsuids,
        'noAnnotations': no_annotations,
        'globalAnnotations': global_annotations,
        'showSummary': show_summary,
        'showStats': show_stats,
        'showQuery': show_query,
        'delete': delete_match,
        'timezone': timezone,
        'useCalendar': use_calendar,
        'queries': list(),
    }
    if end:
        params.update({'end': int(end.timestamp())})
    params.update({'queries': queries})
    # remaining kwargs ride along to request_post as HTTP options
    kwargs.update(dict(data=params))
    return request_post(api_url(host, pointer=QueryPointer.QUERY), r_session, **kwargs)
def multiquery(host, r_session, query_chunks, max_tsdb_concurrency=40, **kwargs):
    """
    OpenTSDB /api/query/ concurrency wrapper.

    Fans ``query_chunks`` out over up to ``max_tsdb_concurrency`` worker
    threads, each issuing ``query()`` calls, and concatenates the results.

    :param host: str (mandatory); OpenTSDB host
    :param r_session: requests.Session
    :param query_chunks: list (mandatory); list of json serializable dicts representing OpenTSDB query
    :param max_tsdb_concurrency: int (optional), default=40; maximum number of concurrency
            threads hitting OpenTSDB api
    :return: list; concatenated json serializable query results
    :raises errors.TsdbError: when the response count does not match the query count
    """
    __WORKER_RUN__ = True
    # todo: optimize, in case one of worker fail, terminate execution

    # BUGFIX: this nested worker was missing, so Thread(target=tsdb_worker)
    # below raised NameError at runtime; restored here.
    def tsdb_worker():
        # Pull one query's kwargs at a time until the "TERMINATOR" sentinel
        # (or an earlier worker error) is seen.
        while __WORKER_RUN__:
            query_kwargs = query_queue.get()
            if query_kwargs == "TERMINATOR":
                break
            # if there is already at least one (just one) error in queue, terminate all running threads
            # it is useless and time consuming to finish the rest of the queries if one of them fails
            if not error_queue.empty():
                break
            try:
                result = query(host, r_session, **dict(**query_kwargs, **kwargs))
                result_queue.put(result)
            except Exception as we:
                error_queue.put(we)
                break

    n_threads = min(len(query_chunks), max_tsdb_concurrency)
    # one slot per query plus one sentinel per worker thread
    query_queue = Queue(maxsize=len(query_chunks) + n_threads)
    result_queue = Queue(maxsize=len(query_chunks) + n_threads)
    error_queue = Queue()
    threads = list()
    try:
        for q in query_chunks:
            # validate all queries in query_chunks before starting any worker
            tsdb_query_metrics_validation(**q)
            # add query kwargs to queue for future execution in threads
            query_queue.put(q)
        # one sentinel per worker so every thread can exit cleanly
        for _ in range(n_threads):
            query_queue.put("TERMINATOR")
        for _ in range(n_threads):
            t = Thread(target=tsdb_worker)
            threads.append(t)
            t.daemon = True
            t.start()
        for t in threads:
            t.join()
    except KeyboardInterrupt:
        raise
    finally:
        __WORKER_RUN__ = False
    if not error_queue.empty():
        # if not empty, error_queue has to contain exception from tsdb_worker
        raise error_queue.get()
    if result_queue.qsize() != len(query_chunks):
        # this statement is probably not necessary
        raise errors.TsdbError("Number of queries and responses is not the same")
    # make sure any other kind of response code won't be propagated to this place and will be catched and processed
    # in previous part of code
    return sum([val for val in IterableQueue(result_queue)], list())
| 34.505208 | 115 | 0.637736 | from queue import Queue
from queue import Empty
from threading import Thread
from pyopentsdb import errors
from pyopentsdb.utils import request_post
from pyopentsdb.conf import QueryPointer
class IterableQueue(object):
    """Adapter exposing a standard ``queue.Queue`` as an iterable.

    Iterating drains the queue without blocking and stops as soon as the
    queue is empty.
    """
    def __init__(self, source_queue):
        """
        :param source_queue: queue.Queue, (mandatory)
        """
        self.source_queue = source_queue
    def __iter__(self):
        # Drain non-blockingly; queue.Empty marks exhaustion.
        while True:
            try:
                item = self.source_queue.get_nowait()
            except Empty:
                return
            yield item
def tsdb_query_metrics_validation(**kwargs):
    """Validate the 'metrics' portion of an OpenTSDB query.

    Requires a non-empty 'metrics' list; every metric must name both a
    'metric' and an 'aggregator', and every optional filter must provide
    'type', 'tagk' and a non-None 'filter'.

    :param kwargs: dict
    :return: None
    :raises errors.MissingArgumentError: when a required key is missing
    """
    metrics = kwargs.get('metrics')
    if not metrics:
        raise errors.MissingArgumentError("Missing argument 'metrics' in query")
    for metric in metrics:
        # aggregator is mandatory for every metric entry
        if not metric.get('metric') or not metric.get('aggregator'):
            raise errors.MissingArgumentError("Missing argument 'metric' or 'aggregator' in metrics object")
        # filters are optional, but each one must be fully specified
        for metric_filter in metric.get('filters') or ():
            if not metric_filter.get('type') or not metric_filter.get('tagk') or \
                    metric_filter.get('filter') is None:
                raise errors.MissingArgumentError(
                    "Missing argument 'type', 'tagk' or 'filter' in filters object")
def query(host, r_session, **kwargs):
    """Run a single OpenTSDB ``/api/query`` request.

    :param host: str; OpenTSDB host
    :param r_session: requests.Session
    :param kwargs: dict; query arguments ('start' and 'metrics' are required,
        the rest are optional OpenTSDB query flags)
    :return: dict; decoded JSON response from OpenTSDB
    :raises errors.MissingArgumentError: when 'start' or a required metrics
        field is missing
    """
    # todo: make sure kwargs of tsdb are not colliding kwargs of requests
    if 'start' not in kwargs:
        raise errors.MissingArgumentError("'start' is a required argument")
    start = kwargs.pop('start')
    try:
        tsdb_query_metrics_validation(**kwargs)
    except errors.MissingArgumentError as e:
        raise errors.MissingArgumentError(str(e))
    # optional end timestamp is handled separately (only sent when truthy)
    end = kwargs.pop('end', None)
    # translate snake_case python arguments into the OpenTSDB payload keys
    payload = {
        'start': '{}'.format(int(start.timestamp())),
        'msResolution': bool(kwargs.pop('ms', False)),
        'showTSUIDs': bool(kwargs.pop('show_tsuids', False)),
        'noAnnotations': bool(kwargs.pop('no_annotations', False)),
        'globalAnnotations': bool(kwargs.pop('global_annotations', False)),
        'showSummary': bool(kwargs.pop('show_summary', False)),
        'showStats': bool(kwargs.pop('show_stats', False)),
        'showQuery': bool(kwargs.pop('show_query', False)),
        'delete': bool(kwargs.pop('delete', False)),
        'timezone': kwargs.pop('timezone', 'UTC'),
        'useCalendar': bool(kwargs.pop('use_calendar', False)),
        'queries': kwargs.pop('metrics'),
    }
    if end:
        payload['end'] = int(end.timestamp())
    # remaining kwargs are forwarded to the HTTP layer alongside the payload
    kwargs['data'] = payload
    return request_post(api_url(host, pointer=QueryPointer.QUERY), r_session, **kwargs)
def multiquery(host, r_session, query_chunks, max_tsdb_concurrency=40, **kwargs):
    """
    OpenTSDB /api/query/ concurrency wrapper
    :param host: str (mandatory); OpenTSDB host
    :param r_session: requests.Session
    :param query_chunks: list (mandatory); list of json serializable dicts representing OpenTSDB query
    :param max_tsdb_concurrency: int (optional), default=40; maximum number of concurrency
            threads hitting OpenTSDB api
    :return: dict; json serializable
    """
    __WORKER_RUN__ = True
    # todo: optimize, in case one of worker fail, terminate execution
    def tsdb_worker():
        # Worker loop: pull one query's kwargs at a time until the
        # "TERMINATOR" sentinel (or an earlier worker error) is seen.
        while __WORKER_RUN__:
            query_kwargs = query_queue.get()
            if query_kwargs == "TERMINATOR":
                break
            # if there is already at least one (just one) error in queue, terminate all running threads
            # it is useless and time consuming to finish the rest of the queries if one of them fails
            if not error_queue.empty():
                break
            try:
                result = query(host, r_session, **dict(**query_kwargs, **kwargs))
                result_queue.put(result)
            except Exception as we:
                error_queue.put(we)
                break
    n_threads = min(len(query_chunks), max_tsdb_concurrency)
    # one slot per query plus one "TERMINATOR" sentinel per worker thread
    query_queue = Queue(maxsize=len(query_chunks) + n_threads)
    result_queue = Queue(maxsize=len(query_chunks) + n_threads)
    error_queue = Queue()
    threads = list()
    try:
        for q in query_chunks:
            # validate all queries in query_chunks before starting any worker
            tsdb_query_metrics_validation(**q)
            # add query kwargs to queue for future execution in threads
            query_queue.put(q)
        # one sentinel per worker so every thread can exit cleanly
        for _ in range(n_threads):
            query_queue.put("TERMINATOR")
        for _ in range(n_threads):
            t = Thread(target=tsdb_worker)
            threads.append(t)
            t.daemon = True
            t.start()
        for t in threads:
            t.join()
    except KeyboardInterrupt:
        raise
    finally:
        __WORKER_RUN__ = False
    if not error_queue.empty():
        # if not empty, error_queue has to contain exception from tsdb_worker
        raise error_queue.get()
    if result_queue.qsize() != len(query_chunks):
        # this statement is probably not necessary
        raise errors.TsdbError("Number of queries and responses is not the same")
    # make sure any other kind of response code won't be propagated to this place and will be catched and processed
    # in previous part of code
    return sum([val for val in IterableQueue(result_queue)], list())
def api_url(host, pointer):
    """Build the OpenTSDB endpoint URL for a given API pointer.

    :param host: str; base host, e.g. 'http://tsdb:4242'
    :param pointer: QueryPointer member selecting the endpoint
    :return: str; full endpoint URL
    :raises ValueError: for an unsupported pointer (previously the function
        fell through and silently returned None, which produced a broken
        request URL downstream)
    """
    if pointer == QueryPointer.QUERY:
        return '{}/api/query/'.format(host)
    raise ValueError('Unsupported API pointer: {!r}'.format(pointer))
| 863 | 0 | 76 |
68fe1bd2aeb26294f3009435d3e3d763b7b7ffa7 | 10,564 | py | Python | old_versions/sgnn_pl_pure/loss.py | goodok/sgnn | a1ea5023c5b7e4f1a66afd1daed10a60786e6ac1 | [
"MIT"
] | 2 | 2020-08-10T13:55:01.000Z | 2020-08-13T16:06:25.000Z | old_versions/sgnn_pl_pure/loss.py | goodok/sgnn | a1ea5023c5b7e4f1a66afd1daed10a60786e6ac1 | [
"MIT"
] | null | null | null | old_versions/sgnn_pl_pure/loss.py | goodok/sgnn | a1ea5023c5b7e4f1a66afd1daed10a60786e6ac1 | [
"MIT"
] | 2 | 2020-11-13T17:48:13.000Z | 2020-11-13T17:50:35.000Z |
import numpy as np
import torch
import torch.nn.functional as F
import sparseconvnet as scn
import data_util
UNK_THRESH = 2
#UNK_THRESH = 3
UNK_ID = -1
# note: weight_missing_geo must be > 1
# hierarchical loss
| 45.339056 | 230 | 0.648429 |
import numpy as np
import torch
import torch.nn.functional as F
import sparseconvnet as scn
import data_util
UNK_THRESH = 2
#UNK_THRESH = 3
UNK_ID = -1
def compute_targets(target, hierarchy, num_hierarchy_levels, truncation, use_loss_masking, known):
    """Derive per-hierarchy-level training targets from a dense target SDF grid.

    :param target: dense target grid, shape [batch, 1, d1, d2, d3]
    :param hierarchy: coarser target grids for levels 0..num_hierarchy_levels-2
    :param num_hierarchy_levels: number of levels in the output hierarchy
    :param truncation: SDF truncation distance
    :param use_loss_masking: when True, voxels whose `known` value is
        >= UNK_THRESH are marked UNK_ID in the occupancy target
    :param known: per-voxel "known-ness" grid used for masking
    :return: (target_for_sdf, target_for_occs, target_for_hier); the lists
        are indexed coarsest (0) to finest (-1)
    """
    assert(len(target.shape) == 5)
    target_for_occs = [None] * num_hierarchy_levels
    target_for_hier = [None] * num_hierarchy_levels
    target_for_sdf = data_util.preprocess_sdf_pt(target, truncation)
    known_mask = None  # NOTE(review): unused local
    target_for_hier[-1] = target.clone()
    # finest-level occupancy: 1 inside the truncation band, 0 outside
    target_occ = (torch.abs(target_for_sdf) < truncation).float()
    if use_loss_masking:
        target_occ[known >= UNK_THRESH] = UNK_ID
    target_for_occs[-1] = target_occ
    factor = 2
    for h in range(num_hierarchy_levels-2,-1,-1):
        # coarser occupancy by 2x max-pooling the level below
        target_for_occs[h] = torch.nn.MaxPool3d(kernel_size=2)(target_for_occs[h+1])
        target_for_hier[h] = data_util.preprocess_sdf_pt(hierarchy[h], truncation)
        factor *= 2
    return target_for_sdf, target_for_occs, target_for_hier
# note: weight_missing_geo must be > 1
def compute_weights_missing_geo(weight_missing_geo, input_locs, target_for_occs, truncation):
    """Per-voxel loss weights that up-weight target geometry absent from the input.

    At the finest level every voxel starts at 1; +1 is added at the sparse
    input locations and +3 inside the target truncation band, so a value of
    exactly 4 marks near-surface target voxels NOT covered by the input.
    Those voxels get weight `weight_missing_geo` (must be > 1), all others 1.
    Coarser levels are produced by stride-2 subsampling of the finer level.

    :return: list of dense weight grids, one per hierarchy level (coarse->fine)
    """
    num_hierarchy_levels = len(target_for_occs)
    weights = [None] * num_hierarchy_levels
    dims = target_for_occs[-1].shape[2:]
    # linearize sparse locations (cols 0-2 spatial, col 3 batch) into flat indices
    flatlocs = input_locs[:,3]*dims[0]*dims[1]*dims[2] + input_locs[:,0]*dims[1]*dims[2] + input_locs[:,1]*dims[2] + input_locs[:,2]
    weights[-1] = torch.ones(target_for_occs[-1].shape, dtype=torch.int32).cuda()
    weights[-1].view(-1)[flatlocs] += 1
    weights[-1][torch.abs(target_for_occs[-1]) <= truncation] += 3
    # weight == 4 <=> near-surface target voxel with no input geometry
    weights[-1] = (weights[-1] == 4).float() * (weight_missing_geo - 1) + 1
    factor = 2
    for h in range(num_hierarchy_levels-2,-1,-1):
        weights[h] = weights[h+1][:,:,::2,::2,::2].contiguous()
        factor *= 2
    return weights
def apply_log_transform(sdf):
    """Signed log transform: ``sign(sdf) * log(|sdf| + 1)``, elementwise.

    Compresses large SDF magnitudes while preserving the sign and keeping
    zero fixed.
    """
    magnitude = torch.log(torch.abs(sdf) + 1)
    return torch.sign(sdf) * magnitude
def compute_bce_sparse_dense(sparse_pred_locs, sparse_pred_vals, dense_tgts, weights, use_loss_masking, truncation=3, batched=True):
    """Binary cross-entropy between sparse occupancy logits and a dense target.

    :param sparse_pred_locs: [N, 4] sparse locations (cols 0-2 spatial, col 3 batch)
    :param sparse_pred_vals: predicted occupancy logits at those locations
    :param dense_tgts: dense occupancy target, shape [batch, 1, d1, d2, d3]
    :param weights: optional dense per-voxel weight grid, or None
    :param use_loss_masking: drop voxels labeled UNK_ID from the loss
    :param truncation: unused here; kept for a uniform signature
    :param batched: True -> scalar tensor loss; False -> per-sample np array
        (only batch size 1 is supported in that mode)
    """
    assert(len(dense_tgts.shape) == 5 and dense_tgts.shape[1] == 1)
    dims = dense_tgts.shape[2:]
    loss = 0.0 if batched else np.zeros(dense_tgts.shape[0], dtype=np.float32)
    predvalues = sparse_pred_vals.view(-1)
    # linearize sparse locations to index the flattened dense target
    flatlocs = sparse_pred_locs[:,3]*dims[0]*dims[1]*dims[2] + sparse_pred_locs[:,0]*dims[1]*dims[2] + sparse_pred_locs[:,1]*dims[2] + sparse_pred_locs[:,2]
    tgtvalues = dense_tgts.view(-1)[flatlocs]
    weight = None if weights is None else weights.view(-1)[flatlocs]
    if use_loss_masking:
        # drop unknown-space voxels from prediction, target and weight alike
        mask = tgtvalues != UNK_ID
        tgtvalues = tgtvalues[mask]
        predvalues = predvalues[mask]
        if weight is not None:
            weight = weight[mask]
    else:
        # without masking, treat unknown voxels as unoccupied (0)
        tgtvalues[tgtvalues == UNK_ID] = 0
    if batched:
        loss = F.binary_cross_entropy_with_logits(predvalues, tgtvalues, weight=weight)
    else:
        if dense_tgts.shape[0] == 1:
            loss[0] = F.binary_cross_entropy_with_logits(predvalues, tgtvalues, weight=weight)
        else:
            # NOTE(review): bare `raise` has no active exception here and
            # surfaces as RuntimeError; per-sample loss for batch > 1 is
            # unimplemented
            raise
    return loss
def compute_iou_sparse_dense(sparse_pred_locs, dense_tgts, use_loss_masking, truncation=3, batched=True):
    """Occupancy IoU between sparse predicted locations and a dense target.

    :param sparse_pred_locs: per-sample list of [N, 3] predicted locations
        (an entry may be None to skip that sample)
    :param dense_tgts: dense occupancy target, shape [batch, 1, d1, d2, d3];
        1 marks occupied, UNK_ID marks unobserved space
    :param use_loss_masking: remove predictions that fall into unobserved space
    :param truncation: unused here; kept for a uniform signature
    :param batched: True -> single IoU over the whole batch (-1 when the union
        is empty); False -> per-sample np array of IoUs
    """
    assert(len(dense_tgts.shape) == 5 and dense_tgts.shape[1] == 1)
    dims = dense_tgts.shape[2:]
    corr = 0.0 if batched else np.zeros(dense_tgts.shape[0], dtype=np.float32)
    union = 0.0 if batched else np.zeros(dense_tgts.shape[0], dtype=np.float32)
    for b in range(dense_tgts.shape[0]):
        tgt = dense_tgts[b,0]
        if sparse_pred_locs[b] is None:
            continue
        predlocs = sparse_pred_locs[b]
        # flatten locs # TODO not sure whats the most efficient way to compute this...
        predlocs = predlocs[:,0] * dims[1] * dims[2] + predlocs[:,1] * dims[2] + predlocs[:,2]
        tgtlocs = torch.nonzero(tgt == 1)
        tgtlocs = tgtlocs[:,0] * dims[1] * dims[2] + tgtlocs[:,1] * dims[2] + tgtlocs[:,2]
        if use_loss_masking:
            tgtlocs = tgtlocs.cpu().numpy()
            # mask out from pred
            mask = torch.nonzero(tgt == UNK_ID)
            mask = mask[:,0] * dims[1] * dims[2] + mask[:,1] * dims[2] + mask[:,2]
            predlocs = predlocs.cpu().numpy()
            if mask.shape[0] > 0:
                # drop predicted locations that fall into unobserved space
                _, mask, _ = np.intersect1d(predlocs, mask.cpu().numpy(), return_indices=True)
                predlocs = np.delete(predlocs, mask)
        else:
            predlocs = predlocs.cpu().numpy()
            tgtlocs = tgtlocs.cpu().numpy()
        if batched:
            corr += len(np.intersect1d(predlocs, tgtlocs, assume_unique=True))
            union += len(np.union1d(predlocs, tgtlocs))
        else:
            corr[b] = len(np.intersect1d(predlocs, tgtlocs, assume_unique=True))
            union[b] = len(np.union1d(predlocs, tgtlocs))
    if not batched:
        return np.divide(corr, union)
    if union > 0:
        return corr/union
    return -1
def compute_l1_predsurf_sparse_dense(sparse_pred_locs, sparse_pred_vals, dense_tgts, weights, use_log_transform, use_loss_masking, known, batched=True, thresh=None):
    """L1 error between predicted sparse SDF values and the dense target,
    evaluated at the PREDICTED locations.

    :param sparse_pred_locs: [N, 4] sparse locations (cols 0-2 spatial, col 3 batch)
    :param sparse_pred_vals: predicted SDF values at those locations
    :param dense_tgts: dense target SDF grid, shape [batch, 1, d1, d2, d3]
    :param weights: optional dense per-voxel weight grid, or None
    :param use_log_transform: compare in log-transformed SDF space
    :param use_loss_masking: drop voxels whose `known` value is >= UNK_THRESH
    :param known: dense known-ness grid (used when use_loss_masking)
    :param batched: True -> scalar loss; False -> per-sample np array
        (only batch size 1 is supported in that mode)
    :param thresh: if given, only predictions with value <= thresh contribute
    """
    assert(len(dense_tgts.shape) == 5 and dense_tgts.shape[1] == 1)
    dims = dense_tgts.shape[2:]
    loss = 0.0 if batched else np.zeros(dense_tgts.shape[0], dtype=np.float32)
    # optionally keep only predictions at/below the threshold value
    locs = sparse_pred_locs if thresh is None else sparse_pred_locs[sparse_pred_vals.view(-1) <= thresh]
    predvalues = sparse_pred_vals.view(-1) if thresh is None else sparse_pred_vals.view(-1)[sparse_pred_vals.view(-1) <= thresh]
    flatlocs = locs[:,3]*dims[0]*dims[1]*dims[2] + locs[:,0]*dims[1]*dims[2] + locs[:,1]*dims[2] + locs[:,2]
    tgtvalues = dense_tgts.view(-1)[flatlocs]
    weight = None if weights is None else weights.view(-1)[flatlocs]
    if use_loss_masking:
        mask = known < UNK_THRESH
        mask = mask.view(-1)[flatlocs]
        predvalues = predvalues[mask]
        tgtvalues = tgtvalues[mask]
        if weight is not None:
            weight = weight[mask]
    if use_log_transform:
        predvalues = apply_log_transform(predvalues)
        tgtvalues = apply_log_transform(tgtvalues)
    if batched:
        if weight is not None:
            loss = torch.abs(predvalues - tgtvalues)
            loss = torch.mean(loss * weight)
        else:
            loss = torch.mean(torch.abs(predvalues - tgtvalues))
    else:
        if dense_tgts.shape[0] == 1:
            if weight is not None:
                loss_ = torch.abs(predvalues - tgtvalues)
                loss[0] = torch.mean(loss_ * weight).item()
            else:
                loss[0] = torch.mean(torch.abs(predvalues - tgtvalues)).item()
        else:
            # NOTE(review): bare `raise` has no active exception here and
            # surfaces as RuntimeError; per-sample loss for batch > 1 is
            # unimplemented
            raise
    return loss
# hierarchical loss
def compute_loss(output_sdf, output_occs, target_for_sdf, target_for_occs, target_for_hier, loss_weights, truncation, use_log_transform=True, weight_missing_geo=1, input_locs=None, use_loss_masking=True, known=None, batched=True):
    """Total hierarchical loss: per-level (BCE occupancy + L1 SDF) plus a final L1 SDF term.

    :param output_sdf: (locs, vals) sparse SDF prediction at the finest level
    :param output_occs: per-level (locs, vals) sparse predictions; vals holds
        occupancy logits in channel 0 and SDF values in channel 1
    :param loss_weights: per-level weights; the last entry weights the final SDF term
    :param weight_missing_geo: > 1 up-weights target geometry missing from the input
    :return: (loss, losses) — the weighted total and the per-level values
        (-1 recorded for levels that were skipped)
    """
    assert(len(output_occs) == len(target_for_occs))
    batch_size = target_for_sdf.shape[0]
    loss = 0.0 if batched else np.zeros(batch_size, dtype=np.float32)
    losses = [] if batched else [[] for i in range(len(output_occs) + 1)]
    weights = [None] * len(target_for_occs)
    if weight_missing_geo > 1:
        weights = compute_weights_missing_geo(weight_missing_geo, input_locs, target_for_occs, truncation)
    for h in range(len(output_occs)):
        # skip empty predictions / zero-weighted levels, recording -1
        if len(output_occs[h][0]) == 0 or loss_weights[h] == 0:
            if batched:
                losses.append(-1)
            else:
                losses[h].extend([-1] * batch_size)
            continue
        cur_loss_occ = compute_bce_sparse_dense(output_occs[h][0], output_occs[h][1][:,0], target_for_occs[h], weights[h], use_loss_masking, batched=batched)
        # rebuild a per-level "known" grid from the UNK_ID markers in the target
        cur_known = None if not use_loss_masking else (target_for_occs[h] == UNK_ID)*UNK_THRESH
        cur_loss_sdf = compute_l1_predsurf_sparse_dense(output_occs[h][0], output_occs[h][1][:,1], target_for_hier[h], weights[h], use_log_transform, use_loss_masking, cur_known, batched=batched)
        cur_loss = cur_loss_occ + cur_loss_sdf
        if batched:
            loss += loss_weights[h] * cur_loss
            losses.append(cur_loss.item())
        else:
            loss += loss_weights[h] * cur_loss
            losses[h].extend(cur_loss)
    # loss sdf (finest-level surface prediction)
    if len(output_sdf[0]) > 0 and loss_weights[-1] > 0:
        cur_loss = compute_l1_predsurf_sparse_dense(output_sdf[0], output_sdf[1], target_for_sdf, weights[-1], use_log_transform, use_loss_masking, known, batched=batched)
        if batched:
            loss += loss_weights[-1] * cur_loss
            losses.append(cur_loss.item())
        else:
            loss += loss_weights[-1] * cur_loss
            losses[len(output_occs)].extend(cur_loss)
    else:
        if batched:
            losses.append(-1)
        else:
            losses[len(output_occs)].extend([-1] * batch_size)
    return loss, losses
def compute_l1_tgtsurf_sparse_dense(sparse_pred_locs, sparse_pred_vals, dense_tgts, truncation, use_loss_masking, known, batched=True, thresh=None):
    """L1 error evaluated at the TARGET surface voxels.

    A dense prediction grid is first filled with -truncation, then the sparse
    predictions are scattered in; the error is measured at voxels where the
    target lies inside the truncation band (or within `thresh`, if given), so
    missing predictions are penalized at full truncation distance.

    :return: float when batched, else per-sample np array (batch size 1 only)
    """
    assert(len(dense_tgts.shape) == 5 and dense_tgts.shape[1] == 1)
    batch_size = dense_tgts.shape[0]
    dims = dense_tgts.shape[2:]
    loss = 0.0 if batched else np.zeros(dense_tgts.shape[0], dtype=np.float32)
    # dense prediction buffer, default -truncation where nothing was predicted
    pred_dense = torch.ones(batch_size * dims[0] * dims[1] * dims[2]).to(dense_tgts.device)
    fill_val = -truncation
    pred_dense.fill_(fill_val)
    if thresh is not None:
        tgtlocs = torch.nonzero(torch.abs(dense_tgts) <= thresh)
    else:
        tgtlocs = torch.nonzero(torch.abs(dense_tgts) < truncation)
    batchids = tgtlocs[:,0]  # NOTE(review): unused local
    tgtlocs = tgtlocs[:,0]*dims[0]*dims[1]*dims[2] + tgtlocs[:,2]*dims[1]*dims[2] + tgtlocs[:,3]*dims[2] + tgtlocs[:,4]
    tgtvalues = dense_tgts.view(-1)[tgtlocs]
    # scatter sparse predictions into the dense buffer
    flatlocs = sparse_pred_locs[:,3]*dims[0]*dims[1]*dims[2] + sparse_pred_locs[:,0]*dims[1]*dims[2] + sparse_pred_locs[:,1]*dims[2] + sparse_pred_locs[:,2]
    pred_dense[flatlocs] = sparse_pred_vals.view(-1)
    predvalues = pred_dense[tgtlocs]
    if use_loss_masking:
        mask = known < UNK_THRESH
        mask = mask.view(-1)[tgtlocs]
        tgtvalues = tgtvalues[mask]
        predvalues = predvalues[mask]
    if batched:
        loss = torch.mean(torch.abs(predvalues - tgtvalues)).item()
    else:
        if dense_tgts.shape[0] == 1:
            loss[0] = torch.mean(torch.abs(predvalues - tgtvalues)).item()
        else:
            # NOTE(review): bare `raise` has no active exception here and
            # surfaces as RuntimeError; per-sample loss for batch > 1 is
            # unimplemented
            raise
    return loss
| 10,160 | 0 | 182 |
8673e63a5911ec7d23cefb57c9ad17a81189a8e3 | 5,978 | py | Python | geomstats/integrator.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 2 | 2022-03-30T00:47:45.000Z | 2022-03-31T18:22:16.000Z | geomstats/integrator.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | null | null | null | geomstats/integrator.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | null | null | null | r"""Integrator functions used when no closed forms are available.
Lead author: Nicolas Guigui.
These are designed for first order ODE written of a variable x and a time
variable t:
.. math::
\frac{dx}{dt} = force(x, t)
where :math: `x` is called the state variable. It may represent many
variables by stacking arrays, e.g. position and velocity in a geodesic
equation.
"""
from geomstats.errors import check_parameter_accepted_values
STEP_FUNCTIONS = {
"euler": "euler_step",
"symp_euler": "symplectic_euler_step",
"leapfrog": "leapfrog_step",
"rk4": "rk4_step",
"rk2": "rk2_step",
}
def euler_step(force, state, time, dt):
    """Advance ``state`` by one explicit-Euler step.

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state (e.g. stacked position and velocity).
    time : float
        Current time.
    dt : float
        Integration time-step.

    Returns
    -------
    new_state : array-like, same shape as ``state``
        State at time ``time + dt``.
    """
    return state + dt * force(state, time)
def symplectic_euler_step(force, state, time, dt):
    """One symplectic-Euler step (placeholder, not implemented yet).

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state (position and velocity stacked).
    time : float
        Current time.
    dt : float
        Integration time-step.

    Raises
    ------
    NotImplementedError
        Always; this scheme is declared as an option but not implemented.
    """
    raise NotImplementedError
def leapfrog_step(force, state, time, dt):
    """One leapfrog step (placeholder, not implemented yet).

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state (position and velocity stacked).
    time : float
        Current time.
    dt : float
        Integration time-step.

    Raises
    ------
    NotImplementedError
        Always; this scheme is declared as an option but not implemented.
    """
    raise NotImplementedError
def rk2_step(force, state, time, dt):
    """Advance ``state`` by one second-order Runge-Kutta (midpoint) step.

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state.
    time : float
        Current time.
    dt : float
        Integration time-step.

    Returns
    -------
    new_state : array-like, same shape as ``state``
        State at time ``time + dt``.

    See Also
    --------
    https://en.wikipedia.org/wiki/Runge–Kutta_methods
    """
    half_step = dt / 2
    slope_start = force(state, time)
    slope_mid = force(state + half_step * slope_start, time + half_step)
    return state + dt * slope_mid
def rk4_step(force, state, time, dt):
    """Advance ``state`` by one classical fourth-order Runge-Kutta step.

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state.
    time : float
        Current time.
    dt : float
        Integration time-step.

    Returns
    -------
    new_state : array-like, same shape as ``state``
        State at time ``time + dt``.

    See Also
    --------
    https://en.wikipedia.org/wiki/Runge–Kutta_methods
    """
    half_step = dt / 2
    slope_1 = force(state, time)
    slope_2 = force(state + half_step * slope_1, time + half_step)
    slope_3 = force(state + half_step * slope_2, time + half_step)
    slope_4 = force(state + dt * slope_3, time + dt)
    # weighted average of the four slope estimates
    return state + dt / 6 * (slope_1 + 2 * slope_2 + 2 * slope_3 + slope_4)
def integrate(function, initial_state, end_time=1.0, n_steps=10, step="euler"):
    """Compute the flow of a vector field with the chosen explicit scheme.

    Integration function to compute flows of vector fields
    on a regular grid between 0 and a finite time from an initial state.

    Parameters
    ----------
    function : callable
        Vector field to integrate.
    initial_state : tuple of arrays
        Initial position and speed.
    end_time : float
        Final integration time.
        Optional, default : 1.
    n_steps : int
        Number of integration steps to use.
        Optional, default : 10.
    step : str, {'euler', 'symp_euler', 'leapfrog', 'rk2', 'rk4'}
        Numerical scheme to use for elementary integration steps.
        Optional, default : 'euler'.

    Returns
    -------
    final_state : tuple
        sequences of solutions every end_time / n_steps. The shape of each
        element of the sequence is the same as the vectors passed in
        initial_state.
    """
    # validate the requested scheme against the registered step functions
    check_parameter_accepted_values(step, "step", STEP_FUNCTIONS)
    dt = end_time / n_steps
    states = [initial_state]
    current_state = initial_state
    # resolve the step implementation by name in this module's namespace
    step_function = globals()[STEP_FUNCTIONS[step]]
    for i in range(n_steps):
        current_state = step_function(
            state=current_state, force=function, time=i * dt, dt=dt
        )
        states.append(current_state)
    return states
| 28.065728 | 79 | 0.607059 | r"""Integrator functions used when no closed forms are available.
Lead author: Nicolas Guigui.
These are designed for first order ODE written of a variable x and a time
variable t:
.. math::
\frac{dx}{dt} = force(x, t)
where :math: `x` is called the state variable. It may represent many
variables by stacking arrays, e.g. position and velocity in a geodesic
equation.
"""
from geomstats.errors import check_parameter_accepted_values
STEP_FUNCTIONS = {
"euler": "euler_step",
"symp_euler": "symplectic_euler_step",
"leapfrog": "leapfrog_step",
"rk4": "rk4_step",
"rk2": "rk2_step",
}
def euler_step(force, state, time, dt):
    """Advance ``state`` by one explicit-Euler step.

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state (e.g. stacked position and velocity).
    time : float
        Current time.
    dt : float
        Integration time-step.

    Returns
    -------
    new_state : array-like, same shape as ``state``
        State at time ``time + dt``.
    """
    return state + dt * force(state, time)
def symplectic_euler_step(force, state, time, dt):
    """One symplectic-Euler step (placeholder, not implemented yet).

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state (position and velocity stacked).
    time : float
        Current time.
    dt : float
        Integration time-step.

    Raises
    ------
    NotImplementedError
        Always; this scheme is declared as an option but not implemented.
    """
    raise NotImplementedError
def leapfrog_step(force, state, time, dt):
    """One leapfrog step (placeholder, not implemented yet).

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state (position and velocity stacked).
    time : float
        Current time.
    dt : float
        Integration time-step.

    Raises
    ------
    NotImplementedError
        Always; this scheme is declared as an option but not implemented.
    """
    raise NotImplementedError
def rk2_step(force, state, time, dt):
    """Advance ``state`` by one second-order Runge-Kutta (midpoint) step.

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state.
    time : float
        Current time.
    dt : float
        Integration time-step.

    Returns
    -------
    new_state : array-like, same shape as ``state``
        State at time ``time + dt``.

    See Also
    --------
    https://en.wikipedia.org/wiki/Runge–Kutta_methods
    """
    half_step = dt / 2
    slope_start = force(state, time)
    slope_mid = force(state + half_step * slope_start, time + half_step)
    return state + dt * slope_mid
def rk4_step(force, state, time, dt):
    """Advance ``state`` by one classical fourth-order Runge-Kutta step.

    Parameters
    ----------
    force : callable
        Vector field being integrated; called as ``force(state, time)``.
    state : array-like, shape=[2, dim]
        Current state.
    time : float
        Current time.
    dt : float
        Integration time-step.

    Returns
    -------
    new_state : array-like, same shape as ``state``
        State at time ``time + dt``.

    See Also
    --------
    https://en.wikipedia.org/wiki/Runge–Kutta_methods
    """
    half_step = dt / 2
    slope_1 = force(state, time)
    slope_2 = force(state + half_step * slope_1, time + half_step)
    slope_3 = force(state + half_step * slope_2, time + half_step)
    slope_4 = force(state + dt * slope_3, time + dt)
    # weighted average of the four slope estimates
    return state + dt / 6 * (slope_1 + 2 * slope_2 + 2 * slope_3 + slope_4)
def integrate(function, initial_state, end_time=1.0, n_steps=10, step="euler"):
    """Compute the flow of a vector field with the chosen explicit scheme.

    Integration function to compute flows of vector fields
    on a regular grid between 0 and a finite time from an initial state.

    Parameters
    ----------
    function : callable
        Vector field to integrate.
    initial_state : tuple of arrays
        Initial position and speed.
    end_time : float
        Final integration time.
        Optional, default : 1.
    n_steps : int
        Number of integration steps to use.
        Optional, default : 10.
    step : str, {'euler', 'symp_euler', 'leapfrog', 'rk2', 'rk4'}
        Numerical scheme to use for elementary integration steps.
        Optional, default : 'euler'.

    Returns
    -------
    final_state : tuple
        sequences of solutions every end_time / n_steps. The shape of each
        element of the sequence is the same as the vectors passed in
        initial_state.
    """
    # validate the requested scheme against the registered step functions
    check_parameter_accepted_values(step, "step", STEP_FUNCTIONS)
    dt = end_time / n_steps
    states = [initial_state]
    current_state = initial_state
    # resolve the step implementation by name in this module's namespace
    step_function = globals()[STEP_FUNCTIONS[step]]
    for i in range(n_steps):
        current_state = step_function(
            state=current_state, force=function, time=i * dt, dt=dt
        )
        states.append(current_state)
    return states
| 0 | 0 | 0 |
4bcfe6f5e75c6f352c785ad65e7375a9edd97d19 | 3,524 | py | Python | haipproxy/scheduler.py | searchlyf/haipproxy | 33be5298c2dc11372b6faa8ec7f4c10d3bcb7ec1 | [
"MIT"
] | null | null | null | haipproxy/scheduler.py | searchlyf/haipproxy | 33be5298c2dc11372b6faa8ec7f4c10d3bcb7ec1 | [
"MIT"
] | null | null | null | haipproxy/scheduler.py | searchlyf/haipproxy | 33be5298c2dc11372b6faa8ec7f4c10d3bcb7ec1 | [
"MIT"
] | null | null | null | """
This module schedules all the tasks according to config.rules.
"""
import click
import logging
import multiprocessing
import schedule
import time
from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from haipproxy.client import SquidClient
from haipproxy.config.rules import CRAWLER_TASKS, CRAWLER_QUEUE_MAPS
from haipproxy.crawler.spiders import SPIDER_MAP
from haipproxy.settings import (
SPIDER_AJAX_Q,
SPIDER_GFW_Q,
SPIDER_AJAX_GFW_Q,
TIMER_RECORDER,
)
from haipproxy.utils import get_redis_conn, acquire_lock, release_lock
DEFAULT_CRAWLER_QS = [SPIDER_AJAX_Q, SPIDER_GFW_Q, SPIDER_AJAX_GFW_Q]
logger = logging.getLogger(__name__)
def scheduler_start(tasks):
    """Start the crawler scheduler.

    Runs every configured crawler task once immediately, then keeps
    re-scheduling them at their configured intervals (blocks forever).

    :param tasks: accepted for interface compatibility; the tasks actually
        scheduled come from CRAWLER_TASKS
    """
    scheduler = CrawlerScheduler(CRAWLER_TASKS)
    scheduler.schedule_all_right_now()
    scheduler.schedule_with_delay()
| 30.119658 | 85 | 0.648978 | """
This module schedules all the tasks according to config.rules.
"""
import click
import logging
import multiprocessing
import schedule
import time
from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from haipproxy.client import SquidClient
from haipproxy.config.rules import CRAWLER_TASKS, CRAWLER_QUEUE_MAPS
from haipproxy.crawler.spiders import SPIDER_MAP
from haipproxy.settings import (
SPIDER_AJAX_Q,
SPIDER_GFW_Q,
SPIDER_AJAX_GFW_Q,
TIMER_RECORDER,
)
from haipproxy.utils import get_redis_conn, acquire_lock, release_lock
DEFAULT_CRAWLER_QS = [SPIDER_AJAX_Q, SPIDER_GFW_Q, SPIDER_AJAX_GFW_Q]
logger = logging.getLogger(__name__)
class BaseScheduler:
    """Common machinery for interval-based task schedulers."""
    def __init__(self, tasks):
        """
        :param tasks: task definitions taken from config.rules
        """
        self.tasks = tasks
    def schedule_with_delay(self):
        """Register every task at its interval, then loop forever running due ones."""
        for task in self.tasks:
            schedule.every(task.get("interval")).minutes.do(
                self.schedule_task_with_lock, task)
        while True:
            schedule.run_pending()
            time.sleep(1)
    def schedule_all_right_now(self):
        """Dispatch every task once, immediately, across a process pool."""
        with multiprocessing.Pool() as pool:
            pool.map(self.schedule_task_with_lock, self.tasks)
    def get_lock(self, redis_conn, task):
        """Acquire the task's distributed lock; None when the task is disabled."""
        if not task.get("enable"):
            return None
        return acquire_lock(redis_conn, task.get("name"))
    def schedule_task_with_lock(self, task):
        """Run one task under its lock; subclasses must implement this."""
        raise NotImplementedError
class CrawlerScheduler(BaseScheduler):
    """Scheduler that pushes enabled crawler tasks' seed URLs into redis queues."""
    def schedule_task_with_lock(self, task):
        """Crawler scheduler filters tasks according to task type"""
        task_name = task.get("name")
        if not task.get("enable"):
            return None
        task_queue = CRAWLER_QUEUE_MAPS[task_name]
        redis_conn = get_redis_conn()
        interval = task.get("interval")
        urls = task.get("resource")
        lock_indentifier = acquire_lock(redis_conn, task_name)
        if not lock_indentifier:
            # lock not obtained — presumably another process holds it; skip
            return False
        pipe = redis_conn.pipeline(True)
        try:
            now = int(time.time())
            # last-run timestamp for this task is kept in the TIMER_RECORDER hash
            pipe.hget(TIMER_RECORDER, task_name)
            r = pipe.execute()[0]
            # enqueue only if the task never ran or `interval` minutes elapsed
            if not r or (now - int(r.decode("utf-8"))) >= interval * 60:
                pipe.lpush(task_queue, *urls)
                pipe.hset(TIMER_RECORDER, task_name, now)
                pipe.execute()
                logger.info(
                    "crawler task {} has been stored into redis successfully".format(
                        task_name
                    )
                )
                return True
            else:
                return None
        finally:
            # always release the distributed lock, even on failure
            release_lock(redis_conn, task_name, lock_indentifier)
def scheduler_start(tasks):
    """Start the crawler scheduler.

    Runs every configured crawler task once immediately, then keeps
    re-scheduling them at their configured intervals (blocks forever).

    :param tasks: accepted for interface compatibility; the tasks actually
        scheduled come from CRAWLER_TASKS
    """
    scheduler = CrawlerScheduler(CRAWLER_TASKS)
    scheduler.schedule_all_right_now()
    scheduler.schedule_with_delay()
def crawler_start(tasks):
    """Run the requested spiders inside a single Twisted reactor.

    :param tasks: iterable of task names; names not present in SPIDER_MAP
        are silently skipped.
    Blocks until every crawl has finished, then the reactor is stopped.
    """
    runner = CrawlerRunner(get_project_settings())
    for task in tasks:
        if task in SPIDER_MAP:
            runner.crawl(SPIDER_MAP[task])
    d = runner.join()
    # stop the reactor once all crawls complete (whether they succeed or fail)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()
| 863 | 1,594 | 69 |
fa8bbb4c41cefeb292ed9b5182148e5383987b33 | 2,107 | py | Python | medipack/lib/meditor.py | hritikgupta/medipack | 86407dbfe2f79ee8ebc9b1aa697ca41c2857e914 | [
"MIT"
] | 7 | 2018-10-01T17:46:37.000Z | 2021-11-13T00:07:57.000Z | medipack/lib/meditor.py | hritikgupta/medipack | 86407dbfe2f79ee8ebc9b1aa697ca41c2857e914 | [
"MIT"
] | 7 | 2018-10-01T13:11:46.000Z | 2020-05-15T22:26:50.000Z | medipack/lib/meditor.py | hritikgupta/medipack | 86407dbfe2f79ee8ebc9b1aa697ca41c2857e914 | [
"MIT"
] | 7 | 2018-09-29T18:44:29.000Z | 2019-09-06T00:51:44.000Z | import os
import subprocess as sp
from .srbColour import Colour
| 37.625 | 100 | 0.613194 | import os
import subprocess as sp
from .srbColour import Colour
class Meditor:
    """Thin wrapper around the ffmpeg/ffprobe command line for simple media edits.

    NOTE(review): the methods are declared without ``self`` and appear intended
    to be called on the class itself, e.g. ``Meditor.extract_audio(src, dst)``;
    calling them on an instance would mis-bind the first argument.
    """
    def getLength(filename):
        """Return the duration of *filename* as 'HH:MM:SS' parsed from ffprobe output."""
        result = sp.Popen(["ffprobe", filename],
            stdout=sp.PIPE, stderr=sp.STDOUT)
        # ffprobe reports "Duration: HH:MM:SS.xx, ..." on stderr, which is
        # merged into stdout above; keep only the line(s) that mention it.
        arr = [x for x in result.stdout.readlines() if "Duration".encode('utf-8') in x]
        x = arr[0].decode('utf-8')
        x = x.split(' ')
        dur = x.index('Duration:') + 1
        # strip the fractional-seconds suffix (and the trailing comma with it)
        return x[dur].split('.')[0]
    def extract_audio(inp, out):
        """Write *inp*'s audio to *out*; codecs are left for ffmpeg to infer from *out*."""
        video_codec = " "
        audio_codec = " "
        exec_command = 'ffmpeg -i ' + str(inp) + video_codec + audio_codec + out
        Colour.print(exec_command, Colour.GREEN)
        os.system(exec_command)
    def extract_video(inp, out):
        """Copy *inp*'s video stream to *out*, dropping the audio track (-an)."""
        video_codec = " -c:v copy "
        audio_codec = " -an "
        exec_command = 'ffmpeg -i ' + str(inp) + video_codec + audio_codec + out
        Colour.print(exec_command, Colour.GREEN)
        os.system(exec_command)
    def video_trimmer(inp, trimmer, out):
        """Trim *inp* per the *trimmer* flags (e.g. ' -ss 10 -to 20 '), copying both streams."""
        video_codec = " -c:v copy "
        audio_codec = " -c:a copy "
        exec_command = 'ffmpeg -i ' + str(inp) + trimmer + video_codec + audio_codec + out
        Colour.print(exec_command, Colour.GREEN)
        os.system(exec_command)
    def video_cropper(inp, filters, out):
        """Crop *inp* per the *filters* flags; audio is stream-copied."""
        video_codec = ""  # cropping requires re-encoding, so no '-c:v copy' here
        audio_codec = " -c:a copy "
        exec_command = 'ffmpeg -i ' + str(inp) + filters + video_codec + audio_codec + out
        Colour.print(exec_command, Colour.GREEN)
        os.system(exec_command)
    def video_resizer(inp, resizer, out):
        """Resize *inp* per the *resizer* flags; audio is stream-copied."""
        video_codec = ""  # resizing requires re-encoding, so no '-c:v copy' here
        audio_codec = " -c:a copy "
        exec_command = 'ffmpeg -i ' + str(inp) + resizer + video_codec + audio_codec + out
        Colour.print(exec_command, Colour.GREEN)
        os.system(exec_command)
    def audio_cutter(inp, trimmer, out):
        """Cut an audio file per the *trimmer* flags (e.g. ' -ss 10 -to 20 ').

        BUG FIX: the original referenced an undefined name ``codec`` and always
        raised NameError. Stream-copy the audio, matching the sibling methods.
        """
        audio_codec = " -c:a copy "
        exec_command = 'ffmpeg -i ' + str(inp) + trimmer + audio_codec + out
        Colour.print(exec_command, Colour.GREEN)
        os.system(exec_command)
| 1,837 | -7 | 211 |
a343dd3be46b0b1c71ff80808ad25d2733b56f65 | 1,176 | py | Python | NLP/libs/NLP/classification/train_data.py | YarosJ/prestige-of-districts | 56ef437bff3f55e843a0602e0d33467582a50e5d | [
"MIT"
] | 6 | 2019-04-30T11:01:10.000Z | 2021-02-17T17:58:24.000Z | NLP/libs/NLP/classification/train_data.py | YarosJ/prestige-of-districts | 56ef437bff3f55e843a0602e0d33467582a50e5d | [
"MIT"
] | null | null | null | NLP/libs/NLP/classification/train_data.py | YarosJ/prestige-of-districts | 56ef437bff3f55e843a0602e0d33467582a50e5d | [
"MIT"
] | 1 | 2019-09-25T03:19:52.000Z | 2019-09-25T03:19:52.000Z | #!/usr/bin/env python
# coding: utf8
from __future__ import unicode_literals
import random
import operator
from typing import Dict
categories = {'FAULT': 0, 'INFO': 0, 'TOXIC': 0, 'REPAIR': 0}
| 30.947368 | 123 | 0.647109 | #!/usr/bin/env python
# coding: utf8
from __future__ import unicode_literals
import random
import operator
from typing import Dict
categories = {'FAULT': 0, 'INFO': 0, 'TOXIC': 0, 'REPAIR': 0}
def train_data(*args, coefficient: float = 20):
    """Build shuffled (text, {"cats": weights}) training tuples with per-category weights.

    Each positional argument is a list of documents shaped like
    ``{"text": ..., "categories": [label, ...]}``; only the first label is used.
    Weights are scaled so that under-represented categories score closer to
    *coefficient* and over-represented ones closer to zero.

    NOTE(review): counting is cumulative across *args*, and the INFO/TOXIC
    rebalancing below is re-applied once per input list — presumably
    intentional (marked "temporal fix"), but worth confirming.
    """
    categories_counts: Dict[str, float] = categories.copy()
    result = []
    for arr in args:
        for document in arr:
            categories_counts[document["categories"][0]] += 1
        # temporal fix for the multi-language classification imbalance:
        # inflate INFO and deflate TOXIC counts before computing weights
        categories_counts['INFO'] = categories_counts['INFO'] * 1.3  # temporal fix multi language classification problem
        categories_counts['TOXIC'] = categories_counts['TOXIC'] / 1.5  # temporal fix multi language classification problem
        print(categories_counts)
        min_cat_count: float = min(categories_counts.items(), key=operator.itemgetter(1))[1]
        for document in arr:
            result_cats: Dict[str, float] = categories.copy()
            cat = document["categories"][0]
            # rarer categories get a weight closer to `coefficient`
            result_cats[cat] = coefficient * (min_cat_count / categories_counts[cat])
            result.append((document["text"], {
                "cats": result_cats
            }))
    random.shuffle(result)
    return result
| 957 | 0 | 23 |
30efe457e2cdfa8b5a921c646458acb2e2e64f67 | 279 | py | Python | Code/stacks-and-queues/reverse.py | lukeaparker/CS1.3-Data-Struct | 2b09fa95ddb1fd3e21c42ccacdc9e19fc53382a8 | [
"MIT"
] | null | null | null | Code/stacks-and-queues/reverse.py | lukeaparker/CS1.3-Data-Struct | 2b09fa95ddb1fd3e21c42ccacdc9e19fc53382a8 | [
"MIT"
] | null | null | null | Code/stacks-and-queues/reverse.py | lukeaparker/CS1.3-Data-Struct | 2b09fa95ddb1fd3e21c42ccacdc9e19fc53382a8 | [
"MIT"
] | null | null | null |
input_num = '22235253534090'
reverse(input_num)
| 14.684211 | 28 | 0.569892 |
input_num = '22235253534090'
def reverse(input_num):
    """Reverse a string using a stack; print and return the reversed characters as a list.

    BUG FIX: the original mutated ``stack1`` while iterating over it (so the
    loop only visited half of the input) and inserted the loop variable instead
    of the popped element, returning the first half reversed rather than the
    whole string. This version pushes every character, then pops them all.
    """
    stack = []
    for char in input_num:
        stack.append(char)  # push every character
    reversed_chars = []
    while stack:
        reversed_chars.append(stack.pop())  # popping yields characters in reverse order
    print(reversed_chars)
    return reversed_chars
reverse(input_num)
| 195 | 0 | 23 |
d506975b831bb63cb8084904c0509b92d0510073 | 12,478 | py | Python | research/cv/cct/src/models/cct/cct.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/cct/src/models/cct/cct.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/cct/src/models/cct/cct.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""cct model"""
import mindspore.common.initializer as weight_init
import mindspore.nn as nn
from src.models.cct.tokenizer import Tokenizer
from src.models.cct.transformers import TransformerClassifier
from src.models.cct.var_init import KaimingNormal
class CCT(nn.Cell):
"""CCT Model"""
def init_weights(self):
"""init_weights"""
for _, cell in self.cells_and_names():
if isinstance(cell, nn.Conv2d):
cell.weight.set_data(
weight_init.initializer(
KaimingNormal(
mode='fan_in'),
cell.weight.shape,
cell.weight.dtype))
elif isinstance(cell, nn.Dense):
cell.weight.set_data(
weight_init.initializer(
weight_init.TruncatedNormal(
sigma=0.02),
cell.weight.shape,
cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(
weight_init.initializer(
weight_init.Zero(),
cell.bias.shape,
cell.bias.dtype))
def _cct(arch,
num_layers,
num_heads,
mlp_ratio,
embedding_dim,
kernel_size=3,
stride=None,
padding=None,
**kwargs):
"""get cct model with parameters"""
print(f'=> using arch: {arch}')
stride = stride if stride is not None else max(1, (kernel_size // 2) - 1)
padding = padding if padding is not None else max(1, (kernel_size // 2))
model = CCT(num_layers=num_layers,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
embedding_dim=embedding_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
**kwargs)
return model
def cct_2(arch, **kwargs):
"""cct_2"""
return _cct(
arch,
num_layers=2,
num_heads=2,
mlp_ratio=1,
embedding_dim=128,
**kwargs)
def cct_4(arch, **kwargs):
"""cct_4"""
return _cct(
arch,
num_layers=4,
num_heads=2,
mlp_ratio=1,
embedding_dim=128,
**kwargs)
def cct_6(arch, **kwargs):
"""cct_6"""
return _cct(
arch,
num_layers=6,
num_heads=4,
mlp_ratio=2,
embedding_dim=256,
**kwargs)
def cct_7(arch, **kwargs):
"""cct_7"""
return _cct(
arch,
num_layers=7,
num_heads=4,
mlp_ratio=2,
embedding_dim=256,
**kwargs)
def cct_14(arch, **kwargs):
"""cct_14"""
return _cct(
arch,
num_layers=14,
num_heads=6,
mlp_ratio=3,
embedding_dim=384,
**kwargs)
def cct_2_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_2_3x2_32"""
return cct_2(
'cct_2_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_2_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_2(
'cct_2_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_4_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_4(
'cct_4_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_4_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_4(
'cct_4_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_6_3x1_32(img_size=32, positional_embedding='learnable', num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_6(
'cct_6_3x1_32',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_6_3x1_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_6(
'cct_6_3x1_32_sine',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_6_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_6(
'cct_6_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_6_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_6_3x2_32_sine"""
return cct_6(
'cct_6_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_7_3x1_32"""
return cct_7(
'cct_7_3x1_32',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_7_3x1_32_sine"""
return cct_7(
'cct_7_3x1_32_sine',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_c100(
img_size=32,
positional_embedding='learnable',
num_classes=100,
**kwargs):
"""cct_7_3x1_32_c100"""
return cct_7(
'cct_7_3x1_32_c100',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_sine_c100(
img_size=32,
positional_embedding='sine',
num_classes=100,
**kwargs):
"""cct_7_3x1_32_sine_c100"""
return cct_7(
'cct_7_3x1_32_sine_c100',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_7_3x2_32"""
return cct_7(
'cct_7_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_7_3x2_32_sine"""
return cct_7(
'cct_7_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_7x2_224(
img_size=224,
positional_embedding='learnable',
num_classes=102):
"""cct_7_7x2_224"""
return cct_7(
'cct_7_7x2_224',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes)
def cct_7_7x2_224_sine(
img_size=224,
positional_embedding='sine',
num_classes=102,
**kwargs):
"""cct_7_7x2_224_sine"""
return cct_7(
'cct_7_7x2_224_sine',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_224(
img_size=224,
positional_embedding='learnable',
num_classes=1000,
**kwargs):
"""cct_14_7x2_224"""
return cct_14(
'cct_14_7x2_224',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_384(
img_size=384,
positional_embedding='learnable',
num_classes=1000,
**kwargs):
"""cct_14_7x2_384"""
return cct_14(
'cct_14_7x2_384',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_384_fl(
img_size=384,
positional_embedding='learnable',
num_classes=102,
**kwargs):
"""cct_14_7x2_384_fl"""
return cct_14(
'cct_14_7x2_384_fl',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
| 25.780992 | 79 | 0.578699 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""cct model"""
import mindspore.common.initializer as weight_init
import mindspore.nn as nn
from src.models.cct.tokenizer import Tokenizer
from src.models.cct.transformers import TransformerClassifier
from src.models.cct.var_init import KaimingNormal
class CCT(nn.Cell):
    """CCT Model.

    A convolutional tokenizer produces the token sequence that is fed to a
    transformer classifier with sequence pooling.

    Args:
        img_size (int): input height/width; a square input is assumed when
            computing the tokenizer's sequence length below.
        embedding_dim (int): token embedding width.
        n_input_channels (int): number of input image channels.
        n_conv_layers (int): number of conv blocks in the tokenizer.
        kernel_size, stride, padding (int): tokenizer conv parameters.
        pooling_kernel_size, pooling_stride (int): tokenizer max-pool parameters.
        dropout, attention_dropout, stochastic_depth (float): regularization rates.
        num_layers, num_heads, mlp_ratio: transformer encoder shape.
        num_classes (int): classifier output size.
        positional_embedding (str): positional-embedding type, e.g. 'learnable' or 'sine'.
    """
    def __init__(
            self,
            img_size=224,
            embedding_dim=768,
            n_input_channels=3,
            n_conv_layers=1,
            kernel_size=7,
            stride=2,
            padding=3,
            pooling_kernel_size=3,
            pooling_stride=2,
            dropout=0.,
            attention_dropout=0.1,
            stochastic_depth=0.1,
            num_layers=14,
            num_heads=6,
            mlp_ratio=4.0,
            num_classes=1000,
            positional_embedding='learnable'):
        super(CCT, self).__init__()
        self.tokenizer = Tokenizer(
            n_input_channels=n_input_channels,
            n_output_channels=embedding_dim,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            pooling_kernel_size=pooling_kernel_size,
            pooling_stride=pooling_stride,
            max_pool=True,
            activation=nn.ReLU,
            n_conv_layers=n_conv_layers,
            conv_bias=False)
        self.classifier = TransformerClassifier(
            sequence_length=self.tokenizer.sequence_length(
                n_channels=n_input_channels,
                height=img_size,
                width=img_size),
            embedding_dim=embedding_dim,
            seq_pool=True,
            dropout=dropout,
            attention_dropout=attention_dropout,
            stochastic_depth=stochastic_depth,
            num_layers=num_layers,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            num_classes=num_classes,
            positional_embedding=positional_embedding)
        self.init_weights()
    def construct(self, x):
        """Forward pass: tokenize the image, then classify the token sequence."""
        x = self.tokenizer(x)
        x = self.classifier(x)
        return x
    def init_weights(self):
        """Initialize weights: Kaiming-normal for Conv2d; truncated-normal
        (sigma=0.02) weights and zero biases for Dense layers."""
        for _, cell in self.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                cell.weight.set_data(
                    weight_init.initializer(
                        KaimingNormal(
                            mode='fan_in'),
                        cell.weight.shape,
                        cell.weight.dtype))
            elif isinstance(cell, nn.Dense):
                cell.weight.set_data(
                    weight_init.initializer(
                        weight_init.TruncatedNormal(
                            sigma=0.02),
                        cell.weight.shape,
                        cell.weight.dtype))
                if cell.bias is not None:
                    cell.bias.set_data(
                        weight_init.initializer(
                            weight_init.Zero(),
                            cell.bias.shape,
                            cell.bias.dtype))
def _cct(arch,
         num_layers,
         num_heads,
         mlp_ratio,
         embedding_dim,
         kernel_size=3,
         stride=None,
         padding=None,
         **kwargs):
    """get cct model with parameters

    Derives the conv stride/padding from *kernel_size* when they are not
    supplied, then builds a CCT instance. *arch* is only used for logging.
    """
    print(f'=> using arch: {arch}')
    stride = stride if stride is not None else max(1, (kernel_size // 2) - 1)
    padding = padding if padding is not None else max(1, (kernel_size // 2))
    model = CCT(num_layers=num_layers,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                embedding_dim=embedding_dim,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                **kwargs)
    return model
def cct_2(arch, **kwargs):
"""cct_2"""
return _cct(
arch,
num_layers=2,
num_heads=2,
mlp_ratio=1,
embedding_dim=128,
**kwargs)
def cct_4(arch, **kwargs):
"""cct_4"""
return _cct(
arch,
num_layers=4,
num_heads=2,
mlp_ratio=1,
embedding_dim=128,
**kwargs)
def cct_6(arch, **kwargs):
"""cct_6"""
return _cct(
arch,
num_layers=6,
num_heads=4,
mlp_ratio=2,
embedding_dim=256,
**kwargs)
def cct_7(arch, **kwargs):
"""cct_7"""
return _cct(
arch,
num_layers=7,
num_heads=4,
mlp_ratio=2,
embedding_dim=256,
**kwargs)
def cct_14(arch, **kwargs):
"""cct_14"""
return _cct(
arch,
num_layers=14,
num_heads=6,
mlp_ratio=3,
embedding_dim=384,
**kwargs)
def cct_2_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_2_3x2_32"""
return cct_2(
'cct_2_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_2_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_2(
'cct_2_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_4_3x2_32(
        img_size=32,
        positional_embedding='learnable',
        num_classes=10,
        **kwargs):
    """cct_4_3x2_32: CCT-4 with 3x3 kernel, 2 conv layers, for 32x32 inputs."""
    return cct_4(
        'cct_4_3x2_32',
        kernel_size=3,
        n_conv_layers=2,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_4_3x2_32_sine(
        img_size=32,
        positional_embedding='sine',
        num_classes=10,
        **kwargs):
    """cct_4_3x2_32_sine: CCT-4, 3x3 kernel, 2 conv layers, sine positional embedding."""
    return cct_4(
        'cct_4_3x2_32_sine',
        kernel_size=3,
        n_conv_layers=2,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x1_32(img_size=32, positional_embedding='learnable', num_classes=10,
                 **kwargs):
    """cct_6_3x1_32: CCT-6 with 3x3 kernel, 1 conv layer, for 32x32 inputs."""
    return cct_6(
        'cct_6_3x1_32',
        kernel_size=3,
        n_conv_layers=1,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x1_32_sine(
        img_size=32,
        positional_embedding='sine',
        num_classes=10,
        **kwargs):
    """cct_6_3x1_32_sine: CCT-6, 3x3 kernel, 1 conv layer, sine positional embedding."""
    return cct_6(
        'cct_6_3x1_32_sine',
        kernel_size=3,
        n_conv_layers=1,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x2_32(
        img_size=32,
        positional_embedding='learnable',
        num_classes=10,
        **kwargs):
    """cct_6_3x2_32: CCT-6 with 3x3 kernel, 2 conv layers, for 32x32 inputs."""
    return cct_6(
        'cct_6_3x2_32',
        kernel_size=3,
        n_conv_layers=2,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_6_3x2_32_sine"""
return cct_6(
'cct_6_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_7_3x1_32"""
return cct_7(
'cct_7_3x1_32',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_7_3x1_32_sine"""
return cct_7(
'cct_7_3x1_32_sine',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_c100(
img_size=32,
positional_embedding='learnable',
num_classes=100,
**kwargs):
"""cct_7_3x1_32_c100"""
return cct_7(
'cct_7_3x1_32_c100',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_sine_c100(
img_size=32,
positional_embedding='sine',
num_classes=100,
**kwargs):
"""cct_7_3x1_32_sine_c100"""
return cct_7(
'cct_7_3x1_32_sine_c100',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_7_3x2_32"""
return cct_7(
'cct_7_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_7_3x2_32_sine"""
return cct_7(
'cct_7_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_7x2_224(
img_size=224,
positional_embedding='learnable',
num_classes=102):
"""cct_7_7x2_224"""
return cct_7(
'cct_7_7x2_224',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes)
def cct_7_7x2_224_sine(
img_size=224,
positional_embedding='sine',
num_classes=102,
**kwargs):
"""cct_7_7x2_224_sine"""
return cct_7(
'cct_7_7x2_224_sine',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_224(
img_size=224,
positional_embedding='learnable',
num_classes=1000,
**kwargs):
"""cct_14_7x2_224"""
return cct_14(
'cct_14_7x2_224',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_384(
img_size=384,
positional_embedding='learnable',
num_classes=1000,
**kwargs):
"""cct_14_7x2_384"""
return cct_14(
'cct_14_7x2_384',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_384_fl(
img_size=384,
positional_embedding='learnable',
num_classes=102,
**kwargs):
"""cct_14_7x2_384_fl"""
return cct_14(
'cct_14_7x2_384_fl',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
| 1,712 | 0 | 54 |
b7bbadbd5e3fea86210ad485005e52358c20ba8f | 2,769 | py | Python | pyproj/utils.py | matthew-brett/pyproj | 5749ae3448041fa2a9e1444ef9d569ae6c2b9976 | [
"MIT"
] | null | null | null | pyproj/utils.py | matthew-brett/pyproj | 5749ae3448041fa2a9e1444ef9d569ae6c2b9976 | [
"MIT"
] | null | null | null | pyproj/utils.py | matthew-brett/pyproj | 5749ae3448041fa2a9e1444ef9d569ae6c2b9976 | [
"MIT"
] | null | null | null | from array import array
def _copytobuffer(x):
"""
return a copy of x as an object that supports the python Buffer
API (python array if input is float, list or tuple, numpy array
if input is a numpy array). returns copyofx, isfloat, islist,
istuple (islist is True if input is a list, istuple is true if
input is a tuple, isfloat is true if input is a float).
"""
# make sure x supports Buffer API and contains doubles.
isfloat = False
islist = False
istuple = False
# first, if it's a numpy array scalar convert to float
# (array scalars don't support buffer API)
if hasattr(x, "shape"):
if x.shape == ():
return _copytobuffer_return_scalar(x)
else:
try:
# typecast numpy arrays to double.
# (this makes a copy - which is crucial
# since buffer is modified in place)
x.dtype.char
# Basemap issue
# https://github.com/matplotlib/basemap/pull/223/files
# (deal with input array in fortran order)
inx = x.copy(order="C").astype("d")
# inx,isfloat,islist,istuple
return inx, False, False, False
except:
try: # perhaps they are Numeric/numarrays?
# sorry, not tested yet.
# i don't know Numeric/numarrays has `shape'.
x.typecode()
inx = x.astype("d")
# inx,isfloat,islist,istuple
return inx, False, False, False
except:
raise TypeError("input must be an array, list, tuple or scalar")
else:
# perhaps they are regular python arrays?
if hasattr(x, "typecode"):
# x.typecode
inx = array("d", x)
# try to convert to python array
# a list.
elif type(x) == list:
inx = array("d", x)
islist = True
# a tuple.
elif type(x) == tuple:
inx = array("d", x)
istuple = True
# a scalar?
else:
return _copytobuffer_return_scalar(x)
return inx, isfloat, islist, istuple
| 34.185185 | 84 | 0.548935 | from array import array
def _copytobuffer_return_scalar(x):
try:
# inx,isfloat,islist,istuple
return array("d", (float(x),)), True, False, False
except:
raise TypeError("input must be an array, list, tuple or scalar")
def _copytobuffer(x):
    """
    return a copy of x as an object that supports the python Buffer
    API (python array if input is float, list or tuple, numpy array
    if input is a numpy array). returns copyofx, isfloat, islist,
    istuple (islist is True if input is a list, istuple is true if
    input is a tuple, isfloat is true if input is a float).
    Raises TypeError for inputs that fit none of the supported kinds.
    """
    # make sure x supports Buffer API and contains doubles.
    isfloat = False
    islist = False
    istuple = False
    # first, if it's a numpy array scalar convert to float
    # (array scalars don't support buffer API)
    if hasattr(x, "shape"):
        if x.shape == ():
            return _copytobuffer_return_scalar(x)
        else:
            try:
                # typecast numpy arrays to double.
                # (this makes a copy - which is crucial
                # since buffer is modified in place)
                x.dtype.char
                # Basemap issue
                # https://github.com/matplotlib/basemap/pull/223/files
                # (deal with input array in fortran order)
                inx = x.copy(order="C").astype("d")
                # inx,isfloat,islist,istuple
                return inx, False, False, False
            except:
                try: # perhaps they are Numeric/numarrays?
                    # fallback for legacy Numeric/numarray objects, which
                    # expose typecode()/astype() but not dtype (untested).
                    x.typecode()
                    inx = x.astype("d")
                    # inx,isfloat,islist,istuple
                    return inx, False, False, False
                except:
                    raise TypeError("input must be an array, list, tuple or scalar")
    else:
        # perhaps they are regular python arrays?
        if hasattr(x, "typecode"):
            # x.typecode
            inx = array("d", x)
        # try to convert to python array
        # a list.
        elif type(x) == list:
            inx = array("d", x)
            islist = True
        # a tuple.
        elif type(x) == tuple:
            inx = array("d", x)
            istuple = True
        # a scalar?
        else:
            return _copytobuffer_return_scalar(x)
    return inx, isfloat, islist, istuple
def _convertback(isfloat, islist, istuple, inx):
# if inputs were lists, tuples or floats, convert back to original type.
if isfloat:
return inx[0]
elif islist:
return inx.tolist()
elif istuple:
return tuple(inx)
else:
return inx
| 464 | 0 | 46 |
b86241b35ed456fca8e62338d08ae2fe58b443e5 | 514 | py | Python | aao/spiders/__init__.py | rkenny2/aao | 57ccba1b833bfbb030616d1c1f69015b7ac65af2 | [
"MIT"
] | 27 | 2018-08-20T09:31:07.000Z | 2022-03-31T06:12:50.000Z | aao/spiders/__init__.py | rkenny2/aao | 57ccba1b833bfbb030616d1c1f69015b7ac65af2 | [
"MIT"
] | null | null | null | aao/spiders/__init__.py | rkenny2/aao | 57ccba1b833bfbb030616d1c1f69015b7ac65af2 | [
"MIT"
] | 4 | 2018-07-15T23:34:02.000Z | 2021-05-28T15:39:47.000Z | import importlib
package = 'aao.spiders.bookmakers'
SpiderBet365 = importlib.import_module(
'.bet365', package).SpiderBet365
SpiderBwin = importlib.import_module(
'.bwin', package).SpiderBwin
Spider888sport = importlib.import_module(
'.888sport', package).Spider888sport
SpiderWilliamhill = importlib.import_module(
'.williamhill', package).SpiderWilliamhill
spiders = {
'bet365': SpiderBet365,
'bwin': SpiderBwin,
'888sport': Spider888sport,
'williamhill': SpiderWilliamhill,
}
| 25.7 | 46 | 0.743191 | import importlib
package = 'aao.spiders.bookmakers'
# importlib is used because some module names (e.g. '888sport') start with a
# digit and are not valid identifiers, so a plain `import` statement cannot
# load them.
SpiderBet365 = importlib.import_module(
    '.bet365', package).SpiderBet365
SpiderBwin = importlib.import_module(
    '.bwin', package).SpiderBwin
Spider888sport = importlib.import_module(
    '.888sport', package).Spider888sport
SpiderWilliamhill = importlib.import_module(
    '.williamhill', package).SpiderWilliamhill
# registry mapping bookmaker name -> spider class
spiders = {
    'bet365': SpiderBet365,
    'bwin': SpiderBwin,
    '888sport': Spider888sport,
    'williamhill': SpiderWilliamhill,
}
| 0 | 0 | 0 |
0b23dcf2a889f6d295e096ffba1f1642692453de | 2,279 | py | Python | statsSend/jenkins/jenkinsJob.py | luigiberrettini/build-deploy-stats | 52a0bf5aeb8d2f8ef62e4e836eb0b9874dea500d | [
"MIT"
] | 2 | 2017-07-04T14:30:35.000Z | 2017-07-04T16:04:53.000Z | statsSend/jenkins/jenkinsJob.py | luigiberrettini/build-deploy-stats | 52a0bf5aeb8d2f8ef62e4e836eb0b9874dea500d | [
"MIT"
] | null | null | null | statsSend/jenkins/jenkinsJob.py | luigiberrettini/build-deploy-stats | 52a0bf5aeb8d2f8ef62e4e836eb0b9874dea500d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from reporting.category import Category
from statsSend.jenkins.jenkinsBuild import JenkinsBuild | 36.758065 | 107 | 0.584906 | #!/usr/bin/env python3
from reporting.category import Category
from statsSend.jenkins.jenkinsBuild import JenkinsBuild
class JenkinsJob:
    """A Jenkins job reachable at *url*, fetched through the given async *session*."""
    def __init__(self, session, url):
        self.session = session
        self.url = url
        # build dicts collected by retrieve_buildable_descendants()
        self.builds = []
    @property
    def id(self):
        """Job path derived from the URL, e.g.
        'http://jk.domain/job/grandfather/job/father/' -> 'grandfather/father'."""
        return '/'.join(self.url.strip('/').split('/job/')[1:])
    # Sample payloads returned by the Jenkins tree query below — either a
    # folder listing child jobs:
    #   {
    #     "_class": "org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject",
    #     "name": "father",
    #     "url": "http://jk.domain/job/grandfather/job/father/",
    #     "jobs": [
    #       { "name": "deploying", "url": "http://jk.domain/job/grandfather/job/father/job/children/" },
    #       ...
    #     ]
    #   }
    #
    # or a buildable job with its build history:
    #   {
    #     "_class": "org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject",
    #     "name": "father",
    #     "url": "http://jk.domain/job/grandfather/job/father/",
    #     "builds": [
    #       {
    #         "_class": "org.jenkinsci.plugins.workflow.job.WorkflowRun",
    #         "id": "2",
    #         "url": "http://jk.domain/job/grandfather/job/father/1/",
    #         "timestamp": 1493225323359,
    #         "duration": 30315,
    #         "result": "SUCCESS"
    #       },
    #       ...
    #     ]
    #   }
    async def retrieve_buildable_descendants(self):
        """Async-generate every buildable job at or below this one.

        A job answering with 'builds' is buildable: its builds are cached on
        self.builds and the job itself is yielded. Otherwise the 'jobs' list
        is recursed into.
        """
        url = '{:s}?tree=name,url,jobs[name,url],builds[id,url,result,duration,timestamp]'.format(self.url)
        job_json_dict = await self.session.get_resource_at_once_as_json(url)
        if ('builds' in job_json_dict):
            self.builds.extend(job_json_dict['builds'])
            yield self
        else:
            for child_job_json_dict in job_json_dict['jobs']:
                child_job = JenkinsJob(self.session, child_job_json_dict['url'])
                async for buildable_descendant in child_job.retrieve_buildable_descendants():
                    yield buildable_descendant
    def to_category(self):
        """Wrap this job as a reporting Category tagged 'Jenkins'."""
        return Category('Jenkins', self.url)
    def retrieve_builds_since_posix_timestamp(self, since_posix_timestamp):
        """Yield JenkinsBuild objects for cached builds at/after the given timestamp.

        NOTE(review): Jenkins 'timestamp' values are in milliseconds (see the
        sample payload above), so the argument is presumably milliseconds too
        despite the name — confirm against callers.
        """
        for build_json_dict in self.builds:
            if (build_json_dict['timestamp'] >= since_posix_timestamp):
                yield JenkinsBuild(self.id, build_json_dict)
a7bd2c4211c196da6104a551e21f6df02b968760 | 6,004 | py | Python | setupsrc/pl_setup/update_pdfium.py | pypdfium2-team/pypdfium2 | 9a1796ba9f058102997652086a48e28af9cd3579 | [
"Apache-2.0",
"BSD-3-Clause"
] | 17 | 2021-12-13T05:36:20.000Z | 2022-03-13T22:56:16.000Z | setupsrc/pl_setup/update_pdfium.py | pypdfium2-team/pypdfium2 | 9a1796ba9f058102997652086a48e28af9cd3579 | [
"Apache-2.0",
"BSD-3-Clause"
] | 51 | 2021-12-04T13:21:35.000Z | 2022-03-28T13:33:29.000Z | setupsrc/pl_setup/update_pdfium.py | pypdfium2-team/pypdfium2 | 9a1796ba9f058102997652086a48e28af9cd3579 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2022-02-01T22:56:47.000Z | 2022-03-16T13:26:35.000Z | #! /usr/bin/env python3
# SPDX-FileCopyrightText: 2022 geisserml <geisserml@gmail.com>
# SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
# Download the PDFium binaries and generate ctypes bindings
import os
import sys
import shutil
import tarfile
import argparse
import traceback
from urllib import request
from os.path import join, abspath, dirname
from concurrent.futures import ThreadPoolExecutor
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from pl_setup.packaging_base import (
DataTree,
VerNamespace,
PlatformNames,
run_cmd,
call_ctypesgen,
set_version,
)
ReleaseRepo = "https://github.com/bblanchon/pdfium-binaries"
ReleaseURL = ReleaseRepo + "/releases/download/chromium%2F"
ReleaseExtension = "tgz"
ReleaseNames = {
PlatformNames.darwin_x64 : "pdfium-mac-x64",
PlatformNames.darwin_arm64 : "pdfium-mac-arm64",
PlatformNames.linux_x64 : "pdfium-linux-x64",
PlatformNames.linux_x86 : "pdfium-linux-x86",
PlatformNames.linux_arm64 : "pdfium-linux-arm64",
PlatformNames.linux_arm32 : "pdfium-linux-arm",
PlatformNames.musllinux_x64 : "pdfium-linux-musl-x64",
PlatformNames.musllinux_x86 : "pdfium-linux-musl-x86",
PlatformNames.windows_x64 : "pdfium-win-x64",
PlatformNames.windows_x86 : "pdfium-win-x86",
PlatformNames.windows_arm64 : "pdfium-win-arm64",
}
if __name__ == "__main__":
run_cli()
| 29.004831 | 106 | 0.650067 | #! /usr/bin/env python3
# SPDX-FileCopyrightText: 2022 geisserml <geisserml@gmail.com>
# SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
# Download the PDFium binaries and generate ctypes bindings
import os
import sys
import shutil
import tarfile
import argparse
import traceback
from urllib import request
from os.path import join, abspath, dirname
from concurrent.futures import ThreadPoolExecutor
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from pl_setup.packaging_base import (
DataTree,
VerNamespace,
PlatformNames,
run_cmd,
call_ctypesgen,
set_version,
)
ReleaseRepo = "https://github.com/bblanchon/pdfium-binaries"
ReleaseURL = ReleaseRepo + "/releases/download/chromium%2F"
ReleaseExtension = "tgz"
ReleaseNames = {
PlatformNames.darwin_x64 : "pdfium-mac-x64",
PlatformNames.darwin_arm64 : "pdfium-mac-arm64",
PlatformNames.linux_x64 : "pdfium-linux-x64",
PlatformNames.linux_x86 : "pdfium-linux-x86",
PlatformNames.linux_arm64 : "pdfium-linux-arm64",
PlatformNames.linux_arm32 : "pdfium-linux-arm",
PlatformNames.musllinux_x64 : "pdfium-linux-musl-x64",
PlatformNames.musllinux_x86 : "pdfium-linux-musl-x86",
PlatformNames.windows_x64 : "pdfium-win-x64",
PlatformNames.windows_x86 : "pdfium-win-x86",
PlatformNames.windows_arm64 : "pdfium-win-arm64",
}
def get_latest_version():
    # Return the newest pdfium-binaries release number by listing the repo's
    # git refs.  Release tags end in ".../chromium/<build>" (see ReleaseURL),
    # so the last path component of the final listed ref is the numeric build.
    git_ls = run_cmd(["git", "ls-remote", "%s.git" % ReleaseRepo], cwd=None, capture=True)
    tag = git_ls.split("\t")[-1]
    return int( tag.split("/")[-1] )
def handle_versions(latest_version):
    """Synchronize the recorded version state with *latest_version*.

    Bumps V_MINOR when a newer PDFium build is available, clears the
    sourcebuild flag when switching back to pre-built binaries, and always
    records the latest libpdfium version string.
    """
    v_minor = VerNamespace["V_MINOR"]
    v_libpdfium = VerNamespace["V_LIBPDFIUM"]
    is_sourcebuild = VerNamespace["IS_SOURCEBUILD"]
    if is_sourcebuild:
        # A sourcebuild version string is not comparable against release tags,
        # so skip the "is it newer?" check entirely in that case.
        print("Switching from sourcebuild to pre-built binaries.")
        set_version("IS_SOURCEBUILD", False)
    else:
        assert v_libpdfium.isnumeric()
        if int(v_libpdfium) < latest_version:
            print("New PDFium build")
            set_version("V_MINOR", v_minor+1)
        else:
            print("No new PDFium build - will re-create bindings without incrementing version")
    if v_libpdfium != str(latest_version):
        set_version("V_LIBPDFIUM", str(latest_version))
def clear_data(download_files):
    """Remove any pre-existing platform directories before a fresh download.

    *download_files* is iterated for its keys (platform directory paths);
    paths that are not existing directories are silently skipped.
    """
    stale_dirs = (path for path in download_files if os.path.isdir(path))
    for stale in stale_dirs:
        shutil.rmtree(stale)
def _get_package(args):
    # Worker for the download thread pool.  *args* is a
    # (dirpath, file_url, file_path) triple; returns (dirpath, file_path) on
    # success, or None when the download raised.  The exception is printed
    # rather than re-raised so one failed platform does not abort the rest.
    dirpath, file_url, file_path = args
    print("'%s' -> '%s'" % (file_url, file_path))
    try:
        request.urlretrieve(file_url, file_path)
    except Exception:
        traceback.print_exc()
        return
    return dirpath, file_path
def download_releases(latest_version, download_files):
    """Download all release archives for *latest_version* in parallel.

    Parameters:
        latest_version: numeric chromium build to fetch.
        download_files: dict mapping platform directory -> archive base name
            (as produced by get_download_files()).
    Returns:
        dict mapping platform directory -> downloaded archive path; platforms
        whose download failed are omitted (_get_package returns None there).
    """
    base_url = "%s%s/" % (ReleaseURL, latest_version)
    args_list = []
    for dirpath, arcname in download_files.items():
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        filename = "%s.%s" % (arcname, ReleaseExtension)
        file_url = base_url + filename
        file_path = join(dirpath, filename)
        args_list.append( (dirpath, file_url, file_path) )
    archives = {}
    # max(..., 1): ThreadPoolExecutor raises ValueError for max_workers == 0,
    # which would happen with an empty (but legal) platform selection.
    with ThreadPoolExecutor( max(len(args_list), 1) ) as pool:
        for output in pool.map(_get_package, args_list):
            if output is not None:
                dirpath, file_path = output
                archives[dirpath] = file_path
    return archives
def unpack_archives(archives):
    """Extract every downloaded archive and delete the archive file.

    Each archive is unpacked into a sibling 'build_tar' directory; the
    archive itself is removed once extraction succeeds.  Raises ValueError
    if ReleaseExtension is not a supported archive type.
    """
    for archive_path in archives.values():
        if ReleaseExtension != "tgz":
            raise ValueError("Unknown archive extension '%s'" % ReleaseExtension)
        target_dir = join(os.path.dirname(archive_path), "build_tar")
        with tarfile.open(archive_path) as tar:
            tar.extractall(target_dir)
        os.remove(archive_path)
def generate_bindings(archives):
    """Move each platform's shared library into place and run ctypesgen.

    For every extracted platform directory: choose the platform-specific
    binary name, move the single file out of the archive's lib/ (or bin/)
    folder, generate ctypes bindings from the bundled headers, then delete
    the unpacked tree.
    """
    for platform_dir in archives.keys():
        build_dir = join(platform_dir,"build_tar")
        bin_dir = join(build_dir, "lib")
        # NOTE(review): this local shadows the dirname() imported from os.path.
        dirname = os.path.basename(platform_dir)
        if dirname.startswith("windows"):
            target_name = "pdfium.dll"
            bin_dir = join(build_dir, "bin")  # Windows archives keep the DLL under bin/
        elif dirname.startswith("darwin"):
            target_name = "pdfium.dylib"
        elif "linux" in dirname:
            target_name = "pdfium"
        else:
            raise ValueError("Unknown platform directory name '%s'" % dirname)
        items = os.listdir(bin_dir)
        assert len(items) == 1  # expect exactly one shared library per archive
        shutil.move(join(bin_dir, items[0]), join(platform_dir, target_name))
        call_ctypesgen(platform_dir, join(build_dir, "include"))
        shutil.rmtree(build_dir)
def get_download_files(platforms):
    """Map platform data directories to their release archive names.

    Parameters:
        platforms: iterable of PlatformNames keys, or None for all
            supported platforms.
    Returns:
        dict mapping ``join(DataTree, platform)`` -> archive base name.
    Raises:
        ValueError: if a requested platform has no known release archive.
    """
    avail_keys = list(ReleaseNames)  # idiomatic: a dict iterates its keys
    if platforms is None:
        platforms = avail_keys
    download_files = {}
    for pl_name in platforms:
        # Guard clause keeps the happy path unindented.
        if pl_name not in ReleaseNames:
            raise ValueError("Unknown platform name '%s'. Available keys are %s." % (pl_name, avail_keys))
        download_files[ join(DataTree, pl_name) ] = ReleaseNames[pl_name]
    return download_files
def main(platforms):
    # Full update pipeline: resolve target platforms, check the newest
    # release, update recorded versions, wipe stale data, then download,
    # unpack and generate bindings for the new PDFium build.
    download_files = get_download_files(platforms)
    latest_version = get_latest_version()
    handle_versions(latest_version)
    clear_data(download_files)
    archives = download_releases(latest_version, download_files)
    unpack_archives(archives)
    generate_bindings(archives)
def parse_args(argv):
    """Build the CLI parser and parse *argv*.

    The single option --platforms/-p accepts zero or more platform names;
    the returned namespace has ``platforms == None`` when the flag is absent
    and a (possibly empty) list otherwise.
    """
    cli = argparse.ArgumentParser(
        description = "Download pre-built PDFium packages and generate bindings",
    )
    cli.add_argument("--platforms", "-p", metavar="P", nargs="*")
    return cli.parse_args(argv)
def run_cli(argv=None):
    """Command line entry point.

    Parameters:
        argv: argument list to parse; defaults to ``sys.argv[1:]`` evaluated
            at call time.  (The previous default captured sys.argv once at
            import time, silently ignoring later changes to sys.argv.)
    """
    if argv is None:
        argv = sys.argv[1:]
    args = parse_args(argv)
    return main(args.platforms)
if __name__ == "__main__":
run_cli()
| 4,326 | 0 | 253 |
55f548e6457977df06ca2f504c649c7bb491fcb8 | 217 | py | Python | gin/i_o/test/test_create_dataset_from_smiles.py | choderalab/gin | 9082431d8b664699a898c1e2fa490a18737d6e2d | [
"MIT"
] | 24 | 2019-07-20T22:37:09.000Z | 2021-07-07T07:13:56.000Z | gin/i_o/test/test_create_dataset_from_smiles.py | choderalab/gin | 9082431d8b664699a898c1e2fa490a18737d6e2d | [
"MIT"
] | 3 | 2021-05-10T05:29:59.000Z | 2022-02-10T00:15:05.000Z | gin/i_o/test/test_create_dataset_from_smiles.py | kuano-ai/gimlet | 9082431d8b664699a898c1e2fa490a18737d6e2d | [
"MIT"
] | 8 | 2019-08-09T17:30:20.000Z | 2021-12-01T13:27:46.000Z | from gin.i_o.from_smiles import to_mols
import pandas as pd
df = pd.read_csv('data/delaney-processed.csv')
smiles_array = df[['smiles']].values.flatten()
mols = to_mols(smiles_array)
for mol in mols:
print(mol)
| 21.7 | 46 | 0.741935 | from gin.i_o.from_smiles import to_mols
import pandas as pd
df = pd.read_csv('data/delaney-processed.csv')
smiles_array = df[['smiles']].values.flatten()
mols = to_mols(smiles_array)
for mol in mols:
print(mol)
| 0 | 0 | 0 |
20c79a53cae917612c347763df25a19850ccdcfb | 981 | py | Python | web/helpdesk/migrations/0008_auto_20170116_1712.py | stesla/arxcode | a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a | [
"MIT"
] | 5 | 2019-03-16T08:26:53.000Z | 2019-11-27T15:42:16.000Z | web/helpdesk/migrations/0008_auto_20170116_1712.py | stesla/arxcode | a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a | [
"MIT"
] | 7 | 2018-09-29T05:08:15.000Z | 2021-06-10T21:35:32.000Z | web/helpdesk/migrations/0008_auto_20170116_1712.py | stesla/arxcode | a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a | [
"MIT"
] | 7 | 2018-09-19T21:11:29.000Z | 2019-11-19T12:46:14.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-01-16 17:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 36.333333 | 255 | 0.626911 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-01-16 17:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adjust the Ticket.priority field definition and make
    Ticket.submitting_room a nullable FK with SET_NULL on delete.

    Historical migration -- do not change field definitions retroactively;
    create a new migration for any fix.
    """
    dependencies = [
        ('helpdesk', '0007_ticket_submitting_room'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='priority',
            # NOTE(review): blank=3 looks like a typo for blank=True (Django's
            # `blank` is a boolean); 3 is merely truthy, so behavior matches
            # blank=True.  Left untouched because editing a historical
            # migration would break migration state.
            field=models.IntegerField(blank=3, choices=[(1, '1. Critical'), (2, '2. High'), (3, '3. Normal'), (4, '4. Low'), (5, '5. Very Low'), (6, '6. Super Low')], default=3, help_text='1 = Highest Priority, 5 = Low Priority', verbose_name='Priority'),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='submitting_room',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='objects.ObjectDB', verbose_name='Room where this was submitted'),
        ),
    ]
| 0 | 770 | 23 |
4cc04dddb430b232152853948a3923ccef6094d7 | 157 | py | Python | StringCompare.py | shakaka/ExcelReading | 25e26cdd98ee1bc5c9fda24b4b3645b23f0de46e | [
"Apache-2.0"
] | 1 | 2020-08-20T12:44:46.000Z | 2020-08-20T12:44:46.000Z | StringCompare.py | shakaka/ExcelReading | 25e26cdd98ee1bc5c9fda24b4b3645b23f0de46e | [
"Apache-2.0"
] | null | null | null | StringCompare.py | shakaka/ExcelReading | 25e26cdd98ee1bc5c9fda24b4b3645b23f0de46e | [
"Apache-2.0"
] | null | null | null | PhoneDirectory = ['John:009878788677' , 'Jefrey:67654654645' , 'Maria:8787677766']
for entry in PhoneDirectory:
if '7' in entry:
print('yeah')
| 22.428571 | 82 | 0.66879 | PhoneDirectory = ['John:009878788677' , 'Jefrey:67654654645' , 'Maria:8787677766']
for entry in PhoneDirectory:
if '7' in entry:
print('yeah')
| 0 | 0 | 0 |
b71edf427ff507754fa7cb3b8c604e5c6c67b139 | 2,055 | py | Python | puzzle15/part_two.py | Tomer23/advent-of-code-2021 | 6781616807c9e5910cd4fe512aa9a3a9ec6738e2 | [
"Apache-2.0"
] | null | null | null | puzzle15/part_two.py | Tomer23/advent-of-code-2021 | 6781616807c9e5910cd4fe512aa9a3a9ec6738e2 | [
"Apache-2.0"
] | null | null | null | puzzle15/part_two.py | Tomer23/advent-of-code-2021 | 6781616807c9e5910cd4fe512aa9a3a9ec6738e2 | [
"Apache-2.0"
] | null | null | null | FILENAME = './puzzle15/data/input'
small_cave = []
with open(FILENAME) as file:
for line in file:
small_cave.append([int(x) for x in list(line.strip())])
small_n = len(small_cave)
large_n = small_n * 5
cave = [[ 0 for _ in range(large_n)] for _ in range(large_n)]
for i in range(large_n):
for j in range(large_n):
change_i, i_l = divmod(i, small_n)
change_j, j_l = divmod(j, small_n)
if small_cave[i_l][j_l] + change_i + change_j > 9:
cave[i][j] = small_cave[i_l][j_l] - 9 + change_i + change_j
else:
cave[i][j] = small_cave[i_l][j_l] + change_i + change_j
scores = [[ 0 for _ in range(len(cave))] for _ in range(len(cave))]
for i in range(len(cave) - 1, -1 , -1):
for j in range(len(cave) - 1, -1 , -1):
if i < len(cave) - 1 and j < len(cave) - 1:
scores[i][j] = cave[i][j] + min([scores[i + 1][j], scores[i][j + 1]])
elif i < len(cave) - 1 and j == len(cave) - 1:
scores[i][j] = cave[i][j] + scores[i + 1][j]
elif i == len(cave) - 1 and j < len(cave) - 1:
scores[i][j] = cave[i][j] + scores[i][j + 1]
elif i == len(cave) - 1 and j == len(cave) - 1:
scores[i][j] = cave[i][j]
# b
# a c
# d
prev_value = 1000000000
current_value = 100000000
while current_value != prev_value:
prev_value = current_value
for i in range(0, len(cave)):
for j in range(0, len(cave)):
a, b, c, d = 100000, 100000, 100000, 100000
if i > 0:
a = scores[i - 1][j]
if j > 0:
b = scores[i][j - 1]
if i < len(cave) - 1:
d = scores[i + 1][j]
if j < len(cave) - 1:
c = scores[i][j + 1]
if i < len(cave) - 1 and j < len(cave) - 1:
scores[i][j] = cave[i][j] + min([a, b, c, d])
current_value = sum([sum(x) for x in scores])
print(current_value)
print(scores[0][0] - cave[0][0]) | 33.145161 | 82 | 0.491971 | FILENAME = './puzzle15/data/input'
small_cave = []
with open(FILENAME) as file:
for line in file:
small_cave.append([int(x) for x in list(line.strip())])
small_n = len(small_cave)
large_n = small_n * 5
cave = [[ 0 for _ in range(large_n)] for _ in range(large_n)]
for i in range(large_n):
for j in range(large_n):
change_i, i_l = divmod(i, small_n)
change_j, j_l = divmod(j, small_n)
if small_cave[i_l][j_l] + change_i + change_j > 9:
cave[i][j] = small_cave[i_l][j_l] - 9 + change_i + change_j
else:
cave[i][j] = small_cave[i_l][j_l] + change_i + change_j
scores = [[ 0 for _ in range(len(cave))] for _ in range(len(cave))]
for i in range(len(cave) - 1, -1 , -1):
for j in range(len(cave) - 1, -1 , -1):
if i < len(cave) - 1 and j < len(cave) - 1:
scores[i][j] = cave[i][j] + min([scores[i + 1][j], scores[i][j + 1]])
elif i < len(cave) - 1 and j == len(cave) - 1:
scores[i][j] = cave[i][j] + scores[i + 1][j]
elif i == len(cave) - 1 and j < len(cave) - 1:
scores[i][j] = cave[i][j] + scores[i][j + 1]
elif i == len(cave) - 1 and j == len(cave) - 1:
scores[i][j] = cave[i][j]
# b
# a c
# d
prev_value = 1000000000
current_value = 100000000
while current_value != prev_value:
prev_value = current_value
for i in range(0, len(cave)):
for j in range(0, len(cave)):
a, b, c, d = 100000, 100000, 100000, 100000
if i > 0:
a = scores[i - 1][j]
if j > 0:
b = scores[i][j - 1]
if i < len(cave) - 1:
d = scores[i + 1][j]
if j < len(cave) - 1:
c = scores[i][j + 1]
if i < len(cave) - 1 and j < len(cave) - 1:
scores[i][j] = cave[i][j] + min([a, b, c, d])
current_value = sum([sum(x) for x in scores])
print(current_value)
print(scores[0][0] - cave[0][0]) | 0 | 0 | 0 |
5d5ff45542bf2d1b577667a9265c1f9131176d2e | 38 | py | Python | Python/Introduction/Say ''Hello, World!'' With Python/solution.py | TanishqBhargava/HackerRank | a9fd69a19b7cfca864460c1bec63525f4d023e13 | [
"Apache-2.0"
] | 7 | 2020-04-02T16:18:46.000Z | 2021-02-12T14:06:44.000Z | Python/Introduction/Say ''Hello, World!'' With Python/solution.py | hamzaV2000/HackerRank-1 | a9fd69a19b7cfca864460c1bec63525f4d023e13 | [
"Apache-2.0"
] | null | null | null | Python/Introduction/Say ''Hello, World!'' With Python/solution.py | hamzaV2000/HackerRank-1 | a9fd69a19b7cfca864460c1bec63525f4d023e13 | [
"Apache-2.0"
] | 11 | 2020-05-06T08:28:43.000Z | 2021-12-08T17:25:45.000Z | #!/bin/python3
print("Hello, World!") | 12.666667 | 22 | 0.657895 | #!/bin/python3
print("Hello, World!") | 0 | 0 | 0 |
3865ef09a643d534599f12dec74093133848f10f | 4,675 | py | Python | freerouting-1-4-4-pm/3-build-distribution-with-jdk-14.py | pierremolinaro/ElCanari | fd9d87cee18ad484da263959a1c08424c7264eaf | [
"MIT"
] | 3 | 2019-12-18T12:47:51.000Z | 2020-12-21T14:07:43.000Z | freerouting-1-4-4-pm/3-build-distribution-with-jdk-14.py | pierremolinaro/ElCanari | fd9d87cee18ad484da263959a1c08424c7264eaf | [
"MIT"
] | 1 | 2018-09-11T09:11:45.000Z | 2018-09-12T12:13:10.000Z | freerouting-1-4-4-pm/3-build-distribution-with-jdk-14.py | pierremolinaro/ElCanari | fd9d87cee18ad484da263959a1c08424c7264eaf | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# https://developer.apple.com/library/archive/documentation/Security/Conceptual/CodeSigningGuide/Procedures/Procedures.html
#------------------------------------------------------------------------------
import sys, os, subprocess
#------------------------------------------------------------------------------
# FOR PRINTING IN COLOR
#------------------------------------------------------------------------------
BLACK = '\033[90m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
MAGENTA = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BLINK = '\033[5m'
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
#--- Get script absolute path
scriptDir = os.path.dirname (os.path.abspath (sys.argv [0]))
#--- Free routing dir
FREEROUTING_DIR = scriptDir + "/freerouting"
APP_VERSION = "1.4.4-pm"
#--- Goto Freerouting dir
os.chdir (FREEROUTING_DIR)
#--- Compile for distribution
runCommand (["bash", "gradlew", "dist"])
print (BLUE + BOLD + "DONE" + ENDC)
#--- Download and install JDK
# https://jdk.java.net/14/
JPACKAGE_JVM="https://download.java.net/java/GA/jdk14/076bab302c7b4508975440c56f6cc26a/36/GPL/openjdk-14_osx-x64_bin.tar.gz"
JPKG_DIR = scriptDir + "/jdk14"
JPKG_HOME = JPKG_DIR + "/jdk-14.jdk/Contents/Home"
JPKG_ARCHIVE = "jdk14.tar.gz"
if os.path.exists (JPKG_HOME) :
print (BLUE + BOLD + "JDK already installed" + ENDC)
else:
if not os.path.exists (JPKG_DIR) :
runCommand (["mkdir", "-p", JPKG_DIR])
os.chdir (JPKG_DIR)
#--- Download ?
if not os.path.exists (JPKG_ARCHIVE) :
print (BLUE + "Download JDK" + ENDC)
runCommand (["curl", "-o", JPKG_ARCHIVE, JPACKAGE_JVM])
#--- Install ?
if not os.path.exists (JPKG_DIR + "/runtime") :
print (BLUE + "Unpack JDK" + ENDC)
runCommand (["tar", "xvzf", JPKG_ARCHIVE])
print (BLUE + "Create runtime image" + ENDC)
runCommand ([
JPKG_HOME + "/bin/jlink",
"--module-path", JPKG_HOME + "/jmods",
"--add-modules", "java.desktop",
"--strip-debug",
"--no-header-files",
"--no-man-pages",
"--strip-native-commands",
"--vm=server",
"--compress=2",
"--output", "runtime"
])
#--- Build executable
os.chdir (scriptDir)
FREE_ROUTING_NAME = "Freerouting-" + APP_VERSION
runCommand (["rm", "-fr", FREE_ROUTING_NAME + ".app"])
runCommand ([
JPKG_HOME + "/bin/jpackage",
"--input", FREEROUTING_DIR + "/build/dist/",
"--name", FREE_ROUTING_NAME,
"--main-jar", "freerouting-executable.jar",
"--type", "app-image",
"--runtime-image", "jdk14/runtime",
# "--mac-sign",
# "--mac-signing-key-user-name", "pierre@pcmolinaro.name",
"--app-version", APP_VERSION
])
runCommand ([
"/usr/bin/codesign",
"--force",
"--sign", "Apple Development: pierre@pcmolinaro.name",
"--deep",
FREE_ROUTING_NAME + ".app"
])
runCommand ([
"/usr/bin/codesign",
"-dv",
"--verbose=4",
FREE_ROUTING_NAME + ".app"
])
runCommand ([
"/usr/bin/codesign",
"--verify",
"--deep",
"--strict",
"--verbose=2",
FREE_ROUTING_NAME + ".app"
])
# runCommand ([
# "spctl",
# "-a",
# FREE_ROUTING_NAME + ".app"
# ])
# runCommand ([
# "spctl",
# "--assess",
# "--verbose=4",
# "--type", "execute",
# FREE_ROUTING_NAME + ".app"
# ])
#--- Build DMG
PACKAGE_FILE = FREE_ROUTING_NAME + ".pkg"
runCommand (["/usr/bin/productbuild", "--component-compression", "auto", "--component", FREE_ROUTING_NAME + ".app", "/Applications", PACKAGE_FILE])
DISTRIBUTION_DIR = "Freerouting-" + APP_VERSION
runCommand (["/bin/rm", "-rf", DISTRIBUTION_DIR])
runCommand (["/bin/rm", "-f", FREE_ROUTING_NAME + ".dmg"])
runCommand (["/bin/mkdir", DISTRIBUTION_DIR])
runCommand (["/bin/cp", PACKAGE_FILE, DISTRIBUTION_DIR])
runCommand (["/usr/bin/hdiutil", "create", "-srcfolder", FREE_ROUTING_NAME, FREE_ROUTING_NAME + ".dmg", "-fs", "HFS+"])
runCommand (["/bin/rm", PACKAGE_FILE])
runCommand (["/bin/rm", "-rf", DISTRIBUTION_DIR])
#------------------------------------------------------------------------------
| 30.555556 | 147 | 0.543316 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# https://developer.apple.com/library/archive/documentation/Security/Conceptual/CodeSigningGuide/Procedures/Procedures.html
#------------------------------------------------------------------------------
import sys, os, subprocess
#------------------------------------------------------------------------------
# FOR PRINTING IN COLOR
#------------------------------------------------------------------------------
BLACK = '\033[90m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
MAGENTA = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BLINK = '\033[5m'
#------------------------------------------------------------------------------
def runCommand (command) :
  # Echo the command in bold magenta -- quoting fragments that contain a
  # space -- then run it, and abort the whole script on a non-zero exit.
  rendered = MAGENTA + BOLD + "+"
  for fragment in command :
    quoted = ("'" + fragment + "'") if (" " in fragment) else fragment
    rendered += " " + quoted
  rendered += ENDC
  print (rendered)
  process = subprocess.Popen (command)
  process.wait ()
  if process.returncode != 0 :
    sys.exit (process.returncode)
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
#--- Get script absolute path
scriptDir = os.path.dirname (os.path.abspath (sys.argv [0]))
#--- Free routing dir
FREEROUTING_DIR = scriptDir + "/freerouting"
APP_VERSION = "1.4.4-pm"
#--- Goto Freerouting dir
os.chdir (FREEROUTING_DIR)
#--- Compile for distribution
runCommand (["bash", "gradlew", "dist"])
print (BLUE + BOLD + "DONE" + ENDC)
#--- Download and install JDK
# https://jdk.java.net/14/
JPACKAGE_JVM="https://download.java.net/java/GA/jdk14/076bab302c7b4508975440c56f6cc26a/36/GPL/openjdk-14_osx-x64_bin.tar.gz"
JPKG_DIR = scriptDir + "/jdk14"
JPKG_HOME = JPKG_DIR + "/jdk-14.jdk/Contents/Home"
JPKG_ARCHIVE = "jdk14.tar.gz"
if os.path.exists (JPKG_HOME) :
print (BLUE + BOLD + "JDK already installed" + ENDC)
else:
if not os.path.exists (JPKG_DIR) :
runCommand (["mkdir", "-p", JPKG_DIR])
os.chdir (JPKG_DIR)
#--- Download ?
if not os.path.exists (JPKG_ARCHIVE) :
print (BLUE + "Download JDK" + ENDC)
runCommand (["curl", "-o", JPKG_ARCHIVE, JPACKAGE_JVM])
#--- Install ?
if not os.path.exists (JPKG_DIR + "/runtime") :
print (BLUE + "Unpack JDK" + ENDC)
runCommand (["tar", "xvzf", JPKG_ARCHIVE])
print (BLUE + "Create runtime image" + ENDC)
runCommand ([
JPKG_HOME + "/bin/jlink",
"--module-path", JPKG_HOME + "/jmods",
"--add-modules", "java.desktop",
"--strip-debug",
"--no-header-files",
"--no-man-pages",
"--strip-native-commands",
"--vm=server",
"--compress=2",
"--output", "runtime"
])
#--- Build executable
os.chdir (scriptDir)
FREE_ROUTING_NAME = "Freerouting-" + APP_VERSION
runCommand (["rm", "-fr", FREE_ROUTING_NAME + ".app"])
runCommand ([
JPKG_HOME + "/bin/jpackage",
"--input", FREEROUTING_DIR + "/build/dist/",
"--name", FREE_ROUTING_NAME,
"--main-jar", "freerouting-executable.jar",
"--type", "app-image",
"--runtime-image", "jdk14/runtime",
# "--mac-sign",
# "--mac-signing-key-user-name", "pierre@pcmolinaro.name",
"--app-version", APP_VERSION
])
runCommand ([
"/usr/bin/codesign",
"--force",
"--sign", "Apple Development: pierre@pcmolinaro.name",
"--deep",
FREE_ROUTING_NAME + ".app"
])
runCommand ([
"/usr/bin/codesign",
"-dv",
"--verbose=4",
FREE_ROUTING_NAME + ".app"
])
runCommand ([
"/usr/bin/codesign",
"--verify",
"--deep",
"--strict",
"--verbose=2",
FREE_ROUTING_NAME + ".app"
])
# runCommand ([
# "spctl",
# "-a",
# FREE_ROUTING_NAME + ".app"
# ])
# runCommand ([
# "spctl",
# "--assess",
# "--verbose=4",
# "--type", "execute",
# FREE_ROUTING_NAME + ".app"
# ])
#--- Build DMG
PACKAGE_FILE = FREE_ROUTING_NAME + ".pkg"
runCommand (["/usr/bin/productbuild", "--component-compression", "auto", "--component", FREE_ROUTING_NAME + ".app", "/Applications", PACKAGE_FILE])
DISTRIBUTION_DIR = "Freerouting-" + APP_VERSION
runCommand (["/bin/rm", "-rf", DISTRIBUTION_DIR])
runCommand (["/bin/rm", "-f", FREE_ROUTING_NAME + ".dmg"])
runCommand (["/bin/mkdir", DISTRIBUTION_DIR])
runCommand (["/bin/cp", PACKAGE_FILE, DISTRIBUTION_DIR])
runCommand (["/usr/bin/hdiutil", "create", "-srcfolder", FREE_ROUTING_NAME, FREE_ROUTING_NAME + ".dmg", "-fs", "HFS+"])
runCommand (["/bin/rm", PACKAGE_FILE])
runCommand (["/bin/rm", "-rf", DISTRIBUTION_DIR])
#------------------------------------------------------------------------------
| 293 | 0 | 23 |
0bf8e8edc7e5afc247166fdd4df07eace630c1df | 21,091 | py | Python | train_bboxnet.py | alvari1997/cluster_classifier | 36a40631704a4e47fb84cc3f162b7867ef600fbf | [
"MIT"
] | null | null | null | train_bboxnet.py | alvari1997/cluster_classifier | 36a40631704a4e47fb84cc3f162b7867ef600fbf | [
"MIT"
] | null | null | null | train_bboxnet.py | alvari1997/cluster_classifier | 36a40631704a4e47fb84cc3f162b7867ef600fbf | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
from cProfile import label
from dis import dis
import os
import random
from socket import MSG_DONTROUTE
from sklearn import cluster
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import LidarDataset, BoxDataset
from pointnet.box_model import BoxNet
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
from model_utils import BoxNetLoss, parse_output_to_tensors, get_box3d_corners_helper, get_box3d_corners
import open3d as o3d
from provider import angle2class, size2class, class2angle, class2size, compute_box3d_iou, size2class2, give_pred_box_corners, get_3d_box
#from viz_util import draw_lidar, draw_lidar_simple
Loss = BoxNetLoss()
NUM_HEADING_BIN = 12
NUM_SIZE_CLUSTER = 3 # one cluster for each type
NUM_OBJECT_POINT = 512
def boxes_to_corners_3d(boxes3d):
    """Convert (N, 7) boxes [x, y, z, dx, dy, dz, heading] to corner points.

    (x, y, z) is the box center; heading is the rotation about the z-axis.
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Returns:
        corners3d: (N, 8, 3) corner coordinates, ordered as in the sketch.
    """
    # Unit-cube corner template centered at the origin; scaled per box by
    # the extents dx, dy, dz below.
    template = np.array([
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    ]) / 2
    corners3d = boxes3d[:, None, 3:6] * template[None, :, :]
    # Rotate each box's corners by its heading, then translate to its center.
    corners3d = rotate_points_along_z(corners3d, boxes3d[:, 6]).reshape(-1, 8, 3)
    corners3d += boxes3d[:, None, 0:3]
    return corners3d
def rotate_points_along_z(points, angle):
    """Rotate batched point sets about the z-axis.

    Args:
        points: (B, N, 3) array of xyz coordinates.
        angle: (B,) rotation angles in radians; positive rotates x toward y.
    Returns:
        (B, N, 3) rotated points.
    """
    c, s = np.cos(angle), np.sin(angle)
    one = np.ones_like(angle, dtype=np.float32)
    zero = np.zeros_like(angle, dtype=np.float32)
    # One 3x3 rotation matrix per batch element; points are row vectors, so
    # the product `points @ R` applies the rotation.
    rotation = np.stack(
        (c, s, zero,
         -s, c, zero,
         zero, zero, one),
        axis=1,
    ).reshape(-1, 3, 3)
    return points @ rotation
# ----- Command line options for training the box regressor -----
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--num_points', type=int, default=128, help='input size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset', type=str, required=False, help="dataset path")
parser.add_argument('--dataset_type', type=str, default='bbox', help="dataset type bbox|lidar")
opt = parser.parse_args()
print(opt)
blue = lambda x: '\033[94m' + x + '\033[0m'  # ANSI helper: wrap text in blue
# Seed Python and torch RNGs with a random-but-printed seed, so a run can be
# reproduced by pinning manualSeed to the reported value.
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
# Dataset selection: only the 'bbox' dataset type is supported here; the
# dataset roots are hard-coded, so the --dataset CLI option is currently
# ignored.  NOTE(review): confirm whether root should come from opt.dataset
# (see the commented-out lines below).
if opt.dataset_type == 'bbox':
    box_dataset = BoxDataset(
        #root=opt.dataset,
        root='train_unbbox_dataset',
        classification=True,
        npoints=opt.num_points,
        data_augmentation=False)
    test_box_dataset = BoxDataset(
        #root=opt.dataset,
        root='test_unbbox_dataset',
        classification=True,
        split='test',
        npoints=opt.num_points,
        data_augmentation=False)
else:
    exit('wrong dataset type')
# Shuffled loaders for training and test batches.
box_dataloader = torch.utils.data.DataLoader(
    box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))
testboxdataloader = torch.utils.data.DataLoader(
    test_box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))
print(len(box_dataset), len(test_box_dataset))
num_classes = len(box_dataset.classes)
print('classes', num_classes)
# Ensure the output folder exists (ignore "already exists" errors).
try:
    os.makedirs(opt.outf)
except OSError:
    pass
# BoxNet model with 3 input channels; optionally resume from a checkpoint.
classifier = BoxNet(n_classes=num_classes, n_channel=3)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999),eps=1e-08, weight_decay=0.0)
#scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=20, gamma=0.1)
# Decay the learning rate 10x every 20 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
#optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()
num_batch = len(box_dataset) / opt.batchSize
# Interactive matplotlib figure: live train/test loss curves updated
# during the training loop below.
plt.ion()
figure = plt.figure()
ax = figure.add_subplot(111)
idx = []
test_loss = []
train_loss = []
plot1, = ax.plot(idx, test_loss, label='test')
plot2, = ax.plot(idx, train_loss, label='train')
plt.ylim(0, 10)
plt.xlim(0, 158200)
plt.xlabel("i")
plt.ylabel("loss")
plt.legend(loc="lower left")
plt.title("loss-iteration")
for epoch in range(opt.nepoch):
scheduler.step()
for i, data in enumerate(box_dataloader, 0):
points, bbox_target, target, _, dist, cluster_center, voxel = data
points1 = points + cluster_center[:, None]
target = target[:, 0]
dist = dist[:, None]
voxel = voxel[:, :, None]
# transform target scalar to 3x one hot vector
hot1 = torch.zeros(len(data[0]))
hot1[target == 0] = 1
hot2 = torch.zeros(len(data[0]))
hot2[target == 2] = 1
hot3 = torch.zeros(len(data[0]))
hot3[target == 1] = 1
one_hot = torch.vstack((hot1, hot2, hot3))
one_hot = one_hot.transpose(1, 0)
points = points.transpose(2, 1)
points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
optimizer.zero_grad()
classifier = classifier.train()
# NN
box_pred, center_delta = classifier(points, one_hot, dist, voxel)
center_boxnet, \
heading_scores, heading_residual_normalized, heading_residual, \
size_scores, size_residual_normalized, size_residual = \
parse_output_to_tensors(box_pred)
#box3d_center = center_boxnet + center_delta
stage1_center = cluster_center + center_delta # original cluster center in the world
box3d_center = center_boxnet + stage1_center
# heading_scores (32, 12) which bin is the heading
# heading_residual (32, 12) residual angle
# size_scores (32, 3) which bin is the size
# size_residual (32, 3, 3) residual size
'''
2.Center
center: torch.Size([32, 3]) torch.float32
stage1_center: torch.Size([32, 3]) torch.float32
center_label:[32,3]
3.Heading
heading_scores: torch.Size([32, 12]) torch.float32
heading_residual_normalized: torch.Size([32, 12]) torch.float32
heading_residual: torch.Size([32, 12]) torch.float32
heading_class_label:(32)
heading_residual_label:(32)
4.Size
size_scores: torch.Size([32, 8]) torch.float32
size_residual_normalized: torch.Size([32, 8, 3]) torch.float32
size_residual: torch.Size([32, 8, 3]) torch.float32
size_class_label:(32)
size_residual_label:(32,3)'''
# compute GT
bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
box3d_center_label = bbox_target[:,:3]
angle = bbox_target[:, 6]
heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
#print(' ')
#print(heading_class_label)
#print(heading_scores.data.max(1)[1])
#print(heading_residual_label)
#print(heading_residual)
#print(size_class_label)
#print(size_scores.data.max(1)[1])
#print(size_residual_label)
#scls_onehot = torch.eye(NUM_SIZE_CLUSTER)[size_class_label.long()].cuda() # 32,8
#scls_onehot_repeat = scls_onehot.view(-1, NUM_SIZE_CLUSTER, 1).repeat(1, 1, 3) # 32,8,3
#predicted_size_residual = torch.sum( \
# size_residual * scls_onehot_repeat.cuda(), dim=1)#32,3
#print(size_residual_label-predicted_size_residual)
#print(size_residual_label-size_residual)
#print(box3d_center_label)
#print(box3d_center)
#print(' ')
# losses
losses = Loss(box3d_center, box3d_center_label, stage1_center, \
heading_scores, heading_residual_normalized, \
heading_residual, \
heading_class_label, heading_residual_label, \
size_scores, size_residual_normalized, \
size_residual, \
size_class_label, size_residual_label)
loss = losses['total_loss']
# accuracy (FIX: flipped box results in IOU = 0 maybe)
ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
size_residual_label.cpu().detach().numpy())
# matplotlib viz
pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
np_bbox_target = bbox_target.cpu().detach().numpy()
gt_corners = boxes_to_corners_3d(np_bbox_target)
if i > 0 and epoch == -1:
for cc in range(32):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
np_points = points1.cpu().detach().numpy()
pts = np_points[cc]
gt_b = gt_corners[cc] # (8, 3)
b = pred_box_corners[cc]
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
for k in range(0, 4):
xx = 0
yy = 1
zz = 2
# pred
i, j = k, (k + 1) % 4
ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
color='r')
i, j = k + 4, (k + 1) % 4 + 4
ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
color='r')
i, j = k, k + 4
ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
color='r')
# gt
i, j = k, (k + 1) % 4
ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
color='g')
i, j = k + 4, (k + 1) % 4 + 4
ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
color='g')
i, j = k, k + 4
ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
color='g')
#visual_right_scale(corners3d.reshape(-1, 3), ax)
ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
ax.view_init(elev=30., azim=-45)
ax.set_box_aspect([1,1,1])
#ax.set_xlim3d(-3, 3)
#ax.set_ylim3d(-3, 3)
#ax.set_zlim3d(-3, 3)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
'''# Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc...
lines = [[0, 1], [1, 2], [2, 3], [0, 3],
[4, 5], [5, 6], [6, 7], [4, 7],
[0, 4], [1, 5], [2, 6], [3, 7]]
# Use the same color for all lines
colors = [[1, 0, 0] for _ in range(len(lines))]
colors1 = [[0, 1, 0] for _ in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(np_pred_box[0])
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.colors = o3d.utility.Vector3dVector(colors)
line_set1 = o3d.geometry.LineSet()
line_set1.points = o3d.utility.Vector3dVector(np_gt_box[0])
line_set1.lines = o3d.utility.Vector2iVector(lines)
line_set1.colors = o3d.utility.Vector3dVector(colors1)
# Create a visualization object and window
#vis = o3d.visualization.Visualizer()
#vis.create_window()
# Display the bounding boxes:
#vis.add_geometry(line_set)
#o3d.visualization.draw_geometries([line_set,line_set1,pcd])
#o3d.visualization.draw_geometries([line_set1])
#np_points = points1.cpu().detach().numpy()
#np_points = np.transpose(np_points)
#pcd = o3d.geometry.PointCloud()
#pcd.points = o3d.utility.Vector3dVector(np_points)
#o3d.visualization.draw_geometries([pcd])
o3d.visualization.draw_geometries([line_set, line_set1])'''
loss.backward()
optimizer.step()
print('[%d: %d/%d] train loss: %f MIOU: %f' % (epoch, i, num_batch, loss.item(), np.mean(iou3dbox)))
#print('[%d: %d/%d] train loss: %f' % (epoch, i, num_batch, loss.item()))
loss_train = loss.item()
if i % 10 == 0:
j, data = next(enumerate(testboxdataloader, 0))
points, bbox_target, target, _, dist, cluster_center, voxel = data
points1 = points + cluster_center[:, None]
target = target[:, 0]
dist = dist[:, None]
voxel = voxel[:, :, None]
# transform target scalar to 3x one hot vector
hot1 = torch.zeros(len(data[0]))
hot1[target == 0] = 1
hot2 = torch.zeros(len(data[0]))
hot2[target == 2] = 1
hot3 = torch.zeros(len(data[0]))
hot3[target == 1] = 1
one_hot = torch.vstack((hot1, hot2, hot3))
one_hot = one_hot.transpose(1, 0)
points = points.transpose(2, 1)
points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
classifier = classifier.eval()
# NN
box_pred, center_delta = classifier(points, one_hot, dist, voxel)
center_boxnet, \
heading_scores, heading_residual_normalized, heading_residual, \
size_scores, size_residual_normalized, size_residual = \
parse_output_to_tensors(box_pred)
stage1_center = cluster_center + center_delta # original cluster center in the world
box3d_center = center_boxnet + stage1_center
# compute GT, probably wrong setup
bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
box3d_center_label = bbox_target[:,:3]
angle = bbox_target[:, 6] #+ 3/2*np.pi
heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
# losses
losses = Loss(box3d_center, box3d_center_label, stage1_center, \
heading_scores, heading_residual_normalized, \
heading_residual, \
heading_class_label, heading_residual_label, \
size_scores, size_residual_normalized, \
size_residual, \
size_class_label, size_residual_label)
loss = losses['total_loss']
# accuracy
ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
size_residual_label.cpu().detach().numpy())
# matplotlib viz
pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
np_bbox_target = bbox_target.cpu().detach().numpy()
gt_corners = boxes_to_corners_3d(np_bbox_target)
if i > 0 and epoch == -1:
for cc in range(32):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
np_points = points1.cpu().detach().numpy()
pts = np_points[cc]
gt_b = gt_corners[cc] # (8, 3)
b = pred_box_corners[cc]
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
for k in range(0, 4):
xx = 0
yy = 1
zz = 2
# pred
i, j = k, (k + 1) % 4
ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
color='r')
i, j = k + 4, (k + 1) % 4 + 4
ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
color='r')
i, j = k, k + 4
ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
color='r')
# gt
i, j = k, (k + 1) % 4
ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
color='g')
i, j = k + 4, (k + 1) % 4 + 4
ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
color='g')
i, j = k, k + 4
ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
color='g')
#visual_right_scale(corners3d.reshape(-1, 3), ax)
ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
ax.view_init(elev=30., azim=-45)
ax.set_box_aspect([1,1,1])
#ax.set_xlim3d(-3, 3)
#ax.set_ylim3d(-3, 3)
#ax.set_zlim3d(-3, 3)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
print('[%d: %d/%d] %s loss: %f MIOU: %f' % (epoch, i, num_batch, blue('test'), loss.item(), np.mean(iou3dbox)))
test_loss.append(loss.item())
train_loss.append(loss_train)
#loss_list[epoch*791 + i] = loss.item()
idx.append(epoch*791 + i)
plot1.set_xdata(idx)
plot1.set_ydata(test_loss)
plot2.set_xdata(idx)
plot2.set_ydata(train_loss)
figure.canvas.draw()
figure.canvas.flush_events()
time.sleep(0.01)
torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))
'''total_correct = 0
total_testset = 0
for i,data in tqdm(enumerate(testdataloader, 0)):
points, target = data
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.cuda(), target.cuda()
classifier = classifier.eval()
pred, _, _, _ = classifier(points)
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
total_correct += correct.item()
total_testset += points.size()[0]
print("final accuracy {}".format(total_correct / float(total_testset)))''' | 39.79434 | 210 | 0.555261 | from __future__ import print_function
import argparse
from cProfile import label
from dis import dis
import os
import random
from socket import MSG_DONTROUTE
from sklearn import cluster
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import LidarDataset, BoxDataset
from pointnet.box_model import BoxNet
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
from model_utils import BoxNetLoss, parse_output_to_tensors, get_box3d_corners_helper, get_box3d_corners
import open3d as o3d
from provider import angle2class, size2class, class2angle, class2size, compute_box3d_iou, size2class2, give_pred_box_corners, get_3d_box
#from viz_util import draw_lidar, draw_lidar_simple
# Shared loss module and box-parameterization constants.
Loss = BoxNetLoss()
NUM_HEADING_BIN = 12  # number of discrete heading-angle bins
NUM_SIZE_CLUSTER = 3 # one cluster for each type
NUM_OBJECT_POINT = 512  # number of points sampled per object
def boxes_to_corners_3d(boxes3d):
    """
    Expand axis-parameterized 3D boxes into their eight corner points.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3)
    """
    # Sign pattern of the 8 corners of a unit cube centered at the origin.
    corner_signs = np.array([
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    ])
    # Scale unit corners by the half-extents, rotate by heading, then translate
    # to the box center.
    local_corners = corner_signs[None, :, :] * (boxes3d[:, None, 3:6] * 0.5)
    rotated = rotate_points_along_z(local_corners, boxes3d[:, 6]).reshape(-1, 8, 3)
    return rotated + boxes3d[:, None, 0:3]
def rotate_points_along_z(points, angle):
    """
    Rotate each batch of points around the z-axis by its own angle.

    Args:
        points: (B, N, 3)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        points_rot: (B, N, 3) rotated points
    """
    cos_t = np.cos(angle)
    sin_t = np.sin(angle)
    one = np.ones_like(angle, dtype=np.float32)
    zero = np.zeros_like(angle, dtype=np.float32)
    # Row-major per-batch rotation matrix; points are multiplied on the left,
    # so the matrix is the transpose of the usual column-vector convention.
    rows = (cos_t, sin_t, zero,
            -sin_t, cos_t, zero,
            zero, zero, one)
    rot_matrix = np.stack(rows, axis=1).reshape(-1, 3, 3)
    return np.matmul(points, rot_matrix)
# ---- command-line arguments ----
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--num_points', type=int, default=128, help='input size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset', type=str, required=False, help="dataset path")
parser.add_argument('--dataset_type', type=str, default='bbox', help="dataset type bbox|lidar")
opt = parser.parse_args()
print(opt)

# ANSI escape helper: wraps a string in blue terminal color codes.
blue = lambda x: '\033[94m' + x + '\033[0m'

# Seed RNGs with a random but printed seed so a run can be reproduced.
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

# ---- datasets ----
# NOTE(review): the dataset roots are hard-coded here; the --dataset argument
# is currently unused (see the commented-out root= lines).
if opt.dataset_type == 'bbox':
    box_dataset = BoxDataset(
        #root=opt.dataset,
        root='train_unbbox_dataset',
        classification=True,
        npoints=opt.num_points,
        data_augmentation=False)
    test_box_dataset = BoxDataset(
        #root=opt.dataset,
        root='test_unbbox_dataset',
        classification=True,
        split='test',
        npoints=opt.num_points,
        data_augmentation=False)
else:
    exit('wrong dataset type')

box_dataloader = torch.utils.data.DataLoader(
    box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))
testboxdataloader = torch.utils.data.DataLoader(
    test_box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

print(len(box_dataset), len(test_box_dataset))
num_classes = len(box_dataset.classes)
print('classes', num_classes)

# Create the output folder for checkpoints if it does not exist yet.
try:
    os.makedirs(opt.outf)
except OSError:
    pass

# ---- model / optimizer ----
classifier = BoxNet(n_classes=num_classes, n_channel=3)
if opt.model != '':
    # Resume from an existing checkpoint.
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999),eps=1e-08, weight_decay=0.0)
#scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=20, gamma=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
#optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

num_batch = len(box_dataset) / opt.batchSize

# ---- live loss plot (interactive matplotlib figure updated during training) ----
plt.ion()
figure = plt.figure()
ax = figure.add_subplot(111)
idx = []
test_loss = []
train_loss = []
plot1, = ax.plot(idx, test_loss, label='test')
plot2, = ax.plot(idx, train_loss, label='train')
plt.ylim(0, 10)
plt.xlim(0, 158200)
plt.xlabel("i")
plt.ylabel("loss")
plt.legend(loc="lower left")
plt.title("loss-iteration")
# ---- main training loop ----
# NOTE(review): scheduler.step() runs at the start of each epoch, i.e. before
# any optimizer.step() in epoch 0; recent PyTorch expects the opposite order —
# confirm the intended LR schedule.
for epoch in range(opt.nepoch):
    scheduler.step()
    for i, data in enumerate(box_dataloader, 0):
        points, bbox_target, target, _, dist, cluster_center, voxel = data
        # Points shifted back by the cluster center (used only for the debug viz).
        points1 = points + cluster_center[:, None]
        target = target[:, 0]
        dist = dist[:, None]
        voxel = voxel[:, :, None]
        # transform target scalar to 3x one hot vector
        hot1 = torch.zeros(len(data[0]))
        hot1[target == 0] = 1
        hot2 = torch.zeros(len(data[0]))
        hot2[target == 2] = 1
        hot3 = torch.zeros(len(data[0]))
        hot3[target == 1] = 1
        one_hot = torch.vstack((hot1, hot2, hot3))
        one_hot = one_hot.transpose(1, 0)
        points = points.transpose(2, 1)
        points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
        optimizer.zero_grad()
        classifier = classifier.train()
        # NN forward pass
        box_pred, center_delta = classifier(points, one_hot, dist, voxel)
        center_boxnet, \
        heading_scores, heading_residual_normalized, heading_residual, \
        size_scores, size_residual_normalized, size_residual = \
            parse_output_to_tensors(box_pred)
        #box3d_center = center_boxnet + center_delta
        stage1_center = cluster_center + center_delta # original cluster center in the world
        box3d_center = center_boxnet + stage1_center
        # heading_scores (32, 12) which bin is the heading
        # heading_residual (32, 12) residual angle
        # size_scores (32, 3) which bin is the size
        # size_residual (32, 3, 3) residual size
        '''
        2.Center
        center: torch.Size([32, 3]) torch.float32
        stage1_center: torch.Size([32, 3]) torch.float32
        center_label:[32,3]
        3.Heading
        heading_scores: torch.Size([32, 12]) torch.float32
        heading_residual_normalized: torch.Size([32, 12]) torch.float32
        heading_residual: torch.Size([32, 12]) torch.float32
        heading_class_label:(32)
        heading_residual_label:(32)
        4.Size
        size_scores: torch.Size([32, 8]) torch.float32
        size_residual_normalized: torch.Size([32, 8, 3]) torch.float32
        size_residual: torch.Size([32, 8, 3]) torch.float32
        size_class_label:(32)
        size_residual_label:(32,3)'''
        # compute GT: shift target box back to world frame, then encode
        # heading/size as (class bin, residual) pairs.
        bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
        box3d_center_label = bbox_target[:,:3]
        angle = bbox_target[:, 6]
        heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
        size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
        #print(' ')
        #print(heading_class_label)
        #print(heading_scores.data.max(1)[1])
        #print(heading_residual_label)
        #print(heading_residual)
        #print(size_class_label)
        #print(size_scores.data.max(1)[1])
        #print(size_residual_label)
        #scls_onehot = torch.eye(NUM_SIZE_CLUSTER)[size_class_label.long()].cuda() # 32,8
        #scls_onehot_repeat = scls_onehot.view(-1, NUM_SIZE_CLUSTER, 1).repeat(1, 1, 3) # 32,8,3
        #predicted_size_residual = torch.sum( \
        #    size_residual * scls_onehot_repeat.cuda(), dim=1)#32,3
        #print(size_residual_label-predicted_size_residual)
        #print(size_residual_label-size_residual)
        #print(box3d_center_label)
        #print(box3d_center)
        #print(' ')
        # losses
        losses = Loss(box3d_center, box3d_center_label, stage1_center, \
            heading_scores, heading_residual_normalized, \
            heading_residual, \
            heading_class_label, heading_residual_label, \
            size_scores, size_residual_normalized, \
            size_residual, \
            size_class_label, size_residual_label)
        loss = losses['total_loss']
        # accuracy (FIX: flipped box results in IOU = 0 maybe)
        ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
            heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
            box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
            heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
            size_residual_label.cpu().detach().numpy())
        # matplotlib viz
        pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
            heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
        np_bbox_target = bbox_target.cpu().detach().numpy()
        gt_corners = boxes_to_corners_3d(np_bbox_target)
        # Debug visualization of predicted (red) vs ground-truth (green) boxes.
        # epoch == -1 is never true, so this branch is currently disabled.
        if i > 0 and epoch == -1:
            for cc in range(32):
                fig = plt.figure()
                ax = fig.add_subplot(111, projection='3d')
                np_points = points1.cpu().detach().numpy()
                pts = np_points[cc]
                gt_b = gt_corners[cc]  # (8, 3)
                b = pred_box_corners[cc]
                ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
                # NOTE(review): i, j below shadow the batch index i; harmless
                # only because this branch is disabled.
                for k in range(0, 4):
                    xx = 0
                    yy = 1
                    zz = 2
                    # pred
                    i, j = k, (k + 1) % 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    i, j = k + 4, (k + 1) % 4 + 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    i, j = k, k + 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    # gt
                    i, j = k, (k + 1) % 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                    i, j = k + 4, (k + 1) % 4 + 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                    i, j = k, k + 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                #visual_right_scale(corners3d.reshape(-1, 3), ax)
                ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
                ax.view_init(elev=30., azim=-45)
                ax.set_box_aspect([1,1,1])
                #ax.set_xlim3d(-3, 3)
                #ax.set_ylim3d(-3, 3)
                #ax.set_zlim3d(-3, 3)
                ax.set_xlabel('x')
                ax.set_ylabel('y')
                ax.set_zlabel('z')
                plt.show()
        # Commented-out Open3D visualization variant (kept for reference).
        '''# Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc...
        lines = [[0, 1], [1, 2], [2, 3], [0, 3],
                 [4, 5], [5, 6], [6, 7], [4, 7],
                 [0, 4], [1, 5], [2, 6], [3, 7]]
        # Use the same color for all lines
        colors = [[1, 0, 0] for _ in range(len(lines))]
        colors1 = [[0, 1, 0] for _ in range(len(lines))]
        line_set = o3d.geometry.LineSet()
        line_set.points = o3d.utility.Vector3dVector(np_pred_box[0])
        line_set.lines = o3d.utility.Vector2iVector(lines)
        line_set.colors = o3d.utility.Vector3dVector(colors)
        line_set1 = o3d.geometry.LineSet()
        line_set1.points = o3d.utility.Vector3dVector(np_gt_box[0])
        line_set1.lines = o3d.utility.Vector2iVector(lines)
        line_set1.colors = o3d.utility.Vector3dVector(colors1)
        # Create a visualization object and window
        #vis = o3d.visualization.Visualizer()
        #vis.create_window()
        # Display the bounding boxes:
        #vis.add_geometry(line_set)
        #o3d.visualization.draw_geometries([line_set,line_set1,pcd])
        #o3d.visualization.draw_geometries([line_set1])
        #np_points = points1.cpu().detach().numpy()
        #np_points = np.transpose(np_points)
        #pcd = o3d.geometry.PointCloud()
        #pcd.points = o3d.utility.Vector3dVector(np_points)
        #o3d.visualization.draw_geometries([pcd])
        o3d.visualization.draw_geometries([line_set, line_set1])'''
        loss.backward()
        optimizer.step()
        print('[%d: %d/%d] train loss: %f MIOU: %f' % (epoch, i, num_batch, loss.item(), np.mean(iou3dbox)))
        #print('[%d: %d/%d] train loss: %f' % (epoch, i, num_batch, loss.item()))
        loss_train = loss.item()
        # Every 10 batches: evaluate one test batch and refresh the live plot.
        if i % 10 == 0:
            j, data = next(enumerate(testboxdataloader, 0))
            points, bbox_target, target, _, dist, cluster_center, voxel = data
            points1 = points + cluster_center[:, None]
            target = target[:, 0]
            dist = dist[:, None]
            voxel = voxel[:, :, None]
            # transform target scalar to 3x one hot vector
            hot1 = torch.zeros(len(data[0]))
            hot1[target == 0] = 1
            hot2 = torch.zeros(len(data[0]))
            hot2[target == 2] = 1
            hot3 = torch.zeros(len(data[0]))
            hot3[target == 1] = 1
            one_hot = torch.vstack((hot1, hot2, hot3))
            one_hot = one_hot.transpose(1, 0)
            points = points.transpose(2, 1)
            points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
            classifier = classifier.eval()
            # NN forward pass (evaluation mode)
            box_pred, center_delta = classifier(points, one_hot, dist, voxel)
            center_boxnet, \
            heading_scores, heading_residual_normalized, heading_residual, \
            size_scores, size_residual_normalized, size_residual = \
                parse_output_to_tensors(box_pred)
            stage1_center = cluster_center + center_delta # original cluster center in the world
            box3d_center = center_boxnet + stage1_center
            # compute GT, probably wrong setup
            bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
            box3d_center_label = bbox_target[:,:3]
            angle = bbox_target[:, 6] #+ 3/2*np.pi
            heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
            size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
            # losses
            losses = Loss(box3d_center, box3d_center_label, stage1_center, \
                heading_scores, heading_residual_normalized, \
                heading_residual, \
                heading_class_label, heading_residual_label, \
                size_scores, size_residual_normalized, \
                size_residual, \
                size_class_label, size_residual_label)
            loss = losses['total_loss']
            # accuracy
            ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
                heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
                box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
                heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
                size_residual_label.cpu().detach().numpy())
            # matplotlib viz
            pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
                heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
            np_bbox_target = bbox_target.cpu().detach().numpy()
            gt_corners = boxes_to_corners_3d(np_bbox_target)
            # Same disabled debug visualization as in the training branch.
            if i > 0 and epoch == -1:
                for cc in range(32):
                    fig = plt.figure()
                    ax = fig.add_subplot(111, projection='3d')
                    np_points = points1.cpu().detach().numpy()
                    pts = np_points[cc]
                    gt_b = gt_corners[cc]  # (8, 3)
                    b = pred_box_corners[cc]
                    ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
                    # NOTE(review): i, j shadow the batch index i (branch disabled).
                    for k in range(0, 4):
                        xx = 0
                        yy = 1
                        zz = 2
                        # pred
                        i, j = k, (k + 1) % 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        i, j = k + 4, (k + 1) % 4 + 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        i, j = k, k + 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        # gt
                        i, j = k, (k + 1) % 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                        i, j = k + 4, (k + 1) % 4 + 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                        i, j = k, k + 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                    #visual_right_scale(corners3d.reshape(-1, 3), ax)
                    ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
                    ax.view_init(elev=30., azim=-45)
                    ax.set_box_aspect([1,1,1])
                    #ax.set_xlim3d(-3, 3)
                    #ax.set_ylim3d(-3, 3)
                    #ax.set_zlim3d(-3, 3)
                    ax.set_xlabel('x')
                    ax.set_ylabel('y')
                    ax.set_zlabel('z')
                    plt.show()
            print('[%d: %d/%d] %s loss: %f MIOU: %f' % (epoch, i, num_batch, blue('test'), loss.item(), np.mean(iou3dbox)))
            test_loss.append(loss.item())
            train_loss.append(loss_train)
            #loss_list[epoch*791 + i] = loss.item()
            # 791 is presumably the number of batches per epoch — TODO confirm.
            idx.append(epoch*791 + i)
            plot1.set_xdata(idx)
            plot1.set_ydata(test_loss)
            plot2.set_xdata(idx)
            plot2.set_ydata(train_loss)
            figure.canvas.draw()
            figure.canvas.flush_events()
            time.sleep(0.01)
    # Checkpoint the model once per epoch.
    torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))
# Commented-out final full-test-set accuracy pass (kept for reference).
'''total_correct = 0
total_testset = 0
for i,data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
    target = target[:, 0]
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    classifier = classifier.eval()
    pred, _, _, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    total_correct += correct.item()
    total_testset += points.size()[0]
print("final accuracy {}".format(total_correct / float(total_testset)))'''
c890e90a3e98b6bea29fc02df9a08e7506ee1738 | 855 | py | Python | questions/permutations/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 141 | 2017-12-12T21:45:53.000Z | 2022-03-25T07:03:39.000Z | questions/permutations/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 32 | 2015-10-05T14:09:52.000Z | 2021-05-30T10:28:41.000Z | questions/permutations/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 56 | 2015-09-30T05:23:28.000Z | 2022-03-08T07:57:11.000Z | '''
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
'''
| 21.923077 | 74 | 0.48655 | '''
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
'''
class Solution:
    """Enumerate all permutations of a list of distinct integers."""

    def permute(self, nums: List[int]) -> List[List[int]]:
        """Return every permutation of *nums* in backtracking order."""
        results: List[List[int]] = []
        used = set()          # values already placed in the current path
        path: List[int] = []  # the partial permutation being built

        def backtrack() -> None:
            # A complete permutation has the same length as the input.
            if len(path) == len(nums):
                results.append(path.copy())
                return
            for value in nums:
                if value in used:
                    continue
                used.add(value)
                path.append(value)
                backtrack()
                # Undo the choice before trying the next candidate.
                path.pop()
                used.discard(value)

        backtrack()
        return results
| 625 | -6 | 49 |
2a67c24d6509b5402887a363e7b1d2d6392f873a | 3,211 | py | Python | src/nn-mnist.py | NormalReedus/cds-visual-analytics | 4c71251dd71f1850fd9b09c494f766bc6125e747 | [
"MIT"
] | null | null | null | src/nn-mnist.py | NormalReedus/cds-visual-analytics | 4c71251dd71f1850fd9b09c494f766bc6125e747 | [
"MIT"
] | null | null | null | src/nn-mnist.py | NormalReedus/cds-visual-analytics | 4c71251dd71f1850fd9b09c494f766bc6125e747 | [
"MIT"
] | null | null | null | import os
import sys
sys.path.append("..")
import argparse
from pathlib import Path
# Import teaching utils
import pandas as pd
import numpy as np
from utils.neuralnetwork import NeuralNetwork
# Import sklearn metrics
from sklearn import metrics
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
if __name__ == '__main__':
    # CLI entry point: parse arguments, then train and report metrics.
    parser = argparse.ArgumentParser(description = "train neural network on the full MNIST dataset and view the classifier metrics")
    parser.add_argument("-d", "--data_path", default = Path('../data/'), type = Path, help = "path to where the MNIST csv-files dataset is saved or where to save it")
    parser.add_argument("-e", "--epochs", default = 5, type = int, help = "numbers of epochs to train")
    args = parser.parse_args()
    main(data_path = args.data_path, epochs = args.epochs)
import sys
sys.path.append("..")
import argparse
from pathlib import Path
# Import teaching utils
import pandas as pd
import numpy as np
from utils.neuralnetwork import NeuralNetwork
# Import sklearn metrics
from sklearn import metrics
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
def load_mnist(data_path):
    """Load MNIST as numpy arrays, caching the data as CSV files.

    If both cache files already exist under *data_path* they are read back;
    otherwise the dataset is fetched from OpenML and written to the cache
    first.

    Returns:
        (img, label): pixel matrix and label vector as numpy arrays.
    """
    img_path = os.path.join(data_path, 'mnist_img.csv')
    label_path = os.path.join(data_path, 'mnist_label.csv')
    if os.path.isfile(img_path) and os.path.isfile(label_path):
        # Cache hit: read both CSVs back.
        img = pd.read_csv(img_path)
        # A single-column DataFrame is squeezed down to a Series.
        label = pd.read_csv(label_path).squeeze()
    else:
        # Cache miss: download, then persist for the next run.
        if not os.path.isdir(data_path):
            os.mkdir(data_path)
        img, label = fetch_openml('mnist_784', version=1, return_X_y=True)
        for frame, path in ((img, img_path), (label, label_path)):
            frame.to_csv(path, sep=',', encoding='utf-8', index=False)
    # Explicit conversion keeps behavior stable across pandas/sklearn versions.
    return (np.array(img), np.array(label))
def main(data_path, epochs):
    """Train a small feed-forward network on MNIST and print a classification report.

    Args:
        data_path: directory holding (or receiving) the cached MNIST CSV files.
        epochs: number of training epochs.
    """
    images, labels = load_mnist(data_path)
    # Scale raw pixel intensities (assumed 0..255) into [0, 1]; using the
    # fixed range instead of min-max keeps the mapping valid for new images.
    images = images / 255.0
    num_classes = len(set(labels))
    # 80/20 train/test split with a fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(images, labels, random_state=1337, test_size=0.2)
    # One-hot encode the labels (e.g. 2 -> [0,0,1,0,0,0,0,0,0,0]).
    y_train = LabelBinarizer().fit_transform(y_train)
    y_test = LabelBinarizer().fit_transform(y_test)
    # One input node per pixel, two hidden layers, one output node per class.
    net = NeuralNetwork([x_train.shape[1], 32, 16, num_classes])
    net.fit(x_train, y_train, epochs=epochs)
    # Pick the highest-probability class for every test image.
    predictions = net.predict(x_test).argmax(axis=1)
    # Compare against the true labels and print the report.
    print(metrics.classification_report(y_test.argmax(axis=1), predictions))
if __name__ == '__main__':
    # CLI entry point: parse arguments, then train and report metrics.
    parser = argparse.ArgumentParser(description = "train neural network on the full MNIST dataset and view the classifier metrics")
    parser.add_argument("-d", "--data_path", default = Path('../data/'), type = Path, help = "path to where the MNIST csv-files dataset is saved or where to save it")
    parser.add_argument("-e", "--epochs", default = 5, type = int, help = "numbers of epochs to train")
    args = parser.parse_args()
    main(data_path = args.data_path, epochs = args.epochs)
223cc87943392828b955528f3c1b7ea45f818aeb | 903 | py | Python | oelint_adv/rule_base/rule_vars_multiinherit.py | gstroz/oelint-adv | 089b43492df0b2ca78e17df26c215e5e19ed90cc | [
"BSD-2-Clause"
] | null | null | null | oelint_adv/rule_base/rule_vars_multiinherit.py | gstroz/oelint-adv | 089b43492df0b2ca78e17df26c215e5e19ed90cc | [
"BSD-2-Clause"
] | null | null | null | oelint_adv/rule_base/rule_vars_multiinherit.py | gstroz/oelint-adv | 089b43492df0b2ca78e17df26c215e5e19ed90cc | [
"BSD-2-Clause"
] | null | null | null | import re
from oelint_adv.cls_item import Variable
from oelint_adv.cls_rule import Rule
| 34.730769 | 88 | 0.522702 | import re
from oelint_adv.cls_item import Variable
from oelint_adv.cls_rule import Rule
class VarMultiInherit(Rule):
    """Lint rule: warn when the same class appears more than once in 'inherit'."""

    def __init__(self):
        super().__init__(id="oelint.var.multiinherit",
                         severity="warning",
                         message="'{INH}' is included multiple times")

    def check(self, _file, stash):
        """Return one finding per duplicated inherit entry in ``_file``.

        Splits every ``inherit`` variable value on whitespace/commas and flags
        any class name seen a second time.
        """
        res = []
        items = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
                                  attribute=Variable.ATTR_VAR, attributeValue="inherit")
        # Set instead of list: O(1) membership test per inherited class.
        seen = set()
        for i in items:
            for y in [x.strip() for x in re.split(r"\s|,", i.VarValue) if x]:
                if y in seen:
                    res += self.finding(i.Origin, i.InFileLine,
                                        self.Msg.replace("{INH}", y))
                else:
                    seen.add(y)
        return res
| 730 | 7 | 76 |
44d8a84ea755c05320437dbed3d21ac22210b698 | 631 | py | Python | bigdataProxy.py | 4evernaive/YOLOv3Tiny_Face_Mask | 4053aac90d6eaece71662b1fcc96b3d974663bc2 | [
"MIT"
] | null | null | null | bigdataProxy.py | 4evernaive/YOLOv3Tiny_Face_Mask | 4053aac90d6eaece71662b1fcc96b3d974663bc2 | [
"MIT"
] | null | null | null | bigdataProxy.py | 4evernaive/YOLOv3Tiny_Face_Mask | 4053aac90d6eaece71662b1fcc96b3d974663bc2 | [
"MIT"
] | 2 | 2021-01-06T14:19:22.000Z | 2021-01-06T15:35:04.000Z | from google.oauth2 import service_account
from google.cloud import bigquery
from datetime import datetime
| 37.117647 | 171 | 0.770206 | from google.oauth2 import service_account
from google.cloud import bigquery
from datetime import datetime
def injectNotificationDataSet(device,image_url,time,area,stream_url,nomask,allp):
    """Insert one face-mask alert record into the BigQuery warning table.

    Pure side-effect function: builds a single-row payload and streams it
    into the hard-coded project/table via the JSON insert API.
    """
    # NOTE(review): project id, table id and the service-account key file are
    # hard-coded here — consider moving them to configuration.
    row = {
        'device': device,
        'area': area,
        'all': allp,
        'nomask': nomask,
        'image_url': image_url,
        'stream_url': stream_url,
        'time': time,
    }
    credentials = service_account.Credentials.from_service_account_file(
        '2020chatbot-108AEA001-7234299f4f96.json')
    client = bigquery.Client(project='chatbot-108aea001-296006',
                             credentials=credentials)
    client.insert_rows_json('chatbot-108aea001-296006.warning_alert.hama114514',
                            [row])
8e07f6d0a40c30c1ae062554ab83d36a008a77cc | 1,451 | py | Python | flexneuart/ir_datasets/base.py | gitter-badger/FlexNeuART | f69e5421bdebe9db0d993b5470dace61872f90df | [
"Apache-2.0"
] | 101 | 2020-08-06T07:06:00.000Z | 2022-03-02T15:25:59.000Z | flexneuart/ir_datasets/base.py | gitter-badger/FlexNeuART | f69e5421bdebe9db0d993b5470dace61872f90df | [
"Apache-2.0"
] | 9 | 2020-11-05T23:17:06.000Z | 2021-08-21T06:07:30.000Z | flexneuart/ir_datasets/base.py | gitter-badger/FlexNeuART | f69e5421bdebe9db0d993b5470dace61872f90df | [
"Apache-2.0"
] | 17 | 2020-09-09T22:08:03.000Z | 2022-03-25T09:50:30.000Z | #
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base class for configurable processing components. Processing components are
designed to be pipelined.
"""
| 38.184211 | 92 | 0.680221 | #
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base class for configurable processing components. Processing components are
designed to be pipelined.
"""
class BaseTextProcessor:
    """Abstract base for pipelined text-processing components.

    Subclasses implement ``__call__`` to transform a dict of named input
    strings into a dict of named output strings.
    """

    def __call__(self, input_dict : dict):
        """Process all input components to produce one or more outputs.

        :param input_dict: input data; keys are component names, values are
                           strings.
        :return: a dictionary of one or more output pieces. For example, an
                 HTML parser can emit both a body and a title field. Naming
                 of outputs is component-specific; common conventions are
                 1. the input field name, or
                 2. ``<input field name>.<operation type>``.
        """
        raise NotImplementedError
| 0 | 707 | 23 |
3e208f6701e2557c8499a4694562c2b1b0ec266b | 1,289 | py | Python | yamlip.py | jjmurre/yamlip | 479c4c7a476a922354191eb3bb2601550784fdbd | [
"MIT"
] | null | null | null | yamlip.py | jjmurre/yamlip | 479c4c7a476a922354191eb3bb2601550784fdbd | [
"MIT"
] | null | null | null | yamlip.py | jjmurre/yamlip | 479c4c7a476a922354191eb3bb2601550784fdbd | [
"MIT"
] | null | null | null | """yamlip - A yaml interpolation tool"""
__version__ = '0.0.1'
__author__ = 'Jan Murre <jan.murre@catalyz.nl>'
__all__ = []
import functools
from string import Template
from attrdict import AttrDict
import yaml
import click
@click.command()
@click.argument("source_yaml_file")
@click.option("-o", "--output")
| 26.306122 | 90 | 0.679597 | """yamlip - A yaml interpolation tool"""
__version__ = '0.0.1'
__author__ = 'Jan Murre <jan.murre@catalyz.nl>'
__all__ = []
import functools
from string import Template
from attrdict import AttrDict
import yaml
import click
class DotTemplate(Template):
    # Extend string.Template so placeholder identifiers may contain dots and
    # dashes (e.g. ${section.sub-key}), enabling attribute-path lookups.
    idpattern = r"[a-z][\.\-_a-z0-9]*"
def rgetattr(obj, initial_attr, *args):
    """Resolve a dotted attribute path (e.g. ``"a.b.c"``) on *obj*.

    An optional third positional argument acts as the ``getattr`` default.
    Without a default, a missing attribute yields the literal placeholder
    string ``<<no substitute: PATH>>`` (resolution then continues on that
    string, matching the original reduce-based behaviour).
    """
    current = obj
    for part in initial_attr.split('.'):
        try:
            current = getattr(current, part, *args)
        except AttributeError:
            current = f"<<no substitute: {initial_attr}>>"
    return current
def fetch_interpolated_yaml(src_yaml_fn):
    """Read a YAML file and substitute its ${dotted.path} placeholders with
    values looked up from the document itself.

    Unresolvable placeholders are left to rgetattr's placeholder-string
    fallback; unknown template syntax is preserved via safe_substitute.
    """
    with open(src_yaml_fn) as handle:
        raw_text = handle.read()
    template = DotTemplate(raw_text)
    variables = AttrDict(yaml.safe_load(raw_text))
    placeholders = ["".join(match)
                    for match in template.pattern.findall(template.template)]
    mapping = {name: rgetattr(variables, name) for name in placeholders}
    return template.safe_substitute(mapping)
@click.command()
@click.argument("source_yaml_file")
@click.option("-o", "--output")
def yamlip(source_yaml_file, output):
    # CLI entry point: interpolate SOURCE_YAML_FILE and either write the
    # result to the -o/--output path or echo it to stdout.
    # (Comments only, no docstring: click would surface a docstring as the
    # --help text and change the CLI's observable output.)
    result = fetch_interpolated_yaml(source_yaml_file)
    if output:
        with open(output, 'w') as tf:
            tf.write(result)
    else:
        click.echo(result)
| 836 | 46 | 91 |
afe5f5cbf718b4ce6119b99515def79331c8a71c | 1,084 | py | Python | datastructure/practice/c7/c_7_39.py | stoneyangxu/python-kata | 979af91c74718a525dcd2a83fe53ec6342af9741 | [
"MIT"
] | null | null | null | datastructure/practice/c7/c_7_39.py | stoneyangxu/python-kata | 979af91c74718a525dcd2a83fe53ec6342af9741 | [
"MIT"
] | null | null | null | datastructure/practice/c7/c_7_39.py | stoneyangxu/python-kata | 979af91c74718a525dcd2a83fe53ec6342af9741 | [
"MIT"
] | null | null | null | import unittest
from datastructure.links.PositionList import PositionList
if __name__ == '__main__':
unittest.main()
| 21.68 | 57 | 0.640221 | import unittest
from datastructure.links.PositionList import PositionList
class PositionalQueue:
    """FIFO queue backed by a PositionList.

    enqueue returns the Position of the stored element, so callers can later
    remove it from the middle of the queue via delete(p).
    """
    def __init__(self):
        # Underlying doubly linked positional list; front = first().
        self._position_list = PositionList()
    def __len__(self):
        """Number of elements currently in the queue."""
        return len(self._position_list)
    def is_empty(self):
        """True if the queue holds no elements."""
        return self._position_list.is_empty()
    def enqueue(self, e):
        """Append e at the back; return its Position handle."""
        return self._position_list.add_last(e)
    def dequeue(self):
        """Remove and return the front element (FIFO order)."""
        p = self._position_list.first()
        answer = p.element()
        self._position_list.delete(p)
        return answer
    def delete(self, p):
        """Remove the element at Position p (as returned by enqueue)."""
        self._position_list.delete(p)
class MyTestCase(unittest.TestCase):
    """End-to-end check of PositionalQueue, including mid-queue deletion."""
    def test_something(self):
        # Enqueue 1,2,3; delete the middle element by its Position handle;
        # the remainder must still dequeue in FIFO order.
        queue = PositionalQueue()
        self.assertEqual(True, queue.is_empty())
        queue.enqueue(1)
        p = queue.enqueue(2)
        queue.enqueue(3)
        queue.delete(p)
        self.assertEqual(False, queue.is_empty())
        self.assertEqual(2, len(queue))
        self.assertEqual(1, queue.dequeue())
        self.assertEqual(3, queue.dequeue())
if __name__ == '__main__':
unittest.main()
| 709 | 16 | 233 |
d2bbec169cf1fbb94e5c0ece719624ec9804f905 | 2,166 | py | Python | mergify_engine/tests/unit/rules/test_parser.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | mergify_engine/tests/unit/rules/test_parser.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | mergify_engine/tests/unit/rules/test_parser.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2018 Julien Danjou <jd@mergify.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pyparsing
import pytest
from mergify_engine.rules import parser
| 39.381818 | 85 | 0.545706 | # -*- encoding: utf-8 -*-
#
# Copyright © 2018 Julien Danjou <jd@mergify.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pyparsing
import pytest
from mergify_engine.rules import parser
def test_search():
    # Table-driven check: each textual condition must parse into the expected
    # {operator: (attribute, value)} tree. Covers negation (both "-" and "¬"),
    # boolean attributes, length ("#") attributes, team references, and
    # unquoted/quoted values with spaces.
    for line, result in (
        ("base:master", {"=": ("base", "master")}),
        ("base!=master", {"!=": ("base", "master")}),
        ("base~=^stable/", {"~=": ("base", "^stable/")}),
        ("-base:foobar", {"-": {"=": ("base", "foobar")}}),
        ("-author~=jd", {"-": {"~=": ("author", "jd")}}),
        ("¬author~=jd", {"-": {"~=": ("author", "jd")}}),
        ("conflict", {"=": ("conflict", True)}),
        ("locked", {"=": ("locked", True)}),
        ("-locked", {"-": {"=": ("locked", True)}}),
        ("assignee:sileht", {"=": ("assignee", "sileht")}),
        ("#assignee=3", {"=": ("#assignee", 3)}),
        ("#assignee>1", {">": ("#assignee", 1)}),
        ("#assignee>=2", {">=": ("#assignee", 2)}),
        ("assignee=@org/team", {"=": ("assignee", "@org/team")}),
        (
            "status-success=my ci has spaces",
            {"=": ("status-success", "my ci has spaces")},
        ),
        ("status-success='my quoted ci'", {"=": ("status-success", "my quoted ci")}),
        (
            'status-success="my double quoted ci"',
            {"=": ("status-success", "my double quoted ci")},
        ),
    ):
        assert result == tuple(parser.search.parseString(line, parseAll=True))[0]
def test_invalid():
    # Malformed conditions must be rejected with a pyparsing ParseException
    # (unknown attribute, bad operator, value on a boolean attribute, ...).
    for line in ("arf", "-heyo", "locked=1", "++head=master", "foo=bar", "#foo=bar"):
        with pytest.raises(pyparsing.ParseException):
            parser.search.parseString(line, parseAll=True)
| 1,423 | 0 | 46 |
9fb8e676a9ceb154af1c8c119fa1f77cd42228d6 | 740 | py | Python | melodb/loggers/CompositeLogger.py | omarboukhris/melodb | 043907857cd7a73857d8d9b06be0a2282f740253 | [
"BSL-1.0"
] | null | null | null | melodb/loggers/CompositeLogger.py | omarboukhris/melodb | 043907857cd7a73857d8d9b06be0a2282f740253 | [
"BSL-1.0"
] | null | null | null | melodb/loggers/CompositeLogger.py | omarboukhris/melodb | 043907857cd7a73857d8d9b06be0a2282f740253 | [
"BSL-1.0"
] | null | null | null |
from melodb.loggers import ILogger, ConsoleLogger, MongoLogger
from typing import List
| 22.424242 | 62 | 0.744595 |
from melodb.loggers import ILogger, ConsoleLogger, MongoLogger
from typing import List
class CompositeLogger(ILogger):
    """Fan-out logger: forwards every message to each child logger in order."""

    def __init__(self, loggers: List[ILogger]):
        super(CompositeLogger, self).__init__("")
        self.loggers = loggers

    @staticmethod
    def build_composite_logger(
        component_name: str,
        dburl: str = "mongodb://localhost:27017/"):
        """Convenience factory: console + Mongo loggers for one component."""
        children = [
            ConsoleLogger(component_name),
            MongoLogger(component_name, dburl),
        ]
        return CompositeLogger(children)

    def info(self, log_message: str):
        for child in self.loggers:
            child.info(log_message)

    def warn(self, log_message: str):
        for child in self.loggers:
            child.warn(log_message)

    def error(self, log_message: str):
        for child in self.loggers:
            child.error(log_message)
| 483 | 145 | 23 |
fd3666ee0fab2cd9c640657bd1edaac0f3682818 | 10,562 | py | Python | marsyas-vamp/marsyas/scripts/large-evaluators/tempo-reference-implementation/beat_histogram.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | marsyas-vamp/marsyas/scripts/large-evaluators/tempo-reference-implementation/beat_histogram.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | marsyas-vamp/marsyas/scripts/large-evaluators/tempo-reference-implementation/beat_histogram.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | import math
import itertools
import operator
import numpy
import pylab
import scipy.fftpack
import overlap
def autocorrelation(signal):
""" this matches Marsyas exactly. """
N = signal.shape[1]
ffts = scipy.fftpack.fft(signal, 2*N, axis=1) / (2*N)
ffts_abs = abs(ffts)
ffts_abs_scaled = ffts_abs**0.5
scratch = (scipy.fftpack.ifft(ffts_abs_scaled, axis=1
).real)*(2*N)
xcorr = scratch[:,:N]
return xcorr
GCD_TOLERANCE = 0.1
TOLERANCE = 1.04
MAX_BPM = 1000
| 30.703488 | 83 | 0.506249 | import math
import itertools
import operator
import numpy
import pylab
import scipy.fftpack
import overlap
def autocorrelation(signal):
    """FFT-based autocorrelation per row, matching Marsyas exactly.

    Zero-pads to 2N, compresses spectral magnitudes with a square root,
    and returns the first N lags of the inverse transform.
    """
    n = signal.shape[1]
    spectrum = scipy.fftpack.fft(signal, 2 * n, axis=1) / (2 * n)
    compressed = numpy.abs(spectrum) ** 0.5
    full = scipy.fftpack.ifft(compressed, axis=1).real * (2 * n)
    return full[:, :n]
def find_peaks(defs, signal, number=10, peak_neighbors=1):
    """Return indices of the *number* largest local maxima of *signal*,
    restricted to the [4*BPM_MIN, 4*BPM_MAX] index range (quarter-BPM bins).
    A candidate must strictly exceed its immediate neighbours and be >= all
    samples within peak_neighbors bins on either side.
    (Python 2 file: uses xrange.)"""
    candidates = []
    for i in xrange(4*defs.BPM_MIN+peak_neighbors,
            4*defs.BPM_MAX-peak_neighbors-1):
    #for i in xrange(200, 720):
        # strict local maximum w.r.t. immediate neighbours
        if signal[i-1] < signal[i] > signal[i+1]:
            ok = True
            # reject if any sample in the neighbourhood window ties or beats it
            for j in xrange(i-peak_neighbors, i):
                if signal[j] >= signal[i]:
                    ok = False
            for j in xrange(i+1, i+peak_neighbors):
                if signal[j] >= signal[i]:
                    ok = False
            if ok:
                candidates.append( (signal[i], i) )
    # sort by magnitude, descending; keep the top *number* indices
    candidates.sort(reverse=True)
    peaks = []
    #pylab.figure()
    #pylab.plot(signal)
    for c in candidates[:number]:
        index = c[1]
        mag = c[0]
        peaks.append(index)
        #print c
        #pylab.plot(index, mag, 'o')
    #pylab.show()
    return numpy.array(peaks)
def autocorr_index_to_bpm(index, oss_sr):
    # Lag index -> tempo: *index* samples at oss_sr Hz span index/oss_sr
    # seconds per beat, i.e. 60*oss_sr/index beats per minute.
    return 60.0*oss_sr / index
def bpm_to_autocorr_index(bpm, oss_sr):
    # Inverse of autocorr_index_to_bpm: tempo in BPM -> autocorrelation lag
    # index at the onset-strength sample rate oss_sr.
    return 60.0*oss_sr / bpm
# Remainders below this threshold are treated as zero when computing
# approximate GCDs of (possibly non-integer) tempo values.
GCD_TOLERANCE = 0.1

def approximate_gcd(a, b):
    """Euclid's algorithm with a tolerance: stop once the remainder drops
    below GCD_TOLERANCE and return the last non-negligible value."""
    while b >= GCD_TOLERANCE:
        a, b = b, math.fmod(a, b)
    return a
def approximate_lcm(a, b):
    """Least common multiple built on the tolerance-based approximate_gcd."""
    #print "lcm:", a, b
    return a*b / approximate_gcd(a,b)
# Two tempo values "match" if their +/-4% tolerance bands overlap.
TOLERANCE = 1.04

def approximate_match(a, b):
    """True when a and b agree within the multiplicative TOLERANCE band.

    Equivalent to the original four chained-comparison checks: each tests
    whether one value's scaled bound falls strictly inside the other's band.
    """
    lo_a, hi_a = a / TOLERANCE, a * TOLERANCE
    lo_b, hi_b = b / TOLERANCE, b * TOLERANCE
    return (lo_a < lo_b < hi_a or lo_a < hi_b < hi_a
            or lo_b < lo_a < hi_b or lo_b < hi_a < hi_b)
# Upper bound (exclusive) on generated tempo multiples, in BPM.
MAX_BPM = 1000

def get_mults(bpm):
    """Return the integer multiples of *bpm* strictly below MAX_BPM,
    in increasing order."""
    mults = []
    k = 1
    while bpm * k < MAX_BPM:
        mults.append(bpm * k)
        k += 1
    return mults
def approximate_gcds(values):
    """Estimate a fundamental tempo from BPM candidates.
    For every 3-combination of the rounded candidates, intersect their
    multiple-sets under approximate_match, take the smallest surviving
    multiple as that combo's approximate LCM, then vote: the LCM bucket
    (merged by approximate_match) with the most votes wins.
    (Python 2 file: uses print statements and dict.iteritems.)"""
    values = numpy.array(values)
    values = numpy.round(values)
    print "BPMS:\t", values
    combos = itertools.combinations(values, 3)
    lcms = {}
    for combo in combos:
        keep = set()
        lcm = 0
        mycands = get_mults(combo[0])
        cands = list(mycands)
        for v in combo[1:]:
            mycands = get_mults(v)
            keep = set()
            # keep only multiples of v that approximately match a multiple
            # already surviving from the previous candidates
            for a in mycands:
                for b in cands:
                    #print a, b,
                    if approximate_match(a, b):
                        #print "yes"
                        keep.add(a)
                        keep.add(b)
                    #else:
                    #    print "no"
            cands = keep
            #print "----"
        #print keep
        # NOTE(review): bare except silently skips combos with an empty
        # intersection (min() on empty set) — intentional best-effort here.
        try:
            lcm = min(keep)
            if lcm in lcms:
                lcms[lcm] += 1
            else:
                lcms[lcm] = 1
        except:
            pass
        #print "lcm (%.1f, %.1f, %.1f):\t%.1f" %(
        #    combo[0], combo[1], combo[2], lcm)
    # merge nearby LCM buckets (within tolerance) and pick the most voted
    keeps = {}
    for l in lcms:
        done = False
        for k in keeps:
            if approximate_match(l, k):
                keeps[k] += lcms[l]
                done = True
                break
        if not done:
            keeps[l] = lcms[l]
    print keeps
    lcm = max(keeps.iteritems(), key=operator.itemgetter(1))[0]
    return lcm
def beat_histogram(defs, oss_sr, oss_data, plot=False):
    """Build per-window beat histograms from an onset-strength signal.
    Pipeline: overlap oss_data into windows -> autocorrelate each window ->
    map autocorrelation lags into quarter-BPM histogram bins (with linear
    interpolation of unset bins) -> add 2x/4x time-stretched copies to
    strengthen harmonics -> return the top-10 peak BPMs per window.
    Optional side effects: out/*.txt dumps when defs.WRITE_BH, plots when
    *plot*, and a hard exit(1) when defs.CHECK_REFERENCE disagrees.
    (Python 2 file: xrange / print statements; do not restyle casually —
    statement order and the running prev/pprev bin state are load-bearing.)"""
    ### overlap
    overlapped = overlap.sliding_window(
        #numpy.append(
        #    numpy.zeros(defs.BH_WINDOWSIZE - defs.BH_HOPSIZE),
        #    oss_data[:-2*defs.BH_HOPSIZE]),
        oss_data,
        defs.BH_WINDOWSIZE, defs.BH_HOPSIZE)
    #beat_histogram_sr = oss_sr / defs.BH_HOPSIZE
    #for i in range(len(overlapped[0])):
    #    print overlapped[0][i]
    #exit(1)
    ### autocorrelation
    autocorr = autocorrelation(overlapped)
    ### beat histogram
    # Hn[i][b]: histogram for window i; bin b is tempo b/4 BPM. Consecutive
    # lags mapping to the same bin are averaged; gaps between bins are
    # linearly interpolated.
    Hn = numpy.zeros( (autocorr.shape[0], 4*defs.BPM_MAX) )
    for i in xrange( autocorr.shape[0] ):
        #if i > 0 and i != (defs.BH_WINDOWSIZE / defs.BH_HOPSIZE):
        #    Hn[i] = Hn[i-1]
        prev_Hni = 4*defs.BPM_MAX-1
        pprev_Hni = prev_Hni
        sumamp = 0.0
        count = 1
        for j in xrange(1, autocorr.shape[1]):
            factor = 8/2
            Hni = int(oss_sr * 60.0 * factor / (j+1) + 0.5);
            #bpm = autocorr_bpms[i]
            if Hni < 4*defs.BPM_MAX:
                amp = autocorr[i][j]
                #print j, Hni, amp
                if amp < 0:
                    amp = 0
                if prev_Hni == Hni:
                    sumamp += amp
                    count += 1
                else:
                    sumamp += amp
                    Hn[i][prev_Hni] = sumamp / float(count)
                    sumamp = 0.0
                    count = 1
                    ### linear interpolate not-set bins
                    if pprev_Hni - prev_Hni > 1:
                        x0 = prev_Hni
                        x1 = pprev_Hni
                        y0 = Hn[i][prev_Hni]
                        y1 = Hn[i][pprev_Hni]
                        for k in xrange(prev_Hni+1, pprev_Hni):
                            Hn[i][k] = y0 + (y1-y0)*(k-x0)/(x1-x0)
                        #print x0, x1, y0, y1, Hn[i][pprev_Hni-1]
                    pprev_Hni = prev_Hni
                prev_Hni = Hni
    #numpy.savetxt('bh.txt', Hn[0])
    #for a in range(0, 20):
    #    numpy.savetxt("bh-combo-%i.txt" % a, Hn[a])
    #if plot:
    #    pylab.figure()
    #    Hn_bpms = numpy.arange( 4*defs.BPM_MAX) / 4.0
    #    pylab.plot(Hn_bpms, summed_beat_histograms)
    #    pylab.title("Beat histogram")
    ### time stretch, add
    # Add half- and quarter-rate resamplings of each histogram onto itself so
    # harmonically related tempi reinforce each other.
    harmonic_strengthened_bh = numpy.zeros( Hn.shape )
    for i in xrange( Hn.shape[0] ):
        ### unchecked direct translation of marsyas
        factor2 = 0.5
        factor4 = 0.25
        stretched = numpy.zeros( Hn.shape[1] )
        numSamples = Hn.shape[1]
        for t in xrange( Hn.shape[1] ):
            ni = t*factor2
            li = int(ni) % numSamples
            ri = li + 1
            w = ni - li
            #print "%i\t%i\t%f\t%f" % (li, ri, w, ni)
            #zzz
            if ri < numSamples:
                stretched[t] += Hn[i][li] + w * (Hn[i][ri] - Hn[i][li])
            else:
                stretched[t] += Hn[t]
            ni = t*factor4
            li = int(ni) % numSamples
            ri = li + 1
            w = ni - li
            if ri < numSamples:
                stretched[t] += Hn[i][li] + w * (Hn[i][ri] - Hn[i][li])
            else:
                stretched[t] += Hn[t]
        harmonic_strengthened_bh[i] = (
            Hn[i]
            + stretched
            )
        if defs.WRITE_BH:
            samps = numpy.arange(defs.BH_WINDOWSIZE)
            numpy.savetxt("out/aq-%i.txt" % (i+1),
                numpy.vstack((samps, autocorr[i])).transpose())
            bpms = numpy.arange(4*defs.BPM_MAX)/4.0
            numpy.savetxt("out/bh-%i.txt" % (i+1),
                numpy.vstack((bpms, Hn[i])).transpose())
            numpy.savetxt("out/hbh-%i.txt" % (i+1),
                numpy.vstack((bpms, harmonic_strengthened_bh[i])).transpose())
    #for a in range(0, 20):
    #    numpy.savetxt("bh-combo-%i.txt" % a, harmonic_strengthened_bh[a])
    #if plot:
    #    Hn_bpms = numpy.arange( 4*defs.BPM_MAX) / 4.0
    #    pylab.plot(Hn_bpms, harmonic_strengthened_bh)
    ### pick top 8 candidates
    #peaks = []
    #for i in xrange( Hn.shape[0] ):
    #    these_peaks = find_peaks(harmonic_strengthened_bh[i],
    #        number=8, width=11)
    #    peaks.append(these_peaks)
    #summed = numpy.sum(harmonic_strengthened_bh, axis=0)
    #summed = numpy.sum(Hn, axis=0)
    if plot:
        pylab.figure()
        sHn = numpy.sum(Hn, axis=0)
        sHBH = numpy.sum(harmonic_strengthened_bh, axis=0)
        pylab.plot(numpy.arange(len(sHn))/4.0, sHn, label="sum")
        pylab.plot(numpy.arange(len(sHBH))/4.0, sHBH, label="enhanced")
        if defs.OPTIONS_BH == 3:
            b, a = scipy.signal.butter(1, 0.1)
            filtered = scipy.signal.filtfilt(b, a, sHBH)
            pylab.plot(numpy.arange(len(filtered))/4.0, filtered, label="filtered")
        pylab.title("Summed beat histogram")
    # folded_hist = numpy.zeros(60*4)
    # for i in xrange(1, len(summed)-1):
    #     bpm = i/4.0
    #     j = i
    #     while bpm < 15:
    #         bpm *= 2
    #         j *= 2
    #     while bpm > 30:
    #         bpm /= 2.0
    #         j /= 2.0
    #     #j = int(round(j))
    #     j = int(j)
    #     #print "%i\tto\t%i" % (i, j)
    #     if j >= len(folded_hist):
    #         continue
    #     folded_hist [j] += summed[i]
    #
    if defs.WRITE_BH:
        combo_peaks = open('out/beat_histogram.txt', 'w')
    peaks = []
    bh_total = numpy.zeros( (Hn.shape[0], 10) )
    for i in xrange( Hn.shape[0] ):
        these_peaks = find_peaks(defs, harmonic_strengthened_bh[i],
            number=10, peak_neighbors=1)
        bh_total[i,:] = these_peaks
        if defs.WRITE_BH:
            tl = []
            for b in these_peaks:
                tl.append("%.2f" % (b/4.0))
            text = " ".join(tl)
            combo_peaks.write( text + "\n")
            bpms = numpy.array(these_peaks)/4.0
            bpms_strengths = [harmonic_strengthened_bh[i][4*b] for b in bpms]
            numpy.savetxt("out/bh-peaks-%i.txt" % (i+1),
                numpy.vstack((bpms, bpms_strengths)).transpose())
        peaks.append( numpy.array(these_peaks) / 4.0)
    if defs.WRITE_BH:
        combo_peaks.close()
    if defs.CHECK_REFERENCE:
        calc = bh_total / 4.0
        ref = numpy.loadtxt(
            "reference/%s/beat_histogram.txt" % defs.basename)
        delta = calc - ref
        maxerr = numpy.abs(delta).max()
        if maxerr < 1e-12:
            print "BH ok, maximum deviation %.2g" % maxerr
        else:
            pylab.figure()
            pylab.title("BH: calculated - reference")
            pylab.plot(delta)
            pylab.show()
            exit(1)
    #cand_peaks = find_peaks(sHn,
    #    number=8, peak_neighbors=11) / 4.0
    #pylab.plot(numpy.arange(len(sHn))/4.0, sHn)
    #pylab.show()
    #pylab.plot(cand_peaks)
    return peaks
| 9,846 | 0 | 211 |
6f8a5b379f68a2e3171d1a9f70029cb8a66e0056 | 7,359 | py | Python | brain-bert/utils/utils.py | MrDoghead/brain-commonsense | 8af7ab25d9113c623660e6eb928de0f4a43abd20 | [
"MIT"
] | null | null | null | brain-bert/utils/utils.py | MrDoghead/brain-commonsense | 8af7ab25d9113c623660e6eb928de0f4a43abd20 | [
"MIT"
] | null | null | null | brain-bert/utils/utils.py | MrDoghead/brain-commonsense | 8af7ab25d9113c623660e6eb928de0f4a43abd20 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.decomposition import PCA
from scipy.stats import zscore
import time
import csv
import os
import nibabel
from sklearn.metrics.pairwise import euclidean_distances
from scipy.ndimage.filters import gaussian_filter
from utils.ridge_tools import cross_val_ridge, corr
import time as tm
import sys
# train/test is the full NLP feature
# train/test_pca is the NLP feature reduced to 10 dimensions via PCA that has been fit on the training data
# feat_dir is the directory where the NLP features are stored
# train_indicator is an array of 0s and 1s indicating whether the word at this index is in the training set
| 38.528796 | 176 | 0.646963 | import numpy as np
from sklearn.decomposition import PCA
from scipy.stats import zscore
import time
import csv
import os
import nibabel
from sklearn.metrics.pairwise import euclidean_distances
from scipy.ndimage.filters import gaussian_filter
from utils.ridge_tools import cross_val_ridge, corr
import time as tm
import sys
def delay_one(mat, d):
    """Shift the rows of *mat* by delay *d*, zero-filling the vacated rows.

    Positive d: row t of the result is row t-d of the input.
    Negative d: row t of the result is row t-d (i.e. a lead, rows shift up).
    d == 0 returns *mat* itself (no copy), matching the original behaviour.
    """
    if d == 0:
        return mat
    shifted = np.zeros_like(mat)
    if d > 0:
        shifted[d:] = mat[:-d]
    else:
        shifted[:d] = mat[-d:]
    return shifted
def delay_mat(mat, delays):
    """Concatenate delayed copies of *mat* along the last axis."""
    # delays a matrix by a set of delays d.
    # a row t in the returned matrix has the concatenated:
    # row(t-delays[0],t-delays[1]...t-delays[last] )
    new_mat = np.concatenate([delay_one(mat, d) for d in delays],axis = -1)
    return new_mat
# train/test is the full NLP feature
# train/test_pca is the NLP feature reduced to 10 dimensions via PCA that has been fit on the training data
# feat_dir is the directory where the NLP features are stored
# train_indicator is an array of 0s and 1s indicating whether the word at this index is in the training set
def get_nlp_features_fixed_length(layer, seq_len, feat_type, feat_dir, train_indicator, SKIP_WORDS=20, END_WORDS=5176):
    """Load precomputed NLP features and return (train, test, train_pca, test_pca).
    train/test are the full features split by train_indicator; the *_pca
    variants are reduced to 10 dims with PCA fit on the training split only.
    For 'elmo' only the first 512 columns (forward LSTM) are kept.
    NOTE(review): for an unrecognized feat_type only a message is printed —
    train/test stay unbound and pca.fit(train) below raises NameError."""
    loaded = np.load(feat_dir + '/' + feat_type + '_length_'+str(seq_len)+ '_layer_' + str(layer) + '.npy')
    if feat_type == 'elmo':
        train = loaded[SKIP_WORDS:END_WORDS,:][:,:512][train_indicator] # only forward LSTM
        test = loaded[SKIP_WORDS:END_WORDS,:][:,:512][~train_indicator] # only forward LSTM
    elif feat_type == 'bert' or feat_type == 'transformer_xl' or feat_type == 'use':
        train = loaded[SKIP_WORDS:END_WORDS,:][train_indicator]
        test = loaded[SKIP_WORDS:END_WORDS,:][~train_indicator]
    else:
        print('Unrecognized NLP feature type {}. Available options elmo, bert, transformer_xl, use'.format(feat_type))
    # PCA is fit on train only to avoid test-set leakage.
    pca = PCA(n_components=10, svd_solver='full')
    pca.fit(train)
    train_pca = pca.transform(train)
    test_pca = pca.transform(test)
    return train, test, train_pca, test_pca
def CV_ind(n, n_folds):
    """Assign each of *n* items a cross-validation fold id in [0, n_folds).

    Items are split into contiguous equal-size chunks; any remainder is
    absorbed by the last fold. Returns a float array of fold ids.
    """
    fold_ids = np.zeros((n))
    per_fold = n // n_folds
    for fold in range(n_folds - 1):
        fold_ids[fold * per_fold:(fold + 1) * per_fold] = fold
    fold_ids[(n_folds - 1) * per_fold:] = n_folds - 1
    return fold_ids
def TR_to_word_CV_ind(TR_train_indicator,SKIP_WORDS=20,END_WORDS=5176):
    """Map a per-TR train indicator to a per-word boolean train indicator.
    Loads fixed fMRI timing files from ./data/fMRI/, finds the TR each word
    falls into, converts it to an index into the edge-trimmed TR sequence
    (20 TRs dropped at each run start, 15 at each run end — see the offset
    arithmetic), and marks the word as training if its TR is training."""
    time = np.load('./data/fMRI/time_fmri.npy')
    runs = np.load('./data/fMRI/runs_fmri.npy')
    time_words = np.load('./data/fMRI/time_words_fmri.npy')
    time_words = time_words[SKIP_WORDS:END_WORDS]
    word_train_indicator = np.zeros([len(time_words)], dtype=bool)
    words_id = np.zeros([len(time_words)],dtype=int)
    # w=find what TR each word belongs to
    for i in range(len(time_words)):
        words_id[i] = np.where(time_words[i]> time)[0][-1]
        if words_id[i] <= len(runs) - 15:
            # offset: 20 leading TRs per run so far + 15 trailing TRs per completed run
            offset = runs[int(words_id[i])]*20 + (runs[int(words_id[i])]-1)*15
            if TR_train_indicator[int(words_id[i])-offset-1] == 1:
                word_train_indicator[i] = True
    return word_train_indicator
def prepare_fmri_features(train_features, test_features, word_train_indicator, TR_train_indicator, SKIP_WORDS=20, END_WORDS=5176):
    """Convert per-word NLP features into per-TR fMRI design matrices.
    Recombines train/test word features, averages the words falling in each
    TR and the previous one, appends 1–4 TR delayed copies (HRF lag), trims
    20/15 edge TRs per run with z-scoring, and splits back into
    (train_TRs, test_TRs) by TR_train_indicator.
    NOTE(review): run count is hard-coded to 4 in the range(1,5) below."""
    time = np.load('./data/fMRI/time_fmri.npy')
    runs = np.load('./data/fMRI/runs_fmri.npy')
    time_words = np.load('./data/fMRI/time_words_fmri.npy')
    time_words = time_words[SKIP_WORDS:END_WORDS]
    words_id = np.zeros([len(time_words)])
    # w=find what TR each word belongs to
    for i in range(len(time_words)):
        words_id[i] = np.where(time_words[i]> time)[0][-1]
    all_features = np.zeros([time_words.shape[0], train_features.shape[1]])
    all_features[word_train_indicator] = train_features
    all_features[~word_train_indicator] = test_features
    p = all_features.shape[1]
    tmp = np.zeros([time.shape[0], p])
    for i in range(time.shape[0]):
        # mean over words whose TR id is i or i-1 (boolean masks multiplied = AND)
        tmp[i] = np.mean(all_features[(words_id<=i)*(words_id>=i-1)],0)
    tmp = delay_mat(tmp, np.arange(1,5))
    # remove the edges of each run
    tmp = np.vstack([zscore(tmp[runs==i][20:-15]) for i in range(1,5)])
    tmp = np.nan_to_num(tmp)
    return tmp[TR_train_indicator], tmp[~TR_train_indicator]
def run_class_time_CV_fmri_crossval_ridge(data, predict_feat_dict,
                                  regress_feat_names_list = [],method = 'kernel_ridge',
                                  lambdas = np.array([0.1,1,10,100,1000]),
                                  detrend = False, n_folds = 4, skip=5):
    """Cross-validated ridge regression from NLP features to fMRI voxels.
    For each of n_folds contiguous TR folds: build train/test NLP feature
    matrices, drop *skip* boundary TRs between splits, z-score, fit ridge
    weights (cross_val_ridge, method='plain'), and record voxelwise
    prediction correlations.
    Returns (corrs, acc, acc_std, preds_d, stacked_test_data); acc/acc_std
    are allocated but never filled here (always zeros).
    NOTE(review): regress_feat_names_list/method/detrend are unused in this
    body, and regress_feat_names_list has a mutable [] default — harmless
    only because it is never mutated."""
    nlp_feat_type = predict_feat_dict['nlp_feat_type']
    feat_dir = predict_feat_dict['nlp_feat_dir']
    layer = predict_feat_dict['layer']
    seq_len = predict_feat_dict['seq_len']
    n_words = data.shape[0]
    n_voxels = data.shape[1]
    ind = CV_ind(n_words, n_folds=n_folds)
    corrs = np.zeros((n_folds, n_voxels))
    acc = np.zeros((n_folds, n_voxels))
    acc_std = np.zeros((n_folds, n_voxels))
    preds_d = np.zeros((data.shape[0], data.shape[1]))
    all_test_data = []
    for ind_num in range(n_folds):
        train_ind = ind!=ind_num
        test_ind = ind==ind_num
        word_CV_ind = TR_to_word_CV_ind(train_ind)
        _,_,tmp_train_features,tmp_test_features = get_nlp_features_fixed_length(layer, seq_len, nlp_feat_type, feat_dir, word_CV_ind)
        train_features,test_features = prepare_fmri_features(tmp_train_features, tmp_test_features, word_CV_ind, train_ind)
        # split data
        train_data = data[train_ind]
        test_data = data[test_ind]
        # skip TRs between train and test data
        if ind_num == 0: # just remove from front end
            train_data = train_data[skip:,:]
            train_features = train_features[skip:,:]
        elif ind_num == n_folds-1: # just remove from back end
            train_data = train_data[:-skip,:]
            train_features = train_features[:-skip,:]
        else:
            train_data = train_data[skip:-skip,:]
            train_features = train_features[skip:-skip,:]
        # normalize data
        train_data = np.nan_to_num(zscore(np.nan_to_num(train_data)))
        test_data = np.nan_to_num(zscore(np.nan_to_num(test_data)))
        all_test_data.append(test_data)
        train_features = np.nan_to_num(zscore(train_features))
        test_features = np.nan_to_num(zscore(test_features))
        start_time = tm.time()
        # lambda grid here (10^-6..10^9) intentionally overrides the default
        # `lambdas` parameter — the parameter is effectively unused.
        weights, chosen_lambdas = cross_val_ridge(train_features,train_data, n_splits = 10, lambdas = np.array([10**i for i in range(-6,10)]), method = 'plain',do_plot = False)
        preds = np.dot(test_features, weights)
        corrs[ind_num,:] = corr(preds,test_data)
        preds_d[test_ind] = preds
        print('fold {} completed, took {} seconds'.format(ind_num, tm.time()-start_time))
        del weights
    '''
    print(corrs)
    print(corrs.shape)
    top_k = 10
    avg_corrs = np.mean(corrs,0)
    top_k_voxel_ind = np.argsort(avg_corrs)[::-1][0:top_k]
    print(top_k_voxel_ind)
    '''
    return corrs, acc, acc_std, preds_d, np.vstack(all_test_data)
| 6,538 | 0 | 169 |
6887058b47e712db78ed24dea8c1377cfe14d302 | 5,122 | py | Python | Code/loader.py | MLPA-DKU/Gait-Analysis | 2c288561be65e76bebd894df8293d856c4078e2c | [
"MIT"
] | 5 | 2020-07-23T05:55:54.000Z | 2021-07-09T22:15:33.000Z | Code/loader.py | MLPA-DKU/Gait-Analysis | 2c288561be65e76bebd894df8293d856c4078e2c | [
"MIT"
] | null | null | null | Code/loader.py | MLPA-DKU/Gait-Analysis | 2c288561be65e76bebd894df8293d856c4078e2c | [
"MIT"
] | 2 | 2020-07-23T06:05:54.000Z | 2021-04-13T05:55:24.000Z | import pandas as pd
import os
import numpy as np
import datetime
import csv
from Code.create_collector import vti_init
from Code.preprocessing import vector_merge
| 33.477124 | 110 | 0.618118 | import pandas as pd
import os
import numpy as np
import datetime
import csv
from Code.create_collector import vti_init
from Code.preprocessing import vector_merge
def path_loader(target):
    """Map each dataset sub-folder under *target* to its list of .npy paths."""
    path_collector = dict()
    directories = sorted([folder for folder in os.listdir(target)
                          if os.path.isdir(os.path.join(target, folder))])
    keymap_dir = os.path.join(target, 'keymap.txt')
    if os.path.exists(keymap_dir) is not True:
        pass
    else:
        with open(keymap_dir, "r") as f:
            reader = csv.reader(f, delimiter=":")
            # NOTE(review): `lines` is read but never used — this keymap
            # parsing is dead code (and raises IndexError on an empty file).
            lines = list(reader)[0]
    for dataset_name in directories:
        label_dir = os.path.join(target, dataset_name)
        file_name = [os.path.join(label_dir, file)
                     for file in os.listdir(label_dir)
                     if file.endswith(".npy")]
        if not dataset_name in path_collector.keys():
            path_collector[dataset_name] = file_name
    return path_collector
def data_loader(param, target):
    """Load .npy sensor arrays for the sample folders matching *target*.

    Folders are named ``<something>_<nb_combine>``; only folders whose
    nb_combine equals *target* are loaded. Files are named
    ``<sensor>_<datatype>.npy`` and collected per sensor; the result list
    follows the order of param.sensor_type.
    """
    path_collector = path_loader(f'../Datasets/{param.folder}')
    collected_dataset = dict()
    datasets = list()
    for sample_folder, pathlist in path_collector.items():
        _, nb_combine = sample_folder.split('_')
        if int(nb_combine) != target:
            continue
        for datapath in sorted(pathlist):
            filename = datapath.split('/')[-1]
            # The original "disease" and "type" branches were byte-identical,
            # so they are merged; any other datatype is silently skipped,
            # exactly as before.
            if param.datatype in ("disease", "type"):
                stype, datatype = filename.split('_')
                collected_dataset[stype] = np.load(datapath)
    for sensor in param.sensor_type:
        datasets.append(collected_dataset[sensor])
    return datasets
def viz_loader(param):
    """Collect CSV file paths per class folder under ../Raw/<datatype>.

    Returns {folder_name: [csv_path, ...]} in directory-listing order.
    """
    base_dir = f'../Raw/{param.datatype}'
    class_dirs = [entry for entry in os.listdir(base_dir)
                  if os.path.isdir(os.path.join(base_dir, entry))]
    dataset = {}
    for class_name in class_dirs:
        class_path = os.path.join(base_dir, class_name)
        dataset[class_name] = [os.path.join(class_path, csv_file)
                               for csv_file in os.listdir(class_path)
                               if csv_file.endswith(".csv")]
    return dataset
def vti_loader(param):
    """Enumerate class sub-folders under the vti pressure directory.

    NOTE(review): this function looks unfinished — ``pressure_dirs`` is
    computed but unused and nothing is returned, so callers get ``None``.
    """
    data_dir = f'../Datasets/vti/{param.datatype}'
    # Bug fix: the original called os.path.isdir(os.listdir(...)), passing a
    # *list* to isdir (a TypeError); test the joined path instead, matching
    # the pattern used by the sibling loaders.
    pressure_dirs = [folder for folder in os.listdir(os.path.join(data_dir, 'pressure'))
                     if os.path.isdir(os.path.join(data_dir, 'pressure', folder))]
def create_loader(param):
    """Map each class folder under ../Raw/<datatype> to its .csv file paths.

    Files named in ``param.collect["remover"]`` are excluded.  For the
    "type" datatype the class folders are traversed in sorted order so the
    resulting dict has a deterministic key order.
    """
    data_dir = f"../Raw/{param.datatype}"
    # makedirs(+exist_ok) replaces the old `exists() is not True -> mkdir`
    # dance: it is race-free and also creates missing parent directories.
    os.makedirs(data_dir, exist_ok=True)
    excluded = param.collect["remover"]
    directories = [folder for folder in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, folder))]
    if param.datatype == "type":
        directories = sorted(directories)
    dataset = {folder: [] for folder in directories}
    for key in dataset:
        folder_dir = os.path.join(data_dir, key)
        for name in os.listdir(folder_dir):
            if name.endswith(".csv") and name not in excluded:
                dataset[key].append(os.path.join(folder_dir, name))
    return dataset
def vector_loader(param):
    """Build per-person [class, [pressure, acc, gyro]] records and merge them.

    Scans ../Raw/<datatype> like the other loaders, then parses every CSV
    through ``vti_init`` and hands the collected records to ``vector_merge``
    together with the set of class numbers seen.
    """
    data_dir = f'../Raw/{param.datatype}'
    excluded = param.collect["remover"]
    directories = [folder for folder in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, folder))]
    if param.datatype == "type":
        directories = sorted(directories)
    dataset = {folder: [] for folder in directories}
    for key in dataset:
        folder_dir = os.path.join(data_dir, key)
        for name in os.listdir(folder_dir):
            if name.endswith(".csv") and name not in excluded:
                dataset[key].append(os.path.join(folder_dir, name))
    collected = dict()
    class_count = list()
    for files in dataset.values():
        for file in files:
            # File names look like "<person-number>_<class-number>.csv".
            peo_nb, class_text = file.split('/')[-1].split('_')
            class_nb = int(class_text.split('.')[0])
            class_count.append(class_nb)
            pressure, acc, gyro = vti_init(param, file)  # left, right
            collected[int(peo_nb)] = [class_nb, [pressure, acc, gyro]]
    return vector_merge(collected, list(set(class_count)))
| 4,813 | 0 | 138 |
5bab5badf9683279c88080d58b0c2344b2ed1d22 | 1,344 | py | Python | main.py | GustavoHdezH/Website-Blocker | df6e2bee470399404653274f906e3463afee5f0a | [
"MIT"
] | null | null | null | main.py | GustavoHdezH/Website-Blocker | df6e2bee470399404653274f906e3463afee5f0a | [
"MIT"
] | null | null | null | main.py | GustavoHdezH/Website-Blocker | df6e2bee470399404653274f906e3463afee5f0a | [
"MIT"
] | null | null | null | import time
from datetime import datetime as dt
"""
Website blocker: during working hours the listed sites are redirected to
localhost by appending entries to the OS hosts file; outside those hours
the entries are removed again.  Must run with admin/root privileges so the
hosts file is writable.

hosts file for Windows:     C:\Windows\System32\drivers\etc\hosts
hosts file for Linux & Mac: /etc/hosts
"""
# list paths
hosts_path_system = r"C:\Windows\System32\drivers\etc\hosts"
host_dir = hosts_path_system
#host_dir = "hosts" local
# Loopback address every blocked site resolves to.
redir = "127.0.0.1"
# list websites to block
websites_list =[
    "www.facebook.com",
    "www.youtube.com",
    "www.google.com.mx"
]
# Define working hours
from_hour = 7
to_hour = 13
#Main Program
while True:
    # Are we inside today's 07:00-13:00 working-hours window?
    if dt(dt.now().year, dt.now().month, dt.now().day, from_hour) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, to_hour):
        print("En hora de trabajar: Bloqueo Activo ")
        with open(host_dir, 'r+') as file:
            content = file.read()
            # Append a redirect line for every site not already present.
            for website in websites_list:
                if website in content:
                    pass
                else:
                    file.write(redir + " " + website + "\n")
    else:
        with open(host_dir, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            # Rewrite keeping only lines mentioning none of the blocked
            # sites, then truncate whatever is left past the new end.
            for line in content:
                if not any(website in line for website in websites_list):
                    file.write(line)
            file.truncate()
        print("Es hora de relajarse: Bloqueo Desactivado")
time.sleep(1) #Seconds | 31.255814 | 136 | 0.58631 | import time
from datetime import datetime as dt
"""
host files for windows windows c:\windows\system32\drivers\etc
host files for linux & Mac /ect/hosts
"""
# list paths
hosts_path_system = r"C:\Windows\System32\drivers\etc\hosts"
host_dir = hosts_path_system
#host_dir = "hosts" local
redir = "127.0.0.1"
# list websites to block
websites_list =[
"www.facebook.com",
"www.youtube.com",
"www.google.com.mx"
]
# Define working hours
from_hour = 7
to_hour = 13
#Main Program
while True:
if dt(dt.now().year, dt.now().month, dt.now().day, from_hour) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, to_hour):
print("En hora de trabajar: Bloqueo Activo ")
with open(host_dir, 'r+') as file:
content = file.read()
for website in websites_list:
if website in content:
pass
else:
file.write(redir + " " + website + "\n")
else:
with open(host_dir, 'r+') as file:
content = file.readlines()
file.seek(0)
for line in content:
if not any(website in line for website in websites_list):
file.write(line)
file.truncate()
print("Es hora de relajarse: Bloqueo Desactivado")
time.sleep(1) #Seconds | 0 | 0 | 0 |
05a1dfdbc5346ff59ea827502b83908fd5ea228d | 1,124 | py | Python | aurora/drivers/util.py | andykee/aurora | 927385b5d8243ecd1c2e6eaab246e4d457510212 | [
"MIT"
] | null | null | null | aurora/drivers/util.py | andykee/aurora | 927385b5d8243ecd1c2e6eaab246e4d457510212 | [
"MIT"
] | 1 | 2021-06-02T23:11:23.000Z | 2021-06-22T22:14:12.000Z | aurora/drivers/util.py | andykee/aurora | 927385b5d8243ecd1c2e6eaab246e4d457510212 | [
"MIT"
] | null | null | null | import importlib
import pkgutil
import aurora.drivers
| 27.414634 | 107 | 0.696619 | import importlib
import pkgutil
import aurora.drivers
def iter_namespace(ns_pkg):
    """Yield (finder, absolute_name, ispkg) for every module inside *ns_pkg*.

    Passing ``ns_pkg.__name__ + "."`` as the prefix makes ``iter_modules``
    report absolute module names instead of relative ones, so each name can
    be fed straight to ``importlib.import_module`` without modification.
    """
    prefix = ns_pkg.__name__ + "."
    return pkgutil.iter_modules(ns_pkg.__path__, prefix)
def import_namespace_plugins():
    """Import every sub-package of the :mod:`aurora.drivers` namespace.

    NOTE: a driver class MUST be importable at the top level (i.e. imported
    in the driver's ``__init__.py``) for this to make it visible.
    """
    for _finder, name, ispkg in iter_namespace(aurora.drivers):
        if not ispkg:
            continue
        importlib.import_module(name)
def get_namespace_plugins(ns_pkg=None):
    """Return {absolute_name: module} for every sub-package of *ns_pkg*.

    Defaults to the :mod:`aurora.drivers` namespace when *ns_pkg* is None.
    """
    if ns_pkg is None:
        import aurora.drivers as ns_pkg
    plugins = {}
    for _finder, name, ispkg in iter_namespace(ns_pkg):
        if ispkg:
            plugins[name] = importlib.import_module(name)
    return plugins
def list_drivers(ns_pkg=None):
    """Print the installed driver plugins, one name per line."""
    ns_plugins = get_namespace_plugins(ns_pkg)
    if not ns_plugins:
        print('No drivers are installed')
    else:
        print('Drivers found:\n' + '\n'.join(ns_plugins))
d9cd1da08e71c31b14ac37edb655f6ce2477eb27 | 2,731 | py | Python | cifar10.py | moskomule/sam.pytorch | 766a038e9ece49fcf74283b309a377bc95054197 | [
"MIT"
] | 101 | 2020-12-30T07:31:33.000Z | 2022-03-30T08:22:39.000Z | cifar10.py | moskomule/sam.pytorch | 766a038e9ece49fcf74283b309a377bc95054197 | [
"MIT"
] | 1 | 2021-04-05T19:57:14.000Z | 2021-04-15T01:40:01.000Z | cifar10.py | moskomule/sam.pytorch | 766a038e9ece49fcf74283b309a377bc95054197 | [
"MIT"
] | 10 | 2020-12-31T02:43:10.000Z | 2022-03-27T10:02:34.000Z | from functools import partial
from typing import Tuple
import chika
import homura
import torch
import torch.nn.functional as F
from homura import lr_scheduler, reporters, trainers
from homura.vision import DATASET_REGISTRY, MODEL_REGISTRY
from sam import SAMSGD as _SAMSGD
@chika.config
@chika.config
@chika.main(cfg_cls=Config, strict=True)
if __name__ == '__main__':
main()
| 29.365591 | 107 | 0.633834 | from functools import partial
from typing import Tuple
import chika
import homura
import torch
import torch.nn.functional as F
from homura import lr_scheduler, reporters, trainers
from homura.vision import DATASET_REGISTRY, MODEL_REGISTRY
from sam import SAMSGD as _SAMSGD
def SAM(lr=1e-1, momentum=0.0, dampening=0.0,
        weight_decay=0.0, nesterov=False, rho=0.05):
    """Return a SAMSGD factory pre-bound with the given hyper-parameters.

    The original forwarded ``**locals()``, which silently breaks as soon as
    any local variable is introduced before the call; the keyword arguments
    are now forwarded explicitly.
    """
    return partial(_SAMSGD, lr=lr, momentum=momentum, dampening=dampening,
                   weight_decay=weight_decay, nesterov=nesterov, rho=rho)
@chika.config
class Optim:
    """Optimizer hyper-parameters (chika CLI config section)."""
    epochs: int = 200
    # "sam" selects the sharpness-aware SGD factory above, "sgd" plain SGD.
    name: str = chika.choices("sam", "sgd")
    lr: float = 0.1
    weight_decay: float = 5e-4
    # SAM neighborhood radius; only consulted when name == "sam".
    rho: float = 5e-2
@chika.config
class Config:
    """Top-level run configuration (chika CLI config)."""
    optim: Optim
    model: str = chika.choices("resnet20", "resnet56", "se_resnet56", "wrn28_2", "resnext29_32x4d")
    batch_size: int = 128
    use_amp: bool = False  # automatic mixed precision
    jit_model: bool = False  # wrap the model in torch.jit.script
    seed: int = 1
    # GPU index, clamped to the devices actually present.
    gpu: int = chika.bounded(0, 0, torch.cuda.device_count())
class Trainer(trainers.SupervisedTrainer):
    """Supervised trainer whose train step drives the optimizer via a closure.

    SAM re-evaluates the loss after its ascent step, so the forward/backward
    pass is wrapped in a closure handed to ``optimizer.step`` (which SAM may
    invoke more than once per step).
    """
    def iteration(self,
                  data: Tuple[torch.Tensor, torch.Tensor]
                  ) -> None:
        # Evaluation keeps homura's stock behavior.
        if not self.is_train:
            return super().iteration(data)
        input, target = data
        def closure():
            # One full forward/backward pass; gradients are reset first.
            self.optimizer.zero_grad()
            output = self.model(input)
            loss = self.loss_f(output, target)
            loss.backward()
            return loss
        loss = self.optimizer.step(closure)
        self.reporter.add("loss", loss)
def _main(cfg):
    """Build model/data/optimizer from ``cfg``, train on CIFAR-10, report accuracy."""
    model = MODEL_REGISTRY(cfg.model)(num_classes=10)
    if cfg.jit_model:
        model = torch.jit.script(model)
    train_loader, test_loader = DATASET_REGISTRY("cifar10")(cfg.batch_size, num_workers=4, download=True)
    # SAM wraps SGD with the sharpness-aware step; otherwise plain SGD.
    optimizer = (SAM(lr=cfg.optim.lr, momentum=0.9, weight_decay=cfg.optim.weight_decay, rho=cfg.optim.rho)
                 if cfg.optim.name == "sam" else
                 homura.optim.SGD(lr=cfg.optim.lr, momentum=0.9, weight_decay=cfg.optim.weight_decay))
    scheduler = lr_scheduler.CosineAnnealingWithWarmup(cfg.optim.epochs, 4, 5)
    with Trainer(model,
                 optimizer,
                 F.cross_entropy,
                 reporters=[reporters.TensorboardReporter('.')],
                 scheduler=scheduler,
                 use_amp=cfg.use_amp,
                 ) as trainer:
        for _ in trainer.epoch_range(cfg.optim.epochs):
            trainer.train(train_loader)
            trainer.test(test_loader)
            trainer.scheduler.step()
        print(f"Max Test Accuracy={max(trainer.reporter.history('accuracy/test')):.3f}")
@chika.main(cfg_cls=Config, strict=True)
def main(cfg: Config):
    """CLI entry point: pin the GPU, fix the RNG seed, then run training."""
    torch.cuda.set_device(cfg.gpu)
    with homura.set_seed(cfg.seed):
        _main(cfg)
if __name__ == '__main__':
    main()
| 1,757 | 420 | 161 |
076ca4822407724857fe3e31d79539177654a693 | 16,470 | py | Python | app/admin/views.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 11 | 2017-08-23T17:41:43.000Z | 2018-10-24T03:00:38.000Z | app/admin/views.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 480 | 2017-07-14T00:29:11.000Z | 2020-01-06T19:04:51.000Z | app/admin/views.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 22 | 2017-07-07T00:07:32.000Z | 2020-02-27T19:43:14.000Z | import csv
import io
from flask import (
current_app,
flash,
redirect,
render_template,
request,
Response,
url_for,
)
from flask_login import current_user
from . import admin_bp
from .forms import (
CancelCarpoolAdminForm,
DeleteDestinationForm,
DestinationForm,
ProfilePurgeForm,
)
from geoalchemy2.shape import to_shape
from .. import db
from ..email import send_email
from ..carpool.views import (
cancel_carpool,
email_driver_rider_cancelled_request,
)
from ..models import (
Carpool,
Destination,
Person,
Role,
PersonRole,
RideRequest,
)
@admin_bp.route('/admin/')
@admin_bp.route('/admin/stats/')
@admin_bp.route('/admin/users/<uuid>')
@admin_bp.route('/admin/users/<uuid>/purge', methods=['GET', 'POST'])
@admin_bp.route('/admin/users/<user_uuid>/togglerole', methods=['POST'])
@admin_bp.route('/admin/users')
@admin_bp.route('/admin/drivers_and_riders')
@admin_bp.route('/admin/users.csv')
@admin_bp.route('/admin/carpools')
@admin_bp.route('/admin/carpools.csv')
@admin_bp.route('/admin/destinations')
@admin_bp.route('/admin/destinations/new', methods=['GET', 'POST'])
@admin_bp.route('/admin/destinations/<uuid>', methods=['GET', 'POST'])
@admin_bp.route('/admin/destinations/<uuid>/delete', methods=['GET', 'POST'])
@admin_bp.route('/admin/destinations/<uuid>/togglehidden', methods=['POST'])
@admin_bp.route('/admin/emailpreview/<template>')
@admin_bp.route('/admin/<uuid>/cancel', methods=['GET', 'POST'])
| 33.47561 | 109 | 0.613236 | import csv
import io
from flask import (
current_app,
flash,
redirect,
render_template,
request,
Response,
url_for,
)
from flask_login import current_user
from . import admin_bp
from .forms import (
CancelCarpoolAdminForm,
DeleteDestinationForm,
DestinationForm,
ProfilePurgeForm,
)
from geoalchemy2.shape import to_shape
from .. import db
from ..email import send_email
from ..carpool.views import (
cancel_carpool,
email_driver_rider_cancelled_request,
)
from ..models import (
Carpool,
Destination,
Person,
Role,
PersonRole,
RideRequest,
)
@admin_bp.route('/admin/')
def admin_index():
    """Render the admin landing page."""
    return render_template('admin/index.html')
@admin_bp.route('/admin/stats/')
def admin_stats():
    """Render high-level usage counts for the admin dashboard."""
    context = {
        'carpool_count': Carpool.query.count(),
        'ride_request_count_approved': RideRequest.query.filter_by(status='approved').count(),
        'ride_request_count_requested': RideRequest.query.filter_by(status='requested').count(),
        'destination_count': Destination.query.count(),
        # Distinct drivers, i.e. people with at least one carpool.
        'driver_count': Carpool.query.distinct(Carpool.driver_id).count(),
    }
    return render_template('admin/stats.html', **context)
@admin_bp.route('/admin/users/<uuid>')
def user_show(uuid):
    """Render the detail page for one user; 404 on an unknown uuid."""
    user = Person.uuid_or_404(uuid)
    return render_template('admin/users/show.html', user=user)
@admin_bp.route('/admin/users/<uuid>/purge', methods=['GET', 'POST'])
def user_purge(uuid):
    """Delete a user together with their ride requests and carpools.

    GET renders a confirmation form; a confirmed POST performs the purge.
    Admins may not purge themselves or other admins.  Affected riders and
    drivers are emailed before each record is removed; the whole purge is
    rolled back on any failure.
    """
    user = Person.uuid_or_404(uuid)
    form = ProfilePurgeForm()
    if form.validate_on_submit():
        if form.submit.data:
            # Guard rails: no self-purge, no purging fellow admins.
            if user.id == current_user.id:
                flash("You can't purge yourself", 'error')
                current_app.logger.info("User %s tried to purge themselves",
                                        current_user.id)
                return redirect(url_for('admin.user_show', uuid=user.uuid))
            if user.has_roles('admin'):
                flash("You can't purge other admins", 'error')
                current_app.logger.info("User %s tried to purge admin %s",
                                        current_user.id, user.id)
                return redirect(url_for('admin.user_show', uuid=user.uuid))
            try:
                # Delete the ride requests for this user
                for req in user.get_ride_requests_query():
                    current_app.logger.info("Deleting user %s's request %s",
                                            user.id, req.id)
                    email_driver_rider_cancelled_request(req, req.carpool,
                                                         user)
                    db.session.delete(req)
                # Delete the carpools for this user
                for pool in user.get_driving_carpools():
                    current_app.logger.info("Deleting user %s's pool %s",
                                            user.id, pool.id)
                    cancel_carpool(pool)
                    db.session.delete(pool)
                # Delete the user's account
                current_app.logger.info("Deleting user %s", user.id)
                db.session.delete(user)
                db.session.commit()
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; Exception keeps the rollback behavior
                # while letting those propagate.
                db.session.rollback()
                current_app.logger.exception("Problem deleting user account")
                flash("There was a problem purging the user", 'error')
                return redirect(url_for('admin.user_show', uuid=user.uuid))
            flash("You deleted the user from the database", 'success')
            return redirect(url_for('admin.user_list'))
        else:
            # The "cancel" button posted instead of "confirm".
            return redirect(url_for('admin.user_show', uuid=user.uuid))
    return render_template(
        'admin/users/purge.html',
        user=user,
        form=form,
    )
@admin_bp.route('/admin/users/<user_uuid>/togglerole', methods=['POST'])
def user_toggle_role(user_uuid):
    """Add or remove a role on a user; admins cannot edit their own roles."""
    user = Person.uuid_or_404(user_uuid)
    role = Role.first_by_name_or_404(request.form.get('role_name'))
    if current_user.uuid == user.uuid:
        flash("You cannot modify your own roles", 'error')
        return redirect(url_for('admin.user_show', uuid=user.uuid))
    existing = PersonRole.query.filter_by(person_id=user.id, role_id=role.id).first()
    if not existing:
        user.roles.append(role)
        flash('Role {} added to this user'.format(role.name), 'success')
    else:
        db.session.delete(existing)
        flash('Role {} removed from this user'.format(role.name), 'success')
    db.session.commit()
    return redirect(url_for('admin.user_show', uuid=user.uuid))
@admin_bp.route('/admin/users')
def user_list():
    """Render the user list, 15 per page, newest accounts first."""
    raw_page = request.args.get('page')
    page = None if raw_page is None else int(raw_page)
    users = Person.query.order_by(Person.created_at.desc()).paginate(page, 15)
    return render_template('admin/users/list.html', users=users)
@admin_bp.route('/admin/drivers_and_riders')
def driver_and_rider_list():
    """Render one page (15 rows) of the combined driver/rider roster."""
    raw_page = request.args.get('page')
    page = 1 if raw_page is None else int(raw_page)
    per_page = 15
    query = '''
    select d.name destination, cp.leave_time leave_time,
        cp.return_time return_time, 'rider' as rider_driver,
        p.name person_name, p.email email, p.phone_number phone,
        p.preferred_contact_method contact, p.uuid uuid
    from carpools cp, destinations d, people p, riders r
    where cp.destination_id=d.id and cp.id=r.carpool_id and
        r.status='approved' and r.person_id=p.id
    union
    select d.name destination, cp.leave_time leave_time,
        cp.return_time returntime, 'driver' as rider_driver,
        p.name person_name, p.email email, p.phone_number phone,
        p.preferred_contact_method contact, p.uuid uuid
    from carpools cp, destinations d, people p
    where cp.destination_id=d.id and cp.driver_id=p.id
    order by destination, leave_time, person_name
    '''
    result = list(db.engine.execute(query))
    # Python slices clamp at the sequence end, so one expression covers
    # both a full page and a short final page.
    start = per_page * (page - 1)
    paginated_result = result[start:start + per_page]
    return render_template(
        'admin/users/drivers_and_riders.html',
        drivers_and_riders=paginated_result,
        page=page,
        not_last=(per_page * page) < len(result),
        not_first=(page > 1)
    )
@admin_bp.route('/admin/users.csv')
def user_list_csv():
    """Stream every driver and approved rider as a CSV download.

    One row per (carpool, participant) pair; drivers and riders come from
    the two halves of the UNION below.
    """
    output = io.StringIO()
    writer = csv.writer(output)
    writer.writerow(['Nomad carpool drivers and riders'])
    writer.writerow(['carpool_id', 'destination', 'carpool leave time', 'carpool return time',
                     'driver/rider', 'name', 'email', 'phone', 'preferred contact method'])
    # NOTE(review): the second SELECT aliases return_time as "returntime"
    # (missing underscore); harmless in a UNION, whose column names come
    # from the first SELECT, but worth tidying at some point.
    query = '''
        select cp.id carpool_id, d.name destination, cp.leave_time leave_time,
               cp.return_time return_time, 'rider' as rider_driver,
               p.name person_name, p.email email, p.phone_number phone,
               p.preferred_contact_method contact
        from carpools cp, destinations d, people p, riders r
        where cp.destination_id=d.id and cp.id=r.carpool_id and
              r.status='approved' and r.person_id=p.id
        union
        select cp.id carpool_id, d.name destination, cp.leave_time leave_time,
               cp.return_time returntime, 'driver' as rider_driver,
               p.name person_name, p.email email, p.phone_number phone,
               p.preferred_contact_method contact
        from carpools cp, destinations d, people p
        where cp.destination_id=d.id and cp.driver_id=p.id
        order by destination, leave_time, person_name
    '''
    for row in db.engine.execute(query):
        writer.writerow([
            row.carpool_id,
            row.destination,
            row.leave_time.strftime('%x %X'),
            row.return_time.strftime('%x %X'),
            row.rider_driver,
            row.person_name,
            row.email,
            row.phone,
            row.contact
        ])
    return Response(
        output.getvalue(),
        mimetype='text/csv',
        headers={
            'Content-disposition': 'attachment; filename=nomad_users.csv'
        }
    )
@admin_bp.route('/admin/carpools')
def carpool_list():
    """Render the carpool list, 15 per page, newest first."""
    raw_page = request.args.get('page')
    page = None if raw_page is None else int(raw_page)
    carpools = Carpool.query.order_by(Carpool.created_at.desc()).paginate(page, 15)
    return render_template('admin/carpool/list.html', carpools=carpools)
@admin_bp.route('/admin/carpools.csv')
def carpool_list_csv():
    """Stream every carpool (active and cancelled) as a CSV download.

    Fix: the header cell previously read 'drive email' for the driver
    email column; corrected to 'driver email'.
    """
    output = io.StringIO()
    writer = csv.writer(output)
    writer.writerow(['Nomad carpools'])
    writer.writerow(['from', 'from lat/lon',
                     'destination', 'destination lat/lon', 'destination address',
                     'leave time', 'return time',
                     'driver name', 'driver email',
                     'max riders', 'ride requests', 'approved riders',
                     'status', 'reason for cancellation'
                     ])
    # Sub-selects count total and approved ride requests per carpool; the
    # outer join keeps carpools whose destination has been deleted.
    query = '''
    select cp.from_place as from_place, st_x(cp.from_point) as from_lon, st_y(cp.from_point) as from_lat,
        d.name as destination, st_x(d.point) as destination_lon, st_y(d.point) as destination_lat,
        d.address as destination_address,
        cp.leave_time as leave_time,
        cp.return_time as return_time,
        dp.name as driver_name, dp.email as driver_email,
        cp.max_riders as max_riders,
        cp.canceled as canceled,
        cp.cancel_reason as cancel_reason,
        (select count(*) from riders where carpool_id=cp.id) as request_count,
        (select count(*) from riders where carpool_id=cp.id and status='approved') as approved_count
    from carpools cp
    full outer join destinations d on (cp.destination_id=d.id)
    inner join people dp on (dp.id=cp.driver_id)
    '''
    for row in db.engine.execute(query):
        writer.writerow([
            row.from_place,
            ','.join(map(str, [row.from_lat, row.from_lon])),
            row.destination,
            ','.join(map(str, [row.destination_lat, row.destination_lon])),
            row.destination_address,
            row.leave_time.strftime('%x %X'),
            row.return_time.strftime('%x %X'),
            row.driver_name,
            row.driver_email,
            row.max_riders,
            row.request_count,
            row.approved_count,
            "Canceled" if row.canceled else 'Active',
            row.cancel_reason,
        ])
    return Response(
        output.getvalue(),
        mimetype='text/csv',
        headers={
            'Content-disposition': 'attachment; filename=nomad_carpools.csv'
        }
    )
@admin_bp.route('/admin/destinations')
def destinations_list():
    """Render the destination list, 15 per page, newest first."""
    raw_page = request.args.get('page')
    page = None if raw_page is None else int(raw_page)
    destinations = Destination.query.order_by(Destination.created_at.desc()).paginate(page, 15)
    return render_template('admin/destinations/list.html',
                           destinations=destinations)
@admin_bp.route('/admin/destinations/new', methods=['GET', 'POST'])
def destinations_add():
    """Show the new-destination form and create the destination on POST."""
    dest_form = DestinationForm()
    if not dest_form.validate_on_submit():
        return render_template('admin/destinations/add.html', form=dest_form)
    # WKT point order is (lon lat), i.e. x then y.
    point_wkt = 'SRID=4326;POINT({} {})'.format(
        dest_form.destination_lon.data,
        dest_form.destination_lat.data)
    db.session.add(Destination(
        name=dest_form.name.data,
        address=dest_form.address.data,
        point=point_wkt,
    ))
    db.session.commit()
    flash("You added a destination.", 'success')
    return redirect(url_for('admin.destinations_list'))
@admin_bp.route('/admin/destinations/<uuid>', methods=['GET', 'POST'])
def destinations_show(uuid):
    """Edit form for a destination: GET renders it, a valid POST saves it."""
    dest = Destination.uuid_or_404(uuid)
    point = to_shape(dest.point)
    # Pre-fill the form from the stored record (WKT x = lon, y = lat).
    edit_form = DestinationForm(
        name=dest.name,
        address=dest.address,
        destination_lat=point.y,
        destination_lon=point.x,
    )
    if edit_form.validate_on_submit():
        dest.name = edit_form.name.data
        dest.address = edit_form.address.data
        dest.point = 'SRID=4326;POINT({} {})'.format(
            edit_form.destination_lon.data,
            edit_form.destination_lat.data
        )
        # Notify everyone in an affected carpool before committing.
        _send_destination_action_email(dest, 'modified', 'modified')
        db.session.commit()
        flash("Your destination was updated", 'success')
        return redirect(url_for('admin.destinations_show', uuid=uuid))
    return render_template(
        'admin/destinations/edit.html',
        form=edit_form,
        dest=dest,
    )
@admin_bp.route('/admin/destinations/<uuid>/delete', methods=['GET', 'POST'])
def destinations_delete(uuid):
    """Confirmation flow for deleting a destination.

    A confirmed POST emails affected carpool participants a 'cancelled'
    notice and removes the record; the cancel button just redirects back.
    """
    dest = Destination.uuid_or_404(uuid)
    delete_form = DeleteDestinationForm()
    if delete_form.validate_on_submit():
        if delete_form.submit.data:
            _send_destination_action_email(dest, 'cancelled', 'deleted')
            db.session.delete(dest)
            db.session.commit()
            flash("Your destination was deleted", 'success')
            return redirect(url_for('admin.destinations_list'))
        else:
            # Cancel button pressed: back to the edit page, nothing changed.
            return redirect(url_for('admin.destinations_show', uuid=uuid))
    return render_template(
        'admin/destinations/delete.html',
        dest=dest,
        form=delete_form,
    )
def _send_destination_action_email(destination, verb, template_base):
    """Email every rider and driver of carpools bound to *destination*.

    *verb* goes into the subject line ('modified'/'cancelled'); the email
    body template is 'admin_destination_<template_base>'.
    """
    for carpool in destination.carpools:
        subject = 'Carpool on {} {}'.format(
            carpool.leave_time_formatted,
            verb
        )
        # For carpool riders
        for ride_request in carpool.ride_requests:
            send_email(
                'admin_destination_{}'.format(template_base),
                ride_request.person.email,
                subject,
                destination=destination,
                carpool=carpool,
                person=ride_request.person,
            )
        # For carpool driver
        send_email(
            'admin_destination_{}'.format(template_base),
            carpool.driver.email,
            subject,
            destination=destination,
            carpool=carpool,
            person=carpool.driver,
        )
@admin_bp.route('/admin/destinations/<uuid>/togglehidden', methods=['POST'])
def destinations_toggle_hidden(uuid):
    """Flip a destination's hidden flag and report the new state."""
    dest = Destination.uuid_or_404(uuid)
    dest.hidden = not dest.hidden
    db.session.add(dest)
    db.session.commit()
    message = ("Your destination was hidden" if dest.hidden
               else "Your destination was unhidden")
    flash(message, 'success')
    return redirect(url_for('admin.destinations_show', uuid=uuid))
@admin_bp.route('/admin/emailpreview/<template>')
def email_preview(template):
    """Render the text and HTML versions of one email template side by side.

    NOTE(review): assumes at least one Carpool/Person/RideRequest exists in
    the database; an empty database would raise on ``carpool.destination``.
    """
    # get enough sample data to cover all templates
    carpool = Carpool.query.first()
    data = {
        'destination': carpool.destination,
        'carpool': carpool,
        'person': carpool.driver,
        'rider': Person.query.first(),
        'driver': carpool.driver,
        'ride_request': RideRequest.query.first(),
        'reason': 'Placeholder reason'
    }
    text = render_template('email/{}.txt'.format(template), **data)
    html = render_template('email/{}.html'.format(template), **data)
    return render_template('admin/emailpreview.html', template=template, text=text, html=html)
@admin_bp.route('/admin/<uuid>/cancel', methods=['GET', 'POST'])
def admin_cancel_carpool(uuid):
    """Admin-side carpool cancellation with a reason; notifies the driver."""
    carpool = Carpool.uuid_or_404(uuid)
    cancel_form = CancelCarpoolAdminForm()
    if cancel_form.validate_on_submit():
        if cancel_form.submit.data:
            cancel_carpool(carpool, cancel_form.reason.data, notify_driver=True)
            flash('The carpool was cancelled', 'success')
            # TODO: redirect to carpool list page when available
            return redirect(url_for('admin.admin_index'))
        # Cancel button pressed: back to the carpool detail page.
        return redirect(url_for('carpool.details', uuid=carpool.uuid))
    return render_template('carpools/cancel.html', form=cancel_form)
| 14,544 | 0 | 397 |
e8764ac8179743789cb0b11839a0841f88bd1b2f | 3,914 | py | Python | experiments/tune_plot.py | trangnv/geb-simulations-h20 | df86e1ad1ff8e98cf2c3f6025d1626d260a3b125 | [
"MIT"
] | 7 | 2021-08-31T13:11:51.000Z | 2022-02-10T09:05:16.000Z | experiments/tune_plot.py | trangnv/geb-simulations-h20 | df86e1ad1ff8e98cf2c3f6025d1626d260a3b125 | [
"MIT"
] | null | null | null | experiments/tune_plot.py | trangnv/geb-simulations-h20 | df86e1ad1ff8e98cf2c3f6025d1626d260a3b125 | [
"MIT"
] | 8 | 2021-09-03T08:29:09.000Z | 2021-12-04T04:20:49.000Z | import sys
import os
import json
import glob
import pandas as pd
import plotly
import plotly.graph_objs as go
# Validate command-line usage before touching sys.argv[1].
if len(sys.argv) != 2:
    print("Usage: python tune_plot.py <result_dir>")
    print("Example: python tune_pot.py ~/ray_results/objective_mean_2021-04-08_00-07-44/")
    # The original fell through and crashed with an IndexError on
    # sys.argv[1]; exit with a conventional usage-error status instead.
    sys.exit(1)
result_dir = sys.argv[1]
tune_run = os.path.basename(os.path.normpath(result_dir))
results = glob.glob(os.path.join(result_dir, "*", "result.json"))
score = []
kp = []
ki = []
kd = []
alpha = []
fullPID = False
# Collect score and hyper-parameters from every trial's result.json.
for results_file in results:
    print(results_file)
    with open(results_file) as f:
        try:
            d = json.load(f)
        except ValueError:
            # Skip unreadable/incomplete result files.  json.JSONDecodeError
            # is a ValueError subclass; the previous bare `except:` also
            # swallowed KeyboardInterrupt and friends.
            continue
    score.append(d['score'])
    kp.append(d['config']['kp'])
    ki.append(d['config']['ki'])
    # A 'kd' entry means this run tuned a full PID controller.
    if 'kd' in d['config']:
        kd.append(d['config']['kd'])
        fullPID = True
    alpha.append(d['config']['alpha'])
# 5D plot
if fullPID:
#Set marker properties
markersize = [x * 20 for x in alpha]
markercolor = score
#Make Plotly figure
fig1 = go.Scatter3d(x=kp,
y=ki,
z=kd,
marker=dict(size=markersize,
color=markercolor,
opacity=0.5,
line=dict(width=2,
color='DarkSlateGrey'),
reversescale=False,
colorscale='blues'),
line=dict (width=0.02),
mode='markers')
#Make Plot.ly Layout
kp_range = [min(kp), max(kp)]
ki_range = [min(ki), max(kd)]
kd_range = [min(ki), max(kd)]
#ki_range = [0, 6e-6]
#kd_range = [0, 6e-6]
mylayout = go.Layout(scene=dict(xaxis=dict(title="kp", range=kp_range, showexponent = 'all', exponentformat = 'e'),
yaxis=dict(title="ki", range=ki_range, showexponent = 'all', exponentformat = 'e'),
zaxis=dict(title="kd", range=kd_range, showexponent = 'all', exponentformat = 'e')))
#Plot and save html
plotly.offline.plot({"data": [fig1],
"layout": mylayout},
image = 'png',
image_filename = 'tune_analyze_PID.png',
auto_open=True,
filename=("PID Scores Plot " + tune_run + ".html"))
else:
#Set marker properties
#markersize = [x * 20 for x in alpha]
markersize = [10 for x in alpha]
markercolor = score
#Make Plotly figure
fig1 = go.Scatter3d(x=kp,
y=ki,
z=alpha,
marker=dict(size=markersize,
color=markercolor,
opacity=0.5,
line=dict(width=2,
color='DarkSlateGrey'),
reversescale=False,
colorscale='blues'),
line=dict (width=0.02),
mode='markers')
#Make Plot.ly Layout
mylayout = go.Layout(scene=dict(xaxis=dict(title="kp", showexponent = 'all', exponentformat = 'e'),
yaxis=dict(title="ki",showexponent = 'all', exponentformat = 'e'),
zaxis=dict(title="alpha", showexponent = 'all', exponentformat = 'e')))
#Plot and save html
plotly.offline.plot({"data": [fig1],
"layout": mylayout},
image = 'png',
image_filename = 'tune_analyze_PI.png',
auto_open=True,
filename=("PI Scores Plot " + tune_run + ".html"))
| 36.240741 | 120 | 0.468574 | import sys
import os
import json
import glob
import pandas as pd
import plotly
import plotly.graph_objs as go
if len(sys.argv) != 2:
print("Usage: python tune_plot.py <result_dir>")
print("Example: python tune_pot.py ~/ray_results/objective_mean_2021-04-08_00-07-44/")
result_dir = sys.argv[1]
tune_run = os.path.basename(os.path.normpath(result_dir))
results = glob.glob(os.path.join(result_dir, "*", "result.json"))
score = []
kp = []
ki = []
kd = []
alpha = []
fullPID = False
for results_file in results:
print(results_file)
with open(results_file) as f:
try:
d = json.load(f)
except:
continue
score.append(d['score'])
kp.append(d['config']['kp'])
ki.append(d['config']['ki'])
if 'kd' in d['config']:
kd.append(d['config']['kd'])
fullPID = True
alpha.append(d['config']['alpha'])
# 5D plot
if fullPID:
#Set marker properties
markersize = [x * 20 for x in alpha]
markercolor = score
#Make Plotly figure
fig1 = go.Scatter3d(x=kp,
y=ki,
z=kd,
marker=dict(size=markersize,
color=markercolor,
opacity=0.5,
line=dict(width=2,
color='DarkSlateGrey'),
reversescale=False,
colorscale='blues'),
line=dict (width=0.02),
mode='markers')
#Make Plot.ly Layout
kp_range = [min(kp), max(kp)]
ki_range = [min(ki), max(kd)]
kd_range = [min(ki), max(kd)]
#ki_range = [0, 6e-6]
#kd_range = [0, 6e-6]
mylayout = go.Layout(scene=dict(xaxis=dict(title="kp", range=kp_range, showexponent = 'all', exponentformat = 'e'),
yaxis=dict(title="ki", range=ki_range, showexponent = 'all', exponentformat = 'e'),
zaxis=dict(title="kd", range=kd_range, showexponent = 'all', exponentformat = 'e')))
#Plot and save html
plotly.offline.plot({"data": [fig1],
"layout": mylayout},
image = 'png',
image_filename = 'tune_analyze_PID.png',
auto_open=True,
filename=("PID Scores Plot " + tune_run + ".html"))
else:
#Set marker properties
#markersize = [x * 20 for x in alpha]
markersize = [10 for x in alpha]
markercolor = score
#Make Plotly figure
fig1 = go.Scatter3d(x=kp,
y=ki,
z=alpha,
marker=dict(size=markersize,
color=markercolor,
opacity=0.5,
line=dict(width=2,
color='DarkSlateGrey'),
reversescale=False,
colorscale='blues'),
line=dict (width=0.02),
mode='markers')
#Make Plot.ly Layout
mylayout = go.Layout(scene=dict(xaxis=dict(title="kp", showexponent = 'all', exponentformat = 'e'),
yaxis=dict(title="ki",showexponent = 'all', exponentformat = 'e'),
zaxis=dict(title="alpha", showexponent = 'all', exponentformat = 'e')))
#Plot and save html
plotly.offline.plot({"data": [fig1],
"layout": mylayout},
image = 'png',
image_filename = 'tune_analyze_PI.png',
auto_open=True,
filename=("PI Scores Plot " + tune_run + ".html"))
| 0 | 0 | 0 |
017188de41467f7e7a397c25e24bc966ad998367 | 3,052 | py | Python | Pytorch/softmaxMnist.py | AutuanLiu/Machine-Learning-on-docker | 00eb7211a3a40a9da02114923647dfd6ac24f138 | [
"Apache-2.0"
] | 11 | 2018-03-18T11:06:59.000Z | 2020-02-23T03:24:43.000Z | Pytorch/softmaxMnist.py | AutuanLiu/Machine-Learning-on-docker | 00eb7211a3a40a9da02114923647dfd6ac24f138 | [
"Apache-2.0"
] | null | null | null | Pytorch/softmaxMnist.py | AutuanLiu/Machine-Learning-on-docker | 00eb7211a3a40a9da02114923647dfd6ac24f138 | [
"Apache-2.0"
] | 4 | 2018-03-28T13:04:26.000Z | 2019-05-29T05:49:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:softmaxMnist
Description : mnist data sets, softmax model
pytorch 不需要进行 one-hot 编码, 使用类别即可
Email : autuanliu@163.com
Date:18-1-16
"""
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.nn import Module, functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
# 网络模型定义
if __name__ == '__main__':
# some config
config = {'batch_size': 64, 'epoch_num': 100, 'lr': 0.001, 'in_feature': 28 * 28, 'out_feature': 10}
train_loader, test_loader = get_data(), get_data(flag=False)
# 模型实例与损失函数, 优化函数
model = Network()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=config['lr'], momentum=0.9)
# 训练与测试
for epoch in range(config['epoch_num']):
train_m(model, train_loader)
test_m(model, test_loader)
| 33.538462 | 130 | 0.6173 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:softmaxMnist
Description : mnist data sets, softmax model
pytorch 不需要进行 one-hot 编码, 使用类别即可
Email : autuanliu@163.com
Date:18-1-16
"""
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.nn import Module, functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
def get_data(flag=True):
    """Return a DataLoader over MNIST: the train split when *flag* is True,
    the test split otherwise (also controls shuffling and downloading).

    Reads ``config['batch_size']`` from the module-level config dict.
    """
    dataset = MNIST('../datasets/mnist/', train=flag, transform=transforms.ToTensor(), download=flag)
    return DataLoader(dataset, batch_size=config['batch_size'], shuffle=flag, drop_last=False)
# Network model definition
class Network(Module):
    """Five-layer fully connected classifier for flattened MNIST digits.

    Layer widths taper from ``config['in_feature']`` (784 pixels) down to
    ``config['out_feature']`` (10 logits).
    """

    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(config['in_feature'], 500)
        self.l2 = nn.Linear(500, 350)
        self.l3 = nn.Linear(350, 200)
        self.l4 = nn.Linear(200, 130)
        self.l5 = nn.Linear(130, config['out_feature'])

    def forward(self, x):
        hidden = x.view(-1, config['in_feature'])
        for layer in (self.l1, self.l2, self.l3, self.l4):
            hidden = F.relu(layer(hidden))
        # No activation on the output: CrossEntropyLoss expects raw logits.
        return self.l5(hidden)
def train_m(mod, data_loader):
    """Run one training epoch of *mod* over *data_loader*.

    Relies on the module-level globals ``optimizer``, ``criterion`` and
    ``epoch`` defined in the ``__main__`` block.
    """
    mod.train()
    for batch_idx, (data, target) in enumerate(data_loader):
        # Wrap tensors for autograd (legacy pre-0.4 PyTorch Variable API).
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = mod.forward(data)
        loss = criterion.forward(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            # Progress report every 10 batches: samples seen / dataset size.
            len1 = batch_idx * len(data)
            len2 = len(data_loader.dataset)
            pec = 100. * batch_idx / len(data_loader)
            print(f"Train Epoch: {epoch + 1} [{len1:5d}/{len2:5d} ({pec:3.2f}%)] \t Loss: {loss.data[0]:.5f}")
def test_m(mod, data_loader):
    """Evaluate *mod* on *data_loader* and print average loss and accuracy.

    Relies on the module-level global ``criterion`` defined in ``__main__``.
    """
    mod.eval()
    test_loss, correct = 0, 0
    for data, target in data_loader:
        # volatile=True disables gradient tracking (legacy pre-0.4 API).
        data, target = Variable(data, volatile=True), Variable(target)
        output = mod(data)
        # sum up batch loss
        # NOTE(review): criterion returns the batch *mean* by default, so
        # dividing this sum by the dataset size below understates the average
        # loss by roughly a factor of the batch size — confirm intent.
        test_loss += criterion(output, target).data[0]
        # get the index of the max logit as the predicted class
        _, pred = output.data.max(1, keepdim=True)
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= len(data_loader.dataset)
    len1 = len(data_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len1, 100. * correct / len1))
if __name__ == '__main__':
    # Hyper-parameters and data dimensions (MNIST images are 28x28, 10 classes).
    config = {'batch_size': 64, 'epoch_num': 100, 'lr': 0.001, 'in_feature': 28 * 28, 'out_feature': 10}
    train_loader, test_loader = get_data(), get_data(flag=False)
    # Model instance, loss function and optimizer (read as globals by train_m/test_m).
    model = Network()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=config['lr'], momentum=0.9)
    # Train, then evaluate, once per epoch.
    for epoch in range(config['epoch_num']):
        train_m(model, train_loader)
        test_m(model, test_loader)
| 1,887 | 1 | 144 |
ec134e9600b59b28012e9fd74a0fe7281e79c18f | 477 | py | Python | 01-basics/04-singlebyte-xor-detect.py | DavidBuchanan314/cryptopals-python3 | fe53fb1d324639193451e11d2d93cb251bce3021 | [
"MIT"
] | null | null | null | 01-basics/04-singlebyte-xor-detect.py | DavidBuchanan314/cryptopals-python3 | fe53fb1d324639193451e11d2d93cb251bce3021 | [
"MIT"
] | null | null | null | 01-basics/04-singlebyte-xor-detect.py | DavidBuchanan314/cryptopals-python3 | fe53fb1d324639193451e11d2d93cb251bce3021 | [
"MIT"
] | null | null | null | from sys import path; path += [".", ".."] # hacky...
from utils import *

if __name__ == "__main__":
	# Each line of 4.txt is one hex-encoded candidate ciphertext.
	ciphertexts = map(dehex, load_data("4.txt").split("\n"))
	keyspace = range(0x100)
	# Try every single-byte key on every ciphertext lazily. The previous
	# reduce(op.add, ...) flattening copied the accumulator list on every
	# step (quadratic time) and materialised all 256 * N candidates up front.
	candidates = (xor(ct, [key]) for ct in ciphertexts for key in keyspace)
	# The most English-looking candidate is the decrypted message.
	best_plaintext = min(candidates, key=englishness)
	message = best_plaintext.decode()
	assert(message == "Now that the party is jumping\n")
	print(message.strip())
| 26.5 | 69 | 0.666667 | from sys import path; path += [".", ".."] # hacky...
from utils import *

if __name__ == "__main__":
	# Each line of 4.txt is one hex-encoded candidate ciphertext.
	ciphertexts = map(dehex, load_data("4.txt").split("\n"))
	keyspace = range(0x100)
	# Try every single-byte key on every ciphertext lazily. The previous
	# reduce(op.add, ...) flattening copied the accumulator list on every
	# step (quadratic time) and materialised all 256 * N candidates up front.
	candidates = (xor(ct, [key]) for ct in ciphertexts for key in keyspace)
	# The most English-looking candidate is the decrypted message.
	best_plaintext = min(candidates, key=englishness)
	message = best_plaintext.decode()
	assert(message == "Now that the party is jumping\n")
	print(message.strip())
| 0 | 0 | 0 |
2f7f32685db80a3f2be2e3f54150ebbe5f582daf | 3,211 | py | Python | packages/sdk/tests/local/test_local_packaging.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | 12 | 2020-10-13T15:39:52.000Z | 2021-10-11T17:13:42.000Z | packages/sdk/tests/local/test_local_packaging.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | 475 | 2019-11-18T12:40:47.000Z | 2022-03-29T21:17:38.000Z | packages/sdk/tests/local/test_local_packaging.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-02-25T11:26:10.000Z | 2021-03-10T12:01:00.000Z | # Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import docker
import pytest
from pytest_mock import MockFixture
from odahuflow.sdk.local import packaging
from odahuflow.sdk.local.packaging import start_package
from odahuflow.sdk.models import K8sPackager, ModelPackaging, ModelPackagingSpec, PackagingIntegration, \
PackagingIntegrationSpec
# Each case: (artifact_name, artifact_path,
#             expected_artifact_name, expected_artifact_path).
# A trailing ".zip" is stripped from the artifact name once, and a missing
# path falls back to the default output directory.
test_data = [
    (
        'wine-1.0', '/odahu/training',
        'wine-1.0', '/odahu/training'
    ),
    (
        'wine-1.0.zip', '/odahu/training',
        'wine-1.0', '/odahu/training'
    ),
    (
        'wine-1.0.zip.zip', None,
        'wine-1.0.zip', '/odahu/default_output'
    )
]
# Local directory used when no artifact path is supplied (mocked into config).
DEFAULT_OUTPUT_DIR = '/odahu/default_output'
@pytest.mark.parametrize(['artifact_name', 'artifact_path',
'expected_artifact_name', 'expected_artifact_path'],
test_data)
| 40.1375 | 113 | 0.730925 | # Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import docker
import pytest
from pytest_mock import MockFixture
from odahuflow.sdk.local import packaging
from odahuflow.sdk.local.packaging import start_package
from odahuflow.sdk.models import K8sPackager, ModelPackaging, ModelPackagingSpec, PackagingIntegration, \
PackagingIntegrationSpec
# Each case: (artifact_name, artifact_path,
#             expected_artifact_name, expected_artifact_path).
# A trailing ".zip" is stripped from the artifact name once, and a missing
# path falls back to the default output directory.
test_data = [
    (
        'wine-1.0', '/odahu/training',
        'wine-1.0', '/odahu/training'
    ),
    (
        'wine-1.0.zip', '/odahu/training',
        'wine-1.0', '/odahu/training'
    ),
    (
        'wine-1.0.zip.zip', None,
        'wine-1.0.zip', '/odahu/default_output'
    )
]
# Local directory used when no artifact path is supplied (mocked into config).
DEFAULT_OUTPUT_DIR = '/odahu/default_output'
@pytest.mark.parametrize(
    ['artifact_name', 'artifact_path',
     'expected_artifact_name', 'expected_artifact_path'],
    test_data)
def test_start_package__artifact_name_artifact_path(artifact_name, artifact_path,
                                                    expected_artifact_name, expected_artifact_path,
                                                    mocker: MockFixture):
    """start_package must normalise the artifact name (stripping one trailing
    ``.zip``) and fall back to the configured output dir when no path is given."""
    k8s_packager = K8sPackager(
        model_packaging=ModelPackaging(spec=ModelPackagingSpec(artifact_name=artifact_name)),
        # mocking packaging_integration default_image
        packaging_integration=PackagingIntegration(spec=PackagingIntegrationSpec(default_image='default_image')))

    # Stub every side effect of start_package: config-file creation, docker,
    # json serialisation, log streaming, failure checks and result reading.
    create_config_mock = mocker.patch.object(packaging, 'create_mp_config_file')
    config_mock = mocker.patch.object(packaging, 'config')
    mocker.patch.object(docker, 'from_env')
    mocker.patch.object(json, 'dumps')
    mocker.patch.object(packaging, 'stream_container_logs')
    mocker.patch.object(packaging, 'raise_error_if_container_failed')
    read_result_mock = mocker.patch.object(packaging, 'read_mp_result_file')
    config_mock.LOCAL_MODEL_OUTPUT_DIR = DEFAULT_OUTPUT_DIR

    start_package(k8s_packager, artifact_path)

    expected_packager = K8sPackager(
        model_packaging=ModelPackaging(spec=ModelPackagingSpec(artifact_name=expected_artifact_name)),
        # mocking packaging_integration default_image
        packaging_integration=PackagingIntegration(spec=PackagingIntegrationSpec(default_image='default_image')))
    expected_full_artifact_path = os.path.join(expected_artifact_path, expected_artifact_name)
    create_config_mock.assert_called_with(expected_full_artifact_path, expected_packager)
    read_result_mock.assert_called_with(expected_full_artifact_path)
| 1,659 | 0 | 22 |
7b37bde3bf4c1a3743a3ed4fb7e54c5990ae1044 | 9,597 | py | Python | properties/migrations/0007_auto_20200629_2225.py | Zayanto/Protocol-CRM | c81489d69de581d8216e20f7dd80089116f85c7b | [
"MIT"
] | null | null | null | properties/migrations/0007_auto_20200629_2225.py | Zayanto/Protocol-CRM | c81489d69de581d8216e20f7dd80089116f85c7b | [
"MIT"
] | null | null | null | properties/migrations/0007_auto_20200629_2225.py | Zayanto/Protocol-CRM | c81489d69de581d8216e20f7dd80089116f85c7b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-29 22:25
from django.db import migrations, models
| 40.838298 | 263 | 0.550068 | # Generated by Django 3.0.7 on 2020-06-29 22:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``properties`` app.

    Splits the monolithic ``Property`` field set into dedicated per-stage
    models (opportunity, buying, renovation, for-rent, with-tenant), removes
    the now-redundant columns from ``Property`` and drops ``Comment``.
    """

    dependencies = [
        ('properties', '0006_remove_property_location'),
    ]
    operations = [
        migrations.CreateModel(
            name='StageBuying',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('agent_costs', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('notary_costs', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('legal_costs', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('accountant_costs', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('other_costs', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('buy_price', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='StageForRent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('expected_rent', models.IntegerField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='StageOpportunity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.CharField(max_length=200, null=True)),
                ('asking_price', models.CharField(max_length=200, null=True)),
                ('city', models.CharField(max_length=200, null=True)),
                ('residence_complex', models.CharField(max_length=200, null=True)),
                ('address', models.CharField(max_length=200, null=True)),
                ('zipcode', models.CharField(max_length=200, null=True)),
                ('building', models.CharField(max_length=200, null=True)),
                ('entrance', models.CharField(max_length=200, null=True)),
                ('floor', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('apartament_number', models.CharField(max_length=200, null=True)),
                ('reper', models.CharField(max_length=200, null=True)),
                ('vecinatati', models.CharField(max_length=200, null=True)),
                ('usable_sqm', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('build_sqm', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('destination', models.CharField(choices=[('birouri', 'Birouri'), ('rezidentaial', 'Rezidential'), ('comercial', 'Comercial')], default='rezidentaial', max_length=20, verbose_name='Destination')),
                ('layout', models.CharField(choices=[('decomandat', 'Decomandat'), ('semidecomandat', 'Semidecomandat'), ('nedecomandat', 'Nedecomandat'), ('circular', 'Circular'), ('vagon', 'Vagon')], default='decomandat', max_length=20, verbose_name='Layout')),
                ('comfort_type', models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3'), ('lux', 'Lux')], default='1', max_length=20, verbose_name='Comfort Type')),
                ('interior_state', models.CharField(choices=[('other', 'Other'), ('necesita-renovare', 'Necesita-Renovare'), ('renovat', 'Renovat'), ('nou', 'Nou'), ('caramida', 'Caramida')], default='other', max_length=20, verbose_name='Interior State')),
                ('number_of_rooms', models.IntegerField(null=True)),
                ('bedrooms', models.IntegerField(null=True)),
                ('kitchen', models.IntegerField(null=True)),
                ('bathrooms', models.IntegerField(null=True)),
                ('balcony', models.BooleanField(default=False)),
                ('garage', models.BooleanField(default=False)),
                ('building_type', models.CharField(choices=[('beton', 'Beton'), ('caramida', 'Caramida'), ('lemn', 'Lemn'), ('bca', 'Bca'), ('metal', 'Metal'), ('other', 'Other')], default='beton', max_length=20, verbose_name='Building Type')),
                ('building_construction_date', models.DateTimeField(blank=True, null=True)),
                ('basement', models.BooleanField(default=True)),
                ('potential_rent', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='StageRenovation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('investor', models.CharField(max_length=200, null=True)),
                ('renovation_budget', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('date_receiving_money', models.DateTimeField(blank=True)),
                ('date_receiving_key', models.DateTimeField(blank=True)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='StageWithTenant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('actual_rent', models.DecimalField(decimal_places=1, max_digits=10, null=True)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        # Remove the columns that moved into the per-stage models above.
        migrations.RemoveField(
            model_name='property',
            name='address',
        ),
        migrations.RemoveField(
            model_name='property',
            name='apartament',
        ),
        migrations.RemoveField(
            model_name='property',
            name='balcony',
        ),
        migrations.RemoveField(
            model_name='property',
            name='basement',
        ),
        migrations.RemoveField(
            model_name='property',
            name='bathrooms',
        ),
        migrations.RemoveField(
            model_name='property',
            name='bedrooms',
        ),
        migrations.RemoveField(
            model_name='property',
            name='build_sqm',
        ),
        migrations.RemoveField(
            model_name='property',
            name='building',
        ),
        migrations.RemoveField(
            model_name='property',
            name='building_age',
        ),
        migrations.RemoveField(
            model_name='property',
            name='building_type',
        ),
        migrations.RemoveField(
            model_name='property',
            name='buy_price',
        ),
        migrations.RemoveField(
            model_name='property',
            name='city',
        ),
        migrations.RemoveField(
            model_name='property',
            name='comfort_type',
        ),
        migrations.RemoveField(
            model_name='property',
            name='construction_date',
        ),
        migrations.RemoveField(
            model_name='property',
            name='construction_type',
        ),
        migrations.RemoveField(
            model_name='property',
            name='destination',
        ),
        migrations.RemoveField(
            model_name='property',
            name='entrance',
        ),
        migrations.RemoveField(
            model_name='property',
            name='floor',
        ),
        migrations.RemoveField(
            model_name='property',
            name='garage',
        ),
        migrations.RemoveField(
            model_name='property',
            name='interior_state',
        ),
        migrations.RemoveField(
            model_name='property',
            name='is_published',
        ),
        migrations.RemoveField(
            model_name='property',
            name='kitchen',
        ),
        migrations.RemoveField(
            model_name='property',
            name='layout',
        ),
        migrations.RemoveField(
            model_name='property',
            name='lot_size',
        ),
        migrations.RemoveField(
            model_name='property',
            name='notes',
        ),
        migrations.RemoveField(
            model_name='property',
            name='rent',
        ),
        migrations.RemoveField(
            model_name='property',
            name='reper',
        ),
        migrations.RemoveField(
            model_name='property',
            name='residence_complex',
        ),
        migrations.RemoveField(
            model_name='property',
            name='rooms',
        ),
        migrations.RemoveField(
            model_name='property',
            name='sell_price',
        ),
        migrations.RemoveField(
            model_name='property',
            name='state',
        ),
        migrations.RemoveField(
            model_name='property',
            name='street_number',
        ),
        migrations.RemoveField(
            model_name='property',
            name='title',
        ),
        migrations.RemoveField(
            model_name='property',
            name='usable_sqm',
        ),
        migrations.RemoveField(
            model_name='property',
            name='vecinatati',
        ),
        migrations.RemoveField(
            model_name='property',
            name='zipcode',
        ),
        migrations.DeleteModel(
            name='Comment',
        ),
    ]
| 0 | 9,483 | 23 |
228a1835d7d8c0c4561e9b598ae0cb4c389c7e67 | 2,749 | py | Python | ros/src/twist_controller/twist_controller.py | Benson516/CarND-Capstone | 6f54bb59e81ce69f1ad1c011ecb73509b8f04c61 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | Benson516/CarND-Capstone | 6f54bb59e81ce69f1ad1c011ecb73509b8f04c61 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | Benson516/CarND-Capstone | 6f54bb59e81ce69f1ad1c011ecb73509b8f04c61 | [
"MIT"
] | null | null | null | import rospy
from yaw_controller import YawController
from lowpass import LowPassFilter
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
| 31.965116 | 89 | 0.548927 | import rospy
from yaw_controller import YawController
from lowpass import LowPassFilter
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
    """Drive-by-wire controller producing (throttle, brake, steering) commands.

    Combines a PID throttle loop, a yaw-rate steering controller and a
    low-pass filter on the measured velocity. Vehicle parameters come from
    *param_dict* (wheel base, steer ratio, mass, limits, wheel radius, ...).
    """

    def __init__(self, param_dict):
        # TODO: Implement
        self.yaw_controller = YawController(param_dict["wheel_base"],
                                            param_dict["steer_ratio"],
                                            0.1,
                                            param_dict["max_lat_accel"],
                                            param_dict["max_steer_angle"])
        # PID gains and output clamp for the throttle loop.
        kp = 0.3
        ki = 0.1
        kd = 0.0
        mn = 0.0 # Minimum throttle value
        mx = 0.5 # 0.2 # Maximum throttle value
        self.throttle_controller = PID(kp, ki, kd, mn, mx)
        tau = 0.5 # 1/(2pi*tau) = cutoff frequency
        ts = 0.02 # Sample time
        self.vel_lpf = LowPassFilter(tau, ts)
        # Parameters
        #------------------------------#
        self.vehicle_mass = param_dict["vehicle_mass"]
        self.fuel_capacity = param_dict["fuel_capacity"]
        self.brake_deadband = param_dict["brake_deadband"]
        self.decel_limit = param_dict["decel_limit"]
        self.accel_limit = param_dict["accel_limit"]
        self.wheel_radius = param_dict["wheel_radius"]
        #------------------------------#
        # Variables
        #------------------------------#
        self.last_time = rospy.get_time()
        self.last_vel = 0.0
        #------------------------------#

    def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
        """Return (throttle, brake, steering) for the current control step.

        Resets the PID and outputs zeros while drive-by-wire is disabled so
        integral error does not accumulate under manual driving.
        """
        # TODO: Change the arg, kwarg list to suit your needs
        # Return throttle, brake, steer
        # return 1., 0., 0.
        if not dbw_enabled:
            self.throttle_controller.reset()
            return 0., 0., 0.
        current_vel = self.vel_lpf.filt(current_vel)
        #
        steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
        vel_error = linear_vel - current_vel
        self.last_vel = current_vel
        current_time = rospy.get_time()
        sample_time = current_time - self.last_time
        self.last_time = current_time
        throttle = self.throttle_controller.step(vel_error, sample_time)
        brake = 0.0
        # Hold the vehicle stopped with a constant brake torque at standstill.
        if linear_vel == 0.0 and current_vel < 0.1:
            throttle = 0.0
            brake = 700 # N-m
        elif throttle < 0.1 and vel_error < 0.0: # need to decelerate
            throttle = 0.0
            ideal_braking_time = 1.0 # sec.
            decel = max(vel_error*ideal_braking_time, self.decel_limit)
            brake = abs(decel) * self.vehicle_mass * self.wheel_radius # Torque N-m
        return throttle, brake, steering
| 2,517 | 4 | 76 |
a3ae640f8a79b4b5a07050634a3c386363a3bf3c | 1,011 | py | Python | tests/unit/test_api.py | staticdev/github3.py | b9af598dcf1771c083dcc512a2aa8e5008bf4ea8 | [
"MIT"
] | null | null | null | tests/unit/test_api.py | staticdev/github3.py | b9af598dcf1771c083dcc512a2aa8e5008bf4ea8 | [
"MIT"
] | 32 | 2021-02-17T19:46:21.000Z | 2021-05-12T05:56:03.000Z | tests/unit/test_api.py | staticdev/github3.py | b9af598dcf1771c083dcc512a2aa8e5008bf4ea8 | [
"MIT"
] | null | null | null | """Unit tests for github4.api."""
import unittest.mock
import github4
class TestAPI(unittest.TestCase):
    """All tests for the github4.api module."""

    def test_enterprise_login(self):
        """Show that github4.enterprise_login returns GitHubEnterprise."""
        credentials = ("login", "password", None, "https://url.com/", None)
        with unittest.mock.patch.object(github4.GitHubEnterprise, "login") as login_mock:
            client = github4.enterprise_login(*credentials)
        assert isinstance(client, github4.GitHubEnterprise)
        # The URL argument is consumed by the constructor, not passed to login().
        login_mock.assert_called_once_with("login", "password", None, None)

    def test_login(self):
        """Show that github4.login proxies to GitHub."""
        credentials = ("login", "password", None, None)
        with unittest.mock.patch.object(github4.GitHub, "login") as login_mock:
            client = github4.login(*credentials)
        assert isinstance(client, github4.GitHub)
        assert not isinstance(client, github4.GitHubEnterprise)
        login_mock.assert_called_once_with(*credentials)
| 37.444444 | 84 | 0.65183 | """Unit tests for github4.api."""
import unittest.mock
import github4
class TestAPI(unittest.TestCase):
    """All tests for the github4.api module."""

    def test_enterprise_login(self):
        """Show that github4.enterprise_login returns GitHubEnterprise."""
        credentials = ("login", "password", None, "https://url.com/", None)
        with unittest.mock.patch.object(github4.GitHubEnterprise, "login") as login_mock:
            client = github4.enterprise_login(*credentials)
        assert isinstance(client, github4.GitHubEnterprise)
        # The URL argument is consumed by the constructor, not passed to login().
        login_mock.assert_called_once_with("login", "password", None, None)

    def test_login(self):
        """Show that github4.login proxies to GitHub."""
        credentials = ("login", "password", None, None)
        with unittest.mock.patch.object(github4.GitHub, "login") as login_mock:
            client = github4.login(*credentials)
        assert isinstance(client, github4.GitHub)
        assert not isinstance(client, github4.GitHubEnterprise)
        login_mock.assert_called_once_with(*credentials)
| 0 | 0 | 0 |
fa64022251c523bf266eb1aed5874bafa1bed4a3 | 4,880 | py | Python | tests/test_api.py | filepreviews/filepreviews-python | eb50d527a8f1a2942f1be2c2bc057b5da4879ecd | [
"MIT"
] | 4 | 2017-01-24T17:03:34.000Z | 2021-09-05T15:08:27.000Z | tests/test_api.py | filepreviews/filepreviews-python | eb50d527a8f1a2942f1be2c2bc057b5da4879ecd | [
"MIT"
] | 1 | 2021-05-15T22:10:20.000Z | 2021-05-15T22:10:20.000Z | tests/test_api.py | filepreviews/filepreviews-python | eb50d527a8f1a2942f1be2c2bc057b5da4879ecd | [
"MIT"
] | 2 | 2017-02-14T08:02:55.000Z | 2020-12-05T13:17:25.000Z | import json
import pytest
import responses
from filepreviews import API_URL, FilePreviews, exceptions
file_previews = FilePreviews(api_key="DUMMY_API_KEY", api_secret="DUMMY_SECRET_KEY")
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
| 27.727273 | 88 | 0.545697 | import json
import pytest
import responses
from filepreviews import API_URL, FilePreviews, exceptions
file_previews = FilePreviews(api_key="DUMMY_API_KEY", api_secret="DUMMY_SECRET_KEY")
@responses.activate
def test_api_generate():
    """generate() POSTs to /previews/ and exposes the pending preview's fields."""
    def request_callback(request):
        # Simulated API response for a freshly created, still-pending preview.
        body = {
            "id": "1",
            "status": "pending",
            "preview": None,
            "thumbnails": None,
            "original_file": None,
            "user_data": None,
            "url": "http://example.com/v2/previews/1/",
        }
        headers = {"content-type": "application/json", "location": body["url"]}
        return (201, headers, json.dumps(body))

    responses.add_callback(
        responses.POST,
        API_URL + "/previews/",
        callback=request_callback,
        content_type="application/json",
    )

    result = file_previews.generate("http://example.com/file.jpg")
    assert result.status == "pending"
    assert result.url == "http://example.com/v2/previews/1/"
@responses.activate
def test_api_retrieve():
    """retrieve() GETs /previews/<id>/ and exposes the finished preview's fields."""
    def request_callback(request):
        # Simulated API response for a completed preview of a one-page PDF.
        body = {
            "status": "success",
            "thumbnails": [
                {
                    "url": "http://example.com/user_manual_original_1.png",
                    "requested_size": "original",
                    "resized": False,
                    "original_size": {"width": "612", "height": "792"},
                    "page": 1,
                    "size": {"width": "612", "height": "792"},
                }
            ],
            "url": "http://example.com/v2/previews/123/",
            "id": "123",
            "preview": {
                "url": "http://example.com/user_manual_original_1.png",
                "requested_size": "original",
                "resized": False,
                "original_size": {"width": "612", "height": "792"},
                "page": 1,
                "size": {"width": "612", "height": "792"},
            },
            "user_data": None,
            "original_file": {
                "mimetype": "application/pdf",
                "name": "user_manual",
                "extension": "pdf",
                "encoding": "binary",
                "total_pages": 1,
                "metadata": {},
                "type": "application",
                "size": 416905,
            },
        }
        headers = {"content-type": "application/json", "location": body["url"]}
        return (201, headers, json.dumps(body))

    responses.add_callback(
        responses.GET,
        API_URL + "/previews/123/",
        callback=request_callback,
        content_type="application/json",
    )

    result = file_previews.retrieve("123")
    assert result.status == "success"
    assert result.url == "http://example.com/v2/previews/123/"
@responses.activate
def test_api_error():
    """A non-JSON 500 response raises APIError with the raw body in the message."""
    def request_callback(request):
        body = "Server Error"
        headers = {}
        return (500, headers, body)

    responses.add_callback(
        responses.POST,
        API_URL + "/previews/",
        callback=request_callback,
        content_type="application/json",
    )

    with pytest.raises(exceptions.APIError) as exc:
        file_previews.generate("http://example.com/file.jpg")

    assert str(exc.value) == (
        "Invalid response object from API: " "Server Error (HTTP response code was 500)"
    )
@responses.activate
def test_invalid_request_error():
    """A 400 with type invalid_request_error raises InvalidRequestError
    carrying the offending parameter name."""
    def request_callback(request):
        body = {
            "error": {
                "message": "This field may not be blank.",
                "type": "invalid_request_error",
                "param": "url",
            }
        }
        headers = {}
        return (400, headers, json.dumps(body))

    responses.add_callback(
        responses.POST,
        API_URL + "/previews/",
        callback=request_callback,
        content_type="application/json",
    )

    with pytest.raises(exceptions.InvalidRequestError) as exc:
        file_previews.generate("")

    assert str(exc.value) == "This field may not be blank."
    assert exc.value.param == "url"
@responses.activate
def test_authentication_error():
    """A 401 response raises AuthenticationError with the API's message."""
    def request_callback(request):
        body = {
            "error": {
                "message": "Invalid API Key provided.",
                "type": "invalid_request_error",
            }
        }
        headers = {}
        return (401, headers, json.dumps(body))

    responses.add_callback(
        responses.POST,
        API_URL + "/previews/",
        callback=request_callback,
        content_type="application/json",
    )

    # Shadows the module-level client with deliberately bad credentials.
    file_previews = FilePreviews(api_key="WRONG_API_KEY", api_secret="WRONG_SECRET_KEY")

    with pytest.raises(exceptions.AuthenticationError) as exc:
        file_previews.generate("http://example.com/file.jpg")

    assert str(exc.value) == "Invalid API Key provided."
| 4,470 | 0 | 110 |
10f9f5f2bc5a39c36c0807c9975bf5bc17848bc0 | 23,845 | py | Python | ai.py | mandaw2014/Rally | cafc12aff75bf1a158753d08ae36eb4056dcb1e0 | [
"MIT"
] | 1 | 2022-03-28T01:18:31.000Z | 2022-03-28T01:18:31.000Z | ai.py | mandaw2014/Rally | cafc12aff75bf1a158753d08ae36eb4056dcb1e0 | [
"MIT"
] | null | null | null | ai.py | mandaw2014/Rally | cafc12aff75bf1a158753d08ae36eb4056dcb1e0 | [
"MIT"
] | 1 | 2022-03-17T22:26:20.000Z | 2022-03-17T22:26:20.000Z | from ursina import *
from ursina import curve
from particles import ParticleSystem
sign = lambda x: -1 if x < 0 else (1 if x > 0 else 0) | 71.392216 | 1,156 | 0.625708 | from ursina import *
from ursina import curve
from particles import ParticleSystem
def sign(x):
    """Return -1 for negative *x*, 1 for positive *x*, 0 for zero.

    Replaces the previous lambda assignment (PEP 8 discourages binding a
    lambda to a name); behavior is identical.
    """
    return -1 if x < 0 else (1 if x > 0 else 0)
class AICar(Entity):
def __init__(self, car, sand_track, grass_track, snow_track, plains_track):
super().__init__(
model = "car.obj",
position = (0, 0, 0),
rotation = (0, 0, 0),
collider = "box",
scale = (1, 1, 1)
)
self.car = car
self.set_random_texture()
self.speed = 0
self.velocity_y = 0
self.rotation_speed = 0
self.max_rotation_speed = 2.6
self.topspeed = 30
self.acceleration = 0.35
self.friction = 0.6
self.drift_speed = 35
self.pivot_rotation_distance = 1
self.pivot = Entity()
self.pivot.position = self.position
self.pivot.rotation = self.rotation
self.number_of_particles = 0.05
self.particle_pivot = Entity()
self.particle_pivot.parent = self
self.particle_pivot.position = self.position - (0, 1, 2)
self.sand_track = sand_track
self.grass_track = grass_track
self.snow_track = snow_track
self.plains_track = plains_track
self.ai_list = None
self.set_enabled = True
self.old_pos = round(self.position)
self.slope = 100
# Sand Track Points
self.sap1 = PathObject((-41, -50, -7))
self.sap2 = PathObject((-26, -50, -25), (0, 90, 0))
self.sap3 = PathObject((-26, -50, -42), (0, 90, 0))
self.sap4 = PathObject((-48, -47, -55))
self.sap5 = PathObject((-100, -50, -61))
self.sap6 = PathObject((-128, -50, -95), (0, 90, 0))
self.sap7 = PathObject((-105, -50, -105))
self.sap8 = PathObject((-91, -50, -105))
self.sap9 = PathObject((-80, -46, -86), (0, 90, 0))
self.sap10 = PathObject((-75, -50, -34), (0, 90, 0))
self.sap11 = PathObject((-54, -50, -15))
# Grass Track Points
self.gp1 = PathObject((-47, -41, 15), (0, 90, 0))
self.gp2 = PathObject((12, -42, 14), (0, 90, 0))
self.gp3 = PathObject((48, -42, 34), (0, 0, 0))
self.gp4 = PathObject((37, -42, 68), (0, -90, 0))
self.gp5 = PathObject((10, -42, 60), (0, -180, 0))
self.gp6 = PathObject((-2, -42, -10), (0, -180, 0))
self.gp7 = PathObject((3, -42, -40), (0, -180, 0))
self.gp8 = PathObject((-13, -42, -63), (0, -90, 0))
self.gp9 = PathObject((-38, -42, -67), (0, -90, 0))
self.gp10 = PathObject((-94, -39, -57), (0, -90, 0))
self.gp11 = PathObject((-105, -42, -26), (0, -180, 0))
self.gp12 = PathObject((-106, -42, -2), (0, -180, 0))
self.gp13 = PathObject((-90, -42, 15), (0, 90, 0))
# Snow Track Points
self.snp1 = PathObject((32, -44, 94))
self.snp2 = PathObject((48, -44, 78), (0, 90, 0))
self.snp3 = PathObject((53, -44, 65), (0, 90, 0))
self.snp4 = PathObject((39, -44, 42))
self.snp5 = PathObject((-37, -44, 42))
self.snp6 = PathObject((-73, -43, 35), (0, 90, 0))
self.snp7 = PathObject((-76, -42, 2), (0, 90, 0))
self.snp8 = PathObject((-67, -44, -8))
self.snp9 = PathObject((47, -44, -8))
self.snp10 = PathObject((65, -42, -27), (0, 90, 0))
self.snp11 = PathObject((52, -43, -46))
self.snp12 = PathObject((5, -44, -51))
self.snp13 = PathObject((-25, -44, -39), (0, 90, 0))
self.snp14 = PathObject((-22, -44, 50), (0, 90, 0))
self.snp15 = PathObject((-21, -44, 106), (0, 90, 0))
self.snp16 = PathObject((-47, -41, 126))
self.snp17 = PathObject((-70, -44, 100), (0, 90, 0))
self.snp18 = PathObject((-55, -44, 85))
self.snp19 = PathObject((-14, -44, 94))
# Plains Track Points
self.plp1 = PathObject((57, -51, 76))
self.plp2 = PathObject((82, -51, 63), (0, 90, 0))
self.plp3 = PathObject((80, -51, 52), (0, 90, 0))
self.plp4 = PathObject((57, -51, 36))
self.plp5 = PathObject((-29, -51, 36))
self.plp6 = PathObject((-62, -51, 16), (0, 90, 0))
self.plp7 = PathObject((-42, -51, -11))
self.plp8 = PathObject((4, -51, -11))
self.plp9 = PathObject((41, -51, -25), (0, 90, 0))
self.plp10 = PathObject((41, -51, -46), (0, 90, 0))
self.plp11 = PathObject((25, -51, -66))
self.plp12 = PathObject((7, -51, -67))
self.plp13 = PathObject((-17, -51, -53), (0, 90, 0))
self.plp14 = PathObject((-18, -51, -6), (0, 90, 0))
self.plp15 = PathObject((-18, -46, 24), (0, 90, 0))
self.plp16 = PathObject((-3, -51, 75))
self.sand_path = [self.sap1, self.sap2, self.sap3, self.sap4, self.sap5, self.sap6, self.sap7, self.sap8, self.sap9, self.sap10, self.sap11]
self.grass_path = [self.gp1, self.gp2, self.gp3, self.gp4, self.gp5, self.gp6, self.gp7, self.gp8, self.gp9, self.gp10, self.gp11, self.gp12, self.gp13]
self.snow_path = [self.snp1, self.snp2, self.snp3, self.snp4, self.snp5, self.snp6, self.snp7, self.snp8, self.snp9, self.snp10, self.snp11, self.snp12, self.snp13, self.snp14, self.snp15, self.snp16, self.snp17, self.snp18, self.snp19]
self.plains_path = [self.plp1, self.plp2, self.plp3, self.plp4, self.plp5, self.plp6, self.plp7, self.plp8, self.plp9, self.plp10, self.plp11, self.plp12, self.plp13, self.plp14, self.plp15, self.plp16]
self.next_path = self.gp1
self.difficulty = 70
invoke(self.same_pos, delay = 5)
def set_random_texture(self):
i = random.randint(0, 5)
if i == 0:
if self.car.texture != "car-red.png":
self.texture = "car-red.png"
elif i == 1:
if self.car.texture != "car-blue.png":
self.texture = "car-blue.png"
elif i == 2:
if self.car.texture != "car-orange.png":
self.texture = "car-orange.png"
elif i == 3:
if self.car.texture != "car-green.png":
self.texture = "car-green.png"
elif i == 4:
if self.car.texture != "car-white.png":
self.texture = "car-white.png"
elif i == 5:
if self.car.texture != "car-black.png":
self.texture = "car-black.png"
def same_pos(self):
    # Unsticking watchdog: if the AI car has barely moved since the last
    # check, assume it is stuck and nudge it free, then re-arm the check.
    if self.enabled:
        # Straight-line distance travelled since the previous check
        # (shadows ursina's global ``distance`` helper inside this method).
        distance = sqrt((self.position[0] - self.old_pos[0]) ** 2 + (self.position[1] - self.old_pos[1]) ** 2 + (self.position[2] - self.old_pos[2]) ** 2)
        if distance <= 2:
            # Barely moved: pop the car upward with a small random
            # horizontal offset to break it loose.
            self.x += random.randint(-10, 10) * time.dt
            self.y += 40 * time.dt
            self.z += random.randint(-10, 10) * time.dt
        self.old_pos = round(self.position)
    # Reschedule this check every 2 seconds (first armed from __init__).
    # NOTE(review): nesting reconstructed from a flattened dump — confirm the
    # reschedule sits outside the ``enabled`` guard in the original source.
    invoke(self.same_pos, delay = 2)
def update(self):
    """Per-frame AI driving step.

    In order: drift-pivot smoothing, drift-particle budget, per-track
    difficulty, ground contact + particle spawning, waypoint following,
    steering decay and dynamic clamps, fall-off respawn, and finally
    collision-aware movement on the Y, X and Z axes.

    NOTE(review): indentation was reconstructed from control-flow order of a
    flattened dump — confirm nesting against the original source.
    """
    if not self.enabled:
        return
    self.pivot.position = self.position

    # Ease the drift pivot's yaw toward the car's heading; the lag between
    # the two ("drifting") trades speed for rotation.
    if self.pivot.rotation_y != self.rotation_y:
        if self.pivot.rotation_y > self.rotation_y:
            self.pivot.rotation_y -= (self.drift_speed * ((self.pivot.rotation_y - self.rotation_y) / 40)) * time.dt
            self.speed += self.pivot_rotation_distance / 4.5 * time.dt
        if self.pivot.rotation_y < self.rotation_y:
            self.pivot.rotation_y += (self.drift_speed * ((self.rotation_y - self.pivot.rotation_y) / 40)) * time.dt
            self.speed -= self.pivot_rotation_distance / 4.5 * time.dt

    # Emit more drift particles while the pivot lags far behind the heading.
    if self.pivot.rotation_y - self.rotation_y < -20 or self.pivot.rotation_y - self.rotation_y > 20:
        self.number_of_particles += 1 * time.dt
    else:
        self.number_of_particles -= 2 * time.dt
    self.pivot_rotation_distance = (self.rotation_y - self.pivot.rotation_y)

    # BUG FIX: the second operand was a duplicated ``self.sand_track.enabled``;
    # grass was clearly intended (sand/grass easy = 70, snow/plains hard = 50).
    if self.sand_track.enabled or self.grass_track.enabled:
        self.difficulty = 70
    elif self.snow_track.enabled or self.plains_track.enabled:
        self.difficulty = 50

    # Every physics query below ignores the same triggers and walls; build
    # the list once instead of repeating it for all eight casts.
    collision_ignore = [
        self,
        self.sand_track.finish_line, self.sand_track.wall_trigger,
        self.grass_track.finish_line, self.grass_track.wall_trigger, self.grass_track.wall_trigger_ramp,
        self.snow_track.finish_line, self.snow_track.wall_trigger, self.snow_track.wall_trigger_end,
        self.plains_track.finish_line, self.plains_track.wall_trigger,
        self.sand_track.wall1, self.sand_track.wall2, self.sand_track.wall3, self.sand_track.wall4,
        self.grass_track.wall1, self.grass_track.wall2, self.grass_track.wall3, self.grass_track.wall4,
        self.snow_track.wall1, self.snow_track.wall2, self.snow_track.wall3, self.snow_track.wall4,
        self.snow_track.wall5, self.snow_track.wall6, self.snow_track.wall7, self.snow_track.wall8,
        self.snow_track.wall9, self.snow_track.wall10, self.snow_track.wall11, self.snow_track.wall12,
        self.plains_track.wall1, self.plains_track.wall2, self.plains_track.wall3, self.plains_track.wall4,
        self.plains_track.wall5, self.plains_track.wall6, self.plains_track.wall7, self.plains_track.wall8,
    ]

    ground_check = raycast(origin = self.position, direction = self.down, distance = 2, ignore = collision_ignore)
    if ground_check.hit:
        # Accelerate on roughly half the grounded frames (cheap throttle noise).
        if random.randint(0, 1) == 0:
            self.speed += self.acceleration * self.difficulty * time.dt
        # Spawn drift particles matching the active track's surface.
        self.particles = ParticleSystem(position = self.particle_pivot.world_position, rotation_y = random.random() * 360, number_of_particles = self.number_of_particles)
        if self.sand_track.enabled:
            self.particles.texture = "particle_sand_track.png"
        elif self.grass_track.enabled:
            self.particles.texture = "particle_grass_track.png"
        elif self.snow_track.enabled:
            self.particles.texture = "particle_snow_track.png"
        elif self.plains_track.enabled:
            self.particles.texture = "particle_plains_track.png"
        else:
            self.particles.texture = "particle_sand_track.png"
        self.particles.fade_out(duration = 0.2, delay = 1 - 0.2, curve = curve.linear)
        invoke(self.particles.disable, delay = 1)

    # Main AI bit: steer toward the next waypoint of the active track and
    # advance to the following waypoint once within 12 units of it.
    if self.sand_track.enabled:
        self.look_at(self.next_path)
        for p in self.sand_path:
            if distance(p, self) < 12 and self.next_path == p:
                self.next_path = self.sand_path[self.sand_path.index(p) - len(self.sand_path) + 1]
    elif self.grass_track.enabled:
        self.look_at(self.next_path)
        for p in self.grass_path:
            if distance(p, self) < 12 and self.next_path == p:
                self.next_path = self.grass_path[self.grass_path.index(p) - len(self.grass_path) + 1]
    elif self.snow_track.enabled:
        self.look_at(self.next_path)
        for p in self.snow_path:
            if distance(p, self) < 12 and self.next_path == p:
                self.next_path = self.snow_path[self.snow_path.index(p) - len(self.snow_path) + 1]
    elif self.plains_track.enabled:
        self.look_at(self.next_path)
        for p in self.plains_path:
            if distance(p, self) < 12 and self.next_path == p:
                self.next_path = self.plains_path[self.plains_path.index(p) - len(self.plains_path) + 1]
    else:
        # No track active: wander with random steering jitter.
        if self.speed != 0:
            r = random.randint(0, 3)
            if r == 1:
                self.rotation_speed -= 20 * time.dt
                self.drift_speed -= 10 * time.dt
            elif r == 2:
                self.rotation_speed += 20 * time.dt
                self.drift_speed -= 10 * time.dt
            else:
                self.drift_speed += 0.01 * time.dt

    # Decay steering back toward neutral and clamp the dynamic quantities.
    if self.rotation_speed > 0:
        self.rotation_speed -= 5 * time.dt
    elif self.rotation_speed < 0:
        self.rotation_speed += 5 * time.dt
    if self.speed >= self.topspeed:
        self.speed = self.topspeed
    if self.speed <= 0.1:
        self.speed = 0.1
    self.pivot.rotation = self.rotation
    if self.drift_speed <= 20:
        self.drift_speed = 20
    if self.drift_speed >= 40:
        self.drift_speed = 40

    # Fell off the world: respawn near the active track's start line.
    if self.y <= -100:
        if self.grass_track.enabled:
            self.position = (-80 + random.randint(-5, 5), -30 + random.randint(-3, 5), 15 + random.randint(-5, 5))
            self.rotation = (0, 90, 0)
        elif self.sand_track.enabled:
            self.position = (-63 + random.randint(-5, 5), -40 + random.randint(-3, 5), -7 + random.randint(-5, 5))
            self.rotation = (0, 65, 0)
        elif self.snow_track.enabled:
            self.position = (-5 + random.randint(-5, 5), -35 + random.randint(-3, 5), 90 + random.randint(-5, 5))
            self.rotation = (0, 90, 0)
        elif self.plains_track.enabled:
            self.position = (12 + random.randint(-5, 5), -40 + random.randint(-3, 5), 73 + random.randint(-5, 5))
            self.rotation = (0, 90, 0)
        else:
            self.position = (0, 0, 0)
            self.rotation = (0, 0, 0)
        self.speed = 0

    # Vertical movement: simple gravity with a boxcast ground probe.
    movementY = self.velocity_y * time.dt
    direction = (0, sign(movementY), 0)
    y_ray = boxcast(origin = self.world_position, direction = direction, distance = self.scale_y * 1.4 + abs(movementY), ignore = collision_ignore)
    if y_ray.hit:
        self.jump_count = 0
        self.velocity_y = 0
    else:
        self.y += movementY * 50 * time.dt
        self.velocity_y -= 1

    # Horizontal movement along the drift pivot's facing; each axis checks
    # for walls at body height, then at roof height, then climbs shallow
    # slopes (up to ``slope * 10``) when grounded.
    movementX = self.pivot.forward[0] * self.speed * time.dt
    movementZ = self.pivot.forward[2] * self.speed * time.dt
    if movementX != 0:
        direction = (sign(movementX), 0, 0)
        x_ray = boxcast(origin = self.world_position, direction = direction, distance = self.scale_x / 2 + abs(movementX), ignore = collision_ignore, thickness = (1, 1))
        if not x_ray.hit:
            self.x += movementX
        else:
            top_x_ray = raycast(origin = self.world_position - (0, self.scale_y / 2 - 0.1, 0), direction = direction, distance = self.scale_x / 2, ignore = collision_ignore)
            if not top_x_ray.hit:
                self.x += movementX
                height_ray = raycast(origin = self.world_position + (sign(movementX) * self.scale_x / 2, -self.scale_y / 2, 0), direction = (0, 1, 0), ignore = collision_ignore)
                if height_ray.hit and y_ray.hit:
                    if height_ray.distance < self.slope * 10:
                        # BUG FIX: the original chained these inequalities with
                        # ``or``, which is always True; ``and`` implements the
                        # intended "do not climb on top of another AI car".
                        if height_ray.entity != self.ai_list[0] and height_ray.entity != self.ai_list[1] and height_ray.entity != self.ai_list[2]:
                            self.y += height_ray.distance
    if movementZ != 0:
        direction = (0, 0, sign(movementZ))
        z_ray = boxcast(origin = self.world_position, direction = direction, distance = self.scale_z / 2 + abs(movementZ), ignore = collision_ignore, thickness = (1, 1))
        if not z_ray.hit:
            self.z += movementZ
        else:
            top_z_ray = raycast(origin = self.world_position - (0, self.scale_y / 2 - 0.1, 0), direction = direction, distance = self.scale_z / 2, ignore = collision_ignore)
            if not top_z_ray.hit:
                self.z += movementZ
                height_ray = raycast(origin = self.world_position + (0, -self.scale_y / 2, sign(movementZ) * self.scale_z / 2), direction = (0, 1, 0), ignore = collision_ignore)
                if height_ray.hit and y_ray.hit:
                    if height_ray.distance < self.slope * 10:
                        # BUG FIX: same ``or`` -> ``and`` correction as the
                        # X-axis slope check.
                        if height_ray.entity != self.ai_list[0] and height_ray.entity != self.ai_list[1] and height_ray.entity != self.ai_list[2]:
                            self.y += height_ray.distance
class PathObject(Entity):
    """An invisible cube marker the AI cars steer toward along a track."""

    def __init__(self, position = (0, 0, 0), rotation = (0, 0, 0)):
        # Keyword order deliberately matches the original call, since
        # Entity applies kwargs in insertion order.
        settings = {
            "model": "cube",
            "position": position,
            "rotation": rotation,
            "texture": "white_cube",
            "scale": (1, 20, 20),
            "visible": False,
            "alpha": 50,
        }
        super().__init__(**settings)
c2d338e18b66c01319cc8393b1f86d815804b2e7 | 2,791 | py | Python | parlai/agents/programr/robot/sentimentdata.py | roholazandie/ParlAI | 32352cab81ecb666aefd596232c5ed9f33cbaeb9 | [
"MIT"
] | null | null | null | parlai/agents/programr/robot/sentimentdata.py | roholazandie/ParlAI | 32352cab81ecb666aefd596232c5ed9f33cbaeb9 | [
"MIT"
] | null | null | null | parlai/agents/programr/robot/sentimentdata.py | roholazandie/ParlAI | 32352cab81ecb666aefd596232c5ed9f33cbaeb9 | [
"MIT"
] | null | null | null | import numpy as np
DISTRIBUTION_SIZE = 10
NEGATIVE_THRESHOLD = -2.5
| 32.453488 | 95 | 0.60301 | import numpy as np
DISTRIBUTION_SIZE = 10
NEGATIVE_THRESHOLD = -2.5


class SentimentData():
    """Fixed-size rolling window of sentiment scores with a recency-weighted sum.

    The window holds the last DISTRIBUTION_SIZE scores (newest last).  The
    rolling sentiment is the dot product of the window with linearly
    increasing weights (0.1 ... 1.0), so recent scores dominate.  When the
    rolling value falls to NEGATIVE_THRESHOLD or below,
    check_threshold_reached() resets the tracker and seeds the window with a
    mildly positive score.
    """

    def __init__(self):
        # Window of the most recent scores; index -1 is the newest.
        self._sentiment_values = np.zeros(DISTRIBUTION_SIZE)
        # BUG FIX: this list was never initialised, so
        # append_sentiment_distribution() raised AttributeError on first use.
        self._sentiment_distributions = []
        self._rolling_sentiment = 0
        self._neg_thresh = NEGATIVE_THRESHOLD
        self._threshold_reached = False
        self.init_weight()

    @property
    def values(self):
        """Raw window of the last DISTRIBUTION_SIZE sentiment scores."""
        return self._sentiment_values

    @property
    def last_sentiment_value(self):
        """Most recently appended sentiment score."""
        return self._sentiment_values[-1]

    @property
    def rolling_sentiment(self):
        """Recency-weighted sum of the window (refreshed on each append)."""
        return self._rolling_sentiment

    @property
    def threshold_reached(self):
        # NOTE(review): this flag is only ever initialised to False here;
        # callers needing a live check should use check_threshold_reached().
        return self._threshold_reached

    def init_weight(self):
        """(Re)build the linearly increasing recency weights 0.1 ... 1.0."""
        self._weight = np.arange(0.1, 1.1, 0.1)

    # NOTE: Most recent sentiment is the last element in self._sentiment_values
    def append_sentiment(self, sentiment):
        """Push one score into the window (None is ignored) and refresh the
        rolling sentiment."""
        try:
            if sentiment is None:
                # None means "no reading this turn" — keep the window as-is.
                return
            self._sentiment_values = np.append(self._sentiment_values, sentiment)
            if len(self._sentiment_values) > DISTRIBUTION_SIZE:
                # Drop the oldest score to keep the window a fixed size.
                self._sentiment_values = np.delete(self._sentiment_values, 0)
            self.update_rolling()
        except Exception as ex:
            # Best-effort: a malformed score must not crash the caller's loop.
            print("Error appending sentiment: {}".format(ex))

    def append_sentiment_distribution(self, sentiment_distribution):
        """Record a full per-class sentiment distribution for later inspection."""
        self._sentiment_distributions.append(sentiment_distribution)

    def update_rolling(self):
        """Recompute the recency-weighted rolling sentiment from the window."""
        try:
            self._rolling_sentiment = np.sum(np.multiply(self._sentiment_values, self._weight))
        except Exception as ex:
            print("Error updating rolling sentiment: {}".format(ex))

    def check_threshold_reached(self):
        """Return True (and reset the tracker) when the rolling sentiment has
        fallen to the negative threshold or below; otherwise return False."""
        if self._rolling_sentiment <= self._neg_thresh:
            # Reset so one bad stretch does not trigger repeatedly, and seed
            # the window with a mildly positive score.
            self.init_weight()
            self._rolling_sentiment = 0
            self.append_sentiment(0.5)
            return True
        else:
            return False
9fb8bf70fcac7fc0e1d816b67d336fa1321bc8ed | 18,032 | py | Python | rlkit/torch/irl/disc_models/other_v1p0_disc_models.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 56 | 2019-10-20T03:09:02.000Z | 2022-03-25T09:21:40.000Z | rlkit/torch/irl/disc_models/other_v1p0_disc_models.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 3 | 2020-10-01T07:33:51.000Z | 2021-05-12T03:40:57.000Z | rlkit/torch/irl/disc_models/other_v1p0_disc_models.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 10 | 2019-11-04T16:56:09.000Z | 2022-03-25T09:21:41.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from rlkit.torch.core import PyTorchModule
from rlkit.torch.networks import Mlp, identity
from rlkit.torch import pytorch_util as ptu
from copy import deepcopy
# self.V_part = V_net
# # this is a hack so it's not added as a submodule
# self.target_V_part = [deepcopy(V_net)]
# self.soft_target_V_tau = soft_target_V_tau
# def cuda(self, *args, **kwargs):
# super().cuda(*args, **kwargs)
# self.target_V_part[0].cuda()
# def forward(self, obs_batch, act_batch, z_batch=None, pol_log_prob=None, next_obs_batch=None):
# obs_batch = self.obs_processor(obs_batch, False, z_batch)
# next_obs_batch = self.obs_processor(next_obs_batch, False, z_batch)
# r = self.r_part(obs_batch)
# V_s = self.V_part(obs_batch)
# V_s_prime = self.target_V_part[0](next_obs_batch).detach()
# shaping = self.gamma*V_s_prime - V_s
# f = r + shaping
# disc_logits = f - pol_log_prob
# clamped_disc_logits = torch.clamp(disc_logits, min=-1.0*self.clamp_magnitude, max=self.clamp_magnitude)
# return clamped_disc_logits, r, shaping
# def _update_target_V_part(self):
# ptu.soft_update_from_to(self.V_part, self.target_V_part[0], self.soft_target_V_tau)
| 34.281369 | 113 | 0.609306 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from rlkit.torch.core import PyTorchModule
from rlkit.torch.networks import Mlp, identity
from rlkit.torch import pytorch_util as ptu
from copy import deepcopy
class ThreeWayResNetAIRLDisc(ResNetAIRLDisc):
    """ResNetAIRLDisc variant whose output head emits 3 logits instead of 1."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Overwrite the parent's final layer with a 3-way output head.
        # NOTE(review): assumes 'hid_dim' is always passed by keyword; a
        # positional-only call would raise KeyError here — confirm callers.
        self.last_fc = nn.Linear(kwargs['hid_dim'], 3)
class AntLinClassDisc(nn.Module):
    """MLP discriminator over gated Ant observations concatenated with actions.

    Observations are preprocessed by AntLinClassObsGating, which soft-selects
    one of two target positions; logits are clamped to
    [-clamp_magnitude, clamp_magnitude].
    """

    def __init__(
        self,
        input_dim,
        num_layer_blocks=2,
        hid_dim=100,
        hid_act='relu',
        use_bn=True,
        clamp_magnitude=10.0,
        z_dim=None
    ):
        super().__init__()

        if hid_act == 'relu':
            act_cls = nn.ReLU
        elif hid_act == 'tanh':
            act_cls = nn.Tanh
        else:
            raise NotImplementedError()

        self.clamp_magnitude = clamp_magnitude

        # Build [Linear, (BN), act] blocks followed by a scalar output layer.
        # Construction order matches the original, so parameter init is identical.
        layers = [nn.Linear(input_dim, hid_dim)]
        if use_bn:
            layers.append(nn.BatchNorm1d(hid_dim))
        layers.append(act_cls())
        for _ in range(num_layer_blocks - 1):
            layers.append(nn.Linear(hid_dim, hid_dim))
            if use_bn:
                layers.append(nn.BatchNorm1d(hid_dim))
            layers.append(act_cls())
        layers.append(nn.Linear(hid_dim, 1))

        self.mod_list = nn.ModuleList(layers)
        self.model = nn.Sequential(*self.mod_list)
        self.obs_processor = AntLinClassObsGating(z_dim=z_dim)

    def forward(self, obs_batch, act_batch, z_batch=None):
        processed_obs = self.obs_processor(obs_batch, False, z_batch)
        if act_batch is None:
            raise NotImplementedError()
        logits = self.model(torch.cat([processed_obs, act_batch], dim=1))
        return torch.clamp(logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
class AntLinClassObsGating(PyTorchModule):
    """Preprocessor that soft-selects one of two target positions in an Ant obs.

    The tail of the observation is sliced as: target_0 (2), target_1 (2),
    then an 8-dim classification feature block; everything before that is the
    raw ant state.  An MLP over [classification features, z] produces a
    scalar gate that interpolates between the two target positions.
    NOTE(review): layout inferred from the slicing below — confirm against
    the environment's observation builder.
    """

    def __init__(self, clamp_magnitude=10.0, z_dim=0):
        self.save_init_params(locals())
        super().__init__()

        self.z_dim = z_dim
        self.clamp_magnitude = clamp_magnitude
        assert clamp_magnitude > 0.0

        # Scalar gating logit from the 8 classification features plus z.
        C_EMB_HID = 128
        self.mlp = nn.Sequential(
            nn.Linear(8 + z_dim, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, 1)
        )

    def forward(self, obs_batch, wrap_absorbing, z_batch=None):
        assert z_batch is not None
        assert not wrap_absorbing

        ant_obs = obs_batch[:, :-12]
        target_0 = obs_batch[:, -12:-10]
        target_1 = obs_batch[:, -10:-8]
        classification_batch = obs_batch[:, -8:]

        logits = self.mlp(torch.cat([classification_batch, z_batch], dim=-1))
        logits = torch.clamp(logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical math).
        gate = torch.sigmoid(logits)

        # Blend the two candidate targets and re-attach to the ant state.
        obs_batch = torch.cat(
            [
                ant_obs,
                gate * target_0 + (1.0 - gate) * target_1
            ],
            dim=-1
        )
        return obs_batch
class MlpGAILDisc(Mlp):
    """GAIL discriminator: an Mlp scored over [obs, act] with clamped logits."""

    def __init__(self, *args, clamp_magnitude=10.0, **kwargs):
        self.save_init_params(locals())
        super().__init__(*args, **kwargs)
        assert clamp_magnitude > 0.
        self.clamp_magnitude = clamp_magnitude

    def forward(self, obs_batch, act_batch):
        joint_input = torch.cat([obs_batch, act_batch], dim=1)
        logits = super().forward(joint_input)
        bound = self.clamp_magnitude
        return torch.clamp(logits, min=-1.0 * bound, max=bound)
class ResnetDisc(PyTorchModule):
    """Discriminator MLP with additive residual connections between hidden layers.

    Forward pass: x = act(fc_0([obs, act])); then for each hidden layer i,
    x = x + act(fc_i(x)); finally out_act(fc_last(x)).
    """

    def __init__(
        self,
        hidden_size,
        n_layers,
        output_size,
        input_size,
        hidden_activation=F.tanh,
        output_activation=identity
    ):
        self.save_init_params(locals())
        super().__init__()

        b_init_value = 0.1
        hidden_init = ptu.fanin_init

        self.hidden_activation = hidden_activation
        self.output_activation = output_activation
        self.mod_list = nn.ModuleList()

        def _make_fc(in_dim, out_dim):
            # Fan-in init for weights, constant init for biases (as before).
            fc = nn.Linear(in_dim, out_dim)
            hidden_init(fc.weight)
            fc.bias.data.fill_(b_init_value)
            return fc

        self.mod_list.append(_make_fc(input_size, hidden_size))
        for _ in range(n_layers - 1):
            self.mod_list.append(_make_fc(hidden_size, hidden_size))
        self.mod_list.append(_make_fc(hidden_size, output_size))
        # (Removed leftover debug print of self.mod_list.)

    def forward(self, obs_batch, act_batch):
        x = torch.cat([obs_batch, act_batch], dim=1)
        x = self.hidden_activation(self.mod_list[0](x))
        for i in range(1, len(self.mod_list) - 1):
            # BUG FIX: the original computed y = activation(x) instead of
            # activation(y), discarding each hidden layer's output and turning
            # every residual block into x + act(x).
            y = self.hidden_activation(self.mod_list[i](x))
            x = x + y
        return self.output_activation(self.mod_list[-1](x))
class SingleColorFetchCustomDisc(PyTorchModule):
    """Discriminator for a two-object Fetch task that attends to one object by color.

    Each object's RGB color is embedded to a scalar score; a sigmoid over the
    score difference gates between the two objects' state embeddings, and the
    gated embedding is scored together with the gripper state and action.
    Logits are clamped to [-clamp_magnitude, clamp_magnitude].
    """

    def __init__(self, clamp_magnitude=10.0):
        self.save_init_params(locals())
        super().__init__()

        self.clamp_magnitude = clamp_magnitude
        assert clamp_magnitude > 0.0

        # Maps a 3-dim RGB color to a scalar relevance score.
        C_EMB_HID = 64
        self.color_embed_mlp = nn.Sequential(
            nn.Linear(3, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, 1)
        )

        # Embeds a single object's 6-dim state.
        OBJ_EMB_HID = 128
        OBJ_EMB_DIM = 64
        self.object_state_embed_mlp = nn.Sequential(
            nn.Linear(6, OBJ_EMB_HID),
            nn.BatchNorm1d(OBJ_EMB_HID),
            nn.ReLU(),
            nn.Linear(OBJ_EMB_HID, OBJ_EMB_HID),
            nn.BatchNorm1d(OBJ_EMB_HID),
            nn.ReLU(),
            nn.Linear(OBJ_EMB_HID, OBJ_EMB_DIM)
        )

        # Scores [object embedding (64), gripper obs (4), action (4)].
        FINAL_HID = 64
        self.final_mlp = nn.Sequential(
            nn.Linear(OBJ_EMB_DIM + 4 + 4, FINAL_HID),
            nn.BatchNorm1d(FINAL_HID),
            nn.ReLU(),
            nn.Linear(FINAL_HID, FINAL_HID),
            nn.BatchNorm1d(FINAL_HID),
            nn.ReLU(),
            nn.Linear(FINAL_HID, 1)
        )

    def forward(self, obs_batch, act_batch):
        # Slicing presumes layout: obj positions (0:6), obj aux state (6:12),
        # obj colors (12:18), gripper state last 4 — TODO confirm with env.
        obj_0_state = torch.cat([obs_batch[:, :3], obs_batch[:, 6:9]], dim=-1)
        obj_1_state = torch.cat([obs_batch[:, 3:6], obs_batch[:, 9:12]], dim=-1)
        obj_0_color = obs_batch[:, 12:15]
        obj_1_color = obs_batch[:, 15:18]
        gripper_obs = obs_batch[:, -4:]

        # Gate between the objects by their relative color scores.
        color_0_embed = self.color_embed_mlp(obj_0_color)
        color_1_embed = self.color_embed_mlp(obj_1_color)
        color_logits = color_0_embed - color_1_embed
        color_logits = torch.clamp(color_logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical math).
        color_gate = torch.sigmoid(color_logits)

        state_0_embed = self.object_state_embed_mlp(obj_0_state)
        state_1_embed = self.object_state_embed_mlp(obj_1_state)
        gated_embed = color_gate * state_0_embed + (1.0 - color_gate) * state_1_embed

        concat_final_input = torch.cat([gated_embed, gripper_obs, act_batch], dim=-1)
        disc_logits = self.final_mlp(concat_final_input)
        return torch.clamp(disc_logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
class SecondVersionSingleColorFetchCustomDisc(PyTorchModule):
    """Simpler color-gated Fetch discriminator.

    Like SingleColorFetchCustomDisc but gates the raw 6-dim object state
    directly (no state-embedding MLP) and scores
    [gated state, gripper obs, action] with a single MLP.
    """

    def __init__(self, clamp_magnitude=10.0):
        self.save_init_params(locals())
        super().__init__()

        self.clamp_magnitude = clamp_magnitude
        assert clamp_magnitude > 0.0

        # Maps a 3-dim RGB color to a scalar relevance score.
        C_EMB_HID = 32
        self.color_embed_mlp = nn.Sequential(
            nn.Linear(3, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, 1)
        )

        # Scores [object state (6), gripper obs (4), action (4)].
        DISC_HID = 128
        self.disc_part = nn.Sequential(
            nn.Linear(6 + 4 + 4, DISC_HID),
            nn.BatchNorm1d(DISC_HID),
            nn.ReLU(),
            nn.Linear(DISC_HID, DISC_HID),
            nn.BatchNorm1d(DISC_HID),
            nn.ReLU(),
            nn.Linear(DISC_HID, 1)
        )

    def forward(self, obs_batch, act_batch):
        # Slicing presumes the same layout as SingleColorFetchCustomDisc —
        # TODO confirm with the environment's observation builder.
        obj_0_state = torch.cat([obs_batch[:, :3], obs_batch[:, 6:9]], dim=-1)
        obj_1_state = torch.cat([obs_batch[:, 3:6], obs_batch[:, 9:12]], dim=-1)
        obj_0_color = obs_batch[:, 12:15]
        obj_1_color = obs_batch[:, 15:18]
        gripper_obs = obs_batch[:, -4:]

        # Gate between the objects by their relative color scores.
        color_0_embed = self.color_embed_mlp(obj_0_color)
        color_1_embed = self.color_embed_mlp(obj_1_color)
        color_logits = color_0_embed - color_1_embed
        color_logits = torch.clamp(color_logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical math).
        color_gate = torch.sigmoid(color_logits)
        gated_obj_state = color_gate * obj_0_state + (1.0 - color_gate) * obj_1_state

        concat_final_input = torch.cat([gated_obj_state, gripper_obs, act_batch], dim=-1)
        disc_logits = self.disc_part(concat_final_input)
        return torch.clamp(disc_logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
class ObsGatingV1(PyTorchModule):
    """Shared observation preprocessor for the color Fetch discriminators.

    Soft-selects one of the two objects' 6-dim states via a color-score gate
    (optionally conditioned on a latent z) and returns
    [gated object state, gripper obs(, absorbing flag)].
    """

    def __init__(self, clamp_magnitude=10.0, z_dim=0):
        self.save_init_params(locals())
        super().__init__()

        self.z_dim = z_dim
        self.clamp_magnitude = clamp_magnitude
        assert clamp_magnitude > 0.0

        # Maps [RGB color(, z)] to a scalar relevance score.
        C_EMB_HID = 32
        self.color_embed_mlp = nn.Sequential(
            nn.Linear(3 + z_dim, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, C_EMB_HID),
            nn.BatchNorm1d(C_EMB_HID),
            nn.ReLU(),
            nn.Linear(C_EMB_HID, 1)
        )

    def forward(self, obs_batch, wrap_absorbing, z_batch=None):
        # Slicing presumes layout: obj positions (0:6), obj aux state (6:12),
        # obj colors (12:18), gripper state (18:22)(, absorbing flag at 22).
        obj_0_state = torch.cat([obs_batch[:, :3], obs_batch[:, 6:9]], dim=-1)
        obj_1_state = torch.cat([obs_batch[:, 3:6], obs_batch[:, 9:12]], dim=-1)
        obj_0_color = obs_batch[:, 12:15]
        obj_1_color = obs_batch[:, 15:18]
        gripper_obs = obs_batch[:, 18:22]
        if wrap_absorbing:
            absorbing = obs_batch[:, 22:23]

        if z_batch is None:
            color_0_embed = self.color_embed_mlp(obj_0_color)
            color_1_embed = self.color_embed_mlp(obj_1_color)
        else:
            color_0_embed = self.color_embed_mlp(torch.cat([obj_0_color, z_batch], dim=1))
            color_1_embed = self.color_embed_mlp(torch.cat([obj_1_color, z_batch], dim=1))
        color_logits = color_0_embed - color_1_embed
        color_logits = torch.clamp(color_logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical math).
        color_gate = torch.sigmoid(color_logits)
        gated_obj_state = color_gate * obj_0_state + (1.0 - color_gate) * obj_1_state

        if wrap_absorbing:
            concat_obs = torch.cat([gated_obj_state, gripper_obs, absorbing], dim=-1)
        else:
            concat_obs = torch.cat([gated_obj_state, gripper_obs], dim=-1)
        return concat_obs
class ThirdVersionSingleColorFetchCustomDisc(PyTorchModule):
    """Color-gated Fetch discriminator scoring the gated obs (optionally with action).

    Observations pass through ObsGatingV1; the 10-dim gated state (14 with
    the 4-dim action unless state_only, +1 if wrap_absorbing) is scored by a
    3-hidden-layer MLP whose logits are clamped to
    [-clamp_magnitude, clamp_magnitude].
    """

    def __init__(self, clamp_magnitude=10.0, state_only=False, wrap_absorbing=False, z_dim=0):
        self.save_init_params(locals())
        super().__init__()

        self.z_dim = z_dim
        self.state_only = state_only
        self.wrap_absorbing = wrap_absorbing
        self.clamp_magnitude = clamp_magnitude
        assert clamp_magnitude > 0.0

        self.obs_processor = ObsGatingV1(clamp_magnitude=self.clamp_magnitude, z_dim=z_dim)

        DISC_HID = 512
        print('\n\nDISC HID IS %d\n\n' % DISC_HID)
        # 10-dim gated obs, +4 for the action unless state_only.
        input_dim = 10 if state_only else 14
        # input_dim = 20 if state_only else 24
        if wrap_absorbing: input_dim += 1
        self.disc_part = nn.Sequential(
            nn.Linear(input_dim, DISC_HID),
            nn.BatchNorm1d(DISC_HID),
            nn.ReLU(),
            nn.Linear(DISC_HID, DISC_HID),
            nn.BatchNorm1d(DISC_HID),
            nn.ReLU(),
            nn.Linear(DISC_HID, DISC_HID),
            nn.BatchNorm1d(DISC_HID),
            nn.ReLU(),
            nn.Linear(DISC_HID, 1)
        )

    def forward(self, obs_batch, act_batch, z_batch=None):
    # def forward(self, obs_batch, act_batch, next_obs_batch, z_batch=None):
        obs_batch = self.obs_processor(obs_batch, self.wrap_absorbing, z_batch)
        # next_obs_batch = self.obs_processor(next_obs_batch, self.wrap_absorbing, z_batch)
        if self.state_only:
            disc_logits = self.disc_part(obs_batch)
            # disc_logits = self.disc_part(obs_batch, next_obs_batch)
        else:
            concat_input = torch.cat([obs_batch, act_batch], dim=-1)
            # concat_input = torch.cat([obs_batch, act_batch, next_obs_batch], dim=-1)
            disc_logits = self.disc_part(concat_input)
        clamped_disc_logits = torch.clamp(disc_logits, min=-1.0*self.clamp_magnitude, max=self.clamp_magnitude)
        return clamped_disc_logits
class TransferVersionSingleColorFetchCustomDisc(PyTorchModule):
    """AIRL-style discriminator with a disentangled reward and shaping term.

    Computes f(s, s') = r(s) + gamma * V(s') - V(s) over ObsGatingV1-processed
    observations, with a slowly updated (Polyak-averaged) target network used
    for V(s'); discriminator logits are f - log pi(a|s), clamped to
    [-clamp_magnitude, clamp_magnitude].
    """

    def __init__(self, clamp_magnitude=10.0, z_dim=0, gamma=0.99, soft_target_V_tau=0.005):
        self.save_init_params(locals())
        super().__init__()

        self.gamma = gamma
        self.z_dim = z_dim
        self.clamp_magnitude = clamp_magnitude
        assert clamp_magnitude > 0.0

        self.obs_processor = ObsGatingV1(clamp_magnitude=self.clamp_magnitude, z_dim=z_dim)

        # Reward head r(s) over the 10-dim gated observation.
        R_HID = 256
        print('\n\nR HID IS %d\n\n' % R_HID)
        input_dim = 10
        self.r_part = nn.Sequential(
            nn.Linear(input_dim, R_HID),
            nn.BatchNorm1d(R_HID),
            nn.ReLU(),
            nn.Linear(R_HID, R_HID),
            nn.BatchNorm1d(R_HID),
            nn.ReLU(),
            nn.Linear(R_HID, R_HID),
            nn.BatchNorm1d(R_HID),
            nn.ReLU(),
            nn.Linear(R_HID, 1)
        )

        # Shaping/value head V(s).
        V_HID = 256
        # BUG FIX: this log line previously said "R HID" while reporting the
        # V network width.
        print('\n\nV HID IS %d\n\n' % V_HID)
        input_dim = 10
        V_net = nn.Sequential(
            nn.Linear(input_dim, V_HID),
            nn.BatchNorm1d(V_HID),
            nn.ReLU(),
            nn.Linear(V_HID, V_HID),
            nn.BatchNorm1d(V_HID),
            nn.ReLU(),
            nn.Linear(V_HID, V_HID),
            nn.BatchNorm1d(V_HID),
            nn.ReLU(),
            nn.Linear(V_HID, 1)
        )
        self.V_part = V_net
        # NOTE: registered as a submodule; its output is .detach()-ed in
        # forward and its weights only move via _update_target_V_part().
        self.target_V_part = deepcopy(V_net)
        self.soft_target_V_tau = soft_target_V_tau

    def forward(self, obs_batch, act_batch, z_batch=None, pol_log_prob=None, next_obs_batch=None):
        # Clamp the policy log-prob for numerical stability of the logits.
        pol_log_prob = torch.clamp(pol_log_prob, min=-10.0, max=10.0)

        obs_batch = self.obs_processor(obs_batch, False, z_batch)
        next_obs_batch = self.obs_processor(next_obs_batch, False, z_batch)

        r = self.r_part(obs_batch)
        V_s = self.V_part(obs_batch)
        V_s_prime = self.target_V_part(next_obs_batch).detach()
        shaping = self.gamma * V_s_prime - V_s
        f = r + shaping

        disc_logits = f - pol_log_prob
        clamped_disc_logits = torch.clamp(disc_logits, min=-1.0 * self.clamp_magnitude, max=self.clamp_magnitude)
        return clamped_disc_logits, r, shaping, V_s

    def _update_target_V_part(self):
        # Polyak-average the target V network toward the live V network.
        ptu.soft_update_from_to(self.V_part, self.target_V_part, self.soft_target_V_tau)
# self.V_part = V_net
# # this is a hack so it's not added as a submodule
# self.target_V_part = [deepcopy(V_net)]
# self.soft_target_V_tau = soft_target_V_tau
# def cuda(self, *args, **kwargs):
# super().cuda(*args, **kwargs)
# self.target_V_part[0].cuda()
# def forward(self, obs_batch, act_batch, z_batch=None, pol_log_prob=None, next_obs_batch=None):
# obs_batch = self.obs_processor(obs_batch, False, z_batch)
# next_obs_batch = self.obs_processor(next_obs_batch, False, z_batch)
# r = self.r_part(obs_batch)
# V_s = self.V_part(obs_batch)
# V_s_prime = self.target_V_part[0](next_obs_batch).detach()
# shaping = self.gamma*V_s_prime - V_s
# f = r + shaping
# disc_logits = f - pol_log_prob
# clamped_disc_logits = torch.clamp(disc_logits, min=-1.0*self.clamp_magnitude, max=self.clamp_magnitude)
# return clamped_disc_logits, r, shaping
# def _update_target_V_part(self):
# ptu.soft_update_from_to(self.V_part, self.target_V_part[0], self.soft_target_V_tau)
| 15,587 | 230 | 768 |
3f4e80a85e1b0df0823521f49e0f3c23eaa69849 | 1,461 | py | Python | bin/Archive/read.json.py | rubenchazarra/AS_Function_Evaluator | 5942d074fb03726fe539f8ecf37db09f8132922e | [
"MIT"
] | null | null | null | bin/Archive/read.json.py | rubenchazarra/AS_Function_Evaluator | 5942d074fb03726fe539f8ecf37db09f8132922e | [
"MIT"
] | null | null | null | bin/Archive/read.json.py | rubenchazarra/AS_Function_Evaluator | 5942d074fb03726fe539f8ecf37db09f8132922e | [
"MIT"
] | null | null | null |
import argparse
import sys
import json
if __name__ == "__main__":
main()
| 20.871429 | 117 | 0.499658 |
import argparse
import sys
import json
def main():
    """Parse command-line arguments, load the PFAM JSON output file and
    print every field of its first record to stdout."""
    global args
    global json_file

    parser = argparse.ArgumentParser()
    parser.add_argument('-json_file', metavar='<json_file>', dest="json_file", help="JSON file output by PFAM database")
    args = parser.parse_args()

    # Prep global objects
    json_file = str(args.json_file)

    # Load the first record; the handle deliberately rebinds the global
    # `json_file` name, matching the original behaviour.
    with open(json_file, 'r') as json_file:
        data = json.load(json_file)[0]

    # (header label, JSON key) pairs, emitted in the original order --
    # note the "Env" section is printed twice, as before.
    sections = (
        ("Model length", 'model_length'),
        ("Alignment", 'align'),
        ("Env", 'env'),
        ("Name", 'name'),
        ("Accession", 'acc'),
        ("Significative", 'sig'),
        ("Expectation value (E-value)", 'evalue'),
        ("Description", 'desc'),
        ("Env", 'env'),
        ("HMM", 'hmm'),
        ("Active site", 'act_site'),
        ("Type", 'type'),
        ("Bits", 'bits'),
        ("Clan", 'clan'),
        ("Sequence", 'seq'),
    )
    for label, key in sections:
        print(label + " ----------------------")
        print(data[key])
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 1,357 | 0 | 23 |
cc9098ccaf76a651fffae1eccde9e2b362f7b70a | 433 | py | Python | passing_numpy_array/setup.py | JFeaux/cython_demo | 5be2db83fb2c4c948d8c0f26dee578798202e94f | [
"MIT"
] | 1 | 2019-04-23T03:09:39.000Z | 2019-04-23T03:09:39.000Z | passing_numpy_array/setup.py | JFeaux/cython_demo | 5be2db83fb2c4c948d8c0f26dee578798202e94f | [
"MIT"
] | null | null | null | passing_numpy_array/setup.py | JFeaux/cython_demo | 5be2db83fb2c4c948d8c0f26dee578798202e94f | [
"MIT"
] | null | null | null | from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
# Cython and C++ sources compiled together into one extension module.
sourcefiles = ['array_tools.pyx', '_sum.cpp']
# Extra compiler flags and link libraries (none needed for this build).
extra_compile_args = []
libraries = []
# Extension name '*' lets cythonize derive the module name from the .pyx file.
# NOTE(review): `libraries` above is defined but not passed through --
# Extension is given a hard-coded empty list; confirm this is intentional.
ext = [Extension('*',
                 sourcefiles,
                 extra_compile_args=extra_compile_args,
                 libraries=[],
                 language='c++')
       ]
# Build the cythonized extension, adding NumPy headers to the include path.
setup(ext_modules=cythonize(ext), include_dirs=[np.get_include()])
| 22.789474 | 66 | 0.690531 | from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
sourcefiles = ['array_tools.pyx', '_sum.cpp']
extra_compile_args = []
libraries = []
ext = [Extension('*',
sourcefiles,
extra_compile_args=extra_compile_args,
libraries=[],
language='c++')
]
setup(ext_modules=cythonize(ext), include_dirs=[np.get_include()])
| 0 | 0 | 0 |
e2b51acb6cb17ac32a52f8de649020de26bc5c20 | 9,128 | py | Python | primehub/models.py | InfuseAI/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 10 | 2021-09-13T23:14:22.000Z | 2022-02-06T06:07:40.000Z | primehub/models.py | KellenJohn/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 4 | 2021-08-10T03:10:27.000Z | 2021-12-16T02:11:50.000Z | primehub/models.py | KellenJohn/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 1 | 2021-12-21T11:59:51.000Z | 2021-12-21T11:59:51.000Z | import textwrap
from typing import Iterator, Any
from primehub import Helpful, cmd, Module
from primehub.utils.display import display_tree_like_format
| 33.807407 | 115 | 0.567704 | import textwrap
from typing import Iterator, Any
from primehub import Helpful, cmd, Module
from primehub.utils.display import display_tree_like_format
def timestamp_to_isoformat(timestamp):
unix_timestamp = int(int(timestamp) / 1000)
from datetime import datetime
return datetime.fromtimestamp(unix_timestamp)
class Models(Helpful, Module):
@cmd(name='list', description='List models', return_required=True)
def list(self) -> Iterator:
"""
List models
:rtype: Iterator
:returns: All registered models
"""
query = """
query QueryModels($group: String!) {
mlflow(where: { group: $group }) {
...MLflowSettingInfo
}
models(where: { group: $group }) {
...ModelInfo
}
}
fragment MLflowSettingInfo on MLflowSetting {
trackingUri
uiUrl
}
fragment ModelInfo on Model {
name
creationTimestamp
lastUpdatedTimestamp
description
latestVersions {
name
version
}
}
"""
results = self.request({'group': self.group_name}, query)
if 'data' in results:
results = results['data']
for m in results['models']:
m['creationTimestamp'] = timestamp_to_isoformat(m['creationTimestamp'])
m['lastUpdatedTimestamp'] = timestamp_to_isoformat(m['lastUpdatedTimestamp'])
versions = m.pop('latestVersions')
m['latestVersion'] = versions[0]['version']
yield m
return results
@cmd(name='get', description='Get the model', return_required=True)
def get(self, name: str) -> dict:
"""
Get the model
:type name: str
:param name: The model name
:rtype: dict
:return: The detail information of a model
"""
query = """
query QueryModel($group: String!, $name: String!) {
mlflow(where: { group: $group }) {
...MLflowSettingInfo
}
model(where: { group: $group, name: $name }) {
...ModelInfo
}
modelVersions(where: { group: $group, name: $name }) {
...ModelVersionInfo
}
}
fragment MLflowSettingInfo on MLflowSetting {
trackingUri
uiUrl
}
fragment ModelInfo on Model {
name
creationTimestamp
lastUpdatedTimestamp
description
latestVersions {
name
version
}
}
fragment ModelVersionInfo on ModelVersion {
name
version
creationTimestamp
lastUpdatedTimestamp
deployedBy
}
"""
results = self.request({'group': self.group_name, 'name': name}, query)
if 'data' not in results:
return results
results = results['data']
return results
@cmd(name='list-versions', description='List versions of the model', return_required=True)
def list_versions(self, model: str) -> Iterator:
"""
List versions of the model
:type model: str
:param model: The model name
:rtype: Iterator
:returns: All versions of a model
"""
query = """
query QueryModel($group: String!, $name: String!) {
modelVersions(where: { group: $group, name: $name }) {
...ModelVersionInfo
}
}
fragment ModelVersionInfo on ModelVersion {
name
version
creationTimestamp
lastUpdatedTimestamp
deployedBy
}
"""
results = self.request({'group': self.group_name, 'name': model}, query)
if 'data' not in results:
return results
results = results['data']
for m in results['modelVersions']:
m['creationTimestamp'] = timestamp_to_isoformat(m['creationTimestamp'])
m['lastUpdatedTimestamp'] = timestamp_to_isoformat(m['lastUpdatedTimestamp'])
yield m
return results
@cmd(name='get-version', description='Get a version of the model', return_required=True)
def get_version(self, model: str, version: str) -> dict:
"""
Get a version of the model
:type model: str
:param model: The model name
:type version: str
:param version: Verson number
:rtype: dict
:return: The detail information of a model version
"""
query = """
query QueryModelVersion($group: String!, $name: String!, $version: String!) {
mlflow(where: { group: $group }) {
...MLflowSettingInfo
}
modelVersion(where: { group: $group, name: $name, version: $version }) {
...ModelVersionInfo
run
}
}
fragment MLflowSettingInfo on MLflowSetting {
trackingUri
uiUrl
}
fragment ModelVersionInfo on ModelVersion {
name
version
creationTimestamp
lastUpdatedTimestamp
deployedBy
}
"""
results = self.request({'group': self.group_name, 'name': model, 'version': version}, query)
if 'data' not in results:
return results
results = results['data']['modelVersion']
return results
@cmd(name='deploy', description='Deploy the model version to the speific deployment', return_required=True)
def deploy(self, model: str, version: str, deploy_id: str) -> dict:
"""
Deploy the model version to the speific deployment
:type model: str
:param model: The model name
:type version: str
:param version: Verson number
:type deploy_id: str
:param deploy_id: Deployment id
:rtype: dict
:return: The detail information of the updated deployment
"""
return self.primehub.deployments.update(deploy_id, {'modelURI': f'models:/{model}/{version}'})
def help_description(self):
return "Manage models"
def display(self, action: dict, value: Any):
from io import StringIO
if action['func'] == 'get' and self.get_display().name != 'json':
value['model']['creationTimestamp'] = timestamp_to_isoformat(value['model']['creationTimestamp'])
value['model']['lastUpdatedTimestamp'] = timestamp_to_isoformat(value['model']['lastUpdatedTimestamp'])
versions = value.pop('modelVersions')
self.get_display().display(action, value, self.primehub.stdout)
self.get_display().display(action, "versions:", self.primehub.stdout)
for version in versions:
version['creationTimestamp'] = timestamp_to_isoformat(version['creationTimestamp'])
version['lastUpdatedTimestamp'] = timestamp_to_isoformat(version['lastUpdatedTimestamp'])
self.get_display().display(action, " -", self.primehub.stdout)
display_tree_like_format(version, self.primehub.stdout, 0, 2)
elif action['func'] == 'get_version' and self.get_display().name != 'json':
version = value
version['creationTimestamp'] = timestamp_to_isoformat(version['creationTimestamp'])
version['lastUpdatedTimestamp'] = timestamp_to_isoformat(version['lastUpdatedTimestamp'])
run = value.pop('run')
run['info']['startTime'] = timestamp_to_isoformat(run['info']['startTime'])
run['info']['endTime'] = timestamp_to_isoformat(run['info']['endTime'])
data = run.pop('data')
self.get_display().display(action, version, self.primehub.stdout)
self.get_display().display(action, "run:", self.primehub.stdout)
display_tree_like_format(run, self.primehub.stdout, 0, 2)
self.get_display().display(action, " data:", self.primehub.stdout)
# print metrics table
for metric in data['metrics']:
metric['timestamp'] = timestamp_to_isoformat(metric['timestamp'])
self.get_display().display(action, " metrics:", self.primehub.stdout)
metrics_io = StringIO()
self.get_display().display(action, data['metrics'], metrics_io)
self.get_display().display(action,
textwrap.indent(metrics_io.getvalue().strip(), ' ' * 6),
self.primehub.stdout)
# print params table
self.get_display().display(action, " params:", self.primehub.stdout)
s = StringIO()
self.get_display().display(action, data['params'], s)
self.get_display().display(action,
textwrap.indent(s.getvalue().strip(), ' ' * 6),
self.primehub.stdout)
else:
super(Models, self).display(action, value)
| 3,050 | 5,878 | 46 |
bd5ab117d585128256e9f43687b2f40ea381e07b | 1,691 | py | Python | omc_python_app/views.py | Hooker41/Exchange-Trading-Order-Management-Tool | d7f6878655f0fe08c15b6d2b0b5b0db487b97430 | [
"MIT"
] | null | null | null | omc_python_app/views.py | Hooker41/Exchange-Trading-Order-Management-Tool | d7f6878655f0fe08c15b6d2b0b5b0db487b97430 | [
"MIT"
] | null | null | null | omc_python_app/views.py | Hooker41/Exchange-Trading-Order-Management-Tool | d7f6878655f0fe08c15b6d2b0b5b0db487b97430 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic import TemplateView
from django.http import HttpResponse, JsonResponse, HttpResponseForbidden, HttpResponseBadRequest
import ccxt
# Create your views here.
exchangeIns = {}
| 27.721311 | 99 | 0.691307 | from django.shortcuts import render
from django.views.generic import TemplateView
from django.http import HttpResponse, JsonResponse, HttpResponseForbidden, HttpResponseBadRequest
import ccxt
# Create your views here.
def getExchanges(request):
exchangeName = request.GET["exchange"]
print(exchangeName)
exchanges = ccxt.exchanges
res = {
'exchanges' : exchanges
}
return JsonResponse(res)
exchangeIns = {}
def connectExchange(request):
exchangeName = request.POST['exchange']
apikey = request.POST['apikey']
secret = request.POST['secret']
exchangeIns[exchangeName] = eval ('ccxt.%s ()' % exchangeName)
exchangeIns[exchangeName].apiKey = apikey
exchangeIns[exchangeName].secret = secret
try:
balance = exchangeIns[exchangeName].fetch_balance()
except Exception as e:
return HttpResponseBadRequest()
try:
symbols = exchangeIns[exchangeName].symbols
btcSymbols = [k for k in symbols if 'BTC' in k]
except Exception as e:
return HttpResponseBadRequest()
return JsonResponse({exchangeName: True, 'symbol': btcSymbols})
def disconnectExchange(request):
exchangeName = request.GET['exchange']
del exchangeIns[exchangeName]
return JsonResponse({exchangeName: False})
def getTicker(request):
exchangeName = request.POST['exchange']
pair = request.POST['pair']
if exchangeIns[exchangeName]:
exchange = exchangeIns[exchangeName]
ticker = exchange.fetch_ticker(pair)
return JsonResponse({ 'bid': ticker['bid'], 'ask': ticker['ask'], 'last': ticker['last'] })
else:
return HttpResponseBadRequest()
| 1,361 | 0 | 91 |
b5da938235e6816cfd761ec8546d351be3bd180e | 30 | py | Python | lib/python3.4/tokenize.py | caiocsalvador/whats_the_craic | c49ef62f1acd7379f6fd90c2b93aa1fa00c8661d | [
"MIT"
] | 7 | 2017-04-26T12:28:22.000Z | 2021-02-09T18:59:50.000Z | django-ng/lib/python3.4/tokenize.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | [
"MIT"
] | 13 | 2015-12-04T03:38:37.000Z | 2015-12-12T00:15:46.000Z | django-ng/lib/python3.4/tokenize.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | [
"MIT"
] | 8 | 2017-06-01T08:42:16.000Z | 2020-07-23T12:30:19.000Z | /usr/lib/python3.4/tokenize.py | 30 | 30 | 0.8 | /usr/lib/python3.4/tokenize.py | 0 | 0 | 0 |
683aaa70f4e21185ee6a7cdb4a48ac989e06ef19 | 1,954 | py | Python | core/polyaxon/polypod/compiler/resolver/resolver.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | core/polyaxon/polypod/compiler/resolver/resolver.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | core/polyaxon/polypod/compiler/resolver/resolver.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict, Optional
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.polyflow import V1CompiledOperation
from polyaxon.polypod.compiler.resolver.base import BaseResolver
| 31.015873 | 98 | 0.701126 | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict, Optional
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.polyflow import V1CompiledOperation
from polyaxon.polypod.compiler.resolver.base import BaseResolver
def resolve(
    owner_name: str,
    project_name: str,
    project_uuid: str,
    run_name: str,
    run_uuid: str,
    run_path: str,
    compiled_operation: V1CompiledOperation,
    params: Optional[Dict[str, Dict]],
    run=None,
    resolver_cls=None,
    created_at: datetime = None,
    compiled_at: datetime = None,
):
    """Build a resolver for the compiled operation and execute it.

    Returns a ``(resolver, resolver.resolve() result)`` tuple.  Falls
    through (returning None) if the constructed resolver is falsy,
    mirroring the original guard.

    Raises PolyaxonCompilerError when the operation's run kind is not
    supported by the resolver class.
    """
    resolver_cls = resolver_cls or BaseResolver
    run_kind = compiled_operation.get_run_kind()
    if run_kind not in resolver_cls.KINDS:
        raise PolyaxonCompilerError(
            "Resolver Error. "
            "Specification with run kind: {} is not supported in this deployment version.".format(
                run_kind
            )
        )
    resolver_kwargs = dict(
        run=run,
        compiled_operation=compiled_operation,
        owner_name=owner_name,
        project_name=project_name,
        project_uuid=project_uuid,
        run_name=run_name,
        run_path=run_path,
        run_uuid=run_uuid,
        params=params,
        created_at=created_at,
        compiled_at=compiled_at,
    )
    resolver = resolver_cls(**resolver_kwargs)
    if resolver:
        return resolver, resolver.resolve()
| 1,092 | 0 | 23 |
37832db13561a35be24cf2bec836422c123e4278 | 7,192 | py | Python | fuzzers/030-iob/process_rdb.py | rw1nkler/prjxray | aff076b47dcf6d653eb3ce791b41fd6cf4343edd | [
"ISC"
] | 1 | 2021-12-16T03:09:59.000Z | 2021-12-16T03:09:59.000Z | fuzzers/030-iob/process_rdb.py | rw1nkler/prjxray | aff076b47dcf6d653eb3ce791b41fd6cf4343edd | [
"ISC"
] | null | null | null | fuzzers/030-iob/process_rdb.py | rw1nkler/prjxray | aff076b47dcf6d653eb3ce791b41fd6cf4343edd | [
"ISC"
] | 1 | 2020-11-10T01:57:12.000Z | 2020-11-10T01:57:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" IOB bits are more complicated than can be easily expressed to segmaker.
There are couple cases that need to be handled here:
- There are some bits that are always set for IN-only ports, but are cleared
selectively for OUT and INOUT ports.
- There are bits per each IOSTANDARD, in addition to drive patterns. These
can be merged to provide unique "(IOSTANDARD, DRIVE)" bit sets.
"""
import argparse
def filter_bits(site, bits):
""" Seperate top and bottom bits.
Some IOSTANDARD bits are tile wide, but really only apply to a half.
It is hard to write a fuzzer for this, but it is easy to filter by site,
and all bits appear to have a nice hard halve seperatation in the bitidx.
"""
if site == 'IOB_Y0':
min_bitidx = 64
max_bitidx = 127
elif site == 'IOB_Y1':
min_bitidx = 0
max_bitidx = 63
else:
assert False, site
return frozenset(inner())
if __name__ == "__main__":
main()
| 32.107143 | 82 | 0.525028 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" IOB bits are more complicated than can be easily expressed to segmaker.
There are couple cases that need to be handled here:
- There are some bits that are always set for IN-only ports, but are cleared
selectively for OUT and INOUT ports.
- There are bits per each IOSTANDARD, in addition to drive patterns. These
can be merged to provide unique "(IOSTANDARD, DRIVE)" bit sets.
"""
import argparse
def get_name(l):
    """Return the feature name: the first space-separated token of the line."""
    return l.strip().partition(' ')[0]
def get_site(l):
    """Return the site component (second '.'-separated field) of the feature name."""
    feature = get_name(l)
    return feature.split('.')[1]
def parse_bits(l):
    """Return the feature's bits as a frozenset.

    Features resolved to constant zero ('<0' or '<const0>') yield the
    empty set.
    """
    tokens = l.strip().split(' ')
    if tokens[1] in ('<0', '<const0>'):
        return frozenset()
    return frozenset(tokens[1:])
def filter_bits(site, bits):
    """Keep only the bits belonging to `site`'s half of the tile.

    Some IOSTANDARD bits are reported tile-wide but really apply to one
    half.  The halves split cleanly by bit index: IOB_Y1 owns indices
    0-63 and IOB_Y0 owns 64-127, so filtering by site is sufficient.
    """
    if site == 'IOB_Y0':
        lo, hi = 64, 127
    elif site == 'IOB_Y1':
        lo, hi = 0, 63
    else:
        assert False, site

    # Bits are named "<frame>_<bitidx>"; keep those whose index falls in
    # this site's half.
    return frozenset(b for b in bits if lo <= int(b.split('_')[1]) <= hi)
def main():
    # Entry point: read the raw .rdb, merge IOSTANDARD/DRIVE bit sets and
    # print a cleaned-up database to stdout.
    parser = argparse.ArgumentParser(
        description="Convert IOB rdb into good rdb."
        "")
    parser.add_argument('input_rdb')
    args = parser.parse_args()

    # Split input lines: IOSTANDARD-related IOB features are collected for
    # processing; everything else passes through unchanged.
    iostandard_lines = []
    with open(args.input_rdb) as f:
        for l in f:
            if ('.SSTL' in l or '.LVCMOS' in l
                    or '.LVTTL' in l) and 'IOB_' in l:
                iostandard_lines.append(l)
            else:
                print(l.strip())

    # Parse each collected line into a nested map:
    # sites[site][group][(iostandard, enum)] -> frozenset of bits.
    # DRIVE and SLEW carry an extra enum field; the other groups do not.
    sites = {}
    for l in iostandard_lines:
        feature = get_name(l)
        feature_parts = feature.split('.')
        site = get_site(l)
        iostandard = feature_parts[2]
        bits = parse_bits(l)
        bits = filter_bits(site, bits)
        if site not in sites:
            sites[site] = {}
        group = feature_parts[3]
        if group not in sites[site]:
            sites[site][group] = {}
        if group in ['DRIVE', 'SLEW']:
            enum = feature_parts[4]
            sites[site][group][(iostandard, enum)] = bits
        elif group in ['IN', 'IN_DIFF', 'IN_ONLY', 'IN_USE', 'OUT',
                       'STEPDOWN']:
            sites[site][group][(iostandard, None)] = bits
        else:
            assert False, group

    # Fold OUT bits into each DRIVE pattern, and strip IN bits out of
    # IN_ONLY (IN_ONLY bits are those set for IN-only ports but cleared
    # for OUT/INOUT -- see the module docstring).
    for site in sites:
        for iostandard, enum in sites[site]['DRIVE']:
            sites[site]['DRIVE'][(iostandard, enum)] |= sites[site]['OUT'][(
                iostandard, None)]
        for iostandard, enum in sites[site]['IN']:
            sites[site]['IN_ONLY'][(iostandard, enum)] -= sites[site]['IN'][(
                iostandard, enum)]

    # common_bits[(site, group)] = union of all bits ever seen for that
    # group at that site; later used to emit the negated (!bit) terms.
    common_bits = {}
    for site in sites:
        for group in sites[site]:
            if (site, group) not in common_bits:
                common_bits[(site, group)] = set()
            for bits in sites[site][group].values():
                common_bits[(site, group)] |= bits

    # Disentangle overlapping groups: SLEW and STEPDOWN bits are removed
    # from DRIVE; any SLEW bits found inside a DRIVE pattern are moved
    # over to the corresponding SLEW entries (collected per iostandard in
    # slew_in_drives).
    slew_in_drives = {}
    for site in sites:
        common_bits[(site, 'DRIVE')] -= common_bits[(site, 'SLEW')]
        common_bits[(site, 'DRIVE')] -= common_bits[(site, 'STEPDOWN')]
        common_bits[(site, 'IN_ONLY')] |= common_bits[(site, 'DRIVE')]
        common_bits[(site, 'IN_ONLY')] -= common_bits[(site, 'STEPDOWN')]
        common_bits[(site, 'IN')] |= common_bits[(site, 'IN_DIFF')]
        common_bits[(site, 'IN_DIFF')] |= common_bits[(site, 'IN')]

        for iostandard, enum in sites[site]['DRIVE']:
            slew_in_drive = common_bits[
                (site, 'SLEW')] & sites[site]['DRIVE'][(iostandard, enum)]

            if slew_in_drive:
                if (site, iostandard) not in slew_in_drives:
                    slew_in_drives[(site, iostandard)] = set()

                slew_in_drives[(site, iostandard)] |= slew_in_drive
                sites[site]['DRIVE'][(iostandard, enum)] -= slew_in_drive

            sites[site]['DRIVE'][(iostandard,
                                  enum)] -= common_bits[(site, 'STEPDOWN')]

    # Push the relocated SLEW bits into every SLEW enum for that iostandard.
    for site, iostandard in slew_in_drives:
        for _, enum in sites[site]['SLEW']:
            sites[site]['SLEW'][(iostandard,
                                 enum)] |= slew_in_drives[(site, iostandard)]

    # Merge IN_USE into DRIVE and IN into non-empty IN_DIFF patterns.
    for site in sites:
        for iostandard, enum in sites[site]['DRIVE']:
            sites[site]['DRIVE'][(iostandard, enum)] |= sites[site]['IN_USE'][(
                iostandard, None)]

        for iostandard, enum in sites[site]['IN']:
            if sites[site]['IN_DIFF'][(iostandard, enum)]:
                sites[site]['IN_DIFF'][(iostandard, enum)] |= \
                    sites[site]['IN'][(iostandard, enum)]

    # OUT and IN_USE have been folded into other groups; drop them so
    # they are not emitted.
    for site in sites:
        del sites[site]['OUT']
        del sites[site]['IN_USE']

    # Groups that may legitimately be emitted with an empty bit set.
    allow_zero = ['SLEW']

    # Emit the cleaned database: group identical bit patterns, then print
    # one line per pattern with explicit negated bits.
    for site in sites:
        for group in sites[site]:
            common_groups = {}

            # Merge features that are identical.
            #
            # For example:
            #
            #  IOB33.IOB_Y1.LVCMOS15.IN 38_42 39_41
            #  IOB33.IOB_Y1.LVCMOS18.IN 38_42 39_41
            #
            # Must be grouped.
            for (iostandard, enum), bits in sites[site][group].items():
                if bits not in common_groups:
                    common_groups[bits] = {
                        'IOSTANDARDS': set(),
                        'enums': set(),
                    }
                common_groups[bits]['IOSTANDARDS'].add(iostandard)
                if enum is not None:
                    common_groups[bits]['enums'].add(enum)

            for bits, v in common_groups.items():
                # Feature name: merged iostandards (and enums, if any)
                # joined with underscores.
                if v['enums']:
                    feature = 'IOB33.{site}.{iostandards}.{group}.{enums}'.format(
                        site=site,
                        iostandards='_'.join(sorted(v['IOSTANDARDS'])),
                        group=group,
                        enums='_'.join(sorted(v['enums'])),
                    )
                else:
                    feature = 'IOB33.{site}.{iostandards}.{group}'.format(
                        site=site,
                        iostandards='_'.join(sorted(v['IOSTANDARDS'])),
                        group=group,
                    )

                if not bits and group not in allow_zero:
                    continue

                # Bits in the group-wide union but not in this pattern are
                # emitted negated ('!bit').
                neg_bits = frozenset(
                    '!{}'.format(b)
                    for b in (common_bits[(site, group)] - bits))
                print(
                    '{} {}'.format(feature, ' '.join(sorted(bits | neg_bits))))
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 5,816 | 0 | 119 |
26b0f3e5bae776ac1dad54c81206538a9984b7fb | 2,175 | py | Python | pset8/mashup/helpers.py | Star1111/cs50 | 0ef91b4558e4e080512045cb5035ecf9f9294047 | [
"Unlicense"
] | 2 | 2020-11-03T08:31:31.000Z | 2021-03-20T16:40:34.000Z | pset8/mashup/helpers.py | Star1111/cs50 | 0ef91b4558e4e080512045cb5035ecf9f9294047 | [
"Unlicense"
] | null | null | null | pset8/mashup/helpers.py | Star1111/cs50 | 0ef91b4558e4e080512045cb5035ecf9f9294047 | [
"Unlicense"
] | null | null | null | import feedparser
import urllib.parse
from random import shuffle, seed
UKR_NEWS = ["https://news.yandex.ua/index.rss", "http://www.ukr-portal.com/php/rss_1.xml", "http://news.finance.ua/ru/rss", "http://www.ua.rian.ru/export/rss2/index.xml", "http://feeds.feedburner.com/zaxid/rss_ua", "http://www.dt.ua/export.rss", "https://malina-mix.com/anekdots.xml"]
def lookup(geo, lang="us"):
"""Looks up articles for geo."""
# check cache for geo
if geo in lookup.cache:
if lookup.query_counter[geo] < 10:
lookup.query_counter[geo] += 1
return lookup.cache[geo]
else:
del lookup.cache[geo]
del lookup.query_counter[geo]
if geo == "H++":
lookup.cache[geo] = {"link": "http://programming.kr.ua/ru", "title": "Главная"}, {"link": "http://programming.kr.ua/ru/news", "title": "News"}, {"link": "http://programming.kr.ua/ru/potential", "title": "Возможности"}, {"link": "http://programming.kr.ua/ru/about#contacts", "title": "Контакты"}
lookup.query_counter[geo] = 1
return lookup.cache[geo]
url = "http://news.google.com/news?ned=" + lang+ "&geo={}&output=rss"
# get feed from Google
feed = feedparser.parse(url.format(urllib.parse.quote(geo, safe="")))
# if no items in feed, get feed from other
if not feed["items"]:
if lang == "ru_ua":
# get random UKR_NEWS
seed()
shuffle(UKR_NEWS)
feed = feedparser.parse(UKR_NEWS[0])
if not feed["items"]:
# there is always news
feed = feedparser.parse("http://feeds.feedburner.com/zaxid/rss_ua")
else:
# get from Onion
feed = feedparser.parse("http://www.theonion.com/feeds/rss")
# cache results
lookup.cache[geo] = [{"link": item["link"], "title": item["title"]} for item in feed["items"]]
# add counter
lookup.query_counter[geo] = 1
# return results
return lookup.cache[geo]
# initialize cache
lookup.cache = {}
# initialize query counter
lookup.query_counter = {}
| 35.655738 | 302 | 0.581149 | import feedparser
import urllib.parse
from random import shuffle, seed
UKR_NEWS = ["https://news.yandex.ua/index.rss", "http://www.ukr-portal.com/php/rss_1.xml", "http://news.finance.ua/ru/rss", "http://www.ua.rian.ru/export/rss2/index.xml", "http://feeds.feedburner.com/zaxid/rss_ua", "http://www.dt.ua/export.rss", "https://malina-mix.com/anekdots.xml"]
def lookup(geo, lang="us"):
"""Looks up articles for geo."""
# check cache for geo
if geo in lookup.cache:
if lookup.query_counter[geo] < 10:
lookup.query_counter[geo] += 1
return lookup.cache[geo]
else:
del lookup.cache[geo]
del lookup.query_counter[geo]
if geo == "H++":
lookup.cache[geo] = {"link": "http://programming.kr.ua/ru", "title": "Главная"}, {"link": "http://programming.kr.ua/ru/news", "title": "News"}, {"link": "http://programming.kr.ua/ru/potential", "title": "Возможности"}, {"link": "http://programming.kr.ua/ru/about#contacts", "title": "Контакты"}
lookup.query_counter[geo] = 1
return lookup.cache[geo]
url = "http://news.google.com/news?ned=" + lang+ "&geo={}&output=rss"
# get feed from Google
feed = feedparser.parse(url.format(urllib.parse.quote(geo, safe="")))
# if no items in feed, get feed from other
if not feed["items"]:
if lang == "ru_ua":
# get random UKR_NEWS
seed()
shuffle(UKR_NEWS)
feed = feedparser.parse(UKR_NEWS[0])
if not feed["items"]:
# there is always news
feed = feedparser.parse("http://feeds.feedburner.com/zaxid/rss_ua")
else:
# get from Onion
feed = feedparser.parse("http://www.theonion.com/feeds/rss")
# cache results
lookup.cache[geo] = [{"link": item["link"], "title": item["title"]} for item in feed["items"]]
# add counter
lookup.query_counter[geo] = 1
# return results
return lookup.cache[geo]
# initialize cache
lookup.cache = {}
# initialize query counter
lookup.query_counter = {}
| 0 | 0 | 0 |
51d1b802f5cf738b16c5ae33596de8b712c69625 | 12,438 | py | Python | md.py | dormrod/molecular_dynamics_300_lines | 4c0993436af0d048fb0ccf56416156a3ff9575dc | [
"MIT"
] | 1 | 2021-11-28T03:50:43.000Z | 2021-11-28T03:50:43.000Z | md.py | dormrod/molecular_dynamics_300_lines | 4c0993436af0d048fb0ccf56416156a3ff9575dc | [
"MIT"
] | null | null | null | md.py | dormrod/molecular_dynamics_300_lines | 4c0993436af0d048fb0ccf56416156a3ff9575dc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Single Molecule Molecular Dynamics Code
Created 2018 by David of Theoretically Speaking
Please Modify!
"""
from __future__ import print_function
import os
import sys
import numpy as np
# Global variables for unit conversions
hartree = 4.35974465e-18 # J, atomic unit of energy
emass = 5.486e-4 # kg
dalton = 1.660539040e-27 # kg
avo = 6.02214086e23 # mol^-1
emass = 9.109534e-28 # g, atomic unit of mass
boltz = 1.38064852e-23 / hartree # E_h K^-1
bohr = 0.52917721067 # Angstroms
hbar = 6.626070040e-34 # Js
atomic_time = hbar / hartree
# Global files to prevent constant opening/closing
xyz_file = open("coordinates.xyz", "w")
energy_file = open("energies.dat", "w")
def display_header():
"""Write opening message to screen"""
print_dashed_line()
print("Welcome to the Theoretically Speaking molecular dynamics code")
print_dashed_line()
def print_dashed_line(length = 65):
"""Write --- line of given length to screen"""
line = "-" * length
print(line)
def string_to_boolean(string):
"""Converts input string of True or False to a boolean True or False"""
string = string.lower().strip()
true_strings = ["true", "t"]
false_strings = ["false", "f"]
if string in true_strings: return True
elif string in false_strings: return False
raise ValueError("Bad Boolean Value: " + string)
def get_input_parameters():
    """Interactively select an input file and parse it into a parameter dict.

    Scans the working tree for files ending in "inpt", asks the user to pick
    one, then reads three sections in fixed order: simulation parameters,
    atom data, bond data.  Quantities are converted to atomic units on read
    (ps -> code time units, Angstrom -> bohr, g/mol -> electron masses,
    kJ/mol/A^2 -> hartree/bohr^2).  Exits the program on any parse failure.
    """
    # Get list of available input files
    input_files = get_recursive_file_list("inpt")
    # Ask user to select input file from list
    if len(input_files) == 0: # If cannot find any input files close program
        print("No available input files. Exiting.")
        sys.exit()
    else:
        while True:
            print("Select an input file from the list:")
            for i, file in enumerate(input_files):
                print("[{0}] {1}".format(i, file))
            # Re-prompt forever on any invalid entry (non-integer, out of range)
            try:
                user_selection = int(input())
                input_file = input_files[user_selection]
                print("Input file selected: {0}".format(input_file))
                print_dashed_line()
                break
            except: pass
    # Open input file and read parameters into dictionary
    # NOTE: sections must appear in the file in exactly this order; readline()
    # calls advance a shared cursor through the file
    parameters = {}
    with open(input_file, "r") as file:
        print("Reading input file")
        # Skip header
        for i in range(2): file.readline()
        # Simulation parameters (times in ps -> code units, lengths A -> bohr)
        try:
            for i in range(2): file.readline()
            parameters["time_total"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
            parameters["time_step"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
            parameters["box_size"] = float(file.readline().split()[0]) / bohr
            parameters["write_freq"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
            print(" - Simulation parameters read")
        except:
            print("Error in simulation parameters")
            sys.exit()
        # Atom data
        try:
            for i in range(2): file.readline()
            num_atoms = parameters["num_atoms"] = int(file.readline().split()[0])
            parameters["random_displacement"] = string_to_boolean(file.readline().split()[0])
            parameters["random_displacement_limit"] = float(file.readline().split()[0]) / bohr
            file.readline() # skip comment
            name_to_index = {} # dictionary to convert atom name to array index
            parameters["atom_names"] = [] # empty list for names
            parameters["atom_masses"] = np.empty(num_atoms) # empty array for masses
            parameters["atom_crds"] = np.empty([num_atoms, 3]) # empty array for coordinates
            for i in range(num_atoms):
                line = file.readline().split()
                name_to_index[line[0]] = i
                parameters["atom_names"].append(line[0])
                parameters["atom_masses"][i] = float(line[1]) / (avo * emass)
                parameters["atom_crds"][i] = np.array(line[2:5], dtype = float) / bohr
            print(" - Atom data read")
        except:
            print("Error in atom data")
            sys.exit()
        # Bond Data (pairs given by atom name, resolved via name_to_index)
        try:
            for i in range(2): file.readline()
            num_bonds = parameters["num_bonds"] = int(file.readline().split()[0])
            file.readline() # skip comment
            parameters["bond_pairs"] = np.empty([num_bonds, 2], dtype=int) # empty array for indices of bonded atom pairs
            parameters["bond_params"] = np.empty([num_bonds, 2]) # empty array for harmonic bond r0 and k
            for i in range(num_bonds):
                line = file.readline().split()
                parameters["bond_pairs"][i, 0] = name_to_index[line[0]]
                parameters["bond_pairs"][i, 1] = name_to_index[line[1]]
                parameters["bond_params"][i, 0] = float(line[2]) / bohr
                parameters["bond_params"][i, 1] = float(line[3]) * (bohr * 1e-10)**2 / hartree
            print(" - Bond data read")
        except:
            print("Error in bond data")
            sys.exit()
    print("Read successful")
    print_dashed_line()
    return parameters
def get_recursive_file_list(ext):
    """Return paths of every file under the current directory (recursively)
    whose name ends with `ext`."""
    matches = []
    for dirpath, _dirnames, filenames in os.walk("./"):
        matches.extend(
            os.path.join(dirpath, name) for name in filenames if name.endswith(ext)
        )
    return matches
def apply_periodic_boundary_condition(crds, box_size):
    """Wrap coordinates back into [0, box_size] (one wrap per call; `crds`
    is modified in place and also returned)."""
    below = crds < 0
    above = crds > box_size
    crds[below] += box_size
    crds[above] -= box_size
    return crds
def minimum_image_displacement(crd_0, crd_1, box_size):
    """Return crd_0 - crd_1 mapped to the nearest periodic image, i.e. each
    component folded into [-box_size/2, box_size/2]."""
    half_box = box_size / 2
    disp = crd_0 - crd_1
    disp[disp < -half_box] += box_size
    disp[disp > half_box] -= box_size
    return disp
def initialise_coordinates(crds, box_size, displace, limit):
    """Shift atoms to the box centre, wrap them into the box, and (optionally)
    jitter each coordinate by a uniform random amount in [-limit, limit]."""
    crds += box_size / 2
    crds = apply_periodic_boundary_condition(crds, box_size)
    if displace:
        crds += np.random.uniform(low=-limit, high=limit, size=crds.shape)
    return crds
def calculate_energy(masses, crds, velocities, bond_pairs, bond_params, box_size):
    """Return the system energies as np.array([kinetic, potential, total]).

    Kinetic energy: sum over atoms of 0.5 * m * |v|^2.
    Potential energy: sum over harmonic bonds of 0.5 * k * (r - r0)^2, using
    minimum-image distances.

    BUG FIX: the original assigned `potential_energy` inside the loop instead
    of accumulating, so only the LAST bond contributed; with zero bonds it
    raised NameError.  Now the contributions are summed and zero bonds give
    potential energy 0.
    """
    kinetic_energy = 0.5 * (masses * np.sum(velocities ** 2, axis=1)).sum()
    potential_energy = 0.0
    for i, bond in enumerate(bond_pairs):
        atom_0, atom_1 = bond[0], bond[1]
        displacement = minimum_image_displacement(crds[atom_0, :], crds[atom_1, :], box_size)
        distance = np.linalg.norm(displacement)
        potential_energy += 0.5 * bond_params[i, 1] * (distance - bond_params[i, 0]) ** 2
    total_energy = kinetic_energy + potential_energy
    return np.array([kinetic_energy, potential_energy, total_energy])
def update_accelerations(masses, crds, bond_pairs, bond_params, box_size):
    """Return per-atom accelerations from harmonic bond forces.

    Each bond exerts a Hooke's-law force F = -k (r - r0) along the
    minimum-image bond vector; Newton's second and third laws give equal and
    opposite accelerations on the two bonded atoms.
    """
    accelerations = np.zeros_like(crds)
    for i, (atom_0, atom_1) in enumerate(bond_pairs):
        bond_vector = minimum_image_displacement(crds[atom_0, :], crds[atom_1, :], box_size)
        bond_length = np.linalg.norm(bond_vector)
        unit_vector = bond_vector / bond_length
        magnitude = -bond_params[i, 1] * (bond_length - bond_params[i, 0])
        force = magnitude * unit_vector
        accelerations[atom_0] += force / masses[atom_0]
        accelerations[atom_1] -= force / masses[atom_1]
    return accelerations
def update_coordinates(crds, accelerations, velocities, time_step, box_size):
    """Position half of velocity-Verlet: x(t+dt) = x(t) + v dt + 0.5 a dt^2,
    then wrap the result back into the simulation box."""
    crds += velocities * time_step + 0.5 * accelerations * time_step ** 2
    return apply_periodic_boundary_condition(crds, box_size)
def update_velocities(velocities, accelerations_start, accelerations_end, time_step):
    """Velocity half of velocity-Verlet:
    v(t+dt) = v(t) + 0.5 dt (a(t) + a(t+dt)); modifies `velocities` in place."""
    mean_acc_dt = 0.5 * time_step * (accelerations_start + accelerations_end)
    velocities += mean_acc_dt
    return velocities
def write_output_files(time_step, num_atoms, names, crds, energies):
    """Append one frame to the module-level output files.

    Writes an XYZ frame (coordinates converted bohr -> Angstrom) to
    'coordinates.xyz' and a line of kinetic/potential/total energies
    (converted hartree -> kJ/mol) to 'energies.dat'.
    NOTE(review): the `time_step` argument is actually the step INDEX
    (callers pass `0` and `step`), not a time increment.
    """
    # Write XYZ file
    xyz_file.write("{0} \n\n".format(num_atoms))
    for i, crd in enumerate(crds):
        xyz = crd * bohr  # bohr -> Angstrom for the XYZ format
        xyz_file.write("{0} {1:.6f} {2:.6f} {3:.6f} \n".format(names[i], xyz[0], xyz[1], xyz[2]))
    # Write energies
    energy = energies * hartree * avo * 1e-3  # hartree -> kJ/mol
    energy_file.write("{0} {1} {2} {3} \n".format(time_step, energy[0], energy[1], energy[2]))
def main():
    """Drive the simulation: read input, run velocity-Verlet MD, write output.

    Loops `num_steps` times; coordinates and energies are written every
    `write_steps` steps (plus the initial frame at step 0).
    """
    # Display opening message
    display_header()
    # Read user parameters from input file
    input_parameters = get_input_parameters()
    # Unpack parameters
    time_total = input_parameters["time_total"]
    time_step = input_parameters["time_step"]
    box_size = input_parameters["box_size"]
    write_freq = input_parameters["write_freq"]
    num_atoms = input_parameters["num_atoms"]
    displace_atoms = input_parameters["random_displacement"]
    displacement_limit = input_parameters["random_displacement_limit"]
    atom_names = input_parameters["atom_names"]
    atom_masses = input_parameters["atom_masses"]
    atom_crds = input_parameters["atom_crds"]
    bond_pairs = input_parameters["bond_pairs"]
    bond_params = input_parameters["bond_params"]
    # Recentre coordinates and apply displacements
    atom_crds = initialise_coordinates(atom_crds, box_size, displace_atoms, displacement_limit)
    # Initialise Molecular Dynamics Variables
    num_steps = int(time_total / time_step) # total number of steps of md
    write_steps = int(write_freq / time_step) # number of steps to write out results
    atom_vels = np.zeros_like(atom_crds) # velocities in x,y,z directions for all atoms
    atom_acc_start = atom_acc_end = np.zeros_like(atom_crds) # accelerations at start and end of time step
    atom_acc_start = update_accelerations(atom_masses, atom_crds, bond_pairs, bond_params, box_size) # calculate initial accelerations
    system_energy = calculate_energy(atom_masses, atom_crds, atom_vels, bond_pairs, bond_params, box_size) # calculate initial energies
    write_output_files(0, num_atoms, atom_names, atom_crds, system_energy)
    # Molecular dynamics
    print("Performing molecular dynamics simulation")
    for step in range(1, num_steps+1):
        # Velocity - Verlet algorithm: positions first, then new forces,
        # then velocities from the average of old and new accelerations
        atom_crds = update_coordinates(atom_crds, atom_acc_start, atom_vels, time_step, box_size)
        atom_acc_end = update_accelerations(atom_masses, atom_crds, bond_pairs, bond_params, box_size)
        atom_vels = update_velocities(atom_vels, atom_acc_start, atom_acc_end, time_step)
        atom_acc_start = atom_acc_end
        # Write coordinates and energies
        if step % write_steps == 0:
            system_energy = calculate_energy(atom_masses, atom_crds, atom_vels, bond_pairs, bond_params, box_size)
            write_output_files(step, num_atoms, atom_names, atom_crds, system_energy)
            print("Completion: {:.3f}%".format(100 * float(step) / num_steps))
    print_dashed_line()
    print("Simulation complete \nCoordinates written to coordinates.xyz \nEnergies written to energies.dat")
    print_dashed_line()
# Run the simulation only when executed as a script (not on import)
if __name__ == "__main__":
    main()
| 41.46 | 136 | 0.659752 | # -*- coding: utf-8 -*-
"""
Single Molecule Molecular Dynamics Code
Created 2018 by David of Theoretically Speaking
Please Modify!
"""
from __future__ import print_function
import os
import sys
import numpy as np
# Global variables for unit conversions
hartree = 4.35974465e-18 # J, atomic unit of energy
emass = 5.486e-4 # kg
dalton = 1.660539040e-27 # kg
avo = 6.02214086e23 # mol^-1
emass = 9.109534e-28 # g, atomic unit of mass
boltz = 1.38064852e-23 / hartree # E_h K^-1
bohr = 0.52917721067 # Angstroms
hbar = 6.626070040e-34 # Js
atomic_time = hbar / hartree
# Global files to prevent constant opening/closing
xyz_file = open("coordinates.xyz", "w")
energy_file = open("energies.dat", "w")
def display_header():
"""Write opening message to screen"""
print_dashed_line()
print("Welcome to the Theoretically Speaking molecular dynamics code")
print_dashed_line()
def print_dashed_line(length = 65):
"""Write --- line of given length to screen"""
line = "-" * length
print(line)
def string_to_boolean(string):
"""Converts input string of True or False to a boolean True or False"""
string = string.lower().strip()
true_strings = ["true", "t"]
false_strings = ["false", "f"]
if string in true_strings: return True
elif string in false_strings: return False
raise ValueError("Bad Boolean Value: " + string)
def get_input_parameters():
"""Ask user for input file name, read input parameters and store in dictionary"""
# Get list of available input files
input_files = get_recursive_file_list("inpt")
# Ask user to select input file from list
if len(input_files) == 0: # If cannot find any input files close program
print("No available input files. Exiting.")
sys.exit()
else:
while True:
print("Select an input file from the list:")
for i, file in enumerate(input_files):
print("[{0}] {1}".format(i, file))
try:
user_selection = int(input())
input_file = input_files[user_selection]
print("Input file selected: {0}".format(input_file))
print_dashed_line()
break
except: pass
# Open input file and read parameters into dictionary
parameters = {}
with open(input_file, "r") as file:
print("Reading input file")
# Skip header
for i in range(2): file.readline()
# Simulation parameters
try:
for i in range(2): file.readline()
parameters["time_total"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
parameters["time_step"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
parameters["box_size"] = float(file.readline().split()[0]) / bohr
parameters["write_freq"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
print(" - Simulation parameters read")
except:
print("Error in simulation parameters")
sys.exit()
# Atom data
try:
for i in range(2): file.readline()
num_atoms = parameters["num_atoms"] = int(file.readline().split()[0])
parameters["random_displacement"] = string_to_boolean(file.readline().split()[0])
parameters["random_displacement_limit"] = float(file.readline().split()[0]) / bohr
file.readline() # skip comment
name_to_index = {} # dictionary to convert atom name to array index
parameters["atom_names"] = [] # empty list for names
parameters["atom_masses"] = np.empty(num_atoms) # empty array for masses
parameters["atom_crds"] = np.empty([num_atoms, 3]) # empty array for coordinates
for i in range(num_atoms):
line = file.readline().split()
name_to_index[line[0]] = i
parameters["atom_names"].append(line[0])
parameters["atom_masses"][i] = float(line[1]) / (avo * emass)
parameters["atom_crds"][i] = np.array(line[2:5], dtype = float) / bohr
print(" - Atom data read")
except:
print("Error in atom data")
sys.exit()
# Bond Data
try:
for i in range(2): file.readline()
num_bonds = parameters["num_bonds"] = int(file.readline().split()[0])
file.readline() # skip comment
parameters["bond_pairs"] = np.empty([num_bonds, 2], dtype=int) # empty array for indices of bonded atom pairs
parameters["bond_params"] = np.empty([num_bonds, 2]) # empty array for harmonic bond r0 and k
for i in range(num_bonds):
line = file.readline().split()
parameters["bond_pairs"][i, 0] = name_to_index[line[0]]
parameters["bond_pairs"][i, 1] = name_to_index[line[1]]
parameters["bond_params"][i, 0] = float(line[2]) / bohr
parameters["bond_params"][i, 1] = float(line[3]) * (bohr * 1e-10)**2 / hartree
print(" - Bond data read")
except:
print("Error in bond data")
sys.exit()
print("Read successful")
print_dashed_line()
return parameters
def get_recursive_file_list(ext):
"""Get list of files with specifed extension in current directory and all subdirectories"""
# Search over all files in all subdirectories, add to list if have required extension
files = []
for dirpath, dirname, filenames in os.walk("./"):
for filename in filenames:
if filename.endswith(ext):
filepath = os.path.join(dirpath,filename)
files.append(filepath)
return files
def apply_periodic_boundary_condition(crds, box_size):
"""Apply periodicity to keep atoms within simulation box"""
crds[crds < 0] += box_size
crds[crds > box_size] -= box_size
return crds
def minimum_image_displacement(crd_0, crd_1, box_size):
"""Find displacement between nearest periodic images of atom pair"""
displacement = crd_0 - crd_1
displacement[displacement < -box_size / 2] += box_size
displacement[displacement > box_size / 2] -= box_size
return displacement
def initialise_coordinates(crds, box_size, displace, limit):
"""Recentre atoms in simulation box, apply periodic boundary, apply random displacement"""
crds += box_size / 2
crds = apply_periodic_boundary_condition(crds, box_size)
if displace:
displacements = np.random.uniform(low = -limit, high = limit, size = crds.shape)
crds += displacements
return crds
def calculate_energy(masses, crds, velocities, bond_pairs, bond_params, box_size):
"""Calculate kinetic, potential and total energy of system"""
kinetic_energy = 0.5 * (masses * np.sum(velocities ** 2, axis=1)).sum() # U=0.5*m*v^2
# Calculate harmonic potential energy using: U=0.5*k(r-r0)^2
for i, bond in enumerate(bond_pairs):
atom_0, atom_1 = bond[0], bond[1]
displacement = minimum_image_displacement(crds[atom_0, :], crds[atom_1, :], box_size)
distance = np.linalg.norm(displacement)
potential_energy = 0.5 * bond_params[i, 1] * (distance - bond_params[i, 0]) ** 2
total_energy = kinetic_energy + potential_energy # Total energy as sum of ke and pe
return np.array([kinetic_energy, potential_energy, total_energy])
def update_accelerations(masses, crds, bond_pairs, bond_params, box_size):
"""Calculate the acceleration on each atom using potential model and Newton's laws of motion"""
# Calculate forces using Hooke's law: F=-k(r-r0)
# Convert to acceleration using Newton's laws: F=ma, action has opposite reaction
accelerations = np.zeros_like(crds) # x,y,z accelerations for each atom
for i, bond in enumerate(bond_pairs):
atom_0, atom_1 = bond[0], bond[1]
displacement = minimum_image_displacement(crds[atom_0, :], crds[atom_1, :], box_size)
distance = np.linalg.norm(displacement)
force_direction = displacement / distance
force_magnitude = - bond_params[i, 1] * (distance - bond_params[i, 0])
force = force_magnitude * force_direction
accelerations[atom_0] += force / masses[atom_0]
accelerations[atom_1] -= force / masses[atom_1]
return accelerations
def update_coordinates(crds, accelerations, velocities, time_step, box_size):
"""Update coordinates using: x(t+dt)=x(t)+v(t)*dt+0.5*a(t)*dt**2"""
crds += velocities * time_step + 0.5 * accelerations * time_step ** 2
crds = apply_periodic_boundary_condition(crds, box_size)
return crds
def update_velocities(velocities, accelerations_start, accelerations_end, time_step):
"""Update velocities using: v(t+dt)=v(t)+0.5*dt*(a(t)+a(t+dt))"""
velocities += 0.5 * time_step * (accelerations_start + accelerations_end)
return velocities
def write_output_files(time_step, num_atoms, names, crds, energies):
"""Writes coordinates in XYZ file type to 'coordinates.xyz'
Write kinetic, potential and total energies to 'energies.dat'"""
# Write XYZ file
xyz_file.write("{0} \n\n".format(num_atoms))
for i, crd in enumerate(crds):
xyz = crd * bohr
xyz_file.write("{0} {1:.6f} {2:.6f} {3:.6f} \n".format(names[i], xyz[0], xyz[1], xyz[2]))
# Write energies
energy = energies * hartree * avo * 1e-3
energy_file.write("{0} {1} {2} {3} \n".format(time_step, energy[0], energy[1], energy[2]))
def main():
"""Handle input/output and molecular dynamics velocity-verlet algorithm"""
# Display opening message
display_header()
# Read user parameters from input file
input_parameters = get_input_parameters()
# Unpack parameters
time_total = input_parameters["time_total"]
time_step = input_parameters["time_step"]
box_size = input_parameters["box_size"]
write_freq = input_parameters["write_freq"]
num_atoms = input_parameters["num_atoms"]
displace_atoms = input_parameters["random_displacement"]
displacement_limit = input_parameters["random_displacement_limit"]
atom_names = input_parameters["atom_names"]
atom_masses = input_parameters["atom_masses"]
atom_crds = input_parameters["atom_crds"]
bond_pairs = input_parameters["bond_pairs"]
bond_params = input_parameters["bond_params"]
# Recentre coordinates and apply displacements
atom_crds = initialise_coordinates(atom_crds, box_size, displace_atoms, displacement_limit)
# Initialise Molecular Dynamics Variables
num_steps = int(time_total / time_step) # total number of steps of md
write_steps = int(write_freq / time_step) # number of steps to write out results
atom_vels = np.zeros_like(atom_crds) # velocities in x,y,z directions for all atoms
atom_acc_start = atom_acc_end = np.zeros_like(atom_crds) # accelerations at start and end of time step
atom_acc_start = update_accelerations(atom_masses, atom_crds, bond_pairs, bond_params, box_size) # calculate initial accelerations
system_energy = calculate_energy(atom_masses, atom_crds, atom_vels, bond_pairs, bond_params, box_size) # calculate initial energies
write_output_files(0, num_atoms, atom_names, atom_crds, system_energy)
# Molecular dynamics
print("Performing molecular dynamics simulation")
for step in range(1, num_steps+1):
# Velocity - Verlet algorithm
atom_crds = update_coordinates(atom_crds, atom_acc_start, atom_vels, time_step, box_size)
atom_acc_end = update_accelerations(atom_masses, atom_crds, bond_pairs, bond_params, box_size)
atom_vels = update_velocities(atom_vels, atom_acc_start, atom_acc_end, time_step)
atom_acc_start = atom_acc_end
# Write coordinates and energies
if step % write_steps == 0:
system_energy = calculate_energy(atom_masses, atom_crds, atom_vels, bond_pairs, bond_params, box_size)
write_output_files(step, num_atoms, atom_names, atom_crds, system_energy)
print("Completion: {:.3f}%".format(100 * float(step) / num_steps))
print_dashed_line()
print("Simulation complete \nCoordinates written to coordinates.xyz \nEnergies written to energies.dat")
print_dashed_line()
# Execute code if main file
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
8c6bf3b641e19bfc1b8c5b7ba9cc7a8c661082f1 | 385 | py | Python | arcade_solutions/the_core/minimal_number_of_coins.py | nickaigi/automatic-dollop | eb8222475c7871c1d5710242c5aed8c70ea0d2c8 | [
"Unlicense"
] | null | null | null | arcade_solutions/the_core/minimal_number_of_coins.py | nickaigi/automatic-dollop | eb8222475c7871c1d5710242c5aed8c70ea0d2c8 | [
"Unlicense"
] | null | null | null | arcade_solutions/the_core/minimal_number_of_coins.py | nickaigi/automatic-dollop | eb8222475c7871c1d5710242c5aed8c70ea0d2c8 | [
"Unlicense"
] | null | null | null |
if __name__ == '__main__':
coins = [1, 2, 10]
price = 28
print(minimal_number_of_coins(coins, price))
def minimal_number_of_coins(coins, price):
    """Return the minimum number of coins needed to pay exactly `price`,
    given denominations `coins` (unlimited supply of each).

    Uses dynamic programming over values 0..price.  The original greedy
    approach was wrong for non-canonical coin systems (e.g. coins=[1, 3, 4],
    price=6: greedy gives 3, the optimum is 2) and assumed `coins` was
    sorted ascending.

    Raises ValueError if `price` cannot be formed exactly (the original
    also ended in a ValueError in that case, via max() on an empty slice).
    """
    INF = float("inf")
    # best[v] = fewest coins summing exactly to v
    best = [0] + [INF] * price
    for value in range(1, price + 1):
        for coin in coins:
            if coin <= value and best[value - coin] + 1 < best[value]:
                best[value] = best[value - coin] + 1
    if best[price] == INF:
        raise ValueError("price cannot be formed from the given coins")
    return best[price]
if __name__ == '__main__':
coins = [1, 2, 10]
price = 28
print(minimal_number_of_coins(coins, price))
| 246 | 0 | 22 |
5f1d561e0888224c7a27c412dd415c7268cd26e8 | 1,284 | py | Python | extensions/.stubs/clrclasses/System/Security/Principal/__init__.py | vicwjb/Pycad | 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | [
"MIT"
] | 1 | 2020-03-25T03:27:24.000Z | 2020-03-25T03:27:24.000Z | extensions/.stubs/clrclasses/System/Security/Principal/__init__.py | vicwjb/Pycad | 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | [
"MIT"
] | null | null | null | extensions/.stubs/clrclasses/System/Security/Principal/__init__.py | vicwjb/Pycad | 7391cd694b7a91ad9f9964ec95833c1081bc1f84 | [
"MIT"
] | null | null | null | from __clrclasses__.System.Security.Principal import GenericIdentity
from __clrclasses__.System.Security.Principal import GenericPrincipal
from __clrclasses__.System.Security.Principal import IdentityNotMappedException
from __clrclasses__.System.Security.Principal import IdentityReference
from __clrclasses__.System.Security.Principal import IdentityReferenceCollection
from __clrclasses__.System.Security.Principal import IIdentity
from __clrclasses__.System.Security.Principal import IPrincipal
from __clrclasses__.System.Security.Principal import NTAccount
from __clrclasses__.System.Security.Principal import PrincipalPolicy
from __clrclasses__.System.Security.Principal import SecurityIdentifier
from __clrclasses__.System.Security.Principal import TokenAccessLevels
from __clrclasses__.System.Security.Principal import TokenImpersonationLevel
from __clrclasses__.System.Security.Principal import WellKnownSidType
from __clrclasses__.System.Security.Principal import WindowsAccountType
from __clrclasses__.System.Security.Principal import WindowsBuiltInRole
from __clrclasses__.System.Security.Principal import WindowsIdentity
from __clrclasses__.System.Security.Principal import WindowsImpersonationContext
from __clrclasses__.System.Security.Principal import WindowsPrincipal
| 67.578947 | 80 | 0.901869 | from __clrclasses__.System.Security.Principal import GenericIdentity
from __clrclasses__.System.Security.Principal import GenericPrincipal
from __clrclasses__.System.Security.Principal import IdentityNotMappedException
from __clrclasses__.System.Security.Principal import IdentityReference
from __clrclasses__.System.Security.Principal import IdentityReferenceCollection
from __clrclasses__.System.Security.Principal import IIdentity
from __clrclasses__.System.Security.Principal import IPrincipal
from __clrclasses__.System.Security.Principal import NTAccount
from __clrclasses__.System.Security.Principal import PrincipalPolicy
from __clrclasses__.System.Security.Principal import SecurityIdentifier
from __clrclasses__.System.Security.Principal import TokenAccessLevels
from __clrclasses__.System.Security.Principal import TokenImpersonationLevel
from __clrclasses__.System.Security.Principal import WellKnownSidType
from __clrclasses__.System.Security.Principal import WindowsAccountType
from __clrclasses__.System.Security.Principal import WindowsBuiltInRole
from __clrclasses__.System.Security.Principal import WindowsIdentity
from __clrclasses__.System.Security.Principal import WindowsImpersonationContext
from __clrclasses__.System.Security.Principal import WindowsPrincipal
| 0 | 0 | 0 |
b833ad917e8e62666c705ef6a8024619ade36972 | 9,830 | py | Python | Python/Library/externalIndices.py | williamegomez/Clustering-Validation-Indices | dda99f115a34acaef513e7ac589f602eddf4c217 | [
"MIT"
] | 1 | 2019-10-11T11:28:47.000Z | 2019-10-11T11:28:47.000Z | Python/Library/externalIndices.py | williamegomez/Clustering-Validation-Indices | dda99f115a34acaef513e7ac589f602eddf4c217 | [
"MIT"
] | null | null | null | Python/Library/externalIndices.py | williamegomez/Clustering-Validation-Indices | dda99f115a34acaef513e7ac589f602eddf4c217 | [
"MIT"
] | 1 | 2019-10-11T11:32:42.000Z | 2019-10-11T11:32:42.000Z | import math
import numpy as np
from nltk.metrics.association import TOTAL
from sklearn import metrics
from matplotlib.mlab import entropy
| 41.476793 | 113 | 0.525025 | import math
import numpy as np
from nltk.metrics.association import TOTAL
from sklearn import metrics
from matplotlib.mlab import entropy
class ExternalIndices:
def __init__(self, true_labels, clust_labels):
self.true_labels = true_labels - 1
self.clust_labels = clust_labels - 1
self.N = len(self.true_labels)
self.n_clusters = np.max(clust_labels)
def Ext_Jaccard(self):
a = 0
b = 0
c = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
return a / (a + b + c)
def Ext_RandStatistic(self):
a = 0
d = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
d = d + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:] == False))
M = self.N * (self.N - 1) / 2
return (a + d) / M
def Ext_Folkes_Mallows(self):
a = 0
b = 0
c = 0
d = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
d = d + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:] == False))
return a / np.sqrt((a + b) * (a + c))
def Ext_F_Measure(self):
a = 0
b = 0
c = 0
d = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
d = d + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:] == False))
return 2 * a / (2 * a + b + c)
def Ext_Hubert_Gamma(self):
a = 0
b = 0
c = 0
d = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
M = self.N * (self.N - 1) / 2
return (M * a - (a + b) * (a + c)) / np.sqrt(((a + b) * M - (a + b) ** 2) * ((a + c) * M - (a + c) ** 2))
def Ext_Kulczynski(self):
a = 0
b = 0
c = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
return (1 / 2) * ((a / (a + c)) + (a / (a + b)))
def Ext_McNemar(self):
a = 0
b = 0
c = 0
d = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
d = d + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:] == False))
return (d - c) / np.sqrt(d + c)
def Ext_Phi_index(self):
a = 0
b = 0
c = 0
d = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
d = d + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:] == False))
return (1 / 2) * ((a / (a + c)) + (a / (a + b)))
def Ext_Rogers_Tanimoto(self):
a = 0
b = 0
c = 0
d = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
d = d + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:] == False))
return (a + d) / (a + d + 2 * b + 2 * c)
def Ext_Russel_Rao(self):
a = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
M = self.N * (self.N - 1) / 2
return a / M
def Ext_Sokal_Sneath(self):
a = 0
b = 0
c = 0
for j in range(len(self.true_labels)):
indtrue = self.true_labels[j] == self.true_labels
indclus = self.clust_labels[j] == self.clust_labels
a = a + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:])) # #pairs of j
b = b + np.sum(np.logical_and(indtrue[j + 1:], indclus[j + 1:] == False))
c = c + np.sum(np.logical_and(indtrue[j + 1:] == False, indclus[j + 1:]))
return a / (a + 2 * (b + c))
def Ext_Adjusted_rand_score(self):
return metrics.adjusted_rand_score(self.true_labels, self.clust_labels)
def Ext_Adjusted_Mutual_Information(self):
return metrics.adjusted_mutual_info_score(self.true_labels, self.clust_labels)
def Ext_Normalized_Mutual_Information(self):
return metrics.normalized_mutual_info_score(self.true_labels, self.clust_labels)
def Ext_Mutual_Information(self):
return metrics.mutual_info_score(self.true_labels, self.clust_labels)
def Ext_Homogeneity_Score(self):
return metrics.homogeneity_score(self.true_labels, self.clust_labels)
def Ext_Completeness_Score(self):
return metrics.completeness_score(self.true_labels, self.clust_labels)
def Ext_V_Measure_Score(self):
return metrics.v_measure_score(self.true_labels, self.clust_labels)
def Ext_Purity(self):
partition_purity = 0
for i in range(self.n_clusters):
elements = self.true_labels[self.clust_labels == i]
clus_purity = np.max(np.bincount(elements.astype(int)))
partition_purity = partition_purity + clus_purity
return partition_purity / self.N
def Ext_Conditional_Entropy_V_given_U(self):
    """Size-weighted average over clusters of sum(p * log2(p)) of the
    true-label distribution within each cluster.

    NOTE(review): this returns a non-positive value, i.e. the *negative*
    of the textbook conditional entropy H(V|U); the existing sign
    convention is preserved here — confirm before "fixing" the sign.
    """
    weighted_sum = 0
    for cluster_id in range(self.n_clusters):
        members = self.true_labels[self.clust_labels == cluster_id]
        probs = np.bincount(members.astype(int)) / len(members)
        probs = probs[probs > 0]  # drop empty bins before taking log2
        weighted_sum += np.sum(probs * np.log2(probs)) * len(members)
    return weighted_sum / self.N
def Ext_Accuracy1(self):
    """Greedy clustering accuracy.

    Each non-empty cluster "votes" for its majority true label.  If two
    clusters claim the same label, only the larger majority count is
    kept.  The kept counts are summed and normalised by the number of
    points.  Order of processing follows np.unique(clust_labels), so the
    first claimant wins ties — presumably intentional; TODO confirm.
    """
    # rows of clusmax are (claimed_true_label, majority_count)
    clusmax = np.empty((0, 2))
    p = 0
    for i in np.unique(self.clust_labels).astype(int):
        elements = self.true_labels[self.clust_labels == i]
        clmax = np.max(np.bincount(elements.astype(int)))  # size of the majority label
        clargm = np.argmax(np.bincount(elements.astype(int)))  # the majority label itself
        if p > 0:
            if np.sum(clusmax[:, 0] == clargm) == 0:
                # label not claimed yet: record this cluster's majority
                clusmax = np.concatenate((clusmax, np.array([[clargm, clmax]])), axis=0)
            else:
                # label already claimed: keep whichever majority count is larger
                if clusmax[clusmax[:, 0] == clargm, 1] < clmax:
                    clusmax[clusmax[:, 0] == clargm, :] = np.array([[clargm, clmax]])
        else:
            # first cluster always records its claim
            clusmax = np.concatenate((clusmax, np.array([[clargm, clmax]])), axis=0)
        p += 1
    return np.sum(clusmax[:, 1]) / len(self.clust_labels)
| 8,930 | 1 | 724 |
383ce389bab3cf0aa4ba4071205d243982ebb53c | 466 | py | Python | project/RealEstateMarketPlace/forms/RegisterForm.py | Mihaaai/RealEstateMarketplace | 9b9fa1376436801303e1ed0207ef09845a7d827e | [
"Apache-2.0"
] | null | null | null | project/RealEstateMarketPlace/forms/RegisterForm.py | Mihaaai/RealEstateMarketplace | 9b9fa1376436801303e1ed0207ef09845a7d827e | [
"Apache-2.0"
] | null | null | null | project/RealEstateMarketPlace/forms/RegisterForm.py | Mihaaai/RealEstateMarketplace | 9b9fa1376436801303e1ed0207ef09845a7d827e | [
"Apache-2.0"
] | null | null | null | from django import forms
from ..models import User
from django.contrib.auth.forms import UserCreationForm
| 27.411765 | 63 | 0.693133 | from django import forms
from ..models import User
from django.contrib.auth.forms import UserCreationForm
class RegisterForm(UserCreationForm):
    """Sign-up form for the project's custom User model.
    Extends Django's built-in UserCreationForm (which supplies the
    password handling) with mandatory contact fields.
    """
    class Meta:
        model = User
        fields = (
            'email', 'first_name', 'last_name', 'phone_number',
        )
    # explicit overrides: force every extra field to be required at the
    # form level regardless of the model's blank/null settings
    email = forms.EmailField(required=True)
    first_name = forms.CharField(required=True)
    last_name = forms.CharField(required=True)
    phone_number = forms.CharField(required=True)
| 0 | 336 | 23 |
da55e0f85c135dc0582e1cec208f874daa211773 | 129 | py | Python | pokemon/001bulbasaur.py | julio177/ascii-pokedex | 2727d3f3257abd746300248ae75e11cae2c40ea3 | [
"MIT"
] | null | null | null | pokemon/001bulbasaur.py | julio177/ascii-pokedex | 2727d3f3257abd746300248ae75e11cae2c40ea3 | [
"MIT"
] | null | null | null | pokemon/001bulbasaur.py | julio177/ascii-pokedex | 2727d3f3257abd746300248ae75e11cae2c40ea3 | [
"MIT"
] | null | null | null | '''Bulbasaur, Ivysaur and Venusaur'''
from __init__ import Pokemon
# Load the generation-1 ASCII art for Bulbasaur and print it to stdout.
Bulbasaur = Pokemon('generation_1/001.txt')
print(Bulbasaur)
| 21.5 | 43 | 0.775194 | '''Bulbasaur, Ivysaur and Venusaur'''
from __init__ import Pokemon
# Load the generation-1 ASCII art for Bulbasaur and print it to stdout.
Bulbasaur = Pokemon('generation_1/001.txt')
print(Bulbasaur)
| 0 | 0 | 0 |
eb09fc0a7afbff14bdc368e14c389265dedd069e | 138 | py | Python | tests/conftest.py | andreyfedoseev/django-media-definitions | a96c6d66cb4ea89e9521e419f9ecbea8b4ffe9af | [
"MIT"
] | 2 | 2017-05-15T07:59:00.000Z | 2017-07-29T08:58:26.000Z | tests/conftest.py | andreyfedoseev/django-media-definitions | a96c6d66cb4ea89e9521e419f9ecbea8b4ffe9af | [
"MIT"
] | null | null | null | tests/conftest.py | andreyfedoseev/django-media-definitions | a96c6d66cb4ea89e9521e419f9ecbea8b4ffe9af | [
"MIT"
] | 1 | 2018-02-16T05:02:12.000Z | 2018-02-16T05:02:12.000Z | import django
from django.conf import settings
| 17.25 | 45 | 0.746377 | import django
from django.conf import settings
def pytest_configure():
    """Pytest hook: bootstrap Django before any test imports settings-
    dependent code.  Only STATIC_URL is configured here."""
    settings.configure(STATIC_URL="/static/")
    django.setup()
| 67 | 0 | 23 |
4d5ea548eddd8bc8ca4e3717d9cb16eda6b1c591 | 951 | py | Python | leetcode/207.course-schedule.py | schio/algorithm_test | c240faca428a9adb2970591338d4792b2f4fb7f3 | [
"MIT"
] | null | null | null | leetcode/207.course-schedule.py | schio/algorithm_test | c240faca428a9adb2970591338d4792b2f4fb7f3 | [
"MIT"
] | null | null | null | leetcode/207.course-schedule.py | schio/algorithm_test | c240faca428a9adb2970591338d4792b2f4fb7f3 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=207 lang=python3
#
# [207] Course Schedule
#
# @lc code=start
# @lc code=end
| 19.8125 | 81 | 0.466877 | #
# @lc app=leetcode id=207 lang=python3
#
# [207] Course Schedule
#
# @lc code=start
class Solution:
    """LeetCode 207 (Course Schedule): all courses can be finished iff
    the prerequisite graph contains no directed cycle."""

    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Return True when no cyclic prerequisite chain exists (DFS)."""
        adjacency = collections.defaultdict(list)
        for course, prereq in prerequisites:
            adjacency[course].append(prereq)

        on_path = set()   # nodes on the current DFS path (cycle guard)
        finished = set()  # nodes fully explored and proven acyclic

        def acyclic(node):
            if node in on_path:
                return False  # back edge -> cycle found
            if node in finished:
                return True   # already verified, skip re-exploration
            on_path.add(node)
            for nxt in adjacency[node]:
                if not acyclic(nxt):
                    return False
            # exploration done: node leaves the path, is marked verified
            on_path.remove(node)
            finished.add(node)
            return True

        return all(acyclic(node) for node in list(adjacency))
# @lc code=end
| 875 | -6 | 48 |
6a1b221b40ffc1a9f30f8e3c2ffc4fc69bcecad8 | 9,313 | py | Python | tests/test_security.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 199 | 2017-08-24T12:19:41.000Z | 2022-03-20T14:50:17.000Z | tests/test_security.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 275 | 2017-08-28T21:21:49.000Z | 2022-03-29T17:57:26.000Z | tests/test_security.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 73 | 2017-09-07T10:13:56.000Z | 2022-02-28T10:37:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for nipyapi security module."""
from __future__ import absolute_import
import pytest
from tests import conftest
import nipyapi
# Tells pytest to skip this module of security testing is not enabled.
pytestmark = pytest.mark.skipif(not conftest.test_security, reason='test_security disabled in Conftest')
# Useful for manual testing
# if conftest.test_security:
# test_host = nipyapi.config.default_host
# nipyapi.utils.set_endpoint('https://' + test_host + ':18443/nifi-registry-api', True, True)
# nipyapi.utils.set_endpoint('https://' + test_host + ':9443/nifi-api', True, True)
# TODO: Test adding users to existing set of users and ensuring no clobber
| 35.681992 | 104 | 0.719747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for nipyapi security module."""
from __future__ import absolute_import
import pytest
from tests import conftest
import nipyapi
# Tells pytest to skip this module of security testing is not enabled.
pytestmark = pytest.mark.skipif(not conftest.test_security, reason='test_security disabled in Conftest')
# Useful for manual testing
# if conftest.test_security:
# test_host = nipyapi.config.default_host
# nipyapi.utils.set_endpoint('https://' + test_host + ':18443/nifi-registry-api', True, True)
# nipyapi.utils.set_endpoint('https://' + test_host + ':9443/nifi-api', True, True)
def test_list_service_users():
    """No direct assertions: listing users is exercised constantly by the
    suite's fixtures, which would fail loudly if it broke."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_get_service_user():
    """No direct assertions: covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_create_service_user():
    """create_service_user: argument validation, creation against both
    NiFi and Registry, and strict/non-strict duplicate handling."""
    # bad service name, non-string identity and non-bool strict are rejected
    with pytest.raises(AssertionError):
        nipyapi.security.create_service_user(service='bob', identity='pie')
    with pytest.raises(AssertionError):
        nipyapi.security.create_service_user(service='nifi', identity=dict())
    with pytest.raises(AssertionError):
        nipyapi.security.create_service_user(service='nifi', identity='pie', strict=str())
    # create in NiFi (the default service) and in Registry
    r1 = nipyapi.security.create_service_user(conftest.test_basename)
    assert isinstance(r1, nipyapi.nifi.UserEntity)
    r2 = nipyapi.security.create_service_user(conftest.test_basename, 'registry')
    assert isinstance(r2, nipyapi.registry.User)
    # strict=True refuses to recreate an existing identity...
    with pytest.raises(ValueError):
        nipyapi.security.create_service_user(conftest.test_basename, strict=True)
    # ...while strict=False hands back the existing user object
    r3 = nipyapi.security.create_service_user(conftest.test_basename, strict=False)
    assert isinstance(r3, nipyapi.nifi.UserEntity)
    assert r3.component.identity == conftest.test_basename
def test_remove_service_user(fix_users):
    """remove_service_user succeeds once per service, raises on a second
    attempt by default, and is a quiet no-op with strict=False."""
    n_user, r_user = fix_users()
    r1 = nipyapi.security.remove_service_user(n_user)
    assert nipyapi.security.get_service_user(n_user.component.identity) is None
    assert isinstance(r1, nipyapi.nifi.UserEntity)
    r2 = nipyapi.security.remove_service_user(r_user, 'registry')
    assert nipyapi.security.get_service_user(r_user.identity, service='registry') is None
    assert isinstance(r2, nipyapi.registry.User)
    # removing an already-removed user raises by default...
    with pytest.raises(ValueError):
        nipyapi.security.remove_service_user(n_user)
    with pytest.raises(ValueError):
        nipyapi.security.remove_service_user(r_user, 'registry')
    # ...but returns None quietly when strict=False
    r3 = nipyapi.security.remove_service_user(n_user, strict=False)
    assert r3 is None
    r4 = nipyapi.security.remove_service_user(r_user, 'registry', strict=False)
    assert r4 is None
def test_create_service_user_group(fix_users, fix_user_groups):
    """create_service_user_group: argument validation, creation with an
    initial member on both services, and strict vs non-strict behaviour
    when the group already exists."""
    # fix_user_groups provides the cleanup after testing
    with pytest.raises(AssertionError):
        nipyapi.security.create_service_user_group(identity=dict())
    with pytest.raises(AssertionError):
        nipyapi.security.create_service_user_group(
            conftest.test_user_group_name,
            service='bob'
        )
    # users must be service model objects, not bare strings
    with pytest.raises(AssertionError):
        nipyapi.security.create_service_user_group(
            conftest.test_user_group_name,
            service='nifi',
            users=['bob']
        )
    n_user, r_user = fix_users()
    r1 = nipyapi.security.create_service_user_group(
        conftest.test_user_group_name,
        service='nifi',
        users=[n_user],
        strict=True
    )
    assert isinstance(r1, nipyapi.nifi.UserGroupEntity)
    r2 = nipyapi.security.create_service_user_group(
        conftest.test_user_group_name,
        service='registry',
        users=[r_user],
        strict=True
    )
    assert isinstance(r2, nipyapi.registry.UserGroup)
    # strict=True refuses to recreate an existing group on either service
    with pytest.raises(ValueError):
        nipyapi.security.create_service_user_group(
            conftest.test_user_group_name,
            service='nifi',
            users=[n_user],
            strict=True
        )
    with pytest.raises(ValueError):
        nipyapi.security.create_service_user_group(
            conftest.test_user_group_name,
            service='registry',
            users=[r_user],
            strict=True
        )
    # strict=False returns the existing group instead of raising
    r3 = nipyapi.security.create_service_user_group(
        conftest.test_user_group_name,
        service='nifi',
        users=[n_user],
        strict=False
    )
    assert isinstance(r3, nipyapi.nifi.UserGroupEntity)
    r4 = nipyapi.security.create_service_user_group(
        conftest.test_user_group_name,
        service='registry',
        users=[r_user],
        strict=False
    )
    assert isinstance(r4, nipyapi.registry.UserGroup)
def test_list_service_user_groups(fix_user_groups):
    """Listing groups rejects an unknown service name and returns the
    fixture-created group for NiFi and Registry."""
    n_group, r_group = fix_user_groups()
    with pytest.raises(AssertionError):
        nipyapi.security.list_service_user_groups(service='bob')
    r1 = nipyapi.security.list_service_user_groups()
    assert isinstance(r1[0], nipyapi.nifi.UserGroupEntity)
    assert n_group.id in [x.id for x in r1]
    r2 = nipyapi.security.list_service_user_groups('registry')
    assert isinstance(r2[0], nipyapi.registry.UserGroup)
    assert r_group.identifier in [x.identifier for x in r2]
def test_get_service_user_group(fix_user_groups):
    """Fetching a group by identity validates its arguments and returns
    the matching object for each service."""
    n_group, r_group = fix_user_groups()
    with pytest.raises(AssertionError):
        nipyapi.security.get_service_user_group(identifier=dict())
    with pytest.raises(AssertionError):
        nipyapi.security.get_service_user_group(
            identifier='bob',
            identifier_type=dict())
    with pytest.raises(AssertionError):
        nipyapi.security.get_service_user_group(
            identifier='bob',
            identifier_type='id',
            service='bob')
    # lookup by name on the default (NiFi) service
    r1 = nipyapi.security.get_service_user_group(conftest.test_user_group_name)
    assert isinstance(r1, nipyapi.nifi.UserGroupEntity)
    assert r1.id == n_group.id
    # lookup by name on the Registry service
    r2 = nipyapi.security.get_service_user_group(
        identifier=conftest.test_user_group_name,
        service='registry'
    )
    assert isinstance(r2, nipyapi.registry.UserGroup)
    assert r2.identifier == r_group.identifier
def test_remove_service_user_group(fix_user_groups):
    """remove_service_user_group succeeds once per service, raises on a
    second attempt by default, and is a quiet no-op with strict=False."""
    n_group, r_group = fix_user_groups()
    r1 = nipyapi.security.remove_service_user_group(n_group)
    assert nipyapi.security.get_service_user_group(n_group.component.identity) is None
    assert isinstance(r1, nipyapi.nifi.UserGroupEntity)
    r2 = nipyapi.security.remove_service_user_group(r_group, 'registry')
    assert nipyapi.security.get_service_user_group(r_group.identity, service='registry') is None
    assert isinstance(r2, nipyapi.registry.UserGroup)
    # removing an already-removed group raises by default...
    with pytest.raises(ValueError):
        nipyapi.security.remove_service_user_group(n_group)
    with pytest.raises(ValueError):
        nipyapi.security.remove_service_user_group(r_group, 'registry')
    # ...but returns None quietly when strict=False
    r3 = nipyapi.security.remove_service_user_group(n_group, strict=False)
    assert r3 is None
    r4 = nipyapi.security.remove_service_user_group(r_group, 'registry', strict=False)
    assert r4 is None
def test_service_login():
    """Only argument validation is asserted here; successful logins are
    exercised continuously by the suite's fixtures."""
    with pytest.raises(AssertionError):
        nipyapi.security.service_login(service='bob')
    with pytest.raises(AssertionError):
        nipyapi.security.service_login(username=dict())
    with pytest.raises(AssertionError):
        nipyapi.security.service_login(password=dict())
    with pytest.raises(AssertionError):
        nipyapi.security.service_login(bool_response='bob')
    # This test suite makes extensive use of this call in fixtures
def test_set_service_auth_token():
    """Covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_service_logout():
    """Covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_get_service_access_status():
    """Covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_add_user_to_access_policy():
    """Placeholder: the commented sketch below outlines the intended
    coverage for adding a user to an access policy."""
    # ~ user = nipyapi.security.create_service_user(
    # ~ identity='testuser',
    # ~ service='nifi'
    # ~ )
    # ~ assert isinstance(user, nipyapi.nifi.UserEntity)
    # ~ policy = nipyapi.security.add_user_to_access_policy(
    # ~ user=user,
    # ~ service='nifi'
    # ~ )
    # ~ assert isinstance(policy, nipyapi.nifi.AccessPolicyEntity)
    pass
def test_add_user_group_to_access_policy():
    """Placeholder: intended coverage for adding a user *group* to an
    access policy, sketched in the commented code."""
    # ~ user_group = nipyapi.security.create_service_user_group(
    # ~ identity='testuser_group',
    # ~ service='nifi'
    # ~ )
    # ~ assert isinstance(user_group, nipyapi.nifi.UserGroupEntity)
    # ~ policy = nipyapi.security.add_user_group_to_access_policy(
    # ~ user_group=user_group,
    # ~ service='nifi'
    # ~ )
    # ~ assert isinstance(policy, nipyapi.nifi.AccessPolicyEntity)
    pass
def test_update_access_policy():
    """Placeholder: not yet implemented."""
    pass
def test_get_access_policy_for_resource():
    """Covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_create_access_policy():
    """Covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_set_service_ssl_context():
    """Covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
def test_bootstrap_security_policies():
    """Covered indirectly through fixture usage."""
    # This test suite makes extensive use of this call in fixtures
    pass
# TODO: Test adding users to existing set of users and ensuring no clobber
| 8,130 | 0 | 437 |
1a733cf776ecb3c6b1ac3142642e6e5092c63616 | 3,038 | py | Python | gettingstarted/urls.py | TomWerner/AlumniMentoring | d4bac09fc768232f0795a0672eb041a2225118ae | [
"MIT"
] | 2 | 2016-10-19T17:04:53.000Z | 2017-07-23T21:49:34.000Z | gettingstarted/urls.py | TomWerner/AlumniMentoring | d4bac09fc768232f0795a0672eb041a2225118ae | [
"MIT"
] | null | null | null | gettingstarted/urls.py | TomWerner/AlumniMentoring | d4bac09fc768232f0795a0672eb041a2225118ae | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm
from django.views.generic import CreateView
from django.views.generic import RedirectView
admin.autodiscover()
from django.conf import settings
from django.conf.urls.static import static
import django.contrib.auth.views
from mentoring.views import views
from mentoring.views import honors_admin
# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# NOTE: inline flags such as "(?i)" must appear at the very start of a
# regular expression on modern Python (the re module raises an error for
# mid-pattern global flags since 3.11), so each case-insensitive pattern
# below is written r'(?i)^...' rather than the old r'^(?i)...' form.
# Matching behaviour is unchanged.
urlpatterns = [
    url(r'^$', views.home),
    url(r'^admin/', admin.site.urls),
    url(r'(?i)^honorsAdmin/$', honors_admin.home),
    url(r'(?i)^honorsAdmin/mentors/$', honors_admin.mentors),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/view', honors_admin.mentor_detail),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/details', honors_admin.mentor_detail_page),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/approve', honors_admin.mentor_approve),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/deny', honors_admin.mentor_deny),
    url(r'(?i)^honorsAdmin/mentees/$', honors_admin.mentees),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/view', honors_admin.mentee_detail),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/details', honors_admin.mentee_detail_page),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/approve', honors_admin.mentee_approve),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/deny', honors_admin.mentee_deny),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/getmatches', honors_admin.mentee_get_matches),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/getallmatches$', honors_admin.mentee_get_all_matches),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/getallmatcheslist', honors_admin.mentee_get_all_matches_list),
    url(r'(?i)^honorsAdmin/createPairing', honors_admin.create_pairing),
    url(r'(?i)^honorsAdmin/resendPairing', honors_admin.resend_pairing_email),
    url(r'(?i)^honorsAdmin/endPairing', honors_admin.end_pairing),
    url(r'(?i)^honorsAdmin/feedbacks/([0-9]+)/view/', honors_admin.pairing_feedback),
    url(r'(?i)^honorsAdmin/pairs/$', honors_admin.pairings),
    url(r'(?i)^honorsAdmin/export/$', honors_admin.export),
    url(r'(?i)^honorsAdmin/invite/$', honors_admin.invitations),
    url(r'(?i)^honorsAdmin/send_invite/$', honors_admin.send_invite),
    url(r'(?i)^honorsAdmin/preview_invite/$', honors_admin.preview_invite),
    # Default django stuff
    url(r'(?i)^accounts/logout/$', django.contrib.auth.views.logout),
    url(r'(?i)^accounts/login/$', django.contrib.auth.views.login, {'template_name': 'admin/login.html'}),
    url(r'(?i)^accounts/$', RedirectView.as_view(url='/')),
    url(r'(?i)^thankyoumentor/', views.thank_you_mentor),
    url(r'(?i)^thankyoumentee/', views.thank_you_mentee),
    url(r'(?i)^newmentor/', views.new_mentor),
    url(r'(?i)^newmentee/', views.new_mentee),
    url(r'(?i)^confirmation/', views.confirm_account),
    url(r'(?i)^feedback/', views.pairing_feedback),
]  # + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 46.738462 | 106 | 0.705069 | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm
from django.views.generic import CreateView
from django.views.generic import RedirectView
admin.autodiscover()
from django.conf import settings
from django.conf.urls.static import static
import django.contrib.auth.views
from mentoring.views import views
from mentoring.views import honors_admin
# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# NOTE: inline flags such as "(?i)" must appear at the very start of a
# regular expression on modern Python (the re module raises an error for
# mid-pattern global flags since 3.11), so each case-insensitive pattern
# below is written r'(?i)^...' rather than the old r'^(?i)...' form.
# Matching behaviour is unchanged.
urlpatterns = [
    url(r'^$', views.home),
    url(r'^admin/', admin.site.urls),
    url(r'(?i)^honorsAdmin/$', honors_admin.home),
    url(r'(?i)^honorsAdmin/mentors/$', honors_admin.mentors),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/view', honors_admin.mentor_detail),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/details', honors_admin.mentor_detail_page),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/approve', honors_admin.mentor_approve),
    url(r'(?i)^honorsAdmin/mentor/([0-9]+)/deny', honors_admin.mentor_deny),
    url(r'(?i)^honorsAdmin/mentees/$', honors_admin.mentees),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/view', honors_admin.mentee_detail),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/details', honors_admin.mentee_detail_page),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/approve', honors_admin.mentee_approve),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/deny', honors_admin.mentee_deny),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/getmatches', honors_admin.mentee_get_matches),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/getallmatches$', honors_admin.mentee_get_all_matches),
    url(r'(?i)^honorsAdmin/mentee/([0-9]+)/getallmatcheslist', honors_admin.mentee_get_all_matches_list),
    url(r'(?i)^honorsAdmin/createPairing', honors_admin.create_pairing),
    url(r'(?i)^honorsAdmin/resendPairing', honors_admin.resend_pairing_email),
    url(r'(?i)^honorsAdmin/endPairing', honors_admin.end_pairing),
    url(r'(?i)^honorsAdmin/feedbacks/([0-9]+)/view/', honors_admin.pairing_feedback),
    url(r'(?i)^honorsAdmin/pairs/$', honors_admin.pairings),
    url(r'(?i)^honorsAdmin/export/$', honors_admin.export),
    url(r'(?i)^honorsAdmin/invite/$', honors_admin.invitations),
    url(r'(?i)^honorsAdmin/send_invite/$', honors_admin.send_invite),
    url(r'(?i)^honorsAdmin/preview_invite/$', honors_admin.preview_invite),
    # Default django stuff
    url(r'(?i)^accounts/logout/$', django.contrib.auth.views.logout),
    url(r'(?i)^accounts/login/$', django.contrib.auth.views.login, {'template_name': 'admin/login.html'}),
    url(r'(?i)^accounts/$', RedirectView.as_view(url='/')),
    url(r'(?i)^thankyoumentor/', views.thank_you_mentor),
    url(r'(?i)^thankyoumentee/', views.thank_you_mentee),
    url(r'(?i)^newmentor/', views.new_mentor),
    url(r'(?i)^newmentee/', views.new_mentee),
    url(r'(?i)^confirmation/', views.confirm_account),
    url(r'(?i)^feedback/', views.pairing_feedback),
]  # + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 0 | 0 | 0 |
3afe37cef1db69cd3def4683c3a30bcaf890a308 | 3,207 | py | Python | test/integration/config_service_test.py | stelligent/potemkin-decorator | 2d30cf07a580f5aad67d7d595e3bcd622bc0e232 | [
"MIT"
] | 9 | 2020-03-25T02:20:54.000Z | 2021-12-29T08:09:17.000Z | test/integration/config_service_test.py | stelligent/potemkin-decorator | 2d30cf07a580f5aad67d7d595e3bcd622bc0e232 | [
"MIT"
] | 12 | 2020-03-24T17:42:45.000Z | 2020-05-08T21:46:59.000Z | test/integration/config_service_test.py | stelligent/potemkin-decorator | 2d30cf07a580f5aad67d7d595e3bcd622bc0e232 | [
"MIT"
] | 1 | 2020-08-25T13:47:30.000Z | 2020-08-25T13:47:30.000Z | import pytest
import potemkin
import boto3
from potemkin.configservice import evaluate_config_rule_and_wait_for_resource, config_rule_wait_for_resource, config_rule_wait_for_absent_resources, config_rule_wait_for_compliance_results
@potemkin.CloudFormationStack('test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack')
@pytest.mark.xfail(reason="deliberate fail")
@potemkin.CloudFormationStack('test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack')
@potemkin.CloudFormationStack(
'test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack'
)
@potemkin.CloudFormationStack(
'test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack'
)
| 34.858696 | 188 | 0.751169 | import pytest
import potemkin
import boto3
from potemkin.configservice import evaluate_config_rule_and_wait_for_resource, config_rule_wait_for_resource, config_rule_wait_for_absent_resources, config_rule_wait_for_compliance_results
@potemkin.CloudFormationStack('test/integration/test_templates/eip.yml',
                              stack_name_stem='EipTestStack')
def test_wait_for_compliance_results_success(stack_outputs, stack_name):
    """Two unattached EIPs from the stack must evaluate NON_COMPLIANT for
    the eip-attached Config rule; an unknown id reports NOT_APPLICABLE.
    NOTE(review): expectations are stashed in a module-level global that
    a later test reuses, making these tests order-dependent.
    """
    global expected_results_success
    configservice = boto3.Session().client('config')
    expected_results_success = {
        stack_outputs['EIPOutput']: "NON_COMPLIANT",
        stack_outputs['EIP2Output']: "NON_COMPLIANT",
        "dummy": "NOT_APPLICABLE"
    }
    assert config_rule_wait_for_compliance_results(
        configservice,
        rule_name='eip-attached',
        expected_results=expected_results_success)
@pytest.mark.xfail(reason="deliberate fail")
@potemkin.CloudFormationStack('test/integration/test_templates/eip.yml',
                              stack_name_stem='EipTestStack')
def test_wait_for_compliance_results_fail(stack_outputs, stack_name):
    """Deliberately wrong expectation (COMPLIANT for an unattached EIP) to
    prove the waiter surfaces mismatches; marked xfail on purpose.
    NOTE(review): also writes a module-level global consumed by a later
    test, so test ordering matters.
    """
    global expected_results_fail
    configservice = boto3.Session().client('config')
    expected_results_fail = {
        stack_outputs['EIPOutput']: "NON_COMPLIANT",
        stack_outputs['EIP2Output']: "COMPLIANT"
    }
    assert config_rule_wait_for_compliance_results(
        configservice,
        rule_name='eip-attached',
        expected_results=expected_results_fail)
def test_wait_for_compliance_results_success_results_removed():
    """After the earlier stack is torn down, its resource ids must vanish
    from the rule's results (reads the global set by the success test)."""
    configservice = boto3.Session().client('config')
    resource_ids = list(expected_results_success.keys())
    assert [] == config_rule_wait_for_absent_resources(
        configservice, rule_name='eip-attached', resource_ids=resource_ids)
def test_wait_for_compliance_results_fail_results_removed():
    """Same absence check for the xfail test's resources (reads the
    global set by that test, so ordering matters)."""
    configservice = boto3.Session().client('config')
    resource_ids = list(expected_results_fail.keys())
    assert [] == config_rule_wait_for_absent_resources(
        configservice, rule_name='eip-attached', resource_ids=resource_ids)
@potemkin.CloudFormationStack(
    'test/integration/test_templates/eip.yml',
    stack_name_stem='EipTestStack'
)
def test_config_rule_with_evaluate(stack_outputs, stack_name):
    """Force a rule re-evaluation, then wait for the EIP's result: an
    unattached EIP must come back NON_COMPLIANT."""
    eipalloc = stack_outputs['EIPOutput']
    configservice = boto3.Session().client('config')
    actual_result = evaluate_config_rule_and_wait_for_resource(
        configservice,
        resource_id=eipalloc,
        rule_name='eip-attached'
    )
    expected_compliance_type = 'NON_COMPLIANT'
    assert actual_result['ComplianceType'] == expected_compliance_type
@potemkin.CloudFormationStack(
    'test/integration/test_templates/eip.yml',
    stack_name_stem='EipTestStack'
)
def test_config_rules_no_evaluate(stack_outputs, stack_name):
    """Wait for the EIP's rule result without forcing an evaluation; the
    unattached EIP must still come back NON_COMPLIANT."""
    eipalloc = stack_outputs['EIPOutput']
    configservice = boto3.Session().client('config')
    actual_result = config_rule_wait_for_resource(
        configservice,
        resource_id=eipalloc,
        rule_name='eip-attached'
    )
    expected_compliance_type = 'NON_COMPLIANT'
    assert actual_result['ComplianceType'] == expected_compliance_type
| 2,287 | 0 | 134 |
d565975d5c6295b512c456ab9bae2b597eba5e6d | 260 | py | Python | partner_ngos/programs_management/doctype/programs/programs.py | AkramMutaher/partner_ngos | 4a345fb6989ff5a21db7fca07aa4e5174dca8f59 | [
"MIT"
] | 1 | 2021-06-03T17:14:08.000Z | 2021-06-03T17:14:08.000Z | partner_ngos/programs_management/doctype/programs/programs.py | AkramMutaher/partner_ngos | 4a345fb6989ff5a21db7fca07aa4e5174dca8f59 | [
"MIT"
] | null | null | null | partner_ngos/programs_management/doctype/programs/programs.py | AkramMutaher/partner_ngos | 4a345fb6989ff5a21db7fca07aa4e5174dca8f59 | [
"MIT"
] | 1 | 2021-10-09T16:20:09.000Z | 2021-10-09T16:20:09.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
| 23.636364 | 52 | 0.773077 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Programs(Document):
    """Frappe DocType controller for 'Programs'.
    No custom server-side behaviour yet; everything is inherited from
    frappe.model.document.Document.
    """
    pass
| 0 | 10 | 23 |
37e220aa191b5c091355a0d9f206340516811513 | 1,035 | py | Python | minerals/context_processors.py | mhunterak/TD_Mineral_Catalog | dc76289274b83d44fd76dafb2e734843d11675a0 | [
"MIT"
] | null | null | null | minerals/context_processors.py | mhunterak/TD_Mineral_Catalog | dc76289274b83d44fd76dafb2e734843d11675a0 | [
"MIT"
] | null | null | null | minerals/context_processors.py | mhunterak/TD_Mineral_Catalog | dc76289274b83d44fd76dafb2e734843d11675a0 | [
"MIT"
] | null | null | null | '''
Context Processors do some pretty great work, like default arguments supplied
to templates when they're rendered. kind of like Macros in Flask, but even more
powerful.
'''
import string
from django.utils.datastructures import MultiValueDictKeyError
from .forms import SearchForm
from .static_vars import COLORS, GROUPS
def search_form(request):
    """Provide a bound SearchForm to every template.

    Templates still supply their own <form> wrapper to control the
    action; this only injects a SearchForm whose initial value echoes
    the previously submitted query (empty on a first visit).
    """
    # QueryDict.get replaces the try/except MultiValueDictKeyError dance
    # with the idiomatic default-returning lookup; behaviour is identical.
    query = request.POST.get('search', '')
    return {
        'SearchForm': SearchForm(initial={'search': query}),
    }
def alphabet(request):
    """Expose the uppercase letters A-Z to every template."""
    return {'alphabet': string.ascii_uppercase}
def groups(request):
    """Expose the mineral group names to every template."""
    return dict(groups=GROUPS)
def colors(request):
    """Expose the available mineral colors to every template."""
    return dict(colors=COLORS)
| 24.069767 | 79 | 0.687923 | '''
Context Processors do some pretty great work, like default arguments supplied
to templates when they're rendered. kind of like Macros in Flask, but even more
powerful.
'''
import string
from django.utils.datastructures import MultiValueDictKeyError
from .forms import SearchForm
from .static_vars import COLORS, GROUPS
def search_form(request):
    """Provide a bound SearchForm to every template.

    Templates still supply their own <form> wrapper to control the
    action; this only injects a SearchForm whose initial value echoes
    the previously submitted query (empty on a first visit).
    """
    # QueryDict.get replaces the try/except MultiValueDictKeyError dance
    # with the idiomatic default-returning lookup; behaviour is identical.
    query = request.POST.get('search', '')
    return {
        'SearchForm': SearchForm(initial={'search': query}),
    }
def alphabet(request):
    """Expose the uppercase letters A-Z to every template."""
    return {'alphabet': string.ascii_uppercase}
def groups(request):
    """Expose the mineral group names to every template."""
    return dict(groups=GROUPS)
def colors(request):
    """Expose the available mineral colors to every template."""
    return dict(colors=COLORS)
| 0 | 0 | 0 |
fa8f707b07aac17427883b74e5ed9efe4487c86a | 143 | py | Python | pushover/__init__.py | ccoder64/pushover-python | 6be770ecb7d269169718c02c14d9ba35fa0c8715 | [
"MIT"
] | null | null | null | pushover/__init__.py | ccoder64/pushover-python | 6be770ecb7d269169718c02c14d9ba35fa0c8715 | [
"MIT"
] | null | null | null | pushover/__init__.py | ccoder64/pushover-python | 6be770ecb7d269169718c02c14d9ba35fa0c8715 | [
"MIT"
] | null | null | null | """
pushover simple api
~~~~~~~~~~~~~~~~~~~
"""
__author__ = "toloy"
from .pushover import Pushover, PushoverException
| 13 | 50 | 0.524476 | """
pushover simple api
~~~~~~~~~~~~~~~~~~~
"""
__author__ = "toloy"
from .pushover import Pushover, PushoverException
| 0 | 0 | 0 |
a7f943ad32a943a1947fc358318e58687aeadc3a | 420 | py | Python | kubernetes_typed/client/models/v1beta1_certificate_signing_request_condition.py | sobolevn/kubernetes-typed | 5f0a770631c73a9831fbeaeebac188e8f4a52c54 | [
"Apache-2.0"
] | 22 | 2020-12-10T13:06:02.000Z | 2022-02-13T21:58:15.000Z | kubernetes_typed/client/models/v1beta1_certificate_signing_request_condition.py | sobolevn/kubernetes-typed | 5f0a770631c73a9831fbeaeebac188e8f4a52c54 | [
"Apache-2.0"
] | 4 | 2021-03-08T07:06:12.000Z | 2022-03-29T23:41:45.000Z | kubernetes_typed/client/models/v1beta1_certificate_signing_request_condition.py | sobolevn/kubernetes-typed | 5f0a770631c73a9831fbeaeebac188e8f4a52c54 | [
"Apache-2.0"
] | 2 | 2021-09-05T19:18:28.000Z | 2022-03-14T02:56:17.000Z | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1beta1CertificateSigningRequestConditionDict generated type."""
import datetime
from typing import TypedDict
# total=False: every condition field may be absent, mirroring the
# optional fields of the Kubernetes API object this dict types.
V1beta1CertificateSigningRequestConditionDict = TypedDict(
    "V1beta1CertificateSigningRequestConditionDict",
    {
        "lastUpdateTime": datetime.datetime,
        "message": str,
        "reason": str,
        "type": str,
    },
    total=False,
)
| 26.25 | 67 | 0.709524 | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1beta1CertificateSigningRequestConditionDict generated type."""
import datetime
from typing import TypedDict
# total=False: every condition field may be absent, mirroring the
# optional fields of the Kubernetes API object this dict types.
V1beta1CertificateSigningRequestConditionDict = TypedDict(
    "V1beta1CertificateSigningRequestConditionDict",
    {
        "lastUpdateTime": datetime.datetime,
        "message": str,
        "reason": str,
        "type": str,
    },
    total=False,
)
| 0 | 0 | 0 |
f0ac79bf020ed55f8efbaf8848de6863288c059a | 5,900 | py | Python | sdk/servicebus/azure-servicebus/tests/perf_tests/_test_base.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/servicebus/azure-servicebus/tests/perf_tests/_test_base.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/servicebus/azure-servicebus/tests/perf_tests/_test_base.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
from azure_devtools.perfstress_tests import PerfStressTest, get_random_bytes
from azure.servicebus import ServiceBusClient, ServiceBusReceiveMode, ServiceBusMessage
from azure.servicebus.aio import ServiceBusClient as AsyncServiceBusClient
from azure.servicebus.aio.management import ServiceBusAdministrationClient
MAX_QUEUE_SIZE = 40960
| 45.736434 | 181 | 0.685932 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
from azure_devtools.perfstress_tests import PerfStressTest, get_random_bytes
from azure.servicebus import ServiceBusClient, ServiceBusReceiveMode, ServiceBusMessage
from azure.servicebus.aio import ServiceBusClient as AsyncServiceBusClient
from azure.servicebus.aio.management import ServiceBusAdministrationClient
MAX_QUEUE_SIZE = 40960
class _ServiceTest(PerfStressTest):
    """Common base for Service Bus perf tests.

    Builds one sync and one async ServiceBusClient from the
    AZURE_SERVICEBUS_CONNECTION_STRING environment variable, either
    per-instance (--no-client-share) or shared across all instances
    via class attributes.
    """
    # Class-level slots holding the process-wide shared clients.
    service_client = None
    async_service_client = None
    def __init__(self, arguments):
        super().__init__(arguments)
        connection_string = self.get_from_env("AZURE_SERVICEBUS_CONNECTION_STRING")
        if self.args.no_client_share:
            # Dedicated clients for this test instance only.
            self.service_client = ServiceBusClient.from_connection_string(connection_string)
            self.async_service_client = AsyncServiceBusClient.from_connection_string(connection_string)
        else:
            # Lazily create the shared clients once, then reuse them everywhere.
            if not _ServiceTest.service_client:
                _ServiceTest.service_client = ServiceBusClient.from_connection_string(connection_string)
                _ServiceTest.async_service_client = AsyncServiceBusClient.from_connection_string(connection_string)
            self.service_client = _ServiceTest.service_client
            self.async_service_client =_ServiceTest.async_service_client
    async def close(self):
        """Close both clients, then run base-class cleanup.

        NOTE(review): when client sharing is enabled this closes the *shared*
        clients as soon as one instance closes -- confirm instances are always
        torn down together.
        """
        self.service_client.close()
        await self.async_service_client.close()
        await super().close()
    @staticmethod
    def add_arguments(parser):
        """Register CLI options common to every Service Bus perf test."""
        super(_ServiceTest, _ServiceTest).add_arguments(parser)
        parser.add_argument('--message-size', nargs='?', type=int, help='Size of a single message. Defaults to 100 bytes', default=100)
        parser.add_argument('--no-client-share', action='store_true', help='Create one ServiceClient per test instance. Default is to share a single ServiceClient.', default=False)
        parser.add_argument('--num-messages', nargs='?', type=int, help='Number of messages to send or receive. Defaults to 100', default=100)
class _QueueTest(_ServiceTest):
    """Adds a uniquely-named test queue, created before and deleted after the run."""
    # One random queue name shared by the whole test class / process.
    queue_name = "perfstress-" + str(uuid.uuid4())
    def __init__(self, arguments):
        super().__init__(arguments)
        connection_string = self.get_from_env("AZURE_SERVICEBUS_CONNECTION_STRING")
        # Management-plane client used only for queue create/delete.
        self.async_mgmt_client = ServiceBusAdministrationClient.from_connection_string(connection_string)
    async def global_setup(self):
        """Create the test queue once for the whole run."""
        await super().global_setup()
        await self.async_mgmt_client.create_queue(self.queue_name, max_size_in_megabytes=MAX_QUEUE_SIZE)
    async def global_cleanup(self):
        """Delete the test queue after all iterations complete."""
        await self.async_mgmt_client.delete_queue(self.queue_name)
        await super().global_cleanup()
    async def close(self):
        await self.async_mgmt_client.close()
        await super().close()
class _SendTest(_QueueTest):
    """Base class for send benchmarks: owns one sync and one async queue sender."""
    def __init__(self, arguments):
        super().__init__(arguments)
        # Bug fix: _QueueTest.__init__ already creates self.async_mgmt_client;
        # re-creating it here overwrote (and leaked) the parent's never-closed
        # client, so the duplicate construction is removed.
        self.sender = self.service_client.get_queue_sender(self.queue_name)
        self.async_sender = self.async_service_client.get_queue_sender(self.queue_name)
    async def close(self):
        """Close the senders before the parent tears down the queue and clients."""
        self.sender.close()
        await self.async_sender.close()
        await super().close()
class _ReceiveTest(_QueueTest):
    """Base class for receive benchmarks: creates sync and async receivers and
    preloads the queue with messages before iterations start."""
    def __init__(self, arguments):
        super().__init__(arguments)
        # PEEK_LOCK requires explicit settlement by the test; RECEIVE_AND_DELETE
        # removes messages from the queue as they are delivered.
        mode = ServiceBusReceiveMode.PEEK_LOCK if self.args.peeklock else ServiceBusReceiveMode.RECEIVE_AND_DELETE
        self.receiver = self.service_client.get_queue_receiver(
            queue_name=self.queue_name,
            receive_mode=mode,
            prefetch_count=self.args.num_messages,
            max_wait_time=self.args.max_wait_time or None)
        self.async_receiver = self.async_service_client.get_queue_receiver(
            queue_name=self.queue_name,
            receive_mode=mode,
            prefetch_count=self.args.num_messages,
            max_wait_time=self.args.max_wait_time or None)
    async def _preload_queue(self):
        """Send self.args.preload messages of message_size random bytes, batching
        as many messages per send as the service allows."""
        data = get_random_bytes(self.args.message_size)
        async with self.async_service_client.get_queue_sender(self.queue_name) as sender:
            batch = await sender.create_message_batch()
            for i in range(self.args.preload):
                try:
                    batch.add_message(ServiceBusMessage(data))
                except ValueError:
                    # Batch full
                    await sender.send_messages(batch)
                    print("Loaded {} messages".format(i))
                    batch = await sender.create_message_batch()
                    batch.add_message(ServiceBusMessage(data))
            # Flush the final (possibly partial) batch.
            await sender.send_messages(batch)
    async def global_setup(self):
        """Create the queue (parent), then fill it before any test iterations."""
        await super().global_setup()
        await self._preload_queue()
    async def close(self):
        self.receiver.close()
        await self.async_receiver.close()
        await super().close()
    @staticmethod
    def add_arguments(parser):
        """Register receive-specific CLI options on top of the common ones."""
        super(_ReceiveTest, _ReceiveTest).add_arguments(parser)
        parser.add_argument('--peeklock', action='store_true', help='Receive using PeekLock mode and message settlement.', default=False)
        parser.add_argument('--max-wait-time', nargs='?', type=int, help='Max time to wait for messages before closing. Defaults to 0.', default=0)
        parser.add_argument('--preload', nargs='?', type=int, help='Number of messages to preload. Default is 10000.', default=10000)
| 4,534 | 514 | 149 |
9c6c38e7013c1655dd41c7d2f0d93f4df08fa931 | 1,652 | py | Python | mandel.py | SalahKouhen/Mandel_surf | 3c2c49085efa190213d782ceba05d40a1dd3e155 | [
"MIT"
] | null | null | null | mandel.py | SalahKouhen/Mandel_surf | 3c2c49085efa190213d782ceba05d40a1dd3e155 | [
"MIT"
] | null | null | null | mandel.py | SalahKouhen/Mandel_surf | 3c2c49085efa190213d782ceba05d40a1dd3e155 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
nx, ny = (1000,1000)
x = np.linspace(-2,1,nx)
y = np.linspace(-1.5,1.5,ny)
X, Y = np.meshgrid(x,y)
cgrid = X + 1j*Y
# For some numbers c doing z^2 + c again and again from 0 will diverge, not for others, plot it to get the mandelbrot set
Z = 0*cgrid
ZC = Z
for i in range(1,50):
Z = np.power(Z,2) + cgrid
ZC[Z>1000] = i
ZC = np.abs(ZC)
#fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
#surf = ax.plot_surface(X, Y, Z, linewidth=0, antialiased=False, cmap=cm.coolwarm)
mycount = [1]
# Get the mouse click
print(ZC)
fig,ax = plt.subplots()
plt.pcolormesh(X,Y,ZC)
fig.canvas.mpl_connect('button_press_event', onclick)
#fig.canvas.mpl_connect('button_press_event', lambda event: onclick(event, mycount))
'''
ax.set_xlim(-4.01, 4.01)
ax.set_ylim(-4.01, 4.01)
'''
plt.show()
'''
value = np.abs(grid)**(-1)
print(value)
value.flatten()
colour = np.stack((value,value,value))
print(colour)
fig = plt.figure()
ax = plt.axes(xlim=(-1,1),ylim=(-1,1))
ax.scatter(xv,yv,c=colour)
''' | 20.146341 | 121 | 0.62046 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
nx, ny = (1000,1000)
x = np.linspace(-2,1,nx)
y = np.linspace(-1.5,1.5,ny)
X, Y = np.meshgrid(x,y)
cgrid = X + 1j*Y
# For some numbers c doing z^2 + c again and again from 0 will diverge, not for others, plot it to get the mandelbrot set
Z = 0*cgrid
ZC = Z
for i in range(1,50):
Z = np.power(Z,2) + cgrid
ZC[Z>1000] = i
ZC = np.abs(ZC)
#fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
#surf = ax.plot_surface(X, Y, Z, linewidth=0, antialiased=False, cmap=cm.coolwarm)
mycount = [1]
# Get the mouse click
def onclick(event):
    """Matplotlib mouse-press callback: zoom the Mandelbrot view in on the
    clicked point and redraw.

    Each click halves the window half-width (scale = 3 / 2**clicks) and raises
    the iteration cap (80 + clicks*10) so finer detail resolves at depth.
    """
    # mycount is a module-level single-element list used as a mutable click counter.
    mycount[0] = mycount[0] + 1
    plt.clf()
    print(event.xdata, event.ydata)
    # NOTE(review): a click outside the axes yields event.xdata/ydata == None,
    # which would crash np.linspace below -- confirm that is acceptable.
    nx, ny = (500,500)
    scale = 3/(2**mycount[0])
    x = np.linspace(event.xdata - scale,event.xdata + scale,nx)
    y = np.linspace(event.ydata - scale,event.ydata + scale,ny)
    X, Y = np.meshgrid(x,y)
    # Complex grid of candidate c values centered on the click.
    cgrid = X + 1j*Y
    Z = 0*cgrid
    # ZC starts as the same zero array; Z is rebound to a new array each
    # iteration below, so writes into ZC do not affect the evolving Z.
    ZC = Z
    for i in range(1,80 + mycount[0]*10):
        Z = np.power(Z,2) + cgrid
        # Record the iteration index at which each point has escaped (|Z| large).
        ZC[Z>1000] = i
    ZC = np.abs(ZC)
    plt.pcolormesh(X,Y,ZC)
    plt.pause(0.1)
    print(mycount[0])
print(ZC)
fig,ax = plt.subplots()
plt.pcolormesh(X,Y,ZC)
fig.canvas.mpl_connect('button_press_event', onclick)
#fig.canvas.mpl_connect('button_press_event', lambda event: onclick(event, mycount))
'''
ax.set_xlim(-4.01, 4.01)
ax.set_ylim(-4.01, 4.01)
'''
plt.show()
'''
value = np.abs(grid)**(-1)
print(value)
value.flatten()
colour = np.stack((value,value,value))
print(colour)
fig = plt.figure()
ax = plt.axes(xlim=(-1,1),ylim=(-1,1))
ax.scatter(xv,yv,c=colour)
''' | 538 | 0 | 23 |
d2a28cf08377e6534f06c4530d85b60c48e5b7d9 | 43 | py | Python | examples/DecryptLoginExamples/crawlers/moocdl/__init__.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
] | null | null | null | examples/DecryptLoginExamples/crawlers/moocdl/__init__.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
] | null | null | null | examples/DecryptLoginExamples/crawlers/moocdl/__init__.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
] | null | null | null | '''initialize'''
from .moocdl import MOOCDL | 21.5 | 26 | 0.744186 | '''initialize'''
from .moocdl import MOOCDL | 0 | 0 | 0 |
83611bbfe1bc34d84dab3f4540c560a0d14a5ef5 | 231 | py | Python | chainerchem/links/__init__.py | corochann/chainerchem | 8e918e557fe9bce865d9d543ea2864d027827941 | [
"MIT"
] | 2 | 2018-03-05T18:23:47.000Z | 2018-04-12T05:00:40.000Z | chainerchem/links/__init__.py | corochann/chainerchem | 8e918e557fe9bce865d9d543ea2864d027827941 | [
"MIT"
] | null | null | null | chainerchem/links/__init__.py | corochann/chainerchem | 8e918e557fe9bce865d9d543ea2864d027827941 | [
"MIT"
] | null | null | null | from chainerchem.links import embed_atom_id # NOQA
from chainerchem.links import graph_linear # NOQA
from chainerchem.links.embed_atom_id import EmbedAtomID # NOQA
from chainerchem.links.graph_linear import GraphLinear # NOQA
| 38.5 | 63 | 0.831169 | from chainerchem.links import embed_atom_id # NOQA
from chainerchem.links import graph_linear # NOQA
from chainerchem.links.embed_atom_id import EmbedAtomID # NOQA
from chainerchem.links.graph_linear import GraphLinear # NOQA
| 0 | 0 | 0 |
eb6f8632414e8d89757d926482f8c1b445c62661 | 2,637 | py | Python | ichnaea/async/task.py | JaredKerim-Mozilla/ichnaea | cfaef2b903960374050be3ea2e4c1520687de56b | [
"Apache-1.1"
] | null | null | null | ichnaea/async/task.py | JaredKerim-Mozilla/ichnaea | cfaef2b903960374050be3ea2e4c1520687de56b | [
"Apache-1.1"
] | null | null | null | ichnaea/async/task.py | JaredKerim-Mozilla/ichnaea | cfaef2b903960374050be3ea2e4c1520687de56b | [
"Apache-1.1"
] | null | null | null | from celery import Task
from kombu.serialization import (
dumps as kombu_dumps,
loads as kombu_loads,
)
from ichnaea.cache import redis_pipeline
from ichnaea.db import db_worker_session
| 32.158537 | 78 | 0.633675 | from celery import Task
from kombu.serialization import (
dumps as kombu_dumps,
loads as kombu_loads,
)
from ichnaea.cache import redis_pipeline
from ichnaea.db import db_worker_session
class BaseTask(Task):
    """Project base class for celery tasks.

    Adds per-task timing stats, Sentry (raven) error capture with optional
    automatic retry, an eager-mode serialization round-trip check, and
    convenience accessors for the app's shared service clients.
    """
    abstract = True
    acks_late = False
    ignore_result = True
    max_retries = 3
    # When True, failures trigger self.retry() unless running eagerly.
    _auto_retry = True
    # Cache slot filled by the shortname property on first access.
    _shortname = None
    @property
    def shortname(self):
        """Shortened task name used as the stats key (cached after first use)."""
        short = self._shortname
        if short is None:
            # strip off ichnaea prefix and tasks module
            segments = self.name.split('.')
            segments = [s for s in segments if s not in ('ichnaea', 'tasks')]
            short = self._shortname = '.'.join(segments)
        return short
    def __call__(self, *args, **kw):
        """Run the task body, timing it and reporting any failure to Sentry."""
        with self.stats_client.timer('task.' + self.shortname):
            try:
                result = super(BaseTask, self).__call__(*args, **kw)
            except Exception as exc:
                self.raven_client.captureException()
                if self._auto_retry and not self.app.conf.CELERY_ALWAYS_EAGER:
                    raise self.retry(exc=exc)  # pragma: no cover
                raise
            return result
    def apply(self, *args, **kw):
        # This method is only used when calling tasks directly and blocking
        # on them. It's also used if always_eager is set, like in tests.
        # Using this in real code should be rare, so the extra overhead of
        # the check shouldn't matter.
        if self.app.conf.CELERY_ALWAYS_EAGER:
            # We do the extra check to make sure this was really used from
            # inside tests
            # We feed the task arguments through the de/serialization process
            # to make sure the arguments can indeed be serialized.
            serializer = self.app.conf.CELERY_TASK_SERIALIZER
            content_type, encoding, data = kombu_dumps(args, serializer)
            args = kombu_loads(data, content_type, encoding)
        return super(BaseTask, self).apply(*args, **kw)
    def redis_pipeline(self, execute=True):
        # returns a context manager
        return redis_pipeline(self.redis_client, execute=execute)
    def db_session(self, commit=True):
        # returns a context manager
        return db_worker_session(self.app.db_rw, commit=commit)
    @property
    def geoip_db(self):  # pragma: no cover
        return self.app.geoip_db
    @property
    def raven_client(self):
        return self.app.raven_client
    @property
    def redis_client(self):
        return self.app.redis_client
    @property
    def stats_client(self):
        return self.app.stats_client
| 1,972 | 446 | 23 |
fd072394fc780b200faf589c14c8b1f92d7586d1 | 1,494 | py | Python | cogs/utilities.py | PhilipMottershead/Dicebot | 8b282d3dd77be82c1f990c35385f11f3b8bd0371 | [
"MIT"
] | null | null | null | cogs/utilities.py | PhilipMottershead/Dicebot | 8b282d3dd77be82c1f990c35385f11f3b8bd0371 | [
"MIT"
] | null | null | null | cogs/utilities.py | PhilipMottershead/Dicebot | 8b282d3dd77be82c1f990c35385f11f3b8bd0371 | [
"MIT"
] | null | null | null | from discord.ext import commands
from discord.ext.commands import Context
from diceBot import roller
class Utilities(commands.Cog):
"""
General Utilities
"""
@commands.command()
async def ping(self, ctx: Context):
"""
Status check
"""
import time
start_time = time.time()
message = await ctx.send('pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms`')
end_time = time.time()
await message.edit(content='pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms` ' +
'`Response time: ' + str(int((end_time - start_time) * 1000)) + 'ms`')
@commands.command()
async def source(self, ctx: Context):
"""
Print a link to the source code
"""
await ctx.send(content='Created by Philip Mottershead'
'https://github.com/PhilipMottershead/Dicebot')
@commands.command()
async def feedback(self, ctx: Context):
"""
Report feedback or issues with the bot
"""
await ctx.send('If the bot is broken or you have any feedback you\'d like to submit please create a issue on '
'GitHub: https://github.com/PhilipMottershead/Dicebot')
@commands.command()
async def r(self, ctx: Context):
"""
Report feedback or issues with the bot
"""
await ctx.send(roller.rollDices(ctx.message.content))
| 33.954545 | 118 | 0.576305 | from discord.ext import commands
from discord.ext.commands import Context
from diceBot import roller
class Utilities(commands.Cog):
    """
    General Utilities
    """
    @commands.command()
    async def ping(self, ctx: Context):
        """
        Status check: report gateway latency and round-trip response time.
        """
        import time
        start_time = time.time()
        message = await ctx.send('pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms`')
        end_time = time.time()
        # Edit the sent message in place to append the measured round trip.
        await message.edit(content='pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms` ' +
                                   '`Response time: ' + str(int((end_time - start_time) * 1000)) + 'ms`')
    @commands.command()
    async def source(self, ctx: Context):
        """
        Print a link to the source code
        """
        await ctx.send(content='Created by Philip Mottershead'
                               'https://github.com/PhilipMottershead/Dicebot')
    @commands.command()
    async def feedback(self, ctx: Context):
        """
        Report feedback or issues with the bot
        """
        await ctx.send('If the bot is broken or you have any feedback you\'d like to submit please create a issue on '
                       'GitHub: https://github.com/PhilipMottershead/Dicebot')
    @commands.command()
    async def r(self, ctx: Context):
        """
        Roll dice: pass the raw message content to roller.rollDices and send the result.
        """
        await ctx.send(roller.rollDices(ctx.message.content))
cc103e6ed023370d51229ecddca11aaa38ef4a7e | 2,301 | py | Python | RSSReader.py | patel347/Yorazuya-Bot | 4ae77ca08c4f72ea84706b40ff551b5e2cb08dfb | [
"MIT"
] | null | null | null | RSSReader.py | patel347/Yorazuya-Bot | 4ae77ca08c4f72ea84706b40ff551b5e2cb08dfb | [
"MIT"
] | 1 | 2021-03-31T19:12:31.000Z | 2021-03-31T19:12:31.000Z | RSSReader.py | Kyutel/Yorazuya-Bot | 4ae77ca08c4f72ea84706b40ff551b5e2cb08dfb | [
"MIT"
] | null | null | null | import feedparser
import time
class RSSReader:
"""Class built upon feedparser to get new items from an rss feed"""
DATA_FILE = 'RSSData.txt'
DATA_FILE = 'RSSData.txt'
| 31.520548 | 92 | 0.614081 | import feedparser
import time
class RSSReader:
    """Read an RSS feed via feedparser and report only items newer than the
    last run, persisting the newest published date seen in DATA_FILE."""

    # Marker file holding the RFC-822 publish date of the newest item seen.
    # (A duplicate DATA_FILE assignment was removed -- it was defined twice.)
    DATA_FILE = 'RSSData.txt'

    def __init__(self, rssLink):
        self.rssLink = rssLink

    def getDateOfLatestRead(self):
        """Return the saved latest-read date as a time.struct_time, creating
        DATA_FILE with a fixed initial date on first run."""
        dateOfLatestRead = None
        try:
            # `with` guarantees the handle is closed (was a plain open/close).
            with open(RSSReader.DATA_FILE, 'r') as dataFile:
                dateOfLatestRead = dataFile.readline()
        except FileNotFoundError:
            print("file not found, making file")
            dateOfLatestRead = 'Sat, 05 Aug 2017 19:34:59 +0000'  # date this file was created
            with open(RSSReader.DATA_FILE, 'w+') as dataFile:
                dataFile.write(dateOfLatestRead)
        return time.strptime(dateOfLatestRead, "%a, %d %b %Y %H:%M:%S %z")

    def setDateOfLatestRead(self, dateToSet):
        """Overwrite DATA_FILE with the RFC-822 date string *dateToSet*."""
        with open(RSSReader.DATA_FILE, 'w+') as dataFile:
            dataFile.write(dateToSet)

    def getNewItems(self, latestDateRead):
        """Return feed entries published after *latestDateRead* (oldest-first
        via insert(0, ...)) and persist the newest publish date seen.

        Returns [] for an empty feed (previously raised IndexError on
        feed.entries[0])."""
        feed = feedparser.parse(self.rssLink)
        if not feed.entries:
            return []
        newLatestDateReadParsed = feed.entries[0].published_parsed
        newLatestDateRead = feed.entries[0].published
        newItems = []
        for item in feed.entries:
            # Manual parsing: the saved date was parsed the same way and carries
            # a -1 is_dst value, which made comparisons against
            # feedparser-provided struct_times incorrect.
            item.parsedDate = time.strptime(item.published, "%a, %d %b %Y %H:%M:%S %z")
            if latestDateRead < item.parsedDate:
                newItems.insert(0, item)
                if item.parsedDate > newLatestDateReadParsed:
                    newLatestDateRead = item.published
                    newLatestDateReadParsed = item.parsedDate
        self.setDateOfLatestRead(newLatestDateRead)
        return newItems

    def printToConsole(self):
        # NOTE(review): parses the module-level RSS_LINK but never prints or
        # returns anything -- looks unfinished; confirm intent.
        feed = feedparser.parse(RSS_LINK)
RSS_LINK = 'http://euw.leagueoflegends.com/en/rss.xml'
# NOTE(review): the calls below use getDateOfLatestRead/getNewItems as free
# functions, but they are defined above as RSSReader *methods* -- as written
# this module-level code raises NameError on import; confirm the intended
# entry point (likely RSSReader(RSS_LINK) followed by method calls).
latestDateRead = getDateOfLatestRead()
newItems = getNewItems(latestDateRead)
if newItems != None:
    for item in newItems:
        print(item.title.encode('utf8'))
        print('\n')
    input()
| 1,983 | 0 | 135 |
bc91e141bb675209d3ac6e0bc451b35ae04e5206 | 150 | py | Python | module2.py | arajajyothibabu/PythonLearning | 53658ba3591e284733ef8a66551dadd515ab8edc | [
"MIT"
] | null | null | null | module2.py | arajajyothibabu/PythonLearning | 53658ba3591e284733ef8a66551dadd515ab8edc | [
"MIT"
] | null | null | null | module2.py | arajajyothibabu/PythonLearning | 53658ba3591e284733ef8a66551dadd515ab8edc | [
"MIT"
] | null | null | null | __author__ = 'Kalyan'
# this is a sample module for the understanding_modules assignment.
| 18.75 | 67 | 0.726667 | __author__ = 'Kalyan'
# this is a sample module for the understanding_modules assignment.
def greet(name):
    """Return module2's greeting for *name*."""
    prefix = "module2 says hi to "
    return "".join((prefix, name))
| 35 | 0 | 23 |
6e7925f4490b60b4578119586daf14599947939b | 176 | py | Python | src/blendvis/primitives/__init__.py | benjimaclellan/blendvis | f8e1b9a88d2d732a02b8c537f4b507a0b4a1684d | [
"MIT"
] | null | null | null | src/blendvis/primitives/__init__.py | benjimaclellan/blendvis | f8e1b9a88d2d732a02b8c537f4b507a0b4a1684d | [
"MIT"
] | null | null | null | src/blendvis/primitives/__init__.py | benjimaclellan/blendvis | f8e1b9a88d2d732a02b8c537f4b507a0b4a1684d | [
"MIT"
] | null | null | null | from blendvis.primitives.primitives import FontPrimitive, LinePrimitive, CubePrimitive, \
CameraPrimitive, SpherePrimitive, CurvePrimitive, GreasePencilPrimitive, Primitive | 88 | 89 | 0.857955 | from blendvis.primitives.primitives import FontPrimitive, LinePrimitive, CubePrimitive, \
CameraPrimitive, SpherePrimitive, CurvePrimitive, GreasePencilPrimitive, Primitive | 0 | 0 | 0 |
73f42c1536b7cbae9884bce03cfe3067637e0ad1 | 3,681 | py | Python | get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | [
"MIT"
] | null | null | null | get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | [
"MIT"
] | null | null | null | get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | [
"MIT"
] | null | null | null | import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
"""
returns historical bar data for a string of symbols separated by comma
symbols should be in a string format separated by comma e.g. symbols = "MSFT,AMZN,GOOG"
"""
df_data_tickers = {}
for symbol in symbols:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start, "limit" :limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token":'', "symbol":symbol}
while True:
r = requests.get(bar_url, headers = headers, params = params)
r = r.json()
if r["next_page_token"] == None:
data["bars"]+=r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"]+=r["bars"]
data["next_page_token"] = r["next_page_token"]
df_data = pd.DataFrame(data["bars"])
df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
df_data["time"] = pd.to_datetime(df_data["time"])
df_data.set_index("time",inplace=True)
df_data.index = df_data.index.tz_convert("America/Indiana/Petersburg")
df_data_tickers[symbol] = df_data
return df_data_tickers
def get_historical_data(ticker_list, start_date, end_date=None, limit=10000, timeframe="1Day"):
"""
returns historical bar data for a string of symbols separated by comma
symbols should be in a string format separated by comma e.g. symbols = "MSFT,AMZN,GOOG"
* timeframe - Timeframe for the aggregation. Available values are: `1Min`, `1Hour`, `1Day`
https://alpaca.markets/docs/api-documentation/api-v2/market-data/alpaca-data-api-v2/historical/#bars
"""
df_data_tickers = {}
for symbol in ticker_list:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start_date, "end": end_date, "limit": limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token": '', "symbol": symbol}
# r = requests.get(bar_url, headers=headers, params=params)
# r = r.json()
# data["bars"] += r["bars"]
while True:
r = requests.get(bar_url, headers=headers, params=params)
r = r.json()
try:
if r["next_page_token"] == None:
data["bars"] += r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"] += r["bars"]
data["next_page_token"] = r["next_page_token"]
except:
break
# Create a DataFrame for the data["bars"] of each stock
df_data = pd.DataFrame(data["bars"])
df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
try:
df_data["time"] = pd.to_datetime(df_data["time"])
df_data.set_index("time",inplace=True)
df_data.index = df_data.index.tz_convert("America/New_York")
df_data_tickers[symbol] = df_data
except:
pass
print("---- Created for [{}]".format(symbol))
return df_data_tickers
| 39.159574 | 115 | 0.551481 | import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
    """Fetch historical bar data from the Alpaca v2 data API.

    Parameters:
        symbols: iterable of ticker strings, e.g. ["MSFT", "AMZN", "GOOG"]
        start: ISO start date for the bars
        timeframe: aggregation window ("1Min", "1Hour", "1Day")
        limit: maximum bars per page request
        end: optional ISO end date; when empty the API default is used

    Returns:
        dict mapping each symbol to a DataFrame of open/high/low/close/volume
        indexed by bar time (converted to America/Indiana/Petersburg).
    """
    df_data_tickers = {}
    for symbol in symbols:
        bar_url = endpoint + "/stocks/{}/bars".format(symbol)
        params = {"start": start, "limit": limit, "timeframe": timeframe}
        # Bug fix: the `end` argument was accepted but never forwarded to the
        # API; pass it through when provided (default "" keeps old behavior).
        if end:
            params["end"] = end
        data = {"bars": [], "next_page_token": '', "symbol": symbol}
        # Follow pagination tokens until the API reports no more pages.
        while True:
            r = requests.get(bar_url, headers=headers, params=params)
            r = r.json()
            if r["next_page_token"] is None:
                data["bars"] += r["bars"]
                break
            else:
                params["page_token"] = r["next_page_token"]
                data["bars"] += r["bars"]
                data["next_page_token"] = r["next_page_token"]
        df_data = pd.DataFrame(data["bars"])
        # Alpaca's single-letter bar keys -> descriptive column names.
        df_data.rename({"t": "time", "o": "open", "h": "high", "l": "low", "c": "close", "v": "volume"}, axis=1, inplace=True)
        df_data["time"] = pd.to_datetime(df_data["time"])
        df_data.set_index("time", inplace=True)
        df_data.index = df_data.index.tz_convert("America/Indiana/Petersburg")
        df_data_tickers[symbol] = df_data
    return df_data_tickers
def get_historical_data(ticker_list, start_date, end_date=None, limit=10000, timeframe="1Day"):
    """Fetch historical bar data from the Alpaca v2 data API.

    ticker_list: iterable of ticker symbol strings, e.g. ["MSFT", "AMZN"]
    start_date / end_date: ISO dates bounding the request
    limit: maximum bars per page request
    timeframe: aggregation window; one of `1Min`, `1Hour`, `1Day`
    https://alpaca.markets/docs/api-documentation/api-v2/market-data/alpaca-data-api-v2/historical/#bars

    Returns a dict mapping each symbol to a DataFrame indexed by bar time
    (converted to America/New_York). Symbols whose response cannot be parsed
    are silently omitted from the result.
    """
    df_data_tickers = {}
    for symbol in ticker_list:
        bar_url = endpoint + "/stocks/{}/bars".format(symbol)
        params = {"start":start_date, "end": end_date, "limit": limit, "timeframe":timeframe}
        data = {"bars": [], "next_page_token": '', "symbol": symbol}
        # Page through results until the API stops returning a next_page_token.
        while True:
            r = requests.get(bar_url, headers=headers, params=params)
            r = r.json()
            try:
                if r["next_page_token"] == None:
                    data["bars"] += r["bars"]
                    break
                else:
                    params["page_token"] = r["next_page_token"]
                    data["bars"] += r["bars"]
                    data["next_page_token"] = r["next_page_token"]
            except:
                # NOTE(review): bare except hides API errors (responses without
                # these keys, e.g. auth/rate-limit failures); the symbol's data
                # is silently truncated at this point.
                break
        # Build a DataFrame from the accumulated bars for this symbol.
        df_data = pd.DataFrame(data["bars"])
        # Alpaca's single-letter bar keys -> descriptive column names.
        df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
        try:
            df_data["time"] = pd.to_datetime(df_data["time"])
            df_data.set_index("time",inplace=True)
            df_data.index = df_data.index.tz_convert("America/New_York")
            df_data_tickers[symbol] = df_data
        except:
            # NOTE(review): bare except -- symbols that returned no bars (so no
            # "time" column) are skipped without any warning.
            pass
        print("---- Created for [{}]".format(symbol))
    return df_data_tickers
| 0 | 0 | 0 |
0d2ec34acd2b78a28677252958e616fde99ac3d1 | 597 | py | Python | stl_path.py | theodorsm/trex-scripts | 5d12e913c8c725f692d62f7458c1a49fb93d4c5b | [
"MIT"
] | 1 | 2022-02-07T22:02:44.000Z | 2022-02-07T22:02:44.000Z | stl_path.py | theodorsm/trex-scripts | 5d12e913c8c725f692d62f7458c1a49fb93d4c5b | [
"MIT"
] | null | null | null | stl_path.py | theodorsm/trex-scripts | 5d12e913c8c725f692d62f7458c1a49fb93d4c5b | [
"MIT"
] | 2 | 2022-02-07T22:02:45.000Z | 2022-03-11T23:10:33.000Z | import sys, os
from dotenv import dotenv_values
config = dotenv_values(".env")
cur_dir = os.path.dirname(__file__)
trex_path = f"{config['TREX_LOCATION']}/{config['TREX_VERSION']}"
interactive = os.path.abspath(f"{trex_path}/automation/trex_control_plane/interactive")
sys.path.insert(0, os.path.abspath(interactive))
STL_PROFILES_PATH = os.path.join(f"{trex_path}/stl")
EXT_LIBS_PATH = os.path.abspath(f"{trex_path}/external_libs")
assert os.path.isdir(STL_PROFILES_PATH), "Could not determine STL profiles path"
assert os.path.isdir(EXT_LIBS_PATH), "Could not determine external_libs path"
| 35.117647 | 87 | 0.782245 | import sys, os
from dotenv import dotenv_values
# Resolve the TRex installation root from .env (TREX_LOCATION / TREX_VERSION).
config = dotenv_values(".env")
cur_dir = os.path.dirname(__file__)
trex_path = f"{config['TREX_LOCATION']}/{config['TREX_VERSION']}"
# Make the TRex interactive client package importable for callers of this module.
interactive = os.path.abspath(f"{trex_path}/automation/trex_control_plane/interactive")
sys.path.insert(0, os.path.abspath(interactive))
# Standard TRex stateless-profile and bundled-library locations.
STL_PROFILES_PATH = os.path.join(f"{trex_path}/stl")
EXT_LIBS_PATH = os.path.abspath(f"{trex_path}/external_libs")
# NOTE(review): asserts are stripped under `python -O`; consider raising instead.
assert os.path.isdir(STL_PROFILES_PATH), "Could not determine STL profiles path"
assert os.path.isdir(EXT_LIBS_PATH), "Could not determine external_libs path"
| 0 | 0 | 0 |
98da454bb8184e678d9da3b5c4db075b9b0f7815 | 3,237 | py | Python | Examples/PDFTool/DealPdf1_cmd.py | wxh0000mm/TKinterDesigner | 01878e78746082413a09444283edbd52118d15ef | [
"Apache-2.0"
] | 1 | 2022-03-09T08:43:41.000Z | 2022-03-09T08:43:41.000Z | Examples/PDFTool/DealPdf1_cmd.py | wxh0000mm/TKinterDesigner | 01878e78746082413a09444283edbd52118d15ef | [
"Apache-2.0"
] | null | null | null | Examples/PDFTool/DealPdf1_cmd.py | wxh0000mm/TKinterDesigner | 01878e78746082413a09444283edbd52118d15ef | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
import sys
import os
from os.path import abspath, dirname
sys.path.append(abspath(dirname(__file__)))
import tkinter
import tkinter.filedialog
from tkinter import *
import Fun
ElementBGArray={}
ElementBGArray_Resize={}
ElementBGArray_IM={}
from PyPDF2 import PdfFileReader, PdfFileWriter
| 34.073684 | 94 | 0.621563 | #coding=utf-8
import sys
import os
from os.path import abspath, dirname
sys.path.append(abspath(dirname(__file__)))
import tkinter
import tkinter.filedialog
from tkinter import *
import Fun
ElementBGArray={}
ElementBGArray_Resize={}
ElementBGArray_IM={}
from PyPDF2 import PdfFileReader, PdfFileWriter
def getRange(srcList, pageNo):
    """Return the smallest split boundary strictly greater than *pageNo*,
    as its original string, or "0" when pageNo is past every boundary.

    srcList holds page-number boundaries as strings, e.g. ["10", "35", "100"].
    """
    # Bug fix: sort numerically. A plain sorted() on strings is lexicographic
    # ("100" < "35"), which made e.g. pageNo=20 map to "100" instead of "35".
    for item in sorted(srcList, key=int):
        if pageNo < int(item):
            return item
    return "0"
def showMsg(uiName,msg):
    """Append *msg* as a new row at the bottom of the form's ListBox_13 log."""
    listBox = Fun.GetElement(uiName,"ListBox_13")
    listBox.insert(tkinter.END, msg)
def Form_1_onLoad(uiName):
    """Form-load handler: seed Entry_8 with the example split points "10,35,100"."""
    Fun.SetText(uiName, "Entry_8","10,35,100")
def Button_3_onCommand(uiName,widgetName):
    """Choose-file handler: ask for a PDF, show its path in Entry_4 and its
    page count in Entry_6. (Dialog title is Chinese for "Select file".)"""
    filePath= tkinter.filedialog.askopenfilename(initialdir=os.path.abspath('.'),title='选择文件')
    Fun.SetText(uiName,"Entry_4",filePath)
    # NOTE(review): `input` shadows the builtin, and the file handle passed to
    # PdfFileReader is never explicitly closed.
    input = PdfFileReader(open(filePath, "rb"))
    pageCount = input.getNumPages()
    Fun.SetText(uiName,"Entry_6",pageCount)
def Button_12_onCommand(uiName,widgetName):
    """Split-button handler: split the PDF named in Entry_4 at the
    comma-separated page numbers in Entry_8, writing one <boundary>.pdf per
    range into the source file's directory and logging progress to the list box.
    """
    # NOTE(review): openPath is never used afterwards -- this directory dialog
    # (title: "open directory to browse") looks vestigial; confirm removal.
    openPath= tkinter.filedialog.askdirectory(initialdir=os.path.abspath('.'),title='打开目录查找')
    # Open the selected PDF; message box text: "file error, please check".
    try:
        filePath = Fun.GetText(uiName,"Entry_4")
        # NOTE(review): `input` shadows the builtin; file handle never closed.
        input = PdfFileReader(open(filePath, "rb"))
    except Exception as e:
        Fun.MessageBox("文件异常,请检查!")
        return
    pageCount = input.getNumPages()
    dirName = os.path.dirname(filePath)
    # Read the split specification; message: "bad data format, please re-enter".
    content = Fun.GetText(uiName,"Entry_8")
    if(len(content) <= 0):
        Fun.MessageBox("数据格式不对,请重新输入")
        return
    strList = content.split(",")
    # Validate every entry is a number strictly below the page count;
    # message: "pages to split cannot exceed the total page count".
    try:
        for i in strList:
            if(len(i) <= 0):
                Fun.MessageBox("数据格式不对,请重新输入")
                return
            pageNum = int(i)
            if(pageNum >= pageCount):
                Fun.MessageBox("要分割的页数不能超过总页数啊!")
                return
    except Exception as e:
        print(e)
        Fun.MessageBox("数据格式不对,请重新输入")
        return
    # Bucket every page into the range it belongs to; pages past the last
    # boundary go into a final bucket keyed by the total page count.
    outPutDic = {}
    for iPage in range(pageCount):
        rang = getRange(strList,iPage)
        if(rang == "0"):
            rang = str(pageCount)
        if(outPutDic.get(rang,-1) == -1):
            outPutDic[rang] = {"fileName":rang+".pdf","outPut":PdfFileWriter()}
            outPutDic[rang]['outPut'].addPage(input.getPage(iPage))
        else:
            # NOTE(review): this inner None-check is unreachable -- the
            # get(rang, -1) branch above already handled the missing-key case.
            if(outPutDic[rang] == None):
                outPutDic[rang] = {"fileName": rang + ".pdf", "outPut": PdfFileWriter()}
                outPutDic[rang]['outPut'].addPage(input.getPage(iPage))
            else:
                outPutDic[rang]['outPut'].addPage(input.getPage(iPage))
    # Write each bucket to <boundary>.pdf next to the source file and log it.
    for item in outPutDic.values():
        newFileName = os.path.join(dirName,item['fileName'])
        outputStream = open(newFileName, "wb")
        item['outPut'].write(outputStream)
        outputStream.close()
        msg = item['fileName'] + " has been created!"
        showMsg(uiName,msg)
    showMsg(uiName, "split pdf file over!")
'''
output = PdfFileWriter()
# 分别将page添加到输出output中
for iPage in range(int(strList[0])):
output.addPage(input.getPage(iPage))
newFileName = os.path.join(dirName,strList[0] + ".pdf")
outputStream = open(newFileName, "wb")
output.write(outputStream)
outputStream.close()
'''
| 3,006 | 0 | 110 |
536a6c5606d8009dfaf5fcd980a4c892a1731649 | 1,292 | py | Python | cart/tests.py | Zadigo/mycommerce | 145031ebb359389e680a820577a4b6b2d382646d | [
"MIT"
] | null | null | null | cart/tests.py | Zadigo/mycommerce | 145031ebb359389e680a820577a4b6b2d382646d | [
"MIT"
] | null | null | null | cart/tests.py | Zadigo/mycommerce | 145031ebb359389e680a820577a4b6b2d382646d | [
"MIT"
] | null | null | null | from django.test import Client
from django.test import RequestFactory, TestCase
from django.contrib.auth import get_user_model
from cart import views
| 35.888889 | 126 | 0.650155 | from django.test import Client
from django.test import RequestFactory, TestCase
from django.contrib.auth import get_user_model
from cart import views
def create_user():
    """Create and return a throwaway user for the cart tests."""
    user_model = get_user_model()
    return user_model.objects.create_user(
        username='lucile',
        email='lucile@gmail.com',
        password='touparette',
    )
class TestCartApi(TestCase):
    """Integration tests for the cart API endpoints."""
    # Preload cart records so the endpoints have data to operate on.
    fixtures = ['carts.json']
    # def test_cart_view(self):
    #     user = create_user()
    #     factory = RequestFactory()
    #     request = factory.post('api/v1/cart', data={'session_id': 'test_session'})
    #     response = views.cart_view(request)
    #     self.assertEqual(response.data['session_id'], 'test_session')
    #     self.assertEqual(len(response.data['results']), 1)
    def test_add_to_cart_view(self):
        """Call the cart view directly with an add-to-cart POST payload.

        NOTE(review): nothing is asserted on `response`, and a request built
        for the add endpoint is passed to `views.cart_view` rather than an
        add-specific view — confirm this is intentional.
        """
        factory = RequestFactory()
        request = factory.post('api/v1/cart/add', data={'product': 1, 'default_size': 'Unique', 'session_id': 'test_session'})
        response = views.cart_view(request)
    def test_add_to_cart(self):
        """POST to the add-to-cart endpoint via the test client, expect HTTP 200.

        NOTE(review): the URL has no leading slash — Django's test client
        conventionally takes absolute paths ('/api/v1/cart/add'); verify
        this actually resolves to the intended route.
        """
        client = Client()
        response = client.post('api/v1/cart/add', data={'product': 2, 'default_size': 'Unique', 'session_id': 'test_session'})
        self.assertEqual(response.status_code, 200)
| 615 | 476 | 46 |
a6887c47659cb1c368114491263022c8dfb6eef1 | 1,677 | py | Python | iceworm/trees/nodes/func.py | wrmsr0/iceworm | 09431bb3cdc4f6796aafca41e37d42ebe0ddfeef | [
"BSD-3-Clause"
] | null | null | null | iceworm/trees/nodes/func.py | wrmsr0/iceworm | 09431bb3cdc4f6796aafca41e37d42ebe0ddfeef | [
"BSD-3-Clause"
] | 1 | 2021-01-19T14:29:19.000Z | 2021-01-19T14:34:27.000Z | iceworm/trees/nodes/func.py | wrmsr0/iceworm | 09431bb3cdc4f6796aafca41e37d42ebe0ddfeef | [
"BSD-3-Clause"
] | 1 | 2020-12-31T22:29:52.000Z | 2020-12-31T22:29:52.000Z | import enum
import typing as ta
from omnibus import collections as col
from omnibus import dataclasses as dc
from .base import Expr
from .base import Identifier
from .base import Node
from .base import QualifiedNameNode
from .base import SetQuantifier
from .base import SortItem
| 19.729412 | 70 | 0.714967 | import enum
import typing as ta
from omnibus import collections as col
from omnibus import dataclasses as dc
from .base import Expr
from .base import Identifier
from .base import Node
from .base import QualifiedNameNode
from .base import SetQuantifier
from .base import SortItem
class Precedence(enum.Enum):
    """Direction of a frame bound relative to the current row (SQL PRECEDING/FOLLOWING)."""
    PRECEDING = 'preceding'
    FOLLOWING = 'following'
class FrameBound(Node, abstract=True):
    """Abstract base for window-frame bound nodes."""
    pass
class NumFrameBound(FrameBound):
    """Frame bound at a fixed row offset, e.g. ``3 PRECEDING``."""
    num: int  # offset from the current row
    precedence: Precedence  # PRECEDING or FOLLOWING
class UnboundedFrameBound(FrameBound):
    """Unbounded frame edge, e.g. ``UNBOUNDED PRECEDING``."""
    precedence: Precedence  # PRECEDING or FOLLOWING
class CurrentRowFrameBound(FrameBound):
    """Frame bound anchored at the current row (``CURRENT ROW``)."""
    pass
class Frame(Node, abstract=True):
    """Abstract base for window-frame specifications."""
    pass
class RowsOrRange(enum.Enum):
    """Unit a window frame is measured in (SQL ROWS vs. RANGE)."""
    ROWS = 'rows'
    RANGE = 'range'
class SingleFrame(Frame):
    """Frame with a single bound, e.g. ``ROWS 3 PRECEDING``."""
    rows_or_range: RowsOrRange
    bound: FrameBound
class DoubleFrame(Frame):
    """Frame with both edges, e.g. ``ROWS BETWEEN <min> AND <max>``."""
    rows_or_range: RowsOrRange
    min: FrameBound  # lower edge of the frame
    max: FrameBound  # upper edge of the frame
class Over(Node):
    """Window specification — the SQL ``OVER (...)`` clause."""
    partition_by: ta.Sequence[Expr] = dc.field((), coerce=col.seq)  # PARTITION BY expressions
    order_by: ta.Sequence[SortItem] = dc.field((), coerce=col.seq)  # ORDER BY items
    frame: ta.Optional[Frame] = None  # optional frame clause
class Kwarg(Node):
    """Named (keyword) argument in a function call."""
    name: Identifier
    value: Expr
class IgnoreOrRespect(enum.Enum):
    """Null-handling option for a call (``IGNORE NULLS`` / ``RESPECT NULLS``)."""
    IGNORE = 'ignore'
    RESPECT = 'respect'
class FunctionCall(Node):
    """A function invocation, including optional aggregate/window modifiers."""
    name: QualifiedNameNode  # possibly-qualified function name
    args: ta.Sequence[Expr] = dc.field((), coerce=col.seq)  # positional arguments
    kwargs: ta.Sequence[Kwarg] = dc.field((), coerce=col.seq)  # keyword arguments
    set_quantifier: ta.Optional[SetQuantifier] = None  # presumably DISTINCT/ALL — see base.SetQuantifier
    nulls: ta.Optional[IgnoreOrRespect] = None  # IGNORE/RESPECT NULLS
    within_group: ta.Sequence[SortItem] = dc.field((), coerce=col.seq)  # WITHIN GROUP ordering
    over: ta.Optional[Over] = None  # OVER (...) window spec
class FunctionCallExpr(Expr):
    """Expression node wrapping a FunctionCall."""
    call: FunctionCall
| 0 | 1,060 | 322 |
5daa7c7b545e3d2dadc43b6d602c383c8384eb54 | 329 | py | Python | game/exceptions.py | ikacikac/mtrix | 9d65ce4f9fb08bf302f3322039eb882e8116890e | [
"MIT"
] | null | null | null | game/exceptions.py | ikacikac/mtrix | 9d65ce4f9fb08bf302f3322039eb882e8116890e | [
"MIT"
] | null | null | null | game/exceptions.py | ikacikac/mtrix | 9d65ce4f9fb08bf302f3322039eb882e8116890e | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
| 11.75 | 41 | 0.735562 | # -*- coding: utf8 -*-
class MovementException(Exception):
    """Base exception for movement-related errors in the game."""
class RightException(MovementException):
    """Movement error for the rightward direction."""
class LeftException(MovementException):
    """Movement error for the leftward direction."""
class RotateException(MovementException):
    """Movement error raised for rotation moves."""
class ColException(MovementException):
    """Movement error for 'col' moves (column or collision — name is ambiguous; confirm with callers)."""
class DownException(MovementException):
    """Movement error for the downward direction."""
| 0 | 160 | 138 |
94a825233662f1870149a97c9267cd1f71508949 | 651 | py | Python | 001_IntroCS/PS2/ps2c.py | PDmatrix/OSSU | dd482b6e4cdbdef5a8897c1b6ef135751681423a | [
"MIT"
] | null | null | null | 001_IntroCS/PS2/ps2c.py | PDmatrix/OSSU | dd482b6e4cdbdef5a8897c1b6ef135751681423a | [
"MIT"
] | null | null | null | 001_IntroCS/PS2/ps2c.py | PDmatrix/OSSU | dd482b6e4cdbdef5a8897c1b6ef135751681423a | [
"MIT"
] | null | null | null | balance = 999999
# Bisection search for the lowest fixed monthly payment that pays off
# `balance` (compounding monthly at annualInterestRate) within 12 months.
annualInterestRate = 0.18
monthlyInterestRate = annualInterestRate/12.0
# Search bounds: interest-free payment .. payment on the fully compounded balance.
monthlyLower = balance/12
monthlyUpper = (balance * (1+monthlyInterestRate)**12)/12.0
while True:
    updatedBalance = balance
    for i in range(12):
        # NOTE(review): `payment` is invariant across this inner loop
        # (the bounds only change afterwards) and could be hoisted above
        # it without changing the result.
        payment = (monthlyUpper + monthlyLower)/2.0
        monthlyUnpaidBalance = updatedBalance - payment
        updatedBalance = monthlyUnpaidBalance + monthlyInterestRate * monthlyUnpaidBalance
    if updatedBalance < -0.01:
        monthlyUpper = payment  # overpaid: shrink the payment
    elif updatedBalance > 0.01:
        monthlyLower = payment  # underpaid: grow the payment
    else:
        break  # final balance within ±0.01 of zero
print("Lowest payment: {:0.2f}".format(payment)) | 31 | 90 | 0.691244 | balance = 999999
# Bisection search for the lowest fixed monthly payment that pays off
# `balance` (compounding monthly at annualInterestRate) within 12 months.
annualInterestRate = 0.18
monthlyInterestRate = annualInterestRate/12.0
# Search bounds: interest-free payment .. payment on the fully compounded balance.
monthlyLower = balance/12
monthlyUpper = (balance * (1+monthlyInterestRate)**12)/12.0
while True:
    # The candidate payment is constant for a whole simulated year, so it is
    # computed once per bisection step instead of once per month (hoisted
    # out of the inner loop — bit-identical result, 12x fewer midpoints).
    payment = (monthlyUpper + monthlyLower)/2.0
    updatedBalance = balance
    for _ in range(12):
        monthlyUnpaidBalance = updatedBalance - payment
        updatedBalance = monthlyUnpaidBalance + monthlyInterestRate * monthlyUnpaidBalance
    if updatedBalance < -0.01:
        monthlyUpper = payment  # overpaid: shrink the payment
    elif updatedBalance > 0.01:
        monthlyLower = payment  # underpaid: grow the payment
    else:
        break  # final balance within ±0.01 of zero
print("Lowest payment: {:0.2f}".format(payment)) | 0 | 0 | 0 |
b41050b80a9cc015d3baca79949feec94791e99c | 3,959 | py | Python | tests/integration/commands/deploy.py | wilzbach/cli | bac7edb42618f3aeecd81ec80d5bec144fa893c2 | [
"Apache-2.0"
] | null | null | null | tests/integration/commands/deploy.py | wilzbach/cli | bac7edb42618f3aeecd81ec80d5bec144fa893c2 | [
"Apache-2.0"
] | null | null | null | tests/integration/commands/deploy.py | wilzbach/cli | bac7edb42618f3aeecd81ec80d5bec144fa893c2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import time
from pytest import mark
@mark.parametrize('with_message', [True, False])
@mark.parametrize('hard_deployment', [True, False])
@mark.parametrize('final_release_state', [
'DEPLOYED', 'FAILED', 'UNKNOWN', 'TEMP_DEPLOYMENT_FAILURE'
])
@mark.parametrize('maintenance', [True, False])
@mark.parametrize('payload', [
None, {'stories': {'foo'}, 'services': ['bar', 'baz']}
])
| 37 | 79 | 0.621369 | # -*- coding: utf-8 -*-
import time
from pytest import mark
# Exhaustive parametrized matrix over CLI flags, compile payloads and
# final release states for the `story deploy` command.
@mark.parametrize('with_message', [True, False])
@mark.parametrize('hard_deployment', [True, False])
@mark.parametrize('final_release_state', [
    'DEPLOYED', 'FAILED', 'UNKNOWN', 'TEMP_DEPLOYMENT_FAILURE'
])
@mark.parametrize('maintenance', [True, False])
@mark.parametrize('payload', [
    None, {'stories': {'foo'}, 'services': ['bar', 'baz']}
])
def test_deploy(runner, with_message, patch, hard_deployment,
                final_release_state, maintenance, payload,
                init_sample_app_in_cwd):
    """Run `deploy` end-to-end with the platform API mocked out and assert
    the CLI output/exit code for each combination of flags and outcomes.
    """
    with runner.runner.isolated_filesystem():
        init_sample_app_in_cwd()
        # Relative imports are used here since we need to trigger
        # the cli init code in an isolated filesystem, inside an app dir.
        # Weird things happen otherwise. Not the most efficient way, but works.
        from story import api
        from story.commands import test
        from story.commands.deploy import deploy
        # Stub out compilation, sleeping, and every API call the command makes.
        patch.object(test, 'compile_app', return_value=payload)
        patch.object(time, 'sleep')
        patch.object(api.Config, 'get')
        patch.object(api.Releases, 'create')
        # Simulate the release progressing through the queue to its final state.
        patch.object(api.Releases, 'get', side_effect=[
            [{'state': 'QUEUED'}],
            [{'state': 'DEPLOYING'}],
            [{'state': final_release_state}],
        ])
        patch.object(api.Apps, 'maintenance', return_value=maintenance)
        # Build the CLI argument list from the parametrized flags.
        args = []
        if with_message:
            message = 'hello world'
            args.append('--message')
            args.append(message)
        else:
            message = None
        if hard_deployment:
            args.append('--hard')
        if payload is None:
            # Compilation produced nothing: the command must fail with no output.
            result = runner.run(deploy, exit_code=1)
            assert result.stdout == ''
            return
        else:
            result = runner.run(deploy, exit_code=0, args=args)
        if maintenance:
            assert 'Your app is in maintenance mode.' in result.stdout
            return
        api.Config.get.assert_called_with('my_app')
        api.Releases.create.assert_called_with(
            api.Config.get(), payload, 'my_app', message, hard_deployment)
        # Three release states were stubbed above; the command evidently
        # sleeps once per poll, hence exactly three sleeps.
        assert time.sleep.call_count == 3
        if final_release_state == 'DEPLOYED':
            assert 'Configured 1 story' in result.stdout
            assert '- foo' in result.stdout
            assert 'Deployed 2 services' in result.stdout
            assert '- bar' in result.stdout
            assert '- baz' in result.stdout
            assert 'Created ingress route' in result.stdout
            assert 'Configured logging' in result.stdout
            assert 'Configured health checks' in result.stdout
            assert 'Deployment successful!' in result.stdout
        elif final_release_state == 'FAILED':
            assert 'Deployment failed!' in result.stdout
            assert 'story logs' in result.stdout
        elif final_release_state == 'TEMP_DEPLOYMENT_FAILURE':
            assert 'Deployment failed!' in result.stdout
            assert 'status.storyscript.io' in result.stdout
        else:
            # Any other state (e.g. UNKNOWN) is surfaced as an unhandled error.
            assert f'An unhandled state of your app has been encountered ' \
                   f'- {final_release_state}' in result.stdout
            assert 'support@storyscript.io' in result.stdout
def test_deploy_no_stories(runner, patch, init_sample_app_in_cwd):
    """`deploy` must abort (exit 1) with guidance when the app compiles to
    zero stories.

    NOTE(review): the `init_sample_app_in_cwd` fixture is requested but
    never called here — confirm whether it is needed at all.
    """
    with runner.runner.isolated_filesystem():
        # A minimal story.yml is enough for the CLI to recognise an app dir.
        with open('story.yml', 'w') as f:
            f.write('app_name: my_app\n')
        from story.commands import test
        from story.commands.deploy import deploy
        patch.object(test, 'compile_app', return_value={'stories': []})
        result = runner.run(deploy, exit_code=1, args=[])
        assert 'No stories were found for your app' in result.stdout
        assert 'You can write an example story using:' in result.stdout
        assert 'story template http > http.story' in result.stdout
| 3,499 | 0 | 45 |