hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
487b2742f256d49f88ac22c9264f7095601ac607 | 1,643 | py | Python | notes/SparkDifferentialJoin.py | ketanpurohit0/experimental | 02a7d5403037fe1fcc107ddf92e3b5a0748957d6 | [
"MIT"
] | 1 | 2020-08-25T04:16:07.000Z | 2020-08-25T04:16:07.000Z | notes/SparkDifferentialJoin.py | ketanpurohit0/experimental | 02a7d5403037fe1fcc107ddf92e3b5a0748957d6 | [
"MIT"
] | null | null | null | notes/SparkDifferentialJoin.py | ketanpurohit0/experimental | 02a7d5403037fe1fcc107ddf92e3b5a0748957d6 | [
"MIT"
] | null | null | null | import SparkHelper as sh
sparkSession = sh.getSpark()
sparkSession.sparkContext.setLogLevel("ERROR")
# URL
url = sh.getUrl('postgres','postgres','foobar_secret')
q1 = "SELECT * FROM foo_left"
q2 = "SELECT * FROM foo_right"
df1 = sh.getQueryDataFrame(sparkSession, url, q1)
df2 = sh.getQueryDataFrame(sparkSession, url, q2)
# Do an outer join
dfj = df1.join(df2, df1.name_left == df2.name_right, 'full_outer')
dfj.show()
# Now we have outer join where rows havent matched, but some have
# so extract the misses.
left_cols_only = [x for x in dfj.columns if 'left' in x]
df1miss = dfj.select(left_cols_only).filter("name_right is null")
right_cols_only = [x for x in dfj.columns if 'right' in x]
df2miss = dfj.select(right_cols_only).filter("name_left is null")
df1miss.show()
df2miss.show()
# We remove the misses from original frame (we only keep the good records
dfj = dfj.filter('name_left is not null and name_right is not null')
dfj.show()
# Now 'normalise' name on both sides of the misses
from pyspark.sql.functions import regexp_replace, col
df1miss = df1miss.withColumn('name_left', regexp_replace( col('name_left'), '(_[0-9]*_|_[0-9]*$)','@CX@'))
df2miss = df2miss.withColumn('name_right', regexp_replace( col('name_right'), '(_[0-9]*_|_[0-9]*$)','@CX@'))
df1miss.show()
df2miss.show()
# Attempt join again on the misses subset, this time with additional columns
# as the keys
dfj2 = df1miss.join(df2miss, [df1miss.name_left == df2miss.name_right, df1miss.uid_left == df2miss.uid_right], 'full_outer')
dfj2.show()
# Take a union
dfj3 = dfj.union(dfj2)
dfj3.show()
sparkSession.stop()
| 37.340909 | 125 | 0.715764 |
487b34c0ca841971125a8b74fe964b7cec0a6a76 | 13,227 | py | Python | py_proto/modules/drivers/camera/proto/camera_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
] | 2 | 2019-03-04T02:11:04.000Z | 2019-04-18T11:19:45.000Z | py_proto/modules/drivers/camera/proto/camera_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
] | 1 | 2019-03-15T08:37:53.000Z | 2019-03-15T08:37:53.000Z | py_proto/modules/drivers/camera/proto/camera_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
] | 1 | 2019-03-04T02:11:09.000Z | 2019-03-04T02:11:09.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/drivers/camera/proto/camera_conf.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/drivers/camera/proto/camera_conf.proto',
package='apollo.drivers.camera',
syntax='proto2',
serialized_pb=_b('\n.modules/drivers/camera/proto/camera_conf.proto\x12\x15\x61pollo.drivers.camera\"\xe3\x05\n\nCameraConf\x12\x10\n\x05index\x18\x01 \x02(\r:\x01\x30\x12\x18\n\x08position\x18\x02 \x01(\t:\x06\x63\x65ntre\x12\x14\n\ntopic_name\x18\x03 \x01(\t:\x00\x12!\n\x0cvideo_device\x18\x04 \x01(\t:\x0b/dev/video0\x12 \n\x0b\x63\x61mera_name\x18\x05 \x01(\t:\x0bhead_camera\x12\x19\n\x0f\x63\x61mera_info_url\x18\x06 \x01(\t:\x00\x12\x19\n\x0f\x63\x61mera_frame_id\x18\x07 \x01(\t:\x00\x12\x1c\n\x0e\x63\x61mera_timeout\x18\x08 \x01(\r:\x04\x31\x30\x30\x30\x12\x1c\n\rspin_interval\x18\t \x01(\x01:\x05\x30.005\x12\x17\n\tio_method\x18\n \x01(\t:\x04mmap\x12\x1b\n\x0cpixel_format\x18\x0b \x01(\t:\x05mjpeg\x12\x18\n\x0bimage_width\x18\x0c \x01(\r:\x03\x36\x34\x30\x12\x19\n\x0cimage_height\x18\r \x01(\r:\x03\x34\x38\x30\x12\x16\n\nframe_rate\x18\x0e \x01(\x04:\x02\x33\x30\x12\x16\n\nbrightness\x18\x0f \x01(\x05:\x02-1\x12\x14\n\x08\x63ontrast\x18\x10 \x01(\x05:\x02-1\x12\x16\n\nsaturation\x18\x11 \x01(\x05:\x02-1\x12\x15\n\tsharpness\x18\x12 \x01(\x05:\x02-1\x12\x10\n\x04gain\x18\x13 \x01(\x05:\x02-1\x12\x1a\n\x0c\x61utoexposure\x18\x14 \x01(\x08:\x04true\x12\x15\n\x08\x65xposure\x18\x15 \x01(\r:\x03\x31\x30\x30\x12\x18\n\tautofocus\x18\x16 \x01(\x08:\x05\x66\x61lse\x12\x11\n\x05\x66ocus\x18\x17 \x01(\x05:\x02-1\x12 \n\x12\x61uto_white_balance\x18\x18 \x01(\x08:\x04true\x12\x1b\n\rwhite_balance\x18\x19 \x01(\x05:\x04\x34\x30\x30\x30\x12\x1b\n\x10trigger_internal\x18\x1a \x01(\r:\x01\x30\x12\x17\n\x0btrigger_fps\x18\x1b \x01(\r:\x02\x33\x30\x12\x15\n\nerror_code\x18\x1c \x01(\r:\x01\x30')
)
_CAMERACONF = _descriptor.Descriptor(
name='CameraConf',
full_name='apollo.drivers.camera.CameraConf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='apollo.drivers.camera.CameraConf.index', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='apollo.drivers.camera.CameraConf.position', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("centre").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topic_name', full_name='apollo.drivers.camera.CameraConf.topic_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='video_device', full_name='apollo.drivers.camera.CameraConf.video_device', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("/dev/video0").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_name', full_name='apollo.drivers.camera.CameraConf.camera_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("head_camera").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_info_url', full_name='apollo.drivers.camera.CameraConf.camera_info_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_frame_id', full_name='apollo.drivers.camera.CameraConf.camera_frame_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_timeout', full_name='apollo.drivers.camera.CameraConf.camera_timeout', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spin_interval', full_name='apollo.drivers.camera.CameraConf.spin_interval', index=8,
number=9, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.005),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='io_method', full_name='apollo.drivers.camera.CameraConf.io_method', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("mmap").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pixel_format', full_name='apollo.drivers.camera.CameraConf.pixel_format', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("mjpeg").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_width', full_name='apollo.drivers.camera.CameraConf.image_width', index=11,
number=12, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=640,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_height', full_name='apollo.drivers.camera.CameraConf.image_height', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=480,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='frame_rate', full_name='apollo.drivers.camera.CameraConf.frame_rate', index=13,
number=14, type=4, cpp_type=4, label=1,
has_default_value=True, default_value=30,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='brightness', full_name='apollo.drivers.camera.CameraConf.brightness', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='contrast', full_name='apollo.drivers.camera.CameraConf.contrast', index=15,
number=16, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='saturation', full_name='apollo.drivers.camera.CameraConf.saturation', index=16,
number=17, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sharpness', full_name='apollo.drivers.camera.CameraConf.sharpness', index=17,
number=18, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gain', full_name='apollo.drivers.camera.CameraConf.gain', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='autoexposure', full_name='apollo.drivers.camera.CameraConf.autoexposure', index=19,
number=20, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exposure', full_name='apollo.drivers.camera.CameraConf.exposure', index=20,
number=21, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=100,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='autofocus', full_name='apollo.drivers.camera.CameraConf.autofocus', index=21,
number=22, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='focus', full_name='apollo.drivers.camera.CameraConf.focus', index=22,
number=23, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='auto_white_balance', full_name='apollo.drivers.camera.CameraConf.auto_white_balance', index=23,
number=24, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='white_balance', full_name='apollo.drivers.camera.CameraConf.white_balance', index=24,
number=25, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trigger_internal', full_name='apollo.drivers.camera.CameraConf.trigger_internal', index=25,
number=26, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trigger_fps', full_name='apollo.drivers.camera.CameraConf.trigger_fps', index=26,
number=27, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=30,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_code', full_name='apollo.drivers.camera.CameraConf.error_code', index=27,
number=28, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=813,
)
DESCRIPTOR.message_types_by_name['CameraConf'] = _CAMERACONF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CameraConf = _reflection.GeneratedProtocolMessageType('CameraConf', (_message.Message,), dict(
DESCRIPTOR = _CAMERACONF,
__module__ = 'modules.drivers.camera.proto.camera_conf_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.camera.CameraConf)
))
_sym_db.RegisterMessage(CameraConf)
# @@protoc_insertion_point(module_scope)
| 51.069498 | 1,613 | 0.730476 |
487c49f921ee4340fdfc140e8ff73bccf0d40cf6 | 3,273 | py | Python | test/broken_test_log.py | Brimizer/python-ant | 2b99693b4754156d401a0bd90e02357e8358c1f5 | [
"MIT"
] | null | null | null | test/broken_test_log.py | Brimizer/python-ant | 2b99693b4754156d401a0bd90e02357e8358c1f5 | [
"MIT"
] | null | null | null | test/broken_test_log.py | Brimizer/python-ant | 2b99693b4754156d401a0bd90e02357e8358c1f5 | [
"MIT"
] | 1 | 2019-01-11T22:22:06.000Z | 2019-01-11T22:22:06.000Z | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011, Martn Ral Villalba
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
LOG_LOCATION = '/tmp/python-ant.logtest.ant'
import unittest
from ant.core.log import *
| 33.397959 | 78 | 0.633364 |
487d11f426857e94a16bf2d204d4f9c9755beb01 | 3,181 | py | Python | python/GafferSceneUI/LightToCameraUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 561 | 2016-10-18T04:30:48.000Z | 2022-03-30T06:52:04.000Z | python/GafferSceneUI/LightToCameraUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 1,828 | 2016-10-14T19:01:46.000Z | 2022-03-30T16:07:19.000Z | python/GafferSceneUI/LightToCameraUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 120 | 2016-10-18T15:19:13.000Z | 2021-12-20T16:28:23.000Z | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
import GafferSceneUI
Gaffer.Metadata.registerNode(
GafferScene.LightToCamera,
"description",
"""
Converts lights into cameras. Spotlights are converted to a perspective
camera with the field of view matching the cone angle, and distant lights are
converted to an orthographic camera.
""",
plugs = {
"filmFit" : filmFitMetadata(),
"distantAperture" : [
"description",
"""
The orthographic aperture used when converting distant lights
( which are theoretically infinite in extent )
""",
],
"clippingPlanes" : [
"description",
"""
Clipping planes for the created cameras. When creating a perspective camera, a near clip
<= 0 is invalid, and will be replaced with 0.01. Also, certain lights only start casting
light at some distance - if near clip is less than this, it will be increased.
""",
],
"filter" : [
"description",
"""
Specifies which lights to convert.
""",
],
}
)
| 33.840426 | 155 | 0.690663 |
487fc233ae954bd5e3c1f681b3650b436f4025ac | 2,678 | py | Python | eccProg.py | ganey/hm-gwmfr | c931113185a222d6c94c6a2679d39027de6f192f | [
"MIT"
] | 1 | 2021-06-18T02:57:19.000Z | 2021-06-18T02:57:19.000Z | eccProg.py | ganey/hm-gwmfr | c931113185a222d6c94c6a2679d39027de6f192f | [
"MIT"
] | 9 | 2021-03-04T10:41:52.000Z | 2021-10-18T15:14:37.000Z | eccProg.py | ganey/hm-gwmfr | c931113185a222d6c94c6a2679d39027de6f192f | [
"MIT"
] | 4 | 2021-02-24T10:03:31.000Z | 2021-07-09T17:33:59.000Z | #!/usr/bin/env python3
from time import sleep
import logging
import os
import subprocess
print("Nebra ECC Tool")
preTestFail = 0
afterTestFail = 0
ECC_SUCCESSFUL_TOUCH_FILEPATH = "/var/data/gwmfr_ecc_provisioned"
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
while preTestFail < 10:
preTest = subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "onboarding"], capture_output=True)
preTestResult = str(preTest.stdout.decode('ascii')).rstrip()
if "not responding to pings" not in preTestResult:
break
else:
print("Can't load provisioning tool, retrying")
preTestFail += 1
sleep(2)
if "ecc_response_exec_error" in preTestResult:
print("Provisioning")
while afterTestFail < 5:
subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "provision"])
print("Testing")
afterTest = subprocess.run(["/opt/gateway_mfr/bin/gateway_mfr", "ecc", "onboarding"], capture_output=True).stdout
afterTestResult = str(afterTest.decode('ascii')).rstrip()
print(afterTestResult)
if "ecc_response_exec_error" in afterTestResult:
print("\033[91mProgramming FAILED\033[0m")
print("Retrying provisioning")
afterTestFail += 1
sleep(2)
elif (len(afterTestResult) == 51 or len(afterTestResult) == 52):
print("\033[92mProgramming Success!\033[0m")
record_successful_provision()
break
else:
print("\033[91mAn Unknown Error Occured\033[0m")
print("Retrying provisioning")
afterTestFail += 1
sleep(2)
elif (len(preTestResult) == 50 or len(preTestResult) == 51 or len(preTestResult) == 52):
print("\033[93mKey Already Programmed\033[0m")
print(preTestResult)
record_successful_provision()
else:
print("An Unknown Error Occured")
print(preTestResult)
# This next bit of mank is so we can run the gwmfr container for longer
# by providing the OVERRIDE_GWMFR_EXIT environment variable for trouble
# shooting purposes.
if os.getenv('OVERRIDE_GWMFR_EXIT', None):
while(True):
print("GWMFR Utility Exit Overriden")
sleep(300)
| 33.898734 | 121 | 0.68596 |
4880ecda66e3c2c409be46833975599cd4502de6 | 641 | py | Python | doc/tutorial/getargs.py | OliverTED/doit | a6f75f312390aba352c3f00680cd32609323dbc2 | [
"MIT"
] | null | null | null | doc/tutorial/getargs.py | OliverTED/doit | a6f75f312390aba352c3f00680cd32609323dbc2 | [
"MIT"
] | 1 | 2018-10-02T19:28:08.000Z | 2018-10-02T19:28:08.000Z | doc/tutorial/getargs.py | smheidrich/doit | 1f9c3c755c96508ca2b1b2668f102f9d2da9c614 | [
"MIT"
] | null | null | null | DOIT_CONFIG = {'default_tasks': ['use_cmd', 'use_python']}
| 24.653846 | 58 | 0.452418 |
48823dc4e90bc1cf4bd23d8ad54e5dbd5c6870e3 | 4,022 | py | Python | examples/run_unlinkable.py | danesjenovdan/reference_implementation | 8374c45dd05a2e279bd5e53800aa237f64684a2b | [
"Apache-2.0"
] | 1 | 2020-11-09T21:07:00.000Z | 2020-11-09T21:07:00.000Z | examples/run_unlinkable.py | ac-ctrl/reference_implementation | e07f10447eaab9853e8f45a5d06e98741e265f5f | [
"Apache-2.0"
] | null | null | null | examples/run_unlinkable.py | ac-ctrl/reference_implementation | e07f10447eaab9853e8f45a5d06e98741e265f5f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
""" Simple example/demo of the unlinkable DP-3T design
This demo simulates some interactions between two phones,
represented by the contact tracing modules, and then runs
contact tracing.
"""
__copyright__ = """
Copyright 2020 EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
from datetime import timedelta
from dp3t.protocols.unlinkable import ContactTracer, TracingDataBatch
def report_broadcasted_ephids(name, app):
"""
Convenience function to report some broadcasted EphIDs
"""
reporting_time = app.start_of_today + timedelta(hours=10)
ephid = app.get_ephid_for_time(reporting_time)
print("At {}: {} broadcasts {}".format(reporting_time.time(), name, ephid.hex()))
def report_day(time):
"""
Convenience function to report start of the day
"""
print("---- {} ----".format(time))
def process_single_day(alice, bob, interaction_time=None):
"""
Convenience function, process and report on a single day
"""
report_day(alice.today)
report_broadcasted_ephids("Alice", alice)
report_broadcasted_ephids("Bob", bob)
if interaction_time:
print("Alice and Bob interact:")
ephid_bob = bob.get_ephid_for_time(interaction_time)
alice.add_observation(ephid_bob, interaction_time)
print(" Alice observes Bob's EphID {}".format(ephid_bob.hex()))
ephid_alice = alice.get_ephid_for_time(interaction_time)
bob.add_observation(ephid_alice, interaction_time)
print(" Bob observes Alice's EphID {}".format(ephid_alice.hex()))
else:
print("Alice and Bob do not interact")
# Advance to the next day
alice.next_day()
bob.next_day()
print("")
if __name__ == "__main__":
main()
| 30.014925 | 85 | 0.680507 |
4883b0040e8dc5ec47ef273298b6d359bf3bcacc | 2,409 | py | Python | EasyRecycle/tests/unittests/core/views/test_BecomeCommercialAPIView.py | YuriyLisovskiy/EasyRecycle | 49f1b84931145a3e95224e411d22ed7701e5bfe0 | [
"MIT"
] | null | null | null | EasyRecycle/tests/unittests/core/views/test_BecomeCommercialAPIView.py | YuriyLisovskiy/EasyRecycle | 49f1b84931145a3e95224e411d22ed7701e5bfe0 | [
"MIT"
] | null | null | null | EasyRecycle/tests/unittests/core/views/test_BecomeCommercialAPIView.py | YuriyLisovskiy/EasyRecycle | 49f1b84931145a3e95224e411d22ed7701e5bfe0 | [
"MIT"
] | null | null | null | from django.urls import reverse
from rest_framework import status
from rest_framework.test import force_authenticate
from rest_framework_simplejwt.state import User
from core.views import DeactivateSelfAPIView, BecomeCommercialAPIView
from tests.unittests.common import APIFactoryTestCase
| 42.263158 | 86 | 0.71565 |
48847182743877e2207fafdce5ef0b412ca3a606 | 55,929 | py | Python | colour/colorimetry/tests/test_spectrum.py | aurelienpierre/colour | 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47 | [
"BSD-3-Clause"
] | null | null | null | colour/colorimetry/tests/test_spectrum.py | aurelienpierre/colour | 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47 | [
"BSD-3-Clause"
] | null | null | null | colour/colorimetry/tests/test_spectrum.py | aurelienpierre/colour | 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47 | [
"BSD-3-Clause"
] | null | null | null | """Defines the unit tests for the :mod:`colour.colorimetry.spectrum` module."""
import colour
import numpy as np
import unittest
import scipy
from distutils.version import LooseVersion
from colour.algebra import CubicSplineInterpolator
from colour.colorimetry.spectrum import SPECTRAL_SHAPE_DEFAULT
from colour.colorimetry.spectrum import (
SpectralShape,
SpectralDistribution,
MultiSpectralDistributions,
reshape_sd,
reshape_msds,
sds_and_msds_to_sds,
sds_and_msds_to_msds,
)
from colour.hints import Dict, Tuple
from colour.utilities import tstack
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"DATA_SAMPLE",
"DATA_SAMPLE_NON_UNIFORM",
"DATA_SAMPLE_INTERPOLATED",
"DATA_SAMPLE_INTERPOLATED_NON_UNIFORM",
"DATA_SAMPLE_NORMALISED",
"DATA_STANDARD_OBSERVER_2_DEGREE_CIE1931",
"DATA_CMFS",
"DATA_SAMPLE_ABRIDGED",
"DATA_MULTI_SAMPLE_ABRIDGED",
"TestSpectralShape",
"TestSpectralDistribution",
"TestMultiSpectralDistributions",
"TestReshapeSd",
"TestSdsAndMdsToSds",
"TestSdsAndMsdsToMsds",
]
DATA_SAMPLE: Dict = {
340: 0.0000,
360: 0.0000,
380: 0.0000,
400: 0.0641,
420: 0.0645,
440: 0.0562,
460: 0.0537,
480: 0.0559,
500: 0.0651,
520: 0.0705,
540: 0.0772,
560: 0.0870,
580: 0.1128,
600: 0.1360,
620: 0.1511,
640: 0.1688,
660: 0.1996,
680: 0.2397,
700: 0.2852,
720: 0.0000,
740: 0.0000,
760: 0.0000,
780: 0.0000,
800: 0.0000,
820: 0.0000,
}
DATA_SAMPLE_NON_UNIFORM: Dict = {
391.898: 16.331740,
392.069: 16.333122,
405.606: 40.197224,
406.794: 39.923366,
406.891: 39.924098,
407.026: 39.925138,
416.286: 40.064293,
418.690: 40.009950,
426.700: 18.045809,
426.726: 18.045986,
432.556: 38.435883,
464.742: 29.534647,
465.025: 29.534647,
465.147: 29.534647,
466.586: 38.226470,
477.175: 7.487795,
493.205: 7.684766,
505.217: 7.684766,
513.294: 20.701285,
513.328: 20.704211,
514.349: 20.704211,
514.516: 20.709788,
515.109: 20.709788,
538.034: 7.684766,
564.807: 20.704211,
566.247: 20.709788,
569.592: 32.103387,
580.133: 37.548490,
581.198: 37.548490,
582.642: 40.197224,
588.977: 18.045986,
589.159: 18.045809,
600.113: 8.643020,
600.603: 8.647157,
600.718: 8.640394,
601.068: 8.640394,
601.322: 8.647157,
601.484: 8.643020,
657.805: 14.448826,
658.288: 14.448826,
658.761: 8.537097,
674.438: 38.22647,
678.390: 20.709788,
703.725: 38.435883,
711.318: 8.647157,
711.519: 8.640394,
711.563: 22.532398,
711.699: 8.647157,
711.990: 22.536906,
723.132: 16.33174,
723.642: 16.333122,
761.265: 41.342187,
786.089: 8.850659,
805.862: 8.850659,
}
DATA_SAMPLE_INTERPOLATED: Tuple = (
0.000000000000000,
0.000230709627131,
0.000384144814593,
0.000507137093115,
0.000632114832536,
0.000778810112328,
0.000955965592105,
0.001163041382140,
0.001391921913876,
0.001628622810444,
0.001854997757177,
0.002050445372122,
0.002193616076555,
0.002264118965498,
0.002244228678230,
0.002120592268802,
0.001885936076555,
0.001540772596628,
0.001095107350478,
0.000570145756392,
0.000000000000000,
-0.000581325882457,
-0.001183945630981,
-0.001820338942229,
-0.002492278660287,
-0.003192248184715,
-0.003904858878589,
-0.004608267476544,
-0.005275593492823,
-0.005876336629317,
-0.006377794183612,
-0.006746478457031,
-0.006949534162679,
-0.006956155833489,
-0.006739005230263,
-0.006275628749720,
-0.005549874832536,
-0.004553311371393,
-0.003286643119019,
-0.001761129096236,
0.000000000000000,
0.001979832128906,
0.004221516875000,
0.006762744980469,
0.009617760000000,
0.012780285644531,
0.016226453125000,
0.019917728496094,
0.023803840000000,
0.027825705410156,
0.031918359375000,
0.036013880761719,
0.040044320000000,
0.043944626425781,
0.047655575625000,
0.051126696777344,
0.054319200000000,
0.057208903691406,
0.059789161875000,
0.062073791542969,
0.064100000000000,
0.065908998066406,
0.067477265625000,
0.068776060136719,
0.069791040000000,
0.070520520019531,
0.070973726875000,
0.071169054589844,
0.071132320000000,
0.070895018222656,
0.070492578125000,
0.069962617792969,
0.069343200000000,
0.068671087675781,
0.067979999375000,
0.067298864746094,
0.066650080000000,
0.066047763378906,
0.065496010625000,
0.064987150449219,
0.064500000000000,
0.064007805449219,
0.063510900625000,
0.063016138378906,
0.062528640000000,
0.062051989746094,
0.061588429375000,
0.061139052675781,
0.060704000000000,
0.060282652792969,
0.059873828125000,
0.059475973222656,
0.059087360000000,
0.058706279589844,
0.058331236875000,
0.057961145019531,
0.057595520000000,
0.057234675136719,
0.056879915625000,
0.056533733066406,
0.056200000000000,
0.055883811757812,
0.055589516250000,
0.055320009023437,
0.055076960000000,
0.054860961914063,
0.054671678750000,
0.054507994179687,
0.054368160000000,
0.054249944570312,
0.054150781250000,
0.054067916835938,
0.053998560000000,
0.053940029726562,
0.053889903750000,
0.053846166992187,
0.053807360000000,
0.053772727382813,
0.053742366250000,
0.053717374648438,
0.053700000000000,
0.053692483144531,
0.053693175625000,
0.053700270058594,
0.053712960000000,
0.053731311035156,
0.053756131875000,
0.053788845449219,
0.053831360000000,
0.053885940175781,
0.053955078125000,
0.054041364589844,
0.054147360000000,
0.054275465566406,
0.054427794375000,
0.054606042480469,
0.054811360000000,
0.055044222207031,
0.055304300625000,
0.055590334121094,
0.055900000000000,
0.056231407851562,
0.056587597500000,
0.056971279335937,
0.057383360000000,
0.057823168945313,
0.058288685000000,
0.058776762929688,
0.059283360000000,
0.059803762539063,
0.060332812500000,
0.060865134023438,
0.061395360000000,
0.061918358632813,
0.062429460000000,
0.062924682617188,
0.063400960000000,
0.063856367226563,
0.064290347500000,
0.064703938710938,
0.065100000000000,
0.065481647265625,
0.065846786250000,
0.066192982265625,
0.066519040000000,
0.066824853515625,
0.067111256250000,
0.067379871015625,
0.067632960000000,
0.067873274765625,
0.068103906250000,
0.068328134765625,
0.068549280000000,
0.068770551015625,
0.068994896250000,
0.069224853515625,
0.069462400000000,
0.069708802265625,
0.069964466250000,
0.070228787265625,
0.070500000000000,
0.070776270703125,
0.071059446250000,
0.071351597578125,
0.071653920000000,
0.071966845703125,
0.072290156250000,
0.072623095078125,
0.072964480000000,
0.073312815703125,
0.073666406250000,
0.074023467578125,
0.074382240000000,
0.074741100703125,
0.075098676250000,
0.075453955078125,
0.075806400000000,
0.076156060703125,
0.076503686250000,
0.076850837578125,
0.077200000000000,
0.077552701992188,
0.077904262500000,
0.078250762070313,
0.078590880000000,
0.078925561523438,
0.079257685000000,
0.079591729101562,
0.079933440000000,
0.080289498554688,
0.080667187500000,
0.081074058632813,
0.081517600000000,
0.082004903085938,
0.082542330000000,
0.083135180664063,
0.083787360000000,
0.084501045117188,
0.085276352500000,
0.086111005195313,
0.087000000000000,
0.087938453242188,
0.088930890000000,
0.089981833007812,
0.091092960000000,
0.092263452148438,
0.093490342500000,
0.094768864414062,
0.096092800000000,
0.097454828554687,
0.098846875000000,
0.100260458320312,
0.101687040000000,
0.103118372460937,
0.104546847500000,
0.105965844726563,
0.107370080000000,
0.108755953867188,
0.110121900000000,
0.111468733632812,
0.112800000000000,
0.114120657988281,
0.115431176875000,
0.116730532871094,
0.118017600000000,
0.119291174316406,
0.120549998125000,
0.121792784199219,
0.123018240000000,
0.124225091894531,
0.125412109375000,
0.126578129277344,
0.127722080000000,
0.128843005722656,
0.129940090625000,
0.131012683105469,
0.132060320000000,
0.133082750800781,
0.134079961875000,
0.135052200683594,
0.136000000000000,
0.136923531484375,
0.137820920000000,
0.138690739765625,
0.139532640000000,
0.140347216796875,
0.141135885000000,
0.141900750078125,
0.142644480000000,
0.143370177109375,
0.144081250000000,
0.144781285390625,
0.145473920000000,
0.146162712421875,
0.146851015000000,
0.147541845703125,
0.148237760000000,
0.148940722734375,
0.149651980000000,
0.150371931015625,
0.151100000000000,
0.151834687363281,
0.152574745625000,
0.153319862089844,
0.154070560000000,
0.154828088378906,
0.155594311875000,
0.156371600605469,
0.157162720000000,
0.157970720644531,
0.158798828125000,
0.159650332871094,
0.160528480000000,
0.161436359160156,
0.162376794375000,
0.163352233886719,
0.164364640000000,
0.165415378925781,
0.166505110625000,
0.167633678652344,
0.168800000000000,
0.170002988242187,
0.171244585000000,
0.172526722382812,
0.173850400000000,
0.175215795898437,
0.176622377500000,
0.178069012539063,
0.179554080000000,
0.181075581054688,
0.182631250000000,
0.184218665195313,
0.185835360000000,
0.187478933710938,
0.189147162500000,
0.190838110351563,
0.192550240000000,
0.194282523867188,
0.196034555000000,
0.197806658007813,
0.199600000000000,
0.201405046894531,
0.203174116875000,
0.204868198964844,
0.206468000000000,
0.207971350097656,
0.209390608125000,
0.210750067167969,
0.212083360000000,
0.213430864550781,
0.214837109375000,
0.216348179121094,
0.218009120000000,
0.219861345253906,
0.221940040625000,
0.224271569824219,
0.226870880000000,
0.229738907207031,
0.232859981875000,
0.236199234277344,
0.239700000000000,
0.243337282929688,
0.247262056250000,
0.251598942851562,
0.256394240000000,
0.261625952148438,
0.267213823750000,
0.273029372070313,
0.278905920000000,
0.284648628867188,
0.290044531250000,
0.294872563789062,
0.298913600000000,
0.301960483085938,
0.303828058750000,
0.304363208007813,
0.303454880000000,
0.301044124804688,
0.297134126250000,
0.291800234726562,
0.285200000000000,
0.277470396855469,
0.268408756875000,
0.257826504003906,
0.245651040000000,
0.231911267089844,
0.216723110625000,
0.200275041738281,
0.182813600000000,
0.164628916074219,
0.146040234375000,
0.127381435722656,
0.108986560000000,
0.091175328808594,
0.074238668125000,
0.058424230957031,
0.043921920000000,
0.030849410292969,
0.019237671875000,
0.009016492441406,
0.000000000000000,
-0.008014721386719,
-0.014901410625000,
-0.020510217441406,
-0.024767360000000,
-0.027665856933594,
-0.029256259375000,
-0.029637382988281,
-0.028947040000000,
-0.027352771230469,
-0.025042578125000,
-0.022215654785156,
-0.019073120000000,
-0.015808749277344,
-0.012599706875000,
-0.009597277832031,
-0.006917600000000,
-0.004632396074219,
-0.002759705625000,
-0.001254617128906,
0.000000000000000,
0.001146203203125,
0.002165737500000,
0.003010363984375,
0.003650560000000,
0.004073291015625,
0.004279782500000,
0.004283291796875,
0.004106880000000,
0.003781183828125,
0.003342187500000,
0.002828994609375,
0.002281600000000,
0.001738661640625,
0.001235272500000,
0.000800732421875,
0.000456320000000,
0.000213064453125,
0.000069517500000,
0.000009525234375,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
)
DATA_SAMPLE_INTERPOLATED_NON_UNIFORM: Tuple = (
16.329808636577400,
16.722487609243078,
17.780769796558388,
19.388440919210822,
21.429286697887836,
23.787092853276910,
26.345645106065515,
28.988729176941121,
31.600130786591194,
34.063635655703216,
36.263029504964656,
38.082098055062993,
39.404627026685688,
40.114402140520198,
40.111247588026082,
39.925103635141149,
39.890167058876870,
39.799323872976096,
39.680556569501256,
39.561845869769570,
39.471172495098209,
39.436517166804364,
39.485860606205222,
39.647183534617994,
39.948466673359860,
40.374181475729280,
40.480943048717741,
39.587763146544347,
37.399010971904012,
34.259517350145224,
30.574008069142096,
26.747208916768866,
23.183845680899680,
20.288644149408725,
18.466330110170176,
18.131455040802454,
19.618408431271092,
22.601624593221480,
26.621530082560952,
31.218551455196845,
35.933115267036499,
40.312773209586069,
44.121677838324345,
47.374686358105983,
50.100647613008405,
52.328410447109043,
54.086823704485290,
55.404736229214613,
56.310996865374428,
56.834454457042114,
57.003957848295137,
56.848355883210893,
56.396497405866839,
55.677231260340363,
54.719406290708918,
53.551871341049903,
52.203475255440743,
50.703066877958868,
49.079495052681693,
47.361608623686649,
45.578256435051173,
43.758287330852653,
41.930550155168532,
40.123893752076235,
38.367166965653183,
36.689218639976801,
35.118897619124496,
33.685052747173714,
32.416532868201863,
31.342186826286373,
30.490863465504646,
29.891411629934137,
29.572680163652254,
29.538621481156376,
33.594913456316092,
41.048330705400552,
45.551744552213606,
47.142069193311166,
46.264235269850914,
43.363173422990592,
38.883814293887831,
33.271088523700378,
26.969926753585888,
20.425259624702068,
14.082017778206605,
8.385131855257200,
3.728429425651282,
0.156961054721180,
-2.429909888236355,
-4.133307786198728,
-5.054357022143353,
-5.294181979047651,
-4.953907039889023,
-4.134656587644884,
-2.937555005292646,
-1.463726675809728,
0.185704017826468,
1.909612692638526,
3.606874965649032,
5.176366453880572,
6.516962774355742,
7.527539544097118,
8.122379803803623,
8.331483144611147,
8.237444938631239,
7.923124732066758,
7.471382071120567,
6.965076501995521,
6.487067570894484,
6.120214824020312,
5.947377807575871,
6.051416067764015,
6.515189150787608,
7.421556602849507,
8.833103319101975,
10.654113909347958,
12.714472051171430,
14.843629856075644,
16.871039435563851,
18.626152901139314,
19.938422364305275,
20.637299936564997,
20.706835013628812,
20.713132276905259,
20.620587314150111,
20.396819412610807,
20.054405097058748,
19.605938071902443,
19.064012041550384,
18.441220710411045,
17.750157782892924,
17.003416963404511,
16.213591956354293,
15.393276466150766,
14.555064197202412,
13.711548853917725,
12.875324140705192,
12.058983761973309,
11.275121422130562,
10.536330825585436,
9.855205676746426,
9.244339680022023,
8.716326539820709,
8.283759960550988,
7.959233646621334,
7.755341302440248,
7.684676632416213,
7.756084346822568,
7.961801724157814,
8.289468950531905,
8.726726048591537,
9.261213040983417,
9.880569950354237,
10.572436799350699,
11.324453610619505,
12.124260406807357,
12.959497210560945,
13.817804044526978,
14.686820931352152,
15.554187893683173,
16.407544954166731,
17.234532135449530,
18.022789460178267,
18.759956950999651,
19.433674630560375,
20.031582521507140,
20.541320646486646,
20.950529028145592,
21.246847689130675,
21.417916652088600,
21.451375939666065,
21.334865574509770,
21.056025579266414,
20.604750276779001,
20.494345115463702,
22.221705466331883,
25.661450384670388,
29.775598875658655,
33.531925166546948,
36.383260993760757,
38.399428506854193,
39.690190939354423,
40.365311524788659,
40.534553496684069,
40.307680088567857,
39.794454533967205,
39.104640066409281,
38.347999919421312,
37.634297326530451,
37.374685121273394,
39.119690201154867,
40.253206513156250,
38.299118522290833,
34.130306343480264,
28.883500017392159,
23.695429584694160,
19.702825086053920,
18.042370406596557,
17.942146149526579,
17.437952042997388,
16.603615436175311,
15.527509997088327,
14.298009393764401,
13.003487294231499,
11.732317366517600,
10.572873278650665,
9.613528698658669,
8.942657294569587,
8.648632734411382,
8.638275005914387,
8.608461770725761,
8.468358277399256,
8.236485894689265,
7.918459394063972,
7.519893546991550,
7.046403124940173,
6.503602899378026,
5.897107641773280,
5.232532123594114,
4.515491116308703,
3.751599391385226,
2.946471720291860,
2.105722874496783,
1.234967625468170,
0.339820744674198,
-0.574102996416954,
-1.501188826337108,
-2.435821973618090,
-3.372387666791723,
-4.305271134389827,
-5.228857604944231,
-6.137532306986749,
-7.025680469049206,
-7.887687319663434,
-8.717938087361247,
-9.510818000674471,
-10.260712288134929,
-10.962006178274445,
-11.609084899624833,
-12.196333680717927,
-12.718137750085543,
-13.168882336259514,
-13.542952667771653,
-13.834733973153785,
-14.038611480937735,
-14.148970419655322,
-14.160196017838375,
-14.066673504018716,
-13.862788106728164,
-13.542925054498539,
-13.101469575861673,
-12.532806899349385,
-11.831322253493493,
-10.991400866825831,
-10.007427967878208,
-8.873788785182459,
-7.584868547270405,
-6.135052482673858,
-4.518725819924658,
-2.730273787554616,
-0.764081614095559,
1.385465471920696,
3.723982241962316,
6.257083467497486,
8.990383919994379,
11.929498370921175,
14.976162260830137,
5.084237255461949,
-6.729230790271000,
-14.622433089729542,
-19.038838171389944,
-20.421914563728482,
-19.215130795221405,
-15.861955394344996,
-10.805856889575507,
-4.490303809389212,
2.641235317737622,
10.145291963328749,
17.578397598907866,
24.497083695998736,
30.457881726125084,
35.017323160810655,
37.731939471579153,
38.169944675202451,
36.103648065327157,
31.478944095141092,
24.247257545409393,
14.377470469911161,
2.089605892978410,
-12.208009328653077,
-28.102486071018522,
-45.180935210153116,
-63.030467622092019,
-81.238194182870473,
-99.391225768523640,
-117.076673255086732,
-133.881647518594917,
-149.393259435083451,
-163.198619880587444,
-174.884839731142108,
-184.039029862782684,
-190.248301151544382,
-193.099764473462415,
-192.180530704571822,
-187.077710720907902,
-177.378415398505837,
-162.669755613400923,
-142.538842241628146,
-116.572786159222886,
-84.358698242220214,
-45.483689366655454,
0.465129591436323,
53.867241377821607,
111.906385750398641,
165.969680931356891,
206.832112792864763,
225.268667207090687,
212.054330046203120,
157.964087182370434,
53.772924487761195,
24.128558965593012,
155.948189345952358,
238.667542187567193,
279.823268952005947,
286.952021100836475,
267.590450095627091,
229.275207397946076,
179.542944469361430,
125.930312771441450,
75.973963765754249,
37.210548913868003,
17.176719677350921,
17.152394375030820,
19.379794324130732,
21.517434431022508,
23.565545988087592,
25.524360287707406,
27.394108622263396,
29.175022284136993,
30.867332565709628,
32.471270759362753,
33.987068157477793,
35.414956052436175,
36.755165736619354,
38.007928502408760,
39.173475642185821,
40.252038448331973,
41.243848213228674,
42.149136229257330,
42.968133788799399,
43.701072184236310,
44.348182707949491,
44.909696652320392,
45.385845309730442,
45.776859972561077,
46.082971933193733,
46.304412484009845,
46.441412917390849,
46.494204525718175,
46.463018601373285,
46.348086436737596,
46.149639324192542,
45.867908556119552,
45.503125424900077,
45.055521222915552,
44.525327242547419,
43.912774776177095,
43.218095116186007,
42.441519554955640,
41.583279384867382,
40.643991169034841,
39.627797988936805,
38.540734852902020,
37.388854826093969,
36.178210973676094,
34.914856360811882,
33.604844052664760,
32.254227114398248,
30.869058611175774,
29.455391608160820,
28.019279170516839,
26.566774363407301,
25.103930251995678,
23.636799901445418,
22.171436376920010,
20.713892743582907,
19.270222066597562,
17.846477411127466,
16.448711842336071,
15.082978425386836,
13.755330225443233,
12.471820307668732,
11.238501737226787,
10.061427579280879,
8.946650898994456,
7.900224761530997,
6.928202232053956,
6.036636375726809,
5.231580257713018,
4.519086943176044,
3.905209497279358,
3.396000985186418,
2.997514472060695,
2.715803023065654,
2.556919703364760,
2.526917578121476,
2.631849712499274,
2.877769171661610,
3.270729020771954,
3.816782324993773,
4.521982149490528,
5.392381559425689,
6.434033619962720,
7.652991396265083,
)
DATA_SAMPLE_NORMALISED: Tuple = (
0.000000000000000,
0.000000000000000,
0.000000000000000,
22.475455820476860,
22.615708274894811,
19.705469845722302,
18.828892005610097,
19.600280504908834,
22.826086956521742,
24.719495091164092,
27.068723702664798,
30.504908835904626,
39.551192145862551,
47.685834502103788,
52.980364656381497,
59.186535764375883,
69.985974754558200,
84.046283309957929,
100.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
0.000000000000000,
)
DATA_STANDARD_OBSERVER_2_DEGREE_CIE1931: Dict = {
380: (0.001368, 0.000039, 0.006450),
385: (0.002236, 0.000064, 0.010550),
390: (0.004243, 0.000120, 0.020050),
395: (0.007650, 0.000217, 0.036210),
400: (0.014310, 0.000396, 0.067850),
405: (0.023190, 0.000640, 0.110200),
410: (0.043510, 0.001210, 0.207400),
415: (0.077630, 0.002180, 0.371300),
420: (0.134380, 0.004000, 0.645600),
425: (0.214770, 0.007300, 1.039050),
430: (0.283900, 0.011600, 1.385600),
435: (0.328500, 0.016840, 1.622960),
440: (0.348280, 0.023000, 1.747060),
445: (0.348060, 0.029800, 1.782600),
450: (0.336200, 0.038000, 1.772110),
455: (0.318700, 0.048000, 1.744100),
460: (0.290800, 0.060000, 1.669200),
465: (0.251100, 0.073900, 1.528100),
470: (0.195360, 0.090980, 1.287640),
475: (0.142100, 0.112600, 1.041900),
480: (0.095640, 0.139020, 0.812950),
485: (0.057950, 0.169300, 0.616200),
490: (0.032010, 0.208020, 0.465180),
495: (0.014700, 0.258600, 0.353300),
500: (0.004900, 0.323000, 0.272000),
505: (0.002400, 0.407300, 0.212300),
510: (0.009300, 0.503000, 0.158200),
515: (0.029100, 0.608200, 0.111700),
520: (0.063270, 0.710000, 0.078250),
525: (0.109600, 0.793200, 0.057250),
530: (0.165500, 0.862000, 0.042160),
535: (0.225750, 0.914850, 0.029840),
540: (0.290400, 0.954000, 0.020300),
545: (0.359700, 0.980300, 0.013400),
550: (0.433450, 0.994950, 0.008750),
555: (0.512050, 1.000000, 0.005750),
560: (0.594500, 0.995000, 0.003900),
565: (0.678400, 0.978600, 0.002750),
570: (0.762100, 0.952000, 0.002100),
575: (0.842500, 0.915400, 0.001800),
580: (0.916300, 0.870000, 0.001650),
585: (0.978600, 0.816300, 0.001400),
590: (1.026300, 0.757000, 0.001100),
595: (1.056700, 0.694900, 0.001000),
600: (1.062200, 0.631000, 0.000800),
605: (1.045600, 0.566800, 0.000600),
610: (1.002600, 0.503000, 0.000340),
615: (0.938400, 0.441200, 0.000240),
620: (0.854450, 0.381000, 0.000190),
625: (0.751400, 0.321000, 0.000100),
630: (0.642400, 0.265000, 0.000050),
635: (0.541900, 0.217000, 0.000030),
640: (0.447900, 0.175000, 0.000020),
645: (0.360800, 0.138200, 0.000010),
650: (0.283500, 0.107000, 0.000000),
655: (0.218700, 0.081600, 0.000000),
660: (0.164900, 0.061000, 0.000000),
665: (0.121200, 0.044580, 0.000000),
670: (0.087400, 0.032000, 0.000000),
675: (0.063600, 0.023200, 0.000000),
680: (0.046770, 0.017000, 0.000000),
685: (0.032900, 0.011920, 0.000000),
690: (0.022700, 0.008210, 0.000000),
695: (0.015840, 0.005723, 0.000000),
700: (0.011359, 0.004102, 0.000000),
705: (0.008111, 0.002929, 0.000000),
710: (0.005790, 0.002091, 0.000000),
715: (0.004109, 0.001484, 0.000000),
720: (0.002899, 0.001047, 0.000000),
725: (0.002049, 0.000740, 0.000000),
730: (0.001440, 0.000520, 0.000000),
735: (0.001000, 0.000361, 0.000000),
740: (0.000690, 0.000249, 0.000000),
745: (0.000476, 0.000172, 0.000000),
750: (0.000332, 0.000120, 0.000000),
755: (0.000235, 0.000085, 0.000000),
760: (0.000166, 0.000060, 0.000000),
765: (0.000117, 0.000042, 0.000000),
770: (0.000083, 0.000030, 0.000000),
775: (0.000059, 0.000021, 0.000000),
780: (0.000042, 0.000015, 0.000000),
}
DATA_CMFS: Dict = {
380: np.array([0.001368, 3.90e-05, 0.006450]),
385: np.array([0.002236, 6.40e-05, 0.010550]),
390: np.array([0.004243, 0.000120, 0.020050]),
395: np.array([0.007650, 0.000217, 0.036210]),
400: np.array([0.014310, 0.000396, 0.067850]),
405: np.array([0.023190, 0.000640, 0.110200]),
410: np.array([0.043510, 0.001210, 0.207400]),
415: np.array([0.077630, 0.002180, 0.371300]),
420: np.array([0.134380, 0.004000, 0.645600]),
425: np.array([0.214770, 0.007300, 1.039050]),
430: np.array([0.283900, 0.011600, 1.385600]),
435: np.array([0.328500, 0.016840, 1.622960]),
440: np.array([0.348280, 0.023000, 1.747060]),
445: np.array([0.348060, 0.029800, 1.782600]),
450: np.array([0.336200, 0.038000, 1.772110]),
455: np.array([0.318700, 0.048000, 1.744100]),
460: np.array([0.290800, 0.060000, 1.669200]),
465: np.array([0.251100, 0.073900, 1.528100]),
470: np.array([0.195360, 0.090980, 1.287640]),
475: np.array([0.142100, 0.112600, 1.041900]),
480: np.array([0.095640, 0.139020, 0.812950]),
485: np.array([0.057950, 0.169300, 0.616200]),
490: np.array([0.032010, 0.208020, 0.465180]),
495: np.array([0.014700, 0.258600, 0.353300]),
500: np.array([0.004900, 0.323000, 0.272000]),
505: np.array([0.002400, 0.407300, 0.212300]),
510: np.array([0.009300, 0.503000, 0.158200]),
515: np.array([0.029100, 0.608200, 0.111700]),
520: np.array([0.063270, 0.710000, 0.078250]),
525: np.array([0.109600, 0.793200, 0.057250]),
530: np.array([0.165500, 0.862000, 0.042160]),
535: np.array([0.225750, 0.914850, 0.029840]),
540: np.array([0.290400, 0.954000, 0.020300]),
545: np.array([0.359700, 0.980300, 0.013400]),
550: np.array([0.433450, 0.994950, 0.008750]),
555: np.array([0.512050, 1.000000, 0.005750]),
560: np.array([0.594500, 0.995000, 0.003900]),
565: np.array([0.678400, 0.978600, 0.002750]),
570: np.array([0.762100, 0.952000, 0.002100]),
575: np.array([0.842500, 0.915400, 0.001800]),
580: np.array([0.916300, 0.870000, 0.001650]),
585: np.array([0.978600, 0.816300, 0.001400]),
590: np.array([1.026300, 0.757000, 0.001100]),
595: np.array([1.056700, 0.694900, 0.001000]),
600: np.array([1.062200, 0.631000, 0.000800]),
605: np.array([1.045600, 0.566800, 0.000600]),
610: np.array([1.002600, 0.503000, 0.000340]),
615: np.array([0.938400, 0.441200, 0.000240]),
620: np.array([0.854450, 0.381000, 0.000190]),
625: np.array([0.751400, 0.321000, 0.000100]),
630: np.array([0.642400, 0.265000, 5.00e-05]),
635: np.array([0.541900, 0.217000, 3.00e-05]),
640: np.array([0.447900, 0.175000, 2.00e-05]),
645: np.array([0.360800, 0.138200, 1.00e-05]),
650: np.array([0.283500, 0.107000, 0.000000]),
655: np.array([0.218700, 0.081600, 0.000000]),
660: np.array([0.164900, 0.061000, 0.000000]),
665: np.array([0.121200, 0.044580, 0.000000]),
670: np.array([0.087400, 0.032000, 0.000000]),
675: np.array([0.063600, 0.023200, 0.000000]),
680: np.array([0.046770, 0.017000, 0.000000]),
685: np.array([0.032900, 0.011920, 0.000000]),
690: np.array([0.022700, 0.008210, 0.000000]),
695: np.array([0.015840, 0.005723, 0.000000]),
700: np.array([0.011359, 0.004102, 0.000000]),
705: np.array([0.008111, 0.002929, 0.000000]),
710: np.array([0.005790, 0.002091, 0.000000]),
715: np.array([0.004109, 0.001484, 0.000000]),
720: np.array([0.002899, 0.001047, 0.000000]),
725: np.array([0.002049, 0.000740, 0.000000]),
730: np.array([0.001440, 0.000520, 0.000000]),
735: np.array([0.001000, 0.000361, 0.000000]),
740: np.array([0.000690, 0.000249, 0.000000]),
745: np.array([0.000476, 0.000172, 0.000000]),
750: np.array([0.000332, 0.000120, 0.000000]),
755: np.array([0.000235, 8.50e-05, 0.000000]),
760: np.array([0.000166, 6.00e-05, 0.000000]),
765: np.array([0.000117, 4.20e-05, 0.000000]),
770: np.array([8.30e-05, 3.00e-05, 0.000000]),
775: np.array([5.90e-05, 2.10e-05, 0.000000]),
780: np.array([4.20e-05, 1.50e-05, 0.000000]),
}
DATA_SAMPLE_ABRIDGED: Dict = {
500: 0.0651,
520: 0.0705,
540: 0.0772,
560: 0.0870,
580: 0.1128,
600: 0.1360,
}
DATA_MULTI_SAMPLE_ABRIDGED: Dict = {
500: (0.004900, 0.323000, 0.272000),
510: (0.009300, 0.503000, 0.158200),
520: (0.063270, 0.710000, 0.078250),
530: (0.165500, 0.862000, 0.042160),
540: (0.290400, 0.954000, 0.020300),
550: (0.433450, 0.994950, 0.008750),
560: (0.594500, 0.995000, 0.003900),
}
# Run this module's unit tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 27.687624 | 87 | 0.63248 |
4884cbedf429b58f568f4a96e1328fccb2ba0bb3 | 5,483 | py | Python | experiments/src_exp/data_experimentation/test_clean_store_delete_playground.py | earny-joe/CvDisinfo-Detect | 722b07ef2820759fce8e917ffba6654ac5611c23 | [
"MIT"
] | 4 | 2020-04-27T04:45:53.000Z | 2022-03-28T10:50:07.000Z | experiments/src_exp/data_experimentation/test_clean_store_delete_playground.py | earny-joe/CvDisinfo-Detect | 722b07ef2820759fce8e917ffba6654ac5611c23 | [
"MIT"
] | 16 | 2020-04-12T19:09:10.000Z | 2022-03-12T00:38:18.000Z | experiments/src_exp/data_experimentation/test_clean_store_delete_playground.py | earny-joe/CvDisinfo-Detect | 722b07ef2820759fce8e917ffba6654ac5611c23 | [
"MIT"
] | 1 | 2020-07-19T23:15:05.000Z | 2020-07-19T23:15:05.000Z | # Comment
import pandas as pd
import re
from google.cloud import storage
from pathlib import Path
def download_blob(bucket_name, source_blob_name, destination_file_name):
    """Fetch one object from a Cloud Storage bucket into a local file.

    Args:
        bucket_name: Name of the GCS bucket holding the object.
        source_blob_name: Path of the object (blob) inside the bucket.
        destination_file_name: Local filesystem path to write the object to.
    """
    client = storage.Client()
    target_blob = client.bucket(bucket_name).blob(source_blob_name)
    target_blob.download_to_filename(destination_file_name)
    print(f"Blob {source_blob_name} downloaded to {destination_file_name}.")
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Push a local file up to a Cloud Storage bucket.

    Args:
        bucket_name: Name of the GCS bucket to write into.
        source_file_name: Local filesystem path of the file to upload.
        destination_blob_name: Object (blob) path to create in the bucket.
    """
    client = storage.Client()
    destination = client.bucket(bucket_name).blob(destination_blob_name)
    destination.upload_from_filename(source_file_name)
    print(f"File {source_file_name} uploaded to {destination_blob_name}.")
# Script entry point.
# NOTE(review): main() is not defined in the visible portion of this
# module -- confirm it exists elsewhere before running this as a script.
if __name__ == '__main__':
    main()
| 31.331429 | 79 | 0.61718 |
4885063d81ad224f010848be20c3a4db6e1c984c | 18,988 | py | Python | codes/correl.py | KurmasanaWT/community | 5fc9e7da5b3e8df2bc9f85580a070de8c868a656 | [
"MIT"
] | null | null | null | codes/correl.py | KurmasanaWT/community | 5fc9e7da5b3e8df2bc9f85580a070de8c868a656 | [
"MIT"
] | null | null | null | codes/correl.py | KurmasanaWT/community | 5fc9e7da5b3e8df2bc9f85580a070de8c868a656 | [
"MIT"
] | null | null | null | from dash import dcc, html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import yfinance as yf
import math
from sklearn.linear_model import LinearRegression
from app import app
np.seterr(divide='ignore')
pd.options.display.float_format = '{:,.2f}'.format
# Formats and configures the charts (Plotly template and mode-bar defaults).
pio.templates["draft"] = go.layout.Template(
    layout=go.Layout(
        title_x = 0.0,
        title_pad = dict(l=10, t=10),
        margin = dict(l=50,t=50, b=50, r=50, pad=0, autoexpand=True),
        font = dict(family="Arial", size=10),
        autosize=True,
    ),
    # Faint diagonal "KWT-Community" watermark drawn across every figure.
    layout_annotations=[
        dict(
            name="draft watermark",
            text="KWT-Community",
            textangle=-30,
            opacity=0.03,
            font=dict(family="Arial", color="black", size=80),
            xref="paper",
            yref="paper",
            x=0.5,
            y=0.5,
            showarrow=False,
        )
    ]
)
# Stack the watermark template on top of the built-in "seaborn" theme.
pio.templates.default = "seaborn+draft"
# Resolution used when exporting a figure to an image (toImage button).
plotres:dict = dict(width=1920, height=1080)
# Shared dcc.Graph config: hide the Plotly logo and add drawing/hover tools.
config1 = {
    "displaylogo": False,
    "toImageButtonOptions": plotres,
    "modeBarButtonsToAdd": [
        "drawline",
        "drawopenpath",
        "drawclosedpath",
        "drawcircle",
        "drawrect",
        "eraseshape",
        "hoverClosestCartesian",
        "hoverCompareCartesian"
    ]
}
# Inputs for the dropdown menus.
tickers = pd.read_csv('db/tickers.csv', delimiter=';') # Brazilian exchange tickers
# Display label becomes "TICKER - Name"; append the ".SA" suffix used by
# Yahoo Finance for B3-listed symbols (only to the Brazilian tickers).
tickers['label'] = tickers['value']+" - "+tickers['label']
tickers['value'] = tickers['value']+".SA"
other = pd.read_csv('db/other.csv', delimiter=';') # other assets and indices
other['label'] = other['value']+" - "+other['label']
tickers=pd.concat([tickers,other])
tickers = tickers.to_dict('records')
periods = pd.read_csv('db/periods.csv', delimiter=';').to_dict('records') # analysis periods
intervals = pd.read_csv('db/intervals.csv', delimiter=';').to_dict('records') # data intervals within the period
def market_beta(X,Y,N):
    """Rolling simple linear regression of a stock on the market.

    Args:
        X: pandas Series, the independent variable (the market).
        Y: pandas Series, the dependent variable (the stock).
        N: int, length of the rolling window.  Each fit uses the slice
           ``[i : i + N + 1]`` (N+1 points), matching the original code.

    Returns:
        tuple ``(alphas, betas)`` of numpy arrays aligned with the input:
        position ``i + N`` holds the intercept/slope fitted on the window
        ending there; the first N entries remain NaN.
    """
    # all the observations
    obs = len(X)

    # initiate the betas/alphas with null values so short windows stay NaN
    betas = np.full(obs, np.nan)
    alphas = np.full(obs, np.nan)

    # Convert once outside the loop instead of per iteration.
    x = X.to_numpy()
    y = Y.to_numpy()

    for i in range((obs-N)):
        # np.polyfit with degree 1 is an ordinary least-squares fit and
        # returns [slope, intercept] — numerically the same result as
        # sklearn's LinearRegression, without the per-window object churn.
        slope, intercept = np.polyfit(x[i: i + N + 1], y[i: i + N + 1], 1)
        betas[i + N] = slope
        alphas[i + N] = intercept

    return(alphas, betas)
# LAYOUT
# Page layout: a fullscreen loading overlay, a row of dropdown controls
# (asset, benchmark, period, interval) with an update button, and four
# graphs that the callbacks populate.
# NOTE(review): the literal "PERODO" looks like mojibake for "PERÍODO";
# it is a user-visible string, so it is left untouched here.
layout = dbc.Container(
    children=[
        dcc.Loading(
            #className="kwtload",
            id="load_o1",
            color='#0a0',
            style={'background-color':'rgba(0, 0, 0, 0.5)'},
            parent_style={},
            fullscreen=True,  # overlay covers the whole page while loading
            children=html.Span(id="correl_load_o1", children=["LOADING..."]),
            type="default",
        ),
        dbc.Row([
            html.Div(className='kwtdrops', children=[
                # Dropdown options come from the CSVs loaded at module import.
                html.H5("ATIVO"), dcc.Dropdown( id="ticker", options=tickers, value='VALE3.SA', clearable=False, style={'width':'300px'} ),
                html.H5("BENCHMARK"), dcc.Dropdown( id="indexer", options=tickers, value='^BVSP', clearable=False, style={'width':'300px'} ),
                html.H5("PERODO"), dcc.Dropdown( id="periods", options=periods, value='1y', clearable=False, style={'width':'10rem'} ),
                html.H5("INTERVALO"), dcc.Dropdown( id="intervals", options=intervals, value='1d', clearable=False, style={'width':'10rem'} ),
                dbc.Button(className="kwtchartbtn",id='submitb', n_clicks=0, children='Atualizar')
            ]),
        ]),
        html.Br(),
        dbc.Row([
            dcc.Graph(id="correl_graph", config=config1),
            dcc.Graph(id="correl_graph1", config=config1),
            dcc.Graph(id="correl_graph2", config=config1),
            dcc.Graph(id="correl_graph3", config=config1),
        ]),
    ], fluid=True)
####### CALLBACKS #######
####### CALLBACK PAINEL MERCADO
# | 39.394191 | 186 | 0.555825 |
488664091cf5f88c9226ceceac51ef1864450390 | 1,459 | py | Python | custom_components/blitzortung/geohash_utils.py | Nag94/HomeAssistantConfig | d5f806e05be8d92bf487c58322d20cd9b08c6b98 | [
"Unlicense"
] | 163 | 2020-08-01T12:19:46.000Z | 2022-03-28T09:04:57.000Z | custom_components/blitzortung/geohash_utils.py | Nag94/HomeAssistantConfig | d5f806e05be8d92bf487c58322d20cd9b08c6b98 | [
"Unlicense"
] | 81 | 2020-08-04T00:28:46.000Z | 2022-03-29T15:48:51.000Z | custom_components/blitzortung/geohash_utils.py | Nag94/HomeAssistantConfig | d5f806e05be8d92bf487c58322d20cd9b08c6b98 | [
"Unlicense"
] | 28 | 2020-08-02T12:02:24.000Z | 2022-03-22T00:07:34.000Z | import math
from collections import namedtuple
from . import geohash
# Bounding box; fields s/w/n/e presumably hold south, west, north, east
# bounds (geohash context) — confirm against the functions using it.
Box = namedtuple("Box", ["s", "w", "n", "e"])
| 24.728814 | 87 | 0.607265 |
4887385245fd69a9bed6f6ead371a6a688979596 | 7,398 | py | Python | deepmask/models/DeepMask.py | TJUMMG/SiamDMU | 728ba1333b8d600a8b238a7d29901a01b653c33d | [
"Apache-2.0"
] | 3 | 2021-08-25T01:51:04.000Z | 2022-03-04T06:30:32.000Z | deepmask/models/DeepMask.py | TJUMMG/SiamDMU | 728ba1333b8d600a8b238a7d29901a01b653c33d | [
"Apache-2.0"
] | null | null | null | deepmask/models/DeepMask.py | TJUMMG/SiamDMU | 728ba1333b8d600a8b238a7d29901a01b653c33d | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torchvision
from collections import namedtuple
Config = namedtuple('Config', ['iSz', 'oSz', 'gSz'])
default_config = Config(iSz=160, oSz=56, gSz=112)
def updatePadding(net, nn_padding):
    """Recursively replace the implicit zero-padding of 3x3/7x7 convolutions.

    Walks ``net`` in place.  For every ``Conv2d`` child whose kernel size is
    3 or 7 with a non-zero padding, the child is rebuilt as an explicit
    padding layer followed by a padding-free ``Conv2d`` that reuses the
    original weights and bias.  Padding widths > 1 use ``SymmetricPad2d``
    (defined elsewhere in this module); otherwise the supplied
    ``nn_padding`` class is used.

    Args:
        net: module tree to rewrite in place.
        nn_padding: padding-layer class called as ``nn_padding(pad_width)``,
            e.g. ``nn.ReflectionPad2d``.

    Returns:
        The padding width of a matching ``Conv2d`` (signalling the parent
        that this child must be wrapped), or -1 when no rewrite is needed.
    """
    typename = torch.typename(net)
    # print(typename)
    if typename.find('Sequential') >= 0 or typename.find('Bottleneck') >= 0:
        # Iterate in reverse so replacing a child does not disturb the
        # positions of the children still to be visited.
        modules_keys = list(net._modules.keys())
        for i in reversed(range(len(modules_keys))):
            subnet = net._modules[modules_keys[i]]
            out = updatePadding(subnet, nn_padding)
            if out != -1:
                p = out
                # Capture the conv's configuration so the replacement conv
                # is identical except for padding=0.
                in_c, out_c, k, s, _, d, g, b = \
                    subnet.in_channels, subnet.out_channels, \
                    subnet.kernel_size[0], subnet.stride[0], \
                    subnet.padding[0], subnet.dilation[0], \
                    subnet.groups, subnet.bias,
                conv_temple = nn.Conv2d(in_c, out_c, k, stride=s, padding=0,
                                        dilation=d, groups=g, bias=b)
                # Carry the trained parameters over to the new conv.
                conv_temple.weight = subnet.weight
                conv_temple.bias = subnet.bias
                if p > 1:
                    net._modules[modules_keys[i]] = nn.Sequential(SymmetricPad2d(p), conv_temple)
                else:
                    net._modules[modules_keys[i]] = nn.Sequential(nn_padding(p), conv_temple)
    else:
        if typename.find('torch.nn.modules.conv.Conv2d') >= 0:
            k_sz, p_sz = net.kernel_size[0], net.padding[0]
            if ((k_sz == 3) or (k_sz == 7)) and p_sz != 0:
                return p_sz
    return -1
if __name__ == '__main__':
    # Manual smoke test: exercise SymmetricPad2d (defined elsewhere in this
    # module) on a tiny tensor, then run DeepMask in both modes on the GPU.
    a = SymmetricPad2d(3)
    x = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]])
    out = a(x)
    print(out)

    import torch  # NOTE(review): torch is already imported at module top
    Config = namedtuple('Config', ['iSz', 'oSz', 'gSz'])
    config = Config(iSz=160, oSz=56, gSz=112)
    model = DeepMask(config).cuda()

    # training mode: fixed-size crops slightly larger than iSz
    x = torch.rand(32, 3, config.iSz+32, config.iSz+32).cuda()
    pred_mask, pred_cls = model(x)
    print("Output (training mode)", pred_mask.shape, pred_cls.shape)

    # full image testing mode
    model.context = False  # really important!!
    input_size = config.iSz + model.strides * 16 + (model.context * 32)
    x = torch.rand(8, 3, input_size, input_size).cuda()
    pred_mask, pred_cls = model(x)
    print("Output (testing mode)", pred_mask.shape, pred_cls.shape)
488892bb03ecb663538dd964ad7c41d6e64946b1 | 363 | py | Python | phi.py | filiptronicek/constants | 404b6dbff33cff006d8f38ff08f6a4c2eaa19cdf | [
"MIT"
] | null | null | null | phi.py | filiptronicek/constants | 404b6dbff33cff006d8f38ff08f6a4c2eaa19cdf | [
"MIT"
] | null | null | null | phi.py | filiptronicek/constants | 404b6dbff33cff006d8f38ff08f6a4c2eaa19cdf | [
"MIT"
] | null | null | null | nums = [0,1]
for i in range(45):
if i % 15 == 0 or i == 44:
phi = calcFi()
print(phi)
if i == 44:
with open("outputs/phi.txt", "w") as f:
f.write(str(phi))
else:
calcFi() | 17.285714 | 51 | 0.429752 |
48891373f826c5ea6da77e9ee603e89703e2f555 | 3,657 | py | Python | var_gp/datasets.py | uber-research/var-gp | 1fc3935a84ff58ac529361c53c98e01338244062 | [
"Apache-2.0"
] | 11 | 2020-07-07T12:31:46.000Z | 2021-04-15T13:38:35.000Z | var_gp/datasets.py | thangbui/var-gp | c445bb5f3b961d179c6741227200329a89517d91 | [
"Apache-2.0"
] | 1 | 2021-02-27T20:46:27.000Z | 2021-04-22T16:48:10.000Z | var_gp/datasets.py | uber-research/var-gp | 1fc3935a84ff58ac529361c53c98e01338244062 | [
"Apache-2.0"
] | 4 | 2020-07-06T20:25:34.000Z | 2020-08-19T15:23:40.000Z | import os
import glob
import torch
import numpy as np
# from PIL import Image, UnidentifiedImageError
from torch.utils.data import Dataset
from torchvision.datasets import MNIST
| 26.309353 | 95 | 0.618266 |
488967f137108794225619fe7bf8270b7eb07b12 | 6,834 | py | Python | apps/ouvertime_record/views.py | dnetochaves/repense_rh | ba549bdadc90c088f258d9d640bd59fd696bb705 | [
"MIT"
] | null | null | null | apps/ouvertime_record/views.py | dnetochaves/repense_rh | ba549bdadc90c088f258d9d640bd59fd696bb705 | [
"MIT"
] | 3 | 2021-01-22T06:05:42.000Z | 2021-02-16T10:06:36.000Z | apps/ouvertime_record/views.py | dnetochaves/repense_rh | ba549bdadc90c088f258d9d640bd59fd696bb705 | [
"MIT"
] | null | null | null | from django.shortcuts import render, HttpResponse
from django.views.generic.list import ListView
from django.views.generic.edit import UpdateView, DeleteView, CreateView
from . models import OuverTimeRecord
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from django.views import View
import json
import csv
# Import for reportlab
import io
from django.http import FileResponse
from reportlab.pdfgen import canvas
# Import for Xhtm2
from django.template.loader import get_template
from xhtml2pdf import pisa
#import Xlwt
import xlwt
# ReportLab
def some_view(request):
    """Stream a one-page PDF of overtime records built with ReportLab.

    Draws one row per record (reason, employee name, hours, used flag)
    onto a single canvas page and returns it as an attachment download.
    Only ``showPage()`` is called once, so records that walk ``y`` below
    the page bottom are drawn off-canvas (no pagination).
    """
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="mypdf.pdf"'
    buffer = io.BytesIO()
    p = canvas.Canvas(buffer)
    p.drawString(200, 810, 'Relatorio de Horas ReportLab')
    # NOTE(review): this filters ``employee`` against a *company* id —
    # it looks like it should filter on the company field instead; confirm
    # the intended query against the model definition.
    times = OuverTimeRecord.objects.filter(
        employee=request.user.employee.company.id)
    y = 790
    for time in times:
        # Fixed column x-offsets; y steps 40 points down per record.
        p.drawString(10, y, time.reason)
        p.drawString(100, y, time.employee.name)
        p.drawString(200, y, str(time.hours))
        p.drawString(300, y, str(time.used))
        y -= 40
    p.showPage()
    p.save()
    # Hand the rendered bytes to the HTTP response and release the buffer.
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
# Xhtml2
def link_callback(uri, rel):
    """
    Convert HTML URIs to absolute system paths so xhtml2pdf can access those
    resources.

    NOTE(review): ``finders``, ``settings`` and ``os`` are not imported in
    the visible import block of this file — confirm they are imported
    elsewhere, otherwise this function raises NameError on first call.

    NOTE(review): when ``finders.find(uri)`` succeeds but the resolved path
    does not exist, the final ``raise`` references ``sUrl``/``mUrl`` which
    are only bound in the ``else`` branch — that path raises NameError
    instead of the intended message.
    """
    result = finders.find(uri)
    if result:
        if not isinstance(result, (list, tuple)):
            result = [result]
        result = list(os.path.realpath(path) for path in result)
        path = result[0]
    else:
        sUrl = settings.STATIC_URL  # Typically /static/
        sRoot = settings.STATIC_ROOT  # Typically /home/userX/project_static/
        mUrl = settings.MEDIA_URL  # Typically /media/
        mRoot = settings.MEDIA_ROOT  # Typically /home/userX/project_static/media/

        # Map the URL prefix back onto the corresponding filesystem root.
        if uri.startswith(mUrl):
            path = os.path.join(mRoot, uri.replace(mUrl, ""))
        elif uri.startswith(sUrl):
            path = os.path.join(sRoot, uri.replace(sUrl, ""))
        else:
            return uri

    # make sure that file exists
    if not os.path.isfile(path):
        raise Exception(
            'media URI must start with %s or %s' % (sUrl, mUrl)
        )
    return path
# Excel
| 29.205128 | 87 | 0.655692 |
488992b28e85b2d366e7bc829af9dec94065afc7 | 1,256 | py | Python | second_HW/second_hw_2.py | alex123012/Bioinf_HW | c2689715a8579b2ddccbf5dd1f4aed7dd4a52baa | [
"MIT"
] | null | null | null | second_HW/second_hw_2.py | alex123012/Bioinf_HW | c2689715a8579b2ddccbf5dd1f4aed7dd4a52baa | [
"MIT"
] | null | null | null | second_HW/second_hw_2.py | alex123012/Bioinf_HW | c2689715a8579b2ddccbf5dd1f4aed7dd4a52baa | [
"MIT"
] | 1 | 2021-04-12T11:29:27.000Z | 2021-04-12T11:29:27.000Z | import unittest as ut
import time
if __name__ == '__main__':
main()
# test()
| 20.933333 | 90 | 0.535032 |
6f7d0aa4d2bf1ce8c8c464a58fa65d15faebfee4 | 4,593 | py | Python | research/vrgripper/episode_to_transitions.py | Xtuden-com/tensor2robot | a3674958a046de711e37445d39afd4e529d8dd09 | [
"Apache-2.0"
] | 1 | 2021-10-18T01:27:04.000Z | 2021-10-18T01:27:04.000Z | research/vrgripper/episode_to_transitions.py | Xtuden-com/tensor2robot | a3674958a046de711e37445d39afd4e529d8dd09 | [
"Apache-2.0"
] | null | null | null | research/vrgripper/episode_to_transitions.py | Xtuden-com/tensor2robot | a3674958a046de711e37445d39afd4e529d8dd09 | [
"Apache-2.0"
] | 1 | 2020-09-21T08:29:02.000Z | 2020-09-21T08:29:02.000Z | # coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Functions for converting env episode data to tfrecords of transitions."""
import collections
import gin
import numpy as np
from PIL import Image
import six
from six.moves import range
import tensorflow.compat.v1 as tf
# PEP 8 (E731) prefers ``def`` over assigning lambdas to names: the
# resulting functions get proper __name__ values and cleaner tracebacks.
def _bytes_feature(v):
    """Wrap a list of byte strings as a tf.train bytes Feature."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))


def _int64_feature(v):
    """Wrap a list of ints as a tf.train int64 Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=v))


def _float_feature(v):
    """Wrap a list of floats as a tf.train float Feature."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=v))


_IMAGE_KEY_PREFIX = 'image'
6f7dbf265364f7b257659f2bfab6a7eebd22f8b8 | 44,533 | py | Python | flopy/plot/map.py | mwtoews/flopy | e566845172380e3eae06981ca180923d2362ee56 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | flopy/plot/map.py | mwtoews/flopy | e566845172380e3eae06981ca180923d2362ee56 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | flopy/plot/map.py | mwtoews/flopy | e566845172380e3eae06981ca180923d2362ee56 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
from ..discretization import StructuredGrid, UnstructuredGrid
from ..utils import geometry
try:
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.collections import PathCollection, LineCollection
from matplotlib.path import Path
except (ImportError, ModuleNotFoundError):
plt = None
from . import plotutil
import warnings
warnings.simplefilter("always", PendingDeprecationWarning)
| 33.110037 | 79 | 0.545663 |
6f7e5bc7a0c9e42e5042391f78e81e9e318258c0 | 9,342 | py | Python | pacu/modules/rds__explore_snapshots/main.py | damienjburks/pacu | 5853f9668a7d78945c40d403bf88a47101ba2b3d | [
"BSD-3-Clause"
] | 1 | 2021-12-22T22:39:49.000Z | 2021-12-22T22:39:49.000Z | pacu/modules/rds__explore_snapshots/main.py | damienjburks/pacu | 5853f9668a7d78945c40d403bf88a47101ba2b3d | [
"BSD-3-Clause"
] | null | null | null | pacu/modules/rds__explore_snapshots/main.py | damienjburks/pacu | 5853f9668a7d78945c40d403bf88a47101ba2b3d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import argparse
from pathlib import Path
import json
import random
import string
from botocore.exceptions import ClientError
module_info = {
"name": "rds__explore_snapshots",
"author": "Alexander Morgenstern alexander.morgenstern@rhinosecuritylabs.com",
"category": "EXFIL",
"one_liner": "Creates copies of running RDS databases to access protected information",
"description": "Creates a snapshot of all database instances, restores new database instances from those snapshots, and then changes the master password to allow access to the copied database. After the database has been created, the connection information is given. After interactions with the database are complete, the temporary resources are deleted. If there is an unexpected crash during the module's execution, the subsequent run of the module will attempt to clean up any leftover temporary resources.",
"services": ["RDS"],
"prerequisite_modules": [],
"external_dependencies": [],
"arguments_to_autocomplete": ["--regions"],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info["description"])
parser.add_argument(
"--regions",
required=False,
default=None,
help="One or more (comma separated) AWS regions in the format us-east-1. Defaults to all session regions.",
)
TEMP_FILE = Path(__file__).parent / "temp.json"
WAIT_CONFIG = {"Delay": 10}
def main(args, pacu):
    """Main module function, called from Pacu.

    For each selected region, lists RDS instances and — per user
    confirmation — snapshots the instance, restores a temporary copy from
    the snapshot, hands the copy to ``process_instance`` (defined elsewhere
    in this module), then deletes the temporary snapshot and instance.
    ``cleanup`` is attempted before and after to remove leftovers from a
    previous crashed run.

    Returns a summary dict with the number of instances processed, or a
    'fail' entry when cleanup could not remove temporary data.
    """
    args = parser.parse_args(args)
    if args.regions:
        regions = args.regions.split(",")
    else:
        regions = pacu.get_regions("rds")

    # Remove temporary resources left over from a previous crashed run.
    if not cleanup(pacu):
        if pacu.input(" Cleanup Failed. Continue? (y/n) ") != "y":
            return {"fail": "Failed to delete temporary data."}
    summary_data = {"instances": 0}
    for region in regions:
        pacu.print("Region: {}".format(region))
        client = pacu.get_boto3_client("rds", region)
        pacu.print(" Getting RDS instances...")
        active_instances = get_all_region_instances(client, pacu.print)
        pacu.print(" Found {} RDS instance(s)".format(len(active_instances)))

        for instance in active_instances:
            # Every target requires interactive confirmation.
            prompt = " Target: {} (y/n)? ".format(instance["DBInstanceIdentifier"])
            if pacu.input(prompt).lower() != "y":
                continue

            pacu.print(" Creating temporary snapshot...")
            temp_snapshot = create_snapshot_from_instance(client, instance, pacu.print)
            if not temp_snapshot:
                pacu.print(" Failed to create temporary snapshot")
                continue

            pacu.print(" Restoring temporary instance from snapshot...")
            temp_instance = restore_instance_from_snapshot(
                client, temp_snapshot, pacu.print
            )
            if not temp_instance:
                # Roll back the snapshot before moving on.
                pacu.print(" Failed to create temporary instance")
                delete_snapshot(client, temp_snapshot, pacu.print)
                continue

            process_instance(pacu, client, temp_instance)

            pacu.print(" Deleting temporary resources...")
            delete_instance(client, temp_instance, pacu.print)
            delete_snapshot(client, temp_snapshot, pacu.print)
            summary_data["instances"] += 1

    # Final sweep for anything the loop above failed to delete.
    if not cleanup(pacu):
        summary_data["fail"] = "Failed to delete temporary data."
    return summary_data
| 34.988764 | 515 | 0.650717 |
6f7fca20b708629aa57b407552bb18e0a01540bc | 9,846 | py | Python | mechroutines/trans/_routines/lj.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | [
"Apache-2.0"
] | null | null | null | mechroutines/trans/_routines/lj.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | [
"Apache-2.0"
] | null | null | null | mechroutines/trans/_routines/lj.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | [
"Apache-2.0"
] | null | null | null | """
Executes the automation part of 1DMin
"""
import statistics
import autofile
from autorun import run_script
from mechroutines.trans._routines import _geom as geom
from mechroutines.trans._routines import _gather as gather
from mechroutines.trans.runner import lj as lj_runner
from mechlib import filesys
from mechlib import amech_io
from mechlib.amech_io import printer as ioprinter
def onedmin(spc_name,
            spc_dct, thy_dct, etrans_keyword_dct,
            run_prefix, save_prefix):
    """ Run the OneDMin Lennard-Jones task for a target/bath pair.

        Builds the species and theory info objects, locates the minimum
        energy conformer of the target, sets up the energy-transfer
        filesystem, and — when the saved parameters are missing,
        insufficient, or to be overwritten — runs OneDMin and saves the
        resulting epsilon/sigma values.

        :param spc_name: name of the target species in spc_dct
        :param spc_dct: species dictionaries
        :param thy_dct: theory-level dictionaries
        :param etrans_keyword_dct: etrans task keywords (bath, levels, ...)
        :param run_prefix: root of the run filesystem
        :param save_prefix: root of the save filesystem
    """
    bath_name = etrans_keyword_dct['bath']
    tgt_dct, bath_dct = spc_dct[spc_name], spc_dct[bath_name]
    tgt_info = filesys.inf.get_spc_info(tgt_dct)
    bath_info = filesys.inf.get_spc_info(bath_dct)
    lj_info = filesys.inf.combine_spc_info(tgt_info, bath_info)

    # Build the modified thy objs
    inp_thy_info = filesys.inf.get_es_info(
        etrans_keyword_dct['inplvl'], thy_dct)
    run_thy_info = filesys.inf.get_es_info(
        etrans_keyword_dct['runlvl'], thy_dct)
    tgt_mod_thy_info = filesys.inf.modify_orb_restrict(
        tgt_info, inp_thy_info)
    bath_mod_thy_info = filesys.inf.modify_orb_restrict(
        bath_info, inp_thy_info)
    lj_mod_thy_info = filesys.inf.modify_orb_restrict(
        lj_info, run_thy_info)

    # Build the target conformer filesystem objects
    tgt_cnf_run_fs, tgt_cnf_save_fs = filesys.build_fs(
        run_prefix, save_prefix, 'CONFORMER',
        spc_locs=tgt_info, thy_locs=tgt_mod_thy_info[1:])
    tgt_loc_info = filesys.mincnf.min_energy_conformer_locators(
        tgt_cnf_save_fs, tgt_mod_thy_info)
    tgt_min_cnf_locs, tgt_cnf_save_path = tgt_loc_info
    # Create run fs if that directory has been deleted to run the jobs
    tgt_cnf_run_fs[-1].create(tgt_min_cnf_locs)
    tgt_cnf_run_path = tgt_cnf_run_fs[-1].path(tgt_min_cnf_locs)
    # Get options from the dct or es options lst
    # tgt_cnf_run_fs[-1].create(tgt_min_cnf_locs)
    # tgt_cnf_run_path = filesys.build.cnf_paths_from_locs(
    #     tgt_cnf_run_fs, [tgt_min_cnf_locs])[0]

    # Build the target energy transfer filesystem objects
    etrans_run_fs = autofile.fs.energy_transfer(tgt_cnf_run_path)
    etrans_save_fs = autofile.fs.energy_transfer(tgt_cnf_save_path)
    etrans_locs = bath_info + lj_mod_thy_info[1:4]

    # Build the bath conformer filesystem objects
    # _, bath_thy_save_path = filesys.build.spc_thy_fs_from_root(
    #     save_prefix, bath_info, bath_mod_thy_info)
    # ioprinter.debug_message('bath path', bath_thy_save_path)
    # bath_cnf_save_fs = autofile.fs.conformer(bath_thy_save_path)

    # Calculate and save the Lennard-Jones parameters, if needed
    run_needed, nsamp_needed = _need_run(
        etrans_save_fs, etrans_locs, etrans_keyword_dct)
    if run_needed:
        # NOTE(review): ``bath_cnf_save_fs`` is passed below but its only
        # assignment in this function is commented out above — this branch
        # raises NameError as written; confirm and restore the assignment.
        _runlj(nsamp_needed,
               lj_info, lj_mod_thy_info,
               tgt_mod_thy_info, bath_mod_thy_info,
               tgt_cnf_save_fs, bath_cnf_save_fs,
               etrans_run_fs, etrans_locs,
               etrans_keyword_dct)
        _savelj(etrans_run_fs, etrans_save_fs, etrans_locs,
                etrans_keyword_dct)
    else:
        epath = etrans_save_fs[-1].file.lennard_jones_epsilon.path(etrans_locs)
        spath = etrans_save_fs[-1].file.lennard_jones_sigma.path(etrans_locs)
        ioprinter.info_message(
            '- Lennard-Jones epsilon found at path {}'.format(epath))
        ioprinter.info_message(
            '- Lennard-Jones sigma found at path {}'.format(spath))
def _need_run(etrans_save_fs, etrans_locs, etrans_keyword_dct):
    """ Determine whether OneDMin must be run and how many samples to add.

        Runs when either Lennard-Jones parameter file is missing, when the
        user requested an overwrite, or when the filesystem holds fewer
        samples than requested.

        :param etrans_save_fs: energy-transfer save filesystem object
        :param etrans_locs: locators into the energy-transfer filesystem
        :param etrans_keyword_dct: etrans keywords ('nsamp', 'overwrite')
        :return: (run, nsamp_need) — whether to run and how many new samples
        :rtype: (bool, int)
    """
    nsamp = etrans_keyword_dct['nsamp']
    overwrite = etrans_keyword_dct['overwrite']

    ex1 = etrans_save_fs[-1].file.lennard_jones_epsilon.exists(etrans_locs)
    ex2 = etrans_save_fs[-1].file.lennard_jones_sigma.exists(etrans_locs)
    if not ex1 or not ex2:
        ioprinter.info_message(
            'Either no Lennard-Jones epsilon or sigma found in',
            'save filesys. Running OneDMin for params...')
        run = True
        nsamp_need = nsamp
    elif overwrite:
        ioprinter.info_message(
            'User specified to overwrite parameters with new run...')
        run = True
        nsamp_need = nsamp
    else:
        inf_obj = etrans_save_fs[-1].file.info.read(etrans_locs)
        nsampd = inf_obj.nsamp
        # BUG FIX: the comparison was inverted (ran when the saved count
        # already exceeded the request, with the deficit reversed).  Run
        # only when fewer samples exist than requested; the deficit is the
        # number of new samples to generate.
        if nsamp > nsampd:
            run = True
            nsamp_need = nsamp - nsampd
        else:
            run = False
            nsamp_need = 0

    return run, nsamp_need
def _runlj(nsamp_needed,
           lj_info, lj_mod_thy_info,
           tgt_mod_thy_info, bath_mod_thy_info,
           tgt_cnf_save_fs, bath_cnf_save_fs,
           etrans_run_fs, etrans_locs,
           etrans_keyword_dct):
    """ Set up and launch the OneDMin jobs that sample the Lennard-Jones
        potential.

        Splits the requested samples across ``njobs`` job directories,
        writes the per-job input files (geometries, electronic-structure
        input, OneDMin input), then writes and executes one batch
        submission script covering all jobs.
    """
    # Pull stuff from dct
    njobs = etrans_keyword_dct['njobs']
    smin = etrans_keyword_dct['smin']
    smax = etrans_keyword_dct['smax']
    conf = etrans_keyword_dct['conf']

    # Determine the number of samples per job
    # NOTE: floor division drops the remainder, so up to njobs-1 requested
    # samples are silently not scheduled.
    nsamp_per_job = nsamp_needed // njobs

    # Set the path to the executable (hard-coded cluster location)
    onedmin_exe_path = '/lcrc/project/CMRP/amech/OneDMin/build'

    # Obtain the geometry for the target and bath
    tgt_geo = geom.get_geometry(
        tgt_cnf_save_fs, tgt_mod_thy_info, conf=conf)
    bath_geo = geom.get_geometry(
        bath_cnf_save_fs, bath_mod_thy_info, conf=conf)

    # Set the path to the etrans lead fs
    etrans_run_path = etrans_run_fs[-1].path(etrans_locs)

    # Build the run directory
    onedmin_run_path = lj_runner.build_rundir(etrans_run_path)

    # Run an instance of 1DMin for each processor
    for idx in range(njobs):
        # Build run directory
        onedmin_job_path = lj_runner.make_jobdir(onedmin_run_path, idx)

        # Write the input files
        xyz1_str, xyz2_str = lj_runner.write_xyz(tgt_geo, bath_geo)
        elstruct_inp_str, elstruct_sub_str = lj_runner.write_elstruct_inp(
            lj_info, lj_mod_thy_info)
        onedmin_str = lj_runner.write_input(
            nsamp_per_job, smin=smin, smax=smax,
            target_name='target.xyz', bath_name='bath.xyz')

        input_strs = (
            xyz1_str, xyz2_str,
            elstruct_inp_str, elstruct_sub_str,
            onedmin_str)
        input_names = (
            'target.xyz', 'bath.xyz',
            'qc.mol', 'ene.x',
            'input.dat')

        inp = tuple(zip(input_strs, input_names))
        # NOTE(review): ``('ene.x')`` is a plain string, not a one-element
        # tuple — confirm write_files accepts a string for exe_names.
        amech_io.writer.write_files(
            inp, onedmin_job_path, exe_names=('ene.x'))

    # Write the batch submission script for each instance
    onedmin_sub_str = lj_runner.write_onedmin_sub(
        njobs, onedmin_run_path, onedmin_exe_path,
        exe_name='onedmin-dd-molpro.x')
    sub_inp = ((onedmin_sub_str, 'build.sh'),)
    amech_io.writer.write_files(
        sub_inp, onedmin_run_path, exe_names=('build.sh'))

    # Submit all of the OneDMin jobs
    ioprinter.info_message('Running each OneDMin job...', newline=2)
    run_script(onedmin_sub_str, onedmin_run_path)
def _savelj(etrans_run_fs, etrans_save_fs, etrans_locs,
            etrans_keyword_dct):
    """ Collect Lennard-Jones results and write them to the save filesystem.

        Merges epsilon/sigma/geometry values already in the save filesystem
        with those parsed from the OneDMin run output, averages them, and
        writes the averages, an info object, a trajectory file, and
        placeholder input files.
    """
    # Read the dictionary
    ljpotential = etrans_keyword_dct['pot']

    # Set the run path to read the files
    etrans_run_path = etrans_run_fs[-1].path(etrans_locs)

    # Read any epsilons and sigma currently in the filesystem
    ioprinter.info_message(
        'Reading Lennard-Jones parameters and Geoms from filesystem...',
        newline=1)
    fs_geoms, fs_epsilons, fs_sigmas = gather.read_filesys(
        etrans_save_fs, etrans_locs)
    gather.print_lj_parms(fs_sigmas, fs_epsilons)

    # Read the lj from all the output files
    ioprinter.info_message(
        'Reading Lennard-Jones parameters and Geoms from output...',
        newline=1)
    run_geoms, run_epsilons, run_sigmas = gather.read_output(etrans_run_path)
    gather.print_lj_parms(run_sigmas, run_epsilons)

    # Read the program and version for onedmin
    prog_version = gather.prog_version(etrans_run_path)

    # Add the lists from the two together
    geoms = fs_geoms + run_geoms
    sigmas = fs_sigmas + run_sigmas
    epsilons = fs_epsilons + run_epsilons

    # Average the sigma and epsilon values
    # NOTE(review): when these lists are empty, avg_sigma/avg_epsilon/nsampd
    # are never bound, yet the writes below still reference them — that path
    # raises NameError; confirm whether empty results can reach here.
    if geoms and sigmas and epsilons:
        assert len(geoms) == len(sigmas) == len(epsilons), (
            'Number of geoms, sigmas, and epsilons not the same'
        )
        avg_sigma = statistics.mean(sigmas)
        avg_epsilon = statistics.mean(epsilons)
        nsampd = len(sigmas)
        ioprinter.info_message(
            'Average Sigma to save [unit]:', avg_sigma, newline=1)
        # (typo 'Epsilont' is a runtime string, left untouched here)
        ioprinter.info_message('Average Epsilont to save [unit]:', avg_epsilon)
        ioprinter.info_message('Number of values = ', nsampd)

    # Update the trajectory file
    traj = []
    for geo, eps, sig in zip(geoms, epsilons, sigmas):
        comment = 'Epsilon: {} Sigma: {}'.format(eps, sig)
        traj.append((comment, geo))

    # Write the info obj
    inf_obj = autofile.schema.info_objects.lennard_jones(
        nsampd, potential=ljpotential,
        program='OneDMin', version=prog_version)

    # Set up the electronic structure input file (placeholders only)
    onedmin_inp_str = '<ONEDMIN INP>'
    els_inp_str = '<ELSTRUCT INP>'

    # Write the params to the save file system
    etrans_save_fs[-1].file.lj_input.write(onedmin_inp_str, etrans_locs)
    etrans_save_fs[-1].file.info.write(inf_obj, etrans_locs)
    etrans_save_fs[-1].file.molpro_inp_file.write(els_inp_str, etrans_locs)
    etrans_save_fs[-1].file.epsilon.write(avg_epsilon, etrans_locs)
    etrans_save_fs[-1].file.sigma.write(avg_sigma, etrans_locs)
    # NOTE(review): index [1] differs from the [-1] used above — confirm
    # the trajectory is intentionally written at a different layer.
    etrans_save_fs[1].file.trajectory.write(traj, etrans_locs)
6f801ba5e156e09ae80a405057d4699d1492e731 | 7,182 | py | Python | barry/convert.py | jyotiska/barry | 53f3b3f8c070cbc5b2d9dcadebe9f776d170b6ed | [
"MIT"
] | null | null | null | barry/convert.py | jyotiska/barry | 53f3b3f8c070cbc5b2d9dcadebe9f776d170b6ed | [
"MIT"
] | null | null | null | barry/convert.py | jyotiska/barry | 53f3b3f8c070cbc5b2d9dcadebe9f776d170b6ed | [
"MIT"
] | null | null | null | from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException
import pandas as pd
import requests
from StringIO import StringIO
def detect_file_extension(filename):
    """Extract and return the extension of a file given a filename.

    Args:
        filename (str): name of the file

    Returns:
        str: extension of the file, lower-cased

    Raises:
        BarryFileException: if extension not present in filename
    """
    if filename is None:
        raise BarryFileException("Input file name cannot be None")

    # Everything after the last dot is the extension; no dot at all means
    # the file type cannot be determined.
    _, dot, extension = filename.rpartition(".")
    if not dot:
        raise BarryFileException("Could not determine input file type from file extension")
    return extension.lower()
def xls_to_df(filename, skip_rows, skip_header, columns):
    """Read a XLS workbook into a pandas DataFrame.

    Args:
        filename (str): name of the file
        skip_rows (int): number of rows to skip from the top
        skip_header (bool): whether to skip the header row
        columns (list or None): explicit column names

    Returns:
        dataframe: a pandas dataframe

    Raises:
        BarryConversionException: if the file cannot be read into a dataframe
    """
    try:
        # Map the (columns, skip_header) flags onto pandas' ``header``
        # argument: explicit column names force header row 0; otherwise
        # skip_header=True means the sheet has no header row at all.
        header_row = 0
        if (columns is None or len(columns) == 0) and skip_header is True:
            header_row = None
        return pd.read_excel(filename, skiprows=skip_rows, header=header_row, names=columns)
    except Exception as e:
        raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def xlsx_to_df(filename, skip_rows, skip_header, columns):
    """Read a XLSX workbook into a pandas DataFrame.

    Args:
        filename (str): name of the file
        skip_rows (int): number of rows to skip from the top
        skip_header (bool): whether to skip the header row
        columns (list or None): explicit column names

    Returns:
        dataframe: a pandas dataframe

    Raises:
        BarryConversionException: if the file cannot be read into a dataframe
    """
    try:
        # Explicit column names force header row 0; otherwise skip_header
        # maps onto pandas' header argument (None means "no header row").
        header_row = 0
        if (columns is None or len(columns) == 0) and skip_header is True:
            header_row = None
        return pd.read_excel(filename, skiprows=skip_rows, header=header_row, names=columns)
    except Exception as e:
        raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def csv_to_df(filename, skip_rows, skip_header, columns):
    """Read a CSV file into a pandas DataFrame.

    Args:
        filename (str): name of the file
        skip_rows (int): number of rows to skip from the top
        skip_header (bool): whether to skip the header row
        columns (list or None): explicit column names

    Returns:
        dataframe: a pandas dataframe

    Raises:
        BarryConversionException: if the file cannot be read into a dataframe
    """
    try:
        # Explicit column names force header row 0; otherwise skip_header
        # maps onto pandas' header argument (None means "no header row").
        header_row = 0
        if (columns is None or len(columns) == 0) and skip_header is True:
            header_row = None
        return pd.read_csv(filename, skiprows=skip_rows, header=header_row, names=columns)
    except Exception as e:
        raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def url_to_df(url, skip_rows, skip_header, columns):
    """Converts a CSV from HTTP URL to Pandas dataframe.

    Args:
        url (str): http url of the csv
        skip_rows (int): number of rows to skip from top
        skip_header (bool): whether to skip header
        columns (list or None): list of column names

    Returns:
        dataframe: a pandas dataframe

    Raises:
        BarryConversionException: if the URL content cannot be converted
            to a dataframe
    """
    try:
        # Check if columns names has been passed
        if columns is not None and len(columns) > 0:
            skip_header = 0

        # Check if header needs to be skipped
        if skip_header is True:
            skip_header = None
        else:
            skip_header = 0

        url_content = requests.get(url).content
        return pd.read_csv(StringIO(url_content), skiprows=skip_rows, header=skip_header, names=columns)
    except Exception as e:
        # BUG FIX: this handler previously referenced the undefined name
        # ``filename`` (the parameter is ``url``), so failures raised a
        # NameError instead of the intended BarryConversionException.
        raise BarryConversionException("Could not convert file %s to dataframe" % (url))
def df_to_xls(df, out_filename):
    """Writes a Pandas dataframe to a XLS file.

    Args:
        df (dataframe): dataframe to be written to file
        out_filename (str): path of the output file

    Raises:
        BarryExportException: if the dataframe cannot be written to the file
    """
    try:
        df.to_excel(out_filename)
    except Exception as e:
        raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_xlsx(df, out_filename):
    """Writes a Pandas dataframe to a XLSX file.

    Args:
        df (dataframe): dataframe to be written to file
        out_filename (str): path of the output file

    Raises:
        BarryExportException: if the dataframe cannot be written to the file
    """
    try:
        df.to_excel(out_filename)
    except Exception as e:
        raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_json(df, out_filename):
    """Writes a Pandas dataframe to a JSON file.

    Args:
        df (dataframe): dataframe to be written to file
        out_filename (str): path of the output file

    Raises:
        BarryExportException: if the dataframe cannot be written to the file
    """
    try:
        df.to_json(out_filename)
    except Exception as e:
        raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_csv(df, out_filename):
    """Writes a Pandas dataframe to a CSV file.

    Args:
        df (dataframe): dataframe to be written to file
        out_filename (str): path of the output file

    Raises:
        BarryExportException: if the dataframe cannot be written to the file
    """
    try:
        df.to_csv(out_filename)
    except Exception as e:
        raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def sort_df(df, sort_column, ascending):
    """Sort a DataFrame on the given column(s) in ascending/descending order.

    Args:
        df (dataframe): dataframe that needs to be sorted
        sort_column (str): column to be sorted on
        ascending (bool): sort order, ascending if True, descending if False

    Returns:
        dataframe: a pandas dataframe

    Raises:
        BarryDFException: if there is any error while sorting the dataframe
    """
    try:
        # DataFrame.sort() was deprecated in pandas 0.17 and removed in
        # 0.20; sort_values() is the drop-in replacement with the same
        # semantics.
        return df.sort_values(by=sort_column, ascending=ascending)
    except Exception as e:
        raise BarryDFException("Could not sort dataframe on columns %s" % (sort_column))
| 31.362445 | 107 | 0.657059 |
6f812a049e2bbe774645e522bf2bfebedf410de7 | 451 | py | Python | flaskerize/schematics/flask-api/files/{{ name }}.template/commands/seed_command.py | darkguinito/myflaskerize | e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b | [
"BSD-3-Clause"
] | 1 | 2020-11-29T13:00:48.000Z | 2020-11-29T13:00:48.000Z | flaskerize/schematics/flask-api/files/{{ name }}.template/commands/seed_command.py | darkguinito/myflaskerize | e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b | [
"BSD-3-Clause"
] | null | null | null | flaskerize/schematics/flask-api/files/{{ name }}.template/commands/seed_command.py | darkguinito/myflaskerize | e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b | [
"BSD-3-Clause"
] | null | null | null | from flask_script import Command
from app import db
| 22.55 | 80 | 0.501109 |
6f83b0761d99f7aecd9538ff15dd5acb4d03288c | 118 | py | Python | com/aptitute_tests/RSL.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | [
"MIT"
] | null | null | null | com/aptitute_tests/RSL.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | [
"MIT"
] | null | null | null | com/aptitute_tests/RSL.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | [
"MIT"
] | null | null | null | a = [2, 4, 5, 7, 8, 9]
# Sum the even values of `a` without shadowing the builtin sum().
# NOTE(review): the original iterated range(len(a) - 1), silently skipping
# the last element; a[-1] is odd in this data, so the printed result (14)
# is unchanged by iterating the whole list.
total = 0
for value in a:
    if value % 2 == 0:
        total += value
print(total)
| 14.75 | 27 | 0.415254 |
6f8670b2eab9bbcfcba5a2bac0b023bbc676fc76 | 8,724 | py | Python | plivo/rest/client.py | vaibhav-plivo/plivo-python | e4ae7559ba4647ac0e1af523c94d49a3fd6a24ca | [
"MIT"
] | null | null | null | plivo/rest/client.py | vaibhav-plivo/plivo-python | e4ae7559ba4647ac0e1af523c94d49a3fd6a24ca | [
"MIT"
] | 10 | 2020-10-19T06:47:45.000Z | 2021-06-25T15:41:34.000Z | plivo/rest/client.py | vaibhav-plivo/plivo-python | e4ae7559ba4647ac0e1af523c94d49a3fd6a24ca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Core client, used for all API requests.
"""
import os
import platform
from collections import namedtuple
from plivo.base import ResponseObject
from plivo.exceptions import (AuthenticationError, InvalidRequestError,
PlivoRestError, PlivoServerError,
ResourceNotFoundError, ValidationError)
from plivo.resources import (Accounts, Addresses, Applications, Calls,
Conferences, Endpoints, Identities, Messages,
Numbers, Pricings, Recordings, Subaccounts)
from plivo.resources.live_calls import LiveCalls
from plivo.resources.queued_calls import QueuedCalls
from plivo.utils import is_valid_mainaccount, is_valid_subaccount
from plivo.version import __version__
from requests import Request, Session
AuthenticationCredentials = namedtuple('AuthenticationCredentials',
'auth_id auth_token')
PLIVO_API = 'https://api.plivo.com'
PLIVO_API_BASE_URI = '/'.join([PLIVO_API, 'v1/Account'])
def fetch_credentials(auth_id, auth_token):
    """Resolve and validate Plivo credentials.

    Falls back to the PLIVO_AUTH_ID / PLIVO_AUTH_TOKEN environment
    variables when either argument is missing, then verifies that the
    auth_id looks like a valid main- or sub-account id.

    Raises:
        AuthenticationError: if no credentials can be found, or if the
            auth_id fails validation.
    """
    if not (auth_id and auth_token):
        auth_id = os.environ.get('PLIVO_AUTH_ID')
        auth_token = os.environ.get('PLIVO_AUTH_TOKEN')
        if auth_id is None or auth_token is None:
            raise AuthenticationError('The Plivo Python SDK '
                                      'could not find your auth credentials.')
    if not is_valid_mainaccount(auth_id) and not is_valid_subaccount(auth_id):
        raise AuthenticationError('Invalid auth_id supplied: %s' % auth_id)
    return AuthenticationCredentials(auth_id=auth_id, auth_token=auth_token)
| 38.09607 | 79 | 0.568432 |
6f86c0ab72e5425a1aa9524504a9484391892c01 | 4,301 | py | Python | Project2/part3/part3controller.py | tyrenyabe/CSE461 | 8a0ae4b4a3f41d659ff784488037583c638d8c43 | [
"MIT"
] | null | null | null | Project2/part3/part3controller.py | tyrenyabe/CSE461 | 8a0ae4b4a3f41d659ff784488037583c638d8c43 | [
"MIT"
] | null | null | null | Project2/part3/part3controller.py | tyrenyabe/CSE461 | 8a0ae4b4a3f41d659ff784488037583c638d8c43 | [
"MIT"
] | null | null | null | # Part 3 of UWCSE's Project 3
#
# based on Lab Final from UCSC's Networking Class
# which is based on of_tutorial by James McCauley
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.addresses import IPAddr, IPAddr6, EthAddr
log = core.getLogger()
#statically allocate a routing table for hosts
#MACs used in only in part 4
IPS = {
"h10" : ("10.0.1.10", '00:00:00:00:00:01'),
"h20" : ("10.0.2.20", '00:00:00:00:00:02'),
"h30" : ("10.0.3.30", '00:00:00:00:00:03'),
"serv1" : ("10.0.4.10", '00:00:00:00:00:04'),
"hnotrust" : ("172.16.10.100", '00:00:00:00:00:05'),
}
def launch():
    """Start the controller component.

    Registers start_switch as the handler invoked for every OpenFlow
    "ConnectionUp" event (i.e. whenever a switch connects to POX).
    """
    core.openflow.addListenerByName("ConnectionUp", start_switch)
| 29.662069 | 86 | 0.670077 |
6f86e19366559629464f6c94fe703e3f9d6154c1 | 956 | py | Python | lessons/terminal.report.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | [
"MIT"
] | null | null | null | lessons/terminal.report.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | [
"MIT"
] | null | null | null | lessons/terminal.report.py | thepros847/python_programiing | d177f79d0d1f21df434bf3f8663ae6469fcf8357 | [
"MIT"
] | null | null | null | #students exams data entries for terminal report card
print("Westside Educational Complex--End Of second Terminal Report--Class-KKJA--Name:Theodora Obaa Yaa Gyarbeng")
# Repeatedly prompt for a score and print its grade band.  The original
# elif chain used closed ranges (e.g. 40..49.9) which left gaps: fractional
# scores such as 39.95 or 49.95 fell through to "invalid entry".  The bands
# below are contiguous, so every in-range score gets a grade.
while True:
    student_score = float(input("Enter the student score:"))
    if student_score < 1.0 or student_score > 100:
        # Only scores in [1, 100] are accepted.
        print("student_score is invalid entry")
    elif student_score < 40:
        print("student_score is F9", "fail")
    elif student_score < 50:
        print("student_score is E8", "pass")
    elif student_score < 60:
        print("student_score is D7", "credit")
    elif student_score < 70:
        print("student_score is C4", "good")
    elif student_score < 80:
        print("student_score is B2", "very_good")
    else:
        print("student_score is A1", "excellent")
# NOTE(review): unreachable — the loop above never breaks.
student = []
| 39.833333 | 113 | 0.654812 |
6f8acfd21a05c8bfceaaf10a028a424c71fb2404 | 201 | py | Python | Python/ex_semanal.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | [
"MIT"
] | null | null | null | Python/ex_semanal.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | [
"MIT"
] | null | null | null | Python/ex_semanal.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | [
"MIT"
] | null | null | null | n = int(input())
# Count how many of the n reported names are repeats.  `c` (used by the
# final output line) is the number of inputs already seen before.
# Fixes: removed the stray debug print ('c -> ', c) that corrupted the
# judged output, and switched the seen-collection to a set for O(1)
# membership tests instead of O(n) list scans.
l = set()
c = 0
for _ in range(n):
    p = input()
    if p in l:
        c += 1
    l.add(p)
print("Falta(m) {} pomekon(s).".format(151 - (n-c))) | 12.5625 | 52 | 0.41791 |
6f8bf885df7a7d6ca45c1dc88e7c1c7b2dc8f976 | 551 | py | Python | util/templatetags/custom_tags.py | dvcolgan/ludumdare27 | 15387d1313de488ee7503189614d42245e6ae56b | [
"MIT"
] | null | null | null | util/templatetags/custom_tags.py | dvcolgan/ludumdare27 | 15387d1313de488ee7503189614d42245e6ae56b | [
"MIT"
] | null | null | null | util/templatetags/custom_tags.py | dvcolgan/ludumdare27 | 15387d1313de488ee7503189614d42245e6ae56b | [
"MIT"
] | null | null | null | from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
#@register.filter
#def format_difference(value):
# number = int(value)
# if number > 0:
# return mark_safe('<span style="color: green">+' + str(number) + '</span>')
# elif number < 0:
# return mark_safe('<span style="color: red">' + str(number) + '</span>')
# else:
# return mark_safe(str(number))
| 26.238095 | 83 | 0.662432 |
6f8c3e1625cd1f82b18d5c123f61e8f119682b0c | 507 | py | Python | Day1/day1.py | leblancpj/AoC21 | 12ceb0fa56245ca803708c042dc72bdefb38e298 | [
"MIT"
] | null | null | null | Day1/day1.py | leblancpj/AoC21 | 12ceb0fa56245ca803708c042dc72bdefb38e298 | [
"MIT"
] | null | null | null | Day1/day1.py | leblancpj/AoC21 | 12ceb0fa56245ca803708c042dc72bdefb38e298 | [
"MIT"
] | null | null | null | # Given a series of input numbers, count the number of times
# the values increase from one to the next.
import pandas as pd

# Part 1: count strictly-increasing consecutive pairs.
# NOTE(review): squeeze=True is deprecated in pandas >= 1.4 — confirm the
# pandas version this runs against.
sample = pd.read_csv(".\Day1\sample.txt", header=None, squeeze=True)
# Named 'depths' so the builtin input() is not shadowed.
depths = pd.read_csv(".\Day1\input.txt", header=None, squeeze=True)
ans = (depths.diff(1) > 0).sum()

# Part 2: same count over a 3-wide rolling-window sum.
rolling = depths.rolling(window=3, min_periods=3, center=True)
print((rolling.sum().dropna().diff(1) > 0).sum())
| 28.166667 | 69 | 0.706114 |
6f8ebcc85be160184b266c276cc0f3687216d3eb | 206 | py | Python | KivyTest.py | ethanmac9/GeneralTools | 96d2cd38b2b5f012113c92b751cd6fdfa8e1f1df | [
"MIT"
] | 1 | 2016-09-24T17:38:06.000Z | 2016-09-24T17:38:06.000Z | KivyTest.py | ethanmac9/GeneralTools | 96d2cd38b2b5f012113c92b751cd6fdfa8e1f1df | [
"MIT"
] | null | null | null | KivyTest.py | ethanmac9/GeneralTools | 96d2cd38b2b5f012113c92b751cd6fdfa8e1f1df | [
"MIT"
] | null | null | null | import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
kivy.require('1.9.0')
glApp = GUITestApp()
glApp.run()
| 17.166667 | 40 | 0.713592 |
6f8edf6b803563f114318f388210647b9924420a | 11,263 | py | Python | avalanche/evaluation/metrics/gpu_usage.py | aishikhar/avalanche | 39c361aba1663795ed33f093ab2e15cc5792026e | [
"MIT"
] | 1 | 2021-08-11T19:43:38.000Z | 2021-08-11T19:43:38.000Z | avalanche/evaluation/metrics/gpu_usage.py | aishikhar/avalanche | 39c361aba1663795ed33f093ab2e15cc5792026e | [
"MIT"
] | null | null | null | avalanche/evaluation/metrics/gpu_usage.py | aishikhar/avalanche | 39c361aba1663795ed33f093ab2e15cc5792026e | [
"MIT"
] | 1 | 2021-04-09T08:10:27.000Z | 2021-04-09T08:10:27.000Z | ################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-01-2021 #
# Author(s): Vincenzo Lomonaco, Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
import GPUtil
from threading import Thread
import time
import warnings
from typing import Optional, TYPE_CHECKING, List
from avalanche.evaluation import Metric, PluginMetric
from avalanche.evaluation.metric_results import MetricValue, MetricResult
from avalanche.evaluation.metric_utils import get_metric_name, \
phase_and_task, stream_type
if TYPE_CHECKING:
from avalanche.training import BaseStrategy
def gpu_usage_metrics(gpu_id, every=0.5, minibatch=False, epoch=False,
experience=False, stream=False) -> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param gpu_id: GPU device ID.
:param every: seconds after which update the maximum GPU
usage
:param minibatch: If True, will return a metric able to log the minibatch
max GPU usage.
:param epoch: If True, will return a metric able to log the epoch
max GPU usage.
:param experience: If True, will return a metric able to log the experience
max GPU usage.
:param stream: If True, will return a metric able to log the evaluation
max stream GPU usage.
:return: A list of plugin metrics.
"""
metrics = []
if minibatch:
metrics.append(MinibatchMaxGPU(gpu_id, every))
if epoch:
metrics.append(EpochMaxGPU(gpu_id, every))
if experience:
metrics.append(ExperienceMaxGPU(gpu_id, every))
if stream:
metrics.append(StreamMaxGPU(gpu_id, every))
return metrics
__all__ = [
'MaxGPU',
'MinibatchMaxGPU',
'EpochMaxGPU',
'ExperienceMaxGPU',
'StreamMaxGPU',
'gpu_usage_metrics'
]
| 29.717678 | 80 | 0.589097 |
6f8f1cd00d467ef37a750fa1bf46d98ba2fd1d86 | 988 | py | Python | 901-1000/971.flip-binary-tree-to-match-preorder-traversal.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
] | null | null | null | 901-1000/971.flip-binary-tree-to-match-preorder-traversal.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
] | null | null | null | 901-1000/971.flip-binary-tree-to-match-preorder-traversal.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=971 lang=python3
#
# [971] Flip Binary Tree To Match Preorder Traversal
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
| 25.333333 | 78 | 0.526316 |
6f8f8270104be3d27614c913a1c738aa92c207d4 | 5,799 | py | Python | hydraulics/ifcb/classifier.py | axiom-data-science/hydraulics | 15031d9694cfe1e552c260079ff60a9faa101ba2 | [
"MIT"
] | 1 | 2021-09-14T23:07:24.000Z | 2021-09-14T23:07:24.000Z | hydraulics/ifcb/classifier.py | axiom-data-science/hydraulics | 15031d9694cfe1e552c260079ff60a9faa101ba2 | [
"MIT"
] | null | null | null | hydraulics/ifcb/classifier.py | axiom-data-science/hydraulics | 15031d9694cfe1e552c260079ff60a9faa101ba2 | [
"MIT"
] | null | null | null | import base64
import datetime
import io
import json
import os
import requests
from collections import namedtuple
from urllib.parse import urlparse
import faust
import numpy as np
import keras_preprocessing.image as keras_img
from avro import schema
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
from biovolume import calc_biovolume
from blob import Blob, BlobConfig
config_path = os.environ.get('IFCB_STREAM_APP_CONFIG', 'config.json')
with open(config_path) as config_file:
config = json.load(config_file)
Stats = namedtuple(
'Stats',
['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob', 'classification_time', 'biovolume', 'carbon', 'hab']
)
ClassifierStats = namedtuple(
'ClassifierStats',
['sample_name', 'prob', 'classifier', 'classification_time']
)
schema_config = {
'url': config['schema.registry.url'],
'ssl.ca.location': None
}
# need to use CachedSchemaRegistryClient to get schema
# - need to copy config because it is consumed when used in CachedSchemaRegistryClient
schema_config_copy = schema_config.copy()
cached_schema_client = CachedSchemaRegistryClient(schema_config)
key_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-key')[1])
value_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-value')[1])
key_schema = avro.loads(key_schema)
value_schema = avro.loads(value_schema)
producer = AvroProducer({
'bootstrap.servers': config['bootstrap.servers'],
'schema.registry.url': config['schema.registry.url']
},
default_key_schema=key_schema,
default_value_schema=value_schema
)
app = faust.App(
config['app_name'],
broker=config['broker'],
topic_partitions=config['topic_partitions'],
store='rocksdb://',
consumer_auto_offset_reset='earliest',
version=1
)
image_topic = app.topic(config['image_topic'])
stats_topic = app.topic(config['stats_topic'])
classifier_stats_table = app.Table('ifcb-classifier-stats', default=ClassifierStats)
diatoms = config['diatoms']
class_names = config['class_names']
hab_species = config['hab_species']
def publish_stats(feature_key, image, classifier_stats, blob_config=None):
    """Compute biovolume/carbon/HAB stats for one ROI and publish to Kafka.

    Args:
        feature_key: underscore-separated key '<pid>_<ifcb_id>_<roi>'; the
            first character of the pid segment is skipped before parsing the
            timestamp (assumes a single prefix letter, e.g. 'D' — TODO confirm).
        image: ROI image handed to Blob for biovolume extraction.
        classifier_stats: sequence of
            (name, prob, classifier, classification_time), matching the
            ClassifierStats namedtuple field order.
        blob_config: optional BlobConfig; a fresh default is built per call
            instead of sharing one instance created at import time
            (mutable-default pitfall).
    """
    if blob_config is None:
        blob_config = BlobConfig()
    # calculate biovolume
    # - scale biovolume for 3d (from ifcb-analysis)
    blob = Blob(image, blob_config)
    biovolume = calc_biovolume(blob)
    mu = 1 / 3.4
    biovolume = biovolume * mu ** 3
    carbon = calc_carbon(classifier_stats[0], biovolume)
    hab = classifier_stats[0] in hab_species
    time, ifcb_id, roi = feature_key.split('_')
    roi = int(roi)
    timestamp = int(datetime.datetime.strptime(time[1:], '%Y%m%dT%H%M%S').timestamp())
    stats = Stats(
        timestamp,
        ifcb_id,
        roi,
        classifier_stats[0],
        classifier_stats[2],
        classifier_stats[1],
        classifier_stats[3],
        biovolume,
        carbon,
        hab
    )
    # send to topic with Avro schema
    producer.poll(0)
    producer.produce(
        topic=config['stats_topic'],
        key={
            'pid': f"{time}_{ifcb_id}",
            'roi': int(roi)
        },
        value=stats._asdict()
    )
    producer.flush()
def get_classifier(url):
    """Extract the (model name, version) pair from a TF-Serving style URL.

    Expects a path of the form '/<version>/<segment>/<name>[:<verb>]',
    e.g. 'http://host/v1/models/phyto:predict' -> ('phyto', 'v1').
    """
    path = urlparse(url).path
    _, version, _, model_segment = path.split('/')
    # Drop the ':predict'-style verb suffix, if present.
    model_name = model_segment.partition(':')[0]
    return (model_name, version)
def calc_carbon(english_name, scaled_biovolume, diatom_list=None):
    r"""Given volume in u3/cell return carbon in pg C/cell.

    $log_10(C) = log(a) + b \cdot log_10(V)$

    Args:
        english_name: class name used to pick which regression to apply.
        scaled_biovolume: biovolume in um^3 per cell.
        diatom_list: names treated as diatoms; when None, the module-level
            ``diatoms`` list is used.  Resolved at call time rather than
            frozen into the signature at import time.

    Returns:
        Carbon in pg C per cell.
    """
    if diatom_list is None:
        diatom_list = diatoms
    if english_name in diatom_list:
        # Diatom-specific regression coefficients.
        carbon = 10 ** (-0.665 + 0.939 * np.log10(scaled_biovolume))
    else:
        carbon = 10 ** (-0.993 + 0.881 * np.log10(scaled_biovolume))
    return carbon
if __name__ == '__main__':
app.main()
| 31.862637 | 113 | 0.682876 |
6f8fb26b38bd837cc1990577bdd9ed4c340e3f51 | 1,296 | py | Python | upcfcardsearch/c260.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | upcfcardsearch/c260.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | upcfcardsearch/c260.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord.utils import get | 56.347826 | 442 | 0.706019 |
6f904b06f4d8b0199476081eeaa6c6a6c588158e | 98 | py | Python | test_cookiecutter_ali92hm/__main__.py | ali92hm/test-cookiecutter | 5fbfef57e7ea8ae8b826958cb1db21812cc8467e | [
"ISC"
] | null | null | null | test_cookiecutter_ali92hm/__main__.py | ali92hm/test-cookiecutter | 5fbfef57e7ea8ae8b826958cb1db21812cc8467e | [
"ISC"
] | null | null | null | test_cookiecutter_ali92hm/__main__.py | ali92hm/test-cookiecutter | 5fbfef57e7ea8ae8b826958cb1db21812cc8467e | [
"ISC"
] | null | null | null | from .cli import entrypoint
if __name__ == "__main__": # pragma: no cover
entrypoint.main()
| 19.6 | 46 | 0.693878 |
6f9177f95c9276da027118820c1944dc489b0063 | 137 | py | Python | backend/elasticsurgery/views/__init__.py | EDITD/ElasticSurgery | 458571d48541d1ddbbfeb20e04703592e5f869e0 | [
"MIT"
] | null | null | null | backend/elasticsurgery/views/__init__.py | EDITD/ElasticSurgery | 458571d48541d1ddbbfeb20e04703592e5f869e0 | [
"MIT"
] | 27 | 2019-09-25T14:19:44.000Z | 2022-02-12T21:39:17.000Z | backend/elasticsurgery/views/__init__.py | EDITD/ElasticSurgery | 458571d48541d1ddbbfeb20e04703592e5f869e0 | [
"MIT"
] | null | null | null | from flask import jsonify
from ..app import app
| 15.222222 | 37 | 0.671533 |
6f91ab094e27b4b2253d62e62b30b9f0a9a83bbc | 1,726 | py | Python | amftrack/notebooks/analysis/data_info.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | 1 | 2021-06-10T02:51:53.000Z | 2021-06-10T02:51:53.000Z | amftrack/notebooks/analysis/data_info.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | null | null | null | amftrack/notebooks/analysis/data_info.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | null | null | null | inst_25 = [(35,0,15),(29,0,20),(9,0,11),(9,13,35),(3,0,19),(37,0,8),(11,0,30),(19,0,25),(13,0,25),(39,0,18)]
inst_bait = [(10,0,10), (14,0,11), (33,0,26),(4,2,18),(4,20,30),(39,117,137),(12,5,21),(28,0,14),(32,5,14),(32,15,44),(36,0,9),(40,0,14),(2,1,15),(2,17,35),(5,160,168),(11,158,164),(13,116,131)]
inst_30 = []
inst_25late = [(32,160,190),(38,61,76),(39,446,466),(40,153,153+37),(39,269,329),(40,262,287),(38,7,42)]
inst_25late_extended = [(39,269,369),(40,153,190),(38,7,50),(38,61,105),(32,160,199),(39,446,486),(35,70,119),(38,106,130),(36,204,233),(30,57,94),(29,221,241),(40,262,312),(29,160,184),(30,0,24)]
inst_25_100P = [(38,131,131+80)]
# treatments = {'25*' : inst_25late,'25' : inst_25,'baits' : inst_bait, '30' : inst_30}
treatments = {'25_100' : inst_25_100P, '25*' : inst_25late,'25' : inst_25,'baits' : inst_bait, '30' : inst_30}
plate_number = {(9,0,11) : 296, (9,13,35) : 296, (3,0,19) : 340, (37,0,8) : 269,(11,0,30) : 314, (19,0,25) : 344, (13,0,25) : 298, (39,0,18) : 297, (35,0,15) : 351,(10,0,10) : 395,(14,0,11) : 399, (33,0,26) : 420, (4,2,18) : 423, (4,20,30) : 423,(8,0,17): 434 ,(8,20,30) : 434,(39,117,137) : 433, (12,5,21) : 436, (28,0,14): 405,(32,5,45):409,(36,0,9) : 419,(40,0,14) : 425,(2,1,15):435,(2,17,35):435,(5,160,168):382,(11,158,164) : 416,(13,116,131) : 424, (29,0,20) : 373,(32,15,44):409, (32,5,14) : 409, (40,153,153+37) : 69,(39,269,329) : 94, (40,262,287) : 102,(38,7,42) : 59, (32,160,190) : 152,(38,61,76) : 137,(39,446,466) : 26, (38,131,131+80):721}
comments = {395 : 'ignore', 399 : 'left', 405 : 'left', 409 : 'right', 416 : 'middle', 419 : 'middle', 420 : 'left', 423: 'right', 424 : 'left', 425 : 'middle', 433 : 'right', 435 : 'middle', 436 : 'left'} | 143.833333 | 656 | 0.551564 |
6f92f4f38b2cf0fe2438ae66792a5537b2e53d3f | 30 | py | Python | version.py | iridiumcow/OoT-Randomizer | 3a5d2bebb2131e593f6611bd5c062ddd6fdac8ff | [
"MIT"
] | null | null | null | version.py | iridiumcow/OoT-Randomizer | 3a5d2bebb2131e593f6611bd5c062ddd6fdac8ff | [
"MIT"
] | null | null | null | version.py | iridiumcow/OoT-Randomizer | 3a5d2bebb2131e593f6611bd5c062ddd6fdac8ff | [
"MIT"
] | null | null | null | __version__ = '5.2.158 f.LUM'
| 15 | 29 | 0.666667 |
6f93e22cf26c9a478c3691514ddab933b92e050e | 280 | py | Python | scripts/test_process_traj.py | hyyh28/trajectory-transformer | 4a369b6d1c950c76d1792cf004644fa13040319c | [
"MIT"
] | null | null | null | scripts/test_process_traj.py | hyyh28/trajectory-transformer | 4a369b6d1c950c76d1792cf004644fa13040319c | [
"MIT"
] | null | null | null | scripts/test_process_traj.py | hyyh28/trajectory-transformer | 4a369b6d1c950c76d1792cf004644fa13040319c | [
"MIT"
] | null | null | null | import numpy as np
import pickle
expert_file = 'maze_expert.npy'
imitation_agent_file = 'maze_agent.npy'
with open(imitation_agent_file, 'rb') as handle:
agent_data = pickle.load(handle)
with open(expert_file, 'rb') as handle:
expert_data = pickle.load(handle)
print("OK") | 31.111111 | 48 | 0.757143 |
6f949785191c10e7a989c8350cfc26a75e30b461 | 3,967 | py | Python | test/test_strings.py | harthur/celestial-snips-app | 5dfb86002e0d109c16c8d01ee77b0e909c263270 | [
"MIT"
] | 1 | 2021-11-23T02:44:14.000Z | 2021-11-23T02:44:14.000Z | test/test_strings.py | harthur/celestial-snips-app | 5dfb86002e0d109c16c8d01ee77b0e909c263270 | [
"MIT"
] | null | null | null | test/test_strings.py | harthur/celestial-snips-app | 5dfb86002e0d109c16c8d01ee77b0e909c263270 | [
"MIT"
] | null | null | null | import unittest
from celestial import Celestial
from strings import CelestialStrings
from datetime import datetime
import pytest
import math
| 34.198276 | 125 | 0.633224 |
6f960fdd0b967c14a7efcefaab212681557a8931 | 3,290 | py | Python | hiburn/config.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | [
"MIT"
] | 8 | 2020-04-06T08:47:26.000Z | 2021-02-23T17:10:12.000Z | hiburn/config.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | [
"MIT"
] | 2 | 2020-05-14T16:59:33.000Z | 2021-06-19T23:48:35.000Z | hiburn/config.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | [
"MIT"
] | 2 | 2020-05-02T22:49:01.000Z | 2020-05-12T02:39:26.000Z |
import copy
import json
import logging
from . import utils
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
| 38.255814 | 99 | 0.50152 |
6f9664a20f8e76c49fc5cef19c2bbf5957b352e2 | 746 | py | Python | examples/4_randomized_timing.py | jonikula/pyosmo | ab2ec1d97dd3e0faf5e2b62ac7bcfc1042e60575 | [
"MIT"
] | null | null | null | examples/4_randomized_timing.py | jonikula/pyosmo | ab2ec1d97dd3e0faf5e2b62ac7bcfc1042e60575 | [
"MIT"
] | null | null | null | examples/4_randomized_timing.py | jonikula/pyosmo | ab2ec1d97dd3e0faf5e2b62ac7bcfc1042e60575 | [
"MIT"
] | null | null | null | from osmo import Osmo
import random
import time
osmo = Osmo(PositiveCalculator())
osmo.generate()
| 22.606061 | 64 | 0.630027 |
6f985fc4f5c199385b03c83c5b2b06f32b9bac8b | 3,475 | py | Python | ec2/physbam/utils.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | 20 | 2017-07-03T19:09:09.000Z | 2021-09-10T02:53:56.000Z | ec2/physbam/utils.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | null | null | null | ec2/physbam/utils.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | 9 | 2017-09-17T02:05:06.000Z | 2020-01-31T00:12:01.000Z | #!/usr/bin/env python
# Author: Omid Mashayekhi <omidm@stanford.edu>
import sys
import os
import subprocess
import config
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import ec2
temp_file_name = '_temp_file_'
| 26.937984 | 97 | 0.636835 |
6f9bfb5d1846d96697c801d792f6539b53696861 | 75 | py | Python | Lista5/Lista5ex1.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | [
"MIT"
] | null | null | null | Lista5/Lista5ex1.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | [
"MIT"
] | null | null | null | Lista5/Lista5ex1.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | [
"MIT"
] | null | null | null | frase = input().split()
for palavra in frase:
print(palavra[2], end='') | 25 | 29 | 0.64 |
6f9d6fb07fd37fbb906d2b22ed6f41821f271822 | 198 | py | Python | ishashad.py | albusdemens/Twitter-mining-project | 67a2bd651459568bb74d64dde9cd76fc7925fd32 | [
"MIT"
] | null | null | null | ishashad.py | albusdemens/Twitter-mining-project | 67a2bd651459568bb74d64dde9cd76fc7925fd32 | [
"MIT"
] | null | null | null | ishashad.py | albusdemens/Twitter-mining-project | 67a2bd651459568bb74d64dde9cd76fc7925fd32 | [
"MIT"
] | null | null | null | #To run the code, write
#from ishashad import ishashad
#then ishashad(number) | 18 | 37 | 0.60101 |
6f9e12faf187ffa4348ad42a554949db236c4d07 | 2,393 | py | Python | tests/utils/test_utils.py | OpenLMIS-Angola/superset-patchup | b20a56b274fb4b2f5f765e5d91c290a28bccf635 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_utils.py | OpenLMIS-Angola/superset-patchup | b20a56b274fb4b2f5f765e5d91c290a28bccf635 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_utils.py | OpenLMIS-Angola/superset-patchup | b20a56b274fb4b2f5f765e5d91c290a28bccf635 | [
"Apache-2.0"
] | null | null | null | """
This module tests utils
"""
from unittest.mock import patch, MagicMock
from superset_patchup.utils import get_complex_env_var, is_safe_url, is_valid_provider
from superset_patchup.oauth import CustomSecurityManager
| 34.681159 | 86 | 0.662766 |
6f9e8adaed53c25080171ad9a2dca161824d3a7c | 14,215 | py | Python | xitorch/_tests/test_integrate.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
] | null | null | null | xitorch/_tests/test_integrate.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
] | null | null | null | xitorch/_tests/test_integrate.py | Jaikinator/xitorch | 053db8d27a7777baa7f572c2d37004e788ff4cb8 | [
"MIT"
] | null | null | null | import random
import torch
import numpy as np
from torch.autograd import gradcheck, gradgradcheck
import xitorch as xt
from xitorch.integrate import quad, solve_ivp, mcquad, SQuad
from xitorch._tests.utils import device_dtype_float_test
################################## quadrature ##################################
################################## ivp ##################################
class IVPNNModule(torch.nn.Module):
# dydt: -a * y * t - b * y - c * y
class IVPModule(xt.EditableModule):
# dydt: -a * y * t - b * y - c * y
################################## mcquad ##################################
class MCQuadLogProbNNModule(torch.nn.Module):
class MCQuadFcnModule(xt.EditableModule):
def get_true_output(w, a):
    """Closed-form reference value 1 / sqrt(1 + w^2 / a^2) as a tensor."""
    denominator = torch.sqrt(1 + w * w / (a * a))
    return 1.0 / denominator
################################## SQuad ##################################
if __name__ == "__main__":
# with torch.autograd.detect_anomaly():
test_mcquad()
| 35.987342 | 116 | 0.578825 |
6fa1472d7ffbd2b29874a36ce2b115c7eb0c4881 | 120 | py | Python | biothings-hub/files/nde-hub/hub/dataload/sources/figshare/dumper.py | NIAID-Data-Ecosystem/nde-crawlers | c0aff7dc1625ece1e67d03ad56555da7dbdc4a11 | [
"Apache-2.0"
] | null | null | null | biothings-hub/files/nde-hub/hub/dataload/sources/figshare/dumper.py | NIAID-Data-Ecosystem/nde-crawlers | c0aff7dc1625ece1e67d03ad56555da7dbdc4a11 | [
"Apache-2.0"
] | null | null | null | biothings-hub/files/nde-hub/hub/dataload/sources/figshare/dumper.py | NIAID-Data-Ecosystem/nde-crawlers | c0aff7dc1625ece1e67d03ad56555da7dbdc4a11 | [
"Apache-2.0"
] | null | null | null | from hub.dataload.nde import NDEFileSystemDumper
| 20 | 48 | 0.808333 |
6fa203b91e4061ab9a5aeb13af78a9c24d505f2c | 785 | py | Python | faiss_utils.py | yizt/keras-lbl-IvS | 3f98b698c56ae40954b4920da167f7c9e32024c8 | [
"Apache-2.0"
] | 22 | 2019-01-13T12:56:56.000Z | 2020-11-03T01:39:20.000Z | faiss_utils.py | yizt/keras-lbl-IvS | 3f98b698c56ae40954b4920da167f7c9e32024c8 | [
"Apache-2.0"
] | null | null | null | faiss_utils.py | yizt/keras-lbl-IvS | 3f98b698c56ae40954b4920da167f7c9e32024c8 | [
"Apache-2.0"
] | 5 | 2019-04-01T09:19:55.000Z | 2020-05-26T14:38:06.000Z | # -*- coding: utf-8 -*-
"""
File Name faiss_utils
Description : faiss
Author : mick.yi
date 2019/1/4
"""
import faiss
import numpy as np
def update_multi(index, vectors, ids):
    """Replace the entries for `ids` in a faiss index with `vectors`.

    The ids (and their vectors) are reordered by ascending id before the
    remove/add round-trip; fancy indexing yields fresh C-contiguous arrays,
    which avoids the "array is not C-contiguous" ValueError.

    :param index: faiss index supporting remove_ids / add_with_ids
    :param vectors: vectors to insert, row-aligned with ids
    :param ids: integer ids to replace
    """
    order = np.argsort(ids)
    sorted_ids = ids[order]
    # Drop any existing entries for these ids, then re-insert the vectors.
    index.remove_ids(sorted_ids)
    index.add_with_ids(vectors[order], sorted_ids)
| 21.216216 | 47 | 0.602548 |
6fa2447d022693958e9639f974d13fb89a57e078 | 3,201 | py | Python | custom/logistics/api.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2017-02-10T03:14:51.000Z | 2017-02-10T03:14:51.000Z | custom/logistics/api.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | null | null | null | custom/logistics/api.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | null | null | null | import requests
from custom.api.utils import EndpointMixin
| 41.038462 | 108 | 0.650109 |
6fa2c35d5d796a2e58e703cd256e4f54f2acff9f | 432 | py | Python | users/migrations/0004_auto_20191028_2154.py | icnmtrx/classified | c9515352e046293dacd66ba28cb32ae378edf832 | [
"MIT"
] | null | null | null | users/migrations/0004_auto_20191028_2154.py | icnmtrx/classified | c9515352e046293dacd66ba28cb32ae378edf832 | [
"MIT"
] | 2 | 2021-06-08T20:56:16.000Z | 2021-09-08T01:41:42.000Z | users/migrations/0004_auto_20191028_2154.py | icnmtrx/classified | c9515352e046293dacd66ba28cb32ae378edf832 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-10-28 21:54
from django.db import migrations, models
| 22.736842 | 90 | 0.62963 |
6fa38b8004759e97015cce47bf001559adb56f2e | 3,426 | py | Python | functionaltests/api/v2/test_pool.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | functionaltests/api/v2/test_pool.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | functionaltests/api/v2/test_pool.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions
from functionaltests.common import datagen
from functionaltests.api.v2.base import DesignateV2Test
from functionaltests.api.v2.clients.pool_client import PoolClient
| 36.83871 | 75 | 0.700525 |
6fa4cb77b9686bd974f4ba0799278420d18f452c | 1,928 | py | Python | fewshot/models/basic_model_VAT_ENT.py | AhmedAyad89/Consitent-Prototypical-Networks-Semi-Supervised-Few-Shot-Learning | b0b805733ee6c42cee5ddd9eace94edd29f6120d | [
"MIT"
] | 22 | 2019-03-13T02:19:17.000Z | 2021-08-06T03:13:00.000Z | fewshot/models/basic_model_VAT_ENT.py | mattochal/Consitent-Prototypical-Networks-Semi-Supervised-Few-Shot-Learning | b0b805733ee6c42cee5ddd9eace94edd29f6120d | [
"MIT"
] | 1 | 2019-07-27T14:33:02.000Z | 2020-06-01T11:03:20.000Z | fewshot/models/basic_model_VAT_ENT.py | mattochal/Consitent-Prototypical-Networks-Semi-Supervised-Few-Shot-Learning | b0b805733ee6c42cee5ddd9eace94edd29f6120d | [
"MIT"
] | 5 | 2019-03-07T06:18:51.000Z | 2019-10-22T05:33:23.000Z | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from fewshot.models.kmeans_utils import compute_logits
from fewshot.models.model import Model
from fewshot.models.refine_model import RefineModel
from fewshot.models.basic_model_VAT import BasicModelVAT
from fewshot.models.model_factory import RegisterModel
from fewshot.models.nnlib import (concat, weight_variable)
from fewshot.utils import logger
from fewshot.utils.debug import debug_identity
from fewshot.models.SSL_utils import *
l2_norm = lambda t: tf.sqrt(tf.reduce_sum(tf.pow(t, 2)))
log = logger.get()
| 33.824561 | 116 | 0.769191 |
6fa4dc608c43d4d875d4dbb6404a617e1898a215 | 169 | py | Python | FileStorage/utils/__init__.py | Thiefxt/FileStorage | db2882b2ea861f4412cb453edef6439501b13705 | [
"MIT"
] | 1 | 2020-07-15T10:02:40.000Z | 2020-07-15T10:02:40.000Z | FileStorage/utils/__init__.py | Thiefxt/FileStorage | db2882b2ea861f4412cb453edef6439501b13705 | [
"MIT"
] | null | null | null | FileStorage/utils/__init__.py | Thiefxt/FileStorage | db2882b2ea861f4412cb453edef6439501b13705 | [
"MIT"
] | null | null | null | """
@Author : xiaotao
@Email : 18773993654@163.com
@Last modified : 2020/4/24 10:02
@Filename : __init__.py.py
@Description :
@Software : PyCharm
""" | 21.125 | 32 | 0.609467 |
6fa61d901e4c25cb458862fd5a69f1d44a079c88 | 3,864 | py | Python | molsysmt/tools/items.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | molsysmt/tools/items.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | molsysmt/tools/items.py | dprada/molsysmt | 83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d | [
"MIT"
] | null | null | null | import numpy as np
import re as re
from molsysmt._private_tools.lists_and_tuples import is_list_or_tuple
| 26.465753 | 93 | 0.626812 |
6fa63eda0afd91d2591e9bd285c10c6a046d2252 | 5,021 | py | Python | runner/monitor.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | [
"IBM-pibs"
] | null | null | null | runner/monitor.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | [
"IBM-pibs"
] | null | null | null | runner/monitor.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | [
"IBM-pibs"
] | 1 | 2021-05-11T05:09:30.000Z | 2021-05-11T05:09:30.000Z | #!/usr/bin/env python3
import argparse
import subprocess as sp
import select
import sys
import time
import yaml
if __name__ == '__main__':
    # NOTE(review): this triple-quoted string sits inside the `if`, so it is
    # an expression statement rather than a module docstring; kept for its
    # explanatory value.
    """
    We can daemonize our connections to our remote machines, list the FL processes on remote
    machines, or kill FL processes on remote machines. We can either pass a specfic run's metadata
    file, or we can use a 'global' metadata file to list all processes on a list of machines.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('action', choices=['daemonize', 'list', 'kill'])
    parser.add_argument('--config')
    # read metadata config for the specified run
    args = parser.parse_args()
    if (args.config):
        with open(args.config) as config_file:
            # NOTE(review): yaml.Loader instantiates arbitrary YAML tags;
            # prefer yaml.SafeLoader unless the config file is fully trusted.
            config = yaml.load(config_file.read(), Loader=yaml.Loader)
    # NOTE(review): if --config is omitted, `config` is never bound and the
    # next line raises NameError -- confirm whether --config should be
    # a required argument.
    if 'timestamp' in config:
        # Run-specific metadata: one aggregator machine plus the party machines.
        machines = [config['agg_machine']] + config['party_machines']
        usernames = [config['agg_username']] + config['party_usernames']
        # NOTE(review): the conditional is redundant -- this branch is only
        # reached when 'timestamp' is present.
        run_id = config['timestamp'] if 'timestamp' in config else ''
    else:
        # 'Global' metadata: plain machine/username lists, no run id filter.
        machines = config['machines']
        usernames = config['usernames']
        run_id = ''
    # Ensure the local directory that holds the SSH control sockets exists.
    localp = sp.Popen('mkdir -p {}/.ssh'.format(config['local_staging_dir']).split())
    exit_code = localp.wait()
    # decide what to run based on input
    if args.action == 'daemonize':
        # Open one persistent SSH master connection (control socket) per
        # machine; later list/kill invocations reuse these sockets.
        daemonize_cmd = 'ssh '\
            '-o "ControlMaster=auto" '\
            '-o "ControlPath={}/.ssh/master-%r@%h:%p" '\
            '-o "ControlPersist=yes" '\
            '-Nn {}@{}'
        cmds = [daemonize_cmd.format(config['local_staging_dir'], u, m) for m, u in zip(machines,usernames)]
    elif args.action == 'list':
        if 'timestamp' in config:
            # List only the run_agg/run_party processes whose command line
            # carries this run's id (extra format args beyond the
            # placeholders are ignored by str.format).
            list_cmd = \
                'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} '\
                '"set -o pipefail; '\
                'pgrep -u {} -f \\"bash.*run_agg\.py.*{}|bash.*run_party\.py.*{}\\" '\
                '| xargs --no-run-if-empty -I@ pgrep -P @ -f \\"run\\" -a"'
            cmds = [list_cmd.format(config['local_staging_dir'], u, m, u, run_id, run_id, u, run_id, run_id) for m, u in zip(machines, usernames)]
        else:
            # List every run_agg/run_party process (any user, any run) with
            # a ps snapshot of the matched pids and their children.
            list_cmd = \
                'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} ' \
                '"set -o pipefail; '\
                'pgrep -f \\"bash.*run_agg\.py|bash.*run_party\.py\\" '\
                '| tee >(xargs --no-run-if-empty -I@ pgrep -P @) '\
                '| xargs --no-run-if-empty ps -o user:8,pid,ppid,cmd p"'
            cmds = [list_cmd.format(config['local_staging_dir'], u, m) for m, u in zip(machines, usernames)]
    elif args.action == 'kill':
        if 'timestamp' in config:
            # Kill this run's processes: match the launcher shells, expand to
            # their children via pgrep -P, then kill the collected pids.
            kill_cmd = \
                'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} '\
                '"set -o pipefail; '\
                'pgrep -u {} -f \\"bash.*run_agg\.py.*{}|run_party\.py.*{}\\" '\
                '| tee >(xargs --no-run-if-empty pgrep -P) | tee >(xargs --no-run-if-empty kill)"'
            cmds = [kill_cmd.format(config['local_staging_dir'], u, m, u, run_id, run_id, u, run_id, run_id) for m, u in zip(machines, usernames)]
        else:
            # Kill all of this user's run_agg/run_party processes; pgrep first
            # so pkill only fires when something actually matched.
            kill_cmd = \
                'ssh -o "ControlMaster=no" -o "ControlPath={}/.ssh/master-%r@%h:%p" {}@{} '\
                '"set -o pipefail; '\
                'pgrep -u {} -f \\"run_agg\.py|run_party\.py\\" '\
                '&& pkill -u {} -f \\"run_agg\.py|run_party\.py\\""'
            cmds = [kill_cmd.format(config['local_staging_dir'], u, m, u, u) for m, u in zip(machines, usernames)]
    else:
        print('Action not handled. Exiting.')
        exit(1)
    # start all processes
    procs = [sp.Popen(c, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, universal_newlines=True) for c in cmds]
    # One output buffer per machine; procs[i] corresponds to machines[i].
    stdout = ['' for _ in machines]
    stderr = ['' for _ in machines]
    # NOTE(review): `loops` is counted below but never read.
    loops = 0
    # wait for output and finally exit when processes end, obtaining all output
    polls = list(p.poll() for p in procs)
    while any(r == None for r in polls):
        # Block until at least one still-running process has readable stdout.
        ret = select.select([p.stdout.fileno() for p,r in zip(procs,polls) if r == None], [], [])
        for fd in ret[0]:
            for i,p in enumerate(procs):
                if p.stdout.fileno() == fd:
                    stdout[i] += '\t{}'.format(p.stdout.readline())
        polls = tuple(p.poll() for p in procs)
        loops += 1
    # Drain whatever remains after every process has exited, and collect stderr.
    for i,p in enumerate(procs):
        for line in p.stdout:
            stdout[i] += '\t{}'.format(line)
        for line in p.stderr:
            stderr[i] += '\t{}'.format(line)
        if not stdout[i].strip():
            stderr[i] += '\tNo processes found.\n'
    # print output
    if args.action != 'daemonize':
        for i,m in enumerate(machines):
            print("{}:".format(m))
            if stdout[i].strip():
                print(stdout[i])
            if stderr[i].strip():
                print(stderr[i])
| 44.04386 | 146 | 0.54292 |
6fa6d9568fc3d20a8fc6e092cfbe18e8adeb1122 | 134 | py | Python | brian2sampledevice/__init__.py | brian-team/brian2sampledevice | d4b4e932aff35b3350e22039b05fcb022623e5fc | [
"MIT"
] | null | null | null | brian2sampledevice/__init__.py | brian-team/brian2sampledevice | d4b4e932aff35b3350e22039b05fcb022623e5fc | [
"MIT"
] | null | null | null | brian2sampledevice/__init__.py | brian-team/brian2sampledevice | d4b4e932aff35b3350e22039b05fcb022623e5fc | [
"MIT"
] | null | null | null | from .device import SampleDevice
from .codeobject import SampleDeviceCodeObject
__all__ = ['SampleDevice', 'SampleDeviceCodeObject']
| 26.8 | 52 | 0.828358 |
6fa6de489d3ecbdc05135c1a882460c438344d63 | 149 | py | Python | tests/node_test.py | allenai/beaker-py | 99c8d7f6e9938807ca5405964ef35633a19e8d68 | [
"Apache-2.0"
] | null | null | null | tests/node_test.py | allenai/beaker-py | 99c8d7f6e9938807ca5405964ef35633a19e8d68 | [
"Apache-2.0"
] | 20 | 2021-12-16T13:23:07.000Z | 2022-03-31T16:40:02.000Z | tests/node_test.py | allenai/beaker-py | 99c8d7f6e9938807ca5405964ef35633a19e8d68 | [
"Apache-2.0"
] | null | null | null | from beaker import Beaker
| 24.833333 | 64 | 0.778523 |
6fa73ff97a0db35ad4fb506ba2a01779a69e92b2 | 5,984 | py | Python | crawling/data_crawler_set.py | CLUG-kr/cau_hashkeyword | f2b747ce8c5705ea58fd94d2a1d10110d6f4f511 | [
"MIT"
] | 5 | 2019-01-20T06:12:47.000Z | 2019-06-27T13:29:26.000Z | crawling/data_crawler_set.py | AllyHyeseongKim/cau_hashkeyword | 1050dbc74d28765aec6e5870d6f3dae325f5caad | [
"MIT"
] | 4 | 2019-01-10T14:58:45.000Z | 2019-05-16T05:02:53.000Z | crawling/data_crawler_set.py | AllyHyeseongKim/cau_hashkeyword | 1050dbc74d28765aec6e5870d6f3dae325f5caad | [
"MIT"
] | 1 | 2019-02-20T09:11:54.000Z | 2019-02-20T09:11:54.000Z |
# coding: utf-8
# Scrapes the notice boards of Chung-Ang University (main portal, library,
# dormitory, ICT college, CSE department) and uploads titles/dates/URLs to
# the 'server/saving-data/crawling/webpages' node of a Firebase Realtime DB.
# The "# In[n]:" markers are leftovers from a Jupyter-notebook export; the
# original Korean comments were garbled in extraction and are rewritten here.
# In[2]:
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
# Fetch the service account key JSON file contents
cred = credentials.Certificate('/Users/Solomon/Desktop/cau-hashkeyword-serviceAccountKey.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://cau-hashkeyword.firebaseio.com'
})
ref = db.reference('server/saving-data/crawling')
# In[3]:
from bs4 import BeautifulSoup
from urllib.request import urlopen
from selenium import webdriver
import re
# In[3]:
# Headless Chrome is used for the JavaScript-rendered boards below.
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1920x1080')
options.add_argument('disable-gpu')
# In[10]:
# --- Main CAU portal notices ---
cau_title_list = []
cau_date_list = []
cau_url_list = []
driver = webdriver.Chrome("/usr/local/bin/chromedriver", chrome_options=options)
driver.get("https://www.cau.ac.kr/cms/FR_CON/index.do?MENU_ID=100")
driver.implicitly_wait(3)
cau_base_url = "https://www.cau.ac.kr/cms/FR_CON/BoardView.do?MENU_ID=100&CONTENTS_NO=1&SITE_NO=2&P_TAB_NO=&TAB_NO=&BOARD_SEQ=4&BOARD_CATEGORY_NO=&BBS_SEQ="
# Each <li> carries an id like "board_19642"; the numeric BBS_SEQ suffix is
# appended to cau_base_url to form the per-notice URL.
board_list = driver.find_element_by_id("tbody").find_elements_by_tag_name("li")
board_list.reverse()
# count = 0
for item in board_list:
#    if count < 10: pass #
#    else:
    cau_title_list.append(item.find_element_by_class_name("txtL").find_element_by_tag_name('a').text)
    cau_date_list.append(item.find_element_by_class_name("txtInfo").find_element_by_class_name("date").text)
    cau_url_list.append(cau_base_url + item.get_attribute("id").replace("board_",""))
#    count += 1
driver.close()
# NOTE(review): list.insert(0, item) would be O(n) per insert; a
# collections.deque appends at either end in O(1). Here the list is
# reversed once up front (a single O(n) pass) and then appended to, which
# is acceptable.
# In[11]:
# --- Library notices (JavaScript-rendered page) ---
lib_title_list = []
lib_date_list = []
lib_url_list = []
driver = webdriver.Chrome("/usr/local/bin/chromedriver", chrome_options=options)
driver.get("https://library.cau.ac.kr/#/bbs/notice?offset=0&max=20")
driver.implicitly_wait(3)
try:
    # The page contains two <tbody> elements; the notice rows are in the second.
    board_list = driver.find_elements_by_tag_name("tbody")[1].find_elements_by_class_name("ikc-item")
    board_list.reverse()
    for item in board_list: # rows missing the expected cells raise IndexError, caught below
        lib_title_list.append(item.find_elements_by_tag_name("td")[2].find_element_by_tag_name('a').text) # title cell
        lib_date_list.append(item.find_elements_by_tag_name("td")[3].find_elements_by_tag_name("span")[1].text)
except IndexError:
    print("IndexError")
    pass
lib_base_url = "https://library.cau.ac.kr/#/bbs/notice/"
# Per-notice URLs would need the bulletin id between base and sub parts,
lib_sub_url = "?offset=0&max=20"
# but the id is not extracted here, so only the listing URL is stored below.
driver.close()
# In[14]:
# --- Dormitory notice board (static HTML, parsed with BeautifulSoup) ---
dorm_title_list = []
dorm_date_list = []
dorm_url_list = []
dormnotice_url = "https://dormitory.cau.ac.kr/bbs/bbs_list.php?bbsID=notice"
dormnotice_page = urlopen(dormnotice_url)
dormnotice_soup = BeautifulSoup(dormnotice_page, "lxml")
dormnotice_list = dormnotice_soup.find(id='content').find('div').find_all('tr',{'bgcolor':'#fffcdb'})
dormnotice_list.reverse()
if dormnotice_list == []:
    print("No data")
else :
    for item in dormnotice_list:
        dorm_title_list.append(item.find('span',class_='bbsTitle').get_text())
        dorm_url_list.append(item.find('a')['href'])
        dorm_date_list.append("20" + item.find_all('td',class_='t_c')[3].get_text())
# NOTE(review): unlike the library block above, no try/except guards this parsing.
# In[13]:
# --- ICT college notices ---
ict_title_list = []
ict_date_list = []
ict_url_list = []
ictnotice_url = "http://ict.cau.ac.kr/20150610/sub05/sub05_01_list.php"
ictnotice_page = urlopen(ictnotice_url)
ictnotice_soup = BeautifulSoup(ictnotice_page, "lxml")
ict_base_url = "http://ict.cau.ac.kr/20150610/sub05/sub05_01_list.php?cmd=view&cpage=1&idx="
# The href slice [-7:-3] below extracts what is presumably the 4-digit
# article id inserted between ict_base_url and ict_sub_url -- TODO confirm.
ict_sub_url = "&search_gbn=1&search_keyword="
ictnotice_list = ictnotice_soup.find('tbody').find_all('tr')
ictnotice_list.reverse()
if ictnotice_list == []:
    print("No data")
else:
    for item in ictnotice_list:
        ict_title_list.append(item.find('td',class_='cont').find('a').get_text())
        ict_url_list.append(ict_base_url + item.find('td',class_='cont').find('a')['href'][-7:-3] + ict_sub_url)
        ict_date_list.append(item.find_all('td')[2].get_text())
# In[10]:
# --- CSE department notices ---
cse_title_list = []
cse_date_list = []
cse_url_list = []
csenotice_url = "http://cse.cau.ac.kr/20141201/sub05/sub0501.php"
csenotice_page = urlopen(csenotice_url)
csenotice_soup = BeautifulSoup(csenotice_page, "lxml")
csenotice_list = csenotice_soup.find('table',class_='nlist').find_all('tr')
csenotice_list.reverse()
if csenotice_list == []:
    print("No data")
else:
    for item in csenotice_list:
        if item.find('td').get_text() != '':
            cse_title_list.append(re.sub('[\n\t\xa0]','',item.find('a').get_text())) # strip newlines/tabs/nbsp from the title
            cse_url_list.append(csenotice_url + item.find_all('td')[2].find('a')['href'])
            cse_date_list.append(item.find_all('td')[4].get_text())
# In[15]:
# Assemble the payload and push it to Firebase.
import json
from collections import OrderedDict
crawling_data = OrderedDict()
crawling_data['caunotice'] = {'title':cau_title_list, 'date':cau_date_list, 'url':cau_url_list}
crawling_data['library'] = {'title':lib_title_list, 'date':lib_date_list, 'url':"https://library.cau.ac.kr/#/bbs/notice?offset=0&max=20"}
crawling_data['dorm'] = {'title':dorm_title_list, 'date':dorm_date_list, 'url':dorm_url_list}
crawling_data['ict'] = {'title':ict_title_list, 'date':ict_date_list, 'url':ict_url_list}
crawling_data['cse'] = {'title':cse_title_list, 'date':cse_date_list, 'url':cse_url_list}
# Round-trip through JSON to coerce everything into plain serializable types.
crawling_json = json.dumps(crawling_data, ensure_ascii=False, indent="\t")
webpage_ref = ref.child('webpages')
webpage_ref.set(json.loads(crawling_json))
| 30.070352 | 156 | 0.723429 |
6fa85d4b0b5bfa6ac386b4e088bb46a5cbd9b94a | 614 | py | Python | compose.py | luyao777/speech-robot | a00c9ac554b7b7a86af4a57d33acb50bbdc17822 | [
"Apache-2.0"
] | null | null | null | compose.py | luyao777/speech-robot | a00c9ac554b7b7a86af4a57d33acb50bbdc17822 | [
"Apache-2.0"
] | null | null | null | compose.py | luyao777/speech-robot | a00c9ac554b7b7a86af4a57d33acb50bbdc17822 | [
"Apache-2.0"
] | null | null | null | #coding: utf-8
from aip import AipSpeech
from config import DefaultConfig as opt
| 26.695652 | 55 | 0.583062 |
6fa87cb16c669518081acc3c339dbfde1687dc05 | 8,754 | py | Python | tests/test_input_output.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
] | 1 | 2020-11-20T17:17:50.000Z | 2020-11-20T17:17:50.000Z | tests/test_input_output.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
] | 12 | 2020-11-19T05:22:13.000Z | 2020-12-15T03:50:33.000Z | tests/test_input_output.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
] | null | null | null | import unittest
import os
import pathlib
import h5py
from desc.input_reader import InputReader
from desc.equilibrium_io import hdf5Writer, hdf5Reader
from desc.configuration import Configuration, Equilibrium
#from desc.input_output import read_input
#class TestIO(unittest.TestCase):
# """tests for input/output functions"""
#
# def test_min_input(self):
# dirname = os.path.dirname(__file__)
# filename = os.path.join(dirname, 'MIN_INPUT')
# inputs = read_input(filename)
#
# self.assertEqual(len(inputs), 26)
| 37.09322 | 79 | 0.629312 |
6fa98703634598fbfefb544679b794421049f04d | 880 | py | Python | backend/vcdat/test_end_to_end.py | CDAT/vcdat | 0d257da607bd2ed064917922af29247c6c0aaae1 | [
"BSD-3-Clause"
] | 4 | 2018-03-29T01:51:37.000Z | 2019-09-11T14:07:34.000Z | backend/vcdat/test_end_to_end.py | CDAT/vcdat | 0d257da607bd2ed064917922af29247c6c0aaae1 | [
"BSD-3-Clause"
] | 101 | 2018-03-20T16:58:20.000Z | 2019-11-25T19:26:52.000Z | backend/vcdat/test_end_to_end.py | CDAT/vcdat | 0d257da607bd2ed064917922af29247c6c0aaae1 | [
"BSD-3-Clause"
] | 1 | 2018-10-22T20:50:25.000Z | 2018-10-22T20:50:25.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
import pytest
# Declare Fixtures
# --------------------------------------------------------------------
# End to End testing with Selenium
# --------------------------------------------------------------------
| 26.666667 | 78 | 0.621591 |
6fa9d472e775eb87721d162cdd4f797206aefbc8 | 264 | py | Python | scripts/makeToast.py | zgrannan/Technical-Theatre-Assistant | 8928e5f4f179f75f92035e898d102dd55f32e3f3 | [
"MIT"
] | 3 | 2017-01-05T20:02:23.000Z | 2017-10-02T19:55:58.000Z | scripts/makeToast.py | zgrannan/Technical-Theatre-Assistant | 8928e5f4f179f75f92035e898d102dd55f32e3f3 | [
"MIT"
] | 1 | 2016-05-17T20:20:19.000Z | 2016-05-17T20:20:28.000Z | scripts/makeToast.py | zgrannan/Technical-Theatre-Assistant | 8928e5f4f179f75f92035e898d102dd55f32e3f3 | [
"MIT"
] | null | null | null | #makes a toast with the given string ID
from sys import argv
if ( argv[0] == "makeToast.py" ):
print make_toast(argv[1])
| 24 | 109 | 0.708333 |
6fab9608b18da127d6b2008d803781b981e8468d | 334 | py | Python | crisiscleanup/calls/migrations/0011_merge_20180122_2308.py | CrisisCleanup/wcicp-call-service | 0a00e092625e2a48c9807737a4b72e343e1ab0b9 | [
"Apache-1.1"
] | null | null | null | crisiscleanup/calls/migrations/0011_merge_20180122_2308.py | CrisisCleanup/wcicp-call-service | 0a00e092625e2a48c9807737a4b72e343e1ab0b9 | [
"Apache-1.1"
] | null | null | null | crisiscleanup/calls/migrations/0011_merge_20180122_2308.py | CrisisCleanup/wcicp-call-service | 0a00e092625e2a48c9807737a4b72e343e1ab0b9 | [
"Apache-1.1"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-22 23:08
from __future__ import unicode_literals
from django.db import migrations
| 19.647059 | 48 | 0.658683 |
6faccc1a6a5c598d0d380be6821bb6343c6e2509 | 572 | py | Python | test/market_feature_1/test_nose_plugin.py | StefanRatzke/nose-market-features | e63b2367b77e860c919d3a33612a3ba6ff632f5f | [
"Apache-2.0"
] | 5 | 2015-01-12T10:04:15.000Z | 2018-12-20T13:49:47.000Z | test/market_feature_1/test_nose_plugin.py | StefanRatzke/nose-market-features | e63b2367b77e860c919d3a33612a3ba6ff632f5f | [
"Apache-2.0"
] | 16 | 2015-10-06T12:35:20.000Z | 2019-04-01T15:05:17.000Z | test/market_feature_1/test_nose_plugin.py | StefanRatzke/nose-market-features | e63b2367b77e860c919d3a33612a3ba6ff632f5f | [
"Apache-2.0"
] | 2 | 2015-03-02T09:49:11.000Z | 2019-03-26T15:27:41.000Z | from unittest import skip
import unittest2
from nose.plugins.attrib import attr
from nose.tools import assert_equals
| 21.185185 | 49 | 0.653846 |
6fad4b2d42f41ae40846094b93b5343afe778c0f | 2,359 | py | Python | charmcraft/manifest.py | aznashwan/charmcraft | 9310ddaf800307b7ff96438173766309f7cb616f | [
"Apache-2.0"
] | 32 | 2020-05-21T09:19:52.000Z | 2022-03-31T17:05:59.000Z | charmcraft/manifest.py | aznashwan/charmcraft | 9310ddaf800307b7ff96438173766309f7cb616f | [
"Apache-2.0"
] | 417 | 2020-05-11T14:06:02.000Z | 2022-03-31T16:15:59.000Z | charmcraft/manifest.py | aznashwan/charmcraft | 9310ddaf800307b7ff96438173766309f7cb616f | [
"Apache-2.0"
] | 44 | 2020-05-27T09:33:42.000Z | 2022-03-30T14:34:03.000Z | # Copyright 2020-2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Charmcraft manifest.yaml related functionality."""
import datetime
import logging
import pathlib
from typing import Optional, List
import yaml
from charmcraft import __version__, config, linters
logger = logging.getLogger(__name__)
def create_manifest(
    basedir: pathlib.Path,
    started_at: datetime.datetime,
    bases_config: Optional[config.BasesConfiguration],
    linting_results: List[linters.CheckResult],
):
    """Write manifest.yaml under ``basedir`` and return its path.

    The manifest records the charmcraft version, the build start time, the
    bases the artifact runs on, and the attribute-type linting results.
    For packing bundles ``bases_config`` is None and the ``bases`` entry is
    skipped; charms should always include a valid bases configuration.

    :param basedir: Directory to create Charm in.
    :param started_at: Build start time.
    :param bases_config: Relevant bases configuration, if any.
    :returns: Path to created manifest.yaml.
    """
    manifest = {
        "charmcraft-version": __version__,
        "charmcraft-started-at": started_at.isoformat() + "Z",
    }
    # Bundles carry no bases configuration, so the key is simply omitted.
    if bases_config is not None:
        manifest["bases"] = [
            {
                "name": base.name,
                "channel": base.channel,
                "architectures": base.architectures,
            }
            for base in bases_config.run_on
        ]
    # Only attribute-type lint checks are recorded in the manifest.
    attributes = []
    for check in linting_results:
        if check.check_type == linters.CheckType.attribute:
            attributes.append({"name": check.name, "result": check.result})
    manifest["analysis"] = {"attributes": attributes}
    target = basedir / "manifest.yaml"
    target.write_text(yaml.dump(manifest))
    return target
| 31.039474 | 75 | 0.693514 |
6fae14b3638b39a8b273b347bee6855c94403d97 | 5,034 | py | Python | ricnn/RICNN.py | jiangruoqiao/RICNN_RepeatGongCheng-sPaper | 6e8ee6955f6c1c60aa5c32e341664fe350902482 | [
"Apache-2.0"
] | 33 | 2018-12-02T12:15:39.000Z | 2021-12-06T14:10:32.000Z | ricnn/RICNN.py | jiangruoqiao/RICNN_RepeatGongCheng-sPaper | 6e8ee6955f6c1c60aa5c32e341664fe350902482 | [
"Apache-2.0"
] | 5 | 2019-02-10T20:04:00.000Z | 2021-07-03T12:52:20.000Z | ricnn/RICNN.py | jiangruoqiao/RICNN_RepeatGongCheng-sPaper | 6e8ee6955f6c1c60aa5c32e341664fe350902482 | [
"Apache-2.0"
] | 9 | 2018-12-02T12:39:51.000Z | 2020-05-07T08:30:38.000Z | #ecoding:utf-8
# Trains the RICNN (rotation-invariant CNN) classifier on rotated-MNIST
# style data and appends train/test accuracy to "accary_ricnn.txt"
# (misspelled filename kept as-is: the string is runtime behaviour).
# NOTE(review): this script targets Python 2 + TensorFlow 1.x -- it relies
# on the `file()` builtin, `xrange`, integer `/` division, and the
# tf.placeholder / tf.Session / tf.initialize_all_variables APIs.
import DatasetLoader
import RICNNModel
import tensorflow as tf
import sys
import numpy as np
# NOTE(review): this project module shadows the stdlib `re` (regex) module.
import regularization as re
import os
import trainLoader
# Restrict TensorFlow to GPU #1.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Input data locations (hard-coded absolute paths).
TRAIN_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw_100.h5'
TEST_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw.h5'
TRAIN_LABELS = '/media/liuqi/Files/dataset/rotate_100_simple.h5'
TEST_LABELS = '/home/liuqi/Desktop/mnist_rotation_new/mnist_all_rotation_normalized_float_test.amat'
# Images are stored at 28x28 and resized to the 227x227 network input.
LOADED_SIZE = 28
DESIRED_SIZE = 227
# model constants
NUMBER_OF_CLASSES = 10
NUMBER_OF_FILTERS = 40
NUMBER_OF_FC_FEATURES = 5120
# Every sample is fed as 8 transformed (rotated) copies.
NUMBER_OF_TRANSFORMATIONS = 8
# optimization constants
BATCH_SIZE = 64
TEST_CHUNK_SIZE = 100
ADAM_LEARNING_RATE = 1e-5
PRINTING_INTERVAL = 10
# set seeds
np.random.seed(100)
tf.set_random_seed(100)
# Placeholders: a batch of single-channel images with their 8
# transformations, one-hot ground-truth labels, and dropout keep prob.
x = tf.placeholder(tf.float32, shape=[None,
                                      DESIRED_SIZE,
                                      DESIRED_SIZE,
                                      1,
                                      NUMBER_OF_TRANSFORMATIONS])
y_gt = tf.placeholder(tf.float32, shape=[None, NUMBER_OF_CLASSES])
keep_prob = tf.placeholder(tf.float32)
# Build the model graph; regularization_loss is returned but unused below.
logits, raw_feature, regularization_loss = RICNNModel.define_model(x,
                                                                   keep_prob,
                                                                   NUMBER_OF_CLASSES,
                                                                   NUMBER_OF_FILTERS,
                                                                   NUMBER_OF_FC_FEATURES)
with tf.name_scope('loss'):
    with tf.name_scope('re_loss'):
        # Regularization/consistency term; evaluated for logging only --
        # it is not folded into total_loss below.
        re_loss = re.regu_constraint(raw_feature, logits)
    # NOTE(review): 'sotfmax' is a typo for 'softmax'; the name is kept
    # because it is referenced again further down.
    with tf.name_scope('sotfmax_loss'):
        sotfmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_gt))
    with tf.name_scope('total_loss'):
        total_loss = sotfmax_loss
train_step = tf.train.AdamOptimizer(ADAM_LEARNING_RATE).minimize(total_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.initialize_all_variables())
train_data_loader = trainLoader.DataLoader(TRAIN_FILENAME,
                                           TRAIN_LABELS,
                                           NUMBER_OF_CLASSES,
                                           NUMBER_OF_TRANSFORMATIONS,
                                           LOADED_SIZE,
                                           DESIRED_SIZE)
test_data_loader = DatasetLoader.DataLoader(TEST_FILENAME,
                                            TEST_LABELS,
                                            NUMBER_OF_CLASSES,
                                            NUMBER_OF_TRANSFORMATIONS,
                                            LOADED_SIZE,
                                            DESIRED_SIZE)
# The test set is evaluated in fixed-size chunks, so it must divide evenly.
test_size = test_data_loader.all()[1].shape[0]
assert test_size % TEST_CHUNK_SIZE == 0
# NOTE(review): integer division under Python 2; xrange() below needs an int.
number_of_test_chunks = test_size / TEST_CHUNK_SIZE
# Endless training loop: one gradient step per iteration, epoch-start
# logging, and a full test-set evaluation every PRINTING_INTERVAL epochs.
while (True):
    batch = train_data_loader.next_batch(BATCH_SIZE) # next_batch from the loader
    txt_name = "accary_ricnn.txt"
    # NOTE(review): `file()` is the Python 2 builtin; the handle is opened
    # every iteration but only closed on new-epoch iterations -- it leaks
    # on all other iterations.
    txt_file = file(txt_name, "a+")
    if (train_data_loader.is_new_epoch()):
        # Evaluate accuracy and both losses on the current batch (no dropout).
        train_accuracy = session.run(accuracy, feed_dict={x : batch[0],
                                                          y_gt : batch[1],
                                                          keep_prob : 1.0})
        print_loss = session.run(re_loss,feed_dict={x : batch[0],
                                                    y_gt : batch[1],
                                                    keep_prob : 1.0})
        print_loss_1 = session.run(sotfmax_loss, feed_dict={x: batch[0],
                                                            y_gt: batch[1],
                                                            keep_prob: 1.0})
        print(print_loss)
        print(print_loss_1)
        train_context = "epochs:" + str(train_data_loader.get_completed_epochs()) + '\n'
        txt_file.write(train_context)
        loss_context = "softmax_loss:" + str(print_loss_1) + '\n'
        txt_file.write(loss_context)
        txt_file.close()
        print("completed_epochs %d, training accuracy %g" %
              (train_data_loader.get_completed_epochs(), train_accuracy))
        sys.stdout.flush()
        if (train_data_loader.get_completed_epochs() % PRINTING_INTERVAL == 0):
            # NOTE(review): `sum` shadows the builtin, and `xt_name` below
            # looks like a typo for `txt_name` (it is never used).
            sum = 0.0
            xt_name = "accary_ricnn.txt"
            txt_file = file(txt_name, "a+")
            for chunk_index in xrange(number_of_test_chunks):
                chunk = test_data_loader.next_batch(TEST_CHUNK_SIZE)
                sum += session.run(accuracy, feed_dict={x : chunk[0],
                                                        y_gt : chunk[1],
                                                        keep_prob : 1.0})
            test_accuracy = sum / number_of_test_chunks
            new_context = "testing accuracy: " + str(test_accuracy) + '\n'
            txt_file.write(new_context)
            txt_file.close()
            print("testing accuracy %g" % test_accuracy)
            sys.stdout.flush()
    # One optimization step with dropout keep probability 0.5.
    session.run(train_step, feed_dict={x : batch[0],
                                       y_gt : batch[1],
                                       keep_prob : 0.5})
| 43.025641 | 106 | 0.585419 |
6fae2120fe92d529be7069156c9a883d0d04d5b4 | 820 | py | Python | mcot/core/_scripts/gcoord/split.py | MichielCottaar/mcot.core | de00c15b946a99a048694f3d8b6ad822a835b299 | [
"MIT"
] | null | null | null | mcot/core/_scripts/gcoord/split.py | MichielCottaar/mcot.core | de00c15b946a99a048694f3d8b6ad822a835b299 | [
"MIT"
] | null | null | null | mcot/core/_scripts/gcoord/split.py | MichielCottaar/mcot.core | de00c15b946a99a048694f3d8b6ad822a835b299 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Extract radial, sulcal, and gyral orientations from gyral coordinate NIFTI file"""
| 45.555556 | 110 | 0.704878 |
6faf92769295dcd3722767bcf2f74a8c180b6d9e | 3,558 | py | Python | src/pyshark/packet/fields.py | Lovemma/pyshark | 06d00edacf75aa4a9abd77e0c8cab8de33ea27fb | [
"MIT"
] | 10 | 2018-09-13T15:50:43.000Z | 2021-01-30T19:55:36.000Z | src/pyshark/packet/fields.py | Lovemma/pyshark | 06d00edacf75aa4a9abd77e0c8cab8de33ea27fb | [
"MIT"
] | 1 | 2019-09-09T14:15:34.000Z | 2019-09-09T14:15:34.000Z | src/pyshark/packet/fields.py | Lovemma/pyshark | 06d00edacf75aa4a9abd77e0c8cab8de33ea27fb | [
"MIT"
] | 9 | 2018-07-24T09:32:55.000Z | 2021-01-22T21:23:06.000Z | import binascii
from pyshark.packet.common import Pickleable, SlotsPickleable
| 29.65 | 124 | 0.603991 |
6faffdb266238fb5626bfa67ef31c69f49889449 | 1,264 | py | Python | tasks/swipe_card.py | devBezel/among_us_tasker | dd13c13a7d2d776143522ccfa27696d69524707b | [
"MIT"
] | null | null | null | tasks/swipe_card.py | devBezel/among_us_tasker | dd13c13a7d2d776143522ccfa27696d69524707b | [
"MIT"
] | null | null | null | tasks/swipe_card.py | devBezel/among_us_tasker | dd13c13a7d2d776143522ccfa27696d69524707b | [
"MIT"
] | null | null | null | import pyautogui
import time
import datetime
| 29.395349 | 79 | 0.557753 |
6fb000a6fd5b519a73bbb7413dd210206c96960d | 370 | py | Python | python/geeksforgeeks/arrays/rearrengment/reverse_a_string.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
] | null | null | null | python/geeksforgeeks/arrays/rearrengment/reverse_a_string.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
] | null | null | null | python/geeksforgeeks/arrays/rearrengment/reverse_a_string.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
] | null | null | null | # https://www.geeksforgeeks.org/write-a-program-to-reverse-an-array-or-string/
# Time: O(n)
# Space: 1
arr = [1,2,3]
result = reverseByMiddles(arr)
print(result)
print(reverseByMiddles(arr = [1,2,3,4]))
| 18.5 | 78 | 0.627027 |
6fb0bb046f2c82f4fc248ab9913d7e957fa16a08 | 1,397 | py | Python | test_service.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-derek-repka | c724a2b921fdb1e6ba1200b477a4add291941107 | [
"MIT"
] | null | null | null | test_service.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-derek-repka | c724a2b921fdb1e6ba1200b477a4add291941107 | [
"MIT"
] | 4 | 2018-10-09T17:16:38.000Z | 2021-06-10T20:56:30.000Z | test_service.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-derek-repka | c724a2b921fdb1e6ba1200b477a4add291941107 | [
"MIT"
] | null | null | null | from service import Service
from unittest import TestCase
from mock import patch
import sys
| 27.94 | 58 | 0.740873 |
6fb2b10f3d4883634b84a549a2f3de0c0993ed24 | 1,512 | py | Python | libweasyl/libweasyl/alembic/versions/eff79a07a88d_use_timestamp_column_for_latest_.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | 111 | 2016-05-18T04:18:18.000Z | 2021-11-03T02:05:19.000Z | libweasyl/libweasyl/alembic/versions/eff79a07a88d_use_timestamp_column_for_latest_.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | 1,103 | 2016-05-29T05:17:53.000Z | 2022-03-31T18:12:40.000Z | libweasyl/libweasyl/alembic/versions/eff79a07a88d_use_timestamp_column_for_latest_.py | TheWug/weasyl | a568a542cc58c11e30621fb672c701531d4306a8 | [
"Apache-2.0"
] | 47 | 2016-05-29T20:48:37.000Z | 2021-11-12T09:40:40.000Z | """Use TIMESTAMP column for latest submission
Revision ID: eff79a07a88d
Revises: 83e6b2a46191
Create Date: 2017-01-08 22:20:43.814375
"""
# revision identifiers, used by Alembic.
revision = 'eff79a07a88d'
down_revision = '83e6b2a46191'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
import libweasyl
from libweasyl.legacy import UNIXTIME_OFFSET
| 30.857143 | 130 | 0.707011 |
6fb43ce461d11d32148db76a6319e68f78bca70e | 3,499 | py | Python | MBTA/step1_group.py | 404nofound/MBTA_Python | eb5c1bb3fe5f2640dc42949402955c0f73e1c8b1 | [
"Apache-2.0"
] | 1 | 2019-04-15T04:39:13.000Z | 2019-04-15T04:39:13.000Z | MBTA/step1_group.py | 404nofound/MBTA_Python | eb5c1bb3fe5f2640dc42949402955c0f73e1c8b1 | [
"Apache-2.0"
] | null | null | null | MBTA/step1_group.py | 404nofound/MBTA_Python | eb5c1bb3fe5f2640dc42949402955c0f73e1c8b1 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import os
# Function, divided all data into groups by time period, like [1AM-3AM; 3AM-5Am ...]
def binning(column, points, labels=None, month=0, stop=0):
'''
Notes: The Row Data from MBTA webiste
The Time format is from 3:00 to 27:00, means 3:00 AM today to next day 3:00 AM
And in the csv file, it use int to replace date format, like 300 means 3:00 AM; 1500 means 3:00 PM
:param column: use which column to divide, here we use TIME_PERIOD column
:param points: the break points we use to divide
:param labels: the labels for result groups that have been divided
:param month: used to record error
:param stop: used to record error
'''
# Get max time and min time from data
minval = column.min()
maxval = column.max()
# Handle break points and labels errors and print
while maxval <= points[len(points)-1]:
print ('Month: ' + str(month) + ' Stop: ' + stop)
del points[len(points)-1]
del labels[len(points)-1]
while minval >= points[0]:
print ('Month: ' + str(month) + ' Stop: ' + stop)
del points[0]
del labels[0]
# The full break points includes min, max time
break_points = [minval] + points + [maxval]
# If user doesn't provide labels, using int number to replace, here I have provided labels, so it doesn't work
if not labels:
labels = range(len(points)+1)
# cut() function to divide data into groups and return them
columnbin = pd.cut(column, bins=break_points, labels=labels, include_lowest=True)
return columnbin
# Function, make directory. if exist, do nothing
# Using Pandas read every months' row data, from January to July, there only 7 months provide by MBTA this year until now
for month in range(1,8):
csvfile = pd.read_csv('/Users/Eddy/Desktop/Python_MBTA/MBTA_Raw_Entry_Data/2018_0' + str(month) + '.csv')
# Format file to prepare data analysis
df = pd.DataFrame(csvfile)
# Divide data into different part group by stop id
grouped = df.groupby('GTFS_STOP_ID', as_index=False)
# For every stop's data, using binning() function to divide into different time period
for stop, group in grouped:
# Define break points
points = [500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300, 2500]
# Define labels
labels = ['3AM-5AM', '5AM-7AM', '7AM-9AM', '9AM-11AM', '11AM-1PM', '1PM-3PM', '3PM-5PM', '5PM-7PM', '7PM-9PM',
'9PM-11PM', '11PM-1AM', '1AM-3AM']
# Create new column [TIME_PERIOD_Bin] for the result returned by binning() function
group['TIME_PERIOD_Bin'] = binning(group['TIME_PERIOD'], points, labels, month, stop)
# Format all the data again
df_station = pd.DataFrame(group)
# Until now, all data have been grouped by stop_id, and then grouped by time period that we create
group_time = df_station.groupby('TIME_PERIOD_Bin')
# Make directory to store new csv files
mkdir('/Users/Eddy/Desktop/Python_MBTA/Step1/' + str(month))
# Calculate the sum of entry people number for every stops and every periods
data1 = pd.DataFrame(group_time['STATION_ENTRIES'].agg(np.sum))
# Write into the csv files
data1.to_csv('/Users/Eddy/Desktop/Python_MBTA/Step1/' + str(month) + "/" + stop + '.csv') | 38.877778 | 121 | 0.656759 |
6fb582e0d7d4fa8f242a226b9d6f0299b62ae75d | 1,574 | py | Python | tests/dags/test_job_operator_jinja.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
] | null | null | null | tests/dags/test_job_operator_jinja.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
] | null | null | null | tests/dags/test_job_operator_jinja.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
] | null | null | null | from utils import default_args
from datetime import timedelta
from airflow import DAG
from airflow_kubernetes_job_operator import (
KubernetesJobOperator,
JobRunnerDeletePolicy,
KubernetesLegacyJobOperator,
)
dag = DAG(
"kub-job-op-test-jinja",
default_args=default_args,
description="Test base job operator",
schedule_interval=None,
catchup=False,
user_defined_macros={
"test_macro": lambda a: f"my {a}",
"default_image": "ubuntu",
},
)
namespace = None
envs = {
"TIC_COUNT": 3,
"PASS_ARG": "a test",
"JINJA_ENV": "{{ ds }}",
}
default_delete_policy = JobRunnerDeletePolicy.Never
KubernetesJobOperator(
task_id="test-job-success",
namespace=namespace,
image="{{default_image}}",
body_filepath="./templates/test_job.success.jinja.yaml",
envs=envs,
dag=dag,
delete_policy=default_delete_policy,
jinja_job_args={"test": "lama"},
)
# bash_script = """
# #/usr/bin/env bash
# echo "Legacy start for taskid {{ti.task_id}} {{job.test}}"
# cur_count=0
# while true; do
# cur_count=$((cur_count + 1))
# if [ "$cur_count" -ge "$TIC_COUNT" ]; then
# break
# fi
# date
# sleep 1
# done
# echo "Complete"
# """
# KubernetesLegacyJobOperator(
# task_id="legacy-test-job-success",
# image="{{default_image}}",
# cmds=["bash", "-c", bash_script],
# dag=dag,
# is_delete_operator_pod=True,
# env_vars=envs,
# delete_policy=default_delete_policy,
# )
if __name__ == "__main__":
dag.clear(reset_dag_runs=True)
dag.run()
| 22.169014 | 60 | 0.653113 |
6fb591a1133b9da158581aa46d45eeabf6264943 | 6,458 | py | Python | spacecapsule/executor.py | zengzhilong/space-capsule | 26fd0bd42f83c918dfb06bb188009971406e7965 | [
"Apache-2.0"
] | 7 | 2022-02-18T03:14:43.000Z | 2022-03-16T08:38:55.000Z | spacecapsule/executor.py | zengzhilong/space-capsule | 26fd0bd42f83c918dfb06bb188009971406e7965 | [
"Apache-2.0"
] | 6 | 2022-03-10T12:16:59.000Z | 2022-03-18T01:19:00.000Z | spacecapsule/executor.py | zengzhilong/space-capsule | 26fd0bd42f83c918dfb06bb188009971406e7965 | [
"Apache-2.0"
] | 3 | 2022-02-17T08:40:33.000Z | 2022-03-29T01:38:46.000Z | import json
import jsonpath
import paramiko
from spacecapsule.history import store_experiment, rollback_command
from subprocess import Popen, PIPE
from spacecapsule.k8s import prepare_api, copy_tar_file_to_namespaced_pod, executor_command_inside_namespaced_pod
from spacecapsule.template import chaosblade_prepare_script, resource_path, chaosblade_inject, chaosblade_prepare, \
chaosblade_jvm_delay, chaosblade_prepare_script_vm
| 48.19403 | 120 | 0.722979 |
6fb7c5d5fb34e77d36095b18c408e6edbe184b8f | 2,952 | py | Python | experiments/2d_shallowwater/gen.py | flabowski/POD-UQNN | 1c81be432e69d24ae894828f42918fbc1fe54bc1 | [
"MIT"
] | 15 | 2020-05-29T11:42:14.000Z | 2022-03-20T03:53:44.000Z | experiments/2d_shallowwater/gen.py | flabowski/POD-UQNN | 1c81be432e69d24ae894828f42918fbc1fe54bc1 | [
"MIT"
] | null | null | null | experiments/2d_shallowwater/gen.py | flabowski/POD-UQNN | 1c81be432e69d24ae894828f42918fbc1fe54bc1 | [
"MIT"
] | 11 | 2020-06-09T01:16:22.000Z | 2021-04-27T08:53:02.000Z | """POD-NN modeling for 1D, unsteady Burger Equation."""
#%% Imports
import sys
import os
import pickle
import numpy as np
sys.path.append(os.path.join("..", ".."))
from poduqnn.podnnmodel import PodnnModel
from poduqnn.mesh import read_multi_space_sol_input_mesh
from poduqnn.handling import clean_dir, split_dataset
from hyperparams import HP as hp
resdir = "cache"
clean_dir(resdir)
# Getting data from the files
# fake_x = np.zeros(hp["n_s"] + hp["n_s_tst"])
# test_size = hp["n_s_tst"] / (hp["n_s"] + hp["n_s_tst"])
# train_tst_idx = split_dataset(fake_x, fake_x, test_size, idx_only=True)
train_tst_idx = ([129, 13, 161, 10, 3, 4, 68, 19, 108, 63, 62, 147, 117, 113, 165, 80, 124, 33, 41, 37, 79, 184, 154, 83, 102, 190, 195, 148, 46, 114, 16, 155, 121, 104, 120, 58, 53, 78, 160, 193, 126, 115, 95, 127, 166, 131, 49, 100, 84, 35, 12, 27, 118, 167, 66, 56, 106, 175, 143, 97, 87, 1, 183, 111, 36, 158, 153, 199, 17, 31, 177, 194, 182, 59, 187, 130, 163, 92, 48, 96, 82, 6, 123, 98, 192, 43, 26, 181, 170, 134, 72, 50, 24, 174, 122, 103, 71, 138, 110, 7, 65, 51, 28, 173, 172, 34, 90, 119, 185, 15, 186, 101, 85, 60, 75, 39, 38, 5, 141, 89, 57, 144, 64, 67, 171, 157, 94, 70, 142, 54, 74, 146, 191, 112, 107, 189, 30, 32, 133, 169, 151, 23, 21, 99, 2, 22, 116, 91, 145, 178, 137, 135, 40, 73, 47, 52, 25, 93, 128, 88, 109, 44, 29, 198, 159, 125, 11, 45, 197, 149, 69, 188, 164, 0, 18, 176, 9, 168, 77, 132], [76, 42, 179, 61, 105, 136, 86, 196, 8, 14, 139, 20, 150, 152, 180, 162, 140, 81, 55, 156])
with open(os.path.join("cache", "train_tst_idx.pkl"), "wb") as f:
pickle.dump(train_tst_idx, f)
datadir = "data"
mu_path = os.path.join(datadir, "INPUT_MONTE_CARLO.dat")
# x_mesh, connectivity, X_v, U = \
# read_multi_space_sol_input_mesh(hp["n_s"], 1, 1, train_tst_idx[0],
# hp["mesh_idx"], datadir, mu_path,
# hp["mu_idx"])
# np.save(os.path.join("cache", "x_mesh.npy"), x_mesh)
# np.save(os.path.join("cache", "connectivity.npy"), connectivity)
# np.save(os.path.join("cache", "X_v.npy"), X_v)
# np.save(os.path.join("cache", "U.npy"), U)
x_mesh = np.load(os.path.join("cache", "x_mesh.npy"))
connectivity = np.load(os.path.join("cache", "connectivity.npy"))
X_v = np.load(os.path.join("cache", "X_v.npy"))
U = np.load(os.path.join("cache", "U.npy"))
# x_mesh = np.load(os.path.join("cache", "x_mesh.npy"))
# connectivity = np.load(os.path.join("cache", "connectivity.npy"))
# X_v = np.load(os.path.join("cache", "X_v.npy"))
# U = np.load(os.path.join("cache", "U.npy"))
#%% Init the model
model = PodnnModel(resdir, hp["n_v"], x_mesh, hp["n_t"])
#%% Generate the dataset from the mesh and params
X_v_train, v_train, \
X_v_val, v_val, \
U_val = model.convert_multigpu_data(U, X_v, hp["train_val"], hp["eps"])
model.initVNNs(hp["n_M"], hp["h_layers"], hp["lr"], hp["lambda"],
hp["adv_eps"], hp["soft_0"], hp["norm"])
| 50.033898 | 910 | 0.618225 |
6fb84b781b83af75ecf875a208c9ad2d89873dec | 13,981 | py | Python | template_container_human/labels/slice_7.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
] | null | null | null | template_container_human/labels/slice_7.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
] | null | null | null | template_container_human/labels/slice_7.py | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
] | null | null | null | coordinates_01EE00 = ((121, 126),
(121, 132), (121, 134), (121, 135), (121, 137), (121, 138), (121, 139), (121, 140), (121, 141), (122, 114), (122, 115), (122, 116), (122, 117), (122, 119), (122, 125), (122, 127), (122, 128), (122, 142), (123, 110), (123, 111), (123, 112), (123, 113), (123, 120), (123, 124), (123, 126), (123, 130), (123, 132), (123, 133), (123, 134), (123, 135), (123, 136), (123, 137), (123, 138), (123, 139), (123, 140), (123, 141), (123, 144), (124, 107), (124, 109), (124, 114), (124, 115), (124, 116), (124, 117), (124, 118), (124, 120), (124, 124), (124, 125), (124, 126), (124, 127), (124, 128), (124, 129), (124, 131), (124, 132), (124, 133), (124, 134), (124, 135), (124, 136), (124, 137), (124, 138), (124, 139), (124, 140), (124, 141), (124, 142), (124, 146), (125, 105), (125, 110), (125, 111), (125, 112), (125, 113), (125, 114), (125, 115), (125, 116), (125, 117),
(125, 118), (125, 119), (125, 120), (125, 122), (125, 124), (125, 125), (125, 126), (125, 127), (125, 128), (125, 129), (125, 130), (125, 131), (125, 132), (125, 133), (125, 134), (125, 135), (125, 136), (125, 137), (125, 138), (125, 139), (125, 140), (125, 141), (125, 142), (125, 143), (125, 144), (125, 148), (126, 103), (126, 107), (126, 108), (126, 109), (126, 110), (126, 111), (126, 112), (126, 113), (126, 114), (126, 115), (126, 116), (126, 117), (126, 118), (126, 119), (126, 120), (126, 124), (126, 125), (126, 126), (126, 127), (126, 128), (126, 129), (126, 130), (126, 131), (126, 132), (126, 133), (126, 134), (126, 135), (126, 136), (126, 137), (126, 138), (126, 139), (126, 140), (126, 141), (126, 142), (126, 143), (126, 144), (126, 145), (126, 146), (126, 149), (127, 102), (127, 105), (127, 106), (127, 107), (127, 108), (127, 109), (127, 110),
(127, 111), (127, 112), (127, 113), (127, 114), (127, 115), (127, 116), (127, 117), (127, 118), (127, 119), (127, 120), (127, 121), (127, 122), (127, 123), (127, 124), (127, 125), (127, 126), (127, 127), (127, 128), (127, 129), (127, 130), (127, 131), (127, 132), (127, 133), (127, 134), (127, 135), (127, 136), (127, 137), (127, 138), (127, 139), (127, 140), (127, 141), (127, 142), (127, 143), (127, 144), (127, 145), (127, 146), (127, 147), (127, 148), (127, 150), (128, 103), (128, 105), (128, 106), (128, 107), (128, 108), (128, 109), (128, 110), (128, 111), (128, 112), (128, 113), (128, 114), (128, 115), (128, 116), (128, 117), (128, 118), (128, 119), (128, 120), (128, 121), (128, 122), (128, 123), (128, 124), (128, 125), (128, 126), (128, 127), (128, 128), (128, 129), (128, 130), (128, 131), (128, 132), (128, 133), (128, 134), (128, 135), (128, 136),
(128, 137), (128, 138), (128, 139), (128, 140), (128, 141), (128, 142), (128, 143), (128, 144), (128, 145), (128, 146), (128, 147), (128, 149), (129, 103), (129, 105), (129, 106), (129, 107), (129, 108), (129, 109), (129, 110), (129, 111), (129, 112), (129, 113), (129, 114), (129, 115), (129, 116), (129, 117), (129, 118), (129, 119), (129, 120), (129, 121), (129, 122), (129, 123), (129, 124), (129, 125), (129, 126), (129, 127), (129, 128), (129, 129), (129, 130), (129, 131), (129, 132), (129, 133), (129, 134), (129, 135), (129, 136), (129, 137), (129, 138), (129, 139), (129, 140), (129, 141), (129, 142), (129, 143), (129, 144), (129, 145), (129, 146), (129, 147), (129, 149), (130, 104), (130, 107), (130, 108), (130, 109), (130, 110), (130, 111), (130, 112), (130, 113), (130, 114), (130, 115), (130, 116), (130, 117), (130, 118), (130, 119), (130, 120),
(130, 121), (130, 122), (130, 123), (130, 124), (130, 125), (130, 126), (130, 127), (130, 128), (130, 129), (130, 130), (130, 131), (130, 132), (130, 133), (130, 134), (130, 135), (130, 136), (130, 137), (130, 138), (130, 139), (130, 140), (130, 141), (130, 142), (130, 143), (130, 144), (130, 145), (130, 146), (130, 148), (131, 104), (131, 106), (131, 107), (131, 108), (131, 109), (131, 110), (131, 111), (131, 112), (131, 113), (131, 114), (131, 115), (131, 116), (131, 117), (131, 118), (131, 119), (131, 120), (131, 121), (131, 122), (131, 123), (131, 124), (131, 125), (131, 126), (131, 127), (131, 128), (131, 129), (131, 130), (131, 131), (131, 132), (131, 133), (131, 134), (131, 135), (131, 136), (131, 137), (131, 138), (131, 139), (131, 140), (131, 141), (131, 142), (131, 143), (131, 144), (131, 145), (131, 147), (132, 108), (132, 109), (132, 110),
(132, 111), (132, 112), (132, 113), (132, 114), (132, 115), (132, 116), (132, 117), (132, 118), (132, 119), (132, 120), (132, 121), (132, 122), (132, 123), (132, 124), (132, 125), (132, 126), (132, 127), (132, 128), (132, 129), (132, 130), (132, 131), (132, 132), (132, 133), (132, 134), (132, 135), (132, 136), (132, 137), (132, 138), (132, 139), (132, 140), (132, 141), (132, 142), (132, 143), (132, 144), (132, 146), (133, 108), (133, 110), (133, 111), (133, 112), (133, 113), (133, 114), (133, 115), (133, 116), (133, 117), (133, 118), (133, 119), (133, 120), (133, 121), (133, 122), (133, 123), (133, 124), (133, 125), (133, 126), (133, 127), (133, 128), (133, 129), (133, 130), (133, 131), (133, 132), (133, 133), (133, 134), (133, 135), (133, 136), (133, 137), (133, 138), (133, 139), (133, 140), (133, 141), (133, 142), (133, 143), (133, 144), (133, 146),
(134, 107), (134, 108), (134, 109), (134, 110), (134, 111), (134, 112), (134, 113), (134, 114), (134, 115), (134, 116), (134, 117), (134, 118), (134, 119), (134, 120), (134, 121), (134, 122), (134, 123), (134, 124), (134, 125), (134, 126), (134, 127), (134, 128), (134, 129), (134, 130), (134, 131), (134, 132), (134, 133), (134, 134), (134, 135), (134, 136), (134, 137), (134, 138), (134, 139), (134, 140), (134, 141), (134, 142), (134, 143), (134, 145), (135, 106), (135, 108), (135, 109), (135, 110), (135, 111), (135, 112), (135, 113), (135, 114), (135, 115), (135, 116), (135, 117), (135, 118), (135, 119), (135, 120), (135, 121), (135, 122), (135, 123), (135, 124), (135, 125), (135, 126), (135, 127), (135, 128), (135, 129), (135, 130), (135, 131), (135, 132), (135, 133), (135, 134), (135, 135), (135, 136), (135, 137), (135, 138), (135, 139), (135, 140),
(135, 141), (135, 142), (135, 143), (135, 145), (136, 105), (136, 107), (136, 110), (136, 111), (136, 112), (136, 113), (136, 114), (136, 115), (136, 116), (136, 117), (136, 118), (136, 119), (136, 120), (136, 121), (136, 122), (136, 123), (136, 124), (136, 125), (136, 126), (136, 127), (136, 128), (136, 129), (136, 130), (136, 131), (136, 132), (136, 133), (136, 134), (136, 135), (136, 136), (136, 137), (136, 138), (136, 139), (136, 140), (136, 141), (136, 142), (136, 144), (137, 105), (137, 109), (137, 110), (137, 111), (137, 112), (137, 113), (137, 114), (137, 115), (137, 116), (137, 117), (137, 118), (137, 119), (137, 120), (137, 121), (137, 122), (137, 123), (137, 124), (137, 125), (137, 126), (137, 127), (137, 128), (137, 129), (137, 130), (137, 131), (137, 132), (137, 133), (137, 134), (137, 135), (137, 136), (137, 137), (137, 138), (137, 139),
(137, 140), (137, 141), (137, 142), (137, 144), (138, 105), (138, 107), (138, 110), (138, 112), (138, 113), (138, 114), (138, 115), (138, 116), (138, 117), (138, 118), (138, 119), (138, 120), (138, 121), (138, 122), (138, 123), (138, 124), (138, 125), (138, 126), (138, 127), (138, 128), (138, 129), (138, 130), (138, 131), (138, 132), (138, 133), (138, 134), (138, 135), (138, 136), (138, 137), (138, 138), (138, 139), (138, 140), (138, 141), (138, 143), (139, 106), (139, 110), (139, 112), (139, 113), (139, 114), (139, 115), (139, 116), (139, 117), (139, 118), (139, 119), (139, 120), (139, 121), (139, 122), (139, 123), (139, 124), (139, 125), (139, 126), (139, 127), (139, 128), (139, 129), (139, 130), (139, 131), (139, 132), (139, 133), (139, 134), (139, 135), (139, 136), (139, 137), (139, 138), (139, 139), (139, 140), (139, 142), (140, 110), (140, 112),
(140, 113), (140, 114), (140, 115), (140, 116), (140, 117), (140, 118), (140, 119), (140, 120), (140, 121), (140, 122), (140, 123), (140, 124), (140, 125), (140, 126), (140, 127), (140, 128), (140, 129), (140, 130), (140, 131), (140, 132), (140, 133), (140, 134), (140, 135), (140, 138), (140, 139), (140, 141), (141, 110), (141, 115), (141, 116), (141, 117), (141, 118), (141, 119), (141, 120), (141, 121), (141, 122), (141, 123), (141, 124), (141, 125), (141, 126), (141, 127), (141, 128), (141, 129), (141, 130), (141, 131), (141, 132), (141, 133), (141, 134), (141, 136), (141, 137), (141, 138), (141, 139), (141, 141), (142, 110), (142, 112), (142, 113), (142, 114), (142, 115), (142, 116), (142, 117), (142, 118), (142, 119), (142, 120), (142, 121), (142, 122), (142, 123), (142, 124), (142, 125), (142, 126), (142, 127), (142, 128), (142, 129), (142, 130),
(142, 131), (142, 132), (142, 133), (142, 135), (142, 138), (142, 141), (143, 115), (143, 117), (143, 118), (143, 119), (143, 120), (143, 121), (143, 122), (143, 123), (143, 124), (143, 125), (143, 126), (143, 127), (143, 128), (143, 129), (143, 130), (143, 131), (143, 132), (143, 134), (143, 138), (143, 140), (144, 115), (144, 117), (144, 118), (144, 119), (144, 120), (144, 121), (144, 122), (144, 123), (144, 124), (144, 125), (144, 126), (144, 127), (144, 128), (144, 129), (144, 130), (144, 131), (144, 138), (145, 115), (145, 117), (145, 118), (145, 119), (145, 120), (145, 121), (145, 122), (145, 123), (145, 124), (145, 125), (145, 126), (145, 127), (145, 128), (145, 129), (145, 133), (145, 138), (145, 139), (146, 116), (146, 119), (146, 120), (146, 121), (146, 122), (146, 123), (146, 124), (146, 125), (146, 126), (146, 127), (146, 128), (146, 138),
(147, 117), (147, 119), (147, 120), (147, 121), (147, 122), (147, 123), (147, 124), (147, 125), (147, 126), (147, 127), (147, 129), (148, 119), (148, 121), (148, 122), (148, 123), (148, 124), (148, 125), (148, 126), (148, 128), (149, 119), (149, 121), (149, 122), (149, 123), (149, 128), (150, 118), (150, 120), (150, 121), (150, 125), (150, 126), (150, 129), (151, 118), (151, 123), (151, 127), (151, 128), (151, 130), (151, 132), (152, 120), (152, 121), (152, 128), (152, 133), )
coordinates_00EE00 = ((98, 135),
(99, 121), (99, 122), (99, 135), (99, 136), (100, 120), (100, 122), (100, 135), (101, 114), (101, 120), (101, 123), (101, 129), (101, 135), (101, 137), (102, 114), (102, 119), (102, 121), (102, 123), (102, 128), (102, 130), (102, 136), (103, 114), (103, 119), (103, 121), (103, 122), (103, 123), (103, 125), (103, 126), (103, 130), (104, 112), (104, 115), (104, 118), (104, 120), (104, 123), (104, 128), (104, 130), (105, 111), (105, 114), (105, 116), (105, 117), (105, 119), (105, 120), (105, 121), (105, 122), (105, 123), (105, 124), (105, 125), (105, 126), (105, 127), (105, 128), (105, 129), (105, 130), (105, 132), (106, 111), (106, 113), (106, 114), (106, 115), (106, 118), (106, 120), (106, 123), (106, 125), (106, 126), (106, 127), (106, 128), (106, 129), (106, 130), (106, 134), (107, 111), (107, 113), (107, 114), (107, 115), (107, 116), (107, 117),
(107, 118), (107, 120), (107, 123), (107, 124), (107, 125), (107, 126), (107, 127), (107, 128), (107, 129), (107, 130), (107, 131), (107, 132), (107, 134), (108, 111), (108, 113), (108, 114), (108, 115), (108, 116), (108, 117), (108, 118), (108, 119), (108, 120), (108, 123), (108, 126), (108, 127), (108, 128), (108, 129), (108, 130), (108, 131), (108, 132), (108, 133), (108, 135), (109, 111), (109, 115), (109, 116), (109, 117), (109, 118), (109, 119), (109, 120), (109, 121), (109, 123), (109, 124), (109, 125), (109, 128), (109, 129), (109, 130), (109, 131), (109, 132), (109, 133), (109, 134), (109, 136), (110, 110), (110, 112), (110, 113), (110, 114), (110, 117), (110, 118), (110, 119), (110, 120), (110, 121), (110, 123), (110, 126), (110, 127), (110, 128), (110, 129), (110, 130), (110, 131), (110, 132), (110, 133), (110, 134), (110, 135), (110, 138),
(111, 109), (111, 111), (111, 115), (111, 117), (111, 118), (111, 119), (111, 120), (111, 122), (111, 128), (111, 130), (111, 131), (111, 132), (111, 133), (111, 134), (111, 135), (111, 136), (111, 140), (112, 107), (112, 111), (112, 117), (112, 119), (112, 120), (112, 121), (112, 123), (112, 128), (112, 130), (112, 131), (112, 132), (112, 133), (112, 134), (112, 135), (112, 136), (112, 137), (112, 138), (113, 105), (113, 109), (113, 110), (113, 111), (113, 113), (113, 118), (113, 120), (113, 121), (113, 122), (113, 123), (113, 124), (113, 125), (113, 126), (113, 127), (113, 130), (113, 131), (113, 132), (113, 133), (113, 134), (113, 135), (113, 136), (113, 137), (113, 138), (113, 139), (113, 141), (114, 105), (114, 107), (114, 108), (114, 109), (114, 110), (114, 111), (114, 114), (114, 118), (114, 119), (114, 120), (114, 121), (114, 122), (114, 123),
(114, 128), (114, 129), (114, 131), (114, 132), (114, 133), (114, 134), (114, 135), (114, 136), (114, 137), (114, 138), (114, 139), (114, 141), (115, 105), (115, 107), (115, 108), (115, 109), (115, 110), (115, 111), (115, 112), (115, 113), (115, 116), (115, 117), (115, 118), (115, 119), (115, 120), (115, 121), (115, 122), (115, 123), (115, 124), (115, 127), (115, 130), (115, 132), (115, 133), (115, 134), (115, 135), (115, 136), (115, 137), (115, 138), (115, 139), (115, 141), (116, 105), (116, 126), (116, 131), (116, 133), (116, 134), (116, 135), (116, 136), (116, 137), (116, 138), (116, 139), (116, 141), (117, 106), (117, 108), (117, 109), (117, 110), (117, 111), (117, 112), (117, 113), (117, 114), (117, 115), (117, 116), (117, 117), (117, 118), (117, 119), (117, 120), (117, 121), (117, 122), (117, 124), (117, 131), (117, 140), (118, 132), (118, 134),
(118, 135), (118, 136), (118, 138), (118, 140), (119, 132), (119, 135), (119, 139), (119, 140), )
coordinates_E0E1E1 = ((126, 127),
(126, 134), (127, 118), (127, 126), (127, 134), (128, 118), (128, 125), (128, 128), (129, 119), (129, 128), (129, 129), (130, 123), (130, 128), (130, 130), (131, 122), (131, 128), (131, 129), (132, 122), (132, 128), (134, 122), (136, 121), (137, 121), )
coordinates_E1E1E1 = ((111, 125),
(112, 114), )
| 582.541667 | 865 | 0.500036 |
6fb8b1b58b2f38e1b4575b537fc9d0698a19d441 | 565 | py | Python | day1/ex4.py | dsky1990/python_30days | 3a9d8a29bd32979be1f4ef01be44999073dab5c4 | [
"MIT"
] | 1 | 2018-10-20T14:45:31.000Z | 2018-10-20T14:45:31.000Z | day1/ex4.py | dsky1990/python_30days | 3a9d8a29bd32979be1f4ef01be44999073dab5c4 | [
"MIT"
] | null | null | null | day1/ex4.py | dsky1990/python_30days | 3a9d8a29bd32979be1f4ef01be44999073dab5c4 | [
"MIT"
] | null | null | null | cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars -drivers
cars_driven = drivers
carpool_carpacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print("There are", cars, "cars available")
print("There are only", drivers, "drivers available")
print("There will be", cars_not_driven, "empty cars today")
print("We can transport", carpool_carpacity, "people today")
print("We have", passengers, "to carpool today")
print("We need to put about", average_passengers_per_car, "people in each car") | 37.666667 | 79 | 0.771681 |
6fb8b2f2709c0ffde62b6fe00c0e82cc6d0aeb7e | 6,599 | py | Python | faculty_xval/bin/jobs_cross_validation_executor.py | facultyai/faculty-xval | 73468b6c7b2be1a21728ac01f0fe16e7c20110a0 | [
"Apache-2.0"
] | 4 | 2019-04-10T17:57:29.000Z | 2019-04-17T16:13:49.000Z | faculty_xval/bin/jobs_cross_validation_executor.py | facultyai/faculty-xval | 73468b6c7b2be1a21728ac01f0fe16e7c20110a0 | [
"Apache-2.0"
] | 4 | 2019-04-12T08:26:24.000Z | 2019-04-20T06:21:09.000Z | faculty_xval/bin/jobs_cross_validation_executor.py | facultyai/faculty-xval | 73468b6c7b2be1a21728ac01f0fe16e7c20110a0 | [
"Apache-2.0"
] | 1 | 2021-04-04T10:48:14.000Z | 2021-04-04T10:48:14.000Z | import json
import logging
import os
import click
import numpy as np
from keras import backend as K
from keras.models import load_model as keras_load
from sklearn.base import clone as sklearn_clone
from sklearn.externals import joblib
from faculty_xval.utilities import keras_clone_and_compile
LOGGER = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)
def load_model(path, model_type):
"""
Load the model using the method appropriate for its type ("keras" or other).
Parameters
----------
path: String
File path to look for the model.
model_type: String
String specifying the type of model to be loaded. Anything other than
"keras" will be loaded using joblib.
"""
if model_type == "keras":
# Load Keras model.
LOGGER.info("Loading Keras model")
model = keras_load(path)
LOGGER.info("Model loading complete")
else:
# Load model of other type.
LOGGER.info("Loading model with joblib")
model = joblib.load(path)
LOGGER.info("Model loading complete")
return model
def clone_model(model, model_type):
"""
Clone the model using the method appropriate for its type ("keras",
"sklearn" or other). Reset the state of the model so that each train/test
split is independent.
Parameters
----------
model: Scikit-Learn/Keras Model
Model to be cloned.
model_type: String
String specifying the type of model to be cloned. Recognised options
are "keras" and "sklearn". Any other option results in the function
returning the input model, thus doing nothing.
Returns
-------
cloned: Scikit-Learn/Keras Model
The cloned model with reset state.
"""
if model_type == "keras":
cloned = keras_clone_and_compile(model)
elif model_type == "sklearn":
cloned = sklearn_clone(model)
else:
cloned = model
LOGGER.warning(
"Model type not recognised. "
+ "Cannot reset the state of the model automatically"
)
return cloned
def validate(
model, features, targets, i_train, i_test, fit_kwargs=None, predict_kwargs=None
):
"""
Fit the model on specific training data, and predict on specific test data.
Parameters
----------
model: sklearn/keras Model
Model to cross-validate.
features: list of np.array
Features for training/testing. For multi-input models, the list contains
multiple Numpy arrays.
targets: list of np.array
Targets for training/testing. For multi-output models, the list contains
multiple Numpy arrays.
i_train: np.array
np.array of indices corresponding to the rows used for training
i_test: np.array
np.array of indices corresponding to the rows used for testing
fit_kwargs: dict, optional, default = None
Dictionary of any additional kwargs to be used by the model during
fitting.
predict_kwargs: dict, optional, default = None
Dictionary of any additional kwargs to be used by the model during
prediction.
Returns
--------
predictions: np.array
Model predictions.
"""
if fit_kwargs is None:
fit_kwargs = {}
if predict_kwargs is None:
predict_kwargs = {}
LOGGER.info("Training the model")
features_train = [x[i_train] for x in features]
targets_train = [y[i_train] for y in targets]
if len(features_train) == 1:
features_train = features_train[0].copy()
if len(targets_train) == 1:
targets_train = targets_train[0].copy()
model.fit(features_train, targets_train, **fit_kwargs)
LOGGER.info("Generating model predictions")
features_test = [x[i_test] for x in features]
if len(features_test) == 1:
features_test = features_test[0].copy()
predictions = model.predict(features_test, **predict_kwargs)
return np.array(predictions)
if __name__ == "__main__":
main()
| 29.859729 | 86 | 0.641612 |
6fb91a03150c13b0761acd74d5215ec8826d01b1 | 2,262 | py | Python | Solution.py | TheMLGuy/Simple-Web-Scraper | 4b4a27e7af3c0cf7bbe5aea4036f52487666dd85 | [
"Apache-2.0"
] | null | null | null | Solution.py | TheMLGuy/Simple-Web-Scraper | 4b4a27e7af3c0cf7bbe5aea4036f52487666dd85 | [
"Apache-2.0"
] | null | null | null | Solution.py | TheMLGuy/Simple-Web-Scraper | 4b4a27e7af3c0cf7bbe5aea4036f52487666dd85 | [
"Apache-2.0"
] | 1 | 2021-02-06T15:47:04.000Z | 2021-02-06T15:47:04.000Z | from bs4 import BeautifulSoup
import requests
import math
import time
start_url='https://www.macys.com'
domain='https://www.macys.com'
''' get soup '''
'''find all anchor tags'''
'''print all 'title' attributes'''
'''iterate through all pages for each soup object'''
'''filehandlers for output.txt and urlHandler.txt'''
'''generates soup object for each url'''
'''main function'''
if __name__=='__main__':
start_time=time.time()
items=[]
tags=findAllATags(url=start_url)
'''executing getItems for tags[12:] because first 11 have no relevant information'''
for i in tags[12:]:
getItems(i.attrs['href'])
print(time.time()-start_time)
| 29.763158 | 188 | 0.59107 |
6fb98dc934dbec999a997933b56e8bd368d485a6 | 1,243 | py | Python | contactnetwork/urls.py | pszgaspar/protwis | 4989a67175ef3c95047d795c843cf6b9cf4141fa | [
"Apache-2.0"
] | 21 | 2016-01-20T09:33:14.000Z | 2021-12-20T19:19:45.000Z | contactnetwork/urls.py | pszgaspar/protwis | 4989a67175ef3c95047d795c843cf6b9cf4141fa | [
"Apache-2.0"
] | 75 | 2016-02-26T16:29:58.000Z | 2022-03-21T12:35:13.000Z | contactnetwork/urls.py | pszgaspar/protwis | 4989a67175ef3c95047d795c843cf6b9cf4141fa | [
"Apache-2.0"
] | 77 | 2016-01-22T08:44:26.000Z | 2022-02-01T15:54:56.000Z | from django.conf.urls import url
from contactnetwork import views
# from django.views.generic import TemplateView
# URL routing table for the contact-network app: maps request-path regexes to
# view callables in contactnetwork.views.  Each entry's `name` is what
# reverse() / {% url %} lookups resolve against.
urlpatterns = [
    url(r'^clusteringdata$', views.ClusteringData, name='clusteringdata'),
    url(r'^clustering$', views.Clustering, name='clustering'),
    # NOTE(review): alias route -- same view and same name as ^clustering$
    # above, so reverse('clustering') resolves to only one of the two paths.
    # Confirm the duplication is intentional.
    url(r'^structure_clustering$', views.Clustering, name='clustering'),
    url(r'^distances', views.ShowDistances, name='distances'),
    url(r'^distancedatagroups', views.DistanceDataGroups, name='distancedatagroups'),
    url(r'^distancedata', views.DistanceData, name='distancedata'),
    url(r'^interactions[/]?$', views.Interactions, name='interactions'),
    # NOTE(review): alias of ^interactions$ (same view, same name) -- see note above.
    url(r'^comparative_analysis[/]?$', views.Interactions, name='interactions'),
    url(r'^interactiondata', views.InteractionData, name='interactiondata'),
    url(r'^browser[/]?$', views.InteractionBrowser, name='interactionsbrowser'),
    url(r'^browserdata', views.InteractionBrowserData, name='interactionsbrowserdata'),
    url(r'^state_contacts[/]?$', views.StateContacts, name='statecontacts'),
    url(r'^pdbtreedata', views.PdbTreeData, name='pdbtreedata'),
    url(r'^pdbtabledata', views.PdbTableData, name='pdbtabledata'),
    # Serves a structure file by PDB code; <pdbname> is passed to the view as a kwarg.
    url(r'^pdb/(?P<pdbname>\w+)$', views.ServePDB, name='serve_pdb'),
]
| 51.791667 | 87 | 0.722446 |
6fb9c69f0b059e1e502f8dc0059898eb346e1f36 | 320 | py | Python | app.py | jero2rome/HelloWorld-Python | 67a4b600415e4c0011e4598efea874d0b49abd79 | [
"MIT"
] | null | null | null | app.py | jero2rome/HelloWorld-Python | 67a4b600415e4c0011e4598efea874d0b49abd79 | [
"MIT"
] | null | null | null | app.py | jero2rome/HelloWorld-Python | 67a4b600415e4c0011e4598efea874d0b49abd79 | [
"MIT"
] | null | null | null | course = "Python Programming"
# Demonstrate the built-in case-conversion helpers on the current course string.
for converted in (course.upper(), course.lower(), course.title()):
    print(converted)

# Rebind with a leading space to demonstrate strip/find/replace/membership.
course = " Python Programming"
print(course)
print(course.strip())
# str.find is case-sensitive: index of the first match, or -1 when absent.
print(course.find("Pro"))
print(course.find("pro"))
print(course.replace("P", "-"))
# Substring membership tests with `in` / `not in`.
print("Programming" in course)
print("Programming" not in course)
| 18.823529 | 34 | 0.709375 |
6fbc9ebec03dc29c3a7fc5b24ded057f188d61a0 | 1,401 | py | Python | FPN_Backend/api_utility/validators.py | DeeMATT/friendly-invention | c308168b088ad8e65ab6eddcdba22552a9c6987a | [
"MIT"
] | null | null | null | FPN_Backend/api_utility/validators.py | DeeMATT/friendly-invention | c308168b088ad8e65ab6eddcdba22552a9c6987a | [
"MIT"
] | null | null | null | FPN_Backend/api_utility/validators.py | DeeMATT/friendly-invention | c308168b088ad8e65ab6eddcdba22552a9c6987a | [
"MIT"
] | null | null | null | import re
from data_transformer.views import stringIsInteger
| 24.155172 | 87 | 0.635974 |
6fbe42378fbc286f445856d3f64bebf5d1265f7a | 1,173 | py | Python | app/model.py | hfikry92/fast-api-auth-starter | 4d90980da7084961f8f25591aea587509e790f80 | [
"MIT"
] | 43 | 2020-12-14T18:19:15.000Z | 2022-03-30T05:57:43.000Z | app/model.py | hfikry92/fast-api-auth-starter | 4d90980da7084961f8f25591aea587509e790f80 | [
"MIT"
] | 3 | 2021-02-19T09:56:35.000Z | 2022-03-30T13:26:50.000Z | app/model.py | hfikry92/fast-api-auth-starter | 4d90980da7084961f8f25591aea587509e790f80 | [
"MIT"
] | 16 | 2020-12-14T02:49:35.000Z | 2022-02-15T10:39:39.000Z | from pydantic import BaseModel, Field, EmailStr
| 27.27907 | 191 | 0.535379 |
6fbf3fd6784e5fc825699ad96db6d0683069d114 | 188 | py | Python | griffin_powermate/__init__.py | alex-ong/griffin-powermate | 59f53647de2fd025b27552c37e22e4c8c176aba6 | [
"MIT"
] | 11 | 2015-07-06T03:48:00.000Z | 2022-03-08T15:45:02.000Z | griffin_powermate/__init__.py | alex-ong/griffin-powermate | 59f53647de2fd025b27552c37e22e4c8c176aba6 | [
"MIT"
] | 2 | 2018-03-30T17:11:53.000Z | 2018-03-31T10:49:22.000Z | griffin_powermate/__init__.py | alex-ong/griffin-powermate | 59f53647de2fd025b27552c37e22e4c8c176aba6 | [
"MIT"
] | 6 | 2015-07-10T23:12:38.000Z | 2021-07-05T05:17:22.000Z | __version__ = '1.0.2'
__author__ = 'Christian Musa <christianmusa@gmail.com>'
__url__ = 'https://github.com/crash7/griffin-powermate'
__all__ = []
from griffin_powermate import * | 31.333333 | 59 | 0.718085 |
6fc12a48d65fbf13cb0aa456154ff1b694f2a2ec | 3,142 | py | Python | flink/test_flink.py | chekanskiy/bi-dataproc-initialization-actions | 8682ca537153f50ab58dc081a9d222ec61f8aa8e | [
"Apache-2.0"
] | 1 | 2020-10-28T01:19:16.000Z | 2020-10-28T01:19:16.000Z | flink/test_flink.py | chekanskiy/bi-dataproc-initialization-actions | 8682ca537153f50ab58dc081a9d222ec61f8aa8e | [
"Apache-2.0"
] | null | null | null | flink/test_flink.py | chekanskiy/bi-dataproc-initialization-actions | 8682ca537153f50ab58dc081a9d222ec61f8aa8e | [
"Apache-2.0"
] | null | null | null | import unittest
from parameterized import parameterized
import os
from integration_tests.dataproc_test_case import DataprocTestCase
METADATA = 'flink-start-yarn-session=false'
if __name__ == '__main__':
unittest.main()
| 37.404762 | 109 | 0.559198 |
6fc1e91d9ee7bd81df3b499400e72d6a896fdb9d | 4,017 | py | Python | regression/testplan/firmware_small.py | sld-columbia/nvdla-sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | [
"Apache-2.0"
] | 407 | 2017-10-25T14:24:25.000Z | 2022-03-31T08:02:01.000Z | regression/testplan/firmware_small.py | sld-columbia/nvdla-sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | [
"Apache-2.0"
] | 227 | 2017-11-02T07:15:38.000Z | 2022-01-19T02:29:51.000Z | regression/testplan/firmware_small.py | sld-columbia/nvdla-sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | [
"Apache-2.0"
] | 199 | 2017-10-26T07:26:40.000Z | 2022-03-27T20:02:13.000Z | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test_plan
import settings
# Convenience globals
# NOTE(review): `Module` is not defined in this fragment; presumably it is
# provided by the imported test_plan framework -- confirm.
kmd = Module.runScript
devices = Module.deviceTargets
# Category tag lists labelling tests by the NVDLA hardware block they exercise.
ces = ["Core Engine Scheduler"]
nn = ["Neural Network"]
convd = ["CONV HW - Direct"]
convi = ["CONV HW - Image"]
convw = ["CONV HW - Winograd"]
convp = ["CONV HW - Pipeline"]
sdpx1 = ["SDP X1 HW"]
sdpx2 = ["SDP X2 HW"]
sdpy = ["SDP Y HW"]
sdpf = ["SDP HW - Full"]
cdp = ["CDP HW"]
pdp = ["PDP HW"]
# Hook the registration function (defined elsewhere in the full file, not
# visible here) into the framework's test-discovery entry point.
Module.register_tests = registerTests
| 42.284211 | 179 | 0.707742 |
6fc2f27600b643c88618b8d387f08bcf982ed303 | 790 | py | Python | peaksql/datasets/narrowpeak.py | vanheeringen-lab/PeakSQL | b9290394605273b3a92815a8662fe22406254bfc | [
"MIT"
] | null | null | null | peaksql/datasets/narrowpeak.py | vanheeringen-lab/PeakSQL | b9290394605273b3a92815a8662fe22406254bfc | [
"MIT"
] | 17 | 2020-02-22T19:05:00.000Z | 2020-05-20T10:15:01.000Z | peaksql/datasets/narrowpeak.py | vanheeringen-lab/PeakSQL | b9290394605273b3a92815a8662fe22406254bfc | [
"MIT"
] | 4 | 2020-02-28T12:47:18.000Z | 2020-03-06T13:00:20.000Z | import numpy as np
from typing import List, Tuple
from .base import _DataSet
| 29.259259 | 88 | 0.653165 |
6fc517dc6f5fe11349e844166e7ed87944abff49 | 2,654 | py | Python | strip_ansi_escape_codes.py | neilrjones/DevOps-Python-tools | 12646de550ba296cf26e4a058e9a1cc1cc723c8f | [
"MIT"
] | 1 | 2022-02-22T00:20:00.000Z | 2022-02-22T00:20:00.000Z | strip_ansi_escape_codes.py | neilrjones/DevOps-Python-tools | 12646de550ba296cf26e4a058e9a1cc1cc723c8f | [
"MIT"
] | null | null | null | strip_ansi_escape_codes.py | neilrjones/DevOps-Python-tools | 12646de550ba296cf26e4a058e9a1cc1cc723c8f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2018-09-09 23:06:06 +0100 (Sun, 09 Sep 2018)
#
# https://github.com/harisekhon/devops-python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback
# to help improve or steer this or other code I publish # pylint: disable=line-too-long
#
# https://www.linkedin.com/in/harisekhon
#
"""
Strip ANSI Escape Codes from Text String input
Works as a standard unix filter program, reading from file arguments or standard input and printing to standard output
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pylib'))
sys.path.append(libdir)
# Import the project's shared library (added to sys.path above); fail with a
# clear hint rather than a bare traceback if the adjacent 'pylib' submodule
# has not been built / copied alongside this script.
try:
    # pylint: disable=wrong-import-position
    from harisekhon.utils import die, ERRORS, log_option, strip_ansi_escape_codes
    from harisekhon import CLI
except ImportError as _:
    print('module import failed: %s' % _, file=sys.stderr)
    print("Did you remember to build the project by running 'make'?", file=sys.stderr)
    print("Alternatively perhaps you tried to copy this program out without it's adjacent libraries?", file=sys.stderr)
    sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.2'
# pylint: disable=too-few-public-methods
# NOTE(review): the StripAnsiEscapeCodes class is not visible in this fragment;
# presumably a CLI subclass defined between here and the guard -- confirm.
if __name__ == '__main__':
    StripAnsiEscapeCodes().main()
| 31.223529 | 119 | 0.637905 |
6fc58731a5e67b957a08a7b99ed3506623297e19 | 301 | py | Python | vk_bot/mods/other/counting.py | triangle1984/GLaDOS | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
] | 3 | 2019-12-12T05:48:34.000Z | 2020-12-07T19:23:41.000Z | vk_bot/mods/other/counting.py | anar66/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
] | 1 | 2019-11-15T14:28:49.000Z | 2019-11-15T14:28:49.000Z | vk_bot/mods/other/counting.py | triangle1984/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
] | 5 | 2019-11-20T14:20:30.000Z | 2022-02-05T10:37:01.000Z | from vk_bot.core.modules.basicplug import BasicPlug
import time
| 25.083333 | 51 | 0.538206 |
6fc63d77d8ed73c401918b676d06084cc00b6c87 | 954 | py | Python | wind-oci-marketplace/setup.py | LaudateCorpus1/wind | d10dbc6baa98acab4927ff2b7a880b4727185582 | [
"UPL-1.0",
"Apache-2.0"
] | 1 | 2022-02-07T15:56:24.000Z | 2022-02-07T15:56:24.000Z | wind-oci-marketplace/setup.py | LaudateCorpus1/wind | d10dbc6baa98acab4927ff2b7a880b4727185582 | [
"UPL-1.0",
"Apache-2.0"
] | null | null | null | wind-oci-marketplace/setup.py | LaudateCorpus1/wind | d10dbc6baa98acab4927ff2b7a880b4727185582 | [
"UPL-1.0",
"Apache-2.0"
] | 1 | 2022-02-18T01:23:46.000Z | 2022-02-18T01:23:46.000Z | ## Copyright 2021, Oracle and/or its affiliates.
## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#!/usr/bin/env python
from setuptools import setup
# Distribution metadata for the WIND OCI Marketplace Robot Framework library.
# NOTE(review): long_description merely repeats description; consider reading
# a README file instead.
setup(name='wind-marketplace-library',
      version="1.0.0",
      description='Robot Framework test library for OCI Marketplace',
      long_description='Robot Framework test library for OCI Marketplace',
      classifiers=[
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3.6',
          'Framework :: WIND Robot Framework',
      ],
      author='arun.poonia@oracle.com',
      author_email='arun.poonia@oracle.com',
      packages=['MarketplaceLibrary'],
      license = "UPL-1.0",
      # No runtime or dev dependencies declared (lists intentionally empty).
      install_requires=[
      ],
      extras_require={
          'dev': [
          ]
      },
      platforms='any',
      include_package_data=True,
zip_safe=False) | 31.8 | 105 | 0.634172 |
6fc8616e9f969ac9e3ee973ff4f5b06bdc7a38e6 | 17 | py | Python | pointcloud2raster/__version__.py | NorthArrowResearch/pointcloud2raster | 419d82c9a62947e0258e308d2812f745df09794d | [
"MIT"
] | null | null | null | pointcloud2raster/__version__.py | NorthArrowResearch/pointcloud2raster | 419d82c9a62947e0258e308d2812f745df09794d | [
"MIT"
] | null | null | null | pointcloud2raster/__version__.py | NorthArrowResearch/pointcloud2raster | 419d82c9a62947e0258e308d2812f745df09794d | [
"MIT"
] | null | null | null | __version__="0.3" | 17 | 17 | 0.764706 |
6fc8809070d19daecb0e75b0cf66f5240983ed79 | 1,392 | py | Python | api/views.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | [
"MIT"
] | 8 | 2019-08-27T20:08:22.000Z | 2021-07-23T22:49:47.000Z | api/views.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | [
"MIT"
] | 73 | 2020-03-11T18:07:29.000Z | 2022-03-28T18:07:47.000Z | api/views.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | [
"MIT"
] | 4 | 2020-02-22T19:44:17.000Z | 2022-03-08T09:42:45.000Z | from django.http import JsonResponse
from django.shortcuts import reverse
from django.urls import NoReverseMatch
from django.views import View
from rest_framework import __version__ as drf_version
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from oilandrope import __version__
| 27.294118 | 56 | 0.66954 |
6fc9a185be50739efc8e58ecf9750c6070063f42 | 1,525 | py | Python | 5_AIOps/Mlflow.py | stormsinbrewing/Covid-19-Predictor | 36bd768be8ac4aa81de86fe7f6bb592a7a711856 | [
"MIT"
] | 3 | 2021-05-22T02:03:57.000Z | 2021-05-23T07:45:01.000Z | 5_AIOps/Mlflow.py | stormsinbrewing/Covid-19-Predictor | 36bd768be8ac4aa81de86fe7f6bb592a7a711856 | [
"MIT"
] | null | null | null | 5_AIOps/Mlflow.py | stormsinbrewing/Covid-19-Predictor | 36bd768be8ac4aa81de86fe7f6bb592a7a711856 | [
"MIT"
] | null | null | null | from verta import Client
import pickle
import mlflow
import mlflow.sklearn
from mlflow.tracking import MlflowClient
import os
# Function Calls ("MajorII","CovidPredictor","Version 1","model.pkl","pickle","pytorch")
downloadArtifact("MajorII","CovidPredictor","Version 1","pickle")
logModel("pytorch","CovidPredictor")
#serveModel("CovidPredictor")
| 36.309524 | 182 | 0.72459 |
6fc9dcd451ff1e384504442cc9b3cb27f7b5e6af | 3,832 | py | Python | src/arclink/apps/misc/dump_db.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 94 | 2015-02-04T13:57:34.000Z | 2021-11-01T15:10:06.000Z | src/arclink/apps/misc/dump_db.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 233 | 2015-01-28T15:16:46.000Z | 2021-08-23T11:31:37.000Z | src/arclink/apps/misc/dump_db.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 95 | 2015-02-13T15:53:30.000Z | 2021-11-02T14:54:54.000Z | #!/usr/bin/env python
#*****************************************************************************
# dump_db.py
#
# Dump inventory database in XML format
#
# (c) 2006 Andres Heinloo, GFZ Potsdam
# (c) 2007 Mathias Hoffmann, GFZ Potsdam
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version. For more information, see http://www.gnu.org/
#*****************************************************************************
import sys
from seiscomp import logs
from seiscomp.db.seiscomp3 import sc3wrap
from seiscomp.db.seiscomp3.inventory import Inventory as SC3Inventory
from seiscomp.db.seiscomp3.routing import Routing as SC3Routing
from seiscomp3 import Core, Client, DataModel, Logging
VERSION = "1.2 (2012.313)"
if __name__ == "__main__":
    # Route the seiscomp logging helper hooks to the SeisComP3 Logging backend
    # so messages emitted via seiscomp.logs end up in the standard SC3 logs.
    logs.debug = Logging.debug
    logs.info = Logging.info
    logs.notice = Logging.notice
    logs.warning = Logging.warning
    logs.error = Logging.error
    # NOTE(review): DumpDB is defined elsewhere in this file (not visible in
    # this fragment); presumably a Client.Application subclass constructed
    # with argc/argv and run by calling the instance -- confirm.
    app = DumpDB(len(sys.argv), sys.argv)
    sys.exit(app())
| 33.034483 | 111 | 0.592119 |
6fca5a30c5488af2b1fdbc0fc7967e56d0f30371 | 619 | py | Python | ypricemagic/magic.py | cartercarlson/ypricemagic | f17fec155db7fb44ee624cd6e75193f17c6238cf | [
"MIT"
] | 1 | 2022-03-28T16:07:07.000Z | 2022-03-28T16:07:07.000Z | ypricemagic/magic.py | cartercarlson/ypricemagic | f17fec155db7fb44ee624cd6e75193f17c6238cf | [
"MIT"
] | null | null | null | ypricemagic/magic.py | cartercarlson/ypricemagic | f17fec155db7fb44ee624cd6e75193f17c6238cf | [
"MIT"
] | null | null | null |
import logging
from typing import Optional
from y import magic
from y.datatypes import UsdPrice
from y.typing import AnyAddressType, Block
logger = logging.getLogger(__name__)
| 36.411765 | 85 | 0.704362 |
6fcab68471b1398dafce699d3dbe1a96583efdf6 | 4,248 | py | Python | zerver/tests/test_realm_playgrounds.py | moazzammoriani/zulip | ca506f71dc8b733827a6bf532b107291b4839e55 | [
"Apache-2.0"
] | 3 | 2021-09-12T05:05:28.000Z | 2021-12-30T09:45:20.000Z | zerver/tests/test_realm_playgrounds.py | moazzammoriani/zulip | ca506f71dc8b733827a6bf532b107291b4839e55 | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_realm_playgrounds.py | moazzammoriani/zulip | ca506f71dc8b733827a6bf532b107291b4839e55 | [
"Apache-2.0"
] | null | null | null | from zerver.lib.actions import do_add_realm_playground
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import RealmPlayground, get_realm
| 38.27027 | 96 | 0.62194 |
6fcb558a97ebc6e7f0af8053d4b5eef0ee0f741f | 2,733 | py | Python | seisflows/plugins/optimize/NLCG.py | niyiyu2316/seisflows | f861f1bf59b24854e7ca6a0dfc383f2dcd396969 | [
"BSD-2-Clause"
] | 1 | 2021-12-01T07:37:57.000Z | 2021-12-01T07:37:57.000Z | seisflows/plugins/optimize/NLCG.py | niyiyu/seisflows | f861f1bf59b24854e7ca6a0dfc383f2dcd396969 | [
"BSD-2-Clause"
] | null | null | null | seisflows/plugins/optimize/NLCG.py | niyiyu/seisflows | f861f1bf59b24854e7ca6a0dfc383f2dcd396969 | [
"BSD-2-Clause"
] | null | null | null | #
# This is Seisflows
#
# See LICENCE file
#
#
###############################################################################
# Import system modules
import os
# Import Numpy
import numpy as np
# Local imports
from seisflows.tools import unix
from seisflows.tools.math import dot
from seisflows.tools.tools import loadtxt, savetxt, loadnpy, savenpy
# Utility functions
def fletcher_reeves(g_new, g_old, precond=lambda x: x):
    """Fletcher-Reeves NLCG update parameter.

    Computes beta = <P g_new, g_new> / <g_old, g_old>, where ``precond``
    applies the optional preconditioner P to the new gradient.
    """
    numerator = dot(precond(g_new), g_new)
    denominator = dot(g_old, g_old)
    return numerator / denominator
def pollak_ribere(g_new, g_old, precond=lambda x: x):
    """Polak-Ribiere NLCG update parameter.

    Computes beta = <P g_new, g_new - g_old> / <g_old, g_old>, where
    ``precond`` applies the optional preconditioner P to the new gradient.
    """
    numerator = dot(precond(g_new), g_new - g_old)
    denominator = dot(g_old, g_old)
    return numerator / denominator
def check_conjugacy(g_new, g_old):
    """Loss of conjugacy: |<g_new, g_old>| normalized by <g_new, g_new>."""
    overlap = dot(g_new, g_old)
    norm_sq = dot(g_new, g_new)
    return abs(overlap / norm_sq)
def check_descent(p_new, g_new):
    """Directional derivative of the search direction, normalized by the
    gradient norm squared (negative values indicate a descent direction)."""
    slope = dot(p_new, g_new)
    norm_sq = dot(g_new, g_new)
    return slope / norm_sq
| 24.621622 | 79 | 0.559458 |