hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3310368135e649bf028187ed7a04013100b76b9e | 4,905 | py | Python | python/registration_gui.py | zivy/ISBI2018_TUTORIAL | 4fa3d695982785f858fc35ac3ff02822bf5a1cdd | [
"Apache-2.0"
] | 26 | 2018-03-15T19:46:16.000Z | 2022-01-11T11:26:28.000Z | python/registration_gui.py | zivy/ISBI2018_TUTORIAL | 4fa3d695982785f858fc35ac3ff02822bf5a1cdd | [
"Apache-2.0"
] | 1 | 2018-04-02T15:27:13.000Z | 2018-04-02T16:12:04.000Z | python/registration_gui.py | zivy/ISBI2018_TUTORIAL | 4fa3d695982785f858fc35ac3ff02822bf5a1cdd | [
"Apache-2.0"
] | 16 | 2018-03-16T13:50:03.000Z | 2021-09-11T08:11:46.000Z | import SimpleITK as sitk
import matplotlib.pyplot as plt
import numpy as np
#
# Set of methods used for displaying the registration metric during the optimization.
#
# Callback invoked when the StartEvent happens, sets up our new data.
# Callback invoked when the EndEvent happens, do cleanup of data and figure.
# Callback invoked when the IterationEvent happens, update our data and display new figure.
# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
# metric_values list.
def overlay_binary_segmentation_contours(image, mask, window_min, window_max):
    """
    Given a 2D image and a binary mask, produce a display-ready overlay:

    a. resample both onto an isotropic grid (required for display),
    b. window-level the image intensities into 8 bit,
    c. draw the contours computed from the mask on top of the image.
    """
    spacing = image.GetSpacing()
    size = image.GetSize()
    iso_spacing = min(spacing)
    new_spacing = [iso_spacing, iso_spacing]
    # Scale each axis so the physical extent is preserved on the finer grid.
    new_size = [int(round(size[0] * (spacing[0] / iso_spacing))),
                int(round(size[1] * (spacing[1] / iso_spacing)))]
    # Linear interpolation for the image, nearest neighbor for the label mask.
    iso_image = sitk.Resample(image, new_size, sitk.Transform(),
                              sitk.sitkLinear, image.GetOrigin(),
                              new_spacing, image.GetDirection(), 0.0,
                              image.GetPixelID())
    iso_mask = sitk.Resample(mask, new_size, sitk.Transform(),
                             sitk.sitkNearestNeighbor, mask.GetOrigin(),
                             new_spacing, mask.GetDirection(), 0.0,
                             mask.GetPixelID())
    # Window-level the high dynamic range CT into 8 bit before overlaying.
    windowed = sitk.IntensityWindowing(iso_image,
                                       windowMinimum=window_min,
                                       windowMaximum=window_max)
    return sitk.LabelMapContourOverlay(sitk.Cast(iso_mask, sitk.sitkLabelUInt8),
                                       sitk.Cast(windowed, sitk.sitkUInt8),
                                       opacity=1,
                                       contourThickness=[2, 2])
def display_coronal_with_overlay(temporal_slice, coronal_slice, images, masks, label, window_min, window_max):
    """
    Display a coronal slice from the 4D (3D+time) CT with the contour of the
    given label (taken from the matching mask) overlaid onto it.
    """
    image_slice = images[temporal_slice][:, coronal_slice, :]
    mask_slice = masks[temporal_slice][:, coronal_slice, :] == label
    overlay = overlay_binary_segmentation_contours(image_slice, mask_slice,
                                                   window_min, window_max)
    # Vertical flip puts the slice into the conventional radiological view.
    plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay)))
    plt.axis('off')
    plt.show()
def display_coronal_with_label_maps_overlay(coronal_slice, mask_index, image, masks, label, window_min, window_max):
    """
    Display a coronal slice from a 3D CT with the contour of the given label
    from masks[mask_index] overlaid onto it. Used to inspect the result of
    propagating a segmentation through a registration transform.
    """
    image_slice = image[:, coronal_slice, :]
    mask_slice = masks[mask_index][:, coronal_slice, :] == label
    overlay = overlay_binary_segmentation_contours(image_slice, mask_slice,
                                                   window_min, window_max)
    # Vertical flip puts the slice into the conventional radiological view.
    plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay)))
    plt.axis('off')
    plt.show()
| 44.189189 | 116 | 0.66422 | import SimpleITK as sitk
import matplotlib.pyplot as plt
import numpy as np
#
# Set of methods used for displaying the registration metric during the optimization.
#
# Callback invoked when the StartEvent happens, sets up our new data.
def start_plot():
    """Set up the figure and the module-global containers that record the
    registration metric values (registered for the sitk StartEvent)."""
    global metric_values, multires_iterations, ax, fig
    fig, ax = plt.subplots(1,1, figsize=(8,4))
    metric_values = []
    multires_iterations = []
    plt.show()
# Callback invoked when the EndEvent happens, do cleanup of data and figure.
def end_plot():
    """Tear down the module-global plotting state created by start_plot
    (registered for the sitk EndEvent)."""
    global metric_values, multires_iterations, ax, fig
    del metric_values
    del multires_iterations
    del ax
    del fig
# Callback invoked when the IterationEvent happens, update our data and display new figure.
def plot_values(registration_method):
    """Append the current metric value and redraw the figure (registered for
    the sitk IterationEvent of *registration_method*)."""
    global metric_values, multires_iterations, ax, fig
    metric_values.append(registration_method.GetMetricValue())
    # Plot the similarity metric values
    ax.plot(metric_values, 'r')
    # Blue stars mark the iterations where a multi-resolution change happened.
    ax.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')
    ax.set_xlabel('Iteration Number',fontsize=12)
    ax.set_ylabel('Metric Value',fontsize=12)
    fig.canvas.draw()
# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
# metric_values list.
def update_multires_iterations():
    """Record the metric-list index at which a new multi-resolution level
    starts (registered for the sitkMultiResolutionIterationEvent)."""
    global metric_values, multires_iterations
    multires_iterations.append(len(metric_values))
def overlay_binary_segmentation_contours(image, mask, window_min, window_max):
    """
    Overlay the contours of a binary mask onto a 2D image.

    Steps: (a) resample image and mask onto an isotropic grid for display,
    (b) window-level the image into 8 bit, (c) draw the mask contours.
    """
    sp = image.GetSpacing()
    sz = image.GetSize()
    finest = min(sp)
    iso_spacing = [finest, finest]
    iso_size = [int(round(sz[0] * (sp[0] / finest))),
                int(round(sz[1] * (sp[1] / finest)))]
    # Intensities get linear interpolation; the label mask must use nearest
    # neighbor so no new label values are invented.
    img_iso = sitk.Resample(image, iso_size, sitk.Transform(),
                            sitk.sitkLinear, image.GetOrigin(),
                            iso_spacing, image.GetDirection(), 0.0,
                            image.GetPixelID())
    msk_iso = sitk.Resample(mask, iso_size, sitk.Transform(),
                            sitk.sitkNearestNeighbor, mask.GetOrigin(),
                            iso_spacing, mask.GetDirection(), 0.0,
                            mask.GetPixelID())
    label_map = sitk.Cast(msk_iso, sitk.sitkLabelUInt8)
    # Account for the high dynamic range of the CT before casting to 8 bit.
    image_8bit = sitk.Cast(
        sitk.IntensityWindowing(img_iso,
                                windowMinimum=window_min,
                                windowMaximum=window_max),
        sitk.sitkUInt8)
    return sitk.LabelMapContourOverlay(label_map,
                                       image_8bit,
                                       opacity=1,
                                       contourThickness=[2, 2])
def display_coronal_with_overlay(temporal_slice, coronal_slice, images, masks, label, window_min, window_max):
    """
    Show one coronal slice of the 4D (3D+time) CT with the contour of
    ``label`` (from the matching mask) drawn on top.
    """
    ct_slice = images[temporal_slice][:, coronal_slice, :]
    binary = masks[temporal_slice][:, coronal_slice, :] == label
    composed = overlay_binary_segmentation_contours(ct_slice, binary,
                                                    window_min, window_max)
    # Flip vertically to match the radiological viewing convention.
    plt.imshow(np.flipud(sitk.GetArrayFromImage(composed)))
    plt.axis('off')
    plt.show()
def display_coronal_with_label_maps_overlay(coronal_slice, mask_index, image, masks, label, window_min, window_max):
    """
    Show one coronal slice of a 3D CT with the contour of ``label`` from
    ``masks[mask_index]`` drawn on top. Used to display the result of
    transforming a segmentation through a registration.
    """
    ct_slice = image[:, coronal_slice, :]
    binary = masks[mask_index][:, coronal_slice, :] == label
    composed = overlay_binary_segmentation_contours(ct_slice, binary,
                                                    window_min, window_max)
    # Flip vertically to match the radiological viewing convention.
    plt.imshow(np.flipud(sitk.GetArrayFromImage(composed)))
    plt.axis('off')
    plt.show()
| 829 | 0 | 88 |
f4b2b6a614552cb471ee1018cb6473daef2454ed | 4,499 | py | Python | valet/utils/protobuf/communicate_pb2.py | sadmicrowave/valet | 39724c3f2a49b2253e89044af2b103e3e89d4cd8 | [
"MIT"
] | null | null | null | valet/utils/protobuf/communicate_pb2.py | sadmicrowave/valet | 39724c3f2a49b2253e89044af2b103e3e89d4cd8 | [
"MIT"
] | null | null | null | valet/utils/protobuf/communicate_pb2.py | sadmicrowave/valet | 39724c3f2a49b2253e89044af2b103e3e89d4cd8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: communicate.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='communicate.proto',
package='valet',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x11\x63ommunicate.proto\x12\x05valet\"!\n\x0eRequestMessage\x12\x0f\n\x07message\x18\x01 \x01(\t\"/\n\x0cReplyMessage\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t2B\n\x0b\x43ommunicate\x12\x33\n\x03Say\x12\x15.valet.RequestMessage\x1a\x13.valet.ReplyMessage\"\x00\x62\x06proto3'
)
_REQUESTMESSAGE = _descriptor.Descriptor(
name='RequestMessage',
full_name='valet.RequestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='valet.RequestMessage.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=61,
)
_REPLYMESSAGE = _descriptor.Descriptor(
name='ReplyMessage',
full_name='valet.ReplyMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='valet.ReplyMessage.status', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='message', full_name='valet.ReplyMessage.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=110,
)
DESCRIPTOR.message_types_by_name['RequestMessage'] = _REQUESTMESSAGE
DESCRIPTOR.message_types_by_name['ReplyMessage'] = _REPLYMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RequestMessage = _reflection.GeneratedProtocolMessageType('RequestMessage', (_message.Message,), {
'DESCRIPTOR' : _REQUESTMESSAGE,
'__module__' : 'communicate_pb2'
# @@protoc_insertion_point(class_scope:valet.RequestMessage)
})
_sym_db.RegisterMessage(RequestMessage)
ReplyMessage = _reflection.GeneratedProtocolMessageType('ReplyMessage', (_message.Message,), {
'DESCRIPTOR' : _REPLYMESSAGE,
'__module__' : 'communicate_pb2'
# @@protoc_insertion_point(class_scope:valet.ReplyMessage)
})
_sym_db.RegisterMessage(ReplyMessage)
_COMMUNICATE = _descriptor.ServiceDescriptor(
name='Communicate',
full_name='valet.Communicate',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=112,
serialized_end=178,
methods=[
_descriptor.MethodDescriptor(
name='Say',
full_name='valet.Communicate.Say',
index=0,
containing_service=None,
input_type=_REQUESTMESSAGE,
output_type=_REPLYMESSAGE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_COMMUNICATE)
DESCRIPTOR.services_by_name['Communicate'] = _COMMUNICATE
# @@protoc_insertion_point(module_scope)
| 31.243056 | 329 | 0.759724 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: communicate.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='communicate.proto',
package='valet',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x11\x63ommunicate.proto\x12\x05valet\"!\n\x0eRequestMessage\x12\x0f\n\x07message\x18\x01 \x01(\t\"/\n\x0cReplyMessage\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t2B\n\x0b\x43ommunicate\x12\x33\n\x03Say\x12\x15.valet.RequestMessage\x1a\x13.valet.ReplyMessage\"\x00\x62\x06proto3'
)
_REQUESTMESSAGE = _descriptor.Descriptor(
name='RequestMessage',
full_name='valet.RequestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='valet.RequestMessage.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=61,
)
_REPLYMESSAGE = _descriptor.Descriptor(
name='ReplyMessage',
full_name='valet.ReplyMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='valet.ReplyMessage.status', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='message', full_name='valet.ReplyMessage.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=110,
)
DESCRIPTOR.message_types_by_name['RequestMessage'] = _REQUESTMESSAGE
DESCRIPTOR.message_types_by_name['ReplyMessage'] = _REPLYMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RequestMessage = _reflection.GeneratedProtocolMessageType('RequestMessage', (_message.Message,), {
'DESCRIPTOR' : _REQUESTMESSAGE,
'__module__' : 'communicate_pb2'
# @@protoc_insertion_point(class_scope:valet.RequestMessage)
})
_sym_db.RegisterMessage(RequestMessage)
ReplyMessage = _reflection.GeneratedProtocolMessageType('ReplyMessage', (_message.Message,), {
'DESCRIPTOR' : _REPLYMESSAGE,
'__module__' : 'communicate_pb2'
# @@protoc_insertion_point(class_scope:valet.ReplyMessage)
})
_sym_db.RegisterMessage(ReplyMessage)
_COMMUNICATE = _descriptor.ServiceDescriptor(
name='Communicate',
full_name='valet.Communicate',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=112,
serialized_end=178,
methods=[
_descriptor.MethodDescriptor(
name='Say',
full_name='valet.Communicate.Say',
index=0,
containing_service=None,
input_type=_REQUESTMESSAGE,
output_type=_REPLYMESSAGE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_COMMUNICATE)
DESCRIPTOR.services_by_name['Communicate'] = _COMMUNICATE
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
00abcc7f4cf9a20a02896d0e5d048eb36a63e80c | 2,801 | py | Python | main.py | TheWithz/discord-selfbot.py | 848fb994a4a8e2a1ccb69a948219b7ce356e8ad3 | [
"MIT"
] | null | null | null | main.py | TheWithz/discord-selfbot.py | 848fb994a4a8e2a1ccb69a948219b7ce356e8ad3 | [
"MIT"
] | null | null | null | main.py | TheWithz/discord-selfbot.py | 848fb994a4a8e2a1ccb69a948219b7ce356e8ad3 | [
"MIT"
] | null | null | null | import asyncio
import json
import os
import aiofiles
import discord
with open('config.json') as f:
    config = json.load(f)
# NOTE(review): `main` is not defined anywhere in this copy of the file (the
# definitions appear to have been stripped by the dataset filter), so running
# this script as-is raises NameError. Confirm against the full source below.
if __name__ == '__main__':
    main()
| 30.445652 | 82 | 0.482685 | import asyncio
import json
import os
import aiofiles
import discord
with open('config.json') as f:
config = json.load(f)
class Bot(discord.Client):
    """Discord self-bot client reacting to the logged-in user's own
    ``>tex``, ``>bigify`` and ``>Bigify`` commands."""
    def __init__(self):
        super(Bot, self).__init__()
    async def on_ready(self):
        # Log basic account information once the gateway connection is up.
        print('Logged in as')
        print(self.user.name)
        print(self.user.id)
        print('------')
    async def on_message(self, message):
        # Self-bot: only react to messages authored by the logged-in user.
        if message.author == self.user:
            content = message.content # type: str
            if content.startswith('>tex'):
                # Render the LaTeX snippet after the command and post the PNG.
                content = content[4:].strip()
                image_file = await compile_tex(content)
                await self.send_file(message.channel, image_file)
            elif content.startswith('>bigify'):
                await make_str(self, message, False)
            elif content.startswith('>Bigify'):
                # Capitalized variant additionally breaks words onto new lines.
                await make_str(self, message, True)
async def make_str(self, message, newline):
    """Replace *message* with its text spelled out in regional-indicator
    emoji; when *newline* is true, spaces become line breaks."""
    content = message.content[7:].strip()
    if not content:
        # Nothing after the command: just remove the triggering message.
        await self.delete_message(message)
        return
    pieces = []
    for ch in content:
        if ch == ' ' and newline:
            pieces.append('\n')
        elif ch.isalpha():
            pieces.append(':regional_indicator_%s: ' % ch)
        # Everything else (digits, punctuation, plain spaces) is dropped.
    msg = ''.join(pieces)
    await self.delete_message(message)
    await self.send_message(message.channel, msg)
async def compile_tex(snippet):
    """Compile a LaTeX snippet into a PNG and return the image path.

    The snippet is spliced into template.tex, compiled with pdflatex and
    trimmed/converted to tmp/snippet.png via ImageMagick's ``convert``.
    """
    async with aiofiles.open('template.tex') as f:
        template = await f.read()
    # Splice the user code into the template document.
    source = template.replace('{_user_code_}', snippet)
    async with aiofiles.open('tmp/snippet.tex', mode='w') as f:
        await f.write(source)
    # NOTE(review): -shell-escape on chat-supplied input lets the snippet run
    # shell commands during compilation -- a deliberate but risky trust choice.
    proc_latex = await asyncio.create_subprocess_exec('pdflatex',
                                                      '-shell-escape',
                                                      'snippet.tex', cwd='tmp/')
    await proc_latex.wait()
    proc_convert = await asyncio.create_subprocess_exec('convert',
                                                        '-density', '300',
                                                        'snippet.pdf',
                                                        '-trim',
                                                        '-border', '16x16',
                                                        '-background', 'white',
                                                        '-alpha', 'remove',
                                                        '-quality', '90',
                                                        'snippet.png', cwd='tmp/')
    await proc_convert.wait()
    return 'tmp/snippet.png'
def main():
    """Create the working directory and start the bot with the configured token."""
    os.makedirs('tmp', exist_ok=True)
    client = Bot()
    # bot=False logs in as a regular user account (self-bot mode).
    client.run(config['token'], bot=False)
if __name__ == '__main__':
main()
| 2,454 | 5 | 172 |
46cc55d2fe6b35ca6e90b4d3845fd8292a1109ea | 1,769 | py | Python | pyformlang/pda/utils.py | IlyaEp/pyformlang | eef239844beff5e9da3be4a4a240440ece81c10b | [
"MIT"
] | 15 | 2020-06-25T14:38:27.000Z | 2022-03-09T17:55:07.000Z | pyformlang/pda/utils.py | IlyaEp/pyformlang | eef239844beff5e9da3be4a4a240440ece81c10b | [
"MIT"
] | 11 | 2020-09-23T09:48:35.000Z | 2021-08-24T08:37:47.000Z | pyformlang/pda/utils.py | YaccConstructor/pyformlang | df640e13524c5d835ddcdedf25d8246fc73d7b88 | [
"MIT"
] | 5 | 2020-03-08T19:00:17.000Z | 2021-08-15T12:38:05.000Z | """ Useful functions for a PDA """
from .state import State
from .symbol import Symbol
from .stack_symbol import StackSymbol
from .epsilon import Epsilon
class PDAObjectCreator:
    """
    Factory that converts raw values into PDA objects (states, symbols and
    stack symbols), caching every created object so equal raw values always
    map to the identical object instance.
    """

    def __init__(self):
        # One cache per object kind, keyed by the raw value.
        # (Restored: this filtered copy had lost __init__, so every method
        # raised AttributeError on the first cache access.)
        self._state_creator = dict()
        self._symbol_creator = dict()
        self._stack_symbol_creator = dict()

    def to_state(self, given):
        """ Convert to a state """
        if isinstance(given, State):
            return _get_object_from_known(given, self._state_creator)
        return _get_object_from_raw(given, self._state_creator, State)

    def to_symbol(self, given):
        """ Convert to a symbol """
        if isinstance(given, Symbol):
            return _get_object_from_known(given, self._symbol_creator)
        # The literal string "epsilon" denotes the empty word.
        if given == "epsilon":
            return Epsilon()
        return _get_object_from_raw(given, self._symbol_creator, Symbol)

    def to_stack_symbol(self, given):
        """ Convert to a stack symbol """
        if isinstance(given, StackSymbol):
            return _get_object_from_known(given,
                                          self._stack_symbol_creator)
        if isinstance(given, Epsilon):
            return given
        return _get_object_from_raw(given,
                                    self._stack_symbol_creator,
                                    StackSymbol)


def _get_object_from_known(given, obj_converter):
    """Cache an already-built PDA object under its value and return the
    cached instance (the first object seen for a value wins)."""
    if given.value in obj_converter:
        return obj_converter[given.value]
    obj_converter[given.value] = given
    return given


def _get_object_from_raw(given, obj_converter, to_type):
    """Build (or fetch from the cache) a PDA object of type *to_type* from a
    raw value."""
    if given in obj_converter:
        return obj_converter[given]
    temp = to_type(given)
    obj_converter[given] = temp
    return temp
| 30.5 | 72 | 0.63143 | """ Useful functions for a PDA """
from .state import State
from .symbol import Symbol
from .stack_symbol import StackSymbol
from .epsilon import Epsilon
class PDAObjectCreator:
    """
    Factory that converts raw values into PDA objects (states, symbols and
    stack symbols), caching every created object so equal raw values always
    map to the identical object instance.
    """
    def __init__(self):
        # One cache per object kind, keyed by the raw value.
        self._state_creator = dict()
        self._symbol_creator = dict()
        self._stack_symbol_creator = dict()
    def to_state(self, given):
        """ Convert to a state """
        if isinstance(given, State):
            return _get_object_from_known(given, self._state_creator)
        return _get_object_from_raw(given, self._state_creator, State)
    def to_symbol(self, given):
        """ Convert to a symbol """
        if isinstance(given, Symbol):
            return _get_object_from_known(given, self._symbol_creator)
        # The literal string "epsilon" denotes the empty word.
        if given == "epsilon":
            return Epsilon()
        return _get_object_from_raw(given, self._symbol_creator, Symbol)
    def to_stack_symbol(self, given):
        """ Convert to a stack symbol """
        if isinstance(given, StackSymbol):
            return _get_object_from_known(given,
                                          self._stack_symbol_creator)
        # Epsilon is passed through unchanged (not cached as a stack symbol).
        if isinstance(given, Epsilon):
            return given
        return _get_object_from_raw(given,
                                    self._stack_symbol_creator,
                                    StackSymbol)
def _get_object_from_known(given, obj_converter):
    """Memoize an already-constructed object under its value; the first
    object stored for a value is the one every later call gets back."""
    return obj_converter.setdefault(given.value, given)
def _get_object_from_raw(given, obj_converter, to_type):
    """Fetch the cached object for a raw value, constructing and caching a
    new ``to_type(given)`` on the first request."""
    try:
        return obj_converter[given]
    except KeyError:
        created = to_type(given)
        obj_converter[given] = created
        return created
| 456 | 0 | 73 |
fed03a781b64a2ca6afd5bacbc8faa8e99b33252 | 1,144 | py | Python | bottrust/solve.py | corbinmcneill/codejam | 5156fec100c73eb95969a91fd20bf411aec4b795 | [
"Apache-2.0"
] | null | null | null | bottrust/solve.py | corbinmcneill/codejam | 5156fec100c73eb95969a91fd20bf411aec4b795 | [
"Apache-2.0"
] | null | null | null | bottrust/solve.py | corbinmcneill/codejam | 5156fec100c73eb95969a91fd20bf411aec4b795 | [
"Apache-2.0"
] | null | null | null |
# Driver: first line of input.txt is the number of test cases, each following
# line is one space-separated test case.
# NOTE(review): `solve` is not defined in this filtered copy of the file --
# running it as-is raises NameError. See the full source below.
infile = open("input.txt")
T =int(infile.readline().strip())
for t in range(1, T+1):
    solve(t, infile.readline().split(' '))
def opp(x):
    """Return the index of the other robot: 0 <-> 1 (parity flip)."""
    return 1 - (x % 2)
def solve(casenum, inlist):
    # Simulate one test case (Code Jam-style "Bot Trust"): two robots, indexed
    # 0 and 1 ('O' tokens map to 1), both start at position 1; moving one unit
    # takes one second and pressing a button takes one second. Buttons must be
    # pressed in input order; the other robot may walk in parallel.
    # Prints "Case #<n>: <total seconds>". (Python 2 print statements.)
    inlist.pop(0)               # drop the leading count field of the case
    pos=[1,1]                   # current position of each robot
    time=0
    waitingOn = 0               # robot whose button press is currently due
    queue = []                  # remaining (robot, target position) pairs
    while len(inlist) > 0:
        queue.append((1 if inlist.pop(0)=='O' else 0, int(inlist.pop(0))))
    primary = queue.pop(0)
    while len(queue) > 0:
        waitingOn = primary[0]
        # Time for this robot to walk to its button and press it.
        primetime = abs(primary[1] - pos[waitingOn]) + 1
        pos[waitingOn]=primary[1]
        # Absorb consecutive presses by the same robot into one stretch.
        while (len(queue) > 0 and queue[0][0] == waitingOn):
            primary=queue.pop(0)
            primetime += abs(primary[1] - pos[waitingOn]) + 1
            pos[waitingOn]=primary[1]
        if (len(queue)>0):
            secondary = queue.pop(0)
            # Let the other robot walk toward its next button meanwhile; it
            # either arrives early or covers `primetime` units of distance.
            if primetime >= abs(pos[opp(waitingOn)] - secondary[1]):
                pos[opp(waitingOn)] = secondary[1]
            else:
                pos[opp(waitingOn)] = secondary[1] - (abs(pos[opp(waitingOn)] - secondary[1])) + primetime
            primary = secondary
            time+=primetime
        else:
            time+=primetime
            print "Case #%d: %d"%(casenum, time)
            return
    # Last pending press after the queue is drained.
    waitingOn = primary[0]
    time += abs(primary[1] - pos[waitingOn]) + 1
    print "Case #%d: %d"%(casenum, time)
    return
# Driver: first line of input.txt is the number of test cases, each following
# line is one space-separated test case handed to solve().
infile = open("input.txt")
T =int(infile.readline().strip())
for t in range(1, T+1):
    solve(t, infile.readline().split(' '))
| 970 | 0 | 46 |
4b318a33f234812471367a2b69c153781e83ecf3 | 797 | py | Python | pyisis/tests/check_performance.py | rodsenra/pyisis | f5815fd096a463902893f87f309f8117b5705621 | [
"MIT"
] | null | null | null | pyisis/tests/check_performance.py | rodsenra/pyisis | f5815fd096a463902893f87f309f8117b5705621 | [
"MIT"
] | null | null | null | pyisis/tests/check_performance.py | rodsenra/pyisis | f5815fd096a463902893f87f309f8117b5705621 | [
"MIT"
] | 2 | 2019-11-08T20:51:54.000Z | 2021-08-17T23:49:48.000Z | # -*- coding: utf-8 -*-
"""
File to test Isis performance
"""
__created__ = "2007-05-15"
__updated__ = "2008-05-15"
__author__ = "Rodrigo Senra <rsenra@acm.org>"
# Setup test environment
from timeit import Timer
from pyisis.tests.config_tests import test_data, Lyer, initialize
config = initialize()
setup="""
from pyisis.files import MasterFile
from pyisis.views import list_all
from os.path import join
mf = MasterFile(join("..","sample","cds.mst"))
"""
if __name__=="__main__":
list_all() | 20.435897 | 65 | 0.668758 | # -*- coding: utf-8 -*-
"""
File to test Isis performance
"""
__created__ = "2007-05-15"
__updated__ = "2008-05-15"
__author__ = "Rodrigo Senra <rsenra@acm.org>"
# Setup test environment
from timeit import Timer
from pyisis.tests.config_tests import test_data, Lyer, initialize
config = initialize()
setup="""
from pyisis.files import MasterFile
from pyisis.views import list_all
from os.path import join
mf = MasterFile(join("..","sample","cds.mst"))
"""
def list_all():
    # Benchmark pyisis.views.list_all over the sample master file: run the
    # statement 4 times via timeit (names `mf` and `list_all` come from the
    # module-level `setup` string) and report the average seconds per pass.
    stmt = """
list_all(mf)
"""
    import sys
    # Silence the (very verbose) record listing while timing it.
    stdout = sys.stdout
    sys.stdout = open("/dev/null","w")
    # NOTE(review): "/dev/null" is POSIX-only (os.devnull would be portable),
    # the file object is never closed, and stdout is not restored if timeit
    # raises -- acceptable for a throwaway benchmark script.
    t = Timer(stmt=stmt, setup=setup)
    elapsed = t.timeit(number=4)/4
    sys.stdout.flush()
    sys.stdout = stdout
    print "list_all %.4f sec/pass" % (elapsed)
if __name__=="__main__":
list_all() | 273 | 0 | 23 |
f7e28024155c4f77c948f2b05fb0f54a2779a2c3 | 5,412 | py | Python | unify_eval/model/keras_model.py | goesslfabian/unify-eval | ced486e44ca57ed31b552fd20b53cae61015e486 | [
"Apache-2.0"
] | 3 | 2021-02-18T10:40:29.000Z | 2022-01-28T10:20:54.000Z | unify_eval/model/keras_model.py | goesslfabian/unify-eval | ced486e44ca57ed31b552fd20b53cae61015e486 | [
"Apache-2.0"
] | 8 | 2020-11-13T19:00:13.000Z | 2022-02-10T02:10:28.000Z | unify_eval/model/keras_model.py | goesslfabian/unify-eval | ced486e44ca57ed31b552fd20b53cae61015e486 | [
"Apache-2.0"
] | 1 | 2021-06-23T12:37:12.000Z | 2021-06-23T12:37:12.000Z | from typing import Dict, List
import numpy as np
from keras import utils
from keras.engine import Layer
from keras.layers import Embedding
from keras.models import Sequential
from keras.preprocessing import text, sequence
from unify_eval.model.mixins.classification import DeepModel, Classifier
from unify_eval.model.types import Tensor
from unify_eval.utils.label_mapper import LabelMapper
class KerasModel(Classifier):
    """
    Wrapper around a keras sequential classifier model.

    Texts are tokenized and padded to ``maxlen`` index sequences; labels are
    mapped to one-hot vectors through ``label_mapper``.
    """

    def __init__(self,
                 tokenizer: text.Tokenizer,
                 keras_model: Sequential,
                 label_mapper: LabelMapper,
                 maxlen: int,
                 text_kw: str = "texts",
                 label_kw: str = "labels"):
        """
        :param tokenizer: tokenizer to use
        :param keras_model: actual keras model
        :param label_mapper: label mapper instance that maps label indices to label names and vice versa
        :param maxlen: maximum input length (remainder is ignored)
        :param text_kw: keyword by which to extract text input
        :param label_kw: keyword by which to extract label input
        """
        super().__init__(label_mapper)
        self.keras_model = keras_model
        self.tokenizer = tokenizer
        self.maxlen = maxlen
        self.text_kw = text_kw
        self.label_kw = label_kw
        # Populated with the keras training history once the model is trained.
        self.loss = {}

    def preprocess_texts(self, texts) -> np.ndarray:
        """
        Map texts to padded index sequences.
        """
        # str() guards against non-string entries; renamed the loop variable
        # so it no longer shadows the `keras.preprocessing.text` module.
        sequences = self.tokenizer.texts_to_sequences([str(t) for t in texts])
        return sequence.pad_sequences(sequences=sequences, maxlen=self.maxlen)

    def preprocess_labels(self, labels) -> np.ndarray:
        """
        Map labels to one-hot vectors over the full label vocabulary.
        """
        indices = self.label_mapper.map_to_indices(labels)
        return utils.to_categorical(indices, self.label_mapper.n_labels)

    # Fix: the filtered copy left a stray @classmethod stacked on top of this
    # @staticmethod (a leftover decorator from a removed method), which broke
    # the descriptor. It is a plain static factory.
    @staticmethod
    def pretrained_keras_model(
            tokenizer: text.Tokenizer,
            keras_layers: List[Layer],
            label_mapper: LabelMapper,
            embedding_dim: int,
            embedding_index: Dict[str, np.ndarray],
            maxlen: int,
            text_kw: str = "texts",
            label_kw: str = "labels") -> "KerasModel":
        """
        Build a KerasModel whose first layer is a frozen embedding layer
        initialized from pretrained word vectors.

        :param tokenizer: tokenizer to use
        :param keras_layers: list of layers to concatenate into single model
        :param label_mapper: label mapper instance that maps label indices to label names and vice versa
        :param embedding_dim: embedding dimensionality
        :param embedding_index: map from token to embedding
        :param maxlen: maximum input length (remainder is ignored)
        :param text_kw: keyword by which to extract text input
        :param label_kw: keyword by which to extract label input
        """
        # Row i holds the pretrained vector of the token with index i; words
        # without a pretrained vector stay all-zeros.
        embedding_matrix = np.zeros((len(tokenizer.word_index) + 1, embedding_dim))
        for word, i in tokenizer.word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
        embedding_layer = Embedding(len(tokenizer.word_index) + 1,
                                    embedding_dim,
                                    weights=[embedding_matrix],
                                    input_length=maxlen,
                                    trainable=False)  # keep pretrained vectors frozen
        keras_model = Sequential([
            embedding_layer,
            *keras_layers])
        keras_model.compile(loss='categorical_crossentropy',
                            optimizer='adam',
                            metrics=['categorical_crossentropy'])
        return KerasModel(tokenizer=tokenizer,
                          keras_model=keras_model,
                          label_mapper=label_mapper,
                          maxlen=maxlen,
                          text_kw=text_kw,
                          label_kw=label_kw)
| 35.84106 | 104 | 0.592203 | from typing import Dict, List
import numpy as np
from keras import utils
from keras.engine import Layer
from keras.layers import Embedding
from keras.models import Sequential
from keras.preprocessing import text, sequence
from unify_eval.model.mixins.classification import DeepModel, Classifier
from unify_eval.model.types import Tensor
from unify_eval.utils.label_mapper import LabelMapper
class KerasModel(Classifier):
"""
Wrapper around a keras classifier model.
"""
    def __init__(self,
                 tokenizer: text.Tokenizer,
                 keras_model: Sequential,
                 label_mapper: LabelMapper,
                 maxlen: int,
                 text_kw: str = "texts",
                 label_kw: str = "labels"):
        """
        :param tokenizer: tokenizer to use
        :param keras_model: actual keras model
        :param label_mapper: label mapper instance that maps label indices to label names and vice versa
        :param maxlen: maximum input length (remainder is ignored)
        :param text_kw: keyword by which to extract text input
        :param label_kw: keyword by which to extract label input
        """
        super().__init__(label_mapper)
        self.keras_model = keras_model
        self.tokenizer = tokenizer
        self.maxlen = maxlen
        self.text_kw = text_kw
        self.label_kw = label_kw
        # Populated with the keras training history by train().
        self.loss = {}
def preprocess_texts(self, texts) -> np.ndarray:
"""
map texts to padded index sequences
"""
sequences = self.tokenizer.texts_to_sequences([str(text) for text in texts])
x = sequence.pad_sequences(sequences=sequences, maxlen=self.maxlen)
return x
def preprocess_labels(self, labels) -> np.ndarray:
"""
map labels to onehot indices
"""
y = self.label_mapper.map_to_indices(labels)
y = utils.to_categorical(y, self.label_mapper.n_labels)
return y
def predict_label_probabilities(self, **kwargs) -> np.array:
x_test = self.preprocess_texts(texts=kwargs[self.text_kw])
return self.keras_model.predict(x_test)
def train(self, **kwargs) -> "DeepModel":
x_train = self.preprocess_texts(kwargs[self.text_kw])
y_train = self.preprocess_labels(kwargs[self.label_kw])
# train_on_batch?
history = self.keras_model.fit(x_train, y_train,
batch_size=kwargs["batch_size"],
epochs=kwargs["epochs"],
verbose=kwargs["verbose"])
self.loss = history.history
return self
def get_loss(self, **kwargs) -> dict:
return self.loss
@classmethod
def from_components(cls, **kwargs) -> "DeepModel":
return cls(**kwargs)
def get_numpy_parameters(self) -> Dict[str, np.ndarray]:
return {
}
def get_components(self) -> dict:
return {
"keras_model": self.keras_model,
"label_mapper": self.label_mapper,
"tokenizer": self.tokenizer,
"maxlen": self.maxlen,
"text_kw": self.text_kw,
"label_kw": self.label_kw
}
def get_logits(self, **kwargs) -> Tensor:
pass
@staticmethod
def pretrained_keras_model(
tokenizer: text.Tokenizer,
keras_layers: List[Layer],
label_mapper: LabelMapper,
embedding_dim: int,
embedding_index: Dict[str, np.ndarray],
maxlen: int,
text_kw: str = "texts",
label_kw: str = "labels") -> "KerasModel":
"""
:param tokenizer: tokenizer to use
:param keras_layers: list of layers to concatenate into single model
:param label_mapper: label mapper instance that maps label indices to label names and vice versa
:param embedding_dim: embedding dimensionality
:param embedding_index: map from token to embedding
:param maxlen: maximum input length (remainder is ignored)
:param text_kw: keyword by which to extract text input
:param label_kw: keyword by which to extract label input
"""
embedding_matrix = np.zeros((len(tokenizer.word_index) + 1, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(tokenizer.word_index) + 1,
embedding_dim,
weights=[embedding_matrix],
input_length=maxlen,
trainable=False)
keras_model = Sequential([
embedding_layer,
*keras_layers])
keras_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['categorical_crossentropy'])
return KerasModel(tokenizer=tokenizer,
keras_model=keras_model,
label_mapper=label_mapper,
maxlen=maxlen,
text_kw=text_kw,
label_kw=label_kw)
| 1,121 | 0 | 188 |
8c2ffadb64c357fa253ed239b739f7970fd7159f | 1,833 | py | Python | learning/base-language/custom-errors/custom-error-2.py | gerryw1389/python | 74fedaf2034769f2865659f14d332026b9aaede3 | [
"MIT"
] | 2 | 2020-12-01T17:29:09.000Z | 2020-12-13T02:54:43.000Z | learning/base-language/custom-errors/custom-error-2.py | gerryw1389/python | 74fedaf2034769f2865659f14d332026b9aaede3 | [
"MIT"
] | 4 | 2020-12-26T15:08:02.000Z | 2021-05-16T13:19:33.000Z | learning/base-language/custom-errors/custom-error-2.py | gerryw1389/python | 74fedaf2034769f2865659f14d332026b9aaede3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
################################################################
# Check for 'myfile.csv' in a certain directory
# Since this has more than two lines, it will output the files contents
# You should also try renaming to 'myfile3.csv' and re-running to ensure it catches it
################################################################
class Error(Exception):
    """Base class for this script's custom exceptions."""
    pass


# FIX: EmptyFileError was raised and caught below but never defined, so the
# `except EmptyFileError` clause itself raised NameError. Define it here.
class EmptyFileError(Error):
    """Raised when the CSV has fewer than two lines (header + one data row)."""
    pass


try:
    filename = 'C:\\_gwill\\repo-home\\h1python\\learning\\base-language\\custom-errors\\myfile2.csv'
    # Use `with open(filename, encoding='utf-8') as thefile:` if the file has special chars
    with open(filename) as thefile:
        file_content = thefile.readlines()
        # file_content is a list like ['FirstName,LastName\n', 'Darth,Vader']
        line_count = len(file_content)
        # fewer than two lines means there is no data row: raise our own error
        if line_count < 2:
            raise EmptyFileError
except FileNotFoundError:
    # file does not exist at the hard-coded path
    print("there is no myfile2.csv")
except EmptyFileError:
    # our custom error: the file is present but effectively empty
    print('your file has less than two lines, exiting...')
    thefile.close()  # redundant after `with`, but harmless
except Exception as e:
    # any other failure while reading
    print('Failed: Exception was ' + str(e))
    thefile.close()
else:
    # success: readlines() gave us a list, so print each line;
    # end='' avoids doubling the newlines already present in each line
    for one_line in file_content:
        print(one_line, end='')
    thefile.close()
| 33.944444 | 104 | 0.624659 | #!/usr/bin/env python3
################################################################
# Check for 'myfile.csv' in a certain directory
# Since this has more than two lines, it will output the files contents
# You should also try renaming to 'myfile3.csv' and re-running to ensure it catches it
################################################################
class Error(Exception):
    ''' Base class for other exceptions'''
    pass


# here we define our own error
class EmptyFileError(Error):
    # raised when the CSV has fewer than two lines (header + one data row)
    pass


try:
    filename = 'C:\\_gwill\\repo-home\\h1python\\learning\\base-language\\custom-errors\\myfile2.csv'
    #Use `with open(filename, encoding='utf-8') as thefile:` if the file has special chars
    with open(filename) as thefile:
        #if the file has less than 2 lines, throw our own error
        file_content = thefile.readlines()
        # At this point, file_content should contain a list like ['FirstName,LastName\n', 'Darth,Vader']
        line_count = len(file_content)
        if line_count < 2:
            raise EmptyFileError
except FileNotFoundError:
    # catch if file doesn't exist
    print("there is no myfile2.csv")
except EmptyFileError:
    # catch our custom error
    print('your file has less than two lines, exiting...')
    thefile.close()  # note: the `with` block already closed the file; harmless
except Exception as e:
    # catch any other exception
    print('Failed: Exception was ' + str(e))
    thefile.close()
else:
    # yay! we made it without errors, let's read the file!
    # since we did readlines(), it is a list object so we loop through and print
    # If we instead did read() then you would just print
    for one_line in file_content:
        # and end='' in order for there not to be line breaks for each line
        #print(one_line)
        print(one_line, end='')
    thefile.close()
#print('Success!')
| 0 | 16 | 22 |
e92a4323942d5e185d2ec4d2a8f20702a5940c71 | 3,429 | py | Python | DIKB/DIKB_Utils.py | dbmi-pitt/DIKB-Evidence-analytics | 9ffd629db30c41ced224ff2afdf132ce9276ae3f | [
"MIT"
] | 3 | 2015-06-08T17:58:54.000Z | 2022-03-10T18:49:44.000Z | DIKB/DIKB_Utils.py | dbmi-pitt/DIKB-Evidence-analytics | 9ffd629db30c41ced224ff2afdf132ce9276ae3f | [
"MIT"
] | null | null | null | DIKB/DIKB_Utils.py | dbmi-pitt/DIKB-Evidence-analytics | 9ffd629db30c41ced224ff2afdf132ce9276ae3f | [
"MIT"
] | null | null | null | ## The Drug Interaction Knowledge Base (DIKB) is (C) Copyright 2005 by
## Richard Boyce
## Original Authors:
## Richard Boyce
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the
## Free Software Foundation, Inc., 59 Temple Place - Suite 330,
## Boston, MA 02111-1307, USA.
## -----------------------------------------------------------------
## File: DIKB_Utils.py
###Functions for editing assertions in the KBs
from DIKB import *
from DrugModel import *
from EvidenceModel import *
# #### a function for adding bioavailability - TODO: create general evidence adding/editing functions
# def addBioavail(drg,et,pntr, quote, val, revwr, ev_base, ev_pickle_path, dikb):
# """ add evidence for the bioavailability of a drug.
# in: drg - a string specifying an drug in the 'dikb' knowledge-base
# in: et - a string specifying the 'evidence_type' of the evidence
# in: pntr - a string specifying the name or pubmed id of the evidence
# in: quote - a relevant quote from the document
# in: val - a float value for the bioavailability of the drug
# in: revwr - a string stating the reviewer of this evidence
# in: ev_base - an EvidenceBase drgect to store this evidence in
# in: ev_pickle_path - a string path to the pickle file for the evidence base
# in: dikb - a DIKB drgect
# out: 1 if error, 0 otherwise"""
# if not dikb.drgects.has_key(drg):
# print(" ".join(["addBioavail - Error: drgect name ", drg, "does not exist in dikb; spelling correct?. EXITING! Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# return 1
# a1 = Assertion(drg,'bioavailability','continuous_value')
# e1 = EvidenceContinousVal()
# e1.doc_pointer = pntr
# e1.quote = quote
# e1.evidence_type.putEntry(et)
# e1.value = val
# e1.reviewer.putEntry(revwr)
# a1.evidence_for.append(e1)
# lst_len = len(dikb.drgects[drg].bioavailability.evidence)
# ev_base.addAssertion(a1)
# if len(dikb.drgects[drg].bioavailability.evidence) == lst_len:
# print(" ".join(["addBioavail - Error: evidence for bioavailability did not get assigned. Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# try:
# ev.pickleKB(ev_pickle_path)
# print(" ".join(["addBioavail - Message: evidence for bioavailability added and stored in pickle. Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# except IOError, err:
# print(" ".join(["addBioavail - Error: evidence for bioavailability added but NOT STORED in pickle. Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# return 0
| 45.118421 | 132 | 0.649169 | ## The Drug Interaction Knowledge Base (DIKB) is (C) Copyright 2005 by
## Richard Boyce
## Original Authors:
## Richard Boyce
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the
## Free Software Foundation, Inc., 59 Temple Place - Suite 330,
## Boston, MA 02111-1307, USA.
## -----------------------------------------------------------------
## File: DIKB_Utils.py
###Functions for editing assertions in the KBs
from DIKB import *
from DrugModel import *
from EvidenceModel import *
# #### a function for adding bioavailability - TODO: create general evidence adding/editing functions
# def addBioavail(drg,et,pntr, quote, val, revwr, ev_base, ev_pickle_path, dikb):
# """ add evidence for the bioavailability of a drug.
# in: drg - a string specifying an drug in the 'dikb' knowledge-base
# in: et - a string specifying the 'evidence_type' of the evidence
# in: pntr - a string specifying the name or pubmed id of the evidence
# in: quote - a relevant quote from the document
# in: val - a float value for the bioavailability of the drug
# in: revwr - a string stating the reviewer of this evidence
# in: ev_base - an EvidenceBase drgect to store this evidence in
# in: ev_pickle_path - a string path to the pickle file for the evidence base
# in: dikb - a DIKB drgect
# out: 1 if error, 0 otherwise"""
# if not dikb.drgects.has_key(drg):
# print(" ".join(["addBioavail - Error: drgect name ", drg, "does not exist in dikb; spelling correct?. EXITING! Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# return 1
# a1 = Assertion(drg,'bioavailability','continuous_value')
# e1 = EvidenceContinousVal()
# e1.doc_pointer = pntr
# e1.quote = quote
# e1.evidence_type.putEntry(et)
# e1.value = val
# e1.reviewer.putEntry(revwr)
# a1.evidence_for.append(e1)
# lst_len = len(dikb.drgects[drg].bioavailability.evidence)
# ev_base.addAssertion(a1)
# if len(dikb.drgects[drg].bioavailability.evidence) == lst_len:
# print(" ".join(["addBioavail - Error: evidence for bioavailability did not get assigned. Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# try:
# ev.pickleKB(ev_pickle_path)
# print(" ".join(["addBioavail - Message: evidence for bioavailability added and stored in pickle. Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# except IOError, err:
# print(" ".join(["addBioavail - Error: evidence for bioavailability added but NOT STORED in pickle. Values - ",
# "drug: ", drg, "evidence pointer: ", pntr, "evidence type: ", et]))
# return 0
| 0 | 0 | 0 |
3f33f3dcee520339082c17e485022226b8479d5f | 4,828 | py | Python | tests/functional/test_empty_value.py | mazzi/tartiflette | 54ffdcb97f3ef0ea8b87ea3378790221cdb08e0b | [
"MIT"
] | 530 | 2019-06-04T11:45:36.000Z | 2022-03-31T09:29:56.000Z | tests/functional/test_empty_value.py | mazzi/tartiflette | 54ffdcb97f3ef0ea8b87ea3378790221cdb08e0b | [
"MIT"
] | 242 | 2019-06-04T11:53:08.000Z | 2022-03-28T07:06:27.000Z | tests/functional/test_empty_value.py | mazzi/tartiflette | 54ffdcb97f3ef0ea8b87ea3378790221cdb08e0b | [
"MIT"
] | 36 | 2019-06-21T06:40:27.000Z | 2021-11-04T13:11:16.000Z | import pytest
from tartiflette import Resolver, create_engine
_SDL = """
type bobby {
c: String
}
type boby {
b: bobby!
}
type bob {
a: boby
}
type Query {
string1: String!
stringList: [String]
stringListNonNull: [String]!
nonNullStringList: [String!]
nonNullStringListNonNull: [String!]!
anObject: bob
}
"""
@Resolver("Query.string1", schema_name="test_empty_values")
@Resolver("Query.stringList", schema_name="test_empty_values")
@Resolver("Query.stringListNonNull", schema_name="test_empty_values")
@Resolver("Query.nonNullStringList", schema_name="test_empty_values")
@Resolver("Query.nonNullStringListNonNull", schema_name="test_empty_values")
@Resolver("bobby.c", schema_name="test_empty_values")
@Resolver("boby.b", schema_name="test_empty_values")
@Resolver("Query.anObject", schema_name="test_empty_values")
@Resolver("bob.a", schema_name="test_empty_values")
@pytest.fixture(scope="module")
@pytest.mark.parametrize(
"query,expected",
[
(
"""
query {
string1
}""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.string1.",
"path": ["string1"],
"locations": [{"column": 17, "line": 3}],
}
],
},
),
(
"""
query {
stringList
}
""",
{"data": {"stringList": None}},
),
(
"""
query {
nonNullStringList
}
""",
{"data": {"nonNullStringList": None}},
),
(
"""
query {
stringListNonNull
}
""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.stringListNonNull.",
"path": ["stringListNonNull"],
"locations": [{"line": 3, "column": 17}],
}
],
},
),
(
"""
query {
nonNullStringListNonNull
}
""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.nonNullStringListNonNull.",
"path": ["nonNullStringListNonNull"],
"locations": [{"line": 3, "column": 17}],
}
],
},
),
(
"""
query {
string1
stringList
nonNullStringList
stringListNonNull
nonNullStringListNonNull
}""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.string1.",
"path": ["string1"],
"locations": [{"line": 3, "column": 17}],
},
{
"message": "Cannot return null for non-nullable field Query.stringListNonNull.",
"path": ["stringListNonNull"],
"locations": [{"line": 6, "column": 17}],
},
{
"message": "Cannot return null for non-nullable field Query.nonNullStringListNonNull.",
"path": ["nonNullStringListNonNull"],
"locations": [{"line": 7, "column": 17}],
},
],
},
),
],
)
@pytest.mark.asyncio
@pytest.mark.asyncio
| 27.276836 | 111 | 0.440969 | import pytest
from tartiflette import Resolver, create_engine
_SDL = """
type bobby {
c: String
}
type boby {
b: bobby!
}
type bob {
a: boby
}
type Query {
string1: String!
stringList: [String]
stringListNonNull: [String]!
nonNullStringList: [String!]
nonNullStringListNonNull: [String!]!
anObject: bob
}
"""
@Resolver("Query.string1", schema_name="test_empty_values")
@Resolver("Query.stringList", schema_name="test_empty_values")
@Resolver("Query.stringListNonNull", schema_name="test_empty_values")
@Resolver("Query.nonNullStringList", schema_name="test_empty_values")
@Resolver("Query.nonNullStringListNonNull", schema_name="test_empty_values")
@Resolver("bobby.c", schema_name="test_empty_values")
@Resolver("boby.b", schema_name="test_empty_values")
async def resolver_x(_pr, _args, _ctx, _info):
return None
@Resolver("Query.anObject", schema_name="test_empty_values")
@Resolver("bob.a", schema_name="test_empty_values")
async def resolver_y(_pr, _args, _ctx, _info):
return {}
@pytest.fixture(scope="module")
async def ttftt_engine():
    """Tartiflette engine built once per test module from the test SDL."""
    return await create_engine(sdl=_SDL, schema_name="test_empty_values")
@pytest.mark.parametrize(
"query,expected",
[
(
"""
query {
string1
}""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.string1.",
"path": ["string1"],
"locations": [{"column": 17, "line": 3}],
}
],
},
),
(
"""
query {
stringList
}
""",
{"data": {"stringList": None}},
),
(
"""
query {
nonNullStringList
}
""",
{"data": {"nonNullStringList": None}},
),
(
"""
query {
stringListNonNull
}
""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.stringListNonNull.",
"path": ["stringListNonNull"],
"locations": [{"line": 3, "column": 17}],
}
],
},
),
(
"""
query {
nonNullStringListNonNull
}
""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.nonNullStringListNonNull.",
"path": ["nonNullStringListNonNull"],
"locations": [{"line": 3, "column": 17}],
}
],
},
),
(
"""
query {
string1
stringList
nonNullStringList
stringListNonNull
nonNullStringListNonNull
}""",
{
"data": None,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.string1.",
"path": ["string1"],
"locations": [{"line": 3, "column": 17}],
},
{
"message": "Cannot return null for non-nullable field Query.stringListNonNull.",
"path": ["stringListNonNull"],
"locations": [{"line": 6, "column": 17}],
},
{
"message": "Cannot return null for non-nullable field Query.nonNullStringListNonNull.",
"path": ["nonNullStringListNonNull"],
"locations": [{"line": 7, "column": 17}],
},
],
},
),
],
)
@pytest.mark.asyncio
async def test_empty_values_1(query, expected, ttftt_engine):
assert await ttftt_engine.execute(query) == expected
@pytest.mark.asyncio
async def test_empty_values_2(ttftt_engine):
    """A null non-nullable nested field (boby.b) nulls its parent object."""
    assert await ttftt_engine.execute(
        """
        query {
            anObject { a {b { c}}}
        }
        """
    ) == {
        "data": {"anObject": {"a": None}},
        "errors": [
            {
                "message": "Cannot return null for non-nullable field boby.b.",
                "path": ["anObject", "a", "b"],
                "locations": [{"line": 3, "column": 27}],
            }
        ],
    }
| 707 | 0 | 110 |
e2257060548bbe94947020be6e7151c54ae4f973 | 673 | py | Python | tests/extmod/uasyncio_fair.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 13,648 | 2015-01-01T01:34:51.000Z | 2022-03-31T16:19:53.000Z | tests/extmod/uasyncio_fair.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 7,092 | 2015-01-01T07:59:11.000Z | 2022-03-31T23:52:18.000Z | tests/extmod/uasyncio_fair.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 4,942 | 2015-01-02T11:48:50.000Z | 2022-03-31T19:57:10.000Z | # Test fairness of scheduler
try:
import uasyncio as asyncio
except ImportError:
try:
import asyncio
except ImportError:
print("SKIP")
raise SystemExit
asyncio.run(main())
| 19.228571 | 44 | 0.592868 | # Test fairness of scheduler
try:
import uasyncio as asyncio
except ImportError:
try:
import asyncio
except ImportError:
print("SKIP")
raise SystemExit
async def task(id, t):
    """Run forever; announce work only when t > 0, awaiting sleep(t) each loop.

    A non-positive t makes the task spin without printing (negative sleeps
    return immediately under CPython asyncio), which is what stresses
    scheduler fairness here.
    """
    print("task start", id)
    while True:
        if t > 0:
            print("task work", id)
        await asyncio.sleep(t)
async def main():
    """Spawn four tasks with mixed sleep periods, let them run ~0.5 s, cancel all."""
    handles = [
        asyncio.create_task(task(ident, period))
        for ident, period in ((1, -0.01), (2, 0.1), (3, 0.18), (4, -100))
    ]
    await asyncio.sleep(0.5)
    # cancel in creation order, exactly as the tasks were started
    for handle in handles:
        handle.cancel()
    print("finish")
asyncio.run(main())
| 414 | 0 | 46 |
8f572daf819a48b89eb252616a9f6be2ccd086e9 | 5,767 | py | Python | assessment/views.py | ma2th/vfat-server | 3ccf98159c0b404e42cd8b2b66593130d8575c00 | [
"Apache-2.0"
] | null | null | null | assessment/views.py | ma2th/vfat-server | 3ccf98159c0b404e42cd8b2b66593130d8575c00 | [
"Apache-2.0"
] | null | null | null | assessment/views.py | ma2th/vfat-server | 3ccf98159c0b404e42cd8b2b66593130d8575c00 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Matthias Ring
# Machine Learning and Data Analytics Lab
# Friedrich-Alexander-University Erlangen-Nuremberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib as mpl
mpl.use('Agg')
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
import vfatserver.consts as consts
import vfatserver.util as util
from .util import plot_visual_field, plot_curve
from .models import EquidistantAssessment, OctopusG1Assessment
| 42.718519 | 119 | 0.710075 | # Copyright 2019 Matthias Ring
# Machine Learning and Data Analytics Lab
# Friedrich-Alexander-University Erlangen-Nuremberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib as mpl
mpl.use('Agg')
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
import vfatserver.consts as consts
import vfatserver.util as util
from .util import plot_visual_field, plot_curve
from .models import EquidistantAssessment, OctopusG1Assessment
def index(request):
    """Overview page listing the logged-in user's assessments.

    Results are grouped by program type (equidistant / Octopus G1) and tested
    eye (tested_eye=0 feeds the *_left lists, 1 the *_right lists), most
    recent first.
    """
    equidistant_left = EquidistantAssessment.objects.filter(user=request.user, tested_eye=0).order_by('-date')
    equidistant_right = EquidistantAssessment.objects.filter(user=request.user, tested_eye=1).order_by('-date')
    octopusg1_left = OctopusG1Assessment.objects.filter(user=request.user, tested_eye=0).order_by('-date')
    octopusg1_right = OctopusG1Assessment.objects.filter(user=request.user, tested_eye=1).order_by('-date')
    context = {'equidistant_left': equidistant_left, 'equidistant_right': equidistant_right,
               'octopusg1_left': octopusg1_left, 'octopusg1_right': octopusg1_right}
    return render(request, 'assessment/index.html', context)
def detail_equidistant(request, assessment_id):
    """Detail page for one equidistant assessment owned by the requesting user.

    Responds 404 when the assessment does not exist or belongs to another user.
    """
    equidistant_assessment = get_object_or_404(EquidistantAssessment, pk=assessment_id, user=request.user)
    # URLs consumed by the template: linked program configuration, the two
    # SVG plot endpoints, and the delete confirmation page.
    context = {'assessment': equidistant_assessment,
               'program_url': reverse('clientconf:equidistant-update', args=[equidistant_assessment.configuration.id]),
               'field_url': reverse('assessment:detail-equidistant-field', args=[equidistant_assessment.id]),
               'curve_url': reverse('assessment:detail-equidistant-curve', args=[equidistant_assessment.id]),
               'delete_url': reverse('assessment:delete-equidistant', args=[equidistant_assessment.id])}
    return render(request, 'assessment/detail.html', context)
def detail_octopusg1(request, assessment_id):
    """Detail page for one Octopus G1 assessment owned by the requesting user.

    Responds 404 when the assessment does not exist or belongs to another user.
    """
    ocotopus_assessment = get_object_or_404(OctopusG1Assessment, pk=assessment_id, user=request.user)
    # URLs consumed by the template: linked program configuration, the two
    # SVG plot endpoints, and the delete confirmation page.
    context = {'assessment': ocotopus_assessment,
               'program_url': reverse('clientconf:octopus-update', args=[ocotopus_assessment.configuration.id]),
               'field_url': reverse('assessment:detail-octopusg1-field', args=[ocotopus_assessment.id]),
               'curve_url': reverse('assessment:detail-octopusg1-curve', args=[ocotopus_assessment.id]),
               'delete_url': reverse('assessment:delete-octopusg1', args=[ocotopus_assessment.id])}
    return render(request, 'assessment/detail.html', context)
def detail_equidistant_field(request, assessment_id):
    """Serve the visual-field plot of one of the user's equidistant assessments as SVG."""
    owned_assessment = get_object_or_404(
        EquidistantAssessment, pk=assessment_id, user=request.user
    )
    return util.fig_to_svg_response(plot_visual_field(owned_assessment))
def detail_octopusg1_field(request, assessment_id):
    """Serve the visual-field plot of one of the user's Octopus G1 assessments as SVG."""
    owned_assessment = get_object_or_404(
        OctopusG1Assessment, pk=assessment_id, user=request.user
    )
    return util.fig_to_svg_response(plot_visual_field(owned_assessment))
def detail_equidistant_curve(request, assessment_id):
    """Serve the assessment curve of one of the user's equidistant assessments as SVG."""
    owned_assessment = get_object_or_404(
        EquidistantAssessment, pk=assessment_id, user=request.user
    )
    return util.fig_to_svg_response(plot_curve(owned_assessment))
def detail_octopusg1_curve(request, assessment_id):
    """Serve the assessment curve of one of the user's Octopus G1 assessments as SVG."""
    owned_assessment = get_object_or_404(
        OctopusG1Assessment, pk=assessment_id, user=request.user
    )
    return util.fig_to_svg_response(plot_curve(owned_assessment))
def delete_equidistant(request, assessment_id):
    """Confirmation page (GET) and deletion (POST) for an equidistant assessment.

    The demo account can open the page, but its assessments are never deleted;
    it is shown the confirmation page again with an explanatory message.
    """
    assessment = get_object_or_404(EquidistantAssessment, user=request.user, pk=assessment_id)
    if request.method == 'POST':
        if request.user.username != consts.demo_user_name:
            assessment.delete()
            return redirect('assessment:index')
        else:
            # demo user: refuse deletion, re-render confirmation with message
            return render(request, 'assessment/delete.html',
                          context={'assessment': assessment,
                                   'back_url': reverse('assessment:detail-equidistant', args=[assessment.id]),
                                   'message': 'Demo assessments cannot be deleted!'})
    else:
        # GET: show the confirmation page
        return render(request, 'assessment/delete.html',
                      context={'assessment': assessment,
                               'back_url': reverse('assessment:detail-equidistant', args=[assessment.id])})
def delete_octopusg1(request, assessment_id):
    """Confirmation page (GET) and deletion (POST) for an Octopus G1 assessment.

    The demo account can open the page, but its assessments are never deleted;
    it is shown the confirmation page again with an explanatory message.
    """
    assessment = get_object_or_404(OctopusG1Assessment, user=request.user, pk=assessment_id)
    if request.method == 'POST':
        if request.user.username != consts.demo_user_name:
            assessment.delete()
            return redirect('assessment:index')
        else:
            # demo user: refuse deletion, re-render confirmation with message
            return render(request, 'assessment/delete.html',
                          context={'assessment': assessment,
                                   'back_url': reverse('assessment:detail-octopusg1', args=[assessment.id]),
                                   'message': 'Demo assessments cannot be deleted!'})
    else:
        # GET: show the confirmation page
        return render(request, 'assessment/delete.html',
                      context={'assessment': assessment,
                               'back_url': reverse('assessment:detail-octopusg1', args=[assessment.id])})
| 4,566 | 0 | 207 |
70b67168ab77c3dc1229b30dab4060805c01673a | 770 | py | Python | python/Orbits/OrbitColor.py | PaulAustin/sb7 | e7e7f9f85387d16f6069ed8e98192bd387d8cf95 | [
"MIT"
] | null | null | null | python/Orbits/OrbitColor.py | PaulAustin/sb7 | e7e7f9f85387d16f6069ed8e98192bd387d8cf95 | [
"MIT"
] | null | null | null | python/Orbits/OrbitColor.py | PaulAustin/sb7 | e7e7f9f85387d16f6069ed8e98192bd387d8cf95 | [
"MIT"
] | null | null | null | # Turtles in space
import turtle
sky = turtle.Screen()
sky.tracer(0)
sky.bgcolor('black')
rocket = turtle.Turtle()
rocket.speed(0)
rocket.color('green')
a = 10.0
b = 28.0
c = 8.0/3.0
x = y = z= 1.0e-1
#x = y = z= 1.0e-200
tic = 0.0
sky.ontimer(tictoc, 5)
| 16.73913 | 31 | 0.509091 | # Turtles in space
import turtle
sky = turtle.Screen()
sky.tracer(0)
sky.bgcolor('black')
rocket = turtle.Turtle()
rocket.speed(0)
rocket.color('green')
a = 10.0
b = 28.0
c = 8.0/3.0
x = y = z= 1.0e-1
#x = y = z= 1.0e-200
def setColor(x, y, z):
r = min(abs(x+0.5), 1.0)
g = min(abs(y+0.5), 1.0)
b = min(abs(z+0.5), 1.0)
rocket.pencolor((r, g, b))
def draw():
global x, y, z
dt = 0.01
dx = (a * (y-x)) * dt;
dy = (x * (b-z) - y) * dt;
dz = (x * y - c * z) * dt
x += dx
y += dy
z += dz
setColor(dx, dy, dz)
rocket.goto(x*15, z*10-250)
tic = 0.0
def tictoc():
global tic
print('iterations', tic)
tic += 10.0
for i in range (10):
draw()
sky.ontimer(tictoc, 5)
sky.ontimer(tictoc, 5)
| 442 | 0 | 68 |
d1a33930c9561630af9fd3336226f039547dac00 | 27,966 | py | Python | pesummary/gw/conversions/remnant.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2021-08-03T05:58:20.000Z | 2021-08-03T05:58:20.000Z | pesummary/gw/conversions/remnant.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 1 | 2020-06-13T13:29:35.000Z | 2020-06-15T12:45:04.000Z | pesummary/gw/conversions/remnant.py | pesummary/pesummary | 99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8 | [
"MIT"
] | 3 | 2021-07-08T08:31:28.000Z | 2022-03-31T14:08:58.000Z | # Licensed under an MIT style license -- see LICENSE.md
import numpy as np
from pesummary.utils.utils import logger, iterator
from pesummary.utils.decorators import array_input
from .spins import chi_p
__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
try:
import lalsimulation
from lalsimulation import (
FLAG_SEOBNRv4P_HAMILTONIAN_DERIVATIVE_NUMERICAL,
FLAG_SEOBNRv4P_EULEREXT_QNM_SIMPLE_PRECESSION,
FLAG_SEOBNRv4P_ZFRAME_L
)
from lal import MSUN_SI
except ImportError:
pass
# Default flag set passed to the SEOBNRv4P(HM) remnant-fit routines.
# NOTE(review): the FLAG_* names are bound only if the guarded lalsimulation
# import above succeeded; with lalsimulation missing, building this dict
# raises NameError at module import -- confirm whether that is intended.
DEFAULT_SEOBFLAGS = {
    "SEOBNRv4P_SpinAlignedEOBversion": 4,
    "SEOBNRv4P_SymmetrizehPlminusm": 1,
    "SEOBNRv4P_HamiltonianDerivative": FLAG_SEOBNRv4P_HAMILTONIAN_DERIVATIVE_NUMERICAL,
    "SEOBNRv4P_euler_extension": FLAG_SEOBNRv4P_EULEREXT_QNM_SIMPLE_PRECESSION,
    "SEOBNRv4P_Zframe": FLAG_SEOBNRv4P_ZFRAME_L,
    "SEOBNRv4P_debug": 0
}
@array_input()
def final_mass_of_merger_from_NSBH(
    mass_1, mass_2, spin_1z, lambda_2, approximant="IMRPhenomNSBH"
):
    """Calculate the final mass resulting from an NSBH merger using NSBH
    waveform models given samples for mass_1, mass_2, spin_1z and lambda_2.
    mass_1 and mass_2 should be in solar mass units.
    """
    from .tidal import _check_NSBH_approximant

    remnant_properties = _check_NSBH_approximant(
        approximant, mass_1, mass_2, spin_1z, lambda_2
    )
    # entry 4 of the returned sequence is the remnant (final) mass
    return remnant_properties[4]
@array_input()
def final_spin_of_merger_from_NSBH(
    mass_1, mass_2, spin_1z, lambda_2, approximant="IMRPhenomNSBH"
):
    """Calculate the final spin resulting from an NSBH merger using NSBH
    waveform models given samples for mass_1, mass_2, spin_1z and lambda_2.
    mass_1 and mass_2 should be in solar mass units.
    """
    from .tidal import _check_NSBH_approximant

    remnant_properties = _check_NSBH_approximant(
        approximant, mass_1, mass_2, spin_1z, lambda_2
    )
    # entry 5 of the returned sequence is the remnant (final) spin
    return remnant_properties[5]
@array_input()
def _final_from_initial_NSBH(
    mass_1, mass_2, spin_1z, lambda_2, approximant="IMRPhenomNSBH"
):
    """Calculate the final mass and final spin given the initial parameters
    of the binary using the approximant directly.

    Evaluates the NSBH waveform-model fit once and reads both remnant
    quantities from it, instead of calling final_mass_of_merger_from_NSBH and
    final_spin_of_merger_from_NSBH separately (which evaluated the identical
    fit twice). Parameters mirror those functions, so existing calls bind
    unchanged.
    """
    from .tidal import _check_NSBH_approximant

    properties = _check_NSBH_approximant(
        approximant, mass_1, mass_2, spin_1z, lambda_2
    )
    # indices 4 and 5 hold the remnant mass and remnant spin respectively,
    # matching final_mass_of_merger_from_NSBH / final_spin_of_merger_from_NSBH
    return [properties[4], properties[5]]
def _wrapper_return_final_mass_and_final_spin_from_waveform(args):
    """Wrapper function to calculate the remnant properties for a given waveform
    for a pool of workers

    Parameters
    ----------
    args: np.ndarray
        2 dimensional array giving arguments to pass to
        _return_final_mass_and_final_spin_from_waveform. The first argument
        in each sublist is the keyword and the second argument in each sublist
        is the item you wish to pass
    """
    # rebuild the keyword mapping from the [keyword, value] pairs
    keyword_arguments = {}
    for pair in args:
        keyword_arguments[pair[0]] = pair[1]
    return _return_final_mass_and_final_spin_from_waveform(**keyword_arguments)
def _return_final_mass_and_final_spin_from_waveform(
    mass_function=None, spin_function=None, mass_function_args=None,
    spin_function_args=None, mass_function_return_function=None,
    mass_function_return_index=None, spin_function_return_function=None,
    spin_function_return_index=None, mass_1_index=0, mass_2_index=1,
    nsamples=0, approximant=None, default_SEOBNRv4P_kwargs=False
):
    """Return the final mass and final spin given functions to use

    Parameters
    ----------
    mass_function: func
        function you wish to use to calculate the final mass
    spin_function: func
        function you wish to use to calculate the final spin
    mass_function_args: list, optional
        list of arguments you wish to pass to mass_function. Default None,
        meaning no arguments
    spin_function_args: list, optional
        list of arguments you wish to pass to spin_function. Default None,
        meaning no arguments
    mass_function_return_function: str, optional
        function used to extract the final mass from the quantity returned from
        mass_function. For example, if mass_function returns a list and the
        final_mass is a property of the 3 arg of this list,
        mass_function_return_function='[3].final_mass'
    mass_function_return_index: str, optional
        if mass_function returns a list of parameters,
        mass_function_return_index indicates the index of `final_mass` in the
        list
    spin_function_return_function: str, optional
        function used to extract the final spin from the quantity returned from
        spin_function. For example, if spin_function returns a list and the
        final_spin is a property of the 3 arg of this list,
        spin_function_return_function='[3].final_spin'
    spin_function_return_index: str, optional
        if spin_function returns a list of parameters,
        spin_function_return_index indicates the index of `final_spin` in the
        list
    mass_1_index: int, optional
        the index of mass_1 in mass_function_args. Default is 0
    mass_2_index: int, optional
        the index of mass_2 in mass_function_args. Default is 1
    nsamples: int, optional
        the total number of samples
    approximant: str, optional
        the approximant used
    default_SEOBNRv4P_kwargs: Bool, optional
        if True, use the default SEOBNRv4P flags
    """
    # None (rather than a mutable []) is used as the default so repeated
    # calls never share state
    if mass_function_args is None:
        mass_function_args = []
    if spin_function_args is None:
        spin_function_args = []
    if default_SEOBNRv4P_kwargs:
        mode_array, seob_flags = _setup_SEOBNRv4P_args()
        # build new lists rather than extending the caller's objects in place
        mass_function_args = list(mass_function_args) + [mode_array, seob_flags]
        spin_function_args = list(spin_function_args) + [mode_array, seob_flags]
    fm = mass_function(*mass_function_args)
    if mass_function_return_function is not None:
        # the "return function" is a string of attribute/index accesses which
        # is applied to the returned object, e.g. '[21].data[6]'
        fm = eval("fm{}".format(mass_function_return_function))
    elif mass_function_return_index is not None:
        fm = fm[mass_function_return_index]
    fs = spin_function(*spin_function_args)
    if spin_function_return_function is not None:
        fs = eval("fs{}".format(spin_function_return_function))
    elif spin_function_return_index is not None:
        fs = fs[spin_function_return_index]
    # the waveform returns the final mass as a fraction of the total mass
    # (which is stored in SI units); convert back to solar masses
    final_mass = fm * (
        mass_function_args[mass_1_index] + mass_function_args[mass_2_index]
    ) / MSUN_SI
    final_spin = fs
    return final_mass, final_spin
def _setup_SEOBNRv4P_args(mode=(2, 2), seob_flags=DEFAULT_SEOBFLAGS):
    """Setup the SEOBNRv4P[HM] kwargs

    Parameters
    ----------
    mode: tuple, optional
        the (l, m) mode to activate. Default (2, 2). An immutable tuple is
        used as the default to avoid the mutable-default pitfall; callers may
        still pass a list
    seob_flags: dict, optional
        dictionary of SEOB flags to insert into the LAL dictionary. Default
        DEFAULT_SEOBFLAGS

    Returns
    -------
    tuple
        the lalsimulation mode array and the populated lal dictionary
    """
    from lalsimulation import (
        SimInspiralCreateModeArray, SimInspiralModeArrayActivateMode
    )
    from lal import DictInsertINT4Value, CreateDict

    mode_array = SimInspiralCreateModeArray()
    SimInspiralModeArrayActivateMode(mode_array, mode[0], mode[1])
    _seob_flags = CreateDict()
    for key, item in seob_flags.items():
        DictInsertINT4Value(_seob_flags, key, item)
    return mode_array, _seob_flags
@array_input()
def _final_from_initial_BBH(
    mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z,
    approximant="SEOBNRv4", iota=None, luminosity_distance=None, f_ref=None,
    phi_ref=None, mode=[2, 2], delta_t=1. / 4096, seob_flags=DEFAULT_SEOBFLAGS,
    return_fits_used=False, multi_process=None
):
    """Calculate the final mass and final spin given the initial parameters
    of the binary using the approximant directly

    Parameters
    ----------
    mass_1: float/np.ndarray
        primary mass of the binary
    mass_2: float/np.ndarray
        secondary mass of the binary
    spin_1x: float/np.ndarray
        x component of the primary spin
    spin_1y: float/np.ndarray
        y component of the primary spin
    spin_1z: float/np.ndarray
        z component of the primary spin
    spin_2x: float/np.ndarray
        x component of the secondary spin
    spin_2y: float/np.ndarray
        y component of the secondary spin
    spin_2z: float/np.ndarray
        z component of the seconday spin
    approximant: str
        name of the approximant you wish to use for the remnant fits
    iota: float/np.ndarray, optional
        the angle between the total orbital angular momentum and the line of
        sight of the source. Used when calculating the remnant fits for
        SEOBNRv4PHM. Since we only need the EOB dynamics here it does not matter
        what we pass
    luminosity_distance: float/np.ndarray, optional
        the luminosity distance of the source. Used when calculating the
        remnant fits for SEOBNRv4PHM. Since we only need the EOB dynamics here
        it does not matter what we pass.
    f_ref: float/np.ndarray, optional
        the reference frequency at which the spins are defined
    phi_ref: float/np.ndarray, optional
        the coalescence phase of the binary
    mode: list, optional
        specific mode to use when calculating the remnant fits for SEOBNRv4PHM.
        Since we only need the EOB dynamics here it does not matter what we
        pass.
    delta_t: float, optional
        the sampling rate used in the analysis, Used when calculating the
        remnant fits for SEOBNRv4PHM
    seob_flags: dict, optional
        dictionary containing the SEOB flags. Used when calculating the remnant
        fits for SEOBNRv4PHM
    return_fits_used: Bool, optional
        if True, return the approximant that was used.
    multi_process: int, optional
        the number of cores to use when calculating the remnant fits
    """
    from lalsimulation import (
        SimIMREOBFinalMassSpin, SimIMREOBFinalMassSpinPrec,
        SimInspiralGetSpinSupportFromApproximant,
        SimIMRSpinPrecEOBWaveformAll, SimPhenomUtilsIMRPhenomDFinalMass,
        SimPhenomUtilsPhenomPv2FinalSpin
    )
    import multiprocessing

    def convert_args_for_multi_processing(kwargs):
        # repack the kwargs dict into one [[key, value], ...] list per sample
        # so each sample can be evaluated independently (and, when requested,
        # farmed out to a pool of workers)
        args = []
        for n in range(kwargs["nsamples"]):
            _args = []
            for key, item in kwargs.items():
                if key == "mass_function_args" or key == "spin_function_args":
                    _args.append([key, [arg[n] for arg in item]])
                else:
                    _args.append([key, item])
            args.append(_args)
        return args

    try:
        approx = getattr(lalsimulation, approximant)
    except AttributeError:
        # include the approximant name in the message (the original string
        # was never formatted)
        raise ValueError(
            "The waveform '{}' is not supported by lalsimulation".format(
                approximant
            )
        )
    m1 = mass_1 * MSUN_SI
    m2 = mass_2 * MSUN_SI
    kwargs = {"nsamples": len(mass_1), "approximant": approximant}
    if approximant.lower() in ["seobnrv4p", "seobnrv4phm"]:
        if any(i is None for i in [iota, luminosity_distance, f_ref, phi_ref]):
            raise ValueError(
                "The approximant '{}' requires samples for iota, f_ref, "
                "phi_ref and luminosity_distance. Please pass these "
                "samples.".format(approximant)
            )
        if len(delta_t) == 1:
            delta_t = [delta_t[0]] * len(mass_1)
        elif len(delta_t) != len(mass_1):
            raise ValueError(
                "Please provide either a single 'delta_t' that is is used for "
                "all samples, or a single 'delta_t' for each sample"
            )
        mode_array, _seob_flags = _setup_SEOBNRv4P_args(
            mode=mode, seob_flags=seob_flags
        )
        args = np.array([
            phi_ref, delta_t, m1, m2, f_ref, luminosity_distance, iota,
            spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z,
            [mode_array] * len(mass_1), [_seob_flags] * len(mass_1)
        ])
        kwargs.update(
            {
                "mass_function": SimIMRSpinPrecEOBWaveformAll,
                "spin_function": SimIMRSpinPrecEOBWaveformAll,
                "mass_function_args": args,
                "spin_function_args": args,
                # remnant mass/spin live in the 21st returned structure
                "mass_function_return_function": "[21].data[6]",
                "spin_function_return_function": "[21].data[7]",
                "mass_1_index": 2,
                "mass_2_index": 3,
            }
        )
    elif approximant.lower() in ["seobnrv4"]:
        spin1 = np.array([spin_1x, spin_1y, spin_1z]).T
        spin2 = np.array([spin_2x, spin_2y, spin_2z]).T
        app = np.array([approx] * len(mass_1))
        kwargs.update(
            {
                "mass_function": SimIMREOBFinalMassSpin,
                "spin_function": SimIMREOBFinalMassSpin,
                "mass_function_args": [m1, m2, spin1, spin2, app],
                "spin_function_args": [m1, m2, spin1, spin2, app],
                "mass_function_return_index": 1,
                "spin_function_return_index": 2
            }
        )
    elif "phenompv3" in approximant.lower():
        kwargs.update(
            {
                "mass_function": SimPhenomUtilsIMRPhenomDFinalMass,
                "spin_function": SimPhenomUtilsPhenomPv2FinalSpin,
                "mass_function_args": [m1, m2, spin_1z, spin_2z],
                "spin_function_args": [m1, m2, spin_1z, spin_2z]
            }
        )
        if SimInspiralGetSpinSupportFromApproximant(approx) > 2:
            # matches the waveform's internal usage as corrected in
            # https://git.ligo.org/lscsoft/lalsuite/-/merge_requests/1270
            _chi_p = chi_p(mass_1, mass_2, spin_1x, spin_1y, spin_2x, spin_2y)
            kwargs["spin_function_args"].append(_chi_p)
        else:
            kwargs["spin_function_args"].append(np.zeros_like(mass_1))
    else:
        raise ValueError(
            "The waveform '{}' is not support by this function.".format(
                approximant
            )
        )
    args = convert_args_for_multi_processing(kwargs)
    if multi_process is not None and multi_process[0] != 1:
        _multi_process = multi_process[0]
        if approximant.lower() in ["seobnrv4p", "seobnrv4phm"]:
            logger.warning(
                "Ignoring passed 'mode' and 'seob_flags' options. Defaults "
                "must be used with multiprocessing. If you wish to use custom "
                "options, please set `multi_process=None`"
            )
            # drop the non-picklable mode array / lal dict and let each worker
            # rebuild the defaults itself
            _kwargs = kwargs.copy()
            _kwargs["mass_function_args"] = kwargs["mass_function_args"][:-2]
            _kwargs["spin_function_args"] = kwargs["spin_function_args"][:-2]
            _kwargs["default_SEOBNRv4P_kwargs"] = True
            args = convert_args_for_multi_processing(_kwargs)
        with multiprocessing.Pool(_multi_process) as pool:
            data = np.array(list(
                iterator(
                    pool.imap(
                        _wrapper_return_final_mass_and_final_spin_from_waveform,
                        args
                    ), tqdm=True, desc="Evaluating {} fit".format(approximant),
                    logger=logger, total=len(mass_1)
                )
            )).T
    else:
        final_mass, final_spin = [], []
        _iterator = iterator(
            range(kwargs["nsamples"]), tqdm=True, total=len(mass_1),
            desc="Evaluating {} fit".format(approximant), logger=logger
        )
        for i in _iterator:
            data = _wrapper_return_final_mass_and_final_spin_from_waveform(
                args[i]
            )
            final_mass.append(data[0])
            final_spin.append(data[1])
        data = [final_mass, final_spin]
    if return_fits_used:
        return data, [approximant]
    return data
def final_remnant_properties_from_NRSurrogate(
    *args, f_low=20., f_ref=20., model="NRSur7dq4Remnant", return_fits_used=False,
    properties=["final_mass", "final_spin", "final_kick"], approximant="SEOBNRv4PHM"
):
    """Return the properties of the final remnant resulting from a BBH merger using
    NRSurrogate fits

    Parameters
    ----------
    f_low: float/np.ndarray
        The low frequency cut-off used in the analysis. Default is 20Hz
    f_ref: float/np.ndarray
        The reference frequency used in the analysis. Default is 20Hz
    model: str, optional
        The name of the NRSurrogate model you wish to use
    return_fits_used: Bool, optional
        if True, return the approximant that was used.
    properties: list, optional
        The list of properties you wish to calculate
    approximant: str, optional
        The approximant that was used to generate the posterior samples
    """
    from .nrutils import NRSur_fit

    fit_data = NRSur_fit(
        *args, f_low=f_low, f_ref=f_ref, model=model, fits=properties,
        approximant=approximant
    )
    return (fit_data, [model]) if return_fits_used else fit_data
def final_mass_of_merger_from_NR(
    *args, NRfit="average", final_spin=None, return_fits_used=False
):
    """Return the final mass resulting from a BBH merger using NR fits

    Parameters
    ----------
    NRfit: str
        Name of the fit you wish to use. If you wish to use a precessing fit
        please use the syntax 'precessing_{}'.format(fit_name). If you wish
        to have an average NR fit, then pass 'average'
    final_spin: float/np.ndarray, optional
        precomputed final spin of the remnant.
    return_fits_used: Bool, optional
        if True, return the fits that were used. Only used when NRfit='average'
    """
    from pesummary.gw.conversions import nrutils

    fit_name = NRfit.lower()
    if fit_name == "average":
        func = nrutils.bbh_final_mass_average
    elif "panetal" in fit_name:
        func = nrutils.bbh_final_mass_non_spinning_Panetal
    else:
        func = getattr(
            nrutils, "bbh_final_mass_non_precessing_{}".format(NRfit)
        )
    # the Healy fits additionally accept a precomputed final spin
    if "healy" in fit_name:
        return func(*args, final_spin=final_spin)
    if fit_name == "average":
        return func(*args, return_fits_used=return_fits_used)
    return func(*args)
def final_mass_of_merger_from_NRSurrogate(
    *args, model="NRSur7dq4Remnant", return_fits_used=False, approximant="SEOBNRv4PHM"
):
    """Return the final mass resulting from a BBH merger using NRSurrogate
    fits
    """
    data = final_remnant_properties_from_NRSurrogate(
        *args, model=model, properties=["final_mass"],
        return_fits_used=return_fits_used, approximant=approximant
    )
    if return_fits_used:
        samples, fits = data
        return samples["final_mass"], fits
    return data["final_mass"]
def final_mass_of_merger_from_waveform(*args, NSBH=False, **kwargs):
    """Return the final mass resulting from a BBH/NSBH merger using a given
    approximant

    Parameters
    ----------
    NSBH: Bool, optional
        if True, use NSBH waveform fits. Default False
    """
    if NSBH or "nsbh" in kwargs.get("approximant", "").lower():
        # _final_from_initial_NSBH returns [final_mass, final_spin]; take
        # index 0 (the original indexed [1], which is the final SPIN)
        return _final_from_initial_NSBH(*args, **kwargs)[0]
    return _final_from_initial_BBH(*args, **kwargs)[0]
def final_spin_of_merger_from_NR(
    *args, NRfit="average", return_fits_used=False
):
    """Return the final spin resulting from a BBH merger using NR fits

    Parameters
    ----------
    NRfit: str
        Name of the fit you wish to use. If you wish to use a precessing fit
        please use the syntax 'precessing_{}'.format(fit_name). If you wish
        to have an average NR fit, then pass 'average'
    return_fits_used: Bool, optional
        if True, return the fits that were used. Only used when NRfit='average'
    """
    from pesummary.gw.conversions import nrutils

    fit_name = NRfit.lower()
    if fit_name == "average":
        func = nrutils.bbh_final_spin_average_precessing
        return func(*args, return_fits_used=return_fits_used)
    if "pan" in fit_name:
        func = nrutils.bbh_final_spin_non_spinning_Panetal
    elif "precessing" in fit_name:
        func = getattr(
            nrutils, "bbh_final_spin_precessing_{}".format(
                NRfit.split("precessing_")[1]
            )
        )
    else:
        func = getattr(
            nrutils, "bbh_final_spin_non_precessing_{}".format(NRfit)
        )
    return func(*args)
def final_spin_of_merger_from_NRSurrogate(
    *args, model="NRSur7dq4Remnant", return_fits_used=False, approximant="SEOBNRv4PHM"
):
    """Return the final spin resulting from a BBH merger using NRSurrogate
    fits
    """
    data = final_remnant_properties_from_NRSurrogate(
        *args, model=model, properties=["final_spin"],
        return_fits_used=return_fits_used, approximant=approximant
    )
    if return_fits_used:
        samples, fits = data
        return samples["final_spin"], fits
    return data["final_spin"]
def final_spin_of_merger_from_waveform(*args, NSBH=False, **kwargs):
    """Return the final spin resulting from a BBH/NSBH merger using a given
    approximant.

    Parameters
    ----------
    NSBH: Bool, optional
        if True, use NSBH waveform fits. Default False
    """
    # both helpers return [final_mass, final_spin]; index 1 is the spin
    use_nsbh = NSBH or "nsbh" in kwargs.get("approximant", "").lower()
    routine = _final_from_initial_NSBH if use_nsbh else _final_from_initial_BBH
    return routine(*args, **kwargs)[1]
def final_kick_of_merger_from_NRSurrogate(
    *args, model="NRSur7dq4Remnant", return_fits_used=False, approximant="SEOBNRv4PHM"
):
    """Return the final kick of the remnant resulting from a BBH merger
    using NRSurrogate fits
    """
    data = final_remnant_properties_from_NRSurrogate(
        *args, model=model, properties=["final_kick"],
        return_fits_used=return_fits_used, approximant=approximant
    )
    if return_fits_used:
        samples, fits = data
        return samples["final_kick"], fits
    return data["final_kick"]
def final_mass_of_merger(
    *args, method="NR", approximant="SEOBNRv4", NRfit="average",
    final_spin=None, return_fits_used=False, model="NRSur7dq4Remnant"
):
    """Return the final mass resulting from a BBH merger

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    spin_1z: float/np.ndarray
        float/array of primary spin aligned with the orbital angular momentum
    spin_2z: float/np.ndarray
        float/array of secondary spin aligned with the orbital angular momentum
    method: str
        The method you wish to use to calculate the final mass of merger. Either
        NR, NRSurrogate or waveform
    approximant: str
        Name of the approximant you wish to use if the chosen method is waveform
        or NRSurrogate
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average' or when method='NRSurrogate'
    model: str, optional
        The NRSurrogate model to use when evaluating the fits
    """
    _method = method.lower()
    if _method == "nr":
        return final_mass_of_merger_from_NR(
            *args, NRfit=NRfit, final_spin=final_spin,
            return_fits_used=return_fits_used
        )
    if "nrsur" in _method:
        return final_mass_of_merger_from_NRSurrogate(
            *args, approximant=approximant,
            return_fits_used=return_fits_used, model=model
        )
    # anything else falls through to evaluating the waveform directly
    return final_mass_of_merger_from_waveform(*args, approximant=approximant)
def final_spin_of_merger(
    *args, method="NR", approximant="SEOBNRv4", NRfit="average",
    return_fits_used=False, model="NRSur7dq4Remnant"
):
    """Return the final mass resulting from a BBH merger

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    a_1: float/np.ndarray
        float/array of primary spin magnitudes
    a_2: float/np.ndarray
        float/array of secondary spin magnitudes
    tilt_1: float/np.ndarray
        float/array of primary spin tilt angle from the orbital angular momentum
    tilt_2: float/np.ndarray
        float/array of secondary spin tilt angle from the orbital angular
        momentum
    phi_12: float/np.ndarray
        float/array of samples for the angle between the in-plane spin
        components
    method: str
        The method you wish to use to calculate the final mass of merger. Either
        NR, NRSurrogate or waveform
    approximant: str
        Name of the approximant you wish to use if the chosen method is waveform
        or NRSurrogate
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average' or when method='NRSurrogate'
    model: str, optional
        The NRSurrogate model to use when evaluating the fits
    """
    _method = method.lower()
    if _method == "nr":
        return final_spin_of_merger_from_NR(
            *args, NRfit=NRfit, return_fits_used=return_fits_used
        )
    if "nrsur" in _method:
        return final_spin_of_merger_from_NRSurrogate(
            *args, approximant=approximant,
            return_fits_used=return_fits_used, model=model
        )
    # anything else falls through to evaluating the waveform directly
    return final_spin_of_merger_from_waveform(*args, approximant=approximant)
def final_kick_of_merger(
    *args, method="NR", approximant="SEOBNRv4", NRfit="average",
    return_fits_used=False, model="NRSur7dq4Remnant"
):
    """Return the final kick velocity of the remnant resulting from a BBH merger

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    a_1: float/np.ndarray
        float/array of primary spin magnitudes
    a_2: float/np.ndarray
        float/array of secondary spin magnitudes
    tilt_1: float/np.ndarray
        float/array of primary spin tilt angle from the orbital angular momentum
    tilt_2: float/np.ndarray
        float/array of secondary spin tilt angle from the orbital angular
        momentum
    phi_12: float/np.ndarray
        float/array of samples for the angle between the in-plane spin
        components
    method: str
        The method you wish to use to calculate the final kick of merger. Either
        NR, NRSurrogate or waveform
    approximant: str
        Name of the approximant you wish to use if the chosen method is waveform
        or NRSurrogate
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average' or when method='NRSurrogate'. Default False.
        (The original signature had ``return_fits_used: False`` — a bare
        annotation with no default — which made the keyword mandatory and
        raised a TypeError on every call that omitted it.)
    model: str, optional
        The NRSurrogate model to use when evaluating the fits
    """
    # only the NRSurrogate fits currently provide the remnant kick velocity
    if "nrsur" not in method.lower():
        raise NotImplementedError(
            "Currently you can only work out the final kick velocity using "
            "NRSurrogate fits."
        )
    velocity_func = final_kick_of_merger_from_NRSurrogate
    kwargs = {
        "approximant": approximant, "return_fits_used": return_fits_used,
        "model": model
    }
    return velocity_func(*args, **kwargs)
def peak_luminosity_of_merger(*args, NRfit="average", return_fits_used=False):
    """Return the peak luminosity of an aligned-spin BBH using NR fits

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    spin_1z: float/np.ndarray
        float/array of primary spin aligned with the orbital angular momentum
    spin_2z: float/np.ndarray
        float/array of secondary spin aligned with the orbital angular momentum
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average'
    """
    from pesummary.gw.conversions import nrutils

    if NRfit.lower() == "average":
        # only the averaged fit accepts the return_fits_used keyword
        func = nrutils.bbh_peak_luminosity_average
        return func(*args, return_fits_used=return_fits_used)
    func = getattr(
        nrutils, "bbh_peak_luminosity_non_precessing_{}".format(NRfit)
    )
    return func(*args)
| 37.997283 | 87 | 0.667525 | # Licensed under an MIT style license -- see LICENSE.md
import numpy as np
from pesummary.utils.utils import logger, iterator
from pesummary.utils.decorators import array_input
from .spins import chi_p
__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
try:
import lalsimulation
from lalsimulation import (
FLAG_SEOBNRv4P_HAMILTONIAN_DERIVATIVE_NUMERICAL,
FLAG_SEOBNRv4P_EULEREXT_QNM_SIMPLE_PRECESSION,
FLAG_SEOBNRv4P_ZFRAME_L
)
from lal import MSUN_SI
except ImportError:
pass
DEFAULT_SEOBFLAGS = {
"SEOBNRv4P_SpinAlignedEOBversion": 4,
"SEOBNRv4P_SymmetrizehPlminusm": 1,
"SEOBNRv4P_HamiltonianDerivative": FLAG_SEOBNRv4P_HAMILTONIAN_DERIVATIVE_NUMERICAL,
"SEOBNRv4P_euler_extension": FLAG_SEOBNRv4P_EULEREXT_QNM_SIMPLE_PRECESSION,
"SEOBNRv4P_Zframe": FLAG_SEOBNRv4P_ZFRAME_L,
"SEOBNRv4P_debug": 0
}
@array_input()
def final_mass_of_merger_from_NSBH(
    mass_1, mass_2, spin_1z, lambda_2, approximant="IMRPhenomNSBH"
):
    """Calculate the final mass resulting from an NSBH merger using NSBH
    waveform models given samples for mass_1, mass_2, spin_1z and lambda_2.
    mass_1 and mass_2 should be in solar mass units.
    """
    from .tidal import _check_NSBH_approximant

    # index 4 of the quantities returned by _check_NSBH_approximant holds
    # the remnant mass
    properties = _check_NSBH_approximant(
        approximant, mass_1, mass_2, spin_1z, lambda_2
    )
    return properties[4]
@array_input()
def final_spin_of_merger_from_NSBH(
    mass_1, mass_2, spin_1z, lambda_2, approximant="IMRPhenomNSBH"
):
    """Calculate the final spin resulting from an NSBH merger using NSBH
    waveform models given samples for mass_1, mass_2, spin_1z and lambda_2.
    mass_1 and mass_2 should be in solar mass units.
    """
    from .tidal import _check_NSBH_approximant

    # index 5 of the quantities returned by _check_NSBH_approximant holds
    # the remnant spin
    properties = _check_NSBH_approximant(
        approximant, mass_1, mass_2, spin_1z, lambda_2
    )
    return properties[5]
@array_input()
def _final_from_initial_NSBH(*args, **kwargs):
    """Calculate the final mass and final spin given the initial parameters
    of the binary using the approximant directly
    """
    final_mass = final_mass_of_merger_from_NSBH(*args, **kwargs)
    final_spin = final_spin_of_merger_from_NSBH(*args, **kwargs)
    return [final_mass, final_spin]
def _wrapper_return_final_mass_and_final_spin_from_waveform(args):
    """Wrapper function to calculate the remnant properties for a given waveform
    for a pool of workers

    Parameters
    ----------
    args: np.ndarray
        2 dimensional array giving arguments to pass to
        _return_final_mass_and_final_spin_from_waveform. The first argument
        in each sublist is the keyword and the second argument in each sublist
        is the item you wish to pass
    """
    # each entry of args is a [keyword, value] pair; dict() consumes such an
    # iterable of pairs directly
    keyword_arguments = dict(args)
    return _return_final_mass_and_final_spin_from_waveform(**keyword_arguments)
def _return_final_mass_and_final_spin_from_waveform(
    mass_function=None, spin_function=None, mass_function_args=None,
    spin_function_args=None, mass_function_return_function=None,
    mass_function_return_index=None, spin_function_return_function=None,
    spin_function_return_index=None, mass_1_index=0, mass_2_index=1,
    nsamples=0, approximant=None, default_SEOBNRv4P_kwargs=False
):
    """Return the final mass and final spin given functions to use

    Parameters
    ----------
    mass_function: func
        function you wish to use to calculate the final mass
    spin_function: func
        function you wish to use to calculate the final spin
    mass_function_args: list, optional
        list of arguments you wish to pass to mass_function. Default None,
        meaning no arguments
    spin_function_args: list, optional
        list of arguments you wish to pass to spin_function. Default None,
        meaning no arguments
    mass_function_return_function: str, optional
        function used to extract the final mass from the quantity returned from
        mass_function. For example, if mass_function returns a list and the
        final_mass is a property of the 3 arg of this list,
        mass_function_return_function='[3].final_mass'
    mass_function_return_index: str, optional
        if mass_function returns a list of parameters,
        mass_function_return_index indicates the index of `final_mass` in the
        list
    spin_function_return_function: str, optional
        function used to extract the final spin from the quantity returned from
        spin_function. For example, if spin_function returns a list and the
        final_spin is a property of the 3 arg of this list,
        spin_function_return_function='[3].final_spin'
    spin_function_return_index: str, optional
        if spin_function returns a list of parameters,
        spin_function_return_index indicates the index of `final_spin` in the
        list
    mass_1_index: int, optional
        the index of mass_1 in mass_function_args. Default is 0
    mass_2_index: int, optional
        the index of mass_2 in mass_function_args. Default is 1
    nsamples: int, optional
        the total number of samples
    approximant: str, optional
        the approximant used
    default_SEOBNRv4P_kwargs: Bool, optional
        if True, use the default SEOBNRv4P flags
    """
    # None (rather than a mutable []) is used as the default so repeated
    # calls never share state
    if mass_function_args is None:
        mass_function_args = []
    if spin_function_args is None:
        spin_function_args = []
    if default_SEOBNRv4P_kwargs:
        mode_array, seob_flags = _setup_SEOBNRv4P_args()
        # build new lists rather than extending the caller's objects in place
        mass_function_args = list(mass_function_args) + [mode_array, seob_flags]
        spin_function_args = list(spin_function_args) + [mode_array, seob_flags]
    fm = mass_function(*mass_function_args)
    if mass_function_return_function is not None:
        # the "return function" is a string of attribute/index accesses which
        # is applied to the returned object, e.g. '[21].data[6]'
        fm = eval("fm{}".format(mass_function_return_function))
    elif mass_function_return_index is not None:
        fm = fm[mass_function_return_index]
    fs = spin_function(*spin_function_args)
    if spin_function_return_function is not None:
        fs = eval("fs{}".format(spin_function_return_function))
    elif spin_function_return_index is not None:
        fs = fs[spin_function_return_index]
    # the waveform returns the final mass as a fraction of the total mass
    # (which is stored in SI units); convert back to solar masses
    final_mass = fm * (
        mass_function_args[mass_1_index] + mass_function_args[mass_2_index]
    ) / MSUN_SI
    final_spin = fs
    return final_mass, final_spin
def _setup_SEOBNRv4P_args(mode=(2, 2), seob_flags=DEFAULT_SEOBFLAGS):
    """Setup the SEOBNRv4P[HM] kwargs

    Parameters
    ----------
    mode: tuple, optional
        the (l, m) mode to activate. Default (2, 2). An immutable tuple is
        used as the default to avoid the mutable-default pitfall; callers may
        still pass a list
    seob_flags: dict, optional
        dictionary of SEOB flags to insert into the LAL dictionary. Default
        DEFAULT_SEOBFLAGS

    Returns
    -------
    tuple
        the lalsimulation mode array and the populated lal dictionary
    """
    from lalsimulation import (
        SimInspiralCreateModeArray, SimInspiralModeArrayActivateMode
    )
    from lal import DictInsertINT4Value, CreateDict

    mode_array = SimInspiralCreateModeArray()
    SimInspiralModeArrayActivateMode(mode_array, mode[0], mode[1])
    _seob_flags = CreateDict()
    for key, item in seob_flags.items():
        DictInsertINT4Value(_seob_flags, key, item)
    return mode_array, _seob_flags
@array_input()
def _final_from_initial_BBH(
    mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z,
    approximant="SEOBNRv4", iota=None, luminosity_distance=None, f_ref=None,
    phi_ref=None, mode=[2, 2], delta_t=1. / 4096, seob_flags=DEFAULT_SEOBFLAGS,
    return_fits_used=False, multi_process=None
):
    """Calculate the final mass and final spin given the initial parameters
    of the binary using the approximant directly

    Parameters
    ----------
    mass_1: float/np.ndarray
        primary mass of the binary
    mass_2: float/np.ndarray
        secondary mass of the binary
    spin_1x: float/np.ndarray
        x component of the primary spin
    spin_1y: float/np.ndarray
        y component of the primary spin
    spin_1z: float/np.ndarray
        z component of the primary spin
    spin_2x: float/np.ndarray
        x component of the secondary spin
    spin_2y: float/np.ndarray
        y component of the secondary spin
    spin_2z: float/np.ndarray
        z component of the seconday spin
    approximant: str
        name of the approximant you wish to use for the remnant fits
    iota: float/np.ndarray, optional
        the angle between the total orbital angular momentum and the line of
        sight of the source. Used when calculating the remnant fits for
        SEOBNRv4PHM. Since we only need the EOB dynamics here it does not matter
        what we pass
    luminosity_distance: float/np.ndarray, optional
        the luminosity distance of the source. Used when calculating the
        remnant fits for SEOBNRv4PHM. Since we only need the EOB dynamics here
        it does not matter what we pass.
    f_ref: float/np.ndarray, optional
        the reference frequency at which the spins are defined
    phi_ref: float/np.ndarray, optional
        the coalescence phase of the binary
    mode: list, optional
        specific mode to use when calculating the remnant fits for SEOBNRv4PHM.
        Since we only need the EOB dynamics here it does not matter what we
        pass.
    delta_t: float, optional
        the sampling rate used in the analysis, Used when calculating the
        remnant fits for SEOBNRv4PHM
    seob_flags: dict, optional
        dictionary containing the SEOB flags. Used when calculating the remnant
        fits for SEOBNRv4PHM
    return_fits_used: Bool, optional
        if True, return the approximant that was used.
    multi_process: int, optional
        the number of cores to use when calculating the remnant fits
    """
    from lalsimulation import (
        SimIMREOBFinalMassSpin, SimIMREOBFinalMassSpinPrec,
        SimInspiralGetSpinSupportFromApproximant,
        SimIMRSpinPrecEOBWaveformAll, SimPhenomUtilsIMRPhenomDFinalMass,
        SimPhenomUtilsPhenomPv2FinalSpin
    )
    import multiprocessing

    def convert_args_for_multi_processing(kwargs):
        # repack the kwargs dict into one [[key, value], ...] list per sample
        # so each sample can be evaluated independently (and, when requested,
        # farmed out to a pool of workers)
        args = []
        for n in range(kwargs["nsamples"]):
            _args = []
            for key, item in kwargs.items():
                if key == "mass_function_args" or key == "spin_function_args":
                    _args.append([key, [arg[n] for arg in item]])
                else:
                    _args.append([key, item])
            args.append(_args)
        return args

    try:
        approx = getattr(lalsimulation, approximant)
    except AttributeError:
        # include the approximant name in the message (the original string
        # was never formatted)
        raise ValueError(
            "The waveform '{}' is not supported by lalsimulation".format(
                approximant
            )
        )
    m1 = mass_1 * MSUN_SI
    m2 = mass_2 * MSUN_SI
    kwargs = {"nsamples": len(mass_1), "approximant": approximant}
    if approximant.lower() in ["seobnrv4p", "seobnrv4phm"]:
        if any(i is None for i in [iota, luminosity_distance, f_ref, phi_ref]):
            raise ValueError(
                "The approximant '{}' requires samples for iota, f_ref, "
                "phi_ref and luminosity_distance. Please pass these "
                "samples.".format(approximant)
            )
        if len(delta_t) == 1:
            delta_t = [delta_t[0]] * len(mass_1)
        elif len(delta_t) != len(mass_1):
            raise ValueError(
                "Please provide either a single 'delta_t' that is is used for "
                "all samples, or a single 'delta_t' for each sample"
            )
        mode_array, _seob_flags = _setup_SEOBNRv4P_args(
            mode=mode, seob_flags=seob_flags
        )
        args = np.array([
            phi_ref, delta_t, m1, m2, f_ref, luminosity_distance, iota,
            spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z,
            [mode_array] * len(mass_1), [_seob_flags] * len(mass_1)
        ])
        kwargs.update(
            {
                "mass_function": SimIMRSpinPrecEOBWaveformAll,
                "spin_function": SimIMRSpinPrecEOBWaveformAll,
                "mass_function_args": args,
                "spin_function_args": args,
                # remnant mass/spin live in the 21st returned structure
                "mass_function_return_function": "[21].data[6]",
                "spin_function_return_function": "[21].data[7]",
                "mass_1_index": 2,
                "mass_2_index": 3,
            }
        )
    elif approximant.lower() in ["seobnrv4"]:
        spin1 = np.array([spin_1x, spin_1y, spin_1z]).T
        spin2 = np.array([spin_2x, spin_2y, spin_2z]).T
        app = np.array([approx] * len(mass_1))
        kwargs.update(
            {
                "mass_function": SimIMREOBFinalMassSpin,
                "spin_function": SimIMREOBFinalMassSpin,
                "mass_function_args": [m1, m2, spin1, spin2, app],
                "spin_function_args": [m1, m2, spin1, spin2, app],
                "mass_function_return_index": 1,
                "spin_function_return_index": 2
            }
        )
    elif "phenompv3" in approximant.lower():
        kwargs.update(
            {
                "mass_function": SimPhenomUtilsIMRPhenomDFinalMass,
                "spin_function": SimPhenomUtilsPhenomPv2FinalSpin,
                "mass_function_args": [m1, m2, spin_1z, spin_2z],
                "spin_function_args": [m1, m2, spin_1z, spin_2z]
            }
        )
        if SimInspiralGetSpinSupportFromApproximant(approx) > 2:
            # matches the waveform's internal usage as corrected in
            # https://git.ligo.org/lscsoft/lalsuite/-/merge_requests/1270
            _chi_p = chi_p(mass_1, mass_2, spin_1x, spin_1y, spin_2x, spin_2y)
            kwargs["spin_function_args"].append(_chi_p)
        else:
            kwargs["spin_function_args"].append(np.zeros_like(mass_1))
    else:
        raise ValueError(
            "The waveform '{}' is not support by this function.".format(
                approximant
            )
        )
    args = convert_args_for_multi_processing(kwargs)
    if multi_process is not None and multi_process[0] != 1:
        _multi_process = multi_process[0]
        if approximant.lower() in ["seobnrv4p", "seobnrv4phm"]:
            logger.warning(
                "Ignoring passed 'mode' and 'seob_flags' options. Defaults "
                "must be used with multiprocessing. If you wish to use custom "
                "options, please set `multi_process=None`"
            )
            # drop the non-picklable mode array / lal dict and let each worker
            # rebuild the defaults itself
            _kwargs = kwargs.copy()
            _kwargs["mass_function_args"] = kwargs["mass_function_args"][:-2]
            _kwargs["spin_function_args"] = kwargs["spin_function_args"][:-2]
            _kwargs["default_SEOBNRv4P_kwargs"] = True
            args = convert_args_for_multi_processing(_kwargs)
        with multiprocessing.Pool(_multi_process) as pool:
            data = np.array(list(
                iterator(
                    pool.imap(
                        _wrapper_return_final_mass_and_final_spin_from_waveform,
                        args
                    ), tqdm=True, desc="Evaluating {} fit".format(approximant),
                    logger=logger, total=len(mass_1)
                )
            )).T
    else:
        final_mass, final_spin = [], []
        _iterator = iterator(
            range(kwargs["nsamples"]), tqdm=True, total=len(mass_1),
            desc="Evaluating {} fit".format(approximant), logger=logger
        )
        for i in _iterator:
            data = _wrapper_return_final_mass_and_final_spin_from_waveform(
                args[i]
            )
            final_mass.append(data[0])
            final_spin.append(data[1])
        data = [final_mass, final_spin]
    if return_fits_used:
        return data, [approximant]
    return data
def final_remnant_properties_from_NRSurrogate(
    *args, f_low=20., f_ref=20., model="NRSur7dq4Remnant", return_fits_used=False,
    properties=["final_mass", "final_spin", "final_kick"], approximant="SEOBNRv4PHM"
):
    """Return the properties of the final remnant resulting from a BBH merger
    using NRSurrogate fits.

    Parameters
    ----------
    f_low: float/np.ndarray
        The low frequency cut-off used in the analysis. Default is 20Hz
    f_ref: float/np.ndarray
        The reference frequency used in the analysis. Default is 20Hz
    model: str, optional
        The name of the NRSurrogate model you wish to use
    return_fits_used: Bool, optional
        if True, also return the model that was used as a one-element list
    properties: list, optional
        The list of remnant properties you wish to calculate
    approximant: str, optional
        The approximant that was used to generate the posterior samples
    """
    from .nrutils import NRSur_fit

    # Delegate the evaluation to the NRSurrogate fitting routine; ``properties``
    # is passed through untouched, so the (shared) default list is safe here.
    remnant = NRSur_fit(
        *args, f_low=f_low, f_ref=f_ref, model=model, fits=properties,
        approximant=approximant
    )
    if not return_fits_used:
        return remnant
    return remnant, [model]
def final_mass_of_merger_from_NR(
    *args, NRfit="average", final_spin=None, return_fits_used=False
):
    """Return the final mass resulting from a BBH merger using NR fits.

    Parameters
    ----------
    NRfit: str
        Name of the fit you wish to use. If you wish to use a precessing fit
        please use the syntax 'precessing_{}'.format(fit_name). If you wish
        to have an average NR fit, then pass 'average'
    final_spin: float/np.ndarray, optional
        precomputed final spin of the remnant.
    return_fits_used: Bool, optional
        if True, return the fits that were used. Only used when NRfit='average'
    """
    from pesummary.gw.conversions import nrutils

    fit_name = NRfit.lower()
    # Resolve the fitting function on the nrutils module by name.
    if fit_name == "average":
        func = nrutils.bbh_final_mass_average
    elif "panetal" in fit_name:
        func = nrutils.bbh_final_mass_non_spinning_Panetal
    else:
        func = getattr(
            nrutils, "bbh_final_mass_non_precessing_{}".format(NRfit)
        )
    # The Healy et al. fits additionally require the remnant's final spin.
    if "healy" in fit_name:
        return func(*args, final_spin=final_spin)
    if fit_name == "average":
        return func(*args, return_fits_used=return_fits_used)
    return func(*args)
def final_mass_of_merger_from_NRSurrogate(
    *args, model="NRSur7dq4Remnant", return_fits_used=False, approximant="SEOBNRv4PHM"
):
    """Return the final mass resulting from a BBH merger using NRSurrogate
    fits.
    """
    result = final_remnant_properties_from_NRSurrogate(
        *args, model=model, properties=["final_mass"],
        return_fits_used=return_fits_used,
        approximant=approximant
    )
    if not return_fits_used:
        return result["final_mass"]
    # When the fits are requested, the helper returns (properties, [model]).
    remnant, fits = result
    return remnant["final_mass"], fits
def final_mass_of_merger_from_waveform(*args, NSBH=False, **kwargs):
    """Return the final mass resulting from a BBH/NSBH merger using a given
    approximant.

    Parameters
    ----------
    NSBH: Bool, optional
        if True, use NSBH waveform fits. Default False
    """
    if NSBH or "nsbh" in kwargs.get("approximant", "").lower():
        # Index 0 selects the final mass; index 1 is the final spin (see
        # final_spin_of_merger_from_waveform, which uses [1] for both
        # branches). The previous [1] here returned the spin for NSBH
        # systems. NOTE(review): assumes _final_from_initial_NSBH returns
        # (final_mass, final_spin) like the BBH variant — confirm.
        return _final_from_initial_NSBH(*args, **kwargs)[0]
    return _final_from_initial_BBH(*args, **kwargs)[0]
def final_spin_of_merger_from_NR(
    *args, NRfit="average", return_fits_used=False
):
    """Return the final spin resulting from a BBH merger using NR fits.

    Parameters
    ----------
    NRfit: str
        Name of the fit you wish to use. If you wish to use a precessing fit
        please use the syntax 'precessing_{}'.format(fit_name). If you wish
        to have an average NR fit, then pass 'average'
    return_fits_used: Bool, optional
        if True, return the fits that were used. Only used when NRfit='average'
    """
    from pesummary.gw.conversions import nrutils

    fit_name = NRfit.lower()
    # Resolve the fitting function on the nrutils module by name.
    if fit_name == "average":
        func = nrutils.bbh_final_spin_average_precessing
    elif "pan" in fit_name:
        func = nrutils.bbh_final_spin_non_spinning_Panetal
    elif "precessing" in fit_name:
        suffix = NRfit.split("precessing_")[1]
        func = getattr(
            nrutils, "bbh_final_spin_precessing_{}".format(suffix)
        )
    else:
        func = getattr(
            nrutils, "bbh_final_spin_non_precessing_{}".format(NRfit)
        )
    if fit_name == "average":
        return func(*args, return_fits_used=return_fits_used)
    return func(*args)
def final_spin_of_merger_from_NRSurrogate(
    *args, model="NRSur7dq4Remnant", return_fits_used=False, approximant="SEOBNRv4PHM"
):
    """Return the final spin resulting from a BBH merger using NRSurrogate
    fits.
    """
    result = final_remnant_properties_from_NRSurrogate(
        *args, model=model, properties=["final_spin"],
        return_fits_used=return_fits_used, approximant=approximant
    )
    if not return_fits_used:
        return result["final_spin"]
    # When the fits are requested, the helper returns (properties, [model]).
    remnant, fits = result
    return remnant["final_spin"], fits
def final_spin_of_merger_from_waveform(*args, NSBH=False, **kwargs):
    """Return the final spin resulting from a BBH/NSBH merger using a given
    approximant.

    Parameters
    ----------
    NSBH: Bool, optional
        if True, use NSBH waveform fits. Default False
    """
    use_nsbh = NSBH or "nsbh" in kwargs.get("approximant", "").lower()
    fit = _final_from_initial_NSBH if use_nsbh else _final_from_initial_BBH
    # Element 1 of the returned (final_mass, final_spin) pair is the spin.
    return fit(*args, **kwargs)[1]
def final_kick_of_merger_from_NRSurrogate(
    *args, model="NRSur7dq4Remnant", return_fits_used=False, approximant="SEOBNRv4PHM"
):
    """Return the final kick of the remnant resulting from a BBH merger
    using NRSurrogate fits.
    """
    result = final_remnant_properties_from_NRSurrogate(
        *args, model=model, properties=["final_kick"],
        return_fits_used=return_fits_used, approximant=approximant
    )
    if not return_fits_used:
        return result["final_kick"]
    # When the fits are requested, the helper returns (properties, [model]).
    remnant, fits = result
    return remnant["final_kick"], fits
def final_mass_of_merger(
    *args, method="NR", approximant="SEOBNRv4", NRfit="average",
    final_spin=None, return_fits_used=False, model="NRSur7dq4Remnant"
):
    """Return the final mass resulting from a BBH merger.

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    spin_1z: float/np.ndarray
        float/array of primary spin aligned with the orbital angular momentum
    spin_2z: float/np.ndarray
        float/array of secondary spin aligned with the orbital angular momentum
    method: str
        The method you wish to use to calculate the final mass of merger. Either
        NR, NRSurrogate or waveform
    approximant: str
        Name of the approximant you wish to use if the chosen method is waveform
        or NRSurrogate
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average' or when method='NRSurrogate'
    model: str, optional
        The NRSurrogate model to use when evaluating the fits
    """
    # Dispatch directly to the backend selected by ``method``.
    _method = method.lower()
    if _method == "nr":
        return final_mass_of_merger_from_NR(
            *args, NRfit=NRfit, final_spin=final_spin,
            return_fits_used=return_fits_used
        )
    if "nrsur" in _method:
        return final_mass_of_merger_from_NRSurrogate(
            *args, approximant=approximant,
            return_fits_used=return_fits_used, model=model
        )
    return final_mass_of_merger_from_waveform(*args, approximant=approximant)
def final_spin_of_merger(
    *args, method="NR", approximant="SEOBNRv4", NRfit="average",
    return_fits_used=False, model="NRSur7dq4Remnant"
):
    """Return the final spin resulting from a BBH merger.

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    a_1: float/np.ndarray
        float/array of primary spin magnitudes
    a_2: float/np.ndarray
        float/array of secondary spin magnitudes
    tilt_1: float/np.ndarray
        float/array of primary spin tilt angle from the orbital angular momentum
    tilt_2: float/np.ndarray
        float/array of secondary spin tilt angle from the orbital angular
        momentum
    phi_12: float/np.ndarray
        float/array of samples for the angle between the in-plane spin
        components
    method: str
        The method you wish to use to calculate the final spin of merger. Either
        NR, NRSurrogate or waveform
    approximant: str
        Name of the approximant you wish to use if the chosen method is waveform
        or NRSurrogate
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average' or when method='NRSurrogate'
    model: str, optional
        The NRSurrogate model to use when evaluating the fits
    """
    # Dispatch directly to the backend selected by ``method``.
    _method = method.lower()
    if _method == "nr":
        return final_spin_of_merger_from_NR(
            *args, NRfit=NRfit, return_fits_used=return_fits_used
        )
    if "nrsur" in _method:
        return final_spin_of_merger_from_NRSurrogate(
            *args, approximant=approximant,
            return_fits_used=return_fits_used, model=model
        )
    return final_spin_of_merger_from_waveform(*args, approximant=approximant)
def final_kick_of_merger(
    *args, method="NR", approximant="SEOBNRv4", NRfit="average",
    return_fits_used=False, model="NRSur7dq4Remnant"
):
    """Return the final kick velocity of the remnant resulting from a BBH merger.

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    a_1: float/np.ndarray
        float/array of primary spin magnitudes
    a_2: float/np.ndarray
        float/array of secondary spin magnitudes
    tilt_1: float/np.ndarray
        float/array of primary spin tilt angle from the orbital angular momentum
    tilt_2: float/np.ndarray
        float/array of secondary spin tilt angle from the orbital angular
        momentum
    phi_12: float/np.ndarray
        float/array of samples for the angle between the in-plane spin
        components
    method: str
        The method you wish to use to calculate the final kick of merger. Either
        NR, NRSurrogate or waveform
    approximant: str
        Name of the approximant you wish to use if the chosen method is waveform
        or NRSurrogate
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average' or when method='NRSurrogate'
    model: str, optional
        The NRSurrogate model to use when evaluating the fits
    """
    # Bug fix: the signature previously read ``return_fits_used: False``
    # (an *annotation*, not a default), which made the keyword mandatory and
    # broke every caller that omitted it. It is now optional with default
    # False, matching the sibling dispatchers.
    if "nrsur" not in method.lower():
        raise NotImplementedError(
            "Currently you can only work out the final kick velocity using "
            "NRSurrogate fits."
        )
    velocity_func = final_kick_of_merger_from_NRSurrogate
    kwargs = {
        "approximant": approximant, "return_fits_used": return_fits_used,
        "model": model
    }
    return velocity_func(*args, **kwargs)
def peak_luminosity_of_merger(*args, NRfit="average", return_fits_used=False):
    """Return the peak luminosity of an aligned-spin BBH using NR fits.

    Parameters
    ----------
    mass_1: float/np.ndarray
        float/array of masses for the primary object
    mass_2: float/np.ndarray
        float/array of masses for the secondary object
    spin_1z: float/np.ndarray
        float/array of primary spin aligned with the orbital angular momentum
    spin_2z: float/np.ndarray
        float/array of secondary spin aligned with the orbital angular momentum
    NRFit: str
        Name of the NR fit you wish to use if chosen method is NR
    return_fits_used: Bool, optional
        if True, return the NR fits that were used. Only used when
        NRFit='average'
    """
    from pesummary.gw.conversions import nrutils

    # Only the averaged fit accepts the ``return_fits_used`` keyword.
    if NRfit.lower() == "average":
        return nrutils.bbh_peak_luminosity_average(
            *args, return_fits_used=return_fits_used
        )
    func = getattr(
        nrutils, "bbh_peak_luminosity_non_precessing_{}".format(NRfit)
    )
    return func(*args)
| 419 | 0 | 27 |
76beea20d465eec4a5cee6eba183bfc6d0f150c4 | 9,159 | py | Python | nrc/nrc/spiders/PAPermitScraper.py | SkyTruth/scraper | c1903a74c717a7b36a05f0f466c51544911c4499 | [
"MIT"
] | 2 | 2016-07-01T02:41:17.000Z | 2020-04-04T16:16:55.000Z | nrc/nrc/spiders/PAPermitScraper.py | SkyTruth/scraper | c1903a74c717a7b36a05f0f466c51544911c4499 | [
"MIT"
] | 4 | 2015-01-14T17:00:12.000Z | 2015-06-29T19:36:27.000Z | nrc/nrc/spiders/PAPermitScraper.py | SkyTruth/scraper | c1903a74c717a7b36a05f0f466c51544911c4499 | [
"MIT"
] | null | null | null | # PA Well Permit Scraper
import re
from datetime import datetime, timedelta
import xlrd
#import uuid
from string import Template
from xml.sax.saxutils import escape
from dateutil.parser import parse as parse_date
from scrapy.spider import BaseSpider
from scrapy.contrib.loader import ItemLoader
from scrapy.http import Request, Response, TextResponse
from scrapy.contrib.loader.processor import TakeFirst, MapCompose, Join
from scrapy.shell import inspect_response
from scrapy import log
#from scrapy.stats import stats
from nrc.items import PA_DrillingPermit, FeedEntry, FeedEntryTag
from nrc.database import NrcDatabase
from nrc.NrcBot import NrcBot
from nrc.AtomPubScraper import AtomPubScraper
| 42.402778 | 155 | 0.599301 | # PA Well Permit Scraper
import re
from datetime import datetime, timedelta
import xlrd
#import uuid
from string import Template
from xml.sax.saxutils import escape
from dateutil.parser import parse as parse_date
from scrapy.spider import BaseSpider
from scrapy.contrib.loader import ItemLoader
from scrapy.http import Request, Response, TextResponse
from scrapy.contrib.loader.processor import TakeFirst, MapCompose, Join
from scrapy.shell import inspect_response
from scrapy import log
#from scrapy.stats import stats
from nrc.items import PA_DrillingPermit, FeedEntry, FeedEntryTag
from nrc.database import NrcDatabase
from nrc.NrcBot import NrcBot
from nrc.AtomPubScraper import AtomPubScraper
class PAPermitScraper (AtomPubScraper):
    """Scrapes PA DEP oil & gas drilling permit reports.

    For each task, downloads the permit report XML for a date window,
    turns each row into a PA_DrillingPermit item and, for new permits,
    also emits a geo-tagged FeedEntry plus FeedEntryTag items.
    """
    name = 'PAPermitScraper'
    allowed_domains = None

    def process_item(self, task):
        """Build the report URL for the task's date window and request it."""
        # Hard-coded fallback window, overridden by the task parameters below.
        from_date = parse_date('11-01-2012', fuzzy=1)
        to_date = parse_date('12-31-2013', fuzzy=1)
        if 'from_date' in task and 'to_date' in task:
            from_date = parse_date(task['from_date'], fuzzy=1)
            to_date = parse_date(task['to_date'], fuzzy=1)
        elif 'date_offset' in task:
            # Rolling window ending today.
            to_date = datetime.today()
            from_date = to_date - timedelta(days=int(task['date_offset']))
        date_fmt = "%m/%d/%Y 23:59:59"
        target_url = ("%s&P_START_DATE=%s&P_END_DATE=%s"
            % (task['target_url'],
            from_date.strftime(date_fmt),
            to_date.strftime(date_fmt)))
        request = Request (target_url, callback=self.parse_xml)
        self.log('Downloading xml from url %s' % (target_url), log.INFO)
        request.meta['task'] = task
        yield request

    def process_row (self, row, task):
        """Convert one report row into permit/feed items (or skip it)."""
        # Screen for rows whose API number does not match the expected format.
        if not self.base_api(row['WELL_API']):
            # Bug fix: the message previously had no '{0}' placeholder, so
            # the offending API number was never interpolated into the log.
            self.log("Invalid API '{0}';".format(row['WELL_API']), log.WARNING)
            yield None
            return
        l=ItemLoader (PA_DrillingPermit())
        # Truncate free-text fields to the widths of the DB columns.
        l.Well_Type_in = lambda slist: [s[:20] for s in slist]
        l.County_Name_in = lambda slist: [s[:20] for s in slist]
        l.Municipality_Name_in = lambda slist: [s[:20] for s in slist]
        l.Site_Name_in = lambda slist: [s[:50] for s in slist]
        #l.add_value ('County_Name', row['COUNTY_NAME'])
        l.add_value ('County_Name', row['COUNTY'])
        #l.add_value ('Municipality_Name', row['MUNICIPALITY_NAME'])
        l.add_value ('Municipality_Name', row['MUNICIPALITY'])
        l.add_value ('Auth_Id', row['AUTHORIZATION_ID'])
        l.add_value ('Date_Disposed', self.parse_date(row['PERMIT_ISSUED_DATE']))
        l.add_value ('Appl_Type_Code', row['APPLICATION_TYPE'])
        l.add_value ('Auth_Type_Description', row['AUTH_TYPE_DESCRIPTION'])
        l.add_value ('Complete_API_', row['WELL_API'])
        l.add_value ('Other_Id', self.base_api(row['WELL_API']))
        # l.add_value ('Marcellus_Shale_Well', row['MARCELLUS_SHALE_IND'])
        #l.add_value ('Horizontal_Well', row['HORIZONTAL_WELL_IND'])
        # Map the free-text CONFIGURATION column onto a Y/N horizontal flag.
        if row['CONFIGURATION'] in ("Horizontal Well", "Deviated Well"):
            horiz = 'Y'
        else:
            horiz = 'N'
        if row['CONFIGURATION'] not in ("Vertical Well",):
            self.log("Unknown PA Configuration: {0}."
                .format(row['CONFIGURATION']), log.INFO)
        l.add_value ('Horizontal_Well', horiz)
        l.add_value ('Well_Type', row['WELL_TYPE'])
        l.add_value ('Site_Name', row['FARM_NAME'])
        l.add_value ('Latitude_Decimal', row['LATITUDE_DECIMAL'])
        l.add_value ('Longitude_Decimal', row['LONGITUDE_DECIMAL'])
        l.add_value ('Client_Id', row['CLIENT_ID'])
        l.add_value ('Operator', row['OPERATOR'])
        l.add_value ('Address1', row['OPERATOR_ADDRESS'])
        l.add_value ('City', row['CITY'])
        l.add_value ('State_Code', row['STATE'])
        l.add_value ('Zip_Code', row['ZIP_CODE'])
        l.add_value ('Unconventional', row['UNCONVENTIONAL'])
        l.add_value ('OGO_Num', row['OGO_NUM'])
        #l.add_value ('Facility_Id', row['PRIMARY_FACILITY_ID'])
        l.add_value ('Facility_Id', row['PRMRY_FAC_ID'])
        item = l.load_item()
        # Only rows with both a complete API and a disposition date are usable.
        if item['Complete_API_'] and item ['Date_Disposed']:
            stats = self.crawler.stats
            existing_item = self.db.loadItem (item, {'Complete_API_': item['Complete_API_'], 'Date_Disposed': item ['Date_Disposed']})
            if existing_item:
                # Change-alerting on re-scraped permits is deliberately
                # disabled; existing permits are only counted.
                stats.inc_value ('_existing_count', spider=self)
            else:
                stats.inc_value ('_new_count', spider=self)
                yield item
                # Escape every field for safe embedding in the feed XML/HTML.
                params = dict(item)
                for f in item.fields:
                    params[f] = escape ("%s" % params.get(f,''))
                params['Appl_Type_Code'] = self.get_appl_type(item)
                params['Well_Type'] = self.get_well_type(item)
                # Create a new feed entry for the permit.
                l=ItemLoader (FeedEntry())
                url = "%s/%s/%s" % (task['target_url'], item['Complete_API_'], item ['Date_Disposed'])
                # Deterministic id derived from the URL so re-scrapes map to
                # the same feed entry.
                feed_entry_id = self.db.uuid3_str(name=url.encode('ASCII'))
                l.add_value ('id', feed_entry_id)
                l.add_value ('title', "PA %s Drilling Permit Issued in %s Township" % (params.get('Well_Type'), item.get('Municipality_Name') ))
                l.add_value ('incident_datetime', item.get('Date_Disposed'))
                l.add_value ('link', task['about_url'])
                l.add_value ('summary', self.summary_template().substitute(params))
                l.add_value ('content', self.content_template().substitute(params))
                l.add_value ('lat', item.get('Latitude_Decimal'))
                l.add_value ('lng', item.get('Longitude_Decimal'))
                l.add_value ('source_id', 4)
                feed_item = l.load_item()
                # Only geolocated permits are published to the feed.
                if feed_item.get('lat') and feed_item.get('lng'):
                    yield feed_item
                    yield self.create_tag (feed_entry_id, 'PADEP')
                    yield self.create_tag (feed_entry_id, 'frack')
                    yield self.create_tag (feed_entry_id, 'permit')
                    yield self.create_tag (feed_entry_id, 'drilling')
                    if item.get('Marcellus_Shale_Well') == 'Y':
                        yield self.create_tag (feed_entry_id, 'marcellus')
                    well_type = params.get('Well_Type')
                    if well_type:
                        yield self.create_tag (feed_entry_id, well_type)

    def base_api(self, complete_api):
        """Return the 'NNN-NNNNN' prefix of a well API number, or '' if the
        number does not start with that pattern."""
        rex = r'[0-9]{3}-[0-9]{5}'
        mo = re.match(rex, complete_api)
        if mo:
            return mo.group()
        return ''

    def create_tag (self, feed_entry_id, tag, comment = ''):
        """Build a FeedEntryTag item linking ``tag`` to a feed entry."""
        l = ItemLoader (FeedEntryTag())
        l.add_value ('feed_entry_id', feed_entry_id)
        l.add_value ('tag', tag)
        l.add_value ('comment', comment)
        return l.load_item()

    def item_stored(self, item, id):
        """Hook called after an item is stored; marks the task item as new."""
        self.item_new (id)

    def get_appl_type (self, item):
        """Map the application-type code to a human-readable label."""
        m = {'NEW': 'New', 'REN': 'Renewal'}
        code = item.get('Appl_Type_Code')
        return m.get(code, code)

    def get_well_type (self, item):
        """Map the well-type code to a human-readable label."""
        m = {'GAS': 'Gas', 'OIL': 'Oil'}
        code = item.get('Well_Type')
        return m.get(code, code)

    def summary_template (self):
        """One-line feed summary template."""
        return Template ("$Well_Type permit issued on $Date_Disposed to $Operator for site $Site_Name in $Municipality_Name township, $County_Name county")

    def content_template (self):
        """HTML detail-table template for the feed entry body."""
        return Template (
            """<b>Report Details</b>
            <table>
            <tr><th>Well Type:</th><td>$Well_Type</td></tr>
            <tr><th>Permit Issued:</th><td>$Date_Disposed</td></tr>
            <tr><th>Operator:</th><td>$Operator</td></tr>
            <tr><th>Site Name:</th><td>$Site_Name</td></tr>
            <tr><th>Township:</th><td>$Municipality_Name</td></tr>
            <tr><th>County:</th><td>$County_Name</td></tr>
            <tr><th>Permit Type:</th><td>$Appl_Type_Code</td></tr>
            <tr><th>Description:</th><td>$Auth_Type_Description</td></tr>
            <tr><th>Unconventional:</th><td>$Unconventional</td></tr>
            <tr><th>Horizontal:</th><td>$Horizontal_Well</td></tr>
            <tr><th>Total Depth:</th><td>$Total_Depth</td></tr>
            <tr><th>Well API Number:</th><td>$Complete_API_</td></tr>
            <tr><th>OGO Number:</th><td>$OGO_Num</td></tr>
            <tr><th>Facility ID:</th><td>$Facility_Id</td></tr>
            </table>
            """)
| 8,115 | 319 | 23 |
ee78a57c8a14d6b4c72780cf061ac7ed488b5e03 | 3,941 | py | Python | cryptobrute.py | mustafasayilan/cryptobrute | 855959ebc04388e12ea786133ab8eb2f464cf637 | [
"MIT"
] | 8 | 2021-06-14T21:02:47.000Z | 2022-03-17T20:57:20.000Z | cryptobrute.py | mustafasayilan/cryptobrute | 855959ebc04388e12ea786133ab8eb2f464cf637 | [
"MIT"
] | 4 | 2021-06-17T17:24:19.000Z | 2022-03-18T16:22:26.000Z | cryptobrute.py | mustafasayilan/cryptobrute | 855959ebc04388e12ea786133ab8eb2f464cf637 | [
"MIT"
] | 2 | 2021-06-25T09:44:14.000Z | 2021-10-08T17:42:06.000Z | from bitcoinaddress import Wallet
import os
from multiprocessing import Process
import argparse
import sys
import signal
# Set the signal handler
signal.signal(signal.SIGINT, handler)
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", action='store', dest='output', help="Results will write this file.")
parser.add_argument("-p", "--maxprocess", action='store', dest='maxprocess', help="Maximum process. Default 5")
parser.add_argument("-i", "--input", action='store', dest='input', help="Select input address file")
args = parser.parse_args()
inputFileName = ""
outputFileName = ""
maximumProcess = 5
if args.input:
inputFileName = args.input
else:
sys.exit("Please select input file with -i addresses.txt")
if args.output:
outputFileName = args.output
else:
sys.exit("Please select output file with -o results.txt")
if args.maxprocess:
maximumProcess = int(args.maxprocess)
global addressArray
addressArray = {}
if __name__ == "__main__":
read.readFromText()
processes = [Process(target=cm.multitask, args=(0,))]
i = 0
while i < maximumProcess:
processes.append(Process(target=cm.multitask, args=((i+1),)))
i+=1
for process in processes:
process.start()
for process in processes:
process.join()
| 30.550388 | 134 | 0.544278 | from bitcoinaddress import Wallet
import os
from multiprocessing import Process
import argparse
import sys
import signal
def handler(signum, frame):
    """SIGINT handler: announce shutdown and terminate the process."""
    print('Exiting')
    raise SystemExit
# Set the signal handler
# Ctrl+C triggers ``handler`` above, which exits the whole process.
signal.signal(signal.SIGINT, handler)
# Command-line interface: input address list, output results file, and the
# number of extra worker processes to spawn.
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", action='store', dest='output', help="Results will write this file.")
parser.add_argument("-p", "--maxprocess", action='store', dest='maxprocess', help="Maximum process. Default 5")
parser.add_argument("-i", "--input", action='store', dest='input', help="Select input address file")
args = parser.parse_args()
inputFileName = ""
outputFileName = ""
maximumProcess = 5
# Input and output paths are mandatory; exit with a usage hint otherwise.
if args.input:
    inputFileName = args.input
else:
    sys.exit("Please select input file with -i addresses.txt")
if args.output:
    outputFileName = args.output
else:
    sys.exit("Please select output file with -o results.txt")
if args.maxprocess:
    maximumProcess = int(args.maxprocess)
# NOTE: ``global`` at module level is a no-op; kept as-is.
global addressArray
addressArray = {}  # address -> ""; dict used as a set for O(1) membership
class read:
    """Namespace for loading the target address list from disk."""

    def readFromText():
        """Load addresses from ``inputFileName`` into ``addressArray``.

        Each line of the file is one address; the trailing newline is
        stripped and the address is stored as a dict key so later
        membership tests are O(1).
        """
        print("Addresses loading please wait...")
        # ``with`` guarantees the handle is closed (the original leaked it),
        # and iterating the file avoids materialising every line at once.
        with open(inputFileName, 'r') as addrfile:
            for line in addrfile:
                addressArray[line.rstrip('\n')] = ""
        print("Addresses Loaded")
class save:
    """Namespace for appending results to the output file."""

    def toFile(text):
        """Append ``text`` to ``outputFileName``.

        Uses a context manager so the handle is flushed and closed even if
        the write raises (the original opened and closed manually).
        """
        with open(outputFileName, "a+") as file:
            file.write(text)
class check:
    """Namespace for testing whether an address is one of the targets."""

    def balance(address):
        """Return 1 if ``address`` is in the loaded target set, else 0."""
        hit = 0
        try:
            # Membership test against the preloaded dict of target addresses.
            hit = 1 if address in addressArray else 0
        except NameError:
            # Defensive: only fires if the address list was never loaded.
            print("Error : "+str(NameError)+" Address : "+address)
        return hit
class cm:
    """Brute-force worker logic shared by all processes.

    NOTE(review): ``total``/``founded`` are class attributes mutated inside
    worker processes; with multiprocessing each process gets its own copy,
    so these counters are per-process, not global — confirm intent.
    """
    total = 0
    founded = 0
    def multitask(pss):
        # ``pss`` is the worker index; worker 0 additionally prints counters.
        i = 0
        balance = 0
        found = 0
        while True:
            # Endless brute-force loop; only stopped externally (SIGINT).
            i += 1
            # 32 random bytes rendered as hex — the candidate private key.
            rands = os.urandom(32).hex()
            wallet = Wallet(rands)
            # Five address encodings derived from the same key (legacy,
            # compressed, P2SH, and two bech32 forms).
            addr1 = wallet.address.__dict__['mainnet'].__dict__['pubaddr1']
            addr2 = wallet.address.__dict__['mainnet'].__dict__['pubaddr1c']
            addr3 = wallet.address.__dict__['mainnet'].__dict__['pubaddr3']
            addr4 = wallet.address.__dict__['mainnet'].__dict__['pubaddrbc1_P2WPKH']
            addr5 = wallet.address.__dict__['mainnet'].__dict__['pubaddrbc1_P2WSH']
            heks = wallet.key.__dict__['mainnet'].__dict__['wif']
            try:
                # ``balance`` counts how many of the five addresses hit the
                # loaded target set (each check.balance returns 0 or 1).
                balance = float(check.balance(addr1))
                balance += float(check.balance(addr2))
                balance += float(check.balance(addr3))
                balance += float(check.balance(addr4))
                balance += float(check.balance(addr5))
                # NOTE(review): assumes every worker advances at the same
                # rate when estimating the global total — confirm.
                cm.total += 5 * (maximumProcess+1)
                cm.founded += found
                # Periodic progress report every 10000 checked addresses.
                if (i*5)%10000 == 0:
                    print("Check Worker :"+str(pss)+" Address: " + addr1+" Privatekey uncompressed "+heks+" i "+str(i*5) ,end = "\n")
                    if pss == 0:
                        print(" Total: "+str(cm.total)+" Founded: " + str(cm.founded) ,end = "\r")
            except NameError:
                print(str(NameError))
                pass
            res = "Count: %s | Hex: %s \n" % (i, heks)
            if balance > 0:
                # Any hit: remember it and persist the private key to disk.
                found += 1
                save.toFile(res)
                #print(res)
if __name__ == "__main__":
read.readFromText()
processes = [Process(target=cm.multitask, args=(0,))]
i = 0
while i < maximumProcess:
processes.append(Process(target=cm.multitask, args=((i+1),)))
i+=1
for process in processes:
process.start()
for process in processes:
process.join()
| 2,338 | 18 | 203 |
5e3dafd6d0e7593121c3eb1051224de153348874 | 1,180 | py | Python | src/tasks.py | artinnok/billing-gateway | fbb0b358066a0038e775e6a9c4d40bcdf8f79e8e | [
"MIT"
] | null | null | null | src/tasks.py | artinnok/billing-gateway | fbb0b358066a0038e775e6a9c4d40bcdf8f79e8e | [
"MIT"
] | 4 | 2021-03-18T23:34:38.000Z | 2021-06-04T22:27:26.000Z | src/tasks.py | artinnok/billing-gateway | fbb0b358066a0038e775e6a9c4d40bcdf8f79e8e | [
"MIT"
] | 1 | 2020-02-11T09:20:30.000Z | 2020-02-11T09:20:30.000Z | import django
from django.conf import settings
from django.db import transaction
from django.contrib.auth import get_user_model
django.setup()
from billing.models import Account, Payment
from billing.utils import complete_payment
USER = get_user_model()
| 25.652174 | 55 | 0.619492 | import django
from django.conf import settings
from django.db import transaction
from django.contrib.auth import get_user_model
django.setup()
from billing.models import Account, Payment
from billing.utils import complete_payment
USER = get_user_model()
def init_account(user_id):
    """Create default accounts for ``user_id`` and seed the USD account.

    One account per configured currency is created inside a single DB
    transaction; the USD account additionally receives an opening transfer
    of ``DEFAULT_USD_BALANCE`` from the internal account.
    """
    with transaction.atomic():
        user = USER.objects.get(id=user_id)
        for currency in settings.DEFAULT_CURRENCY_LIST:
            account = Account.objects.create(user=user, currency=currency)
            # Only the USD account is funded on creation.
            if currency != settings.USD:
                continue
            internal_account = Account.objects.get(
                code=settings.INTERNAL,
                currency=currency,
            )
            opening_payment = Payment.objects.create(
                from_account=internal_account,
                to_account=account,
                amount=settings.DEFAULT_USD_BALANCE,
                fee=settings.ZERO_FEE,
                status=settings.INITIATED,
            )
            complete_payment(opening_payment)
def transfer_money(payment_id):
    """Load the Payment identified by ``payment_id`` and execute it."""
    complete_payment(Payment.objects.get(id=payment_id))
| 874 | 0 | 46 |
dd3763f11f7171672cdad6d6acbf95ff8acd06bb | 6,545 | py | Python | cride/circles/views/memberships.py | jecs580/django_second_app | ef04b48342ef560eac8f58540ba684e5eb7d7926 | [
"MIT"
] | null | null | null | cride/circles/views/memberships.py | jecs580/django_second_app | ef04b48342ef560eac8f58540ba684e5eb7d7926 | [
"MIT"
] | 2 | 2019-12-24T00:03:49.000Z | 2019-12-24T00:03:50.000Z | cride/circles/views/memberships.py | jecs580/django_second_app | ef04b48342ef560eac8f58540ba684e5eb7d7926 | [
"MIT"
] | null | null | null | """Vistas de miembros del círculo"""
# Django REST Framework
from rest_framework import mixins, viewsets, status
from rest_framework.generics import get_object_or_404
from rest_framework.decorators import action
from rest_framework.response import Response
# Models
from cride.circles.models import Circle, Membership, Invitation
# Permissions
from rest_framework.permissions import IsAuthenticated
from cride.circles.permissions.memberships import IsActiveCircleMember, IsSelfMember
# Serializers
from cride.circles.serializers import MembershipModelSerializer, AddMemberSerializer
class MembershipViewSet(
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.DestroyModelMixin,
    viewsets.GenericViewSet
):
    """Circle membership view set.

    Exposes list/create/retrieve/destroy actions for a circle's members,
    plus a custom ``invitations`` detail action.
    """
    serializer_class = MembershipModelSerializer
    def dispatch(self, request, *args, **kwargs):
        """Verify that the circle exists before handling any request."""
        slug_name = kwargs['slug_name']  # Name of the key we send in the URL.
        # Store the requested circle on the view for later use.
        self.circle = get_object_or_404(
            Circle,
            slug_name=slug_name
        )  # Equivalent to:
        # try: Circle.objects.get(slug_name=slug_name)
        # except Circle.DoesNotExist:
        #     Http404("<some message>")
        # except that get_object_or_404 standardises the raised response.
        return super(MembershipViewSet, self).dispatch(request, *args, **kwargs)  # Run the
        # default dispatch and return its result. Every request handled by an
        # instance of this class therefore validates the circle first.
    def get_permissions(self):
        """Assign permissions based on the current action."""
        permissions = [IsAuthenticated]
        if self.action != 'create':
            permissions.append(IsActiveCircleMember)
        if self.action == 'invitations':
            permissions.append(IsSelfMember)
        return [p() for p in permissions]
    def get_queryset(self):
        """Return the circle's active members."""
        return Membership.objects.filter(
            circle=self.circle,
            is_active=True
        )
    def get_object(self):
        """Return the circle member identified by the user's username."""
        return get_object_or_404(
            Membership,
            user__username=self.kwargs['pk'],  # The username arrives via the URL; the key
            # is still 'pk' because the mixins look objects up by an
            # identifier, and the username serves as one here.
            circle=self.circle,
            is_active=True
        )
    def perform_destroy(self, instance):
        """Disable the membership."""
        instance.is_active = False  # Instead of deleting the member, flag it inactive so
        # the other views are blocked by the missing permission.
        instance.save()
    @action(detail=True, methods=['get'])
    def invitations(self, request, *args, **kwargs):
        """Retrieve a member's invitation breakdown.

        Returns a list containing all the members who have used their
        invitations and another list containing the invitation codes that
        have not been used yet.
        """
        member = self.get_object()  # The detail object (the member).
        invited_members = Membership.objects.filter(
            circle=self.circle,
            invited_by=request.user,
            is_active=True
        )  # Members who were invited by the user named in the URL.
        unsed_invitations = Invitation.objects.filter(
            circle=self.circle,
            issued_by=request.user,
            used=False,
        ).values_list('code')  # Issued-but-unused invitations; values_list('code')
        # keeps only the code column (as a somewhat odd list of 1-tuples).
        diff = member.remaining_invitations-len(unsed_invitations)  # Number of new codes
        # still to generate: remaining quota minus the issued-but-unused ones.
        # By default this starts at the member's full quota.
        invitations = [x[0] for x in unsed_invitations]  # values_list yields tuples; flatten
        # to a plain list of code strings.
        for i in range(0, diff):  # Only runs while diff > 0 — i.e. if the member has used
            # all remaining issued invitations. In practice this fires the
            # first time the endpoint is queried and whenever the quota grows.
            invitations.append(
                Invitation.objects.create(
                    issued_by=request.user,
                    circle=self.circle
                ).code  # Only the code, so it can join the list of strings.
            )
        data = {
            'used_invitations': MembershipModelSerializer(invited_members, many=True).data,
            'invitations': invitations
        }
        return Response(data)
    def create(self, request, *args, **kwargs):
        """Handle member creation from an invitation code."""
        serializer = AddMemberSerializer(
            data=request.data,  # The parsed (JSON) request payload.
            context={'circle': self.circle, 'request': request}  # Serializers can receive
            # extra data besides ``data`` via ``context``; the request lets the
            # serializer know which user is making the call.
        )
        serializer.is_valid(raise_exception=True)
        member = serializer.save()
        data = self.get_serializer(member).data  # Respond with serializer_class (not
        # AddMemberSerializer), since it renders the membership in full detail.
        return Response(data, status=status.HTTP_201_CREATED)
| 45.451389 | 112 | 0.676394 | """Vistas de miembros del círculo"""
# Django REST Framework
from rest_framework import mixins, viewsets, status
from rest_framework.generics import get_object_or_404
from rest_framework.decorators import action
from rest_framework.response import Response
# Models
from cride.circles.models import Circle, Membership, Invitation
# Permissions
from rest_framework.permissions import IsAuthenticated
from cride.circles.permissions.memberships import IsActiveCircleMember, IsSelfMember
# Serializers
from cride.circles.serializers import MembershipModelSerializer, AddMemberSerializer
class MembershipViewSet(
mixins.ListModelMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet
):
"""Conjunto de vistas de miembros de círculo."""
serializer_class = MembershipModelSerializer
def dispatch(self, request, *args, **kwargs):
"""Verifica que exista el circulo."""
slug_name = kwargs['slug_name'] # Es el nombre de la llave que mandamos en la url
# Creamos una nueva variable para obtener el circulo requerido
self.circle = get_object_or_404(
Circle,
slug_name=slug_name
) # Esto es equivalente a usar:
# try: Circle.objects.get(slug_name=slug_name)
# exception: Circle.DoesNotExist:
# Http404("<algun mensaje>") # Con la diferencia de que con este metodo podremos personalizar el
# raise que se envia.
return super(MembershipViewSet, self).dispatch(request, *args, **kwargs) # Dejamos que se ejecute en
# metodo dispath por defecto y lo retornamos ambos.
# Ahora cada que se ejecute esta clase que sea instanciada hara primeramente la verificacion del
# circulo
def get_permissions(self):
"""Asigna permisos basados en la accion"""
permissions = [IsAuthenticated]
if self.action != 'create':
permissions.append(IsActiveCircleMember)
if self.action == 'invitations':
permissions.append(IsSelfMember)
return [p() for p in permissions]
def get_queryset(self):
"""Returna los miembros del circulo"""
return Membership.objects.filter(
circle=self.circle,
is_active=True
)
def get_object(self):
"""Retorna el miembro del círculo utilizando el nombre de usuario del usuario"""
return get_object_or_404(
Membership,
user__username=self.kwargs['pk'], # Obtenemos el valor de username atravez de la url que enviemos
# desde un cliente la llave es pk por que para mixin se obtiene un objeto con identificado,
# pero como el username tambien funciona como indentificador, lo cambiamos, pero el el nombre
# de la llave es la misma
circle=self.circle,
is_active=True
)
def perform_destroy(self, instance):
"""Desabilita la membresia"""
instance.is_active = False # En vez de eliminar al miembro simplemente colocamos el campo is_active a
# False para que las demas vistas esten bloqueeadas por no tener el permiso.
instance.save()
@action(detail=True, methods=['get'])
def invitations(self, request, *args, **kwargs):
"""Recuperar el desglose de invitaciones de un miembro
Devolverá una lista que contiene todos los miembros
que han usado sus invitaciones y otra lista que contiene
las invitaciones que aún no se han usado.
"""
member = self.get_object() # Obtenemos el objeto de detalle (el miembro)
invited_members = Membership.objects.filter(
circle=self.circle,
invited_by=request.user,
is_active=True
) # Trae a los miembro que fueron invitados por el usuario colocado en la url
unsed_invitations = Invitation.objects.filter(
circle=self.circle,
issued_by=request.user,
used=False,
).values_list('code') # Invitaciones no utilizadas.Colocamos values_list('code') para que nos lista
# solo los valores de codigo. Esta lista es un poco rara.
diff = member.remaining_invitations-len(unsed_invitations) # Sacamos la difencia del numero
# invitaciones que le quedan por usar, contra las invitaciones que envio pero no son usadas.
# Esto es para generar el codigo de invitaciones. por que por defecto seran el numero maximo.
invitations = [x[0] for x in unsed_invitations] # La lista que nos devolvia el unsed_invitations tenian
# de elementos tuplas. Pero no nosotros solo queremos los codigos, entonces recoremos la lista y la
# llenamos en otra pero con los los elemento de la tupla.
for i in range(0, diff): # recorre el for mietras diff sea mayor a cero. En otras palabras si ya
# gasto todas sus invitaciones restantes y tiene las invitaciones no son usadas no entrara al for.
invitations.append(
Invitation.objects.create(
issued_by=request.user,
circle=self.circle
).code # Solo devolvemos el codigo para que se pueda agregar a la lista de strings.
)
# Este for solo se activara cuando la primera vez que consulte, y cuando se le aumenten un numero
# de ivitaciones.
data = {
'used_invitations': MembershipModelSerializer(invited_members, many=True).data,
'invitations': invitations
}
return Response(data)
def create(self, request, *args, **kwargs):
"""Maneja la creación de miembros desde el código de invitación."""
serializer = AddMemberSerializer(
data=request.data, # Cambiamos los datos recibidos(Json) a un diccionario
context={'circle': self.circle, 'request': request} # Los serializers tambien pueden recibir otros
# datos ademas de la data, para esto usamos la variable context, mandamos request para que el
# serializer pueda saber el usuario de la peticion.
)
serializer.is_valid(raise_exception=True)
member = serializer.save()
data = self.get_serializer(member).data # No usamos el serializer AddMemberSerializer. Si no el
# serializador que se coloco en la variable serializer_class puesto que ya esta personalizado para
# mostrar con mas detalle
return Response(data, status=status.HTTP_201_CREATED)
| 0 | 0 | 0 |
c668b9611e35ba029b366d3fe235af37c06dfa5b | 2,369 | py | Python | mmmeta/backend/base.py | simonwoerpel/mmmeta | 3b130c859f0251f0a90af4423c47c91c0d5b496f | [
"MIT"
] | 4 | 2021-05-31T18:59:01.000Z | 2021-06-27T23:15:26.000Z | mmmeta/backend/base.py | simonwoerpel/mmmeta | 3b130c859f0251f0a90af4423c47c91c0d5b496f | [
"MIT"
] | null | null | null | mmmeta/backend/base.py | simonwoerpel/mmmeta | 3b130c859f0251f0a90af4423c47c91c0d5b496f | [
"MIT"
] | null | null | null | import json
import os
from ..util import cast, datetime_to_json
class Backend:
"""
base class for metadir backends.
currently only local filesystem backend implemented.
"""
def get_base_path(self):
"""return a base path to a local file dir or a cloud bucket"""
raise NotImplementedError
def get_path(self, path):
"""return absolute filesystem path or cloud bucket for `path"""
return os.path.join(self.base_path, path)
def exists(self, path):
"""check if given path exists and return boolean"""
raise NotImplementedError
def save(self, path, content):
"""
store `content` in path and return absolute path to stored file or
cloud blob location
"""
raise NotImplementedError
def load(self, path):
"""
return content as string for given path, use the same not found
exception for all storages:
"""
if not self.exists(path):
raise FileNotFoundError(f"Path `{path}` not found in storage `{self}`")
return self._load(path)
def _load(self, path):
"""actual implementation for specific storage"""
raise NotImplementedError
def set_value(self, path, value):
"""simply store values to a path location"""
self.save(path, value)
return value
def get_value(self, path, transform=lambda x: cast(x, with_date=True)):
"""simply get values from a path location"""
if not self.exists(path):
return
content = self.load(path)
return transform(content)
def get_children(self, path=".", condition=lambda x: True):
"""list all children under given path that match condition"""
raise NotImplementedError
def delete(self, path=""):
"""delete everything from path"""
raise NotImplementedError
| 29.246914 | 83 | 0.629802 | import json
import os
from ..util import cast, datetime_to_json
class Backend:
"""
base class for metadir backends.
currently only local filesystem backend implemented.
"""
def __init__(self, data_root):
self.data_root = data_root
self.base_path = self.get_base_path()
def __str__(self):
return self.get_base_path()
def __repr__(self):
return f"<{self.__class__.__name__}: {self}>"
def get_base_path(self):
"""return a base path to a local file dir or a cloud bucket"""
raise NotImplementedError
def get_path(self, path):
"""return absolute filesystem path or cloud bucket for `path"""
return os.path.join(self.base_path, path)
def exists(self, path):
"""check if given path exists and return boolean"""
raise NotImplementedError
def save(self, path, content):
"""
store `content` in path and return absolute path to stored file or
cloud blob location
"""
raise NotImplementedError
def load(self, path):
"""
return content as string for given path, use the same not found
exception for all storages:
"""
if not self.exists(path):
raise FileNotFoundError(f"Path `{path}` not found in storage `{self}`")
return self._load(path)
def load_json(self, path):
return json.loads(self.load(path))
def dump_json(self, path, content):
content = json.dumps(content, default=datetime_to_json)
self.save(path, content)
def _load(self, path):
"""actual implementation for specific storage"""
raise NotImplementedError
def set_value(self, path, value):
"""simply store values to a path location"""
self.save(path, value)
return value
def get_value(self, path, transform=lambda x: cast(x, with_date=True)):
"""simply get values from a path location"""
if not self.exists(path):
return
content = self.load(path)
return transform(content)
def get_children(self, path=".", condition=lambda x: True):
"""list all children under given path that match condition"""
raise NotImplementedError
def delete(self, path=""):
"""delete everything from path"""
raise NotImplementedError
| 334 | 0 | 135 |
5762199f83ef7cf6fdb43234108f9b42447293d5 | 3,947 | py | Python | fencing/diagram/FencingEntity.py | cqtran/Cmput_401_Fence_Friends | 98a25359e3801212e5afaaf71d1191870f73d608 | [
"Apache-2.0"
] | null | null | null | fencing/diagram/FencingEntity.py | cqtran/Cmput_401_Fence_Friends | 98a25359e3801212e5afaaf71d1191870f73d608 | [
"Apache-2.0"
] | null | null | null | fencing/diagram/FencingEntity.py | cqtran/Cmput_401_Fence_Friends | 98a25359e3801212e5afaaf71d1191870f73d608 | [
"Apache-2.0"
] | null | null | null | import math
from decimal import Decimal
# Used for rotations:
# https://stackoverflow.com/questions/34372480/rotate-point-about-another-point-in-degrees-python/34374437#34374437
# Accessed November 17, 2017
class Post:
"""A post"""
def displayString(self):
"""Return this item as it would be displayed to the user"""
string = self._displayString()
if self.isRemoval:
return string + " (Removal)"
return string
def _displayString(self):
"""Return this item as it would be displayed to the user"""
if self.postType == "cornerPost":
return "Corner Post"
if self.postType == "endPost":
return "End Post"
if self.postType == "tPost":
return "T Post"
if self.postType == "gatePost":
return "Gate Post"
print("Warning: unknown post type")
return str(self)
@property
class FencingEntity:
"""A fencing entity (fence segment or gate)"""
def displayString(self):
"""Return this item as it would be displayed to the user"""
if self._entityType == "fence":
string = "Fence"
elif self._entityType == "gate":
string = "Gate"
else:
print("Warning: unknown fencing attribute type")
return str(self)
if self._isDouble:
string = "Double " + string
string = self.lengthString() + " " + string
if self._isRemoval:
string += " (Removal)"
return string
@property
@property
@property
@property
@property
@property
@property
@property | 22.683908 | 115 | 0.657968 | import math
from decimal import Decimal
# Used for rotations:
# https://stackoverflow.com/questions/34372480/rotate-point-about-another-point-in-degrees-python/34374437#34374437
# Accessed November 17, 2017
class Post:
"""A post"""
def __init__(self, postType, x, y, isRemoval=False):
self.entityType = "post"
self.postType = postType
self._point = (x, y)
self.isRemoval = isRemoval
def __str__(self):
return self.postType
def displayString(self):
"""Return this item as it would be displayed to the user"""
string = self._displayString()
if self.isRemoval:
return string + " (Removal)"
return string
def _displayString(self):
"""Return this item as it would be displayed to the user"""
if self.postType == "cornerPost":
return "Corner Post"
if self.postType == "endPost":
return "End Post"
if self.postType == "tPost":
return "T Post"
if self.postType == "gatePost":
return "Gate Post"
print("Warning: unknown post type")
return str(self)
@property
def point(self):
return self._point
class FencingEntity:
"""A fencing entity (fence segment or gate)"""
def __init__(self, entityType, length, height, x, y, rotation,
isRemoval=False, isDouble=False):
if rotation is None:
rotation = 0
self._entityType = entityType
self._length = length
length = float(length)
self._x = FencingEntity._getX(x, y, length, height, rotation)
self._y = FencingEntity._getY(x, y, length, height, rotation)
self._x2 = FencingEntity._getX2(x, y, length, height, rotation)
self._y2 = FencingEntity._getY2(x, y, length, height, rotation)
self._isRemoval = isRemoval
self._isDouble = isDouble
def __str__(self):
entityType = self._entityType
if self._isDouble:
entityType = "double " + entityType
if self._isRemoval:
entityType += " (removal)"
return str(self._length) + 'in ' + entityType
def displayString(self):
"""Return this item as it would be displayed to the user"""
if self._entityType == "fence":
string = "Fence"
elif self._entityType == "gate":
string = "Gate"
else:
print("Warning: unknown fencing attribute type")
return str(self)
if self._isDouble:
string = "Double " + string
string = self.lengthString() + " " + string
if self._isRemoval:
string += " (Removal)"
return string
@property
def entityType(self):
return self._entityType
@property
def length(self):
return self._length
def _getX(x, y, width, height, rotation):
x0 = x + width / 2.0
y0 = y + height / 2.0
angle = math.radians(rotation)
return x0 + math.cos(angle) * (x - x0) - math.sin(angle) * (y - y0)
@property
def x(self):
return self._x
def _getX2(x, y, width, height, rotation):
x0 = x + width / 2.0
y0 = y + height / 2.0
x2 = x + width
y2 = y + height
angle = math.radians(rotation)
return x0 + math.cos(angle) * (x2 - x0) - math.sin(angle) * (y2 - y0)
@property
def x2(self):
return self._x2
def _getY(x, y, width, height, rotation):
x0 = x + width / 2.0
y0 = y + height / 2.0
angle = math.radians(rotation)
return y0 + math.sin(angle) * (x - x0) + math.cos(angle) * (y - y0)
@property
def y(self):
return self._y
def _getY2(x, y, width, height, rotation):
x0 = x + width / 2.0
y0 = y + height / 2.0
x2 = x + width
y2 = y + height
angle = math.radians(rotation)
return y0 + math.sin(angle) * (x2 - x0) + math.cos(angle) * (y2 - y0)
@property
def y2(self):
return self._y2
@property
def isRemoval(self):
return self._isRemoval
@property
def isDouble(self):
return self._isDouble
def _inchesString(self):
return str(self._length) + '"'
def _feetString(self):
feet = self._length // 12
inchesLeft = self._length % 12
return str(int(feet)) + "'" + str(inchesLeft) + '"'
def lengthString(self):
if self._entityType == "fence":
return self._feetString()
return self._inchesString() | 2,016 | 0 | 480 |
349f64833648c7a6bb0330713d700aa416d68e30 | 8,314 | py | Python | source/module/memory_helper_v2.py | siat-nlp/TTOS | 524ac690b01415818dd17b045692795db55f1552 | [
"MIT"
] | 14 | 2020-10-12T11:43:04.000Z | 2022-03-11T07:12:12.000Z | source/module/memory_helper_v2.py | siat-nlp/MGMA | 71b07cba6c18a916fe491f552402405387400294 | [
"Apache-2.0"
] | 1 | 2020-11-16T17:07:40.000Z | 2021-04-21T08:42:17.000Z | source/module/memory_helper_v2.py | siat-nlp/MGMA | 71b07cba6c18a916fe491f552402405387400294 | [
"Apache-2.0"
] | 3 | 2020-11-29T13:36:42.000Z | 2021-11-29T10:36:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: source/module/memory_helper.py
"""
import torch
import torch.nn as nn
| 47.238636 | 128 | 0.60332 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: source/module/memory_helper.py
"""
import torch
import torch.nn as nn
class KnowledgeMemoryv2(nn.Module):
def __init__(self,
query_size,
memory_size,
hidden_size,
max_hop=1,
num_layers=1,
dropout=0.0,
mode="mlp",
use_gpu=False):
super(KnowledgeMemoryv2, self).__init__()
assert (mode in ["general", "mlp"]), (
"Unsupported attention mode: {mode}"
)
self.query_size = query_size
self.memory_size = memory_size
self.hidden_size = hidden_size
self.max_hop = max_hop
self.num_layers = num_layers
self.dropout = dropout
self.mode = mode
self.use_gpu = use_gpu
self.rnn_input_size = self.query_size + self.memory_size
self.rnn = nn.GRU(input_size=self.rnn_input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
dropout=self.dropout if self.num_layers > 1 else 0,
batch_first=True)
map(nn.init.orthogonal_, self.rnn.all_weights)
self.linear_pointer = nn.Linear(self.query_size, self.memory_size, bias=False)
if self.mode == "general":
self.linear_query = nn.ModuleList([nn.Linear(self.query_size, self.memory_size, bias=False)
for _ in range(self.max_hop)])
elif self.mode == "mlp":
self.linear_query = nn.ModuleList([nn.Linear(self.query_size, self.hidden_size, bias=True)
for _ in range(self.max_hop)])
self.linear_memory = nn.ModuleList([nn.Linear(self.memory_size, self.hidden_size, bias=False)
for _ in range(self.max_hop)])
self.v = nn.ModuleList([nn.Linear(self.hidden_size, 1, bias=False)
for _ in range(self.max_hop)])
self.tanh = nn.Tanh()
self.softmax = nn.Softmax(dim=-1)
self.sigmoid = nn.Sigmoid()
self.linear_forget = nn.ModuleList([nn.Linear(self.query_size, self.memory_size, bias=False)
for _ in range(self.max_hop)])
self.linear_add = nn.ModuleList([nn.Linear(self.query_size, self.memory_size, bias=False)
for _ in range(self.max_hop)])
def memory_point(self, enc_hidden, kb_state_memory, mask=None):
query = enc_hidden[-1].unsqueeze(1)
assert self.memory_size == kb_state_memory.size(-1)
key = self.linear_pointer(query) # (batch_size, query_length, memory_size)
attn = torch.bmm(key, kb_state_memory.transpose(1, 2)) # (batch_size, query_length, memory_length)
if mask is not None:
mask = mask.unsqueeze(1).repeat(1, query.size(1), 1) # (batch_size, query_length, memory_length)
attn.masked_fill_(mask, -float("inf"))
attn = attn.squeeze(1)
selector = self.sigmoid(attn)
return selector
def memory_address(self, query, key_memory, hop, selector=None, mask=None):
if self.mode == "general":
assert self.memory_size == key_memory.size(-1)
key = self.linear_query[hop](query) # (batch_size, query_length, memory_size)
attn = torch.bmm(key, key_memory.transpose(1, 2)) # (batch_size, query_length, memory_length)
else:
# (batch_size, query_length, memory_length, hidden_size)
hidden_sum = self.linear_query[hop](query).unsqueeze(2) + \
self.linear_memory[hop](key_memory).unsqueeze(1)
key = self.tanh(hidden_sum)
attn = self.v[hop](key).squeeze(-1) # (batch_size, query_length, memory_length)
if selector is not None:
attn = attn * selector
if mask is not None:
attn.masked_fill_(mask, -float("inf"))
weights = self.softmax(attn) # (batch_size, query_length, memory_length)
return weights
def memory_update_v1(self, query, weights, kb_state_memory):
"""
query: Tensor(batch_size, query_length, query_size)
weights: Tensor(batch_size, query_length, memory_length)
"""
forget = self.linear_forget(query)
forget_weights = self.sigmoid(forget) # (batch_size, query_length, memory_size)
forget_memory = torch.bmm(weights.transpose(1, 2), forget_weights) # (batch_size, memory_length, memory_size)
temp_memory = kb_state_memory * (1 - forget_memory)
add = self.linear_add(query) # (batch_size, query_length, memory_size)
add_weights = self.sigmoid(add)
add_memory = torch.bmm(weights.transpose(1, 2), add_weights) # (batch_size, memory_length, memory_size)
final_memory = temp_memory + add_memory
return final_memory
def memory_update(self, query, key_memory, hop, mask=None):
"""
query: Tensor(batch_size, query_length, query_size)
key_memory: Tensor(batch_size, memory_length, memory_size)
hop: int
mask: Tensor(batch_size, memory_length)
"""
weights = self.memory_address(query, key_memory, hop, mask=mask) # (batch_size, query_length, memory_length)
forget = self.linear_forget[hop](query) # (batch_size, query_length, memory_size)
forget_weights = self.sigmoid(forget)
forget_memory = torch.bmm(weights.transpose(1, 2), forget_weights) # (batch_size, memory_length, memory_size)
temp_memory = key_memory * (1 - forget_memory)
add = self.linear_add[hop](query) # (batch_size, query_length, memory_size)
add_weights = self.sigmoid(add)
add_memory = torch.bmm(weights.transpose(1, 2), add_weights) # (batch_size, memory_length, memory_size)
final_memory = temp_memory + add_memory
return final_memory
def forward(self, query, kb_state_memory, kb_slot_memory, hidden, selector=None, mask=None):
"""
query: Tensor(batch_size, query_length, query_size)
kb_state_memory: Tensor(batch_size, memory_length, memory_size)
kb_slot_memory: Tensor(batch_size, memory_length, memory_size)
selector: Tensor(batch_size, memory_length)
mask: Tensor(batch_size, memory_length)
"""
if mask is not None:
mask = mask.unsqueeze(1).repeat(1, query.size(1), 1) # (batch_size, query_length, memory_length)
if selector is not None:
selector = selector.unsqueeze(2).repeat(1, 1, kb_state_memory.size(
-1)) # (batch_size, memory_length, memory_size)
for hop in range(self.max_hop):
if selector is not None:
key_memory = kb_state_memory * selector
else:
key_memory = kb_state_memory
weights = self.memory_address(query, key_memory, hop, mask=mask)
weighted_kb = torch.bmm(weights, kb_slot_memory) # (batch_size, query_length, memory_size)
# get intermediate hidden state
rnn_input = torch.cat([weighted_kb, query], dim=-1)
rnn_output, new_hidden = self.rnn(rnn_input, hidden)
new_query = new_hidden[-1].unsqueeze(1)
# key memory update
kb_state_memory = self.memory_update(new_query, kb_state_memory, hop, mask=mask)
'''
if selector is not None:
selector = selector.unsqueeze(2).repeat(1, 1, kb_state_memory.size(-1)) # (batch_size, memory_length, memory_size)
key_memory = kb_state_memory * selector
weights = self.memory_address(query, key_memory, hop=0, mask=mask)
weighted_kb = torch.bmm(weights, kb_slot_memory) # (batch_size, query_length, memory_size)
'''
final_weighted_kb = weighted_kb
final_weights = weights
final_kb_memory = kb_state_memory
return final_weighted_kb, final_weights, final_kb_memory
| 4,047 | 4,105 | 25 |
dd4abb2ea03219fe9b4715144457ad9fc8a13898 | 2,230 | py | Python | config/settings/env.example.py | hbvj99/market-api | 489c9433556002cb391b93cbd6486da739c2418a | [
"MIT"
] | 1 | 2021-08-28T05:30:40.000Z | 2021-08-28T05:30:40.000Z | config/settings/env.example.py | hbvj99/market-api | 489c9433556002cb391b93cbd6486da739c2418a | [
"MIT"
] | 1 | 2022-01-14T08:57:19.000Z | 2022-01-14T08:57:20.000Z | config/settings/env.example.py | hbvj99/market-api | 489c9433556002cb391b93cbd6486da739c2418a | [
"MIT"
] | 1 | 2022-01-11T10:14:27.000Z | 2022-01-11T10:14:27.000Z | from datetime import timedelta
from .base import *
SECRET_KEY = ''
DEBUG = True
ALLOWED_HOSTS = '*'
INSTALLED_APPS += [
'drf_yasg'
]
INTERNAL_IPS = ['127.0.0.1', ] # required for drf_yasg
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = ['rest_framework.authentication.SessionAuthentication'] + \
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Static
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# to make development easy
TIME_ZONE = 'UTC'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = ''
FROM_MAIL = ''
DEFAULT_FROM_EMAIL = ''
EMAIL_USE_TLS = True
TOKEN_TIMEOUT_DAYS = 2
if DEBUG is False:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# SIMPLE JWT
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(hours=7),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUDIENCE': None,
'ISSUER': None,
'AUTH_HEADER_TYPES': ('Bearer',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
}
# Security (SSL)
SESSION_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 63072000 # 2 years
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_SSL_REDIRECT = True
SECURE_REFERRER_POLICY = 'same-origin'
CSRF_COOKIE_SECURE = True
# Other secure headers
USE_X_FORWARDED_HOST = True
X_FRAME_OPTIONS = 'DENY'
# Token expiry in seconds
PASSWORD_RESET_TIMEOUT = 432000 # 4 days
| 22.989691 | 110 | 0.676682 | from datetime import timedelta
from .base import *
SECRET_KEY = ''
DEBUG = True
ALLOWED_HOSTS = '*'
INSTALLED_APPS += [
'drf_yasg'
]
INTERNAL_IPS = ['127.0.0.1', ] # required for drf_yasg
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = ['rest_framework.authentication.SessionAuthentication'] + \
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Static
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# to make development easy
TIME_ZONE = 'UTC'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = ''
FROM_MAIL = ''
DEFAULT_FROM_EMAIL = ''
EMAIL_USE_TLS = True
TOKEN_TIMEOUT_DAYS = 2
if DEBUG is False:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# SIMPLE JWT
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(hours=7),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUDIENCE': None,
'ISSUER': None,
'AUTH_HEADER_TYPES': ('Bearer',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
}
# Security (SSL)
SESSION_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 63072000 # 2 years
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_SSL_REDIRECT = True
SECURE_REFERRER_POLICY = 'same-origin'
CSRF_COOKIE_SECURE = True
# Other secure headers
USE_X_FORWARDED_HOST = True
X_FRAME_OPTIONS = 'DENY'
# Token expiry in seconds
PASSWORD_RESET_TIMEOUT = 432000 # 4 days
| 0 | 0 | 0 |
7ecf764ca531cde0ebfa413f6cef01952df65a81 | 416 | py | Python | accounts/forms.py | EstherWaweru/Ecommerce-Backend | 2e0c1328d669c51e0786bde31c5feca648897875 | [
"MIT"
] | 5 | 2021-02-24T15:17:36.000Z | 2022-02-26T22:25:06.000Z | accounts/forms.py | EstherWaweru/Ecommerce-Backend | 2e0c1328d669c51e0786bde31c5feca648897875 | [
"MIT"
] | 5 | 2021-02-24T13:52:50.000Z | 2021-04-21T15:37:23.000Z | accounts/forms.py | EstherWaweru/Ecommerce-Backend | 2e0c1328d669c51e0786bde31c5feca648897875 | [
"MIT"
] | 1 | 2021-02-17T14:12:19.000Z | 2021-02-17T14:12:19.000Z | from django.contrib.auth.forms import UserCreationForm
from .models import User
from django import forms
| 32 | 73 | 0.757212 | from django.contrib.auth.forms import UserCreationForm
from .models import User
from django import forms
class SignUpForm(UserCreationForm):
class Meta:
model=User
fields=('email','first_name','last_name','password1','password2')
class LoginForm(forms.Form):
email=forms.CharField()
password=forms.CharField(widget=forms.PasswordInput)
remember_me=forms.BooleanField(required=False)
| 0 | 266 | 44 |
2a466d17f5958a202a4ed6b24cc4a465e028f11d | 2,298 | py | Python | catalogService/handler_apache.py | sassoftware/-catalog-service | 4b68af224842a2e93f7a4bacdac1fc262ae7b917 | [
"Apache-2.0"
] | 3 | 2015-06-10T19:31:17.000Z | 2017-11-29T07:04:12.000Z | catalogService/handler_apache.py | sassoftware/-catalog-service | 4b68af224842a2e93f7a4bacdac1fc262ae7b917 | [
"Apache-2.0"
] | null | null | null | catalogService/handler_apache.py | sassoftware/-catalog-service | 4b68af224842a2e93f7a4bacdac1fc262ae7b917 | [
"Apache-2.0"
] | 2 | 2016-02-04T00:51:15.000Z | 2020-07-24T00:22:44.000Z | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from conary.lib import coveragehook
from catalogService.utils import logger as rlogging
from restlib.http import modpython
from mint import config
from mint.db.database import Database
from catalogService.handler import getHandler
from catalogService.rest.database import RestDatabase
def handler(req):
"""
The presence of this function in the module allows it to be added directly
into apache as a mod_python handler.
The function is for testing purposes only.
"""
coveragehook.install()
mintCfgPath = os.path.join(req.document_root(), '..', '..', 'mint.conf')
mintcfg = config.getConfig(mintCfgPath)
mintdb = Database(mintcfg)
restdb = RestDatabase(mintcfg, mintdb)
topLevel = os.path.join(mintcfg.basePath)
_handler = ApacheRESTHandler(topLevel, restdb)
return _handler.handle(req)
| 31.054054 | 78 | 0.735857 | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from conary.lib import coveragehook
from catalogService.utils import logger as rlogging
from restlib.http import modpython
from mint import config
from mint.db.database import Database
from catalogService.handler import getHandler
from catalogService.rest.database import RestDatabase
class Request(modpython.ModPythonRequest):
_helpDir = '/usr/share/catalog-service/help'
_driverHelpDir = 'drivers/%(driverName)s'
class ModPythonHttpHandler(modpython.ModPythonHttpHandler):
requestClass = Request
class ApacheRESTHandler(object):
    """Bridges Apache/mod_python requests into the catalog-service REST handler."""

    httpHandlerClass = ModPythonHttpHandler

    def __init__(self, pathPrefix, restdb):
        self.pathPrefix = pathPrefix
        self.handler = getHandler(restdb, self.httpHandlerClass)

    def handle(self, req):
        """Attach a per-request logger, then hand the request off."""
        log = self.getLogger(req)
        rlogging.LoggerCallback.logger = log
        self.handler.setLogger(log)
        return self.handler.handle(req, pathPrefix=self.pathPrefix)

    def getLogger(self, req):
        """Return a catalog-service logger tagged with the client's address."""
        log = rlogging.getLogger('catalog-service', None)
        log.setAddress(req.connection.remote_ip)
        return log
def handler(req):
    """
    The presence of this function in the module allows it to be added directly
    into apache as a mod_python handler.
    The function is for testing purposes only.
    """
    coveragehook.install()
    # mint.conf is expected two directories above the Apache document root.
    mintCfgPath = os.path.join(req.document_root(), '..', '..', 'mint.conf')
    mintcfg = config.getConfig(mintCfgPath)
    mintdb = Database(mintcfg)
    restdb = RestDatabase(mintcfg, mintdb)
    topLevel = os.path.join(mintcfg.basePath)
    # NOTE(review): a fresh ApacheRESTHandler (and DB wiring) is rebuilt on
    # every request — presumably fine for the stated testing use; confirm.
    _handler = ApacheRESTHandler(topLevel, restdb)
    return _handler.handle(req)
| 452 | 316 | 69 |
ea46f3168358363f5a637b7a03fc6a11c0722d89 | 4,805 | py | Python | udaru_anomaly_detection/cli/insert.py | nearform/udaru-anomaly-detection | ffae43713ef51135f6cf32e9244a5af098f888fe | [
"Apache-2.0"
] | 4 | 2018-06-11T15:35:07.000Z | 2021-03-16T14:19:31.000Z | udaru_anomaly_detection/cli/insert.py | nearform/udaru-anomaly-detection | ffae43713ef51135f6cf32e9244a5af098f888fe | [
"Apache-2.0"
] | null | null | null | udaru_anomaly_detection/cli/insert.py | nearform/udaru-anomaly-detection | ffae43713ef51135f6cf32e9244a5af098f888fe | [
"Apache-2.0"
] | 1 | 2019-04-04T16:36:19.000Z | 2019-04-04T16:36:19.000Z |
import datetime
from udaru_anomaly_detection.trail.insert import trail_insert
from udaru_anomaly_detection.tests.generator import generate_resource
| 30.605096 | 75 | 0.441831 |
import datetime
from udaru_anomaly_detection.trail.insert import trail_insert
from udaru_anomaly_detection.tests.generator import generate_resource
def _insert_authorization(when, user, resource, dataset, expect,
                          ip='64.64.117.58'):
    """Insert one 'isUserAuthorized' trail event.

    Factors out the boilerplate shared by every call site below: the `who`
    dict is derived from `user`, the port / action / result fields are fixed,
    and `dataset`/`expect` label the event for later evaluation.
    """
    trail_insert(
        when=when,
        who={
            'id': 'organization/{}'.format(user),
            'user': user,
            'organization': 'organization'
        },
        what='authorization:isUserAuthorized',
        subject={
            'id': resource,
            'action': 'action'
        },
        where={
            'ip': ip,
            'port': '35246'
        },
        meta={
            'result': True,
            'dataset': dataset,
            'expect': expect
        }
    )


def insert(args):
    """Populate the trail with synthetic authorization events.

    Four batches are inserted:
      1. 100 generated 'train' resources, one per day from 2017-01-01.
      2. 10 generated 'test' resources, one per day from 2018-01-01
         (expected valid).
      3. A hand-picked list of malformed/malicious resource ids from
         2018-02-01 (expected invalid).
      4. Geo-velocity cases from 2018-03-01: the same user authorizing from
         two IPs `duration` hours apart; the second event is valid only if
         the travel is plausible.

    `args` is accepted for CLI-dispatch compatibility and is unused.
    """
    # Batch 1: training data.
    for resource_i, resource in enumerate(generate_resource(100, 'train')):
        print(f'insert [train]: {resource}')
        _insert_authorization(
            when=datetime.datetime(2017, 1, 1) + datetime.timedelta(days=resource_i),
            user='resource_user', resource=resource,
            dataset='train', expect='NA')

    # Batch 2: well-formed test data.
    for resource_i, resource in enumerate(generate_resource(10, 'test')):
        print(f'insert [test]: {resource}')
        _insert_authorization(
            when=datetime.datetime(2018, 1, 1) + datetime.timedelta(days=resource_i),
            user='resource_user', resource=resource,
            dataset='test', expect='valid')

    # Batch 3: malformed / malicious resource identifiers the detector
    # should flag (path traversal, fork bomb, too short/long, bad syntax).
    invalid_resources = [
        '../../../passwd',
        ':(){ :|: & };:',
        'a',
        'a' * 70,
        'res::ricky:/sl/jennifersaunders',
        'res:/sl/:ricky:/jennifersaunders'
    ]
    for resource_i, resource in enumerate(invalid_resources):
        print(f'insert [test]: {resource}')
        _insert_authorization(
            when=datetime.datetime(2018, 2, 1) + datetime.timedelta(days=resource_i),
            user='resource_user', resource=resource,
            dataset='test', expect='invalid')

    # Batch 4: IP-change (geo-velocity) cases, one user per scenario.
    nyc_ipaddress = '64.64.117.58'     # New York City
    wdc_ipaddress = '173.239.197.169'  # Washington DC
    lon_ipaddress = '5.101.142.229'    # London
    ip_inserts = [
        (nyc_ipaddress, lon_ipaddress, 9, True),
        (nyc_ipaddress, wdc_ipaddress, 2, True),
        (nyc_ipaddress, lon_ipaddress, 2, False)
    ]
    for user_i, (from_ip, to_ip, duration, valid) in enumerate(ip_inserts):
        print(f'insert [test]: {from_ip} -> {to_ip}: {duration}h')
        base = datetime.datetime(2018, 3, 1) + datetime.timedelta(days=user_i)
        user = f'user_{user_i}'
        _insert_authorization(
            when=base, user=user, resource='res:bb185024/iptest',
            dataset='test', expect='valid', ip=from_ip)
        _insert_authorization(
            when=base + datetime.timedelta(hours=duration),
            user=user, resource='res:bb185024/iptest',
            dataset='test', expect='valid' if valid else 'invalid', ip=to_ip)
| 4,631 | 0 | 23 |
de4cf2d04f8b6b65c62b52449a76f4089e5a7ae5 | 1,536 | py | Python | face_detection_mtcnn.py | zjxgithub/mtcnn-pytorch | b0a76c84fc2794e898aff3465bdeffd013616493 | [
"MIT"
] | null | null | null | face_detection_mtcnn.py | zjxgithub/mtcnn-pytorch | b0a76c84fc2794e898aff3465bdeffd013616493 | [
"MIT"
] | null | null | null | face_detection_mtcnn.py | zjxgithub/mtcnn-pytorch | b0a76c84fc2794e898aff3465bdeffd013616493 | [
"MIT"
] | null | null | null | from src import detect_faces, show_bboxes
from PIL import Image
import os

# Restrict visible CUDA devices to GPU 1.
os.environ["CUDA_VISIBLE_DEVICES"] = '1'

img_path = '/net/deepfake-defense/datasets/CelebA/img/img_celeba/'
pert_path = '/net/deepfake-defense/datasets/CelebA/img/MTCNN_ifgsm/'

# Run MTCNN on the first 2000 CelebA images, before and after perturbation,
# and record both detection sets side by side as CSV.  The `with` block
# fixes the original's leaked (never-closed) file handle.
with open('/home/zhujunxiao/protect_face_identity/face_image_protection/MTCNN/mtcnn-pytorch/results/MTCNN_detection_result_celeba.csv', 'w') as result_f:
    result_f.write('image, detected bounding box, detected boundingbox(after perturbation)\n')
    for img_index in range(1, 2001):
        img_filename = format(img_index, '06d') + '.jpg'
        print(img_filename)
        # Detections on the clean image (resized to 224x224).
        img = Image.open(img_path + img_filename).resize((224, 224))
        bounding_boxes, _ = detect_faces(img)
        bounding_box_str = [' '.join(str(x) for x in box) for box in bounding_boxes]
        # Detections on the perturbed counterpart.
        img = Image.open(pert_path + img_filename).resize((224, 224))
        pert_bounding_boxes, _ = detect_faces(img)
        pert_bounding_box_str = [' '.join(str(x) for x in box) for box in pert_bounding_boxes]
        # CSV row: filename, clean boxes (';'-separated), perturbed boxes.
        result_f.write(img_filename + ',')
        result_f.write(';'.join(bounding_box_str) + ',')
        result_f.write(';'.join(pert_bounding_box_str) + '\n')
| 48 | 146 | 0.692057 | from src import detect_faces, show_bboxes
from PIL import Image
import os
# Restrict visible CUDA devices to GPU 1.
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
img_path = '/net/deepfake-defense/datasets/CelebA/img/img_celeba/'
pert_path = '/net/deepfake-defense/datasets/CelebA/img/MTCNN_ifgsm/'
# NOTE(review): this handle is never closed/flushed explicitly — the script
# relies on interpreter exit; consider a `with` block.
result_f = open('/home/zhujunxiao/protect_face_identity/face_image_protection/MTCNN/mtcnn-pytorch/results/MTCNN_detection_result_celeba.csv', 'w')
result_f.write('image, detected bounding box, detected boundingbox(after perturbation)\n')
# Compare MTCNN detections on clean vs. perturbed copies of the first
# 2000 CelebA images; one CSV row per image.
for img_index in range(1, 2001):
    img_filename = format(img_index, '06d') + '.jpg'
    print(img_filename)
    # Detections on the clean image (resized to 224x224).
    img = Image.open(img_path + img_filename)
    img = img.resize((224, 224))
    bounding_boxes, landmarks = detect_faces(img)
    bounding_box_str = []
    for box in bounding_boxes:
        bounding_box_str.append(' '.join([str(x) for x in box]))
    # for i in range(len(bounding_boxes)):
    #     result_f.write(img_filename + ',' + ' '.join([str(x) for x in bounding_boxes[i]]))
    #     result_f.write(',' + ' '.join([str(x) for x in landmarks[i]]) + '\n')
    # Detections on the perturbed counterpart.
    pert_img = pert_path + img_filename
    img = Image.open(pert_img)
    img = img.resize((224, 224))
    pert_bounding_boxes, _ = detect_faces(img)
    pert_bounding_box_str = []
    for box in pert_bounding_boxes:
        pert_bounding_box_str.append(' '.join([str(x) for x in box]))
    # CSV row: filename, clean boxes (';'-separated), perturbed boxes.
    result_f.write(img_filename + ',')
    result_f.write(';'.join([x for x in bounding_box_str]) + ',')
    result_f.write(';'.join([x for x in pert_bounding_box_str]) + '\n')
| 0 | 0 | 0 |
6dafa7f39f8db7ea07fba93e25e731b3e174ed07 | 3,897 | py | Python | magcoords_v0.17.py | gregstarr/cartomap | 46f0917c4315dede1a12a663de80cdde0ae73393 | [
"MIT"
] | 5 | 2019-06-21T01:18:20.000Z | 2021-03-21T22:17:40.000Z | magcoords_v0.17.py | mrinalghosh/cartomap | 741c5916ad180b382dd1e60e5c8bb5168899c878 | [
"MIT"
] | 1 | 2019-06-10T13:05:18.000Z | 2019-06-10T13:05:18.000Z | magcoords_v0.17.py | mrinalghosh/cartomap | 741c5916ad180b382dd1e60e5c8bb5168899c878 | [
"MIT"
] | 4 | 2018-08-29T00:08:39.000Z | 2020-06-02T21:51:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 14:08:42 2019
@author: smrak
"""
import numpy as np
from cartomap import geogmap as gm
from datetime import datetime
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import apexpy as ap

# Geographic window of the map: 0..60N latitude, 140W..0E longitude.
latlim = [-0, 60]
lonlim = [-140, 0]
date = datetime(2017, 8, 21, 6)

fig = gm.plotCartoMap(projection='plate', title='Geomagnetic coordinates: MLAT/MLT',
                      latlim=latlim, lonlim=lonlim,
                      parallels=[0, 10, 20, 40, 60, 80, 90],
                      meridians=[-220, -180, -160, -140, -120, -100, -80, -60, -40, 0],
                      grid_linewidth=1,
                      figure=True,
                      states=False)

# Apex magnetic-coordinate converter for the chosen epoch.
A = ap.Apex(date=date)
#glon = np.arange(lonlim[0]-40, lonlim[1] + 40.1, 1)
#glat = np.arange(latlim[0], latlim[1] + 0.1, 1)
#longrid, latgrid = np.meshgrid(glon, glat)
mlat_levels = np.arange(-90, 90.1, 10)
#mlat_levels = np.array([40,50,60,70])
# mlon
#mlat, mlon = A.convert(latgrid, longrid, 'geo', 'apex')
#mlon_levels = np.arange(-180,180,20)
# mlt
#mlat, mlon = A.convert(latgrid, longrid, 'geo', 'mlt', datetime=date)
mlon_levels = np.arange(0, 24.2, 2)
#ay = plt.contour(glon,glat, mlat, levels = mlat_levels, colors='red', transform=ccrs.PlateCarree())
#ax = plt.contour(glon,glat, mlon, levels = mlon_levels, colors='blue', linestyles ='solid', transform=ccrs.PlateCarree())
#ax.clabel(inline=True, fmt = '%d', fontsize=12, colors='blue')
#ay.clabel(inline=True, fmt = '%d', fontsize=12, colors='red')
# MLATS
mlat_range = np.arange(mlat_levels[0], mlat_levels[-1] + 0.1, 0.1)
mlon_range = np.arange(mlon_levels[0], 24.3, 0.1)
# Constant-MLT lines: convert each fixed MLT meridian to geographic
# coordinates, draw the portion inside the map window, and label it.
for mlon in mlon_levels:
    MLON = mlon * np.ones(mlat_range.size)
    y, x = A.convert(mlat_range, MLON, 'mlt', 'geo', datetime=date)
    if int(mlon) == 0:  # or int(mlon) == 2:
        continue
    inmap = np.logical_and(x >= lonlim[0], x <= lonlim[1])
    if np.sum(inmap) > 10:
        plt.plot(np.unwrap(x, 180), np.unwrap(y, 90), 'b', lw=2, transform=ccrs.PlateCarree())
        # Place the label near the middle latitude of the window.
        ix = abs(y - np.mean(latlim)).argmin()
        mx = x[ix] - 4
        my = np.mean(latlim)
        # Fix: `!= 0` replaces the original `is not 0` identity test, which
        # only worked via CPython's small-int caching (and is redundant with
        # the `continue` above — kept for safety).
        if np.logical_and(mx >= lonlim[0], mx <= lonlim[1]) and int(mlon) != 0:
            plt.text(mx, my, str(int(mlon)), color='k',
                     fontsize=14, backgroundcolor='white', transform=ccrs.PlateCarree())
# Constant-MLAT lines, labelled near the middle longitude of the window.
for mlat in mlat_levels:
    MLAT = mlat * np.ones(mlon_range.size)
    gy, gx = A.convert(MLAT, mlon_range, 'mlt', 'geo', datetime=date)
    inmap = np.logical_and(gy >= latlim[0], gy <= latlim[1])
    if np.sum(inmap) > 10:
        plt.plot(np.unwrap(gx, 180), np.unwrap(gy, 90), 'b', transform=ccrs.PlateCarree())
        ix = abs(gx - np.mean(lonlim)).argmin()
        mx = np.mean(lonlim)
        my = gy[ix] - 0.5
        if np.logical_and(mx >= lonlim[0], mx <= lonlim[1]) and \
                np.logical_and(my >= latlim[0], my <= latlim[1]):
            ix = abs(gx - np.mean(lonlim)).argmin()
            plt.text(mx, my, str(int(mlat)), color='k',
                     fontsize=14, backgroundcolor='white', transform=ccrs.PlateCarree())
#Functional
fig = gm.plotCartoMap(projection='plate',
title='Geomagnetic coordinates: MLAT/MLT',
latlim=latlim, lonlim=lonlim,
date=date,
#parallels = [0,10,20, 40, 60, 80, 90],
#meridians = [-220, -180, -160,-140,-120,-100, -80,-60, -40, 0],
grid_linewidth = 1,
figure = True,
states = False,
geomag = True,
gmagtype = 'apex',
mlon_cs = 'mlt',
mlon_levels = mlon_levels,
mlat_levels = mlat_levels,
mlon_colors='k',
mlat_colors='k',
mlat_labels=False) | 40.175258 | 123 | 0.561714 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 14:08:42 2019
@author: smrak
"""
import numpy as np
from cartomap import geogmap as gm
from datetime import datetime
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import apexpy as ap
# Geographic window of the map: 0..60N latitude, 140W..0E longitude.
latlim = [-0,60]
lonlim= [-140,0]
date = datetime(2017, 8, 21, 6)
fig = gm.plotCartoMap(projection='plate', title='Geomagnetic coordinates: MLAT/MLT',
                      latlim=latlim, lonlim=lonlim,
                      parallels = [0,10,20, 40, 60, 80, 90],
                      meridians = [-220, -180, -160,-140,-120,-100, -80,-60, -40, 0],
                      grid_linewidth=1,
                      figure=True,
                      states=False)
# Apex magnetic-coordinate converter for the chosen epoch.
A = ap.Apex(date=date)
#glon = np.arange(lonlim[0]-40, lonlim[1] + 40.1, 1)
#glat = np.arange(latlim[0], latlim[1] + 0.1, 1)
#longrid, latgrid = np.meshgrid(glon, glat)
# Magnetic-latitude contour levels, every 10 degrees.
mlat_levels = np.arange(-90, 90.1, 10)
#mlat_levels = np.array([40,50,60,70])
# mlon
#mlat, mlon = A.convert(latgrid, longrid, 'geo', 'apex')
#mlon_levels = np.arange(-180,180,20)
# mlt
#mlat, mlon = A.convert(latgrid, longrid, 'geo', 'mlt', datetime=date)
# MLT levels every 2 hours (0..24).
mlon_levels = np.arange(0,24.2,2)
#ay = plt.contour(glon,glat, mlat, levels = mlat_levels, colors='red', transform=ccrs.PlateCarree())
#ax = plt.contour(glon,glat, mlon, levels = mlon_levels, colors='blue', linestyles ='solid', transform=ccrs.PlateCarree())
#ax.clabel(inline=True, fmt = '%d', fontsize=12, colors='blue')
#ay.clabel(inline=True, fmt = '%d', fontsize=12, colors='red')
# MLATS
# Fine sampling along each contour line used by the plotting loops below.
mlat_range = np.arange(mlat_levels[0], mlat_levels[-1]+0.1, 0.1)
mlon_range = np.arange(mlon_levels[0], 24.3, 0.1)
for mlon in mlon_levels:
MLON = mlon * np.ones(mlat_range.size)
y, x = A.convert(mlat_range,MLON, 'mlt', 'geo', datetime=date)
if int(mlon) == 0:# or int(mlon) == 2:
continue
inmap = np.logical_and(x >= lonlim[0], x <= lonlim[1])
if np.sum(inmap) > 10:
plt.plot(np.unwrap(x,180), np.unwrap(y,90), 'b', lw=2, transform=ccrs.PlateCarree())
ix = abs(y-np.mean(latlim)).argmin()
mx = x[ix]-4
my = np.mean(latlim)
if np.logical_and(mx >= lonlim[0], mx <= lonlim[1]) and int(mlon) is not 0:
plt.text(mx, my, str(int(mlon)), color='k',
fontsize=14, backgroundcolor='white',transform=ccrs.PlateCarree())
for mlat in mlat_levels:
MLAT = mlat * np.ones(mlon_range.size)
gy,gx = A.convert(MLAT, mlon_range, 'mlt', 'geo', datetime=date)
inmap = np.logical_and(gy >= latlim[0], gy <= latlim[1])
if np.sum(inmap) > 10:
plt.plot(np.unwrap(gx, 180), np.unwrap(gy, 90), 'b', transform=ccrs.PlateCarree())
ix = abs(gx-np.mean(lonlim)).argmin()
mx = np.mean(lonlim)
my = gy[ix]-0.5
if np.logical_and(mx >= lonlim[0], mx <= lonlim[1]) and \
np.logical_and(my >= latlim[0], my <= latlim[1]):
ix = abs(gx-np.mean(lonlim)).argmin()
plt.text(mx, my, str(int(mlat)), color='k',
fontsize=14, backgroundcolor='white',transform=ccrs.PlateCarree())
#Functional
fig = gm.plotCartoMap(projection='plate',
title='Geomagnetic coordinates: MLAT/MLT',
latlim=latlim, lonlim=lonlim,
date=date,
#parallels = [0,10,20, 40, 60, 80, 90],
#meridians = [-220, -180, -160,-140,-120,-100, -80,-60, -40, 0],
grid_linewidth = 1,
figure = True,
states = False,
geomag = True,
gmagtype = 'apex',
mlon_cs = 'mlt',
mlon_levels = mlon_levels,
mlat_levels = mlat_levels,
mlon_colors='k',
mlat_colors='k',
mlat_labels=False) | 0 | 0 | 0 |
90b32c2a68a416c3472e5dabbfb1919af076c658 | 4,194 | py | Python | agent/agents.py | diegcr/2D-Motion-Retargeting | 2b4acedb45a281d2867c812fce6063dc68b8e88b | [
"MIT"
] | 2 | 2019-08-20T18:31:44.000Z | 2019-08-20T18:39:04.000Z | agent/agents.py | diegcr/2D-Motion-Retargeting | 2b4acedb45a281d2867c812fce6063dc68b8e88b | [
"MIT"
] | null | null | null | agent/agents.py | diegcr/2D-Motion-Retargeting | 2b4acedb45a281d2867c812fce6063dc68b8e88b | [
"MIT"
] | null | null | null | from agent.base_agent import BaseAgent
from functional.motion import get_foot_vel
import torch
| 47.123596 | 114 | 0.596328 | from agent.base_agent import BaseAgent
from functional.motion import get_foot_vel
import torch
class Agent2x(BaseAgent):
    """Agent for the two-factor (2x) cross-retargeting network.

    Works on input pairs 1/2 plus their cross-combinations 12/21; `forward`
    returns the decoded outputs together with a dict of named losses.
    """

    def __init__(self, config, net):
        super(Agent2x, self).__init__(config, net)
        self.inputs_name = ['input1', 'input2', 'input12', 'input21']
        self.targets_name = ['target1', 'target2', 'target12', 'target21']

    def forward(self, data):
        """Run one pass over `data` and return (outputs_dict, losses)."""
        inputs = [data[key].to(self.device) for key in self.inputs_name]
        targets = [data[key].to(self.device) for key in self.targets_name]

        losses = {}
        if self.use_triplet:
            outputs, motionvecs, staticvecs = self.net.cross_with_triplet(*inputs)
            # Weighted triplet terms over the motion/static latent codes
            # (loss semantics come from BaseAgent.tripletloss).
            w = self.triplet_weight
            losses['m_tpl1'] = w * self.tripletloss(motionvecs[2], motionvecs[0], motionvecs[1])
            losses['m_tpl2'] = w * self.tripletloss(motionvecs[3], motionvecs[1], motionvecs[0])
            losses['b_tpl1'] = w * self.tripletloss(staticvecs[2], staticvecs[0], staticvecs[1])
            losses['b_tpl2'] = w * self.tripletloss(staticvecs[3], staticvecs[1], staticvecs[0])
        else:
            outputs = self.net.cross(inputs[0], inputs[1])

        # Reconstruction losses: keys 'rec1'/'rec2'/'rec12'/'rec21'
        # ('target' prefix stripped from the data key).
        for i, key in enumerate(self.targets_name):
            losses['rec' + key[6:]] = self.mse(outputs[i], targets[i])

        if self.use_footvel_loss:
            acc = 0
            for i in range(len(targets)):
                acc += self.footvel_loss_weight * self.mse(get_foot_vel(outputs[i], self.foot_idx),
                                                           get_foot_vel(targets[i], self.foot_idx))
            losses['foot_vel'] = acc

        outputs_dict = {
            "output1": outputs[0],
            "output2": outputs[1],
            "output12": outputs[2],
            "output21": outputs[3],
        }
        return outputs_dict, losses
class Agent3x(BaseAgent):
    """Agent for the three-factor (3x) cross-retargeting network.

    `forward` decodes every target combination listed in targets_name and
    collects a dict of named training losses alongside the outputs.
    """
    def __init__(self, config, net):
        super(Agent3x, self).__init__(config, net)
        # The extra cross-combined inputs are only needed to feed the
        # triplet losses computed in forward().
        if self.use_triplet:
            self.inputs_name = ['input1', 'input2', 'input121', 'input112',
                                'input122', 'input212', 'input221', 'input211']
        else:
            self.inputs_name = ['input1', 'input2']
        self.targets_name = ['target111', 'target222', 'target121', 'target112',
                             'target122', 'target212', 'target221', 'target211']
    def forward(self, data):
        """Run one pass over `data`; returns (outputs_dict, losses).

        `data` must contain every key named in inputs_name/targets_name;
        tensors are moved to self.device before use.
        """
        inputs = [data[name].to(self.device) for name in self.inputs_name]
        targets = [data[name].to(self.device) for name in self.targets_name]
        # update loss metric
        losses = {}
        if self.use_triplet:
            outputs, motionvecs, bodyvecs, viewvecs = self.net.cross_with_triplet(inputs)
            # Weighted triplet terms over the motion/body/view latent codes
            # (loss semantics come from BaseAgent.tripletloss).
            losses['m_tpl1'] = self.triplet_weight * self.tripletloss(motionvecs[2], motionvecs[0], motionvecs[1])
            losses['m_tpl2'] = self.triplet_weight * self.tripletloss(motionvecs[3], motionvecs[1], motionvecs[0])
            losses['b_tpl1'] = self.triplet_weight * self.tripletloss(bodyvecs[2], bodyvecs[0], bodyvecs[1])
            losses['b_tpl2'] = self.triplet_weight * self.tripletloss(bodyvecs[3], bodyvecs[1], bodyvecs[0])
            losses['v_tpl1'] = self.triplet_weight * self.tripletloss(viewvecs[2], viewvecs[0], viewvecs[1])
            losses['v_tpl2'] = self.triplet_weight * self.tripletloss(viewvecs[3], viewvecs[1], viewvecs[0])
        else:
            outputs = self.net.cross(inputs[0], inputs[1])
        # Reconstruction losses keyed 'rec111', 'rec222', ... ('target'
        # prefix stripped from the data key).
        for i, target in enumerate(targets):
            losses['rec' + self.targets_name[i][6:]] = self.mse(outputs[i], target)
        if self.use_footvel_loss:
            losses['foot_vel'] = 0
            for i, target in enumerate(targets):
                losses['foot_vel'] += self.footvel_loss_weight * self.mse(get_foot_vel(outputs[i], self.foot_idx),
                                                                          get_foot_vel(target, self.foot_idx))
        # Mirror targets_name into output keys: 'output111', 'output222', ...
        outputs_dict = {}
        for i, name in enumerate(self.targets_name):
            outputs_dict['output' + name[6:]] = outputs[i]
        return outputs_dict, losses
| 3,937 | 8 | 152 |
f5e992e91eca6adf82b3dad2c408676155551138 | 1,136 | py | Python | visan/examples/SCI_NL__1P_spectral_readout.py | ercumentaksoy/visan | 57c9257d80622fc0ab03591db48cc2155bd12f1b | [
"MIT",
"BSD-3-Clause"
] | 7 | 2020-04-09T05:21:03.000Z | 2022-01-23T18:39:02.000Z | visan/examples/SCI_NL__1P_spectral_readout.py | ercumentaksoy/visan | 57c9257d80622fc0ab03591db48cc2155bd12f1b | [
"MIT",
"BSD-3-Clause"
] | 7 | 2020-01-05T19:19:20.000Z | 2020-05-27T09:41:49.000Z | visan/examples/SCI_NL__1P_spectral_readout.py | ercumentaksoy/visan | 57c9257d80622fc0ab03591db48cc2155bd12f1b | [
"MIT",
"BSD-3-Clause"
] | 4 | 2020-04-18T14:11:22.000Z | 2021-11-10T02:27:49.000Z | # This is an example VISAN script for the SCI_NL__1P product
# Make sure to set the 'products-file directory' option in the VISAN Preferences panel to
# a directory containing SCI_NL__1P products.
# This example will then take the first product it finds in this directory and
# for that product plot the measured limb spectra for the range 290nm - 450nm
run()
| 34.424242 | 98 | 0.712148 | # This is an example VISAN script for the SCI_NL__1P product
# Make sure to set the 'products-file directory' option in the VISAN Preferences panel to
# a directory containing SCI_NL__1P products.
# This example will then take the first product it finds in this directory and
# for that product plot the measured limb spectra for the range 290nm - 450nm
def run():
    """Plot the measured limb spectra (290-450nm) of the first SCI_NL__1P product found."""
    import glob
    import wx

    productdir = str(wx.Config.Get().Read('DirectoryLocation/Products'))
    # Every '.child' file in the products directory whose name starts with 'SCI_NL__1P'.
    candidates = glob.glob(os.path.join(productdir, "SCI_NL__1P*.child"))
    if not candidates:
        print("Could not find any SCI_NLC_1P files in directory '" + productdir + "'")
        return
    first = candidates[0]
    # Only the first match is ingested, restricted to 290-450nm limb data.
    record = harp.import_product(first, "wavelength>=290;wavelength<=450", "data=limb")
    plot(record, showpropertypanel=True, name=os.path.basename(first),
         title="SCI_NLC_1P spectral readout example (limb: 290-450nm)")
    wplot(record, colortable='RedToGreen', projection="Mollweide")
run()
| 748 | 0 | 23 |
aacb1e500ae26ed3efe067878478f2b7a74cc7a0 | 154 | py | Python | wsgi_multi/wsgi.py | ned2/dash-embed-recipes | ff495afd0e1293125957c86cac7b3953521d4895 | [
"MIT"
] | 1 | 2019-01-21T12:38:27.000Z | 2019-01-21T12:38:27.000Z | wsgi_multi/wsgi.py | ned2/dash-embed-recipes | ff495afd0e1293125957c86cac7b3953521d4895 | [
"MIT"
] | null | null | null | wsgi_multi/wsgi.py | ned2/dash-embed-recipes | ff495afd0e1293125957c86cac7b3953521d4895 | [
"MIT"
] | null | null | null | from server import server
from app1 import app as app1
from app2 import app as app2
# Switch on each app's dev tooling with debug enabled — presumably Dash
# apps sharing the imported `server`; confirm against app1/app2 modules.
app1.enable_dev_tools(debug=True)
app2.enable_dev_tools(debug=True)
| 19.25 | 33 | 0.818182 | from server import server
from app1 import app as app1
from app2 import app as app2
# Switch on each app's dev tooling with debug enabled — presumably Dash
# apps sharing the imported `server`; confirm against app1/app2 modules.
app1.enable_dev_tools(debug=True)
app2.enable_dev_tools(debug=True)
| 0 | 0 | 0 |
06d9344638a0e4528c9018bd45590f8835132b7c | 1,688 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/docker/plugins/module_utils/socket_helper.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 7 | 2021-11-16T04:05:42.000Z | 2022-02-19T21:14:29.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/docker/plugins/module_utils/socket_helper.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/docker/plugins/module_utils/socket_helper.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2022-03-01T05:43:07.000Z | 2022-03-01T05:43:07.000Z | # Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import os
import os.path
import socket as pysocket
from ansible.module_utils.six import PY3
| 31.259259 | 108 | 0.677133 | # Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import os
import os.path
import socket as pysocket
from ansible.module_utils.six import PY3
def make_unblocking(sock):
    """Switch *sock* into non-blocking mode, whatever kind of socket it is.

    Tries, in order: a wrapped Python-2 style `_sock`, a `setblocking`
    method, and finally raw fcntl flag manipulation on the file descriptor.
    """
    if hasattr(sock, '_sock'):
        sock._sock.setblocking(0)
        return
    if hasattr(sock, 'setblocking'):
        sock.setblocking(0)
        return
    flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFL)
    fcntl.fcntl(sock.fileno(), fcntl.F_SETFL, flags | os.O_NONBLOCK)
def _empty_writer(msg):
pass
def shutdown_writing(sock, log=_empty_writer):
if hasattr(sock, 'shutdown_write'):
sock.shutdown_write()
elif hasattr(sock, 'shutdown'):
try:
sock.shutdown(pysocket.SHUT_WR)
except TypeError as e:
# probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
log('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e))
sock.shutdown()
elif PY3 and isinstance(sock, getattr(pysocket, 'SocketIO')):
sock._sock.shutdown(pysocket.SHUT_WR)
else:
log('No idea how to signal end of writing')
def write_to_socket(sock, data):
    """Write *data* to *sock* using whichever write primitive it offers.

    Returns whatever the underlying call returns (typically the number of
    bytes written).
    """
    # WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but
    # only `sendall`, which uses `_send_until_done` under the hood.
    if hasattr(sock, '_send_until_done'):
        return sock._send_until_done(data)
    if hasattr(sock, 'send'):
        return sock.send(data)
    # Fall back to a raw fd write for file-like objects.
    return os.write(sock.fileno(), data)
| 1,242 | 0 | 92 |
392aa847b676c6ae4c0ce9254f3f9d819e5bb6ac | 7,438 | py | Python | scripts/match-pr-to-feedstocks.py | ax3l/conda-forge.github.io | 2086f087d3b2875c4493e38c2f71b1ccc5304ffd | [
"BSD-3-Clause"
] | null | null | null | scripts/match-pr-to-feedstocks.py | ax3l/conda-forge.github.io | 2086f087d3b2875c4493e38c2f71b1ccc5304ffd | [
"BSD-3-Clause"
] | null | null | null | scripts/match-pr-to-feedstocks.py | ax3l/conda-forge.github.io | 2086f087d3b2875c4493e38c2f71b1ccc5304ffd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env conda-execute
# conda execute
# env:
# - python
# - click
# - jinja2
# - requests
# - ruamel.yaml
# - conda-smithy
# - pygithub
# - fuzzywuzzy
# channels:
# - conda-forge
# run_with: python
import click
import conda_smithy.feedstocks as feedstocks
import jinja2
import json
import requests
import ruamel.yaml
from ruamel.yaml.scanner import ScannerError
import os
from github import Github
import conda_smithy.github as smithy_github
from fuzzywuzzy import process
# patch over differences between PY2 and PY3
try:
text_type = unicode
except NameError:
text_type = str
env = jinja2.Environment(undefined=NullUndefined)
@click.group()
def cli():
    """Match package names in pr against existing feedstocks.
    Tools to match package names in from all the recipes in a pr against
    the existing conda-forge feedstocks.
    """
    # NOTE: click renders this docstring as the --help text for the group.
    pass
@cli.command('build-feedstock-index', help='create json index of feedstocks.')
@click.argument('filename')
@click.option('--gh-org', default='conda-forge', help='Set Github organization name.')
def build_feedstock_index(filename, gh_org='conda-forge'):
    "Iterate over feedstocks and return dict of pkg-name:feedstock"
    pkg_index = {}
    for repo in feedstocks.feedstock_repos(gh_org):
        try:
            # Preferred source of the name: the recipe's own meta.yaml.
            meta = repo.get_file_contents(path='recipe/meta.yaml').decoded_content
            pkg_name = _extract_package_name(meta)
        except (AttributeError, KeyError, ScannerError) as err:
            # unable to parse the bob.io.image-feedstock
            # Fallback: derive the name from the '<pkg>-feedstock' repo URL.
            print('Unable to parse meta.yaml for {}'.format(repo.url))
            print('guessing pkg name from feedstock url')
            print('Traceback: \n', err)
            pkg_name = repo.url.split('/')[-1].split('-feedstock')[0].lower()
        pkg_index[pkg_name] = repo.full_name
    with open(filename, 'w') as f:
        json.dump(pkg_index, f)
    print('feedstocks index written to {}'.format(filename))
@cli.command('build-pr-index', help='create json index of pull requests.')
@click.argument('filename')
@click.option('--gh-org', default='conda-forge', help='Set Github organization name.')
@click.option('--staged-recipes-repo', default='staged-recipes', help='Set staged recipe repo.')
def build_pr_index(filename, gh_org='conda-forge', staged_recipes_repo='staged-recipes'):
    "Iterate over open pull requests in staged_recipes and return dict of pr:pkg-name"
    token = smithy_github.gh_token()
    gh = Github(token)
    org = gh.get_organization(gh_org)
    repo = org.get_repo(staged_recipes_repo)
    pkg_index = {}
    for pr in list(repo.get_pulls()):
        for f in pr.get_files():
            if not f.filename.lower().endswith('meta.yaml'):
                continue
            try:
                meta = requests.get(f.raw_url).content
                pkg_name = _extract_package_name(meta)
            except (AttributeError, ScannerError) as err:
                # Bug fix: `idx` used to be referenced in this handler before
                # it was ever assigned, so a parse failure either raised
                # NameError (first file) or clobbered an earlier recipe's
                # entry.  Record the failure under this file's own key.
                pkg_name = None
                print('Unable to parse meta.yaml for pr #{}'.format(pr.number))
                print('setting pkg_name to None')
                print('Traceback: \n', err)
            idx = 'pr {} ({}) /{}'.format(pr.number, pkg_name, f.filename)
            pkg_index[idx] = pkg_name
    with open(filename, 'w') as f:
        json.dump(pkg_index, f)
    print('pull requests index written to {}'.format(filename))
@cli.command('compare-indices', help='compare pr index to feedstock index.')
@click.argument('pr-index')
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
@cli.command('check-pr', help='check pr against feedstock index.')
@click.argument('pr', type=int)
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
@click.option('--gh-org', default='conda-forge', help='Set Github organization name.')
@click.option('--staged-recipes-repo', default='staged-recipes', help='Set staged recipe repo.')
@cli.command('check-pkg', help='check pkg name against feedstock index.')
@click.argument('name')
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
def _extract_package_name(meta):
    """Extract package name from meta.yaml"""
    # Render the jinja2 template first (undefined names pass through via
    # NullUndefined), then parse the resulting YAML.
    rendered = env.from_string(meta.decode('utf8')).render(os=os)
    parsed = ruamel.yaml.load(rendered, ruamel.yaml.RoundTripLoader)
    return parsed['package']['name'].lower()
# Allow direct execution (e.g. via the conda-execute header at the top of the file).
if __name__ == '__main__':
    cli()
| 36.106796 | 96 | 0.654477 | #!/usr/bin/env conda-execute
# conda execute
# env:
# - python
# - click
# - jinja2
# - requests
# - ruamel.yaml
# - conda-smithy
# - pygithub
# - fuzzywuzzy
# channels:
# - conda-forge
# run_with: python
import click
import conda_smithy.feedstocks as feedstocks
import jinja2
import json
import requests
import ruamel.yaml
from ruamel.yaml.scanner import ScannerError
import os
from github import Github
import conda_smithy.github as smithy_github
from fuzzywuzzy import process
# patch over differences between PY2 and PY3
try:
text_type = unicode
except NameError:
text_type = str
class NullUndefined(jinja2.Undefined):
    """Jinja2 undefined type that renders unknown names as themselves.

    Lets meta.yaml templates render without every variable being supplied:
    instead of raising UndefinedError, an unresolved name (and attribute or
    item access on it) is emitted back as literal text.
    """
    def __unicode__(self):
        return text_type(self._undefined_name)
    def __getattr__(self, name):
        # 'foo.bar' comes out literally as 'foo.bar'.
        return text_type('{}.{}'.format(self, name))
    def __getitem__(self, name):
        # Item access renders as 'foo["bar"]'.
        return '{}["{}"]'.format(self, name)
env = jinja2.Environment(undefined=NullUndefined)
@click.group()
def cli():
    """Match package names in pr against existing feedstocks.
    Tools to match package names in from all the recipes in a pr against
    the existing conda-forge feedstocks.
    """
    # NOTE: click renders this docstring as the --help text for the group.
    pass
@cli.command('build-feedstock-index', help='create json index of feedstocks.')
@click.argument('filename')
@click.option('--gh-org', default='conda-forge', help='Set Github organization name.')
def build_feedstock_index(filename, gh_org='conda-forge'):
    "Iterate over feedstocks and return dict of pkg-name:feedstock"
    index = {}
    for feedstock in feedstocks.feedstock_repos(gh_org):
        try:
            # Preferred source of the name: the recipe's own meta.yaml.
            raw = feedstock.get_file_contents(path='recipe/meta.yaml').decoded_content
            name = _extract_package_name(raw)
        except (AttributeError, KeyError, ScannerError) as err:
            # unable to parse the bob.io.image-feedstock
            print('Unable to parse meta.yaml for {}'.format(feedstock.url))
            print('guessing pkg name from feedstock url')
            print('Traceback: \n', err)
            name = feedstock.url.split('/')[-1].split('-feedstock')[0].lower()
        index[name] = feedstock.full_name
    with open(filename, 'w') as fh:
        json.dump(index, fh)
    print('feedstocks index written to {}'.format(filename))
@cli.command('build-pr-index', help='create json index of pull requests.')
@click.argument('filename')
@click.option('--gh-org', default='conda-forge', help='Set Github organization name.')
@click.option('--staged-recipes-repo', default='staged-recipes', help='Set staged recipe repo.')
def build_pr_index(filename, gh_org='conda-forge', staged_recipes_repo='staged-recipes'):
    "Iterate over open pull requests in staged_recipes and return dict of pr:pkg-name"
    token = smithy_github.gh_token()
    gh = Github(token)
    org = gh.get_organization(gh_org)
    repo = org.get_repo(staged_recipes_repo)
    pkg_index = {}
    for pr in list(repo.get_pulls()):
        for f in pr.get_files():
            if not f.filename.lower().endswith('meta.yaml'):
                continue
            try:
                meta = requests.get(f.raw_url).content
                pkg_name = _extract_package_name(meta)
            except (AttributeError, ScannerError) as err:
                # Bug fix: `idx` used to be referenced in this handler before
                # it was ever assigned, so a parse failure either raised
                # NameError (first file) or clobbered an earlier recipe's
                # entry.  Record the failure under this file's own key.
                pkg_name = None
                print('Unable to parse meta.yaml for pr #{}'.format(pr.number))
                print('setting pkg_name to None')
                print('Traceback: \n', err)
            idx = 'pr {} ({}) /{}'.format(pr.number, pkg_name, f.filename)
            pkg_index[idx] = pkg_name
    with open(filename, 'w') as f:
        json.dump(pkg_index, f)
    print('pull requests index written to {}'.format(filename))
@cli.command('compare-indices', help='compare pr index to feedstock index.')
@click.argument('pr-index')
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
def compare_indices(pr_index, feedstock_index, threshold, limit):
    """Fuzzy-match every package recorded in the PR index against the
    feedstock index and print all matches scoring above *threshold*."""
    # BUGFIX: close the index files instead of leaking the handles that
    # ``json.load(open(...))`` left dangling.
    with open(pr_index) as fp:
        pr_index = json.load(fp)
    with open(feedstock_index) as fp:
        feedstock_index = json.load(fp)
    matches = {}
    for pr, name in pr_index.items():
        m = _fuzzy_match(name, feedstock_index, threshold=threshold, limit=limit)
        if m:  # only keep PRs that actually matched something
            matches[pr] = m
    _format_output(matches, threshold, limit)
@cli.command('check-pr', help='check pr against feedstock index.')
@click.argument('pr', type=int)
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
@click.option('--gh-org', default='conda-forge', help='Set Github organization name.')
@click.option('--staged-recipes-repo', default='staged-recipes', help='Set staged recipe repo.')
def check_pr(pr, feedstock_index, threshold, limit, gh_org, staged_recipes_repo):
    """Fuzzy-match every recipe touched by pull request *pr* against the
    feedstock index and print candidate duplicate feedstocks."""
    # BUGFIX: close the index file instead of leaking the handle.
    with open(feedstock_index) as fp:
        feedstock_index = json.load(fp)
    token = smithy_github.gh_token()
    gh = Github(token)
    org = gh.get_organization(gh_org)
    repo = org.get_repo(staged_recipes_repo)
    pr = repo.get_pull(pr)
    packages = {}
    for f in pr.get_files():
        if not f.filename.lower().endswith('meta.yaml'):
            continue
        try:
            meta = requests.get(f.raw_url).content
            pkg_name = _extract_package_name(meta)
            idx = 'pr {} ({}) /{}'.format(pr.number, pkg_name, f.filename)
            packages[idx] = pkg_name
        except AttributeError:
            # BUGFIX: ``idx`` was previously referenced here before being
            # assigned, raising NameError when the very first file failed.
            idx = 'pr {} (unparsed) /{}'.format(pr.number, f.filename)
            packages[idx] = None
    matches = {}
    for k, pkg_name in packages.items():
        matches[k] = _fuzzy_match(pkg_name, feedstock_index, threshold, limit)
    _format_output(matches, threshold, limit)
@cli.command('check-pkg', help='check pkg name against feedstock index.')
@click.argument('name')
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
def check_pkg(name, feedstock_index, threshold, limit):
    """Fuzzy-match a single package *name* against the feedstock index."""
    # BUGFIX: close the index file instead of leaking the handle.
    with open(feedstock_index) as fp:
        feedstock_index = json.load(fp)
    matches = _fuzzy_match(name, feedstock_index, threshold, limit)
    _format_output({name: matches}, threshold, limit)
def _format_output(matches, threshold, limit):
vals = (threshold, limit)
print('-------------------------------------------')
print('match(es) found using threshold={}, limit={}'.format(*vals))
print('-------------------------------------------')
for k, repo in sorted(matches.items()):
for recipe in repo:
if len(recipe) > 0:
print('{} matches --> pkg={}, score={}, feedstock={}'.format(k, *recipe))
def _fuzzy_match(name, feedstock_index, threshold, limit):
    """Return up to *limit* feedstocks whose name fuzzily matches *name*.

    Each element is a ``(pkg, score, feedstock)`` tuple; only candidates
    scoring at least *threshold* are kept.
    """
    candidates = process.extract(name, list(feedstock_index.keys()), limit=limit)
    return [(pkg, score, feedstock_index[pkg])
            for pkg, score in candidates
            if score >= threshold]
def _extract_package_name(meta):
    """Extract package name from meta.yaml"""
    # Render the Jinja templating first (unknown variables pass through via
    # NullUndefined), then YAML-parse the rendered text.
    content = env.from_string(meta.decode('utf8')).render(os=os)
    meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
    return meta['package']['name'].lower()
# Allow running the matcher CLI directly as a script.
if __name__ == '__main__':
    cli()
| 2,394 | 17 | 215 |
e5e0d12a9e36fc5f92b96a631ff6de1286ac2249 | 1,566 | py | Python | application/mod_collage/col_controllers.py | hieusydo/Voyage | 2a98118131fad927326d318ae1766e64bbb5add8 | [
"MIT"
] | 1 | 2018-04-23T05:16:49.000Z | 2018-04-23T05:16:49.000Z | application/mod_collage/col_controllers.py | hieusydo/Voyage | 2a98118131fad927326d318ae1766e64bbb5add8 | [
"MIT"
] | null | null | null | application/mod_collage/col_controllers.py | hieusydo/Voyage | 2a98118131fad927326d318ae1766e64bbb5add8 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, session, redirect, url_for
from flask_wtf import FlaskForm
from wtforms import SelectField
from application.mod_collage.photoManip import generateCollage
from application.mod_auth.models import Landmark
mod_collage = Blueprint('collage', __name__, url_prefix='/collage')
# Represents the collage form
@mod_collage.route('/get/', methods=['GET', 'POST']) | 32.625 | 72 | 0.716475 | from flask import Blueprint, render_template, session, redirect, url_for
from flask_wtf import FlaskForm
from wtforms import SelectField
from application.mod_collage.photoManip import generateCollage
from application.mod_auth.models import Landmark
mod_collage = Blueprint('collage', __name__, url_prefix='/collage')
# Represents the collage form
class AddColForm(FlaskForm):
    """Collage request form: two dropdowns selecting the landmarks whose
    photos will be combined."""
    landmark1 = SelectField("Landmark 1")
    landmark2 = SelectField("Landmark 2")

    # Allows setting the 'choices' field after creation
    def setChoices(self, landmarks):
        """Populate both dropdowns with (value, label) tuples."""
        self.landmark1.choices = landmarks
        self.landmark2.choices = landmarks
@mod_collage.route('/get/', methods=['GET', 'POST'])
def picTest():
    """Show the collage form; on valid submit, generate a collage from the
    two selected landmarks' photos and render the result page.

    NOTE(review): Python 2 syntax (print statements) — predates a py3 port.
    """
    # Require an authenticated session.
    if 'user_id' not in session:
        return redirect(url_for('auth.signin'))
    # Get landmarks by id
    uid = session['user_id']
    landmarks = Landmark.query.filter_by(usrID=uid).all()
    landmarks.sort(key=lambda x: x.lmName)
    print "picTest", landmarks
    # Create a list of value,display tuples from the landmarks
    choices = []
    for i in landmarks:
        choices.append((i.photoFileURL, i.lmName))
    # Create and set the form choices
    form = AddColForm()
    form.setChoices(choices)
    if form.validate_on_submit():
        print "picTest about to generateCollage..."
        # The form values are photo file URLs (set as choice values above).
        url = generateCollage(form.landmark1.data, form.landmark2.data)
        print "picTest done generateCollage"
        return render_template('collage/result.html', image_url=url)
    return render_template('collage/request.html', form=form)
0af5e91130c6574093b9682b307fb1019139a663 | 2,201 | py | Python | siteblog/blog/migrations/0001_initial.py | vladislavnet/siteblog | f8e0b139c974a78d5de17671768c34d214c025fe | [
"Unlicense"
] | null | null | null | siteblog/blog/migrations/0001_initial.py | vladislavnet/siteblog | f8e0b139c974a78d5de17671768c34d214c025fe | [
"Unlicense"
] | null | null | null | siteblog/blog/migrations/0001_initial.py | vladislavnet/siteblog | f8e0b139c974a78d5de17671768c34d214c025fe | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-05 12:46
from django.db import migrations, models
import django.db.models.deletion
| 39.303571 | 135 | 0.545207 | # Generated by Django 3.1.6 on 2021-02-05 12:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the blog app: creates Category, Tag and Post.

    Category/Tag are simple slugged lookup tables; Post references Category
    (PROTECT) and Tag (many-to-many).
    """

    # First migration of the app.
    initial = True

    # No dependencies: starts from an empty schema.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField(max_length=255, unique=True, verbose_name='Url')),
            ],
            options={
                'ordering': ['title'],
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField(unique=True, verbose_name='Url')),
            ],
            options={
                'ordering': ['title'],
            },
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField(unique=True, verbose_name='Url')),
                ('author', models.CharField(max_length=100)),
                ('content', models.TextField(blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Опубликовано')),
                ('photo', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('views', models.IntegerField(default=0, verbose_name='Кол-во просмотров')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='posts', to='blog.category')),
                ('tags', models.ManyToManyField(blank=True, related_name='posts', to='blog.Tag')),
            ],
            options={
                'ordering': ['-created_at'],
            },
        ),
    ]
| 0 | 2,081 | 23 |
b2b8567fa0354d9e623bba8b14d8a1f37a000693 | 7,750 | py | Python | autoreload/autoreload.py | jarret/plugins | 65304d4baf3d6a0254148be0fd851789c152d8d3 | [
"BSD-3-Clause"
] | null | null | null | autoreload/autoreload.py | jarret/plugins | 65304d4baf3d6a0254148be0fd851789c152d8d3 | [
"BSD-3-Clause"
] | null | null | null | autoreload/autoreload.py | jarret/plugins | 65304d4baf3d6a0254148be0fd851789c152d8d3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from lightning import Plugin
import json
import psutil
import subprocess
import threading
import time
import os
try:
    # C-lightning v0.7.2
    plugin = Plugin(dynamic=False)
except:
    # Older pylightning releases don't accept the ``dynamic`` kwarg.
    # NOTE(review): bare except — consider narrowing to TypeError.
    plugin = Plugin()
@plugin.init()
def inject_manifest(plugin, manifest):
    """Once we have the manifest from the child plugin, inject it into our own.

    Registers the child's options, RPC methods, subscriptions and hooks on
    the parent plugin so lightningd routes them here; method/hook calls are
    forwarded to the child via its proxy callbacks.
    """
    for opt in manifest.get("options", []):
        plugin.add_option(opt['name'], opt['default'], opt['description'])
    for m in manifest.get("rpcmethods", []):
        plugin.add_method(m['name'], plugin.child.proxy_method, background=True)
    for s in manifest.get("subscriptions", []):
        plugin.add_subscription(s, plugin.child.proxy_subscription)
    for h in manifest.get("hooks", []):
        plugin.add_hook(h, plugin.child.proxy_method, background=True)
@plugin.method('autoreload-restart')
def restart(plugin):
    """Manually triggers a restart of the plugin controlled by autoreload.
    """
    child = plugin.child
    child.restart()
# We can't rely on @plugin.init to tell us the plugin we need to watch and
# reload since we need to start it to pass through its manifest before we get
# any cli options. So we're doomed to get our parent cmdline and parse out the
# argument by hand.
parent = psutil.Process().parent()
cmdline = parent.cmdline()
plugin.path = None
prefix = '--autoreload-plugin='
for c in cmdline:
    if c.startswith(prefix):
        # Everything after the '=' is the path of the plugin to watch.
        plugin.path = c[len(prefix):]
        break
if plugin.path:
    plugin.child = ChildPlugin(plugin.path, plugin)
    # If we can't start on the first attempt we can't inject into the
    # manifest, no point in continuing.
    if not plugin.child.start():
        raise Exception("Could not start the plugin under development, can't continue")
    inject_manifest(plugin, plugin.child.manifest)
# Now we can run the actual plugin
plugin.add_option("autoreload-plugin", None, "Path to the plugin that we should be watching and reloading.")
plugin.run()
| 31.25 | 117 | 0.572645 | #!/usr/bin/env python3
from lightning import Plugin
import json
import psutil
import subprocess
import threading
import time
import os
try:
    # C-lightning v0.7.2
    plugin = Plugin(dynamic=False)
except:
    # Older pylightning releases don't accept the ``dynamic`` kwarg.
    # NOTE(review): bare except — consider narrowing to TypeError.
    plugin = Plugin()
class ChildPlugin(object):
    """Supervisor for the plugin under development.

    Starts/stops the child process, forwards JSON-RPC messages between
    lightningd and the child, and replays the cached ``init`` request after
    every restart so the child comes back fully configured.
    """

    def __init__(self, path, plugin):
        self.path = path  # Executable path of the watched plugin.
        self.plugin = plugin  # Parent autoreload Plugin instance.
        self.status = 'stopped'  # Lifecycle flag: 'stopped' | 'started'.
        self.proc = None  # subprocess.Popen handle of the child.
        self.iolock = threading.Lock()
        self.decoder = json.JSONDecoder()
        self.manifest = None  # Result of the child's getmanifest call.
        self.init = None  # Cached init request, replayed on restart.
        self.reader = None  # Thread pumping child stdout to lightningd.

    def watch(self):
        """Poll the plugin file's mtime once a second; restart on change."""
        last = os.path.getmtime(self.path)
        while True:
            time.sleep(1)
            now = os.path.getmtime(self.path)
            if last != now:
                print("Detected a change in the child plugin, restarting...")
                last = now
                try:
                    self.restart()
                except Exception:
                    # Narrowed from a bare ``except:`` so SystemExit and
                    # KeyboardInterrupt are not swallowed.
                    self.plugin.log("Failed to start plugin, will wait for next change and try again.", level='warn')

    def handle_init(self, request):
        """Lightningd has sent us its first init message, clean and forward.
        """
        params = request.params.copy()
        # These may have been added by the plugin framework and we won't be
        # able to serialize them when forwarding, so delete them.
        for key in ['plugin', 'request']:
            if key in params:
                del params[key]
        self.init = {
            'jsonrpc': '2.0',
            'method': request.method,
            'params': params,
            'id': request.id,
        }
        print("Forwarding", self.init)
        # Now remove any options that we registered on behalf of the child
        # plugin. It'd not understand them if we forward them.
        opts = self.init['params']['options']
        self.init['params']['options'] = {k: v for k, v in opts.items() if not k.startswith('autoreload')}
        plugin.child.send(self.init)
        print("Sent init to child plugin")
        plugin.child.passthru()

    def _readobj(self, sock):
        """Read one JSON object from the child's stdout; None on EOF."""
        buff = b''
        while True:
            try:
                b = sock.readline()
                buff += b
                if len(b) == 0:
                    return None
                if b'}\n' not in buff:
                    continue
                # Convert late to UTF-8 so glyphs split across recvs do not
                # impact us
                buff = buff.decode("UTF-8")
                objs, len_used = self.decoder.raw_decode(buff)
                buff = buff[len_used:].lstrip().encode("UTF-8")
                return objs
            except ValueError:
                # Probably didn't read enough
                buff = buff.lstrip().encode("UTF-8")

    def start(self):
        """Spawn the child and fetch its manifest; True on success."""
        assert(self.status == 'stopped')
        try:
            self.proc = subprocess.Popen([self.path], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
            self.status = 'started'
            self.getmanifest()
            return True
        except Exception as e:
            self.plugin.log(e, level='warn')
            return False

    def stop(self):
        """Kill the child process and join the stdout pass-through thread."""
        assert(self.status == 'started')
        self.proc.kill()
        self.proc.wait()
        reader = self.reader
        if reader:
            reader.join()
        self.status = 'stopped'

    def restart(self):
        """Stop and start the child, then replay the cached init request."""
        print('Restarting child plugin')
        self.stop()
        self.start()
        plugin.child.send(self.init)
        print("Sent init to child plugin")
        plugin.child.passthru()

    def getmanifest(self):
        """Request the child's manifest; forward unrelated messages."""
        assert(self.status == 'started')
        self.send({'jsonrpc': '2.0', 'id': 0, 'method': 'getmanifest', 'params': []})
        while True:
            msg = self._readobj(self.proc.stdout)
            if msg is None:
                # BUGFIX: this branch used to interpolate the undefined name
                # ``buff`` (a _readobj local), raising NameError instead of
                # reporting the real failure (child hit EOF / invalid JSON).
                print("Child plugin does not seem to be sending valid JSON")
                self.stop()
                raise ValueError()
            if 'id' in msg and msg['id'] == 0:
                self.manifest = msg['result']
                break
            # Not the manifest reply: pass it through to lightningd.
            self.plugin._write_locked(msg)
        return self.manifest

    def passthru(self):
        """Consume the child's init reply, then pipe its stdout through."""
        # First read the init reply, and then we can switch to passthru
        while True:
            msg = self._readobj(self.proc.stdout)
            if 'id' in msg and msg['id'] == self.init['id']:
                break
            self.plugin._write_locked(msg)

        def read_loop():
            # Pump raw lines from the child to lightningd until EOF.
            while True:
                line = self.proc.stdout.readline()
                if line == b'':
                    break
                self.plugin.stdout.buffer.write(line)
                self.plugin.stdout.flush()
            self.reader = None
            print("Child plugin exited")
        self.reader = threading.Thread(target=read_loop)
        self.reader.daemon = True
        self.reader.start()

    def send(self, msg):
        """Serialize *msg* and write it to the child's stdin."""
        self.proc.stdin.write(json.dumps(msg).encode('UTF-8'))
        self.proc.stdin.write(b'\n\n')
        self.proc.stdin.flush()

    def proxy_method(self, request, *args, **kwargs):
        """Forward an RPC/hook call from lightningd to the child verbatim."""
        raw = {
            'jsonrpc': '2.0',
            'method': request.method,
            'params': request.params,
            'id': request.id,
        }
        self.send(raw)

    def proxy_subscription(self, request, *args, **kwargs):
        """Forward a notification (no id) from lightningd to the child."""
        raw = {
            'jsonrpc': '2.0',
            'method': request.method,
            'params': request.params,
        }
        self.send(raw)
@plugin.init()
def init(options, configuration, plugin, request):
    """Handle lightningd's init: start the file watcher and forward init
    to the child plugin."""
    if options['autoreload-plugin'] in ['null', None]:
        print("Cannot run the autoreload plugin on its own, please specify --autoreload-plugin")
        plugin.rpc.stop()
        return
    # Daemon thread: polls the plugin file and restarts the child on change.
    watch_thread = threading.Thread(target=plugin.child.watch)
    watch_thread.daemon = True
    watch_thread.start()
    plugin.child.handle_init(request)
def inject_manifest(plugin, manifest):
    """Once we have the manifest from the child plugin, inject it into our own.

    Registers the child's options, RPC methods, subscriptions and hooks on
    the parent plugin; calls for them are forwarded via the child's proxies.
    """
    for opt in manifest.get("options", []):
        plugin.add_option(opt['name'], opt['default'], opt['description'])
    for m in manifest.get("rpcmethods", []):
        plugin.add_method(m['name'], plugin.child.proxy_method, background=True)
    for s in manifest.get("subscriptions", []):
        plugin.add_subscription(s, plugin.child.proxy_subscription)
    for h in manifest.get("hooks", []):
        plugin.add_hook(h, plugin.child.proxy_method, background=True)
@plugin.method('autoreload-restart')
def restart(plugin):
    """Manually trigger a restart of the plugin controlled by autoreload."""
    plugin.child.restart()
# We can't rely on @plugin.init to tell us the plugin we need to watch and
# reload since we need to start it to pass through its manifest before we get
# any cli options. So we're doomed to get our parent cmdline and parse out the
# argument by hand.
parent = psutil.Process().parent()
cmdline = parent.cmdline()
plugin.path = None
prefix = '--autoreload-plugin='
for c in cmdline:
    if c.startswith(prefix):
        # Everything after the '=' is the path of the plugin to watch.
        plugin.path = c[len(prefix):]
        break
if plugin.path:
    plugin.child = ChildPlugin(plugin.path, plugin)
    # If we can't start on the first attempt we can't inject into the
    # manifest, no point in continuing.
    if not plugin.child.start():
        raise Exception("Could not start the plugin under development, can't continue")
    inject_manifest(plugin, plugin.child.manifest)
# Now we can run the actual plugin
plugin.add_option("autoreload-plugin", None, "Path to the plugin that we should be watching and reloading.")
plugin.run()
| 4,365 | 1,318 | 45 |
15cd974602f7b171557d6e3634177554bb7eed60 | 2,239 | py | Python | hard-gists/1381489/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/1381489/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/1381489/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | from PyQt4 import QtCore, QtGui
import maya.cmds as cmds
import maya.OpenMayaUI as mui
import sip
# Re-running the script inside Maya: drop the previous dialog instance (if
# any) so windows don't stack, then create and show a fresh one.
try:
    dialog.deleteLater()
except:
    pass
dialog = show()
| 30.671233 | 111 | 0.652523 | from PyQt4 import QtCore, QtGui
import maya.cmds as cmds
import maya.OpenMayaUI as mui
import sip
class MyDialog(QtGui.QDialog):
    """Qt dialog that embeds a Maya modelPanel (3D viewport) by bridging
    Maya's UI paths and PyQt objects via sip/MQtUtil."""

    def __init__(self, parent, **kwargs):
        super(MyDialog, self).__init__(parent, **kwargs)
        self.setObjectName("MyWindow")
        self.resize(800, 600)
        self.setWindowTitle("PyQt ModelPanel Test")

        self.verticalLayout = QtGui.QVBoxLayout(self)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        # need to set a name so it can be referenced by maya node path
        self.verticalLayout.setObjectName("mainLayout")

        # First use SIP to unwrap the layout into a pointer
        # Then get the full path to the UI in maya as a string
        layout = mui.MQtUtil.fullName(long(sip.unwrapinstance(self.verticalLayout)))
        cmds.setParent(layout)

        paneLayoutName = cmds.paneLayout()
        # Find a pointer to the paneLayout that we just created
        ptr = mui.MQtUtil.findControl(paneLayoutName)
        # Wrap the pointer into a python QObject
        self.paneLayout = sip.wrapinstance(long(ptr), QtCore.QObject)

        self.cameraName = cmds.camera()[0]
        self.modelPanelName = cmds.modelPanel("customModelPanel", label="ModelPanel Test", cam=self.cameraName)
        # Find a pointer to the modelPanel that we just created
        ptr = mui.MQtUtil.findControl(self.modelPanelName)
        # Wrap the pointer into a python QObject
        self.modelPanel = sip.wrapinstance(long(ptr), QtCore.QObject)

        # add our QObject reference to the paneLayout to our layout
        self.verticalLayout.addWidget(self.paneLayout)

    def showEvent(self, event):
        super(MyDialog, self).showEvent(event)
        # maya can lag in how it repaints UI. Force it to repaint
        # when we show the window.
        self.modelPanel.repaint()
def show():
    """Create the dialog parented to Maya's main window, show it, return it."""
    # get a pointer to the maya main window
    ptr = mui.MQtUtil.mainWindow()
    # use sip to wrap the pointer into a QObject
    win = sip.wrapinstance(long(ptr), QtCore.QObject)
    d = MyDialog(win)
    d.show()
    return d
# Re-running the script inside Maya: drop the previous dialog instance (if
# any) so windows don't stack, then create and show a fresh one.
try:
    dialog.deleteLater()
except:
    pass
dialog = show()
| 1,939 | 9 | 100 |
61e3d943b4cd0789dbc0aead054385cc47d08639 | 2,124 | py | Python | fitbert/tests.py | tranvien98/fit_bert | 0857aacaaa5358c3111bb57d675c9edfed8654c8 | [
"Apache-2.0"
] | 67 | 2019-07-15T02:26:14.000Z | 2021-08-29T07:16:12.000Z | fitbert/tests.py | tranvien98/fit_bert | 0857aacaaa5358c3111bb57d675c9edfed8654c8 | [
"Apache-2.0"
] | 12 | 2019-07-12T22:14:25.000Z | 2021-09-06T12:41:13.000Z | fitbert/tests.py | tranvien98/fit_bert | 0857aacaaa5358c3111bb57d675c9edfed8654c8 | [
"Apache-2.0"
] | 14 | 2019-10-04T00:56:38.000Z | 2021-08-11T03:35:56.000Z | import pytest
from fitbert import FitBert
from fitbert.delemmatize import Delemmatizer
dl = Delemmatizer()
"""
def test_masker_works_without_instantiating():
masked_string, masked = FitBert.mask(
"This might be justified to signalling the connection between drunken driving and fatal accidents.",
(27, 37),
)
assert FitBert.mask_token in masked_string, "It should mask using the mask token"
assert masked == "signalling", "It should mask the write substring"
"""
@pytest.mark.slow
| 29.5 | 121 | 0.649718 | import pytest
from fitbert import FitBert
from fitbert.delemmatize import Delemmatizer
# Shared Delemmatizer instance used by the tests below.
dl = Delemmatizer()
def test_delemmatizer_instantiates():
    """Delemmatizer() should construct without arguments."""
    assert Delemmatizer() is not None, "It instantiates"
def test_delemmatizer_callable():
    """Instances should be directly callable (``dl(word)``)."""
    assert callable(dl), "Delemmatizer instance should be callable"
def test_delemmatizes_lemmas():
    """A lemma should expand to its surface forms, including itself."""
    assert dl("look") == [
        "looked",
        "looking",
        "looks",
        "look",
    ], "should delemmatize lemmas"
def test_delemmatizes_non_lemmas():
    """A non-lemma form should expand via its lemma to all surface forms."""
    assert dl("ran") == [
        "ran",
        "running",
        "runs",
        "run",
    ], "should delemmatize non-lemmas"
"""
def test_masker_works_without_instantiating():
masked_string, masked = FitBert.mask(
"This might be justified to signalling the connection between drunken driving and fatal accidents.",
(27, 37),
)
assert FitBert.mask_token in masked_string, "It should mask using the mask token"
assert masked == "signalling", "It should mask the write substring"
"""
@pytest.mark.slow
def test_ranking():
    """FitBert should rank candidate fill-ins so the best option comes first.

    Marked slow: downloads/loads a distilbert model.
    """
    fb = FitBert(model_name="distilbert-base-uncased")
    assert callable(fb.fitb)
    sentences = [
        "When she started talking about her ex-boyfriends, he looked like a ***mask*** out of water",
        "The boy was warned that if he misbehaved in the class, he would have to pay ***mask***.",
        "I am surprised that you have ***mask*** patience.",
    ]
    options = [
        ["frog", "fish"],
        ["the drummer", "the flutist", "the piper"],
        ["such a", "so", "such"],
    ]
    answers = ["fish", "the piper", "such"]
    for sentence, option, answer in zip(sentences, options, answers):
        ranked_options = fb.rank(sentence, option)
        assert ranked_options[0] == answer, "It should rank options"
    # With delemmatization enabled (third arg), related forms of the given
    # option should also be considered and ranked.
    sentence = "Psychology includes the study of conscious and unconscious phenomena, as well as ***mask*** and thought."
    options = ["feelings"]
    answer = "feeling"
    ranked_options = fb.rank(sentence, options, True)
    assert ranked_options[0] == answer, "It should find and rank related options"
| 1,488 | 0 | 114 |
a84817ea7d662e7c6e5a6852c3ddb16144d004f0 | 729 | py | Python | src/orders/services/order_is_paid_setter.py | vaibhavantil2/education-backend | ae36f6652d8b120f13d3859874d5051ddbef7092 | [
"MIT"
] | null | null | null | src/orders/services/order_is_paid_setter.py | vaibhavantil2/education-backend | ae36f6652d8b120f13d3859874d5051ddbef7092 | [
"MIT"
] | null | null | null | src/orders/services/order_is_paid_setter.py | vaibhavantil2/education-backend | ae36f6652d8b120f13d3859874d5051ddbef7092 | [
"MIT"
] | 1 | 2021-12-22T06:46:05.000Z | 2021-12-22T06:46:05.000Z | from django.utils import timezone
from orders.models import Order
class OrderIsPaidSetter:
"""Mark order as paid"""
| 28.038462 | 85 | 0.647462 | from django.utils import timezone
from orders.models import Order
class OrderIsPaidSetter:
    """Mark order as paid"""

    def __init__(self, order: Order, silent=False):
        self.order = order
        self.silent = silent  # suppress shipping notifications when True
        # Remember whether the order was paid BEFORE this service runs, so
        # we ship (and clear ``unpaid``) only on the first payment.
        self.is_already_paid = (order.paid is not None)

    def __call__(self):
        """Entry point: record the payment time, then ship if applicable."""
        self.mark_order_as_paid()
        self.ship()

    def mark_order_as_paid(self):
        """Stamp ``paid`` with the current time and persist the order."""
        self.order.paid = timezone.now()
        if not self.is_already_paid:  # reset unpayment date if order is not paid yet
            self.order.unpaid = None
        self.order.save()

    def ship(self):
        """Ship the purchased item, but only on the first payment."""
        if not self.is_already_paid and self.order.item is not None:
            self.order.ship(silent=self.silent)
| 499 | 0 | 107 |
d30bb470de738f9f6f1fb2cbbd6f7dac331f9e25 | 4,783 | py | Python | swift_iam_role/swift_iam_role.py | sijuvj/quickstart-swift-digital-connectivity | 741eb7422987f9cbde28746443d466351906ce1a | [
"Apache-2.0"
] | null | null | null | swift_iam_role/swift_iam_role.py | sijuvj/quickstart-swift-digital-connectivity | 741eb7422987f9cbde28746443d466351906ce1a | [
"Apache-2.0"
] | null | null | null | swift_iam_role/swift_iam_role.py | sijuvj/quickstart-swift-digital-connectivity | 741eb7422987f9cbde28746443d466351906ce1a | [
"Apache-2.0"
] | null | null | null | """Nested Stack for the sample IAM Role creation for Managing SWIFT components"""
from typing import List
from aws_cdk import (
aws_rds as _rds,
aws_iam as _iam
)
from constructs import Construct
from aws_cdk import NestedStack
class SwiftIAMRole(NestedStack):
    """Nested Stack for the sample IAM Role creation for Managing SWIFT components"""
    # pylint: disable=too-many-arguments

    def create_swift_instance_operator_role(self, instance_ids):
        """create swift instance operator role"""
        # Role is assumable only by principals of this account that signed
        # in with MFA.
        swift_instance_operator_role = \
            _iam.Role(self, "SWIFTInstanceOperatorRole",
                      role_name="SWIFTInstanceOperatorRole",
                      assumed_by=_iam.AccountPrincipal(account_id=self.account)
                      .with_conditions({"Bool": {"aws:MultiFactorAuthPresent": "true"}})
                      )
        # Limit SSM session/command access to the given EC2 instances.
        instances_resource = []
        if instance_ids is not None:
            for instance_id in instance_ids:
                instances_resource.append(
                    "arn:aws:ec2:" + self.region + ":" + self.account + ":instance/" + instance_id)
        ssm_doc_resource = "arn:aws:ssm:" + self.region + \
            ":" + self.account + ":document/SSM-SessionManagerRunShell"
        statements = [
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ssm:StartSession", "ssm:SendCommand"],
                resources=[ssm_doc_resource] + instances_resource,
                conditions={"BoolIfExists": {
                    "ssm:SessionDocumentAccessCheck": "true"}}),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["ssm:DescribeSessions", "ssm:GetConnectionStatus",
                         "ssm:DescribeInstanceInformation",
                         "ssm:DescribeInstanceProperties", "ec2:DescribeInstances"],
                resources=["*"]),
            # Users may only terminate their own sessions.
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["ssm:TerminateSession"],
                resources=[
                    "arn:aws:ssm:*:*:session/${aws:username}-*"])]
        _iam.Policy(
            self, "SSMInstanceAccessPolicy", policy_name="SSMInstanceAccessPolicy",
            roles=[swift_instance_operator_role], statements=statements,
            force=True)

    def create_swift_infrastructure_role(
            self, database_instance: _rds.DatabaseInstance, instance_ids: List[str],
            mq_broker_arn: str):
        """create swift infrastructure role"""
        # Role is assumable only by principals of this account that signed
        # in with MFA.
        swift_infrastructure_role = \
            _iam.Role(self, "SWIFTInfrastructureRole",
                      role_name="SWIFTInfrastructureRole",
                      assumed_by=_iam.AccountPrincipal(account_id=self.account)
                      .with_conditions({"Bool": {"aws:MultiFactorAuthPresent": "true"}})
                      )
        instances_resource = []
        if instance_ids is not None:
            for instance_id in instance_ids:
                instances_resource.append(
                    "arn:aws:ec2:" + self.region + ":" + self.account + ":instance/" + instance_id)
        # Start/stop rights are scoped to the specific RDS instance, EC2
        # instances and MQ broker passed in; describe/list stay broad.
        statements = [
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["rds:Describe*"],
                resources=["*"]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["rds:Start*", "rds:Stop*"],
                resources=[database_instance.instance_arn]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ec2:Describe*"],
                resources=["*"]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ec2:Start*", "ec2:Stop*"],
                resources=instances_resource),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["mq:List*", "mq:Describe*", "mq:RebootBroker"],
                resources=[mq_broker_arn]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["logs:List*", "logs:Describe*", "logs:Get*"],
                resources=["*"])]
        _iam.Policy(
            self, "SwiftInfrastructurePolicy", policy_name="SwiftInfrastructurePolicy",
            roles=[swift_infrastructure_role], statements=statements,
            force=True)
| 45.990385 | 99 | 0.593142 | """Nested Stack for the sample IAM Role creation for Managing SWIFT components"""
from typing import List
from aws_cdk import (
aws_rds as _rds,
aws_iam as _iam
)
from constructs import Construct
from aws_cdk import NestedStack
class SwiftIAMRole(NestedStack):
    """Nested Stack for the sample IAM Role creation for Managing SWIFT components"""
    # pylint: disable=too-many-arguments
    def __init__(self, scope: Construct, cid: str, instance_ids: List[str], mq_broker_arn: str,
                 database_instance: _rds.DatabaseInstance, **kwargs):
        # Create both sample roles up front; each gets an inline policy
        # scoped to the SWIFT resources passed in.
        super().__init__(scope, cid, **kwargs)
        self.create_swift_instance_operator_role(instance_ids)
        self.create_swift_infrastructure_role(
            database_instance=database_instance, instance_ids=instance_ids,
            mq_broker_arn=mq_broker_arn)

    def create_swift_instance_operator_role(self, instance_ids):
        """create swift instance operator role"""
        # Role is assumable only by principals of this account that signed
        # in with MFA.
        swift_instance_operator_role = \
            _iam.Role(self, "SWIFTInstanceOperatorRole",
                      role_name="SWIFTInstanceOperatorRole",
                      assumed_by=_iam.AccountPrincipal(account_id=self.account)
                      .with_conditions({"Bool": {"aws:MultiFactorAuthPresent": "true"}})
                      )
        # Limit SSM session/command access to the given EC2 instances.
        instances_resource = []
        if instance_ids is not None:
            for instance_id in instance_ids:
                instances_resource.append(
                    "arn:aws:ec2:" + self.region + ":" + self.account + ":instance/" + instance_id)
        ssm_doc_resource = "arn:aws:ssm:" + self.region + \
            ":" + self.account + ":document/SSM-SessionManagerRunShell"
        statements = [
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ssm:StartSession", "ssm:SendCommand"],
                resources=[ssm_doc_resource] + instances_resource,
                conditions={"BoolIfExists": {
                    "ssm:SessionDocumentAccessCheck": "true"}}),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["ssm:DescribeSessions", "ssm:GetConnectionStatus",
                         "ssm:DescribeInstanceInformation",
                         "ssm:DescribeInstanceProperties", "ec2:DescribeInstances"],
                resources=["*"]),
            # Users may only terminate their own sessions.
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["ssm:TerminateSession"],
                resources=[
                    "arn:aws:ssm:*:*:session/${aws:username}-*"])]
        _iam.Policy(
            self, "SSMInstanceAccessPolicy", policy_name="SSMInstanceAccessPolicy",
            roles=[swift_instance_operator_role], statements=statements,
            force=True)

    def create_swift_infrastructure_role(
            self, database_instance: _rds.DatabaseInstance, instance_ids: List[str],
            mq_broker_arn: str):
        """create swift infrastructure role"""
        # Role is assumable only by principals of this account that signed
        # in with MFA.
        swift_infrastructure_role = \
            _iam.Role(self, "SWIFTInfrastructureRole",
                      role_name="SWIFTInfrastructureRole",
                      assumed_by=_iam.AccountPrincipal(account_id=self.account)
                      .with_conditions({"Bool": {"aws:MultiFactorAuthPresent": "true"}})
                      )
        instances_resource = []
        if instance_ids is not None:
            for instance_id in instance_ids:
                instances_resource.append(
                    "arn:aws:ec2:" + self.region + ":" + self.account + ":instance/" + instance_id)
        # Start/stop rights are scoped to the specific RDS instance, EC2
        # instances and MQ broker passed in; describe/list stay broad.
        statements = [
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["rds:Describe*"],
                resources=["*"]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["rds:Start*", "rds:Stop*"],
                resources=[database_instance.instance_arn]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ec2:Describe*"],
                resources=["*"]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ec2:Start*", "ec2:Stop*"],
                resources=instances_resource),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["mq:List*", "mq:Describe*", "mq:RebootBroker"],
                resources=[mq_broker_arn]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["logs:List*", "logs:Describe*", "logs:Get*"],
                resources=["*"])]
        _iam.Policy(
            self, "SwiftInfrastructurePolicy", policy_name="SwiftInfrastructurePolicy",
            roles=[swift_infrastructure_role], statements=statements,
            force=True)
2593d84834118e5edba7d5ac56debf357570d58b | 1,192 | py | Python | s3prl/downstream/ctc/corpus/downsample_cv.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 1 | 2022-03-15T04:04:23.000Z | 2022-03-15T04:04:23.000Z | s3prl/downstream/ctc/corpus/downsample_cv.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 2 | 2021-12-08T14:52:39.000Z | 2021-12-12T09:33:08.000Z | s3prl/downstream/ctc/corpus/downsample_cv.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | null | null | null | import argparse
import csv
from os.path import join
from pathlib import Path
from tqdm import tqdm
import torch
import torchaudio
import numpy as np
from librosa import resample
if __name__ == "__main__":
main()
| 25.913043 | 78 | 0.621644 | import argparse
import csv
from os.path import join
from pathlib import Path
from tqdm import tqdm
import torch
import torchaudio
import numpy as np
from librosa import resample
def read_processed_tsv(path):
    """Read a processed Common Voice tsv and return the clip file names.

    The header row is skipped; the first column of every remaining row is
    taken as a file name whose 3-character extension is replaced by "mp3".
    """
    file_list = []
    with open(path, "r") as fp:
        reader = csv.reader(fp, delimiter="\t")
        next(reader, None)  # drop the header row
        for row in reader:
            file_list.append(row[0][:-3] + "mp3")
    return file_list
def main():
    """Resample every clip listed in the processed tsv to 16 kHz wav files.

    For each entry, loads <root>/<clip>.mp3, resamples it with librosa's
    kaiser_best resampler, and writes the result next to the source with a
    .wav extension.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--root", type=str, help="Directory of the dataset.")
    parser.add_argument("--tsv", type=str, help="Path to processed tsv file.")
    args = parser.parse_args()

    for rel_name in tqdm(read_processed_tsv(args.tsv)):
        src_path = join(args.root, str(rel_name))
        audio, orig_sr = torchaudio.load(src_path)
        # librosa legacy positional API: resample(y, orig_sr, target_sr)
        resampled = resample(
            audio.squeeze(0).numpy(), orig_sr, 16000, res_type="kaiser_best"
        )
        out_tensor = torch.FloatTensor(resampled).unsqueeze(0)
        # Same base name, .wav extension, 16 kHz sample rate.
        torchaudio.save(src_path[:-3] + "wav", out_tensor, 16000)
if __name__ == "__main__":
main()
| 925 | 0 | 46 |
2e9e400801bf584b334ce06ed106835a338d64f7 | 1,474 | py | Python | Kseg2annANN.py | kejitan/ESVGscale | d4674d7ba3c897e25c010b3e1bceb3ca421adcd3 | [
"CC-BY-4.0"
] | null | null | null | Kseg2annANN.py | kejitan/ESVGscale | d4674d7ba3c897e25c010b3e1bceb3ca421adcd3 | [
"CC-BY-4.0"
] | 7 | 2021-04-21T01:01:12.000Z | 2022-03-12T00:18:22.000Z | Kseg2annANN.py | kejitan/ESVGscale | d4674d7ba3c897e25c010b3e1bceb3ca421adcd3 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
import os
from PIL import Image
import glob, os
from tqdm import tqdm
import six
#import cv2
import pandas as pd
from keras_segmentation.data_utils.data_loader import get_image_array, get_segmentation_array
import numpy as np
import re
import json
from pandas.io.json import json_normalize
import time
import multiprocessing
| 22.333333 | 93 | 0.629579 | #!/usr/bin/env python
import os
from PIL import Image
import glob, os
from tqdm import tqdm
import six
#import cv2
import pandas as pd
from keras_segmentation.data_utils.data_loader import get_image_array, get_segmentation_array
import numpy as np
import re
import json
from pandas.io.json import json_normalize
import time
import multiprocessing
def seg2ann(seg_file) :
    """Summarise a 473x473, 150-class segmentation map into per-class stats.

    Reads class names from ./PSPindexClass.csv, loads the one-hot label
    array for *seg_file*, and returns a dict whose values are
    [class_number, pixel_count, class_name] for every class present.

    NOTE(review): the result dict is keyed by pixel count, so two classes
    with the same count overwrite each other -- confirm this is intended.
    NOTE(review): centroids (xsumavg/ysumavg) are computed but never used
    in the returned value.
    """
    try:
        # Class index -> name lookup table; bail out with {} if unreadable.
        data = pd.read_csv('./PSPindexClass.csv')
    except Exception as e:
        print(e)
        return {}
    cols = ['Idx','Ratio','Train','Val','Stuff','Name']  # expected CSV columns (unused)
    # NOTE(review): np.object is removed in NumPy >= 1.20; presumably this
    # targets an older NumPy -- verify before upgrading.
    CNames = np.empty(150, dtype=np.object)
    for k in range(150):
        CNames[k] = data['Name'].iloc[k]
    # One-hot label volume indexed as [row, col, class] = (473, 473, 150).
    seg_labels = get_segmentation_array(seg_file, 150, 473, 473, no_reshape=True)
    # CN[k] becomes [class_number, pixel_count] for class k.
    CN = np.empty(150,dtype=np.object)
    for i in range(CN.shape[0]):
        CN[i] = []
    xsumavg = np.zeros(150)
    ysumavg = np.zeros(150)
    xsum = 0  # unused
    ysum = 0  # unused
    for k in range (150):
        CN[k].append(k+1) # class num
        CN[k].append(0) # classs val CN[1]
        # Total number of pixels labelled with class k.
        CN[k][1] = np.sum(seg_labels[:,:,k], axis=(0,1))
        if CN[k][1] > 0 :
            # Accumulate pixel coordinates, then average to the centroid.
            for i in range(473):
                for j in range(473):
                    if (seg_labels[i, j, k]) == 1 :
                        xsumavg[k] = xsumavg[k] + j
                        ysumavg[k] = ysumavg[k] + i
            xsumavg[k] = xsumavg[k]/CN[k][1]
            ysumavg[k] = ysumavg[k]/CN[k][1]
    CDict = {}
    for k in range(150):
        if CN[k][1] != 0:
            centroidx = xsumavg[k]  # NOTE(review): never used
            centroidy = ysumavg[k]  # NOTE(review): never used
            CDict[CN[k][1]] = [ (CN[k][0]), (CN[k][1].astype(int)), CNames[k] ]
    return CDict
| 1,097 | 0 | 23 |
000d0dce0600f816e990894a0f4ae04b12802ab8 | 771 | py | Python | Part_3_advanced/m03_date_and_time/date_iso_calendar_and_weekday/homework_1_solution/new_movies/cinema_schedule.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m03_date_and_time/date_iso_calendar_and_weekday/homework_1_solution/new_movies/cinema_schedule.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m03_date_and_time/date_iso_calendar_and_weekday/homework_1_solution/new_movies/cinema_schedule.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | from enum import Enum, auto
from new_movies import movies_directory
weekly_schedule = {
Weekday.MONDAY: movies_directory.available_movies[0:2],
Weekday.TUESDAY: movies_directory.available_movies[2:4],
Weekday.WEDNESDAY: movies_directory.available_movies[4:6],
Weekday.THURSDAY: movies_directory.available_movies[6:8],
Weekday.FRIDAY: movies_directory.available_movies[8:11],
Weekday.SATURDAY: movies_directory.available_movies[11:12],
Weekday.SUNDAY: movies_directory.available_movies[12:14],
}
| 26.586207 | 63 | 0.736706 | from enum import Enum, auto
from new_movies import movies_directory
class Weekday(Enum):
    """Days of the week, valued 1 (Monday) through 7 (Sunday)."""

    MONDAY = 1
    TUESDAY = 2
    WEDNESDAY = 3
    THURSDAY = 4
    FRIDAY = 5
    SATURDAY = 6
    SUNDAY = 7
# Weekly programme: maps each Weekday to the slice of
# movies_directory.available_movies shown on that day.
weekly_schedule = {
    Weekday.MONDAY: movies_directory.available_movies[0:2],
    Weekday.TUESDAY: movies_directory.available_movies[2:4],
    Weekday.WEDNESDAY: movies_directory.available_movies[4:6],
    Weekday.THURSDAY: movies_directory.available_movies[6:8],
    Weekday.FRIDAY: movies_directory.available_movies[8:11],
    Weekday.SATURDAY: movies_directory.available_movies[11:12],
    Weekday.SUNDAY: movies_directory.available_movies[12:14],
}
def get_movies_by_weekday(weekday):
    """Return the list of movies scheduled for the given Weekday.

    Raises KeyError if *weekday* is not present in the schedule.
    """
    movies_for_day = weekly_schedule[weekday]
    return movies_for_day
| 50 | 147 | 46 |
964acf6bcfdb818f7ae341ce8f450e261785e925 | 3,351 | py | Python | sara_flexbe_behaviors/src/sara_flexbe_behaviors/check_reachability_sm.py | WalkingMachine/sara_behaviors | fcb55d274331915cd39d7d444546f17a39f85a44 | [
"BSD-3-Clause"
] | 5 | 2018-05-07T19:58:08.000Z | 2021-04-21T10:49:05.000Z | sara_flexbe_behaviors/src/sara_flexbe_behaviors/check_reachability_sm.py | WalkingMachine/sara_behaviors | fcb55d274331915cd39d7d444546f17a39f85a44 | [
"BSD-3-Clause"
] | 21 | 2017-05-26T01:20:06.000Z | 2021-01-26T23:03:36.000Z | sara_flexbe_behaviors/src/sara_flexbe_behaviors/check_reachability_sm.py | WalkingMachine/sara_behaviors | fcb55d274331915cd39d7d444546f17a39f85a44 | [
"BSD-3-Clause"
] | 2 | 2019-07-22T07:21:20.000Z | 2019-11-11T20:49:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.gen_gripper_pose import GenGripperPose
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.moveit_move import MoveitMove
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Fri Oct 20 2017
@author: Philippe La Madeleine
'''
class Check_reachabilitySM(Behavior):
'''
check if the object is in range
'''
# [/MANUAL_INIT]
# Behavior comments:
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| 32.852941 | 115 | 0.626977 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.gen_gripper_pose import GenGripperPose
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.moveit_move import MoveitMove
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Fri Oct 20 2017
@author: Philippe La Madeleine
'''
class Check_reachabilitySM(Behavior):
    '''
    check if the object is in range
    '''

    def __init__(self):
        super(Check_reachabilitySM, self).__init__()
        self.name = 'Check_reachability'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        """Build the reachability-check state machine.

        Input key 'pose' holds the target; outcome is 'ok' when MoveIt can
        plan to the generated gripper pose, 'too_far' when any geometric
        pre-check or the kinematic plan fails.
        """
        # x:609 y:365, x:602 y:89
        _state_machine = OperatableStateMachine(outcomes=['ok', 'too_far'], input_keys=['pose'])
        _state_machine.userdata.pose = 0

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        with _state_machine:
            # x:42 y:56
            # NOTE(review): 'gen' transitions straight to 'kinematic test',
            # so 'first check'/'second check'/'third check' appear unreachable
            # -- confirm the intended wiring.
            OperatableStateMachine.add('gen',
                                       # BUG FIX: was `planar=false` -- `false` is an
                                       # undefined name in Python (NameError at runtime).
                                       GenGripperPose(l=0, z=0, planar=False),
                                       transitions={'done': 'kinematic test', 'fail': 'too_far'},
                                       autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'pose_in': 'pose', 'pose_out': 'pose_out'})

            # x:195 y:347
            # NOTE(review): (x.position.z-1) is not squared -- presumably this
            # was meant to be a Euclidean distance from (0, 0, 1); confirm.
            OperatableStateMachine.add('third check',
                                       CheckConditionState(predicate=lambda x: (x.position.x**2+x.position.y**2+(x.position.z-1))**0.5 < 1.5),
                                       transitions={'true': 'kinematic test', 'false': 'too_far'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'pose_out'})

            # x:190 y:147
            OperatableStateMachine.add('first check',
                                       CheckConditionState(predicate=lambda x: x.position.x<0.8),
                                       transitions={'true': 'second check', 'false': 'too_far'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'pose_out'})

            # x:196 y:253
            OperatableStateMachine.add('second check',
                                       CheckConditionState(predicate=lambda x: x.position.z>0.5),
                                       transitions={'true': 'third check', 'false': 'too_far'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'pose_out'})

            # x:99 y:520
            OperatableStateMachine.add('kinematic test',
                                       MoveitMove(move=False, waitForExecution=True, group="RightArm", watchdog=15),
                                       transitions={'done': 'ok', 'failed': 'too_far'},
                                       autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'target': 'pose_out'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| 2,182 | 0 | 48 |
d0d3ee91c9b8767fd459ff0b851864c764d98ba6 | 2,489 | py | Python | uu/formlibrary/upgrades/reindexer.py | mostscript/uu.formlibrary | a7f5819abac7c1ddea69ddee8fce465d45f4d1d5 | [
"BSD-4-Clause-UC"
] | null | null | null | uu/formlibrary/upgrades/reindexer.py | mostscript/uu.formlibrary | a7f5819abac7c1ddea69ddee8fce465d45f4d1d5 | [
"BSD-4-Clause-UC"
] | null | null | null | uu/formlibrary/upgrades/reindexer.py | mostscript/uu.formlibrary | a7f5819abac7c1ddea69ddee8fce465d45f4d1d5 | [
"BSD-4-Clause-UC"
] | null | null | null | import sys
import transaction
from zope.component.hooks import setSite
PKGNAME = 'uu.formlibrary'
PROFILE = 'profile-%s:default' % PKGNAME
_installed = lambda site: site.portal_quickinstaller.isProductInstalled
product_installed = lambda site, name: _installed(site)(name)
if __name__ == '__main__' and 'app' in locals():
idxname = sys.argv[-1]
if idxname.endswith('.py'):
print 'No index name has been provided, reindexing all indexes.'
idxname = None
main(app, idxname) # noqa
| 30.353659 | 78 | 0.634793 | import sys
import transaction
from zope.component.hooks import setSite
PKGNAME = 'uu.formlibrary'
PROFILE = 'profile-%s:default' % PKGNAME
_installed = lambda site: site.portal_quickinstaller.isProductInstalled
product_installed = lambda site, name: _installed(site)(name)
def stale_catalog_entries(site, catalog=None):
    """Return [(rid, path), ...] for catalog records whose object is gone.

    Walks every path recorded in the portal catalog's low-level mapping and
    tries to resolve each brain back to its object; a KeyError during that
    lookup marks the record as stale.  (Python 2 / Plone script.)
    """
    stale = []
    catalog = catalog or site.portal_catalog
    _catalog = catalog._catalog  # underlying ZCatalog data structures
    getbrain = lambda rid: _catalog[rid]
    getbrain = lambda rid: _catalog[rid] if False else getbrain  # noqa: placeholder removed
    getobject = lambda brain: brain._unrestrictedGetObject()
    # Snapshot the mapping with list() before iterating it.
    for rid, path in list(_catalog.paths.items()):
        brain = getbrain(rid)
        try:
            o = getobject(brain)  # noqa, poking for exception
        except KeyError:
            print 'Stale path (%s): %s' % (rid, path)
            stale.append((rid, path))
    return stale
def prune_stale_catalog_entries(site):
    """Remove stale records from the portal catalog's internal mappings.

    Deletes each stale rid/path from the catalog's data, paths and uids
    mappings, then re-checks that every pruned entry is really gone.
    Returns the number of stale records found.  (Python 2 / Plone script.)
    """
    catalog = site.portal_catalog
    stale = stale_catalog_entries(site, catalog)
    _catalog = catalog._catalog
    for rid, path in stale:
        # Guarded deletes: a stale record may be absent from some mappings.
        if rid in _catalog.data:
            del(_catalog.data[rid])
        if rid in _catalog.paths:
            del(_catalog.paths[rid])
        if path in _catalog.uids:
            del(_catalog.uids[path])
    for rid, path in stale:
        # Sanity pass: nothing stale may survive in any of the mappings.
        assert rid not in _catalog.data
        assert rid not in _catalog.paths
        assert path not in _catalog.uids
    return len(stale)
def reindex(site, name, catalog=None):
    """Reindex one catalog index, or every index when *name* is None.

    With name=None, each index is reindexed individually; the trailing
    manage_reindexIndex(None) call is then still issued (original quirk,
    preserved).
    """
    if catalog is None or not catalog:
        catalog = site.portal_catalog
    if name is None:
        for index_name in catalog._catalog.indexes.keys():
            reindex(site, index_name, catalog)
    catalog.manage_reindexIndex(name)
def main(app, idxname):
    """Prune stale records and reindex *idxname* for each Plone site in app.

    idxname of None means "reindex all indexes".  Only sites with the
    uu.formlibrary product installed are touched; one transaction is
    committed per site with a descriptive note.  (Python 2 / Zope script.)
    """
    for site in app.objectValues('Plone Site'):
        print '== SITE: %s ==' % site.getId()
        setSite(site)  # make this site the active local component site
        if product_installed(site, PKGNAME):
            stale = prune_stale_catalog_entries(site)
            if stale:
                print '\tSuccessfully pruned %s stale catalog records' % stale
            print '\tReindexing %s' % idxname
            reindex(site, idxname)
            txn = transaction.get()
            name = "'%s'" % idxname if idxname else '(ALL INDEXES)'
            txn.note('Update: reindexed %s index for %s' % (
                name,
                site.getId(),
            ))
            txn.commit()
if __name__ == '__main__' and 'app' in locals():
idxname = sys.argv[-1]
if idxname.endswith('.py'):
print 'No index name has been provided, reindexing all indexes.'
idxname = None
main(app, idxname) # noqa
| 1,878 | 0 | 92 |
e764e1f40a98967bf941bfbb2600858b41fd38ee | 1,364 | py | Python | brain/missing_incorrect_files.py | neuropoly/lesion-mapping | 48365fec608b0a4bce8c613c937b2b7f26317470 | [
"MIT"
] | null | null | null | brain/missing_incorrect_files.py | neuropoly/lesion-mapping | 48365fec608b0a4bce8c613c937b2b7f26317470 | [
"MIT"
] | null | null | null | brain/missing_incorrect_files.py | neuropoly/lesion-mapping | 48365fec608b0a4bce8c613c937b2b7f26317470 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# This script helps at the generation or correction of groundtruths.
#
# Usage: python missing_incorrect_files.py <pickle_filename>
# where <pickle_filename> has been generated by '0_check_data.py'.
# e.g. python missing_incorrect_files.py 201809192209_incorrect_lesion.pkl
#
# Charley Gros 2018-09-18
# Modified: 2018-10-01
import os
import sys
import pickle
import sct_utils as sct
def _visualize_incorrect_segmentation(lst):
'''Open incorrect segmentations with FSLeyes.'''
stg = '\n\nIncorrect files: ' + str(len(lst)) + '\n\n'
stg += 'Please correct the segmentations and save them as *_lesion_manual.nii.gz for the lesion segmentation.'
stg += '\n'
print stg
for l in lst:
print os.path.dirname(l) + '\n'
fname_img = os.path.dirname(l) + '/' + l.split('/')[-2] + '.nii.gz'
os.system(' '.join(['fsleyes', fname_img, l, '-cm Red']))
def _display_missing_files(dct):
'''Print the missing files in the terminal.'''
stg = '\n\nMissing files: ' + str(len(dct[dct.keys()[0]])) + '\n\n' + '\n'.join(dct[dct.keys()[0]])
print stg
if __name__ == '__main__':
path_pickle = sys.argv[1]
run_main(path_pickle)
| 28.416667 | 111 | 0.703079 | #!/usr/bin/env python
#
# This script helps at the generation or correction of groundtruths.
#
# Usage: python missing_incorrect_files.py <pickle_filename>
# where <pickle_filename> has been generated by '0_check_data.py'.
# e.g. python missing_incorrect_files.py 201809192209_incorrect_lesion.pkl
#
# Charley Gros 2018-09-18
# Modified: 2018-10-01
import os
import sys
import pickle
import sct_utils as sct
def _visualize_incorrect_segmentation(lst):
    '''Open incorrect segmentations with FSLeyes.'''
    # Header: number of files to correct plus naming instructions.
    stg = '\n\nIncorrect files: ' + str(len(lst)) + '\n\n'
    stg += 'Please correct the segmentations and save them as *_lesion_manual.nii.gz for the lesion segmentation.'
    stg += '\n'
    print stg
    for l in lst:
        print os.path.dirname(l) + '\n'
        # The source image sits in the segmentation's folder and is named
        # after the parent directory: <subject>/<subject>.nii.gz.
        fname_img = os.path.dirname(l) + '/' + l.split('/')[-2] + '.nii.gz'
        # os.system blocks until FSLeyes is closed; segmentation overlaid in red.
        os.system(' '.join(['fsleyes', fname_img, l, '-cm Red']))
def _display_missing_files(dct):
    '''Print the missing files in the terminal.'''
    # dct has a single key mapping to the list of missing file paths.
    # (Python 2 only: dict.keys() returns a subscriptable list.)
    stg = '\n\nMissing files: ' + str(len(dct[dct.keys()[0]])) + '\n\n' + '\n'.join(dct[dct.keys()[0]])
    print stg
def run_main(fname_pickle):
    """Load the check-data pickle and dispatch on its single key.

    'incorrect_lesion' entries are reviewed interactively in FSLeyes;
    anything else is treated as a list of missing files and printed.

    NOTE(review): the file handle given to pickle.load is never closed, and
    dct.keys()[0] relies on Python 2 (keys() is not subscriptable in Py3).
    """
    dct = pickle.load(open(fname_pickle,"rb"))
    if dct.keys()[0] in ['incorrect_lesion']:
        _visualize_incorrect_segmentation(dct[dct.keys()[0]])
    else:
        _display_missing_files(dct)
if __name__ == '__main__':
path_pickle = sys.argv[1]
run_main(path_pickle)
| 187 | 0 | 23 |
56cb0d99986ba98be7c75318cc32a218466d3d93 | 10,142 | py | Python | scripts/rouge_analysis.py | olizhu10/newsroom | 0a6dccd21da28892cc089e0924c53e0723b42785 | [
"Apache-2.0"
] | null | null | null | scripts/rouge_analysis.py | olizhu10/newsroom | 0a6dccd21da28892cc089e0924c53e0723b42785 | [
"Apache-2.0"
] | null | null | null | scripts/rouge_analysis.py | olizhu10/newsroom | 0a6dccd21da28892cc089e0924c53e0723b42785 | [
"Apache-2.0"
] | 1 | 2019-10-04T03:24:35.000Z | 2019-10-04T03:24:35.000Z | from rouge import Rouge
import csv
"""Creates a csv file with rouge scores between summaries in a cluster"""
CLUSTERS = {
'sandy':[
"After Sandy hit the East Coast Monday night, more than 2 million New Jersey residents were left without power and feeling powerless",
"Superstorm Sandy crashed ashore this week, cutting a path of destruction several hundred miles long. Here are some numbers that help put it in perspective.",
"Hurricane Sandy struck the Northeast hard when it made landfall in New Jersey Tuesday night. New York Magazine's cover reflects the damage.",
"Hurricane Sandy is poised to become an “unprecedented” superstorm that could leave millions of people in the Northeast without power for days or even weeks, experts said Saturday.",
"One of the largest and fiercest storms to menace the East Coast in years caused widespread flooding, power outages and damage. At least 16 have died, AP reports.",
"The hurricane continued its march north, with powerful winds already affecting the region on Sunday and landfall expected on Monday or Tuesday.",
],
'orlando':[
"A shooting at a gay nightclub in Orlando killed at least 50 people on Sunday, June 12. Orlando police said they shot and killed the gunman.",
"Approximately 20 people have died after an attacker opened fire inside a gay nightclub in the Florida city of Orlando, police say.",
"Officials say at least 49 people were killed and dozens were injured in the shooting.",
"A terrorist opened fire inside a popular Orlando gay club near closing time early Sunday.",
"At least 42 people were taken to hospitals with injuries, police said. The shooter was killed in an exchange of gunfire with police.",
"Police in the US city of Orlando are telling people to stay away from a gay nightclub where a shooting has broken out and people are injured.'",
"Unconfirmed reports have emerged of a shooting at a nightclub in Orlando, Florida.'",
"At least 50 people are dead and dozens injured after a gunman opened fire at a gay nightclub in Orlando. What exactly happened?'",
"For three harrowing hours, as Omar Mateen carried out his rampage inside the Pulse nightclub in Orlando, clubgoers hid in bathrooms, in air-conditioning vents, under tables.'",
"It's the worst terror attack on American soil since 9/11, and the deadliest mass shooting in U.S. history.'",
"The gun massacre Sunday at an Orlando nightclub is the worst in the history of the U.S., where mass shootings are frighteningly common.'",
],
'mandela':[
"Nelson Mandela, who rose from militant antiapartheid activist to become the unifying president of a democratic South Africa and a global symbol of racial reconciliation, died at his Johannesburg home on Thursday. He was 95.",
"He was the country’s most potent symbol of unity, using the power of forgiveness and reconciliation.",
"The South African leader, who passionately fought apartheid, dies at age 95",
"Nelson Mandela, the anti-apartheid crusader and former South African president, died Dec. 5 at 95. We’re bringing you live updates here.",
"In a symbol befitting a nation in mourning, a dark gray cloud swept over Johannesburg on Friday as news spread that Nelson Mandela is dead.",
"The people of South Africa reacted Friday with deep sadness at the loss of a man considered by many to be the father of the nation, while mourners said it was also a time to celebrate the achievements of the anti-apartheid leader who emerged from prison to become South Africa's first black president.",
"When Nelson Mandela died on Thursday, people around the globe gathered to memorialize the man widely recognized as a beacon of courage, hope and freedom.",
"Mandela transformed his nation from oppressive regime to one of the most inclusive democracies on the planet.",
"In an extraordinary life that spanned the rural hills where he was groomed for tribal leadership, anti-apartheid activism, guerrilla warfare, 27 years of political imprisonment and, ultimately, the South African presidency, Mandela held a unique cachet that engendered respect and awe in capitals around the globe.'",
],
'boston':[
"At least two dead and dozens injured when bombs go off near finish line.",
"Two explosions rocked the finish line at the Boston Marathon on Monday, killing three and wounding at least 144 people",
"Pressure cookers are believed to have been used to make the crude bombs that sent torrents of deadly shrapnel hurling into a crowd of onlookers and competitors at Monday’s Boston Marathon, experts told Fox News",
"Two deadly bomb blasts, seconds apart, turned the 117th Boston Marathon – the nation’s premier event for elite and recreational runners – into a tragedy on Monday. Here is a timeline of how the day’s events unfolded: 9 a.m. ET — Race …",
"When two bombs detonated in the final stretch of the Boston Marathon on Monday afternoon, runners, spectators and people across the country and around the world were stunned by the public nature of",
"Mayhem descended on the Boston marathon Monday afternoon, when an explosion at the finish line killed at least two and injured at least 23. TIME is tracking the breaking news from the scene in downtown Boston. Follow here for constant updates. 5:45 p.m.",
"Two bombs exploded in the packed streets near the finish line of the Boston Marathon on Monday, killing two people and injuring more than 100 in a terrifying scene of shattered glass, billowing smoke, bloodstained pavement and severed limbs, authorities said",
"Blasts near the finish line of the renowned race caused dozens of injuries and scattered crowds.",
"Two deadly explosions brought the Boston Marathon and much of this city to a chaotic halt Monday, killing at least three people, injuring about 140 and once again raising the specter of terrorism on American soil.",
]}
if __name__ == '__main__':
main()
| 60.369048 | 318 | 0.686945 | from rouge import Rouge
import csv
"""Creates a csv file with rouge scores between summaries in a cluster"""
def rouge(cluster):
    """Compute pairwise ROUGE F1 matrices for a cluster of summaries.

    For every ordered pair (summary1, summary2) in *cluster*, records the
    ROUGE-1, ROUGE-2 and ROUGE-L F1 scores.

    Returns:
        (matrix1, matrix2, matrixl): three len(cluster) x len(cluster)
        lists of lists of floats, one per ROUGE variant.
    """
    # Hoisted out of the loops: the scorer is stateless between calls and
    # was previously re-created for every pair; likewise get_scores was
    # called three times per pair for the same result.
    scorer = Rouge()
    matrix1 = []
    matrix2 = []
    matrixl = []
    for summary1 in cluster:
        scores1 = []
        scores2 = []
        scoresl = []
        for summary2 in cluster:
            scores = scorer.get_scores(summary1, summary2)[0]
            scores1.append(scores['rouge-1']['f'])
            scores2.append(scores['rouge-2']['f'])
            scoresl.append(scores['rouge-l']['f'])
        matrix1.append(scores1)
        matrix2.append(scores2)
        matrixl.append(scoresl)
    return matrix1, matrix2, matrixl
def main():
    """Write one CSV of pairwise ROUGE scores per cluster and variant.

    Produces ../data/<variant>_<cluster>.csv for variants rouge1, rouge2
    and rougel, each holding the square score matrix row by row.
    """
    variants = ('rouge1', 'rouge2', 'rougel')
    for key in CLUSTERS:
        for variant, matrix in zip(variants, rouge(CLUSTERS[key])):
            out_path = '../data/' + variant + '_' + key + '.csv'
            with open(out_path, 'w+') as csvfile:
                csv.writer(csvfile, delimiter=',').writerows(matrix)
def sample_data():
    """Draw threshold charts and precision/recall curves per sample cluster.

    NOTE(review): this function references names that are neither defined
    nor imported in this module (np, plt, threshold_chart,
    precision_recall_curve) and will raise NameError if called --
    presumably it was copied from a companion analysis script; confirm
    before use.
    """
    thresholds = np.linspace(0,1,21)
    #thresholds = [0.76,0.78,0.8,0.82,0.84,0.86,0.88,0.9,0.92,0.94,0.96,0.98,1.0]
    for key in CLUSTERS:
        matrix1, matrix2, matrixl = rouge(CLUSTERS[key])
        threshold_chart(key, matrix1, thresholds, 'rouge1')
        threshold_chart(key, matrix2, thresholds, 'rouge2')
        threshold_chart(key, matrixl, thresholds, 'rougel')
        plt.clf()
        precision_recall_curve(key, matrix1, thresholds, 'rouge1')
        plt.clf()
        precision_recall_curve(key, matrix2, thresholds, 'rouge2')
        plt.clf()
        precision_recall_curve(key, matrixl, thresholds, 'rougel')
def full_data():
    """Sweep thresholds over all clusters and plot precision/recall curves.

    Runs two sweeps (thresholds in [0, 0.5] then [0, 1]) over the ROUGE-L
    matrices, accumulating confusion counts across clusters, writing one
    CSV row per threshold, and saving a PR curve PNG per sweep.

    NOTE(review): references names undefined in this module (plt, np,
    threshold_matrix, find_pos_neg, true_matrices, precision, recall) and
    will raise NameError if called -- presumably copied from a companion
    script; confirm before use.  The '#switch here' markers indicate where
    to change the ROUGE variant under analysis.
    """
    plt.clf()
    thresholds = np.linspace(0,0.5,11)
    #thresholds = [0.76,0.78,0.8,0.82,0.84,0.86,0.88,0.9,0.92,0.94,0.96,0.98,1.0]
    precisions = []
    recalls = []
    with open('../data/rougel_threshold_full_close.csv', 'w+') as csvfile: #switch here
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['threshold','TP','FP','TN','FN','precision','recall'])
        for threshold in thresholds:
            # Confusion counts accumulated over every cluster.
            TPs = 0
            FPs = 0
            TNs = 0
            FNs = 0
            for key in CLUSTERS:
                matrix1, matrix2, matrixl = rouge(CLUSTERS[key])
                tm = threshold_matrix(threshold, matrixl) #switch here
                TP, FP, TN, FN = find_pos_neg(true_matrices[key], tm)
                TPs += TP
                FPs += FP
                TNs += TN
                FNs += FN
            p = precision(TPs,FPs)
            precisions.append(p)
            r = recall(TPs,FNs)
            recalls.append(r)
            writer.writerow([threshold,TPs,FPs,TNs,FNs,p,r])
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.plot(recalls, precisions)
    plt.savefig('../data/rougel_full_prcurve_close.png') #switch here
    # Second sweep: coarser thresholds over the full [0, 1] range.
    plt.clf()
    thresholds = np.linspace(0,1,21)
    precisions = []
    recalls = []
    with open('../data/rougel_threshold_full.csv', 'w+') as csvfile: #switch here
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['threshold','TP','FP','TN','FN','precision','recall'])
        for threshold in thresholds:
            TPs = 0
            FPs = 0
            TNs = 0
            FNs = 0
            for key in CLUSTERS:
                matrix1, matrix2, matrixl = rouge(CLUSTERS[key])
                tm = threshold_matrix(threshold, matrixl) #switch here
                TP, FP, TN, FN = find_pos_neg(true_matrices[key], tm)
                TPs += TP
                FPs += FP
                TNs += TN
                FNs += FN
            p = precision(TPs,FPs)
            precisions.append(p)
            r = recall(TPs,FNs)
            recalls.append(r)
            writer.writerow([threshold,TPs,FPs,TNs,FNs,p,r])
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.plot(recalls, precisions)
    plt.savefig('../data/rougel_full_prcurve.png') #switch here
CLUSTERS = {
'sandy':[
"After Sandy hit the East Coast Monday night, more than 2 million New Jersey residents were left without power and feeling powerless",
"Superstorm Sandy crashed ashore this week, cutting a path of destruction several hundred miles long. Here are some numbers that help put it in perspective.",
"Hurricane Sandy struck the Northeast hard when it made landfall in New Jersey Tuesday night. New York Magazine's cover reflects the damage.",
"Hurricane Sandy is poised to become an “unprecedented” superstorm that could leave millions of people in the Northeast without power for days or even weeks, experts said Saturday.",
"One of the largest and fiercest storms to menace the East Coast in years caused widespread flooding, power outages and damage. At least 16 have died, AP reports.",
"The hurricane continued its march north, with powerful winds already affecting the region on Sunday and landfall expected on Monday or Tuesday.",
],
'orlando':[
"A shooting at a gay nightclub in Orlando killed at least 50 people on Sunday, June 12. Orlando police said they shot and killed the gunman.",
"Approximately 20 people have died after an attacker opened fire inside a gay nightclub in the Florida city of Orlando, police say.",
"Officials say at least 49 people were killed and dozens were injured in the shooting.",
"A terrorist opened fire inside a popular Orlando gay club near closing time early Sunday.",
"At least 42 people were taken to hospitals with injuries, police said. The shooter was killed in an exchange of gunfire with police.",
"Police in the US city of Orlando are telling people to stay away from a gay nightclub where a shooting has broken out and people are injured.'",
"Unconfirmed reports have emerged of a shooting at a nightclub in Orlando, Florida.'",
"At least 50 people are dead and dozens injured after a gunman opened fire at a gay nightclub in Orlando. What exactly happened?'",
"For three harrowing hours, as Omar Mateen carried out his rampage inside the Pulse nightclub in Orlando, clubgoers hid in bathrooms, in air-conditioning vents, under tables.'",
"It's the worst terror attack on American soil since 9/11, and the deadliest mass shooting in U.S. history.'",
"The gun massacre Sunday at an Orlando nightclub is the worst in the history of the U.S., where mass shootings are frighteningly common.'",
],
'mandela':[
"Nelson Mandela, who rose from militant antiapartheid activist to become the unifying president of a democratic South Africa and a global symbol of racial reconciliation, died at his Johannesburg home on Thursday. He was 95.",
"He was the country’s most potent symbol of unity, using the power of forgiveness and reconciliation.",
"The South African leader, who passionately fought apartheid, dies at age 95",
"Nelson Mandela, the anti-apartheid crusader and former South African president, died Dec. 5 at 95. We’re bringing you live updates here.",
"In a symbol befitting a nation in mourning, a dark gray cloud swept over Johannesburg on Friday as news spread that Nelson Mandela is dead.",
"The people of South Africa reacted Friday with deep sadness at the loss of a man considered by many to be the father of the nation, while mourners said it was also a time to celebrate the achievements of the anti-apartheid leader who emerged from prison to become South Africa's first black president.",
"When Nelson Mandela died on Thursday, people around the globe gathered to memorialize the man widely recognized as a beacon of courage, hope and freedom.",
"Mandela transformed his nation from oppressive regime to one of the most inclusive democracies on the planet.",
"In an extraordinary life that spanned the rural hills where he was groomed for tribal leadership, anti-apartheid activism, guerrilla warfare, 27 years of political imprisonment and, ultimately, the South African presidency, Mandela held a unique cachet that engendered respect and awe in capitals around the globe.'",
],
'boston':[
"At least two dead and dozens injured when bombs go off near finish line.",
"Two explosions rocked the finish line at the Boston Marathon on Monday, killing three and wounding at least 144 people",
"Pressure cookers are believed to have been used to make the crude bombs that sent torrents of deadly shrapnel hurling into a crowd of onlookers and competitors at Monday’s Boston Marathon, experts told Fox News",
"Two deadly bomb blasts, seconds apart, turned the 117th Boston Marathon – the nation’s premier event for elite and recreational runners – into a tragedy on Monday. Here is a timeline of how the day’s events unfolded: 9 a.m. ET — Race …",
"When two bombs detonated in the final stretch of the Boston Marathon on Monday afternoon, runners, spectators and people across the country and around the world were stunned by the public nature of",
"Mayhem descended on the Boston marathon Monday afternoon, when an explosion at the finish line killed at least two and injured at least 23. TIME is tracking the breaking news from the scene in downtown Boston. Follow here for constant updates. 5:45 p.m.",
"Two bombs exploded in the packed streets near the finish line of the Boston Marathon on Monday, killing two people and injuring more than 100 in a terrifying scene of shattered glass, billowing smoke, bloodstained pavement and severed limbs, authorities said",
"Blasts near the finish line of the renowned race caused dozens of injuries and scattered crowds.",
"Two deadly explosions brought the Boston Marathon and much of this city to a chaotic halt Monday, killing at least three people, injuring about 140 and once again raising the specter of terrorism on American soil.",
]}
if __name__ == '__main__':
main()
| 4,233 | 0 | 92 |
a392f556d31a226a84e5dcaedacfa7b0401a58a5 | 3,422 | py | Python | dearpygui_ext/simple_table.py | Atlamillias/DearPyGui_Ext | 7f8e500988c697d6e006af625065a2065537f56e | [
"MIT"
] | 27 | 2021-08-19T16:10:23.000Z | 2022-03-25T16:53:11.000Z | dearpygui_ext/simple_table.py | Atlamillias/DearPyGui_Ext | 7f8e500988c697d6e006af625065a2065537f56e | [
"MIT"
] | 4 | 2021-08-23T23:30:14.000Z | 2022-02-21T19:27:44.000Z | dearpygui_ext/simple_table.py | Atlamillias/DearPyGui_Ext | 7f8e500988c697d6e006af625065a2065537f56e | [
"MIT"
] | 4 | 2021-08-19T16:10:39.000Z | 2022-02-12T05:20:28.000Z | import dearpygui._dearpygui as internal_dpg
import dearpygui.dearpygui as dpg
# 0.6 functions
# * add_column
# * delete_column
# * set_table_data
# * get_table_data
# * get_table_item
# * set_table_item
# * get_table_selections
# * set_table_selections
# * insert_column
# * insert_row
# * set_headers
| 34.918367 | 122 | 0.579778 | import dearpygui._dearpygui as internal_dpg
import dearpygui.dearpygui as dpg
# 0.6 functions
# * add_column
# * delete_column
# * set_table_data
# * get_table_data
# * get_table_item
# * set_table_item
# * get_table_selections
# * set_table_selections
# * insert_column
# * insert_row
# * set_headers
class mvSimpleTable:
    """A Dear PyGui table whose cells are selectable widgets.

    Rows are built from plain Python sequences; each cell is rendered as a
    ``selectable`` whose toggle state is tracked in ``self._selections``
    (keyed by the selectable's widget id).  Rows supplied at construction
    time are staged and only realized into a real table by :meth:`submit`.
    """
    def __init__(self, columns, data=None):
        # columns: number of columns in the table.
        # data (optional): sequence of rows; each row must be indexable
        # with at least `columns` entries.
        self._table_id = dpg.generate_uuid()
        self._stage_id = dpg.generate_uuid()
        self._columns = columns
        self._rows = 0
        # Highlight colors applied to selected / hovered cells.
        with dpg.theme() as self._theme_id:
            with dpg.theme_component(dpg.mvSelectable):
                dpg.add_theme_color(dpg.mvThemeCol_Header, (0, 119, 200, 153))
                dpg.add_theme_color(dpg.mvThemeCol_HeaderHovered, (29, 151, 236, 103))
        self._selections = {}
        if data:
            self._rows = len(data)
            # Build initial rows inside a stage so they can be moved into
            # the real table widget later by submit().
            with dpg.mutex():
                with dpg.stage(tag=self._stage_id):
                    # Skipping argument checks speeds up bulk widget creation;
                    # restored to defaults once the rows are built.
                    dpg.configure_app(skip_positional_args=True, skip_required_args=True)
                    for row_index in range(len(data)):
                        row = data[row_index]
                        internal_dpg.push_container_stack(internal_dpg.add_table_row())
                        for column in range(self._columns):
                            # user_data carries (row, column, table) so the shared
                            # lambda can route the toggle back to this instance.
                            internal_dpg.add_selectable(label=str(row[column]),
                                                        user_data=[row_index, column, self],
                                                        callback=lambda s, a, u: u[2]._selection_toggle(s, a, u[0], u[1]))
                        internal_dpg.pop_container_stack()
                    dpg.configure_app(skip_positional_args=False, skip_required_args=False)
    def _selection_toggle(self, sender, value, row, column):
        # Record the latest toggle state of a cell, keyed by widget id.
        # NOTE(review): `row` and `column` are currently unused here.
        self._selections[sender] = value
    def clear(self):
        """Delete all data rows (slot-1 children) and reset selection state."""
        dpg.delete_item(self._table_id, children_only=True, slot=1)
        self._rows = 0
        self._selections = {}
    def add_row(self, data):
        """Append one row of selectable cells; `data` needs one entry per column."""
        dpg.push_container_stack(self._table_id)
        internal_dpg.push_container_stack(internal_dpg.add_table_row())
        for i in range(len(data)):
            internal_dpg.add_selectable(label=str(data[i]),
                                        user_data=[self._rows, i, self],
                                        callback=lambda s, a, u: u[2]._selection_toggle(s, a, u[0], u[1]))
        dpg.pop_container_stack()
        dpg.pop_container_stack()
        self._rows += 1
    def delete_row(self, row):
        """Delete the row at 0-based positional index `row`."""
        rows = dpg.get_item_children(self._table_id, slot=1)
        dpg.delete_item(rows[row])
    def submit(self):
        """Create the table widget in the current container, move any staged
        rows into it, and bind the selection-highlight theme."""
        with dpg.group() as temporary_id:
            with dpg.table(header_row=True, no_host_extendX=True, delay_search=True,
                           borders_innerH=True, borders_outerH=True, borders_innerV=True,
                           borders_outerV=True, context_menu_in_body=True, row_background=True,
                           policy=dpg.mvTable_SizingFixedFit, height=-1,
                           scrollY=True, tag=self._table_id, clipper=True):
                for i in range(self._columns):
                    internal_dpg.add_table_column(label="Header " + str(i))
            # Move the staged rows (if any) into the table, then discard the stage.
            dpg.unstage(self._stage_id)
            dpg.delete_item(self._stage_id)
        dpg.bind_item_theme(temporary_id, self._theme_id)
| 2,912 | -1 | 185 |
0acac77f99130902d3f1059af0697c6e56b0ebbd | 12,283 | py | Python | article/experiments/exp13.py | andycasey/mcfa | 8c4135e665e47006e9ca725e8bfc67315508366e | [
"MIT"
] | 2 | 2018-08-23T06:54:17.000Z | 2021-03-05T14:38:41.000Z | article/experiments/exp13.py | andycasey/mcfa | 8c4135e665e47006e9ca725e8bfc67315508366e | [
"MIT"
] | null | null | null | article/experiments/exp13.py | andycasey/mcfa | 8c4135e665e47006e9ca725e8bfc67315508366e | [
"MIT"
] | null | null | null |
"""
Experiment using all GALAH data.
"""
from __future__ import division # Just in case. Use Python 3.
import os
import sys
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import yaml
from matplotlib.ticker import MaxNLocator
from collections import Counter
from scipy import linalg
from hashlib import md5
sys.path.insert(0, "../../")
from mcfa import (mcfa, grid_search, mpl_utils, utils)
import galah_dr2 as galah
matplotlib.style.use(mpl_utils.mpl_style)
here = os.path.dirname(os.path.realpath(__file__))
with open("config.yml") as fp:
config = yaml.load(fp)
print(f"Config: {config}")
np.random.seed(config["random_seed"])
prefix = os.path.basename(__file__)[:-3]
unique_hash = md5((f"{config}").encode("utf-8")).hexdigest()[:5]
unique_config_path = f"{unique_hash}.yml"
if os.path.exists(unique_config_path):
print(f"Warning: this configuration already exists: {unique_config_path}")
with open(unique_config_path, "w") as fp:
yaml.dump(config, fp)
with open(__file__, "r") as fp:
code = fp.read()
with open(f"{unique_hash}-{__file__}", "w") as fp:
fp.write(code)
import os
os.system("rm -f *.pkl")
N_elements = 20
use_galah_flags = config["use_galah_flags"]
mcfa_kwds = dict()
mcfa_kwds.update(config["mcfa_kwds"])
elements = config[prefix]["elements"]
if config[prefix]["ignore_elements"] is not None:
elements = [el for el in elements if el not in config[prefix]["ignore_elements"]]
print(elements)
mask = galah.get_abundance_mask(elements, use_galah_flags=use_galah_flags)
galah_cuts = config[prefix]["galah_cuts"]
if galah_cuts is not None:
print(f"Applying cuts: {galah_cuts}")
for k, (lower, upper) in galah_cuts.items():
mask *= (upper >= galah.data[k]) * (galah.data[k] >= lower)
raise a
print(f"Number of stars: {sum(mask)}")
X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
print(f"Data shape: {X_H.shape}")
if config["wrt_x_fe"]:
X = convert_xh_to_xy(X_H, label_names, "fe_h")
else:
X = X_H
if not config["log_abundance"]:
X = 10**X
if config["subtract_mean"]:
X = X - np.mean(X, axis=0)
N, D = X.shape
# Do a gridsearch.
gs_options = config[prefix]["gridsearch"]
max_n_latent_factors = gs_options["max_n_latent_factors"]
max_n_components = gs_options["max_n_components"]
Js = 1 + np.arange(max_n_latent_factors)
Ks = 1 + np.arange(max_n_components)
N_inits = gs_options["n_inits"]
results_path = f"{prefix}-gridsearch-results.pkl"
if os.path.exists(results_path):
with open(results_path, "rb") as fp:
Jg, Kg, converged, meta, X, mcfa_kwds = pickle.load(fp)
else:
Jg, Kg, converged, meta = grid_search.grid_search(Js, Ks, X,
N_inits=N_inits, mcfa_kwds=mcfa_kwds)
with open(results_path, "wb") as fp:
pickle.dump((Jg, Kg, converged, meta, X, mcfa_kwds), fp)
ll = meta["ll"]
bic = meta["bic"]
mml = meta["message_length"]
J_best_ll, K_best_ll = grid_search.best(Js, Ks, -ll)
J_best_bic, K_best_bic = grid_search.best(Js, Ks, bic)
J_best_mml, K_best_mml = grid_search.best(Js, Ks, mml)
print(f"Best log likelihood at J = {J_best_ll} and K = {K_best_ll}")
print(f"Best BIC value found at J = {J_best_bic} and K = {K_best_bic}")
print(f"Best MML value found at J = {J_best_mml} and K = {K_best_mml}")
# Plot some contours.
plot_filled_contours_kwds = dict(converged=converged,
marker_function=np.nanargmin, N=100,
cmap="Spectral_r")
fig_ll = mpl_utils.plot_filled_contours(Jg, Kg, -ll,
colorbar_label=r"$-\log\mathcal{L}$",
**plot_filled_contours_kwds)
savefig(fig_ll, "gridsearch-ll")
fig_bic = mpl_utils.plot_filled_contours(Jg, Kg, bic,
colorbar_label=r"$\textrm{BIC}$",
**plot_filled_contours_kwds)
savefig(fig_bic, "gridsearch-bic")
fig_mml = mpl_utils.plot_filled_contours(Jg, Kg, mml,
colorbar_label=r"$\textrm{MML}$",
**plot_filled_contours_kwds)
savefig(fig_mml, "gridsearch-mml")
model = meta["best_models"][config["adopted_metric"]]
latex_label_names = [r"$\textrm{{{0}}}$".format(ea.split("_")[0].title()) for ea in label_names]
# Draw unrotated.
J_max = config["max_n_latent_factors_for_colormap"]
J_max = 12
cmap = mpl_utils.discrete_cmap(J_max, base_cmap="Spectral")
colors = [cmap(j) for j in range(J_max)]#[::-1]
A_est = model.theta_[model.parameter_names.index("A")]
A_astrophysical = np.zeros_like(A_est)#np.random.normal(0, 0.1, size=A_est.shape)
for i, tes in enumerate(config["grouped_elements"][:model.n_latent_factors]):
for j, te in enumerate(tes):
try:
idx = label_names.index("{0}_h".format(te.lower()))
except ValueError:
print(f"Skipping {te}")
else:
count = sum([(te in foo) for foo in config["grouped_elements"][:model.n_latent_factors]])
A_astrophysical[idx, i] = 1.0/count
A_astrophysical /= np.clip(np.sqrt(np.sum(A_astrophysical, axis=0)), 1, np.inf)
# Un-assigned columns
for column_index in np.where(np.all(A_astrophysical == 0, axis=0))[0]:
print(f"Warning: unassigned column index: {column_index}")
A_astrophysical[:, column_index] = np.random.normal(0, 1e-2, size=D)
if config["correct_A_astrophysical"]:
AL = linalg.cholesky(A_astrophysical.T @ A_astrophysical)
A_astrophysical = A_astrophysical @ linalg.solve(AL, np.eye(model.n_latent_factors))
max_n_rotations = 3
for each in range(max_n_rotations):
A_est = model.theta_[model.parameter_names.index("A")]
R, p_opt, cov, *_ = utils.find_rotation_matrix(A_astrophysical, A_est,
full_output=True)
R_opt = utils.exact_rotation_matrix(A_astrophysical, A_est,
p0=np.random.uniform(-np.pi, np.pi, model.n_latent_factors**2))
# WTF check R_opt.
AL = linalg.cholesky(R_opt.T @ R_opt)
R_opt2 = R_opt @ linalg.solve(AL, np.eye(model.n_latent_factors))
chi1 = np.sum(np.abs(A_est @ R - A_astrophysical))
chi2 = np.sum(np.abs(A_est @ R_opt2 - A_astrophysical))
R = R_opt2 if chi2 < chi1 else R
# Now make it a valid rotation matrix.
model.rotate(R, X=X, ensure_valid_rotation=True)
import pickle
with open(f"{unique_hash}-{prefix}-model.pkl", "wb") as fp:
pickle.dump(model, fp)
"""
J = model.n_latent_factors
L = model.theta_[model.parameter_names.index("A")]
elements = [ea.split("_")[0].title() for ea in label_names]
A_est = model.theta_[model.parameter_names.index("A")]
A_astrophysical = np.zeros_like(A_est)#np.random.normal(0, 0.1, size=A_est.shape)
for i, tes in enumerate(config["grouped_elements"][:model.n_latent_factors]):
for j, te in enumerate(tes):
try:
idx = label_names.index("{0}_h".format(te.lower()))
except ValueError:
print(f"Skipping {te}")
else:
count = sum([(te in foo) for foo in config["grouped_elements"][:model.n_latent_factors]])
A_astrophysical[idx, i] = 1.0/count
A_astrophysical /= np.clip(np.sqrt(np.sum(A_astrophysical, axis=0)), 1, np.inf)
# Un-assigned columns
for column_index in np.where(np.all(A_astrophysical == 0, axis=0))[0]:
print(f"Warning: unassigned column index: {column_index}")
A_astrophysical[:, column_index] = np.random.normal(0, 1e-2, size=D)
AL = linalg.cholesky(A_astrophysical.T @ A_astrophysical)
A_astrophysical = A_astrophysical @ linalg.solve(AL, np.eye(model.n_latent_factors))
R, p_opt, cov, *_ = utils.find_rotation_matrix(A_astrophysical, A_est,
full_output=True)
R_opt = utils.exact_rotation_matrix(A_astrophysical, A_est,
p0=np.random.uniform(-np.pi, np.pi, model.n_latent_factors**2))
# WTF check R_opt.
AL = linalg.cholesky(R_opt.T @ R_opt)
R_opt2 = R_opt @ linalg.solve(AL, np.eye(model.n_latent_factors))
chi1 = np.sum(np.abs(A_est @ R - A_astrophysical))
chi2 = np.sum(np.abs(A_est @ R_opt2 - A_astrophysical))
R = R_opt2 if chi2 < chi1 else R
# Now make it a valid rotation matrix.
model.rotate(R, X=X, ensure_valid_rotation=True)
"""
fig_fac = mpl_utils.plot_factor_loads_and_contributions(model, X,
label_names=latex_label_names, colors=colors,
target_loads=A_astrophysical)
savefig(fig_fac, "latent-factors-and-contributions-with-targets")
fig_fac = mpl_utils.plot_factor_loads_and_contributions(model, X,
label_names=latex_label_names, colors=colors)
savefig(fig_fac, "latent-factors-and-contributions")
raise a
# Plot clustering in data space and latent space.
# For the latent space we will just use a corner plot.
component_cmap = mpl_utils.discrete_cmap(7, base_cmap="Spectral_r")
fig = mpl_utils.plot_latent_space(model, X, ellipse_kwds=dict(alpha=0), s=10, edgecolor="none", alpha=1, c=[component_cmap(_) for _ in np.argmax(model.tau_, axis=1)], show_ticks=True,
label_names=[r"$\mathbf{{S}}_{{{0}}}$".format(i + 1) for i in range(model.n_latent_factors)])
for ax in fig.axes:
if ax.is_last_row():
ax.set_ylim(-1, 1)
ax.set_yticks([-1, 0, 1])
fig.tight_layout()
savefig(fig, "latent-space")
# For the data space we will use N x 2 panels of [X/Fe] vs [Fe/H], coloured by their responsibility.
#X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
fig, axes = plt.subplots(5, 3, figsize=(7.1, 9.0))
axes = np.atleast_1d(axes).flatten()
x = X_H.T[label_names.index("fe_h")]
c = np.argmax(model.tau_, axis=1)
K = model.n_components
y_idx = 0
for i, ax in enumerate(axes):
if label_names[i] == "fe_h":
y_idx += 1
y = X_H.T[y_idx] - x
ax.scatter(x, y, c=[component_cmap(_) for _ in c], s=10, edgecolor="none", rasterized=True)
element = label_names[y_idx].split("_")[0].title()
ax.set_ylabel(r"$[\textrm{{{0}/Fe}}]$".format(element))
y_idx += 1
x_lims = (-1.5, 0.5)
y_lims = (-0.5, 1.0)
for ax in axes:
ax.set_xlim(x_lims)
ax.set_ylim(y_lims)
ax.set_xticks([-1.5, -0.5, 0.5])
#ax.set_yticks([-0.5, 0.25, 1.0, 1.75])
ax.set_yticks([-0.5, 0, 0.5, 1.0])
if ax.is_last_row():
ax.set_xlabel(r"$[\textrm{Fe/H}]$")
else:
ax.set_xticklabels([])
ax.plot(x_lims, [0, 0], ":", c="#666666", lw=0.5, zorder=-1)
ax.plot([0, 0], y_lims, ":", c="#666666", lw=0.5, zorder=-1)
fig.tight_layout()
savefig(fig, "data-space")
latex_elements = [r"$\textrm{{{0}}}$".format(le.split("_")[0].title()) for le in label_names]
fig_scatter = mpl_utils.plot_specific_scatter(model,
steps=True,
xlabel="",
xticklabels=latex_elements,
ylabel=r"$\textrm{specific scatter / dex}$",
ticker_pad=20)
fig_scatter.axes[0].set_yticks(np.arange(0, 0.20, 0.05))
savefig(fig_scatter, "specific-scatter")
here = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(here, f"{prefix}-{unique_hash}-data.fits")
subset = galah.data[mask]
subset["association"] = np.argmax(model.tau_, axis=1)
subset.write(filename, overwrite=True)
| 29.175772 | 183 | 0.642107 |
"""
Experiment using all GALAH data.
"""
from __future__ import division # Just in case. Use Python 3.
import os
import sys
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import yaml
from matplotlib.ticker import MaxNLocator
from collections import Counter
from scipy import linalg
from hashlib import md5
sys.path.insert(0, "../../")
from mcfa import (mcfa, grid_search, mpl_utils, utils)
import galah_dr2 as galah
matplotlib.style.use(mpl_utils.mpl_style)
here = os.path.dirname(os.path.realpath(__file__))
with open("config.yml") as fp:
config = yaml.load(fp)
print(f"Config: {config}")
np.random.seed(config["random_seed"])
prefix = os.path.basename(__file__)[:-3]
unique_hash = md5((f"{config}").encode("utf-8")).hexdigest()[:5]
unique_config_path = f"{unique_hash}.yml"
if os.path.exists(unique_config_path):
print(f"Warning: this configuration already exists: {unique_config_path}")
with open(unique_config_path, "w") as fp:
yaml.dump(config, fp)
with open(__file__, "r") as fp:
code = fp.read()
with open(f"{unique_hash}-{__file__}", "w") as fp:
fp.write(code)
def savefig(fig, suffix):
    """Save *fig* as both PNG and PDF next to this script.

    The file name embeds the script prefix and the config hash
    (module-level ``prefix`` / ``unique_hash``) so figures from different
    configurations never overwrite each other.

    Parameters
    ----------
    fig : matplotlib Figure
        The figure to write.
    suffix : str
        Descriptive tail of the file name (e.g. "gridsearch-ll").
    """
    here = os.path.dirname(os.path.realpath(__file__))
    filename = os.path.join(here, f"{prefix}-{unique_hash}-{suffix}")
    # Bug fix: `filename` was computed but unused -- both calls previously
    # wrote to a literal "(unknown).png"/"(unknown).pdf", so every figure
    # clobbered the previous one.
    fig.savefig(f"{filename}.png", dpi=150)
    fig.savefig(f"{filename}.pdf", dpi=300)
import os
os.system("rm -f *.pkl")
N_elements = 20
use_galah_flags = config["use_galah_flags"]
mcfa_kwds = dict()
mcfa_kwds.update(config["mcfa_kwds"])
elements = config[prefix]["elements"]
if config[prefix]["ignore_elements"] is not None:
elements = [el for el in elements if el not in config[prefix]["ignore_elements"]]
print(elements)
mask = galah.get_abundance_mask(elements, use_galah_flags=use_galah_flags)
galah_cuts = config[prefix]["galah_cuts"]
if galah_cuts is not None:
print(f"Applying cuts: {galah_cuts}")
for k, (lower, upper) in galah_cuts.items():
mask *= (upper >= galah.data[k]) * (galah.data[k] >= lower)
raise a
print(f"Number of stars: {sum(mask)}")
X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
print(f"Data shape: {X_H.shape}")
def convert_xh_to_xy(X_H, label_names, y_label):
    """Convert [X/H]-style abundances to [X/Y] relative to *y_label*.

    Every column of *X_H* has the *y_label* column subtracted from it,
    except the *y_label* column itself, which is returned unchanged.

    Raises ValueError if *y_label* is not in *label_names*.
    """
    y_index = label_names.index(y_label)
    reference = X_H[:, y_index]
    # Subtract the reference column everywhere via broadcasting, then
    # restore it untouched in its own slot.  X_H itself is not mutated.
    converted = X_H - reference[:, None]
    converted[:, y_index] = reference
    return converted
if config["wrt_x_fe"]:
X = convert_xh_to_xy(X_H, label_names, "fe_h")
else:
X = X_H
if not config["log_abundance"]:
X = 10**X
if config["subtract_mean"]:
X = X - np.mean(X, axis=0)
N, D = X.shape
# Do a gridsearch.
gs_options = config[prefix]["gridsearch"]
max_n_latent_factors = gs_options["max_n_latent_factors"]
max_n_components = gs_options["max_n_components"]
Js = 1 + np.arange(max_n_latent_factors)
Ks = 1 + np.arange(max_n_components)
N_inits = gs_options["n_inits"]
results_path = f"{prefix}-gridsearch-results.pkl"
if os.path.exists(results_path):
with open(results_path, "rb") as fp:
Jg, Kg, converged, meta, X, mcfa_kwds = pickle.load(fp)
else:
Jg, Kg, converged, meta = grid_search.grid_search(Js, Ks, X,
N_inits=N_inits, mcfa_kwds=mcfa_kwds)
with open(results_path, "wb") as fp:
pickle.dump((Jg, Kg, converged, meta, X, mcfa_kwds), fp)
ll = meta["ll"]
bic = meta["bic"]
mml = meta["message_length"]
J_best_ll, K_best_ll = grid_search.best(Js, Ks, -ll)
J_best_bic, K_best_bic = grid_search.best(Js, Ks, bic)
J_best_mml, K_best_mml = grid_search.best(Js, Ks, mml)
print(f"Best log likelihood at J = {J_best_ll} and K = {K_best_ll}")
print(f"Best BIC value found at J = {J_best_bic} and K = {K_best_bic}")
print(f"Best MML value found at J = {J_best_mml} and K = {K_best_mml}")
# Plot some contours.
plot_filled_contours_kwds = dict(converged=converged,
marker_function=np.nanargmin, N=100,
cmap="Spectral_r")
fig_ll = mpl_utils.plot_filled_contours(Jg, Kg, -ll,
colorbar_label=r"$-\log\mathcal{L}$",
**plot_filled_contours_kwds)
savefig(fig_ll, "gridsearch-ll")
fig_bic = mpl_utils.plot_filled_contours(Jg, Kg, bic,
colorbar_label=r"$\textrm{BIC}$",
**plot_filled_contours_kwds)
savefig(fig_bic, "gridsearch-bic")
fig_mml = mpl_utils.plot_filled_contours(Jg, Kg, mml,
colorbar_label=r"$\textrm{MML}$",
**plot_filled_contours_kwds)
savefig(fig_mml, "gridsearch-mml")
model = meta["best_models"][config["adopted_metric"]]
latex_label_names = [r"$\textrm{{{0}}}$".format(ea.split("_")[0].title()) for ea in label_names]
# Draw unrotated.
J_max = config["max_n_latent_factors_for_colormap"]
J_max = 12
cmap = mpl_utils.discrete_cmap(J_max, base_cmap="Spectral")
colors = [cmap(j) for j in range(J_max)]#[::-1]
A_est = model.theta_[model.parameter_names.index("A")]
A_astrophysical = np.zeros_like(A_est)#np.random.normal(0, 0.1, size=A_est.shape)
for i, tes in enumerate(config["grouped_elements"][:model.n_latent_factors]):
for j, te in enumerate(tes):
try:
idx = label_names.index("{0}_h".format(te.lower()))
except ValueError:
print(f"Skipping {te}")
else:
count = sum([(te in foo) for foo in config["grouped_elements"][:model.n_latent_factors]])
A_astrophysical[idx, i] = 1.0/count
A_astrophysical /= np.clip(np.sqrt(np.sum(A_astrophysical, axis=0)), 1, np.inf)
# Un-assigned columns
for column_index in np.where(np.all(A_astrophysical == 0, axis=0))[0]:
print(f"Warning: unassigned column index: {column_index}")
A_astrophysical[:, column_index] = np.random.normal(0, 1e-2, size=D)
if config["correct_A_astrophysical"]:
AL = linalg.cholesky(A_astrophysical.T @ A_astrophysical)
A_astrophysical = A_astrophysical @ linalg.solve(AL, np.eye(model.n_latent_factors))
max_n_rotations = 3
for each in range(max_n_rotations):
A_est = model.theta_[model.parameter_names.index("A")]
R, p_opt, cov, *_ = utils.find_rotation_matrix(A_astrophysical, A_est,
full_output=True)
R_opt = utils.exact_rotation_matrix(A_astrophysical, A_est,
p0=np.random.uniform(-np.pi, np.pi, model.n_latent_factors**2))
# WTF check R_opt.
AL = linalg.cholesky(R_opt.T @ R_opt)
R_opt2 = R_opt @ linalg.solve(AL, np.eye(model.n_latent_factors))
chi1 = np.sum(np.abs(A_est @ R - A_astrophysical))
chi2 = np.sum(np.abs(A_est @ R_opt2 - A_astrophysical))
R = R_opt2 if chi2 < chi1 else R
# Now make it a valid rotation matrix.
model.rotate(R, X=X, ensure_valid_rotation=True)
import pickle
with open(f"{unique_hash}-{prefix}-model.pkl", "wb") as fp:
pickle.dump(model, fp)
"""
J = model.n_latent_factors
L = model.theta_[model.parameter_names.index("A")]
elements = [ea.split("_")[0].title() for ea in label_names]
A_est = model.theta_[model.parameter_names.index("A")]
A_astrophysical = np.zeros_like(A_est)#np.random.normal(0, 0.1, size=A_est.shape)
for i, tes in enumerate(config["grouped_elements"][:model.n_latent_factors]):
for j, te in enumerate(tes):
try:
idx = label_names.index("{0}_h".format(te.lower()))
except ValueError:
print(f"Skipping {te}")
else:
count = sum([(te in foo) for foo in config["grouped_elements"][:model.n_latent_factors]])
A_astrophysical[idx, i] = 1.0/count
A_astrophysical /= np.clip(np.sqrt(np.sum(A_astrophysical, axis=0)), 1, np.inf)
# Un-assigned columns
for column_index in np.where(np.all(A_astrophysical == 0, axis=0))[0]:
print(f"Warning: unassigned column index: {column_index}")
A_astrophysical[:, column_index] = np.random.normal(0, 1e-2, size=D)
AL = linalg.cholesky(A_astrophysical.T @ A_astrophysical)
A_astrophysical = A_astrophysical @ linalg.solve(AL, np.eye(model.n_latent_factors))
R, p_opt, cov, *_ = utils.find_rotation_matrix(A_astrophysical, A_est,
full_output=True)
R_opt = utils.exact_rotation_matrix(A_astrophysical, A_est,
p0=np.random.uniform(-np.pi, np.pi, model.n_latent_factors**2))
# WTF check R_opt.
AL = linalg.cholesky(R_opt.T @ R_opt)
R_opt2 = R_opt @ linalg.solve(AL, np.eye(model.n_latent_factors))
chi1 = np.sum(np.abs(A_est @ R - A_astrophysical))
chi2 = np.sum(np.abs(A_est @ R_opt2 - A_astrophysical))
R = R_opt2 if chi2 < chi1 else R
# Now make it a valid rotation matrix.
model.rotate(R, X=X, ensure_valid_rotation=True)
"""
fig_fac = mpl_utils.plot_factor_loads_and_contributions(model, X,
label_names=latex_label_names, colors=colors,
target_loads=A_astrophysical)
savefig(fig_fac, "latent-factors-and-contributions-with-targets")
fig_fac = mpl_utils.plot_factor_loads_and_contributions(model, X,
label_names=latex_label_names, colors=colors)
savefig(fig_fac, "latent-factors-and-contributions")
raise a
# Plot clustering in data space and latent space.
# For the latent space we will just use a corner plot.
component_cmap = mpl_utils.discrete_cmap(7, base_cmap="Spectral_r")
fig = mpl_utils.plot_latent_space(model, X, ellipse_kwds=dict(alpha=0), s=10, edgecolor="none", alpha=1, c=[component_cmap(_) for _ in np.argmax(model.tau_, axis=1)], show_ticks=True,
label_names=[r"$\mathbf{{S}}_{{{0}}}$".format(i + 1) for i in range(model.n_latent_factors)])
for ax in fig.axes:
if ax.is_last_row():
ax.set_ylim(-1, 1)
ax.set_yticks([-1, 0, 1])
fig.tight_layout()
savefig(fig, "latent-space")
# For the data space we will use N x 2 panels of [X/Fe] vs [Fe/H], coloured by their responsibility.
#X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
fig, axes = plt.subplots(5, 3, figsize=(7.1, 9.0))
axes = np.atleast_1d(axes).flatten()
x = X_H.T[label_names.index("fe_h")]
c = np.argmax(model.tau_, axis=1)
K = model.n_components
y_idx = 0
for i, ax in enumerate(axes):
if label_names[i] == "fe_h":
y_idx += 1
y = X_H.T[y_idx] - x
ax.scatter(x, y, c=[component_cmap(_) for _ in c], s=10, edgecolor="none", rasterized=True)
element = label_names[y_idx].split("_")[0].title()
ax.set_ylabel(r"$[\textrm{{{0}/Fe}}]$".format(element))
y_idx += 1
x_lims = (-1.5, 0.5)
y_lims = (-0.5, 1.0)
for ax in axes:
ax.set_xlim(x_lims)
ax.set_ylim(y_lims)
ax.set_xticks([-1.5, -0.5, 0.5])
#ax.set_yticks([-0.5, 0.25, 1.0, 1.75])
ax.set_yticks([-0.5, 0, 0.5, 1.0])
if ax.is_last_row():
ax.set_xlabel(r"$[\textrm{Fe/H}]$")
else:
ax.set_xticklabels([])
ax.plot(x_lims, [0, 0], ":", c="#666666", lw=0.5, zorder=-1)
ax.plot([0, 0], y_lims, ":", c="#666666", lw=0.5, zorder=-1)
fig.tight_layout()
savefig(fig, "data-space")
latex_elements = [r"$\textrm{{{0}}}$".format(le.split("_")[0].title()) for le in label_names]
fig_scatter = mpl_utils.plot_specific_scatter(model,
steps=True,
xlabel="",
xticklabels=latex_elements,
ylabel=r"$\textrm{specific scatter / dex}$",
ticker_pad=20)
fig_scatter.axes[0].set_yticks(np.arange(0, 0.20, 0.05))
savefig(fig_scatter, "specific-scatter")
here = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(here, f"{prefix}-{unique_hash}-data.fits")
subset = galah.data[mask]
subset["association"] = np.argmax(model.tau_, axis=1)
subset.write(filename, overwrite=True)
| 488 | 0 | 46 |
a910f16a21bfa52d0e32dae6f790623e2eaac9f2 | 38 | py | Python | feeds/tests/__init__.py | RamezIssac/djangopackages | 2b54b0ae95ef805c07ca3c0b9c5184466b65c23a | [
"MIT"
] | 383 | 2015-05-06T03:51:51.000Z | 2022-03-26T07:56:44.000Z | feeds/tests/__init__.py | RamezIssac/djangopackages | 2b54b0ae95ef805c07ca3c0b9c5184466b65c23a | [
"MIT"
] | 257 | 2017-04-17T08:31:16.000Z | 2022-03-27T02:30:49.000Z | feeds/tests/__init__.py | RamezIssac/djangopackages | 2b54b0ae95ef805c07ca3c0b9c5184466b65c23a | [
"MIT"
] | 105 | 2017-04-17T06:21:26.000Z | 2022-03-30T05:24:19.000Z | from feeds.tests.test_latest import *
| 19 | 37 | 0.815789 | from feeds.tests.test_latest import *
| 0 | 0 | 0 |
e67ed531cbda0b0c671afd7ecd25b2ee8474f03a | 1,268 | py | Python | SIP/src/peer/server/__init__.py | trishantpahwa/Session-Initiation-Protocol | 5b770dbb9533fbe3a8ff31fc583576cc107e5ba8 | [
"MIT"
] | 3 | 2019-06-18T18:21:05.000Z | 2021-07-15T06:28:25.000Z | SIP/src/peer/server/__init__.py | trishantpahwa/Session-Initiation-Protocol | 5b770dbb9533fbe3a8ff31fc583576cc107e5ba8 | [
"MIT"
] | 4 | 2019-01-30T11:31:13.000Z | 2019-03-06T12:36:54.000Z | SIP/src/peer/server/__init__.py | trishantpahwa/Session-Initiation-Protocol | 5b770dbb9533fbe3a8ff31fc583576cc107e5ba8 | [
"MIT"
] | 1 | 2019-08-12T11:31:23.000Z | 2019-08-12T11:31:23.000Z | # Sample TCP Server
'''from server import server
server_name = 'server'
domain = '192.168.1.218'
protocol = 'TCP'
port = '5060'
server_network_name = 'SERVER'
content_type = 'application'
content_sub_type = 'sdp'
server_ = server(server_name, domain, protocol, port, server_network_name,
content_type, content_sub_type)
def register_server(client_socket):
print('Registering server')
message = server_.receive_message(client_socket)
print(message)
server_.send_message('Received message + ' + message)
print('Message sent')
server_.create_server(register_server)'''
# Sample UDP Server
'''from server import server
server_name = 'server'
domain = 'VaaanInfra.com'
protocol = 'UDP'
port = '5060'
server_network_name = 'SERVER'
content_type = 'application'
content_sub_type = 'sdp'
server_ = server(server_name, domain, protocol, port, server_network_name,
content_type, content_sub_type)
def register_server():
print('Registering server')
message = server_.receive_message()
print(message)
address = ('192.168.1.218', 5060)
server_.send_message(('Received message: ' + message), address)
print('Message sent')
server_.create_server(register_server)'''
from .server import server
| 24.862745 | 74 | 0.721609 | # Sample TCP Server
'''from server import server
server_name = 'server'
domain = '192.168.1.218'
protocol = 'TCP'
port = '5060'
server_network_name = 'SERVER'
content_type = 'application'
content_sub_type = 'sdp'
server_ = server(server_name, domain, protocol, port, server_network_name,
content_type, content_sub_type)
def register_server(client_socket):
print('Registering server')
message = server_.receive_message(client_socket)
print(message)
server_.send_message('Received message + ' + message)
print('Message sent')
server_.create_server(register_server)'''
# Sample UDP Server
'''from server import server
server_name = 'server'
domain = 'VaaanInfra.com'
protocol = 'UDP'
port = '5060'
server_network_name = 'SERVER'
content_type = 'application'
content_sub_type = 'sdp'
server_ = server(server_name, domain, protocol, port, server_network_name,
content_type, content_sub_type)
def register_server():
print('Registering server')
message = server_.receive_message()
print(message)
address = ('192.168.1.218', 5060)
server_.send_message(('Received message: ' + message), address)
print('Message sent')
server_.create_server(register_server)'''
from .server import server
| 0 | 0 | 0 |
fb7f5797b0a8a40e01914660c71d0962e8e429dc | 841 | py | Python | Analysis/tests/ParserTests.py | ashishnitinpatil/resanalysersite | 0604d2fed4760be741c4d90b6d230d0f2cd8bf9e | [
"CC-BY-4.0"
] | null | null | null | Analysis/tests/ParserTests.py | ashishnitinpatil/resanalysersite | 0604d2fed4760be741c4d90b6d230d0f2cd8bf9e | [
"CC-BY-4.0"
] | null | null | null | Analysis/tests/ParserTests.py | ashishnitinpatil/resanalysersite | 0604d2fed4760be741c4d90b6d230d0f2cd8bf9e | [
"CC-BY-4.0"
] | null | null | null | import unittest
import random
from Analysis.ResAnalyser import PDF_Parser
# Pro tip - Am a noob at Testing :|
class ParserTests(unittest.TestCase):
    """
    All tests for the PDF_Parser class and its methods go here.
    """
if __name__ == '__main__':
unittest.main()
| 24.028571 | 61 | 0.650416 | import unittest
import random
from Analysis.ResAnalyser import PDF_Parser
# Pro tip - Am a noob at Testing :|
class ParserTests(unittest.TestCase):
    """
    All tests for the PDF_Parser class and its methods go here.
    """
    def setUp(self):
        # Fresh parser per test; testing=True is presumably a lightweight
        # mode that skips real PDF loading -- TODO confirm in PDF_Parser.
        self.parser = PDF_Parser(testing=True)
    def test_is_credit(self):
        # is_credit() echoes valid single-digit credit strings back as ints;
        # '0' maps to 10 (presumably a grading/credit convention -- verify),
        # while non-credit tokens (decimals, letter grades) yield False.
        self.assertEqual(6, self.parser.is_credit('6'))
        self.assertEqual(8, self.parser.is_credit('8'))
        self.assertEqual(10, self.parser.is_credit('0'))
        self.assertEqual(False, self.parser.is_credit('7.4'))
        self.assertEqual(False, self.parser.is_credit('AA'))
    def test_is_gpa(self):
        # TODO: not yet implemented.
        pass
    def test_getdata(self):
        # TODO: not yet implemented.
        pass
    def test_get_stud_type(self):
        # TODO: not yet implemented.
        pass
    def test_get_batch(self):
        # TODO: not yet implemented.
        pass
if __name__ == '__main__':
unittest.main()
| 405 | 0 | 161 |
4d21e2bbb3f7199438454942c63073b8ad992302 | 5,027 | py | Python | src/external/coremltools_wrap/coremltools/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py | cookingcodewithme/turicreate | a89e203d60529d2d72547c03ec9753ea979ee342 | [
"BSD-3-Clause"
] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | src/external/coremltools_wrap/coremltools/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py | cookingcodewithme/turicreate | a89e203d60529d2d72547c03ec9753ea979ee342 | [
"BSD-3-Clause"
] | 2,402 | 2017-12-08T22:31:01.000Z | 2022-03-28T19:25:52.000Z | src/external/coremltools_wrap/coremltools/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py | cookingcodewithme/turicreate | a89e203d60529d2d72547c03ec9753ea979ee342 | [
"BSD-3-Clause"
] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import unittest
import pytest
pytest.importorskip("tensorflow", minversion="1.14.0")
from tensorflow.core.framework import attr_value_pb2 as attr_value
from tensorflow.core.framework import tensor_shape_pb2 as tensor_shape
from tensorflow.core.framework import types_pb2 as types
from coremltools.converters.mil.mil import types as mil_types
import coremltools.converters.mil.frontend.tensorflow.parse as parse
| 39.896825 | 88 | 0.655262 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import unittest
import pytest
pytest.importorskip("tensorflow", minversion="1.14.0")
from tensorflow.core.framework import attr_value_pb2 as attr_value
from tensorflow.core.framework import tensor_shape_pb2 as tensor_shape
from tensorflow.core.framework import types_pb2 as types
from coremltools.converters.mil.mil import types as mil_types
import coremltools.converters.mil.frontend.tensorflow.parse as parse
class TestParse(unittest.TestCase):
    def test_parse_list(self):
        """parse_attr unpacks a TF AttrValue list field into a Python list."""
        def compare(expected, lst, field_name):
            # Build an AttrValue whose .list.<field_name> holds `lst`,
            # then check parse_attr round-trips it to `expected`.
            attr = attr_value.AttrValue()
            field = getattr(attr.list, field_name)
            field.extend(lst)
            actual = parse.parse_attr(attr)
            self.assertEqual(expected, actual)
        compare([1, 2, 3], [1, 2, 3], "i")
        # Byte strings in the proto come back as Python str.
        compare(["foo", "bar"], [b"foo", b"bar"], "s")
def test_parse_scalar(self):
def compare(expected, val, field_name):
a = attr_value.AttrValue()
setattr(a, field_name, val)
actual = parse.parse_attr(a)
self.assertEqual(expected, actual)
compare("a String", b"a String", "s")
compare(55, 55, "i")
compare(True, True, "b")
attr = attr_value.AttrValue()
attr.f = 12.3
self.assertAlmostEqual(12.3, parse.parse_attr(attr), places=2)
@staticmethod
def _attr_with_shape(dims, unknown_rank=0):
attr = attr_value.AttrValue()
for (dim_size, dim_name) in dims:
tf_dim = tensor_shape.TensorShapeProto.Dim()
tf_dim.size = dim_size
tf_dim.name = dim_name
attr.shape.dim.append(tf_dim)
attr.shape.unknown_rank = unknown_rank
return attr
def test_parse_shape(self):
def compare(expected, dims, unknown_rank=0):
attr = self._attr_with_shape(dims, unknown_rank)
actual = parse.parse_attr(attr)
self.assertEqual(expected, actual)
compare(None, [], 5)
compare([100], [(100, "outer")])
compare([1, 2, 3], [(1, "outer"), (2, "middle"), (3, "inner")])
def test_parse_tensor(self):
# Zero-rank tensor
attr = attr_value.AttrValue()
attr.tensor.version_number = 1
attr.tensor.dtype = types.DataType.DT_INT32
t = parse.parse_attr(attr)
self.assertTrue(isinstance(t, mil_types.int32))
self.assertEqual(0, t.val)
# Non-zero rank
attr = attr_value.AttrValue()
attr.tensor.version_number = 1
attr.tensor.dtype = types.DataType.DT_INT32
shaped_attr = self._attr_with_shape([(1, "outer"), (2, "middle"), (3, "inner")])
attr.tensor.tensor_shape.dim.extend(shaped_attr.shape.dim)
attr.tensor.int_val.extend([55, 56, 57])
t = parse.parse_attr(attr)
self.assertEqual([55, 56, 57], t.val.tolist())
self.assertEqual("tensor", mil_types.get_type_info(t).name)
# Note that the result of t.get_primitive() is a function that returns a type
# rather than an instance of that type as it is when the tensor has rank zero.
self.assertTrue(isinstance(t.get_primitive()(), mil_types.int32))
self.assertEqual((1, 2, 3), t.get_shape())
def test_parse_type(self):
def compare(expected, tf_type):
attr = attr_value.AttrValue()
attr.type = tf_type
self.assertEqual(expected, parse.parse_attr(attr))
compare(None, types.DataType.DT_INVALID)
compare(mil_types.float, types.DataType.DT_FLOAT)
compare(mil_types.double, types.DataType.DT_DOUBLE)
compare(mil_types.int32, types.DataType.DT_INT32)
compare(mil_types.uint8, types.DataType.DT_UINT8)
compare(mil_types.int16, types.DataType.DT_INT16)
compare(mil_types.int8, types.DataType.DT_INT8)
compare(mil_types.int8, types.DataType.DT_INT8)
compare(mil_types.str, types.DataType.DT_STRING)
compare(None, types.DataType.DT_COMPLEX64)
compare(mil_types.int64, types.DataType.DT_INT64)
compare(mil_types.bool, types.DataType.DT_BOOL)
compare(None, types.DataType.DT_QINT8)
compare(None, types.DataType.DT_QUINT8)
compare(None, types.DataType.DT_QINT32)
compare(None, types.DataType.DT_BFLOAT16)
compare(None, types.DataType.DT_QINT16)
compare(None, types.DataType.DT_QUINT16)
compare(mil_types.uint16, types.DataType.DT_UINT16)
compare(None, types.DataType.DT_COMPLEX128)
compare(None, types.DataType.DT_HALF)
compare(None, types.DataType.DT_RESOURCE)
compare(None, types.DataType.DT_VARIANT)
compare(mil_types.uint32, types.DataType.DT_UINT32)
compare(mil_types.uint64, types.DataType.DT_UINT64)
| 4,153 | 193 | 23 |
3002f2f09fa463bc36f6a4afa3490d0ddcbaccac | 4,581 | py | Python | tsmtool/tarsnap.py | rstms/tsmtool | 97ce28e398185d983a96dc2787274946bfab5553 | [
"MIT"
] | null | null | null | tsmtool/tarsnap.py | rstms/tsmtool | 97ce28e398185d983a96dc2787274946bfab5553 | [
"MIT"
] | 1 | 2022-03-28T11:07:36.000Z | 2022-03-28T11:07:36.000Z | tsmtool/tarsnap.py | rstms/tsmtool | 97ce28e398185d983a96dc2787274946bfab5553 | [
"MIT"
] | null | null | null | # Tarsnap - tarsnap website interface
import datetime
from pathlib import Path
import requests
from bs4 import BeautifulSoup, element
URL = "https://www.tarsnap.com"
| 29.554839 | 79 | 0.505566 | # Tarsnap - tarsnap website interface
import datetime
from pathlib import Path
import requests
from bs4 import BeautifulSoup, element
URL = "https://www.tarsnap.com"
class Tarsnap:
    """Screen-scraping client for the tarsnap.com account pages.

    Logs in to ``manage.cgi`` with an email/password and extracts the
    account balance and the verbose-activity table from the returned HTML.
    """
    def __init__(self, config_file, account=None, email=None, password=None):
        # config_file: optional path to a whitespace-separated credentials
        # file, one "account email password" triple per line.  Explicit
        # `email`/`password` arguments take precedence over the file.
        self.url = URL
        self.account = account or "undefined"
        self.config = {}
        if config_file and config_file.exists():
            for line in Path(config_file).open("r").readlines():
                _account, _email, _password = line.split()
                self.config[_account] = dict(email=_email, password=_password)
                # When no account was requested, default to the first listed.
                if not account:
                    account = _account
        _config = self.config.get(account, {})
        self.email = email or _config.get("email")
        self.password = password or _config.get("password")
        # One Session for all requests so the login cookies persist.
        self.session = requests.Session()
    def _post(self, route, data):
        # POST `data` to URL/route within the logged-in session.
        return self.session.post(self.url + "/" + route, data)
    def _get(self, route):
        # GET URL/route within the logged-in session.
        return self.session.get(self.url + "/" + route)
    def _round(self, value):
        # Round to two decimal places (currency) via string formatting.
        return float("%.2f" % value)
    def _query(self):
        """Log in and return (balance, account, verbose_activity_soup).

        Raises RuntimeError with the page's own message when the login box
        does not confirm "You are logged in as <email>".  Any of the three
        returned values may be None if the expected markup is not found.
        """
        response = self._post(
            "manage.cgi", {"address": self.email, "password": self.password}
        )
        # for div in [soup.find('div')]:
        #     print('div: %s ' % repr(div.text))
        balance = None
        account = None
        verbose_soup = None
        soup = BeautifulSoup(response.text, "html.parser")
        # print(soup.prettify())
        # Scan every "boxcontents" div for the login confirmation message;
        # any other first line in that box is treated as a login error.
        for el in [
            e for e in soup.find_all("div") if e and isinstance(e, element.Tag)
        ]:
            for div in [
                e
                for e in el.find_all("div")
                if e and isinstance(e, element.Tag)
            ]:
                if div.attrs.get("class") == ["boxcontents"]:
                    msg = div.text.strip().split("\n")[0]
                    if f"You are logged in as {self.email}" not in msg:
                        raise RuntimeError(msg)
        # Pull the balance and account name out of the first <div>'s
        # paragraphs; `[1:]` drops the leading character of the amount
        # (presumably a currency symbol -- confirm against the live page).
        for el in soup.find("div").find_all("p"):
            if "current account balance" in el.text:
                balance = self._round(float(el.find("code").text[1:]))
            elif "logged in as" in el.text:
                account = el.find("code").text
        # Follow the "verboseactivity" link, if present, to fetch the
        # detailed activity table.
        for el in soup.find_all("a", href=True):
            if el["href"].endswith("verboseactivity"):
                response = self._get(el["href"])
                verbose_soup = BeautifulSoup(response.text, "html.parser")
                break
        return balance, account, verbose_soup
    def _handle_row(self, r, row):
        # Fold one activity-table row into the result dict `r`; returns the
        # payment amount for this row (0 for non-payment rows).
        # NOTE(review): columns are positional -- row[1] appears to be a
        # date, row[5]/row[6] amounts; verify against the table layout.
        if row[0] == "Balance":
            r["balances"].append((row[1], float(row[6])))
        if row[0] == "Payment":
            payment = float(row[5])
            r["payments"][row[1]] = payment
        else:
            payment = 0
        return payment
    def get_status(
        self,
        rows=False,
        balances=False,
        payments=False,
        raw=False,
        email=None,
        password=None,
    ):
        """Return a dict describing the account state.

        Always contains "balance" and "account".  The raw activity table
        ("rows"), balance history ("balances") and payments ("payments")
        are kept only when the corresponding flag is True (or `raw` is
        set).  Unless `raw`, a "monthly_cost" estimate is derived from the
        first/last balance entries.  Raises ValueError when no email or
        password is available.
        """
        email = email or self.email
        password = password or self.password
        if not email:
            raise ValueError("--email is required")
        if not password:
            raise ValueError("--password is required")
        balance, account, soup = self._query()
        r = {}
        r["balance"] = balance
        r["account"] = account
        r["rows"] = []
        r["balances"] = []
        r["payments"] = {}
        payment_total = 0.0
        # Flatten the activity table into lists of cell texts.
        for el in soup.find("table").find_all("tr"):
            r["rows"].append([el.text for el in el.find_all("td")])
        for row in r["rows"]:
            payment_total += self._handle_row(r, row)
        if not raw:
            if r["balances"]:
                begin_date = datetime.datetime.strptime(
                    r["balances"][0][0], "%Y-%m-%d"
                )
                begin_amount = float(r["balances"][0][1])
                end_date = datetime.datetime.strptime(
                    r["balances"][-1][0], "%Y-%m-%d"
                )
                end_amount = float(r["balances"][-1][1])
                # Net spend over the period (balance drop plus payments made),
                # annualized over 365 days and divided into 12 months.
                r["monthly_cost"] = self._round(
                    (begin_amount - (end_amount - payment_total))
                    / (end_date - begin_date).days
                    * 365
                    / 12
                )
            if not rows:
                del r["rows"]
            if not balances:
                del r["balances"]
            if not payments:
                del r["payments"]
        return r
| 4,207 | -7 | 211 |
96f267c168e1d5d5937fae5a5b39d3b15c98c832 | 395 | py | Python | simple_fun_#204_smallest_integer.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | simple_fun_#204_smallest_integer.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | simple_fun_#204_smallest_integer.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Simple Fun #204: Smallest Integer
#Problem level: 7 kyu
from itertools import groupby, chain
| 24.6875 | 90 | 0.61519 | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Simple Fun #204: Smallest Integer
#Problem level: 7 kyu
from itertools import groupby, chain
def smallest_integer(matrix):
    """Return the smallest non-negative integer absent from *matrix*.

    `matrix` is an iterable of iterables of integers (possibly empty);
    negative values are irrelevant to the result.  For example,
    [[0, 2], [1, 4]] -> 3 and [] -> 0.

    Rewritten from the original sort+groupby scan: a set gives O(1)
    membership tests, and the original shadowed the builtin `min` and
    kept an unused `pos` counter.
    """
    present = set(chain.from_iterable(matrix))
    # Walk up from 0 until we find a value missing from the matrix (the
    # answer is at most len(present), so this terminates quickly).
    candidate = 0
    while candidate in present:
        candidate += 1
    return candidate
| 228 | 0 | 22 |
55df1dbbfc5a5b8a80f1ba121da64e4465ba79b1 | 608 | py | Python | urbanairship/devices/__init__.py | rodsenra/python-library | bd3fb129ee0eb72265f6d0f2f03fd9e8184dcac0 | [
"Apache-2.0"
] | null | null | null | urbanairship/devices/__init__.py | rodsenra/python-library | bd3fb129ee0eb72265f6d0f2f03fd9e8184dcac0 | [
"Apache-2.0"
] | null | null | null | urbanairship/devices/__init__.py | rodsenra/python-library | bd3fb129ee0eb72265f6d0f2f03fd9e8184dcac0 | [
"Apache-2.0"
] | null | null | null | from .devicelist import (
ChannelList,
ChannelInfo,
DeviceTokenList,
APIDList,
DeviceInfo,
)
from .tag import (
ChannelTags,
OpenChannelTags
)
from .segment import (
Segment,
SegmentList
)
from .channel_uninstall import (
ChannelUninstall
)
from .open_channel import (
OpenChannel
)
from .named_users import (
NamedUser,
NamedUserList,
NamedUserTags
)
from .static_lists import (
StaticList,
StaticLists,
)
from .locationfinder import (
LocationFinder
)
from .sms import (
Sms
)
from .email import (
Email,
EmailTags
)
| 12.16 | 32 | 0.669408 | from .devicelist import (
ChannelList,
ChannelInfo,
DeviceTokenList,
APIDList,
DeviceInfo,
)
from .tag import (
ChannelTags,
OpenChannelTags
)
from .segment import (
Segment,
SegmentList
)
from .channel_uninstall import (
ChannelUninstall
)
from .open_channel import (
OpenChannel
)
from .named_users import (
NamedUser,
NamedUserList,
NamedUserTags
)
from .static_lists import (
StaticList,
StaticLists,
)
from .locationfinder import (
LocationFinder
)
from .sms import (
Sms
)
from .email import (
Email,
EmailTags
)
| 0 | 0 | 0 |
4d0d23e77fd94495bc44c0163012002503b7b6dc | 2,147 | py | Python | siam_tracker/models/necks/adjust.py | microsoft/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 28 | 2020-03-18T04:41:21.000Z | 2022-02-24T16:44:01.000Z | siam_tracker/models/necks/adjust.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 1 | 2020-04-05T15:23:22.000Z | 2020-04-07T16:23:12.000Z | siam_tracker/models/necks/adjust.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
] | 11 | 2020-03-19T00:30:06.000Z | 2021-11-10T08:22:35.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import torch
from torch import nn
from typing import List, Union, Dict
from ..builder import NECKS
from ..utils import build_stack_conv_layers, random_init_weights
@NECKS.register_module
| 38.339286 | 71 | 0.585468 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import torch
from torch import nn
from typing import List, Union, Dict
from ..builder import NECKS
from ..utils import build_stack_conv_layers, random_init_weights
@NECKS.register_module
class Adjust(nn.Module):
    """Neck that adapts backbone features with per-level conv stacks.

    Each feature named in ``feat_names`` gets its own stack of
    convolution layers (built by ``build_stack_conv_layers``); scalar
    settings are broadcast so every level receives a value.
    """

    def __init__(self,
                 feat_names: Union[str, List],
                 in_channels: Union[str, List],
                 out_channels: Union[str, List],
                 num_layers: Union[int, List],
                 kernel_size: Union[int, List],
                 init_type: str = None,
                 **kwargs):
        super(Adjust, self).__init__()
        if isinstance(feat_names, str):
            feat_names = [feat_names]
        self.feat_names = feat_names
        self.num_levels = len(feat_names)

        def _per_level(setting):
            # Broadcast a scalar setting so each feature level has its own
            # entry; list/tuple inputs are taken as-is.
            if isinstance(setting, (tuple, list)):
                return setting
            return [setting] * self.num_levels

        layer_counts = _per_level(num_layers)
        in_chs = _per_level(in_channels)
        out_chs = _per_level(out_channels)
        ksizes = _per_level(kernel_size)
        # One conv stack per feature level, in feat_names order.
        self.adjust_modules = nn.ModuleList([
            build_stack_conv_layers(num_layers=layer_counts[level],
                                    in_channels=in_chs[level],
                                    out_channels=out_chs[level],
                                    kernel_size=ksizes[level],
                                    **kwargs)
            for level in range(self.num_levels)
        ])
        random_init_weights(self.modules(), init_type)

    def forward(self, feats: Dict[str, torch.Tensor]):
        """Replace each named entry of ``feats`` with its adjusted feature."""
        for level, feat_name in enumerate(self.feat_names):
            feats[feat_name] = self.adjust_modules[level](feats[feat_name])
        return feats
| 1,783 | 3 | 76 |
035257ef15ef2d405531293d8c35ccff5962b78c | 557 | py | Python | runserver.py | GFZ-Centre-for-Early-Warning/REM_DEA | 68af70088db58acc916f2223a8e3b715beb3866d | [
"BSD-3-Clause"
] | null | null | null | runserver.py | GFZ-Centre-for-Early-Warning/REM_DEA | 68af70088db58acc916f2223a8e3b715beb3866d | [
"BSD-3-Clause"
] | null | null | null | runserver.py | GFZ-Centre-for-Early-Warning/REM_DEA | 68af70088db58acc916f2223a8e3b715beb3866d | [
"BSD-3-Clause"
] | null | null | null | '''
---------------------------
runserver.py
---------------------------
Created on 24.04.2015
Last modified on 12.01.2016
Author: Marc Wieland
Description: Starts the application using a local flask server (NOT RECOMMENDED: use wsgi implementation instead see README.md)
----
'''
from webapp import app, db, models
from flask.ext.login import LoginManager
from flask.ext.security import Security,SQLAlchemyUserDatastore
#create database stuff
db.create_all()
#CHANGE THE SECRET KEY HERE:
app.secret_key = '42'
app.run(debug=True, use_reloader=False)
| 26.52381 | 127 | 0.70377 | '''
---------------------------
runserver.py
---------------------------
Created on 24.04.2015
Last modified on 12.01.2016
Author: Marc Wieland
Description: Starts the application using a local flask server (NOT RECOMMENDED: use wsgi implementation instead see README.md)
----
'''
from webapp import app, db, models
# NOTE(review): `flask.ext.*` import paths were removed in modern Flask;
# presumably this targets an old Flask release -- confirm before upgrading
# (newer versions use `flask_login` / `flask_security` directly).
from flask.ext.login import LoginManager
from flask.ext.security import Security,SQLAlchemyUserDatastore
#create database stuff
# Create all tables declared on the SQLAlchemy models (no-op if present).
db.create_all()
#CHANGE THE SECRET KEY HERE:
# NOTE(review): a hard-coded secret key is unsafe outside development;
# load it from configuration or the environment for any real deployment.
app.secret_key = '42'
# Run Flask's built-in development server (debug on, auto-reloader off).
app.run(debug=True, use_reloader=False)
| 0 | 0 | 0 |
7476c102eb8d74542cc6384cbb824d87d94c1c07 | 3,394 | py | Python | setup.py | Bhaskers-Blu-Org2/mu_pip_environment | d62df3b1d86bf375e82a7ca09740fa0aa3504fcc | [
"BSD-2-Clause"
] | 6 | 2019-07-01T05:10:41.000Z | 2021-06-11T08:58:35.000Z | setup.py | Microsoft/mu_pip_environment | d62df3b1d86bf375e82a7ca09740fa0aa3504fcc | [
"BSD-2-Clause"
] | null | null | null | setup.py | Microsoft/mu_pip_environment | d62df3b1d86bf375e82a7ca09740fa0aa3504fcc | [
"BSD-2-Clause"
] | 5 | 2019-07-01T05:10:42.000Z | 2020-08-05T14:52:28.000Z | # @file setup.py
# This contains setup info for mu_environment pip module
#
##
# Copyright (c) 2018, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import setuptools
from setuptools.command.sdist import sdist
from setuptools.command.install import install
from setuptools.command.develop import develop
from MuEnvironment.bin.NuGet import DownloadNuget
with open("README.rst", "r") as fh:
long_description = fh.read()
class PostSdistCommand(sdist):
"""Post-sdist."""
class PostInstallCommand(install):
"""Post-install."""
class PostDevCommand(develop):
"""Post-develop."""
setuptools.setup(
name="mu_environment",
author="Project Mu Team",
author_email="maknutse@microsoft.com",
description="Project Mu distributed dependency management, build, test, and tool environments.",
long_description=long_description,
url="https://github.com/microsoft/mu_pip_environment",
license='BSD2',
packages=setuptools.find_packages(),
use_scm_version=True,
setup_requires=['setuptools_scm'],
cmdclass={
'sdist': PostSdistCommand,
'install': PostInstallCommand,
'develop': PostDevCommand,
},
include_package_data=True,
entry_points={
'console_scripts': ['omnicache=MuEnvironment.Omnicache:main', 'nuget-publish=MuEnvironment.NugetPublishing:go']
},
install_requires=[
'pyyaml',
'mu_python_library>=0.4.6'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta"
]
)
| 34.989691 | 120 | 0.68739 | # @file setup.py
# This contains setup info for mu_environment pip module
#
##
# Copyright (c) 2018, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import setuptools
from setuptools.command.sdist import sdist
from setuptools.command.install import install
from setuptools.command.develop import develop
from MuEnvironment.bin.NuGet import DownloadNuget
# Read the long description from the README with an explicit encoding:
# the default encoding is locale-dependent and can raise UnicodeDecodeError
# on systems whose locale is not UTF-8 (e.g. Windows cp1252).
with open("README.rst", "r", encoding="utf-8") as fh:
    long_description = fh.read()
class PostSdistCommand(sdist):
    """Post-sdist.

    Downloads the NuGet binary before building the source distribution so
    it can be bundled.  A failed download aborts the build on purpose.
    """
    def run(self):
        # we need to download nuget so throw the exception if we don't get it
        DownloadNuget()
        sdist.run(self)
class PostInstallCommand(install):
    """Post-install.

    Best-effort download of the NuGet binary after installation; a failed
    download must not break the install (e.g. offline environments).
    """
    def run(self):
        try:
            DownloadNuget()
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  The download stays best-effort.
            pass
        install.run(self)
class PostDevCommand(develop):
    """Post-develop.

    Best-effort download of the NuGet binary after a develop (editable)
    install; a failed download must not break the command.
    """
    def run(self):
        try:
            DownloadNuget()
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  The download stays best-effort.
            pass
        develop.run(self)
setuptools.setup(
    name="mu_environment",
    author="Project Mu Team",
    author_email="maknutse@microsoft.com",
    description="Project Mu distributed dependency management, build, test, and tool environments.",
    long_description=long_description,
    url="https://github.com/microsoft/mu_pip_environment",
    license='BSD2',
    packages=setuptools.find_packages(),
    # Derive the package version from SCM (git tag) metadata.
    use_scm_version=True,
    setup_requires=['setuptools_scm'],
    # Hook the NuGet-download steps defined above into the standard
    # sdist/install/develop commands.
    cmdclass={
        'sdist': PostSdistCommand,
        'install': PostInstallCommand,
        'develop': PostDevCommand,
    },
    include_package_data=True,
    entry_points={
        'console_scripts': ['omnicache=MuEnvironment.Omnicache:main', 'nuget-publish=MuEnvironment.NugetPublishing:go']
    },
    install_requires=[
        'pyyaml',
        'mu_python_library>=0.4.6'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Development Status :: 4 - Beta"
    ]
)
| 318 | 0 | 81 |
b514131d371af8802111a0dae7ec5445b63bcfcd | 10,929 | py | Python | tensorflow_probability/python/distributions/jax_transformation_test.py | bourov/probability | 1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2 | [
"Apache-2.0"
] | 2 | 2020-12-17T20:43:24.000Z | 2021-06-11T22:09:16.000Z | tensorflow_probability/python/distributions/jax_transformation_test.py | bourov/probability | 1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/jax_transformation_test.py | bourov/probability | 1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2 | [
"Apache-2.0"
] | 1 | 2021-01-03T20:23:52.000Z | 2021-01-03T20:23:52.000Z | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests TFP distribution compositionality with JAX transformations."""
import functools
from absl import flags
from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as hps
import jax
from jax import random
import jax.numpy as np
# pylint: disable=no-name-in-module
from tensorflow_probability.python.distributions._jax import hypothesis_testlib as dhps
from tensorflow_probability.python.experimental.substrates.jax import tf2jax as tf
from tensorflow_probability.python.internal._jax import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal._jax import test_util
flags.DEFINE_bool('execute_only', False,
'If specified, skip equality checks and only verify '
'execution of transforms works.')
flags.DEFINE_bool('ignore_blocklists', False,
'If specified, run tests even for blocklisted distributions.')
FLAGS = flags.FLAGS
JIT_SAMPLE_BLOCKLIST = ()
JIT_LOGPROB_BLOCKLIST = (
'BatchReshape',
'Bates',
'Independent',
'MixtureSameFamily',
'TransformedDistribution',
)
VMAP_SAMPLE_BLOCKLIST = ('NegativeBinomial',)
VMAP_LOGPROB_BLOCKLIST = (
'BatchReshape',
'Bates',
'Independent',
'MixtureSameFamily',
'TransformedDistribution',
'QuantizedDistribution',
)
JVP_SAMPLE_BLOCKLIST = (
'Bates',
'BetaBinomial',
'Binomial',
'DirichletMultinomial',
'Gamma',
'GeneralizedNormal',
'Multinomial',
'OrderedLogistic',
'PERT',
'Triangular',
'TruncatedNormal',
'Uniform',
'VonMises',
'VonMisesFisher',
'WishartTriL',
)
JVP_LOGPROB_SAMPLE_BLOCKLIST = (
'BetaBinomial',
'Binomial',
'JohnsonSU',
'NegativeBinomial',
'Poisson',
)
JVP_LOGPROB_PARAM_BLOCKLIST = (
'Bates',
'Beta',
'BetaBinomial',
'Binomial',
'CholeskyLKJ',
'GammaGamma',
'HalfStudentT',
'JohnsonSU',
'LKJ',
'NegativeBinomial',
'OrderedLogistic',
'PERT',
'PowerSpherical',
'ProbitBernoulli',
'StudentT',
'Triangular',
'TruncatedNormal',
'Uniform',
'WishartTriL',
)
VJP_SAMPLE_BLOCKLIST = (
'Bates',
'Gamma',
'GeneralizedNormal',
'VonMisesFisher',
)
VJP_LOGPROB_SAMPLE_BLOCKLIST = (
'Categorical',
'OneHotCategorical',
'OrderedLogistic',
'PlackettLuce',
'ProbitBernoulli',
)
VJP_LOGPROB_PARAM_BLOCKLIST = ()
DEFAULT_MAX_EXAMPLES = 3
test_all_distributions = parameterized.named_parameters(
{'testcase_name': dname, 'dist_name': dname} for dname in
sorted(list(dhps.INSTANTIABLE_BASE_DISTS.keys())
+ list(d for d in dhps.INSTANTIABLE_META_DISTS if d != 'Mixture')))
test_base_distributions = parameterized.named_parameters(
{'testcase_name': dname, 'dist_name': dname} for dname in
sorted(list(dhps.INSTANTIABLE_BASE_DISTS.keys())))
del _GradTest # not intended for standalone execution
if __name__ == '__main__':
tf.test.main()
| 33.835913 | 87 | 0.708116 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests TFP distribution compositionality with JAX transformations."""
import functools
from absl import flags
from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as hps
import jax
from jax import random
import jax.numpy as np
# pylint: disable=no-name-in-module
from tensorflow_probability.python.distributions._jax import hypothesis_testlib as dhps
from tensorflow_probability.python.experimental.substrates.jax import tf2jax as tf
from tensorflow_probability.python.internal._jax import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal._jax import test_util
flags.DEFINE_bool('execute_only', False,
'If specified, skip equality checks and only verify '
'execution of transforms works.')
flags.DEFINE_bool('ignore_blocklists', False,
'If specified, run tests even for blocklisted distributions.')
FLAGS = flags.FLAGS
JIT_SAMPLE_BLOCKLIST = ()
JIT_LOGPROB_BLOCKLIST = (
'BatchReshape',
'Bates',
'Independent',
'MixtureSameFamily',
'TransformedDistribution',
)
VMAP_SAMPLE_BLOCKLIST = ('NegativeBinomial',)
VMAP_LOGPROB_BLOCKLIST = (
'BatchReshape',
'Bates',
'Independent',
'MixtureSameFamily',
'TransformedDistribution',
'QuantizedDistribution',
)
JVP_SAMPLE_BLOCKLIST = (
'Bates',
'BetaBinomial',
'Binomial',
'DirichletMultinomial',
'Gamma',
'GeneralizedNormal',
'Multinomial',
'OrderedLogistic',
'PERT',
'Triangular',
'TruncatedNormal',
'Uniform',
'VonMises',
'VonMisesFisher',
'WishartTriL',
)
JVP_LOGPROB_SAMPLE_BLOCKLIST = (
'BetaBinomial',
'Binomial',
'JohnsonSU',
'NegativeBinomial',
'Poisson',
)
JVP_LOGPROB_PARAM_BLOCKLIST = (
'Bates',
'Beta',
'BetaBinomial',
'Binomial',
'CholeskyLKJ',
'GammaGamma',
'HalfStudentT',
'JohnsonSU',
'LKJ',
'NegativeBinomial',
'OrderedLogistic',
'PERT',
'PowerSpherical',
'ProbitBernoulli',
'StudentT',
'Triangular',
'TruncatedNormal',
'Uniform',
'WishartTriL',
)
VJP_SAMPLE_BLOCKLIST = (
'Bates',
'Gamma',
'GeneralizedNormal',
'VonMisesFisher',
)
VJP_LOGPROB_SAMPLE_BLOCKLIST = (
'Categorical',
'OneHotCategorical',
'OrderedLogistic',
'PlackettLuce',
'ProbitBernoulli',
)
VJP_LOGPROB_PARAM_BLOCKLIST = ()
DEFAULT_MAX_EXAMPLES = 3
test_all_distributions = parameterized.named_parameters(
{'testcase_name': dname, 'dist_name': dname} for dname in
sorted(list(dhps.INSTANTIABLE_BASE_DISTS.keys())
+ list(d for d in dhps.INSTANTIABLE_META_DISTS if d != 'Mixture')))
test_base_distributions = parameterized.named_parameters(
{'testcase_name': dname, 'dist_name': dname} for dname in
sorted(list(dhps.INSTANTIABLE_BASE_DISTS.keys())))
class JitTest(test_util.TestCase):
  """Property tests: sample/log_prob run under jax.jit and match eager."""
  @test_all_distributions
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
  def testSample(self, dist_name, data):
    """Jitted sampling runs; unless --execute_only, matches eager sampling."""
    if dist_name in JIT_SAMPLE_BLOCKLIST and not FLAGS.ignore_blocklists:
      self.skipTest('Distribution currently broken.')
    dist = data.draw(dhps.distributions(enable_vars=False,
                                        dist_name=dist_name))
    def _sample(seed):
      return dist.sample(seed=seed)
    seed = test_util.test_seed()
    result = jax.jit(_sample)(seed)
    if not FLAGS.execute_only:
      self.assertAllClose(_sample(seed), result, rtol=1e-6,
                          atol=1e-6)
  @test_all_distributions
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
  def testLogProb(self, dist_name, data):
    """Jitted log_prob runs; unless --execute_only, matches eager log_prob."""
    if dist_name in JIT_LOGPROB_BLOCKLIST and not FLAGS.ignore_blocklists:
      self.skipTest('Distribution currently broken.')
    dist = data.draw(dhps.distributions(enable_vars=False,
                                        dist_name=dist_name))
    sample = dist.sample(seed=test_util.test_seed())
    result = jax.jit(dist.log_prob)(sample)
    if not FLAGS.execute_only:
      self.assertAllClose(dist.log_prob(sample), result,
                          rtol=1e-6, atol=1e-6)
class VmapTest(test_util.TestCase):
  """Property tests: sample/log_prob work under jax.vmap."""
  @test_all_distributions
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
  def testSample(self, dist_name, data):
    """Vmapped sampling over 10 split seeds executes (no value check here)."""
    if dist_name in VMAP_SAMPLE_BLOCKLIST and not FLAGS.ignore_blocklists:
      self.skipTest('Distribution currently broken.')
    dist = data.draw(dhps.distributions(enable_vars=False,
                                        dist_name=dist_name))
    def _sample(seed):
      return dist.sample(seed=seed)
    seed = test_util.test_seed()
    jax.vmap(_sample)(random.split(seed, 10))
  @test_all_distributions
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
  def testLogProb(self, dist_name, data):
    """Vmapped log_prob over 10 samples matches the batched eager log_prob."""
    if dist_name in VMAP_LOGPROB_BLOCKLIST and not FLAGS.ignore_blocklists:
      self.skipTest('Distribution currently broken.')
    dist = data.draw(dhps.distributions(enable_vars=False,
                                        dist_name=dist_name))
    sample = dist.sample(seed=test_util.test_seed(), sample_shape=10)
    result = jax.vmap(dist.log_prob)(sample)
    if not FLAGS.execute_only:
      self.assertAllClose(result, dist.log_prob(sample),
                          rtol=1e-6, atol=1e-6)
class _GradTest(test_util.TestCase):
def _make_distribution(self, dist_name, params,
batch_shape, override_params=None):
override_params = override_params or {}
all_params = dict(params)
for param_name, override_param in override_params.items():
all_params[param_name] = override_param
all_params = dhps.constrain_params(all_params, dist_name)
all_params = dhps.modify_params(all_params, dist_name, validate_args=False)
return dhps.base_distributions(
enable_vars=False, dist_name=dist_name, params=all_params,
batch_shape=batch_shape, validate_args=False)
def _param_func_generator(self, data, dist_name, params, batch_shape, func,
generate_sample_function=False):
for param_name, param in params.items():
if (not tf.is_tensor(param)
or not np.issubdtype(param.dtype, np.floating)):
continue
def _func(param_name, param):
dist = data.draw(self._make_distribution(
dist_name, params, batch_shape,
override_params={param_name: param}))
return func(dist)
yield param_name, param, _func
@test_base_distributions
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
def testSample(self, dist_name, data):
if dist_name in self.sample_blocklist and not FLAGS.ignore_blocklists:
self.skipTest('Distribution currently broken.')
def _sample(dist):
return dist.sample(seed=random.PRNGKey(0))
params_unconstrained, batch_shape = data.draw(
dhps.base_distribution_unconstrained_params(
enable_vars=False, dist_name=dist_name))
for param_name, unconstrained_param, func in self._param_func_generator(
data, dist_name, params_unconstrained, batch_shape, _sample):
self._test_transformation(
functools.partial(func, param_name), unconstrained_param,
msg=param_name)
  @test_base_distributions
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
  def testLogProbParam(self, dist_name, data):
    """Checks gradients of `dist.log_prob(x)` w.r.t. distribution parameters."""
    if (dist_name in self.logprob_param_blocklist and
        not FLAGS.ignore_blocklists):
      self.skipTest('Distribution currently broken.')
    params, batch_shape = data.draw(
        dhps.base_distribution_unconstrained_params(
            enable_vars=False, dist_name=dist_name))
    constrained_params = dhps.constrain_params(params, dist_name)
    # Draw one fixed sample from an identically-parameterized distribution so
    # the gradient below flows only through the parameters, not the sample.
    sampling_dist = data.draw(dhps.base_distributions(
        batch_shape=batch_shape, enable_vars=False, dist_name=dist_name,
        params=constrained_params))
    sample = sampling_dist.sample(seed=random.PRNGKey(0))
    def _log_prob(dist):
      return dist.log_prob(sample)
    for param_name, param, func in self._param_func_generator(
        data, dist_name, params, batch_shape, _log_prob):
      self._test_transformation(
          functools.partial(func, param_name), param, msg=param_name)
  @test_base_distributions
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
  def testLogProbSample(self, dist_name, data):
    """Checks gradients of `dist.log_prob(x)` w.r.t. the input `x` itself."""
    if (dist_name in self.logprob_sample_blocklist and
        not FLAGS.ignore_blocklists):
      self.skipTest('Distribution currently broken.')
    params, batch_shape = data.draw(
        dhps.base_distribution_unconstrained_params(
            enable_vars=False, dist_name=dist_name))
    constrained_params = dhps.constrain_params(params, dist_name)
    dist = data.draw(dhps.base_distributions(
        batch_shape=batch_shape, enable_vars=False, dist_name=dist_name,
        params=constrained_params))
    sample = dist.sample(seed=random.PRNGKey(0))
    # The closure parameter shadows the outer `sample` on purpose: the
    # transformation is differentiated w.r.t. the sample argument.
    def _log_prob(sample):
      return dist.log_prob(sample)
    self._test_transformation(_log_prob, sample)
class JVPTest(_GradTest):
  """Forward-mode (JVP) gradient checks for sampling and log_prob."""

  sample_blocklist = JVP_SAMPLE_BLOCKLIST
  logprob_param_blocklist = JVP_LOGPROB_PARAM_BLOCKLIST
  logprob_sample_blocklist = JVP_LOGPROB_SAMPLE_BLOCKLIST

  def _test_transformation(self, func, param, msg=None):
    # Push an all-ones tangent forward through `func` at `param`.
    tangent = np.ones_like(param)
    _, jvp_out = jax.jvp(func, (param,), (tangent,))
    if FLAGS.execute_only:
      return
    # An identically-zero forward-mode gradient signals a broken rule.
    self.assertNotAllEqual(jvp_out, np.zeros_like(jvp_out), msg=msg)
class VJPTest(_GradTest):
  """Reverse-mode (VJP) gradient checks for sampling and log_prob."""

  sample_blocklist = VJP_SAMPLE_BLOCKLIST
  logprob_param_blocklist = VJP_LOGPROB_PARAM_BLOCKLIST
  logprob_sample_blocklist = VJP_LOGPROB_SAMPLE_BLOCKLIST

  def _test_transformation(self, func, param, msg=None):
    out, f_vjp = jax.vjp(func, param)
    # Pull back an all-ones cotangent matching the output's shape and dtype.
    cotangent = np.ones_like(out).astype(out.dtype)
    vjp_out, = f_vjp(cotangent)
    if FLAGS.execute_only:
      return
    self.assertNotAllEqual(vjp_out, np.zeros_like(vjp_out), msg=msg)
# Remove the abstract base so the unittest runner does not try to execute it.
del _GradTest # not intended for standalone execution
if __name__ == '__main__':
  tf.test.main()
| 5,673 | 1,482 | 115 |
0bee46430076063eb07a695c066b5cd6da03fd47 | 1,257 | py | Python | setup.py | JWKennington/CompPhys | f53ad33609738eeed81aa4b3390599668bf54017 | [
"MIT"
] | null | null | null | setup.py | JWKennington/CompPhys | f53ad33609738eeed81aa4b3390599668bf54017 | [
"MIT"
] | null | null | null | setup.py | JWKennington/CompPhys | f53ad33609738eeed81aa4b3390599668bf54017 | [
"MIT"
] | null | null | null | """Setup file
"""
import setuptools
import compphys
# Long description comes straight from the project README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(name='compphys',
                 version=compphys.__version__,
                 description='compphys',
                 long_description=long_description,
                 long_description_content_type="text/markdown",
                 # BUG FIX: the previous specifier '==3.7, ==3.8' is a PEP 440
                 # conjunction (comma means AND), which no interpreter can
                 # satisfy; '>=3.7, <3.9' expresses the intended "3.7 or 3.8".
                 python_requires='>=3.7, <3.9',
                 url=compphys.__github_url__,
                 author='James Kennington',
                 author_email='jwkennington@psu.edu',
                 license='MIT',
                 packages=setuptools.find_packages(),
                 install_requires=[
                     'matplotlib',
                     'numpy',
                     'pytest',
                     'scipy',
                     'simpy',
                     'plotly',
                 ],
                 classifiers=[
                     "Programming Language :: Python",
                     # Keep classifiers consistent with python_requires.
                     "Programming Language :: Python :: 3.7",
                     "Programming Language :: Python :: 3.8",
                     "Operating System :: MacOS",
                     "Operating System :: POSIX :: Linux",
                 ],
                 zip_safe=False,
                 include_package_data=True,
                 )
| 32.230769 | 63 | 0.446301 | """Setup file
"""
import setuptools
import compphys
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(name='compphys',
version=compphys.__version__,
description='compphys',
long_description=long_description,
long_description_content_type="text/markdown",
python_requires='==3.7, ==3.8',
url=compphys.__github_url__,
author='James Kennington',
author_email='jwkennington@psu.edu',
license='MIT',
packages=setuptools.find_packages(),
install_requires=[
'matplotlib',
'numpy',
'pytest',
'scipy',
'simpy',
'plotly',
],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
],
zip_safe=False,
include_package_data=True,
)
| 0 | 0 | 0 |
4c0c86973bef9febabafdbc0b7755320ac5cdf61 | 166 | py | Python | lazy_record/typecasts.py | ECESeniorDesign/lazy_record | 929d3cc7c2538b0f792365c0d2b0e0d41084c2dd | [
"MIT"
] | 2 | 2017-02-04T03:33:28.000Z | 2021-01-08T05:58:18.000Z | lazy_record/typecasts.py | ECESeniorDesign/lazy_record | 929d3cc7c2538b0f792365c0d2b0e0d41084c2dd | [
"MIT"
] | 17 | 2016-01-05T00:09:30.000Z | 2016-02-15T20:06:45.000Z | lazy_record/typecasts.py | ECESeniorDesign/lazy_record | 929d3cc7c2538b0f792365c0d2b0e0d41084c2dd | [
"MIT"
] | null | null | null | """Functions to convert objects to a type"""
| 20.75 | 44 | 0.716867 | """Functions to convert objects to a type"""
def date(datetime):
    """Typecast for date columns; currently the identity function."""
    # may get more complexity later
    return datetime
def datetime(datetime):
    """Typecast for datetime columns; currently the identity function.

    NOTE(review): both the function and its parameter are named `datetime`,
    which would shadow the stdlib module name were it imported here.
    """
    return datetime
| 76 | 0 | 45 |
3b034620c9c0ef8e147d7736b8c194653375c054 | 2,070 | py | Python | ingestion/main.py | mharrisb1/blocktrace | 3c54286d4f28c3b0610f577dfdbbf643953475fa | [
"MIT"
] | null | null | null | ingestion/main.py | mharrisb1/blocktrace | 3c54286d4f28c3b0610f577dfdbbf643953475fa | [
"MIT"
] | null | null | null | ingestion/main.py | mharrisb1/blocktrace | 3c54286d4f28c3b0610f577dfdbbf643953475fa | [
"MIT"
] | null | null | null | import os
from typing import List, Optional
import multiprocessing as mp
from fastapi import FastAPI, BackgroundTasks
from Blocktrace.Networks import Wax
from Blocktrace.Streaming import stream_writer, publish_messages
app = FastAPI()
API_KEY = os.getenv("BT__API_KEY") or ""
GCP_PROJECT_ID = os.getenv("BT__GCP_PROJECT_ID") or ""
GCP_PUBSUB_BLOCK_TOPIC_ID = os.getenv("BT__GCP_PUBSUB_BLOCK_TOPIC_ID") or ""
GCP_PUBSUB_TX_TOPIC_ID = os.getenv("BT__GCP_PUBSUB_TX_TOPIC_ID") or ""
GCP_PUBSUB_ACT_TOPIC_ID = os.getenv("BT__GCP_PUBSUB_ACT_TOPIC_ID") or ""
@app.get("/api/v1/invoke/")
| 25.875 | 76 | 0.669565 | import os
from typing import List, Optional
import multiprocessing as mp
from fastapi import FastAPI, BackgroundTasks
from Blocktrace.Networks import Wax
from Blocktrace.Streaming import stream_writer, publish_messages
app = FastAPI()
API_KEY = os.getenv("BT__API_KEY") or ""
GCP_PROJECT_ID = os.getenv("BT__GCP_PROJECT_ID") or ""
GCP_PUBSUB_BLOCK_TOPIC_ID = os.getenv("BT__GCP_PUBSUB_BLOCK_TOPIC_ID") or ""
GCP_PUBSUB_TX_TOPIC_ID = os.getenv("BT__GCP_PUBSUB_TX_TOPIC_ID") or ""
GCP_PUBSUB_ACT_TOPIC_ID = os.getenv("BT__GCP_PUBSUB_ACT_TOPIC_ID") or ""
def blocktrace_ingest(
    gcp_project_id: str,
    block_topic_id: str,
    tx_topic_id: str,
    act_topic_id: str,
    api_key: str,
    contracts: Optional[List[str]] = None,
    start_block: Optional[int] = None,
    end_block: Optional[int] = None,
):
    """Stream WAX blocktrace data into the given Pub/Sub topics.

    This process writes the WAX stream into a multiprocessing queue while a
    daemon child process drains the queue and publishes blocks, transactions,
    and actions to their respective topics.

    FIX: `contracts` previously defaulted to a mutable `[]` (shared across
    calls); `None` is now the sentinel and is normalized to a fresh list.
    """
    if contracts is None:
        contracts = []
    wax = Wax(
        api_key=api_key,
        contracts=contracts,
        start_block=start_block,
        end_block=end_block,
    )
    q = mp.Queue()
    reader_proc = mp.Process(
        target=publish_messages,
        kwargs={
            "q": q,
            "gcp_project_id": gcp_project_id,
            "block_topic_id": block_topic_id,
            "tx_topic_id": tx_topic_id,
            "act_topic_id": act_topic_id,
        },
    )
    # Daemonize so the consumer dies with the writer instead of lingering.
    reader_proc.daemon = True
    reader_proc.start()
    # Blocks until the WAX stream is exhausted.
    stream_writer(stream=wax.blocktrace(), q=q)
    reader_proc.join()
@app.get("/api/v1/invoke/")
def invoke(
    background_tasks: BackgroundTasks,
    contracts: Optional[str] = None,
    start_block: Optional[int] = None,
    end_block: Optional[int] = None,
):
    """Kick off a background ingestion run.

    `contracts` is a comma-separated list of contract names; topic and
    project configuration comes from the module-level environment constants.
    """
    # An absent or empty `contracts` query parameter means "no filter".
    contract_list = contracts.split(",") if contracts else []
    background_tasks.add_task(
        func=blocktrace_ingest,
        gcp_project_id=GCP_PROJECT_ID,
        block_topic_id=GCP_PUBSUB_BLOCK_TOPIC_ID,
        tx_topic_id=GCP_PUBSUB_TX_TOPIC_ID,
        act_topic_id=GCP_PUBSUB_ACT_TOPIC_ID,
        api_key=API_KEY,
        contracts=contract_list,
        start_block=start_block,
        end_block=end_block,
    )
    return {"status": "success"}
| 1,438 | 0 | 45 |
f245d7340956f5a4519687e8747eb806b6b1f7a9 | 2,544 | py | Python | src/diffsnn/popp/thinning.py | ibm-research-tokyo/diffsnn | 9299fc5e8542c6fde33a287f81e7ae3682b2fd9d | [
"Apache-2.0"
] | 20 | 2021-06-01T02:42:43.000Z | 2022-02-14T07:08:34.000Z | src/diffsnn/popp/thinning.py | ibm-research-tokyo/diffsnn | 9299fc5e8542c6fde33a287f81e7ae3682b2fd9d | [
"Apache-2.0"
] | null | null | null | src/diffsnn/popp/thinning.py | ibm-research-tokyo/diffsnn | 9299fc5e8542c6fde33a287f81e7ae3682b2fd9d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Title '''
__author__ = 'Hiroshi Kajino <KAJINO@jp.ibm.com>'
__copyright__ = 'Copyright IBM Corp. 2020, 2021'
import math
import torch
from ..data import (EventSeq,
MultivariateEventSeq,
append_hidden)
from ..utils import complete_logprob
from ..pp.poisson import PoissonProcess
from ..pp.thinning import MultivariateThinningAlgorithmMixin
EPS = 1e-2
| 41.032258 | 82 | 0.608491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Title '''
__author__ = 'Hiroshi Kajino <KAJINO@jp.ibm.com>'
__copyright__ = 'Copyright IBM Corp. 2020, 2021'
import math
import torch
from ..data import (EventSeq,
MultivariateEventSeq,
append_hidden)
from ..utils import complete_logprob
from ..pp.poisson import PoissonProcess
from ..pp.thinning import MultivariateThinningAlgorithmMixin
EPS = 1e-2
class MultivariateThinningAlgorithmForPOMixin(MultivariateThinningAlgorithmMixin):
    """Imputes hidden events for a partially observed multivariate process."""

    def sample_hidden_seq(self,
                          history: MultivariateEventSeq,
                          base_intensity: float) -> MultivariateEventSeq:
        ''' impute hidden units

        Candidate times are proposed from a homogeneous Poisson process with
        rate `base_intensity` (+ EPS) and accepted into one of the hidden
        dimensions with probability proportional to that dimension's
        conditional intensity — a thinning/rejection scheme.
        '''
        # Hidden dimensions occupy indices [obs_dim, obs_dim + hidden_dim).
        tgt_dim_list = list(range(self.obs_dim, self.obs_dim + self.hidden_dim))
        output_history = append_hidden(history, self.hidden_dim)
        # EPS keeps the proposal rate strictly positive for the log below.
        base_intensity = base_intensity + EPS
        log_base_intensity = math.log(base_intensity)
        if not hasattr(self, 'base_pp'):
            # Lazily build (and cache) the homogeneous proposal process.
            self.base_pp = PoissonProcess(
                intensity=base_intensity,
                seed=self.seed)
            self.base_pp.params['intensity'].requires_grad = False
        # Proposal history starts as an empty window at the left endpoint.
        base_pp_history = EventSeq(time_list=[],
                                   obs_period=[history.obs_period[0],
                                               history.obs_period[0]])
        self.base_pp.params['intensity'].data = base_intensity
        while True:
            candidate_time_stamp = self.base_pp.sample_candidate(
                base_pp_history)
            # Stop once a candidate falls past the observation window.
            if candidate_time_stamp > history.obs_period[1]:
                break
            log_cond_int_tensor = self.all_log_conditional_intensity(
                candidate_time_stamp,
                output_history,
                dim_list=tgt_dim_list)
            # Per-dimension acceptance log-probs; complete_logprob appears to
            # append the residual "reject" bucket so the draw normalizes.
            log_acceptance_rate_tensor = complete_logprob(
                log_cond_int_tensor - log_base_intensity)
            random_idx = self.categorical(log_acceptance_rate_tensor)
            # Index == hidden_dim is the rejection outcome: insert nothing.
            if random_idx != self.hidden_dim:
                # One-hot mark over all dims, set at the accepted hidden dim.
                _mark = [0.] * self.dim
                _mark[tgt_dim_list[random_idx]] = 1.0
                mark = torch.tensor(_mark)
                output_history.insert_hidden(candidate_time_stamp,
                                             mark)
            #base_intensity = self.upperbound_cond_int(history) + EPS
            base_pp_history.append(candidate_time_stamp)
        # NOTE(review): self-assignment below is a no-op — confirm intent.
        history.obs_period[1] = history.obs_period[1]
        return output_history
| 0 | 2,077 | 23 |
61b5cf4058bc359d31b1993b7454a6d576f40a1b | 2,320 | py | Python | downloader.py | sphexoo/vlc_yt_downloader | c43f419f6fa3d2703cefc8eb2eedb966046a98ab | [
"MIT"
] | null | null | null | downloader.py | sphexoo/vlc_yt_downloader | c43f419f6fa3d2703cefc8eb2eedb966046a98ab | [
"MIT"
] | null | null | null | downloader.py | sphexoo/vlc_yt_downloader | c43f419f6fa3d2703cefc8eb2eedb966046a98ab | [
"MIT"
] | null | null | null | import argparse
import subprocess
import os
import re
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("url", help="Specify youtube url to download audio from.")
parser.add_argument("--out", default="out", metavar="<FILENAME>", help="Specify name of output file.")
parser.add_argument("-verbose", action="store_true", help="Show VLC media player GUI when downloading audio.")
parser.add_argument("-no_url_check", action="store_true", help="Disables url regex check. May result in unexpected behavior for invalid links.")
args = parser.parse_args()
main(args)
| 38.666667 | 191 | 0.658621 | import argparse
import subprocess
import os
import re
def cmdcall(command):
    """Run *command* as a subprocess and block until it exits.

    The child inherits stdin/stdout/stderr, so any VLC console output goes
    straight to the terminal; output is not captured or checked.

    NOTE(review): callers in this script pass a single whitespace-joined
    command *string* without shell=True, which only resolves on Windows;
    on POSIX this raises FileNotFoundError — consider argument lists.
    """
    # subprocess.run() is the one-shot equivalent of Popen(...) followed by
    # communicate(): it spawns the process and waits for completion.
    subprocess.run(command)
def trimUrl(url):
    """Return *url* truncated at the first '&' (drops extra query params)."""
    # The first maximal run of non-'&' characters is everything up to the
    # first query-parameter separator.
    match = re.search(r'[^&]+', url)
    trimmed = match.group()
    print("[INFO]: Input URL trimmed to: {}".format(trimmed))
    return trimmed
def isValidUrl(url):
    """Best-effort check that *url* looks like a YouTube watch link.

    Returns True for e.g. 'https://www.youtube.com/watch?v=<id>'; the scheme
    and 'www.' prefix are optional. Prints an error hint and returns False
    otherwise.
    """
    # Fixes vs. the previous pattern: the dots in 'www.' and 'youtube.com'
    # are escaped, and the ID class now includes '-' and '_', which occur in
    # YouTube video IDs (IDs *starting* with them were wrongly rejected).
    if re.match(r'(https?://)?(www\.)?youtube\.com/watch\?v=[a-zA-Z0-9_-]+', url):
        return True
    print("[ERROR]: Invalid URL. Specify a valid URL or pass -no_url_check to skip URL check.")
    return False
def main(args):
    """Download a YouTube video's audio via VLC and transcode it to MP3.

    Two VLC passes: (1) dump the stream's audio to a temporary Opus/OGG
    file, (2) transcode that file to <args.out>.mp3. Requires the VLC_HOME
    environment variable to point at the VLC executable. Returns -1 on
    configuration/validation failure, otherwise None.
    """
    if not "VLC_HOME" in os.environ:
        print("[ERROR]: Environment variable VLC_HOME not set. Set VLC_HOME to VLC executable to run this script.")
        return -1
    vlc_path = os.getenv("VLC_HOME")
    # Strip extra query parameters (playlist, timestamp) before validating.
    url = trimUrl(args.url)
    if not args.no_url_check and not isValidUrl(url):
        return -1
    tmp = 'tmp.ogg'
    dst = args.out + ".mp3"
    # Pass 1: capture the stream's audio into a temporary OGG container.
    command_tmp = vlc_path + ' ' + url + ' --sout=#transcode{acodec="opus",ab="128","channels=2",samplerate="44100"}:standard{access=file,mux=ogg,dst=' + tmp +'} vlc://quit'
    # Pass 2: transcode the temp file to MP3 (refuses to overwrite dst).
    command_out = vlc_path + ' ' + tmp + ' --sout=#transcode{acodec="mp3",ab="128","channels=2",samplerate="44100"}:standard{access=file{no-overwrite},mux=dummy,dst="' + dst + '"} vlc://quit'
    if not args.verbose:
        # Keep VLC's Qt interface quiet unless the user asked to see it.
        command_tmp += " --qt-notification=0 --qt-start-minimized"
        command_out += " --qt-notification=0 --qt-start-minimized"
    print("[INFO]: Downloading audio")
    cmdcall(command_tmp)
    print("[INFO]: Downloading audio finished")
    print("[INFO]: Converting to .mp3")
    cmdcall(command_out)
    print("[INFO]: Converting to .mp3 finished")
    print("[INFO]: Cleanup")
    os.remove(tmp)
    print("[INFO]: Done.")
if __name__ == "__main__":
    # CLI: positional YouTube URL plus output-name and behavior switches.
    parser = argparse.ArgumentParser()
    parser.add_argument("url", help="Specify youtube url to download audio from.")
    parser.add_argument("--out", default="out", metavar="<FILENAME>", help="Specify name of output file.")
    parser.add_argument("-verbose", action="store_true", help="Show VLC media player GUI when downloading audio.")
    parser.add_argument("-no_url_check", action="store_true", help="Disables url regex check. May result in unexpected behavior for invalid links.")
    args = parser.parse_args()
    main(args)
| 1,602 | 0 | 92 |
261e351ade7c25413d564adb4e59ef72613cb9f9 | 2,045 | py | Python | tests/unit/tst_26.py | qrefine/qrefine | 016cac07a39e032c07f34384065dbd4756fe85f8 | [
"Apache-2.0"
] | 17 | 2016-01-13T02:22:26.000Z | 2021-04-03T18:58:43.000Z | tests/unit/tst_26.py | rajeevroy09/qrefine | 789cb6266d7a3055aea0ebb5a5f0a253680a97d0 | [
"Apache-2.0"
] | 78 | 2015-12-23T12:03:38.000Z | 2022-01-28T18:13:21.000Z | tests/unit/tst_26.py | rajeevroy09/qrefine | 789cb6266d7a3055aea0ebb5a5f0a253680a97d0 | [
"Apache-2.0"
] | 11 | 2017-04-04T04:10:25.000Z | 2021-04-13T08:54:54.000Z | import os, sys
import run_tests
from libtbx import easy_run
import libtbx.load_env
qrefine_path = libtbx.env.find_in_repositories("qrefine")
pdb_lines = '''
CRYST1 72.470 66.336 68.552 90.00 90.00 90.00 P 1
ATOM 387 N HIS A 30 62.619 25.986 37.359 1.00 66.84 N
ATOM 388 CA HIS A 30 63.258 26.030 36.050 1.00 70.57 C
ATOM 389 C HIS A 30 64.699 26.498 36.196 1.00 70.51 C
ATOM 390 O HIS A 30 64.980 27.444 36.921 1.00 73.92 O
ATOM 391 CB HIS A 30 62.568 26.958 35.058 1.00 70.79 C
ATOM 392 CG HIS A 30 61.106 26.715 34.861 1.00 68.99 C
ATOM 393 ND1 HIS A 30 60.132 27.545 35.365 1.00 77.35 N
ATOM 394 CD2 HIS A 30 60.459 25.708 34.234 1.00 70.51 C
ATOM 395 CE1 HIS A 30 58.941 27.084 35.013 1.00 79.15 C
ATOM 396 NE2 HIS A 30 59.114 25.973 34.318 1.00 70.69 N
ATOM 397 H HIS A 30 61.945 26.509 37.464 0.00 66.84 H
ATOM 398 HA HIS A 30 63.202 25.127 35.700 0.00 70.57 H
ATOM 399 HB2 HIS A 30 62.691 27.873 35.355 0.00 70.79 H
ATOM 400 HB3 HIS A 30 63.012 26.877 34.200 0.00 70.79 H
ATOM 401 HD2 HIS A 30 60.851 24.972 33.822 0.00 70.51 H
ATOM 402 HE1 HIS A 30 58.123 27.475 35.220 0.00 79.15 H
ATOM 403 HE2 HIS A 30 58.487 25.495 33.975 0.00 70.69 H
HETATM 541 ZN ZN A 101 60.278 29.235 36.302 1.00 76.89 ZN
TER
'''
if(__name__=='__main__'):
prefix = os.path.basename(__file__).replace(".py","")
run_tests.runner(function=run, prefix=prefix, disable=False)
| 44.456522 | 78 | 0.549144 | import os, sys
import run_tests
from libtbx import easy_run
import libtbx.load_env
qrefine_path = libtbx.env.find_in_repositories("qrefine")
pdb_lines = '''
CRYST1 72.470 66.336 68.552 90.00 90.00 90.00 P 1
ATOM 387 N HIS A 30 62.619 25.986 37.359 1.00 66.84 N
ATOM 388 CA HIS A 30 63.258 26.030 36.050 1.00 70.57 C
ATOM 389 C HIS A 30 64.699 26.498 36.196 1.00 70.51 C
ATOM 390 O HIS A 30 64.980 27.444 36.921 1.00 73.92 O
ATOM 391 CB HIS A 30 62.568 26.958 35.058 1.00 70.79 C
ATOM 392 CG HIS A 30 61.106 26.715 34.861 1.00 68.99 C
ATOM 393 ND1 HIS A 30 60.132 27.545 35.365 1.00 77.35 N
ATOM 394 CD2 HIS A 30 60.459 25.708 34.234 1.00 70.51 C
ATOM 395 CE1 HIS A 30 58.941 27.084 35.013 1.00 79.15 C
ATOM 396 NE2 HIS A 30 59.114 25.973 34.318 1.00 70.69 N
ATOM 397 H HIS A 30 61.945 26.509 37.464 0.00 66.84 H
ATOM 398 HA HIS A 30 63.202 25.127 35.700 0.00 70.57 H
ATOM 399 HB2 HIS A 30 62.691 27.873 35.355 0.00 70.79 H
ATOM 400 HB3 HIS A 30 63.012 26.877 34.200 0.00 70.79 H
ATOM 401 HD2 HIS A 30 60.851 24.972 33.822 0.00 70.51 H
ATOM 402 HE1 HIS A 30 58.123 27.475 35.220 0.00 79.15 H
ATOM 403 HE2 HIS A 30 58.487 25.495 33.975 0.00 70.69 H
HETATM 541 ZN ZN A 101 60.278 29.235 36.302 1.00 76.89 ZN
TER
'''
def run(prefix):
    """Regression test: qr.charges must report charge 0 for the Zn/His model.

    Writes the embedded PDB fragment to disk, runs `qr.charges` on it via
    easy_run, asserts the reported total charge, and removes the file.
    (Python 2 source: uses the `file()` builtin and `print` statements.)
    """
    fn='test_zn_his_charge.pdb'
    f=file(fn, 'wb')
    f.write(pdb_lines)
    f.close()
    cmd = 'qr.charges %s verbose=1' % (fn)
    if 0: print cmd  # flip to 1 when debugging the command line
    rc = easy_run.go(cmd)
    # The tool must report a total charge of 0 for this fragment.
    assert 'Charge: 0' in rc.stdout_lines
    os.remove(fn)
    return rc
# qrefine unit-test entry point: derive the test prefix from the file name
# and hand `run` to the shared runner.
if(__name__=='__main__'):
    prefix = os.path.basename(__file__).replace(".py","")
    run_tests.runner(function=run, prefix=prefix, disable=False)
| 228 | 0 | 23 |
5d973a87f39d560b2655e7710aee9c5c99bf2cd5 | 1,693 | py | Python | DataStructures/Hashing/ChainingHashing.py | Yarintop/Data-Structures-And-Algorithms-In-Python | 55db9e7f39211c42988171d51ef2659041df1aa1 | [
"MIT"
] | null | null | null | DataStructures/Hashing/ChainingHashing.py | Yarintop/Data-Structures-And-Algorithms-In-Python | 55db9e7f39211c42988171d51ef2659041df1aa1 | [
"MIT"
] | null | null | null | DataStructures/Hashing/ChainingHashing.py | Yarintop/Data-Structures-And-Algorithms-In-Python | 55db9e7f39211c42988171d51ef2659041df1aa1 | [
"MIT"
] | null | null | null | from HashFunctions import HashFunctions
# def __getitem__(self, data):
# pos = self.hashFunction(data) % self.maxSize
if __name__ == "__main__":
h = ChainingHashing()
h.insert(1, 'a')
h.insert(2, 'b')
h.insert(3, 'c')
h.insert(3, 'd')
print(h[3])
| 28.694915 | 84 | 0.478441 | from HashFunctions import HashFunctions
class ChainingHashing:
    """Hash map with separate chaining: each slot holds a list of (key, value)."""

    def __init__(self, maxSize = 10000, hashFunction = HashFunctions.djb2a) -> None:
        self.maxSize = maxSize
        # BUG FIX: `[[]] * maxSize` created maxSize references to ONE shared
        # list, so every key was chained into the same bucket. The
        # comprehension creates an independent list per slot.
        self.hash = [[] for _ in range(maxSize)]
        self.hashFunction = hashFunction

    def _bucket(self, key):
        # Map the key's hash onto its chain (slot) in the table.
        return self.hash[self.hashFunction(key) % self.maxSize]

    def insert(self, key, data):
        """Insert `data` under `key`, overwriting any existing entry."""
        bucket = self._bucket(key)
        for i, (k, v) in enumerate(bucket):
            if key == k:
                bucket[i] = (key, data)  # key already present: overwrite
                break
        else:
            bucket.append((key, data))  # key absent: chain a new pair

    def remove(self, key):
        """Delete `key`; raises ValueError if it is not stored."""
        bucket = self._bucket(key)
        if len(bucket) == 0:
            raise ValueError(f"{key} is not in the HashMap.")
        for i, (k, v) in enumerate(bucket):
            if k == key:
                del bucket[i]
                return
        raise ValueError(f"{key} is not in HashMap.")

    def __getitem__(self, key):
        """Return the value stored under `key`, or None if absent."""
        for k, v in self._bucket(key):
            if k == key:
                return v
        return None
# def __getitem__(self, data):
# pos = self.hashFunction(data) % self.maxSize
if __name__ == "__main__":
    # Smoke test: the second insert under key 3 overwrites the first,
    # so this prints 'd'.
    h = ChainingHashing()
    h.insert(1, 'a')
    h.insert(2, 'b')
    h.insert(3, 'c')
    h.insert(3, 'd')
    print(h[3])
| 1,219 | 1 | 158 |
617675dd9196551a79d5a7317754b632108d6ec1 | 7,179 | py | Python | src/showbits.py | phungj/MSOE_Comp_Prog_Py | 95e7521b28d3dbcb6279e7baf03067ca27acbe37 | [
"MIT"
] | null | null | null | src/showbits.py | phungj/MSOE_Comp_Prog_Py | 95e7521b28d3dbcb6279e7baf03067ca27acbe37 | [
"MIT"
] | null | null | null | src/showbits.py | phungj/MSOE_Comp_Prog_Py | 95e7521b28d3dbcb6279e7baf03067ca27acbe37 | [
"MIT"
] | null | null | null | import warnings
import sys
def bits(v, numbits=None):
"""
Display the bits used to store an object
:param v: the value to display the bits of
:param numbits: the number of bits to display. Only used for int objects. bytes objects
always show the exact bits stored, and int objects default to
not showing any leading zeros.
"""
_check_version()
if type(v) is bytes:
if numbits:
warnings.warn('Ignoring provided argument numbits = {} while formatting bytes object'.format(numbits))
hexstring = _bits_bytes(v)
elif type(v) is str:
if numbits:
warnings.warn('Ignoring provided argument numbits = {} while formatting str object'.format(numbits))
hexstring = _bits_str(v)
elif type(v) is int:
hexstring = _bits_int(v, numbits)
else:
raise TypeError('display_bits can only display bytes, str, or int objects')
print(hexstring)
def _bits_bytes(bytes):
"""
Internal implementation of bits() for bytes objects
:param bytes: the bytes object to display the bits of
:return: A string with an ASCII '0' or '1' for each bit.
(An ASCII binary string)
"""
s = ''
for b in bytes:
s += ' ' + _bits_int(b, numbits=8)
return s[1:] # Drop initial space
def _bits_str(s):
"""
Internal implementatino of bits() for it objects
:param s: the string to display the bits of
:return: A string with an ASCII '0' or '1' for each bit
(An ASCII binary string)
"""
display = ''
for c in s:
display += '\n' + _bits_int(ord(c),21)
return display[1:] # Drop initial \n
def _bits_int(v, numbits=None):
"""
Internal implementation of bits() for int objects
:param v: the int value to display in bits
:param numbits: The number of bits to display. Defaults to not showing any leading zeros.
:return: A string with an ASCII '0' or '1' for each bit.
(An ASCII binary string)
"""
if numbits and 2**numbits-1 < v:
raise ValueError('Cannot store '+str(v)+' in '+str(numbits)+' bits')
if numbits:
s = "{:0{digits}b}".format(v,digits=str(numbits))
else:
s = "{:b}".format(v)
return _break(s,8) # Break into groups of 8 bits
def shorthand(v, numplaces=None):
"""
Display the bits used to store an object in hexadecimal shorthand
:param v: The value to display the bits of in hexadecimal shorthand
:param numplaces: The number of hexadecimal places (digits) to display.
e.g. 0x1ef8 has four hexadecimal places.
Only used for int objects. bytes objects always display
2 hexadecimal digits for each byte. int objects default
to showing all hexadecimal places without any leading zeros.
"""
_check_version()
if type(v) is bytes:
if numplaces:
warnings.warn('Ignoring provided argument numbits = {} while formatting bytes object'.format(numplaces))
hexstring = _shorthand_bytes(v)
elif type(v) is str:
if numplaces:
warnings.warn('Ignoring provided argument numbits = {} while formatting str object'.format(numplaces))
hexstring = _shorthand_str(v)
elif type(v) is int:
hexstring = _shorthand_int(v, numplaces)
else:
raise TypeError('display_bits can only display bytes, str, or int objects')
print(hexstring)
def _shorthand_bytes(bytes):
"""
Internal implementation of shorthand() for bytes objects
:param bytes: The bytes object to in hexadecimal shorthand
:return: A string object holding a single ASCII character for each place.
e.g., for 0x1ef8, returns '1ef8'
(An ASCII hexadecimal string)
"""
s = ''
for b in bytes:
s += ' ' + _shorthand_int(b, numplaces=2)
return s[1:] # Drop initial space
def _shorthand_str(s):
"""
Internal implementation of shorthand() for str objects
:param s: String to show shorthand of
:return: ASCII hexadecimal string: A string where each ASCII
characters stores a hexadecimal digit.
"""
display = ''
for c in s:
display += '\n' + _shorthand_int(ord(c),6)
return display[1:] # Drop initial \n
def _shorthand_int(v, numplaces=None):
"""
Internal implementation of the shorthand() for int objects
:param v: The int value to display the bits of in hexadecimal shorthand
:param numplaces: The number of hexadecimal places (digits) to display.
e.g. 0x1ef8 has four hexadecimal places.
int objects default to showing all hexadecimal places
without any leading zeros.
:return: A string object holding a single ASCII character for each place.
e.g., for 0x1ef8, returns '1ef8'
(An ASCII hexadecimal string)
"""
if numplaces and 2**(numplaces*4)-1 < v:
raise ValueError('Cannot store ' + str(v) +' in ' + str(numplaces) + ' hex digits')
if numplaces:
s = "{:0{digits}x}".format(v,digits=str(numplaces))
else:
s = "{:x}".format(v)
return _break(s,2) # Break into bytes (2 hex digits each)
def _break(bitstring,groupsize):
"""
Break a binary string into groups of groupsize digits.
For example, _break('1100001111',4) returns '11 0000 1111'
:param bitstring: The ASCII binary string to break into groups
:param groupsize: The number of bits to group together in each group
:return: A string with spaces inserted between each group.
"""
broken = ''
for i in range(len(bitstring)-groupsize,-1,-groupsize):
broken = bitstring[i:i+groupsize] + ' ' + broken
if len(bitstring)%groupsize > 0: # Avoid adding space before empty left-most group
broken = bitstring[0:len(bitstring)%groupsize] + ' ' + broken
return broken[:-1] # Drop right-most space
def _check_version():
"""
Check that the code is being run with the right version of Python
:raises: RuntimeError if Python 2 is used.
"""
if sys.version_info < (3,):
raise RuntimeError('This course requires Python 3. Please uninstall Python 2 and install Python 3 in its place.'
'(If you need Python 2 for a different class or project, please talk to me.)')
def _tests():
"""
Internal tests. These are run if the module is executed as a stand-alone script.
"""
print("shorthand(b'\\x0a\\x0d')")
shorthand(b'\x0a\x0d')
print("bits(b'A\\r\\n')")
bits(b'A\r\n')
print("bits(b'\\x0a\\x0d')")
bits(b'\x0a\x0d')
print("shorthand(15)")
shorthand(15)
print("shorthand(1000)")
shorthand(1000)
print("bits(15)")
bits(15)
print("bits(1000)")
bits(1000)
print("shorthand('A\\r\\n')")
shorthand('A\r\n')
print("shorthand('\\x0a\\x0d')")
shorthand('\x0a\x0d')
print("bits('A\\r\\n')")
bits('A\r\n')
print("bits('\\x0a\\x0d')")
bits('\x0a\x0d')
if __name__ == "__main__":
_tests()
pass # Breakpoint for debugging | 33.863208 | 120 | 0.6246 | import warnings
import sys
def bits(v, numbits=None):
    """
    Display the bits used to store an object.

    :param v: the value to display the bits of (bytes, str, or int)
    :param numbits: the number of bits to display; only used for int
                    objects. bytes objects always show the exact bits
                    stored, and int objects default to not showing any
                    leading zeros.
    """
    _check_version()
    if type(v) is int:
        bitstring = _bits_int(v, numbits)
    elif type(v) is bytes:
        if numbits:
            warnings.warn('Ignoring provided argument numbits = {} while formatting bytes object'.format(numbits))
        bitstring = _bits_bytes(v)
    elif type(v) is str:
        if numbits:
            warnings.warn('Ignoring provided argument numbits = {} while formatting str object'.format(numbits))
        bitstring = _bits_str(v)
    else:
        raise TypeError('display_bits can only display bytes, str, or int objects')
    print(bitstring)
def _bits_bytes(bytes):
    """
    Internal implementation of bits() for bytes objects.

    :param bytes: the bytes object to display the bits of
    :return: an ASCII binary string: one '0'/'1' character per bit, with
             each byte rendered as its own 8-bit group
    """
    # Render each byte as exactly 8 bits; groups are space-separated.
    return ' '.join(_bits_int(b, numbits=8) for b in bytes)
def _bits_str(s):
    """
    Internal implementation of bits() for str objects.

    :param s: the string to display the bits of
    :return: an ASCII binary string, one line per character
    """
    # 21 bits per character — enough for any Unicode code point (max
    # U+10FFFF), so every character's row has the same width.
    return '\n'.join(_bits_int(ord(c), 21) for c in s)
def _bits_int(v, numbits=None):
    """
    Internal implementation of bits() for int objects.

    :param v: the int value to display in bits
    :param numbits: number of bits to display; defaults to no leading zeros
    :return: an ASCII binary string, grouped into bytes (8-bit groups)
    :raises ValueError: if v does not fit in numbits bits
    """
    if numbits:
        if 2**numbits-1 < v:
            raise ValueError('Cannot store '+str(v)+' in '+str(numbits)+' bits')
        unbroken = "{:0{digits}b}".format(v, digits=str(numbits))
    else:
        unbroken = "{:b}".format(v)
    # Insert a space every 8 bits for readability.
    return _break(unbroken, 8)
def shorthand(v, numplaces=None):
    """
    Display the bits used to store an object in hexadecimal shorthand.

    :param v: the value to display in hexadecimal shorthand
    :param numplaces: the number of hexadecimal places (digits) to display,
                      e.g. 0x1ef8 has four hexadecimal places. Only used
                      for int objects; bytes objects always display two hex
                      digits per byte, and int objects default to showing
                      all places without leading zeros.
    """
    _check_version()
    if type(v) is int:
        hexstring = _shorthand_int(v, numplaces)
    elif type(v) is bytes:
        # NOTE: the warning text says "numbits"; kept verbatim so the
        # emitted message is identical to before.
        if numplaces:
            warnings.warn('Ignoring provided argument numbits = {} while formatting bytes object'.format(numplaces))
        hexstring = _shorthand_bytes(v)
    elif type(v) is str:
        if numplaces:
            warnings.warn('Ignoring provided argument numbits = {} while formatting str object'.format(numplaces))
        hexstring = _shorthand_str(v)
    else:
        raise TypeError('display_bits can only display bytes, str, or int objects')
    print(hexstring)
def _shorthand_bytes(bytes):
    """
    Internal implementation of shorthand() for bytes objects.

    :param bytes: the bytes object to render in hexadecimal shorthand
    :return: an ASCII hexadecimal string, two digits per byte, bytes
             separated by spaces
    """
    return ' '.join(_shorthand_int(b, numplaces=2) for b in bytes)
def _shorthand_str(s):
    """
    Internal implementation of shorthand() for str objects.

    :param s: string to show shorthand of
    :return: an ASCII hexadecimal string, one line per character
    """
    # Six hex digits per character covers the largest Unicode code point.
    return '\n'.join(_shorthand_int(ord(c), 6) for c in s)
def _shorthand_int(v, numplaces=None):
    """
    Internal implementation of shorthand() for int objects.

    :param v: the int value to render in hexadecimal shorthand
    :param numplaces: number of hex digits to display; defaults to showing
                      all places without leading zeros
    :return: an ASCII hexadecimal string, grouped two digits (one byte) each
    :raises ValueError: if v does not fit in numplaces hex digits
    """
    if numplaces:
        # Each hex digit holds 4 bits, hence the 2**(numplaces*4) bound.
        if 2**(numplaces*4)-1 < v:
            raise ValueError('Cannot store ' + str(v) +' in ' + str(numplaces) + ' hex digits')
        unbroken = "{:0{digits}x}".format(v, digits=str(numplaces))
    else:
        unbroken = "{:x}".format(v)
    # Group into bytes: two hex digits per group.
    return _break(unbroken, 2)
def _break(bitstring,groupsize):
"""
Break a binary string into groups of groupsize digits.
For example, _break('1100001111',4) returns '11 0000 1111'
:param bitstring: The ASCII binary string to break into groups
:param groupsize: The number of bits to group together in each group
:return: A string with spaces inserted between each group.
"""
broken = ''
for i in range(len(bitstring)-groupsize,-1,-groupsize):
broken = bitstring[i:i+groupsize] + ' ' + broken
if len(bitstring)%groupsize > 0: # Avoid adding space before empty left-most group
broken = bitstring[0:len(bitstring)%groupsize] + ' ' + broken
return broken[:-1] # Drop right-most space
def _check_version():
"""
Check that the code is being run with the right version of Python
:raises: RuntimeError if Python 2 is used.
"""
if sys.version_info < (3,):
raise RuntimeError('This course requires Python 3. Please uninstall Python 2 and install Python 3 in its place.'
'(If you need Python 2 for a different class or project, please talk to me.)')
def _tests():
    """
    Internal tests. These are run if the module is executed as a stand-alone script.
    """
    # (label, call) pairs executed in order: each prints the expression
    # being demonstrated, then runs it.
    demos = [
        ("shorthand(b'\\x0a\\x0d')", lambda: shorthand(b'\x0a\x0d')),
        ("bits(b'A\\r\\n')", lambda: bits(b'A\r\n')),
        ("bits(b'\\x0a\\x0d')", lambda: bits(b'\x0a\x0d')),
        ("shorthand(15)", lambda: shorthand(15)),
        ("shorthand(1000)", lambda: shorthand(1000)),
        ("bits(15)", lambda: bits(15)),
        ("bits(1000)", lambda: bits(1000)),
        ("shorthand('A\\r\\n')", lambda: shorthand('A\r\n')),
        ("shorthand('\\x0a\\x0d')", lambda: shorthand('\x0a\x0d')),
        ("bits('A\\r\\n')", lambda: bits('A\r\n')),
        ("bits('\\x0a\\x0d')", lambda: bits('\x0a\x0d')),
    ]
    for label, demo in demos:
        print(label)
        demo()
# Run the demo suite only when executed directly; importing stays silent.
if __name__ == "__main__":
    _tests()
    pass # Breakpoint for debugging
af9bb922f3751e526ef2ef2b4b54d0b758578ed4 | 296 | py | Python | Plots and Graphs/potassium_boxplot.py | archit-47/Predicting-Chronic-Kidney-Diseases | 6f0a1ca68302a8ef2c5ba15ae136a011faf97aab | [
"MIT"
] | null | null | null | Plots and Graphs/potassium_boxplot.py | archit-47/Predicting-Chronic-Kidney-Diseases | 6f0a1ca68302a8ef2c5ba15ae136a011faf97aab | [
"MIT"
] | null | null | null | Plots and Graphs/potassium_boxplot.py | archit-47/Predicting-Chronic-Kidney-Diseases | 6f0a1ca68302a8ef2c5ba15ae136a011faf97aab | [
"MIT"
] | null | null | null | import csv
import matplotlib.pyplot as plt
# Pull the potassium ("pot") column out of the dataset as floats.
with open('idsfinal.csv', mode='r') as csv_file:
    potassium_values = [float(record["pot"]) for record in csv.DictReader(csv_file)]
# Box plot of the potassium distribution.
plt.boxplot(potassium_values)
plt.ylabel("Potassium")
plt.title("Potassium distribution")
plt.show()
| 18.5 | 47 | 0.736486 | import csv
import matplotlib.pyplot as plt
# Read the dataset and collect the potassium ("pot") column for plotting.
with open('idsfinal.csv',mode='r') as csv_file:
    csv_reader=csv.DictReader(csv_file)
    mydata=[]
    for row in csv_reader:
        # CSV values are strings; convert to float for the box plot
        mydata.append(float(row["pot"]))
# Render a box plot of the potassium distribution.
plt.boxplot(mydata)
plt.ylabel("Potassium")
plt.title("Potassium distribution")
plt.show()
| 0 | 0 | 0 |
a768a7c7b3a894f4c526eaefb45d9d4f53fb312d | 12,091 | py | Python | classify.py | rothadamg/UPSITE | 80cce9c9dfc097bb5aaecb0a0975e6a49fdf184c | [
"MIT"
] | null | null | null | classify.py | rothadamg/UPSITE | 80cce9c9dfc097bb5aaecb0a0975e6a49fdf184c | [
"MIT"
] | null | null | null | classify.py | rothadamg/UPSITE | 80cce9c9dfc097bb5aaecb0a0975e6a49fdf184c | [
"MIT"
] | 1 | 2018-12-21T04:12:59.000Z | 2018-12-21T04:12:59.000Z | #!/usr/bin/env python
"""
Detect events or relations from text.
"""
from train import workdir, getDetector, getSteps
import sys, os
import tempfile
import codecs
import Utils.Settings as Settings
import Utils.Stream as Stream
import Utils.Download
from Utils.Connection.Connection import getConnection
import Utils.Download
from Detectors.Preprocessor import Preprocessor
def classify(input, model, output, workDir=None, step=None, omitSteps=None,
             goldInput=None, detector=None, debug=False, clear=False,
             preprocessorTag="-preprocessed.xml.gz", preprocessorParams=None, bioNLPSTParams=None):
    """
    Detect events or relations from text.
    @param input: The input file in either interaction XML or BioNLP ST format. Can also be a PMID or TEES default corpus name.
    @param model: A path to a model file or the name of a TEES default model.
    @param output: The output file stem. Output files will be of the form output-*
    @param workDir: If intermediate files need to be saved, they will go here.
    @param step: A step=substep pair, where the steps are PREPROCESS and CLASSIFY
    @param omitSteps: step=substep parameters, where multiple substeps can be defined.
    @param goldInput: a version of the corpus file with gold annotation. Enables measuring of performance
    @param detector: a Detector object, or a string defining one to be imported. If None, will be read from model.
    @param debug: In debug mode, more output is shown, and some temporary intermediate files are saved
    @param clear: Remove existing workDir
    @param preprocessorTag: preprocessor output file will be output + preprocessorTag
    @param preprocessorParams: Optional parameters controlling preprocessing. If None, will be read from model.
    @param bioNLPSTParams: Optional parameters controlling BioNLP ST format output. If None, will be read from model.
    """
    # Resolve paths up-front: workdir() below may change the current directory
    input = os.path.abspath(input)
    if goldInput != None: goldInput = os.path.abspath(goldInput)
    if model != None: model = os.path.abspath(model)
    # Initialize working directory
    if workDir != None: # use a permanent work directory
        workdir(workDir, clear)
    Stream.openLog(output + "-log.txt") # log in the output directory
    # Get input files
    input, preprocess = getInput(input)
    model = getModel(model)
    # Define processing steps
    selector, detectorSteps, omitDetectorSteps = getSteps(step, omitSteps, ["PREPROCESS", "CLASSIFY"])
    if not preprocess:
        selector.markOmitSteps("PREPROCESS")
    classifyInput = input
    if selector.check("PREPROCESS"):
        preprocessor = Preprocessor()
        if debug:
            preprocessor.setArgForAllSteps("debug", True)
        preprocessorOutput = output + preprocessorTag
        #preprocessor.debug = debug
        #preprocessor.source = input # This has to be defined already here, needs to be fixed later
        #preprocessor.requireEntitiesForParsing = True # parse only sentences which contain named entities
        # Reuse an existing preprocessed corpus unless clearing was requested
        if os.path.exists(preprocessorOutput) and not clear: #os.path.exists(preprocessor.getOutputPath("FIND-HEADS")):
            #print >> sys.stderr, "Preprocessor output", preprocessor.getOutputPath("FIND-HEADS"), "exists, skipping preprocessing."
            print >> sys.stderr, "Preprocessor output", preprocessorOutput, "exists, skipping preprocessing."
            classifyInput = preprocessorOutput # preprocessor.getOutputPath("FIND-HEADS")
        else:
            #print >> sys.stderr, "Preprocessor output", preprocessor.getOutputPath("FIND-HEADS"), "does not exist"
            print >> sys.stderr, "Preprocessor output", preprocessorOutput, "does not exist"
            print >> sys.stderr, "------------ Preprocessing ------------"
            # Remove some of the unnecessary intermediate files
            #preprocessor.setIntermediateFiles({"Convert":None, "SPLIT-SENTENCES":None, "PARSE":None, "CONVERT-PARSE":None, "SPLIT-NAMES":None})
            # Process input into interaction XML
            classifyInput = preprocessor.process(input, preprocessorOutput, preprocessorParams, model, [], fromStep=detectorSteps["PREPROCESS"], toStep=None, omitSteps=omitDetectorSteps["PREPROCESS"])
    if selector.check("CLASSIFY"):
        # Run the trained detector over the (preprocessed) corpus
        detector = getDetector(detector, model)[0]() # initialize detector object
        detector.debug = debug
        detector.bioNLPSTParams = detector.getBioNLPSharedTaskParams(bioNLPSTParams, model)
        detector.classify(classifyInput, model, output, goldData=goldInput, fromStep=detectorSteps["CLASSIFY"], omitSteps=omitDetectorSteps["CLASSIFY"], workDir=workDir)
# Command-line entry point: parse options and run classify()
if __name__=="__main__":
    # Import Psyco if available
    try:
        import psyco
        psyco.full()
        print >> sys.stderr, "Found Psyco, using"
    except ImportError:
        print >> sys.stderr, "Psyco not installed"
    from optparse import OptionParser
    optparser = OptionParser(description="Predict events/relations")
    optparser.add_option("-i", "--input", default=None, dest="input", help="input")
    optparser.add_option("-o", "--output", default=None, dest="output", help="output file stem")
    optparser.add_option("-w", "--workdir", default=None, dest="workdir", help="output directory")
    optparser.add_option("-m", "--model", default=None, dest="model", help="TEES model")
    optparser.add_option("-d", "--detector", default=None, dest="detector", help="")
    optparser.add_option("-c", "--connection", default=None, dest="connection", help="")
    optparser.add_option("-g", "--gold", default=None, dest="gold", help="annotated version of the input file (optional)")
    optparser.add_option("-p", "--preprocessorParams", default=None, dest="preprocessorParams", help="")
    optparser.add_option("-b", "--bioNLPSTParams", default=None, dest="bioNLPSTParams", help="")
    # Debugging and process control
    optparser.add_option("--step", default=None, dest="step", help="")
    optparser.add_option("--omitSteps", default=None, dest="omitSteps", help="")
    optparser.add_option("--clearAll", default=False, action="store_true", dest="clearAll", help="Delete all files")
    optparser.add_option("--debug", default=False, action="store_true", dest="debug", help="More verbose output")
    (options, args) = optparser.parse_args()
    # An output stem is mandatory
    assert options.output != None
    classify(options.input, options.model, options.output, options.workdir, options.step, options.omitSteps,
             options.gold, options.detector, options.debug, options.clearAll,
             preprocessorParams=options.preprocessorParams, bioNLPSTParams=options.bioNLPSTParams)
| 51.67094 | 200 | 0.648168 | #!/usr/bin/env python
"""
Detect events or relations from text.
"""
from train import workdir, getDetector, getSteps
import sys, os
import tempfile
import codecs
import Utils.Settings as Settings
import Utils.Stream as Stream
import Utils.Download
from Utils.Connection.Connection import getConnection
import Utils.Download
from Detectors.Preprocessor import Preprocessor
def classify(input, model, output, workDir=None, step=None, omitSteps=None,
             goldInput=None, detector=None, debug=False, clear=False,
             preprocessorTag="-preprocessed.xml.gz", preprocessorParams=None, bioNLPSTParams=None):
    """
    Detect events or relations from text.
    @param input: The input file in either interaction XML or BioNLP ST format. Can also be a PMID or TEES default corpus name.
    @param model: A path to a model file or the name of a TEES default model.
    @param output: The output file stem. Output files will be of the form output-*
    @param workDir: If intermediate files need to be saved, they will go here.
    @param step: A step=substep pair, where the steps are PREPROCESS and CLASSIFY
    @param omitSteps: step=substep parameters, where multiple substeps can be defined.
    @param goldInput: a version of the corpus file with gold annotation. Enables measuring of performance
    @param detector: a Detector object, or a string defining one to be imported. If None, will be read from model.
    @param debug: In debug mode, more output is shown, and some temporary intermediate files are saved
    @param clear: Remove existing workDir
    @param preprocessorTag: preprocessor output file will be output + preprocessorTag
    @param preprocessorParams: Optional parameters controlling preprocessing. If None, will be read from model.
    @param bioNLPSTParams: Optional parameters controlling BioNLP ST format output. If None, will be read from model.
    """
    # Make paths absolute before any change of working directory
    input = os.path.abspath(input)
    if goldInput != None: goldInput = os.path.abspath(goldInput)
    if model != None: model = os.path.abspath(model)
    # Initialize working directory
    if workDir != None: # use a permanent work directory
        workdir(workDir, clear)
    Stream.openLog(output + "-log.txt") # log in the output directory
    # Get input files
    input, preprocess = getInput(input)
    model = getModel(model)
    # Define processing steps
    selector, detectorSteps, omitDetectorSteps = getSteps(step, omitSteps, ["PREPROCESS", "CLASSIFY"])
    if not preprocess:
        selector.markOmitSteps("PREPROCESS")
    classifyInput = input
    if selector.check("PREPROCESS"):
        preprocessor = Preprocessor()
        if debug:
            preprocessor.setArgForAllSteps("debug", True)
        preprocessorOutput = output + preprocessorTag
        #preprocessor.debug = debug
        #preprocessor.source = input # This has to be defined already here, needs to be fixed later
        #preprocessor.requireEntitiesForParsing = True # parse only sentences which contain named entities
        # Skip preprocessing when a previous run's output is already present
        if os.path.exists(preprocessorOutput) and not clear: #os.path.exists(preprocessor.getOutputPath("FIND-HEADS")):
            #print >> sys.stderr, "Preprocessor output", preprocessor.getOutputPath("FIND-HEADS"), "exists, skipping preprocessing."
            print >> sys.stderr, "Preprocessor output", preprocessorOutput, "exists, skipping preprocessing."
            classifyInput = preprocessorOutput # preprocessor.getOutputPath("FIND-HEADS")
        else:
            #print >> sys.stderr, "Preprocessor output", preprocessor.getOutputPath("FIND-HEADS"), "does not exist"
            print >> sys.stderr, "Preprocessor output", preprocessorOutput, "does not exist"
            print >> sys.stderr, "------------ Preprocessing ------------"
            # Remove some of the unnecessary intermediate files
            #preprocessor.setIntermediateFiles({"Convert":None, "SPLIT-SENTENCES":None, "PARSE":None, "CONVERT-PARSE":None, "SPLIT-NAMES":None})
            # Process input into interaction XML
            classifyInput = preprocessor.process(input, preprocessorOutput, preprocessorParams, model, [], fromStep=detectorSteps["PREPROCESS"], toStep=None, omitSteps=omitDetectorSteps["PREPROCESS"])
    if selector.check("CLASSIFY"):
        # Instantiate and run the detector
        detector = getDetector(detector, model)[0]() # initialize detector object
        detector.debug = debug
        detector.bioNLPSTParams = detector.getBioNLPSharedTaskParams(bioNLPSTParams, model)
        detector.classify(classifyInput, model, output, goldData=goldInput, fromStep=detectorSteps["CLASSIFY"], omitSteps=omitDetectorSteps["CLASSIFY"], workDir=workDir)
def getModel(model):
    """Resolve a model path, falling back to the default models in Settings.MODEL_DIR.

    @param model: path to a model, name of a default model, or None
    @return: absolute model path, or None if model was None
    @raise Exception: if no matching model (or default model) can be found
    """
    if model == None:
        return None
    if not os.path.exists(model):
        print >> sys.stderr, "Model", model, "doesn't exist, looking for a default model"
        modelName = os.path.basename(model)
        found = None
        if hasattr(Settings, "MODEL_DIR"):
            # Try the bare name plus the common default-model suffixes
            for suffix in ["", "-test", ".zip", "-test.zip"]:
                predefined = os.path.join(Settings.MODEL_DIR, modelName + suffix)
                if os.path.exists(predefined):
                    print >> sys.stderr, "Classifying with default model", predefined
                    found = predefined
                    model = found
                    break
            if found == None:
                print >> sys.stderr, "No default model found for definition", modelName
        else:
            print >> sys.stderr, "Default model directory MODEL_DIR not defined in Settings"
        if found == None:
            raise Exception("Model " + str(model) + " not found")
    else:
        print >> sys.stderr, "Classifying with model", model
    return os.path.abspath(model)
def getInput(input, model=None):
    """Resolve the corpus input to classify.

    @param input: corpus file path, PubMed ID, "pmid-..." list, predefined corpus
        name, or None (derive the corpus name from the model name)
    @param model: model path, used only when input is None
    @return: (absolute input path, preprocess flag)
    @raise Exception: if a predefined corpus file cannot be found
    """
    if input == None: # Get a corpus corresponding to the model
        assert model != None
        input = model.split(".")[0]
    if os.path.basename(input).isdigit(): # PMID
        print >> sys.stderr, "Classifying PubMed abstract", os.path.basename(input)
        input = getPubMed(os.path.basename(input))
        preprocess = True
    # Inspect the last character of the basename (with surrounding hyphens
    # stripped) to detect a "pmid-<ids>" style input.
    # NOTE(review): strip('-')[-1] tests only the FINAL character, so any
    # basename ending in a digit triggers the PMID-list path; this looks like it
    # was meant to be split('-')[-1] -- confirm intent before changing further.
    b = None
    try:
        a = os.path.basename(input)
        b = a.strip('-')[-1]
    except Exception:
        pass
    # Guard against b being None (e.g. empty basename); previously this line
    # raised AttributeError instead of falling through to the other branches.
    if b is not None and b.isdigit():
        print >> sys.stderr, "Classifying PMID list", os.path.basename(input)
        input = getPubMed2(os.path.basename(input))
        preprocess = True
    elif not os.path.exists(input): # Use a predefined corpus
        defaultInput = os.path.basename(input)
        found = None # initialised before the loop so it is always defined afterwards
        for suffix in ["", ".xml", ".xml.gz"]:
            predefined = os.path.join(Settings.CORPUS_DIR, defaultInput + suffix)
            if os.path.exists(predefined):
                print >> sys.stderr, "Classifying default corpus file", predefined
                found = predefined
                preprocess = False
                break
        if found == None:
            raise Exception("Default corpus file for input " + str(defaultInput) + " not found")
        input = found
    else:
        print >> sys.stderr, "Classifying input", input
        preprocess = True
    return os.path.abspath(input), preprocess
def getPubMed(pmid):
    """Download a single PubMed abstract and save its title and abstract as text.

    @param pmid: PubMed identifier (digits)
    @return: path to the generated ".txt" file in the system temp directory
    """
    print >> sys.stderr, "*************************** NOTE ***************************"
    print >> sys.stderr, "Do not attempt to do large-scale classification of PubMed"
    print >> sys.stderr, "abstracts with this feature. For that, use the downloadable"
    print >> sys.stderr, "PubMed release. This is a demonstration feature only, and"
    print >> sys.stderr, "abusing it will cause you to be banned from PubMed!!!!!!!!!!!!"
    print >> sys.stderr, "************************************************************"
    print >> sys.stderr, "Downloading PubMed abstract", pmid
    tempDir = tempfile.gettempdir()
    url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=" + str(pmid) + "&retmode=xml"
    downloaded = os.path.join(tempDir, "pmid-" + str(pmid))
    Utils.Download.download(url, downloaded + ".xml", False)
    # Read the text from the XML
    # (naive line-based scrape of <ArticleTitle> and <AbstractText> elements)
    f = codecs.open(downloaded + ".xml", "rt", "utf-8")
    textElements = []
    for line in f:
        line = line.strip()
        for tag in ["<ArticleTitle>", "<AbstractText>"]:
            if line.startswith(tag):
                textElements.append(line.split(">", 1)[1].split("<")[0])
    f.close()
    # Save the text file
    f = codecs.open(downloaded + ".txt", "wt", "utf-8")
    f.write("\n".join(textElements))
    f.close()
    # Return text file name
    return downloaded + ".txt"
def getPubMed2(pmid):
    """Download a comma-separated list of PubMed abstracts and save them as text.

    Near-duplicate of getPubMed(); here pmid is passed to the efetch URL as-is
    (a pre-joined id list) rather than via str().

    @param pmid: PubMed identifier list string
    @return: path to the generated ".txt" file in the system temp directory
    """
    print >> sys.stderr, "*************************** NOTE ***************************"
    print >> sys.stderr, "Do not attempt to do large-scale classification of PubMed"
    print >> sys.stderr, "abstracts with this feature. For that, use the downloadable"
    print >> sys.stderr, "PubMed release. This is a demonstration feature only, and"
    print >> sys.stderr, "abusing it will cause you to be banned from PubMed!"
    print >> sys.stderr, "But, you have successfully activated the large-scale download feature!"
    print >> sys.stderr, "************************************************************"
    print >> sys.stderr, "Downloading PubMed abstracts", pmid
    tempDir = tempfile.gettempdir()
    url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=" + pmid + "&retmode=xml"
    downloaded = os.path.join(tempDir, "pmid-" + pmid)
    Utils.Download.download(url, downloaded + ".xml", False)
    # Read the text from the XML
    # (naive line-based scrape of <ArticleTitle> and <AbstractText> elements)
    f = codecs.open(downloaded + ".xml", "rt", "utf-8")
    textElements = []
    for line in f:
        line = line.strip()
        for tag in ["<ArticleTitle>", "<AbstractText>"]:
            if line.startswith(tag):
                textElements.append(line.split(">", 1)[1].split("<")[0])
    f.close()
    # Save the text file
    f = codecs.open(downloaded + ".txt", "wt", "utf-8")
    f.write("\n".join(textElements))
    f.close()
    # Return text file name
    return downloaded + ".txt"
# Command-line entry point: parse options and run classify()
if __name__=="__main__":
    # Import Psyco if available
    try:
        import psyco
        psyco.full()
        print >> sys.stderr, "Found Psyco, using"
    except ImportError:
        print >> sys.stderr, "Psyco not installed"
    from optparse import OptionParser
    optparser = OptionParser(description="Predict events/relations")
    optparser.add_option("-i", "--input", default=None, dest="input", help="input")
    optparser.add_option("-o", "--output", default=None, dest="output", help="output file stem")
    optparser.add_option("-w", "--workdir", default=None, dest="workdir", help="output directory")
    optparser.add_option("-m", "--model", default=None, dest="model", help="TEES model")
    optparser.add_option("-d", "--detector", default=None, dest="detector", help="")
    optparser.add_option("-c", "--connection", default=None, dest="connection", help="")
    optparser.add_option("-g", "--gold", default=None, dest="gold", help="annotated version of the input file (optional)")
    optparser.add_option("-p", "--preprocessorParams", default=None, dest="preprocessorParams", help="")
    optparser.add_option("-b", "--bioNLPSTParams", default=None, dest="bioNLPSTParams", help="")
    # Debugging and process control
    optparser.add_option("--step", default=None, dest="step", help="")
    optparser.add_option("--omitSteps", default=None, dest="omitSteps", help="")
    optparser.add_option("--clearAll", default=False, action="store_true", dest="clearAll", help="Delete all files")
    optparser.add_option("--debug", default=False, action="store_true", dest="debug", help="More verbose output")
    (options, args) = optparser.parse_args()
    # An output stem is mandatory
    assert options.output != None
    classify(options.input, options.model, options.output, options.workdir, options.step, options.omitSteps,
             options.gold, options.detector, options.debug, options.clearAll,
             preprocessorParams=options.preprocessorParams, bioNLPSTParams=options.bioNLPSTParams)
cd8f015fd57b190763452236dbe2f747d3309b7f | 837 | py | Python | tests/test_set.py | maxslarsson/tennis-probability | f26021b305e2b8abd87acad846454f7ce02e9199 | [
"MIT"
] | null | null | null | tests/test_set.py | maxslarsson/tennis-probability | f26021b305e2b8abd87acad846454f7ce02e9199 | [
"MIT"
] | null | null | null | tests/test_set.py | maxslarsson/tennis-probability | f26021b305e2b8abd87acad846454f7ce02e9199 | [
"MIT"
] | null | null | null | import pytest
from tennis_probability import set, InvalidInput, InvalidProbability, NegativeNumber
| 27.9 | 84 | 0.628435 | import pytest
from tennis_probability import set, InvalidInput, InvalidProbability, NegativeNumber
def test_set():
    """Exercise set(): boundary probabilities, known scores, and invalid inputs."""
    # Probability boundaries at the start of a set
    assert set(0, 0, 0) == 0
    assert set(0, 0, 0.50) == 0.5
    assert set(0, 0, 1) == 1
    # Known-good probabilities at mid-set scores
    expected = {
        (5, 3, 0.13): 0.008146509339015371,
        (2, 2, 0.37): 0.024086243446167555,
        (4, 1, 0.91): 0.9999999999999992,
    }
    for args, probability in expected.items():
        assert set(*args) == probability
    # Out-of-range game scores must raise InvalidInput
    for args in ((10, 3, 0.2), (2, 812, 0.5), (5, 5, 0.51)):
        with pytest.raises(InvalidInput):
            set(*args)
    # Negative game counts must raise NegativeNumber
    with pytest.raises(NegativeNumber):
        set(-1, 0, 0.9)
    # Probabilities outside [0, 1] must raise InvalidProbability
    for args in ((2, 3, 1.0001), (1, 0, -1.001)):
        with pytest.raises(InvalidProbability):
            set(*args)
a59ded17348263bc8f03888d65350f5f62929739 | 26,195 | py | Python | pe_tree/runtime.py | lybtongji/pe_tree | 2be607fc55702293cd02cbc6cda5283452464aff | [
"Apache-2.0"
] | 1,271 | 2020-07-27T14:46:44.000Z | 2022-03-30T15:58:24.000Z | pe_tree/runtime.py | lybtongji/pe_tree | 2be607fc55702293cd02cbc6cda5283452464aff | [
"Apache-2.0"
] | 9 | 2020-08-04T13:23:38.000Z | 2021-05-18T16:53:49.000Z | pe_tree/runtime.py | lybtongji/pe_tree | 2be607fc55702293cd02cbc6cda5283452464aff | [
"Apache-2.0"
] | 168 | 2020-07-27T13:56:42.000Z | 2022-03-29T12:48:00.000Z | #
# Copyright (c) 2020 BlackBerry Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PE Tree runtime abstraction layer"""
# Standard imports
import os
import tempfile
import threading
import struct
# Config parser imports
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
# pefile
import pefile
# Qt imports
from PyQt5 import QtCore, Qt, QtGui, QtWidgets
# Capstone imports
try:
import capstone
HAVE_CAPSTONE = True
except ImportError:
HAVE_CAPSTONE = False
# PE Tree imports
import pe_tree.info
# pylint: disable=unused-argument
class RuntimeSignals(QtCore.QObject):
    """Allows worker threads to invoke runtime methods on the UI thread.

    Warning:
        This class must be instantiated from the UI thread!
    """
    def invoke_method(self, method, *args):
        """Invoke runtime method on the UI thread and return its result.

        Args:
            method (str): Name of the Runtime slot to invoke
            *args: Arguments forwarded to the slot

        Returns:
            Whatever the slot stored in ``runtime.ret``
        """
        # Ensure only 1 thread at a time can access runtime.ret
        self.runtime.lock.acquire()
        # NOTE(review): self.runtime and self.opaque are expected to be assigned
        # by whoever constructs this object -- not visible in this file view
        self.runtime.opaque = self.opaque
        # Invoke the runtime method in the UI thread
        # (BlockingQueuedConnection blocks this worker until the slot has run)
        QtCore.QMetaObject.invokeMethod(self.runtime, method, Qt.Qt.BlockingQueuedConnection, *args)
        # Get the method result
        ret = self.runtime.ret
        self.runtime.lock.release()
        return ret
class Runtime(QtCore.QObject):
"""Base runtime class"""
@QtCore.pyqtSlot()
def get_temp_dir(self):
"""Get temporary directory path
Returns:
str: Temporary directory path
"""
self.ret = tempfile.gettempdir()
return self.ret
@QtCore.pyqtSlot()
def get_script_dir(self):
"""Get script directory
Returns:
str: Script directory path
"""
self.ret = os.path.dirname(os.path.realpath(pe_tree.info.__file__))
return self.ret
    def show_widget(self):
        """Display the widget.

        Calls show() on ``self.widget`` (presumably assigned by the embedding
        application -- not initialised in this view) and returns True.
        """
        self.widget.show()
        self.ret = True
        return self.ret
@QtCore.pyqtSlot(str, str, str, bool)
def ask_file(self, filename, caption, filter="All Files (*)", save=False):
"""Ask user to select a filename via open/save dialog
Args:
filename (str): Preferred filename
caption (str): Save/open dialog caption
filter (str): File extension filter
save (bool): Present the save dialog if True, otherwise open
Returns:
str: Filename if successful, otherwise None
"""
dialog = QtWidgets.QFileDialog()
options = QtWidgets.QFileDialog.Options()
if not save:
# Open file dialog
filename, _ = dialog.getOpenFileName(self.widget, caption, filename, filter, options=options)
else:
# Save file dialog
if filename[0] == ".":
# Remove leading dot from section names
filename = filename[1:]
filename, _ = dialog.getSaveFileName(self.widget, caption, filename, filter, options=options)
if filename:
self.ret = filename
else:
self.ret = ""
return self.ret
@QtCore.pyqtSlot(object, object)
def read_pe(self, image_base, size=0):
"""Read PE image from memory
Args:
image_base (int): Address of PE file in-memory
size (int, optional): Size of PE file in-memory
Returns:
bytearray: Data of PE image if successful, otherwise an empty bytearray
"""
self.ret = b""
try:
# Read the module's PE headers to determine the image size
pe = pefile.PE(data=self.get_bytes(image_base, 0x1000), fast_load=True)
# Read the remainder of the PE image
pe = pefile.PE(data=self.get_bytes(image_base, max(pe.OPTIONAL_HEADER.SizeOfImage, pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData)), fast_load=True)
# Fix up section pointers/sizes
for section in pe.sections:
section.PointerToRawData = section.VirtualAddress
section.SizeOfRawData = section.Misc_VirtualSize + (pe.OPTIONAL_HEADER.SectionAlignment - (section.Misc_VirtualSize % pe.OPTIONAL_HEADER.SectionAlignment))
# Get PE data
self.ret = pe.write()
except:
pass
return self.ret
    @QtCore.pyqtSlot(int, int)
    def get_bytes(self, start, size):
        """Read a sequence of bytes from memory.

        Base-class stub: always returns None here; presumably overridden by the
        tool-specific runtime implementations -- TODO confirm.

        Args:
            start (int): Start address
            size (int): Number of bytes to read

        Returns:
            bytes: Byte string if successful, otherwise None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int)
    def get_byte(self, offset):
        """Read 8-bits from memory.

        Args:
            offset (int): Offset to read from

        Returns:
            Result of ``get_bytes(offset, 1)`` -- i.e. a length-1 byte string
            (or None from the base stub), NOT an unpacked integer like
            get_word/get_dword/get_qword.
            NOTE(review): the asymmetry looks unintentional; confirm whether
            callers expect raw bytes here before changing it.
        """
        self.ret = self.get_bytes(offset, 1)
        return self.ret
@QtCore.pyqtSlot(int)
def get_word(self, offset):
"""Read 16-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Word value
"""
self.ret = struct.unpack("<H", self.get_bytes(offset, 2))[0]
return self.ret
@QtCore.pyqtSlot(int)
def get_dword(self, offset):
"""Read 32-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Dword value
"""
self.ret = struct.unpack("<I", self.get_bytes(offset, 4))[0]
return self.ret
@QtCore.pyqtSlot(int)
def get_qword(self, offset):
"""Read 64-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Qword value
"""
self.ret = struct.unpack("<Q", self.get_bytes(offset, 8))[0]
return self.ret
    @QtCore.pyqtSlot(int)
    def get_name(self, offset):
        """Get symbol name for the given address.

        Base-class stub: always returns "".

        Args:
            offset (int): Address to get name for

        Returns:
            str: Name of symbol if successful, otherwise an empty string
        """
        self.ret = ""
        return self.ret

    @QtCore.pyqtSlot(int)
    def get_segment_name(self, offset):
        """Get segment/module name for the given address.

        Base-class stub: always returns "".

        Args:
            offset (int): Address to get name for

        Returns:
            str: Name of segment/module if successful, otherwise an empty string
        """
        self.ret = ""
        return self.ret

    @QtCore.pyqtSlot(int)
    def is_writable(self, offset):
        """Determine if the memory address is write-able.

        Base-class stub: always returns False.

        Args:
            offset (int): Address to check for write permissions

        Returns:
            bool: True if the memory address resides in a writable page of memory, otherwise False
        """
        self.ret = False
        return self.ret

    @QtCore.pyqtSlot(int)
    def get_label(self, offset):
        """Get the disassembly label for the given address.

        Base-class stub: always returns "".

        Args:
            offset (int): Address to get label for

        Returns:
            str: Label name if successful, otherwise an empty string
        """
        self.ret = ""
        return self.ret
    @QtCore.pyqtSlot(object, int)
    def jumpto(self, item, offset):
        """User double-clicked an item in the tree; by default disassemble using capstone.

        Disassembles up to 0x100 bytes of the item's data at ``offset`` and logs
        each instruction. A ValueError (e.g. from item.get_data) is swallowed so
        a bad offset simply produces no output. Always returns True.

        Args:
            item (pe_tree.tree): Item that was double-clicked by the user
            offset (int): Address to jump to
        """
        try:
            # item.tree.disasm is presumably a capstone.Cs instance and may be
            # falsy when capstone is unavailable -- TODO confirm
            if item.tree.disasm:
                for i in item.tree.disasm.disasm(item.get_data(size=0x100), offset):
                    item.tree.form.runtime.log("0x{:x}:\t{}\t{}".format(i.address, i.mnemonic, i.op_str))
        except ValueError:
            pass
        self.ret = True
        return self.ret
    @QtCore.pyqtSlot(str)
    def log(self, output):
        """Print to output.

        Appends ``output`` to the currently selected output view of the PE Tree
        form (if any) and scrolls it to the end. Always returns True.

        Args:
            output (str): Text to append
        """
        output_view = self.pe_tree_form.output_stack.currentWidget()
        if output_view:
            # Make the output pane visible before appending
            self.pe_tree_form.output_stack.setVisible(True)
            output_view.setVisible(True)
            output_view.append(output)
            # Keep the view scrolled to the most recent line
            output_view.moveCursor(QtGui.QTextCursor.End)
        self.ret = True
        return self.ret
    @QtCore.pyqtSlot(int, int)
    def make_string(self, offset, size):
        """Convert the data at the given offset to an ASCII string.

        Base-class stub: no-op, returns None.

        Args:
            offset (int): Address to convert to string
            size (int): Length of the string in bytes
        """
        self.ret = None
        return self.ret

    @QtCore.pyqtSlot(int, str)
    def make_comment(self, offset, comment):
        """Add a comment to the disassembly.

        Base-class stub: no-op, returns None.

        Args:
            offset (int): Address to comment
            comment (str): Comment string
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int, int, str, str, bytes)
    def make_segment(self, offset, size, class_name="DATA", name="pe_map", data=None):
        """Add a segment in the IDB.

        Base-class stub: no-op, returns None.

        Args:
            offset (int): Base address of the new segment
            size (int): Size of the new segment in bytes
            class_name (str): "CODE" or "DATA" (default)
            name (str): Name of the segment, default is "pe_map"
            data (bytes): Data to populate the segment with (optional)
        """
        self.ret = None
        return self.ret

    @QtCore.pyqtSlot(int)
    def resolve_address(self, offset):
        """Get module/symbol name for the given address.

        Base-class stub: always returns ("", "").

        Args:
            offset (int): Address to get module and symbol name for

        Returns:
            (str, str): Tuple containing module name and API name. Either name may be "" if not available.
        """
        self.ret = ("", "")
        return self.ret
    @QtCore.pyqtSlot(int)
    def make_qword(self, offset):
        """Convert data at the specified address to a Qword. Base-class stub: no-op.

        Args:
            offset (int): Offset to convert
        """
        self.ret = None
        return self.ret

    @QtCore.pyqtSlot(int)
    def make_dword(self, offset):
        """Convert data at the specified address to a Dword. Base-class stub: no-op.

        Args:
            offset (int): Offset to convert
        """
        self.ret = None
        return self.ret

    @QtCore.pyqtSlot(int)
    def make_word(self, offset):
        """Convert data at the specified address to a Word. Base-class stub: no-op.

        Args:
            offset (int): Offset to convert
        """
        self.ret = None
        return self.ret

    @QtCore.pyqtSlot(int, int)
    def make_byte(self, offset, size=1):
        """Convert data at the specified address to byte(s). Base-class stub: no-op.

        Args:
            offset (int): Offset to convert
            size (int): Number of bytes to convert (default 1)
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int, str, int)
    def make_name(self, offset, name, flags=0):
        """Name the given offset. Base-class stub: no-op.

        Args:
            offset (int): Offset to name
            name (str): Name of offset
            flags (int): Optional flags to pass to idc.set_name (IDA runtime)
        """
        self.ret = None
        return self.ret

    @QtCore.pyqtSlot()
    def get_names(self):
        """Get list of all available symbols/names. Base-class stub: returns None."""
        self.ret = None
        return self.ret
@QtCore.pyqtSlot(object, object, object, object)
def find_iat_ptrs(self, pe, image_base, size, get_word):
"""Find likely IAT pointers using capstone for disassembly
Args:
pe (pefile): Parsed PE file
image_base (int): Base address of image
size (int): Size of image
get_word (object): Callback routine to read a Dword/Qword from memory (depending on the image architecture)
Returns:
[(int, int, str, str)]: Tuple containing IAT offset, xref, module name and API name
"""
# Initialise capstone
disasm = self.init_capstone(pe)
disasm.detail = True
iat_ptrs = []
# Traverse sections
for section in pe.sections:
# Is the section executable?
if not section.Characteristics & pefile.SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_EXECUTE"]:
continue
# Does the section contain anything?
data = section.get_data()
if not data:
continue
# Disassemble section
for i in disasm.disasm(section.get_data(), image_base + section.VirtualAddress):
# Attempt to read the current instruction's effective memory address operand (if present)
ptr = 0
if i.mnemonic in ["call", "push", "jmp"]:
if i.operands[0].type == capstone.x86.X86_OP_MEM:
# Get memory offset for branch instructions
ptr = i.operands[0].value.mem.disp
elif i.mnemonic in ["mov", "lea"]:
if i.operands[0].type == capstone.x86.X86_OP_REG and i.operands[1].type == capstone.x86.X86_OP_MEM:
# Get memory offset for mov/lea instructions
ptr = i.operands[1].value.mem.disp
# Does the instruction's memory address operand seem somewhat valid?!
if ptr < 0x1000:
continue
# Resolve pointer from memory operand
try:
iat_offset = get_word(ptr)
except:
continue
# Ignore offset if it is in our image
if image_base <= iat_offset <= image_base + size:
continue
# Get module and API name for offset
module, api = self.resolve_address(iat_offset)
# Ignore the offset if it is in a debug segment or stack etc
if api and module and module.endswith(".dll"):
if not iat_offset in iat_ptrs:
# Add IAT offset, address to patch, module name and API name to list
iat_ptrs.append((iat_offset, i.address + len(i.bytes) - 4, module, api))
self.ret = iat_ptrs
return self.ret
    @QtCore.pyqtSlot(object)
    def find_pe(self, cursor=None):
        """Find MZ/PE headers in memory. Base-class stub: returns None.

        Args:
            cursor (bool): If True, search for MZ/PE at the current cursor position, otherwise scan the entire address space

        Returns:
            [(int, str, bool)]: Tuples containing MZ offset, section name and bool set to True if the image is 64-bit
        """
        self.ret = None
        return self.ret
@QtCore.pyqtSlot(object)
def init_capstone(self, pe):
""" Initialise capstone disassembler
Args:
pe (pefile): PE file whose machine type is used to initialise capstone
Returns:
[capstone.Cs]: Capstone disassembler or None if unavailable/not supported
"""
self.ret = None
if HAVE_CAPSTONE:
mt = pefile.MACHINE_TYPE
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_I386"]:
self.ret = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_AMD64"]:
self.ret = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_ARM"]:
self.ret = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM)
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_POWERPC"]:
self.ret = capstone.Cs(capstone.CS_ARCH_PPC, capstone.CS_MODE_LITTLE_ENDIAN)
if pe.FILE_HEADER.Machine in [mt["IMAGE_FILE_MACHINE_THUMB"], mt["IMAGE_FILE_MACHINE_ARMNT"]]:
self.ret = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_THUMB)
if pe.FILE_HEADER.Machine in [mt["IMAGE_FILE_MACHINE_R3000"], mt["IMAGE_FILE_MACHINE_R4000"], mt["IMAGE_FILE_MACHINE_R10000"]]:
self.ret = capstone.Cs(capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS32)
return self.ret
@QtCore.pyqtSlot(str, str, object)
def get_config_option(self, section, option, fallback):
"""Read configuration option from INI file
Args:
section (str): Name of config section
option (str): Name of config option
fallback (object): Default fallback value if option is non-existing
Returns:
object: Configuration option if present, otherwise fallback argument
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
if self.config.has_section(section) and self.config.has_option(section, option):
if isinstance(fallback, bool):
self.ret = self.config.getboolean(section, option)
else:
self.ret = self.config.get(section, option)
else:
self.ret = fallback
self.config_lock.release()
return self.ret
def set_config_option(self, section, option, value):
"""Set configuration option in INI file
Args:
section (str): Name of config section
option (str): Name of config option
value (object): Default config value
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
self.config.set(section, option, str(value))
self.save_config()
self.config_lock.release()
def read_config(self):
"""Load configuration from INI file
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
# Initialise and parse config
self.config = ConfigParser()
self.config.read(self.config_file)
self.config_lock.release()
def set_default_config_option(self, config, section, option, default):
"""Set config option, fallback to default. Used internally to save config.
Args:
config (ConfigParser): Configuration parser
section (str): Name of config section
option (str): Name of config option
default (object): Default value to use if option is non-existing
Warning:
Only invoke from UI thread
"""
config.set(section, option, self.get_config_option(section, option, default))
def save_config(self):
"""Save all configuration options to INI file
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
try:
with open(self.config_file, "w") as config_file:
config = ConfigParser()
config.add_section("config")
self.set_default_config_option(config, "config", "debug", "False")
self.set_default_config_option(config, "config", "fonts", ",".join(["Consolas", "Monospace", "Courier"]))
self.set_default_config_option(config, "config", "passwords", ",".join(["", "infected"]))
self.set_default_config_option(config, "config", "virustotal_url", "https://www.virustotal.com/gui/search")
self.set_default_config_option(config, "config", "cyberchef_url", "https://gchq.github.io/CyberChef")
config.write(config_file)
self.config = config
except EnvironmentError:
pass
self.config_lock.release()
def get_available_font(self, families=None):
"""Read fonts from config and return first available font in Qt
Args:
families (list): Optional list of default fonts, otherwise this is read using get_config_option
Returns:
QtGui.QFont: QFont initialised using the family specified via config/families argument
Warning:
Only invoke from UI thread
"""
if not families:
# Read fonts from config
families = self.get_config_option("config", "fonts", None)
if families:
families = families.split(",")
if not families:
# Fallback to some sane fonts
families = ["Consolas", "Monospace", "Courier"]
# Check if fonts are available in Qt font database
for family in families:
family = family.strip()
if family in QtGui.QFontDatabase().families():
return QtGui.QFont(family)
return QtGui.QFont()
def about_box(self):
"""Show application about box
Warning:
Only invoke from UI thread
"""
message_box = QtWidgets.QMessageBox()
message_box.setIcon(QtWidgets.QMessageBox.Information)
message_box.setWindowTitle("About {}".format(pe_tree.info.__title__))
message_box.setText("<a href={}>{} - {}</a>".format(pe_tree.info.__url__, pe_tree.info.__title__, pe_tree.info.__version__))
message_box.setInformativeText("<span style=\"white-space: nowrap;\">Developed by <a href=\"{}\">BlackBerry Research and Intelligence Team</a></span><br><br>{}".format("https://www.blackberry.com/us/en/company/research-and-intelligence", pe_tree.info.__copyright__))
message_box.setStandardButtons(QtWidgets.QMessageBox.Ok)
message_box.exec_()
| 31.790049 | 274 | 0.605001 | #
# Copyright (c) 2020 BlackBerry Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PE Tree runtime abstraction layer"""
# Standard imports
import os
import tempfile
import threading
import struct
# Config parser imports
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
# pefile
import pefile
# Qt imports
from PyQt5 import QtCore, Qt, QtGui, QtWidgets
# Capstone imports
try:
import capstone
HAVE_CAPSTONE = True
except ImportError:
HAVE_CAPSTONE = False
# PE Tree imports
import pe_tree.info
# pylint: disable=unused-argument
class RuntimeSignals(QtCore.QObject):
    """Allows worker threads to invoke runtime methods on the UI thread.

    Every wrapper below marshals its arguments into a blocking queued
    invocation of the same-named slot on the associated Runtime object
    (see invoke_method), so non-UI threads can safely use the runtime.

    Warning:
        This class must be instantiated from the UI thread!
    """
    def __init__(self, runtime, opaque=None):
        super(RuntimeSignals, self).__init__()
        # Caller-supplied context dict, published to the runtime on each call
        self.opaque = opaque if opaque != None else {}
        self.runtime = runtime
    def invoke_method(self, method, *args):
        """Invoke runtime method on the UI thread and return its result."""
        # Ensure only 1 thread at a time can access runtime.ret
        self.runtime.lock.acquire()
        self.runtime.opaque = self.opaque
        # Invoke the runtime method in the UI thread (blocks until it returns)
        QtCore.QMetaObject.invokeMethod(self.runtime, method, Qt.Qt.BlockingQueuedConnection, *args)
        # Get the method result
        ret = self.runtime.ret
        self.runtime.lock.release()
        return ret
    # --- Thin wrappers: each forwards to the same-named Runtime slot ---
    def get_temp_dir(self):
        return self.invoke_method("get_temp_dir")
    def ask_file(self, filename, caption, filter="All Files (*)", save=False):
        return self.invoke_method("ask_file", Qt.Q_ARG(str, filename), Qt.Q_ARG(str, caption), Qt.Q_ARG(str, filter), Qt.Q_ARG(bool, save))
    def read_pe(self, image_base, size=0):
        return self.invoke_method("read_pe", Qt.Q_ARG(object, image_base), Qt.Q_ARG(object, size))
    def get_bytes(self, start, size):
        return self.invoke_method("get_bytes", Qt.Q_ARG(object, start), Qt.Q_ARG(object, size))
    def get_byte(self, offset):
        return self.invoke_method("get_byte", Qt.Q_ARG(object, offset))
    def get_word(self, offset):
        return self.invoke_method("get_word", Qt.Q_ARG(object, offset))
    def get_dword(self, offset):
        return self.invoke_method("get_dword", Qt.Q_ARG(object, offset))
    def get_qword(self, offset):
        return self.invoke_method("get_qword", Qt.Q_ARG(object, offset))
    def get_name(self, offset):
        return self.invoke_method("get_name", Qt.Q_ARG(object, offset))
    def get_segment_name(self, offset):
        return self.invoke_method("get_segment_name", Qt.Q_ARG(object, offset))
    def is_writable(self, offset):
        return self.invoke_method("is_writable", Qt.Q_ARG(object, offset))
    def get_label(self, offset):
        return self.invoke_method("get_label", Qt.Q_ARG(object, offset))
    # Note: the item argument is intentionally not forwarded; Runtime.jumpto
    # receives only the offset through this proxy
    def jumpto(self, item, offset):
        return self.invoke_method("jumpto", Qt.Q_ARG(object, offset))
    def log(self, output):
        return self.invoke_method("log", Qt.Q_ARG(str, output))
    def make_string(self, offset, size):
        return self.invoke_method("make_string", Qt.Q_ARG(object, offset), Qt.Q_ARG(object, size))
    def make_comment(self, offset, comment):
        return self.invoke_method("make_comment", Qt.Q_ARG(object, offset), Qt.Q_ARG(str, str(comment)))
    def make_segment(self, offset, size, class_name="DATA", name="pe_map", data=None):
        return self.invoke_method("make_segment", Qt.Q_ARG(object, offset), Qt.Q_ARG(object, size), Qt.Q_ARG(str, class_name), Qt.Q_ARG(str, name), Qt.Q_ARG(bytes, data))
    def resolve_address(self, offset):
        return self.invoke_method("resolve_address", Qt.Q_ARG(object, offset))
    def make_qword(self, offset):
        return self.invoke_method("make_qword", Qt.Q_ARG(object, offset))
    def make_dword(self, offset):
        return self.invoke_method("make_dword", Qt.Q_ARG(object, offset))
    def make_word(self, offset):
        return self.invoke_method("make_word", Qt.Q_ARG(object, offset))
    def make_byte(self, offset, size=1):
        return self.invoke_method("make_byte", Qt.Q_ARG(object, offset))
    def make_name(self, offset, name, flags=0):
        return self.invoke_method("make_name", Qt.Q_ARG(object, offset), Qt.Q_ARG(str, name), Qt.Q_ARG(int, flags))
    def find_iat_ptrs(self, pe, image_base, size, get_word):
        return self.invoke_method("find_iat_ptrs", Qt.Q_ARG(object, pe), Qt.Q_ARG(object, image_base), Qt.Q_ARG(object, size), Qt.Q_ARG(object, get_word))
    def find_pe(self, cursor=False):
        return self.invoke_method("find_pe", Qt.Q_ARG(object, cursor))
    def init_capstone(self, pe):
        return self.invoke_method("init_capstone", Qt.Q_ARG(object, pe))
    def get_config_option(self, section, option, fallback):
        return self.invoke_method("get_config_option", Qt.Q_ARG(str, section), Qt.Q_ARG(str, option), Qt.Q_ARG(object, fallback))
class Runtime(QtCore.QObject):
    """Base runtime class.

    Provides default/stub implementations of memory access, naming and
    configuration slots; disassembler-specific runtimes override these.
    """
    def __init__(self, widget, args):
        super(Runtime, self).__init__()
        # Main Qt widget hosting the PE Tree view
        self.widget = widget
        # Result slot read by RuntimeSignals.invoke_method after a cross-thread call
        self.ret = None
        # Serialises access to self.ret across worker threads
        self.lock = threading.Lock()
        # Re-entrant: save_config() re-acquires it while set_config_option() holds it
        self.config_lock = threading.RLock()
        self.signals = RuntimeSignals(self)
        # Opaque per-invocation context supplied by RuntimeSignals
        self.opaque = {}
        self.args = args
        # NOTE(review): read_config() uses self.config_file, which is not set
        # here -- presumably assigned by the subclass before calling this
        # constructor; confirm
        self.read_config()
        self.save_config()
@QtCore.pyqtSlot()
def get_temp_dir(self):
"""Get temporary directory path
Returns:
str: Temporary directory path
"""
self.ret = tempfile.gettempdir()
return self.ret
@QtCore.pyqtSlot()
def get_script_dir(self):
"""Get script directory
Returns:
str: Script directory path
"""
self.ret = os.path.dirname(os.path.realpath(pe_tree.info.__file__))
return self.ret
def show_widget(self):
"""Display the widget"""
self.widget.show()
self.ret = True
return self.ret
@QtCore.pyqtSlot(str, str, str, bool)
def ask_file(self, filename, caption, filter="All Files (*)", save=False):
"""Ask user to select a filename via open/save dialog
Args:
filename (str): Preferred filename
caption (str): Save/open dialog caption
filter (str): File extension filter
save (bool): Present the save dialog if True, otherwise open
Returns:
str: Filename if successful, otherwise None
"""
dialog = QtWidgets.QFileDialog()
options = QtWidgets.QFileDialog.Options()
if not save:
# Open file dialog
filename, _ = dialog.getOpenFileName(self.widget, caption, filename, filter, options=options)
else:
# Save file dialog
if filename[0] == ".":
# Remove leading dot from section names
filename = filename[1:]
filename, _ = dialog.getSaveFileName(self.widget, caption, filename, filter, options=options)
if filename:
self.ret = filename
else:
self.ret = ""
return self.ret
@QtCore.pyqtSlot(object, object)
def read_pe(self, image_base, size=0):
"""Read PE image from memory
Args:
image_base (int): Address of PE file in-memory
size (int, optional): Size of PE file in-memory
Returns:
bytearray: Data of PE image if successful, otherwise an empty bytearray
"""
self.ret = b""
try:
# Read the module's PE headers to determine the image size
pe = pefile.PE(data=self.get_bytes(image_base, 0x1000), fast_load=True)
# Read the remainder of the PE image
pe = pefile.PE(data=self.get_bytes(image_base, max(pe.OPTIONAL_HEADER.SizeOfImage, pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData)), fast_load=True)
# Fix up section pointers/sizes
for section in pe.sections:
section.PointerToRawData = section.VirtualAddress
section.SizeOfRawData = section.Misc_VirtualSize + (pe.OPTIONAL_HEADER.SectionAlignment - (section.Misc_VirtualSize % pe.OPTIONAL_HEADER.SectionAlignment))
# Get PE data
self.ret = pe.write()
except:
pass
return self.ret
    @QtCore.pyqtSlot(int, int)
    def get_bytes(self, start, size):
        """Read a sequence of bytes from memory.

        Base implementation is a stub that always returns None; concrete
        runtimes override it.

        Args:
            start (int): Start address
            size (int): Number of bytes to read

        Returns:
            bytes: Bytes read if successful, otherwise None
        """
        self.ret = None
        return self.ret
@QtCore.pyqtSlot(int)
def get_byte(self, offset):
"""Read 8-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Byte value
"""
self.ret = self.get_bytes(offset, 1)
return self.ret
@QtCore.pyqtSlot(int)
def get_word(self, offset):
"""Read 16-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Word value
"""
self.ret = struct.unpack("<H", self.get_bytes(offset, 2))[0]
return self.ret
@QtCore.pyqtSlot(int)
def get_dword(self, offset):
"""Read 32-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Dword value
"""
self.ret = struct.unpack("<I", self.get_bytes(offset, 4))[0]
return self.ret
@QtCore.pyqtSlot(int)
def get_qword(self, offset):
"""Read 64-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Qword value
"""
self.ret = struct.unpack("<Q", self.get_bytes(offset, 8))[0]
return self.ret
    @QtCore.pyqtSlot(int)
    def get_name(self, offset):
        """Get symbol name for the given address.

        Base implementation is a stub that always returns ""; concrete
        runtimes override it.

        Args:
            offset (int): Address to get name for

        Returns:
            str: Name of symbol if successful, otherwise an empty string
        """
        self.ret = ""
        return self.ret
    @QtCore.pyqtSlot(int)
    def get_segment_name(self, offset):
        """Get segment/module name for the given address.

        Base implementation is a stub that always returns ""; concrete
        runtimes override it.

        Args:
            offset (int): Address to get name for

        Returns:
            str: Name of segment/module if successful, otherwise an empty string
        """
        self.ret = ""
        return self.ret
    @QtCore.pyqtSlot(int)
    def is_writable(self, offset):
        """Determine if the memory address is write-able.

        Base implementation is a stub that always returns False; concrete
        runtimes override it.

        Args:
            offset (int): Address to check for write permissions

        Returns:
            bool: True if the memory address resides in a writable page, otherwise False
        """
        self.ret = False
        return self.ret
    @QtCore.pyqtSlot(int)
    def get_label(self, offset):
        """Get the disassembly label for the given address.

        Base implementation is a stub that always returns ""; concrete
        runtimes override it.

        Args:
            offset (int): Address to get label for

        Returns:
            str: Label name if successful, otherwise an empty string
        """
        self.ret = ""
        return self.ret
    @QtCore.pyqtSlot(object, int)
    def jumpto(self, item, offset):
        """User double-clicked an item in the tree; by default disassemble using capstone.

        Args:
            item (pe_tree.tree): Item that was double-clicked by the user
            offset (int): Address to jump to

        Returns:
            bool: Always True
        """
        try:
            if item.tree.disasm:
                # Disassemble up to 0x100 bytes of the item's data and log one
                # line per instruction
                for i in item.tree.disasm.disasm(item.get_data(size=0x100), offset):
                    item.tree.form.runtime.log("0x{:x}:\t{}\t{}".format(i.address, i.mnemonic, i.op_str))
        except ValueError:
            # Presumably raised by item.get_data()/capstone on invalid input --
            # ignored so a bad click never crashes the UI; confirm
            pass
        self.ret = True
        return self.ret
    @QtCore.pyqtSlot(str)
    def log(self, output):
        """Append *output* to the currently active output view.

        Args:
            output (str): Text to append

        Returns:
            bool: Always True
        """
        # NOTE(review): self.pe_tree_form is not assigned in __init__;
        # presumably set by the application before log() is first called -- confirm
        output_view = self.pe_tree_form.output_stack.currentWidget()
        if output_view:
            self.pe_tree_form.output_stack.setVisible(True)
            output_view.setVisible(True)
            # Append and keep the view scrolled to the newest output
            output_view.append(output)
            output_view.moveCursor(QtGui.QTextCursor.End)
        self.ret = True
        return self.ret
    @QtCore.pyqtSlot(int, int)
    def make_string(self, offset, size):
        """Convert the data at the given offset to an ASCII string.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Address to convert to string
            size (int): Length of the string in bytes

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int, str)
    def make_comment(self, offset, comment):
        """Add a comment to the disassembly.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Address to comment
            comment (str): Comment string

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int, int, str, str, bytes)
    def make_segment(self, offset, size, class_name="DATA", name="pe_map", data=None):
        """Add a segment in the IDB.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Base address of the new segment
            size (int): Size of the new segment in bytes
            class_name (str): "CODE" or "DATA" (default)
            name (str): Name of the segment, default is "pe_map"
            data (bytes): Data to populate the segment with (optional)

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int)
    def resolve_address(self, offset):
        """Get module/symbol name for the given address.

        Base implementation is a stub that always returns ("", ""); concrete
        runtimes override it.

        Args:
            offset (int): Address to get module and symbol name for

        Returns:
            (str, str): Tuple containing module name and API name. Either
            name may be "" if not available.
        """
        self.ret = ("", "")
        return self.ret
    @QtCore.pyqtSlot(int)
    def make_qword(self, offset):
        """Convert data at the specified address to a Qword.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Offset to convert

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int)
    def make_dword(self, offset):
        """Convert data at the specified address to a Dword.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Offset to convert

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int)
    def make_word(self, offset):
        """Convert data at the specified address to a Word.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Offset to convert

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int, int)
    def make_byte(self, offset, size=1):
        """Convert data at the specified address to a byte.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Offset to convert
            size (int): Number of bytes (unused by this stub)

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(int, str, int)
    def make_name(self, offset, name, flags=0):
        """Name the given offset.

        Base implementation is a no-op stub; concrete runtimes override it.

        Args:
            offset (int): Offset to name
            name (str): Name of offset
            flags (int): Optional flags to pass to idc.set_name

        Returns:
            None
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot()
    def get_names(self):
        """Get list of all available symbols/names.

        Base implementation is a stub that always returns None; concrete
        runtimes override it.
        """
        self.ret = None
        return self.ret
    @QtCore.pyqtSlot(object, object, object, object)
    def find_iat_ptrs(self, pe, image_base, size, get_word):
        """Find likely IAT pointers using capstone for disassembly.

        Args:
            pe (pefile): Parsed PE file
            image_base (int): Base address of image
            size (int): Size of image
            get_word (object): Callback routine to read a Dword/Qword from
                memory (caller supplies one matching the image pointer width)

        Returns:
            [(int, int, str, str)]: Tuples containing IAT offset, address
            to patch, module name and API name
        """
        # Initialise capstone
        disasm = self.init_capstone(pe)
        # Detail mode is required to inspect instruction operands below
        disasm.detail = True
        iat_ptrs = []
        # Traverse sections
        for section in pe.sections:
            # Is the section executable?
            if not section.Characteristics & pefile.SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_EXECUTE"]:
                continue
            # Does the section contain anything?
            data = section.get_data()
            if not data:
                continue
            # Disassemble section
            for i in disasm.disasm(section.get_data(), image_base + section.VirtualAddress):
                # Attempt to read the current instruction's effective memory address operand (if present)
                ptr = 0
                if i.mnemonic in ["call", "push", "jmp"]:
                    if i.operands[0].type == capstone.x86.X86_OP_MEM:
                        # Get memory offset for branch instructions
                        ptr = i.operands[0].value.mem.disp
                elif i.mnemonic in ["mov", "lea"]:
                    if i.operands[0].type == capstone.x86.X86_OP_REG and i.operands[1].type == capstone.x86.X86_OP_MEM:
                        # Get memory offset for mov/lea instructions
                        ptr = i.operands[1].value.mem.disp
                # Does the instruction's memory address operand seem somewhat valid?!
                if ptr < 0x1000:
                    continue
                # Resolve pointer from memory operand
                try:
                    iat_offset = get_word(ptr)
                except:
                    continue
                # Ignore offset if it is in our image
                if image_base <= iat_offset <= image_base + size:
                    continue
                # Get module and API name for offset
                module, api = self.resolve_address(iat_offset)
                # Ignore the offset if it is in a debug segment or stack etc
                if api and module and module.endswith(".dll"):
                    if not iat_offset in iat_ptrs:
                        # Add IAT offset, address to patch, module name and API name to list.
                        # NOTE(review): the patch address assumes the displacement
                        # occupies the final 4 bytes of the instruction -- confirm
                        # for all matched encodings
                        iat_ptrs.append((iat_offset, i.address + len(i.bytes) - 4, module, api))
        self.ret = iat_ptrs
        return self.ret
@QtCore.pyqtSlot(object)
def find_pe(self, cursor=None):
"""Find MZ/PE headers in memory
Args:
cursor (bool): If True, search for MZ/PE at the current cursor position, otherwise scan the entire address space
Returns:
[(int, str, bool)]: Tuple containing MZ offset, section name and bool set to True if the image is 64-bit
"""
self.ret = None
return self.ret
@QtCore.pyqtSlot(object)
def init_capstone(self, pe):
""" Initialise capstone disassembler
Args:
pe (pefile): PE file whose machine type is used to initialise capstone
Returns:
[capstone.Cs]: Capstone disassembler or None if unavailable/not supported
"""
self.ret = None
if HAVE_CAPSTONE:
mt = pefile.MACHINE_TYPE
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_I386"]:
self.ret = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_AMD64"]:
self.ret = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_ARM"]:
self.ret = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM)
if pe.FILE_HEADER.Machine == mt["IMAGE_FILE_MACHINE_POWERPC"]:
self.ret = capstone.Cs(capstone.CS_ARCH_PPC, capstone.CS_MODE_LITTLE_ENDIAN)
if pe.FILE_HEADER.Machine in [mt["IMAGE_FILE_MACHINE_THUMB"], mt["IMAGE_FILE_MACHINE_ARMNT"]]:
self.ret = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_THUMB)
if pe.FILE_HEADER.Machine in [mt["IMAGE_FILE_MACHINE_R3000"], mt["IMAGE_FILE_MACHINE_R4000"], mt["IMAGE_FILE_MACHINE_R10000"]]:
self.ret = capstone.Cs(capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS32)
return self.ret
@QtCore.pyqtSlot(str, str, object)
def get_config_option(self, section, option, fallback):
"""Read configuration option from INI file
Args:
section (str): Name of config section
option (str): Name of config option
fallback (object): Default fallback value if option is non-existing
Returns:
object: Configuration option if present, otherwise fallback argument
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
if self.config.has_section(section) and self.config.has_option(section, option):
if isinstance(fallback, bool):
self.ret = self.config.getboolean(section, option)
else:
self.ret = self.config.get(section, option)
else:
self.ret = fallback
self.config_lock.release()
return self.ret
def set_config_option(self, section, option, value):
"""Set configuration option in INI file
Args:
section (str): Name of config section
option (str): Name of config option
value (object): Default config value
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
self.config.set(section, option, str(value))
self.save_config()
self.config_lock.release()
def read_config(self):
"""Load configuration from INI file
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
# Initialise and parse config
self.config = ConfigParser()
self.config.read(self.config_file)
self.config_lock.release()
def set_default_config_option(self, config, section, option, default):
"""Set config option, fallback to default. Used internally to save config.
Args:
config (ConfigParser): Configuration parser
section (str): Name of config section
option (str): Name of config option
default (object): Default value to use if option is non-existing
Warning:
Only invoke from UI thread
"""
config.set(section, option, self.get_config_option(section, option, default))
def save_config(self):
"""Save all configuration options to INI file
Warning:
Only invoke from UI thread
"""
self.config_lock.acquire()
try:
with open(self.config_file, "w") as config_file:
config = ConfigParser()
config.add_section("config")
self.set_default_config_option(config, "config", "debug", "False")
self.set_default_config_option(config, "config", "fonts", ",".join(["Consolas", "Monospace", "Courier"]))
self.set_default_config_option(config, "config", "passwords", ",".join(["", "infected"]))
self.set_default_config_option(config, "config", "virustotal_url", "https://www.virustotal.com/gui/search")
self.set_default_config_option(config, "config", "cyberchef_url", "https://gchq.github.io/CyberChef")
config.write(config_file)
self.config = config
except EnvironmentError:
pass
self.config_lock.release()
def get_available_font(self, families=None):
"""Read fonts from config and return first available font in Qt
Args:
families (list): Optional list of default fonts, otherwise this is read using get_config_option
Returns:
QtGui.QFont: QFont initialised using the family specified via config/families argument
Warning:
Only invoke from UI thread
"""
if not families:
# Read fonts from config
families = self.get_config_option("config", "fonts", None)
if families:
families = families.split(",")
if not families:
# Fallback to some sane fonts
families = ["Consolas", "Monospace", "Courier"]
# Check if fonts are available in Qt font database
for family in families:
family = family.strip()
if family in QtGui.QFontDatabase().families():
return QtGui.QFont(family)
return QtGui.QFont()
def about_box(self):
"""Show application about box
Warning:
Only invoke from UI thread
"""
message_box = QtWidgets.QMessageBox()
message_box.setIcon(QtWidgets.QMessageBox.Information)
message_box.setWindowTitle("About {}".format(pe_tree.info.__title__))
message_box.setText("<a href={}>{} - {}</a>".format(pe_tree.info.__url__, pe_tree.info.__title__, pe_tree.info.__version__))
message_box.setInformativeText("<span style=\"white-space: nowrap;\">Developed by <a href=\"{}\">BlackBerry Research and Intelligence Team</a></span><br><br>{}".format("https://www.blackberry.com/us/en/company/research-and-intelligence", pe_tree.info.__copyright__))
message_box.setStandardButtons(QtWidgets.QMessageBox.Ok)
message_box.exec_()
| 3,305 | 0 | 781 |
41ab13b76744a888101fd6efde15e7403c131fa3 | 11,010 | py | Python | ion/services/sa/observatory/test/test_platform_instrument.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 3 | 2016-09-20T09:50:06.000Z | 2018-08-10T01:41:38.000Z | ion/services/sa/observatory/test/test_platform_instrument.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | null | null | null | ion/services/sa/observatory/test/test_platform_instrument.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 2 | 2016-03-16T22:25:49.000Z | 2016-11-26T14:54:21.000Z | #!/usr/bin/env python
"""
@package ion.services.sa.observatory.test.test_platform_instrument
@file ion/services/sa/observatory/test/test_platform_instrument.py
@author Carlos Rueda, Maurice Manning
@brief Tests involving some more detailed platform-instrument interations
"""
__author__ = 'Carlos Rueda, Maurice Manning'
#
# Base preparations and construction of the platform topology are provided by
# the base class BaseTestPlatform.
#
# developer conveniences:
# bin/nosetests -sv ion/services/sa/observatory/test/test_platform_instrument.py:Test.test_platform_with_instrument_streaming
from pyon.public import log
from ion.agents.platform.test.base_test_platform_agent import BaseIntTestPlatform
from pyon.agent.agent import ResourceAgentClient
from ion.agents.platform.test.base_test_platform_agent import FakeProcess
from pyon.agent.agent import ResourceAgentState
from pyon.event.event import EventSubscriber
from interface.services.sa.iinstrument_management_service import InstrumentManagementServiceClient
from interface.objects import AgentCommand
import unittest
import gevent
from mock import patch
from pyon.public import CFG
# -------------------------------- MI ----------------------------
# the following adapted from test_instrument_agent to be able to import from
# the MI repo, using egg directly.
from ion.agents.instrument.test.load_test_driver_egg import load_egg
DVR_CONFIG = load_egg()
# now we can import SBE37ProtocolEvent
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37ProtocolEvent
# ------------------------------------------------------------------------
@patch.dict(CFG, {'endpoint': {'receive': {'timeout': 180}}})
| 46.455696 | 239 | 0.679473 | #!/usr/bin/env python
"""
@package ion.services.sa.observatory.test.test_platform_instrument
@file ion/services/sa/observatory/test/test_platform_instrument.py
@author Carlos Rueda, Maurice Manning
@brief Tests involving some more detailed platform-instrument interations
"""
__author__ = 'Carlos Rueda, Maurice Manning'
#
# Base preparations and construction of the platform topology are provided by
# the base class BaseTestPlatform.
#
# developer conveniences:
# bin/nosetests -sv ion/services/sa/observatory/test/test_platform_instrument.py:Test.test_platform_with_instrument_streaming
from pyon.public import log
from ion.agents.platform.test.base_test_platform_agent import BaseIntTestPlatform
from pyon.agent.agent import ResourceAgentClient
from ion.agents.platform.test.base_test_platform_agent import FakeProcess
from pyon.agent.agent import ResourceAgentState
from pyon.event.event import EventSubscriber
from interface.services.sa.iinstrument_management_service import InstrumentManagementServiceClient
from interface.objects import AgentCommand
import unittest
import gevent
from mock import patch
from pyon.public import CFG
# -------------------------------- MI ----------------------------
# the following adapted from test_instrument_agent to be able to import from
# the MI repo, using egg directly.
from ion.agents.instrument.test.load_test_driver_egg import load_egg
DVR_CONFIG = load_egg()
# now we can import SBE37ProtocolEvent
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37ProtocolEvent
# ------------------------------------------------------------------------
@patch.dict(CFG, {'endpoint': {'receive': {'timeout': 180}}})
class TestPlatformInstrument(BaseIntTestPlatform):
# def setUp(self):
# # Start container
# super(TestPlatformInstrument, self).setUp()
#
# self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
@unittest.skip('This test takes too long and gets Connect Refused errors.')
def test_platform_with_instrument_streaming(self):
#
# The following is with just a single platform and the single
# instrument "SBE37_SIM_08", which corresponds to the one on port 4008.
#
instr_key = "SBE37_SIM_08"
self.catch_alert= gevent.queue.Queue()
p_root = self._set_up_single_platform_with_some_instruments([instr_key])
self._start_platform(p_root)
self.addCleanup(self._stop_platform, p_root)
# get everything in command mode:
self._ping_agent()
self._initialize()
self._go_active()
self._run()
# note that this includes the instrument also getting to the command state
self._stream_instruments()
# get client to the instrument:
# the i_obj is a DotDict with various pieces captured during the
# set-up of the instrument, in particular instrument_device_id
i_obj = self._get_instrument(instr_key)
# log.debug("KK creating ResourceAgentClient")
# ia_client = ResourceAgentClient(i_obj.instrument_device_id,
# process=FakeProcess())
# log.debug("KK got ResourceAgentClient: %s", ia_client)
#
# # verify the instrument is command state:
# state = ia_client.get_agent_state()
# log.debug("KK instrument state: %s", state)
# self.assertEqual(state, ResourceAgentState.COMMAND)
# # start streaming:
# log.debug("KK starting instrument streaming: %s", state)
# cmd = AgentCommand(command=SBE37ProtocolEvent.START_AUTOSAMPLE)
#
# # NOTE: commented out because of error (see other #!! lines)
# self._ia_client.execute_resource(cmd)
"""
2013-04-03 14:17:22,018 DEBUG Dummy-7 ion.services.sa.observatory.test.test_platform_instrument:121 KK starting instrument streaming: RESOURCE_AGENT_STATE_COMMAND
ERROR
2013-04-03 14:17:22,020 INFO Dummy-7 mi_logger:98 Stopping pagent pid 53267
Exception AttributeError: AttributeError("'_DummyThread' object has no attribute '_Thread__block'",) in <module 'threading' from '/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.pyc'> ignored
2013-04-03 14:17:22,092 ERROR build/bdist.macosx-10.8-intel/egg/mi/core/instrument/port_agent_client.py Zero bytes received from port_agent socket
2013-04-03 14:17:22,098 ERROR build/bdist.macosx-10.8-intel/egg/mi/core/instrument/port_agent_client.py fn_local_callback_error, Connection error: Zero bytes received from port_agent socket
2013-04-03 14:17:22,102 ERROR build/bdist.macosx-10.8-intel/egg/mi/core/instrument/port_agent_client.py Attempting connection_level recovery; attempt number 1
2013-04-03 14:17:22,113 ERROR build/bdist.macosx-10.8-intel/egg/mi/core/instrument/port_agent_client.py _init_comms(): Exception initializing comms for localhost: 5008: error(61, 'Connection refused')
Traceback (most recent call last):
File "build/bdist.macosx-10.8-intel/egg/mi/core/instrument/port_agent_client.py", line 281, in _init_comms
self._create_connection()
File "build/bdist.macosx-10.8-intel/egg/mi/core/instrument/port_agent_client.py", line 327, in _create_connection
self.sock.connect((self.host, self.port))
File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/socket.py", line 224, in meth
return getattr(self._sock,name)(*args)
error: [Errno 61] Connection refused
2013
"""
# TODO set up listeners to verify things ...
#-------------------------------------------------------------------------------------
# Set up the subscriber to catch the alert event
#-------------------------------------------------------------------------------------
def callback_for_alert(event, *args, **kwargs):
    """Subscriber callback for DeviceStatusAlertEvent from the instrument.

    Fetches the instrument agent's aggregate status ('aggstatus'), asserts
    it reflects the alert that fired, then queues the event so the main
    test body can synchronize on it.

    Closure note: relies on `self` from the enclosing test method.
    """
    #log.debug("caught an alert: %s", event)
    log.debug('TestPlatformInstrument recieved ION event: args=%s, kwargs=%s, event=%s.',
              str(args), str(kwargs), str(args[0]))
    log.debug('TestPlatformInstrument recieved ION event obj %s: ', event)
    # Get a resource agent client to talk with the instrument agent.
    _ia_client = self._create_resource_agent_client(event.origin)
    # The assertions below treat aggstatus[1] as comms status and
    # aggstatus[2] as data status -- TODO confirm against the agent's
    # aggregate-status index enum.
    instAggStatus = _ia_client.get_agent(['aggstatus'])['aggstatus']
    log.debug('callback_for_alert consume_event aggStatus: %s', instAggStatus)
    if event.name == "temperature_warning_interval" and event.sub_type == "WARNING":
        log.debug('temperature_warning_interval WARNING: ')
        # Temperature warning must drive the data status to 3 (WARNING).
        self.assertEqual(instAggStatus[2], 3)
    if event.name == "late_data_warning" and event.sub_type == "WARNING":
        log.debug('LATE DATA WARNING: ')
        # Check for WARNING or OK because the ALL Clear event comes too quickly.
        self.assertTrue(instAggStatus[1] >= 2 )
    #
    # extended_instrument = self.imsclient.get_instrument_device_extension(i_obj.instrument_device_id)
    # log.debug(' callback_for_alert communications_status_roll_up: %s', extended_instrument.computed.communications_status_roll_up)
    # log.debug(' callback_for_alert data_status_roll_up: %s', extended_instrument.computed.data_status_roll_up)
    # Hand the event to the test body (which blocks on this queue).
    self.catch_alert.put(event)
def callback_for_agg_alert(event, *args, **kwargs):
    """Subscriber callback for DeviceAggregateStatusEvent (instrument or platform).

    InstrumentDevice events: asserts the device's comms status is at least
    "known". PlatformDevice events: asserts the platform's rolled-up status
    is no better than its own aggregate status and no better than its single
    child instrument's status. The event is then queued for the test body.

    Closure note: relies on `self` and `i_obj` from the enclosing test method.
    """
    #log.debug("caught an alert: %s", event)
    log.debug('TestPlatformInstrument recieved AggStatus event: args=%s, kwargs=%s, event=%s.',
              str(args), str(kwargs), str(args[0]))
    log.debug('TestPlatformInstrument recieved AggStatus event obj %s: ', event)
    log.debug('TestPlatformInstrument recieved AggStatus event origin_type: %s ', event.origin_type)
    log.debug('TestPlatformInstrument recieved AggStatus event origin: %s: ', event.origin)
    # Get a resource agent client to talk with the instrument agent.
    _ia_client = self._create_resource_agent_client(event.origin)
    aggstatus = _ia_client.get_agent(['aggstatus'])['aggstatus']
    log.debug('callback_for_agg_alert aggStatus: %s', aggstatus)
    # Index [1] is used below as comms status, [2] as data status --
    # TODO confirm against the agent's aggregate-status index enum.
    agg_status_comms = aggstatus[1]
    agg_status_data = aggstatus[2]
    #platform status lags so check that instrument device status is at least known
    if event.origin_type == "InstrumentDevice":
        self.assertTrue(agg_status_comms >= 2)
    if event.origin_type == "PlatformDevice":
        log.debug('PlatformDevice AggStatus ')
        # Rollup must be at least as severe as the platform's own status.
        rollup_status = _ia_client.get_agent(['rollup_status'])['rollup_status']
        log.debug('callback_for_agg_alert rollup_status: %s', rollup_status)
        rollup_status_comms = rollup_status[1]
        rollup_status_data = rollup_status[2]
        self.assertTrue(rollup_status_comms >= agg_status_comms )
        self.assertTrue(rollup_status_data >= agg_status_data )
        child_agg_status = _ia_client.get_agent(['child_agg_status'])['child_agg_status']
        log.debug('callback_for_agg_alert child_agg_status: %s', child_agg_status)
        #only one child instrument
        child1_agg_status = child_agg_status[i_obj.instrument_device_id]
        child1_agg_status_data = child1_agg_status[2]
        self.assertTrue(rollup_status_data >= child1_agg_status_data )
    # Hand the event to the test body (which blocks on this queue).
    self.catch_alert.put(event)
#create a subscriber for the DeviceStatusAlertEvent from the instrument
self.event_subscriber = EventSubscriber(event_type='DeviceStatusAlertEvent',
origin=i_obj.instrument_device_id,
callback=callback_for_alert)
self.event_subscriber.start()
self.addCleanup(self.event_subscriber.stop)
#create a subscriber for the DeviceAggregateStatusEvent from the instrument and platform
self.event_subscriber = EventSubscriber(event_type='DeviceAggregateStatusEvent',
callback=callback_for_agg_alert)
self.event_subscriber.start()
self.addCleanup(self.event_subscriber.stop)
# sleep to let the streaming run for a while
log.debug("KK sleeping ...")
gevent.sleep(30)
caught_events = [self.catch_alert.get(timeout=45)]
caught_events.append(self.catch_alert.get(timeout=45))
log.debug("caught_events: %s", [c.type_ for c in caught_events])
# # stop streaming:
# log.debug("KK stopping instrument streaming: %s", state)
# cmd = AgentCommand(command=SBE37ProtocolEvent.STOP_AUTOSAMPLE)
# self._ia_client.execute_resource(cmd)
# TODO verifications ...
# ...
self._idle_instruments()
# then shutdown the network:
self._go_inactive()
self._reset()
self._shutdown()
| 8,970 | 329 | 22 |
523193ed8dd76327553e190d697260b45a0f932f | 2,092 | py | Python | desktop/core/src/desktop/lib/test_export_csvxls.py | erickt/hue | a046f1dd21226689ed447422f3373d96c65b2fd2 | [
"Apache-2.0"
] | null | null | null | desktop/core/src/desktop/lib/test_export_csvxls.py | erickt/hue | a046f1dd21226689ed447422f3373d96c65b2fd2 | [
"Apache-2.0"
] | null | null | null | desktop/core/src/desktop/lib/test_export_csvxls.py | erickt/hue | a046f1dd21226689ed447422f3373d96c65b2fd2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tablib
from desktop.lib.export_csvxls import create_generator, make_response
from nose.tools import assert_equal
| 38.740741 | 79 | 0.718451 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tablib
from desktop.lib.export_csvxls import create_generator, make_response
from nose.tools import assert_equal
def content_generator(header, data):
    """Yield a single (header, data) chunk, mimicking an export result generator."""
    chunk = (header, data)
    yield chunk
def test_export_csv():
    """CSV export: content type, delimiter quoting, NULL rendering, filename."""
    cols = ["x", "y"]
    rows = [["1", "2"], ["3", "4"], ["5,6", "7"], [None, None]]
    # Check CSV
    csv_response = make_response(
        create_generator(content_generator(cols, rows), "csv"), "csv", "foo")
    assert_equal("application/csv", csv_response["content-type"])
    body = ''.join(csv_response.streaming_content)
    # Values containing the delimiter must be quoted; None becomes NULL.
    assert_equal('x,y\r\n1,2\r\n3,4\r\n"5,6",7\r\nNULL,NULL\r\n', body)
    assert_equal("attachment; filename=foo.csv", csv_response["content-disposition"])
def test_export_xls():
    """XLS export: response bytes match an equivalent tablib workbook."""
    cols = ["x", "y"]
    rows = [["1", "2"], ["3", "4"], ["5,6", "7"], [None, None]]
    # Build the reference workbook the same way the exporter should
    # (None and falsy cells render as "NULL").
    expected = tablib.Dataset(headers=cols)
    for row in rows:
        expected.append([cell or "NULL" for cell in row])
    # Check XLS
    xls_response = make_response(
        create_generator(content_generator(cols, rows), "xls"), "xls", "foo")
    assert_equal("application/xls", xls_response["content-type"])
    body = ''.join(xls_response.streaming_content)
    assert_equal(expected.xls, body)
    assert_equal("attachment; filename=foo.xls", xls_response["content-disposition"])
| 1,109 | 0 | 69 |
99936fb51ccf71b834427377bf4477143dd06b55 | 2,044 | py | Python | modules/process.py | hectorapp/hector-agent | 0d7ac3ab8b585aa2cd4effb7c218bb7830be18b7 | [
"BSD-3-Clause"
] | 1 | 2019-10-28T11:50:37.000Z | 2019-10-28T11:50:37.000Z | modules/process.py | hectorapp/hector-agent | 0d7ac3ab8b585aa2cd4effb7c218bb7830be18b7 | [
"BSD-3-Clause"
] | null | null | null | modules/process.py | hectorapp/hector-agent | 0d7ac3ab8b585aa2cd4effb7c218bb7830be18b7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
'''
* Author : Hutter Valentin
* Date : 13.05.2019
* Description : Hector agent monitoring
* Help :
- https://psutil.readthedocs.io/en/latest/#processes
- https://psutil.readthedocs.io/en/latest/#psutil.process_iter
- https://psutil.readthedocs.io/en/latest/#unicode
'''
import psutil
import sys
sys.path.insert(0, '..') # to import helpers from parent folder
import helpers
| 37.851852 | 134 | 0.674658 | #!/usr/bin/env python
# coding: utf-8
'''
* Author : Hutter Valentin
* Date : 13.05.2019
* Description : Hector agent monitoring
* Help :
- https://psutil.readthedocs.io/en/latest/#processes
- https://psutil.readthedocs.io/en/latest/#psutil.process_iter
- https://psutil.readthedocs.io/en/latest/#unicode
'''
import psutil
import sys
sys.path.insert(0, '..') # to import helpers from parent folder
import helpers
class process:
    """Collector for running processes on the host.

    Snapshots RUNNING processes via psutil and normalises each entry for
    reporting: joined command line, CPU percentage rounded to 2 decimals,
    memory percentage plus the derived memory usage in MB.
    """

    # Negative slice bound: keep at most the last 35 matching entries.
    DEFAULT_NB_OF_RETURNED_RESULTS = -35

    def collect(self):
        """Return a list of dicts describing up to 35 running processes.

        Processes that vanish or deny access while being inspected are
        skipped silently (best-effort monitoring); any other per-process
        error is also skipped so one bad entry cannot abort the sweep.
        """
        processes = []
        # Retrieve only processes that are currently running.
        running = [proc.info for proc in psutil.process_iter(attrs=[
            'pid', 'ppid', 'name', 'username', 'exe', 'cmdline',
            'cpu_percent', 'memory_percent', 'status'
        ]) if proc.info['status'] == psutil.STATUS_RUNNING]
        # Limit to 35 processes (see DEFAULT_NB_OF_RETURNED_RESULTS).
        running = running[self.DEFAULT_NB_OF_RETURNED_RESULTS:]
        for info in running:
            try:
                info['cmdline'] = ' '.join(info['cmdline']).strip()  # join args
                if 'cpu_percent' in info and info['cpu_percent'] is not None:
                    info['cpu_percent'] = float("{0:.2f}".format(info['cpu_percent']))
                # Init process memory usage
                if 'memory_percent' in info and info['memory_percent'] is not None:
                    # RAM used by a process is derived from the host's total
                    # RAM and the process's memory percentage.
                    total_memory = psutil.virtual_memory().total
                    info['memory_used_mb'] = float("{0:.2f}".format(
                        helpers.bytes_to_mb((total_memory / 100) * info['memory_percent'])))
                    info['memory_percent'] = float("{0:.2f}".format(info['memory_percent']))
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # https://psutil.readthedocs.io/en/latest/#psutil.NoSuchProcess
                # Process disappeared or is protected: skip it.
                pass
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                pass
            else:
                processes.append(info)
        return processes
| 1,531 | 57 | 23 |
0127c03d824add7d490dbb30604454d0eb757174 | 668 | py | Python | support/shared_consts.py | victordalla/regression-diagnosis | d0dd04de0dac801726113a93f2a6c833307333ca | [
"MIT"
] | null | null | null | support/shared_consts.py | victordalla/regression-diagnosis | d0dd04de0dac801726113a93f2a6c833307333ca | [
"MIT"
] | null | null | null | support/shared_consts.py | victordalla/regression-diagnosis | d0dd04de0dac801726113a93f2a6c833307333ca | [
"MIT"
] | null | null | null | """
Constants used across all files
"""
# The constants in this file must be defined and checked by the user of the template
import numpy as np
from typing import Dict, List, Optional
from copy import deepcopy
from datetime import date
seed = 0
true_params = {"b0": 6, "b1": 1.3, "scale": 50}
n = 300
x_min = -50
x_max = 100
np.random.seed(seed)
x_fix = np.random.uniform(x_min, x_max, n)
np.random.seed(seed)
e_fix = np.random.normal(0, true_params["scale"], n)
np.random.seed(seed)
y_fix = true_params["b0"] + true_params["b1"] * x_fix + e_fix | 23.034483 | 84 | 0.712575 | """
Constants used across all files
"""
# The constants in this file must be defined and checked by the user of the template
import numpy as np
from typing import Dict, List, Optional
from copy import deepcopy
from datetime import date
def from_dict_to_list(dictionary):
    """Flatten the dict's values (each an iterable) into a single flat list."""
    flattened = []
    for value in dictionary.values():
        flattened.extend(value)
    return flattened
seed = 0
# Ground-truth linear model: y = b0 + b1 * x + e, with e ~ N(0, scale).
true_params = {"b0": 6, "b1": 1.3, "scale": 50}
# Sample size and covariate range for the simulated regression data.
n = 300
x_min = -50
x_max = 100
# The seed is reset before each draw so x_fix and e_fix are each
# reproducible independently of draw order.
np.random.seed(seed)
x_fix = np.random.uniform(x_min, x_max, n)
np.random.seed(seed)
e_fix = np.random.normal(0, true_params["scale"], n)
np.random.seed(seed)
y_fix = true_params["b0"] + true_params["b1"] * x_fix + e_fix | 90 | 0 | 23 |
a8589db975cc723af5c71c12006f483dccac9b7c | 14,387 | py | Python | dictionary/dict_yeelight.py | giuliapuntoit/RL-framework-iot | 1c0961f10f0477415198bbee94b6eb3272973004 | [
"MIT"
] | 5 | 2021-01-23T20:47:18.000Z | 2021-09-13T14:37:01.000Z | dictionary/dict_yeelight.py | SmartData-Polito/RL-IoT | d293c8410d6c2e8fcb56f96c346c519dd3a84a28 | [
"MIT"
] | null | null | null | dictionary/dict_yeelight.py | SmartData-Polito/RL-IoT | d293c8410d6c2e8fcb56f96c346c519dd3a84a28 | [
"MIT"
] | 1 | 2021-02-09T17:34:47.000Z | 2021-02-09T17:34:47.000Z | """
Class containing the dictionary for the Yeelight protocol
"""
# COMMAND message {id_pair, method_pair, params_pair}
# id_pair is "id":<number>
# method_pair is "method":"<method>"
# params_pair is "params":["<param1>","<param2>", <numeric_param3>]
# <param1> is "property":<property_value>
if __name__ == '__main__':
method_returned = DictYeelight().run()
# Useful information
print("Method is " + str(method_returned))
| 42.190616 | 70 | 0.260443 | """
Class containing the dictionary for the Yeelight protocol
"""
# COMMAND message {id_pair, method_pair, params_pair}
# id_pair is "id":<number>
# method_pair is "method":"<method>"
# params_pair is "params":["<param1>","<param2>", <numeric_param3>]
# <param1> is "property":<property_value>
class DictYeelight(object):
    """Lookup table for the Yeelight LAN-control protocol methods.

    Each descriptor returned by :meth:`run` has the keys:

    - ``name``: the protocol method name,
    - ``min_params`` / ``max_params``: accepted parameter-count bounds
      (``max_params == -1`` means "unbounded", used by ``get_prop``),
    - ``params_list``: ordered ``(name, type_witness)`` pairs, where the
      witness ``0`` stands for int and ``""`` for string; non-empty string
      witnesses such as ``"on"``/``"off"`` are fixed literal values.

    The original implementation spelled every descriptor dict out verbatim
    (~300 lines); this version builds them through :meth:`_entry`, returning
    byte-for-byte the same data.
    """

    def __init__(self, method_requested=2):
        """Store the index of the method to look up (default 2 = set_rgb)."""
        self.method_requested = method_requested

    @staticmethod
    def _entry(name, min_params, max_params, params_list):
        """Build one method descriptor dict; keeps the table compact."""
        return {"name": name,
                "min_params": min_params,
                "max_params": max_params,
                "params_list": params_list,
                }

    def run(self):
        """Return the descriptor of the requested method.

        Out-of-range ``method_requested`` values fall back to index 2
        (``set_rgb``), matching the historical default.
        """
        # Readable Yeelight properties queried through get_prop; the
        # documented value range is in each trailing comment.
        properties = [('power', ""),           # values on off
                      ('bright', 0),           # range 1 100
                      ('ct', 0),               # range 1700 6500 (k)
                      ('rgb', 0),              # range 1 16777215
                      ('hue', 0),              # range 0 359
                      ('sat', 0),              # range 0 100
                      ('color_mode', 0),       # values 1 2 3
                      ('flowing', 0),          # values 0 1
                      ('delayoff', 0),         # range 1 60
                      ('flow_params', 0),      # ?
                      ('music_on', 0),         # values 0 1
                      ('name', ""),            # values set in set_name command
                      ('bg_power', ""),        # values on off
                      ('bg_flowing', 0),       # values 0 1
                      ('bg_flow_params', ""),  # ?
                      ('bg_ct', 0),            # range 1700 6500 (k?)
                      ('bg_lmode', 0),         # values 1 2 3
                      ('bg_bright', 0),        # range 0 100 (percentage)
                      ('bg_rgb', 0),           # range 1 16777215
                      ('bg_hue', 0),           # range 0 359
                      ('bg_sat', 0),           # range 0 100
                      ('nl_br', 0),            # range 1 100
                      ('active_mode', 0),      # values 0 1
                      ]
        # Shared parameter shapes (copied per entry below so no descriptor
        # aliases another's list).
        ct_params = [('ct_value', 0), ('effect', ""), ('duration', 0)]
        rgb_params = [('rgb_value', 0), ('effect', ""), ('duration', 0)]
        hsv_params = [('hue', 0), ('sat', 0), ('effect', ""), ('duration', 0)]
        bright_params = [('brightness', 0), ('effect', ""), ('duration', 0)]
        cf_params = [('count', 0), ('action', 0), ('flow_expression', "")]
        scene_params = [('class', ""), ('val1', 0), ('val2', 0), ('val3', 0)]
        adjust_params = [('action', ""), ('prop', "")]
        pct_params = [('percentage', 0), ('duration', 0)]
        power_on = [('power', "on"), ('effect', ""), ('duration', 0), ('mode', 0)]
        power_off = [('power', "off"), ('effect', ""), ('duration', 0), ('mode', 0)]

        E = self._entry
        methods = [
            E("get_prop", 1, -1, properties),            # max -1: any number of props
            E("set_ct_abx", 3, 3, list(ct_params)),
            E("set_rgb", 3, 3, list(rgb_params)),
            E("set_hsv", 4, 4, list(hsv_params)),
            E("set_bright", 3, 3, list(bright_params)),
            E("set_power", 3, 4, list(power_on)),        # 'mode' is optional
            E("set_power", 3, 4, list(power_off)),       # 'mode' is optional
            E("toggle", 0, 0, []),
            E("set_default", 0, 0, []),
            E("start_cf", 3, 3, list(cf_params)),
            E("stop_cf", 0, 0, []),
            E("set_scene", 3, 4, list(scene_params)),    # 'val3' is optional
            E("cron_add", 2, 2, [('type', 0), ('value', 0)]),
            E("cron_get", 1, 1, [('type', 0)]),
            E("cron_del", 1, 1, [('type', 0)]),
            E("set_adjust", 2, 2, list(adjust_params)),
            E("set_music", 1, 3, [('action', 0), ('host', ""), ('port', 0)]),
            E("set_name", 1, 1, [('name', "")]),
            E("bg_set_rgb", 3, 3, list(rgb_params)),
            E("bg_set_hsv", 4, 4, list(hsv_params)),
            E("bg_set_ct_abx", 3, 3, list(ct_params)),
            E("bg_start_cf", 3, 3, list(cf_params)),
            E("bg_stop_cf", 0, 0, []),
            E("bg_set_scene", 3, 4, list(scene_params)),
            E("bg_set_default", 0, 0, []),
            # NOTE: the original declares max_params 3 for bg_set_power while
            # listing 4 params (mode); preserved as-is for compatibility.
            E("bg_set_power", 3, 3, list(power_on)),
            E("bg_set_power", 3, 3, list(power_off)),
            E("bg_set_bright", 3, 3, list(bright_params)),
            E("bg_set_adjust", 2, 2, list(adjust_params)),
            E("bg_toggle", 0, 0, []),
            E("dev_toggle", 0, 0, []),
            E("adjust_bright", 2, 2, list(pct_params)),
            E("adjust_ct", 2, 2, list(pct_params)),
            E("adjust_color", 2, 2, list(pct_params)),
            E("bg_adjust_bright", 2, 2, list(pct_params)),
            E("bg_adjust_ct", 2, 2, list(pct_params)),
            E("bg_adjust_color", 2, 2, list(pct_params)),
        ]
        # Default method selected value; out-of-range requests fall back here.
        method_selected = 2
        if 0 <= self.method_requested < len(methods):
            method_selected = self.method_requested
        return methods[method_selected]
if __name__ == '__main__':
    # Manual smoke test: look up the default method (index 2, set_rgb)
    # and print its descriptor.
    method_returned = DictYeelight().run()
    # Useful information
    print("Method is " + str(method_returned))
| 13,841 | 6 | 77 |
e1350dcd5fec81e1aacfcd1ad5964e08627c60d1 | 2,641 | py | Python | _includes/ropes.py | hectorpefo/prepublication | 084a0b331e331f8a56c413bc63ea73c2744076da | [
"MIT"
] | 4 | 2019-02-17T22:26:28.000Z | 2022-03-03T11:23:37.000Z | _includes/ropes.py | hectorpefo/prepublication | 084a0b331e331f8a56c413bc63ea73c2744076da | [
"MIT"
] | null | null | null | _includes/ropes.py | hectorpefo/prepublication | 084a0b331e331f8a56c413bc63ea73c2744076da | [
"MIT"
] | 5 | 2017-02-27T22:34:54.000Z | 2020-10-26T01:23:06.000Z | # Rope Burning Riddler from fivethirtyeight.com
# Main program
# Numbers of ropes to do
MinRopes = 1
MaxRopes = 6
for N in range(MinRopes,MaxRopes+1):
# ropes is a list of pairs. each pair is: [ends-lit
# (0, 1, or 2), time (has been or will be) extinguished].
# We start with extinction time 0 as a dummy value.
ropes = [[0,0]]*N
time = 0
situation = [ropes,time]
# Keep track of the situations we have already processed
already_explored = [situation]
# This is our list of the durations we can measure
times = []
# Recursively explore the achievable situations
explore(situation)
# Done. Tidy up and finish.
if 0 in times:
# 0 is not a duration per problem statement.
times.remove(0)
times.sort()
print(N,"ropes measure",len(times), "intervals")
# print(times)
| 28.095745 | 66 | 0.678531 | # Rope Burning Riddler from fivethirtyeight.com
def explore(situation):
    """Recursively explore all reachable rope-lighting situations.

    A situation is ``[ropes, time]`` where each rope is
    ``[ends_lit, extinguish_time]`` (ends_lit in {0, 1, 2}; each rope burns
    for 1 time unit end-to-end, so lighting both ends halves the remaining
    time). Terminal situations (all ropes extinguished) record measurable
    durations into the module-level ``times`` list.

    Mutates module globals: reads ``N``, appends to ``times`` and
    ``already_explored``.
    """
    ropes = situation[0]
    time = situation[1]
    # Find unextinguished ropes and make a list of those
    # with at least 1 unlit end.
    allextinguished = True
    ropestochoose = []
    for r in range(N):
        if ropes[r][1] == 0 or ropes[r][1] > time:
            allextinguished = False
            if not ropes[r][0] ==2:
                ropestochoose.append(r)
    if allextinguished:
        # No descendent situations, so tally the intervals
        for rope in ropes:
            time = rope[1]
            if not time in times:
                times.append(time)
        # Comment-out the following block to ignore
        # periods between extinguishings
        for rope1 in ropes:
            for rope2 in ropes:
                time = abs(rope1[1]-rope2[1])
                if not time in times:
                    times.append(time)
        return
    # A choice is to (0) do nothing, (1) ignite a first
    # end if unignited (2) ignite (both first and)
    # second end if unignited. The choice for a particular
    # rope R (0 to len(ropestochoose)-1) is (choices//3**R)%3
    # (think of choices as a base-3 numeral).
    for choices in range(1,3**len(ropestochoose)):
        # We will modify a copy of ropes
        newropes = list(ropes)
        for r in range(len(ropestochoose)):
            rope = newropes[ropestochoose[r]]
            choice = (choices//3**r)%3
            if rope[0] == 0:
                # No ends lit: lighting one end burns 1 unit; lighting
                # both ends burns in 0.5 units.
                if choice == 1:
                    rope = [1,time+1]
                elif choice == 2:
                    rope = [2,time+.5]
            elif rope[0] == 1:
                # One end already lit: lighting the other halves the
                # remaining burn time.
                if choice == 2:
                    rope = [2,time+.5*(rope[1]-time)]
            newropes[ropestochoose[r]] = rope
        # This will prevent redundantly exploring equivalent situations
        newropes.sort(reverse=True)
        # Find time of next extinguishing
        nexttime = min([rope[1] for rope in newropes if rope[1] > time])
        newsituation = [newropes,nexttime]
        if newropes == ropes or newsituation in already_explored:
            continue
        already_explored.append(newsituation)
        explore(newsituation)
# Main program
# Numbers of ropes to do
MinRopes = 1
MaxRopes = 6
for N in range(MinRopes,MaxRopes+1):
    # ropes is a list of pairs. each pair is: [ends-lit
    # (0, 1, or 2), time (has been or will be) extinguished].
    # We start with extinction time 0 as a dummy value.
    # NOTE: [[0,0]]*N aliases one inner list N times; explore() never
    # mutates these lists in place (it replaces them), so this is safe.
    ropes = [[0,0]]*N
    time = 0
    situation = [ropes,time]
    # Keep track of the situations we have already processed
    already_explored = [situation]
    # This is our list of the durations we can measure
    times = []
    # Recursively explore the achievable situations
    explore(situation)
    # Done. Tidy up and finish.
    if 0 in times:
        # 0 is not a duration per problem statement.
        times.remove(0)
    times.sort()
    print(N,"ropes measure",len(times), "intervals")
    # print(times)
| 1,824 | 0 | 23 |
d9a2e2115dcdf579f02fc328bcbd1f22a3326b91 | 1,818 | py | Python | src/predict.py | Akashcba/ML | 95ecf3e011dcbd6112a4e58312c1656b94e9414d | [
"MIT"
] | null | null | null | src/predict.py | Akashcba/ML | 95ecf3e011dcbd6112a4e58312c1656b94e9414d | [
"MIT"
] | null | null | null | src/predict.py | Akashcba/ML | 95ecf3e011dcbd6112a4e58312c1656b94e9414d | [
"MIT"
] | null | null | null | import os
import pandas as pd
from sklearn import ensemble
from sklearn import preprocessing
from sklearn import metrics
import joblib
import numpy as np
import time
from . import dispatcher
TEST_DATA = os.environ.get("TEST_DATA")
MODEL = os.environ.get("MODEL")
PATH = os.environ.get("MODEL_PATH")
NUM_FOLDS = int(os.environ.get("NUM_FOLDS"))
if __name__ == "__main__":
print("\nPridicting The Values ......")
time.sleep(7)
submission = predict(test_data_path=TEST_DATA,
model_type=MODEL)
submission.loc[:, "id"] = submission.loc[:, "id"].astype(int)
submission.to_csv(f"/Users/my_mac/Documents/Machine Learning/ML/input/{MODEL}_submission.csv", index=False)
| 31.344828 | 111 | 0.628713 | import os
import pandas as pd
from sklearn import ensemble
from sklearn import preprocessing
from sklearn import metrics
import joblib
import numpy as np
import time
from . import dispatcher
TEST_DATA = os.environ.get("TEST_DATA")
MODEL = os.environ.get("MODEL")
PATH = os.environ.get("MODEL_PATH")
NUM_FOLDS = int(os.environ.get("NUM_FOLDS"))
def predict(test_data_path, model_type, model_path=PATH):
    """Average per-fold model probabilities over the test set.

    Loads one pickled classifier per fold from *model_path* (files named
    ``"<model_type>_<fold>.pkl"``), averages their positive-class
    probabilities across NUM_FOLDS, and returns a submission DataFrame.

    Parameters
    ----------
    test_data_path : str
        Path to the test CSV; must contain an 'id' column.
    model_type : str
        Model-name prefix used when the fold models were saved.
    model_path : str
        Directory holding the pickled fold models (defaults to MODEL_PATH).

    Returns
    -------
    pandas.DataFrame with columns ['id', 'target'].
    """
    # Read the test data once -- the original re-read the CSV on every
    # fold even though its content never changes.
    df = pd.read_csv(test_data_path)
    test_idx = df["id"].values
    # Feature columns are everything except bookkeeping columns.
    feature_cols = [c for c in df.columns if c not in ("id", "target", "kfold")]
    features = df[feature_cols]
    predictions = None
    for fold in range(NUM_FOLDS):
        clf = joblib.load(os.path.join(model_path, f"{model_type}_{fold}.pkl"))
        # Probability of the positive class for every test row.
        preds = clf.predict_proba(features)[:, 1]
        predictions = preds if predictions is None else predictions + preds
    predictions /= NUM_FOLDS
    return pd.DataFrame(np.column_stack((test_idx, predictions)),
                        columns=["id", "target"])
if __name__ == "__main__":
    print("\nPridicting The Values ......")
    # Cosmetic pause so the banner is visible before the heavy work starts.
    time.sleep(7)
    submission = predict(test_data_path=TEST_DATA,
                         model_type=MODEL)
    # np.column_stack promotes 'id' to float; restore integer ids.
    submission.loc[:, "id"] = submission.loc[:, "id"].astype(int)
    # NOTE(review): hard-coded absolute output path -- machine-specific.
    submission.to_csv(f"/Users/my_mac/Documents/Machine Learning/ML/input/{MODEL}_submission.csv", index=False)
8ad92d5050c75dcd61e8facec28005990ce8c6a4 | 3,369 | py | Python | ackermann_vehicle_gazebo/scripts/commandEncoder.py | testville/ros_gazebo_car | c8e3d149df9f25756e3a002bf080d474e40ad57e | [
"Apache-2.0"
] | 2 | 2020-12-05T11:14:14.000Z | 2022-01-09T20:23:18.000Z | ackermann_vehicle_gazebo/scripts/commandEncoder.py | testville/ros_gazebo_car | c8e3d149df9f25756e3a002bf080d474e40ad57e | [
"Apache-2.0"
] | null | null | null | ackermann_vehicle_gazebo/scripts/commandEncoder.py | testville/ros_gazebo_car | c8e3d149df9f25756e3a002bf080d474e40ad57e | [
"Apache-2.0"
] | 1 | 2022-02-14T23:21:27.000Z | 2022-02-14T23:21:27.000Z | #!/usr/bin/env python
import rospy
import tf
import roslib
import random
import math
import copy as copy_module
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
from ackermann_msgs.msg import AckermannDrive
if __name__ == '__main__':
rospy.loginfo('we are starting')
commandManager = CommandEncoder(300)
commandManager.provider()
| 45.527027 | 167 | 0.640843 | #!/usr/bin/env python
import rospy
import tf
import roslib
import random
import math
import copy as copy_module
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
from ackermann_msgs.msg import AckermannDrive
class CommandEncoder:
    """Dead-reckons odometry from AckermannDrive commands and broadcasts TF.

    Subscribes to 'ackermann_cmd', integrates the latest command with a
    bicycle model at a fixed rate, publishes nav_msgs/Odometry on 'odom'
    and the odom -> base_footprint -> base_stabilized -> base_link chain.
    """

    def __init__(self, targetFrameRate):
        """Set up the ROS node, subscriber, publisher and initial pose.

        targetFrameRate: update/publish frequency in Hz for provider().
        """
        rospy.init_node('odometry_broadcaster')
        self.targetFrameRate = targetFrameRate
        self.commands = rospy.Subscriber('ackermann_cmd', AckermannDrive, self.commandEncoder)
        # Start slightly off the origin/identity to avoid degenerate values.
        self.lastPose = Pose()
        self.lastPose.position = Point(0.0001, 0.0001, 0.0001)
        self.lastPose.orientation = Quaternion(0.0001, 0, 0, 1.0)
        # (removed: a stray no-op statement 'self.lastPose.orientation.x')
        self.lastCommand = AckermannDrive()
        # Effective wheelbase (metres) used by the bicycle model --
        # presumably tuned for this vehicle; confirm against the URDF.
        self.lengthBetweenWheelBase = 0.4 * 3.4 * 0.7
        self.Timer = rospy.Time
        self.lastCommandTime = self.Timer.now().to_sec()
        # Height of base_stabilized above base_footprint (metres).
        self.initialZPose = 0.2
        self.odom_pub = rospy.Publisher("odom", Odometry, queue_size=50)

    def commandEncoder(self, command):
        """Subscriber callback: remember the most recent drive command."""
        self.lastCommand = command

    def provider(self):
        """Integrate the latest command at targetFrameRate, publish odom + TF.

        Blocks until ROS shutdown.
        """
        iteration = 0
        r = rospy.Rate(self.targetFrameRate)
        # One broadcaster for the node's lifetime (the original recreated
        # it on every loop iteration).
        br = tf.TransformBroadcaster()
        while not rospy.is_shutdown():
            timeDerevative = rospy.Time.now().to_sec() - self.lastCommandTime
            # Deep-copy so the subscriber callback cannot swap the command
            # mid-update.
            command = copy_module.deepcopy(self.lastCommand)
            currentYaw = euler_from_quaternion((self.lastPose.orientation.x, self.lastPose.orientation.y, self.lastPose.orientation.z, self.lastPose.orientation.w))[2]
            # Dead-reckon position along the current heading.
            self.lastPose.position.x = self.lastPose.position.x + (timeDerevative * command.speed) * math.cos(currentYaw)
            self.lastPose.position.y = self.lastPose.position.y + (timeDerevative * command.speed) * math.sin(currentYaw)
            angular_speed = math.atan(command.steering_angle) * command.speed / 0.952
            # Bicycle-model yaw update: dyaw = v/L * tan(steering) * dt.
            o = quaternion_from_euler(0, 0, currentYaw + (timeDerevative * command.speed / self.lengthBetweenWheelBase) * math.tan(command.steering_angle))
            self.lastPose.orientation = Quaternion(o[0], o[1], o[2], o[3])
            p = self.lastPose.position
            odom = Odometry()
            odom.header.frame_id = "odom"
            odom.pose.pose = Pose(Point(p.x, p.y, p.z), Quaternion(o[0], o[1], o[2], o[3]))
            odom.child_frame_id = "base_link"
            odom.twist.twist = Twist(Vector3(self.lastCommand.speed, 0, 0), Vector3(0, 0, angular_speed))
            # publish the message
            self.odom_pub.publish(odom)
            self.lastCommandTime = rospy.Time.now().to_sec()
            sendTime = rospy.Time.now()
            # NOTE(review): header.stamp is assigned a float (seconds), not a
            # rospy.Time -- kept for behavioral compatibility, but this looks
            # wrong; confirm downstream consumers.
            odom.header.stamp = self.lastCommandTime
            br.sendTransform((p.x, p.y, 0), (0, 0, 0, 1), sendTime, 'base_footprint', 'odom')
            br.sendTransform((0, 0, self.initialZPose), (0, 0, 0, 1), sendTime, 'base_stabilized', 'base_footprint')
            br.sendTransform((0, 0, 0), (o[0], o[1], o[2], o[3]), sendTime, 'base_link', 'base_stabilized')
            iteration += 1
            r.sleep()
if __name__ == '__main__':
    rospy.loginfo('we are starting')
    # Run the odometry loop at 300 Hz; provider() blocks until shutdown.
    commandManager = CommandEncoder(300)
    commandManager.provider()
| 2,790 | 0 | 103 |
ab583dfc0f7d74b79f55a79c04417e8e1f16a66f | 4,069 | py | Python | tests/unit/test_triton_inference.py | beingaryan/NVTabular | 126c8e38ffe77ce36a228079776410d97580f992 | [
"Apache-2.0"
] | 1 | 2021-08-31T08:21:09.000Z | 2021-08-31T08:21:09.000Z | tests/unit/test_triton_inference.py | beingaryan/NVTabular | 126c8e38ffe77ce36a228079776410d97580f992 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_triton_inference.py | beingaryan/NVTabular | 126c8e38ffe77ce36a228079776410d97580f992 | [
"Apache-2.0"
] | null | null | null | import contextlib
import os
import signal
import subprocess
import time
from distutils.spawn import find_executable
import cudf
import pytest
from cudf.tests.utils import assert_eq
import nvtabular as nvt
import nvtabular.ops as ops
triton = pytest.importorskip("nvtabular.inference.triton")
grpcclient = pytest.importorskip("tritonclient.grpc")
tritonclient = pytest.importorskip("tritonclient")
_TRITON_SERVER_PATH = find_executable("tritonserver")
@contextlib.contextmanager
@pytest.mark.skipif(_TRITON_SERVER_PATH is None, reason="Requires tritonserver on the path")
@pytest.mark.parametrize("engine", ["parquet"])
| 34.483051 | 92 | 0.657901 | import contextlib
import os
import signal
import subprocess
import time
from distutils.spawn import find_executable
import cudf
import pytest
from cudf.tests.utils import assert_eq
import nvtabular as nvt
import nvtabular.ops as ops
triton = pytest.importorskip("nvtabular.inference.triton")
grpcclient = pytest.importorskip("tritonclient.grpc")
tritonclient = pytest.importorskip("tritonclient")
_TRITON_SERVER_PATH = find_executable("tritonserver")
@contextlib.contextmanager
def run_triton_server(modelpath):
    """Start tritonserver on *modelpath* and yield a ready gRPC client.

    Polls the server's readiness endpoint once per second for up to 60
    seconds, raising RuntimeError on timeout. The server process is sent
    SIGINT on exit in all cases (success, timeout, or test failure).
    """
    cmdline = [_TRITON_SERVER_PATH, "--model-repository", modelpath]
    with subprocess.Popen(cmdline) as process:
        try:
            with grpcclient.InferenceServerClient("localhost:8001") as client:
                # wait until server is ready
                for _ in range(60):
                    try:
                        ready = client.is_server_ready()
                    except tritonclient.utils.InferenceServerException:
                        # Server not accepting connections yet; keep polling.
                        ready = False
                    if ready:
                        yield client
                        return
                    time.sleep(1)
                raise RuntimeError("Timed out waiting for tritonserver to become ready")
        finally:
            # signal triton to shutdown
            process.send_signal(signal.SIGINT)
@pytest.mark.skipif(_TRITON_SERVER_PATH is None, reason="Requires tritonserver on the path")
def test_tritonserver_inference_string(tmpdir):
    """Fit a Categorify workflow on a string column, serve it through a live
    tritonserver, and check the served output matches the local transform."""
    df = cudf.DataFrame({"user": ["aaaa", "bbbb", "cccc", "aaaa", "bbbb", "aaaa"]})
    features = ["user"] >> ops.Categorify()
    workflow = nvt.Workflow(features)

    # fit the workflow and test on the input
    dataset = nvt.Dataset(df)
    workflow.fit(dataset)
    local_df = workflow.transform(dataset).to_ddf().compute(scheduler="synchronous")

    model_name = "test_inference_string"
    triton.generate_nvtabular_model(workflow, model_name, tmpdir + "/test_inference_string")

    inputs = triton.convert_df_to_triton_input(["user"], df)
    with run_triton_server(tmpdir) as client:
        response = client.infer(model_name, inputs)

        # flatten the (n, 1) response back into a single column for comparison
        user_features = response.as_numpy("user")
        triton_df = cudf.DataFrame({"user": user_features.reshape(user_features.shape[0])})
        assert_eq(triton_df, local_df)
def test_generate_triton_multihot(tmpdir):
    """Round-trip a workflow with a multi-hot (list) column through the
    triton model-repository format and verify the reloaded workflow
    reproduces the original transform output.
    """
    df = cudf.DataFrame(
        {
            "userId": ["a", "a", "b"],
            "movieId": ["1", "2", "2"],
            "genres": [["action", "adventure"], ["action", "comedy"], ["comedy"]],
        }
    )

    cats = ["userId", "movieId", "genres"] >> nvt.ops.Categorify()
    workflow = nvt.Workflow(cats)
    workflow.fit(nvt.Dataset(df))
    expected = workflow.transform(nvt.Dataset(df)).to_ddf().compute()

    # save workflow to triton / verify we see some expected output
    repo = os.path.join(tmpdir, "models")
    triton.generate_nvtabular_model(workflow, "model", repo)
    # drop the in-memory workflow so the reload below stands on its own
    workflow = None

    assert os.path.exists(os.path.join(repo, "config.pbtxt"))

    workflow = nvt.Workflow.load(os.path.join(repo, "1", "workflow"))
    transformed = workflow.transform(nvt.Dataset(df)).to_ddf().compute()
    assert_eq(expected, transformed)
@pytest.mark.parametrize("engine", ["parquet"])
def test_generate_triton_model(tmpdir, engine, df):
    """Save a fitted continuous+categorical workflow as a triton model and
    verify the reloaded workflow reproduces the original transform output.

    Note: the previous version overwrote the ``tmpdir`` fixture with the
    hard-coded path ``"./tmp"`` (a debugging leftover), leaking model files
    into the working directory; the fixture-provided directory is used now.
    """
    conts = ["x", "y", "id"] >> ops.FillMissing() >> ops.Normalize()
    cats = ["name-cat", "name-string"] >> ops.Categorify(cat_cache="host")
    workflow = nvt.Workflow(conts + cats)
    workflow.fit(nvt.Dataset(df))
    expected = workflow.transform(nvt.Dataset(df)).to_ddf().compute()

    # save workflow to triton / verify we see some expected output
    repo = os.path.join(tmpdir, "models")
    triton.generate_nvtabular_model(workflow, "model", repo)
    # drop the in-memory workflow so the reload below stands on its own
    workflow = None

    assert os.path.exists(os.path.join(repo, "config.pbtxt"))

    workflow = nvt.Workflow.load(os.path.join(repo, "1", "workflow"))
    transformed = workflow.transform(nvt.Dataset(df)).to_ddf().compute()
    assert_eq(expected, transformed)
| 3,349 | 0 | 89 |
8610d811befae6e575ff7f591c6961562d7dc4b9 | 69,207 | py | Python | salt/modules/debian_ip.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | 1 | 2021-04-05T19:46:35.000Z | 2021-04-05T19:46:35.000Z | salt/modules/debian_ip.py | dv-trading/salt | f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e | [
"Apache-2.0"
] | null | null | null | salt/modules/debian_ip.py | dv-trading/salt | f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
The networking module for Debian based distros
References:
* http://www.debian.org/doc/manuals/debian-reference/ch05.en.html
'''
# Import python libs
from __future__ import absolute_import
import functools
import logging
import os.path
import os
import re
import time
# Import third party libs
import jinja2
import jinja2.exceptions
import salt.ext.six as six
from salt.ext.six.moves import StringIO # pylint: disable=import-error,no-name-in-module
# Import salt libs
import salt.utils
import salt.utils.templates
import salt.utils.validate.net
import salt.utils.odict
# Set up logging
log = logging.getLogger(__name__)
# Set up template environment
JINJA = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'debian_ip')
)
)
# Define the module's virtual name
__virtualname__ = 'ip'
def __virtual__():
    '''
    Only load on Debian-family distributions.
    '''
    if __grains__['os_family'] != 'Debian':
        return (False, 'The debian_ip module could not be loaded: '
                'unsupported OS family')
    return __virtualname__
# Map of salt-facing option names to the keywords written into an
# ifupdown stanza for ethtool-style settings.
_ETHTOOL_CONFIG_OPTS = {
    'speed': 'link-speed',
    'duplex': 'link-duplex',
    'autoneg': 'ethernet-autoneg',
    'ethernet-port': 'ethernet-port',
    'wol': 'ethernet-wol',
    'driver-message-level': 'driver-message-level',
    'ethernet-pause-rx': 'ethernet-pause-rx',
    'ethernet-pause-tx': 'ethernet-pause-tx',
    'ethernet-pause-autoneg': 'ethernet-pause-autoneg',
    'rx': 'offload-rx',
    'tx': 'offload-tx',
    'sg': 'offload-sg',
    'tso': 'offload-tso',
    'ufo': 'offload-ufo',
    'gso': 'offload-gso',
    'gro': 'offload-gro',
    'lro': 'offload-lro',
    'hardware-irq-coalesce-adaptive-rx': 'hardware-irq-coalesce-adaptive-rx',
    'hardware-irq-coalesce-adaptive-tx': 'hardware-irq-coalesce-adaptive-tx',
    'hardware-irq-coalesce-rx-usecs': 'hardware-irq-coalesce-rx-usecs',
    'hardware-irq-coalesce-rx-frames': 'hardware-irq-coalesce-rx-frames',
    'hardware-dma-ring-rx': 'hardware-dma-ring-rx',
    'hardware-dma-ring-rx-mini': 'hardware-dma-ring-rx-mini',
    'hardware-dma-ring-rx-jumbo': 'hardware-dma-ring-rx-jumbo',
    'hardware-dma-ring-tx': 'hardware-dma-ring-tx',
}
# Reverse mapping (stanza keyword -> salt option name), kept as an
# explicit literal rather than derived from the dict above.
_REV_ETHTOOL_CONFIG_OPTS = {
    'link-speed': 'speed',
    'link-duplex': 'duplex',
    'ethernet-autoneg': 'autoneg',
    'ethernet-port': 'ethernet-port',
    'ethernet-wol': 'wol',
    'driver-message-level': 'driver-message-level',
    'ethernet-pause-rx': 'ethernet-pause-rx',
    'ethernet-pause-tx': 'ethernet-pause-tx',
    'ethernet-pause-autoneg': 'ethernet-pause-autoneg',
    'offload-rx': 'rx',
    'offload-tx': 'tx',
    'offload-sg': 'sg',
    'offload-tso': 'tso',
    'offload-ufo': 'ufo',
    'offload-gso': 'gso',
    'offload-lro': 'lro',
    'offload-gro': 'gro',
    'hardware-irq-coalesce-adaptive-rx': 'hardware-irq-coalesce-adaptive-rx',
    'hardware-irq-coalesce-adaptive-tx': 'hardware-irq-coalesce-adaptive-tx',
    'hardware-irq-coalesce-rx-usecs': 'hardware-irq-coalesce-rx-usecs',
    'hardware-irq-coalesce-rx-frames': 'hardware-irq-coalesce-rx-frames',
    'hardware-dma-ring-rx': 'hardware-dma-ring-rx',
    'hardware-dma-ring-rx-mini': 'hardware-dma-ring-rx-mini',
    'hardware-dma-ring-rx-jumbo': 'hardware-dma-ring-rx-jumbo',
    'hardware-dma-ring-tx': 'hardware-dma-ring-tx',
}
# PPPoE options accepted by salt; identity mapping to the pppd option name.
_DEB_CONFIG_PPPOE_OPTS = {
    'user': 'user',
    'password': 'password',
    'provider': 'provider',
    'pppoe_iface': 'pppoe_iface',
    'noipdefault': 'noipdefault',
    'usepeerdns': 'usepeerdns',
    'defaultroute': 'defaultroute',
    'holdoff': 'holdoff',
    'maxfail': 'maxfail',
    'hide-password': 'hide-password',
    'lcp-echo-interval': 'lcp-echo-interval',
    'lcp-echo-failure': 'lcp-echo-failure',
    'connect': 'connect',
    'noauth': 'noauth',
    'persist': 'persist',
    'mtu': 'mtu',
    'noaccomp': 'noaccomp',
    'linkname': 'linkname',
}
# Well-known Debian networking file locations.
_DEB_ROUTES_FILE = '/etc/network/routes'
_DEB_NETWORK_FILE = '/etc/network/interfaces'
_DEB_NETWORK_DIR = '/etc/network/interfaces.d/'
_DEB_NETWORK_UP_DIR = '/etc/network/if-up.d/'
_DEB_NETWORK_DOWN_DIR = '/etc/network/if-down.d/'
_DEB_NETWORK_CONF_FILES = '/etc/modprobe.d/'
_DEB_NETWORKING_FILE = '/etc/default/networking'
_DEB_HOSTNAME_FILE = '/etc/hostname'
_DEB_RESOLV_FILE = '/etc/resolv.conf'
_DEB_PPP_DIR = '/etc/ppp/peers/'
# Accepted truthy/falsy spellings for boolean-ish options.
_CONFIG_TRUE = ['yes', 'on', 'true', '1', True]
_CONFIG_FALSE = ['no', 'off', 'false', '0', False]
# Interface types this module knows how to configure.
_IFACE_TYPES = [
    'eth', 'bond', 'alias', 'clone',
    'ipsec', 'dialup', 'bridge', 'slave',
    'vlan', 'pppoe', 'source',
]
def _error_msg_iface(iface, option, expected):
    '''
    Format an error message for a bad interface option, listing the
    pipe-separated values that would have been accepted.
    '''
    template = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]'
    return template.format(iface, option, '|'.join(expected))
def _error_msg_routes(iface, option, expected):
    '''
    Format an error message for a bad route option.  Unlike the interface
    variant, ``expected`` is interpolated as-is (no joining).
    '''
    template = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]'
    return template.format(iface, option, expected)
def _error_msg_network(option, expected):
    '''
    Format an error message for a bad global network setting, listing the
    pipe-separated values that would have been accepted.
    '''
    template = 'Invalid network setting -- Setting: {0}, Expected: [{1}]'
    return template.format(option, '|'.join(expected))
def _raise_error_iface(iface, option, expected):
    '''
    Log an invalid interface option and raise it as AttributeError.
    '''
    message = _error_msg_iface(iface, option, expected)
    log.error(message)
    raise AttributeError(message)
def _raise_error_network(option, expected):
    '''
    Log an invalid global network setting and raise it as AttributeError.
    '''
    message = _error_msg_network(option, expected)
    log.error(message)
    raise AttributeError(message)
def _raise_error_routes(iface, option, expected):
    '''
    Log an invalid route option and raise it as AttributeError.
    '''
    message = _error_msg_routes(iface, option, expected)
    log.error(message)
    raise AttributeError(message)
def _read_file(path):
    '''
    Return the contents of ``path`` as a list of text lines.

    Returns an empty string (not a list) when the file cannot be opened
    or read -- callers rely on this historical behavior.
    '''
    try:
        with salt.utils.flopen(path, 'rb') as fp_:
            raw_lines = fp_.readlines()
    except (OSError, IOError):
        return ''
    return [salt.utils.to_str(raw) for raw in raw_lines]
def _parse_resolve():
    '''
    Return the raw lines of /etc/resolv.conf ('' if unreadable).
    '''
    return _read_file(_DEB_RESOLV_FILE)
def _parse_domainname():
    '''
    Return the ``domain`` entry from /etc/resolv.conf, or '' if absent.
    '''
    matcher = re.compile(r'domain\s+(?P<domain_name>\S+)')
    for line in _read_file(_DEB_RESOLV_FILE):
        found = matcher.match(line)
        if found:
            return found.group('domain_name')
    return ''
def _parse_searchdomain():
    '''
    Return the first ``search`` entry from /etc/resolv.conf, or '' if absent.
    '''
    matcher = re.compile(r'search\s+(?P<search_domain>\S+)')
    for line in _read_file(_DEB_RESOLV_FILE):
        found = matcher.match(line)
        if found:
            return found.group('search_domain')
    return ''
def _parse_hostname():
    '''
    Return the hostname stored in /etc/hostname, or '' if unreadable.
    '''
    contents = _read_file(_DEB_HOSTNAME_FILE)
    if not contents:
        return ''
    # first line only, without the trailing newline
    return contents[0].split('\n')[0]
def _parse_current_network_settings():
    '''
    Return the current global network configuration as an OrderedDict with
    the keys ``networking`` (from /etc/default/networking), ``hostname``,
    ``domainname`` and ``searchdomain``.
    '''
    opts = salt.utils.odict.OrderedDict()
    opts['networking'] = ''

    if os.path.isfile(_DEB_NETWORKING_FILE):
        with salt.utils.fopen(_DEB_NETWORKING_FILE) as fp_:
            for line in fp_:
                if line.startswith('#'):
                    continue
                if line.startswith('CONFIGURE_INTERFACES'):
                    opts['networking'] = line.split('=', 1)[1].strip()

    opts['hostname'] = _parse_hostname()
    opts['domainname'] = _parse_domainname()
    opts['searchdomain'] = _parse_searchdomain()

    return opts
# def __validator_func(value):
# return (valid: True/False, (transformed) value, error message)
def __ipv4_quad(value):
    '''validate an IPv4 dotted-quad address'''
    ok = salt.utils.validate.net.ipv4_addr(value)
    return (ok, value, 'dotted IPv4 address')
def __ipv6(value):
    '''validate an IPv6 address'''
    ok = salt.utils.validate.net.ipv6_addr(value)
    return (ok, value, 'IPv6 address')
def __mac(value):
    '''validate a MAC address'''
    ok = salt.utils.validate.net.mac(value)
    return (ok, value, 'MAC address')
def __int(value):
    '''validate that the value coerces to an integer'''
    try:
        converted = int(value)
    except ValueError:
        return (False, value, 'integer')
    return (True, converted, 'integer')
def __float(value):
    '''validate that the value coerces to a float'''
    try:
        converted = float(value)
    except ValueError:
        return (False, value, 'float')
    return (True, converted, 'float')
def __ipv4_netmask(value):
    '''validate an IPv4 dotted quad or integer CIDR netmask'''
    errmsg = 'dotted quad or integer CIDR (0->32)'
    valid, value, _ = __int(value)
    # fall back to dotted-quad validation when not an int in 0..32
    if not valid or not 0 <= value <= 32:
        valid = salt.utils.validate.net.netmask(value)
    return (valid, value, errmsg)
def __ipv6_netmask(value):
    '''validate an IPv6 integer netmask'''
    errmsg = 'IPv6 netmask (0->128)'
    valid, value, _ = __int(value)
    if valid:
        valid = 0 <= value <= 128
    return (valid, value, errmsg)
def __within2(value, within=None, errmsg=None, dtype=None):
    '''validate that a value is in ``within`` and optionally a ``dtype``'''
    ok, coerced = False, value
    if dtype:
        try:
            coerced = dtype(value)  # TODO: this is a bit loose when dtype is a class
            ok = coerced in within
        except ValueError:
            pass
    else:
        ok = coerced in within
    if errmsg is None:
        if dtype:
            typename = getattr(
                dtype, '__name__',
                hasattr(dtype, '__class__')
                and getattr(dtype.__class__, 'name', dtype))
            errmsg = "{0} within '{1}'".format(typename, within)
        else:
            errmsg = "within '{0}'".format(within)
    return (ok, coerced, errmsg)
def __space_delimited_list(value):
    '''
    Validate that ``value`` contains one or more space-delimited values.

    Accepts either a non-string iterable (returned unchanged) or a string,
    which is split on whitespace.  Returns the usual
    ``(valid, value, errmsg)`` triple used by the other validators.
    '''
    valid, _value, errmsg = False, value, 'space-delimited string'
    try:
        # On Python 3 strings also have __iter__, so exclude them
        # explicitly -- otherwise 'ns1 ns2' would be accepted un-split.
        if hasattr(value, '__iter__') and not isinstance(value, str):
            valid = True  # TODO: a stricter check of the elements?
        else:
            _value = value.split()
            if _value == []:
                raise ValueError
            valid = True
    except AttributeError:
        # value is neither iterable nor splittable (e.g. None)
        pass
    except ValueError:
        pass
    return (valid, _value, errmsg)
# Salt state option name -> keyword used in an ifupdown stanza.
SALT_ATTR_TO_DEBIAN_ATTR_MAP = {
    'dns': 'dns-nameservers',
    'search': 'dns-search',
    'hwaddr': 'hwaddress',  # TODO: this limits bootp functionality
    'ipaddr': 'address',
}

# Inverse mapping, derived from the dict above.
DEBIAN_ATTR_TO_SALT_ATTR_MAP = dict(
    (v, k) for (k, v) in six.iteritems(SALT_ATTR_TO_DEBIAN_ATTR_MAP))

# TODO: identity entries so these names survive the round trip
DEBIAN_ATTR_TO_SALT_ATTR_MAP['address'] = 'address'
DEBIAN_ATTR_TO_SALT_ATTR_MAP['hwaddress'] = 'hwaddress'

IPV4_VALID_PROTO = ['bootp', 'dhcp', 'static', 'manual', 'loopback', 'ppp']

# Attribute -> validator callable (each returns (valid, value, errmsg)).
# NOTE(review): `__within` and `__anything` are not visible in this chunk
# of the file -- confirm they are defined earlier (presumably `__within`
# is a partial of `__within2` and `__anything` accepts any value).
IPV4_ATTR_MAP = {
    'proto': __within(IPV4_VALID_PROTO, dtype=str),
    # ipv4 static & manual
    'address': __ipv4_quad,
    'netmask': __ipv4_netmask,
    'broadcast': __ipv4_quad,
    'metric': __int,
    'gateway': __ipv4_quad,  # supports a colon-delimited list
    'pointopoint': __ipv4_quad,
    'hwaddress': __mac,
    'mtu': __int,
    'scope': __within(['global', 'link', 'host'], dtype=str),
    # dhcp
    'hostname': __anything,
    'leasehours': __int,
    'leasetime': __int,
    'vendor': __anything,
    'client': __anything,
    # bootp
    'bootfile': __anything,
    'server': __ipv4_quad,
    'hwaddr': __mac,
    # tunnel
    'mode': __within(['gre', 'GRE', 'ipip', 'IPIP', '802.3ad'], dtype=str),
    'endpoint': __ipv4_quad,
    'dstaddr': __ipv4_quad,
    'local': __ipv4_quad,
    'ttl': __int,
    # bond
    'slaves': __anything,
    # ppp
    'provider': __anything,
    'unit': __int,
    'options': __anything,
    # resolvconf
    'dns-nameservers': __space_delimited_list,
    'dns-search': __space_delimited_list,
    #
    'vlan-raw-device': __anything,
    #
    'network': __anything,  # i don't know what this is
    'test': __anything,  # TODO
    'enable_ipv6': __anything,  # TODO
}

IPV6_VALID_PROTO = ['auto', 'loopback', 'static', 'manual',
                    'dhcp', 'v4tunnel', '6to4']

IPV6_ATTR_MAP = {
    'proto': __within(IPV6_VALID_PROTO),
    # ipv6 static & manual
    'address': __ipv6,
    'netmask': __ipv6_netmask,
    'broadcast': __ipv6,
    'gateway': __ipv6,  # supports a colon-delimited list
    'hwaddress': __mac,
    'mtu': __int,
    'scope': __within(['global', 'site', 'link', 'host'], dtype=str),
    # inet6 auto
    'privext': __within([0, 1, 2], dtype=int),
    'dhcp': __within([0, 1], dtype=int),
    # inet6 static & manual & dhcp
    'media': __anything,
    'accept_ra': __within([0, 1], dtype=int),
    'autoconf': __within([0, 1], dtype=int),
    'preferred-lifetime': __int,
    'dad-attempts': __int,  # 0 to disable
    'dad-interval': __float,
    # bond
    'slaves': __anything,
    # tunnel
    'mode': __within(['gre', 'GRE', 'ipip', 'IPIP', '802.3ad'], dtype=str),
    'endpoint': __ipv4_quad,
    'local': __ipv4_quad,
    'ttl': __int,
    # resolvconf
    'dns-nameservers': __space_delimited_list,
    'dns-search': __space_delimited_list,
    #
    'vlan-raw-device': __anything,
    'test': __anything,  # TODO
    'enable_ipv6': __anything,  # TODO
}

# Wireless/WPA options; applied on top of either address family.
WIRELESS_ATTR_MAP = {
    'wireless-essid': __anything,
    'wireless-mode': __anything,  # TODO
    'wpa-ap-scan': __within([0, 1, 2], dtype=int),  # TODO
    'wpa-conf': __anything,
    'wpa-driver': __anything,
    'wpa-group': __anything,
    'wpa-key-mgmt': __anything,
    'wpa-pairwise': __anything,
    'wpa-psk': __anything,
    'wpa-proto': __anything,  # partial(__within,
    'wpa-roam': __anything,
    'wpa-ssid': __anything,  # TODO
}

# Address family -> ordered list of attr maps consulted by
# _validate_interface_option (first map containing the attr wins).
ATTRMAPS = {
    'inet': [IPV4_ATTR_MAP, WIRELESS_ATTR_MAP],
    'inet6': [IPV6_ATTR_MAP, WIRELESS_ATTR_MAP]
}
def _validate_interface_option(attr, value, addrfam='inet'):
    '''
    Look up the validator for ``attr`` in the maps registered for
    ``addrfam`` and apply it.

    :param attr: attribute name
    :param value: raw setting value
    :param addrfam: address family ('inet' or 'inet6')
    :return: ``(valid, value, errmsg)`` triple; the first attr map
        containing ``attr`` wins.
    '''
    for attrmap in ATTRMAPS.get(addrfam, []):
        validator = attrmap.get(attr)
        if validator is not None:
            return validator(value)
    return (False, value, 'Unknown validator')
def _parse_interfaces(interface_files=None):
    '''
    Parse /etc/network/interfaces (plus interfaces.d/*) and return the
    currently configured interfaces as an OrderedDict keyed by interface
    name.

    :param interface_files: optional explicit list of files to parse;
        when None, interfaces.d/* plus /etc/network/interfaces are used.
    :raises AttributeError: on a malformed ``iface`` line.
    '''
    if interface_files is None:
        interface_files = []
        # Add this later.
        if os.path.exists(_DEB_NETWORK_DIR):
            interface_files += ['{0}/{1}'.format(_DEB_NETWORK_DIR, dir) for dir in os.listdir(_DEB_NETWORK_DIR)]
        if os.path.isfile(_DEB_NETWORK_FILE):
            interface_files.insert(0, _DEB_NETWORK_FILE)

    adapters = salt.utils.odict.OrderedDict()
    method = -1

    for interface_file in interface_files:
        with salt.utils.fopen(interface_file) as interfaces:
            # This ensures iface_dict exists, but does not ensure we're not reading a new interface.
            iface_dict = {}
            for line in interfaces:
                # Identify the clauses by the first word of each line.
                # Go to the next line if the current line is a comment
                # or all spaces.
                if line.lstrip().startswith('#') or line.isspace():
                    continue
                # Parse the iface clause
                if line.startswith('iface'):
                    sline = line.split()

                    if len(sline) != 4:
                        msg = 'Interface file malformed: {0}.'
                        msg = msg.format(sline)
                        log.error(msg)
                        raise AttributeError(msg)

                    iface_name = sline[1]
                    addrfam = sline[2]
                    method = sline[3]

                    # Create item in dict, if not already there
                    if iface_name not in adapters:
                        adapters[iface_name] = salt.utils.odict.OrderedDict()

                    # Create item in dict, if not already there
                    if 'data' not in adapters[iface_name]:
                        adapters[iface_name]['data'] = salt.utils.odict.OrderedDict()

                    if addrfam not in adapters[iface_name]['data']:
                        adapters[iface_name]['data'][addrfam] = salt.utils.odict.OrderedDict()

                    iface_dict = adapters[iface_name]['data'][addrfam]

                    iface_dict['addrfam'] = addrfam
                    iface_dict['proto'] = method
                    iface_dict['filename'] = interface_file

                # Parse the detail clauses.
                elif line[0].isspace():
                    # conf file attr: dns-nameservers
                    # salt states.network attr: dns
                    attr, valuestr = line.rstrip().split(None, 1)
                    if _attrmaps_contain_attr(attr):
                        if '-' in attr:
                            attrname = attr.replace('-', '_')
                        else:
                            attrname = attr
                        # NOTE: `valid` is deliberately ignored here --
                        # historically invalid values are stored as-is.
                        (valid, value, errmsg) = _validate_interface_option(
                            attr, valuestr, addrfam)
                        iface_dict[attrname] = value
                    elif attr in _REV_ETHTOOL_CONFIG_OPTS:
                        if 'ethtool' not in iface_dict:
                            iface_dict['ethtool'] = salt.utils.odict.OrderedDict()
                        iface_dict['ethtool'][attr] = valuestr
                    elif attr.startswith('bond'):
                        opt = re.split(r'[_-]', attr, maxsplit=1)[1]
                        if 'bonding' not in iface_dict:
                            iface_dict['bonding'] = salt.utils.odict.OrderedDict()
                        iface_dict['bonding'][opt] = valuestr
                    elif attr.startswith('bridge'):
                        opt = re.split(r'[_-]', attr, maxsplit=1)[1]
                        if 'bridging' not in iface_dict:
                            iface_dict['bridging'] = salt.utils.odict.OrderedDict()
                        iface_dict['bridging'][opt] = valuestr
                    elif attr in ['up', 'pre-up', 'post-up',
                                  'down', 'pre-down', 'post-down']:
                        cmd = valuestr
                        cmd_key = '{0}_cmds'.format(re.sub('-', '_', attr))
                        if cmd_key not in iface_dict:
                            iface_dict[cmd_key] = []
                        iface_dict[cmd_key].append(cmd)

                elif line.startswith('auto'):
                    for word in line.split()[1:]:
                        if word not in adapters:
                            adapters[word] = salt.utils.odict.OrderedDict()
                        adapters[word]['enabled'] = True

                elif line.startswith('allow-hotplug'):
                    for word in line.split()[1:]:
                        if word not in adapters:
                            adapters[word] = salt.utils.odict.OrderedDict()
                        adapters[word]['hotplug'] = True

                elif line.startswith('source'):
                    if 'source' not in adapters:
                        adapters['source'] = salt.utils.odict.OrderedDict()

                    # Create item in dict, if not already there
                    if 'data' not in adapters['source']:
                        adapters['source']['data'] = salt.utils.odict.OrderedDict()
                        adapters['source']['data']['sources'] = []
                    adapters['source']['data']['sources'].append(line.split()[1])

    # Return a sorted list of the keys for bond, bridge and ethtool options to
    # ensure a consistent order.  Iterate over a snapshot of the keys:
    # popping malformed entries while iterating the dict itself raises
    # RuntimeError on Python 3.
    for iface_name in list(adapters):
        if iface_name == 'source':
            continue
        if 'data' not in adapters[iface_name]:
            msg = 'Interface file malformed for interface: {0}.'.format(iface_name)
            log.error(msg)
            adapters.pop(iface_name)
            continue
        for opt in ['ethtool', 'bonding', 'bridging']:
            if 'inet' in adapters[iface_name]['data']:
                if opt in adapters[iface_name]['data']['inet']:
                    opt_keys = sorted(adapters[iface_name]['data']['inet'][opt].keys())
                    adapters[iface_name]['data']['inet'][opt + '_keys'] = opt_keys

    return adapters
def _parse_ethtool_opts(opts, iface):
    '''
    Filter ``opts`` down to valid ETHTOOL_OPTS settings.

    Raises AttributeError (via _raise_error_iface) when a value is not
    one of the accepted spellings for that option.
    '''
    config = {}

    if 'autoneg' in opts:
        if opts['autoneg'] in _CONFIG_TRUE:
            config['autoneg'] = 'on'
        elif opts['autoneg'] in _CONFIG_FALSE:
            config['autoneg'] = 'off'
        else:
            _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE)

    if 'duplex' in opts:
        accepted = ['full', 'half']
        if opts['duplex'] not in accepted:
            _raise_error_iface(iface, 'duplex', accepted)
        config['duplex'] = opts['duplex']

    if 'speed' in opts:
        accepted = ['10', '100', '1000', '10000']
        if str(opts['speed']) not in accepted:
            _raise_error_iface(iface, opts['speed'], accepted)
        config['speed'] = opts['speed']

    # offload toggles all share the same on/off validation
    onoff = _CONFIG_TRUE + _CONFIG_FALSE
    for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'):
        if option not in opts:
            continue
        if opts[option] in _CONFIG_TRUE:
            config[option] = 'on'
        elif opts[option] in _CONFIG_FALSE:
            config[option] = 'off'
        else:
            _raise_error_iface(iface, option, onoff)

    return config
def _parse_ethtool_pppoe_opts(opts, iface):
    '''
    Filter ``opts`` down to valid ETHTOOL_PPPOE_OPTS settings.

    Raises AttributeError (via _raise_error_iface) when a value is not
    one of the accepted spellings for that option.
    '''
    config = dict(
        (opt, opts[opt]) for opt in _DEB_CONFIG_PPPOE_OPTS if opt in opts)

    # an explicitly empty provider is rejected
    if 'provider' in opts and not opts['provider']:
        _raise_error_iface(iface, 'provider', _CONFIG_TRUE + _CONFIG_FALSE)

    onoff = _CONFIG_TRUE + _CONFIG_FALSE
    for option in ('noipdefault', 'usepeerdns', 'defaultroute', 'hide-password', 'noauth', 'persist', 'noaccomp'):
        if option not in opts:
            continue
        if opts[option] in _CONFIG_TRUE:
            config[option] = 'True'
        elif opts[option] in _CONFIG_FALSE:
            config[option] = 'False'
        else:
            _raise_error_iface(iface, option, onoff)

    return config
def _parse_settings_bond(opts, iface):
    '''
    Dispatch bonding option parsing to the handler for ``opts['mode']``.

    Raises AttributeError (via _raise_error_iface) when the mode is not
    one of the seven recognized bonding modes.
    '''
    # Defaults shared by every mode-specific parser.
    bond_def = {
        # 803.ad aggregation selection logic
        # 0 for stable (default), 1 for bandwidth, 2 for count
        'ad_select': '0',
        # Max number of transmit queues (default = 16)
        'tx_queues': '16',
        # Link monitoring in milliseconds. Most NICs support this
        'miimon': '100',
        # ARP interval in milliseconds
        'arp_interval': '250',
        # Delay before considering link down in milliseconds (miimon * 2)
        'downdelay': '200',
        # lacp_rate 0: Slow - every 30 seconds; 1: Fast - every 1 second
        'lacp_rate': '0',
        # Max bonds for this driver
        'max_bonds': '1',
        # Milliseconds to wait before enabling a slave after link
        # recovery.  Only used with miimon.
        'updelay': '0',
        # Used with miimon: on = driver sends mii, off = ethtool sends mii
        'use_carrier': 'on',
        # Default. Don't change unless you know what you are doing.
        'xmit_hash_policy': 'layer2',
    }

    dispatch = (
        (('balance-rr', '0'), 'load balancing (round-robin)',
         _parse_settings_bond_0),
        (('active-backup', '1'), 'fault-tolerance (active-backup)',
         _parse_settings_bond_1),
        (('balance-xor', '2'), 'load balancing (xor)',
         _parse_settings_bond_2),
        (('broadcast', '3'), 'fault-tolerance (broadcast)',
         _parse_settings_bond_3),
        (('802.3ad', '4'), 'IEEE 802.3ad Dynamic link aggregation',
         _parse_settings_bond_4),
        (('balance-tlb', '5'), 'transmit load balancing',
         _parse_settings_bond_5),
        (('balance-alb', '6'), 'adaptive load balancing',
         _parse_settings_bond_6),
    )

    for aliases, description, handler in dispatch:
        if opts['mode'] in aliases:
            log.info(
                'Device: {0} Bonding Mode: {1}'.format(iface, description)
            )
            return handler(opts, iface, bond_def)

    valid = [
        '0', '1', '2', '3', '4', '5', '6',
        'balance-rr', 'active-backup', 'balance-xor',
        'broadcast', '802.3ad', 'balance-tlb', 'balance-alb'
    ]
    _raise_error_iface(iface, 'mode', valid)
def _parse_settings_bond_0(opts, iface, bond_def):
    '''
    Build the bonding configuration for mode 0 (balance-rr).

    Raises AttributeError (via _raise_error_iface) on missing/invalid
    ``arp_ip_target`` or a non-integer ``arp_interval``.
    '''
    bond = {'mode': '0'}

    # arp_ip_target is required: a list of 1..16 addresses, stored
    # comma-delimited.
    valid = ['list of ips (up to 16)']
    targets = opts.get('arp_ip_target')
    if isinstance(targets, list) and 1 <= len(targets) <= 16:
        acc = ''
        for addr in targets:  # pylint: disable=C0103
            acc = addr if not acc else acc + ',' + addr
        bond['arp_ip_target'] = acc
    else:
        _raise_error_iface(iface, 'arp_ip_target', valid)

    if 'arp_interval' in opts:
        try:
            int(opts['arp_interval'])
        except ValueError:
            _raise_error_iface(iface, 'arp_interval', ['integer'])
        bond['arp_interval'] = opts['arp_interval']
    else:
        _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
        bond['arp_interval'] = bond_def['arp_interval']

    return bond
def _parse_settings_bond_1(opts, iface, bond_def):
    '''
    Build the bonding configuration for mode 1 (active-backup).

    Raises AttributeError (via _raise_error_iface) on non-integer timing
    options or an unrecognized ``use_carrier`` value.
    '''
    bond = {'mode': '1'}

    for timing_opt in ('miimon', 'downdelay', 'updelay'):
        if timing_opt in opts:
            try:
                int(opts[timing_opt])
            except ValueError:
                _raise_error_iface(iface, timing_opt, ['integer'])
            bond[timing_opt] = opts[timing_opt]
        else:
            _log_default_iface(iface, timing_opt, bond_def[timing_opt])
            bond[timing_opt] = bond_def[timing_opt]

    if 'primary' in opts:
        bond['primary'] = opts['primary']

    # use_carrier is not emitted on Ubuntu >= 16
    if not (__grains__['os'] == "Ubuntu" and __grains__['osrelease_info'][0] >= 16):
        if 'use_carrier' in opts:
            carrier = opts['use_carrier']
            if carrier in _CONFIG_TRUE:
                bond['use_carrier'] = '1'
            elif carrier in _CONFIG_FALSE:
                bond['use_carrier'] = '0'
            else:
                _raise_error_iface(
                    iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
        else:
            _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
            bond['use_carrier'] = bond_def['use_carrier']

    return bond
def _parse_settings_bond_2(opts, iface, bond_def):
    '''
    Build the bonding configuration for mode 2 (balance-xor).

    Raises AttributeError (via _raise_error_iface) on missing/invalid
    ``arp_ip_target``, a non-integer ``arp_interval`` or an unknown
    hashing algorithm.
    '''
    bond = {'mode': '2'}

    # arp_ip_target is required: a list of 1..16 addresses, stored
    # comma-delimited.
    valid = ['list of ips (up to 16)']
    targets = opts.get('arp_ip_target')
    if isinstance(targets, list) and 1 <= len(targets) <= 16:
        acc = ''
        for addr in targets:  # pylint: disable=C0103
            acc = addr if not acc else acc + ',' + addr
        bond['arp_ip_target'] = acc
    else:
        _raise_error_iface(iface, 'arp_ip_target', valid)

    if 'arp_interval' in opts:
        try:
            int(opts['arp_interval'])
        except ValueError:
            _raise_error_iface(iface, 'arp_interval', ['integer'])
        bond['arp_interval'] = opts['arp_interval']
    else:
        _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
        bond['arp_interval'] = bond_def['arp_interval']

    if 'hashing-algorithm' in opts:
        algos = ['layer2', 'layer2+3', 'layer3+4']
        if opts['hashing-algorithm'] not in algos:
            _raise_error_iface(iface, 'hashing-algorithm', algos)
        bond['xmit_hash_policy'] = opts['hashing-algorithm']

    return bond
def _parse_settings_bond_3(opts, iface, bond_def):
    '''
    Build the bonding configuration for mode 3 (broadcast).

    Raises AttributeError (via _raise_error_iface) on non-integer timing
    options or an unrecognized ``use_carrier`` value.
    '''
    bond = {'mode': '3'}

    for timing_opt in ('miimon', 'downdelay', 'updelay'):
        if timing_opt in opts:
            try:
                int(opts[timing_opt])
            except ValueError:
                _raise_error_iface(iface, timing_opt, ['integer'])
            bond[timing_opt] = opts[timing_opt]
        else:
            _log_default_iface(iface, timing_opt, bond_def[timing_opt])
            bond[timing_opt] = bond_def[timing_opt]

    if 'use_carrier' in opts:
        carrier = opts['use_carrier']
        if carrier in _CONFIG_TRUE:
            bond['use_carrier'] = '1'
        elif carrier in _CONFIG_FALSE:
            bond['use_carrier'] = '0'
        else:
            _raise_error_iface(
                iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']

    return bond
def _parse_settings_bond_4(opts, iface, bond_def):
    '''
    Build the bonding configuration for mode 4 (802.3ad / LACP).

    Raises AttributeError (via _raise_error_iface) on non-integer
    bindings, an unknown lacp_rate, use_carrier or hashing algorithm.
    '''
    bond = {'mode': '4'}

    for binding in ('miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select'):
        if binding not in opts:
            _log_default_iface(iface, binding, bond_def[binding])
            bond[binding] = bond_def[binding]
            continue
        if binding == 'lacp_rate':
            # normalize the symbolic names in-place; note callers observe
            # this mutation of opts, matching the historical behavior
            if opts[binding] == 'fast':
                opts[binding] = '1'
            if opts[binding] == 'slow':
                opts[binding] = '0'
            valid = ['fast', '1', 'slow', '0']
        else:
            valid = ['integer']
        try:
            int(opts[binding])
        except ValueError:
            _raise_error_iface(iface, binding, valid)
        bond[binding] = opts[binding]

    if 'use_carrier' in opts:
        carrier = opts['use_carrier']
        if carrier in _CONFIG_TRUE:
            bond['use_carrier'] = '1'
        elif carrier in _CONFIG_FALSE:
            bond['use_carrier'] = '0'
        else:
            _raise_error_iface(
                iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']

    if 'hashing-algorithm' in opts:
        algos = ['layer2', 'layer2+3', 'layer3+4']
        if opts['hashing-algorithm'] not in algos:
            _raise_error_iface(iface, 'hashing-algorithm', algos)
        bond['xmit_hash_policy'] = opts['hashing-algorithm']

    return bond
def _parse_settings_bond_5(opts, iface, bond_def):
    '''
    Build the bonding configuration for mode 5 (balance-tlb).

    Raises AttributeError (via _raise_error_iface) on non-integer timing
    options or an unrecognized ``use_carrier`` value.
    '''
    bond = {'mode': '5'}

    for timing_opt in ('miimon', 'downdelay', 'updelay'):
        if timing_opt in opts:
            try:
                int(opts[timing_opt])
            except ValueError:
                _raise_error_iface(iface, timing_opt, ['integer'])
            bond[timing_opt] = opts[timing_opt]
        else:
            _log_default_iface(iface, timing_opt, bond_def[timing_opt])
            bond[timing_opt] = bond_def[timing_opt]

    if 'use_carrier' in opts:
        carrier = opts['use_carrier']
        if carrier in _CONFIG_TRUE:
            bond['use_carrier'] = '1'
        elif carrier in _CONFIG_FALSE:
            bond['use_carrier'] = '0'
        else:
            _raise_error_iface(
                iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']

    if 'primary' in opts:
        bond['primary'] = opts['primary']

    return bond
def _parse_settings_bond_6(opts, iface, bond_def):
    '''
    Build the configuration dict for a mode 6 (balance-alb) bond.

    Values not supplied in *opts* fall back to *bond_def* (logged);
    invalid values abort via _raise_error_iface.
    '''
    bond = {'mode': '6'}

    for setting in ('miimon', 'downdelay', 'updelay'):
        if setting not in opts:
            _log_default_iface(iface, setting, bond_def[setting])
            bond[setting] = bond_def[setting]
            continue
        try:
            int(opts[setting])
            bond[setting] = opts[setting]
        except ValueError:
            _raise_error_iface(iface, setting, ['integer'])

    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)

    if 'primary' in opts:
        bond['primary'] = opts['primary']

    return bond
def _parse_bridge_opts(opts, iface):
'''
Filters given options and outputs valid settings for BRIDGING_OPTS
If an option has a value that is not expected, this
function will log the Interface, Setting and what was expected.
'''
config = {}
if 'ports' in opts:
if isinstance(opts['ports'], list):
opts['ports'] = ' '.join(opts['ports'])
config.update({'ports': opts['ports']})
for opt in ['ageing', 'fd', 'gcint', 'hello', 'maxage']:
if opt in opts:
try:
float(opts[opt])
config.update({opt: opts[opt]})
except ValueError:
_raise_error_iface(iface, opt, ['float'])
for opt in ['bridgeprio', 'maxwait']:
if opt in opts:
if isinstance(opts[opt], int):
config.update({opt: opts[opt]})
else:
_raise_error_iface(iface, opt, ['integer'])
if 'hw' in opts:
# match 12 hex digits with either : or - as separators between pairs
if re.match('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$',
opts['hw'].lower()):
config.update({'hw': opts['hw']})
else:
_raise_error_iface(iface, 'hw', ['valid MAC address'])
for opt in ['pathcost', 'portprio']:
if opt in opts:
try:
port, cost_or_prio = opts[opt].split()
int(cost_or_prio)
config.update({opt: '{0} {1}'.format(port, cost_or_prio)})
except ValueError:
_raise_error_iface(iface, opt, ['interface integer'])
if 'stp' in opts:
if opts['stp'] in _CONFIG_TRUE:
config.update({'stp': 'on'})
elif opts['stp'] in _CONFIG_FALSE:
config.update({'stp': 'off'})
else:
_raise_error_iface(iface, 'stp', _CONFIG_TRUE + _CONFIG_FALSE)
if 'waitport' in opts:
if isinstance(opts['waitport'], int):
config.update({'waitport': opts['waitport']})
else:
values = opts['waitport'].split()
waitport_time = values.pop(0)
if waitport_time.isdigit() and values:
config.update({
'waitport': '{0} {1}'.format(
waitport_time, ' '.join(values)
)
})
else:
_raise_error_iface(iface, opt, ['integer [interfaces]'])
return config
def _parse_settings_eth(opts, iface_type, enabled, iface):
    '''
    Filters given options and outputs valid settings for a
    network interface.

    Returns an OrderedDict keyed by interface name.  Each entry holds
    'type', optional 'enabled'/'hotplug'/'master' flags and a 'data'
    dict with per-address-family sub-dicts ('inet' and/or 'inet6');
    unused address families are removed before returning.  Invalid
    option values abort via _raise_error_iface.
    '''
    adapters = salt.utils.odict.OrderedDict()
    adapters[iface] = salt.utils.odict.OrderedDict()
    adapters[iface]['type'] = iface_type
    adapters[iface]['data'] = salt.utils.odict.OrderedDict()
    iface_data = adapters[iface]['data']
    iface_data['inet'] = salt.utils.odict.OrderedDict()
    iface_data['inet6'] = salt.utils.odict.OrderedDict()
    if enabled:
        adapters[iface]['enabled'] = True
    if opts.get('hotplug', False):
        adapters[iface]['hotplug'] = True
    # Defaults assume IPv4 (inet) interfaces unless enable_ipv6=True
    def_addrfam = 'inet'
    dual_stack = False
    # If enable_ipv6=True, then expect either IPv6-only or dual stack.
    if 'enable_ipv6' in opts and opts['enable_ipv6']:
        iface_data['inet6']['addrfam'] = 'inet6'
        iface_data['inet6']['netmask'] = '64'  # defaults to 64
        def_addrfam = 'inet6'
        if 'iface_type' in opts and opts['iface_type'] == 'vlan':
            # strip the ".<vlan-id>" suffix to get the raw device name
            iface_data['inet6']['vlan_raw_device'] = (
                re.sub(r'\.\d*', '', iface))
        if 'ipaddr' in opts and 'ipv6ipaddr' in opts:
            # If both 'ipaddr' and 'ipv6ipaddr' are present; expect dual stack
            iface_data['inet']['addrfam'] = 'inet'
            def_addrfam = 'inet'
            dual_stack = True
    else:
        # If enable_ipv6=False|None, IPv6 settings should not be set.
        iface_data['inet']['addrfam'] = 'inet'
    # ethtool options apply to every type except bridges
    if iface_type not in ['bridge']:
        tmp_ethtool = _parse_ethtool_opts(opts, iface)
        if tmp_ethtool:
            # translate salt option names to Debian attribute names
            ethtool = {}
            for item in tmp_ethtool:
                ethtool[_ETHTOOL_CONFIG_OPTS[item]] = tmp_ethtool[item]
            iface_data[def_addrfam]['ethtool'] = ethtool
            # return a list of sorted keys to ensure consistent order
            iface_data[def_addrfam]['ethtool_keys'] = sorted(ethtool)
    # Type-specific settings; each branch tags the default address family.
    if iface_type == 'bridge':
        bridging = _parse_bridge_opts(opts, iface)
        if bridging:
            opts.pop('mode', None)
            iface_data[def_addrfam]['bridging'] = bridging
            iface_data[def_addrfam]['bridging_keys'] = sorted(bridging)
            iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'bond':
        bonding = _parse_settings_bond(opts, iface)
        if bonding:
            opts.pop('mode', None)
            iface_data[def_addrfam]['bonding'] = bonding
            iface_data[def_addrfam]['bonding']['slaves'] = opts['slaves']
            iface_data[def_addrfam]['bonding_keys'] = sorted(bonding)
            iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'slave':
        # slaves are configured manually and point at their master
        adapters[iface]['master'] = opts['master']
        opts['proto'] = 'manual'
        iface_data[def_addrfam]['master'] = adapters[iface]['master']
        iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'vlan':
        iface_data[def_addrfam]['vlan_raw_device'] = re.sub(r'\.\d*', '', iface)
        iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'pppoe':
        tmp_ethtool = _parse_ethtool_pppoe_opts(opts, iface)
        if tmp_ethtool:
            for item in tmp_ethtool:
                adapters[iface]['data'][def_addrfam][_DEB_CONFIG_PPPOE_OPTS[item]] = tmp_ethtool[item]
        iface_data[def_addrfam]['addrfam'] = def_addrfam
    # Route every remaining recognized option into the right addrfam.
    for opt in opts:
        # trim leading "ipv6" from option
        if opt.startswith('ipv6'):
            optname = opt[4:]  # trim off the ipv6
            v6only = True
        else:
            optname = opt
            v6only = False
        _optname = SALT_ATTR_TO_DEBIAN_ATTR_MAP.get(optname, optname)
        if _attrmaps_contain_attr(_optname):
            valuestr = opts[opt]
            # default to 'static' if proto is 'none'
            if optname == 'proto' and valuestr == 'none':
                valuestr = 'static'
            # If option is v6-only, don't validate against inet and always set value
            if v6only:
                (valid, value, errmsg) = _validate_interface_option(
                    _optname, valuestr, addrfam='inet6')
                if not valid:
                    _raise_error_iface(iface, '\'{0}\' \'{1}\''.format(opt, valuestr), [errmsg])
                # replace dashes with underscores for jinja
                _optname = _optname.replace('-', '_')
                iface_data['inet6'][_optname] = value
            # Else, if it's a dual stack, the option may belong in both; apply v4 opt as v6 default
            elif dual_stack:
                valid_once = False
                errmsg = None
                for addrfam in ['inet', 'inet6']:
                    (valid, value, errmsg) = _validate_interface_option(
                        _optname, valuestr, addrfam=addrfam)
                    if valid:
                        valid_once = True
                        # replace dashes with underscores for jinja
                        _optname = _optname.replace('-', '_')
                        # if a v6-only version of this option was set; don't override
                        # otherwise, if dual stack, use the v4 version as a default value for v6
                        # allows overriding with =None
                        if addrfam == 'inet' or _optname not in iface_data['inet6']:
                            iface_data[addrfam][_optname] = value
                if not valid_once:
                    # invalid for both address families; report the last error
                    _raise_error_iface(
                        iface,
                        '\'{0}\' \'{1}\''.format(opt, valuestr),
                        [errmsg]
                    )
            # Else, it goes in the default(only) addrfam
            # Not assuming v4 allows a v6 block to be created without lots of "ipv6" prefixes
            else:
                (valid, value, errmsg) = _validate_interface_option(
                    _optname, valuestr, addrfam=def_addrfam)
                if not valid:
                    _raise_error_iface(
                        iface,
                        '\'{0}\' \'{1}\''.format(opt, valuestr),
                        [errmsg]
                    )
                # replace dashes with underscores for jinja
                _optname = _optname.replace('-', '_')
                iface_data[def_addrfam][_optname] = value
    # up/down hook commands always live in the inet block
    for opt in ['up_cmds', 'pre_up_cmds', 'post_up_cmds',
                'down_cmds', 'pre_down_cmds', 'post_down_cmds']:
        if opt in opts:
            iface_data['inet'][opt] = opts[opt]
    # Drop any address-family block that was never actually activated
    # (its 'addrfam' marker was not set to itself above).
    for addrfam in ['inet', 'inet6']:
        if 'addrfam' in iface_data[addrfam] and iface_data[addrfam]['addrfam'] == addrfam:
            pass
        else:
            iface_data.pop(addrfam)
    return adapters
def _parse_settings_source(opts, iface_type, enabled, iface):
    '''
    Build the adapter structure for a ``source`` stanza in
    /etc/network/interfaces, which includes another configuration file.

    The signature mirrors _parse_settings_eth; *enabled* is accepted for
    interface compatibility.
    '''
    adapters = salt.utils.odict.OrderedDict()
    entry = salt.utils.odict.OrderedDict()
    entry['type'] = iface_type
    entry['data'] = salt.utils.odict.OrderedDict()
    entry['data']['sources'] = [opts['source']]
    adapters[iface] = entry
    return adapters
def _parse_network_settings(opts, current):
    '''
    Filters given options and outputs valid settings for
    the global network settings file.

    *current* supplies fallbacks for 'networking' and 'hostname'.
    Invalid or missing required values abort via _raise_error_network.
    '''
    # Normalize keys
    opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
    current = dict((k.lower(), v) for (k, v) in six.iteritems(current))
    result = {}

    valid = _CONFIG_TRUE + _CONFIG_FALSE
    if 'enabled' not in opts:
        try:
            opts['networking'] = current['networking']
            _log_default_network('networking', current['networking'])
        except KeyError:
            # Bug fix: dict lookups raise KeyError, not ValueError, so the
            # friendly error below was previously unreachable.
            _raise_error_network('networking', valid)
    else:
        opts['networking'] = opts['enabled']

    if opts['networking'] in valid:
        if opts['networking'] in _CONFIG_TRUE:
            result['networking'] = 'yes'
        elif opts['networking'] in _CONFIG_FALSE:
            result['networking'] = 'no'
    else:
        _raise_error_network('networking', valid)

    if 'hostname' not in opts:
        try:
            opts['hostname'] = current['hostname']
            _log_default_network('hostname', current['hostname'])
        except KeyError:
            _raise_error_network('hostname', ['server1.example.com'])

    if opts['hostname']:
        result['hostname'] = opts['hostname']
    else:
        _raise_error_network('hostname', ['server1.example.com'])

    if 'search' in opts:
        result['search'] = opts['search']
    return result
def _parse_routes(iface, opts):
    '''
    Normalize and return the route options for *iface*.

    Keys are lower-cased; a missing ``routes`` key is a fatal error
    reported via _raise_error_routes.
    '''
    # Normalize keys
    normalized = dict((key.lower(), val) for (key, val) in six.iteritems(opts))
    if 'routes' not in normalized:
        _raise_error_routes(iface, 'routes', 'List of routes')
    return dict(normalized)
def _write_file(iface, data, folder, pattern):
    '''
    Render *pattern* with the interface name and write *data* to the
    resulting file inside *folder*; return the path written.

    Raises AttributeError when *folder* does not exist.
    '''
    filename = os.path.join(folder, pattern.format(iface))
    if not os.path.exists(folder):
        msg = '{0} cannot be written. {1} does not exist'.format(filename, folder)
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.flopen(filename, 'w') as fout:
        fout.write(data)
    return filename
def _write_file_routes(iface, data, folder, pattern):
    '''
    Write a route script for *iface* into *folder*, mark it executable
    and return the path written.

    Raises AttributeError when *folder* does not exist.
    '''
    filename = os.path.join(folder, pattern.format(iface))
    if not os.path.exists(folder):
        msg = '{0} cannot be written. {1} does not exist'.format(filename, folder)
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.flopen(filename, 'w') as fout:
        fout.write(data)
    # if-up.d / if-down.d scripts must be executable
    __salt__['file.set_mode'](filename, '0755')
    return filename
def _write_file_network(data, filename, create=False):
    '''
    Write *data* to *filename*.

    If the file does not already exist it is only created when *create*
    is True; otherwise an AttributeError is raised.
    '''
    if not os.path.exists(filename) and not create:
        # Bug fix: the old message used a backslash line continuation inside
        # the string literal, embedding the source indentation in the error.
        msg = ('{0} cannot be written. {0} does not exist '
               'and create is set to False'.format(filename))
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.flopen(filename, 'w') as fout:
        fout.write(data)
def _read_temp(data):
'''
Return what would be written to disk
'''
tout = StringIO()
tout.write(data)
tout.seek(0)
output = tout.readlines()
tout.close()
return output
def _read_temp_ifaces(iface, data):
    '''
    Render the interfaces stanza for *iface* and return it as a list of
    newline-terminated lines, without writing anything to disk.
    '''
    try:
        template = JINJA.get_template('debian_eth.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template debian_eth.jinja')
        return ''
    rendered = template.render({'name': iface, 'data': data})
    # Return as a array so the difflib works
    return ['{0}\n'.format(line) for line in rendered.split('\n')]
def _write_file_ifaces(iface, data, **settings):
    '''
    Write the interface configuration for *iface* to disk.

    Existing stanzas from /etc/network/interfaces are preserved; the
    supplied *data* replaces the stanza for *iface*.  When a 'filename'
    is supplied in *settings* only this interface's stanza is written to
    that (possibly interfaces.d-relative) file; otherwise the whole
    merged configuration is written.  Returns the rendered stanza for
    *iface* split into lines.
    '''
    try:
        eth_template = JINJA.get_template('debian_eth.jinja')
        source_template = JINJA.get_template('debian_source.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template debian_eth.jinja')
        return ''

    # Read /etc/network/interfaces into a dict and apply supplied settings
    adapters = _parse_interfaces()
    adapters[iface] = data

    ifcfg = ''
    saved_ifcfg = ''
    for adapter in adapters:
        if 'type' in adapters[adapter] and adapters[adapter]['type'] == 'source':
            tmp = source_template.render({'name': adapter, 'data': adapters[adapter]})
        else:
            tmp = eth_template.render({'name': adapter, 'data': adapters[adapter]})
        ifcfg = ifcfg + tmp
        if adapter == iface:
            saved_ifcfg = tmp

    _separate_file = False
    if 'filename' in settings:
        if not settings['filename'].startswith('/'):
            filename = '{0}/{1}'.format(_DEB_NETWORK_DIR, settings['filename'])
        else:
            filename = settings['filename']
        _separate_file = True
    elif 'filename' in adapters[iface]['data']:
        # Bug fix: previously read the stale loop variable and assigned the
        # whole data dict instead of the configured filename string.
        filename = adapters[iface]['data']['filename']
    else:
        filename = _DEB_NETWORK_FILE

    if not os.path.exists(os.path.dirname(filename)):
        msg = '{0} cannot be written.'.format(os.path.dirname(filename))
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.flopen(filename, 'w') as fout:
        if _separate_file:
            fout.write(saved_ifcfg)
        else:
            fout.write(ifcfg)

    # Return as a array so the difflib works
    return saved_ifcfg.split('\n')
def _write_file_ppp_ifaces(iface, data):
    '''
    Render the PPP peer configuration for *iface* and write it under
    /etc/ppp/peers/<provider>; return the path written.
    '''
    try:
        template = JINJA.get_template('debian_ppp_eth.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template debian_ppp_eth.jinja')
        return ''

    adapters = _parse_interfaces()
    adapters[iface] = data

    ifcfg = template.render({'data': adapters[iface]})

    filename = _DEB_PPP_DIR + '/' + adapters[iface]['data']['inet']['provider']
    if not os.path.exists(os.path.dirname(filename)):
        msg = '{0} cannot be written.'.format(os.path.dirname(filename))
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.fopen(filename, 'w') as fout:
        fout.write(ifcfg)

    # Return as a array so the difflib works
    return filename
def build_bond(iface, **settings):
    '''
    Create a bond script in /etc/modprobe.d with the passed settings
    and load the bonding kernel module.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_bond bond0 mode=balance-alb
    '''
    # Major version digit, e.g. '5' for Debian Lenny
    deb_major = __grains__['osrelease'][:1]
    opts = _parse_settings_bond(settings, iface)
    try:
        template = JINJA.get_template('conf.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template conf.jinja')
        return ''
    data = template.render({'name': iface, 'bonding': opts})
    # test=True: return what would be written, without touching disk
    if 'test' in settings and settings['test']:
        return _read_temp(data)
    _write_file(iface, data, _DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    path = os.path.join(_DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    if deb_major == '5':
        # Debian 5 reads module options from /etc/modprobe.conf: strip any
        # stale alias/options lines for this interface, then append ours.
        for line_type in ('alias', 'options'):
            cmd = ['sed', '-i', '-e', r'/^{0}\s{1}.*/d'.format(line_type, iface),
                   '/etc/modprobe.conf']
            __salt__['cmd.run'](cmd, python_shell=False)
        __salt__['file.append']('/etc/modprobe.conf', path)
    # Load kernel module
    __salt__['kmod.load']('bonding')
    # install ifenslave-2.6
    __salt__['pkg.install']('ifenslave-2.6')
    return _read_file(path)
def build_interface(iface, iface_type, enabled, **settings):
    '''
    Build an interface script for a network interface.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_interface eth0 eth <settings>
    '''
    iface = iface.lower()
    iface_type = iface_type.lower()

    if iface_type not in _IFACE_TYPES:
        _raise_error_iface(iface, iface_type, _IFACE_TYPES)

    # Default to a static configuration when no proto is supplied
    if 'proto' not in settings:
        settings['proto'] = 'static'

    if iface_type == 'slave':
        settings['slave'] = 'yes'
        if 'master' not in settings:
            msg = 'master is a required setting for slave interfaces'
            log.error(msg)
            raise AttributeError(msg)
    elif iface_type == 'vlan':
        settings['vlan'] = 'yes'
        __salt__['pkg.install']('vlan')
    elif iface_type == 'pppoe':
        settings['pppoe'] = 'yes'
        if not __salt__['pkg.version']('ppp'):
            # Fix: the install result was previously bound to an unused local
            __salt__['pkg.install']('ppp')
    elif iface_type == 'bond':
        if 'slaves' not in settings:
            msg = 'slaves is a required setting for bond interfaces'
            log.error(msg)
            raise AttributeError(msg)
    elif iface_type == 'bridge':
        if 'ports' not in settings:
            msg = (
                'ports is a required setting for bridge interfaces on Debian '
                'or Ubuntu based systems'
            )
            log.error(msg)
            raise AttributeError(msg)
        __salt__['pkg.install']('bridge-utils')

    if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'pppoe']:
        opts = _parse_settings_eth(settings, iface_type, enabled, iface)
    if iface_type in ['source']:
        opts = _parse_settings_source(settings, iface_type, enabled, iface)

    # test=True: return what would be written, without touching disk
    if 'test' in settings and settings['test']:
        return _read_temp_ifaces(iface, opts[iface])

    ifcfg = _write_file_ifaces(iface, opts[iface], **settings)
    if iface_type == 'pppoe':
        _write_file_ppp_ifaces(iface, opts[iface])

    # ensure lines in list end with newline, so difflib works
    return [item + '\n' for item in ifcfg]
def build_routes(iface, **settings):
    '''
    Add route scripts for a network interface using up commands.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_routes eth0 <settings>
    '''
    iface = iface.lower()
    opts = _parse_routes(iface, settings)
    try:
        template = JINJA.get_template('route_eth.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template route_eth.jinja')
        return ''

    # Matching add/del scripts go into if-up.d and if-down.d respectively
    add_routecfg = template.render(
        route_type='add', routes=opts['routes'], iface=iface)
    del_routecfg = template.render(
        route_type='del', routes=opts['routes'], iface=iface)

    if 'test' in settings and settings['test']:
        return _read_temp(add_routecfg + del_routecfg)

    results = _read_file(
        _write_file_routes(iface, add_routecfg, _DEB_NETWORK_UP_DIR, 'route-{0}'))
    results += _read_file(
        _write_file_routes(iface, del_routecfg, _DEB_NETWORK_DOWN_DIR, 'route-{0}'))
    return results
def down(iface, iface_type):
    '''
    Shutdown a network interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.down eth0 eth
    '''
    # Slave devices are controlled by the master; 'source' stanzas are
    # file includes and are never brought down.
    if iface_type in ('slave', 'source'):
        return None
    return __salt__['cmd.run'](['ifdown', iface])
def get_bond(iface):
    '''
    Return the content of a bond script

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_bond bond0
    '''
    return _read_file(
        os.path.join(_DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface)))
def get_interface(iface):
    '''
    Return the contents of an interface script

    Renders the parsed on-disk configuration for *iface* back through
    the appropriate template; returns [] if the interface is unknown.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_interface eth0
    '''
    adapters = _parse_interfaces()
    if iface in adapters:
        try:
            # NOTE(review): this compares the *interface name* to the literal
            # 'source' rather than checking the parsed entry's 'type' --
            # confirm source stanzas are really keyed under the name 'source'.
            if iface == 'source':
                template = JINJA.get_template('debian_source.jinja')
            else:
                template = JINJA.get_template('debian_eth.jinja')
        except jinja2.exceptions.TemplateNotFound:
            log.error('Could not load template debian_eth.jinja')
            return ''
        ifcfg = template.render({'name': iface, 'data': adapters[iface]})
        # ensure lines in list end with newline, so difflib works
        return [item + '\n' for item in ifcfg.split('\n')]
    else:
        return []
def up(iface, iface_type):  # pylint: disable=C0103
    '''
    Start up a network interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.up eth0 eth
    '''
    # Slave devices are controlled by the master; 'source' stanzas are
    # file includes and are never brought up.
    if iface_type in ('slave', 'source'):
        return None
    return __salt__['cmd.run'](['ifup', iface])
def get_network_settings():
    '''
    Return the contents of the global network script.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_network_settings
    '''
    # Ubuntu 12.04+ no longer uses /etc/default/networking; derive the
    # settings from service state and the hostname/resolv.conf parsers.
    skip_etc_default_networking = (
        __grains__['osfullname'] == 'Ubuntu' and
        int(__grains__['osrelease'].split('.')[0]) >= 12)
    if skip_etc_default_networking:
        settings = {}
        if __salt__['service.available']('networking'):
            if __salt__['service.status']('networking'):
                settings['networking'] = "yes"
            else:
                settings['networking'] = "no"
        else:
            settings['networking'] = "no"
        hostname = _parse_hostname()
        domainname = _parse_domainname()
        settings['hostname'] = hostname
        settings['domainname'] = domainname
    else:
        settings = _parse_current_network_settings()
    try:
        template = JINJA.get_template('display-network.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template display-network.jinja')
        return ''
    network = template.render(settings)
    # Return as a list of lines for difflib-friendly output
    return _read_temp(network)
def get_routes(iface):
    '''
    Return the routes for the interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_routes eth0
    '''
    # Route scripts live in both the if-up.d and if-down.d directories
    up_path = os.path.join(_DEB_NETWORK_UP_DIR, 'route-{0}'.format(iface))
    down_path = os.path.join(_DEB_NETWORK_DOWN_DIR, 'route-{0}'.format(iface))
    return _read_file(up_path) + _read_file(down_path)
def apply_network_settings(**settings):
    '''
    Apply global network configuration.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.apply_network_settings
    '''
    settings.setdefault('require_reboot', False)
    settings.setdefault('apply_hostname', False)

    hostname_res = True
    if settings['apply_hostname'] in _CONFIG_TRUE:
        if 'hostname' in settings:
            hostname_res = __salt__['network.mod_hostname'](settings['hostname'])
        else:
            log.warning(
                'The network state sls is trying to apply hostname '
                'changes but no hostname is defined.'
            )
            hostname_res = False

    if settings['require_reboot'] in _CONFIG_TRUE:
        log.warning(
            'The network state sls is requiring a reboot of the system to '
            'properly apply network configuration.'
        )
        res = True
    else:
        # Restart networking so the new configuration takes effect
        stop = __salt__['service.stop']('networking')
        time.sleep(2)
        res = stop and __salt__['service.start']('networking')

    return hostname_res and res
def build_network_settings(**settings):
    '''
    Build the global network script.

    Writes /etc/default/networking (or toggles services on Ubuntu 12+),
    /etc/hostname and, when the domain or search domain changed,
    /etc/resolv.conf.  Returns the rendered settings as a list of lines.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_network_settings <settings>
    '''
    changes = []
    # Read current configuration and store default values
    current_network_settings = _parse_current_network_settings()
    # Build settings
    opts = _parse_network_settings(settings, current_network_settings)
    # Ubuntu has moved away from /etc/default/networking
    # beginning with the 12.04 release so we disable or enable
    # the networking related services on boot
    skip_etc_default_networking = (
        __grains__['osfullname'] == 'Ubuntu' and
        int(__grains__['osrelease'].split('.')[0]) >= 12)
    if skip_etc_default_networking:
        if opts['networking'] == 'yes':
            service_cmd = 'service.enable'
        else:
            service_cmd = 'service.disable'
        if __salt__['service.available']('NetworkManager'):
            __salt__[service_cmd]('NetworkManager')
        if __salt__['service.available']('networking'):
            __salt__[service_cmd]('networking')
    else:
        try:
            template = JINJA.get_template('network.jinja')
        except jinja2.exceptions.TemplateNotFound:
            log.error('Could not load template network.jinja')
            return ''
        network = template.render(opts)
        # test=True: return what would be written, without touching disk
        if 'test' in settings and settings['test']:
            return _read_temp(network)
        # Write settings
        _write_file_network(network, _DEB_NETWORKING_FILE, True)
    # Write hostname to /etc/hostname; a dotted name is split into
    # hostname (first label) and domainname (the rest).
    sline = opts['hostname'].split('.', 1)
    opts['hostname'] = sline[0]
    hostname = '{0}\n' . format(opts['hostname'])
    current_domainname = current_network_settings['domainname']
    current_searchdomain = current_network_settings['searchdomain']
    # Only write the hostname if it has changed
    if not opts['hostname'] == current_network_settings['hostname']:
        if not ('test' in settings and settings['test']):
            # TODO: replace with a call to network.mod_hostname instead
            _write_file_network(hostname, _DEB_HOSTNAME_FILE)
    # Determine whether the domain part changed
    new_domain = False
    if len(sline) > 1:
        new_domainname = sline[1]
        if new_domainname != current_domainname:
            domainname = new_domainname
            opts['domainname'] = new_domainname
            new_domain = True
        else:
            domainname = current_domainname
            opts['domainname'] = domainname
    else:
        domainname = current_domainname
        opts['domainname'] = domainname
    # Determine whether the search domain changed
    new_search = False
    if 'search' in opts:
        new_searchdomain = opts['search']
        if new_searchdomain != current_searchdomain:
            searchdomain = new_searchdomain
            opts['searchdomain'] = new_searchdomain
            new_search = True
        else:
            searchdomain = current_searchdomain
            opts['searchdomain'] = searchdomain
    else:
        searchdomain = current_searchdomain
        opts['searchdomain'] = searchdomain
    # If the domain changes, then we should write the resolv.conf file.
    if new_domain or new_search:
        # Look for existing domain line and update if necessary
        contents = _parse_resolve()
        domain_prog = re.compile(r'domain\s+(?P<domain_name>\S+)')
        search_prog = re.compile(r'search\s+(?P<search_domain>\S+)')
        new_contents = []
        found_domain = False
        found_search = False
        for item in contents:
            domain_match = domain_prog.match(item)
            search_match = search_prog.match(item)
            if domain_match:
                new_contents.append('domain {0}\n' . format(domainname))
                found_domain = True
            elif search_match:
                new_contents.append('search {0}\n' . format(searchdomain))
                found_search = True
            else:
                new_contents.append(item)
        # A domain line didn't exist so we'll add one in
        # with the new domainname
        if not found_domain:
            new_contents.insert(0, 'domain {0}\n' . format(domainname))
        # A search line didn't exist so we'll add one in
        # with the new search domain
        if not found_search:
            if new_contents[0].startswith('domain'):
                new_contents.insert(1, 'search {0}\n' . format(searchdomain))
            else:
                new_contents.insert(0, 'search {0}\n' . format(searchdomain))
        new_resolv = ''.join(new_contents)
        # Write /etc/resolv.conf
        if not ('test' in settings and settings['test']):
            _write_file_network(new_resolv, _DEB_RESOLV_FILE)
    # used for returning the results back
    try:
        template = JINJA.get_template('display-network.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template display-network.jinja')
        return ''
    network = template.render(opts)
    changes.extend(_read_temp(network))
    return changes
| 32.62942 | 114 | 0.587917 | # -*- coding: utf-8 -*-
'''
The networking module for Debian based distros
References:
* http://www.debian.org/doc/manuals/debian-reference/ch05.en.html
'''
# Import python libs
from __future__ import absolute_import
import functools
import logging
import os.path
import os
import re
import time
# Import third party libs
import jinja2
import jinja2.exceptions
import salt.ext.six as six
from salt.ext.six.moves import StringIO # pylint: disable=import-error,no-name-in-module
# Import salt libs
import salt.utils
import salt.utils.templates
import salt.utils.validate.net
import salt.utils.odict
# Set up logging
log = logging.getLogger(__name__)
# Set up the Jinja template environment, rooted at the bundled
# templates/debian_ip directory shipped with Salt.
JINJA = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'debian_ip')
    )
)
# Define the module's virtual name (loaded as the 'ip' module)
__virtualname__ = 'ip'
def __virtual__():
    '''
    Only load this module on Debian-derived distributions.
    '''
    if __grains__['os_family'] != 'Debian':
        return (False, 'The debian_ip module could not be loaded: '
                'unsupported OS family')
    return __virtualname__
# Map salt-facing ethtool option names to the attribute names used in
# the rendered Debian interfaces(5) stanzas.
_ETHTOOL_CONFIG_OPTS = {
    'speed': 'link-speed',
    'duplex': 'link-duplex',
    'autoneg': 'ethernet-autoneg',
    'ethernet-port': 'ethernet-port',
    'wol': 'ethernet-wol',
    'driver-message-level': 'driver-message-level',
    'ethernet-pause-rx': 'ethernet-pause-rx',
    'ethernet-pause-tx': 'ethernet-pause-tx',
    'ethernet-pause-autoneg': 'ethernet-pause-autoneg',
    'rx': 'offload-rx',
    'tx': 'offload-tx',
    'sg': 'offload-sg',
    'tso': 'offload-tso',
    'ufo': 'offload-ufo',
    'gso': 'offload-gso',
    'gro': 'offload-gro',
    'lro': 'offload-lro',
    'hardware-irq-coalesce-adaptive-rx': 'hardware-irq-coalesce-adaptive-rx',
    'hardware-irq-coalesce-adaptive-tx': 'hardware-irq-coalesce-adaptive-tx',
    'hardware-irq-coalesce-rx-usecs': 'hardware-irq-coalesce-rx-usecs',
    'hardware-irq-coalesce-rx-frames': 'hardware-irq-coalesce-rx-frames',
    'hardware-dma-ring-rx': 'hardware-dma-ring-rx',
    'hardware-dma-ring-rx-mini': 'hardware-dma-ring-rx-mini',
    'hardware-dma-ring-rx-jumbo': 'hardware-dma-ring-rx-jumbo',
    'hardware-dma-ring-tx': 'hardware-dma-ring-tx',
}
# Inverse of _ETHTOOL_CONFIG_OPTS: map Debian attribute names back to the
# salt-facing option names (used when parsing existing configuration).
_REV_ETHTOOL_CONFIG_OPTS = {
    'link-speed': 'speed',
    'link-duplex': 'duplex',
    'ethernet-autoneg': 'autoneg',
    'ethernet-port': 'ethernet-port',
    'ethernet-wol': 'wol',
    'driver-message-level': 'driver-message-level',
    'ethernet-pause-rx': 'ethernet-pause-rx',
    'ethernet-pause-tx': 'ethernet-pause-tx',
    'ethernet-pause-autoneg': 'ethernet-pause-autoneg',
    'offload-rx': 'rx',
    'offload-tx': 'tx',
    'offload-sg': 'sg',
    'offload-tso': 'tso',
    'offload-ufo': 'ufo',
    'offload-gso': 'gso',
    'offload-lro': 'lro',
    'offload-gro': 'gro',
    'hardware-irq-coalesce-adaptive-rx': 'hardware-irq-coalesce-adaptive-rx',
    'hardware-irq-coalesce-adaptive-tx': 'hardware-irq-coalesce-adaptive-tx',
    'hardware-irq-coalesce-rx-usecs': 'hardware-irq-coalesce-rx-usecs',
    'hardware-irq-coalesce-rx-frames': 'hardware-irq-coalesce-rx-frames',
    'hardware-dma-ring-rx': 'hardware-dma-ring-rx',
    'hardware-dma-ring-rx-mini': 'hardware-dma-ring-rx-mini',
    'hardware-dma-ring-rx-jumbo': 'hardware-dma-ring-rx-jumbo',
    'hardware-dma-ring-tx': 'hardware-dma-ring-tx',
}
# PPPoE options accepted by the PPP peer template (an identity map; the
# keys double as the recognized setting names).
_DEB_CONFIG_PPPOE_OPTS = {
    'user': 'user',
    'password': 'password',
    'provider': 'provider',
    'pppoe_iface': 'pppoe_iface',
    'noipdefault': 'noipdefault',
    'usepeerdns': 'usepeerdns',
    'defaultroute': 'defaultroute',
    'holdoff': 'holdoff',
    'maxfail': 'maxfail',
    'hide-password': 'hide-password',
    'lcp-echo-interval': 'lcp-echo-interval',
    'lcp-echo-failure': 'lcp-echo-failure',
    'connect': 'connect',
    'noauth': 'noauth',
    'persist': 'persist',
    'mtu': 'mtu',
    'noaccomp': 'noaccomp',
    'linkname': 'linkname',
}
# Locations of the Debian network configuration files and directories
# managed by this module.
_DEB_ROUTES_FILE = '/etc/network/routes'
_DEB_NETWORK_FILE = '/etc/network/interfaces'
_DEB_NETWORK_DIR = '/etc/network/interfaces.d/'
_DEB_NETWORK_UP_DIR = '/etc/network/if-up.d/'
_DEB_NETWORK_DOWN_DIR = '/etc/network/if-down.d/'
_DEB_NETWORK_CONF_FILES = '/etc/modprobe.d/'
_DEB_NETWORKING_FILE = '/etc/default/networking'
_DEB_HOSTNAME_FILE = '/etc/hostname'
_DEB_RESOLV_FILE = '/etc/resolv.conf'
_DEB_PPP_DIR = '/etc/ppp/peers/'
# Values accepted as boolean true/false in incoming settings.
_CONFIG_TRUE = ['yes', 'on', 'true', '1', True]
_CONFIG_FALSE = ['no', 'off', 'false', '0', False]
# Interface types understood by build_interface().
_IFACE_TYPES = [
    'eth', 'bond', 'alias', 'clone',
    'ipsec', 'dialup', 'bridge', 'slave',
    'vlan', 'pppoe', 'source',
]
def _error_msg_iface(iface, option, expected):
'''
Build an appropriate error message from a given option and
a list of expected values.
'''
msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]'
return msg.format(iface, option, '|'.join(expected))
def _error_msg_routes(iface, option, expected):
'''
Build an appropriate error message from a given option and
a list of expected values.
'''
msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]'
return msg.format(iface, option, expected)
def _log_default_iface(iface, opt, value):
    '''
    Log (at INFO) that a default value is being used for an interface
    option.
    '''
    # Lazy %-style args: the message is only formatted if INFO is enabled
    log.info('Using default option -- Interface: %s Option: %s Value: %s',
             iface, opt, value)
def _error_msg_network(option, expected):
'''
Build an appropriate error message from a given option and
a list of expected values.
'''
msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]'
return msg.format(option, '|'.join(expected))
def _log_default_network(opt, value):
    '''
    Log (at INFO) that an existing global network setting is being kept.
    '''
    # Lazy %-style args: the message is only formatted if INFO is enabled
    log.info('Using existing setting -- Setting: %s Value: %s', opt, value)
def _raise_error_iface(iface, option, expected):
    '''
    Log an invalid-interface-option message and raise it as an
    AttributeError.
    '''
    message = _error_msg_iface(iface, option, expected)
    log.error(message)
    raise AttributeError(message)
def _raise_error_network(option, expected):
    '''
    Log an invalid-network-setting message and raise it as AttributeError.
    '''
    message = _error_msg_network(option, expected)
    log.error(message)
    raise AttributeError(message)
def _raise_error_routes(iface, option, expected):
    '''
    Log an invalid-route-option message and raise it as AttributeError.
    '''
    message = _error_msg_routes(iface, option, expected)
    log.error(message)
    raise AttributeError(message)
def _read_file(path):
    '''
    Read a text file and return its contents as a list of lines.

    Returns an empty list when the file cannot be read, so callers can
    always iterate and index-guard uniformly.  (Previously returned ''
    on error, giving the function an inconsistent return type; both
    values are falsy, so this is backward-compatible.)
    '''
    try:
        with salt.utils.flopen(path, 'rb') as contents:
            return [salt.utils.to_str(line) for line in contents.readlines()]
    except (OSError, IOError):
        return []
def _parse_resolve():
    '''
    Return the raw lines of /etc/resolv.conf.
    '''
    return _read_file(_DEB_RESOLV_FILE)
def _parse_domainname():
    '''
    Scan /etc/resolv.conf for a ``domain`` entry and return its value,
    or an empty string when no such entry exists.
    '''
    matcher = re.compile(r'domain\s+(?P<domain_name>\S+)')
    for entry in _read_file(_DEB_RESOLV_FILE):
        found = matcher.match(entry)
        if found:
            return found.group('domain_name')
    return ''
def _parse_searchdomain():
    '''
    Scan /etc/resolv.conf for a ``search`` entry and return its value,
    or an empty string when no such entry exists.
    '''
    matcher = re.compile(r'search\s+(?P<search_domain>\S+)')
    for entry in _read_file(_DEB_RESOLV_FILE):
        found = matcher.match(entry)
        if found:
            return found.group('search_domain')
    return ''
def _parse_hostname():
    '''
    Return the hostname stored in /etc/hostname (first line, without the
    trailing newline), or an empty string when the file is unreadable.
    '''
    contents = _read_file(_DEB_HOSTNAME_FILE)
    if not contents:
        return ''
    return contents[0].split('\n')[0]
def _parse_current_network_settings():
    '''
    Parse /etc/default/networking and return current configuration

    Returns an ordered dict with the keys ``networking``, ``hostname``,
    ``domainname`` and ``searchdomain``; each value defaults to '' when
    the corresponding file or entry is missing.
    '''
    opts = salt.utils.odict.OrderedDict()
    opts['networking'] = ''
    if os.path.isfile(_DEB_NETWORKING_FILE):
        with salt.utils.fopen(_DEB_NETWORKING_FILE) as contents:
            for line in contents:
                if line.startswith('#'):
                    continue
                elif line.startswith('CONFIGURE_INTERFACES'):
                    # keep everything right of the first '='
                    opts['networking'] = line.split('=', 1)[1].strip()
    hostname = _parse_hostname()
    domainname = _parse_domainname()
    searchdomain = _parse_searchdomain()
    opts['hostname'] = hostname
    opts['domainname'] = domainname
    opts['searchdomain'] = searchdomain
    return opts
# Validator contract used by the ATTR maps below:
# def __validator_func(value):
#     return (valid: True/False, (transformed) value, error message)
def __ipv4_quad(value):
    '''Validate a dotted-quad IPv4 address.'''
    ok = salt.utils.validate.net.ipv4_addr(value)
    return (ok, value, 'dotted IPv4 address')
def __ipv6(value):
    '''Validate an IPv6 address.'''
    ok = salt.utils.validate.net.ipv6_addr(value)
    return (ok, value, 'IPv6 address')
def __mac(value):
    '''Validate a MAC address.'''
    ok = salt.utils.validate.net.mac(value)
    return (ok, value, 'MAC address')
def __anything(value):
    '''Accept any value unchanged (no validation performed).'''
    return True, value, None
def __int(value):
    '''Validate that ``value`` converts to an integer; return the
    converted value on success, the original on failure.'''
    try:
        return (True, int(value), 'integer')
    except ValueError:
        return (False, value, 'integer')
def __float(value):
    '''Validate that ``value`` converts to a float; return the
    converted value on success, the original on failure.'''
    try:
        return (True, float(value), 'float')
    except ValueError:
        return (False, value, 'float')
def __ipv4_netmask(value):
    '''Validate an IPv4 netmask given as a dotted quad or an integer
    CIDR prefix (0-32).'''
    errmsg = 'dotted quad or integer CIDR (0->32)'
    valid, value, _ = __int(value)
    if not (valid and 0 <= value <= 32):
        # not a usable CIDR integer; fall back to dotted-quad validation
        valid = salt.utils.validate.net.netmask(value)
    return (valid, value, errmsg)
def __ipv6_netmask(value):
    '''Validate an IPv6 netmask given as an integer prefix length (0-128).'''
    errmsg = 'IPv6 netmask (0->128)'
    valid, value, _ = __int(value)
    return (valid and 0 <= value <= 128, value, errmsg)
def __within2(value, within=None, errmsg=None, dtype=None):
    '''Validate that ``value`` is a member of ``within``; when ``dtype``
    is given, the value is first coerced with it.'''
    _value = value
    valid = False
    if dtype:
        try:
            _value = dtype(value)  # TODO: this is a bit loose when dtype is a class
            valid = _value in within
        except ValueError:
            pass
    else:
        valid = _value in within
    if errmsg is None:
        if dtype:
            typename = getattr(dtype, '__name__',
                               hasattr(dtype, '__class__')
                               and getattr(dtype.__class__, 'name', dtype))
            errmsg = '{0} within \'{1}\''.format(typename, within)
        else:
            errmsg = 'within \'{0}\''.format(within)
    return (valid, _value, errmsg)
def __within(within=None, errmsg=None, dtype=None):
    '''Return a __within2 validator bound to the given
    ``within``/``errmsg``/``dtype`` arguments.'''
    return functools.partial(
        __within2, within=within, errmsg=errmsg, dtype=dtype)
def __space_delimited_list(value):
    '''
    Validate that ``value`` holds one or more space-delimited values.

    A string is split on whitespace and returned as a list; any other
    iterable (e.g. a list from the SLS data) is accepted as-is.  Empty
    strings and non-iterable scalars are rejected.
    '''
    valid, _value, errmsg = False, value, 'space-delimited string'
    # Test for string-likeness (a .split method) BEFORE __iter__: under
    # Python 3 strings are iterable, so the original __iter__-first check
    # wrongly accepted a raw string without splitting it into a list.
    if hasattr(value, 'split'):
        _value = value.split()
        if _value:
            valid = True
    elif hasattr(value, '__iter__'):
        valid = True  # TODO: should the members be validated too?
    return (valid, _value, errmsg)
# Map Salt state attribute names to the names used in
# /etc/network/interfaces, and build the reverse mapping.
SALT_ATTR_TO_DEBIAN_ATTR_MAP = {
    'dns': 'dns-nameservers',
    'search': 'dns-search',
    'hwaddr': 'hwaddress',  # TODO: this limits bootp functionality
    'ipaddr': 'address',
}
DEBIAN_ATTR_TO_SALT_ATTR_MAP = dict(
    (v, k) for (k, v) in six.iteritems(SALT_ATTR_TO_DEBIAN_ATTR_MAP))
# TODO
DEBIAN_ATTR_TO_SALT_ATTR_MAP['address'] = 'address'
DEBIAN_ATTR_TO_SALT_ATTR_MAP['hwaddress'] = 'hwaddress'
IPV4_VALID_PROTO = ['bootp', 'dhcp', 'static', 'manual', 'loopback', 'ppp']
# Per-attribute validators for the inet (IPv4) address family.  Each
# value is a callable returning (valid, value, errmsg) -- see the
# validator contract above.
IPV4_ATTR_MAP = {
    'proto': __within(IPV4_VALID_PROTO, dtype=str),
    # ipv4 static & manual
    'address': __ipv4_quad,
    'netmask': __ipv4_netmask,
    'broadcast': __ipv4_quad,
    'metric': __int,
    'gateway': __ipv4_quad,  # supports a colon-delimited list
    'pointopoint': __ipv4_quad,
    'hwaddress': __mac,
    'mtu': __int,
    'scope': __within(['global', 'link', 'host'], dtype=str),
    # dhcp
    'hostname': __anything,
    'leasehours': __int,
    'leasetime': __int,
    'vendor': __anything,
    'client': __anything,
    # bootp
    'bootfile': __anything,
    'server': __ipv4_quad,
    'hwaddr': __mac,
    # tunnel
    'mode': __within(['gre', 'GRE', 'ipip', 'IPIP', '802.3ad'], dtype=str),
    'endpoint': __ipv4_quad,
    'dstaddr': __ipv4_quad,
    'local': __ipv4_quad,
    'ttl': __int,
    # bond
    'slaves': __anything,
    # ppp
    'provider': __anything,
    'unit': __int,
    'options': __anything,
    # resolvconf
    'dns-nameservers': __space_delimited_list,
    'dns-search': __space_delimited_list,
    #
    'vlan-raw-device': __anything,
    #
    'network': __anything,  # i don't know what this is
    'test': __anything,  # TODO
    'enable_ipv6': __anything,  # TODO
}
IPV6_VALID_PROTO = ['auto', 'loopback', 'static', 'manual',
                    'dhcp', 'v4tunnel', '6to4']
# Per-attribute validators for the inet6 (IPv6) address family.
IPV6_ATTR_MAP = {
    'proto': __within(IPV6_VALID_PROTO),
    # ipv6 static & manual
    'address': __ipv6,
    'netmask': __ipv6_netmask,
    'broadcast': __ipv6,
    'gateway': __ipv6,  # supports a colon-delimited list
    'hwaddress': __mac,
    'mtu': __int,
    'scope': __within(['global', 'site', 'link', 'host'], dtype=str),
    # inet6 auto
    'privext': __within([0, 1, 2], dtype=int),
    'dhcp': __within([0, 1], dtype=int),
    # inet6 static & manual & dhcp
    'media': __anything,
    'accept_ra': __within([0, 1], dtype=int),
    'autoconf': __within([0, 1], dtype=int),
    'preferred-lifetime': __int,
    'dad-attempts': __int,  # 0 to disable
    'dad-interval': __float,
    # bond
    'slaves': __anything,
    # tunnel
    'mode': __within(['gre', 'GRE', 'ipip', 'IPIP', '802.3ad'], dtype=str),
    'endpoint': __ipv4_quad,
    'local': __ipv4_quad,
    'ttl': __int,
    # resolvconf
    'dns-nameservers': __space_delimited_list,
    'dns-search': __space_delimited_list,
    #
    'vlan-raw-device': __anything,
    'test': __anything,  # TODO
    'enable_ipv6': __anything,  # TODO
}
# Validators for wpa/wireless options (shared by both address families).
WIRELESS_ATTR_MAP = {
    'wireless-essid': __anything,
    'wireless-mode': __anything,  # TODO
    'wpa-ap-scan': __within([0, 1, 2], dtype=int),  # TODO
    'wpa-conf': __anything,
    'wpa-driver': __anything,
    'wpa-group': __anything,
    'wpa-key-mgmt': __anything,
    'wpa-pairwise': __anything,
    'wpa-psk': __anything,
    'wpa-proto': __anything,  # partial(__within,
    'wpa-roam': __anything,
    'wpa-ssid': __anything,  # TODO
}
# Address family -> ordered list of attribute maps consulted by
# _validate_interface_option (first map containing the attr wins).
ATTRMAPS = {
    'inet': [IPV4_ATTR_MAP, WIRELESS_ATTR_MAP],
    'inet6': [IPV6_ATTR_MAP, WIRELESS_ATTR_MAP]
}
def _validate_interface_option(attr, value, addrfam='inet'):
    '''
    Run the validator registered for ``attr`` in the given address
    family and return its ``(valid, value, errmsg)`` result.  If no
    validator is registered, the value is reported invalid with an
    'Unknown validator' message.
    :param attr: attribute name
    :param value: raw setting value
    :param addrfam: address family (inet, inet6)
    '''
    for attrmap in ATTRMAPS.get(addrfam, []):
        validate_func = attrmap.get(attr)
        if validate_func is not None:
            return validate_func(value)
    return (False, value, 'Unknown validator')
def _attrmaps_contain_attr(attr):
    '''Return True when any of the attribute maps knows about ``attr``.'''
    return any(attr in attrmap for attrmap in
               (WIRELESS_ATTR_MAP, IPV4_ATTR_MAP, IPV6_ATTR_MAP))
def _parse_interfaces(interface_files=None):
    '''
    Parse /etc/network/interfaces and return current configured interfaces

    :param interface_files: optional explicit list of files to parse;
        when None, /etc/network/interfaces plus everything under
        /etc/network/interfaces.d/ is read.
    :return: OrderedDict mapping interface name -> {'data': {addrfam:
        {...settings...}}, 'enabled': bool, 'hotplug': bool}; ``source``
        stanzas are collected under a synthetic 'source' key.
    '''
    if interface_files is None:
        interface_files = []
        # Add this later.
        if os.path.exists(_DEB_NETWORK_DIR):
            # NOTE: 'dir' shadows the builtin here; kept as-is.
            interface_files += ['{0}/{1}'.format(_DEB_NETWORK_DIR, dir) for dir in os.listdir(_DEB_NETWORK_DIR)]
        if os.path.isfile(_DEB_NETWORK_FILE):
            interface_files.insert(0, _DEB_NETWORK_FILE)
    adapters = salt.utils.odict.OrderedDict()
    # last-seen iface method; -1 until an 'iface' clause has been parsed
    method = -1
    for interface_file in interface_files:
        with salt.utils.fopen(interface_file) as interfaces:
            # This ensures iface_dict exists, but does not ensure we're not reading a new interface.
            iface_dict = {}
            for line in interfaces:
                # Identify the clauses by the first word of each line.
                # Go to the next line if the current line is a comment
                # or all spaces.
                if line.lstrip().startswith('#') or line.isspace():
                    continue
                # Parse the iface clause
                if line.startswith('iface'):
                    sline = line.split()
                    if len(sline) != 4:
                        msg = 'Interface file malformed: {0}.'
                        msg = msg.format(sline)
                        log.error(msg)
                        raise AttributeError(msg)
                    iface_name = sline[1]
                    addrfam = sline[2]
                    method = sline[3]
                    # Create item in dict, if not already there
                    if iface_name not in adapters:
                        adapters[iface_name] = salt.utils.odict.OrderedDict()
                    # Create item in dict, if not already there
                    if 'data' not in adapters[iface_name]:
                        adapters[iface_name]['data'] = salt.utils.odict.OrderedDict()
                    if addrfam not in adapters[iface_name]['data']:
                        adapters[iface_name]['data'][addrfam] = salt.utils.odict.OrderedDict()
                    iface_dict = adapters[iface_name]['data'][addrfam]
                    iface_dict['addrfam'] = addrfam
                    iface_dict['proto'] = method
                    iface_dict['filename'] = interface_file
                # Parse the detail clauses.
                elif line[0].isspace():
                    sline = line.split()
                    # conf file attr: dns-nameservers
                    # salt states.network attr: dns
                    attr, valuestr = line.rstrip().split(None, 1)
                    if _attrmaps_contain_attr(attr):
                        # normalize dashes to underscores for jinja access
                        if '-' in attr:
                            attrname = attr.replace('-', '_')
                        else:
                            attrname = attr
                        (valid, value, errmsg) = _validate_interface_option(
                            attr, valuestr, addrfam)
                        iface_dict[attrname] = value
                    elif attr in _REV_ETHTOOL_CONFIG_OPTS:
                        if 'ethtool' not in iface_dict:
                            iface_dict['ethtool'] = salt.utils.odict.OrderedDict()
                        iface_dict['ethtool'][attr] = valuestr
                    elif attr.startswith('bond'):
                        # strip the 'bond-'/'bond_' prefix
                        opt = re.split(r'[_-]', attr, maxsplit=1)[1]
                        if 'bonding' not in iface_dict:
                            iface_dict['bonding'] = salt.utils.odict.OrderedDict()
                        iface_dict['bonding'][opt] = valuestr
                    elif attr.startswith('bridge'):
                        # strip the 'bridge-'/'bridge_' prefix
                        opt = re.split(r'[_-]', attr, maxsplit=1)[1]
                        if 'bridging' not in iface_dict:
                            iface_dict['bridging'] = salt.utils.odict.OrderedDict()
                        iface_dict['bridging'][opt] = valuestr
                    elif attr in ['up', 'pre-up', 'post-up',
                                  'down', 'pre-down', 'post-down']:
                        cmd = valuestr
                        cmd_key = '{0}_cmds'.format(re.sub('-', '_', attr))
                        if cmd_key not in iface_dict:
                            iface_dict[cmd_key] = []
                        iface_dict[cmd_key].append(cmd)
                elif line.startswith('auto'):
                    for word in line.split()[1:]:
                        if word not in adapters:
                            adapters[word] = salt.utils.odict.OrderedDict()
                        adapters[word]['enabled'] = True
                elif line.startswith('allow-hotplug'):
                    for word in line.split()[1:]:
                        if word not in adapters:
                            adapters[word] = salt.utils.odict.OrderedDict()
                        adapters[word]['hotplug'] = True
                elif line.startswith('source'):
                    if 'source' not in adapters:
                        adapters['source'] = salt.utils.odict.OrderedDict()
                    # Create item in dict, if not already there
                    if 'data' not in adapters['source']:
                        adapters['source']['data'] = salt.utils.odict.OrderedDict()
                        adapters['source']['data']['sources'] = []
                    adapters['source']['data']['sources'].append(line.split()[1])
    # Return a sorted list of the keys for bond, bridge and ethtool options to
    # ensure a consistent order
    for iface_name in adapters:
        if iface_name == 'source':
            continue
        if 'data' not in adapters[iface_name]:
            msg = 'Interface file malformed for interface: {0}.'.format(iface_name)
            log.error(msg)
            adapters.pop(iface_name)
            continue
        for opt in ['ethtool', 'bonding', 'bridging']:
            if 'inet' in adapters[iface_name]['data']:
                if opt in adapters[iface_name]['data']['inet']:
                    opt_keys = sorted(adapters[iface_name]['data']['inet'][opt].keys())
                    adapters[iface_name]['data']['inet'][opt + '_keys'] = opt_keys
    return adapters
def _parse_ethtool_opts(opts, iface):
    '''
    Filters given options and outputs valid settings for ETHTOOLS_OPTS
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.

    :param opts: interface options from the state/module call
    :param iface: interface name (used in error messages only)
    :return: dict of normalized ethtool settings
    :raises AttributeError: via _raise_error_iface on any invalid value
    '''
    config = {}
    if 'autoneg' in opts:
        if opts['autoneg'] in _CONFIG_TRUE:
            config['autoneg'] = 'on'
        elif opts['autoneg'] in _CONFIG_FALSE:
            config['autoneg'] = 'off'
        else:
            _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE)
    if 'duplex' in opts:
        valid = ['full', 'half']
        if opts['duplex'] in valid:
            config['duplex'] = opts['duplex']
        else:
            _raise_error_iface(iface, 'duplex', valid)
    if 'speed' in opts:
        valid = ['10', '100', '1000', '10000']
        if str(opts['speed']) in valid:
            config['speed'] = opts['speed']
        else:
            # Bug fix: previously the invalid *value* was passed where the
            # option name belongs, producing a misleading error message
            # inconsistent with every other setting.
            _raise_error_iface(iface, 'speed', valid)
    # boolean offload toggles all share the same accepted values
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'):
        if option in opts:
            if opts[option] in _CONFIG_TRUE:
                config[option] = 'on'
            elif opts[option] in _CONFIG_FALSE:
                config[option] = 'off'
            else:
                _raise_error_iface(iface, option, valid)
    return config
def _parse_ethtool_pppoe_opts(opts, iface):
    '''
    Filters given options and outputs valid settings for ETHTOOLS_PPPOE_OPTS
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    # copy over every recognized pppoe option verbatim
    config = dict((opt, opts[opt]) for opt in _DEB_CONFIG_PPPOE_OPTS
                  if opt in opts)
    if 'provider' in opts and not opts['provider']:
        _raise_error_iface(iface, 'provider', _CONFIG_TRUE + _CONFIG_FALSE)
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    for option in ('noipdefault', 'usepeerdns', 'defaultroute',
                   'hide-password', 'noauth', 'persist', 'noaccomp'):
        if option not in opts:
            continue
        if opts[option] in _CONFIG_TRUE:
            config[option] = 'True'
        elif opts[option] in _CONFIG_FALSE:
            config[option] = 'False'
        else:
            _raise_error_iface(iface, option, valid)
    return config
def _parse_settings_bond(opts, iface):
    '''
    Filters given options and outputs valid settings for requested
    operation. If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond_def = {
        # 803.ad aggregation selection logic
        # 0 for stable (default)
        # 1 for bandwidth
        # 2 for count
        'ad_select': '0',
        # Max number of transmit queues (default = 16)
        'tx_queues': '16',
        # Link monitoring in milliseconds. Most NICs support this
        'miimon': '100',
        # ARP interval in milliseconds
        'arp_interval': '250',
        # Delay before considering link down in milliseconds (miimon * 2)
        'downdelay': '200',
        # lacp_rate 0: Slow - every 30 seconds
        # lacp_rate 1: Fast - every 1 second
        'lacp_rate': '0',
        # Max bonds for this driver
        'max_bonds': '1',
        # Specifies the time, in milliseconds, to wait before
        # enabling a slave after a link recovery has been
        # detected. Only used with miimon.
        'updelay': '0',
        # Used with miimon.
        # On: driver sends mii
        # Off: ethtool sends mii
        'use_carrier': 'on',
        # Default. Don't change unless you know what you are doing.
        'xmit_hash_policy': 'layer2',
    }
    # Map each accepted mode spelling to its log description and the
    # mode-specific parser.
    mode_table = (
        (('balance-rr', '0'), 'load balancing (round-robin)',
         _parse_settings_bond_0),
        (('active-backup', '1'), 'fault-tolerance (active-backup)',
         _parse_settings_bond_1),
        (('balance-xor', '2'), 'load balancing (xor)',
         _parse_settings_bond_2),
        (('broadcast', '3'), 'fault-tolerance (broadcast)',
         _parse_settings_bond_3),
        (('802.3ad', '4'), 'IEEE 802.3ad Dynamic link aggregation',
         _parse_settings_bond_4),
        (('balance-tlb', '5'), 'transmit load balancing',
         _parse_settings_bond_5),
        (('balance-alb', '6'), 'adaptive load balancing',
         _parse_settings_bond_6),
    )
    for aliases, description, parser in mode_table:
        if opts['mode'] in aliases:
            log.info(
                'Device: {0} Bonding Mode: {1}'.format(iface, description)
            )
            return parser(opts, iface, bond_def)
    valid = [
        '0', '1', '2', '3', '4', '5', '6',
        'balance-rr', 'active-backup', 'balance-xor',
        'broadcast', '802.3ad', 'balance-tlb', 'balance-alb'
    ]
    _raise_error_iface(iface, 'mode', valid)
def _parse_settings_bond_0(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond0.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '0'}
    # ARP targets must be a list of 1..16 addresses in n.n.n.n form
    valid = ['list of ips (up to 16)']
    targets = opts.get('arp_ip_target')
    if not isinstance(targets, list) or not 1 <= len(targets) <= 16:
        _raise_error_iface(iface, 'arp_ip_target', valid)
    bond['arp_ip_target'] = ','.join(targets)
    if 'arp_interval' in opts:
        try:
            int(opts['arp_interval'])
        except ValueError:
            _raise_error_iface(iface, 'arp_interval', ['integer'])
        bond['arp_interval'] = opts['arp_interval']
    else:
        _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
        bond['arp_interval'] = bond_def['arp_interval']
    return bond
def _parse_settings_bond_1(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond1.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '1'}
    for timing_opt in ['miimon', 'downdelay', 'updelay']:
        if timing_opt not in opts:
            _log_default_iface(iface, timing_opt, bond_def[timing_opt])
            bond[timing_opt] = bond_def[timing_opt]
            continue
        try:
            int(opts[timing_opt])
        except ValueError:
            _raise_error_iface(iface, timing_opt, ['integer'])
        bond[timing_opt] = opts[timing_opt]
    if 'primary' in opts:
        bond['primary'] = opts['primary']
    # NOTE(review): use_carrier is skipped entirely on Ubuntu >= 16 --
    # presumably unsupported there; confirm before relying on it.
    if not (__grains__['os'] == "Ubuntu" and __grains__['osrelease_info'][0] >= 16):
        if 'use_carrier' not in opts:
            _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
            bond['use_carrier'] = bond_def['use_carrier']
        elif opts['use_carrier'] in _CONFIG_TRUE:
            bond['use_carrier'] = '1'
        elif opts['use_carrier'] in _CONFIG_FALSE:
            bond['use_carrier'] = '0'
        else:
            _raise_error_iface(iface, 'use_carrier',
                               _CONFIG_TRUE + _CONFIG_FALSE)
    return bond
def _parse_settings_bond_2(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond2.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '2'}
    # ARP targets must be a list of 1..16 addresses in n.n.n.n form
    valid = ['list of ips (up to 16)']
    targets = opts.get('arp_ip_target')
    if not isinstance(targets, list) or not 1 <= len(targets) <= 16:
        _raise_error_iface(iface, 'arp_ip_target', valid)
    bond['arp_ip_target'] = ','.join(targets)
    if 'arp_interval' in opts:
        try:
            int(opts['arp_interval'])
        except ValueError:
            _raise_error_iface(iface, 'arp_interval', ['integer'])
        bond['arp_interval'] = opts['arp_interval']
    else:
        _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
        bond['arp_interval'] = bond_def['arp_interval']
    if 'hashing-algorithm' in opts:
        valid = ['layer2', 'layer2+3', 'layer3+4']
        if opts['hashing-algorithm'] not in valid:
            _raise_error_iface(iface, 'hashing-algorithm', valid)
        bond['xmit_hash_policy'] = opts['hashing-algorithm']
    return bond
def _parse_settings_bond_3(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond3.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '3'}
    for timing_opt in ['miimon', 'downdelay', 'updelay']:
        if timing_opt not in opts:
            _log_default_iface(iface, timing_opt, bond_def[timing_opt])
            bond[timing_opt] = bond_def[timing_opt]
            continue
        try:
            int(opts[timing_opt])
        except ValueError:
            _raise_error_iface(iface, timing_opt, ['integer'])
        bond[timing_opt] = opts[timing_opt]
    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    return bond
def _parse_settings_bond_4(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond4.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '4'}
    for setting in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']:
        if setting not in opts:
            _log_default_iface(iface, setting, bond_def[setting])
            bond[setting] = bond_def[setting]
            continue
        if setting == 'lacp_rate':
            # normalize the symbolic spellings to their numeric form
            if opts[setting] == 'fast':
                opts.update({setting: '1'})
            elif opts[setting] == 'slow':
                opts.update({setting: '0'})
            valid = ['fast', '1', 'slow', '0']
        else:
            valid = ['integer']
        try:
            int(opts[setting])
        except ValueError:
            _raise_error_iface(iface, setting, valid)
        bond[setting] = opts[setting]
    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    if 'hashing-algorithm' in opts:
        valid = ['layer2', 'layer2+3', 'layer3+4']
        if opts['hashing-algorithm'] not in valid:
            _raise_error_iface(iface, 'hashing-algorithm', valid)
        bond['xmit_hash_policy'] = opts['hashing-algorithm']
    return bond
def _parse_settings_bond_5(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond5.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '5'}
    for timing_opt in ['miimon', 'downdelay', 'updelay']:
        if timing_opt not in opts:
            _log_default_iface(iface, timing_opt, bond_def[timing_opt])
            bond[timing_opt] = bond_def[timing_opt]
            continue
        try:
            int(opts[timing_opt])
        except ValueError:
            _raise_error_iface(iface, timing_opt, ['integer'])
        bond[timing_opt] = opts[timing_opt]
    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    if 'primary' in opts:
        bond['primary'] = opts['primary']
    return bond
def _parse_settings_bond_6(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond6.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '6'}
    for timing_opt in ['miimon', 'downdelay', 'updelay']:
        if timing_opt not in opts:
            _log_default_iface(iface, timing_opt, bond_def[timing_opt])
            bond[timing_opt] = bond_def[timing_opt]
            continue
        try:
            int(opts[timing_opt])
        except ValueError:
            _raise_error_iface(iface, timing_opt, ['integer'])
        bond[timing_opt] = opts[timing_opt]
    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    if 'primary' in opts:
        bond['primary'] = opts['primary']
    return bond
def _parse_bridge_opts(opts, iface):
    '''
    Filters given options and outputs valid settings for BRIDGING_OPTS
    If an option has a value that is not expected, this
    function will log the Interface, Setting and what was expected.

    :param opts: bridge options from the state/module call (note: a list
        'ports' value is joined in place into a space-separated string)
    :param iface: interface name (used in error messages only)
    :raises AttributeError: via _raise_error_iface on any invalid value
    '''
    config = {}
    if 'ports' in opts:
        if isinstance(opts['ports'], list):
            opts['ports'] = ' '.join(opts['ports'])
        config.update({'ports': opts['ports']})
    # time-valued options must parse as floats
    for opt in ['ageing', 'fd', 'gcint', 'hello', 'maxage']:
        if opt in opts:
            try:
                float(opts[opt])
                config.update({opt: opts[opt]})
            except ValueError:
                _raise_error_iface(iface, opt, ['float'])
    for opt in ['bridgeprio', 'maxwait']:
        if opt in opts:
            if isinstance(opts[opt], int):
                config.update({opt: opts[opt]})
            else:
                _raise_error_iface(iface, opt, ['integer'])
    if 'hw' in opts:
        # match 12 hex digits with either : or - as separators between pairs
        if re.match('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$',
                    opts['hw'].lower()):
            config.update({'hw': opts['hw']})
        else:
            _raise_error_iface(iface, 'hw', ['valid MAC address'])
    # these take the form "<interface> <integer>"
    for opt in ['pathcost', 'portprio']:
        if opt in opts:
            try:
                port, cost_or_prio = opts[opt].split()
                int(cost_or_prio)
                config.update({opt: '{0} {1}'.format(port, cost_or_prio)})
            except ValueError:
                _raise_error_iface(iface, opt, ['interface integer'])
    if 'stp' in opts:
        if opts['stp'] in _CONFIG_TRUE:
            config.update({'stp': 'on'})
        elif opts['stp'] in _CONFIG_FALSE:
            config.update({'stp': 'off'})
        else:
            _raise_error_iface(iface, 'stp', _CONFIG_TRUE + _CONFIG_FALSE)
    if 'waitport' in opts:
        if isinstance(opts['waitport'], int):
            config.update({'waitport': opts['waitport']})
        else:
            values = opts['waitport'].split()
            waitport_time = values.pop(0)
            if waitport_time.isdigit() and values:
                config.update({
                    'waitport': '{0} {1}'.format(
                        waitport_time, ' '.join(values)
                    )
                })
            else:
                # Bug fix: previously reported the stale loop variable
                # ``opt`` (left over from the pathcost/portprio loop)
                # instead of the option actually being validated.
                _raise_error_iface(iface, 'waitport', ['integer [interfaces]'])
    return config
def _parse_settings_eth(opts, iface_type, enabled, iface):
    '''
    Filters given options and outputs valid settings for a
    network interface.

    :param opts: options from the state/module call; mutated in place
        ('proto' is forced to 'manual' for slaves, 'mode' is popped for
        bonds/bridges)
    :param iface_type: one of _IFACE_TYPES
    :param enabled: whether the interface should be marked enabled
    :param iface: interface name
    :return: adapters structure shaped like _parse_interfaces() output
    :raises AttributeError: via _raise_error_iface on invalid settings
    '''
    adapters = salt.utils.odict.OrderedDict()
    adapters[iface] = salt.utils.odict.OrderedDict()
    adapters[iface]['type'] = iface_type
    adapters[iface]['data'] = salt.utils.odict.OrderedDict()
    iface_data = adapters[iface]['data']
    iface_data['inet'] = salt.utils.odict.OrderedDict()
    iface_data['inet6'] = salt.utils.odict.OrderedDict()
    if enabled:
        adapters[iface]['enabled'] = True
    if opts.get('hotplug', False):
        adapters[iface]['hotplug'] = True
    # Defaults assume IPv4 (inet) interfaces unless enable_ipv6=True
    def_addrfam = 'inet'
    dual_stack = False
    # If enable_ipv6=True, then expect either IPv6-only or dual stack.
    if 'enable_ipv6' in opts and opts['enable_ipv6']:
        iface_data['inet6']['addrfam'] = 'inet6'
        iface_data['inet6']['netmask'] = '64'  # defaults to 64
        def_addrfam = 'inet6'
        if 'iface_type' in opts and opts['iface_type'] == 'vlan':
            # strip the ".N" vlan suffix to get the raw device name
            iface_data['inet6']['vlan_raw_device'] = (
                re.sub(r'\.\d*', '', iface))
        if 'ipaddr' in opts and 'ipv6ipaddr' in opts:
            # If both 'ipaddr' and 'ipv6ipaddr' are present; expect dual stack
            iface_data['inet']['addrfam'] = 'inet'
            def_addrfam = 'inet'
            dual_stack = True
    else:
        # If enable_ipv6=False|None, IPv6 settings should not be set.
        iface_data['inet']['addrfam'] = 'inet'
    if iface_type not in ['bridge']:
        tmp_ethtool = _parse_ethtool_opts(opts, iface)
        if tmp_ethtool:
            # translate normalized names back to ETHTOOL config keys
            ethtool = {}
            for item in tmp_ethtool:
                ethtool[_ETHTOOL_CONFIG_OPTS[item]] = tmp_ethtool[item]
            iface_data[def_addrfam]['ethtool'] = ethtool
            # return a list of sorted keys to ensure consistent order
            iface_data[def_addrfam]['ethtool_keys'] = sorted(ethtool)
    if iface_type == 'bridge':
        bridging = _parse_bridge_opts(opts, iface)
        if bridging:
            opts.pop('mode', None)
            iface_data[def_addrfam]['bridging'] = bridging
            iface_data[def_addrfam]['bridging_keys'] = sorted(bridging)
            iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'bond':
        bonding = _parse_settings_bond(opts, iface)
        if bonding:
            opts.pop('mode', None)
            iface_data[def_addrfam]['bonding'] = bonding
            iface_data[def_addrfam]['bonding']['slaves'] = opts['slaves']
            iface_data[def_addrfam]['bonding_keys'] = sorted(bonding)
            iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'slave':
        adapters[iface]['master'] = opts['master']
        opts['proto'] = 'manual'
        iface_data[def_addrfam]['master'] = adapters[iface]['master']
        iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'vlan':
        iface_data[def_addrfam]['vlan_raw_device'] = re.sub(r'\.\d*', '', iface)
        iface_data[def_addrfam]['addrfam'] = def_addrfam
    elif iface_type == 'pppoe':
        tmp_ethtool = _parse_ethtool_pppoe_opts(opts, iface)
        if tmp_ethtool:
            for item in tmp_ethtool:
                adapters[iface]['data'][def_addrfam][_DEB_CONFIG_PPPOE_OPTS[item]] = tmp_ethtool[item]
        iface_data[def_addrfam]['addrfam'] = def_addrfam
    for opt in opts:
        # trim leading "ipv6" from option
        if opt.startswith('ipv6'):
            optname = opt[4:]  # trim off the ipv6
            v6only = True
        else:
            optname = opt
            v6only = False
        _optname = SALT_ATTR_TO_DEBIAN_ATTR_MAP.get(optname, optname)
        if _attrmaps_contain_attr(_optname):
            valuestr = opts[opt]
            # default to 'static' if proto is 'none'
            if optname == 'proto' and valuestr == 'none':
                valuestr = 'static'
            # If option is v6-only, don't validate against inet and always set value
            if v6only:
                (valid, value, errmsg) = _validate_interface_option(
                    _optname, valuestr, addrfam='inet6')
                if not valid:
                    _raise_error_iface(iface, '\'{0}\' \'{1}\''.format(opt, valuestr), [errmsg])
                # replace dashes with underscores for jinja
                _optname = _optname.replace('-', '_')
                iface_data['inet6'][_optname] = value
            # Else, if it's a dual stack, the option may belong in both; apply v4 opt as v6 default
            elif dual_stack:
                valid_once = False
                errmsg = None
                for addrfam in ['inet', 'inet6']:
                    (valid, value, errmsg) = _validate_interface_option(
                        _optname, valuestr, addrfam=addrfam)
                    if valid:
                        valid_once = True
                        # replace dashes with underscores for jinja
                        _optname = _optname.replace('-', '_')
                        # if a v6-only version of this option was set; don't override
                        # otherwise, if dual stack, use the v4 version as a default value for v6
                        # allows overriding with =None
                        if addrfam == 'inet' or _optname not in iface_data['inet6']:
                            iface_data[addrfam][_optname] = value
                if not valid_once:
                    _raise_error_iface(
                        iface,
                        '\'{0}\' \'{1}\''.format(opt, valuestr),
                        [errmsg]
                    )
            # Else, it goes in the default(only) addrfam
            # Not assuming v4 allows a v6 block to be created without lots of "ipv6" prefixes
            else:
                (valid, value, errmsg) = _validate_interface_option(
                    _optname, valuestr, addrfam=def_addrfam)
                if not valid:
                    _raise_error_iface(
                        iface,
                        '\'{0}\' \'{1}\''.format(opt, valuestr),
                        [errmsg]
                    )
                # replace dashes with underscores for jinja
                _optname = _optname.replace('-', '_')
                iface_data[def_addrfam][_optname] = value
    # up/down hook commands always live under the inet family
    for opt in ['up_cmds', 'pre_up_cmds', 'post_up_cmds',
                'down_cmds', 'pre_down_cmds', 'post_down_cmds']:
        if opt in opts:
            iface_data['inet'][opt] = opts[opt]
    # drop any address family that was never actually configured
    for addrfam in ['inet', 'inet6']:
        if 'addrfam' in iface_data[addrfam] and iface_data[addrfam]['addrfam'] == addrfam:
            pass
        else:
            iface_data.pop(addrfam)
    return adapters
def _parse_settings_source(opts, iface_type, enabled, iface):
    '''
    Build the adapter structure for a ``source`` interface entry.

    Returns an ordered mapping shaped like
    ``{iface: {'type': iface_type, 'data': {'sources': [...]}}}``.
    ``enabled`` is accepted only for signature parity with the other
    ``_parse_settings_*`` helpers and is not used here.
    '''
    source_data = salt.utils.odict.OrderedDict()
    source_data['sources'] = [opts['source']]
    entry = salt.utils.odict.OrderedDict()
    entry['type'] = iface_type
    entry['data'] = source_data
    adapters = salt.utils.odict.OrderedDict()
    adapters[iface] = entry
    return adapters
def _parse_network_settings(opts, current):
    '''
    Filters given options and outputs valid settings for
    the global network settings file.

    opts
        User-supplied settings (keys are normalized to lower case).
    current
        Settings currently on disk; used as defaults when a value is
        missing from ``opts``.
    '''
    # Normalize keys
    opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
    current = dict((k.lower(), v) for (k, v) in six.iteritems(current))
    result = {}
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    if 'enabled' not in opts:
        try:
            opts['networking'] = current['networking']
            _log_default_network('networking', current['networking'])
        except KeyError:
            # A missing key in a plain dict raises KeyError, not ValueError;
            # the old ``except ValueError`` let the KeyError escape and the
            # intended error message was never produced.
            _raise_error_network('networking', valid)
    else:
        opts['networking'] = opts['enabled']
    if opts['networking'] in valid:
        if opts['networking'] in _CONFIG_TRUE:
            result['networking'] = 'yes'
        elif opts['networking'] in _CONFIG_FALSE:
            result['networking'] = 'no'
    else:
        _raise_error_network('networking', valid)
    if 'hostname' not in opts:
        try:
            opts['hostname'] = current['hostname']
            _log_default_network('hostname', current['hostname'])
        except KeyError:
            # Same KeyError-vs-ValueError fix as for 'networking' above.
            _raise_error_network('hostname', ['server1.example.com'])
    if opts['hostname']:
        result['hostname'] = opts['hostname']
    else:
        _raise_error_network('hostname', ['server1.example.com'])
    if 'search' in opts:
        result['search'] = opts['search']
    return result
def _parse_routes(iface, opts):
    '''
    Filters given options and outputs valid settings for
    the route settings file.
    '''
    # Normalize keys to lower case, then require a 'routes' entry.
    normalized = dict((key.lower(), val) for (key, val) in six.iteritems(opts))
    if 'routes' not in normalized:
        _raise_error_routes(iface, 'routes', 'List of routes')
    # All options (including 'routes') are passed through unchanged.
    return dict(normalized)
def _write_file(iface, data, folder, pattern):
    '''
    Write ``data`` to ``folder/pattern.format(iface)`` and return the
    resulting file name.

    Raises AttributeError when ``folder`` does not exist (historical
    behaviour relied on by callers).
    '''
    filename = os.path.join(folder, pattern.format(iface))
    if not os.path.exists(folder):
        msg = '{0} cannot be written. {1} does not exist'.format(filename, folder)
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.flopen(filename, 'w') as fout:
        fout.write(data)
    return filename
def _write_file_routes(iface, data, folder, pattern):
    '''
    Write a route script to disk and return its path.

    Identical to :func:`_write_file` except that the written file is
    chmodded to 0755 so ifup/ifdown can execute it.
    '''
    # Delegate path construction, existence check, and the locked write
    # to _write_file instead of duplicating its body.
    filename = _write_file(iface, data, folder, pattern)
    # Route scripts live in if-up.d / if-down.d and must be executable.
    __salt__['file.set_mode'](filename, '0755')
    return filename
def _write_file_network(data, filename, create=False):
    '''
    Writes a file to disk.

    If the file does not exist it is only created when ``create`` is
    True; otherwise an AttributeError is raised (historical behaviour).
    '''
    if not os.path.exists(filename) and not create:
        # Single string literal: the old backslash continuation embedded a
        # long run of indentation spaces in the logged/raised message.
        msg = ('{0} cannot be written. {0} does not exist '
               'and create is set to False'.format(filename))
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.flopen(filename, 'w') as fout:
        fout.write(data)
def _read_temp(data):
    '''
    Return the lines that would be written to disk, without writing them.
    '''
    buf = StringIO()
    buf.write(data)
    buf.seek(0)
    try:
        return buf.readlines()
    finally:
        buf.close()
def _read_temp_ifaces(iface, data):
    '''
    Render what would be written to disk for an interface, as a list of
    newline-terminated lines (so difflib can consume it).
    '''
    try:
        template = JINJA.get_template('debian_eth.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template debian_eth.jinja')
        return ''
    rendered = template.render({'name': iface, 'data': data})
    # Re-attach the newline stripped by split() so difflib sees real lines.
    return [line + '\n' for line in rendered.split('\n')]
def _write_file_ifaces(iface, data, **settings):
    '''
    Write interface configuration to disk and return the rendered stanza
    for ``iface`` as a list of lines.

    When ``settings['filename']`` is given, only the stanza for ``iface``
    is written to that (separate) file; otherwise the full interfaces
    file is rewritten.
    '''
    try:
        eth_template = JINJA.get_template('debian_eth.jinja')
        source_template = JINJA.get_template('debian_source.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template debian_eth.jinja')
        return ''
    # Read /etc/network/interfaces into a dict
    adapters = _parse_interfaces()
    # Apply supplied settings over on-disk settings
    adapters[iface] = data
    ifcfg = ''
    for adapter in adapters:
        if 'type' in adapters[adapter] and adapters[adapter]['type'] == 'source':
            tmp = source_template.render({'name': adapter, 'data': adapters[adapter]})
        else:
            tmp = eth_template.render({'name': adapter, 'data': adapters[adapter]})
        ifcfg = ifcfg + tmp
        if adapter == iface:
            saved_ifcfg = tmp
    _SEPERATE_FILE = False
    if 'filename' in settings:
        if not settings['filename'].startswith('/'):
            filename = '{0}/{1}'.format(_DEB_NETWORK_DIR, settings['filename'])
        else:
            filename = settings['filename']
        _SEPERATE_FILE = True
    else:
        # NOTE(review): ``adapter`` here is the last adapter left over from
        # the loop above; kept as-is to preserve existing behaviour.
        if 'filename' in adapters[adapter]['data']:
            # Bug fix: use the filename *value*, not the whole data dict
            # (the old code assigned the dict itself, so os.path.dirname
            # below raised TypeError).
            filename = adapters[adapter]['data']['filename']
        else:
            filename = _DEB_NETWORK_FILE
    if not os.path.exists(os.path.dirname(filename)):
        msg = '{0} cannot be written.'.format(os.path.dirname(filename))
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.flopen(filename, 'w') as fout:
        if _SEPERATE_FILE:
            fout.write(saved_ifcfg)
        else:
            fout.write(ifcfg)
    # Return as a list so the difflib works
    return saved_ifcfg.split('\n')
def _write_file_ppp_ifaces(iface, data):
    '''
    Write a PPP provider file to disk and return its path.
    '''
    try:
        template = JINJA.get_template('debian_ppp_eth.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template debian_ppp_eth.jinja')
        return ''
    adapters = _parse_interfaces()
    adapters[iface] = data
    ifcfg = template.render({'data': adapters[iface]})
    filename = _DEB_PPP_DIR + '/' + adapters[iface]['data']['inet']['provider']
    if not os.path.exists(os.path.dirname(filename)):
        msg = '{0} cannot be written.'.format(os.path.dirname(filename))
        log.error(msg)
        raise AttributeError(msg)
    # Use the locked flopen helper for consistency with the other writers
    # in this module (the old code used plain fopen here).
    with salt.utils.flopen(filename, 'w') as fout:
        fout.write(ifcfg)
    # Unlike the other writers this returns the path, not a line list.
    return filename
def build_bond(iface, **settings):
    '''
    Create a bond script in /etc/modprobe.d with the passed settings
    and load the bonding kernel module.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_bond bond0 mode=balance-alb
    '''
    # Use the full major version: slicing the first character would
    # mis-report double-digit releases (e.g. '10.3' -> '1'). This matches
    # how the other functions in this module parse osrelease.
    deb_major = __grains__['osrelease'].split('.')[0]
    opts = _parse_settings_bond(settings, iface)
    try:
        template = JINJA.get_template('conf.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template conf.jinja')
        return ''
    data = template.render({'name': iface, 'bonding': opts})
    if 'test' in settings and settings['test']:
        return _read_temp(data)
    _write_file(iface, data, _DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    path = os.path.join(_DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    # Debian 5 still keeps module options in /etc/modprobe.conf: strip any
    # stale alias/options lines for this iface, then append the new file.
    if deb_major == '5':
        for line_type in ('alias', 'options'):
            cmd = ['sed', '-i', '-e', r'/^{0}\s{1}.*/d'.format(line_type, iface),
                   '/etc/modprobe.conf']
            __salt__['cmd.run'](cmd, python_shell=False)
        __salt__['file.append']('/etc/modprobe.conf', path)
    # Load kernel module
    __salt__['kmod.load']('bonding')
    # install ifenslave-2.6
    __salt__['pkg.install']('ifenslave-2.6')
    return _read_file(path)
def build_interface(iface, iface_type, enabled, **settings):
    '''
    Build an interface script for a network interface.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_interface eth0 eth <settings>
    '''
    iface = iface.lower()
    iface_type = iface_type.lower()
    if iface_type not in _IFACE_TYPES:
        _raise_error_iface(iface, iface_type, _IFACE_TYPES)
    if 'proto' not in settings:
        settings['proto'] = 'static'
    if iface_type == 'slave':
        settings['slave'] = 'yes'
        if 'master' not in settings:
            msg = 'master is a required setting for slave interfaces'
            log.error(msg)
            raise AttributeError(msg)
    elif iface_type == 'vlan':
        settings['vlan'] = 'yes'
        __salt__['pkg.install']('vlan')
    elif iface_type == 'pppoe':
        settings['pppoe'] = 'yes'
        if not __salt__['pkg.version']('ppp'):
            # Return value of pkg.install is not needed (old code bound it
            # to an unused local).
            __salt__['pkg.install']('ppp')
    elif iface_type == 'bond':
        if 'slaves' not in settings:
            msg = 'slaves is a required setting for bond interfaces'
            log.error(msg)
            raise AttributeError(msg)
    elif iface_type == 'bridge':
        if 'ports' not in settings:
            msg = (
                'ports is a required setting for bridge interfaces on Debian '
                'or Ubuntu based systems'
            )
            log.error(msg)
            raise AttributeError(msg)
        __salt__['pkg.install']('bridge-utils')
    if iface_type in ('eth', 'bond', 'bridge', 'slave', 'vlan', 'pppoe'):
        opts = _parse_settings_eth(settings, iface_type, enabled, iface)
    elif iface_type == 'source':
        opts = _parse_settings_source(settings, iface_type, enabled, iface)
    if 'test' in settings and settings['test']:
        return _read_temp_ifaces(iface, opts[iface])
    ifcfg = _write_file_ifaces(iface, opts[iface], **settings)
    if iface_type == 'pppoe':
        _write_file_ppp_ifaces(iface, opts[iface])
    # ensure lines in list end with newline, so difflib works
    return [item + '\n' for item in ifcfg]
def build_routes(iface, **settings):
    '''
    Add route scripts for a network interface using up commands.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_routes eth0 <settings>
    '''
    iface = iface.lower()
    opts = _parse_routes(iface, settings)
    try:
        template = JINJA.get_template('route_eth.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template route_eth.jinja')
        return ''
    # Render matching add/del scripts from the same route list.
    add_routecfg = template.render(
        route_type='add', routes=opts['routes'], iface=iface)
    del_routecfg = template.render(
        route_type='del', routes=opts['routes'], iface=iface)
    if 'test' in settings and settings['test']:
        return _read_temp(add_routecfg + del_routecfg)
    # Write the 'add' script to if-up.d and the 'del' script to if-down.d,
    # returning the combined on-disk contents.
    results = _read_file(_write_file_routes(
        iface, add_routecfg, _DEB_NETWORK_UP_DIR, 'route-{0}'))
    results += _read_file(_write_file_routes(
        iface, del_routecfg, _DEB_NETWORK_DOWN_DIR, 'route-{0}'))
    return results
def down(iface, iface_type):
    '''
    Shutdown a network interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.down eth0 eth
    '''
    # Slave devices are controlled by the master, and 'source' entries
    # are not real interfaces -- neither is brought down here.
    if iface_type in ('slave', 'source'):
        return None
    return __salt__['cmd.run'](['ifdown', iface])
def get_bond(iface):
    '''
    Return the content of a bond script

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_bond bond0
    '''
    return _read_file(
        os.path.join(_DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface)))
def get_interface(iface):
    '''
    Return the contents of an interface script

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_interface eth0
    '''
    adapters = _parse_interfaces()
    if iface not in adapters:
        return []
    # Render 'source' entries with the source template. Match the check
    # used when *writing* in _write_file_ifaces (adapter type == 'source');
    # the old name-only test missed adapters whose type is 'source'. The
    # name check is kept for backward compatibility.
    is_source = (iface == 'source'
                 or adapters[iface].get('type') == 'source')
    try:
        if is_source:
            template = JINJA.get_template('debian_source.jinja')
        else:
            template = JINJA.get_template('debian_eth.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template debian_eth.jinja')
        return ''
    ifcfg = template.render({'name': iface, 'data': adapters[iface]})
    # ensure lines in list end with newline, so difflib works
    return [item + '\n' for item in ifcfg.split('\n')]
def up(iface, iface_type):  # pylint: disable=C0103
    '''
    Start up a network interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.up eth0 eth
    '''
    # Slaves are managed by their master and 'source' entries are not
    # real interfaces, so there is nothing to bring up for them.
    if iface_type in ('slave', 'source'):
        return None
    return __salt__['cmd.run'](['ifup', iface])
def get_network_settings():
    '''
    Return the contents of the global network script.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_network_settings
    '''
    # Ubuntu >= 12.04 no longer uses /etc/default/networking; query the
    # service state and hostname/domain files directly instead.
    on_modern_ubuntu = (
        __grains__['osfullname'] == 'Ubuntu' and
        int(__grains__['osrelease'].split('.')[0]) >= 12)
    if not on_modern_ubuntu:
        settings = _parse_current_network_settings()
    else:
        networking = "no"
        if __salt__['service.available']('networking') \
                and __salt__['service.status']('networking'):
            networking = "yes"
        settings = {
            'networking': networking,
            'hostname': _parse_hostname(),
            'domainname': _parse_domainname(),
        }
    try:
        template = JINJA.get_template('display-network.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template display-network.jinja')
        return ''
    return _read_temp(template.render(settings))
def get_routes(iface):
    '''
    Return the routes for the interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_routes eth0
    '''
    # Concatenate the if-up.d and if-down.d route scripts for this iface.
    results = _read_file(
        os.path.join(_DEB_NETWORK_UP_DIR, 'route-{0}'.format(iface)))
    results += _read_file(
        os.path.join(_DEB_NETWORK_DOWN_DIR, 'route-{0}'.format(iface)))
    return results
def apply_network_settings(**settings):
    '''
    Apply global network configuration.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.apply_network_settings
    '''
    # Default the control flags when the caller did not supply them.
    settings.setdefault('require_reboot', False)
    settings.setdefault('apply_hostname', False)
    hostname_res = True
    if settings['apply_hostname'] in _CONFIG_TRUE:
        if 'hostname' in settings:
            hostname_res = __salt__['network.mod_hostname'](settings['hostname'])
        else:
            log.warning(
                'The network state sls is trying to apply hostname '
                'changes but no hostname is defined.'
            )
            hostname_res = False
    # (The old code pre-assigned ``res = True`` here; both branches below
    # assign it, so that statement was dead and has been removed.)
    if settings['require_reboot'] in _CONFIG_TRUE:
        log.warning(
            'The network state sls is requiring a reboot of the system to '
            'properly apply network configuration.'
        )
        res = True
    else:
        # Restart networking; give the stop a moment to settle first.
        stop = __salt__['service.stop']('networking')
        time.sleep(2)
        res = stop and __salt__['service.start']('networking')
    return hostname_res and res
def build_network_settings(**settings):
    '''
    Build the global network script.

    Writes (unless ``test=True``) the networking default file, the
    hostname file, and -- when the domain or search domain changed --
    /etc/resolv.conf. Returns the rendered settings as a list of lines.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_network_settings <settings>
    '''
    changes = []
    # Read current configuration and store default values
    current_network_settings = _parse_current_network_settings()
    # Build settings
    opts = _parse_network_settings(settings, current_network_settings)
    # Ubuntu has moved away from /etc/default/networking
    # beginning with the 12.04 release so we disable or enable
    # the networking related services on boot
    skip_etc_default_networking = (
        __grains__['osfullname'] == 'Ubuntu' and
        int(__grains__['osrelease'].split('.')[0]) >= 12)
    if skip_etc_default_networking:
        if opts['networking'] == 'yes':
            service_cmd = 'service.enable'
        else:
            service_cmd = 'service.disable'
        if __salt__['service.available']('NetworkManager'):
            __salt__[service_cmd]('NetworkManager')
        if __salt__['service.available']('networking'):
            __salt__[service_cmd]('networking')
    else:
        try:
            template = JINJA.get_template('network.jinja')
        except jinja2.exceptions.TemplateNotFound:
            log.error('Could not load template network.jinja')
            return ''
        network = template.render(opts)
        if 'test' in settings and settings['test']:
            return _read_temp(network)
        # Write settings
        _write_file_network(network, _DEB_NETWORKING_FILE, True)
    # Write hostname to /etc/hostname; anything after the first dot is
    # treated as the domain name, handled below.
    sline = opts['hostname'].split('.', 1)
    opts['hostname'] = sline[0]
    hostname = '{0}\n' . format(opts['hostname'])
    current_domainname = current_network_settings['domainname']
    current_searchdomain = current_network_settings['searchdomain']
    # Only write the hostname if it has changed
    if not opts['hostname'] == current_network_settings['hostname']:
        if not ('test' in settings and settings['test']):
            # TODO replace with a call to network.mod_hostname instead
            _write_file_network(hostname, _DEB_HOSTNAME_FILE)
    # Determine the effective domain name; record whether it changed so we
    # know if resolv.conf needs rewriting.
    new_domain = False
    if len(sline) > 1:
        new_domainname = sline[1]
        if new_domainname != current_domainname:
            domainname = new_domainname
            opts['domainname'] = new_domainname
            new_domain = True
        else:
            domainname = current_domainname
            opts['domainname'] = domainname
    else:
        domainname = current_domainname
        opts['domainname'] = domainname
    # Same change-tracking for the resolver search domain.
    new_search = False
    if 'search' in opts:
        new_searchdomain = opts['search']
        if new_searchdomain != current_searchdomain:
            searchdomain = new_searchdomain
            opts['searchdomain'] = new_searchdomain
            new_search = True
        else:
            searchdomain = current_searchdomain
            opts['searchdomain'] = searchdomain
    else:
        searchdomain = current_searchdomain
        opts['searchdomain'] = searchdomain
    # If the domain changes, then we should write the resolv.conf file.
    if new_domain or new_search:
        # Look for existing domain line and update if necessary
        contents = _parse_resolve()
        domain_prog = re.compile(r'domain\s+(?P<domain_name>\S+)')
        search_prog = re.compile(r'search\s+(?P<search_domain>\S+)')
        new_contents = []
        found_domain = False
        found_search = False
        for item in contents:
            domain_match = domain_prog.match(item)
            search_match = search_prog.match(item)
            if domain_match:
                new_contents.append('domain {0}\n' . format(domainname))
                found_domain = True
            elif search_match:
                new_contents.append('search {0}\n' . format(searchdomain))
                found_search = True
            else:
                new_contents.append(item)
        # A domain line didn't exist so we'll add one in
        # with the new domainname
        if not found_domain:
            new_contents.insert(0, 'domain {0}\n' . format(domainname))
        # A search line didn't exist so we'll add one in
        # with the new search domain, keeping it after any domain line
        if not found_search:
            if new_contents[0].startswith('domain'):
                new_contents.insert(1, 'search {0}\n' . format(searchdomain))
            else:
                new_contents.insert(0, 'search {0}\n' . format(searchdomain))
        new_resolv = ''.join(new_contents)
        # Write /etc/resolv.conf
        if not ('test' in settings and settings['test']):
            _write_file_network(new_resolv, _DEB_RESOLV_FILE)
    # used for returning the results back
    try:
        template = JINJA.get_template('display-network.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template display-network.jinja')
        return ''
    network = template.render(opts)
    changes.extend(_read_temp(network))
    return changes
| 554 | 0 | 115 |
369a9a39bc0dffc579cc1557c234597a461013c7 | 8,726 | py | Python | openmmmcmc/tests/test_mcmc.py | choderalab/openmm-mcmc | 0b28cb9465699685176bbd8cfa5c83125af8fda9 | [
"MIT"
] | 1 | 2016-01-14T20:10:00.000Z | 2016-01-14T20:10:00.000Z | openmmmcmc/tests/test_mcmc.py | choderalab/openmmmcmc | 0b28cb9465699685176bbd8cfa5c83125af8fda9 | [
"MIT"
] | 9 | 2016-01-14T18:47:21.000Z | 2017-02-02T23:08:54.000Z | openmmmcmc/tests/test_mcmc.py | choderalab/openmmmcmc | 0b28cb9465699685176bbd8cfa5c83125af8fda9 | [
"MIT"
] | 3 | 2016-01-14T20:59:52.000Z | 2021-04-01T00:38:29.000Z | import numpy as np
import simtk.openmm as openmm
import simtk.unit as units
from openmmtools import testsystems
from pymbar import timeseries
from functools import partial
from openmmmcmc.mcmc import HMCMove, GHMCMove, LangevinDynamicsMove, MonteCarloBarostatMove
import logging
# Test various combinations of systems and MCMC schemes
analytical_testsystems = [
("HarmonicOscillator", testsystems.HarmonicOscillator(),
[GHMCMove(timestep=10.0*units.femtoseconds,nsteps=100)]),
("HarmonicOscillator", testsystems.HarmonicOscillator(),
{GHMCMove(timestep=10.0*units.femtoseconds,nsteps=100): 0.5,
HMCMove(timestep=10*units.femtosecond, nsteps=10): 0.5}),
("HarmonicOscillatorArray", testsystems.HarmonicOscillatorArray(N=4),
[LangevinDynamicsMove(timestep=10.0*units.femtoseconds,nsteps=100)]),
("IdealGas", testsystems.IdealGas(nparticles=216),
[HMCMove(timestep=10*units.femtosecond, nsteps=10)])
]
NSIGMA_CUTOFF = 6.0 # cutoff for significance testing
debug = False # set to True only for manual debugging of this nose test
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
if __name__ == "__main__":
#test_mcmc_expectations()
test_minimizer_all_testsystems()
| 44.748718 | 258 | 0.655168 | import numpy as np
import simtk.openmm as openmm
import simtk.unit as units
from openmmtools import testsystems
from pymbar import timeseries
from functools import partial
from openmmmcmc.mcmc import HMCMove, GHMCMove, LangevinDynamicsMove, MonteCarloBarostatMove
import logging
# Test various combinations of systems and MCMC schemes
analytical_testsystems = [
("HarmonicOscillator", testsystems.HarmonicOscillator(),
[GHMCMove(timestep=10.0*units.femtoseconds,nsteps=100)]),
("HarmonicOscillator", testsystems.HarmonicOscillator(),
{GHMCMove(timestep=10.0*units.femtoseconds,nsteps=100): 0.5,
HMCMove(timestep=10*units.femtosecond, nsteps=10): 0.5}),
("HarmonicOscillatorArray", testsystems.HarmonicOscillatorArray(N=4),
[LangevinDynamicsMove(timestep=10.0*units.femtoseconds,nsteps=100)]),
("IdealGas", testsystems.IdealGas(nparticles=216),
[HMCMove(timestep=10*units.femtosecond, nsteps=10)])
]
NSIGMA_CUTOFF = 6.0 # cutoff for significance testing
debug = False # set to True only for manual debugging of this nose test
def test_minimizer_all_testsystems():
#testsystem_classes = testsystems.TestSystem.__subclasses__()
testsystem_classes = [ testsystems.AlanineDipeptideVacuum ]
for testsystem_class in testsystem_classes:
class_name = testsystem_class.__name__
logging.info("Testing minimization with testsystem %s" % class_name)
testsystem = testsystem_class()
from openmmmcmc import mcmc
sampler_state = mcmc.SamplerState(testsystem.system, testsystem.positions)
# Check if NaN.
if np.isnan(sampler_state.potential_energy / units.kilocalories_per_mole):
raise Exception("Initial energy of system %s yielded NaN" % class_name)
# Minimize
#sampler_state.minimize(maxIterations=0)
# Check if NaN.
if np.isnan(sampler_state.potential_energy / units.kilocalories_per_mole):
raise Exception("Minimization of system %s yielded NaN" % class_name)
def test_mcmc_expectations():
# Select system:
for [system_name, testsystem, move_set] in analytical_testsystems:
subtest_mcmc_expectation(testsystem, move_set)
f = partial(subtest_mcmc_expectation, testsystem, move_set)
f.description = "Testing MCMC expectation for %s" % system_name
logging.info(f.description)
yield f
def subtest_mcmc_expectation(testsystem, move_set):
if debug:
print(testsystem.__class__.__name__)
print(str(move_set))
# Test settings.
temperature = 298.0 * units.kelvin
nequil = 10 # number of equilibration iterations
niterations = 40 # number of production iterations
# Retrieve system and positions.
[system, positions] = [testsystem.system, testsystem.positions]
platform_name = 'Reference'
from simtk.openmm import Platform
platform = Platform.getPlatformByName(platform_name)
# Compute properties.
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
kT = kB * temperature
ndof = 3*system.getNumParticles() - system.getNumConstraints()
# Create thermodynamic state
from openmmmcmc.thermodynamics import ThermodynamicState
thermodynamic_state = ThermodynamicState(system=testsystem.system, temperature=temperature)
# Create MCMC sampler.
from openmmmcmc.mcmc import MCMCSampler
sampler = MCMCSampler(thermodynamic_state, move_set=move_set, platform=platform)
# Create sampler state.
from openmmmcmc.mcmc import SamplerState
sampler_state = SamplerState(system=testsystem.system, positions=testsystem.positions, platform=platform)
# Equilibrate
for iteration in range(nequil):
#print("equilibration iteration %d / %d" % (iteration, nequil))
# Update sampler state.
sampler_state = sampler.run(sampler_state, 1)
# Accumulate statistics.
x_n = np.zeros([niterations], np.float64) # x_n[i] is the x position of atom 1 after iteration i, in angstroms
potential_n = np.zeros([niterations], np.float64) # potential_n[i] is the potential energy after iteration i, in kT
kinetic_n = np.zeros([niterations], np.float64) # kinetic_n[i] is the kinetic energy after iteration i, in kT
temperature_n = np.zeros([niterations], np.float64) # temperature_n[i] is the instantaneous kinetic temperature from iteration i, in K
volume_n = np.zeros([niterations], np.float64) # volume_n[i] is the volume from iteration i, in K
for iteration in range(niterations):
if debug: print("iteration %d / %d" % (iteration, niterations))
# Update sampler state.
sampler_state = sampler.run(sampler_state, 1)
# Get statistics.
potential_energy = sampler_state.potential_energy
kinetic_energy = sampler_state.kinetic_energy
total_energy = sampler_state.total_energy
instantaneous_temperature = kinetic_energy * 2.0 / ndof / (units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA)
volume = sampler_state.volume
#print "potential %8.1f kT | kinetic %8.1f kT | total %8.1f kT | volume %8.3f nm^3 | instantaneous temperature: %8.1f K" % (potential_energy/kT, kinetic_energy/kT, total_energy/kT, volume/(units.nanometers**3), instantaneous_temperature/units.kelvin)
# Accumulate statistics.
x_n[iteration] = sampler_state.positions[0,0] / units.angstroms
potential_n[iteration] = potential_energy / kT
kinetic_n[iteration] = kinetic_energy / kT
temperature_n[iteration] = instantaneous_temperature / units.kelvin
volume_n[iteration] = volume / (units.nanometers**3)
# Compute expected statistics.
if ('get_potential_expectation' in dir(testsystem)):
# Skip this check if the std dev is zero.
skip_test = False
if (potential_n.std() == 0.0):
skip_test = True
if debug: print("Skipping potential test since variance is zero.")
if not skip_test:
potential_expectation = testsystem.get_potential_expectation(thermodynamic_state) / kT
potential_mean = potential_n.mean()
g = timeseries.statisticalInefficiency(potential_n, fast=True)
dpotential_mean = potential_n.std() / np.sqrt(niterations / g)
potential_error = potential_mean - potential_expectation
nsigma = abs(potential_error) / dpotential_mean
test_passed = True
if (nsigma > NSIGMA_CUTOFF):
test_passed = False
if debug or (test_passed is False):
print("Potential energy expectation")
print("observed %10.5f +- %10.5f kT | expected %10.5f | error %10.5f +- %10.5f (%.1f sigma)" % (potential_mean, dpotential_mean, potential_expectation, potential_error, dpotential_mean, nsigma))
if test_passed:
print("TEST PASSED")
else:
print("TEST FAILED")
print("----------------------------------------------------------------------------")
if ('get_volume_expectation' in dir(testsystem)):
# Skip this check if the std dev is zero.
skip_test = False
if (volume_n.std() == 0.0):
skip_test = True
if debug: print("Skipping volume test.")
if not skip_test:
volume_expectation = testsystem.get_volume_expectation(thermodynamic_state) / (units.nanometers**3)
volume_mean = volume_n.mean()
g = timeseries.statisticalInefficiency(volume_n, fast=True)
dvolume_mean = volume_n.std() / np.sqrt(niterations / g)
volume_error = volume_mean - volume_expectation
nsigma = abs(volume_error) / dvolume_mean
test_passed = True
if (nsigma > NSIGMA_CUTOFF):
test_passed = False
if debug or (test_passed is False):
print("Volume expectation")
print("observed %10.5f +- %10.5f kT | expected %10.5f | error %10.5f +- %10.5f (%.1f sigma)" % (volume_mean, dvolume_mean, volume_expectation, volume_error, dvolume_mean, nsigma))
if test_passed:
print("TEST PASSED")
else:
print("TEST FAILED")
print("----------------------------------------------------------------------------")
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
if __name__ == "__main__":
#test_mcmc_expectations()
test_minimizer_all_testsystems()
| 7,259 | 0 | 69 |
f519e5a9d3baabea7b9efdf4583778ebf50e0b07 | 163 | py | Python | aula/aula013.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | aula/aula013.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | aula/aula013.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | from time import sleep
#Aula N°13
print("Aula N°13 \n")
sleep(0.2)
s = 0
for c in range(1, 4):
n = int(input('Escolha um numero: '))
s = s + n
print(s) | 12.538462 | 41 | 0.576687 | from time import sleep
#Aula N°13
print("Aula N°13 \n")
sleep(0.2)
s = 0
for c in range(1, 4):
n = int(input('Escolha um numero: '))
s = s + n
print(s) | 0 | 0 | 0 |
033708bec7b12d53ab2579de5592e8c6448a6dee | 1,043 | py | Python | Leetcode/0151-0200/0167-two-sum-ii.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | null | null | null | Leetcode/0151-0200/0167-two-sum-ii.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | null | null | null | Leetcode/0151-0200/0167-two-sum-ii.py | MiKueen/Data-Structures-and-Algorithms | 8788bde5349f326aac0267531f39ac7a2a708ee6 | [
"MIT"
] | 1 | 2019-10-06T15:46:14.000Z | 2019-10-06T15:46:14.000Z | '''
Author : MiKueen
Level : Easy
Problem Statement : Two Sum II - Input array is sorted
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Note:
Your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Example:
Input: numbers = [2,7,11,15], target = 9
Output: [1,2]
Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
'''
| 34.766667 | 137 | 0.628955 | '''
Author : MiKueen
Level : Easy
Problem Statement : Two Sum II - Input array is sorted
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Note:
Your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Example:
Input: numbers = [2,7,11,15], target = 9
Output: [1,2]
Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
'''
class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Two-pointer scan over the ascending list; returns 1-based indices.

        Moves the low pointer up when the pair sum is too small and the
        high pointer down when it is too large, until the target is hit.
        """
        lo = 0
        hi = len(numbers) - 1
        while lo < hi:
            total = numbers[lo] + numbers[hi]
            if total < target:
                lo += 1
            elif total > target:
                hi -= 1
            else:
                return [lo + 1, hi + 1]
| 297 | -6 | 49 |
1a3533a122ed8825db4a37879558e890c3636d80 | 22,998 | py | Python | trueskilltest.py | adity5/trueskill | cd616d625973305a40d3259d9371cb430bf31dd4 | [
"BSD-3-Clause"
] | 533 | 2015-01-09T06:23:49.000Z | 2022-03-18T07:01:21.000Z | trueskilltest.py | adity5/trueskill | cd616d625973305a40d3259d9371cb430bf31dd4 | [
"BSD-3-Clause"
] | 40 | 2015-04-26T15:47:54.000Z | 2022-02-02T17:35:30.000Z | trueskilltest.py | adity5/trueskill | cd616d625973305a40d3259d9371cb430bf31dd4 | [
"BSD-3-Clause"
] | 116 | 2015-01-05T03:22:58.000Z | 2022-03-18T07:01:31.000Z | # -*- coding: utf-8 -*-
from __future__ import with_statement
import warnings
from almost import Approximate
from pytest import deprecated_call, raises
from conftest import various_backends
import trueskill as t
from trueskill import (
quality, quality_1vs1, rate, rate_1vs1, Rating, setup, TrueSkill)
warnings.simplefilter('always')
inf = float('inf')
nan = float('nan')
_rate = almost.wrap(rate)
_rate_1vs1 = almost.wrap(rate_1vs1)
_quality = almost.wrap(quality)
_quality_1vs1 = almost.wrap(quality_1vs1)
# usage
def test_compatibility_with_another_rating_systems():
"""All rating system modules should implement ``rate_1vs1`` and
``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
"""
r1, r2 = Rating(30, 3), Rating(20, 2)
assert quality_1vs1(r1, r2) == quality([(r1,), (r2,)])
rated = rate([(r1,), (r2,)])
assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
rated = rate([(r1,), (r2,)], [0, 0])
assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])
# algorithm
@various_backends
@various_backends
@various_backends
@various_backends
@various_backends
@various_backends
@various_backends
@various_backends
@various_backends
# functions
@various_backends
# mathematics
# reported bugs
@various_backends
def test_issue3():
"""The `issue #3`_, opened by @youknowone.
These inputs led to ZeroDivisionError before 0.1.4. Also another TrueSkill
implementations cannot calculate this case.
.. _issue #3: https://github.com/sublee/trueskill/issues/3
"""
# @konikos's case 1
t1 = (Rating(42.234, 3.728), Rating(43.290, 3.842))
t2 = (Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500))
rate([t1, t2], [6, 5])
# @konikos's case 2
t1 = (Rating(25.000, 0.500), Rating(25.000, 0.500), Rating(25.000, 0.500),
Rating(25.000, 0.500), Rating(33.333, 0.500), Rating(33.333, 0.500),
Rating(33.333, 0.500), Rating(33.333, 0.500), Rating(41.667, 0.500),
Rating(41.667, 0.500), Rating(41.667, 0.500), Rating(41.667, 0.500))
t2 = (Rating(42.234, 3.728), Rating(43.291, 3.842))
rate([t1, t2], [0, 28])
@various_backends(['scipy'])
def test_issue4():
"""The `issue #4`_, opened by @sublee.
numpy.float64 handles floating-point error by different way. For example,
it can just warn RuntimeWarning on n/0 problem instead of throwing
ZeroDivisionError.
.. _issue #4: https://github.com/sublee/trueskill/issues/4
"""
import numpy
r1, r2 = Rating(105.247, 0.439), Rating(27.030, 0.901)
# make numpy to raise FloatingPointError instead of warning
# RuntimeWarning
old_settings = numpy.seterr(divide='raise')
try:
rate([(r1,), (r2,)])
finally:
numpy.seterr(**old_settings)
@various_backends([None, 'scipy'])
def test_issue5(backend):
"""The `issue #5`_, opened by @warner121.
This error occurs when a winner has too low rating than a loser. Basically
Python cannot calculate correct result but mpmath_ can. I added ``backend``
option to :class:`TrueSkill` class. If it is set to 'mpmath' then the
problem will have gone.
The result of TrueSkill calculator by Microsoft is N(-273.092, 2.683) and
N(-75.830, 2.080), of C# Skills by Moserware is N(NaN, 2.6826) and
N(NaN, 2.0798). I choose Microsoft's result as an expectation for the test
suite.
.. _issue #5: https://github.com/sublee/trueskill/issues/5
.. _mpmath: http://mpmath.googlecode.com/
"""
assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190))
assert _quality_1vs1(Rating(), Rating(1000)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000))
@various_backends(['mpmath'])
@various_backends(['mpmath'])
def test_issue5_with_more_extreme():
"""If the input is more extreme, 'mpmath' backend also made an exception.
But we can avoid the problem with higher precision.
"""
import mpmath
try:
dps = mpmath.mp.dps
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000))
mpmath.mp.dps = 50
assert almost(rate_1vs1(Rating(), Rating(1000000)), prec=-1) == \
[(400016.896, 6.455), (600008.104, 6.455)]
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000000000))
mpmath.mp.dps = 100
assert almost(rate_1vs1(Rating(), Rating(1000000000000)), prec=-7) == \
[(400001600117.693, 6.455), (599998399907.307, 6.455)]
finally:
mpmath.mp.dps = dps
def test_issue9_weights_dict_with_object_keys():
"""The `issue #9`_, opened by @.
.. _issue #9: https://github.com/sublee/trueskill/issues/9
"""
p1 = Player(Rating(), 0)
p2 = Player(Rating(), 0)
p3 = Player(Rating(), 1)
teams = [{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}]
rated = rate(teams, weights={(0, p1): 1, (0, p2): 0.5, (1, p3): 1})
assert rated[0][p1].mu > rated[0][p2].mu
assert rated[0][p1].sigma < rated[0][p2].sigma
assert rated[0][p1].sigma == rated[1][p3].sigma
| 35.71118 | 79 | 0.587573 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import warnings
from almost import Approximate
from pytest import deprecated_call, raises
from conftest import various_backends
import trueskill as t
from trueskill import (
quality, quality_1vs1, rate, rate_1vs1, Rating, setup, TrueSkill)
# Surface every warning so the `deprecated_call` checks below can observe
# deprecation warnings that would otherwise be shown only once.
warnings.simplefilter('always')
inf = float('inf')
nan = float('nan')
class almost(Approximate):
    """Approximate-equality helper aware of TrueSkill types: a `Rating`
    is compared as its plain tuple form, and a list of rating groups is
    flattened into one list before comparison."""
    def normalize(self, value):
        """Convert *value* into a plain structure for fuzzy comparison."""
        if isinstance(value, Rating):
            # A Rating iterates as a tuple of its parameters; recurse on that.
            return self.normalize(tuple(value))
        elif isinstance(value, list):
            try:
                if isinstance(value[0][0], Rating):
                    # flatten transformed ratings
                    return list(sum(value, ()))
            except (TypeError, IndexError):
                # Not a list of rating tuples; fall through to the default.
                pass
        return super(almost, self).normalize(value)
    @classmethod
    def wrap(cls, f, *args, **kwargs):
        """Return a proxy of *f* whose result is wrapped in this class."""
        return lambda *a, **k: cls(f(*a, **k), *args, **kwargs)
# Convenience wrappers: each returns the corresponding helper's result
# wrapped in `almost`, so tests can compare ratings with fuzzy equality.
_rate = almost.wrap(rate)
_rate_1vs1 = almost.wrap(rate_1vs1)
_quality = almost.wrap(quality)
_quality_1vs1 = almost.wrap(quality_1vs1)
# usage
def test_compatibility_with_another_rating_systems():
    """All rating system modules should implement ``rate_1vs1`` and
    ``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
    """
    r1, r2 = Rating(30, 3), Rating(20, 2)
    assert quality_1vs1(r1, r2) == quality([(r1,), (r2,)])
    # The 1vs1 shortcut must equal rating the same match as two
    # one-player teams.
    rated = rate([(r1,), (r2,)])
    assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
    # A [0, 0] ranking means a draw; drawn=True must produce the same result.
    rated = rate([(r1,), (r2,)], [0, 0])
    assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])
def test_compare_ratings():
    """Ratings support the full set of comparison operators."""
    assert Rating(1, 2) == Rating(1, 2)
    assert Rating(1, 2) != Rating(1, 3)
    assert Rating(2, 2) > Rating(1, 2)
    assert Rating(3, 2) >= Rating(1, 2)
    assert Rating(0, 2) < Rating(1, 2)
    assert Rating(-1, 2) <= Rating(1, 2)
def test_rating_to_number():
    """A Rating can be coerced to int, float and complex (and `long`
    on Python 2)."""
    assert int(Rating(1, 2)) == 1
    assert float(Rating(1.1, 2)) == 1.1
    assert complex(Rating(1.2, 2)) == 1.2 + 0j
    try:
        assert long(Rating(1, 2)) == long(1)
    except NameError:
        # Python 3 doesn't have `long` anymore
        pass
def test_unsorted_groups():
    """Ranks may arrive in any order; [2, 1, 0] reverses the groups."""
    t1, t2, t3 = generate_teams([1, 1, 1])
    rated = rate([t1, t2, t3], [2, 1, 0])
    assert almost(rated) == \
        [(18.325, 6.656), (25.000, 6.208), (31.675, 6.656)]
def test_custom_environment():
    """A dedicated TrueSkill environment applies its own draw probability."""
    env = TrueSkill(draw_probability=.50)
    t1, t2 = generate_teams([1, 1], env=env)
    rated = env.rate([t1, t2])
    assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
def test_setup_global_environment():
try:
setup(draw_probability=.50)
t1, t2 = generate_teams([1, 1])
rated = rate([t1, t2])
assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
finally:
# rollback
setup()
def test_invalid_rating_groups():
env = TrueSkill()
with raises(ValueError):
env.validate_rating_groups([])
with raises(ValueError):
env.validate_rating_groups([()])
# need multiple groups not just one
with raises(ValueError):
env.validate_rating_groups([(Rating(),)])
# empty group is not allowed
with raises(ValueError):
env.validate_rating_groups([(Rating(),), ()])
# all groups should be same structure
with raises(TypeError):
env.validate_rating_groups([(Rating(),), {0: Rating()}])
def test_deprecated_methods():
env = TrueSkill()
r1, r2, r3 = Rating(), Rating(), Rating()
deprecated_call(t.transform_ratings, [(r1,), (r2,), (r3,)])
deprecated_call(t.match_quality, [(r1,), (r2,), (r3,)])
deprecated_call(env.Rating)
deprecated_call(env.transform_ratings, [(r1,), (r2,), (r3,)])
deprecated_call(env.match_quality, [(r1,), (r2,), (r3,)])
deprecated_call(env.rate_1vs1, r1, r2)
deprecated_call(env.quality_1vs1, r1, r2)
deprecated_call(lambda: Rating().exposure)
dyn = TrueSkill(draw_probability=t.dynamic_draw_probability)
deprecated_call(dyn.rate, [(r1,), (r2,)])
def test_deprecated_individual_rating_groups():
r1, r2, r3 = Rating(50, 1), Rating(10, 5), Rating(15, 5)
with raises(TypeError):
deprecated_call(rate, [r1, r2, r3])
with raises(TypeError):
deprecated_call(quality, [r1, r2, r3])
assert t.transform_ratings([r1, r2, r3]) == rate([(r1,), (r2,), (r3,)])
assert t.match_quality([r1, r2, r3]) == quality([(r1,), (r2,), (r3,)])
deprecated_call(t.transform_ratings, [r1, r2, r3])
deprecated_call(t.match_quality, [r1, r2, r3])
def test_rating_tuples():
r1, r2, r3 = Rating(), Rating(), Rating()
rated = rate([(r1, r2), (r3,)])
assert len(rated) == 2
assert isinstance(rated[0], tuple)
assert isinstance(rated[1], tuple)
assert len(rated[0]) == 2
assert len(rated[1]) == 1
assert isinstance(rated[0][0], Rating)
def test_rating_dicts():
class Player(object):
def __init__(self, name, rating, team):
self.name = name
self.rating = rating
self.team = team
p1 = Player('Player A', Rating(), 0)
p2 = Player('Player B', Rating(), 0)
p3 = Player('Player C', Rating(), 1)
rated = rate([{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}])
assert len(rated) == 2
assert isinstance(rated[0], dict)
assert isinstance(rated[1], dict)
assert len(rated[0]) == 2
assert len(rated[1]) == 1
assert p1 in rated[0]
assert p2 in rated[0]
assert p3 in rated[1]
assert p1 not in rated[1]
assert p2 not in rated[1]
assert p3 not in rated[0]
assert isinstance(rated[0][p1], Rating)
p1.rating = rated[p1.team][p1]
p2.rating = rated[p2.team][p2]
p3.rating = rated[p3.team][p3]
def test_dont_use_0_for_min_delta():
    """`rate` must reject a zero ``min_delta``."""
    with raises(ValueError):
        rate([(Rating(),), (Rating(),)], min_delta=0)
def test_list_instead_of_tuple():
    """Lists are accepted wherever tuples describe a team."""
    r1, r2 = Rating(), Rating()
    assert rate([[r1], [r2]]) == rate([(r1,), (r2,)])
    assert quality([[r1], [r2]]) == quality([(r1,), (r2,)])
def test_backend():
env = TrueSkill(backend=(NotImplemented, NotImplemented, NotImplemented))
with raises(TypeError):
env.rate_1vs1(Rating(), Rating())
with raises(ValueError):
# '__not_defined__' backend is not defined
TrueSkill(backend='__not_defined__')
# algorithm
def generate_teams(sizes, env=None):
    """Build a list of rating groups; group *i* holds ``sizes[i]`` fresh
    default ratings (created via *env* when given, else the global
    ``Rating`` class)."""
    make_rating = env.create_rating if env is not None else Rating
    return [tuple(make_rating() for _ in range(count)) for count in sizes]
def generate_individual(size, env=None):
    # A free-for-all of `size` players is just `size` one-player teams.
    return generate_teams([1] * size, env=env)
@various_backends
def test_n_vs_n():
# 1 vs 1
t1, t2 = generate_teams([1, 1])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == [(29.396, 7.171), (20.604, 7.171)]
assert _rate([t1, t2], [0, 0]) == [(25.000, 6.458), (25.000, 6.458)]
# 2 vs 2
t1, t2 = generate_teams([2, 2])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == \
[(28.108, 7.774), (28.108, 7.774), (21.892, 7.774), (21.892, 7.774)]
assert _rate([t1, t2], [0, 0]) == \
[(25.000, 7.455), (25.000, 7.455), (25.000, 7.455), (25.000, 7.455)]
# 4 vs 4
t1, t2 = generate_teams([4, 4])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == \
[(27.198, 8.059), (27.198, 8.059), (27.198, 8.059), (27.198, 8.059),
(22.802, 8.059), (22.802, 8.059), (22.802, 8.059), (22.802, 8.059)]
@various_backends
def test_1_vs_n():
t1, = generate_teams([1])
# 1 vs 2
t2, = generate_teams([2])
assert _quality([t1, t2]) == 0.135
assert _rate([t1, t2]) == \
[(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)]
assert _rate([t1, t2], [0, 0]) == \
[(31.660, 7.138), (18.340, 7.138), (18.340, 7.138)]
# 1 vs 3
t2, = generate_teams([3])
assert _quality([t1, t2]) == 0.012
assert _rate([t1, t2]) == \
[(36.337, 7.527), (13.663, 7.527), (13.663, 7.527), (13.663, 7.527)]
assert almost(rate([t1, t2], [0, 0]), 2) == \
[(34.990, 7.455), (15.010, 7.455), (15.010, 7.455), (15.010, 7.455)]
# 1 vs 7
t2, = generate_teams([7])
assert _quality([t1, t2]) == 0
assert _rate([t1, t2]) == \
[(40.582, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917),
(9.418, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917)]
@various_backends
def test_individual():
# 3 players
players = generate_individual(3)
assert _quality(players) == 0.200
assert _rate(players) == \
[(31.675, 6.656), (25.000, 6.208), (18.325, 6.656)]
assert _rate(players, [0] * 3) == \
[(25.000, 5.698), (25.000, 5.695), (25.000, 5.698)]
# 4 players
players = generate_individual(4)
assert _quality(players) == 0.089
assert _rate(players) == \
[(33.207, 6.348), (27.401, 5.787), (22.599, 5.787), (16.793, 6.348)]
# 5 players
players = generate_individual(5)
assert _quality(players) == 0.040
assert _rate(players) == \
[(34.363, 6.136), (29.058, 5.536), (25.000, 5.420), (20.942, 5.536),
(15.637, 6.136)]
# 8 players
players = generate_individual(8)
assert _quality(players) == 0.004
assert _rate(players, [0] * 8) == \
[(25.000, 4.592), (25.000, 4.583), (25.000, 4.576), (25.000, 4.573),
(25.000, 4.573), (25.000, 4.576), (25.000, 4.583), (25.000, 4.592)]
# 16 players
players = generate_individual(16)
assert _rate(players) == \
[(40.539, 5.276), (36.810, 4.711), (34.347, 4.524), (32.336, 4.433),
(30.550, 4.380), (28.893, 4.349), (27.310, 4.330), (25.766, 4.322),
(24.234, 4.322), (22.690, 4.330), (21.107, 4.349), (19.450, 4.380),
(17.664, 4.433), (15.653, 4.524), (13.190, 4.711), (9.461, 5.276)]
@various_backends
def test_multiple_teams():
# 2 vs 4 vs 2
t1 = (Rating(40, 4), Rating(45, 3))
t2 = (Rating(20, 7), Rating(19, 6), Rating(30, 9), Rating(10, 4))
t3 = (Rating(50, 5), Rating(30, 2))
assert _quality([t1, t2, t3]) == 0.367
assert _rate([t1, t2, t3], [0, 1, 1]) == \
[(40.877, 3.840), (45.493, 2.934), (19.609, 6.396), (18.712, 5.625),
(29.353, 7.673), (9.872, 3.891), (48.830, 4.590), (29.813, 1.976)]
# 1 vs 2 vs 1
t1 = (Rating(),)
t2 = (Rating(), Rating())
t3 = (Rating(),)
assert _quality([t1, t2, t3]) == 0.047
@various_backends
def test_upset():
# 1 vs 1
t1, t2 = (Rating(),), (Rating(50, 12.5),)
assert _quality([t1, t2]) == 0.110
assert _rate([t1, t2], [0, 0]) == [(31.662, 7.137), (35.010, 7.910)]
# 2 vs 2
t1 = (Rating(20, 8), Rating(25, 6))
t2 = (Rating(35, 7), Rating(40, 5))
assert _quality([t1, t2]) == 0.084
assert _rate([t1, t2]) == \
[(29.698, 7.008), (30.455, 5.594), (27.575, 6.346), (36.211, 4.768)]
# 3 vs 2
t1 = (Rating(28, 7), Rating(27, 6), Rating(26, 5))
t2 = (Rating(30, 4), Rating(31, 3))
assert _quality([t1, t2]) == 0.254
assert _rate([t1, t2], [0, 1]) == \
[(28.658, 6.770), (27.484, 5.856), (26.336, 4.917), (29.785, 3.958),
(30.879, 2.983)]
assert _rate([t1, t2], [1, 0]) == \
[(21.840, 6.314), (22.474, 5.575), (22.857, 4.757), (32.012, 3.877),
(32.132, 2.949)]
# 8 players
players = [(Rating(10, 8),), (Rating(15, 7),), (Rating(20, 6),),
(Rating(25, 5),), (Rating(30, 4),), (Rating(35, 3),),
(Rating(40, 2),), (Rating(45, 1),)]
assert _quality(players) == 0.000
assert _rate(players) == \
[(35.135, 4.506), (32.585, 4.037), (31.329, 3.756), (30.984, 3.453),
(31.751, 3.064), (34.051, 2.541), (38.263, 1.849), (44.118, 0.983)]
@various_backends
def test_partial_play():
t1, t2 = (Rating(),), (Rating(), Rating())
# each results from C# Skills:
assert rate([t1, t2], weights=[(1,), (1, 1)]) == rate([t1, t2])
assert _rate([t1, t2], weights=[(1,), (1, 1)]) == \
[(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)]
assert _rate([t1, t2], weights=[(0.5,), (0.5, 0.5)]) == \
[(33.939, 7.312), (16.061, 7.312), (16.061, 7.312)]
assert _rate([t1, t2], weights=[(1,), (0, 1)]) == \
[(29.440, 7.166), (25.000, 8.333), (20.560, 7.166)]
assert _rate([t1, t2], weights=[(1,), (0.5, 1)]) == \
[(32.417, 7.056), (21.291, 8.033), (17.583, 7.056)]
# match quality of partial play
t1, t2, t3 = (Rating(),), (Rating(), Rating()), (Rating(),)
assert _quality([t1, t2, t3], [(1,), (0.25, 0.75), (1,)]) == 0.2
assert _quality([t1, t2, t3], [(1,), (0.8, 0.9), (1,)]) == 0.0809
@various_backends
def test_partial_play_with_weights_dict():
t1, t2 = (Rating(),), (Rating(), Rating())
assert rate([t1, t2], weights={(0, 0): 0.5, (1, 0): 0.5, (1, 1): 0.5}) == \
rate([t1, t2], weights=[[0.5], [0.5, 0.5]])
assert rate([t1, t2], weights={(1, 0): 0}) == \
rate([t1, t2], weights=[[1], [0, 1]])
assert rate([t1, t2], weights={(1, 0): 0.5}) == \
rate([t1, t2], weights=[[1], [0.5, 1]])
@various_backends
def test_microsoft_research_example():
# http://research.microsoft.com/en-us/projects/trueskill/details.aspx
alice, bob, chris, darren, eve, fabien, george, hillary = \
Rating(), Rating(), Rating(), Rating(), \
Rating(), Rating(), Rating(), Rating()
_rated = rate([{'alice': alice}, {'bob': bob}, {'chris': chris},
{'darren': darren}, {'eve': eve}, {'fabien': fabien},
{'george': george}, {'hillary': hillary}])
rated = {}
list(map(rated.update, _rated))
assert almost(rated['alice']) == (36.771, 5.749)
assert almost(rated['bob']) == (32.242, 5.133)
assert almost(rated['chris']) == (29.074, 4.943)
assert almost(rated['darren']) == (26.322, 4.874)
assert almost(rated['eve']) == (23.678, 4.874)
assert almost(rated['fabien']) == (20.926, 4.943)
assert almost(rated['george']) == (17.758, 5.133)
assert almost(rated['hillary']) == (13.229, 5.749)
@various_backends
def test_dynamic_draw_probability():
from trueskillhelpers import calc_dynamic_draw_probability as calc
def assert_predictable_draw_probability(r1, r2, drawn=False):
dyn = TrueSkill(draw_probability=t.dynamic_draw_probability)
sta = TrueSkill(draw_probability=calc((r1,), (r2,), dyn))
assert dyn.rate_1vs1(r1, r2, drawn) == sta.rate_1vs1(r1, r2, drawn)
assert_predictable_draw_probability(Rating(100), Rating(10))
assert_predictable_draw_probability(Rating(10), Rating(100))
assert_predictable_draw_probability(Rating(10), Rating(100), drawn=True)
assert_predictable_draw_probability(Rating(25), Rating(25))
assert_predictable_draw_probability(Rating(25), Rating(25), drawn=True)
assert_predictable_draw_probability(Rating(-25), Rating(125))
assert_predictable_draw_probability(Rating(125), Rating(-25))
assert_predictable_draw_probability(Rating(-25), Rating(125), drawn=True)
assert_predictable_draw_probability(Rating(25, 10), Rating(25, 0.1))
# functions
@various_backends
def test_exposure():
    """A fresh default rating always exposes as zero, regardless of the
    environment's mu/sigma parameters."""
    env = TrueSkill()
    assert env.expose(env.create_rating()) == 0
    env = TrueSkill(1000, 200)
    assert env.expose(env.create_rating()) == 0
# mathematics
def test_valid_gaussian():
    """Gaussian requires both parameters and a positive variance."""
    from trueskill.mathematics import Gaussian
    with raises(TypeError): # sigma argument is needed
        Gaussian(0)
    with raises(ValueError): # sigma**2 should be greater than 0
        Gaussian(0, 0)
def test_valid_matrix():
    """Matrix rejects malformed sources."""
    from trueskill.mathematics import Matrix
    with raises(TypeError): # src must be a list or dict or callable
        Matrix(None)
    with raises(ValueError): # src must be a rectangular array of numbers
        Matrix([])
    with raises(ValueError): # src must be a rectangular array of numbers
        Matrix([[1, 2, 3], [4, 5]])
    with raises(TypeError):
        # A callable src must return an iterable which generates a tuple
        # containing coordinate and value
        Matrix(lambda: None)
def test_matrix_from_dict():
    """A dict source sizes the matrix to cover its largest coordinate;
    unspecified cells default to zero."""
    from trueskill.mathematics import Matrix
    mat = Matrix({(0, 0): 1, (4, 9): 1})
    assert mat.height == 5
    assert mat.width == 10
    assert mat[0][0] == 1
    assert mat[0][1] == 0
    assert mat[4][9] == 1
    assert mat[4][8] == 0
def test_matrix_from_item_generator():
from trueskill.mathematics import Matrix
def gen_matrix(height, width):
yield (0, 0), 1
yield (height - 1, width - 1), 1
mat = Matrix(gen_matrix, 5, 10)
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
with raises(TypeError):
# A callable src must call set_height and set_width if the size is
# non-deterministic
Matrix(gen_matrix)
def gen_and_set_size_matrix(set_height, set_width):
set_height(5)
set_width(10)
return [((0, 0), 1), ((4, 9), 1)]
mat = Matrix(gen_and_set_size_matrix)
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
def test_matrix_operations():
    """Inverse, determinant, adjugate, multiplication and addition."""
    from trueskill.mathematics import Matrix
    assert Matrix([[1, 2], [3, 4]]).inverse() == \
        Matrix([[-2.0, 1.0], [1.5, -0.5]])
    assert Matrix([[1, 2], [3, 4]]).determinant() == -2
    assert Matrix([[1, 2], [3, 4]]).adjugate() == Matrix([[4, -2], [-3, 1]])
    with raises(ValueError): # Bad size
        assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6]])
    assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6, 7], [8, 9, 10]]) == \
        Matrix([[21, 24, 27], [47, 54, 61]])
    with raises(ValueError): # Must be same size
        Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6, 7], [8, 9, 10]])
    assert Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6], [7, 8]]) == \
        Matrix([[6, 8], [10, 12]])
# reported bugs
@various_backends
def test_issue3():
"""The `issue #3`_, opened by @youknowone.
These inputs led to ZeroDivisionError before 0.1.4. Also another TrueSkill
implementations cannot calculate this case.
.. _issue #3: https://github.com/sublee/trueskill/issues/3
"""
# @konikos's case 1
t1 = (Rating(42.234, 3.728), Rating(43.290, 3.842))
t2 = (Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500))
rate([t1, t2], [6, 5])
# @konikos's case 2
t1 = (Rating(25.000, 0.500), Rating(25.000, 0.500), Rating(25.000, 0.500),
Rating(25.000, 0.500), Rating(33.333, 0.500), Rating(33.333, 0.500),
Rating(33.333, 0.500), Rating(33.333, 0.500), Rating(41.667, 0.500),
Rating(41.667, 0.500), Rating(41.667, 0.500), Rating(41.667, 0.500))
t2 = (Rating(42.234, 3.728), Rating(43.291, 3.842))
rate([t1, t2], [0, 28])
@various_backends(['scipy'])
def test_issue4():
"""The `issue #4`_, opened by @sublee.
numpy.float64 handles floating-point error by different way. For example,
it can just warn RuntimeWarning on n/0 problem instead of throwing
ZeroDivisionError.
.. _issue #4: https://github.com/sublee/trueskill/issues/4
"""
import numpy
r1, r2 = Rating(105.247, 0.439), Rating(27.030, 0.901)
# make numpy to raise FloatingPointError instead of warning
# RuntimeWarning
old_settings = numpy.seterr(divide='raise')
try:
rate([(r1,), (r2,)])
finally:
numpy.seterr(**old_settings)
@various_backends([None, 'scipy'])
def test_issue5(backend):
"""The `issue #5`_, opened by @warner121.
This error occurs when a winner has too low rating than a loser. Basically
Python cannot calculate correct result but mpmath_ can. I added ``backend``
option to :class:`TrueSkill` class. If it is set to 'mpmath' then the
problem will have gone.
The result of TrueSkill calculator by Microsoft is N(-273.092, 2.683) and
N(-75.830, 2.080), of C# Skills by Moserware is N(NaN, 2.6826) and
N(NaN, 2.0798). I choose Microsoft's result as an expectation for the test
suite.
.. _issue #5: https://github.com/sublee/trueskill/issues/5
.. _mpmath: http://mpmath.googlecode.com/
"""
assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190))
assert _quality_1vs1(Rating(), Rating(1000)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000))
@various_backends(['mpmath'])
def test_issue5_with_mpmath():
_rate_1vs1 = almost.wrap(rate_1vs1, 0)
assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0
assert _rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == \
[(-273.361, 2.683), (-75.683, 2.080)]
assert _quality_1vs1(Rating(), Rating(1000)) == 0
assert _rate_1vs1(Rating(), Rating(1000)) == \
[(415.298, 6.455), (609.702, 6.455)]
@various_backends(['mpmath'])
def test_issue5_with_more_extreme():
"""If the input is more extreme, 'mpmath' backend also made an exception.
But we can avoid the problem with higher precision.
"""
import mpmath
try:
dps = mpmath.mp.dps
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000))
mpmath.mp.dps = 50
assert almost(rate_1vs1(Rating(), Rating(1000000)), prec=-1) == \
[(400016.896, 6.455), (600008.104, 6.455)]
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000000000))
mpmath.mp.dps = 100
assert almost(rate_1vs1(Rating(), Rating(1000000000000)), prec=-7) == \
[(400001600117.693, 6.455), (599998399907.307, 6.455)]
finally:
mpmath.mp.dps = dps
def test_issue9_weights_dict_with_object_keys():
"""The `issue #9`_, opened by @.
.. _issue #9: https://github.com/sublee/trueskill/issues/9
"""
class Player(object):
def __init__(self, rating, team):
self.rating = rating
self.team = team
p1 = Player(Rating(), 0)
p2 = Player(Rating(), 0)
p3 = Player(Rating(), 1)
teams = [{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}]
rated = rate(teams, weights={(0, p1): 1, (0, p2): 0.5, (1, p3): 1})
assert rated[0][p1].mu > rated[0][p2].mu
assert rated[0][p1].sigma < rated[0][p2].sigma
assert rated[0][p1].sigma == rated[1][p3].sigma
| 16,534 | 76 | 781 |
13add9bde409fbeafca507098bc9056f4d2fe97e | 44,491 | py | Python | main.py | qinyiwei/MuTual | 3bdd13c1388d6136b8944666dfd434870760cc93 | [
"MIT"
] | null | null | null | main.py | qinyiwei/MuTual | 3bdd13c1388d6136b8944666dfd434870760cc93 | [
"MIT"
] | null | null | null | main.py | qinyiwei/MuTual | 3bdd13c1388d6136b8944666dfd434870760cc93 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import datetime
import argparse
import csv
import logging
import os
import random
import sys
import pickle
import numpy as np
import torch
import json
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import glob
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from transformers import (BertConfig, BertForMultipleChoice, BertTokenizer,
ElectraConfig, ElectraTokenizer, RobertaConfig, RobertaTokenizer, RobertaForMultipleChoice)
from modeling import (BertBaseline, RobertaBaseline, BertForMultipleChoicePlus, RobertaForMultipleChoicePlus)
from modeling.model import ElectraForMultipleChoice as Baseline
from transformers import (AdamW, WEIGHTS_NAME, CONFIG_NAME)
import re
import os
logger = logging.getLogger(__name__)
class InputExample(object):
    """One sequence-classification example: a unique id, one or two
    untokenized text segments, and an optional gold label."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the example's fields.

        Args:
            guid: unique identifier for the example.
            text_a: the first (required) sequence, untokenized.
            text_b: the optional second sequence for pair tasks.
            label: gold label string; present for train/dev examples and
                absent for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeatures(object):
    """A single set of features of data.

    Holds, for one example, the per-choice model inputs produced by
    ``convert_examples_to_features`` plus the integer gold label.
    """
    def __init__(self, example_id, choices_features, label):
        """Store the feature fields.

        Args:
            example_id: the originating example's guid.
            choices_features: list of (input_ids, input_mask, segment_ids,
                sep_pos, turn_ids) tuples, one per answer choice.
            label: integer label id.
        """
        # Bug fix: this class had no constructor, yet
        # convert_examples_to_features instantiates it with these three
        # keyword arguments, which would raise a TypeError.
        self.example_id = example_id
        self.choices_features = choices_features
        self.label = label
class DataProcessor(object):
    """Abstract interface for dataset-specific converters that produce
    `InputExample`s for sequence classification."""
    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the training split."""
        raise NotImplementedError()
    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev split."""
        raise NotImplementedError()
    def get_labels(self):
        """Return the list of possible label strings."""
        raise NotImplementedError()
class UbuntuProcessor(DataProcessor):
    """Processor for the Ubuntu dialogue data: tab-separated lines of
    ``label<TAB>turn_1<TAB>...<TAB>turn_n<TAB>response``."""
    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.txt")))
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "train.txt")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "test.txt")), "test")
    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "test.txt")), "test")
    def get_labels(self):
        """See base class."""
        return ["0", "1"]
    def _read_data(self, input_file):
        """Read a tab-separated file into {"y", "m", "r"} dicts
        (label, joined context message, response)."""
        control_chars = re.compile('[\\x00-\\x08\\x0b-\\x0c\\x0e-\\x1f\\x7f]')
        records = []
        with open(input_file, "r", encoding="utf-8") as f:
            for raw in f:
                # Replace control characters and drop "_" placeholders.
                cleaned = control_chars.sub(' ', raw).strip().replace("_", "")
                parts = cleaned.strip().split("\t")
                label = parts[0]
                # Join the context turns with "[SEP]". The separator is
                # keyed on the column index (not the filtered position),
                # so an empty final turn leaves a trailing "[SEP]" —
                # preserved quirk of the original implementation.
                message = ""
                last_idx = len(parts) - 2
                for idx in range(1, len(parts) - 1):
                    turn = parts[idx].strip()
                    if len(turn) > 0:
                        message += turn
                        if idx != last_idx:
                            message += "[SEP]"
                records.append({"y": label, "m": message, "r": parts[-1]})
        return records
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for index, record in enumerate(lines):
            examples.append(
                InputExample(
                    guid="%s-%s" % (set_type, index),
                    text_a=[record["r"]],
                    text_b=[record["m"].strip().split("[SEP]")],
                    label=record["y"]))
        return examples
class MuTualProcessor(DataProcessor):
    """Processor for the MuTual data set."""
    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} train".format(data_dir))
        file = os.path.join(data_dir, 'train')
        file = self._read_txt(file)
        return self._create_examples(file, 'train')
    def get_dev_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        file = os.path.join(data_dir, 'dev')
        file = self._read_txt(file)
        return self._create_examples(file, 'dev')
    def get_test_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} test".format(data_dir))
        file = os.path.join(data_dir, 'test')
        file = self._read_txt(file)
        return self._create_examples(file, 'test')
    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]
    def _read_txt(self, input_dir):
        """Load every ``*txt`` file under *input_dir*.

        Each file is expected to hold one JSON object with at least the
        keys ``article``, ``answers`` and ``options`` consumed by
        ``_create_examples``.

        NOTE(review): this method was called but missing from this chunk;
        reconstructed from its call sites. Confirm the on-disk format
        (strict JSON vs. python-literal) against the MuTual data release.
        """
        lines = []
        for path in sorted(glob.glob(os.path.join(input_dir, "*txt"))):
            with open(path, "r", encoding="utf-8") as fin:
                data_raw = json.load(fin)
            # Fall back to the file path when the sample carries no id.
            data_raw.setdefault("id", path)
            lines.append(data_raw)
        return lines
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (_, data_raw) in enumerate(lines):
            id = "%s-%s" % (set_type, data_raw["id"])
            article = data_raw["article"]
            # Split the dialogue on the speaker markers and re-attach each
            # marker to the utterance that follows it.
            article = re.split(r"(f : |m : |M: |F: )", article)
            article = ["".join(i) for i in zip(article[1::2], article[2::2])]
            # Answers are letters 'A'..'D'; map them to "0".."3".
            truth = str(ord(data_raw['answers']) - ord('A'))
            options = data_raw['options']
            examples.append(
                InputExample(
                    guid=id,
                    text_a = [options[0], options[1], options[2], options[3]],
                    text_b=[article, article, article, article], # this is not efficient but convenient
                    label=truth))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, max_utterance_num,
                                 tokenizer, output_mode):
    """Loads a data file into a list of `InputBatch`s.

    For every example and every answer choice, builds the usual BERT
    inputs (token ids, attention mask, segment ids) plus two extras:
    per-token ``turn_ids`` (which utterance each token belongs to) and
    ``sep_pos`` (cumulative positions of the "[SEP]" boundaries).
    NOTE(review): ``output_mode`` is accepted but never used here.
    """
    # Map each label string to its integer index.
    label_map = {label : i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
        choices_features = []
        all_tokens = []
        # One pass per answer choice: text_a is the candidate response,
        # text_b the list of context utterances.
        for ending_idx, (text_a, text_b) in enumerate(zip(example.text_a, example.text_b)):
            tokens_a = tokenizer.tokenize(text_a)
            tokens_b = []
            for idx, text in enumerate(text_b):
                if len(text.strip()) > 0:
                    # Terminate each non-empty utterance with "[SEP]".
                    tokens_b.extend(tokenizer.tokenize(text) + ["[SEP]"])
            # Reserve two slots for "[CLS]" and the final "[SEP]".
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 2)
            tokens = ["[CLS]"]
            turn_ids = [0]
            context_len = []
            sep_pos = []
            # Re-split the (possibly truncated) context on "[SEP]" and keep
            # only the last `max_utterance_num` complete utterances.
            tokens_b_raw = " ".join(tokens_b)
            tokens_b = []
            current_pos = 0
            for toks in tokens_b_raw.split("[SEP]")[-max_utterance_num - 1:-1]:
                context_len.append(len(toks.split()) + 1)  # +1 for its "[SEP]"
                tokens_b.extend(toks.split())
                tokens_b.extend(["[SEP]"])
                current_pos += context_len[-1]
                turn_ids += [len(sep_pos)] * context_len[-1]
                sep_pos.append(current_pos)
            tokens += tokens_b
            segment_ids = [0] * (len(tokens))
            # The response goes into segment 1 and gets its own turn id.
            tokens_a += ["[SEP]"]
            tokens += tokens_a
            segment_ids += [1] * (len(tokens_a))
            turn_ids += [len(sep_pos)] * len(tokens_a)
            sep_pos.append(len(tokens) - 1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # Mask has 1 for real tokens, 0 for padding.
            input_mask = [1] * len(input_ids)
            # Zero-pad everything up to the fixed lengths.
            padding = [0] * (max_seq_length - len(input_ids))
            input_ids += padding
            input_mask += padding
            segment_ids += padding
            turn_ids += padding
            context_len += [-1] * (max_utterance_num - len(context_len))
            sep_pos += [0] * (max_utterance_num + 1 - len(sep_pos))
            assert len(sep_pos) == max_utterance_num + 1
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(context_len) == max_utterance_num
            assert len(turn_ids) == max_seq_length
            choices_features.append((input_ids, input_mask, segment_ids, sep_pos, turn_ids))
            all_tokens.append(tokens)
        label_id = label_map[example.label]
        if ex_index < 10:
            # Log the first few fully-constructed examples for inspection.
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            for choice_idx, (input_ids, input_mask, segment_ids, sep_pos, turn_ids) in enumerate(choices_features):
                logger.info("choice: {}".format(choice_idx))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info("tokens: %s" % " ".join([str(x) for x in all_tokens[choice_idx]]))
                logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                logger.info("sep_pos: %s" % " ".join([str(x) for x in sep_pos]))
                logger.info("turn_ids: %s" % " ".join([str(x) for x in turn_ids]))
                logger.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
            InputFeatures(
                example_id = example.guid,
                choices_features = choices_features,
                label = label_id
            )
        )
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop(0)
# Script entry point: run the full train/eval pipeline in main() only when
# this file is executed directly (not when imported as a module).
if __name__ == "__main__":
    main()
| 41.310121 | 190 | 0.572835 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import datetime
import argparse
import csv
import logging
import os
import random
import sys
import pickle
import numpy as np
import torch
import json
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import glob
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from transformers import (BertConfig, BertForMultipleChoice, BertTokenizer,
ElectraConfig, ElectraTokenizer, RobertaConfig, RobertaTokenizer, RobertaForMultipleChoice)
from modeling import (BertBaseline, RobertaBaseline, BertForMultipleChoicePlus, RobertaForMultipleChoicePlus)
from modeling.model import ElectraForMultipleChoice as Baseline
from transformers import (AdamW, WEIGHTS_NAME, CONFIG_NAME)
import re
import os
logger = logging.getLogger(__name__)
def select_field(features, field):
    """Collect one named tensor component across all features and choices.

    Returns a nested list shaped [num_features][num_choices], where each
    entry is `feature.choices_features[j][field]` (e.g. field='input_ids').
    """
    batched = []
    for feature in features:
        batched.append([choice[field] for choice in feature.choices_features])
    return batched
class InputExample(object):
    """A single training/test example for simple sequence classification.

    NOTE(review): in this script the processors pass `text_a` as a list of
    candidate responses and `text_b` as a parallel list of dialogue contexts
    (each itself a list of turns) -- see the `_create_examples` methods.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeatures(object):
    """A single set of features of data (one dict per answer choice).

    Each element of `choices_features` maps tensor names to the per-choice
    lists produced by `convert_examples_to_features`.
    """

    def __init__(self, example_id, choices_features, label):
        """Store the example id, per-choice feature dicts, and the label id.

        Args:
            example_id: guid of the originating InputExample.
            choices_features: iterable of 5-tuples
                (input_ids, input_mask, segment_ids, sep_pos, turn_ids).
            label: integer label id.
        """
        self.example_id = example_id
        self.choices_features = []
        for ids, mask, segments, seps, turns in choices_features:
            self.choices_features.append({
                'input_ids': ids,
                'input_mask': mask,
                'segment_ids': segments,
                'sep_pos': seps,
                'turn_ids': turns,
            })
        self.label = label
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets.

    Concrete corpora (Ubuntu, Douban, MuTual) subclass this and implement
    the three methods below.
    """

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()
class DoubanProcessor(DataProcessor):
    """Processor for the Douban response-selection corpus.

    Input files are tab-separated:
    ``label \\t turn_1 \\t ... \\t turn_n \\t response``.
    Candidates come in consecutive blocks of 10 lines (one retrieval
    session); sessions without any positive label are dropped entirely.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.txt")))
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "train.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class. Dev evaluation reads test.txt (no separate dev file)."""
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "test.txt")), "test")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "test.txt")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            # text_a: one candidate response; text_b: list of context turns,
            # recovered by splitting on the "[SEP]" markers from _read_data.
            text_a = [line["r"]]
            text_b = [line["m"].strip().split("[SEP]")]
            label = line["y"]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def _read_data(self, input_file):
        """Reads a tab separated value file.

        Lines are buffered in groups of 10 (one session per 10 candidates);
        a session is flushed into the output only if at least one of its
        labels is '1'.
        """
        with open(input_file, "r", encoding="utf-8") as f:
            lines = []
            label_list = []
            message_list = []
            response_list = []
            # Set to 1 as soon as the current 10-line session has a positive.
            label_any_1 = 0
            for ids,line in enumerate(f):
                # Strip ASCII control characters, then drop underscores
                # (corpus artifacts).
                line = re.compile('[\\x00-\\x08\\x0b-\\x0c\\x0e-\\x1f\\x7f]').sub(' ', line).strip()
                line = line.strip().replace("_", "")
                parts = line.strip().split("\t")
                lable = parts[0]  # (sic) the label column
                message = ""
                # Middle columns are the dialogue turns; join non-empty turns
                # with " [SEP] " (note: this leaves a trailing separator).
                for i in range(1, len(parts) - 1, 1):
                    part = parts[i].strip()
                    if len(part) > 0:
                        message += part
                        message += " [SEP] "
                response = parts[-1]
                if lable == '1':
                    label_any_1 = 1
                label_list.append(lable)
                message_list.append(message)
                response_list.append(response)
                # Every 10th line closes a session: keep it only if positive.
                if ids % 10 == 9:
                    if label_any_1 == 1:
                        for lable,message,response in zip(label_list,message_list,response_list):
                            data = {"y": lable, "m": message, "r": response}
                            lines.append(data)
                    label_any_1 = 0
                    label_list = []
                    message_list = []
                    response_list = []
        return lines
class UbuntuProcessor(DataProcessor):
    """Processor for the Ubuntu / E-commerce response-selection data sets.

    Input files are tab-separated:
    ``label \\t utterance_1 \\t ... \\t utterance_n \\t response``.
    Dialogue turns are joined with a "[SEP]" marker to form the context.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.txt")))
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "train.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class. Dev evaluation reads test.txt (no separate dev file)."""
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "test.txt")), "test")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_data(os.path.join(data_dir, "test.txt")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _read_data(self, input_file):
        """Reads a tab separated value file into dicts with keys y/m/r.

        'y' is the label, 'm' the "[SEP]"-joined dialogue context, and 'r'
        the candidate response (last column).
        """
        lines = []
        # Compile once (was re-compiled on every line): strips ASCII
        # control characters that occasionally appear in the corpus.
        control_chars = re.compile('[\\x00-\\x08\\x0b-\\x0c\\x0e-\\x1f\\x7f]')
        with open(input_file, "r", encoding="utf-8") as f:
            for raw_line in f:
                line = control_chars.sub(' ', raw_line).strip()
                line = line.strip().replace("_", "")
                parts = line.strip().split("\t")
                label = parts[0]
                message = ""
                # Columns 1..n-2 are dialogue turns; join non-empty turns with
                # "[SEP]" (no marker after the final turn position). The loop
                # variable `turn_idx` replaces an inner `i` that shadowed the
                # enumerate index in the original code.
                for turn_idx in range(1, len(parts) - 1):
                    part = parts[turn_idx].strip()
                    if len(part) > 0:
                        if turn_idx != len(parts) - 2:
                            message += part + "[SEP]"
                        else:
                            message += part
                response = parts[-1]
                lines.append({"y": label, "m": message, "r": response})
        return lines

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            # text_a: one candidate response; text_b: list of context turns.
            text_a = [line["r"]]
            text_b = [line["m"].strip().split("[SEP]")]
            label = line["y"]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class MuTualProcessor(DataProcessor):
    """Processor for the MuTual data set.

    Each example lives in its own JSON file with keys 'article' (the
    dialogue), 'options' (4 candidate responses) and 'answers' (gold
    letter A-D).
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} train".format(data_dir))
        file = os.path.join(data_dir, 'train')
        file = self._read_txt(file)
        return self._create_examples(file, 'train')

    def get_dev_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        file = os.path.join(data_dir, 'dev')
        file = self._read_txt(file)
        return self._create_examples(file, 'dev')

    def get_test_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} test".format(data_dir))
        file = os.path.join(data_dir, 'test')
        file = self._read_txt(file)
        return self._create_examples(file, 'test')

    def get_labels(self):
        """See base class. Four options per example (letters A-D map to 0-3)."""
        return ["0", "1", "2", "3"]

    def _read_txt(self, input_dir):
        # Load every *txt JSON file under the split directory; the source
        # path is stored in 'id' for traceability.
        lines = []
        files = glob.glob(input_dir + "/*txt")
        for file in tqdm(files, desc="read files"):
            with open(file, 'r', encoding='utf-8') as fin:
                data_raw = json.load(fin)
                data_raw["id"] = file
                lines.append(data_raw)
        return lines

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (_, data_raw) in enumerate(lines):
            id = "%s-%s" % (set_type, data_raw["id"])
            article = data_raw["article"]
            # Split the dialogue on speaker markers; the capturing group keeps
            # the markers, and the zip below re-attaches each marker to its
            # utterance (pairs of marker + following text).
            # NOTE(review): lower-case markers include surrounding spaces
            # while upper-case ones do not -- presumably matching the raw
            # corpus format; verify against the data files.
            article = re.split(r"(f : |m : |M: |F: )", article)
            article = ["".join(i) for i in zip(article[1::2], article[2::2])]
            # Gold answer letter -> option index string ("A" -> "0", ...).
            truth = str(ord(data_raw['answers']) - ord('A'))
            options = data_raw['options']
            examples.append(
                InputExample(
                    guid=id,
                    text_a = [options[0], options[1], options[2], options[3]],
                    text_b=[article, article, article, article], # this is not efficient but convenient
                    label=truth))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, max_utterance_num,
                                 tokenizer, output_mode):
    """Loads a data file into a list of `InputBatch`s.

    For every example and every answer choice, builds the token sequence
    ``[CLS] turn_1 [SEP] ... turn_k [SEP] response [SEP]`` together with:
      - input_mask:   1 for real tokens, 0 for padding
      - segment_ids:  0 over the context, 1 over the response
      - turn_ids:     per-token index of the dialogue turn it belongs to
      - sep_pos:      token positions of the "[SEP]" markers, zero-padded
                      to exactly max_utterance_num + 1 entries
      - context_len:  token count of each turn incl. its "[SEP]", -1-padded
                      to max_utterance_num entries
    NOTE(review): `output_mode` is accepted for API symmetry but unused here.
    """
    label_map = {label : i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))

        choices_features = []
        all_tokens = []
        # text_a: one candidate response per choice; text_b: the matching
        # dialogue context (a list of turns).
        for ending_idx, (text_a, text_b) in enumerate(zip(example.text_a, example.text_b)):
            tokens_a = tokenizer.tokenize(text_a)
            tokens_b = []
            for idx, text in enumerate(text_b):
                if len(text.strip()) > 0:
                    tokens_b.extend(tokenizer.tokenize(text) + ["[SEP]"])
            # Reserve 2 positions for [CLS] and the response's final [SEP].
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 2)

            tokens = ["[CLS]"]
            turn_ids = [0]

            context_len = []
            sep_pos = []

            # Round-trip through a string so the context can be re-split on
            # "[SEP]"; the slice keeps at most the last max_utterance_num
            # turns and drops the empty trailing segment after the final
            # "[SEP]".
            tokens_b_raw = " ".join(tokens_b)
            tokens_b = []
            current_pos = 0
            for toks in tokens_b_raw.split("[SEP]")[-max_utterance_num - 1:-1]:
                context_len.append(len(toks.split()) + 1)  # +1 for "[SEP]"
                tokens_b.extend(toks.split())
                tokens_b.extend(["[SEP]"])
                current_pos += context_len[-1]
                # All tokens of this turn (incl. its "[SEP]") share a turn id.
                turn_ids += [len(sep_pos)] * context_len[-1]
                sep_pos.append(current_pos)

            tokens += tokens_b
            segment_ids = [0] * (len(tokens))

            # Append the response as segment 1, closed by its own "[SEP]".
            tokens_a += ["[SEP]"]
            tokens += tokens_a
            segment_ids += [1] * (len(tokens_a))
            turn_ids += [len(sep_pos)] * len(tokens_a)

            sep_pos.append(len(tokens) - 1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)

            # Zero-pad everything up to max_seq_length.
            padding = [0] * (max_seq_length - len(input_ids))

            input_ids += padding
            input_mask += padding
            segment_ids += padding
            turn_ids += padding
            context_len += [-1] * (max_utterance_num - len(context_len))
            sep_pos += [0] * (max_utterance_num + 1 - len(sep_pos))

            assert len(sep_pos) == max_utterance_num + 1
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(context_len) == max_utterance_num
            assert len(turn_ids) == max_seq_length

            choices_features.append((input_ids, input_mask, segment_ids, sep_pos, turn_ids))
            all_tokens.append(tokens)

        label_id = label_map[example.label]

        # Log the first few examples verbatim for manual inspection.
        if ex_index < 10:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            for choice_idx, (input_ids, input_mask, segment_ids, sep_pos, turn_ids) in enumerate(choices_features):
                logger.info("choice: {}".format(choice_idx))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info("tokens: %s" % " ".join([str(x) for x in all_tokens[choice_idx]]))
                logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                logger.info("sep_pos: %s" % " ".join([str(x) for x in sep_pos]))
                logger.info("turn_ids: %s" % " ".join([str(x) for x in turn_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
            InputFeatures(
                example_id = example.guid,
                choices_features = choices_features,
                label = label_id
            )
        )
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop(0)
def get_p_at_n_in_m(pred, n, m, ind):
    """Return 1 if the score at `ind` ranks in the top n of its m-candidate
    group (pred[ind:ind+m]), else 0.

    `pred[ind]` is treated as the positive candidate's score; ties count as
    a hit because the comparison is non-strict.
    """
    positive_score = pred[ind]
    group = sorted(pred[ind:ind + m], reverse=True)
    return 1 if group[n - 1] <= positive_score else 0
def mean_average_precision(sort_data):
    """Average precision over one ranked session.

    `sort_data` is a list of (score, label) pairs already sorted by score
    descending. Raises ZeroDivisionError if no pair has label 1.
    """
    hits = 0
    precision_sum = 0.0
    for rank, item in enumerate(sort_data, start=1):
        if item[1] == 1:
            hits += 1
            precision_sum += hits / rank
    return precision_sum / hits
def mean_reciprocal_rank(sort_data):
    """Reciprocal rank of the first relevant item in a ranked session.

    `sort_data` is (score, label) pairs sorted by score descending; a label
    of 1 must be present (asserted). Returns 1 / (rank of first positive).
    """
    labels = [item[1] for item in sort_data]
    assert 1 in labels
    first_hit = labels.index(1)
    return 1.0 / (first_hit + 1)
def precision_at_position_1(sort_data):
    """Return 1 if the top-ranked item of the session has label 1, else 0."""
    top_label = sort_data[0][1]
    return 1 if top_label == 1 else 0
def recall_at_position_k_in_10(sort_data, k):
    """Recall@k: fraction of the session's relevant items found in the top k.

    `sort_data` is (score, label) pairs sorted by score descending; raises
    ZeroDivisionError if the session contains no positive label.
    """
    labels = [item[1] for item in sort_data]
    hits_in_top_k = labels[:k].count(1)
    total_hits = labels.count(1)
    return 1.0 * hits_in_top_k / total_hits
def evaluation_one_session(data):
    """Compute all ranking metrics for one session of (score, label) pairs.

    Candidates are sorted by score descending, then MAP, MRR, P@1 and
    R@{1,2,5} (out of 10) are computed on that ranking and returned as a
    6-tuple in that order.
    """
    ranked = sorted(data, key=lambda pair: pair[0], reverse=True)
    return (
        mean_average_precision(ranked),
        mean_reciprocal_rank(ranked),
        precision_at_position_1(ranked),
        recall_at_position_k_in_10(ranked, 1),
        recall_at_position_k_in_10(ranked, 2),
        recall_at_position_k_in_10(ranked, 5),
    )
def evaluate_douban(pred, label):
    """Aggregate Douban ranking metrics over sessions of 10 candidates each.

    `pred` and `label` are flat, parallel sequences; every consecutive run
    of 10 entries is one retrieval session. Returns the per-session mean of
    (MAP, MRR, P@1, R10@1, R10@2, R10@5). A trailing partial session (fewer
    than 10 entries) is ignored, matching the original accumulation.
    """
    totals = [0.0] * 6
    total_num = 0
    session = []
    for score, gold in zip(pred, label):
        session.append((float(score), int(gold)))
        if len(session) == 10:
            total_num += 1
            metrics = evaluation_one_session(session)
            for k in range(6):
                totals[k] += metrics[k]
            session = []
    return (totals[0] / total_num, totals[1] / total_num, totals[2] / total_num,
            totals[3] / total_num, totals[4] / total_num, totals[5] / total_num)
def evaluate(pred, label):
    """Ubuntu-style metrics over 10-candidate sessions.

    The first candidate of every block of 10 must be the positive one
    (asserted). Returns per-session averages of (R2@1, R10@1, R10@2, R10@5).
    """
    num_sessions = int(len(pred) / 10)
    r2_at_1 = 0.0
    r10_at_1 = 0.0
    r10_at_2 = 0.0
    r10_at_5 = 0.0
    for session_idx in range(num_sessions):
        start = session_idx * 10
        assert label[start] == 1
        r2_at_1 += get_p_at_n_in_m(pred, 1, 2, start)
        r10_at_1 += get_p_at_n_in_m(pred, 1, 10, start)
        r10_at_2 += get_p_at_n_in_m(pred, 2, 10, start)
        r10_at_5 += get_p_at_n_in_m(pred, 5, 10, start)
    return (r2_at_1 / num_sessions, r10_at_1 / num_sessions,
            r10_at_2 / num_sessions, r10_at_5 / num_sessions)
def simple_accuracy(preds, labels):
    """Fraction of positions where `preds` equals `labels`.

    Relies on numpy element-wise comparison, so inputs should be numpy
    arrays (or broadcast-compatible array-likes).
    """
    matches = preds == labels
    return matches.mean()
def ComputeR10(scores, labels, count=10):
    """Recall@{1,2,5,10} assuming each positive starts a block of `count` scores.

    For every index i with labels[i] == 1, the positive's score is scores[i]
    and its candidate group is scores[i:i+count]; a hit at cutoff k means the
    argmax of that window falls within the first k positions. Prints raw hit
    counts (R@1, R@5, R@10, total) before returning the four ratios.
    """
    total = 0
    hits = {1: 0, 2: 0, 5: 0, 10: 0}
    for i, gold in enumerate(labels):
        if gold == 1:
            total += 1
            best = np.argmax(scores[i:i + count])
            for cutoff in hits:
                if best < cutoff:
                    hits[cutoff] += 1
    print(hits[1], hits[5], hits[10], total)
    return (float(hits[1]) / total, float(hits[2]) / total,
            float(hits[5]) / total, float(hits[10]) / total)
def ComputeR2_1(scores, labels, count=2):
    """Recall@1 out of `count` candidates per positive.

    For every index i with labels[i] == 1, the positive wins if its score is
    the maximum of scores[i:i+count]. Ties count in the positive's favour
    because the comparison is equality with max().
    """
    total = 0
    correct = 0
    for i, gold in enumerate(labels):
        if gold != 1:
            continue
        total += 1
        window = scores[i:i + count]
        if max(window) == scores[i]:
            correct += 1
    return float(correct) / total
def Compute_R4_2(preds, labels):
    """R4@2: fraction of examples whose gold option scores within the top 2.

    `preds[i]` holds the per-option scores for example i; `labels[i]` is the
    gold option index. Ties resolve optimistically since list.index()
    returns the first (highest-ranked) occurrence of the gold score.
    """
    hits = 0
    for option_scores, gold in zip(preds, labels):
        ranking = sorted(option_scores, reverse=True)
        if ranking.index(option_scores[gold]) <= 1:
            hits += 1
    return hits / len(preds)
def Compute_MRR(preds, labels):
    """Mean reciprocal rank of the gold option across examples.

    The gold option's rank is its position in the descending sort of that
    example's option scores (first occurrence wins on ties).
    """
    reciprocal_sum = 0.0
    for option_scores, gold in zip(preds, labels):
        ranking = sorted(option_scores, reverse=True)
        rank = ranking.index(option_scores[gold]) + 1
        reciprocal_sum += 1 / rank
    return reciprocal_sum / len(preds)
def compute_metrics(task_name, preds, labels):
    """Dispatch per-task metric computation on raw prediction scores.

    `preds` is a (num_examples, num_options) score matrix, `labels` the gold
    labels/indices. NOTE(review): when preds has a single logit column only
    `preds_class` is defined, so the ranking branches below would raise
    NameError on `preds_logits` -- presumably that path is never taken for
    the ranking tasks; verify against callers.
    """
    assert len(preds) == len(labels)
    if preds.shape[1] == 1:
        # Single-logit head: threshold at zero.
        preds_class = np.ones(preds.shape)
        preds_class[preds < 0] = 0
    else:
        preds_class = np.argmax(preds, axis=1)
        preds_logits = preds[:, 1]
    if task_name in ["ubuntu", "ecd"]:
        return {
            "acc": simple_accuracy(preds_class, labels),
            "recall@10": ComputeR10(preds_logits, labels),
            "recall@2": ComputeR2_1(preds_logits, labels),
            "DAM": evaluate(preds_logits, labels),
        }
    elif task_name == 'douban':
        return {"DAM": evaluate_douban(preds_logits, labels)}
    elif task_name in ['mutual']:
        return {
            "R4_1": simple_accuracy(preds_class, labels),
            "R4_2": Compute_R4_2(preds, labels),
            "MRR:": Compute_MRR(preds, labels),
        }
    else:
        raise KeyError(task_name)
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--speaker_aware",
action='store_true',
help="Whether not to use speaker aware embedding")
parser.add_argument("--response_aware",
action='store_true',
help="Whether not to use response aware decouple")
parser.add_argument("--BiDAF",
action='store_true',
help="Whether not to use biDAF")
parser.add_argument("--data_dir",
default='../../../MuTual/data/mutual',
type=str,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_name_or_path", default="google/electra-large-discriminator", type=str)
parser.add_argument("--model_type", default="electra", type = str,
help = "Pre-trained Model selected in the list: bert, roberta, electra")
parser.add_argument("--task_name",
default="mutual",
type=str,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default="output_mutual_electra_3",
type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--max_utterance_num",
default=20,
type=int,
help="The maximum total utterance number.")
parser.add_argument("--cache_flag",
default="v1",
type=str,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_grad_norm",
default = 1.0,
type = float,
help = "The maximum grad norm for clipping")
parser.add_argument("--cache_dir",
default='../../cached_models',
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--baseline",
action='store_true',
help="Whether to run baseline.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=24,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=24,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=4e-6,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_rnn",
default=1,
type=int,
help="RNN.")
parser.add_argument("--num_decouple",
default=1,
type=int,
help="Decoupling Layers.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if args.response_aware:
from modeling.model import ElectraForMultipleChoiceResponse as ElectraForMultipleChoicePlus
elif args.BiDAF:
from modeling.model import ElectraForMultipleChoiceBiDAF as ElectraForMultipleChoicePlus
else:
from modeling.model import ElectraForMultipleChoiceDecouple as ElectraForMultipleChoicePlus
MODEL_CLASSES = {
'bert': (BertConfig, BertForMultipleChoicePlus, BertTokenizer),
'roberta': (RobertaConfig, RobertaForMultipleChoicePlus, RobertaTokenizer),
'electra': (ElectraConfig, ElectraForMultipleChoicePlus, ElectraTokenizer)
}
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
processors = {
"ubuntu": UbuntuProcessor,
'douban': DoubanProcessor,
'ecd': UbuntuProcessor,
"mutual": MuTualProcessor
}
output_modes = {
"ubuntu": "classification",
"mutual": "classification",
'douban': "classification",
'ecd': 'classification'
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
if args.baseline:
if args.model_type == 'electra':
model_class = Baseline
elif args.model_type == 'bert':
model_class = BertBaseline
elif args.model_type == 'roberta':
model_class = RobertaBaseline
config = config_class.from_pretrained(args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
train_examples = None
num_train_optimization_steps = None
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
if args.fp16:
model.half()
model.to(device)
print(model)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.do_train:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
cached_train_features_file = args.data_dir + '_{0}_{1}_{2}_{3}_{4}_{5}'.format(
list(filter(None, args.model_name_or_path.split('/'))).pop(), "train",str(args.task_name), str(args.max_seq_length),
str(args.max_utterance_num), str(args.cache_flag))
train_features = None
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
except:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, args.max_utterance_num, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
# (batch_size, 1, seq_len)
all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
#all_response_len = torch.tensor(select_field(train_features, 'response_len'), dtype=torch.long)
all_sep_pos = torch.tensor(select_field(train_features, 'sep_pos'), dtype=torch.long)
all_turn_ids = torch.tensor(select_field(train_features, 'turn_ids'), dtype = torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_sep_pos, all_turn_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
eval_examples = processor.get_dev_examples(args.data_dir)
cached_train_features_file = args.data_dir + '_{0}_{1}_{2}_{3}_{4}_{5}'.format(
list(filter(None, args.model_name_or_path.split('/'))).pop(), "valid",str(args.task_name), str(args.max_seq_length),
str(args.max_utterance_num), str(args.cache_flag))
eval_features = None
try:
with open(cached_train_features_file, "rb") as reader:
eval_features = pickle.load(reader)
except:
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, args.max_utterance_num, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving eval features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(eval_features, writer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long)
all_sep_pos = torch.tensor(select_field(eval_features, 'sep_pos'), dtype=torch.long)
all_turn_ids = torch.tensor(select_field(eval_features, 'turn_ids'), dtype = torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label for f in eval_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label for f in eval_features], dtype=torch.float)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_sep_pos, all_turn_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
model.train()
tr_loss = 0
#nb_tr_examples = 0
nb_tr_steps = 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
token_type_ids = None
if args.speaker_aware:
token_type_ids = batch[4]%2
if args.response_aware:
token_type_ids = batch[2]
if args.BiDAF:
token_type_ids = batch[2]
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': token_type_ids,
'sep_pos': batch[3],
'turn_ids': batch[4],
'labels': batch[5]}
#input_ids, input_mask, segment_ids, response_len, sep_pos, label_ids = batch
output = model(**inputs)
loss = output[0]
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.detach().item()
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear.get_lr(
global_step / num_train_optimization_steps,
args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Save a trained model, configuration and tokenizer
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, str(epoch) + "_" + WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
token_type_ids = None
if args.speaker_aware:
token_type_ids = batch[4]%2
if args.response_aware:
token_type_ids = batch[2]
if args.BiDAF:
token_type_ids = batch[2]
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': token_type_ids,
'sep_pos': batch[3],
'turn_ids': batch[4],
'labels': batch[5]}
#outputs = eval_model(**inputs)
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.detach().mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
result = compute_metrics(task_name, preds, out_label_ids)
loss = tr_loss / nb_tr_steps if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
# Script entry point: run the training/evaluation driver when executed directly.
if __name__ == "__main__":
    main()
| 29,209 | 2,753 | 445 |
1c82a092d652b6f6dacceb9ea22aa4185bdac0bb | 1,389 | py | Python | netbox/users/migrations/0001_api_tokens_squashed_0002_unicode_literals.py | xcorp/netbox | 48b9c9da932dc736710d9c14793067093f8f1bde | [
"Apache-2.0"
] | 6 | 2017-12-01T05:13:39.000Z | 2020-01-23T13:04:43.000Z | netbox/users/migrations/0001_api_tokens_squashed_0002_unicode_literals.py | xcorp/netbox | 48b9c9da932dc736710d9c14793067093f8f1bde | [
"Apache-2.0"
] | 8 | 2021-04-16T01:38:00.000Z | 2022-01-04T21:27:27.000Z | netbox/users/migrations/0001_api_tokens_squashed_0002_unicode_literals.py | xcorp/netbox | 48b9c9da932dc736710d9c14793067093f8f1bde | [
"Apache-2.0"
] | 3 | 2017-11-18T01:28:22.000Z | 2018-05-17T14:04:43.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-01 17:43
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
| 40.852941 | 141 | 0.635709 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-01 17:43
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Squashed migration: replaces the two originals listed below so a fresh
    # database applies a single step instead of both.
    replaces = [('users', '0001_api_tokens'), ('users', '0002_unicode_literals')]
    # Token rows reference the configured user model, so it must exist first.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # API token: a unique 40-character key tied to a user, with optional
        # expiry and a write-enable flag.
        migrations.CreateModel(
            name='Token',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('expires', models.DateTimeField(blank=True, null=True)),
                ('key', models.CharField(max_length=40, unique=True, validators=[django.core.validators.MinLengthValidator(40)])),
                ('write_enabled', models.BooleanField(default=True, help_text='Permit create/update/delete operations using this key')),
                ('description', models.CharField(blank=True, max_length=100)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tokens', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # No default add/change/delete permissions are created for Token.
                'default_permissions': [],
            },
        ),
    ]
| 0 | 1,154 | 23 |
924b4ba3b425982309ec0f08bffb018f6cdf7467 | 497 | py | Python | downscale-lls.py | jni/useful-histories | 0c75003e4fa3a80d4bf7281314cdf6e363d3be56 | [
"BSD-3-Clause"
] | null | null | null | downscale-lls.py | jni/useful-histories | 0c75003e4fa3a80d4bf7281314cdf6e363d3be56 | [
"BSD-3-Clause"
] | null | null | null | downscale-lls.py | jni/useful-histories | 0c75003e4fa3a80d4bf7281314cdf6e363d3be56 | [
"BSD-3-Clause"
] | null | null | null | # IPython log file
# IPython session log: mean-downscale each of the 199x201 (i, j) tiles of a
# light-sheet volume by 4x4 and store the result in a pre-chunked zarr array.
from tqdm import tqdm
import dask.array as da
import zarr
import itertools
import numpy as np  # was missing: np.float32 / np.array below raised NameError
from skimage.transform import downscale_local_mean

lls = da.from_zarr('data/gokul-lls/aollsm-m4-560nm.zarr')
lls3 = zarr.open('data/gokul-lls/aollsm-m4-560nm-downscale.zarr', dtype=np.float32, shape=(199, 201, 192, 256), chunks=(1, 201, 192, 256))
indices = list(itertools.product(range(199), range(201)))
for i, j in tqdm(indices):
    # np.array(...) materializes the lazy dask slice before downscaling.
    lls3[i, j] = downscale_local_mean(np.array(lls[i, j]), (4, 4))
| 33.133333 | 138 | 0.730382 | # IPython log file
# IPython session log: mean-downscale each of the 199x201 (i, j) tiles of a
# light-sheet volume by 4x4 and store the result in a pre-chunked zarr array.
from tqdm import tqdm
import dask.array as da
import zarr
import itertools
import numpy as np  # was missing: np.float32 / np.array below raised NameError
from skimage.transform import downscale_local_mean

lls = da.from_zarr('data/gokul-lls/aollsm-m4-560nm.zarr')
lls3 = zarr.open('data/gokul-lls/aollsm-m4-560nm-downscale.zarr', dtype=np.float32, shape=(199, 201, 192, 256), chunks=(1, 201, 192, 256))
indices = list(itertools.product(range(199), range(201)))
for i, j in tqdm(indices):
    # np.array(...) materializes the lazy dask slice before downscaling.
    lls3[i, j] = downscale_local_mean(np.array(lls[i, j]), (4, 4))
| 0 | 0 | 0 |
d9f95ccb9bd06253bf86b034d97ce272134d0fd4 | 967 | py | Python | snake.py | kuntaltattu/beginning-python | 59b6e0b179fbf973976f8b12470fab8243aad793 | [
"Apache-2.0"
] | 1 | 2017-06-30T10:43:27.000Z | 2017-06-30T10:43:27.000Z | snake.py | kuntaltattu/beginning-python | 59b6e0b179fbf973976f8b12470fab8243aad793 | [
"Apache-2.0"
] | null | null | null | snake.py | kuntaltattu/beginning-python | 59b6e0b179fbf973976f8b12470fab8243aad793 | [
"Apache-2.0"
] | null | null | null | import pygame,sys
from pygame.locals import *
pygame.init()
FPS = 10
frametime = 1000/FPS
fpsClock = pygame.time.Clock ()
DISPLAYSURF = pygame.display.set_mode((700,500),pygame.DOUBLEBUF, 32)
DISPLAYSURF = pygame.display.set_caption ('Snakes!')
snakeImg = pygame.image.load ('snake.png')
WHITE = (255,255,255)
snakeht = pygame.image.get_height (snakeImg)
snakewd = pygame.image.get_width (snakeImg)
froght = pygame.image.get_height (frogImg)
frogwd = pyagem.image.get_width (frogImg)
rotation = 0
direction = 'right'
snakex = 0
snakey = 500
while True:
DISPLAYSURF.fill (WHITE)
if direction == 'right':
if snakex <= 700:
snakex += 5
direction = 'left'
elif direction == 'left':
if snakex >= 0:
snakex -= 5
DISPLAYSURF.blit (snakeImg, (snakex, snakey))
for event in pygame.event.get():
if event.type() == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
| 21.977273 | 69 | 0.649431 | import pygame,sys
from pygame.locals import *
pygame.init()
FPS = 10
frametime = 1000/FPS
fpsClock = pygame.time.Clock ()
DISPLAYSURF = pygame.display.set_mode((700,500),pygame.DOUBLEBUF, 32)
DISPLAYSURF = pygame.display.set_caption ('Snakes!')
snakeImg = pygame.image.load ('snake.png')
WHITE = (255,255,255)
snakeht = pygame.image.get_height (snakeImg)
snakewd = pygame.image.get_width (snakeImg)
froght = pygame.image.get_height (frogImg)
frogwd = pyagem.image.get_width (frogImg)
rotation = 0
direction = 'right'
snakex = 0
snakey = 500
while True:
DISPLAYSURF.fill (WHITE)
if direction == 'right':
if snakex <= 700:
snakex += 5
direction = 'left'
elif direction == 'left':
if snakex >= 0:
snakex -= 5
DISPLAYSURF.blit (snakeImg, (snakex, snakey))
for event in pygame.event.get():
if event.type() == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
| 0 | 0 | 0 |
4a8c44382a90003bcba3dd0fb83e3532d22fe13e | 201 | py | Python | rts/python/panic.py | Snektron/futhark | ca9a33d511ba30b27409aef46e5df92556ab2e8b | [
"ISC"
] | 2 | 2022-01-02T16:21:11.000Z | 2022-01-09T09:49:43.000Z | rts/python/panic.py | q60/futhark | a9421d922778281ac8a84e66497c340290c1e23b | [
"ISC"
] | null | null | null | rts/python/panic.py | q60/futhark | a9421d922778281ac8a84e66497c340290c1e23b | [
"ISC"
] | null | null | null | # Start of panic.py.
# End of panic.py.
| 20.1 | 42 | 0.61194 | # Start of panic.py.
def panic(exitcode, fmt, *args):
    """Write a printf-style message to stderr, prefixed with the program
    name, then terminate the process with the given exit code."""
    message = '%s: %s\n' % (sys.argv[0], fmt % args)
    sys.stderr.write(message)
    sys.exit(exitcode)
# End of panic.py.
| 137 | 0 | 23 |
4248b02199f22375a1b1218c15ae4465fdf441ae | 1,276 | py | Python | codetools/contexts/name_filter_context.py | enthought/codetools | 20d8bb1eba68145750a1b689655b839078121474 | [
"BSD-3-Clause"
] | 29 | 2015-08-10T20:25:00.000Z | 2021-11-30T23:34:24.000Z | codetools/contexts/name_filter_context.py | enthought/codetools | 20d8bb1eba68145750a1b689655b839078121474 | [
"BSD-3-Clause"
] | 40 | 2015-01-05T15:01:37.000Z | 2022-03-11T13:47:06.000Z | codetools/contexts/name_filter_context.py | enthought/codetools | 20d8bb1eba68145750a1b689655b839078121474 | [
"BSD-3-Clause"
] | 4 | 2015-04-14T10:06:26.000Z | 2021-01-19T16:46:48.000Z | #
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
from __future__ import absolute_import
# Enthought library imports
from traits.api import Any
# Local imports
from .data_context import DataContext
class NameFilterContext(DataContext):
    """ This context will only take variables that match a list of names.
    The name of the variable is compared to a list of names. If it matches one
    of them, it is allowed into the context. If it doesn't, it isn't allowed in
    the context.
    """
    ##########################################################################
    # NameFilterContext interface
    ##########################################################################
    # The list of names that are allowed into this context.
    names = Any(copy='shallow') #List -- any container that supports 'in' will work.

    def _names_default(self):
        # Default to an empty list: without this, 'names' defaults to None and
        # 'allows' raises on membership testing before any assignment.
        # (Restored to match the canonical version of this class.)
        return []

    #### IRestrictedContext interface ##########################################
    def allows(self, value, name=None):
        """ Return False if the name is not in our list of accepted names.
        """
        result = name in self.names
        return result
| 28.355556 | 85 | 0.582288 | #
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
from __future__ import absolute_import
# Enthought library imports
from traits.api import Any
# Local imports
from .data_context import DataContext
class NameFilterContext(DataContext):
    """ A context that admits only variables whose names appear in a
    configured list.  Each candidate name is tested for membership in
    ``names``; anything not listed is rejected.
    """
    # Names permitted into this context; any container supporting 'in' works.
    names = Any(copy='shallow')

    def _names_default(self):
        return []

    #### IRestrictedContext interface ##########################################
    def allows(self, value, name=None):
        """ Return True when *name* is one of the accepted names. """
        return name in self.names
| 22 | 0 | 27 |
3b89cf23a8f1210a9d5cf7e6030cb9a5160845ec | 591 | py | Python | setup.py | joeyearsley/quiver | 0702720f0d97fdd57e8bbac087c44a7982ff2e0e | [
"MIT"
] | 1,129 | 2016-11-14T06:16:21.000Z | 2022-03-03T02:24:37.000Z | setup.py | ankur248/quiver | cc901e43ab0dbc5544e4196ecaa079dd87f1f6ec | [
"MIT"
] | 43 | 2016-11-14T08:38:32.000Z | 2017-02-03T17:24:11.000Z | setup.py | ankur248/quiver | cc901e43ab0dbc5544e4196ecaa079dd87f1f6ec | [
"MIT"
] | 129 | 2016-11-14T10:51:25.000Z | 2021-11-14T03:06:09.000Z | from setuptools import setup, find_packages
# Distribution metadata for the quiver_engine package.
setup(
    name='quiver_engine',
    version="0.1.4.1.4",
    author="Jake Bian",
    author_email="jake@keplr.io",
    description=("Interactive per-layer visualization for convents in keras"),
    license='mit',
    packages=find_packages(),
    include_package_data=True,
    package_dir={'quiver_engine': 'quiver_engine'},
    # Fix: package_data values must be *lists* of glob patterns.  A bare
    # string is treated by setuptools as an iterable of one-character
    # patterns, so the dashboard assets were never packaged.
    package_data={'quiver_engine': ['quiverboard/dist/*']},
    install_requires=[
        'keras',
        'tensorflow',
        'flask',
        'flask_cors',
        'gevent',
        'numpy',
        'pillow'
    ]
)
| 24.625 | 78 | 0.617597 | from setuptools import setup, find_packages
# Distribution metadata for the quiver_engine package.
REQUIREMENTS = [
    'keras',
    'tensorflow',
    'flask',
    'flask_cors',
    'gevent',
    'numpy',
    'pillow'
]

setup(
    name='quiver_engine',
    version="0.1.4.1.4",
    author="Jake Bian",
    author_email="jake@keplr.io",
    description=("Interactive per-layer visualization for convents in keras"),
    license='mit',
    packages=find_packages(),
    include_package_data=True,
    package_dir={'quiver_engine': 'quiver_engine'},
    package_data={'quiver_engine': 'quiverboard/dist/*'},
    install_requires=REQUIREMENTS,
)
| 0 | 0 | 0 |
2132da8d7e158e987542f256421399d9c24000d9 | 1,867 | py | Python | test/test_voice_endpoints_api.py | networthdata/generated-swagger-client | 41dd3fb02b322ed1d39cbaef6b4091ae6cab0d0b | [
"MIT"
] | null | null | null | test/test_voice_endpoints_api.py | networthdata/generated-swagger-client | 41dd3fb02b322ed1d39cbaef6b4091ae6cab0d0b | [
"MIT"
] | null | null | null | test/test_voice_endpoints_api.py | networthdata/generated-swagger-client | 41dd3fb02b322ed1d39cbaef6b4091ae6cab0d0b | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Speech Services API v2.0
Speech Services API v2.0. # noqa: E501
OpenAPI spec version: v2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.voice_endpoints_api import VoiceEndpointsApi # noqa: E501
from swagger_client.rest import ApiException
class TestVoiceEndpointsApi(unittest.TestCase):
    """VoiceEndpointsApi unit test stubs"""
    # Generated by swagger-codegen: each test_* method below is a placeholder
    # for one VoiceEndpointsApi operation and currently only passes.
    def test_create_voice_deployment(self):
        """Test case for create_voice_deployment
        Creates a new voice endpoint object.  # noqa: E501
        """
        pass
    def test_delete_deployment(self):
        """Test case for delete_deployment
        Delete the specified voice endpoint.  # noqa: E501
        """
        pass
    def test_get_supported_locales_for_voice_endpoints(self):
        """Test case for get_supported_locales_for_voice_endpoints
        Gets a list of supported locales for custom voice endpoints.  # noqa: E501
        """
        pass
    def test_get_voice_deployment(self):
        """Test case for get_voice_deployment
        Gets the details of a custom voice endpoint.  # noqa: E501
        """
        pass
    def test_get_voice_deployments(self):
        """Test case for get_voice_deployments
        Gets a list of voice endpoint details.  # noqa: E501
        """
        pass
    def test_update_voice_endpoint(self):
        """Test case for update_voice_endpoint
        Updates the name and description of the endpoint identified by the given ID.  # noqa: E501
        """
        pass
# Allow running this generated test module directly via unittest.
if __name__ == '__main__':
    unittest.main()
| 24.246753 | 98 | 0.668988 | # coding: utf-8
"""
Speech Services API v2.0
Speech Services API v2.0. # noqa: E501
OpenAPI spec version: v2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.voice_endpoints_api import VoiceEndpointsApi # noqa: E501
from swagger_client.rest import ApiException
class TestVoiceEndpointsApi(unittest.TestCase):
    """VoiceEndpointsApi unit test stubs"""
    # Generated by swagger-codegen: setUp builds the API client under test;
    # each test_* method is a placeholder for one API operation.
    def setUp(self):
        self.api = swagger_client.api.voice_endpoints_api.VoiceEndpointsApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_create_voice_deployment(self):
        """Test case for create_voice_deployment
        Creates a new voice endpoint object.  # noqa: E501
        """
        pass
    def test_delete_deployment(self):
        """Test case for delete_deployment
        Delete the specified voice endpoint.  # noqa: E501
        """
        pass
    def test_get_supported_locales_for_voice_endpoints(self):
        """Test case for get_supported_locales_for_voice_endpoints
        Gets a list of supported locales for custom voice endpoints.  # noqa: E501
        """
        pass
    def test_get_voice_deployment(self):
        """Test case for get_voice_deployment
        Gets the details of a custom voice endpoint.  # noqa: E501
        """
        pass
    def test_get_voice_deployments(self):
        """Test case for get_voice_deployments
        Gets a list of voice endpoint details.  # noqa: E501
        """
        pass
    def test_update_voice_endpoint(self):
        """Test case for update_voice_endpoint
        Updates the name and description of the endpoint identified by the given ID.  # noqa: E501
        """
        pass
# Allow running this generated test module directly via unittest.
if __name__ == '__main__':
    unittest.main()
| 98 | 0 | 54 |
fc7dd1c1e382aa08a027fdd5d8a16f08b7a9fd25 | 192 | py | Python | infoset/utils/__init__.py | clayton-colovore/infoset-ng | b0404fdda9e805effc16cebc9caef5f86b6bfe33 | [
"Apache-2.0"
] | null | null | null | infoset/utils/__init__.py | clayton-colovore/infoset-ng | b0404fdda9e805effc16cebc9caef5f86b6bfe33 | [
"Apache-2.0"
] | null | null | null | infoset/utils/__init__.py | clayton-colovore/infoset-ng | b0404fdda9e805effc16cebc9caef5f86b6bfe33 | [
"Apache-2.0"
] | null | null | null | """Infoset utilities package.
This package's modules perform important tasks within
the project but are either not specific enough or not
large enough to warrant packages of their own.
"""
| 24 | 59 | 0.796875 | """Infoset utilities package.
This package's modules perform important tasks within
the project but are either not specific enough or not
large enough to warrant packages of their own.
"""
| 0 | 0 | 0 |
b2011b74dfaf912f01e8af596df8d01c9f15ebc9 | 465 | py | Python | kagi/lower/east/_capital/four.py | jedhsu/kagi | 1301f7fc437bb445118b25ca92324dbd58d6ad2d | [
"MIT"
] | null | null | null | kagi/lower/east/_capital/four.py | jedhsu/kagi | 1301f7fc437bb445118b25ca92324dbd58d6ad2d | [
"MIT"
] | null | null | null | kagi/lower/east/_capital/four.py | jedhsu/kagi | 1301f7fc437bb445118b25ca92324dbd58d6ad2d | [
"MIT"
] | null | null | null | """
*Lower-East Capital 4* ⠨
The lower-east capital four gi.
"""
from dataclasses import dataclass
from ....._gi import Gi
from ....capital import CapitalGi
from ...._gi import StrismicGi
from ....east import EasternGi
from ...._number import FourGi
from ..._gi import LowerGi
__all__ = ["LowerEastCapital4"]
@dataclass
class LowerEastCapital4(
    Gi,
    StrismicGi,
    LowerGi,
    EasternGi,
    CapitalGi,
    FourGi,
):
    """The lower-east capital four gi."""
    # The class body was missing here, leaving a dangling decorator (a
    # SyntaxError); restored to match the canonical definition of this class.
    # Display glyph: Unicode code point U+2828 (⠨).
    symbol = "\u2828"
| 15 | 33 | 0.668817 | """
*Lower-East Capital 4* ⠨
The lower-east capital four gi.
"""
from dataclasses import dataclass
from ....._gi import Gi
from ....capital import CapitalGi
from ...._gi import StrismicGi
from ....east import EasternGi
from ...._number import FourGi
from ..._gi import LowerGi
__all__ = ["LowerEastCapital4"]
@dataclass
class LowerEastCapital4(
    Gi,
    StrismicGi,
    LowerGi,
    EasternGi,
    CapitalGi,
    FourGi,
):
    """The lower-east capital four gi."""
    # Display glyph: Unicode code point U+2828 (⠨).
    symbol = "\u2828"
| 0 | 107 | 22 |
9bbeb054aaf802732a08d718ed260eec7f5edfb0 | 2,242 | py | Python | utils.py | resph0ina/mosaic_tools | 43484bec986bf03ff2dd0c5dc4b5520ee0408aed | [
"MIT"
] | null | null | null | utils.py | resph0ina/mosaic_tools | 43484bec986bf03ff2dd0c5dc4b5520ee0408aed | [
"MIT"
] | null | null | null | utils.py | resph0ina/mosaic_tools | 43484bec986bf03ff2dd0c5dc4b5520ee0408aed | [
"MIT"
] | null | null | null | import cv2
import numpy as np | 35.03125 | 82 | 0.541481 | import cv2
import numpy as np
def add_mosaic_rect(image, p1, p2, block_size=10, in_place=True):
    """Pixelate the axis-aligned rectangle from p1 to p2 ((x, y) order) of a
    3-channel image by replacing each block_size x block_size tile with its
    per-channel mean.

    Returns the modified array (the input itself when in_place is True,
    otherwise a copy).  A degenerate rectangle is returned unchanged.
    """
    img = image if in_place else np.copy(image)
    if p1[0] >= p2[0] or p1[1] >= p2[1]:
        return img
    # 'xrange' was Python 2 only; 'range' makes this run on Python 3.
    for x in range(p1[0], p2[0], block_size):
        for y in range(p1[1], p2[1], block_size):
            x2 = min(x + block_size, p2[0])
            y2 = min(y + block_size, p2[1])
            if x2 > x and y2 > y:
                for c in range(3):
                    img[y:y2, x:x2, c] = np.mean(img[y:y2, x:x2, c])
    return img
def add_mosaic_mask(image, mask, block_size=10, in_place=True):
    """Pixelate only the masked region of a 3-channel image.

    The bounding box of all positive mask pixels is tiled into
    block_size x block_size blocks; a block is replaced by its per-channel
    mean only when more than half of its pixels are masked.  Returns the
    modified array (the input itself when in_place is True).  An empty or
    degenerate mask leaves the image untouched.
    """
    img = image if in_place else np.copy(image)
    maskrange = np.where(mask > 0)
    if maskrange[0].size == 0:
        return img
    # Bounding box of the masked area, in (x, y) order.
    p1 = [np.min(maskrange[1]), np.min(maskrange[0])]
    p2 = [np.max(maskrange[1]), np.max(maskrange[0])]
    if p1[0] >= p2[0] or p1[1] >= p2[1]:
        return img
    # 'xrange' was Python 2 only; 'range' makes this run on Python 3.
    for x in range(p1[0], p2[0], block_size):
        for y in range(p1[1], p2[1], block_size):
            x2 = min(x + block_size, p2[0])
            y2 = min(y + block_size, p2[1])
            # Mosaic a tile only when more than half of it is masked.
            if x2 > x and y2 > y and np.sum(mask[y:y2, x:x2] > 0) * 2 > (x2 - x) * (y2 - y):
                for c in range(3):
                    img[y:y2, x:x2, c] = np.mean(img[y:y2, x:x2, c])
    return img
def auto_canny(image, sigma=0.33):
    """Run cv2.Canny with thresholds derived from the image median:
    lower = (1 - sigma) * median and upper = (1 + sigma) * median,
    clamped to the valid [0, 255] range."""
    med = np.median(image)
    lower_thresh = int(max(0, (1.0 - sigma) * med))
    upper_thresh = int(min(255, (1.0 + sigma) * med))
    return cv2.Canny(image, lower_thresh, upper_thresh)
def get_mosaic_response(image):
    """Compute a 0/255 activation map from the image's second-order Sobel
    response: the response is scaled, eroded/dilated to consolidate regions,
    and then inverted so zero-response areas become 255."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    second_deriv = cv2.Sobel(gray, cv2.CV_32F, 2, 2, ksize=3)
    resp = np.abs(second_deriv * 100).astype(np.uint8)
    # Small erosion removes isolated speckles.
    small_kernel = np.ones((3, 3), np.uint8)
    resp = cv2.erode(resp, small_kernel)
    # Two dilations then two erosions (a morphological close) with a large
    # kernel merge nearby responses into solid regions.
    big_kernel = np.ones((30, 30), np.uint8)
    resp = cv2.dilate(resp, big_kernel)
    resp = cv2.dilate(resp, big_kernel)
    resp = cv2.erode(resp, big_kernel)
    resp = cv2.erode(resp, big_kernel)
    act = np.copy(resp)
    act[resp != 0] = 0
    act[resp == 0] = 255
    return act
52db126a2ba0c671a7e58d5bac4bbfac41519cd3 | 753 | py | Python | inference-engine/ie_bridges/python/tests/test_utils.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 1 | 2022-01-19T15:36:45.000Z | 2022-01-19T15:36:45.000Z | inference-engine/ie_bridges/python/tests/test_utils.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 22 | 2021-02-03T12:41:51.000Z | 2022-02-21T13:04:48.000Z | inference-engine/ie_bridges/python/tests/test_utils.py | mmakridi/openvino | 769bb7709597c14debdaa356dd60c5a78bdfa97e | [
"Apache-2.0"
] | null | null | null | from openvino.inference_engine import IECore, IENetwork
import ngraph as ng
from ngraph.impl.op import Parameter
from ngraph.impl import Function, Shape, Type
| 27.888889 | 69 | 0.701195 | from openvino.inference_engine import IECore, IENetwork
import ngraph as ng
from ngraph.impl.op import Parameter
from ngraph.impl import Function, Shape, Type
def get_test_cnnnetwork():
element_type = Type.f32
param = Parameter(element_type, Shape([1, 3, 22, 22]))
relu = ng.relu(param)
func = Function([relu], [param], 'test')
caps = Function.to_capsule(func)
cnnNetwork = IENetwork(caps)
assert cnnNetwork != None
return cnnNetwork
def test_compare_networks():
try:
from openvino.test_utils import CompareNetworks
net = get_test_cnnnetwork()
status, msg = CompareNetworks(net, net)
assert status
except:
print("openvino.test_utils.CompareNetworks is not available")
| 546 | 0 | 46 |
53bb5459b58c2bd4de374bf23136490aac39de80 | 3,974 | py | Python | 2 - Conceptual Design/4 - Wing Design/aircraft_plotter.py | JARC99/DISECON_PIA | 8b05b39ffce9dc9cdfdae5b21129857bad6e4d99 | [
"MIT"
] | null | null | null | 2 - Conceptual Design/4 - Wing Design/aircraft_plotter.py | JARC99/DISECON_PIA | 8b05b39ffce9dc9cdfdae5b21129857bad6e4d99 | [
"MIT"
] | null | null | null | 2 - Conceptual Design/4 - Wing Design/aircraft_plotter.py | JARC99/DISECON_PIA | 8b05b39ffce9dc9cdfdae5b21129857bad6e4d99 | [
"MIT"
] | null | null | null | """Provide tools for creating parametirc aircraft geometry."""
import numpy as np
import matplotlib.pyplot as plt
import openvsp as vsp
def naca_4_series(max_camber, max_camber_loc, max_tc, n_points,
plot_switch=False):
"""Plot NACA 4-Series airfoil with the given characteristics."""
airfoil_name = 'NACA({0:.2f})({1:.2f})({2:.2f})'.format(
max_camber, max_camber_loc, max_tc)
x_coords = np.linspace(0, 1, n_points)
t_dist = get_thickness_dist(x_coords)
z_mcl, theta = get_camber_curve(x_coords)
x_u = x_coords - t_dist*np.sin(theta)
z_u = z_mcl + t_dist*np.cos(theta)
x_l = x_coords + t_dist*np.sin(theta)
z_l = z_mcl - t_dist*np.cos(theta)
scale_factor_u = 1/x_u[-1]
x_u *= scale_factor_u
z_u *= scale_factor_u
scale_factor_l = 1/x_l[-1]
x_l *= scale_factor_l
z_l *= scale_factor_l
if plot_switch:
fig = plt.figure(dpi=1200)
ax = fig.add_subplot(111)
ax.plot(x_u, z_u, 'k')
ax.plot(x_l, z_l, 'k')
ax.axis('equal')
ax.set_title(airfoil_name)
coords_array = np.vstack((np.concatenate((x_u[::-1], x_l)),
np.concatenate((z_u[::-1], z_l)))).T
np.savetxt('xfoil/' + airfoil_name + '.dat', coords_array, fmt='%.4f')
return coords_array
def create_VSP_wing(wing_span, planform, airfoil, alpha_i):
"""Create wing in OpenVSP dexcribed by the given characteristics."""
max_camber = airfoil[0]
max_camber_loc = airfoil[1]
max_tc = airfoil[2]
vsp.VSPCheckSetup()
vsp.ClearVSPModel()
wing_id = vsp.AddGeom('WING')
vsp.SetGeomName(wing_id, 'Wing')
wing_sec_span = wing_span/(2*(len(planform) - 1))
for i in range(len(planform)-1):
if i != 0:
vsp.InsertXSec(wing_id, i, vsp.XS_FOUR_SERIES)
vsp.SetParmValUpdate(wing_id, 'Span', 'XSec_{0}'.format(i+1),
wing_sec_span)
vsp.SetParmValUpdate(wing_id, 'Root_Chord', 'XSec_{0}'.format(i+1),
planform[i])
vsp.SetParmValUpdate(wing_id, 'Tip_Chord', 'XSec_{0}'.format(i+1),
planform[i+1])
vsp.SetParmValUpdate(wing_id, 'Sweep', 'XSec_{0}'.format(i+1), 0)
vsp.SetParmValUpdate(wing_id, 'Sweep_Location', 'XSec_{0}'.format(
i+1), 0.25)
for i in range(len(planform)):
vsp.SetParmValUpdate(wing_id, 'Camber', 'XSecCurve_{0}'.format(i),
max_camber/100)
vsp.SetParmValUpdate(wing_id, 'CamberLoc', 'XSecCurve_{0}'.format(i),
max_camber_loc/10)
vsp.SetParmValUpdate(wing_id, 'ThickChord', 'XSecCurve_{0}'.format(i),
max_tc/100)
vsp.SetParmValUpdate(wing_id, 'Y_Rel_Rotation', 'XForm', alpha_i)
vsp.SetParmValUpdate(wing_id, 'Origin', 'XForm', 0.25)
vsp.WriteVSPFile(
'C:/Users/jaros/Documents/GitHub/DISECON_PIA/2 - Conceptual Design/4 - Wing Design/wing_model.vsp3')
print('Done!')
| 33.116667 | 108 | 0.585053 | """Provide tools for creating parametirc aircraft geometry."""
import numpy as np
import matplotlib.pyplot as plt
import openvsp as vsp
def naca_4_series(max_camber, max_camber_loc, max_tc, n_points,
plot_switch=False):
"""Plot NACA 4-Series airfoil with the given characteristics."""
airfoil_name = 'NACA({0:.2f})({1:.2f})({2:.2f})'.format(
max_camber, max_camber_loc, max_tc)
x_coords = np.linspace(0, 1, n_points)
def get_thickness_dist(x_coords):
t_max = max_tc/100
t_dist = t_max*(1.4845*np.sqrt(x_coords) - 0.63*x_coords -
1.758*x_coords**2 + 1.4215*x_coords**3 -
0.5075*x_coords**4)
return t_dist
def get_camber_curve(x_coords):
x_mc = max_camber_loc/10
z_mc = max_camber/100
z_mcl = np.empty(len(x_coords))
dz_mcldx = np.empty(len(x_coords))
for i, x_coord in enumerate(x_coords):
if x_coord < x_mc:
z_mcl[i] = z_mc/x_mc**2*(2*x_mc*x_coord-x_coord**2)
dz_mcldx[i] = (z_mc/x_mc**2)*(2*x_mc - 2*x_coord)
else:
z_mcl[i] = (z_mc/(1-x_mc)**2)*(
1-2*x_mc + 2*x_mc*x_coord-x_coord**2)
dz_mcldx[i] = (z_mc/(1-x_mc)**2)*(2*x_mc - 2*x_coord)
theta = np.arctan(dz_mcldx)
return z_mcl, theta
t_dist = get_thickness_dist(x_coords)
z_mcl, theta = get_camber_curve(x_coords)
x_u = x_coords - t_dist*np.sin(theta)
z_u = z_mcl + t_dist*np.cos(theta)
x_l = x_coords + t_dist*np.sin(theta)
z_l = z_mcl - t_dist*np.cos(theta)
scale_factor_u = 1/x_u[-1]
x_u *= scale_factor_u
z_u *= scale_factor_u
scale_factor_l = 1/x_l[-1]
x_l *= scale_factor_l
z_l *= scale_factor_l
if plot_switch:
fig = plt.figure(dpi=1200)
ax = fig.add_subplot(111)
ax.plot(x_u, z_u, 'k')
ax.plot(x_l, z_l, 'k')
ax.axis('equal')
ax.set_title(airfoil_name)
coords_array = np.vstack((np.concatenate((x_u[::-1], x_l)),
np.concatenate((z_u[::-1], z_l)))).T
np.savetxt('xfoil/' + airfoil_name + '.dat', coords_array, fmt='%.4f')
return coords_array
def create_VSP_wing(wing_span, planform, airfoil, alpha_i):
"""Create wing in OpenVSP dexcribed by the given characteristics."""
max_camber = airfoil[0]
max_camber_loc = airfoil[1]
max_tc = airfoil[2]
vsp.VSPCheckSetup()
vsp.ClearVSPModel()
wing_id = vsp.AddGeom('WING')
vsp.SetGeomName(wing_id, 'Wing')
wing_sec_span = wing_span/(2*(len(planform) - 1))
for i in range(len(planform)-1):
if i != 0:
vsp.InsertXSec(wing_id, i, vsp.XS_FOUR_SERIES)
vsp.SetParmValUpdate(wing_id, 'Span', 'XSec_{0}'.format(i+1),
wing_sec_span)
vsp.SetParmValUpdate(wing_id, 'Root_Chord', 'XSec_{0}'.format(i+1),
planform[i])
vsp.SetParmValUpdate(wing_id, 'Tip_Chord', 'XSec_{0}'.format(i+1),
planform[i+1])
vsp.SetParmValUpdate(wing_id, 'Sweep', 'XSec_{0}'.format(i+1), 0)
vsp.SetParmValUpdate(wing_id, 'Sweep_Location', 'XSec_{0}'.format(
i+1), 0.25)
for i in range(len(planform)):
vsp.SetParmValUpdate(wing_id, 'Camber', 'XSecCurve_{0}'.format(i),
max_camber/100)
vsp.SetParmValUpdate(wing_id, 'CamberLoc', 'XSecCurve_{0}'.format(i),
max_camber_loc/10)
vsp.SetParmValUpdate(wing_id, 'ThickChord', 'XSecCurve_{0}'.format(i),
max_tc/100)
vsp.SetParmValUpdate(wing_id, 'Y_Rel_Rotation', 'XForm', alpha_i)
vsp.SetParmValUpdate(wing_id, 'Origin', 'XForm', 0.25)
vsp.WriteVSPFile(
'C:/Users/jaros/Documents/GitHub/DISECON_PIA/2 - Conceptual Design/4 - Wing Design/wing_model.vsp3')
print('Done!')
| 874 | 0 | 54 |
ce957869072f6295537bd57aeed256f7e4c2f9c5 | 2,574 | py | Python | app/centroid-to-feet-interpolation/funzioniUtili.py | davave/EdgeRealtimeVideoAnalytics | 8f6abf233b2e1822d4ab0b65c6f5eb7a91df090d | [
"Apache-2.0"
] | null | null | null | app/centroid-to-feet-interpolation/funzioniUtili.py | davave/EdgeRealtimeVideoAnalytics | 8f6abf233b2e1822d4ab0b65c6f5eb7a91df090d | [
"Apache-2.0"
] | null | null | null | app/centroid-to-feet-interpolation/funzioniUtili.py | davave/EdgeRealtimeVideoAnalytics | 8f6abf233b2e1822d4ab0b65c6f5eb7a91df090d | [
"Apache-2.0"
] | null | null | null | import cv2
import yaml
import numpy as np
def centroidFeetFromFile(fileName="/home/davide/Documenti/progetti/playground/centroid-to-feet-interpolation/101_640x480.yaml", normalization=False):
'''
funzione che carica e restituisce le coordinate di centroide e piedi nel formato [[x y]]
'''
with open(fileName, 'r') as stream:
try:
data = yaml.safe_load(stream)
centroidCoordinates = np.array(data['feet_calib'], dtype='i')[:,0,:]
feetCoordinates = np.array(data['feet_calib'], dtype='i')[:,1,:]
image_width = np.array(data['feet_calib_image_width'])
image_height = np.array(data['feet_calib_image_height'])
if normalization:
# Normalizing coordinates
for v in [centroidCoordinates,feetCoordinates]:
v[:,0] /= image_width
v[:,1] /= image_height
#print(centroidCoordinates[0,0])
except yaml.YAMLError as exc:
print(exc)
return centroidCoordinates,feetCoordinates | 41.516129 | 156 | 0.6554 | import cv2
import yaml
import numpy as np
def pointsMap(imgName='640x480.jpg',windowName='Disposizione punti del centroide'):
img = cv2.imread(imgName)
windowName = windowName
centroidCoordinates, feetCoordinates = centroidFeetFromFile()
#img = cv2.drawMarker(img, (320,240), color=(255,255,0), markerType = cv2.MARKER_CROSS)#, markerSize[, thickness[, line_type]]]] ) -> img
img = drawLine(img, centroidCoordinates,feetCoordinates, color=(255, 0, 0))
img = insertPoints(img,centroidCoordinates)
img = insertPoints(img,feetCoordinates,color=(0, 255, 0))
cv2.putText(img, 'centroid', (10,415), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 1, cv2.LINE_AA)
cv2.putText(img, 'feet', (10,400), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0), 1, cv2.LINE_AA)
cv2.imshow(windowName, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def insertPoints(CV2Img, centroidCoordinates, color=(0, 0, 255)):
    '''
    Draw each [x y] coordinate as a small filled circle on CV2Img.

    Returns CV2Img (cv2.circle also modifies the image in place). Unlike
    the previous version, an empty coordinate list no longer raises
    UnboundLocalError on the return.
    '''
    for point in centroidCoordinates:
        CV2Img = cv2.circle(CV2Img, (point[0], point[1]), radius=2, color=color, thickness=-1)
    return CV2Img
def centroidFeetFromFile(fileName="/home/davide/Documenti/progetti/playground/centroid-to-feet-interpolation/101_640x480.yaml", normalization=False):
    '''
    Load the centroid and feet calibration coordinates from a YAML file.

    Returns a pair (centroidCoordinates, feetCoordinates), each an (N, 2)
    array in [[x y]] format. With normalization=False (default) the arrays
    keep their original integer dtype; with normalization=True they are
    returned as floats scaled by the image width/height stored in the file.

    Raises yaml.YAMLError after printing it if the file cannot be parsed
    (previously the error was swallowed and an UnboundLocalError followed
    on the return statement).
    '''
    with open(fileName, 'r') as stream:
        try:
            data = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
            raise
    pairs = np.array(data['feet_calib'], dtype='i')
    centroidCoordinates = pairs[:, 0, :]
    feetCoordinates = pairs[:, 1, :]
    if normalization:
        # Divide out-of-place into float arrays: in-place true division
        # (/=) on the integer arrays raises a casting TypeError and would
        # truncate the results to 0/1 anyway.
        scale = np.array([data['feet_calib_image_width'],
                          data['feet_calib_image_height']], dtype=float)
        centroidCoordinates = centroidCoordinates / scale
        feetCoordinates = feetCoordinates / scale
    return centroidCoordinates, feetCoordinates
def drawLine(CV2Img, centroidCoordinates,feetCoordinates, color=(0, 0, 255)):
    '''
    Draw a segment from every centroid to its corresponding feet point.

    Returns CV2Img (cv2.line also modifies the image in place). Pairs are
    iterated with zip, so an empty input no longer raises
    UnboundLocalError on the return.
    '''
    for centroid, feet in zip(centroidCoordinates, feetCoordinates):
        CV2Img = cv2.line(CV2Img, (centroid[0], centroid[1]), (feet[0], feet[1]), color, 1)
    return CV2Img
0fb04afb3633198dd89331a36a5ad6f08b43e364 | 4,856 | py | Python | tetris/a2c_rewards/play.py | NeuralFlux/rl-analysis | bb45e1f8bb9da4683cce4bd0a5e687770a4005e2 | [
"MIT"
] | 1 | 2020-12-05T13:15:35.000Z | 2020-12-05T13:15:35.000Z | tetris/a2c_rewards/play.py | NeuralFlux/rl-analysis | bb45e1f8bb9da4683cce4bd0a5e687770a4005e2 | [
"MIT"
] | null | null | null | tetris/a2c_rewards/play.py | NeuralFlux/rl-analysis | bb45e1f8bb9da4683cce4bd0a5e687770a4005e2 | [
"MIT"
] | null | null | null | import sys
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from Tetris.src.tetris import Tetris
from PIL import Image
from time import sleep
from pathlib import Path
import cv2
# Playfield dimensions the agent observes.
HEIGHT, WIDTH = 20, 6
# Action space: every (x, n_rotations) placement except the two entries
# that are removed for the right-most column; the comprehension filter
# yields the same elements in the same order as build-then-remove.
ACTION_LIST = [
    (x, n_rotations)
    for n_rotations in range(4)
    for x in range(WIDTH)
    if (x, n_rotations) not in ((WIDTH - 1, 0), (WIDTH - 1, 2))
]
print(f"[Agent] ActionSpace: {len(ACTION_LIST)}")
"""TODO check if flatten is not required"""
if __name__ == "__main__":
# initializing our environment
env = Tetris(height=HEIGHT, width=WIDTH)
init_state = env.reset()
print(f"InputSize: {init_state.shape[0]}")
agent = A2CAgent(init_state.shape[0])
epoch_resume = -1
epoch_resume = agent.load('tetris_checkpoint_latest')
agent.play(env, 100, 10, video=True)
| 30.161491 | 116 | 0.585255 | import sys
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from Tetris.src.tetris import Tetris
from PIL import Image
from time import sleep
from pathlib import Path
import cv2
# Playfield dimensions the agent observes.
HEIGHT, WIDTH = 20, 6
# Action space: every (x, n_rotations) placement except the two entries
# that are removed for the right-most column; the comprehension filter
# yields the same elements in the same order as build-then-remove.
ACTION_LIST = [
    (x, n_rotations)
    for n_rotations in range(4)
    for x in range(WIDTH)
    if (x, n_rotations) not in ((WIDTH - 1, 0), (WIDTH - 1, 2))
]
print(f"[Agent] ActionSpace: {len(ACTION_LIST)}")
class Network(nn.Module):
    """Two-layer MLP trunk with separate policy-logit and value heads.

    forward(x) returns (logits, values), each of shape (..., action_size).
    """

    def __init__(self, input_size, action_size):
        super(Network, self).__init__()
        # Shared trunk.
        self.fc1 = nn.Linear(input_size, 256)
        self.fc2 = nn.Linear(256, 256)
        # Heads: raw action logits and per-action values.
        self.logits_p = nn.Linear(256, action_size)
        self.v_values = nn.Linear(256, action_size)

    def forward(self, x):
        hidden = F.relu(self.fc2(F.relu(self.fc1(x))))
        return self.logits_p(hidden), self.v_values(hidden)
class A2CAgent(object):
    """A2C agent wrapper used here purely for inference (playing episodes)."""
    def __init__(self, input_size):
        # Policy/value network sized to the flattened state and action list.
        self.model = Network(input_size, len(ACTION_LIST))
        self.epoch = 0
    def select_action(self, state, valid_action_mask):
        """Sample a valid (x, n_rotations) action from the policy for `state`.

        `valid_action_mask` must be a boolean tensor the same shape as the
        logits (enforced by the assert); True marks allowed actions.
        """
        # get the logits for each action
        action_logits, _ = self.model.forward(state)
        # mask invalid actions' logits to -inf
        assert action_logits.size() == valid_action_mask.size()
        # -1e+8 acts as effectively -inf, giving masked actions ~0 probability.
        adj_action_logits = torch.where(valid_action_mask, action_logits, torch.tensor(-1e+8))
        dist = Categorical(logits=adj_action_logits)
        # sample an action
        sampled_val = dist.sample()
        action_idx = int(sampled_val.item())
        # compute log prob
        # print(sampled_val.item() == 1.0, sampled_val, action_idx)
        action_to_take = ACTION_LIST[action_idx]
        return action_to_take
    def play(self, env, num_epochs, roll_size, video=False):
        """Play `num_epochs` episodes and print score statistics.

        The score of an episode is the number of steps survived. A rolling
        average over the last `roll_size` episodes is reported every
        `roll_size` episodes. If `video` is True, only the first episode is
        recorded (the flag is cleared after out.release()).
        """
        avg = -float('inf')
        best_avg = -float('inf')
        max_score = -float('inf')
        all_scores = np.zeros((num_epochs, ), dtype=np.int32)
        for eps_idx in range(num_epochs):
            self.epoch = eps_idx
            # beginning of an episode
            state = env.reset()
            state = torch.tensor(state, dtype=torch.float32)
            done = False
            steps = 0
            # whether or not record video of game
            if video:
                # NOTE(review): "mjpg" fourcc inside an .mp4 container —
                # confirm this combination produces a playable file.
                out = cv2.VideoWriter("output.mp4", cv2.VideoWriter_fourcc(*"mjpg"), 30,
                                      (int(1.5 * env.width * env.block_size), env.height * env.block_size))
            while not done:
                action_mask = env.get_valid_actions()
                action = self.select_action(state, action_mask)
                # run one step
                if video:
                    next_state, reward, done, _ = env.step(action, render=False, video=out)
                else:
                    next_state, reward, done, _ = env.step(action, render=False)
                # print("Took", action)
                # input()
                next_state = torch.tensor(next_state, dtype=torch.float32)
                state = next_state
                steps += 1
            if video:
                out.release()
                video = False
            # survival score
            score = steps
            # bookkeeping of stats
            all_scores[eps_idx] = score
            if score > max_score:
                max_score = score
            sys.stdout.write(f"\r [{eps_idx}]: {score}, Avg: {avg:.2f}, Max: {max_score}, Best_avg: {best_avg:.2f}")
            sys.stdout.flush()
            if ((eps_idx + 1) % roll_size) == 0:
                # NOTE(review): the slice ends at eps_idx, so the current
                # episode's score is excluded from the rolling average —
                # confirm whether eps_idx + 1 was intended as the end index.
                avg = np.mean(all_scores[(eps_idx + 1) - roll_size:eps_idx])
                if avg > best_avg:
                    best_avg = avg
                print(f"\n [{eps_idx}]: {score}, Avg: {avg:.2f}, Max: {max_score}, Best_avg: {best_avg:.2f}")
        # Final summary over all episodes.
        avg = np.mean(all_scores)
        max_score = np.max(all_scores)
        print(f"\n [{eps_idx}]: {score}, Avg: {avg:.2f}, Max: {max_score}, Best_avg: {best_avg:.2f}")
    def load(self, path):
        """Load weights from trained_checkpoints/supervised/<path>.pt into
        self.model and return the epoch number stored in the checkpoint."""
        save_dir = 'trained_checkpoints/supervised/'
        path = save_dir + path + ".pt"
        checkpoint = torch.load(path)
        epoch = checkpoint['epoch']
        self.model.load_state_dict(checkpoint['model_state_dict'])
        return epoch
"""TODO check if flatten is not required"""
if __name__ == "__main__":
# initializing our environment
env = Tetris(height=HEIGHT, width=WIDTH)
init_state = env.reset()
print(f"InputSize: {init_state.shape[0]}")
agent = A2CAgent(init_state.shape[0])
epoch_resume = -1
epoch_resume = agent.load('tetris_checkpoint_latest')
agent.play(env, 100, 10, video=True)
| 3,655 | 6 | 214 |