Dataset schema (one row per source file; ⌀ marks columns that can contain nulls):

| column | type | value range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–209 |
| max_stars_repo_name | string | length 5–121 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24–24 ⌀ |
| max_issues_repo_path | string | length 4–209 |
| max_issues_repo_name | string | length 5–121 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24–24 ⌀ |
| max_forks_repo_path | string | length 4–209 |
| max_forks_repo_name | string | length 5–121 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24–24 ⌀ |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
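The columns above follow the per-file metadata layout used by Hugging Face `datasets`-style code corpora. As a minimal sketch of how rows with this schema can be consumed (the dataset identifier below is a placeholder, not the actual source of this dump):

```python
# Stream rows that follow the schema above and keep small, mostly-alphanumeric
# Python files. "org/python-code-dump" is a hypothetical dataset id.
from datasets import load_dataset

rows = load_dataset("org/python-code-dump", split="train", streaming=True)
for row in rows:
    if row["ext"] == "py" and row["size"] < 10_000 and row["alphanum_fraction"] > 0.5:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_count"])
```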
hexsha: 1757c182b12f55c79e5d7989984e96836c9b0525 | size: 2,150 | ext: py | lang: Python
repo: Toranktto/CraftProtocol | path: CraftProtocol/Protocol/v1_8/Packet/Play/ClientSettingsPacket.py | head_hexsha: a6f4a67756c3868820ab76df5e148d76b020d990 | licenses: ["MIT"]
stars: 21 (2018-05-12T20:18:02.000Z – 2022-02-18T17:33:50.000Z) | issues: 1 (2018-06-23T09:13:39.000Z – 2018-06-27T01:22:27.000Z) | forks: 2 (2018-05-19T21:36:00.000Z – 2020-10-02T03:23:13.000Z)
content:
#!/usr/bin/env python
from CraftProtocol.Protocol.Packet.BasePacket import BasePacket
from CraftProtocol.Protocol.Packet.PacketDirection import PacketDirection
from CraftProtocol.StreamIO import StreamIO
class ClientSettingsPacket(BasePacket):
PACKET_ID = 0x15
PACKET_DIRECTION = PacketDirection.SERVERBOUND
def __init__(self, locale, view_distance, chat_mode, chat_colors, skin_parts):
BasePacket.__init__(self)
self._locale = unicode(locale)
self._view_distance = int(view_distance)
self._chat_mode = int(chat_mode)
self._chat_colors = bool(chat_colors)
self._skin_parts = int(skin_parts)
def get_locale(self):
return self._locale
def set_locale(self, locale):
self._locale = unicode(locale)
def get_view_distance(self):
return self._view_distance
def set_view_distance(self, view_distance):
self._view_distance = int(view_distance)
def get_chat_mode(self):
return self._chat_mode
def set_chat_mode(self, chat_mode):
self._chat_mode = int(chat_mode)
def is_chat_colors(self):
return self._chat_colors
def set_chat_colors(self, chat_colors):
self._chat_colors = bool(chat_colors)
def get_skin_parts(self):
return self._skin_parts
def set_skin_parts(self, skin_parts):
self._skin_parts = int(skin_parts)
@staticmethod
def write(stream, packet):
StreamIO.write_string(stream, packet.get_locale())
StreamIO.write_byte(stream, packet.get_view_distance())
StreamIO.write_varint(stream, packet.get_chat_mode())
StreamIO.write_bool(stream, packet.is_chat_colors())
StreamIO.write_ubyte(stream, packet.get_skin_parts())
@staticmethod
def read(stream, packet_size):
locale = StreamIO.read_string(stream)
view_distance = StreamIO.read_byte(stream)
chat_mode = StreamIO.read_varint(stream)
chat_colors = StreamIO.read_bool(stream)
skin_parts = StreamIO.read_ubyte(stream)
return ClientSettingsPacket(locale, view_distance, chat_mode, chat_colors, skin_parts)
avg_line_length: 32.089552 | max_line_length: 94 | alphanum_fraction: 0.713953

hexsha: 54242c3a5ff99883e8cb54dd6ec506173de03862 | size: 936 | ext: py | lang: Python
repo: iamsuman/iv | path: iv/Leetcode/easy/283_move_zero.py | head_hexsha: bf68d3fd45455b6041e74b09272f69503bf7a8ac | licenses: ["MIT"]
stars: 2 (2020-09-19T22:28:15.000Z – 2020-10-03T01:44:53.000Z) | issues: null | forks: 1 (2020-10-03T01:43:30.000Z – 2020-10-03T01:43:30.000Z)
content:
"""
Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.
"""
class Solution:
def moveZeroes(self, nums: list) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
lastfound = 0
for i in range(len(nums)):
if nums[i] != 0:
nums[lastfound] = nums[i]
lastfound += 1
continue
for i in range(lastfound, len(nums)):
nums[i] = 0
def moveZeroes2(self, nums: list) -> None:
for i in range(len(nums)):
if nums[i] != 0:
continue
for j in range(i + 1, len(nums)):
if nums[j] == 0:
continue
nums[i], nums[j] = nums[j], nums[i]
break
nums = [0,1,0,3,12]
s = Solution()
s.moveZeroes(nums)
print(nums)
avg_line_length: 24.631579 | max_line_length: 133 | alphanum_fraction: 0.494658

hexsha: 3b6d77f1b00ae3c13074ddda0710ce9bc0eae050 | size: 86 | ext: py | lang: Python
repo: Alexkakakaka/infosatc-lp-avaliativo-01 | path: exercicio20.py | head_hexsha: eff4350eb2ec8bebd169155b3ac86a6396544fd9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# Reads a mass in kilograms and converts it to pounds (1 lb ≈ 0.45 kg, e.g. 9 kg / 0.45 ≈ 20 lb).
K = float(input("Digite um numero"))  # prompt: "Enter a number" (mass in kg)
L = K/0.45
print("Convertido massa em libras:",L)  # "Mass converted to pounds:"
avg_line_length: 28.666667 | max_line_length: 38 | alphanum_fraction: 0.686047

hexsha: 9f5b1476089bcef73ad991f0b0546b6f2f490c86 | size: 1,767 | ext: py | lang: Python
repo: pulumi-bot/pulumi-azure-native | path: sdk/python/pulumi_azure_native/devices/v20200101/_enums.py | head_hexsha: f7b9490b5211544318e455e5cceafe47b628e12c | licenses: ["Apache-2.0"]
stars: 31 (2020-09-21T09:41:01.000Z – 2021-02-26T13:21:59.000Z) | issues: 231 (2020-09-21T09:38:45.000Z – 2021-03-01T11:16:03.000Z) | forks: 4 (2020-09-29T14:14:59.000Z – 2021-02-10T20:38:16.000Z)
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AccessRightsDescription',
'AllocationPolicy',
'IotDpsSku',
'IpFilterActionType',
'IpFilterTargetType',
'State',
]
class AccessRightsDescription(str, Enum):
"""
Rights that this key has.
"""
SERVICE_CONFIG = "ServiceConfig"
ENROLLMENT_READ = "EnrollmentRead"
ENROLLMENT_WRITE = "EnrollmentWrite"
DEVICE_CONNECT = "DeviceConnect"
REGISTRATION_STATUS_READ = "RegistrationStatusRead"
REGISTRATION_STATUS_WRITE = "RegistrationStatusWrite"
class AllocationPolicy(str, Enum):
"""
Allocation policy to be used by this provisioning service.
"""
HASHED = "Hashed"
GEO_LATENCY = "GeoLatency"
STATIC = "Static"
class IotDpsSku(str, Enum):
"""
Sku name.
"""
S1 = "S1"
class IpFilterActionType(str, Enum):
"""
The desired action for requests captured by this rule.
"""
ACCEPT = "Accept"
REJECT = "Reject"
class IpFilterTargetType(str, Enum):
"""
Target for requests captured by this rule.
"""
ALL = "all"
SERVICE_API = "serviceApi"
DEVICE_API = "deviceApi"
class State(str, Enum):
"""
Current state of the provisioning service.
"""
ACTIVATING = "Activating"
ACTIVE = "Active"
DELETING = "Deleting"
DELETED = "Deleted"
ACTIVATION_FAILED = "ActivationFailed"
DELETION_FAILED = "DeletionFailed"
TRANSITIONING = "Transitioning"
SUSPENDING = "Suspending"
SUSPENDED = "Suspended"
RESUMING = "Resuming"
FAILING_OVER = "FailingOver"
FAILOVER_FAILED = "FailoverFailed"
avg_line_length: 22.653846 | max_line_length: 80 | alphanum_fraction: 0.65931

hexsha: e1cbf1508dddd3c46dbf4e9e2680af36f0b491a2 | size: 17,287 | ext: py | lang: Python
repo: Scor09/house | path: houseSock.py | head_hexsha: 32b0d16f43fa1fd45a6e5c70ac23672716838459 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2020-01-07T03:02:21.000Z – 2020-01-07T03:02:21.000Z)
content:
# MIT License
# Copyright (c) 2018 NCC Group Plc
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from houseGlobal import house_global, socketio, thread, thread_lock, random_token, login_manager
from flask_socketio import SocketIO, emit
from flask_socketio import disconnect
from houseStatic import *
from houseUtil import *
import functools
import hmac
from flask_login import current_user, login_user, UserMixin
from flask_socketio import disconnect
class User(UserMixin):
def __init__(self, id):
self.id = id
def __repr__(self):
return "User: %s" % (self.id)
session_user = User(random_token)
@login_manager.user_loader
def user_loader(uuid):
if uuid != session_user.id:
return
return session_user
def authenticated_only(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not current_user.is_authenticated:
disconnect()
else:
return f(*args, **kwargs)
return wrapped
@socketio.on('connect', namespace='/eventBus')
def sock_connect():
emit('log', {'data': 'Connected'})
@socketio.on('authentication', namespace='/eventBus')
def sock_auth(msg):
uuid = str(msg.get('uuid'))
# print (stylize("[+] Login.. with uuid : {}".format(uuid), Info))
if hmac.compare_digest(str(uuid), str(random_token)):
this_user = User(uuid)
login_user(this_user)
emit("authenticated..")
else:
emit("auth_failed..")
@socketio.on('enableAutoRefresh', namespace='/eventBus')
@authenticated_only
def enableAutoRefresh():
house_global.monitor_refresh = 1
@socketio.on('diableAutoRefresh', namespace='/eventBus')
@authenticated_only
def diableAutoRefresh():
house_global.monitor_refresh = 0
@socketio.on('refresh_device', namespace='/eventBus')
@authenticated_only
def refresh_device():
getDevice()
# emit('update_device', {'data': cgi.escape(str(house_global.device))})
@socketio.on('check_monitor_running', namespace='/eventBus')
@authenticated_only
def check_monitor_running():
# not working, house_global.monitor_script.unload() won't destroy it, did not find a destroy script method
print (stylize("[+]Checking if Monitor is running...", Info))
IPython.embed()
if (house_global.monitor_script != '') & (house_global.monitor_script != None):
emit('monitor_running_status', {'running': 1})
else:
emit('monitor_running_status', {'running': 0})
# getDevice()
@socketio.on('set_device_id', namespace='/eventBus')
@authenticated_only
def set_device(msg):
# clean up script for previous devices
unload_script()
device_id = msg.get('id')
setDevice(device_id)
# emit('update_device', {'data': cgi.escape(str(house_global.device))})
@socketio.on('setPackage', namespace='/eventBus')
@authenticated_only
def setpkg(msg):
house_global.package_name = msg.get('packagename')
setPackage(house_global.package_name)
emit('update_package', {'data': cgi.escape(str(house_global.package_name))})
@socketio.on("setEnumConfig", namespace='/eventBus')
@authenticated_only
def update_EnumConfig(message):
enum_option = message.get('option')
class_to_find = message.get('class_to_find')
class_pattern = message.get('class_pattern')
if class_to_find != None:
house_global.enum_class = class_to_find if ('.' in class_to_find) else house_global.packagename + '.' + class_to_find
if class_pattern != None:
house_global.enum_class_pattern = class_pattern
if enum_option != None:
house_global.enum_option = enum_option
update_conf()
emit("EnumConfigDone")
@socketio.on('get_monitor_message', namespace='/eventBus')
@authenticated_only
def get_monitor_message():
# ret_html = render_template('history.html', tree=make_tree(path,'enum'))
# emit('update_monitor_message', {'mon_type': mon_type.upper(), 'monitor_message': house_global.monitor_message})
emit('update_monitor_message', {'monitor_message': house_global.monitor_message, 'monitor_new': list(house_global.monitor_queue)})
house_global.monitor_queue = set()
@socketio.on('get_enum_history', namespace='/eventBus')
@authenticated_only
def get_enum_history():
path = './cache/enum'
ret_html = render_template('history.html', tree=make_tree(path,'enum'))
emit("update_enum_history", {'data': ret_html})
@socketio.on('get_hooks_history', namespace='/eventBus')
@authenticated_only
def hooks_history():
path = './cache/hook'
ret_html = render_template('history.html', tree=make_tree(path,'hook'))
emit("update_hooks_history", {'data': ret_html})
@socketio.on('get_intercept_history', namespace='/eventBus')
@authenticated_only
def get_intercept_history():
path = './cache/intercept'
ret_html = render_template('history.html', tree=make_tree(path,'intercept'))
emit("update_intercept_history", {'data': ret_html})
@socketio.on('get_history_script', namespace='/eventBus')
@authenticated_only
def get_history_script(data):
path = data.get("filepath")
option = data.get("option")
try:
with open(path,'r') as f:
script_content = f.read()
emit("update_script_content",{'script': script_content, "option": option})
except Exception as e:
emit("update_script_content",{'script': "[!] Failed to get script: " + str(e), "option": option})
@socketio.on('save_script', namespace='/eventBus')
@authenticated_only
def save_script(save_script_data):
option = save_script_data.get("option")
filename = save_script_data.get("filename")
if (filename):
filename = os.path.split(filename)[1]
script = save_script_data.get("script")
if option == "hook":
try:
with open("./cache/hook/" + filename, 'w') as f:
f.write(script)
except Exception as e:
raise e
elif option == "enum":
try:
with open("./cache/enum/" + filename, 'w') as f:
f.write(script)
except Exception as e:
raise e
elif option == "intercept":
try:
with open("./cache/intercept/" + filename, 'w') as f:
f.write(script)
except Exception as e:
raise e
else:
print (stylize("Failed to save the file!", Error))
@socketio.on('deleteScript', namespace='/eventBus')
@authenticated_only
def deleteScript(data):
path = data.get('path')
if path != None:
if path.startswith('./cache/enum'):
fn = './cache/enum/' + os.path.basename(path)
op = "enum"
elif path.startswith('./cache/hook'):
fn = './cache/hook/' + os.path.basename(path)
op = "hook"
elif path.startswith('./cache/intercept'):
fn = './cache/intercept/' + os.path.basename(path)
op = "intercept"
else:
return
else:
emit('new_error_message',{'data' : "[!] What are you trying to delete?"})
return
try:
os.remove(fn)
emit("refresh_history_script",{'option': op})
except Exception as e:
emit('new_error_message',{'data' : "[!] Cannot delete: " + str(e)})
@socketio.on('gen_script', namespace='/eventBus')
@authenticated_only
def gen_script(message):
house_global.hooks_list = message.get('hooks_list')
update_conf()
house_global.script_to_load = ''
house_global.hooks_list = house_global.hook_conf.get('hooks_list')
build_hook_script()
emit("update_hooks")
@socketio.on('unload_script', namespace='/eventBus')
@authenticated_only
def doUnload():
print (stylize("[+]Unloading script..", Info))
unload_script()
@socketio.on('clear_hookMessage', namespace='/eventBus')
@authenticated_only
def clear_hookMessage():
house_global.messages = []
print (stylize("[+] Hook Message Cleard", Info))
@socketio.on('clear_monitorMessage', namespace='/eventBus')
@authenticated_only
def clear_monitorMessage(message):
clear_type = message.get('monitor_type').upper()
if (clear_type != None) & (clear_type in house_global.monitor_message.keys()):
house_global.monitor_message[clear_type] = []
@socketio.on('clear_EnumMessage', namespace='/eventBus')
@authenticated_only
def clear_EnumMessage():
house_global.enum_messages = []
@socketio.on('quitRepl', namespace = '/eventBus')
@authenticated_only
def doneRepl():
quitRepl()
@socketio.on('loadHookScript', namespace='/eventBus')
@authenticated_only
def doLoadHook(message):
clear_hook_msg()
j_script = message.get('script')
if j_script != None:
house_global.script_to_load = j_script
house_global.hook_script = j_script
cache_script("hooks_cache", house_global.hook_script)
try:
load_script()
except Exception as e:
print ("doLoadHook exception caught!" + str(e))
clear_hook_msg()
hook_exception = {"exception" : str(e)}
house_global.messages.insert(0,hook_exception)
emit("new_hook_message",hook_exception)
@socketio.on('loadEnumScript', namespace='/eventBus')
@authenticated_only
def doLoadEnum(message):
j_script = message.get('script')
if j_script != None:
house_global.script_to_load = j_script
house_global.enum_script_to_load = j_script
house_global.enum_messages = []
cache_script("enum_cache", house_global.enum_script_to_load)
try:
load_script()
except Exception as e:
doLoadEnum_exception = {"exception" : str(e)}
house_global.enum_messages = [ ]
house_global.enum_messages.insert(0,doLoadEnum_exception)
emit("update_enum_messages")
@socketio.on('doEnv', namespace='/eventBus')
def doEnv():
# with open('./scripts/enum/env.js') as f:
# house_global.script_to_load = f.read()
house_global.script_to_load = build_env_script()
try:
load_script()
except Exception as e:
# IPython.embed()
emit('update_env_info',{'error': cgi.escape("[!]load_script Exception: {}".format(str(e)))})
@socketio.on('loadStetho', namespace='/eventBus')
def doLoadStetho():
try:
preload_stetho_script()
except Exception as e:
# IPython.embed()
emit('sideload_stetho_error',{'error': cgi.escape("[!]preload_stetho_script Exception: {}".format(str(e)))})
@socketio.on('runpreload', namespace='/eventBus')
def runpreload(preload_message):
house_global.preload_conf = preload_message.get('preload_settings')
update_conf()
try:
run_preload_script()
except Exception as e:
# IPython.embed()
emit('runpreload',{'error': cgi.escape("[!]preload_script Exception: {}".format(str(e)))})
@socketio.on('loadMonitor', namespace='/eventBus')
def doloadMonitor(monitor_message):
house_global.monitor_conf = monitor_message.get('monitor_settings')
update_conf()
try:
loadMonitor()
# check_monitor_running()
except Exception as e:
# IPython.embed()
emit('doloadMonitor',{'error': cgi.escape("[!]doloadMonitor Exception: {}".format(str(e)))})
@socketio.on('endpreload', namespace='/eventBus')
def endpreload():
house_global.preload_conf = {"PRELOAD_STETHO": 0, "PRELOAD_SSLSTRIP": 1, "PRELOAD_SETPROXY" : 0}
update_conf()
try:
unload_script("preload")
# check_monitor_running()
except Exception as e:
# IPython.embed()
emit('endpreload',{'error': cgi.escape("[!]endpreload Exception: {}".format(str(e)))})
@socketio.on('unloadMonitor', namespace='/eventBus')
def dounloadMonitor():
house_global.monitor_conf = {"SWITCH_FILEIO": 0, "SWITCH_HTTP": 0, "SWITCH_MISC": 0, "SWITCH_WEBVIEW": 0, "SWITCH_SQL": 0, "SWITCH_IPC": 0}
update_conf()
try:
unload_script("monitor")
# check_monitor_running()
except Exception as e:
# IPython.embed()
emit('dounloadMonitor',{'error': cgi.escape("[!]dounloadMonitor Exception: {}".format(str(e)))})
@socketio.on('doInspect', namespace='/eventBus')
@authenticated_only
def doInspect(message):
house_global.device = frida.get_usb_device()
house_global.onMessageException = ''
ins_classname = message.get('ins_classname')
ins_methodname = message.get('ins_methodname')
if (ins_classname != None) & (ins_methodname != None):
house_global.inspect_conf['classname'] = ins_classname
house_global.inspect_conf['methodname'] = ins_methodname
update_conf()
house_global.inspect_result = 'Please wait'
house_global.script_to_load = prepare_script_fragment(ins_classname, ins_methodname, "inspect")
try:
load_script()
except Exception as e:
house_global.inspect_result = "<p><code>[!] Exception: {}</code></p>".format(str(e))
print (stylize("Exception caught in doInspect: {}".format(e), Info))
update_inspect_result = {'classname': house_global.inspect_conf["classname"], 'methodname' : house_global.inspect_conf["methodname"], 'inspect_result': (str(house_global.inspect_result))}
cache_inspect_html()
socketio.emit('update_inspect_result', update_inspect_result, namespace='/eventBus')
house_global.onMessageException = ''
@socketio.on('fetchInspect', namespace='/eventBus')
@authenticated_only
def fetchInspect():
overloadIndex = house_global.inspect_conf.get("overloadIndex")
if house_global.inspect_result == '':
with open("./config/inspect_cache.html",'w+') as f:
house_global.inspect_result = f.read()
update_inspect_result = {'classname': house_global.inspect_conf["classname"], 'methodname' : house_global.inspect_conf["methodname"], 'inspect_result': house_global.inspect_result, 'overloadIndex' : overloadIndex}
emit('update_inspect_result', update_inspect_result)
@socketio.on('genIntercept', namespace='/eventBus')
@authenticated_only
def genIntercept(message):
ins_methodindex = message.get('intercept_index')
if (ins_methodindex != None):
house_global.inspect_conf['overloadIndex'] = int(ins_methodindex)
else:
house_global.inspect_conf['overloadIndex'] = 0
update_conf()
with open('./config/intercept_conf.json') as f:
intercept_conf = f.read()
try:
j_intercept = json.loads(intercept_conf)
except Exception as e:
raise e
print (stylize("[+]Lets do intercept",Info))
clazz_name = j_intercept.get("classname")
methodname = j_intercept.get("methodname")
overloadIndex = j_intercept.get("overloadIndex")
if overloadIndex == None:
overloadIndex = 0
house_global.intercept_script = prepare_script_fragment(clazz_name, methodname, "intercept", overloadIndex)
socketio.emit('update_intercept_script', {'script': house_global.intercept_script}, namespace='/eventBus')
@socketio.on('load_intercept_script', namespace='/eventBus')
@authenticated_only
def load_intercept_script(message):
house_global.intercept_script = message.get('script')
house_global.script_to_load = message.get('script')
cache_script("intercept_cache", house_global.intercept_script)
try:
load_script()
except Exception as e:
house_global.intercept_exception = "[!] intercept_exception: {}".format(str(e))
socketio.emit('new_intercept', {'data': house_global.intercept_exception, 'time': house_global.new_intercept_time}, namespace='/eventBus')
@socketio.on('intercept_param', namespace='/eventBus')
@authenticated_only
def sock_intercept(message):
j_option = message['option']
j_param = message['data']
time_stamp = message['time'].replace('"','')
if (j_option == "intercept_param"):
print (stylize("[+] Posting {} @ {} to Frida..".format(json.loads(j_param),time_stamp),Info))
house_global.script.post({'type': 'input', 'payload': json.loads(j_param), 'time': time_stamp, 'option': "intercept_param"})
elif (j_option == "intercept_repl"):
print (stylize("[+] Posting {} to Frida REPL..".format(j_param),Info))
house_global.script.post({'type': 'input', 'payload': j_param, 'time': time_stamp, 'option': "intercept_repl"})
avg_line_length: 36.165272 | max_line_length: 217 | alphanum_fraction: 0.682883

hexsha: df6a9e311c0891bdb4c25acc51ad3ca054655b60 | size: 3,772 | ext: py | lang: Python
repo: NihalHarish/datasets | path: benchmarks/benchmark_iterating.py | head_hexsha: 67574a8d74796bc065a8b9b49ec02f7b1200c172 | licenses: ["Apache-2.0"]
stars: 9 (2021-04-26T14:43:52.000Z – 2021-11-08T09:47:24.000Z) | issues: null | forks: 3 (2021-01-03T22:08:20.000Z – 2021-08-12T20:09:39.000Z)
content:
import json
import os
import tempfile
from utils import generate_example_dataset, get_duration
import datasets
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
for i in range(length):
_ = dataset[i]
@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
for i in range(0, len(dataset), batch_size):
_ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
with dataset.formatted_as(type=type):
for i in range(length):
_ = dataset[i]
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
with dataset.formatted_as(type=type):
for i in range(0, length, batch_size):
_ = dataset[i : i + batch_size]
def benchmark_iterating():
times = {"num examples": SPEED_TEST_N_EXAMPLES}
functions = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
functions_shuffled = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset")
features = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
)
dataset = generate_example_dataset(
os.path.join(tmp_dir, "dataset.arrow"),
features,
num_examples=SPEED_TEST_N_EXAMPLES,
seq_shapes={"list": (100,)},
)
print("first set of iterations")
for func, kwargs in functions:
print(func.__name__, str(kwargs))
times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
print("shuffling dataset")
dataset = dataset.shuffle()
print("Second set of iterations (after shuffling")
for func, kwargs in functions_shuffled:
print("shuffled ", func.__name__, str(kwargs))
times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
dataset, **kwargs
)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
avg_line_length: 37.72 | max_line_length: 108 | alphanum_fraction: 0.635472

hexsha: 6b1863243edf99b5d1c26511baf66237d21ae231 | size: 6,457 | ext: py | lang: Python
repo: vfaronov/httpolice | path: httpolice/inputs/har.py | head_hexsha: 0008cb746798eb8a7981d57f16f0b91b87e1a8d6 | licenses: ["MIT"]
stars: 1,027 (2016-04-25T11:17:13.000Z – 2022-02-06T23:47:45.000Z) | issues: 9 (2016-07-25T11:30:34.000Z – 2021-03-23T20:42:29.000Z) | forks: 27 (2016-05-19T22:17:39.000Z – 2020-09-18T05:53:39.000Z)
content:
import base64
import io
import json
from urllib.parse import urlparse
from httpolice.exchange import Exchange
from httpolice.helpers import pop_pseudo_headers
from httpolice.inputs.common import InputError
from httpolice.known import h, m, st
from httpolice.request import Request
from httpolice.response import Response
from httpolice.structure import FieldName, StatusCode, Unavailable
from httpolice.util.text import decode_path
FIDDLER = [u'Fiddler']
CHROME = [u'WebInspector']
FIREFOX = [u'Firefox']
EDGE = [u'F12 Developer Tools']
def har_input(paths):
for path in paths:
# According to the spec, HAR files are UTF-8 with an optional BOM.
path = decode_path(path)
with io.open(path, 'rt', encoding='utf-8-sig') as f:
try:
data = json.load(f)
except ValueError as exc:
raise InputError('%s: bad HAR file: %s' % (path, exc)) from exc
try:
creator = data['log']['creator']['name']
for entry in data['log']['entries']:
yield _process_entry(entry, creator, path)
except (TypeError, KeyError) as exc:
raise InputError('%s: cannot understand HAR file: %r' %
(path, exc)) from exc
def _process_entry(data, creator, path):
req = _process_request(data['request'], creator, path)
resp = _process_response(data['response'], req, creator, path)
return Exchange(req, [resp] if resp is not None else [])
def _process_request(data, creator, path):
version, header_entries = _process_message(data, creator)
method = data['method']
parsed = urlparse(data['url'])
scheme = parsed.scheme
if method == m.CONNECT:
target = parsed.netloc
elif any(name == h.host for (name, _) in header_entries):
# With HAR, we can't tell if the request was to a proxy or to a server.
# So we force most requests into the "origin form" of the target,
target = parsed.path
if parsed.query:
target += u'?' + parsed.query
else:
# However, if the request has no ``Host`` header,
# the user won't be able to see the target host
# unless we set the full URL ("absolute form") as the target.
# To prevent this from having an effect on the proxy logic,
# we explicitly set `Request.is_to_proxy` to `None` later.
target = data['url']
if data['bodySize'] == 0:
# No body, or a body of length 0 (which we do not distinguish).
body = b''
elif data['bodySize'] > 0:
# A message body was present, but we cannot recover it,
# because message body is the body *with* ``Content-Encoding``,
# and HAR does not include that.
body = Unavailable()
else:
# Unknown. Maybe there was a body, maybe there wasn't.
body = None
text = None
post = data.get('postData')
if post and post.get('text'):
text = post['text']
if creator in FIDDLER and method == m.CONNECT and u'Fiddler' in text:
# Fiddler's HAR export adds a body with debug information
# to CONNECT requests.
text = None
body = b''
req = Request(scheme, method, target, version, header_entries, body,
remark=u'from %s' % path)
if text is not None:
req.unicode_body = text
req.is_to_proxy = None # See above.
return req
def _process_response(data, req, creator, path):
if data['status'] == 0: # Indicates error in Chrome.
return None
version, header_entries = _process_message(data, creator)
status = StatusCode(data['status'])
reason = data['statusText']
if creator in FIDDLER and req.method == m.CONNECT and status.successful:
# Fiddler's HAR export adds extra debug headers to CONNECT responses
# after the tunnel is closed.
header_entries = [(name, value)
for (name, value) in header_entries
if name not in [u'EndTime', u'ClientToServerBytes',
u'ServerToClientBytes']]
# The logic for body is mostly like that for requests (see above).
if data['bodySize'] == 0 or data['content']['size'] == 0 or \
status == st.not_modified: # Firefox also includes body on 304
body = b''
elif creator in FIREFOX:
        # Firefox seems to export a bogus bodySize:
# see test/har_data/firefox_gif.har
body = None
# Browsers may set ``bodySize = -1`` even when ``content.size >= 0``.
elif data['bodySize'] > 0 or data['content']['size'] > 0:
body = Unavailable()
else:
body = None
resp = Response(version, status, reason, header_entries, body=body,
remark=u'from %s' % path)
if data['content'].get('text') and status != st.not_modified:
if data['content'].get('encoding', u'').lower() == u'base64':
try:
decoded_body = base64.b64decode(data['content']['text'])
except ValueError:
pass
else:
if creator in FIDDLER and req.method == m.CONNECT and \
status.successful and b'Fiddler' in decoded_body:
# Fiddler's HAR export adds a body with debug information
# to CONNECT responses.
resp.body = b''
else:
resp.decoded_body = decoded_body
elif 'encoding' not in data['content']:
resp.unicode_body = data['content']['text']
return resp
def _process_message(data, creator):
header_entries = [(FieldName(d['name']), d['value'])
for d in data['headers']]
pop_pseudo_headers(header_entries)
# Web browsers' HAR export poorly reflects the actual traffic on the wire.
# Their httpVersion can't be trusted, and they often mangle lower-level
# parts of the protocol, e.g. at the time of writing Chrome sometimes omits
# the Host header from HTTP/1.1 requests. Just consider their HTTP version
# to be always unknown, and a lot of this pain goes away.
version = None
if data['httpVersion'].startswith(u'HTTP/') and \
creator not in CHROME + FIREFOX + EDGE:
version = data['httpVersion']
return version, header_entries
avg_line_length: 38.664671 | max_line_length: 79 | alphanum_fraction: 0.602292

hexsha: 41a34fa36edf581c273cf650e4dfd8268c732634 | size: 3,141 | ext: py | lang: Python
repo: nieaijun99/superset | path: tests/integration_tests/dashboard_utils.py | head_hexsha: 86368dd406b9e828f31186a4b6179d24758a7d87 | licenses: ["Apache-2.0"]
stars: 2 (2021-12-21T15:57:16.000Z – 2022-01-31T02:22:02.000Z) | issues: null | forks: 2 (2021-12-21T13:41:18.000Z – 2021-12-26T22:16:43.000Z)
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utils to provide dashboards for tests"""
import json
from typing import Any, Dict, List, Optional
from pandas import DataFrame
from superset import ConnectorRegistry, db
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.utils.core import DatasourceType, get_example_default_schema
def get_table(
table_name: str,
database: Database,
schema: Optional[str] = None,
):
schema = schema or get_example_default_schema()
table_source = ConnectorRegistry.sources["table"]
return (
db.session.query(table_source)
.filter_by(database_id=database.id, schema=schema, table_name=table_name)
.one_or_none()
)
def create_table_metadata(
table_name: str,
database: Database,
table_description: str = "",
fetch_values_predicate: Optional[str] = None,
schema: Optional[str] = None,
) -> SqlaTable:
schema = schema or get_example_default_schema()
table = get_table(table_name, database, schema)
if not table:
table_source = ConnectorRegistry.sources["table"]
table = table_source(schema=schema, table_name=table_name)
if fetch_values_predicate:
table.fetch_values_predicate = fetch_values_predicate
table.database = database
table.description = table_description
db.session.merge(table)
db.session.commit()
return table
def create_slice(
title: str, viz_type: str, table: SqlaTable, slices_dict: Dict[str, str]
) -> Slice:
return Slice(
slice_name=title,
viz_type=viz_type,
datasource_type=DatasourceType.TABLE,
datasource_id=table.id,
params=json.dumps(slices_dict, indent=4, sort_keys=True),
)
def create_dashboard(
slug: str, title: str, position: str, slices: List[Slice]
) -> Dashboard:
dash = db.session.query(Dashboard).filter_by(slug=slug).one_or_none()
if not dash:
dash = Dashboard()
dash.dashboard_title = title
if position is not None:
js = position
pos = json.loads(js)
dash.position_json = json.dumps(pos, indent=4)
dash.slug = slug
if slices is not None:
dash.slices = slices
db.session.merge(dash)
db.session.commit()
return dash
avg_line_length: 31.727273 | max_line_length: 81 | alphanum_fraction: 0.720471

hexsha: 2919d7a8668edd013807391b5a92799b369efd18 | size: 1,571 | ext: py | lang: Python
repo: markandersonintel/distributed-tensorflow | path: tf-undistributed/train_val_split.py | head_hexsha: 5fa31d8aa0c7eec54890d91155712096975e9075 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import os
from shutil import copyfile
from shutil import rmtree
import random
def split():
datafolder = 'data/'
trainfolder = 'train/'
valfolder = 'val/'
valpercentage = .2
if not os.path.exists(datafolder):
print(datafolder,'does not exist.')
return;
if os.path.exists(trainfolder):
try:
rmtree(trainfolder, ignore_errors=True)
os.makedirs(trainfolder)
except OSError as e:
print(e)
if os.path.exists(valfolder):
try:
rmtree(valfolder, ignore_errors=True)
os.makedirs(valfolder)
except OSError as e:
print(e)
for root, dirs, files in os.walk(datafolder):
for dir in dirs:
datasubfolder = datafolder+dir+'/'
trainsubfolder = trainfolder+dir+'/'
valsubfolder = valfolder+dir+'/'
os.makedirs(trainsubfolder)
os.makedirs(valsubfolder)
files = os.listdir(datasubfolder)
random.shuffle(files)
for i in range(int(len(files)*(1-valpercentage))):
if not i%1000:
print('train ' + dir + ' ' + str(i))
file = files.pop()
copyfile(datasubfolder + file, trainsubfolder + file)
for i in range(len(files)):
if not i%1000:
print('val ' + dir + ' ' + str(i))
file = files.pop()
copyfile(datasubfolder + file, valsubfolder + file)
split()
avg_line_length: 34.152174 | max_line_length: 70 | alphanum_fraction: 0.527689

hexsha: 722ef645e7a54ca886dac115ae13d4bf80f8a4a7 | size: 5,858 | ext: py | lang: Python
repo: tpmanley/openthread | path: tests/scripts/thread-cert/Cert_5_3_04_AddressMapCache.py | head_hexsha: bc02c6c05cf52884bc6cd9fad8dc8fc16364a147 | licenses: ["BSD-3-Clause"]
stars: null | issues: 2 (2019-02-25T10:21:34.000Z – 2022-01-23T13:05:08.000Z) | forks: 1 (2020-10-22T16:33:36.000Z – 2020-10-22T16:33:36.000Z)
content:
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import node
import config
import command
LEADER = 1
DUT_ROUTER1 = 2
SED1 = 3
ED1 = 4
ED2 = 5
ED3 = 6
ED4 = 7
MTDS = [SED1, ED1, ED2, ED3, ED4]
class Cert_5_3_4_AddressMapCache(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1, 8):
self.nodes[i] = node.Node(i, (i in MTDS), simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED2].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED3].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED4].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[DUT_ROUTER1].set_panid(0xface)
self.nodes[DUT_ROUTER1].set_mode('rsdn')
self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[DUT_ROUTER1].add_whitelist(self.nodes[SED1].get_addr64())
self.nodes[DUT_ROUTER1].enable_whitelist()
self.nodes[DUT_ROUTER1].set_router_selection_jitter(1)
self.nodes[SED1].set_panid(0xface)
self.nodes[SED1].set_mode('s')
self.nodes[SED1].add_whitelist(self.nodes[DUT_ROUTER1].get_addr64())
# Set the SED1's timeout in order to receive the icmp reply when keep
# alive with DUT_ROUTER.
self.nodes[SED1].set_timeout(5)
self.nodes[SED1].enable_whitelist()
for ED in [ED1, ED2, ED3, ED4]:
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].enable_whitelist()
def tearDown(self):
for n in list(self.nodes.values()):
n.stop()
n.destroy()
self.simulator.stop()
def test(self):
# 1
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[DUT_ROUTER1].start()
for i in MTDS:
self.nodes[i].start()
self.simulator.go(5)
self.assertEqual(self.nodes[DUT_ROUTER1].get_state(), 'router')
for i in MTDS:
self.assertEqual(self.nodes[i].get_state(), 'child')
# This method flushes the message queue so calling this method again
# will return only the newly logged messages.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER1)
# 2
for ED in [ED1, ED2, ED3, ED4]:
ed_mleid = self.nodes[ED].get_ip6_address(
config.ADDRESS_TYPE.ML_EID
)
self.assertTrue(self.nodes[SED1].ping(ed_mleid))
self.simulator.go(5)
# Verify DUT_ROUTER1 generated an Address Query Request to find
# each node's RLOC.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER1)
msg = dut_messages.next_coap_message('0.02', '/a/aq')
command.check_address_query(
msg,
self.nodes[DUT_ROUTER1],
config.REALM_LOCAL_ALL_ROUTERS_ADDRESS,
)
# 3 & 4
# This method flushes the message queue so calling this method again
# will return only the newly logged messages.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER1)
for ED in [ED1, ED2, ED3, ED4]:
ed_mleid = self.nodes[ED].get_ip6_address(
config.ADDRESS_TYPE.ML_EID
)
self.assertTrue(self.nodes[SED1].ping(ed_mleid))
self.simulator.go(5)
# Verify DUT_ROUTER1 didn't generate an Address Query Request.
dut_messages = self.simulator.get_messages_sent_by(DUT_ROUTER1)
msg = dut_messages.next_coap_message('0.02', '/a/aq', False)
assert (
msg is None
), "Error: The DUT sent an unexpected Address Query Request"
if __name__ == '__main__':
unittest.main()
avg_line_length: 38.539474 | max_line_length: 79 | alphanum_fraction: 0.662854

hexsha: 10cdeddbcbe69c44b6e8d37feebd141c15549b07 | size: 6,610 | ext: py | lang: Python
repo: KasperHelsted/pajbot | path: pajbot/modules/pointlottery.py | head_hexsha: c366dcfc5e6076f9adcfce24c7a666653068b031 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2020-03-11T19:37:10.000Z – 2020-03-11T19:37:10.000Z)
content:
import logging
from numpy import random
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.modules.base import BaseModule
log = logging.getLogger(__name__)
class PointLotteryModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Point Lottery"
DESCRIPTION = "Lets players participate in lottery for points"
CATEGORY = "Game"
SETTINGS = []
def __init__(self, bot):
super().__init__(bot)
self.lottery_running = False
self.lottery_users = []
self.lottery_points = 0
def load_commands(self, **options):
self.commands["pointlottery"] = Command.raw_command(
self.lottery,
delay_all=0,
delay_user=5,
description="Lottery for points",
examples=[
CommandExample(
None,
"Lottery start",
chat="user:!pointlottery start\n"
"bot:A Lottery has begun. Type !pointlottery join {points} to join the lottery!",
description="Start lottery",
).parse(),
CommandExample(
None,
"Lottery join",
chat="user:!pointlottery join {}",
description="You don't get confirmation whether you joined the lottery or not.",
).parse(),
CommandExample(
None,
"Lottery stop",
chat="user:!pointlottery stop\n" "bot:The lottery has finished! {} won {} points",
description="Finish lottery",
).parse(),
CommandExample(
None,
"Lottery join",
chat="user:!pointlottery {}",
description="You don't get confirmation whether you joined the lottery or not.",
).parse(),
],
)
def lottery(self, **options):
message = options["message"]
source = options["source"]
commands = {
"start": (self.process_start, 500),
"begin": (self.process_start, 500),
"join": (self.process_join, 100),
"": (self.process_join, 100),
"end": (self.process_end, 500),
"stop": (self.process_end, 500),
"status": (self.process_status, 100),
}
try:
if message.split(" ")[0].isdigit():
command = ""
else:
command = str(message.split(" ")[0])
cb, level = commands[command]
if source.level < level:
# User does not have access to run this command
return False
cb(**options)
except (KeyError, ValueError, TypeError, AttributeError):
return False
def process_start(self, **options):
source = options["source"]
bot = options["bot"]
if self.lottery_running:
bot.say("{0}, a lottery is already running OMGScoots".format(source.username_raw))
return False
self.lottery_users = []
self.lottery_running = True
self.lottery_points = 0
bot.websocket_manager.emit("notification", {"message": "A lottery has been started!"})
bot.execute_delayed(
0.75, bot.websocket_manager.emit, ("notification", {"message": "Type !pointlottery join to enter!"})
)
bot.me(
"A lottery has begun. Type !pointlottery join {tickets} or !pointlottery {tickets} to join the lottery! "
"The more tickets you buy, the more chances to win you have! "
"1 ticket costs 1 point"
)
def process_join(self, **options):
source = options["source"]
message = options["message"]
bot = options["bot"]
if not self.lottery_running:
log.debug("No lottery running")
return False
if source in [user for user, points in self.lottery_users if user == source]:
return False
try:
if len(message.split(" ")) == 1:
tickets = int(message.split(" ")[0])
else:
tickets = int(message.split(" ")[1])
if not source.can_afford(tickets):
bot.me("Sorry, {0}, you don't have enough points! FeelsBadMan".format(source.username_raw))
return False
if tickets <= 0:
bot.me("Sorry, {0}, you have to buy at least 1 ticket! FeelsBadMan".format(source.username_raw))
return False
source.points -= tickets
self.lottery_points += tickets
log.info("Lottery points is now at {}".format(self.lottery_points))
except (ValueError, TypeError, AttributeError):
bot.me("Sorry, {0}, I didn't recognize your command! FeelsBadMan".format(source.username_raw))
return False
# Added user to the lottery
self.lottery_users.append((source, tickets))
def process_end(self, **options):
bot = options["bot"]
if not self.lottery_running:
return False
self.lottery_running = False
if not self.lottery_users:
bot.me("Wow, no one joined the lottery DansGame")
return False
winner = self.weighted_choice(self.lottery_users)
log.info("at end, lottery points is now at {}".format(self.lottery_points))
bot.websocket_manager.emit(
"notification",
{"message": "{} won {} points in the lottery!".format(winner.username_raw, self.lottery_points)},
)
bot.me(
"The lottery has finished! {0} won {1} points! PogChamp".format(winner.username_raw, self.lottery_points)
)
winner.points += self.lottery_points
winner.save()
self.lottery_users = []
def process_status(self, **options):
bot = options["bot"]
if not self.lottery_running:
return False
bot.me(
"{} people have joined the lottery so far, for a total of {} points".format(
len(self.lottery_users), self.lottery_points
)
)
@staticmethod
def weighted_choice(choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w >= r:
return c
upto += w
assert False, "Shouldn't get here"
avg_line_length: 33.383838 | max_line_length: 117 | alphanum_fraction: 0.543722

hexsha: f0f7283f5930c31495d9e4dbb04ec69eb9e43246 | size: 439 | ext: py | lang: Python
repo: Metaswitch/nose2 | path: nose2/tests/functional/support/scenario/slow/test_slow.py | head_hexsha: 1fdb46ef9eb279fb1f9b2321dc978977876e8230 | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: 1 (2019-11-24T12:11:52.000Z – 2019-11-24T12:11:52.000Z)
content:
import time
import unittest
import logging
log = logging.getLogger(__name__)
class TestSlow(unittest.TestCase):
def test_ok(self):
print("hide this")
time.sleep(2)
def test_fail(self):
print("show this")
log.debug("hola")
time.sleep(2)
self.assertEqual(1, 2)
def test_err(self):
print("show this too")
log.debug("ciao")
time.sleep(2)
{}['x']
avg_line_length: 18.291667 | max_line_length: 34 | alphanum_fraction: 0.574032

hexsha: a0d3bbdd9c82da419a89e54082798ca68188d71a | size: 2,645 | ext: py | lang: Python
repo: sildar/potara | path: potara/similaritymeasures.py | head_hexsha: cb98b337531e4833058c95ac1eb29d8bcdac0104 | licenses: ["MIT"]
stars: 86 (2015-04-09T08:47:38.000Z – 2021-12-31T03:16:53.000Z) | issues: 11 (2016-02-24T03:02:58.000Z – 2021-01-20T16:30:30.000Z) | forks: 31 (2015-04-13T12:50:16.000Z – 2021-08-23T08:42:35.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Contains several similarity measures.
These measures deal with raw strings which words
are supposed to be white-space separated.
"""
from collections import Counter
from operator import itemgetter
import math
def cosine(s1, s2):
"""
    Returns the cosine similarity between two strings.
>>> cosine("This is a sentence", "This is a sentence")
1.0
"""
vec1 = Counter(s1.split())
vec2 = Counter(s2.split())
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def w2v(s1, s2, wordmodel):
"""
Calculates the similarity between two strings
given a word model that gives word-word similarity.
The word model is supposed to hold a vocab field
that contains the vocabulary.
It must have a similarity(word1, word2) method.
"""
if s1 == s2:
return 1.0
intersection = set(s1.split()) & set(s2.split())
# give 1 point per common word
commonwords = len(intersection)
# We want at least one common word
if commonwords == 0:
return 0
# remove common words
l1 = [word for word in s1.split() if word not in intersection]
l2 = [word for word in s2.split() if word not in intersection]
# change order depending on size
if len(l1) > len(l2):
l1, l2 = l2, l1
totalscore = 0
for t1 in l1:
sublist = []
hasitem = False
for i, t2 in enumerate(l2):
# check if POS are here
if len(t1.split('/')) > 1:
# compare same POS words
if t1.split('/')[1][:2] == t2.split('/')[1][:2]:
if t1 in wordmodel.wv and t2 in wordmodel.wv:
sublist.append((i, wordmodel.wv.similarity(t1, t2)))
hasitem = True
# if we don't know one of the words
# consider them as dissimilar
else:
sublist.append((i, 0))
else:
sublist.append((i, 0))
if hasitem:
maxitem, subscore = max(sublist, key=itemgetter(1))
l2.pop(maxitem)
totalscore += subscore
num = float(commonwords + totalscore)
denum = min(len(s1.split()), len(s2.split()))
score = num / denum
return score
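The `w2v` docstring above asks for a word model exposing word-to-word similarity; the function body actually reads `wordmodel.wv` (a membership test plus a `similarity(w1, w2)` call), which a gensim `Word2Vec` model provides. A minimal illustrative stand-in, not part of the original module, that exercises the function with POS-tagged tokens:

```python
# Toy word model matching the interface w2v() actually uses: an object with a
# .wv attribute supporting "word in wv" and wv.similarity(w1, w2).
class _ToyVectors:
    def __init__(self, scores):
        self._scores = scores  # {(word1, word2): similarity}

    def __contains__(self, word):
        return any(word in pair for pair in self._scores)

    def similarity(self, w1, w2):
        return self._scores.get((w1, w2), self._scores.get((w2, w1), 0.0))

class _ToyWordModel:
    def __init__(self, scores):
        self.wv = _ToyVectors(scores)

# Tokens carry POS tags ("word/TAG") because w2v() only scores same-POS pairs.
model = _ToyWordModel({("cat/NN", "dog/NN"): 0.8})
print(w2v("the/DT cat/NN", "the/DT dog/NN", model))  # (1 common word + 0.8) / 2 = 0.9
```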
avg_line_length: 26.989796 | max_line_length: 76 | alphanum_fraction: 0.567108

hexsha: 45544ee9f2232c52ae106632c2d90919702d7a92 | size: 2,800 | ext: py | lang: Python
repo: ontoinsights/deep_narrative_analysis | path: dna/query_ontology_specific_classes.py | head_hexsha: 2c01e4d870de30a325d9aa84896d346a3ccb4bbd | licenses: ["CC-BY-4.0"]
stars: 9 (2020-03-02T03:41:48.000Z – 2022-03-30T09:28:38.000Z) | issues: null | forks: null
content:
# Query ontology class details
# To avoid passing a store name parameter, the ontology files are preloaded into an 'ontologies' database
# Called by create_event_turtle.py
from database import query_ontology
from utilities import empty_string, owl_thing
domain_query_norp_emotion_or_enum = \
'prefix : <urn:ontoinsights:dna:> SELECT ?class ?prob WHERE { ' \
'{ SERVICE <db://ontologies-database> { ?domain_class_type rdfs:subClassOf+ :class_type } } ' \
'{ SERVICE <db://domain-database> { ?class rdfs:subClassOf* ?domain_class_type . ' \
'{ { ?class :noun_synonym ?nsyn . FILTER(?nsyn = "keyword") . BIND(100 as ?prob) } UNION ' \
'{ ?class rdfs:label ?label . FILTER(?label = "keyword") . BIND(85 as ?prob) } UNION ' \
'{ ?class :noun_synonym ?nsyn . FILTER(CONTAINS(?nsyn, "keyword")) . BIND(90 as ?prob) } UNION ' \
'{ ?class rdfs:label ?label . FILTER(CONTAINS(?label, "keyword")) . BIND(85 as ?prob) } UNION ' \
'{ ?class :definition ?defn . FILTER(CONTAINS(lcase(?defn), " keyword ")) . BIND(80 as ?prob) } UNION ' \
'{ ?class :definition ?defn . FILTER(CONTAINS(lcase(?defn), "keyword")) . BIND(75 as ?prob) } } } } ' \
'} ORDER BY DESC(?prob)'
query_norp_emotion_or_enum = \
'prefix : <urn:ontoinsights:dna:> SELECT ?class ?prob WHERE { ' \
'?class rdfs:subClassOf+ :class_type . ' \
'{ { ?class :noun_synonym ?nsyn . FILTER(?nsyn = "keyword") . BIND(100 as ?prob) } UNION ' \
'{ ?class rdfs:label ?label . FILTER(?label = "keyword") . BIND(85 as ?prob) } UNION ' \
'{ ?class :noun_synonym ?nsyn . FILTER(CONTAINS(?nsyn, "keyword")) . BIND(90 as ?prob) } UNION ' \
'{ ?class rdfs:label ?label . FILTER(CONTAINS(?label, "keyword")) . BIND(85 as ?prob) } UNION ' \
'{ ?class :definition ?defn . FILTER(CONTAINS(lcase(?defn), " keyword ")) . BIND(80 as ?prob) } UNION ' \
'{ ?class :definition ?defn . FILTER(CONTAINS(lcase(?defn), "keyword")) . BIND(75 as ?prob) } } ' \
'} ORDER BY DESC(?prob)'
def get_norp_emotion_or_enum(noun_text: str) -> (str, str):
"""
Check if the input text is a kind of ethnicity, religion, line of work or political ideology.
@param noun_text: String holding the text to be categorized.
@return: A tuple consisting of a string indicating either 'Ethnicity', 'ReligiousBelief',
'LineOfBusiness' or 'PoliticalIdeology', and the specific subclass
"""
for class_type in ('Ethnicity', 'ReligiousBelief', 'LineOfBusiness', 'PoliticalIdeology'):
result = query_ontology(
noun_text, query_norp_emotion_or_enum.replace('class_type', class_type),
domain_query_norp_emotion_or_enum.replace('class_type', class_type))
if result != owl_thing:
return class_type, result
return empty_string, empty_string
avg_line_length: 59.574468 | max_line_length: 109 | alphanum_fraction: 0.658214

hexsha: 6cd61170c0b767169dda44e3fc52ea646493e784 | size: 3,043 | ext: py | lang: Python
repo: Ram-N/pyjanitor | path: janitor/engineering.py | head_hexsha: b85d9c9e8d823e356cb095643db0af05e351b009 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
"""
Engineering-specific data cleaning functions.
"""
import numpy as np
import pandas as pd
import pandas_flavor as pf
from janitor import check
from .utils import import_message
try:
import unyt
except ImportError:
import_message("engineering", "unyt", "conda install -c conda-forge unyt")
@pf.register_dataframe_method
def convert_units(
df: pd.DataFrame,
column_name: str = None,
existing_units: str = None,
to_units: str = None,
dest_column_name: str = None,
) -> pd.DataFrame:
"""
Converts a column of numeric values from one unit to another.
Functional usage example:
.. code-block:: python
import pandas as pd
import janitor.engineering
df = pd.DataFrame(...)
df = janitor.engineering.convert_units(
df=df,
column_name='temp_F',
existing_units='degF',
to_units='degC',
dest_column_name='temp_C'
)
Method chaining usage example:
.. code-block:: python
import pandas as pd
import janitor.engineering
df = pd.DataFrame(...)
df = df.convert_units(
column_name='temp_F',
existing_units='degF',
to_units='degC',
dest_column_name='temp_C'
)
Unit conversion can only take place if the existing_units and
to_units are of the same type (e.g., temperature or pressure).
The provided unit types can be any unit name or alternate name provided
in the unyt package's Listing of Units table:
https://unyt.readthedocs.io/en/stable/unit_listing.html#unit-listing.
Volume units are not provided natively in unyt. However, exponents are
supported, and therefore some volume units can be converted. For example,
a volume in cubic centimeters can be converted to cubic meters using
existing_units='cm**3' and to_units='m**3'.
This method mutates the original DataFrame.
:param df: A pandas dataframe.
:param column_name: Name of the column containing numeric
values that are to be converted from one set of units to another.
:param existing_units: The unit type to convert from.
:param to_units: The unit type to convert to.
:param dest_column_name: The name of the new column containing the
converted values that will be created.
:returns: A pandas DataFrame with a new column of unit-converted values.
"""
# Check all inputs are correct data type
check("column_name", column_name, [str])
check("existing_units", existing_units, [str])
check("to_units", to_units, [str])
check("dest_column_name", dest_column_name, [str])
# Check that column_name is a numeric column
if not np.issubdtype(df[column_name].dtype, np.number):
raise TypeError(f"{column_name} must be a numeric column.")
original_vals = df[column_name].values * unyt.Unit(existing_units)
converted_vals = original_vals.to(to_units)
df[dest_column_name] = np.array(converted_vals)
return df
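# Hedged usage sketch (not from the original module): a minimal end-to-end check of
# convert_units, runnable only where pandas and unyt are installed. The sample values
# are arbitrary; 32 degF and 212 degF should map to roughly 0 degC and 100 degC.
if __name__ == "__main__":
    _df = pd.DataFrame({"temp_F": [32.0, 212.0]})
    _df = convert_units(
        _df,
        column_name="temp_F",
        existing_units="degF",
        to_units="degC",
        dest_column_name="temp_C",
    )
    print(_df["temp_C"].round(2).tolist())  # expected: [0.0, 100.0]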
| 29.833333
| 78
| 0.678607
|
3f1ef6ce7357d20969a260bb0d6f57f2517d53ea
| 222
|
py
|
Python
|
Python/pythonChallenge/5.py
|
JanStoltman/100DaysOfCode
|
1d18b76ed1e3e942e8392006a5d4bfb41484d047
|
[
"MIT"
] | null | null | null |
Python/pythonChallenge/5.py
|
JanStoltman/100DaysOfCode
|
1d18b76ed1e3e942e8392006a5d4bfb41484d047
|
[
"MIT"
] | null | null | null |
Python/pythonChallenge/5.py
|
JanStoltman/100DaysOfCode
|
1d18b76ed1e3e942e8392006a5d4bfb41484d047
|
[
"MIT"
] | null | null | null |
# Python 2 solution: urllib2 and the bare print statement require a Python 2 interpreter.
import urllib2, pickle
url = "http://www.pythonchallenge.com/pc/def/banner.p"
req = urllib2.urlopen(url).read()
data = pickle.loads(req)
print data
#for line in data:
# print ''.join(elem[0]*elem[1] for elem in line)
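# Hedged Python 3 equivalent (not part of the original solution); the pickled data is a
# list of lines, each a list of (character, repeat-count) tuples:
#     import pickle, urllib.request
#     data = pickle.load(urllib.request.urlopen(url))
#     print("\n".join("".join(ch * n for ch, n in line) for line in data))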
| 22.2
| 54
| 0.698198
|
c249ef05a39971272570c44647ad173174573b38
| 1,963
|
py
|
Python
|
SC/aq_dashboard.py
|
alqu7095/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
ff1d2a7ab04f503ca346734a9089203c632172bb
|
[
"MIT"
] | null | null | null |
SC/aq_dashboard.py
|
alqu7095/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
ff1d2a7ab04f503ca346734a9089203c632172bb
|
[
"MIT"
] | null | null | null |
SC/aq_dashboard.py
|
alqu7095/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
ff1d2a7ab04f503ca346734a9089203c632172bb
|
[
"MIT"
] | null | null | null |
from flask import Flask
import requests
import openaq
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
from flask import render_template, request
from .models import DB, Record
api = openaq.OpenAQ()
def create_app():
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
DB.init_app(app)
@app.route('/')
def root():
status, body = api.measurements(city='Los Angeles', parameter = 'pm25')
LA_25 = []
for i in range(0,len(body['results'])):
LA_25.append((body['results'][i]['date']['utc'], body['results'][i]['value']))
risk = DB.session.query(Record).filter(Record.value>10).all()
record = Record.query.all()
return render_template("root.html", risk=risk, record=record, LA_25=LA_25)
@app.route('/refresh', methods = ['POST', 'GET'])
def refresh():
DB.drop_all()
DB.create_all()
status, body = api.measurements(city="Los Angeles", parameter = 'pm25')
if request.method == 'GET':
for i in range(0, len(body['results'])):
DB.session.add(
Record(
datetime = body['results'][i]['date']['utc'],
value = body ['results'][i]['value']))
DB.session.commit()
record = Record.query.all()
        return render_template('refresh.html', records=record)
@app.route('/dashboard', methods=['GET'])
def dashboard():
risk = DB.session.query(Record).filter(Record.value>10).all()
record = Record.query.all()
return render_template("dashboard.html", risk=risk, record=record)
return app
DB = SQLAlchemy()
class Record(DB.Model):
id= DB.Column(DB.Integer, primary_key=True)
datetime = DB.Column(DB.String(25))
value = DB.Column(DB.Float, nullable=False)
def __repr__(self):
return 'Record{}, {}'.format(self.datetime, self.value)
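# Hedged usage sketch (not from the original file): serving the factory locally,
# assuming the package layout SC/aq_dashboard.py with a sibling models.py, e.g.
#     FLASK_APP="SC.aq_dashboard:create_app" flask run
# or, executed as a module so the relative import resolves (python -m SC.aq_dashboard):
if __name__ == "__main__":
    create_app().run(debug=True)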
| 29.298507
| 90
| 0.599083
|
bf4f7bee2af60c6d48dd9cc60c36890984d86fc3
| 689
|
py
|
Python
|
backend/app/main.py
|
flsworld/comment-rick-n-morty
|
fdab96ca5d14fedd2428fbdad7e49ec8e31b4ddd
|
[
"MIT"
] | null | null | null |
backend/app/main.py
|
flsworld/comment-rick-n-morty
|
fdab96ca5d14fedd2428fbdad7e49ec8e31b4ddd
|
[
"MIT"
] | null | null | null |
backend/app/main.py
|
flsworld/comment-rick-n-morty
|
fdab96ca5d14fedd2428fbdad7e49ec8e31b4ddd
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from app.api.router import api_router
from app.core.config import settings
description = """
Comment R&M API helps you do awesome stuff 🚀
## Characters
Characters from the animated series
## Episodes
Episodes from the animated series
## Appearances
An appearance represents a character in an episode
## Comments
Comment made by a user either on
* character
* episode
* appearance
## User
(_not implemented_)
"""
def get_application():
app = FastAPI(
title=settings.PROJECT_NAME, version=settings.VERSION, description=description
)
return app
app = get_application()
app.include_router(api_router, prefix=settings.API_PREFIX)
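# Hedged usage sketch (not from the original file): serving the app locally, assuming
# uvicorn is installed and this is run from the backend/ directory, e.g.
#     uvicorn app.main:app --reload
# Programmatic equivalent:
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("app.main:app", host="127.0.0.1", port=8000, reload=True)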
| 16.023256
| 86
| 0.753266
|
14486cbfa7549cfe8daf7b33116acecea5bf62c4
| 526
|
py
|
Python
|
Aula_54/Dao/base_dao.py
|
Mateus-Silva11/AulasPython
|
d34dc4f62ade438e68b0a80e0baac4d6ec0d378e
|
[
"MIT"
] | null | null | null |
Aula_54/Dao/base_dao.py
|
Mateus-Silva11/AulasPython
|
d34dc4f62ade438e68b0a80e0baac4d6ec0d378e
|
[
"MIT"
] | null | null | null |
Aula_54/Dao/base_dao.py
|
Mateus-Silva11/AulasPython
|
d34dc4f62ade438e68b0a80e0baac4d6ec0d378e
|
[
"MIT"
] | null | null | null |
import sqlalchemy as db
import sqlalchemy.orm  # ensure db.orm.sessionmaker is available
# BaseDao class: object orientation keeps the data-access code simple
class BaseDao:
    def __init__(self):
        # Create the database engine
        # format: dialect+driver://user:password@host:port/database
        conexao = db.create_engine("mysql+mysqlconnector://topskills01:ts2019@mysql.topskills.dev:3306/topskills01")
        # Create a session factory bound to the engine
        criador_sessao = db.orm.sessionmaker()
        criador_sessao.configure(bind=conexao)
        self.sessao = criador_sessao()
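# Hedged usage sketch (not from the original lesson file): a concrete DAO would
# inherit the shared session, for example:
#     class ClienteDao(BaseDao):
#         def listar(self):
#             return self.sessao.query(Cliente).all()
# where Cliente would be a mapped model class (hypothetical here).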
| 43.833333
| 116
| 0.712928
|
110d5b55f447cf2f02df1bc26a4277efba24f1a3
| 477
|
py
|
Python
|
data/scripts/templates/object/tangible/deed/city_deed/shared_garden_tatooine_med_03_deed.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/deed/city_deed/shared_garden_tatooine_med_03_deed.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/deed/city_deed/shared_garden_tatooine_med_03_deed.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/city_deed/shared_garden_tatooine_med_03_deed.iff"
result.attribute_template_id = 2
result.stfName("deed","garden_tatooine_med_03_deed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 28.058824
| 90
| 0.744235
|
a3caf39b5642ec5764d6241a59d2e3004c226158
| 736
|
py
|
Python
|
tests/utils/test_math.py
|
lx120/tinynn
|
88b941a706700ca7f6b1cc4ae7f271df7049348c
|
[
"MIT"
] | 217
|
2019-08-19T07:23:57.000Z
|
2022-03-31T13:00:46.000Z
|
tests/utils/test_math.py
|
lx120/tinynn
|
88b941a706700ca7f6b1cc4ae7f271df7049348c
|
[
"MIT"
] | 19
|
2019-09-06T17:11:13.000Z
|
2022-03-12T00:02:47.000Z
|
tests/utils/test_math.py
|
lx120/tinynn
|
88b941a706700ca7f6b1cc4ae7f271df7049348c
|
[
"MIT"
] | 57
|
2019-09-06T12:12:24.000Z
|
2022-03-29T03:33:01.000Z
|
import numpy as np
import tinynn as tn
tn.seeder.random_seed(31)
def test_softmax():
x = np.array([1.0, 2.0, 3.0, 4.0])
a = np.exp(x - np.max(x))
expect = a / a.sum()
assert np.allclose(tn.math.softmax(x), expect)
x = np.array([1e10, 1e10])
expect = [0.5, 0.5]
assert np.allclose(tn.math.softmax(x), expect)
def test_log_softmax():
x = np.random.uniform(0, 1, 10)
assert np.allclose(tn.math.log_softmax(x), np.log(tn.math.softmax(x)))
x = np.random.uniform(1e10, 1e10, 10)
assert np.allclose(tn.math.log_softmax(x), np.log(tn.math.softmax(x)))
def test_sigmoid():
assert tn.math.sigmoid(1e10) == 1.0
assert tn.math.sigmoid(-1e10) == 0.0
assert tn.math.sigmoid(0) == 0.5
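# Hedged reference sketch (not part of tinynn): numerically stable versions of the
# three functions exercised above, written to illustrate why the large-input
# assertions can hold. tinynn's actual implementations may differ.
def _softmax_ref(x):
    # Shifting by the max keeps exp() from overflowing for inputs like 1e10.
    a = np.exp(x - np.max(x))
    return a / a.sum()
def _log_softmax_ref(x):
    # Log-softmax computed without materialising tiny probabilities.
    shifted = x - np.max(x)
    return shifted - np.log(np.exp(shifted).sum())
def _sigmoid_ref(x):
    # Branch on the sign so exp() is only ever evaluated on non-positive values.
    if x >= 0:
        return 1.0 / (1.0 + np.exp(-x))
    z = np.exp(x)
    return z / (1.0 + z)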
| 23.741935
| 74
| 0.629076
|
5f3507eb5ecfa89fb9b5be5b4548773a2c25a3c1
| 1,111
|
py
|
Python
|
blog/urls.py
|
ZiYin-ss/python-blog
|
df2b18384a6ff691ea4b35a0659d54979eb9da15
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
ZiYin-ss/python-blog
|
df2b18384a6ff691ea4b35a0659d54979eb9da15
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
ZiYin-ss/python-blog
|
df2b18384a6ff691ea4b35a0659d54979eb9da15
|
[
"MIT"
] | null | null | null |
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from blog.settings import DEBUG, MEDIA_ROOT
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('post.urls')),
path('ckeditor/', include('ckeditor_uploader.urls')),
path('search/', include('haystack.urls')),
]
from django.views.static import serve
if DEBUG:
    urlpatterns += [url(r'media/(?P<path>.*)/$', serve, {"document_root": MEDIA_ROOT})]
| 33.666667
| 86
| 0.70117
|
9123a8f0a0461930239491c951ec5a47ff13dc92
| 18,627
|
py
|
Python
|
evennia/scripts/scripts.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/scripts/scripts.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/scripts/scripts.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module defines Scripts, out-of-character entities that can store
data both on themselves and on other objects while also having the
ability to run timers.
"""
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.task import LoopingCall
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from evennia.typeclasses.models import TypeclassBase
from evennia.scripts.models import ScriptDB
from evennia.scripts.manager import ScriptManager
from evennia.utils import logger
from future.utils import with_metaclass
__all__ = ["DefaultScript", "DoNothing", "Store"]
_GA = object.__getattribute__
_SESSIONS = None
class ExtendedLoopingCall(LoopingCall):
"""
LoopingCall that can start at a delay different
than `self.interval`.
"""
start_delay = None
callcount = 0
def start(self, interval, now=True, start_delay=None, count_start=0):
"""
Start running function every interval seconds.
This overloads the LoopingCall default by offering the
start_delay keyword and ability to repeat.
Args:
interval (int): Repeat interval in seconds.
now (bool, optional): Whether to start immediately or after
`start_delay` seconds.
start_delay (int): The number of seconds before starting.
If None, wait interval seconds. Only valid if `now` is `False`.
It is used as a way to start with a variable start time
after a pause.
count_start (int): Number of repeats to start at. The count
goes up every time the system repeats. This is used to
implement something repeating `N` number of times etc.
Raises:
AssertError: if trying to start a task which is already running.
ValueError: If interval is set to an invalid value < 0.
Notes:
As opposed to Twisted's inbuilt count mechanism, this
system will count also if force_repeat() was called rather
than just the number of `interval` seconds since the start.
This allows us to force-step through a limited number of
steps if we want.
"""
assert not self.running, ("Tried to start an already running "
"ExtendedLoopingCall.")
if interval < 0:
raise ValueError("interval must be >= 0")
self.running = True
deferred = self._deferred = Deferred()
self.starttime = self.clock.seconds()
self.interval = interval
self._runAtStart = now
self.callcount = max(0, count_start)
self.start_delay = start_delay if start_delay is None else max(0, start_delay)
if now:
# run immediately
self()
elif start_delay is not None and start_delay >= 0:
# start after some time: for this to work we need to
# trick _scheduleFrom by temporarily setting a different
# self.interval for it to check.
real_interval, self.interval = self.interval, start_delay
self._scheduleFrom(self.starttime)
            # re-set the actual interval (this will be picked up
            # the next time it runs)
self.interval = real_interval
else:
self._scheduleFrom(self.starttime)
return deferred
def __call__(self):
"""
Tick one step. We update callcount (tracks number of calls) as
well as null start_delay (needed in order to correctly
estimate next_call_time at all times).
"""
self.callcount += 1
if self.start_delay:
self.start_delay = None
self.starttime = self.clock.seconds()
super(ExtendedLoopingCall, self).__call__()
def force_repeat(self):
"""
Force-fire the callback
Raises:
AssertionError: When trying to force a task that is not
running.
"""
assert self.running, ("Tried to fire an ExtendedLoopingCall "
"that was not running.")
self.call.cancel()
self.call = None
self.starttime = self.clock.seconds()
self()
def next_call_time(self):
"""
        Get the next call time. This also takes any pending
        start_delay into account.
Returns:
next (int or None): The time in seconds until the next call. This
takes `start_delay` into account. Returns `None` if
the task is not running.
"""
if self.running:
total_runtime = self.clock.seconds() - self.starttime
interval = self.start_delay or self.interval
return interval - (total_runtime % self.interval)
return None
class ScriptBase(with_metaclass(TypeclassBase, ScriptDB)):
"""
Base class for scripts. Don't inherit from this, inherit from the
class `DefaultScript` below instead.
"""
objects = ScriptManager()
class DefaultScript(ScriptBase):
"""
This is the base TypeClass for all Scripts. Scripts describe
events, timers and states in game, they can have a time component
or describe a state that changes under certain conditions.
"""
def __eq__(self, other):
"""
Compares two Scripts. Compares dbids.
Args:
other (Script): A script to compare with.
"""
try:
return other.dbid == self.dbid
except Exception:
return False
def _start_task(self):
"""
Start task runner.
"""
self.ndb._task = ExtendedLoopingCall(self._step_task)
if self.db._paused_time:
# the script was paused; restarting
callcount = self.db._paused_callcount or 0
self.ndb._task.start(self.db_interval,
now=False,
start_delay=self.db._paused_time,
count_start=callcount)
del self.db._paused_time
del self.db._paused_repeats
else:
# starting script anew
self.ndb._task.start(self.db_interval,
now=not self.db_start_delay)
def _stop_task(self):
"""
Stop task runner
"""
task = self.ndb._task
if task and task.running:
task.stop()
def _step_errback(self, e):
"""
Callback for runner errors
"""
cname = self.__class__.__name__
estring = _("Script %(key)s(#%(dbid)s) of type '%(cname)s': at_repeat() error '%(err)s'.") % \
{"key": self.key, "dbid": self.dbid, "cname": cname,
"err": e.getErrorMessage()}
try:
self.db_obj.msg(estring)
except Exception:
pass
logger.log_err(estring)
def _step_callback(self):
"""
Step task runner. No try..except needed due to defer wrap.
"""
if not self.is_valid():
self.stop()
return
# call hook
self.at_repeat()
# check repeats
callcount = self.ndb._task.callcount
maxcount = self.db_repeats
if maxcount > 0 and maxcount <= callcount:
self.stop()
def _step_task(self):
"""
Step task. This groups error handling.
"""
try:
return maybeDeferred(self._step_callback).addErrback(self._step_errback)
except Exception:
logger.log_trace()
# Public methods
def time_until_next_repeat(self):
"""
Get time until the script fires it `at_repeat` hook again.
Returns:
next (int): Time in seconds until the script runs again.
If not a timed script, return `None`.
Notes:
This hook is not used in any way by the script's stepping
system; it's only here for the user to be able to check in
on their scripts and when they will next be run.
"""
task = self.ndb._task
if task:
try:
return int(round(task.next_call_time()))
except TypeError:
pass
return None
def remaining_repeats(self):
"""
Get the number of returning repeats for limited Scripts.
Returns:
remaining (int or `None`): The number of repeats
remaining until the Script stops. Returns `None`
if it has unlimited repeats.
"""
task = self.ndb._task
if task:
return max(0, self.db_repeats - task.callcount)
def start(self, force_restart=False):
"""
Called every time the script is started (for persistent
scripts, this is usually once every server start)
Args:
force_restart (bool, optional): Normally an already
                started script will not be started again. If
                `force_restart=True`, the script will always be restarted,
                regardless of whether it has started before.
Returns:
result (int): 0 or 1 depending on if the script successfully
started or not. Used in counting.
"""
if self.is_active and not force_restart:
# script already runs and should not be restarted.
return 0
obj = self.obj
if obj:
            # check that the scripted object is valid and initialized
try:
obj.cmdset
except AttributeError:
# this means the object is not initialized.
logger.log_trace()
self.is_active = False
return 0
# try to restart a paused script
try:
if self.unpause(manual_unpause=False):
return 1
except RuntimeError:
# manually paused.
return 0
# start the script from scratch
self.is_active = True
try:
self.at_start()
except Exception:
logger.log_trace()
if self.db_interval > 0:
self._start_task()
return 1
def stop(self, kill=False):
"""
Called to stop the script from running. This also deletes the
script.
Args:
kill (bool, optional): - Stop the script without
calling any relevant script hooks.
Returns:
result (int): 0 if the script failed to stop, 1 otherwise.
Used in counting.
"""
if not kill:
try:
self.at_stop()
except Exception:
logger.log_trace()
self._stop_task()
try:
self.delete()
except AssertionError:
logger.log_trace()
return 0
except ObjectDoesNotExist:
return 0
return 1
def pause(self, manual_pause=True):
"""
This stops a running script and stores its active state.
It WILL NOT call the `at_stop()` hook.
"""
self.db._manual_pause = manual_pause
if not self.db._paused_time:
# only allow pause if not already paused
task = self.ndb._task
if task:
self.db._paused_time = task.next_call_time()
self.db._paused_callcount = task.callcount
self._stop_task()
self.is_active = False
def unpause(self, manual_unpause=True):
"""
Restart a paused script. This WILL call the `at_start()` hook.
Args:
manual_unpause (bool, optional): This is False if unpause is
called by the server reload/reset mechanism.
Returns:
result (bool): True if unpause was triggered, False otherwise.
Raises:
            RuntimeError: If trying to automatically restart this script
                (usually after a reset/reload) when it was manually paused,
                and so should not be auto-unpaused.
"""
if not manual_unpause and self.db._manual_pause:
# if this script was paused manually (by a direct call of pause),
# it cannot be automatically unpaused (e.g. by a @reload)
raise RuntimeError
if self.db._paused_time:
# only unpause if previously paused
self.is_active = True
try:
self.at_start()
except Exception:
logger.log_trace()
self._start_task()
return True
def restart(self, interval=None, repeats=None, start_delay=None):
"""
Restarts an already existing/running Script from the
beginning, optionally using different settings. This will
first call the stop hooks, and then the start hooks again.
Args:
interval (int, optional): Allows for changing the interval
of the Script. Given in seconds. if `None`, will use the
already stored interval.
repeats (int, optional): The number of repeats. If unset, will
use the previous setting.
start_delay (bool, optional): If we should wait `interval` seconds
before starting or not. If `None`, re-use the previous setting.
"""
try:
self.at_stop()
except Exception:
logger.log_trace()
self._stop_task()
self.is_active = False
if interval is not None:
self.interval = interval
if repeats is not None:
self.repeats = repeats
if start_delay is not None:
self.start_delay = start_delay
self.start()
def reset_callcount(self, value=0):
"""
Reset the count of the number of calls done.
Args:
value (int, optional): The repeat value to reset to. Default
is to set it all the way back to 0.
Notes:
This is only useful if repeats != 0.
"""
task = self.ndb._task
if task:
task.callcount = max(0, int(value))
def force_repeat(self):
"""
Fire a premature triggering of the script callback. This
will reset the timer and count down repeats as if the script
had fired normally.
"""
task = self.ndb._task
if task:
task.force_repeat()
def at_first_save(self):
"""
        This is called after the very first time this object is saved.
Generally, you don't need to overload this, but only the hooks
called by this method.
"""
self.at_script_creation()
if hasattr(self, "_createdict"):
# this will only be set if the utils.create_script
# function was used to create the object. We want
# the create call's kwargs to override the values
# set by hooks.
cdict = self._createdict
updates = []
if not cdict.get("key"):
if not self.db_key:
self.db_key = "#%i" % self.dbid
updates.append("db_key")
elif self.db_key != cdict["key"]:
self.db_key = cdict["key"]
updates.append("db_key")
if cdict.get("interval") and self.interval != cdict["interval"]:
self.db_interval = cdict["interval"]
updates.append("db_interval")
if cdict.get("start_delay") and self.start_delay != cdict["start_delay"]:
self.db_start_delay = cdict["start_delay"]
updates.append("db_start_delay")
if cdict.get("repeats") and self.repeats != cdict["repeats"]:
self.db_repeats = cdict["repeats"]
updates.append("db_repeats")
if cdict.get("persistent") and self.persistent != cdict["persistent"]:
self.db_persistent = cdict["persistent"]
updates.append("db_persistent")
if updates:
self.save(update_fields=updates)
if not cdict.get("autostart"):
# don't auto-start the script
return
# auto-start script (default)
self.start()
def at_script_creation(self):
"""
Only called once, by the create function.
"""
pass
def is_valid(self):
"""
Is called to check if the script is valid to run at this time.
Should return a boolean. The method is assumed to collect all
needed information from its related self.obj.
"""
return not self._is_deleted
def at_start(self):
"""
Called whenever the script is started, which for persistent
scripts is at least once every server start. It will also be
called when starting again after a pause (such as after a
server reload)
"""
pass
def at_repeat(self):
"""
Called repeatedly if this Script is set to repeat regularly.
"""
pass
def at_stop(self):
"""
        Called whenever it's time for this script to stop (either
        because is_valid returned False or it has run out of iterations).
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save
non-persistent properties across a restart, this is the place
to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
# Some useful default Script types used by Evennia.
class DoNothing(DefaultScript):
"""
A script that does nothing. Used as default fallback.
"""
def at_script_creation(self):
"""
Setup the script
"""
self.key = "sys_do_nothing"
self.desc = "This is an empty placeholder script."
class Store(DefaultScript):
"""
Simple storage script
"""
def at_script_creation(self):
"""
Setup the script
"""
self.key = "sys_storage"
self.desc = "This is a generic storage container."
| 31.732538
| 102
| 0.571643
|
ed2f21b005e347f409fc308b43b9bb80520b4697
| 3,430
|
py
|
Python
|
examples/python/native/cifar10_cnn_concat.py
|
rvinaybharadwaj/FlexFlow
|
bb8a3e24ac623ac48cd97e103b37ba7e715d3d3a
|
[
"Apache-2.0"
] | 2
|
2021-03-17T12:00:35.000Z
|
2021-03-17T12:18:28.000Z
|
examples/python/native/cifar10_cnn_concat.py
|
rvinaybharadwaj/FlexFlow
|
bb8a3e24ac623ac48cd97e103b37ba7e715d3d3a
|
[
"Apache-2.0"
] | null | null | null |
examples/python/native/cifar10_cnn_concat.py
|
rvinaybharadwaj/FlexFlow
|
bb8a3e24ac623ac48cd97e103b37ba7e715d3d3a
|
[
"Apache-2.0"
] | null | null | null |
from flexflow.core import *
from flexflow.keras.datasets import cifar10
from accuracy import ModelAccuracy
def top_level_task():
ffconfig = FFConfig()
alexnetconfig = NetConfig()
print(alexnetconfig.dataset_path)
ffconfig.parse_args()
print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.get_batch_size(), ffconfig.get_workers_per_node(), ffconfig.get_num_nodes()))
ffmodel = FFModel(ffconfig)
dims_input = [ffconfig.get_batch_size(), 3, 32, 32]
input_tensor = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)
t1 = ffmodel.conv2d(input_tensor, 32, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t1 = ffmodel.conv2d(t1, 32, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t2 = ffmodel.conv2d(input_tensor, 32, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t2 = ffmodel.conv2d(t2, 32, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t3 = ffmodel.conv2d(input_tensor, 32, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t3 = ffmodel.conv2d(t3, 32, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t = ffmodel.concat([t1, t2, t3], 1)
t = ffmodel.pool2d(t, 2, 2, 2, 2, 0, 0,)
t1 = ffmodel.conv2d(t, 64, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t2 = ffmodel.conv2d(t, 64, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t = ffmodel.concat([t1, t2], 1)
t = ffmodel.conv2d(t, 64, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU)
t = ffmodel.pool2d(t, 2, 2, 2, 2, 0, 0)
t = ffmodel.flat(t);
t = ffmodel.dense(t, 512, ActiMode.AC_MODE_RELU)
t = ffmodel.dense(t, 10)
t = ffmodel.softmax(t)
ffoptimizer = SGDOptimizer(ffmodel, 0.01)
ffmodel.set_sgd_optimizer(ffoptimizer)
ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
label_tensor = ffmodel.get_label_tensor()
num_samples = 10000
(x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)
x_train = x_train.astype('float32')
x_train /= 255
full_input_array = x_train
print(full_input_array.__array_interface__["strides"])
y_train = y_train.astype('int32')
full_label_array = y_train
dims_full_input = [num_samples, 3, 32, 32]
full_input = ffmodel.create_tensor(dims_full_input, DataType.DT_FLOAT)
dims_full_label = [num_samples, 1]
full_label = ffmodel.create_tensor(dims_full_label, DataType.DT_INT32)
full_input.attach_numpy_array(ffconfig, full_input_array)
full_label.attach_numpy_array(ffconfig, full_label_array)
dataloader_input = SingleDataLoader(ffmodel, input_tensor, full_input, num_samples, DataType.DT_FLOAT)
dataloader_label = SingleDataLoader(ffmodel, label_tensor, full_label, num_samples, DataType.DT_INT32)
full_input.detach_numpy_array(ffconfig)
full_label.detach_numpy_array(ffconfig)
num_samples = dataloader_input.get_num_samples()
ffmodel.init_layers()
epochs = ffconfig.get_epochs()
ts_start = ffconfig.get_current_time()
ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
ts_end = ffconfig.get_current_time()
run_time = 1e-6 * (ts_end - ts_start);
print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time));
perf_metrics = ffmodel.get_perf_metrics()
accuracy = perf_metrics.get_accuracy()
if accuracy < ModelAccuracy.CIFAR10_CNN.value:
assert 0, 'Check Accuracy'
if __name__ == "__main__":
print("cifar10 cnn concat")
top_level_task()
| 38.539326
| 167
| 0.737026
|
b1647c267dc651f218079210988d83f539769102
| 37
|
py
|
Python
|
loop.py
|
pexceful/test
|
02e1336a3f5383dd9cc0569cbc5105b39b8799ad
|
[
"MIT"
] | null | null | null |
loop.py
|
pexceful/test
|
02e1336a3f5383dd9cc0569cbc5105b39b8799ad
|
[
"MIT"
] | null | null | null |
loop.py
|
pexceful/test
|
02e1336a3f5383dd9cc0569cbc5105b39b8799ad
|
[
"MIT"
] | null | null | null |
for i in range(69):
print (i)
| 12.333333
| 20
| 0.513514
|
873c4bb37ec797c4ec251cbf24cefca20d45d25e
| 22,602
|
py
|
Python
|
a5_public/nmt_model.py
|
thophan92/cs224n-winter2019
|
f3f8041b35e949e73167135d662a2bd93e7406de
|
[
"MIT"
] | null | null | null |
a5_public/nmt_model.py
|
thophan92/cs224n-winter2019
|
f3f8041b35e949e73167135d662a2bd93e7406de
|
[
"MIT"
] | null | null | null |
a5_public/nmt_model.py
|
thophan92/cs224n-winter2019
|
f3f8041b35e949e73167135d662a2bd93e7406de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 5
nmt_model.py: NMT Model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
"""
from collections import namedtuple
import sys
from typing import List, Tuple, Dict, Set, Union
import torch
import torch.nn as nn
import torch.nn.utils
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from model_embeddings import ModelEmbeddings
from char_decoder import CharDecoder
Hypothesis = namedtuple('Hypothesis', ['value', 'score'])
import random
class NMT(nn.Module):
""" Simple Neural Machine Translation Model:
        - Bidirectional LSTM Encoder
        - Unidirectional LSTM Decoder
- Global Attention Model (Luong, et al. 2015)
"""
def __init__(self, embed_size, hidden_size, vocab, dropout_rate=0.2, no_char_decoder=False):
""" Init NMT Model.
@param embed_size (int): Embedding size (dimensionality)
@param hidden_size (int): Hidden Size (dimensionality)
@param vocab (Vocab): Vocabulary object containing src and tgt languages
See vocab.py for documentation.
@param dropout_rate (float): Dropout probability, for attention
"""
super(NMT, self).__init__()
self.model_embeddings_source = ModelEmbeddings(embed_size, vocab.src)
self.model_embeddings_target = ModelEmbeddings(embed_size, vocab.tgt)
self.hidden_size = hidden_size
self.dropout_rate = dropout_rate
self.vocab = vocab
### COPY OVER YOUR CODE FROM ASSIGNMENT 4
self.encoder = nn.LSTM(embed_size, hidden_size, bidirectional=True, bias=True)
self.decoder = nn.LSTMCell(embed_size + hidden_size, hidden_size, bias=True)
self.h_projection = nn.Linear(2 * hidden_size, hidden_size, bias=False)
self.c_projection = nn.Linear(2 * hidden_size, hidden_size, bias=False)
self.att_projection = nn.Linear(2 * hidden_size, hidden_size, bias=False)
self.combined_output_projection = nn.Linear(3 * hidden_size, hidden_size, bias=False)
self.target_vocab_projection = nn.Linear(hidden_size, len(vocab.tgt), bias=False)
self.dropout = nn.Dropout(dropout_rate)
### END YOUR CODE FROM ASSIGNMENT 4
if not no_char_decoder:
self.charDecoder = CharDecoder(hidden_size, target_vocab=vocab.tgt)
else:
self.charDecoder = None
def forward(self, source: List[List[str]], target: List[List[str]]) -> torch.Tensor:
""" Take a mini-batch of source and target sentences, compute the log-likelihood of
target sentences under the language models learned by the NMT system.
@param source (List[List[str]]): list of source sentence tokens
@param target (List[List[str]]): list of target sentence tokens, wrapped by `<s>` and `</s>`
@returns scores (Tensor): a variable/tensor of shape (b, ) representing the
log-likelihood of generating the gold-standard target sentence for
each example in the input batch. Here b = batch size.
"""
# Compute sentence lengths
source_lengths = [len(s) for s in source]
# Convert list of lists into tensors
## A4 code
# source_padded = self.vocab.src.to_input_tensor(source, device=self.device) # Tensor: (src_len, b)
# target_padded = self.vocab.tgt.to_input_tensor(target, device=self.device) # Tensor: (tgt_len, b)
# enc_hiddens, dec_init_state = self.encode(source_padded, source_lengths)
# enc_masks = self.generate_sent_masks(enc_hiddens, source_lengths)
# combined_outputs = self.decode(enc_hiddens, enc_masks, dec_init_state, target_padded)
## End A4 code
### YOUR CODE HERE for part 1k
### TODO:
### Modify the code lines above as needed to fetch the character-level tensor
### to feed into encode() and decode(). You should:
### - Keep `target_padded` from A4 code above for predictions
### - Add `source_padded_chars` for character level padded encodings for source
### - Add `target_padded_chars` for character level padded encodings for target
### - Modify calls to encode() and decode() to use the character level encodings
target_padded = self.vocab.tgt.to_input_tensor(target, device=self.device) # (max_sentence_length, batch_size)
source_padded_chars = self.vocab.src.to_input_tensor_char(source, device=self.device) # (max_sentence_length, batch_size, max_word_length)
target_padded_chars = self.vocab.tgt.to_input_tensor_char(target, device=self.device) # (max_sentence_length, batch_size, max_word_length)
enc_hiddens, dec_init_state = self.encode(source_padded_chars, source_lengths)
enc_masks = self.generate_sent_masks(enc_hiddens, source_lengths)
combined_outputs = self.decode(enc_hiddens, enc_masks, dec_init_state, target_padded_chars)
### END YOUR CODE
P = F.log_softmax(self.target_vocab_projection(combined_outputs), dim=-1)
# Zero out, probabilities for which we have nothing in the target text
target_masks = (target_padded != self.vocab.tgt['<pad>']).float()
# Compute log probability of generating true target words
target_gold_words_log_prob = torch.gather(P, index=target_padded[1:].unsqueeze(-1), dim=-1).squeeze(-1) * target_masks[1:]
scores = target_gold_words_log_prob.sum() # mhahn2 Small modification from A4 code.
if self.charDecoder is not None:
max_word_len = target_padded_chars.shape[-1]
target_words = target_padded[1:].contiguous().view(-1)
target_chars = target_padded_chars[1:].contiguous().view(-1, max_word_len) # TODO actively add .contiguous()
            target_outputs = combined_outputs.view(-1, 256)  # note: hard-codes hidden_size == 256
target_chars_oov = target_chars #torch.index_select(target_chars, dim=0, index=oovIndices)
rnn_states_oov = target_outputs #torch.index_select(target_outputs, dim=0, index=oovIndices)
oovs_losses = self.charDecoder.train_forward(target_chars_oov.t(), (rnn_states_oov.unsqueeze(0), rnn_states_oov.unsqueeze(0)))
scores = scores - oovs_losses
return scores
def encode(self, source_padded: torch.Tensor, source_lengths: List[int]) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
""" Apply the encoder to source sentences to obtain encoder hidden states.
Additionally, take the final states of the encoder and project them to obtain initial states for decoder.
@param source_padded (Tensor): Tensor of padded source sentences with shape (src_len, b, max_word_length), where
b = batch_size, src_len = maximum source sentence length. Note that
these have already been sorted in order of longest to shortest sentence.
@param source_lengths (List[int]): List of actual lengths for each of the source sentences in the batch
@returns enc_hiddens (Tensor): Tensor of hidden units with shape (b, src_len, h*2), where
b = batch size, src_len = maximum source sentence length, h = hidden size.
@returns dec_init_state (tuple(Tensor, Tensor)): Tuple of tensors representing the decoder's initial
hidden state and cell.
"""
enc_hiddens, dec_init_state = None, None
### COPY OVER YOUR CODE FROM ASSIGNMENT 4
### Except replace "self.model_embeddings.source" with "self.model_embeddings_source"
X = self.model_embeddings_source(
source_padded) # source_padded.shape : (src_len, b) --> X.shape : (sentence_length, batch_size, emb_word_size)
X = pack_padded_sequence(X, lengths=source_lengths)
enc_hiddens, (last_hidden, last_cell) = self.encoder(X)
enc_hiddens, _ = pad_packed_sequence(enc_hiddens)
enc_hiddens = enc_hiddens.transpose(0, 1)
last_hidden = torch.cat((last_hidden[0], last_hidden[1]), 1) # concatenate forward and backward vectors
init_decoder_hidden = self.h_projection(last_hidden)
last_cell = torch.cat((last_cell[0], last_cell[1]), 1)
init_decoder_cell = self.c_projection(last_cell)
dec_init_state = (init_decoder_hidden, init_decoder_cell)
### END YOUR CODE FROM ASSIGNMENT 4
return enc_hiddens, dec_init_state
def decode(self, enc_hiddens: torch.Tensor, enc_masks: torch.Tensor,
dec_init_state: Tuple[torch.Tensor, torch.Tensor], target_padded: torch.Tensor) -> torch.Tensor:
"""Compute combined output vectors for a batch.
@param enc_hiddens (Tensor): Hidden states (b, src_len, h*2), where
b = batch size, src_len = maximum source sentence length, h = hidden size.
@param enc_masks (Tensor): Tensor of sentence masks (b, src_len), where
b = batch size, src_len = maximum source sentence length.
@param dec_init_state (tuple(Tensor, Tensor)): Initial state and cell for decoder
@param target_padded (Tensor): Gold-standard padded target sentences (tgt_len, b, max_word_length), where
tgt_len = maximum target sentence length, b = batch size.
@returns combined_outputs (Tensor): combined output tensor (tgt_len, b, h), where
tgt_len = maximum target sentence length, b = batch_size, h = hidden size
"""
        # Chop off the <END> token for max-length sentences.
target_padded = target_padded[:-1]
# Initialize the decoder state (hidden and cell)
dec_state = dec_init_state
# Initialize previous combined output vector o_{t-1} as zero
batch_size = enc_hiddens.size(0)
o_prev = torch.zeros(batch_size, self.hidden_size, device=self.device)
# Initialize a list we will use to collect the combined output o_t on each step
combined_outputs = []
### COPY OVER YOUR CODE FROM ASSIGNMENT 4
### Except replace "self.model_embeddings.target" with "self.model_embeddings_target"
enc_hiddens_proj = self.att_projection(enc_hiddens) # enc_hiddens_proj.shape : (b, src_len, h)
Y = self.model_embeddings_target(target_padded) # Y.shape : (tgt_len, b, e)
# for Y_t in torch.split(Y, 1, dim=0):
# Y_t = torch.squeeze(Y_t)
# Ybar_t = torch.cat((Y_t, o_prev))
# dec_state, o_t, e_t = self.step(Ybar_t, dec_state, enc_hiddens, enc_hiddens_proj, enc_masks)
# combined_outputs.append(o_t)
# o_prev = o_t
for Y_t in torch.split(Y, split_size_or_sections=1):
Y_t = Y_t.squeeze(0)
Ybar_t = torch.cat([Y_t, o_prev], dim=-1)
dec_state, o_t, _ = self.step(Ybar_t, dec_state, enc_hiddens, enc_hiddens_proj, enc_masks)
combined_outputs.append(o_t)
o_prev = o_t
combined_outputs = torch.stack(combined_outputs)
### END YOUR CODE FROM ASSIGNMENT 4
return combined_outputs
def step(self, Ybar_t: torch.Tensor,
dec_state: Tuple[torch.Tensor, torch.Tensor],
enc_hiddens: torch.Tensor,
enc_hiddens_proj: torch.Tensor,
enc_masks: torch.Tensor) -> Tuple[Tuple, torch.Tensor, torch.Tensor]:
""" Compute one forward step of the LSTM decoder, including the attention computation.
@param Ybar_t (Tensor): Concatenated Tensor of [Y_t o_prev], with shape (b, e + h). The input for the decoder,
where b = batch size, e = embedding size, h = hidden size.
@param dec_state (tuple(Tensor, Tensor)): Tuple of tensors both with shape (b, h), where b = batch size, h = hidden size.
First tensor is decoder's prev hidden state, second tensor is decoder's prev cell.
@param enc_hiddens (Tensor): Encoder hidden states Tensor, with shape (b, src_len, h * 2), where b = batch size,
src_len = maximum source length, h = hidden size.
@param enc_hiddens_proj (Tensor): Encoder hidden states Tensor, projected from (h * 2) to h. Tensor is with shape (b, src_len, h),
where b = batch size, src_len = maximum source length, h = hidden size.
@param enc_masks (Tensor): Tensor of sentence masks shape (b, src_len),
where b = batch size, src_len is maximum source length.
@returns dec_state (tuple (Tensor, Tensor)): Tuple of tensors both shape (b, h), where b = batch size, h = hidden size.
First tensor is decoder's new hidden state, second tensor is decoder's new cell.
@returns combined_output (Tensor): Combined output Tensor at timestep t, shape (b, h), where b = batch size, h = hidden size.
@returns e_t (Tensor): Tensor of shape (b, src_len). It is attention scores distribution.
Note: You will not use this outside of this function.
We are simply returning this value so that we can sanity check
your implementation.
"""
combined_output = None
### COPY OVER YOUR CODE FROM ASSIGNMENT 4
dec_state = self.decoder(Ybar_t, dec_state)
(dec_hidden, dec_cell) = dec_state
# (b, src_len, h) x (b, h, 1) --> (b, src_len, 1) --> (b, src_len)
e_t = torch.bmm(enc_hiddens_proj, dec_hidden.unsqueeze(2)).squeeze(2)
### END YOUR CODE FROM ASSIGNMENT 4
# Set e_t to -inf where enc_masks has 1
if enc_masks is not None:
e_t.data.masked_fill_(enc_masks.byte(), -float('inf'))
### COPY OVER YOUR CODE FROM ASSIGNMENT 4
alpha_t = nn.functional.softmax(e_t, dim=-1) # (b, src_len)
# (b, 1, src_len) x (b, src_len, 2h) --> (b, 1, 2h) --> (b, 2h)
alpha_t_view = (alpha_t.size(0), 1, alpha_t.size(1))
a_t = torch.bmm(alpha_t.view(*alpha_t_view), enc_hiddens).squeeze(1)
# a_t = alpha_t.unsqueeze(1).bmm(enc_hiddens).squeeze(1) # (b, 2h)
U_t = torch.cat((a_t, dec_hidden), dim=1) # (b, 3h)
V_t = self.combined_output_projection(U_t) # (b, h)
O_t = self.dropout(torch.tanh(V_t)) # (b, h)
### END YOUR CODE FROM ASSIGNMENT 4
combined_output = O_t
return dec_state, combined_output, e_t
def generate_sent_masks(self, enc_hiddens: torch.Tensor, source_lengths: List[int]) -> torch.Tensor:
""" Generate sentence masks for encoder hidden states.
@param enc_hiddens (Tensor): encodings of shape (b, src_len, 2*h), where b = batch size,
src_len = max source length, h = hidden size.
@param source_lengths (List[int]): List of actual lengths for each of the sentences in the batch.
@returns enc_masks (Tensor): Tensor of sentence masks of shape (b, src_len),
where src_len = max source length, h = hidden size.
"""
enc_masks = torch.zeros(enc_hiddens.size(0), enc_hiddens.size(1), dtype=torch.float)
for e_id, src_len in enumerate(source_lengths):
enc_masks[e_id, src_len:] = 1
return enc_masks.to(self.device)
def beam_search(self, src_sent: List[str], beam_size: int=5, max_decoding_time_step: int=70) -> List[Hypothesis]:
""" Given a single source sentence, perform beam search, yielding translations in the target language.
@param src_sent (List[str]): a single source sentence (words)
@param beam_size (int): beam size
@param max_decoding_time_step (int): maximum number of time steps to unroll the decoding RNN
@returns hypotheses (List[Hypothesis]): a list of hypothesis, each hypothesis has two fields:
value: List[str]: the decoded target sentence, represented as a list of words
score: float: the log-likelihood of the target sentence
"""
## A4 code
# src_sents_var = self.vocab.src.to_input_tensor([src_sent], self.device)
## End A4 code
src_sents_var = self.vocab.src.to_input_tensor_char([src_sent], self.device)
src_encodings, dec_init_vec = self.encode(src_sents_var, [len(src_sent)])
src_encodings_att_linear = self.att_projection(src_encodings)
h_tm1 = dec_init_vec
att_tm1 = torch.zeros(1, self.hidden_size, device=self.device)
eos_id = self.vocab.tgt['</s>']
hypotheses = [['<s>']]
hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)
completed_hypotheses = []
t = 0
while len(completed_hypotheses) < beam_size and t < max_decoding_time_step:
t += 1
hyp_num = len(hypotheses)
exp_src_encodings = src_encodings.expand(hyp_num,
src_encodings.size(1),
src_encodings.size(2))
exp_src_encodings_att_linear = src_encodings_att_linear.expand(hyp_num,
src_encodings_att_linear.size(1),
src_encodings_att_linear.size(2))
## A4 code
# y_tm1 = self.vocab.tgt.to_input_tensor(list([hyp[-1]] for hyp in hypotheses), device=self.device)
# y_t_embed = self.model_embeddings_target(y_tm1)
## End A4 code
y_tm1 = self.vocab.tgt.to_input_tensor_char(list([hyp[-1]] for hyp in hypotheses), device=self.device)
y_t_embed = self.model_embeddings_target(y_tm1)
y_t_embed = torch.squeeze(y_t_embed, dim=0)
x = torch.cat([y_t_embed, att_tm1], dim=-1)
(h_t, cell_t), att_t, _ = self.step(x, h_tm1,
exp_src_encodings, exp_src_encodings_att_linear, enc_masks=None)
# log probabilities over target words
log_p_t = F.log_softmax(self.target_vocab_projection(att_t), dim=-1)
live_hyp_num = beam_size - len(completed_hypotheses)
contiuating_hyp_scores = (hyp_scores.unsqueeze(1).expand_as(log_p_t) + log_p_t).view(-1)
top_cand_hyp_scores, top_cand_hyp_pos = torch.topk(contiuating_hyp_scores, k=live_hyp_num)
prev_hyp_ids = top_cand_hyp_pos / len(self.vocab.tgt)
hyp_word_ids = top_cand_hyp_pos % len(self.vocab.tgt)
new_hypotheses = []
live_hyp_ids = []
new_hyp_scores = []
decoderStatesForUNKsHere = []
for prev_hyp_id, hyp_word_id, cand_new_hyp_score in zip(prev_hyp_ids, hyp_word_ids, top_cand_hyp_scores):
prev_hyp_id = prev_hyp_id.item()
hyp_word_id = hyp_word_id.item()
cand_new_hyp_score = cand_new_hyp_score.item()
hyp_word = self.vocab.tgt.id2word[hyp_word_id]
# Record output layer in case UNK was generated
if hyp_word == "<unk>":
hyp_word = "<unk>"+str(len(decoderStatesForUNKsHere))
decoderStatesForUNKsHere.append(att_t[prev_hyp_id])
new_hyp_sent = hypotheses[prev_hyp_id] + [hyp_word]
if hyp_word == '</s>':
completed_hypotheses.append(Hypothesis(value=new_hyp_sent[1:-1],
score=cand_new_hyp_score))
else:
new_hypotheses.append(new_hyp_sent)
live_hyp_ids.append(prev_hyp_id)
new_hyp_scores.append(cand_new_hyp_score)
if len(decoderStatesForUNKsHere) > 0 and self.charDecoder is not None: # decode UNKs
decoderStatesForUNKsHere = torch.stack(decoderStatesForUNKsHere, dim=0)
decodedWords = self.charDecoder.decode_greedy((decoderStatesForUNKsHere.unsqueeze(0), decoderStatesForUNKsHere.unsqueeze(0)), max_length=21, device=self.device)
assert len(decodedWords) == decoderStatesForUNKsHere.size()[0], "Incorrect number of decoded words"
for hyp in new_hypotheses:
if hyp[-1].startswith("<unk>"):
hyp[-1] = decodedWords[int(hyp[-1][5:])]#[:-1]
if len(completed_hypotheses) == beam_size:
break
live_hyp_ids = torch.tensor(live_hyp_ids, dtype=torch.long, device=self.device)
h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])
att_tm1 = att_t[live_hyp_ids]
hypotheses = new_hypotheses
hyp_scores = torch.tensor(new_hyp_scores, dtype=torch.float, device=self.device)
if len(completed_hypotheses) == 0:
completed_hypotheses.append(Hypothesis(value=hypotheses[0][1:],
score=hyp_scores[0].item()))
completed_hypotheses.sort(key=lambda hyp: hyp.score, reverse=True)
return completed_hypotheses
@property
def device(self) -> torch.device:
""" Determine which device to place the Tensors upon, CPU or GPU.
"""
return self.att_projection.weight.device
@staticmethod
def load(model_path: str, no_char_decoder=False):
""" Load the model from a file.
@param model_path (str): path to model
"""
params = torch.load(model_path, map_location=lambda storage, loc: storage)
args = params['args']
model = NMT(vocab=params['vocab'], no_char_decoder=no_char_decoder, **args)
model.load_state_dict(params['state_dict'])
return model
def save(self, path: str):
""" Save the odel to a file.
@param path (str): path to the model
"""
print('save model parameters to [%s]' % path, file=sys.stderr)
params = {
'args': dict(embed_size=self.model_embeddings_source.embed_size, hidden_size=self.hidden_size, dropout_rate=self.dropout_rate),
'vocab': self.vocab,
'state_dict': self.state_dict()
}
torch.save(params, path)
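# Hedged usage sketch (not from the assignment handout): loading a trained checkpoint
# and decoding one tokenized sentence. The checkpoint path and example tokens are
# hypothetical.
if __name__ == "__main__":
    model = NMT.load("model.bin")
    model.eval()
    with torch.no_grad():
        hyps = model.beam_search(["we", "are", "learning", "nmt", "."],
                                 beam_size=5, max_decoding_time_step=70)
    best = hyps[0]
    print(" ".join(best.value), best.score)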
| 51.485194
| 176
| 0.631714
|
12eb0ac032ca0ddfbe6dece3e9903078f5562987
| 10,942
|
py
|
Python
|
tests/zpill.py
|
kjhosein/cloud-custodian
|
b1b24b3d497849f2b80c45071d73e15f2a868ea9
|
[
"Apache-2.0"
] | null | null | null |
tests/zpill.py
|
kjhosein/cloud-custodian
|
b1b24b3d497849f2b80c45071d73e15f2a868ea9
|
[
"Apache-2.0"
] | null | null | null |
tests/zpill.py
|
kjhosein/cloud-custodian
|
b1b24b3d497849f2b80c45071d73e15f2a868ea9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta, tzinfo
import fnmatch
import json
import unittest
import os
import shutil
import zipfile
import boto3
from botocore.response import StreamingBody
import jmespath
from placebo import pill
import placebo
from six import StringIO
###########################################################################
# BEGIN PLACEBO MONKEY PATCH
#
# Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony...
# These monkeypatch patches represent fixes on trunk of that repo that have not been released
# into an extant version, we carry them here. We can drop this when this issue is resolved
#
# https://github.com/garnaat/placebo/issues/63
#
# License - Apache 2.0
# Copyright (c) 2015 Mitch Garnaat
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if "__class__" in target:
class_name = target.pop("__class__")
if "__module__" in obj:
obj.pop("__module__")
# Use getattr(module, class_name) for custom types if needed
if class_name == "datetime":
return datetime(tzinfo=utc, **target)
if class_name == "StreamingBody":
return StringIO(target["body"])
# Return unrecognized structures as-is
return obj
def serialize(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {"__class__": obj.__class__.__name__}
try:
result["__module__"] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime):
result["year"] = obj.year
result["month"] = obj.month
result["day"] = obj.day
result["hour"] = obj.hour
result["minute"] = obj.minute
result["second"] = obj.second
result["microsecond"] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
result["body"] = obj.read()
obj._raw_stream = StringIO(result["body"])
obj._amount_read = 0
return result
# Raise a TypeError if the object isn't recognized
raise TypeError("Type not serializable")
placebo.pill.serialize = serialize
placebo.pill.deserialize = deserialize
# END PLACEBO MONKEY
##########################################################################
class BluePill(pill.Pill):
def playback(self):
super(BluePill, self).playback()
self._avail = self.get_available()
def get_available(self):
return set(
[
os.path.join(self.data_path, n)
for n in fnmatch.filter(os.listdir(self.data_path), "*.json")
]
)
def get_next_file_path(self, service, operation):
fn = super(BluePill, self).get_next_file_path(service, operation)
# couple of double use cases
if fn in self._avail:
self._avail.remove(fn)
else:
print("\ndouble use %s\n" % fn)
return fn
def stop(self):
result = super(BluePill, self).stop()
if self._avail:
print("Unused json files \n %s" % ("\n".join(sorted(self._avail))))
return result
class ZippedPill(pill.Pill):
def __init__(self, path, prefix=None, debug=False):
super(ZippedPill, self).__init__(prefix, debug)
self.path = path
self._used = set()
self.archive = None
def playback(self):
self.archive = zipfile.ZipFile(self.path, "r")
self._files = set(self.archive.namelist())
return super(ZippedPill, self).playback()
def record(self):
self.archive = zipfile.ZipFile(self.path, "a", zipfile.ZIP_DEFLATED)
self._files = set()
files = set([n for n in self.archive.namelist() if n.startswith(self.prefix)])
if not files:
return super(ZippedPill, self).record()
# We can't update files in a zip, so copy
self.archive.close()
os.rename(self.path, "%s.tmp" % self.path)
src = zipfile.ZipFile("%s.tmp" % self.path, "r")
self.archive = zipfile.ZipFile(self.path, "w", zipfile.ZIP_DEFLATED)
for n in src.namelist():
if n in files:
continue
self.archive.writestr(n, src.read(n))
os.remove("%s.tmp" % self.path)
return super(ZippedPill, self).record()
def stop(self):
super(ZippedPill, self).stop()
if self.archive:
self.archive.close()
def save_response(self, service, operation, response_data, http_response=200):
filepath = self.get_new_file_path(service, operation)
pill.LOG.debug("save_response: path=%s", filepath)
json_data = {"status_code": http_response, "data": response_data}
self.archive.writestr(
filepath,
json.dumps(json_data, indent=4, default=pill.serialize),
zipfile.ZIP_DEFLATED,
)
self._files.add(filepath)
def load_response(self, service, operation):
response_file = self.get_next_file_path(service, operation)
self._used.add(response_file)
pill.LOG.debug("load_responses: %s", response_file)
response_data = json.loads(
self.archive.read(response_file), object_hook=pill.deserialize
)
return (
pill.FakeHttpResponse(response_data["status_code"]), response_data["data"]
)
def get_new_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_new_file_path: %s", base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + "*")
for file_path in fnmatch.filter(self._files, glob_pattern):
file_name = os.path.basename(file_path)
m = self.filename_re.match(file_name)
if m:
i = int(m.group("index"))
if i > index:
index = i
index += 1
return os.path.join(self._data_path, "{0}_{1}.json".format(base_name, index))
def get_next_file_path(self, service, operation):
base_name = "{0}.{1}".format(service, operation)
if self.prefix:
base_name = "{0}.{1}".format(self.prefix, base_name)
pill.LOG.debug("get_next_file_path: %s", base_name)
next_file = None
while next_file is None:
index = self._index.setdefault(base_name, 1)
fn = os.path.join(self._data_path, base_name + "_{0}.json".format(index))
if fn in self._files:
next_file = fn
self._index[base_name] += 1
self._files.add(fn)
elif index != 1:
self._index[base_name] = 1
else:
# we are looking for the first index and it's not here
raise IOError("response file ({0}) not found".format(fn))
return fn
def attach(session, data_path, prefix=None, debug=False):
pill = ZippedPill(data_path, prefix=prefix, debug=debug)
pill.attach(session, prefix)
return pill
class PillTest(unittest.TestCase):
archive_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "placebo_data.zip"
)
placebo_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "placebo"
)
output_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "output"
)
recording = False
def assertJmes(self, expr, instance, expected):
value = jmespath.search(expr, instance)
self.assertEqual(value, expected)
def cleanUp(self):
self.pill = None
def record_flight_data(self, test_case, zdata=False, augment=False):
self.recording = True
test_dir = os.path.join(self.placebo_dir, test_case)
if not (zdata or augment):
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.makedirs(test_dir)
session = boto3.Session()
default_region = session.region_name
if not zdata:
pill = placebo.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, debug=True)
pill.record()
self.pill = pill
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
def factory(region=None, assume=None):
if region and region != default_region:
new_session = boto3.Session(region_name=region)
assert not zdata
new_pill = placebo.attach(new_session, test_dir, debug=True)
new_pill.record()
self.addCleanup(new_pill.stop)
return new_session
return session
return factory
def replay_flight_data(self, test_case, zdata=False, region=None):
"""
The `region` argument is to allow functional tests to override the
default region. It is unused when replaying stored data.
"""
if os.environ.get("C7N_FUNCTIONAL") == "yes":
self.recording = True
return lambda region=region, assume=None: boto3.Session(region_name=region)
if not zdata:
test_dir = os.path.join(self.placebo_dir, test_case)
if not os.path.exists(test_dir):
raise RuntimeError("Invalid Test Dir for flight data %s" % test_dir)
session = boto3.Session()
if not zdata:
pill = placebo.attach(session, test_dir)
# pill = BluePill()
# pill.attach(session, test_dir)
else:
pill = attach(session, self.archive_path, test_case, False)
pill.playback()
self.addCleanup(pill.stop)
self.addCleanup(self.cleanUp)
return lambda region=None, assume=None: session
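# Usage sketch (editorial illustration; the class and flight-data names are
# hypothetical):
#
#   class MyResourceTest(PillTest):
#       def test_describe_instances(self):
#           factory = self.replay_flight_data("test_my_resource_describe")
#           client = factory().client("ec2")
#           client.describe_instances()
#
# Setting C7N_FUNCTIONAL=yes in the environment makes replay_flight_data hand
# back live boto3 sessions instead of replaying the recorded responses.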
| 32.858859 | 93 | 0.61561 |
74bb4253cc3841ff6d81fc81ca5850e23c65ad05 | 1,279 | py | Python | install.py | ohrn/critic | 26d38be643703d8bf9e6bfdb370c887251b1c109 | ["Apache-2.0"] | null | null | null | install.py | ohrn/critic | 26d38be643703d8bf9e6bfdb370c887251b1c109 | ["Apache-2.0"] | null | null | null | install.py | ohrn/critic | 26d38be643703d8bf9e6bfdb370c887251b1c109 | ["Apache-2.0"] | null | null | null |
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from __future__ import print_function
import sys
if __name__ == "__main__":
# To avoid accidentally creating files owned by root.
sys.dont_write_bytecode = True
# Python version check is done before imports below so
# that python 2.6/2.5 users can see the error message.
import pythonversion
pythonversion.check("""\
NOTE: This script must be run in the Python interpreter that will be
used to run Critic.
""")
if sys.flags.optimize > 0:
print("""
ERROR: Please run this script without -O or -OO options.
""")
sys.exit(1)
from installation import install_main
install_main.main()
| 30.452381 | 80 | 0.724003 |
dbaa624ae9d6a5a5949db692e52c0c1deb18b8df | 28,542 | py | Python | tensorflow/contrib/gan/python/losses/python/losses_impl_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | ["Apache-2.0"] | 13 | 2018-07-23T18:53:35.000Z | 2021-11-18T19:56:45.000Z | tensorflow/contrib/gan/python/losses/python/losses_impl_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | ["Apache-2.0"] | 1 | 2018-03-28T23:47:43.000Z | 2018-03-28T23:47:43.000Z | tensorflow/contrib/gan/python/losses/python/losses_impl_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | ["Apache-2.0"] | 13 | 2018-09-07T13:28:38.000Z | 2020-07-17T15:06:24.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.losses.python import losses_impl as tfgan_losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.losses import losses as tf_losses
from tensorflow.python.platform import test
# TODO(joelshor): Use `parameterized` tests when opensourced.
class _LossesTest(object):
def init_constants(self):
self._discriminator_real_outputs_np = [-5.0, 1.4, 12.5, 2.7]
self._discriminator_gen_outputs_np = [10.0, 4.4, -5.5, 3.6]
self._weights = 2.3
self._discriminator_real_outputs = constant_op.constant(
self._discriminator_real_outputs_np, dtype=dtypes.float32)
self._discriminator_gen_outputs = constant_op.constant(
self._discriminator_gen_outputs_np, dtype=dtypes.float32)
def test_generator_all_correct(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(
self._discriminator_gen_outputs, loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
loss = self._g_loss_fn(
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
loss = self._d_loss_fn(
array_ops.reshape(self._discriminator_real_outputs, [2, 2]),
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._g_loss_fn(logits, weights=weights)
self.assertEqual(logits.dtype, loss.dtype)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [[10.0, 4.4, -5.5, 3.6]],
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
logits2 = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
generated_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._d_loss_fn(
logits, logits2, real_weights=real_weights,
generated_weights=generated_weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [self._discriminator_real_outputs_np],
logits2: [self._discriminator_gen_outputs_np],
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, weights=self._weights)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=self._weights, generated_weights=self._weights)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs,
weights=constant_op.constant(self._weights))
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=weights, generated_weights=weights)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(self._discriminator_gen_outputs, add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class LeastSquaresLossTest(test.TestCase, _LossesTest):
"""Tests for least_squares_xxx_loss."""
def setUp(self):
super(LeastSquaresLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 17.69625
self._expected_d_loss = 41.73375
self._generator_loss_name = 'lsq_generator_loss/value'
self._discriminator_loss_name = 'lsq_discriminator_loss/add'
self._g_loss_fn = tfgan_losses.least_squares_generator_loss
self._d_loss_fn = tfgan_losses.least_squares_discriminator_loss
class ModifiedLossTest(test.TestCase, _LossesTest):
"""Tests for modified_xxx_loss."""
def setUp(self):
super(ModifiedLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 1.38582
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_modified_loss/value'
self._discriminator_loss_name = 'discriminator_modified_loss/add_1'
self._g_loss_fn = tfgan_losses.modified_generator_loss
self._d_loss_fn = tfgan_losses.modified_discriminator_loss
class MinimaxLossTest(test.TestCase, _LossesTest):
"""Tests for minimax_xxx_loss."""
def setUp(self):
super(MinimaxLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -4.82408
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_minimax_loss/Neg'
self._discriminator_loss_name = 'discriminator_minimax_loss/add_1'
self._g_loss_fn = tfgan_losses.minimax_generator_loss
self._d_loss_fn = tfgan_losses.minimax_discriminator_loss
class WassersteinLossTest(test.TestCase, _LossesTest):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(WassersteinLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -3.12500
self._expected_d_loss = 0.22500
self._generator_loss_name = 'generator_wasserstein_loss/value'
self._discriminator_loss_name = 'discriminator_wasserstein_loss/sub'
self._g_loss_fn = tfgan_losses.wasserstein_generator_loss
self._d_loss_fn = tfgan_losses.wasserstein_discriminator_loss
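# Editorial note: the Wasserstein constants above follow directly from the
# fixture in _LossesTest.init_constants:
#   mean(D(G(z))) = (10.0 + 4.4 - 5.5 + 3.6) / 4 = 3.125
#   mean(D(x))    = (-5.0 + 1.4 + 12.5 + 2.7) / 4 = 2.9
#   generator loss     = -mean(D(G(z)))             = -3.125
#   discriminator loss = mean(D(G(z))) - mean(D(x)) = 3.125 - 2.9 = 0.225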
# TODO(joelshor): Use `parameterized` tests when opensourced.
# TODO(joelshor): Refactor this test to use the same code as the other losses.
class ACGANLossTest(test.TestCase):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(ACGANLossTest, self).setUp()
self._g_loss_fn = tfgan_losses.acgan_generator_loss
self._d_loss_fn = tfgan_losses.acgan_discriminator_loss
self._discriminator_gen_classification_logits_np = [[10.0, 4.4, -5.5, 3.6],
[-4.0, 4.4, 5.2, 4.6],
[1.1, 2.4, -3.5, 5.6],
[1.1, 2.4, -3.5, 5.6]]
self._discriminator_real_classification_logits_np = [[-2.0, 0.4, 12.5, 2.7],
[-1.2, 1.9, 12.3, 2.6],
[-2.4, -1.7, 2.5, 2.7],
[1.1, 2.4, -3.5, 5.6]]
self._one_hot_labels_np = [[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]]
self._weights = 2.3
self._discriminator_gen_classification_logits = constant_op.constant(
self._discriminator_gen_classification_logits_np, dtype=dtypes.float32)
self._discriminator_real_classification_logits = constant_op.constant(
self._discriminator_real_classification_logits_np, dtype=dtypes.float32)
self._one_hot_labels = constant_op.constant(
self._one_hot_labels_np, dtype=dtypes.float32)
self._generator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._discriminator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'discriminator_real_classification_logits':
self._discriminator_real_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._generator_loss_name = 'acgan_generator_loss/value'
self._discriminator_loss_name = 'acgan_discriminator_loss/add'
self._expected_g_loss = 3.84974
self._expected_d_loss = 9.43950
def test_generator_all_correct(self):
loss = self._g_loss_fn(**self._generator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(**self._discriminator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(loss_collection='collection', **self._generator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(loss_collection='collection', **self._discriminator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._generator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._discriminator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._generator_kwargs.items()}
loss = self._g_loss_fn(**patch_args)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._discriminator_kwargs.items()}
loss = self._d_loss_fn(**patch_args)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._g_loss_fn(gen_logits, one_hot_labels)
with self.test_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits_and_weights(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._d_loss_fn(gen_logits, real_logits, one_hot_labels)
with self.test_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
real_logits: self._discriminator_real_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(weights=self._weights, **self._generator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
real_weights=self._weights, generated_weights=self._weights,
**self._discriminator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(
weights=constant_op.constant(self._weights), **self._generator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(real_weights=weights, generated_weights=weights,
**self._discriminator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(add_summaries=True, **self._generator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(add_summaries=True, **self._discriminator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class _PenaltyTest(object):
def test_all_correct(self):
loss = self._penalty_fn(**self._kwargs)
self.assertEqual(self._expected_dtype, loss.dtype)
self.assertEqual(self._expected_op_name, loss.op.name)
with self.test_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss, loss.eval(), 6)
def test_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._penalty_fn(loss_collection='collection', **self._kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_no_reduction(self):
loss = self._penalty_fn(reduction=tf_losses.Reduction.NONE, **self._kwargs)
self.assertAllEqual([self._batch_size], loss.shape)
def test_python_scalar_weight(self):
loss = self._penalty_fn(weights=2.3, **self._kwargs)
with self.test_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
def test_scalar_tensor_weight(self):
loss = self._penalty_fn(weights=constant_op.constant(2.3), **self._kwargs)
with self.test_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
class GradientPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for wasserstein_gradient_penalty."""
def setUp(self):
super(GradientPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.wasserstein_gradient_penalty
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._expected_dtype = dtypes.float32
with variable_scope.variable_scope('fake_scope') as self._scope:
self._discriminator_fn(0.0, 0.0)
self._kwargs = {
'generated_data': constant_op.constant(
self._generated_data_np, dtype=self._expected_dtype),
'real_data': constant_op.constant(
self._real_data_np, dtype=self._expected_dtype),
'generator_inputs': None,
'discriminator_fn': self._discriminator_fn,
'discriminator_scope': self._scope,
}
self._expected_loss = 9.00000
self._expected_op_name = 'wasserstein_gradient_penalty/value'
self._batch_size = 1
def _discriminator_fn(self, inputs, _):
ops.add_to_collection('fake_update_ops', constant_op.constant(1.0))
return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
def test_loss_with_placeholder(self):
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'])
self.assertEqual(generated_data.dtype, loss.dtype)
with self.test_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(self._expected_loss, loss, 5)
def test_loss_with_gradient_norm_target(self):
"""Test loss value with non default gradient norm target."""
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'],
target=2.0)
with self.test_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(
loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(1.0, loss, 5)
def test_reuses_scope(self):
"""Test that gradient penalty reuses discriminator scope."""
num_vars = len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(
num_vars, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
def test_works_with_get_collection(self):
"""Tests that gradient penalty works inside other scopes."""
# We ran the discriminator once in the setup, so there should be an op
# already in the collection.
self.assertEqual(1, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
# Make sure the op is added to the collection even if it's in a name scope.
with ops.name_scope('loss'):
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(2, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
# Make sure the op is added to the collection even if it's in a variable
# scope.
with variable_scope.variable_scope('loss_vscope'):
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(3, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
class MutualInformationPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for mutual_information_penalty."""
def setUp(self):
super(MutualInformationPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.mutual_information_penalty
self._structured_generator_inputs = [1.0, 2.0]
self._predicted_distributions = [categorical.Categorical(logits=[1.0, 2.0]),
normal.Normal([0.0], [1.0])]
self._expected_dtype = dtypes.float32
self._kwargs = {
'structured_generator_inputs': self._structured_generator_inputs,
'predicted_distributions': self._predicted_distributions,
}
self._expected_loss = 1.61610
self._expected_op_name = 'mutual_information_loss/mul'
self._batch_size = 2
class CombineAdversarialLossTest(test.TestCase):
"""Tests for combine_adversarial_loss."""
def setUp(self):
super(CombineAdversarialLossTest, self).setUp()
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._generated_data = constant_op.constant(
self._generated_data_np, dtype=dtypes.float32)
self._real_data = constant_op.constant(
self._real_data_np, dtype=dtypes.float32)
self._generated_inputs = None
self._expected_loss = 9.00000
def _test_correct_helper(self, use_weight_factor):
variable_list = [variables.Variable(1.0)]
main_loss = variable_list[0] * 2
adversarial_loss = variable_list[0] * 3
gradient_ratio_epsilon = 1e-6
if use_weight_factor:
weight_factor = constant_op.constant(2.0)
gradient_ratio = None
adv_coeff = 2.0
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
else:
weight_factor = None
gradient_ratio = constant_op.constant(0.5)
adv_coeff = 2.0 / (3 * 0.5 + gradient_ratio_epsilon)
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_ratio_epsilon=gradient_ratio_epsilon,
variables=variable_list)
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(expected_loss, combined_loss.eval(), 1e-5)
def test_correct_useweightfactor(self):
self._test_correct_helper(True)
def test_correct_nouseweightfactor(self):
self._test_correct_helper(False)
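  # Editorial note: worked numbers for _test_correct_helper. With the single
  # variable initialized to 1.0, main_loss = 2.0 and adversarial_loss = 3.0:
  #   weight_factor=2.0  -> expected combined loss = 2.0 + 2.0 * 3.0 = 8.0
  #   gradient_ratio=0.5 -> the test expects adv_coeff = 2.0 / (3.0 * 0.5 + 1e-6)
  #                         ~= 1.3333, so combined ~= 2.0 + 1.3333 * 3.0 ~= 6.0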
def _test_no_weight_skips_adversarial_loss_helper(self, use_weight_factor):
"""Test the 0 adversarial weight or grad ratio skips adversarial loss."""
main_loss = constant_op.constant(1.0)
adversarial_loss = constant_op.constant(1.0)
weight_factor = 0.0 if use_weight_factor else None
gradient_ratio = None if use_weight_factor else 0.0
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_summaries=False)
with self.test_session(use_gpu=True):
self.assertEqual(1.0, combined_loss.eval())
def test_no_weight_skips_adversarial_loss_useweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(True)
def test_no_weight_skips_adversarial_loss_nouseweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(False)
def test_stable_global_norm_avoids_overflow(self):
tensors = [array_ops.ones([4]), array_ops.ones([4, 4]) * 1e19, None]
gnorm_is_inf = math_ops.is_inf(clip_ops.global_norm(tensors))
stable_gnorm_is_inf = math_ops.is_inf(
tfgan_losses._numerically_stable_global_norm(tensors))
with self.test_session(use_gpu=True):
self.assertTrue(gnorm_is_inf.eval())
self.assertFalse(stable_gnorm_is_inf.eval())
def test_stable_global_norm_unchanged(self):
"""Test that preconditioning doesn't change global norm value."""
random_seed.set_random_seed(1234)
tensors = [random_ops.random_uniform([3]*i, -10.0, 10.0) for i in range(6)]
gnorm = clip_ops.global_norm(tensors)
precond_gnorm = tfgan_losses._numerically_stable_global_norm(tensors)
with self.test_session(use_gpu=True) as sess:
for _ in range(10): # spot check closeness on more than one sample.
gnorm_np, precond_gnorm_np = sess.run([gnorm, precond_gnorm])
self.assertNear(gnorm_np, precond_gnorm_np, 1e-4)
class CycleConsistencyLossTest(test.TestCase):
"""Tests for cycle_consistency_loss."""
def setUp(self):
super(CycleConsistencyLossTest, self).setUp()
self._data_x_np = [[1.0, 2, 3], [4, 5, 6]]
self._reconstructed_data_x_np = [[7.0, 8, 9], [10, 11, 12]]
self._data_y_np = [1.0, 9]
self._reconstructed_data_y_np = [-2.0, 3]
self._data_x = constant_op.constant(self._data_x_np, dtype=dtypes.float32)
self._reconstructed_data_x = constant_op.constant(
self._reconstructed_data_x_np, dtype=dtypes.float32)
self._data_y = constant_op.constant(self._data_y_np, dtype=dtypes.float32)
self._reconstructed_data_y = constant_op.constant(
self._reconstructed_data_y_np, dtype=dtypes.float32)
def test_correct_loss(self):
loss = tfgan_losses.cycle_consistency_loss(
self._data_x, self._reconstructed_data_x, self._data_y,
self._reconstructed_data_y)
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(5.25, loss.eval(), 1e-5)
if __name__ == '__main__':
test.main()
| 42.097345 | 83 | 0.713335 |
5a9a926c8e4843485b838d0f2a91f8492753fe55 | 289 | py | Python | st_marys/pipelines.py | nbanion/blah | cf14d33d6f6222f4ba8e7582f11150a887508fa2 | ["MIT"] | null | null | null | st_marys/pipelines.py | nbanion/blah | cf14d33d6f6222f4ba8e7582f11150a887508fa2 | ["MIT"] | 8 | 2019-10-12T16:38:21.000Z | 2019-10-21T03:20:56.000Z | st_marys/pipelines.py | nbanion/blah | cf14d33d6f6222f4ba8e7582f11150a887508fa2 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class StMarysPipeline(object):
def process_item(self, item, spider):
return item
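# Example of enabling this pipeline in the project's settings.py (editorial
# illustration; the priority value 300 is arbitrary):
#
#   ITEM_PIPELINES = {
#       "st_marys.pipelines.StMarysPipeline": 300,
#   }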
| 24.083333 | 66 | 0.712803 |
1bba371c954dbca889533f304ca64467f503494e | 40 | py | Python | maps/spice_islands/__init__.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | ["MIT"] | null | null | null | maps/spice_islands/__init__.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | ["MIT"] | null | null | null | maps/spice_islands/__init__.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | ["MIT"] | null | null | null |
from .spice_islands import SpiceIslands
| 20 | 39 | 0.875 |
81c6d9adb42681e3a614f7af89d9c2b4524cfb4d | 1,450 | py | Python | pypiorg/app.py | paulburnz314/flask_talkpython | 65a13c0fc6ab37d13cc996172d7e120e346116a9 | ["MIT"] | null | null | null | pypiorg/app.py | paulburnz314/flask_talkpython | 65a13c0fc6ab37d13cc996172d7e120e346116a9 | ["MIT"] | null | null | null | pypiorg/app.py | paulburnz314/flask_talkpython | 65a13c0fc6ab37d13cc996172d7e120e346116a9 | ["MIT"] | null | null | null |
import os
import sys
from flask import Flask
# import app.data.__all_models
# from flask_sqlalchemy import SQLAlchemy
# from flask_migrate import Migrate
# from app.config import Config
# abspath returns the absolute path of a path
# join joins path components into one path string
# dirname returns the directory containing a file
# __file__ refers to this script's file name
# pardir is represented by ..
# the result below is most likely the flask-talkpython project directory
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, basedir)
import pypiorg.data.db_session as db_session
# db = SQLAlchemy()
# migrate = Migrate()
app = Flask(__name__)
def main():
# app.config.from_object(config_class)
# db.init_app(app)
# migrate.init_app(app, db)
register_blueprints()
setup_db()
app.run(debug=True)
return basedir
# print(f'Connecting to database with {config_class.SQLALCHEMY_DATABASE_URI}')
def setup_db():
db_file = os.path.join(os.path.dirname(__file__), 'db', 'app.sqlite')
db_session.global_init(db_file)
def register_blueprints():
from pypiorg.views.home_views import bp as home_bp
app.register_blueprint(home_bp)
from pypiorg.views.package_views import bp as package_bp
app.register_blueprint(package_bp)
from pypiorg.views.cms_views import bp as cms_bp
app.register_blueprint(cms_bp)
from pypiorg.views.account_views import bp as account_bp
app.register_blueprint(account_bp)
if __name__ == '__main__':
main()
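# Note (editorial): running this module directly (e.g. `python pypiorg/app.py`)
# calls main(), which registers the blueprints, points db_session at
# pypiorg/db/app.sqlite via setup_db(), and starts Flask with debug=True.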
| 25.892857 | 79 | 0.775172 |
83639e3d429f438857b498bf839d646192101206 | 5,759 | py | Python | srunner/tools/scenario_config_parser.py | Disiok/scenario_runner | b883f61005375012b47b17c9b857daad141d1506 | ["MIT"] | 1 | 2020-07-01T08:01:04.000Z | 2020-07-01T08:01:04.000Z | srunner/tools/scenario_config_parser.py | Disiok/scenario_runner | b883f61005375012b47b17c9b857daad141d1506 | ["MIT"] | null | null | null | srunner/tools/scenario_config_parser.py | Disiok/scenario_runner | b883f61005375012b47b17c9b857daad141d1506 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides access to a scenario configuration parser
"""
import glob
import os
import xml.etree.ElementTree as ET
from srunner.scenarioconfigs.scenario_configuration import ScenarioConfiguration, ActorConfigurationData
from srunner.scenarioconfigs.route_scenario_configuration import RouteConfiguration, TargetConfiguration
class ScenarioConfigurationParser(object):
"""
Pure static class providing access to parser methods for scenario configuration files (*.xml)
"""
@staticmethod
def parse_scenario_configuration(scenario_config_file, scenario_name):
"""
Parse scenario configuration file and provide a list of
ScenarioConfigurations @return
If scenario_name starts with "group:" all scenarios within
the config file will be returned. Otherwise only the scenario,
that matches the scenario_name.
"""
single_scenario_only = True
if scenario_name.startswith("group:"):
single_scenario_only = False
scenario_name = scenario_name[6:]
tree = ET.parse(scenario_config_file)
scenario_configurations = []
for scenario in tree.iter("scenario"):
new_config = ScenarioConfiguration()
new_config.town = scenario.attrib.get('town', None)
new_config.name = scenario.attrib.get('name', None)
new_config.type = scenario.attrib.get('type', None)
new_config.other_actors = []
new_config.ego_vehicles = []
new_config.trigger_points = []
for weather in scenario.iter("weather"):
new_config.weather.cloudiness = float(weather.attrib.get("cloudiness", 0))
new_config.weather.precipitation = float(weather.attrib.get("precipitation", 0))
new_config.weather.precipitation_deposits = float(weather.attrib.get("precipitation_deposits", 0))
new_config.weather.wind_intensity = float(weather.attrib.get("wind_intensity", 0.35))
new_config.weather.sun_azimuth_angle = float(weather.attrib.get("sun_azimuth_angle", 0.0))
new_config.weather.sun_altitude_angle = float(weather.attrib.get("sun_altitude_angle", 15.0))
new_config.weather.fog_density = float(weather.attrib.get("fog_density", 0.0))
new_config.weather.fog_distance = float(weather.attrib.get("fog_distance", 0.0))
new_config.weather.wetness = float(weather.attrib.get("wetness", 0.0))
for ego_vehicle in scenario.iter("ego_vehicle"):
new_config.ego_vehicles.append(ActorConfigurationData.parse_from_node(ego_vehicle, 'hero'))
new_config.trigger_points.append(new_config.ego_vehicles[-1].transform)
for target in scenario.iter("target"):
new_config.target = TargetConfiguration(target)
for route in scenario.iter("route"):
route_conf = RouteConfiguration()
route_conf.parse_xml(route)
new_config.route = route_conf
for other_actor in scenario.iter("other_actor"):
new_config.other_actors.append(ActorConfigurationData.parse_from_node(other_actor, 'scenario'))
if single_scenario_only:
if new_config.name == scenario_name:
scenario_configurations.append(new_config)
else:
scenario_configurations.append(new_config)
return scenario_configurations
@staticmethod
def get_list_of_scenarios(config_file_name):
"""
        Parse *all* config files and return a list with all scenario names.
"""
list_of_config_files = glob.glob("{}/srunner/examples/*.xml".format(os.getenv('ROOT_SCENARIO_RUNNER', "./")))
list_of_config_files += glob.glob("{}/srunner/examples/*.xosc".format(os.getenv('ROOT_SCENARIO_RUNNER', "./")))
if config_file_name != '':
list_of_config_files.append(config_file_name)
scenarios = []
for file_name in list_of_config_files:
if ".xosc" in file_name:
tree = ET.parse(file_name)
scenarios.append("{} (OpenSCENARIO)".format(tree.find("FileHeader").attrib.get('description', None)))
else:
tree = ET.parse(file_name)
for scenario in tree.iter("scenario"):
scenarios.append(scenario.attrib.get('name', None))
return scenarios
@staticmethod
def find_scenario_config(scenario_name, config_file_name):
"""
        Parse *all* config files and return the first file that contains a matching scenario configuration.
"""
list_of_config_files = glob.glob("{}/srunner/examples/*.xml".format(os.getenv('ROOT_SCENARIO_RUNNER', "./")))
if config_file_name != '':
list_of_config_files.append(config_file_name)
if scenario_name.startswith("group:"):
scenario_name = scenario_name[6:]
for file_name in list_of_config_files:
tree = ET.parse(file_name)
for scenario in tree.iter("scenario"):
if scenario.attrib.get('type', None) == scenario_name:
return file_name
else:
for file_name in list_of_config_files:
tree = ET.parse(file_name)
for scenario in tree.iter("scenario"):
if scenario.attrib.get('name', None) == scenario_name:
return file_name
return None
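# Usage sketch (editorial illustration; the file and scenario names below are
# placeholders):
#
#   configs = ScenarioConfigurationParser.parse_scenario_configuration(
#       "srunner/examples/MyScenarios.xml", "group:MyScenarios")
#
# With the "group:" prefix every scenario in the file is returned; without it
# only the scenario whose name matches is returned.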
| 40.272727 | 119 | 0.641952 |
9b24c94a638cf1ba168a444fcf48ea086f4213ea | 25,881 | py | Python | multiworld/envs/pygame/multiobject_pygame_env.py | williamd4112/multiworld | bfcbe52db9ac67913ca38b7b3313afb8fdcfaaf7 | ["Apache-2.0"] | null | null | null | multiworld/envs/pygame/multiobject_pygame_env.py | williamd4112/multiworld | bfcbe52db9ac67913ca38b7b3313afb8fdcfaaf7 | ["Apache-2.0"] | null | null | null | multiworld/envs/pygame/multiobject_pygame_env.py | williamd4112/multiworld | bfcbe52db9ac67913ca38b7b3313afb8fdcfaaf7 | ["Apache-2.0"] | null | null | null |
from collections import OrderedDict
import logging
import numpy as np
from gym import spaces
from pygame import Color
import random
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.core.serializable import Serializable
from multiworld.envs.env_util import (
get_stat_in_paths,
create_stats_ordered_dict,
)
from multiworld.envs.pygame.pygame_viewer import PygameViewer
from multiworld.envs.pygame.walls import VerticalWall, HorizontalWall
class Multiobj2DEnv(MultitaskEnv, Serializable):
"""
A little 2D point whose life goal is to reach a target.
"""
def __init__(
self,
render_dt_msec=0,
action_l2norm_penalty=0, # disabled for now
render_onscreen=False,
render_size=84,
reward_type="dense",
action_scale=1.0,
target_radius=0.60,
boundary_dist=4,
ball_radius=0.50,
include_colors_in_obs = False,
walls=None,
num_colors = 8,
change_colors=True,
fixed_colors = False,
fixed_goal=None,
randomize_position_on_reset=True,
images_are_rgb=False, # else black and white
show_goal=True,
use_env_labels = False,
num_objects=1,
include_white=False,
**kwargs
):
if walls is None:
walls = []
if walls is None:
walls = []
if fixed_goal is not None:
fixed_goal = np.array(fixed_goal)
if len(kwargs) > 0:
LOGGER = logging.getLogger(__name__)
            LOGGER.log(logging.WARNING, "WARNING, ignoring kwargs: %s", kwargs)
self.quick_init(locals())
self.render_dt_msec = render_dt_msec
self.action_l2norm_penalty = action_l2norm_penalty
self.render_onscreen = render_onscreen
self.render_size = render_size
self.reward_type = reward_type
self.action_scale = action_scale
self.target_radius = target_radius
self.boundary_dist = boundary_dist
self.ball_radius = ball_radius
self.walls = walls
self.fixed_goal = fixed_goal
self.randomize_position_on_reset = randomize_position_on_reset
self.images_are_rgb = images_are_rgb
self.show_goal = show_goal
self.num_objects = num_objects
self.max_target_distance = self.boundary_dist - self.target_radius
self.change_colors = change_colors
self.fixed_colors = fixed_colors
self.num_colors = num_colors
self._target_position = None
self._position = np.zeros((2))
self.include_white = include_white
self.initial_pass = False
self.white_passed = False
if change_colors:
self.randomize_colors()
else:
self.object_colors = [Color('blue')]
u = np.ones(2)
self.action_space = spaces.Box(-u, u, dtype=np.float32)
self.include_colors_in_obs = include_colors_in_obs
o = self.boundary_dist * np.ones(2)
ohe_range = spaces.Box(np.zeros(self.num_colors), np.ones(self.num_colors), dtype='float32')
if include_colors_in_obs and self.fixed_colors:
high = np.concatenate((o, np.ones(self.num_colors)))
low = np.concatenate((-o, np.zeros(self.num_colors)))
else:
high = o
low = -o
self.obs_range = spaces.Box(low, high, dtype='float32')
self.use_env_labels = use_env_labels
if self.use_env_labels:
self.observation_space = spaces.Dict([
('label', ohe_range),
('observation', self.obs_range),
('desired_goal', self.obs_range),
('achieved_goal', self.obs_range),
('state_observation', self.obs_range),
('state_desired_goal', self.obs_range),
('state_achieved_goal', self.obs_range),
])
else:
self.observation_space = spaces.Dict([
('observation', self.obs_range),
('desired_goal', self.obs_range),
('achieved_goal', self.obs_range),
('state_observation', self.obs_range),
('state_desired_goal', self.obs_range),
('state_achieved_goal', self.obs_range),
])
self.drawer = None
self.render_drawer = None
self.color_index = 0
self.colors = [Color('green'), Color('red'), Color('blue'), Color('black'), Color('purple'), Color('brown'), Color('pink'), Color('orange'), Color('grey'), Color('yellow')]
def randomize_colors(self):
self.object_colors = []
rgbs = np.random.randint(0, 256, (self.num_objects, 3))
for i in range(self.num_objects):
if self.fixed_colors:
self.object_colors.append(self.colors[self.color_index])
self.color_index = (self.color_index + 1) % 10
else:
rgb = map(int, rgbs[i, :])
self.object_colors.append(Color(*rgb, 255))
def step(self, velocities):
assert self.action_scale <= 1.0
velocities = np.clip(velocities, a_min=-1, a_max=1) * self.action_scale
new_position = self._position + velocities
for wall in self.walls:
new_position = wall.handle_collision(
self._position, new_position
)
self._position = new_position
self._position = np.clip(
self._position,
a_min=-self.boundary_dist,
a_max=self.boundary_dist,
)
distance_to_target = np.linalg.norm(
self._position - self._target_position
)
is_success = distance_to_target < self.target_radius
ob = self._get_obs()
reward = self.compute_reward(velocities, ob)
info = {
'radius': self.target_radius,
'target_position': self._target_position,
'distance_to_target': distance_to_target,
'velocity': velocities,
'speed': np.linalg.norm(velocities),
'is_success': is_success,
}
done = False
return ob, reward, done, info
def reset(self):
if self.white_passed:
self.include_white = False
if self.change_colors:
self.randomize_colors()
self._target_position = self.sample_goal()['state_desired_goal']
if self.randomize_position_on_reset:
self._position = self._sample_position(
self.obs_range.low,
self.obs_range.high,
)
if self.initial_pass:
self.white_passed = True
self.initial_pass = True
return self._get_obs()
def _position_inside_wall(self, pos):
for wall in self.walls:
if wall.contains_point(pos):
return True
return False
def _sample_position(self, low, high):
pos = np.random.uniform(low, high)
while self._position_inside_wall(pos) is True:
pos = np.random.uniform(low, high)
return pos
def _get_obs(self):
obs = self._position.copy()
        if self.use_env_labels:
            # NOTE: `ohe` is referenced below but never built in this module;
            # when use_env_labels is True, a one-hot label vector (presumably
            # of length self.num_colors) would have to be constructed here.
            return dict(
                observation=obs,
                label=ohe,
desired_goal=self._target_position.copy(),
achieved_goal=self._position.copy(),
state_observation=self._position.copy(),
state_desired_goal=self._target_position.copy(),
state_achieved_goal=self._position.copy(),
)
else:
return dict(
observation=obs,
desired_goal=self._target_position.copy(),
achieved_goal=self._position.copy(),
state_observation=self._position.copy(),
state_desired_goal=self._target_position.copy(),
state_achieved_goal=self._position.copy(),
)
def compute_rewards(self, actions, obs):
achieved_goals = obs['state_achieved_goal']
desired_goals = obs['state_desired_goal']
d = np.linalg.norm(achieved_goals - desired_goals, axis=-1)
if self.reward_type == "sparse":
return -(d > self.target_radius).astype(np.float32)
elif self.reward_type == "dense":
return -d
elif self.reward_type == 'vectorized_dense':
return -np.abs(achieved_goals - desired_goals)
else:
raise NotImplementedError()
def compute_reward_gym(self, achieved_goal, desired_goal, info):
d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
if self.reward_type == "sparse":
return -(d > self.target_radius).astype(np.float32)
elif self.reward_type == "dense":
return -d
elif self.reward_type == 'vectorized_dense':
return -np.abs(achieved_goal - desired_goal)
else:
raise NotImplementedError()
def get_diagnostics(self, paths, prefix=''):
statistics = OrderedDict()
for stat_name in [
'radius',
'target_position',
'distance_to_target',
'velocity',
'speed',
'is_success',
]:
stat_name = stat_name
stat = get_stat_in_paths(paths, 'env_infos', stat_name)
statistics.update(create_stats_ordered_dict(
'%s%s' % (prefix, stat_name),
stat,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'Final %s%s' % (prefix, stat_name),
[s[-1] for s in stat],
always_show_all_stats=True,
))
return statistics
def get_goal(self):
return {
'desired_goal': self._target_position.copy(),
'state_desired_goal': self._target_position.copy(),
}
def sample_goals(self, batch_size):
if not self.fixed_goal is None:
goals = np.repeat(
self.fixed_goal.copy()[None],
batch_size,
0
)
else:
goals = np.zeros((batch_size, self.obs_range.low.size))
for b in range(batch_size):
if batch_size > 1:
logging.warning("This is very slow!")
goals[b, :] = self._sample_position(
self.obs_range.low,
self.obs_range.high,
)
return {
'desired_goal': goals,
'state_desired_goal': goals,
}
def set_position(self, pos):
self._position[0] = pos[0]
self._position[1] = pos[1]
"""Functions for ImageEnv wrapper"""
def get_image(self, width=None, height=None):
"""Returns a black and white image"""
if self.drawer is None:
if width != height:
raise NotImplementedError()
self.drawer = PygameViewer(
screen_width=width,
screen_height=height,
x_bounds=(-self.boundary_dist - self.ball_radius, self.boundary_dist + self.ball_radius),
y_bounds=(-self.boundary_dist - self.ball_radius, self.boundary_dist + self.ball_radius),
render_onscreen=self.render_onscreen,
)
self.draw(self.drawer, False)
img = self.drawer.get_image()
if self.images_are_rgb:
return img.transpose((1, 0, 2))
else:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
img = (-r + b).transpose().flatten()
return img
def set_to_goal(self, goal_dict):
goal = goal_dict["state_desired_goal"]
self._position = goal
self._target_position = goal
def get_env_state(self):
return self._get_obs()
def set_env_state(self, state):
position = state["state_observation"]
goal = state["state_desired_goal"]
self._position = position
self._target_position = goal
def draw(self, drawer, tick):
# if self.drawer is not None:
# self.drawer.fill(Color('white'))
# if self.render_drawer is not None:
# self.render_drawer.fill(Color('white'))
drawer.fill(Color('white'))
if self.show_goal:
drawer.draw_solid_circle(
self._target_position,
self.target_radius,
Color('green'),
)
if not self.include_white:
drawer.draw_solid_circle(
self._position,
self.ball_radius,
self.object_colors[0],
)
for wall in self.walls:
drawer.draw_segment(
wall.endpoint1,
wall.endpoint2,
Color('black'),
)
drawer.draw_segment(
wall.endpoint2,
wall.endpoint3,
Color('black'),
)
drawer.draw_segment(
wall.endpoint3,
wall.endpoint4,
Color('black'),
)
drawer.draw_segment(
wall.endpoint4,
wall.endpoint1,
Color('black'),
)
drawer.render()
if tick:
drawer.tick(self.render_dt_msec)
def render(self, close=False):
if close:
self.render_drawer = None
return
if self.render_drawer is None or self.render_drawer.terminated:
self.render_drawer = PygameViewer(
self.render_size,
self.render_size,
x_bounds=(-self.boundary_dist-self.ball_radius, self.boundary_dist+self.ball_radius),
y_bounds=(-self.boundary_dist-self.ball_radius, self.boundary_dist+self.ball_radius),
render_onscreen=True,
)
self.draw(self.render_drawer, True)
def get_diagnostics(self, paths, prefix=''):
statistics = OrderedDict()
for stat_name in [
'distance_to_target',
]:
stat_name = stat_name
stat = get_stat_in_paths(paths, 'env_infos', stat_name)
statistics.update(create_stats_ordered_dict(
'%s%s' % (prefix, stat_name),
stat,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'Final %s%s' % (prefix, stat_name),
[s[-1] for s in stat],
always_show_all_stats=True,
))
return statistics
"""Static visualization/utility methods"""
@staticmethod
def true_model(state, action):
velocities = np.clip(action, a_min=-1, a_max=1)
position = state
new_position = position + velocities
return np.clip(
new_position,
a_min=-Point2DEnv.boundary_dist,
a_max=Point2DEnv.boundary_dist,
)
@staticmethod
def true_states(state, actions):
real_states = [state]
for action in actions:
next_state = Point2DEnv.true_model(state, action)
real_states.append(next_state)
state = next_state
return real_states
@staticmethod
def plot_trajectory(ax, states, actions, goal=None):
assert len(states) == len(actions) + 1
x = states[:, 0]
y = -states[:, 1]
num_states = len(states)
plasma_cm = plt.get_cmap('plasma')
for i, state in enumerate(states):
color = plasma_cm(float(i) / num_states)
ax.plot(state[0], -state[1],
marker='o', color=color, markersize=10,
)
actions_x = actions[:, 0]
actions_y = -actions[:, 1]
ax.quiver(x[:-1], y[:-1], x[1:] - x[:-1], y[1:] - y[:-1],
scale_units='xy', angles='xy', scale=1, width=0.005)
ax.quiver(x[:-1], y[:-1], actions_x, actions_y, scale_units='xy',
angles='xy', scale=1, color='r',
width=0.0035, )
ax.plot(
[
-Point2DEnv.boundary_dist,
-Point2DEnv.boundary_dist,
],
[
Point2DEnv.boundary_dist,
-Point2DEnv.boundary_dist,
],
color='k', linestyle='-',
)
ax.plot(
[
Point2DEnv.boundary_dist,
-Point2DEnv.boundary_dist,
],
[
Point2DEnv.boundary_dist,
Point2DEnv.boundary_dist,
],
color='k', linestyle='-',
)
ax.plot(
[
Point2DEnv.boundary_dist,
Point2DEnv.boundary_dist,
],
[
Point2DEnv.boundary_dist,
-Point2DEnv.boundary_dist,
],
color='k', linestyle='-',
)
ax.plot(
[
Point2DEnv.boundary_dist,
-Point2DEnv.boundary_dist,
],
[
-Point2DEnv.boundary_dist,
-Point2DEnv.boundary_dist,
],
color='k', linestyle='-',
)
if goal is not None:
ax.plot(goal[0], -goal[1], marker='*', color='g', markersize=15)
ax.set_ylim(
-Point2DEnv.boundary_dist - 1,
Point2DEnv.boundary_dist + 1,
)
ax.set_xlim(
-Point2DEnv.boundary_dist - 1,
Point2DEnv.boundary_dist + 1,
)
def initialize_camera(self, init_fctn):
pass
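# Interaction sketch (editorial illustration; mirrors the __main__ demo at the
# bottom of this file):
#
#   env = Multiobj2DEnv(images_are_rgb=True, show_goal=False)
#   obs = env.reset()                           # dict of observation/goal keys
#   obs, reward, done, info = env.step(np.array([0.5, -0.25]))
#   img = env.get_image(84, 84)                 # (84, 84, 3) array when images_are_rgb=True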
class Multiobj2DWallEnv(Multiobj2DEnv):
"""Point2D with walls"""
def __init__(
self,
wall_shape="",
wall_thickness=1.0,
inner_wall_max_dist=1,
change_walls=False,
**kwargs
):
self.quick_init(locals())
super().__init__(**kwargs)
self.inner_wall_max_dist = inner_wall_max_dist
self.wall_shape = wall_shape
self.wall_shapes = ["right", "left", "bottom", "top"]
self.wall_thickness = wall_thickness
self.change_walls = change_walls
if self.change_walls:
self.randomize_walls()
else:
self.fixed_wall(wall_shape)
def randomize_walls(self):
self.walls = []
random.shuffle(self.wall_shapes)
for w in self.wall_shapes[:3]:
if np.random.uniform() < 0.333:
self.add_wall(w)
def add_wall(self, wall):
if wall == "right":
# Right wall
self.walls.append(VerticalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
))
if wall == "left":# Left wall
self.walls.append(VerticalWall(
self.ball_radius,
-self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
))
if wall == "bottom":# Bottom wall
self.walls.append(HorizontalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
))
if wall == "top":
self.walls.append(HorizontalWall(
self.ball_radius,
-self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
))
def fixed_wall(self, wall_shape):
if wall_shape == "u":
self.walls = [
# Right wall
VerticalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
),
# Left wall
VerticalWall(
self.ball_radius,
-self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
),
# Bottom wall
HorizontalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
)
]
if wall_shape == "-" or wall_shape == "h":
self.walls = [
HorizontalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
)
]
if wall_shape == "--":
self.walls = [
HorizontalWall(
self.ball_radius,
0,
-self.inner_wall_max_dist,
self.inner_wall_max_dist,
)
]
if wall_shape == "big-u":
self.walls = [
VerticalWall(
self.ball_radius,
self.inner_wall_max_dist*2,
-self.inner_wall_max_dist*2,
self.inner_wall_max_dist,
self.wall_thickness
),
# Left wall
VerticalWall(
self.ball_radius,
-self.inner_wall_max_dist*2,
-self.inner_wall_max_dist*2,
self.inner_wall_max_dist,
self.wall_thickness
),
# Bottom wall
HorizontalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist*2,
self.inner_wall_max_dist*2,
self.wall_thickness
),
]
if wall_shape == "easy-u":
self.walls = [
VerticalWall(
self.ball_radius,
self.inner_wall_max_dist*2,
-self.inner_wall_max_dist*0.5,
self.inner_wall_max_dist,
self.wall_thickness
),
# Left wall
VerticalWall(
self.ball_radius,
-self.inner_wall_max_dist*2,
-self.inner_wall_max_dist*0.5,
self.inner_wall_max_dist,
self.wall_thickness
),
# Bottom wall
HorizontalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist*2,
self.inner_wall_max_dist*2,
self.wall_thickness
),
]
if wall_shape == "big-h":
self.walls = [
# Bottom wall
HorizontalWall(
self.ball_radius,
self.inner_wall_max_dist,
-self.inner_wall_max_dist*2,
self.inner_wall_max_dist*2,
),
]
if wall_shape == "box":
self.walls = [
# Bottom wall
VerticalWall(
self.ball_radius,
0,
0,
0,
self.wall_thickness
),
]
if wall_shape == "none":
self.walls = []
# def reset(self):
# if self.change_colors:
# if self.fixed_colors:
# self.color_index = (self.color_index + 1) % 10
# self.pm_color = self.colors[self.color_index]
# else:
# rgbs = np.random.randint(0, 256, (1, 3))
# rgb = map(int, rgbs[0, :])
# self.pm_color = Color(*rgb, 255)
# else:
# pm_color = Color('blue')
# if self.change_walls:
# self.randomize_walls()
# if self.randomize_position_on_reset:
# self._position = self._sample_position(
# self.obs_range.low,
# self.obs_range.high)
# self._target_position = self.sample_goal()['state_desired_goal']
# return self._get_obs()
def reset(self):
if self.white_passed:
self.include_white = False
if self.change_colors:
self.randomize_colors()
if self.change_walls:
self.randomize_walls()
self._target_position = self.sample_goal()['state_desired_goal']
if self.randomize_position_on_reset:
self._position = self._sample_position(
self.obs_range.low,
self.obs_range.high,
)
if self.initial_pass:
self.white_passed = True
self.initial_pass = True
return self._get_obs()
if __name__ == "__main__":
import gym
import cv2
e = Multiobj2DEnv(
render_onscreen=True,
show_goal=False,
fixed_colors=True,
num_colors=7
)
from multiworld.core.image_env import ImageEnv
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
e.reset()
for i in range(1000):
img = e.get_image(100, 100)
if i % 20 == 0:
print('reset')
e.reset()
cv2.imshow('im', img)
cv2.waitKey(10)
| 33.920052 | 180 | 0.524516 |
49fac59a3cce64570d0887fff3e479694e411592 | 5,872 | py | Python | mtm/mtm.py | imclab/python-ssa-mtm | 9c081485c9bb78773f1d9a3587dd1c070e23b495 | ["MIT"] | 2 | 2019-01-09T07:10:52.000Z | 2020-07-21T00:44:50.000Z | mtm/mtm.py | imclab/python-ssa-mtm | 9c081485c9bb78773f1d9a3587dd1c070e23b495 | ["MIT"] | null | null | null | mtm/mtm.py | imclab/python-ssa-mtm | 9c081485c9bb78773f1d9a3587dd1c070e23b495 | ["MIT"] | 2 | 2019-01-09T07:10:53.000Z | 2020-07-21T00:44:52.000Z |
# -*- coding: utf-8 -*-
#
#
# mtm.py
#
# purpose: Wrap mtm2.f
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.tiddlyspot.com/
# created: 27-Jul-2012
# modified: Fri 27 Jul 2012 06:43:45 PM BRT
#
# obs: MultiTaper Spectral Analysis.
#
import mtm2
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.font_manager as fm
#from matplotlib import rcParams
#rcParams['font.family'] = 'sans-serif'
#rcParams['font.sans-serif'] = ['Tahoma']
def compute_mtm(arr, dt=1.0, npi=2, nwin=3, f1=0.0, f2=0.0, inorm=0, ispec=1,
iresh=1, ithresh=3, inoise=0, ilog=1, ismooth=1, isignal=0,
irecon=0, nsignals=0, iplotresh=1, iplotftest=1, iplotsmoo=1,
iplotraw=1, iplotconf=1, icon=0):
r"""This function compute mtm spectrum on a time-series array. It returns
five arrays:
* 3 columns array spec_raw with the frequency, the raw spectrum and ftest
in the last one.
    * 3 columns array spec_resh with the frequency, the reshaped signal
      spectrum and the harmonic spectrum.
* 2 columns array spec_smoo with the frequency and the smooth spectrum.
    * 5 columns array spec_conf with the frequency and the confidence levels
      for the spectrum obtained.
    * 3 columns array recon with the frequency and the reconstructed signal.
    It takes a large number of parameters, which are initialized to the
    following default values:
The time step parameter: `dt` = 1.0 in years
To compute mtm on monthly data, set dt = 0.0833333333 (1/12 year)
The resolution in `p` multiples of Rayleigh frequency: npi = 2
The number of tapers: suggested nwin = 2 * npi - 1
if nwin > (2 * npi - 1) then nwin = 2 * npi - 1
The spectrum:
f1 = 0.0 (fmin)
f2 = fny = 0.5/dt (fmax)
The normalization: inorm = 0
inorm = (0) none (1) N (2) 1/dt
The spectrum resolution: ispec = 1
ispec = (0) high-resolution or (1) adaptive estimate
Reshaping flag: iresh = 1
iresh = reshaping (yes=1)?
    if iresh = 1
        ithresh = sig. level for harmonic peak detection/reshaping
        (0) 50% (1) 90% (2) 95% (3) 99% (4) 99.5% (5) 99.9%
    ithresh = 3
Some hypothesis parameters:
inoise = (0) red noise, (1) white noise, (2) locally-white noise
inoise = 0
ilog = red noise background fit: (0) linear,(1) log
ilog = 1
    ismooth = background estimation: (0) raw, (1) robust
ismooth = 1
Signal assumption:
isignal = (0) harmonic or narrowband, (1) narrowband, (2) harmonic
isignal = 0
if isignal=2 then inoise=2, iplotsmoo=0, iplotconf=0
if isignal=1 then iplotftest=0 and iplotresh=0
Reconstruction:
irecon = 0
signals to reconstruct: nsignals=0
Display option parameters:
(1) raw spectrum - iplotraw
(2) reshaped & harmonic spectra - iplotresh
(3) median smoothed background - iplotsmoo
(4) 50% 90% 95% 99% conf levels - iplotconf
(5) F-test spectrum - iplotftest
iplotresh = 1
iplotftest = 1
iplotsmoo = 1
iplotraw = 1
iplotconf = 1
Constraint Option:
icon = (0) min misfit, (1) min norm, (2) min slope, (3) min rough
icon=0
"""
if nwin > (2 * npi - 1):
nwin = 2 * npi - 1
if f2 == 0:
f2 = 0.5 / dt
if isignal == 2:
inoise = 2
iplotsmoo = 0
iplotconf = 0
if isignal == 1:
iplotftest = 0
iplotresh = 0
spec_raw, spec_resh, spec_smoo, spec_conf, recon_sig = mtm2.mtm_mann(
arr, dt, npi, nwin, f1, f2, inorm, ispec, iresh, ithresh, inoise, ilog,
ismooth, isignal, irecon, nsignals, iplotresh, iplotftest, iplotsmoo,
iplotraw, iplotconf, icon)
spec_raw = resize_spec(spec_raw)
spec_resh = resize_spec(spec_resh)
spec_smoo = resize_spec(spec_smoo)
spec_conf = resize_spec(spec_conf)
recon_sig = resize_spec(recon_sig)
return spec_raw, spec_resh, spec_smoo, spec_conf, recon_sig
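# Hedged usage sketch (not part of the original module): the monthly-data call
# pattern described in the compute_mtm docstring (dt = 1/12 year). The helper
# name and the input series are assumptions; any 1-D array of monthly values
# works, and the compiled `mtm2` extension must be importable for the call.
def compute_monthly_mtm(series):
    """Compute the MTM spectrum of a monthly series and plot the raw spectrum."""
    arr = np.atleast_2d(series)
    spec_raw, _, _, _, _ = compute_mtm(arr, dt=1.0 / 12.0, npi=2, nwin=3)
    plot_spec(spec_raw)
    return spec_raw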
def plot_spec(spec_raw):
fig, ax = plt.subplots()
#prop = fm.FontProperties(fname='/Library/Fonts/Tahoma.ttf')
#ax.set_xlabel('Frequency [years]', fontsize=10, color='black',
#fontproperties=prop)
m = np.array([1.0, 2.0, 5.0, 10.0, 20.0, 100.0])
ohm = 1.0 / m
ax.loglog(spec_raw[:, 0], spec_raw[:, 1], linewidth=0.5)
for i in range(0, 6):
ax.text(ohm[i], 200000, str(int(m[i])))
ax.set_xlabel('Frequency [1/years]', fontsize=12, color='black')
ax.set_ylabel('Spectral Power', fontsize=12, color='black')
ax.set_title('MTM Spectrum', fontsize=12, color='black')
ax.text(2, 200000, 'Period [year]')
ax.text(.01, 0.01, r'Nino3 from Speedy')
#ax.axis([0.005, 10, 0.01, 1000000])
plt.grid(True)
plt.show()
def resize_spec(arr):
arr = np.asanyarray(arr)
if arr.ndim != 2:
raise ValueError("Array must be 2D.")
size = 1
nrow, ncol = arr.shape
for i in range(1, nrow):
if arr[i, 0] != 0:
size = size + 1
if size != 1:
arr = arr[0:size, 0:ncol]
return arr
def find_inequalities(arr, fmin, fmax):
    """Return the first and last row indices whose frequency (column 0) lies
    within the [fmin, fmax] band."""
    if3 = 0
    nrow, ncol = arr.shape
    if7 = nrow - 1
    while if3 < nrow and arr[if3, 0] < fmin:
        if3 = if3 + 1
    while if7 > 0 and arr[if7, 0] > fmax:
        if7 = if7 - 1
return if3, if7
if __name__ == '__main__':
X = [1.0135518, -0.7113242, -0.3906069, 1.565203, 0.0439317, -1.1656093,
1.0701692, 1.0825379, -1.2239744, -0.0321446, 1.1815997, -1.4969448,
-0.7455299, 1.0973884, -0.2188716, -1.0719573, 0.9922009, 0.4374216,
-1.6880219, 0.2609807]
X = np.atleast_2d(X)
spec_raw, spec_resh, spec_smoo, spec_conf, recon_sig = compute_mtm(X)
plot_spec(spec_raw)
| 32.622222
| 79
| 0.604394
|
9f2b9a943dd8c9ba18ae389f7c8571bdad3ba99f
| 31,252
|
py
|
Python
|
pymatgen/io/abinit/abitimer.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | 1
|
2021-10-18T01:26:50.000Z
|
2021-10-18T01:26:50.000Z
|
pymatgen/io/abinit/abitimer.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | null | null | null |
pymatgen/io/abinit/abitimer.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects for extracting timing data from the ABINIT output files.
It also provides tools to analyze and to visualize the parallel efficiency.
"""
import sys
import os
import collections
import numpy as np
from monty.string import is_string, list_strings
from pymatgen.util.num import minloc
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def alternate(*iterables):
"""
[a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]
>>> alternate([1,4], [2,5], [3,6])
[1, 2, 3, 4, 5, 6]
"""
items = []
for tup in zip(*iterables):
items.extend([item for item in tup])
return items
class AbinitTimerParserError(Exception):
"""Errors raised by AbinitTimerParser"""
class AbinitTimerParser(collections.abc.Iterable):
"""
Responsible for parsing a list of output files, extracting the timing results
and analyzing the results.
Assume the Abinit output files have been produced with `timopt -1`.
Example:
parser = AbinitTimerParser()
parser.parse(list_of_files)
    To analyze all *.abo files within top, use:
parser, paths, okfiles = AbinitTimerParser.walk(top=".", ext=".abo")
"""
# The markers enclosing the data.
BEGIN_TAG = "-<BEGIN_TIMER"
END_TAG = "-<END_TIMER>"
Error = AbinitTimerParserError
# DEFAULT_MPI_RANK = "0"
@classmethod
def walk(cls, top=".", ext=".abo"):
"""
Scan directory tree starting from top, look for files with extension `ext` and
parse timing data.
Return: (parser, paths, okfiles)
where `parser` is the new object, `paths` is the list of files found and `okfiles`
is the list of files that have been parsed successfully.
(okfiles == paths) if all files have been parsed.
"""
paths = []
for root, dirs, files in os.walk(top):
for f in files:
if f.endswith(ext):
paths.append(os.path.join(root, f))
parser = cls()
okfiles = parser.parse(paths)
return parser, paths, okfiles
def __init__(self):
"""Initialize object."""
# List of files that have been parsed.
self._filenames = []
# timers[filename][mpi_rank]
# contains the timer extracted from the file filename associated to the MPI rank mpi_rank.
self._timers = collections.OrderedDict()
def __iter__(self):
return self._timers.__iter__()
def __len__(self):
return len(self._timers)
@property
def filenames(self):
"""List of files that have been parsed successfully."""
return self._filenames
def parse(self, filenames):
"""
Read and parse a filename or a list of filenames.
Files that cannot be opened are ignored. A single filename may also be given.
Return: list of successfully read files.
"""
filenames = list_strings(filenames)
read_ok = []
for fname in filenames:
try:
fh = open(fname)
except IOError:
logger.warning("Cannot open file %s" % fname)
continue
try:
self._read(fh, fname)
read_ok.append(fname)
except self.Error as e:
logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
continue
finally:
fh.close()
# Add read_ok to the list of files that have been parsed.
self._filenames.extend(read_ok)
return read_ok
def _read(self, fh, fname):
"""Parse the TIMER section"""
if fname in self._timers:
raise self.Error("Cannot overwrite timer associated to: %s " % fname)
def parse_line(line):
"""Parse single line."""
name, vals = line[:25], line[25:].split()
try:
ctime, cfract, wtime, wfract, ncalls, gflops = vals
except ValueError:
# v8.3 Added two columns at the end [Speedup, Efficacity]
ctime, cfract, wtime, wfract, ncalls, gflops, speedup, eff = vals
return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)
sections, info, cpu_time, wall_time = None, None, None, None
data = {}
inside, has_timer = 0, False
for line in fh:
# print(line.strip())
if line.startswith(self.BEGIN_TAG):
has_timer = True
sections = []
info = {}
inside = 1
line = line[len(self.BEGIN_TAG):].strip()[:-1]
info["fname"] = fname
for tok in line.split(","):
key, val = [s.strip() for s in tok.split("=")]
info[key] = val
elif line.startswith(self.END_TAG):
inside = 0
timer = AbinitTimer(sections, info, cpu_time, wall_time)
mpi_rank = info["mpi_rank"]
data[mpi_rank] = timer
elif inside:
inside += 1
line = line[1:].strip()
if inside == 2:
d = dict()
for tok in line.split(","):
key, val = [s.strip() for s in tok.split("=")]
d[key] = float(val)
cpu_time, wall_time = d["cpu_time"], d["wall_time"]
elif inside > 5:
sections.append(parse_line(line))
                else:
                    parser_failed = False
                    try:
                        parse_line(line)
                    except Exception:
                        parser_failed = True
                    if not parser_failed:
                        raise self.Error("line should be empty: " + str(inside) + line)
if not has_timer:
raise self.Error("%s: No timer section found" % fname)
# Add it to the dict
self._timers[fname] = data
def timers(self, filename=None, mpi_rank="0"):
"""
Return the list of timers associated to the given `filename` and MPI rank mpi_rank.
"""
if filename is not None:
return [self._timers[filename][mpi_rank]]
else:
return [self._timers[filename][mpi_rank] for filename in self._filenames]
def section_names(self, ordkey="wall_time"):
"""
Return the names of sections ordered by ordkey.
For the time being, the values are taken from the first timer.
"""
section_names = []
# FIXME this is not trivial
for idx, timer in enumerate(self.timers()):
if idx == 0:
section_names = [s.name for s in timer.order_sections(ordkey)]
# check = section_names
# else:
# new_set = set( [s.name for s in timer.order_sections(ordkey)])
# section_names.intersection_update(new_set)
# check = check.union(new_set)
# if check != section_names:
# print("sections", section_names)
# print("check",check)
return section_names
def get_sections(self, section_name):
"""
        Return the list of sections stored in self.timers() given `section_name`.
        A fake section is returned if the timer does not have section_name.
"""
sections = []
for timer in self.timers():
for sect in timer.sections:
if sect.name == section_name:
sections.append(sect)
break
else:
sections.append(AbinitTimerSection.fake())
return sections
def pefficiency(self):
"""
Analyze the parallel efficiency.
Return: :class:`ParallelEfficiency` object.
"""
timers = self.timers()
# Number of CPUs employed in each calculation.
ncpus = [timer.ncpus for timer in timers]
# Find the minimum number of cpus used and its index in timers.
min_idx = minloc(ncpus)
min_ncpus = ncpus[min_idx]
# Reference timer
ref_t = timers[min_idx]
# Compute the parallel efficiency (total and section efficiency)
peff = {}
        ctime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        wtime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
n = len(timers)
peff["total"] = {}
peff["total"]["cpu_time"] = ctime_peff
peff["total"]["wall_time"] = wtime_peff
peff["total"]["cpu_fract"] = n * [100]
peff["total"]["wall_fract"] = n * [100]
for sect_name in self.section_names():
# print(sect_name)
ref_sect = ref_t.get_section(sect_name)
sects = [t.get_section(sect_name) for t in timers]
try:
ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
except ZeroDivisionError:
ctime_peff = n * [-1]
wtime_peff = n * [-1]
assert sect_name not in peff
peff[sect_name] = {}
peff[sect_name]["cpu_time"] = ctime_peff
peff[sect_name]["wall_time"] = wtime_peff
peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]
return ParallelEfficiency(self._filenames, min_idx, peff)
def summarize(self, **kwargs):
"""
Return pandas DataFrame with the most important results stored in the timers.
"""
import pandas as pd
colnames = ["fname", "wall_time", "cpu_time", "mpi_nprocs", "omp_nthreads", "mpi_rank"]
frame = pd.DataFrame(columns=colnames)
for i, timer in enumerate(self.timers()):
frame = frame.append({k: getattr(timer, k) for k in colnames}, ignore_index=True)
frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]
# Compute parallel efficiency (use the run with min number of cpus to normalize).
i = frame["tot_ncpus"].values.argmin()
ref_wtime = frame.iloc[i]["wall_time"]
ref_ncpus = frame.iloc[i]["tot_ncpus"]
frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])
return frame
@add_fig_kwargs
def plot_efficiency(self, key="wall_time", what="good+bad", nmax=5, ax=None, **kwargs):
"""
Plot the parallel efficiency
Args:
key: Parallel efficiency is computed using the wall_time.
what: Specifies what to plot: `good` for sections with good parallel efficiency.
`bad` for sections with bad efficiency. Options can be concatenated with `+`.
nmax: Maximum number of entries in plot
ax: matplotlib :class:`Axes` or None if a new figure should be created.
================ ====================================================
kwargs Meaning
================ ====================================================
linewidth matplotlib linewidth. Default: 2.0
markersize matplotlib markersize. Default: 10
================ ====================================================
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
lw = kwargs.pop("linewidth", 2.0)
msize = kwargs.pop("markersize", 10)
what = what.split("+")
timers = self.timers()
peff = self.pefficiency()
n = len(timers)
xx = np.arange(n)
# ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])
ax.set_prop_cycle(color=['g', 'b', 'c', 'm', 'y', 'k'])
lines, legend_entries = [], []
# Plot sections with good efficiency.
if "good" in what:
good = peff.good_sections(key=key, nmax=nmax)
for g in good:
# print(g, peff[g])
yy = peff[g][key]
line, = ax.plot(xx, yy, "-->", linewidth=lw, markersize=msize)
lines.append(line)
legend_entries.append(g)
# Plot sections with bad efficiency.
if "bad" in what:
bad = peff.bad_sections(key=key, nmax=nmax)
for b in bad:
# print(b, peff[b])
yy = peff[b][key]
line, = ax.plot(xx, yy, "-.<", linewidth=lw, markersize=msize)
lines.append(line)
legend_entries.append(b)
# Add total if not already done
if "total" not in legend_entries:
yy = peff["total"][key]
total_line, = ax.plot(xx, yy, "r", linewidth=lw, markersize=msize)
lines.append(total_line)
legend_entries.append("total")
ax.legend(lines, legend_entries, loc="best", shadow=True)
# ax.set_title(title)
ax.set_xlabel('Total_NCPUs')
ax.set_ylabel('Efficiency')
ax.grid(True)
# Set xticks and labels.
labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(xx)
ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)
return fig
@add_fig_kwargs
def plot_pie(self, key="wall_time", minfract=0.05, **kwargs):
"""
Plot pie charts of the different timers.
Args:
key: Keyword used to extract data from timers.
            minfract: Don't show sections whose relative weight is less than minfract.
Returns:
`matplotlib` figure
"""
timers = self.timers()
n = len(timers)
# Make square figures and axes
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
fig = plt.gcf()
gspec = GridSpec(n, 1)
for idx, timer in enumerate(timers):
ax = plt.subplot(gspec[idx, 0])
ax.set_title(str(timer))
timer.pie(ax=ax, key=key, minfract=minfract, show=False)
return fig
@add_fig_kwargs
def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
"""
Plot stacked histogram of the different timers.
Args:
            key: Keyword used to extract data from the timers. Only the first `nmax`
                sections with largest value are shown.
            nmax: Maximum number of sections to show. Other entries are grouped together
in the `others` section.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
mpi_rank = "0"
timers = self.timers(mpi_rank=mpi_rank)
n = len(timers)
names, values = [], []
rest = np.zeros(n)
for idx, sname in enumerate(self.section_names(ordkey=key)):
sections = self.get_sections(sname)
svals = np.asarray([s.__dict__[key] for s in sections])
if idx < nmax:
names.append(sname)
values.append(svals)
else:
rest += svals
names.append("others (nmax=%d)" % nmax)
values.append(rest)
# The dataset is stored in values. Now create the stacked histogram.
ind = np.arange(n) # the locations for the groups
width = 0.35 # the width of the bars
colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']
bars = []
bottom = np.zeros(n)
for idx, vals in enumerate(values):
color = colors[idx]
bar = ax.bar(ind, vals, width, color=color, bottom=bottom)
bars.append(bar)
bottom += vals
ax.set_ylabel(key)
ax.set_title("Stacked histogram with the %d most important sections" % nmax)
ticks = ind + width / 2.0
labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation=15)
# Add legend.
ax.legend([bar[0] for bar in bars], names, loc="best")
return fig
def plot_all(self, show=True, **kwargs):
"""
Call all plot methods provided by the parser.
"""
figs = []
app = figs.append
app(self.plot_stacked_hist(show=show))
app(self.plot_efficiency(show=show))
app(self.plot_pie(show=show))
return figs
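# Hedged usage sketch (not part of the original module): a typical workflow that
# combines the walk(), summarize() and plotting helpers defined above. The helper
# name, the `top` directory and the ".abo" extension are assumptions; adapt them
# to the actual run directory.
def example_abitimer_report(top=".", ext=".abo"):
    """Parse all timing sections below `top` and return the summary DataFrame."""
    parser, paths, okfiles = AbinitTimerParser.walk(top=top, ext=ext)
    if not okfiles:
        return None
    parser.plot_efficiency(show=False)
    parser.plot_stacked_hist(show=False)
    return parser.summarize()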
class ParallelEfficiency(dict):
"""
Store results concerning the parallel efficiency of the job.
"""
def __init__(self, filenames, ref_idx, *args, **kwargs):
"""
Args:
            filenames: List of filenames
ref_idx: Index of the Reference time (calculation done with the smallest number of cpus)
"""
self.update(*args, **kwargs)
self.filenames = filenames
self._ref_idx = ref_idx
def _order_by_peff(self, key, criterion, reverse=True):
self.estimator = {
"min": min,
"max": max,
"mean": lambda items: sum(items) / len(items),
}[criterion]
data = []
for (sect_name, peff) in self.items():
# Ignore values where we had a division by zero.
if all([v != -1 for v in peff[key]]):
values = peff[key][:]
# print(sect_name, values)
if len(values) > 1:
ref_value = values.pop(self._ref_idx)
assert ref_value == 1.0
data.append((sect_name, self.estimator(values)))
data.sort(key=lambda t: t[1], reverse=reverse)
return tuple([sect_name for (sect_name, e) in data])
def totable(self, stop=None, reverse=True):
"""
Return table (list of lists) with timing results.
Args:
stop: Include results up to stop. None for all
reverse: Put items with highest wall_time in first positions if True.
"""
osects = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)
if stop is not None:
osects = osects[:stop]
n = len(self.filenames)
table = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
for sect_name in osects:
peff = self[sect_name]["wall_time"]
fract = self[sect_name]["wall_fract"]
vals = alternate(peff, fract)
table.append([sect_name] + ["%.2f" % val for val in vals])
return table
def good_sections(self, key="wall_time", criterion="mean", nmax=5):
"""
Return first `nmax` sections with best value of key `key` using criterion `criterion`.
"""
good_sections = self._order_by_peff(key, criterion=criterion)
return good_sections[:nmax]
def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
"""
Return first `nmax` sections with worst value of key `key` using criterion `criterion`.
"""
bad_sections = self._order_by_peff(key, criterion=criterion, reverse=False)
return bad_sections[:nmax]
class AbinitTimerSection:
"""Record with the timing results associated to a section of code."""
STR_FIELDS = [
"name"
]
NUMERIC_FIELDS = [
"wall_time",
"wall_fract",
"cpu_time",
"cpu_fract",
"ncalls",
"gflops",
]
FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)
@classmethod
def fake(cls):
"""Return a fake section. Mainly used to fill missing entries if needed."""
return AbinitTimerSection("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)
def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
"""
Args:
name: Name of the sections.
cpu_time: CPU time in seconds.
cpu_fract: Percentage of CPU time.
wall_time: Wall-time in seconds.
wall_fract: Percentage of wall-time.
ncalls: Number of calls
gflops: Gigaflops.
"""
self.name = name.strip()
self.cpu_time = float(cpu_time)
self.cpu_fract = float(cpu_fract)
self.wall_time = float(wall_time)
self.wall_fract = float(wall_fract)
self.ncalls = int(ncalls)
self.gflops = float(gflops)
def to_tuple(self):
"""Convert object to tuple."""
return tuple([self.__dict__[at] for at in AbinitTimerSection.FIELDS])
def to_dict(self):
"""Convert object to dictionary."""
return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}
def to_csvline(self, with_header=False):
"""Return a string with data in CSV format. Add header if `with_header`"""
string = ""
if with_header:
string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"
string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
return string
def __str__(self):
"""String representation."""
string = ""
for a in AbinitTimerSection.FIELDS:
            string += a + " = " + str(self.__dict__[a]) + ","
return string[:-1]
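# Hedged illustration (not part of the original module): a fake section rendered
# with AbinitTimerSection.fake().to_csvline(with_header=True) yields the header
#   "# name wall_time wall_fract cpu_time cpu_fract ncalls gflops"
# followed by the data line "fake, 0.0, 0.0, 0.0, 0.0, -1, 0.0".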
class AbinitTimer:
"""Container class storing the timing results."""
def __init__(self, sections, info, cpu_time, wall_time):
"""
Args:
sections: List of sections
info: Dictionary with extra info.
cpu_time: Cpu-time in seconds.
wall_time: Wall-time in seconds.
"""
# Store sections and names
self.sections = tuple(sections)
self.section_names = tuple([s.name for s in self.sections])
self.info = info
self.cpu_time = float(cpu_time)
self.wall_time = float(wall_time)
self.mpi_nprocs = int(info["mpi_nprocs"])
self.omp_nthreads = int(info["omp_nthreads"])
self.mpi_rank = info["mpi_rank"].strip()
self.fname = info["fname"].strip()
def __str__(self):
string = "file=%s, wall_time=%.1f, mpi_nprocs=%d, omp_nthreads=%d" % (
self.fname, self.wall_time, self.mpi_nprocs, self.omp_nthreads)
# string += ", rank = " + self.mpi_rank
return string
@property
def ncpus(self):
"""Total number of CPUs employed."""
return self.mpi_nprocs * self.omp_nthreads
def get_section(self, section_name):
"""Return section associated to `section_name`."""
try:
idx = self.section_names.index(section_name)
except Exception:
raise
sect = self.sections[idx]
assert sect.name == section_name
return sect
def to_csv(self, fileobj=sys.stdout):
"""Write data on file fileobj using CSV format."""
openclose = is_string(fileobj)
if openclose:
fileobj = open(fileobj, "w")
for idx, section in enumerate(self.sections):
fileobj.write(section.to_csvline(with_header=(idx == 0)))
fileobj.flush()
if openclose:
fileobj.close()
def to_table(self, sort_key="wall_time", stop=None):
"""Return a table (list of lists) with timer data"""
table = [list(AbinitTimerSection.FIELDS), ]
ord_sections = self.order_sections(sort_key)
if stop is not None:
ord_sections = ord_sections[:stop]
for osect in ord_sections:
row = [str(item) for item in osect.to_tuple()]
table.append(row)
return table
# Maintain old API
totable = to_table
def get_dataframe(self, sort_key="wall_time", **kwargs):
"""
Return a pandas DataFrame with entries sorted according to `sort_key`.
"""
import pandas as pd
frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)
for osect in self.order_sections(sort_key):
frame = frame.append(osect.to_dict(), ignore_index=True)
# Monkey patch
frame.info = self.info
frame.cpu_time = self.cpu_time
frame.wall_time = self.wall_time
frame.mpi_nprocs = self.mpi_nprocs
frame.omp_nthreads = self.omp_nthreads
frame.mpi_rank = self.mpi_rank
frame.fname = self.fname
return frame
def get_values(self, keys):
"""
Return a list of values associated to a particular list of keys.
"""
if is_string(keys):
return [s.__dict__[keys] for s in self.sections]
else:
values = []
for k in keys:
values.append([s.__dict__[k] for s in self.sections])
return values
def names_and_values(self, key, minval=None, minfract=None, sorted=True):
"""
Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract
Return the names of the sections and the corresponding values.
"""
values = self.get_values(key)
names = self.get_values("name")
new_names, new_values = [], []
other_val = 0.0
if minval is not None:
assert minfract is None
for n, v in zip(names, values):
if v >= minval:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minval " + str(minval))
new_values.append(other_val)
elif minfract is not None:
assert minval is None
total = self.sum_sections(key)
for n, v in zip(names, values):
if v / total >= minfract:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minfract " + str(minfract))
new_values.append(other_val)
else:
# all values
new_names, new_values = names, values
if sorted:
# Sort new_values and rearrange new_names.
nandv = [nv for nv in zip(new_names, new_values)]
nandv.sort(key=lambda t: t[1])
new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]
return new_names, new_values
def _reduce_sections(self, keys, operator):
return operator(self.get_values(keys))
def sum_sections(self, keys):
"""Sum value of keys."""
return self._reduce_sections(keys, sum)
def order_sections(self, key, reverse=True):
"""Sort sections according to the value of key."""
return sorted(self.sections, key=lambda s: s.__dict__[key], reverse=reverse)
@add_fig_kwargs
def cpuwall_histogram(self, ax=None, **kwargs):
"""
Plot histogram with cpu- and wall-time on axis `ax`.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns: `matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
nk = len(self.sections)
ind = np.arange(nk) # the x locations for the groups
width = 0.35 # the width of the bars
cpu_times = self.get_values("cpu_time")
rects1 = plt.bar(ind, cpu_times, width, color='r')
wall_times = self.get_values("wall_time")
rects2 = plt.bar(ind + width, wall_times, width, color='y')
        # Add ylabel and title
ax.set_ylabel('Time (s)')
# plt.title('CPU-time and Wall-time for the different sections of the code')
ticks = self.get_values("name")
ax.set_xticks(ind + width, ticks)
ax.legend((rects1[0], rects2[0]), ('CPU', 'Wall'), loc="best")
return fig
@add_fig_kwargs
def pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
"""
Plot pie chart for this timer.
Args:
key: Keyword used to extract data from the timer.
            minfract: Don't show sections whose relative weight is less than minfract.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns: `matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Set aspect ratio to be equal so that pie is drawn as a circle.
ax.axis("equal")
        # Don't show sections whose value is less than minfract
labels, vals = self.names_and_values(key, minfract=minfract)
ax.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)
return fig
@add_fig_kwargs
def scatter_hist(self, ax=None, **kwargs):
"""
Scatter plot + histogram.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns: `matplotlib` figure
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
ax, fig, plt = get_ax_fig_plt(ax=ax)
x = np.asarray(self.get_values("cpu_time"))
y = np.asarray(self.get_values("wall_time"))
# the scatter plot:
axScatter = plt.subplot(1, 1, 1)
axScatter.scatter(x, y)
axScatter.set_aspect("auto")
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
# make some labels invisible
plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
lim = (int(xymax / binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
# the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
# thus there is no need to manually adjust the xlim and ylim of these axis.
# axHistx.axis["bottom"].major_ticklabels.set_visible(False)
for tl in axHistx.get_xticklabels():
tl.set_visible(False)
axHistx.set_yticks([0, 50, 100])
# axHisty.axis["left"].major_ticklabels.set_visible(False)
for tl in axHisty.get_yticklabels():
tl.set_visible(False)
axHisty.set_xticks([0, 50, 100])
# plt.draw()
return fig
| 33.895879
| 119
| 0.565308
|
ebdd9074b2abe09a12e479c79af89c5258c3c947
| 10,110
|
py
|
Python
|
wetterdienst/provider/eccc/observation/metadata/unit.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 155
|
2020-07-03T05:09:22.000Z
|
2022-03-28T06:57:39.000Z
|
wetterdienst/provider/eccc/observation/metadata/unit.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 453
|
2020-07-02T21:21:52.000Z
|
2022-03-31T21:35:36.000Z
|
wetterdienst/provider/eccc/observation/metadata/unit.py
|
bh-chaker/wetterdienst
|
b0d51bb4c7392eb47834e4978e26882d74b22e35
|
[
"MIT"
] | 21
|
2020-09-07T12:13:27.000Z
|
2022-03-26T16:26:09.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from wetterdienst.metadata.unit import OriginUnit, SIUnit, UnitEnum
from wetterdienst.util.parameter import DatasetTreeCore
class EcccObservationUnit(DatasetTreeCore):
class HOURLY(UnitEnum):
TEMPERATURE_AIR_MEAN_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MEAN_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
TEMPERATURE_DEW_POINT_MEAN_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_DEW_POINT_MEAN_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
HUMIDITY = OriginUnit.PERCENT.value, SIUnit.PERCENT.value
QUALITY_HUMIDITY = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
WIND_DIRECTION = (
OriginUnit.WIND_DIRECTION.value,
SIUnit.WIND_DIRECTION.value,
)
QUALITY_WIND_DIRECTION = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
WIND_SPEED = (
OriginUnit.KILOMETER_PER_HOUR.value,
SIUnit.METER_PER_SECOND.value,
)
QUALITY_WIND_SPEED = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
VISIBILITY = OriginUnit.KILOMETER.value, SIUnit.METER.value
QUALITY_VISIBILITY = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
PRESSURE_AIR_SITE = (
OriginUnit.KILOPASCAL.value,
SIUnit.PASCAL.value,
)
QUALITY_PRESSURE_AIR_SITE = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
HUMIDEX = OriginUnit.DIMENSIONLESS.value, SIUnit.DIMENSIONLESS.value
QUALITY_HUMIDEX = OriginUnit.DIMENSIONLESS.value, SIUnit.DIMENSIONLESS.value
WIND_GUST_MAX = (
OriginUnit.KILOMETER_PER_HOUR.value,
SIUnit.METER_PER_SECOND.value,
)
QUALITY_WIND_GUST_MAX = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
WEATHER = OriginUnit.DIMENSIONLESS.value, SIUnit.DIMENSIONLESS.value
class DAILY(UnitEnum):
        # Data Quality: quality of all variables?
TEMPERATURE_AIR_MAX_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MAX_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
TEMPERATURE_AIR_MIN_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MIN_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
TEMPERATURE_AIR_MEAN_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MEAN_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
NDAYS_HEATING_DEGREE = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_NDAYS_HEATING_DEGREE = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
NDAYS_COOLING_DEGREE = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_NDAYS_COOLING_DEGREE = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
PRECIPITATION_HEIGHT_LIQUID = (
OriginUnit.MILLIMETER.value,
SIUnit.KILOGRAM_PER_SQUARE_METER.value,
)
QUALITY_PRECIPITATION_HEIGHT_LIQUID = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
SNOW_DEPTH_NEW = OriginUnit.CENTIMETER.value, SIUnit.METER.value
QUALITY_SNOW_DEPTH_NEW = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
PRECIPITATION_HEIGHT = (
OriginUnit.MILLIMETER.value,
SIUnit.KILOGRAM_PER_SQUARE_METER.value,
)
QUALITY_PRECIPITATION_HEIGHT = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
SNOW_DEPTH = OriginUnit.CENTIMETER.value, SIUnit.METER.value
QUALITY_SNOW_DEPTH = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
WIND_DIRECTION_GUST_MAX = (
OriginUnit.WIND_DIRECTION.value,
SIUnit.WIND_DIRECTION.value,
)
QUALITY_WIND_DIRECTION_GUST_MAX = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
WIND_GUST_MAX = (
OriginUnit.KILOMETER_PER_HOUR.value,
SIUnit.METER_PER_SECOND.value,
)
QUALITY_WIND_GUST_MAX = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
class MONTHLY(UnitEnum):
TEMPERATURE_AIR_MAX_200_MEAN = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MAX_200_MEAN = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
TEMPERATURE_AIR_MIN_200_MEAN = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MIN_200_MEAN = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
TEMPERATURE_AIR_MEAN_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MEAN_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
TEMPERATURE_AIR_MAX_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MAX_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
TEMPERATURE_AIR_MIN_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
QUALITY_TEMPERATURE_AIR_MIN_200 = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
PRECIPITATION_HEIGHT_LIQUID = (
OriginUnit.MILLIMETER.value,
SIUnit.KILOGRAM_PER_SQUARE_METER.value,
)
QUALITY_PRECIPITATION_HEIGHT_LIQUID = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
SNOW_DEPTH_NEW = OriginUnit.CENTIMETER.value, SIUnit.METER.value
QUALITY_SNOW_DEPTH_NEW = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
PRECIPITATION_HEIGHT = (
OriginUnit.MILLIMETER.value,
SIUnit.KILOGRAM_PER_SQUARE_METER.value,
)
QUALITY_PRECIPITATION_HEIGHT = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
# should name include previous day? how is it measured?
SNOW_DEPTH = OriginUnit.CENTIMETER.value, SIUnit.METER.value
QUALITY_SNOW_DEPTH = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
WIND_DIRECTION_GUST_MAX = (
OriginUnit.WIND_DIRECTION.value,
SIUnit.WIND_DIRECTION.value,
)
QUALITY_WIND_DIRECTION_GUST_MAX = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
WIND_GUST_MAX = (
OriginUnit.KILOMETER_PER_HOUR.value,
SIUnit.METER_PER_SECOND.value,
)
QUALITY_WIND_GUST_MAX = (
OriginUnit.DIMENSIONLESS.value,
SIUnit.DIMENSIONLESS.value,
)
class ANNUAL(UnitEnum):
TEMPERATURE_AIR_MAX_200_MEAN = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
TEMPERATURE_AIR_MIN_200_MEAN = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
PRECIPITATION_FREQUENCY = OriginUnit.PERCENT.value, SIUnit.PERCENT.value
TEMPERATURE_AIR_MAX_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
# 'highest temp.year'
# 'highest temp. period'
# 'highest temp. data quality'
TEMPERATURE_AIR_MIN_200 = (
OriginUnit.DEGREE_CELSIUS.value,
SIUnit.DEGREE_KELVIN.value,
)
# 'lowest temp. year'
# 'lowest temp. period'
# 'lowest temp. data quality'
PRECIPITATION_HEIGHT_MAX = (
OriginUnit.MILLIMETER.value,
SIUnit.KILOGRAM_PER_SQUARE_METER.value,
)
# 'greatest precip. year'
# 'greatest precip. period'
# 'greatest precip. data quality'
PRECIPITATION_HEIGHT_LIQUID_MAX = (
OriginUnit.MILLIMETER.value,
SIUnit.KILOGRAM_PER_SQUARE_METER.value,
)
# 'greatest rainfall year'
# 'greatest rainfall period'
# 'greatest rainfall data quality'
SNOW_DEPTH_NEW_MAX = OriginUnit.CENTIMETER.value, SIUnit.METER.value
# 'greatest snowfall year'
# 'greatest snowfall period'
# 'greatest snowfall data quality'
SNOW_DEPTH_MAX = OriginUnit.CENTIMETER.value, SIUnit.METER.value
# 'most snow on the ground year'
# 'most snow on the ground period'
# 'most snow on the ground data quality'
| 35.104167
| 84
| 0.609298
|
164cf366e05cfa38df5c72d3b5a4b3501478d058
| 59,119
|
py
|
Python
|
api/chalicelib/core/sessions.py
|
tares003/openreplay
|
098a0675286710f261425a2aece51644c86e1900
|
[
"MIT"
] | 1
|
2022-02-16T11:41:05.000Z
|
2022-02-16T11:41:05.000Z
|
api/chalicelib/core/sessions.py
|
tares003/openreplay
|
098a0675286710f261425a2aece51644c86e1900
|
[
"MIT"
] | null | null | null |
api/chalicelib/core/sessions.py
|
tares003/openreplay
|
098a0675286710f261425a2aece51644c86e1900
|
[
"MIT"
] | null | null | null |
import schemas
from chalicelib.core import events, metadata, events_ios, \
sessions_mobs, issues, projects, errors, resources, assist, performance_event
from chalicelib.utils import pg_client, helper, dev, metrics_helper
SESSION_PROJECTION_COLS = """s.project_id,
s.session_id::text AS session_id,
s.user_uuid,
s.user_id,
-- s.user_agent,
s.user_os,
s.user_browser,
s.user_device,
s.user_device_type,
s.user_country,
s.start_ts,
s.duration,
s.events_count,
s.pages_count,
s.errors_count,
s.user_anonymous_id,
s.platform,
s.issue_score,
to_jsonb(s.issue_types) AS issue_types,
favorite_sessions.session_id NOTNULL AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed """
def __group_metadata(session, project_metadata):
meta = []
for m in project_metadata.keys():
if project_metadata[m] is not None and session.get(m) is not None:
meta.append({project_metadata[m]: session[m]})
session.pop(m)
return meta
def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_viewed=False, group_metadata=False):
with pg_client.PostgresClient() as cur:
extra_query = []
if include_fav_viewed:
extra_query.append("""COALESCE((SELECT TRUE
FROM public.user_favorite_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s), FALSE) AS favorite""")
extra_query.append("""COALESCE((SELECT TRUE
FROM public.user_viewed_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s), FALSE) AS viewed""")
query = cur.mogrify(
f"""\
SELECT
s.*,
s.session_id::text AS session_id,
(SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key
{"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
{(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata._get_column_names()]) + ") AS project_metadata") if group_metadata else ''}
FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
WHERE s.project_id = %(project_id)s
AND s.session_id = %(session_id)s;""",
{"project_id": project_id, "session_id": session_id, "userId": user_id}
)
# print("===============")
# print(query)
cur.execute(query=query)
data = cur.fetchone()
if data is not None:
data = helper.dict_to_camel_case(data)
if full_data:
if data["platform"] == 'ios':
data['events'] = events_ios.get_by_sessionId(project_id=project_id, session_id=session_id)
for e in data['events']:
if e["type"].endswith("_IOS"):
e["type"] = e["type"][:-len("_IOS")]
data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
session_id=session_id)
data['mobsUrl'] = sessions_mobs.get_ios(sessionId=session_id)
else:
data['events'] = events.get_by_sessionId2_pg(project_id=project_id, session_id=session_id,
group_clickrage=True)
all_errors = events.get_errors_by_session_id(session_id=session_id)
data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
# to keep only the first stack
data['errors'] = [errors.format_first_stack_frame(e) for e in all_errors if
e['source'] == "js_exception"][
:500] # limit the number of errors to reduce the response-body size
data['userEvents'] = events.get_customs_by_sessionId2_pg(project_id=project_id,
session_id=session_id)
data['mobsUrl'] = sessions_mobs.get_web(sessionId=session_id)
data['resources'] = resources.get_by_session_id(session_id=session_id)
data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
data['issues'] = issues.get_by_session_id(session_id=session_id)
data['live'] = assist.is_live(project_id=project_id,
session_id=session_id,
project_key=data["projectKey"])
return data
else:
return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
def __get_sql_operator(op: schemas.SearchEventOperator):
return {
schemas.SearchEventOperator._is: "=",
schemas.SearchEventOperator._is_any: "IN",
schemas.SearchEventOperator._on: "=",
schemas.SearchEventOperator._on_any: "IN",
schemas.SearchEventOperator._is_not: "!=",
schemas.SearchEventOperator._not_on: "!=",
schemas.SearchEventOperator._contains: "ILIKE",
schemas.SearchEventOperator._not_contains: "NOT ILIKE",
schemas.SearchEventOperator._starts_with: "ILIKE",
schemas.SearchEventOperator._ends_with: "ILIKE",
}.get(op, "=")
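# Hedged illustration (not part of the original module): the mapping above turns a
# schema operator into its SQL counterpart, e.g. _contains -> "ILIKE" and
# _is_not -> "!="; unknown operators fall back to "=".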
def __is_negation_operator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator._is_not,
schemas.SearchEventOperator._not_on,
schemas.SearchEventOperator._not_contains]
def __reverse_sql_operator(op):
return "=" if op == "!=" else "!=" if op == "=" else "ILIKE" if op == "NOT ILIKE" else "NOT ILIKE"
def __get_sql_operator_multiple(op: schemas.SearchEventOperator):
return " IN " if op not in [schemas.SearchEventOperator._is_not, schemas.SearchEventOperator._not_on,
schemas.SearchEventOperator._not_contains] else " NOT IN "
def __get_sql_value_multiple(values):
if isinstance(values, tuple):
return values
return tuple(values) if isinstance(values, list) else (values,)
def _multiple_conditions(condition, values, value_key="value", is_not=False):
query = []
for i in range(len(values)):
k = f"{value_key}_{i}"
query.append(condition.replace(value_key, k))
return "(" + (" AND " if is_not else " OR ").join(query) + ")"
def _multiple_values(values, value_key="value"):
query_values = {}
if values is not None and isinstance(values, list):
for i in range(len(values)):
k = f"{value_key}_{i}"
query_values[k] = values[i]
return query_values
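# Hedged illustration (not part of the original module): for values ["a", "b"]
# and value_key "f_value0", the two helpers above expand to a parametrised OR
# clause plus its bind map:
#   _multiple_conditions("s.user_id = %(f_value0)s", ["a", "b"], value_key="f_value0")
#       -> "(s.user_id = %(f_value0_0)s OR s.user_id = %(f_value0_1)s)"
#   _multiple_values(["a", "b"], value_key="f_value0")
#       -> {"f_value0_0": "a", "f_value0_1": "b"}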
def _isAny_opreator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator._on_any, schemas.SearchEventOperator._is_any]
@dev.timed
def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, favorite_only=False, errors_only=False,
error_status="ALL", count_only=False, issue=None):
full_args, query_part, sort = search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id,
user_id)
with pg_client.PostgresClient() as cur:
if errors_only:
main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id, ser.status, ser.parent_error_id, ser.payload,
COALESCE((SELECT TRUE
FROM public.user_favorite_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s), FALSE) AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE er.error_id = ve.error_id
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
{query_part};""", full_args)
elif count_only:
main_query = cur.mogrify(f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions,
COUNT(DISTINCT s.user_uuid) AS count_users
{query_part};""", full_args)
elif data.group_by_user:
main_query = cur.mogrify(f"""SELECT COUNT(*) AS count, jsonb_agg(users_sessions) FILTER ( WHERE rn <= 200 ) AS sessions
FROM (SELECT user_id,
count(full_sessions) AS user_sessions_count,
jsonb_agg(full_sessions) FILTER (WHERE rn <= 1) AS last_session,
ROW_NUMBER() OVER (ORDER BY count(full_sessions) DESC) AS rn
FROM (SELECT *, ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY start_ts DESC) AS rn
FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
{query_part}
ORDER BY s.session_id desc) AS filtred_sessions
ORDER BY favorite DESC, issue_score DESC, {sort} {data.order}) AS full_sessions
GROUP BY user_id
ORDER BY user_sessions_count DESC) AS users_sessions;""",
full_args)
else:
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count, COALESCE(JSONB_AGG(full_sessions) FILTER (WHERE rn <= 200), '[]'::JSONB) AS sessions
FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY favorite DESC, issue_score DESC, session_id desc, start_ts desc) AS rn
FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
{query_part}
ORDER BY s.session_id desc) AS filtred_sessions
ORDER BY favorite DESC, issue_score DESC, {sort} {data.order}) AS full_sessions;""",
full_args)
# main_query = cur.mogrify(f"""SELECT * FROM
# (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
# {query_part}
# ORDER BY s.session_id desc) AS filtred_sessions
# ORDER BY favorite DESC, issue_score DESC, {sort} {order};""",
# full_args)
# print("--------------------")
# print(main_query)
cur.execute(main_query)
# print("--------------------")
if count_only:
return helper.dict_to_camel_case(cur.fetchone())
sessions = cur.fetchone()
total = sessions["count"]
sessions = sessions["sessions"]
# sessions = []
# total = cur.rowcount
# row = cur.fetchone()
# limit = 200
# while row is not None and len(sessions) < limit:
# if row.get("favorite"):
# limit += 1
# sessions.append(row)
# row = cur.fetchone()
if errors_only:
return sessions
if not data.group_by_user and data.sort is not None and data.sort != "session_id":
sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data.sort)],
reverse=data.order.upper() == "DESC")
return {
'total': total,
'sessions': helper.list_to_camel_case(sessions)
}
@dev.timed
def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int,
view_type: schemas.MetricViewType):
step_size = int(metrics_helper.__get_step_size(endTimestamp=data.endDate, startTimestamp=data.startDate,
density=density, factor=1, decimal=True))
full_args, query_part, sort = search_query_parts(data=data, error_status=None, errors_only=False,
favorite_only=False, issue=None, project_id=project_id,
user_id=None)
full_args["step_size"] = step_size
with pg_client.PostgresClient() as cur:
if view_type == schemas.MetricViewType.line_chart:
main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT DISTINCT ON(s.session_id) s.session_id, s.start_ts
{query_part})
SELECT generated_timestamp AS timestamp,
COUNT(s) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL ( SELECT 1 AS s
FROM full_sessions
WHERE start_ts >= generated_timestamp
AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp;""", full_args)
else:
main_query = cur.mogrify(f"""SELECT count(DISTINCT s.session_id) AS count
{query_part};""", full_args)
# print("--------------------")
# print(main_query)
cur.execute(main_query)
# print("--------------------")
if view_type == schemas.MetricViewType.line_chart:
sessions = cur.fetchall()
else:
sessions = cur.fetchone()["count"]
return sessions
def search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id, user_id):
ss_constraints = []
full_args = {"project_id": project_id, "startDate": data.startDate, "endDate": data.endDate,
"projectId": project_id, "userId": user_id}
extra_constraints = [
"s.project_id = %(project_id)s",
"s.duration IS NOT NULL"
]
extra_from = ""
fav_only_join = ""
if favorite_only and not errors_only:
fav_only_join = "LEFT JOIN public.user_favorite_sessions AS fs ON fs.session_id = s.session_id"
extra_constraints.append("fs.user_id = %(userId)s")
full_args["userId"] = user_id
events_query_part = ""
if len(data.filters) > 0:
meta_keys = None
for i, f in enumerate(data.filters):
if not isinstance(f.value, list):
f.value = [f.value]
filter_type = f.type
f.value = helper.values_for_operator(value=f.value, op=f.operator)
f_k = f"f_value{i}"
full_args = {**full_args, **_multiple_values(f.value, value_key=f_k)}
op = __get_sql_operator(f.operator) \
if filter_type not in [schemas.FilterType.events_count] else f.operator
is_any = _isAny_opreator(f.operator)
if not is_any and len(f.value) == 0:
continue
is_not = False
if __is_negation_operator(f.operator):
is_not = True
if filter_type == schemas.FilterType.user_browser:
if is_any:
extra_constraints.append('s.user_browser IS NOT NULL')
ss_constraints.append('ms.user_browser IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
ss_constraints.append(
_multiple_conditions(f'ms.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]:
if is_any:
extra_constraints.append('s.user_os IS NOT NULL')
ss_constraints.append('ms.user_os IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
ss_constraints.append(
_multiple_conditions(f'ms.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]:
if is_any:
extra_constraints.append('s.user_device IS NOT NULL')
ss_constraints.append('ms.user_device IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
ss_constraints.append(
_multiple_conditions(f'ms.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]:
if is_any:
extra_constraints.append('s.user_country IS NOT NULL')
ss_constraints.append('ms.user_country IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
ss_constraints.append(
_multiple_conditions(f'ms.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.utm_source]:
if is_any:
extra_constraints.append('s.utm_source IS NOT NULL')
ss_constraints.append('ms.utm_source IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f's.utm_source {op} %({f_k})s::text', f.value, is_not=is_not,
value_key=f_k))
ss_constraints.append(
_multiple_conditions(f'ms.utm_source {op} %({f_k})s::text', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.utm_medium]:
if is_any:
extra_constraints.append('s.utm_medium IS NOT NULL')
ss_constraints.append('ms.utm_medium IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f's.utm_medium {op} %({f_k})s::text', f.value, is_not=is_not,
value_key=f_k))
ss_constraints.append(
_multiple_conditions(f'ms.utm_medium {op} %({f_k})s::text', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.utm_campaign]:
if is_any:
extra_constraints.append('s.utm_campaign IS NOT NULL')
ss_constraints.append('ms.utm_campaign IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f's.utm_campaign {op} %({f_k})s::text', f.value, is_not=is_not,
value_key=f_k))
ss_constraints.append(
_multiple_conditions(f'ms.utm_campaign {op} %({f_k})s::text', f.value, is_not=is_not,
value_key=f_k))
elif filter_type == schemas.FilterType.duration:
if len(f.value) > 0 and f.value[0] is not None:
extra_constraints.append("s.duration >= %(minDuration)s")
ss_constraints.append("ms.duration >= %(minDuration)s")
full_args["minDuration"] = f.value[0]
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
extra_constraints.append("s.duration <= %(maxDuration)s")
ss_constraints.append("ms.duration <= %(maxDuration)s")
full_args["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.referrer:
extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"
if is_any:
extra_constraints.append('p.base_referrer IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f"p.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k))
elif filter_type == events.event_type.METADATA.ui_type:
# get metadata list only if you need it
if meta_keys is None:
meta_keys = metadata.get(project_id=project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
if f.source in meta_keys.keys():
if is_any:
extra_constraints.append(f"s.{metadata.index_to_colname(meta_keys[f.source])} IS NOT NULL")
ss_constraints.append(f"ms.{metadata.index_to_colname(meta_keys[f.source])} IS NOT NULL")
else:
extra_constraints.append(
_multiple_conditions(
f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s::text",
f.value, is_not=is_not, value_key=f_k))
ss_constraints.append(
_multiple_conditions(
f"ms.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s::text",
f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
if is_any:
extra_constraints.append('s.user_id IS NOT NULL')
ss_constraints.append('ms.user_id IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f"s.user_id {op} %({f_k})s::text", f.value, is_not=is_not, value_key=f_k))
ss_constraints.append(
_multiple_conditions(f"ms.user_id {op} %({f_k})s::text", f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.user_anonymous_id,
schemas.FilterType.user_anonymous_id_ios]:
if is_any:
extra_constraints.append('s.user_anonymous_id IS NOT NULL')
ss_constraints.append('ms.user_anonymous_id IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f"s.user_anonymous_id {op} %({f_k})s::text", f.value, is_not=is_not,
value_key=f_k))
ss_constraints.append(
_multiple_conditions(f"ms.user_anonymous_id {op} %({f_k})s::text", f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]:
if is_any:
extra_constraints.append('s.rev_id IS NOT NULL')
ss_constraints.append('ms.rev_id IS NOT NULL')
else:
extra_constraints.append(
_multiple_conditions(f"s.rev_id {op} %({f_k})s::text", f.value, is_not=is_not, value_key=f_k))
ss_constraints.append(
_multiple_conditions(f"ms.rev_id {op} %({f_k})s::text", f.value, is_not=is_not, value_key=f_k))
elif filter_type == schemas.FilterType.platform:
# op = __get_sql_operator(f.operator)
extra_constraints.append(
_multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
ss_constraints.append(
_multiple_conditions(f"ms.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
elif filter_type == schemas.FilterType.issue:
if is_any:
extra_constraints.append("array_length(s.issue_types, 1) > 0")
ss_constraints.append("array_length(ms.issue_types, 1) > 0")
else:
extra_constraints.append(
_multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
value_key=f_k))
ss_constraints.append(
_multiple_conditions(f"%({f_k})s {op} ANY (ms.issue_types)", f.value, is_not=is_not,
value_key=f_k))
elif filter_type == schemas.FilterType.events_count:
extra_constraints.append(
_multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
ss_constraints.append(
_multiple_conditions(f"ms.events_count {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
# ---------------------------------------------------------------------------
if len(data.events) > 0:
# ss_constraints = [s.decode('UTF-8') for s in ss_constraints]
events_query_from = []
event_index = 0
or_events = data.events_order == schemas.SearchEventOrder._or
# events_joiner = " FULL JOIN " if or_events else " INNER JOIN LATERAL "
events_joiner = " UNION " if or_events else " INNER JOIN LATERAL "
for i, event in enumerate(data.events):
event_type = event.type
is_any = _isAny_opreator(event.operator)
if not isinstance(event.value, list):
event.value = [event.value]
if not is_any and len(event.value) == 0 \
or event_type in [schemas.PerformanceEventType.location_dom_complete,
schemas.PerformanceEventType.location_largest_contentful_paint_time,
schemas.PerformanceEventType.location_ttfb,
schemas.PerformanceEventType.location_avg_cpu_load,
schemas.PerformanceEventType.location_avg_memory_usage
] and (event.source is None or len(event.source) == 0):
continue
op = __get_sql_operator(event.operator)
is_not = False
if __is_negation_operator(event.operator):
is_not = True
op = __reverse_sql_operator(op)
if event_index == 0 or or_events:
event_from = "%s INNER JOIN public.sessions AS ms USING (session_id)"
event_where = ["ms.project_id = %(projectId)s", "main.timestamp >= %(startDate)s",
"main.timestamp <= %(endDate)s", "ms.start_ts >= %(startDate)s",
"ms.start_ts <= %(endDate)s", "ms.duration IS NOT NULL"]
else:
event_from = "%s"
event_where = ["main.timestamp >= %(startDate)s", "main.timestamp <= %(endDate)s",
"main.session_id=event_0.session_id"]
if data.events_order == schemas.SearchEventOrder._then:
event_where.append(f"event_{event_index - 1}.timestamp <= main.timestamp")
e_k = f"e_value{i}"
s_k = e_k + "_source"
if event.type != schemas.PerformanceEventType.time_between_events:
event.value = helper.values_for_operator(value=event.value, op=event.operator)
full_args = {**full_args,
**_multiple_values(event.value, value_key=e_k),
**_multiple_values(event.source, value_key=s_k)}
# if event_type not in list(events.SUPPORTED_TYPES.keys()) \
# or event.value in [None, "", "*"] \
# and (event_type != events.event_type.ERROR.ui_type \
# or event_type != events.event_type.ERROR_IOS.ui_type):
# continue
if event_type == events.event_type.CLICK.ui_type:
event_from = event_from % f"{events.event_type.CLICK.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.CLICK.column} {op} %({e_k})s", event.value,
value_key=e_k))
elif event_type == events.event_type.INPUT.ui_type:
event_from = event_from % f"{events.event_type.INPUT.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.INPUT.column} {op} %({e_k})s", event.value,
value_key=e_k))
if event.source is not None and len(event.source) > 0:
event_where.append(_multiple_conditions(f"main.value ILIKE %(custom{i})s", event.source,
value_key=f"custom{i}"))
full_args = {**full_args, **_multiple_values(event.source, value_key=f"custom{i}")}
elif event_type == events.event_type.LOCATION.ui_type:
event_from = event_from % f"{events.event_type.LOCATION.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.LOCATION.column} {op} %({e_k})s",
event.value, value_key=e_k))
elif event_type == events.event_type.CUSTOM.ui_type:
event_from = event_from % f"{events.event_type.CUSTOM.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.CUSTOM.column} {op} %({e_k})s", event.value,
value_key=e_k))
elif event_type == events.event_type.REQUEST.ui_type:
event_from = event_from % f"{events.event_type.REQUEST.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s", event.value,
value_key=e_k))
elif event_type == events.event_type.GRAPHQL.ui_type:
event_from = event_from % f"{events.event_type.GRAPHQL.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.GRAPHQL.column} {op} %({e_k})s", event.value,
value_key=e_k))
elif event_type == events.event_type.STATEACTION.ui_type:
event_from = event_from % f"{events.event_type.STATEACTION.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.STATEACTION.column} {op} %({e_k})s",
event.value, value_key=e_k))
elif event_type == events.event_type.ERROR.ui_type:
event_from = event_from % f"{events.event_type.ERROR.table} AS main INNER JOIN public.errors AS main1 USING(error_id)"
event.source = tuple(event.source)
if not is_any and event.value not in [None, "*", ""]:
event_where.append(
_multiple_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
event.value, value_key=e_k))
if event.source[0] not in [None, "*", ""]:
event_where.append(_multiple_conditions(f"main1.source = %({s_k})s", event.value, value_key=s_k))
# ----- IOS
elif event_type == events.event_type.CLICK_IOS.ui_type:
event_from = event_from % f"{events.event_type.CLICK_IOS.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.CLICK_IOS.column} {op} %({e_k})s",
event.value, value_key=e_k))
elif event_type == events.event_type.INPUT_IOS.ui_type:
event_from = event_from % f"{events.event_type.INPUT_IOS.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.INPUT_IOS.column} {op} %({e_k})s",
event.value, value_key=e_k))
if event.source is not None and len(event.source) > 0:
event_where.append(_multiple_conditions(f"main.value ILIKE %(custom{i})s", event.source,
value_key="custom{i}"))
full_args = {**full_args, **_multiple_values(event.source, f"custom{i}")}
elif event_type == events.event_type.VIEW_IOS.ui_type:
event_from = event_from % f"{events.event_type.VIEW_IOS.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.VIEW_IOS.column} {op} %({e_k})s",
event.value, value_key=e_k))
elif event_type == events.event_type.CUSTOM_IOS.ui_type:
event_from = event_from % f"{events.event_type.CUSTOM_IOS.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.CUSTOM_IOS.column} {op} %({e_k})s",
event.value, value_key=e_k))
elif event_type == events.event_type.REQUEST_IOS.ui_type:
event_from = event_from % f"{events.event_type.REQUEST_IOS.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.REQUEST_IOS.column} {op} %({e_k})s",
event.value, value_key=e_k))
elif event_type == events.event_type.ERROR_IOS.ui_type:
event_from = event_from % f"{events.event_type.ERROR_IOS.table} AS main INNER JOIN public.crashes_ios AS main1 USING(crash_id)"
if not is_any and event.value not in [None, "*", ""]:
event_where.append(
_multiple_conditions(f"(main1.reason {op} %({e_k})s OR main1.name {op} %({e_k})s)",
event.value, value_key=e_k))
elif event_type == schemas.PerformanceEventType.fetch_failed:
event_from = event_from % f"{events.event_type.REQUEST.table} AS main "
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s",
event.value, value_key=e_k))
col = performance_event.get_col(event_type)
colname = col["column"]
event_where.append(f"main.{colname} = FALSE")
# elif event_type == schemas.PerformanceEventType.fetch_duration:
# event_from = event_from % f"{events.event_type.REQUEST.table} AS main "
# if not is_any:
# event_where.append(
# _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s",
# event.value, value_key=e_k))
# col = performance_event.get_col(event_type)
# colname = col["column"]
# tname = "main"
# e_k += "_custom"
# full_args = {**full_args, **_multiple_values(event.source, value_key=e_k)}
# event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " +
# _multiple_conditions(f"{tname}.{colname} {event.sourceOperator} %({e_k})s",
# event.source, value_key=e_k))
elif event_type in [schemas.PerformanceEventType.location_dom_complete,
schemas.PerformanceEventType.location_largest_contentful_paint_time,
schemas.PerformanceEventType.location_ttfb,
schemas.PerformanceEventType.location_avg_cpu_load,
schemas.PerformanceEventType.location_avg_memory_usage
]:
event_from = event_from % f"{events.event_type.LOCATION.table} AS main "
col = performance_event.get_col(event_type)
colname = col["column"]
tname = "main"
if col.get("extraJoin") is not None:
tname = "ej"
event_from += f" INNER JOIN {col['extraJoin']} AS {tname} USING(session_id)"
event_where += [f"{tname}.timestamp >= main.timestamp", f"{tname}.timestamp >= %(startDate)s",
f"{tname}.timestamp <= %(endDate)s"]
if not is_any:
event_where.append(
_multiple_conditions(f"main.{events.event_type.LOCATION.column} {op} %({e_k})s",
event.value, value_key=e_k))
e_k += "_custom"
full_args = {**full_args, **_multiple_values(event.source, value_key=e_k)}
event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " +
_multiple_conditions(f"{tname}.{colname} {event.sourceOperator} %({e_k})s",
event.source, value_key=e_k))
elif event_type == schemas.PerformanceEventType.time_between_events:
event_from = event_from % f"{getattr(events.event_type, event.value[0].type).table} AS main INNER JOIN {getattr(events.event_type, event.value[1].type).table} AS main2 USING(session_id) "
if not isinstance(event.value[0].value, list):
event.value[0].value = [event.value[0].value]
if not isinstance(event.value[1].value, list):
event.value[1].value = [event.value[1].value]
event.value[0].value = helper.values_for_operator(value=event.value[0].value,
op=event.value[0].operator)
event.value[1].value = helper.values_for_operator(value=event.value[1].value,
op=event.value[0].operator)
e_k1 = e_k + "_e1"
e_k2 = e_k + "_e2"
full_args = {**full_args,
**_multiple_values(event.value[0].value, value_key=e_k1),
**_multiple_values(event.value[1].value, value_key=e_k2)}
s_op = __get_sql_operator(event.value[0].operator)
event_where += ["main2.timestamp >= %(startDate)s", "main2.timestamp <= %(endDate)s"]
if event_index > 0 and not or_events:
event_where.append("main2.session_id=event_0.session_id")
event_where.append(
_multiple_conditions(
f"main.{getattr(events.event_type, event.value[0].type).column} {s_op} %({e_k1})s",
event.value[0].value, value_key=e_k1))
s_op = __get_sql_operator(event.value[1].operator)
event_where.append(
_multiple_conditions(
f"main2.{getattr(events.event_type, event.value[1].type).column} {s_op} %({e_k2})s",
event.value[1].value, value_key=e_k2))
e_k += "_custom"
full_args = {**full_args, **_multiple_values(event.source, value_key=e_k)}
event_where.append(
_multiple_conditions(f"main2.timestamp - main.timestamp {event.sourceOperator} %({e_k})s",
event.source, value_key=e_k))
else:
continue
if event_index == 0 or or_events:
event_where += ss_constraints
if is_not:
if event_index == 0 or or_events:
events_query_from.append(f"""\
(SELECT
session_id,
0 AS timestamp
FROM sessions
WHERE EXISTS(SELECT session_id
FROM {event_from}
WHERE {" AND ".join(event_where)}
AND sessions.session_id=ms.session_id) IS FALSE
AND project_id = %(projectId)s
AND start_ts >= %(startDate)s
AND start_ts <= %(endDate)s
AND duration IS NOT NULL
) {"" if or_events else (f"AS event_{event_index}" + ("ON(TRUE)" if event_index > 0 else ""))}\
""")
else:
events_query_from.append(f"""\
(SELECT
event_0.session_id,
event_{event_index - 1}.timestamp AS timestamp
WHERE EXISTS(SELECT session_id FROM {event_from} WHERE {" AND ".join(event_where)}) IS FALSE
) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\
""")
else:
events_query_from.append(f"""\
(SELECT main.session_id, MIN(main.timestamp) AS timestamp
FROM {event_from}
WHERE {" AND ".join(event_where)}
GROUP BY 1
) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\
""")
event_index += 1
if event_index > 0:
if or_events:
events_query_part = f"""SELECT
session_id,
MIN(timestamp) AS first_event_ts,
MAX(timestamp) AS last_event_ts
FROM ({events_joiner.join(events_query_from)}) AS u
GROUP BY 1
{fav_only_join}"""
else:
events_query_part = f"""SELECT
event_0.session_id,
MIN(event_0.timestamp) AS first_event_ts,
MAX(event_{event_index - 1}.timestamp) AS last_event_ts
FROM {events_joiner.join(events_query_from)}
GROUP BY 1
{fav_only_join}"""
else:
data.events = []
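    # Illustrative shape of what the assembly above produces for two events in
    # "then" order. This is a sketch only, not verbatim output; table and column
    # names depend on the selected event types and on helpers defined elsewhere:
    #   SELECT event_0.session_id,
    #          MIN(event_0.timestamp) AS first_event_ts,
    #          MAX(event_1.timestamp) AS last_event_ts
    #   FROM (SELECT main.session_id, MIN(main.timestamp) AS timestamp
    #         FROM <event_0_table> AS main
    #              INNER JOIN public.sessions AS ms USING (session_id)
    #         WHERE ... GROUP BY 1) AS event_0
    #        INNER JOIN LATERAL (SELECT main.session_id, MIN(main.timestamp) AS timestamp
    #                            FROM <event_1_table> AS main
    #                            WHERE main.session_id = event_0.session_id
    #                              AND event_0.timestamp <= main.timestamp
    #                            GROUP BY 1) AS event_1 ON (TRUE)
    #   GROUP BY 1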
# ---------------------------------------------------------------------------
if data.startDate is not None:
extra_constraints.append("s.start_ts >= %(startDate)s")
if data.endDate is not None:
extra_constraints.append("s.start_ts <= %(endDate)s")
# if data.platform is not None:
# if data.platform == schemas.PlatformType.mobile:
# extra_constraints.append(b"s.user_os in ('Android','BlackBerry OS','iOS','Tizen','Windows Phone')")
# elif data.platform == schemas.PlatformType.desktop:
# extra_constraints.append(
# b"s.user_os in ('Chrome OS','Fedora','Firefox OS','Linux','Mac OS X','Ubuntu','Windows')")
if data.order is None:
data.order = "DESC"
sort = 'session_id'
if data.sort is not None and data.sort != "session_id":
sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
else:
sort = 'session_id'
if errors_only:
extra_from += f" INNER JOIN {events.event_type.ERROR.table} AS er USING (session_id) INNER JOIN public.errors AS ser USING (error_id)"
extra_constraints.append("ser.source = 'js_exception'")
if error_status != "ALL":
extra_constraints.append("ser.status = %(error_status)s")
full_args["status"] = error_status.lower()
if favorite_only:
extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
extra_constraints.append("ufe.user_id = %(user_id)s")
# extra_constraints = [extra.decode('UTF-8') + "\n" for extra in extra_constraints]
if not favorite_only and not errors_only and user_id is not None:
extra_from += """LEFT JOIN (SELECT user_id, session_id
FROM public.user_favorite_sessions
WHERE user_id = %(userId)s) AS favorite_sessions
USING (session_id)"""
extra_join = ""
if issue is not None:
extra_join = """
INNER JOIN LATERAL(SELECT TRUE FROM events_common.issues INNER JOIN public.issues AS p_issues USING (issue_id)
WHERE issues.session_id=f.session_id
AND p_issues.type=%(issue_type)s
AND p_issues.context_string=%(issue_contextString)s
AND timestamp >= f.first_event_ts
AND timestamp <= f.last_event_ts) AS issues ON(TRUE)
"""
full_args["issue_contextString"] = issue["contextString"]
full_args["issue_type"] = issue["type"]
query_part = f"""\
FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else "public.sessions AS s"}
{extra_join}
{"INNER JOIN public.sessions AS s USING(session_id)" if len(events_query_part) > 0 else ""}
{extra_from}
WHERE
{" AND ".join(extra_constraints)}"""
return full_args, query_part, sort
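# The filter/event assembly above relies on helpers defined elsewhere in this module
# (`_multiple_values`, `_multiple_conditions`, `__get_sql_operator`, ...). As a rough,
# illustrative sketch of the assumed behaviour only, not the project's actual
# implementation, a multi-value condition is expanded into an OR-joined group whose
# parameter keys line up with the ones `_multiple_values` registers in `full_args`:
def _example_multiple_conditions(condition, values, is_not=False, value_key="value"):
    # e.g. condition="s.user_id = %(f_0)s", values=["a", "b"] is assumed to yield
    # "(s.user_id = %(f_0_0)s OR s.user_id = %(f_0_1)s)" (prefixed with NOT when negated),
    # with %(f_0_0)s / %(f_0_1)s supplied separately as query parameters.
    clauses = [condition.replace(f"%({value_key})s", f"%({value_key}_{i})s")
               for i in range(len(values))]
    return ("NOT " if is_not else "") + "(" + " OR ".join(clauses) + ")"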
def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None):
if project_id is None:
all_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
else:
all_projects = [
projects.get_project(tenant_id=tenant_id, project_id=int(project_id), include_last_session=False,
include_gdpr=False)]
all_projects = {int(p["projectId"]): p["name"] for p in all_projects}
project_ids = list(all_projects.keys())
available_keys = metadata.get_keys_by_projects(project_ids)
for i in available_keys:
available_keys[i]["user_id"] = schemas.FilterType.user_id
available_keys[i]["user_anonymous_id"] = schemas.FilterType.user_anonymous_id
results = {}
for i in project_ids:
if m_key not in available_keys[i].values():
available_keys.pop(i)
results[i] = {"total": 0, "sessions": [], "missingMetadata": True}
project_ids = list(available_keys.keys())
if len(project_ids) > 0:
with pg_client.PostgresClient() as cur:
sub_queries = []
for i in project_ids:
col_name = list(available_keys[i].keys())[list(available_keys[i].values()).index(m_key)]
sub_queries.append(cur.mogrify(
f"(SELECT COALESCE(COUNT(s.*)) AS count FROM public.sessions AS s WHERE s.project_id = %(id)s AND s.{col_name} = %(value)s) AS \"{i}\"",
{"id": i, "value": m_value}).decode('UTF-8'))
query = f"""SELECT {", ".join(sub_queries)};"""
cur.execute(query=query)
rows = cur.fetchone()
sub_queries = []
for i in rows.keys():
results[i] = {"total": rows[i], "sessions": [], "missingMetadata": False, "name": all_projects[int(i)]}
if rows[i] > 0:
col_name = list(available_keys[int(i)].keys())[list(available_keys[int(i)].values()).index(m_key)]
sub_queries.append(
cur.mogrify(
f"""(
SELECT *
FROM (
SELECT DISTINCT ON(favorite_sessions.session_id, s.session_id) {SESSION_PROJECTION_COLS}
FROM public.sessions AS s LEFT JOIN (SELECT session_id
FROM public.user_favorite_sessions
WHERE user_favorite_sessions.user_id = %(userId)s
) AS favorite_sessions USING (session_id)
WHERE s.project_id = %(id)s AND s.duration IS NOT NULL AND s.{col_name} = %(value)s
) AS full_sessions
ORDER BY favorite DESC, issue_score DESC
LIMIT 10
)""",
{"id": i, "value": m_value, "userId": user_id}).decode('UTF-8'))
if len(sub_queries) > 0:
cur.execute("\nUNION\n".join(sub_queries))
rows = cur.fetchall()
for i in rows:
results[str(i["project_id"])]["sessions"].append(helper.dict_to_camel_case(i))
return results
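# Minimal, illustrative usage of the helper above; the tenant, user, and metadata
# identifiers are placeholders, not real values:
def _example_search_by_metadata_usage():
    results = search_by_metadata(tenant_id=1, user_id=1, m_key="plan", m_value="premium")
    # `results` maps project_id -> {"total", "sessions", "missingMetadata", "name" (when found)}
    for project_id, summary in results.items():
        print(project_id, summary["total"], summary["missingMetadata"])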
def search_by_issue(user_id, issue, project_id, start_date, end_date):
constraints = ["s.project_id = %(projectId)s",
"p_issues.context_string = %(issueContextString)s",
"p_issues.type = %(issueType)s"]
if start_date is not None:
constraints.append("start_ts >= %(startDate)s")
if end_date is not None:
constraints.append("start_ts <= %(endDate)s")
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
f"""SELECT DISTINCT ON(favorite_sessions.session_id, s.session_id) {SESSION_PROJECTION_COLS}
FROM public.sessions AS s
INNER JOIN events_common.issues USING (session_id)
INNER JOIN public.issues AS p_issues USING (issue_id)
LEFT JOIN (SELECT user_id, session_id
FROM public.user_favorite_sessions
WHERE user_id = %(userId)s) AS favorite_sessions
USING (session_id)
WHERE {" AND ".join(constraints)}
ORDER BY s.session_id DESC;""",
{
"issueContextString": issue["contextString"],
"issueType": issue["type"], "userId": user_id,
"projectId": project_id,
"startDate": start_date,
"endDate": end_date
}))
rows = cur.fetchall()
return helper.list_to_camel_case(rows)
def get_favorite_sessions(project_id, user_id, include_viewed=False):
with pg_client.PostgresClient() as cur:
query_part = cur.mogrify(f"""\
FROM public.sessions AS s
LEFT JOIN public.user_favorite_sessions AS fs ON fs.session_id = s.session_id
WHERE fs.user_id = %(userId)s""",
{"projectId": project_id, "userId": user_id}
)
extra_query = b""
if include_viewed:
extra_query = cur.mogrify(""",\
COALESCE((SELECT TRUE
FROM public.user_viewed_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s), FALSE) AS viewed""",
{"projectId": project_id, "userId": user_id})
cur.execute(f"""\
SELECT s.project_id,
s.session_id::text AS session_id,
s.user_uuid,
s.user_id,
-- s.user_agent,
s.user_os,
s.user_browser,
s.user_device,
s.user_country,
s.start_ts,
s.duration,
s.events_count,
s.pages_count,
s.errors_count,
TRUE AS favorite
{extra_query.decode('UTF-8')}
{query_part.decode('UTF-8')}
ORDER BY s.session_id
LIMIT 50;""")
sessions = cur.fetchall()
return helper.list_to_camel_case(sessions)
def get_user_sessions(project_id, user_id, start_date, end_date):
with pg_client.PostgresClient() as cur:
constraints = ["s.project_id = %(projectId)s", "s.user_id = %(userId)s"]
if start_date is not None:
constraints.append("s.start_ts >= %(startDate)s")
if end_date is not None:
constraints.append("s.start_ts <= %(endDate)s")
query_part = f"""\
FROM public.sessions AS s
WHERE {" AND ".join(constraints)}"""
cur.execute(cur.mogrify(f"""\
SELECT s.project_id,
s.session_id::text AS session_id,
s.user_uuid,
s.user_id,
-- s.user_agent,
s.user_os,
s.user_browser,
s.user_device,
s.user_country,
s.start_ts,
s.duration,
s.events_count,
s.pages_count,
s.errors_count
{query_part}
ORDER BY s.session_id
LIMIT 50;""", {
"projectId": project_id,
"userId": user_id,
"startDate": start_date,
"endDate": end_date
}))
sessions = cur.fetchall()
return helper.list_to_camel_case(sessions)
def get_session_user(project_id, user_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""\
SELECT
user_id,
count(*) as session_count,
max(start_ts) as last_seen,
min(start_ts) as first_seen
FROM
"public".sessions
WHERE
project_id = %(project_id)s
AND user_id = %(user_id)s
AND duration is not null
GROUP BY user_id;
""",
{"project_id": project_id, "user_id": user_id}
)
cur.execute(query=query)
data = cur.fetchone()
return helper.dict_to_camel_case(data)
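# For orientation only, an illustrative (made-up) return value of get_session_user
# after dict_to_camel_case; actual values depend on the project's data:
#   {"userId": "jane@example.com", "sessionCount": 12,
#    "firstSeen": 1609459200000, "lastSeen": 1648771200000}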
def get_session_ids_by_user_ids(project_id, user_ids):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""\
SELECT session_id FROM public.sessions
WHERE
project_id = %(project_id)s AND user_id IN %(user_id)s;""",
{"project_id": project_id, "user_id": tuple(user_ids)}
)
        cur.execute(query=query)
        ids = cur.fetchall()
    return ids
def delete_sessions_by_session_ids(session_ids):
with pg_client.PostgresClient(long_query=True) as cur:
query = cur.mogrify(
"""\
DELETE FROM public.sessions
WHERE
session_id IN %(session_ids)s;""",
{"session_ids": tuple(session_ids)}
)
cur.execute(query=query)
return True
def delete_sessions_by_user_ids(project_id, user_ids):
with pg_client.PostgresClient(long_query=True) as cur:
query = cur.mogrify(
"""\
DELETE FROM public.sessions
WHERE
project_id = %(project_id)s AND user_id IN %(user_id)s;""",
{"project_id": project_id, "user_id": tuple(user_ids)}
)
cur.execute(query=query)
return True
def count_all():
with pg_client.PostgresClient(long_query=True) as cur:
        cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
        row = cur.fetchone()
        return row.get("count", 0)
| 54.892293
| 203
| 0.518311
|
3376ad41740ce43c7aeebeebf3d549d2f66a7c1d
| 1,577
|
py
|
Python
|
src/accounts/migrations/0001_initial.py
|
Saruni0305/oop-work-2
|
371787017cdd8446220b646c0070c4e53065bff5
|
[
"MIT"
] | 24
|
2018-11-17T21:02:06.000Z
|
2021-11-18T23:02:00.000Z
|
src/accounts/migrations/0001_initial.py
|
Saruni0305/oop-work-2
|
371787017cdd8446220b646c0070c4e53065bff5
|
[
"MIT"
] | 9
|
2020-06-05T21:43:20.000Z
|
2021-11-15T17:49:01.000Z
|
src/accounts/migrations/0001_initial.py
|
SaruniM/oop-work-2
|
371787017cdd8446220b646c0070c4e53065bff5
|
[
"MIT"
] | 15
|
2019-03-08T20:19:17.000Z
|
2021-12-29T10:16:14.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-31 17:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True)),
('active', models.BooleanField(default=True)),
('staff', models.BooleanField(default=False)),
('admin', models.BooleanField(default=False)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GuestModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('active', models.BooleanField(default=True)),
('update', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
]
| 36.674419
| 114
| 0.567533
|
03dba78cf64649ea49e6e5e3a78d739d706ce563
| 25,375
|
py
|
Python
|
examples/autoignition-mpi.py
|
illinois-ceesd/mirgecom
|
6ae28905dbb9d073a9f778111d12b10e474fe799
|
[
"MIT"
] | 3
|
2020-08-18T16:31:30.000Z
|
2021-03-02T20:20:07.000Z
|
examples/autoignition-mpi.py
|
illinois-ceesd/mirgecom
|
6ae28905dbb9d073a9f778111d12b10e474fe799
|
[
"MIT"
] | 356
|
2020-05-29T04:44:50.000Z
|
2022-03-31T00:09:49.000Z
|
examples/autoignition-mpi.py
|
illinois-ceesd/mirgecom
|
6ae28905dbb9d073a9f778111d12b10e474fe799
|
[
"MIT"
] | 16
|
2020-09-11T20:56:57.000Z
|
2021-07-28T19:21:05.000Z
|
"""Demonstrate combustive mixture with Pyrometheus."""
__copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import numpy as np
import pyopencl as cl
import pyopencl.tools as cl_tools
from functools import partial
from meshmode.array_context import (
PyOpenCLArrayContext,
PytatoPyOpenCLArrayContext
)
from mirgecom.profiling import PyOpenCLProfilingArrayContext
from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa
from grudge.eager import EagerDGDiscretization
from grudge.shortcuts import make_visualizer
from logpyle import IntervalTimer, set_dt
from mirgecom.euler import extract_vars_for_logging, units_for_logging
from mirgecom.euler import euler_operator
from mirgecom.simutil import (
get_sim_timestep,
generate_and_distribute_mesh,
write_visfile,
allsync
)
from mirgecom.io import make_init_message
from mirgecom.mpi import mpi_entry_point
from mirgecom.integrators import rk4_step
from mirgecom.steppers import advance_state
from mirgecom.boundary import AdiabaticSlipBoundary
from mirgecom.initializers import MixtureInitializer
from mirgecom.eos import PyrometheusMixture
from mirgecom.gas_model import GasModel
from arraycontext import thaw
from mirgecom.logging_quantities import (
initialize_logmgr,
logmgr_add_many_discretization_quantities,
logmgr_add_cl_device_info,
logmgr_add_device_memory_usage,
set_sim_state
)
import cantera
logger = logging.getLogger(__name__)
class MyRuntimeError(RuntimeError):
"""Simple exception for fatal driver errors."""
pass
@mpi_entry_point
def main(ctx_factory=cl.create_some_context, use_logmgr=True,
use_leap=False, use_profiling=False, casename=None,
rst_filename=None, actx_class=PyOpenCLArrayContext,
log_dependent=True):
"""Drive example."""
cl_ctx = ctx_factory()
if casename is None:
casename = "mirgecom"
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nproc = comm.Get_size()
from mirgecom.simutil import global_reduce as _global_reduce
global_reduce = partial(_global_reduce, comm=comm)
logmgr = initialize_logmgr(use_logmgr,
filename=f"{casename}.sqlite", mode="wu", mpi_comm=comm)
if use_profiling:
queue = cl.CommandQueue(cl_ctx,
properties=cl.command_queue_properties.PROFILING_ENABLE)
else:
queue = cl.CommandQueue(cl_ctx)
actx = actx_class(
queue,
allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))
# Some discretization parameters
dim = 2
nel_1d = 8
order = 1
# {{{ Time stepping control
# This example runs only 3 steps by default (to keep CI ~short)
# With the mixture defined below, equilibrium is achieved at ~40ms
# To run to equilibrium, set t_final >= 40ms.
# Time stepper selection
if use_leap:
from leap.rk import RK4MethodBuilder
timestepper = RK4MethodBuilder("state")
else:
timestepper = rk4_step
# Time loop control parameters
current_step = 0
t_final = 1e-8
current_cfl = 1.0
current_dt = 1e-9
current_t = 0
constant_cfl = False
    # I/O frequencies
nstatus = 1
nviz = 5
nhealth = 1
nrestart = 5
# }}} Time stepping control
debug = False
rst_path = "restart_data/"
rst_pattern = (
rst_path + "{cname}-{step:04d}-{rank:04d}.pkl"
)
if rst_filename: # read the grid from restart data
rst_filename = f"{rst_filename}-{rank:04d}.pkl"
from mirgecom.restart import read_restart_data
restart_data = read_restart_data(actx, rst_filename)
local_mesh = restart_data["local_mesh"]
local_nelements = local_mesh.nelements
global_nelements = restart_data["global_nelements"]
assert restart_data["num_parts"] == nproc
rst_time = restart_data["t"]
rst_step = restart_data["step"]
rst_order = restart_data["order"]
else: # generate the grid from scratch
from meshmode.mesh.generation import generate_regular_rect_mesh
box_ll = -0.005
box_ur = 0.005
generate_mesh = partial(generate_regular_rect_mesh, a=(box_ll,)*dim,
b=(box_ur,) * dim, nelements_per_axis=(nel_1d,)*dim)
local_mesh, global_nelements = generate_and_distribute_mesh(comm,
generate_mesh)
local_nelements = local_mesh.nelements
from grudge.dof_desc import DISCR_TAG_BASE, DISCR_TAG_QUAD
from meshmode.discretization.poly_element import \
default_simplex_group_factory, QuadratureSimplexGroupFactory
discr = EagerDGDiscretization(
actx, local_mesh,
discr_tag_to_group_factory={
DISCR_TAG_BASE: default_simplex_group_factory(
base_dim=local_mesh.dim, order=order),
DISCR_TAG_QUAD: QuadratureSimplexGroupFactory(2*order + 1)
},
mpi_communicator=comm
)
nodes = thaw(discr.nodes(), actx)
ones = discr.zeros(actx) + 1.0
vis_timer = None
if logmgr:
logmgr_add_cl_device_info(logmgr, queue)
logmgr_add_device_memory_usage(logmgr, queue)
vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
logmgr.add_quantity(vis_timer)
logmgr.add_watches([
("step.max", "step = {value}, "),
("t_sim.max", "sim time: {value:1.6e} s\n"),
("t_step.max", "------- step walltime: {value:6g} s, "),
("t_log.max", "log walltime: {value:6g} s")
])
if log_dependent:
logmgr_add_many_discretization_quantities(logmgr, discr, dim,
extract_vars_for_logging,
units_for_logging)
logmgr.add_watches([
("min_pressure", "\n------- P (min, max) (Pa) = ({value:1.9e}, "),
("max_pressure", "{value:1.9e})\n"),
("min_temperature", "------- T (min, max) (K) = ({value:7g}, "),
("max_temperature", "{value:7g})\n")])
# {{{ Set up initial state using Cantera
# Use Cantera for initialization
# -- Pick up a CTI for the thermochemistry config
# --- Note: Users may add their own CTI file by dropping it into
# --- mirgecom/mechanisms alongside the other CTI files.
from mirgecom.mechanisms import get_mechanism_cti
mech_cti = get_mechanism_cti("uiuc")
cantera_soln = cantera.Solution(phase_id="gas", source=mech_cti)
nspecies = cantera_soln.n_species
    # Initial temperature, pressure, and mixture mole fractions are needed to
# set up the initial state in Cantera.
temperature_seed = 1500.0 # Initial temperature hot enough to burn
# Parameters for calculating the amounts of fuel, oxidizer, and inert species
equiv_ratio = 1.0
ox_di_ratio = 0.21
stoich_ratio = 3.0
# Grab the array indices for the specific species, ethylene, oxygen, and nitrogen
i_fu = cantera_soln.species_index("C2H4")
i_ox = cantera_soln.species_index("O2")
i_di = cantera_soln.species_index("N2")
x = np.zeros(nspecies)
# Set the species mole fractions according to our desired fuel/air mixture
x[i_fu] = (ox_di_ratio*equiv_ratio)/(stoich_ratio+ox_di_ratio*equiv_ratio)
x[i_ox] = stoich_ratio*x[i_fu]/equiv_ratio
x[i_di] = (1.0-ox_di_ratio)*x[i_ox]/ox_di_ratio
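    # Worked numbers for the default inputs above (equiv_ratio=1.0, ox_di_ratio=0.21,
    # stoich_ratio=3.0); this is only a sanity check of the formulas, not extra
    # configuration:
    #   x[C2H4] = 0.21/3.21          ~= 0.0654
    #   x[O2]   = 3.0*0.0654/1.0     ~= 0.1963
    #   x[N2]   = 0.79*0.1963/0.21   ~= 0.7383
    # and the mole fractions sum to 1 as expected.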
# Uncomment next line to make pylint fail when it can't find cantera.one_atm
one_atm = cantera.one_atm # pylint: disable=no-member
# one_atm = 101325.0
    # Let the user know about how Cantera is being initialized
    print(f"Input state (T,P,X) = ({temperature_seed}, {one_atm}, {x})")
    # Set Cantera internal gas temperature, pressure, and mole fractions
cantera_soln.TPX = temperature_seed, one_atm, x
# Pull temperature, total density, mass fractions, and pressure from Cantera
# We need total density, and mass fractions to initialize the fluid/gas state.
can_t, can_rho, can_y = cantera_soln.TDY
can_p = cantera_soln.P
# *can_t*, *can_p* should not differ (significantly) from user's initial data,
# but we want to ensure that we use exactly the same starting point as Cantera,
# so we use Cantera's version of these data.
# }}}
# {{{ Create Pyrometheus thermochemistry object & EOS
# Create a Pyrometheus EOS with the Cantera soln. Pyrometheus uses Cantera and
# generates a set of methods to calculate chemothermomechanical properties and
# states for this particular mechanism.
from mirgecom.thermochemistry import make_pyrometheus_mechanism_class
pyro_mechanism = make_pyrometheus_mechanism_class(cantera_soln)(actx.np)
eos = PyrometheusMixture(pyro_mechanism, temperature_guess=temperature_seed)
gas_model = GasModel(eos=eos)
from pytools.obj_array import make_obj_array
def get_temperature_update(cv, temperature):
y = cv.species_mass_fractions
e = gas_model.eos.internal_energy(cv) / cv.mass
return pyro_mechanism.get_temperature_update_energy(e, temperature, y)
from mirgecom.gas_model import make_fluid_state
def get_fluid_state(cv, tseed):
return make_fluid_state(cv=cv, gas_model=gas_model,
temperature_seed=tseed)
compute_temperature_update = actx.compile(get_temperature_update)
construct_fluid_state = actx.compile(get_fluid_state)
# }}}
# {{{ MIRGE-Com state initialization
# Initialize the fluid/gas state with Cantera-consistent data:
# (density, pressure, temperature, mass_fractions)
print(f"Cantera state (rho,T,P,Y) = ({can_rho}, {can_t}, {can_p}, {can_y}")
velocity = np.zeros(shape=(dim,))
initializer = MixtureInitializer(dim=dim, nspecies=nspecies,
pressure=can_p, temperature=can_t,
massfractions=can_y, velocity=velocity)
my_boundary = AdiabaticSlipBoundary()
boundaries = {BTAG_ALL: my_boundary}
if rst_filename:
current_step = rst_step
current_t = rst_time
if logmgr:
from mirgecom.logging_quantities import logmgr_set_time
logmgr_set_time(logmgr, current_step, current_t)
if order == rst_order:
current_cv = restart_data["cv"]
temperature_seed = restart_data["temperature_seed"]
else:
rst_cv = restart_data["cv"]
old_discr = EagerDGDiscretization(actx, local_mesh, order=rst_order,
mpi_communicator=comm)
from meshmode.discretization.connection import make_same_mesh_connection
connection = make_same_mesh_connection(actx, discr.discr_from_dd("vol"),
old_discr.discr_from_dd("vol"))
current_cv = connection(rst_cv)
temperature_seed = connection(restart_data["temperature_seed"])
else:
# Set the current state from time 0
current_cv = initializer(eos=gas_model.eos, x_vec=nodes)
temperature_seed = temperature_seed * ones
# The temperature_seed going into this function is:
# - At time 0: the initial temperature input data (maybe from Cantera)
# - On restart: the restarted temperature seed from restart file (saving
    #   the *seed* allows restarts to be deterministic).
current_fluid_state = construct_fluid_state(current_cv, temperature_seed)
current_dv = current_fluid_state.dv
temperature_seed = current_dv.temperature
# Inspection at physics debugging time
if debug:
print("Initial MIRGE-Com state:")
print(f"Initial DV pressure: {current_fluid_state.pressure}")
print(f"Initial DV temperature: {current_fluid_state.temperature}")
# }}}
visualizer = make_visualizer(discr)
initname = initializer.__class__.__name__
eosname = gas_model.eos.__class__.__name__
init_message = make_init_message(dim=dim, order=order,
nelements=local_nelements,
global_nelements=global_nelements,
dt=current_dt, t_final=t_final, nstatus=nstatus,
nviz=nviz, cfl=current_cfl,
constant_cfl=constant_cfl, initname=initname,
eosname=eosname, casename=casename)
# Cantera equilibrate calculates the expected end state @ chemical equilibrium
# i.e. the expected state after all reactions
cantera_soln.equilibrate("UV")
eq_temperature, eq_density, eq_mass_fractions = cantera_soln.TDY
eq_pressure = cantera_soln.P
# Report the expected final state to the user
if rank == 0:
logger.info(init_message)
logger.info(f"Expected equilibrium state:"
f" {eq_pressure=}, {eq_temperature=},"
f" {eq_density=}, {eq_mass_fractions=}")
def my_write_status(dt, cfl, dv=None):
status_msg = f"------ {dt=}" if constant_cfl else f"----- {cfl=}"
if ((dv is not None) and (not log_dependent)):
temp = dv.temperature
press = dv.pressure
from grudge.op import nodal_min_loc, nodal_max_loc
tmin = allsync(actx.to_numpy(nodal_min_loc(discr, "vol", temp)),
comm=comm, op=MPI.MIN)
tmax = allsync(actx.to_numpy(nodal_max_loc(discr, "vol", temp)),
comm=comm, op=MPI.MAX)
pmin = allsync(actx.to_numpy(nodal_min_loc(discr, "vol", press)),
comm=comm, op=MPI.MIN)
pmax = allsync(actx.to_numpy(nodal_max_loc(discr, "vol", press)),
comm=comm, op=MPI.MAX)
dv_status_msg = f"\nP({pmin}, {pmax}), T({tmin}, {tmax})"
status_msg = status_msg + dv_status_msg
if rank == 0:
logger.info(status_msg)
def my_write_viz(step, t, dt, state, ts_field, dv, production_rates, cfl):
viz_fields = [("cv", state), ("dv", dv),
("production_rates", production_rates),
("dt" if constant_cfl else "cfl", ts_field)]
write_visfile(discr, viz_fields, visualizer, vizname=casename,
step=step, t=t, overwrite=True, vis_timer=vis_timer)
def my_write_restart(step, t, state, temperature_seed):
rst_fname = rst_pattern.format(cname=casename, step=step, rank=rank)
if rst_fname == rst_filename:
if rank == 0:
logger.info("Skipping overwrite of restart file.")
else:
rst_data = {
"local_mesh": local_mesh,
"cv": state.cv,
"temperature_seed": temperature_seed,
"t": t,
"step": step,
"order": order,
"global_nelements": global_nelements,
"num_parts": nproc
}
from mirgecom.restart import write_restart_file
write_restart_file(actx, rst_data, rst_fname, comm)
def my_health_check(cv, dv):
import grudge.op as op
health_error = False
pressure = dv.pressure
temperature = dv.temperature
from mirgecom.simutil import check_naninf_local, check_range_local
if check_naninf_local(discr, "vol", pressure):
health_error = True
logger.info(f"{rank=}: Invalid pressure data found.")
if check_range_local(discr, "vol", pressure, 1e5, 2.6e5):
health_error = True
logger.info(f"{rank=}: Pressure range violation.")
if check_naninf_local(discr, "vol", temperature):
health_error = True
logger.info(f"{rank=}: Invalid temperature data found.")
if check_range_local(discr, "vol", temperature, 1.498e3, 1.6e3):
health_error = True
logger.info(f"{rank=}: Temperature range violation.")
# This check is the temperature convergence check
# The current *temperature* is what Pyrometheus gets
# after a fixed number of Newton iterations, *n_iter*.
# Calling `compute_temperature` here with *temperature*
# input as the guess returns the calculated gas temperature after
# yet another *n_iter*.
# The difference between those two temperatures is the
# temperature residual, which can be used as an indicator of
# convergence in Pyrometheus `get_temperature`.
# Note: The local max jig below works around a very long compile
# in lazy mode.
temp_resid = compute_temperature_update(cv, temperature) / temperature
temp_err = (actx.to_numpy(op.nodal_max_loc(discr, "vol", temp_resid)))
if temp_err > 1e-8:
health_error = True
logger.info(f"{rank=}: Temperature is not converged {temp_resid=}.")
return health_error
from mirgecom.inviscid import get_inviscid_timestep
def get_dt(state):
return get_inviscid_timestep(discr, state=state)
compute_dt = actx.compile(get_dt)
from mirgecom.inviscid import get_inviscid_cfl
def get_cfl(state, dt):
return get_inviscid_cfl(discr, dt=dt, state=state)
compute_cfl = actx.compile(get_cfl)
def get_production_rates(cv, temperature):
return eos.get_production_rates(cv, temperature)
compute_production_rates = actx.compile(get_production_rates)
def my_get_timestep(t, dt, state):
# richer interface to calculate {dt,cfl} returns node-local estimates
t_remaining = max(0, t_final - t)
if constant_cfl:
ts_field = current_cfl * compute_dt(state)
from grudge.op import nodal_min_loc
dt = allsync(actx.to_numpy(nodal_min_loc(discr, "vol", ts_field)),
comm=comm, op=MPI.MIN)
cfl = current_cfl
else:
ts_field = compute_cfl(state, current_dt)
from grudge.op import nodal_max_loc
cfl = allsync(actx.to_numpy(nodal_max_loc(discr, "vol", ts_field)),
comm=comm, op=MPI.MAX)
return ts_field, cfl, min(t_remaining, dt)
def my_pre_step(step, t, dt, state):
cv, tseed = state
fluid_state = construct_fluid_state(cv, tseed)
dv = fluid_state.dv
try:
if logmgr:
logmgr.tick_before()
from mirgecom.simutil import check_step
do_viz = check_step(step=step, interval=nviz)
do_restart = check_step(step=step, interval=nrestart)
do_health = check_step(step=step, interval=nhealth)
do_status = check_step(step=step, interval=nstatus)
if do_health:
health_errors = global_reduce(my_health_check(cv, dv), op="lor")
if health_errors:
if rank == 0:
logger.info("Fluid solution failed health check.")
raise MyRuntimeError("Failed simulation health check.")
ts_field, cfl, dt = my_get_timestep(t=t, dt=dt, state=fluid_state)
if do_status:
my_write_status(dt=dt, cfl=cfl, dv=dv)
if do_restart:
my_write_restart(step=step, t=t, state=fluid_state,
temperature_seed=tseed)
if do_viz:
production_rates = compute_production_rates(fluid_state.cv,
fluid_state.temperature)
my_write_viz(step=step, t=t, dt=dt, state=cv, dv=dv,
production_rates=production_rates,
ts_field=ts_field, cfl=cfl)
except MyRuntimeError:
if rank == 0:
logger.info("Errors detected; attempting graceful exit.")
# my_write_viz(step=step, t=t, dt=dt, state=cv)
# my_write_restart(step=step, t=t, state=fluid_state)
raise
return state, dt
def my_post_step(step, t, dt, state):
cv, tseed = state
fluid_state = construct_fluid_state(cv, tseed)
# Logmgr needs to know about EOS, dt, dim?
# imo this is a design/scope flaw
if logmgr:
set_dt(logmgr, dt)
set_sim_state(logmgr, dim, cv, gas_model.eos)
logmgr.tick_after()
return make_obj_array([cv, fluid_state.temperature]), dt
def my_rhs(t, state):
cv, tseed = state
from mirgecom.gas_model import make_fluid_state
fluid_state = make_fluid_state(cv=cv, gas_model=gas_model,
temperature_seed=tseed)
return make_obj_array([
euler_operator(discr, state=fluid_state, time=t, boundaries=boundaries,
gas_model=gas_model)
+ eos.get_species_source_terms(cv, fluid_state.temperature),
0*tseed])
current_dt = get_sim_timestep(discr, current_fluid_state, current_t, current_dt,
current_cfl, t_final, constant_cfl)
current_step, current_t, current_state = \
advance_state(rhs=my_rhs, timestepper=timestepper,
pre_step_callback=my_pre_step,
post_step_callback=my_post_step, dt=current_dt,
state=make_obj_array([current_cv, temperature_seed]),
t=current_t, t_final=t_final)
# Dump the final data
if rank == 0:
logger.info("Checkpointing final state ...")
final_cv, tseed = current_state
final_fluid_state = construct_fluid_state(final_cv, tseed)
final_dv = final_fluid_state.dv
final_dm = compute_production_rates(final_cv, final_dv.temperature)
ts_field, cfl, dt = my_get_timestep(t=current_t, dt=current_dt,
state=final_fluid_state)
my_write_viz(step=current_step, t=current_t, dt=dt, state=final_cv,
dv=final_dv, production_rates=final_dm, ts_field=ts_field, cfl=cfl)
my_write_status(dt=dt, cfl=cfl, dv=final_dv)
my_write_restart(step=current_step, t=current_t, state=final_fluid_state,
temperature_seed=tseed)
if logmgr:
logmgr.close()
elif use_profiling:
print(actx.tabulate_profiling_data())
finish_tol = 1e-16
assert np.abs(current_t - t_final) < finish_tol
if __name__ == "__main__":
import argparse
casename = "autoignition"
parser = argparse.ArgumentParser(description=f"MIRGE-Com Example: {casename}")
parser.add_argument("--lazy", action="store_true",
help="switch to a lazy computation mode")
parser.add_argument("--profiling", action="store_true",
help="turn on detailed performance profiling")
parser.add_argument("--log", action="store_true", default=True,
help="turn on logging")
parser.add_argument("--leap", action="store_true",
help="use leap timestepper")
parser.add_argument("--restart_file", help="root name of restart file")
parser.add_argument("--casename", help="casename to use for i/o")
args = parser.parse_args()
from warnings import warn
warn("Automatically turning off DV logging. MIRGE-Com Issue(578)")
log_dependent = False
if args.profiling:
if args.lazy:
raise ValueError("Can't use lazy and profiling together.")
actx_class = PyOpenCLProfilingArrayContext
else:
if args.lazy:
log_dependent = False
actx_class = PytatoPyOpenCLArrayContext
else:
actx_class = PyOpenCLArrayContext
logging.basicConfig(format="%(message)s", level=logging.INFO)
if args.casename:
casename = args.casename
rst_filename = None
if args.restart_file:
rst_filename = args.restart_file
main(use_logmgr=args.log, use_leap=args.leap, use_profiling=args.profiling,
casename=casename, rst_filename=rst_filename, actx_class=actx_class,
log_dependent=log_dependent)
# vim: foldmethod=marker
| 39.648438
| 85
| 0.647764
|
e985bd0f16edf042c605bd328098f8e34f1f2e0a
| 662
|
py
|
Python
|
vcr/serializers/gzipserializer.py
|
mrgrassho/vcrpy-custom
|
695076ef53f4989dab023ae35215a033c2182fd2
|
[
"MIT"
] | null | null | null |
vcr/serializers/gzipserializer.py
|
mrgrassho/vcrpy-custom
|
695076ef53f4989dab023ae35215a033c2182fd2
|
[
"MIT"
] | null | null | null |
vcr/serializers/gzipserializer.py
|
mrgrassho/vcrpy-custom
|
695076ef53f4989dab023ae35215a033c2182fd2
|
[
"MIT"
] | null | null | null |
import zlib
from base64 import b64encode, b64decode
import yaml
# Use the libYAML versions if possible; zlib/base64 are needed unconditionally,
# so they must not live inside this fallback block.
try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper
def deserialize(b64encoded_cassette):
    # Reverse of serialize(): base64-decode, decompress, then parse the YAML cassette.
    compressed_cassette = b64decode(b64encoded_cassette.encode())
    cassette_string = zlib.decompress(compressed_cassette).decode()
    return yaml.load(cassette_string, Loader=Loader)
def serialize(cassette_dict):
    # Dump the cassette to YAML, compress it, and base64-encode it for text-safe storage.
    cassette_string = yaml.dump(cassette_dict, Dumper=Dumper)
    compressed_cassette = zlib.compress(cassette_string.encode())
    return b64encode(compressed_cassette).decode()
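# Minimal, illustrative round-trip check for the two helpers above; the cassette
# dict is a made-up placeholder, not a real vcrpy recording:
def _example_round_trip():
    cassette = {"interactions": [{"request": {"uri": "http://example.com"},
                                  "response": {"status": 200}}]}
    assert deserialize(serialize(cassette)) == cassette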
| 31.52381
| 66
| 0.777946
|
96be88e3ec643dbd19e58fcf4364bce0fafb6c5b
| 7,285
|
py
|
Python
|
scipy/integrate/odepack.py
|
FRidh/scipy
|
dabfb4586e0b656b5f6da8b301643b918259e61f
|
[
"BSD-3-Clause"
] | 39
|
2016-11-08T11:24:30.000Z
|
2021-11-18T06:50:21.000Z
|
scipy/integrate/odepack.py
|
FRidh/scipy
|
dabfb4586e0b656b5f6da8b301643b918259e61f
|
[
"BSD-3-Clause"
] | 1
|
2015-09-30T05:26:54.000Z
|
2016-03-22T15:09:56.000Z
|
scipy/integrate/odepack.py
|
FRidh/scipy
|
dabfb4586e0b656b5f6da8b301643b918259e61f
|
[
"BSD-3-Clause"
] | 13
|
2017-04-08T08:03:12.000Z
|
2021-08-25T08:38:52.000Z
|
# Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0):
"""
Integrate a system of ordinary differential equations.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
    of first-order ODEs::
dy/dt = func(y,t0,...)
where y can be a vector.
Parameters
----------
func : callable(y, t0, ...)
Computes the derivative of y at t0.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t0, ...)
Gradient (Jacobian) of `func`.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
        Whether to return a dictionary of optional outputs as the second output.
printmessg : bool, optional
Whether to print the convergence message
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step.
'tcur' vector with the value of t reached for each time step.
(will always be at least as large as the input times).
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected.
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step.
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise.
'lenrw' the length of the double work array required.
'leniw' the length of integer work array required.
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
If either of these are not None or non-negative, then the
Jacobian is assumed to be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g. singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
ode : a more object-oriented integrator based on VODE.
quad : for finding the area under a curve.
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords)
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
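# Illustrative usage only (not part of the original module): integrate the scalar
# ODE dy/dt = -y from y(0) = 1 over t in [0, 5]; the exact solution is exp(-t).
def _example_odeint_usage():
    import numpy as np
    t = np.linspace(0.0, 5.0, 51)
    y = odeint(lambda y, t: -y, 1.0, t)
    # y has shape (len(t), 1); y[:, 0] should track np.exp(-t) to solver tolerance.
    return t, y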
| 43.106509
| 102
| 0.613452
|
19ba8983dba3b7513a0bfecfd4859025c0cc4087
| 8,010
|
py
|
Python
|
src/api/dcps/python/test/test_instance_operations.py
|
brezillon/opensplice
|
725ae9d949c83fce1746bd7d8a154b9d0a81fe3e
|
[
"Apache-2.0"
] | 133
|
2017-11-09T02:10:00.000Z
|
2022-03-29T09:45:10.000Z
|
src/api/dcps/python/test/test_instance_operations.py
|
brezillon/opensplice
|
725ae9d949c83fce1746bd7d8a154b9d0a81fe3e
|
[
"Apache-2.0"
] | 131
|
2017-11-07T14:48:43.000Z
|
2022-03-13T15:30:47.000Z
|
src/api/dcps/python/test/test_instance_operations.py
|
brezillon/opensplice
|
725ae9d949c83fce1746bd7d8a154b9d0a81fe3e
|
[
"Apache-2.0"
] | 94
|
2017-11-09T02:26:19.000Z
|
2022-02-24T06:38:25.000Z
|
#
# Vortex OpenSplice
#
# This software and documentation are Copyright 2006 to TO_YEAR ADLINK
# Technology Limited, its affiliated companies and licensors. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Created on Jan 5, 2018
@author: prismtech
'''
import unittest
import os
import dds
import ddsutil
class TestInstanceOperations(unittest.TestCase):
idl_path = os.path.join('idl', 'Shapes.idl')
shape_type_name = 'ShapeType'
def test_registration(self):
dp = dds.DomainParticipant()
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
t = gci.register_topic(dp, 'ST_test_registration')
wr = dp.create_datawriter(t)
#data = ShapeType(color='RED')
data = ShapeType(color='RED',x=0,y=0,z=0,t=Inner(foo=0))
h = wr.register_instance(data)
self.assertIsNotNone(h, 'handle is None')
self.assertIsInstance(h, int, 'handle is not int')
wr.unregister_instance(data, h)
def test_unregistration(self):
dp = dds.DomainParticipant()
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
t = gci.register_topic(dp, 'ST_test_unregistration')
wr = dp.create_datawriter(t)
#data = ShapeType(color='RED')
data = ShapeType(color='RED',x=0,y=0,z=0,t=Inner(foo=0))
data2 = ShapeType(color='BLUE',x=0,y=0,z=0,t=Inner(foo=0))
data3 = ShapeType(color='YELLOW',x=0,y=0,z=0,t=Inner(foo=0))
data4 = ShapeType(color='PINK',x=0,y=0,z=0,t=Inner(foo=0))
dataNever = ShapeType(color='NEVER',x=0,y=0,z=0,t=Inner(foo=0))
h = wr.register_instance(data)
h2 = wr.register_instance(data2)
h3 = wr.register_instance(data3)
h4 = wr.register_instance(data4)
self.assertIsNotNone(h, 'handle is None')
self.assertIsInstance(h, int, 'handle is not int')
# test expected success paths
wr.unregister_instance(handle=h)
wr.unregister_instance(data=data2)
# test failure paths
try:
wr.unregister_instance()
self.fail('should not succeed; unregistering inconsistent data')
except dds.DDSException:
pass
# unregister something that's already unregistered
try:
wr.unregister_instance(handle=h)
self.fail('should not succeed; duplicate unregistration')
except dds.DDSException:
pass
# unregister something that was never registered
try:
wr.unregister_instance(data=dataNever)
self.fail('should not succeed; instance never registered')
except dds.DDSException:
pass
        # The following are not failures, but will produce ospl-error.log entries
# unregister something where data does not match
wr.unregister_instance(dataNever, h4)
def test_wr_instance_lookup(self):
dp = dds.DomainParticipant()
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
t = gci.register_topic(dp, 'ST_test_wr_instance_lookup')
wr = dp.create_datawriter(t)
#data = ShapeType(color='RED')
data = ShapeType(color='RED',x=0,y=0,z=0,t=Inner(foo=0))
dataUnreg = ShapeType(color='GREEN',x=0,y=0,z=0,t=Inner(foo=0))
h = wr.register_instance(data)
hlookup = wr.lookup_instance(data)
self.assertEqual(hlookup, h)
hUnreg = wr.lookup_instance(dataUnreg)
self.assertIsNone(hUnreg)
# def test_wr_get_key(self):
# dp = dds.DomainParticipant()
#
# gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
# ShapeType = gci.get_class('ShapeType')
# Inner = gci.get_class('Inner')
#
# t = gci.register_topic(dp, 'ST_test_wr_get_key')
#
# wr = dp.create_datawriter(t)
# #data = ShapeType(color='RED')
# data = ShapeType(color='RED',x=1,y=2,z=3,t=Inner(foo=4))
# dataUnreg = ShapeType(color='GREEN',x=0,y=0,z=0,t=Inner(foo=0))
#
# h = wr.register_instance(data)
#
# dataLookup = wr.get_key(h)
# self.assertEqual(dataLookup.color, data.color)
# self.assertEqual(dataLookup.x, 0)
# self.assertEqual(dataLookup.y, 0)
# self.assertEqual(dataLookup.z, 0)
# self.assertEqual(dataLookup.t.foo, 0)
#
# try:
# wr.get_key(11231997)
# self.fail('Expected exception on invalid handle')
# except dds.DDSException:
# pass
def test_take_instance(self):
dp = dds.DomainParticipant()
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
t = gci.register_topic(dp, 'ST_test_take_instance')
rd = dp.create_datareader(t)
wr = dp.create_datawriter(t)
dataR = ShapeType(color='RED',x=1,y=1,z=1,t=Inner(foo=1))
dataG = ShapeType(color='GREEN',x=1,y=1,z=1,t=Inner(foo=1))
keyR = ShapeType(color='RED',x=0,y=0,z=0,t=Inner(foo=0))
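        # before anything has been written, the reader should not know this instance yet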
hR = rd.lookup_instance(keyR)
self.assertIsNone(hR)
wr.write(dataR)
wr.write(dataG)
keyR = ShapeType(color='RED',x=0,y=0,z=0,t=Inner(foo=0))
hR = rd.lookup_instance(keyR)
self.assertIsInstance(hR, int)
data = rd.take_instance(hR,n=2)
self.assertEqual(1, len(data))
found = [d.color for d, _ in data]
self.assertIn('RED',found)
#do it again
data = rd.take_instance(hR,n=2)
self.assertEqual(0, len(data))
def test_read_instance(self):
dp = dds.DomainParticipant()
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
t = gci.register_topic(dp, 'ST_test_read_instance')
rd = dp.create_datareader(t)
wr = dp.create_datawriter(t)
dataR = ShapeType(color='RED',x=1,y=1,z=1,t=Inner(foo=1))
dataG = ShapeType(color='GREEN',x=1,y=1,z=1,t=Inner(foo=1))
keyR = ShapeType(color='RED',x=0,y=0,z=0,t=Inner(foo=0))
hR = rd.lookup_instance(keyR)
self.assertIsNone(hR)
wr.write(dataR)
wr.write(dataG)
hR = rd.lookup_instance(keyR)
self.assertIsInstance(hR, int)
data = rd.read_instance(hR,n=2)
self.assertEqual(1, len(data))
found = [d.color for d, _ in data]
self.assertIn('RED',found)
#do it again
data = rd.read_instance(hR,n=2)
self.assertEqual(1, len(data))
found = [d.color for d, _ in data]
self.assertIn('RED',found)
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'TestInstanceOperations.test_registration']
unittest.main()
| 34.085106
| 85
| 0.607116
|
551bacb09ffe983de8bc4b0940f7459b262f390e
| 2,114
|
py
|
Python
|
swerve/core/qml.py
|
Swerved/Hulkster
|
a7b5fd9cefe17032d3a738247cb2633d3ecddb31
|
[
"blessing"
] | null | null | null |
swerve/core/qml.py
|
Swerved/Hulkster
|
a7b5fd9cefe17032d3a738247cb2633d3ecddb31
|
[
"blessing"
] | null | null | null |
swerve/core/qml.py
|
Swerved/Hulkster
|
a7b5fd9cefe17032d3a738247cb2633d3ecddb31
|
[
"blessing"
] | null | null | null |
"""
Property Bindings for QML
https://stackoverflow.com/questions/62378735/how-to-acheive-simpliefed-two-way-data-binding-in-python-and-qt-quick-qml-applic
"""
from PySide2 import QtCore
class PropertyMeta(type(QtCore.QObject)):
def __new__(cls, name, bases, attrs):
for key in list(attrs.keys()):
attr = attrs[key]
if not isinstance(attr, Property):
continue
initial_value = attr.initial_value
type_ = type(initial_value)
notifier = QtCore.Signal(type_)
attrs[key] = PropertyImpl(
initial_value, name=key, type_=type_, notify=notifier
)
attrs[signal_attribute_name(key)] = notifier
return super().__new__(cls, name, bases, attrs)
class Property:
""" Property definition.
This property will be patched by the PropertyMeta metaclass into a PropertyImpl type.
"""
def __init__(self, initial_value, name=""):
self.initial_value = initial_value
self.name = name
class PropertyImpl(QtCore.Property):
""" Actual property implementation using a signal to notify any change. """
def __init__(self, initial_value, name="", type_=None, notify=None):
super().__init__(type_, self.getter, self.setter, notify=notify)
self.initial_value = initial_value
self.name = name
def getter(self, inst):
return getattr(inst, value_attribute_name(self.name), self.initial_value)
def setter(self, inst, value):
last_value = getattr(inst, self.name)
if last_value != value:
setattr(inst, value_attribute_name(self.name), value)
notifier_signal = getattr(inst, signal_attribute_name(self.name))
notifier_signal.emit(value)
def signal_attribute_name(property_name):
""" Return a magic key for the attribute storing the signal name. """
return f"_{property_name}_prop_signal_"
def value_attribute_name(property_name):
""" Return a magic key for the attribute storing the property value. """
return f"_{property_name}_prop_value_"
| 33.03125
| 125
| 0.667928
|
29bb5ecdbe6fd583da3aece9cb12e417a905d199
| 23,839
|
py
|
Python
|
python/tvm/micro/contrib/zephyr.py
|
YSHsieh7777/tvm
|
b51973fb48deb34ff725bf1206f1b683f8bc2773
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 3
|
2019-06-18T12:53:48.000Z
|
2021-11-17T10:56:51.000Z
|
python/tvm/micro/contrib/zephyr.py
|
YSHsieh7777/tvm
|
b51973fb48deb34ff725bf1206f1b683f8bc2773
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
python/tvm/micro/contrib/zephyr.py
|
YSHsieh7777/tvm
|
b51973fb48deb34ff725bf1206f1b683f8bc2773
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1
|
2021-02-06T01:56:20.000Z
|
2021-02-06T01:56:20.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a compiler integration that uses an externally-supplied Zephyr project."""
import collections
import logging
import multiprocessing
import os
import re
import tempfile
import textwrap
import shlex
import shutil
import subprocess
import sys
import yaml
import tvm.micro
from . import base
from .. import compiler
from .. import debugger
from ..transport import debug
from ..transport import file_descriptor
from ..transport import serial
from ..transport import Transport, TransportClosedError, TransportTimeouts
from ..transport import wakeup
_LOG = logging.getLogger(__name__)
class SubprocessEnv(object):
def __init__(self, default_overrides):
self.default_overrides = default_overrides
def run(self, cmd, **kw):
env = dict(os.environ)
for k, v in self.default_overrides.items():
env[k] = v
return subprocess.check_output(cmd, env=env, **kw, universal_newlines=True)
class ProjectNotFoundError(Exception):
"""Raised when the project_dir supplied to ZephyrCompiler does not exist."""
class FlashRunnerNotSupported(Exception):
"""Raised when the FLASH_RUNNER for a project isn't supported by this Zephyr adapter."""
class ZephyrCompiler(tvm.micro.Compiler):
"""A Compiler instance that builds against a pre-existing zephyr project."""
def __init__(
self,
project_dir=None,
board=None,
west_cmd=None,
zephyr_base=None,
zephyr_toolchain_variant=None,
env_vars=None,
):
"""Configure the compiler for use.
Parameters
----------
project_dir : str
Path to the pre-existing Zephyr project.
board : str
Name of the Zephyr board to build for (i.e. passed to `west build -b`)
west_cmd : Optional[list]
If given, argv that invoke the west build tool. Used only for flashing.
zephyr_base : Optional[str]
If given, path to Zephyr, as would normally be present in the ZEPHYR_BASE environment
variable. If not given, consults this environment variable. This value must be set in
one of those two places.
zephyr_toolchain_variant: Optional[str]
If given, overrides the toolchain used by Zephyr. If not given, uses the default
zephyr toolchain. When running on OS X outside of docker, you need to specify this.
env_vars : Optional[Dict[str,str]]
If given, additional environment variables present when invoking west, cmake, or make.
"""
self._project_dir = project_dir
if not os.path.exists(project_dir):
# Raise this error instead of a potentially-more-cryptic compiler error due to a missing
# prj.conf.
raise ProjectNotFoundError(
f"project_dir supplied to ZephyrCompiler does not exist: {project_dir}"
)
self._board = board
if west_cmd is None:
self._west_cmd = [sys.executable, "-mwest.app.main"]
elif isinstance(west_cmd, str):
self._west_cmd = [west_cmd]
elif isinstance(west_cmd, list):
self._west_cmd = west_cmd
else:
raise TypeError("west_cmd: expected string, list, or None; got %r" % (west_cmd,))
env = {}
if zephyr_toolchain_variant is not None:
env["ZEPHYR_TOOLCHAIN_VARIANT"] = zephyr_toolchain_variant
self._zephyr_base = zephyr_base or os.environ["ZEPHYR_BASE"]
assert (
self._zephyr_base is not None
), f"Must specify zephyr_base=, or ZEPHYR_BASE must be in environment variables"
env["ZEPHYR_BASE"] = self._zephyr_base
if env_vars:
env.update(env_vars)
self._subprocess_env = SubprocessEnv(env)
OPT_KEY_TO_CMAKE_DEFINE = {
"cflags": "CFLAGS",
"ccflags": "CXXFLAGS",
"ldflags": "LDFLAGS",
}
@classmethod
def _options_to_cmake_args(cls, options):
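        # Illustrative (assumed) mapping:
        #   {"cflags": ["-O2"], "cmake_args": ["-DFOO=1"]}
        #   -> ["-DEXTRA_CFLAGS=-O2", "-DFOO=1"]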
args = []
for key, define in cls.OPT_KEY_TO_CMAKE_DEFINE.items():
if key in options:
quoted_opts = [shlex.quote(o).replace(";", "\\;") for o in options[key]]
args.append(f'-DEXTRA_{define}={" ".join(quoted_opts)}')
if "cmake_args" in options:
args.extend(options["cmake_args"])
return args
def library(self, output, sources, options=None):
project_name = os.path.basename(output)
if project_name.startswith("lib"):
project_name = project_name[3:]
lib_prj_conf = os.path.join(output, "prj.conf")
if self._project_dir is not None:
project_dir_conf = os.path.join(self._project_dir, "prj.conf")
if os.path.exists(project_dir_conf):
shutil.copy(project_dir_conf, lib_prj_conf)
else:
with open(lib_prj_conf, "w") as prj_conf_f:
prj_conf_f.write("CONFIG_CPLUSPLUS=y\n")
cmakelists_path = os.path.join(output, "CMakeLists.txt")
with open(cmakelists_path, "w") as cmake_f:
sources = " ".join(f'"{o}"' for o in sources)
cmake_f.write(
textwrap.dedent(
f"""\
cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr HINTS $ENV{{ZEPHYR_BASE}})
project({project_name}_prj)
target_sources(app PRIVATE)
zephyr_library_named({project_name})
target_sources({project_name} PRIVATE {sources})
target_sources(app PRIVATE main.c)
target_link_libraries(app PUBLIC {project_name})
"""
)
)
if "include_dirs" in options:
cmake_f.write(
f"target_include_directories({project_name} PRIVATE "
f'{" ".join(os.path.abspath(d) for d in options["include_dirs"])})\n'
)
with open(os.path.join(output, "main.c"), "w"):
pass
# expected not to exist after populate_tvm_libs
build_dir = os.path.join(output, "__tvm_build")
os.mkdir(build_dir)
self._subprocess_env.run(
["cmake", "..", f"-DBOARD={self._board}"] + self._options_to_cmake_args(options),
cwd=build_dir,
)
num_cpus = multiprocessing.cpu_count()
self._subprocess_env.run(
["make", f"-j{num_cpus}", "VERBOSE=1", project_name], cwd=build_dir
)
return tvm.micro.MicroLibrary(build_dir, [f"lib{project_name}.a"])
def _print_make_statistics(self, output):
output = output.splitlines()
lines = iter(output)
for line in lines:
if line.startswith("Memory region"):
# print statistics header
_LOG.info(line)
_LOG.info("--------------------- ---------- ------------ ---------")
line = next(lines)
# while there is a region print it
try:
while ":" in line:
_LOG.info(line)
line = next(lines)
else:
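                    # while/else: the memory-region table has ended, so stop scanning the rest of the output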
break
except StopIteration:
pass
def binary(self, output, objects, options=None, link_main=True, main_options=None):
assert link_main, "Must pass link_main=True"
assert self._project_dir is not None, "Must supply project_dir= to build binaries"
copied_libs = base.populate_tvm_objs(self._project_dir, objects)
# expected not to exist after populate_tvm_objs
cmake_args = [
"cmake",
os.path.abspath(self._project_dir),
f"-DBOARD={self._board}",
] + self._options_to_cmake_args(options)
if "include_dirs" in options:
cmake_args.append(
"-DTVM_INCLUDE_DIRS="
f'{";".join(os.path.abspath(d) for d in options["include_dirs"])}'
)
cmake_args.append(f'-DTVM_LIBS={";".join(copied_libs)}')
self._subprocess_env.run(cmake_args, cwd=output)
make_output = self._subprocess_env.run(["make"], cwd=output)
self._print_make_statistics(make_output)
return tvm.micro.MicroBinary(
output,
binary_file=os.path.join("zephyr", "zephyr.elf"),
debug_files=[os.path.join("zephyr", "zephyr.elf")],
labelled_files={
"cmake_cache": ["CMakeCache.txt"],
"device_tree": [os.path.join("zephyr", "zephyr.dts")],
},
immobile="qemu" in self._board,
)
@property
def flasher_factory(self):
return compiler.FlasherFactory(
ZephyrFlasher,
(self._board,),
dict(
zephyr_base=self._zephyr_base,
project_dir=self._project_dir,
subprocess_env=self._subprocess_env.default_overrides,
west_cmd=self._west_cmd,
),
)
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
CMAKE_BOOL_MAP = dict(
[(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
+ [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
def read_cmake_cache(file_name):
"""Read a CMakeCache.txt-like file and return a dictionary of values."""
entries = collections.OrderedDict()
with open(file_name, encoding="utf-8") as f:
for line in f:
m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
if not m:
continue
if m.group("type") == "BOOL":
value = CMAKE_BOOL_MAP[m.group("value").upper()]
else:
value = m.group("value")
entries[m.group("name")] = value
return entries
class BoardError(Exception):
"""Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
"""Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
class ZephyrFlasher(tvm.micro.compiler.Flasher):
"""A Flasher implementation that delegates to Zephyr/west."""
def __init__(
self,
board,
zephyr_base=None,
project_dir=None,
subprocess_env=None,
nrfjprog_snr=None,
openocd_serial=None,
flash_args=None,
debug_rpc_session=None,
serial_timeouts=None,
west_cmd=None,
):
zephyr_base = zephyr_base or os.environ["ZEPHYR_BASE"]
sys.path.insert(0, os.path.join(zephyr_base, "scripts", "dts"))
try:
import dtlib # pylint: disable=import-outside-toplevel
self._dtlib = dtlib
finally:
sys.path.pop(0)
self._board = board
self._zephyr_base = zephyr_base
self._project_dir = project_dir
self._west_cmd = west_cmd
self._flash_args = flash_args
self._openocd_serial = openocd_serial
self._autodetected_openocd_serial = None
self._subprocess_env = SubprocessEnv(subprocess_env)
self._debug_rpc_session = debug_rpc_session
self._nrfjprog_snr = nrfjprog_snr
self._serial_timeouts = serial_timeouts
def _get_nrf_device_args(self):
nrfjprog_args = ["nrfjprog", "--ids"]
nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
if not nrfjprog_ids.strip("\n"):
raise BoardAutodetectFailed(
f'No attached boards recognized by {" ".join(nrfjprog_args)}'
)
boards = nrfjprog_ids.split("\n")[:-1]
if len(boards) > 1:
if self._nrfjprog_snr is None:
raise BoardError(
"Multiple boards connected; specify one with nrfjprog_snr=: "
f'{", ".join(boards)}'
)
if str(self._nrfjprog_snr) not in boards:
raise BoardError(
f"nrfjprog_snr ({self._nrfjprog_snr}) not found in {nrfjprog_args}: {boards}"
)
return ["--snr", str(self._nrfjprog_snr)]
if not boards:
return []
return ["--snr", boards[0]]
# kwargs passed to usb.core.find to find attached boards for the openocd flash runner.
BOARD_USB_FIND_KW = {
"nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
"stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
}
def openocd_serial(self, cmake_entries):
"""Find the serial port to use for a board with OpenOCD flash strategy."""
if self._openocd_serial is not None:
return self._openocd_serial
if self._autodetected_openocd_serial is None:
import usb # pylint: disable=import-outside-toplevel
find_kw = self.BOARD_USB_FIND_KW[cmake_entries["BOARD"]]
boards = usb.core.find(find_all=True, **find_kw)
serials = []
for b in boards:
serials.append(b.serial_number)
if len(serials) == 0:
raise BoardAutodetectFailed(f"No attached USB devices matching: {find_kw!r}")
serials.sort()
self._autodetected_openocd_serial = serials[0]
_LOG.debug("zephyr openocd driver: autodetected serial %s", serials[0])
return self._autodetected_openocd_serial
def _get_openocd_device_args(self, cmake_entries):
return ["--serial", self.openocd_serial(cmake_entries)]
@classmethod
def _get_flash_runner(cls, cmake_entries):
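        # Prefer the cached ZEPHYR_BOARD_FLASH_RUNNER CMake variable; fall back to the
        # runners.yaml generated by the Zephyr build when the cache entry is absent.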
flash_runner = cmake_entries.get("ZEPHYR_BOARD_FLASH_RUNNER")
if flash_runner is not None:
return flash_runner
with open(cmake_entries["ZEPHYR_RUNNERS_YAML"]) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
return doc["flash-runner"]
def _get_device_args(self, cmake_entries):
flash_runner = self._get_flash_runner(cmake_entries)
if flash_runner == "nrfjprog":
return self._get_nrf_device_args()
if flash_runner == "openocd":
return self._get_openocd_device_args(cmake_entries)
raise BoardError(
f"Don't know how to find serial terminal for board {cmake_entries['BOARD']} with flash "
f"runner {flash_runner}"
)
def flash(self, micro_binary):
cmake_entries = read_cmake_cache(
micro_binary.abspath(micro_binary.labelled_files["cmake_cache"][0])
)
if "qemu" in cmake_entries["BOARD"]:
return ZephyrQemuTransport(micro_binary.base_dir, startup_timeout_sec=30.0)
build_dir = os.path.dirname(
micro_binary.abspath(micro_binary.labelled_files["cmake_cache"][0])
)
# The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
# This is because readback protection is enabled by default when this device is flashed.
# Otherwise, flashing may fail with an error such as the following:
# ERROR: The operation attempted is unavailable due to readback protection in
# ERROR: your device. Please use --recover to unlock the device.
if (
self._board.startswith("nrf5340dk")
and self._get_flash_runner(cmake_entries) == "nrfjprog"
):
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(self._get_nrf_device_args())
self._subprocess_env.run(recover_args, cwd=build_dir)
west_args = (
self._west_cmd
+ ["flash", "--build-dir", build_dir, "--skip-rebuild"]
+ self._get_device_args(cmake_entries)
)
if self._flash_args is not None:
west_args.extend(self._flash_args)
self._subprocess_env.run(west_args, cwd=build_dir)
return self.transport(micro_binary)
def _find_nrf_serial_port(self, cmake_entries):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + self._get_device_args(cmake_entries), encoding="utf-8"
)
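        # Each line of `nrfjprog --com` output is assumed to look roughly like
        # "<serial-number> <port-path> VCOMn"; keep the port path keyed by its VCOM name.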
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
return {"port_path": ports_by_vcom["VCOM2"]}
def _find_openocd_serial_port(self, cmake_entries):
return {"grep": self.openocd_serial(cmake_entries)}
def _find_serial_port(self, micro_binary):
cmake_entries = read_cmake_cache(
micro_binary.abspath(micro_binary.labelled_files["cmake_cache"][0])
)
flash_runner = self._get_flash_runner(cmake_entries)
if flash_runner == "nrfjprog":
return self._find_nrf_serial_port(cmake_entries)
if flash_runner == "openocd":
return self._find_openocd_serial_port(cmake_entries)
raise FlashRunnerNotSupported(
f"Don't know how to deduce serial port for flash runner {flash_runner}"
)
def transport(self, micro_binary):
"""Instantiate the transport for use with non-QEMU Zephyr."""
dt_inst = self._dtlib.DT(
micro_binary.abspath(micro_binary.labelled_files["device_tree"][0])
)
uart_baud = (
dt_inst.get_node("/chosen")
.props["zephyr,console"]
.to_path()
.props["current-speed"]
.to_num()
)
_LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
port_kwargs = self._find_serial_port(micro_binary)
serial_transport = serial.SerialTransport(
timeouts=self._serial_timeouts, baudrate=uart_baud, **port_kwargs
)
if self._debug_rpc_session is None:
return serial_transport
return debug.DebugWrapperTransport(
debugger.RpcDebugger(
self._debug_rpc_session,
debugger.DebuggerFactory(
ZephyrDebugger,
(
" ".join(shlex.quote(x) for x in self._west_cmd),
os.path.dirname(micro_binary.abspath(micro_binary.label("cmake_cache")[0])),
micro_binary.abspath(micro_binary.debug_files[0]),
self._zephyr_base,
),
{},
),
),
serial_transport,
)
class QemuStartupFailureError(Exception):
"""Raised when the qemu pipe is not present within startup_timeout_sec."""
class QemuFdTransport(file_descriptor.FdTransport):
"""An FdTransport subclass that escapes written data to accomodate the QEMU monitor.
It's supposedly possible to disable the monitor, but Zephyr controls most of the command-line
    arguments for QEMU and there are too many options which implicitly enable the monitor, so this
approach seems more robust.
"""
def write_monitor_quit(self):
file_descriptor.FdTransport.write(self, b"\x01x", 1.0)
def close(self):
file_descriptor.FdTransport.close(self)
def timeouts(self):
assert False, "should not get here"
def write(self, data, timeout_sec):
"""Write data, escaping for QEMU monitor."""
to_write = bytearray()
escape_pos = []
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
num_written = file_descriptor.FdTransport.write(self, to_write, timeout_sec)
num_written -= sum(1 if x < num_written else 0 for x in escape_pos)
return num_written
class ZephyrQemuTransport(Transport):
"""The user-facing Zephyr QEMU transport class."""
def __init__(self, base_dir, startup_timeout_sec=5.0, **kwargs):
self.base_dir = base_dir
self.startup_timeout_sec = startup_timeout_sec
self.kwargs = kwargs
self.proc = None
self.fd_transport = None
self.pipe_dir = None
def timeouts(self):
return TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=self.startup_timeout_sec,
session_established_timeout_sec=5.0,
)
def open(self):
self.pipe_dir = tempfile.mkdtemp()
self.pipe = os.path.join(self.pipe_dir, "fifo")
self.write_pipe = os.path.join(self.pipe_dir, "fifo.in")
self.read_pipe = os.path.join(self.pipe_dir, "fifo.out")
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
self.proc = subprocess.Popen(
["make", "run", f"QEMU_PIPE={self.pipe}"],
cwd=self.base_dir,
**self.kwargs,
)
# NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
# limitation on linux. Without this, non-blocking I/O can't use timeouts because named
# FIFO are always considered ready to read when no one has opened them for writing.
self.fd_transport = wakeup.WakeupTransport(
QemuFdTransport(
os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK),
os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK),
self.timeouts(),
),
b"\xfe\xff\xfd\x03\0\0\0\0\0\x02" b"fw",
)
self.fd_transport.open()
def close(self):
if self.fd_transport is not None:
self.fd_transport.child_transport.write_monitor_quit()
self.proc.wait()
self.fd_transport.close()
self.fd_transport = None
if self.proc is not None:
self.proc = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
if self.fd_transport is None:
raise TransportClosedError()
return self.fd_transport.read(n, timeout_sec)
def write(self, data, timeout_sec):
if self.fd_transport is None:
raise TransportClosedError()
return self.fd_transport.write(data, timeout_sec)
class ZephyrDebugger(debugger.GdbDebugger):
"""A Zephyr debugger implementation."""
def __init__(self, west_cmd, build_dir, elf_path, zephyr_base):
super(ZephyrDebugger, self).__init__()
self._west_cmd = shlex.split(west_cmd)
self._build_dir = build_dir
self._elf_path = elf_path
self._zephyr_base = zephyr_base
def popen_kwargs(self):
env = dict(os.environ)
env["ZEPHYR_BASE"] = self._zephyr_base
return dict(
args=self._west_cmd
+ [
"debug",
"--skip-rebuild",
"--build-dir",
self._build_dir,
"--elf-file",
self._elf_path,
],
env=env,
)
| 35.84812
| 100
| 0.608163
|
e8dc330e0cd659f91bc0d09987ce9d2246f68c1b
| 25,508
|
py
|
Python
|
python/ray/tune/logger.py
|
yuanchi2807/ray
|
cf512254bb4bcd71ff1818dff5c868ab10c5f620
|
[
"Apache-2.0"
] | 1
|
2021-09-20T15:45:59.000Z
|
2021-09-20T15:45:59.000Z
|
python/ray/tune/logger.py
|
yuanchi2807/ray
|
cf512254bb4bcd71ff1818dff5c868ab10c5f620
|
[
"Apache-2.0"
] | 53
|
2021-10-06T20:08:04.000Z
|
2022-03-21T20:17:25.000Z
|
python/ray/tune/logger.py
|
yuanchi2807/ray
|
cf512254bb4bcd71ff1818dff5c868ab10c5f620
|
[
"Apache-2.0"
] | 1
|
2022-03-24T22:48:21.000Z
|
2022-03-24T22:48:21.000Z
|
import csv
import json
import logging
import numpy as np
import os
import yaml
from typing import Iterable, TYPE_CHECKING, Dict, List, Optional, TextIO, Type
import ray.cloudpickle as cloudpickle
from ray.tune.callback import Callback
from ray.tune.utils.util import SafeFallbackEncoder
from ray.util.debug import log_once
from ray.tune.result import (
TRAINING_ITERATION,
TIME_TOTAL_S,
TIMESTEPS_TOTAL,
EXPR_PARAM_FILE,
EXPR_PARAM_PICKLE_FILE,
EXPR_PROGRESS_FILE,
EXPR_RESULT_FILE,
)
from ray.tune.utils import flatten_dict
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.tune.trial import Trial # noqa: F401
logger = logging.getLogger(__name__)
tf = None
VALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32, np.int64]
class Logger:
"""Logging interface for ray.tune.
By default, the UnifiedLogger implementation is used which logs results in
multiple formats (TensorBoard, rllab/viskit, plain json, custom loggers)
at once.
Arguments:
config: Configuration passed to all logger creators.
logdir: Directory for all logger creators to log to.
trial (Trial): Trial object for the logger to access.
"""
def __init__(self, config: Dict, logdir: str, trial: Optional["Trial"] = None):
self.config = config
self.logdir = logdir
self.trial = trial
self._init()
def _init(self):
pass
def on_result(self, result):
"""Given a result, appends it to the existing log."""
raise NotImplementedError
def update_config(self, config):
"""Updates the config for logger."""
pass
def close(self):
"""Releases all resources used by this logger."""
pass
def flush(self):
"""Flushes all disk writes to storage."""
pass
class NoopLogger(Logger):
def on_result(self, result):
pass
class JsonLogger(Logger):
"""Logs trial results in json format.
Also writes to a results file and param.json file when results or
configurations are updated. Experiments must be executed with the
JsonLogger to be compatible with the ExperimentAnalysis tool.
"""
def _init(self):
self.update_config(self.config)
local_file = os.path.join(self.logdir, EXPR_RESULT_FILE)
self.local_out = open(local_file, "a")
def on_result(self, result: Dict):
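        # json.dump() is handed `self` as its file-like object: it calls self.write(),
        # which appends to the underlying results file.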
json.dump(result, self, cls=SafeFallbackEncoder)
self.write("\n")
self.local_out.flush()
def write(self, b):
self.local_out.write(b)
def flush(self):
if not self.local_out.closed:
self.local_out.flush()
def close(self):
self.local_out.close()
def update_config(self, config: Dict):
self.config = config
config_out = os.path.join(self.logdir, EXPR_PARAM_FILE)
with open(config_out, "w") as f:
json.dump(self.config, f, indent=2, sort_keys=True, cls=SafeFallbackEncoder)
config_pkl = os.path.join(self.logdir, EXPR_PARAM_PICKLE_FILE)
with open(config_pkl, "wb") as f:
cloudpickle.dump(self.config, f)
class CSVLogger(Logger):
"""Logs results to progress.csv under the trial directory.
Automatically flattens nested dicts in the result dict before writing
to csv:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
def _init(self):
"""CSV outputted with Headers as first set of results."""
progress_file = os.path.join(self.logdir, EXPR_PROGRESS_FILE)
self._continuing = os.path.exists(progress_file)
self._file = open(progress_file, "a")
self._csv_out = None
def on_result(self, result: Dict):
tmp = result.copy()
if "config" in tmp:
del tmp["config"]
result = flatten_dict(tmp, delimiter="/")
if self._csv_out is None:
self._csv_out = csv.DictWriter(self._file, result.keys())
if not self._continuing:
self._csv_out.writeheader()
self._csv_out.writerow(
{k: v for k, v in result.items() if k in self._csv_out.fieldnames}
)
self._file.flush()
def flush(self):
if not self._file.closed:
self._file.flush()
def close(self):
self._file.close()
class TBXLogger(Logger):
"""TensorBoardX Logger.
Note that hparams will be written only after a trial has terminated.
This logger automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
VALID_HPARAMS = (str, bool, int, float, list, type(None))
VALID_NP_HPARAMS = (np.bool8, np.float32, np.float64, np.int32, np.int64)
def _init(self):
try:
from tensorboardX import SummaryWriter
except ImportError:
if log_once("tbx-install"):
logger.info('pip install "ray[tune]" to see TensorBoard files.')
raise
self._file_writer = SummaryWriter(self.logdir, flush_secs=30)
self.last_result = None
def on_result(self, result: Dict):
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
tmp = result.copy()
for k in ["config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION]:
if k in tmp:
del tmp[k] # not useful to log these
flat_result = flatten_dict(tmp, delimiter="/")
path = ["ray", "tune"]
valid_result = {}
for attr, value in flat_result.items():
full_attr = "/".join(path + [attr])
if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not np.isnan(value):
valid_result[full_attr] = value
self._file_writer.add_scalar(full_attr, value, global_step=step)
elif (isinstance(value, list) and len(value) > 0) or (
isinstance(value, np.ndarray) and value.size > 0
):
valid_result[full_attr] = value
# Must be video
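                # (tensorboardX's add_video expects a 5-D tensor shaped (N, T, C, H, W))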
if isinstance(value, np.ndarray) and value.ndim == 5:
self._file_writer.add_video(
full_attr, value, global_step=step, fps=20
)
continue
try:
self._file_writer.add_histogram(full_attr, value, global_step=step)
# In case TensorboardX still doesn't think it's a valid value
# (e.g. `[[]]`), warn and move on.
except (ValueError, TypeError):
if log_once("invalid_tbx_value"):
logger.warning(
"You are trying to log an invalid value ({}={}) "
"via {}!".format(full_attr, value, type(self).__name__)
)
self.last_result = valid_result
self._file_writer.flush()
def flush(self):
if self._file_writer is not None:
self._file_writer.flush()
def close(self):
if self._file_writer is not None:
if self.trial and self.trial.evaluated_params and self.last_result:
flat_result = flatten_dict(self.last_result, delimiter="/")
scrubbed_result = {
k: value
for k, value in flat_result.items()
if isinstance(value, tuple(VALID_SUMMARY_TYPES))
}
self._try_log_hparams(scrubbed_result)
self._file_writer.close()
def _try_log_hparams(self, result):
# TBX currently errors if the hparams value is None.
flat_params = flatten_dict(self.trial.evaluated_params)
scrubbed_params = {
k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)
}
np_params = {
k: v.tolist()
for k, v in flat_params.items()
if isinstance(v, self.VALID_NP_HPARAMS)
}
scrubbed_params.update(np_params)
removed = {
k: v
for k, v in flat_params.items()
if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)
}
if removed:
logger.info(
"Removed the following hyperparameter values when "
"logging to tensorboard: %s",
str(removed),
)
from tensorboardX.summary import hparams
try:
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result
)
self._file_writer.file_writer.add_summary(experiment_tag)
self._file_writer.file_writer.add_summary(session_start_tag)
self._file_writer.file_writer.add_summary(session_end_tag)
except Exception:
logger.exception(
"TensorboardX failed to log hparams. "
"This may be due to an unsupported type "
"in the hyperparameter values."
)
DEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger)
class UnifiedLogger(Logger):
"""Unified result logger for TensorBoard, rllab/viskit, plain json.
Arguments:
config: Configuration passed to all logger creators.
logdir: Directory for all logger creators to log to.
loggers (list): List of logger creators. Defaults to CSV, Tensorboard,
and JSON loggers.
"""
def __init__(
self,
config: Dict,
logdir: str,
trial: Optional["Trial"] = None,
loggers: Optional[List[Type[Logger]]] = None,
):
if loggers is None:
self._logger_cls_list = DEFAULT_LOGGERS
else:
self._logger_cls_list = loggers
if JsonLogger not in self._logger_cls_list:
if log_once("JsonLogger"):
logger.warning(
"JsonLogger not provided. The ExperimentAnalysis tool is "
"disabled."
)
super(UnifiedLogger, self).__init__(config, logdir, trial)
def _init(self):
self._loggers = []
for cls in self._logger_cls_list:
try:
self._loggers.append(cls(self.config, self.logdir, self.trial))
except Exception as exc:
if log_once(f"instantiate:{cls.__name__}"):
logger.warning(
"Could not instantiate %s: %s.", cls.__name__, str(exc)
)
def on_result(self, result):
for _logger in self._loggers:
_logger.on_result(result)
def update_config(self, config):
for _logger in self._loggers:
_logger.update_config(config)
def close(self):
for _logger in self._loggers:
_logger.close()
def flush(self):
for _logger in self._loggers:
_logger.flush()
@PublicAPI
class LoggerCallback(Callback):
"""Base class for experiment-level logger callbacks
This base class defines a general interface for logging events,
like trial starts, restores, ends, checkpoint saves, and receiving
trial results.
Callbacks implementing this interface should make sure that logging
utilities are cleaned up properly on trial termination, i.e. when
``log_trial_end`` is received. This includes e.g. closing files.
"""
def log_trial_start(self, trial: "Trial"):
"""Handle logging when a trial starts.
Args:
trial (Trial): Trial object.
"""
pass
def log_trial_restore(self, trial: "Trial"):
"""Handle logging when a trial restores.
Args:
trial (Trial): Trial object.
"""
pass
def log_trial_save(self, trial: "Trial"):
"""Handle logging when a trial saves a checkpoint.
Args:
trial (Trial): Trial object.
"""
pass
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
"""Handle logging when a trial reports a result.
Args:
trial (Trial): Trial object.
result (dict): Result dictionary.
"""
pass
def log_trial_end(self, trial: "Trial", failed: bool = False):
"""Handle logging when a trial ends.
Args:
trial (Trial): Trial object.
            failed (bool): True if the trial failed (e.g. it raised an
                exception), False if it finished gracefully.
"""
pass
def on_trial_result(
self,
iteration: int,
trials: List["Trial"],
trial: "Trial",
result: Dict,
**info,
):
self.log_trial_result(iteration, trial, result)
def on_trial_start(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_start(trial)
def on_trial_restore(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_restore(trial)
def on_trial_save(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_save(trial)
def on_trial_complete(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_end(trial, failed=False)
def on_trial_error(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_end(trial, failed=True)
class LegacyLoggerCallback(LoggerCallback):
"""Supports logging to trial-specific `Logger` classes.
Previously, Ray Tune logging was handled via `Logger` classes that have
been instantiated per-trial. This callback is a fallback to these
`Logger`-classes, instantiating each `Logger` class for each trial
and logging to them.
Args:
logger_classes (Iterable[Type[Logger]]): Logger classes that should
be instantiated for each trial.
"""
def __init__(self, logger_classes: Iterable[Type[Logger]]):
self.logger_classes = list(logger_classes)
self._class_trial_loggers: Dict[Type[Logger], Dict["Trial", Logger]] = {}
def log_trial_start(self, trial: "Trial"):
trial.init_logdir()
for logger_class in self.logger_classes:
trial_loggers = self._class_trial_loggers.get(logger_class, {})
if trial not in trial_loggers:
logger = logger_class(trial.config, trial.logdir, trial)
trial_loggers[trial] = logger
self._class_trial_loggers[logger_class] = trial_loggers
def log_trial_restore(self, trial: "Trial"):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].flush()
def log_trial_save(self, trial: "Trial"):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].flush()
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].on_result(result)
def log_trial_end(self, trial: "Trial", failed: bool = False):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].close()
class JsonLoggerCallback(LoggerCallback):
"""Logs trial results in json format.
Also writes to a results file and param.json file when results or
configurations are updated. Experiments must be executed with the
JsonLoggerCallback to be compatible with the ExperimentAnalysis tool.
"""
def __init__(self):
self._trial_configs: Dict["Trial", Dict] = {}
self._trial_files: Dict["Trial", TextIO] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_files:
self._trial_files[trial].close()
# Update config
self.update_config(trial, trial.config)
# Make sure logdir exists
trial.init_logdir()
local_file = os.path.join(trial.logdir, EXPR_RESULT_FILE)
self._trial_files[trial] = open(local_file, "at")
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_files:
self.log_trial_start(trial)
json.dump(result, self._trial_files[trial], cls=SafeFallbackEncoder)
self._trial_files[trial].write("\n")
self._trial_files[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial not in self._trial_files:
return
self._trial_files[trial].close()
del self._trial_files[trial]
def update_config(self, trial: "Trial", config: Dict):
self._trial_configs[trial] = config
config_out = os.path.join(trial.logdir, EXPR_PARAM_FILE)
with open(config_out, "w") as f:
json.dump(
self._trial_configs[trial],
f,
indent=2,
sort_keys=True,
cls=SafeFallbackEncoder,
)
config_pkl = os.path.join(trial.logdir, EXPR_PARAM_PICKLE_FILE)
with open(config_pkl, "wb") as f:
cloudpickle.dump(self._trial_configs[trial], f)
class CSVLoggerCallback(LoggerCallback):
"""Logs results to progress.csv under the trial directory.
Automatically flattens nested dicts in the result dict before writing
to csv:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
def __init__(self):
self._trial_continue: Dict["Trial", bool] = {}
self._trial_files: Dict["Trial", TextIO] = {}
self._trial_csv: Dict["Trial", csv.DictWriter] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_files:
self._trial_files[trial].close()
# Make sure logdir exists
trial.init_logdir()
local_file = os.path.join(trial.logdir, EXPR_PROGRESS_FILE)
self._trial_continue[trial] = os.path.exists(local_file)
self._trial_files[trial] = open(local_file, "at")
self._trial_csv[trial] = None
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_files:
self.log_trial_start(trial)
tmp = result.copy()
tmp.pop("config", None)
result = flatten_dict(tmp, delimiter="/")
if not self._trial_csv[trial]:
self._trial_csv[trial] = csv.DictWriter(
self._trial_files[trial], result.keys()
)
if not self._trial_continue[trial]:
self._trial_csv[trial].writeheader()
self._trial_csv[trial].writerow(
{k: v for k, v in result.items() if k in self._trial_csv[trial].fieldnames}
)
self._trial_files[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial not in self._trial_files:
return
del self._trial_csv[trial]
self._trial_files[trial].close()
del self._trial_files[trial]
class TBXLoggerCallback(LoggerCallback):
"""TensorBoardX Logger.
Note that hparams will be written only after a trial has terminated.
This logger automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
VALID_HPARAMS = (str, bool, int, float, list, type(None))
VALID_NP_HPARAMS = (np.bool8, np.float32, np.float64, np.int32, np.int64)
def __init__(self):
try:
from tensorboardX import SummaryWriter
self._summary_writer_cls = SummaryWriter
except ImportError:
if log_once("tbx-install"):
logger.info('pip install "ray[tune]" to see TensorBoard files.')
raise
self._trial_writer: Dict["Trial", SummaryWriter] = {}
self._trial_result: Dict["Trial", Dict] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_writer:
self._trial_writer[trial].close()
trial.init_logdir()
self._trial_writer[trial] = self._summary_writer_cls(
trial.logdir, flush_secs=30
)
self._trial_result[trial] = {}
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_writer:
self.log_trial_start(trial)
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
tmp = result.copy()
for k in ["config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION]:
if k in tmp:
del tmp[k] # not useful to log these
flat_result = flatten_dict(tmp, delimiter="/")
path = ["ray", "tune"]
valid_result = {}
for attr, value in flat_result.items():
full_attr = "/".join(path + [attr])
if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not np.isnan(value):
valid_result[full_attr] = value
self._trial_writer[trial].add_scalar(full_attr, value, global_step=step)
elif (isinstance(value, list) and len(value) > 0) or (
isinstance(value, np.ndarray) and value.size > 0
):
valid_result[full_attr] = value
# Must be video
if isinstance(value, np.ndarray) and value.ndim == 5:
self._trial_writer[trial].add_video(
full_attr, value, global_step=step, fps=20
)
continue
try:
self._trial_writer[trial].add_histogram(
full_attr, value, global_step=step
)
# In case TensorboardX still doesn't think it's a valid value
# (e.g. `[[]]`), warn and move on.
except (ValueError, TypeError):
if log_once("invalid_tbx_value"):
logger.warning(
"You are trying to log an invalid value ({}={}) "
"via {}!".format(full_attr, value, type(self).__name__)
)
self._trial_result[trial] = valid_result
self._trial_writer[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial in self._trial_writer:
if trial and trial.evaluated_params and self._trial_result[trial]:
flat_result = flatten_dict(self._trial_result[trial], delimiter="/")
scrubbed_result = {
k: value
for k, value in flat_result.items()
if isinstance(value, tuple(VALID_SUMMARY_TYPES))
}
self._try_log_hparams(trial, scrubbed_result)
self._trial_writer[trial].close()
del self._trial_writer[trial]
del self._trial_result[trial]
def _try_log_hparams(self, trial: "Trial", result: Dict):
# TBX currently errors if the hparams value is None.
flat_params = flatten_dict(trial.evaluated_params)
scrubbed_params = {
k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)
}
np_params = {
k: v.tolist()
for k, v in flat_params.items()
if isinstance(v, self.VALID_NP_HPARAMS)
}
scrubbed_params.update(np_params)
removed = {
k: v
for k, v in flat_params.items()
if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)
}
if removed:
logger.info(
"Removed the following hyperparameter values when "
"logging to tensorboard: %s",
str(removed),
)
from tensorboardX.summary import hparams
try:
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result
)
self._trial_writer[trial].file_writer.add_summary(experiment_tag)
self._trial_writer[trial].file_writer.add_summary(session_start_tag)
self._trial_writer[trial].file_writer.add_summary(session_end_tag)
except Exception:
logger.exception(
"TensorboardX failed to log hparams. "
"This may be due to an unsupported type "
"in the hyperparameter values."
)
# Maintain backwards compatibility.
from ray.tune.integration.mlflow import ( # noqa: E402
MLflowLogger as _MLflowLogger,
)
MLflowLogger = _MLflowLogger
# The capital L is a typo, but needs to remain for backwards compatibility.
MLFLowLogger = _MLflowLogger
def pretty_print(result):
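    # e.g. (assumed): pretty_print({"training_iteration": 1, "config": {...}}) yields
    # "training_iteration: 1\n"; config and hist_stats are dropped before dumping.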
result = result.copy()
result.update(config=None) # drop config from pretty print
result.update(hist_stats=None) # drop hist_stats from pretty print
out = {}
for k, v in result.items():
if v is not None:
out[k] = v
cleaned = json.dumps(out, cls=SafeFallbackEncoder)
return yaml.safe_dump(json.loads(cleaned), default_flow_style=False)
| 33.607378
| 88
| 0.601968
|
7e6ae4c8def60153196a4e2bedc03f3c8c8584fc
| 6,656
|
py
|
Python
|
qa/rpc-tests/setmaxconnections.py
|
steve-cw-chung/dogecoin
|
35914ee5c76e41777307366f579eecbbad9b35de
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/setmaxconnections.py
|
steve-cw-chung/dogecoin
|
35914ee5c76e41777307366f579eecbbad9b35de
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/setmaxconnections.py
|
steve-cw-chung/dogecoin
|
35914ee5c76e41777307366f579eecbbad9b35de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2021 The Dogecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise setmaxconnections RPC command
#
# from the constants MAX_ADDNODE_CONNECTIONS and PROTECTED_INBOUND_PEERS in src/net.h
MAX_ADDNODE_CONNECTIONS = 8
PROTECTED_INBOUND_PEERS = 4 + 8 + 4 + 4
MINIMUM_CONNECTIONS = MAX_ADDNODE_CONNECTIONS
MINIMUM_CONNECTIONS_INTERNAL = MAX_ADDNODE_CONNECTIONS + PROTECTED_INBOUND_PEERS
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.peer_disconnected = False
self.ping_counter = 0
def add_connection(self, conn):
self.connection = conn
def close(self):
self.connection.handle_close()
self.peer_disconnected = True
class SetMaxConnectionCountTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.nodes = []
self.client_nodes = []
self.num_nodes = 1
def create_testnode(self, node_idx=0):
node = TestNode()
conn = NodeConn('127.0.0.1', p2p_port(node_idx), self.nodes[node_idx], node)
node.add_connection(conn)
return node
def setup_network(self):
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-nmaxconnections=0"]))
self.is_network_split = False
NetworkThread().start()
def wait_for_verack(self, nodes, timeout_seconds=5):
def nodes_veracked():
for node in nodes:
if not node.verack_received:
return False
return True
wait_until(nodes_veracked, timeout=timeout_seconds)
def connect_nodes(self, nodes, count):
for i in range(0, count):
nodes.append(self.create_testnode())
self.wait_for_verack(nodes)
def run_test(self):
self.test_rpc_argument_validation()
        # these numbers must meet or exceed PROTECTED_INBOUND_PEERS
# otherwise there aren't enough nodes to disconnect
self.test_node_connection_changes(20)
self.test_node_connection_changes(30)
# max_count has to be at least 20
# min_count can be closer to 20
self.test_node_disconnections(40, 20)
def test_rpc_argument_validation(self):
first_node = self.nodes[0]
try:
first_node.setmaxconnections()
raise AssertionError("Must check for no parameter provided")
except JSONRPCException as e:
assert("1. maxconnectioncount" in e.error['message'])
try:
first_node.setmaxconnections("good doge bad doge")
raise AssertionError("Must check for no numeric parameter provided")
except JSONRPCException as e:
assert("JSON value is not an integer as expected" in e.error['message'])
try:
first_node.setmaxconnections(-1)
raise AssertionError(f"Must check for parameter value >= {MINIMUM_CONNECTIONS}")
except JSONRPCException as e:
assert(f"maxconnectioncount must be >= {MINIMUM_CONNECTIONS}" in e.error['message'])
try:
first_node.setmaxconnections(7)
raise AssertionError(f"Must check for parameter value >= {MINIMUM_CONNECTIONS}")
except JSONRPCException as e:
assert(f"maxconnectioncount must be >= {MINIMUM_CONNECTIONS}" in e.error['message'])
try:
first_node.setmaxconnections(MINIMUM_CONNECTIONS)
assert(True)
except JSONRPCException as e:
raise AssertionError(f"Must allow parameter value >= {MINIMUM_CONNECTIONS}")
def wait_for_n_disconnections(self, nodes, count, timeout):
def disconnected():
            closed_conns = [node for node in nodes if node.peer_disconnected]
            print(f'Disconnected {len(closed_conns)}, waiting for {count} of {len(nodes)} => {len(closed_conns) >= count}')
return len(closed_conns) >= count
return wait_until(disconnected, timeout=timeout)
def test_node_connection_changes(self, extras):
first_node = self.nodes[0]
# MINIMUM_CONNECTIONS outgoing connections plus 1 feeler
max_connections = 1 + MINIMUM_CONNECTIONS + extras
first_node.setmaxconnections(max_connections)
client_nodes = []
self.connect_nodes(client_nodes, extras)
x = first_node.getconnectioncount()
assert(x <= extras)
# attempt to add more nodes
self.connect_nodes(client_nodes, 3)
# the new nodes should not increase the connection count
x = first_node.getconnectioncount()
assert(x <= extras)
first_node.setmaxconnections(MINIMUM_CONNECTIONS)
        disconnectable_connections = max_connections - MINIMUM_CONNECTIONS_INTERNAL
assert(self.wait_for_n_disconnections(client_nodes, count=disconnectable_connections, timeout=30))
# disconnect to clean up for the next test
for node in client_nodes:
node.close()
def test_node_disconnections(self, max_count, min_count):
first_node = self.nodes[0]
attempted_nodes = []
# MINIMUM_CONNECTIONS outgoing connections plus 1 feeler
# plus 20 connections protected from eviction
first_node.setmaxconnections(20 + 1 + MINIMUM_CONNECTIONS + max_count)
        self.connect_nodes(attempted_nodes, max_count)
x = first_node.getconnectioncount()
assert(x <= max_count)
first_node.setmaxconnections(min_count)
def nodes_disconnected():
disc_count = 0
for node in attempted_nodes:
if node.peer_disconnected:
disc_count += 1
else:
node.sync_with_ping(0.1)
if disc_count < max_count - min_count:
return False
return True
wait_until(nodes_disconnected, timeout=30)
# try asserting this two ways, for debugging the test
x = first_node.getconnectioncount()
assert(x < max_count)
actual_min = max(min_count, MINIMUM_CONNECTIONS_INTERNAL)
assert(x == actual_min)
# disconnect to clean up for the next test
for node in attempted_nodes:
node.close()
if __name__ == '__main__':
SetMaxConnectionCountTest().main()
| 34.848168
| 114
| 0.662109
|
481b509e4cb5c60412e24a54570f60e996a90d57
| 8,268
|
py
|
Python
|
tests/sources/test_license.py
|
junjihashimoto/webots
|
12eb8c010275f390ae97d91d5c04906ffa00c262
|
[
"Apache-2.0"
] | null | null | null |
tests/sources/test_license.py
|
junjihashimoto/webots
|
12eb8c010275f390ae97d91d5c04906ffa00c262
|
[
"Apache-2.0"
] | null | null | null |
tests/sources/test_license.py
|
junjihashimoto/webots
|
12eb8c010275f390ae97d91d5c04906ffa00c262
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that checks that all the source files have the Apache 2 license."""
import unittest
import datetime
import os
import fnmatch
from io import open
APACHE2_LICENSE_C = """/*
* Copyright 1996-20XX Cyberbotics Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/""".replace('20XX', str(datetime.datetime.now().year))
APACHE2_LICENSE_CPP = """// Copyright 1996-20XX Cyberbotics Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.""".replace('20XX', str(datetime.datetime.now().year))
APACHE2_LICENSE_PYTHON = """# Copyright 1996-20XX Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.""".replace('20XX', str(datetime.datetime.now().year))
PYTHON_OPTIONAL_HEADER = """#!/usr/bin/env python
"""
PYTHON3_OPTIONAL_HEADER = """#!/usr/bin/env python3
"""
class TestLicense(unittest.TestCase):
"""Unit test for checking that all the source files have the Apache 2 license."""
def setUp(self):
"""Get all the source files which require a license check."""
directories = [
'src/lib/Controller',
'src/webots',
'src/wren',
'projects',
'include/controller',
'include/plugins',
'resources/languages/cpp',
'scripts'
]
skippedDirectoryPaths = [
'src/webots/external',
'projects/default/controllers/ros/include',
'projects/default/resources/sumo',
'projects/default/libraries/vehicle/java',
'projects/default/libraries/vehicle/python',
'projects/humans/c3d/controllers/c3d_viewer',
'projects/languages/ros/controllers/ros_python/kinetic',
'projects/languages/ros/controllers/ros_python/python',
'projects/robots/epfl/lis/controllers/blimp',
'projects/robots/epfl/lis/plugins/physics/blimp_physics',
'projects/robots/gctronic/e-puck/transfer/library',
'projects/robots/gctronic/e-puck/transfer/xc16',
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba',
'projects/robots/mobsya/thymio/libraries/dashel',
'projects/robots/mobsya/thymio/libraries/dashel-src',
'projects/robots/robotis/darwin-op/libraries/robotis-op2/robotis/Framework',
'projects/robots/robotis/darwin-op/libraries/robotis-op2/robotis/Linux',
'projects/robots/robotis/darwin-op/remote_control/libjpeg-turbo',
'projects/robots/robotis/darwin-op/libraries',
'projects/samples/robotbenchmark',
'projects/vehicles/controllers/ros_automobile/include'
]
skippedFilePaths = [
'projects/robots/gctronic/e-puck/controllers/e-puck2_server/play_melody.c',
'projects/robots/gctronic/e-puck/controllers/e-puck2_server/play_melody.h'
]
skippedDirectories = [
'build'
]
extensions = ['*.c', '*.cpp', '*.h', '*.hpp', '*.py', '*.java', 'Makefile']
self.sources = []
for directory in directories:
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME'] + os.sep + directory.replace('/', os.sep)):
shouldContinue = False
relativeRootPath = rootPath.replace(os.environ['WEBOTS_HOME'] + os.sep, '')
for path in skippedDirectoryPaths:
if rootPath.startswith(os.environ['WEBOTS_HOME'] + os.sep + path.replace('/', os.sep)):
shouldContinue = True
break
currentDirectories = rootPath.replace(os.environ['WEBOTS_HOME'], '').split(os.sep)
                for skippedDirectory in skippedDirectories:
                    if skippedDirectory in currentDirectories:
shouldContinue = True
break
                if fileNames == ['__init__.py']:
shouldContinue = True
if shouldContinue:
continue
for extension in extensions:
for fileName in fnmatch.filter(fileNames, extension):
if os.path.join(relativeRootPath, fileName).replace(os.sep, '/') in skippedFilePaths:
continue
file = os.path.join(rootPath, fileName)
self.sources.append(file)
def test_sources_have_license(self):
"""Test that sources have the license."""
for source in self.sources:
with open(source, 'r', encoding='utf-8') as content_file:
content = content_file.read()
if source.endswith('.c') or source.endswith('.h'):
self.assertTrue(
content.startswith(APACHE2_LICENSE_C),
msg='Source file "%s" doesn\'t contain the correct Apache 2.0 License:\n%s' %
(source, APACHE2_LICENSE_C)
)
elif source.endswith('.cpp') or source.endswith('.hpp') or source.endswith('.java'):
self.assertTrue(
content.startswith(APACHE2_LICENSE_CPP),
msg='Source file "%s" doesn\'t contain the correct Apache 2.0 License:\n%s' %
(source, APACHE2_LICENSE_CPP)
)
elif source.endswith('.py') or source.endswith('Makefile'):
self.assertTrue(
content.startswith(APACHE2_LICENSE_PYTHON) or
content.startswith(PYTHON_OPTIONAL_HEADER + APACHE2_LICENSE_PYTHON) or
content.startswith(PYTHON3_OPTIONAL_HEADER + APACHE2_LICENSE_PYTHON),
msg='Source file "%s" doesn\'t contain the correct Apache 2.0 License:\n%s' %
(source, APACHE2_LICENSE_PYTHON)
)
else:
self.assertTrue(
False,
msg='Unsupported file extension "%s".' % source
)
if __name__ == '__main__':
unittest.main()
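# Hedged usage note (not part of the original file): this suite expects the WEBOTS_HOME
# environment variable to point at a Webots source checkout, since all lookups are made
# relative to it.  A typical invocation would be something like:
#
#   WEBOTS_HOME=/path/to/webots python <this_file>.py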
| 43.978723
| 126
| 0.620827
|
05e48ddac63f21b99f039b1744b101ef5fd78816
| 8,166
|
py
|
Python
|
mmedit/models/backbones/sr_backbones/tof.py
|
rivergold/mmediting
|
fd972635c48bb065db29d1b5090592a87c7263d2
|
[
"Apache-2.0"
] | null | null | null |
mmedit/models/backbones/sr_backbones/tof.py
|
rivergold/mmediting
|
fd972635c48bb065db29d1b5090592a87c7263d2
|
[
"Apache-2.0"
] | null | null | null |
mmedit/models/backbones/sr_backbones/tof.py
|
rivergold/mmediting
|
fd972635c48bb065db29d1b5090592a87c7263d2
|
[
"Apache-2.0"
] | 2
|
2021-09-07T05:21:18.000Z
|
2021-09-17T22:34:54.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import load_checkpoint
from mmedit.models.common import flow_warp
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
class BasicModule(nn.Module):
"""Basic module of SPyNet.
Note that unlike the common spynet architecture, the basic module
here contains batch normalization.
"""
def __init__(self):
super(BasicModule, self).__init__()
self.basic_module = nn.Sequential(
ConvModule(
in_channels=8,
out_channels=32,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=32,
out_channels=64,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=64,
out_channels=32,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=32,
out_channels=16,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=16,
out_channels=2,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=None))
def forward(self, tensor_input):
"""
Args:
tensor_input (Tensor): Input tensor with shape (b, 8, h, w).
8 channels contain:
[reference image (3), neighbor image (3), initial flow (2)].
Returns:
Tensor: Estimated flow with shape (b, 2, h, w)
"""
return self.basic_module(tensor_input)
class SPyNet(nn.Module):
"""SPyNet architecture.
Note that this implementation is specifically for TOFlow. It differs from
the common SPyNet in the following aspects:
1. The basic modules here contain BatchNorm.
2. Normalization and denormalization are not done here, as
they are done in TOFlow.
Paper:
Optical Flow Estimation using a Spatial Pyramid Network
Code reference:
https://github.com/Coldog2333/pytoflow
"""
def __init__(self, load_path=None):
super(SPyNet, self).__init__()
self.basic_module = nn.ModuleList([BasicModule() for _ in range(4)])
def forward(self, ref, supp):
"""
Args:
ref (Tensor): Reference image with shape of (b, 3, h, w).
supp: The supporting image to be warped: (b, 3, h, w).
Returns:
Tensor: Estimated optical flow: (b, 2, h, w).
"""
num_batches, _, h, w = ref.size()
ref = [ref]
supp = [supp]
# generate downsampled frames
for _ in range(3):
ref.insert(
0,
F.avg_pool2d(
input=ref[0],
kernel_size=2,
stride=2,
count_include_pad=False))
supp.insert(
0,
F.avg_pool2d(
input=supp[0],
kernel_size=2,
stride=2,
count_include_pad=False))
# flow computation
flow = ref[0].new_zeros(num_batches, 2, h // 16, w // 16)
for i in range(4):
flow_up = F.interpolate(
input=flow,
scale_factor=2,
mode='bilinear',
align_corners=True) * 2.0
flow = flow_up + self.basic_module[i](
torch.cat([
ref[i],
flow_warp(supp[i], flow_up.permute(0, 2, 3, 1)), flow_up
], 1))
return flow
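# Hedged usage sketch (not part of the original file).  SPyNet builds a 4-level pyramid with
# average pooling, so the input height and width should be divisible by 16; the shapes below
# are assumptions chosen only for illustration.
#
#   spynet = SPyNet()
#   ref = torch.rand(1, 3, 64, 64)    # reference frame, already normalized by the caller
#   supp = torch.rand(1, 3, 64, 64)   # supporting frame to be warped toward the reference
#   flow = spynet(ref, supp)          # -> tensor of shape (1, 2, 64, 64)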
@BACKBONES.register_module()
class TOFlow(nn.Module):
"""PyTorch implementation of TOFlow.
    In TOFlow, the LR frames are pre-upsampled and have the same size as
    the GT frames.
Paper: Xue et al., Video Enhancement with Task-Oriented Flow, IJCV 2018
Code reference:
1. https://github.com/anchen1011/toflow
2. https://github.com/Coldog2333/pytoflow
Args:
adapt_official_weights (bool): Whether to adapt the weights translated
from the official implementation. Set to false if you want to
train from scratch. Default: False
"""
def __init__(self, adapt_official_weights=False):
super(TOFlow, self).__init__()
self.adapt_official_weights = adapt_official_weights
self.ref_idx = 0 if adapt_official_weights else 3
# The mean and std are for img with range (0, 1)
self.register_buffer(
'mean',
torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer(
'std',
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
# flow estimation module
self.spynet = SPyNet()
# reconstruction module
self.conv1 = nn.Conv2d(3 * 7, 64, 9, 1, 4)
self.conv2 = nn.Conv2d(64, 64, 9, 1, 4)
self.conv3 = nn.Conv2d(64, 64, 1)
self.conv4 = nn.Conv2d(64, 3, 1)
# activation function
self.relu = nn.ReLU(inplace=True)
def normalize(self, img):
"""Normalize the input image.
Args:
img (Tensor): Input image.
Returns:
Tensor: Normalized image.
"""
return (img - self.mean) / self.std
def denormalize(self, img):
"""Denormalize the output image.
Args:
img (Tensor): Output image.
Returns:
Tensor: Denormalized image.
"""
return img * self.std + self.mean
def forward(self, lrs):
"""
Args:
lrs: Input lr frames: (b, 7, 3, h, w).
Returns:
Tensor: SR frame: (b, 3, h, w).
"""
# In the official implementation, the 0-th frame is the reference frame
if self.adapt_official_weights:
lrs = lrs[:, [3, 0, 1, 2, 4, 5, 6], :, :, :]
num_batches, num_lrs, _, h, w = lrs.size()
lrs = self.normalize(lrs.view(-1, 3, h, w))
lrs = lrs.view(num_batches, num_lrs, 3, h, w)
lr_ref = lrs[:, self.ref_idx, :, :, :]
lr_aligned = []
for i in range(7): # 7 frames
if i == self.ref_idx:
lr_aligned.append(lr_ref)
else:
lr_supp = lrs[:, i, :, :, :]
flow = self.spynet(lr_ref, lr_supp)
lr_aligned.append(flow_warp(lr_supp, flow.permute(0, 2, 3, 1)))
# reconstruction
hr = torch.stack(lr_aligned, dim=1)
hr = hr.view(num_batches, -1, h, w)
hr = self.relu(self.conv1(hr))
hr = self.relu(self.conv2(hr))
hr = self.relu(self.conv3(hr))
hr = self.conv4(hr) + lr_ref
return self.denormalize(hr)
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass # use default initialization
else:
raise TypeError('"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
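# Hedged usage sketch (not part of the original file); the tensor sizes are assumptions.
# TOFlow takes 7 pre-upsampled LR frames and returns one restored frame:
#
#   model = TOFlow(adapt_official_weights=False)
#   model.init_weights(pretrained=None)
#   lrs = torch.rand(1, 7, 3, 64, 64)   # (batch, 7 frames, RGB, height, width)
#   out = model(lrs)                    # -> tensor of shape (1, 3, 64, 64)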
| 31.167939
| 79
| 0.52841
|
f15bfe153051ea5c40d291d1aa581aef1a7e1f03
| 5,477
|
py
|
Python
|
src/HavokMud/logging_support.py
|
Beirdo/HavokMud-redux
|
c681a320a99f299744abb2ef5b728779ad5d7710
|
[
"MIT"
] | null | null | null |
src/HavokMud/logging_support.py
|
Beirdo/HavokMud-redux
|
c681a320a99f299744abb2ef5b728779ad5d7710
|
[
"MIT"
] | null | null | null |
src/HavokMud/logging_support.py
|
Beirdo/HavokMud-redux
|
c681a320a99f299744abb2ef5b728779ad5d7710
|
[
"MIT"
] | null | null | null |
import logging
import logging.config
import os
LOGDIR = "/var/log/havokmud"
class AccountLogMessage(object):
def __init__(self, account, message, _global=False):
self.ip = account.ip_address
self.email = account.email
self.message = message
self._global = _global
def __str__(self):
return self.message
class PlayerLogMessage(AccountLogMessage):
def __init__(self, player, message, _global=False, account=False):
AccountLogMessage.__init__(self, player.account, message, _global)
self.player = player.name
self._account = account
class LogFilter(logging.Filter):
def __init__(self, logType=None):
logging.Filter.__init__(self)
if logType is None:
logType = "global"
self.logType = logType
def filter(self, record):
record.logType = self.logType
message = record.msg
logType = self.logType
if isinstance(message, str):
if logType in ["global", "all"]:
record.ip = "-"
record.email = "-"
record.player = "-"
return True
if isinstance(message, AccountLogMessage):
# noinspection PyProtectedMember
if logType in ["account", "all"] or (logType == "global" and message._global):
record.ip = message.ip
record.email = message.email
record.player = "-"
return True
if isinstance(message, PlayerLogMessage):
# noinspection PyProtectedMember
            if logType in ["player", "all"] or (logType == 'global' and message._global) \
                    or (logType == "account" and message._account):
record.ip = message.ip
record.email = message.email
record.player = message.player
return True
return False
class AccountLogHandler(logging.Handler):
files = {}
def emit(self, record):
msg = self.format(record)
fp = self.files.get(record.email, None)
if not fp:
filename = os.path.join(LOGDIR, "account-%s.log" % record.email)
fp = open(filename, "a")
self.files[record.email] = fp
fp.write(msg + "\n")
fp.flush()
def closeEmail(self, email):
fp = self.files.pop(email, None)
if fp:
fp.close()
class PlayerLogHandler(logging.Handler):
files = {}
def emit(self, record):
msg = self.format(record)
fp = self.files.get(record.player, None)
if not fp:
filename = os.path.join(LOGDIR, "player-%s.log" % record.player)
fp = open(filename, "a")
self.files[record.player] = fp
fp.write(msg + "\n")
fp.flush()
def closePlayer(self, player):
fp = self.files.pop(player, None)
if fp:
fp.close()
def logging_setup(logLevel, console=True):
logConfig = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"allFilter": {
"()": LogFilter,
"logType": "all",
},
"globalFilter": {
"()": LogFilter,
"logType": "global",
},
"accountFilter": {
"()": LogFilter,
"logType": "account",
},
"playerFilter": {
"()": LogFilter,
"logType": "player",
}
},
"handlers": {
'console': {
'class': 'logging.StreamHandler',
'filters': ['allFilter'],
'level': logLevel,
'formatter': 'console',
},
'global': {
'class': 'logging.FileHandler',
'filters': ['globalFilter'],
'level': logLevel,
'formatter': 'default',
'filename': os.path.join(LOGDIR, 'global.log'),
'mode': "a",
'encoding': 'utf-8',
},
'account': {
'class': 'HavokMud.logging_support.AccountLogHandler',
'filters': ['accountFilter'],
'level': logging.DEBUG,
'formatter': 'default',
},
'player': {
'class': 'HavokMud.logging_support.PlayerLogHandler',
'filters': ['playerFilter'],
'level': logging.DEBUG,
'formatter': 'default',
}
},
"formatters": {
"default": {
"format": '%(asctime)s %(levelname)s [PID %(process)d] (%(name)s:%(lineno)d) %(ip)s %(email)s '
'%(player)s %(message)s',
},
"console": {
"format": '%(asctime)s %(levelname)s (%(name)s:%(lineno)d) %(ip)s %(email)s %(player)s %(message)s'
}
},
"root": {
'handlers': ['global', 'account', 'player'],
'level': logging.DEBUG,
}
}
if console:
logConfig["root"]["handlers"].append("console")
os.makedirs(LOGDIR, 0o1777, exist_ok=True)
logging.config.dictConfig(logConfig)
def logging_additional_setup(logLevelConfig):
for (module, levelname) in logLevelConfig.items():
logging.getLogger(module).setLevel(getattr(logging, levelname, "DEBUG"))
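# Hedged usage sketch (not part of the original file).  The 'account' object below is an
# assumption: anything exposing 'ip_address' and 'email' attributes, as AccountLogMessage expects,
# and the module name passed to logging_additional_setup is illustrative only.
#
#   logging_setup(logging.INFO, console=True)
#   logging_additional_setup({"HavokMud.network": "WARNING"})
#   logger = logging.getLogger(__name__)
#   logger.info("server started")                               # plain strings go to global.log
#   logger.info(AccountLogMessage(account, "logged in"))        # routed to account-<email>.log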
| 30.259669
| 115
| 0.50356
|
edeacde1e693fdf92d440db9887e835d1b86a4c9
| 12,180
|
py
|
Python
|
photoshop/views.py
|
kmranrg/BabyPhotoshop
|
52efb2bad9f46e6c2d1351aae0b1e1587a98d736
|
[
"BSD-3-Clause"
] | null | null | null |
photoshop/views.py
|
kmranrg/BabyPhotoshop
|
52efb2bad9f46e6c2d1351aae0b1e1587a98d736
|
[
"BSD-3-Clause"
] | null | null | null |
photoshop/views.py
|
kmranrg/BabyPhotoshop
|
52efb2bad9f46e6c2d1351aae0b1e1587a98d736
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
from os import path, remove
from PIL import ImageEnhance, Image, ImageFilter
# Create your views here.
def index(request):
return render(request, 'index.html')
def sharpness(sharpnessValue, imageFileName):
imageFileName = str(imageFileName)
sharpnessValue = float(sharpnessValue)
img = Image.open(imageFileName)
enhancer = ImageEnhance.Sharpness(img)
img = enhancer.enhance(sharpnessValue)
if imageFileName[-3:] == "jpg":
img.save("media/sharpness_kumar_anurag.jpg")
return "jpg"
elif imageFileName[-3:] == "png":
img.save("media/sharpness_kumar_anurag.png")
return "png"
elif imageFileName[-4:] == "jpeg":
img.save("media/sharpness_kumar_anurag.jpeg")
return "jpeg"
else:
return "fileNotSaved"
def saturate(saturationValue, imageFileName):
imageFileName = str(imageFileName)
saturationValue = float(saturationValue)
img = Image.open(imageFileName)
color = ImageEnhance.Color(img)
img = color.enhance(saturationValue)
if imageFileName[-3:] == "jpg":
img.save("media/saturated_kumar_anurag.jpg")
return "jpg"
elif imageFileName[-3:] == "png":
img.save("media/saturated_kumar_anurag.png")
return "png"
elif imageFileName[-4:] == "jpeg":
img.save("media/saturated_kumar_anurag.jpeg")
return "jpeg"
else:
return "fileNotSaved"
def transpose(transposeValue, imageFileName):
imageFileName = str(imageFileName)
transposeValue = str(transposeValue)
img = Image.open(imageFileName)
if transposeValue == "FLIP_LEFT_RIGHT":
img = img.transpose(Image.FLIP_LEFT_RIGHT)
elif transposeValue == "FLIP_TOP_BOTTOM":
img = img.transpose(Image.FLIP_TOP_BOTTOM)
elif transposeValue == "ROTATE_90":
img = img.transpose(Image.ROTATE_90)
elif transposeValue == "ROTATE_180":
img = img.transpose(Image.ROTATE_180)
elif transposeValue == "ROTATE_270":
img = img.transpose(Image.ROTATE_270)
else:
return "fileNotSaved"
if imageFileName[-3:] == "jpg":
img.save("media/transposed_kumar_anurag.jpg")
return "jpg"
elif imageFileName[-3:] == "png":
img.save("media/transposed_kumar_anurag.png")
return "png"
elif imageFileName[-4:] == "jpeg":
img.save("media/transposed_kumar_anurag.jpeg")
return "jpeg"
else:
return "fileNotSaved"
def brightness(brightnessValue, imageFileName):
imageFileName = str(imageFileName)
brightnessValue = float(brightnessValue)
img = Image.open(imageFileName)
brightness = ImageEnhance.Brightness(img)
img = brightness.enhance(brightnessValue)
if imageFileName[-3:] == "jpg":
img.save("media/brightness_kumar_anurag.jpg")
return "jpg"
elif imageFileName[-3:] == "png":
img.save("media/brightness_kumar_anurag.png")
return "png"
elif imageFileName[-4:] == "jpeg":
img.save("media/brightness_kumar_anurag.jpeg")
return "jpeg"
else:
return "fileNotSaved"
def contrast(contrastValue, imageFileName):
imageFileName = str(imageFileName)
contrastValue = float(contrastValue)
img = Image.open(imageFileName)
contrast = ImageEnhance.Contrast(img)
img = contrast.enhance(contrastValue)
if imageFileName[-3:] == "jpg":
img.save("media/contrast_kumar_anurag.jpg")
return "jpg"
elif imageFileName[-3:] == "png":
img.save("media/contrast_kumar_anurag.png")
return "png"
elif imageFileName[-4:] == "jpeg":
img.save("media/contrast_kumar_anurag.jpeg")
return "jpeg"
else:
return "fileNotSaved"
def gaussianBlur(gblurValue, imageFileName):
imageFileName = str(imageFileName)
gblurValue = float(gblurValue)
img = Image.open(imageFileName)
img = img.filter(ImageFilter.GaussianBlur(radius=gblurValue))
if imageFileName[-3:] == "jpg":
img.save("media/gblur_kumar_anurag.jpg")
return "jpg"
elif imageFileName[-3:] == "png":
img.save("media/gblur_kumar_anurag.png")
return "png"
elif imageFileName[-4:] == "jpeg":
img.save("media/gblur_kumar_anurag.jpeg")
return "jpeg"
else:
return "fileNotSaved"
def convert_to_pdf(imageFileName):
imageFileName = str(imageFileName)
img = Image.open(imageFileName)
try:
img.save("media/pdf_kumar_anurag.pdf")
return "pdf"
except ValueError:
rgb = Image.new('RGB', img.size, (255, 255, 255)) # white background
rgb.paste(img, mask=img.split()[3]) # paste using alpha channel as mask
        rgb.save("media/pdf_kumar_anurag.pdf", 'PDF', resolution=100.0)
return "pdf"
    except Exception:
return "fileNotSaved"
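# Hedged usage sketch (not part of the original file); the input file name is an assumption.
# The ValueError fallback above flattens an RGBA image onto a white background before saving,
# since PIL cannot write an alpha channel to PDF directly.
#
#   convert_to_pdf("media/photo_with_alpha.png")   # writes media/pdf_kumar_anurag.pdf, returns "pdf"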
def photo_pdf(request):
usr_uploaded_file = ""
if request.method == 'POST':
uploaded_file = request.FILES['imageFileForPDF']
usr_uploaded_file = str(uploaded_file.name)
# deleting the file if filename already exists
if path.exists("media/"+usr_uploaded_file):
remove("media/"+usr_uploaded_file)
# saving the file
fs = FileSystemStorage()
fs.save(uploaded_file.name, uploaded_file)
# calling convert_to_pdf function
fileType = convert_to_pdf("media/"+usr_uploaded_file)
if fileType == "fileNotSaved":
usr_uploaded_file = "File not uploaded. Please upload jpg, png and jpeg file formats only"
else:
usr_uploaded_file = {"name":uploaded_file.name, "type":fileType}
return render(request, 'photo_pdf.html', {'usr_uploaded_file': usr_uploaded_file})
def photo_gaussian_blur(request):
usr_uploaded_file = ""
usr_gaussianBlurValue = 0
if request.method == 'POST':
uploaded_file = request.FILES['imageFileForGaussianBlur']
usr_uploaded_file = str(uploaded_file.name)
gaussianBlurValue = float(request.POST["gaussianBlurValue"])
usr_gaussianBlurValue = gaussianBlurValue
# deleting the file if filename already exists
if path.exists("media/"+usr_uploaded_file):
remove("media/"+usr_uploaded_file)
# saving the file
fs = FileSystemStorage()
fs.save(uploaded_file.name, uploaded_file)
# calling gaussianBlur function
fileType = gaussianBlur(usr_gaussianBlurValue , "media/"+usr_uploaded_file)
if fileType == "fileNotSaved":
usr_uploaded_file = "File not uploaded. Please upload jpg, png and jpeg file formats only"
else:
usr_uploaded_file = {"name":uploaded_file.name, "type":fileType}
return render(request, 'photo_gaussian_blur.html', {'usr_uploaded_file': usr_uploaded_file})
def photo_contrast(request):
usr_uploaded_file = ""
usr_contrastValue = 0
if request.method == 'POST':
uploaded_file = request.FILES['imageFileForContrast']
usr_uploaded_file = str(uploaded_file.name)
contrastValue = float(request.POST["contrastValue"])
usr_contrastValue = contrastValue
# deleting the file if filename already exists
if path.exists("media/"+usr_uploaded_file):
remove("media/"+usr_uploaded_file)
# saving the file
fs = FileSystemStorage()
fs.save(uploaded_file.name, uploaded_file)
# calling contrast function
fileType = contrast(usr_contrastValue , "media/"+usr_uploaded_file)
if fileType == "fileNotSaved":
usr_uploaded_file = "File not uploaded. Please upload jpg, png and jpeg file formats only"
else:
usr_uploaded_file = {"name":uploaded_file.name, "type":fileType}
return render(request, 'photo_contrast.html', {'usr_uploaded_file': usr_uploaded_file})
def photo_brightness(request):
usr_uploaded_file = ""
usr_brightnessValue = 0
if request.method == 'POST':
uploaded_file = request.FILES['imageFileForBrightness']
usr_uploaded_file = str(uploaded_file.name)
brightnessValue = float(request.POST["brightnessValue"])
usr_brightnessValue = brightnessValue
# deleting the file if filename already exists
if path.exists("media/"+usr_uploaded_file):
remove("media/"+usr_uploaded_file)
# saving the file
fs = FileSystemStorage()
fs.save(uploaded_file.name, uploaded_file)
# calling brightness function
fileType = brightness(usr_brightnessValue , "media/"+usr_uploaded_file)
if fileType == "fileNotSaved":
usr_uploaded_file = "File not uploaded. Please upload jpg, png and jpeg file formats only"
else:
usr_uploaded_file = {"name":uploaded_file.name, "type":fileType}
return render(request, 'photo_brightness.html', {'usr_uploaded_file': usr_uploaded_file})
def photo_transpose(request):
usr_uploaded_file = ""
usr_transposeValue = 0
if request.method == 'POST':
uploaded_file = request.FILES['imageFileForTranspose']
usr_uploaded_file = str(uploaded_file.name)
transposeValue = str(request.POST["transposeValue"])
usr_transposeValue = transposeValue
# deleting the file if filename already exists
if path.exists("media/"+usr_uploaded_file):
remove("media/"+usr_uploaded_file)
# saving the file
fs = FileSystemStorage()
fs.save(uploaded_file.name, uploaded_file)
# calling transpose function
fileType = transpose(usr_transposeValue , "media/"+usr_uploaded_file)
if fileType == "fileNotSaved":
usr_uploaded_file = "File not uploaded. Please upload jpg, png and jpeg file formats only"
else:
usr_uploaded_file = {"name":uploaded_file.name, "type":fileType}
return render(request, 'photo_transpose.html', {'usr_uploaded_file': usr_uploaded_file})
def photo_saturation(request):
usr_uploaded_file = ""
usr_saturationValue = 0
if request.method == 'POST':
uploaded_file = request.FILES['imageFileForSaturation']
usr_uploaded_file = str(uploaded_file.name)
saturationValue = float(request.POST["saturationValue"])
usr_saturationValue = saturationValue
# deleting the file if filename already exists
if path.exists("media/"+usr_uploaded_file):
remove("media/"+usr_uploaded_file)
# saving the file
fs = FileSystemStorage()
fs.save(uploaded_file.name, uploaded_file)
# calling saturate function
fileType = saturate(usr_saturationValue, "media/"+usr_uploaded_file)
if fileType == "fileNotSaved":
usr_uploaded_file = "File not uploaded. Please upload jpg, png and jpeg file formats only"
else:
usr_uploaded_file = {"name":uploaded_file.name, "type":fileType}
return render(request, 'photo_saturation.html', {'usr_uploaded_file': usr_uploaded_file})
def photo_sharpness(request):
usr_uploaded_file = ""
usr_sharpnessValue = 0
if request.method == 'POST':
uploaded_file = request.FILES['imageFileForSharpness']
usr_uploaded_file = str(uploaded_file.name)
sharpnessValue = float(request.POST["sharpnessValue"])
usr_sharpnessValue = sharpnessValue
# deleting the file if filename already exists
if path.exists("media/"+usr_uploaded_file):
remove("media/"+usr_uploaded_file)
# saving the file
fs = FileSystemStorage()
fs.save(uploaded_file.name, uploaded_file)
# calling sharpness function
fileType = sharpness(usr_sharpnessValue , "media/"+usr_uploaded_file)
if fileType == "fileNotSaved":
usr_uploaded_file = "File not uploaded. Please upload jpg, png and jpeg file formats only"
else:
usr_uploaded_file = {"name":uploaded_file.name, "type":fileType}
return render(request, 'photo_sharpness.html', {'usr_uploaded_file': usr_uploaded_file})
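# Hedged sketch (not part of the original file): these views are typically wired up in the
# project's urls.py; the route patterns and names below are assumptions chosen for illustration.
#
#   from django.urls import path
#   from photoshop import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('pdf/', views.photo_pdf, name='photo_pdf'),
#       path('sharpness/', views.photo_sharpness, name='photo_sharpness'),
#   ]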
| 34.213483
| 103
| 0.666092
|
27dbd3e3b8ace7587c10551fec9b3d89a4a402b7
| 10,733
|
py
|
Python
|
python_modules/dagster/dagster/core/types/config_schema.py
|
sd2k/dagster
|
d15542e2be374a0c35f2b1623c1fff98f002c605
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/types/config_schema.py
|
sd2k/dagster
|
d15542e2be374a0c35f2b1623c1fff98f002c605
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/types/config_schema.py
|
sd2k/dagster
|
d15542e2be374a0c35f2b1623c1fff98f002c605
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
from dagster import check
from dagster.config.config_type import ConfigType, ConfigTypeKind
from dagster.core.decorator_utils import (
split_function_parameters,
validate_decorated_fn_positionals,
)
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.utils import ensure_gen, ensure_single_item
from dagster.utils.backcompat import canonicalize_backcompat_args, rename_warning
class DagsterTypeLoader(object):
@property
def schema_type(self):
check.not_implemented(
"Must override schema_type in {klass}".format(klass=type(self).__name__)
)
def compute_loaded_input_version(self, _config_value):
return None
def construct_from_config_value(self, _context, config_value):
"""
How to create a runtime value from config data.
"""
return config_value
def required_resource_keys(self):
return frozenset()
class InputHydrationConfig(DagsterTypeLoader):
def __init__(self):
rename_warning("DagsterTypeLoader", "InputHydrationConfig", "0.10.0")
super(InputHydrationConfig, self).__init__()
class DagsterTypeMaterializer(object):
@property
def schema_type(self):
check.not_implemented(
"Must override schema_type in {klass}".format(klass=type(self).__name__)
)
def materialize_runtime_values(self, _context, _config_value, _runtime_value):
"""
How to materialize a runtime value given configuration.
"""
check.not_implemented("Must implement")
def required_resource_keys(self):
return frozenset()
class OutputMaterializationConfig(DagsterTypeMaterializer):
def __init__(self):
rename_warning("DagsterTypeMaterializer", "OutputMaterializationConfig", "0.10.0")
super(OutputMaterializationConfig, self).__init__()
class DagsterTypeLoaderFromDecorator(DagsterTypeLoader):
def __init__(
self,
config_type,
func,
required_resource_keys,
loader_version=None,
        external_version_fn=lambda _: None,
):
self._config_type = check.inst_param(config_type, "config_type", ConfigType)
self._func = check.callable_param(func, "func")
self._required_resource_keys = check.opt_set_param(
required_resource_keys, "required_resource_keys", of_type=str
)
self._loader_version = loader_version
self._external_version_fn = external_version_fn
@property
def schema_type(self):
return self._config_type
def compute_loaded_input_version(self, config_value):
version = ""
if self._loader_version:
version += str(self._loader_version)
ext_version = self._external_version_fn(config_value)
if ext_version:
version += str(ext_version)
return hashlib.sha1(version.encode("utf-8")).hexdigest()
def construct_from_config_value(self, context, config_value):
return self._func(context, config_value)
def required_resource_keys(self):
return frozenset(self._required_resource_keys)
def _create_type_loader_for_decorator(
    config_type, func, required_resource_keys, loader_version=None, external_version_fn=lambda _: None
):
return DagsterTypeLoaderFromDecorator(
config_type, func, required_resource_keys, loader_version, external_version_fn
)
def input_hydration_config(config_schema=None, required_resource_keys=None, config_cls=None):
"""Deprecated in favor of dagster_type_loader"""
rename_warning("dagster_type_loader", "input_hydration_config", "0.10.0")
config_schema = canonicalize_backcompat_args(
config_schema, "config_schema", config_cls, "config_cls", "0.10.0",
)
return dagster_type_loader(config_schema, required_resource_keys)
def dagster_type_loader(
config_schema,
required_resource_keys=None,
loader_version=None,
    external_version_fn=lambda _: None,
):
    """Create a dagster type loader that maps config data to a runtime value.
The decorated function should take the execution context and parsed config value and return the
appropriate runtime value.
Args:
config_schema (ConfigSchema): The schema for the config that's passed to the decorated
function.
loader_version (str): (Experimental) The version of the decorated compute function. Two
loading functions should have the same version if and only if they deterministically
produce the same outputs when provided the same inputs.
external_version_fn (Callable): A function that takes in the same parameters as the loader
function (config_value) and returns a representation of the version of the external
asset (str). Two external assets with identical versions are treated as identical to one
another.
Examples:
.. code-block:: python
@dagster_type_loader(Permissive())
def load_dict(_context, value):
return value
"""
from dagster.config.field import resolve_to_config_type
config_type = resolve_to_config_type(config_schema)
EXPECTED_POSITIONALS = ["context", "*"]
def wrapper(func):
fn_positionals, _ = split_function_parameters(func, EXPECTED_POSITIONALS)
missing_positional = validate_decorated_fn_positionals(fn_positionals, EXPECTED_POSITIONALS)
if missing_positional:
raise DagsterInvalidDefinitionError(
"@dagster_type_loader '{solid_name}' decorated function does not have required positional "
"parameter '{missing_param}'. Solid functions should only have keyword arguments "
"that match input names and a first positional parameter named 'context'.".format(
solid_name=func.__name__, missing_param=missing_positional
)
)
return _create_type_loader_for_decorator(
config_type, func, required_resource_keys, loader_version, external_version_fn
)
return wrapper
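# Hedged usage sketch (not part of the original file).  The CSV loader and the use of
# os.path.getmtime as an external version are assumptions, shown only to illustrate the
# loader_version / external_version_fn arguments documented above.
#
#   import csv, os
#
#   @dagster_type_loader(str, loader_version="1",
#                        external_version_fn=lambda path: str(os.path.getmtime(path)))
#   def load_rows(_context, path):
#       with open(path) as fd:
#           return list(csv.DictReader(fd))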
def input_selector_schema(config_cls, required_resource_keys=None):
"""
Deprecated in favor of dagster_type_loader.
A decorator for annotating a function that can take the selected properties
    from a ``config_value`` into an instance of a custom type.
Args:
config_cls (Selector)
"""
rename_warning("dagster_type_loader", "input_selector_schema", "0.10.0")
from dagster.config.field import resolve_to_config_type
config_type = resolve_to_config_type(config_cls)
check.param_invariant(config_type.kind == ConfigTypeKind.SELECTOR, "config_cls")
def _wrap(func):
def _selector(context, config_value):
selector_key, selector_value = ensure_single_item(config_value)
return func(context, selector_key, selector_value)
return _create_type_loader_for_decorator(config_type, _selector, required_resource_keys)
return _wrap
class DagsterTypeMaterializerForDecorator(DagsterTypeMaterializer):
def __init__(self, config_type, func, required_resource_keys):
self._config_type = check.inst_param(config_type, "config_type", ConfigType)
self._func = check.callable_param(func, "func")
self._required_resource_keys = check.opt_set_param(
required_resource_keys, "required_resource_keys", of_type=str
)
@property
def schema_type(self):
return self._config_type
def materialize_runtime_values(self, context, config_value, runtime_value):
return ensure_gen(self._func(context, config_value, runtime_value))
def required_resource_keys(self):
return frozenset(self._required_resource_keys)
def _create_output_materializer_for_decorator(config_type, func, required_resource_keys):
return DagsterTypeMaterializerForDecorator(config_type, func, required_resource_keys)
def output_materialization_config(config_schema=None, required_resource_keys=None, config_cls=None):
"""Deprecated in favor of dagster_type_materializer"""
rename_warning("dagster_type_materializer", "output_materialization_config", "0.10.0")
config_schema = canonicalize_backcompat_args(
config_schema, "config_schema", config_cls, "config_cls", "0.10.0",
)
return dagster_type_materializer(config_schema, required_resource_keys)
def dagster_type_materializer(config_schema, required_resource_keys=None):
"""Create an output materialization hydration config that configurably materializes a runtime
value.
    The decorated function should take the execution context, the parsed config value, and the
    runtime value; it should materialize the runtime value and return an appropriate
    :py:class:`AssetMaterialization`.
Args:
config_schema (Any): The type of the config data expected by the decorated function.
Examples:
.. code-block:: python
            # Takes a list of dicts (such as might be read in using csv.DictReader), as well as a
            # config value (a path), and writes the rows to that path.
@dagster_type_materializer(str)
def materialize_df(_context, path, value):
with open(path, 'w') as fd:
writer = csv.DictWriter(fd, fieldnames=value[0].keys())
writer.writeheader()
writer.writerows(rowdicts=value)
return AssetMaterialization.file(path)
"""
from dagster.config.field import resolve_to_config_type
config_type = resolve_to_config_type(config_schema)
return lambda func: _create_output_materializer_for_decorator(
config_type, func, required_resource_keys
)
def output_selector_schema(config_cls, required_resource_keys=None):
"""
Deprecated in favor of dagster_type_materializer.
    A decorator for annotating a function that can take the selected properties
of a ``config_value`` and an instance of a custom type and materialize it.
Args:
config_cls (Selector):
"""
rename_warning("dagster_type_materializer", "output_selector_schema", "0.10.0")
from dagster.config.field import resolve_to_config_type
config_type = resolve_to_config_type(config_cls)
check.param_invariant(config_type.kind == ConfigTypeKind.SELECTOR, "config_cls")
def _wrap(func):
def _selector(context, config_value, runtime_value):
selector_key, selector_value = ensure_single_item(config_value)
return func(context, selector_key, selector_value, runtime_value)
return _create_output_materializer_for_decorator(
config_type, _selector, required_resource_keys
)
return _wrap
| 37.138408
| 107
| 0.724122
|
43c46b118676e930174fb52942f6f952fd173a19
| 33,223
|
py
|
Python
|
aikit/cross_validation.py
|
gfournier/aikit
|
23257f365a4f387cbb86f0ed3994b696a81b57c6
|
[
"BSD-2-Clause"
] | 23
|
2018-09-14T07:29:21.000Z
|
2021-07-08T19:48:23.000Z
|
aikit/cross_validation.py
|
gfournier/aikit
|
23257f365a4f387cbb86f0ed3994b696a81b57c6
|
[
"BSD-2-Clause"
] | 40
|
2019-05-28T09:17:25.000Z
|
2022-01-31T15:53:36.000Z
|
aikit/cross_validation.py
|
gfournier/aikit
|
23257f365a4f387cbb86f0ed3994b696a81b57c6
|
[
"BSD-2-Clause"
] | 11
|
2018-11-21T09:38:45.000Z
|
2020-09-25T10:24:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 09:40:33 2018
@author: Lionel Massoulard
"""
import numpy as np
import pandas as pd
import scipy.sparse as sp
from collections import OrderedDict
from time import time
import numbers
from joblib import Parallel, delayed
import sklearn.model_selection
from sklearn.model_selection._split import BaseCrossValidator, _num_samples, train_test_split
try:
from sklearn.utils.validation import _check_fit_params # In sklearn 0.22
except ImportError:
_check_fit_params = None
if _check_fit_params is None:
from sklearn.model_selection._validation import _index_param_value
def _check_fit_params(X, fit_params, indices=None):
return {k: _index_param_value(X, v, indices) for k,v in fit_params.items()}
import sklearn.base
from aikit.tools.helper_functions import function_has_named_argument
def is_clusterer(estimator):
"""Returns True if the given estimator is (probably) a clusterer.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "clusterer"
def create_cv(cv=3, y=None, classifier=False, shuffle=False, random_state=None):
    """Input checker utility for building a cross-validator. Differences from sklearn.model_selection.check_cv:
* shuffle and random_state params
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
    shuffle : boolean, optional, default False
        if True, shuffle=True is passed to the KFold/StratifiedKFold object
    random_state : int or None, default = None
        will be passed to the KFold/StratifiedKFold object
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, sklearn.model_selection._split.numbers.Integral):
if (
classifier
and (y is not None)
and (sklearn.model_selection._split.type_of_target(y) in ("binary", "multiclass"))
):
return sklearn.model_selection.StratifiedKFold(cv, shuffle=shuffle, random_state=random_state)
else:
return sklearn.model_selection.KFold(cv, shuffle=shuffle, random_state=random_state)
if not hasattr(cv, "split") or isinstance(cv, str):
if not isinstance(cv, sklearn.model_selection._split.Iterable) or isinstance(cv, str):
raise ValueError(
"Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv
)
return sklearn.model_selection._split._CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
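# Hedged usage sketch (not part of the original file); the toy labels are assumptions.
#
#   y = np.array([0, 1, 0, 1, 0, 1])
#   cv = create_cv(cv=3, y=y, classifier=True, shuffle=True, random_state=0)
#   isinstance(cv, sklearn.model_selection.StratifiedKFold)   # -> True
#   cv = create_cv(cv=3, y=None, classifier=False)
#   isinstance(cv, sklearn.model_selection.KFold)             # -> True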
def create_scoring(estimator, scoring):
""" create the scoring object, see sklearn check_scoring function """
if not isinstance(scoring, (tuple, list)) and not isinstance(scoring, dict):
scoring = [scoring]
scorers = OrderedDict()
### Handle scoring ###
if isinstance(scoring, dict):
        # assume a dictionary with key = name of the scorer and value = scoring spec
for k, s in scoring.items():
scorers[k] = sklearn.model_selection._validation.check_scoring(estimator, scoring=s)
else:
def find_name(s):
if s is None:
return "default_score"
return str(s)
for i, s in enumerate(scoring):
k = find_name(s)
            if k in scorers:
                # de-duplicate the scorer name by appending its position in the list
                k = k + ("_%d" % i)
            if k in scorers:
                raise ValueError("duplicate scorer %s" % k)
scorers[k] = sklearn.model_selection._validation.check_scoring(estimator, scoring=s)
return scorers
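# Hedged usage sketch (not part of the original file); the classifier is an assumption.
#
#   from sklearn.linear_model import LogisticRegression
#   scorers = create_scoring(LogisticRegression(), ["accuracy", "roc_auc"])
#   list(scorers)   # -> ['accuracy', 'roc_auc'], an OrderedDict of scorer callables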
def _score_with_group(estimator, X_test, y_test, groups_test, scorer, is_multimetric=False):
"""Compute the score(s) of an estimator on a given test set.
Will return a single float if is_multimetric is False and a dict of floats,
if is_multimetric is True
"""
# Copy of sklearn '_score' but where the 'groups' can be passed to the scorer
if isinstance(y_test, pd.DataFrame):
y_test = y_test.values
if is_multimetric:
return _multimetric_score_with_group(estimator, X_test, y_test, groups_test, scorer)
else:
has_group = groups_test is not None and function_has_named_argument(scorer, "groups")
        # True if:
        #  * groups were passed to the function
        #  * the scorer accepts a 'groups' argument
if y_test is None:
if has_group:
score = scorer(estimator, X_test, groups_test)
else:
score = scorer(estimator, X_test)
else:
if has_group:
score = scorer(estimator, X_test, y_test, groups_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, "item"):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError(
"scoring must return a number, got %s (%s) " "instead. (scorer=%r)" % (str(score), type(score), scorer)
)
return score
def _multimetric_score_with_group(estimator, X_test, y_test, groups_test, scorers):
"""Return a dict of score for multimetric scoring"""
# Copy of sklearn '_multimetric_score' but where the 'groups' can be passed to the scorer
scores = {}
for name, scorer in scorers.items():
has_group = groups_test is not None and function_has_named_argument(scorer, "groups")
if y_test is None:
if has_group:
score = scorer(estimator, X_test, groups_test)
else:
score = scorer(estimator, X_test)
else:
if has_group:
score = scorer(estimator, X_test, y_test, groups_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, "item"):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if isinstance(score, dict):
for k, v in score.items():
scores[str(name) + "_" + str(k)] = v
elif isinstance(score, pd.Series):
for k, v in score.to_dict().items():
scores[str(name) + "_" + str(k)] = v
else:
scores[name] = score
if not isinstance(score, numbers.Number):
raise ValueError(
"scoring must return a number, got %s (%s) "
"instead. (scorer=%s)" % (str(score), type(score), name)
)
return scores
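# Hedged sketch (not part of the original file): a scorer whose signature declares a 'groups'
# parameter will automatically receive groups_test in the two helpers above.  The metric below
# (accuracy computed per group, then averaged across groups) is an assumption for illustration.
#
#   def grouped_accuracy(estimator, X, y, groups):
#       y_pred = estimator.predict(X)
#       correct = pd.Series((y_pred == y).astype(float))
#       return correct.groupby(np.asarray(groups)).mean().mean()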
def _compute_one_fold(
fold_index,
train,
test,
multi_output_proba,
all_classes,
classes,
estimator,
X,
y,
groups,
scorers,
verbose,
fit_params,
return_predict,
method,
no_scoring,
):
if verbose:
print("cv %d started\n" % fold_index)
### Clone the estimator ###
cloned_estimator = sklearn.base.clone(estimator)
### split train test ###
X_train, y_train = sklearn.model_selection._validation._safe_split(estimator, X, y, train)
if groups is not None:
groups_train, _ = sklearn.model_selection._validation._safe_split(estimator, groups, None, train)
else:
groups_train = None
X_test, y_test = sklearn.model_selection._validation._safe_split(estimator, X, y, test, train)
if groups is not None:
groups_test, _ = sklearn.model_selection._validation._safe_split(estimator, groups, None, test, train)
else:
groups_test = None
if hasattr(X_test, "index"):
index_test = X_test.index
else:
index_test = test
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
# Try to subset the fit_params if that is possible, Ex : 'sample_weight=np.array(....)' should be subsetted but not 'epochs=10'
start_fit = time()
### Fit estimator ###
if y_train is None:
if groups_train is not None and function_has_named_argument(cloned_estimator.fit, "groups"):
cloned_estimator.fit(X_train, groups=groups_train, **fit_params)
else:
cloned_estimator.fit(X_train, **fit_params)
else:
if groups_train is not None and function_has_named_argument(cloned_estimator.fit, "groups"):
cloned_estimator.fit(X_train, y_train, groups=groups_train, **fit_params)
else:
cloned_estimator.fit(X_train, y_train, **fit_params)
fit_time = time() - start_fit
result_predict = None
if return_predict:
func = getattr(cloned_estimator, method)
predictions = func(X_test)
## re-alignement with class ##
if method in ("predict_proba", "predict_log_proba", "decision_function"):
def _align_predict(predictions, classes, cloned_estimator_classes_):
float_min = np.finfo(predictions.dtype).min
default_values = {"decision_function": float_min, "predict_log_proba": float_min, "predict_proba": 0}
predictions_for_all_classes = pd.DataFrame(default_values[method], index=index_test, columns=classes)
for j, c in enumerate(cloned_estimator_classes_):
predictions_for_all_classes[c] = predictions[:, j]
return predictions_for_all_classes
if multi_output_proba:
predictions = [
_align_predict(p, c, cloned_c)
for p, c, cloned_c in zip(predictions, all_classes, cloned_estimator.classes_)
]
else:
predictions = _align_predict(predictions, classes, cloned_estimator.classes_)
result_predict = (predictions, test)
result = OrderedDict()
### Score test ###
test_scores_dictionary = None
if not no_scoring:
start_score = time()
test_scores_dictionary = _score_with_group(
cloned_estimator, X_test, y_test, groups_test, scorer=scorers, is_multimetric=True
)
# Here : scorers is a dictionary of scorers, hence is_multimetric = True
score_time = time() - start_score
### Score train ###
train_scores_dictionary = _score_with_group(
cloned_estimator, X_train, y_train, groups_train, scorer=scorers, is_multimetric=True
)
    ### Put everything into a dictionary ###
for k, v in test_scores_dictionary.items():
result["test_%s" % k] = v
for k, v in train_scores_dictionary.items():
result["train_%s" % k] = v
result["fit_time"] = fit_time
if not no_scoring:
result["score_time"] = score_time
result["n_test_samples"] = sklearn.model_selection._validation._num_samples(X_test)
result["fold_nb"] = fold_index
return result, result_predict, test_scores_dictionary
def cross_validation(
estimator,
X,
y,
groups=None,
scoring=None,
cv=None,
verbose=1,
fit_params=None,
return_predict=False,
method=None,
no_scoring=False,
stopping_round=None,
stopping_threshold=None,
approximate_cv=False,
n_jobs=1,
parallel_kwargs=None,
**kwargs
):
    """ Does a cross-validation on a model.
    Modification of sklearn cross-validation; the main differences from the sklearn function are:
    * allows more than one scoring
    * can return scores and probas or predictions
    * returns scores on the test and train set for each fold
    * can bypass the complete cv if scores are too low
    * calls the 'approx_cross_validation' method of the estimator if it exists (allows a specific approximate cv for each estimator)
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, optional, default: None
The groups to use for the CVs
scoring : string or list of string for each scores
        Can also be a dictionary of scorers
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
fit_params : dict or None
Parameters to pass to the fit method of the estimator.
verbose : integer, optional
The verbosity level.
return_predict: boolean, default:False
if True will also return the out-of-sample predictions
method : None or string
        the name of the method to use to return predict ('transform','predict','predict_proba',...). If None, will guess based on the type of estimator
no_scoring : boolean, default: False
        if True, predictions won't be scored and cv_res will be None in that case
stopping_round : int or None
if not None the number of the round on which to start looking if the cv must be stopped (ex: stopping_round = 0, stops after first round)
    stopping_threshold : number or None
        if not None, the value below which we'll stop the CV
approximate_cv : boolean, default:False
        if True, will try to do an approximate cv by calling the method on the estimator (if it exists)
n_jobs : int or None, optional (default=None)
The number of CPUs to use to do the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
    parallel_kwargs : dict or None
        keyword arguments passed to joblib.Parallel to drive parallelization
    **kwargs : keyword arguments to be passed to the method call
Returns
-------
cv_res : pd.DataFrame (None if 'no_scoring = True')
    out-of-sample predictions (only if return_predict is True)
"""
if not return_predict and no_scoring:
raise ValueError("Nothing will be returned")
############################
### Argument preparation ###
############################
### make everything indexable ###
X, y = sklearn.model_selection._validation.indexable(X, y)
if groups is not None:
groups, _ = sklearn.model_selection._validation.indexable(groups, None)
if isinstance(scoring, str):
scoring = [scoring]
### Scoring ###
scorers = None
if not no_scoring:
scorers = create_scoring(estimator, scoring)
# Here : scorers is a dictionary of scorers objects
estimator_is_classifier = sklearn.base.is_classifier(estimator)
estimator_is_regressor = sklearn.base.is_regressor(estimator)
### Checks ###
if not estimator_is_classifier and not estimator_is_regressor:
# This is a transformer
if not return_predict:
raise ValueError("This is a transformer it should only be called with 'return_predict = True'")
if not no_scoring:
raise ValueError("This is a transformer it should only be called with 'no_scoring = True'")
### Handle cv ###
cv = create_cv(cv, y, classifier=estimator_is_classifier, shuffle=True, random_state=123)
### Handle fit params ###
if fit_params is None:
fit_params = {}
#####################################
### Method to use for predictions ###
#####################################
if method is None:
if estimator_is_classifier:
method = "predict_proba"
elif estimator_is_regressor:
method = "predict"
else:
method = "transform"
if hasattr(estimator, "approx_cross_validation") and approximate_cv:
if verbose:
print("use approx_cross_validation of estimator (%s)" % str(type(estimator)))
##########################################################
### estimator can do its own 'approx_cross_validation' ###
##########################################################
result = estimator.approx_cross_validation(
X=X,
y=y,
groups=groups,
cv=cv,
scoring=scoring,
verbose=verbose,
fit_params=fit_params,
return_predict=return_predict,
method=method,
no_scoring=no_scoring,
stopping_round=stopping_round,
stopping_threshold=stopping_threshold,
**kwargs
)
return result
###########################################
### Check that cv_transform is possible ###
###########################################
if method == "transform" and return_predict:
if hasattr(estimator, "can_cv_transform"):
if not estimator.can_cv_transform():
raise ValueError(
"You can't use both method = 'transform' and return_predict for this estimator : %s"
% (str(type(estimator)))
)
##########################################################################
### estimator doesn't have a special 'approx_cross_val_predict' method ###
##########################################################################
prediction_blocks = []
all_results = []
multi_output_proba = False
all_classes = None
classes = None
if method in ("predict_proba", "predict_log_proba", "decision_function"):
if getattr(y, "ndim", 1) == 1 or y.shape[1] == 1:
            classes = np.sort(np.unique(y))  # this alone would not work for multi-output, hence the branch below
multi_output_proba = False
else:
if y.ndim > 2:
raise TypeError("This function doesn't work for y that has %d dimension" % y.ndim)
multi_output_proba = True
all_classes = []
for d in range(y.shape[1]):
if hasattr(y, "iloc"):
classes = np.sort(np.unique(y.iloc[:, d].values))
else:
classes = np.sort(np.unique(y[:, d]))
all_classes.append(classes)
stop_cv = False
max_main_scorer = None
#################
### Main Loop ###
#################
if no_scoring:
try_early_stopping = False
else:
try_early_stopping = stopping_round is not None and stopping_threshold is not None
if not try_early_stopping:
if parallel_kwargs is None:
parallel_kwargs = {}
if "pre_dispatch" not in parallel_kwargs:
parallel_kwargs["pre_dispatch"] = "2*n_jobs"
if "verbose" not in parallel_kwargs:
parallel_kwargs["verbose"] = True
parallel = Parallel(n_jobs=n_jobs, **parallel_kwargs)
temp_results = parallel(
delayed(_compute_one_fold)(
fold_index=fold_index,
train=train,
test=test,
multi_output_proba=multi_output_proba,
all_classes=all_classes,
classes=classes,
estimator=estimator,
X=X,
y=y,
groups=groups,
scorers=scorers,
verbose=verbose,
fit_params=fit_params,
return_predict=return_predict,
method=method,
no_scoring=no_scoring,
)
for fold_index, (train, test) in enumerate(cv.split(X, y, groups=groups))
)
for result, result_prediction, test_scores_dictionary in temp_results:
all_results.append(result)
prediction_blocks.append(result_prediction)
# No early stopping in that case...
else:
if n_jobs > 1 and verbose:
            print("I won't use Parallel since early stopping was requested")
for fold_index, (train, test) in enumerate(cv.split(X, y, groups=groups)):
if verbose:
print("cv %d started\n" % fold_index)
result, result_prediction, test_scores_dictionary = _compute_one_fold(
fold_index=fold_index,
train=train,
test=test,
multi_output_proba=multi_output_proba,
all_classes=all_classes,
classes=classes,
estimator=estimator,
X=X,
y=y,
groups=groups,
scorers=scorers,
verbose=verbose,
fit_params=fit_params,
return_predict=return_predict,
method=method,
no_scoring=no_scoring,
)
all_results.append(result)
prediction_blocks.append(result_prediction)
### Look if I need to stop ###
if not no_scoring:
stop_cv = False
if stopping_round is not None and fold_index >= stopping_round and stopping_threshold is not None:
if isinstance(scoring, list) and scoring[0] in test_scores_dictionary:
main_score_name = scoring[0]
else:
main_score_name = sorted(test_scores_dictionary.keys())[0]
if max_main_scorer is None:
max_main_scorer = test_scores_dictionary[main_score_name]
else:
max_main_scorer = max(max_main_scorer, test_scores_dictionary[main_score_name])
if max_main_scorer <= stopping_threshold:
                        # stop if the best main score seen so far is still below the threshold
stop_cv = True
if stop_cv:
break
### Merge everything together ###
# Concatenate the predictions
if return_predict:
if stop_cv:
predictions = None
if verbose:
print("I can't return predictions since I stopped the CV")
else:
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i for _, indices_i in prediction_blocks])
if not sklearn.model_selection._validation._check_is_permutation(
test_indices, sklearn.model_selection._validation._num_samples(X)
):
if verbose:
                    print("I can't return predictions because this CV isn't a partition (this only works for partitions)")
predictions = None
else:
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
def _concat(predictions, inv_test_indices):
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
predictions = predictions[inv_test_indices]
elif hasattr(predictions[0], "iloc"):
predictions = pd.concat(predictions, axis=0)
predictions = predictions.iloc[inv_test_indices, :]
else:
predictions = np.concatenate(predictions)
predictions = predictions[inv_test_indices]
return predictions
if multi_output_proba:
predictions = [_concat([p[d] for p in predictions], inv_test_indices) for d in range(y.shape[1])]
else:
predictions = _concat(predictions, inv_test_indices)
### Result ###
if not no_scoring:
cv_res = pd.DataFrame(all_results)
else:
cv_res = None
if return_predict:
return cv_res, predictions
else:
return cv_res
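# Hedged usage sketch (not part of the original file); the estimator and data are assumptions.
#
#   from sklearn.datasets import make_classification
#   from sklearn.linear_model import LogisticRegression
#
#   X, y = make_classification(n_samples=100, random_state=0)
#   cv_res, y_proba = cross_validation(LogisticRegression(), X, y,
#                                      scoring=["accuracy", "neg_log_loss"],
#                                      cv=5, return_predict=True, method="predict_proba")
#   cv_res[["fold_nb", "train_accuracy", "test_accuracy", "fit_time"]]
#   y_proba.shape   # out-of-fold probabilities aligned with X, one column per class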
########################################
#### score prediction for clustering ###
########################################
def score_from_params_clustering(
estimator,
X,
scoring=None,
verbose=1,
fit_params=None,
return_predict=False,
method=None,
no_scoring=False,
**kwargs
):
""" scores a clustering model
Parameters
----------
estimator : estimator object implementing 'fit'
The clusterer object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
scoring : string or list of string for each scores
        Can also be a dictionary of scorers
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict or None
Parameters to pass to the fit method of the estimator.
verbose : integer, optional
The verbosity level.
return_predict: boolean, default:False
if True will return the predictions
method : None or string
        the name of the method to use to return predict ('transform','predict'). If None, will guess based on the type of estimator
no_scoring : boolean, default: False
        if True, predictions won't be scored and the result will be None in that case
    **kwargs : keyword arguments to be passed to the method call
Returns
-------
cv_res : pd.DataFrame (None if 'no_scoring = True')
prediction (only if return_predict is True)
"""
if not return_predict and no_scoring:
raise ValueError("Nothing will be returned")
estimator_is_clusterer = is_clusterer(estimator)
### Checks ###
if not estimator_is_clusterer:
# This is a transformer
if not return_predict:
raise ValueError("This is a transformer it should only be called with 'return_predict = True'")
if not no_scoring:
raise ValueError("This is a transformer it should only be called with 'no_scoring = True'")
############################
### Argument preparation ###
############################
if isinstance(scoring, str):
scoring = [scoring]
### Scoring ###
if not no_scoring:
scorers = create_scoring(estimator, scoring)
# Here : scorers is a dictionary of scorers objects
### Handle fit params ###
if fit_params is None:
fit_params = {}
#####################################
### Method to use for predictions ###
#####################################
### TODO: method depends on the scoring function
if method is None:
method = "fit_predict"
# method = "transform"
### Clone the estimator ###
cloned_estimator = sklearn.base.clone(estimator)
start_fit = time()
### Fit estimator ###
pred = cloned_estimator.fit_predict(X, **fit_params)
fit_time = time() - start_fit
if return_predict:
predictions = pred
### Score ###
if not no_scoring:
start_score = time()
scores_dictionnary = _score_with_group(cloned_estimator, X, None, None, scorer=scorers, is_multimetric=True)
# Remark : the predict is actually done twice...
# Here : scorers is a dictionary of scorers, hence is_multimetric = True
score_time = time() - start_score
### Put everything into a dictionary ###
result = OrderedDict()
if not no_scoring:
for k, v in scores_dictionnary.items():
result["test_%s" % k] = v
result["fit_time"] = fit_time
if not no_scoring:
result["score_time"] = score_time
### Result ###
if not no_scoring:
res = pd.DataFrame([result])
else:
res = None
if return_predict:
return res, predictions
else:
return res
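# Hedged usage sketch (not part of the original module): how score_from_params_clustering
# above might be called with a scikit-learn clusterer. The "silhouette" scorer name is an
# assumption about what `create_scoring` accepts in this library.
def _example_score_from_params_clustering():  # pragma: no cover
    import numpy as np
    from sklearn.cluster import KMeans

    X_demo = np.random.randn(100, 4)
    cv_res, labels = score_from_params_clustering(
        KMeans(n_clusters=3, n_init=10),
        X_demo,
        scoring=["silhouette"],
        return_predict=True,
    )
    # cv_res is a one-row DataFrame with 'test_silhouette', 'fit_time' and 'score_time'
    return cv_res, labels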
# In[]
class IndexTrainTestCv(BaseCrossValidator):
""" cv like object but with only 1 fold.
Use this object if you want to specify exactly which observations are used for training and testing.
This object behaves like any cross-validation object, so you can use it in cross-validation functions.
Parameters
----------
test_index : array like
the index of the testing set
"""
def __init__(self, test_index):
self.test_index = test_index
def get_n_splits(self, X, y=None, groups=None):
return 1
def _iter_test_indices(self, X, y=None, groups=None):
yield self.test_index
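# Hedged usage sketch (illustrative only): IndexTrainTestCv yields a single fold,
# so it can be passed anywhere scikit-learn accepts a `cv` argument.
def _example_index_train_test_cv():  # pragma: no cover
    import numpy as np

    X_demo = np.arange(20).reshape(10, 2)
    cv = IndexTrainTestCv(test_index=np.array([7, 8, 9]))
    for train_index, test_index in cv.split(X_demo):
        print(train_index)  # [0 1 2 3 4 5 6]
        print(test_index)   # [7 8 9]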
class RandomTrainTestCv(BaseCrossValidator):
""" cv like object but with only 1 fold
Use this object if you want to do a random train/test split.
This object behaves like any cross-validation object, so you can use it in cross-validation functions.
Parameters
----------
test_size : float, default = 0.1
the percentage of observations to keep in the test set
random_state : int, default = 123
the random state to use to make the random split
"""
def __init__(self, test_size=0.1, random_state=123):
self.test_size = test_size
self.random_state = random_state
def get_n_splits(self, X, y=None, groups=None):
return 1
def _iter_test_indices(self, X, y=None, groups=None):
n = _num_samples(X)
index = np.arange(n)
train_index, test_index = train_test_split(index, test_size=self.test_size, random_state=self.random_state)
yield test_index
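# Hedged usage sketch (illustrative only): RandomTrainTestCv draws one random
# train/test split, here keeping 20% of the rows in the test fold.
def _example_random_train_test_cv():  # pragma: no cover
    import numpy as np

    X_demo = np.random.randn(50, 3)
    cv = RandomTrainTestCv(test_size=0.2, random_state=0)
    train_index, test_index = next(cv.split(X_demo))
    assert len(test_index) == 10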
class SpecialGroupCV(BaseCrossValidator):
""" crossvalidator object that compute its folds based on the groups
Parameters
----------
base_cv : cv object
the cv to use ON the groups
groups_column : None or column name
if groups is a DataFrame or 2d array, the column containing the group labels used for the split
"""
def __init__(self, base_cv, groups_column=None):
self.base_cv = base_cv
self.groups_column = groups_column
def get_n_splits(self, X=None, y=None, groups=None):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
ugroups = np.sort(np.unique(self._groups_to_use(groups)))
return self.base_cv.get_n_splits(ugroups)
def _groups_to_use(self, groups):
if self.groups_column is None:
if hasattr(groups, "values"):
groups = groups.values
shape = getattr(groups, "shape", None)
if shape is not None and len(shape) > 1:
groups = groups[:, 0] # retrieve first column
else:
groups = groups[self.groups_column]
return groups
def _iter_test_masks(self, X, y, groups=None):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = self._groups_to_use(groups)
ugroups = np.sort(np.unique(groups))
for index_train, index_test in self.base_cv.split(ugroups):
groups_test = np.sort(np.unique(ugroups[index_test])) # groups to keep in test
boolean_test_index = pd.Series(groups).isin(groups_test).values
yield boolean_test_index
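# Hedged usage sketch (illustrative only): SpecialGroupCV applies its base cv to the
# *unique* group labels, so whole groups end up together in either train or test.
def _example_special_group_cv():  # pragma: no cover
    import numpy as np
    from sklearn.model_selection import KFold

    X_demo = np.random.randn(12, 2)
    groups = np.repeat(["a", "b", "c", "d"], 3)  # 4 groups of 3 rows each
    cv = SpecialGroupCV(base_cv=KFold(n_splits=2))
    for train_index, test_index in cv.split(X_demo, groups=groups):
        # each test fold contains complete groups only
        print(np.unique(groups[test_index]))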
| 33.256256 | 146 | 0.604461 |
| 87cf0393cba6c3c7f1662ba9624bdbd45fe92e0f | 2,931 | py | Python | tests/test_table_creator.py | schafrn/s3parq | f854819540e069f53fe42938a9bb6b8f800907b5 | ["MIT"] | null | null | null | tests/test_table_creator.py | schafrn/s3parq | f854819540e069f53fe42938a9bb6b8f800907b5 | ["MIT"] | null | null | null | tests/test_table_creator.py | schafrn/s3parq | f854819540e069f53fe42938a9bb6b8f800907b5 | ["MIT"] | null | null | null |
import pytest
from mock import patch
import s3parq.publish_redshift as rs
from s3parq.session_helper import SessionHelper
class MockScopeObj():
def execute(self, schema_string: str):
pass
def scope_execute_mock(mock_session_helper):
pass
class Test():
# Test that the function is called with the table name
@patch('s3parq.publish_redshift.SessionHelper')
@patch('tests.test_table_creator.scope_execute_mock')
def test_create_table(self, mock_session_helper, mock_execute):
mock_execute.return_value = MockScopeObj()
mock_session_helper.db_session_scope.return_value.__enter__ = scope_execute_mock
table_name = "my_string"
schema_name = "my_schema"
path = "s3://lol"
columns = {'grouped_col': 'object', 'text_col': 'object', 'int_col': 'int64', 'float_col': 'float64'}
partitions = {'fish': 'object'}
expected_sql = f'CREATE EXTERNAL TABLE IF NOT EXISTS {schema_name}.{table_name} {columns} \
PARTITIONED BY {partitions} STORED AS PARQUET \
LOCATION "{path}";'
with mock_session_helper.db_session_scope() as mock_scope:
rs.create_table(table_name, schema_name, columns, partitions, path, mock_session_helper)
assert mock_scope.execute.called_once_with(expected_sql)
# Test that the function is called with the table name without partitions
@patch('s3parq.publish_redshift.SessionHelper')
@patch('tests.test_table_creator.scope_execute_mock')
def test_create_table_without_partitions(self, mock_session_helper, mock_execute):
mock_execute.return_value = MockScopeObj()
mock_session_helper.db_session_scope.return_value.__enter__ = scope_execute_mock
table_name = "my_string"
schema_name = "my_schema"
path = "s3://lol"
columns = {'grouped_col': 'object', 'text_col': 'object', 'int_col': 'int64', 'float_col': 'float64'}
partitions = {}
expected_sql = f'CREATE EXTERNAL TABLE IF NOT EXISTS {schema_name}.{table_name} {columns} \
STORED AS PARQUET \
LOCATION "{path}";'
with mock_session_helper.db_session_scope() as mock_scope:
rs.create_table(table_name, schema_name, columns, partitions, path, mock_session_helper)
assert mock_scope.execute.called_once_with(expected_sql)
# Test that the passed-in datatypes are mapped correctly
def test_datatype_mapper(self):
columns = {'grouped_col': 'object', 'text_col': 'object', 'int_col': 'int64', 'float_col': 'float64'}
expected = {'grouped_col': 'VARCHAR', 'text_col': 'VARCHAR', 'int_col': 'BIGINT', 'float_col': 'FLOAT'}
sql = ""
for key, val in expected.items():
sql += f'{key} {val}, '
sql = "(" + sql[:-2] + ")"
actual = rs._datatype_mapper(columns)
assert actual == sql
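# Hedged sketch (not the real s3parq code): the kind of pandas-dtype -> Redshift Spectrum
# type mapping that test_datatype_mapper above asserts. It only covers the four dtypes
# exercised in the test; the actual _datatype_mapper implementation may differ.
def _example_datatype_mapper(columns: dict) -> str:
    redshift_types = {
        'object': 'VARCHAR',
        'int64': 'BIGINT',
        'float64': 'FLOAT',
    }
    parts = [f'{name} {redshift_types[dtype]}' for name, dtype in columns.items()]
    return '(' + ', '.join(parts) + ')'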
| 42.478261 | 111 | 0.669737 |
| 6bd396487493814fb8cb42657951c90f99beb143 | 1,479 | py | Python | stats/frames.py | fc8240/Python-Baseball | 68c94963006838c22770b3ce69b18a98af68acab | ["MIT"] | null | null | null | stats/frames.py | fc8240/Python-Baseball | 68c94963006838c22770b3ce69b18a98af68acab | ["MIT"] | null | null | null | stats/frames.py | fc8240/Python-Baseball | 68c94963006838c22770b3ce69b18a98af68acab | ["MIT"] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
try:
from data import games
plays_frame = games.query("type == 'play' & event != 'NP'")
plays_frame.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches', 'event', 'game_id', 'year']
info = games.query("type == 'info' & (multi2 == 'visteam' | multi2 == 'hometeam')")
info = info.loc[:, ['year', 'game_id', 'multi2', 'multi3']]
info.columns = ['year', 'game_id', 'team', 'defense']
info.loc[info['team'] == 'visteam', 'team'] = '1'
info.loc[info['team'] == 'hometeam', 'team'] = '0'
info = info.sort_values(['year', 'game_id', 'team']).reset_index(drop=True)
events = plays_frame.query("~(event.str.contains('^\d+') & ~event.str.contains('E'))")
events = events.query("~event.str.contains('^(?:P|C|F|I|O)')")
events = events.drop(['type', 'player', 'count', 'pitches'], axis=1)
events = events.sort_values(['team', 'inning']).reset_index()
replacements = {
r'^(?:S|D|T).*': 'H',
r'^HR.*': 'HR',
r'^W.*': 'BB',
r'.*K.*': 'SO',
r'^HP.*': 'HBP',
r'.*E.*\..*B-.*': 'RO',
r'.*E.*': 'E',
}
event_type = events['event'].replace(replacements, regex=True)
events = events.assign(event_type=event_type)
events = events.groupby(['year', 'game_id', 'team', 'event_type']).size().reset_index(name='count')
except ImportError:
print('It looks as if `data.py` is incomplete.')
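# Hedged illustration (not part of the original script): how a regex mapping like the
# `replacements` dict above turns raw play-by-play event strings into event-type codes
# via pandas Series.replace. The demo strings here are made up for illustration.
def _example_event_classification():  # pragma: no cover
    import pandas as pd

    demo_replacements = {
        r'^(?:S|D|T).*': 'H',
        r'^HR.*': 'HR',
        r'^W.*': 'BB',
        r'.*K.*': 'SO',
        r'^HP.*': 'HBP',
    }
    raw_events = pd.Series(['S8/G', 'HR/F', 'W', 'K', 'HP'])
    print(raw_events.replace(demo_replacements, regex=True).tolist())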
| 43.5 | 111 | 0.555781 |
| 65502fd927e25057221882134dea60be9b1a9b95 | 1,320 | py | Python | gcloud/tasktmpl3/migrations/0005_tasktemplate_tmp_field.py | gangh/bk-sops | 29f4b4915be42650c2eeee637e0cf798e4066f09 | ["Apache-2.0"] | 1 | 2019-12-23T07:23:35.000Z | 2019-12-23T07:23:35.000Z | gcloud/tasktmpl3/migrations/0005_tasktemplate_tmp_field.py | bk-sops/bk-sops | 9f5950b13473bf7b5032528b20016b7a571bb3cd | ["Apache-2.0"] | 9 | 2020-02-12T03:15:49.000Z | 2021-06-10T22:04:51.000Z | gcloud/tasktmpl3/migrations/0005_tasktemplate_tmp_field.py | bk-sops/bk-sops | 9f5950b13473bf7b5032528b20016b7a571bb3cd | ["Apache-2.0"] | 1 | 2022-01-17T11:32:05.000Z | 2022-01-17T11:32:05.000Z |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0013_old_template_process'),
('tasktmpl3', '0004_auto_20180822_1206'),
]
operations = [
migrations.AddField(
model_name='tasktemplate',
name='tmp_field',
field=models.ForeignKey(related_name='tmp_field_id', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='pipeline.PipelineTemplate', null=True),
),
]
| 38.823529 | 166 | 0.738636 |
| 2743bc2c43954cd790a7312943facee63d01fc87 | 292 | py | Python | config.py | Wilscos/recommender-systems-bibliometric-analysis | bbba45340bf4d494278784ca1376e443f47d5012 | ["MIT"] | null | null | null | config.py | Wilscos/recommender-systems-bibliometric-analysis | bbba45340bf4d494278784ca1376e443f47d5012 | ["MIT"] | null | null | null | config.py | Wilscos/recommender-systems-bibliometric-analysis | bbba45340bf4d494278784ca1376e443f47d5012 | ["MIT"] | null | null | null |
import os
import pathlib
MASTER = pathlib.Path().absolute()
DATA_PATH = os.path.join(MASTER, 'data')
PLOT_PATH = os.path.join(MASTER, 'plots')
PDF_PATH = os.path.join(DATA_PATH, 'articles_pdf')
TXT_PATH = os.path.join(DATA_PATH, 'texts')
POS_PATH = os.path.join(DATA_PATH, 'pos_tags.json')
| 26.545455 | 51 | 0.736301 |
| 6489923fe5c2757dd3b558298b99d07c6fdcdfcf | 5,093 | py | Python | cache_utils/tests.py | listingmirror/django-cache-utils | 086a8a60625023aa744b3f0de90bd8d9198681be | ["BSD-3-Clause"] | 25 | 2015-04-26T13:44:03.000Z | 2022-02-14T16:08:35.000Z | cache_utils/tests.py | listingmirror/django-cache-utils | 086a8a60625023aa744b3f0de90bd8d9198681be | ["BSD-3-Clause"] | 20 | 2015-06-04T23:45:06.000Z | 2022-03-29T23:57:41.000Z | cache_utils/tests.py | listingmirror/django-cache-utils | 086a8a60625023aa744b3f0de90bd8d9198681be | ["BSD-3-Clause"] | 20 | 2015-10-30T02:27:38.000Z | 2022-02-24T01:40:44.000Z |
# -*- coding: utf-8 -*-
from unittest import TestCase
from django.core.cache import cache
from cache_utils.decorators import cached
from cache_utils.utils import sanitize_memcached_key, _func_type, _func_info
def foo(a, b):
pass
class Foo(object):
def foo(self, a, b):
pass
@classmethod
def bar(cls, x):
pass
class Store(object):
""" Class for encoding error test """
def __unicode__(self):
return u'Вася'
def __repr__(self):
return u'Вася'.encode('utf8')
class FuncTypeTest(TestCase):
def assertFuncType(self, func, tp):
self.assertEqual(_func_type(func), tp)
def test_func(self):
self.assertFuncType(foo, 'function')
def test_method(self):
self.assertFuncType(Foo.foo, 'method')
def test_classmethod(self):
self.assertFuncType(Foo.bar, 'classmethod')
class FuncInfoTest(TestCase):
def assertFuncInfo(self, func, args_in, name, args_out):
info = _func_info(func, args_in)
self.assertEqual(info[0], name)
self.assertEqual(info[1], args_out)
def test_func(self):
self.assertFuncInfo(foo, [1, 2], 'cache_utils.tests.foo:11', [1, 2])
def test_method(self):
foo_obj = Foo()
self.assertFuncInfo(Foo.foo, [foo_obj, 1, 2], 'cache_utils.tests.Foo.foo:17', [1, 2])
def test_classmethod(self):
self.assertFuncInfo(Foo.bar, [Foo, 1], 'cache_utils.tests.Foo.bar:20', [1])
class SanitizeTest(TestCase):
def test_sanitize_keys(self):
key = u"12345678901234567890123456789012345678901234567890"
self.assertTrue(len(key) >= 40)
key = sanitize_memcached_key(key, 40)
self.assertTrue(len(key) <= 40)
class ClearMemcachedTest(TestCase):
def tearDown(self):
cache._cache.flush_all()
def setUp(self):
cache._cache.flush_all()
class InvalidationTest(ClearMemcachedTest):
def test_group_invalidation(self):
cache.set('vasia', 'foo', 60, group='names')
cache.set('petya', 'bar', 60, group='names')
cache.set('red', 'good', 60, group='colors')
self.assertEqual(cache.get('vasia', group='names'), 'foo')
self.assertEqual(cache.get('petya', group='names'), 'bar')
self.assertEqual(cache.get('red', group='colors'), 'good')
cache.invalidate_group('names')
self.assertEqual(cache.get('petya', group='names'), None)
self.assertEqual(cache.get('vasia', group='names'), None)
self.assertEqual(cache.get('red', group='colors'), 'good')
cache.set('vasia', 'foo', 60, group='names')
self.assertEqual(cache.get('vasia', group='names'), 'foo')
def test_func_invalidation(self):
self.call_count = 0
@cached(60)
def my_func(a, b):
self.call_count += 1
return self.call_count
self.assertEqual(my_func(1, 2), 1)
self.assertEqual(my_func(1, 2), 1)
self.assertEqual(my_func(3, 2), 2)
self.assertEqual(my_func(3, 2), 2)
my_func.invalidate(3, 2)
self.assertEqual(my_func(1, 2), 1)
self.assertEqual(my_func(3, 2), 3)
self.assertEqual(my_func(3, 2), 3)
def test_method_invalidation(self):
self.call_count = 0
this = self
class Foo(object):
@cached(60)
def bar(self, x):
this.call_count += 1
return this.call_count
foo = Foo()
self.assertEqual(foo.bar(1), 1)
self.assertEqual(foo.bar(1), 1)
Foo.bar.invalidate(1)
self.assertEqual(foo.bar(1), 2)
def test_invalidate_nonexisting(self):
@cached(60)
def foo(x):
return 1
foo.invalidate(5) # this shouldn't raise exception
class DecoratorTest(ClearMemcachedTest):
def test_decorator(self):
self._x = 0
@cached(60, group='test-group')
def my_func(params=""):
self._x = self._x + 1
return u"%d%s" % (self._x, params)
self.assertEqual(my_func(), "1")
self.assertEqual(my_func(), "1")
self.assertEqual(my_func("x"), u"2x")
self.assertEqual(my_func("x"), u"2x")
self.assertEqual(my_func(u"Василий"), u"3Василий")
self.assertEqual(my_func(u"Василий"), u"3Василий")
self.assertEqual(my_func(u"й"*240), u"4"+u"й"*240)
self.assertEqual(my_func(u"й"*240), u"4"+u"й"*240)
self.assertEqual(my_func(u"Ы"*500), u"5"+u"Ы"*500)
self.assertEqual(my_func(u"Ы"*500), u"5"+u"Ы"*500)
def test_key_override(self):
"""
Test the cache key naming.
"""
@cached(60*5, key='foo')
def foo():
return 'test'
key = foo.get_cache_key()
self.assertEqual(key, '[cached]foo()')
# Now test with args and kwargs argo
@cached(60*5, key='func_with_args')
def bar(i, foo='bar'):
return i * 5
key = bar.get_cache_key(2, foo='hello')
self.assertEqual(key, "[cached]func_with_args((2,){'foo':'hello'})")
| 26.94709 | 93 | 0.597683 |
| 873eb6de4b696f1240de38efcf7c40168581d257 | 1,780 | py | Python | menpo/shape/base.py | apapaion/menpo | 9834f0437ca3cbe6a972c2a62f7c970ae950cf32 | ["BSD-3-Clause"] | 311 | 2015-01-01T17:16:18.000Z | 2021-12-20T11:25:23.000Z | menpo/shape/base.py | apapaion/menpo | 9834f0437ca3cbe6a972c2a62f7c970ae950cf32 | ["BSD-3-Clause"] | 298 | 2015-01-02T17:30:22.000Z | 2022-01-02T22:12:17.000Z | menpo/shape/base.py | apapaion/menpo | 9834f0437ca3cbe6a972c2a62f7c970ae950cf32 | ["BSD-3-Clause"] | 80 | 2015-02-02T14:17:36.000Z | 2021-12-22T10:09:28.000Z |
from menpo.base import Vectorizable
from menpo.landmark import Landmarkable
from menpo.transform.base import Transformable
from menpo.visualize import LandmarkableViewable, Viewable
class Shape(Vectorizable, Transformable, Landmarkable, LandmarkableViewable, Viewable):
"""
Abstract representation of shape. Shapes are :map:`Transformable`,
:map:`Vectorizable`, :map:`Landmarkable`, :map:`LandmarkableViewable` and
:map:`Viewable`. This base class handles transforming landmarks when the
shape is transformed. Therefore, implementations of :map:`Shape` have to
implement the abstract :meth:`_transform_self_inplace` method that handles
transforming the :map:`Shape` itself.
"""
def _transform_inplace(self, transform):
"""
Transform the landmarks and the shape itself.
Parameters
----------
transform : `function`
A function to transform the spatial data with.
Returns
-------
self : `type(self)`
A pointer to `self` (the result of :meth:`_transform_self_inplace`).
"""
if self.has_landmarks:
self.landmarks._transform_inplace(transform)
return self._transform_self_inplace(transform)
def _transform_self_inplace(self, transform):
"""
Implement this method to transform the concrete implementation of a
shape. This is then called by the Shape's :meth:`_transform_inplace`
method, which will have updated the landmarks beforehand.
Parameters
----------
transform : `function`
A function to transform the spatial data with.
Returns
-------
self : `type(self)`
A pointer to `self`.
"""
pass
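# Hedged sketch (illustrative only, not menpo code): the minimal outline of a concrete
# subclass. It only shows _transform_self_inplace; a real subclass must also provide the
# Vectorizable and visualization methods menpo expects, and `self.points` here is a
# hypothetical attribute holding the spatial data.
class _ExamplePointShape(Shape):
    def __init__(self, points):
        super(_ExamplePointShape, self).__init__()
        self.points = points

    def _transform_self_inplace(self, transform):
        # apply the transform to the shape's own spatial data
        self.points = transform(self.points)
        return self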
| 34.230769 | 87 | 0.653933 |
| 21cdbfdd2f46ccb6d24b0dd201d61edff9807ad9 | 8,974 | py | Python | openmdao/core/tests/test_system.py | fzahle/OpenMDAO | ce53b0a0862ac1162d5daad7b0ca34ae085ee47c | ["Apache-2.0"] | 1 | 2016-05-10T17:01:17.000Z | 2016-05-10T17:01:17.000Z | openmdao/core/tests/test_system.py | gsoxley/OpenMDAO | 709401e535cf6933215abd942d4b4d49dbf61b2b | ["Apache-2.0"] | 3 | 2016-05-10T16:55:46.000Z | 2018-10-22T23:28:52.000Z | openmdao/core/tests/test_system.py | gsoxley/OpenMDAO | 709401e535cf6933215abd942d4b4d49dbf61b2b | ["Apache-2.0"] | 2 | 2018-04-05T15:53:54.000Z | 2018-10-22T22:48:00.000Z |
""" Unit tests for the system interface."""
import unittest
from six import assertRaisesRegex
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, ExecComp
from openmdao.utils.assert_utils import assert_rel_error, assert_warning
class TestSystem(unittest.TestCase):
def test_vector_context_managers(self):
g1 = Group()
g1.add_subsystem('Indep', IndepVarComp('a', 5.0), promotes=['a'])
g2 = g1.add_subsystem('G2', Group(), promotes=['*'])
g2.add_subsystem('C1', ExecComp('b=2*a'), promotes=['a', 'b'])
model = Group()
model.add_subsystem('G1', g1, promotes=['b'])
model.add_subsystem('Sink', ExecComp('c=2*b'), promotes=['b'])
p = Problem(model=model)
p.set_solver_print(level=0)
# Test pre-setup errors
with self.assertRaises(Exception) as cm:
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(str(cm.exception),
"Cannot get vectors because setup has not yet been called.")
with self.assertRaises(Exception) as cm:
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('vec')
self.assertEqual(str(cm.exception),
"Cannot get vectors because setup has not yet been called.")
p.setup()
p.run_model()
# Test inputs with original values
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(inputs['G1.G2.C1.a'], 5.)
inputs, outputs, residuals = g1.get_nonlinear_vectors()
self.assertEqual(inputs['G2.C1.a'], 5.)
# Test inputs after setting a new value
inputs, outputs, residuals = g2.get_nonlinear_vectors()
inputs['C1.a'] = -1.
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(inputs['G1.G2.C1.a'], -1.)
inputs, outputs, residuals = g1.get_nonlinear_vectors()
self.assertEqual(inputs['G2.C1.a'], -1.)
# Test outputs with original values
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(outputs['G1.G2.C1.b'], 10.)
inputs, outputs, residuals = g2.get_nonlinear_vectors()
# Test outputs after setting a new value
inputs, outputs, residuals = model.get_nonlinear_vectors()
outputs['G1.G2.C1.b'] = 123.
self.assertEqual(outputs['G1.G2.C1.b'], 123.)
inputs, outputs, residuals = g2.get_nonlinear_vectors()
outputs['C1.b'] = 789.
self.assertEqual(outputs['C1.b'], 789.)
# Test residuals
inputs, outputs, residuals = model.get_nonlinear_vectors()
residuals['G1.G2.C1.b'] = 99.0
self.assertEqual(residuals['G1.G2.C1.b'], 99.0)
# Test linear
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('linear')
d_outputs['G1.G2.C1.b'] = 10.
self.assertEqual(d_outputs['G1.G2.C1.b'], 10.)
# Test linear with invalid vec_name
with self.assertRaises(Exception) as cm:
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('bad_name')
self.assertEqual(str(cm.exception),
"There is no linear vector named %s" % 'bad_name')
def test_set_checks_shape(self):
indep = IndepVarComp()
indep.add_output('a')
indep.add_output('x', shape=(5, 1))
g1 = Group()
g1.add_subsystem('Indep', indep, promotes=['a', 'x'])
g2 = g1.add_subsystem('G2', Group(), promotes=['*'])
g2.add_subsystem('C1', ExecComp('b=2*a'), promotes=['a', 'b'])
g2.add_subsystem('C2', ExecComp('y=2*x',
x=np.zeros((5, 1)),
y=np.zeros((5, 1))),
promotes=['x', 'y'])
model = Group()
model.add_subsystem('G1', g1, promotes=['b', 'y'])
model.add_subsystem('Sink', ExecComp(('c=2*b', 'z=2*y'),
y=np.zeros((5, 1)),
z=np.zeros((5, 1))),
promotes=['b', 'y'])
p = Problem(model=model)
p.setup()
p.set_solver_print(level=0)
p.run_model()
msg = "Incompatible shape for '.*': Expected (.*) but got (.*)"
num_val = -10
arr_val = -10*np.ones((5, 1))
bad_val = -10*np.ones((10))
inputs, outputs, residuals = g2.get_nonlinear_vectors()
#
# set input
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
inputs['C1.a'] = arr_val
# assign scalar to array
inputs['C2.x'] = num_val
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign array to array
inputs['C2.x'] = arr_val
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
inputs['C2.x'] = bad_val
# assign list to array
inputs['C2.x'] = arr_val.tolist()
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
inputs['C2.x'] = bad_val.tolist()
#
# set output
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
outputs['C1.b'] = arr_val
# assign scalar to array
outputs['C2.y'] = num_val
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign array to array
outputs['C2.y'] = arr_val
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
outputs['C2.y'] = bad_val
# assign list to array
outputs['C2.y'] = arr_val.tolist()
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
outputs['C2.y'] = bad_val.tolist()
#
# set residual
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
residuals['C1.b'] = arr_val
# assign scalar to array
residuals['C2.y'] = num_val
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign array to array
residuals['C2.y'] = arr_val
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
residuals['C2.y'] = bad_val
# assign list to array
residuals['C2.y'] = arr_val.tolist()
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
residuals['C2.y'] = bad_val.tolist()
def test_deprecated_solver_names(self):
class DummySolver():
pass
model = Group()
# check nl_solver setter & getter
msg = "The 'nl_solver' attribute provides backwards compatibility " \
"with OpenMDAO 1.x ; use 'nonlinear_solver' instead."
with assert_warning(DeprecationWarning, msg):
model.nl_solver = DummySolver()
with assert_warning(DeprecationWarning, msg):
solver = model.nl_solver
self.assertTrue(isinstance(solver, DummySolver))
# check ln_solver setter & getter
msg = "The 'ln_solver' attribute provides backwards compatibility " \
"with OpenMDAO 1.x ; use 'linear_solver' instead."
with assert_warning(DeprecationWarning, msg):
model.ln_solver = DummySolver()
with assert_warning(DeprecationWarning, msg):
solver = model.ln_solver
self.assertTrue(isinstance(solver, DummySolver))
def test_deprecated_metadata(self):
from openmdao.api import Problem, IndepVarComp
from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp
prob = Problem()
prob.model.add_subsystem('inputs', IndepVarComp('x', shape=3))
prob.model.add_subsystem('double', VectorDoublingComp())
msg = "The 'metadata' attribute provides backwards compatibility " \
"with earlier version of OpenMDAO; use 'options' instead."
with assert_warning(DeprecationWarning, msg):
prob.model.double.metadata['size'] = 3
prob.model.connect('inputs.x', 'double.x')
prob.setup()
prob['inputs.x'] = [1., 2., 3.]
prob.run_model()
assert_rel_error(self, prob['double.y'], [2., 4., 6.])
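# Hedged sketch (illustrative only, not OpenMDAO code): the deprecation pattern that the
# two tests above exercise -- a legacy attribute that warns and forwards to the new name.
class _ExampleDeprecatedAlias(object):
    def __init__(self):
        self.nonlinear_solver = None

    @property
    def nl_solver(self):
        import warnings
        warnings.warn("The 'nl_solver' attribute provides backwards compatibility "
                      "with OpenMDAO 1.x ; use 'nonlinear_solver' instead.",
                      DeprecationWarning)
        return self.nonlinear_solver

    @nl_solver.setter
    def nl_solver(self, solver):
        import warnings
        warnings.warn("The 'nl_solver' attribute provides backwards compatibility "
                      "with OpenMDAO 1.x ; use 'nonlinear_solver' instead.",
                      DeprecationWarning)
        self.nonlinear_solver = solver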
if __name__ == "__main__":
unittest.main()
| 34.121673 | 92 | 0.585358 |
| fb456b19d9a00a7fd5e0d692313892c0fd834462 | 27,188 | py | Python | nnunet/network_architecture/generic_BTS_Netv5_5.py | zjdcts/H-FC | 60a00322d77ae07519174a3eb0b02270aa8578c1 | ["Apache-2.0"] | 2 | 2022-03-17T07:12:58.000Z | 2022-03-17T07:13:02.000Z | nnunet/network_architecture/generic_BTS_Netv5_5.py | zjdcts/H-FC | 60a00322d77ae07519174a3eb0b02270aa8578c1 | ["Apache-2.0"] | null | null | null | nnunet/network_architecture/generic_BTS_Netv5_5.py | zjdcts/H-FC | 60a00322d77ae07519174a3eb0b02270aa8578c1 | ["Apache-2.0"] | null | null | null |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
import torch
import numpy as np
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
import torch.nn.functional
class ConvDropoutNormNonlin(nn.Module):
"""
fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad.
"""
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = self.nonlin(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
class ConvDropoutNonlinNorm(ConvDropoutNormNonlin):
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.instnorm(self.lrelu(x))
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):
'''
stacks ConvDropoutNormNonlin layers. first_stride will only be applied to the first layer in the stack. The other parameters affect all layers
:param input_feature_channels:
:param output_feature_channels:
:param num_convs:
:param dilation:
:param kernel_size:
:param padding:
:param dropout:
:param initial_stride:
:param conv_op:
:param norm_op:
:param dropout_op:
:param inplace:
:param neg_slope:
:param norm_affine:
:param conv_bias:
'''
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([basic_block(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[basic_block(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
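# Hedged usage sketch (illustrative only): two stacked 3D conv blocks with a strided
# first convolution, applied to a random 5D tensor laid out as (N, C, D, H, W).
def _example_stacked_conv_layers():  # pragma: no cover
    layers = StackedConvLayers(
        input_feature_channels=4, output_feature_channels=32, num_convs=2,
        conv_op=nn.Conv3d, norm_op=nn.InstanceNorm3d, dropout_op=nn.Dropout3d,
        dropout_op_kwargs={'p': 0.0, 'inplace': True},
        nonlin=nn.LeakyReLU, first_stride=(2, 2, 2))
    x = torch.rand(1, 4, 32, 64, 64)
    return layers(x).shape  # torch.Size([1, 32, 16, 32, 32])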
def print_module_training_status(module):
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \
isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \
or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \
or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module,
nn.BatchNorm1d):
print(str(module), module.training)
class Upsample(nn.Module):
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):
super(Upsample, self).__init__()
self.align_corners = align_corners
self.mode = mode
self.scale_factor = scale_factor
self.size = size
def forward(self, x):
return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,
align_corners=self.align_corners)
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True,
bn=True, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm3d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
super(ChannelGate, self).__init__()
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, gate_channels // reduction_ratio),
nn.ReLU(),
nn.Linear(gate_channels // reduction_ratio, gate_channels)
)
self.pool_types = pool_types
def forward(self, x):
channel_att_sum = None
for pool_type in self.pool_types:
if pool_type == 'avg':
avg_pool = nn.functional.avg_pool3d(x, (x.size(2), x.size(3), x.size(4)),
stride=(x.size(2), x.size(3), x.size(4)))
channel_att_raw = self.mlp(avg_pool)
elif pool_type == 'max':
max_pool = nn.functional.max_pool3d(x, (x.size(2), x.size(3), x.size(4)),
stride=(x.size(2), x.size(3), x.size(4)))
channel_att_raw = self.mlp(max_pool)
if channel_att_sum is None:
channel_att_sum = channel_att_raw
else:
channel_att_sum = channel_att_sum + channel_att_raw
scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).unsqueeze(4).expand_as(x)
return x * scale
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
class SpatialGate(nn.Module):
def __init__(self):
super(SpatialGate, self).__init__()
kernel_size = 7
self.compress = ChannelPool()
self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size - 1) // 2, relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
scale = torch.sigmoid(x_out) # broadcasting
return x * scale
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, out_dim, drop=0.1):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, out_dim),
nn.ReLU(inplace=True),
nn.Dropout(drop),
)
def forward(self, x):
return self.net(x)
class SpAtt(nn.Module):
def __init__(self, in_planes, num_layers, region_size=2, reduction=2):
super(SpAtt, self).__init__()
self.conv1 = BasicConv(in_planes, in_planes // reduction, 3, padding=1)
self.fc = nn.ModuleList()
self.sigmoid = nn.Sigmoid()
self.rs = region_size
self.num_layers = num_layers
for i in range(num_layers):
self.fc.append(FeedForward(region_size ** 3, 64, region_size ** 3))
# self.fc.append(FeedForward(region_size ** 3, 64, region_size ** 3))
self.conv2 = BasicConv(in_planes // reduction, 1, 3, padding=1)
def forward(self, x):
residual = x
out = None
x = self.conv1(x)
for i in range(self.num_layers):
x = x.unfold(4, self.rs, self.rs).unfold(3, self.rs, self.rs).unfold(2, self.rs, self.rs).contiguous()
b, c, h, w, d, _, _, _ = x.size()
x = self.fc[i](x.view(b, c, h, w, d, -1))
x = x.view(b, c, h, w, d, self.rs, self.rs, self.rs).permute(0, 1, 2, 5, 3, 6, 4, 7).contiguous(). \
view(b, c, h * self.rs, w * self.rs, d * self.rs)
if out is None:
out = nn.functional.interpolate(x, scale_factor=2 ** i, mode='trilinear', align_corners=False)
else:
out += nn.functional.interpolate(x, scale_factor=2 ** i, mode='trilinear', align_corners=False)
x = nn.functional.interpolate(x, scale_factor=0.5, mode='trilinear', align_corners=False)
# b, c, h, w, d = x.size()
# x = self.fc[-1](x.view(b, c, -1))
# x = x.view(b, c, h, w, d)
# if out is None:
# out = nn.functional.interpolate(x, scale_factor=2 ** self.num_layers, mode='trilinear', align_corners=False)
# else:
# out += nn.functional.interpolate(x, scale_factor=2 ** self.num_layers, mode='trilinear',
# align_corners=False)
return residual * self.sigmoid(self.conv2(out))
class CBAM(nn.Module):
def __init__(self, gate_channels, num_layers, reduction_ratio=16, pool_types=['avg', 'max'], no_spatial=False):
super(CBAM, self).__init__()
self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
self.no_spatial = no_spatial
if not no_spatial:
# self.SpatialGate = SpatialGate()
self.SpatialGate = SpAtt(gate_channels, num_layers)
def forward(self, x):
x_out = self.ChannelGate(x)
if not self.no_spatial:
x_out = self.SpatialGate(x_out)
return x_out
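# Hedged usage sketch (illustrative only): channel plus region-based spatial attention
# on a small 5D feature map; `num_layers` controls how many 2x-downsampled region
# levels the SpAtt branch aggregates before re-weighting the input.
def _example_cbam():  # pragma: no cover
    att = CBAM(gate_channels=32, num_layers=2, reduction_ratio=16)
    x = torch.rand(1, 32, 16, 16, 16)
    return att(x).shape  # torch.Size([1, 32, 16, 16, 16])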
class Generic_BTS_Netv5_5(SegmentationNetwork):
DEFAULT_BATCH_SIZE_3D = 2
DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
SPACING_FACTOR_BETWEEN_STAGES = 2
BASE_NUM_FEATURES_3D = 30
MAX_NUMPOOL_3D = 999
MAX_NUM_FILTERS_3D = 320
DEFAULT_PATCH_SIZE_2D = (256, 256)
BASE_NUM_FEATURES_2D = 30
DEFAULT_BATCH_SIZE_2D = 50
MAX_NUMPOOL_2D = 999
MAX_FILTERS_2D = 480
use_this_for_batch_size_computation_2D = 19739648
use_this_for_batch_size_computation_3D = 520000000 # 505789440
def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
conv_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,
max_num_features=None, basic_block=ConvDropoutNormNonlin,
seg_output_use_bias=False):
"""
basically more flexible than v1, architecture is the same
Does this look complicated? Nah bro. Functionality > usability
This does everything you need, including world peace.
Questions? -> f.isensee@dkfz.de
"""
super(Generic_BTS_Netv5_5, self).__init__()
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
self.num_classes = num_classes
self.final_nonlin = final_nonlin
self._deep_supervision = deep_supervision
self.do_ds = deep_supervision
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.conv_kernel_sizes = conv_kernel_sizes
self.conv_pad_sizes = []
for krnl in self.conv_kernel_sizes:
self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])
if max_num_features is None:
if self.conv_op == nn.Conv3d:
self.max_num_features = self.MAX_NUM_FILTERS_3D
else:
self.max_num_features = self.MAX_FILTERS_2D
else:
self.max_num_features = max_num_features
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
self.cbam = nn.ModuleList()
output_features = base_num_features
input_features = input_channels
for d in range(num_pool):
# determine the first stride
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d - 1]
else:
first_stride = None
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
# add convolutions
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride, basic_block=basic_block))
self.cbam.append(CBAM(output_features, max(1, num_pool - d - 1)))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
output_features = min(output_features, self.max_num_features)
# now the bottleneck.
# determine the first stride
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
# the output of the last conv must match the number of features from the skip connection if we are not using
# convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be
# done by the transposed conv
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride, basic_block=basic_block),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, basic_block=basic_block)))
self.cbam.append(CBAM(final_num_features, 1))
# if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
# now lets build the localization pathway
for u in range(num_pool):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[
-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_pool - 1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],
pool_op_kernel_sizes[-(u + 1)], bias=False))
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]
self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,
self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs, basic_block=basic_block)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,
1, 1, 0, 1, 1, seg_output_use_bias))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_pool - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(
self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
if self.weightInitializer is not None:
self.apply(self.weightInitializer)
# self.apply(print_module_training_status)
def forward(self, x):
skips = []
seg_outputs = []
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
x = self.cbam[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
x = self.cbam[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))
if self._deep_supervision and self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in
zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
@staticmethod
def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,
num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,
conv_per_stage=2):
"""
This only applies for num_conv_per_stage and convolutional_upsampling=True
This is not the real VRAM consumption, just a constant term to which the VRAM consumption will be approximately proportional
(+ offset for parameter storage)
:param deep_supervision:
:param patch_size:
:param num_pool_per_axis:
:param base_num_features:
:param max_num_features:
:param num_modalities:
:param num_classes:
:param pool_op_kernel_sizes:
:return:
"""
if not isinstance(num_pool_per_axis, np.ndarray):
num_pool_per_axis = np.array(num_pool_per_axis)
npool = len(pool_op_kernel_sizes)
map_size = np.array(patch_size)
tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +
num_modalities * np.prod(map_size, dtype=np.int64) +
num_classes * np.prod(map_size, dtype=np.int64))
num_feat = base_num_features
for p in range(npool):
for pi in range(len(num_pool_per_axis)):
map_size[pi] /= pool_op_kernel_sizes[p][pi]
num_feat = min(num_feat * 2, max_num_features)
num_blocks = (conv_per_stage * 2 + 1) if p < (
npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv
tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
if deep_supervision and p < (npool - 2):
tmp += np.prod(map_size, dtype=np.int64) * num_classes
# print(p, map_size, num_feat, tmp)
return tmp
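# Hedged usage sketch (illustrative only): a rough relative-VRAM estimate for a 5-level
# 3D configuration; the returned number is only meaningful for comparing configurations.
def _example_vram_estimate():  # pragma: no cover
    patch_size = (64, 192, 160)
    pool_op_kernel_sizes = [(2, 2, 2)] * 5
    return Generic_BTS_Netv5_5.compute_approx_vram_consumption(
        patch_size=patch_size, num_pool_per_axis=(5, 5, 5),
        base_num_features=32, max_num_features=320,
        num_modalities=4, num_classes=3,
        pool_op_kernel_sizes=pool_op_kernel_sizes, deep_supervision=True)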
| 45.013245 | 147 | 0.617405 |
| 53bb5936b5c59c77bfea56da9dd3efe0cee4c117 | 489 | py | Python | official/vision/keypoints/models/__init__.py | ChenXinhao/Models | 91fa3b9710caefaeeb0d95cab003b58c2c8ba043 | ["Apache-2.0"] | null | null | null | official/vision/keypoints/models/__init__.py | ChenXinhao/Models | 91fa3b9710caefaeeb0d95cab003b58c2c8ba043 | ["Apache-2.0"] | null | null | null | official/vision/keypoints/models/__init__.py | ChenXinhao/Models | 91fa3b9710caefaeeb0d95cab003b58c2c8ba043 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from .simplebaseline import (
simplebaseline_res50,
simplebaseline_res101,
simplebaseline_res152,
)
| 34.928571 | 88 | 0.748466 |
| 6a40e198b75e479a57dd99f5d3a651eb34f4db37 | 36,553 | py | Python | TrainingExtensions/tensorflow/test/python/test_qc_quantize_op.py | styler00dollar/Colab-docker-aimet | 9e70d1e3729be8e890bc58e1f5af30a706a15e96 | ["BSD-3-Clause"] | 945 | 2020-04-30T02:23:55.000Z | 2022-03-31T08:44:32.000Z | TrainingExtensions/tensorflow/test/python/test_qc_quantize_op.py | seaun163/aimet | de94e5522e0c9250fb422d064b77ef9ecc70f239 | ["BSD-3-Clause"] | 563 | 2020-05-01T03:07:22.000Z | 2022-03-30T05:35:58.000Z | TrainingExtensions/tensorflow/test/python/test_qc_quantize_op.py | seaun163/aimet | de94e5522e0c9250fb422d064b77ef9ecc70f239 | ["BSD-3-Clause"] | 186 | 2020-04-30T00:55:26.000Z | 2022-03-30T09:54:51.000Z |
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import pytest
import unittest
import numpy as np
import os
import tensorflow as tf
from aimet_tensorflow.utils.constants import QuantizeOpIndices
import libpymo
from aimet_tensorflow import quantsim_straight_through_grad
tf.compat.v1.logging.set_verbosity(tf.logging.WARN)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class TestTrainingExtensionsQcQuantizeOp(unittest.TestCase):
def test_qc_quantize_op_cpu(self):
"""
test custom op with CPU
"""
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
bitwidth = 8
use_symm_encoding = True
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.updateStats),
trainable=False, dtype=tf.int32)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])
pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertTrue(np.allclose(out_data, inp_data))
# compute encodings
self.assertFalse(tensor_quantizer.isEncodingValid)
encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding, False, False)
self.assertTrue(tensor_quantizer.isEncodingValid)
print('min=', encoding.min, ', max=', encoding.max)
# get the output
inp_data = np.random.rand(10) * 2
print(inp_data)
mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertFalse(np.allclose(out_data, inp_data))
sess.close()
def test_qc_quantize_op_oneshot_cpu(self):
"""
test custom op with CPU
"""
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
bitwidth = 8
use_symm_encoding = False
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
trainable=False, dtype=tf.int32)
encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])
pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10) * 256
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
self.assertTrue(tensor_quantizer.isEncodingValid)
encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding, False, False)
print('min=', encoding.min, ', max=', encoding.max)
# compare qc_quantize op's output with input
self.assertFalse(np.allclose(out_data, inp_data))
sess.close()
@pytest.mark.cuda
def test_qc_quantize_op_gpu(self):
"""
test custom op with GPU
"""
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
bitwidth = 8
use_symm_encoding = False
with graph.as_default():
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.updateStats),
trainable=False, dtype=tf.int32)
encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])
# place holder for the input
with tf.device("/device:GPU:0"):
pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10)
# get the output
print(inp_data)
with tf.device("/device:GPU:0"):
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertTrue(np.allclose(out_data, inp_data))
# compute encodings
self.assertFalse(tensor_quantizer.isEncodingValid)
encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding, False, False)
self.assertTrue(tensor_quantizer.isEncodingValid)
print('min=', encoding.min, ', max=', encoding.max)
# get the output
inp_data = np.random.rand(10) * 2
print(inp_data)
mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
with tf.device("/device:GPU:0"):
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertFalse(np.allclose(out_data, inp_data))
sess.close()
def test_qc_quantize_static_op_cpu(self):
"""
test custom static op with CPU
"""
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,
encoding_min=-1.0,
encoding_max=1.0,
bitwidth=8,
quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
op_mode=libpymo.TensorQuantizerOpMode.passThrough,
is_symmetric=False)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10).astype(np.float32)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertTrue(np.allclose(out_data, inp_data, atol=1e-6))
sess.close()
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,
encoding_min=-1.0,
encoding_max=0.5,
bitwidth=8,
quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
op_mode=libpymo.TensorQuantizerOpMode.quantizeDequantize,
is_symmetric=False)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10).astype(np.float32)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertFalse(np.allclose(out_data, inp_data, atol=1e-1))
sess.close()
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,
encoding_min=-1.0,
encoding_max=1.0,
bitwidth=8,
quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
op_mode=libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize,
is_symmetric=False)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10).astype(np.float32)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertFalse(np.allclose(out_data, inp_data, atol=1e-3))
sess.close()
@pytest.mark.cuda
def test_qc_quantize_static_op_gpu(self):
"""
test custom static op with GPU
"""
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
# place holder for the input
with tf.device("/device:GPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,
encoding_min=-1.0,
encoding_max=1.0,
bitwidth=8,
quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
op_mode=libpymo.TensorQuantizerOpMode.passThrough,
is_symmetric=False)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10).astype(np.float32)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertTrue(np.allclose(out_data, inp_data, atol=1e-6))
sess.close()
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
# place holder for the input
with tf.device("/device:GPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,
encoding_min=-1.0,
encoding_max=0.5,
bitwidth=8,
quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
op_mode=libpymo.TensorQuantizerOpMode.quantizeDequantize,
is_symmetric=False)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10).astype(np.float32)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertFalse(np.allclose(out_data, inp_data, atol=1e-1))
sess.close()
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
# place holder for the input
with tf.device("/device:GPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
pass_through_op_output = zero_out_module.qc_quantize_static(name='quant_op', in_tensor=inp,
encoding_min=-1.0,
encoding_max=1.0,
bitwidth=8,
quant_scheme=libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
op_mode=libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize,
is_symmetric=False)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
inp_data = np.random.rand(10).astype(np.float32)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
self.assertFalse(np.allclose(out_data, inp_data, atol=1e-3))
sess.close()
def test_qc_quantize_op_straight_through_gradient_computation(self):
"""
test to validate tensorflow quantize op straight through estimator gradient computation
"""
from aimet_tensorflow import quantsim_straight_through_grad
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
inp = tf.compat.v1.placeholder(tf.float32, shape=[2, 2], name='input')
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
trainable=False, dtype=tf.int32)
# fix min max and bitwidth to be used
encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
encoding_max = tf.Variable(initial_value=5.0, trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=8, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])
# use default gradient
pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding)
# pass_through_op = graph.get_operation_by_name('quant_op')
inp_tensor = sess.graph.get_tensor_by_name('input:0')
# set the encodings
tensor_quantizer.isEncodingValid = True
mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
# compute default gradient
grads = tf.gradients(pass_through_op_output, [inp_tensor])
dlossbydx = grads
        # send input; note the last value sent here is > 5.0,
        # while the encodings were set earlier to min = 0.0, max = 5.0,
        # i.e. the input contains a value above the encoding max
inp_data = [[1.4581, 0.4829], [0.3125, 5.6150]]
# check the gradient returned is a gated version, in this case should be [[1.0, 1.0],[1.0, 0.0]]
with graph.as_default():
input_gradient = sess.run([dlossbydx], feed_dict={inp_tensor: inp_data})[0]
# validate valid clamping in gradient computation
self.assertTrue(input_gradient[0][0][0] == 1.0)
self.assertTrue(input_gradient[0][0][1] == 1.0)
self.assertTrue(input_gradient[0][1][0] == 1.0)
self.assertTrue(input_gradient[0][1][1] == 0.0)
# pass input in correct range
inp_data = [[1.4581, 0.4829], [0.3125, 1.6150]]
        # check the gradient returned is a gated version, in this case it should be [[1.0, 1.0], [1.0, 1.0]]
with graph.as_default():
input_gradient = sess.run([dlossbydx], feed_dict={inp_tensor: inp_data})[0]
# validate no clamping case in gradient computation
self.assertTrue(input_gradient[0][0][0] == 1.0)
self.assertTrue(input_gradient[0][0][1] == 1.0)
self.assertTrue(input_gradient[0][1][0] == 1.0)
self.assertTrue(input_gradient[0][1][1] == 1.0)
# pass input with data < n , first value here is -0.5
inp_data = [[-0.5, 0.4829], [0.3125, 1.6150]]
        # check the gradient returned is a gated version, in this case it should be [[0.0, 1.0], [1.0, 1.0]]
with graph.as_default():
input_gradient = sess.run([dlossbydx], feed_dict={inp_tensor: inp_data})[0]
# validate valid clamping case in gradient computation
self.assertTrue(input_gradient[0][0][0] == 0.0)
self.assertTrue(input_gradient[0][0][1] == 1.0)
self.assertTrue(input_gradient[0][1][0] == 1.0)
self.assertTrue(input_gradient[0][1][1] == 1.0)
def test_qc_quantize_recurrent_param_op(self):
"""
test custom recurrent param quantize op with CPU
"""
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
config = tf.compat.v1.ConfigProto(log_device_placement=False)
sess = tf.compat.v1.Session(graph=graph, config=config)
bitwidth = 8
use_symm_encoding = True
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
time_step_tensor = tf.constant(1, dtype=tf.int32)
encoding_min = tf.Variable(initial_value=-0.5, trainable=True, dtype=tf.double)
encoding_max = tf.Variable(initial_value=0.5, trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
trainable=False, dtype=tf.int32)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])
pass_through_op_output = zero_out_module.qc_quantize_recurrent_param(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding,
time_steps=time_step_tensor)
inp_tensor = sess.graph.get_tensor_by_name('input:0')
# inp_data = np.random.rand(10).astype(np.float32)
np.random.seed(18)
inp_data = np.random.randint(low=-1, high=2, size=10).astype(np.float32)
# get the output
print(inp_data)
out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
print(out_data)
# compare qc_quantize op's output with input
# encodings being set to -0.5 and 0.5 should not have a bearing on this quantized output
# we should not observe truncation if op's encoding min/max input values are used instead of cached values
self.assertTrue(np.allclose(out_data, inp_data, atol=1e-6))
sess.close()
def test_qc_quantize_op_gradient_computation(self):
"""
test to validate tensorflow custom gradient computation
against golden test data (in this case : an equivalent Pytorch test with auto grad)
"""
zero_out_module = tf.load_op_library('libaimet_tf_ops.so')
graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
with graph.as_default():
# place holder for the input
with tf.device("/device:CPU:0"):
                inp = tf.compat.v1.placeholder(tf.float32, shape=[2, 2], name='input')
tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED,
libpymo.RoundingMode.ROUND_NEAREST)
tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
trainable=False, dtype=tf.int32)
# fix min max and bitwidth to be used
encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
encoding_max = tf.Variable(initial_value=5.0, trainable=True, dtype=tf.double)
bit_width = tf.Variable(initial_value=8, trainable=False, dtype=tf.int8)
use_symmetric_encoding = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)
sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,
encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])
with graph.gradient_override_map(
{"QcQuantize": "QcQuantizeRangeLearningCustomGradient"}):
pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp,
op_mode=mode_var,
tensor_quantizer_reference=tensor_quant_ref,
encoding_min=encoding_min,
encoding_max=encoding_max,
bit_width=bit_width,
use_symmetric_encoding=use_symmetric_encoding)
pass_through_op = graph.get_operation_by_name('quant_op')
inp_tensor = sess.graph.get_tensor_by_name('input:0')
# fixed input data used
inp_data = [[0.4581, 0.4829], [0.3125, 0.6150]]
# get the output data @todo match these
tensor_quantizer.isEncodingValid = True
mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
        # for matching with the golden output, round to 4 decimal places
tf_output_data = np.around(sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data}), 4)
exp_output = [[0.4510, 0.4902], [0.3137, 0.6078]]
# dummy loss function to match with Pytorch
def custom_loss(y_actual, y_pred):
return tf.reduce_sum(tf.subtract(y_pred, y_actual-y_actual))
with graph.as_default():
            var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
            labels_placeholder = tf.compat.v1.placeholder(tf.float32, [2, 2], name='labels')
# output tensor
logits = sess.graph.get_tensor_by_name('quant_op:0')
# dummy loss function is set to sum(output)
current_loss = custom_loss(labels_placeholder, logits)
labels = np.ones((2), dtype=int) # np.random.randint(2, size=batches)
one_hot_labels = np.eye(2)[labels]
update_ops = []
            global_step = tf.compat.v1.train.create_global_step()
# Stochastic GD in tf with momentum param
            optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.05, momentum=0.5)
gradients = optimizer.compute_gradients(current_loss, var_list)
grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)
            init_global = tf.compat.v1.global_variables_initializer()
            init_local = tf.compat.v1.local_variables_initializer()
init = tf.group(init_global, init_local)
sess.run(init)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_op = tf.identity(current_loss, name='train_op')
            # evaluate the current loss value used (kept here for debugging)
_ = sess.run(current_loss, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})
# start training
_ = sess.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})
tf_enc_min_after_train = sess.run(pass_through_op.inputs[QuantizeOpIndices.encoding_min])
tf_enc_max_after_train = sess.run(pass_through_op.inputs[QuantizeOpIndices.encoding_max])
# match outputs
self.assertTrue(np.allclose(exp_output, tf_output_data))
# compare min and max after update with expected values (Pytorch values)
expected_enc_min_after_train = -5.7160621508955956e-05
expected_enc_max_after_train = 5.000057220458984
self.assertAlmostEqual(tf_enc_min_after_train, expected_enc_min_after_train, 6)
self.assertAlmostEqual(tf_enc_max_after_train, expected_enc_max_after_train, 6)
sess.close()
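# Illustrative sketch (not part of the original test suite): the gradient tests
# above validate a straight-through estimator, i.e. the incoming gradient is
# passed through (factor 1.0) wherever the input lies inside
# [encoding_min, encoding_max] and gated to 0.0 wherever the input was clamped.
# This pure-numpy helper only mirrors that gating rule for illustration; it is
# not the implementation used by the libaimet_tf_ops custom op, and it relies on
# the module-level `import numpy as np` already used by the tests.
def _straight_through_gate_example(inputs, encoding_min, encoding_max):
    """Return the gating mask a straight-through estimator applies to gradients."""
    inputs = np.asarray(inputs, dtype=np.float32)
    return ((inputs >= encoding_min) & (inputs <= encoding_max)).astype(np.float32)


# For example, _straight_through_gate_example([[1.4581, 0.4829], [0.3125, 5.6150]], 0.0, 5.0)
# yields [[1., 1.], [1., 0.]], matching the first clamping check above.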
| 53.05225
| 140
| 0.572046
|
1f2c8c9110006e29e721717eeefa09ee02643c3e
| 548
|
py
|
Python
|
models/BOS_SVM.py
|
probeu/RDS
|
1c6cb75a000a5d95ef61a4e26b45de6b587a011d
|
[
"MIT"
] | 10
|
2020-06-12T12:29:29.000Z
|
2020-09-05T06:28:34.000Z
|
models/BOS_SVM.py
|
ReML-AI/RDS
|
1c6cb75a000a5d95ef61a4e26b45de6b587a011d
|
[
"MIT"
] | 1
|
2020-06-22T12:08:52.000Z
|
2020-06-22T12:08:52.000Z
|
models/BOS_SVM.py
|
probeu/RDS
|
1c6cb75a000a5d95ef61a4e26b45de6b587a011d
|
[
"MIT"
] | 3
|
2020-06-15T17:21:16.000Z
|
2020-08-27T02:22:54.000Z
|
import numpy as np
from sklearn.svm import SVR
class BOS_SVM:
def __init__(self):
pass
def run(self, state, action):
data_x, data_y = state
train_x, train_y, test_x, test_y = (
data_x[action == 1],
data_y[action == 1].ravel(),
data_x[action == 0],
data_y[action == 0].ravel(),
)
np.random.seed(123)
model = SVR(kernel='linear')
model.fit(train_x, train_y)
return np.expand_dims(model.predict(test_x), axis=1)
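# Minimal usage sketch (illustrative only, not part of the original model code):
# the SVR is fit on the rows selected by the binary action mask (action == 1)
# and predicts targets for the remaining rows (action == 0). The data below is
# synthetic and exists only to show the expected shapes.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    data_x = rng.rand(20, 5)
    data_y = rng.rand(20, 1)
    action = (rng.rand(20) > 0.5).astype(int)
    predictions = BOS_SVM().run((data_x, data_y), action)
    print(predictions.shape)  # (number of rows with action == 0, 1)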
| 26.095238
| 60
| 0.536496
|
27d6b61321c60319bee66bfe49f4ebca87f8a1eb
| 1,450
|
py
|
Python
|
data/settings.py
|
lothesven/Help-MacGyver-to-escape
|
4df57a172fb1eb0e32af24bbfc177064d91ff69b
|
[
"Unlicense"
] | null | null | null |
data/settings.py
|
lothesven/Help-MacGyver-to-escape
|
4df57a172fb1eb0e32af24bbfc177064d91ff69b
|
[
"Unlicense"
] | 1
|
2019-07-12T16:38:20.000Z
|
2019-07-12T16:38:20.000Z
|
data/settings.py
|
lothesven/Help-MacGyver-to-escape
|
4df57a172fb1eb0e32af24bbfc177064d91ff69b
|
[
"Unlicense"
] | null | null | null |
"""Contains all settings constants such as:\n
colors, fonts, sounds, images, positions, structures and sizes"""
import os.path
from pygame import font
DIRECTORY = os.path.abspath(os.path.dirname(__file__))
HEIGHT, WIDTH = 600, 600
MARGIN = 40
FOOTLOGS = 80
TILESIZE = 40
MENU = os.path.join(DIRECTORY, "images", "macgyver_t.jpg")
WIN = os.path.join(DIRECTORY, "images", "macgyver_victory.jpg")
LOSE = os.path.join(DIRECTORY, "images", "macgyver_failure.jpg")
MACGYVER = os.path.join(DIRECTORY, "images", "macgyver.png")
GUARD = os.path.join(DIRECTORY, "images", "guard.png")
WALLSNFLOORS = os.path.join(DIRECTORY, "images", "wallsnfloors.png")
NEEDLE = os.path.join(DIRECTORY, "images", "needle.png")
PLASTUBE = os.path.join(DIRECTORY, "images", "plastic_tube.png")
ETHER = os.path.join(DIRECTORY, "images", "ether.png")
SYRINGE = os.path.join(DIRECTORY, "images", "syringe.png")
FOOTSTEPS = os.path.join(DIRECTORY, "sounds", "footsteps.ogg")
ERROR = os.path.join(DIRECTORY, "sounds", "error.ogg")
AMBIANT = os.path.join(DIRECTORY, "sounds", "ambiant.ogg")
WINNING = os.path.join(DIRECTORY, "sounds", "winning.ogg")
VICTORY = os.path.join(DIRECTORY, "sounds", "victory.ogg")
FAILURE = os.path.join(DIRECTORY, "sounds", "failure.ogg")
font.init()
FONT = font.Font(os.path.join(DIRECTORY, "fonts", "niew_cromagnon.ttf"), 30)
MAZE = os.path.join(DIRECTORY, "maze")
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
| 35.365854
| 76
| 0.707586
|
503e515a2d49e7822e79270dc3185ee187e08440
| 1,547
|
py
|
Python
|
RunModel_CoNLL_Format.py
|
tommasoc80/event_interoperability
|
0dcdb72a2bd32f65e021c15d44a40a37601e9367
|
[
"CC0-1.0"
] | null | null | null |
RunModel_CoNLL_Format.py
|
tommasoc80/event_interoperability
|
0dcdb72a2bd32f65e021c15d44a40a37601e9367
|
[
"CC0-1.0"
] | null | null | null |
RunModel_CoNLL_Format.py
|
tommasoc80/event_interoperability
|
0dcdb72a2bd32f65e021c15d44a40a37601e9367
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python
# This scripts loads a pretrained model and a input file in CoNLL format (each line a token, sentences separated by an empty line).
# The input sentences are passed to the model for tagging. Prints the tokens and the tags in a CoNLL format to stdout
# Usage: python RunModel_CoNLL_Format.py modelPath inputPathToConllFile
# For pretrained models see docs/
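# A minimal illustration (not from the original script) of the expected input file:
# one token per line, sentences separated by an empty line, e.g.
#
#   John
#   lives
#   in
#   Berlin
#
#   She
#   works
#   there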
from __future__ import print_function
from util.preprocessing import readCoNLL, createMatrices, addCharInformation, addCasingInformation
from neuralnets.BiLSTM import BiLSTM
import sys
import logging
if len(sys.argv) < 3:
print("Usage: python RunModel_CoNLL_Format.py modelPath inputPathToConllFile")
exit()
modelPath = sys.argv[1]
inputPath = sys.argv[2] # path to test set
inputColumns = {0: "tokens"}
#inputColumns = {2: "tokens"}
# :: Prepare the input ::
sentences = readCoNLL(inputPath, inputColumns)
#print(sentences)
addCharInformation(sentences)
addCasingInformation(sentences)
# :: Load the model ::
lstmModel = BiLSTM.loadModel(modelPath)
dataMatrix = createMatrices(sentences, lstmModel.mappings, True)
# :: Tag the input ::
tags = lstmModel.tagSentences(dataMatrix)
# :: Output to stdout ::
for sentenceIdx in range(len(sentences)):
tokens = sentences[sentenceIdx]['tokens']
for tokenIdx in range(len(tokens)):
tokenTags = []
for modelName in sorted(tags.keys()):
tokenTags.append(tags[modelName][sentenceIdx][tokenIdx])
print("%s\t%s" % (tokens[tokenIdx], "\t".join(tokenTags)))
print("")
| 30.333333
| 131
| 0.738849
|
62b35b79ea0bcca181dc64abe40436b3f1b57f1d
| 1,234
|
py
|
Python
|
main/auth/google.py
|
ssxenon01/contact-sharing
|
ad2bec5b03f123837eab6a7126896a8faf17639c
|
[
"MIT"
] | 1
|
2015-09-12T10:12:16.000Z
|
2015-09-12T10:12:16.000Z
|
main/auth/google.py
|
ssxenon01/contact-sharing
|
ad2bec5b03f123837eab6a7126896a8faf17639c
|
[
"MIT"
] | null | null | null |
main/auth/google.py
|
ssxenon01/contact-sharing
|
ad2bec5b03f123837eab6a7126896a8faf17639c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from google.appengine.api import users
import flask
import auth
import model
import util
from main import app
@app.route('/signin/google/')
def signin_google():
auth.save_request_params()
google_url = users.create_login_url(flask.url_for('google_authorized'))
return flask.redirect(google_url)
@app.route('/_s/callback/google/authorized/')
def google_authorized():
google_user = users.get_current_user()
if google_user is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
user_db = retrieve_user_from_google(google_user)
return auth.signin_user_db(user_db)
def retrieve_user_from_google(google_user):
auth_id = 'federated_%s' % google_user.user_id()
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
if not user_db.admin and users.is_current_user_admin():
user_db.admin = True
user_db.put()
return user_db
return auth.create_user_db(
auth_id=auth_id,
name=util.create_name_from_email(google_user.email()),
username=google_user.email(),
email=google_user.email(),
verified=True,
admin=users.is_current_user_admin(),
)
| 24.68
| 73
| 0.740681
|
7d1a423f796912b6a820e050ad98b3e355a32e9c
| 1,365
|
py
|
Python
|
project/urls.py
|
Rajkumar-brk/Emotion-recognition-using-Text
|
d854247e1ce2721bcdf19bc237957c78ff1d100c
|
[
"MIT"
] | 52
|
2019-07-26T04:19:57.000Z
|
2022-03-29T12:03:41.000Z
|
project/urls.py
|
Rajkumar-brk/Emotion-recognition-using-Text
|
d854247e1ce2721bcdf19bc237957c78ff1d100c
|
[
"MIT"
] | 2
|
2019-09-17T02:11:52.000Z
|
2021-12-07T01:30:35.000Z
|
project/urls.py
|
Rajkumar-brk/Emotion-recognition-using-Text
|
d854247e1ce2721bcdf19bc237957c78ff1d100c
|
[
"MIT"
] | 87
|
2019-07-26T18:22:24.000Z
|
2022-03-29T07:15:36.000Z
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from user import views as user_view
from django.contrib.auth import views as auth
from .router import router
from rest_framework.authtoken import views
urlpatterns = [
path('admin/', admin.site.urls),
######### api path ##########################
path('api/',include(router.urls)),
path('api-token-auth/',views.obtain_auth_token,name='api-tokn-auth'),
#####user related path##########################
path('',include('user.urls')),
path('login/',user_view.Login,name='login'),
path('logout/',auth.LogoutView.as_view(template_name='user/index.html'),name='logout'),
path('register/',user_view.register,name='register'),
]
| 34.125
| 91
| 0.676923
|
49efa0df5be83a301cd2551d1a02c0fc8c39d482
| 95
|
py
|
Python
|
mailclient/apps.py
|
sudhanshu456/email-client-django
|
fc59dc1a3ba4e7f5fec5f8ce82dfae73a64ff537
|
[
"MIT"
] | null | null | null |
mailclient/apps.py
|
sudhanshu456/email-client-django
|
fc59dc1a3ba4e7f5fec5f8ce82dfae73a64ff537
|
[
"MIT"
] | null | null | null |
mailclient/apps.py
|
sudhanshu456/email-client-django
|
fc59dc1a3ba4e7f5fec5f8ce82dfae73a64ff537
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class MailclientConfig(AppConfig):
name = 'mailclient'
| 15.833333
| 34
| 0.768421
|
12483ef4de2e39137f52b9282b4e28830906dde5
| 11,591
|
py
|
Python
|
fsdet/data/builtin.py
|
luweishuang/few-shot-object-detection
|
956517f4e714a205583ce113ebc56b38af53df09
|
[
"Apache-2.0"
] | null | null | null |
fsdet/data/builtin.py
|
luweishuang/few-shot-object-detection
|
956517f4e714a205583ce113ebc56b38af53df09
|
[
"Apache-2.0"
] | null | null | null |
fsdet/data/builtin.py
|
luweishuang/few-shot-object-detection
|
956517f4e714a205583ce113ebc56b38af53df09
|
[
"Apache-2.0"
] | null | null | null |
"""
This file registers pre-defined datasets at hard-coded paths, and their metadata.
We hard-code metadata for common datasets. This will enable:
1. Consistency check when loading the datasets
2. Use models on these standard datasets directly and run demos,
without having to download the dataset annotations
We hard-code some paths to the dataset that's assumed to
exist in "./datasets/".
Here we only register the few-shot datasets and complete COCO, PascalVOC and
LVIS have been handled by the builtin datasets in detectron2.
"""
import os
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.lvis import register_lvis_instances
from .builtin_meta import _get_builtin_metadata
from .meta_coco import register_meta_coco
from .meta_lvis import register_meta_lvis
from .meta_pascal_voc import register_meta_pascal_voc
from .meta_powertower import register_meta_powertower
# ==== Predefined datasets and splits for COCO ==========
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
"coco_2014_train": (
"coco/train2014",
"coco/annotations/instances_train2014.json",
),
"coco_2014_val": (
"coco/val2014",
"coco/annotations/instances_val2014.json",
),
"coco_2014_minival": (
"coco/val2014",
"coco/annotations/instances_minival2014.json",
),
"coco_2014_minival_100": (
"coco/val2014",
"coco/annotations/instances_minival2014_100.json",
),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"coco_2017_train": (
"coco/train2017",
"coco/annotations/instances_train2017.json",
),
"coco_2017_val": (
"coco/val2017",
"coco/annotations/instances_val2017.json",
),
"coco_2017_test": (
"coco/test2017",
"coco/annotations/image_info_test2017.json",
),
"coco_2017_test-dev": (
"coco/test2017",
"coco/annotations/image_info_test-dev2017.json",
),
"coco_2017_val_100": (
"coco/val2017",
"coco/annotations/instances_val2017_100.json",
),
}
def register_all_coco(root="datasets"):
# for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
# for key, (image_root, json_file) in splits_per_dataset.items():
# # Assume pre-defined datasets live in `./datasets`.
# register_coco_instances(
# key,
# _get_builtin_metadata(dataset_name),
# os.path.join(root, json_file)
# if "://" not in json_file
# else json_file,
# os.path.join(root, image_root),
# )
# register meta datasets
METASPLITS = [
(
"coco_trainval_all",
"coco/trainval2014",
"cocosplit/datasplit/trainvalno5k.json",
),
(
"coco_trainval_base",
"coco/trainval2014",
"cocosplit/datasplit/trainvalno5k.json",
),
("coco_test_all", "coco/val2014", "cocosplit/datasplit/5k.json"),
("coco_test_base", "coco/val2014", "cocosplit/datasplit/5k.json"),
("coco_test_novel", "coco/val2014", "cocosplit/datasplit/5k.json"),
]
# register small meta datasets for fine-tuning stage
for prefix in ["all", "novel"]:
for shot in [1, 2, 3, 5, 10, 30]:
for seed in range(10):
seed = "" if seed == 0 else "_seed{}".format(seed)
name = "coco_trainval_{}_{}shot{}".format(prefix, shot, seed)
METASPLITS.append((name, "coco/trainval2014", ""))
for name, imgdir, annofile in METASPLITS:
register_meta_coco(
name,
_get_builtin_metadata("coco_fewshot"),
os.path.join(root, imgdir),
os.path.join(root, annofile),
)
# ==== Predefined datasets and splits for LVIS ==========
_PREDEFINED_SPLITS_LVIS = {
"lvis_v0.5": {
# "lvis_v0.5_train": ("coco/train2017", "lvis/lvis_v0.5_train.json"),
"lvis_v0.5_train_freq": (
"coco/train2017",
"lvis/lvis_v0.5_train_freq.json",
),
"lvis_v0.5_train_common": (
"coco/train2017",
"lvis/lvis_v0.5_train_common.json",
),
"lvis_v0.5_train_rare": (
"coco/train2017",
"lvis/lvis_v0.5_train_rare.json",
),
# "lvis_v0.5_val": ("coco/val2017", "lvis/lvis_v0.5_val.json"),
# "lvis_v0.5_val_rand_100": (
# "coco/val2017",
# "lvis/lvis_v0.5_val_rand_100.json",
# ),
# "lvis_v0.5_test": (
# "coco/test2017",
# "lvis/lvis_v0.5_image_info_test.json",
# ),
},
}
def register_all_lvis(root="datasets"):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_lvis_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
# register meta datasets
METASPLITS = [
(
"lvis_v0.5_train_shots",
"coco/train2017",
"lvissplit/lvis_shots.json",
),
(
"lvis_v0.5_train_rare_novel",
"coco/train2017",
"lvis/lvis_v0.5_train_rare.json",
),
("lvis_v0.5_val_novel", "coco/val2017", "lvis/lvis_v0.5_val.json"),
]
for name, image_root, json_file in METASPLITS:
dataset_name = "lvis_v0.5_fewshot" if "novel" in name else "lvis_v0.5"
register_meta_lvis(
name,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
# ==== Predefined splits for PASCAL VOC ===========
def register_all_pascal_voc(root="datasets"):
# SPLITS = [
# ("voc_2007_trainval", "VOC2007", "trainval"),
# ("voc_2007_train", "VOC2007", "train"),
# ("voc_2007_val", "VOC2007", "val"),
# ("voc_2007_test", "VOC2007", "test"),
# ("voc_2012_trainval", "VOC2012", "trainval"),
# ("voc_2012_train", "VOC2012", "train"),
# ("voc_2012_val", "VOC2012", "val"),
# ]
# for name, dirname, split in SPLITS:
# year = 2007 if "2007" in name else 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year)
# MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# register meta datasets
METASPLITS = [
("voc_2007_trainval_base1", "VOC2007", "trainval", "base1", 1),
("voc_2007_trainval_base2", "VOC2007", "trainval", "base2", 2),
("voc_2007_trainval_base3", "VOC2007", "trainval", "base3", 3),
("voc_2012_trainval_base1", "VOC2012", "trainval", "base1", 1),
("voc_2012_trainval_base2", "VOC2012", "trainval", "base2", 2),
("voc_2012_trainval_base3", "VOC2012", "trainval", "base3", 3),
("voc_2007_trainval_all1", "VOC2007", "trainval", "base_novel_1", 1),
("voc_2007_trainval_all2", "VOC2007", "trainval", "base_novel_2", 2),
("voc_2007_trainval_all3", "VOC2007", "trainval", "base_novel_3", 3),
("voc_2012_trainval_all1", "VOC2012", "trainval", "base_novel_1", 1),
("voc_2012_trainval_all2", "VOC2012", "trainval", "base_novel_2", 2),
("voc_2012_trainval_all3", "VOC2012", "trainval", "base_novel_3", 3),
("voc_2007_test_base1", "VOC2007", "test", "base1", 1),
("voc_2007_test_base2", "VOC2007", "test", "base2", 2),
("voc_2007_test_base3", "VOC2007", "test", "base3", 3),
("voc_2007_test_novel1", "VOC2007", "test", "novel1", 1),
("voc_2007_test_novel2", "VOC2007", "test", "novel2", 2),
("voc_2007_test_novel3", "VOC2007", "test", "novel3", 3),
("voc_2007_test_all1", "VOC2007", "test", "base_novel_1", 1),
("voc_2007_test_all2", "VOC2007", "test", "base_novel_2", 2),
("voc_2007_test_all3", "VOC2007", "test", "base_novel_3", 3),
]
# register small meta datasets for fine-tuning stage
for prefix in ["all", "novel"]:
for sid in range(1, 4):
for shot in [1, 2, 3, 5, 10]:
for year in [2007, 2012]:
for seed in range(100):
seed = "" if seed == 0 else "_seed{}".format(seed)
name = "voc_{}_trainval_{}{}_{}shot{}".format(
year, prefix, sid, shot, seed
)
dirname = "VOC{}".format(year)
img_file = "{}_{}shot_split_{}_trainval".format(
prefix, shot, sid
)
keepclasses = (
"base_novel_{}".format(sid)
if prefix == "all"
else "novel{}".format(sid)
)
METASPLITS.append(
(name, dirname, img_file, keepclasses, sid)
)
for name, dirname, split, keepclasses, sid in METASPLITS:
year = 2007 if "2007" in name else 2012
register_meta_pascal_voc(
name,
_get_builtin_metadata("pascal_voc_fewshot"),
os.path.join(root, dirname),
split,
year,
keepclasses,
sid,
)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# ==== Predefined splits for powertower ===========
def register_all_powertower(root="datasets"):
# register meta datasets
METASPLITS = [
# name, dirname, img_file, keepclasses, sid
("powertower_trainval_base", "powertower", "trainval", "base1"),
("powertower_trainval_all", "powertower", "trainval", "base_novel_1"),
("powertower_test_base", "powertower", "test", "base1"),
("powertower_test_novel", "powertower", "test", "novel1"),
("powertower_test_all", "powertower", "test", "base_novel_1"),
]
# register small meta datasets for fine-tuning stage
for prefix in ["all", "novel"]:
for shot in [1, 5, 10]:
for seed in range(100):
seed = "" if seed == 0 else "_seed{}".format(seed)
name = "powertower_trainval_{}_{}shot{}".format(prefix, shot, seed)
dirname = "powertower"
img_file = "{}_{}shot_split_trainval".format(prefix, shot)
keepclasses = (
"base_novel" if prefix == "all" else "novel"
)
METASPLITS.append(
(name, dirname, img_file, keepclasses)
)
for name, dirname, split, keepclasses in METASPLITS:
register_meta_powertower(
name,
_get_builtin_metadata("powertower_fewshot"),
os.path.join(root, dirname),
split,
keepclasses
)
MetadataCatalog.get(name).evaluator_type = "powertower"
# Register them all under "./datasets"
register_all_coco()
register_all_lvis()
register_all_pascal_voc()
register_all_powertower()
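# Illustrative helper (not part of the original module and never called here):
# after the register_all_* calls above, the splits registered in this file can
# be inspected through detectron2's catalogs without loading any image data.
def _example_list_registered_splits():
    from detectron2.data import DatasetCatalog

    powertower_splits = [
        name for name in DatasetCatalog.list() if name.startswith("powertower_trainval")
    ]
    voc_evaluator = MetadataCatalog.get("voc_2007_test_all1").evaluator_type  # "pascal_voc"
    return powertower_splits, voc_evaluator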
| 37.390323
| 87
| 0.570356
|
47711d6f57d1e19b269abd90dd4ff104cb3a9aad
| 409
|
py
|
Python
|
code/course_py_docker/wsgi.py
|
leandrolasnor/todo
|
cdf1377511427c942b2819954880acba3830f685
|
[
"Apache-2.0"
] | null | null | null |
code/course_py_docker/wsgi.py
|
leandrolasnor/todo
|
cdf1377511427c942b2819954880acba3830f685
|
[
"Apache-2.0"
] | null | null | null |
code/course_py_docker/wsgi.py
|
leandrolasnor/todo
|
cdf1377511427c942b2819954880acba3830f685
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for course_py_docker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'course_py_docker.settings')
application = get_wsgi_application()
| 24.058824
| 78
| 0.794621
|
337a8fa821705d9ae2f4a8df167f1faf0c0d9695
| 2,700
|
py
|
Python
|
packages/cspell-lib/samples/src/sample.py
|
DenysVuika/cspell
|
126b28fc501d4e709699b381edc09e0c9dad48fb
|
[
"MIT"
] | 406
|
2019-05-29T15:57:00.000Z
|
2022-03-31T17:34:41.000Z
|
packages/cspell-lib/samples/src/sample.py
|
DenysVuika/cspell
|
126b28fc501d4e709699b381edc09e0c9dad48fb
|
[
"MIT"
] | 1,002
|
2019-05-13T08:18:49.000Z
|
2022-03-31T06:52:11.000Z
|
packages/cspell-lib/samples/src/sample.py
|
DenysVuika/cspell
|
126b28fc501d4e709699b381edc09e0c9dad48fb
|
[
"MIT"
] | 50
|
2019-06-14T20:54:27.000Z
|
2022-02-28T23:34:18.000Z
|
"""
Roman Numbers
Sample Python file.
with email address: ExampleCode <possiblyhelpfulcodeexamples@code.example.com>
"""
import re
class OutOfRangeError(ValueError): pass
class NotIntegerError(ValueError): pass
class InvalidRomanNumeral(ValueError): pass
romanNumeralValues = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000,
'II': 2,
'III': 3,
'IV': 4,
'IX': 9,
'XX': 20,
'XXX': 30,
'XL': 40,
'XC': 90,
'CC': 200,
'CCC': 300,
'CD': 400,
'CM': 900,
'MM': 2000,
'MMM': 3000,
'MMMM': 4000,
}
ordered = sorted([(a[1], a[0]) for a in romanNumeralValues.items()], reverse=True)
def to_roman(number):
if not isinstance(number, int):
raise NotIntegerError('Non-integers cannot be converted.')
if not (0 < number < 5000):
raise OutOfRangeError('Valid numbers are 1 to 4999, got {0}'.format(number))
r = ''
for (num, numeral) in ordered:
if num <= number:
number -= num
r += numeral
return r
# Match against the numerals required for each digit
reMatchRoman = re.compile(r'''
^
(M{0,4})? # thousands
(CM|CD|D?C{0,3})? # hundreds
(XC|XL|L?X{0,3})? # tens
(IX|IV|V?I{0,3})? # ones
$
''', re.VERBOSE)
# Split numerals up so we can look them up in romanNumeralValues
reSplitNumerals = re.compile(r"CM|D|CD|XC|L|XL|IX|V|IV|M+|C+|X+|I+")
def is_valid(roman):
    return reMatchRoman.match(roman) is not None
def to_number(roman):
if not isinstance(roman, str):
raise InvalidRomanNumeral('Only valid roman numerals are allowed.')
roman = roman.upper().strip()
if not roman:
raise InvalidRomanNumeral('Only valid roman numerals are allowed.')
match = reMatchRoman.match(roman.upper())
    if match is None:
raise InvalidRomanNumeral('Only valid roman numerals are allowed.')
value = 0
for digit in match.groups():
for numeral in reSplitNumerals.findall(digit):
value += romanNumeralValues[numeral]
return value
binary = b'binary'
unicode = u'unicode'
if __name__ == '__main__':
print(to_roman(1984))
print(is_valid(to_roman(1999)))
print(is_valid('hello'))
print(to_roman(1492))
print(to_number(to_roman(1492)))
print(to_roman(1888))
print(to_number(to_roman(1888)))
for n in range(1, 4999):
# print(to_roman(n))
is_valid(to_roman(n))
if n != to_number(to_roman(n)):
raise ValueError('Failed on %d' % n)
print('Done.')
'''
Deliberate misspelling for testing purposes: garbbage.
Django terms:
bbcontains
bboverlaps
createsuperuser
dbshell
'''
| 23.478261
| 84
| 0.612222
|
767854b0fbace8cf6d38a03e5b1457428e94741a
| 5,192
|
py
|
Python
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
class TrtConvertPadTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
inputs = program_config.inputs
weights = program_config.weights
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
if attrs[0]['pad_value'] != 0.0:
return False
for x in attrs[0]['paddings']:
if x < 0:
return False
return True
def sample_program_configs(self):
def generate_input1(attrs: List[Dict[str, Any]]):
return np.ones([1, 3, 64, 64]).astype(np.float32)
def generate_weight1(attrs: List[Dict[str, Any]]):
return np.random.random([24, 3, 3, 3]).astype(np.float32)
for pad_value in [0.0, 1.0, 2.0, -100, 100.0]:
for paddings in [[0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 1, 2, 3, 4],
[0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, -1, -1, 1, 1]]:
dics = [{"pad_value": pad_value, "paddings": paddings}, {}]
ops_config = [{
"op_type": "pad",
"op_inputs": {
"X": ["input_data"]
},
"op_outputs": {
"Out": ["pad_output_data"]
},
"op_attrs": dics[0]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data":
TensorConfig(data_gen=partial(generate_input1, dics))
},
outputs=["pad_output_data"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}
def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
for x in range(len(program_config.ops[0].attrs['paddings']) - 4):
if program_config.ops[0].attrs['paddings'][x] != 0:
return 0, 3
return 1, 2
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-2
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-2
def add_skip_trt_case(self):
def teller1(program_config, predictor_config):
for x in range(len(program_config.ops[0].attrs['paddings']) - 4):
if program_config.ops[0].attrs['paddings'][x] != 0:
return True
return False
self.add_skip_case(
teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
"NOT Implemented: we need to add support pad not only inplement on h or w, such as paddings = [0, 0, 1, 1, 1, 1, 1, 1]"
)
pass
def test(self):
self.add_skip_trt_case()
self.run_test()
if __name__ == "__main__":
unittest.main()
| 37.085714
| 131
| 0.585901
|
2167091cd087a4618b75242f018ca06db6d9c38a
| 230
|
py
|
Python
|
test.py
|
qwc-services/qwc-data-service
|
b20f47ad08275439233034ef3b31a4c437be3fc7
|
[
"MIT"
] | 2
|
2021-08-11T10:34:44.000Z
|
2022-02-24T10:26:39.000Z
|
test.py
|
qwc-services/qwc-data-service
|
b20f47ad08275439233034ef3b31a4c437be3fc7
|
[
"MIT"
] | 7
|
2020-10-17T15:38:39.000Z
|
2022-01-08T09:59:21.000Z
|
test.py
|
qwc-services/qwc-data-service
|
b20f47ad08275439233034ef3b31a4c437be3fc7
|
[
"MIT"
] | 5
|
2019-05-17T11:27:55.000Z
|
2020-12-28T09:24:25.000Z
|
import unittest
# from tests.api_tests import *
from tests.feature_validation_tests import *
# from tests.feature_validation_tests_somap import *
if __name__ == '__main__':
# run all imported test cases
unittest.main()
| 20.909091
| 52
| 0.76087
|
79bcf770250a23086d0fc88be1ad1dc2cfedf0ac
| 2,075
|
py
|
Python
|
oauth2_provider/admin.py
|
redhawkIT/django-oauth-toolkit
|
3909595f5d212b689fa9903ea70001503850d689
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2021-03-21T03:35:24.000Z
|
2021-04-20T05:49:19.000Z
|
oauth2_provider/admin.py
|
redhawkIT/django-oauth-toolkit
|
3909595f5d212b689fa9903ea70001503850d689
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2018-06-20T11:33:43.000Z
|
2021-05-28T08:02:21.000Z
|
oauth2_provider/admin.py
|
redhawkIT/django-oauth-toolkit
|
3909595f5d212b689fa9903ea70001503850d689
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2018-06-20T11:14:46.000Z
|
2021-05-21T15:56:02.000Z
|
from django.contrib import admin
from oauth2_provider.models import (
get_access_token_admin_class,
get_access_token_model,
get_application_admin_class,
get_application_model,
get_grant_admin_class,
get_grant_model,
get_id_token_admin_class,
get_id_token_model,
get_refresh_token_admin_class,
get_refresh_token_model,
)
class ApplicationAdmin(admin.ModelAdmin):
list_display = ("id", "name", "user", "client_type", "authorization_grant_type")
list_filter = ("client_type", "authorization_grant_type", "skip_authorization")
radio_fields = {
"client_type": admin.HORIZONTAL,
"authorization_grant_type": admin.VERTICAL,
}
raw_id_fields = ("user",)
class AccessTokenAdmin(admin.ModelAdmin):
list_display = ("token", "user", "application", "expires")
raw_id_fields = ("user", "source_refresh_token")
class GrantAdmin(admin.ModelAdmin):
list_display = ("code", "application", "user", "expires")
raw_id_fields = ("user",)
class IDTokenAdmin(admin.ModelAdmin):
list_display = ("jti", "user", "application", "expires")
raw_id_fields = ("user",)
class RefreshTokenAdmin(admin.ModelAdmin):
list_display = ("token", "user", "application")
raw_id_fields = ("user", "access_token")
application_model = get_application_model()
access_token_model = get_access_token_model()
grant_model = get_grant_model()
id_token_model = get_id_token_model()
refresh_token_model = get_refresh_token_model()
application_admin_class = get_application_admin_class()
access_token_admin_class = get_access_token_admin_class()
grant_admin_class = get_grant_admin_class()
id_token_admin_class = get_id_token_admin_class()
refresh_token_admin_class = get_refresh_token_admin_class()
admin.site.register(application_model, application_admin_class)
admin.site.register(access_token_model, access_token_admin_class)
admin.site.register(grant_model, grant_admin_class)
admin.site.register(id_token_model, id_token_admin_class)
admin.site.register(refresh_token_model, refresh_token_admin_class)
| 32.421875
| 84
| 0.773976
|
b1f3186e133d4b984298a17501a5e530a15f3a57
| 1,559
|
py
|
Python
|
examples/user/components/run_examples.py
|
willic3/pythia
|
2657b95a0c07fd3c914ab6b5f7ec89a8edba004c
|
[
"BSD-3-Clause"
] | 1
|
2015-11-30T08:01:39.000Z
|
2015-11-30T08:01:39.000Z
|
examples/user/components/run_examples.py
|
willic3/pythia
|
2657b95a0c07fd3c914ab6b5f7ec89a8edba004c
|
[
"BSD-3-Clause"
] | 27
|
2018-05-24T18:31:25.000Z
|
2021-10-16T03:57:52.000Z
|
examples/user/components/run_examples.py
|
willic3/pythia
|
2657b95a0c07fd3c914ab6b5f7ec89a8edba004c
|
[
"BSD-3-Clause"
] | 7
|
2019-07-19T02:30:56.000Z
|
2021-06-02T22:00:01.000Z
|
#!/usr/bin/env python
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
import step01_facility
import step02_facilityvault
import step03_facilityarray
def run_app(app, args=[]):
print("Running '{}' with args={} ...".format(app.__doc__, args))
app.run(argv=[app.name] + args)
print("Finished running '{}'.\n".format(app.__doc__))
def run_step01():
app = step01_facility.GreeterApp()
run_app(app)
run_app(app, args=["--greeter.greeting=Howdy there"])
run_app(app, args=["--greeter=greeters.Alien"])
run_app(app, args=["step01_human.cfg"])
def run_step02():
app = step02_facilityvault.GreeterApp()
run_app(app)
run_app(app, args=["--greeter=robot"])
run_app(app, args=["--greeter=robot", "--robot.model=RPT56"])
run_app(app, args=["step02_robot.cfg"])
def run_step03():
app = step03_facilityarray.GreeterApp()
run_app(app)
run_app(app, args=["--greeters.left=greeters.Human"])
run_app(app, args=["--greeters.right=remotehuman"])
run_app(app, args=["step03_leftright.cfg"])
run_app(app, args=["step03_three.cfg"])
def run():
run_step01()
run_step02()
run_step03()
if __name__ == "__main__":
run()
| 25.983333
| 72
| 0.613214
|
49509263e7bebc04f863b0921f3a6d11b1d554d0
| 23,100
|
py
|
Python
|
controls/pyqgis_controls/main.py
|
cabesuon/ideuy_controls
|
508217725ffd4993d574acec6ea9d80c0401591a
|
[
"MIT"
] | null | null | null |
controls/pyqgis_controls/main.py
|
cabesuon/ideuy_controls
|
508217725ffd4993d574acec6ea9d80c0401591a
|
[
"MIT"
] | null | null | null |
controls/pyqgis_controls/main.py
|
cabesuon/ideuy_controls
|
508217725ffd4993d574acec6ea9d80c0401591a
|
[
"MIT"
] | null | null | null |
""" Module that controls the correct flow of the hydrography.
Examples:
$python main.py -h.
$python main.py dbname dbschema user password output rem conf.
$python main.py test_vector_db duplicate_geoms test_user test_password output --server localhost --port 5432.
Attributes:
_: gettext
pyqgis_controls.main
"""
import sys
import argparse
import json
import gettext
import logging
from qgis.core import QgsApplication, QgsDataSourceUri, QgsVectorLayer
from qgis.core import QgsGeometry, QgsSpatialIndex, QgsWkbTypes, QgsFeature
from PyQt5.QtGui import *
from src.common.time import (
get_time
)
from common.file import FileManager, FileManagerError
_ = gettext.gettext
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_args():
""" Get and return arguments from input. """
parser = argparse.ArgumentParser(
description=_(
            'check the correct direction of the flow, the existence of endorheic basins and that' +
' the surfaces at rest have no flow direction.'
)
)
parser.add_argument('dbname', help=_('database name'))
parser.add_argument('dbschema', help=_('database schema'))
parser.add_argument('user', help=_('database user'))
parser.add_argument('password', help=_('database password'))
parser.add_argument('output', help=_('output folder'))
parser.add_argument('rem', help=_(
'shapefile of the polyline with the limits of the consignment'
))
parser.add_argument('conf', help=_('json file with the configuration of the control'))
parser.add_argument('-s', '--server', default='localhost', help=_('database host'))
parser.add_argument('-p', '--port', type=int, default=5432, help=_('database port'))
parser.add_argument('-q', '--dirqgis', default='C:\\OSGeo4W64\\apps\\qgis\\', help=_(
'osgeo app qgis directori'
))
parser.add_argument('-tol1', '--t1', type=float, default=0.1, help=_('Z lines tolerance'))
parser.add_argument('-tol2', '--t2', type=float, default=0.01, help=_('Z polygon tolerance'))
args = parser.parse_args()
return args
def qgs_init(pp):
""" PyQGIS initialization. """
QgsApplication.setPrefixPath(pp, True)
qgs = QgsApplication([], False)
qgs.initQgis()
return qgs
def qgs_exit(qgs):
""" Exit PyQGIS. """
qgs.exitQgis()
def create_indexes(l, u, args):
""" Return list and dictionary with spatial indexes. """
logger.info('{}.'.format(_('Constructing spatial indexes...')))
dic_featid = {}
l_ind = {}
for i in l:
u.setDataSource(args.dbschema, i, "geom")
vector_layer = QgsVectorLayer(u.uri(False), i, "postgres")
it_features = vector_layer.getFeatures()
index = QgsSpatialIndex(it_features)
l_ind[i] = index
all_features = {feature.id(): feature['id'] for feature in vector_layer.getFeatures()}
dic_featid[i] = all_features
    logger.info('{}.'.format(_('Finished construction of spatial indexes...')))
return l_ind, dic_featid
def init_file_manager(out_dir):
""" Initialize and return the file manager, and create output folders. """
fman = None
try:
fman = FileManager(out_dir, '.')
except FileManagerError as err:
logger.error('{}: {}'.format(_('ERROR'), str(err)), exc_info=True)
fman = None
return fman
def get_intersections_continuity(c, json_cont):
""" Return list with the layers to intersect, given one layer. """
return json_cont[c]
def control_1(capa1, uri, args, nom_sal, fman):
""" Verify the correct flow of the layer. """
uri.setDataSource(args.dbschema, capa1, "geom")
capa_eje = QgsVectorLayer(uri.uri(False), capa1, "postgres")
iterador_features = capa_eje.getFeatures()
cantidad_errores = 0
for feature in iterador_features:
geometria_feature = feature.geometry()
vertices_feature = geometria_feature.vertices()
cantidad_vertices = 0
min_altura = 0
max_altura = 0
flujo = ''
pedir_nuevo_punto = False
tiene_error = False
while (vertices_feature.hasNext() and (not tiene_error)):
cantidad_vertices = cantidad_vertices + 1
punto_vertice = vertices_feature.next()
if cantidad_vertices == 1:
first_vertex = punto_vertice.z()
if ((cantidad_vertices == 2) or pedir_nuevo_punto):
altura_2 = punto_vertice.z()
if abs(altura_2 - first_vertex) > args.t1:
if (altura_2 - first_vertex) > 0:
flujo = _('up')
pedir_nuevo_punto = False
max_altura = altura_2
elif (altura_2 - first_vertex) < 0:
flujo = _('down')
pedir_nuevo_punto = False
                        min_altura = altura_2
else:
pedir_nuevo_punto = True
elif (cantidad_vertices != 1 and not pedir_nuevo_punto):
                # Verify that the next vertex has the same flow direction as the previous one
if flujo == _('down'):
if (punto_vertice.z() - altura_anterior > args.t1) or (
punto_vertice.z() - min_altura > args.t1):
if (punto_vertice.z() - altura_anterior) > args.t1:
dif_imprimir = punto_vertice.z() - altura_anterior
texto_imprimir = _('Error - Previous vertex inflexion')
else:
dif_imprimir = punto_vertice.z() - min_altura
texto_imprimir = _('Error - Relative inflexion')
tiene_error = True
cantidad_errores = cantidad_errores + 1
fman.append_csv_file(nom_sal, [
capa1, feature['id'], texto_imprimir, punto_vertice.z(),
abs(dif_imprimir), punto_vertice.x(), punto_vertice.y()])
if punto_vertice.z() < min_altura:
min_altura = punto_vertice.z()
else:
if (punto_vertice.z() - altura_anterior < -1 * args.t1) or (
punto_vertice.z() - max_altura < -1 * args.t1):
if (punto_vertice.z() - altura_anterior) < (-1 * args.t1):
dif_imprimir = punto_vertice.z() - altura_anterior
texto_imprimir = _('Error - Previous vertex inflexion')
else:
dif_imprimir = punto_vertice.z() - max_altura
texto_imprimir = _('Error - Relative inflexion')
tiene_error = True
cantidad_errores = cantidad_errores + 1
fman.append_csv_file(nom_sal, [
capa1, feature['id'], texto_imprimir, punto_vertice.z(),
abs(dif_imprimir), punto_vertice.x(), punto_vertice.y()
])
if punto_vertice.z() > max_altura:
max_altura = punto_vertice.z()
altura_anterior = punto_vertice.z()
def control(capa_verificar, c, l_capas_interectar, u, lindx, n_sal, args, df, l_continuity, fman):
""" Return two list. The first list has the features to be possible max height,
and the second list possible endorreics. """
posible_endorreica = []
posible_cota_1 = []
posible_cota_2 = []
contador = 0
indice_capa = lindx[c]
hrow = [_('Input_Layer'), _('OBJECTID'), _('Description'), _('Height'),
_('Height_difference'), _('X_Coordinate'), _('Y_Coordinate')]
fman.start_csv_file(n_sal, hrow)
it_features = capa_verificar.getFeatures()
lc_inter = get_intersections_continuity(c, l_continuity)
for feature in it_features:
feature_id = feature['id']
contador = contador + 1
feature_id2 = feature.id()
contador_intersecciones = 0
c_1 = 0
c_2 = 0
geometria = feature.geometry()
if (not geometria.isNull() and not geometria.isEmpty()):
lines = geometria.constGet()
n_vertices = lines.vertexCount()
primer_vertice = geometria.vertexAt(0)
ultimo_vertice = geometria.vertexAt(n_vertices-1)
(c_1, c_2) = interseccion_misma_z(
c, u, primer_vertice, ultimo_vertice, fman, n_sal, feature_id,
indice_capa, args, df, lindx, lc_inter)
contador_intersecciones = c_1 + c_2
            # Possible endorheic features
if contador_intersecciones == 2:
posible_endorreica.append(feature_id2)
else:
                # Possible maximum height
if c_1 == 1:
posible_cota_1.append([feature_id2, primer_vertice.z()])
if c_2 == 1:
posible_cota_2.append([feature_id2, ultimo_vertice.z()])
posibles_cotas = posible_cota_1 + posible_cota_2
return (posibles_cotas, posible_endorreica)
def interseccion_misma_z(
in_capa, in_uri, v1, v2, fman, n_out, fid, indx, args, df, lindx, lc_inter
):
""" Return the number of intersection of the first and last vertex with the input layer."""
altura1 = v1.z()
altura2 = v2.z()
geom_v1 = QgsGeometry(v1)
geom_v2 = QgsGeometry(v2)
contador1 = 0
contador2 = 0
in_uri.setDataSource(args.dbschema, in_capa, "geom")
capa_iterar = QgsVectorLayer(in_uri.uri(False), in_capa, "postgres")
lista_i1 = indx.intersects(geom_v1.boundingBox())
features_intersect1 = capa_iterar.getFeatures(lista_i1)
lista_features_inter1 = []
for feature2 in features_intersect1:
geometria2 = feature2.geometry()
if geom_v1.intersects(geometria2):
contador1 = contador1 + 1
geom_inter = geom_v1.intersection(geometria2)
lista_features_inter1.append(feature2.id())
interseccion_misma_z2(in_capa, geom_inter, altura1, fid, fman, n_out, args)
bandera_repetido = False
if contador1 == 2:
        # If there are two intersections, check continuity
        # Verify that the features have the same attributes
if same_feat(lista_features_inter1, capa_iterar):
f = QgsFeature(fid)
f.setGeometry(geom_v1)
            # Check whether it intersects another of the defined layers
if not interseccion_todas_capas(in_capa, f, lc_inter, lindx, args):
bandera_repetido = True
fman.append_csv_file(n_out, [in_capa, fid, _('Error - Continuity'), '', ''])
lista_i2 = indx.intersects(geom_v2.boundingBox())
features_intersect2 = capa_iterar.getFeatures(lista_i2)
lista_features_inter2 = []
for feature2 in features_intersect2:
geometria2 = feature2.geometry()
if geom_v2.intersects(geometria2):
contador2 = contador2 + 1
geom_inter = geom_v2.intersection(geometria2)
lista_features_inter2.append(feature2.id())
interseccion_misma_z2(in_capa, geom_inter, altura2, fid, fman, n_out, args)
if contador2 == 2:
if ((not bandera_repetido) and (same_feat(lista_features_inter2, capa_iterar))):
f = QgsFeature(fid)
f.setGeometry(geom_v2)
            # Verify whether it intersects the other layers
if not interseccion_todas_capas(in_capa, f, lc_inter, lindx, args):
fman.append_csv_file(n_out, [in_capa, fid, _('Error - Continuity'), '', ''])
return (contador1, contador2)
def same_feat(lf, c):
""" Return True if the two features have the same atributes. """
feat1 = c.getFeature(lf[0])
feat2 = c.getFeature(lf[1])
return feat1.attributes()[1:] == feat2.attributes()[1:]
def interseccion_misma_z2(c, g, altura, fid, fman, n_out, args):
""" Writes in file if the intersect geometry has different height from the vertex. """
# Point intersection
if g.wkbType() == QgsWkbTypes.PointZ:
if abs(g.constGet().z() - altura) >= args.t2:
fman.append_csv_file(
n_out, [
c, fid, _('Error - Difference in height intersection'),
altura, abs(g.constGet().z() - altura), g.constGet().x(), g.constGet().y()])
# Polyline intersection
elif (g.wkbType() == QgsWkbTypes.LineStringZ or g.wkbType() == QgsWkbTypes.MultiLineStringZ):
it_vertices = g.vertices()
has_error = False
while (it_vertices.hasNext() and (not has_error)):
p_vertice = it_vertices.next()
if abs(p_vertice.z() - altura) >= args.t2:
fman.append_csv_file(
n_out, [
c, fid, _('Error - Difference in height intersection'),
altura, abs(p_vertice.z() - altura), p_vertice.x(), p_vertice.y()])
has_error = True
# MultiPoint intersection
elif g.wkbType() == QgsWkbTypes.MultiPointZ:
        multipoint = g.constGet()
        num_geom = multipoint.numGeometries()
        for i in range(num_geom):
            punto = multipoint.geometryN(i)
if abs(punto.z() - altura) >= args.t2:
fman.append_csv_file(
n_out, [
c, fid, _('Error - Difference in height intersection'), altura,
abs(punto.z() - altura), punto.x(), punto.y()])
break
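# Illustrative sketch (not part of the original script): the three branches above all reduce to
# "compare every Z of the intersection geometry against the vertex height". A compact helper
# using the same QGIS calls would look like this; it is not used by the original control flow.
def _z_values(g):
    """Yield the Z value of every point of a PointZ / (Multi)LineStringZ / MultiPointZ geometry."""
    wkb = g.wkbType()
    if wkb == QgsWkbTypes.PointZ:
        yield g.constGet().z()
    elif wkb in (QgsWkbTypes.LineStringZ, QgsWkbTypes.MultiLineStringZ):
        it_vertices = g.vertices()
        while it_vertices.hasNext():
            yield it_vertices.next().z()
    elif wkb == QgsWkbTypes.MultiPointZ:
        multipoint = g.constGet()
        for i in range(multipoint.numGeometries()):
            yield multipoint.geometryN(i).z()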
def is_max_height(c, c_nom, l_fid, l_ci, l_index, args):
""" Return list of errors of nodes whos have not the max height. """
id_itera = []
alt_itera = []
resultado = []
for fid in l_fid:
id_itera.append(fid[0])
        alt_itera.append(fid[1])
count = 0
it_feat3 = c.getFeatures(id_itera)
for feat_id in it_feat3:
geom_f = feat_id.geometry()
vertices_f = geom_f.vertices()
encontre = False
while (vertices_f.hasNext() and not encontre):
p_verti = vertices_f.next()
if p_verti.z() > alt_itera[count]:
encontre = True
if encontre:
ritc = interseccion_todas_capas(c_nom, feat_id, l_ci, l_index, args)
if not ritc:
resultado.append([
c_nom, feat_id['id'], _('Error - Node with no maximum height'), '', '', '', ''])
count = count + 1
return resultado
def is_endorreics(c, le, u, geom_remesa, l_index, l_inter, args):
""" Return list of endorheic currents."""
u.setDataSource(args.dbschema, c, "geom")
capa = QgsVectorLayer(u.uri(False), c, "postgres")
resultado = []
for e in le:
f = capa.getFeature(e)
fid = f['id']
if not f.geometry().intersects(geom_remesa):
existe_inter = interseccion_todas_capas(c, f, l_inter, l_index, args)
if not existe_inter:
resultado.append([c, fid, _('Error - Endorheic'), '', '', '', ''])
return resultado
def interseccion_todas_capas(c, f, lc, l_index, args):
""" Return true if exist intesection with other layers, false otherwise. """
uri2 = QgsDataSourceUri()
uri2.setConnection(args.server, str(args.port), args.dbname, args.user, args.password)
geom = f.geometry()
bbox_geom = geom.boundingBox()
for cap in lc:
if cap != c:
index_capa = l_index[cap]
lfea = index_capa.intersects(bbox_geom)
uri2.setDataSource(args.dbschema, cap, "geom")
capa_inter = QgsVectorLayer(uri2.uri(False), cap, 'postgres')
if lfea != []:
it_feat = capa_inter.getFeatures(lfea)
for f_inter in it_feat:
geom_inter = f_inter.geometry()
if geom_inter.intersects(geom):
return True
return False
def control_4(capa_4, uri, indices, args, nam_sal, lista_intersectar, fman):
""" Verify that the height is constant. """
uri.setDataSource(args.dbschema, capa_4, "geom")
capa_eje = QgsVectorLayer(uri.uri(False), capa_4, "postgres")
iterador_features = capa_eje.getFeatures()
hrow = [_('Input_Layer'), _('OBJECTID'), _('Description'), _('Intersection_Layer'),
_('OBJECTID'), _('Height'), _('Height_difference'), _('X_Coordinate'), _('Y_Coordinate')
]
fman.start_csv_file(nam_sal, hrow)
    # Iterate over the features of the layer
for feature in iterador_features:
geometria_feature = feature.geometry()
vertices_feature = geometria_feature.vertices()
primer_vertice = True
hay_error = False
alt_total = 0
        # The first vertex determines the height of the polygon
while vertices_feature.hasNext():
punto_vertice = vertices_feature.next()
alt_actual = punto_vertice.z()
if primer_vertice:
alt_total = alt_actual
primer_vertice = False
if abs(alt_total - alt_actual) >= args.t2:
fman.append_csv_file(
nam_sal, [
capa_4, feature['id'], _('Error - Polygon height'), '', '',
alt_total, abs(alt_total - alt_actual), punto_vertice.x(),
punto_vertice.y()])
hay_error = True
# Verify the intersection has the same height
if not hay_error:
for capa in lista_intersectar:
intersectar_capa(
capa, geometria_feature, alt_total, capa_4,
feature['id'], uri, indices, args, fman, nam_sal)
def intersectar_capa(
c, g_f, altura_pol, c_original, fea_original, uri, indexs, args, fman, nam_sal):
""" Intersect with layer verifing that the height is the same. """
uri.setDataSource(args.dbschema, c, "geom")
capa_cargada = QgsVectorLayer(uri.uri(False), c, "postgres")
index = indexs[c]
hay_error = False
lista_resultante = index.intersects(g_f.boundingBox())
features_intersect = capa_cargada.getFeatures(lista_resultante)
for f in features_intersect:
if g_f.intersects(f.geometry()):
geom_interseccion = g_f.intersection(f.geometry())
            # Point intersection
if geom_interseccion.wkbType() == QgsWkbTypes.PointZ:
if abs(geom_interseccion.get().z() - altura_pol) >= args.t2:
fman.append_csv_file(nam_sal, [
c_original, fea_original, _('Error - Intersection height'), c, f['id'],
altura_pol, altura_pol - geom_interseccion.get().z()])
            # Line or multiline intersection
elif (
geom_interseccion.wkbType() == QgsWkbTypes.LineStringZ or
geom_interseccion.wkbType() == QgsWkbTypes.MultiLineStringZ):
it_vertices = geom_interseccion.vertices()
while (it_vertices.hasNext() and (not hay_error)):
p_vertice = it_vertices.next()
if abs(p_vertice.z() - altura_pol) >= args.t2:
fman.append_csv_file(
nam_sal, [
c_original, fea_original, _('Error - Intersection height'), c,
f['id'], altura_pol, altura_pol - p_vertice.z()])
hay_error = True
            # Multipoint intersection
elif geom_interseccion.wkbType() == QgsWkbTypes.MultiPointZ:
multipolygon = geom_interseccion.get()
num_geom = multipolygon.numGeometries()
for i in range(num_geom):
punto = multipolygon.geometryN(i)
if abs(punto.z() - altura_pol) >= args.t2:
fman.append_csv_file(nam_sal, [
c_original, fea_original, _('Error - Intersection height'), c, f['id'],
altura_pol, altura_pol - punto.z()])
break
else:
fman.append_csv_file(nam_sal, [geom_interseccion.wkbType()])
def get_geometry_layer(dir_layer):
""" Return the geometry of one feature of a layer. """
vector_layer = QgsVectorLayer(dir_layer, 'layer', "ogr")
it_features = vector_layer.getFeatures()
for feature in it_features:
f = feature
return f.geometry()
def load_config(dir_file_conf):
""" Return the json configuration of the control. """
with open(dir_file_conf) as json_data:
file = json.load(json_data)
return file
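# Illustrative sketch (not part of the original script): shape of the JSON read by load_config(),
# inferred from the keys used in the main block below ("indices", "flujo", "endorreicas",
# "altura_area", "continuidad"). The layer names are hypothetical placeholders.
_EXAMPLE_CONFIG = {
    "indices": ["hydro_lines", "lakes"],        # layers to build spatial indexes for
    "flujo": ["hydro_lines"],                   # layers checked by controls 1, 2 and 3
    "endorreicas": ["hydro_lines", "lakes"],    # layers intersected for continuity / endorheic checks
    "altura_area": ["lakes"],                   # polygon layers checked by control 4
    "continuidad": {"hydro_lines": ["lakes"]},  # per-layer list of layers to intersect
}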
if __name__ == '__main__':
args = get_args()
params = ' '.join(sys.argv)
# start qgis
qgs = qgs_init(args.dirqgis)
# uri conection db
uri = QgsDataSourceUri()
uri.setConnection(args.server, str(args.port), args.dbname, args.user, args.password)
# load configuration
f_config = load_config(args.conf)
# initialization of variables
l_ind = {}
d_feat = {}
l_continuity = f_config["continuidad"]
fman = init_file_manager(args.output)
consignment_geometry = get_geometry_layer(args.rem)
l_ind, d_feat = create_indexes(f_config["indices"], uri, args)
# iteration of layers to verify control 1, 2, 3
for name_l_flow in f_config["flujo"]:
date_time = get_time().strftime("%Y%m%d_%H%M%S_")
        logger.info('{}: {}.'.format(_('Control 1,2,3: Verifying layer'), name_l_flow))
uri.setDataSource(args.dbschema, name_l_flow, "geom")
layer_check = QgsVectorLayer(uri.uri(False), name_l_flow, "postgres")
result_name = args.dbschema + '_' + date_time \
+ 'Control_Vertex_Height_' + name_l_flow +'.csv'
cotas, endorreicas = control(
layer_check, name_l_flow, f_config["endorreicas"],
uri, l_ind, result_name, args, d_feat, l_continuity, fman)
r_cota = is_max_height(
layer_check, name_l_flow, cotas, f_config["endorreicas"], l_ind, args)
r_endo = is_endorreics(
name_l_flow, endorreicas, uri, consignment_geometry,
l_ind, f_config["endorreicas"], args)
fman.append_csv_file(result_name, r_cota)
fman.append_csv_file(result_name, r_endo)
control_1(name_l_flow, uri, args, result_name, fman)
# iteration of layers to verify control 4
for name_l_constant_height in f_config["altura_area"]:
date_time = get_time().strftime("%Y%m%d_%H%M%S_")
        logger.info('{}: {}.'.format(_('Control 4: Verifying layer'), name_l_constant_height))
result_name = args.dbschema + '_' + date_time + 'Control_Polygon_Height_' \
+ name_l_constant_height +'.csv'
control_4(name_l_constant_height, uri, l_ind, args, result_name, f_config["flujo"], fman)
logger.info('{}.'.format(_('End')))
# exit qgis
qgs_exit(qgs)
avg_line_length: 42.385321 | max_line_length: 111 | alphanum_fraction: 0.599048

hexsha: 28342bcafee742ffd58724a6243756276c902e06 | size: 80274 | ext: py | lang: Python
max_stars_repo_path: databricks/koalas/indexes/base.py | max_stars_repo_name: LSturtew/koalas | max_stars_repo_head_hexsha: c8a3c88efeeae8ec06d2494a5e2391af21608cfb | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: databricks/koalas/indexes/base.py | max_issues_repo_name: LSturtew/koalas | max_issues_repo_head_hexsha: c8a3c88efeeae8ec06d2494a5e2391af21608cfb | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: databricks/koalas/indexes/base.py | max_forks_repo_name: LSturtew/koalas | max_forks_repo_head_hexsha: c8a3c88efeeae8ec06d2494a5e2391af21608cfb | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import partial
from typing import Any, List, Optional, Tuple, Union
import warnings
import pandas as pd
import numpy as np
from pandas.api.types import (
is_list_like,
is_interval_dtype,
is_bool_dtype,
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
is_numeric_dtype,
is_object_dtype,
)
from pandas.core.accessor import CachedAccessor
from pandas.io.formats.printing import pprint_thing
from pandas.api.types import CategoricalDtype, is_hashable
from pandas._libs import lib
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import DataType, FractionalType, IntegralType, TimestampType
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.config import get_option, option_context
from databricks.koalas.base import IndexOpsMixin
from databricks.koalas.frame import DataFrame
from databricks.koalas.missing.indexes import MissingPandasLikeIndex
from databricks.koalas.series import Series, first_series
from databricks.koalas.spark.accessors import SparkIndexMethods
from databricks.koalas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
verify_temp_column_name,
validate_bool_kwarg,
ERROR_MESSAGE_CANNOT_COMBINE,
)
from databricks.koalas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
SPARK_DEFAULT_INDEX_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from databricks.koalas.typedef import Scalar
class Index(IndexOpsMixin):
"""
Koalas Index that corresponds to pandas Index logically. This might hold Spark Column
internally.
Parameters
----------
data : array-like (1-dimensional)
dtype : dtype, default None
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
See Also
--------
MultiIndex : A multi-level, or hierarchical, Index.
DatetimeIndex : Index of datetime64 data.
Int64Index : A special case of :class:`Index` with purely integer labels.
Float64Index : A special case of :class:`Index` with purely float labels.
Examples
--------
>>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, 3]).index
Int64Index([1, 2, 3], dtype='int64')
>>> ks.DataFrame({'a': [1, 2, 3]}, index=list('abc')).index
Index(['a', 'b', 'c'], dtype='object')
>>> ks.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> ks.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
From a Series:
>>> s = ks.Series([1, 2, 3], index=[10, 20, 30])
>>> ks.Index(s)
Int64Index([1, 2, 3], dtype='int64')
From an Index:
>>> idx = ks.Index([1, 2, 3])
>>> ks.Index(idx)
Int64Index([1, 2, 3], dtype='int64')
"""
def __new__(cls, data=None, dtype=None, copy=False, name=None, tupleize_cols=True, **kwargs):
if not is_hashable(name):
raise TypeError("Index.name must be a hashable type")
if isinstance(data, Series):
if dtype is not None:
data = data.astype(dtype)
if name is not None:
data = data.rename(name)
internal = InternalFrame(
spark_frame=data._internal.spark_frame,
index_spark_columns=data._internal.data_spark_columns,
index_names=data._internal.column_labels,
index_dtypes=data._internal.data_dtypes,
column_labels=[],
data_spark_columns=[],
data_dtypes=[],
)
return DataFrame(internal).index
elif isinstance(data, Index):
if copy:
data = data.copy()
if dtype is not None:
data = data.astype(dtype)
if name is not None:
data = data.rename(name)
return data
return ks.from_pandas(
pd.Index(
data=data, dtype=dtype, copy=copy, name=name, tupleize_cols=tupleize_cols, **kwargs
)
)
@staticmethod
def _new_instance(anchor: DataFrame) -> "Index":
from databricks.koalas.indexes.category import CategoricalIndex
from databricks.koalas.indexes.datetimes import DatetimeIndex
from databricks.koalas.indexes.multi import MultiIndex
from databricks.koalas.indexes.numeric import Float64Index, Int64Index
if anchor._internal.index_level > 1:
instance = object.__new__(MultiIndex)
elif isinstance(anchor._internal.index_dtypes[0], CategoricalDtype):
instance = object.__new__(CategoricalIndex)
elif isinstance(
anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]), IntegralType
):
instance = object.__new__(Int64Index)
elif isinstance(
anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]), FractionalType
):
instance = object.__new__(Float64Index)
elif isinstance(
anchor._internal.spark_type_for(anchor._internal.index_spark_columns[0]), TimestampType
):
instance = object.__new__(DatetimeIndex)
else:
instance = object.__new__(Index)
instance._anchor = anchor
return instance
@property
def _kdf(self) -> DataFrame:
return self._anchor
@property
def _internal(self) -> InternalFrame:
internal = self._kdf._internal
return internal.copy(
column_labels=internal.index_names,
data_spark_columns=internal.index_spark_columns,
data_dtypes=internal.index_dtypes,
column_label_names=None,
)
@property
def _column_label(self):
return self._kdf._internal.index_names[0]
def _with_new_scol(self, scol: spark.Column, *, dtype=None) -> "Index":
"""
Copy Koalas Index with the new Spark Column.
:param scol: the new Spark Column
:return: the copied Index
"""
internal = self._internal.copy(
index_spark_columns=[scol.alias(SPARK_DEFAULT_INDEX_NAME)],
index_dtypes=[dtype],
column_labels=[],
data_spark_columns=[],
data_dtypes=[],
)
return DataFrame(internal).index
spark = CachedAccessor("spark", SparkIndexMethods)
# This method is used via `DataFrame.info` API internally.
def _summary(self, name=None) -> str:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
head, tail, total_count = tuple(
self._internal.spark_frame.select(
F.first(self.spark.column), F.last(self.spark.column), F.count(F.expr("*"))
)
.toPandas()
.iloc[0]
)
if total_count > 0:
index_summary = ", %s to %s" % (pprint_thing(head), pprint_thing(tail))
else:
index_summary = ""
if name is None:
name = type(self).__name__
return "%s: %s entries%s" % (name, total_count, index_summary)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=list('abcd'))
>>> df.index.size
4
>>> df.set_index('dogs', append=True).index.size
4
"""
return len(self)
@property
def shape(self) -> tuple:
"""
Return a tuple of the shape of the underlying data.
Examples
--------
>>> idx = ks.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.shape
(3,)
>>> midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> midx.shape
(3,)
"""
return (len(self._kdf),)
def identical(self, other) -> bool:
"""
Similar to equals, but check that other comparable attributes are
also equal.
Returns
-------
bool
If two Index objects have equal elements and same type True,
otherwise False.
Examples
--------
>>> from databricks.koalas.config import option_context
>>> idx = ks.Index(['a', 'b', 'c'])
>>> midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
For Index
>>> idx.identical(idx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.identical(ks.Index(['a', 'b', 'c']))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.identical(ks.Index(['b', 'b', 'a']))
False
>>> idx.identical(midx)
False
For MultiIndex
>>> midx.identical(midx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.identical(ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.identical(ks.MultiIndex.from_tuples([('c', 'z'), ('b', 'y'), ('a', 'x')]))
False
>>> midx.identical(idx)
False
"""
from databricks.koalas.indexes.multi import MultiIndex
self_name = self.names if isinstance(self, MultiIndex) else self.name
other_name = other.names if isinstance(other, MultiIndex) else other.name
return (
self_name == other_name # to support non-index comparison by short-circuiting.
and self.equals(other)
)
def equals(self, other) -> bool:
"""
Determine if two Index objects contain the same elements.
Returns
-------
bool
True if "other" is an Index and it has the same elements as calling
index; False otherwise.
Examples
--------
>>> from databricks.koalas.config import option_context
>>> idx = ks.Index(['a', 'b', 'c'])
>>> idx.name = "name"
>>> midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx.names = ("nameA", "nameB")
For Index
>>> idx.equals(idx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.equals(ks.Index(['a', 'b', 'c']))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... idx.equals(ks.Index(['b', 'b', 'a']))
False
>>> idx.equals(midx)
False
For MultiIndex
>>> midx.equals(midx)
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.equals(ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]))
True
>>> with option_context('compute.ops_on_diff_frames', True):
... midx.equals(ks.MultiIndex.from_tuples([('c', 'z'), ('b', 'y'), ('a', 'x')]))
False
>>> midx.equals(idx)
False
"""
if same_anchor(self, other):
return True
elif type(self) == type(other):
if get_option("compute.ops_on_diff_frames"):
# TODO: avoid using default index?
with option_context("compute.default_index_type", "distributed-sequence"):
# Directly using Series from both self and other seems causing
# some exceptions when 'compute.ops_on_diff_frames' is enabled.
# Working around for now via using frame.
return (
self.to_series("self").reset_index(drop=True)
== other.to_series("other").reset_index(drop=True)
).all()
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
else:
return False
def transpose(self) -> "Index":
"""
        Return the transpose. For an Index, it is the index itself.
Examples
--------
>>> idx = ks.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.transpose()
Index(['a', 'b', 'c'], dtype='object')
For MultiIndex
>>> midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> midx.transpose() # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
"""
return self
T = property(transpose)
def _to_internal_pandas(self) -> pd.Index:
"""
Return a pandas Index directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._kdf._internal.to_pandas_frame.index
def to_pandas(self) -> pd.Index:
"""
Return a pandas Index.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=list('abcd'))
>>> df['dogs'].index.to_pandas()
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
return self._to_internal_pandas().copy()
def toPandas(self) -> pd.Index:
warnings.warn(
"Index.toPandas is deprecated as of Index.to_pandas. Please use the API instead.",
FutureWarning,
)
return self.to_pandas()
toPandas.__doc__ = to_pandas.__doc__
def to_numpy(self, dtype=None, copy=False) -> np.ndarray:
"""
A NumPy ndarray representing the values in this Index or MultiIndex.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
Examples
--------
>>> ks.Series([1, 2, 3, 4]).index.to_numpy()
array([0, 1, 2, 3])
>>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index.to_numpy()
array([(1, 4), (2, 5), (3, 6)], dtype=object)
"""
result = np.asarray(self._to_internal_pandas()._values, dtype=dtype)
if copy:
result = result.copy()
return result
@property
def values(self) -> np.ndarray:
"""
Return an array representing the data in the Index.
.. warning:: We recommend using `Index.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ks.Series([1, 2, 3, 4]).index.values
array([0, 1, 2, 3])
>>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index.values
array([(1, 4), (2, 5), (3, 6)], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
.. warning:: We recommend using `Index.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
An ndarray with int64 dtype.
Examples
--------
>>> ks.Index([1, 2, 3]).asi8
array([1, 2, 3])
Returns None for non-int64 dtype
>>> ks.Index(['a', 'b', 'c']).asi8 is None
True
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
if isinstance(self.spark.data_type, IntegralType):
return self.to_numpy()
elif isinstance(self.spark.data_type, TimestampType):
return np.array(list(map(lambda x: x.astype(np.int64), self.to_numpy())))
else:
return None
@property
def spark_type(self) -> DataType:
""" Returns the data type as defined by Spark, as a Spark DataType object."""
warnings.warn(
"Index.spark_type is deprecated as of Index.spark.data_type. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.data_type
@property
def has_duplicates(self) -> bool:
"""
If index has duplicates, return True, otherwise False.
Examples
--------
>>> idx = ks.Index([1, 5, 7, 7])
>>> idx.has_duplicates
True
>>> idx = ks.Index([1, 5, 7])
>>> idx.has_duplicates
False
>>> idx = ks.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"])
>>> idx.has_duplicates
True
>>> idx = ks.Index(["Orange", "Apple",
... "Watermelon"])
>>> idx.has_duplicates
False
"""
sdf = self._internal.spark_frame.select(self.spark.column)
scol = scol_for(sdf, sdf.columns[0])
return sdf.select(F.count(scol) != F.countDistinct(scol)).first()[0]
@property
def is_unique(self) -> bool:
"""
Return if the index has unique values.
Examples
--------
>>> idx = ks.Index([1, 5, 7, 7])
>>> idx.is_unique
False
>>> idx = ks.Index([1, 5, 7])
>>> idx.is_unique
True
>>> idx = ks.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"])
>>> idx.is_unique
False
>>> idx = ks.Index(["Orange", "Apple",
... "Watermelon"])
>>> idx.is_unique
True
"""
return not self.has_duplicates
@property
def name(self) -> Union[Any, Tuple]:
"""Return name of the Index."""
return self.names[0]
@name.setter
def name(self, name: Union[Any, Tuple]) -> None:
self.names = [name]
@property
def names(self) -> List[Union[Any, Tuple]]:
"""Return names of the Index."""
return [
name if name is None or len(name) > 1 else name[0]
for name in self._internal.index_names # type: ignore
]
@names.setter
def names(self, names: List[Union[Any, Tuple]]) -> None:
if not is_list_like(names):
raise ValueError("Names must be a list-like")
if self._internal.index_level != len(names):
raise ValueError(
"Length of new names must be {}, got {}".format(
self._internal.index_level, len(names)
)
)
if self._internal.index_level == 1:
self.rename(names[0], inplace=True)
else:
self.rename(names, inplace=True)
@property
def nlevels(self) -> int:
"""
Number of levels in Index & MultiIndex.
Examples
--------
>>> kdf = ks.DataFrame({"a": [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name="idx"))
>>> kdf.index.nlevels
1
>>> kdf = ks.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')])
>>> kdf.index.nlevels
2
"""
return self._internal.index_level
def rename(
self, name: Union[Any, Tuple, List[Union[Any, Tuple]]], inplace: bool = False
) -> Optional["Index"]:
"""
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Parameters
----------
name : label or list of labels
Name(s) to set.
inplace : boolean, default False
Modifies the object directly, instead of creating a new Index or MultiIndex.
Returns
-------
Index or MultiIndex
The same type as the caller or None if inplace is True.
Examples
--------
>>> df = ks.DataFrame({'a': ['A', 'C'], 'b': ['A', 'B']}, columns=['a', 'b'])
>>> df.index.rename("c")
Int64Index([0, 1], dtype='int64', name='c')
>>> df.set_index("a", inplace=True)
>>> df.index.rename("d")
Index(['A', 'C'], dtype='object', name='d')
You can also change the index name in place.
>>> df.index.rename("e", inplace=True)
>>> df.index
Index(['A', 'C'], dtype='object', name='e')
>>> df # doctest: +NORMALIZE_WHITESPACE
b
e
A A
C B
Support for MultiIndex
>>> kidx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])
>>> kidx.names = ['hello', 'koalas']
>>> kidx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
names=['hello', 'koalas'])
>>> kidx.rename(['aloha', 'databricks']) # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
names=['aloha', 'databricks'])
"""
names = self._verify_for_rename(name)
internal = self._kdf._internal.copy(index_names=names)
if inplace:
self._kdf._update_internal_frame(internal)
return None
else:
return DataFrame(internal).index
def _verify_for_rename(self, name):
if is_hashable(name):
if is_name_like_tuple(name):
return [name]
elif is_name_like_value(name):
return [(name,)]
raise TypeError("Index.name must be a hashable type")
# TODO: add downcast parameter for fillna function
def fillna(self, value: Scalar) -> "Index":
"""
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
            Scalar value to use to fill holes (e.g. 0). This value cannot be a list-like.
Returns
-------
Index :
filled with value
Examples
--------
>>> ki = ks.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, None]).index
>>> ki
Float64Index([1.0, 2.0, nan], dtype='float64')
>>> ki.fillna(0)
Float64Index([1.0, 2.0, 0.0], dtype='float64')
"""
if not isinstance(value, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(value).__name__)
sdf = self._internal.spark_frame.fillna(value)
result = DataFrame(self._kdf._internal.with_new_sdf(sdf)).index # TODO: dtype?
return result
# TODO: ADD keep parameter
def drop_duplicates(self) -> "Index":
"""
Return Index with duplicate values removed.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : Equivalent method on Series.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Examples
--------
        Generate a pandas.Index with duplicate values.
>>> idx = ks.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
>>> idx.drop_duplicates().sort_values()
Index(['beetle', 'cow', 'hippo', 'lama'], dtype='object')
"""
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns
).drop_duplicates()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
return DataFrame(internal).index
def to_series(self, name: Union[Any, Tuple] = None) -> Series:
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index.
Parameters
----------
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=list('abcd'))
>>> df['dogs'].index.to_series()
a a
b b
c c
d d
dtype: object
"""
if not is_hashable(name):
raise TypeError("Series.name must be a hashable type")
scol = self.spark.column
if name is not None:
scol = scol.alias(name_like_string(name))
elif self._internal.index_level == 1:
name = self.name
column_labels = [
name if is_name_like_tuple(name) else (name,)
] # type: List[Optional[Tuple]]
internal = self._internal.copy(
column_labels=column_labels, data_spark_columns=[scol], column_label_names=None
)
return first_series(DataFrame(internal))
def to_frame(self, index=True, name=None) -> DataFrame:
"""
Create a DataFrame with a column containing the Index.
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
name : object, default None
The passed name should substitute for the index name (if it has
one).
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = ks.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame() # doctest: +NORMALIZE_WHITESPACE
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(name='zoo') # doctest: +NORMALIZE_WHITESPACE
zoo
animal
Ant Ant
Bear Bear
Cow Cow
"""
if name is None:
if self._internal.index_names[0] is None:
name = (DEFAULT_SERIES_NAME,)
else:
name = self._internal.index_names[0]
elif not is_name_like_tuple(name):
if is_name_like_value(name):
name = (name,)
else:
raise TypeError("unhashable type: '{}'".format(type(name).__name__))
return self._to_frame(index=index, names=[name])
def _to_frame(self, index, names):
if index:
index_spark_columns = self._internal.index_spark_columns
index_names = self._internal.index_names
index_dtypes = self._internal.index_dtypes
else:
index_spark_columns = []
index_names = []
index_dtypes = []
internal = InternalFrame(
spark_frame=self._internal.spark_frame,
index_spark_columns=index_spark_columns,
index_names=index_names,
index_dtypes=index_dtypes,
column_labels=names,
data_spark_columns=self._internal.index_spark_columns,
data_dtypes=self._internal.index_dtypes,
)
return DataFrame(internal)
def is_boolean(self) -> bool:
"""
Return if the current index type is a boolean type.
Examples
--------
>>> ks.DataFrame({'a': [1]}, index=[True]).index.is_boolean()
True
"""
return is_bool_dtype(self.dtype)
def is_categorical(self) -> bool:
"""
Return if the current index type is a categorical type.
Examples
--------
>>> ks.DataFrame({'a': [1]}, index=[1]).index.is_categorical()
False
"""
return is_categorical_dtype(self.dtype)
def is_floating(self) -> bool:
"""
Return if the current index type is a floating type.
Examples
--------
>>> ks.DataFrame({'a': [1]}, index=[1]).index.is_floating()
False
"""
return is_float_dtype(self.dtype)
def is_integer(self) -> bool:
"""
        Return if the current index type is an integer type.
Examples
--------
>>> ks.DataFrame({'a': [1]}, index=[1]).index.is_integer()
True
"""
return is_integer_dtype(self.dtype)
def is_interval(self) -> bool:
"""
Return if the current index type is an interval type.
Examples
--------
>>> ks.DataFrame({'a': [1]}, index=[1]).index.is_interval()
False
"""
return is_interval_dtype(self.dtype)
def is_numeric(self) -> bool:
"""
Return if the current index type is a numeric type.
Examples
--------
>>> ks.DataFrame({'a': [1]}, index=[1]).index.is_numeric()
True
"""
return is_numeric_dtype(self.dtype)
def is_object(self) -> bool:
"""
        Return if the current index type is an object type.
Examples
--------
>>> ks.DataFrame({'a': [1]}, index=["a"]).index.is_object()
True
"""
return is_object_dtype(self.dtype)
def is_type_compatible(self, kind) -> bool:
"""
Whether the index type is compatible with the provided type.
Examples
--------
>>> kidx = ks.Index([1, 2, 3])
>>> kidx.is_type_compatible('integer')
True
>>> kidx = ks.Index([1.0, 2.0, 3.0])
>>> kidx.is_type_compatible('integer')
False
>>> kidx.is_type_compatible('floating')
True
"""
return kind == self.inferred_type
def dropna(self) -> "Index":
"""
Return Index or MultiIndex without NA/NaN values
Examples
--------
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', None],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
NaN 7 8
>>> df.index.dropna()
Index(['cobra', 'viper'], dtype='object')
Also support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... [None, 'weight', 'length']],
... [[0, 1, 1, 1, 1, 1, 2, 2, 2],
... [0, 1, 1, 0, 1, 2, 1, 1, 2]])
>>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, None],
... index=midx)
>>> s
lama NaN 45.0
cow weight 200.0
weight 1.2
NaN 30.0
weight 250.0
length 1.5
falcon weight 320.0
weight 1.0
length NaN
dtype: float64
>>> s.index.dropna() # doctest: +SKIP
MultiIndex([( 'cow', 'weight'),
( 'cow', 'weight'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'weight'),
('falcon', 'weight'),
('falcon', 'length')],
)
"""
sdf = self._internal.spark_frame.select(self._internal.index_spark_columns).dropna()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
return DataFrame(internal).index
def unique(self, level=None) -> "Index":
"""
Return unique values in the index.
Be aware the order of unique values might be different than pandas.Index.unique
Parameters
----------
level : int or str, optional, default is None
Returns
-------
Index without duplicates
See Also
--------
Series.unique
groupby.SeriesGroupBy.unique
Examples
--------
>>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 1, 3]).index.unique().sort_values()
Int64Index([1, 3], dtype='int64')
>>> ks.DataFrame({'a': ['a', 'b', 'c']}, index=['d', 'e', 'e']).index.unique().sort_values()
Index(['d', 'e'], dtype='object')
MultiIndex
>>> ks.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "X")]).unique()
... # doctest: +SKIP
MultiIndex([('A', 'X'),
('A', 'Y')],
)
"""
if level is not None:
self._validate_index_level(level)
scols = self._internal.index_spark_columns
sdf = self._kdf._internal.spark_frame.select(scols).distinct()
return DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
).index
# TODO: add error parameter
def drop(self, labels) -> "Index":
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like
Returns
-------
dropped : Index
Examples
--------
>>> index = ks.Index([1, 2, 3])
>>> index
Int64Index([1, 2, 3], dtype='int64')
>>> index.drop([1])
Int64Index([2, 3], dtype='int64')
"""
internal = self._internal.resolved_copy
sdf = internal.spark_frame[~internal.index_spark_columns[0].isin(labels)]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
column_labels=[],
data_spark_columns=[],
data_dtypes=[],
)
return DataFrame(internal).index
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(
"Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level,)
)
elif level > 0:
raise IndexError("Too many levels:" " Index has only 1 level, not %d" % (level + 1))
elif level != self.name:
raise KeyError(
"Requested level ({}) does not match index name ({})".format(level, self.name)
)
def get_level_values(self, level) -> "Index":
"""
Return Index if a valid level is given.
Examples:
--------
>>> kidx = ks.Index(['a', 'b', 'c'], name='ks')
>>> kidx.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='ks')
>>> kidx.get_level_values('ks')
Index(['a', 'b', 'c'], dtype='object', name='ks')
"""
self._validate_index_level(level)
return self
def copy(self, name=None, deep=None) -> "Index":
"""
Make a copy of this object. name sets those attributes on the new object.
Parameters
----------
name : string, optional
to set name of index
deep : None
            this parameter is not supported; it is just a dummy parameter to match pandas.
Examples
--------
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
>>> df.index
Index(['cobra', 'viper', 'sidewinder'], dtype='object')
Copy index
>>> df.index.copy()
Index(['cobra', 'viper', 'sidewinder'], dtype='object')
Copy index with name
>>> df.index.copy(name='snake')
Index(['cobra', 'viper', 'sidewinder'], dtype='object', name='snake')
"""
result = self._kdf.copy().index
if name:
result.name = name
return result
def droplevel(self, level) -> "Index":
"""
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex.
Parameters
----------
level : int, str, tuple, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
Index or MultiIndex
Examples
--------
>>> midx = ks.DataFrame({'a': ['a', 'b']}, index=[['a', 'x'], ['b', 'y'], [1, 2]]).index
>>> midx # doctest: +SKIP
MultiIndex([('a', 'b', 1),
('x', 'y', 2)],
)
>>> midx.droplevel([0, 1]) # doctest: +SKIP
Int64Index([1, 2], dtype='int64')
>>> midx.droplevel(0) # doctest: +SKIP
MultiIndex([('b', 1),
('y', 2)],
)
>>> midx.names = [("a", "b"), "b", "c"]
>>> midx.droplevel([('a', 'b')]) # doctest: +SKIP
MultiIndex([('b', 1),
('y', 2)],
names=['b', 'c'])
"""
names = self.names
nlevels = self.nlevels
if not is_list_like(level):
level = [level]
int_level = set()
for n in level:
if isinstance(n, int):
if n < 0:
n = n + nlevels
if n < 0:
raise IndexError(
"Too many levels: Index has only {} levels, "
"{} is not a valid level number".format(nlevels, (n - nlevels))
)
if n >= nlevels:
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(nlevels, n + 1)
)
else:
if n not in names:
raise KeyError("Level {} not found".format(n))
n = names.index(n)
int_level.add(n)
if len(level) >= nlevels:
raise ValueError(
"Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".format(len(level), nlevels)
)
index_spark_columns, index_names, index_dtypes = zip(
*[
item
for i, item in enumerate(
zip(
self._internal.index_spark_columns,
self._internal.index_names,
self._internal.index_dtypes,
)
)
if i not in int_level
]
)
internal = self._internal.copy(
index_spark_columns=list(index_spark_columns),
index_names=list(index_names),
index_dtypes=list(index_dtypes),
column_labels=[],
data_spark_columns=[],
data_dtypes=[],
)
return DataFrame(internal).index
def symmetric_difference(self, other, result_name=None, sort=None) -> "Index":
"""
Compute the symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
result_name : str
sort : True or None, default None
Whether to sort the resulting index.
* True : Attempt to sort the result.
* None : Do not sort the result.
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> s1 = ks.Series([1, 2, 3, 4], index=[1, 2, 3, 4])
>>> s2 = ks.Series([1, 2, 3, 4], index=[2, 3, 4, 5])
>>> s1.index.symmetric_difference(s2.index)
Int64Index([5, 1], dtype='int64')
You can set name of result Index.
>>> s1.index.symmetric_difference(s2.index, result_name='koalas')
Int64Index([5, 1], dtype='int64', name='koalas')
You can set sort to `True`, if you want to sort the resulting index.
>>> s1.index.symmetric_difference(s2.index, sort=True)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> s1.index ^ s2.index
Int64Index([5, 1], dtype='int64')
"""
if type(self) != type(other):
raise NotImplementedError(
"Doesn't support symmetric_difference between Index & MultiIndex for now"
)
sdf_self = self._kdf._internal.spark_frame.select(self._internal.index_spark_columns)
sdf_other = other._kdf._internal.spark_frame.select(other._internal.index_spark_columns)
sdf_symdiff = sdf_self.union(sdf_other).subtract(sdf_self.intersect(sdf_other))
if sort:
sdf_symdiff = sdf_symdiff.sort(self._internal.index_spark_column_names)
internal = InternalFrame(
spark_frame=sdf_symdiff,
index_spark_columns=[
scol_for(sdf_symdiff, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
result = DataFrame(internal).index
if result_name:
result.name = result_name
return result
# TODO: return_indexer
def sort_values(self, ascending=True) -> "Index":
"""
Return a sorted copy of the index.
.. note:: This method is not supported for pandas when index has NaN value.
pandas raises unexpected TypeError, but we support treating NaN
as the smallest value.
Parameters
----------
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : ks.Index or ks.MultiIndex
Sorted copy of the index.
See Also
--------
Series.sort_values : Sort values of a Series.
DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = ks.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order.
>>> idx.sort_values(ascending=False)
Int64Index([1000, 100, 10, 1], dtype='int64')
Support for MultiIndex.
>>> kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('c', 'y', 2), ('b', 'z', 3)])
>>> kidx # doctest: +SKIP
MultiIndex([('a', 'x', 1),
('c', 'y', 2),
('b', 'z', 3)],
)
>>> kidx.sort_values() # doctest: +SKIP
MultiIndex([('a', 'x', 1),
('b', 'z', 3),
('c', 'y', 2)],
)
>>> kidx.sort_values(ascending=False) # doctest: +SKIP
MultiIndex([('c', 'y', 2),
('b', 'z', 3),
('a', 'x', 1)],
)
"""
sdf = self._internal.spark_frame
sdf = sdf.orderBy(self._internal.index_spark_columns, ascending=ascending).select(
self._internal.index_spark_columns
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
return DataFrame(internal).index
def sort(self, *args, **kwargs) -> None:
"""
Use sort_values instead.
"""
raise TypeError("cannot sort an Index object in-place, use sort_values instead")
def min(self) -> Union[Scalar, Tuple[Scalar, ...]]:
"""
Return the minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = ks.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = ks.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
        For a MultiIndex, the minimum is determined lexicographically.
>>> idx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])
>>> idx.min()
('a', 'x', 1)
"""
sdf = self._internal.spark_frame
min_row = (
sdf.select(F.min(F.struct(self._internal.index_spark_columns)).alias("min_row"))
.select("min_row.*")
.toPandas()
)
result = tuple(min_row.iloc[0])
return result if len(result) > 1 else result[0]
def max(self) -> Union[Scalar, Tuple[Scalar, ...]]:
"""
Return the maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = ks.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = ks.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])
>>> idx.max()
('b', 'y', 2)
"""
sdf = self._internal.spark_frame
max_row = (
sdf.select(F.max(F.struct(self._internal.index_spark_columns)).alias("max_row"))
.select("max_row.*")
.toPandas()
)
result = tuple(max_row.iloc[0])
return result if len(result) > 1 else result[0]
def delete(self, loc) -> "Index":
"""
Make new Index with passed location(-s) deleted.
.. note:: this API can be pretty expensive since it is based on
a global sequence internally.
Returns
-------
new_index : Index
Examples
--------
>>> kidx = ks.Index([10, 10, 9, 8, 4, 2, 4, 4, 2, 2, 10, 10])
>>> kidx
Int64Index([10, 10, 9, 8, 4, 2, 4, 4, 2, 2, 10, 10], dtype='int64')
>>> kidx.delete(0).sort_values()
Int64Index([2, 2, 2, 4, 4, 4, 8, 9, 10, 10, 10], dtype='int64')
>>> kidx.delete([0, 1, 2, 3, 10, 11]).sort_values()
Int64Index([2, 2, 2, 4, 4, 4], dtype='int64')
MultiIndex
>>> kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
>>> kidx # doctest: +SKIP
MultiIndex([('a', 'x', 1),
('b', 'y', 2),
('c', 'z', 3)],
)
>>> kidx.delete([0, 2]).sort_values() # doctest: +SKIP
MultiIndex([('b', 'y', 2)],
)
"""
length = len(self)
def is_len_exceeded(index):
"""Check if the given index is exceeded the length or not"""
return index >= length if index >= 0 else abs(index) > length
if not is_list_like(loc):
if is_len_exceeded(loc):
raise IndexError(
"index {} is out of bounds for axis 0 with size {}".format(loc, length)
)
loc = [loc]
else:
for index in loc:
if is_len_exceeded(index):
raise IndexError(
"index {} is out of bounds for axis 0 with size {}".format(index, length)
)
loc = [int(item) for item in loc]
loc = [item if item >= 0 else length + item for item in loc]
# we need a temporary column such as '__index_value_0__'
# since 'InternalFrame.attach_default_index' will be failed
# when self._scol has name of '__index_level_0__'
index_value_column_format = "__index_value_{}__"
sdf = self._internal._sdf
index_value_column_names = [
verify_temp_column_name(sdf, index_value_column_format.format(i))
for i in range(self._internal.index_level)
]
index_value_columns = [
index_scol.alias(index_vcol_name)
for index_scol, index_vcol_name in zip(
self._internal.index_spark_columns, index_value_column_names
)
]
sdf = sdf.select(index_value_columns)
sdf = InternalFrame.attach_default_index(sdf, default_index_type="distributed-sequence")
# sdf here looks as below
# +-----------------+-----------------+-----------------+-----------------+
# |__index_level_0__|__index_value_0__|__index_value_1__|__index_value_2__|
# +-----------------+-----------------+-----------------+-----------------+
# | 0| a| x| 1|
# | 1| b| y| 2|
# | 2| c| z| 3|
# +-----------------+-----------------+-----------------+-----------------+
# delete rows which are matched with given `loc`
sdf = sdf.where(~F.col(SPARK_INDEX_NAME_FORMAT(0)).isin(loc))
sdf = sdf.select(index_value_column_names)
# sdf here looks as below, we should alias them back to origin spark column names
# +-----------------+-----------------+-----------------+
# |__index_value_0__|__index_value_1__|__index_value_2__|
# +-----------------+-----------------+-----------------+
# | c| z| 3|
# +-----------------+-----------------+-----------------+
index_origin_columns = [
F.col(index_vcol_name).alias(index_scol_name)
for index_vcol_name, index_scol_name in zip(
index_value_column_names, self._internal.index_spark_column_names
)
]
sdf = sdf.select(index_origin_columns)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
return DataFrame(internal).index
def append(self, other: "Index") -> "Index":
"""
Append a collection of Index options together.
Parameters
----------
other : Index
Returns
-------
appended : Index
Examples
--------
>>> kidx = ks.Index([10, 5, 0, 5, 10, 5, 0, 10])
>>> kidx
Int64Index([10, 5, 0, 5, 10, 5, 0, 10], dtype='int64')
>>> kidx.append(kidx)
Int64Index([10, 5, 0, 5, 10, 5, 0, 10, 10, 5, 0, 5, 10, 5, 0, 10], dtype='int64')
        Support for MultiIndex
>>> kidx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])
>>> kidx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
)
>>> kidx.append(kidx) # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('a', 'x'),
('b', 'y')],
)
"""
from databricks.koalas.indexes.multi import MultiIndex
if type(self) is not type(other):
raise NotImplementedError(
"append() between Index & MultiIndex currently is not supported"
)
sdf_self = self._internal.spark_frame.select(self._internal.index_spark_columns)
sdf_other = other._internal.spark_frame.select(other._internal.index_spark_columns)
sdf_appended = sdf_self.union(sdf_other)
# names should be kept when MultiIndex, but Index wouldn't keep its name.
if isinstance(self, MultiIndex):
index_names = self._internal.index_names
else:
index_names = None
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf_appended,
index_spark_columns=[
scol_for(sdf_appended, col) for col in self._internal.index_spark_column_names
],
index_names=index_names,
)
return DataFrame(internal).index
def argmax(self) -> int:
"""
Return a maximum argument indexer.
Parameters
----------
skipna : bool, default True
Returns
-------
maximum argument indexer
Examples
--------
>>> kidx = ks.Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3])
>>> kidx
Int64Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3], dtype='int64')
>>> kidx.argmax()
4
"""
sdf = self._internal.spark_frame.select(self.spark.column)
sequence_col = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name=sequence_col)
# spark_frame here looks like below
# +-----------------+---------------+
# |__index_level_0__|__index_value__|
# +-----------------+---------------+
# | 0| 10|
# | 4| 100|
# | 2| 8|
# | 3| 7|
# | 6| 4|
# | 5| 5|
# | 7| 3|
# | 8| 100|
# | 1| 9|
# +-----------------+---------------+
return (
sdf.orderBy(
scol_for(sdf, self._internal.data_spark_column_names[0]).desc(),
F.col(sequence_col).asc(),
)
.select(sequence_col)
.first()[0]
)
def argmin(self) -> int:
"""
Return a minimum argument indexer.
Parameters
----------
skipna : bool, default True
Returns
-------
minimum argument indexer
Examples
--------
>>> kidx = ks.Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3])
>>> kidx
Int64Index([10, 9, 8, 7, 100, 5, 4, 3, 100, 3], dtype='int64')
>>> kidx.argmin()
7
"""
sdf = self._internal.spark_frame.select(self.spark.column)
sequence_col = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name=sequence_col)
return (
sdf.orderBy(
scol_for(sdf, self._internal.data_spark_column_names[0]).asc(),
F.col(sequence_col).asc(),
)
.select(sequence_col)
.first()[0]
)
def set_names(self, names, level=None, inplace=False) -> Optional["Index"]:
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label
Name(s) to set.
level : int, label or list of int or label, optional
If the index is a MultiIndex, level(s) to set (None for all
levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index
The same type as the caller or None if inplace is True.
See Also
--------
Index.rename : Able to set new names without level.
Examples
--------
>>> idx = ks.Index([1, 2, 3, 4])
>>> idx
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter')
Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
For MultiIndex
>>> idx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])
>>> idx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
)
>>> idx.set_names(['kind', 'year'], inplace=True)
>>> idx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
names=['kind', 'year'])
>>> idx.set_names('species', level=0) # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y')],
names=['species', 'year'])
"""
from databricks.koalas.indexes.multi import MultiIndex
if isinstance(self, MultiIndex):
if level is not None:
self_names = self.names
self_names[level] = names
names = self_names
return self.rename(name=names, inplace=inplace)
def difference(self, other, sort=None) -> "Index":
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : True or None, default None
Whether to sort the resulting index.
* True : Attempt to sort the result.
* None : Do not sort the result.
Returns
-------
difference : Index
Examples
--------
>>> idx1 = ks.Index([2, 1, 3, 4])
>>> idx2 = ks.Index([3, 4, 5, 6])
>>> idx1.difference(idx2, sort=True)
Int64Index([1, 2], dtype='int64')
MultiIndex
>>> midx1 = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
>>> midx2 = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'z', 2), ('k', 'z', 3)])
>>> midx1.difference(midx2) # doctest: +SKIP
MultiIndex([('b', 'y', 2),
('c', 'z', 3)],
)
"""
from databricks.koalas.indexes.multi import MultiIndex
if not is_list_like(other):
raise TypeError("Input must be Index or array-like")
if not isinstance(sort, (type(None), type(True))):
raise ValueError(
"The 'sort' keyword only takes the values of None or True; {} was passed.".format(
sort
)
)
# Handling MultiIndex
if isinstance(self, MultiIndex) and not isinstance(other, MultiIndex):
if not all([isinstance(item, tuple) for item in other]):
raise TypeError("other must be a MultiIndex or a list of tuples")
other = MultiIndex.from_tuples(other)
if not isinstance(other, Index):
other = Index(other)
sdf_self = self._internal.spark_frame
sdf_other = other._internal.spark_frame
idx_self = self._internal.index_spark_columns
idx_other = other._internal.index_spark_columns
sdf_diff = sdf_self.select(idx_self).subtract(sdf_other.select(idx_other))
internal = InternalFrame(
spark_frame=sdf_diff,
index_spark_columns=[
scol_for(sdf_diff, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
result = DataFrame(internal).index
# Name(s) will be kept when only name(s) of (Multi)Index are the same.
if isinstance(self, type(other)) and isinstance(self, MultiIndex):
if self.names == other.names:
result.names = self.names
elif isinstance(self, type(other)) and not isinstance(self, MultiIndex):
if self.name == other.name:
result.name = self.name
return result if sort is None else result.sort_values()
@property
def is_all_dates(self) -> bool:
"""
Return if all data types of the index are datetime.
        Remember that since Koalas does not support multiple data types in an index,
        it returns True if any type of data is datetime.
Examples
--------
>>> from datetime import datetime
>>> idx = ks.Index([datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0)])
>>> idx
DatetimeIndex(['2019-01-01', '2019-02-03'], dtype='datetime64[ns]', freq=None)
>>> idx.is_all_dates
True
>>> idx = ks.Index([datetime(2019, 1, 1, 0, 0, 0), None])
>>> idx
DatetimeIndex(['2019-01-01', 'NaT'], dtype='datetime64[ns]', freq=None)
>>> idx.is_all_dates
True
>>> idx = ks.Index([0, 1, 2])
>>> idx
Int64Index([0, 1, 2], dtype='int64')
>>> idx.is_all_dates
False
"""
return isinstance(self.spark.data_type, TimestampType)
def repeat(self, repeats: int) -> "Index":
"""
Repeat elements of a Index/MultiIndex.
Returns a new Index/MultiIndex where each element of the current Index/MultiIndex
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Index.
Returns
-------
repeated_index : Index/MultiIndex
Newly created Index/MultiIndex with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Examples
--------
>>> idx = ks.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.repeat(2)
Index(['a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
For MultiIndex,
>>> midx = ks.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c')],
)
>>> midx.repeat(2) # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('x', 'a'),
('x', 'b'),
('y', 'c')],
)
>>> midx.repeat(0) # doctest: +SKIP
MultiIndex([], )
"""
if not isinstance(repeats, int):
raise ValueError(
"`repeats` argument must be integer, but got {}".format(type(repeats).__name__)
)
elif repeats < 0:
raise ValueError("negative dimensions are not allowed")
kdf = DataFrame(self._internal.resolved_copy) # type: DataFrame
if repeats == 0:
return DataFrame(kdf._internal.with_filter(F.lit(False))).index
else:
return ks.concat([kdf] * repeats).index
def asof(self, label) -> Scalar:
"""
Return the label from the index, or, if not present, the previous one.
Assuming that the index is sorted, return the passed index label if it
is in the index, or return the previous index label if the passed one
is not in the index.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
label : object
The label up to which the method returns the latest index label.
Returns
-------
object
The passed label if it is in the index. The previous label if the
passed label is not in the sorted index or `NaN` if there is no
such label.
Examples
--------
`Index.asof` returns the latest index label up to the passed label.
>>> idx = ks.Index(['2013-12-31', '2014-01-02', '2014-01-03'])
>>> idx.asof('2014-01-01')
'2013-12-31'
If the label is in the index, the method returns the passed label.
>>> idx.asof('2014-01-02')
'2014-01-02'
If all of the labels in the index are later than the passed label,
NaN is returned.
>>> idx.asof('1999-01-02')
nan
"""
sdf = self._internal.spark_frame
if self.is_monotonic_increasing:
sdf = sdf.where(self.spark.column <= F.lit(label).cast(self.spark.data_type)).select(
F.max(self.spark.column)
)
elif self.is_monotonic_decreasing:
sdf = sdf.where(self.spark.column >= F.lit(label).cast(self.spark.data_type)).select(
F.min(self.spark.column)
)
else:
raise ValueError("index must be monotonic increasing or decreasing")
result = sdf.toPandas().iloc[0, 0]
return result if result is not None else np.nan
def union(self, other, sort=None) -> "Index":
"""
Form the union of two Index objects.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
Returns
-------
union : Index
Examples
--------
Index
>>> idx1 = ks.Index([1, 2, 3, 4])
>>> idx2 = ks.Index([3, 4, 5, 6])
>>> idx1.union(idx2).sort_values()
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
MultiIndex
>>> midx1 = ks.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c"), ("x", "d")])
>>> midx2 = ks.MultiIndex.from_tuples([("x", "c"), ("x", "d"), ("x", "e"), ("x", "f")])
>>> midx1.union(midx2).sort_values() # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('x', 'c'),
('x', 'd'),
('x', 'e'),
('x', 'f')],
)
"""
from databricks.koalas.indexes.multi import MultiIndex
sort = True if sort is None else sort
sort = validate_bool_kwarg(sort, "sort")
if type(self) is not type(other):
if isinstance(self, MultiIndex):
if not isinstance(other, list) or not all(
[isinstance(item, tuple) for item in other]
):
raise TypeError("other must be a MultiIndex or a list of tuples")
other = MultiIndex.from_tuples(other)
else:
if isinstance(other, MultiIndex):
# TODO: We can't support different type of values in a single column for now.
raise NotImplementedError(
"Union between Index and MultiIndex is not yet supported"
)
elif isinstance(other, Series):
other = other.to_frame()
other = other.set_index(other.columns[0]).index
elif isinstance(other, DataFrame):
raise ValueError("Index data must be 1-dimensional")
else:
other = Index(other)
sdf_self = self._internal.spark_frame.select(self._internal.index_spark_columns)
sdf_other = other._internal.spark_frame.select(other._internal.index_spark_columns)
sdf = sdf_self.union(sdf_other.subtract(sdf_self))
if isinstance(self, MultiIndex):
sdf = sdf.drop_duplicates()
if sort:
sdf = sdf.sort(self._internal.index_spark_column_names)
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
)
return DataFrame(internal).index
def holds_integer(self) -> bool:
"""
Whether the type is an integer type.
Always return False for MultiIndex.
Notes
-----
        When the Index contains null values the result can differ from pandas,
        since Koalas casts integer to float when the Index contains null values.
>>> ks.Index([1, 2, 3, None])
Float64Index([1.0, 2.0, 3.0, nan], dtype='float64')
Examples
--------
>>> kidx = ks.Index([1, 2, 3, 4])
>>> kidx.holds_integer()
True
Returns False for string type.
>>> kidx = ks.Index(["A", "B", "C", "D"])
>>> kidx.holds_integer()
False
Returns False for float type.
>>> kidx = ks.Index([1.1, 2.2, 3.3, 4.4])
>>> kidx.holds_integer()
False
"""
return isinstance(self.spark.data_type, IntegralType)
def intersection(self, other) -> "Index":
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = ks.Index([1, 2, 3, 4])
>>> idx2 = ks.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2).sort_values()
Int64Index([3, 4], dtype='int64')
"""
from databricks.koalas.indexes.multi import MultiIndex
if isinstance(other, DataFrame):
raise ValueError("Index data must be 1-dimensional")
elif isinstance(other, MultiIndex):
# Always returns a no-named empty Index if `other` is MultiIndex.
return self._kdf.head(0).index.rename(None)
elif isinstance(other, Index):
spark_frame_other = other.to_frame().to_spark()
keep_name = self.name == other.name
elif isinstance(other, Series):
spark_frame_other = other.to_frame().to_spark()
keep_name = True
elif is_list_like(other):
other = Index(other)
if isinstance(other, MultiIndex):
return other.to_frame().head(0).index
spark_frame_other = other.to_frame().to_spark()
keep_name = True
else:
raise TypeError("Input must be Index or array-like")
spark_frame_self = self.to_frame(name=SPARK_DEFAULT_INDEX_NAME).to_spark()
spark_frame_intersected = spark_frame_self.intersect(spark_frame_other)
if keep_name:
index_names = self._internal.index_names
else:
index_names = None
internal = InternalFrame( # TODO: dtypes?
spark_frame=spark_frame_intersected,
index_spark_columns=[scol_for(spark_frame_intersected, SPARK_DEFAULT_INDEX_NAME)],
index_names=index_names,
)
return DataFrame(internal).index
def item(self) -> Union[Scalar, Tuple[Scalar, ...]]:
"""
Return the first element of the underlying data as a python scalar.
Returns
-------
scalar
The first element of Index.
Raises
------
ValueError
If the data is not length-1.
Examples
--------
>>> kidx = ks.Index([10])
>>> kidx.item()
10
"""
return self.to_series().item()
def insert(self, loc: int, item) -> "Index":
"""
Make new Index inserting new item at location.
Follows Python list.append semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Examples
--------
>>> kidx = ks.Index([1, 2, 3, 4, 5])
>>> kidx.insert(3, 100)
Int64Index([1, 2, 3, 100, 4, 5], dtype='int64')
For negative values
>>> kidx = ks.Index([1, 2, 3, 4, 5])
>>> kidx.insert(-3, 100)
Int64Index([1, 2, 100, 3, 4, 5], dtype='int64')
"""
if loc < 0:
length = len(self)
loc = loc + length
loc = 0 if loc < 0 else loc
index_name = self._internal.index_spark_column_names[0]
sdf_before = self.to_frame(name=index_name)[:loc].to_spark()
sdf_middle = Index([item]).to_frame(name=index_name).to_spark()
sdf_after = self.to_frame(name=index_name)[loc:].to_spark()
sdf = sdf_before.union(sdf_middle).union(sdf_after)
internal = self._internal.with_new_sdf(sdf) # TODO: dtype?
return DataFrame(internal).index
def view(self) -> "Index":
"""
        This is defined as a copy with the same identity.
"""
return self.copy()
def to_list(self) -> List:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
.. note:: This method should only be used if the resulting list is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
Index
>>> idx = ks.Index([1, 2, 3, 4, 5])
>>> idx.to_list()
[1, 2, 3, 4, 5]
MultiIndex
>>> tuples = [(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'green')]
>>> midx = ks.MultiIndex.from_tuples(tuples)
>>> midx.to_list()
[(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'green')]
"""
return self._to_internal_pandas().tolist()
tolist = to_list
@property
def inferred_type(self) -> str:
"""
Return a string of the type inferred from the values.
Examples
--------
>>> from datetime import datetime
>>> ks.Index([1, 2, 3]).inferred_type
'integer'
>>> ks.Index([1.0, 2.0, 3.0]).inferred_type
'floating'
>>> ks.Index(['a', 'b', 'c']).inferred_type
'string'
>>> ks.Index([True, False, True, False]).inferred_type
'boolean'
"""
return lib.infer_dtype([self.to_series().head(1).item()])
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeIndex, item):
property_or_func = getattr(MissingPandasLikeIndex, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
def __repr__(self):
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return repr(self._to_internal_pandas())
pindex = self._kdf._get_or_create_repr_pandas_cache(max_display_count).index
pindex_length = len(pindex)
repr_string = repr(pindex[:max_display_count])
if pindex_length > max_display_count:
footer = "\nShowing only the first {}".format(max_display_count)
return repr_string + footer
return repr_string
def __iter__(self):
return MissingPandasLikeIndex.__iter__(self)
def __xor__(self, other):
return self.symmetric_difference(other)
def __bool__(self):
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
| 32.486443
| 100
| 0.517664
|
630a20308d6773943d29e7d3558c570d542d4cbb
| 2,772
|
py
|
Python
|
backend/models/entry.py
|
methodpark/digitaleswarten
|
024c0b88df54e9727925b202e139b3c5b2ce73d6
|
[
"Apache-2.0"
] | 10
|
2020-03-20T19:14:43.000Z
|
2020-10-29T21:31:40.000Z
|
backend/models/entry.py
|
methodpark/digitaleswarten
|
024c0b88df54e9727925b202e139b3c5b2ce73d6
|
[
"Apache-2.0"
] | 41
|
2020-03-20T20:27:55.000Z
|
2020-03-24T21:49:37.000Z
|
backend/models/entry.py
|
methodpark/digitaleswarten
|
024c0b88df54e9727925b202e139b3c5b2ce73d6
|
[
"Apache-2.0"
] | 1
|
2020-03-21T09:31:51.000Z
|
2020-03-21T09:31:51.000Z
|
from flask import abort
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
from app import db
from models.queue import Queue
from utils.id_generator import generate_entry_id
class Entry(db.Model):
id = db.Column(db.String, primary_key=True)
queue_id = db.Column(db.String, db.ForeignKey('queue.id'), primary_key=True)
queue = db.relationship('Queue', backref=db.backref('slots'))
name = db.Column(db.String)
ticket_number = db.Column(db.Integer)
state = db.Column(db.String, default='waiting')
def set_state(self, new_state):
"""
Modifies the state of an entry in the queue.
Args:
- new_state (String): must either be "waiting" or "called"
Returns:
- (Bool): True on success, False if the entered value was not in
the value range
"""
if new_state not in ['waiting', 'called']:
return False
self.state = new_state
return True
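    # Editor's note: a minimal, hedged usage sketch for set_state(); the entry
    # below is illustrative and assumes an application/database context that is
    # not part of this module.
    #
    #   entry = Entry(id='abc', name='Alice', ticket_number=1)
    #   assert entry.set_state('called') is True       # accepted value
    #   assert entry.set_state('finished') is False    # outside the value range
    #   assert entry.state == 'called'                 # unchanged by the rejected call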
def __repr__(self):
        return f'<Entry {self.id}>'
def short_json(self):
return {
'id': self.id,
'ticketNumber' : self.ticket_number,
'state': self.state
}
def full_json(self):
return {
'id': self.id,
'name': self.name,
'ticketNumber' : self.ticket_number,
'state': self.state
}
def add_new_entry_to_db(db, place, queue, entry_name):
"""
    Creates a new entry with entry_name, assigns the next ticket number for the place, and returns it.
"""
entry_id = generate_entry_id(entry_name)
place_queues = Queue.query.filter_by(place=place).all()
largest_place_entry = None
for pl_queue in place_queues:
largest_queue_entry = db.session.query(Entry) \
.filter_by(queue=pl_queue) \
.order_by(desc(Entry.ticket_number)) \
.limit(1) \
.first()
if largest_queue_entry is None:
continue
elif largest_place_entry is None:
largest_place_entry = largest_queue_entry
elif largest_place_entry.ticket_number < largest_queue_entry.ticket_number:
largest_place_entry = largest_queue_entry
else:
continue
if largest_place_entry is None:
ticket_number = 1
else:
ticket_number = largest_place_entry.ticket_number + 1
try:
new_entry = Entry(id=entry_id, name=entry_name, queue=queue, ticket_number=ticket_number)
db.session.add(new_entry)
db.session.commit()
except IntegrityError:
abort(500)
return new_entry
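# Editor's note: a hedged sketch of how add_new_entry_to_db() assigns ticket
# numbers. The names app, some_place and some_queue are illustrative only and a
# Flask application context with an initialized database is assumed.
#
#   with app.app_context():
#       first = add_new_entry_to_db(db, some_place, some_queue, 'Alice')   # ticket_number == 1
#       second = add_new_entry_to_db(db, some_place, some_queue, 'Bob')    # ticket_number == 2
#
# Ticket numbers are counted per place (across all of its queues), not per
# queue: the loop above scans every Queue of the place for the largest existing
# ticket_number before adding 1.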
| 33
| 97
| 0.58153
|
bd0df77786bddd609b3dec2deef8ebe43b8d6880
| 3,125
|
py
|
Python
|
habitat/datasets/roomnav/util.py
|
medhini/habitat-api
|
f0d4cdaacb12be43e58bf0b87f43074240faf99b
|
[
"MIT"
] | null | null | null |
habitat/datasets/roomnav/util.py
|
medhini/habitat-api
|
f0d4cdaacb12be43e58bf0b87f43074240faf99b
|
[
"MIT"
] | null | null | null |
habitat/datasets/roomnav/util.py
|
medhini/habitat-api
|
f0d4cdaacb12be43e58bf0b87f43074240faf99b
|
[
"MIT"
] | null | null | null |
import csv
from typing import List
import os
import gzip
import habitat
import random
MP3D_SCENES_SPLITS_PATH = \
"datasets/scenes/scenes_mp3d.csv"
GIBSON_SCENES_SPLITS_PATH = \
"datasets/scenes/scenes_gibson_fullplus.csv"
GIBSON_HABITAT_SCENES_SPLITS_PATH = \
"datasets/scenes/scenes_gibson_habtiat.csv"
def get_mp3d_scenes(split: str = "train",
scene_template: str = "{scene}") -> List[str]:
scenes = []
with open(MP3D_SCENES_SPLITS_PATH, newline='') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
if split in row["set"].split() or split == "*":
scenes.append(scene_template.format(scene=row["id"]))
return scenes
def get_gibson_scenes(split: str = "train",
scene_template: str = "{scene}") -> List[str]:
scenes = []
with open(GIBSON_SCENES_SPLITS_PATH, newline='') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
if row[split] == '1' or split == "*":
scenes.append(scene_template.format(scene=row["id"]))
return scenes
def get_habitat_gibson_scenes(split: str = "train",
scene_template: str = "{scene}") -> List[str]:
scenes = []
with open(GIBSON_HABITAT_SCENES_SPLITS_PATH, newline='') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
if row[split] == '1' or split == "*":
scenes.append(scene_template.format(scene=row["id"]))
return scenes
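# Editor's note: a hedged usage sketch for the scene helpers above. The template
# path is illustrative; the CSV split files must exist relative to the working
# directory.
#
#   train_ids = get_mp3d_scenes("train")
#   train_paths = get_mp3d_scenes(
#       "train", scene_template="data/scene_datasets/mp3d/{scene}/{scene}.glb")
#   all_gibson = get_gibson_scenes("*")   # "*" selects every scene regardless of split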
def get_avg_geo_dist(dataset):
sum_geo_dist = 0
for episode in dataset.episodes:
sum_geo_dist += episode.info["geodesic_distance"]
return sum_geo_dist / len(dataset.episodes)
def generate_sampled_train(config_path="datasets/pointnav/gibson.yaml",
num_episodes=1000):
config = habitat.get_config(config_path, config_dir="habitat-api/configs")
config.defrost()
config.DATASET.SPLIT = "train"
config.freeze()
print("Dataset is loading.")
dataset = habitat.make_dataset(id_dataset=config.DATASET.TYPE,
config=config.DATASET)
print(config.DATASET.SPLIT + ": Len episodes: ", len(dataset.episodes))
dataset.episodes = random.sample(dataset.episodes, num_episodes)
print("Average geo distance: ", get_avg_geo_dist(dataset))
json_str = str(dataset.to_json())
output_dir = "data/datasets/pointnav/gibson/v1/{}_small_2/".format(
config.DATASET.SPLIT)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
main_dataset_file = "{}/{}_small_2.json.gz".format(
output_dir,
config.DATASET.SPLIT)
with gzip.GzipFile(main_dataset_file, 'wb') as f:
f.write(json_str.encode("utf-8"))
print("Dataset file: {}".format(main_dataset_file))
def generate_mini_train_splits():
generate_sampled_train("datasets/pointnav/gibson.yaml")
#generate_sampled_train("datasets/pointnav/mp3d.yaml")
| 36.337209
| 78
| 0.65472
|
4bdeb8de9d562eed43566992e947d670909ff7f6
| 30
|
py
|
Python
|
src/tasks/routes/__init__.py
|
rrickgauer/tasks
|
6b40e71a56868497f902704808f9862196e29e87
|
[
"Apache-2.0"
] | null | null | null |
src/tasks/routes/__init__.py
|
rrickgauer/tasks
|
6b40e71a56868497f902704808f9862196e29e87
|
[
"Apache-2.0"
] | 59
|
2021-01-15T19:57:52.000Z
|
2021-09-15T01:26:52.000Z
|
src/tasks/routes/__init__.py
|
rrickgauer/tasks
|
6b40e71a56868497f902704808f9862196e29e87
|
[
"Apache-2.0"
] | null | null | null |
from . import login as login
| 10
| 28
| 0.733333
|
3d136c4f0ff95c19d0e34f9b4358cc7307312945
| 920
|
py
|
Python
|
exercícios/EX_CursoEmVideo/ex106.py
|
jose-carlos-code/CursoEmvideo-python
|
8c9b82db2c2b906f6d8f2359a680b9b3af25da43
|
[
"MIT"
] | 1
|
2021-01-11T15:10:36.000Z
|
2021-01-11T15:10:36.000Z
|
exercícios/EX_CursoEmVideo/ex106.py
|
jose-carlos-code/CursoEmvideo-python
|
8c9b82db2c2b906f6d8f2359a680b9b3af25da43
|
[
"MIT"
] | null | null | null |
exercícios/EX_CursoEmVideo/ex106.py
|
jose-carlos-code/CursoEmvideo-python
|
8c9b82db2c2b906f6d8f2359a680b9b3af25da43
|
[
"MIT"
] | null | null | null |
import time
c = ( '\033[m', # 0 - no colors
      '\033[0;30;41m', # 1 - red
      '\033[0;30;42m', # 2 - green
      '\033[0;30;43m', # 3 - yellow
      '\033[0;30;44m', # 4 - blue
      '\033[0;30;45m', # 5 - purple
      '\033[7;30m', # 6 - white
      )
def ajuda(com):
titulo(f'acessando o manual do comando \'{com} \'', 4)
print(c[6], end='')
help(com)
print(c[0], end='')
time.sleep(1)
def titulo(msg, cor=0):
tam = len(msg) + 4
print(c[cor], end='')
print('~'*tam)
print(F' {msg}')
print('~'*tam)
print(c[0], end='')
time.sleep(1)
# main program
comando = ''
while True:
    titulo('SISTEMA DE AJUDA PYHELP', 2)
comando = str(input('função ou biblioteca > '))
if comando.upper() == 'FIM':
break
else:
ajuda(comando)
titulo('FINALIZANDO...', 6)
time.sleep(1.3)
titulo('FIM DO PROGRAMA', 2)
| 23.589744
| 58
| 0.504348
|
ff943d93283c1657ddd517edea0692b51a513233
| 757
|
py
|
Python
|
run_onnx_on_tvm.py
|
mshr-h/nucleo-f746zg-microtvm-example
|
284005b60a10e7560fbb6d10e6311996f3774269
|
[
"MIT"
] | null | null | null |
run_onnx_on_tvm.py
|
mshr-h/nucleo-f746zg-microtvm-example
|
284005b60a10e7560fbb6d10e6311996f3774269
|
[
"MIT"
] | null | null | null |
run_onnx_on_tvm.py
|
mshr-h/nucleo-f746zg-microtvm-example
|
284005b60a10e7560fbb6d10e6311996f3774269
|
[
"MIT"
] | null | null | null |
import tvm
from tvm import relay
import numpy as np
import onnx
from tvm.contrib import graph_executor
model_path = "add.onnx"
onnx_model = onnx.load(model_path)
input1_name = "Input1"
input2_name = "Input2"
shape_dict = {input1_name: [1], input2_name: [1]}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
dev = tvm.cpu(0)
dtype = "float32"
m_ = graph_executor.GraphModule(lib["default"](dev))
input1 = tvm.nd.array(np.array([4], dtype=dtype))
input2 = tvm.nd.array(np.array([7], dtype=dtype))
m_.set_input("Input1", input1)
m_.set_input("Input2", input2)
m_.run()
output = m_.get_output(0).asnumpy()
print('TVM:', output)
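# Editor's note: this script assumes an "add.onnx" file with float inputs named
# Input1 and Input2 sits next to it. A minimal, hedged sketch for generating a
# compatible model with the onnx helper API (the output name is illustrative):
#
#   from onnx import helper, TensorProto
#   in1 = helper.make_tensor_value_info("Input1", TensorProto.FLOAT, [1])
#   in2 = helper.make_tensor_value_info("Input2", TensorProto.FLOAT, [1])
#   out = helper.make_tensor_value_info("Output1", TensorProto.FLOAT, [1])
#   add = helper.make_node("Add", ["Input1", "Input2"], ["Output1"])
#   graph = helper.make_graph([add], "add_graph", [in1, in2], [out])
#   onnx.save(helper.make_model(graph), "add.onnx")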
| 22.939394
| 62
| 0.729194
|
90b35ab7c8980bd936130dea7d3b95be78c7ed4d
| 119
|
py
|
Python
|
db/__init__.py
|
NovaSBE-DSKC/retention-evaluation
|
5b68b9282f0b5479a9dc5238faef68067c76b861
|
[
"MIT"
] | null | null | null |
db/__init__.py
|
NovaSBE-DSKC/retention-evaluation
|
5b68b9282f0b5479a9dc5238faef68067c76b861
|
[
"MIT"
] | null | null | null |
db/__init__.py
|
NovaSBE-DSKC/retention-evaluation
|
5b68b9282f0b5479a9dc5238faef68067c76b861
|
[
"MIT"
] | null | null | null |
from .connection import get_connector, get_connection, execute, get_engine
from .tables import *
from .pandas import *
| 29.75
| 74
| 0.806723
|
623a4b6502bb6e50ea178404c7b6d7ddc56454ce
| 3,079
|
py
|
Python
|
test/functional/wallet_keypool.py
|
DancingAxolotl/encocoinplus
|
b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8
|
[
"MIT"
] | 4
|
2020-07-07T04:51:03.000Z
|
2021-05-17T10:28:51.000Z
|
test/functional/wallet_keypool.py
|
DancingAxolotl/encocoinplus
|
b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8
|
[
"MIT"
] | 13
|
2020-05-08T11:14:37.000Z
|
2020-05-12T10:03:53.000Z
|
test/functional/wallet_keypool.py
|
DancingAxolotl/encocoinplus
|
b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8
|
[
"MIT"
] | 22
|
2020-02-10T09:17:20.000Z
|
2020-07-10T10:33:26.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet keypool and interaction with wallet encryption/locking."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class KeyPoolTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
nodes = self.nodes
addr_before_encrypting = nodes[0].getnewaddress()
addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
# Encrypt wallet and wait to terminate
nodes[0].node_encrypt_wallet('test')
# Restart node 0
self.start_node(0)
# Keep creating keys
addr = nodes[0].getnewaddress()
addr_data = nodes[0].validateaddress(addr)
assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first, or unlock the wallet.",
nodes[0].getnewaddress)
# put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(6)
nodes[0].walletlock()
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize'], 7)
# drain the internal keys
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
addr = set()
# the next one should fail
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
# drain the external keys
#addr.add(nodes[0].getnewaddress())
#addr.add(nodes[0].getnewaddress())
#addr.add(nodes[0].getnewaddress())
#addr.add(nodes[0].getnewaddress())
#addr.add(nodes[0].getnewaddress())
#addr.add(nodes[0].getnewaddress())
#addr.add(nodes[0].getnewaddress())
#assert(len(addr) == 7)
# the next one should fail
#assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
nodes[0].keypoolrefill(3)
# test walletpassphrase timeout
time.sleep(1.1)
assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
# drain them by mining
#nodes[0].generate(1)
#nodes[0].generate(1)
#nodes[0].generate(1)
#assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1)
nodes[0].walletpassphrase('test', 100)
nodes[0].keypoolrefill(100)
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize'], 101)
if __name__ == '__main__':
KeyPoolTest().main()
| 38.012346
| 120
| 0.645664
|
5318a688e75fd0eee31385ebefcf3d8a5fdb3609
| 5,986
|
py
|
Python
|
plugins/commands.py
|
maxsupun/Mod-app-bot
|
527ab2c9ee68d71f9dd23da3c12bf6d01adbc0ac
|
[
"MIT"
] | 1
|
2021-09-09T13:44:55.000Z
|
2021-09-09T13:44:55.000Z
|
plugins/commands.py
|
matheeshaofficial/Mod-app-bot
|
527ab2c9ee68d71f9dd23da3c12bf6d01adbc0ac
|
[
"MIT"
] | null | null | null |
plugins/commands.py
|
matheeshaofficial/Mod-app-bot
|
527ab2c9ee68d71f9dd23da3c12bf6d01adbc0ac
|
[
"MIT"
] | null | null | null |
import os
import time
import traceback
import psutil
import shutil
import string
import asyncio
import info
import logging
from pyromod import listen
from pyrogram import Client, filters
from asyncio import TimeoutError
from pyrogram.errors import MessageNotModified
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from pyrogram import Client, filters
from info import START_MSG, CHANNELS, ADMINS, INVITE_MSG
from helpers.database.access_db import db
from helpers.database.database import Database
from helpers.forcesub import ForceSub
from helpers.broadcast import broadcast_handler
from helpers.database.add_user import AddUserToDatabase
from helpers.humanbytes import humanbytes
from utils import Media
logger = logging.getLogger(__name__)
@Client.on_message(filters.command('start'))
async def start(bot, message):
if message.from_user.id in info.BANNED_USERS:
await message.reply_text("Sorry, You are banned to use me ☹️ Please Contact Bot Owner 😊")
return
await AddUserToDatabase(bot, message)
FSub = await ForceSub(bot, message)
if FSub == 400:
return
"""Start command handler"""
if len(message.command) > 1 and message.command[1] == 'subscribe':
await message.reply(INVITE_MSG.format(message.from_user.mention))
else:
buttons = [
[
InlineKeyboardButton('Updates Channel 🗣', url='https://t.me/SDBOTs_inifinity'),
InlineKeyboardButton('Go Inline 🎭', switch_inline_query=''),
],
[
InlineKeyboardButton('Search Mod app 🔎', switch_inline_query_current_chat=''),
],
]
reply_markup = InlineKeyboardMarkup(buttons)
await message.reply(START_MSG.format(message.from_user.mention), reply_markup=reply_markup)
@Client.on_message(filters.command('channel') & filters.user(ADMINS))
async def channel_info(bot, message):
await AddUserToDatabase(bot, message)
FSub = await ForceSub(bot, message)
if FSub == 400:
return
"""Send basic information of channel"""
if isinstance(CHANNELS, (int, str)):
channels = [CHANNELS]
elif isinstance(CHANNELS, list):
channels = CHANNELS
else:
raise ValueError("Unexpected type of CHANNELS")
text = '📑 **Indexed channels/groups**\n'
for channel in channels:
chat = await bot.get_chat(channel)
if chat.username:
text += '\n@' + chat.username
else:
text += '\n' + chat.title or chat.first_name
text += f'\n\n**Total:** {len(CHANNELS)}'
if len(text) < 4096:
await message.reply(text)
else:
file = 'Indexed channels.txt'
with open(file, 'w') as f:
f.write(text)
await message.reply_document(file)
os.remove(file)
@Client.on_message(filters.command('total') & filters.user(ADMINS))
async def total(bot, message):
await AddUserToDatabase(bot, message)
FSub = await ForceSub(bot, message)
if FSub == 400:
return
"""Show total files in database"""
msg = await message.reply("Processing...⏳\n@SDBOTs_inifinity Projects 🇱🇰", quote=True)
try:
total = await Media.count_documents()
        await msg.edit(f'📁 Total files saved in Tg film Bot Database: {total}')
except Exception as e:
logger.exception('Failed to check total files')
await msg.edit(f'Error: {e}')
@Client.on_message(filters.command('logger') & filters.user(ADMINS))
async def log_file(bot, message):
await AddUserToDatabase(bot, message)
FSub = await ForceSub(bot, message)
if FSub == 400:
return
"""Send log file"""
try:
await message.reply_document('TelegramBot.log')
except Exception as e:
await message.reply(str(e))
@Client.on_message(filters.command('delete') & filters.user(ADMINS))
async def delete(bot, message):
await AddUserToDatabase(bot, message)
FSub = await ForceSub(bot, message)
if FSub == 400:
return
"""Delete file from database"""
reply = message.reply_to_message
if reply and reply.media:
msg = await message.reply("Processing...⏳\n@SDBOTs_inifinity Projects 🇱🇰", quote=True)
else:
        await message.reply('Reply to the file you want to delete with /delete', quote=True)
return
for file_type in ("document", "video", "audio"):
media = getattr(reply, file_type, None)
if media is not None:
break
else:
await msg.edit('This is not supported file format')
return
result = await Media.collection.delete_one({
'file_name': media.file_name,
'file_size': media.file_size,
'mime_type': media.mime_type,
'caption': reply.caption
})
if result.deleted_count:
        await msg.edit('File was successfully deleted from database\n@SDBOTs_inifinity Projects 🇱🇰')
else:
await msg.edit('File not found in database\n@SDBOTs_inifinity Projects🇱🇰')
@Client.on_message(filters.private & filters.command("broadcast") & filters.user(info.BOT_OWNER) & filters.reply)
async def _broadcast(_, bot: Message):
await broadcast_handler(bot)
@Client.on_message(filters.private & filters.command("stats") & filters.user(info.BOT_OWNER))
async def show_status_count(_, bot: Message):
total, used, free = shutil.disk_usage(".")
total = humanbytes(total)
used = humanbytes(used)
free = humanbytes(free)
cpu_usage = psutil.cpu_percent()
ram_usage = psutil.virtual_memory().percent
disk_usage = psutil.disk_usage('/').percent
total_users = await db.total_users_count()
await bot.reply_text(
text=f"**💽 Total Disk Space:** {total} \n**💿 Used Space:** {used}({disk_usage}%) \n**📊 Free Space:** {free} \n**CPU Usage:** {cpu_usage}% \n**RAM Usage:** {ram_usage}%\n\n**Total Users 👀:** `{total_users}`\n\n@{info.BOT_USERNAME} 🤖",
parse_mode="Markdown",
quote=True
)
| 35.005848
| 241
| 0.669395
|
bbfaea7f04879e85d1c4573de38e5d8d7838ac7c
| 19,415
|
py
|
Python
|
third_party/nucleus/util/vis.py
|
peterdfields/deepvariant
|
33fe874a7b2b4fdb67b0f6e361dd9e45f1f52676
|
[
"BSD-3-Clause"
] | 4
|
2019-03-30T13:25:25.000Z
|
2020-10-14T18:47:21.000Z
|
third_party/nucleus/util/vis.py
|
kchennen/deepvariant
|
b92646f51df8cf157147e93ecd7a082c7b6db457
|
[
"BSD-3-Clause"
] | 2
|
2019-09-07T05:07:35.000Z
|
2019-09-07T05:08:18.000Z
|
third_party/nucleus/util/vis.py
|
kchennen/deepvariant
|
b92646f51df8cf157147e93ecd7a082c7b6db457
|
[
"BSD-3-Clause"
] | 1
|
2020-05-04T15:13:37.000Z
|
2020-05-04T15:13:37.000Z
|
# Copyright 2019 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Lint as: python3
"""Utility functions for visualization and inspection of pileup examples.
Visualization and inspection utility functions enable showing image-like array
data including those used in DeepVariant.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from IPython import display
import numpy as np
from PIL import Image
from PIL import ImageDraw
from tensorflow.python.platform import gfile
from third_party.nucleus.protos import variants_pb2
DEEPVARIANT_CHANNEL_NAMES = [
'read base', 'base quality', 'mapping quality', 'strand',
'read supports variant', 'base differs from ref', 'alternate allele 1',
'alternate allele 2'
]
def get_image_array_from_example(example):
"""Decode image/encoded and image/shape of an Example into a numpy array.
Parse image/encoded and image/shape features from a tensorflow Example and
decode the image into that shape.
Args:
example: a tensorflow Example containing features that include
"image/encoded" and "image/shape"
Returns:
numpy array of dtype np.uint8.
"""
features = example.features.feature
img = features['image/encoded'].bytes_list.value[0]
shape = features['image/shape'].int64_list.value[0:3]
return np.frombuffer(img, np.uint8).reshape(shape)
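# Editor's note: a hedged sketch of the Example layout this function expects;
# tensorflow is assumed to be importable and the array contents are synthetic.
#
#   import numpy as np
#   import tensorflow as tf
#   arr = np.zeros((100, 221, 6), dtype=np.uint8)
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'image/encoded': tf.train.Feature(
#           bytes_list=tf.train.BytesList(value=[arr.tobytes()])),
#       'image/shape': tf.train.Feature(
#           int64_list=tf.train.Int64List(value=list(arr.shape))),
#   }))
#   assert get_image_array_from_example(example).shape == (100, 221, 6)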
def split_3d_array_into_channels(arr):
"""Split 3D array into a list of 2D arrays.
e.g. given a numpy array of shape (100, 200, 6), return a list of 6 channels,
each with shape (100, 200).
Args:
arr: a 3D numpy array.
Returns:
list of 2D numpy arrays.
"""
return [arr[:, :, i] for i in range(arr.shape[-1])]
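# Editor's note: a minimal, hedged illustration of the channel split described
# above, using a synthetic array.
#
#   import numpy as np
#   demo = np.zeros((100, 200, 6), dtype=np.uint8)
#   channels = split_3d_array_into_channels(demo)
#   assert len(channels) == 6 and channels[0].shape == (100, 200)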
def channels_from_example(example):
"""Extract image from an Example and return the list of channels.
Args:
example: a tensorflow Example containing features that include
"image/encoded" and "image/shape"
Returns:
list of 2D numpy arrays, one for each channel.
"""
image = get_image_array_from_example(example)
return split_3d_array_into_channels(image)
def convert_6_channels_to_rgb(channels):
"""Convert 6-channel image from DeepVariant to RGB for quick visualization.
The 6 channels are: "read base", "base quality", "mapping quality", "strand",
"supports variant", "base != reference".
Args:
channels: a list of 6 numpy arrays.
Returns:
3D numpy array of 3 colors (Red, green, blue).
"""
base = channels[0]
# qual is the minimum of base quality and mapping quality at each position
# 254 is the max value for quality scores because the SAM specification has
# 255 reserved for unavailable values.
qual = np.minimum(channels[1], channels[2])
strand = channels[3]
# alpha is <supports variant> * <base != reference>
alpha = np.multiply(channels[4] / 254.0, channels[5] / 254.0)
return np.multiply(np.stack([base, qual, strand]),
alpha).astype(np.uint8).transpose([1, 2, 0])
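# Editor's note: a hedged sketch showing the shape contract of the RGB
# conversion with synthetic channels; real inputs come from
# channels_from_example.
#
#   import numpy as np
#   demo_channels = [np.full((100, 221), 254, dtype=np.uint8) for _ in range(6)]
#   rgb = convert_6_channels_to_rgb(demo_channels)
#   assert rgb.shape == (100, 221, 3) and rgb.dtype == np.uint8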
def scale_colors_for_png(arr, vmin=0, vmax=255):
"""Scale an array to integers between 0 and 255 to prep it for a PNG image.
Args:
arr: numpy array. Input array made up of integers or floats.
vmin: number. Minimum data value to map to 0. Values below this will be
clamped to this value and therefore become 0.
vmax: number. Maximum data value to map to 255. Values above this will be
clamped to this value and therefore become 255.
Returns:
numpy array of dtype np.uint8 (integers between 0 and 255).
"""
if vmax == 0 or vmax <= vmin:
    raise ValueError('vmax must be non-zero and greater than vmin.')
# Careful not to modify the original array
scaled = np.copy(arr)
# Snap numbers in the array falling outside the range into the range,
# otherwise they will produce artifacts due to byte overflow
scaled[scaled > vmax] = vmax
scaled[scaled < vmin] = vmin
# Scale the input into the range of vmin to vmax
if vmin != 0 or vmax != 255:
scaled = ((scaled - vmin) / (vmax - vmin)) * 255
return scaled.astype(np.uint8)
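# Editor's note: a small, hedged illustration of the clamping and re-scaling
# described above; the input values are arbitrary.
#
#   import numpy as np
#   raw = np.array([-10, 0, 50, 100, 300], dtype=float)
#   scaled = scale_colors_for_png(raw, vmin=0, vmax=100)
#   # -10 and 300 are clamped into [0, 100], then the range is mapped to 0..255:
#   assert list(scaled) == [0, 0, 127, 255, 255]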
def _get_image_type_from_array(arr):
"""Find image type based on array dimensions.
Raises error on invalid image dimensions.
Args:
arr: numpy array. Input array.
Returns:
str. "RGB" or "L", meant for PIL.Image.fromarray.
"""
if len(arr.shape) == 3 and arr.shape[2] == 3:
# 8-bit x 3 colors
return 'RGB'
elif len(arr.shape) == 2:
# 8-bit, gray-scale
return 'L'
else:
raise ValueError(
'Input array must have either 2 dimensions or 3 dimensions where the '
'third dimension has 3 channels. i.e. arr.shape is (x,y) or (x,y,3). '
'Found shape {}.'.format(arr.shape))
def autoscale_colors_for_png(arr, vmin=None, vmax=None):
"""Adjust an array to prepare it for saving to an image.
Re-scale numbers in the input array to go from 0 to 255 to adapt them for a
PNG image.
Args:
arr: numpy array. Should be 2-dimensional or 3-dimensional where the third
dimension has 3 channels.
vmin: number (float or int). Minimum data value, which will correspond to
black in greyscale or lack of each color in RGB images. Default None takes
the minimum of the data from arr.
vmax: number (float or int). Maximum data value, which will correspond to
white in greyscale or full presence of each color in RGB images. Default
None takes the max of the data from arr.
Returns:
(modified numpy array, image_mode)
"""
image_mode = _get_image_type_from_array(arr)
if vmin is None:
vmin = np.min(arr)
if vmax is None:
vmax = np.max(arr)
# In cases where all elements are the same, fix the vmax so that even though
# the whole image will be black, the user can at least see the shape
if vmin == vmax:
vmax = vmin + 1
scaled = scale_colors_for_png(arr, vmin=vmin, vmax=vmax)
return scaled, image_mode
def add_header(img, labels, mark_midpoints=True, header_height=20):
"""Adds labels to the image, evenly distributed across the top.
This is primarily useful for showing the names of channels.
Args:
img: A PIL Image.
labels: list of strs. Labels for segments to write across the top.
mark_midpoints: bool. Whether to add a small vertical line marking the
center of each segment of the image.
header_height: int. Height of the header in pixels.
Returns:
A new PIL Image, taller than the original img and annotated.
"""
# Create a taller image to make space for a header at the top.
new_height = header_height + img.size[1]
new_width = img.size[0]
if img.mode == 'RGB':
placeholder_size = (new_height, new_width, 3)
else:
placeholder_size = (new_height, new_width)
placeholder = np.ones(placeholder_size, dtype=np.uint8) * 255
# Divide the image width into segments.
segment_width = img.size[0] / len(labels)
# Calculate midpoints for all segments.
midpoints = [int(segment_width * (i + 0.5)) for i in range(len(labels))]
if mark_midpoints:
# For each label, add a small line to mark the middle.
for x_position in midpoints:
placeholder[header_height - 5:header_height, x_position] = 0
# If image has an even width, it will need 2 pixels marked as the middle.
if segment_width % 2 == 0:
placeholder[header_height - 5:header_height, x_position + 1] = 0
bigger_img = Image.fromarray(placeholder, mode=img.mode)
# Place the original image inside the taller placeholder image.
bigger_img.paste(img, (0, header_height))
# Add a label for each segment.
draw = ImageDraw.Draw(bigger_img)
for i in range(len(labels)):
text = labels[i]
text_width = draw.textsize(text)[0]
# xy refers to the left top corner of the text, so to center the text on
# the midpoint, subtract half the text width from the midpoint position.
x_position = int(midpoints[i] - text_width / 2)
draw.text(xy=(x_position, 0), text=text, fill='black')
return bigger_img
def save_to_png(arr,
path=None,
image_mode=None,
show=True,
labels=None,
scale=None):
"""Make a PNG and show it from a numpy array of dtype=np.uint8.
Args:
arr: numpy array. Input array to save.
path: str. File path at which to save the image. A .png prefix is added if
the path does not already have one. Leave empty to save at /tmp/tmp.png,
which is useful when only temporarily showing the image in a Colab
notebook.
image_mode: "RGB" or "L". Leave as default=None to choose based on image
dimensions.
show: bool. Whether to display the image using IPython (for notebooks).
labels: list of str. Labels to show across the top of the image.
scale: integer. Number of pixels wide and tall to show each cell in the
array. This sizes up the image while keeping exactly the same number of
pixels for every cell in the array, preserving resolution and preventing
any interpolation or overlapping of pixels. Default None adapts to the
size of the image to multiply it up until a limit of 500 pixels, a
convenient size for use in notebooks. If saving to a file for automated
processing, scale=1 is recommended to keep output files small and simple
while still retaining all the information content.
Returns:
None. Saves an image at path and optionally shows it with IPython.display.
"""
if image_mode is None:
image_mode = _get_image_type_from_array(arr)
img = Image.fromarray(arr, mode=image_mode)
if labels is not None:
img = add_header(img, labels)
if scale is None:
scale = max(1, int(500 / max(arr.shape)))
if scale != 1:
img = img.resize((img.size[0] * scale, img.size[1] * scale))
# Saving to a temporary file is needed even when showing in a notebook
if path is None:
path = '/tmp/tmp.png'
elif not path.endswith('.png'):
# Only PNG is supported because JPEG files are unnecessarily 3 times larger.
path = '{}.png'.format(path)
with gfile.Open(path, 'wb') as fout:
img.save(fout, format=path.split('.')[-1])
# Show image (great for notebooks)
if show:
display.display(display.Image(path))
def array_to_png(arr,
path=None,
show=True,
vmin=None,
vmax=None,
scale=None,
labels=None):
"""Save an array as a PNG image with PIL and show it.
Args:
arr: numpy array. Should be 2-dimensional or 3-dimensional where the third
dimension has 3 channels.
path: str. Path for the image output. Default is /tmp/tmp.png for quickly
showing the image in a notebook.
show: bool. Whether to show the image using IPython utilities, only works in
notebooks.
vmin: number. Minimum data value, which will correspond to black in
greyscale or lack of each color in RGB images. Default None takes the
minimum of the data from arr.
vmax: number. Maximum data value, which will correspond to white in
greyscale or full presence of each color in RGB images. Default None takes
the max of the data from arr.
scale: integer. Number of pixels wide and tall to show each cell in the
array. This sizes up the image while keeping exactly the same number of
pixels for every cell in the array, preserving resolution and preventing
any interpolation or overlapping of pixels. Default None adapts to the
size of the image to multiply it up until a limit of 500 pixels, a
convenient size for use in notebooks. If saving to a file for automated
processing, scale=1 is recommended to keep output files small and simple
while still retaining all the information content.
labels: list of str. Labels to show across the top of the image.
Returns:
None. Saves an image at path and optionally shows it with IPython.display.
"""
scaled, image_mode = autoscale_colors_for_png(arr, vmin=vmin, vmax=vmax)
save_to_png(
scaled,
path=path,
show=show,
image_mode=image_mode,
labels=labels,
scale=scale)
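# Editor's note: a hedged usage sketch for array_to_png(); the output path is
# illustrative and `show` is disabled so the call works outside a notebook.
#
#   import numpy as np
#   noise = np.random.randint(0, 255, size=(80, 120), dtype=np.uint8)
#   array_to_png(noise, path='/tmp/noise_demo.png', show=False, scale=2)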
def _deepvariant_channel_names(num_channels):
"""Get DeepVariant channel names for the given number of channels."""
# Add additional empty labels if there are more channels than expected.
filler_labels = [
'channel {}'.format(i + 1)
for i in range(len(DEEPVARIANT_CHANNEL_NAMES), num_channels)
]
labels = DEEPVARIANT_CHANNEL_NAMES + filler_labels
# Trim off any extra labels.
return labels[0:num_channels]
def draw_deepvariant_pileup(example=None,
channels=None,
composite_type=None,
annotated=True,
labels=None,
path=None,
show=True,
scale=None):
"""Quick utility for showing a pileup example as channels or RGB.
Args:
example: A tensorflow Example containing image/encoded and image/shape
features. Will be parsed through channels_from_example. Ignored if
channels are provided directly. Either example OR channels is required.
channels: list of 2D arrays containing the data to draw. Either example OR
channels is required.
composite_type: str or None. Method for combining channels. One of
[None,"RGB"].
annotated: bool. Whether to add channel labels and mark midpoints.
labels: list of str. Which labels to add to the image. If annotated=True,
use default channels labels for DeepVariant.
path: str. Output file path for saving as an image. If None, just show plot.
show: bool. Whether to display the image for ipython notebooks. Set to False
to prevent extra output when running in bulk.
scale: integer. Multiplier to enlarge the image. Default: None, which will
set it automatically for a human-readable size. Set to 1 for no scaling.
Returns:
None. Saves an image at path and optionally shows it with IPython.display.
"""
if example and not channels:
channels = channels_from_example(example)
elif not channels:
raise ValueError('Either example OR channels must be specified.')
if composite_type is None:
img_array = np.concatenate(channels, axis=1)
if annotated and labels is None:
labels = _deepvariant_channel_names(len(channels))
elif composite_type == 'RGB':
img_array = convert_6_channels_to_rgb(channels)
if annotated and labels is None:
labels = [''] # Creates one midpoint with no label.
else:
raise ValueError(
"Unrecognized composite_type: {}. Must be None or 'RGB'".format(
composite_type))
array_to_png(
img_array,
path=path,
show=show,
scale=scale,
labels=labels,
vmin=0,
vmax=254)
def variant_from_example(example):
"""Extract Variant object from the 'variant/encoded' feature of an Example.
Args:
example: a DeepVariant-style make_examples output example.
Returns:
A Nucleus Variant.
"""
features = example.features.feature
var_string = features['variant/encoded'].bytes_list.value[0]
return variants_pb2.Variant.FromString(var_string)
def locus_id_from_variant(variant):
"""Create a locus ID of form "chr:pos_ref" from a Variant object.
Args:
variant: a nucleus variant.
Returns:
str.
"""
return '{}:{}_{}'.format(variant.reference_name, variant.start,
variant.reference_bases)
def alt_allele_indices_from_example(example):
"""Extract indices of the particular alt allele(s) the example represents.
Args:
example: a DeepVariant make_examples output example.
Returns:
list of indices.
"""
features = example.features.feature
val = features['alt_allele_indices/encoded'].bytes_list.value[0]
# Extract the encoded proto into unsigned integers and convert to regular ints
mapped = [int(x) for x in np.frombuffer(val, dtype=np.uint8)]
# Format is [<field id + type>, <number of elements in array>, ...<array>].
# Extract the array only, leaving out the metadata.
return mapped[2:]
def alt_bases_from_indices(alt_allele_indices, alternate_bases):
"""Get alt allele bases based on their indices.
e.g. one alt allele: [0], ["C"] => "C"
or with two alt alleles: [0,2], ["C", "TT", "A"] => "C-A"
Args:
alt_allele_indices: list of integers. Indices of the alt alleles for a
particular example.
alternate_bases: list of strings. All alternate alleles for the variant.
Returns:
str. Alt allele(s) at the indices, joined by '-' if more than 1.
"""
alleles = [alternate_bases[i] for i in alt_allele_indices]
# Avoiding '/' to support use in file paths.
return '-'.join(alleles)
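# Editor's note: a tiny, hedged doctest-style check of the joining rule above.
#
#   assert alt_bases_from_indices([0], ['C', 'TT', 'A']) == 'C'
#   assert alt_bases_from_indices([0, 2], ['C', 'TT', 'A']) == 'C-A'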
def alt_from_example(example):
"""Get alt allele(s) from a DeepVariant example.
Args:
example: a DeepVariant make_examples output example.
Returns:
str. The bases of the alt alleles, joined by a -.
"""
variant = variant_from_example(example)
indices = alt_allele_indices_from_example(example)
return alt_bases_from_indices(indices, variant.alternate_bases)
def locus_id_with_alt(example):
"""Get complete locus ID from a DeepVariant example.
Args:
example: a DeepVariant make_examples output example.
Returns:
str in the form "chr:pos_ref_alt.
"""
variant = variant_from_example(example)
locus_id = locus_id_from_variant(variant)
alt = alt_from_example(example)
return '{}_{}'.format(locus_id, alt)
def label_from_example(example):
"""Get the "label" from an example.
Args:
example: a DeepVariant make_examples output example.
Returns:
integer (0, 1, or 2 for regular DeepVariant examples) or None if the
example has no label.
"""
val = example.features.feature['label'].int64_list.value
if val:
return int(val[0])
else:
return None
| 35.108499
| 80
| 0.699768
|
793b9156e6335de4c37dc86b19f5b660dc4c2730
| 7,278
|
py
|
Python
|
fem/gui/vtk_widget/vtk_graphics/interactor_styles/my_interactor_style.py
|
mjredmond/FEMApp
|
dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8
|
[
"MIT"
] | 1
|
2019-08-03T21:40:26.000Z
|
2019-08-03T21:40:26.000Z
|
fem/gui/vtk_widget/vtk_graphics/interactor_styles/my_interactor_style.py
|
mjredmond/FEMApp
|
dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8
|
[
"MIT"
] | null | null | null |
fem/gui/vtk_widget/vtk_graphics/interactor_styles/my_interactor_style.py
|
mjredmond/FEMApp
|
dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, absolute_import
import vtk
from fem.utilities import MrSignal
from ..picking import PickingManager
from ...vtk_graphics import VTKGraphics
from ...vtk_graphics.rendering import (
vtkPVInteractorStyle, vtkPVTrackballPan, vtkPVTrackballRotate, vtkPVTrackballZoom, vtkCameraManipulator,
vtkCameraManipulatorGUIHelper
)
class MyInteractorStyle(vtkPVInteractorStyle):
def __init__(self):
vtkPVInteractorStyle.__init__(self)
self._rotate = vtkPVTrackballRotate()
self._pan = vtkPVTrackballPan()
self._zoom = vtkPVTrackballZoom()
self._hover = None
""":type: gui.vtk_widget.vtk_graphics.interactor_styles.hovered_interactor_style.HoveredInteractorStyle"""
self._pan.GUIHelper = vtkCameraManipulatorGUIHelper()
self._buttons_pressed = set()
# these need to be implemented
#selection_data.translation_factor_changed.connect(self.set_translation_factor)
#selection_data.rotation_factor_changed.connect(self.set_rotation_factor)
#selection_data.zoom_scale_factor_changed.connect(self.set_zoom_scale_factor)
self._box_picker = None
""":type: gui.vtk_widget.vtk_graphics.picking.BoxPicker"""
self._poly_picker = None
""":type: gui.vtk_widget.vtk_graphics.picking.PolyPicker"""
#selection_data.box_picker_activate.connect(self.box_picker_activate)
#selection_data.poly_picker_activate.connect(self.poly_picker_activate)
self._active_picker = None
self.vtk_graphics = None
""":type: VTKGRaphics"""
self.picking_manager = None
""":type: PickingManager"""
self.zoom_changed = MrSignal()
def build(self):
self.AddManipulator(self._rotate)
self.AddManipulator(self._pan)
self.AddManipulator(self._zoom)
self.AddManipulator(self._hover)
self._rotate.Button = 1
self._pan.Button = 2
self._zoom.Button = 3
self._hover.Button = -1
self.vtk_graphics = VTKGraphics.instance()
self.picking_manager = PickingManager.instance()
def set_hover_interactor_style(self, hover):
self._hover = hover
def set_box_picker(self, box_picker):
self._box_picker = box_picker
self._box_picker.done_picking.connect(self._reset_mousemove)
def set_poly_picker(self, poly_picker):
self._poly_picker = poly_picker
self._poly_picker.done_picking.connect(self._reset_mousemove)
def box_picker_activate(self):
self._active_picker = self._box_picker
self._picker_activate()
def poly_picker_activate(self):
self._active_picker = self._poly_picker
self._picker_activate()
def _picker_activate(self):
self.RemoveObservers("MouseMoveEvent")
self.RemoveObservers("LeftButtonPressEvent")
self.RemoveObservers("LeftButtonReleaseEvent")
self.AddObserver("MouseMoveEvent", self._PickerOnMouseMove)
self.AddObserver("LeftButtonPressEvent", self._PickerOnButtonDown)
self.AddObserver("LeftButtonReleaseEvent", self._PickerOnButtonUp)
def _reset_mousemove(self):
self.picking_manager.picking_done()
self.RemoveObservers("MouseMoveEvent")
self.RemoveObservers("LeftButtonPressEvent")
self.RemoveObservers("LeftButtonReleaseEvent")
self.AddObserver("MouseMoveEvent", self._OnMouseMove)
self.AddObserver("LeftButtonPressEvent", self._OnLeftButtonDown)
self.AddObserver("LeftButtonReleaseEvent", self._OnLeftButtonUp)
def _PickerOnMouseMove(self, *args):
current_renderer = self.GetCurrentRenderer()
interactor = self.GetInteractor()
event_pos = interactor.GetEventPosition()
if not current_renderer:
self.FindPokedRenderer(event_pos[0], event_pos[1])
current_renderer = self.GetCurrentRenderer()
if current_renderer:
self._active_picker.OnMouseMove(event_pos[0], event_pos[1], current_renderer, interactor)
self.InvokeEvent(vtk.vtkCommand.InteractionEvent)
def _PickerOnButtonDown(self, *args):
current_renderer = self.GetCurrentRenderer()
interactor = self.GetInteractor()
event_pos = interactor.GetEventPosition()
if not current_renderer:
self.FindPokedRenderer(event_pos[0], event_pos[1])
current_renderer = self.GetCurrentRenderer()
self.InvokeEvent(vtk.vtkCommand.StartInteractionEvent)
self._active_picker.StartInteraction()
self._active_picker.OnButtonDown(event_pos[0], event_pos[1], current_renderer, interactor)
def _PickerOnButtonUp(self, *args):
current_renderer = self.GetCurrentRenderer()
interactor = self.GetInteractor()
event_pos = interactor.GetEventPosition()
if not current_renderer:
self.FindPokedRenderer(event_pos[0], event_pos[1])
current_renderer = self.GetCurrentRenderer()
self._active_picker.OnButtonUp(event_pos[0], event_pos[1], current_renderer, interactor)
self.InvokeEvent(vtk.vtkCommand.EndInteractionEvent)
def set_center_of_rotation(self, center):
self.CenterOfRotation[:] = center[:]
def set_translation_factor(self, factor):
self.TranslationFactor = factor
def set_rotation_factor(self, factor):
self.RotationFactor = factor
def set_zoom_scale_factor(self, factor):
self.ZoomScaleFactor = factor
def OnButtonDown(self, button, shift, control):
if self.CurrentManipulator is self._hover:
self.OnButtonUp(self._hover.Button)
#self._hover.unload()
if button > 0:
self._buttons_pressed.add(button)
vtkPVInteractorStyle.OnButtonDown(self, button, shift, control)
def OnButtonUp(self, button):
vtkPVInteractorStyle.OnButtonUp(self, button)
if button > 0:
try:
self._buttons_pressed.remove(button)
except KeyError:
return
if not self._buttons_pressed:
old_down = self._down_pos
vtkPVInteractorStyle.OnButtonDown(self, self._hover.Button, self._hover.Shift, self._hover.Control)
self._down_pos = old_down
if self._down_pos == self._up_pos:
if self._hover.cell_picker.ClosestCellGlobalId > 0:
new_center = self._hover.cell_picker.CellCenter
else:
new_center = None
self.picking_manager.single_pick(
self._down_pos,
new_center,
self._hover.cell_picker.ClosestCellGlobalId
)
def finalize(self):
self._box_picker.done_picking.disconnect(self._reset_mousemove)
self._poly_picker.done_picking.disconnect(self._reset_mousemove)
self._reset_mousemove()
self._box_picker = None
self._poly_picker = None
self._active_picker = None
self.RemoveAllManipulators()
self._rotate = None
self._pan = None
self._zoom = None
self._hover = None
self.vtk_graphics = None
| 34.169014
| 115
| 0.681506
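A minimal wiring sketch for the interactor style above (not part of the source): `hover`, `box_picker`, `poly_picker` and `interactor` are assumed to be pre-built HoveredInteractorStyle, BoxPicker, PolyPicker and vtkRenderWindowInteractor instances, and the VTKGraphics / PickingManager singletons are assumed to be initialised before build() runs.
style = MyInteractorStyle()
style.set_hover_interactor_style(hover)   # hover: HoveredInteractorStyle instance (assumed to exist)
style.set_box_picker(box_picker)          # picking helpers created elsewhere (assumed)
style.set_poly_picker(poly_picker)
style.build()                             # registers the rotate/pan/zoom/hover manipulators and grabs the singletons
interactor.SetInteractorStyle(style)      # interactor: vtkRenderWindowInteractor (standard VTK call)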
|
3a97cc5fd6d9ba5653f8e837919cc4637160c046
| 5,769
|
py
|
Python
|
kubernetes_asyncio/client/models/v1beta1_network_policy_egress_rule.py
|
weltonrodrigo/kubernetes_asyncio
|
b793f3e9ea43cbd0f4ff40ace1b0b677682f4042
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1beta1_network_policy_egress_rule.py
|
weltonrodrigo/kubernetes_asyncio
|
b793f3e9ea43cbd0f4ff40ace1b0b677682f4042
|
[
"Apache-2.0"
] | 13
|
2021-04-12T02:03:48.000Z
|
2022-03-28T02:08:46.000Z
|
kubernetes_asyncio/client/models/v1beta1_network_policy_egress_rule.py
|
weltonrodrigo/kubernetes_asyncio
|
b793f3e9ea43cbd0f4ff40ace1b0b677682f4042
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.16.14
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1beta1NetworkPolicyEgressRule(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ports': 'list[V1beta1NetworkPolicyPort]',
'to': 'list[V1beta1NetworkPolicyPeer]'
}
attribute_map = {
'ports': 'ports',
'to': 'to'
}
def __init__(self, ports=None, to=None, local_vars_configuration=None): # noqa: E501
"""V1beta1NetworkPolicyEgressRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ports = None
self._to = None
self.discriminator = None
if ports is not None:
self.ports = ports
if to is not None:
self.to = to
@property
def ports(self):
"""Gets the ports of this V1beta1NetworkPolicyEgressRule. # noqa: E501
List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list. # noqa: E501
:return: The ports of this V1beta1NetworkPolicyEgressRule. # noqa: E501
:rtype: list[V1beta1NetworkPolicyPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1beta1NetworkPolicyEgressRule.
List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list. # noqa: E501
:param ports: The ports of this V1beta1NetworkPolicyEgressRule. # noqa: E501
:type: list[V1beta1NetworkPolicyPort]
"""
self._ports = ports
@property
def to(self):
"""Gets the to of this V1beta1NetworkPolicyEgressRule. # noqa: E501
List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list. # noqa: E501
:return: The to of this V1beta1NetworkPolicyEgressRule. # noqa: E501
:rtype: list[V1beta1NetworkPolicyPeer]
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this V1beta1NetworkPolicyEgressRule.
List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list. # noqa: E501
:param to: The to of this V1beta1NetworkPolicyEgressRule. # noqa: E501
:type: list[V1beta1NetworkPolicyPeer]
"""
self._to = to
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1NetworkPolicyEgressRule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1NetworkPolicyEgressRule):
return True
return self.to_dict() != other.to_dict()
| 38.205298
| 412
| 0.644132
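A short usage sketch for the generated model above (illustrative, not from the source): `tcp_443` and `any_peer` stand in for V1beta1NetworkPolicyPort / V1beta1NetworkPolicyPeer instances constructed elsewhere.
rule = V1beta1NetworkPolicyEgressRule(ports=[tcp_443], to=[any_peer])
print(rule.to_dict())  # nested models are expanded recursively via their own to_dict()
print(rule == V1beta1NetworkPolicyEgressRule(ports=[tcp_443], to=[any_peer]))  # True: __eq__ compares the dict forms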
|
41c456831bdbc07e0178b19cb79cd58800a81fb5
| 3,757
|
py
|
Python
|
travis_pypi_setup.py
|
vault-the/rigor
|
02f5ddb18092a114dc0a23c8fb60975e2b906e85
|
[
"MIT"
] | null | null | null |
travis_pypi_setup.py
|
vault-the/rigor
|
02f5ddb18092a114dc0a23c8fb60975e2b906e85
|
[
"MIT"
] | null | null | null |
travis_pypi_setup.py
|
vault-the/rigor
|
02f5ddb18092a114dc0a23c8fb60975e2b906e85
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:  # Python 3 moved urlopen into urllib.request
from urllib.request import urlopen
GITHUB_REPO = 'genomoncology/rigor'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.safe_load(f)  # safe_load avoids the unsafe default loader; .travis.yml needs no custom tags
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| 30.544715
| 79
| 0.700559
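A small round-trip sketch of the encrypt() helper above, assuming the `cryptography` package is installed; the locally generated key below only stands in for the repository key that fetch_public_key() would normally download from Travis.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = private_key.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo).decode()
token = encrypt(pem, b'example-pypi-password')  # base64-encoded ciphertext, decryptable only with the private key
print(token[:16])  # just a peek; the full value is what would land in .travis.yml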
|
f6eb7cf181a2aca3d16f96e4bafdfbd9e5141671
| 4,130
|
py
|
Python
|
msm_pele/AdaptivePELE/freeEnergies/oldScripts/repeatSnapshotsPeleAdaptive.py
|
danielSoler93/msm_pele
|
80b187ceb6446059f6c7b0dd2c0968f0db4a17a1
|
[
"MIT"
] | 13
|
2017-06-14T14:42:22.000Z
|
2022-01-25T08:46:04.000Z
|
msm_pele/AdaptivePELE/freeEnergies/oldScripts/repeatSnapshotsPeleAdaptive.py
|
danielSoler93/msm_pele
|
80b187ceb6446059f6c7b0dd2c0968f0db4a17a1
|
[
"MIT"
] | 16
|
2018-01-16T01:32:02.000Z
|
2021-02-19T17:05:12.000Z
|
msm_pele/AdaptivePELE/freeEnergies/oldScripts/repeatSnapshotsPeleAdaptive.py
|
danielSoler93/msm_pele
|
80b187ceb6446059f6c7b0dd2c0968f0db4a17a1
|
[
"MIT"
] | 8
|
2018-02-20T10:47:07.000Z
|
2022-03-21T12:28:07.000Z
|
import os
import numpy
import glob
import re
import argparse
def parseArguments():
desc = "Adds repeated snapshots in rejected steps. If total steps are provided, add also steps until the end"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-t", "--totalSteps", type=int, default=0, help="Total number of steps in traj")
parser.add_argument("folderTraj",
help="Folder where the epochs trajs are stored")
args = parser.parse_args()
return args.totalSteps, args.folderTraj
totalSteps, folderTraj = parseArguments()
inputTrajectoryFolder = "%s/extractedCoordinates/"
baseTrajectoryName = "coord_"
reportName = '*report_'
outputTrajectoryFolder = "%s/repeatedExtractedCoordinates/"
allFolders = os.listdir(folderTraj)
Epochs = [epoch for epoch in allFolders if epoch.isdigit()]
for folder in Epochs:
pathFolder = os.path.join(folderTraj, folder)
inputTrajectoryFolderEpoch = inputTrajectoryFolder % pathFolder
outputTrajectoryFolderEpoch = outputTrajectoryFolder % pathFolder
if not os.path.exists(outputTrajectoryFolderEpoch):
os.makedirs(outputTrajectoryFolderEpoch)
inputTrajectories = glob.glob(inputTrajectoryFolderEpoch + baseTrajectoryName + '*')
for inputTrajectory in inputTrajectories:
trajectoryNumber = re.sub(r'\.dat$', '', inputTrajectory)
trajectoryNumber = re.sub(inputTrajectoryFolderEpoch + baseTrajectoryName, '', trajectoryNumber)
try:
reportFile = glob.glob(os.path.join(pathFolder, reportName + trajectoryNumber))[0]
except IndexError:
print("Couldn't find file that matches: " + reportName + trajectoryNumber)
continue
with open(inputTrajectory) as f:
trajectory = f.read().splitlines()
steps = numpy.loadtxt(reportFile, dtype='int', comments='#', usecols=(1,))
# Improvement: Whenever the initial step is added in the trajectory,
# we can add steps[0] times the initial structure
completeTrajectory = []
counter = 0
"""
if len(trajectory) == 0: #one step traj, we could repeat #steps/epoch times
snapshot = trajectory[i].split()
snapshot[0] = str(counter)
snapshot = ' '.join(snapshot)
completeTrajectory.append(snapshot)
"""
if len(trajectory) > 0:
for i in range(len(trajectory) - 1):
try:
repeated = steps[i+1] - steps[i]
except IndexError:
# sys.exit("something is wrong in trajectory " + inputTrajectory)
print("Something is wrong in trajectory " + inputTrajectory)
# continue
# Changed behavior, write until the end of the information in report file
snapshot = trajectory[i].split()
snapshot[0] = str(counter)
snapshot = ' '.join(snapshot)
completeTrajectory.append(snapshot)
break
for j in range(repeated):
snapshot = trajectory[i].split()
snapshot[0] = str(counter)
snapshot = ' '.join(snapshot)
completeTrajectory.append(snapshot)
counter += 1
# !!!!
# WARNING!!! Add last snapshot DID NOT CHECK when report/traj don't match
# !!!!
if totalSteps == 0:
iterations = range(1)
else:
iterations = range(totalSteps + 1 - counter)
for i in iterations:
snapshot = trajectory[-1].split()
snapshot[0] = str(counter)
snapshot = ' '.join(snapshot)
completeTrajectory.append(snapshot)
counter += 1
outputFilename = outputTrajectoryFolderEpoch + baseTrajectoryName + trajectoryNumber + '.dat'
outputFile = open(outputFilename, 'w')
for snapshot in completeTrajectory:
outputFile.write("%s\n" % snapshot)
outputFile.close()
| 40.490196
| 113
| 0.6
|
670034f28a2ac81a84820180757e8f3ccfcf35b5
| 1,094
|
py
|
Python
|
azure-keyvault/azure/keyvault/v7_0/models/backup_certificate_result.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-06-12T13:44:34.000Z
|
2020-06-01T13:24:04.000Z
|
azure-keyvault/azure/keyvault/v7_0/models/backup_certificate_result.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 5
|
2018-04-26T01:14:29.000Z
|
2021-01-05T00:45:39.000Z
|
azure-keyvault/azure/keyvault/v7_0/models/backup_certificate_result.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 8
|
2018-04-24T22:52:48.000Z
|
2021-11-16T06:29:28.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BackupCertificateResult(Model):
"""The backup certificate result, containing the backup blob.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar value: The backup blob containing the backed up certificate.
:vartype value: bytes
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'base64'},
}
def __init__(self, **kwargs):
super(BackupCertificateResult, self).__init__(**kwargs)
self.value = None
| 30.388889
| 76
| 0.593236
|
c2b8c93e9fa2b94fa6e24e17b1bad6ffd79a21ab
| 1,296
|
py
|
Python
|
firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/max7219_simpletest.py
|
freeglow/microcontroller-cpy
|
5adfda49da6eefaece81be2a2f26122d68736355
|
[
"MIT"
] | null | null | null |
firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/max7219_simpletest.py
|
freeglow/microcontroller-cpy
|
5adfda49da6eefaece81be2a2f26122d68736355
|
[
"MIT"
] | null | null | null |
firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/max7219_simpletest.py
|
freeglow/microcontroller-cpy
|
5adfda49da6eefaece81be2a2f26122d68736355
|
[
"MIT"
] | null | null | null |
import time
from board import TX, RX, A1
import busio
import digitalio
from adafruit_max7219 import matrices
mosi = TX
clk = RX
cs = digitalio.DigitalInOut(A1)
spi = busio.SPI(clk, MOSI=mosi)
matrix = matrices.Matrix8x8(spi, cs)
while True:
print("Cycle start")
# all lit up
matrix.fill(True)
matrix.show()
time.sleep(0.5)
# all off
matrix.fill(False)
matrix.show()
time.sleep(0.5)
# one column of leds lit
for i in range(8):
matrix.pixel(1, i, 1)
matrix.show()
time.sleep(0.5)
# now scroll the column to the right
for j in range(8):
matrix.scroll(1, 0)
matrix.show()
time.sleep(0.5)
# show a string one character at a time
adafruit = "Adafruit"
for char in adafruit:
matrix.fill(0)
matrix.text(char, 0, 0)
matrix.show()
time.sleep(1.0)
# scroll the last character off the display
for i in range(8):
matrix.scroll(-1, 0)
matrix.show()
time.sleep(0.5)
# scroll a string across the display
for pixel_position in range(len(adafruit) * 8):
matrix.fill(0)
matrix.text(adafruit, -pixel_position, 0)
matrix.show()
time.sleep(0.25)
| 22.736842
| 52
| 0.578704
|
769425234d1745165cba95fae1dbe5a99d43cc05
| 8,350
|
py
|
Python
|
train.py
|
biomiker/pytorch-retinanet
|
0f0c1671df49adc189677a59c5137737e73c63a4
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
biomiker/pytorch-retinanet
|
0f0c1671df49adc189677a59c5137737e73c63a4
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
biomiker/pytorch-retinanet
|
0f0c1671df49adc189677a59c5137737e73c63a4
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import collections
from datetime import timedelta
import time
import os,sys
import glob
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
from retinanet import model
from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, \
Normalizer
from torch.utils.data import DataLoader
from retinanet import coco_eval
from retinanet import csv_eval
assert torch.__version__.split('.')[0] == '1'
print('CUDA available: {}'.format(torch.cuda.is_available()))
def main(args=None):
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')
parser.add_argument('--coco_path', help='Path to COCO directory')
parser.add_argument('--csv_images_path', help='Path to which CSV image paths are relative')
parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
parser.add_argument('--checkpoint', help='Saved .pt file from which to resume training, or \'latest\' for latest in current directory')
parser = parser.parse_args(args)
# Create the data loaders
if parser.dataset == 'coco':
if parser.coco_path is None:
raise ValueError('Must provide --coco_path when training on COCO.')
dataset_train = CocoDataset(parser.coco_path, set_name='train2017',
transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
dataset_val = CocoDataset(parser.coco_path, set_name='val2017',
transform=transforms.Compose([Normalizer(), Resizer()]))
elif parser.dataset == 'csv':
if parser.csv_train is None:
raise ValueError('Must provide --csv_train when training on CSV.')
if parser.csv_classes is None:
raise ValueError('Must provide --csv_classes when training on CSV.')
dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]),
root_dir=parser.csv_images_path)
if parser.csv_val is None:
dataset_val = None
print('No validation annotations provided.')
else:
dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Resizer()]))
else:
raise ValueError('Dataset type not understood (must be csv or coco), exiting.')
sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False)
dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler)
if dataset_val is not None:
sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)
# Create the model
epoch_offset = 0
if parser.checkpoint:
if parser.checkpoint == 'latest':
glob_str = f"./{parser.dataset}_retinanet_*.pt"
checkpoints = glob.glob(glob_str)
if not checkpoints:
print(f"No checkpoints ({glob_str}) found")
sys.exit()
checkpoints.sort(key=checkpoint_epoch)
checkpoint = checkpoints[-1]
print("Latest: ", checkpoint)
else:
checkpoint = parser.checkpoint
epoch_offset = checkpoint_epoch(checkpoint)
print(f"Resuming from checkpoint {parser.checkpoint}, first epoch = {epoch_offset + 1}")
retinanet = torch.load(checkpoint)
else:
if parser.depth == 18:
retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 34:
retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 50:
retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 101:
retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 152:
retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
else:
raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
use_gpu = True
if use_gpu:
if torch.cuda.is_available():
retinanet = retinanet.cuda()
if torch.cuda.is_available():
retinanet = torch.nn.DataParallel(retinanet).cuda()
else:
retinanet = torch.nn.DataParallel(retinanet)
retinanet.training = True
optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
loss_hist = collections.deque(maxlen=500)
retinanet.train()
retinanet.module.freeze_bn()
print('Num training images: {}'.format(len(dataset_train)))
start_time = time.time()
for epoch_index in range(epoch_offset, parser.epochs + epoch_offset):
# epoch_numbers start from 1 please
epoch_num = epoch_index + 1
epoch_start = time.time()
retinanet.train()
retinanet.module.freeze_bn()
epoch_loss = []
for iter_num, data in enumerate(dataloader_train):
try:
optimizer.zero_grad()
if torch.cuda.is_available():
classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']])
else:
classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']])
classification_loss = classification_loss.mean()
regression_loss = regression_loss.mean()
loss = classification_loss + regression_loss
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
optimizer.step()
loss_hist.append(float(loss))
epoch_loss.append(float(loss))
print(
'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)))
del classification_loss
del regression_loss
except Exception as e:
print(e)
continue
epoch_elapsed = time.time() - epoch_start
total_elapsed = time.time() - start_time
print(f"COMPLETE: Epoch {epoch_num}/{parser.epochs + epoch_offset} in {str(timedelta(seconds=epoch_elapsed)).split('.',1)[0]}, total time = {str(timedelta(seconds=total_elapsed)).split('.',1)[0]}")
if parser.dataset == 'coco':
print('Evaluating dataset')
coco_eval.evaluate_coco(dataset_val, retinanet)
elif parser.dataset == 'csv' and parser.csv_val is not None:
print('Evaluating dataset')
mAP = csv_eval.evaluate(dataset_val, retinanet)
scheduler.step(np.mean(epoch_loss))
torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num))
retinanet.eval()
torch.save(retinanet, 'model_final.pt')
def checkpoint_epoch(filename):
return int(os.path.splitext(os.path.basename(filename))[0].split("_")[2])
if __name__ == '__main__':
main()
| 37.276786
| 205
| 0.643473
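Because the training script above is driven entirely by argparse, it can also be invoked programmatically by handing CLI-style arguments to main(); the CSV paths below are placeholders rather than files from the source.
main(['--dataset', 'csv',
      '--csv_train', 'annotations/train.csv',
      '--csv_classes', 'annotations/classes.csv',
      '--csv_val', 'annotations/val.csv',
      '--depth', '50', '--epochs', '10'])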
|
424d5338081127cc736e5e4fa9ae75d84b73d9c3
| 2,723
|
py
|
Python
|
src/bouton.py
|
QuentinBubu/trophees-nsi-2022
|
3402b584de7b07b54b6e97332d279f670efe48c2
|
[
"MIT"
] | null | null | null |
src/bouton.py
|
QuentinBubu/trophees-nsi-2022
|
3402b584de7b07b54b6e97332d279f670efe48c2
|
[
"MIT"
] | 2
|
2022-03-01T10:51:46.000Z
|
2022-03-15T19:51:18.000Z
|
src/bouton.py
|
QuentinBubu/trophees-nsi-2022
|
3402b584de7b07b54b6e97332d279f670efe48c2
|
[
"MIT"
] | 1
|
2022-03-29T17:02:17.000Z
|
2022-03-29T17:02:17.000Z
|
"""
A simple button class
"""
import pygame
class Bouton:
def __init__(self, pos, size, texture_path, texture_path_clicked, func=None):
"""Un bouton interractible
Args:
pos (tuple(int)): la position
size (tuple(int)): la taille du bouton
texture_path (string): la texture du bouton
texture_path_clicked (string): la texture du bouton quand il est cliqué
func (function): la fonction à executer si le bouton est cliqué
"""
self.pos = pos
self.size = size
self.texture = pygame.image.load(texture_path)
self.texture = pygame.transform.scale(self.texture, self.size)
self.texture_clicked = pygame.image.load(texture_path_clicked)
self.texture_clicked = pygame.transform.scale(self.texture_clicked, self.size)
self.function = func
self.clicked = False
######################GETTERS AND SETTERS################################
def set_size(self, new_size):
self.size = new_size
def set_pos(self, new_pos):
self.pos = new_pos
def set_texture(self, new_t):
self.texture = pygame.image.load(new_t)
self.texture = pygame.transform.scale(self.texture, self.size)
def set_texture_clicked(self, new_tc):
self.texture_clicked = pygame.image.load(new_tc)
self.texture_clicked = pygame.transform.scale(self.texture_clicked, self.size)
def set_clicked(self, state):
self.clicked = state
#########################################################################
def is_clicked(self):
"""Renvoie True si le Bouton est cliqué
Returns:
bool : True si cliqué, False si non
"""
m_posX, m_posY = pygame.mouse.get_pos()
if (self.pos[0] < m_posX < self.pos[0] + self.size[0]) and (self.pos[1] < m_posY < self.pos[1] + self.size[1]):
return True
return False
def click(self):
"""Etat de clique du bouton
Returns:
NoneType ou ______________ : ____________________
"""
if self.function != None:
return self.function()
def draw(self, window: pygame.surface.Surface):
"""Dessin du bouton
Args:
window (pygame.surface.Surface): surface de dessin
"""
if self.clicked:
window.screen.blit(self.texture_clicked, self.pos)
else:
window.screen.blit(self.texture, self.pos)
def actualiser(self, window: pygame.surface.Surface):
"""actualise le dessin du bouton
Args:
window (pygame.surface.Surface): dessin du bouton
"""
self.draw(window)
| 30.595506
| 119
| 0.583915
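A compact pygame loop showing how the Bouton class above might be used; note that draw() blits onto window.screen, so this sketch wraps the display surface in a tiny holder object, and the texture paths are placeholders, not assets shipped with the source.
import pygame
class _Window:
    # Minimal holder: Bouton.draw() expects an object exposing a .screen surface attribute
    def __init__(self, screen):
        self.screen = screen
pygame.init()
window = _Window(pygame.display.set_mode((320, 240)))
bouton = Bouton((20, 20), (120, 40), 'assets/btn.png', 'assets/btn_clicked.png', func=lambda: print('clicked'))
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN and bouton.is_clicked():
            bouton.set_clicked(True)
            bouton.click()
        elif event.type == pygame.MOUSEBUTTONUP:
            bouton.set_clicked(False)
    bouton.actualiser(window)
    pygame.display.flip()
pygame.quit()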
|
999452616ff48da5281cc2e20c62355430eda43c
| 971
|
py
|
Python
|
examples/restore_cybers.py
|
pgarcia14180/henka
|
850a01a272deaf414e70a818012f8b239018f460
|
[
"MIT"
] | null | null | null |
examples/restore_cybers.py
|
pgarcia14180/henka
|
850a01a272deaf414e70a818012f8b239018f460
|
[
"MIT"
] | null | null | null |
examples/restore_cybers.py
|
pgarcia14180/henka
|
850a01a272deaf414e70a818012f8b239018f460
|
[
"MIT"
] | null | null | null |
from henka.config_helpers.henka_config import HenkaConfig as MainConfig
from henka.processes.henka import henka as dataprocess
def restore_cyber(date):
config = [{
'name': 'upd',
'source': {
'name': 'elasticsearch',
'index_name': 'txd_cyber',
'url': 'localhost',
'port': '9200',
'scheme': 'http',
'body': {
'es_range': {'fectrx_n8':{'gte': date, 'lte': date}},
'tienda': 32
},
'search_or_agg': 'search'
},
'process': {
'mock': {
'vta_transaction': 'Y',
}
},
'save': {
'name': 'elasticsearch',
'index_name': 'txd_item_upd'+'-'+date,
'index_id': 'id',
'url': 'localhost',
'port': '9200',
'scheme': 'http',
}
}]
dataprocess(MainConfig(*config))
| 29.424242
| 73
| 0.431514
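An illustrative call for the helper above; the expected date format is not documented in the source, so the value below (shaped to fit the fectrx_n8 range filter) is purely an assumption, as is the availability of Elasticsearch on localhost:9200.
restore_cyber('20220101')  # reads index txd_cyber and writes txd_item_upd-20220101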
|
2d50d13427b80e55795a3f3de7af331d3726d607
| 769
|
py
|
Python
|
assignments/14-open/test.py
|
pmoma/biosys-analytics
|
8822e6751699acd297679219e76d36666820846f
|
[
"MIT"
] | null | null | null |
assignments/14-open/test.py
|
pmoma/biosys-analytics
|
8822e6751699acd297679219e76d36666820846f
|
[
"MIT"
] | null | null | null |
assignments/14-open/test.py
|
pmoma/biosys-analytics
|
8822e6751699acd297679219e76d36666820846f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""test for open.py"""
from subprocess import getstatusoutput, getoutput
import os.path
import re
open = './open.py'
def test_exists():
"""scripts exist"""
assert os.path.exists(open)
def test_usage():
"""usage"""
(retval, out) = getstatusoutput(open)
assert retval > 0
assert re.match("usage", out, re.IGNORECASE)
def test_word1():
"""runs test1"""
out1 = getoutput(open + ' Foo')
assert out1.rstrip() == 'FFoFoo'
def test_word2():
"""runs test2"""
out1 = getoutput(open + ' Python')
assert out1.rstrip() == 'PPyPytPythPythoPython'
def test_word3():
"""runs test3"""
out1 = getoutput(open + ' foobarbaz')
assert out1.rstrip() == 'ffofoofoobfoobafoobarfoobarbfoobarbafoobarbaz'
| 21.971429
| 75
| 0.647594
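The test suite above pins down open.py's expected behaviour (a usage message when called without arguments, and 'Foo' -> 'FFoFoo', i.e. the concatenation of every prefix of the word), so a compatible implementation might look like the sketch below; this is an inferred reconstruction, not the original script.
#!/usr/bin/env python3
"""Print the concatenation of all prefixes of a word (matches test.py above)."""
import argparse
def main():
    parser = argparse.ArgumentParser(description='Concatenate all prefixes of a word')
    parser.add_argument('word', metavar='str', help='Input word')
    word = parser.parse_args().word
    print(''.join(word[:i] for i in range(1, len(word) + 1)))
if __name__ == '__main__':
    main()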
|
3d63c3a391813161ea477c2b09430800353f0efb
| 4,125
|
py
|
Python
|
softdelete/tests/test_views.py
|
BehaviorCloud/django-softdelete
|
f74fb5a2ef2c3777d1c2a30513cf57c641649096
|
[
"BSD-2-Clause"
] | null | null | null |
softdelete/tests/test_views.py
|
BehaviorCloud/django-softdelete
|
f74fb5a2ef2c3777d1c2a30513cf57c641649096
|
[
"BSD-2-Clause"
] | null | null | null |
softdelete/tests/test_views.py
|
BehaviorCloud/django-softdelete
|
f74fb5a2ef2c3777d1c2a30513cf57c641649096
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf import settings
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.db import models
from softdelete.test_softdelete_app.models import TestModelOne, TestModelTwo, TestModelThree
from softdelete.models import *
from softdelete.signals import *
import logging
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
class ViewBase(TestCase):
def setUp(self):
u, c = User.objects.get_or_create(username="undelete_test")
u.is_active = True
u.set_password("undelete_password")
gr = create_group()
if USE_SOFTDELETE_GROUP:
gr = Group.objects.get(name="Softdelete User")
u.groups.add(gr)
u.save()
gr.save()
else:
assign_permissions(u)
u.save()
self.tmo1 = TestModelOne.objects.create(extra_bool=True)
self.tmo3 = TestModelThree.objects.create(extra_int=3)
for x in range(10):
TestModelTwo.objects.create(extra_int=x, tmo=self.tmo1)
self.tmo2 = TestModelOne.objects.create(extra_bool=False)
for x in range(10):
TestModelTwo.objects.create(extra_int=x*x, tmo=self.tmo2)
self.tmo2.delete()
class ViewTest(ViewBase):
def __init__(self, *args, **kwargs):
settings.USE_SOFTDELETE_GROUP = kwargs.get('USE_SOFTDELETE_GROUP', False)
if 'USE_SOFTDELETE_GROUP' in kwargs:
del kwargs['USE_SOFTDELETE_GROUP']
super(ViewTest, self).__init__(*args, **kwargs)
def setUp(self):
super(ViewTest, self).setUp()
self.client = Client()
self.client.login(username="undelete_test",
password="undelete_password")
def test_authorization(self):
rv = self.client.get(reverse("softdelete.changeset.list"))
pk = ChangeSet.objects.latest('created_date').pk
for view_name in [reverse("softdelete.changeset.list"),
reverse("softdelete.changeset.view", args=(pk,)),
reverse("softdelete.changeset.undelete", args=(pk,)),]:
cli2 = Client()
rv = cli2.get(view_name)
self.assertEquals(rv.status_code, 302)
self.assertTrue((settings.DOMAIN + reverse('auth_login')) in rv['Location'])
self.assertEquals(cli2.get(rv['Location']).status_code,
200)
cli2.login(username='undelete_test', password='undelete_password')
rv = cli2.get(view_name)
self.assertEquals(rv.status_code, 200)
def test_undelete(self):
self.cs_count = ChangeSet.objects.count()
self.rs_count = SoftDeleteRecord.objects.count()
self.t_count = TestModelThree.objects.count()
self.tmo3.delete()
self.assertEquals(self.t_count-1, TestModelThree.objects.count())
self.assertEquals(0, self.tmo3.tmos.count())
self.assertEquals(self.cs_count+1, ChangeSet.objects.count())
self.assertEquals(self.rs_count+1, SoftDeleteRecord.objects.count())
rv = self.client.get(reverse("softdelete.changeset.undelete",
args=(ChangeSet.objects.latest("created_date").pk,)))
self.assertEquals(rv.status_code,200)
rv = self.client.post(reverse("softdelete.changeset.undelete",
args=(ChangeSet.objects.latest("created_date").pk,)),
{'action': 'Undelete'})
self.assertEquals(rv.status_code, 302)
rv = self.client.get(rv['Location'])
self.assertEquals(rv.status_code, 200)
self.assertEquals(self.cs_count, ChangeSet.objects.count())
self.assertEquals(self.rs_count, SoftDeleteRecord.objects.count())
self.assertEquals(self.t_count, TestModelThree.objects.count())
self.assertEquals(0, self.tmo3.tmos.count())
class GroupViewTest(ViewTest):
def __init__(self, *args, **kwargs):
super(GroupViewTest, self).__init__(USE_SOFTDELETE_GROUP=True, *args, **kwargs)
| 44.354839
| 92
| 0.641697
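Outside the test harness, the round trip exercised above amounts to soft-deleting an instance and undoing it through the changeset view; a sketch using only names that appear in the tests (test models, URL names and credentials), intended as illustration rather than the library's documented API.
obj = TestModelOne.objects.create(extra_bool=True)
obj.delete()                                     # soft delete: records a ChangeSet and a SoftDeleteRecord
cs = ChangeSet.objects.latest('created_date')
client = Client()
client.login(username='undelete_test', password='undelete_password')
client.post(reverse('softdelete.changeset.undelete', args=(cs.pk,)), {'action': 'Undelete'})  # restores obj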
|
5b7bf668fc19541e9d4117163ae0d302103935f6
| 3,182
|
py
|
Python
|
tests/test_uas.py
|
bbc/connected-data-pseudocone
|
452479cd44fa7d32ecb3d54b801da9024d6984ce
|
[
"MIT"
] | null | null | null |
tests/test_uas.py
|
bbc/connected-data-pseudocone
|
452479cd44fa7d32ecb3d54b801da9024d6984ce
|
[
"MIT"
] | 5
|
2018-07-30T09:01:03.000Z
|
2019-01-16T11:16:39.000Z
|
tests/test_uas.py
|
bbc/connected-data-pseudocone
|
452479cd44fa7d32ecb3d54b801da9024d6984ce
|
[
"MIT"
] | null | null | null |
# from unittest.mock import MagicMock, patch
#
# import pytest
# import requests
# from requests import HTTPError
#
# from app.uas import UASClient
# from app.settings import DEFAULT_UAS_HOST
#
#
# def test_client_init():
# """Test no exception thrown when initialising client."""
# uasclient = UASClient('', '')
#
#
# @patch('app.uas.requests.Session.get')
# def test_client_get_activity_history_session_headers(mock_session_get):
# expected_headers = {
# 'User-Agent': 'python-requests/2.18.4',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept': 'application/json',
# 'Connection': 'keep-alive',
# 'X-API-Key': 'test_api_key',
# 'Host': '',
# 'Cookie': 'ckns_atkn=test_cookie'
# }
# uasclient = UASClient('test_api_key', 'test_host')
# _ = uasclient.get_activity_history('test_activity', 'test_cookie')
# assert uasclient.session.headers == expected_headers
#
#
# def test_client_get_activity_bad_host_fail():
# uasclient = UASClient('', 'http://bad_host')
# with pytest.raises(requests.exceptions.ConnectionError):
# uasclient.get_activity_history('', '')
#
# def test_client_get_activity_bad_url_fail():
# uasclient = UASClient('', 'bad_host')
# with pytest.raises(requests.exceptions.MissingSchema):
# uasclient.get_activity_history('', '')
#
#
# @pytest.mark.integration
# def test_client_get_activity_bad_cookie_fail():
# uasclient = UASClient('bad_cookie', DEFAULT_UAS_HOST)
# with pytest.raises(HTTPError):
# uasclient.get_activity_history('', '')
#
#
# mock_uas_response = {
# 'other_stuff': {},
# 'items': [
# {'activityType': 'plays', 'resourceId': 'p05psmxq', 'resourceType': 'clip', 'resourceDomain': 'radio',
# 'created': '2018-03-26T10:32:58Z', 'action': 'paused',
# 'actionContext': 'urn:bbc:radio:version_offset:p05psmxt#2',
# '@id': 'urn:bbc:radio:clip:p05psmxq'},
# {'activityType': 'plays', 'resourceId': 'p05ysllr', 'resourceType': 'episode', 'resourceDomain': 'tv',
# 'created': '2018-03-18T23:34:24Z', 'action': 'ended',
# 'actionContext': 'urn:bbc:tv:version_offset:p05ysm2y#1959',
# '@id': 'urn:bbc:tv:episode:p05ysllr'}
# ]
# }
#
# expected_get_activity_history_response = mock_uas_response['items']
#
#
# @patch('app.uas.requests.Session.get')
# def test_client_get_activity_history(mock_session_get):
# uasclient = UASClient('test_api_key', 'test_host')
#
# mock_response = MagicMock()
# mock_response.json.return_value = mock_uas_response
#
# mock_session_get.return_value = mock_response
#
# response = uasclient.get_activity_history('', '')
# assert response == expected_get_activity_history_response
#
#
# @pytest.mark.integration
# def test_client_check_uas_connection():
# uasclient = UASClient('test_api_key', DEFAULT_UAS_HOST)
# uasclient.check_uas_connection()
#
#
# def test_client_check_uas_connection_bad_host():
# uasclient = UASClient('test_api_key', 'http://bad_host')
# with pytest.raises(requests.exceptions.ConnectionError):
# uasclient.check_uas_connection()
#
#
#
#
#
#
#
| 32.804124
| 112
| 0.670333
|
319814495d3d65c743285ce192be02c3103a057a
| 56,524
|
py
|
Python
|
src/base/android/jni_generator/jni_generator_tests.py
|
btwiuse/naiveproxy
|
67852b0abc88e59d5853c4a5db47541f298fb469
|
[
"BSD-3-Clause"
] | null | null | null |
src/base/android/jni_generator/jni_generator_tests.py
|
btwiuse/naiveproxy
|
67852b0abc88e59d5853c4a5db47541f298fb469
|
[
"BSD-3-Clause"
] | null | null | null |
src/base/android/jni_generator/jni_generator_tests.py
|
btwiuse/naiveproxy
|
67852b0abc88e59d5853c4a5db47541f298fb469
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for jni_generator.py.
This test suite contains various tests for the JNI generator.
It exercises the low-level parser all the way up to the
code generator and ensures the output matches a golden
file.
"""
from __future__ import print_function
import difflib
import inspect
import optparse
import os
import sys
import unittest
import jni_generator
import jni_registration_generator
from jni_generator import CalledByNative
from jni_generator import IsMainDexJavaClass
from jni_generator import NativeMethod
from jni_generator import Param
from jni_generator import ProxyHelpers
_SCRIPT_NAME = 'base/android/jni_generator/jni_generator.py'
_INCLUDES = ('base/android/jni_generator/jni_generator_helper.h')
_JAVA_SRC_DIR = os.path.join('java', 'src', 'org', 'chromium', 'example',
'jni_generator')
# Set this environment variable in order to regenerate the golden text
# files.
_REBASELINE_ENV = 'REBASELINE'
class TestOptions(object):
"""The mock options object which is passed to the jni_generator.py script."""
def __init__(self):
self.namespace = None
self.script_name = _SCRIPT_NAME
self.includes = _INCLUDES
self.ptr_type = 'long'
self.cpp = 'cpp'
self.javap = 'javap'
self.native_exports_optional = True
self.enable_profiling = False
self.enable_tracing = False
self.use_proxy_hash = False
self.always_mangle = False
class BaseTest(unittest.TestCase):
@staticmethod
def _MergeRegistrationForTests(results,
header_guard='HEADER_GUARD',
namespace='test'):
results.sort(key=lambda d: d['FULL_CLASS_NAME'])
combined_dict = {}
for key in jni_registration_generator.MERGEABLE_KEYS:
combined_dict[key] = ''.join(d.get(key, '') for d in results)
combined_dict['HEADER_GUARD'] = header_guard
combined_dict['NAMESPACE'] = namespace
return combined_dict
def _JoinScriptDir(self, path):
script_dir = os.path.dirname(sys.argv[0])
return os.path.join(script_dir, path)
def _JoinGoldenPath(self, golden_file_name):
return self._JoinScriptDir(os.path.join('golden', golden_file_name))
def _ReadGoldenFile(self, golden_file_name):
golden_file_name = self._JoinGoldenPath(golden_file_name)
if not os.path.exists(golden_file_name):
return None
with open(golden_file_name, 'r') as f:
return f.read()
def _CreateJniHeaderFromFile(self, fname, qualified_clazz, options=None):
with open(self._JoinScriptDir(fname)) as f:
content = f.read()
opts = options
if opts is None:
opts = TestOptions()
jni_from_java = jni_generator.JNIFromJavaSource(content, qualified_clazz,
opts)
return jni_from_java.GetContent()
def AssertObjEquals(self, first, second):
if isinstance(first, str):
return self.assertEqual(first, second)
dict_first = first.__dict__
dict_second = second.__dict__
self.assertEqual(dict_first.keys(), dict_second.keys())
for key, value in dict_first.items():
if (type(value) is list and len(value)
and isinstance(type(value[0]), object)):
self.AssertListEquals(value, second.__getattribute__(key))
else:
actual = second.__getattribute__(key)
self.assertEqual(value, actual,
'Key ' + key + ': ' + str(value) + '!=' + str(actual))
def AssertListEquals(self, first, second):
self.assertEqual(len(first), len(second))
for i in range(len(first)):
if isinstance(first[i], object):
self.AssertObjEquals(first[i], second[i])
else:
self.assertEqual(first[i], second[i])
def AssertTextEquals(self, golden_text, generated_text):
if not self.CompareText(golden_text, generated_text):
self.fail('Golden text mismatch.')
def CompareText(self, golden_text, generated_text):
def FilterText(text):
return [
l.strip() for l in text.split('\n')
if not l.startswith('// Copyright')
]
stripped_golden = FilterText(golden_text)
stripped_generated = FilterText(generated_text)
if stripped_golden == stripped_generated:
return True
print(self.id())
for line in difflib.context_diff(stripped_golden, stripped_generated):
print(line)
print('\n\nGenerated')
print('=' * 80)
print(generated_text)
print('=' * 80)
print('Run with:')
print('REBASELINE=1', sys.argv[0])
print('to regenerate the data files.')
def AssertGoldenTextEquals(self, generated_text, suffix='', golden_file=None):
"""Compares generated text with the corresponding golden_file
By default compares generated_text with the file at
script_dir/golden/{caller_name}[suffix].golden. If the parameter
golden_file is provided it will instead compare the generated text with
script_dir/golden/golden_file."""
# This is the caller test method.
caller = inspect.stack()[1][3]
if golden_file is None:
self.assertTrue(
caller.startswith('test'),
'AssertGoldenTextEquals can only be called from a '
'test* method, not %s' % caller)
golden_file = '%s%s.golden' % (caller, suffix)
golden_text = self._ReadGoldenFile(golden_file)
if os.environ.get(_REBASELINE_ENV):
if golden_text != generated_text:
with open(self._JoinGoldenPath(golden_file), 'w') as f:
f.write(generated_text)
return
# golden_text is None if no file is found. Better to fail than in
# AssertTextEquals so we can give a clearer message.
if golden_text is None:
self.fail(
'Golden file %s does not exist.' % self._JoinGoldenPath(golden_file))
self.AssertTextEquals(golden_text, generated_text)
class TestGenerator(BaseTest):
def testInspectCaller(self):
def willRaise():
# This function can only be called from a test* method.
self.AssertGoldenTextEquals('')
self.assertRaises(AssertionError, willRaise)
def testNatives(self):
test_data = """"
import android.graphics.Bitmap;
import android.view.View;
interface OnFrameAvailableListener {}
private native int nativeInit();
private native void nativeDestroy(int nativeChromeBrowserProvider);
private native long nativeAddBookmark(
int nativeChromeBrowserProvider,
String url, String title, boolean isFolder, long parentId);
private static native String nativeGetDomainAndRegistry(String url);
private static native void nativeCreateHistoricalTabFromState(
byte[] state, int tab_index);
private native byte[] nativeGetStateAsByteArray(View view);
private static native String[] nativeGetAutofillProfileGUIDs();
private native void nativeSetRecognitionResults(
int sessionId, String[] results);
private native long nativeAddBookmarkFromAPI(
int nativeChromeBrowserProvider,
String url, Long created, Boolean isBookmark,
Long date, byte[] favicon, String title, Integer visits);
native int nativeFindAll(String find);
private static native OnFrameAvailableListener nativeGetInnerClass();
private native Bitmap nativeQueryBitmap(
int nativeChromeBrowserProvider,
String[] projection, String selection,
String[] selectionArgs, String sortOrder);
private native void nativeGotOrientation(
int nativeDataFetcherImplAndroid,
double alpha, double beta, double gamma);
private static native Throwable nativeMessWithJavaException(Throwable e);
"""
jni_params = jni_generator.JniParams(
'org/chromium/example/jni_generator/SampleForTests')
jni_params.ExtractImportsAndInnerClasses(test_data)
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(
return_type='int',
static=False,
name='Init',
params=[],
java_class_name=None),
NativeMethod(
return_type='void',
static=False,
name='Destroy',
params=[Param(datatype='int', name='nativeChromeBrowserProvider')],
java_class_name=None),
NativeMethod(
return_type='long',
static=False,
name='AddBookmark',
params=[
Param(datatype='int', name='nativeChromeBrowserProvider'),
Param(datatype='String', name='url'),
Param(datatype='String', name='title'),
Param(datatype='boolean', name='isFolder'),
Param(datatype='long', name='parentId')
],
java_class_name=None),
NativeMethod(
return_type='String',
static=True,
name='GetDomainAndRegistry',
params=[Param(datatype='String', name='url')],
java_class_name=None),
NativeMethod(
return_type='void',
static=True,
name='CreateHistoricalTabFromState',
params=[
Param(datatype='byte[]', name='state'),
Param(datatype='int', name='tab_index')
],
java_class_name=None),
NativeMethod(
return_type='byte[]',
static=False,
name='GetStateAsByteArray',
params=[Param(datatype='View', name='view')],
java_class_name=None),
NativeMethod(
return_type='String[]',
static=True,
name='GetAutofillProfileGUIDs',
params=[],
java_class_name=None),
NativeMethod(
return_type='void',
static=False,
name='SetRecognitionResults',
params=[
Param(datatype='int', name='sessionId'),
Param(datatype='String[]', name='results')
],
java_class_name=None),
NativeMethod(
return_type='long',
static=False,
name='AddBookmarkFromAPI',
params=[
Param(datatype='int', name='nativeChromeBrowserProvider'),
Param(datatype='String', name='url'),
Param(datatype='Long', name='created'),
Param(datatype='Boolean', name='isBookmark'),
Param(datatype='Long', name='date'),
Param(datatype='byte[]', name='favicon'),
Param(datatype='String', name='title'),
Param(datatype='Integer', name='visits')
],
java_class_name=None),
NativeMethod(
return_type='int',
static=False,
name='FindAll',
params=[Param(datatype='String', name='find')],
java_class_name=None),
NativeMethod(
return_type='OnFrameAvailableListener',
static=True,
name='GetInnerClass',
params=[],
java_class_name=None),
NativeMethod(
return_type='Bitmap',
static=False,
name='QueryBitmap',
params=[
Param(datatype='int', name='nativeChromeBrowserProvider'),
Param(datatype='String[]', name='projection'),
Param(datatype='String', name='selection'),
Param(datatype='String[]', name='selectionArgs'),
Param(datatype='String', name='sortOrder'),
],
java_class_name=None),
NativeMethod(
return_type='void',
static=False,
name='GotOrientation',
params=[
Param(datatype='int', name='nativeDataFetcherImplAndroid'),
Param(datatype='double', name='alpha'),
Param(datatype='double', name='beta'),
Param(datatype='double', name='gamma'),
],
java_class_name=None),
NativeMethod(
return_type='Throwable',
static=True,
name='MessWithJavaException',
params=[Param(datatype='Throwable', name='e')],
java_class_name=None)
]
self.AssertListEquals(golden_natives, natives)
h1 = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator(
'',
'org/chromium/TestJni',
natives,
jni_params,
True,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False),
suffix='Registrations')
def testInnerClassNatives(self):
test_data = """
class MyInnerClass {
@NativeCall("MyInnerClass")
private native int nativeInit();
}
"""
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(
return_type='int',
static=False,
name='Init',
params=[],
java_class_name='MyInnerClass')
]
self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesMultiple(self):
test_data = """
class MyInnerClass {
@NativeCall("MyInnerClass")
private native int nativeInit();
}
class MyOtherInnerClass {
@NativeCall("MyOtherInnerClass")
private native int nativeInit();
}
"""
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(
return_type='int',
static=False,
name='Init',
params=[],
java_class_name='MyInnerClass'),
NativeMethod(
return_type='int',
static=False,
name='Init',
params=[],
java_class_name='MyOtherInnerClass')
]
self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
self.AssertGoldenTextEquals(h.GetContent())
def testInnerClassNativesBothInnerAndOuter(self):
test_data = """
class MyOuterClass {
private native int nativeInit();
class MyOtherInnerClass {
@NativeCall("MyOtherInnerClass")
private native int nativeInit();
}
}
"""
natives = jni_generator.ExtractNatives(test_data, 'int')
golden_natives = [
NativeMethod(
return_type='int',
static=False,
name='Init',
params=[],
java_class_name=None),
NativeMethod(
return_type='int',
static=False,
name='Init',
params=[],
java_class_name='MyOtherInnerClass')
]
self.AssertListEquals(golden_natives, natives)
jni_params = jni_generator.JniParams('')
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni',
natives, [], [], jni_params,
TestOptions())
self.AssertGoldenTextEquals(h.GetContent())
h2 = jni_registration_generator.HeaderGenerator(
'',
'org/chromium/TestJni',
natives,
jni_params,
True,
use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False),
suffix='Registrations')
def testCalledByNatives(self):
test_data = """"
import android.graphics.Bitmap;
import android.view.View;
import java.io.InputStream;
import java.util.List;
class InnerClass {}
@CalledByNative
@SomeOtherA
@SomeOtherB
public InnerClass showConfirmInfoBar(int nativeInfoBar,
String buttonOk, String buttonCancel, String title, Bitmap icon) {
InfoBar infobar = new ConfirmInfoBar(nativeInfoBar, mContext,
buttonOk, buttonCancel,
title, icon);
return infobar;
}
@CalledByNative
InnerClass showAutoLoginInfoBar(int nativeInfoBar,
String realm, String account, String args) {
AutoLoginInfoBar infobar = new AutoLoginInfoBar(nativeInfoBar, mContext,
realm, account, args);
if (infobar.displayedAccountCount() == 0)
infobar = null;
return infobar;
}
@CalledByNative("InfoBar")
void dismiss();
@SuppressWarnings("unused")
@CalledByNative
private static boolean shouldShowAutoLogin(View view,
String realm, String account, String args) {
AccountManagerContainer accountManagerContainer =
new AccountManagerContainer((Activity)contentView.getContext(),
realm, account, args);
String[] logins = accountManagerContainer.getAccountLogins(null);
return logins.length != 0;
}
@CalledByNative
static InputStream openUrl(String url) {
return null;
}
@CalledByNative
private void activateHardwareAcceleration(final boolean activated,
final int iPid, final int iType,
final int iPrimaryID, final int iSecondaryID) {
if (!activated) {
return
}
}
@CalledByNative
public static @Status int updateStatus(@Status int status) {
return getAndUpdateStatus(status);
}
@CalledByNativeUnchecked
private void uncheckedCall(int iParam);
@CalledByNative
public byte[] returnByteArray();
@CalledByNative
public boolean[] returnBooleanArray();
@CalledByNative
public char[] returnCharArray();
@CalledByNative
public short[] returnShortArray();
@CalledByNative
public int[] returnIntArray();
@CalledByNative
public long[] returnLongArray();
@CalledByNative
public double[] returnDoubleArray();
@CalledByNative
public Object[] returnObjectArray();
@CalledByNative
public byte[][] returnArrayOfByteArray();
@CalledByNative
public Bitmap.CompressFormat getCompressFormat();
@CalledByNative
public List<Bitmap.CompressFormat> getCompressFormatList();
"""
jni_params = jni_generator.JniParams('org/chromium/Foo')
jni_params.ExtractImportsAndInnerClasses(test_data)
called_by_natives = jni_generator.ExtractCalledByNatives(
jni_params, test_data, always_mangle=False)
golden_called_by_natives = [
CalledByNative(
return_type='InnerClass',
system_class=False,
static=False,
name='showConfirmInfoBar',
method_id_var_name='showConfirmInfoBar',
java_class_name='',
params=[
Param(datatype='int', name='nativeInfoBar'),
Param(datatype='String', name='buttonOk'),
Param(datatype='String', name='buttonCancel'),
Param(datatype='String', name='title'),
Param(datatype='Bitmap', name='icon')
],
env_call=('Object', ''),
unchecked=False,
),
CalledByNative(
return_type='InnerClass',
system_class=False,
static=False,
name='showAutoLoginInfoBar',
method_id_var_name='showAutoLoginInfoBar',
java_class_name='',
params=[
Param(datatype='int', name='nativeInfoBar'),
Param(datatype='String', name='realm'),
Param(datatype='String', name='account'),
Param(datatype='String', name='args')
],
env_call=('Object', ''),
unchecked=False,
),
CalledByNative(
return_type='void',
system_class=False,
static=False,
name='dismiss',
method_id_var_name='dismiss',
java_class_name='InfoBar',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='boolean',
system_class=False,
static=True,
name='shouldShowAutoLogin',
method_id_var_name='shouldShowAutoLogin',
java_class_name='',
params=[
Param(datatype='View', name='view'),
Param(datatype='String', name='realm'),
Param(datatype='String', name='account'),
Param(datatype='String', name='args')
],
env_call=('Boolean', ''),
unchecked=False,
),
CalledByNative(
return_type='InputStream',
system_class=False,
static=True,
name='openUrl',
method_id_var_name='openUrl',
java_class_name='',
params=[Param(datatype='String', name='url')],
env_call=('Object', ''),
unchecked=False,
),
CalledByNative(
return_type='void',
system_class=False,
static=False,
name='activateHardwareAcceleration',
method_id_var_name='activateHardwareAcceleration',
java_class_name='',
params=[
Param(datatype='boolean', name='activated'),
Param(datatype='int', name='iPid'),
Param(datatype='int', name='iType'),
Param(datatype='int', name='iPrimaryID'),
Param(datatype='int', name='iSecondaryID'),
],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='int',
system_class=False,
static=True,
name='updateStatus',
method_id_var_name='updateStatus',
java_class_name='',
params=[
Param(annotations=['@Status'], datatype='int', name='status')
],
env_call=('Integer', ''),
unchecked=False,
),
CalledByNative(
return_type='void',
system_class=False,
static=False,
name='uncheckedCall',
method_id_var_name='uncheckedCall',
java_class_name='',
params=[Param(datatype='int', name='iParam')],
env_call=('Void', ''),
unchecked=True,
),
CalledByNative(
return_type='byte[]',
system_class=False,
static=False,
name='returnByteArray',
method_id_var_name='returnByteArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='boolean[]',
system_class=False,
static=False,
name='returnBooleanArray',
method_id_var_name='returnBooleanArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='char[]',
system_class=False,
static=False,
name='returnCharArray',
method_id_var_name='returnCharArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='short[]',
system_class=False,
static=False,
name='returnShortArray',
method_id_var_name='returnShortArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='int[]',
system_class=False,
static=False,
name='returnIntArray',
method_id_var_name='returnIntArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='long[]',
system_class=False,
static=False,
name='returnLongArray',
method_id_var_name='returnLongArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='double[]',
system_class=False,
static=False,
name='returnDoubleArray',
method_id_var_name='returnDoubleArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='Object[]',
system_class=False,
static=False,
name='returnObjectArray',
method_id_var_name='returnObjectArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='byte[][]',
system_class=False,
static=False,
name='returnArrayOfByteArray',
method_id_var_name='returnArrayOfByteArray',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='Bitmap.CompressFormat',
system_class=False,
static=False,
name='getCompressFormat',
method_id_var_name='getCompressFormat',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='List<Bitmap.CompressFormat>',
system_class=False,
static=False,
name='getCompressFormatList',
method_id_var_name='getCompressFormatList',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
]
self.AssertListEquals(golden_called_by_natives, called_by_natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', [],
called_by_natives, [], jni_params,
TestOptions())
self.AssertGoldenTextEquals(h.GetContent())
def testCalledByNativeParseError(self):
try:
jni_params = jni_generator.JniParams('')
jni_generator.ExtractCalledByNatives(
jni_params,
"""
@CalledByNative
public static int foo(); // This one is fine
@CalledByNative
scooby doo
""",
always_mangle=False)
self.fail('Expected a ParseError')
except jni_generator.ParseError as e:
self.assertEqual(('@CalledByNative', 'scooby doo'), e.context_lines)
def testFullyQualifiedClassName(self):
contents = """
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.content.browser;
import org.chromium.base.BuildInfo;
"""
self.assertEqual(
'org/chromium/content/browser/Foo',
jni_generator.ExtractFullyQualifiedJavaClassName(
'org/chromium/content/browser/Foo.java', contents))
self.assertEqual(
'org/chromium/content/browser/Foo',
jni_generator.ExtractFullyQualifiedJavaClassName(
'frameworks/Foo.java', contents))
self.assertRaises(SyntaxError,
jni_generator.ExtractFullyQualifiedJavaClassName,
'com/foo/Bar', 'no PACKAGE line')
def testMethodNameMangling(self):
jni_params = jni_generator.JniParams('')
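# Mangled names append the return type and then each parameter type to the method name,
# e.g. 'closeV' is close() returning void and 'readI_AB_I_I' is read(byte[], int, int) returning int.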
self.assertEqual(
'closeV',
jni_generator.GetMangledMethodName(jni_params, 'close', [], 'void'))
self.assertEqual(
'readI_AB_I_I',
jni_generator.GetMangledMethodName(jni_params, 'read', [
Param(name='p1', datatype='byte[]'),
Param(name='p2', datatype='int'),
Param(name='p3', datatype='int'),
], 'int'))
self.assertEqual(
'openJIIS_JLS',
jni_generator.GetMangledMethodName(jni_params, 'open', [
Param(name='p1', datatype='java/lang/String'),
], 'java/io/InputStream'))
def testMethodNameAlwaysMangle(self):
test_data = """
import f.o.o.Bar;
import f.o.o.Baz;
class Clazz {
@CalledByNative
public Baz methodz(Bar bar) {
return null;
}
}
"""
jni_params = jni_generator.JniParams('org/chromium/Foo')
jni_params.ExtractImportsAndInnerClasses(test_data)
called_by_natives = jni_generator.ExtractCalledByNatives(
jni_params, test_data, always_mangle=True)
self.assertEqual(1, len(called_by_natives))
method = called_by_natives[0]
self.assertEqual('methodzFOOB_FOOB', method.method_id_var_name)
def testFromJavaPGenerics(self):
contents = """
public abstract class java.util.HashSet<T> extends java.util.AbstractSet<E>
implements java.util.Set<E>, java.lang.Cloneable, java.io.Serializable {
public void dummy();
Signature: ()V
public java.lang.Class<?> getClass();
Signature: ()Ljava/lang/Class<*>;
}
"""
jni_from_javap = jni_generator.JNIFromJavaP(
contents.split('\n'), TestOptions())
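# Both dummy() and getClass() should be picked up even though the class declaration uses generics.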
self.assertEqual(2, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testSnippetJavap6_7_8(self):
content_javap6 = """
public class java.util.HashSet {
public boolean add(java.lang.Object);
Signature: (Ljava/lang/Object;)Z
}
"""
content_javap7 = """
public class java.util.HashSet {
public boolean add(E);
Signature: (Ljava/lang/Object;)Z
}
"""
content_javap8 = """
public class java.util.HashSet {
public boolean add(E);
descriptor: (Ljava/lang/Object;)Z
}
"""
jni_from_javap6 = jni_generator.JNIFromJavaP(
content_javap6.split('\n'), TestOptions())
jni_from_javap7 = jni_generator.JNIFromJavaP(
content_javap7.split('\n'), TestOptions())
jni_from_javap8 = jni_generator.JNIFromJavaP(
content_javap8.split('\n'), TestOptions())
self.assertTrue(jni_from_javap6.GetContent())
self.assertTrue(jni_from_javap7.GetContent())
self.assertTrue(jni_from_javap8.GetContent())
# Ensure the javap7 is correctly parsed and uses the Signature field rather
# than the "E" parameter.
self.AssertTextEquals(jni_from_javap6.GetContent(),
jni_from_javap7.GetContent())
# Ensure the javap8 is correctly parsed and uses the descriptor field.
self.AssertTextEquals(jni_from_javap7.GetContent(),
jni_from_javap8.GetContent())
def testFromJavaP(self):
contents = self._ReadGoldenFile('testInputStream.javap')
jni_from_javap = jni_generator.JNIFromJavaP(
contents.split('\n'), TestOptions())
self.assertEqual(10, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testConstantsFromJavaP(self):
for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']:
contents = self._ReadGoldenFile(f)
jni_from_javap = jni_generator.JNIFromJavaP(
contents.split('\n'), TestOptions())
self.assertEqual(86, len(jni_from_javap.called_by_natives))
self.AssertGoldenTextEquals(jni_from_javap.GetContent())
def testREForNatives(self):
# We should not match "native SyncSetupFlow" inside the comment.
test_data = """
/**
* Invoked when the setup process is complete so we can disconnect from the
* private native void nativeSyncSetupFlowHandler();.
*/
public void destroy() {
Log.v(TAG, "Destroying native SyncSetupFlow");
if (mNativeSyncSetupFlow != 0) {
nativeSyncSetupEnded(mNativeSyncSetupFlow);
mNativeSyncSetupFlow = 0;
}
}
private native void nativeSyncSetupEnded(
int nativeAndroidSyncSetupFlowHandler);
"""
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'foo/bar', TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testRaisesOnNonJNIMethod(self):
test_data = """
class MyInnerClass {
private int Foo(int p0) {
}
}
"""
self.assertRaises(SyntaxError, jni_generator.JNIFromJavaSource, test_data,
'foo/bar', TestOptions())
def testJniSelfDocumentingExample(self):
generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForTests.java'),
'org/chromium/example/jni_generator/SampleForTests')
self.AssertGoldenTextEquals(
generated_text, golden_file='SampleForTests_jni.golden')
def testNoWrappingPreprocessorLines(self):
test_data = """
package com.google.lookhowextremelylongiam.snarf.icankeepthisupallday;
class ReallyLongClassNamesAreAllTheRage {
private static native int nativeTest();
}
"""
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, ('com/google/lookhowextremelylongiam/snarf/'
'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'),
TestOptions())
jni_lines = jni_from_java.GetContent().split('\n')
line = next(
line for line in jni_lines if line.lstrip().startswith('#ifndef'))
self.assertTrue(
len(line) > 80, ('Expected #ifndef line to be > 80 chars: ', line))
def testImports(self):
import_header = """
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.content.app;
import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.graphics.SurfaceTexture;
import android.os.Bundle;
import android.os.IBinder;
import android.os.ParcelFileDescriptor;
import android.os.Process;
import android.os.RemoteException;
import android.util.Log;
import android.view.Surface;
import java.util.ArrayList;
import org.chromium.base.annotations.CalledByNative;
import org.chromium.base.annotations.JNINamespace;
import org.chromium.content.app.ContentMain;
import org.chromium.content.browser.SandboxedProcessConnection;
import org.chromium.content.common.ISandboxedProcessCallback;
import org.chromium.content.common.ISandboxedProcessService;
import org.chromium.content.common.WillNotRaise.AnException;
import org.chromium.content.common.WillRaise.AnException;
import static org.chromium.Bar.Zoo;
class Foo {
public static class BookmarkNode implements Parcelable {
}
public interface PasswordListObserver {
}
}
"""
jni_params = jni_generator.JniParams('org/chromium/content/app/Foo')
jni_params.ExtractImportsAndInnerClasses(import_header)
self.assertTrue('Lorg/chromium/content/common/ISandboxedProcessService' in
jni_params._imports)
self.assertTrue('Lorg/chromium/Bar/Zoo' in jni_params._imports)
self.assertTrue('Lorg/chromium/content/app/Foo$BookmarkNode' in jni_params.
_inner_classes)
self.assertTrue('Lorg/chromium/content/app/Foo$PasswordListObserver' in
jni_params._inner_classes)
self.assertEqual('Lorg/chromium/content/app/ContentMain$Inner;',
jni_params.JavaToJni('ContentMain.Inner'))
self.assertRaises(SyntaxError, jni_params.JavaToJni, 'AnException')
def testJniParamsJavaToJni(self):
jni_params = jni_generator.JniParams('')
self.AssertTextEquals('I', jni_params.JavaToJni('int'))
self.AssertTextEquals('[B', jni_params.JavaToJni('byte[]'))
self.AssertTextEquals('[Ljava/nio/ByteBuffer;',
jni_params.JavaToJni('java/nio/ByteBuffer[]'))
def testNativesLong(self):
test_options = TestOptions()
test_options.ptr_type = 'long'
test_data = """
private native void nativeDestroy(long nativeChromeBrowserProvider);
"""
jni_params = jni_generator.JniParams('')
jni_params.ExtractImportsAndInnerClasses(test_data)
natives = jni_generator.ExtractNatives(test_data, test_options.ptr_type)
golden_natives = [
NativeMethod(
return_type='void',
static=False,
name='Destroy',
params=[Param(datatype='long', name='nativeChromeBrowserProvider')],
java_class_name=None,
ptr_type=test_options.ptr_type),
]
self.AssertListEquals(golden_natives, natives)
h = jni_generator.InlHeaderFileGenerator(
'', 'org/chromium/TestJni', natives, [], [], jni_params, test_options)
self.AssertGoldenTextEquals(h.GetContent())
def testMainDexAnnotation(self):
mainDexEntries = [
'@MainDex public class Test {',
'@MainDex public class Test{',
"""@MainDex
public class Test {
""",
"""@MainDex public class Test
{
""",
'@MainDex /* This class is a test */ public class Test {',
'@MainDex public class Test implements java.io.Serializable {',
'@MainDex public class Test implements java.io.Serializable, Bidule {',
'@MainDex public class Test extends BaseTest {',
"""@MainDex
public class Test extends BaseTest implements Bidule {
""",
"""@MainDex
public class Test extends BaseTest implements Bidule, Machin, Chose {
""",
"""@MainDex
public class Test implements Testable<java.io.Serializable> {
""",
'@MainDex public class Test implements Testable<java.io.Serializable> '
' {',
'@a.B @MainDex @C public class Test extends Testable<Serializable> {',
"""public class Test extends Testable<java.io.Serializable> {
@MainDex void func() {}
""",
]
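# Every snippet above should be detected as @MainDex-annotated, whether the annotation is on the class or on a member.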
for entry in mainDexEntries:
self.assertEqual(True, IsMainDexJavaClass(entry), entry)
def testNoMainDexAnnotation(self):
noMainDexEntries = [
'public class Test {', '@NotMainDex public class Test {',
'// @MainDex public class Test {', '/* @MainDex */ public class Test {',
'public class Test implements java.io.Serializable {',
'@MainDexNot public class Test {',
'public class Test extends BaseTest {'
]
for entry in noMainDexEntries:
self.assertEqual(False, IsMainDexJavaClass(entry))
def testNativeExportsOnlyOption(self):
test_data = """
package org.chromium.example.jni_generator;
/** The pointer to the native Test. */
long nativeTest;
class Test {
private static native int nativeStaticMethod(long nativeTest, int arg1);
private native int nativeMethod(long nativeTest, int arg1);
@CalledByNative
private void testMethodWithParam(int iParam);
@CalledByNative
private String testMethodWithParamAndReturn(int iParam);
@CalledByNative
private static int testStaticMethodWithParam(int iParam);
@CalledByNative
private static double testMethodWithNoParam();
@CalledByNative
private static String testStaticMethodWithNoParam();
class MyInnerClass {
@NativeCall("MyInnerClass")
private native int nativeInit();
}
class MyOtherInnerClass {
@NativeCall("MyOtherInnerClass")
private native int nativeInit();
}
}
"""
options = TestOptions()
options.native_exports_optional = False
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/example/jni_generator/SampleForTests', options)
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testOuterInnerRaises(self):
test_data = """
package org.chromium.media;
@CalledByNative
static int getCaptureFormatWidth(VideoCapture.CaptureFormat format) {
return format.getWidth();
}
"""
def willRaise():
jni_generator.JNIFromJavaSource(test_data,
'org/chromium/media/VideoCaptureFactory',
TestOptions())
self.assertRaises(SyntaxError, willRaise)
def testSingleJNIAdditionalImport(self):
test_data = """
package org.chromium.foo;
@JNIAdditionalImport(Bar.class)
class Foo {
@CalledByNative
private static void calledByNative(Bar.Callback callback) {
}
private static native void nativeDoSomething(Bar.Callback callback);
}
"""
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testMultipleJNIAdditionalImport(self):
test_data = """
package org.chromium.foo;
@JNIAdditionalImport({Bar1.class, Bar2.class})
class Foo {
@CalledByNative
private static void calledByNative(Bar1.Callback callback1,
Bar2.Callback callback2) {
}
private static native void nativeDoSomething(Bar1.Callback callback1,
Bar2.Callback callback2);
}
"""
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testTracing(self):
test_data = """
package org.chromium.foo;
@JNINamespace("org::chromium_foo")
class Foo {
@CalledByNative
Foo();
@CalledByNative
void callbackFromNative();
native void nativeInstanceMethod(long nativeInstance);
static native void nativeStaticMethod();
}
"""
options_with_tracing = TestOptions()
options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing)
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testStaticBindingCaller(self):
test_data = """
package org.chromium.foo;
class Bar {
static native void nativeShouldBindCaller(Object caller);
static native void nativeShouldBindCaller(Object caller, int a);
static native void nativeFoo(long nativeNativeObject, Bar caller);
static native void nativeFoo(long nativeNativeObject, Bar caller, int a);
native void nativeCallNativeMethod(long nativePtr);
@NativeClassQualifiedName("Foo::Bar")
native void nativeCallWithQualifiedObject(long nativePtr);
}
"""
jni_from_java = jni_generator.JNIFromJavaSource(test_data,
'org/chromium/foo/Foo',
TestOptions())
self.AssertGoldenTextEquals(jni_from_java.GetContent())
class ProxyTestGenerator(BaseTest):
def _BuildRegDictFromSample(self, options=None):
if options is None:
options = TestOptions()
path = self._JoinScriptDir(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'))
reg_dict = jni_registration_generator._DictForPath(path)
reg_dict = self._MergeRegistrationForTests([reg_dict])
return reg_dict
def testProxyNativesWithNatives(self):
test_data = """
package org.chromium.foo;
class Foo {
@NativeMethods
interface Natives {
void foo();
String bar(String s, int y, char x, short z);
String[] foobar(String[] a);
void baz(long nativePtr, BazClass caller);
void fooBar(long nativePtr);
}
void justARegularFunction();
native void nativeInstanceMethod(long nativeInstance);
static native void nativeStaticMethod();
}
"""
options_with_tracing = TestOptions()
options_with_tracing.enable_tracing = True
jni_from_java = jni_generator.JNIFromJavaSource(
test_data, 'org/chromium/foo/Foo', options_with_tracing)
self.AssertGoldenTextEquals(jni_from_java.GetContent())
def testEscapingProxyNatives(self):
test_data = """
class SampleProxyJni {
@NativeMethods
interface Natives {
void foo_bar();
void foo__bar();
}
}
"""
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long')
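# JNI name mangling escapes '_' as '_1', so foo_bar and foo__bar yield distinct proxy symbols.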
golden_natives = [
NativeMethod(
return_type='void',
static=True,
name='foo_bar',
params=[],
java_class_name=None,
is_proxy=True,
proxy_name='org_chromium_example_SampleProxyJni_foo_1bar'),
NativeMethod(
return_type='void',
static=True,
name='foo__bar',
params=[],
java_class_name=None,
is_proxy=True,
proxy_name='org_chromium_example_SampleProxyJni_foo_1_1bar'),
]
self.AssertListEquals(natives, golden_natives)
def testProxyNativesMainDex(self):
test_data = """
@MainDex
class Foo() {
@NativeMethods
interface Natives {
void thisismaindex();
}
void dontmatchme();
public static void metoo();
public static native void this_is_a_non_proxy_native();
}
"""
non_main_dex_test_data = """
class Bar() {
@NativeMethods
interface Natives {
void foo();
void bar();
}
}
"""
qualified_clazz = 'test/foo/Foo'
jni_params = TestOptions()
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long')
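# Only methods declared inside the @NativeMethods interface become proxy natives;
# dontmatchme(), metoo() and the non-proxy native above are ignored.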
golden_natives = [
NativeMethod(
return_type='void',
static=True,
name='thisismaindex',
params=[],
java_class_name=None,
is_proxy=True,
proxy_name='test_foo_Foo_thisismaindex'),
]
self.AssertListEquals(natives, golden_natives)
jni_params = jni_generator.JniParams(qualified_clazz)
main_dex_header = jni_registration_generator.HeaderGenerator(
'',
qualified_clazz,
natives,
jni_params,
main_dex=True,
use_proxy_hash=False).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header])
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False))
other_qualified_clazz = 'test/foo/Bar'
other_natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
other_qualified_clazz, non_main_dex_test_data, 'long')
jni_params = jni_generator.JniParams(other_qualified_clazz)
non_main_dex_header = jni_registration_generator.HeaderGenerator(
'',
other_qualified_clazz,
other_natives,
jni_params,
main_dex=False,
use_proxy_hash=False).Generate()
content = TestGenerator._MergeRegistrationForTests([main_dex_header] +
[non_main_dex_header])
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, use_hash=False),
'AndNonMainDex')
def testProxyNatives(self):
test_data = """
class SampleProxyJni {
private void do_not_match();
@NativeMethods
interface Natives {
@NativeClassQualifiedName("FooAndroid::BarDelegate")
void foo(long nativePtr);
int bar(int x, int y);
String foobar(String x, String y);
}
void dontmatchme();
public static void metoo();
public static native void this_is_a_non_proxy_native();
}
"""
bad_spaced_test_data = """
class SampleProxyJni{
@NativeMethods interface
Natives
{ @NativeClassQualifiedName("FooAndroid::BarDelegate") void
foo(long nativePtr);
int bar(int
x, int y); String
foobar(String x, String y);
}
}
"""
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long')
bad_spacing_natives = jni_generator.ProxyHelpers \
.ExtractStaticProxyNatives(qualified_clazz, bad_spaced_test_data, 'long')
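# The cleanly formatted and the badly spaced interfaces should parse to identical native methods.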
golden_natives = [
NativeMethod(
return_type='void',
static=True,
name='foo',
native_class_name='FooAndroid::BarDelegate',
params=[Param(datatype='long', name='nativePtr')],
java_class_name=None,
is_proxy=True,
proxy_name='org_chromium_example_SampleProxyJni_foo',
ptr_type='long'),
NativeMethod(
return_type='int',
static=True,
name='bar',
params=[
Param(datatype='int', name='x'),
Param(datatype='int', name='y')
],
java_class_name=None,
is_proxy=True,
proxy_name='org_chromium_example_SampleProxyJni_bar'),
NativeMethod(
return_type='String',
static=True,
name='foobar',
params=[
Param(datatype='String', name='x'),
Param(datatype='String', name='y')
],
java_class_name=None,
is_proxy=True,
proxy_name='org_chromium_example_SampleProxyJni_foobar'),
]
self.AssertListEquals(golden_natives, natives)
self.AssertListEquals(golden_natives, bad_spacing_natives)
jni_params = jni_generator.JniParams(qualified_clazz)
h1 = jni_generator.InlHeaderFileGenerator('', qualified_clazz, natives, [],
[], jni_params, TestOptions())
self.AssertGoldenTextEquals(h1.GetContent())
h2 = jni_registration_generator.HeaderGenerator(
'', qualified_clazz, natives, jni_params, False, use_proxy_hash=False)
content = TestGenerator._MergeRegistrationForTests([h2.Generate()])
proxy_opts = jni_registration_generator.ProxyOptions()
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(content, proxy_opts),
suffix='Java')
self.AssertGoldenTextEquals(
jni_registration_generator.CreateFromDict(content, proxy_opts.use_hash),
suffix='Registrations')
def testProxyHashedExample(self):
opts = TestOptions()
opts.use_proxy_hash = True
path = os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java')
generated_text = self._CreateJniHeaderFromFile(
path, 'org/chromium/example/jni_generator/SampleForAnnotationProcessor',
opts)
self.AssertGoldenTextEquals(
generated_text,
golden_file='HashedSampleForAnnotationProcessor_jni.golden')
reg_dict = jni_registration_generator._DictForPath(
self._JoinScriptDir(path))
reg_dict = self._MergeRegistrationForTests([reg_dict])
proxy_opts = jni_registration_generator.ProxyOptions()
self.AssertGoldenTextEquals(
jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_opts),
golden_file='HashedSampleForAnnotationProcessorGenJni.golden')
def testProxyJniExample(self):
generated_text = self._CreateJniHeaderFromFile(
os.path.join(_JAVA_SRC_DIR, 'SampleForAnnotationProcessor.java'),
'org/chromium/example/jni_generator/SampleForAnnotationProcessor')
self.AssertGoldenTextEquals(
generated_text, golden_file='SampleForAnnotationProcessor_jni.golden')
def testGenJniFlags(self):
reg_dict = self._BuildRegDictFromSample()
proxy_options = jni_registration_generator.ProxyOptions()
content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options)
self.AssertGoldenTextEquals(content, 'Disabled')
proxy_options = jni_registration_generator.ProxyOptions(enable_mocks=True)
content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options)
self.AssertGoldenTextEquals(content, 'MocksEnabled')
proxy_options = jni_registration_generator.ProxyOptions(
enable_mocks=True, require_mocks=True)
content = jni_registration_generator.CreateProxyJavaFromDict(
reg_dict, proxy_options)
self.AssertGoldenTextEquals(content, 'MocksRequired')
def testProxyTypeInfoPreserved(self):
test_data = """
package org.chromium.foo;
class Foo {
@NativeMethods
interface Natives {
char[][] fooProxy(byte[][] b);
SomeJavaType[][] barProxy(String[][] s, short z);
String[] foobarProxy(String[] a, int[][] b);
byte[][] bazProxy(long nativePtr, BazClass caller,
SomeJavaType[][] someObjects);
}
}
"""
natives = ProxyHelpers.ExtractStaticProxyNatives('org/chromium/foo/FooJni',
test_data, 'long')
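# Array dimensions are preserved, while non-primitive, non-String types such as SomeJavaType and BazClass are reduced to Object.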
golden_natives = [
NativeMethod(
static=True,
java_class_name=None,
return_type='char[][]',
name='fooProxy',
params=[Param(datatype='byte[][]', name='b')],
is_proxy=True,
proxy_name='org_chromium_foo_FooJni_fooProxy'),
NativeMethod(
static=True,
java_class_name=None,
return_type='Object[][]',
name='barProxy',
params=[
Param(datatype='String[][]', name='s'),
Param(datatype='short', name='z')
],
is_proxy=True,
proxy_name='org_chromium_foo_FooJni_barProxy'),
NativeMethod(
static=True,
java_class_name=None,
return_type='String[]',
name='foobarProxy',
params=[
Param(datatype='String[]', name='a'),
Param(datatype='int[][]', name='b')
],
is_proxy=True,
proxy_name='org_chromium_foo_FooJni_foobarProxy'),
NativeMethod(
static=True,
java_class_name=None,
return_type='byte[][]',
name='bazProxy',
params=[
Param(datatype='long', name='nativePtr'),
Param(datatype='Object', name='caller'),
Param(datatype='Object[][]', name='someObjects')
],
is_proxy=True,
proxy_name='org_chromium_foo_FooJni_bazProxy',
ptr_type='long')
]
self.AssertListEquals(golden_natives, natives)
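# Creates the stamp file (and any missing parent directories) and updates its mtime, so callers can record a successful run.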
def TouchStamp(stamp_path):
dir_name = os.path.dirname(stamp_path)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
with open(stamp_path, 'a'):
os.utime(stamp_path, None)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option(
'-v', '--verbose', action='store_true', help='Whether to output details.')
options, _ = parser.parse_args(argv[1:])
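# Run the unittest suite without forwarding our own flags; touch the stamp only when every test passed,
# and return a non-zero exit code otherwise.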
test_result = unittest.main(
argv=argv[0:1], exit=False, verbosity=(2 if options.verbose else 1))
if test_result.result.wasSuccessful() and options.stamp:
TouchStamp(options.stamp)
return not test_result.result.wasSuccessful()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 34.215496 | 80 | 0.622231 |