| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| f9c51737f11d6cbc0b18591ba15b3cd405064add | 1,080 | py | Python | nemnis/client.py | criptalia/nis-python-client | 2d09340850770768a006680287cf11b6e5bfedb4 | ["MIT"] | 17 | 2017-12-25T11:58:18.000Z | 2021-03-24T14:50:42.000Z | nemnis/client.py | criptalia/nis-python-client | 2d09340850770768a006680287cf11b6e5bfedb4 | ["MIT"] | 3 | 2017-12-20T14:13:43.000Z | 2018-01-02T15:48:24.000Z | nemnis/client.py | criptalia/nis-python-client | 2d09340850770768a006680287cf11b6e5bfedb4 | ["MIT"] | 16 | 2018-01-03T04:54:31.000Z | 2022-02-21T10:10:36.000Z |
__copyright__ = "2017 Oleksii Semeshchuk"
__license__ = "License: MIT, see LICENSE."
__version__ = "0.0.9"
__author__ = "Oleksii Semeshchuk"
__email__ = "semolex@live.com"
'''
client
------
Module for the synchronous NIS client.
'''
import requests
from .core import AbstractClient, LOCALHOST_ENDPOINT
__all__ = [
'Client',
]
class Client(AbstractClient):
"""
Synchronous variant of the main API client.
Uses a session for connection pooling.
"""
def __init__(self, endpoint=LOCALHOST_ENDPOINT):
"""
Initialize client.
:param endpoint: address of the NIS.
"""
super(Client, self).__init__(endpoint)
self.session = requests.Session()
def call(self, method, name, params=None, payload=None, **kwds):
"""
Make calls to the API via HTTP methods and passed params.
:return: response object
"""
url = self.endpoint + '/' + name
return self.session.request(method, url, params=params,
json=payload, **kwds)
| 24.545455 | 68 | 0.616667 |
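The `Client` above is a thin wrapper over `requests.Session`. A minimal usage sketch, assuming the module is importable as `nemnis.client` and a NIS node is reachable; the endpoint URL and the `heartbeat` route below are illustrative assumptions, not values from the source:

```python
# Hedged usage sketch for the Client shown above (not part of the dataset row).
# The endpoint URL and the 'heartbeat' route name are assumptions for illustration.
from nemnis.client import Client

nis = Client('http://127.0.0.1:7890')        # or Client() for the default LOCALHOST_ENDPOINT
resp = nis.call('GET', 'heartbeat')          # returns a plain requests.Response
print(resp.status_code, resp.text)
```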
| 232e813bbaf034a14f2d3b8ac3f99bb5bc50cda0 | 135 | py | Python | tests/inputs-external/from_numpy_docs/np_add.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | ["MIT"] | 4 | 2019-10-06T18:01:24.000Z | 2020-07-03T05:27:35.000Z | tests/inputs-external/from_numpy_docs/np_add.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | ["MIT"] | 5 | 2021-06-07T15:50:04.000Z | 2021-06-07T15:50:06.000Z | tests/inputs-external/from_numpy_docs/np_add.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | ["MIT"] | null | null | null |
from numpy import *
add(array([-1.2, 1.2]), array([1,3]))
# array([-0.2, 4.2])
array([-1.2, 1.2]) + array([1,3])
# array([-0.2, 4.2])
| 19.285714 | 37 | 0.503704 |
| b69c35896badd60ea42a14a4a1815a1e281dbdfa | 13,026 | py | Python | rdr_service/services/google_sheets_client.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | ["BSD-3-Clause"] | 39 | 2017-10-13T19:16:27.000Z | 2021-09-24T16:58:21.000Z | rdr_service/services/google_sheets_client.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | ["BSD-3-Clause"] | 312 | 2017-09-08T15:42:13.000Z | 2022-03-23T18:21:40.000Z | rdr_service/services/google_sheets_client.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | ["BSD-3-Clause"] | 19 | 2017-09-15T13:58:00.000Z | 2022-02-07T18:33:20.000Z |
import backoff
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.service_account import ServiceAccountCredentials
import socket
from rdr_service.services.gcp_utils import gcp_get_iam_service_key_info
class GoogleSheetsClient:
"""
Allows for interacting with a spreadsheet in google drive. This class is designed to be used as a context manager
and requires that:
- A service account (with a json keyfile) is authenticated
- The service account has the correct permissions to edit the google spreadsheet
Please carefully verify that this works for your purpose if you re-use this. There are some things that don't
currently work (such as formula manipulation and making new tabs).
"""
def __init__(self, spreadsheet_id, service_key_id, tab_offsets=None):
"""
:param spreadsheet_id: Google Drive id of the spreadsheet.
:param service_key_id: Key id for the service account used.
:type tab_offsets: Dictionary specifying tab names and offsets for them (defined in Google Sheet cell
notation such as B4). Giving a cell value will specify that any changes for that tab use that cell
as the origin. So with an origin of B4 an update to C5 would be given as row 1 and column 1.
Used to prevent updating headers in the target spreadsheet.
WARNING: Does not support columns past Z
"""
# Load credentials from service key file
self.service_key_id = service_key_id
self._spreadsheet_id = spreadsheet_id
self._default_tab_id = None
self._tabs = None
self._empty_cell_value = ''
self._tab_offsets = {tab_name: {
'row': int(offset[1:]) - 1, # convert row number specified in a system of counting from 1
'col': ord(offset[:1].upper()) - ord('A'), # Get column number (A = 0, B = 1, ...)
'offset_str': offset
} for tab_name, offset in tab_offsets.items()} if tab_offsets else {}
def _build_service(self):
service_key_info = gcp_get_iam_service_key_info(self.service_key_id)
api_credentials = ServiceAccountCredentials.from_json_keyfile_name(service_key_info['key_path'])
# The Google API client uses sockets, and the requests can take longer than the default timeout.
# The proposed solution is to increase the default timeout manually
# https://github.com/googleapis/google-api-python-client/issues/632
# The socket seems to be created when calling discover.build, so this temporarily increases the timeout for
# new sockets when the Google service creates its socket.
default_socket_timeout = socket.getdefaulttimeout()
num_seconds_in_five_minutes = 300
socket.setdefaulttimeout(num_seconds_in_five_minutes)
# Set up for being able to interact with the sheet in Drive
sheets_api_service = discovery.build('sheets', 'v4', credentials=api_credentials)
# Set the timeout back for anything else in the code that would use sockets
socket.setdefaulttimeout(default_socket_timeout)
return sheets_api_service
def __enter__(self):
self.download_values()
return self
def __exit__(self, *_):
self.upload_values()
@classmethod
def _initialize_empty_tab(cls):
return []
def _get_offset_row_col(self, tab_id):
tab_offset_data = self._tab_offsets.get(tab_id, {
'row': 0,
'col': 0
})
return tab_offset_data['row'], tab_offset_data['col']
def _get_offset_string(self, tab_id):
tab_offset_data = self._tab_offsets.get(tab_id, {
'offset_str': 'A1'
})
return tab_offset_data['offset_str']
@backoff.on_exception(backoff.constant, HttpError, max_tries=4, jitter=None, interval=30)
def download_values(self):
"""
Retrieve the values as they currently are in google drive.
Note: this will overwrite any changes that have been made to this instance of the document using `update_cell`.
:return: None
"""
self._tabs = {}
# API call documented at https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get
request = self._build_service().spreadsheets().get(spreadsheetId=self._spreadsheet_id, includeGridData=True)
response = request.execute()
# Parse the retrieved spreadsheet
tab_data = response['sheets']
for tab in tab_data:
tab_id = tab['properties'].get('title')
# Set the default tab to the first tab
if self._default_tab_id is None:
self._default_tab_id = tab_id
# Initialize the internal tab structure and parse the values from the response
self._tabs[tab_id] = self._initialize_empty_tab()
tab_grid_data = tab['data'][0].get('rowData', [])
for row_number, row_data in enumerate(tab_grid_data):
row_values = row_data.get('values')
if row_values:
for col_number, cell_data in enumerate(row_values):
row_offset, col_offset = self._get_offset_row_col(tab_id)
if row_number >= row_offset and col_number >= col_offset:
cell_value = cell_data.get('formattedValue', self._empty_cell_value)
self.update_cell(row_number - row_offset, col_number - col_offset, cell_value, tab_id)
def set_current_tab(self, tab_id):
"""
Change the default tab. Used to make updating multiple fields on one tab cleaner
(so the tab id doesn't need to be given with the location for each cell value).
:param tab_id: Name of the tab to use as the default.
:return: None
"""
self._default_tab_id = tab_id
def update_cell(self, row: int, col: int, value: str, tab_id=None):
"""
Change the value of a cell.
Any changes made will be stored locally until the next call to `upload_values`
(or when the context ends).
:param row: row number of the cell, starting from 0 at the top of the spreadsheet
:param col: column number of the cell, starting from 0 at the left of the spreadsheet
:param value: value to store
:param tab_id: Name of the tab to modify. The default tab is used if this parameter isn't provided.
:return: None
"""
if not isinstance(col, int):
col = int(col)
values_grid = self._tabs.get(tab_id or self._default_tab_id)
# Increase the number of rows we have if the caller is setting a cell on a
# row farther out than what is initialized
while row >= len(values_grid):
values_grid.append([self._empty_cell_value])
row_for_update = values_grid[row]
# Increase the number of columns we have in the row if the caller is setting a
# cell farther out than what is initialized in the row
while col >= len(row_for_update):
row_for_update.append(self._empty_cell_value)
row_for_update[col] = value
def truncate_tab_at_row(self, row, tab_id=None):
"""
Clears all values from the sheet at and below the given row (setting their cells equal to an empty string).
:param row: Row to start clearing, starting from 0 at the top of the document
:param tab_id: Tab to clear values from, defaults to the current tab if not provided
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
current_row = row
while current_row < len(values_grid): # Iterate through the rows
# Replace everything in the row with empty strings
values_grid[current_row] = [self._empty_cell_value] * len(values_grid[current_row])
current_row += 1
def insert_new_row_at(self, row_index, tab_id=None):
"""
Creates a new, empty row at the given row index. The current row at the given index will be moved down.
:param row_index: Index, counting from 0, for the new row
:param tab_id: Tab to add the new row to, defaults to the current tab if not provided
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
values_grid.insert(row_index, [self._empty_cell_value])
# All the following rows will be moved down.
# Any row in front of a row that moves down will be uploaded to the document in the same position as the one
# that moved down. Any row in front of one that moves down needs to have as many cells as the one it's
# replacing, so that it will overwrite all the values left over from the row that it pushed down.
while row_index < len(values_grid) - 1: # The last row isn't replacing anything, so doesn't need to be checked
row_to_expand = values_grid[row_index]
number_of_cells_to_replace = len(values_grid[row_index + 1])
while number_of_cells_to_replace > len(row_to_expand):
row_to_expand.append(self._empty_cell_value)
row_index += 1
def remove_row_at(self, row_index, tab_id=None):
"""
Removes a row from the sheet.
:param row_index: Index, counting from 0, for the row to remove
:param tab_id: Tab to remove the row from, defaults to the current tab if not provided
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
number_of_cells_replaced = len(values_grid[row_index])
del values_grid[row_index]
# Removing a row in the document means every row moves up, including the last one.
# So we need to insert a row at the end to overwrite the values left from when the original last row moves up.
# (The number of cells is expanded later in this method).
values_grid.append([self._empty_cell_value])
# All following rows will be moved up.
# Any rows after a row that moves up will be uploaded to the document in the same position as the one before it.
# If the following row doesn't have as many cells as the row it's replacing, then it wouldn't
# overwrite all the cells and some trailing values could be left over. All rows might need to have
# extra cells added so they will overwrite all the cells left from the row they're replacing.
while row_index < len(values_grid):
next_row = values_grid[row_index]
while number_of_cells_replaced > len(next_row):
next_row.append(self._empty_cell_value)
# Get the number of cells in this row, the row after it will be taking it's place in the document
number_of_cells_replaced = len(next_row)
row_index += 1
def get_row_at(self, row_index, tab_id=None):
"""
Retrieves the list of values at the given row. If the indexed row doesn't already exist, this method will
expand the grid until it does.
:param row_index: Index, counting from 0, for the row to retrieve
:param tab_id: Tab to read the row from, defaults to the current tab if not provided
:return: List of values that make up the given row
"""
values_grid = self._tabs.get(tab_id or self._default_tab_id)
while row_index >= len(values_grid):
values_grid.append([self._empty_cell_value])
return list(values_grid[row_index])
@backoff.on_exception(backoff.constant, HttpError, max_tries=4, jitter=None, interval=30)
def upload_values(self):
"""
Upload the local data to the google drive spreadsheet.
Note: any changes made to the target spreadsheet since the last call to `download_values` will be overwritten.
"""
request = self._build_service().spreadsheets().values().batchUpdate(
spreadsheetId=self._spreadsheet_id,
body={
'valueInputOption': 'RAW',
'data': [{
'range': f"'{tab_id}'!{self._get_offset_string(tab_id)}",
'values': tab_data
} for tab_id, tab_data in self._tabs.items()]
}
)
request.execute()
def get_tab_values(self, tab_id=None):
"""
Returns the values of the specified tab (or the current tab if no tab was specified).
Empty cells are represented by empty strings.
:param tab_id: Identifier of the tab to retrieve values from.
:return: A two dimensional list of strings that represent the cell values, organized by
rows (from the top down) and then columns (from left to right).
"""
if tab_id is None:
tab_id = self._default_tab_id
value_grid = self._tabs.get(tab_id)
return [[value for value in row] for row in value_grid]
| 46.191489 | 120 | 0.659988 |
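A hedged sketch of the context-manager workflow that the `GoogleSheetsClient` docstrings describe; the spreadsheet id, service key id, and tab name below are placeholders, not values from the source:

```python
# Placeholder ids and tab name; illustrates the download-on-enter / upload-on-exit flow.
from rdr_service.services.google_sheets_client import GoogleSheetsClient

with GoogleSheetsClient(
    spreadsheet_id='your-spreadsheet-id',
    service_key_id='your-service-key-id',
    tab_offsets={'Report': 'B4'},                 # writes to 'Report' are relative to cell B4
) as sheet:
    sheet.set_current_tab('Report')
    sheet.update_cell(0, 0, 'participant_count')  # lands in B4 because of the offset
    sheet.update_cell(1, 0, '123')
# __exit__ calls upload_values(), pushing the local grid back to the spreadsheet
```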
| 968691286fcd87be3fe87796133710cebdf96ea9 | 107 | py | Python | Ship.py | SanjoSolutions/anno-1602-machine-learning | 383e5583f03409f8e747701c544fcf323ff1a2de | ["Unlicense"] | null | null | null | Ship.py | SanjoSolutions/anno-1602-machine-learning | 383e5583f03409f8e747701c544fcf323ff1a2de | ["Unlicense"] | null | null | null | Ship.py | SanjoSolutions/anno-1602-machine-learning | 383e5583f03409f8e747701c544fcf323ff1a2de | ["Unlicense"] | null | null | null |
class Ship:
def __init__(self):
self.player = None
self.x = None
self.y = None
| 17.833333 | 26 | 0.523364 |
| 07a5fc5c2237ddd4ef756feacd18a8816ba26b57 | 1,981 | py | Python | jpy_video/monotext_widget.py | bendichter/Jupyter_Video_Widget | 462f875007a773ac37f421ef8c2eb30009da447b | ["MIT"] | 19 | 2017-07-31T16:07:52.000Z | 2022-02-10T16:41:27.000Z | jpy_video/monotext_widget.py | bendichter/Jupyter_Video_Widget | 462f875007a773ac37f421ef8c2eb30009da447b | ["MIT"] | 5 | 2017-10-02T20:26:44.000Z | 2021-03-12T19:06:26.000Z | jpy_video/monotext_widget.py | bendichter/Jupyter_Video_Widget | 462f875007a773ac37f421ef8c2eb30009da447b | ["MIT"] | 10 | 2018-04-09T12:12:59.000Z | 2020-11-02T19:43:11.000Z |
import ipywidgets
# <pre style="font-family: DejaVu Sans Mono, Consolas, Lucida Console, Monospace;'
# background-color: #f5f5f5;
_html_template = """
<pre style="font-family: Monospace;'
display: block;
white-space: pre;
font-variant: normal;
font-weight: normal;
font-style: normal;
font-size: 10pt;
color: #333333;
background-color: #fff;
margin: 0em 0;
margin-left: 1pt;
margin-right: 1pt;
margin-top: 1pt;
margin-bottom: 1pt;
border: 0px solid #ccc;
border-radius: 2px;
">
{content:s}
</pre>
"""
class MonoText(ipywidgets.HTML):
"""Monospace text version of ipywidget.Text widget
"""
def __init__(self, text=None):
super().__init__()
self._text = ''
self.text = text
@property
def text(self):
"""Widget's displayed text
"""
return self._text
@text.setter
def text(self, list_or_string):
if not list_or_string:
return
if isinstance(list_or_string, str):
list_or_string = [list_or_string]
if not isinstance(list_or_string, list):
msg = 'Input item must be a string or list of strings: {}'.format(type(list_or_string))
raise ValueError(msg)
if not isinstance(list_or_string[0], str):
msg = 'Input item(s) must be a string or list of strings: {}'.format(type(list_or_string[0]))
raise ValueError(msg)
self._text = '\n'.join(list_or_string)
self._update()
def _update(self):
"""Refresh displayed text
"""
self.value = _html_template.format(content=self.text)
#------------------------------------------------
if __name__ == '__main__':
pass
| 27.513889 | 105 | 0.51893 |
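A short usage sketch for the `MonoText` widget above, assuming a Jupyter notebook context:

```python
# Notebook-only sketch; display() comes from IPython, which Jupyter provides.
from IPython.display import display
from jpy_video.monotext_widget import MonoText

w = MonoText(['first line', 'second line'])   # accepts a string or a list of strings
display(w)

w.text = 'replaced text'                      # setter joins lists with '\n' and re-renders the HTML
```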
| 0a3c0cab830166fe6fdf934b704cc624072d82b2 | 9,001 | py | Python | Tasks.py | fakegit/bilibili-live-tools | 99dfb53d1dc3275e37f0be34e4199e672aecb1bf | ["MIT"] | 1,018 | 2017-12-24T01:53:18.000Z | 2022-03-25T09:25:02.000Z | Tasks.py | fakegit/bilibili-live-tools | 99dfb53d1dc3275e37f0be34e4199e672aecb1bf | ["MIT"] | 274 | 2017-12-29T13:58:19.000Z | 2021-12-09T14:31:55.000Z | Tasks.py | fakegit/bilibili-live-tools | 99dfb53d1dc3275e37f0be34e4199e672aecb1bf | ["MIT"] | 240 | 2018-01-22T05:01:28.000Z | 2022-01-21T06:30:57.000Z |
from bilibili import bilibili
import datetime
import time
import asyncio
import traceback
import os
import configloader
import utils
import ast
from printer import Printer
class Tasks:
def __init__(self):
fileDir = os.path.dirname(os.path.realpath('__file__'))
file_user = fileDir + "/conf/user.conf"
self.dic_user = configloader.load_user(file_user)
# Claim the daily gift-bag rewards
async def Daily_bag(self):
response = await bilibili().get_dailybag()
json_response = await response.json()
for i in range(0, len(json_response['data']['bag_list'])):
Printer().printer(f"获得-{json_response['data']['bag_list'][i]['bag_name']}-成功", "Info", "green")
def CurrentTime(self):
currenttime = str(int(time.mktime(datetime.datetime.now().timetuple())))
return currenttime
# Daily check-in
async def DoSign(self):
response = await bilibili().get_dosign()
temp = await response.json(content_type=None)
Printer().printer(f"签到状态:{temp['message']}", "Info", "green")
# Fan support-group check-in
async def link_sign(self):
response = await bilibili().get_grouplist()
json_response = await response.json(content_type=None)
check = len(json_response['data']['list'])
group_id_list = []
owner_uid_list = []
for i in range(0, check):
group_id = json_response['data']['list'][i]['group_id']
owner_uid = json_response['data']['list'][i]['owner_uid']
group_id_list.append(group_id)
owner_uid_list.append(owner_uid)
for (i1, i2) in zip(group_id_list, owner_uid_list):
response = await bilibili().assign_group(i1, i2)
json_response = await response.json(content_type=None)
if json_response['code'] == 0:
if (json_response['data']['status']) == 1:
Printer().printer(f"应援团{i1}已应援过", "Info", "green")
if (json_response['data']['status']) == 0:
Printer().printer(f"应援团{i1}应援成功,获得{json_response['data']['add_num']}点亲密度", "Info", "green")
else:
Printer().printer(f"应援团{i1}应援失败,{json_response}", "Error", "red")
async def send_gift(self):
if self.dic_user['gift']['on/off'] == '1':
argvs, x = await utils.fetch_bag_list(printer=False)
for i in range(0, len(argvs)):
giftID = argvs[i][0]
giftNum = argvs[i][1]
bagID = argvs[i][2]
roomID = self.dic_user['gift']['send_to_room']
await utils.send_gift_web(roomID, giftID, giftNum, bagID)
if not argvs:
Printer().printer(f"没有将要过期的礼物~", "Info", "green")
async def auto_send_gift(self):
if self.dic_user['auto-gift']['on/off'] == "1":
a = await utils.fetch_medal(printer=False)
# res = await bilibili().gift_list()
# json_res = await res.json()
# temp_dic = {}
# for j in range(0, len(json_res['data'])):
# price = json_res['data'][j]['price']
# id = json_res['data'][j]['id']
# temp_dic[id] = price
temp_dic = {1: 100, 6: 1000}
if self.dic_user['send_exheart']['on/off'] == "1":
temp_dic = {1: 100, 6: 1000, 30607: 5000}
x, temp = await utils.fetch_bag_list(printer=False)
roomid = a[0]
today_feed = a[1]
day_limit = a[2]
left_num = int(day_limit) - int(today_feed)
calculate = 0
for i in range(0, len(temp)):
gift_id = int(temp[i][0])
gift_num = int(temp[i][1])
bag_id = int(temp[i][2])
expire = int(temp[i][3])
if gift_id in [1, 6] and expire != 0:
if (gift_num * (temp_dic[gift_id] / 100) < left_num):
calculate = calculate + temp_dic[gift_id] / 100 * gift_num
tmp2 = temp_dic[gift_id] / 100 * gift_num
await utils.send_gift_web(roomid, gift_id, gift_num, bag_id)
left_num = left_num - tmp2
elif left_num - temp_dic[gift_id] / 100 >= 0:
tmp = (left_num) / (temp_dic[gift_id] / 100)
tmp1 = (temp_dic[gift_id] / 100) * int(tmp)
calculate = calculate + tmp1
await utils.send_gift_web(roomid, gift_id, tmp, bag_id)
left_num = left_num - tmp1
Printer().printer(f"自动送礼共送出亲密度为{int(calculate)}的礼物", "Info", "green")
async def doublegain_coin2silver(self):
if self.dic_user['doublegain_coin2silver']['on/off'] == "1":
response0 = await bilibili().request_doublegain_coin2silver()
json_response0 = await response0.json()
response1 = await bilibili().request_doublegain_coin2silver()
json_response1 = await response1.json()
print(json_response0['msg'], json_response1['msg'])
async def coin2silver(self):
if self.dic_user['coin2silver']['on/off'] == '1' and int(self.dic_user['coin2silver']['num']) > 0:
response = await bilibili().coin2silver_web(self.dic_user['coin2silver']['num'])
json_response = await response.json()
Printer().printer(f"硬币兑换银瓜子状态:{json_response['msg']}", "Info", "green")
async def sliver2coin(self):
if self.dic_user['coin']['on/off'] == '1':
response1 = await bilibili().silver2coin_app()
json_response1 = await response1.json()
Printer().printer(f"银瓜子兑换硬币状态:{json_response1['msg']}", "Info", "green")
async def refresh_medals(self):
if self.dic_user['refresh_medals']['on/off'] == '1':
await utils.refresh_all_gray_medals()
async def refresh_medals_by_roomid(self):
if self.dic_user['refresh_medals_by_roomid']['on/off'] == "1":
roomids = ast.literal_eval(self.dic_user['refresh_medals_by_roomid']['room_ids'])
await utils.refresh_medals_by_roomids(roomids)
async def get_rooms(self):
room_ids = []
for _ in range(3):
response = await bilibili().request_fetchmedal()
json_response = await response.json(content_type=None)
if json_response['code']:
continue
# Sometimes the fetched dict is incomplete, e.g. the latter half of the last "roomid" entry is missing
elif all(["roomid" not in medal for medal in json_response['data']['fansMedalList']]):
continue
else:
break
for i in range(0, len(json_response['data']['fansMedalList'])):
short_room_id = json_response['data']['fansMedalList'][i].get('roomid', None)
if short_room_id is None:
continue
response1 = await bilibili().get_room_info(short_room_id)
json_response1 = await response1.json(content_type=None)
long_room_id = json_response1['data']['room_info']['room_id']
room_ids.append(long_room_id)
return room_ids
async def XE_heartbeat(self, room_ids, room_id):
index_num = 24 // len(room_ids)
index_num += 1 if 24 % len(room_ids) else 0
data = await bilibili().heart_beat_e(room_id)
for index in range(1, index_num + 1):
try:
# print(f"Room {room_id}: sleeping {data['heartbeat_interval']}s before starting round {index}")
await asyncio.sleep(data['heartbeat_interval'])
response = await bilibili().heart_beat_x(index, data, room_id)
response = await response.json(content_type=None)
data['ets'] = response['data']['timestamp']
data['secret_key'] = response['data']['secret_key']
data['heartbeat_interval'] = response['data']['heartbeat_interval']
except:
pass
async def run(self):
while 1:
try:
Printer().printer(f"开始执行每日任务", "Info", "green")
await self.DoSign()
room_ids = await self.get_rooms()
coroutine_list = []
for room_id in room_ids:
coroutine_list.append(self.XE_heartbeat(room_ids, room_id))
if coroutine_list:
await asyncio.wait(coroutine_list)
await self.refresh_medals_by_roomid()
await self.refresh_medals()
await self.Daily_bag()
await self.link_sign()
await self.send_gift()
await self.sliver2coin()
await self.coin2silver()
await self.auto_send_gift()
await utils.reconnect()
await asyncio.sleep(21600)
except:
await asyncio.sleep(10)
Printer().printer(traceback.format_exc(), "Error", "red")
| 44.559406 | 111 | 0.560715 |
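A minimal sketch of driving the `Tasks` loop above with asyncio; it assumes the project's `conf/user.conf` and its `bilibili`/`utils` helpers are present:

```python
# Illustrative entry point only; Tasks.run() loops forever and sleeps ~6 hours per pass.
import asyncio
from Tasks import Tasks

loop = asyncio.get_event_loop()
loop.run_until_complete(Tasks().run())
```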
| 7131ff5f396f53c0385f3268987febbf48e6746c | 13,412 | py | Python | elegantrl/train/run_smac.py | supersglzc/ElegantRL | 727d28e075ef8c965ae59f2a12d01522c32696dd | ["Apache-2.0"] | 1 | 2021-03-19T02:17:48.000Z | 2021-03-19T02:17:48.000Z | elegantrl/train/run_smac.py | supersglzc/ElegantRL | 727d28e075ef8c965ae59f2a12d01522c32696dd | ["Apache-2.0"] | null | null | null | elegantrl/train/run_smac.py | supersglzc/ElegantRL | 727d28e075ef8c965ae59f2a12d01522c32696dd | ["Apache-2.0"] | null | null | null |
import datetime
import os
import pprint
import time
import threading
import torch as th
from types import SimpleNamespace as SN
from envs.utils.marl_utils import Logger,time_left, time_str,OneHot
from os.path import dirname, abspath
from agents.AgentQMix import AgentQMix
from train.run_parallel import ParallelRunner
from train.replay_buffer import ReplayBuffer
from envs.starcraft import StarCraft2Env
from agents.net import RNNAgent
from envs.utils.marl_utils import *
import numpy as np
# This multi-agent controller shares parameters between agents
def get_agent_own_state_size(env_args):
sc_env = StarCraft2Env(**env_args)
# qatten parameter setting (only use in qatten)
return 4 + sc_env.shield_bits_ally + sc_env.unit_type_bits
def run(_run, _config, _log):
# check args sanity
_config = args_sanity_check(_config, _log)
args = SN(**_config)
args.device = "cuda" if args.use_cuda else "cpu"
# setup loggers
logger = Logger(_log)
_log.info("Experiment Parameters:")
experiment_params = pprint.pformat(_config,
indent=4,
width=1)
_log.info("\n\n" + experiment_params + "\n")
# configure tensorboard logger
unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
args.unique_token = unique_token
if args.use_tensorboard:
tb_logs_direc = os.path.join(dirname(dirname(dirname(abspath(__file__)))), "results", "tb_logs")
tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
logger.setup_tb(tb_exp_direc)
# sacred is on by default
logger.setup_sacred(_run)
# Run and train
run_sequential(args=args, logger=logger)
# Clean up after finishing
print("Exiting Main")
print("Stopping all threads")
for t in threading.enumerate():
if t.name != "MainThread":
print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
t.join(timeout=1)
print("Thread joined")
print("Exiting script")
# Making sure framework really exits
os._exit(os.EX_OK)
def evaluate_sequential(args, runner):
for _ in range(args.test_nepisode):
runner.run(test_mode=True)
if args.save_replay:
runner.save_replay()
runner.close_env()
def run_sequential(args, logger):
# Init runner so we can get env info
runner = ParallelRunner(args=args, logger=logger)
# Set up schemes and groups here
env_info = runner.get_env_info()
args.n_agents = env_info["n_agents"]
args.n_actions = env_info["n_actions"]
args.state_shape = env_info["state_shape"]
args.accumulated_episodes = getattr(args, "accumulated_episodes", None)
if getattr(args, 'agent_own_state_size', False):
args.agent_own_state_size = get_agent_own_state_size(args.env_args)
# Default/Base scheme
scheme = {
"state": {"vshape": env_info["state_shape"]},
"obs": {"vshape": env_info["obs_shape"], "group": "agents"},
"actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
"avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
"probs": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.float},
"reward": {"vshape": (1,)},
"terminated": {"vshape": (1,), "dtype": th.uint8},
}
groups = {
"agents": args.n_agents
}
preprocess = {
"actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
}
buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
preprocess=preprocess,
device="cpu" if args.buffer_cpu_only else args.device)
# Setup multiagent controller here
mac = MAC(buffer.scheme, groups, args)
# Give runner the scheme
runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
# Learner
learner = AgentQMix(mac, buffer.scheme, logger, args)
if args.use_cuda:
learner.cuda()
if args.checkpoint_path != "":
timesteps = []
timestep_to_load = 0
if not os.path.isdir(args.checkpoint_path):
logger.console_logger.info("Checkpoint directory {} doesn't exist".format(args.checkpoint_path))
return
# Go through all files in args.checkpoint_path
for name in os.listdir(args.checkpoint_path):
full_name = os.path.join(args.checkpoint_path, name)
# Check if they are dirs the names of which are numbers
if os.path.isdir(full_name) and name.isdigit():
timesteps.append(int(name))
if args.load_step == 0:
# choose the max timestep
timestep_to_load = max(timesteps)
else:
# choose the timestep closest to load_step
timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
logger.console_logger.info("Loading model from {}".format(model_path))
learner.load_models(model_path)
runner.t_env = timestep_to_load
if args.evaluate or args.save_replay:
evaluate_sequential(args, runner)
return
# start training
episode = 0
last_test_T = -args.test_interval - 1
last_log_T = 0
model_save_time = 0
start_time = time.time()
last_time = start_time
logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
while runner.t_env <= args.t_max:
# Run for a whole episode at a time
with th.no_grad():
episode_batch = runner.run(test_mode=False)
buffer.insert_episode_batch(episode_batch)
if buffer.can_sample(args.batch_size):
next_episode = episode + args.batch_size_run
if args.accumulated_episodes and next_episode % args.accumulated_episodes != 0:
continue
episode_sample = buffer.sample(args.batch_size)
# Truncate batch to only filled timesteps
max_ep_t = episode_sample.max_t_filled()
episode_sample = episode_sample[:, :max_ep_t]
if episode_sample.device != args.device:
episode_sample.to(args.device)
learner.train(episode_sample, runner.t_env, episode)
del episode_sample
# Execute test runs once in a while
n_test_runs = max(1, args.test_nepisode // runner.batch_size)
if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
last_time = time.time()
last_test_T = runner.t_env
for _ in range(n_test_runs):
runner.run(test_mode=True)
if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):
model_save_time = runner.t_env
save_path = os.path.join(args.local_results_path, "models", args.unique_token, str(runner.t_env))
#"results/models/{}".format(unique_token)
os.makedirs(save_path, exist_ok=True)
logger.console_logger.info("Saving models to {}".format(save_path))
# learner should handle saving/loading -- delegate actor save/load to mac,
# use appropriate filenames to do critics, optimizer states
learner.save_models(save_path)
episode += args.batch_size_run
if (runner.t_env - last_log_T) >= args.log_interval:
logger.log_stat("episode", episode, runner.t_env)
logger.print_recent_stats()
last_log_T = runner.t_env
runner.close_env()
logger.console_logger.info("Finished Training")
def args_sanity_check(config, _log):
# set CUDA flags
# config["use_cuda"] = True # Use cuda whenever possible!
if config["use_cuda"] and not th.cuda.is_available():
config["use_cuda"] = False
_log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")
if config["test_nepisode"] < config["batch_size_run"]:
config["test_nepisode"] = config["batch_size_run"]
else:
config["test_nepisode"] = (config["test_nepisode"]//config["batch_size_run"]) * config["batch_size_run"]
return config
class BasicMAC:
def __init__(self, scheme, groups, args):
self.n_agents = args.n_agents
self.args = args
input_shape = self._get_input_shape(scheme)
self._build_agents(input_shape)
self.agent_output_type = args.agent_output_type
self.action_selector = EpsilonGreedyActionSelector(args)
self.save_probs = getattr(self.args, 'save_probs', False)
self.hidden_states = None
def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
# Only select actions for the selected batch elements in bs
avail_actions = ep_batch["avail_actions"][:, t_ep]
agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
return chosen_actions
def forward(self, ep_batch, t, test_mode=False):
agent_inputs = self._build_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
if test_mode:
self.agent.eval()
agent_outs, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
# Softmax the agent outputs if they're policy logits
if self.agent_output_type == "pi_logits":
if getattr(self.args, "mask_before_softmax", True):
# Make the logits for unavailable actions very negative to minimise their effect on the softmax
agent_outs = agent_outs.reshape(ep_batch.batch_size * self.n_agents, -1)
reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
agent_outs[reshaped_avail_actions == 0] = -1e10
agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
def init_hidden(self, batch_size):
self.hidden_states = self.agent.init_hidden()
if self.hidden_states is not None:
self.hidden_states = self.hidden_states.unsqueeze(0).expand(batch_size, self.n_agents, -1) # bav
def parameters(self):
return self.agent.parameters()
def load_state(self, other_mac):
self.agent.load_state_dict(other_mac.agent.state_dict())
def cuda(self):
self.agent.cuda()
def save_models(self, path):
th.save(self.agent.state_dict(), "{}/agent.th".format(path))
def load_models(self, path):
self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
def _build_agents(self, input_shape):
self.agent = RNNAgent(input_shape, self.args)
def _build_inputs(self, batch, t):
# Assumes homogenous agents with flat observations.
# Other MACs might want to e.g. delegate building inputs to each agent
bs = batch.batch_size
inputs = []
inputs.append(batch["obs"][:, t]) # b1av
if self.args.obs_last_action:
if t == 0:
inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
else:
inputs.append(batch["actions_onehot"][:, t-1])
if self.args.obs_agent_id:
inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
inputs = th.cat([x.reshape(bs, self.n_agents, -1) for x in inputs], dim=-1)
return inputs
def _get_input_shape(self, scheme):
input_shape = scheme["obs"]["vshape"]
if self.args.obs_last_action:
input_shape += scheme["actions_onehot"]["vshape"][0]
if self.args.obs_agent_id:
input_shape += self.n_agents
return input_shape
# This multi-agent controller shares parameters between agents
class MAC(BasicMAC):
def __init__(self, scheme, groups, args):
super(MAC, self).__init__(scheme, groups, args)
def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
# Only select actions for the selected batch elements in bs
avail_actions = ep_batch["avail_actions"][:, t_ep]
qvals = self.forward(ep_batch, t_ep, test_mode=test_mode)
chosen_actions = self.action_selector.select_action(qvals[bs], avail_actions[bs], t_env, test_mode=test_mode)
return chosen_actions
def forward(self, ep_batch, t, test_mode=False):
if test_mode:
self.agent.eval()
agent_inputs = self._build_inputs(ep_batch, t)
avail_actions = ep_batch["avail_actions"][:, t]
agent_outs, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
return agent_outs
| 37.255556 | 125 | 0.651432 |
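`args_sanity_check` above bumps `test_nepisode` up to one batch when it is too small, and otherwise rounds it down to a multiple of `batch_size_run`. A standalone illustration of that arithmetic (a re-statement, not an import of the module):

```python
# Re-statement of the rounding rule from args_sanity_check above, for illustration.
batch_size_run = 8
for test_nepisode in (5, 20, 32):
    if test_nepisode < batch_size_run:
        rounded = batch_size_run
    else:
        rounded = (test_nepisode // batch_size_run) * batch_size_run
    print(test_nepisode, '->', rounded)   # 5 -> 8, 20 -> 16, 32 -> 32
```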
| 48caab224c14b1bce6671d873d669275511aab78 | 923 | py | Python | brocklyn_app/forms.py | Victoria045/Brocklyn | 727101b9bc4463667ce2d816f8584bb994681f8e | ["MIT"] | null | null | null | brocklyn_app/forms.py | Victoria045/Brocklyn | 727101b9bc4463667ce2d816f8584bb994681f8e | ["MIT"] | null | null | null | brocklyn_app/forms.py | Victoria045/Brocklyn | 727101b9bc4463667ce2d816f8584bb994681f8e | ["MIT"] | null | null | null |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile, NeighbourHood, Business, Post
class UserRegisterForm(UserCreationForm):
email = forms.EmailField(max_length=254, help_text='Required. Enter a valid email address.')
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UpdateUserProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['user', 'neighbourhood', 'profile_picture']
class NeighbourHoodForm(forms.ModelForm):
class Meta:
model = NeighbourHood
exclude = ('admin',)
class BusinessForm(forms.ModelForm):
class Meta:
model = Business
exclude = ('user', 'neighbourhood')
class PostForm(forms.ModelForm):
class Meta:
model = Post
exclude = ('user', 'hood')
| 27.969697 | 96 | 0.682557 |
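A hedged sketch of validating the `UserRegisterForm` above from a view or the Django shell; the field values are placeholders and a configured Django project with this app installed is assumed:

```python
# Placeholder data; requires a configured Django settings module with brocklyn_app installed.
from brocklyn_app.forms import UserRegisterForm

form = UserRegisterForm(data={
    'username': 'demo_user',
    'email': 'demo@example.com',
    'password1': 'a-strong-password-123',
    'password2': 'a-strong-password-123',
})
if form.is_valid():
    user = form.save()      # UserCreationForm creates the User
else:
    print(form.errors)
```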
| a90e11f9f68c23eae81d45aef249dd69fd93cd1b | 17,677 | py | Python | tcapy/analysis/tcatickerloaderimpl.py | PontusHultkrantz/tcapy | 3699c70031c95943f70a732849a1a6dac26760e9 | ["Apache-2.0"] | null | null | null | tcapy/analysis/tcatickerloaderimpl.py | PontusHultkrantz/tcapy | 3699c70031c95943f70a732849a1a6dac26760e9 | ["Apache-2.0"] | null | null | null | tcapy/analysis/tcatickerloaderimpl.py | PontusHultkrantz/tcapy | 3699c70031c95943f70a732849a1a6dac26760e9 | ["Apache-2.0"] | 1 | 2020-07-22T16:53:24.000Z | 2020-07-22T16:53:24.000Z |
from __future__ import division
__author__ = 'saeedamen' # Saeed Amen / saeed@cuemacro.com
#
# Copyright 2018 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas as pd
from tcapy.conf.constants import Constants
from tcapy.util.loggermanager import LoggerManager
from tcapy.util.mediator import Mediator
from tcapy.data.volatilecache import CacheHandle
from tcapy.vis.displaylisteners import PlotRender
from tcapy.analysis.dataframeholder import DataFrameHolder
from tcapy.analysis.tcatickerloader import TCATickerLoader
from tcapy.analysis.tcarequest import MarketRequest
constants = Constants()
class TCATickerLoaderImpl(TCATickerLoader):
"""This add caching of market and trade data in chunks of monthly or other arbitrary periods (usually in Redis) to
speed up data fetching rather than hammering a database repeatedly.
"""
def __init__(self, version=constants.tcapy_version, volatile_cache_engine=constants.volatile_cache_engine):
super(TCATickerLoaderImpl, self).__init__(version=version, volatile_cache_engine=volatile_cache_engine)
self._plot_render = PlotRender()
def _convert_tuple_to_market_trade(self, market_trade_order_tuple):
volatile_cache = Mediator.get_volatile_cache(volatile_cache_engine=self._volatile_cache_engine)
# Gather market and trade/order data (which might be stored in a list)
if isinstance(market_trade_order_tuple, list):
market_df_list = []
trade_order_holder = DataFrameHolder()
for market_df_single, trade_order_holder_single in market_trade_order_tuple:
market_df_list.append(market_df_single)
trade_order_holder.add_dataframe_holder(trade_order_holder_single)
market_df_list = volatile_cache.get_dataframe_handle(market_df_list, burn_after_reading=True)
# to ensure that any spurious/None elements are removed
market_df_list = [x for x in market_df_list if isinstance(x, pd.DataFrame)]
# want to make sure the data is properly ordered too (not guaranteed we'll get it back in the right order)
market_df = self._time_series_ops.concat_dataframe_list(market_df_list)
else:
market_df = volatile_cache.get_dataframe_handle(market_trade_order_tuple[0], burn_after_reading=True)
trade_order_holder = market_trade_order_tuple[1]
return market_df, trade_order_holder
def get_market_trade_order_holder(self, tca_request, return_cache_handles=True):
"""Gets the both the market data and trade/order data associated with a TCA calculation as a tuple of
(DataFrame, DataFrameHolder)
Parameters
----------
tca_request : TCARequest
Parameters for a TCA calculation
Returns
-------
DataFrame, DataFrameHolder
"""
logger = LoggerManager.getLogger(__name__)
logger.debug(
"Get market and trade/order data for " + str(tca_request.ticker) + " from " + str(tca_request.start_date)
+ " - " + str(tca_request.finish_date))
# Get all the trade/orders which have been requested, eg. trade_df and order_df
# do separate calls given they are assumed to be stored in different database tables
# by default these will be returned as CacheHandles, which are easier to pass around Celery
return self.get_market_data(tca_request, return_cache_handles=return_cache_handles), \
self.get_trade_order_holder(tca_request)
def get_market_data(self, market_request, return_cache_handles=False):
# Handles returns a pointer
volatile_cache = Mediator.get_volatile_cache(volatile_cache_engine=self._volatile_cache_engine)
cache = True
# Don't attempt to cache DataFrames
if hasattr(market_request, 'market_data_store'):
if (isinstance(market_request.market_data_store, pd.DataFrame)):
cache = False
elif isinstance(market_request.data_store, pd.DataFrame):
cache = False
# If we have allowed the caching of monthly/periodic market data
if market_request.multithreading_params['cache_period_market_data'] and cache:
old_start_date = market_request.start_date; old_finish_date = market_request.finish_date
# so we can also take TCARequest objects
if hasattr(market_request, 'market_data_store'):
data_store = market_request.market_data_store
data_offset_ms = market_request.market_data_offset_ms
else:
data_store = market_request.data_store
data_offset_ms = market_request.data_offset_ms
# See if we can fetch from the cache (typically Redis)
start_date, finish_date, market_key, market_df = \
volatile_cache.get_data_request_cache(market_request, data_store, 'market_df',
data_offset_ms)
# If data is already cached, just return the existing CacheHandle (which is like a pointer to the reference
# in Redis)
if market_df is not None and start_date == old_start_date and finish_date == old_finish_date and return_cache_handles:
return CacheHandle(market_key, add_time_expiry=False)
if market_df is None:
market_request_copy = MarketRequest(market_request=market_request)
market_request_copy.start_date = start_date
market_request_copy.finish_date = finish_date
market_df = super(TCATickerLoaderImpl, self).get_market_data(market_request_copy)
volatile_cache.put_data_request_cache(market_request_copy, market_key, market_df)
market_df = self._strip_start_finish_dataframe(market_df, old_start_date, old_finish_date, market_request)
else:
market_df = super(TCATickerLoaderImpl, self).get_market_data(market_request)
# Return as a cache handle (which can be easily passed across Celery for example)
# Only if multithreading
if return_cache_handles and market_request.use_multithreading:
return volatile_cache.put_dataframe_handle(market_df,
use_cache_handles=market_request.multithreading_params['cache_period_market_data'])
return market_df
def get_trade_order_data(self, tca_request, trade_order_type, start_date=None, finish_date=None, return_cache_handles=True):
# return_cache_handles returns a pointer
logger = LoggerManager().getLogger(__name__)
volatile_cache = Mediator.get_volatile_cache(volatile_cache_engine=self._volatile_cache_engine)
# by default, assume we want trade data (rather than order data)
if trade_order_type is None:
trade_order_type = 'trade_df'
trade_order_contents = tca_request.trade_order_mapping[trade_order_type]
cache = True
# Don't attempt to cache DataFrames (or CSVs of trades)
if isinstance(trade_order_contents, pd.DataFrame):
cache = False
elif isinstance(trade_order_contents, str):
if 'csv' in trade_order_contents:
cache = False
# If we have allowed the caching of monthly/weekly trade data
if tca_request.multithreading_params['cache_period_trade_data'] and cache:
old_start_date = tca_request.start_date; old_finish_date = tca_request.finish_date
# See if we can fetch from the cache (usually Redis)
start_date, finish_date, trade_key, trade_df = \
volatile_cache.get_data_request_cache(
tca_request, tca_request.trade_data_store, trade_order_type, tca_request.trade_data_offset_ms)
# If data is already cached, just return the existing CacheHandle
if trade_df is not None and start_date == old_start_date and finish_date == old_finish_date:
return CacheHandle(trade_key, add_time_expiry=False)
# If it wasn't in the cache then fetch it and push into the cache
if trade_df is None:
logger.debug('Key not found for ' + trade_key + ".. now need to load")
# Call the superclass (get back DataFrames not return_cache_handles)
trade_df = super(TCATickerLoaderImpl, self).get_trade_order_data(tca_request, trade_order_type,
start_date=start_date,
finish_date=finish_date)
# Cache this periodic monthly/weekly data
volatile_cache.put_data_request_cache(tca_request, trade_key, trade_df)
# Strip off the start/finish dates (because when we load from cache, we get full months)
trade_df = self._strip_start_finish_dataframe(trade_df, start_date, finish_date, tca_request)
else:
if start_date is None or finish_date is None:
start_date = tca_request.start_date
finish_date = tca_request.finish_date
# Call the superclass (get back DataFrames not return_cache_handles)
trade_df = super(TCATickerLoaderImpl, self).get_trade_order_data(tca_request, trade_order_type,
start_date=start_date,
finish_date=finish_date)
if return_cache_handles and tca_request.use_multithreading:
# Return as a cache handle (which can be easily passed across Celery for example)
return volatile_cache.put_dataframe_handle(trade_df,
use_cache_handles=tca_request.multithreading_params['cache_period_trade_data'])
return trade_df
def calculate_metrics_single_ticker(self, market_trade_order_combo, tca_request, dummy_market):
volatile_cache = Mediator.get_volatile_cache(volatile_cache_engine=self._volatile_cache_engine)
market_df, trade_order_df_values, ticker, trade_order_df_keys \
= super(TCATickerLoaderImpl, self).calculate_metrics_single_ticker(market_trade_order_combo, tca_request, dummy_market)
if tca_request.use_multithreading:
# Return as a cache handle (which can be easily passed across Celery for example) or not for the market
# and trade/order data
return volatile_cache.put_dataframe_handle(market_df, tca_request.multithreading_params['return_cache_handles_market_data']), \
volatile_cache.put_dataframe_handle(trade_order_df_values, tca_request.multithreading_params['return_cache_handles_trade_data']), \
ticker, trade_order_df_keys
else:
# For single threading, don't use cache handles (no point, because sharing in the same memory space)
return market_df, trade_order_df_values, ticker, trade_order_df_keys
def _get_correct_convention_market_data(self, market_request, start_date=None, finish_date=None):
# Check that cross is in correct convention
if self._fx_conv.correct_notation(market_request.ticker) != market_request.ticker:
raise Exception('Method expecting only crosses in correct market convention')
cache = True
if isinstance(market_request.data_store, pd.DataFrame):
cache = False
if market_request.multithreading_params['cache_period_market_data'] and cache:
volatile_cache = Mediator.get_volatile_cache(volatile_cache_engine=self._volatile_cache_engine)
start_date, finish_date, market_key, market_df = \
volatile_cache.get_data_request_cache(market_request, market_request.data_store, 'market_df',
market_request.data_offset_ms)
if market_df is None:
market_df = super(TCATickerLoaderImpl, self)._get_underlying_market_data(start_date, finish_date, market_request)
volatile_cache.put_data_request_cache(market_request, market_key, market_df)
return self._strip_start_finish_dataframe(market_df, start_date, finish_date, market_request)
else:
if start_date is None or finish_date is None:
start_date = market_request.start_date
finish_date = market_request.finish_date
return super(TCATickerLoaderImpl, self)._get_underlying_market_data(start_date, finish_date,
market_request)
def _calculate_additional_metrics(self, market_df, trade_order_df_dict, tca_request):
logger = LoggerManager.getLogger(__name__)
# Add candlesticks/sparse DataFrames for plotting if requested
if tca_request.tca_type == 'detailed' or tca_request.summary_display == 'candlestick':
trade_order_list = self._util_func.dict_key_list(trade_order_df_dict.keys())
# only add the ticker name if we have a non-detailed plot to differentiate between currency pairs
if tca_request.tca_type == 'detailed':
ticker_label = ''
else:
ticker_label = tca_request.ticker + '_'
logger.debug("Generating downsampled market data for potential display")
market_downsampled_df = self._time_series_ops.downsample_time_series_usable(market_df)
# Combine downsampled market data with trade data
fields = ['bid', 'ask', 'open', 'high', 'low', 'close', 'mid', 'vwap', 'twap',
'arrival', 'buy_trade', 'sell_trade', 'notional', 'executed_notional', 'executed_price',
'side']
for f in tca_request.extra_lines_to_plot:
fields.append(f)
# create a sparse representation of the trades/orders which can later be displayed to users
for trade_order in trade_order_list:
if trade_order in trade_order_df_dict:
trade_order_df_dict[ticker_label + 'sparse_market_' + trade_order] = \
self._join_market_downsampled_trade_orders(market_downsampled_df,
trade_order_df_dict[trade_order],
fields=fields)
trade_order_df_dict[ticker_label + 'market_df_downsampled'] = market_downsampled_df
trade_order_df_dict[ticker_label + 'candlestick_fig'] = \
self._plot_render.generate_candlesticks(market_downsampled_df)
if tca_request.summary_display == 'candlestick':
for trade_order in trade_order_list:
if trade_order in trade_order_df_dict:
title = ticker_label + " " + trade_order
lines_to_plot = self._util_func.dict_key_list(constants.detailed_timeline_plot_lines.keys())
lines_to_plot.append('candlestick')
trade_order_df_dict[ticker_label + 'sparse_market_' + trade_order.replace('df', 'fig')]\
= self._plot_render.plot_market_trade_timeline(
title=title, sparse_market_trade_df=trade_order_df_dict[ticker_label + 'sparse_market_' + trade_order],
lines_to_plot=lines_to_plot,
candlestick_fig=trade_order_df_dict[ticker_label + 'candlestick_fig'])
return trade_order_df_dict
def _join_market_downsampled_trade_orders(self, market_downsampled_df, trade_order_df, fields=None):
"""Combine market data with trade/orders, into a sparse DataFrame. Typically, used when preparing to display
a mixture of market/trades data together.
Parameters
----------
market_downsampled_df : DataFrame
Market data which has been downsampled
trade_order_df : DataFrame
Trade/order data to be combined
fields : str (list)
Fields to keep
Returns
-------
DataFrame
"""
logger = LoggerManager.getLogger(__name__)
if fields is not None:
trade_order_df = self._time_series_ops.filter_time_series_by_matching_columns(trade_order_df, fields)
logger.debug('About to join')
sparse_market_trade_df = market_downsampled_df.join(trade_order_df, how='outer')
# Add buy/sell trade prices in new columns (easier for plotting later)
if 'executed_price' not in sparse_market_trade_df.columns:
print('x')
executed_price = sparse_market_trade_df['executed_price'].values
side_to_match = sparse_market_trade_df['side'].values
sparse_market_trade_df['buy_trade'] \
= self._time_series_ops.nanify_array_based_on_other(side_to_match, -1, executed_price) # make sells NaN (NOT buys!)
sparse_market_trade_df['sell_trade'] \
= self._time_series_ops.nanify_array_based_on_other(side_to_match, 1, executed_price) # make buys NaN (NOT sells!)
logger.debug('Finished joining')
return sparse_market_trade_df
def get_tca_version(self):
return 'pro'
| 48.831492 | 151 | 0.669627 |
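`TCATickerLoaderImpl` above layers period-based caching on top of its parent loader: widen the request to whole cached periods, fill any miss from the underlying store, then strip the result back to the dates actually requested. A self-contained sketch of that pattern with generic names (these are not tcapy APIs):

```python
# Generic sketch of period-chunked caching; `cache` is any dict-like store and
# `fetch_month` stands in for the real database call. Not tcapy code.
import pandas as pd

def get_cached_range(cache, fetch_month, start, finish):
    """Return time-indexed data for [start, finish], caching whole months."""
    chunks = []
    for month in pd.period_range(start, finish, freq='M'):
        key = str(month)
        if key not in cache:                              # miss: load the full month once
            cache[key] = fetch_month(month.start_time, month.end_time)
        chunks.append(cache[key])
    df = pd.concat(chunks).sort_index()
    return df.loc[start:finish]                           # trim back to the requested window
```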
| d6149a95748ae1f394aa5903ef360f7e9608f962 | 2,407 | py | Python | build/lib/deepneighbor/embed.py | LouisBIGDATA/deepneighbor | 01837fff138726f2b25dbd02a8863f735149ff0d | ["MIT"] | 7 | 2020-08-17T16:23:42.000Z | 2022-02-18T01:15:28.000Z | build/lib/deepneighbor/embed.py | LouisBIGDATA/deepneighbor | 01837fff138726f2b25dbd02a8863f735149ff0d | ["MIT"] | 1 | 2020-08-19T05:25:37.000Z | 2020-08-19T05:25:37.000Z | build/lib/deepneighbor/embed.py | Lou1sWang/deepneighbor | 01837fff138726f2b25dbd02a8863f735149ff0d | ["MIT"] | null | null | null |
'''
input data:
a dataframe with two columns: user item
output:
a embedding lookup dictionary {'user_id/item_id':[vector]}
'''
from gensim.models import Word2Vec
#import deepneighbor.config as config
from deepneighbor.utils import generate_sentences
from annoy import AnnoyIndex
from sklearn import preprocessing
EMBED_SIZE = 128
WINDOW_SIZE = 10
ITER = 5
WORKERS = 3
class Embed(object):
def __init__(self,data):
'''
data: a dataframe: user, item
'''
self.data = data
self.w2v_model = None
self._embeddings = {}
self.sentences = generate_sentences(data)
self.dimension = 0
def train(self, embed_size=128, window_size=5, workers=3, iter=5, **kwargs):
kwargs["sentences"] = self.sentences
kwargs["min_count"] = kwargs.get("min_count", 0)
kwargs["size"] = embed_size
kwargs["sg"] = 1 # skip gram
kwargs["hs"] = 1 # deepwalk use Hierarchical Softmax
kwargs["workers"] = workers
kwargs["window"] = window_size
kwargs["iter"] = iter
self.dimension = embed_size
print(f"There are {self.data.user.nunique()} users")
print(f"There are {self.data.item.nunique()} items")
print("Learning embedding vectors...")
model = Word2Vec(**kwargs)
print("Learning embedding vectors done!")
self.w2v_model = model
return model
# def get_embeddings(self,):
# if self.w2v_model is None:
# print("model not train")
# return {}
#
# self._embeddings = {}
# words = self.data['user'].unique().tolist() + self.data['item'].unique().tolist()
# for word in words:
# self._embeddings[word] = self.w2v_model.wv[word]
#
# return self._embeddings
def search(self, seed,k = 5):
'''
seed: seed item to find nearest neighbor
k: number of closest neighbors
'''
a = AnnoyIndex(self.dimension, 'angular')
words = self.data['user'].unique().tolist() + self.data['item'].unique().tolist()
le = preprocessing.LabelEncoder()
le.fit(words)
for word in words:
a.add_item(le.transform([word])[0],self.w2v_model.wv[word])
a.build(-1)
a_return = a.get_nns_by_item(le.transform([seed])[0], k)
return le.inverse_transform(a_return)
| 26.163043 | 91 | 0.601579 |
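A hedged usage sketch for the `Embed` class above; the two-column DataFrame is a toy example, and the deepneighbor package (with its `generate_sentences` helper) is assumed to be installed:

```python
# Toy user/item interactions; real input would be a much larger two-column log.
import pandas as pd
from deepneighbor.embed import Embed

data = pd.DataFrame({
    'user': ['u1', 'u1', 'u2', 'u2', 'u3'],
    'item': ['i1', 'i2', 'i2', 'i3', 'i1'],
})

model = Embed(data)
model.train(embed_size=32, window_size=5, iter=5)
print(model.search(seed='i2', k=3))   # ids of the 3 nearest neighbours of 'i2'
```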
| 34ebe53578b2d6fffaa740fa182b23b1123cb344 | 346 | py | Python | tests/test_operations_sliu99.py | dkucukarslan/challenges | 2237298a74255d00a1329db535cb61d9e5e463ee | ["MIT"] | null | null | null | tests/test_operations_sliu99.py | dkucukarslan/challenges | 2237298a74255d00a1329db535cb61d9e5e463ee | ["MIT"] | 14 | 2018-09-18T02:00:28.000Z | 2019-07-08T15:59:56.000Z | tests/test_operations_sliu99.py | dkucukarslan/challenges | 2237298a74255d00a1329db535cb61d9e5e463ee | ["MIT"] | 7 | 2018-09-17T14:52:24.000Z | 2020-10-02T21:55:20.000Z |
from pytest import fixture
@fixture
def op():
from challenges.Operations_sliu99 import Operations
return Operations()
def test_add(op):
assert op.add(1,2) == 3
def test_subtract(op):
assert op.subtract(2,1) == 1
def test_increment(op):
assert op.increment(1) == 2
def test_decrement(op):
assert op.decrement(2) == 1
| 17.3 | 55 | 0.687861 |
| 49dd34c28b53423b033a08e2f3e9b5355165c002 | 5,966 | py | Python | trax/layers/initializers.py | jackalhan/trax | 24d764566b6ab44e10c717fac92367958a166eb8 | ["Apache-2.0"] | 2 | 2020-03-27T17:26:58.000Z | 2020-03-27T18:45:47.000Z | trax/layers/initializers.py | Tenoke/trax | bbabf6cc8a0682218927080bce33a4f90591aa0b | ["Apache-2.0"] | null | null | null | trax/layers/initializers.py | Tenoke/trax | bbabf6cc8a0682218927080bce33a4f90591aa0b | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trax initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as onp
import tensorflow.compat.v2 as tf
from trax.math import numpy as np
from trax.math import random
def _GetFans(shape, out_dim=-1, in_dim=-2):
"""Get the fan-in and fan-out sizes for the given shape and dims."""
# Temporary fix until numpy.delete supports negative indices.
if out_dim < 0:
out_dim += len(shape)
if in_dim < 0:
in_dim += len(shape)
receptive_field = np.prod(onp.delete(shape, [in_dim, out_dim]))
if len(shape) >= 2:
fan_in, fan_out = shape[in_dim], shape[out_dim]
elif len(shape) == 1:
fan_in = shape[0]
fan_out = shape[0]
else:
fan_in = 1.
fan_out = 1.
fan_in *= receptive_field
fan_out *= receptive_field
return fan_in, fan_out
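# Illustrative aside (not part of the original module): for a 3x3 convolution
# kernel of shape (3, 3, 64, 128) with the default in_dim=-2 and out_dim=-1,
# the receptive field is 3 * 3 = 9, so _GetFans returns
# fan_in = 64 * 9 = 576 and fan_out = 128 * 9 = 1152.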
def InitializerFromFile(path):
"""Loads parameters from .npy file."""
def Initializer(shape, rng):
del rng
logging.info('Loading pretrained embeddings from %s', path)
with tf.io.gfile.GFile(path, 'rb') as f:
parameters = np.load(f)
assert np.shape(parameters) == shape, (
'Expected shape %s, got %s' % (shape, np.shape(parameters)))
return parameters
return Initializer
def RandomNormalInitializer(stddev=1e-2):
"""Returns an initializer for random normal coefficients."""
return (
lambda shape, rng: (stddev * random.normal(rng, shape)).astype('float32')
)
def RandomUniformInitializer(lim=1.0):
"""Returns an initializer for random uniform coefficients."""
return lambda shape, rng: random.uniform(rng, shape, np.float32, -lim, lim)
def ScaledInitializer(out_dim, in_dim, scale, mode, distribution):
"""Returns an initializer that adjusts its scale based on weight shapes."""
if scale <= 0.:
raise ValueError('scale must be positive float, {} given'.format(scale))
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError(
'Invalid mode argument:, {}, must be either fan_in, fan_out or fan_avg'
.format(mode))
def Init(shape, rng):
"""Returns random values for initializing weights of the given `shape`."""
fan_in, fan_out = _GetFans(shape, out_dim, in_dim)
gain = scale
if mode == 'fan_in':
gain /= fan_in
elif mode == 'fan_out':
gain /= fan_out
elif mode == 'fan_avg':
gain /= (fan_in + fan_out) / 2
if distribution == 'truncated_normal':
# constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = np.sqrt(gain) / .87962566103423978
new_weights = random.truncated_normal(rng, -2, 2, shape) * stddev
return new_weights.astype('float32')
elif distribution == 'normal':
new_weights = random.normal(rng, shape) * np.sqrt(gain)
return new_weights.astype('float32')
elif distribution == 'uniform':
lim = np.sqrt(3. * gain)
return random.uniform(rng, shape, np.float32, -lim, lim)
else:
      raise ValueError('invalid distribution for ScaledInitializer')
return Init
def GlorotNormalInitializer(out_dim=-1, in_dim=-2, scale=1.):
"""Returns an initializer for random Glorot-scaled coefficients."""
return ScaledInitializer(out_dim, in_dim, scale, 'fan_avg', 'normal')
def GlorotUniformInitializer(out_dim=-1, in_dim=-2, scale=1.):
"""Returns an initializer for random uniform Glorot-scaled coefficients."""
return ScaledInitializer(out_dim, in_dim, scale, 'fan_avg', 'uniform')
def LeCunNormalInitializer(out_dim=-1, in_dim=-2, scale=1.):
"""Returns an initializer for random LeCun-scaled coefficients."""
return ScaledInitializer(out_dim, in_dim, scale, 'fan_in', 'normal')
def LeCunUniformInitializer(out_dim=-1, in_dim=-2, scale=1.):
"""Returns an initializer for random uniform LeCun-scaled coefficients."""
return ScaledInitializer(out_dim, in_dim, scale, 'fan_in', 'uniform')
def KaimingNormalInitializer(out_dim=-1, in_dim=-2, param=0.):
"""Returns an initializer for random Kaiming-scaled coefficients."""
return ScaledInitializer(
out_dim, in_dim, 2.0 / np.sqrt(1 + param**2), 'fan_in', 'normal')
def KaimingUniformInitializer(out_dim=-1, in_dim=-2, param=0.):
"""Returns an initializer for random uniform Kaiming-scaled coefficients."""
return ScaledInitializer(
out_dim, in_dim, 2.0 / np.sqrt(1 + param**2), 'fan_in', 'uniform')
def OrthogonalInitializer(stddev=1.0):
"""Returns an orthogonal initializer."""
def Init(shape, rng):
"""Returns orthogonalized random normal values with the given `shape`."""
# Have at least 2 elements in shape.
cur_shape = list(shape)
while len(cur_shape) < 2:
cur_shape = [1] + cur_shape
# Flatten the input shape with the last dimension remaining.
n_rows = 1
for dim in cur_shape[:-1]:
n_rows *= dim
n_cols = cur_shape[-1]
flat_shape = (n_cols, n_rows) if n_rows < n_cols else (n_rows, n_cols)
# Generate a random matrix
a = random.normal(rng, flat_shape, dtype=np.float32)
# Compute the qr factorization
q, r = np.linalg.qr(a)
# Make Q uniform
d = np.diag(r)
q *= np.sign(d)
# Transpose and reshape back q if needed.
if n_rows < n_cols:
q = np.transpose(q)
q = np.reshape(q, shape)
# Return scaled as requested.
return stddev * q
return Init
| 32.78022
| 79
| 0.690077
|
e566ff91507a9c8581f4cd5620af6e172e1330e4
| 990
|
py
|
Python
|
malwareconfig/komand_malwareconfig/actions/view_config/action.py
|
GreyNoise-Intelligence/insightconnect-plugins
|
2ba3121d42fd96e1267bb095bc76b962678c1f56
|
[
"MIT"
] | null | null | null |
malwareconfig/komand_malwareconfig/actions/view_config/action.py
|
GreyNoise-Intelligence/insightconnect-plugins
|
2ba3121d42fd96e1267bb095bc76b962678c1f56
|
[
"MIT"
] | null | null | null |
malwareconfig/komand_malwareconfig/actions/view_config/action.py
|
GreyNoise-Intelligence/insightconnect-plugins
|
2ba3121d42fd96e1267bb095bc76b962678c1f56
|
[
"MIT"
] | null | null | null |
import komand
from .schema import ViewConfigInput, ViewConfigOutput
# Custom imports below
from komand_malwareconfig.util.client import *
class ViewConfig(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="view_config",
description="View config for hash",
input=ViewConfigInput(),
output=ViewConfigOutput(),
)
def run(self, params={}):
hashes = params.get("hashes")
response = self.connection.client.view_config(hashes=hashes)
status_code = response["response"]
if not status_code == "200":
raise Exception("Received non-200 status code")
self.logger.info("Run: Received status code 200")
        items = response["items"]
        results = list(items.values())
return {"info": results}
def test(self):
"""TODO: Test action"""
return {}
| 25.384615
| 68
| 0.608081
|
a176388296d25f0a5c13b509b79943794457a8bf
| 5,805
|
py
|
Python
|
projects/penning/analyse_gyro_wp.py
|
Krissmedt/relativistic_sdc
|
7b294a9ab5d75a1540f192393ad0b4a570bfddb5
|
[
"MIT"
] | null | null | null |
projects/penning/analyse_gyro_wp.py
|
Krissmedt/relativistic_sdc
|
7b294a9ab5d75a1540f192393ad0b4a570bfddb5
|
[
"MIT"
] | null | null | null |
projects/penning/analyse_gyro_wp.py
|
Krissmedt/relativistic_sdc
|
7b294a9ab5d75a1540f192393ad0b4a570bfddb5
|
[
"MIT"
] | null | null | null |
from math import sqrt, fsum, pi, exp, cos, sin, floor
from decimal import Decimal
import io
import pickle as pk
import matplotlib.pyplot as plt
import numpy as np
import cmath as cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
import h5py as h5
import sys
import traceback
from collections import OrderedDict
import warnings
warnings.filterwarnings("ignore")
from tools.writing import *
from tools.plotting import *
data_root = "./"
fig_name = "beta_9999"
testcase = "A"
filenames = {}
filenames["Ref"] = "sdc_M3K2_wp_{0}.h5".format(testcase)
# filenames["Leapfrog"] = "lf_wp_vvrel.h5"
# filenames["Velocity-Verlet"] = "vvA_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M3K1"] = "sdc_M3K1_wp_{0}.h5".format(testcase)
filenames["Boris-SDC M3K2"] = "sdc_M3K2_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K1"] = "sdc_M5K1_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K2"] = "sdc_M5K2_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K3"] = "sdc_M5K3_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K4"] = "sdc_M5K4_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K5"] = "sdc_M5K5_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K6"] = "sdc_M5K6_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K7"] = "sdc_M5K7_wp_{0}.h5".format(testcase)
# filenames["Boris-SDC M5K5"] = "sdc_M5K5_wp_vvrel.h5"
plot_params = {}
plot_params['legend.fontsize'] = 16
plot_params['figure.figsize'] = (12,8)
plot_params['axes.labelsize'] = 20
plot_params['axes.titlesize'] = 20
plot_params['xtick.labelsize'] = 16
plot_params['ytick.labelsize'] = 16
plot_params['lines.linewidth'] = 4
plot_params['axes.titlepad'] = 5
plot_params['legend.loc'] = 'upper right'
plt.rcParams.update(plot_params)
r = 1
b = 1
for key,value in filenames.items():
file = h5.File(data_root+value,'r')
Nt = file["fields/Nt"][:]
rhs = file["fields/rhs"][:]
dt = file["fields/dt"]
times = file["fields/t"]
x = file["fields/pos"]
v = file["fields/vel"]
x0 = file["fields/x0"]
v0 = file["fields/v0"]
if key == "Ref":
solx = x
solv = v
continue
ref_errors = np.abs(solx[-1,:,:]-x[:,:,:])/np.abs(solx[-1,:,:])
ref_errors = np.linalg.norm(ref_errors,axis=1)
ref_errors = np.linalg.norm(ref_errors,axis=1)
refv_errors = np.abs(solv[-1,:,:]-v[:,:,:])/np.abs(solv[-1,:,:])
refv_errors = np.linalg.norm(refv_errors,axis=1)
refv_errors = np.linalg.norm(refv_errors,axis=1)
# ref_errors = np.abs(x[-1,:,:]-x[:-1,:,:])/np.abs(x[-1,:,:])
# ref_errors = np.linalg.norm(ref_errors,axis=1)
# ref_errors = np.linalg.norm(ref_errors,axis=1)
#
# refv_errors = np.abs(v[-1,:,:]-v[:-1,:,:])/np.abs(v[-1,:,:])
# refv_errors = np.linalg.norm(refv_errors,axis=1)
# refv_errors = np.linalg.norm(refv_errors,axis=1)
xfactors = np.log2(ref_errors[:-1]/ref_errors[1:])
vfactors = np.log2(refv_errors[:-1]/refv_errors[1:])
print(key+" x order: {0}".format(xfactors))
print(key+" v factors: {0}".format(vfactors))
print(rhs)
if key == "Velocity-Verlet":
c = "black"
if "Boris-SDC M3" in key:
sims = 2
c = (0,0,b)
b -= 1/sims
if "Boris-SDC M5" in key:
sims = 7
c = (r,0,b)
r -= np.round(0.7/sims,2)
label = key
##Order Plot w/ rhs
fig_rhs = plt.figure(1)
ax_rhs = fig_rhs.add_subplot(1, 1, 1)
ax_rhs.plot(rhs,ref_errors,marker="o",color=c,label=label)
ax_rhs.set_ylabel(r'$\Delta x^{\mathrm{rel}}$')
##Order Plot w/ Nt
fig_nt = plt.figure(2)
ax_nt = fig_nt.add_subplot(1, 1, 1)
ax_nt.plot(Nt,ref_errors,marker="o",color=c,label=label)
ax_nt.set_ylabel(r'$\Delta x^{\mathrm{rel}}$')
##Velocity Order Plot w/ Nt
fig_nt_v = plt.figure(3)
ax_nt_v = fig_nt_v.add_subplot(1, 1, 1)
ax_nt_v.plot(Nt,refv_errors,marker="o",color=c,label=label)
ax_nt_v.set_ylabel(r'$\Delta v^{\mathrm{rel}}$')
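# Illustrative aside (not part of the original script): the order factors
# above come from successive error ratios. A method of order p shrinks the
# error by 2**p when the step count is doubled, so log2(err[i] / err[i+1])
# recovers p. For idealised 2nd-order errors [1e-2, 2.5e-3, 6.25e-4]:
# np.log2(np.array([1e-2, 2.5e-3]) / np.array([2.5e-3, 6.25e-4])) -> [2., 2.]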
handles, labels = fig_rhs.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
ax_rhs.legend(by_label.values(), by_label.keys(),loc='lower left')
handles, labels = fig_nt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
ax_nt.legend(by_label.values(), by_label.keys(),loc='lower left')
handles, labels = fig_nt_v.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
ax_nt_v.legend(by_label.values(), by_label.keys(),loc='lower left')
axnl_list = []
axnl_list.append(ax_rhs)
axnl_list.append(ax_nt)
axnl_list.append(ax_nt_v)
i = 0
for ax in axnl_list:
i +=1
if i == 1:
orderSlope = -1
ax.set_xlabel('RHS evaluations')
else:
ax.set_xlabel(r'$N t$')
orderSlope = -1
ax.set_xscale('log')
#ax_rhs.set_xlim(10**3,10**5)
ax.set_yscale('log')
ax.set_ylim(10**(-15),10**(5))
xRange = ax.get_xlim()
yRange = ax.get_ylim()
ax.plot(xRange,orderLines(1*orderSlope,xRange,yRange),
ls='dashdot',c='0.2')
ax.plot(xRange,orderLines(2*orderSlope,xRange,yRange),
ls='dotted',c='0.4')
ax.plot(xRange,orderLines(4*orderSlope,xRange,yRange),
ls='dashed',c='0.6')
ax.plot(xRange,orderLines(8*orderSlope,xRange,yRange),
ls='solid',c='0.8')
fig_rhs.savefig(data_root + 'pen_x_'+ fig_name + '_rhs.pdf', dpi=150, facecolor='w', edgecolor='w',orientation='portrait',pad_inches=0.05,bbox_inches = 'tight')
fig_nt.savefig(data_root + 'pen_x_' + fig_name + '_nt.pdf', dpi=150, facecolor='w', edgecolor='w',orientation='portrait',pad_inches=0.05,bbox_inches = 'tight')
fig_nt_v.savefig(data_root + 'pen_v_' + fig_name + '_nt.pdf', dpi=150, facecolor='w', edgecolor='w',orientation='portrait',pad_inches=0.05,bbox_inches = 'tight')
| 33.75
| 161
| 0.657192
|
6dc430a46cc9d2ade537fb893dad89704db7b1d4
| 2,875
|
py
|
Python
|
vminspect/winevtx.py
|
pombredanne/vminspect
|
7e20008cd3c9a8eb5d55c4362c185051e166b015
|
[
"BSD-3-Clause"
] | 27
|
2016-08-31T18:32:02.000Z
|
2022-02-18T00:52:20.000Z
|
vminspect/winevtx.py
|
pombredanne/vminspect
|
7e20008cd3c9a8eb5d55c4362c185051e166b015
|
[
"BSD-3-Clause"
] | null | null | null |
vminspect/winevtx.py
|
pombredanne/vminspect
|
7e20008cd3c9a8eb5d55c4362c185051e166b015
|
[
"BSD-3-Clause"
] | 7
|
2016-08-22T10:13:30.000Z
|
2020-05-26T06:22:06.000Z
|
# Copyright (c) 2016-2017, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for parsing Windows Event Log files."""
import logging
from tempfile import NamedTemporaryFile
from Evtx.Evtx import FileHeader
from Evtx.Views import evtx_file_xml_view
from vminspect.filesystem import FileSystem
class WinEventLog:
"""WinEventLog class.
    Allows retrieval of the Events contained within Windows Event Log files.
"""
def __init__(self, disk):
self._disk = disk
self._filesystem = None
self.logger = logging.getLogger(
"%s.%s" % (self.__module__, self.__class__.__name__))
def __enter__(self):
self._filesystem = FileSystem(self._disk)
self._filesystem.mount()
return self
def __exit__(self, *_):
self._filesystem.umount()
def __getattr__(self, attr):
return getattr(self._filesystem, attr)
def eventlog(self, path):
"""Iterates over the Events contained within the log at the given path.
        For each Event, yields an XML string.
"""
self.logger.debug("Parsing Event log file %s.", path)
with NamedTemporaryFile(buffering=0) as tempfile:
self._filesystem.download(path, tempfile.name)
file_header = FileHeader(tempfile.read(), 0)
for xml_string, _ in evtx_file_xml_view(file_header):
yield xml_string
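# --- Illustrative usage sketch (not part of the original module). The disk
# --- image path and event-log path below are hypothetical.
#
# with WinEventLog('/tmp/windows_disk.qcow2') as event_log:
#     for xml_event in event_log.eventlog(
#             'C:/Windows/System32/winevt/Logs/System.evtx'):
#         print(xml_event)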
| 35.493827
| 80
| 0.730783
|
c041b4149db6bb3dc4e4a2b8ac586599dec268ee
| 599
|
py
|
Python
|
users/forms.py
|
devashar13/Blog-app-django
|
4f3f046aa77fed61f9c2f390bf68846eac709658
|
[
"MIT"
] | null | null | null |
users/forms.py
|
devashar13/Blog-app-django
|
4f3f046aa77fed61f9c2f390bf68846eac709658
|
[
"MIT"
] | 4
|
2021-03-30T13:31:58.000Z
|
2021-09-22T19:07:57.000Z
|
users/forms.py
|
devashar13/Blog-app-django
|
4f3f046aa77fed61f9c2f390bf68846eac709658
|
[
"MIT"
] | 1
|
2020-06-13T09:38:27.000Z
|
2020-06-13T09:38:27.000Z
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
    class Meta:
        model = Profile
        fields = ['image']
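# --- Illustrative aside (not part of the original module): a hypothetical
# --- view-side use of the two update forms (request handling, validation
# --- errors and the redirect are omitted).
#
# u_form = UserUpdateForm(request.POST, instance=request.user)
# p_form = ProfileUpdateForm(request.POST, request.FILES,
#                            instance=request.user.profile)
# if u_form.is_valid() and p_form.is_valid():
#     u_form.save()
#     p_form.save()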
| 27.227273
| 64
| 0.686144
|
841ba007850b0fd765eddb19415d24f2ee68b8ea
| 1,405
|
py
|
Python
|
tests/reproducibility/fixtures.py
|
LarsNeR/stellargraph
|
ee993bb600745a37d994fa4da628268b1cd657dc
|
[
"Apache-2.0"
] | 2
|
2020-03-27T07:02:09.000Z
|
2020-05-30T16:46:18.000Z
|
tests/reproducibility/fixtures.py
|
LarsNeR/stellargraph
|
ee993bb600745a37d994fa4da628268b1cd657dc
|
[
"Apache-2.0"
] | null | null | null |
tests/reproducibility/fixtures.py
|
LarsNeR/stellargraph
|
ee993bb600745a37d994fa4da628268b1cd657dc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
def models_equals(model1, model2):
w1 = model1.get_weights()
w2 = model2.get_weights()
return all(np.array_equal(w, w_new) for w, w_new in zip(w1, w2))
def assert_reproducible(func, num_iter=10):
"""
Assert Keras models produced from calling ``func`` are reproducible.
Args:
func (callable): Function to check for reproducible model
num_iter (int): Number of iterations to run through to validate reproducibility.
"""
model = func()
for i in range(num_iter):
model_new = func()
assert models_equals(model, model_new), (
model.get_weights(),
model_new.get_weights(),
)
# clear the tensorflow session to free memory
tf.keras.backend.clear_session()
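# --- Illustrative usage sketch (not part of the original fixtures). The
# --- builder below is hypothetical and only shows the calling convention:
# --- it must return identical weights on every call, which is achieved here
# --- by fixing the initializer seed.
def _example_builder():
    init = tf.keras.initializers.GlorotUniform(seed=42)
    return tf.keras.Sequential(
        [tf.keras.layers.Dense(4, kernel_initializer=init, input_shape=(8,))]
    )
# assert_reproducible(_example_builder, num_iter=3)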
| 30.543478
| 88
| 0.696797
|
4004bfb154b6d16f6eade4b4db845589dc9f7035
| 3,521
|
py
|
Python
|
apis_v1/views/views_reaction.py
|
aucoeur/WeVoteServer
|
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
|
[
"MIT"
] | 44
|
2015-11-19T04:52:39.000Z
|
2021-03-17T02:08:26.000Z
|
apis_v1/views/views_reaction.py
|
aucoeur/WeVoteServer
|
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
|
[
"MIT"
] | 748
|
2015-09-03T04:18:33.000Z
|
2022-03-10T14:08:10.000Z
|
apis_v1/views/views_reaction.py
|
aucoeur/WeVoteServer
|
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
|
[
"MIT"
] | 145
|
2015-09-19T10:10:44.000Z
|
2022-03-04T21:01:12.000Z
|
# apis_v1/views/views_reaction.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from config.base import get_environment_variable
from django.views.decorators.csrf import csrf_exempt
from reaction.controllers import reaction_like_count_for_api, voter_reaction_like_off_save_for_api, \
voter_reaction_like_on_save_for_api, reaction_like_status_retrieve_for_api
import wevote_functions.admin
from wevote_functions.functions import get_voter_device_id, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
def reaction_like_count_view(request): # reactionLikeCount
"""
Retrieve the total number of Likes that an item has received, either from the perspective of the voter's
network of friends, or the entire network. (reactionLikeCount)
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
liked_item_we_vote_id = request.GET.get('liked_item_we_vote_id', '')
limit_to_voters_network = request.GET.get('limit_to_voters_network', False)
limit_to_voters_network = positive_value_exists(limit_to_voters_network)
return reaction_like_count_for_api(voter_device_id=voter_device_id, liked_item_we_vote_id=liked_item_we_vote_id,
limit_to_voters_network=limit_to_voters_network)
def voter_reaction_like_off_save_view(request): # voterReactionLikeOffSave
"""
Un-mark the reaction for a single position for one voter (voterReactionLikeOffSave)
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
liked_item_we_vote_id = request.GET.get('liked_item_we_vote_id', '')
return voter_reaction_like_off_save_for_api(
voter_device_id=voter_device_id,
liked_item_we_vote_id=liked_item_we_vote_id)
def voter_reaction_like_on_save_view(request): # voterReactionLikeOnSave
"""
Mark the reaction for a single position for one voter (voterReactionLikeOnSave)
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
# We track activity_tidbit_we_vote_id so we can get all of the likes under on major item
activity_tidbit_we_vote_id = request.GET.get('activity_tidbit_we_vote_id', '')
liked_item_we_vote_id = request.GET.get('liked_item_we_vote_id', '')
return voter_reaction_like_on_save_for_api(
voter_device_id=voter_device_id,
liked_item_we_vote_id=liked_item_we_vote_id,
activity_tidbit_we_vote_id=activity_tidbit_we_vote_id)
@csrf_exempt
def reaction_like_status_retrieve_view(request): # reactionLikeStatusRetrieve
"""
Retrieve whether or not a reaction is marked for position (reactionLikeStatusRetrieve)
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
liked_item_we_vote_id_list = request.POST.getlist('liked_item_we_vote_id_list[]')
if len(liked_item_we_vote_id_list) < 1:
# To support old client versions that aren't using POST yet
liked_item_we_vote_id_list = request.GET.getlist('liked_item_we_vote_id_list[]')
return reaction_like_status_retrieve_for_api(
voter_device_id=voter_device_id, liked_item_we_vote_id_list=liked_item_we_vote_id_list)
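# --- Illustrative aside (not part of the original module): a hypothetical
# --- exercise of the POST path above with Django's RequestFactory (the route
# --- and the we_vote ids are made up; a configured Django project is assumed).
#
# from django.test import RequestFactory
# request = RequestFactory().post(
#     '/apis/v1/reactionLikeStatusRetrieve/',
#     {'liked_item_we_vote_id_list[]': ['wv02pos123', 'wv02pos456']})
# response = reaction_like_status_retrieve_view(request)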
| 46.946667
| 116
| 0.78188
|
7662e40435293008f5ad5e652584998ac5060bca
| 216
|
py
|
Python
|
Exercises/003.py
|
GuilhermeRds1921/Python3-Guanabara
|
24cd85b076e1074a5602e54c420bcc8e70cc1854
|
[
"MIT"
] | null | null | null |
Exercises/003.py
|
GuilhermeRds1921/Python3-Guanabara
|
24cd85b076e1074a5602e54c420bcc8e70cc1854
|
[
"MIT"
] | null | null | null |
Exercises/003.py
|
GuilhermeRds1921/Python3-Guanabara
|
24cd85b076e1074a5602e54c420bcc8e70cc1854
|
[
"MIT"
] | null | null | null |
# Create a program that reads two numbers and shows the sum of them.
num1 = float(input('Enter a value: '))
num2 = float(input('Enter another value: '))
print('The sum of the two values is: {}'.format(num1 + num2))
| 43.2
| 68
| 0.699074
|
1e8c07dc1300f46b877af4cec53e4ca79db16607
| 16,222
|
py
|
Python
|
cavecalc/analyse.py
|
Rob-Owen/cavecalc
|
38cbdc7c4d0f6c05b2825281d60247936c9f4839
|
[
"MIT"
] | 3
|
2020-04-04T03:47:14.000Z
|
2020-11-24T00:53:13.000Z
|
cavecalc/analyse.py
|
Rob-Owen/cavecalc
|
38cbdc7c4d0f6c05b2825281d60247936c9f4839
|
[
"MIT"
] | null | null | null |
cavecalc/analyse.py
|
Rob-Owen/cavecalc
|
38cbdc7c4d0f6c05b2825281d60247936c9f4839
|
[
"MIT"
] | 2
|
2018-07-24T13:10:03.000Z
|
2020-01-17T15:34:58.000Z
|
"""Contains the Evaluate object, which has methods to load data from Cavecalc
.pkl output files, display data and write it to other file formats.
Classes defined here:
Evaluate
"""
import pickle
import os
import copy
import matplotlib
from sys import platform
if platform != 'win32':
matplotlib.use('TkAgg') # necessary for mac
from matplotlib import pyplot as plt
import numpy as np
import cavecalc.util as ccu
import scipy.io as sio
import seaborn as sns
class Evaluate(object):
"""Processes of Cavecalc model output.
Evaluate contains methods to load, process, format and display Cavecalc
model data saved in .pkl files. It allows model output to be saved to .csv
and .mat files for further processing.
Evaluate objects may be used directly (see examples), and are also used
under the hood by the output GUI.
Methods:
get_settings_report - Get a dict summarising model settings
load_data - Load .pkl files from a directory
save_csvs - Save all loaded model output to .csv files
save_all_mat - Save all loaded model output to a .mat file
filter_out_noprecip - Filter out all model steps that don't
precipitate calcite.
filter_by_index - Filter model steps in/out based on a model step
index.
filter_by_results - Filter model steps in/out based on any output
parameter.
filter_by_settings - Filter whole models in/out based in any input
parameter.
plot_models - Plot all loaded models.
plot_points - Plot selected steps from all loaded models.
"""
def __init__(self):
"""Initialise an Evaluate object.
After initialisation, load_data must be called to read data from .pkl
files.
"""
self._models = []
self._settings = []
@property
def model_settings(self):
"""Return a list of dicts containing model settings.
Generates a list of dicts from from all loaded SettingsObjects by
calling their .dict() method.
Returns:
A list of settings dicts.
"""
if not self._settings:
raise ValueError("Object %r has no models loaded." % self)
o = [s.dict() for s in self._settings]
for d in o:
try:
d.pop('id')
except KeyError:
pass
if o:
return copy.deepcopy(o)
else:
raise ValueError("Object %r has no models loaded." % self)
@property
def model_results(self):
"""Return a list of dicts containing model output.
Returns:
A list of results dicts.
"""
if self._models:
return copy.deepcopy(self._models)
else:
raise ValueError("Object %r has no models loaded." % self)
def get_settings_report(self):
"""Get a summary of the range of model settings.
Returns:
A dict of model settings, with one entry for each unique value detected.
"""
d = self.model_settings[0]
        o = {k: [] for k in d.keys()}
for s in self.model_settings:
for k, v in s.items():
if v not in o[k]:
o[k].append(v)
try:
o.pop('id')
except KeyError:
pass
return o
def load_data(self, *args):
"""Load .pkl data into the Evaluate object for processing.
Data is loaded from a directory. The directory must contain
settings.pkl and results.pkl. load_data may be called multiple times
to merge different model suites for comparison.
Args:
*args: The directories to load data from.
"""
ret_dir = os.getcwd()
if len(args) == 0:
args = (ret_dir,)
for d in args:
os.chdir(d)
try:
print("Attempting to load data from %s..." % d, end="")
with open('settings.pkl', 'rb') as f:
self._settings.extend(pickle.load(f))
with open('results.pkl', 'rb') as f:
r = pickle.load(f)
self._models.extend([a for (a,b) in r])
print(" Done")
finally:
os.chdir(ret_dir)
def save_csvs(self, directory=None):
"""Save model output to .csv files.
One file is saved for each model loaded. Note that only model output
can be saved to csv, not the settings used.
csv files may be easily read in any spreadsheet program.
Args:
directory (str): The directory to save output to.
"""
if not directory:
directory = os.getcwd()
for (id, model) in enumerate(self._models):
f = os.path.join(directory, "out_%i.csv" % id)
ccu.save_csv(model, os.path.join(f))
def save_all_mat(self, file):
"""Save all loaded data to a .mat file.
Data is saved as two matlab structs, reflecting the data structures
inside settings.pkl and results.pkl respectively.
Args:
file: Filename to save to (.mat will be auto-appended)
"""
s = dict()
for i, SO in enumerate(self._settings): # for each model
            settings_dict = SO.dict() # settings dict
res = self._models[i] # results dict
# remove any 'None' values from output (savemat can't handle them)
# replace with -999 value, like PHREEQC
n_res = dict()
for k,v in res.items():
nv = []
for e in v:
if e is None:
nv.append(-999)
else:
nv.append(e)
n_res[k] = nv
            o = {k: (v if type(v) is list else [v]) for k, v in settings_dict.items()}
a = ccu.numpify(o) # settings
b = ccu.numpify(n_res) # output
c = { 'settings' : a,
'results' : b }
name = "m" + str(i)
s[name] = c
sio.savemat(file, s)
def filter_out_noprecip(self):
"""Returns a filtered copy of the Evalulate object.
Models are filtered out of they do not include any precipitation
reactions. This is useful for 'eq' mode analyses to remove
non-precipitating solutions.
"""
A = Evaluate()
for i,m in enumerate(self._models):
a = False
for s in m['step_desc']:
if 'precip' in s:
a = True
break
if a:
A._models.append(copy.deepcopy(m))
A._settings.append(copy.deepcopy(self._settings[i]))
return A
def filter_by_index(self, ind, n=False):
"""Return a filtered copy of the Evaluate object.
Filter the model output data contained in the object. Data is filtered
based on list index position - this corresponds to the calculation step
in the model. This method is useful for subsetting data in preparation
for plotting. It works similarly to filter_by_results.
Example:
e = Evaluate()
e.load_data('./my_data/')
f = e.filter_by_index(-1) # extracts the final dripwater chemistry
Args:
ind: An integer index to filter by. This corresponds to a model
step number. E.g. index 0 is the first PHREEQC calculation
(initial water chemistry), index 1 is the bedrock dissolution
product, index -1 is the final solution chemistry.
n: Optional boolean argument. If True, the filter is inverted.
Default False.
Returns:
A modified copy of the object. The copy only contains model output
that meet the filter criteria.
"""
A = Evaluate()
A._settings = copy.deepcopy(self._settings)
for m in self._models:
if ind < 0:
explicitIndex = len(m['step_desc']) + ind
else:
explicitIndex = ind
if n is False:
fil = {k : [a[explicitIndex]] for k,a in m.items()}
else:
fil = {k : [v for i,v in enumerate(a) if i != explicitIndex] for k,a in m.items()}
A._models.append(fil)
rem = []
for i,r in enumerate(A._models):
if max([len(v) for k,v in r.items()]) == 0:
rem.append(i)
        for i in reversed(rem):
            A._models.pop(i)
            A._settings.pop(i)
return copy.deepcopy(A)
def filter_by_results(self, key, value, n=False):
"""Return a filtered copy of the Evaluate object.
Filter the model output data contained in the object. Data is filtered
based on a key, value combination. This method is useful for
subsetting data in preparation for plotting. It works similarly to
filter_by_index.
Example:
e = Evaluate()
e.load_data('./my_data/')
            f = e.filter_by_results('step_desc', 'degas')
# f includes only data from the degassing steps
Args:
key: Key in model output dicts to filter by.
value: Value to filter 'key' by. Accepts substrings for step_desc.
n: Optional boolean argument. If True, the filter is inverted.
Default False.
Returns:
A filtered copy of the Evaluate object.
"""
A = Evaluate()
A._models = []
A._settings = self._settings
# filter object
for i, m in enumerate(self._models):
fil = {}
a = m[key]
for j, v in m.items():
if len(v) == len(a):
if n:
fil[j] = [v[k] for k in range(len(v)) if value not in a[k]]
else:
fil[j] = [v[k] for k in range(len(v)) if value in a[k]]
else:
fil[j] = v
A._models.append(fil)
return copy.deepcopy(A)
def filter_by_settings(self, setting, value, n=False):
"""Return a filtered copy of the Evaluate object.
The returned Evaluate object contains a subset of the models in the
original. Models are filtered based on the settings, value combination
provided. Models that meet the critera have their data included in the
copy.
Args:
setting (str): model parameter to filter by (e.g. 'gas_volume')
value: value of 'setting' to include (e.g. 20).
n: Optional boolean argument. If True, the filter is inverted.
Default False.
Returns:
A filtered copy of the Evaluate object.
"""
A = Evaluate()
A._models = []
A._settings = []
for i, b in enumerate(self._settings):
d = b.dict()
if n:
if isinstance(value, str):
if value not in d[setting]:
A._models.append(self._models[i])
A._settings.append(b)
else:
if d[setting] != value:
A._models.append(self._models[i])
A._settings.append(b)
else:
if isinstance(value, str):
if value in d[setting]:
A._models.append(self._models[i])
A._settings.append(b)
else:
if d[setting] == value:
A._models.append(self._models[i])
A._settings.append(b)
return copy.deepcopy(A)
def plot_models(self, *args, x_key=None, y_key=None,
label_with=None, ax=None, **kwargs):
"""Plot Model results with one series per model.
Creates a simple matplotlib figure. Useful, for example, to quickly
display the degassing evolution of a suite of models. May be combined
with filter_by_settings, filter_by_results or filter_by_index to
include / exclude certain parts of the dataset.
Args:
*args: Optional formatting parameters passed to pyplot.plot()
x_key: Model output to plot on x-axis
y_key: Model output to plot on y-axis
label_with (optional): Model input parameter to annotate series
ax (optional): Add data to a pre-existing matplotlib axis
**kwargs (optional): kwargs to be passed to pyplot.plot()
Returns:
Axes object.
"""
sns.set_style('darkgrid')
if not ax:
fig, ax = plt.subplots()
ax.set_ylabel(y_key)
ax.set_xlabel(x_key)
for i, m in enumerate(self._models):
if label_with:
s = self._settings[i].get(label_with)
a = "%s: %s" % (label_with, s)
ax.plot(m[x_key], m[y_key], label = a, *args, **kwargs)
ax.legend(prop={'size':6})
else:
ax.plot(m[x_key], m[y_key], *args, **kwargs)
return ax
def plot_points(self, *args, x_key=None, y_key=None, plot_index=1,
label_with=None, ax=None, **kwargs):
"""Plot Model results for a point-by-point inter-model comparison.
Useful, for example, to show different bedrock dissolution products
across a suite of models.
Args:
x_key: Model output or setting parameter to plot on x-axis
y_key: Model output to plot on y-axis
*args (optional): Formatting parameters passed to pyplot.plot()
plot_index: Which point to plot. e.g. 0 (initial water), 1 (bedrock
dissolution product), -1 (fully degassed solution)
label_with (optional): Model input parameter to label points with
ax (optional): Add data to a pre-existing plot
**kwargs (optional): kwargs to be passed to pyplot.plot()
Returns:
Axes object.
"""
sns.set_style('darkgrid')
x_vals = []
y_vals = []
labels = []
# look for x_key in results
if x_key in list(self._models[0].keys()):
for i, m in enumerate(self._models):
try:
x_vals.append(m[x_key][plot_index])
except IndexError:
pass
# otherwise, find it in settings
else:
for i, s in enumerate(self._settings):
x_vals.append(s.dict()[x_key])
for i, m in enumerate(self._models):
if label_with:
s = self._settings[i].dict()
try:
y_vals.append(m[y_key][plot_index])
if label_with:
labels.append(s[label_with])
except IndexError:
pass
if not ax:
fig, ax = plt.subplots()
ax.set_ylabel(y_key)
ax.set_xlabel(x_key)
ax.plot(x_vals, y_vals, *args, **kwargs)
if label_with:
for lab, x, y in zip(labels, x_vals, y_vals):
ax.annotate('%s=%s' % (label_with, lab),
xy=(x, y), fontsize=8)
return ax
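# --- Illustrative usage sketch (not part of the original module). The
# --- directory and the output/settings keys below are hypothetical; the
# --- directory must contain the settings.pkl and results.pkl files written
# --- by a Cavecalc run.
#
# e = Evaluate()
# e.load_data('./my_model_output/')
# degassed = e.filter_by_results('step_desc', 'degas')
# ax = degassed.plot_models(x_key='some_x_output', y_key='some_y_output',
#                           label_with='gas_volume', marker='o')
# plt.show()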
| 35.112554
| 98
| 0.520898
|
9a54ce517b3f6227af1ffbc3e33d33bc8d17de3d
| 18,167
|
py
|
Python
|
njunmt/nmt_experiment.py
|
whr94621/NJUNMT-tf
|
29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118
|
[
"Apache-2.0"
] | 1
|
2018-10-27T12:04:03.000Z
|
2018-10-27T12:04:03.000Z
|
njunmt/nmt_experiment.py
|
whr94621/NJUNMT-tf
|
29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118
|
[
"Apache-2.0"
] | null | null | null |
njunmt/nmt_experiment.py
|
whr94621/NJUNMT-tf
|
29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Natural Language Processing Group, Nanjing University, zhaocq.nlp@gmail.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Define base experiment class and basic experiment classes. """
import time
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from njunmt.data.dataset import Dataset
from njunmt.data.text_inputter import ParallelTextInputter
from njunmt.data.text_inputter import TextLineInputter
from njunmt.data.vocab import Vocab
from njunmt.inference.decode import evaluate_with_attention
from njunmt.inference.decode import infer
from njunmt.models.model_builder import model_fn
from njunmt.utils.configurable import ModelConfigs
from njunmt.utils.configurable import parse_params
from njunmt.utils.configurable import print_params
from njunmt.utils.configurable import update_eval_metric
from njunmt.utils.configurable import update_infer_params
from njunmt.utils.constants import ModeKeys
from njunmt.utils.metrics import multi_bleu_score_from_file
@six.add_metaclass(ABCMeta)
class Experiment:
""" Define base experiment class. """
def __init__(self):
"""Initializes. """
pass
@abstractmethod
def run(self, **kwargs):
""" Runs the process. """
raise NotImplementedError
@staticmethod
def _build_default_session():
""" Returns default tf.Session(). """
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = False
config.allow_soft_placement = True
return tf.Session(config=config)
class TrainingExperiment(Experiment):
""" Define an experiment for training. """
def __init__(self, model_configs):
""" Initializes the training experiment.
Args:
model_configs: A dictionary of all configurations.
"""
super(TrainingExperiment, self).__init__()
# training options
training_options = parse_params(
params=model_configs["train"],
default_params=self.default_training_options())
# for datasets
datasets_params = parse_params(
params=model_configs["data"],
default_params=self.default_datasets_params())
self._model_configs = model_configs
self._model_configs["train"] = training_options
self._model_configs["data"] = datasets_params
print_params("Datasets: ", self._model_configs["data"])
print_params("Training parameters: ", self._model_configs["train"])
ModelConfigs.dump(self._model_configs, self._model_configs["model_dir"])
@staticmethod
def default_datasets_params():
""" Returns a dictionary of default "dataset" parameters. """
return {
"source_words_vocabulary": None,
"target_words_vocabulary": None,
"train_features_file": None,
"train_labels_file": None,
"eval_features_file": None,
"eval_labels_file": None,
"source_bpecodes": {},
"target_bpecodes": {}
}
@staticmethod
def default_training_options():
""" Returns a dictionary of default training options. """
return {
"batch_size": 80,
"batch_tokens_size": None,
"save_checkpoint_steps": 1000,
"train_steps": 10000000,
"eval_steps": 100,
"update_cycle": 1, # for pseudo multi-gpu
"pretrain_model": None,
"reverse_target": False,
"maximum_features_length": None,
"maximum_labels_length": None,
"shuffle_every_epoch": None
}
def run(self):
""" Trains the model. """
# vocabulary
self._vocab_source = Vocab(
filename=self._model_configs["data"]["source_words_vocabulary"],
bpe_codes=self._model_configs["data"]["source_bpecodes"],
reverse_seq=False)
self._vocab_target = Vocab(
filename=self._model_configs["data"]["target_words_vocabulary"],
bpe_codes=self._model_configs["data"]["target_bpecodes"],
reverse_seq=self._model_configs["train"]["reverse_target"])
# build dataset
dataset = Dataset(
self._vocab_source,
self._vocab_target,
train_features_file=self._model_configs["data"]["train_features_file"],
train_labels_file=self._model_configs["data"]["train_labels_file"],
eval_features_file=self._model_configs["data"]["eval_features_file"],
eval_labels_file=self._model_configs["data"]["eval_labels_file"])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
estimator_spec = model_fn(model_configs=self._model_configs,
mode=ModeKeys.TRAIN,
dataset=dataset,
name=self._model_configs["problem_name"])
train_ops = estimator_spec.train_ops
hooks = estimator_spec.training_hooks
# build training session
sess = tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(),
checkpoint_dir=None,
master="",
config=config),
hooks=hooks)
train_text_inputter = ParallelTextInputter(
dataset,
"train_features_file",
"train_labels_file",
self._model_configs["train"]["batch_size"],
self._model_configs["train"]["batch_tokens_size"],
self._model_configs["train"]["shuffle_every_epoch"],
fill_full_batch=True)
train_data = train_text_inputter.make_feeding_data(
input_fields=estimator_spec.input_fields,
maximum_features_length=self._model_configs["train"]["maximum_features_length"],
maximum_labels_length=self._model_configs["train"]["maximum_labels_length"])
eidx = [0, 0]
update_cycle = [self._model_configs["train"]["update_cycle"], 1]
def step_fn(step_context):
step_context.session.run(train_ops["zeros_op"])
try:
while update_cycle[0] != update_cycle[1]:
data = train_data.next()
step_context.session.run(
train_ops["collect_op"], feed_dict=data["feed_dict"])
update_cycle[1] += 1
data = train_data.next()
update_cycle[1] = 1
return step_context.run_with_hooks(
train_ops["train_op"], feed_dict=data["feed_dict"])
except StopIteration:
eidx[1] += 1
while not sess.should_stop():
if eidx[0] != eidx[1]:
tf.logging.info("STARTUP Epoch {}".format(eidx[1]))
eidx[0] = eidx[1]
sess.run_step_fn(step_fn)
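# --- Illustrative aside (not part of the original module): the update_cycle
# --- mechanism above accumulates gradients over several sub-batches before a
# --- single parameter update (pseudo multi-GPU). Below is a minimal,
# --- self-contained sketch of the same accumulate-then-apply pattern; every
# --- name in it is hypothetical.
def _gradient_accumulation_sketch(sub_batch_grads, weights, learning_rate=0.1):
    """Sums gradients over an update cycle, then applies one update."""
    accum = [0.0 for _ in weights]                       # zeros_op
    for grads in sub_batch_grads:                        # collect_op per sub-batch
        accum = [a + g for a, g in zip(accum, grads)]
    return [w - learning_rate * a for w, a in zip(weights, accum)]  # train_op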
class InferExperiment(Experiment):
""" Define an experiment for inference. """
def __init__(self, model_configs):
""" Initializes the inference experiment.
Args:
model_configs: A dictionary of all configurations.
"""
super(InferExperiment, self).__init__()
infer_options = parse_params(
params=model_configs["infer"],
default_params=self.default_inference_options())
infer_data = []
for item in model_configs["infer_data"]:
infer_data.append(parse_params(
params=item,
default_params=self.default_inferdata_params()))
self._model_configs = model_configs
self._model_configs["infer"] = infer_options
self._model_configs["infer_data"] = infer_data
print_params("Inference parameters: ", self._model_configs["infer"])
print_params("Inference datasets: ", self._model_configs["infer_data"])
@staticmethod
def default_inference_options():
""" Returns a dictionary of default inference options. """
return {
"source_words_vocabulary": None,
"target_words_vocabulary": None,
"source_bpecodes": {},
"target_bpecodes": {},
"batch_size": 32,
"beam_size": 10,
"length_penalty": -1.0,
"maximum_labels_length": 150,
"delimiter": " ",
"char_level": False}
@staticmethod
def default_inferdata_params():
""" Returns a dictionary of default infer data parameters. """
return {
"features_file": None,
"output_file": None,
"labels_file": None,
"output_attention": False}
def run(self):
"""Infers data files. """
# build datasets
self._vocab_source = Vocab(
filename=self._model_configs["infer"]["source_words_vocabulary"],
bpe_codes=self._model_configs["infer"]["source_bpecodes"],
reverse_seq=False)
self._vocab_target = Vocab(
filename=self._model_configs["infer"]["target_words_vocabulary"],
bpe_codes=self._model_configs["infer"]["target_bpecodes"],
reverse_seq=self._model_configs["train"]["reverse_target"])
# build dataset
dataset = Dataset(
self._vocab_source,
self._vocab_target,
eval_features_file=[p["features_file"] for p
in self._model_configs["infer_data"]])
self._model_configs = update_infer_params(
self._model_configs,
beam_size=self._model_configs["infer"]["beam_size"],
maximum_labels_length=self._model_configs["infer"]["maximum_labels_length"],
length_penalty=self._model_configs["infer"]["length_penalty"])
# build model
estimator_spec = model_fn(model_configs=self._model_configs,
mode=ModeKeys.INFER,
dataset=dataset,
name=self._model_configs["problem_name"])
predict_op = estimator_spec.predictions
sess = self._build_default_session()
text_inputter = TextLineInputter(
dataset=dataset,
data_field_name="eval_features_file",
batch_size=self._model_configs["infer"]["batch_size"])
# reload
checkpoint_path = tf.train.latest_checkpoint(self._model_configs["model_dir"])
if checkpoint_path:
tf.logging.info("reloading models...")
saver = tf.train.Saver()
saver.restore(sess, checkpoint_path)
else:
raise OSError("File NOT Found. Fail to find checkpoint file from: {}"
.format(self._model_configs["model_dir"]))
tf.logging.info("Start inference.")
overall_start_time = time.time()
for infer_data, param in zip(text_inputter.make_feeding_data(
input_fields=estimator_spec.input_fields),
self._model_configs["infer_data"]):
tf.logging.info("Infer Source File: {}.".format(param["features_file"]))
start_time = time.time()
infer(sess=sess,
prediction_op=predict_op,
infer_data=infer_data,
output=param["output_file"],
vocab_source=self._vocab_source,
vocab_target=self._vocab_target,
delimiter=self._model_configs["infer"]["delimiter"],
output_attention=param["output_attention"],
tokenize_output=self._model_configs["infer"]["char_level"],
verbose=True)
tf.logging.info("FINISHED {}. Elapsed Time: {}."
.format(param["features_file"], str(time.time() - start_time)))
if param["labels_file"] is not None:
bleu_score = multi_bleu_score_from_file(
hypothesis_file=param["output_file"],
references_files=param["labels_file"],
char_level=self._model_configs["infer"]["char_level"])
tf.logging.info("BLEU score (%s): %.2f"
% (param["features_file"], bleu_score))
tf.logging.info("Total Elapsed Time: %s" % str(time.time() - overall_start_time))
class EvalExperiment(Experiment):
""" Define an experiment for evaluation using loss functions. """
def __init__(self, model_configs):
""" Initializes the evaluation experiment.
Args:
model_configs: A dictionary of all configurations.
"""
super(EvalExperiment, self).__init__()
eval_options = parse_params(
params=model_configs["eval"],
default_params=self.default_evaluation_options())
eval_data = []
for item in model_configs["eval_data"]:
eval_data.append(parse_params(
params=item,
default_params=self.default_evaldata_params()))
self._model_configs = model_configs
self._model_configs["eval"] = eval_options
self._model_configs["eval_data"] = eval_data
print_params("Evaluation parameters: ", self._model_configs["eval"])
print_params("Evaluation datasets: ", self._model_configs["eval_data"])
@staticmethod
def default_evaluation_options():
""" Returns a dictionary of default inference options. """
return {
"metric": None,
"source_words_vocabulary": None,
"target_words_vocabulary": None,
"source_bpecodes": {},
"target_bpecodes": {},
"batch_size": 32}
@staticmethod
def default_evaldata_params():
""" Returns a dictionary of default infer data parameters. """
return {
"features_file": None,
"labels_file": None,
"output_attention": False}
def run(self):
"""Infers data files. """
# build datasets
self._vocab_source = Vocab(
filename=self._model_configs["eval"]["source_words_vocabulary"],
bpe_codes=self._model_configs["eval"]["source_bpecodes"],
reverse_seq=False)
self._vocab_target = Vocab(
filename=self._model_configs["eval"]["target_words_vocabulary"],
bpe_codes=self._model_configs["eval"]["target_bpecodes"],
reverse_seq=self._model_configs["train"]["reverse_target"])
# build dataset
dataset = Dataset(
self._vocab_source,
self._vocab_target,
eval_features_file=[p["features_file"] for p
in self._model_configs["eval_data"]],
eval_labels_file=[p["labels_file"] for p
in self._model_configs["eval_data"]])
# update evaluation model config
self._model_configs, metric_str = update_eval_metric(
self._model_configs, self._model_configs["eval"]["metric"])
tf.logging.info("Evaluating using {}".format(metric_str))
# build model
estimator_spec = model_fn(model_configs=self._model_configs,
mode=ModeKeys.EVAL,
dataset=dataset,
name=self._model_configs["problem_name"])
sess = self._build_default_session()
text_inputter = ParallelTextInputter(
dataset=dataset,
features_field_name="eval_features_file",
labels_field_name="eval_labels_file",
batch_size=self._model_configs["eval"]["batch_size"],
bucketing=(sum([p["output_attention"]
for p in self._model_configs["eval_data"]]) == 0))
# reload
checkpoint_path = tf.train.latest_checkpoint(self._model_configs["model_dir"])
if checkpoint_path:
tf.logging.info("reloading models...")
saver = tf.train.Saver()
saver.restore(sess, checkpoint_path)
else:
raise OSError("File NOT Found. Fail to load checkpoint file from: {}"
.format(self._model_configs["model_dir"]))
tf.logging.info("Start evaluation.")
overall_start_time = time.time()
for eval_data, param in zip(text_inputter.make_feeding_data(
input_fields=estimator_spec.input_fields, in_memory=True),
self._model_configs["eval_data"]):
tf.logging.info("Evaluation Source File: {}.".format(param["features_file"]))
tf.logging.info("Evaluation Target File: {}.".format(param["labels_file"]))
start_time = time.time()
result = evaluate_with_attention(
sess=sess,
loss_op=estimator_spec.loss,
eval_data=eval_data,
vocab_source=self._vocab_source,
vocab_target=self._vocab_target,
attention_op=estimator_spec.predictions \
if param["output_attention"] else None,
output_filename_prefix=param["labels_file"].strip().split("/")[-1])
tf.logging.info("FINISHED {}. Elapsed Time: {}."
.format(param["features_file"], str(time.time() - start_time)))
tf.logging.info("Evaluation Score ({} on {}): {}"
.format(metric_str, param["features_file"], result))
tf.logging.info("Total Elapsed Time: %s" % str(time.time() - overall_start_time))
| 42.248837
| 93
| 0.609512
|
6866cc2c983700e197f6b7030dccddd9f12eda68
| 916
|
py
|
Python
|
src/tools/telemetry/PRESUBMIT.py
|
jxjnjjn/chromium
|
435c1d02fd1b99001dc9e1e831632c894523580d
|
[
"Apache-2.0"
] | 9
|
2018-09-21T05:36:12.000Z
|
2021-11-15T15:14:36.000Z
|
tools/telemetry/PRESUBMIT.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/PRESUBMIT.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2018-11-28T14:54:13.000Z
|
2020-07-02T07:36:07.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
PYLINT_BLACKLIST = []
PYLINT_DISABLED_WARNINGS = ['R0923', 'R0201', 'E1101']
def _CommonChecks(input_api, output_api):
results = []
old_sys_path = sys.path
try:
sys.path = [os.path.join('..', 'telemetry')] + sys.path
results.extend(input_api.canned_checks.RunPylint(
input_api, output_api,
black_list=PYLINT_BLACKLIST,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return results
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
| 26.171429
| 72
| 0.731441
|
001ae2977ae59124aebb77dde0fd0567a1a09097
| 16,644
|
py
|
Python
|
oggm/tests/funcs.py
|
jmalles/oggm
|
45eb5a9481c7704e8ade8410899e184191039b5e
|
[
"BSD-3-Clause"
] | null | null | null |
oggm/tests/funcs.py
|
jmalles/oggm
|
45eb5a9481c7704e8ade8410899e184191039b5e
|
[
"BSD-3-Clause"
] | null | null | null |
oggm/tests/funcs.py
|
jmalles/oggm
|
45eb5a9481c7704e8ade8410899e184191039b5e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
from distutils.util import strtobool
import hashlib
import numpy as np
import xarray as xr
import shapely.geometry as shpg
from scipy import optimize as optimization
# Local imports
import oggm
import oggm.cfg as cfg
from oggm.utils import (get_demo_file, mkdir, get_git_ident, get_sys_info,
get_env_info, apply_test_ref_tstars)
from oggm.workflow import execute_entity_task
from oggm.core import flowline
from oggm import tasks
from oggm.core.flowline import RectangularBedFlowline
_TEST_DIR = None
def dummy_constant_bed(hmax=3000., hmin=1000., nx=200, map_dx=100.,
widths=3.):
dx = 1.
surface_h = np.linspace(hmax, hmin, nx)
bed_h = surface_h
widths = surface_h * 0. + widths
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
def dummy_constant_bed_cliff(hmax=3000., hmin=1000., nx=200, map_dx=100.,
cliff_height=250.):
"""
I introduce a cliff in the bed to test the mass conservation of the models
Such a cliff could be real or a DEM error/artifact
"""
dx = 1.
surface_h = np.linspace(hmax, hmin, nx)
surface_h[50:] = surface_h[50:] - cliff_height
bed_h = surface_h
widths = surface_h * 0. + 1.
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
def dummy_constant_bed_obstacle(hmax=3000., hmin=1000., nx=200):
"""
I introduce an obstacle in the bed
"""
map_dx = 100.
dx = 1.
surface_h = np.linspace(hmax, hmin, nx)
cliff_height = 200.0
surface_h[60:] = surface_h[60:] + cliff_height
bed_h = surface_h
widths = surface_h * 0. + 1.
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
def dummy_bumpy_bed():
map_dx = 100.
dx = 1.
nx = 200
coords = np.arange(0, nx - 0.5, 1)
surface_h = np.linspace(3000, 1000, nx)
surface_h += 170. * np.exp(-((coords - 30) / 5) ** 2)
bed_h = surface_h
widths = surface_h * 0. + 3.
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
def dummy_noisy_bed(map_dx=100.):
dx = 1.
nx = 200
np.random.seed(42)
coords = np.arange(0, nx - 0.5, 1)
surface_h = np.linspace(3000, 1000, nx)
surface_h += 100 * np.random.rand(nx) - 50.
bed_h = surface_h
widths = surface_h * 0. + 3.
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
def dummy_parabolic_bed(hmax=3000., hmin=1000., nx=200, map_dx=100.,
default_shape=5.e-3,
from_other_shape=None, from_other_bed=None):
dx = 1.
surface_h = np.linspace(hmax, hmin, nx)
bed_h = surface_h * 1
shape = surface_h * 0. + default_shape
if from_other_shape is not None:
shape[0:len(from_other_shape)] = from_other_shape
if from_other_bed is not None:
bed_h[0:len(from_other_bed)] = from_other_bed
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.ParabolicBedFlowline(line, dx, map_dx, surface_h,
bed_h, shape)]
def dummy_mixed_bed(deflambdas=3.5, map_dx=100., mixslice=None):
dx = 1.
nx = 200
surface_h = np.linspace(3000, 1000, nx)
bed_h = surface_h
shape = surface_h * 0. + 3.e-03
if mixslice:
shape[mixslice] = np.NaN
else:
shape[10:20] = np.NaN
is_trapezoid = ~np.isfinite(shape)
lambdas = shape * 0.
lambdas[is_trapezoid] = deflambdas
widths_m = bed_h * 0. + 10
section = bed_h * 0.
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
fls = flowline.MixedBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=shape,
is_trapezoid=is_trapezoid,
lambdas=lambdas, widths_m=widths_m)
return [fls]
def dummy_trapezoidal_bed(hmax=3000., hmin=1000., nx=200, map_dx=100.,
def_lambdas=2):
dx = 1.
surface_h = np.linspace(hmax, hmin, nx)
bed_h = surface_h
widths = surface_h * 0. + 1.6
lambdas = surface_h * 0. + def_lambdas
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.TrapezoidalBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths, lambdas)]
def dummy_width_bed():
"""This bed has a width of 6 during the first 20 points and then 3"""
map_dx = 100.
dx = 1.
nx = 200
surface_h = np.linspace(3000, 1000, nx)
bed_h = surface_h
widths = surface_h * 0. + 3.
widths[0:20] = 6.
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
def dummy_width_bed_tributary(map_dx=100., n_trib=1):
# bed with tributary glacier
dx = 1.
nx = 200
surface_h = np.linspace(3000, 1000, nx)
bed_h = surface_h
widths = surface_h * 0. + 3.
widths[0:20] = 6 / (n_trib + 1)
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
fl_0 = flowline.RectangularBedFlowline(line, dx, map_dx, surface_h, bed_h,
widths)
coords = np.arange(0, 19.1, 1)
line = shpg.LineString(np.vstack([coords, coords * 0. + 1]).T)
out = [fl_0]
for i in range(n_trib):
fl_1 = flowline.RectangularBedFlowline(line, dx, map_dx,
surface_h[0:20],
bed_h[0:20],
widths[0:20])
fl_1.set_flows_to(fl_0)
out.append(fl_1)
return out[::-1]
def dummy_bed_tributary_tail_to_head(map_dx=100., n_trib=1, small_cliff=False):
# bed with tributary glacier(s) flowing directly into their top
    # (for split flowline experiments)
dx = 1.
nx = 200
surface_h = np.linspace(3000, 1000, nx)
bed_h = surface_h
widths = surface_h * 0. + 3.
pix_id = np.linspace(20, 180, n_trib).round().astype(int)
fls = [flowline.RectangularBedFlowline(dx=dx, map_dx=map_dx,
surface_h=surface_h[:pix_id[0]],
bed_h=bed_h[:pix_id[0]],
widths=widths[:pix_id[0]])]
for i, pid in enumerate(pix_id):
if i == (len(pix_id) - 1):
eid = nx + 1
else:
eid = pix_id[i + 1]
dh = -100 if small_cliff else 0
fl = flowline.RectangularBedFlowline(dx=dx, map_dx=map_dx,
surface_h=surface_h[pid:eid] + dh,
bed_h=bed_h[pid:eid] + dh,
widths=widths[pid:eid])
fls[-1].set_flows_to(fl, to_head=True, check_tail=False)
fls.append(fl)
return fls
def bu_tidewater_bed(gridsize=200, gridlength=6e4, widths_m=600,
b_0=260, alpha=0.017, b_1=350, x_0=4e4, sigma=1e4,
water_level=0, split_flowline_before_water=None):
# Bassis & Ultee bed profile
dx_meter = gridlength / gridsize
x = np.arange(gridsize+1) * dx_meter
bed_h = b_0 - alpha * x + b_1 * np.exp(-((x - x_0) / sigma)**2)
bed_h += water_level
surface_h = bed_h
widths = surface_h * 0. + widths_m / dx_meter
if split_flowline_before_water is not None:
bs = np.min(np.nonzero(bed_h < 0)[0]) - split_flowline_before_water
fls = [RectangularBedFlowline(dx=1, map_dx=dx_meter,
surface_h=surface_h[:bs],
bed_h=bed_h[:bs],
widths=widths[:bs]),
RectangularBedFlowline(dx=1, map_dx=dx_meter,
surface_h=surface_h[bs:],
bed_h=bed_h[bs:],
widths=widths[bs:]),
]
fls[0].set_flows_to(fls[1], check_tail=False, to_head=True)
return fls
else:
return [
RectangularBedFlowline(dx=1, map_dx=dx_meter, surface_h=surface_h,
bed_h=bed_h, widths=widths)]
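# Illustrative sketch (not part of the original test utilities): the Bassis & Ultee bed
# built above is just a linear slope plus a Gaussian bump,
# bed_h = b_0 - alpha * x + b_1 * exp(-((x - x_0) / sigma) ** 2), shifted by water_level.
# The helper below re-evaluates that formula with the function's default parameters,
# assuming only that numpy is imported as np at module level, as elsewhere in this file.
def _demo_bu_profile_shape():
    """Return the minimum and maximum bed elevation of the default BU profile."""
    x = np.arange(201) * (6e4 / 200)  # default grid: 200 cells of 300 m
    bed_h = 260 - 0.017 * x + 350 * np.exp(-((x - 4e4) / 1e4) ** 2)
    return float(bed_h.min()), float(bed_h.max())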
def patch_minimal_download_oggm_files(*args, **kwargs):
"""A simple patch to make sure we don't download."""
raise RuntimeError('We should not be there in minimal mode')
def use_multiprocessing():
try:
return strtobool(os.getenv("OGGM_TEST_MULTIPROC", "False"))
except BaseException:
return False
def get_test_dir():
global _TEST_DIR
if _TEST_DIR is None:
s = get_git_ident()
s += ''.join([str(k) + str(v) for k, v in get_sys_info()])
s += ''.join([str(k) + str(v) for k, v in get_env_info()])
s = hashlib.md5(s.encode()).hexdigest()
out = os.path.join(cfg.PATHS['test_dir'], s)
if 'PYTEST_XDIST_WORKER' in os.environ:
out = os.path.join(out, os.environ.get('PYTEST_XDIST_WORKER'))
mkdir(out)
_TEST_DIR = out
        # If new ident, remove all other dirs to spare space
for d in os.listdir(cfg.PATHS['test_dir']):
if d and d != s:
shutil.rmtree(os.path.join(cfg.PATHS['test_dir'], d))
return _TEST_DIR
def init_hef(reset=False, border=40, logging_level='INFO'):
from oggm.core import gis, inversion, climate, centerlines, flowline
import geopandas as gpd
# test directory
testdir = os.path.join(get_test_dir(), 'tmp_border{}'.format(border))
if not os.path.exists(testdir):
os.makedirs(testdir)
reset = True
# Init
cfg.initialize(logging_level=logging_level)
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PARAMS['baseline_climate'] = ''
cfg.PATHS['working_dir'] = testdir
cfg.PARAMS['trapezoid_lambdas'] = 1
cfg.PARAMS['border'] = border
hef_file = get_demo_file('Hintereisferner_RGI5.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, reset=reset)
if 'inversion_glen_a' not in gdir.get_diagnostics():
reset = True
gdir = oggm.GlacierDirectory(entity, reset=reset)
if not reset:
return gdir
gis.define_glacier_region(gdir)
execute_entity_task(gis.glacier_masks, [gdir])
execute_entity_task(centerlines.compute_centerlines, [gdir])
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
climate.process_custom_climate_data(gdir)
mbdf = gdir.get_ref_mb_data()['ANNUAL_BALANCE']
res = climate.t_star_from_refmb(gdir, mbdf=mbdf)
climate.local_t_star(gdir, tstar=res['t_star'], bias=res['bias'])
climate.mu_star_calibration(gdir)
inversion.prepare_for_inversion(gdir)
ref_v = 0.573 * 1e9
glen_n = cfg.PARAMS['glen_n']
def to_optimize(x):
# For backwards compat
_fd = 1.9e-24 * x[0]
glen_a = (glen_n+2) * _fd / 2.
fs = 5.7e-20 * x[1]
v = inversion.mass_conservation_inversion(gdir, fs=fs,
glen_a=glen_a)
return (v - ref_v)**2
out = optimization.minimize(to_optimize, [1, 1],
bounds=((0.01, 10), (0.01, 10)),
tol=1e-4)['x']
_fd = 1.9e-24 * out[0]
glen_a = (glen_n+2) * _fd / 2.
fs = 5.7e-20 * out[1]
v = inversion.mass_conservation_inversion(gdir, fs=fs,
glen_a=glen_a,
write=True)
inversion.filter_inversion_output(gdir)
inversion.distribute_thickness_interp(gdir, varname_suffix='_interp')
inversion.distribute_thickness_per_altitude(gdir, varname_suffix='_alt')
flowline.init_present_time_glacier(gdir)
return gdir
def init_columbia(reset=False):
from oggm.core import gis, centerlines
import geopandas as gpd
# test directory
testdir = os.path.join(get_test_dir(), 'tmp_columbia')
if not os.path.exists(testdir):
os.makedirs(testdir)
reset = True
# Init
cfg.initialize()
cfg.PATHS['working_dir'] = testdir
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_Columbia.tif')
cfg.PARAMS['border'] = 10
cfg.PARAMS['use_kcalving_for_inversion'] = True
cfg.PARAMS['use_kcalving_for_run'] = True
entity = gpd.read_file(get_demo_file('01_rgi60_Columbia.shp')).iloc[0]
gdir = oggm.GlacierDirectory(entity, reset=reset)
if gdir.has_file('climate_historical'):
return gdir
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
tasks.process_dummy_cru_file(gdir, seed=0)
apply_test_ref_tstars()
return gdir
def init_columbia_eb(dir_name, reset=False):
from oggm.core import gis, centerlines
import geopandas as gpd
# test directory
testdir = os.path.join(get_test_dir(), dir_name)
mkdir(testdir, reset=reset)
# Init
cfg.initialize()
cfg.PATHS['working_dir'] = testdir
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_Columbia.tif')
cfg.PARAMS['border'] = 10
cfg.PARAMS['use_kcalving_for_inversion'] = True
cfg.PARAMS['use_kcalving_for_run'] = True
entity = gpd.read_file(get_demo_file('01_rgi60_Columbia.shp')).iloc[0]
gdir = oggm.GlacierDirectory(entity)
if gdir.has_file('climate_historical'):
return gdir
gis.define_glacier_region(gdir)
gis.simple_glacier_masks(gdir)
centerlines.elevation_band_flowline(gdir)
centerlines.fixed_dx_elevation_band_flowline(gdir)
centerlines.compute_downstream_line(gdir)
tasks.process_dummy_cru_file(gdir, seed=0)
apply_test_ref_tstars()
return gdir
def characs_apply_func(gdir, d):
# add some new stats to the mix
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
glc_ext = ds['glacier_ext'].values
glc_mask = ds['glacier_mask'].values
d['glc_ext_num_perc'] = np.sum(glc_ext) / np.sum(glc_mask)
class TempEnvironmentVariable:
"""Context manager for environment variables
https://gist.github.com/devhero/7e015f0ce0abacab3880d33c26f07674
"""
def __init__(self, **kwargs):
self.envs = kwargs
def __enter__(self):
self.old_envs = {}
for k, v in self.envs.items():
self.old_envs[k] = os.environ.get(k)
if v is not None:
os.environ[k] = v
elif k in os.environ:
del os.environ[k]
def __exit__(self, *args):
for k, v in self.old_envs.items():
if v is not None:
os.environ[k] = v
elif k in os.environ:
del os.environ[k]
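# Minimal usage sketch for the context manager above (illustrative only): set one
# variable and unset another inside the block; both are restored on exit.
def _demo_temp_environment_variable():
    with TempEnvironmentVariable(OGGM_TEST_MULTIPROC='True', PYTEST_XDIST_WORKER=None):
        assert os.environ['OGGM_TEST_MULTIPROC'] == 'True'
        assert 'PYTEST_XDIST_WORKER' not in os.environ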
| 32.255814 | 79 | 0.600697 |
9651ba5959acabff0d30fcc92bb803ae82d30cd4 | 1,497 | py | Python | PaddleOCR/ppocr/metrics/cls_metric.py | TangJiamin/Ultra_light_OCR_No.23 | 594aa286dc2f88614141838ce45c164647226cdb | ["Apache-2.0"] | null | null | null | PaddleOCR/ppocr/metrics/cls_metric.py | TangJiamin/Ultra_light_OCR_No.23 | 594aa286dc2f88614141838ce45c164647226cdb | ["Apache-2.0"] | null | null | null | PaddleOCR/ppocr/metrics/cls_metric.py | TangJiamin/Ultra_light_OCR_No.23 | 594aa286dc2f88614141838ce45c164647226cdb | ["Apache-2.0"] | null | null | null |
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ClsMetric(object):
def __init__(self, main_indicator='acc', **kwargs):
self.main_indicator = main_indicator
self.reset()
def __call__(self, pred_label, *args, **kwargs):
preds, labels = pred_label
correct_num = 0
all_num = 0
for (pred, pred_conf), (target, _) in zip(preds, labels):
if pred == target:
correct_num += 1
all_num += 1
self.correct_num += correct_num
self.all_num += all_num
return {'acc': correct_num / all_num, }
def get_metric(self):
"""
return metrics {
'acc': 0
}
"""
acc = self.correct_num / self.all_num
self.reset()
return {'acc': acc}
def reset(self):
self.correct_num = 0
self.all_num = 0
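# Minimal usage sketch (illustrative, not part of PaddleOCR): feed one batch of
# (prediction, confidence) pairs against (label, confidence) pairs, then read the
# accumulated accuracy. The '0'/'180' class names are placeholders.
def _demo_cls_metric():
    metric = ClsMetric()
    preds = [('0', 0.9), ('180', 0.8), ('0', 0.7)]
    labels = [('0', 1.0), ('0', 1.0), ('0', 1.0)]
    batch_result = metric((preds, labels))    # {'acc': 0.666...} for this batch
    return batch_result, metric.get_metric()  # get_metric() also resets the counters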
| 32.543478 | 75 | 0.60521 |
10af9059de501a3f97b72296c26fce1977c7e089 | 2,728 | py | Python | phonotactics/nucleus/ver_1_5_9.py | shlomo-Kallner/coventreiya | aa0773693220025f8d2c23644a2c5d9d884773e9 | ["Apache-2.0"] | null | null | null | phonotactics/nucleus/ver_1_5_9.py | shlomo-Kallner/coventreiya | aa0773693220025f8d2c23644a2c5d9d884773e9 | ["Apache-2.0"] | null | null | null | phonotactics/nucleus/ver_1_5_9.py | shlomo-Kallner/coventreiya | aa0773693220025f8d2c23644a2c5d9d884773e9 | ["Apache-2.0"] | null | null | null |
from nucleus import Nucleus
from nucleus import register
from coventreiya.utils.fsm import fsm_state
from coventreiya.phonology.vowels import Vowels
from coventreiya.phonology.vowels.ver_1_5_9 import ver_1_5_9 as vols_ver
from coventreiya.phonology.vowels.Vowels import Height, Backness, Rounding
#Note: There is no difference between version 1.5.8 and version 1.5.9
# EXCEPT the Version NUMBER!!!
class ver_1_5_9( Nucleus ):
__doc__ = "The phonotactics used by Morphosyntax version 1.3.5 ."
def __init__(self, vols_=vols_ver):
min_length = 1
max_length = 4
if issubclass(vols_, Vowels):
self.__vols_ = vols_()
else:
raise TypeError()
super().__init__(min_length, max_length,1,5,9)
def finite_state_machine(self):
""" Generates the full Finite State Machine """
""" and returns it and the Starting State as a Tuple. """
# setting up the Finite State Machine for parsing...
# for parse string "(S)V1(V1)(V1)"
# will be using:
# 1 for S
# 2 for V1
# in the generator.
fsm_ = [ fsm_state(str(x),False) for x in range(0,6) ]
fsm_[0].remap(False, {1 : fsm_[0],
2 : fsm_[0]} )
fsm_[1].remap(False, {1 : fsm_[2],
2 : fsm_[3]} )
fsm_[2].remap(False, {1 : fsm_[0],
2 : fsm_[3]} )
fsm_[3].remap(True, {1 : fsm_[0],
2 : fsm_[4]} )
fsm_[4].remap(True, {1 : fsm_[0],
2 : fsm_[5]} )
fsm_[5].remap(True, {1 : fsm_[0],
2 : fsm_[0]} )
        return (fsm_, fsm_[1])
def categories(self):
''' Generate the Categories Lists. '''
# setting up the Finite State Machine for parsing...
# for parse string "(S)V1(V1)(V1)"
# will be using:
# 1 for S
# 2 for V1
# in the generator.
cat_ = [ list() for x in range(0,2) ]
# cat_[0] is S
tmp = self.__vols_.get_phone(Height.Mid_,
Backness.Central_,
Rounding.Unrounded_)
cat_[0].append(tmp)
# cat_[1] is V1
cat_[1].extend(self.__vols_.phonemes())
cat_[1].remove(tmp)
return cat_
def replacment_map(self):
''' Generate the Replacement Map. '''
cat_ = self.categories()
return { 0 : cat_[0],
1 : cat_[1] }
def gen_ver_1_5_9():
return ver_1_5_9()
nuc_ = register( ver_1_5_9(), gen_ver_1_5_9 )
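# Illustrative note (independent of the coventreiya fsm_state API, and assuming the first
# argument to remap marks accepting states): the machine above accepts the parse string
# "(S)V1(V1)(V1)", i.e. an optional S followed by one to three V1. A throwaway regex
# sketch of the same acceptance rule, with '1' standing for S and '2' for V1 as in the
# generator comments:
def _demo_nucleus_pattern():
    """Regex sketch of the acceptance rule: optional S ('1') then one to three V1 ('2')."""
    import re
    pattern = re.compile(r'^1?2{1,3}$')
    return [bool(pattern.match(s)) for s in ('2', '1222', '1', '22222')]  # [True, True, False, False]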
| 34.531646 | 75 | 0.519062 |
1dcbed38345097d0b12af7a2fb46f188739d62fd | 402 | py | Python | 2020/2/1.py | AmauryLiet/CodeJam | 3e02bce287e3c640d89eea4b0d5878319c79d59b | ["MIT"] | null | null | null | 2020/2/1.py | AmauryLiet/CodeJam | 3e02bce287e3c640d89eea4b0d5878319c79d59b | ["MIT"] | null | null | null | 2020/2/1.py | AmauryLiet/CodeJam | 3e02bce287e3c640d89eea4b0d5878319c79d59b | ["MIT"] | null | null | null |
from itertools import count
N = int(input())
for case_id in range(1, N + 1):
left, right = map(int, input().split())
for customer_id in count(1):
if customer_id > max(left, right):
break
if left >= right:
left -= customer_id
else:
right -= customer_id
print('Case #{}: {} {} {}'.format(case_id, customer_id - 1, left, right))
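# Worked example (illustrative; mirrors the loop above without reading stdin): for stacks
# "3 2", customer 1 takes 1 from the larger (left) stack, customer 2 takes 2 from the left
# stack, and customer 3 would need 3 > max(0, 2) candies, so the answer is "2 0 2".
def _trace_example(left=3, right=2):
    for customer_id in count(1):
        if customer_id > max(left, right):
            return customer_id - 1, left, right
        if left >= right:
            left -= customer_id
        else:
            right -= customer_id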
| 23.647059 | 77 | 0.544776 |
148a7faf41d6b86792265b2656ad7205b7921988 | 106 | py | Python | instagram_api/exceptions/request_headers_too_large.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | ["MIT"] | 13 | 2019-08-07T21:24:34.000Z | 2020-12-12T12:23:50.000Z | instagram_api/exceptions/request_headers_too_large.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | ["MIT"] | null | null | null | instagram_api/exceptions/request_headers_too_large.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | ["MIT"] | null | null | null |
from .request import RequestException
class RequestHeadersTooLargeException(RequestException):
pass
| 17.666667 | 56 | 0.839623 |
fbf89a630337012b3c16d1a8e6398a11b414c881 | 3,909 | py | Python | demo/custom/demo.py | ZJCV/TSN | ec6ad668d20f477df44eab7035e2553d95a835f3 | ["Apache-2.0"] | 2 | 2021-11-29T10:29:40.000Z | 2022-03-22T02:39:44.000Z | demo/custom/demo.py | ZJCV/TSN | ec6ad668d20f477df44eab7035e2553d95a835f3 | ["Apache-2.0"] | 1 | 2022-03-13T09:28:52.000Z | 2022-03-13T09:28:52.000Z | demo/custom/demo.py | ZJCV/TSN | ec6ad668d20f477df44eab7035e2553d95a835f3 | ["Apache-2.0"] | 1 | 2021-03-09T08:14:22.000Z | 2021-03-09T08:14:22.000Z |
# -*- coding: utf-8 -*-
"""
@date: 2020/10/30 3:42 PM
@file: visualization.py
@author: zj
@description:
"""
import numpy as np
import torch
import torch.multiprocessing as mp
import time
from tsn.util.parser import load_test_config, parse_test_args
from demo.multiprocess.stop_token import _StopToken
from demo.multiprocess.manager.video_provider import VideoProvider
from demo.multiprocess.manager import VideoManager
from demo.multiprocess.predictor.action_predictor import ActionPredictor
from demo.multiprocess.visualizer.video_visualizor import VideoVisualizer
time_decay = 0.001
def read(cfg, task_queue):
provider = VideoProvider(cfg)
for able_to_read, task in provider:
if not able_to_read:
task_queue.put(_StopToken())
break
start = time.time()
task_queue.put(task, block=False)
print('one put task_queue need: {}'.format(time.time() - start))
provider.clean()
time.sleep(100)
def write(cfg, result_queue):
manager = VideoManager(cfg)
while True:
start = time.time()
if result_queue.empty():
time.sleep(time_decay)
continue
task = result_queue.get()
end = time.time()
print('one get result_queue need: {}'.format(end - start))
if isinstance(task, _StopToken):
break
ret = manager(task)
print('one compute manager need: {}'.format(time.time() - end))
manager.clean()
def predict(cfg, task_queue, predict_queue):
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
predictor = ActionPredictor(cfg)
while True:
start = time.time()
if task_queue.empty():
time.sleep(time_decay)
continue
task = task_queue.get()
end = time.time()
print('one get task_queue need: {}'.format(end - start))
if isinstance(task, _StopToken):
predict_queue.put(_StopToken())
break
task = predictor(task)
end1 = time.time()
print('one task predict need: {}'.format(end1 - end))
predict_queue.put(task, block=False)
print('one put predict_queue need: {}'.format(time.time() - end1))
time.sleep(100)
def visualize(cfg, predict_queue, result_queue):
visualizer = VideoVisualizer(cfg)
while True:
start = time.time()
if predict_queue.empty():
time.sleep(time_decay)
continue
task = predict_queue.get()
end = time.time()
print('one get predict_queue need: {}'.format(end - start))
if isinstance(task, _StopToken):
result_queue.put(_StopToken())
break
task = visualizer(task)
end1 = time.time()
print('one compute visualizer need: {}'.format(end1 - end))
result_queue.put(task, block=False)
print('one put result_queue need: {}'.format(time.time() - end1))
time.sleep(100)
def main():
args = parse_test_args()
cfg = load_test_config(args)
    # Task queue: holds data waiting to be predicted
task_queue = mp.Queue()
    # Prediction queue: holds data waiting to be visualized
predict_queue = mp.Queue()
    # Result queue: holds data waiting to be displayed
result_queue = mp.Queue()
process_read = mp.Process(target=read, args=(cfg, task_queue), daemon=True)
process_predict = mp.Process(target=predict, args=(cfg, task_queue, predict_queue), daemon=True)
process_visualize = mp.Process(target=visualize, args=(cfg, predict_queue, result_queue), daemon=True)
process_write = mp.Process(target=write, args=(cfg, result_queue))
process_write.start()
process_visualize.start()
process_predict.start()
time.sleep(2)
process_read.start()
process_read.join()
process_predict.join()
process_visualize.join()
process_write.join()
if __name__ == '__main__':
main()
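# Minimal sketch of the queue pipeline used above (illustrative only; independent of the
# tsn/demo packages): one process produces items, a second transforms them, and a sentinel
# object is forwarded down the chain to shut everything down, just as _StopToken is
# forwarded from read() -> predict() -> visualize() -> write().
class _Stop:
    pass
def _produce(q):
    for i in range(5):
        q.put(i)
    q.put(_Stop())
def _consume(q_in, q_out):
    while True:
        item = q_in.get()
        if isinstance(item, _Stop):
            q_out.put(_Stop())
            break
        q_out.put(item * 2)
def _run_pipeline_demo():
    q1, q2 = mp.Queue(), mp.Queue()
    p1 = mp.Process(target=_produce, args=(q1,))
    p2 = mp.Process(target=_consume, args=(q1, q2))
    p1.start()
    p2.start()
    results = []
    while True:
        item = q2.get()
        if isinstance(item, _Stop):
            break
        results.append(item)
    p1.join()
    p2.join()
    return results  # [0, 2, 4, 6, 8]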
| 28.326087 | 106 | 0.64748 |
d987d201713e1e1a859309d573eadae2bc752337 | 21,666 | py | Python | monk/tf_keras_1/finetune/level_13_updates_main.py | Shreyashwaghe/monk_v1 | 4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b | ["Apache-2.0"] | 2 | 2020-09-16T06:05:50.000Z | 2021-04-07T12:05:20.000Z | monk/tf_keras_1/finetune/level_13_updates_main.py | Shreyashwaghe/monk_v1 | 4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b | ["Apache-2.0"] | null | null | null | monk/tf_keras_1/finetune/level_13_updates_main.py | Shreyashwaghe/monk_v1 | 4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b | ["Apache-2.0"] | null | null | null |
from tf_keras_1.finetune.imports import *
from system.imports import *
from tf_keras_1.finetune.level_12_losses_main import prototype_losses
class prototype_updates(prototype_losses):
'''
Main class for all parametric update functions
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
##########################################################################################################################################################
@warning_checks(None, ["gte", 32, "lte", 1024], post_trace=True)
@error_checks(None, ["gt", 0], post_trace=True)
@accepts("self", int, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_input_size(self, input_size):
'''
Update input size.
Args:
input_size (int): New input size
Returns:
None
'''
self.system_dict = set_input_size(input_size, self.system_dict);
self.custom_print("Update: Input size - {}".format(self.system_dict["dataset"]["params"]["input_size"]));
self.custom_print("");
@warning_checks(None, ["lte", 128], post_trace=True)
@error_checks(None, ["gt", 0], post_trace=True)
@accepts("self", int, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_batch_size(self, batch_size):
'''
Update batch size.
Args:
batch_size (int): New batch size
Returns:
None
'''
self.system_dict = set_batch_size(batch_size, self.system_dict);
self.custom_print("Update: Batch size - {}".format(self.system_dict["dataset"]["params"]["batch_size"]));
self.custom_print("");
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_shuffle_data(self, shuffle):
'''
Update to shuffle data or not.
Args:
shuffle (bool): If True, will shuffle data
Returns:
None
'''
self.system_dict = set_data_shuffle(shuffle, self.system_dict);
self.custom_print("Update: Data shuffle - {}".format(self.system_dict["dataset"]["params"]["train_shuffle"]));
self.custom_print("");
@warning_checks(None, ["lte", psutil.cpu_count()], post_trace=True)
@error_checks(None, ["gt", 0], post_trace=True)
@accepts("self", int, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_num_processors(self, num_processors):
'''
Update num processors for data loader.
Args:
num_processors (int): Max CPUs for data sampling
Returns:
None
'''
self.system_dict = set_num_processors(num_processors, self.system_dict);
self.custom_print("Update: Num processors - {}".format(self.system_dict["dataset"]["params"]["num_workers"]));
self.custom_print("");
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_weighted_sampling(self, sample):
'''
Function inactive
'''
self.system_dict = set_weighted_sampling(sample, self.system_dict);
self.custom_print("Update: Weighted Sampling - {}".format(self.system_dict["dataset"]["params"]["weighted_sample"]));
self.custom_print("");
@warning_checks(None, ["gt", 0.5, "lt", 1], post_trace=True)
@error_checks(None, ["gt", 0, "lt", 1], post_trace=True)
@accepts("self", float, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_trainval_split(self, value):
'''
Update training-validation split
Args:
            value (float): Indicating train validation split
Division happens as follows:
train - total dataset * split * 100
val - total dataset * (1-split) * 100
Returns:
None
'''
if(self.system_dict["dataset"]["dataset_type"] == "train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv = self.system_dict["dataset"]["csv_train"];
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv = [self.system_dict["dataset"]["csv_train"], self.system_dict["dataset"]["csv_val"]];
else:
msg = "Dataset Type invalid.\n";
msg += "Cannot update split"
ConstraintsWarning(msg)
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, value, path_to_csv, self.system_dict["dataset"]["params"]["delimiter"]);
@warning_checks(None, dataset_path=None, split=["gt", 0.5, "lt", 1], path_to_csv=None, delimiter=None, post_trace=True)
@error_checks(None, dataset_path=["folder", 'r'], split=["gt", 0, "lt", 1], path_to_csv=["file", 'r'], delimiter=["in", [",", ";", "-", " "]], post_trace=True)
@accepts("self", dataset_path=[str, list], split=float, path_to_csv=[str, list, bool], delimiter=str, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_dataset(self, dataset_path=False, split=0.9, path_to_csv=False, delimiter=","):
'''
Update dataset path
Args:
dataset_path (str, list): Path to Dataset folder
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
path_to_csv (str, list): Path to csv file pointing towards images
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
            split (float): Indicating train validation split
Division happens as follows:
train - total dataset * split * 100
val - total dataset * (1-split) * 100
delimiter (str): Delimiter for csv file
Returns:
None
'''
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, split, path_to_csv, delimiter);
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", str, force=bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_model_name(self, model_name, force=False):
'''
Update model name
Args:
model_name (str): Select from available models. Check via List_Models() function
            force (bool): If True, update without asking for confirmation, even if a model has already been trained
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", [str, list], force=bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_model_path(self, model_path, force=False):
'''
        Update the path to saved model weights
        Args:
            model_path (str, list): Path to the saved model file(s) whose weights should be loaded
            force (bool): If True, update without asking for confirmation, even if a model has already been trained
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_use_gpu(self, gpu):
'''
Update to use gpu or cpu
Args:
gpu (bool): If True, then use GPU
Returns:
None
'''
self.system_dict = set_device(gpu, self.system_dict);
self.custom_print("Update: Use Gpu - {}".format(self.system_dict["model"]["params"]["use_gpu"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_use_pretrained(self, pretrained):
'''
Update to use pretrained wights or randomly initialized weights
Args:
pretrained (bool): If True, use pretrained weights
else, use randomly initialized weights
Returns:
None
'''
self.system_dict = set_pretrained(pretrained, self.system_dict);
self.custom_print("Update: Use pretrained - {}".format(self.system_dict["model"]["params"]["use_pretrained"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_base_network(self, freeze):
'''
Update whether freeze base network or not
Args:
freeze (bool): If True, then base network is non-trainable, works as a feature extractor
Returns:
None
'''
self.system_dict = set_freeze_base_network(freeze, self.system_dict);
self.custom_print("Update: Freeze Base Network - {}".format(self.system_dict["model"]["params"]["freeze_base_network"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, ["gte", 0], post_trace=True)
@accepts("self", int, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_layers(self, num_freeze):
'''
Update to freeze certain layers in the network
Args:
num_freeze (int): Number of layers to freeze in network starting from top
Returns:
None
'''
self.system_dict["model"]["params"]["num_freeze"] = num_freeze;
self.custom_print("Update: Freeze layers - {}".format(self.system_dict["model"]["params"]["num_freeze"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 100], post_trace=True)
@error_checks(None, ["gt", 0], post_trace=True)
@accepts("self", int, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_num_epochs(self, num_epochs):
'''
Update number of epochs to train the network
Args:
num_epochs (int): New number of epochs
Returns:
None
'''
self.system_dict = set_num_epochs(num_epochs, self.system_dict);
self.custom_print("Update: Num Epochs - {}".format(self.system_dict["hyper-parameters"]["num_epochs"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 1], post_trace=True)
@error_checks(None, ["gt", 0], post_trace=True)
@accepts("self", [int, float], post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_learning_rate(self, learning_rate):
'''
Update base learning rate for training
Args:
learning_rate (float): New base learning rate
Returns:
None
'''
self.system_dict["hyper-parameters"]["learning_rate"] = learning_rate;
self.system_dict["hyper-parameters"]["optimizer"]["params"]["lr"] = learning_rate;
self.custom_print("Update: Learning Rate - {}".format(self.system_dict["hyper-parameters"]["learning_rate"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress_realtime(self, value):
'''
Update display progress param
Args:
value (bool): If True, then real time progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress_realtime(value, self.system_dict);
self.custom_print("Update: Display progress realtime - {}".format(self.system_dict["training"]["settings"]["display_progress_realtime"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress(self, value):
'''
Update display progress param
Args:
value (bool): If True, then per epoch progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress(value, self.system_dict);
self.custom_print("Update: Display progress - {}".format(self.system_dict["training"]["settings"]["display_progress"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, None, prefix=["name", ["A-Z", "a-z", "0-9", "-", "_"]], post_trace=True)
@accepts("self", bool, prefix=str, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_save_intermediate_models(self, value, prefix="intermediate_model_"):
'''
Update whether to save intermediate models or not
Args:
value (bool): If True, saves model weight post every epoch
prefix (str): Appends a prefix to intermediate weights
Returns:
None
'''
if(value):
if(not os.access(self.system_dict["model_dir"], os.W_OK)):
msg = "Folder \"{}\" has no read access".format(self.system_dict["model_dir"])
msg += "Cannot save Intermediate models";
raise ConstraintError(msg);
self.system_dict = set_save_intermediate_models(value, self.system_dict);
self.system_dict = set_intermediate_model_prefix(prefix, self.system_dict);
self.custom_print("Update: Save Intermediate models - {}".format(self.system_dict["training"]["settings"]["save_intermediate_models"]));
if(self.system_dict["training"]["settings"]["save_intermediate_models"]):
self.custom_print("Update: Intermediate model prefix - {}".format(self.system_dict["training"]["settings"]["intermediate_model_prefix"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def update_save_training_logs(self, value):
'''
Update whether to save training logs or not
Args:
value (bool): If True, saves all training and validation metrics. Required for comparison.
Returns:
None
'''
self.system_dict = set_save_training_logs(value, self.system_dict);
self.custom_print("Update: Save Training logs - {}".format(self.system_dict["training"]["settings"]["save_training_logs"]));
self.custom_print("");
##########################################################################################################################################################
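# Minimal usage sketch (illustrative only): `exp` is assumed to be an experiment object
# built on a subclass of prototype_updates, created elsewhere in the usual monk workflow;
# the "resnet18" model name is a placeholder.
def _demo_update_calls(exp):
    exp.update_input_size(224)
    exp.update_batch_size(16)
    exp.update_model_name("resnet18", force=True)
    exp.update_freeze_base_network(True)
    exp.update_num_epochs(5)
    exp.update_learning_rate(0.001)
    return exp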
| 45.612632 | 166 | 0.482323 |
41b5adcc8949754ce003125351c504bd80c8dc7d | 1,301 | py | Python | setup.py | MichiganLabs/flask-principal | 2967b387573ddd4f9fb4dd862d4c7824148f6a03 | ["MIT"] | 366 | 2015-01-05T02:09:06.000Z | 2021-11-17T17:52:20.000Z | setup.py | MichiganLabs/flask-principal | 2967b387573ddd4f9fb4dd862d4c7824148f6a03 | ["MIT"] | 23 | 2015-02-25T17:44:49.000Z | 2022-03-21T11:56:16.000Z | setup.py | MichiganLabs/flask-principal | 2967b387573ddd4f9fb4dd862d4c7824148f6a03 | ["MIT"] | 94 | 2015-01-04T14:53:03.000Z | 2022-02-14T22:52:01.000Z |
"""
Flask Principal
---------------
Identity management for Flask.
Links
`````
* `documentation <http://packages.python.org/Flask-Principal/>`_
* `source <https://github.com/mattupstate/flask-principal>`_
* `development version
<https://github.com/mattupstate/flask-principal/raw/master#egg=Flask-Principal-dev>`_
"""
from setuptools import setup
setup(
name='Flask-Principal',
version='0.3.5',
url='http://packages.python.org/Flask-Principal/',
license='MIT',
author='Ali Afshar',
author_email='aafshar@gmail.com',
maintainer='Matt Wright',
maintainer_email='matt@nobien.net',
description='Identity management for flask',
long_description=__doc__,
py_modules=['flask_principal'],
zip_safe=False,
platforms='any',
install_requires=['Flask', 'blinker'],
test_suite='nose.collector',
tests_require=['nose'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 27.104167 | 87 | 0.651038 |
99804fb81a65a9d4eba207e90cc17e59aaaccf12 | 9,532 | py | Python | tests/graph/test_dijkstra.py | IvanIsCoding/retworkx | 2dc3552476488df2ec305aab1d09790e283af1c8 | ["Apache-2.0"] | null | null | null | tests/graph/test_dijkstra.py | IvanIsCoding/retworkx | 2dc3552476488df2ec305aab1d09790e283af1c8 | ["Apache-2.0"] | null | null | null | tests/graph/test_dijkstra.py | IvanIsCoding/retworkx | 2dc3552476488df2ec305aab1d09790e283af1c8 | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
class TestDijkstraGraph(unittest.TestCase):
def setUp(self):
self.graph = retworkx.PyGraph()
self.a = self.graph.add_node("A")
self.b = self.graph.add_node("B")
self.c = self.graph.add_node("C")
self.d = self.graph.add_node("D")
self.e = self.graph.add_node("E")
self.f = self.graph.add_node("F")
self.graph.add_edge(self.a, self.b, 7)
self.graph.add_edge(self.c, self.a, 9)
self.graph.add_edge(self.a, self.d, 14)
self.graph.add_edge(self.b, self.c, 10)
self.graph.add_edge(self.d, self.c, 2)
self.graph.add_edge(self.d, self.e, 9)
self.graph.add_edge(self.b, self.f, 15)
self.graph.add_edge(self.c, self.f, 11)
self.graph.add_edge(self.e, self.f, 6)
def test_dijkstra(self):
path = retworkx.graph_dijkstra_shortest_path_lengths(
self.graph, self.a, lambda x: float(x), self.e
)
expected = {4: 20.0}
self.assertEqual(expected, path)
def test_dijkstra_path(self):
path = retworkx.graph_dijkstra_shortest_paths(
self.graph, self.a, weight_fn=lambda x: float(x), target=self.e
)
# a -> d -> e = 23
# a -> c -> d -> e = 20
expected = {4: [self.a, self.c, self.d, self.e]}
self.assertEqual(expected, path)
def test_dijkstra_with_no_goal_set(self):
path = retworkx.graph_dijkstra_shortest_path_lengths(
self.graph, self.a, lambda x: 1
)
expected = {1: 1.0, 2: 1.0, 3: 1.0, 4: 2.0, 5: 2.0}
self.assertEqual(expected, path)
def test_dijkstra_path_with_no_goal_set(self):
path = retworkx.graph_dijkstra_shortest_paths(self.graph, self.a)
expected = {
1: [0, 1],
2: [0, 2],
3: [0, 3],
4: [0, 3, 4],
5: [0, 1, 5],
}
self.assertEqual(expected, path)
def test_dijkstra_with_no_path(self):
g = retworkx.PyGraph()
a = g.add_node("A")
g.add_node("B")
path = retworkx.graph_dijkstra_shortest_path_lengths(
g, a, lambda x: float(x)
)
expected = {}
self.assertEqual(expected, path)
def test_dijkstra_path_with_no_path(self):
g = retworkx.PyGraph()
a = g.add_node("A")
g.add_node("B")
path = retworkx.graph_dijkstra_shortest_paths(
g, a, weight_fn=lambda x: float(x)
)
expected = {}
self.assertEqual(expected, path)
def test_dijkstra_with_disconnected_nodes(self):
g = retworkx.PyGraph()
a = g.add_node("A")
b = g.add_node("B")
g.add_edge(a, b, 1.2)
g.add_node("C")
d = g.add_node("D")
g.add_edge(b, d, 2.4)
path = retworkx.graph_dijkstra_shortest_path_lengths(
g, a, lambda x: round(x, 1)
)
        # Computers never work: floating point makes 1.2 + 2.4 come out as 3.5999999999999996
expected = {1: 1.2, 3: 3.5999999999999996}
self.assertEqual(expected, path)
def test_dijkstra_graph_with_digraph_input(self):
g = retworkx.PyDAG()
g.add_node(0)
with self.assertRaises(TypeError):
retworkx.graph_dijkstra_shortest_path_lengths(g, 0, lambda x: x)
def test_dijkstra_all_pair_path_lengths(self):
lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
self.graph, float
)
expected = {
0: {1: 7.0, 2: 9.0, 3: 11.0, 4: 20.0, 5: 20.0},
1: {0: 7.0, 2: 10.0, 3: 12.0, 4: 21.0, 5: 15.0},
2: {0: 9.0, 1: 10.0, 3: 2.0, 4: 11.0, 5: 11.0},
3: {0: 11.0, 1: 12.0, 2: 2.0, 4: 9.0, 5: 13.0},
4: {0: 20.0, 1: 21.0, 2: 11.0, 3: 9.0, 5: 6.0},
5: {0: 20.0, 1: 15.0, 2: 11.0, 3: 13.0, 4: 6.0},
}
self.assertEqual(expected, lengths)
def test_dijkstra_all_pair_paths(self):
paths = retworkx.graph_all_pairs_dijkstra_shortest_paths(
self.graph, float
)
expected = {
0: {
1: [0, 1],
2: [0, 2],
3: [0, 2, 3],
4: [0, 2, 3, 4],
5: [0, 2, 5],
},
1: {0: [1, 0], 2: [1, 2], 3: [1, 2, 3], 4: [1, 2, 3, 4], 5: [1, 5]},
2: {0: [2, 0], 1: [2, 1], 3: [2, 3], 4: [2, 3, 4], 5: [2, 5]},
3: {0: [3, 2, 0], 1: [3, 2, 1], 2: [3, 2], 4: [3, 4], 5: [3, 2, 5]},
4: {
0: [4, 3, 2, 0],
1: [4, 5, 1],
2: [4, 3, 2],
3: [4, 3],
5: [4, 5],
},
5: {0: [5, 2, 0], 1: [5, 1], 2: [5, 2], 3: [5, 2, 3], 4: [5, 4]},
}
self.assertEqual(expected, paths)
def test_dijkstra_all_pair_path_lengths_with_node_removal(self):
self.graph.remove_node(3)
lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
self.graph, float
)
expected = {
0: {1: 7.0, 2: 9.0, 4: 26.0, 5: 20.0},
1: {0: 7.0, 2: 10.0, 4: 21.0, 5: 15.0},
2: {0: 9.0, 1: 10.0, 4: 17.0, 5: 11.0},
4: {0: 26.0, 1: 21.0, 2: 17.0, 5: 6.0},
5: {0: 20.0, 1: 15.0, 2: 11.0, 4: 6.0},
}
self.assertEqual(expected, lengths)
def test_dijkstra_all_pair_paths_with_node_removal(self):
self.graph.remove_node(3)
paths = retworkx.graph_all_pairs_dijkstra_shortest_paths(
self.graph, float
)
expected = {
0: {1: [0, 1], 2: [0, 2], 4: [0, 2, 5, 4], 5: [0, 2, 5]},
1: {0: [1, 0], 2: [1, 2], 4: [1, 5, 4], 5: [1, 5]},
2: {0: [2, 0], 1: [2, 1], 4: [2, 5, 4], 5: [2, 5]},
4: {0: [4, 5, 2, 0], 1: [4, 5, 1], 2: [4, 5, 2], 5: [4, 5]},
5: {0: [5, 2, 0], 1: [5, 1], 2: [5, 2], 4: [5, 4]},
}
self.assertEqual(expected, paths)
def test_dijkstra_all_pair_path_lengths_empty_graph(self):
graph = retworkx.PyGraph()
self.assertEqual(
{}, retworkx.graph_all_pairs_dijkstra_path_lengths(graph, float)
)
def test_dijkstra_all_pair_shortest_paths_empty_graph(self):
graph = retworkx.PyGraph()
self.assertEqual(
{}, retworkx.graph_all_pairs_dijkstra_shortest_paths(graph, float)
)
def test_dijkstra_all_pair_path_lengths_graph_no_edges(self):
graph = retworkx.PyGraph()
graph.add_nodes_from(list(range(1000)))
expected = {x: {} for x in range(1000)}
self.assertEqual(
expected,
retworkx.graph_all_pairs_dijkstra_path_lengths(graph, float),
)
def test_dijkstra_all_pair_shortest_paths_no_edges(self):
graph = retworkx.PyGraph()
graph.add_nodes_from(list(range(1000)))
expected = {x: {} for x in range(1000)}
self.assertEqual(
expected,
retworkx.graph_all_pairs_dijkstra_shortest_paths(graph, float),
)
def dijkstra_with_invalid_weights(self):
graph = retworkx.generators.path_graph(2)
for invalid_weight in [float("nan"), -1]:
for as_undirected in [False, True]:
with self.subTest(
invalid_weight=invalid_weight, as_undirected=as_undirected
):
with self.assertRaises(ValueError):
retworkx.graph_dijkstra_shortest_paths(
graph,
source=0,
weight_fn=lambda _: invalid_weight,
as_undirected=as_undirected,
)
def dijkstra_lengths_with_invalid_weights(self):
graph = retworkx.generators.path_graph(2)
for invalid_weight in [float("nan"), -1]:
with self.subTest(invalid_weight=invalid_weight):
with self.assertRaises(ValueError):
retworkx.graph_dijkstra_shortest_path_lengths(
graph, node=0, edge_cost_fn=lambda _: invalid_weight
)
def all_pairs_dijkstra_with_invalid_weights(self):
graph = retworkx.generators.path_graph(2)
for invalid_weight in [float("nan"), -1]:
with self.subTest(invalid_weight=invalid_weight):
with self.assertRaises(ValueError):
retworkx.graph_all_pairs_dijkstra_shortest_paths(
graph, edge_cost_fn=lambda _: invalid_weight
)
def all_pairs_dijkstra_lenghts_with_invalid_weights(self):
graph = retworkx.generators.path_graph(2)
for invalid_weight in [float("nan"), -1]:
with self.subTest(invalid_weight=invalid_weight):
with self.assertRaises(ValueError):
retworkx.graph_all_pairs_dijkstra_path_lengths(
graph, edge_cost_fn=lambda _: invalid_weight
)
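# Minimal usage sketch of the API exercised above (illustrative, outside the test class):
# build a small weighted PyGraph and query single-source shortest path lengths.
def _demo_dijkstra_lengths():
    g = retworkx.PyGraph()
    a, b, c = g.add_node("A"), g.add_node("B"), g.add_node("C")
    g.add_edge(a, b, 1.0)
    g.add_edge(b, c, 2.0)
    g.add_edge(a, c, 5.0)
    # expected: {1: 1.0, 2: 3.0} -- going through B is cheaper than the direct A-C edge
    return retworkx.graph_dijkstra_shortest_path_lengths(g, a, lambda w: float(w))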
| 37.976096 | 80 | 0.542488 |
289b1d1262d4168e049713707fd824966476e08d | 326 | py | Python | Python/celciustofahrenheit.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | ["MIT"] | 215 | 2021-10-01T08:18:16.000Z | 2022-03-29T04:12:03.000Z | Python/celciustofahrenheit.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | ["MIT"] | 232 | 2021-10-02T14:51:43.000Z | 2021-11-14T08:23:27.000Z | Python/celciustofahrenheit.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | ["MIT"] | 807 | 2021-10-01T08:11:45.000Z | 2021-11-21T18:57:09.000Z |
try:
    degrees_in_celcius = float(input("What is the temperature in Celsius?: "))
def C2F_Converter():
degrees_in_fahrenheit = (degrees_in_celcius * 9/5) + 32
return degrees_in_fahrenheit
print(C2F_Converter(),"F")
except ValueError:
print("This is not a number.")
| 20.375 | 78 | 0.619632 |
78d5ed0f61e40d6df579c45da488d76602272078 | 13,609 | py | Python | utils.py | ultmaster/openimages2coco | 2d48a152753105f229afe6fe96dd608c202301a2 | ["MIT"] | null | null | null | utils.py | ultmaster/openimages2coco | 2d48a152753105f229afe6fe96dd608c202301a2 | ["MIT"] | null | null | null | utils.py | ultmaster/openimages2coco | 2d48a152753105f229afe6fe96dd608c202301a2 | ["MIT"] | null | null | null |
import multiprocessing
import matplotlib
matplotlib.use('Agg')
import os
import sys
import csv
import json
import time
import numpy as np
import skimage.io as io
from openimages import OpenImages
def _url_to_license(licenses, mode='http'):
    # create dict with license urls as keys
# mode is either http or https
# create dict
licenses_by_url = {}
for license in licenses:
# Get URL
if mode == 'https':
url = 'https:' + license['url'][5:]
else:
url = license['url']
# Add to dict
licenses_by_url[url] = license
return licenses_by_url
def convert_category_annotations(original_category_info):
categories = []
num_categories = len(original_category_info)
for i in range(num_categories):
cat = {}
cat['id'] = i + 1
cat['name'] = original_category_info[i][1]
cat['original_id'] = original_category_info[i][0]
categories.append(cat)
return categories
def _convert_image_annotation_chunk(original_image_metadata, image_dir, licenses, verbose, idx):
# Get dict with license urls
licenses_by_url_http = _url_to_license(licenses, mode='http')
licenses_by_url_https = _url_to_license(licenses, mode='https')
# Create list
images = []
# Set starting time
start_time = time.time()
print("Running on chunk %d" % idx)
sys.stdout.flush()
# loop through entries skipping title line
num_images = len(original_image_metadata)
for i in range(0, num_images):
# Print status info
if verbose > 0:
if i % 10 == 0:
elapsed_time = time.time() - start_time
elapsed_hours = elapsed_time // 3600
elapsed_mins = (elapsed_time % 3600) // 60
total_time = elapsed_time * num_images / (i + 1)
total_hours = total_time // 3600
total_mins = (total_time % 3600) // 60
print('Image {}/{} Time: {:.0f}h {:.0f}min / {:.0f}h {:.0f}min'.format(i, num_images - 1,
elapsed_hours, elapsed_mins,
total_hours, total_mins),
end='\r')
sys.stdout.flush()
# Select image ID as key
key = original_image_metadata[i][0]
# Copy information
img = {}
img['id'] = key
img['file_name'] = key + '.jpg'
img['original_url'] = original_image_metadata[i][2]
license_url = original_image_metadata[i][4]
# Look up license id
try:
img['license'] = licenses_by_url_https[license_url]['id']
except:
img['license'] = licenses_by_url_http[license_url]['id']
# Load image to extract height and width
filename = os.path.join(image_dir, img['file_name'])
try:
img_data = io.imread(filename)
except:
continue
# catch weird image file type
if len(img_data.shape) < 2:
img['height'] = img_data[0].shape[0]
img['width'] = img_data[0].shape[1]
else:
img['height'] = img_data.shape[0]
img['width'] = img_data.shape[1]
# Add to list of images
images.append(img)
return images
def chunk_helper(image_file_list, image_dir, licenses, verbose, idx):
return _convert_image_annotation_chunk(image_file_list, image_dir, licenses, verbose, idx)
def convert_image_annotations(original_image_metadata, image_dir, licenses, mode='parallel', verbose=1):
# Enclosing function of _convert_image_annotation_chunk to make it parallelizable
# in parallel mode:
# verbose: 0 = no status info, 1 = some progress info, 50 = info for every finished chunk
# in feed-forward mode:
# verbose: 0 = no status info, 1 = progress info every 10 images
if mode == 'parallel':
N = 10000 # chunk size
chunks = []
for i in range(0, len(original_image_metadata), N):
chunks.append(original_image_metadata[i:i + N])
print("Total: %d chunks" % len(chunks))
with multiprocessing.Pool(64) as pool:
images_in_chunks = pool.starmap(chunk_helper, [(c, image_dir, licenses, 0, i) for i, c in enumerate(chunks)])
images = [chunk[i] for chunk in images_in_chunks for i in range(len(chunk))]
else:
images = _convert_image_annotation_chunk(original_image_metadata, image_dir, licenses, verbose=verbose, idx=0)
return images
def _image_list_to_dict(images):
# Helper function to create dict of images by image id
# modelled from the cocoapi
imgs = {}
for img in images:
imgs[img['id']] = img
return imgs
def _category_list_to_dict(categories):
# Helper function to create dict of categories by integer category id
# modelled from the cocoapi
cats = {}
for cat in categories:
cats[cat['id']] = cat
return cats
def _categories_by_original_ids(cats):
    # Helper function to create dict mapping original utf8 category ids to categories
origCats = {}
for i in cats.keys():
key = cats[i]['original_id']
origCats[key] = cats[i]
return origCats
def convert_instance_annotations(original_annotations, images, categories, start_index=0):
imgs = _image_list_to_dict(images)
cats = _category_list_to_dict(categories)
orig_cats = _categories_by_original_ids(cats)
annotations = []
num_instances = len(original_annotations)
for i in range(0, num_instances):
# print progress
if i % 5000 == 0:
print('{}/{} annotations processed'.format(i, num_instances - 1))
sys.stdout.flush()
# set individual instance id
# use start_index to separate indices between dataset splits
key = i + start_index
csv_line = i
ann = {}
ann['id'] = key
image_id = original_annotations[csv_line][0]
ann['image_id'] = image_id
if image_id not in imgs:
continue
ann['original_category_id'] = original_annotations[csv_line][2]
ann['category_id'] = orig_cats[original_annotations[csv_line][2]]['id']
x = float(original_annotations[csv_line][4]) * imgs[image_id]['width']
y = float(original_annotations[csv_line][6]) * imgs[image_id]['height']
dx = (float(original_annotations[csv_line][5]) - float(original_annotations[csv_line][4])) * imgs[image_id][
'width']
dy = (float(original_annotations[csv_line][7]) - float(original_annotations[csv_line][6])) * imgs[image_id][
'height']
ann['bbox'] = [round(a, 2) for a in [x, y, dx, dy]]
ann['area'] = round(dx * dy, 2)
ann['isoccluded'] = int(original_annotations[csv_line][8])
ann['istruncated'] = int(original_annotations[csv_line][9])
ann['iscrowd'] = int(original_annotations[csv_line][10])
ann['isdepiction'] = int(original_annotations[csv_line][11])
ann['isinside'] = int(original_annotations[csv_line][12])
annotations.append(ann)
return annotations
def convert_openimages_subset(annotation_dir, image_dir, subset, return_data=False):
# Select correct source files for each subset
category_sourcefile = 'class-descriptions-boxable.csv'
if subset == 'train':
image_sourcefile = 'train-images-boxable-with-rotation.csv'
annotation_sourcefile = 'train-annotations-bbox.csv'
elif subset == 'val':
image_sourcefile = 'validation-images-with-rotation.csv'
annotation_sourcefile = 'validation-annotations-bbox.csv'
elif subset == 'test':
image_sourcefile = 'test-images-with-rotation.csv'
annotation_sourcefile = 'test-annotations-bbox.csv'
# Load original annotations
print('loading original annotations ...')
sys.stdout.flush()
with open('{}/{}'.format(annotation_dir, category_sourcefile), 'r', encoding='utf-8') as f:
csv_f = csv.reader(f)
original_category_info = []
for row in csv_f:
original_category_info.append(row)
print("First step done")
sys.stdout.flush()
dirty_count, total_count = 0, 0
with open('{}/{}'.format(annotation_dir, image_sourcefile), 'r', encoding='utf-8') as f:
csv_f = csv.reader(f)
original_image_metadata = []
for i, row in enumerate(csv_f):
if i == 0: continue
if i % 10000 == 0:
print("Source file progress:", i)
sys.stdout.flush()
total_count += 1
if not os.path.exists(os.path.join(image_dir, row[0] + ".jpg")):
dirty_count += 1
continue
original_image_metadata.append(row)
print("Image sourcefile: %d out of %d dirty" % (dirty_count, total_count))
sys.stdout.flush()
dirty_count = total_count = 0
with open('{}/{}'.format(annotation_dir, annotation_sourcefile), 'r') as f:
csv_f = csv.reader(f)
original_annotations = []
for i, row in enumerate(csv_f):
if i == 0: continue
if i % 10000 == 0:
print("Annotation file progress:", i)
sys.stdout.flush()
total_count += 1
if not os.path.exists(os.path.join(image_dir, row[0] + ".jpg")):
dirty_count += 1
continue
original_annotations.append(row)
print("Annotation file: %d out of %d dirty" % (dirty_count, total_count))
print('loading original annotations ... Done')
sys.stdout.flush()
# Create dataset class to store annotations
oi = OpenImages()
# Add basic dataset info
print('adding basic dataset info')
    oi.dataset['info'] = {'contributors': 'Krasin I., Duerig T., Alldrin N., \
Ferrari V., Abu-El-Haija S., Kuznetsova A., Rom H., \
Uijlings J., Popov S., Kamali S., Malloci M., Pont-Tuset J., \
Veit A., Belongie S., Gomes V., Gupta A., Sun C., Chechik G., \
Cai D., Feng Z., Narayanan D., Murphy K.',
'date_announced': '2018/04/30',
'description': 'Open Images Dataset v4',
'url': 'https://storage.googleapis.com/openimages/web/index.html',
'version': '4.0',
'year': 2018}
# Add license information
print('adding basic license info')
oi.dataset['licenses'] = [{'id': 1,
'name': 'Attribution-NonCommercial-ShareAlike License',
'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/'},
{'id': 2,
'name': 'Attribution-NonCommercial License',
'url': 'http://creativecommons.org/licenses/by-nc/2.0/'},
{'id': 3,
'name': 'Attribution-NonCommercial-NoDerivs License',
'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/'},
{'id': 4,
'name': 'Attribution License',
'url': 'http://creativecommons.org/licenses/by/2.0/'},
{'id': 5,
'name': 'Attribution-ShareAlike License',
'url': 'http://creativecommons.org/licenses/by-sa/2.0/'},
{'id': 6,
'name': 'Attribution-NoDerivs License',
'url': 'http://creativecommons.org/licenses/by-nd/2.0/'},
{'id': 7,
'name': 'No known copyright restrictions',
'url': 'http://flickr.com/commons/usage/'},
{'id': 8,
'name': 'United States Government Work',
'url': 'http://www.usa.gov/copyright.shtml'}]
# Convert category information
print('converting category info')
oi.dataset['categories'] = convert_category_annotations(original_category_info)
    # Convert image metadata
print('converting image info ...')
oi.dataset['images'] = convert_image_annotations(original_image_metadata,
image_dir,
oi.dataset['licenses'],
mode='parallel',
verbose=10)
# Convert instance annotations
print('converting annotations ...')
oi.dataset['annotations'] = convert_instance_annotations(original_annotations,
oi.dataset['images'],
oi.dataset['categories'],
start_index=0)
# Write annotations into .json file
filename = "{}/{}-annotations-bbox.json".format(annotation_dir, subset)
print('writing output to {}'.format(filename))
sys.stdout.flush()
with open(filename, "w") as write_file:
json.dump(oi.dataset, write_file)
print('Done')
sys.stdout.flush()
if return_data:
return oi
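# Usage sketch (not part of the original script; the directory paths below are
# hypothetical placeholders for wherever the OpenImages CSVs and images live):
if __name__ == '__main__':
    example_annotation_dir = '/data/openimages/annotations'  # assumed location of the CSV files
    example_image_dir = '/data/openimages/validation'        # assumed location of the .jpg files
    oi_val = convert_openimages_subset(example_annotation_dir,
                                       example_image_dir,
                                       subset='val',
                                       return_data=True)
    print(len(oi_val.dataset['images']), 'images and',
          len(oi_val.dataset['annotations']), 'annotations converted')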
| 38.994269
| 121
| 0.563818
|
7f31ea19132e40e394642093a318d90fae0a1d0b
| 332
|
py
|
Python
|
apistar_jwt/__init__.py
|
jgirardet/apistar-jwt
|
46559eae367cf3c90cabd713c48ff0ef2d4d799d
|
[
"MIT"
] | 42
|
2017-10-05T00:58:10.000Z
|
2020-02-22T22:36:30.000Z
|
apistar_jwt/__init__.py
|
jgirardet/apistar-jwt
|
46559eae367cf3c90cabd713c48ff0ef2d4d799d
|
[
"MIT"
] | 19
|
2017-10-06T18:31:42.000Z
|
2019-10-18T16:22:22.000Z
|
apistar_jwt/__init__.py
|
jgirardet/apistar-jwt
|
46559eae367cf3c90cabd713c48ff0ef2d4d799d
|
[
"MIT"
] | 11
|
2017-10-06T20:02:20.000Z
|
2018-08-14T12:10:32.000Z
|
"""Top-level package for apistar-jwt."""
from apistar_jwt.token import JWTUser, JWT
from apistar_jwt.decorators import anonymous_allowed, authentication_required
__author__ = """Ryan Castner"""
__email__ = 'castner.rr@gmail.com'
__version__ = '0.5.0'
__all__ = ['JWTUser', 'JWT', 'anonymous_allowed', 'authentication_required']
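# Usage sketch (illustrative only, not part of the package): the exported names
# are typically wired into an apistar App along the lines below; the exact JWT
# component configuration and route setup are assumptions that depend on the
# apistar version in use.
#
#     from apistar import App, Route
#     from apistar_jwt import JWT, JWTUser, authentication_required
#
#     @authentication_required
#     def whoami(user: JWTUser) -> dict:
#         return {'id': user.id, 'username': user.username}
#
#     app = App(routes=[Route('/whoami', 'GET', whoami)],
#               components=[JWT(settings={'JWT_SECRET': 'replace-me'})])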
| 27.666667
| 77
| 0.762048
|
ee145eb88d5d33d68fb224219d751a880d379ef0
| 206
|
py
|
Python
|
Algorithms/717.1-bit-and-2-bit-characters/1-bit-and-2-bit-characters_2.py
|
OctopusLian/leetcode-solutions
|
40920d11c584504e805d103cdc6ef3f3774172b3
|
[
"MIT"
] | 1
|
2020-12-01T18:35:24.000Z
|
2020-12-01T18:35:24.000Z
|
Algorithms/717.1-bit-and-2-bit-characters/1-bit-and-2-bit-characters_2.py
|
OctopusLian/leetcode-solutions
|
40920d11c584504e805d103cdc6ef3f3774172b3
|
[
"MIT"
] | 18
|
2020-11-10T05:48:29.000Z
|
2020-11-26T08:39:20.000Z
|
Algorithms/717.1-bit-and-2-bit-characters/1-bit-and-2-bit-characters_2.py
|
OctopusLian/leetcode-solutions
|
40920d11c584504e805d103cdc6ef3f3774172b3
|
[
"MIT"
] | 5
|
2020-11-09T07:43:00.000Z
|
2021-12-02T14:59:37.000Z
|
from typing import List  # implicit in the LeetCode judge; imported here so the snippet runs standalone
class Solution:
def isOneBitCharacter(self, bits: List[int]) -> bool:
for inx, i in enumerate(bits):
if i:
bits[inx] = bits[inx+1] = None
return bits[-1] == 0
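# Quick check (illustrative): [1, 0, 0] must end in a one-bit character, while
# in [1, 1, 1, 0] the trailing 0 is consumed by the last two-bit character.
if __name__ == '__main__':
    print(Solution().isOneBitCharacter([1, 0, 0]))     # True
    print(Solution().isOneBitCharacter([1, 1, 1, 0]))  # False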
| 34.333333
| 57
| 0.524272
|
219a1dd6ac69685917deb2dc5dfa076ab52a0136
| 1,566
|
py
|
Python
|
cmdb-usage/libs/report/s3_report.py
|
zjj1002/aws-cloud-cmdb-system
|
47982007688e5db1272435891cb654ab11d0d60a
|
[
"Apache-2.0"
] | null | null | null |
cmdb-usage/libs/report/s3_report.py
|
zjj1002/aws-cloud-cmdb-system
|
47982007688e5db1272435891cb654ab11d0d60a
|
[
"Apache-2.0"
] | 1
|
2022-01-04T13:53:16.000Z
|
2022-01-04T13:53:16.000Z
|
cmdb-usage/libs/report/s3_report.py
|
zjj1002/aws-cloud-cmdb-system
|
47982007688e5db1272435891cb654ab11d0d60a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-05-08 15:14
# @Author : jianxlin
# @Site :
# @File : s3_report.py
# @Software: PyCharm
from pandasql import sqldf
from libs.report.base import ReportData
class S3ReportData(ReportData):
_NAME = "S3"
def __init__(self, s3_bill=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._s3_bill = s3_bill
self._report_data = self._s3_bill.get_bu_bucket_bill()
self.merge()
self.report_data.fillna(0.0, inplace=True)
@property
def s3_bill(self):
return self._s3_bill
def merge(self):
"""
        Merge the billing data.
:return:
"""
self.merge_total_cost()
self.insert_to_db()
def merge_total_cost(self):
"""
        Add the bill total (TotalCost) information.
:return:
"""
self.report_data["TotalCost"] = self.report_data.iloc[:, 2:].apply(lambda x: x.sum(), axis=1)
def get_dep_total_bill(self):
"""
        Query the total cost per dep (department).
:return:
"""
dep_tag_name = self.dep_tag_name
sql = """
select %(dep_tag_name)s,sum(TotalCost) as S3Cost
from s3_report
group by %(dep_tag_name)s
""" % locals()
return sqldf(sql, {"s3_report": self.report_data})
if __name__ == '__main__':
from libs.bill.base import Bill
from libs.bill.s3_bill import S3Bill
b = Bill()
s3_bill = S3Bill(base_bill=b)
erd = S3ReportData(s3_bill=s3_bill)
erd.insert_to_db()
# logging.info(erd.report_data)
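    # Illustrative follow-up (assumption, not in the original script): once the
    # report is built, per-department S3 totals can be pulled with
    #     dep_totals = erd.get_dep_total_bill()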
| 23.727273
| 101
| 0.579183
|
c8d1cc10bcc752d01a5447ab2c7637c714cacc03
| 2,981
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/backend_address_pool_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/backend_address_pool_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/backend_address_pool_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class BackendAddressPool(SubResource):
"""Pool of backend IP addresses.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar backend_ip_configurations: Gets collection of references to IP
addresses defined in network interfaces.
:vartype backend_ip_configurations:
list[~azure.mgmt.network.v2017_09_01.models.NetworkInterfaceIPConfiguration]
:ivar load_balancing_rules: Gets load balancing rules that use this
backend address pool.
:vartype load_balancing_rules:
list[~azure.mgmt.network.v2017_09_01.models.SubResource]
:ivar outbound_nat_rule: Gets outbound rules that use this backend address
pool.
:vartype outbound_nat_rule:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param provisioning_state: Get provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'backend_ip_configurations': {'readonly': True},
'load_balancing_rules': {'readonly': True},
'outbound_nat_rule': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
'outbound_nat_rule': {'key': 'properties.outboundNatRule', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(BackendAddressPool, self).__init__(id=id, **kwargs)
self.backend_ip_configurations = None
self.load_balancing_rules = None
self.outbound_nat_rule = None
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
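# Illustrative construction (not part of the generated SDK file): only the
# writable fields are supplied here; the read-only collections stay None until
# the service fills them in a response. The resource ID is a placeholder.
#
#     pool = BackendAddressPool(
#         id='/subscriptions/<sub>/resourceGroups/<rg>/providers/'
#            'Microsoft.Network/loadBalancers/<lb>/backendAddressPools/<pool>',
#         name='<pool>')
#     print(pool.backend_ip_configurations)  # None until returned by the server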
| 42.585714
| 128
| 0.657162
|
f4a2112387b1631979700d1e7501fac298609643
| 437
|
py
|
Python
|
array/Ratndeep/one diff in adj of number.py
|
ayroti-18/Competitive-Programming
|
d1ea08a91c63e54f0bba2365e56f98db71eb9054
|
[
"MIT"
] | 3
|
2020-12-20T10:23:11.000Z
|
2021-06-16T10:34:18.000Z
|
array/Ratndeep/one diff in adj of number.py
|
Spring-dot/Competitive-Programming
|
98add277a8b029710c749d1082de25c524e12408
|
[
"MIT"
] | null | null | null |
array/Ratndeep/one diff in adj of number.py
|
Spring-dot/Competitive-Programming
|
98add277a8b029710c749d1082de25c524e12408
|
[
"MIT"
] | null | null | null |
n = int(input())
if n<=10:
for i in range(1,n):
print(i,end=" ")
print()
else:
for i in range(1,11):
print(i,end=" ")
for i in range(11,n):
cur_num = str(i)
x=0
flag=0
while x<len(cur_num)-1:
if abs(int(cur_num[x])-int(cur_num[x+1]))!=1:
flag=1
break
x+=1
if flag==0:
print(i,end=" ")
print()
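# Worked example (illustrative): for n = 25 only numbers whose every adjacent
# digit pair differs by exactly 1 survive the check, so the output is
#   1 2 3 4 5 6 7 8 9 10 12 21 23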
| 20.809524
| 57
| 0.409611
|
07bc63b277a76568ef34f303f549ec4d0d12aa9d
| 33,154
|
py
|
Python
|
lib-python/2.4.1/test/test_re.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
lib-python/2.4.1/test/test_re.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
lib-python/2.4.1/test/test_re.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
import sys
sys.path = ['.'] + sys.path
from test.test_support import verbose, run_unittest
import re
from sre import Scanner
import sys, os, traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
def test_getattr(self):
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
u"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
u"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", u"\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
def test_bigcharset(self):
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222").group(1), u"\u2222")
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222", re.UNICODE).group(1), u"\u2222")
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def test_re_escape(self):
p=""
for i in range(0, 256):
p = p + chr(i)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)) is not None,
True)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def test_pickling(self):
import pickle
self.pickle_test(pickle)
import cPickle
self.pickle_test(cPickle)
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"\%03o" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\%03o0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\%03o8" % i, chr(i)+"8"), None)
self.assertNotEqual(re.match(r"\x%02x" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\x%02x0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\x%02xz" % i, chr(i)+"z"), None)
self.assertRaises(re.error, re.match, "\911", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"[\%03o]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o8]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02xz]" % i, chr(i)), None)
self.assertRaises(re.error, re.match, "[\911]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
        # bugs 418626 et al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat=u"["+re.escape(u"\u2039")+u"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
def s_float(scanner, token): return float(token)
def s_int(scanner, token): return int(token)
scanner = Scanner([
(r"[a-zA-Z_]\w*", s_ident),
(r"\d+\.\d*", s_float),
(r"\d+", s_int),
(r"=|\+|-|\*|/", s_operator),
(r"\s+", None),
])
self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
(['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
try:
unicode
except NameError:
return # no problem if we have no unicode
class my_unicode(unicode): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
def test_bug_926075(self):
try:
unicode
except NameError:
return # no problem if we have no unicode
self.assert_(re.compile('bug_926075') is not
re.compile(eval("u'bug_926075'")))
def test_bug_931848(self):
try:
unicode
except NameError:
pass
pattern = eval('u"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(iter.next().span(), (1,2))
self.assertRaises(StopIteration, iter.next)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(iter.next().span(), (0, 4))
self.assertEqual(iter.next().span(), (4, 4))
self.assertRaises(StopIteration, iter.next)
def run_re_tests():
from test.re_tests import benchmarks, tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print 'Running re_tests test suite'
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError, ('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print '=== Syntax error:', t
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print '*** Unexpected error ***', t
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error, msg:
print '=== Unexpected exception', t, repr(msg)
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print '=== Succeeded incorrectly', t
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print '=== grouping error', t,
print repr(repl) + ' should be ' + repr(expected)
else:
print '=== Failed incorrectly', t
# Try the match on a unicode string, and check that it
# still succeeds.
try:
result = obj.search(unicode(s, "latin-1"))
if result is None:
print '=== Fails on unicode match', t
except NameError:
continue # 1.5.2
except TypeError:
continue # unicode test case
# Try the match on a unicode pattern, and check that it
# still succeeds.
obj=re.compile(unicode(pattern, "latin-1"))
result = obj.search(s)
if result is None:
print '=== Fails on unicode pattern match', t
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print '=== Failed on range-limited match', t
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print '=== Fails on case-insensitive match', t
# Try the match with LOCALE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print '=== Fails on locale-sensitive match', t
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print '=== Fails on unicode-sensitive match', t
def test_main():
run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
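# Quick spot-check of behaviours exercised above (illustrative; mirrors the
# expectations asserted in test_re_split and test_re_findall, Python 2.x semantics):
#     >>> import re
#     >>> re.split("(:)*", ":a:b::c")
#     ['', ':', 'a', ':', 'b', ':', 'c']
#     >>> re.findall("(:)(:*)", "a:b::c:::d")
#     [(':', ''), (':', ':'), (':', '::')]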
| 46.304469
| 85
| 0.482476
|
0cafd008a0491de1970c2c7b7348896c1ce8f684
| 721
|
py
|
Python
|
OR_web_GUI/migrations/0003_auto_20190211_0848.py
|
AntaeusNar/Orange-Relay
|
5a327a531831946f921b81648df500b9c5aab73d
|
[
"MIT"
] | null | null | null |
OR_web_GUI/migrations/0003_auto_20190211_0848.py
|
AntaeusNar/Orange-Relay
|
5a327a531831946f921b81648df500b9c5aab73d
|
[
"MIT"
] | 8
|
2018-11-27T16:12:45.000Z
|
2021-06-10T21:10:58.000Z
|
OR_web_GUI/migrations/0003_auto_20190211_0848.py
|
AntaeusNar/Orange-Relay
|
5a327a531831946f921b81648df500b9c5aab73d
|
[
"MIT"
] | 1
|
2021-02-24T19:13:44.000Z
|
2021-02-24T19:13:44.000Z
|
# Generated by Django 2.1.3 on 2019-02-11 16:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('OR_web_GUI', '0002_output_channel'),
]
operations = [
migrations.AlterField(
model_name='input',
name='last_used',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='output',
name='last_used',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='rule',
name='last_used',
field=models.DateTimeField(auto_now_add=True),
),
]
| 24.862069
| 58
| 0.576976
|
8c600f14a9e2a9d2f485b72e52d488532b59d87c
| 845
|
py
|
Python
|
youtube/baseparser.py
|
Glenpl/open_yt_api
|
aa439bafd25248509ff4301e48c15005abef17a4
|
[
"MIT"
] | 4
|
2016-03-26T11:32:10.000Z
|
2017-05-05T20:13:06.000Z
|
youtube/baseparser.py
|
Glenpl/open_yt_api
|
aa439bafd25248509ff4301e48c15005abef17a4
|
[
"MIT"
] | 11
|
2016-03-25T10:57:29.000Z
|
2017-03-28T15:18:06.000Z
|
youtube/baseparser.py
|
Glenpl/open_yt_api
|
aa439bafd25248509ff4301e48c15005abef17a4
|
[
"MIT"
] | 2
|
2016-04-09T09:40:17.000Z
|
2016-04-14T09:18:18.000Z
|
from bs4 import BeautifulSoup
class BaseParser():
_html_parser = None # beautifulsoup parser
def _initialize_parser(self, html_code):
self._html_parser = BeautifulSoup(html_code, 'html.parser')
def _find_by_class(self, tag, class_name):
return self._html_parser.find(tag, {'class': class_name})
def _remove_non_breaking_spaces(self, string):
return string.replace('\xa0', '')
def _extract_results(self, html_source, class_name, tag):
self._initialize_parser(html_source)
return self._html_parser.find_all(tag, class_=class_name)
class BaseSearchParser(BaseParser):
def parse(self, html_source):
search_results = self._extract_results(html_source, self._tile_class_name, 'div')
return tuple([self._parse_single_result(result) for result in search_results])
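# Minimal subclass sketch (illustrative, not part of the module): a concrete
# search parser supplies the tile class name and per-result parsing that
# BaseSearchParser.parse() expects; the class and CSS class name below are
# hypothetical.
#
#     class VideoSearchParser(BaseSearchParser):
#         _tile_class_name = 'yt-lockup-tile'  # assumed CSS class of one result tile
#
#         def _parse_single_result(self, result):
#             return self._remove_non_breaking_spaces(result.get_text())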
| 33.8
| 89
| 0.727811
|
63ff21a096f1de320a457773b004c2f8c7f6d711
| 1,994
|
py
|
Python
|
setup.py
|
Czzzzzzh/RLSchool
|
4147811e8c1bd6437f56c61f1dd148b9db1422b6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Czzzzzzh/RLSchool
|
4147811e8c1bd6437f56c61f1dd148b9db1422b6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Czzzzzzh/RLSchool
|
4147811e8c1bd6437f56c61f1dd148b9db1422b6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from setuptools import setup, find_packages
__version__ = '1.0.1'
with io.open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
setup(
name='rlschool',
version=__version__,
author='parl_dev',
author_email='',
description=('RLSchool: Excellent environments for reinforcement Learning benchmarking'),
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/PaddlePaddle/RLSchool',
license="GPLv3",
packages=[package for package in find_packages()
if package.startswith('rlschool')],
package_data={'rlschool': [
'./liftsim/config.ini',
'./liftsim/environment/animation/resources/*.png',
'./liftsim/environment/mansion/person_generators/mansion_flow.npy',
'./quadrotor/quadcopter.stl',
'./quadrotor/texture.png',
'./quadrotor/config.json']
},
tests_require=['pytest', 'mock'],
include_package_data=True,
install_requires=[
'pyglet==1.5.15; python_version>="3"',
'pyglet==1.4.0; python_version<"3"',
'Pillow>=6.2.2',
'six>=1.12.0',
'numpy>=1.16.4',
'configparser>=3.7.4',
'trimesh>=3.2.39',
'networkx>=2.2',
'colour>=0.1.5',
'scipy>=0.12.0',
'gym==0.18.3',
],
zip_safe=False,
)
| 33.233333
| 93
| 0.657974
|
d761f6b6c51be3e4c8de66d7bc7a636c1a3a9cc1
| 5,873
|
py
|
Python
|
sdk/python/pulumi_aws/efs/get_file_system.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/efs/get_file_system.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/efs/get_file_system.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetFileSystemResult:
"""
A collection of values returned by getFileSystem.
"""
def __init__(__self__, arn=None, creation_token=None, dns_name=None, encrypted=None, file_system_id=None, id=None, kms_key_id=None, lifecycle_policy=None, performance_mode=None, provisioned_throughput_in_mibps=None, tags=None, throughput_mode=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
__self__.arn = arn
"""
Amazon Resource Name of the file system.
"""
if creation_token and not isinstance(creation_token, str):
raise TypeError("Expected argument 'creation_token' to be a str")
__self__.creation_token = creation_token
if dns_name and not isinstance(dns_name, str):
raise TypeError("Expected argument 'dns_name' to be a str")
__self__.dns_name = dns_name
"""
The DNS name for the filesystem per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html).
"""
if encrypted and not isinstance(encrypted, bool):
raise TypeError("Expected argument 'encrypted' to be a bool")
__self__.encrypted = encrypted
"""
Whether EFS is encrypted.
"""
if file_system_id and not isinstance(file_system_id, str):
raise TypeError("Expected argument 'file_system_id' to be a str")
__self__.file_system_id = file_system_id
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
if kms_key_id and not isinstance(kms_key_id, str):
raise TypeError("Expected argument 'kms_key_id' to be a str")
__self__.kms_key_id = kms_key_id
"""
The ARN for the KMS encryption key.
"""
if lifecycle_policy and not isinstance(lifecycle_policy, dict):
raise TypeError("Expected argument 'lifecycle_policy' to be a dict")
__self__.lifecycle_policy = lifecycle_policy
"""
A file system [lifecycle policy](https://docs.aws.amazon.com/efs/latest/ug/API_LifecyclePolicy.html) object.
"""
if performance_mode and not isinstance(performance_mode, str):
raise TypeError("Expected argument 'performance_mode' to be a str")
__self__.performance_mode = performance_mode
"""
The file system performance mode.
"""
if provisioned_throughput_in_mibps and not isinstance(provisioned_throughput_in_mibps, float):
raise TypeError("Expected argument 'provisioned_throughput_in_mibps' to be a float")
__self__.provisioned_throughput_in_mibps = provisioned_throughput_in_mibps
"""
The throughput, measured in MiB/s, that you want to provision for the file system.
        * `tags` - A mapping of tags to assign to the file system.
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
if throughput_mode and not isinstance(throughput_mode, str):
raise TypeError("Expected argument 'throughput_mode' to be a str")
__self__.throughput_mode = throughput_mode
"""
Throughput mode for the file system.
"""
class AwaitableGetFileSystemResult(GetFileSystemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFileSystemResult(
arn=self.arn,
creation_token=self.creation_token,
dns_name=self.dns_name,
encrypted=self.encrypted,
file_system_id=self.file_system_id,
id=self.id,
kms_key_id=self.kms_key_id,
lifecycle_policy=self.lifecycle_policy,
performance_mode=self.performance_mode,
provisioned_throughput_in_mibps=self.provisioned_throughput_in_mibps,
tags=self.tags,
throughput_mode=self.throughput_mode)
def get_file_system(creation_token=None,file_system_id=None,tags=None,opts=None):
"""
Provides information about an Elastic File System (EFS).
:param str creation_token: Restricts the list to the file system with this creation token.
:param str file_system_id: The ID that identifies the file system (e.g. fs-ccfc0d65).
"""
__args__ = dict()
__args__['creationToken'] = creation_token
__args__['fileSystemId'] = file_system_id
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:efs/getFileSystem:getFileSystem', __args__, opts=opts).value
return AwaitableGetFileSystemResult(
arn=__ret__.get('arn'),
creation_token=__ret__.get('creationToken'),
dns_name=__ret__.get('dnsName'),
encrypted=__ret__.get('encrypted'),
file_system_id=__ret__.get('fileSystemId'),
id=__ret__.get('id'),
kms_key_id=__ret__.get('kmsKeyId'),
lifecycle_policy=__ret__.get('lifecyclePolicy'),
performance_mode=__ret__.get('performanceMode'),
provisioned_throughput_in_mibps=__ret__.get('provisionedThroughputInMibps'),
tags=__ret__.get('tags'),
throughput_mode=__ret__.get('throughputMode'))
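# Usage sketch (illustrative; the file-system ID is the placeholder from the
# docstring above): look up an existing EFS file system and export attributes.
#
#     import pulumi
#     from pulumi_aws import efs
#
#     fs = efs.get_file_system(file_system_id='fs-ccfc0d65')
#     pulumi.export('efs_arn', fs.arn)
#     pulumi.export('efs_dns_name', fs.dns_name)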
| 43.503704
| 253
| 0.670186
|
3f8afd40c60c4b67813308b52070a08ac3838310
| 339
|
py
|
Python
|
info/migrations/0011_auto_20181111_2017.py
|
tarpalantiri/institute-erp-system
|
d024ef31948d456f3772ce1f4dfb2bcfa2e33c61
|
[
"MIT"
] | 1
|
2021-06-21T16:11:56.000Z
|
2021-06-21T16:11:56.000Z
|
info/migrations/0011_auto_20181111_2017.py
|
tarpalantiri/institute-erp-system
|
d024ef31948d456f3772ce1f4dfb2bcfa2e33c61
|
[
"MIT"
] | null | null | null |
info/migrations/0011_auto_20181111_2017.py
|
tarpalantiri/institute-erp-system
|
d024ef31948d456f3772ce1f4dfb2bcfa2e33c61
|
[
"MIT"
] | 1
|
2020-11-18T13:31:09.000Z
|
2020-11-18T13:31:09.000Z
|
# Generated by Django 2.1.2 on 2018-11-11 14:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('info', '0010_auto_20181111_1218'),
]
operations = [
migrations.AlterUniqueTogether(
name='assigntime',
unique_together=set(),
),
]
| 18.833333
| 47
| 0.60472
|
69832370e1532ca357459abf99fbf841b4694e2a
| 19,334
|
py
|
Python
|
parseyak/jetyak.py
|
vpreston/jetyak-parsing
|
2f2ac14a7030962f15aae9272641f623e8ab5412
|
[
"MIT"
] | null | null | null |
parseyak/jetyak.py
|
vpreston/jetyak-parsing
|
2f2ac14a7030962f15aae9272641f623e8ab5412
|
[
"MIT"
] | null | null | null |
parseyak/jetyak.py
|
vpreston/jetyak-parsing
|
2f2ac14a7030962f15aae9272641f623e8ab5412
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Creates JetYak Class which contains various sensors, and allows for operations between sensors.
Maintainer: vpreston-at-{whoi, mit}-dot-edu
'''
from parseyak import sensors  # referenced below as sensors.CTD, sensors.GGA, etc.
import pandas as pd
import numpy as np
import copy
import utm
from gasex import sol
import time
class JetYak(object):
''' Class which allows for sensor interfaces and basic queries to be made about a jetyak mission '''
def __init__(self, bounds=None, trim_vals=None, args=None):
''' A list of filepaths for the sensors deployed on the mission provided '''
self.ctd = None
self.suna = None
self.optode = None
self.mini_optode = None
self.sonde = None
self.pixhawk = None
self.gga = None
self.airmar = None
self.phone_gps = None
self.bottle_samples = None
self.mission = []
self.bounds = bounds
self.trim_vals = trim_vals
self.sensors = []
self.sensor_names = []
if args is not None:
self.offset = args[0]
if len(args) > 1:
self.gga_offset = args[1]
def attach_sensor(self, sensor, dirs):
''' Method to add sensors for parsing and cleaning on the Jetyak '''
if 'ctd' in sensor:
print('Attaching CTD')
self.ctd = sensors.CTD(dirs, self.bounds, self.trim_vals)
self.ctd.clean_ctd()
self.sensors.append(self.ctd)
self.sensor_names.append('ctd')
elif 'gga' in sensor:
print('Attaching GGA')
self.gga = sensors.GGA(dirs, self.bounds, self.trim_vals)
self.gga.set_characteristics(offset=self.gga_offset)
self.gga.clean_gga()
self.sensors.append(self.gga)
self.sensor_names.append('gga')
elif 'airmar' in sensor:
print('Attaching AirMar')
self.airmar = sensors.AirMar(dirs, self.bounds, self.trim_vals)
self.airmar.clean_airmar()
self.sensors.append(self.airmar)
self.sensor_names.append('airmar')
elif 'pixhawk' in sensor:
print('Attaching Pixhawk')
self.pixhawk = sensors.Pixhawk(dirs, self.bounds, self.trim_vals)
self.pixhawk.clean_pixhawk()
self.sensors.append(self.pixhawk)
self.sensor_names.append('pixhawk')
elif 'mini_optode' in sensor:
print('Attaching Mini_Optode')
self.mini_optode = sensors.MiniOptode(dirs, self.bounds, self.trim_vals)
self.mini_optode.set_characteristics(offset=self.offset)
self.mini_optode.clean_mini_optode()
self.sensors.append(self.mini_optode)
self.sensor_names.append('mini_optode')
elif 'optode' in sensor:
print('Attaching Optode')
self.optode = sensors.Optode(dirs, self.bounds, self.trim_vals)
self.optode.set_characteristics(offset=self.offset)
self.optode.clean_optode()
self.sensors.append(self.optode)
self.sensor_names.append('optode')
elif 'phone_gps' in sensor:
print('Attaching Phone GPS')
self.phone_gps = sensors.PhoneGPS(dirs, self.bounds, self.trim_vals)
# self.optode.set_characteristics(offset=self.offset)
self.phone_gps.clean_phone_gps()
self.sensors.append(self.phone_gps)
self.sensor_names.append('phone_gps')
elif 'sonde' in sensor:
print('Attaching Sonde')
self.sonde = sensors.Sonde(dirs, self.bounds, self.trim_vals)
self.sonde.clean_sonde()
self.sensors.append(self.sonde)
self.sensor_names.append('sonde')
else:
print('Only supporting CTD, GGA, Optode, Mini-Optode, Airmar, and Phone GPS inputs \
at this time.')
def create_mission(self, args):
'''Method to combine (geo associate and time associate) all valid sensor signals.
Args is a dictionary of values which may be useful in processing some mission data
geoframe (string): which sensor to use for lat, lon
geolabels (tuple strings): labels to use for the chosen geoframe'''
# get the index of the geoframe for interpolating
ind = self.sensor_names.index(args['geoframe'])
# kill duplicate timestamps if they exist
df = []
for s in self.sensors:
temp = s.get_df().drop_duplicates(subset='Julian_Date', keep='last').set_index('Julian_Date')
df.append(temp)
# create meta dataframe and perform interpolation on geoframe
all_temp = pd.concat(df, axis=1, keys=self.sensor_names)
inter_temp = all_temp.interpolate()
df_index = df[ind].index
self.mission.append(inter_temp.loc[df_index])
def save_mission(self, save_path, mission_name):
'''Method to save sensors and mission files'''
# save sensors first
for n, s in zip(self.sensor_names, self.sensors):
s.get_df().to_csv(save_path+n+'.csv')
# save the mission
if len(self.mission) > 0:
for i, m in enumerate(self.mission):
# m = m.dropna(axis=1)
if 'trimmed' in mission_name:
m.to_csv(save_path+mission_name+'_'+str(i)+'.csv')
else:
m.to_csv(save_path+mission_name)
def load_mission(self, mission_path, header=0, simplify_mission=True, meth_eff=0.03, carb_eff=0.70):
'''Method to load previously cleaned and collated mission into working memory
simplify_mission is a boolean flag for whether to store the loaded mission in a
smaller working memory format'''
# mission is a list of dataframes
self.mission = []
for path in mission_path:
temp = pd.read_table(path, delimiter=',', header=header)
if not simplify_mission:
self.mission.append(temp)
else:
self.mission.append(strip_mission(temp, meth_eff=meth_eff, carb_eff=carb_eff))
def add_bottle_samples(self, file_path):
''' Method to add bottle samples taken in parallel to the jetyak mission '''
self.bottle_samples = clean_samples(file_path)
def collapse_bottle_samples(self):
'''Method to collapse the bottle sample frame to kill triplicates'''
df = self.bottle_samples
cstats = []
cdepth = []
clat = []
clon = []
cch4_mean = []
cch4_std = []
cco2_mean = []
cco2_std = []
cmonth = []
cday = []
unique_stations = np.unique(df['station'].values)
for st_id in unique_stations:
#get the station
temp = df[df['station'] == st_id]
depth_id = np.unique(temp['depth'].values)
for d in depth_id:
extracted = temp[temp['depth'] == d]
cstats.append(st_id)
cdepth.append(d)
clat.append(extracted['lat'].values[0])
clon.append(extracted['lon'].values[0])
cch4_mean.append(np.mean(extracted['[CH4] nM'].values))
cch4_std.append(np.std(extracted['[CH4] nM'].values))
cco2_mean.append(np.mean(extracted['pCO2'].values))
cco2_std.append(np.std(extracted['pCO2'].values))
cmonth.append(extracted['month'].values[0])
cday.append(extracted['day'].values[0])
collapsed_df = pd.DataFrame()
collapsed_df.loc[:, 'station'] = cstats
collapsed_df.loc[:, 'depth'] = cdepth
collapsed_df.loc[:, 'lat'] = clat
collapsed_df.loc[:, 'lon'] = clon
collapsed_df.loc[:, 'ch4_mean'] = cch4_mean
collapsed_df.loc[:, 'ch4_std'] = cch4_std
collapsed_df.loc[:, 'co2_mean'] = cco2_mean
collapsed_df.loc[:, 'co2_std'] = cco2_std
collapsed_df.loc[:, 'month'] = cmonth
collapsed_df.loc[:, 'day'] = cday
return collapsed_df
def match_bottles(self, cdf, geo_epsilon=10.0, depth_epsilon=0.1):
'''Method to match the collapsed bottle samples to JetYak observations'''
match_df = copy.copy(cdf)
#there should be no duplicates, so let's just run through the dataframe
jch4_mean = []
jch4_std = []
jco2_mean = []
jco2_std = []
jsal_mean = []
jsal_std = []
jtemp_mean = []
jtemp_std = []
missions = []
for m in self.mission:
m.loc[:, 'utmlat'] = m.apply(lambda x: convert_to_utm((x['Latitude'], x['Longitude']))[0], axis=1)
m.loc[:, 'utmlon'] = m.apply(lambda x: convert_to_utm((x['Latitude'], x['Longitude']))[1], axis=1)
missions.append(m)
for i in range(len(cdf.index)):
entry = cdf.iloc[i]
day = entry['day']
if day == 28:
m = missions[0]
elif day == 29:
m = missions[1]
elif day == 30:
m = missions[2]
elif day == 1:
m = missions[3]
elif day == 2:
m = missions[4]
else:
m = None
if entry['depth'] == 0.75:
d = entry['depth']-0.15
de = 0.36
elif entry['depth'] > 0.75:
d = entry['depth']
de = 0.36
else:
d = entry['depth']
de = depth_epsilon
if m is None:
jch4_mean.append(None)
jch4_std.append(None)
jco2_mean.append(None)
jco2_std.append(None)
jsal_mean.append(None)
jsal_std.append(None)
jtemp_mean.append(None)
jtemp_std.append(None)
else:
entry_lat, entry_lon = convert_to_utm((entry['lat'], entry['lon']))
temp = m[(m['Depth'] <= d + de) & (m['Depth'] >= d-de)]
temp = temp[(((temp['utmlat']-entry_lat)**2 + (temp['utmlon']-entry_lon)**2) <= geo_epsilon**2)]
# temp = temp[(temp['Latitude']-entry['lat'])**2 + (temp['Longitude']-entry['lon'])**2 <= geo_epsilon**2]
jch4_mean.append(np.mean(temp['CH4_nM'].values))
jch4_std.append(np.std(temp['CH4_nM'].values))
jco2_mean.append(np.mean(temp['CO2_uatm'].values))
jco2_std.append(np.std(temp['CO2_uatm'].values))
jsal_mean.append(np.mean(temp['Salinity'].values))
jsal_std.append(np.std(temp['Salinity'].values))
jtemp_mean.append(np.mean(temp['Temperature'].values))
jtemp_std.append(np.std(temp['Temperature'].values))
match_df.loc[:, 'jch4_mean'] = jch4_mean
match_df.loc[:, 'jch4_std'] = jch4_std
match_df.loc[:, 'jco2_mean'] = jco2_mean
match_df.loc[:, 'jco2_std'] = jco2_std
match_df.loc[:, 'jsal_mean'] = jsal_mean
match_df.loc[:, 'jsal_std'] = jsal_std
match_df.loc[:, 'jtemp_mean'] = jtemp_mean
match_df.loc[:, 'jtemp_std'] = jtemp_std
return match_df
def extract_bottle_locations(self, geo_epsilon=10.0, depth_epsilon=0.1, save_path=None):
'''Method to create dataset which matches jetyak and bottle sample information.
Saves to file is filepath is provided'''
compare = [('station', 'day', 'bottle_ch4_nM', 'bottle_co2_uatm', 'bottle_depth', 'lat', 'lon',
'jy_ch4_ppm', 'jy_ch4_uatm', 'jy_ch4_nm', 'jy_ch4_umolkg',
'jy_ch4_pstd', 'jy_ch4_ustd', 'jy_ch4_nstd', 'jy_ch4_umolstd',
'jy_co2_ppm', 'jy_co2_uatm', 'jy_co2_pstd', 'jy_co2_ustd',
'salinity', 'temperature', 'depth')]
unavg = [('station', 'day', 'bottle_ch4_nM', 'bottle_co2_uatm', 'bottle_depth', 'lat', 'lon',
'jy_ch4_ppm', 'jy_ch4_uatm', 'jy_ch4_nm', 'jy_ch4_umolkg', 'jy_co2_ppm', 'jy_co2_uatm',
'salinity', 'temperature', 'depth')]
for i, day in enumerate([28, 29, 30, 1, 2]):
samples = self.bottle_samples[self.bottle_samples['day'] == day]
print(np.unique(samples['day'].values))
methane = samples['[CH4] nM'].values
co2 = samples['pCO2'].values
lat = samples['lat'].values
lon = -samples['lon'].values
depth = samples['depth'].values
station = samples['station'].values
if day == 2:
print(methane, lat, lon)
jy_df = self.mission[i]
for j in range(0, len(methane)):
jy_df.loc[:, 'Distance'] = jy_df.apply(lambda x: get_distance((lat[j], lon[j]), (x['Latitude'], x['Longitude']), geo_epsilon), axis=1)
if depth[j] == 0.75:
d = depth[j]-0.15
de = 0.36
elif depth[j] > 0.75:
d = depth[j]
de = 0.36
else:
d = depth[j]
de = depth_epsilon
chopped = jy_df[(jy_df['Distance'] == True) &
(jy_df['Depth'] <= d + de) &
(jy_df['Depth'] >= d - de)]
print(len(chopped))
jy_methane = chopped['CH4_ppm'].values
jy_uatm = chopped['CH4_uatm'].values
jy_nm = chopped['CH4_nM'].values
jy_umolkg = chopped['CH4_umolkg'].values
jy_co2 = chopped['CO2_ppm'].values
jy_co2_uatm = chopped['CO2_uatm'].values
jy_salinity = chopped['Salinity'].values
jy_temperature = chopped['Temperature'].values
jy_depth = chopped['Depth'].values
if len(jy_methane) > 1:
compare.append((station[j], day, methane[j], co2[j], depth[j], lat[j], lon[j],
np.mean(jy_methane), np.mean(jy_uatm), np.mean(jy_nm), np.mean(jy_umolkg),
np.std(jy_methane), np.std(jy_uatm), np.std(jy_nm), np.std(jy_umolkg),
np.mean(jy_co2), np.mean(jy_co2_uatm),
np.std(jy_co2), np.std(jy_co2_uatm),
np.mean(jy_salinity), np.mean(jy_temperature), np.mean(jy_depth)))
for k in range(0,len(jy_methane)):
unavg.append((station[j], day, methane[j], co2[j], depth[j], lat[j], lon[j],
jy_methane[k], jy_uatm[k], jy_nm[k], jy_umolkg[k],
jy_co2[k], jy_co2_uatm[k],
jy_salinity[k], jy_temperature[k], jy_depth[k]))
if save_path is not None:
np.savetxt(save_path+'bottle_averaged.csv', [x for x in compare], fmt='%s')
np.savetxt(save_path+'bottle.csv', [x for x in unavg], fmt='%s')
return compare[1:], unavg[1:]
def clean_samples(filepath):
'''
Reads in sample data from filepath and creates data structure
'''
samples_df = pd.read_table(filepath, delimiter=',', header=0)
samples_df['lat'] = pd.to_numeric(samples_df['lat'], errors='coerce')
samples_df['lon'] = -pd.to_numeric(samples_df['lon'], errors='coerce')
# samples_df = samples_df.dropna()
return samples_df
def strip_mission(df, geo_frame='airmar', geo_labels=('lon_mod', 'lat_mod'), meth_eff=0.03, carb_eff=0.70):
    ''' Creates a simple frame of the relevant data of interest '''
print(meth_eff)
new_frame = pd.DataFrame()
new_frame.loc[:, 'Year'] = df['ctd']['Year']
new_frame.loc[:, 'Month'] = df['ctd']['Month']
new_frame.loc[:, 'Day'] = df['ctd']['Day']
new_frame.loc[:, 'Hour'] = df['ctd']['Hour']
new_frame.loc[:, 'Minute'] = df['ctd']['Minute']
new_frame.loc[:, 'Second'] = df['ctd']['Second']
new_frame.loc[:, 'CO2_ppm'] = df['gga']['CO2_ppm']
new_frame.loc[:, 'CO2_uatm'] = df.apply(lambda x: apply_efficiency(x['gga']['CO2_ppm'], eff=carb_eff, gppm=411.), axis=1)
new_frame.loc[:, 'CH4_ppm'] = df['gga']['CH4_ppm']
new_frame.loc[:, 'CH4_uatm'] = df.apply(lambda x: apply_efficiency(x['gga']['CH4_ppm'], eff=meth_eff), axis=1)
new_frame.loc[:, 'CH4_nM'] = df.apply(lambda x: determine_methane(apply_efficiency(x['gga']['CH4_ppm'], eff=meth_eff)*1e-6,
x['ctd']['Salinity'],
x['ctd']['Temperature'])*1e6, axis=1)
# new_frame.loc[:, 'CH4_umolkg'] = df.apply(lambda x: determine_methane(apply_efficiency(x['gga']['CH4_ppm'], eff=meth_eff)*1e-6,
# x['ctd']['Salinity'],
# x['ctd']['Temperature'],
# units='umolkg')*1e9, axis=1)
new_frame.loc[:, 'O2Concentration'] = df['optode']['O2Concentration']
new_frame.loc[:, 'Longitude'] = df[geo_frame][geo_labels[0]]
new_frame.loc[:, 'Latitude'] = df[geo_frame][geo_labels[1]]
new_frame.loc[:, 'Temperature'] = df['ctd']['Temperature']
new_frame.loc[:, 'Salinity'] = df['ctd']['Salinity']
new_frame.loc[:, 'Depth'] = df['ctd']['Depth']
return new_frame
def determine_methane(fCH4, sal, temp, units='mM'):
''' Interfaces with the gasex library to convert to desired units'''
return sol.sol_SP_pt(sal, temp, gas='CH4', p_dry=fCH4, units=units)
def get_distance(coord1, coord2, limit):
    '''Method to check whether two points lie within `limit` meters of each other (compared as squared UTM distances)'''
try:
e1, n1, zn1, zl1 = utm.from_latlon(coord1[0], coord1[1])
e2, n2, zn2, zl2 = utm.from_latlon(coord2[0], -coord2[1])
        # compare squared distances so no square root is needed
        dist_sq = (e1-e2)**2 + (n1-n2)**2
        return dist_sq <= limit**2
except:
return False
def get_distance_val(coord1, coord2):
    '''Method to get the squared UTM distance (in square meters) between two points'''
try:
e1, n1, zn1, zl1 = utm.from_latlon(coord1[0], coord1[1])
e2, n2, zn2, zl2 = utm.from_latlon(coord2[0], -coord2[1])
dist = (e1-e2)**2 + (n1-n2)**2
return dist
except:
return 10000.
def convert_to_utm(coord):
try:
e1, n1, zn1, zl1 = utm.from_latlon(coord[0], coord[1])
return e1, n1
except:
return 0., 0.
def apply_efficiency(x, eff=0.15, gppm=1.86, peq=495.):
'''Method for applying the extraction efficiency'''
return ((x-gppm)/eff + gppm)*(peq/1000.)
def convert_CH4(x, eff=0.035, peq=495., gppm=1.86):
''' Method to convert the raw ppm measurements from the GGA to compensated
uatm units '''
ui = peq * gppm / 1000.
return (x * peq / 1000. - ui) / eff + ui
def convert_CO2(x, eff=0.70, peq=495., gppm=1.86):
''' Method to convert the raw ppm measurements from the GGA to compensated
uatm units '''
ui = peq * gppm / 1000.
return (x * peq / 1000. - ui) / eff + ui
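# Minimal usage sketch (hypothetical readings, not from the original data):
# compensate raw GGA ppm readings for extraction efficiency using the helpers
# defined above. Note that apply_efficiency() and convert_CH4()/convert_CO2()
# are algebraically equivalent parameterisations of the same headspace
# correction and return uatm.
def _example_gas_conversion():
    raw_ch4_ppm = 2.5    # hypothetical methane reading from the GGA
    raw_co2_ppm = 450.0  # hypothetical carbon dioxide reading from the GGA
    ch4_uatm = apply_efficiency(raw_ch4_ppm, eff=0.03)  # same formula as convert_CH4
    co2_uatm = convert_CO2(raw_co2_ppm, eff=0.70)
    return ch4_uatm, co2_uatm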
| 43.643341
| 150
| 0.553274
|
f0a60293b8876d93dc4a258bc965e8cc9a844ddb
| 94
|
py
|
Python
|
tests/__code/main.py
|
wyhaya/lok
|
96f78982d81b63bea146d30a77ff0e65254ec031
|
[
"MIT"
] | 34
|
2019-08-01T08:17:10.000Z
|
2022-03-12T12:15:57.000Z
|
tests/__code/main.py
|
wyhaya/lok
|
96f78982d81b63bea146d30a77ff0e65254ec031
|
[
"MIT"
] | 1
|
2019-08-06T04:50:49.000Z
|
2019-08-06T04:50:49.000Z
|
tests/__code/main.py
|
wyhaya/rots
|
c2b9a1154bcc948f30a631707a16aea2b05b32e1
|
[
"MIT"
] | 2
|
2019-08-06T18:47:32.000Z
|
2019-08-07T12:26:38.000Z
|
#!/usr/bin/python
"""
"""
print "0"
# comment
'''
hello world
'''
print "0"
# comment
| 4.947368
| 17
| 0.510638
|
dc49561903f56bb43f1e9d4a4eb01db30e254084
| 1,120
|
py
|
Python
|
ex47/skeleton/tests/ex47_tests.py
|
bardia52/Learn.Python.The.Hard.Way
|
6a7dfece2017acb5b47a9aeb8b33b4dad725fbd5
|
[
"MIT"
] | null | null | null |
ex47/skeleton/tests/ex47_tests.py
|
bardia52/Learn.Python.The.Hard.Way
|
6a7dfece2017acb5b47a9aeb8b33b4dad725fbd5
|
[
"MIT"
] | null | null | null |
ex47/skeleton/tests/ex47_tests.py
|
bardia52/Learn.Python.The.Hard.Way
|
6a7dfece2017acb5b47a9aeb8b33b4dad725fbd5
|
[
"MIT"
] | null | null | null |
from nose.tools import *
from nose.tools import *
from ex47.game import Room
def test_room():
gold = Room("GoldRoom",
""" This room has gold in it you can grab. There's a \
door to the north."""
)
assert_equal(gold.name, "GoldRoom")
assert_equal(gold.paths, {})
def test_room_paths():
center = Room("Center", "Test room in the center.")
north = Room("North", "Test room in the north.")
south = Room("South", "Test room in the south.")
center.add_paths({'north': north, 'south': south})
assert_equal(center.go('north'), north)
assert_equal(center.go('south'), south)
def test_map():
start = Room("Start", "You can go west and down a hole.")
west = Room("Trees", "There are trees here, you can go east.")
down = Room("Dungeon", "It's dark down here, you can go up.")
start.add_paths({'west': west, 'down': down})
west.add_paths({'east': start})
down.add_paths({'up': start})
assert_equal(start.go('west'), west)
assert_equal(start.go('west').go('east'), start)
assert_equal(start.go('down').go('up'), start)
| 32.941176
| 70
| 0.620536
|
f1357d3352c47ce0b2715d8c95f52d043411d0c5
| 421
|
py
|
Python
|
get_embeddings.py
|
harrisonjansma/face_recognition
|
744b8b524077c1fa0ece44c910e7a1fca618f9a9
|
[
"MIT"
] | null | null | null |
get_embeddings.py
|
harrisonjansma/face_recognition
|
744b8b524077c1fa0ece44c910e7a1fca618f9a9
|
[
"MIT"
] | null | null | null |
get_embeddings.py
|
harrisonjansma/face_recognition
|
744b8b524077c1fa0ece44c910e7a1fca618f9a9
|
[
"MIT"
] | null | null | null |
import face_recognition
import os
import csv
path = os.path.dirname(os.path.realpath(__file__))
files = os.listdir(os.path.join(path,'img'))
embs = []
for f in files:
    img = face_recognition.load_image_file(os.path.join(path, 'img', f))  # 'img' is the image folder name
embs.append(face_recognition.face_encodings(img)[0])
with open("taco.txt", 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(embs)
| 26.3125
| 68
| 0.714964
|
c12b9224f753895eee05344a419994750cd5a59f
| 112
|
py
|
Python
|
app/backend/mails/urls.py
|
NicholasCF/meridien
|
dd00caf341d4c9979b89dc8441224ff2b97eac7f
|
[
"MIT"
] | null | null | null |
app/backend/mails/urls.py
|
NicholasCF/meridien
|
dd00caf341d4c9979b89dc8441224ff2b97eac7f
|
[
"MIT"
] | 41
|
2020-05-24T06:47:53.000Z
|
2022-02-27T11:10:41.000Z
|
app/backend/mails/urls.py
|
NicholasCF/meridien
|
dd00caf341d4c9979b89dc8441224ff2b97eac7f
|
[
"MIT"
] | 2
|
2020-11-26T12:19:30.000Z
|
2020-12-19T01:14:02.000Z
|
from django.urls import path
from mails import views
urlpatterns = [
path('send_html_email', views.mail)
]
| 16
| 39
| 0.741071
|
fe2af0334254f9ed2bb321e1598b8f5d959118b8
| 1,888
|
py
|
Python
|
nanome/api/ui/io/menu_io.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 3
|
2020-07-02T13:08:27.000Z
|
2021-11-24T14:32:53.000Z
|
nanome/api/ui/io/menu_io.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 11
|
2020-09-14T17:01:47.000Z
|
2022-02-18T04:00:52.000Z
|
nanome/api/ui/io/menu_io.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 5
|
2020-08-12T16:30:03.000Z
|
2021-12-06T18:04:23.000Z
|
import json
from nanome.util import Logs
from nanome._internal._ui._io._json_helper import _JsonHelper
from nanome._internal._ui._io import _menu_json
from nanome._internal import _Addon
class MenuIO(_Addon):
def __init__(self, base_object=None):
_Addon.__init__(self, base_object)
def to_json(self, path):
"""
| Serializes this instance's base_object to the json file specified by path.
:param path: The path to serialize base_object's json representation to
:type path: :class:`str`
"""
helper = _JsonHelper()
_menu_json.write_json(helper, self.base_object)
menu_string = json.dumps(helper.get_dict())
try:
with open(path, "w") as f:
f.write(menu_string)
except:
Logs.error("Could not write to file: " + path)
raise
def from_json(self, path):
"""
| Parses a Menu json file and returns a Menu.
:param path: The path to the Menu json to parse
:type path: :class:`str`
"""
try:
with open(path, "r") as f:
menu_string = f.read()
menu_json = json.loads(menu_string)
except:
Logs.error("Could not read json file: " + path)
raise
try:
json_helper = _JsonHelper(menu_json)
return _menu_json.parse_json(json_helper)
except:
Logs.error("Json does not correctly represent a menu.")
raise
def update_json(self, path):
"""
| Updates a menu written for an old version of the library.
        | Call this once, before reading the menu, and then remove the call.
:param path: path to the menu you wish to update.
:type path: :class:`str`
"""
menu = self.from_json(path)
menu.io.to_json(path)
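# Minimal usage sketch (assumed file names; not part of the original module):
# round-trip a saved menu layout through MenuIO. from_json() returns the parsed
# Menu, whose own `io` addon is then used to serialize it back out.
def _example_round_trip(src_path="menu.json", dst_path="menu_copy.json"):
    menu = MenuIO().from_json(src_path)  # parse the saved menu json into a Menu
    menu.io.to_json(dst_path)            # write it back via the Menu's io addon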
| 31.466667
| 84
| 0.590042
|
21636b0883c6e91e3ff9f5b0f8a79b62ff1f224a
| 308
|
py
|
Python
|
Hackerrank/Practice/Python/6.itertools/54.Maximize It!.py
|
kushagra1212/Competitive-Programming
|
5b68774c617d6abdf1b29893b1b13d47f62161e8
|
[
"MIT"
] | 994
|
2017-02-28T06:13:47.000Z
|
2022-03-31T10:49:00.000Z
|
Hackerrank_python/6.itertools/54.Maximize It!.py
|
devesh17m/Competitive-Programming
|
2d459dc8dc5ac628d94700b739988b0ea364cb71
|
[
"MIT"
] | 16
|
2018-01-01T02:59:55.000Z
|
2021-11-22T12:49:16.000Z
|
Hackerrank_python/6.itertools/54.Maximize It!.py
|
devesh17m/Competitive-Programming
|
2d459dc8dc5ac628d94700b739988b0ea364cb71
|
[
"MIT"
] | 325
|
2017-06-15T03:32:43.000Z
|
2022-03-28T22:43:42.000Z
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import product
m,n=map(int,input().split())
l1=list()
for i in range(m):
l=list(map(int,input().split()))[1:]
l1.append(l)
final_result=max(map(lambda x:sum(i*i for i in x)%n,product(*l1)))
print(final_result)
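# Worked example (hypothetical input, not read from STDIN): with n=100 and
# l1=[[5, 4], [7, 8, 9]], product(*l1) yields the pairs (5,7) (5,8) (5,9)
# (4,7) (4,8) (4,9), whose (a*a + b*b) % 100 values are 74, 89, 6, 65, 80, 97,
# so the script would print 97.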
| 30.8
| 70
| 0.662338
|
6224953197619ecd1c8c65f589bfa915dfb46509
| 850
|
py
|
Python
|
attr_dict/__init__.py
|
miigotu/Attr-Dict
|
e0c876a682a268194f940d65c3e7f4bd30cb2ad5
|
[
"MIT"
] | 1
|
2022-03-01T12:24:30.000Z
|
2022-03-01T12:24:30.000Z
|
attr_dict/__init__.py
|
miigotu/Attr-Dict
|
e0c876a682a268194f940d65c3e7f4bd30cb2ad5
|
[
"MIT"
] | null | null | null |
attr_dict/__init__.py
|
miigotu/Attr-Dict
|
e0c876a682a268194f940d65c3e7f4bd30cb2ad5
|
[
"MIT"
] | 1
|
2022-03-25T04:33:44.000Z
|
2022-03-25T04:33:44.000Z
|
"""Attr-Dict
Yet another Attribute Dict implementation !
Python module that provides a dictionary with attribute access to keys.
It is especially convenient when working with deeply nested data.
"""
from attr_dict.attrdict import AttrDict, LazyAttrDict, RestrictedAttrDict
from attr_dict.index import AttrIndex, LazyIndex, RestrictedIndex
name = "Attr-Dict"
__all__ = [
'AttrDict',
'LazyAttrDict',
'RestrictedAttrDict',
'AttrIndex',
'LazyIndex',
'RestrictedIndex',
]
__version__ = "1.0.0"
__version_info__ = (1, 0, 0)
__build__ = "20190403"
__title__ = "Attr-Dict"
__summary__ = "Yet another Attribute Dict implementation !"
__uri__ = "https://github.com/LuckyDams/Attr-Dict"
__author__ = "LuckyDams"
__email__ = "LuckyDams@gmail.org"
__license__ = "MIT License"
__copyright__ = "Copyright 2019 {}".format(__author__)
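# Minimal usage sketch (assumed API based on the description above; the exact
# constructor behaviour is defined in attr_dict.attrdict):
#
#   config = AttrDict({'server': {'host': 'localhost', 'port': 8080}})
#   config.server.port   # attribute access instead of config['server']['port']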
| 24.285714
| 73
| 0.744706
|
ff9fc022dd135eab77041ebc3c7e96204ec7d501
| 2,917
|
py
|
Python
|
usaspending_api/awards/migrations/0013_auto_20160927_1154.py
|
toolness/usaspending-api
|
ed9a396e20a52749f01f43494763903cc371f9c2
|
[
"CC0-1.0"
] | 1
|
2021-06-17T05:09:00.000Z
|
2021-06-17T05:09:00.000Z
|
usaspending_api/awards/migrations/0013_auto_20160927_1154.py
|
toolness/usaspending-api
|
ed9a396e20a52749f01f43494763903cc371f9c2
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/awards/migrations/0013_auto_20160927_1154.py
|
toolness/usaspending-api
|
ed9a396e20a52749f01f43494763903cc371f9c2
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-27 15:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awards', '0012_auto_20160926_1430'),
]
operations = [
migrations.RemoveField(
model_name='financialaccountsbyawards',
name='create_user_id',
),
migrations.RemoveField(
model_name='financialaccountsbyawards',
name='update_user_id',
),
migrations.RemoveField(
model_name='financialaccountsbyawardstransactionobligations',
name='create_user_id',
),
migrations.RemoveField(
model_name='financialaccountsbyawardstransactionobligations',
name='update_user_id',
),
migrations.RemoveField(
model_name='financialassistanceaward',
name='create_user_id',
),
migrations.RemoveField(
model_name='financialassistanceaward',
name='update_user_id',
),
migrations.RemoveField(
model_name='procurement',
name='create_user_id',
),
migrations.RemoveField(
model_name='procurement',
name='update_user_id',
),
migrations.AlterField(
model_name='financialaccountsbyawards',
name='create_date',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='financialaccountsbyawards',
name='update_date',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='financialaccountsbyawardstransactionobligations',
name='create_date',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='financialaccountsbyawardstransactionobligations',
name='update_date',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='financialassistanceaward',
name='create_date',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='financialassistanceaward',
name='update_date',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='procurement',
name='create_date',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='procurement',
name='update_date',
field=models.DateTimeField(auto_now=True, null=True),
),
]
| 33.147727
| 73
| 0.598903
|
2e90f110703ef91548133b9f28eac1e7e611f60e
| 2,330
|
py
|
Python
|
zinnia/markups.py
|
rowanv/django-blog-zinnia
|
3b9f63d8a727373216d4bb53cca481f35248f173
|
[
"BSD-3-Clause"
] | null | null | null |
zinnia/markups.py
|
rowanv/django-blog-zinnia
|
3b9f63d8a727373216d4bb53cca481f35248f173
|
[
"BSD-3-Clause"
] | 9
|
2017-05-09T02:00:31.000Z
|
2017-06-12T11:08:26.000Z
|
zinnia_blog_example/zinnia/markups.py
|
17-1-SKKU-OSS/116A
|
ef840eba40e62da1014685931c17d49372152a08
|
[
"CC-BY-3.0"
] | null | null | null |
"""
Set of" markup" function to transform plain text into HTML for Zinnia.
Code originally provided by django.contrib.markups
"""
import warnings
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
from django.utils.html import linebreaks
from zinnia.settings import MARKDOWN_EXTENSIONS
from zinnia.settings import MARKUP_LANGUAGE
from zinnia.settings import RESTRUCTUREDTEXT_SETTINGS
def textile(value):
"""
Textile processing.
"""
try:
import textile
except ImportError:
warnings.warn("The Python textile library isn't installed.",
RuntimeWarning)
return value
return textile.textile(force_text(value),
encoding='utf-8', output='utf-8')
def markdown(value, extensions=MARKDOWN_EXTENSIONS):
"""
    Markdown processing, optionally using various extensions
that python-markdown supports.
`extensions` is an iterable of either markdown.Extension instances
or extension paths.
"""
try:
import markdown
except ImportError:
warnings.warn("The Python markdown library isn't installed.",
RuntimeWarning)
return value
return markdown.markdown(force_text(value), extensions=extensions)
def restructuredtext(value, settings=RESTRUCTUREDTEXT_SETTINGS):
"""
    RestructuredText processing with optional custom settings.
"""
try:
from docutils.core import publish_parts
except ImportError:
warnings.warn("The Python docutils library isn't installed.",
RuntimeWarning)
return value
parts = publish_parts(source=force_bytes(value),
writer_name='html4css1',
settings_overrides=settings)
return force_text(parts['fragment'])
def html_format(value):
"""
Returns the value formatted in HTML,
depends on MARKUP_LANGUAGE setting.
"""
if not value:
return ''
elif MARKUP_LANGUAGE == 'markdown':
return markdown(value)
elif MARKUP_LANGUAGE == 'textile':
return textile(value)
elif MARKUP_LANGUAGE == 'restructuredtext':
return restructuredtext(value)
elif '</p>' not in value:
return linebreaks(value)
return value
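# Minimal usage sketch (assuming MARKUP_LANGUAGE is left unset; with a markup
# language configured the corresponding processor is used instead):
#
#   html_format('')                          # -> '' (empty input short-circuits)
#   html_format('<p>Already formatted</p>')  # returned unchanged, '</p>' is present
#   html_format('Plain text entry')          # passed through Django's linebreaks()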
| 28.765432
| 70
| 0.672532
|
2899377e84fe75dbff4e9c907e72181c7f7fa98c
| 2,008
|
py
|
Python
|
tests/payment_test.py
|
jhoe123/Elastos.Hive.Node
|
96b0c3c4a6ba29db4a4920a03c7efb9e7a991833
|
[
"MIT"
] | 2
|
2022-01-30T05:24:17.000Z
|
2022-03-29T21:31:21.000Z
|
tests/payment_test.py
|
jhoe123/Elastos.Hive.Node
|
96b0c3c4a6ba29db4a4920a03c7efb9e7a991833
|
[
"MIT"
] | 3
|
2021-11-25T13:38:56.000Z
|
2022-03-16T02:08:39.000Z
|
tests/payment_test.py
|
jhoe123/Elastos.Hive.Node
|
96b0c3c4a6ba29db4a4920a03c7efb9e7a991833
|
[
"MIT"
] | 2
|
2022-02-17T09:14:52.000Z
|
2022-03-01T07:23:50.000Z
|
# -*- coding: utf-8 -*-
"""
Testing file for the payment module.
"""
import unittest
from tests.utils.http_client import HttpClient
from tests import init_test
class PaymentTestCase(unittest.TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
init_test()
self.cli = HttpClient(f'/api/v2/payment')
self.order_id = '60ee8c056fdd17b16bb5b4c2'
self.transaction_id = '280a24034bfb241c31b5a73c792c9d05df2b1f79bb98733c5358aeb909c27010'
@staticmethod
def _subscribe():
HttpClient(f'/api/v2').put('/subscription/vault')
@classmethod
def setUpClass(cls):
cls._subscribe()
def test01_get_version(self):
response = self.cli.get('/version')
self.assertEqual(response.status_code, 200)
self.assertTrue('version' in response.json())
@unittest.skip
def test02_place_order(self):
response = self.cli.put('/order', body={'subscription': 'vault', 'pricing_name': 'Rookie'})
self.assertEqual(response.status_code, 200)
self.assertTrue('order_id' in response.json())
self.order_id = response.json().get('order_id')
@unittest.skip
def test03_pay_order(self):
response = self.cli.post(f'/order/{self.order_id}', body={'transaction_id': self.transaction_id})
self.assertEqual(response.status_code, 201)
self.assertTrue('receipt_id' in response.json())
self.assertTrue('order_id' in response.json())
@unittest.skip
def test04_get_orders(self):
response = self.cli.get('/order?subscription=vault')
self.assertEqual(response.status_code, 200)
self.assertTrue('orders' in response.json())
@unittest.skip
def test05_get_receipt(self):
response = self.cli.get(f'/receipt?order_id={self.order_id}')
self.assertEqual(response.status_code, 200)
self.assertTrue('order_id' in response.json())
self.assertTrue('receipt_id' in response.json())
| 33.466667
| 105
| 0.676793
|
f147f0f6629684b9983bc98acf5dfee673cb9a85
| 265
|
py
|
Python
|
latteys/latteys/doctype/warranty_registration/warranty_registration.py
|
hrgadesha/lattyeys
|
428b752ac99620ac7ad706fd305f07210bdcb315
|
[
"MIT"
] | 1
|
2021-09-10T03:51:22.000Z
|
2021-09-10T03:51:22.000Z
|
latteys/latteys/doctype/warranty_registration/warranty_registration.py
|
hrgadesha/lattyeys
|
428b752ac99620ac7ad706fd305f07210bdcb315
|
[
"MIT"
] | null | null | null |
latteys/latteys/doctype/warranty_registration/warranty_registration.py
|
hrgadesha/lattyeys
|
428b752ac99620ac7ad706fd305f07210bdcb315
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, B2Grow and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class WarrantyRegistration(Document):
pass
| 24.090909
| 49
| 0.781132
|
baf3a0c51d4e4551394603ca3fa933a43010e638
| 10,097
|
py
|
Python
|
sumo/electronic_structure/optics.py
|
mkrack/sumo
|
e885844de07e03bde552f9e38e2230edd73b10da
|
[
"MIT"
] | null | null | null |
sumo/electronic_structure/optics.py
|
mkrack/sumo
|
e885844de07e03bde552f9e38e2230edd73b10da
|
[
"MIT"
] | null | null | null |
sumo/electronic_structure/optics.py
|
mkrack/sumo
|
e885844de07e03bde552f9e38e2230edd73b10da
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Copyright (c) Scanlon Materials Theory Group
# Distributed under the terms of the MIT License.
"""
Module containing functions to process dielectric and optical absorption data.
TODO:
* Remove magic values
"""
import os
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
def broaden_eps(dielectric, sigma):
"""Apply gaussian broadening to the dielectric response.
Args:
dielectric (tuple): The high-frequency dielectric data, following
the same format as
:attr:`pymatgen.io.vasp.outputs.Vasprun.dielectric`.
This is a :obj:`tuple` containing the energy, the real part of the
dielectric tensor, and the imaginary part of the tensor, as a
:obj:`list` of :obj:`floats`. E.g.::
(
[energies],
[[real_xx, real_yy, real_zz, real_xy, real_yz, real_xz]],
[[imag_xx, imag_yy, imag_zz, imag_xy, imag_yz, imag_xz]]
)
sigma (float): Standard deviation for gaussian broadening.
Returns:
:obj:`tuple` of :obj:`list` of :obj:`list` of :obj:`float`: The
broadened dielectric response. Returned as a tuple containing the
energy, the real part of the dielectric tensor, and the imaginary
part of the tensor. E.g.::
(
[energies],
[[real_xx, real_yy, real_zz, real_xy, real_yz, real_xz]],
[[imag_xx, imag_yy, imag_zz, imag_xy, imag_yz, imag_xz]]
)
"""
e = dielectric[0]
diff = [e[i + 1] - e[i] for i in range(len(e) - 1)]
diff_avg = sum(diff) / len(diff)
real = [gaussian_filter1d(np.array(dielectric[1])[:, x], sigma / diff_avg)
for x in range(6)]
imag = [gaussian_filter1d(np.array(dielectric[2])[:, x], sigma / diff_avg)
for x in range(6)]
return e, np.array(real).T, np.array(imag).T
def calculate_dielectric_properties(dielectric, properties,
average=True):
r"""Calculate optical properties from the dielectric function
Supported properties:
*Absorption*
The unit of alpha is :math:`\mathrm{cm}^{-1}`.
Refractive index :math:`n` has real and imaginary parts:
.. math::
        n = [(e^\prime + ie^{\prime\prime}) / e_0]^{1/2}
= n^\prime + in^{\prime\prime}
Relationship between :math:`a` and imaginary :math:`n^{\prime\prime}`:
.. math::
a = 4 \pi n^{\prime\prime} / \lambda
Where:
.. math:: \lambda = hc/E
Args:
dielectric (tuple): The high-frequency dielectric data, following
the same format as :obj:`pymatgen.io.vasp.Vasprun.dielectric`.
This is a :obj:`tuple` containing the energy, the real part of the
dielectric tensor, and the imaginary part of the tensor, as a
:obj:`list` of :obj:`floats`. E.g.::
(
[energies],
[[real_xx, real_yy, real_zz, real_xy, real_yz, real_xz]],
[[imag_xx, imag_yy, imag_zz, imag_xy, imag_yz, imag_xz]]
)
properties (set):
The set of properties to return. Intermediate properties will be
calculated as needed. Accepted values: 'eps_real', 'eps_im',
'absorption', 'loss', 'n_real', 'n_imag'
average (:obj:`bool`, optional): Average the dielectric response across
the xx, yy, zz directions and calculate properties with scalar
maths. Defaults to ``True``. If False, solve dielectric matrix to
obtain directional properties, returning xx, yy, zz components.
This may be significantly slower!
Returns:
dict of :obj:`tuple` of :obj:`list` of :obj:`float`: The optical
absorption in :math:`\mathrm{cm}^{-1}`. If ``average`` is ``True``, the
data will be returned as::
([energies], [property]).
If ``average`` is ``False``, the data will be returned as::
([energies], [property_xx, property_yy, property_zz]).
In both cases these are collected in a results dictionary with keys
corresponding to the selected properties, e.g.::
{'absorption': ([energies], [absorption]),
'eps_real': ([energies], [eps_real])}
"""
results = {}
def _update_results(keys_vals):
"""Update results dict with selected properties only"""
results.update({prop: (energies, data)
for prop, data in keys_vals.items()
if (prop in properties)})
return results
energies = np.array(dielectric[0])
real_eps = np.array(dielectric[1])
imag_eps = np.array(dielectric[2])
if average:
real_eps = np.average(real_eps[:, :3], axis=1)
imag_eps = np.average(imag_eps[:, :3], axis=1)
results = _update_results({'eps_real': real_eps,
'eps_imag': imag_eps})
eps = real_eps + 1j * imag_eps
if 'loss' in properties:
loss = -np.imag(1/eps)
_update_results({'loss': loss})
if properties.intersection({'n_real', 'n_imag', 'absorption'}):
n = np.sqrt(eps)
_update_results({'n_real': n.real,
'n_imag': n.imag})
if 'absorption' in properties:
alpha = n.imag * energies * 4 * np.pi / 1.23984212E-4
_update_results({'absorption': alpha})
else:
# Work with eps as complex numbers in 9-column 'flattened' matrix
# First interpret 6-column data as symmetric matrix
# Input form xx yy zz xy yz xz
# Indices 0 1 2 3 4 5
n_rows = real_eps.shape[0]
eps = real_eps + 1j * imag_eps
eps = np.array([eps[:, 0], eps[:, 3], eps[:, 5],
eps[:, 3], eps[:, 1], eps[:, 4],
eps[:, 5], eps[:, 4], eps[:, 2]]).T
_update_results(
{'eps_real': eps.real[:, [0, 4, 8]],
'eps_imag': eps.imag[:, [0, 4, 8]]})
# Invert epsilon to obtain energy-loss function
if 'loss' in properties:
def matrix_loss_func(eps_row):
eps_matrix = eps_row.reshape(3, 3)
return -np.linalg.inv(eps_matrix).imag.flatten()
loss = np.array([matrix_loss_func(row) for row in eps])
_update_results({'loss': loss[:, [0, 4, 8]]})
if properties.intersection({'n_real', 'n_imag', 'absorption'}):
def matrix_n(eps_row):
eps_matrix = eps_row.reshape(3, 3)
eigenvals, v = np.linalg.eig(eps_matrix)
d = np.diag(eigenvals)
n = v @ np.sqrt(d) @ np.linalg.inv(v) # Py3.5 matrix mult @ =D
return n.flatten()
n = np.array([matrix_n(row) for row in eps])
_update_results({'n_real': n.real[:, [0, 4, 8]],
'n_imag': n.imag[:, [0, 4, 8]]})
if 'absorption' in properties:
alpha = (n.imag * energies.reshape(n_rows, 1) *
4 * np.pi / 1.23984212E-4)
_update_results({'absorption': alpha[:, [0, 4, 8]]})
return results
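# Minimal usage sketch (synthetic data, not from a real calculation): build a
# toy 6-column dielectric response in the format described in the docstring and
# request the averaged absorption spectrum and real refractive index.
def _example_absorption():
    energies = np.linspace(0.1, 10.0, 50)
    real = [[2.0, 2.0, 2.0, 0.0, 0.0, 0.0]] * len(energies)
    imag = [[0.5, 0.5, 0.5, 0.0, 0.0, 0.0]] * len(energies)
    results = calculate_dielectric_properties((energies.tolist(), real, imag),
                                              {'absorption', 'n_real'},
                                              average=True)
    return results['absorption']  # (energies, alpha) tuple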
def write_files(abs_data, basename='absorption', prefix=None, directory=None):
"""Write the absorption or loss spectra to a file.
Note that this function expects to receive an iterable series of spectra.
Args:
abs_data (tuple): Series (either :obj:`list` or :obj:`tuple`) of
optical absorption or loss spectra. Each spectrum should be
formatted as a :obj:`tuple` of :obj:`list` of :obj:`float`. If the
data has been averaged, each spectrum should be::
([energies], [alpha])
Else, if the data has not been averaged, each spectrum should be::
([energies], [alpha_xx, alpha_yy, alpha_zz]).
        basename (:obj:`str`, optional): Base name used to construct the
            output file names.
        prefix (:obj:`str`, optional): Prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
"""
for i, absorption in enumerate(abs_data):
num_txt = '_{}'.format(i + 1) if len(abs_data) > 1 else ''
prefix_txt = '{}_'.format(prefix) if prefix else ''
filename = prefix_txt + basename + num_txt + '.dat'
if directory:
filename = os.path.join(directory, filename)
header = 'energy(eV)'
if len(absorption[1].shape) == 2:
header += ' alpha_xx alpha_yy alpha_zz'
data = np.concatenate((absorption[0][:, None], absorption[1]),
axis=1)
else:
header += ' alpha'
data = np.stack((absorption[0], absorption[1]), axis=1)
np.savetxt(filename, data, header=header)
def kkr(de, eps_imag, cshift=1e-6):
"""Kramers Kronig transformation of imaginary dielectric function
Args:
de (:obj:`float`): Energy difference between evenly-spaced energy
values corresponding to dielectric data
eps_imag (:obj:`list` or :obj:`np.array`): Evenly-spaced sequence of
frequency-dependent 3x3 dielectric matrices (imaginary component
only)
cshift (:obj:`float`, optional): imaginary finite shift used in
integration; this should be small (and results should not be very
sensitive)
Returns:
(:obj:`numpy.array`) Real part of frequency-dependent dielectric function
corresponding to eps_imag. Array shape (NEDOS, 3, 3)
"""
eps_imag = np.array(eps_imag)
nedos = eps_imag.shape[0]
cshift = complex(0, cshift)
w_i = np.arange(0, (nedos - 0.5)*de, de, dtype=np.complex_)
w_i = np.reshape(w_i, (nedos, 1, 1))
def integration_element(w_r):
factor = w_i / (w_i**2 - w_r**2 + cshift)
total = np.sum(eps_imag * factor, axis=0)
return total * (2/np.pi) * de + np.diag([1, 1, 1])
return np.real([integration_element(w_r) for w_r in w_i[:, 0, 0]])
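# Minimal usage sketch (synthetic data): recover the real part of a toy
# frequency-dependent dielectric tensor from its imaginary part. The imaginary
# response here is a single box-shaped peak on the diagonal components.
def _example_kkr(de=0.05, nedos=200):
    eps_imag = np.zeros((nedos, 3, 3))
    eps_imag[40:60, 0, 0] = 1.0
    eps_imag[40:60, 1, 1] = 1.0
    eps_imag[40:60, 2, 2] = 1.0
    return kkr(de, eps_imag)  # real part, array of shape (nedos, 3, 3)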
| 36.189964
| 81
| 0.569872
|
ada19347a5f391571c5b917af44c48267133975a
| 3,804
|
py
|
Python
|
code/src/main/python/expt/test_typed.py
|
DynamicCodeSearch/CodeSeer
|
ee985ece7691691585952eb88565f0e08bdc9113
|
[
"MIT"
] | 5
|
2020-04-05T18:04:13.000Z
|
2021-04-13T20:34:19.000Z
|
code/src/main/python/expt/test_typed.py
|
DynamicCodeSearch/CodeSeer
|
ee985ece7691691585952eb88565f0e08bdc9113
|
[
"MIT"
] | 1
|
2020-04-29T21:42:26.000Z
|
2020-05-01T23:45:45.000Z
|
code/src/main/python/expt/test_typed.py
|
DynamicCodeSearch/CodeSeer
|
ee985ece7691691585952eb88565f0e08bdc9113
|
[
"MIT"
] | 3
|
2020-01-27T16:02:14.000Z
|
2021-02-08T13:25:15.000Z
|
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
from collections import OrderedDict
from misconceptions.rUtils import functions as r_functions, datatypes, generator, dataframer
from misconceptions.pdUtils import functions as pd_functions
from sos.function import Function, Outputs
from analysis.helpers import helper
from expt import test_clustering
r_source_file = '/Users/panzer/Raise/ProgramRepair/CodeSeer/projects/src/main/R/Example/PandasR/r_snippets.R'
pd_source_file = '/Users/panzer/Raise/ProgramRepair/CodeSeer/projects/src/main/python/Example/PandasR/generated_py_d123.py'
ARGS_FILE = '/Users/panzer/Raise/ProgramRepair/CodeSeer/projects/src/main/R/Example/PandasR/arg_example.R'
def get_sample_args(arg_names):
sample_args = {}
env_variables = r_functions.get_env_variables(ARGS_FILE)
if not env_variables:
return sample_args
for arg_name in arg_names:
sample_args[arg_name] = env_variables[arg_name]
return sample_args
def get_generated_args(r_func, n_args):
arg_names = r_functions.get_function_arg_names(r_func)
sample_args = get_sample_args(arg_names)
assert len(arg_names) == len(sample_args)
extracted_dfs = r_functions.extract_col_names(r_func)
sample_variables = OrderedDict()
for name, sample_arg in sample_args.items():
analyzed_sample_variable = datatypes.analyze_sample_variable(sample_arg)
analyzed_sample_variable.name = name
sample_variables[name] = analyzed_sample_variable
for name, df in extracted_dfs.items():
if name not in sample_variables:
continue
for col_name, column in df.columns.items():
if col_name not in sample_variables[name].columns:
sample_variables[name].columns[col_name] = column
generated_args = generator.generate_args_from_sample(sample_variables, n_args)
return generated_args
def compare_functions(r_func_name, pd_func_name):
r_func = r_functions.get_r_function(r_source_file, r_func_name)
pd_func = pd_functions.get_pd_functions(pd_source_file, as_dict=True)[pd_func_name]
generated_args = get_generated_args(r_func, 100)
r_results = r_functions.execute_R_function_on_args(r_func, generated_args)
r_executed = {
"name": r_func_name,
"body": r_functions.get_function_body(r_func),
"inputKey": "DF-KEY",
"outputs": r_results
}
pd_results = pd_functions.execute_pd_function_on_args(pd_func, generated_args)
pd_executed = {
"name": pd_func_name,
"body": helper.get_func_body(pd_func),
"inputKey": "DF-KEY",
"outputs": pd_results
}
r_func = Function(name=r_func_name, input_key=r_executed["inputKey"],
outputs=Outputs(r_executed["outputs"]),
body=test_clustering.get_body(r_executed), source=r_source_file)
pd_func = Function(name=pd_func_name, input_key=pd_executed["inputKey"],
outputs=Outputs(pd_executed["outputs"]),
body=test_clustering.get_body(pd_executed), source=pd_source_file)
print(test_clustering.execution_similarity(r_func, pd_func))
def delta(r_func_name, pd_func_name):
r_func = r_functions.get_r_function(r_source_file, r_func_name)
pd_func = pd_functions.get_pd_functions(pd_source_file, as_dict=True)[pd_func_name]
generated_args = get_generated_args(r_func, 1)
print(generated_args[0])
r_result = r_functions.execute_R_function_on_args(r_func, generated_args)[0]
pd_result = pd_functions.execute_pd_function_on_args(pd_func, generated_args)[0]
difference = dataframer.difference(r_result['return'], pd_result['return'])
print(difference)
if __name__ == "__main__":
# compare_functions("gen_func_r_select3", "func_py_select1")
delta("gen_func_r_select3", "func_py_select1")
# delta("gen_func_r_head", "func_py_head")
| 40.903226
| 123
| 0.773396
|
216595bc6fffa2dd1a666d6718af018b21e96ae5
| 27,456
|
py
|
Python
|
scripts/bootstrap_history.py
|
dfornika/galaxy
|
2dae50a9bdc28b4e7c159b52245351c3a9d79f36
|
[
"CC-BY-3.0"
] | 2
|
2017-03-28T12:11:41.000Z
|
2017-04-22T02:58:25.000Z
|
scripts/bootstrap_history.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | 2
|
2019-04-03T15:37:17.000Z
|
2019-04-03T19:37:09.000Z
|
scripts/bootstrap_history.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | 1
|
2016-06-14T13:21:13.000Z
|
2016-06-14T13:21:13.000Z
|
#!/usr/bin/env python
# Little script to make HISTORY.rst more easy to format properly, lots TODO
# pull message down and embed, use arg parse, handle multiple, etc...
from __future__ import print_function
import calendar
import datetime
import json
import os
import re
import string
import sys
import textwrap
from collections import OrderedDict
try:
import requests
except ImportError:
requests = None
try:
from pygithub3 import Github
except ImportError:
Github = None
from six import string_types
from six.moves.urllib.parse import urljoin
PROJECT_DIRECTORY = os.path.join(os.path.dirname(__file__), os.pardir)
GALAXY_VERSION_FILE = os.path.join(PROJECT_DIRECTORY, "lib", "galaxy", "version.py")
PROJECT_OWNER = "galaxyproject"
PROJECT_NAME = "galaxy"
PROJECT_URL = "https://github.com/%s/%s" % (PROJECT_OWNER, PROJECT_NAME)
PROJECT_API = "https://api.github.com/repos/%s/%s/" % (PROJECT_OWNER, PROJECT_NAME)
RELEASES_PATH = os.path.join(PROJECT_DIRECTORY, "doc", "source", "releases")
RELEASE_DELTA_MONTHS = 4 # Number of months between releases.
# Uncredit pull requestors... kind of arbitrary at this point.
DEVTEAM = [
"afgane", "dannon", "blankenberg",
"davebx", "martenson", "jmchilton",
"tnabtaf", "natefoo", "jgoecks",
"guerler", "jennaj", "nekrut", "jxtx",
"VJalili", "WilliamHolden", "Nerdinacan",
"ic4f",
]
TEMPLATE = """
.. to_doc
${release}
===============================
.. announce_start
Enhancements
-------------------------------
.. major_feature
.. feature
.. enhancement
.. small_enhancement
Fixes
-------------------------------
.. major_bug
.. bug
.. include:: ${release}_prs.rst
"""
ANNOUNCE_TEMPLATE = string.Template("""
===========================================================
${month_name} 20${year} Galaxy Release (v ${release})
===========================================================
.. include:: _header.rst
Highlights
===========================================================
**Feature1**
Feature description.
**Feature2**
Feature description.
**Feature3**
Feature description.
Get Galaxy
==========
The code lives at `GitHub <https://github.com/galaxyproject/galaxy>`__ and you should have `Git <https://git-scm.com/>`__ to obtain it.
To get a new Galaxy repository run:
.. code-block:: shell
$$ git clone -b release_${release} https://github.com/galaxyproject/galaxy.git
To update an existing Galaxy repository run:
.. code-block:: shell
$$ git fetch origin && git checkout release_${release} && git pull --ff-only origin release_${release}
See the `community hub <https://galaxyproject.org/develop/source-code/>`__ for additional details regarding the source code locations.
Release Notes
===========================================================
.. include:: ${release}.rst
:start-after: announce_start
.. include:: _thanks.rst
""")
ANNOUNCE_USER_TEMPLATE = string.Template("""
===========================================================
${month_name} 20${year} Galaxy Release (v ${release})
===========================================================
.. include:: _header.rst
Highlights
===========================================================
**Feature1**
Feature description.
**Feature2**
Feature description.
**Feature3**
Feature description.
New Visualizations
===========================================================
.. visualizations
New Datatypes
===========================================================
.. datatypes
Builtin Tool Updates
===========================================================
.. tools
Release Notes
===========================================================
Please see the `full release notes <${release}_announce.html>`_ for more details.
.. include:: ${release}_prs.rst
.. include:: _thanks.rst
""")
NEXT_TEMPLATE = string.Template("""
===========================================================
${month_name} 20${year} Galaxy Release (v ${version})
===========================================================
Schedule
===========================================================
* Planned Freeze Date: ${freeze_date}
* Planned Release Date: ${release_date}
""")
PRS_TEMPLATE = """
.. github_links
"""
RELEASE_ISSUE_TEMPLATE = string.Template("""
- [X] **Prep**
- [X] ~~Create this release issue ``make release-issue RELEASE_CURR=${version}``.~~
- [X] ~~Set freeze date (${freeze_date}).~~
- [ ] **Branch Release (on or around ${freeze_date})**
- [ ] Ensure all [blocking milestone PRs](https://github.com/galaxyproject/galaxy/pulls?q=is%3Aopen+is%3Apr+milestone%3A${version}) have been merged, delayed, or closed.
make release-check-blocking-prs RELEASE_CURR=${version}
- [ ] Merge the latest release into dev and push upstream.
make release-merge-stable-to-next RELEASE_PREVIOUS=release_${previous_version}
make release-push-dev
- [ ] Create and push release branch:
make release-create-rc RELEASE_CURR=${version} RELEASE_NEXT=${next_version}
- [ ] Open PRs from your fork of branch ``version-${version}`` to upstream ``release_${version}`` and of ``version-${next_version}.dev`` to ``dev``.
- [ ] Update ``next_milestone`` in [P4's configuration](https://github.com/galaxyproject/p4) to `${next_version}` so it properly tags new PRs.
- [ ] Set the ``release_${version}`` branch in GitHub [settings](https://github.com/galaxyproject/galaxy/settings/branches) as protected.
- [ ] **Issue Review Timeline Notes**
- [ ] Ensure any security fixes will be ready prior to ${freeze_date} + 1 week, to allow time for notification prior to release.
- [ ] Ensure ownership of outstanding bugfixes and track progress during freeze.
- [ ] **Deploy and Test Release**
- [ ] Update test.galaxyproject.org to ensure it is running a dev at or past branch point (${freeze_date} + 1 day).
- [ ] Update testtoolshed.g2.bx.psu.edu to ensure it is running a dev at or past branch point (${freeze_date} + 1 day).
- [ ] Deploy to usegalaxy.org (${freeze_date} + 1 week).
- [ ] Deploy to toolshed.g2.bx.psu.edu (${freeze_date} + 1 week).
- [ ] [Update BioBlend CI testing](https://github.com/galaxyproject/bioblend/commit/b74b1c302a1b8fed86786b40d7ecc3520cbadcd3) to include a ``release_${version}`` target: add ``- TOX_ENV=py27 GALAXY_VERSION=release_${version}`` to the ``env`` list in ``.travis.yml`` .
- [ ] **Create Release Notes**
- [ ] Review merged PRs and ensure they all have a milestones attached. [Link](https://github.com/galaxyproject/galaxy/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Amerged+no%3Amilestone+-label%3Amerge+)
- [ ] Checkout release branch
git checkout release_${version} -b ${version}_release_notes
- [ ] Check for obvious missing metadata in release PRs
make release-check-metadata RELEASE_CURR=${version}
- [ ] Bootstrap the release notes
make release-bootstrap-history RELEASE_CURR=${version}
- [ ] Open newly created files and manually curate major topics and release notes.
- [ ] Commit release notes.
git add docs/; git commit -m "Release notes for $version"; git push upstream ${version}_release_notes
- [ ] Open a pull request for new release note branch.
- [ ] Merge release note pull request.
- [ ] **Do Release**
- [ ] Ensure all [blocking milestone issues](https://github.com/galaxyproject/galaxy/issues?q=is%3Aopen+is%3Aissue+milestone%3A${version}) have been resolved.
make release-check-blocking-issues RELEASE_CURR=${version}
- [ ] Ensure all [blocking milestone PRs](https://github.com/galaxyproject/galaxy/pulls?q=is%3Aopen+is%3Apr+milestone%3A${version}) have been merged or closed.
make release-check-blocking-prs RELEASE_CURR=${version}
- [ ] Ensure previous release is merged into current. [GitHub branch comparison](https://github.com/galaxyproject/galaxy/compare/release_${version}...release_${previous_version})
- [ ] Create and push release tag:
make release-create RELEASE_CURR=${version}
- [ ] Add the branch `*/release_{version}` to Jenkins documentation build [configuration matrix](https://jenkins.galaxyproject.org/job/galaxy-sphinx-by-branch/configure).
- [ ] Trigger the [branch documentation build](https://jenkins.galaxyproject.org/job/galaxy-sphinx-by-branch/)
- [ ] Verify that everything is merged from ${version}->master, and then trigger the ['latest' documentation build](https://jenkins.galaxyproject.org/job/latest-Sphinx-Docs/)
- [ ] **Do Docker Release**
- [ ] Change the [dev branch](https://github.com/bgruening/docker-galaxy-stable/tree/dev) of the Galaxy Docker container to ${next_version}
- [ ] Merge dev into master
- [ ] **Ensure Tool Tests use Latest Release**
- [ ] Update GALAXY_RELEASE in https://github.com/galaxyproject/tools-iuc/blob/master/.travis.yml#L6
- [ ] Update GALAXY_RELEASE in https://github.com/galaxyproject/tools-devteam/blob/master/.travis.yml#L6
- [ ] **Announce Release**
- [ ] Verify release included in https://docs.galaxyproject.org/en/master/releases/index.html
- [ ] Review announcement in https://github.com/galaxyproject/galaxy/blob/dev/doc/source/releases/${version}_announce.rst
- [ ] Stage announcement content (Hub, Galaxy Help, etc.) on announce date to capture date tags. Note: all final content does not need to be completed to do this.
- [ ] Create hub *highlights* and post as a new "news" content item. [An example](https://galaxyproject.org/news/2018-9-galaxy-release/).
- [ ] Tweet docs news *highlights* link as @galaxyproject on twitter. [An example](https://twitter.com/galaxyproject/status/973646125633695744).
- [ ] Post *highlights* with tags `news` and `release` to [Galaxy Help](https://help.galaxyproject.org/). [An example](https://help.galaxyproject.org/t/galaxy-release-19-01/712).
- [ ] Email *highlights* to [galaxy-dev](http://dev.list.galaxyproject.org/) and [galaxy-announce](http://announce.list.galaxyproject.org/) @lists.galaxyproject.org. [An example](http://dev.list.galaxyproject.org/The-Galaxy-release-16-04-is-out-tp4669419.html)
- [ ] Adjust http://getgalaxy.org text and links to match current master branch by opening a PR at https://github.com/galaxyproject/galaxy-hub/
- [ ] **Prepare for next release**
- [ ] Close milestone ``${version}`` and ensure milestone ``${next_version}`` exists.
- [ ] Create release issue for next version ``make release-issue RELEASE_CURR=${next_version}``.
- [ ] Schedule committer meeting to discuss re-alignment of priorities.
- [ ] Close this issue.
""")
GROUPPED_TAGS = OrderedDict([
('area/visualizations', 'viz'),
('area/datatypes', 'datatypes'),
('area/tools', 'tools'),
('area/workflows', 'workflows'),
('area/client', 'ui'),
('area/jobs', 'jobs'),
('area/admin', 'admin'),
])
# https://api.github.com/repos/galaxyproject/galaxy/pulls?base=dev&state=closed
# https://api.github.com/repos/galaxyproject/galaxy/pulls?base=release_15.07&state=closed
# https://api.github.com/repos/galaxyproject/galaxy/compare/release_15.05...dev
def print_next_minor_version():
minor_version_str = None
with open(GALAXY_VERSION_FILE) as f:
for line in f:
result = re.match(r'VERSION_MINOR = "(.*)"', line)
if result:
minor_version_str = result.group(1)
break
try:
minor_version = int(minor_version_str)
except (TypeError, ValueError):
minor_version = 0
print(minor_version + 1)
def release_issue(argv):
release_name = argv[2]
previous_release = _previous_release(release_name)
new_version_params = _next_version_params(release_name)
next_version = new_version_params["version"]
freeze_date, release_date = _release_dates(release_name)
release_issue_template_params = dict(
version=release_name,
next_version=next_version,
previous_version=previous_release,
freeze_date=freeze_date,
)
release_issue_contents = RELEASE_ISSUE_TEMPLATE.safe_substitute(**release_issue_template_params)
github = _github_client()
github.issues.create(
data=dict(
title="Publication of Galaxy Release v %s" % release_name,
body=release_issue_contents,
),
user=PROJECT_OWNER,
repo=PROJECT_NAME,
)
return release_issue
def do_release(argv):
release_name = argv[2]
release_file = _release_file(release_name + ".rst")
enhancement_targets = "\n\n".join([".. enhancement_tag_%s" % a for a in GROUPPED_TAGS.values()])
bug_targets = "\n\n".join([".. bug_tag_%s" % a for a in GROUPPED_TAGS.values()])
template = TEMPLATE
template = template.replace(".. enhancement", "%s\n\n.. enhancement" % enhancement_targets)
template = template.replace(".. bug", "%s\n\n.. bug" % bug_targets)
release_info = string.Template(template).safe_substitute(release=release_name)
open(release_file, "w").write(release_info.encode("utf-8"))
month = int(release_name.split(".")[1])
month_name = calendar.month_name[month]
year = release_name.split(".")[0]
announce_info = ANNOUNCE_TEMPLATE.substitute(
month_name=month_name,
year=year,
release=release_name
)
announce_file = _release_file(release_name + "_announce.rst")
_write_file(announce_file, announce_info)
announce_user_info = ANNOUNCE_USER_TEMPLATE.substitute(
month_name=month_name,
year=year,
release=release_name
)
announce_user_file = _release_file(release_name + "_announce_user.rst")
_write_file(announce_user_file, announce_user_info)
prs_file = _release_file(release_name + "_prs.rst")
_write_file(prs_file, PRS_TEMPLATE)
next_version_params = _next_version_params(release_name)
next_version = next_version_params["version"]
next_release_file = _release_file(next_version + "_announce.rst")
next_announce = NEXT_TEMPLATE.substitute(**next_version_params)
open(next_release_file, "w").write(next_announce.encode("utf-8"))
releases_index = _release_file("index.rst")
releases_index_contents = _read_file(releases_index)
releases_index_contents = releases_index_contents.replace(".. announcements\n", ".. announcements\n " + next_version + "_announce\n")
_write_file(releases_index, releases_index_contents)
for pr in _get_prs(release_name):
# 2015-06-29 18:32:13 2015-04-22 19:11:53 2015-08-12 21:15:45
as_dict = {
"title": pr.title,
"number": pr.number,
"head": pr.head,
"labels": _pr_to_labels(pr),
}
main([argv[0], "--release_file", "%s.rst" % release_name, "--request", as_dict, "pr" + str(pr.number)])
def check_release(argv):
github = _github_client()
release_name = argv[2]
for pr in _get_prs(release_name):
_text_target(github, pr, labels=_pr_to_labels(pr))
def check_blocking_prs(argv):
release_name = argv[2]
block = 0
for pr in _get_prs(release_name, state="open"):
print("WARN: Blocking PR| %s" % _pr_to_str(pr))
block = 1
sys.exit(block)
def check_blocking_issues(argv):
release_name = argv[2]
block = 0
github = _github_client()
issues = github.issues.list_by_repo(
user='galaxyproject',
repo='galaxy',
state="open"
)
for page in issues:
for issue in page:
if issue.milestone and issue.milestone.title == release_name and "Publication of Galaxy Release" not in issue.title:
print("WARN: Blocking issue| %s" % _issue_to_str(issue))
block = 1
sys.exit(block)
def _pr_to_str(pr):
if isinstance(pr, string_types):
return pr
return "PR #%s (%s) %s" % (pr.number, pr.title, pr.html_url)
def _issue_to_str(pr):
if isinstance(pr, string_types):
return pr
return "Issue #%s (%s) %s" % (pr.number, pr.title, pr.html_url)
def _next_version_params(release_name):
month = int(release_name.split(".")[1])
year = release_name.split(".")[0]
next_month = (((month - 1) + RELEASE_DELTA_MONTHS) % 12) + 1
next_month_name = calendar.month_name[next_month]
if next_month < RELEASE_DELTA_MONTHS:
next_year = int(year) + 1
else:
next_year = year
next_version = "%s.%02d" % (next_year, next_month)
freeze_date, release_date = _release_dates(next_version)
return dict(
version=next_version,
year=next_year,
month_name=next_month_name,
freeze_date=freeze_date,
release_date=release_date,
)
def _release_dates(version):
year, month = version.split(".")
first_of_month = datetime.date(int(year) + 2000, int(month), 1)
freeze_date = next_weekday(first_of_month, 0)
release_date = next_weekday(first_of_month, 0) + datetime.timedelta(21)
return freeze_date, release_date
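# Worked example: for version "19.05" the first of the month is 2019-05-01, a
# Wednesday, so the computed freeze date is the following Monday (2019-05-06)
# and the planned release date is 21 days later (2019-05-27).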
def _get_prs(release_name, state="closed"):
github = _github_client()
pull_requests = github.pull_requests.list(
state=state,
user=PROJECT_OWNER,
repo=PROJECT_NAME,
)
reached_old_prs = False
for page in pull_requests:
if reached_old_prs:
break
for pr in page:
if pr.created_at < datetime.datetime(2016, 11, 1, 0, 0):
reached_old_prs = True
pass
merged_at = pr.merged_at
milestone = pr.milestone
proper_state = state != "closed" or merged_at
if not proper_state or not milestone or milestone['title'] != release_name:
continue
yield pr
def main(argv):
if requests is None:
raise Exception("Requests library not found, please pip install requests")
github = _github_client()
newest_release = None
if argv[1] == "--print-next-minor-version":
print_next_minor_version()
return
if argv[1] == "--check-blocking-prs":
check_blocking_prs(argv)
return
if argv[1] == "--check-blocking-issues":
check_blocking_issues(argv)
return
if argv[1] == "--create-release-issue":
release_issue(argv)
return
if argv[1] == "--release":
do_release(argv)
return
if argv[1] == "--check-release":
check_release(argv)
return
if argv[1] == "--release_file":
newest_release = argv[2]
argv = [argv[0]] + argv[3:]
if argv[1] == "--request":
req = argv[2]
argv = [argv[0]] + argv[3:]
else:
req = None
if newest_release is None:
newest_release = sorted(os.listdir(RELEASES_PATH))[-1]
history_path = os.path.join(RELEASES_PATH, newest_release)
user_announce_path = history_path[0:-len(".rst")] + "_announce_user.rst"
prs_path = history_path[0:-len(".rst")] + "_prs.rst"
history = _read_file(history_path)
user_announce = _read_file(user_announce_path)
prs_content = _read_file(prs_path)
def extend_target(target, line, source=history):
from_str = ".. %s\n" % target
if target not in source:
raise Exception("Failed to find target [%s] in source [%s]" % (target, source))
return source.replace(from_str, from_str + line + "\n")
ident = argv[1]
message = ""
if len(argv) > 2:
message = argv[2]
elif not (ident.startswith("pr") or ident.startswith("issue")):
api_url = urljoin(PROJECT_API, "commits/%s" % ident)
if req is None:
req = requests.get(api_url).json()
commit = req["commit"]
message = commit["message"]
message = get_first_sentence(message)
elif requests is not None and ident.startswith("pr"):
pull_request = ident[len("pr"):]
api_url = urljoin(PROJECT_API, "pulls/%s" % pull_request)
if req is None:
req = requests.get(api_url).json()
message = req["title"]
elif requests is not None and ident.startswith("issue"):
issue = ident[len("issue"):]
api_url = urljoin(PROJECT_API, "issues/%s" % issue)
if req is None:
req = requests.get(api_url).json()
message = req["title"]
else:
message = ""
text_target = "to_doc"
to_doc = message + " "
owner = None
if ident.startswith("pr"):
pull_request = ident[len("pr"):]
user = req["head"]["user"]
owner = user["login"]
if owner in DEVTEAM:
owner = None
text = ".. _Pull Request {0}: {1}/pull/{0}".format(pull_request, PROJECT_URL)
prs_content = extend_target("github_links", text, prs_content)
if owner:
to_doc += "\n(thanks to `@%s <https://github.com/%s>`__)." % (
owner, owner,
)
to_doc += "\n`Pull Request {0}`_".format(pull_request)
if github:
labels = None
if req and 'labels' in req:
labels = req['labels']
text_target = _text_target(github, pull_request, labels=labels)
elif ident.startswith("issue"):
issue = ident[len("issue"):]
text = ".. _Issue {0}: {1}/issues/{0}".format(issue, PROJECT_URL)
prs_content = extend_target("github_links", text, prs_content)
to_doc += "`Issue {0}`_".format(issue)
else:
short_rev = ident[:7]
text = ".. _{0}: {1}/commit/{0}".format(short_rev, PROJECT_URL)
prs_content = extend_target("github_links", text, prs_content)
to_doc += "{0}_".format(short_rev)
to_doc = wrap(to_doc)
if text_target is not None:
history = extend_target(text_target, to_doc, history)
if req and 'labels' in req:
labels = req['labels']
if 'area/datatypes' in labels:
user_announce = extend_target("datatypes", to_doc, user_announce)
if 'area/visualizations' in labels:
user_announce = extend_target("visualizations", to_doc, user_announce)
if 'area/tools' in labels:
user_announce = extend_target("tools", to_doc, user_announce)
_write_file(history_path, history)
_write_file(prs_path, prs_content)
_write_file(user_announce_path, user_announce)
def _read_file(path):
with open(path, "r") as f:
return f.read().decode("utf-8")
def _write_file(path, contents):
with open(path, "w") as f:
f.write(contents.encode("utf-8"))
def _text_target(github, pull_request, labels=None):
pr_number = None
if isinstance(pull_request, string_types):
pr_number = pull_request
else:
pr_number = pull_request.number
if labels is None:
labels = []
try:
labels = github.issues.labels.list_by_issue(int(pr_number), user=PROJECT_OWNER, repo=PROJECT_NAME)
labels = [l.name.lower() for l in labels]
except Exception as e:
print(e)
is_bug = is_enhancement = is_feature = is_minor = is_major = is_merge = is_small_enhancement = False
if len(labels) == 0:
print('No labels found for %s' % pr_number)
return None
for label_name in labels:
if label_name == "minor":
is_minor = True
elif label_name == "major":
is_major = True
elif label_name == "merge":
is_merge = True
elif label_name == "kind/bug":
is_bug = True
elif label_name == "kind/feature":
is_feature = True
elif label_name == "kind/enhancement":
is_enhancement = True
elif label_name in ["kind/testing", "kind/refactoring"]:
is_small_enhancement = True
elif label_name == "procedures":
# Treat procedures as an implicit enhancement.
is_enhancement = True
is_some_kind_of_enhancement = is_enhancement or is_feature or is_small_enhancement
if not(is_bug or is_some_kind_of_enhancement or is_minor or is_merge):
print("No 'kind/*' or 'minor' or 'merge' or 'procedures' label found for %s" % _pr_to_str(pull_request))
text_target = None
if is_minor or is_merge:
return
if is_some_kind_of_enhancement and is_major:
text_target = "major_feature"
elif is_feature:
text_target = "feature"
elif is_enhancement:
for group_name in GROUPPED_TAGS.keys():
if group_name in labels:
text_target = "enhancement_tag_%s" % GROUPPED_TAGS[group_name]
break
else:
text_target = "enhancement"
elif is_some_kind_of_enhancement:
text_target = "small_enhancement"
elif is_major:
text_target = "major_bug"
elif is_bug:
for group_name in GROUPPED_TAGS.keys():
if group_name in labels:
text_target = "bug_tag_%s" % GROUPPED_TAGS[group_name]
break
else:
text_target = "bug"
else:
print("Logic problem, cannot determine section for %s" % _pr_to_str(pull_request))
text_target = None
return text_target
def _pr_to_labels(pr):
labels = [l["name"].lower() for l in pr.labels]
return labels
def _previous_release(to):
previous_release = None
for release in _releases():
if release == to:
break
previous_release = release
return previous_release
def _releases():
all_files = sorted(os.listdir(RELEASES_PATH))
release_note_file_pattern = re.compile(r"\d+\.\d+.rst")
release_note_files = [f for f in all_files if release_note_file_pattern.match(f)]
return sorted(f.rstrip('.rst') for f in release_note_files)
def _github_client():
try:
github_json = os.path.expanduser("~/.github.json")
github = Github(**json.load(open(github_json, "r")))
except Exception:
github = None
return github
def _release_file(release):
releases_path = os.path.join(PROJECT_DIRECTORY, "doc", "source", "releases")
if release is None:
release = sorted(os.listdir(releases_path))[-1]
history_path = os.path.join(releases_path, release)
return history_path
def get_first_sentence(message):
first_line = message.split("\n")[0]
return first_line
def process_sentence(message):
# Strip tags like [15.07].
message = re.sub(r"^\s*\[.*\]\s*", r"", message)
# Link issues and pull requests...
issue_url = "https://github.com/%s/%s/issues" % (PROJECT_OWNER, PROJECT_NAME)
message = re.sub(r'#(\d+)', r'`#\1 <%s/\1>`__' % issue_url, message)
return message
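# Illustrative sketch only: how the two substitutions above behave on a made-up
# commit subject. The OWNER/REPO values below are placeholders, not this
# project's real constants.
def _process_sentence_example():
    issue_url = "https://github.com/OWNER/REPO/issues"
    msg = "[21.01] Fix upload bug, see #1234"
    msg = re.sub(r"^\s*\[.*\]\s*", r"", msg)
    msg = re.sub(r'#(\d+)', r'`#\1 <%s/\1>`__' % issue_url, msg)
    assert msg == "Fix upload bug, see `#1234 <https://github.com/OWNER/REPO/issues/1234>`__"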
def wrap(message):
message = process_sentence(message)
wrapper = textwrap.TextWrapper(initial_indent="* ")
wrapper.subsequent_indent = ' '
wrapper.width = 78
message_lines = message.splitlines()
first_lines = "\n".join(wrapper.wrap(message_lines[0]))
wrapper.initial_indent = " "
rest_lines = "\n".join(["\n".join(wrapper.wrap(m)) for m in message_lines[1:]])
return first_lines + ("\n" + rest_lines if rest_lines else "")
def next_weekday(d, weekday):
""" Return the next week day (0 for Monday, 6 for Sunday) starting from ``d``. """
days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
return d + datetime.timedelta(days_ahead)
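# Worked example (dates picked for illustration): 2024-01-03 is a Wednesday,
# so the next Friday (weekday 4) is two days later; asked from that Friday,
# the helper rolls over a full week instead of returning the same day.
def _next_weekday_example():
    assert next_weekday(datetime.date(2024, 1, 3), 4) == datetime.date(2024, 1, 5)
    assert next_weekday(datetime.date(2024, 1, 5), 4) == datetime.date(2024, 1, 12)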
if __name__ == "__main__":
main(sys.argv)
| 34.710493
| 271
| 0.6364
|
dd1cb5a1e7c1d85ec1a972a12d6260a655b73e43
| 49
|
py
|
Python
|
disco/version.py
|
daniel-thom/disco
|
2de6869888c015ff70985bf6f6b1a0fcc15fc561
|
[
"BSD-3-Clause"
] | null | null | null |
disco/version.py
|
daniel-thom/disco
|
2de6869888c015ff70985bf6f6b1a0fcc15fc561
|
[
"BSD-3-Clause"
] | null | null | null |
disco/version.py
|
daniel-thom/disco
|
2de6869888c015ff70985bf6f6b1a0fcc15fc561
|
[
"BSD-3-Clause"
] | null | null | null |
"""disco version number"""
__version__ = "0.1.4"
| 16.333333
| 26
| 0.653061
|
e25a7ae261c8817d4889757d8d67dff20ebfd848
| 543
|
py
|
Python
|
guet/steps/check/help_check.py
|
AbhishekMashetty/pairprogrammingmasetty
|
0528d4999b472ec6d94058193275a505eaf2c762
|
[
"Apache-2.0"
] | 13
|
2018-12-21T22:47:28.000Z
|
2021-12-17T14:27:35.000Z
|
guet/steps/check/help_check.py
|
chiptopher/guet
|
1099ee623311ba1d052237612efc9b06b7ff68bb
|
[
"Apache-2.0"
] | 63
|
2018-08-30T11:19:12.000Z
|
2021-05-13T12:11:08.000Z
|
guet/steps/check/help_check.py
|
chiptopher/guet
|
1099ee623311ba1d052237612efc9b06b7ff68bb
|
[
"Apache-2.0"
] | 7
|
2019-05-21T13:52:37.000Z
|
2022-01-30T22:57:21.000Z
|
from typing import List
from guet.steps.check.check import Check
class HelpCheck(Check):
def __init__(self, stop_message, *, stop_on_no_args=False):
super().__init__(stop_message)
self.stop_on_no_args = stop_on_no_args
def should_stop(self, args: List[str]) -> bool:
return '--help' in args or '-h' in args or self._should_stop_for_empty_args(args)
def _should_stop_for_empty_args(self, args: List[str]) -> bool:
if self.stop_on_no_args:
return len(args) == 0
return False
| 28.578947
| 89
| 0.679558
|
9c180c65cce1a3f6823468915ad00294ac3a304c
| 902
|
py
|
Python
|
unlabel_app/unlabel_backend/migrations/0007_capability.py
|
Amechi101/unlabel_agency
|
f482d425753d8afe820f155bded80349b6e50db3
|
[
"MIT"
] | null | null | null |
unlabel_app/unlabel_backend/migrations/0007_capability.py
|
Amechi101/unlabel_agency
|
f482d425753d8afe820f155bded80349b6e50db3
|
[
"MIT"
] | null | null | null |
unlabel_app/unlabel_backend/migrations/0007_capability.py
|
Amechi101/unlabel_agency
|
f482d425753d8afe820f155bded80349b6e50db3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-06-22 23:00
import cloudinary.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('unlabel_backend', '0006_auto_20180622_0225'),
]
operations = [
migrations.CreateModel(
name='Capability',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, null=True)),
('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='Image')),
('description', models.TextField(max_length=1000, null=True)),
],
options={
'verbose_name': 'Capability',
'verbose_name_plural': 'Capabilities',
},
),
]
| 32.214286
| 122
| 0.588692
|
de357b679824a982303804a7e543f8e7b96a3acc
| 7,698
|
py
|
Python
|
sudoku-5421/solution.py
|
Prider/AI
|
e8c7c4f26279f73591ce1d3b73d6e157ff2ce776
|
[
"MIT"
] | null | null | null |
sudoku-5421/solution.py
|
Prider/AI
|
e8c7c4f26279f73591ce1d3b73d6e157ff2ce776
|
[
"MIT"
] | null | null | null |
sudoku-5421/solution.py
|
Prider/AI
|
e8c7c4f26279f73591ce1d3b73d6e157ff2ce776
|
[
"MIT"
] | null | null | null |
import collections
assignments = []
rows = 'ABCDEFGHI'
cols = '123456789'
revCols = '987654321'
def cross(A, B):
"Cross product of elements in A and elements in B."
return [s+t for s in A for t in B]
boxes = cross(rows, cols)
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
d1_units = [[rows[i]+cols[i] for i in range(len(rows))]]
d2_units = [[rows[i]+revCols[i] for i in range(len(rows))]]
unitlist = row_units + column_units + square_units + d1_units + d2_units
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
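# Quick sanity check of the board model above (numbers easy to verify by hand):
# this diagonal-sudoku layout has 9 row + 9 column + 9 square + 2 diagonal
# units, and every one of the 81 boxes belongs to at least its row, column
# and square.
def _units_sanity_example():
    assert len(boxes) == 81
    assert len(unitlist) == 9 + 9 + 9 + 2
    assert all(len(units[box]) >= 3 for box in boxes)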
def assign_value(values, box, value):
"""
Please use this function to update your values dictionary!
Assigns a value to a given box. If it updates the board, record the new state.
"""
# Don't waste memory appending actions that don't actually change any values
if values[box] == value:
return values
values[box] = value
if len(value) == 1:
assignments.append(values.copy())
return values
def replace_digit(box, twins, values):
"""Replace digits of twins in each box in units.
Args:
box: an element in A1,A2
twins: two digit belong of twin
values(dict): a dictionary of the form {'box_name': '123456789', ...}
"""
for digit in twins:
new_values = values[box].replace(digit, '')
values = assign_value(values, box, new_values)
pass
def naked_twins(values):
"""Eliminate values using the naked twins strategy.
Args:
values(dict): a dictionary of the form {'box_name': '123456789', ...}
Returns:
the values dictionary with the naked twins eliminated from peers.
"""
# count how many times each candidate string occurs within the current unit
for unit in unitlist:
key_digit_counter = collections.Counter([values[box] for box in unit])
for key_digit, count in key_digit_counter.items():
# only treat values that occur more than once and whose digit count equals the number of occurrences (naked twins/triples)
if len(key_digit) == count and count > 1 :
for box in unit:
# skip the twin box
if values[box] == key_digit:
continue
# remove the twin digits from the other boxes that still contain them
if set(values[box]).intersection(set(key_digit)):
replace_digit(box, key_digit, values)
return values
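# Minimal illustration of the strategy above (constructed board, not a real
# puzzle): when two boxes of a unit share the candidate pair '23', those two
# digits can be removed from every other box of that unit.
def _naked_twins_example():
    values = {box: '123456789' for box in boxes}
    values['A1'] = values['A2'] = '23'
    values = naked_twins(values)
    assert values['A3'] == '1456789'  # '2' and '3' eliminated from the rest of row A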
def grid_values(grid):
"""Convert grid string into {<box>: <value>} dict with '123456789' value for empties.
Args:
grid: Sudoku grid in string form, 81 characters long
Returns:
Sudoku grid in dictionary form:
- keys: Box labels, e.g. 'A1'
- values: Value in corresponding box, e.g. '8', or '123456789' if it is empty.
"""
values = []
all_digits = '123456789'
for c in grid:
if c == '.':
values.append(all_digits)
elif c in all_digits:
values.append(c)
assert len(values) == 81
return dict(zip(boxes, values))
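# Small usage sketch (hypothetical grid string): a fixed digit is kept as-is,
# while '.' expands to the full candidate set.
def _grid_values_example():
    grid = '2' + '.' * 80
    values = grid_values(grid)
    assert values['A1'] == '2'
    assert values['A2'] == '123456789'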
def display(values):
"""
Display the values as a 2-D grid.
Input: The sudoku in dictionary form
Output: None
"""
width = 1+max(len(values[s]) for s in boxes)
line = '+'.join(['-'*(width*3)]*3)
for r in rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
for c in cols))
if r in 'CF': print(line)
return
def eliminate(values):
"""Eliminate values from peers of each box with a single value.
Go through all the boxes, and whenever there is a box with a single value,
eliminate this value from the set of values of all its peers.
Args:
values: Sudoku in dictionary form.
Returns:
Resulting Sudoku in dictionary form after eliminating values.
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit,'')
return values
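# Tiny illustration of elimination (constructed board): once A1 is fixed to
# '2', that digit disappears from every peer of A1 -- its row, column, square
# and, in this diagonal variant, the main diagonal.
def _eliminate_example():
    values = {box: '123456789' for box in boxes}
    values['A1'] = '2'
    values = eliminate(values)
    assert values['A2'] == '13456789'  # same row
    assert values['B2'] == '13456789'  # same square and same diagonal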
def only_choice(values):
"""Finalize all values that are the only choice for a unit.
Go through all the units, and whenever there is a unit with a value
that only fits in one box, assign the value to this box.
Input: Sudoku in dictionary form.
Output: Resulting Sudoku in dictionary form after filling in only choices.
"""
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
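# Illustration of the only-choice rule (constructed board): if '7' fits in a
# single box of row A, that box gets assigned '7'.
def _only_choice_example():
    values = {box: '123456789' for box in boxes}
    for box in row_units[0][1:]:                 # forbid '7' in A2..A9
        values[box] = values[box].replace('7', '')
    values = only_choice(values)
    assert values['A1'] == '7'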
def reduce_puzzle(values):
"""
Iterate eliminate(), naked_twins() and only_choice(). If at some point, there is a box with no available values, return False.
If the sudoku is solved, return the sudoku.
If after an iteration of both functions, the sudoku remains the same, return the sudoku.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
stalled = False
while not stalled:
# Check how many boxes have a determined value
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
# Use the Eliminate Strategy
values = eliminate(values)
# Use the Naked Twin Strategy
values = naked_twins(values)
# Use the Only Choice Strategy
values = only_choice(values)
# Check how many boxes have a determined value, to compare
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
# If no new values were added, stop the loop.
stalled = solved_values_before == solved_values_after
# Sanity check, return False if there is a box with zero available values:
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
"Using depth-first search and propagation, try all possible values."
# First, reduce the puzzle using the previous function
values = reduce_puzzle(values)
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in boxes):
return values ## Solved!
# Choose one of the unfilled squares with the fewest possibilities
n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
# Now use recursion to solve each of the resulting sudokus; return the first attempt that succeeds
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = search(new_sudoku)
if attempt:
return attempt
def solve(grid):
"""
Find the solution to a Sudoku grid.
Args:
grid(string): a string representing a sudoku grid.
Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
Returns:
The dictionary representation of the final sudoku grid. False if no solution exists.
"""
values = grid_values(grid)
values = search(values)
return values # solve by search solution
if __name__ == '__main__':
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
display(solve(diag_sudoku_grid))
try:
from visualize import visualize_assignments
visualize_assignments(assignments)
except SystemExit:
pass
except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| 36.140845
| 115
| 0.62133
|
c562dc868d682265aa783ba7f9a3be67238dc3d2
| 9,489
|
py
|
Python
|
zvt/factors/technical_factor.py
|
freedom6xiaobai/zvt
|
f4ba510a30f1014cc0e48b85370b0d3936bd851a
|
[
"MIT"
] | 1
|
2019-10-28T08:03:26.000Z
|
2019-10-28T08:03:26.000Z
|
zvt/factors/technical_factor.py
|
freedom6xiaobai/zvt
|
f4ba510a30f1014cc0e48b85370b0d3936bd851a
|
[
"MIT"
] | null | null | null |
zvt/factors/technical_factor.py
|
freedom6xiaobai/zvt
|
f4ba510a30f1014cc0e48b85370b0d3936bd851a
|
[
"MIT"
] | null | null | null |
from typing import List, Union
import pandas as pd
from zvdata.factor import Factor
from zvdata import IntervalLevel
from zvdata.utils.pd_utils import df_is_not_null
from zvdata.utils.pd_utils import index_df_with_category_xfield
from zvt.api.common import get_kdata_schema
from zvt.api.computing import ma, macd
class TechnicalFactor(Factor):
def __init__(self,
entity_ids: List[str] = None,
entity_type: str = 'stock',
exchanges: List[str] = ['sh', 'sz'],
codes: List[str] = None,
the_timestamp: Union[str, pd.Timestamp] = None,
start_timestamp: Union[str, pd.Timestamp] = None,
end_timestamp: Union[str, pd.Timestamp] = None,
columns: List = None,
filters: List = None,
order: object = None,
limit: int = None,
provider: str = 'joinquant',
level: IntervalLevel = IntervalLevel.LEVEL_1DAY,
category_field: str = 'entity_id',
time_field: str = 'timestamp',
trip_timestamp: bool = True,
auto_load: bool = True,
# child added arguments
fq='qfq',
indicators=['ma', 'macd'],
indicators_param=[{'window': 5}, {'slow': 26, 'fast': 12, 'n': 9}],
valid_window=26
) -> None:
self.fq = fq
self.indicators = indicators
self.indicators_param = indicators_param
self.data_schema = get_kdata_schema(entity_type, level=level)
self.valid_window = valid_window
self.indicator_cols = set()
super().__init__(self.data_schema, entity_ids, entity_type, exchanges, codes, the_timestamp, start_timestamp,
end_timestamp, columns, filters, order, limit, provider, level, category_field, time_field,
trip_timestamp, auto_load, keep_all_timestamp=False,
fill_method=None, effective_number=None)
def do_compute(self):
if df_is_not_null(self.data_df):
self.depth_df = self.data_df.reset_index(level='timestamp')
for idx, indicator in enumerate(self.indicators):
if indicator == 'ma':
window = self.indicators_param[idx].get('window')
col = 'ma{}'.format(window)
self.indicator_cols.add(col)
for entity_id, df in self.depth_df.groupby('entity_id'):
if self.entity_type == 'stock':
self.depth_df.loc[entity_id, col] = ma(df['qfq_close'], window=window)
else:
self.depth_df.loc[entity_id, col] = ma(df['close'], window=window)
if indicator == 'macd':
slow = self.indicators_param[idx].get('slow')
fast = self.indicators_param[idx].get('fast')
n = self.indicators_param[idx].get('n')
self.indicator_cols.add('diff')
self.indicator_cols.add('dea')
self.indicator_cols.add('macd')
for entity_id, df in self.depth_df.groupby('entity_id'):
if self.entity_type == 'stock' and self.fq == 'qfq':
diff, dea, m = macd(df['qfq_close'], slow=slow, fast=fast, n=n)
else:
diff, dea, m = macd(df['close'], slow=slow, fast=fast, n=n)
self.depth_df.loc[entity_id, 'diff'] = diff
self.depth_df.loc[entity_id, 'dea'] = dea
self.depth_df.loc[entity_id, 'macd'] = m
self.depth_df = self.depth_df.set_index('timestamp', append=True)
def on_category_data_added(self, category, added_data: pd.DataFrame):
size = len(added_data)
df = self.data_df.loc[category].iloc[-self.valid_window - size:]
for idx, indicator in enumerate(self.indicators):
if indicator == 'ma':
window = self.indicators_param[idx].get('window')
if self.entity_type == 'stock':
df['ma{}'.format(window)] = ma(df['qfq_close'], window=window)
else:
df['ma{}'.format(window)] = ma(df['close'], window=window)
if indicator == 'macd':
slow = self.indicators_param[idx].get('slow')
fast = self.indicators_param[idx].get('fast')
n = self.indicators_param[idx].get('n')
if self.entity_type == 'stock':
df['diff'], df['dea'], df['m'] = macd(df['qfq_close'], slow=slow, fast=fast, n=n)
else:
df['diff'], df['dea'], df['m'] = macd(df['close'], slow=slow, fast=fast, n=n)
df = df.iloc[-size:, ]
df = df.reset_index()
df[self.category_field] = category
df = index_df_with_category_xfield(df)
self.depth_df = self.depth_df.append(df)
self.depth_df = self.depth_df.sort_index(level=[0, 1])
def draw_depth(self, chart='kline', plotly_layout=None, annotation_df=None, render='html', file_name=None,
width=None, height=None, title=None, keep_ui_state=True, **kwargs):
return super().draw_depth('kline', plotly_layout, render, file_name, width, height, title, keep_ui_state,
indicators=self.indicator_cols, **kwargs)
def __json__(self):
result = super().__json__()
result['indicator_cols'] = list(self.indicator_cols)
result['indicators'] = self.indicators
result['indicators_param'] = self.indicators_param
return result
for_json = __json__ # supported by simplejson
class CrossMaFactor(TechnicalFactor):
def __init__(self,
entity_ids: List[str] = None,
entity_type: str = 'stock',
exchanges: List[str] = ['sh', 'sz'],
codes: List[str] = None,
the_timestamp: Union[str, pd.Timestamp] = None,
start_timestamp: Union[str, pd.Timestamp] = None,
end_timestamp: Union[str, pd.Timestamp] = None,
columns: List = None,
filters: List = None,
order: object = None,
limit: int = None,
provider: str = 'joinquant',
level: IntervalLevel = IntervalLevel.LEVEL_1DAY,
category_field: str = 'entity_id',
auto_load: bool = True,
# child added arguments
short_window=5,
long_window=10) -> None:
self.short_window = short_window
self.long_window = long_window
super().__init__(entity_ids, entity_type, exchanges, codes, the_timestamp, start_timestamp, end_timestamp,
columns, filters, order, limit, provider, level, category_field,
time_field='timestamp', auto_load=auto_load, fq='qfq', indicators=['ma', 'ma'],
indicators_param=[{'window': short_window}, {'window': long_window}], valid_window=long_window)
def do_compute(self):
super().do_compute()
s = self.depth_df['ma{}'.format(self.short_window)] > self.depth_df['ma{}'.format(self.long_window)]
self.result_df = s.to_frame(name='score')
def on_category_data_added(self, category, added_data: pd.DataFrame):
super().on_category_data_added(category, added_data)
# TODO: improve this to only compute the newly added data
self.do_compute()
class BullFactor(TechnicalFactor):
def __init__(self,
entity_ids: List[str] = None,
entity_type: str = 'stock',
exchanges: List[str] = ['sh', 'sz'],
codes: List[str] = None,
the_timestamp: Union[str, pd.Timestamp] = None,
start_timestamp: Union[str, pd.Timestamp] = None,
end_timestamp: Union[str, pd.Timestamp] = None,
columns: List = None,
filters: List = None,
order: object = None,
limit: int = None,
provider: str = 'joinquant',
level: IntervalLevel = IntervalLevel.LEVEL_1DAY,
category_field: str = 'entity_id',
auto_load: bool = True,
indicators=['macd'],
indicators_param=[{'slow': 26, 'fast': 12, 'n': 9}],
valid_window=26) -> None:
super().__init__(entity_ids, entity_type, exchanges, codes, the_timestamp, start_timestamp, end_timestamp,
columns, filters, order, limit, provider, level, category_field,
time_field='timestamp', auto_load=auto_load, fq='qfq', indicators=indicators,
indicators_param=indicators_param, valid_window=valid_window)
def do_compute(self):
super().do_compute()
s = (self.depth_df['diff'] > 0) & (self.depth_df['dea'] > 0)
self.result_df = s.to_frame(name='score')
if __name__ == '__main__':
factor1 = BullFactor(codes=['000338'], start_timestamp='2018-01-01', end_timestamp='2019-02-01')
factor1.load_data()
factor1.draw_depth()
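# Illustrative sketch only -- not the actual zvt.api.computing.macd: one common
# way to produce the (diff, dea, histogram) triple consumed by the factors
# above, using pandas exponential moving averages with the same parameter
# names (slow=26, fast=12, n=9). The real zvt implementation may differ.
def _macd_sketch(close, slow=26, fast=12, n=9):
    close = pd.Series(close)
    ema_fast = close.ewm(span=fast, adjust=False).mean()
    ema_slow = close.ewm(span=slow, adjust=False).mean()
    diff = ema_fast - ema_slow                    # DIF line
    dea = diff.ewm(span=n, adjust=False).mean()   # signal (DEA) line
    hist = (diff - dea) * 2                       # MACD histogram, common convention
    return diff, dea, hist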
| 45.401914
| 120
| 0.552113
|
c25fc44765270fb37b03f303def143f3c20b22a9
| 67,716
|
py
|
Python
|
tinynn/converter/operators/optimize.py
|
Wenzhe-Liu/TinyNeuralNetwork
|
07ae39fdf29fbb66be1257ea737f7fa227870850
|
[
"MIT"
] | 1
|
2021-12-20T07:21:35.000Z
|
2021-12-20T07:21:35.000Z
|
tinynn/converter/operators/optimize.py
|
zhiqwang/TinyNeuralNetwork
|
6b01812bd397270ca1960d719eeba686da8e576d
|
[
"MIT"
] | null | null | null |
tinynn/converter/operators/optimize.py
|
zhiqwang/TinyNeuralNetwork
|
6b01812bd397270ca1960d719eeba686da8e576d
|
[
"MIT"
] | 1
|
2021-12-20T07:21:37.000Z
|
2021-12-20T07:21:37.000Z
|
import copy
import functools
import re
import typing
import warnings
import igraph as ig
import numpy as np
from tinynn.util.util import get_logger
from tflite.ActivationFunctionType import ActivationFunctionType
from . import tflite as tfl
from .base import FUSE_ACTIVATION_MAP, ExtendedOperator
from .graph import CommonGraph
log = get_logger(__name__, 'INFO')
class GraphOptimizer(object):
graph: CommonGraph
fuse_tensor_count: int
fuse_attr_count: int
def __init__(self, graph: CommonGraph) -> None:
self.graph = graph
self.fuse_tensor_count = 0
self.fuse_attr_count = 0
def create_attr_tensor(self, tensor: tfl.Tensor, name: str = None, quantization: typing.Optional[tfl.QuantizationParameters] = None):
if name is None:
if self.fuse_attr_count == 0:
name = 'fuse_attr'
else:
name = f'fuse_attr_{self.fuse_attr_count}'
self.fuse_attr_count += 1
return tfl.Tensor(tensor, name, has_buffer=True, quantization=quantization)
def create_transform_tensor(self, tensor: tfl.Tensor, name: str = None, quantization: typing.Optional[tfl.QuantizationParameters] = None):
if name is None:
if self.fuse_tensor_count == 0:
name = 'fuse_transform'
else:
name = f'fuse_transform_{self.fuse_tensor_count}'
self.fuse_tensor_count += 1
return tfl.Tensor(tensor, name, has_buffer=False, quantization=quantization)
def fuse_conv_fc_bn(self):
# Find fusable ops
edges = self.graph.graph.es.select(functools.partial(is_bn_fusable_edge, graph_converter=self.graph.graph))
filtered_pairs = ((self.graph.graph.vs[x.source], self.graph.graph.vs[x.target], x) for x in edges)
remove_ids = []
actions = []
for conv, bn, tensor in filtered_pairs:
# Find out the output of the batch-norm nodes
new_output = bn['outputs'][0]
assert new_output in self.graph.tensor_map
# For each node that is next of a batch-norm node, we connect it with the conv node
self.graph.connect_next_tensors(bn, conv, new_output)
# Update graph, prepare to drop the output tensor of the conv node and use the output tensor of the batch-norm instead
conv['outputs'][0] = new_output
conv['op'].outputs[0] = self.graph.tensor_map[new_output]
self.graph.tensor_node_map[new_output] = conv['name']
tensor['name'] = bn['outputs'][0]
tensor['label'] = bn['outputs'][0]
bn_activ = bn['op'].fusedActivationFunction
conv_activ = conv['op'].fusedActivationFunction
if bn_activ != ActivationFunctionType.NONE and conv_activ == ActivationFunctionType.NONE:
conv['op'].fusedActivationFunction = bn_activ
# Collect the arguments of the conv and batch-norm nodes
weight = conv['op'].inputs[1]
bias = conv['op'].inputs[2] if len(conv['op'].inputs) > 2 else None
bn_w, bn_b, bn_mean, bn_var = bn['op'].inputs[1:]
bn_w, bn_b, bn_mean, bn_var = bn_w.tensor.copy(), bn_b.tensor.copy(), bn_mean.tensor.copy(), bn_var.tensor.copy()
activ_w = weight.tensor.copy()
activ_b = bias.tensor.copy() if bias is not None else None
eps = bn['op'].eps
# Fuse conv/fc and batch-norm
new_weight = fuse_bn_weight(eps, bn_w, bn_var, activ_w)
new_bias = fuse_bn_bias(eps, bn_w, bn_var, bn_mean, bn_b, activ_b)
# New attribute tensors
new_w = self.create_attr_tensor(new_weight)
new_b = self.create_attr_tensor(new_bias)
# Collect the actions we should take here
# We don't apply the actions here because we are still iterating over the vertices;
# the iterator would be invalidated once `replace_operator_input` is called
actions.append((self.graph.replace_operator_input, (conv, 1, new_w)))
if bias is not None:
actions.append((self.graph.replace_operator_input, (conv, 2, new_b)))
else:
actions.append((self.graph.append_operator_input, (conv, new_b)))
remove_ids.append(bn.index)
# Process actions
for func, args in actions:
func(*args)
# Delete batch-norm nodes
for id in remove_ids:
vertex = self.graph.graph.vs[id]
assert vertex['node_type'] == ExtendedOperator.BATCH_NORM
self.graph.graph.delete_vertices(remove_ids)
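# Illustrative sketch of the math behind the fusion above (fuse_bn_weight and
# fuse_bn_bias themselves are defined elsewhere in this module and may differ
# in detail). For y = gamma * (x - mean) / sqrt(var + eps) + beta applied after
# a conv/fc with weight W and bias b, and assuming the output channel is the
# leading weight dimension:
#     W' = W * gamma / sqrt(var + eps)
#     b' = (b - mean) * gamma / sqrt(var + eps) + beta
def _fold_bn_sketch(weight, bias, gamma, beta, mean, var, eps=1e-5):
    scale = gamma / np.sqrt(var + eps)
    new_weight = weight * scale.reshape(-1, *([1] * (weight.ndim - 1)))
    new_bias = (bias - mean) * scale + beta
    return new_weight, new_bias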
def fuse_activation(self):
# Find fusable ops
edges = self.graph.graph.es.select(functools.partial(is_activ_fusable_edge, graph_converter=self.graph.graph))
filtered_pairs = ((self.graph.graph.vs[x.source], self.graph.graph.vs[x.target], x) for x in edges)
remove_ids = []
for pre_activ, activ, tensor in filtered_pairs:
# Find out the output of the activation node
new_output = activ['outputs'][0]
assert new_output in self.graph.tensor_map
# For each node that is next of the activation node, we connect it with the previous node
self.graph.connect_next_tensors(activ, pre_activ, new_output)
# Update graph, prepare to drop the output tensor of the previous node and use the output tensor of the activation instead
pre_activ['outputs'][0] = new_output
pre_activ['op'].outputs[0] = self.graph.tensor_map[new_output]
self.graph.tensor_node_map[new_output] = pre_activ['name']
tensor['name'] = activ['outputs'][0]
tensor['label'] = activ['outputs'][0]
# Fuse activation
pre_activ['op'].fusedActivationFunction = FUSE_ACTIVATION_MAP[activ['node_type']]
remove_ids.append(activ.index)
# Delete activation nodes
self.graph.graph.delete_vertices(remove_ids)
def transform_graph(self):
# Find transformable ops
filtered_nodes = self.graph.graph.vs.select(functools.partial(
is_transformable_node, graph_converter=self.graph.graph))
remove_ids = []
ops = []
restore_mapping = []
for node in filtered_nodes:
restore_nodes = []
# For each node that is next of a transformable node,
# a. if it is an output node, remove it anyway since it will always be reconstructed
# b. otherwise, record the info of the edge so that we may restore it after reconstruction
for out_edge in node.out_edges():
next_node = self.graph.graph.vs[out_edge.target]
if next_node['node_type'] == ExtendedOperator.OUTPUT_NODE:
remove_ids.append(next_node.index)
del self.graph.tensor_map[next_node['outputs'][0]]
del self.graph.tensor_node_map[next_node['outputs'][0]]
else:
restore_nodes.append((out_edge['name'], next_node['name']))
# Remove the mapping since they are going to be removed
for output_name in node['outputs']:
del self.graph.tensor_map[output_name]
del self.graph.tensor_node_map[output_name]
restore_mapping.append(restore_nodes)
ops.append(node)
remove_ids.append(node.index)
# Make sure the nodes are topologically sorted
sorted_ops = [node['op'] for node in sorted(ops, key=lambda x: int(re.search(r'\d+', x['name'])[0]))]
# Delete nodes before transformation in the graph
self.graph.graph.delete_vertices(remove_ids)
# Do transformation
for op, mapping in zip(sorted_ops, restore_mapping):
op.transform(self.graph, mapping)
def fuse_simple_transpose_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_transpose_fusable_edge, graph_converter=self.graph.graph))
filtered_pairs = [[self.graph.graph.vs[x.source], self.graph.graph.vs[x.target]] for x in edges]
# Try to fuse the edges
filtered_pairs = fuse_connected_edges(filtered_pairs)
def _remove_first_pred(seq):
new_perm = fuse_transpose_perms(seq)
remove_first = np.all(new_perm == np.sort(new_perm))
return remove_first, new_perm
def _remove_first_action(first_node, last_node, custom_data):
# Set fused perm to the first transpose node
new_perm = custom_data
new_perm_tensor = self.create_attr_tensor(new_perm)
action = (self.graph.replace_operator_input, (first_node, 1, new_perm_tensor))
return [action]
elinimate_sequences(self.graph, filtered_pairs, _remove_first_pred, _remove_first_action)
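# Why the fusion above works (shapes here are made up): two consecutive
# transposes with perms p1 and p2 equal a single transpose with perm p1[p2],
# and when that composed perm is the identity the whole chain can be dropped,
# which is exactly what the np.sort check in _remove_first_pred detects.
def _composed_transpose_sketch():
    x = np.arange(24).reshape(2, 3, 4)
    p1 = np.array([2, 0, 1])
    p2 = np.array([1, 2, 0])
    assert (np.transpose(np.transpose(x, p1), p2) == np.transpose(x, p1[p2])).all()
    assert (p1[p2] == np.arange(3)).all()  # identity perm -> the pair cancels out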
def fuse_simple_reshape_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_reshape_fusable_edge, graph_converter=self.graph.graph))
filtered_pairs = [[self.graph.graph.vs[x.source], self.graph.graph.vs[x.target]] for x in edges]
# Try to fuse the edge
filtered_pairs = fuse_connected_edges(filtered_pairs)
def _remove_first_pred(seq):
first_node, last_node = seq[0], seq[-1]
new_shape = last_node['op'].inputs[1].tensor
orig_shape = np.array(first_node['op'].inputs[0].shape, dtype='int32')
remove_first = np.all(new_shape == orig_shape)
return remove_first, new_shape
def _remove_first_action(first_node, last_node, custom_data):
# Set final shape to the first reshape node
new_shape = custom_data
new_shape_tensor = self.create_attr_tensor(np.array(new_shape, dtype='int32'))
first_node['op'].newShape = new_shape_tensor.tensor
action = (self.graph.replace_operator_input, (first_node, 1, new_shape_tensor))
return [action]
elinimate_sequences(self.graph, filtered_pairs, _remove_first_pred, _remove_first_action)
def fuse_simple_slice_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_slice_fusable_edge, graph_converter=self.graph.graph))
filtered_pairs = [[self.graph.graph.vs[x.source], self.graph.graph.vs[x.target]] for x in edges]
# Try to fuse the edge
filtered_pairs = fuse_connected_edges(filtered_pairs)
def _remove_first_pred(seq):
fused_info = fuse_slices(seq)
return False, fused_info
def _remove_first_action(first_node, last_node, custom_data):
# Set the fused start/size on the first slice node
start, size = custom_data
start_tensor = self.create_attr_tensor(np.array(start, dtype='int32'))
size_tensor = self.create_attr_tensor(np.array(size, dtype='int32'))
actions = [(self.graph.replace_operator_input, (first_node, 1, start_tensor)),
(self.graph.replace_operator_input, (first_node, 2, size_tensor))]
return actions
elinimate_sequences(self.graph, filtered_pairs, _remove_first_pred, _remove_first_action)
def cleanup_dead_nodes(self):
cleanup_nodes = []
if not self.graph.graph.is_connected('weak'):
while True:
for vertex in self.graph.graph.vs:
if vertex['node_type'] != ExtendedOperator.OUTPUT_NODE and vertex.outdegree() == 0:
if vertex['node_type'] == ExtendedOperator.INPUT_NODE:
continue
if vertex['node_type'] != ExtendedOperator.CONSTANT_NODE:
warnings.warn('Non constant node removed, something must be wrong there')
log.warning('-' * 30)
log.warning('Info of the deleted node:')
log.warning('vertex: %s', vertex)
# edge = self.graph.graph.es.select(name=vertex['outputs'][0])
# assert edge is None, f'The edge {vertex["outputs"][0]} exists but the connection to the vertex {vertex["name"]} is broken, \
# probably there have some conflicts in the names of the nodes'
cleanup_nodes.append(vertex.index)
if len(cleanup_nodes) == 0:
break
self.graph.graph.delete_vertices(cleanup_nodes)
cleanup_nodes.clear()
def fold_transpose_buffer(self):
edges = self.graph.graph.es.select(functools.partial(
is_constant_transpose_fusable_edge, graph_converter=self.graph.graph))
filtered_pairs = ((self.graph.graph.vs[x.source], self.graph.graph.vs[x.target], x) for x in edges)
remove_ids = []
for constant, transpose, tensor in filtered_pairs:
# Calculate the output of the transposed constant nodes
constant_tensor = transpose['op'].inputs[0].tensor
perm_tensor = transpose['op'].inputs[1].tensor
new_constant = np.transpose(constant_tensor, perm_tensor)
new_tensor = self.create_attr_tensor(new_constant, quantization=transpose['op'].inputs[0].quantization)
new_node = self.graph.add_nodes([new_tensor])[0]
# For each node that is next of a constant transpose node, we connect it with the new constant node
for out_edge in transpose.out_edges():
next_node = self.graph.graph.vs[out_edge.target]
self.graph.graph.add_edge(new_node, next_node, name=new_tensor.name, label=new_tensor.name)
log.debug(f'NEW EDGE: {new_node["label"]} -> {next_node["label"]} {self.graph.tensor_map[out_edge["name"]]}')
op = next_node['op']
for idx in range(len(op.inputs)):
if op.inputs[idx].name == transpose['op'].outputs[0].name:
op.inputs[idx] = new_tensor
remove_ids.append(transpose.index)
# Delete constant transpose nodes
self.graph.graph.delete_vertices(remove_ids)
def transpose_to_reshape_pass(self):
filtered_nodes = self.graph.graph.vs.select(functools.partial(
is_transformable_transpose_node, graph_converter=self.graph.graph))
# Collect actions for the transformable transpose nodes
actions = []
for node in filtered_nodes:
original_op = node['op']
output_shape = np.array(original_op.outputs[0].shape, dtype='int32')
shape_tensor = self.create_attr_tensor(output_shape)
new_op = tfl.ReshapeOperator(original_op.inputs, original_op.outputs, output_shape)
node['op'] = new_op
node['node_type'] = ExtendedOperator.RESHAPE
node['label'] = new_op.type_name()
actions.append((self.graph.replace_operator_input, (node, 1, shape_tensor)))
# Process actions
for func, args in actions:
node = args[0]
func(*args)
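# Illustration of when the rewrite above is legal (made-up shapes): a transpose
# that only moves axes of size 1 leaves the underlying data order untouched,
# so it can be replaced by a plain reshape to the same output shape.
def _transpose_as_reshape_sketch():
    x = np.arange(12).reshape(1, 3, 1, 4)
    y = np.transpose(x, (1, 0, 2, 3))       # only the leading size-1 axis moves
    z = np.reshape(x, (3, 1, 1, 4))
    assert y.shape == z.shape and (y == z).all()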
def fold_reshape_buffer(self):
edges = self.graph.graph.es.select(functools.partial(
is_constant_reshape_fusable_edge, graph_converter=self.graph.graph))
filtered_pairs = ((self.graph.graph.vs[x.source], self.graph.graph.vs[x.target], x) for x in edges)
remove_ids = []
for constant, reshape, tensor in filtered_pairs:
# Calculate the output of the reshaped constant nodes
constant_tensor = reshape['op'].inputs[0].tensor
shape_tensor = reshape['op'].inputs[1].tensor
new_constant = np.reshape(constant_tensor, shape_tensor)
new_tensor = self.create_attr_tensor(new_constant, quantization=reshape['op'].inputs[0].quantization)
new_node = self.graph.add_nodes([new_tensor])[0]
# For each node that is next of a constant reshape node, we connect it with the new constant node
for out_edge in reshape.out_edges():
next_node = self.graph.graph.vs[out_edge.target]
self.graph.graph.add_edge(new_node, next_node, name=new_tensor.name, label=new_tensor.name)
log.debug(f'NEW EDGE: {new_node["label"]} -> {next_node["label"]} {self.graph.tensor_map[out_edge["name"]]}')
op = next_node['op']
for idx in range(len(op.inputs)):
if op.inputs[idx].name == reshape['op'].outputs[0].name:
op.inputs[idx] = new_tensor
remove_ids.append(reshape.index)
# Delete constant reshape nodes
self.graph.graph.delete_vertices(remove_ids)
def remove_noop_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_ending_with_noop_edge, graph_converter=self.graph.graph))
filtered_pairs = [[self.graph.graph.vs[x.source], self.graph.graph.vs[x.target]] for x in edges]
# Try to fuse the edges
filtered_pairs = fuse_connected_edges(filtered_pairs)
elinimate_sequences(self.graph, filtered_pairs)
def fuse_wrapped_reshape_within_transpose_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_wrapped_reshape_within_transpose_edge, graph_converter=self.graph.graph))
filtered_pairs = [[self.graph.graph.vs[x.source], self.graph.graph.vs[x.target]] for x in edges]
# Try to fuse the edges
filtered_pairs = fuse_connected_edges(filtered_pairs)
# Only TRANSPOSE->RESHAPE->TRANSPOSE is supported here
filtered_pairs = [seq for seq in filtered_pairs if len(
seq) == 3 and seq[0]['node_type'] == ExtendedOperator.TRANSPOSE]
def _skip_pred(seq):
mid_node = seq[1]
orig_shape = mid_node['op'].inputs[0].shape
new_shape = mid_node['op'].outputs[0].shape
if not is_simple_reshape(orig_shape, new_shape):
return False
new_perm = fuse_transpose_perms_extended(seq)
return (new_perm != np.sort(new_perm)).any()
def _remove_last_pred(seq):
orig_tensor = seq[0]['op'].inputs[0].tensor
return False, (seq[2], orig_tensor)
def _remove_last_action(first_node, last_node, custom_data):
# Rewrite the last transpose as a reshape to the final output shape
last_trans, orig_tensor = custom_data
actions = []
original_op = last_trans['op']
output_shape = np.array(original_op.outputs[0].shape, dtype='int32')
shape_tensor = self.create_attr_tensor(output_shape)
new_op = tfl.ReshapeOperator(original_op.inputs, original_op.outputs, output_shape)
last_trans['op'] = new_op
last_trans['node_type'] = ExtendedOperator.RESHAPE
last_trans['label'] = new_op.type_name()
new_op.inputs[0].tensor = orig_tensor
new_op.inputs[0].shape = new_op.inputs[0].tensor.shape
actions.append((self.graph.replace_operator_input, (last_trans, 1, shape_tensor)))
return actions
elinimate_sequences(self.graph, filtered_pairs, True, None, _remove_last_pred, _remove_last_action, _skip_pred)
def branch_reshape_expand_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_reshape_branch_edge, graph_converter=self.graph.graph))
branch_transpose_nodes = list(set(self.graph.graph.vs[edge.source] for edge in edges))
def _new_reshape(node: ig.Vertex, prev_node: ig.Vertex, next_node: ig.Vertex):
actions = []
op = node['op']
op_out = op.outputs[0]
op_shape = op.inputs[1]
prev_idx = prev_node['outputs'].index(op.inputs[0].name)
prev_op = prev_node['op']
prev_out = prev_op.outputs[prev_idx]
new_tensor = self.create_transform_tensor(op_out.tensor.copy(), quantization=op_out.quantization)
new_shape = self.create_attr_tensor(op_shape.tensor.copy())
new_op = tfl.ReshapeOperator([prev_out, new_shape], [new_tensor], new_shape.tensor)
self.graph.add_operator(new_op)
next_indices = []
for i, t in enumerate(next_node['op'].inputs):
if t.name == op_out.name:
actions.append((self.graph.replace_operator_input, (next_node, i, new_tensor)))
next_indices.append(i)
assert len(next_indices) > 0, f'{op_out.name} not in {[t.name for t in next_node["op"].inputs]}'
return actions
expand_op_outputs_in_branches(branch_transpose_nodes, _new_reshape, self.graph)
def branch_transpose_expand_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_transpose_branch_edge, graph_converter=self.graph.graph))
branch_transpose_nodes = list(set(self.graph.graph.vs[edge.source] for edge in edges))
def _new_transpose(node: ig.Vertex, prev_node: ig.Vertex, next_node: ig.Vertex):
actions = []
op = node['op']
op_out = op.outputs[0]
op_perm = op.inputs[1]
prev_idx = prev_node['outputs'].index(op.inputs[0].name)
prev_op = prev_node['op']
prev_out = prev_op.outputs[prev_idx]
new_tensor = self.create_transform_tensor(op_out.tensor.copy(), quantization=op_out.quantization)
new_perm = self.create_attr_tensor(op_perm.tensor.copy())
new_op = tfl.TransposeOperator([prev_out, new_perm], [new_tensor])
self.graph.add_operator(new_op)
next_indices = []
for i, t in enumerate(next_node['op'].inputs):
if t.name == op_out.name:
actions.append((self.graph.replace_operator_input, (next_node, i, new_tensor)))
next_indices.append(i)
assert len(next_indices) > 0, f'{op_out.name} not in {[t.name for t in next_node["op"].inputs]}'
return actions
expand_op_outputs_in_branches(branch_transpose_nodes, _new_transpose, self.graph)
def elementwise_op_transpose_passthrough_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_transpose_elementwise_op_edge, graph_converter=self.graph.graph))
pairs = ((self.graph.graph.vs[edge.source], self.graph.graph.vs[edge.target]) for edge in edges)
filtered_nodes = (k[0] if k[0]['node_type'] != ExtendedOperator.TRANSPOSE else k[1] for k in pairs)
unique_nodes = list(set(filtered_nodes))
actions = []
remove_edges = []
remove_vertices = []
for node in unique_nodes:
op = node['op']
if node['node_type'] == ExtendedOperator.CONCATENATION:
input_indices = range(len(op.inputs))
elif node['node_type'] == ExtendedOperator.SPLIT:
input_indices = (1, )
elif node['node_type'] in (ExtendedOperator.ADD,
ExtendedOperator.SUB,
ExtendedOperator.MUL,
ExtendedOperator.DIV):
input_indices = range(2)
else:
input_indices = range(1)
prev_nodes = []
cand_perms = dict()
prev_output_indices = []
for i in input_indices:
prev_node_name = op.inputs[i].name
prev_node = self.graph.graph.vs.find(name=self.graph.tensor_node_map[prev_node_name])
prev_nodes.append(prev_node)
prev_output_indices.append(prev_node['outputs'].index(prev_node_name))
if prev_node['node_type'] == ExtendedOperator.TRANSPOSE:
perm = tuple(prev_node['op'].inputs[1].tensor.tolist())
cand_perms.setdefault(perm, 0)
cand_perms[perm] += 1
next_nodes = []
next_edges = []
out_nodes = []
for edge in node.out_edges():
if edge.index in remove_edges:
continue
next_node = self.graph.graph.vs[edge.target]
if next_node['node_type'] == ExtendedOperator.OUTPUT_NODE:
out_nodes.append(next_node)
else:
next_nodes.append(next_node)
next_edges.append(edge)
if next_node['node_type'] == ExtendedOperator.TRANSPOSE:
perm = tuple(np.argsort(next_node['op'].inputs[1].tensor).tolist())
cand_perms.setdefault(perm, 0)
cand_perms[perm] += 1
cur_transpose_size = sum(cand_perms.values())
new_transpose_size = len(prev_nodes) + len(next_nodes) - cur_transpose_size
# Skip if the number of transpose nodes is not decreasing
if len(next_nodes) == 0 or new_transpose_size >= cur_transpose_size:
continue
remove_edges.extend([x.index for x in next_edges])
remove_vertices.extend([x.index for x in out_nodes])
for node in out_nodes:
del self.graph.tensor_map[node['outputs'][0]]
del self.graph.tensor_node_map[node['outputs'][0]]
perm = max(cand_perms.items(), key=lambda x: x[1])[0]
perm_arr = np.array(perm, dtype='int32')
inv_perm_arr = np.argsort(perm_arr).astype('int32')
for prev_node, prev_idx, next_idx in zip(prev_nodes, input_indices, prev_output_indices):
prev_out = prev_node['op'].outputs[next_idx]
perm_tensor = self.create_attr_tensor(inv_perm_arr)
prev_new_out = self.create_transform_tensor(np.transpose(
prev_out.tensor, inv_perm_arr), quantization=prev_out.quantization)
self.graph.add_operator(tfl.TransposeOperator([prev_out, perm_tensor], [prev_new_out]))
actions.append((self.graph.replace_operator_input, (node, prev_idx, prev_new_out, True)))
tensor_node_dict = {}
for i, op_out in enumerate(op.outputs):
perm_tensor = self.create_attr_tensor(perm_arr)
new_out = self.create_transform_tensor(np.transpose(
op_out.tensor, inv_perm_arr), quantization=op_out.quantization)
# Update relations
if op_out.name in self.graph.tensor_node_map:
del self.graph.tensor_node_map[op_out.name]
self.graph.tensor_node_map[new_out.name] = node['name']
self.graph.tensor_map[new_out.name] = new_out
node['outputs'][i] = new_out.name
op.outputs[i] = new_out
self.graph.add_operator(tfl.TransposeOperator([new_out, perm_tensor], [op_out]))
tensor_node_dict[op_out.name] = self.graph.graph.vs.find(name=self.graph.tensor_node_map[op_out.name])
# OP specific dim handling logic
if node['node_type'] == ExtendedOperator.CONCATENATION:
old_axis = op.axis
new_axis = np.where(inv_perm_arr == old_axis)[0][0]
op.axis = new_axis
elif node['node_type'] == ExtendedOperator.SPLIT_V:
old_dim = op.inputs[2].tensor
new_dim = np.where(inv_perm_arr == old_dim)[0][0]
new_dim_tensor = self.create_attr_tensor(np.array([new_dim], dtype='int32'))
actions.append((self.graph.replace_operator_input, (node, 2, new_dim_tensor, True)))
elif node['node_type'] == ExtendedOperator.SPLIT:
old_dim = op.inputs[0].tensor
new_dim = np.where(inv_perm_arr == old_dim)[0][0]
new_dim_tensor = self.create_attr_tensor(np.array([new_dim], dtype='int32'))
actions.append((self.graph.replace_operator_input, (node, 0, new_dim_tensor, True)))
for edge in next_edges:
source = tensor_node_dict[edge['name']]
self.graph.graph.add_edge(source, edge.target_vertex, name=edge['name'], label=edge['name'])
# Process actions
ids = []
for func, args in actions:
node = args[0]
res = func(*args)
if res is not None:
ids.extend(res)
remove_edges = list(set(remove_edges + ids))
self.graph.graph.delete_edges(remove_edges)
self.graph.graph.delete_vertices(remove_vertices)
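# The identity this pass relies on (arbitrary example shapes): element-wise ops
# commute with a shared transpose, so transposes on the inputs can be hoisted
# past e.g. ADD and merged into a single layout change on the output.
def _transpose_passthrough_sketch():
    a = np.random.rand(2, 3, 4)
    b = np.random.rand(2, 3, 4)
    perm = (2, 0, 1)
    assert np.allclose(np.transpose(a, perm) + np.transpose(b, perm),
                       np.transpose(a + b, perm))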
def elementwise_op_reshape_passthrough_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_reshape_elementwise_op_edge, graph_converter=self.graph.graph))
pairs = ((self.graph.graph.vs[edge.source], self.graph.graph.vs[edge.target]) for edge in edges)
filtered_nodes = (k[0] if k[0]['node_type'] != ExtendedOperator.RESHAPE else k[1] for k in pairs)
unique_nodes = list(set(filtered_nodes))
actions = []
remove_edges = []
remove_vertices = []
for node in unique_nodes:
op = node['op']
dim_indice = None
if node['node_type'] == ExtendedOperator.CONCATENATION:
input_indices = range(len(op.inputs))
dim_indice = op.axis
elif node['node_type'] == ExtendedOperator.SPLIT:
input_indices = (1, )
dim_indice = op.inputs[0].tensor[0]
elif node['node_type'] == ExtendedOperator.SPLIT_V:
input_indices = range(1)
dim_indice = op.inputs[2].tensor[0]
elif node['node_type'] in (ExtendedOperator.ADD,
ExtendedOperator.SUB,
ExtendedOperator.MUL,
ExtendedOperator.DIV):
input_indices = range(2)
else:
input_indices = range(1)
prev_nodes = []
cand_shapes = dict()
cand_next_shapes = dict()
for i in input_indices:
prev_node_name = op.inputs[i].name
prev_node = self.graph.graph.vs.find(name=self.graph.tensor_node_map[prev_node_name])
prev_nodes.append(prev_node)
if prev_node['node_type'] == ExtendedOperator.RESHAPE:
mapping = dict()
if is_simple_reshape(prev_node['op'].inputs[0].shape, prev_node['op'].outputs[0].shape, mapping):
continue
new_dim = None
if dim_indice is not None:
rev_mapping = {v: k for k, v in mapping.items()}
if dim_indice not in rev_mapping:
continue
new_dim = rev_mapping[dim_indice]
shape = tuple(prev_node['op'].inputs[0].shape)
shape = tuple(x if i != new_dim else -1 for i, x in enumerate(shape))
cand_shapes.setdefault(shape, 0)
cand_shapes[shape] += 1
next_shape = tuple(prev_node['op'].outputs[0].shape)
next_shape = tuple(x if i != dim_indice else -1 for i, x in enumerate(next_shape))
cand_next_shapes.setdefault(next_shape, 0)
cand_next_shapes[next_shape] += 1
next_nodes = []
next_edges = []
out_nodes = []
for edge in node.out_edges():
if edge.index in remove_edges:
continue
next_node = self.graph.graph.vs[edge.target]
if next_node['node_type'] == ExtendedOperator.OUTPUT_NODE:
out_nodes.append(next_node)
else:
next_nodes.append(next_node)
next_edges.append(edge)
if next_node['node_type'] == ExtendedOperator.RESHAPE:
mapping = dict()
if is_simple_reshape(next_node['op'].inputs[0].shape, next_node['op'].outputs[0].shape, mapping):
continue
new_dim = None
if dim_indice is not None:
if dim_indice not in mapping:
continue
new_dim = mapping[dim_indice]
shape = tuple(next_node['op'].outputs[0].shape)
shape = tuple(x if i != new_dim else -1 for i, x in enumerate(shape))
cand_shapes.setdefault(shape, 0)
cand_shapes[shape] += 1
next_shape = tuple(next_node['op'].inputs[0].shape)
next_shape = tuple(x if i != dim_indice else -1 for i, x in enumerate(next_shape))
cand_next_shapes.setdefault(next_shape, 0)
cand_next_shapes[next_shape] += 1
cur_reshape_size = max(cand_shapes.values())
cur_next_reshape_size = max(cand_next_shapes.values())
full_size = len(prev_nodes) + len(next_nodes)
# Skip if not wrapped by reshapes
if len(next_nodes) == 0 or cur_reshape_size < full_size or cur_next_reshape_size < full_size:
continue
remove_edges.extend([x.index for x in next_edges])
remove_vertices.extend([x.index for x in out_nodes])
for node in out_nodes:
del self.graph.tensor_map[node['outputs'][0]]
del self.graph.tensor_node_map[node['outputs'][0]]
prev_shape = max(cand_shapes.items(), key=lambda x: x[1])[0]
next_shape = max(cand_next_shapes.items(), key=lambda x: x[1])[0]
for i, prev_node in enumerate(prev_nodes):
prev_out = prev_node['op'].outputs[0]
prev_new_out = self.create_transform_tensor(np.reshape(
prev_out.tensor, prev_shape), quantization=prev_out.quantization)
shape_tensor = self.create_attr_tensor(np.array(prev_new_out.shape, dtype='int32'))
self.graph.add_operator(tfl.ReshapeOperator([prev_out, shape_tensor], [
prev_new_out], newShape=shape_tensor.tensor))
actions.append((self.graph.replace_operator_input, (node, i, prev_new_out)))
tensor_node_dict = {}
for i, op_out in enumerate(op.outputs):
new_out = self.create_transform_tensor(np.reshape(
op_out.tensor, next_shape), quantization=op_out.quantization)
shape_tensor = self.create_attr_tensor(np.array(new_out.shape, dtype='int32'))
# Update relations
if op_out.name in self.graph.tensor_node_map:
del self.graph.tensor_node_map[op_out.name]
self.graph.tensor_node_map[new_out.name] = node['name']
self.graph.tensor_map[new_out.name] = new_out
node['outputs'][i] = new_out.name
op.outputs[i] = new_out
self.graph.add_operator(tfl.ReshapeOperator([new_out, shape_tensor], [op_out], shape_tensor.tensor))
tensor_node_dict[op_out.name] = self.graph.graph.vs.find(name=self.graph.tensor_node_map[op_out.name])
# OP specific dim handling logic
if node['node_type'] == ExtendedOperator.CONCATENATION:
    new_axis = prev_shape.index(-1)
    op.axis = new_axis
elif node['node_type'] == ExtendedOperator.SPLIT_V:
    new_dim = prev_shape.index(-1)
    new_dim_tensor = self.create_attr_tensor(np.array([new_dim], dtype='int32'))
    actions.append((self.graph.replace_operator_input, (node, 2, new_dim_tensor)))
elif node['node_type'] == ExtendedOperator.SPLIT:
    new_dim = prev_shape.index(-1)
    new_dim_tensor = self.create_attr_tensor(np.array([new_dim], dtype='int32'))
    actions.append((self.graph.replace_operator_input, (node, 0, new_dim_tensor)))
for edge in next_edges:
source = tensor_node_dict[edge['name']]
self.graph.graph.add_edge(source, edge.target_vertex, name=edge['name'], label=edge['name'])
self.graph.graph.delete_vertices(remove_vertices)
self.graph.graph.delete_edges(remove_edges)
# Process actions
for func, args in actions:
node = args[0]
func(*args)
def fuse_bmm_add_pass(self):
edges = self.graph.graph.es.select(functools.partial(
is_bmm_add_edge, graph_converter=self.graph.graph))
filtered_pairs = [[self.graph.graph.vs[x.source], self.graph.graph.vs[x.target]] for x in edges]
remove_ids = []
ops = []
restore_mapping = []
for bmm, add in filtered_pairs:
restore_nodes = []
# For each node that is next of a transformable node,
# a. if it is an output node, remove it anyway since it will always be reconstructed
# b. otherwise, record the info of the edge so that we may restore it after reconstruction
for out_edge in add.out_edges():
next_node = self.graph.graph.vs[out_edge.target]
if next_node['node_type'] == ExtendedOperator.OUTPUT_NODE:
remove_ids.append(next_node.index)
del self.graph.tensor_map[next_node['outputs'][0]]
del self.graph.tensor_node_map[next_node['outputs'][0]]
else:
restore_nodes.append((out_edge['name'], next_node['name']))
# Remove the mapping since they are going to be removed
for output_name in add['outputs']:
del self.graph.tensor_map[output_name]
del self.graph.tensor_node_map[output_name]
restore_mapping.append(restore_nodes)
ops.append((bmm, add))
remove_ids.append(bmm.index)
remove_ids.append(add.index)
# Make sure the nodes are topologically sorted
sorted_ops = [(nodes[0]['op'], nodes[1]['op'])
for nodes in sorted(ops, key=lambda x: int(re.search(r'\d+', x[1]['name'])[0]))]
# Delete nodes before transformation in the graph
self.graph.graph.delete_vertices(remove_ids)
for (bmm, add), mapping in zip(sorted_ops, restore_mapping):
input_tensor = bmm.inputs[0]
weight_tensor = bmm.inputs[1]
bias_tensor = add.inputs[1]
output_tensor = add.outputs[0]
ops = []
input_as_2d = self.create_transform_tensor(input_tensor.tensor[0])
input_2d_shape = self.create_attr_tensor(np.array(input_as_2d.shape, dtype='int32'))
ops.append(tfl.ReshapeOperator([input_tensor, input_2d_shape], [input_as_2d], input_2d_shape.tensor))
weight_t = self.create_transform_tensor(np.transpose(weight_tensor.tensor))
weight_perm = self.create_attr_tensor(np.array([1, 0], dtype='int32'))
ops.append(tfl.TransposeOperator([weight_tensor, weight_perm], [weight_t]))
output_as_2d = self.create_transform_tensor(output_tensor.tensor[0])
ops.append(tfl.FullyConnectedOperator([input_as_2d, weight_t, bias_tensor], [
output_as_2d], fusedActivationFunction=add.fusedActivationFunction))
output_3d_shape = self.create_attr_tensor(np.array(output_tensor.shape, dtype='int32'))
ops.append(tfl.ReshapeOperator([output_as_2d, output_3d_shape], [output_tensor], output_3d_shape.tensor))
for op in ops:
self.graph.add_operator(op, transform=True)
self.graph.try_restore_edges(mapping)
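# Numerical sanity check of the rewrite above for the single-batch case it
# targets (shapes are illustrative): a 3-D matmul plus bias equals
# reshape -> 2-D matmul + bias -> reshape. The pass additionally transposes the
# weight because the FULLY_CONNECTED kernel expects it in (out, in) layout.
def _bmm_add_as_fc_sketch():
    x = np.random.rand(1, 5, 8)
    w = np.random.rand(8, 3)
    b = np.random.rand(3)
    assert np.allclose(x @ w + b,
                       np.reshape(np.reshape(x, (5, 8)) @ w + b, (1, 5, 3)))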
def input_transpose_pass(self):
nhwc2nchw_perm = np.array([0, 3, 1, 2], dtype='int32')
nchw2nhwc_perm = np.array([0, 2, 3, 1], dtype='int32')
remove_edges = []
for name, transpose in zip(self.graph.inputs, self.graph.input_transpose):
if transpose is True:
node_name = self.graph.tensor_node_map[name]
node = self.graph.graph.vs.find(name=node_name)
assert node['node_type'] == ExtendedOperator.INPUT_NODE
# For quantized graphs, we insert the transpose op after the quantize op
next_node = None
if node.outdegree() == 1:
next_node = node.out_edges()[0].target_vertex
if next_node['node_type'] != ExtendedOperator.QUANTIZE:
next_node = None
# Transpose input tensor shapes
input_tensor = self.graph.tensor_map[node['name']]
input_tensor.tensor = np.transpose(input_tensor.tensor, nchw2nhwc_perm)
input_tensor.shape = input_tensor.tensor.shape
# Transpose quantize output tensor shapes
last_tensor = input_tensor
last_node = node
if next_node is not None:
last_node = next_node
last_tensor = next_node['op'].outputs[0]
last_tensor.tensor = np.transpose(last_tensor.tensor, nchw2nhwc_perm)
last_tensor.shape = last_tensor.tensor.shape
# Create new transpose op
nhwc2nchw_perm_tensor = self.create_attr_tensor(nhwc2nchw_perm)
transposed = self.create_transform_tensor(np.transpose(
last_tensor.tensor, nhwc2nchw_perm), quantization=last_tensor.quantization)
transpose_op = tfl.TransposeOperator([last_tensor, nhwc2nchw_perm_tensor], [transposed])
self.graph.add_operator(transpose_op)
# Get the newly-generated node
new_node_name = self.graph.tensor_node_map[transposed.name]
new_node = self.graph.graph.vs.find(name=new_node_name)
# Connect the transpose op to the graph
self.graph.replace_next_tensors(last_node, new_node, transposed.name, [new_node_name])
# Collect the unused connections
for edge in last_node.out_edges():
target_vertex = edge.target_vertex
if target_vertex['name'] != new_node_name:
remove_edges.append(edge.index)
# Remove the collected edges
self.graph.graph.delete_edges(remove_edges)
def output_list_unpack_pass(self):
output_names = []
unpacked_outputs = []
for name in self.graph.outputs:
if name in self.graph.iterable_map:
names = self.graph.get_list_expanded_names(name)
unpacked_outputs.extend(names)
output_names.extend(names)
else:
unpacked_outputs.append(name)
self.graph.outputs.clear()
self.graph.outputs.extend(unpacked_outputs)
self.graph.add_outputs(output_names)
def optimize(self):
# Input/output passes
self.output_list_unpack_pass()
self.input_transpose_pass()
# Transpose, Reshape and NO-OP cleanup
self.branch_reshape_expand_pass()
self.fuse_simple_reshape_pass()
self.branch_transpose_expand_pass()
self.fuse_simple_transpose_pass()
self.remove_noop_pass()
self.fuse_wrapped_reshape_within_transpose_pass()
# Buffer folding, which is needed by the fusion passes below
for _ in range(2):
self.fold_reshape_buffer()
self.fold_transpose_buffer()
# OP fusion passes before transformation
self.fuse_conv_fc_bn()
self.fuse_activation()
# Convert TinyNeuralNetwork ops to TFLite ops
self.transform_graph()
# OP fusion passes after transformation
self.fuse_bmm_add_pass()
self.fuse_activation()
# Transpose and reshape cleanup
self.branch_reshape_expand_pass()
self.branch_transpose_expand_pass()
self.fuse_simple_transpose_pass()
self.fuse_simple_reshape_pass()
# Branch transpose cleanup
for _ in range(3):
self.elementwise_op_transpose_passthrough_pass()
self.branch_transpose_expand_pass()
self.fuse_simple_transpose_pass()
# Other cleanups
self.fuse_simple_slice_pass()
self.remove_noop_pass()
self.fuse_wrapped_reshape_within_transpose_pass()
# Buffer folding
for _ in range(2):
self.fold_reshape_buffer()
self.fold_transpose_buffer()
# Transpose and reshape cleanup
for _ in range(2):
self.transpose_to_reshape_pass()
self.fuse_simple_reshape_pass()
self.fuse_simple_transpose_pass()
# Final cleanup
self.cleanup_dead_nodes()
def is_bn_fusable_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] in (ExtendedOperator.GENERIC_CONV, ExtendedOperator.GENERIC_DECONV, ExtendedOperator.FULLY_CONNECTED) \
and target_vertex['node_type'] == ExtendedOperator.BATCH_NORM and source_vertex.outdegree() == 1 \
and target_vertex['op'].inputs[1].buffer is not None and target_vertex['op'].inputs[2].buffer is not None \
and source_vertex['op'].inputs[1].buffer is not None \
and (target_vertex['op'].fusedActivationFunction == ActivationFunctionType.NONE or
source_vertex['op'].fusedActivationFunction in (ActivationFunctionType.NONE, target_vertex['op'].fusedActivationFunction))
def is_activ_fusable_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] in (ExtendedOperator.FULLY_CONNECTED, ExtendedOperator.GENERIC_CONV,
ExtendedOperator.ADD, ExtendedOperator.SUB, ExtendedOperator.MUL,
ExtendedOperator.DIV, ExtendedOperator.MAX_POOL_2D, ExtendedOperator.AVERAGE_POOL_2D) \
and target_vertex['node_type'] in (ExtendedOperator.RELU, ExtendedOperator.RELU6) \
and source_vertex['op'].fusedActivationFunction == ActivationFunctionType.NONE \
and source_vertex.outdegree() == 1
def is_transformable_node(vertex: ig.Vertex, graph_converter: ig.Graph):
return vertex['node_type'] <= ExtendedOperator.BATCH_NORM and vertex.outdegree() >= 1
def is_transformable_transpose_node(vertex: ig.Vertex, graph_converter: ig.Graph):
return vertex['node_type'] == ExtendedOperator.TRANSPOSE and vertex.outdegree() >= 1 \
and is_transpose_same_to_reshape_op(vertex['op'])
def is_transpose_elementwise_op_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return ((source_vertex['node_type'] == ExtendedOperator.TRANSPOSE and
(is_elementwise_unary_op(target_vertex['node_type'], target_vertex['op']) or
is_elementwise_binary_op(target_vertex['node_type'], target_vertex['op']))) or
(target_vertex['node_type'] == ExtendedOperator.TRANSPOSE and
(is_elementwise_unary_op(source_vertex['node_type'], source_vertex['op']) or
is_elementwise_binary_op(source_vertex['node_type'], source_vertex['op'])))) \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_reshape_elementwise_op_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return ((source_vertex['node_type'] == ExtendedOperator.RESHAPE
and (is_elementwise_unary_op(target_vertex['node_type'], target_vertex['op'])
or is_elementwise_binary_op(target_vertex['node_type'], target_vertex['op'])))
or (target_vertex['node_type'] == ExtendedOperator.RESHAPE
and (is_elementwise_unary_op(source_vertex['node_type'], source_vertex['op'])
or is_elementwise_binary_op(source_vertex['node_type'], source_vertex['op'])))) \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_elementwise_unary_op(op_code: ExtendedOperator, op: tfl.BaseOperator):
return op_code in (ExtendedOperator.RELU,
ExtendedOperator.SIN,
ExtendedOperator.COS,
ExtendedOperator.TANH,
ExtendedOperator.ELU,
ExtendedOperator.PRELU,
ExtendedOperator.EXP,
ExtendedOperator.LOG,
ExtendedOperator.NEG,
ExtendedOperator.FLOOR,
ExtendedOperator.RELU6,
ExtendedOperator.QUANTIZE,
ExtendedOperator.DEQUANTIZE,
ExtendedOperator.SQRT,
ExtendedOperator.RSQRT,
ExtendedOperator.CAST,
ExtendedOperator.LOGISTIC,
ExtendedOperator.SOFTMAX,
ExtendedOperator.LOG_SOFTMAX,
ExtendedOperator.HARD_SWISH,
ExtendedOperator.LEAKY_RELU)
def is_elementwise_binary_op(op_code: ExtendedOperator, op: tfl.BaseOperator):
return (op_code in (ExtendedOperator.CONCATENATION,
ExtendedOperator.ADD,
ExtendedOperator.SUB,
ExtendedOperator.MUL,
ExtendedOperator.DIV) and
len(op.inputs) >= 2 and
op.inputs[0].tensor.ndim == op.inputs[1].tensor.ndim) \
or (op_code in (ExtendedOperator.SPLIT,
ExtendedOperator.SPLIT_V))
def is_ending_with_noop_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex.outdegree() == 1 and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name \
and ((target_vertex['node_type'] == ExtendedOperator.RESHAPE and
target_vertex['op'].inputs[0].shape == target_vertex['op'].outputs[0].shape) or
(target_vertex['node_type'] == ExtendedOperator.TRANSPOSE and
(np.diff(target_vertex['op'].inputs[1].tensor) == 1).all()) or
(target_vertex['node_type'] == ExtendedOperator.PAD and
target_vertex['op'].inputs[0].shape == target_vertex['op'].outputs[0].shape) or
(target_vertex['node_type'] == ExtendedOperator.SLICE and
target_vertex['op'].inputs[0].shape == target_vertex['op'].outputs[0].shape) or
(target_vertex['node_type'] == ExtendedOperator.CAST and
target_vertex['op'].inDataType == target_vertex['op'].outDataType))
def is_bmm_add_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.BATCH_MATMUL \
and target_vertex['node_type'] == ExtendedOperator.ADD \
and source_vertex['op'].inputs[0].tensor.ndim == 3 \
and source_vertex['op'].inputs[0].shape[0] == 1 \
and source_vertex['op'].inputs[1].tensor.ndim == 2 \
and target_vertex['op'].inputs[1].tensor.ndim == 1 \
and target_vertex['op'].inputs[1].shape[0] == source_vertex['op'].inputs[1].shape[-1] \
and source_vertex.outdegree() == 1 and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_wrapped_reshape_within_transpose_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return ((target_vertex['node_type'] == ExtendedOperator.TRANSPOSE and
source_vertex['node_type'] == ExtendedOperator.RESHAPE) or
(source_vertex['node_type'] == ExtendedOperator.TRANSPOSE and
target_vertex['node_type'] == ExtendedOperator.RESHAPE)) \
and source_vertex.outdegree() == 1 and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_slice_fusable_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.SLICE and source_vertex.outdegree() == 1 \
and target_vertex['node_type'] == ExtendedOperator.SLICE and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_transpose_fusable_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.TRANSPOSE and source_vertex.outdegree() == 1 \
and target_vertex['node_type'] == ExtendedOperator.TRANSPOSE and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_reshape_branch_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.RESHAPE and source_vertex.outdegree() > 1 \
and target_vertex['node_type'] == ExtendedOperator.RESHAPE \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_transpose_branch_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.TRANSPOSE and source_vertex.outdegree() > 1 \
and target_vertex['node_type'] == ExtendedOperator.TRANSPOSE \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_reshape_fusable_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.RESHAPE and source_vertex.outdegree() == 1 \
and target_vertex['node_type'] == ExtendedOperator.RESHAPE and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_constant_transpose_fusable_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.CONSTANT_NODE and source_vertex.outdegree() == 1 \
and target_vertex['node_type'] == ExtendedOperator.TRANSPOSE and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_constant_reshape_fusable_edge(edge: ig.Edge, graph_converter: ig.Graph):
source_vertex = graph_converter.vs[edge.source]
target_vertex = graph_converter.vs[edge.target]
return source_vertex['node_type'] == ExtendedOperator.CONSTANT_NODE and source_vertex.outdegree() == 1 \
and target_vertex['node_type'] == ExtendedOperator.RESHAPE and target_vertex.outdegree() >= 1 \
and source_vertex['outputs'][0] == target_vertex['op'].inputs[0].name
def is_transpose_same_to_reshape_op(op: tfl.BaseOperator):
num_elements = np.prod(op.inputs[0].shape)
input_shape = np.array(op.inputs[0].shape, dtype='int32')
output_shape = np.array(op.outputs[0].shape, dtype='int32')
if np.all(input_shape[input_shape != 1] == output_shape[output_shape != 1]):
input_tensor = np.arange(num_elements).reshape(input_shape)
perm = op.inputs[1].tensor
new_tensor = np.transpose(input_tensor, perm)
return np.all(new_tensor.flatten() == input_tensor.flatten())
else:
return False
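# Illustrative sketch (not part of the original converter): shows the check behind
# is_transpose_same_to_reshape_op with plain NumPy. A transpose that only relocates
# size-1 axes leaves the flattened data untouched, so it can be rewritten as a reshape.
# The shape and permutations below are hypothetical.
def _example_transpose_equals_reshape():
    data = np.arange(12).reshape(1, 3, 1, 4)
    # Permutation (1, 2, 0, 3) only moves the two unit axes.
    moved = np.transpose(data, (1, 2, 0, 3))           # shape (3, 1, 1, 4)
    assert np.all(moved.flatten() == data.flatten())   # data order unchanged
    # Permutation (0, 3, 1, 2) moves a non-unit axis, so the data order changes.
    swapped = np.transpose(data, (0, 3, 1, 2))         # shape (1, 4, 3, 1)
    assert not np.all(swapped.flatten() == data.flatten())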
def fuse_bn_weight(eps, scale, var, weight):
while weight.ndim > scale.ndim:
scale = scale[:, None]
while weight.ndim > var.ndim:
var = var[:, None]
eps = np.array(eps, dtype='float32')
return weight * scale / np.sqrt(var + eps, dtype='float32')
def fuse_bn_bias(eps, scale, var, mean, bn_b, activ_b):
if scale.ndim > 1:
scale = scale.flatten()
if var.ndim > 1:
var = var.flatten()
eps = np.array(eps, dtype='float32')
if activ_b is not None:
if activ_b.shape != mean.shape and activ_b.ndim == 1 and activ_b.size == 1:
activ_b = activ_b.repeat(mean.size)
return ((activ_b - mean) * scale) / (np.sqrt(var + eps, dtype='float32')) + bn_b
else:
return ((- mean) * scale) / (np.sqrt(var + eps, dtype='float32')) + bn_b
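# Illustrative sketch (not part of the original converter): folding a BatchNorm into the
# preceding conv/FC with fuse_bn_weight and fuse_bn_bias, i.e. w' = w * gamma / sqrt(var + eps)
# and b' = (b - mean) * gamma / sqrt(var + eps) + beta. All buffer values are hypothetical
# stand-ins for the tensors handled by fuse_conv_fc_bn.
def _example_fold_batch_norm():
    eps = 1e-5
    gamma = np.array([1.5, 0.5], dtype='float32')     # BN scale
    beta = np.array([0.3, 0.7], dtype='float32')      # BN bias
    mean = np.array([0.1, -0.2], dtype='float32')     # running mean
    var = np.array([4.0, 1.0], dtype='float32')       # running variance
    weight = np.ones((2, 1, 1, 1), dtype='float32')   # hypothetical conv weight, 2 output channels
    bias = np.zeros(2, dtype='float32')               # conv bias
    fused_w = fuse_bn_weight(eps, gamma, var, weight)
    fused_b = fuse_bn_bias(eps, gamma, var, mean, beta, bias)
    # A conv using (fused_w, fused_b) reproduces BN(conv(x)) for these buffers.
    return fused_w, fused_b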
def fuse_slices(seq: typing.Iterable[ig.Vertex]):
cur_start = None
cur_size = None
for node in seq:
assert node['node_type'] == ExtendedOperator.SLICE
next_start = node['op'].inputs[1].tensor
next_size = node['op'].inputs[2].tensor
if cur_start is None and cur_size is None:
cur_start = next_start
cur_size = next_size
else:
cur_start += next_start
cur_size = np.min((cur_size, next_size), axis=0)
return cur_start, cur_size
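# Illustrative sketch (not part of the original converter): fuse_slices merges a chain of
# SLICE ops by summing their start offsets and taking the element-wise minimum of their
# sizes, as above. The start/size tensors below are hypothetical.
def _example_fuse_two_slices():
    start_a, size_a = np.array([0, 2]), np.array([4, 6])
    start_b, size_b = np.array([1, 0]), np.array([3, 6])
    fused_start = start_a + start_b                 # -> [1, 2]
    fused_size = np.min((size_a, size_b), axis=0)   # -> [3, 6]
    return fused_start, fused_size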
def fuse_transpose_perms(seq: typing.Iterable[ig.Vertex]):
cur_perm = None
for node in seq:
assert node['node_type'] == ExtendedOperator.TRANSPOSE
next_perm = node['op'].inputs[1].tensor
if cur_perm is None:
cur_perm = next_perm
else:
cur_perm = cur_perm[next_perm]
return cur_perm
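# Illustrative sketch (not part of the original converter): consecutive transposes fuse by
# indexing one permutation with the next (cur_perm[next_perm]), matching the NumPy identity
# np.transpose(np.transpose(x, p1), p2) == np.transpose(x, p1[p2]). The NCHW<->NHWC pair
# below is a hypothetical example that fuses into the identity permutation.
def _example_fuse_transpose_perms():
    p1 = np.array([0, 2, 3, 1])   # NCHW -> NHWC
    p2 = np.array([0, 3, 1, 2])   # NHWC -> NCHW
    fused = p1[p2]                # -> [0, 1, 2, 3], a no-op transpose
    x = np.arange(24).reshape(1, 2, 3, 4)
    assert np.array_equal(np.transpose(np.transpose(x, p1), p2), np.transpose(x, fused))
    return fused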
def fuse_transpose_perms_extended(seq: typing.Iterable[ig.Vertex]):
cur_perm = None
# Reverse the sequence if dim is expanding
if seq[1]['node_type'] == ExtendedOperator.RESHAPE:
if len(seq[1]['op'].inputs[0].shape) < len(seq[1]['op'].outputs[0].shape):
seq = list(reversed(list(seq)))
for node in seq:
if node['node_type'] == ExtendedOperator.TRANSPOSE:
next_perm = node['op'].inputs[1].tensor
if cur_perm is None:
cur_perm = next_perm
else:
cur_perm = cur_perm[next_perm]
elif node['node_type'] == ExtendedOperator.RESHAPE:
if len(seq[1]['op'].inputs[0].shape) > len(seq[1]['op'].outputs[0].shape):
old_shape = node['op'].inputs[0].shape
new_shape = node['op'].outputs[0].shape
else:
new_shape = node['op'].inputs[0].shape
old_shape = node['op'].outputs[0].shape
if old_shape != new_shape:
new_shape_padded = list(new_shape) + [None] * (len(old_shape) - len(new_shape))
next_perm = []
new_idx = 0
for old, item in zip(old_shape, cur_perm):
if old == new_shape_padded[new_idx]:
next_perm.append(item)
new_idx += 1
cur_perm = np.argsort(next_perm)
return cur_perm
def fuse_connected_edges(filtered_pairs: typing.List[typing.Iterable[ig.Vertex]]) -> typing.List[typing.Iterable[ig.Vertex]]:
while True:
heads = {n[0]: i for i, n in enumerate(filtered_pairs)}
tails = {n[-1]: i for i, n in enumerate(filtered_pairs)}
connectables = heads.keys() & tails.keys()
if len(connectables) > 0:
curr_filtered = []
for seq in filtered_pairs:
head_connectable = seq[0] in connectables
preserve = head_connectable and filtered_pairs[tails[seq[0]]][0] in connectables
if preserve:
curr_filtered.append(seq)
elif not head_connectable:
if seq[-1] in connectables:
curr_filtered.append(seq + filtered_pairs[heads[seq[-1]]][1:])
else:
curr_filtered.append(seq)
filtered_pairs = curr_filtered
else:
break
return filtered_pairs
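# Illustrative sketch (not part of the original converter): fuse_connected_edges chains
# sequences whose last element is the head of another sequence. Plain strings stand in
# for the ig.Vertex objects used by the real passes.
def _example_fuse_connected_edges():
    chains = [['a', 'b'], ['b', 'c'], ['d', 'e']]
    fused = fuse_connected_edges(chains)
    assert fused == [['a', 'b', 'c'], ['d', 'e']]
    return fused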
def is_simple_reshape(orig_shape, new_shape, mapping: typing.Optional[typing.Dict[int, int]] = None):
if orig_shape == new_shape:
return True
i = 0
j = 0
while True:
if i == len(orig_shape) and j == len(new_shape):
break
elif i == len(orig_shape):
if new_shape[j] == 1:
j += 1
else:
break
elif j == len(new_shape):
if orig_shape[i] == 1:
i += 1
else:
break
elif orig_shape[i] == new_shape[j]:
            if mapping is not None:
mapping[i] = j
i += 1
j += 1
elif orig_shape[i] == 1:
i += 1
elif new_shape[j] == 1:
j += 1
else:
break
if i != len(orig_shape) or j != len(new_shape):
return False
else:
return True
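# Illustrative sketch (not part of the original converter): a "simple" reshape only inserts
# or drops size-1 axes, so the remaining dimensions map one-to-one. The shapes below are
# hypothetical.
def _example_is_simple_reshape():
    assert is_simple_reshape((1, 3, 1, 4), (3, 4))   # only unit axes are dropped
    assert not is_simple_reshape((2, 3), (3, 2))     # this one actually reorders data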
def elinimate_sequences(graph_converter: CommonGraph, filtered_pairs: typing.List[typing.Iterable[ig.Vertex]],
remove_first_pred: typing.Union[bool, typing.Callable] = False,
remove_first_node_action: typing.Optional[typing.Callable] = None,
remove_last_pred: typing.Union[bool, typing.Callable] = True,
remove_last_node_action: typing.Optional[typing.Callable] = None,
skip_pred: typing.Union[bool, typing.Callable] = False):
remove_ids = []
actions = []
for seq in filtered_pairs:
first_node = seq[0]
last_node = seq[-1]
if type(skip_pred) == bool:
skip = skip_pred
elif skip_pred is not None:
skip = skip_pred(seq)
if skip:
continue
if type(remove_first_pred) == bool:
remove_first = remove_first_pred
custom_data = None
elif remove_first_pred is not None:
remove_first, custom_data = remove_first_pred(seq)
if type(remove_last_pred) == bool:
remove_last = remove_last_pred
custom_data_last = None
elif remove_last_pred is not None:
remove_last, custom_data_last = remove_last_pred(seq)
# If the first node can also be eliminated, then set the previous node as the first node
if remove_first:
first_node = graph_converter.graph.vs.find(
name=graph_converter.tensor_node_map[first_node['op'].inputs[0].name])
if not remove_last:
last_node = seq[-2]
# We use the forward input tensor under the following circumstances.
# 1. If the previous node before the sequence is an input node
# 2. If the first node has multiple outputs
use_forward_input = False
if first_node['node_type'] == ExtendedOperator.INPUT_NODE or first_node.outdegree() > 1:
use_forward_input = True
if use_forward_input:
# Find out the output of the first node in the sequence
new_output = first_node['outputs'][0]
assert new_output in graph_converter.tensor_map
# For each node that is next of the last node, we connect it with the first node
# Also, the replace the tensors when needed
graph_converter.replace_next_tensors(last_node, first_node, new_output)
else:
# Find out the output of the last node in the sequence
new_output = last_node['outputs'][0]
assert new_output in graph_converter.tensor_map
# For each node that is next of the last node, we connect it with the first node
graph_converter.connect_next_tensors(last_node, first_node, new_output)
# Update graph, prepare to drop the output tensor of the intermediate nodes and use the output tensor of the last node instead
first_node['outputs'][0] = new_output
if first_node['op'] is not None:
first_node['op'].outputs[0] = graph_converter.tensor_map[new_output]
graph_converter.tensor_node_map[new_output] = first_node['name']
if remove_first and remove_last:
# When the first node is a constant node, we need to set the buffer back
if first_node['node_type'] == ExtendedOperator.CONSTANT_NODE and not use_forward_input:
old_tensor = seq[0]['op'].inputs[0]
new_tensor = seq[-1]['op'].outputs[0]
new_tensor.buffer = old_tensor.buffer
# Push the sequence to the removing list
remove_ids.extend([x.index for x in seq])
else:
# Collect actions when removing the first node
start_index = 0
end_index = len(seq)
if not remove_first:
start_index = 1
if remove_first_node_action is not None:
action = remove_first_node_action(first_node, last_node, custom_data)
if action is not None:
actions.extend(action)
if not remove_last:
end_index = len(seq) - 1
if remove_last_node_action is not None:
action = remove_last_node_action(first_node, last_node, custom_data_last)
if action is not None:
actions.extend(action)
# Push the sequence (except the first node) to the removing list
remove_ids.extend([x.index for x in seq[start_index:end_index]])
for func, args in actions:
func(*args)
graph_converter.graph.delete_vertices(remove_ids)
def expand_op_outputs_in_branches(nodes: typing.List[ig.Vertex], new_op_func: typing.Callable[[ig.Vertex, ig.Vertex, ig.Vertex], None],
graph_converter: CommonGraph):
actions = []
for node in nodes:
preserve_node = None
prev_node_name = node['op'].inputs[0].name
prev_node = graph_converter.graph.vs.find(name=graph_converter.tensor_node_map[prev_node_name])
# Collect next nodes and choose one to preserve
next_nodes = []
for edge in node.out_edges():
next_node = graph_converter.graph.vs[edge.target]
if preserve_node is None or next_node['node_type'] == ExtendedOperator.OUTPUT_NODE:
preserve_node = next_node
next_nodes.append(next_node)
# For the filtered nodes, use the cloned op as the previous op
filtered_nodes = list(set(next_nodes) - set([preserve_node]))
for next_node in filtered_nodes:
actions.extend(new_op_func(node, prev_node, next_node))
# Process actions
for func, args in actions:
node = args[0]
func(*args)
| 46.285714
| 154
| 0.617358
|
b5d667f39a9fb4a06914f2cbd77fdc446f5037ff
| 158
|
py
|
Python
|
selfdrive/version.py
|
UVA-DSA/OpenPilot0.3.5
|
b5777bbb075b92b0c11969e56f9fd264bf0ce291
|
[
"MIT"
] | 1
|
2017-12-30T11:54:39.000Z
|
2017-12-30T11:54:39.000Z
|
selfdrive/version.py
|
UVA-DSA/OpenPilot0.3.5
|
b5777bbb075b92b0c11969e56f9fd264bf0ce291
|
[
"MIT"
] | 1
|
2021-03-26T00:23:15.000Z
|
2021-03-26T00:23:15.000Z
|
selfdrive/version.py
|
UVA-DSA/OpenPilot0.3.5
|
b5777bbb075b92b0c11969e56f9fd264bf0ce291
|
[
"MIT"
] | 1
|
2020-06-01T16:20:06.000Z
|
2020-06-01T16:20:06.000Z
|
import os
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "common", "version.h")) as _versionf:
version = _versionf.read().split('"')[1]
| 39.5
| 104
| 0.702532
|
9cd060522bf59b6deca3cb0541d4a39aba26ceec
| 10,674
|
py
|
Python
|
app/main/views/api_keys.py
|
karlchillmaid/notifications-admin
|
9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880
|
[
"MIT"
] | null | null | null |
app/main/views/api_keys.py
|
karlchillmaid/notifications-admin
|
9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880
|
[
"MIT"
] | null | null | null |
app/main/views/api_keys.py
|
karlchillmaid/notifications-admin
|
9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880
|
[
"MIT"
] | null | null | null |
from flask import (
Markup,
abort,
flash,
redirect,
render_template,
request,
url_for,
)
from flask_login import current_user, login_required
from app import (
api_key_api_client,
current_service,
notification_api_client,
service_api_client,
)
from app.main import main
from app.main.forms import (
CreateKeyForm,
ServiceDeliveryStatusCallbackForm,
ServiceReceiveMessagesCallbackForm,
Whitelist,
)
from app.notify_client.api_key_api_client import (
KEY_TYPE_NORMAL,
KEY_TYPE_TEAM,
KEY_TYPE_TEST,
)
from app.utils import email_safe, user_has_permissions
dummy_bearer_token = 'bearer_token_set'
@main.route("/services/<service_id>/api")
@login_required
@user_has_permissions('manage_api_keys')
def api_integration(service_id):
callbacks_link = (
'.api_callbacks' if 'inbound_sms' in current_service['permissions']
else '.delivery_status_callback'
)
return render_template(
'views/api/index.html',
callbacks_link=callbacks_link,
api_notifications=notification_api_client.get_api_notifications_for_service(service_id)
)
@main.route("/services/<service_id>/api/documentation")
@login_required
@user_has_permissions('manage_api_keys')
def api_documentation(service_id):
return redirect(url_for('.documentation'), code=301)
@main.route("/services/<service_id>/api/whitelist", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_api_keys')
def whitelist(service_id):
form = Whitelist()
if form.validate_on_submit():
service_api_client.update_whitelist(service_id, {
'email_addresses': list(filter(None, form.email_addresses.data)),
'phone_numbers': list(filter(None, form.phone_numbers.data))
})
flash('Whitelist updated', 'default_with_tick')
return redirect(url_for('.api_integration', service_id=service_id))
if not form.errors:
form.populate(**service_api_client.get_whitelist(service_id))
return render_template(
'views/api/whitelist.html',
form=form
)
@main.route("/services/<service_id>/api/keys")
@login_required
@user_has_permissions('manage_api_keys')
def api_keys(service_id):
return render_template(
'views/api/keys.html',
keys=api_key_api_client.get_api_keys(service_id=service_id)['apiKeys']
)
@main.route("/services/<service_id>/api/keys/create", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_api_keys', restrict_admin_usage=True)
def create_api_key(service_id):
key_names = [
key['name'] for key in api_key_api_client.get_api_keys(service_id=service_id)['apiKeys']
]
form = CreateKeyForm(key_names)
form.key_type.choices = [
(KEY_TYPE_NORMAL, 'Live – sends to anyone'),
(KEY_TYPE_TEAM, 'Team and whitelist – limits who you can send to'),
(KEY_TYPE_TEST, 'Test – pretends to send messages'),
]
disabled_options, option_hints = [], {}
if current_service['restricted']:
disabled_options = [KEY_TYPE_NORMAL]
option_hints[KEY_TYPE_NORMAL] = Markup(
'Not available because your service is in '
'<a href="{}#trial-mode">trial mode</a>'.format(url_for(".using_notify"))
)
if 'letter' in current_service['permissions']:
option_hints[KEY_TYPE_TEAM] = 'Can’t be used to send letters'
if form.validate_on_submit():
if form.key_type.data in disabled_options:
abort(400)
secret = api_key_api_client.create_api_key(
service_id=service_id,
key_name=form.key_name.data,
key_type=form.key_type.data
)
return render_template(
'views/api/keys/show.html',
secret=secret,
service_id=service_id,
key_name=email_safe(form.key_name.data, whitespace='_')
)
return render_template(
'views/api/keys/create.html',
form=form,
disabled_options=disabled_options,
option_hints=option_hints
)
@main.route("/services/<service_id>/api/keys/revoke/<key_id>", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_api_keys')
def revoke_api_key(service_id, key_id):
key_name = api_key_api_client.get_api_keys(service_id=service_id, key_id=key_id)['apiKeys'][0]['name']
if request.method == 'GET':
return render_template(
'views/api/keys.html',
revoke_key=key_name,
keys=api_key_api_client.get_api_keys(service_id=service_id)['apiKeys'],
)
elif request.method == 'POST':
api_key_api_client.revoke_api_key(service_id=service_id, key_id=key_id)
flash('‘{}’ was revoked'.format(key_name), 'default_with_tick')
return redirect(url_for('.api_keys', service_id=service_id))
def get_apis():
callback_api = None
inbound_api = None
if current_service['service_callback_api']:
callback_api = service_api_client.get_service_callback_api(
current_service['id'],
current_service.get('service_callback_api')[0]
)
if current_service['inbound_api']:
inbound_api = service_api_client.get_service_inbound_api(
current_service['id'],
current_service.get('inbound_api')[0]
)
return (callback_api, inbound_api)
def check_token_against_dummy_bearer(token):
if token != dummy_bearer_token:
return token
else:
return ''
@main.route("/services/<service_id>/api/callbacks", methods=['GET'])
@login_required
def api_callbacks(service_id):
if 'inbound_sms' not in current_service['permissions']:
return redirect(url_for('.delivery_status_callback', service_id=service_id))
delivery_status_callback, received_text_messages_callback = get_apis()
return render_template(
'views/api/callbacks.html',
received_text_messages_callback=received_text_messages_callback['url']
if received_text_messages_callback else None,
delivery_status_callback=delivery_status_callback['url'] if delivery_status_callback else None
)
def get_delivery_status_callback_details():
if current_service['service_callback_api']:
return service_api_client.get_service_callback_api(
current_service['id'],
current_service.get('service_callback_api')[0]
)
@main.route("/services/<service_id>/api/callbacks/delivery-status-callback", methods=['GET', 'POST'])
@login_required
def delivery_status_callback(service_id):
delivery_status_callback = get_delivery_status_callback_details()
back_link = (
'.api_callbacks' if 'inbound_sms' in current_service['permissions']
else '.api_integration'
)
form = ServiceDeliveryStatusCallbackForm(
url=delivery_status_callback.get('url') if delivery_status_callback else '',
bearer_token=dummy_bearer_token if delivery_status_callback else ''
)
if form.validate_on_submit():
if delivery_status_callback and form.url.data:
if (
delivery_status_callback.get('url') != form.url.data or
form.bearer_token.data != dummy_bearer_token
):
service_api_client.update_service_callback_api(
service_id,
url=form.url.data,
bearer_token=check_token_against_dummy_bearer(form.bearer_token.data),
user_id=current_user.id,
callback_api_id=delivery_status_callback.get('id')
)
elif delivery_status_callback and not form.url.data:
service_api_client.delete_service_callback_api(
service_id,
delivery_status_callback['id'],
)
elif form.url.data:
service_api_client.create_service_callback_api(
service_id,
url=form.url.data,
bearer_token=form.bearer_token.data,
user_id=current_user.id
)
else:
# If no callback is set up and the user chooses to continue
            # having no callback (i.e. both fields empty) then there’s
# nothing for us to do here
pass
return redirect(url_for(back_link, service_id=service_id))
return render_template(
'views/api/callbacks/delivery-status-callback.html',
back_link=back_link,
form=form,
)
def get_received_text_messages_callback():
if current_service['inbound_api']:
return service_api_client.get_service_inbound_api(
current_service['id'],
current_service.get('inbound_api')[0]
)
@main.route("/services/<service_id>/api/callbacks/received-text-messages-callback", methods=['GET', 'POST'])
@login_required
def received_text_messages_callback(service_id):
if 'inbound_sms' not in current_service['permissions']:
return redirect(url_for('.api_integration', service_id=service_id))
received_text_messages_callback = get_received_text_messages_callback()
form = ServiceReceiveMessagesCallbackForm(
url=received_text_messages_callback.get('url') if received_text_messages_callback else '',
bearer_token=dummy_bearer_token if received_text_messages_callback else ''
)
if form.validate_on_submit():
if received_text_messages_callback and form.url.data:
if (
received_text_messages_callback.get('url') != form.url.data or
form.bearer_token.data != dummy_bearer_token
):
service_api_client.update_service_inbound_api(
service_id,
url=form.url.data,
bearer_token=check_token_against_dummy_bearer(form.bearer_token.data),
user_id=current_user.id,
inbound_api_id=received_text_messages_callback.get('id')
)
elif received_text_messages_callback and not form.url.data:
service_api_client.delete_service_inbound_api(
service_id,
received_text_messages_callback['id'],
)
elif form.url.data:
service_api_client.create_service_inbound_api(
service_id,
url=form.url.data,
bearer_token=form.bearer_token.data,
user_id=current_user.id
)
return redirect(url_for('.api_callbacks', service_id=service_id))
return render_template(
'views/api/callbacks/received-text-messages-callback.html',
form=form,
)
| 35.58
| 108
| 0.672194
|
2b47685d5419c0885d876c0a75bd3d2bb6d74819
| 6,083
|
py
|
Python
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/self_hosted_integration_runtime_node.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/self_hosted_integration_runtime_node.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/self_hosted_integration_runtime_node.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SelfHostedIntegrationRuntimeNode(Model):
"""Properties of Self-hosted integration runtime node.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar node_name: Name of the integration runtime node.
:vartype node_name: str
:ivar machine_name: Machine name of the integration runtime node.
:vartype machine_name: str
:ivar host_service_uri: URI for the host machine of the integration
runtime.
:vartype host_service_uri: str
:ivar status: Status of the integration runtime node. Possible values
include: 'NeedRegistration', 'Online', 'Limited', 'Offline', 'Upgrading',
'Initializing', 'InitializeFailed'
:vartype status: str or
~azure.mgmt.datafactory.models.SelfHostedIntegrationRuntimeNodeStatus
:ivar capabilities: The integration runtime capabilities dictionary
:vartype capabilities: dict[str, str]
:ivar version_status: Status of the integration runtime node version.
:vartype version_status: str
:ivar version: Version of the integration runtime node.
:vartype version: str
:ivar register_time: The time at which the integration runtime node was
registered in ISO8601 format.
:vartype register_time: datetime
:ivar last_connect_time: The most recent time at which the integration
runtime was connected in ISO8601 format.
:vartype last_connect_time: datetime
:ivar expiry_time: The time at which the integration runtime will expire
in ISO8601 format.
:vartype expiry_time: datetime
:ivar last_start_time: The time the node last started up.
:vartype last_start_time: datetime
:ivar last_stop_time: The integration runtime node last stop time.
:vartype last_stop_time: datetime
:ivar last_update_result: The result of the last integration runtime node
update. Possible values include: 'Succeed', 'Fail'
:vartype last_update_result: str or
~azure.mgmt.datafactory.models.IntegrationRuntimeUpdateResult
:ivar last_start_update_time: The last time for the integration runtime
node update start.
:vartype last_start_update_time: datetime
:ivar last_end_update_time: The last time for the integration runtime node
update end.
:vartype last_end_update_time: datetime
:ivar is_active_dispatcher: Indicates whether this node is the active
dispatcher for integration runtime requests.
:vartype is_active_dispatcher: bool
:ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration
runtime node.
:vartype concurrent_jobs_limit: int
:ivar max_concurrent_jobs: The maximum concurrent jobs in this integration
runtime.
:vartype max_concurrent_jobs: int
"""
_validation = {
'node_name': {'readonly': True},
'machine_name': {'readonly': True},
'host_service_uri': {'readonly': True},
'status': {'readonly': True},
'capabilities': {'readonly': True},
'version_status': {'readonly': True},
'version': {'readonly': True},
'register_time': {'readonly': True},
'last_connect_time': {'readonly': True},
'expiry_time': {'readonly': True},
'last_start_time': {'readonly': True},
'last_stop_time': {'readonly': True},
'last_update_result': {'readonly': True},
'last_start_update_time': {'readonly': True},
'last_end_update_time': {'readonly': True},
'is_active_dispatcher': {'readonly': True},
'concurrent_jobs_limit': {'readonly': True},
'max_concurrent_jobs': {'readonly': True},
}
_attribute_map = {
'node_name': {'key': 'nodeName', 'type': 'str'},
'machine_name': {'key': 'machineName', 'type': 'str'},
'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '{str}'},
'version_status': {'key': 'versionStatus', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'register_time': {'key': 'registerTime', 'type': 'iso-8601'},
'last_connect_time': {'key': 'lastConnectTime', 'type': 'iso-8601'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
'last_start_time': {'key': 'lastStartTime', 'type': 'iso-8601'},
'last_stop_time': {'key': 'lastStopTime', 'type': 'iso-8601'},
'last_update_result': {'key': 'lastUpdateResult', 'type': 'str'},
'last_start_update_time': {'key': 'lastStartUpdateTime', 'type': 'iso-8601'},
'last_end_update_time': {'key': 'lastEndUpdateTime', 'type': 'iso-8601'},
'is_active_dispatcher': {'key': 'isActiveDispatcher', 'type': 'bool'},
'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
}
def __init__(self):
self.node_name = None
self.machine_name = None
self.host_service_uri = None
self.status = None
self.capabilities = None
self.version_status = None
self.version = None
self.register_time = None
self.last_connect_time = None
self.expiry_time = None
self.last_start_time = None
self.last_stop_time = None
self.last_update_result = None
self.last_start_update_time = None
self.last_end_update_time = None
self.is_active_dispatcher = None
self.concurrent_jobs_limit = None
self.max_concurrent_jobs = None
| 45.395522
| 85
| 0.657406
|
1b4cf9c40e156dee56884e98d53c61421ab66cc5
| 4,870
|
py
|
Python
|
rmgpy/tools/testGenerateReactions.py
|
cainja/RMG-Py
|
f9ad0f4244e476a28768c8a4a37410ad55bcd556
|
[
"MIT"
] | 7
|
2017-10-04T16:04:14.000Z
|
2021-03-27T21:54:41.000Z
|
code/rmgpy/tools/testGenerateReactions.py
|
tiger-tiger/Molecular-Image-Recognition
|
413e74bb526f56077cd5f70bb41fb7a075636174
|
[
"MIT"
] | null | null | null |
code/rmgpy/tools/testGenerateReactions.py
|
tiger-tiger/Molecular-Image-Recognition
|
413e74bb526f56077cd5f70bb41fb7a075636174
|
[
"MIT"
] | 6
|
2017-10-04T15:37:05.000Z
|
2021-12-29T06:50:16.000Z
|
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import unittest
import os.path
import shutil
from nose.plugins.attrib import attr
import rmgpy
from rmgpy.tools.generate_reactions import *
@attr('functional')
class GenerateReactionsTest(unittest.TestCase):
def test(self):
folder = os.path.join(os.path.dirname(rmgpy.__file__), 'tools/data/generate')
input_file = os.path.join(folder, 'input.py')
rmg = RMG(inputFile=input_file, outputDirectory=folder)
rmg = execute(rmg)
self.assertIsNotNone(rmg)
self.assertIsNotNone(rmg.reactionModel.outputSpeciesList)
self.assertIsNotNone(rmg.reactionModel.outputReactionList)
shutil.rmtree(os.path.join(folder, 'pdep'))
def testDuplicateReaction(self):
"""
Test that the radical addition reaction
HCJ=O + CH2O = [CH2]OC=O
present in the reaction library "Methylformate",
only appears once in the model.
"""
from rmgpy.reaction import Reaction
from rmgpy.molecule import Molecule
folder = os.path.join(os.path.dirname(rmgpy.__file__), 'tools/data/generate/duplicates')
input_file = os.path.join(folder, 'input.py')
rmg = RMG(inputFile=input_file, outputDirectory=folder)
rmg = execute(rmg)
self.assertIsNotNone(rmg)
rxn_flagged = Reaction(reactants=[Molecule(SMILES='[CH]=O'), Molecule(SMILES='C=O')],
products=[Molecule(SMILES='[CH2]OC=O')])
count = 0
for reaction in rmg.reactionModel.core.reactions:
if reaction.isIsomorphic(rxn_flagged):
count += 1
self.assertEquals(count, 1)
shutil.rmtree(os.path.join(folder, 'pdep'))
def testLibraryReactionEntersCore(self):
"""
Test that a reaction from a Reaction Library enters the core
right after the initialization step if all the input species are
present in that reaction.
The following reaction from the Methylformate library
HCjO + CH2O <=> Fmoml
should appear in the model if HCjO, CH2O and Fmoml are all used as input species
"""
from rmgpy.reaction import Reaction
from rmgpy.molecule import Molecule
folder = os.path.join(os.path.dirname(rmgpy.__file__), 'tools/data/generate/libraryReaction')
input_file = os.path.join(folder, 'input.py')
rmg = RMG(inputFile=input_file, outputDirectory=folder)
rmg = execute(rmg)
self.assertIsNotNone(rmg)
# Assert that the flagged reaction occurs
rxn_flagged = Reaction(reactants=[Molecule(SMILES='[CH]=O'), Molecule(SMILES='C=O')],
products=[Molecule(SMILES='[CH2]OC=O')])
count = 0
for reaction in rmg.reactionModel.core.reactions:
if reaction.isIsomorphic(rxn_flagged):
count += 1
self.assertEquals(count, 1)
# Assert that the core only has 1 reaction
self.assertEquals(len(rmg.reactionModel.core.reactions), 1)
shutil.rmtree(os.path.join(folder, 'pdep'))
def setUp(self):
import rmgpy.data.rmg
rmgpy.data.rmg.database = None
def tearDown(self):
"""
Reset the loaded database
"""
import rmgpy.data.rmg
rmgpy.data.rmg.database = None
| 36.343284
| 101
| 0.632649
|
9a369e9c0d5bdb2a4bd77d141dd83a02507ba557
| 1,518
|
py
|
Python
|
verlaengern.py
|
csicar/hosteurope-letsencrypt
|
d4f5087343a14c958d6b23da797dcdca00270109
|
[
"Apache-2.0"
] | 59
|
2017-10-04T17:09:09.000Z
|
2022-03-28T08:26:23.000Z
|
verlaengern.py
|
csicar/hosteurope-letsencrypt
|
d4f5087343a14c958d6b23da797dcdca00270109
|
[
"Apache-2.0"
] | 12
|
2018-03-06T18:42:26.000Z
|
2022-02-20T21:52:53.000Z
|
verlaengern.py
|
csicar/hosteurope-letsencrypt
|
d4f5087343a14c958d6b23da797dcdca00270109
|
[
"Apache-2.0"
] | 14
|
2017-11-07T21:36:26.000Z
|
2021-09-04T11:18:02.000Z
|
#!/usr/bin/env python3
# coding=utf-8
import json
import os
from shared import domain_list, config_file
# certbot tries to write to /var/log/letsencrypt by default; because of this, running as root is required.
# certbot Error Message:
# Either run as root, or set --config-dir, --work-dir, and --logs-dir to writeable paths.
is_root = os.geteuid() == 0
home_dir = os.path.expanduser('~/.config/hosteurope-letsencrypt')
certbot_config_dir = home_dir
certbot_work_dir = home_dir
certbot_logs_dir = os.path.expanduser('~/.config/hosteurope-letsencrypt/logs')
if not is_root and not os.path.exists(certbot_logs_dir):
os.makedirs(certbot_logs_dir)
# Read the settings
with open(config_file('einstellungen.json')) as cfg_file:
config = json.load(cfg_file)
email = config['email']
staging = config['staging']
challenge = config.get('preferred-challenge', 'http')
# Assemble the certbot command
cmd = 'certbot certonly -n --manual --agree-tos --manual-public-ip-logging-ok'
cmd += ' -m ' + email
if 'http' == challenge:
cmd += ' --manual-auth-hook "python3 validate.py"'
if staging:
cmd += ' --staging'
if not is_root:
cmd += ' --logs-dir ' + certbot_logs_dir
cmd += ' --work-dir ' + certbot_work_dir
cmd += ' --config-dir ' + certbot_config_dir
cmd += domain_list
# Confirmation prompt
print(cmd)
answer = input('Zertifikate verlängern? (j/n): ')
if answer != 'j':
print('Abbruch, es wurde kein Zertifikat verlängert.')
exit(0)
# Create the new certificate
os.system(cmd)
| 29.192308
| 106
| 0.714756
|
73da56bba4eeed748efefd32c4f54fd1b19a311b
| 3,743
|
py
|
Python
|
ask-smapi-model/ask_smapi_model/v1/skill/skill_credentials.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/skill_credentials.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/skill_credentials.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.skill_messaging_credentials import SkillMessagingCredentialsV1
class SkillCredentials(object):
"""
Structure for skill credentials response.
:param skill_messaging_credentials:
:type skill_messaging_credentials: (optional) ask_smapi_model.v1.skill.skill_messaging_credentials.SkillMessagingCredentials
"""
deserialized_types = {
'skill_messaging_credentials': 'ask_smapi_model.v1.skill.skill_messaging_credentials.SkillMessagingCredentials'
} # type: Dict
attribute_map = {
'skill_messaging_credentials': 'skillMessagingCredentials'
} # type: Dict
supports_multiple_types = False
def __init__(self, skill_messaging_credentials=None):
# type: (Optional[SkillMessagingCredentialsV1]) -> None
"""Structure for skill credentials response.
:param skill_messaging_credentials:
:type skill_messaging_credentials: (optional) ask_smapi_model.v1.skill.skill_messaging_credentials.SkillMessagingCredentials
"""
self.__discriminator_value = None # type: str
self.skill_messaging_credentials = skill_messaging_credentials
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SkillCredentials):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 34.027273
| 132
| 0.629709
|
b7e80bef156906d030efc2da3a8a43914316c8b9
| 1,796
|
py
|
Python
|
crawler/functions/crawler_functions.py
|
mihai97ionita/web_crawler
|
65f73466c98ca27272fcdeb2abafca44de72e312
|
[
"MIT"
] | null | null | null |
crawler/functions/crawler_functions.py
|
mihai97ionita/web_crawler
|
65f73466c98ca27272fcdeb2abafca44de72e312
|
[
"MIT"
] | null | null | null |
crawler/functions/crawler_functions.py
|
mihai97ionita/web_crawler
|
65f73466c98ca27272fcdeb2abafca44de72e312
|
[
"MIT"
] | null | null | null |
import json
import os
import redis
from crawler.concurrency.FutureCollector import FutureCollector
from crawler.factory.chrome_driver_factory import create_web_driver
from crawler.factory.redis_connection_factory import create_redis_connection
from crawler.functions.parse_functions import future_of_functions, future_of_function, find_university
# 10 minutes
cache_expiry_time = 10 * 60 * 1000
def crawler_and_parse(parse_function, parse_url):
    # Fall back to an uncached parse if the Redis-backed cache is unavailable
    try:
        return crawler_and_parse_with_cache(parse_function, parse_url)
    except Exception:
        return parse_function(parse_url)
def crawler_and_parse_with_cache(parse_function, parse_url):
redis_connection = create_redis_connection()
crawled_page = get_crawled_page_from_redis(redis_connection, parse_url)
if crawled_page:
return crawled_page
else:
crawled_page = parse_function(parse_url)
set_crawled_page_in_redis(redis_connection, parse_url, crawled_page)
return crawled_page
def get_crawled_page_from_redis(redis_connection, key):
value = redis_connection.get(key)
if value:
return json.loads(value)
else:
        return None
def get_value_from_redis(redis_connection, key):
return redis_connection.get(key)
def set_crawled_page_in_redis(redis_connection, key, crawled_page):
json_crawled_page = json.dumps(crawled_page)
set_value_in_redis(redis_connection, key, json_crawled_page)
def set_value_in_redis(redis_connection, key, value):
redis_connection.mset({key: value})
redis_connection.pexpire(key, cache_expiry_time)
def filter_by_info_selected(crawled_page, selected_info):
crawled_dict_info = {}
for k in crawled_page:
if k in selected_info:
crawled_dict_info[k] = crawled_page[k]
return crawled_dict_info
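# Illustrative sketch (not part of the original module): crawler_and_parse_with_cache is a
# read-through cache keyed by the parse URL, and filter_by_info_selected trims the crawled
# dict to the requested fields. The page data below is hypothetical.
def _example_filter_by_info_selected():
    crawled_page = {'name': 'Example University', 'rank': 42, 'city': 'Springfield'}
    selected = filter_by_info_selected(crawled_page, ['name', 'rank'])
    assert selected == {'name': 'Example University', 'rank': 42}
    return selected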
| 29.442623
| 102
| 0.778953
|
827de06f9c7c46368a7573280687c75a13029f4c
| 37,644
|
py
|
Python
|
jira/resources.py
|
artbycrunk/jira
|
af39e84c938ea3e19acaef73c7163d7554f2f4a4
|
[
"BSD-2-Clause"
] | 3
|
2020-04-23T02:33:12.000Z
|
2021-11-18T15:12:52.000Z
|
jira/resources.py
|
artbycrunk/jira
|
af39e84c938ea3e19acaef73c7163d7554f2f4a4
|
[
"BSD-2-Clause"
] | null | null | null |
jira/resources.py
|
artbycrunk/jira
|
af39e84c938ea3e19acaef73c7163d7554f2f4a4
|
[
"BSD-2-Clause"
] | 1
|
2020-07-15T05:34:44.000Z
|
2020-07-15T05:34:44.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
"""
This module implements the Resource classes that translate JSON from JIRA REST resources
into usable objects.
"""
import logging
import re
import time
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
import json
from six import iteritems
from six import string_types
from six import text_type
from jira.utils import CaseInsensitiveDict
from jira.utils import json_loads
from jira.utils import threaded_requests
__all__ = (
'Resource',
'Issue',
'Comment',
'Project',
'Attachment',
'Component',
'Dashboard',
'Filter',
'Votes',
'Watchers',
'Worklog',
'IssueLink',
'IssueLinkType',
'IssueType',
'Priority',
'Version',
'Role',
'Resolution',
'SecurityLevel',
'Status',
'User',
'Group',
'CustomFieldOption',
'RemoteLink',
'Customer',
'ServiceDesk',
'RequestType',
)
logging.getLogger('jira').addHandler(NullHandler())
def get_error_list(r):
error_list = []
if r.status_code >= 400:
if r.status_code == 403 and "x-authentication-denied-reason" in r.headers:
error_list = [r.headers["x-authentication-denied-reason"]]
elif r.text:
try:
response = json_loads(r)
if 'message' in response:
# JIRA 5.1 errors
error_list = [response['message']]
elif 'errorMessages' in response and len(response['errorMessages']) > 0:
# JIRA 5.0.x error messages sometimes come wrapped in this array
# Sometimes this is present but empty
errorMessages = response['errorMessages']
if isinstance(errorMessages, (list, tuple)):
error_list = errorMessages
else:
error_list = [errorMessages]
elif 'errors' in response and len(response['errors']) > 0:
# JIRA 6.x error messages are found in this array.
error_list = response['errors'].values()
else:
error_list = [r.text]
except ValueError:
error_list = [r.text]
return error_list
class Resource(object):
"""Models a URL-addressable resource in the JIRA REST API.
All Resource objects provide the following:
``find()`` -- get a resource from the server and load it into the current object
(though clients should use the methods in the JIRA class instead of this method directly)
``update()`` -- changes the value of this resource on the server and returns a new resource object for it
``delete()`` -- deletes this resource from the server
``self`` -- the URL of this resource on the server
``raw`` -- dict of properties parsed out of the JSON response from the server
Subclasses will implement ``update()`` and ``delete()`` as appropriate for the specific resource.
All Resources have a resource path of the form:
* ``issue``
* ``project/{0}``
* ``issue/{0}/votes``
* ``issue/{0}/comment/{1}``
where the bracketed numerals are placeholders for ID values that are filled in from the
``ids`` parameter to ``find()``.
"""
JIRA_BASE_URL = '{server}/rest/{rest_path}/{rest_api_version}/{path}'
# A prioritized list of the keys in self.raw most likely to contain a human
# readable name or identifier, or that offer other key information.
_READABLE_IDS = ('displayName', 'key', 'name', 'filename', 'value',
'scope', 'votes', 'id', 'mimeType', 'closed')
def __init__(self, resource, options, session, base_url=JIRA_BASE_URL):
"""Initializes a generic resource.
:param resource: The name of the resource.
:type resource: str
:param options: Options for the new resource
:type options: Dict[str,str]
:param session: Session used for the resource.
:type session: ResilientSession
:param base_url: The Base JIRA url.
:type base_url: Optional[str]
"""
self._resource = resource
self._options = options
self._session = session
self._base_url = base_url
# Explicitly define as None so we know when a resource has actually
# been loaded
self.raw = None
def __str__(self):
"""Return the first value we find that is likely to be human readable.
:rtype: str
"""
if self.raw:
for name in self._READABLE_IDS:
if name in self.raw:
pretty_name = text_type(self.raw[name])
# Include any child to support nested select fields.
if hasattr(self, 'child'):
pretty_name += ' - ' + text_type(self.child)
return pretty_name
# If all else fails, use repr to make sure we get something.
return repr(self)
def __repr__(self):
"""Identify the class and include any and all relevant values.
:rtype: str
"""
names = []
if self.raw:
for name in self._READABLE_IDS:
if name in self.raw:
names.append(name + '=' + repr(self.raw[name]))
if not names:
return '<JIRA %s at %s>' % (self.__class__.__name__,
id(self))
return '<JIRA %s: %s>' % (self.__class__.__name__, ', '.join(names))
def __getattr__(self, item):
"""Allow access of attributes via names.
:param item: Attribute name
:type item: str
:rtype: Any
:raises KeyError: When the attribute does not exist.
:raises AttributeError: When attribute does not exist.
"""
try:
return self[item]
except Exception as e:
# Make sure pickling doesn't break
# *MORE INFO*: This conditional wouldn't be necessary if __getattr__ wasn't used. But
# since it is in use (no worries), we need to give the pickle.dump*
# methods what they expect back. They expect to either get a KeyError
# exception or a tuple of args to be passed to the __new__ method upon
# unpickling (i.e. pickle.load* methods).
# *NOTE*: if the __new__ method were to be implemented in this class, this may have
# to be removed or changed.
if item == '__getnewargs__':
raise KeyError(item)
if hasattr(self, 'raw') and item in self.raw:
return self.raw[item]
else:
raise AttributeError("%r object has no attribute %r (%s)" % (self.__class__, item, e))
# def __getstate__(self):
# """
# Pickling the resource; using the raw dict
# """
# return self.raw
#
# def __setstate__(self, raw_pickled):
# """
# Unpickling of the resource
# """
# self._parse_raw(raw_pickled)
#
def find(self,
id,
params=None,
):
"""Finds a resource based on the input parameters.
:type id: Union[Tuple[str, str], int, str]
:type params: Optional[Dict[str, str]]
"""
if params is None:
params = {}
if isinstance(id, tuple):
path = self._resource.format(*id)
else:
path = self._resource.format(id)
url = self._get_url(path)
self._load(url, params=params)
def _get_url(self, path):
""" Gets the url for the specified path.
:type path: str
:rtype: str
"""
options = self._options.copy()
options.update({'path': path})
return self._base_url.format(**options)
def update(self, fields=None, async_=None, jira=None, notify=True, **kwargs):
"""Update this resource on the server.
Keyword arguments are marshalled into a dict before being sent. If this
resource doesn't support ``PUT``, a :py:exc:`.JIRAError` will be raised; subclasses that specialize this method
will only raise errors in case of user error.
:param fields: Fields which should be updated for the object.
:type fields: Optional[Dict[str, Any]]
:param async_: If true the request will be added to the queue so it can be executed later using async_run()
:type async_: bool
:param jira: Instance of JIRA Client
:type jira: jira.JIRA
:param notify: Whether or not to notify users about the update. (Default: True)
:type notify: bool
:type kwargs: **Any
"""
if async_ is None:
async_ = self._options['async']
data = {}
if fields is not None:
data.update(fields)
data.update(kwargs)
data = json.dumps(data)
if not notify:
querystring = "?notifyUsers=false"
else:
querystring = ""
r = self._session.put(
self.self + querystring, data=data)
if 'autofix' in self._options and \
r.status_code == 400:
user = None
error_list = get_error_list(r)
logging.error(error_list)
if "The reporter specified is not a user." in error_list:
if 'reporter' not in data['fields']:
logging.warning(
"autofix: setting reporter to '%s' and retrying the update." % self._options['autofix'])
data['fields']['reporter'] = {
'name': self._options['autofix']}
if "Issues must be assigned." in error_list:
if 'assignee' not in data['fields']:
logging.warning("autofix: setting assignee to '%s' for %s and retrying the update." % (
self._options['autofix'], self.key))
data['fields']['assignee'] = {
'name': self._options['autofix']}
# for some reason the above approach fails on Jira 5.2.11
# so we need to change the assignee before
if "Issue type is a sub-task but parent issue key or id not specified." in error_list:
logging.warning(
"autofix: trying to fix sub-task without parent by converting to it to bug")
data['fields']['issuetype'] = {"name": "Bug"}
if "The summary is invalid because it contains newline characters." in error_list:
logging.warning("autofix: trying to fix newline in summary")
data['fields'][
'summary'] = self.fields.summary.replace("/n", "")
for error in error_list:
if re.search(r"^User '(.*)' was not found in the system\.", error, re.U):
m = re.search(
r"^User '(.*)' was not found in the system\.", error, re.U)
if m:
user = m.groups()[0]
else:
raise NotImplementedError()
if re.search(r"^User '(.*)' does not exist\.", error):
m = re.search(r"^User '(.*)' does not exist\.", error)
if m:
user = m.groups()[0]
else:
raise NotImplementedError()
if user:
logging.warning(
"Trying to add missing orphan user '%s' in order to complete the previous failed operation." % user)
jira.add_user(user, 'noreply@example.com', 10100, active=False)
# if 'assignee' not in data['fields']:
# logging.warning("autofix: setting assignee to '%s' and retrying the update." % self._options['autofix'])
# data['fields']['assignee'] = {'name': self._options['autofix']}
# EXPERIMENTAL --->
if async_:
if not hasattr(self._session, '_async_jobs'):
self._session._async_jobs = set()
self._session._async_jobs.add(threaded_requests.put(
self.self, data=json.dumps(data)))
else:
r = self._session.put(
self.self, data=json.dumps(data))
time.sleep(self._options['delay_reload'])
self._load(self.self)
def delete(self, params=None):
"""Delete this resource from the server, passing the specified query parameters.
If this resource doesn't support ``DELETE``, a :py:exc:`.JIRAError`
will be raised; subclasses that specialize this method will only raise errors
in case of user error.
:param params: Parameters for the delete request.
:type params: Optional[Dict[str, Any]]
:rtype: Response
"""
if self._options['async']:
if not hasattr(self._session, '_async_jobs'):
self._session._async_jobs = set()
self._session._async_jobs.add(
threaded_requests.delete(url=self.self, params=params))
else:
return self._session.delete(url=self.self, params=params)
def _load(self,
url,
headers=CaseInsensitiveDict(),
params=None,
path=None,
):
""" Load a resource.
:type url: str
:type headers: CaseInsensitiveDict
:type params: Optional[Dict[str,str]]
:type path: Optional[str]
"""
r = self._session.get(url, headers=headers, params=params)
try:
j = json_loads(r)
except ValueError as e:
logging.error("%s:\n%s" % (e, r.text))
raise e
if path:
j = j[path]
self._parse_raw(j)
def _parse_raw(self, raw):
"""Parse a raw dictionary to create a resource.
:type raw: Dict[str, Any]
"""
self.raw = raw
if not raw:
raise NotImplementedError("We cannot instantiate empty resources: %s" % raw)
dict2resource(raw, self, self._options, self._session)
def _default_headers(self, user_headers):
# result = dict(user_headers)
# result['accept'] = 'application/json'
        return CaseInsensitiveDict(list(self._options['headers'].items()) +
                                   list(user_headers.items()))
class Attachment(Resource):
"""An issue attachment."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'attachment/{0}', options, session)
if raw:
self._parse_raw(raw)
def get(self):
"""Return the file content as a string."""
r = self._session.get(self.content, headers={'Accept': '*/*'})
return r.content
def iter_content(self, chunk_size=1024):
"""Return the file content as an iterable stream."""
r = self._session.get(self.content, stream=True)
return r.iter_content(chunk_size)
class Component(Resource):
"""A project component."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'component/{0}', options, session)
if raw:
self._parse_raw(raw)
def delete(self, moveIssuesTo=None):
"""Delete this component from the server.
:param moveIssuesTo: the name of the component to which to move any issues this component is applied
"""
params = {}
if moveIssuesTo is not None:
params['moveIssuesTo'] = moveIssuesTo
super(Component, self).delete(params)
class CustomFieldOption(Resource):
"""An existing option for a custom issue field."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'customFieldOption/{0}', options, session)
if raw:
self._parse_raw(raw)
class Dashboard(Resource):
"""A JIRA dashboard."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'dashboard/{0}', options, session)
if raw:
self._parse_raw(raw)
class Filter(Resource):
"""An issue navigator filter."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'filter/{0}', options, session)
if raw:
self._parse_raw(raw)
class Issue(Resource):
"""A JIRA issue."""
class _IssueFields(object):
def __init__(self):
self.attachment = None
""" :type : list[Attachment] """
self.description = None
""" :type : str """
self.project = None
""" :type : Project """
self.comment = None
""" :type : list[Comment] """
self.issuelinks = None
""" :type : list[IssueLink] """
self.worklog = None
""" :type : list[Worklog] """
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issue/{0}', options, session)
self.fields = None
""" :type: :class:`~Issue._IssueFields` """
self.id = None
""" :type: int """
self.key = None
""" :type: str """
if raw:
self._parse_raw(raw)
def update(self, fields=None, update=None, async_=None, jira=None, notify=True, **fieldargs):
"""Update this issue on the server.
        Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
        is treated as the intended value for that field. String keyword arguments are merged into ``fields``,
        while list keyword arguments are merged into ``update`` (see ``fieldargs`` below).
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in an issue. This information is available through the :py:meth:`.JIRA.editmeta` method. Further examples
are available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Edit+issues
:param fields: a dict containing field names and the values to use
:param update: a dict containing update operations to apply
:param notify: query parameter notifyUsers. If true send the email with notification that the issue was updated
to users that watch it. Admin or project admin permissions are required to disable the notification.
:param fieldargs: keyword arguments will generally be merged into fields, except lists,
which will be merged into updates
:type fields: dict
:type update: dict
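        Example (hypothetical values; ``issue`` is assumed to be an already fetched Issue):
            issue.update(summary='new summary', description='A new description')
            issue.update(fields={'labels': ['triage']}, notify=False)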
"""
data = {}
if fields is not None:
fields_dict = fields
else:
fields_dict = {}
data['fields'] = fields_dict
if update is not None:
update_dict = update
else:
update_dict = {}
data['update'] = update_dict
for field in sorted(fieldargs.keys()):
value = fieldargs[field]
# apply some heuristics to make certain changes easier
if isinstance(value, string_types):
if field == 'assignee' or field == 'reporter':
                    fields_dict[field] = {'name': value}
elif field == 'comment':
if 'comment' not in update_dict:
update_dict['comment'] = []
update_dict['comment'].append({
'add': {'body': value}})
else:
fields_dict[field] = value
elif isinstance(value, list):
if field not in update_dict:
update_dict[field] = []
update_dict[field].extend(value)
else:
fields_dict[field] = value
super(Issue, self).update(async_=async_, jira=jira, notify=notify, fields=data)
def add_field_value(self, field, value):
"""Add a value to a field that supports multiple values, without resetting the existing values.
This should work with: labels, multiple checkbox lists, multiple select
:param field: The field name
:param value: The field's value
:type field: str
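        Example (hypothetical; ``issue`` is an already fetched Issue):
            issue.add_field_value('labels', 'triage')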
"""
super(Issue, self).update(fields={"update": {field: [{"add": value}]}})
def delete(self, deleteSubtasks=False):
"""Delete this issue from the server.
:param deleteSubtasks: if the issue has subtasks, this argument must be set to true for the call to succeed.
:type deleteSubtasks: bool
"""
super(Issue, self).delete(params={'deleteSubtasks': deleteSubtasks})
def permalink(self):
"""Get the URL of the issue, the browsable one not the REST one.
:return: URL of the issue
:rtype: str
"""
return "%s/browse/%s" % (self._options['server'], self.key)
def __eq__(self, other):
"""Comparison method."""
return other is not None and self.id == other.id
class Comment(Resource):
"""An issue comment."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issue/{0}/comment/{1}', options, session)
if raw:
self._parse_raw(raw)
    def update(self, fields=None, async_=None, jira=None, body='', visibility=None):
        """Update this comment's ``body`` and/or ``visibility`` on the server."""
        data = {}
        if fields is not None:
            data.update(fields)
        if body:
            data['body'] = body
        if visibility:
            data['visibility'] = visibility
        super(Comment, self).update(async_=async_, jira=jira, fields=data)
class RemoteLink(Resource):
"""A link to a remote application from an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issue/{0}/remotelink/{1}', options, session)
if raw:
self._parse_raw(raw)
def update(self, object, globalId=None, application=None, relationship=None):
"""Update a RemoteLink. 'object' is required.
For definitions of the allowable fields for 'object' and the keyword arguments 'globalId', 'application' and
'relationship', see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param object: the link details to add (see the above link for details)
:param globalId: unique ID for the link (see the above link for details)
:param application: application information for the link (see the above link for details)
:param relationship: relationship description for the link (see the above link for details)
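        Example (a minimal, hypothetical link object; ``url`` and ``title`` are the usual required keys, see the link above):
            link.update({'url': 'https://tracker.example.com/TICKET-1', 'title': 'Upstream ticket'})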
"""
data = {
'object': object}
if globalId is not None:
data['globalId'] = globalId
if application is not None:
data['application'] = application
if relationship is not None:
data['relationship'] = relationship
super(RemoteLink, self).update(**data)
class Votes(Resource):
"""Vote information on an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issue/{0}/votes', options, session)
if raw:
self._parse_raw(raw)
class Watchers(Resource):
"""Watcher information on an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issue/{0}/watchers', options, session)
if raw:
self._parse_raw(raw)
def delete(self, username):
"""Remove the specified user from the watchers list."""
super(Watchers, self).delete(params={'username': username})
class TimeTracking(Resource):
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issue/{0}/worklog/{1}', options, session)
self.remainingEstimate = None
if raw:
self._parse_raw(raw)
class Worklog(Resource):
"""Worklog on an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issue/{0}/worklog/{1}', options, session)
if raw:
self._parse_raw(raw)
def delete(self, adjustEstimate=None, newEstimate=None, increaseBy=None):
"""Delete this worklog entry from its associated issue.
:param adjustEstimate: one of ``new``, ``leave``, ``manual`` or ``auto``.
``auto`` is the default and adjusts the estimate automatically.
``leave`` leaves the estimate unchanged by this deletion.
:param newEstimate: combined with ``adjustEstimate=new``, set the estimate to this value
:param increaseBy: combined with ``adjustEstimate=manual``, increase the remaining estimate by this amount
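        Example (hypothetical worklog object):
            worklog.delete(adjustEstimate='new', newEstimate='2d')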
"""
params = {}
if adjustEstimate is not None:
params['adjustEstimate'] = adjustEstimate
if newEstimate is not None:
params['newEstimate'] = newEstimate
if increaseBy is not None:
params['increaseBy'] = increaseBy
super(Worklog, self).delete(params)
class IssueLink(Resource):
"""Link between two issues."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issueLink/{0}', options, session)
if raw:
self._parse_raw(raw)
class IssueLinkType(Resource):
"""Type of link between two issues."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issueLinkType/{0}', options, session)
if raw:
self._parse_raw(raw)
class IssueType(Resource):
"""Type of an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'issuetype/{0}', options, session)
if raw:
self._parse_raw(raw)
class Priority(Resource):
"""Priority that can be set on an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'priority/{0}', options, session)
if raw:
self._parse_raw(raw)
class Project(Resource):
"""A JIRA project."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'project/{0}', options, session)
if raw:
self._parse_raw(raw)
class Role(Resource):
"""A role inside a project."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'project/{0}/role/{1}', options, session)
if raw:
self._parse_raw(raw)
def update(self, users=None, groups=None):
"""Add the specified users or groups to this project role. One of ``users`` or ``groups`` must be specified.
:param users: a user or users to add to the role
:type users: string, list or tuple
:param groups: a group or groups to add to the role
:type groups: string, list or tuple
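        Example (hypothetical user and group names):
            role.update(users=('alice', 'bob'), groups='jira-developers')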
"""
if users is not None and isinstance(users, string_types):
users = (users,)
if groups is not None and isinstance(groups, string_types):
groups = (groups,)
data = {
'id': self.id,
'categorisedActors': {
'atlassian-user-role-actor': users,
'atlassian-group-role-actor': groups}}
super(Role, self).update(**data)
def add_user(self, users=None, groups=None):
"""Add the specified users or groups to this project role.
One of ``users`` or ``groups`` must be specified.
:param users: a user or users to add to the role
:type users: string, list or tuple
:param groups: a group or groups to add to the role
:type groups: string, list or tuple
"""
if users is not None and isinstance(users, string_types):
users = (users,)
if groups is not None and isinstance(groups, string_types):
groups = (groups,)
        # include whichever actor types were supplied (the docstring allows either)
        data = {}
        if users is not None:
            data['user'] = users
        if groups is not None:
            data['group'] = groups
        self._session.post(self.self, data=json.dumps(data))
class Resolution(Resource):
"""A resolution for an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'resolution/{0}', options, session)
if raw:
self._parse_raw(raw)
class SecurityLevel(Resource):
"""A security level for an issue or project."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'securitylevel/{0}', options, session)
if raw:
self._parse_raw(raw)
class Status(Resource):
"""Status for an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'status/{0}', options, session)
if raw:
self._parse_raw(raw)
class StatusCategory(Resource):
"""StatusCategory for an issue."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'statuscategory/{0}', options, session)
if raw:
self._parse_raw(raw)
class User(Resource):
"""A JIRA user."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'user?username={0}', options, session)
if raw:
self._parse_raw(raw)
def __hash__(self):
"""Hash calculation."""
return hash(str(self.name))
def __eq__(self, other):
"""Comparison."""
return str(self.name) == str(other.name)
class Group(Resource):
"""A JIRA user group."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'group?groupname={0}', options, session)
if raw:
self._parse_raw(raw)
def __hash__(self):
"""Hash calculation."""
return hash(str(self.name))
def __eq__(self, other):
"""Equality by name."""
return str(self.name) == str(other.name)
class Version(Resource):
"""A version of a project."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'version/{0}', options, session)
if raw:
self._parse_raw(raw)
def delete(self, moveFixIssuesTo=None, moveAffectedIssuesTo=None):
"""Delete this project version from the server.
If neither of the arguments are specified, the version is
removed from all issues it is attached to.
:param moveFixIssuesTo: in issues for which this version is a fix
version, add this argument version to the fix version list
:param moveAffectedIssuesTo: in issues for which this version is an
affected version, add this argument version to the affected version list
"""
params = {}
if moveFixIssuesTo is not None:
params['moveFixIssuesTo'] = moveFixIssuesTo
if moveAffectedIssuesTo is not None:
params['moveAffectedIssuesTo'] = moveAffectedIssuesTo
return super(Version, self).delete(params)
def update(self, **args):
"""Update this project version from the server. It is prior used to archive versions."""
data = {}
for field in args:
data[field] = args[field]
super(Version, self).update(**data)
def __eq__(self, other):
"""Comparison."""
return self.id == other.id and self.name == other.name
# GreenHopper
class GreenHopperResource(Resource):
"""A generic GreenHopper resource."""
AGILE_BASE_URL = '{server}/rest/{agile_rest_path}/{agile_rest_api_version}/{path}'
GREENHOPPER_REST_PATH = "greenhopper"
""" Old, private API. Deprecated and will be removed from JIRA on the 1st February 2016. """
AGILE_EXPERIMENTAL_REST_PATH = "greenhopper/experimental-api"
""" Experimental API available in JIRA Agile 6.7.3 - 6.7.6, basically the same as Public API """
AGILE_BASE_REST_PATH = "agile"
""" Public API introduced in JIRA Agile 6.7.7. """
def __init__(self, path, options, session, raw):
self.self = None
Resource.__init__(self, path, options, session, self.AGILE_BASE_URL)
if raw:
self._parse_raw(raw)
# Old GreenHopper API did not contain self - create it for backward compatibility.
if not self.self:
self.self = self._get_url(path.format(raw['id']))
class Sprint(GreenHopperResource):
"""A GreenHopper sprint."""
def __init__(self, options, session, raw=None):
GreenHopperResource.__init__(self, 'sprint/{0}', options, session, raw)
def find(self, id, params=None):
if self._options['agile_rest_path'] != GreenHopperResource.GREENHOPPER_REST_PATH:
Resource.find(self, id, params)
else:
# Old, private GreenHopper API had non-standard way of loading Sprint
url = self._get_url('sprint/%s/edit/model' % id)
self._load(url, params=params, path='sprint')
class Board(GreenHopperResource):
"""A GreenHopper board."""
def __init__(self, options, session, raw=None):
        path = 'rapidview/{0}' if options['agile_rest_path'] == self.GREENHOPPER_REST_PATH else 'board/{0}'
GreenHopperResource.__init__(self, path, options, session, raw)
def delete(self, params=None):
if self._options['agile_rest_path'] != GreenHopperResource.GREENHOPPER_REST_PATH:
raise NotImplementedError('JIRA Agile Public API does not support Board removal')
Resource.delete(self, params)
# Service Desk
class Customer(Resource):
"""A Service Desk customer."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'customer', options, session, '{server}/rest/servicedeskapi/{path}')
if raw:
self._parse_raw(raw)
class ServiceDesk(Resource):
"""A Service Desk."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'servicedesk/{0}', options, session, '{server}/rest/servicedeskapi/{path}')
if raw:
self._parse_raw(raw)
class RequestType(Resource):
"""A Service Desk Request Type."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'servicedesk/{0}/requesttype', options, session, '{server}/rest/servicedeskapi/{path}')
if raw:
self._parse_raw(raw)
# Utilities
def dict2resource(raw, top=None, options=None, session=None):
"""Convert a dictionary into a Jira Resource object.
Recursively walks a dict structure, transforming the properties into attributes
on a new ``Resource`` object of the appropriate type (if a ``self`` link is present)
or a ``PropertyHolder`` object (if no ``self`` link is present).
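    Example (hypothetical raw payload without ``self`` links):
        holder = dict2resource({'key': 'TEST-1', 'status': {'name': 'Open'}})
        holder.key          # 'TEST-1'
        holder.status.name  # 'Open'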
"""
if top is None:
top = PropertyHolder(raw)
seqs = tuple, list, set, frozenset
for i, j in iteritems(raw):
if isinstance(j, dict):
if 'self' in j:
resource = cls_for_resource(j['self'])(options, session, j)
setattr(top, i, resource)
elif i == 'timetracking':
setattr(top, 'timetracking', TimeTracking(options, session, j))
else:
setattr(
top, i, dict2resource(j, options=options, session=session))
elif isinstance(j, seqs):
seq_list = []
for seq_elem in j:
if isinstance(seq_elem, dict):
if 'self' in seq_elem:
resource = cls_for_resource(seq_elem['self'])(
options, session, seq_elem)
seq_list.append(resource)
else:
seq_list.append(
dict2resource(seq_elem, options=options, session=session))
else:
seq_list.append(seq_elem)
setattr(top, i, seq_list)
else:
setattr(top, i, j)
return top
resource_class_map = {
# JIRA specific resources
r'attachment/[^/]+$': Attachment,
r'component/[^/]+$': Component,
r'customFieldOption/[^/]+$': CustomFieldOption,
r'dashboard/[^/]+$': Dashboard,
    r'filter/[^/]+$': Filter,
r'issue/[^/]+$': Issue,
r'issue/[^/]+/comment/[^/]+$': Comment,
r'issue/[^/]+/votes$': Votes,
r'issue/[^/]+/watchers$': Watchers,
r'issue/[^/]+/worklog/[^/]+$': Worklog,
r'issueLink/[^/]+$': IssueLink,
r'issueLinkType/[^/]+$': IssueLinkType,
r'issuetype/[^/]+$': IssueType,
r'priority/[^/]+$': Priority,
r'project/[^/]+$': Project,
r'project/[^/]+/role/[^/]+$': Role,
r'resolution/[^/]+$': Resolution,
r'securitylevel/[^/]+$': SecurityLevel,
r'status/[^/]+$': Status,
r'statuscategory/[^/]+$': StatusCategory,
r'user\?(username|accountId).+$': User,
r'group\?groupname.+$': Group,
r'version/[^/]+$': Version,
# GreenHopper specific resources
r'sprints/[^/]+$': Sprint,
r'views/[^/]+$': Board}
class UnknownResource(Resource):
"""A Resource from JIRA that is not (yet) supported."""
def __init__(self, options, session, raw=None):
Resource.__init__(self, 'unknown{0}', options, session)
if raw:
self._parse_raw(raw)
def cls_for_resource(resource_literal):
for resource in resource_class_map:
if re.search(resource, resource_literal):
return resource_class_map[resource]
else:
# Generic Resource cannot directly be used b/c of different constructor signature
return UnknownResource
class PropertyHolder(object):
def __init__(self, raw):
__bases__ = raw # noqa
| 34.662983
| 125
| 0.588248
|
f2866af3f8e4ca5da35e0f27cfd35fa5927abd34
| 3,321
|
py
|
Python
|
data/dataset.py
|
hsientzucheng/CP-360-Weakly-Supervised-Saliency
|
dde55cccf220e72f94a3f9129036c56518c3a474
|
[
"MIT"
] | 23
|
2018-10-30T14:02:31.000Z
|
2021-11-15T15:38:51.000Z
|
data/dataset.py
|
hsientzucheng/CP-360-Weakly-Supervised-Saliency
|
dde55cccf220e72f94a3f9129036c56518c3a474
|
[
"MIT"
] | null | null | null |
data/dataset.py
|
hsientzucheng/CP-360-Weakly-Supervised-Saliency
|
dde55cccf220e72f94a3f9129036c56518c3a474
|
[
"MIT"
] | 7
|
2019-04-18T15:52:11.000Z
|
2021-04-05T04:44:48.000Z
|
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import cv2
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from PIL import Image
class Sal360Dataset:
def __init__(self, video_dir, motion_dir, input_d_list, seq_len, transform=None):
self.video_dir = video_dir
self.motion_dir = motion_dir
        self.seq_len = seq_len
        # read the list of video categories to include (one name per line)
        with open(input_d_list, "r") as ffile:
            ddd_list = ffile.readlines()
        self.data_list = [x.split('\n')[0] for x in ddd_list]
self.data = []
self.motion = []
video_categories = os.listdir(video_dir)
video_categories.sort()
for video_category in video_categories:
if video_category not in self.data_list:
continue
print("Got {}".format(video_category))
feat_sequences = os.listdir(os.path.join(
self.video_dir, video_category, 'cube_feat'))
feat_sequences.sort()
max_len = int(feat_sequences[-1].split('.')[0])
for seq in feat_sequences:
if ('.npy' in seq) and int(seq.split('.')[0]) < (max_len-seq_len+1):
self.data.append(os.path.join(
self.video_dir, video_category, 'cube_feat', seq))
motion_sequences = os.listdir(os.path.join(
self.motion_dir, video_category, 'motion'))
motion_sequences.sort()
for seq in motion_sequences:
if ('.npy' in seq) and int(seq.split('.')[0]) < (max_len-seq_len+1):
self.motion.append(os.path.join(
self.motion_dir, video_category, 'motion', seq))
assert len(self.data) == len(self.motion)
self.transform = transform
def __getitem__(self, index):
seq = []
motion = []
category = self.data[index].split('/')[-3]
filename = self.data[index].split('/')[-1]
for offset in range(self.seq_len):
# Static model features
category, mid_filename, filename = self.data[index].split('/')[-3:]
targ_feat_path = os.path.join(self.data[index].split(mid_filename)[0], mid_filename,
'{:06}{}'.format(int(filename.split('.')[0]) + offset, filename[-4:]))
if os.path.exists(targ_feat_path):
cam = np.load(targ_feat_path)
seq.append(torch.Tensor(cam))
else:
print("{} doesn't exist.".format(targ_feat_path))
# Optical flow
mcategory, mmid_filename, mfilename = self.motion[index].split(
'/')[-3:]
targ_motion_path = os.path.join(self.motion[index].split(mmid_filename)[0], mmid_filename,
'{:06}{}'.format(int(mfilename.split('.')[0]) + offset, mfilename[-4:]))
if os.path.exists(targ_motion_path):
cam = np.load(targ_motion_path)
motion.append(torch.Tensor(cam))
else:
print("{} doesn't exist.".format(targ_motion_path))
return seq, motion, category, filename
def __len__(self):
return len(self.data)
| 39.535714
| 116
| 0.569407
|
cc9d72698faa1ef784888bba3998055b1b571867
| 2,281
|
py
|
Python
|
f5_cccl/resource/ltm/irule.py
|
michaeldayreads/f5-cccl
|
5aaa584f9fc77742eb9d976954e6dad805e6f4ff
|
[
"Apache-2.0"
] | null | null | null |
f5_cccl/resource/ltm/irule.py
|
michaeldayreads/f5-cccl
|
5aaa584f9fc77742eb9d976954e6dad805e6f4ff
|
[
"Apache-2.0"
] | null | null | null |
f5_cccl/resource/ltm/irule.py
|
michaeldayreads/f5-cccl
|
5aaa584f9fc77742eb9d976954e6dad805e6f4ff
|
[
"Apache-2.0"
] | null | null | null |
"""Provides a class for managing BIG-IP iRule resources."""
# coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from f5_cccl.resource import Resource
LOGGER = logging.getLogger(__name__)
class IRule(Resource):
"""iRule class."""
# The property names class attribute defines the names of the
# properties that we wish to compare.
properties = dict(
name=None,
partition=None,
apiAnonymous=None
)
def __init__(self, name, partition, **data):
"""Create the iRule"""
super(IRule, self).__init__(name, partition)
self._data['apiAnonymous'] = data.get(
'apiAnonymous',
self.properties.get('apiAnonymous')
)
# Strip any leading/trailing whitespace
if self._data['apiAnonymous'] is not None:
self._data['apiAnonymous'] = self._data['apiAnonymous'].strip()
def __eq__(self, other):
"""Check the equality of the two objects.
Only compare the properties as defined in the
        properties class dictionary.
"""
if not isinstance(other, IRule):
return False
for key in self.properties:
if self._data[key] != other.data.get(key, None):
return False
return True
def __hash__(self): # pylint: disable=useless-super-delegation
return super(IRule, self).__hash__()
def _uri_path(self, bigip):
return bigip.tm.ltm.rules.rule
def __str__(self):
return str(self._data)
class IcrIRule(IRule):
"""iRule object created from the iControl REST object"""
pass
class ApiIRule(IRule):
"""IRule object created from the API configuration object"""
pass
| 28.160494
| 75
| 0.660237
|
9d00d4fcde96fb95d77b2c0a50f3be6bab25f80b
| 93
|
py
|
Python
|
playerRatings/__init__.py
|
MichaelBoshell/RSCBot
|
6a77a76e7beab073bc40e8cab300b3031279298b
|
[
"MIT"
] | 12
|
2018-12-19T17:00:00.000Z
|
2021-06-10T13:27:01.000Z
|
playerRatings/__init__.py
|
MichaelBoshell/RSCBot
|
6a77a76e7beab073bc40e8cab300b3031279298b
|
[
"MIT"
] | 37
|
2020-03-10T18:42:29.000Z
|
2021-09-29T19:36:42.000Z
|
playerRatings/__init__.py
|
MichaelBoshell/RSCBot
|
6a77a76e7beab073bc40e8cab300b3031279298b
|
[
"MIT"
] | 14
|
2018-12-31T02:12:18.000Z
|
2021-11-13T01:49:53.000Z
|
from .playerRatings import PlayerRatings
def setup(bot):
bot.add_cog(PlayerRatings(bot))
| 23.25
| 40
| 0.784946
|
da5cb19b09d88738dde9d1c3590620e5378f42e5
| 3,108
|
py
|
Python
|
common-bill-ingestion/bill_upload.py
|
flexera/optima-tools
|
549be817cba61cecaf8868207d596a3e020090e9
|
[
"MIT"
] | null | null | null |
common-bill-ingestion/bill_upload.py
|
flexera/optima-tools
|
549be817cba61cecaf8868207d596a3e020090e9
|
[
"MIT"
] | 2
|
2021-12-29T16:10:06.000Z
|
2021-12-29T16:10:06.000Z
|
common-bill-ingestion/bill_upload.py
|
flexera/optima-tools
|
549be817cba61cecaf8868207d596a3e020090e9
|
[
"MIT"
] | 1
|
2022-03-10T15:53:37.000Z
|
2022-03-10T15:53:37.000Z
|
#!/usr/bin/env python3
'''
Basic Bill Upload example
---------------------------
Creates a bill upload, uploads a file, then commits that bill_upload.
Note: it's OK to create a new upload for a given org_id, bill connect and period,
provided any previous one (for that same org_id/bill connect/period) has been committed (or aborted).
Usage: ./bill_upload.py <refresh_token> <org_id> <bill_connect_id> <period> <filename>
Parameters:
<refresh token>: obtained from Cloud Management:
- go to Settings, then API Credentials in the Account Settings;
- enable the token and pass its value to this script.
<org_id>: the relevant organization id, e.g. "25667"
<bill_connect_id>: for WotC, for now, please use "cbi-wotc-1"
<period>: the billing month in YYYY-MM format, e.g. "2020-06"
<filename>: the file path, e.g. "./testfiles/my_file01.csv"
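Example invocation (illustrative values taken from the parameter descriptions above;
$REFRESH_TOKEN is a placeholder for your own token):
    ./bill_upload.py $REFRESH_TOKEN 25667 cbi-wotc-1 2020-06 ./testfiles/my_file01.csv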
'''
import json
import logging
import requests
import sys
import time
# Tweak the destination (e.g. sys.stdout instead) and level (e.g. logging.DEBUG instead) to taste!
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s', stream=sys.stderr, level=logging.INFO)
if len(sys.argv) < 6:
logging.error('Missing command-line options!!!')
print(__doc__)
sys.exit(1)
refresh_token, org_id, bill_connect_id, period, filename = sys.argv[1:]
logging.info("Using org_id {}, bill_connect_id {}, period {}, filename {}".format(
org_id, bill_connect_id, period, filename))
token_url = "https://login.flexera.com/oidc/token"
bill_upload_url = "https://api.optima.flexeraeng.com/optima/orgs/{}/billUploads".format(org_id)
logging.info("OAuth2: Getting Access Token via Refresh Token...")
r = requests.post(token_url, data={"grant_type": "refresh_token", "refresh_token": refresh_token})
r.raise_for_status()
access_token = r.json()["access_token"]
# ===== Use Access Token as Bearer token from them on ===== #
auth_headers = {"Authorization": "Bearer " + access_token}
kwargs = {"headers": auth_headers, "allow_redirects": False}
logging.info("1. Creating Bill Upload...")
bill_upload = {"billConnectId": bill_connect_id, "billingPeriod": period}
r = requests.post(bill_upload_url, json.dumps(bill_upload), **kwargs)
logging.info("Response: {}\n{}".format(r.status_code, json.dumps(r.json(), indent=4)))
r.raise_for_status()
bill_upload_id = r.json()["id"]
logging.info("2. Upload file {} to Bill Upload {}...".format(filename, bill_upload_id))
upload_file_url = "{}/{}/files/{}".format(bill_upload_url, bill_upload_id, filename)
with open(filename, 'rb') as f:
    r = requests.post(upload_file_url, data=f.read(), **kwargs)
logging.info("Response: {}\n{}".format(r.status_code, json.dumps(r.json(), indent=4)))
r.raise_for_status()
logging.info("3. Committing the Bill Upload {}...".format(bill_upload_id))
operations_url = "{}/{}/operations".format(bill_upload_url, bill_upload_id)
r = requests.post(operations_url, '{"operation":"commit"}', **kwargs)
logging.info("Response: {}\n{}".format(r.status_code, json.dumps(r.json(), indent=4)))
r.raise_for_status()
# ===== That's all, folks! =====
sys.exit(0)
| 44.4
| 106
| 0.703346
|
24370bf808d59eecdd28d259098c00d686a8b838
| 673
|
py
|
Python
|
tests/test_utils_get_percent.py
|
loonghao/dayu_widgets
|
42758872993197880f68d141ee1ce314f9b2cfea
|
[
"MIT"
] | null | null | null |
tests/test_utils_get_percent.py
|
loonghao/dayu_widgets
|
42758872993197880f68d141ee1ce314f9b2cfea
|
[
"MIT"
] | null | null | null |
tests/test_utils_get_percent.py
|
loonghao/dayu_widgets
|
42758872993197880f68d141ee1ce314f9b2cfea
|
[
"MIT"
] | null | null | null |
"""
Test get_percent.
"""
# Import future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import third-party modules
from dayu_widgets import utils
import pytest
@pytest.mark.parametrize(
"value, mini, maxi, result",
(
(0, 0, 100, 0),
(100, 0, 100, 100),
(1, 0, 100, 1),
(99, 0, 100, 99),
(-1, 0, 100, 0),
(101, 0, 100, 100),
(101, 10, 110, 91),
(10, 100, 100, 100),
),
)
def test_get_percent(value, mini, maxi, result):
"""Test get_percent with normal arg."""
assert utils.get_percent(value, mini, maxi) == result
| 22.433333
| 57
| 0.604755
|
fa5093b0e6a7cb8549d778cf9421d38ddf7addaf
| 15,296
|
py
|
Python
|
bin/ser.py
|
samsledje/evolocity
|
2b162ff61d4239ba5af06a601e5bb62f501d4a0f
|
[
"MIT"
] | 1
|
2021-06-15T02:36:57.000Z
|
2021-06-15T02:36:57.000Z
|
bin/ser.py
|
samsledje/evolocity
|
2b162ff61d4239ba5af06a601e5bb62f501d4a0f
|
[
"MIT"
] | null | null | null |
bin/ser.py
|
samsledje/evolocity
|
2b162ff61d4239ba5af06a601e5bb62f501d4a0f
|
[
"MIT"
] | null | null | null |
from mutation import *
from evolocity_graph import *
import evolocity as evo
np.random.seed(1)
random.seed(1)
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='Serpin sequence analysis')
parser.add_argument('model_name', type=str,
help='Type of language model (e.g., hmm, lstm)')
parser.add_argument('--namespace', type=str, default='ser',
help='Model namespace')
parser.add_argument('--dim', type=int, default=512,
help='Embedding dimension')
parser.add_argument('--batch-size', type=int, default=1000,
help='Training minibatch size')
parser.add_argument('--n-epochs', type=int, default=20,
help='Number of training epochs')
parser.add_argument('--seed', type=int, default=1,
help='Random seed')
parser.add_argument('--checkpoint', type=str, default=None,
help='Model checkpoint')
parser.add_argument('--train', action='store_true',
help='Train model')
parser.add_argument('--train-split', action='store_true',
help='Train model on portion of data')
parser.add_argument('--test', action='store_true',
help='Test model')
parser.add_argument('--ancestral', action='store_true',
help='Analyze ancestral sequences')
parser.add_argument('--evolocity', action='store_true',
help='Analyze evolocity')
args = parser.parse_args()
return args
def load_taxonomy():
tax_fnames = [
'data/cyc/taxonomy_archaea.tab.gz',
'data/cyc/taxonomy_bacteria.tab.gz',
'data/cyc/taxonomy_eukaryota.tab.gz',
'data/cyc/taxonomy_unclassified.tab.gz',
'data/cyc/taxonomy_viruses.tab.gz',
]
import gzip
taxonomy = {}
for fname in tax_fnames:
with gzip.open(fname) as f:
header = f.readline().decode('utf-8').rstrip().split('\t')
assert(header[0] == 'Taxon' and header[8] == 'Lineage')
for line in f:
fields = line.decode('utf-8').rstrip().split('\t')
tax_id = fields[0]
lineage = fields[8]
taxonomy[tax_id] = lineage
return taxonomy
def parse_meta(record, taxonomy):
if 'GN=' in record:
(_, accession, gene_id, name, species, species_id,
gene_symbol, pe, sv) = record.split('|')
else:
(_, accession, gene_id, name, species, species_id,
pe, sv) = record.split('|')
gene_symbol = None
tax_id = species_id[3:]
lineage = taxonomy[tax_id]
tax_kingdom = None
tax_group = None
if 'Archaea' in lineage:
tax_group = 'archaea'
tax_kingdom = 'archaea'
if 'Bacteria' in lineage:
tax_group = 'bacteria'
tax_kingdom = 'bacteria'
if 'Eukaryota' in lineage:
tax_group = 'eukaryota'
tax_kingdom = 'eukaryota'
if 'Fungi' in lineage:
tax_group = 'fungi'
tax_kingdom = 'eukaryota'
if 'Viridiplantae' in lineage:
tax_group = 'viridiplantae'
tax_kingdom = 'eukaryota'
if 'Arthropoda' in lineage:
tax_group = 'arthropoda'
tax_kingdom = 'eukaryota'
if 'Chordata' in lineage:
tax_group = 'chordata'
tax_kingdom = 'eukaryota'
if 'Mammalia' in lineage:
tax_group = 'mammalia'
tax_kingdom = 'eukaryota'
if 'Primate' in lineage:
tax_group = 'primate'
tax_kingdom = 'eukaryota'
if 'Virus' in lineage:
tax_group = 'virus'
tax_kingdom = 'other'
if 'unclassified sequences;' in lineage:
tax_group = 'unclassified'
tax_kingdom = 'other'
if 'metagenomes;' in lineage:
tax_group = 'metagenome'
tax_kingdom = 'other'
assert(tax_group is not None)
return {
'accession': accession,
'gene_id': gene_id,
'name': name,
'species': species[3:],
'tax_id': tax_id,
'tax_group': tax_group,
'tax_kingdom': tax_kingdom,
'lineage': lineage,
'gene_symbol': gene_symbol[3:] if gene_symbol is not None else None,
'pe': pe[3:],
'sv': sv[3:],
}
def process(fnames):
taxonomy = load_taxonomy()
seqs = {}
for fname in fnames:
for record in SeqIO.parse(fname, 'fasta'):
if len(record.seq) < 300 or len(record.seq) > 525:
continue
meta = parse_meta(record.id, taxonomy)
if record.seq not in seqs:
seqs[record.seq] = []
meta['seq_len'] = len(record.seq)
seqs[record.seq].append(meta)
#seqs = training_distances(seqs, namespace=args.namespace)
return seqs
def split_seqs(seqs, split_method='random'):
raise NotImplementedError('split_seqs not implemented')
def setup(args):
fnames = [ 'data/ser/uniprot_serpins.fa' ]
import pickle
cache_fname = 'target/ev_cache/ser_seqs.pkl'
try:
with open(cache_fname, 'rb') as f:
seqs = pickle.load(f)
except:
seqs = process(fnames)
with open(cache_fname, 'wb') as of:
pickle.dump(seqs, of)
seq_len = max([ len(seq) for seq in seqs ]) + 2
vocab_size = len(AAs) + 2
model = get_model(args, seq_len, vocab_size)
return model, seqs
def plot_umap(adata, namespace='ser'):
sc.pl.umap(adata, color='tax_group', edges=True, edges_color='#cccccc',
save='_{}_taxonomy.png'.format(namespace))
sc.pl.umap(adata, color='louvain', edges=True, edges_color='#cccccc',
save='_{}_louvain.png'.format(namespace))
sc.pl.umap(adata, color='seq_len', edges=True, edges_color='#cccccc',
save='_{}_seqlen.png'.format(namespace))
sc.pl.umap(adata, color='homology', edges=True, edges_color='#cccccc',
save='_{}_homology.png'.format(namespace))
sc.pl.umap(adata, color='tax_kingdom', edges=True, edges_color='#cccccc',
save='_{}_kingdom.png'.format(namespace))
def seqs_to_anndata(seqs):
X, obs = [], {}
obs['n_seq'] = []
obs['seq'] = []
for seq in seqs:
meta = seqs[seq][0]
X.append(meta['embedding'])
for key in meta:
if key == 'embedding':
continue
if key not in obs:
obs[key] = []
obs[key].append(Counter([
meta[key] for meta in seqs[seq]
]).most_common(1)[0][0])
obs['n_seq'].append(len(seqs[seq]))
obs['seq'].append(str(seq))
X = np.array(X)
adata = AnnData(X)
for key in obs:
adata.obs[key] = obs[key]
return adata
def serpin_ancestral(args, model, seqs, vocabulary, namespace='ser'):
path_fname = 'data/ser/ancestral_ser_codeml.fa'
nodes = [
(record.id, str(record.seq))
for record in SeqIO.parse(path_fname, 'fasta')
]
######################################
## See how local likelihoods change ##
######################################
tax_types = {
'archaea',
'bacteria',
'eukaryota',
}
dist_data = []
for idx, (name, seq) in enumerate(nodes):
for uniprot_seq in seqs:
tax_type = Counter([
meta['tax_kingdom'] for meta in seqs[uniprot_seq]
]).most_common(1)[0][0]
if tax_type not in tax_types:
continue
score = likelihood_muts(seq, uniprot_seq,
args, vocabulary, model,)
homology = fuzz.ratio(seq, uniprot_seq)
dist_data.append([ tax_type, name, score, homology ])
df = pd.DataFrame(dist_data, columns=[
'tax_type', 'name', 'score', 'homology'
])
plot_ancestral(df, meta_key='tax_type', namespace=namespace)
plot_ancestral(df, meta_key='name', name_key='tax_type', namespace=namespace)
def evo_serpins(args, model, seqs, vocabulary, namespace='ser'):
#########################
## Visualize landscape ##
#########################
adata_cache = 'target/ev_cache/ser_adata.h5ad'
try:
import anndata
adata = anndata.read_h5ad(adata_cache)
except:
seqs = populate_embedding(args, model, seqs, vocabulary, use_cache=True)
adata = seqs_to_anndata(seqs)
sc.pp.neighbors(adata, n_neighbors=50, use_rep='X')
sc.tl.louvain(adata, resolution=1.)
sc.tl.umap(adata, min_dist=1.)
adata.write(adata_cache)
if 'homologous' in namespace:
adata = adata[adata.obs['homology'] > 80.]
sc.pp.neighbors(adata, n_neighbors=50, use_rep='X')
sc.tl.louvain(adata, resolution=1.)
sc.tl.umap(adata, min_dist=1.)
tprint('Analyzing {} sequences...'.format(adata.X.shape[0]))
evo.set_figure_params(dpi_save=500)
plot_umap(adata, namespace=namespace)
#####################################
## Compute evolocity and visualize ##
#####################################
cache_prefix = f'target/ev_cache/{namespace}_knn50'
try:
from scipy.sparse import load_npz
adata.uns["velocity_graph"] = load_npz(
'{}_vgraph.npz'.format(cache_prefix)
)
adata.uns["velocity_graph_neg"] = load_npz(
'{}_vgraph_neg.npz'.format(cache_prefix)
)
adata.obs["velocity_self_transition"] = np.load(
'{}_vself_transition.npy'.format(cache_prefix)
)
adata.layers["velocity"] = np.zeros(adata.X.shape)
except:
evo.tl.velocity_graph(adata, model_name=args.model_name)
from scipy.sparse import save_npz
save_npz('{}_vgraph.npz'.format(cache_prefix),
adata.uns["velocity_graph"],)
save_npz('{}_vgraph_neg.npz'.format(cache_prefix),
adata.uns["velocity_graph_neg"],)
np.save('{}_vself_transition.npy'.format(cache_prefix),
adata.obs["velocity_self_transition"],)
evo.tl.velocity_embedding(adata, basis='umap', scale=1.,
self_transitions=True,
use_negative_cosines=True,
retain_scale=False,
autoscale=True,)
evo.pl.velocity_embedding(
adata, basis='umap', color='tax_group',
save=f'_{namespace}_taxonomy_velo.png',
)
# Grid visualization.
plt.figure()
ax = evo.pl.velocity_embedding_grid(
adata, basis='umap', min_mass=1., smooth=1.,
arrow_size=1., arrow_length=3.,
color='tax_kingdom', show=False,
)
plt.tight_layout(pad=1.1)
plt.subplots_adjust(right=0.85)
plt.savefig(f'figures/evolocity__{namespace}_taxonomy_velogrid.png', dpi=500)
plt.close()
# Streamplot visualization.
plt.figure()
ax = evo.pl.velocity_embedding_stream(
adata, basis='umap', min_mass=2., smooth=1., density=1.,
color='tax_kingdom', legend_loc=False, show=False,
)
sc.pl._utils.plot_edges(ax, adata, 'umap', 0.1, '#aaaaaa')
plt.tight_layout(pad=1.1)
plt.subplots_adjust(right=0.85)
plt.savefig(f'figures/evolocity__{namespace}_taxonomy_velostream.png', dpi=500)
plt.close()
plt.figure()
ax = evo.pl.velocity_contour(
adata,
basis='umap', smooth=0.5, pf_smooth=1., levels=100,
arrow_size=1., arrow_length=3., cmap='coolwarm',
c='#aaaaaa', show=False,
)
plt.tight_layout(pad=1.1)
plt.savefig(f'figures/evolocity__{namespace}_contour.png', dpi=500)
plt.close()
sc.pl.umap(adata, color=[ 'root_nodes', 'end_points' ],
edges=True, edges_color='#cccccc',
cmap=plt.cm.get_cmap('magma').reversed(),
save=f'_{namespace}_origins.png')
plt.figure(figsize=(3, 6))
sns.boxplot(data=adata.obs, x='tax_kingdom', y='pseudotime',
order=[
'archaea', 'bacteria', 'eukaryota',
])
plt.xticks(rotation=60)
plt.tight_layout()
plt.savefig(f'figures/{namespace}_taxonomy_pseudotime.svg', dpi=500)
plt.close()
sc.pl.umap(adata, color='pseudotime',
edges=True, edges_color='#cccccc',
cmap='inferno', save=f'_{namespace}_pseudotime.png')
with open(f'target/ev_cache/{namespace}_pseudotime.txt', 'w') as of:
of.write('\n'.join([ str(x) for x in adata.obs['pseudotime'] ]) + '\n')
nnan_idx = (np.isfinite(adata.obs['homology']) &
np.isfinite(adata.obs['pseudotime']))
tprint('Pseudotime-homology Spearman r = {}, P = {}'
.format(*ss.spearmanr(adata.obs['pseudotime'][nnan_idx],
adata.obs['homology'][nnan_idx],
nan_policy='omit')))
tprint('Pseudotime-homology Pearson r = {}, P = {}'
.format(*ss.pearsonr(adata.obs['pseudotime'][nnan_idx],
adata.obs['homology'][nnan_idx])))
seqlens = [ len(seq) for seq in adata.obs['seq'] ]
tprint('Pseudotime-length Spearman r = {}, P = {}'
.format(*ss.spearmanr(adata.obs['pseudotime'], seqlens,
nan_policy='omit')))
if __name__ == '__main__':
args = parse_args()
namespace = args.namespace
if args.model_name == 'tape':
namespace += '_tape'
AAs = [
'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H',
'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W',
'Y', 'V', 'X', 'Z', 'J', 'U', 'B', 'Z'
]
vocabulary = { aa: idx + 1 for idx, aa in enumerate(sorted(AAs)) }
model, seqs = setup(args)
if 'esm' in args.model_name:
vocabulary = { tok: model.alphabet_.tok_to_idx[tok]
for tok in model.alphabet_.tok_to_idx
if '<' not in tok and tok != '.' and tok != '-' }
args.checkpoint = args.model_name
elif args.model_name == 'tape':
vocabulary = { tok: model.alphabet_[tok]
for tok in model.alphabet_ if '<' not in tok }
args.checkpoint = args.model_name
elif args.checkpoint is not None:
model.model_.load_weights(args.checkpoint)
tprint('Model summary:')
tprint(model.model_.summary())
if args.train or args.train_split or args.test:
train_test(args, model, seqs, vocabulary, split_seqs)
if args.ancestral:
if args.checkpoint is None and not args.train:
raise ValueError('Model must be trained or loaded '
'from checkpoint.')
tprint('Ancestral analysis...')
serpin_ancestral(args, model, seqs, vocabulary, namespace=namespace)
if args.evolocity:
if args.checkpoint is None and not args.train:
raise ValueError('Model must be trained or loaded '
'from checkpoint.')
tprint('All serpin sequences:')
evo_serpins(args, model, seqs, vocabulary, namespace=namespace)
if args.model_name != 'tape':
tprint('Restrict based on similarity to training')
evo_serpins(args, model, seqs, vocabulary, namespace='ser_homologous')
| 35.24424
| 83
| 0.572699
|
37dc517d901bc8d9ce258a04105c3c398c118489
| 7,637
|
py
|
Python
|
pynamodb/constants.py
|
ikonst/PynamoDB
|
cd705cc4e0e3dd365c7e0773f6bc02fe071a0631
|
[
"MIT"
] | null | null | null |
pynamodb/constants.py
|
ikonst/PynamoDB
|
cd705cc4e0e3dd365c7e0773f6bc02fe071a0631
|
[
"MIT"
] | null | null | null |
pynamodb/constants.py
|
ikonst/PynamoDB
|
cd705cc4e0e3dd365c7e0773f6bc02fe071a0631
|
[
"MIT"
] | null | null | null |
"""
Pynamodb constants
"""
# Operations
BATCH_WRITE_ITEM = 'BatchWriteItem'
DESCRIBE_TABLE = 'DescribeTable'
BATCH_GET_ITEM = 'BatchGetItem'
CREATE_TABLE = 'CreateTable'
UPDATE_TABLE = 'UpdateTable'
DELETE_TABLE = 'DeleteTable'
LIST_TABLES = 'ListTables'
UPDATE_ITEM = 'UpdateItem'
DELETE_ITEM = 'DeleteItem'
GET_ITEM = 'GetItem'
PUT_ITEM = 'PutItem'
QUERY = 'Query'
SCAN = 'Scan'
# Request Parameters
GLOBAL_SECONDARY_INDEX_UPDATES = 'GlobalSecondaryIndexUpdates'
RETURN_ITEM_COLL_METRICS = 'ReturnItemCollectionMetrics'
EXCLUSIVE_START_TABLE_NAME = 'ExclusiveStartTableName'
RETURN_CONSUMED_CAPACITY = 'ReturnConsumedCapacity'
COMPARISON_OPERATOR = 'ComparisonOperator'
SCAN_INDEX_FORWARD = 'ScanIndexForward'
ATTR_DEFINITIONS = 'AttributeDefinitions'
ATTR_VALUE_LIST = 'AttributeValueList'
TABLE_DESCRIPTION = 'TableDescription'
UNPROCESSED_KEYS = 'UnprocessedKeys'
UNPROCESSED_ITEMS = 'UnprocessedItems'
CONSISTENT_READ = 'ConsistentRead'
DELETE_REQUEST = 'DeleteRequest'
RETURN_VALUES = 'ReturnValues'
REQUEST_ITEMS = 'RequestItems'
ATTRS_TO_GET = 'AttributesToGet'
ATTR_UPDATES = 'AttributeUpdates'
TABLE_STATUS = 'TableStatus'
SCAN_FILTER = 'ScanFilter'
TABLE_NAME = 'TableName'
KEY_SCHEMA = 'KeySchema'
ATTR_NAME = 'AttributeName'
ATTR_TYPE = 'AttributeType'
ITEM_COUNT = 'ItemCount'
CAMEL_COUNT = 'Count'
PUT_REQUEST = 'PutRequest'
INDEX_NAME = 'IndexName'
ATTRIBUTES = 'Attributes'
TABLE_KEY = 'Table'
RESPONSES = 'Responses'
RANGE_KEY = 'RangeKey'
KEY_TYPE = 'KeyType'
ACTION = 'Action'
UPDATE = 'Update'
EXISTS = 'Exists'
SELECT = 'Select'
ACTIVE = 'ACTIVE'
LIMIT = 'Limit'
ITEMS = 'Items'
ITEM = 'Item'
KEYS = 'Keys'
UTC = 'UTC'
KEY = 'Key'
# Defaults
DEFAULT_ENCODING = 'utf-8'
DEFAULT_REGION = 'us-east-1'
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f%z'
SERVICE_NAME = 'dynamodb'
HTTP_OK = 200
HTTP_BAD_REQUEST = 400
# Create Table arguments
PROVISIONED_THROUGHPUT = 'ProvisionedThroughput'
READ_CAPACITY_UNITS = 'ReadCapacityUnits'
WRITE_CAPACITY_UNITS = 'WriteCapacityUnits'
STRING_SHORT = 'S'
STRING_SET_SHORT = 'SS'
NUMBER_SHORT = 'N'
NUMBER_SET_SHORT = 'NS'
BINARY_SHORT = 'B'
BINARY_SET_SHORT = 'BS'
MAP_SHORT = 'M'
LIST_SHORT = 'L'
BOOLEAN = 'BOOL'
BOOLEAN_SHORT = 'BOOL'
STRING = 'String'
STRING_SET = 'StringSet'
NUMBER = 'Number'
NUMBER_SET = 'NumberSet'
BINARY = 'Binary'
BINARY_SET = 'BinarySet'
MAP = 'Map'
LIST = 'List'
NULL = 'NULL'
NULL_SHORT = 'NULL'
SHORT_ATTR_TYPES = [STRING_SHORT, STRING_SET_SHORT, NUMBER_SHORT, NUMBER_SET_SHORT,
BINARY_SHORT, BINARY_SET_SHORT, MAP_SHORT, LIST_SHORT, BOOLEAN_SHORT, NULL_SHORT]
ATTR_TYPE_MAP = {
STRING: STRING_SHORT,
STRING_SET: STRING_SET_SHORT,
NUMBER: NUMBER_SHORT,
NUMBER_SET: NUMBER_SET_SHORT,
BINARY: BINARY_SHORT,
BINARY_SET: BINARY_SET_SHORT,
STRING_SHORT: STRING,
STRING_SET_SHORT: STRING_SET,
NUMBER_SHORT: NUMBER,
NUMBER_SET_SHORT: NUMBER_SET,
BINARY_SHORT: BINARY,
BINARY_SET_SHORT: BINARY_SET,
MAP: MAP_SHORT,
LIST: LIST_SHORT,
BOOLEAN: BOOLEAN_SHORT,
NULL: NULL_SHORT,
}
# Constants needed for creating indexes
LOCAL_SECONDARY_INDEX = 'LocalSecondaryIndex'
LOCAL_SECONDARY_INDEXES = 'LocalSecondaryIndexes'
GLOBAL_SECONDARY_INDEX = 'GlobalSecondaryIndex'
GLOBAL_SECONDARY_INDEXES = 'GlobalSecondaryIndexes'
PROJECTION = 'Projection'
PROJECTION_TYPE = 'ProjectionType'
NON_KEY_ATTRIBUTES = 'NonKeyAttributes'
KEYS_ONLY = 'KEYS_ONLY'
ALL = 'ALL'
INCLUDE = 'INCLUDE'
# Constants for Dynamodb Streams
STREAM_VIEW_TYPE = 'StreamViewType'
STREAM_SPECIFICATION = 'StreamSpecification'
STREAM_ENABLED = 'StreamEnabled'
STREAM_NEW_IMAGE = 'NEW_IMAGE'
STREAM_OLD_IMAGE = 'OLD_IMAGE'
STREAM_NEW_AND_OLD_IMAGE = 'NEW_AND_OLD_IMAGES'
STREAM_KEYS_ONLY = 'KEYS_ONLY'
# These are constants used in the KeyConditions parameter
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-KeyConditions
EXCLUSIVE_START_KEY = 'ExclusiveStartKey'
LAST_EVALUATED_KEY = 'LastEvaluatedKey'
QUERY_FILTER = 'QueryFilter'
BEGINS_WITH = 'BEGINS_WITH'
BETWEEN = 'BETWEEN'
EQ = 'EQ'
NE = 'NE'
LE = 'LE'
LT = 'LT'
GE = 'GE'
GT = 'GT'
IN = 'IN'
KEY_CONDITIONS = 'KeyConditions'
COMPARISON_OPERATOR_VALUES = [EQ, LE, LT, GE, GT, BEGINS_WITH, BETWEEN]
QUERY_OPERATOR_MAP = {
'eq': EQ,
'le': LE,
'lt': LT,
'ge': GE,
'gt': GT,
'begins_with': BEGINS_WITH,
'between': BETWEEN
}
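# Illustrative note (hypothetical attribute name and value): a single KeyConditions
# entry built from the constants above takes the legacy wire form
#   {'user_id': {'AttributeValueList': [{STRING_SHORT: 'alice'}], 'ComparisonOperator': EQ}}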
# These are the valid select values for the Scan operation
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html#DDB-Scan-request-Select
NOT_NULL = 'NOT_NULL'
CONTAINS = 'CONTAINS'
NOT_CONTAINS = 'NOT_CONTAINS'
ALL_ATTRIBUTES = 'ALL_ATTRIBUTES'
ALL_PROJECTED_ATTRIBUTES = 'ALL_PROJECTED_ATTRIBUTES'
SPECIFIC_ATTRIBUTES = 'SPECIFIC_ATTRIBUTES'
COUNT = 'COUNT'
SELECT_VALUES = [ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT]
SCAN_OPERATOR_MAP = {
'eq': EQ,
'ne': NE,
'le': LE,
'lt': LT,
'ge': GE,
'gt': GT,
'not_null': NOT_NULL,
'null': NULL,
'contains': CONTAINS,
'not_contains': NOT_CONTAINS,
'begins_with': BEGINS_WITH,
'in': IN,
'between': BETWEEN
}
QUERY_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP
DELETE_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP
UPDATE_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP
PUT_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP
# These are the valid comparison operators for the Scan operation
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html#DDB-Scan-request-ScanFilter
SEGMENT = 'Segment'
TOTAL_SEGMENTS = 'TotalSegments'
SCAN_FILTER_VALUES = [EQ, NE, LE, LT, GE, GT, NOT_NULL, NULL, CONTAINS, NOT_CONTAINS, BEGINS_WITH, IN, BETWEEN]
QUERY_FILTER_VALUES = SCAN_FILTER_VALUES
DELETE_FILTER_VALUES = SCAN_FILTER_VALUES
# These are constants used in the expected condition for PutItem
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-Expected
VALUE = 'Value'
EXPECTED = 'Expected'
# These are the valid ReturnConsumedCapacity values used in multiple operations
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchGetItem.html#DDB-BatchGetItem-request-ReturnConsumedCapacity
CONSUMED_CAPACITY = 'ConsumedCapacity'
CAPACITY_UNITS = 'CapacityUnits'
INDEXES = 'INDEXES'
TOTAL = 'TOTAL'
NONE = 'NONE'
RETURN_CONSUMED_CAPACITY_VALUES = [INDEXES, TOTAL, NONE]
# These are the valid ReturnItemCollectionMetrics values used in multiple operations
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html#DDB-BatchWriteItem-request-ReturnItemCollectionMetrics
SIZE = 'SIZE'
RETURN_ITEM_COLL_METRICS_VALUES = [SIZE, NONE]
# These are the valid ReturnValues values used in the PutItem operation
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-ReturnValues
ALL_OLD = 'ALL_OLD'
UPDATED_OLD = 'UPDATED_OLD'
ALL_NEW = 'ALL_NEW'
UPDATED_NEW = 'UPDATED_NEW'
RETURN_VALUES_VALUES = [NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW]
# These are constants used in the AttributeUpdates parameter for UpdateItem
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdates
PUT = 'PUT'
DELETE = 'DELETE'
ADD = 'ADD'
ATTR_UPDATE_ACTIONS = [PUT, DELETE, ADD]
BATCH_GET_PAGE_LIMIT = 100
BATCH_WRITE_PAGE_LIMIT = 25
META_CLASS_NAME = "Meta"
REGION = "region"
HOST = "host"
# The constants are needed for the ConditionalOperator argument used
# UpdateItem, PutItem and DeleteItem
CONDITIONAL_OPERATOR = 'ConditionalOperator'
AND = 'AND'
OR = 'OR'
CONDITIONAL_OPERATORS = [AND, OR]
| 30.305556
| 147
| 0.771507
|
147199c5a8dd97154948d8177e6c2b1742cc8c18
| 6,917
|
py
|
Python
|
virtualenv/lib/python3.6/site-packages/docutils/parsers/rst/directives/images.py
|
coderunn3r/HadoopSpike
|
3e57219d0489fae1d755bc4bd97eaf22f1898464
|
[
"MIT"
] | null | null | null |
virtualenv/lib/python3.6/site-packages/docutils/parsers/rst/directives/images.py
|
coderunn3r/HadoopSpike
|
3e57219d0489fae1d755bc4bd97eaf22f1898464
|
[
"MIT"
] | null | null | null |
virtualenv/lib/python3.6/site-packages/docutils/parsers/rst/directives/images.py
|
coderunn3r/HadoopSpike
|
3e57219d0489fae1d755bc4bd97eaf22f1898464
|
[
"MIT"
] | null | null | null |
# $Id: images.py 7594 2013-01-21 17:14:17Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for figures and simple images.
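
Example reStructuredText usage of the directives defined below (an illustrative
sketch; the file name is hypothetical)::

    .. image:: picture.png
       :alt: alternate text
       :width: 200px
       :align: center

    .. figure:: picture.png
       :figwidth: 80%

       This paragraph becomes the figure caption.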
"""
__docformat__ = 'reStructuredText'
import sys
import urllib.request, urllib.parse, urllib.error
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, states
from docutils.nodes import fully_normalize_name, whitespace_normalize_name
from docutils.parsers.rst.roles import set_classes
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
class Image(Directive):
align_h_values = ('left', 'center', 'right')
align_v_values = ('top', 'middle', 'bottom')
align_values = align_v_values + align_h_values
def align(argument):
# This is not callable as self.align. We cannot make it a
# staticmethod because we're saving an unbound method in
# option_spec below.
return directives.choice(argument, Image.align_values)
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.percentage,
'align': align,
'name': directives.unchanged,
'target': directives.unchanged_required,
'class': directives.class_option}
def run(self):
if 'align' in self.options:
if isinstance(self.state, states.SubstitutionDef):
# Check for align_v_values.
if self.options['align'] not in self.align_v_values:
raise self.error(
'Error in "%s" directive: "%s" is not a valid value '
'for the "align" option within a substitution '
'definition. Valid values for "align" are: "%s".'
% (self.name, self.options['align'],
'", "'.join(self.align_v_values)))
elif self.options['align'] not in self.align_h_values:
raise self.error(
'Error in "%s" directive: "%s" is not a valid value for '
'the "align" option. Valid values for "align" are: "%s".'
% (self.name, self.options['align'],
'", "'.join(self.align_h_values)))
messages = []
reference = directives.uri(self.arguments[0])
self.options['uri'] = reference
reference_node = None
if 'target' in self.options:
block = states.escape2null(
self.options['target']).splitlines()
block = [line for line in block]
target_type, data = self.state.parse_target(
block, self.block_text, self.lineno)
if target_type == 'refuri':
reference_node = nodes.reference(refuri=data)
elif target_type == 'refname':
reference_node = nodes.reference(
refname=fully_normalize_name(data),
name=whitespace_normalize_name(data))
reference_node.indirect_reference_name = data
self.state.document.note_refname(reference_node)
else: # malformed target
messages.append(data) # data is a system message
del self.options['target']
set_classes(self.options)
image_node = nodes.image(self.block_text, **self.options)
self.add_name(image_node)
if reference_node:
reference_node += image_node
return messages + [reference_node]
else:
return messages + [image_node]
class Figure(Image):
def align(argument):
return directives.choice(argument, Figure.align_h_values)
def figwidth_value(argument):
if argument.lower() == 'image':
return 'image'
else:
return directives.length_or_percentage_or_unitless(argument, 'px')
option_spec = Image.option_spec.copy()
option_spec['figwidth'] = figwidth_value
option_spec['figclass'] = directives.class_option
option_spec['align'] = align
has_content = True
def run(self):
figwidth = self.options.pop('figwidth', None)
figclasses = self.options.pop('figclass', None)
align = self.options.pop('align', None)
(image_node,) = Image.run(self)
if isinstance(image_node, nodes.system_message):
return [image_node]
figure_node = nodes.figure('', image_node)
if figwidth == 'image':
if PIL and self.state.document.settings.file_insertion_enabled:
imagepath = urllib.request.url2pathname(image_node['uri'])
try:
img = PIL.Image.open(
imagepath.encode(sys.getfilesystemencoding()))
except (IOError, UnicodeEncodeError):
pass # TODO: warn?
else:
self.state.document.settings.record_dependencies.add(
imagepath.replace('\\', '/'))
figure_node['width'] = img.size[0]
del img
elif figwidth is not None:
figure_node['width'] = figwidth
if figclasses:
figure_node['classes'] += figclasses
if align:
figure_node['align'] = align
if self.content:
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
first_node = node[0]
if isinstance(first_node, nodes.paragraph):
caption = nodes.caption(first_node.rawsource, '',
*first_node.children)
caption.source = first_node.source
caption.line = first_node.line
figure_node += caption
elif not (isinstance(first_node, nodes.comment)
and len(first_node) == 0):
error = self.state_machine.reporter.error(
'Figure caption must be a paragraph or empty comment.',
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [figure_node, error]
if len(node) > 1:
figure_node += nodes.legend('', *node[1:])
return [figure_node]
| 41.921212
| 78
| 0.579297
|
fa7cc78d24aa0494751f6ff87fcc78c384ab4415
| 662
|
py
|
Python
|
prime_numbers_between_U_&_L.py
|
Kaustav-CS/Y_2021
|
db21c67ec14f5aae511f6e0d1ef2c68e39b075db
|
[
"Unlicense"
] | 1
|
2021-10-03T04:06:53.000Z
|
2021-10-03T04:06:53.000Z
|
prime_numbers_between_U_&_L.py
|
Kaustav-CS/Y_2021
|
db21c67ec14f5aae511f6e0d1ef2c68e39b075db
|
[
"Unlicense"
] | 1
|
2021-09-17T21:08:57.000Z
|
2021-09-17T21:08:57.000Z
|
prime_numbers_between_U_&_L.py
|
Kaustav-CS/Y_2021
|
db21c67ec14f5aae511f6e0d1ef2c68e39b075db
|
[
"Unlicense"
] | null | null | null |
import time
fh = open("Prime_numbers_02.txt",'w')
#L =input('Enter Starting number of the number set:\t')
L = 10
lower=int(L)
#U =input('Enter Last number of the number set:\t')
U = 10**31
upper=int(U)
print("Prime numbers between", lower, "and", upper, "are:\t")
print("Prime numbers between", lower, "and", upper, "are:\t",file = fh)
for num in reversed(range(lower, upper + 1)):
# all prime numbers are greater than 1
if num > 1:
for i in range(2, num):
if (num % i) == 0:
break
else:
print(num )
print(num, file = fh)
time.sleep(10)
print("\t\t Task End")
#quit()
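# Editor's note: the loop above tests every candidate divisor below num,
# which is infeasible for an upper bound of 10**31. A common refinement
# (sketch only, not part of the original script) stops at the integer
# square root of the candidate:
#
#   import math
#
#   def is_prime(n):
#       if n < 2:
#           return False
#       for i in range(2, math.isqrt(n) + 1):
#           if n % i == 0:
#               return False
#       return True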
| 23.642857
| 72
| 0.567976
|
fe0b2dd46475556b10f7689155b4a63badd07c4f
| 759
|
py
|
Python
|
examples/flight_delay_prediction/flight_delay_prediction.py
|
akshitsingla/amadeus-python
|
d8f3595e556b674998156f98d8a318045bb4c21c
|
[
"MIT"
] | null | null | null |
examples/flight_delay_prediction/flight_delay_prediction.py
|
akshitsingla/amadeus-python
|
d8f3595e556b674998156f98d8a318045bb4c21c
|
[
"MIT"
] | null | null | null |
examples/flight_delay_prediction/flight_delay_prediction.py
|
akshitsingla/amadeus-python
|
d8f3595e556b674998156f98d8a318045bb4c21c
|
[
"MIT"
] | null | null | null |
from amadeus import Client, ResponseError
amadeus = Client()
try:
'''
    Will there be a delay from NCE to IST? If so, how much delay?
'''
response = amadeus.travel.predictions.flight_delay.get(originLocationCode='NCE', destinationLocationCode='IST',
departureDate='2020-08-01', departureTime='18:20:00',
arrivalDate='2020-08-01', arrivalTime='22:15:00',
aircraftCode='321', carrierCode='TK',
flightNumber='1816', duration='PT31H10M')
# print(response.data)
except ResponseError as error:
raise error
| 44.647059
| 115
| 0.500659
|
a80caa883350a9dbb83fdcf28932e83feabb0bf0
| 675
|
py
|
Python
|
backend/infovis/nicetable/serializers.py
|
cientopolis/data-viz
|
1c555db4af1f744b22c1a84d8fc73817263373a7
|
[
"MIT"
] | null | null | null |
backend/infovis/nicetable/serializers.py
|
cientopolis/data-viz
|
1c555db4af1f744b22c1a84d8fc73817263373a7
|
[
"MIT"
] | 12
|
2020-02-12T01:22:45.000Z
|
2022-02-26T16:37:56.000Z
|
backend/infovis/nicetable/serializers.py
|
cientopolis/data-viz
|
1c555db4af1f744b22c1a84d8fc73817263373a7
|
[
"MIT"
] | 1
|
2019-10-02T20:04:23.000Z
|
2019-10-02T20:04:23.000Z
|
from rest_framework import serializers
from .models import TablePersistence, ColumnPersistence, ChartPersistence
class ColumnSerializer(serializers.ModelSerializer):
class Meta:
model = ColumnPersistence
fields = ['id', 'table', 'index', 'title', 'column_type', 'visible']
class TableSerializer(serializers.ModelSerializer):
columns = ColumnSerializer(many=True, read_only=True)
class Meta:
model = TablePersistence
fields = ['id', 'domain', 'identificator', 'columns']
class ChartSerializer(serializers.ModelSerializer):
class Meta:
model = ChartPersistence
fields = ['id', 'table', 'chart_type', 'conf']
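# Editor's note: a minimal usage sketch (assumes a saved TablePersistence row
# with related ColumnPersistence rows exists; not part of the original module):
#
#   table = TablePersistence.objects.first()
#   TableSerializer(table).data
#   # -> {'id': ..., 'domain': ..., 'identificator': ..., 'columns': [...]}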
| 32.142857
| 76
| 0.705185
|
84b08cb9d6a98b5eec15439e357c510f65ac0cb4
| 3,311
|
py
|
Python
|
.mywaflib/waflib/extras/run_pl_script.py
|
simonjheiler/ui_human_capital
|
4c5f9b3c56f03cb777cc78958b72d6ffc65a6e79
|
[
"MIT"
] | null | null | null |
.mywaflib/waflib/extras/run_pl_script.py
|
simonjheiler/ui_human_capital
|
4c5f9b3c56f03cb777cc78958b72d6ffc65a6e79
|
[
"MIT"
] | null | null | null |
.mywaflib/waflib/extras/run_pl_script.py
|
simonjheiler/ui_human_capital
|
4c5f9b3c56f03cb777cc78958b72d6ffc65a6e79
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker and David Birke, 2012-16
"""
Run a Perl script in the directory specified by **ctx.bldnode**.
Strings supplied to the **prepend** and **append** keywords will be added
to the command line.
Usage::
ctx(
features='run_pl_script',
source='some_script.pl',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv',
append='',
prepend=''
)
"""
import os
from waflib import Task, TaskGen, Logs
PERL_COMMANDS = ['perl']
def configure(ctx):
ctx.find_program(
PERL_COMMANDS,
var='PERLCMD',
errmsg="""\n
No Perl executable found!\n\n
If Perl is needed:\n
1) Check the settings of your system path.
2) Note we are looking for Perl executables called: %s
If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
Do not load the 'run_pl_script' tool in the compile_datasets_bls.py wscript.\n\n"""
% PERL_COMMANDS
)
class run_pl_script(Task.Task):
"""Run a Perl script."""
run_str = '${PREPEND} "${PERLCMD}" "${SRC[0].abspath()}" ${APPEND}'
shell = True
def exec_command(self, cmd, **kw):
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
if not self.buffer_output:
kw["stdout"] = kw["stderr"] = None
return bld.exec_command(cmd, **kw)
def keyword(self):
"""
Override the 'Compiling' default.
"""
return 'Running'
def __str__(self):
"""
More useful output.
"""
return "{prepend} [Perl] {fn} {append}".format(
prepend=self.env.PREPEND,
fn=self.inputs[0].path_from(self.inputs[0].ctx.launch_node()),
append=self.env.APPEND
)
@TaskGen.feature('run_pl_script')
@TaskGen.before_method('process_source')
def apply_run_pl_script(tg):
"""Task generator customising the options etc. to call Perl
for running a script.
"""
# Convert sources and targets to nodes
src_node = tg.path.find_resource(tg.source)
if src_node is None:
tg.bld.fatal(
"Could not find source file: {}".format(os.path.join(tg.path.relpath(), tg.source))
)
tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
tsk = tg.create_task('run_pl_script', src=src_node, tgt=tgt_nodes)
tsk.env.APPEND = getattr(tg, 'append', '')
tsk.env.PREPEND = getattr(tg, 'prepend', '')
tsk.buffer_output = getattr(tg, 'buffer_output', True)
# dependencies (if the attribute 'deps' changes, trigger a recompilation)
for x in tg.to_list(getattr(tg, 'deps', [])):
node = tg.path.find_resource(x)
if not node:
tg.bld.fatal(
'Could not find dependency %r for running %r'
% (x, src_node.relpath())
)
tsk.dep_nodes.append(node)
Logs.debug(
'deps: found dependencies %r for running %r'
% (tsk.dep_nodes, src_node.relpath())
)
# Bypass the execution of process_source by setting the source to an empty
# list
tg.source = []
| 27.139344
| 95
| 0.601027
|
eea0204f42e8194a42ba507fd2bc27c26207b795
| 3,445
|
py
|
Python
|
src/networks/cifar10_LeNet_elu.py
|
Flsahkong/Deep-SVDD-PyTorch
|
c20442fb394f679222ae49d299bcb3c95e2d67c8
|
[
"MIT"
] | null | null | null |
src/networks/cifar10_LeNet_elu.py
|
Flsahkong/Deep-SVDD-PyTorch
|
c20442fb394f679222ae49d299bcb3c95e2d67c8
|
[
"MIT"
] | null | null | null |
src/networks/cifar10_LeNet_elu.py
|
Flsahkong/Deep-SVDD-PyTorch
|
c20442fb394f679222ae49d299bcb3c95e2d67c8
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.base.base_net import BaseNet
class CIFAR10_LeNet_ELU(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.elu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.elu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.elu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.fc1(x)
return x
class CIFAR10_LeNet_ELU_Autoencoder(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
# Encoder (must match the Deep SVDD network above)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv1.weight)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv2.weight)
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv3.weight)
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False)
# Decoder
self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv1.weight)
self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv2.weight)
self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv3.weight)
self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv4.weight)
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.elu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.elu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.elu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.bn1d(self.fc1(x))
x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)
x = F.elu(x)
x = self.deconv1(x)
x = F.interpolate(F.elu(self.bn2d4(x)), scale_factor=2)
x = self.deconv2(x)
x = F.interpolate(F.elu(self.bn2d5(x)), scale_factor=2)
x = self.deconv3(x)
x = F.interpolate(F.elu(self.bn2d6(x)), scale_factor=2)
x = self.deconv4(x)
x = torch.sigmoid(x)
return x
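# Editor's note: a quick shape check for the two networks above (illustrative
# sketch, assumes CIFAR-10 sized inputs; not part of the original module):
#
#   net = CIFAR10_LeNet_ELU()
#   ae = CIFAR10_LeNet_ELU_Autoencoder()
#   x = torch.randn(8, 3, 32, 32)
#   net(x).shape  # torch.Size([8, 128])       -- the rep_dim embedding
#   ae(x).shape   # torch.Size([8, 3, 32, 32]) -- the reconstruction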
| 38.277778
| 101
| 0.600871
|
f5c0da683a3e328309ec792eecd8a9f806d89a90
| 1,347
|
py
|
Python
|
setup.py
|
JakeCover/Flare-DiscordPy
|
24cc2541a6ef548583e46d58ae18abe72da5f37f
|
[
"MIT"
] | 1
|
2021-04-02T20:16:03.000Z
|
2021-04-02T20:16:03.000Z
|
setup.py
|
JakeCover/Flare-DiscordPy
|
24cc2541a6ef548583e46d58ae18abe72da5f37f
|
[
"MIT"
] | null | null | null |
setup.py
|
JakeCover/Flare-DiscordPy
|
24cc2541a6ef548583e46d58ae18abe72da5f37f
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from __about__ import __version__
with open("README.md") as rdme:
long_description = rdme.read()
setup(
name="flare-discord.py",
version=__version__,
description="Provides a simple, single import cog to add a checkable page to your bot for uptime status monitoring",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://github.com/JakeCover/Flare",
author="Cobular",
author_email="python@cobular.com",
license="MIT",
packages=["flare"],
install_requires=["discord.py>=1.5.0,<1.7.0"],
zip_safe=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: System :: Monitoring',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
keywords=[
"Discord",
"Discord.py",
"Uptime",
"Monitoring",
"Flare",
"Discord Uptime Monitoring",
"UptimeRobot"
],
)
| 31.325581
| 120
| 0.616184
|
d1671767d92990d0c01230628d6d8fa825d4d40d
| 22,886
|
py
|
Python
|
src/bkl/plugins/gnu.py
|
minoki/bakefile
|
80c360b0b618d1d04d7f2ad64d327cd42a0230a7
|
[
"MIT"
] | null | null | null |
src/bkl/plugins/gnu.py
|
minoki/bakefile
|
80c360b0b618d1d04d7f2ad64d327cd42a0230a7
|
[
"MIT"
] | null | null | null |
src/bkl/plugins/gnu.py
|
minoki/bakefile
|
80c360b0b618d1d04d7f2ad64d327cd42a0230a7
|
[
"MIT"
] | null | null | null |
#
# This file is part of Bakefile (http://bakefile.org)
#
# Copyright (C) 2009-2013 Vaclav Slavik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
GNU tools (GCC, GNU Make, ...) toolset.
"""
import os.path
from bkl.api import FileCompiler, FileType
from bkl.makefile import MakefileToolset, MakefileFormatter, MakefileExprFormatter
import bkl.compilers
import bkl.expr
# FIXME: shouldn't be needed later
from bkl.expr import ListExpr, LiteralExpr, BoolExpr, NonConstError
from bkl.error import Error
# GCC flags for supported architectures:
OSX_ARCH_FLAGS = {
'x86' : '-arch i386',
'x86_64' : '-arch x86_64',
}
def _is_multiarch_target(target):
"""
Checks if the target builds for >1 archs, i.e. would use more -arch options
and be incompatible with gcc's -M* flags.
"""
try:
return len(target["archs"]) > 1
except KeyError:
return False # not an executable
# Apple's GCC doesn't handle the standard -MD -MP flags (which are used to
# generate .d files with object files' dependencies on sources and headers) in presence
# of multiple -arch options. Clang can handle it, but we must support GCC. So we run
# GCC's preprocessor once more to generate the dependencies, but let's not do
# it unless necessary, because it a) costs some time and b) may omit some deps.
GCC_DEPS_FLAGS = "-MD -MP"
OSX_GCC_DEPS_RULES = """
# Support for generating .d files with multiple -arch options:
CC_is_clang := $(if $(shell $(CC) --version | grep clang),yes,no)
CXX_is_clang := $(if $(shell $(CXX) --version | grep clang),yes,no)
ifeq "$(CC_is_clang)" "yes"
CC_deps_flags = -MD -MP
CC_deps_cmd =
else
CC_deps_flags =
CC_deps_cmd = $1 -M -MP -o $(patsubst %.o,%.d,$@) $2
endif
ifeq "$(CXX_is_clang)" "yes"
CXX_deps_flags = -MD -MP
CXX_deps_cmd =
else
CXX_deps_flags =
CXX_deps_cmd = $1 -M -MP -o $(patsubst %.o,%.d,$@) $2
endif
"""
# These are just some unique strings, the exact syntax doesn't matter currently.
GMAKE_IFEXPR_MACROS_PLACEHOLDER = "{{{BKL_GMAKE_IFEXPR_MACROS}}}"
GMAKE_BUILDDIR_DEF_PLACEHOLDER = "{{{BKL_GMAKE_BUILDDIR_DEF}}}"
# GNU Make has some boolean functions, but not all that we need, so define them
GMAKE_IFEXPR_MACROS = """
_true := true
_false :=
_not = $(if $(1),$(_false),$(_true))
_equal = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
"""
class GnuObjectFileType(FileType):
name = "gnu-object"
def __init__(self):
FileType.__init__(self, extensions=["o"])
class GnuFileCompiler(FileCompiler):
"""Base class for GNU compilers/linkers."""
def is_supported(self, toolset):
return isinstance(toolset, GnuToolset)
# TODO: a hack, not exactly clean
def _arch_flags(self, toolset, target):
if isinstance(toolset, OSXGnuToolset):
flags = []
for a in target["archs"]:
a = a.as_py()
if a in OSX_ARCH_FLAGS:
flags.append(LiteralExpr(OSX_ARCH_FLAGS[a]))
return flags
else:
return []
class GnuCCompiler(GnuFileCompiler):
"""
GNU C compiler.
"""
name = "GNU C"
in_type = bkl.compilers.CFileType.get()
out_type = GnuObjectFileType.get()
_compiler = "CC"
_flags_var_name = "CFLAGS"
_options_prop_name = "c-compiler-options"
def commands(self, toolset, target, input, output):
needs_extra_deps_code = (isinstance(toolset, OSXGnuToolset) and
_is_multiarch_target(target)) # see GCC_DEPS_FLAGS
cmd = [LiteralExpr("$(%s) -c -o $@ $(CPPFLAGS) $(%s)" %
(self._compiler, self._flags_var_name))]
if needs_extra_deps_code:
cmd += [LiteralExpr("$(%s_deps_flags)" % self._compiler)]
else:
cmd += [LiteralExpr(toolset.deps_flags)]
# FIXME: evaluating the flags here every time is inefficient
cmd += self._arch_flags(toolset, target)
if toolset.pic_flags and target["pic"]:
cmd.append(LiteralExpr(toolset.pic_flags))
if target["multithreading"]:
cmd.append(LiteralExpr(toolset.pthread_cc_flags))
cmd += bkl.expr.add_prefix("-D", target["defines"])
cmd += bkl.expr.add_prefix("-I", target["includedirs"])
if target["warnings"] == "no":
cmd.append(LiteralExpr("-w"))
elif target["warnings"] == "all":
cmd.append(LiteralExpr("-Wall"))
#else: don't do anything special for "minimal" and "default"
cmd += target["compiler-options"]
cmd += target[self._options_prop_name]
# FIXME: use a parser instead of constructing the expression manually
# in here
cmd.append(input)
retval = [ListExpr(cmd)]
if needs_extra_deps_code:
# add command for generating the deps:
cmd = [LiteralExpr("$(call %s_deps_cmd,$(%s),$(CPPFLAGS) $(%s)" %
(self._compiler, self._compiler, self._flags_var_name))]
cmd += bkl.expr.add_prefix("-D", target["defines"])
cmd += bkl.expr.add_prefix("-I", target["includedirs"])
cmd += target["compiler-options"]
cmd += target[self._options_prop_name]
cmd.append(input)
cmd.append(LiteralExpr(")"))
retval.append(ListExpr(cmd))
return retval
class GnuCXXompiler(GnuCCompiler):
"""
GNU C++ compiler.
"""
name = "GNU C++"
in_type = bkl.compilers.CxxFileType.get()
out_type = GnuObjectFileType.get()
_compiler = "CXX"
_flags_var_name = "CXXFLAGS"
_options_prop_name = "cxx-compiler-options"
class GnuLinker(GnuFileCompiler):
"""
GNU executables linker.
"""
name = "GNU LD"
in_type = GnuObjectFileType.get()
out_type = bkl.compilers.NativeProgramFileType.get()
def _linker_flags(self, toolset, target):
cmd = self._arch_flags(toolset, target)
libdirs = target.type.get_libdirs(target)
if libdirs:
cmd += bkl.expr.add_prefix("-L", ListExpr(libdirs))
libs = target.type.get_libfiles(toolset, target)
ldlibs = target.type.get_ldlibs(target)
cmd += libs
cmd += bkl.expr.add_prefix("-l", ListExpr(ldlibs)).items
cmd += target.type.get_link_options(target)
if toolset.extra_link_flags:
cmd.append(LiteralExpr(toolset.extra_link_flags))
if toolset.pthread_ld_flags and target["multithreading"]:
cmd.append(LiteralExpr(toolset.pthread_ld_flags))
return cmd
def commands(self, toolset, target, input, output):
cmd = [LiteralExpr("$(CXX) -o $@ $(LDFLAGS)"), input]
# FIXME: use a parser instead of constructing the expression manually
# in here
cmd += self._linker_flags(toolset, target)
return [ListExpr(cmd)]
class GnuSharedLibLinker(GnuLinker):
"""
GNU shared libraries linker.
"""
name = "GNU shared LD"
in_type = GnuObjectFileType.get()
out_type = bkl.compilers.NativeSharedLibraryFileType.get()
def commands(self, toolset, target, input, output):
cmd = [LiteralExpr("$(CXX) %s -o $@" % toolset.shared_library_link_flag)]
if toolset.soname_flags:
cmd.append(LiteralExpr(toolset.soname_flags))
cmd.append(LiteralExpr("$(LDFLAGS)"))
cmd.append(input)
# FIXME: use a parser instead of constructing the expression manually
# in here
cmd += self._linker_flags(toolset, target)
return [ListExpr(cmd)]
class GnuLoadableModuleLinker(GnuLinker):
"""
GNU loadable modules linker.
"""
name = "GNU module LD"
in_type = GnuObjectFileType.get()
out_type = bkl.compilers.NativeLoadableModuleFileType.get()
def commands(self, toolset, target, input, output):
cmd = [LiteralExpr("$(CXX) %s -o $@" % toolset.loadable_module_link_flag)]
cmd.append(LiteralExpr("$(LDFLAGS)"))
cmd.append(input)
# FIXME: use a parser instead of constructing the expression manually
# in here
cmd += self._linker_flags(toolset, target)
return [ListExpr(cmd)]
class GnuLibLinker(GnuFileCompiler):
"""
GNU library linker.
"""
name = "AR"
in_type = GnuObjectFileType.get()
out_type = bkl.compilers.NativeLibFileType.get()
def commands(self, toolset, target, input, output):
# FIXME: use a parser instead of constructing the expression manually
# in here
return [ListExpr([LiteralExpr("$(AR) rcu $@"), input]),
ListExpr([LiteralExpr("$(RANLIB) $@")])]
class GnuMakefileFormatter(MakefileFormatter):
"""
Formatter for the GNU Make syntax.
"""
def var_definition(self, var, value):
# TODO: use = if it depends on any of the macros defined later
return "%s ?= %s\n" % (var, " \\\n\t".join(value.split("\n")))
def submake_command(self, directory, filename, target):
return "$(MAKE) -C %s -f %s %s" % (directory, filename, target)
def multifile_target(self, outputs, outfiles, deps, commands):
# Use a helper intermediate target to handle multiple outputs of a rule,
# because we can't easily use GNU Make's pattern rules matching. The
# absence of an intermediate file is not a problem and does not cause
# spurious builds. See for details:
# http://www.gnu.org/software/make/manual/html_node/Chained-Rules.html
# http://stackoverflow.com/a/10609434/237188
for c in commands:
if '$@' in c:
raise Error("The use of $@ or %%(out) not supported with multiple outputs (in \"%s\")" % c)
inter_name = ".dummy_" + "_".join("_".join(c.as_py() for c in f.components) for f in outputs)
return "\n".join([
"%s: %s" % (" ".join(outfiles), inter_name),
".INTERMEDIATE: %s" % inter_name,
self.target(inter_name, deps, commands)
])
class GnuExprFormatter(MakefileExprFormatter):
def path(self, e):
# We handle all build paths in a very special way to allow customizing
# them at make time by setting the make builddir variable, which is
# used to initialize another make variable called _builddir which is
# then used to construct all build paths.
if e.anchor == bkl.expr.ANCHOR_BUILDDIR:
# Notice that _builddir is either empty or contains the
# trailing slash, so we must not add another one here.
self.toolset.uses_builddir = True
return "$(_builddir)" + "/".join(self.format(c) for c in e.components)
super_self = super(GnuExprFormatter, self)
if e.anchor == bkl.expr.ANCHOR_TOP_BUILDDIR:
self.toolset.uses_builddir = True
# To handle top build directory-relative paths correctly, just
# interpret the path relatively to the top source directory.
p = super_self.path(bkl.expr.PathExpr(e.components, bkl.expr.ANCHOR_TOP_SRCDIR))
# But then root it at build directory.
return "$(_builddir)" + p
return super_self.path(e)
def bool_value(self, e):
self.toolset.uses_non_std_bool_macros = True
return "$(_true)" if e.value else "$(_false)"
def bool(self, e):
l = self.format(e.left)
if e.right is not None:
r = self.format(e.right)
if e.operator == BoolExpr.AND:
return "$(and %s,%s)" % (l, r)
if e.operator == BoolExpr.OR:
return "$(or %s,%s)" % (l, r)
if e.operator == BoolExpr.EQUAL:
self.toolset.uses_non_std_bool_macros = True
return "$(call _equal,%s,%s)" % (l, r)
if e.operator == BoolExpr.NOT_EQUAL:
self.toolset.uses_non_std_bool_macros = True
return "$(call _not,$(call _equal,%s,%s))" % (l, r)
if e.operator == BoolExpr.NOT:
self.toolset.uses_non_std_bool_macros = True
return "$(call _not,%s)" % l
assert False, "invalid operator"
def if_(self, e):
try:
return super(GnuExprFormatter, self).if_(e)
except NonConstError:
c = self.format(e.cond)
y = self.format(e.value_yes)
n = self.format(e.value_no)
return "$(if %s,%s,%s)" % (c, y, n)
class GnuToolset(MakefileToolset):
"""
GNU toolchain for Unix systems.
This toolset generates makefiles for the GNU toolchain -- GNU Make, GCC compiler,
GNU LD linker etc. -- running on Unix system.
Currently, only Linux systems (or something sufficiently compatible) are supported.
In particular, file extensions and linker behavior (symlinks, sonames) are assumed
to be Linux ones.
See :ref:`ref_toolset_gnu-osx` for OS X variant.
"""
name = "gnu"
Formatter = GnuMakefileFormatter
ExprFormatter = GnuExprFormatter
default_makefile = "GNUmakefile"
default_cc = "cc"
default_cxx = "c++"
autoclean_extensions = ["o", "d"]
del_command = "rm -f"
object_type = GnuObjectFileType.get()
library_prefix = "lib"
library_extension = "a"
shared_library_prefix = "lib"
shared_library_extension = "so"
shared_library_link_flag = "-shared -Wl,-z,defs"
loadable_module_prefix = ""
loadable_module_extension = "so"
loadable_module_link_flag = "-shared -Wl,-z,defs"
deps_flags = GCC_DEPS_FLAGS
pic_flags = "-fPIC -DPIC"
pthread_cc_flags = "-pthread"
pthread_ld_flags = "-pthread"
soname_flags = "-Wl,-soname,$(notdir $@)"
extra_link_flags = None
def output_default_flags(self, file, configs):
"""
Helper of on_header() which outputs default, config-dependent,
values for all the usual compilation flags.
"""
# Check if we have any custom configurations: we always have at least
# two standard ones, "Debug" and "Release".
if len(configs) > 2:
# We do, so check which of them should use debug settings and
# which -- the release ones.
debug_config, release_config = configs['Debug'], configs['Release']
debug_configs_names = ['Debug']
release_configs_names = ['Release']
for name, config in configs.iteritems():
if config.derived_from(debug_config):
debug_configs_names.append(name)
elif config.derived_from(release_config):
release_configs_names.append(name)
# Assume that tilde characters are never used in the configuration
# names (it's certainly not common at the very least).
non_config_sep = '~~'
make_test_fmt = 'ifneq (,$(findstring %s$(config)%s,%s%%s%s))' % \
(non_config_sep, non_config_sep, non_config_sep, non_config_sep)
make_debug_test = make_test_fmt % non_config_sep.join(debug_configs_names)
make_release_test = make_test_fmt % non_config_sep.join(release_configs_names)
else:
# If we only have the two predefined configs, use simpler tests.
make_debug_test = 'ifeq ($(config),Debug)'
make_release_test = 'ifeq ($(config),Release)'
file.write("""
# You may also specify config=%s
# or their corresponding lower case variants on make command line to select
# the corresponding default flags values.
""" % '|'.join(configs.keys()))
# Accept configs in lower case too to be more Unix-ish.
for name in configs:
file.write(
"""ifeq ($(config),%s)
override config := %s
endif
""" % (name.lower(), name))
file.write(make_debug_test)
file.write(
"""
override CPPFLAGS += -DDEBUG
override CFLAGS += -g -O0
override CXXFLAGS += -g -O0
override LDFLAGS += -g
else """
)
file.write(make_release_test)
file.write(
"""
override CPPFLAGS += -DNDEBUG
override CFLAGS += -O2
override CXXFLAGS += -O2
else ifneq (,$(config))
$(warning Unknown configuration "$(config)")
endif
""")
def on_header(self, file, module):
super(GnuToolset, self).on_header(file, module)
file.write("""
# You may define standard make variables such as CFLAGS or
# CXXFLAGS to affect the build. For example, you could use:
#
# make CXXFLAGS=-g
#
# to build with debug information. The full list of variables
# that can be used by this makefile is:
# AR, CC, CFLAGS, CPPFLAGS, CXX, CXXFLAGS, LD, LDFLAGS, MAKE, RANLIB.
""")
self.output_default_flags(file, module.project.configurations)
if module.project.settings:
file.write("""#
# Additionally, this makefile is customizable with the following
# settings:
#
""")
alls = [(s.name, s["help"]) for s in module.project.settings.itervalues()]
width = max(len(x[0]) for x in alls)
fmtstr = "# %%-%ds %%s\n" % width
for name, doc in alls:
file.write(fmtstr % (name, doc if doc else ""))
file.write("""
# Use \"make RANLIB=''\" for platforms without ranlib.
RANLIB ?= ranlib
CC := %s
CXX := %s
""" % (self.default_cc, self.default_cxx))
# This placeholder will be replaced either with the definition of the
# macros, if they turn out to be really needed, or nothing otherwise.
file.write(GMAKE_IFEXPR_MACROS_PLACEHOLDER)
self.uses_non_std_bool_macros = False
# Similarly, this one will be replaced with the definition of the
# build directory variable if we are building any files in this
# makefile or nothing if we don't (this does happen in top level
# makefiles which just dispatch the work to other makefiles, no need
# to clutter them).
file.write(GMAKE_BUILDDIR_DEF_PLACEHOLDER)
def _get_builddir_fragment(self, module):
# Build the value actually representing the build directory, it is
# only used here (see GnuExprFormatter.path) and only to initialize
# the internal _builddir in the fragment below.
makefile = module["%s.makefile" % self.name]
rel_dir_comps = makefile.components[:-1]
# None of the complications is needed in the top level makefile,
# _builddir is the same as $builddir in it anyhow.
if rel_dir_comps == []:
builddir_path = "$(builddir)"
else:
# Build the relative path to the top source directory.
to_top_srcdir = "../"*len(rel_dir_comps)
# First a hack to ensure we start from the top build directory: we
# need to do this only if the user-defined builddir is relative at
# make time, so we check for this by comparing it with its absolute
# path.
builddir_path = """\
$(if $(findstring $(abspath $(builddir)),$(builddir)),,%s)\
""" % to_top_srcdir
# Next the build directory itself, whether relative or absolute.
builddir_path = builddir_path + "$(builddir)"
            # Finally tack on the relative path to this directory.
builddir_path = builddir_path + "/" + "/".join(c.as_py() for c in rel_dir_comps)
return """
# The directory for the build files, may be overridden on make command line.
builddir = .
ifneq ($(builddir),.)
_builddir := %s/
_builddir_error := $(shell mkdir -p $(_builddir) 2>&1)
$(if $(_builddir_error),$(error Failed to create build directory: $(_builddir_error)))
endif
""" % builddir_path
def on_phony_targets(self, file, targets):
file.write(".PHONY: %s\n" % " ".join(targets))
def on_footer(self, file, module):
file.replace(GMAKE_IFEXPR_MACROS_PLACEHOLDER,
GMAKE_IFEXPR_MACROS if self.uses_non_std_bool_macros
else "")
file.replace(GMAKE_BUILDDIR_DEF_PLACEHOLDER,
self._get_builddir_fragment(module) if self.uses_builddir
else "")
file.write("\n"
"# Dependencies tracking:\n"
"-include *.d\n")
class OSXGnuToolset(GnuToolset):
"""
GNU toolchain for OS X.
This toolset is for building on OS X using makefiles, not Xcode. It
incorporates some of the oddities of OS X's toolchain and should be used
instead of :ref:`ref_toolset_gnu`.
"""
# FIXME: This is temporary solution, will be integrated into GnuToolset
# with runtime platform detection.
name = "gnu-osx"
default_makefile = "Makefile.osx"
shared_library_extension = "dylib"
loadable_module_extension = "bundle"
loadable_module_link_flag = "-bundle"
pic_flags = None
soname_flags = None
pthread_ld_flags = None
def on_footer(self, file, module):
for t in module.targets.itervalues():
if _is_multiarch_target(t):
file.write(OSX_GCC_DEPS_RULES)
break
super(OSXGnuToolset, self).on_footer(file, module)
class SunCCGnuToolset(GnuToolset):
"""
GNU toolchain for Sun CC compiler.
This toolset is for building using the Sun CC (aka Oracle Studio) toolset.
"""
# FIXME: This is temporary solution, will be integrated into GnuToolset
# with runtime platform detection.
name = "gnu-suncc"
default_makefile = "Makefile.suncc"
default_cc = "suncc"
default_cxx = "sunCC"
shared_library_link_flag = "-G -Kpic -z defs"
loadable_module_link_flag = "-G -Kpic -z defs"
deps_flags = "-xMD"
pic_flags = "-Kpic -DPIC"
pthread_cc_flags = "-D_THREAD_SAFE -mt"
pthread_ld_flags = "-mt -lpthread"
soname_flags = "-h $(notdir $@)"
# FIXME: Do this for C++ only
extra_link_flags = "-lCstd -lCrun"
| 35.927786
| 107
| 0.633531
|
8e916936a4c3fc02f78e871f7f532223e6209a3f
| 3,255
|
py
|
Python
|
userbot/modules/www.py
|
Patrick489/PatrickBot
|
8c6f8bd8c346887792cff31700f820c20db93042
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4
|
2020-12-14T14:21:21.000Z
|
2021-02-06T14:29:26.000Z
|
userbot/modules/www.py
|
Razer-Cynosa/IsengUserbot
|
6eb9058871eb8dc9cf12aa164dd26cbef9cba553
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2021-01-04T13:13:05.000Z
|
2021-03-23T02:06:32.000Z
|
userbot/modules/www.py
|
Razer-Cynosa/IsengUserbot
|
6eb9058871eb8dc9cf12aa164dd26cbef9cba553
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 20
|
2020-12-19T19:21:42.000Z
|
2021-10-05T15:07:42.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to the \
Information Superhighway (yes, Internet). """
from datetime import datetime
from speedtest import Speedtest
from userbot import CMD_HELP, StartTime, ALIVE_NAME
from userbot.events import register
import time
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
remainder, result = divmod(
seconds, 60) if count < 3 else divmod(
seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
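# Editor's note: worked example (not part of the original module):
#   await get_readable_time(90061)  ->  "1days, 1h:1m:1s"
# i.e. 90061 seconds = 1 day, 1 hour, 1 minute and 1 second.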
@register(outgoing=True, pattern="^.speed$")
async def speedtst(spd):
""" For .speed command, use SpeedTest to check server speeds. """
await spd.edit("`Running high speed test . . .`")
test = Speedtest()
test.get_best_server()
test.download()
test.upload()
test.results.share()
result = test.results.dict()
await spd.edit("`"
"Started at "
f"{result['timestamp']} \n\n"
"Download "
f"{speed_convert(result['download'])} \n"
"Upload "
f"{speed_convert(result['upload'])} \n"
"Ping "
f"{result['ping']} \n"
"ISP "
f"{result['client']['isp']}"
"`")
def speed_convert(size):
"""
Hi human, you can't read bytes?
"""
power = 2**10
zero = 0
units = {0: '', 1: 'Kb/s', 2: 'Mb/s', 3: 'Gb/s', 4: 'Tb/s'}
while size > power:
size /= power
zero += 1
return f"{round(size, 2)} {units[zero]}"
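# Editor's note: worked example (not part of the original module):
#   speed_convert(12345678)  ->  "11.77 Mb/s"
# (12345678 / 1024 / 1024 is roughly 11.77)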
@register(outgoing=True, pattern="^.ping$")
async def pingme(pong):
""" For .ping command, ping the userbot from any chat. """
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await pong.edit("`Pinging....`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await pong.edit(f"**PONG!! 🍭** : `{ALIVE_NAME}`\n**Pinger** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration))
@register(outgoing=True, pattern="^.pong$")
async def pongme(pong):
    """ For .pong command, ping the userbot from any chat. """
start = datetime.now()
await pong.edit("`**wait!**`")
end = datetime.now()
duration = (end - start).microseconds / 9000
await pong.edit("`Ping!\n%sms`" % (duration))
CMD_HELP.update(
{"ping": "`.ping`\
\nUsage: Shows how long it takes to ping your bot.\
\n\n`.speed`\
\nUsage: Does a speedtest and shows the results.\
\n\n`.pong`\
\nUsage: Shows how long it takes to ping your bot."
})
| 29.324324
| 113
| 0.571121
|
3272a8537ddea2c76f4dde43de15297de4f8fa9c
| 1,972
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/git/v4_0/models/git_cherry_pick.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/git/v4_0/models/git_cherry_pick.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/git/v4_0/models/git_cherry_pick.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .git_async_ref_operation import GitAsyncRefOperation
class GitCherryPick(GitAsyncRefOperation):
"""GitCherryPick.
:param _links:
:type _links: :class:`ReferenceLinks <git.v4_0.models.ReferenceLinks>`
:param detailed_status:
:type detailed_status: :class:`GitAsyncRefOperationDetail <git.v4_0.models.GitAsyncRefOperationDetail>`
:param parameters:
:type parameters: :class:`GitAsyncRefOperationParameters <git.v4_0.models.GitAsyncRefOperationParameters>`
:param status:
:type status: object
:param url:
:type url: str
:param cherry_pick_id:
:type cherry_pick_id: int
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'detailed_status': {'key': 'detailedStatus', 'type': 'GitAsyncRefOperationDetail'},
'parameters': {'key': 'parameters', 'type': 'GitAsyncRefOperationParameters'},
'status': {'key': 'status', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'cherry_pick_id': {'key': 'cherryPickId', 'type': 'int'}
}
def __init__(self, _links=None, detailed_status=None, parameters=None, status=None, url=None, cherry_pick_id=None):
super(GitCherryPick, self).__init__(_links=_links, detailed_status=detailed_status, parameters=parameters, status=status, url=url)
self.cherry_pick_id = cherry_pick_id
| 48.097561
| 139
| 0.589249
|
d485733c1f8da253000a88ac9bb6c36d335c7325
| 7,244
|
py
|
Python
|
wtforms_sqlalchemy/fields.py
|
SleeveShirtholes/wtforms-sqlalchemy
|
e172387992601ab8477d767580e957209ac46ea1
|
[
"BSD-3-Clause"
] | 84
|
2015-02-12T17:43:00.000Z
|
2022-03-05T15:23:18.000Z
|
wtforms_sqlalchemy/fields.py
|
SleeveShirtholes/wtforms-sqlalchemy
|
e172387992601ab8477d767580e957209ac46ea1
|
[
"BSD-3-Clause"
] | 23
|
2016-01-31T14:35:11.000Z
|
2022-02-18T17:45:17.000Z
|
wtforms_sqlalchemy/fields.py
|
SleeveShirtholes/wtforms-sqlalchemy
|
e172387992601ab8477d767580e957209ac46ea1
|
[
"BSD-3-Clause"
] | 37
|
2015-07-05T01:26:10.000Z
|
2021-12-29T00:47:14.000Z
|
"""
Useful form fields for use with SQLAlchemy ORM.
"""
import operator
from wtforms import widgets
from wtforms.compat import string_types
from wtforms.compat import text_type
from wtforms.fields import SelectFieldBase
from wtforms.validators import ValidationError
try:
from sqlalchemy.orm.util import identity_key
has_identity_key = True
except ImportError:
has_identity_key = False
__all__ = (
"QuerySelectField",
"QuerySelectMultipleField",
"QueryRadioField",
"QueryCheckboxField",
)
class QuerySelectField(SelectFieldBase):
"""
Will display a select drop-down field to choose between ORM results in a
sqlalchemy `Query`. The `data` property actually will store/keep an ORM
model instance, not the ID. Submitting a choice which is not in the query
will result in a validation error.
This field only works for queries on models whose primary key column(s)
have a consistent string representation. This means it mostly only works
for those composed of string, unicode, and integer types. For the most
part, the primary keys will be auto-detected from the model, alternately
pass a one-argument callable to `get_pk` which can return a unique
comparable key.
The `query` property on the field can be set from within a view to assign
a query per-instance to the field. If the property is not set, the
`query_factory` callable passed to the field constructor will be called to
obtain a query.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for this blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(
self,
label=None,
validators=None,
query_factory=None,
get_pk=None,
get_label=None,
allow_blank=False,
blank_text="",
**kwargs
):
super().__init__(label, validators, **kwargs)
self.query_factory = query_factory
if get_pk is None:
if not has_identity_key:
raise Exception(
"The sqlalchemy identity_key function could not be imported."
)
self.get_pk = get_pk_from_identity
else:
self.get_pk = get_pk
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self.query = None
self._object_list = None
def _get_data(self):
if self._formdata is not None:
for pk, obj in self._get_object_list():
if pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _get_object_list(self):
if self._object_list is None:
query = self.query if self.query is not None else self.query_factory()
get_pk = self.get_pk
self._object_list = list((text_type(get_pk(obj)), obj) for obj in query)
return self._object_list
def iter_choices(self):
if self.allow_blank:
yield ("__None", self.blank_text, self.data is None)
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == "__None":
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
data = self.data
if data is not None:
for _, obj in self._get_object_list():
if data == obj:
break
else:
raise ValidationError(self.gettext("Not a valid choice"))
elif self._formdata or not self.allow_blank:
raise ValidationError(self.gettext("Not a valid choice"))
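# Editor's note: a minimal usage sketch for QuerySelectField (``User`` and
# ``db_session`` are assumed application objects, not part of this module):
#
#   class PostForm(Form):
#       author = QuerySelectField(
#           query_factory=lambda: db_session.query(User),
#           get_label="username",
#           allow_blank=True,
#           blank_text="(no author)",
#       )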
class QuerySelectMultipleField(QuerySelectField):
"""
Very similar to QuerySelectField with the difference that this will
display a multiple select. The data property will hold a list with ORM
model instances and will be an empty list when no value is selected.
If any of the items in the data list or submitted form data cannot be
found in the query, this will result in a validation error.
"""
widget = widgets.Select(multiple=True)
def __init__(self, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super().__init__(label, validators, default=default, **kwargs)
if kwargs.get("allow_blank", False):
import warnings
warnings.warn(
"allow_blank=True does not do anything for QuerySelectMultipleField."
)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata is not None:
data = []
for pk, obj in self._get_object_list():
if not formdata:
break
elif pk in formdata:
formdata.remove(pk)
data.append(obj)
if formdata:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj in self.data)
def process_formdata(self, valuelist):
self._formdata = set(valuelist)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext("Not a valid choice"))
elif self.data:
obj_list = list(x[1] for x in self._get_object_list())
for v in self.data:
if v not in obj_list:
raise ValidationError(self.gettext("Not a valid choice"))
class QueryRadioField(QuerySelectField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.RadioInput()
class QueryCheckboxField(QuerySelectMultipleField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return ":".join(text_type(x) for x in key)
| 33.077626
| 85
| 0.636803
|
58fdcad3311d46d9559e159600152f7a1f67b0ef
| 1,021
|
py
|
Python
|
neutron/plugins/plumgrid/common/exceptions.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | 3
|
2015-02-02T02:51:39.000Z
|
2015-02-23T10:20:23.000Z
|
neutron/plugins/plumgrid/common/exceptions.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | 4
|
2015-02-23T10:21:11.000Z
|
2015-03-04T09:28:20.000Z
|
neutron/plugins/plumgrid/common/exceptions.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc.
"""Neutron PLUMgrid Plugin exceptions"""
from neutron.common import exceptions as base_exec
class PLUMgridException(base_exec.NeutronException):
message = _("PLUMgrid Plugin Error: %(err_msg)s")
class PLUMgridConnectionFailed(PLUMgridException):
message = _("Connection failed with PLUMgrid Director: %(err_msg)s")
| 35.206897
| 78
| 0.742409
|
f61ce4de8ad3a43ecece7f7687b9f1009848e429
| 13,203
|
py
|
Python
|
test/cli/output/test_default.py
|
chr1st1ank/schemathesis
|
f2e160d56c1fdce9eac7fee5875b209c8944f54a
|
[
"MIT"
] | null | null | null |
test/cli/output/test_default.py
|
chr1st1ank/schemathesis
|
f2e160d56c1fdce9eac7fee5875b209c8944f54a
|
[
"MIT"
] | null | null | null |
test/cli/output/test_default.py
|
chr1st1ank/schemathesis
|
f2e160d56c1fdce9eac7fee5875b209c8944f54a
|
[
"MIT"
] | null | null | null |
import os
import sys
import click
import hypothesis
import pytest
from hypothesis.reporting import report
import schemathesis
from schemathesis import models, runner, utils
from schemathesis.cli.output import default
@pytest.fixture(autouse=True)
def click_context():
"""Add terminal colors to the output in tests."""
with click.Context(schemathesis.cli.run, color=True):
yield
@pytest.fixture()
def execution_context():
return runner.events.ExecutionContext([])
@pytest.fixture
def endpoint(swagger_20):
return models.Endpoint("/success", "GET", definition={}, base_url="http://127.0.0.1:8080", schema=swagger_20)
@pytest.fixture()
def results_set(endpoint):
statistic = models.TestResult(endpoint)
return models.TestResultSet([statistic])
@pytest.fixture()
def after_execution(results_set, endpoint, swagger_20):
return runner.events.AfterExecution(
results=results_set, schema=swagger_20, endpoint=endpoint, status=models.Status.success
)
@pytest.mark.parametrize(
"title,separator,printed,expected",
[
("TEST", "-", "data in section", "------- TEST -------"),
("TEST", "*", "data in section", "******* TEST *******"),
],
)
def test_display_section_name(capsys, title, separator, printed, expected):
# When section name is displayed
default.display_section_name(title, separator=separator)
out = capsys.readouterr().out.strip()
terminal_width = default.get_terminal_width()
# It should fit into the terminal width
assert len(click.unstyle(out)) == terminal_width
# And the section name should be bold
assert click.style(click.unstyle(out), bold=True) == out
assert expected in out
def test_handle_initialized(capsys, execution_context, results_set, swagger_20):
# Given Initialized event
event = runner.events.Initialized(
results=results_set, schema=swagger_20, checks=(), hypothesis_settings=hypothesis.settings()
)
    # When this event is handled
default.handle_initialized(execution_context, event)
out = capsys.readouterr().out
lines = out.split("\n")
# Then initial title is displayed
assert " Schemathesis test session starts " in lines[0]
# And platform information is there
assert lines[1].startswith("platform")
# And current directory
assert f"rootdir: {os.getcwd()}" in lines
# And number of collected endpoints
assert click.style("collected endpoints: 1", bold=True) in lines
# And the output has an empty line in the end
assert out.endswith("\n\n")
def test_display_statistic(capsys, swagger_20, endpoint):
# Given multiple successful & failed checks in a single test
success = models.Check("not_a_server_error", models.Status.success)
failure = models.Check("not_a_server_error", models.Status.failure)
single_test_statistic = models.TestResult(
endpoint, [success, success, success, failure, failure, models.Check("different_check", models.Status.success)]
)
results = models.TestResultSet([single_test_statistic])
# When test results are displayed
default.display_statistic(results)
lines = [line for line in capsys.readouterr().out.split("\n") if line]
failed = click.style("FAILED", bold=True, fg="red")
not_a_server_error = click.style("not_a_server_error", bold=True)
different_check = click.style("different_check", bold=True)
passed = click.style("PASSED", bold=True, fg="green")
# Then all check results should be properly displayed with relevant colors
assert lines[1:3] == [
f"{not_a_server_error} 3 / 5 passed {failed} ",
f"{different_check} 1 / 1 passed {passed} ",
]
def test_display_statistic_empty(capsys, results_set):
default.display_statistic(results_set)
assert capsys.readouterr().out.split("\n")[2] == click.style("No checks were performed.", bold=True)
def test_capture_hypothesis_output():
    # When Hypothesis output is captured
with utils.capture_hypothesis_output() as hypothesis_output:
value = "Some text"
report(value)
report(value)
# Then all calls to internal Hypothesis reporting will put its output to a list
assert hypothesis_output == [value, value]
@pytest.mark.parametrize("position, length, expected", ((1, 100, "[ 1%]"), (20, 100, "[ 20%]"), (100, 100, "[100%]")))
def test_get_percentage(position, length, expected):
assert default.get_percentage(position, length) == expected
@pytest.mark.parametrize("current_line_length", (0, 20))
@pytest.mark.parametrize("endpoints_processed, percentage", ((0, "[ 0%]"), (1, "[100%]")))
def test_display_percentage(
capsys, execution_context, after_execution, swagger_20, current_line_length, endpoints_processed, percentage
):
execution_context.current_line_length = current_line_length
execution_context.endpoints_processed = endpoints_processed
# When percentage is displayed
default.display_percentage(execution_context, after_execution)
out = capsys.readouterr().out
# Then the whole line fits precisely to the terminal width
assert len(click.unstyle(out)) + current_line_length == default.get_terminal_width()
# And the percentage displayed as expected in cyan color
assert out.strip() == click.style(percentage, fg="cyan")
def test_display_hypothesis_output(capsys):
# When Hypothesis output is displayed
default.display_hypothesis_output(["foo", "bar"])
lines = capsys.readouterr().out.split("\n")
# Then the relevant section title is displayed
assert " HYPOTHESIS OUTPUT" in lines[0]
# And the output is displayed as separate lines in red color
assert " ".join(lines[1:3]) == click.style("foo bar", fg="red")
@pytest.mark.parametrize("body", ({}, {"foo": "bar"}, None))
def test_display_single_failure(capsys, swagger_20, endpoint, body):
# Given a single test result with multiple successful & failed checks
success = models.Check("not_a_server_error", models.Status.success)
failure = models.Check("not_a_server_error", models.Status.failure, models.Case(endpoint, body=body))
test_statistic = models.TestResult(
endpoint, [success, success, success, failure, failure, models.Check("different_check", models.Status.success)]
)
# When this failure is displayed
default.display_failures_for_single_test(test_statistic)
out = capsys.readouterr().out
lines = out.split("\n")
# Then the endpoint name is displayed as a subsection
assert " GET: /success " in lines[0]
# And check name is displayed in red
assert lines[1] == click.style("Check : not_a_server_error", fg="red")
# And body should be displayed if it is not None
if body is None:
assert "Body" not in out
else:
assert click.style(f"Body : {body}", fg="red") in lines
# And empty parameters are not present in the output
assert "Path parameters" not in out
# And not needed attributes are not displayed
assert "Path" not in out
assert "Method" not in out
assert "Base url" not in out
@pytest.mark.parametrize(
"status, expected_symbol, color",
((models.Status.success, ".", "green"), (models.Status.failure, "F", "red"), (models.Status.error, "E", "red")),
)
def test_handle_after_execution(capsys, execution_context, after_execution, status, expected_symbol, color):
    # Given an AfterExecution event with a certain status
after_execution.status = status
# When this event is handled
default.handle_after_execution(execution_context, after_execution)
lines = capsys.readouterr().out.strip().split("\n")
symbol, percentage = lines[0].split()
# Then the symbol corresponding to the status is displayed with a proper color
assert click.style(expected_symbol, fg=color) == symbol
# And percentage is displayed in cyan color
assert click.style("[100%]", fg="cyan") == percentage
def test_after_execution_attributes(execution_context, after_execution):
# When `handle_after_execution` is executed
default.handle_after_execution(execution_context, after_execution)
# Then number of endpoints processed grows by 1
assert execution_context.endpoints_processed == 1
# And the line length grows by 1 symbol
assert execution_context.current_line_length == 1
default.handle_after_execution(execution_context, after_execution)
assert execution_context.endpoints_processed == 2
assert execution_context.current_line_length == 2
@pytest.mark.parametrize("show_errors_tracebacks", (True, False))
def test_display_single_error(capsys, swagger_20, endpoint, execution_context, show_errors_tracebacks):
# Given exception is multiline
exception = None
try:
exec("some invalid code")
except SyntaxError as exc:
exception = exc
result = models.TestResult(endpoint)
result.add_error(exception)
# When the related test result is displayed
execution_context.show_errors_tracebacks = show_errors_tracebacks
default.display_single_error(execution_context, result)
lines = capsys.readouterr().out.strip().split("\n")
# Then it should be correctly formatted and displayed in red color
if sys.version_info <= (3, 8):
expected = ' File "<string>", line 1\n some invalid code\n ^\nSyntaxError: invalid syntax\n'
else:
expected = ' File "<string>", line 1\n some invalid code\n ^\nSyntaxError: invalid syntax\n'
if show_errors_tracebacks:
lines = click.unstyle("\n".join(lines)).split("\n")
assert lines[1] == "Traceback (most recent call last):"
# There is a path on the next line, it is simpler to not check it since it doesn't give much value
# But presence of traceback itself is checked
expected = f' exec("some invalid code")\n{expected}'
assert "\n".join(lines[3:8]) == expected.strip("\n")
else:
assert "\n".join(lines[1:6]) == click.style(expected, fg="red")
def test_display_failures(swagger_20, capsys, results_set):
# Given two test results - success and failure
endpoint = models.Endpoint("/api/failure", "GET", {}, base_url="http://127.0.0.1:8080", schema=swagger_20)
failure = models.TestResult(endpoint)
failure.add_failure("test", models.Case(endpoint), "Message")
results_set.append(failure)
# When the failures are displayed
default.display_failures(results_set)
out = capsys.readouterr().out.strip()
# Then section title is displayed
assert " FAILURES " in out
# And endpoint with a failure is displayed as a subsection
assert " GET: /api/failure " in out
assert "Message" in out
# And check name is displayed
assert "Check : test" in out
assert "Run this Python code to reproduce this failure: " in out
assert "requests.get('http://127.0.0.1:8080/api/failure')" in out
@pytest.mark.parametrize("show_errors_tracebacks", (True, False))
def test_display_errors(swagger_20, capsys, results_set, execution_context, show_errors_tracebacks):
# Given two test results - success and error
endpoint = models.Endpoint("/api/error", "GET", {}, swagger_20)
error = models.TestResult(endpoint, seed=123)
error.add_error(ConnectionError("Connection refused!"), models.Case(endpoint, query={"a": 1}))
results_set.append(error)
# When the errors are displayed
execution_context.show_errors_tracebacks = show_errors_tracebacks
default.display_errors(execution_context, results_set)
out = capsys.readouterr().out.strip()
# Then section title is displayed
assert " ERRORS " in out
help_message_exists = (
"Add this option to your command line parameters to " "see full tracebacks: --show-error-tracebacks" in out
)
# And help message is displayed only if tracebacks are not shown
assert help_message_exists is not show_errors_tracebacks
# And endpoint with an error is displayed as a subsection
assert " GET: /api/error " in out
# And the error itself is displayed
assert "ConnectionError: Connection refused!" in out
# And the example is displayed
assert "Query : {'a': 1}" in out
assert "Or add this option to your command line parameters: --hypothesis-seed=123" in out
@pytest.mark.parametrize(
"attribute, expected", ((models.Case.__attrs_attrs__[3], "Cookies"), (models.Case.__attrs_attrs__[4], "Query"))
)
def test_make_verbose_name(attribute, expected):
assert default.make_verbose_name(attribute) == expected
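# Added illustration (not part of the original test module): judging only from
# the parametrization above, make_verbose_name is expected to turn an attrs
# attribute of models.Case such as "cookies" or "query" into its capitalized
# display label. A minimal stand-in with that behaviour (an assumption, not
# schemathesis' real implementation) would be:
def _make_verbose_name_sketch(attribute):
    return attribute.name.capitalize()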
def test_display_summary(capsys, results_set, swagger_20):
# Given the Finished event
event = runner.events.Finished(results=results_set, schema=swagger_20, running_time=1.257)
# When `display_summary` is called
with pytest.raises(click.exceptions.Exit):
default.display_summary(event)
out = capsys.readouterr().out.strip()
# Then number of total tests & total running time should be displayed
assert "=== 1 passed in 1.26s ===" in out
# And it should be in green & bold style
assert click.style(click.unstyle(out), fg="green", bold=True) == out
| 42.728155
| 119
| 0.708324
|
771fe70eb4ba196c49a1d5e11e8fea4d6faf2653
| 3,654
|
py
|
Python
|
contact_test.py
|
kibetrono/Contact-List
|
d7449b9895bc7cfc6d3a2810d942f643c94ad0df
|
[
"Unlicense"
] | null | null | null |
contact_test.py
|
kibetrono/Contact-List
|
d7449b9895bc7cfc6d3a2810d942f643c94ad0df
|
[
"Unlicense"
] | null | null | null |
contact_test.py
|
kibetrono/Contact-List
|
d7449b9895bc7cfc6d3a2810d942f643c94ad0df
|
[
"Unlicense"
] | null | null | null |
import unittest # Importing the unittest module
from contact import Contact # Importing the contact class
import pyperclip
class TestContact(unittest.TestCase):
'''
Test class that defines test cases for the contact class behaviours.
Args:
unittest.TestCase: TestCase class that helps in creating test cases
'''
# Items up here .......
def setUp(self):
'''
Set up method to run before each test cases.
'''
self.new_contact = Contact("James", "Muriuki", "0712345678", "james@ms.com") # create contact object
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
Contact.contact_list = []
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_contact.first_name, "James")
self.assertEqual(self.new_contact.last_name, "Muriuki")
self.assertEqual(self.new_contact.phone_number, "0712345678")
self.assertEqual(self.new_contact.email, "james@ms.com")
def test_save_contact(self):
'''
test_save_contact test case to test if the contact object is saved into
the contact list
'''
self.new_contact.save_contact() # saving the new contact
self.assertEqual(len(Contact.contact_list), 1)
def test_save_multiple_contact(self):
'''
test_save_multiple_contact to check if we can save multiple contact
objects to our contact_list
'''
self.new_contact.save_contact()
test_contact = Contact("Test", "user", "0712345678", "test@user.com") # new contact
test_contact.save_contact()
self.assertEqual(len(Contact.contact_list), 2)
def test_delete_contact(self):
'''
test_delete_contact to test if we can remove a contact from our contact list
'''
self.new_contact.save_contact()
test_contact = Contact("Test", "user", "0712345678", "test@user.com") # new contact
test_contact.save_contact()
self.new_contact.delete_contact() # Deleting a contact object
self.assertEqual(len(Contact.contact_list), 1)
def test_find_contact_by_number(self):
'''
test to check if we can find a contact by phone number and display information
'''
self.new_contact.save_contact()
test_contact = Contact("Test", "user", "0711223344", "test@user.com") # new contact
test_contact.save_contact()
found_contact = Contact.find_by_number("0711223344")
self.assertEqual(found_contact.email, test_contact.email)
def test_contact_exists(self):
'''
        test to check that contact_exist returns a Boolean telling whether a contact with the given phone number has been saved.
'''
self.new_contact.save_contact()
test_contact = Contact("Test", "user", "0711223344", "test@user.com") # new contact
test_contact.save_contact()
contact_exists = Contact.contact_exist("0711223344")
self.assertTrue(contact_exists)
def test_display_all_contacts(self):
'''
method that returns a list of all contacts saved
'''
self.assertEqual(Contact.display_contacts(), Contact.contact_list)
def test_copy_email(self):
'''
Test to confirm that we are copying the email address from a found contact
'''
self.new_contact.save_contact()
Contact.copy_email("0712345678")
self.assertEqual(self.new_contact.email, pyperclip.paste())
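# Added sketch (not part of the original test file): a minimal Contact
# implementation that would satisfy the interface exercised by the tests above.
# The real class lives in contact.py and may differ; everything below is
# inferred from the assertions in this module.
class _ContactSketch:
    contact_list = []

    def __init__(self, first_name, last_name, phone_number, email):
        self.first_name = first_name
        self.last_name = last_name
        self.phone_number = phone_number
        self.email = email

    def save_contact(self):
        _ContactSketch.contact_list.append(self)

    def delete_contact(self):
        _ContactSketch.contact_list.remove(self)

    @classmethod
    def find_by_number(cls, number):
        for contact in cls.contact_list:
            if contact.phone_number == number:
                return contact
        return None

    @classmethod
    def contact_exist(cls, number):
        return cls.find_by_number(number) is not None

    @classmethod
    def display_contacts(cls):
        return cls.contact_list

    @classmethod
    def copy_email(cls, number):
        found = cls.find_by_number(number)
        if found is not None:
            pyperclip.copy(found.email)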
if __name__ == '__main__':
unittest.main()
| 32.625
| 109
| 0.648878
|
d0435cfdae221bf39d8392b12fdb7064f7bc9b8d
| 16,280
|
py
|
Python
|
toontown/racing/RaceGlobals.py
|
mghost99/open-toontown-py3
|
072c3f5c06e1aab2bf4186681a247319b5d25e80
|
[
"BSD-3-Clause"
] | null | null | null |
toontown/racing/RaceGlobals.py
|
mghost99/open-toontown-py3
|
072c3f5c06e1aab2bf4186681a247319b5d25e80
|
[
"BSD-3-Clause"
] | null | null | null |
toontown/racing/RaceGlobals.py
|
mghost99/open-toontown-py3
|
072c3f5c06e1aab2bf4186681a247319b5d25e80
|
[
"BSD-3-Clause"
] | null | null | null |
TrackSignDuration = 15
RaceCountdown = 3
MaxRacers = 4
MaxTickets = 99999
Practice = 0
ToonBattle = 1
Circuit = 2
Speedway = 0
Rural = 1
Urban = 2
RT_Speedway_1 = 0
RT_Speedway_1_rev = 1
RT_Rural_1 = 20
RT_Rural_1_rev = 21
RT_Urban_1 = 40
RT_Urban_1_rev = 41
RT_Speedway_2 = 60
RT_Speedway_2_rev = 61
RT_Rural_2 = 62
RT_Rural_2_rev = 63
RT_Urban_2 = 64
RT_Urban_2_rev = 65
KARTING_TICKETS_HOLIDAY_MULTIPLIER = 2
def getTrackGenre(trackId):
if trackId in (RT_Speedway_1,
RT_Speedway_1_rev,
RT_Speedway_2,
RT_Speedway_2_rev):
return Speedway
elif trackId in (RT_Rural_1,
RT_Rural_1_rev,
RT_Rural_2,
RT_Rural_2_rev):
return Rural
else:
return Urban
RT_Speedway_1_Gags = ((923.052, -1177.431, 0.024),
(926.099, -1187.345, 0.024),
(925.68, -1197.327, 0.024),
(925.169, -1209.502, 0.024),
(394.009, 209.219, 0.025),
(279.109, 279.744, 0.025),
(204.366, 316.238, 0.025),
(118.646, 358.009, 0.025),
(-1462.098, 791.722, 0.025),
(-1459.446, 730.064, 0.025),
(-1450.731, 666.811, 0.025),
(-1438.388, 615.1, 0.025))
RT_Speedway_2_Gags = ((-355.18, -2430.1, -0.126728),
(-343.456, -2421.43, -0.0116951),
(-329.644, -2411.06, -0.0169053),
(-315.054, -2402.91, -0.0800667),
(243.293, -906.412, 0.021832),
(216.555, -910.885, -0.146125),
(192.16, -915.93, -0.242366),
(165.941, -922.381, -0.247588),
(-840.626, 2405.96, 58.4195),
(-868.154, 2370.54, 56.7396),
(-896.126, 2332.55, 53.8607),
(-921.952, 2291.16, 49.8209))
RT_Speedway_1_rev_Gags = ((1364.601, -664.452, 0.025),
(1312.491, -588.218, 0.025),
(1251.775, -509.556, 0.025),
(1214.052, -461.743, 0.025),
(-976.044, 995.072, 0.025),
(-1043.917, 1018.78, 0.025),
(-1124.555, 1038.362, 0.025),
(-1187.95, 1047.006, 0.025),
(-1174.542, -208.968, 0.025),
(-1149.34, -270.698, 0.025),
(-1121.2, -334.367, 0.025),
(-1090.627, -392.662, 0.026))
RT_Rural_1_Gags = ((814.276, -552.928, 2.107),
(847.738, -551.97, 2.106),
(889.265, -549.569, 2.107),
(922.022, -554.813, 2.106),
(1791.42, 2523.91, 2.106),
(1754.14, 2540.25, 2.107),
(1689.66, 2557.28, 2.107),
(1614.01, 2577.16, 2.106),
(-1839.0, 654.477, 86.83),
(-1894.33, 640.125, 80.39),
(-1955.3, 625.09, 73.07),
(-2016.99, 611.746, 65.86))
RT_Rural_2_Gags = ((2001.53, 560.532, 198.912),
(2002.45, 574.292, 198.912),
(2003.42, 588.612, 198.912),
(2004, 602.849, 198.912),
(-2107.4, 2209.67, 198.913),
(-2086.13, 2224.31, 198.913),
(-2058.11, 2244.31, 198.912),
(-2023.85, 2268.77, 198.912),
(-331.746, -1010.57, 222.332),
(-358.595, -1007.68, 225.129),
(-388.556, -1004.87, 228.239),
(-410.122, -1003.03, 230.482),
(69.763, -2324.5, 198.912),
(63.5314, -2334.02, 198.913),
(57.9662, -2349.14, 198.913),
(51.8838, -2363.87, 198.913))
RT_Urban_1_Gags = ((51.9952, 2431.62, 55.7053),
(39.5407, 2421.64, 65.7053),
(27.7504, 2411.67, 55.7053),
(15.55, 2401.65, 65.7053),
(-1008.36, 2116.41, 0.0246798),
(-1050.31, 2099.78, 0.025),
(-1092.26, 2083.15, 0.0253202),
(-1134.21, 2066.52, 0.0256404),
(-1966.68, 1139.32, 1.76981),
(-1970.46, 1120.57, 1.76981),
(-1974.18, 1101.82, 1.76981),
(-1977.93, 1084.07, 1.76981),
(1419.05, -2987.18, 0.025),
(1411.09, -3004.09, 0.025),
(1403.13, -3021.01, 0.025),
(1395.17, -3037.92, 0.025),
(948.131, -1216.77, 0.025),
(935.545, -1204.09, 0.025),
(922.959, -1191.41, 0.025),
(909.959, -1177.41, 0.025))
RT_Urban_2_Gags = ((-2761.49, -3070.97, -0.255122),
(-2730.18, -3084.09, -0.255153),
(-2701.45, -3096.26, -0.255669),
(-2669.81, -3108.9, -0.255252),
(735.479, -423.828, 23.7334),
(759.026, -427.198, 23.0068),
(783.232, -430.659, 22.2569),
(809.914, -434.476, 21.4326),
(3100.09, 240.411, 23.4672),
(3089.09, 242.019, 23.5251),
(3077.68, 243.688, 23.6857),
(3064.82, 245.567, 23.8771),
(-10.7389, 2980.48, -0.255609),
(-41.2644, 2974.53, -0.255122),
(-69.8423, 2989.98, -0.255682),
(-102.331, 2986.1, -0.255637),
(-1978.67, 588.981, -0.255685),
(-1977.07, 560.797, -0.255415),
(-1948.58, 544.782, -0.255122),
(-1943.42, 510.262, -0.255866))
RT_Urban_1_rev_Gags = ((1034.43, -366.371, 0.025),
(1051.84, -360.473, 0.025),
(1069.25, -354.575, 0.025),
(1086.66, -348.677, 0.025),
(1849.66, -2807.21, 0.0246158),
(1858.55, -2795.99, 0.0246158),
(1867.44, -2784.76, 0.0246158),
(1876.33, -2773.53, 0.0246158),
(316.342, -44.9529, 0.025),
(305.173, -63.4405, 0.025),
(294.004, -81.9281, 0.025),
(282.835, -100.416, 0.025),
(-762.377, 2979.25, 0.025),
(-753.029, 2995.69, 0.025),
(-743.681, 3012.14, 0.025),
(-734.333, 3028.58, 0.025),
(470.628, 1828.32, 55.0),
(481.284, 1836.89, 55.0),
(491.941, 1845.47, 55.0),
(502.597, 1854.04, 55.0))
Speedway_1_Boosts = (((-320, 685, 1), (415, 0, 0)),)
Speedway_1_Rev_Boosts = (((-320, 685, 0.1), (235, 0, 0)),)
Speedway_2_Boosts = (((-120, 430, 1.0), (-50, 0, 0)),)
Speedway_2_Rev_Boosts = (((176, 625, 1.0), (130, 0, 0)),)
Rural_1_Boosts = (((3132.64, 859.56, 5.0), (384.44, 363.5, 0)), ((-3050.33, -1804.97, 207.7), (229.4, 353.25, 342.9)))
Rural_1_Rev_Boosts = (((3132.64, 859.56, 5.0), (197.1, -2.25, 0)), ((-3151.34, -1569.56, 200.621), (189.46, 182.75, 195.255)))
Rural_2_Boosts = (((873.255, -593.664, 199.5), (87.715, 0, 0)), ((-1747.62, 801.56, 199.5), (-126.516, 0, 0)))
Rural_2_Rev_Boosts = (((-428.004, -243.692, 324.516), (51.428, 6, 1)), ((-384.043, 211.62, 193.5), (-127.859, 1, 0)))
Urban_1_Boosts = (((677.057, 1618.24, 0.025), (35.9995, 0, 0)), ((-2250.35, 1618.1, 0.0241526), (-154.8, 0, 0)), ((400.13, -1090.26, 0.025), (-175.204, 0, 0)))
Urban_1_Rev_Boosts = (((488.739, -2055.07, 0.025), (3.59753, 0, 0)), ((-1737.29, 588.138, 0.025), (26.3975, 0, 0)), ((-212.314, 2638.34, 0.025), (-128.404, 0, 0)))
Urban_2_Boosts = (((358.134, -1655.42, 0.3), (-4.95, 1, 0)), ((2058.77, 2560.03, 0.3), (77.31, 0, 0)), ((-3081.33, -1037.55, 0.25), (177.359, 0, 0)))
Urban_2_Rev_Boosts = (((-2007.38, 484.878, 0.25), (30.9102, 0, 0)), ((2646.51, 1455.15, 0.25), (-120.172, 0, 0)), ((-472.215, -2048.21, 0.25), (136.192, 0, 0)))
def RaceInfo2RacePadId(trackId, trackType):
rev = trackId % 2
if not rev:
if trackType == Practice:
padId = 0
else:
padId = 2
elif trackType == Practice:
padId = 1
else:
padId = 3
return padId
def getTrackGenreString(genreId):
genreStrings = ['Speedway', 'Country', 'City']
return genreStrings[genreId].lower()
def getTunnelSignName(genreId, padId):
if genreId == 2 and padId == 0:
return 'tunne1l_citysign'
elif genreId == 1 and padId == 0:
return 'tunnel_countrysign1'
else:
return 'tunnel%s_%ssign' % (padId + 1, getTrackGenreString(genreId))
RacePadId2RaceInfo = {0: (0, Practice, 3),
1: (1, Practice, 3),
2: (0, ToonBattle, 3),
3: (1, ToonBattle, 3)}
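# Added example (a sketch, not part of the original module): the pad-id helper
# above and this table line up for the four canonical pads. For instance
#     RaceInfo2RacePadId(RT_Speedway_1, Practice)       -> 0
#     RaceInfo2RacePadId(RT_Speedway_1, ToonBattle)     -> 2
#     RaceInfo2RacePadId(RT_Speedway_1_rev, ToonBattle) -> 3
# and RacePadId2RaceInfo[2] == (0, ToonBattle, 3), where the leading 0/1 is the
# forward/reverse index later fed to getTrackListByType.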
def getGenreFromString(string):
if string == 'town':
return Urban
elif string == 'stadium':
return Speedway
else:
return Rural
def getTrackListByType(genre, type):
genreDict = {Urban: [[RT_Urban_1, RT_Urban_2], [RT_Urban_1_rev, RT_Urban_2_rev]],
Rural: [[RT_Rural_1, RT_Rural_2], [RT_Rural_1_rev, RT_Rural_2_rev]],
Speedway: [[RT_Speedway_1, RT_Speedway_2], [RT_Speedway_1_rev, RT_Speedway_2_rev]]}
trackIdList = genreDict.get(genre)
return trackIdList[type]
def getCanonicalPadId(padId):
return padId % 4
def getNextRaceInfo(prevTrackId, genreString, padId):
genre = getGenreFromString(genreString)
cPadId = getCanonicalPadId(padId)
raceInfo = RacePadId2RaceInfo.get(cPadId)
trackList = getTrackListByType(genre, raceInfo[0])
if trackList.count(prevTrackId) == 0:
trackId = trackList[1]
else:
index = trackList.index(prevTrackId)
index += 1
index %= len(trackList)
trackId = trackList[index]
return (trackId, raceInfo[1], raceInfo[2])
TrackPath = 'phase_6/models/karting/'
TrackDict = {RT_Speedway_1: (TrackPath + 'RT_SpeedwayA',
240.0,
115.0,
(50, 500),
RT_Speedway_1_Gags,
Speedway_1_Boosts,
1.0,
'GS_Race_SS.ogg',
(0.01, 0.015)),
RT_Speedway_1_rev: (TrackPath + 'RT_SpeedwayA',
240.0,
115.0,
(50, 500),
RT_Speedway_1_rev_Gags,
Speedway_1_Rev_Boosts,
1.0,
'GS_Race_SS.ogg',
(0.01, 0.015)),
RT_Speedway_2: (TrackPath + 'RT_SpeedwayB',
335.0,
210.0,
(75, 1000),
RT_Speedway_2_Gags,
Speedway_2_Boosts,
1.0,
'GS_Race_SS.ogg',
(0.01, 0.015)),
RT_Speedway_2_rev: (TrackPath + 'RT_SpeedwayB',
335.0,
210.0,
(75, 1000),
RT_Speedway_2_Gags,
Speedway_2_Rev_Boosts,
1.0,
'GS_Race_SS.ogg',
(0.01, 0.015)),
RT_Rural_1: (TrackPath + 'RT_RuralB',
360.0,
230.0,
(100, 500),
RT_Rural_1_Gags,
Rural_1_Boosts,
0.75,
'GS_Race_RR.ogg',
(0.003, 0.004)),
RT_Rural_1_rev: (TrackPath + 'RT_RuralB',
360.0,
230.0,
(100, 500),
RT_Rural_1_Gags,
Rural_1_Rev_Boosts,
0.75,
'GS_Race_RR.ogg',
(0.003, 0.004)),
RT_Rural_2: (TrackPath + 'RT_RuralB2',
480.0,
360.0,
(150, 1000),
RT_Rural_2_Gags,
Rural_2_Boosts,
0.75,
'GS_Race_RR.ogg',
(0.003, 0.004)),
RT_Rural_2_rev: (TrackPath + 'RT_RuralB2',
480.0,
360.0,
(150, 1000),
RT_Rural_2_Gags,
Rural_2_Rev_Boosts,
0.75,
'GS_Race_RR.ogg',
(0.003, 0.004)),
RT_Urban_1: (TrackPath + 'RT_UrbanA',
480.0,
305.0,
(300, 500),
RT_Urban_1_Gags,
Urban_1_Boosts,
1.0,
'GS_Race_CC.ogg',
(0.002, 0.003)),
RT_Urban_1_rev: (TrackPath + 'RT_UrbanA',
480.0,
305.0,
(300, 500),
RT_Urban_1_rev_Gags,
Urban_1_Rev_Boosts,
1.0,
'GS_Race_CC.ogg',
(0.002, 0.003)),
RT_Urban_2: (TrackPath + 'RT_UrbanB',
480.0,
280.0,
(400, 1000),
RT_Urban_2_Gags,
Urban_2_Boosts,
1.0,
'GS_Race_CC.ogg',
(0.002, 0.003)),
RT_Urban_2_rev: (TrackPath + 'RT_UrbanB',
480.0,
280.0,
(400, 1000),
RT_Urban_2_Gags,
Urban_2_Rev_Boosts,
1.0,
'GS_Race_CC.ogg',
(0.002, 0.003))}
TrackIds = sorted(TrackDict.keys())
def getEntryFee(trackId, raceType):
fee = 0
if raceType == ToonBattle:
fee = TrackDict[trackId][3][0]
elif raceType == Circuit:
fee = TrackDict[trackId][3][1]
return fee
def getQualifyingTime(trackId):
return TrackDict[trackId][1]
def getDefaultRecordTime(trackId):
return TrackDict[trackId][2]
def getDefaultRecord(trackId):
return getDefaultRecordTime(trackId), 0, 1, 'Goofy'
Daily = 0
Weekly = 1
AllTime = 2
PeriodDict = {Daily: 10,
Weekly: 100,
AllTime: 1000}
PeriodIds = list(PeriodDict.keys())
NumRecordPeriods = len(PeriodIds)
NumRecordsPerPeriod = 10
Winnings = [3.0,
1.0,
0.5,
0.15]
PracticeWinnings = 20
SpeedwayQuals = 0
RuralQuals = 1
UrbanQuals = 2
SpeedwayWins = 3
RuralWins = 4
UrbanWins = 5
CircuitWins = 6
TwoPlayerWins = 7
ThreePlayerWins = 8
FourPlayerWins = 9
CircuitSweeps = 10
CircuitQuals = 11
QualsList = [SpeedwayQuals, RuralQuals, UrbanQuals]
WinsList = [SpeedwayWins, RuralWins, UrbanWins]
SpeedwayQuals1 = 0
SpeedwayQuals2 = 1
SpeedwayQuals3 = 2
RuralQuals1 = 3
RuralQuals2 = 4
RuralQuals3 = 5
UrbanQuals1 = 6
UrbanQuals2 = 7
UrbanQuals3 = 8
TotalQuals = 9
SpeedwayWins1 = 10
SpeedwayWins2 = 11
SpeedwayWins3 = 12
RuralWins1 = 13
RuralWins2 = 14
RuralWins3 = 15
UrbanWins1 = 16
UrbanWins2 = 17
UrbanWins3 = 18
TotalWins = 19
CircuitQuals1 = 20
CircuitQuals2 = 21
CircuitQuals3 = 22
CircuitWins1 = 23
CircuitWins2 = 24
CircuitWins3 = 25
CircuitSweeps1 = 26
CircuitSweeps2 = 27
CircuitSweeps3 = 28
GrandTouring = 29
NumTrophies = 30
TenTrophyCup = 30
TwentyTrophyCup = 31
ThirtyTrophyCup = 32
TrophyCups = [TenTrophyCup, TwentyTrophyCup, ThirtyTrophyCup]
NumCups = 3
SpeedwayQualsList = [SpeedwayQuals1, SpeedwayQuals2, SpeedwayQuals3]
RuralQualsList = [RuralQuals1, RuralQuals2, RuralQuals3]
UrbanQualsList = [UrbanQuals1, UrbanQuals2, UrbanQuals3]
SpeedwayWinsList = [SpeedwayWins1, SpeedwayWins2, SpeedwayWins3]
RuralWinsList = [RuralWins1, RuralWins2, RuralWins3]
UrbanWinsList = [UrbanWins1, UrbanWins2, UrbanWins3]
CircuitWinsList = [CircuitWins1, CircuitWins2, CircuitWins3]
CircuitSweepsList = [CircuitSweeps1, CircuitSweeps2, CircuitSweeps3]
CircuitQualList = [CircuitQuals1, CircuitQuals2, CircuitQuals3]
AllQualsList = [SpeedwayQualsList, RuralQualsList, UrbanQualsList]
AllWinsList = [SpeedwayWinsList, RuralWinsList, UrbanWinsList]
TrophiesPerCup = NumTrophies // NumCups
QualifiedRaces = [1, 10, 100]
TotalQualifiedRaces = 100
WonRaces = [1, 10, 100]
TotalWonRaces = 100
WonCircuitRaces = [1, 5, 25]
SweptCircuitRaces = [1, 5, 25]
QualifiedCircuitRaces = [1, 5, 25]
LBSubscription = {'stadium': [(RT_Speedway_1, Daily),
(RT_Speedway_1, Weekly),
(RT_Speedway_1, AllTime),
(RT_Speedway_1_rev, Daily),
(RT_Speedway_1_rev, Weekly),
(RT_Speedway_1_rev, AllTime),
(RT_Speedway_2, Daily),
(RT_Speedway_2, Weekly),
(RT_Speedway_2, AllTime),
(RT_Speedway_2_rev, Daily),
(RT_Speedway_2_rev, Weekly),
(RT_Speedway_2_rev, AllTime)],
'country': [(RT_Rural_1, Daily),
(RT_Rural_1, Weekly),
(RT_Rural_1, AllTime),
(RT_Rural_1_rev, Daily),
(RT_Rural_1_rev, Weekly),
(RT_Rural_1_rev, AllTime),
(RT_Rural_2, Daily),
(RT_Rural_2, Weekly),
(RT_Rural_2, AllTime),
(RT_Rural_2_rev, Daily),
(RT_Rural_2_rev, Weekly),
(RT_Rural_2_rev, AllTime)],
'city': [(RT_Urban_1, Daily),
(RT_Urban_1, Weekly),
(RT_Urban_1, AllTime),
(RT_Urban_1_rev, Daily),
(RT_Urban_1_rev, Weekly),
(RT_Urban_1_rev, AllTime),
(RT_Urban_2, Daily),
(RT_Urban_2, Weekly),
(RT_Urban_2, AllTime),
(RT_Urban_2_rev, Daily),
(RT_Urban_2_rev, Weekly),
(RT_Urban_2_rev, AllTime)]}
BANANA = 1
TURBO = 2
ANVIL = 3
PIE = 4
GagFreq = [[PIE,
BANANA,
BANANA,
BANANA,
TURBO,
PIE],
[PIE,
BANANA,
BANANA,
TURBO,
ANVIL,
PIE],
[PIE,
BANANA,
TURBO,
TURBO,
ANVIL,
PIE],
[BANANA,
TURBO,
TURBO,
TURBO,
ANVIL,
PIE]]
CircuitLoops = [[RT_Speedway_1, RT_Rural_1, RT_Urban_1],
[RT_Speedway_1_rev, RT_Rural_1_rev, RT_Urban_1_rev],
[RT_Speedway_2, RT_Rural_2, RT_Urban_2],
[RT_Speedway_2_rev, RT_Rural_2_rev, RT_Urban_2_rev]]
CircuitPoints = [10,
8,
6,
4]
def getCircuitLoop(startingTrack):
circuitLoop = [startingTrack]
for loop in CircuitLoops:
if startingTrack in loop:
print(loop)
numTracks = len(loop)
tempLoop = loop * 2
startingIndex = tempLoop.index(startingTrack)
circuitLoop = tempLoop[startingIndex:startingIndex + numTracks]
break
return circuitLoop
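if __name__ == '__main__':
    # Added demo (a sketch, not part of the original module): starting a
    # circuit on the country track rotates its loop so that track comes first,
    # and the entry fee depends on the race type.
    print(getCircuitLoop(RT_Rural_1))              # [20, 40, 0] = Rural, Urban, Speedway
    print(getEntryFee(RT_Speedway_1, ToonBattle))  # 50
    print(getEntryFee(RT_Speedway_1, Circuit))     # 500
    print(getQualifyingTime(RT_Urban_2))           # 480.0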
Exit_UserReq = 0
Exit_Barrier = 1
Exit_Slow = 2
Exit_BarrierNoRefund = 3
| 29.228007
| 163
| 0.576597
|
b87f7635005b2af76d30e3a90c8f2db9a7872912
| 914
|
py
|
Python
|
ip/migrations/0024_finish_tag_migration.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
ip/migrations/0024_finish_tag_migration.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
ip/migrations/0024_finish_tag_migration.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# finish tag migration
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
TAG_MODELS = ["ip_vrfgroup", "ip_vrf", "ip_prefix", "ip_address", "ip_addressrange"]
def migrate(self):
# Drop old tags
for m in self.TAG_MODELS:
self.db.delete_column(m, "tags")
# Rename new tags
for m in self.TAG_MODELS:
self.db.rename_column(m, "tmp_tags", "tags")
# Create indexes
for m in self.TAG_MODELS:
self.db.execute('CREATE INDEX x_%s_tags ON "%s" USING GIN("tags")' % (m, m))
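# Added illustration (not part of the original migration): for the "ip_vrf"
# table, the statement generated in the loop above expands to
#     CREATE INDEX x_ip_vrf_tags ON "ip_vrf" USING GIN("tags")
# i.e. one GIN index per migrated table so tag lookups stay fast.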
| 35.153846
| 88
| 0.477024
|
a48961cd371f1da0061187177ccd7c4fb129b163
| 2,528
|
py
|
Python
|
Make_Dataset.py
|
huang-junhong/SIRSRGAN
|
a774416cd45a00982141a1571cb2a8a18bb05c86
|
[
"Apache-2.0"
] | null | null | null |
Make_Dataset.py
|
huang-junhong/SIRSRGAN
|
a774416cd45a00982141a1571cb2a8a18bb05c86
|
[
"Apache-2.0"
] | null | null | null |
Make_Dataset.py
|
huang-junhong/SIRSRGAN
|
a774416cd45a00982141a1571cb2a8a18bb05c86
|
[
"Apache-2.0"
] | null | null | null |
import os
import cv2
import numpy as np
import Make_Dataset_config
def mkdir(path):
folder=os.path.exists(path)
if not folder:
os.makedirs(path)
print(path,' Folder Created')
else:
print(path,' Already Exist')
def load_file_path(PATH):
filenames=[]
for root,dir,files in os.walk(PATH):
for file in files:
if os.path.splitext(file)[1]=='.jpg' or os.path.splitext(file)[1]=='.png' or os.path.splitext(file)[1]=='.bmp':
filenames.append(os.path.join(root,file))
filenames = sorted(filenames)
return filenames
def get_patch_img_rgb(img, patch_size, stride):
patch = []
temp = img.copy()
h, w, _ = temp.shape
h = h - h%patch_size
w = w - w%patch_size
for x in range(0, h, stride):
for y in range(0, w, stride):
if x+patch_size > h or y+patch_size > w:
continue
patch.append(temp[x:x+patch_size, y:y+patch_size,:])
return np.array(patch, np.float32)
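# Added example (not part of the original script): for a 512x512x3 image with
# patch_size=256 and stride=128, the loop above keeps x and y in {0, 128, 256}
# (384 is skipped because 384 + 256 > 512), so get_patch_img_rgb returns an
# array of shape (9, 256, 256, 3).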
def rot180(input):
temp=np.rot90(input)
temp=np.rot90(temp)
return temp
def rot270(input):
temp=np.rot90(input)
temp=np.rot90(temp)
temp=np.rot90(temp)
return temp
args = Make_Dataset_config.main()
HR_PATH = args.HR_PATH
LR_PATH = args.LR_PATH
HR_list = load_file_path(HR_PATH)
LR_list = load_file_path(LR_PATH)
HR_list = sorted(HR_list)
LR_list = sorted(LR_list)
mkdir(args.SAVE_PATH)
mkdir(args.SAVE_PATH+'/HR')
mkdir(args.SAVE_PATH+'/SRF4')
f_lr = args.SAVE_PATH + '/SRF4_PATH.txt'
f_hr = args.SAVE_PATH + '/HR_PATH.txt'
patch_count=0
for i in range(len(HR_list)):
print(i)
hr = cv2.imread(HR_list[i])
lr = cv2.imread(LR_list[i])
hr = get_patch_img_rgb(hr, 256, 128)
lr = get_patch_img_rgb(lr, 64, 32)
if len(hr) != len(lr):
print('IMG ERROR')
break
for patch_i in range(len(hr)):
patch_count +=1
hri = hr[patch_i]
lri = lr[patch_i]
cv2.imwrite(args.SAVE_PATH+'/HR/'+str(patch_count)+'.png', hri)
cv2.imwrite(args.SAVE_PATH+'/SRF4/'+str(patch_count)+'.png', lri)
with open(f_lr,'a') as file:
file.write(args.SAVE_PATH+'/SRF4/'+str(patch_count)+'.png'+'\n')
with open(f_hr,'a') as file:
file.write(args.SAVE_PATH+'/HR/'+str(patch_count)+'.png'+'\n')
print(patch_count)
print('Complete')
| 25.029703
| 124
| 0.582674
|
1eeb30e3cd27373a8ac5ea4e88ea59c4ba6686a7
| 7,538
|
py
|
Python
|
detect_fake_videos.py
|
Miguel000/FakeVideosDetector
|
2626b2cd9302320229030129e166c51c2d7862e9
|
[
"MIT"
] | 1
|
2020-12-27T16:37:21.000Z
|
2020-12-27T16:37:21.000Z
|
detect_fake_videos.py
|
Miguel000/FakeVideosDetector
|
2626b2cd9302320229030129e166c51c2d7862e9
|
[
"MIT"
] | 2
|
2022-01-13T03:51:53.000Z
|
2022-03-12T00:59:32.000Z
|
detect_fake_videos.py
|
Miguel000/FakeVideosDetector
|
2626b2cd9302320229030129e166c51c2d7862e9
|
[
"MIT"
] | null | null | null |
"""
Evaluates a folder of video files or a single file with a xception binary
classification network.
Usage:
python detect_from_video.py
-i <folder with video files or path to video file>
-m <path to model file>
-o <path to output folder, will write one or multiple output videos there>
Author: Andreas Rössler
"""
import os
import argparse
from os.path import join
import cv2
import dlib
import torch
import torch.nn as nn
from PIL import Image as pil_image
from tqdm import tqdm
from network.models import model_selection
from dataset.transform import xception_default_data_transforms
def get_boundingbox(face, width, height, scale=1.3, minsize=None):
"""
Expects a dlib face to generate a quadratic bounding box.
:param face: dlib face class
:param width: frame width
:param height: frame height
:param scale: bounding box size multiplier to get a bigger face region
:param minsize: set minimum bounding box size
:return: x, y, bounding_box_size in opencv form
"""
x1 = face.left()
y1 = face.top()
x2 = face.right()
y2 = face.bottom()
size_bb = int(max(x2 - x1, y2 - y1) * scale)
if minsize:
if size_bb < minsize:
size_bb = minsize
center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
# Check for out of bounds, x-y top left corner
x1 = max(int(center_x - size_bb // 2), 0)
y1 = max(int(center_y - size_bb // 2), 0)
# Check for too big bb size for given x, y
size_bb = min(width - x1, size_bb)
size_bb = min(height - y1, size_bb)
return x1, y1, size_bb
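# Added usage sketch (not part of the original file): for a hypothetical dlib
# rectangle with left=100, top=50, right=200, bottom=170 in a 640x480 frame,
#     get_boundingbox(dlib.rectangle(100, 50, 200, 170), 640, 480)
# returns (72, 32, 156): a 156 px square crop anchored at (72, 32), i.e. the
# 1.3x-enlarged box centred on the face and clamped to the image bounds.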
def preprocess_image(image, cuda=False):
"""
Preprocesses the image such that it can be fed into our network.
    During this process we invoke PIL to cast it into a PIL image.
    :param image: numpy image in opencv form (i.e., BGR and of shape
        [height, width, 3])
:return: pytorch tensor of shape [1, 3, image_size, image_size], not
necessarily casted to cuda
"""
# Revert from BGR
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Preprocess using the preprocessing function used during training and
# casting it to PIL image
preprocess = xception_default_data_transforms['test']
preprocessed_image = preprocess(pil_image.fromarray(image))
# Add first dimension as the network expects a batch
preprocessed_image = preprocessed_image.unsqueeze(0)
if cuda:
preprocessed_image = preprocessed_image.cuda()
return preprocessed_image
def predict_with_model(image, model, post_function=nn.Softmax(dim=1),
cuda=False):
"""
Predicts the label of an input image. Preprocesses the input image and
casts it to cuda if required
:param image: numpy image
:param model: torch model with linear layer at the end
:param post_function: e.g., softmax
:param cuda: enables cuda, must be the same parameter as the model
:return: prediction (1 = fake, 0 = real)
"""
# Preprocess
preprocessed_image = preprocess_image(image, cuda)
# Model prediction
output = model(preprocessed_image)
output = post_function(output)
# Cast to desired
_, prediction = torch.max(output, 1) # argmax
prediction = float(prediction.cpu().numpy())
return int(prediction), output
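# Added usage sketch (not part of the original file): given a BGR face crop as
# a numpy array (e.g. image[y:y+size, x:x+size] taken from a frame), a single
# prediction looks like
#     prediction, output = predict_with_model(cropped_face, model, cuda=False)
# where prediction is 1 for fake / 0 for real and output holds the softmax
# scores with shape [1, 2].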
def test_full_image_network(video_path, model_path, output_path, fast,
start_frame=0, end_frame=None, cuda=False):
"""
    Reads a video and evaluates a subset of frames with a detection network
that takes in a full frame. Outputs are only given if a face is present
and the face is highlighted using dlib.
:param video_path: path to video file
:param model_path: path to model file (should expect the full sized image)
:param output_path: path where the output video is stored
:param start_frame: first frame to evaluate
:param end_frame: last frame to evaluate
:param cuda: enable cuda
:return:
"""
print('Starting: {}'.format(video_path))
# Read and write
reader = cv2.VideoCapture(video_path)
video_fn = video_path.split('/')[-1].split('.')[0]+'.avi'
os.makedirs(output_path, exist_ok=True)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
fps = reader.get(cv2.CAP_PROP_FPS)
num_frames = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
writer = None
# Face detector
face_detector = dlib.get_frontal_face_detector()
# Load model
model, *_ = model_selection(modelname='xception', num_out_classes=2)
if model_path is not None:
model = torch.load(model_path, map_location="cuda" if torch.cuda.is_available() else "cpu")
print('Model found in {}'.format(model_path))
else:
print('No model found, initializing random model.')
if cuda:
model = model.cuda()
# Text variables
font_face = cv2.FONT_HERSHEY_SIMPLEX
thickness = 2
font_scale = 1
# Fake frames number
ff = 0
ffn = 0
# Frame numbers and length of output video
frame_num = 0
assert start_frame < num_frames - 1
end_frame = end_frame if end_frame else num_frames
pbar = tqdm(total=end_frame-start_frame)
while reader.isOpened():
_, image = reader.read()
if image is None:
break
if fast:
frame_num += 10
pbar.update(10)
else:
frame_num+= 1
pbar.update(1)
if frame_num < start_frame:
continue
# Image size
height, width = image.shape[:2]
# Init output writer
if writer is None:
writer = cv2.VideoWriter(join(output_path, video_fn), fourcc, fps,
(height, width)[::-1])
# 2. Detect with dlib
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_detector(gray, 1)
if len(faces):
# For now only take biggest face
face = faces[0]
# --- Prediction ---------------------------------------------------
# Face crop with dlib and bounding box scale enlargement
x, y, size = get_boundingbox(face, width, height)
cropped_face = image[y:y+size, x:x+size]
# Actual prediction using our model
prediction, output = predict_with_model(cropped_face, model,
cuda=cuda)
if prediction == 1:
ff += 1
ffn +=1
# ------------------------------------------------------------------
# Text and bb
x = face.left()
y = face.top()
w = face.right() - x
h = face.bottom() - y
label = 'fake' if prediction == 1 else 'real'
color = (0, 255, 0) if prediction == 0 else (0, 0, 255)
output_list = ['{0:.2f}'.format(float(x)) for x in
output.detach().cpu().numpy()[0]]
cv2.putText(image, str(output_list)+'=>'+label, (x, y+h+30),
font_face, font_scale,
color, thickness, 2)
# draw box over face
cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
if frame_num >= end_frame:
break
writer.write(image)
pbar.close()
    # Guard against division by zero when no face was ever detected
    p = ff / float(ffn) * 100 if ffn else 0.0
if writer is not None:
out = {}
writer.release()
out["score"] = p
out["file"] = video_fn
return out
else:
print('Input video file was empty')
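if __name__ == '__main__':
    # Added sketch (an assumption, not the original script's CLI): a minimal
    # driver of the kind the module docstring describes. The original argument
    # parser may differ in flag names and defaults.
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument('--video_path', '-i', type=str, required=True)
    p.add_argument('--model_path', '-m', type=str, default=None)
    p.add_argument('--output_path', '-o', type=str, default='.')
    p.add_argument('--fast', action='store_true')
    p.add_argument('--cuda', action='store_true')
    args = p.parse_args()

    if os.path.isdir(args.video_path):
        videos = [join(args.video_path, f) for f in os.listdir(args.video_path)]
    else:
        videos = [args.video_path]
    for video in videos:
        print(test_full_image_network(video, args.model_path, args.output_path,
                                      args.fast, cuda=args.cuda))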
| 32.213675
| 99
| 0.607721
|
36da7d042f24780f458fdead110d03d045ad481c
| 1,988
|
py
|
Python
|
pyhelpertool/HelperGraphics.py
|
dhufe/pyhelpertool
|
44d29112e9d34fa02f36c0b26d1b54f63ecdb0e4
|
[
"MIT"
] | null | null | null |
pyhelpertool/HelperGraphics.py
|
dhufe/pyhelpertool
|
44d29112e9d34fa02f36c0b26d1b54f63ecdb0e4
|
[
"MIT"
] | null | null | null |
pyhelpertool/HelperGraphics.py
|
dhufe/pyhelpertool
|
44d29112e9d34fa02f36c0b26d1b54f63ecdb0e4
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import seaborn as sns
def set_rcparams(width=6.69291, fontsize=16, for_article=True, for_beamer=False):
"""
Setting rcparams of matplotlib ready for publishing
"""
height = width / 1.618
if for_article or for_beamer:
params = {
#'backend': 'pdf',
'axes.labelsize': fontsize,
'font.size': fontsize,
'figure.figsize': (width, height),
'legend.fontsize': fontsize,
'axes.titlesize': fontsize,
'xtick.labelsize': fontsize,
'ytick.labelsize': fontsize,
            'xtick.major.pad': fontsize,
            'ytick.major.pad': fontsize,
'text.usetex': True,
'font.sans-serif' : 'Helvetica Neue',
'font.family': 'sans-serif',
'image.cmap' : 'viridis',
'image.interpolation' : 'bilinear',
'image.resample' : False }
#'font.serif': 'Times New Roman',
#'font.sans-serif': 'Times New Roman'}
# 'ps.usedistiller': 'xpdf'}
if for_beamer:
# params['font.family'] = 'sans-serif'
preamble = r'''\usepackage[cm]{sfmath}'''
plt.rc('text.latex', preamble=preamble)
if for_article or for_beamer:
plt.rcParams.update(params)
def set_style():
# This sets reasonable defaults for font size for
# a figure that will go in a paper
sns.set_context("paper")
# Set the font to be serif, rather than sans
sns.set(font='serif',style="ticks")
# Make the background white, and specify the
# specific font family
sns.set_style("white", {
"font.family": "serif",
"font.serif": ["Times", "Palatino", "serif"]
})
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
def set_size(fig, width = 8, height = 5):
fig.tight_layout()
fig.set_size_inches(width, height)
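if __name__ == '__main__':
    # Added usage sketch (not part of the original module). set_rcparams
    # enables text.usetex, so rendering assumes a working LaTeX installation;
    # set_style() is the seaborn-based alternative styling.
    set_rcparams(width=6.69291, fontsize=12, for_article=True)
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    ax.set_xlabel(r'$x$')
    ax.set_ylabel(r'$x^2$')
    set_size(fig, width=6.69291, height=4.0)
    fig.savefig('example.pdf')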
| 32.590164
| 81
| 0.581992
|
36c5ff08c17d22d66d660b6eaa9b1bd8f7e5b077
| 1,589
|
py
|
Python
|
azure-mgmt-storage/azure/mgmt/storage/v2017_10_01/models/storage_account_key.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-storage/azure/mgmt/storage/v2017_10_01/models/storage_account_key.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-storage/azure/mgmt/storage/v2017_10_01/models/storage_account_key.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountKey(Model):
"""An access key for the storage account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar key_name: Name of the key.
:vartype key_name: str
:ivar value: Base 64-encoded value of the key.
:vartype value: str
:ivar permissions: Permissions for the key -- read-only or full
permissions. Possible values include: 'Read', 'Full'
:vartype permissions: str or
~azure.mgmt.storage.v2017_10_01.models.KeyPermission
"""
_validation = {
'key_name': {'readonly': True},
'value': {'readonly': True},
'permissions': {'readonly': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'permissions': {'key': 'permissions', 'type': 'KeyPermission'},
}
def __init__(self, **kwargs):
super(StorageAccountKey, self).__init__(**kwargs)
self.key_name = None
self.value = None
self.permissions = None
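# Added usage sketch (an assumption, not part of the generated model file):
# instances of this class are normally returned by the management client rather
# than constructed directly, along the lines of
#     keys = storage_client.storage_accounts.list_keys(resource_group, account)
#     for key in keys.keys:
#         print(key.key_name, key.permissions)
# where storage_client is an azure.mgmt.storage StorageManagementClient.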
| 33.104167
| 76
| 0.591567
|